diff --git a/.azure-pipelines/bazel.yml b/.azure-pipelines/bazel.yml index 40ebbba15e2dc..52dfac3ba1cb3 100644 --- a/.azure-pipelines/bazel.yml +++ b/.azure-pipelines/bazel.yml @@ -3,6 +3,20 @@ parameters: displayName: "CI target" type: string default: bazel.release + - name: artifactSuffix + displayName: "Suffix of artifact" + type: string + default: "" + - name: rbe + displayName: "Enable RBE" + type: boolean + default: true + - name: managedAgent + type: boolean + default: true + - name: bazelBuildExtraOptions + type: string + default: "" steps: - task: Cache@2 @@ -13,6 +27,7 @@ steps: - bash: .azure-pipelines/cleanup.sh displayName: "Removing tools from agent" + condition: ${{ parameters.managedAgent }} - bash: | echo "disk space at beginning of build:" @@ -27,18 +42,22 @@ steps: }' | sudo tee /etc/docker/daemon.json sudo service docker restart displayName: "Enable IPv6" + condition: ${{ parameters.managedAgent }} - script: ci/run_envoy_docker.sh 'ci/do_ci.sh ${{ parameters.ciTarget }}' workingDirectory: $(Build.SourcesDirectory) env: ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory) - ENVOY_RBE: "true" - # Use https://docs.bazel.build/versions/master/command-line-reference.html#flag--experimental_repository_cache_hardlinks - # to save disk space. 
- BAZEL_BUILD_EXTRA_OPTIONS: "--config=remote-ci --jobs=$(RbeJobs) --curses=no --experimental_repository_cache_hardlinks" - BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com - BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance - GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey) + ${{ if parameters.rbe }}: + ENVOY_RBE: "1" + BAZEL_BUILD_EXTRA_OPTIONS: "--config=remote-ci --jobs=$(RbeJobs) ${{ parameters.bazelBuildExtraOptions }}" + BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com + BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance + GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey) + ${{ if eq(parameters.rbe, false) }}: + BAZEL_BUILD_EXTRA_OPTIONS: "${{ parameters.bazelBuildExtraOptions }}" + BAZEL_REMOTE_CACHE: $(LocalBuildCache) + displayName: "Run CI script" - bash: | @@ -57,5 +76,5 @@ steps: - task: PublishBuildArtifacts@1 inputs: pathtoPublish: "$(Build.StagingDirectory)/envoy" - artifactName: ${{ parameters.ciTarget }} + artifactName: ${{ parameters.ciTarget }}${{ parameters.artifactSuffix }} condition: always() diff --git a/.azure-pipelines/cleanup.sh b/.azure-pipelines/cleanup.sh index 0a3807d56ddd7..8fa8c11cfcb34 100755 --- a/.azure-pipelines/cleanup.sh +++ b/.azure-pipelines/cleanup.sh @@ -3,7 +3,7 @@ set -e # Temporary script to remove tools from Azure pipelines agent to create more disk space room. 
- -sudo apt-get purge -y 'ghc-*' 'zulu-*-azure-jdk' 'libllvm*' 'mysql-*' 'dotnet-*' 'cpp-*' +sudo apt-get update -y +sudo apt-get purge -y --no-upgrade 'ghc-*' 'zulu-*-azure-jdk' 'libllvm*' 'mysql-*' 'dotnet-*' 'libgl1' dpkg-query -Wf '${Installed-Size}\t${Package}\n' | sort -rn diff --git a/.azure-pipelines/linux.yml b/.azure-pipelines/linux.yml deleted file mode 120000 index ea3cc67f3da8f..0000000000000 --- a/.azure-pipelines/linux.yml +++ /dev/null @@ -1 +0,0 @@ -pipelines.yml \ No newline at end of file diff --git a/.azure-pipelines/pipelines.yml b/.azure-pipelines/pipelines.yml index 3395229a721c0..492884110c88b 100644 --- a/.azure-pipelines/pipelines.yml +++ b/.azure-pipelines/pipelines.yml @@ -14,7 +14,7 @@ jobs: - job: format dependsOn: [] # this removes the implicit dependency on previous stage and causes this to run in parallel. pool: - vmImage: "ubuntu-16.04" + vmImage: "ubuntu-18.04" steps: - task: Cache@2 inputs: @@ -44,12 +44,28 @@ jobs: condition: and(not(canceled()), or(succeeded(), ne(variables['Build.Reason'], 'PullRequest'))) timeoutInMinutes: 360 pool: - vmImage: "ubuntu-16.04" + vmImage: "ubuntu-18.04" steps: - template: bazel.yml parameters: ciTarget: bazel.release + - job: release_arm64 + displayName: "Linux-arm64 release" + dependsOn: ["format"] + # For master builds, continue even if format fails + condition: and(not(canceled()), or(succeeded(), ne(variables['Build.Reason'], 'PullRequest'))) + timeoutInMinutes: 360 + pool: "arm-large" + steps: + - template: bazel.yml + parameters: + managedAgent: false + ciTarget: bazel.release + rbe: false + artifactSuffix: ".arm64" + bazelBuildExtraOptions: "--sandbox_base=/tmp/sandbox_base" + - job: bazel displayName: "Linux-x64" dependsOn: ["release"] @@ -70,18 +86,47 @@ jobs: CI_TARGET: "bazel.compile_time_options" timeoutInMinutes: 360 pool: - vmImage: "Ubuntu 16.04" + vmImage: "ubuntu-18.04" steps: - template: bazel.yml parameters: ciTarget: $(CI_TARGET) - - job: docker - displayName: "Linux-x64 
docker" + - job: coverage + displayName: "Linux-x64" dependsOn: ["release"] + timeoutInMinutes: 360 + pool: "x64-large" + strategy: + maxParallel: 2 + matrix: + coverage: + CI_TARGET: "coverage" + fuzz_coverage: + CI_TARGET: "fuzz_coverage" + steps: + - template: bazel.yml + parameters: + managedAgent: false + ciTarget: bazel.$(CI_TARGET) + rbe: false + # /tmp/sandbox_base is a tmpfs in CI environment to optimize large I/O for coverage traces + bazelBuildExtraOptions: "--define=no_debug_info=1 --linkopt=-Wl,-s --test_env=ENVOY_IP_TEST_VERSIONS=v4only --sandbox_base=/tmp/sandbox_base" + + - script: ci/run_envoy_docker.sh 'ci/upload_gcs_artifact.sh /source/generated/$(CI_TARGET) $(CI_TARGET)' + displayName: "Upload $(CI_TARGET) Report to GCS" + env: + ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory) + GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey) + GCS_ARTIFACT_BUCKET: $(GcsArtifactBucket) + condition: always() + + - job: docker + displayName: "Linux multi-arch docker" + dependsOn: ["release","release_arm64"] condition: and(succeeded(), eq(variables['PostSubmit'], 'true'), ne(variables['Build.Reason'], 'PullRequest')) pool: - vmImage: "ubuntu-16.04" + vmImage: "ubuntu-18.04" steps: - task: DownloadBuildArtifacts@0 inputs: @@ -90,10 +135,17 @@ jobs: itemPattern: "bazel.release/envoy_binary.tar.gz" downloadType: single targetPath: $(Build.StagingDirectory) - + - task: DownloadBuildArtifacts@0 + inputs: + buildType: current + artifactName: "bazel.release.arm64" + itemPattern: "bazel.release.arm64/envoy_binary.tar.gz" + downloadType: single + targetPath: $(Build.StagingDirectory) - bash: | set -e - tar zxf $(Build.StagingDirectory)/bazel.release/envoy_binary.tar.gz + mkdir -p linux/amd64 && tar zxf $(Build.StagingDirectory)/bazel.release/envoy_binary.tar.gz -C ./linux/amd64 + mkdir -p linux/arm64 && tar zxf $(Build.StagingDirectory)/bazel.release.arm64/envoy_binary.tar.gz -C ./linux/arm64 ci/docker_ci.sh workingDirectory: $(Build.SourcesDirectory) env: @@ -114,6 
+166,7 @@ jobs: - script: ./ci/mac_ci_steps.sh displayName: "Run Mac CI" env: + BAZEL_BUILD_EXTRA_OPTIONS: --remote_download_toplevel BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey) @@ -130,24 +183,11 @@ jobs: pool: vmImage: "windows-latest" steps: - - powershell: | - .\ci\windows_ci_setup.ps1 - Write-Host "##vso[task.prependpath]$env:TOOLS_BIN_DIR\usr\bin" - Write-Host "##vso[task.prependpath]$env:VC_TOOLS_BIN_X64;$env:VC_CMAKE_PATH\CMake\bin;$env:VC_CMAKE_PATH\Ninja" - Write-Host "##vso[task.prependpath]$env:TOOLS_BIN_DIR" - displayName: "Install dependencies" - env: - TOOLS_BIN_DIR: $(Pipeline.Workspace)\bin - VC_CMAKE_PATH: "C:\\Program Files (x86)\\Microsoft Visual Studio\\2019\\Enterprise\\Common7\\IDE\\CommonExtensions\\Microsoft\\CMake" - VC_TOOLS_BIN_X64: "C:\\Program Files (x86)\\Microsoft Visual Studio\\2019\\Enterprise\\VC\\Tools\\MSVC\\14.25.28610\\bin\\HostX64\\x64" - - - bash: ci/windows_ci_steps.sh + - bash: ci/run_envoy_docker_windows.sh ci/windows_ci_steps.sh displayName: "Run Windows CI" env: - BAZEL_VC: "C:\\Program Files (x86)\\Microsoft Visual Studio\\2019\\Enterprise\\VC" - BAZEL_SH: $(Pipeline.Workspace)/bin/usr/bin/bash.exe + ENVOY_RBE: "true" + BAZEL_BUILD_EXTRA_OPTIONS: "--config=remote-ci --config=remote-msvc-cl --jobs=$(RbeJobs)" BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey) - MSYS2_ARG_CONV_EXCL: "*" - TMPDIR: $(Agent.TempDirectory) diff --git a/.bazelci/presubmit.yml b/.bazelci/presubmit.yml index 6aa45662598ca..ab83156fbc474 100644 --- a/.bazelci/presubmit.yml +++ b/.bazelci/presubmit.yml @@ -1,14 +1,30 @@ --- tasks: gcc: + name: "GCC" platform: ubuntu1804 build_targets: - "//source/exe:envoy-static" rbe: + name: "RBE" platform: ubuntu1804 test_targets: - - "//test/..." 
+ - "//test/common/common/..." + - "//test/integration/..." + - "//test/exe/..." test_flags: - - "--config=remote-clang" + - "--config=remote-clang-libc++" - "--config=remote-ci" - "--jobs=75" + coverage: + name: "Coverage" + platform: ubuntu1804 + shell_commands: + - "bazel/setup_clang.sh /usr/lib/llvm-10" + test_targets: + - "//test/common/common/..." + - "//test/integration/..." + - "//test/exe/..." + test_flags: + - "--config=coverage" + - "--config=clang" diff --git a/.bazelrc b/.bazelrc index f39013f270003..fa8d80a0242d7 100644 --- a/.bazelrc +++ b/.bazelrc @@ -11,7 +11,6 @@ startup --host_jvm_args=-Xmx2g build --workspace_status_command="bash bazel/get_workspace_status" -build --experimental_local_memory_estimate build --experimental_strict_action_env=true build --host_force_python=PY3 build --action_env=BAZEL_LINKLIBS=-l%:libstdc++.a @@ -22,6 +21,8 @@ build --enable_platform_specific_config # Enable position independent code, this option is not supported on Windows and default on on macOS. build:linux --copt=-fPIC +build:linux --cxxopt=-std=c++17 +build:linux --conlyopt=-fexceptions # We already have absl in the build, define absl=1 to tell googletest to use absl for backtrace. build --define absl=1 @@ -32,6 +33,9 @@ build --action_env=CXX build --action_env=LLVM_CONFIG build --action_env=PATH +# Skip system ICU linking. +build --@com_googlesource_googleurl//build_config:system_icu=0 + # Common flags for sanitizers build:sanitizer --define tcmalloc=disabled build:sanitizer --linkopt -ldl @@ -50,8 +54,7 @@ build:asan --define signal_trace=disabled build:asan --define ENVOY_CONFIG_ASAN=1 build:asan --copt -fsanitize=address,undefined build:asan --linkopt -fsanitize=address,undefined -# TODO(lizan): vptr and function requires C++ UBSAN runtime which we're not currently linking to. -# Enable them when bazel has better support for that or with explicit linker options. 
+# vptr and function sanitizer are enabled in clang-asan if it is set up via bazel/setup_clang.sh. build:asan --copt -fno-sanitize=vptr,function build:asan --linkopt -fno-sanitize=vptr,function build:asan --copt -DADDRESS_SANITIZER=1 @@ -65,6 +68,7 @@ build:clang-asan --config=asan build:clang-asan --linkopt -fuse-ld=lld # macOS ASAN/UBSAN +build:macos --cxxopt=-std=c++17 build:macos-asan --config=asan # Workaround, see https://github.com/bazelbuild/bazel/issues/6932 build:macos-asan --copt -Wno-macro-redefined @@ -81,8 +85,13 @@ build:clang-tsan --define ENVOY_CONFIG_TSAN=1 build:clang-tsan --copt -fsanitize=thread build:clang-tsan --linkopt -fsanitize=thread build:clang-tsan --linkopt -fuse-ld=lld +build:clang-tsan --build_tag_filters=-no_san,-no_tsan +build:clang-tsan --test_tag_filters=-no_san,-no_tsan # Needed due to https://github.com/libevent/libevent/issues/777 build:clang-tsan --copt -DEVENT__DISABLE_DEBUG_MODE +# https://github.com/abseil/abseil-cpp/issues/760 +# https://github.com/google/sanitizers/issues/953 +build:clang-tsan --test_env="TSAN_OPTIONS=report_atomic_races=0" # Clang MSAN - this is the base config for remote-msan and docker-msan. 
To run this config without # our build image, follow https://github.com/google/sanitizers/wiki/MemorySanitizerLibcxxHowTo @@ -110,17 +119,43 @@ build:sizeopt -c opt --copt -Os # Test options build --test_env=HEAPCHECK=normal --test_env=PPROF_PATH +# Coverage options +coverage --config=coverage +coverage --build_tests_only +build:coverage --action_env=BAZEL_USE_LLVM_NATIVE_COVERAGE=1 +build:coverage --action_env=GCOV=llvm-profdata +build:coverage --copt=-DNDEBUG +# 1.5x original timeout + 300s for trace merger in all categories +build:coverage --test_timeout=390,750,1500,5700 +build:coverage --define=ENVOY_CONFIG_COVERAGE=1 +build:coverage --cxxopt="-DENVOY_CONFIG_COVERAGE=1" +build:coverage --coverage_support=@envoy//bazel/coverage:coverage_support +build:coverage --test_env=CC_CODE_COVERAGE_SCRIPT=external/envoy/bazel/coverage/collect_cc_coverage.sh +build:coverage --test_env=HEAPCHECK= +build:coverage --combined_report=lcov +build:coverage --strategy=TestRunner=sandboxed,local +build:coverage --strategy=CoverageReport=sandboxed,local +build:coverage --experimental_use_llvm_covmap +build:coverage --collect_code_coverage +build:coverage --test_tag_filters=-nocoverage +build:coverage --instrumentation_filter="//source(?!/extensions/quic_listeners/quiche/platform)[/:],//include[/:]" +coverage:test-coverage --test_arg="-l trace" +coverage:fuzz-coverage --config=plain-fuzzer +coverage:fuzz-coverage --run_under=@envoy//bazel/coverage:fuzz_coverage_wrapper.sh + # Remote execution: https://docs.bazel.build/versions/master/remote-execution.html -build:rbe-toolchain --host_platform=@envoy_build_tools//toolchains:rbe_ubuntu_clang_platform -build:rbe-toolchain --platforms=@envoy_build_tools//toolchains:rbe_ubuntu_clang_platform build:rbe-toolchain --action_env=BAZEL_DO_NOT_DETECT_CPP_TOOLCHAIN=1 build:rbe-toolchain-clang --config=rbe-toolchain +build:rbe-toolchain-clang --platforms=@rbe_ubuntu_clang//config:platform +build:rbe-toolchain-clang 
--host_platform=@rbe_ubuntu_clang//config:platform build:rbe-toolchain-clang --crosstool_top=@rbe_ubuntu_clang//cc:toolchain build:rbe-toolchain-clang --extra_toolchains=@rbe_ubuntu_clang//config:cc-toolchain build:rbe-toolchain-clang --action_env=CC=clang --action_env=CXX=clang++ --action_env=PATH=/usr/sbin:/usr/bin:/sbin:/bin:/opt/llvm/bin build:rbe-toolchain-clang-libc++ --config=rbe-toolchain +build:rbe-toolchain-clang-libc++ --platforms=@rbe_ubuntu_clang_libcxx//config:platform +build:rbe-toolchain-clang-libc++ --host_platform=@rbe_ubuntu_clang_libcxx//config:platform build:rbe-toolchain-clang-libc++ --crosstool_top=@rbe_ubuntu_clang_libcxx//cc:toolchain build:rbe-toolchain-clang-libc++ --extra_toolchains=@rbe_ubuntu_clang_libcxx//config:cc-toolchain build:rbe-toolchain-clang-libc++ --action_env=CC=clang --action_env=CXX=clang++ --action_env=PATH=/usr/sbin:/usr/bin:/sbin:/bin:/opt/llvm/bin @@ -132,10 +167,21 @@ build:rbe-toolchain-msan --linkopt=-L/opt/libcxx_msan/lib build:rbe-toolchain-msan --linkopt=-Wl,-rpath,/opt/libcxx_msan/lib build:rbe-toolchain-msan --config=clang-msan +build:rbe-toolchain-tsan --linkopt=-L/opt/libcxx_tsan/lib +build:rbe-toolchain-tsan --linkopt=-Wl,-rpath,/opt/libcxx_tsan/lib +build:rbe-toolchain-tsan --config=clang-tsan + build:rbe-toolchain-gcc --config=rbe-toolchain +build:rbe-toolchain-gcc --platforms=@rbe_ubuntu_gcc//config:platform +build:rbe-toolchain-gcc --host_platform=@rbe_ubuntu_gcc//config:platform build:rbe-toolchain-gcc --crosstool_top=@rbe_ubuntu_gcc//cc:toolchain build:rbe-toolchain-gcc --extra_toolchains=@rbe_ubuntu_gcc//config:cc-toolchain +build:rbe-toolchain-msvc-cl --host_platform=@rbe_windows_msvc_cl//config:platform +build:rbe-toolchain-msvc-cl --platforms=@rbe_windows_msvc_cl//config:platform +build:rbe-toolchain-msvc-cl --crosstool_top=@rbe_windows_msvc_cl//cc:toolchain +build:rbe-toolchain-msvc-cl --extra_toolchains=@rbe_windows_msvc_cl//config:cc-toolchain + build:remote 
--spawn_strategy=remote,sandboxed,local build:remote --strategy=Javac=remote,sandboxed,local build:remote --strategy=Closure=remote,sandboxed,local @@ -144,6 +190,15 @@ build:remote --remote_timeout=7200 build:remote --auth_enabled=true build:remote --remote_download_toplevel +# Windows bazel does not allow sandboxed as a spawn strategy +build:remote-windows --spawn_strategy=remote,local +build:remote-windows --strategy=Javac=remote,local +build:remote-windows --strategy=Closure=remote,local +build:remote-windows --strategy=Genrule=remote,local +build:remote-windows --remote_timeout=7200 +build:remote-windows --auth_enabled=true +build:remote-windows --remote_download_toplevel + build:remote-clang --config=remote build:remote-clang --config=rbe-toolchain-clang @@ -157,9 +212,12 @@ build:remote-msan --config=remote build:remote-msan --config=rbe-toolchain-clang-libc++ build:remote-msan --config=rbe-toolchain-msan +build:remote-msvc-cl --config=remote-windows +build:remote-msvc-cl --config=rbe-toolchain-msvc-cl + # Docker sandbox # NOTE: Update this from https://github.com/envoyproxy/envoy-build-tools/blob/master/toolchains/rbe_toolchains_config.bzl#L8 -build:docker-sandbox --experimental_docker_image=envoyproxy/envoy-build-ubuntu:09a5a914c904faa39dbc641181cb43b68cabf626 +build:docker-sandbox --experimental_docker_image=envoyproxy/envoy-build-ubuntu:923df85a4ba7f30dcd0cb6b0c6d8d604f0e20f48 build:docker-sandbox --spawn_strategy=docker build:docker-sandbox --strategy=Javac=docker build:docker-sandbox --strategy=Closure=docker @@ -181,12 +239,20 @@ build:docker-msan --config=docker-sandbox build:docker-msan --config=rbe-toolchain-clang-libc++ build:docker-msan --config=rbe-toolchain-msan +build:docker-tsan --config=docker-sandbox +build:docker-tsan --config=rbe-toolchain-clang-libc++ +build:docker-tsan --config=rbe-toolchain-tsan + # CI configurations build:remote-ci --remote_cache=grpcs://remotebuildexecution.googleapis.com build:remote-ci 
--remote_executor=grpcs://remotebuildexecution.googleapis.com # Fuzz builds -build:asan-fuzzer --config=clang-asan +# -DFUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION is passed in in the bazel build target +# rules for fuzz tests. Passing it in the CLI will cause dependencies to be build +# with the macro. Causing issues in RouteMatcherTest.TestRoutes that expect prod +# behavior from RE2 library. +build:asan-fuzzer --config=asan build:asan-fuzzer --define=FUZZING_ENGINE=libfuzzer build:asan-fuzzer --copt=-fsanitize=fuzzer-no-link build:asan-fuzzer --copt=-fno-omit-frame-pointer @@ -195,15 +261,10 @@ build:asan-fuzzer --test_env=UBSAN_OPTIONS=print_stacktrace=1 # Fuzzing without ASAN. This is useful for profiling fuzzers without any ASAN artifacts. build:plain-fuzzer --define=FUZZING_ENGINE=libfuzzer -build:plain-fuzzer --copt=-DFUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION -build:plain-fuzzer --copt=-fsanitize=fuzzer-no-link +build:plain-fuzzer --define ENVOY_CONFIG_ASAN=1 # Compile database generation config -# We don't care about built binaries so always strip and use fastbuild. 
-build:compdb -c fastbuild -build:compdb --strip=always build:compdb --build_tag_filters=-nocompdb -build:compdb --define=ENVOY_CONFIG_COMPILATION_DATABASE=1 # Windows build quirks build:windows --action_env=TMPDIR @@ -218,6 +279,7 @@ build:windows --define manual_stamp=manual_stamp build:windows --copt="-DCARES_STATICLIB" build:windows --copt="-DNGHTTP2_STATICLIB" build:windows --copt="-DCURL_STATICLIB" +build:windows --cxxopt="/std:c++17" # Required to work around build defects on Windows MSVC cl # Unguarded gcc pragmas in quiche are not recognized by MSVC diff --git a/.bazelversion b/.bazelversion index 4a36342fcab70..47b322c971c3c 100644 --- a/.bazelversion +++ b/.bazelversion @@ -1 +1 @@ -3.0.0 +3.4.1 diff --git a/.circleci/config.yml b/.circleci/config.yml index aa6ba5f7ac456..a9f9145da241e 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -4,8 +4,8 @@ executors: ubuntu-build: description: "A regular build executor based on ubuntu image" docker: - # NOTE: Update this from https://github.com/envoyproxy/envoy-build-tools/blob/master/toolchains/rbe_toolchains_config.bzl#L7 - - image: envoyproxy/envoy-build-ubuntu@sha256:3788a87461f2b3dc8048ad0ce5df40438a56e0a8f1a4ab0f61b4ef0d8c11ff1f + # NOTE: Update this from https://github.com/envoyproxy/envoy-build-tools/blob/master/toolchains/rbe_toolchains_config.bzl#L8 + - image: envoyproxy/envoy-build-ubuntu:923df85a4ba7f30dcd0cb6b0c6d8d604f0e20f48 resource_class: xlarge working_directory: /source @@ -45,33 +45,6 @@ jobs: - "f6:f9:df:90:9c:4b:5f:9c:f4:69:fd:42:94:ff:88:24" - run: ci/filter_example_mirror.sh - coverage: - executor: ubuntu-build - steps: - - run: rm -rf /home/circleci/project/.git # CircleCI git caching is likely broken - - checkout - - run: - command: - ci/do_circle_ci.sh bazel.coverage - no_output_timeout: 60m - - persist_to_workspace: - root: /build/envoy/generated - paths: - - coverage - - store_artifacts: - path: /build/envoy/generated - destination: / - - coverage_publish: - docker: - - 
image: google/cloud-sdk - steps: - - run: rm -rf /home/circleci/project/.git # CircleCI git caching is likely broken - - checkout - - attach_workspace: - at: /build/envoy/generated - - run: ci/coverage_publish.sh - docs: executor: ubuntu-build steps: @@ -92,9 +65,6 @@ workflows: - api - go_control_plane_mirror - filter_example_mirror - - coverage - - coverage_publish: - requires: [coverage] - docs: filters: tags: diff --git a/.clang-tidy b/.clang-tidy index 93d48258a9ae9..693858657d471 100644 --- a/.clang-tidy +++ b/.clang-tidy @@ -1,4 +1,5 @@ -Checks: '-clang-analyzer-optin.cplusplus.UninitializedObject, +Checks: '-clang-analyzer-core.NonNullParamChecker, + -clang-analyzer-optin.cplusplus.UninitializedObject, abseil-duration-*, abseil-faster-strsplit-delimiter, abseil-no-namespace, diff --git a/.devcontainer/.gitignore b/.devcontainer/.gitignore new file mode 100644 index 0000000000000..55abd6a0566cf --- /dev/null +++ b/.devcontainer/.gitignore @@ -0,0 +1 @@ +devcontainer.env diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile new file mode 100644 index 0000000000000..53d721238dde6 --- /dev/null +++ b/.devcontainer/Dockerfile @@ -0,0 +1,23 @@ +FROM gcr.io/envoy-ci/envoy-build:923df85a4ba7f30dcd0cb6b0c6d8d604f0e20f48 + +ARG USERNAME=vscode +ARG USER_UID=501 +ARG USER_GID=$USER_UID + +ENV BUILD_DIR=/build +ENV ENVOY_STDLIB=libstdc++ + +ENV DEBIAN_FRONTEND=noninteractive +RUN apt-get -y update \ + && apt-get -y install --no-install-recommends libpython2.7 net-tools psmisc vim 2>&1 \ + # Create a non-root user to use if preferred - see https://aka.ms/vscode-remote/containers/non-root-user. 
+ && groupadd --gid $USER_GID $USERNAME \ + && useradd -s /bin/bash --uid $USER_UID --gid $USER_GID -m $USERNAME -G pcap -d /build \ + # [Optional] Add sudo support for non-root user + && echo $USERNAME ALL=\(root\) NOPASSWD:ALL > /etc/sudoers.d/$USERNAME \ + && chmod 0440 /etc/sudoers.d/$USERNAME + +ENV DEBIAN_FRONTEND= +ENV PATH=/opt/llvm/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin + +ENV CLANG_FORMAT=/opt/llvm/bin/clang-format diff --git a/.devcontainer/README.md b/.devcontainer/README.md new file mode 100644 index 0000000000000..1cd314d2e4e02 --- /dev/null +++ b/.devcontainer/README.md @@ -0,0 +1,35 @@ +# Envoy Dev Container (experimental) + +This directory contains some experimental tools for Envoy Development in [VSCode Remote - Containers](https://code.visualstudio.com/docs/remote/containers). + +## How to use + +Open with VSCode with the Container extension installed. Follow the [official guide](https://code.visualstudio.com/docs/remote/containers) to open this +repository directly from GitHub or from checked-out source tree. + +After opening, run the `Refresh Compilation Database` task to generate compilation database to navigate in source code. +This will run partial build of Envoy and may take a while depends on the machine performance. +This task is needed to run everytime after: +- Changing a BUILD file that add/remove files from a target, changes dependencies +- Changing API proto files + +## Advanced Usages + +### Using Remote Build Execution + +Write the following content to `devcontainer.env` and rebuild the container. The key will be persisted in the container's `~/.bazelrc`. 
+ +``` +GCP_SERVICE_ACCOUNT_KEY= +BAZEL_REMOTE_INSTANCE= +BAZEL_REMOTE_CACHE=grpcs://remotebuildexecution.googleapis.com +BAZEL_BUILD_EXTRA_OPTIONS=--config=remote-ci --config=remote --jobs= +``` + +By default the `--config=remote` implies [`--remote_download_toplevel`](https://docs.bazel.build/versions/master/command-line-reference.html#flag--remote_download_toplevel), +change this to `minimal` or `all` depending on where you're running the container by adding them to `BAZEL_BUILD_EXTRA_OPTIONS`. + +### Disk performance + +Docker for Mac/Windows is known to have disk performance issue, this makes formatting all files in the container very slow. +[Update the mount consistency to 'delegated'](https://code.visualstudio.com/docs/remote/containers-advanced#_update-the-mount-consistency-to-delegated-for-macos) is recommended. diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json new file mode 100644 index 0000000000000..462b00ee78d09 --- /dev/null +++ b/.devcontainer/devcontainer.json @@ -0,0 +1,44 @@ +{ + "name": "Envoy Dev", + "dockerFile": "Dockerfile", + "runArgs": [ + "--user=vscode", + "--cap-add=SYS_PTRACE", + "--cap-add=NET_RAW", + "--cap-add=NET_ADMIN", + "--security-opt=seccomp=unconfined", + "--volume=${env:HOME}:${env:HOME}", + "--volume=envoy-build:/build", + // Uncomment next line if you have devcontainer.env + // "--env-file=.devcontainer/devcontainer.env" + ], + "settings": { + "terminal.integrated.shell.linux": "/bin/bash", + "bazel.buildifierFixOnFormat": true, + "clangd.path": "/opt/llvm/bin/clangd", + "python.pythonPath": "/usr/bin/python3", + "python.formatting.provider": "yapf", + "python.formatting.yapfArgs": [ + "--style=${workspaceFolder}/tools/code_format/.style.yapf" + ], + "files.exclude": { + "**/.clangd/**": true, + "**/bazel-*/**": true + }, + "files.watcherExclude": { + "**/.clangd/**": true, + "**/bazel-*/**": true + } + }, + "remoteUser": "vscode", + "containerUser": "vscode", + "postCreateCommand": 
".devcontainer/setup.sh", + "extensions": [ + "github.vscode-pull-request-github", + "zxh404.vscode-proto3", + "bazelbuild.vscode-bazel", + "llvm-vs-code-extensions.vscode-clangd", + "webfreak.debug", + "ms-python.python" + ] +} diff --git a/.devcontainer/setup.sh b/.devcontainer/setup.sh new file mode 100755 index 0000000000000..9a8e4ab5ac976 --- /dev/null +++ b/.devcontainer/setup.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash + +. ci/setup_cache.sh +trap - EXIT # Don't remove the key file written into a temporary file + +BAZELRC_FILE=~/.bazelrc bazel/setup_clang.sh /opt/llvm + +# Use generated toolchain config because we know the base container is the one we're using in RBE. +# Not using libc++ here because clangd will raise some tidy issue in libc++ header as of version 9. +echo "build --config=rbe-toolchain-clang" >> ~/.bazelrc +echo "build ${BAZEL_BUILD_EXTRA_OPTIONS}" | tee -a ~/.bazelrc + +# Ideally we want this line so bazel doesn't pollute things outside of the devcontainer, but some of +# API tooling (proto_sync) depends on symlink like bazel-bin. +# TODO(lizan): Fix API tooling and enable this again +#echo "build --symlink_prefix=/" >> ~/.bazelrc + +[[ ! 
-z "${BUILD_DIR}" ]] && sudo chown -R "$(id -u):$(id -g)" ${BUILD_DIR} diff --git a/.gitattributes b/.gitattributes index bed26d270e074..03203a47dda8d 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1,10 +1,6 @@ /docs/root/version_history/current.rst merge=union -*.generated.pb_text linguist-generated=true -*.generated.pb_text -diff -merge +/api/envoy/**/v4alpha/* linguist-generated=true /generated_api_shadow/envoy/** linguist-generated=true -/generated_api_shadow/envoy/** -diff -merge /generated_api_shadow/bazel/** linguist-generated=true -/generated_api_shadow/bazel/** -diff -merge *.svg binary /test/**/*_corpus/* linguist-generated=true -/test/**/*_corpus/* -diff -merge diff --git a/.github/workflows/codeql-daily.yml b/.github/workflows/codeql-daily.yml new file mode 100644 index 0000000000000..d947fa50a5a65 --- /dev/null +++ b/.github/workflows/codeql-daily.yml @@ -0,0 +1,55 @@ +on: + schedule: + - cron: '0 12 * * 4' + +jobs: + CodeQL-Build: + + strategy: + fail-fast: false + + # CodeQL runs on ubuntu-latest and windows-latest + runs-on: ubuntu-latest + + steps: + - name: Checkout repository + uses: actions/checkout@v2 + with: + # We must fetch at least the immediate parents so that if this is + # a pull request then we can checkout the head. + fetch-depth: 2 + + # If this run was triggered by a pull request event, then checkout + # the head of the pull request instead of the merge commit. + - run: git checkout HEAD^2 + if: ${{ github.event_name == 'pull_request' }} + + # Initializes the CodeQL tools for scanning. 
+ - name: Initialize CodeQL + uses: github/codeql-action/init@v1 + # Override language selection by uncommenting this and choosing your languages + with: + languages: cpp + + - name: Install deps + shell: bash + run: | + sudo apt-get update && sudo apt-get install libtool cmake automake autoconf make ninja-build curl unzip virtualenv openjdk-11-jdk build-essential libc++1 + mkdir -p bin/clang10 + cd bin/clang10 + wget https://github.com/llvm/llvm-project/releases/download/llvmorg-10.0.0/clang+llvm-10.0.0-x86_64-linux-gnu-ubuntu-18.04.tar.xz + tar -xf clang+llvm-10.0.0-x86_64-linux-gnu-ubuntu-18.04.tar.xz --strip-components 1 + export PATH=bin/clang10/bin:$PATH + + - name: Build + run: | + bazel/setup_clang.sh bin/clang10 + bazelisk shutdown + bazelisk build -c fastbuild --spawn_strategy=local --discard_analysis_cache --nouse_action_cache --config clang --config libc++ //source/common/http/... + + - name: Clean Artifacts + run: | + git clean -xdf + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v1 diff --git a/.github/workflows/codeql-push.yml b/.github/workflows/codeql-push.yml new file mode 100644 index 0000000000000..639ef7ce14422 --- /dev/null +++ b/.github/workflows/codeql-push.yml @@ -0,0 +1,57 @@ +on: + push: + paths: + - 'source/common/**' + pull_request: + +jobs: + CodeQL-Build: + + strategy: + fail-fast: false + + # CodeQL runs on ubuntu-latest and windows-latest + runs-on: ubuntu-latest + + steps: + - name: Checkout repository + uses: actions/checkout@v2 + with: + # We must fetch at least the immediate parents so that if this is + # a pull request then we can checkout the head. + fetch-depth: 2 + + # If this run was triggered by a pull request event, then checkout + # the head of the pull request instead of the merge commit. + - run: git checkout HEAD^2 + if: ${{ github.event_name == 'pull_request' }} + + # Initializes the CodeQL tools for scanning. 
+ - name: Initialize CodeQL + uses: github/codeql-action/init@v1 + # Override language selection by uncommenting this and choosing your languages + with: + languages: cpp + + - name: Install deps + shell: bash + run: | + sudo apt-get update && sudo apt-get install libtool cmake automake autoconf make ninja-build curl unzip virtualenv openjdk-11-jdk build-essential libc++1 + mkdir -p bin/clang10 + cd bin/clang10 + wget https://github.com/llvm/llvm-project/releases/download/llvmorg-10.0.0/clang+llvm-10.0.0-x86_64-linux-gnu-ubuntu-18.04.tar.xz + tar -xf clang+llvm-10.0.0-x86_64-linux-gnu-ubuntu-18.04.tar.xz --strip-components 1 + export PATH=bin/clang10/bin:$PATH + + - name: Build + run: | + bazel/setup_clang.sh bin/clang10 + bazelisk shutdown + bazelisk build -c fastbuild --spawn_strategy=local --discard_analysis_cache --nouse_action_cache --config clang --config libc++ //source/common/http/http1:codec_lib //source/common/http/http2:codec_lib + + - name: Clean Artifacts + run: | + git clean -xdf + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v1 diff --git a/.gitignore b/.gitignore index 7b7c6ff04d58b..134967bc2bb78 100644 --- a/.gitignore +++ b/.gitignore @@ -1,7 +1,6 @@ /bazel-* BROWSE /build -/build_* *.bzlc .cache .clangd @@ -34,3 +33,4 @@ clang.bazelrc user.bazelrc CMakeLists.txt cmake-build-debug +/linux diff --git a/.vscode/.gitignore b/.vscode/.gitignore new file mode 100644 index 0000000000000..c2393f450708d --- /dev/null +++ b/.vscode/.gitignore @@ -0,0 +1,2 @@ +settings.json +launch.json diff --git a/.vscode/tasks.json b/.vscode/tasks.json new file mode 100644 index 0000000000000..fe0a5963698e7 --- /dev/null +++ b/.vscode/tasks.json @@ -0,0 +1,31 @@ +{ + // See https://go.microsoft.com/fwlink/?LinkId=733558 + // for the documentation about the tasks.json format + "version": "2.0.0", + "tasks": [ + { + "label": "Build All Tests", + "type": "shell", + "command": "bazel build //test/...", + "group": { + "kind": "build", + "isDefault": 
true + } + }, + { + "label": "Run All Tests", + "type": "shell", + "command": "bazel test //test/...", + "group": { + "kind": "test", + "isDefault": true + } + }, + { + "label": "Refresh Compilation Database", + "type": "shell", + "command": "tools/vscode/refresh_compdb.sh", + "problemMatcher": [] + } + ] +} diff --git a/BUILD b/BUILD index 4dc2cadee42d0..8518272d537fb 100644 --- a/BUILD +++ b/BUILD @@ -1,6 +1,36 @@ +load( + "@envoy_build_config//:extensions_build_config.bzl", + "ADDITIONAL_VISIBILITY", +) + licenses(["notice"]) # Apache 2 exports_files([ "VERSION", ".clang-format", ]) + +# These two definitions exist to help reduce Envoy upstream core code depending on extensions. +# To avoid visibility problems, one can extend ADDITIONAL_VISIBILITY in source/extensions/extensions_build_config.bzl +# +# TODO(#9953) //test/config_test:__pkg__ should probably be split up and removed. +# TODO(#9953) the config fuzz tests should be moved somewhere local and //test/config_test and //test/server removed. 
+package_group( + name = "extension_config", + packages = [ + "//source/exe", + "//source/extensions/...", + "//test/config_test", + "//test/extensions/...", + "//test/server", + "//test/server/config_validation", + ] + ADDITIONAL_VISIBILITY, +) + +package_group( + name = "extension_library", + packages = [ + "//source/extensions/...", + "//test/extensions/...", + ] + ADDITIONAL_VISIBILITY, +) diff --git a/CODEOWNERS b/CODEOWNERS index 37e376e77e79d..3c6ccecfac91c 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -6,6 +6,9 @@ /api/ @envoyproxy/api-shepherds # access loggers /*/extensions/access_loggers/common @auni53 @zuercher +# compression extensions +/*/extensions/compression/common/compressor @rojkov @junr03 +/*/extensions/compression/gzip/compressor @rojkov @junr03 # csrf extension /*/extensions/filters/http/csrf @dschaller @mattklein123 # original_src http filter extension @@ -16,10 +19,13 @@ extensions/filters/common/original_src @snowp @klarose # dubbo_proxy extension /*/extensions/filters/network/dubbo_proxy @zyfjeff @lizan +# rocketmq_proxy extension +/*/extensions/filters/network/rocketmq_proxy @aaron-ai @lizhanhui @lizan # thrift_proxy extension -/*/extensions/filters/network/thrift_proxy @zuercher @brian-pane +/*/extensions/filters/network/thrift_proxy @zuercher @rgs1 # compressor used by http compression filters /*/extensions/filters/http/common/compressor @gsagula @rojkov @dio +/*/extensions/filters/http/compressor @rojkov @dio # jwt_authn http filter extension /*/extensions/filters/http/jwt_authn @qiwzhang @lizan # grpc_http1_reverse_bridge http filter extension @@ -30,6 +36,10 @@ extensions/filters/common/original_src @snowp @klarose /*/extensions/transport_sockets/alts @htuch @yangminzhu # tls transport socket extension /*/extensions/transport_sockets/tls @PiotrSikora @lizan +# proxy protocol socket extension +/*/extensions/transport_sockets/proxy_protocol @alyssawilk @wez470 +# common transport socket +/*/extensions/transport_sockets/common @alyssawilk 
@wez470 # sni_cluster extension /*/extensions/filters/network/sni_cluster @rshriram @lizan # sni_dynamic_forward_proxy extension @@ -62,15 +72,20 @@ extensions/filters/common/original_src @snowp @klarose /*/extensions/common/aws @lavignes @mattklein123 # adaptive concurrency limit extension. /*/extensions/filters/http/adaptive_concurrency @tonya11en @mattklein123 +# admission control extension. +/*/extensions/filters/http/admission_control @tonya11en @mattklein123 # http inspector /*/extensions/filters/listener/http_inspector @yxue @PiotrSikora @lizan # attribute context /*/extensions/filters/common/expr @kyessenov @yangminzhu @lizan # webassembly common extension /*/extensions/common/wasm @jplevyak @PiotrSikora @lizan +# common matcher +/*/extensions/common/matcher @mattklein123 @yangminzhu # common crypto extension /*/extensions/common/crypto @lizan @PiotrSikora @bdecoste /*/extensions/common/proxy_protocol @alyssawilk @wez470 +/*/extensions/common/sqlutils @cpakulski @dio /*/extensions/filters/http/grpc_http1_bridge @snowp @jose /*/extensions/filters/http/gzip @gsagula @dio /*/extensions/filters/http/fault @rshriram @alyssawilk @@ -86,6 +101,7 @@ extensions/filters/common/original_src @snowp @klarose /*/extensions/filters/listener/tls_inspector @piotrsikora @htuch /*/extensions/grpc_credentials/example @wozz @htuch /*/extensions/grpc_credentials/file_based_metadata @wozz @htuch +/*/extensions/internal_redirect @alyssawilk @penguingao /*/extensions/stat_sinks/dog_statsd @taiki45 @jmarantz /*/extensions/stat_sinks/hystrix @trabetti @jmarantz /*/extensions/stat_sinks/metrics_service @ramaraochavali @jmarantz @@ -108,3 +124,12 @@ extensions/filters/common/original_src @snowp @klarose /*/extensions/filters/network/local_ratelimit @mattklein123 @junr03 /*/extensions/filters/http/aws_request_signing @rgs1 @derekargueta @mattklein123 @marcomagdy /*/extensions/filters/http/aws_lambda @mattklein123 @marcomagdy @lavignes +# Compression +/*/extensions/compression/common 
@junr03 @rojkov +/*/extensions/compression/gzip @junr03 @rojkov +/*/extensions/filters/http/decompressor @rojkov @dio +# Core upstream code +extensions/upstreams/http @alyssawilk @snowp @mattklein123 +extensions/upstreams/http/http @alyssawilk @snowp @mattklein123 +extensions/upstreams/http/tcp @alyssawilk @mattklein123 +extensions/upstreams/http/default @alyssawilk @snowp @mattklein123 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index ffd804f25b816..7a35daf39100d 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -21,6 +21,23 @@ maximize the chances of your PR being merged. * See [STYLE.md](STYLE.md) +# Inclusive language policy + +The Envoy community has an explicit goal to be inclusive to all. As such, all PRs must adhere to the +following guidelines for all code, APIs, and documentation: + +* The following words and phrases are not allowed: + * *Whitelist*: use allowlist instead. + * *Blacklist*: use denylist or blocklist instead. + * *Master*: use primary instead. + * *Slave*: use secondary or replica instead. +* Documentation should be written in an inclusive style. The [Google developer + documentation](https://developers.google.com/style/inclusive-documentation) contains an excellent + reference on this topic. +* The above policy is not considered definitive and may be amended in the future as industry best + practices evolve. Additional comments on this topic may be provided by maintainers during code + review. + # Breaking change policy Both API and implementation stability are important to Envoy. Since the API is consumed by clients @@ -89,7 +106,10 @@ versioning guidelines: open it. * Any PR that changes user-facing behavior **must** have associated documentation in [docs](docs) as well as [release notes](docs/root/version_history/current.rst). API changes should be documented - inline with protos as per the [API contribution guidelines](api/CONTRIBUTING.md). + inline with protos as per the [API contribution guidelines](api/CONTRIBUTING.md). 
If a change applies + to multiple sections of the release notes, it should be noted in the first (most important) section + that applies. For instance, a bug fix that introduces incompatible behavior should be noted in + `Incompatible Behavior Changes` but not in `Bug Fixes`. * All code comments and documentation are expected to have proper English grammar and punctuation. If you are not a fluent English speaker (or a bad writer ;-)) please let us know and we will try to find some help but there are no guarantees. @@ -97,8 +117,15 @@ versioning guidelines: colon. Examples: * "docs: fix grammar error" * "http conn man: add new feature" +* Your PR commit message will be used as the commit message when your PR is merged. You should + update this field if your PR diverges during review. * Your PR description should have details on what the PR does. If it fixes an existing issue it should end with "Fixes #XXX". +* If your PR is co-authored or based on an earlier PR from another contributor, + please attribute them with `Co-authored-by: name <name@example.com>`. See + GitHub's [multiple author + guidance](https://help.github.com/en/github/committing-changes-to-your-project/creating-a-commit-with-multiple-authors) + for further details. * When all of the tests are passing and all other conditions described herein are satisfied, a maintainer will be assigned to review and merge the PR. * Once you submit a PR, *please do not rebase it*. It's much easier to review if subsequent commits @@ -123,10 +150,14 @@ versioning guidelines: # Runtime guarding -Some high risk changes in Envoy are deemed worthy of runtime guarding. Instead of just replacing +Some changes in Envoy are deemed worthy of runtime guarding. Instead of just replacing old code with new code, both code paths are supported for between one Envoy release (if it is guarded due to performance concerns) and a full deprecation cycle (if it is a high risk behavioral -change). 
Generally as a community we try to guard both high risk changes (major +refactors such as replacing Envoy's buffer implementation) and most user-visible +non-config-guarded changes to protocol processing (for example additions or changes to HTTP headers or +how HTTP is serialized out) for non-alpha features. Feel free to tag @envoyproxy/maintainers +if you aren't sure if a given change merits runtime guarding. The canonical way to runtime guard a feature is ``` @@ -154,9 +185,10 @@ time. Runtime guarded features may either set true (running the new code by default) in the initial PR, after a testing interval, or during the next release cycle, at the PR author's and reviewing maintainer's discretion. Generally all runtime guarded features will be set true when a -release is cut, and the old code path will be deprecated at that time. Runtime features -are set true by default by inclusion in -[source/common/runtime/runtime_features.h](https://github.com/envoyproxy/envoy/blob/master/source/common/runtime/runtime_features.h) +release is cut. Old code paths for refactors can be cleaned up after a release and there has been +some production run time. Old code for behavioral changes will be deprecated after six months. +Runtime features are set true by default by inclusion in +[source/common/runtime/runtime_features.cc](https://github.com/envoyproxy/envoy/blob/master/source/common/runtime/runtime_features.cc) There are four suggested options for testing new runtime features: @@ -166,7 +198,7 @@ There are four suggested options for testing new runtime features: GetParam() as outlined in (1). 3. Set up integration tests with custom runtime defaults as documented in the [integration test README](https://github.com/envoyproxy/envoy/blob/master/test/integration/README.md) -4. Run a given unit test with the new runtime value explicitly set true as done +4. 
Run a given unit test with the new runtime value explicitly set true or false as done for [runtime_flag_override_test](https://github.com/envoyproxy/envoy/blob/master/test/common/runtime/BUILD) Runtime code is held to the same standard as regular Envoy code, so both the old @@ -190,14 +222,36 @@ and false. organization specific shortcuts into the code. * If there is a question on who should review a PR please discuss in Slack. * Anyone is welcome to review any PR that they want, whether they are a maintainer or not. +* Please make sure that the PR title, commit message, and description are updated if the PR changes + significantly during review. * Please **clean up the title and body** before merging. By default, GitHub fills the squash merge title with the original title, and the commit body with every individual commit from the PR. The maintainer doing the merge should make sure the title follows the guidelines above and should - overwrite the body with the original extended description from the PR (cleaning it up if necessary) + overwrite the body with the original commit message from the PR (cleaning it up if necessary) while preserving the PR author's final DCO sign-off. * If a PR includes a deprecation/breaking change, notification should be sent to the [envoy-announce](https://groups.google.com/forum/#!forum/envoy-announce) email list. +# Adding new extensions + +For developers adding a new extension, one can take an existing extension as the starting point. + +Extension configuration should be located in a directory structure like +`api/envoy/extensions/area/plugin/`, for example `api/envoy/extensions/access_loggers/file/` + +The code for the extension should be located under the equivalent +`source/extensions/area/plugin`, and include an *envoy_cc_extension* with the +configuration and tagged with the appropriate security posture, and an +*envoy_cc_library* with the code. 
More details on how to add a new extension +API can be found [here](api/STYLE.md#adding-an-extension-configuration-to-the-api): + +Other changes will likely include + + * Editing [source/extensions/extensions_build_config.bzl](source/extensions/extensions_build_config.bzl) to include the new extensions + * Editing [docs/root/api-v3/config/config.rst](docs/root/api-v3/config/config.rst) to add area/area + * Adding `docs/root/api-v3/config/area/area.rst` to add a table of contents for the API docs + * Adding `source/extensions/area/well_known_names.h` for registered plugins + # DCO: Sign your work Envoy ships commit hooks that allow you to auto-generate the DCO signoff line if diff --git a/DEPRECATED.md b/DEPRECATED.md index 1b2962adcb975..a82576c77ffaf 100644 --- a/DEPRECATED.md +++ b/DEPRECATED.md @@ -1,3 +1,4 @@ # DEPRECATED -The [deprecated log](https://www.envoyproxy.io/docs/envoy/latest/intro/deprecated) can be found in the official Envoy developer documentation. +The [deprecated log](https://www.envoyproxy.io/docs/envoy/latest/version_history/version_history) +for each version can be found in the official Envoy developer documentation. diff --git a/GOVERNANCE.md b/GOVERNANCE.md index fc59ba4702477..ce60a6e82ca7e 100644 --- a/GOVERNANCE.md +++ b/GOVERNANCE.md @@ -65,8 +65,8 @@ questions and do all reviews, but it is their responsibility to make sure that everything is being actively covered by someone. * The on-call rotation is tracked at Opsgenie. 
The calendar is visible -[here](https://calendar.google.com/calendar/embed?src=ms6efr2erlvum9aolnvg1688cd3mu85e%40import.calendar.google.com&ctz=America%2FNew_York) -or you can subscribe to the iCal feed [here](https://app.opsgenie.com/webcal/getRecentSchedule?webcalToken=75f2990470ca21de1033ecf4586bea1e40bae32bf3c39e2289f6186da1904ee0&scheduleId=a3505963-c064-4c97-8865-947dfcb06060) +[here](https://calendar.google.com/calendar/embed?src=d6glc0l5rc3v235q9l2j29dgovh3dn48%40import.calendar.google.com&ctz=America%2FNew_York) +or you can subscribe to the iCal feed [here](webcal://kubernetes.app.opsgenie.com/webapi/webcal/getRecentSchedule?webcalToken=39dd1a892faa8d0d689f889b9d09ae787355ddff894396546726a5a02bac5b26&scheduleId=a3505963-c064-4c97-8865-947dfcb06060) ## Cutting a release @@ -88,18 +88,20 @@ or you can subscribe to the iCal feed [here](https://app.opsgenie.com/webcal/get * Switch the [VERSION](VERSION) from a "dev" variant to a final variant. E.g., "1.6.0-dev" to "1.6.0". * Get a review and merge. +* Wait for tests to pass on [master](https://dev.azure.com/cncf/envoy/_build). * Create a [tagged release](https://github.com/envoyproxy/envoy/releases). The release should start with "v" and be followed by the version number. E.g., "v1.6.0". **This must match the [VERSION](VERSION).** -* Create a branch from the tagged release, e.g. "release/v1.6". It will be used for the +* From the envoy [landing page](https://github.com/envoyproxy/envoy) use the branch drop-down to create a branch + from the tagged release, e.g. "release/v1.6". It will be used for the [stable releases](RELEASES.md#stable-releases). * Monitor the AZP tag build to make sure that the final docker images get pushed along with the final docs. The final documentation will end up in the [envoyproxy.github.io repository](https://github.com/envoyproxy/envoyproxy.github.io/tree/master/docs/envoy). 
* Update the website ([example PR](https://github.com/envoyproxy/envoyproxy.github.io/pull/148)) for the new release. * Craft a witty/uplifting email and send it to all the email aliases including envoy-announce@. -* If possible post on Twitter (either have Matt do it or contact caniszczyk@ on Slack and have the - Envoy account post). +* Make sure we tweet the new release: either have Matt do it or email social@cncf.io and ask them to do an Envoy account + post. * Do a new PR to setup the next version * Update [VERSION](VERSION) to the next development release. E.g., "1.7.0-dev". * `git mv docs/root/version_history/current.rst docs/root/version_history/v1.6.0.rst`, filling in the previous @@ -112,8 +114,24 @@ or you can subscribe to the iCal feed [here](https://app.opsgenie.com/webcal/get 1.7.0 (Pending) =============== -Changes -------- +Incompatible Behavior Changes +----------------------------- +*Changes that are expected to cause an incompatibility if applicable; deployment changes are likely required* + +Minor Behavior Changes +---------------------- +*Changes that may cause incompatibilities for some users, but should not for most* + +Bug Fixes +--------- +*Changes expected to improve the state of the world and are unlikely to have negative effects* + +Removed Config or Runtime +------------------------- +*Normally occurs at the end of the* :ref:`deprecation period ` + +New Features +------------ Deprecated ---------- diff --git a/ISSUES.md b/ISSUES.md new file mode 100644 index 0000000000000..70ad1b0c2b151 --- /dev/null +++ b/ISSUES.md @@ -0,0 +1,54 @@ +**If you are reporting *any* crash or *any* potential security issue, *do not* +open an issue in this repo. Please report the issue via emailing +envoy-security@googlegroups.com where the issue will be triaged appropriately.** + +**Issue Template** + +*Title*: *One line description* + +*Description*: +>Describe the issue. Please be detailed. 
If a feature request, please +describe the desired behaviour, what scenario it enables and how it +would be used. + +[optional *Relevant Links*:] +>Any extra documentation required to understand the issue. + +**Bug Template** + +*Title*: *One line description* + +*Description*: +>What issue is being seen? Describe what should be happening instead of +the bug, for example: Envoy should not crash, the expected value isn't +returned, etc. + +*Repro steps*: +> Include sample requests, environment, etc. All data and inputs +required to reproduce the bug. + +>**Note**: The [Envoy_collect tool](https://github.com/envoyproxy/envoy/blob/master/tools/envoy_collect/README.md) +gathers a tarball with debug logs, config and the following admin +endpoints: /stats, /clusters and /server_info. Please note if there are +privacy concerns, sanitize the data prior to sharing the tarball/pasting. + +*Admin and Stats Output*: +>Include the admin output for the following endpoints: /stats, +/clusters, /routes, /server_info. For more information, refer to the +[admin endpoint documentation.](https://www.envoyproxy.io/docs/envoy/latest/operations/admin) + +>**Note**: If there are privacy concerns, sanitize the data prior to +sharing. + +*Config*: +>Include the config used to configure Envoy. + +*Logs*: +>Include the access logs and the Envoy logs. + +>**Note**: If there are privacy concerns, sanitize the data prior to +sharing. + +*Call Stack*: +> If the Envoy binary is crashing, a call stack is **required**. +Please refer to the [Bazel Stack trace documentation](https://github.com/envoyproxy/envoy/tree/master/bazel#stack-trace-symbol-resolution). 
diff --git a/ISSUE_TEMPLATE.md b/ISSUE_TEMPLATE.md index 8d87611c68e8a..e38a93abbb696 100644 --- a/ISSUE_TEMPLATE.md +++ b/ISSUE_TEMPLATE.md @@ -1,56 +1,12 @@ -**WARNING: If you want to report crashes, leaking of sensitive information, -and/or other security issues, please consider -[reporting them using appropriate channels](https://github.com/envoyproxy/envoy#reporting-security-vulnerabilities).** +!!!ATTENTION!!! -**Issue Template** +If you are reporting *any* crash or *any* potential security issue, *do not* +open an issue in this repo. Please report the issue via emailing +envoy-security@googlegroups.com where the issue will be triaged appropriately. +Thank you in advance for helping to keep Envoy secure. -*Title*: *One line description* +!!!ATTENTION!!! -*Description*: ->Describe the issue. Please be detailed. If a feature request, please -describe the desired behaviour, what scenario it enables and how it -would be used. - -[optional *Relevant Links*:] ->Any extra documentation required to understand the issue. - - - -**Bug Template** - -*Title*: *One line description* - -*Description*: ->What issue is being seen? Describe what should be happening instead of -the bug, for example: Envoy should not crash, the expected value isn't -returned, etc. - -*Repro steps*: -> Include sample requests, environment, etc. All data and inputs -required to reproduce the bug. - ->**Note**: The [Envoy_collect tool](https://github.com/envoyproxy/envoy/blob/master/tools/envoy_collect/README.md) -gathers a tarball with debug logs, config and the following admin -endpoints: /stats, /clusters and /server_info. Please note if there are -privacy concerns, sanitize the data prior to sharing the tarball/pasting. - -*Admin and Stats Output*: ->Include the admin output for the following endpoints: /stats, -/clusters, /routes, /server_info. 
For more information, refer to the -[admin endpoint documentation.](https://www.envoyproxy.io/docs/envoy/latest/operations/admin) - ->**Note**: If there are privacy concerns, sanitize the data prior to -sharing. - -*Config*: ->Include the config used to configure Envoy. - -*Logs*: ->Include the access logs and the Envoy logs. - ->**Note**: If there are privacy concerns, sanitize the data prior to -sharing. - -*Call Stack*: -> If the Envoy binary is crashing, a call stack is **required**. -Please refer to the [Bazel Stack trace documentation](https://github.com/envoyproxy/envoy/tree/master/bazel#stack-trace-symbol-resolution). +If this is not a crash or potential security issue please use +[ISSUES.md](https://github.com/envoyproxy/envoy/blob/master/ISSUES.md) as a +template. diff --git a/PULL_REQUESTS.md b/PULL_REQUESTS.md index deb77bb326fd4..9b3d5cd043ba5 100644 --- a/PULL_REQUESTS.md +++ b/PULL_REQUESTS.md @@ -12,13 +12,21 @@ explaining the overall change. Both the component and the explanation must be lo * router:add x-envoy-overloaded header * tls: add support for specifying TLS session ticket keys -### Description +### Commit Message -The description field should include a more verbose explanation of what this PR -does. If this PR causes a change in behavior it should document the behavior -before and after If fixing a bug, please describe what the original issue is and -how the change resolves it. If it is configuration controlled, it should note -how the feature is enabled etc... +The commit message field should include an explanation of what this PR +does. This will be used as the final commit message that maintainers will use to +populate the commit message when merging. If this PR causes a change in behavior +it should document the behavior before and after. If fixing a bug, please +describe what the original issue is and how the change resolves it. If it is +configuration controlled, it should note how the feature is enabled etc... 
+ + +### Additional Description + +The additional description field should include information of what this PR does +that may be out of scope for a commit message. This could include additional +information or context useful to reviewers. ### Risk @@ -51,6 +59,9 @@ If there are documentation changes, please include a brief description of what t changes may be in [docs/root](docs/root) and/or inline with the API protos. Please write in N/A if there were no documentation changes. +Any PRs with structural changes to the dataplane should also update the [Life of a +Request](docs/root/intro/life_of_a_request.md) documentation as appropriate. + ### Release notes If this change is user impacting OR extension developer impacting (filter API, etc.) you **must** @@ -59,6 +70,16 @@ current version. Please include any relevant links. Each release note should be relevant subsystem in **alphabetical order** (see existing examples as a guide) and include links to relevant parts of the documentation. Thank you! Please write in N/A if there are no release notes. +### Runtime guard + +If this PR has a user-visible behavioral change, or otherwise falls under the +guidelines for runtime guarding in the [contributing doc](CONTRIBUTING.md) +it should have a runtime guard, which should be documented both in the release +notes and here in the PR description. + +For new feature additions guarded by configs, no-op refactors, docs changes etc. +this field can be disregarded and/or removed. 
+ ### Issues If this PR fixes an outstanding issue, please add a line of the form: diff --git a/PULL_REQUEST_TEMPLATE.md b/PULL_REQUEST_TEMPLATE.md index f8bb15ff43e47..d72e0564dbf1f 100644 --- a/PULL_REQUEST_TEMPLATE.md +++ b/PULL_REQUEST_TEMPLATE.md @@ -1,10 +1,12 @@ For an explanation of how to fill out the fields, please see the relevant section in [PULL_REQUESTS.md](https://github.com/envoyproxy/envoy/blob/master/PULL_REQUESTS.md) -Description: +Commit Message: +Additional Description: Risk Level: Testing: Docs Changes: Release Notes: +[Optional Runtime guard:] [Optional Fixes #Issue] [Optional Deprecated:] diff --git a/README.md b/README.md index 8c88a1fffa427..290119f82e236 100644 --- a/README.md +++ b/README.md @@ -12,7 +12,6 @@ involved and how Envoy plays a role, read the CNCF [![Azure Pipelines](https://dev.azure.com/cncf/envoy/_apis/build/status/11?branchName=master)](https://dev.azure.com/cncf/envoy/_build/latest?definitionId=11&branchName=master) [![CircleCI](https://circleci.com/gh/envoyproxy/envoy/tree/master.svg?style=shield)](https://circleci.com/gh/envoyproxy/envoy/tree/master) [![Fuzzing Status](https://oss-fuzz-build-logs.storage.googleapis.com/badges/envoy.svg)](https://bugs.chromium.org/p/oss-fuzz/issues/list?sort=-opened&can=1&q=proj:envoy) -[![fuzzit](https://app.fuzzit.dev/badge?org_id=envoyproxy)](https://app.fuzzit.dev/orgs/envoyproxy/dashboard) [![Jenkins](https://img.shields.io/jenkins/s/https/powerci.osuosl.org/job/build-envoy-master/badge/icon/.svg?label=ppc64le%20build)](http://powerci.osuosl.org/job/build-envoy-master/) ## Documentation diff --git a/RELEASES.md b/RELEASES.md index d76b3fe7981f9..3ca3f28c376c3 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -49,6 +49,8 @@ stable releases and sending announcements about them. 
This role is rotating on a | Quarter | Release manager | |:-------:|:----------------------------:| | 2020 Q1 | Piotr Sikora ([PiotrSikora]) | +| 2020 Q2 | Piotr Sikora ([PiotrSikora]) | +| 2020 Q3 | Yuchen Dai ([lambdai]) | ## Release schedule @@ -60,8 +62,8 @@ deadline of 3 weeks. |:-------:|:----------:|:----------:|:----------:|:-----------:| | 1.12.0 | 2019/09/30 | 2019/10/31 | +31 days | 2020/10/31 | | 1.13.0 | 2019/12/31 | 2020/01/20 | +20 days | 2021/01/20 | -| 1.14.0 | 2020/03/31 | | | | -| 1.15.0 | 2020/06/30 | | | | +| 1.14.0 | 2020/03/31 | 2020/04/08 | +8 days | 2021/04/08 | +| 1.15.0 | 2020/06/30 | 2020/07/07 | +7 days | 2021/07/07 | | 1.16.0 | 2020/09/30 | | | | | 1.17.0 | 2020/12/31 | | | | diff --git a/SECURITY.md b/SECURITY.md index 98bf6bffb5c2a..3483408e7ea88 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -72,18 +72,18 @@ severity. If a vulnerability does not affect any point release but only master, additional caveats apply: -* If the issue is detected and a fix is available within 5 days of the introduction of the - vulnerability, the fix will be publicly reviewed and landed on master. A courtesy e-mail will be - sent to envoy-users@googlegroups.com, envoy-dev@googlegroups.com, - envoy-security-announce@googlegroups.com and cncf-envoy-distributors-announce@lists.cncf.io if - the severity is medium or greater. -* If the vulnerability has been in existence for more than 5 days, we will activate the security - release process for any medium or higher vulnerabilities. Low severity vulnerabilities will still - be merged onto master as soon as a fix is available. - -We advise distributors and operators working from the master branch to allow at least 3 days soak +* If the issue is detected and a fix is available within 7 days of the introduction of the + vulnerability, or the issue is deemed a low severity vulnerability by the Envoy maintainer and + security teams, the fix will be publicly reviewed and landed on master. 
If the severity is at least + medium or at maintainer discretion a courtesy e-mail will be sent to envoy-users@googlegroups.com, + envoy-dev@googlegroups.com, envoy-security-announce@googlegroups.com and + cncf-envoy-distributors-announce@lists.cncf.io. +* If the vulnerability has been in existence for more than 7 days and is medium or higher, we will + activate the security release process. + +We advise distributors and operators working from the master branch to allow at least 5 days soak time after cutting a binary release before distribution or rollout, to allow time for our fuzzers to -detect issues during their execution on ClusterFuzz. A soak period of 5 days provides an even stronger +detect issues during their execution on ClusterFuzz. A soak period of 7 days provides an even stronger guarantee, since we will invoke the security release process for medium or higher severity issues for these older bugs. @@ -118,12 +118,39 @@ score](https://www.first.org/cvss/specification-document#i5)) the Fix Team can d release process down in the face of holidays, developer bandwidth, etc. These decisions must be discussed on the envoy-security mailing list. -A two week window will be provided to members of the private distributor list from candidate patch +A three week window will be provided to members of the private distributor list from candidate patch availability until the security release date. It is expected that distributors will normally be able to perform a release within this time window. If there are exceptional circumstances, the Envoy security team will raise this window to four weeks. The release window will be reduced if the security issue is public or embargo is broken. +We will endeavor not to overlap this three week window with or place it adjacent to major corporate +holiday periods or end-of-quarter (e.g. impacting downstream Istio releases), where possible. 
+ +### Fix and disclosure SLOs + +* All reports to envoy-security@googlegroups.com will be triaged and have an + initial response within 1 business day. + +* Privately disclosed issues will be fixed or publicly disclosed within 90 days + by the Envoy security team. In exceptional circumstances we reserve the right + to work with the discloser to coordinate on an extension, but this will be + rarely used. + +* Any issue discovered by the Envoy security team and raised in our private bug + tracker will be converted to a public issue within 90 days. We will regularly + audit these issues to ensure that no major vulnerability (from the perspective + of the threat model) is accidentally leaked. + +* Fuzz bugs are subject to a 90 day disclosure deadline. + +* Three weeks notice will be provided to private distributors from patch + availability until the embargo deadline. + +* Public zero days will be fixed ASAP, but there is no SLO for this, since this + will depend on the severity and impact to the organizations backing the Envoy + security team. + ### Fix Disclosure Process With the fix development underway, the Fix Lead needs to come up with an overall communication plan @@ -148,7 +175,8 @@ patches, understand exact mitigation steps, etc. should be reserved for remotely exploitable or privilege escalation issues. Otherwise, this process can be skipped. - The Fix Lead will email the patches to cncf-envoy-distributors-announce@lists.cncf.io so - distributors can prepare builds to be available to users on the day of the issue's announcement. + distributors can prepare builds to be available to users on the day of the issue's announcement. Any + patches against main will be updated and resent weekly. Distributors should read about the [Private Distributors List](#private-distributors-list) to find out the requirements for being added to this list. - **What if a vendor breaks embargo?** The PST will assess the damage. 
The Fix Lead will make the @@ -302,7 +330,7 @@ use of Envoy should: have a way to privately stage and validate your updates that does not violate the embargo. 7. Be willing to [contribute back](#contributing-back) as outlined above. -8. Be able to perform a security release of your product within a two week window from candidate fix +8. Be able to perform a security release of your product within a three week window from candidate fix patch availability. 9. Have someone already on the list vouch for the person requesting membership on behalf of your distribution. @@ -382,7 +410,7 @@ We accept. We are definitely willing to help! -> 8. Be able to perform a security release of your product within a two week window from candidate fix +> 8. Be able to perform a security release of your product within a three week window from candidate fix patch availability. We affirm we can spin out new security releases within a 2 week window. diff --git a/STYLE.md b/STYLE.md index 54e513ef3435b..7965f90f7236d 100644 --- a/STYLE.md +++ b/STYLE.md @@ -16,6 +16,11 @@ * Please see [REPO_LAYOUT.md](REPO_LAYOUT.md). +# Documentation + +* If you are modifying the data plane structurally, please keep the [Life of a + Request](docs/root/intro/life_of_a_request.md) documentation up-to-date. + # Deviations from Google C++ style guidelines * Exceptions are allowed and encouraged where appropriate. When using exceptions, do not add @@ -46,7 +51,7 @@ * Regular pointers (e.g. `int* foo`) should not be type aliased. * `absl::optional> is type aliased: * `using FooOptRef = absl::optional>;` - * `using FooOptConstRef = absl::optional>;` + * `using FooOptConstRef = absl::optional>;` * If move semantics are intended, prefer specifying function arguments with `&&`. E.g., `void onHeaders(Http::HeaderMapPtr&& headers, ...)`. 
The rationale for this is that it forces the caller to specify `std::move(...)` or pass a temporary and makes the intention at diff --git a/VERSION b/VERSION index 9a4866bbcedef..1f0d2f335194a 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -1.15.0-dev +1.16.0-dev diff --git a/WORKSPACE b/WORKSPACE index ef120bc53d4ff..a96cba5013021 100644 --- a/WORKSPACE +++ b/WORKSPACE @@ -12,6 +12,10 @@ load("//bazel:repositories.bzl", "envoy_dependencies") envoy_dependencies() +load("//bazel:repositories_extra.bzl", "envoy_dependencies_extra") + +envoy_dependencies_extra() + load("//bazel:dependency_imports.bzl", "envoy_dependency_imports") envoy_dependency_imports() diff --git a/api/API_VERSIONING.md b/api/API_VERSIONING.md index 4684ed3e86e90..25e80aaa8407d 100644 --- a/api/API_VERSIONING.md +++ b/api/API_VERSIONING.md @@ -76,16 +76,28 @@ implementations within a major version should set explicit values for these fiel # API lifecycle -The API lifecycle follows a calendar clock. At the end of Q4 each year, a major API version -increment may occur for any Envoy API package, in concert with the quarterly Envoy release. - +A new major version is a significant event in the xDS API ecosystem, inevitably requiring support +from clients (Envoy, gRPC) and a large number of control planes, ranging from simple in-house custom +management servers to xDS-as-a-service offerings run by vendors. The [xDS API +shepherds](https://github.com/orgs/envoyproxy/teams/api-shepherds) will make the decision to add a +new major version subject to the following constraints: +* There exists sufficient technical debt in the xDS APIs in the existing supported major version + to justify the cost burden for xDS client/server implementations. +* At least one year has elapsed since the last major version was cut. +* Consultation with the Envoy community (via Envoy community call, `#xds` channel on Slack), as + well as gRPC OSS community (via reaching out to language maintainers) is made. 
This is not a veto + process; the API shepherds retain the right to move forward with a new major API version after + weighing this input with the first two considerations above. + +Following the release of a new major version, the API lifecycle follows a deprecation clock. Envoy will support at most three major versions of any API package at all times: * The current stable major version, e.g. v3. * The previous stable major version, e.g. v2. This is needed to ensure that we provide at least 1 year for a supported major version to sunset. By supporting two stable major versions simultaneously, this makes it easier to coordinate control plane and Envoy - rollouts as well. This previous stable major version will be supported for 1 - year after the introduction of the new current stable major version. + rollouts as well. This previous stable major version will be supported for exactly 1 + year after the introduction of the new current stable major version, after which it will be + removed from the Envoy implementation. * Optionally, the next experimental alpha major version, e.g. v4alpha. This is a release candidate for the next stable major version. This is only generated when the current stable major version requires a breaking change at the next cycle, e.g. a deprecation or field rename. This release @@ -94,16 +106,27 @@ Envoy will support at most three major versions of any API package at all times: current stable major version, making use of annotations such as `deprecated = true`. This is not a human editable artifact. 
-An example of how this might play out is that at the end of September in 2020, we will freeze -`envoy.config.bootstrap.v4alpha` and this package will become the current stable major version +An example of how this might play out is that at the end of December in 2020, if a v4 major version +is justified, we might freeze +`envoy.config.bootstrap.v4alpha` and this package would then become the current stable major version `envoy.config.bootstrap.v4`. The `envoy.config.bootstrap.v3` package will become the previous stable major version and support for `envoy.config.bootstrap.v2` will be dropped from the Envoy implementation. Note that some transitively referenced package, e.g. `envoy.config.filter.network.foo.v2` may remain at version 2 during this release, if no changes were -made to the referenced package. +made to the referenced package. If no major version is justified at this point, the decision to cut +v4 might occur at some point in 2021 or beyond, however v2 support will still be removed at the end +of 2020. + +The implication of this API lifecycle and clock is that any deprecated feature in the Envoy API will +retain implementation support for at least 1-2 years. -The implication of this API lifecycle and clock is that any deprecated feature in the Envoy API will retain -implementation support for 1-2 years (1.5 years on average). +We are currently working on a strategy to introduce minor versions +(https://github.com/envoyproxy/envoy/issues/8416). This will bump the xDS API minor version on every +deprecation and field introduction/modification. This will provide an opportunity for the control +plane to condition on client and major/minor API version support. Currently under discussion, but +not finalized will be the sunsetting of Envoy client support for deprecated features after a year +of support within a major version. Please post to https://github.com/envoyproxy/envoy/issues/8416 +any thoughts around this. 
# New API features @@ -152,10 +175,27 @@ candidate for this class of change. The following steps are required: 3. The old message/enum/field/enum value should be annotated as deprecated. 4. At the next major version, `protoxform` will remove the deprecated version automatically. -This approach ensures that API major version releases are predictable and mechanical, and has the -bulk of the Envoy code and test changes owned by feature developers, rather than the API owners. -There will be no major `vN` initiative to address technical debt beyond that enabled by the above -process. +This make-before-break approach ensures that API major version releases are predictable and +mechanical, and has the bulk of the Envoy code and test changes owned by feature developers, rather +than the API owners. There will be no major `vN` initiative to address technical debt beyond that +enabled by the above process. + +# Client features + +Not all clients will support all fields and features in a given major API version. In general, it is +preferable to use Protobuf semantics to support this, for example: +* Ignoring a field's contents is sufficient to indicate that the support is missing in a client. +* Setting both deprecated and the new method for expressing a field if support for a range of + clients is desired (where this does not involve huge overhead or gymnastics). + +This approach does not always work, for example: +* A route matcher conjunct condition should not be ignored just because the client is missing the + ability to implement the match; this might result in route policy bypass. +* A client may expect the server to provide a response in a certain format or encoding, for example + a JSON encoded `Struct`-in-`Any` representation of opaque extension configuration. + +For this purpose, we have [client +features](https://www.envoyproxy.io/docs/envoy/latest/api/client_features). 
# One Definition Rule (ODR) diff --git a/api/BUILD b/api/BUILD index 0dafe82267e98..99bd1b119c62a 100644 --- a/api/BUILD +++ b/api/BUILD @@ -81,7 +81,6 @@ proto_library( "//envoy/config/filter/network/zookeeper_proxy/v1alpha1:pkg", "//envoy/config/filter/thrift/rate_limit/v2alpha1:pkg", "//envoy/config/filter/thrift/router/v2alpha1:pkg", - "//envoy/config/filter/udp/dns_filter/v2alpha:pkg", "//envoy/config/filter/udp/udp_proxy/v2alpha:pkg", "//envoy/config/grpc_credential/v2alpha:pkg", "//envoy/config/health_checker/redis/v2:pkg", @@ -101,7 +100,6 @@ proto_library( "//envoy/config/transport_socket/alts/v2alpha:pkg", "//envoy/config/transport_socket/raw_buffer/v2:pkg", "//envoy/config/transport_socket/tap/v2alpha:pkg", - "//envoy/config/wasm/v2alpha:pkg", "//envoy/data/accesslog/v2:pkg", "//envoy/data/cluster/v2alpha:pkg", "//envoy/data/core/v2alpha:pkg", @@ -132,10 +130,10 @@ proto_library( "//envoy/config/accesslog/v3:pkg", "//envoy/config/bootstrap/v3:pkg", "//envoy/config/cluster/v3:pkg", + "//envoy/config/common/matcher/v3:pkg", "//envoy/config/core/v3:pkg", "//envoy/config/endpoint/v3:pkg", "//envoy/config/filter/thrift/router/v2alpha1:pkg", - "//envoy/config/filter/udp/udp_proxy/v2alpha:pkg", "//envoy/config/grpc_credential/v3:pkg", "//envoy/config/health_checker/redis/v2:pkg", "//envoy/config/listener/v3:pkg", @@ -157,15 +155,18 @@ proto_library( "//envoy/data/tap/v3:pkg", "//envoy/extensions/access_loggers/file/v3:pkg", "//envoy/extensions/access_loggers/grpc/v3:pkg", + "//envoy/extensions/access_loggers/wasm/v3:pkg", "//envoy/extensions/clusters/aggregate/v3:pkg", "//envoy/extensions/clusters/dynamic_forward_proxy/v3:pkg", "//envoy/extensions/clusters/redis/v3:pkg", "//envoy/extensions/common/dynamic_forward_proxy/v3:pkg", "//envoy/extensions/common/ratelimit/v3:pkg", "//envoy/extensions/common/tap/v3:pkg", - "//envoy/extensions/filter/udp/dns_filter/v3alpha:pkg", + "//envoy/extensions/compression/gzip/compressor/v3:pkg", + 
"//envoy/extensions/compression/gzip/decompressor/v3:pkg", "//envoy/extensions/filters/common/fault/v3:pkg", "//envoy/extensions/filters/http/adaptive_concurrency/v3:pkg", + "//envoy/extensions/filters/http/admission_control/v3alpha:pkg", "//envoy/extensions/filters/http/aws_lambda/v3:pkg", "//envoy/extensions/filters/http/aws_request_signing/v3:pkg", "//envoy/extensions/filters/http/buffer/v3:pkg", @@ -173,6 +174,7 @@ proto_library( "//envoy/extensions/filters/http/compressor/v3:pkg", "//envoy/extensions/filters/http/cors/v3:pkg", "//envoy/extensions/filters/http/csrf/v3:pkg", + "//envoy/extensions/filters/http/decompressor/v3:pkg", "//envoy/extensions/filters/http/dynamic_forward_proxy/v3:pkg", "//envoy/extensions/filters/http/dynamo/v3:pkg", "//envoy/extensions/filters/http/ext_authz/v3:pkg", @@ -195,6 +197,7 @@ proto_library( "//envoy/extensions/filters/http/router/v3:pkg", "//envoy/extensions/filters/http/squash/v3:pkg", "//envoy/extensions/filters/http/tap/v3:pkg", + "//envoy/extensions/filters/http/wasm/v3:pkg", "//envoy/extensions/filters/listener/http_inspector/v3:pkg", "//envoy/extensions/filters/listener/original_dst/v3:pkg", "//envoy/extensions/filters/listener/original_src/v3:pkg", @@ -215,18 +218,31 @@ proto_library( "//envoy/extensions/filters/network/ratelimit/v3:pkg", "//envoy/extensions/filters/network/rbac/v3:pkg", "//envoy/extensions/filters/network/redis_proxy/v3:pkg", + "//envoy/extensions/filters/network/rocketmq_proxy/v3:pkg", "//envoy/extensions/filters/network/sni_cluster/v3:pkg", "//envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha:pkg", "//envoy/extensions/filters/network/tcp_proxy/v3:pkg", "//envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3:pkg", "//envoy/extensions/filters/network/thrift_proxy/v3:pkg", + "//envoy/extensions/filters/network/wasm/v3:pkg", "//envoy/extensions/filters/network/zookeeper_proxy/v3:pkg", + "//envoy/extensions/filters/udp/dns_filter/v3alpha:pkg", + 
"//envoy/extensions/filters/udp/udp_proxy/v3:pkg", + "//envoy/extensions/internal_redirect/allow_listed_routes/v3:pkg", + "//envoy/extensions/internal_redirect/previous_routes/v3:pkg", + "//envoy/extensions/internal_redirect/safe_cross_scheme/v3:pkg", + "//envoy/extensions/network/socket_interface/v3:pkg", "//envoy/extensions/retry/host/omit_host_metadata/v3:pkg", "//envoy/extensions/retry/priority/previous_priorities/v3:pkg", "//envoy/extensions/transport_sockets/alts/v3:pkg", + "//envoy/extensions/transport_sockets/proxy_protocol/v3:pkg", + "//envoy/extensions/transport_sockets/quic/v3:pkg", "//envoy/extensions/transport_sockets/raw_buffer/v3:pkg", "//envoy/extensions/transport_sockets/tap/v3:pkg", "//envoy/extensions/transport_sockets/tls/v3:pkg", + "//envoy/extensions/upstreams/http/generic/v3:pkg", + "//envoy/extensions/upstreams/http/http/v3:pkg", + "//envoy/extensions/upstreams/http/tcp/v3:pkg", "//envoy/extensions/wasm/v3:pkg", "//envoy/service/accesslog/v3:pkg", "//envoy/service/auth/v3:pkg", @@ -234,6 +250,7 @@ proto_library( "//envoy/service/discovery/v3:pkg", "//envoy/service/endpoint/v3:pkg", "//envoy/service/event_reporting/v3:pkg", + "//envoy/service/extension/v3:pkg", "//envoy/service/health/v3:pkg", "//envoy/service/listener/v3:pkg", "//envoy/service/load_stats/v3:pkg", diff --git a/api/CONTRIBUTING.md b/api/CONTRIBUTING.md index dc77573c683b1..773248f2e2ea6 100644 --- a/api/CONTRIBUTING.md +++ b/api/CONTRIBUTING.md @@ -26,6 +26,12 @@ The documentation can be built locally in the root of https://github.com/envoypr docs/build.sh ``` +To skip configuration examples validation: + +``` +SPHINX_SKIP_CONFIG_VALIDATION=true docs/build.sh +``` + Or to use a hermetic Docker container: ``` diff --git a/api/bazel/BUILD b/api/bazel/BUILD index 279c7c9e6a9b0..4b582bb8be3f7 100644 --- a/api/bazel/BUILD +++ b/api/bazel/BUILD @@ -1,7 +1,7 @@ -licenses(["notice"]) # Apache 2 - load("@io_bazel_rules_go//proto:compiler.bzl", "go_proto_compiler") +licenses(["notice"]) 
# Apache 2 + go_proto_compiler( name = "pgv_plugin_go", options = ["lang=go"], diff --git a/api/bazel/api_build_system.bzl b/api/bazel/api_build_system.bzl index 7e88ab2bf9e56..c0269d161f805 100644 --- a/api/bazel/api_build_system.bzl +++ b/api/bazel/api_build_system.bzl @@ -1,7 +1,8 @@ +load("@rules_cc//cc:defs.bzl", "cc_test") load("@com_envoyproxy_protoc_gen_validate//bazel:pgv_proto_library.bzl", "pgv_cc_proto_library") load("@com_github_grpc_grpc//bazel:cc_grpc_library.bzl", "cc_grpc_library") load("@com_google_protobuf//:protobuf.bzl", _py_proto_library = "py_proto_library") -load("@io_bazel_rules_go//proto:def.bzl", "go_grpc_library", "go_proto_library") +load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") load("@io_bazel_rules_go//go:def.bzl", "go_test") load("@rules_proto//proto:defs.bzl", "proto_library") load( @@ -79,6 +80,10 @@ def py_proto_library(name, deps = [], plugin = None): if name == "annotations_py_proto": proto_deps = proto_deps + [":http_py_proto"] + # checked.proto depends on syntax.proto, we have to add this dependency manually as well. + if name == "checked_py_proto": + proto_deps = proto_deps + [":syntax_py_proto"] + # py_proto_library does not support plugin as an argument yet at gRPC v1.25.0: # https://github.com/grpc/grpc/blob/v1.25.0/bazel/python_rules.bzl#L72. # plugin should also be passed in here when gRPC version is greater than v1.25.x. @@ -138,7 +143,7 @@ def api_cc_py_proto_library( _api_cc_grpc_library(name = cc_grpc_name, proto = relative_name, deps = cc_proto_deps) def api_cc_test(name, **kwargs): - native.cc_test( + cc_test( name = name, **kwargs ) @@ -171,13 +176,16 @@ def api_proto_package( if has_services: compilers = ["@io_bazel_rules_go//proto:go_grpc", "@envoy_api//bazel:pgv_plugin_go"] + # Because RBAC proro depends on googleapis syntax.proto and checked.proto, + # which share the same go proto library, it causes duplicative dependencies. + # Thus, we use depset().to_list() to remove duplicated depenencies. 
go_proto_library( name = name + _GO_PROTO_SUFFIX, compilers = compilers, importpath = _GO_IMPORTPATH_PREFIX + native.package_name(), proto = name, visibility = ["//visibility:public"], - deps = [_go_proto_mapping(dep) for dep in deps] + [ + deps = depset([_go_proto_mapping(dep) for dep in deps] + [ "@com_github_golang_protobuf//ptypes:go_default_library", "@com_github_golang_protobuf//ptypes/any:go_default_library", "@com_github_golang_protobuf//ptypes/duration:go_default_library", @@ -187,5 +195,5 @@ def api_proto_package( "@com_envoyproxy_protoc_gen_validate//validate:go_default_library", "@com_google_googleapis//google/api:annotations_go_proto", "@com_google_googleapis//google/rpc:status_go_proto", - ], + ]).to_list(), ) diff --git a/api/bazel/external_proto_deps.bzl b/api/bazel/external_proto_deps.bzl index 514093abef90a..659c7a72d73e0 100644 --- a/api/bazel/external_proto_deps.bzl +++ b/api/bazel/external_proto_deps.bzl @@ -9,6 +9,7 @@ # external dependencies. Since BUILD files are generated, this is the canonical # place to define this mapping. EXTERNAL_PROTO_IMPORT_BAZEL_DEP_MAP = { + "google/api/expr/v1alpha1/checked.proto": "@com_google_googleapis//google/api/expr/v1alpha1:checked_proto", "google/api/expr/v1alpha1/syntax.proto": "@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto", "metrics.proto": "@prometheus_metrics_model//:client_model", "opencensus/proto/trace/v1/trace.proto": "@opencensus_proto//opencensus/proto/trace/v1:trace_proto", @@ -17,6 +18,7 @@ EXTERNAL_PROTO_IMPORT_BAZEL_DEP_MAP = { # This maps from the Bazel proto_library target to the Go language binding target for external dependencies. 
EXTERNAL_PROTO_GO_BAZEL_DEP_MAP = { + "@com_google_googleapis//google/api/expr/v1alpha1:checked_proto": "@com_google_googleapis//google/api/expr/v1alpha1:expr_go_proto", "@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto": "@com_google_googleapis//google/api/expr/v1alpha1:expr_go_proto", "@opencensus_proto//opencensus/proto/trace/v1:trace_proto": "@opencensus_proto//opencensus/proto/trace/v1:trace_proto_go", "@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto": "@opencensus_proto//opencensus/proto/trace/v1:trace_and_config_proto_go", @@ -24,6 +26,7 @@ EXTERNAL_PROTO_GO_BAZEL_DEP_MAP = { # This maps from the Bazel proto_library target to the C++ language binding target for external dependencies. EXTERNAL_PROTO_CC_BAZEL_DEP_MAP = { + "@com_google_googleapis//google/api/expr/v1alpha1:checked_proto": "@com_google_googleapis//google/api/expr/v1alpha1:checked_cc_proto", "@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto": "@com_google_googleapis//google/api/expr/v1alpha1:syntax_cc_proto", "@opencensus_proto//opencensus/proto/trace/v1:trace_proto": "@opencensus_proto//opencensus/proto/trace/v1:trace_proto_cc", "@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto": "@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto_cc", @@ -31,6 +34,7 @@ EXTERNAL_PROTO_CC_BAZEL_DEP_MAP = { # This maps from the Bazel proto_library target to the Python language binding target for external dependencies. 
EXTERNAL_PROTO_PY_BAZEL_DEP_MAP = { + "@com_google_googleapis//google/api/expr/v1alpha1:checked_proto": "@com_google_googleapis//google/api/expr/v1alpha1:checked_py_proto", "@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto": "@com_google_googleapis//google/api/expr/v1alpha1:syntax_py_proto", "@opencensus_proto//opencensus/proto/trace/v1:trace_proto": "@opencensus_proto//opencensus/proto/trace/v1:trace_proto_py", "@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto": "@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto_py", diff --git a/api/bazel/repositories.bzl b/api/bazel/repositories.bzl index af1f11331d013..a64e733cf74a9 100644 --- a/api/bazel/repositories.bzl +++ b/api/bazel/repositories.bzl @@ -1,4 +1,3 @@ -load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") load(":envoy_http_archive.bzl", "envoy_http_archive") load(":repository_locations.bzl", "REPOSITORY_LOCATIONS") diff --git a/api/bazel/repository_locations.bzl b/api/bazel/repository_locations.bzl index c275a8c658353..0a0379f7685e3 100644 --- a/api/bazel/repository_locations.bzl +++ b/api/bazel/repository_locations.bzl @@ -4,8 +4,8 @@ BAZEL_SKYLIB_SHA256 = "1dde365491125a3db70731e25658dfdd3bc5dbdfd11b840b3e987ecf0 OPENCENSUS_PROTO_GIT_SHA = "be218fb6bd674af7519b1850cdf8410d8cbd48e8" # Dec 20, 2019 OPENCENSUS_PROTO_SHA256 = "e3bbdc94375e86c0edfb2fc5851507e08a3f26ee725ffff7c5c0e73264bdfcde" -PGV_GIT_SHA = "ab56c3dd1cf9b516b62c5087e1ec1471bd63631e" # Mar 11, 2020 -PGV_SHA256 = "3be12077affd1ebf8787001f5fba545cc5f1b914964dab4e0cc77c43fba03b41" +PGV_GIT_SHA = "278964a8052f96a2f514add0298098f63fb7f47f" # June 9, 2020 +PGV_SHA256 = "e368733c9fb7f8489591ffaf269170d7658cc0cd1ee322b601512b769446d3c8" GOOGLEAPIS_GIT_SHA = "82944da21578a53b74e547774cf62ed31a05b841" # Dec 2, 2019 GOOGLEAPIS_SHA = "a45019af4d3290f02eaeb1ce10990166978c807cb33a9692141a076ba46d1405" @@ -13,8 +13,8 @@ GOOGLEAPIS_SHA = "a45019af4d3290f02eaeb1ce10990166978c807cb33a9692141a076ba46d14 
PROMETHEUS_GIT_SHA = "99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c" # Nov 17, 2017 PROMETHEUS_SHA = "783bdaf8ee0464b35ec0c8704871e1e72afa0005c3f3587f65d9d6694bf3911b" -UDPA_GIT_SHA = "e8cd3a4bb307e2c810cffff99f93e96e6d7fee85" # Mar 27, 2020 -UDPA_SHA256 = "1fd7857cb61daee7726fca8f4d55e4923774a8d00a53007a4093830dc0482685" +UDPA_GIT_SHA = "efcf912fb35470672231c7b7bef620f3d17f655a" # June 29, 2020 +UDPA_SHA256 = "0f8179fbe3d27b89a4c34b2fbd55832f3b27b6810ea9b03b36d18da2629cc871" ZIPKINAPI_RELEASE = "0.2.2" # Aug 23, 2019 ZIPKINAPI_SHA256 = "688c4fe170821dd589f36ec45aaadc03a618a40283bc1f97da8fa11686fc816b" @@ -33,7 +33,7 @@ REPOSITORY_LOCATIONS = dict( urls = ["https://github.com/envoyproxy/protoc-gen-validate/archive/" + PGV_GIT_SHA + ".tar.gz"], ), com_google_googleapis = dict( - # TODO(dio): Consider writing a Skylark macro for importing Google API proto. + # TODO(dio): Consider writing a Starlark macro for importing Google API proto. sha256 = GOOGLEAPIS_SHA, strip_prefix = "googleapis-" + GOOGLEAPIS_GIT_SHA, urls = ["https://github.com/googleapis/googleapis/archive/" + GOOGLEAPIS_GIT_SHA + ".tar.gz"], diff --git a/api/envoy/admin/v3/config_dump.proto b/api/envoy/admin/v3/config_dump.proto index b3c3836a8cc00..73156697fdb21 100644 --- a/api/envoy/admin/v3/config_dump.proto +++ b/api/envoy/admin/v3/config_dump.proto @@ -30,9 +30,12 @@ message ConfigDump { // // * *bootstrap*: :ref:`BootstrapConfigDump ` // * *clusters*: :ref:`ClustersConfigDump ` + // * *endpoints*: :ref:`EndpointsConfigDump ` // * *listeners*: :ref:`ListenersConfigDump ` // * *routes*: :ref:`RoutesConfigDump ` // + // EDS Configuration will only be dumped by using parameter `?include_eds` + // // You can filter output with the resource and mask query parameters. // See :ref:`/config_dump?resource={} `, // :ref:`/config_dump?mask={} `, @@ -346,3 +349,35 @@ message SecretsConfigDump { // warming in preparation to service clusters or listeners. 
repeated DynamicSecret dynamic_warming_secrets = 3; } + +// Envoy's admin fill this message with all currently known endpoints. Endpoint +// configuration information can be used to recreate an Envoy configuration by populating all +// endpoints as static endpoints or by returning them in an EDS response. +message EndpointsConfigDump { + message StaticEndpointConfig { + // The endpoint config. + google.protobuf.Any endpoint_config = 1; + + // [#not-implemented-hide:] The timestamp when the Endpoint was last updated. + google.protobuf.Timestamp last_updated = 2; + } + + message DynamicEndpointConfig { + // [#not-implemented-hide:] This is the per-resource version information. This version is currently taken from the + // :ref:`version_info ` field at the time that + // the endpoint configuration was loaded. + string version_info = 1; + + // The endpoint config. + google.protobuf.Any endpoint_config = 2; + + // [#not-implemented-hide:] The timestamp when the Endpoint was last updated. + google.protobuf.Timestamp last_updated = 3; + } + + // The statically loaded endpoint configs. + repeated StaticEndpointConfig static_endpoint_configs = 2; + + // The dynamically loaded endpoint configs. + repeated DynamicEndpointConfig dynamic_endpoint_configs = 3; +} diff --git a/api/envoy/admin/v3/server_info.proto b/api/envoy/admin/v3/server_info.proto index ac0204428053e..7f8ea45650d48 100644 --- a/api/envoy/admin/v3/server_info.proto +++ b/api/envoy/admin/v3/server_info.proto @@ -54,7 +54,7 @@ message ServerInfo { CommandLineOptions command_line_options = 6; } -// [#next-free-field: 29] +// [#next-free-field: 34] message CommandLineOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.CommandLineOptions"; @@ -75,6 +75,14 @@ message CommandLineOptions { InitOnly = 2; } + enum DrainStrategy { + // Gradually discourage connections over the course of the drain period. 
+ Gradual = 0; + + // Discourage all connections for the duration of the drain sequence. + Immediate = 1; + } + reserved 12, 20, 21; reserved "max_stats", "max_obj_name_len"; @@ -82,6 +90,12 @@ message CommandLineOptions { // See :option:`--base-id` for details. uint64 base_id = 1; + // See :option:`--use-dynamic-base-id` for details. + bool use_dynamic_base_id = 31; + + // See :option:`--base-id-path` for details. + string base_id_path = 32; + // See :option:`--concurrency` for details. uint32 concurrency = 2; @@ -97,6 +111,9 @@ message CommandLineOptions { // See :option:`--reject-unknown-dynamic-fields` for details. bool reject_unknown_dynamic_fields = 26; + // See :option:`--ignore-unknown-dynamic-fields` for details. + bool ignore_unknown_dynamic_fields = 30; + // See :option:`--admin-address-path` for details. string admin_address_path = 6; @@ -133,6 +150,9 @@ message CommandLineOptions { // See :option:`--drain-time-s` for details. google.protobuf.Duration drain_time = 17; + // See :option:`--drain-strategy` for details. + DrainStrategy drain_strategy = 33; + // See :option:`--parent-shutdown-time-s` for details. google.protobuf.Duration parent_shutdown_time = 18; @@ -153,4 +173,7 @@ message CommandLineOptions { // See :option:`--disable-extensions` for details. repeated string disabled_extensions = 28; + + // See :option:`--bootstrap-version` for details. 
+ uint32 bootstrap_version = 29; } diff --git a/api/envoy/admin/v4alpha/BUILD b/api/envoy/admin/v4alpha/BUILD index 6da5b60bad287..d64c4f6a08167 100644 --- a/api/envoy/admin/v4alpha/BUILD +++ b/api/envoy/admin/v4alpha/BUILD @@ -10,7 +10,7 @@ api_proto_package( "//envoy/annotations:pkg", "//envoy/config/bootstrap/v4alpha:pkg", "//envoy/config/core/v4alpha:pkg", - "//envoy/config/tap/v3:pkg", + "//envoy/config/tap/v4alpha:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], diff --git a/api/envoy/admin/v4alpha/config_dump.proto b/api/envoy/admin/v4alpha/config_dump.proto index 02709a4145063..8bbd5743219da 100644 --- a/api/envoy/admin/v4alpha/config_dump.proto +++ b/api/envoy/admin/v4alpha/config_dump.proto @@ -30,9 +30,12 @@ message ConfigDump { // // * *bootstrap*: :ref:`BootstrapConfigDump ` // * *clusters*: :ref:`ClustersConfigDump ` + // * *endpoints*: :ref:`EndpointsConfigDump ` // * *listeners*: :ref:`ListenersConfigDump ` // * *routes*: :ref:`RoutesConfigDump ` // + // EDS Configuration will only be dumped by using parameter `?include_eds` + // // You can filter output with the resource and mask query parameters. // See :ref:`/config_dump?resource={} `, // :ref:`/config_dump?mask={} `, @@ -340,3 +343,43 @@ message SecretsConfigDump { // warming in preparation to service clusters or listeners. repeated DynamicSecret dynamic_warming_secrets = 3; } + +// Envoy's admin fill this message with all currently known endpoints. Endpoint +// configuration information can be used to recreate an Envoy configuration by populating all +// endpoints as static endpoints or by returning them in an EDS response. +message EndpointsConfigDump { + option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.EndpointsConfigDump"; + + message StaticEndpointConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.admin.v3.EndpointsConfigDump.StaticEndpointConfig"; + + // The endpoint config. 
+ google.protobuf.Any endpoint_config = 1; + + // [#not-implemented-hide:] The timestamp when the Endpoint was last updated. + google.protobuf.Timestamp last_updated = 2; + } + + message DynamicEndpointConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.admin.v3.EndpointsConfigDump.DynamicEndpointConfig"; + + // [#not-implemented-hide:] This is the per-resource version information. This version is currently taken from the + // :ref:`version_info ` field at the time that + // the endpoint configuration was loaded. + string version_info = 1; + + // The endpoint config. + google.protobuf.Any endpoint_config = 2; + + // [#not-implemented-hide:] The timestamp when the Endpoint was last updated. + google.protobuf.Timestamp last_updated = 3; + } + + // The statically loaded endpoint configs. + repeated StaticEndpointConfig static_endpoint_configs = 2; + + // The dynamically loaded endpoint configs. + repeated DynamicEndpointConfig dynamic_endpoint_configs = 3; +} diff --git a/api/envoy/admin/v4alpha/server_info.proto b/api/envoy/admin/v4alpha/server_info.proto index 867a9255bc51f..e3e40ac2eabc1 100644 --- a/api/envoy/admin/v4alpha/server_info.proto +++ b/api/envoy/admin/v4alpha/server_info.proto @@ -54,7 +54,7 @@ message ServerInfo { CommandLineOptions command_line_options = 6; } -// [#next-free-field: 29] +// [#next-free-field: 34] message CommandLineOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.CommandLineOptions"; @@ -74,6 +74,14 @@ message CommandLineOptions { InitOnly = 2; } + enum DrainStrategy { + // Gradually discourage connections over the course of the drain period. + Gradual = 0; + + // Discourage all connections for the duration of the drain sequence. + Immediate = 1; + } + reserved 12, 20, 21; reserved "max_stats", "max_obj_name_len"; @@ -81,6 +89,12 @@ message CommandLineOptions { // See :option:`--base-id` for details. 
uint64 base_id = 1; + // See :option:`--use-dynamic-base-id` for details. + bool use_dynamic_base_id = 31; + + // See :option:`--base-id-path` for details. + string base_id_path = 32; + // See :option:`--concurrency` for details. uint32 concurrency = 2; @@ -96,6 +110,9 @@ message CommandLineOptions { // See :option:`--reject-unknown-dynamic-fields` for details. bool reject_unknown_dynamic_fields = 26; + // See :option:`--ignore-unknown-dynamic-fields` for details. + bool ignore_unknown_dynamic_fields = 30; + // See :option:`--admin-address-path` for details. string admin_address_path = 6; @@ -132,6 +149,9 @@ message CommandLineOptions { // See :option:`--drain-time-s` for details. google.protobuf.Duration drain_time = 17; + // See :option:`--drain-strategy` for details. + DrainStrategy drain_strategy = 33; + // See :option:`--parent-shutdown-time-s` for details. google.protobuf.Duration parent_shutdown_time = 18; @@ -152,4 +172,7 @@ message CommandLineOptions { // See :option:`--disable-extensions` for details. repeated string disabled_extensions = 28; + + // See :option:`--bootstrap-version` for details. + uint32 bootstrap_version = 29; } diff --git a/api/envoy/admin/v4alpha/tap.proto b/api/envoy/admin/v4alpha/tap.proto index c47b308d6ee6d..039dfcfeb8120 100644 --- a/api/envoy/admin/v4alpha/tap.proto +++ b/api/envoy/admin/v4alpha/tap.proto @@ -2,7 +2,7 @@ syntax = "proto3"; package envoy.admin.v4alpha; -import "envoy/config/tap/v3/common.proto"; +import "envoy/config/tap/v4alpha/common.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; @@ -24,5 +24,5 @@ message TapRequest { string config_id = 1 [(validate.rules).string = {min_bytes: 1}]; // The tap configuration to load. 
- config.tap.v3.TapConfig tap_config = 2 [(validate.rules).message = {required: true}]; + config.tap.v4alpha.TapConfig tap_config = 2 [(validate.rules).message = {required: true}]; } diff --git a/api/envoy/api/v2/auth/cert.proto b/api/envoy/api/v2/auth/cert.proto index a1642318e0438..49e8b8c70fa2e 100644 --- a/api/envoy/api/v2/auth/cert.proto +++ b/api/envoy/api/v2/auth/cert.proto @@ -2,486 +2,15 @@ syntax = "proto3"; package envoy.api.v2.auth; -import "envoy/api/v2/core/base.proto"; -import "envoy/api/v2/core/config_source.proto"; -import "envoy/type/matcher/string.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/wrappers.proto"; - import "udpa/annotations/migrate.proto"; -import "udpa/annotations/sensitive.proto"; import "udpa/annotations/status.proto"; -import "validate/validate.proto"; + +import public "envoy/api/v2/auth/common.proto"; +import public "envoy/api/v2/auth/secret.proto"; +import public "envoy/api/v2/auth/tls.proto"; option java_package = "io.envoyproxy.envoy.api.v2.auth"; option java_outer_classname = "CertProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.transport_sockets.tls.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Common TLS configuration] - -message TlsParameters { - enum TlsProtocol { - // Envoy will choose the optimal TLS version. - TLS_AUTO = 0; - - // TLS 1.0 - TLSv1_0 = 1; - - // TLS 1.1 - TLSv1_1 = 2; - - // TLS 1.2 - TLSv1_2 = 3; - - // TLS 1.3 - TLSv1_3 = 4; - } - - // Minimum TLS protocol version. By default, it's ``TLSv1_2`` for clients and ``TLSv1_0`` for - // servers. - TlsProtocol tls_minimum_protocol_version = 1 [(validate.rules).enum = {defined_only: true}]; - - // Maximum TLS protocol version. 
By default, it's ``TLSv1_3`` for servers in non-FIPS builds, and - // ``TLSv1_2`` for clients and for servers using :ref:`BoringSSL FIPS `. - TlsProtocol tls_maximum_protocol_version = 2 [(validate.rules).enum = {defined_only: true}]; - - // If specified, the TLS listener will only support the specified `cipher list - // `_ - // when negotiating TLS 1.0-1.2 (this setting has no effect when negotiating TLS 1.3). If not - // specified, the default list will be used. - // - // In non-FIPS builds, the default cipher list is: - // - // .. code-block:: none - // - // [ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA-CHACHA20-POLY1305] - // [ECDHE-RSA-AES128-GCM-SHA256|ECDHE-RSA-CHACHA20-POLY1305] - // ECDHE-ECDSA-AES128-SHA - // ECDHE-RSA-AES128-SHA - // AES128-GCM-SHA256 - // AES128-SHA - // ECDHE-ECDSA-AES256-GCM-SHA384 - // ECDHE-RSA-AES256-GCM-SHA384 - // ECDHE-ECDSA-AES256-SHA - // ECDHE-RSA-AES256-SHA - // AES256-GCM-SHA384 - // AES256-SHA - // - // In builds using :ref:`BoringSSL FIPS `, the default cipher list is: - // - // .. code-block:: none - // - // ECDHE-ECDSA-AES128-GCM-SHA256 - // ECDHE-RSA-AES128-GCM-SHA256 - // ECDHE-ECDSA-AES128-SHA - // ECDHE-RSA-AES128-SHA - // AES128-GCM-SHA256 - // AES128-SHA - // ECDHE-ECDSA-AES256-GCM-SHA384 - // ECDHE-RSA-AES256-GCM-SHA384 - // ECDHE-ECDSA-AES256-SHA - // ECDHE-RSA-AES256-SHA - // AES256-GCM-SHA384 - // AES256-SHA - repeated string cipher_suites = 3; - - // If specified, the TLS connection will only support the specified ECDH - // curves. If not specified, the default curves will be used. - // - // In non-FIPS builds, the default curves are: - // - // .. code-block:: none - // - // X25519 - // P-256 - // - // In builds using :ref:`BoringSSL FIPS `, the default curve is: - // - // .. code-block:: none - // - // P-256 - repeated string ecdh_curves = 4; -} - -// BoringSSL private key method configuration. The private key methods are used for external -// (potentially asynchronous) signing and decryption operations. 
Some use cases for private key -// methods would be TPM support and TLS acceleration. -message PrivateKeyProvider { - // Private key method provider name. The name must match a - // supported private key method provider type. - string provider_name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // Private key method provider specific configuration. - oneof config_type { - google.protobuf.Struct config = 2 [deprecated = true, (udpa.annotations.sensitive) = true]; - - google.protobuf.Any typed_config = 3 [(udpa.annotations.sensitive) = true]; - } -} - -// [#next-free-field: 7] -message TlsCertificate { - // The TLS certificate chain. - core.DataSource certificate_chain = 1; - - // The TLS private key. - core.DataSource private_key = 2 [(udpa.annotations.sensitive) = true]; - - // BoringSSL private key method provider. This is an alternative to :ref:`private_key - // ` field. This can't be - // marked as ``oneof`` due to API compatibility reasons. Setting both :ref:`private_key - // ` and - // :ref:`private_key_provider - // ` fields will result in an - // error. - PrivateKeyProvider private_key_provider = 6; - - // The password to decrypt the TLS private key. If this field is not set, it is assumed that the - // TLS private key is not password encrypted. - core.DataSource password = 3 [(udpa.annotations.sensitive) = true]; - - // [#not-implemented-hide:] - core.DataSource ocsp_staple = 4; - - // [#not-implemented-hide:] - repeated core.DataSource signed_certificate_timestamp = 5; -} - -message TlsSessionTicketKeys { - // Keys for encrypting and decrypting TLS session tickets. The - // first key in the array contains the key to encrypt all new sessions created by this context. - // All keys are candidates for decrypting received tickets. This allows for easy rotation of keys - // by, for example, putting the new key first, and the previous key second. 
- // - // If :ref:`session_ticket_keys ` - // is not specified, the TLS library will still support resuming sessions via tickets, but it will - // use an internally-generated and managed key, so sessions cannot be resumed across hot restarts - // or on different hosts. - // - // Each key must contain exactly 80 bytes of cryptographically-secure random data. For - // example, the output of ``openssl rand 80``. - // - // .. attention:: - // - // Using this feature has serious security considerations and risks. Improper handling of keys - // may result in loss of secrecy in connections, even if ciphers supporting perfect forward - // secrecy are used. See https://www.imperialviolet.org/2013/06/27/botchingpfs.html for some - // discussion. To minimize the risk, you must: - // - // * Keep the session ticket keys at least as secure as your TLS certificate private keys - // * Rotate session ticket keys at least daily, and preferably hourly - // * Always generate keys using a cryptographically-secure random data source - repeated core.DataSource keys = 1 - [(validate.rules).repeated = {min_items: 1}, (udpa.annotations.sensitive) = true]; -} - -// [#next-free-field: 11] -message CertificateValidationContext { - // Peer certificate verification mode. - enum TrustChainVerification { - // Perform default certificate verification (e.g., against CA / verification lists) - VERIFY_TRUST_CHAIN = 0; - - // Connections where the certificate fails verification will be permitted. - // For HTTP connections, the result of certificate verification can be used in route matching. ( - // see :ref:`validated ` ). - ACCEPT_UNTRUSTED = 1; - } - - // TLS certificate data containing certificate authority certificates to use in verifying - // a presented peer certificate (e.g. server certificate for clusters or client certificate - // for listeners). If not specified and a peer certificate is presented it will not be - // verified. 
By default, a client certificate is optional, unless one of the additional - // options (:ref:`require_client_certificate - // `, - // :ref:`verify_certificate_spki - // `, - // :ref:`verify_certificate_hash - // `, or - // :ref:`match_subject_alt_names - // `) is also - // specified. - // - // It can optionally contain certificate revocation lists, in which case Envoy will verify - // that the presented peer certificate has not been revoked by one of the included CRLs. - // - // See :ref:`the TLS overview ` for a list of common - // system CA locations. - core.DataSource trusted_ca = 1; - - // An optional list of base64-encoded SHA-256 hashes. If specified, Envoy will verify that the - // SHA-256 of the DER-encoded Subject Public Key Information (SPKI) of the presented certificate - // matches one of the specified values. - // - // A base64-encoded SHA-256 of the Subject Public Key Information (SPKI) of the certificate - // can be generated with the following command: - // - // .. code-block:: bash - // - // $ openssl x509 -in path/to/client.crt -noout -pubkey - // | openssl pkey -pubin -outform DER - // | openssl dgst -sha256 -binary - // | openssl enc -base64 - // NvqYIYSbgK2vCJpQhObf77vv+bQWtc5ek5RIOwPiC9A= - // - // This is the format used in HTTP Public Key Pinning. - // - // When both: - // :ref:`verify_certificate_hash - // ` and - // :ref:`verify_certificate_spki - // ` are specified, - // a hash matching value from either of the lists will result in the certificate being accepted. - // - // .. attention:: - // - // This option is preferred over :ref:`verify_certificate_hash - // `, - // because SPKI is tied to a private key, so it doesn't change when the certificate - // is renewed using the same private key. - repeated string verify_certificate_spki = 3 - [(validate.rules).repeated = {items {string {min_bytes: 44 max_bytes: 44}}}]; - - // An optional list of hex-encoded SHA-256 hashes. 
If specified, Envoy will verify that - // the SHA-256 of the DER-encoded presented certificate matches one of the specified values. - // - // A hex-encoded SHA-256 of the certificate can be generated with the following command: - // - // .. code-block:: bash - // - // $ openssl x509 -in path/to/client.crt -outform DER | openssl dgst -sha256 | cut -d" " -f2 - // df6ff72fe9116521268f6f2dd4966f51df479883fe7037b39f75916ac3049d1a - // - // A long hex-encoded and colon-separated SHA-256 (a.k.a. "fingerprint") of the certificate - // can be generated with the following command: - // - // .. code-block:: bash - // - // $ openssl x509 -in path/to/client.crt -noout -fingerprint -sha256 | cut -d"=" -f2 - // DF:6F:F7:2F:E9:11:65:21:26:8F:6F:2D:D4:96:6F:51:DF:47:98:83:FE:70:37:B3:9F:75:91:6A:C3:04:9D:1A - // - // Both of those formats are acceptable. - // - // When both: - // :ref:`verify_certificate_hash - // ` and - // :ref:`verify_certificate_spki - // ` are specified, - // a hash matching value from either of the lists will result in the certificate being accepted. - repeated string verify_certificate_hash = 2 - [(validate.rules).repeated = {items {string {min_bytes: 64 max_bytes: 95}}}]; - - // An optional list of Subject Alternative Names. If specified, Envoy will verify that the - // Subject Alternative Name of the presented certificate matches one of the specified values. - // - // .. attention:: - // - // Subject Alternative Names are easily spoofable and verifying only them is insecure, - // therefore this option must be used together with :ref:`trusted_ca - // `. - repeated string verify_subject_alt_name = 4 [deprecated = true]; - - // An optional list of Subject Alternative name matchers. Envoy will verify that the - // Subject Alternative Name of the presented certificate matches one of the specified matches. 
- // - // When a certificate has wildcard DNS SAN entries, to match a specific client, it should be - // configured with exact match type in the :ref:`string matcher `. - // For example if the certificate has "\*.example.com" as DNS SAN entry, to allow only "api.example.com", - // it should be configured as shown below. - // - // .. code-block:: yaml - // - // match_subject_alt_names: - // exact: "api.example.com" - // - // .. attention:: - // - // Subject Alternative Names are easily spoofable and verifying only them is insecure, - // therefore this option must be used together with :ref:`trusted_ca - // `. - repeated type.matcher.StringMatcher match_subject_alt_names = 9; - - // [#not-implemented-hide:] Must present a signed time-stamped OCSP response. - google.protobuf.BoolValue require_ocsp_staple = 5; - - // [#not-implemented-hide:] Must present signed certificate time-stamp. - google.protobuf.BoolValue require_signed_certificate_timestamp = 6; - - // An optional `certificate revocation list - // `_ - // (in PEM format). If specified, Envoy will verify that the presented peer - // certificate has not been revoked by this CRL. If this DataSource contains - // multiple CRLs, all of them will be used. - core.DataSource crl = 7; - - // If specified, Envoy will not reject expired certificates. - bool allow_expired_certificate = 8; - - // Certificate trust chain verification mode. - TrustChainVerification trust_chain_verification = 10 - [(validate.rules).enum = {defined_only: true}]; -} - -// TLS context shared by both client and server TLS contexts. -// [#next-free-field: 9] -message CommonTlsContext { - message CombinedCertificateValidationContext { - // How to validate peer certificates. - CertificateValidationContext default_validation_context = 1 - [(validate.rules).message = {required: true}]; - - // Config for fetching validation context via SDS API. 
- SdsSecretConfig validation_context_sds_secret_config = 2 - [(validate.rules).message = {required: true}]; - } - - reserved 5; - - // TLS protocol versions, cipher suites etc. - TlsParameters tls_params = 1; - - // :ref:`Multiple TLS certificates ` can be associated with the - // same context to allow both RSA and ECDSA certificates. - // - // Only a single TLS certificate is supported in client contexts. In server contexts, the first - // RSA certificate is used for clients that only support RSA and the first ECDSA certificate is - // used for clients that support ECDSA. - repeated TlsCertificate tls_certificates = 2; - - // Configs for fetching TLS certificates via SDS API. - repeated SdsSecretConfig tls_certificate_sds_secret_configs = 6 - [(validate.rules).repeated = {max_items: 1}]; - - oneof validation_context_type { - // How to validate peer certificates. - CertificateValidationContext validation_context = 3; - - // Config for fetching validation context via SDS API. - SdsSecretConfig validation_context_sds_secret_config = 7; - - // Combined certificate validation context holds a default CertificateValidationContext - // and SDS config. When SDS server returns dynamic CertificateValidationContext, both dynamic - // and default CertificateValidationContext are merged into a new CertificateValidationContext - // for validation. This merge is done by Message::MergeFrom(), so dynamic - // CertificateValidationContext overwrites singular fields in default - // CertificateValidationContext, and concatenates repeated fields to default - // CertificateValidationContext, and logical OR is applied to boolean fields. - CombinedCertificateValidationContext combined_validation_context = 8; - } - - // Supplies the list of ALPN protocols that the listener should expose. 
In - // practice this is likely to be set to one of two values (see the - // :ref:`codec_type - // ` - // parameter in the HTTP connection manager for more information): - // - // * "h2,http/1.1" If the listener is going to support both HTTP/2 and HTTP/1.1. - // * "http/1.1" If the listener is only going to support HTTP/1.1. - // - // There is no default for this parameter. If empty, Envoy will not expose ALPN. - repeated string alpn_protocols = 4; -} - -message UpstreamTlsContext { - // Common TLS context settings. - // - // .. attention:: - // - // Server certificate verification is not enabled by default. Configure - // :ref:`trusted_ca` to enable - // verification. - CommonTlsContext common_tls_context = 1; - - // SNI string to use when creating TLS backend connections. - string sni = 2 [(validate.rules).string = {max_bytes: 255}]; - - // If true, server-initiated TLS renegotiation will be allowed. - // - // .. attention:: - // - // TLS renegotiation is considered insecure and shouldn't be used unless absolutely necessary. - bool allow_renegotiation = 3; - - // Maximum number of session keys (Pre-Shared Keys for TLSv1.3+, Session IDs and Session Tickets - // for TLSv1.2 and older) to store for the purpose of session resumption. - // - // Defaults to 1, setting this to 0 disables session resumption. - google.protobuf.UInt32Value max_session_keys = 4; -} - -// [#next-free-field: 8] -message DownstreamTlsContext { - // Common TLS context settings. - CommonTlsContext common_tls_context = 1; - - // If specified, Envoy will reject connections without a valid client - // certificate. - google.protobuf.BoolValue require_client_certificate = 2; - - // If specified, Envoy will reject connections without a valid and matching SNI. - // [#not-implemented-hide:] - google.protobuf.BoolValue require_sni = 3; - - oneof session_ticket_keys_type { - // TLS session ticket key settings. 
- TlsSessionTicketKeys session_ticket_keys = 4; - - // Config for fetching TLS session ticket keys via SDS API. - SdsSecretConfig session_ticket_keys_sds_secret_config = 5; - - // Config for controlling stateless TLS session resumption: setting this to true will cause the TLS - // server to not issue TLS session tickets for the purposes of stateless TLS session resumption. - // If set to false, the TLS server will issue TLS session tickets and encrypt/decrypt them using - // the keys specified through either :ref:`session_ticket_keys ` - // or :ref:`session_ticket_keys_sds_secret_config `. - // If this config is set to false and no keys are explicitly configured, the TLS server will issue - // TLS session tickets and encrypt/decrypt them using an internally-generated and managed key, with the - // implication that sessions cannot be resumed across hot restarts or on different hosts. - bool disable_stateless_session_resumption = 7; - } - - // If specified, session_timeout will change maximum lifetime (in seconds) of TLS session - // Currently this value is used as a hint to `TLS session ticket lifetime (for TLSv1.2) - // ` - // only seconds could be specified (fractional seconds are going to be ignored). - google.protobuf.Duration session_timeout = 6 [(validate.rules).duration = { - lt {seconds: 4294967296} - gte {} - }]; -} - -message GenericSecret { - // Secret of generic type and is available to filters. - core.DataSource secret = 1 [(udpa.annotations.sensitive) = true]; -} - -message SdsSecretConfig { - // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. - // When both name and config are specified, then secret can be fetched and/or reloaded via - // SDS. When only name is specified, then secret will be loaded from static resources. - string name = 1; - - core.ConfigSource sds_config = 2; -} - -// [#next-free-field: 6] -message Secret { - // Name (FQDN, UUID, SPKI, SHA256, etc.) 
by which the secret can be uniquely referred to.
-  string name = 1;
-
-  oneof type {
-    TlsCertificate tls_certificate = 2;
-
-    TlsSessionTicketKeys session_ticket_keys = 3;
-
-    CertificateValidationContext validation_context = 4;
-
-    GenericSecret generic_secret = 5;
-  }
-}
diff --git a/api/envoy/api/v2/auth/common.proto b/api/envoy/api/v2/auth/common.proto
new file mode 100644
index 0000000000000..c8122f4010297
--- /dev/null
+++ b/api/envoy/api/v2/auth/common.proto
@@ -0,0 +1,327 @@
+syntax = "proto3";
+
+package envoy.api.v2.auth;
+
+import "envoy/api/v2/core/base.proto";
+import "envoy/type/matcher/string.proto";
+
+import "google/protobuf/any.proto";
+import "google/protobuf/struct.proto";
+import "google/protobuf/wrappers.proto";
+
+import "udpa/annotations/migrate.proto";
+import "udpa/annotations/sensitive.proto";
+import "udpa/annotations/status.proto";
+import "validate/validate.proto";
+
+option java_package = "io.envoyproxy.envoy.api.v2.auth";
+option java_outer_classname = "CommonProto";
+option java_multiple_files = true;
+option (udpa.annotations.file_migrate).move_to_package =
+    "envoy.extensions.transport_sockets.tls.v3";
+option (udpa.annotations.file_status).package_version_status = FROZEN;
+
+// [#protodoc-title: Common TLS configuration]
+
+message TlsParameters {
+  enum TlsProtocol {
+    // Envoy will choose the optimal TLS version.
+    TLS_AUTO = 0;
+
+    // TLS 1.0
+    TLSv1_0 = 1;
+
+    // TLS 1.1
+    TLSv1_1 = 2;
+
+    // TLS 1.2
+    TLSv1_2 = 3;
+
+    // TLS 1.3
+    TLSv1_3 = 4;
+  }
+
+  // Minimum TLS protocol version. By default, it's ``TLSv1_2`` for clients and ``TLSv1_0`` for
+  // servers.
+  TlsProtocol tls_minimum_protocol_version = 1 [(validate.rules).enum = {defined_only: true}];
+
+  // Maximum TLS protocol version. By default, it's ``TLSv1_3`` for servers in non-FIPS builds, and
+  // ``TLSv1_2`` for clients and for servers using :ref:`BoringSSL FIPS `. 
+ TlsProtocol tls_maximum_protocol_version = 2 [(validate.rules).enum = {defined_only: true}]; + + // If specified, the TLS listener will only support the specified `cipher list + // `_ + // when negotiating TLS 1.0-1.2 (this setting has no effect when negotiating TLS 1.3). If not + // specified, the default list will be used. + // + // In non-FIPS builds, the default cipher list is: + // + // .. code-block:: none + // + // [ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA-CHACHA20-POLY1305] + // [ECDHE-RSA-AES128-GCM-SHA256|ECDHE-RSA-CHACHA20-POLY1305] + // ECDHE-ECDSA-AES128-SHA + // ECDHE-RSA-AES128-SHA + // AES128-GCM-SHA256 + // AES128-SHA + // ECDHE-ECDSA-AES256-GCM-SHA384 + // ECDHE-RSA-AES256-GCM-SHA384 + // ECDHE-ECDSA-AES256-SHA + // ECDHE-RSA-AES256-SHA + // AES256-GCM-SHA384 + // AES256-SHA + // + // In builds using :ref:`BoringSSL FIPS `, the default cipher list is: + // + // .. code-block:: none + // + // ECDHE-ECDSA-AES128-GCM-SHA256 + // ECDHE-RSA-AES128-GCM-SHA256 + // ECDHE-ECDSA-AES128-SHA + // ECDHE-RSA-AES128-SHA + // AES128-GCM-SHA256 + // AES128-SHA + // ECDHE-ECDSA-AES256-GCM-SHA384 + // ECDHE-RSA-AES256-GCM-SHA384 + // ECDHE-ECDSA-AES256-SHA + // ECDHE-RSA-AES256-SHA + // AES256-GCM-SHA384 + // AES256-SHA + repeated string cipher_suites = 3; + + // If specified, the TLS connection will only support the specified ECDH + // curves. If not specified, the default curves will be used. + // + // In non-FIPS builds, the default curves are: + // + // .. code-block:: none + // + // X25519 + // P-256 + // + // In builds using :ref:`BoringSSL FIPS `, the default curve is: + // + // .. code-block:: none + // + // P-256 + repeated string ecdh_curves = 4; +} + +// BoringSSL private key method configuration. The private key methods are used for external +// (potentially asynchronous) signing and decryption operations. Some use cases for private key +// methods would be TPM support and TLS acceleration. 
+message PrivateKeyProvider { + // Private key method provider name. The name must match a + // supported private key method provider type. + string provider_name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Private key method provider specific configuration. + oneof config_type { + google.protobuf.Struct config = 2 [deprecated = true, (udpa.annotations.sensitive) = true]; + + google.protobuf.Any typed_config = 3 [(udpa.annotations.sensitive) = true]; + } +} + +// [#next-free-field: 7] +message TlsCertificate { + // The TLS certificate chain. + core.DataSource certificate_chain = 1; + + // The TLS private key. + core.DataSource private_key = 2 [(udpa.annotations.sensitive) = true]; + + // BoringSSL private key method provider. This is an alternative to :ref:`private_key + // ` field. This can't be + // marked as ``oneof`` due to API compatibility reasons. Setting both :ref:`private_key + // ` and + // :ref:`private_key_provider + // ` fields will result in an + // error. + PrivateKeyProvider private_key_provider = 6; + + // The password to decrypt the TLS private key. If this field is not set, it is assumed that the + // TLS private key is not password encrypted. + core.DataSource password = 3 [(udpa.annotations.sensitive) = true]; + + // [#not-implemented-hide:] + core.DataSource ocsp_staple = 4; + + // [#not-implemented-hide:] + repeated core.DataSource signed_certificate_timestamp = 5; +} + +message TlsSessionTicketKeys { + // Keys for encrypting and decrypting TLS session tickets. The + // first key in the array contains the key to encrypt all new sessions created by this context. + // All keys are candidates for decrypting received tickets. This allows for easy rotation of keys + // by, for example, putting the new key first, and the previous key second. 
+ // + // If :ref:`session_ticket_keys ` + // is not specified, the TLS library will still support resuming sessions via tickets, but it will + // use an internally-generated and managed key, so sessions cannot be resumed across hot restarts + // or on different hosts. + // + // Each key must contain exactly 80 bytes of cryptographically-secure random data. For + // example, the output of ``openssl rand 80``. + // + // .. attention:: + // + // Using this feature has serious security considerations and risks. Improper handling of keys + // may result in loss of secrecy in connections, even if ciphers supporting perfect forward + // secrecy are used. See https://www.imperialviolet.org/2013/06/27/botchingpfs.html for some + // discussion. To minimize the risk, you must: + // + // * Keep the session ticket keys at least as secure as your TLS certificate private keys + // * Rotate session ticket keys at least daily, and preferably hourly + // * Always generate keys using a cryptographically-secure random data source + repeated core.DataSource keys = 1 + [(validate.rules).repeated = {min_items: 1}, (udpa.annotations.sensitive) = true]; +} + +// [#next-free-field: 11] +message CertificateValidationContext { + // Peer certificate verification mode. + enum TrustChainVerification { + // Perform default certificate verification (e.g., against CA / verification lists) + VERIFY_TRUST_CHAIN = 0; + + // Connections where the certificate fails verification will be permitted. + // For HTTP connections, the result of certificate verification can be used in route matching. ( + // see :ref:`validated ` ). + ACCEPT_UNTRUSTED = 1; + } + + // TLS certificate data containing certificate authority certificates to use in verifying + // a presented peer certificate (e.g. server certificate for clusters or client certificate + // for listeners). If not specified and a peer certificate is presented it will not be + // verified. 
By default, a client certificate is optional, unless one of the additional + // options (:ref:`require_client_certificate + // `, + // :ref:`verify_certificate_spki + // `, + // :ref:`verify_certificate_hash + // `, or + // :ref:`match_subject_alt_names + // `) is also + // specified. + // + // It can optionally contain certificate revocation lists, in which case Envoy will verify + // that the presented peer certificate has not been revoked by one of the included CRLs. + // + // See :ref:`the TLS overview ` for a list of common + // system CA locations. + core.DataSource trusted_ca = 1; + + // An optional list of base64-encoded SHA-256 hashes. If specified, Envoy will verify that the + // SHA-256 of the DER-encoded Subject Public Key Information (SPKI) of the presented certificate + // matches one of the specified values. + // + // A base64-encoded SHA-256 of the Subject Public Key Information (SPKI) of the certificate + // can be generated with the following command: + // + // .. code-block:: bash + // + // $ openssl x509 -in path/to/client.crt -noout -pubkey + // | openssl pkey -pubin -outform DER + // | openssl dgst -sha256 -binary + // | openssl enc -base64 + // NvqYIYSbgK2vCJpQhObf77vv+bQWtc5ek5RIOwPiC9A= + // + // This is the format used in HTTP Public Key Pinning. + // + // When both: + // :ref:`verify_certificate_hash + // ` and + // :ref:`verify_certificate_spki + // ` are specified, + // a hash matching value from either of the lists will result in the certificate being accepted. + // + // .. attention:: + // + // This option is preferred over :ref:`verify_certificate_hash + // `, + // because SPKI is tied to a private key, so it doesn't change when the certificate + // is renewed using the same private key. + repeated string verify_certificate_spki = 3 + [(validate.rules).repeated = {items {string {min_bytes: 44 max_bytes: 44}}}]; + + // An optional list of hex-encoded SHA-256 hashes. 
If specified, Envoy will verify that + // the SHA-256 of the DER-encoded presented certificate matches one of the specified values. + // + // A hex-encoded SHA-256 of the certificate can be generated with the following command: + // + // .. code-block:: bash + // + // $ openssl x509 -in path/to/client.crt -outform DER | openssl dgst -sha256 | cut -d" " -f2 + // df6ff72fe9116521268f6f2dd4966f51df479883fe7037b39f75916ac3049d1a + // + // A long hex-encoded and colon-separated SHA-256 (a.k.a. "fingerprint") of the certificate + // can be generated with the following command: + // + // .. code-block:: bash + // + // $ openssl x509 -in path/to/client.crt -noout -fingerprint -sha256 | cut -d"=" -f2 + // DF:6F:F7:2F:E9:11:65:21:26:8F:6F:2D:D4:96:6F:51:DF:47:98:83:FE:70:37:B3:9F:75:91:6A:C3:04:9D:1A + // + // Both of those formats are acceptable. + // + // When both: + // :ref:`verify_certificate_hash + // ` and + // :ref:`verify_certificate_spki + // ` are specified, + // a hash matching value from either of the lists will result in the certificate being accepted. + repeated string verify_certificate_hash = 2 + [(validate.rules).repeated = {items {string {min_bytes: 64 max_bytes: 95}}}]; + + // An optional list of Subject Alternative Names. If specified, Envoy will verify that the + // Subject Alternative Name of the presented certificate matches one of the specified values. + // + // .. attention:: + // + // Subject Alternative Names are easily spoofable and verifying only them is insecure, + // therefore this option must be used together with :ref:`trusted_ca + // `. + repeated string verify_subject_alt_name = 4 [deprecated = true]; + + // An optional list of Subject Alternative name matchers. Envoy will verify that the + // Subject Alternative Name of the presented certificate matches one of the specified matches. 
+ // + // When a certificate has wildcard DNS SAN entries, to match a specific client, it should be + // configured with exact match type in the :ref:`string matcher `. + // For example if the certificate has "\*.example.com" as DNS SAN entry, to allow only "api.example.com", + // it should be configured as shown below. + // + // .. code-block:: yaml + // + // match_subject_alt_names: + // exact: "api.example.com" + // + // .. attention:: + // + // Subject Alternative Names are easily spoofable and verifying only them is insecure, + // therefore this option must be used together with :ref:`trusted_ca + // `. + repeated type.matcher.StringMatcher match_subject_alt_names = 9; + + // [#not-implemented-hide:] Must present a signed time-stamped OCSP response. + google.protobuf.BoolValue require_ocsp_staple = 5; + + // [#not-implemented-hide:] Must present signed certificate time-stamp. + google.protobuf.BoolValue require_signed_certificate_timestamp = 6; + + // An optional `certificate revocation list + // `_ + // (in PEM format). If specified, Envoy will verify that the presented peer + // certificate has not been revoked by this CRL. If this DataSource contains + // multiple CRLs, all of them will be used. + core.DataSource crl = 7; + + // If specified, Envoy will not reject expired certificates. + bool allow_expired_certificate = 8; + + // Certificate trust chain verification mode. 
+ TrustChainVerification trust_chain_verification = 10 + [(validate.rules).enum = {defined_only: true}]; +} diff --git a/api/envoy/api/v2/auth/secret.proto b/api/envoy/api/v2/auth/secret.proto new file mode 100644 index 0000000000000..3a6d8cf7dcb67 --- /dev/null +++ b/api/envoy/api/v2/auth/secret.proto @@ -0,0 +1,50 @@ +syntax = "proto3"; + +package envoy.api.v2.auth; + +import "envoy/api/v2/auth/common.proto"; +import "envoy/api/v2/core/base.proto"; +import "envoy/api/v2/core/config_source.proto"; + +import "udpa/annotations/migrate.proto"; +import "udpa/annotations/sensitive.proto"; +import "udpa/annotations/status.proto"; + +option java_package = "io.envoyproxy.envoy.api.v2.auth"; +option java_outer_classname = "SecretProto"; +option java_multiple_files = true; +option (udpa.annotations.file_migrate).move_to_package = + "envoy.extensions.transport_sockets.tls.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; + +// [#protodoc-title: Secrets configuration] + +message GenericSecret { + // Secret of generic type and is available to filters. + core.DataSource secret = 1 [(udpa.annotations.sensitive) = true]; +} + +message SdsSecretConfig { + // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. + // When both name and config are specified, then secret can be fetched and/or reloaded via + // SDS. When only name is specified, then secret will be loaded from static resources. + string name = 1; + + core.ConfigSource sds_config = 2; +} + +// [#next-free-field: 6] +message Secret { + // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. 
+ string name = 1; + + oneof type { + TlsCertificate tls_certificate = 2; + + TlsSessionTicketKeys session_ticket_keys = 3; + + CertificateValidationContext validation_context = 4; + + GenericSecret generic_secret = 5; + } +} diff --git a/api/envoy/api/v2/auth/tls.proto b/api/envoy/api/v2/auth/tls.proto new file mode 100644 index 0000000000000..201973a2b9de8 --- /dev/null +++ b/api/envoy/api/v2/auth/tls.proto @@ -0,0 +1,152 @@ +syntax = "proto3"; + +package envoy.api.v2.auth; + +import "envoy/api/v2/auth/common.proto"; +import "envoy/api/v2/auth/secret.proto"; + +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.api.v2.auth"; +option java_outer_classname = "TlsProto"; +option java_multiple_files = true; +option (udpa.annotations.file_migrate).move_to_package = + "envoy.extensions.transport_sockets.tls.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; + +// [#protodoc-title: TLS transport socket] +// [#extension: envoy.transport_sockets.tls] +// The TLS contexts below provide the transport socket configuration for upstream/downstream TLS. + +message UpstreamTlsContext { + // Common TLS context settings. + // + // .. attention:: + // + // Server certificate verification is not enabled by default. Configure + // :ref:`trusted_ca` to enable + // verification. + CommonTlsContext common_tls_context = 1; + + // SNI string to use when creating TLS backend connections. + string sni = 2 [(validate.rules).string = {max_bytes: 255}]; + + // If true, server-initiated TLS renegotiation will be allowed. + // + // .. attention:: + // + // TLS renegotiation is considered insecure and shouldn't be used unless absolutely necessary. 
+ bool allow_renegotiation = 3; + + // Maximum number of session keys (Pre-Shared Keys for TLSv1.3+, Session IDs and Session Tickets + // for TLSv1.2 and older) to store for the purpose of session resumption. + // + // Defaults to 1, setting this to 0 disables session resumption. + google.protobuf.UInt32Value max_session_keys = 4; +} + +// [#next-free-field: 8] +message DownstreamTlsContext { + // Common TLS context settings. + CommonTlsContext common_tls_context = 1; + + // If specified, Envoy will reject connections without a valid client + // certificate. + google.protobuf.BoolValue require_client_certificate = 2; + + // If specified, Envoy will reject connections without a valid and matching SNI. + // [#not-implemented-hide:] + google.protobuf.BoolValue require_sni = 3; + + oneof session_ticket_keys_type { + // TLS session ticket key settings. + TlsSessionTicketKeys session_ticket_keys = 4; + + // Config for fetching TLS session ticket keys via SDS API. + SdsSecretConfig session_ticket_keys_sds_secret_config = 5; + + // Config for controlling stateless TLS session resumption: setting this to true will cause the TLS + // server to not issue TLS session tickets for the purposes of stateless TLS session resumption. + // If set to false, the TLS server will issue TLS session tickets and encrypt/decrypt them using + // the keys specified through either :ref:`session_ticket_keys ` + // or :ref:`session_ticket_keys_sds_secret_config `. + // If this config is set to false and no keys are explicitly configured, the TLS server will issue + // TLS session tickets and encrypt/decrypt them using an internally-generated and managed key, with the + // implication that sessions cannot be resumed across hot restarts or on different hosts. 
+ bool disable_stateless_session_resumption = 7; + } + + // If specified, session_timeout will change maximum lifetime (in seconds) of TLS session + // Currently this value is used as a hint to `TLS session ticket lifetime (for TLSv1.2) + // ` + // only seconds could be specified (fractional seconds are going to be ignored). + google.protobuf.Duration session_timeout = 6 [(validate.rules).duration = { + lt {seconds: 4294967296} + gte {} + }]; +} + +// TLS context shared by both client and server TLS contexts. +// [#next-free-field: 9] +message CommonTlsContext { + message CombinedCertificateValidationContext { + // How to validate peer certificates. + CertificateValidationContext default_validation_context = 1 + [(validate.rules).message = {required: true}]; + + // Config for fetching validation context via SDS API. + SdsSecretConfig validation_context_sds_secret_config = 2 + [(validate.rules).message = {required: true}]; + } + + reserved 5; + + // TLS protocol versions, cipher suites etc. + TlsParameters tls_params = 1; + + // :ref:`Multiple TLS certificates ` can be associated with the + // same context to allow both RSA and ECDSA certificates. + // + // Only a single TLS certificate is supported in client contexts. In server contexts, the first + // RSA certificate is used for clients that only support RSA and the first ECDSA certificate is + // used for clients that support ECDSA. + repeated TlsCertificate tls_certificates = 2; + + // Configs for fetching TLS certificates via SDS API. + repeated SdsSecretConfig tls_certificate_sds_secret_configs = 6 + [(validate.rules).repeated = {max_items: 1}]; + + oneof validation_context_type { + // How to validate peer certificates. + CertificateValidationContext validation_context = 3; + + // Config for fetching validation context via SDS API. + SdsSecretConfig validation_context_sds_secret_config = 7; + + // Combined certificate validation context holds a default CertificateValidationContext + // and SDS config. 
When SDS server returns dynamic CertificateValidationContext, both dynamic + // and default CertificateValidationContext are merged into a new CertificateValidationContext + // for validation. This merge is done by Message::MergeFrom(), so dynamic + // CertificateValidationContext overwrites singular fields in default + // CertificateValidationContext, and concatenates repeated fields to default + // CertificateValidationContext, and logical OR is applied to boolean fields. + CombinedCertificateValidationContext combined_validation_context = 8; + } + + // Supplies the list of ALPN protocols that the listener should expose. In + // practice this is likely to be set to one of two values (see the + // :ref:`codec_type + // ` + // parameter in the HTTP connection manager for more information): + // + // * "h2,http/1.1" If the listener is going to support both HTTP/2 and HTTP/1.1. + // * "http/1.1" If the listener is only going to support HTTP/1.1. + // + // There is no default for this parameter. If empty, Envoy will not expose ALPN. + repeated string alpn_protocols = 4; +} diff --git a/api/envoy/api/v2/cluster.proto b/api/envoy/api/v2/cluster.proto index 5de5c20df570d..c95de62c128d4 100644 --- a/api/envoy/api/v2/cluster.proto +++ b/api/envoy/api/v2/cluster.proto @@ -2,7 +2,7 @@ syntax = "proto3"; package envoy.api.v2; -import "envoy/api/v2/auth/cert.proto"; +import "envoy/api/v2/auth/tls.proto"; import "envoy/api/v2/cluster/circuit_breaker.proto"; import "envoy/api/v2/cluster/filter.proto"; import "envoy/api/v2/cluster/outlier_detection.proto"; @@ -471,7 +471,7 @@ message Cluster { reserved 12, 15; // Configuration to use different transport sockets for different endpoints. - // The entry of *envoy.transport_socket* in the + // The entry of *envoy.transport_socket_match* in the // :ref:`LbEndpoint.Metadata ` // is used to match against the transport sockets as they appear in the list. The first // :ref:`match ` is used. 
@@ -491,14 +491,14 @@ message Cluster { // transport_socket: // name: envoy.transport_sockets.raw_buffer // - // Connections to the endpoints whose metadata value under *envoy.transport_socket* + // Connections to the endpoints whose metadata value under *envoy.transport_socket_match* // having "acceptMTLS"/"true" key/value pair use the "enableMTLS" socket configuration. // // If a :ref:`socket match ` with empty match // criteria is provided, that always match any endpoint. For example, the "defaultToPlaintext" // socket match in case above. // - // If an endpoint metadata's value under *envoy.transport_socket* does not match any + // If an endpoint metadata's value under *envoy.transport_socket_match* does not match any // *TransportSocketMatch*, socket configuration fallbacks to use the *tls_context* or // *transport_socket* specified in this cluster. // diff --git a/api/envoy/api/v2/core/base.proto b/api/envoy/api/v2/core/base.proto index b7145d77efd3c..39846bc658a88 100644 --- a/api/envoy/api/v2/core/base.proto +++ b/api/envoy/api/v2/core/base.proto @@ -93,7 +93,7 @@ message BuildVersion { type.SemanticVersion version = 1; // Free-form build information. - // Envoy defines several well known keys in the source/common/common/version.h file + // Envoy defines several well known keys in the source/common/version/version.h file google.protobuf.Struct metadata = 2; } diff --git a/api/envoy/api/v2/core/config_source.proto b/api/envoy/api/v2/core/config_source.proto index fa42a7aeec1ce..7032b2c10d878 100644 --- a/api/envoy/api/v2/core/config_source.proto +++ b/api/envoy/api/v2/core/config_source.proto @@ -57,10 +57,6 @@ message ApiConfigSource { // Using the delta xDS gRPC service, i.e. DeltaDiscovery{Request,Response} // rather than Discovery{Request,Response}. Rather than sending Envoy the entire state // with every update, the xDS server only sends what has changed since the last update. - // - // DELTA_GRPC is not yet entirely implemented! 
Initially, only CDS is available. - // Do not use for other xDSes. - // [#comment:TODO(fredlas) update/remove this warning when appropriate.] DELTA_GRPC = 3; } @@ -110,6 +106,9 @@ message AggregatedConfigSource { // set in :ref:`ConfigSource ` can be used to // specify that other data can be obtained from the same server. message SelfConfigSource { + // API version for xDS transport protocol. This describes the xDS gRPC/REST + // endpoint and version of [Delta]DiscoveryRequest/Response used on the wire. + ApiVersion transport_api_version = 1 [(validate.rules).enum = {defined_only: true}]; } // Rate Limit settings to be applied for discovery requests made by Envoy. diff --git a/api/envoy/api/v2/core/protocol.proto b/api/envoy/api/v2/core/protocol.proto index 5838ca7440759..9c47e388ee1af 100644 --- a/api/envoy/api/v2/core/protocol.proto +++ b/api/envoy/api/v2/core/protocol.proto @@ -85,8 +85,6 @@ message HttpProtocolOptions { // Total duration to keep alive an HTTP request/response stream. If the time limit is reached the stream will be // reset independent of any other timeouts. If not specified, this value is not set. - // The current implementation implements this timeout on downstream connections only. - // [#comment:TODO(shikugawa): add this functionality to upstream.] google.protobuf.Duration max_stream_duration = 4; // Action to take when a client request with a header name containing underscore characters is received. diff --git a/api/envoy/api/v2/endpoint.proto b/api/envoy/api/v2/endpoint.proto index e233b0e7d34ea..92a2b13a8947e 100644 --- a/api/envoy/api/v2/endpoint.proto +++ b/api/envoy/api/v2/endpoint.proto @@ -36,6 +36,7 @@ message ClusterLoadAssignment { // Load balancing policy settings. // [#next-free-field: 6] message Policy { + // [#not-implemented-hide:] message DropOverload { // Identifier for the policy specifying the drop. 
string category = 1 [(validate.rules).string = {min_bytes: 1}]; @@ -65,6 +66,7 @@ message ClusterLoadAssignment { // "throttle"_drop = 60% // "lb"_drop = 20% // 50% of the remaining 'actual' load, which is 40%. // actual_outgoing_load = 20% // remaining after applying all categories. + // [#not-implemented-hide:] repeated DropOverload drop_overloads = 2; // Priority levels and localities are considered overprovisioned with this diff --git a/api/envoy/api/v2/listener/listener_components.proto b/api/envoy/api/v2/listener/listener_components.proto index fe449c63358a1..a6791c86cd0be 100644 --- a/api/envoy/api/v2/listener/listener_components.proto +++ b/api/envoy/api/v2/listener/listener_components.proto @@ -2,7 +2,7 @@ syntax = "proto3"; package envoy.api.v2.listener; -import "envoy/api/v2/auth/cert.proto"; +import "envoy/api/v2/auth/tls.proto"; import "envoy/api/v2/core/address.proto"; import "envoy/api/v2/core/base.proto"; import "envoy/type/range.proto"; diff --git a/api/envoy/api/v2/route/route_components.proto b/api/envoy/api/v2/route/route_components.proto index c890134414e55..007f71d57cb51 100644 --- a/api/envoy/api/v2/route/route_components.proto +++ b/api/envoy/api/v2/route/route_components.proto @@ -1177,6 +1177,21 @@ message RedirectAction { oneof path_rewrite_specifier { // The path portion of the URL will be swapped with this value. + // Please note that query string in path_redirect will override the + // request's query string and will not be stripped. + // + // For example, let's say we have the following routes: + // + // - match: { path: "/old-path-1" } + // redirect: { path_redirect: "/new-path-1" } + // - match: { path: "/old-path-2" } + // redirect: { path_redirect: "/new-path-2", strip_query: "true" } + // - match: { path: "/old-path-3" } + // redirect: { path_redirect: "/new-path-3?foo=1", strip_query: "true" } + // + // 1. if request uri is "/old-path-1?bar=1", users will be redirected to "/new-path-1?bar=1" + // 2. 
if request uri is "/old-path-2?bar=1", users will be redirected to "/new-path-2" + // 3. if request uri is "/old-path-3?bar=1", users will be redirected to "/new-path-3?foo=1" string path_redirect = 2 [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; diff --git a/api/envoy/config/accesslog/v3/BUILD b/api/envoy/config/accesslog/v3/BUILD index 92e9f39492511..518ca23126cd1 100644 --- a/api/envoy/config/accesslog/v3/BUILD +++ b/api/envoy/config/accesslog/v3/BUILD @@ -9,6 +9,7 @@ api_proto_package( "//envoy/config/core/v3:pkg", "//envoy/config/filter/accesslog/v2:pkg", "//envoy/config/route/v3:pkg", + "//envoy/type/matcher/v3:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], diff --git a/api/envoy/config/accesslog/v3/accesslog.proto b/api/envoy/config/accesslog/v3/accesslog.proto index f5732ba3f8e42..e9d815aafcea9 100644 --- a/api/envoy/config/accesslog/v3/accesslog.proto +++ b/api/envoy/config/accesslog/v3/accesslog.proto @@ -4,10 +4,12 @@ package envoy.config.accesslog.v3; import "envoy/config/core/v3/base.proto"; import "envoy/config/route/v3/route_components.proto"; +import "envoy/type/matcher/v3/metadata.proto"; import "envoy/type/v3/percent.proto"; import "google/protobuf/any.proto"; import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; @@ -39,8 +41,8 @@ message AccessLog { // Filter which is used to determine if the access log needs to be written. AccessLogFilter filter = 2; - // Custom configuration that depends on the access log being instantiated. Built-in - // configurations include: + // Custom configuration that depends on the access log being instantiated. + // Built-in configurations include: // // #. 
"envoy.access_loggers.file": :ref:`FileAccessLog // ` @@ -53,7 +55,7 @@ message AccessLog { } } -// [#next-free-field: 12] +// [#next-free-field: 13] message AccessLogFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.accesslog.v2.AccessLogFilter"; @@ -93,6 +95,9 @@ message AccessLogFilter { // Extension filter. ExtensionFilter extension_filter = 11; + + // Metadata Filter + MetadataFilter metadata_filter = 12; } } @@ -156,25 +161,30 @@ message RuntimeFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.accesslog.v2.RuntimeFilter"; - // Runtime key to get an optional overridden numerator for use in the *percent_sampled* field. - // If found in runtime, this value will replace the default numerator. + // Runtime key to get an optional overridden numerator for use in the + // *percent_sampled* field. If found in runtime, this value will replace the + // default numerator. string runtime_key = 1 [(validate.rules).string = {min_bytes: 1}]; - // The default sampling percentage. If not specified, defaults to 0% with denominator of 100. + // The default sampling percentage. If not specified, defaults to 0% with + // denominator of 100. type.v3.FractionalPercent percent_sampled = 2; // By default, sampling pivots on the header - // :ref:`x-request-id` being present. If - // :ref:`x-request-id` is present, the filter will - // consistently sample across multiple hosts based on the runtime key value and the value - // extracted from :ref:`x-request-id`. If it is - // missing, or *use_independent_randomness* is set to true, the filter will randomly sample based - // on the runtime key value alone. *use_independent_randomness* can be used for logging kill - // switches within complex nested :ref:`AndFilter + // :ref:`x-request-id` being + // present. 
If :ref:`x-request-id` + // is present, the filter will consistently sample across multiple hosts based + // on the runtime key value and the value extracted from + // :ref:`x-request-id`. If it is + // missing, or *use_independent_randomness* is set to true, the filter will + // randomly sample based on the runtime key value alone. + // *use_independent_randomness* can be used for logging kill switches within + // complex nested :ref:`AndFilter // ` and :ref:`OrFilter - // ` blocks that are easier to reason about - // from a probability perspective (i.e., setting to true will cause the filter to behave like - // an independent random variable when composed within logical operator filters). + // ` blocks that are easier to + // reason about from a probability perspective (i.e., setting to true will + // cause the filter to behave like an independent random variable when + // composed within logical operator filters). bool use_independent_randomness = 3; } @@ -203,21 +213,22 @@ message HeaderFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.accesslog.v2.HeaderFilter"; - // Only requests with a header which matches the specified HeaderMatcher will pass the filter - // check. + // Only requests with a header which matches the specified HeaderMatcher will + // pass the filter check. route.v3.HeaderMatcher header = 1 [(validate.rules).message = {required: true}]; } // Filters requests that received responses with an Envoy response flag set. // A list of the response flags can be found -// in the access log formatter :ref:`documentation`. +// in the access log formatter +// :ref:`documentation`. message ResponseFlagFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.accesslog.v2.ResponseFlagFilter"; - // Only responses with the any of the flags listed in this field will be logged. - // This field is optional. If it is not specified, then any response flag will pass - // the filter check. 
+ // Only responses with the any of the flags listed in this field will be + // logged. This field is optional. If it is not specified, then any response + // flag will pass the filter check. repeated string flags = 1 [(validate.rules).repeated = { items { string { @@ -240,13 +251,16 @@ message ResponseFlagFilter { in: "SI" in: "IH" in: "DPE" + in: "UMSDR" + in: "RFCF" + in: "NFCF" } } }]; } -// Filters gRPC requests based on their response status. If a gRPC status is not provided, the -// filter will infer the status from the HTTP status code. +// Filters gRPC requests based on their response status. If a gRPC status is not +// provided, the filter will infer the status from the HTTP status code. message GrpcStatusFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.accesslog.v2.GrpcStatusFilter"; @@ -274,11 +288,32 @@ message GrpcStatusFilter { // Logs only responses that have any one of the gRPC statuses in this field. repeated Status statuses = 1 [(validate.rules).repeated = {items {enum {defined_only: true}}}]; - // If included and set to true, the filter will instead block all responses with a gRPC status or - // inferred gRPC status enumerated in statuses, and allow all other responses. + // If included and set to true, the filter will instead block all responses + // with a gRPC status or inferred gRPC status enumerated in statuses, and + // allow all other responses. bool exclude = 2; } +// Filters based on matching dynamic metadata. +// If the matcher path and key correspond to an existing key in dynamic +// metadata, the request is logged only if the matcher value is equal to the +// metadata value. If the matcher path and key *do not* correspond to an +// existing key in dynamic metadata, the request is logged only if +// match_if_key_not_found is "true" or unset. 
+message MetadataFilter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.filter.accesslog.v2.MetadataFilter"; + + // Matcher to check metadata for specified value. For example, to match on the + // access_log_hint metadata, set the filter to "envoy.common" and the path to + // "access_log_hint", and the value to "true". + type.matcher.v3.MetadataMatcher matcher = 1; + + // Default result if the key does not exist in dynamic metadata: if unset or + // true, then log; if false, then don't log. + google.protobuf.BoolValue match_if_key_not_found = 2; +} + // Extension filter is statically registered at runtime. message ExtensionFilter { option (udpa.annotations.versioning).previous_message_type = diff --git a/api/envoy/config/accesslog/v4alpha/BUILD b/api/envoy/config/accesslog/v4alpha/BUILD new file mode 100644 index 0000000000000..e426e922fa726 --- /dev/null +++ b/api/envoy/config/accesslog/v4alpha/BUILD @@ -0,0 +1,16 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/accesslog/v3:pkg", + "//envoy/config/core/v4alpha:pkg", + "//envoy/config/route/v4alpha:pkg", + "//envoy/type/matcher/v4alpha:pkg", + "//envoy/type/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/config/accesslog/v4alpha/accesslog.proto b/api/envoy/config/accesslog/v4alpha/accesslog.proto new file mode 100644 index 0000000000000..bd4bcd48c4b4a --- /dev/null +++ b/api/envoy/config/accesslog/v4alpha/accesslog.proto @@ -0,0 +1,333 @@ +syntax = "proto3"; + +package envoy.config.accesslog.v4alpha; + +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/config/route/v4alpha/route_components.proto"; +import "envoy/type/matcher/v4alpha/metadata.proto"; +import "envoy/type/v3/percent.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.accesslog.v4alpha"; +option java_outer_classname = "AccesslogProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Common access log types] + +message AccessLog { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.accesslog.v3.AccessLog"; + + reserved 3; + + reserved "config"; + + // The name of the access log implementation to instantiate. The name must + // match a statically registered access log. Current built-in loggers include: + // + // #. "envoy.access_loggers.file" + // #. "envoy.access_loggers.http_grpc" + // #. "envoy.access_loggers.tcp_grpc" + string name = 1; + + // Filter which is used to determine if the access log needs to be written. 
+ AccessLogFilter filter = 2; + + // Custom configuration that depends on the access log being instantiated. + // Built-in configurations include: + // + // #. "envoy.access_loggers.file": :ref:`FileAccessLog + // ` + // #. "envoy.access_loggers.http_grpc": :ref:`HttpGrpcAccessLogConfig + // ` + // #. "envoy.access_loggers.tcp_grpc": :ref:`TcpGrpcAccessLogConfig + // ` + oneof config_type { + google.protobuf.Any typed_config = 4; + } +} + +// [#next-free-field: 13] +message AccessLogFilter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.accesslog.v3.AccessLogFilter"; + + oneof filter_specifier { + option (validate.required) = true; + + // Status code filter. + StatusCodeFilter status_code_filter = 1; + + // Duration filter. + DurationFilter duration_filter = 2; + + // Not health check filter. + NotHealthCheckFilter not_health_check_filter = 3; + + // Traceable filter. + TraceableFilter traceable_filter = 4; + + // Runtime filter. + RuntimeFilter runtime_filter = 5; + + // And filter. + AndFilter and_filter = 6; + + // Or filter. + OrFilter or_filter = 7; + + // Header filter. + HeaderFilter header_filter = 8; + + // Response flag filter. + ResponseFlagFilter response_flag_filter = 9; + + // gRPC status filter. + GrpcStatusFilter grpc_status_filter = 10; + + // Extension filter. + ExtensionFilter extension_filter = 11; + + // Metadata Filter + MetadataFilter metadata_filter = 12; + } +} + +// Filter on an integer comparison. +message ComparisonFilter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.accesslog.v3.ComparisonFilter"; + + enum Op { + // = + EQ = 0; + + // >= + GE = 1; + + // <= + LE = 2; + } + + // Comparison operator. + Op op = 1 [(validate.rules).enum = {defined_only: true}]; + + // Value to compare against. + core.v4alpha.RuntimeUInt32 value = 2; +} + +// Filters on HTTP response/status code. 
+message StatusCodeFilter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.accesslog.v3.StatusCodeFilter"; + + // Comparison. + ComparisonFilter comparison = 1 [(validate.rules).message = {required: true}]; +} + +// Filters on total request duration in milliseconds. +message DurationFilter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.accesslog.v3.DurationFilter"; + + // Comparison. + ComparisonFilter comparison = 1 [(validate.rules).message = {required: true}]; +} + +// Filters for requests that are not health check requests. A health check +// request is marked by the health check filter. +message NotHealthCheckFilter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.accesslog.v3.NotHealthCheckFilter"; +} + +// Filters for requests that are traceable. See the tracing overview for more +// information on how a request becomes traceable. +message TraceableFilter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.accesslog.v3.TraceableFilter"; +} + +// Filters for random sampling of requests. +message RuntimeFilter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.accesslog.v3.RuntimeFilter"; + + // Runtime key to get an optional overridden numerator for use in the + // *percent_sampled* field. If found in runtime, this value will replace the + // default numerator. + string runtime_key = 1 [(validate.rules).string = {min_bytes: 1}]; + + // The default sampling percentage. If not specified, defaults to 0% with + // denominator of 100. + type.v3.FractionalPercent percent_sampled = 2; + + // By default, sampling pivots on the header + // :ref:`x-request-id` being + // present. If :ref:`x-request-id` + // is present, the filter will consistently sample across multiple hosts based + // on the runtime key value and the value extracted from + // :ref:`x-request-id`. 
If it is + // missing, or *use_independent_randomness* is set to true, the filter will + // randomly sample based on the runtime key value alone. + // *use_independent_randomness* can be used for logging kill switches within + // complex nested :ref:`AndFilter + // ` and :ref:`OrFilter + // ` blocks that are easier to + // reason about from a probability perspective (i.e., setting to true will + // cause the filter to behave like an independent random variable when + // composed within logical operator filters). + bool use_independent_randomness = 3; +} + +// Performs a logical “and” operation on the result of each filter in filters. +// Filters are evaluated sequentially and if one of them returns false, the +// filter returns false immediately. +message AndFilter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.accesslog.v3.AndFilter"; + + repeated AccessLogFilter filters = 1 [(validate.rules).repeated = {min_items: 2}]; +} + +// Performs a logical “or” operation on the result of each individual filter. +// Filters are evaluated sequentially and if one of them returns true, the +// filter returns true immediately. +message OrFilter { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.accesslog.v3.OrFilter"; + + repeated AccessLogFilter filters = 2 [(validate.rules).repeated = {min_items: 2}]; +} + +// Filters requests based on the presence or value of a request header. +message HeaderFilter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.accesslog.v3.HeaderFilter"; + + // Only requests with a header which matches the specified HeaderMatcher will + // pass the filter check. + route.v4alpha.HeaderMatcher header = 1 [(validate.rules).message = {required: true}]; +} + +// Filters requests that received responses with an Envoy response flag set. +// A list of the response flags can be found +// in the access log formatter +// :ref:`documentation`. 
+message ResponseFlagFilter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.accesslog.v3.ResponseFlagFilter"; + + // Only responses with the any of the flags listed in this field will be + // logged. This field is optional. If it is not specified, then any response + // flag will pass the filter check. + repeated string flags = 1 [(validate.rules).repeated = { + items { + string { + in: "LH" + in: "UH" + in: "UT" + in: "LR" + in: "UR" + in: "UF" + in: "UC" + in: "UO" + in: "NR" + in: "DI" + in: "FI" + in: "RL" + in: "UAEX" + in: "RLSE" + in: "DC" + in: "URX" + in: "SI" + in: "IH" + in: "DPE" + in: "UMSDR" + in: "RFCF" + in: "NFCF" + } + } + }]; +} + +// Filters gRPC requests based on their response status. If a gRPC status is not +// provided, the filter will infer the status from the HTTP status code. +message GrpcStatusFilter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.accesslog.v3.GrpcStatusFilter"; + + enum Status { + OK = 0; + CANCELED = 1; + UNKNOWN = 2; + INVALID_ARGUMENT = 3; + DEADLINE_EXCEEDED = 4; + NOT_FOUND = 5; + ALREADY_EXISTS = 6; + PERMISSION_DENIED = 7; + RESOURCE_EXHAUSTED = 8; + FAILED_PRECONDITION = 9; + ABORTED = 10; + OUT_OF_RANGE = 11; + UNIMPLEMENTED = 12; + INTERNAL = 13; + UNAVAILABLE = 14; + DATA_LOSS = 15; + UNAUTHENTICATED = 16; + } + + // Logs only responses that have any one of the gRPC statuses in this field. + repeated Status statuses = 1 [(validate.rules).repeated = {items {enum {defined_only: true}}}]; + + // If included and set to true, the filter will instead block all responses + // with a gRPC status or inferred gRPC status enumerated in statuses, and + // allow all other responses. + bool exclude = 2; +} + +// Filters based on matching dynamic metadata. +// If the matcher path and key correspond to an existing key in dynamic +// metadata, the request is logged only if the matcher value is equal to the +// metadata value. 
If the matcher path and key *do not* correspond to an +// existing key in dynamic metadata, the request is logged only if +// match_if_key_not_found is "true" or unset. +message MetadataFilter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.accesslog.v3.MetadataFilter"; + + // Matcher to check metadata for specified value. For example, to match on the + // access_log_hint metadata, set the filter to "envoy.common" and the path to + // "access_log_hint", and the value to "true". + type.matcher.v4alpha.MetadataMatcher matcher = 1; + + // Default result if the key does not exist in dynamic metadata: if unset or + // true, then log; if false, then don't log. + google.protobuf.BoolValue match_if_key_not_found = 2; +} + +// Extension filter is statically registered at runtime. +message ExtensionFilter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.accesslog.v3.ExtensionFilter"; + + reserved 2; + + reserved "config"; + + // The name of the filter implementation to instantiate. The name must + // match a statically registered filter. + string name = 1; + + // Custom configuration that depends on the filter being instantiated. 
+ oneof config_type { + google.protobuf.Any typed_config = 3; + } +} diff --git a/api/envoy/config/bootstrap/v2/bootstrap.proto b/api/envoy/config/bootstrap/v2/bootstrap.proto index 622304483eb2d..da88dce786ae7 100644 --- a/api/envoy/config/bootstrap/v2/bootstrap.proto +++ b/api/envoy/config/bootstrap/v2/bootstrap.proto @@ -2,7 +2,7 @@ syntax = "proto3"; package envoy.config.bootstrap.v2; -import "envoy/api/v2/auth/cert.proto"; +import "envoy/api/v2/auth/secret.proto"; import "envoy/api/v2/cluster.proto"; import "envoy/api/v2/core/address.proto"; import "envoy/api/v2/core/base.proto"; diff --git a/api/envoy/config/bootstrap/v3/BUILD b/api/envoy/config/bootstrap/v3/BUILD index 645d50d891a2f..63eb22d36ea0c 100644 --- a/api/envoy/config/bootstrap/v3/BUILD +++ b/api/envoy/config/bootstrap/v3/BUILD @@ -15,6 +15,8 @@ api_proto_package( "//envoy/config/overload/v3:pkg", "//envoy/config/trace/v3:pkg", "//envoy/extensions/transport_sockets/tls/v3:pkg", + "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//udpa/core/v1:pkg", ], ) diff --git a/api/envoy/config/bootstrap/v3/bootstrap.proto b/api/envoy/config/bootstrap/v3/bootstrap.proto index c8219d1b22e31..2d096a39c73b6 100644 --- a/api/envoy/config/bootstrap/v3/bootstrap.proto +++ b/api/envoy/config/bootstrap/v3/bootstrap.proto @@ -7,18 +7,24 @@ import "envoy/config/core/v3/address.proto"; import "envoy/config/core/v3/base.proto"; import "envoy/config/core/v3/config_source.proto"; import "envoy/config/core/v3/event_service_config.proto"; +import "envoy/config/core/v3/extension.proto"; import "envoy/config/core/v3/socket_option.proto"; import "envoy/config/listener/v3/listener.proto"; import "envoy/config/metrics/v3/stats.proto"; import "envoy/config/overload/v3/overload.proto"; import "envoy/config/trace/v3/http_tracer.proto"; -import "envoy/extensions/transport_sockets/tls/v3/cert.proto"; +import "envoy/extensions/transport_sockets/tls/v3/secret.proto"; +import 
"envoy/type/v3/percent.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/core/v1/resource_locator.proto"; + import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/migrate.proto"; +import "udpa/annotations/security.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -34,7 +40,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // ` for more detail. // Bootstrap :ref:`configuration overview `. -// [#next-free-field: 21] +// [#next-free-field: 26] message Bootstrap { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v2.Bootstrap"; @@ -60,6 +66,7 @@ message Bootstrap { repeated envoy.extensions.transport_sockets.tls.v3.Secret secrets = 3; } + // [#next-free-field: 7] message DynamicResources { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v2.Bootstrap.DynamicResources"; @@ -70,11 +77,19 @@ message Bootstrap { // :ref:`LDS ` configuration source. core.v3.ConfigSource lds_config = 1; + // Resource locator for listener collection. + // [#not-implemented-hide:] + udpa.core.v1.ResourceLocator lds_resources_locator = 5; + // All post-bootstrap :ref:`Cluster ` definitions are // provided by a single :ref:`CDS ` // configuration source. core.v3.ConfigSource cds_config = 2; + // Resource locator for cluster collection. + // [#not-implemented-hide:] + udpa.core.v1.ResourceLocator cds_resources_locator = 6; + // A single :ref:`ADS ` source may be optionally // specified. This must have :ref:`api_type // ` :ref:`GRPC @@ -145,7 +160,10 @@ message Bootstrap { Admin admin = 12; // Optional overload manager configuration. 
- overload.v3.OverloadManager overload_manager = 15; + overload.v3.OverloadManager overload_manager = 15 [ + (udpa.annotations.security).configure_for_untrusted_downstream = true, + (udpa.annotations.security).configure_for_untrusted_upstream = true + ]; // Enable :ref:`stats for event dispatcher `, defaults to false. // Note that this records a value for each iteration of the event loop on every thread. This @@ -177,6 +195,45 @@ message Bootstrap { // :ref:`use_tcp_for_dns_lookups ` are // specified. bool use_tcp_for_dns_lookups = 20; + + // Specifies optional bootstrap extensions to be instantiated at startup time. + // Each item contains extension specific configuration. + repeated core.v3.TypedExtensionConfig bootstrap_extensions = 21; + + // Configuration sources that will participate in + // *udpa.core.v1.ResourceLocator* authority resolution. The algorithm is as + // follows: + // 1. The authority field is taken from the *udpa.core.v1.ResourceLocator*, call + // this *resource_authority*. + // 2. *resource_authority* is compared against the authorities in any peer + // *ConfigSource*. The peer *ConfigSource* is the configuration source + // message which would have been used unconditionally for resolution + // with opaque resource names. If there is a match with an authority, the + // peer *ConfigSource* message is used. + // 3. *resource_authority* is compared sequentially with the authorities in + // each configuration source in *config_sources*. The first *ConfigSource* + // to match wins. + // 4. As a fallback, if no configuration source matches, then + // *default_config_source* is used. + // 5. If *default_config_source* is not specified, resolution fails. + // [#not-implemented-hide:] + repeated core.v3.ConfigSource config_sources = 22; + + // Default configuration source for *udpa.core.v1.ResourceLocator* if all + // other resolution fails. 
+ // [#not-implemented-hide:] + core.v3.ConfigSource default_config_source = 23; + + // Optional overriding of default socket interface. The value must be the name of one of the + // socket interface factories initialized through a bootstrap extension + string default_socket_interface = 24; + + // Global map of CertificateProvider instances. These instances are referred to by name in the + // :ref:`CommonTlsContext.CertificateProviderInstance.instance_name + // ` + // field. + // [#not-implemented-hide:] + map certificate_provider_instances = 25; } // Administration interface :ref:`operations documentation @@ -248,6 +305,7 @@ message ClusterManager { // Envoy process watchdog configuration. When configured, this monitors for // nonresponsive threads and kills the process after the configured thresholds. // See the :ref:`watchdog documentation ` for more information. +// [#next-free-field: 7] message Watchdog { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v2.Watchdog"; @@ -265,10 +323,22 @@ message Watchdog { // kill behavior. If not specified the default is 0 (disabled). google.protobuf.Duration kill_timeout = 3; - // If at least two watched threads have been nonresponsive for at least this - // duration assume a true deadlock and kill the entire Envoy process. Set to 0 - // to disable this behavior. If not specified the default is 0 (disabled). + // Defines the maximum jitter used to adjust the *kill_timeout* if *kill_timeout* is + // enabled. Enabling this feature would help to reduce risk of synchronized + // watchdog kill events across proxies due to external triggers. Set to 0 to + // disable. If not specified the default is 0 (disabled). + google.protobuf.Duration max_kill_timeout_jitter = 6 [(validate.rules).duration = {gte {}}]; + + // If max(2, ceil(registered_threads * Fraction(*multikill_threshold*))) + // threads have been nonresponsive for at least this duration kill the entire + // Envoy process. 
Set to 0 to disable this behavior. If not specified the + // default is 0 (disabled). google.protobuf.Duration multikill_timeout = 4; + + // Sets the threshold for *multikill_timeout* in terms of the percentage of + // nonresponsive threads required for the *multikill_timeout*. + // If not specified the default is 0. + type.v3.Percent multikill_threshold = 5; } // Runtime :ref:`configuration overview ` (deprecated). @@ -344,7 +414,12 @@ message RuntimeLayer { "envoy.config.bootstrap.v2.RuntimeLayer.RtdsLayer"; // Resource to subscribe to at *rtds_config* for the RTDS layer. - string name = 1; + string name = 1 [(udpa.annotations.field_migrate).oneof_promotion = "name_specifier"]; + + // Resource locator for RTDS layer. This is mutually exclusive to *name*. + // [#not-implemented-hide:] + udpa.core.v1.ResourceLocator rtds_resource_locator = 3 + [(udpa.annotations.field_migrate).oneof_promotion = "name_specifier"]; // RTDS configuration source. core.v3.ConfigSource rtds_config = 2; diff --git a/api/envoy/config/bootstrap/v4alpha/BUILD b/api/envoy/config/bootstrap/v4alpha/BUILD index 884b942b2dacd..97d0d49f07ffe 100644 --- a/api/envoy/config/bootstrap/v4alpha/BUILD +++ b/api/envoy/config/bootstrap/v4alpha/BUILD @@ -10,10 +10,12 @@ api_proto_package( "//envoy/config/bootstrap/v3:pkg", "//envoy/config/cluster/v4alpha:pkg", "//envoy/config/core/v4alpha:pkg", - "//envoy/config/listener/v3:pkg", - "//envoy/config/metrics/v3:pkg", + "//envoy/config/listener/v4alpha:pkg", + "//envoy/config/metrics/v4alpha:pkg", "//envoy/config/overload/v3:pkg", "//envoy/extensions/transport_sockets/tls/v4alpha:pkg", + "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//udpa/core/v1:pkg", ], ) diff --git a/api/envoy/config/bootstrap/v4alpha/bootstrap.proto b/api/envoy/config/bootstrap/v4alpha/bootstrap.proto index e76695c4b6447..ba6107aa8dfe0 100644 --- a/api/envoy/config/bootstrap/v4alpha/bootstrap.proto +++ 
b/api/envoy/config/bootstrap/v4alpha/bootstrap.proto @@ -7,17 +7,22 @@ import "envoy/config/core/v4alpha/address.proto"; import "envoy/config/core/v4alpha/base.proto"; import "envoy/config/core/v4alpha/config_source.proto"; import "envoy/config/core/v4alpha/event_service_config.proto"; +import "envoy/config/core/v4alpha/extension.proto"; import "envoy/config/core/v4alpha/socket_option.proto"; -import "envoy/config/listener/v3/listener.proto"; -import "envoy/config/metrics/v3/stats.proto"; +import "envoy/config/listener/v4alpha/listener.proto"; +import "envoy/config/metrics/v4alpha/stats.proto"; import "envoy/config/overload/v3/overload.proto"; -import "envoy/extensions/transport_sockets/tls/v4alpha/cert.proto"; +import "envoy/extensions/transport_sockets/tls/v4alpha/secret.proto"; +import "envoy/type/v3/percent.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/core/v1/resource_locator.proto"; + import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/security.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -33,7 +38,7 @@ option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSIO // ` for more detail. // Bootstrap :ref:`configuration overview `. -// [#next-free-field: 21] +// [#next-free-field: 26] message Bootstrap { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v3.Bootstrap"; @@ -42,9 +47,9 @@ message Bootstrap { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v3.Bootstrap.StaticResources"; - // Static :ref:`Listeners `. These listeners are + // Static :ref:`Listeners `. These listeners are // available regardless of LDS configuration. 
- repeated listener.v3.Listener listeners = 1; + repeated listener.v4alpha.Listener listeners = 1; // If a network based configuration source is specified for :ref:`cds_config // `, it's necessary @@ -59,21 +64,30 @@ message Bootstrap { repeated envoy.extensions.transport_sockets.tls.v4alpha.Secret secrets = 3; } + // [#next-free-field: 7] message DynamicResources { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v3.Bootstrap.DynamicResources"; reserved 4; - // All :ref:`Listeners ` are provided by a single + // All :ref:`Listeners ` are provided by a single // :ref:`LDS ` configuration source. core.v4alpha.ConfigSource lds_config = 1; + // Resource locator for listener collection. + // [#not-implemented-hide:] + udpa.core.v1.ResourceLocator lds_resources_locator = 5; + // All post-bootstrap :ref:`Cluster ` definitions are // provided by a single :ref:`CDS ` // configuration source. core.v4alpha.ConfigSource cds_config = 2; + // Resource locator for cluster collection. + // [#not-implemented-hide:] + udpa.core.v1.ResourceLocator cds_resources_locator = 6; + // A single :ref:`ADS ` source may be optionally // specified. This must have :ref:`api_type // ` :ref:`GRPC @@ -110,10 +124,10 @@ message Bootstrap { string flags_path = 5; // Optional set of stats sinks. - repeated metrics.v3.StatsSink stats_sinks = 6; + repeated metrics.v4alpha.StatsSink stats_sinks = 6; // Configuration for internal processing of stats. - metrics.v3.StatsConfig stats_config = 13; + metrics.v4alpha.StatsConfig stats_config = 13; // Optional duration between flushes to configured stats sinks. For // performance reasons Envoy latches counters and only flushes counters and @@ -137,12 +151,15 @@ message Bootstrap { Admin admin = 12; // Optional overload manager configuration. 
- overload.v3.OverloadManager overload_manager = 15; + overload.v3.OverloadManager overload_manager = 15 [ + (udpa.annotations.security).configure_for_untrusted_downstream = true, + (udpa.annotations.security).configure_for_untrusted_upstream = true + ]; // Enable :ref:`stats for event dispatcher `, defaults to false. // Note that this records a value for each iteration of the event loop on every thread. This // should normally be minimal overhead, but when using - // :ref:`statsd `, it will send each observed value + // :ref:`statsd `, it will send each observed value // over the wire individually because the statsd protocol doesn't have any way to represent a // histogram summary. Be aware that this can be a very large volume of data. bool enable_dispatcher_stats = 16; @@ -160,7 +177,7 @@ message Bootstrap { // Optional proxy version which will be used to set the value of :ref:`server.version statistic // ` if specified. Envoy will not process this value, it will be sent as is to - // :ref:`stats sinks `. + // :ref:`stats sinks `. google.protobuf.UInt64Value stats_server_version_override = 19; // Always use TCP queries instead of UDP queries for DNS lookups. @@ -169,6 +186,45 @@ message Bootstrap { // :ref:`use_tcp_for_dns_lookups ` are // specified. bool use_tcp_for_dns_lookups = 20; + + // Specifies optional bootstrap extensions to be instantiated at startup time. + // Each item contains extension specific configuration. + repeated core.v4alpha.TypedExtensionConfig bootstrap_extensions = 21; + + // Configuration sources that will participate in + // *udpa.core.v1.ResourceLocator* authority resolution. The algorithm is as + // follows: + // 1. The authority field is taken from the *udpa.core.v1.ResourceLocator*, call + // this *resource_authority*. + // 2. *resource_authority* is compared against the authorities in any peer + // *ConfigSource*. 
The peer *ConfigSource* is the configuration source + // message which would have been used unconditionally for resolution + // with opaque resource names. If there is a match with an authority, the + // peer *ConfigSource* message is used. + // 3. *resource_authority* is compared sequentially with the authorities in + // each configuration source in *config_sources*. The first *ConfigSource* + // to match wins. + // 4. As a fallback, if no configuration source matches, then + // *default_config_source* is used. + // 5. If *default_config_source* is not specified, resolution fails. + // [#not-implemented-hide:] + repeated core.v4alpha.ConfigSource config_sources = 22; + + // Default configuration source for *udpa.core.v1.ResourceLocator* if all + // other resolution fails. + // [#not-implemented-hide:] + core.v4alpha.ConfigSource default_config_source = 23; + + // Optional overriding of default socket interface. The value must be the name of one of the + // socket interface factories initialized through a bootstrap extension + string default_socket_interface = 24; + + // Global map of CertificateProvider instances. These instances are referred to by name in the + // :ref:`CommonTlsContext.CertificateProviderInstance.instance_name + // ` + // field. + // [#not-implemented-hide:] + map certificate_provider_instances = 25; } // Administration interface :ref:`operations documentation @@ -240,6 +296,7 @@ message ClusterManager { // Envoy process watchdog configuration. When configured, this monitors for // nonresponsive threads and kills the process after the configured thresholds. // See the :ref:`watchdog documentation ` for more information. +// [#next-free-field: 7] message Watchdog { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v3.Watchdog"; @@ -257,10 +314,22 @@ message Watchdog { // kill behavior. If not specified the default is 0 (disabled). 
google.protobuf.Duration kill_timeout = 3; - // If at least two watched threads have been nonresponsive for at least this - // duration assume a true deadlock and kill the entire Envoy process. Set to 0 - // to disable this behavior. If not specified the default is 0 (disabled). + // Defines the maximum jitter used to adjust the *kill_timeout* if *kill_timeout* is + // enabled. Enabling this feature would help to reduce risk of synchronized + // watchdog kill events across proxies due to external triggers. Set to 0 to + // disable. If not specified the default is 0 (disabled). + google.protobuf.Duration max_kill_timeout_jitter = 6 [(validate.rules).duration = {gte {}}]; + + // If max(2, ceil(registered_threads * Fraction(*multikill_threshold*))) + // threads have been nonresponsive for at least this duration kill the entire + // Envoy process. Set to 0 to disable this behavior. If not specified the + // default is 0 (disabled). google.protobuf.Duration multikill_timeout = 4; + + // Sets the threshold for *multikill_timeout* in terms of the percentage of + // nonresponsive threads required for the *multikill_timeout*. + // If not specified the default is 0. + type.v3.Percent multikill_threshold = 5; } // Runtime :ref:`configuration overview ` (deprecated). @@ -335,8 +404,14 @@ message RuntimeLayer { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v3.RuntimeLayer.RtdsLayer"; - // Resource to subscribe to at *rtds_config* for the RTDS layer. - string name = 1; + oneof name_specifier { + // Resource to subscribe to at *rtds_config* for the RTDS layer. + string name = 1; + + // Resource locator for RTDS layer. This is mutually exclusive to *name*. + // [#not-implemented-hide:] + udpa.core.v1.ResourceLocator rtds_resource_locator = 3; + } // RTDS configuration source. 
core.v4alpha.ConfigSource rtds_config = 2; diff --git a/api/envoy/config/cluster/redis/redis_cluster.proto b/api/envoy/config/cluster/redis/redis_cluster.proto index b1872501e8eb2..abe88f76a6ff8 100644 --- a/api/envoy/config/cluster/redis/redis_cluster.proto +++ b/api/envoy/config/cluster/redis/redis_cluster.proto @@ -18,7 +18,7 @@ option (udpa.annotations.file_status).package_version_status = FROZEN; // of :ref:`Envoy's support for Redis Cluster `. // // Redis Cluster is an extension of Redis which supports sharding and high availability (where a -// shard that loses its master fails over to a replica, and designates it as the new master). +// shard that loses its primary fails over to a replica, and designates it as the new primary). // However, as there is no unified frontend or proxy service in front of Redis Cluster, the client // (in this case Envoy) must locally maintain the state of the Redis Cluster, specifically the // topology. A random node in the cluster is queried for the topology using the `CLUSTER SLOTS diff --git a/api/envoy/config/cluster/v3/BUILD b/api/envoy/config/cluster/v3/BUILD index 2c838d35e6f18..27f31fad4d3d8 100644 --- a/api/envoy/config/cluster/v3/BUILD +++ b/api/envoy/config/cluster/v3/BUILD @@ -13,5 +13,6 @@ api_proto_package( "//envoy/config/endpoint/v3:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//udpa/core/v1:pkg", ], ) diff --git a/api/envoy/config/cluster/v3/cluster.proto b/api/envoy/config/cluster/v3/cluster.proto index 06de8bbbead04..9edba75862e6f 100644 --- a/api/envoy/config/cluster/v3/cluster.proto +++ b/api/envoy/config/cluster/v3/cluster.proto @@ -8,6 +8,7 @@ import "envoy/config/cluster/v3/outlier_detection.proto"; import "envoy/config/core/v3/address.proto"; import "envoy/config/core/v3/base.proto"; import "envoy/config/core/v3/config_source.proto"; +import "envoy/config/core/v3/extension.proto"; import "envoy/config/core/v3/health_check.proto"; import 
"envoy/config/core/v3/protocol.proto"; import "envoy/config/endpoint/v3/endpoint.proto"; @@ -18,7 +19,12 @@ import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/core/v1/collection_entry.proto"; +import "udpa/core/v1/resource_locator.proto"; + import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/migrate.proto"; +import "udpa/annotations/security.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -30,8 +36,14 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Cluster configuration] +// Cluster list collections. Entries are *Cluster* resources or references. +// [#not-implemented-hide:] +message ClusterCollection { + udpa.core.v1.CollectionEntry entries = 1; +} + // Configuration for a single upstream cluster. -// [#next-free-field: 48] +// [#next-free-field: 51] message Cluster { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Cluster"; @@ -176,7 +188,12 @@ message Cluster { // Optional alternative to cluster name to present to EDS. This does not // have the same restrictions as cluster name, i.e. it may be arbitrary // length. - string service_name = 2; + string service_name = 2 [(udpa.annotations.field_migrate).oneof_promotion = "name_specifier"]; + + // Resource locator for EDS. This is mutually exclusive to *service_name*. + // [#not-implemented-hide:] + udpa.core.v1.ResourceLocator eds_resource_locator = 3 + [(udpa.annotations.field_migrate).oneof_promotion = "name_specifier"]; } // Optionally divide the endpoints in this cluster into subsets defined by @@ -317,6 +334,31 @@ message Cluster { // The number of random healthy hosts from which the host with the fewest active requests will // be chosen. Defaults to 2 so that we perform two-choice selection if the field is not set. 
google.protobuf.UInt32Value choice_count = 1 [(validate.rules).uint32 = {gte: 2}]; + + // The following formula is used to calculate the dynamic weights when hosts have different load + // balancing weights: + // + // `weight = load_balancing_weight / (active_requests + 1)^active_request_bias` + // + // The larger the active request bias is, the more aggressively active requests will lower the + // effective weight when all host weights are not equal. + // + // `active_request_bias` must be greater than or equal to 0.0. + // + // When `active_request_bias == 0.0` the Least Request Load Balancer doesn't consider the number + // of active requests at the time it picks a host and behaves like the Round Robin Load + // Balancer. + // + // When `active_request_bias > 0.0` the Least Request Load Balancer scales the load balancing + // weight by the number of active requests at the time it does a pick. + // + // The value is cached for performance reasons and refreshed whenever one of the Load Balancer's + // host sets changes, e.g., whenever there is a host membership update or a host load balancing + // weight change. + // + // .. note:: + // This setting only takes effect if all host weights are not equal. + core.v3.RuntimeDouble active_request_bias = 2; } // Specific configuration for the :ref:`RingHash` @@ -499,12 +541,41 @@ message Cluster { google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {nanos: 1000000}}]; } + // [#not-implemented-hide:] + message PrefetchPolicy { + // Indicates how many streams (rounded up) can be anticipated per-upstream for each + // stream, useful for high-QPS or latency-sensitive services. + // + // For example if this is 2, for an incoming HTTP/1.1 stream, 2 connections will be + // established, one for the new incoming stream, and one for a presumed follow-up stream. 
For + // HTTP/2, only one connection would be established by default as one connection can + // serve both the original and presumed follow-up stream. + // + // In steady state for non-multiplexed connections a value of 1.5 would mean if there were 100 + // active streams, there would be 100 connections in use, and 50 connections prefetched. + // This might be a useful value for something like short lived single-use connections, + // for example proxying HTTP/1.1 if keep-alive were false and each stream resulted in connection + // termination. It would likely be overkill for long lived connections, such as TCP proxying SMTP + // or regular HTTP/1.1 with keep-alive. For long lived traffic, a value of 1.05 would be more + // reasonable, where for every 100 connections, 5 prefetched connections would be in the queue + // in case of unexpected disconnects where the connection could not be reused. + // + // If this value is not set, or set explicitly to one, Envoy will fetch as many connections + // as needed to serve streams in flight. This means in steady state if a connection is torn down, + // subsequent streams will pay an upstream-rtt latency penalty waiting for streams to be + // prefetched. + // + // This is limited somewhat arbitrarily to 3 because prefetching connections too aggressively can + // harm latency more than the prefetching helps. + google.protobuf.DoubleValue prefetch_ratio = 1 [(validate.rules).double = {lte: 3.0 gte: 1.0}]; + } + reserved 12, 15, 7, 11, 35; reserved "hosts", "tls_context", "extension_protocol_options"; // Configuration to use different transport sockets for different endpoints. - // The entry of *envoy.transport_socket* in the + // The entry of *envoy.transport_socket_match* in the // :ref:`LbEndpoint.Metadata ` // is used to match against the transport sockets as they appear in the list. The first // :ref:`match ` is used. 
@@ -524,14 +595,14 @@ message Cluster { // transport_socket: // name: envoy.transport_sockets.raw_buffer // - // Connections to the endpoints whose metadata value under *envoy.transport_socket* + // Connections to the endpoints whose metadata value under *envoy.transport_socket_match* // having "acceptMTLS"/"true" key/value pair use the "enableMTLS" socket configuration. // // If a :ref:`socket match ` with empty match // criteria is provided, that always match any endpoint. For example, the "defaultToPlaintext" // socket match in case above. // - // If an endpoint metadata's value under *envoy.transport_socket* does not match any + // If an endpoint metadata's value under *envoy.transport_socket_match* does not match any // *TransportSocketMatch*, socket configuration fallbacks to use the *tls_context* or // *transport_socket* specified in this cluster. // @@ -547,6 +618,10 @@ message Cluster { // *TransportSocketMatch* in this field. Other client Envoys receive CDS without // *transport_socket_match* set, and still send plain text traffic to the same cluster. // + // This field can be used to specify custom transport socket configurations for health + // checks by adding matching key/value pairs in a health check's + // :ref:`transport socket match criteria ` field. + // // [#comment:TODO(incfly): add a detailed architecture doc on intended usage.] repeated TransportSocketMatch transport_socket_matches = 43; @@ -580,11 +655,13 @@ message Cluster { // Soft limit on size of the cluster’s connections read and write buffers. If // unspecified, an implementation defined default is applied (1MiB). - google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5; + google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5 + [(udpa.annotations.security).configure_for_untrusted_upstream = true]; // The :ref:`load balancer type ` to use // when picking a host in the cluster. 
- LbPolicy lb_policy = 6 [(validate.rules).enum = {defined_only: true}]; + // [#comment:TODO: Remove enum constraint :ref:`LOAD_BALANCING_POLICY_CONFIG` when implemented.] + LbPolicy lb_policy = 6 [(validate.rules).enum = {defined_only: true not_in: 7}]; // Setting this is required for specifying members of // :ref:`STATIC`, @@ -631,7 +708,8 @@ message Cluster { // supports prior knowledge for upstream connections. Even if TLS is used // with ALPN, `http2_protocol_options` must be specified. As an aside this allows HTTP/2 // connections to happen over plain text. - core.v3.Http2ProtocolOptions http2_protocol_options = 14; + core.v3.Http2ProtocolOptions http2_protocol_options = 14 + [(udpa.annotations.security).configure_for_untrusted_upstream = true]; // The extension_protocol_options field is used to provide extension-specific protocol options // for upstream connections. The key should match the extension filter name, such as @@ -807,7 +885,38 @@ message Cluster { // request. These show what percentage of a request's per try and global timeout was used. A value // of 0 would indicate that none of the timeout was used or that the timeout was infinite. A value // of 100 would indicate that the request took the entirety of the timeout given to it. - bool track_timeout_budgets = 47; + // + // .. attention:: + // + // This field has been deprecated in favor of `timeout_budgets`, part of + // :ref:`track_cluster_stats `. + bool track_timeout_budgets = 47 [deprecated = true]; + + // Optional customization and configuration of upstream connection pool, and upstream type. + // + // Currently this field only applies for HTTP traffic but is designed for eventual use for custom + // TCP upstreams. 
+ // + // For HTTP traffic, Envoy will generally take downstream HTTP and send it upstream as upstream + // HTTP, using the http connection pool and the codec from `http2_protocol_options` + // + // For routes where CONNECT termination is configured, Envoy will take downstream CONNECT + // requests and forward the CONNECT payload upstream over raw TCP using the tcp connection pool. + // + // The default pool used is the generic connection pool which creates the HTTP upstream for most + // HTTP requests, and the TCP upstream if CONNECT termination is configured. + // + // If users desire custom connection pool or upstream behavior, for example terminating + // CONNECT only if a custom filter indicates it is appropriate, the custom factories + // can be registered and configured here. + core.v3.TypedExtensionConfig upstream_config = 48; + + // Configuration to track optional cluster stats. + TrackClusterStats track_cluster_stats = 49; + + // [#not-implemented-hide:] + // Prefetch configuration for this cluster. + PrefetchPolicy prefetch_policy = 50; } // [#not-implemented-hide:] Extensible load balancing policy configuration. @@ -868,3 +977,17 @@ message UpstreamConnectionOptions { // If set then set SO_KEEPALIVE on the socket to enable TCP Keepalives. core.v3.TcpKeepalive tcp_keepalive = 1; } + +message TrackClusterStats { + // If timeout_budgets is true, the :ref:`timeout budget histograms + // ` will be published for each + // request. These show what percentage of a request's per try and global timeout was used. A value + // of 0 would indicate that none of the timeout was used or that the timeout was infinite. A value + // of 100 would indicate that the request took the entirety of the timeout given to it. + bool timeout_budgets = 1; + + // If request_response_sizes is true, then the :ref:`histograms + // ` tracking header and body sizes + // of requests and responses will be published. 
+ bool request_response_sizes = 2; +} diff --git a/api/envoy/config/cluster/v4alpha/BUILD b/api/envoy/config/cluster/v4alpha/BUILD index 3aff84b82faa4..196ea73f908a9 100644 --- a/api/envoy/config/cluster/v4alpha/BUILD +++ b/api/envoy/config/cluster/v4alpha/BUILD @@ -12,5 +12,6 @@ api_proto_package( "//envoy/config/endpoint/v3:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//udpa/core/v1:pkg", ], ) diff --git a/api/envoy/config/cluster/v4alpha/cluster.proto b/api/envoy/config/cluster/v4alpha/cluster.proto index 887ef9c3fe337..07d2c7b9e65cb 100644 --- a/api/envoy/config/cluster/v4alpha/cluster.proto +++ b/api/envoy/config/cluster/v4alpha/cluster.proto @@ -8,6 +8,7 @@ import "envoy/config/cluster/v4alpha/outlier_detection.proto"; import "envoy/config/core/v4alpha/address.proto"; import "envoy/config/core/v4alpha/base.proto"; import "envoy/config/core/v4alpha/config_source.proto"; +import "envoy/config/core/v4alpha/extension.proto"; import "envoy/config/core/v4alpha/health_check.proto"; import "envoy/config/core/v4alpha/protocol.proto"; import "envoy/config/endpoint/v3/endpoint.proto"; @@ -18,7 +19,11 @@ import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/core/v1/collection_entry.proto"; +import "udpa/core/v1/resource_locator.proto"; + import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/security.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -30,8 +35,17 @@ option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSIO // [#protodoc-title: Cluster configuration] +// Cluster list collections. Entries are *Cluster* resources or references. 
+// [#not-implemented-hide:] +message ClusterCollection { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.cluster.v3.ClusterCollection"; + + udpa.core.v1.CollectionEntry entries = 1; +} + // Configuration for a single upstream cluster. -// [#next-free-field: 48] +// [#next-free-field: 51] message Cluster { option (udpa.annotations.versioning).previous_message_type = "envoy.config.cluster.v3.Cluster"; @@ -173,10 +187,16 @@ message Cluster { // Configuration for the source of EDS updates for this Cluster. core.v4alpha.ConfigSource eds_config = 1; - // Optional alternative to cluster name to present to EDS. This does not - // have the same restrictions as cluster name, i.e. it may be arbitrary - // length. - string service_name = 2; + oneof name_specifier { + // Optional alternative to cluster name to present to EDS. This does not + // have the same restrictions as cluster name, i.e. it may be arbitrary + // length. + string service_name = 2; + + // Resource locator for EDS. This is mutually exclusive to *service_name*. + // [#not-implemented-hide:] + udpa.core.v1.ResourceLocator eds_resource_locator = 3; + } } // Optionally divide the endpoints in this cluster into subsets defined by @@ -317,6 +337,31 @@ message Cluster { // The number of random healthy hosts from which the host with the fewest active requests will // be chosen. Defaults to 2 so that we perform two-choice selection if the field is not set. google.protobuf.UInt32Value choice_count = 1 [(validate.rules).uint32 = {gte: 2}]; + + // The following formula is used to calculate the dynamic weights when hosts have different load + // balancing weights: + // + // `weight = load_balancing_weight / (active_requests + 1)^active_request_bias` + // + // The larger the active request bias is, the more aggressively active requests will lower the + // effective weight when all host weights are not equal. + // + // `active_request_bias` must be greater than or equal to 0.0. 
+ // + // When `active_request_bias == 0.0` the Least Request Load Balancer doesn't consider the number + // of active requests at the time it picks a host and behaves like the Round Robin Load + // Balancer. + // + // When `active_request_bias > 0.0` the Least Request Load Balancer scales the load balancing + // weight by the number of active requests at the time it does a pick. + // + // The value is cached for performance reasons and refreshed whenever one of the Load Balancer's + // host sets changes, e.g., whenever there is a host membership update or a host load balancing + // weight change. + // + // .. note:: + // This setting only takes effect if all host weights are not equal. + core.v4alpha.RuntimeDouble active_request_bias = 2; } // Specific configuration for the :ref:`RingHash` @@ -500,12 +545,44 @@ message Cluster { google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {nanos: 1000000}}]; } - reserved 12, 15, 7, 11, 35; + // [#not-implemented-hide:] + message PrefetchPolicy { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.cluster.v3.Cluster.PrefetchPolicy"; + + // Indicates how many streams (rounded up) can be anticipated per-upstream for each + // stream, useful for high-QPS or latency-sensitive services. + // + // For example if this is 2, for an incoming HTTP/1.1 stream, 2 connections will be + // established, one for the new incoming stream, and one for a presumed follow-up stream. For + // HTTP/2, only one connection would be established by default as one connection can + // serve both the original and presumed follow-up stream. + // + // In steady state for non-multiplexed connections a value of 1.5 would mean if there were 100 + // active streams, there would be 100 connections in use, and 50 connections prefetched. 
+ // This might be a useful value for something like short lived single-use connections, + // for example proxying HTTP/1.1 if keep-alive were false and each stream resulted in connection + // termination. It would likely be overkill for long lived connections, such as TCP proxying SMTP + // or regular HTTP/1.1 with keep-alive. For long lived traffic, a value of 1.05 would be more + // reasonable, where for every 100 connections, 5 prefetched connections would be in the queue + // in case of unexpected disconnects where the connection could not be reused. + // + // If this value is not set, or set explicitly to one, Envoy will fetch as many connections + // as needed to serve streams in flight. This means in steady state if a connection is torn down, + // subsequent streams will pay an upstream-rtt latency penalty waiting for streams to be + // prefetched. + // + // This is limited somewhat arbitrarily to 3 because prefetching connections too aggressively can + // harm latency more than the prefetching helps. + google.protobuf.DoubleValue prefetch_ratio = 1 [(validate.rules).double = {lte: 3.0 gte: 1.0}]; + } - reserved 12, 15, 7, 11, 35; + reserved 12, 15, 7, 11, 35, 47; - reserved "hosts", "tls_context", "extension_protocol_options"; + reserved "hosts", "tls_context", "extension_protocol_options", "track_timeout_budgets"; // Configuration to use different transport sockets for different endpoints. - // The entry of *envoy.transport_socket* in the + // The entry of *envoy.transport_socket_match* in the // :ref:`LbEndpoint.Metadata ` // is used to match against the transport sockets as they appear in the list. The first // :ref:`match ` is used. 
@@ -525,14 +602,14 @@ message Cluster { // transport_socket: // name: envoy.transport_sockets.raw_buffer // - // Connections to the endpoints whose metadata value under *envoy.transport_socket* + // Connections to the endpoints whose metadata value under *envoy.transport_socket_match* // having "acceptMTLS"/"true" key/value pair use the "enableMTLS" socket configuration. // // If a :ref:`socket match ` with empty match // criteria is provided, that always match any endpoint. For example, the "defaultToPlaintext" // socket match in case above. // - // If an endpoint metadata's value under *envoy.transport_socket* does not match any + // If an endpoint metadata's value under *envoy.transport_socket_match* does not match any // *TransportSocketMatch*, socket configuration fallbacks to use the *tls_context* or // *transport_socket* specified in this cluster. // @@ -548,6 +625,10 @@ message Cluster { // *TransportSocketMatch* in this field. Other client Envoys receive CDS without // *transport_socket_match* set, and still send plain text traffic to the same cluster. // + // This field can be used to specify custom transport socket configurations for health + // checks by adding matching key/value pairs in a health check's + // :ref:`transport socket match criteria ` field. + // // [#comment:TODO(incfly): add a detailed architecture doc on intended usage.] repeated TransportSocketMatch transport_socket_matches = 43; @@ -581,11 +662,13 @@ message Cluster { // Soft limit on size of the cluster’s connections read and write buffers. If // unspecified, an implementation defined default is applied (1MiB). - google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5; + google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5 + [(udpa.annotations.security).configure_for_untrusted_upstream = true]; // The :ref:`load balancer type ` to use // when picking a host in the cluster. 
- LbPolicy lb_policy = 6 [(validate.rules).enum = {defined_only: true}]; + // [#comment:TODO: Remove enum constraint :ref:`LOAD_BALANCING_POLICY_CONFIG` when implemented.] + LbPolicy lb_policy = 6 [(validate.rules).enum = {defined_only: true not_in: 7}]; // Setting this is required for specifying members of // :ref:`STATIC`, @@ -632,7 +715,8 @@ message Cluster { // supports prior knowledge for upstream connections. Even if TLS is used // with ALPN, `http2_protocol_options` must be specified. As an aside this allows HTTP/2 // connections to happen over plain text. - core.v4alpha.Http2ProtocolOptions http2_protocol_options = 14; + core.v4alpha.Http2ProtocolOptions http2_protocol_options = 14 + [(udpa.annotations.security).configure_for_untrusted_upstream = true]; // The extension_protocol_options field is used to provide extension-specific protocol options // for upstream connections. The key should match the extension filter name, such as @@ -803,12 +887,31 @@ message Cluster { // from the LRS stream here.] core.v4alpha.ConfigSource lrs_server = 42; - // If track_timeout_budgets is true, the :ref:`timeout budget histograms - // ` will be published for each - // request. These show what percentage of a request's per try and global timeout was used. A value - // of 0 would indicate that none of the timeout was used or that the timeout was infinite. A value - // of 100 would indicate that the request took the entirety of the timeout given to it. - bool track_timeout_budgets = 47; + // Optional customization and configuration of upstream connection pool, and upstream type. + // + // Currently this field only applies for HTTP traffic but is designed for eventual use for custom + // TCP upstreams. 
+ // + // For HTTP traffic, Envoy will generally take downstream HTTP and send it upstream as upstream + // HTTP, using the http connection pool and the codec from `http2_protocol_options` + // + // For routes where CONNECT termination is configured, Envoy will take downstream CONNECT + // requests and forward the CONNECT payload upstream over raw TCP using the tcp connection pool. + // + // The default pool used is the generic connection pool which creates the HTTP upstream for most + // HTTP requests, and the TCP upstream if CONNECT termination is configured. + // + // If users desire custom connection pool or upstream behavior, for example terminating + // CONNECT only if a custom filter indicates it is appropriate, the custom factories + // can be registered and configured here. + core.v4alpha.TypedExtensionConfig upstream_config = 48; + + // Configuration to track optional cluster stats. + TrackClusterStats track_cluster_stats = 49; + + // [#not-implemented-hide:] + // Prefetch configuration for this cluster. + PrefetchPolicy prefetch_policy = 50; } // [#not-implemented-hide:] Extensible load balancing policy configuration. @@ -871,3 +974,20 @@ message UpstreamConnectionOptions { // If set then set SO_KEEPALIVE on the socket to enable TCP Keepalives. core.v4alpha.TcpKeepalive tcp_keepalive = 1; } + +message TrackClusterStats { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.cluster.v3.TrackClusterStats"; + + // If timeout_budgets is true, the :ref:`timeout budget histograms + // ` will be published for each + // request. These show what percentage of a request's per try and global timeout was used. A value + // of 0 would indicate that none of the timeout was used or that the timeout was infinite. A value + // of 100 would indicate that the request took the entirety of the timeout given to it. 
+ bool timeout_budgets = 1; + + // If request_response_sizes is true, then the :ref:`histograms + // ` tracking header and body sizes + // of requests and responses will be published. + bool request_response_sizes = 2; +} diff --git a/api/envoy/config/filter/udp/dns_filter/v2alpha/BUILD b/api/envoy/config/common/matcher/v3/BUILD similarity index 77% rename from api/envoy/config/filter/udp/dns_filter/v2alpha/BUILD rename to api/envoy/config/common/matcher/v3/BUILD index c6f01577c8283..c312b8eb6a613 100644 --- a/api/envoy/config/filter/udp/dns_filter/v2alpha/BUILD +++ b/api/envoy/config/common/matcher/v3/BUILD @@ -6,8 +6,7 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ - "//envoy/api/v2/core:pkg", - "//envoy/data/dns/v2alpha:pkg", + "//envoy/config/route/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) diff --git a/api/envoy/config/common/matcher/v3/matcher.proto b/api/envoy/config/common/matcher/v3/matcher.proto new file mode 100644 index 0000000000000..d0955e7a1f8c1 --- /dev/null +++ b/api/envoy/config/common/matcher/v3/matcher.proto @@ -0,0 +1,100 @@ +syntax = "proto3"; + +package envoy.config.common.matcher.v3; + +import "envoy/config/route/v3/route_components.proto"; + +import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.common.matcher.v3"; +option java_outer_classname = "MatcherProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Unified Matcher API] + +// Match configuration. This is a recursive structure which allows complex nested match +// configurations to be built using various logical operators. +// [#next-free-field: 11] +message MatchPredicate { + // A set of match configurations used for logical operations. + message MatchSet { + // The list of rules that make up the set. 
+ repeated MatchPredicate rules = 1 [(validate.rules).repeated = {min_items: 2}]; + } + + oneof rule { + option (validate.required) = true; + + // A set that describes a logical OR. If any member of the set matches, the match configuration + // matches. + MatchSet or_match = 1; + + // A set that describes a logical AND. If all members of the set match, the match configuration + // matches. + MatchSet and_match = 2; + + // A negation match. The match configuration will match if the negated match condition matches. + MatchPredicate not_match = 3; + + // The match configuration will always match. + bool any_match = 4 [(validate.rules).bool = {const: true}]; + + // HTTP request headers match configuration. + HttpHeadersMatch http_request_headers_match = 5; + + // HTTP request trailers match configuration. + HttpHeadersMatch http_request_trailers_match = 6; + + // HTTP response headers match configuration. + HttpHeadersMatch http_response_headers_match = 7; + + // HTTP response trailers match configuration. + HttpHeadersMatch http_response_trailers_match = 8; + + // HTTP request generic body match configuration. + HttpGenericBodyMatch http_request_generic_body_match = 9; + + // HTTP response generic body match configuration. + HttpGenericBodyMatch http_response_generic_body_match = 10; + } +} + +// HTTP headers match configuration. +message HttpHeadersMatch { + // HTTP headers to match. + repeated route.v3.HeaderMatcher headers = 1; +} + +// HTTP generic body match configuration. +// List of text strings and hex strings to be located in HTTP body. +// All specified strings must be found in the HTTP body for positive match. +// The search may be limited to specified number of bytes from the body start. +// +// .. attention:: +// +// Searching for patterns in HTTP body is potentially cpu intensive. For each specified pattern, http body is scanned byte by byte to find a match. +// If multiple patterns are specified, the process is repeated for each pattern. 
If location of a pattern is known, ``bytes_limit`` should be specified +// to scan only part of the http body. +message HttpGenericBodyMatch { + message GenericTextMatch { + oneof rule { + option (validate.required) = true; + + // Text string to be located in HTTP body. + string string_match = 1; + + // Sequence of bytes to be located in HTTP body. + bytes binary_match = 2; + } + } + + // Limits search to specified number of bytes - default zero (no limit - match entire captured buffer). + uint32 bytes_limit = 1; + + // List of patterns to match. + repeated GenericTextMatch patterns = 2 [(validate.rules).repeated = {min_items: 1}]; +} diff --git a/api/envoy/config/common/matcher/v4alpha/BUILD b/api/envoy/config/common/matcher/v4alpha/BUILD new file mode 100644 index 0000000000000..7028ce1a2aea3 --- /dev/null +++ b/api/envoy/config/common/matcher/v4alpha/BUILD @@ -0,0 +1,13 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/common/matcher/v3:pkg", + "//envoy/config/route/v4alpha:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/config/common/matcher/v4alpha/matcher.proto b/api/envoy/config/common/matcher/v4alpha/matcher.proto new file mode 100644 index 0000000000000..685ae03a1878f --- /dev/null +++ b/api/envoy/config/common/matcher/v4alpha/matcher.proto @@ -0,0 +1,114 @@ +syntax = "proto3"; + +package envoy.config.common.matcher.v4alpha; + +import "envoy/config/route/v4alpha/route_components.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.common.matcher.v4alpha"; +option java_outer_classname = "MatcherProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = 
NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Unified Matcher API] + +// Match configuration. This is a recursive structure which allows complex nested match +// configurations to be built using various logical operators. +// [#next-free-field: 11] +message MatchPredicate { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.common.matcher.v3.MatchPredicate"; + + // A set of match configurations used for logical operations. + message MatchSet { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.common.matcher.v3.MatchPredicate.MatchSet"; + + // The list of rules that make up the set. + repeated MatchPredicate rules = 1 [(validate.rules).repeated = {min_items: 2}]; + } + + oneof rule { + option (validate.required) = true; + + // A set that describes a logical OR. If any member of the set matches, the match configuration + // matches. + MatchSet or_match = 1; + + // A set that describes a logical AND. If all members of the set match, the match configuration + // matches. + MatchSet and_match = 2; + + // A negation match. The match configuration will match if the negated match condition matches. + MatchPredicate not_match = 3; + + // The match configuration will always match. + bool any_match = 4 [(validate.rules).bool = {const: true}]; + + // HTTP request headers match configuration. + HttpHeadersMatch http_request_headers_match = 5; + + // HTTP request trailers match configuration. + HttpHeadersMatch http_request_trailers_match = 6; + + // HTTP response headers match configuration. + HttpHeadersMatch http_response_headers_match = 7; + + // HTTP response trailers match configuration. + HttpHeadersMatch http_response_trailers_match = 8; + + // HTTP request generic body match configuration. + HttpGenericBodyMatch http_request_generic_body_match = 9; + + // HTTP response generic body match configuration. 
+ HttpGenericBodyMatch http_response_generic_body_match = 10; + } +} + +// HTTP headers match configuration. +message HttpHeadersMatch { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.common.matcher.v3.HttpHeadersMatch"; + + // HTTP headers to match. + repeated route.v4alpha.HeaderMatcher headers = 1; +} + +// HTTP generic body match configuration. +// List of text strings and hex strings to be located in HTTP body. +// All specified strings must be found in the HTTP body for positive match. +// The search may be limited to specified number of bytes from the body start. +// +// .. attention:: +// +// Searching for patterns in HTTP body is potentially cpu intensive. For each specified pattern, http body is scanned byte by byte to find a match. +// If multiple patterns are specified, the process is repeated for each pattern. If location of a pattern is known, ``bytes_limit`` should be specified +// to scan only part of the http body. +message HttpGenericBodyMatch { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.common.matcher.v3.HttpGenericBodyMatch"; + + message GenericTextMatch { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.common.matcher.v3.HttpGenericBodyMatch.GenericTextMatch"; + + oneof rule { + option (validate.required) = true; + + // Text string to be located in HTTP body. + string string_match = 1; + + // Sequence of bytes to be located in HTTP body. + bytes binary_match = 2; + } + } + + // Limits search to specified number of bytes - default zero (no limit - match entire captured buffer). + uint32 bytes_limit = 1; + + // List of patterns to match. 
+ repeated GenericTextMatch patterns = 2 [(validate.rules).repeated = {min_items: 1}]; +} diff --git a/api/envoy/config/core/v3/BUILD b/api/envoy/config/core/v3/BUILD index e52b984a61c73..60461220c20c4 100644 --- a/api/envoy/config/core/v3/BUILD +++ b/api/envoy/config/core/v3/BUILD @@ -11,5 +11,6 @@ api_proto_package( "//envoy/type/matcher/v3:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//udpa/core/v1:pkg", ], ) diff --git a/api/envoy/config/core/v3/base.proto b/api/envoy/config/core/v3/base.proto index b8ce5bff4bd55..4509c16625679 100644 --- a/api/envoy/config/core/v3/base.proto +++ b/api/envoy/config/core/v3/base.proto @@ -13,6 +13,7 @@ import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -94,7 +95,7 @@ message BuildVersion { type.v3.SemanticVersion version = 1; // Free-form build information. - // Envoy defines several well known keys in the source/common/common/version.h file + // Envoy defines several well known keys in the source/common/version/version.h file google.protobuf.Struct metadata = 2; } @@ -332,7 +333,8 @@ message RetryPolicy { // Specifies the allowed number of retries. This parameter is optional and // defaults to 1. - google.protobuf.UInt32Value num_retries = 2; + google.protobuf.UInt32Value num_retries = 2 + [(udpa.annotations.field_migrate).rename = "max_retries"]; } // The message specifies how to fetch data from remote and how to verify it. 
diff --git a/api/envoy/config/core/v3/config_source.proto b/api/envoy/config/core/v3/config_source.proto index b56e06e6de4fd..72837bb3bee10 100644 --- a/api/envoy/config/core/v3/config_source.proto +++ b/api/envoy/config/core/v3/config_source.proto @@ -7,6 +7,8 @@ import "envoy/config/core/v3/grpc_service.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/core/v1/authority.proto"; + import "envoy/annotations/deprecation.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; @@ -19,7 +21,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Configuration sources] -// xDS API version. This is used to describe both resource and transport +// xDS API and non-xDS services version. This is used to describe both resource and transport // protocol versions (in distinct configuration fields). enum ApiVersion { // When not specified, we assume v2, to ease migration to Envoy's stable API @@ -52,17 +54,23 @@ message ApiConfigSource { // the v2 protos is used. REST = 1; - // gRPC v2 API. + // SotW gRPC service. GRPC = 2; // Using the delta xDS gRPC service, i.e. DeltaDiscovery{Request,Response} // rather than Discovery{Request,Response}. Rather than sending Envoy the entire state // with every update, the xDS server only sends what has changed since the last update. - // - // DELTA_GRPC is not yet entirely implemented! Initially, only CDS is available. - // Do not use for other xDSes. - // [#comment:TODO(fredlas) update/remove this warning when appropriate.] DELTA_GRPC = 3; + + // SotW xDS gRPC with ADS. All resources which resolve to this configuration source will be + // multiplexed on a single connection to an ADS endpoint. + // [#not-implemented-hide:] + AGGREGATED_GRPC = 5; + + // Delta xDS gRPC with ADS. All resources which resolve to this configuration source will be + // multiplexed on a single connection to an ADS endpoint. 
+ // [#not-implemented-hide:] + AGGREGATED_DELTA_GRPC = 6; } // API type (gRPC, REST, delta gRPC) @@ -114,6 +122,10 @@ message AggregatedConfigSource { // specify that other data can be obtained from the same server. message SelfConfigSource { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.SelfConfigSource"; + + // API version for xDS transport protocol. This describes the xDS gRPC/REST + // endpoint and version of [Delta]DiscoveryRequest/Response used on the wire. + ApiVersion transport_api_version = 1 [(validate.rules).enum = {defined_only: true}]; } // Rate Limit settings to be applied for discovery requests made by Envoy. @@ -136,10 +148,17 @@ message RateLimitSettings { // ` etc. may either be sourced from the // filesystem or from an xDS API source. Filesystem configs are watched with // inotify for updates. -// [#next-free-field: 7] +// [#next-free-field: 8] message ConfigSource { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.ConfigSource"; + // Authorities that this config source may be used for. An authority specified + // in a *udpa.core.v1.ResourceLocator* is resolved to a *ConfigSource* prior + // to configuration fetch. This field provides the association between + // authority name and configuration source. 
+ // [#not-implemented-hide:] + repeated udpa.core.v1.Authority authorities = 7; + oneof config_source_specifier { option (validate.required) = true; diff --git a/api/envoy/config/core/v3/extension.proto b/api/envoy/config/core/v3/extension.proto new file mode 100644 index 0000000000000..ba66da6a8e363 --- /dev/null +++ b/api/envoy/config/core/v3/extension.proto @@ -0,0 +1,61 @@ +syntax = "proto3"; + +package envoy.config.core.v3; + +import "envoy/config/core/v3/config_source.proto"; + +import "google/protobuf/any.proto"; + +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.core.v3"; +option java_outer_classname = "ExtensionProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Extension configuration] + +// Message type for extension configuration. +// [#next-major-version: revisit all existing typed_config that doesn't use this wrapper.]. +message TypedExtensionConfig { + // The name of an extension. This is not used to select the extension, instead + // it serves the role of an opaque identifier. + string name = 1 [(validate.rules).string = {min_len: 1}]; + + // The typed config for the extension. The type URL will be used to identify + // the extension. In the case that the type URL is *udpa.type.v1.TypedStruct*, + // the inner type URL of *TypedStruct* will be utilized. See the + // :ref:`extension configuration overview + // ` for further details. + google.protobuf.Any typed_config = 2 [(validate.rules).any = {required: true}]; +} + +// Configuration source specifier for a late-bound extension configuration. The +// parent resource is warmed until all the initial extension configurations are +// received, unless the flag to apply the default configuration is set. +// Subsequent extension updates are atomic on a per-worker basis. 
Once an +// extension configuration is applied to a request or a connection, it remains +// constant for the duration of processing. If the initial delivery of the +// extension configuration fails, due to a timeout for example, the optional +// default configuration is applied. Without a default configuration, the +// extension is disabled, until an extension configuration is received. The +// behavior of a disabled extension depends on the context. For example, a +// filter chain with a disabled extension filter rejects all incoming streams. +message ExtensionConfigSource { + ConfigSource config_source = 1 [(validate.rules).any = {required: true}]; + + // Optional default configuration to use as the initial configuration if + // there is a failure to receive the initial extension configuration or if + // `apply_default_config_without_warming` flag is set. + google.protobuf.Any default_config = 2; + + // Use the default config as the initial configuration without warming and + // waiting for the first discovery response. Requires the default configuration + // to be supplied. + bool apply_default_config_without_warming = 3; + + // A set of permitted extension type URLs. Extension configuration updates are rejected + // if they do not match any type URL in the set. 
+ repeated string type_urls = 4 [(validate.rules).repeated = {min_items: 1}]; +} diff --git a/api/envoy/config/core/v3/grpc_service.proto b/api/envoy/config/core/v3/grpc_service.proto index 8719652a6bbe8..967c694d2bc4a 100644 --- a/api/envoy/config/core/v3/grpc_service.proto +++ b/api/envoy/config/core/v3/grpc_service.proto @@ -8,6 +8,7 @@ import "google/protobuf/any.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/empty.proto"; import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; import "udpa/annotations/sensitive.proto"; import "udpa/annotations/status.proto"; @@ -35,9 +36,15 @@ message GrpcService { // in the :ref:`Cluster ` :ref:`transport_socket // `. string cluster_name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // The `:authority` header in the grpc request. If this field is not set, the authority header value will be `cluster_name`. + // Note that this authority does not override the SNI. The SNI is provided by the transport socket of the cluster. + string authority = 2 + [(validate.rules).string = + {min_bytes: 0 max_bytes: 16384 well_known_regex: HTTP_HEADER_VALUE strict: false}]; } - // [#next-free-field: 7] + // [#next-free-field: 9] message GoogleGrpc { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.GrpcService.GoogleGrpc"; @@ -202,6 +209,24 @@ message GrpcService { } } + // Channel arguments. + message ChannelArgs { + message Value { + // Pointer values are not supported, since they don't make any sense when + // delivered via the API. + oneof value_specifier { + option (validate.required) = true; + + string string_value = 1; + + int64 int_value = 2; + } + } + + // See grpc_types.h GRPC_ARG #defines for keys that work here. + map args = 1; + } + // The target URI when using the `Google C++ gRPC client // `_. SSL credentials will be supplied in // :ref:`channel_credentials `. 
@@ -232,6 +257,13 @@ message GrpcService { // Additional configuration for site-specific customizations of the Google // gRPC library. google.protobuf.Struct config = 6; + + // How many bytes each stream can buffer internally. + // If not set an implementation defined default is applied (1MiB). + google.protobuf.UInt32Value per_stream_buffer_limit_bytes = 7; + + // Custom channel args. + ChannelArgs channel_args = 8; } reserved 4; diff --git a/api/envoy/config/core/v3/health_check.proto b/api/envoy/config/core/v3/health_check.proto index f4ef02e0f9666..c6b4acfa937ac 100644 --- a/api/envoy/config/core/v3/health_check.proto +++ b/api/envoy/config/core/v3/health_check.proto @@ -54,7 +54,7 @@ enum HealthStatus { DEGRADED = 5; } -// [#next-free-field: 23] +// [#next-free-field: 24] message HealthCheck { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.HealthCheck"; @@ -323,4 +323,36 @@ message HealthCheck { // This allows overriding the cluster TLS settings, just for health check connections. TlsOptions tls_options = 21; + + // Optional key/value pairs that will be used to match a transport socket from those specified in the cluster's + // :ref:`transport socket matches `. + // For example, the following match criteria + // + // .. code-block:: yaml + // + // transport_socket_match_criteria: + // useMTLS: true + // + // Will match the following :ref:`cluster socket match ` + // + // .. code-block:: yaml + // + // transport_socket_matches: + // - name: "useMTLS" + // match: + // useMTLS: true + // transport_socket: + // name: envoy.transport_sockets.tls + // config: { ... } # tls socket configuration + // + // If this field is set, then for health checks it will supersede an entry of *envoy.transport_socket* in the + // :ref:`LbEndpoint.Metadata `. + // This allows using different transport socket capabilities for health checking versus proxying to the + // endpoint. 
+ // + // If the key/values pairs specified do not match any + // :ref:`transport socket matches `, + // the cluster's :ref:`transport socket ` + // will be used for health check socket configuration. + google.protobuf.Struct transport_socket_match_criteria = 23; } diff --git a/api/envoy/config/core/v3/protocol.proto b/api/envoy/config/core/v3/protocol.proto index 400b0dd95a940..0ab6289e9659d 100644 --- a/api/envoy/config/core/v3/protocol.proto +++ b/api/envoy/config/core/v3/protocol.proto @@ -92,8 +92,6 @@ message HttpProtocolOptions { // Total duration to keep alive an HTTP request/response stream. If the time limit is reached the stream will be // reset independent of any other timeouts. If not specified, this value is not set. - // The current implementation implements this timeout on downstream connections only. - // [#comment:TODO(shikugawa): add this functionality to upstream.] google.protobuf.Duration max_stream_duration = 4; // Action to take when a client request with a header name containing underscore characters is received. @@ -161,7 +159,7 @@ message Http1ProtocolOptions { bool enable_trailers = 5; } -// [#next-free-field: 14] +// [#next-free-field: 15] message Http2ProtocolOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.Http2ProtocolOptions"; @@ -174,7 +172,7 @@ message Http2ProtocolOptions { // The 16 bit parameter identifier. google.protobuf.UInt32Value identifier = 1 [ - (validate.rules).uint32 = {lte: 65536 gte: 1}, + (validate.rules).uint32 = {lte: 65535 gte: 0}, (validate.rules).message = {required: true} ]; @@ -282,8 +280,25 @@ message Http2ProtocolOptions { // the whole HTTP/2 connection is terminated upon receiving invalid HEADERS frame. However, // when this option is enabled, only the offending stream is terminated. // + // This is overridden by HCM :ref:`stream_error_on_invalid_http_messaging + // ` + // iff present. 
+ // + // This is deprecated in favor of :ref:`override_stream_error_on_invalid_http_message + // ` + // + // See `RFC7540, sec. 8.1 `_ for details. + bool stream_error_on_invalid_http_messaging = 12 [deprecated = true]; + + // Allows invalid HTTP messaging and headers. When this option is disabled (default), then + // the whole HTTP/2 connection is terminated upon receiving invalid HEADERS frame. However, + // when this option is enabled, only the offending stream is terminated. + // + // This overrides any HCM :ref:`stream_error_on_invalid_http_messaging + // ` + // // See `RFC7540, sec. 8.1 `_ for details. - bool stream_error_on_invalid_http_messaging = 12; + google.protobuf.BoolValue override_stream_error_on_invalid_http_message = 14; // [#not-implemented-hide:] // Specifies SETTINGS frame parameters to be sent to the peer, with two exceptions: diff --git a/api/envoy/config/core/v3/proxy_protocol.proto b/api/envoy/config/core/v3/proxy_protocol.proto new file mode 100644 index 0000000000000..225a8971f23a6 --- /dev/null +++ b/api/envoy/config/core/v3/proxy_protocol.proto @@ -0,0 +1,26 @@ +syntax = "proto3"; + +package envoy.config.core.v3; + +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.core.v3"; +option java_outer_classname = "ProxyProtocolProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Proxy Protocol] + +message ProxyProtocolConfig { + enum Version { + // PROXY protocol version 1. Human readable format. + V1 = 0; + + // PROXY protocol version 2. Binary format. + V2 = 1; + } + + // The PROXY protocol version to use. 
See https://www.haproxy.org/download/2.1/doc/proxy-protocol.txt for details + Version version = 1; +} diff --git a/api/envoy/config/core/v3/substitution_format_string.proto b/api/envoy/config/core/v3/substitution_format_string.proto new file mode 100644 index 0000000000000..7537a1178b645 --- /dev/null +++ b/api/envoy/config/core/v3/substitution_format_string.proto @@ -0,0 +1,61 @@ +syntax = "proto3"; + +package envoy.config.core.v3; + +import "google/protobuf/struct.proto"; + +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.core.v3"; +option java_outer_classname = "SubstitutionFormatStringProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Substitution format string] + +// Configuration to use multiple :ref:`command operators ` +// to generate a new string in either plain text or JSON format. +message SubstitutionFormatString { + oneof format { + option (validate.required) = true; + + // Specify a format with command operators to form a text string. + // Its details are described in :ref:`format string`. + // + // .. code-block:: + // + // text_format: %LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=%REQ(:path)% + // + // The following plain text will be created: + // + // .. code-block:: + // + // upstream connect error:204:path=/foo + // + string text_format = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Specify a format with command operators to form a JSON string. + // Its details are described in :ref:`format dictionary`. + // Values are rendered as strings, numbers, or boolean values as appropriate. + // Nested JSON objects may be produced by some command operators (e.g. FILTER_STATE or DYNAMIC_METADATA). + // See the documentation for a specific command operator for details. + // + // .. 
code-block:: + // + // json_format: + // status: %RESPONSE_CODE% + // message: %LOCAL_REPLY_BODY% + // + // The following JSON object would be created: + // + // .. code-block:: json + // + // { + // "status": 500, + // "message": "My error message" + // } + // + google.protobuf.Struct json_format = 2 [(validate.rules).message = {required: true}]; + } +} diff --git a/api/envoy/config/core/v4alpha/BUILD b/api/envoy/config/core/v4alpha/BUILD index aeac38ac2833c..a4aa06ce9b44b 100644 --- a/api/envoy/config/core/v4alpha/BUILD +++ b/api/envoy/config/core/v4alpha/BUILD @@ -8,8 +8,9 @@ api_proto_package( deps = [ "//envoy/annotations:pkg", "//envoy/config/core/v3:pkg", - "//envoy/type/matcher/v3:pkg", + "//envoy/type/matcher/v4alpha:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//udpa/core/v1:pkg", ], ) diff --git a/api/envoy/config/core/v4alpha/address.proto b/api/envoy/config/core/v4alpha/address.proto index a2e6070103aef..ffade4bed75b4 100644 --- a/api/envoy/config/core/v4alpha/address.proto +++ b/api/envoy/config/core/v4alpha/address.proto @@ -45,7 +45,7 @@ message SocketAddress { // to the address. An empty address is not allowed. Specify ``0.0.0.0`` or ``::`` // to bind to any address. [#comment:TODO(zuercher) reinstate when implemented: // It is possible to distinguish a Listener address via the prefix/suffix matching - // in :ref:`FilterChainMatch `.] When used + // in :ref:`FilterChainMatch `.] When used // within an upstream :ref:`BindConfig `, the address // controls the source address of outbound connections. 
For :ref:`clusters // `, the cluster type determines whether the diff --git a/api/envoy/config/core/v4alpha/base.proto b/api/envoy/config/core/v4alpha/base.proto index dbc3c31e40e44..d7b5fd5836ff9 100644 --- a/api/envoy/config/core/v4alpha/base.proto +++ b/api/envoy/config/core/v4alpha/base.proto @@ -94,7 +94,7 @@ message BuildVersion { type.v3.SemanticVersion version = 1; // Free-form build information. - // Envoy defines several well known keys in the source/common/common/version.h file + // Envoy defines several well known keys in the source/common/version/version.h file google.protobuf.Struct metadata = 2; } @@ -332,7 +332,7 @@ message RetryPolicy { // Specifies the allowed number of retries. This parameter is optional and // defaults to 1. - google.protobuf.UInt32Value num_retries = 2; + google.protobuf.UInt32Value max_retries = 2; } // The message specifies how to fetch data from remote and how to verify it. diff --git a/api/envoy/config/core/v4alpha/config_source.proto b/api/envoy/config/core/v4alpha/config_source.proto index be600bd0096ed..72b4f03574396 100644 --- a/api/envoy/config/core/v4alpha/config_source.proto +++ b/api/envoy/config/core/v4alpha/config_source.proto @@ -7,6 +7,8 @@ import "envoy/config/core/v4alpha/grpc_service.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/core/v1/authority.proto"; + import "envoy/annotations/deprecation.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; @@ -19,7 +21,7 @@ option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSIO // [#protodoc-title: Configuration sources] -// xDS API version. This is used to describe both resource and transport +// xDS API and non-xDS services version. This is used to describe both resource and transport // protocol versions (in distinct configuration fields). 
enum ApiVersion { // When not specified, we assume v2, to ease migration to Envoy's stable API @@ -53,17 +55,23 @@ message ApiConfigSource { // the v2 protos is used. REST = 1; - // gRPC v2 API. + // SotW gRPC service. GRPC = 2; // Using the delta xDS gRPC service, i.e. DeltaDiscovery{Request,Response} // rather than Discovery{Request,Response}. Rather than sending Envoy the entire state // with every update, the xDS server only sends what has changed since the last update. - // - // DELTA_GRPC is not yet entirely implemented! Initially, only CDS is available. - // Do not use for other xDSes. - // [#comment:TODO(fredlas) update/remove this warning when appropriate.] DELTA_GRPC = 3; + + // SotW xDS gRPC with ADS. All resources which resolve to this configuration source will be + // multiplexed on a single connection to an ADS endpoint. + // [#not-implemented-hide:] + AGGREGATED_GRPC = 5; + + // Delta xDS gRPC with ADS. All resources which resolve to this configuration source will be + // multiplexed on a single connection to an ADS endpoint. + // [#not-implemented-hide:] + AGGREGATED_DELTA_GRPC = 6; } // API type (gRPC, REST, delta gRPC) @@ -116,6 +124,10 @@ message AggregatedConfigSource { message SelfConfigSource { option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.SelfConfigSource"; + + // API version for xDS transport protocol. This describes the xDS gRPC/REST + // endpoint and version of [Delta]DiscoveryRequest/Response used on the wire. + ApiVersion transport_api_version = 1 [(validate.rules).enum = {defined_only: true}]; } // Rate Limit settings to be applied for discovery requests made by Envoy. @@ -138,10 +150,17 @@ message RateLimitSettings { // ` etc. may either be sourced from the // filesystem or from an xDS API source. Filesystem configs are watched with // inotify for updates. 
-// [#next-free-field: 7] +// [#next-free-field: 8] message ConfigSource { option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.ConfigSource"; + // Authorities that this config source may be used for. An authority specified + // in a *udpa.core.v1.ResourceLocator* is resolved to a *ConfigSource* prior + // to configuration fetch. This field provides the association between + // authority name and configuration source. + // [#not-implemented-hide:] + repeated udpa.core.v1.Authority authorities = 7; + oneof config_source_specifier { option (validate.required) = true; diff --git a/api/envoy/config/core/v4alpha/extension.proto b/api/envoy/config/core/v4alpha/extension.proto new file mode 100644 index 0000000000000..4de107580d072 --- /dev/null +++ b/api/envoy/config/core/v4alpha/extension.proto @@ -0,0 +1,68 @@ +syntax = "proto3"; + +package envoy.config.core.v4alpha; + +import "envoy/config/core/v4alpha/config_source.proto"; + +import "google/protobuf/any.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.core.v4alpha"; +option java_outer_classname = "ExtensionProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Extension configuration] + +// Message type for extension configuration. +// [#next-major-version: revisit all existing typed_config that doesn't use this wrapper.]. +message TypedExtensionConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.TypedExtensionConfig"; + + // The name of an extension. This is not used to select the extension, instead + // it serves the role of an opaque identifier. + string name = 1 [(validate.rules).string = {min_len: 1}]; + + // The typed config for the extension. The type URL will be used to identify + // the extension. 
In the case that the type URL is *udpa.type.v1.TypedStruct*, + // the inner type URL of *TypedStruct* will be utilized. See the + // :ref:`extension configuration overview + // ` for further details. + google.protobuf.Any typed_config = 2 [(validate.rules).any = {required: true}]; +} + +// Configuration source specifier for a late-bound extension configuration. The +// parent resource is warmed until all the initial extension configurations are +// received, unless the flag to apply the default configuration is set. +// Subsequent extension updates are atomic on a per-worker basis. Once an +// extension configuration is applied to a request or a connection, it remains +// constant for the duration of processing. If the initial delivery of the +// extension configuration fails, due to a timeout for example, the optional +// default configuration is applied. Without a default configuration, the +// extension is disabled, until an extension configuration is received. The +// behavior of a disabled extension depends on the context. For example, a +// filter chain with a disabled extension filter rejects all incoming streams. +message ExtensionConfigSource { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.ExtensionConfigSource"; + + ConfigSource config_source = 1 [(validate.rules).any = {required: true}]; + + // Optional default configuration to use as the initial configuration if + // there is a failure to receive the initial extension configuration or if + // `apply_default_config_without_warming` flag is set. + google.protobuf.Any default_config = 2; + + // Use the default config as the initial configuration without warming and + // waiting for the first discovery response. Requires the default configuration + // to be supplied. + bool apply_default_config_without_warming = 3; + + // A set of permitted extension type URLs. Extension configuration updates are rejected + // if they do not match any type URL in the set. 
+ repeated string type_urls = 4 [(validate.rules).repeated = {min_items: 1}]; +} diff --git a/api/envoy/config/core/v4alpha/grpc_service.proto b/api/envoy/config/core/v4alpha/grpc_service.proto index 64bbc6b5f0778..51f11fa1f3467 100644 --- a/api/envoy/config/core/v4alpha/grpc_service.proto +++ b/api/envoy/config/core/v4alpha/grpc_service.proto @@ -8,6 +8,7 @@ import "google/protobuf/any.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/empty.proto"; import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; import "udpa/annotations/sensitive.proto"; import "udpa/annotations/status.proto"; @@ -35,9 +36,15 @@ message GrpcService { // in the :ref:`Cluster ` :ref:`transport_socket // `. string cluster_name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // The `:authority` header in the grpc request. If this field is not set, the authority header value will be `cluster_name`. + // Note that this authority does not override the SNI. The SNI is provided by the transport socket of the cluster. + string authority = 2 + [(validate.rules).string = + {min_bytes: 0 max_bytes: 16384 well_known_regex: HTTP_HEADER_VALUE strict: false}]; } - // [#next-free-field: 7] + // [#next-free-field: 9] message GoogleGrpc { option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.GrpcService.GoogleGrpc"; @@ -202,6 +209,30 @@ message GrpcService { } } + // Channel arguments. + message ChannelArgs { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs"; + + message Value { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs.Value"; + + // Pointer values are not supported, since they don't make any sense when + // delivered via the API. 
+ oneof value_specifier { + option (validate.required) = true; + + string string_value = 1; + + int64 int_value = 2; + } + } + + // See grpc_types.h GRPC_ARG #defines for keys that work here. + map args = 1; + } + // The target URI when using the `Google C++ gRPC client // `_. SSL credentials will be supplied in // :ref:`channel_credentials `. @@ -232,6 +263,13 @@ message GrpcService { // Additional configuration for site-specific customizations of the Google // gRPC library. google.protobuf.Struct config = 6; + + // How many bytes each stream can buffer internally. + // If not set an implementation defined default is applied (1MiB). + google.protobuf.UInt32Value per_stream_buffer_limit_bytes = 7; + + // Custom channels args. + ChannelArgs channel_args = 8; } reserved 4; diff --git a/api/envoy/config/core/v4alpha/health_check.proto b/api/envoy/config/core/v4alpha/health_check.proto index 1975c309a7ded..39badc334b01c 100644 --- a/api/envoy/config/core/v4alpha/health_check.proto +++ b/api/envoy/config/core/v4alpha/health_check.proto @@ -4,7 +4,7 @@ package envoy.config.core.v4alpha; import "envoy/config/core/v4alpha/base.proto"; import "envoy/config/core/v4alpha/event_service_config.proto"; -import "envoy/type/matcher/v3/string.proto"; +import "envoy/type/matcher/v4alpha/string.proto"; import "envoy/type/v3/http.proto"; import "envoy/type/v3/range.proto"; @@ -54,7 +54,7 @@ enum HealthStatus { DEGRADED = 5; } -// [#next-free-field: 23] +// [#next-free-field: 24] message HealthCheck { option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.HealthCheck"; @@ -125,9 +125,9 @@ message HealthCheck { // An optional service name parameter which is used to validate the identity of // the health checked cluster using a :ref:`StringMatcher - // `. See the :ref:`architecture overview + // `. See the :ref:`architecture overview // ` for more information. 
- type.matcher.v3.StringMatcher service_name_matcher = 11; + type.matcher.v4alpha.StringMatcher service_name_matcher = 11; } message TcpHealthCheck { @@ -206,7 +206,7 @@ message HealthCheck { // Specifies the ALPN protocols for health check connections. This is useful if the // corresponding upstream is using ALPN-based :ref:`FilterChainMatch - // ` along with different protocols for health checks + // ` along with different protocols for health checks // versus data connections. If empty, no ALPN protocols will be set on health check connections. repeated string alpn_protocols = 1; } @@ -323,4 +323,36 @@ message HealthCheck { // This allows overriding the cluster TLS settings, just for health check connections. TlsOptions tls_options = 21; + + // Optional key/value pairs that will be used to match a transport socket from those specified in the cluster's + // :ref:`tranport socket matches `. + // For example, the following match criteria + // + // .. code-block:: yaml + // + // transport_socket_match_criteria: + // useMTLS: true + // + // Will match the following :ref:`cluster socket match ` + // + // .. code-block:: yaml + // + // transport_socket_matches: + // - name: "useMTLS" + // match: + // useMTLS: true + // transport_socket: + // name: envoy.transport_sockets.tls + // config: { ... } # tls socket configuration + // + // If this field is set, then for health checks it will supersede an entry of *envoy.transport_socket* in the + // :ref:`LbEndpoint.Metadata `. + // This allows using different transport socket capabilities for health checking versus proxying to the + // endpoint. + // + // If the key/values pairs specified do not match any + // :ref:`transport socket matches `, + // the cluster's :ref:`transport socket ` + // will be used for health check socket configuration. 
+ google.protobuf.Struct transport_socket_match_criteria = 23; } diff --git a/api/envoy/config/core/v4alpha/protocol.proto b/api/envoy/config/core/v4alpha/protocol.proto index dcb205444524f..955c29335a3f4 100644 --- a/api/envoy/config/core/v4alpha/protocol.proto +++ b/api/envoy/config/core/v4alpha/protocol.proto @@ -92,8 +92,6 @@ message HttpProtocolOptions { // Total duration to keep alive an HTTP request/response stream. If the time limit is reached the stream will be // reset independent of any other timeouts. If not specified, this value is not set. - // The current implementation implements this timeout on downstream connections only. - // [#comment:TODO(shikugawa): add this functionality to upstream.] google.protobuf.Duration max_stream_duration = 4; // Action to take when a client request with a header name containing underscore characters is received. @@ -161,7 +159,7 @@ message Http1ProtocolOptions { bool enable_trailers = 5; } -// [#next-free-field: 14] +// [#next-free-field: 15] message Http2ProtocolOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.Http2ProtocolOptions"; @@ -174,7 +172,7 @@ message Http2ProtocolOptions { // The 16 bit parameter identifier. google.protobuf.UInt32Value identifier = 1 [ - (validate.rules).uint32 = {lte: 65536 gte: 1}, + (validate.rules).uint32 = {lte: 65535 gte: 0}, (validate.rules).message = {required: true} ]; @@ -182,6 +180,10 @@ message Http2ProtocolOptions { google.protobuf.UInt32Value value = 2 [(validate.rules).message = {required: true}]; } + reserved 12; + + reserved "stream_error_on_invalid_http_messaging"; + // `Maximum table size `_ // (in octets) that the encoder is permitted to use for the dynamic HPACK table. Valid values // range from 0 to 4294967295 (2^32 - 1) and defaults to 4096. 0 effectively disables header @@ -282,8 +284,11 @@ message Http2ProtocolOptions { // the whole HTTP/2 connection is terminated upon receiving invalid HEADERS frame. 
However, // when this option is enabled, only the offending stream is terminated. // + // This overrides any HCM :ref:`stream_error_on_invalid_http_messaging + // ` + // // See `RFC7540, sec. 8.1 `_ for details. - bool stream_error_on_invalid_http_messaging = 12; + google.protobuf.BoolValue override_stream_error_on_invalid_http_message = 14; // [#not-implemented-hide:] // Specifies SETTINGS frame parameters to be sent to the peer, with two exceptions: diff --git a/api/envoy/config/core/v4alpha/proxy_protocol.proto b/api/envoy/config/core/v4alpha/proxy_protocol.proto new file mode 100644 index 0000000000000..c7a8d1f454ddf --- /dev/null +++ b/api/envoy/config/core/v4alpha/proxy_protocol.proto @@ -0,0 +1,30 @@ +syntax = "proto3"; + +package envoy.config.core.v4alpha; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.core.v4alpha"; +option java_outer_classname = "ProxyProtocolProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Proxy Protocol] + +message ProxyProtocolConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.ProxyProtocolConfig"; + + enum Version { + // PROXY protocol version 1. Human readable format. + V1 = 0; + + // PROXY protocol version 2. Binary format. + V2 = 1; + } + + // The PROXY protocol version to use. 
See https://www.haproxy.org/download/2.1/doc/proxy-protocol.txt for details + Version version = 1; +} diff --git a/api/envoy/config/core/v4alpha/substitution_format_string.proto b/api/envoy/config/core/v4alpha/substitution_format_string.proto new file mode 100644 index 0000000000000..2d3e0a21b7905 --- /dev/null +++ b/api/envoy/config/core/v4alpha/substitution_format_string.proto @@ -0,0 +1,65 @@ +syntax = "proto3"; + +package envoy.config.core.v4alpha; + +import "google/protobuf/struct.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.core.v4alpha"; +option java_outer_classname = "SubstitutionFormatStringProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Substitution format string] + +// Configuration to use multiple :ref:`command operators ` +// to generate a new string in either plain text or JSON format. +message SubstitutionFormatString { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.SubstitutionFormatString"; + + oneof format { + option (validate.required) = true; + + // Specify a format with command operators to form a text string. + // Its details is described in :ref:`format string`. + // + // .. code-block:: + // + // text_format: %LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=$REQ(:path)% + // + // The following plain text will be created: + // + // .. code-block:: + // + // upstream connect error:204:path=/foo + // + string text_format = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Specify a format with command operators to form a JSON string. + // Its details is described in :ref:`format dictionary`. + // Values are rendered as strings, numbers, or boolean values as appropriate. + // Nested JSON objects may be produced by some command operators (e.g. 
FILTER_STATE or DYNAMIC_METADATA). + // See the documentation for a specific command operator for details. + // + // .. code-block:: + // + // json_format: + // status: %RESPONSE_CODE% + // message: %LOCAL_REPLY_BODY% + // + // The following JSON object would be created: + // + // .. code-block:: json + // + // { + // "status": 500, + // "message": "My error message" + // } + // + google.protobuf.Struct json_format = 2 [(validate.rules).message = {required: true}]; + } +} diff --git a/api/envoy/config/endpoint/v3/endpoint.proto b/api/envoy/config/endpoint/v3/endpoint.proto index 008b4ddc4993f..e58c327156cf5 100644 --- a/api/envoy/config/endpoint/v3/endpoint.proto +++ b/api/envoy/config/endpoint/v3/endpoint.proto @@ -40,6 +40,7 @@ message ClusterLoadAssignment { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.ClusterLoadAssignment.Policy"; + // [#not-implemented-hide:] message DropOverload { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.ClusterLoadAssignment.Policy.DropOverload"; @@ -74,11 +75,12 @@ message ClusterLoadAssignment { // "throttle"_drop = 60% // "lb"_drop = 20% // 50% of the remaining 'actual' load, which is 40%. // actual_outgoing_load = 20% // remaining after applying all categories. + // [#not-implemented-hide:] repeated DropOverload drop_overloads = 2; // Priority levels and localities are considered overprovisioned with this // factor (in percentage). This means that we don't consider a priority - // level or locality unhealthy until the percentage of healthy hosts + // level or locality unhealthy until the fraction of healthy hosts // multiplied by the overprovisioning factor drops below 100. 
// With the default value 140(1.4), Envoy doesn't consider a priority level // or a locality unhealthy until their percentage of healthy hosts drops diff --git a/api/envoy/config/endpoint/v3/load_report.proto b/api/envoy/config/endpoint/v3/load_report.proto index 01eb7b12cf1aa..3f067737ec25d 100644 --- a/api/envoy/config/endpoint/v3/load_report.proto +++ b/api/envoy/config/endpoint/v3/load_report.proto @@ -17,11 +17,11 @@ option java_outer_classname = "LoadReportProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; -// These are stats Envoy reports to GLB every so often. Report frequency is -// defined by +// [#protodoc-title: Load Report] + +// These are stats Envoy reports to the management server at a frequency defined by // :ref:`LoadStatsResponse.load_reporting_interval`. // Stats per upstream region/zone and optionally per subzone. -// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. // [#next-free-field: 9] message UpstreamLocalityStats { option (udpa.annotations.versioning).previous_message_type = @@ -60,7 +60,6 @@ message UpstreamLocalityStats { uint32 priority = 6; } -// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. // [#next-free-field: 8] message UpstreamEndpointStats { option (udpa.annotations.versioning).previous_message_type = @@ -103,7 +102,6 @@ message UpstreamEndpointStats { repeated EndpointLoadMetricStats load_metric_stats = 5; } -// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. message EndpointLoadMetricStats { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.endpoint.EndpointLoadMetricStats"; @@ -121,7 +119,6 @@ message EndpointLoadMetricStats { // Per cluster load stats. Envoy reports these stats a management server in a // :ref:`LoadStatsRequest` -// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. 
// Next ID: 7 // [#next-free-field: 7] message ClusterStats { diff --git a/api/envoy/config/filter/http/cache/v2alpha/cache.proto b/api/envoy/config/filter/http/cache/v2alpha/cache.proto index a9e51cf56a103..d08b5462fd88e 100644 --- a/api/envoy/config/filter/http/cache/v2alpha/cache.proto +++ b/api/envoy/config/filter/http/cache/v2alpha/cache.proto @@ -57,7 +57,7 @@ message CacheConfig { // contents of a response, as described by // https://httpwg.org/specs/rfc7234.html#caching.negotiated.responses. // - // During insertion, *allowed_vary_headers* acts as a whitelist: if a + // During insertion, *allowed_vary_headers* acts as a allowlist: if a // response's *vary* header mentions any header names that aren't in // *allowed_vary_headers*, that response will not be cached. // diff --git a/api/envoy/config/filter/http/ext_authz/v2/ext_authz.proto b/api/envoy/config/filter/http/ext_authz/v2/ext_authz.proto index a407f4628d2e4..db188a572ae09 100644 --- a/api/envoy/config/filter/http/ext_authz/v2/ext_authz.proto +++ b/api/envoy/config/filter/http/ext_authz/v2/ext_authz.proto @@ -24,7 +24,7 @@ option (udpa.annotations.file_status).package_version_status = FROZEN; // External Authorization :ref:`configuration overview `. // [#extension: envoy.filters.http.ext_authz] -// [#next-free-field: 11] +// [#next-free-field: 12] message ExtAuthz { // External authorization service configuration. oneof services { @@ -98,6 +98,15 @@ message ExtAuthz { // If this field is not specified, the filter will be enabled for all requests. api.v2.core.RuntimeFractionalPercent filter_enabled = 9; + // Specifies whether to deny the requests, when the filter is disabled. + // If :ref:`runtime_key ` is specified, + // Envoy will lookup the runtime key to determine whether to deny request for + // filter protected path at filter disabling. If filter is disabled in + // typed_per_filter_config for the path, requests will not be denied. 
+ // + // If this field is not specified, all requests will be allowed when disabled. + api.v2.core.RuntimeFeatureFlag deny_at_disable = 11; + // Specifies if the peer certificate is sent to the external service. // // When this field is true, Envoy will include the peer X.509 certificate, if available, in the diff --git a/api/envoy/config/filter/http/health_check/v2/health_check.proto b/api/envoy/config/filter/http/health_check/v2/health_check.proto index d7f6da8c82d43..7f2a486b26188 100644 --- a/api/envoy/config/filter/http/health_check/v2/health_check.proto +++ b/api/envoy/config/filter/http/health_check/v2/health_check.proto @@ -37,6 +37,11 @@ message HealthCheck { // If operating in non-pass-through mode, specifies a set of upstream cluster // names and the minimum percentage of servers in each of those clusters that // must be healthy or degraded in order for the filter to return a 200. + // + // .. note:: + // + // This value is interpreted as an integer by truncating, so 12.50% will be calculated + // as if it were 12%. map cluster_min_healthy_percentages = 4; // Specifies a set of health check request headers to match on. The health check filter will diff --git a/api/envoy/config/filter/network/client_ssl_auth/v2/client_ssl_auth.proto b/api/envoy/config/filter/network/client_ssl_auth/v2/client_ssl_auth.proto index d1f459078f20e..4da6d97ca2992 100644 --- a/api/envoy/config/filter/network/client_ssl_auth/v2/client_ssl_auth.proto +++ b/api/envoy/config/filter/network/client_ssl_auth/v2/client_ssl_auth.proto @@ -41,6 +41,6 @@ message ClientSSLAuth { // An optional list of IP address and subnet masks that should be white // listed for access by the filter. If no list is provided, there is no - // IP white list. + // IP allowlist. 
repeated api.v2.core.CidrRange ip_white_list = 4; } diff --git a/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto b/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto index c78e69b2ae309..06b13acb2f632 100644 --- a/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto +++ b/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto @@ -232,7 +232,7 @@ message HttpConnectionManager { // Determines if upgrades are enabled or disabled by default. Defaults to true. // This can be overridden on a per-route basis with :ref:`cluster // ` as documented in the - // :ref:`upgrade documentation `. + // :ref:`upgrade documentation `. google.protobuf.BoolValue enabled = 3; } @@ -262,8 +262,8 @@ message HttpConnectionManager { } // A list of individual HTTP filters that make up the filter chain for - // requests made to the connection manager. Order matters as the filters are - // processed sequentially as request events happen. + // requests made to the connection manager. :ref:`Order matters ` + // as the filters are processed sequentially as request events happen. repeated HttpFilter http_filters = 5; // Whether the connection manager manipulates the :ref:`config_http_conn_man_headers_user-agent` @@ -332,6 +332,16 @@ message HttpConnectionManager { // is terminated with a 408 Request Timeout error code if no upstream response // header has been received, otherwise a stream reset occurs. // + // This timeout also specifies the amount of time that Envoy will wait for the peer to open enough + // window to write any remaining stream data once the entirety of stream data (local end stream is + // true) has been buffered pending available window. 
In other words, this timeout defends against + // a peer that does not release enough window to completely write the stream, even though all + // data has been proxied within available flow control windows. If the timeout is hit in this + // case, the :ref:`tx_flush_timeout ` counter will be + // incremented. Note that :ref:`max_stream_duration + // ` does not apply to this corner + // case. + // // Note that it is possible to idle timeout even if the wire traffic for a stream is non-idle, due // to the granularity of events presented to the connection manager. For example, while receiving // very large request headers, it may be the case that there is traffic regularly arriving on the @@ -487,17 +497,17 @@ message HttpConnectionManager { // true in the future. When not specified, this value may be overridden by the // runtime variable // :ref:`http_connection_manager.normalize_path`. - // See `Normalization and Comparison ` + // See `Normalization and Comparison `_ // for details of normalization. // Note that Envoy does not perform - // `case normalization ` + // `case normalization `_ google.protobuf.BoolValue normalize_path = 30; // Determines if adjacent slashes in the path are merged into one before any processing of // requests by HTTP filters or routing. This affects the upstream *:path* header as well. Without // setting this option, incoming requests with path `//dir///file` will not match against route // with `prefix` match set to `/dir`. Defaults to `false`. Note that slash merging is not part of - // `HTTP spec ` and is provided for convenience. + // `HTTP spec `_ and is provided for convenience. bool merge_slashes = 33; // The configuration of the request ID extension. 
This includes operations such as diff --git a/api/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto b/api/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto index caca630fd297d..948d7c349ff00 100644 --- a/api/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto +++ b/api/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto @@ -34,10 +34,10 @@ message RedisProxy { // because replication is asynchronous and requires some delay. You need to ensure that your // application can tolerate stale data. enum ReadPolicy { - // Default mode. Read from the current master node. + // Default mode. Read from the current primary node. MASTER = 0; - // Read from the master, but if it is unavailable, read from replica nodes. + // Read from the primary, but if it is unavailable, read from replica nodes. PREFER_MASTER = 1; // Read from replica nodes. If multiple replica nodes are present within a shard, a random @@ -45,11 +45,11 @@ message RedisProxy { REPLICA = 2; // Read from the replica nodes (similar to REPLICA), but if all replicas are unavailable (not - // present or unhealthy), read from the master. + // present or unhealthy), read from the primary. PREFER_REPLICA = 3; - // Read from any node of the cluster. A random node is selected among the master and replicas, - // healthy nodes have precedent over unhealthy nodes. + // Read from any node of the cluster. A random node is selected among the primary and + // replicas, healthy nodes have precedent over unhealthy nodes. ANY = 4; } @@ -112,7 +112,7 @@ message RedisProxy { // count. bool enable_command_stats = 8; - // Read policy. The default is to read from the master. + // Read policy. The default is to read from the primary. 
ReadPolicy read_policy = 7 [(validate.rules).enum = {defined_only: true}]; } diff --git a/api/envoy/config/filter/udp/dns_filter/v2alpha/dns_filter.proto b/api/envoy/config/filter/udp/dns_filter/v2alpha/dns_filter.proto deleted file mode 100644 index de2608d44306c..0000000000000 --- a/api/envoy/config/filter/udp/dns_filter/v2alpha/dns_filter.proto +++ /dev/null @@ -1,48 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.udp.dns_filter.v2alpha; - -import "envoy/api/v2/core/base.proto"; -import "envoy/data/dns/v2alpha/dns_table.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.udp.dns_filter.v2alpha"; -option java_outer_classname = "DnsFilterProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filter.udp.dns_filter.v3alpha"; -option (udpa.annotations.file_status).work_in_progress = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: DNS Filter] -// DNS Filter :ref:`configuration overview `. -// [#extension: envoy.filters.udp_listener.dns_filter] - -// Configuration for the DNS filter. -message DnsFilterConfig { - // This message contains the configuration for the Dns Filter operating - // in a server context. This message will contain the virtual hosts and - // associated addresses with which Envoy will respond to queries - message ServerContextConfig { - oneof config_source { - option (validate.required) = true; - - // Load the configuration specified from the control plane - data.dns.v2alpha.DnsTable inline_dns_table = 1; - - // Seed the filter configuration from an external path. 
This source - // is a yaml formatted file that contains the DnsTable driving Envoy's - // responses to DNS queries - api.v2.core.DataSource external_dns_table = 2; - } - } - - // The stat prefix used when emitting DNS filter statistics - string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; - - // Server context configuration - ServerContextConfig server_config = 2; -} diff --git a/api/envoy/config/filter/udp/udp_proxy/v2alpha/udp_proxy.proto b/api/envoy/config/filter/udp/udp_proxy/v2alpha/udp_proxy.proto index 5079c1f0df484..06dc150d5c70b 100644 --- a/api/envoy/config/filter/udp/udp_proxy/v2alpha/udp_proxy.proto +++ b/api/envoy/config/filter/udp/udp_proxy/v2alpha/udp_proxy.proto @@ -4,13 +4,16 @@ package envoy.config.filter.udp.udp_proxy.v2alpha; import "google/protobuf/duration.proto"; +import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.udp.udp_proxy.v2alpha"; option java_outer_classname = "UdpProxyProto"; option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; +option (udpa.annotations.file_migrate).move_to_package = + "envoy.extensions.filters.udp.udp_proxy.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: UDP proxy] // UDP proxy :ref:`configuration overview `. 
diff --git a/api/envoy/config/listener/v3/BUILD b/api/envoy/config/listener/v3/BUILD index 71c151c040bcd..25a099645cce3 100644 --- a/api/envoy/config/listener/v3/BUILD +++ b/api/envoy/config/listener/v3/BUILD @@ -13,5 +13,6 @@ api_proto_package( "//envoy/config/listener/v2:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//udpa/core/v1:pkg", ], ) diff --git a/api/envoy/config/listener/v3/listener.proto b/api/envoy/config/listener/v3/listener.proto index 473a5eb2b42b6..8c5066909caf9 100644 --- a/api/envoy/config/listener/v3/listener.proto +++ b/api/envoy/config/listener/v3/listener.proto @@ -5,6 +5,7 @@ package envoy.config.listener.v3; import "envoy/config/accesslog/v3/accesslog.proto"; import "envoy/config/core/v3/address.proto"; import "envoy/config/core/v3/base.proto"; +import "envoy/config/core/v3/extension.proto"; import "envoy/config/core/v3/socket_option.proto"; import "envoy/config/listener/v3/api_listener.proto"; import "envoy/config/listener/v3/listener_components.proto"; @@ -14,6 +15,9 @@ import "google/api/annotations.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/core/v1/collection_entry.proto"; + +import "udpa/annotations/security.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -26,7 +30,13 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Listener configuration] // Listener :ref:`configuration overview ` -// [#next-free-field: 23] +// Listener list collections. Entries are *Listener* resources or references. 
+// [#not-implemented-hide:] +message ListenerCollection { + udpa.core.v1.CollectionEntry entries = 1; +} + +// [#next-free-field: 24] message Listener { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Listener"; @@ -108,7 +118,8 @@ message Listener { // Soft limit on size of the listener’s new connection read and write buffers. // If unspecified, an implementation defined default is applied (1MiB). - google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5; + google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5 + [(udpa.annotations.security).configure_for_untrusted_downstream = true]; // Listener metadata. core.v3.Metadata metadata = 6; @@ -238,4 +249,14 @@ message Listener { // Configuration for :ref:`access logs ` // emitted by this listener. repeated accesslog.v3.AccessLog access_log = 22; + + // If the protocol in the listener socket address in :ref:`protocol + // ` is :ref:`UDP + // `, this field specifies the actual udp + // writer to create, i.e. :ref:`name ` + // = "udp_default_writer" for creating a udp writer with writing in passthrough mode, + // = "udp_gso_batch_writer" for creating a udp writer with writing in batch mode. + // If not present, treat it as "udp_default_writer". + // [#not-implemented-hide:] + core.v3.TypedExtensionConfig udp_writer_config = 23; } diff --git a/api/envoy/config/listener/v3/quic_config.proto b/api/envoy/config/listener/v3/quic_config.proto index 9949da2e0d708..c024be95bacee 100644 --- a/api/envoy/config/listener/v3/quic_config.proto +++ b/api/envoy/config/listener/v3/quic_config.proto @@ -2,6 +2,8 @@ syntax = "proto3"; package envoy.config.listener.v3; +import "envoy/config/core/v3/base.proto"; + import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; @@ -16,7 +18,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: QUIC listener Config] // Configuration specific to the QUIC protocol. 
-// Next id: 4 +// Next id: 5 message QuicProtocolOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.listener.QuicProtocolOptions"; @@ -32,4 +34,8 @@ message QuicProtocolOptions { // Connection timeout in milliseconds before the crypto handshake is finished. // 20000ms if not specified. google.protobuf.Duration crypto_handshake_timeout = 3; + + // Runtime flag that controls whether the listener is enabled or not. If not specified, defaults + // to enabled. + core.v3.RuntimeFeatureFlag enabled = 4; } diff --git a/api/envoy/config/listener/v3/udp_default_writer_config.proto b/api/envoy/config/listener/v3/udp_default_writer_config.proto new file mode 100644 index 0000000000000..707a66c7b5c48 --- /dev/null +++ b/api/envoy/config/listener/v3/udp_default_writer_config.proto @@ -0,0 +1,21 @@ +syntax = "proto3"; + +package envoy.config.listener.v3; + +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.config.listener.v3"; +option java_outer_classname = "UdpDefaultWriterConfigProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Udp Default Writer Config] + +// [#not-implemented-hide:] +// Configuration specific to the Udp Default Writer. 
+message UdpDefaultWriterOptions { +} diff --git a/api/envoy/config/listener/v3/udp_gso_batch_writer_config.proto b/api/envoy/config/listener/v3/udp_gso_batch_writer_config.proto new file mode 100644 index 0000000000000..134cb6a42dd22 --- /dev/null +++ b/api/envoy/config/listener/v3/udp_gso_batch_writer_config.proto @@ -0,0 +1,21 @@ +syntax = "proto3"; + +package envoy.config.listener.v3; + +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.config.listener.v3"; +option java_outer_classname = "UdpGsoBatchWriterConfigProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Udp Gso Batch Writer Config] + +// [#not-implemented-hide:] +// Configuration specific to the Udp Gso Batch Writer. +message UdpGsoBatchWriterOptions { +} diff --git a/api/envoy/config/listener/v4alpha/BUILD b/api/envoy/config/listener/v4alpha/BUILD new file mode 100644 index 0000000000000..cde02c9329192 --- /dev/null +++ b/api/envoy/config/listener/v4alpha/BUILD @@ -0,0 +1,16 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/accesslog/v4alpha:pkg", + "//envoy/config/core/v4alpha:pkg", + "//envoy/config/listener/v3:pkg", + "//envoy/type/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//udpa/core/v1:pkg", + ], +) diff --git a/api/envoy/config/listener/v4alpha/api_listener.proto b/api/envoy/config/listener/v4alpha/api_listener.proto new file mode 100644 index 0000000000000..b8d076c365832 --- /dev/null +++ b/api/envoy/config/listener/v4alpha/api_listener.proto @@ -0,0 +1,32 @@ +syntax = "proto3"; + +package envoy.config.listener.v4alpha; + +import "google/protobuf/any.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.config.listener.v4alpha"; +option java_outer_classname = "ApiListenerProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: API listener] + +// Describes a type of API listener, which is used in non-proxy clients. The type of API +// exposed to the non-proxy application depends on the type of API listener. +message ApiListener { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.listener.v3.ApiListener"; + + // The type in this field determines the type of API listener. At present, the following + // types are supported: + // envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager (HTTP) + // [#next-major-version: In the v3 API, replace this Any field with a oneof containing the + // specific config message for each type of API listener. 
We could not do this in v2 because + // it would have caused circular dependencies for go protos: lds.proto depends on this file, + // and http_connection_manager.proto depends on rds.proto, which is in the same directory as + // lds.proto, so lds.proto cannot depend on this file.] + google.protobuf.Any api_listener = 1; +} diff --git a/api/envoy/config/listener/v4alpha/listener.proto b/api/envoy/config/listener/v4alpha/listener.proto new file mode 100644 index 0000000000000..c188ecb244904 --- /dev/null +++ b/api/envoy/config/listener/v4alpha/listener.proto @@ -0,0 +1,265 @@ +syntax = "proto3"; + +package envoy.config.listener.v4alpha; + +import "envoy/config/accesslog/v4alpha/accesslog.proto"; +import "envoy/config/core/v4alpha/address.proto"; +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/config/core/v4alpha/extension.proto"; +import "envoy/config/core/v4alpha/socket_option.proto"; +import "envoy/config/listener/v4alpha/api_listener.proto"; +import "envoy/config/listener/v4alpha/listener_components.proto"; +import "envoy/config/listener/v4alpha/udp_listener_config.proto"; + +import "google/api/annotations.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/core/v1/collection_entry.proto"; + +import "udpa/annotations/security.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.listener.v4alpha"; +option java_outer_classname = "ListenerProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Listener configuration] +// Listener :ref:`configuration overview ` + +// Listener list collections. Entries are *Listener* resources or references. 
+// [#not-implemented-hide:] +message ListenerCollection { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.listener.v3.ListenerCollection"; + + udpa.core.v1.CollectionEntry entries = 1; +} + +// [#next-free-field: 24] +message Listener { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.listener.v3.Listener"; + + enum DrainType { + // Drain in response to calling /healthcheck/fail admin endpoint (along with the health check + // filter), listener removal/modification, and hot restart. + DEFAULT = 0; + + // Drain in response to listener removal/modification and hot restart. This setting does not + // include /healthcheck/fail. This setting may be desirable if Envoy is hosting both ingress + // and egress listeners. + MODIFY_ONLY = 1; + } + + // [#not-implemented-hide:] + message DeprecatedV1 { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.listener.v3.Listener.DeprecatedV1"; + + // Whether the listener should bind to the port. A listener that doesn't + // bind can only receive connections redirected from other listeners that + // set use_original_dst parameter to true. Default is true. + // + // This is deprecated in v2, all Listeners will bind to their port. An + // additional filter chain must be created for every original destination + // port this listener may redirect to in v2, with the original port + // specified in the FilterChainMatch destination_port field. + // + // [#comment:TODO(PiotrSikora): Remove this once verified that we no longer need it.] + google.protobuf.BoolValue bind_to_port = 1; + } + + // Configuration for listener connection balancing. + message ConnectionBalanceConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.listener.v3.Listener.ConnectionBalanceConfig"; + + // A connection balancer implementation that does exact balancing. 
This means that a lock is + // held during balancing so that connection counts are nearly exactly balanced between worker + // threads. This is "nearly" exact in the sense that a connection might close in parallel thus + // making the counts incorrect, but this should be rectified on the next accept. This balancer + // sacrifices accept throughput for accuracy and should be used when there are a small number of + // connections that rarely cycle (e.g., service mesh gRPC egress). + message ExactBalance { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.listener.v3.Listener.ConnectionBalanceConfig.ExactBalance"; + } + + oneof balance_type { + option (validate.required) = true; + + // If specified, the listener will use the exact connection balancer. + ExactBalance exact_balance = 1; + } + } + + reserved 14, 4; + + reserved "use_original_dst"; + + // The unique name by which this listener is known. If no name is provided, + // Envoy will allocate an internal UUID for the listener. If the listener is to be dynamically + // updated or removed via :ref:`LDS ` a unique name must be provided. + string name = 1; + + // The address that the listener should listen on. In general, the address must be unique, though + // that is governed by the bind rules of the OS. E.g., multiple listeners can listen on port 0 on + // Linux as the actual port will be allocated by the OS. + core.v4alpha.Address address = 2 [(validate.rules).message = {required: true}]; + + // A list of filter chains to consider for this listener. The + // :ref:`FilterChain ` with the most specific + // :ref:`FilterChainMatch ` criteria is used on a + // connection. + // + // Example using SNI for filter chain selection can be found in the + // :ref:`FAQ entry `. + repeated FilterChain filter_chains = 3; + + // Soft limit on size of the listener’s new connection read and write buffers. + // If unspecified, an implementation defined default is applied (1MiB). 
+ google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5 + [(udpa.annotations.security).configure_for_untrusted_downstream = true]; + + // Listener metadata. + core.v4alpha.Metadata metadata = 6; + + // [#not-implemented-hide:] + DeprecatedV1 deprecated_v1 = 7; + + // The type of draining to perform at a listener-wide level. + DrainType drain_type = 8; + + // Listener filters have the opportunity to manipulate and augment the connection metadata that + // is used in connection filter chain matching, for example. These filters are run before any in + // :ref:`filter_chains `. Order matters as the + // filters are processed sequentially right after a socket has been accepted by the listener, and + // before a connection is created. + // UDP Listener filters can be specified when the protocol in the listener socket address in + // :ref:`protocol ` is :ref:`UDP + // `. + // UDP listeners currently support a single filter. + repeated ListenerFilter listener_filters = 9; + + // The timeout to wait for all listener filters to complete operation. If the timeout is reached, + // the accepted socket is closed without a connection being created unless + // `continue_on_listener_filters_timeout` is set to true. Specify 0 to disable the + // timeout. If not specified, a default timeout of 15s is used. + google.protobuf.Duration listener_filters_timeout = 15; + + // Whether a connection should be created when listener filters timeout. Default is false. + // + // .. attention:: + // + // Some listener filters, such as :ref:`Proxy Protocol filter + // `, should not be used with this option. It will cause + // unexpected behavior when a connection is created. + bool continue_on_listener_filters_timeout = 17; + + // Whether the listener should be set as a transparent socket. 
+ // When this flag is set to true, connections can be redirected to the listener using an + // *iptables* *TPROXY* target, in which case the original source and destination addresses and + // ports are preserved on accepted connections. This flag should be used in combination with + // :ref:`an original_dst ` :ref:`listener filter + // ` to mark the connections' local addresses as + // "restored." This can be used to hand off each redirected connection to another listener + // associated with the connection's destination address. Direct connections to the socket without + // using *TPROXY* cannot be distinguished from connections redirected using *TPROXY* and are + // therefore treated as if they were redirected. + // When this flag is set to false, the listener's socket is explicitly reset as non-transparent. + // Setting this flag requires Envoy to run with the *CAP_NET_ADMIN* capability. + // When this flag is not set (default), the socket is not modified, i.e. the transparent option + // is neither set nor reset. + google.protobuf.BoolValue transparent = 10; + + // Whether the listener should set the *IP_FREEBIND* socket option. When this + // flag is set to true, listeners can be bound to an IP address that is not + // configured on the system running Envoy. When this flag is set to false, the + // option *IP_FREEBIND* is disabled on the socket. When this flag is not set + // (default), the socket is not modified, i.e. the option is neither enabled + // nor disabled. + google.protobuf.BoolValue freebind = 11; + + // Additional socket options that may not be present in Envoy source code or + // precompiled binaries. + repeated core.v4alpha.SocketOption socket_options = 13; + + // Whether the listener should accept TCP Fast Open (TFO) connections. + // When this flag is set to a value greater than 0, the option TCP_FASTOPEN is enabled on + // the socket, with a queue length of the specified size + // (see `details in RFC7413 `_). 
+ // When this flag is set to 0, the option TCP_FASTOPEN is disabled on the socket. + // When this flag is not set (default), the socket is not modified, + // i.e. the option is neither enabled nor disabled. + // + // On Linux, the net.ipv4.tcp_fastopen kernel parameter must include flag 0x2 to enable + // TCP_FASTOPEN. + // See `ip-sysctl.txt `_. + // + // On macOS, only values of 0, 1, and unset are valid; other values may result in an error. + // To set the queue length on macOS, set the net.inet.tcp.fastopen_backlog kernel parameter. + google.protobuf.UInt32Value tcp_fast_open_queue_length = 12; + + // Specifies the intended direction of the traffic relative to the local Envoy. + core.v4alpha.TrafficDirection traffic_direction = 16; + + // If the protocol in the listener socket address in :ref:`protocol + // ` is :ref:`UDP + // `, this field specifies the actual udp + // listener to create, i.e. :ref:`udp_listener_name + // ` = "raw_udp_listener" for + // creating a packet-oriented UDP listener. If not present, treat it as "raw_udp_listener". + UdpListenerConfig udp_listener_config = 18; + + // Used to represent an API listener, which is used in non-proxy clients. The type of API + // exposed to the non-proxy application depends on the type of API listener. + // When this field is set, no other field except for :ref:`name` + // should be set. + // + // .. note:: + // + // Currently only one ApiListener can be installed; and it can only be done via bootstrap config, + // not LDS. + // + // [#next-major-version: In the v3 API, instead of this messy approach where the socket + // listener fields are directly in the top-level Listener message and the API listener types + // are in the ApiListener message, the socket listener messages should be in their own message, + // and the top-level Listener should essentially be a oneof that selects between the + // socket listener and the various types of API listener. 
That way, a given Listener message + // can structurally only contain the fields of the relevant type.] + ApiListener api_listener = 19; + + // The listener's connection balancer configuration, currently only applicable to TCP listeners. + // If no configuration is specified, Envoy will not attempt to balance active connections between + // worker threads. + ConnectionBalanceConfig connection_balance_config = 20; + + // When this flag is set to true, listeners set the *SO_REUSEPORT* socket option and + // create one socket for each worker thread. This makes inbound connections + // distribute among worker threads roughly evenly in cases where there are a high number + // of connections. When this flag is set to false, all worker threads share one socket. + // + // Before Linux v4.19-rc1, new TCP connections may be rejected during hot restart + // (see `3rd paragraph in 'soreuseport' commit message + // `_). + // This issue was fixed by `tcp: Avoid TCP syncookie rejected by SO_REUSEPORT socket + // `_. + bool reuse_port = 21; + + // Configuration for :ref:`access logs ` + // emitted by this listener. + repeated accesslog.v4alpha.AccessLog access_log = 22; + + // If the protocol in the listener socket address in :ref:`protocol + // ` is :ref:`UDP + // `, this field specifies the actual udp + // writer to create, i.e. :ref:`name ` + // = "udp_default_writer" for creating a udp writer with writing in passthrough mode, + // = "udp_gso_batch_writer" for creating a udp writer with writing in batch mode. + // If not present, treat it as "udp_default_writer". 
+ // [#not-implemented-hide:] + core.v4alpha.TypedExtensionConfig udp_writer_config = 23; +} diff --git a/api/envoy/config/listener/v4alpha/listener_components.proto b/api/envoy/config/listener/v4alpha/listener_components.proto new file mode 100644 index 0000000000000..6900cde390162 --- /dev/null +++ b/api/envoy/config/listener/v4alpha/listener_components.proto @@ -0,0 +1,298 @@ +syntax = "proto3"; + +package envoy.config.listener.v4alpha; + +import "envoy/config/core/v4alpha/address.proto"; +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/type/v3/range.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.listener.v4alpha"; +option java_outer_classname = "ListenerComponentsProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Listener components] +// Listener :ref:`configuration overview ` + +message Filter { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.listener.v3.Filter"; + + reserved 3, 2; + + reserved "config"; + + // The name of the filter to instantiate. The name must match a + // :ref:`supported filter `. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Filter specific configuration which depends on the filter being + // instantiated. See the supported filters for further documentation. + oneof config_type { + google.protobuf.Any typed_config = 4; + } +} + +// Specifies the match criteria for selecting a specific filter chain for a +// listener. 
+// +// In order for a filter chain to be selected, *ALL* of its criteria must be +// fulfilled by the incoming connection, properties of which are set by the +// networking stack and/or listener filters. +// +// The following order applies: +// +// 1. Destination port. +// 2. Destination IP address. +// 3. Server name (e.g. SNI for TLS protocol), +// 4. Transport protocol. +// 5. Application protocols (e.g. ALPN for TLS protocol). +// 6. Source type (e.g. any, local or external network). +// 7. Source IP address. +// 8. Source port. +// +// For criteria that allow ranges or wildcards, the most specific value in any +// of the configured filter chains that matches the incoming connection is going +// to be used (e.g. for SNI ``www.example.com`` the most specific match would be +// ``www.example.com``, then ``*.example.com``, then ``*.com``, then any filter +// chain without ``server_names`` requirements). +// +// [#comment: Implemented rules are kept in the preference order, with deprecated fields +// listed at the end, because that's how we want to list them in the docs. +// +// [#comment:TODO(PiotrSikora): Add support for configurable precedence of the rules] +// [#next-free-field: 13] +message FilterChainMatch { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.listener.v3.FilterChainMatch"; + + enum ConnectionSourceType { + // Any connection source matches. + ANY = 0; + + // Match a connection originating from the same host. + SAME_IP_OR_LOOPBACK = 1; + + // Match a connection originating from a different host. + EXTERNAL = 2; + } + + reserved 1; + + // Optional destination port to consider when use_original_dst is set on the + // listener in determining a filter chain match. + google.protobuf.UInt32Value destination_port = 8 [(validate.rules).uint32 = {lte: 65535 gte: 1}]; + + // If non-empty, an IP address and prefix length to match addresses when the + // listener is bound to 0.0.0.0/:: or when use_original_dst is specified. 
+ repeated core.v4alpha.CidrRange prefix_ranges = 3; + + // If non-empty, an IP address and suffix length to match addresses when the + // listener is bound to 0.0.0.0/:: or when use_original_dst is specified. + // [#not-implemented-hide:] + string address_suffix = 4; + + // [#not-implemented-hide:] + google.protobuf.UInt32Value suffix_len = 5; + + // Specifies the connection source IP match type. Can be any, local or external network. + ConnectionSourceType source_type = 12 [(validate.rules).enum = {defined_only: true}]; + + // The criteria is satisfied if the source IP address of the downstream + // connection is contained in at least one of the specified subnets. If the + // parameter is not specified or the list is empty, the source IP address is + // ignored. + repeated core.v4alpha.CidrRange source_prefix_ranges = 6; + + // The criteria is satisfied if the source port of the downstream connection + // is contained in at least one of the specified ports. If the parameter is + // not specified, the source port is ignored. + repeated uint32 source_ports = 7 + [(validate.rules).repeated = {items {uint32 {lte: 65535 gte: 1}}}]; + + // If non-empty, a list of server names (e.g. SNI for TLS protocol) to consider when determining + // a filter chain match. Those values will be compared against the server names of a new + // connection, when detected by one of the listener filters. + // + // The server name will be matched against all wildcard domains, i.e. ``www.example.com`` + // will be first matched against ``www.example.com``, then ``*.example.com``, then ``*.com``. + // + // Note that partial wildcards are not supported, and values like ``*w.example.com`` are invalid. + // + // .. attention:: + // + // See the :ref:`FAQ entry ` on how to configure SNI for more + // information. + repeated string server_names = 11; + + // If non-empty, a transport protocol to consider when determining a filter chain match. 
+ // This value will be compared against the transport protocol of a new connection, when + // it's detected by one of the listener filters. + // + // Suggested values include: + // + // * ``raw_buffer`` - default, used when no transport protocol is detected, + // * ``tls`` - set by :ref:`envoy.filters.listener.tls_inspector ` + // when TLS protocol is detected. + string transport_protocol = 9; + + // If non-empty, a list of application protocols (e.g. ALPN for TLS protocol) to consider when + // determining a filter chain match. Those values will be compared against the application + // protocols of a new connection, when detected by one of the listener filters. + // + // Suggested values include: + // + // * ``http/1.1`` - set by :ref:`envoy.filters.listener.tls_inspector + // `, + // * ``h2`` - set by :ref:`envoy.filters.listener.tls_inspector ` + // + // .. attention:: + // + // Currently, only :ref:`TLS Inspector ` provides + // application protocol detection based on the requested + // `ALPN `_ values. + // + // However, the use of ALPN is pretty much limited to the HTTP/2 traffic on the Internet, + // and matching on values other than ``h2`` is going to lead to a lot of false negatives, + // unless all connecting clients are known to use ALPN. + repeated string application_protocols = 10; +} + +// A filter chain wraps a set of match criteria, an option TLS context, a set of filters, and +// various other parameters. +// [#next-free-field: 8] +message FilterChain { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.listener.v3.FilterChain"; + + reserved 2; + + reserved "tls_context"; + + // The criteria to use when matching a connection to this filter chain. + FilterChainMatch filter_chain_match = 1; + + // A list of individual network filters that make up the filter chain for + // connections established with the listener. Order matters as the filters are + // processed sequentially as connection events happen. 
Note: If the filter
+  // list is empty, the connection will close by default.
+  repeated Filter filters = 3;
+
+  // Whether the listener should expect a PROXY protocol V1 header on new
+  // connections. If this option is enabled, the listener will assume that the
+  // remote address of the connection is the one specified in the header. Some
+  // load balancers including the AWS ELB support this option. If the option is
+  // absent or set to false, Envoy will use the physical peer address of the
+  // connection as the remote address.
+  google.protobuf.BoolValue use_proxy_proto = 4;
+
+  // [#not-implemented-hide:] filter chain metadata.
+  core.v4alpha.Metadata metadata = 5;
+
+  // Optional custom transport socket implementation to use for downstream connections.
+  // To set up TLS, set a transport socket with name `tls` and
+  // :ref:`DownstreamTlsContext ` in the `typed_config`.
+  // If no transport socket configuration is specified, new connections
+  // will be set up with plaintext.
+  core.v4alpha.TransportSocket transport_socket = 6;
+
+  // [#not-implemented-hide:] The unique name (or empty) by which this filter chain is known. If no
+  // name is provided, Envoy will allocate an internal UUID for the filter chain. If the filter
+  // chain is to be dynamically updated or removed via FCDS a unique name must be provided.
+  string name = 7;
+}
+
+// Listener filter chain match configuration. This is a recursive structure which allows complex
+// nested match configurations to be built using various logical operators.
+//
+// Examples:
+//
+// * Matches if the destination port is 3306.
+//
+// .. code-block:: yaml
+//
+//  destination_port_range:
+//    start: 3306
+//    end: 3307
+//
+// * Matches if the destination port is 3306 or 15000.
+//
+// ..
code-block:: yaml
+//
+//  or_match:
+//    rules:
+//      - destination_port_range:
+//          start: 3306
+//          end: 3307
+//      - destination_port_range:
+//          start: 15000
+//          end: 15001
+//
+// [#next-free-field: 6]
+message ListenerFilterChainMatchPredicate {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.listener.v3.ListenerFilterChainMatchPredicate";
+
+  // A set of match configurations used for logical operations.
+  message MatchSet {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.config.listener.v3.ListenerFilterChainMatchPredicate.MatchSet";
+
+    // The list of rules that make up the set.
+    repeated ListenerFilterChainMatchPredicate rules = 1
+        [(validate.rules).repeated = {min_items: 2}];
+  }
+
+  oneof rule {
+    option (validate.required) = true;
+
+    // A set that describes a logical OR. If any member of the set matches, the match configuration
+    // matches.
+    MatchSet or_match = 1;
+
+    // A set that describes a logical AND. If all members of the set match, the match configuration
+    // matches.
+    MatchSet and_match = 2;
+
+    // A negation match. The match configuration will match if the negated match condition matches.
+    ListenerFilterChainMatchPredicate not_match = 3;
+
+    // The match configuration will always match.
+    bool any_match = 4 [(validate.rules).bool = {const: true}];
+
+    // Match destination port. Particularly, the match evaluation must use the recovered local port if
+    // the owning listener filter is after :ref:`an original_dst listener filter `.
+    type.v3.Int32Range destination_port_range = 5;
+  }
+}
+
+message ListenerFilter {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.listener.v3.ListenerFilter";
+
+  reserved 2;
+
+  reserved "config";
+
+  // The name of the filter to instantiate. The name must match a
+  // :ref:`supported filter `.
+ string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Filter specific configuration which depends on the filter being instantiated. + // See the supported filters for further documentation. + oneof config_type { + google.protobuf.Any typed_config = 3; + } + + // Optional match predicate used to disable the filter. The filter is enabled when this field is empty. + // See :ref:`ListenerFilterChainMatchPredicate ` + // for further examples. + ListenerFilterChainMatchPredicate filter_disabled = 4; +} diff --git a/api/envoy/config/listener/v4alpha/quic_config.proto b/api/envoy/config/listener/v4alpha/quic_config.proto new file mode 100644 index 0000000000000..b2b1df1e374f6 --- /dev/null +++ b/api/envoy/config/listener/v4alpha/quic_config.proto @@ -0,0 +1,41 @@ +syntax = "proto3"; + +package envoy.config.listener.v4alpha; + +import "envoy/config/core/v4alpha/base.proto"; + +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.config.listener.v4alpha"; +option java_outer_classname = "QuicConfigProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: QUIC listener Config] + +// Configuration specific to the QUIC protocol. +// Next id: 5 +message QuicProtocolOptions { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.listener.v3.QuicProtocolOptions"; + + // Maximum number of streams that the client can negotiate per connection. 100 + // if not specified. + google.protobuf.UInt32Value max_concurrent_streams = 1; + + // Maximum number of milliseconds that connection will be alive when there is + // no network activity. 300000ms if not specified. 
+ google.protobuf.Duration idle_timeout = 2; + + // Connection timeout in milliseconds before the crypto handshake is finished. + // 20000ms if not specified. + google.protobuf.Duration crypto_handshake_timeout = 3; + + // Runtime flag that controls whether the listener is enabled or not. If not specified, defaults + // to enabled. + core.v4alpha.RuntimeFeatureFlag enabled = 4; +} diff --git a/api/envoy/config/listener/v4alpha/udp_default_writer_config.proto b/api/envoy/config/listener/v4alpha/udp_default_writer_config.proto new file mode 100644 index 0000000000000..02660a7b49f4d --- /dev/null +++ b/api/envoy/config/listener/v4alpha/udp_default_writer_config.proto @@ -0,0 +1,23 @@ +syntax = "proto3"; + +package envoy.config.listener.v4alpha; + +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.config.listener.v4alpha"; +option java_outer_classname = "UdpDefaultWriterConfigProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Udp Default Writer Config] + +// [#not-implemented-hide:] +// Configuration specific to the Udp Default Writer. 
+message UdpDefaultWriterOptions { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.listener.v3.UdpDefaultWriterOptions"; +} diff --git a/api/envoy/config/listener/v4alpha/udp_gso_batch_writer_config.proto b/api/envoy/config/listener/v4alpha/udp_gso_batch_writer_config.proto new file mode 100644 index 0000000000000..5427fe19e7e13 --- /dev/null +++ b/api/envoy/config/listener/v4alpha/udp_gso_batch_writer_config.proto @@ -0,0 +1,23 @@ +syntax = "proto3"; + +package envoy.config.listener.v4alpha; + +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.config.listener.v4alpha"; +option java_outer_classname = "UdpGsoBatchWriterConfigProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Udp Gso Batch Writer Config] + +// [#not-implemented-hide:] +// Configuration specific to the Udp Gso Batch Writer. 
+message UdpGsoBatchWriterOptions { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.listener.v3.UdpGsoBatchWriterOptions"; +} diff --git a/api/envoy/config/listener/v4alpha/udp_listener_config.proto b/api/envoy/config/listener/v4alpha/udp_listener_config.proto new file mode 100644 index 0000000000000..7e40e9529f99c --- /dev/null +++ b/api/envoy/config/listener/v4alpha/udp_listener_config.proto @@ -0,0 +1,42 @@ +syntax = "proto3"; + +package envoy.config.listener.v4alpha; + +import "google/protobuf/any.proto"; +import "google/protobuf/struct.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.config.listener.v4alpha"; +option java_outer_classname = "UdpListenerConfigProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: UDP Listener Config] +// Listener :ref:`configuration overview ` + +message UdpListenerConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.listener.v3.UdpListenerConfig"; + + reserved 2; + + reserved "config"; + + // Used to look up UDP listener factory, matches "raw_udp_listener" or + // "quic_listener" to create a specific udp listener. + // If not specified, treat as "raw_udp_listener". + string udp_listener_name = 1; + + // Used to create a specific listener factory. To some factory, e.g. + // "raw_udp_listener", config is not needed. 
+ oneof config_type { + google.protobuf.Any typed_config = 3; + } +} + +message ActiveRawUdpListenerConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.listener.v3.ActiveRawUdpListenerConfig"; +} diff --git a/api/envoy/config/metrics/v3/metrics_service.proto b/api/envoy/config/metrics/v3/metrics_service.proto index ad9879055ba3c..4bb6c77e66c23 100644 --- a/api/envoy/config/metrics/v3/metrics_service.proto +++ b/api/envoy/config/metrics/v3/metrics_service.proto @@ -2,8 +2,11 @@ syntax = "proto3"; package envoy.config.metrics.v3; +import "envoy/config/core/v3/config_source.proto"; import "envoy/config/core/v3/grpc_service.proto"; +import "google/protobuf/wrappers.proto"; + import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -25,4 +28,14 @@ message MetricsServiceConfig { // The upstream gRPC cluster that hosts the metrics service. core.v3.GrpcService grpc_service = 1 [(validate.rules).message = {required: true}]; + + // API version for metric service transport protocol. This describes the metric service gRPC + // endpoint and version of messages used on the wire. + core.v3.ApiVersion transport_api_version = 3 [(validate.rules).enum = {defined_only: true}]; + + // If true, counters are reported as the delta between flushing intervals. Otherwise, the current + // counter value is reported. Defaults to false. + // Eventually (https://github.com/envoyproxy/envoy/issues/10968) if this value is not set, the + // sink will take updates from the :ref:`MetricsResponse `. + google.protobuf.BoolValue report_counters_as_deltas = 2; } diff --git a/api/envoy/config/metrics/v3/stats.proto b/api/envoy/config/metrics/v3/stats.proto index f2f12d73a6254..275db1f6457ac 100644 --- a/api/envoy/config/metrics/v3/stats.proto +++ b/api/envoy/config/metrics/v3/stats.proto @@ -83,6 +83,35 @@ message StatsConfig { // `issue #8771 `_ for more information. 
// If any unexpected behavior changes are observed, please open a new issue immediately. StatsMatcher stats_matcher = 3; + + // Defines rules for setting the histogram buckets. Rules are evaluated in order, and the first + // match is applied. If no match is found (or if no rules are set), the following default buckets + // are used: + // + // .. code-block:: json + // + // [ + // 0.5, + // 1, + // 5, + // 10, + // 25, + // 50, + // 100, + // 250, + // 500, + // 1000, + // 2500, + // 5000, + // 10000, + // 30000, + // 60000, + // 300000, + // 600000, + // 1800000, + // 3600000 + // ] + repeated HistogramBucketSettings histogram_bucket_settings = 4; } // Configuration for disabling stat instantiation. @@ -259,6 +288,21 @@ message TagSpecifier { } } +// Specifies a matcher for stats and the buckets that matching stats should use. +message HistogramBucketSettings { + // The stats that this rule applies to. The match is applied to the original stat name + // before tag-extraction, for example `cluster.exampleclustername.upstream_cx_length_ms`. + type.matcher.v3.StringMatcher match = 1 [(validate.rules).message = {required: true}]; + + // Each value is the upper bound of a bucket. Each bucket must be greater than 0 and unique. + // The order of the buckets does not matter. + repeated double buckets = 2 [(validate.rules).repeated = { + min_items: 1 + unique: true + items {double {gt: 0.0}} + }]; +} + // Stats configuration proto schema for built-in *envoy.stat_sinks.statsd* sink. This sink does not support // tagged metrics. // [#extension: envoy.stat_sinks.statsd] @@ -330,6 +374,14 @@ message DogStatsdSink { // Optional custom metric name prefix. See :ref:`StatsdSink's prefix field // ` for more details. string prefix = 3; + + // Optional max datagram size to use when sending UDP messages. By default Envoy + // will emit one metric per datagram. By specifying a max-size larger than a single + // metric, Envoy will emit multiple, new-line separated metrics. 
The max datagram + // size should not exceed your network's MTU. + // + // Note that this value may not be respected if smaller than a single metric. + google.protobuf.UInt64Value max_bytes_per_datagram = 4 [(validate.rules).uint64 = {gt: 0}]; } // Stats configuration proto schema for built-in *envoy.stat_sinks.hystrix* sink. diff --git a/api/envoy/config/metrics/v4alpha/BUILD b/api/envoy/config/metrics/v4alpha/BUILD new file mode 100644 index 0000000000000..4b70ffb4110a5 --- /dev/null +++ b/api/envoy/config/metrics/v4alpha/BUILD @@ -0,0 +1,14 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/core/v4alpha:pkg", + "//envoy/config/metrics/v3:pkg", + "//envoy/type/matcher/v4alpha:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/config/metrics/v4alpha/metrics_service.proto b/api/envoy/config/metrics/v4alpha/metrics_service.proto new file mode 100644 index 0000000000000..e2d83ce4c1c97 --- /dev/null +++ b/api/envoy/config/metrics/v4alpha/metrics_service.proto @@ -0,0 +1,41 @@ +syntax = "proto3"; + +package envoy.config.metrics.v4alpha; + +import "envoy/config/core/v4alpha/config_source.proto"; +import "envoy/config/core/v4alpha/grpc_service.proto"; + +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.metrics.v4alpha"; +option java_outer_classname = "MetricsServiceProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Metrics service] + +// Metrics Service is configured as a built-in *envoy.stat_sinks.metrics_service* :ref:`StatsSink +// `. 
This opaque configuration will be used to create +// Metrics Service. +// [#extension: envoy.stat_sinks.metrics_service] +message MetricsServiceConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.metrics.v3.MetricsServiceConfig"; + + // The upstream gRPC cluster that hosts the metrics service. + core.v4alpha.GrpcService grpc_service = 1 [(validate.rules).message = {required: true}]; + + // API version for metric service transport protocol. This describes the metric service gRPC + // endpoint and version of messages used on the wire. + core.v4alpha.ApiVersion transport_api_version = 3 [(validate.rules).enum = {defined_only: true}]; + + // If true, counters are reported as the delta between flushing intervals. Otherwise, the current + // counter value is reported. Defaults to false. + // Eventually (https://github.com/envoyproxy/envoy/issues/10968) if this value is not set, the + // sink will take updates from the :ref:`MetricsResponse `. + google.protobuf.BoolValue report_counters_as_deltas = 2; +} diff --git a/api/envoy/config/metrics/v4alpha/stats.proto b/api/envoy/config/metrics/v4alpha/stats.proto new file mode 100644 index 0000000000000..6265118cf9b87 --- /dev/null +++ b/api/envoy/config/metrics/v4alpha/stats.proto @@ -0,0 +1,416 @@ +syntax = "proto3"; + +package envoy.config.metrics.v4alpha; + +import "envoy/config/core/v4alpha/address.proto"; +import "envoy/type/matcher/v4alpha/string.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.metrics.v4alpha"; +option java_outer_classname = "StatsProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Stats] +// Statistics 
:ref:`architecture overview `. + +// Configuration for pluggable stats sinks. +message StatsSink { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.metrics.v3.StatsSink"; + + reserved 2; + + reserved "config"; + + // The name of the stats sink to instantiate. The name must match a supported + // stats sink. The built-in stats sinks are: + // + // * :ref:`envoy.stat_sinks.statsd ` + // * :ref:`envoy.stat_sinks.dog_statsd ` + // * :ref:`envoy.stat_sinks.metrics_service ` + // * :ref:`envoy.stat_sinks.hystrix ` + // + // Sinks optionally support tagged/multiple dimensional metrics. + string name = 1; + + // Stats sink specific configuration which depends on the sink being instantiated. See + // :ref:`StatsdSink ` for an example. + oneof config_type { + google.protobuf.Any typed_config = 3; + } +} + +// Statistics configuration such as tagging. +message StatsConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.metrics.v3.StatsConfig"; + + // Each stat name is iteratively processed through these tag specifiers. + // When a tag is matched, the first capture group is removed from the name so + // later :ref:`TagSpecifiers ` cannot match that + // same portion of the match. + repeated TagSpecifier stats_tags = 1; + + // Use all default tag regexes specified in Envoy. These can be combined with + // custom tags specified in :ref:`stats_tags + // `. They will be processed before + // the custom tags. + // + // .. note:: + // + // If any default tags are specified twice, the config will be considered + // invalid. + // + // See :repo:`well_known_names.h ` for a list of the + // default tags in Envoy. + // + // If not provided, the value is assumed to be true. + google.protobuf.BoolValue use_all_default_tags = 2; + + // Inclusion/exclusion matcher for stat name creation. If not provided, all stats are instantiated + // as normal. 
Preventing the instantiation of certain families of stats can improve memory + // performance for Envoys running especially large configs. + // + // .. warning:: + // Excluding stats may affect Envoy's behavior in undocumented ways. See + // `issue #8771 `_ for more information. + // If any unexpected behavior changes are observed, please open a new issue immediately. + StatsMatcher stats_matcher = 3; + + // Defines rules for setting the histogram buckets. Rules are evaluated in order, and the first + // match is applied. If no match is found (or if no rules are set), the following default buckets + // are used: + // + // .. code-block:: json + // + // [ + // 0.5, + // 1, + // 5, + // 10, + // 25, + // 50, + // 100, + // 250, + // 500, + // 1000, + // 2500, + // 5000, + // 10000, + // 30000, + // 60000, + // 300000, + // 600000, + // 1800000, + // 3600000 + // ] + repeated HistogramBucketSettings histogram_bucket_settings = 4; +} + +// Configuration for disabling stat instantiation. +message StatsMatcher { + // The instantiation of stats is unrestricted by default. If the goal is to configure Envoy to + // instantiate all stats, there is no need to construct a StatsMatcher. + // + // However, StatsMatcher can be used to limit the creation of families of stats in order to + // conserve memory. Stats can either be disabled entirely, or they can be + // limited by either an exclusion or an inclusion list of :ref:`StringMatcher + // ` protos: + // + // * If `reject_all` is set to `true`, no stats will be instantiated. If `reject_all` is set to + // `false`, all stats will be instantiated. + // + // * If an exclusion list is supplied, any stat name matching *any* of the StringMatchers in the + // list will not instantiate. + // + // * If an inclusion list is supplied, no stats will instantiate, except those matching *any* of + // the StringMatchers in the list. + // + // + // A StringMatcher can be used to match against an exact string, a suffix / prefix, or a regex. 
+ // **NB:** For performance reasons, it is highly recommended to use a prefix- or suffix-based + // matcher rather than a regex-based matcher. + // + // Example 1. Excluding all stats. + // + // .. code-block:: json + // + // { + // "statsMatcher": { + // "rejectAll": "true" + // } + // } + // + // Example 2. Excluding all cluster-specific stats, but not cluster-manager stats: + // + // .. code-block:: json + // + // { + // "statsMatcher": { + // "exclusionList": { + // "patterns": [ + // { + // "prefix": "cluster." + // } + // ] + // } + // } + // } + // + // Example 3. Including only manager-related stats: + // + // .. code-block:: json + // + // { + // "statsMatcher": { + // "inclusionList": { + // "patterns": [ + // { + // "prefix": "cluster_manager." + // }, + // { + // "prefix": "listener_manager." + // } + // ] + // } + // } + // } + // + + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.metrics.v3.StatsMatcher"; + + oneof stats_matcher { + option (validate.required) = true; + + // If `reject_all` is true, then all stats are disabled. If `reject_all` is false, then all + // stats are enabled. + bool reject_all = 1; + + // Exclusive match. All stats are enabled except for those matching one of the supplied + // StringMatcher protos. + type.matcher.v4alpha.ListStringMatcher exclusion_list = 2; + + // Inclusive match. No stats are enabled except for those matching one of the supplied + // StringMatcher protos. + type.matcher.v4alpha.ListStringMatcher inclusion_list = 3; + } +} + +// Designates a tag name and value pair. The value may be either a fixed value +// or a regex providing the value via capture groups. The specified tag will be +// unconditionally set if a fixed value, otherwise it will only be set if one +// or more capture groups in the regex match. 
+message TagSpecifier { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.metrics.v3.TagSpecifier"; + + // Attaches an identifier to the tag values to identify the tag being in the + // sink. Envoy has a set of default names and regexes to extract dynamic + // portions of existing stats, which can be found in :repo:`well_known_names.h + // ` in the Envoy repository. If a :ref:`tag_name + // ` is provided in the config and + // neither :ref:`regex ` or + // :ref:`fixed_value ` were specified, + // Envoy will attempt to find that name in its set of defaults and use the accompanying regex. + // + // .. note:: + // + // It is invalid to specify the same tag name twice in a config. + string tag_name = 1; + + oneof tag_value { + // Designates a tag to strip from the tag extracted name and provide as a named + // tag value for all statistics. This will only occur if any part of the name + // matches the regex provided with one or more capture groups. + // + // The first capture group identifies the portion of the name to remove. The + // second capture group (which will normally be nested inside the first) will + // designate the value of the tag for the statistic. If no second capture + // group is provided, the first will also be used to set the value of the tag. + // All other capture groups will be ignored. + // + // Example 1. a stat name ``cluster.foo_cluster.upstream_rq_timeout`` and + // one tag specifier: + // + // .. code-block:: json + // + // { + // "tag_name": "envoy.cluster_name", + // "regex": "^cluster\.((.+?)\.)" + // } + // + // Note that the regex will remove ``foo_cluster.`` making the tag extracted + // name ``cluster.upstream_rq_timeout`` and the tag value for + // ``envoy.cluster_name`` will be ``foo_cluster`` (note: there will be no + // ``.`` character because of the second capture group). + // + // Example 2. 
a stat name + // ``http.connection_manager_1.user_agent.ios.downstream_cx_total`` and two + // tag specifiers: + // + // .. code-block:: json + // + // [ + // { + // "tag_name": "envoy.http_user_agent", + // "regex": "^http(?=\.).*?\.user_agent\.((.+?)\.)\w+?$" + // }, + // { + // "tag_name": "envoy.http_conn_manager_prefix", + // "regex": "^http\.((.*?)\.)" + // } + // ] + // + // The two regexes of the specifiers will be processed in the definition order. + // + // The first regex will remove ``ios.``, leaving the tag extracted name + // ``http.connection_manager_1.user_agent.downstream_cx_total``. The tag + // ``envoy.http_user_agent`` will be added with tag value ``ios``. + // + // The second regex will remove ``connection_manager_1.`` from the tag + // extracted name produced by the first regex + // ``http.connection_manager_1.user_agent.downstream_cx_total``, leaving + // ``http.user_agent.downstream_cx_total`` as the tag extracted name. The tag + // ``envoy.http_conn_manager_prefix`` will be added with the tag value + // ``connection_manager_1``. + string regex = 2 [(validate.rules).string = {max_bytes: 1024}]; + + // Specifies a fixed tag value for the ``tag_name``. + string fixed_value = 3; + } +} + +// Specifies a matcher for stats and the buckets that matching stats should use. +message HistogramBucketSettings { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.metrics.v3.HistogramBucketSettings"; + + // The stats that this rule applies to. The match is applied to the original stat name + // before tag-extraction, for example `cluster.exampleclustername.upstream_cx_length_ms`. + type.matcher.v4alpha.StringMatcher match = 1 [(validate.rules).message = {required: true}]; + + // Each value is the upper bound of a bucket. Each bucket must be greater than 0 and unique. + // The order of the buckets does not matter. 
+ repeated double buckets = 2 [(validate.rules).repeated = { + min_items: 1 + unique: true + items {double {gt: 0.0}} + }]; +} + +// Stats configuration proto schema for built-in *envoy.stat_sinks.statsd* sink. This sink does not support +// tagged metrics. +// [#extension: envoy.stat_sinks.statsd] +message StatsdSink { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.metrics.v3.StatsdSink"; + + oneof statsd_specifier { + option (validate.required) = true; + + // The UDP address of a running `statsd `_ + // compliant listener. If specified, statistics will be flushed to this + // address. + core.v4alpha.Address address = 1; + + // The name of a cluster that is running a TCP `statsd + // `_ compliant listener. If specified, + // Envoy will connect to this cluster to flush statistics. + string tcp_cluster_name = 2; + } + + // Optional custom prefix for StatsdSink. If + // specified, this will override the default prefix. + // For example: + // + // .. code-block:: json + // + // { + // "prefix" : "envoy-prod" + // } + // + // will change emitted stats to + // + // .. code-block:: cpp + // + // envoy-prod.test_counter:1|c + // envoy-prod.test_timer:5|ms + // + // Note that the default prefix, "envoy", will be used if a prefix is not + // specified. + // + // Stats with default prefix: + // + // .. code-block:: cpp + // + // envoy.test_counter:1|c + // envoy.test_timer:5|ms + string prefix = 3; +} + +// Stats configuration proto schema for built-in *envoy.stat_sinks.dog_statsd* sink. +// The sink emits stats with `DogStatsD `_ +// compatible tags. Tags are configurable via :ref:`StatsConfig +// `. +// [#extension: envoy.stat_sinks.dog_statsd] +message DogStatsdSink { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.metrics.v3.DogStatsdSink"; + + reserved 2; + + oneof dog_statsd_specifier { + option (validate.required) = true; + + // The UDP address of a running DogStatsD compliant listener. 
If specified, + // statistics will be flushed to this address. + core.v4alpha.Address address = 1; + } + + // Optional custom metric name prefix. See :ref:`StatsdSink's prefix field + // ` for more details. + string prefix = 3; + + // Optional max datagram size to use when sending UDP messages. By default Envoy + // will emit one metric per datagram. By specifying a max-size larger than a single + // metric, Envoy will emit multiple, new-line separated metrics. The max datagram + // size should not exceed your network's MTU. + // + // Note that this value may not be respected if smaller than a single metric. + google.protobuf.UInt64Value max_bytes_per_datagram = 4 [(validate.rules).uint64 = {gt: 0}]; +} + +// Stats configuration proto schema for built-in *envoy.stat_sinks.hystrix* sink. +// The sink emits stats in `text/event-stream +// `_ +// formatted stream for use by `Hystrix dashboard +// `_. +// +// Note that only a single HystrixSink should be configured. +// +// Streaming is started through an admin endpoint :http:get:`/hystrix_event_stream`. +// [#extension: envoy.stat_sinks.hystrix] +message HystrixSink { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.metrics.v3.HystrixSink"; + + // The number of buckets the rolling statistical window is divided into. + // + // Each time the sink is flushed, all relevant Envoy statistics are sampled and + // added to the rolling window (removing the oldest samples in the window + // in the process). The sink then outputs the aggregate statistics across the + // current rolling window to the event stream(s). + // + // rolling_window(ms) = stats_flush_interval(ms) * num_of_buckets + // + // More detailed explanation can be found in `Hystrix wiki + // `_. 
+ int64 num_buckets = 1; +} diff --git a/api/envoy/config/ratelimit/v3/rls.proto b/api/envoy/config/ratelimit/v3/rls.proto index bb3c538bbabff..98889b1e28825 100644 --- a/api/envoy/config/ratelimit/v3/rls.proto +++ b/api/envoy/config/ratelimit/v3/rls.proto @@ -2,6 +2,7 @@ syntax = "proto3"; package envoy.config.ratelimit.v3; +import "envoy/config/core/v3/config_source.proto"; import "envoy/config/core/v3/grpc_service.proto"; import "udpa/annotations/status.proto"; @@ -26,4 +27,8 @@ message RateLimitServiceConfig { // will connect to this cluster when it needs to make rate limit service // requests. core.v3.GrpcService grpc_service = 2 [(validate.rules).message = {required: true}]; + + // API version for rate limit transport protocol. This describes the rate limit gRPC endpoint and + // version of messages used on the wire. + core.v3.ApiVersion transport_api_version = 4 [(validate.rules).enum = {defined_only: true}]; } diff --git a/api/envoy/config/rbac/v3/BUILD b/api/envoy/config/rbac/v3/BUILD index bef4331a1e651..ce88bd5e6c626 100644 --- a/api/envoy/config/rbac/v3/BUILD +++ b/api/envoy/config/rbac/v3/BUILD @@ -11,6 +11,7 @@ api_proto_package( "//envoy/config/route/v3:pkg", "//envoy/type/matcher/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_google_googleapis//google/api/expr/v1alpha1:checked_proto", "@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto", ], ) diff --git a/api/envoy/config/rbac/v3/rbac.proto b/api/envoy/config/rbac/v3/rbac.proto index 040f537d1f5c8..278e6857603fe 100644 --- a/api/envoy/config/rbac/v3/rbac.proto +++ b/api/envoy/config/rbac/v3/rbac.proto @@ -8,8 +8,10 @@ import "envoy/type/matcher/v3/metadata.proto"; import "envoy/type/matcher/v3/path.proto"; import "envoy/type/matcher/v3/string.proto"; +import "google/api/expr/v1alpha1/checked.proto"; import "google/api/expr/v1alpha1/syntax.proto"; +import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; 
import "validate/validate.proto"; @@ -22,8 +24,14 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Role Based Access Control (RBAC)] // Role Based Access Control (RBAC) provides service-level and method-level access control for a -// service. RBAC policies are additive. The policies are examined in order. A request is allowed -// once a matching policy is found (suppose the `action` is ALLOW). +// service. RBAC policies are additive. The policies are examined in order. Requests are allowed +// or denied based on the `action` and whether a matching policy is found. For instance, if the +// action is ALLOW and a matching policy is found the request should be allowed. +// +// RBAC can also be used to make access logging decisions by communicating with access loggers +// through dynamic metadata. When the action is LOG and at least one policy matches, the +// `access_log_hint` value in the shared key namespace 'envoy.common' is set to `true` indicating +// the request should be logged. // // Here is an example of RBAC configuration. It has two policies: // @@ -66,45 +74,69 @@ message RBAC { // Should we do safe-list or block-list style access control? enum Action { - // The policies grant access to principals. The rest is denied. This is safe-list style + // The policies grant access to principals. The rest are denied. This is safe-list style // access control. This is the default type. ALLOW = 0; - // The policies deny access to principals. The rest is allowed. This is block-list style + // The policies deny access to principals. The rest are allowed. This is block-list style // access control. DENY = 1; + + // The policies set the `access_log_hint` dynamic metadata key based on if requests match. + // All requests are allowed. + LOG = 2; } - // The action to take if a policy matches. The request is allowed if and only if: + // The action to take if a policy matches. 
Every action either allows or denies a request, + // and can also carry out action-specific operations. + // + // Actions: + // + // * ALLOW: Allows the request if and only if there is a policy that matches + // the request. + // * DENY: Allows the request if and only if there are no policies that + // match the request. + // * LOG: Allows all requests. If at least one policy matches, the dynamic + // metadata key `access_log_hint` is set to the value `true` under the shared + // key namespace 'envoy.common'. If no policies match, it is set to `false`. + // Other actions do not modify this key. // - // * `action` is "ALLOWED" and at least one policy matches - // * `action` is "DENY" and none of the policies match Action action = 1; // Maps from policy name to policy. A match occurs when at least one policy matches the request. map policies = 2; } -// Policy specifies a role and the principals that are assigned/denied the role. A policy matches if -// and only if at least one of its permissions match the action taking place AND at least one of its -// principals match the downstream AND the condition is true if specified. +// Policy specifies a role and the principals that are assigned/denied the role. +// A policy matches if and only if at least one of its permissions match the +// action taking place AND at least one of its principals match the downstream +// AND the condition is true if specified. message Policy { option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v2.Policy"; - // Required. The set of permissions that define a role. Each permission is matched with OR - // semantics. To match all actions for this policy, a single Permission with the `any` field set - // to true should be used. + // Required. The set of permissions that define a role. Each permission is + // matched with OR semantics. To match all actions for this policy, a single + // Permission with the `any` field set to true should be used. 
repeated Permission permissions = 1 [(validate.rules).repeated = {min_items: 1}]; - // Required. The set of principals that are assigned/denied the role based on “action”. Each - // principal is matched with OR semantics. To match all downstreams for this policy, a single - // Principal with the `any` field set to true should be used. + // Required. The set of principals that are assigned/denied the role based on + // “action”. Each principal is matched with OR semantics. To match all + // downstreams for this policy, a single Principal with the `any` field set to + // true should be used. repeated Principal principals = 2 [(validate.rules).repeated = {min_items: 1}]; // An optional symbolic expression specifying an access control // :ref:`condition `. The condition is combined // with the permissions and the principals as a clause with AND semantics. - google.api.expr.v1alpha1.Expr condition = 3; + // Only be used when checked_condition is not used. + google.api.expr.v1alpha1.Expr condition = 3 + [(udpa.annotations.field_migrate).oneof_promotion = "expression_specifier"]; + + // [#not-implemented-hide:] + // An optional symbolic expression that has been successfully type checked. + // Only be used when condition is not used. + google.api.expr.v1alpha1.CheckedExpr checked_condition = 4 + [(udpa.annotations.field_migrate).oneof_promotion = "expression_specifier"]; } // Permission defines an action (or actions) that a principal can take. @@ -151,9 +183,9 @@ message Permission { // Metadata that describes additional information about the action. type.matcher.v3.MetadataMatcher metadata = 7; - // Negates matching the provided permission. For instance, if the value of `not_rule` would - // match, this permission would not match. Conversely, if the value of `not_rule` would not - // match, this permission would match. + // Negates matching the provided permission. For instance, if the value of + // `not_rule` would match, this permission would not match. 
Conversely, if + // the value of `not_rule` would not match, this permission would match. Permission not_rule = 8; // The request server from the client's connection request. This is @@ -166,7 +198,8 @@ message Permission { // // * If the :ref:`TLS Inspector ` // filter is not added, and if a `FilterChainMatch` is not defined for - // the :ref:`server name `, + // the :ref:`server name + // `, // a TLS connection's requested SNI server name will be treated as if it // wasn't present. // @@ -179,13 +212,14 @@ message Permission { } } -// Principal defines an identity or a group of identities for a downstream subject. +// Principal defines an identity or a group of identities for a downstream +// subject. // [#next-free-field: 12] message Principal { option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v2.Principal"; - // Used in the `and_ids` and `or_ids` fields in the `identifier` oneof. Depending on the context, - // each are applied with the associated behavior. + // Used in the `and_ids` and `or_ids` fields in the `identifier` oneof. + // Depending on the context, each are applied with the associated behavior. message Set { option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v2.Principal.Set"; @@ -200,19 +234,21 @@ message Principal { reserved 1; - // The name of the principal. If set, The URI SAN or DNS SAN in that order is used from the - // certificate, otherwise the subject field is used. If unset, it applies to any user that is - // authenticated. + // The name of the principal. If set, The URI SAN or DNS SAN in that order + // is used from the certificate, otherwise the subject field is used. If + // unset, it applies to any user that is authenticated. type.matcher.v3.StringMatcher principal_name = 2; } oneof identifier { option (validate.required) = true; - // A set of identifiers that all must match in order to define the downstream. 
+ // A set of identifiers that all must match in order to define the + // downstream. Set and_ids = 1; - // A set of identifiers at least one must match in order to define the downstream. + // A set of identifiers at least one must match in order to define the + // downstream. Set or_ids = 2; // When any is set, it matches any downstream. @@ -227,21 +263,23 @@ message Principal { // A CIDR block that describes the downstream remote/origin address. // Note: This is always the physical peer even if the - // :ref:`remote_ip ` is inferred - // from for example the x-forwarder-for header, proxy protocol, etc. + // :ref:`remote_ip ` is + // inferred from for example the x-forwarder-for header, proxy protocol, + // etc. core.v3.CidrRange direct_remote_ip = 10; // A CIDR block that describes the downstream remote/origin address. // Note: This may not be the physical peer and could be different from the - // :ref:`direct_remote_ip `. - // E.g, if the remote ip is inferred from for example the x-forwarder-for header, - // proxy protocol, etc. + // :ref:`direct_remote_ip + // `. E.g, if the + // remote ip is inferred from for example the x-forwarder-for header, proxy + // protocol, etc. core.v3.CidrRange remote_ip = 11; - // A header (or pseudo-header such as :path or :method) on the incoming HTTP request. Only - // available for HTTP request. - // Note: the pseudo-header :path includes the query and fragment string. Use the `url_path` - // field if you want to match the URL path without the query and fragment string. + // A header (or pseudo-header such as :path or :method) on the incoming HTTP + // request. Only available for HTTP request. Note: the pseudo-header :path + // includes the query and fragment string. Use the `url_path` field if you + // want to match the URL path without the query and fragment string. route.v3.HeaderMatcher header = 6; // A URL path on the incoming HTTP request. Only available for HTTP. 
@@ -250,9 +288,9 @@ message Principal { // Metadata that describes additional information about the principal. type.matcher.v3.MetadataMatcher metadata = 7; - // Negates matching the provided principal. For instance, if the value of `not_id` would match, - // this principal would not match. Conversely, if the value of `not_id` would not match, this - // principal would match. + // Negates matching the provided principal. For instance, if the value of + // `not_id` would match, this principal would not match. Conversely, if the + // value of `not_id` would not match, this principal would match. Principal not_id = 8; } } diff --git a/api/envoy/config/rbac/v4alpha/BUILD b/api/envoy/config/rbac/v4alpha/BUILD index dbfa8be4f36f2..be78d751372e6 100644 --- a/api/envoy/config/rbac/v4alpha/BUILD +++ b/api/envoy/config/rbac/v4alpha/BUILD @@ -9,8 +9,9 @@ api_proto_package( "//envoy/config/core/v4alpha:pkg", "//envoy/config/rbac/v3:pkg", "//envoy/config/route/v4alpha:pkg", - "//envoy/type/matcher/v3:pkg", + "//envoy/type/matcher/v4alpha:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_google_googleapis//google/api/expr/v1alpha1:checked_proto", "@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto", ], ) diff --git a/api/envoy/config/rbac/v4alpha/rbac.proto b/api/envoy/config/rbac/v4alpha/rbac.proto index 097231282f455..cc9d8933ababc 100644 --- a/api/envoy/config/rbac/v4alpha/rbac.proto +++ b/api/envoy/config/rbac/v4alpha/rbac.proto @@ -4,10 +4,11 @@ package envoy.config.rbac.v4alpha; import "envoy/config/core/v4alpha/address.proto"; import "envoy/config/route/v4alpha/route_components.proto"; -import "envoy/type/matcher/v3/metadata.proto"; -import "envoy/type/matcher/v3/path.proto"; -import "envoy/type/matcher/v3/string.proto"; +import "envoy/type/matcher/v4alpha/metadata.proto"; +import "envoy/type/matcher/v4alpha/path.proto"; +import "envoy/type/matcher/v4alpha/string.proto"; +import "google/api/expr/v1alpha1/checked.proto"; import 
"google/api/expr/v1alpha1/syntax.proto"; import "udpa/annotations/status.proto"; @@ -22,8 +23,14 @@ option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSIO // [#protodoc-title: Role Based Access Control (RBAC)] // Role Based Access Control (RBAC) provides service-level and method-level access control for a -// service. RBAC policies are additive. The policies are examined in order. A request is allowed -// once a matching policy is found (suppose the `action` is ALLOW). +// service. RBAC policies are additive. The policies are examined in order. Requests are allowed +// or denied based on the `action` and whether a matching policy is found. For instance, if the +// action is ALLOW and a matching policy is found the request should be allowed. +// +// RBAC can also be used to make access logging decisions by communicating with access loggers +// through dynamic metadata. When the action is LOG and at least one policy matches, the +// `access_log_hint` value in the shared key namespace 'envoy.common' is set to `true` indicating +// the request should be logged. // // Here is an example of RBAC configuration. It has two policies: // @@ -66,45 +73,69 @@ message RBAC { // Should we do safe-list or block-list style access control? enum Action { - // The policies grant access to principals. The rest is denied. This is safe-list style + // The policies grant access to principals. The rest are denied. This is safe-list style // access control. This is the default type. ALLOW = 0; - // The policies deny access to principals. The rest is allowed. This is block-list style + // The policies deny access to principals. The rest are allowed. This is block-list style // access control. DENY = 1; + + // The policies set the `access_log_hint` dynamic metadata key based on if requests match. + // All requests are allowed. + LOG = 2; } - // The action to take if a policy matches. The request is allowed if and only if: + // The action to take if a policy matches. 
Every action either allows or denies a request, + // and can also carry out action-specific operations. + // + // Actions: + // + // * ALLOW: Allows the request if and only if there is a policy that matches + // the request. + // * DENY: Allows the request if and only if there are no policies that + // match the request. + // * LOG: Allows all requests. If at least one policy matches, the dynamic + // metadata key `access_log_hint` is set to the value `true` under the shared + // key namespace 'envoy.common'. If no policies match, it is set to `false`. + // Other actions do not modify this key. // - // * `action` is "ALLOWED" and at least one policy matches - // * `action` is "DENY" and none of the policies match Action action = 1; // Maps from policy name to policy. A match occurs when at least one policy matches the request. map policies = 2; } -// Policy specifies a role and the principals that are assigned/denied the role. A policy matches if -// and only if at least one of its permissions match the action taking place AND at least one of its -// principals match the downstream AND the condition is true if specified. +// Policy specifies a role and the principals that are assigned/denied the role. +// A policy matches if and only if at least one of its permissions match the +// action taking place AND at least one of its principals match the downstream +// AND the condition is true if specified. message Policy { option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v3.Policy"; - // Required. The set of permissions that define a role. Each permission is matched with OR - // semantics. To match all actions for this policy, a single Permission with the `any` field set - // to true should be used. + // Required. The set of permissions that define a role. Each permission is + // matched with OR semantics. To match all actions for this policy, a single + // Permission with the `any` field set to true should be used. 
repeated Permission permissions = 1 [(validate.rules).repeated = {min_items: 1}]; - // Required. The set of principals that are assigned/denied the role based on “action”. Each - // principal is matched with OR semantics. To match all downstreams for this policy, a single - // Principal with the `any` field set to true should be used. + // Required. The set of principals that are assigned/denied the role based on + // “action”. Each principal is matched with OR semantics. To match all + // downstreams for this policy, a single Principal with the `any` field set to + // true should be used. repeated Principal principals = 2 [(validate.rules).repeated = {min_items: 1}]; - // An optional symbolic expression specifying an access control - // :ref:`condition `. The condition is combined - // with the permissions and the principals as a clause with AND semantics. - google.api.expr.v1alpha1.Expr condition = 3; + oneof expression_specifier { + // An optional symbolic expression specifying an access control + // :ref:`condition `. The condition is combined + // with the permissions and the principals as a clause with AND semantics. + // Only be used when checked_condition is not used. + google.api.expr.v1alpha1.Expr condition = 3; + + // [#not-implemented-hide:] + // An optional symbolic expression that has been successfully type checked. + // Only be used when condition is not used. + google.api.expr.v1alpha1.CheckedExpr checked_condition = 4; + } } // Permission defines an action (or actions) that a principal can take. @@ -140,7 +171,7 @@ message Permission { route.v4alpha.HeaderMatcher header = 4; // A URL path on the incoming HTTP request. Only available for HTTP. - type.matcher.v3.PathMatcher url_path = 10; + type.matcher.v4alpha.PathMatcher url_path = 10; // A CIDR block that describes the destination IP. 
core.v4alpha.CidrRange destination_ip = 5; @@ -149,11 +180,11 @@ message Permission { uint32 destination_port = 6 [(validate.rules).uint32 = {lte: 65535}]; // Metadata that describes additional information about the action. - type.matcher.v3.MetadataMatcher metadata = 7; + type.matcher.v4alpha.MetadataMatcher metadata = 7; - // Negates matching the provided permission. For instance, if the value of `not_rule` would - // match, this permission would not match. Conversely, if the value of `not_rule` would not - // match, this permission would match. + // Negates matching the provided permission. For instance, if the value of + // `not_rule` would match, this permission would not match. Conversely, if + // the value of `not_rule` would not match, this permission would match. Permission not_rule = 8; // The request server from the client's connection request. This is @@ -166,7 +197,8 @@ message Permission { // // * If the :ref:`TLS Inspector ` // filter is not added, and if a `FilterChainMatch` is not defined for - // the :ref:`server name `, + // the :ref:`server name + // `, // a TLS connection's requested SNI server name will be treated as if it // wasn't present. // @@ -175,17 +207,18 @@ message Permission { // // Please refer to :ref:`this FAQ entry ` to learn to // setup SNI. - type.matcher.v3.StringMatcher requested_server_name = 9; + type.matcher.v4alpha.StringMatcher requested_server_name = 9; } } -// Principal defines an identity or a group of identities for a downstream subject. +// Principal defines an identity or a group of identities for a downstream +// subject. // [#next-free-field: 12] message Principal { option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v3.Principal"; - // Used in the `and_ids` and `or_ids` fields in the `identifier` oneof. Depending on the context, - // each are applied with the associated behavior. + // Used in the `and_ids` and `or_ids` fields in the `identifier` oneof. 
+ // Depending on the context, each are applied with the associated behavior. message Set { option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v3.Principal.Set"; @@ -200,10 +233,10 @@ message Principal { reserved 1; - // The name of the principal. If set, The URI SAN or DNS SAN in that order is used from the - // certificate, otherwise the subject field is used. If unset, it applies to any user that is - // authenticated. - type.matcher.v3.StringMatcher principal_name = 2; + // The name of the principal. If set, The URI SAN or DNS SAN in that order + // is used from the certificate, otherwise the subject field is used. If + // unset, it applies to any user that is authenticated. + type.matcher.v4alpha.StringMatcher principal_name = 2; } reserved 5; @@ -213,10 +246,12 @@ message Principal { oneof identifier { option (validate.required) = true; - // A set of identifiers that all must match in order to define the downstream. + // A set of identifiers that all must match in order to define the + // downstream. Set and_ids = 1; - // A set of identifiers at least one must match in order to define the downstream. + // A set of identifiers at least one must match in order to define the + // downstream. Set or_ids = 2; // When any is set, it matches any downstream. @@ -227,32 +262,34 @@ message Principal { // A CIDR block that describes the downstream remote/origin address. // Note: This is always the physical peer even if the - // :ref:`remote_ip ` is inferred - // from for example the x-forwarder-for header, proxy protocol, etc. + // :ref:`remote_ip ` is + // inferred from for example the x-forwarder-for header, proxy protocol, + // etc. core.v4alpha.CidrRange direct_remote_ip = 10; // A CIDR block that describes the downstream remote/origin address. // Note: This may not be the physical peer and could be different from the - // :ref:`direct_remote_ip `. 
- // E.g, if the remote ip is inferred from for example the x-forwarder-for header, - // proxy protocol, etc. + // :ref:`direct_remote_ip + // `. E.g, if the + // remote ip is inferred from for example the x-forwarder-for header, proxy + // protocol, etc. core.v4alpha.CidrRange remote_ip = 11; - // A header (or pseudo-header such as :path or :method) on the incoming HTTP request. Only - // available for HTTP request. - // Note: the pseudo-header :path includes the query and fragment string. Use the `url_path` - // field if you want to match the URL path without the query and fragment string. + // A header (or pseudo-header such as :path or :method) on the incoming HTTP + // request. Only available for HTTP request. Note: the pseudo-header :path + // includes the query and fragment string. Use the `url_path` field if you + // want to match the URL path without the query and fragment string. route.v4alpha.HeaderMatcher header = 6; // A URL path on the incoming HTTP request. Only available for HTTP. - type.matcher.v3.PathMatcher url_path = 9; + type.matcher.v4alpha.PathMatcher url_path = 9; // Metadata that describes additional information about the principal. - type.matcher.v3.MetadataMatcher metadata = 7; + type.matcher.v4alpha.MetadataMatcher metadata = 7; - // Negates matching the provided principal. For instance, if the value of `not_id` would match, - // this principal would not match. Conversely, if the value of `not_id` would not match, this - // principal would match. + // Negates matching the provided principal. For instance, if the value of + // `not_id` would match, this principal would not match. Conversely, if the + // value of `not_id` would not match, this principal would match. 
Principal not_id = 8; } } diff --git a/api/envoy/config/route/v3/BUILD b/api/envoy/config/route/v3/BUILD index 019cf27528c6a..6f653723e5ae3 100644 --- a/api/envoy/config/route/v3/BUILD +++ b/api/envoy/config/route/v3/BUILD @@ -11,6 +11,7 @@ api_proto_package( "//envoy/api/v2/route:pkg", "//envoy/config/core/v3:pkg", "//envoy/type/matcher/v3:pkg", + "//envoy/type/metadata/v3:pkg", "//envoy/type/tracing/v3:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", diff --git a/api/envoy/config/route/v3/route_components.proto b/api/envoy/config/route/v3/route_components.proto index 70c52010efa0f..c35e210691c5e 100644 --- a/api/envoy/config/route/v3/route_components.proto +++ b/api/envoy/config/route/v3/route_components.proto @@ -3,8 +3,11 @@ syntax = "proto3"; package envoy.config.route.v3; import "envoy/config/core/v3/base.proto"; +import "envoy/config/core/v3/extension.proto"; +import "envoy/config/core/v3/proxy_protocol.proto"; import "envoy/type/matcher/v3/regex.proto"; import "envoy/type/matcher/v3/string.proto"; +import "envoy/type/metadata/v3/metadata.proto"; import "envoy/type/tracing/v3/custom_tag.proto"; import "envoy/type/v3/percent.proto"; import "envoy/type/v3/range.proto"; @@ -15,6 +18,7 @@ import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -123,7 +127,9 @@ message VirtualHost { // Specifies a list of HTTP headers that should be removed from each response // handled by this virtual host. - repeated string response_headers_to_remove = 11; + repeated string response_headers_to_remove = 11 [(validate.rules).repeated = { + items {string {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} + }]; // Indicates that the virtual host has a CORS policy. 
CorsPolicy cors = 8; @@ -223,6 +229,8 @@ message Route { // [#not-implemented-hide:] // If true, a filter will define the action (e.g., it could dynamically generate the // RouteAction). + // [#comment: TODO(samflattery): Remove cleanup in route_fuzz_test.cc when + // implemented] FilterAction filter_action = 17; } @@ -269,7 +277,9 @@ message Route { // Specifies a list of HTTP headers that should be removed from each response // to requests matching this route. - repeated string response_headers_to_remove = 11; + repeated string response_headers_to_remove = 11 [(validate.rules).repeated = { + items {string {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} + }]; // Presence of the object defines whether the connection manager's tracing configuration // is overridden by this route specific instance. @@ -370,7 +380,7 @@ message WeightedCluster { string runtime_key_prefix = 2; } -// [#next-free-field: 12] +// [#next-free-field: 13] message RouteMatch { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RouteMatch"; @@ -392,6 +402,10 @@ message RouteMatch { google.protobuf.BoolValue validated = 2; } + // An extensible message for matching CONNECT requests. + message ConnectMatcher { + } + reserved 5, 3; reserved "regex"; @@ -420,6 +434,17 @@ message RouteMatch { // on :path, etc. The issue with that is it is unclear how to generically deal with query string // stripping. This needs more thought.] type.matcher.v3.RegexMatcher safe_regex = 10 [(validate.rules).message = {required: true}]; + + // If this is used as the matcher, the matcher will only match CONNECT requests. + // Note that this will not match HTTP/2 upgrade-style CONNECT requests + // (WebSocket and the like) as they are normalized in Envoy as HTTP/1.1 style + // upgrades. + // This is the only way to match CONNECT requests for HTTP/1.1. For HTTP/2, + // where CONNECT requests may have a path, the path matchers will work if + // there is a path present. 
+ // Note that CONNECT support is currently considered alpha in Envoy. + // [#comment:TODO(htuch): Replace the above comment with an alpha tag.] + ConnectMatcher connect_matcher = 12; } // Indicates that prefix/path matching should be case insensitive. The default @@ -520,7 +545,7 @@ message CorsPolicy { core.v3.RuntimeFractionalPercent shadow_enabled = 10; } -// [#next-free-field: 34] +// [#next-free-field: 35] message RouteAction { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RouteAction"; @@ -533,7 +558,10 @@ message RouteAction { } // Configures :ref:`internal redirect ` behavior. + // [#next-major-version: remove this definition - it's defined in the InternalRedirectPolicy message.] enum InternalRedirectAction { + option deprecated = true; + PASS_THROUGH_INTERNAL_REDIRECT = 0; HANDLE_INTERNAL_REDIRECT = 1; } @@ -591,6 +619,10 @@ message RouteAction { string header_name = 1 [ (validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false} ]; + + // If specified, the request header value will be rewritten and used + // to produce the hash key. + type.matcher.v3.RegexMatchAndSubstitute regex_rewrite = 2; } // Envoy supports two types of cookie affinity: @@ -705,6 +737,13 @@ message RouteAction { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RouteAction.UpgradeConfig"; + // Configuration for sending data upstream as a raw data payload. This is used for + // CONNECT requests, when forwarding CONNECT payload as raw TCP. + message ConnectConfig { + // If present, the proxy protocol header will be prepended to the CONNECT payload sent upstream. + core.v3.ProxyProtocolConfig proxy_protocol_config = 1; + } + + // The case-insensitive name of this upgrade, e.g. "websocket". + // For each upgrade type present in upgrade_configs, requests with + // Upgrade: [upgrade_type] will be proxied upstream. 
@@ -713,6 +752,12 @@ message RouteAction { // Determines if upgrades are available on this route. Defaults to true. google.protobuf.BoolValue enabled = 2; + + // Configuration for sending data upstream as a raw data payload. This is used for + // CONNECT requests, when forwarding CONNECT payload as raw TCP. + // Note that CONNECT support is currently considered alpha in Envoy. + // [#comment:TODO(htuch): Replace the above comment with an alpha tag. + ConnectConfig connect_config = 3; } reserved 12, 18, 19, 16, 22, 21, 10; @@ -957,7 +1002,13 @@ message RouteAction { repeated UpgradeConfig upgrade_configs = 25; - InternalRedirectAction internal_redirect_action = 26; + // If present, Envoy will try to follow an upstream redirect response instead of proxying the + // response back to the downstream. An upstream redirect response is defined + // by :ref:`redirect_response_codes + // `. + InternalRedirectPolicy internal_redirect_policy = 34; + + InternalRedirectAction internal_redirect_action = 26 [deprecated = true]; // An internal redirect is handled, iff the number of previous internal redirects that a // downstream request has encountered is lower than this value, and @@ -973,7 +1024,7 @@ message RouteAction { // will pass the redirect back to downstream. // // If not specified, at most one redirect will be followed. - google.protobuf.UInt32Value max_internal_redirects = 31; + google.protobuf.UInt32Value max_internal_redirects = 31 [deprecated = true]; // Indicates that the route has a hedge policy. Note that if this is set, // it'll take precedence over the virtual host level hedge policy entirely @@ -1044,7 +1095,8 @@ message RetryPolicy { // Specifies the allowed number of retries. This parameter is optional and // defaults to 1. These are the same conditions documented for // :ref:`config_http_filters_router_x-envoy-max-retries`. 
- google.protobuf.UInt32Value num_retries = 2; + google.protobuf.UInt32Value num_retries = 2 + [(udpa.annotations.field_migrate).rename = "max_retries"]; // Specifies a non-zero upstream timeout per retry attempt. This parameter is optional. The // same conditions documented for @@ -1164,6 +1216,21 @@ message RedirectAction { oneof path_rewrite_specifier { // The path portion of the URL will be swapped with this value. + // Please note that query string in path_redirect will override the + // request's query string and will not be stripped. + // + // For example, let's say we have the following routes: + // + // - match: { path: "/old-path-1" } + // redirect: { path_redirect: "/new-path-1" } + // - match: { path: "/old-path-2" } + // redirect: { path_redirect: "/new-path-2", strip-query: "true" } + // - match: { path: "/old-path-3" } + // redirect: { path_redirect: "/new-path-3?foo=1", strip_query: "true" } + // + // 1. if request uri is "/old-path-1?bar=1", users will be redirected to "/new-path-1?bar=1" + // 2. if request uri is "/old-path-2?bar=1", users will be redirected to "/new-path-2" + // 3. if request uri is "/old-path-3?bar=1", users will be redirected to "/new-path-3?foo=1" string path_redirect = 2 [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; @@ -1299,7 +1366,7 @@ message VirtualCluster { message RateLimit { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RateLimit"; - // [#next-free-field: 7] + // [#next-free-field: 8] message Action { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RateLimit.Action"; @@ -1356,6 +1423,11 @@ message RateLimit { // The key to use in the descriptor entry. string descriptor_key = 2 [(validate.rules).string = {min_bytes: 1}]; + + // If set to true, Envoy skips the descriptor while calling rate limiting service + // when header is not present in the request. 
By default it skips calling the + // rate limiting service if this header is not present in the request. + bool skip_if_absent = 3; } // The following descriptor entry is appended to the descriptor and is populated using the @@ -1408,6 +1480,24 @@ message RateLimit { repeated HeaderMatcher headers = 3 [(validate.rules).repeated = {min_items: 1}]; } + // The following descriptor entry is appended when the dynamic metadata contains a key value: + // + // .. code-block:: cpp + // + // ("", "") + message DynamicMetaData { + // The key to use in the descriptor entry. + string descriptor_key = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Metadata struct that defines the key and path to retrieve the string value. A match will + // only happen if the value in the dynamic metadata is of type string. + type.metadata.v3.MetadataKey metadata_key = 2 [(validate.rules).message = {required: true}]; + + // An optional value to use if *metadata_key* is empty. If not set and + // no value is present under the metadata_key then no descriptor is generated. + string default_value = 3; + } + oneof action_specifier { option (validate.required) = true; @@ -1428,6 +1518,27 @@ message RateLimit { // Rate limit on the existence of request headers. HeaderValueMatch header_value_match = 6; + + // Rate limit on dynamic metadata. + DynamicMetaData dynamic_metadata = 7; + } + } + + message Override { + // Fetches the override from the dynamic metadata. + message DynamicMetadata { + // Metadata struct that defines the key and path to retrieve the struct value. + // The value must be a struct containing an integer "requests_per_unit" property + // and a "unit" property with a value parseable to :ref:`RateLimitUnit + // enum ` + type.metadata.v3.MetadataKey metadata_key = 1 [(validate.rules).message = {required: true}]; + } + + oneof override_specifier { + option (validate.required) = true; + + // Limit override from dynamic metadata. 
+ DynamicMetadata dynamic_metadata = 1; } } @@ -1450,6 +1561,12 @@ message RateLimit { // configuration. See :ref:`composing actions // ` for additional documentation. repeated Action actions = 3 [(validate.rules).repeated = {min_items: 1}]; + + // An optional limit override to be appended to the descriptor produced by this + // rate limit configuration. If the override value is invalid or cannot be resolved + // from metadata, no override is provided. See :ref:`rate limit override + // ` for more information. + Override limit = 4; } // .. attention:: @@ -1564,3 +1681,30 @@ message QueryParameterMatcher { bool present_match = 6; } } + +// HTTP Internal Redirect :ref:`architecture overview `. +message InternalRedirectPolicy { + // An internal redirect is not handled, unless the number of previous internal redirects that a + // downstream request has encountered is lower than this value. + // In the case where a downstream request is bounced among multiple routes by internal redirect, + // the first route that hits this threshold, or does not set :ref:`internal_redirect_policy + // ` + // will pass the redirect back to downstream. + // + // If not specified, at most one redirect will be followed. + google.protobuf.UInt32Value max_internal_redirects = 1; + + // Defines what upstream response codes are allowed to trigger internal redirect. If unspecified, + // only 302 will be treated as internal redirect. + // Only 301, 302, 303, 307 and 308 are valid values. Any other codes will be ignored. + repeated uint32 redirect_response_codes = 2 [(validate.rules).repeated = {max_items: 5}]; + + // Specifies a list of predicates that are queried when an upstream response is deemed + // to trigger an internal redirect by all other criteria. Any predicate in the list can reject + // the redirect, causing the response to be proxied to downstream. 
+ repeated core.v3.TypedExtensionConfig predicates = 3; + + // Allow internal redirect to follow a target URI with a different scheme than the value of + // x-forwarded-proto. The default is false. + bool allow_cross_scheme_redirect = 4; +} diff --git a/api/envoy/config/route/v4alpha/BUILD b/api/envoy/config/route/v4alpha/BUILD index 507bedd76bdf3..c72b7030b9fbb 100644 --- a/api/envoy/config/route/v4alpha/BUILD +++ b/api/envoy/config/route/v4alpha/BUILD @@ -9,7 +9,8 @@ api_proto_package( "//envoy/annotations:pkg", "//envoy/config/core/v4alpha:pkg", "//envoy/config/route/v3:pkg", - "//envoy/type/matcher/v3:pkg", + "//envoy/type/matcher/v4alpha:pkg", + "//envoy/type/metadata/v3:pkg", "//envoy/type/tracing/v3:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", diff --git a/api/envoy/config/route/v4alpha/route_components.proto b/api/envoy/config/route/v4alpha/route_components.proto index e813b632edb04..f921ea506d997 100644 --- a/api/envoy/config/route/v4alpha/route_components.proto +++ b/api/envoy/config/route/v4alpha/route_components.proto @@ -3,8 +3,11 @@ syntax = "proto3"; package envoy.config.route.v4alpha; import "envoy/config/core/v4alpha/base.proto"; -import "envoy/type/matcher/v3/regex.proto"; -import "envoy/type/matcher/v3/string.proto"; +import "envoy/config/core/v4alpha/extension.proto"; +import "envoy/config/core/v4alpha/proxy_protocol.proto"; +import "envoy/type/matcher/v4alpha/regex.proto"; +import "envoy/type/matcher/v4alpha/string.proto"; +import "envoy/type/metadata/v3/metadata.proto"; import "envoy/type/tracing/v3/custom_tag.proto"; import "envoy/type/v3/percent.proto"; import "envoy/type/v3/range.proto"; @@ -123,7 +126,9 @@ message VirtualHost { // Specifies a list of HTTP headers that should be removed from each response // handled by this virtual host. 
- repeated string response_headers_to_remove = 11; + repeated string response_headers_to_remove = 11 [(validate.rules).repeated = { + items {string {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} + }]; // Indicates that the virtual host has a CORS policy. CorsPolicy cors = 8; @@ -142,7 +147,7 @@ message VirtualHost { // will see the attempt count as perceived by the second Envoy. Defaults to false. // This header is unaffected by the // :ref:`suppress_envoy_headers - // ` flag. + // ` flag. // // [#next-major-version: rename to include_attempt_count_in_request.] bool include_request_attempt_count = 14; @@ -154,7 +159,7 @@ message VirtualHost { // will see the attempt count as perceived by the Envoy closest upstream from itself. Defaults to false. // This header is unaffected by the // :ref:`suppress_envoy_headers - // ` flag. + // ` flag. bool include_attempt_count_in_response = 19; // Indicates the retry policy for all routes in this virtual host. Note that setting a @@ -223,6 +228,8 @@ message Route { // [#not-implemented-hide:] // If true, a filter will define the action (e.g., it could dynamically generate the // RouteAction). + // [#comment: TODO(samflattery): Remove cleanup in route_fuzz_test.cc when + // implemented] FilterAction filter_action = 17; } @@ -269,7 +276,9 @@ message Route { // Specifies a list of HTTP headers that should be removed from each response // to requests matching this route. - repeated string response_headers_to_remove = 11; + repeated string response_headers_to_remove = 11 [(validate.rules).repeated = { + items {string {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} + }]; // Presence of the object defines whether the connection manager's tracing configuration // is overridden by this route specific instance. 
@@ -371,7 +380,7 @@ message WeightedCluster { string runtime_key_prefix = 2; } -// [#next-free-field: 12] +// [#next-free-field: 13] message RouteMatch { option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.RouteMatch"; @@ -393,6 +402,12 @@ message RouteMatch { google.protobuf.BoolValue validated = 2; } + // An extensible message for matching CONNECT requests. + message ConnectMatcher { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.RouteMatch.ConnectMatcher"; + } + reserved 5, 3; reserved "regex"; @@ -420,7 +435,18 @@ message RouteMatch { // path_specifier entirely and just rely on a set of header matchers which can already match // on :path, etc. The issue with that is it is unclear how to generically deal with query string // stripping. This needs more thought.] - type.matcher.v3.RegexMatcher safe_regex = 10 [(validate.rules).message = {required: true}]; + type.matcher.v4alpha.RegexMatcher safe_regex = 10 [(validate.rules).message = {required: true}]; + + // If this is used as the matcher, the matcher will only match CONNECT requests. + // Note that this will not match HTTP/2 upgrade-style CONNECT requests + // (WebSocket and the like) as they are normalized in Envoy as HTTP/1.1 style + // upgrades. + // This is the only way to match CONNECT requests for HTTP/1.1. For HTTP/2, + // where CONNECT requests may have a path, the path matchers will work if + // there is a path present. + // Note that CONNECT support is currently considered alpha in Envoy. + // [#comment:TODO(htuch): Replace the above comment with an alpha tag. + ConnectMatcher connect_matcher = 12; } // Indicates that prefix/path matching should be case insensitive. The default @@ -481,7 +507,7 @@ message CorsPolicy { // Specifies string patterns that match allowed origins. An origin is allowed if any of the // string matchers match. 
- repeated type.matcher.v3.StringMatcher allow_origin_string_match = 11; + repeated type.matcher.v4alpha.StringMatcher allow_origin_string_match = 11; // Specifies the content for the *access-control-allow-methods* header. string allow_methods = 2; @@ -521,7 +547,7 @@ message CorsPolicy { core.v4alpha.RuntimeFractionalPercent shadow_enabled = 10; } -// [#next-free-field: 34] +// [#next-free-field: 35] message RouteAction { option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.RouteAction"; @@ -533,12 +559,6 @@ message RouteAction { NOT_FOUND = 1; } - // Configures :ref:`internal redirect ` behavior. - enum InternalRedirectAction { - PASS_THROUGH_INTERNAL_REDIRECT = 0; - HANDLE_INTERNAL_REDIRECT = 1; - } - // The router is capable of shadowing traffic from one cluster to another. The current // implementation is "fire and forget," meaning Envoy will not wait for the shadow cluster to // respond before returning the response from the primary cluster. All normal statistics are @@ -592,6 +612,10 @@ message RouteAction { string header_name = 1 [ (validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false} ]; + + // If specified, the request header value will be rewritten and used + // to produce the hash key. + type.matcher.v4alpha.RegexMatchAndSubstitute regex_rewrite = 2; } // Envoy supports two types of cookie affinity: @@ -706,6 +730,16 @@ message RouteAction { option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.RouteAction.UpgradeConfig"; + // Configuration for sending data upstream as a raw data payload. This is used for + // CONNECT requests, when forwarding CONNECT payload as raw TCP. + message ConnectConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.RouteAction.UpgradeConfig.ConnectConfig"; + + // If present, the proxy protocol header will be prepended to the CONNECT payload sent upstream. 
+ core.v4alpha.ProxyProtocolConfig proxy_protocol_config = 1; + } + // The case-insensitive name of this upgrade, e.g. "websocket". // For each upgrade type present in upgrade_configs, requests with // Upgrade: [upgrade_type] will be proxied upstream. @@ -714,11 +748,17 @@ message RouteAction { // Determines if upgrades are available on this route. Defaults to true. google.protobuf.BoolValue enabled = 2; + + // Configuration for sending data upstream as a raw data payload. This is used for + // CONNECT requests, when forwarding CONNECT payload as raw TCP. + // Note that CONNECT support is currently considered alpha in Envoy. + // [#comment:TODO(htuch): Replace the above comment with an alpha tag. + ConnectConfig connect_config = 3; } - reserved 12, 18, 19, 16, 22, 21, 10; + reserved 12, 18, 19, 16, 22, 21, 10, 26, 31; - reserved "request_mirror_policy"; + reserved "request_mirror_policy", "internal_redirect_action", "max_internal_redirects"; oneof cluster_specifier { option (validate.required) = true; @@ -821,7 +861,7 @@ message RouteAction { // * The pattern ``(?i)/xxx/`` paired with a substitution string of ``/yyy/`` // would do a case-insensitive match and transform path ``/aaa/XxX/bbb`` to // ``/aaa/yyy/bbb``. 
- type.matcher.v3.RegexMatchAndSubstitute regex_rewrite = 32; + type.matcher.v4alpha.RegexMatchAndSubstitute regex_rewrite = 32; oneof host_rewrite_specifier { // Indicates that during forwarding, the host header will be swapped with @@ -958,23 +998,11 @@ message RouteAction { repeated UpgradeConfig upgrade_configs = 25; - InternalRedirectAction internal_redirect_action = 26; - - // An internal redirect is handled, iff the number of previous internal redirects that a - // downstream request has encountered is lower than this value, and - // :ref:`internal_redirect_action ` - // is set to :ref:`HANDLE_INTERNAL_REDIRECT - // ` - // In the case where a downstream request is bounced among multiple routes by internal redirect, - // the first route that hits this threshold, or has - // :ref:`internal_redirect_action ` - // set to - // :ref:`PASS_THROUGH_INTERNAL_REDIRECT - // ` - // will pass the redirect back to downstream. - // - // If not specified, at most one redirect will be followed. - google.protobuf.UInt32Value max_internal_redirects = 31; + // If present, Envoy will try to follow an upstream redirect response instead of proxying the + // response back to the downstream. An upstream redirect response is defined + // by :ref:`redirect_response_codes + // `. + InternalRedirectPolicy internal_redirect_policy = 34; // Indicates that the route has a hedge policy. Note that if this is set, // it'll take precedence over the virtual host level hedge policy entirely @@ -1045,7 +1073,7 @@ message RetryPolicy { // Specifies the allowed number of retries. This parameter is optional and // defaults to 1. These are the same conditions documented for // :ref:`config_http_filters_router_x-envoy-max-retries`. - google.protobuf.UInt32Value num_retries = 2; + google.protobuf.UInt32Value max_retries = 2; // Specifies a non-zero upstream timeout per retry attempt. This parameter is optional. 
The // same conditions documented for @@ -1166,6 +1194,21 @@ message RedirectAction { oneof path_rewrite_specifier { // The path portion of the URL will be swapped with this value. + // Please note that query string in path_redirect will override the + // request's query string and will not be stripped. + // + // For example, let's say we have the following routes: + // + // - match: { path: "/old-path-1" } + // redirect: { path_redirect: "/new-path-1" } + // - match: { path: "/old-path-2" } + // redirect: { path_redirect: "/new-path-2", strip-query: "true" } + // - match: { path: "/old-path-3" } + // redirect: { path_redirect: "/new-path-3?foo=1", strip_query: "true" } + // + // 1. if request uri is "/old-path-1?bar=1", users will be redirected to "/new-path-1?bar=1" + // 2. if request uri is "/old-path-2?bar=1", users will be redirected to "/new-path-2" + // 3. if request uri is "/old-path-3?bar=1", users will be redirected to "/new-path-3?foo=1" string path_redirect = 2 [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; @@ -1302,7 +1345,7 @@ message VirtualCluster { message RateLimit { option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.RateLimit"; - // [#next-free-field: 7] + // [#next-free-field: 8] message Action { option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.RateLimit.Action"; @@ -1359,6 +1402,11 @@ message RateLimit { // The key to use in the descriptor entry. string descriptor_key = 2 [(validate.rules).string = {min_bytes: 1}]; + + // If set to true, Envoy skips the descriptor while calling rate limiting service + // when header is not present in the request. By default it skips calling the + // rate limiting service if this header is not present in the request. 
+ bool skip_if_absent = 3; } // The following descriptor entry is appended to the descriptor and is populated using the @@ -1411,6 +1459,27 @@ message RateLimit { repeated HeaderMatcher headers = 3 [(validate.rules).repeated = {min_items: 1}]; } + // The following descriptor entry is appended when the dynamic metadata contains a key value: + // + // .. code-block:: cpp + // + // ("", "") + message DynamicMetaData { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.RateLimit.Action.DynamicMetaData"; + + // The key to use in the descriptor entry. + string descriptor_key = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Metadata struct that defines the key and path to retrieve the string value. A match will + // only happen if the value in the dynamic metadata is of type string. + type.metadata.v3.MetadataKey metadata_key = 2 [(validate.rules).message = {required: true}]; + + // An optional value to use if *metadata_key* is empty. If not set and + // no value is present under the metadata_key then no descriptor is generated. + string default_value = 3; + } + oneof action_specifier { option (validate.required) = true; @@ -1431,6 +1500,33 @@ message RateLimit { // Rate limit on the existence of request headers. HeaderValueMatch header_value_match = 6; + + // Rate limit on dynamic metadata. + DynamicMetaData dynamic_metadata = 7; + } + } + + message Override { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.RateLimit.Override"; + + // Fetches the override from the dynamic metadata. + message DynamicMetadata { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.RateLimit.Override.DynamicMetadata"; + + // Metadata struct that defines the key and path to retrieve the struct value. 
+ // The value must be a struct containing an integer "requests_per_unit" property + // and a "unit" property with a value parseable to :ref:`RateLimitUnit + // enum ` + type.metadata.v3.MetadataKey metadata_key = 1 [(validate.rules).message = {required: true}]; + } + + oneof override_specifier { + option (validate.required) = true; + + // Limit override from dynamic metadata. + DynamicMetadata dynamic_metadata = 1; } } @@ -1453,6 +1549,12 @@ message RateLimit { // configuration. See :ref:`composing actions // ` for additional documentation. repeated Action actions = 3 [(validate.rules).repeated = {min_items: 1}]; + + // An optional limit override to be appended to the descriptor produced by this + // rate limit configuration. If the override value is invalid or cannot be resolved + // from metadata, no override is provided. See :ref:`rate limit override + // ` for more information. + Override limit = 4; } // .. attention:: @@ -1500,7 +1602,7 @@ message HeaderMatcher { // If specified, this regex string is a regular expression rule which implies the entire request // header value must match the regex. The rule will not match if only a subsequence of the // request header value matches the regex. - type.matcher.v3.RegexMatcher safe_regex_match = 11; + type.matcher.v4alpha.RegexMatcher safe_regex_match = 11; // If specified, header match will be performed based on range. // The rule will match if the request header value is within this range. @@ -1562,9 +1664,40 @@ message QueryParameterMatcher { oneof query_parameter_match_specifier { // Specifies whether a query parameter value should match against a string. - type.matcher.v3.StringMatcher string_match = 5 [(validate.rules).message = {required: true}]; + type.matcher.v4alpha.StringMatcher string_match = 5 + [(validate.rules).message = {required: true}]; // Specifies whether a query parameter should be present. bool present_match = 6; } } + +// HTTP Internal Redirect :ref:`architecture overview `. 
+message InternalRedirectPolicy { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.InternalRedirectPolicy"; + + // An internal redirect is not handled, unless the number of previous internal redirects that a + // downstream request has encountered is lower than this value. + // In the case where a downstream request is bounced among multiple routes by internal redirect, + // the first route that hits this threshold, or does not set :ref:`internal_redirect_policy + // ` + // will pass the redirect back to downstream. + // + // If not specified, at most one redirect will be followed. + google.protobuf.UInt32Value max_internal_redirects = 1; + + // Defines what upstream response codes are allowed to trigger internal redirect. If unspecified, + // only 302 will be treated as internal redirect. + // Only 301, 302, 303, 307 and 308 are valid values. Any other codes will be ignored. + repeated uint32 redirect_response_codes = 2 [(validate.rules).repeated = {max_items: 5}]; + + // Specifies a list of predicates that are queried when an upstream response is deemed + // to trigger an internal redirect by all other criteria. Any predicate in the list can reject + // the redirect, causing the response to be proxied to downstream. + repeated core.v4alpha.TypedExtensionConfig predicates = 3; + + // Allow internal redirect to follow a target URI with a different scheme than the value of + // x-forwarded-proto. The default is false. 
+ bool allow_cross_scheme_redirect = 4; +} diff --git a/api/envoy/config/tap/v3/BUILD b/api/envoy/config/tap/v3/BUILD index f266efc592a2a..6fd3142264d9f 100644 --- a/api/envoy/config/tap/v3/BUILD +++ b/api/envoy/config/tap/v3/BUILD @@ -6,6 +6,7 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ + "//envoy/config/common/matcher/v3:pkg", "//envoy/config/core/v3:pkg", "//envoy/config/route/v3:pkg", "//envoy/service/tap/v2alpha:pkg", diff --git a/api/envoy/config/tap/v3/common.proto b/api/envoy/config/tap/v3/common.proto index 0fea8f88a638e..42783115f8719 100644 --- a/api/envoy/config/tap/v3/common.proto +++ b/api/envoy/config/tap/v3/common.proto @@ -2,6 +2,7 @@ syntax = "proto3"; package envoy.config.tap.v3; +import "envoy/config/common/matcher/v3/matcher.proto"; import "envoy/config/core/v3/base.proto"; import "envoy/config/core/v3/grpc_service.proto"; import "envoy/config/route/v3/route_components.proto"; @@ -28,7 +29,17 @@ message TapConfig { // The match configuration. If the configuration matches the data source being tapped, a tap will // occur, with the result written to the configured output. - MatchPredicate match_config = 1 [(validate.rules).message = {required: true}]; + // Exactly one of :ref:`match ` and + // :ref:`match_config ` must be set. If both + // are set, the :ref:`match ` will be used. + MatchPredicate match_config = 1 [deprecated = true]; + + // The match configuration. If the configuration matches the data source being tapped, a tap will + // occur, with the result written to the configured output. + // Exactly one of :ref:`match ` and + // :ref:`match_config ` must be set. If both + // are set, the :ref:`match ` will be used. + common.matcher.v3.MatchPredicate match = 4; // The tap output configuration. If a match configuration matches a data source being tapped, // a tap will occur and the data will be written to the configured output. @@ -47,7 +58,7 @@ message TapConfig { // Tap match configuration. 
This is a recursive structure which allows complex nested match // configurations to be built using various logical operators. -// [#next-free-field: 9] +// [#next-free-field: 11] message MatchPredicate { option (udpa.annotations.versioning).previous_message_type = "envoy.service.tap.v2alpha.MatchPredicate"; @@ -89,6 +100,12 @@ message MatchPredicate { // HTTP response trailers match configuration. HttpHeadersMatch http_response_trailers_match = 8; + + // HTTP request generic body match configuration. + HttpGenericBodyMatch http_request_generic_body_match = 9; + + // HTTP response generic body match configuration. + HttpGenericBodyMatch http_response_generic_body_match = 10; } } @@ -101,6 +118,36 @@ message HttpHeadersMatch { repeated route.v3.HeaderMatcher headers = 1; } +// HTTP generic body match configuration. +// List of text strings and hex strings to be located in HTTP body. +// All specified strings must be found in the HTTP body for positive match. +// The search may be limited to specified number of bytes from the body start. +// +// .. attention:: +// +// Searching for patterns in HTTP body is potentially cpu intensive. For each specified pattern, http body is scanned byte by byte to find a match. +// If multiple patterns are specified, the process is repeated for each pattern. If location of a pattern is known, ``bytes_limit`` should be specified +// to scan only part of the http body. +message HttpGenericBodyMatch { + message GenericTextMatch { + oneof rule { + option (validate.required) = true; + + // Text string to be located in HTTP body. + string string_match = 1 [(validate.rules).string = {min_len: 1}]; + + // Sequence of bytes to be located in HTTP body. + bytes binary_match = 2 [(validate.rules).bytes = {min_len: 1}]; + } + } + + // Limits search to specified number of bytes - default zero (no limit - match entire captured buffer). + uint32 bytes_limit = 1; + + // List of patterns to match. 
+ repeated GenericTextMatch patterns = 2 [(validate.rules).repeated = {min_items: 1}]; +} + // Tap output configuration. message OutputConfig { option (udpa.annotations.versioning).previous_message_type = @@ -195,6 +242,7 @@ message OutputSink { // [#not-implemented-hide:] // GrpcService to stream data to. The format argument must be PROTO_BINARY. + // [#comment: TODO(samflattery): remove cleanup in uber_per_filter.cc once implemented] StreamingGrpcSink streaming_grpc = 4; } } diff --git a/api/envoy/config/tap/v4alpha/BUILD b/api/envoy/config/tap/v4alpha/BUILD new file mode 100644 index 0000000000000..be8b1c3a17e31 --- /dev/null +++ b/api/envoy/config/tap/v4alpha/BUILD @@ -0,0 +1,15 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/common/matcher/v4alpha:pkg", + "//envoy/config/core/v4alpha:pkg", + "//envoy/config/route/v4alpha:pkg", + "//envoy/config/tap/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/config/tap/v4alpha/common.proto b/api/envoy/config/tap/v4alpha/common.proto new file mode 100644 index 0000000000000..8366187fd1bf2 --- /dev/null +++ b/api/envoy/config/tap/v4alpha/common.proto @@ -0,0 +1,276 @@ +syntax = "proto3"; + +package envoy.config.tap.v4alpha; + +import "envoy/config/common/matcher/v4alpha/matcher.proto"; +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/config/core/v4alpha/grpc_service.proto"; +import "envoy/config/route/v4alpha/route_components.proto"; + +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.tap.v4alpha"; +option java_outer_classname = "CommonProto"; +option java_multiple_files = true; +option 
(udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Common tap configuration] + +// Tap configuration. +message TapConfig { + // [#comment:TODO(mattklein123): Rate limiting] + + option (udpa.annotations.versioning).previous_message_type = "envoy.config.tap.v3.TapConfig"; + + reserved 1; + + reserved "match_config"; + + // The match configuration. If the configuration matches the data source being tapped, a tap will + // occur, with the result written to the configured output. + // Exactly one of :ref:`match ` and + // :ref:`match_config ` must be set. If both + // are set, the :ref:`match ` will be used. + common.matcher.v4alpha.MatchPredicate match = 4; + + // The tap output configuration. If a match configuration matches a data source being tapped, + // a tap will occur and the data will be written to the configured output. + OutputConfig output_config = 2 [(validate.rules).message = {required: true}]; + + // [#not-implemented-hide:] Specify if Tap matching is enabled. The % of requests\connections for + // which the tap matching is enabled. When not enabled, the request\connection will not be + // recorded. + // + // .. note:: + // + // This field defaults to 100/:ref:`HUNDRED + // `. + core.v4alpha.RuntimeFractionalPercent tap_enabled = 3; +} + +// Tap match configuration. This is a recursive structure which allows complex nested match +// configurations to be built using various logical operators. +// [#next-free-field: 11] +message MatchPredicate { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.tap.v3.MatchPredicate"; + + // A set of match configurations used for logical operations. + message MatchSet { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.tap.v3.MatchPredicate.MatchSet"; + + // The list of rules that make up the set. 
+ repeated MatchPredicate rules = 1 [(validate.rules).repeated = {min_items: 2}]; + } + + oneof rule { + option (validate.required) = true; + + // A set that describes a logical OR. If any member of the set matches, the match configuration + // matches. + MatchSet or_match = 1; + + // A set that describes a logical AND. If all members of the set match, the match configuration + // matches. + MatchSet and_match = 2; + + // A negation match. The match configuration will match if the negated match condition matches. + MatchPredicate not_match = 3; + + // The match configuration will always match. + bool any_match = 4 [(validate.rules).bool = {const: true}]; + + // HTTP request headers match configuration. + HttpHeadersMatch http_request_headers_match = 5; + + // HTTP request trailers match configuration. + HttpHeadersMatch http_request_trailers_match = 6; + + // HTTP response headers match configuration. + HttpHeadersMatch http_response_headers_match = 7; + + // HTTP response trailers match configuration. + HttpHeadersMatch http_response_trailers_match = 8; + + // HTTP request generic body match configuration. + HttpGenericBodyMatch http_request_generic_body_match = 9; + + // HTTP response generic body match configuration. + HttpGenericBodyMatch http_response_generic_body_match = 10; + } +} + +// HTTP headers match configuration. +message HttpHeadersMatch { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.tap.v3.HttpHeadersMatch"; + + // HTTP headers to match. + repeated route.v4alpha.HeaderMatcher headers = 1; +} + +// HTTP generic body match configuration. +// List of text strings and hex strings to be located in HTTP body. +// All specified strings must be found in the HTTP body for positive match. +// The search may be limited to specified number of bytes from the body start. +// +// .. attention:: +// +// Searching for patterns in HTTP body is potentially cpu intensive. 
For each specified pattern, http body is scanned byte by byte to find a match. +// If multiple patterns are specified, the process is repeated for each pattern. If location of a pattern is known, ``bytes_limit`` should be specified +// to scan only part of the http body. +message HttpGenericBodyMatch { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.tap.v3.HttpGenericBodyMatch"; + + message GenericTextMatch { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.tap.v3.HttpGenericBodyMatch.GenericTextMatch"; + + oneof rule { + option (validate.required) = true; + + // Text string to be located in HTTP body. + string string_match = 1 [(validate.rules).string = {min_len: 1}]; + + // Sequence of bytes to be located in HTTP body. + bytes binary_match = 2 [(validate.rules).bytes = {min_len: 1}]; + } + } + + // Limits search to specified number of bytes - default zero (no limit - match entire captured buffer). + uint32 bytes_limit = 1; + + // List of patterns to match. + repeated GenericTextMatch patterns = 2 [(validate.rules).repeated = {min_items: 1}]; +} + +// Tap output configuration. +message OutputConfig { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.tap.v3.OutputConfig"; + + // Output sinks for tap data. Currently a single sink is allowed in the list. Once multiple + // sink types are supported this constraint will be relaxed. + repeated OutputSink sinks = 1 [(validate.rules).repeated = {min_items: 1 max_items: 1}]; + + // For buffered tapping, the maximum amount of received body that will be buffered prior to + // truncation. If truncation occurs, the :ref:`truncated + // ` field will be set. If not specified, the + // default is 1KiB. + google.protobuf.UInt32Value max_buffered_rx_bytes = 2; + + // For buffered tapping, the maximum amount of transmitted body that will be buffered prior to + // truncation. If truncation occurs, the :ref:`truncated + // ` field will be set. 
If not specified, the + // default is 1KiB. + google.protobuf.UInt32Value max_buffered_tx_bytes = 3; + + // Indicates whether taps produce a single buffered message per tap, or multiple streamed + // messages per tap in the emitted :ref:`TraceWrapper + // ` messages. Note that streamed tapping does not + // mean that no buffering takes place. Buffering may be required if data is processed before a + // match can be determined. See the HTTP tap filter :ref:`streaming + // ` documentation for more information. + bool streaming = 4; +} + +// Tap output sink configuration. +message OutputSink { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.tap.v3.OutputSink"; + + // Output format. All output is in the form of one or more :ref:`TraceWrapper + // ` messages. This enumeration indicates + // how those messages are written. Note that not all sinks support all output formats. See + // individual sink documentation for more information. + enum Format { + // Each message will be written as JSON. Any :ref:`body ` + // data will be present in the :ref:`as_bytes + // ` field. This means that body data will be + // base64 encoded as per the `proto3 JSON mappings + // `_. + JSON_BODY_AS_BYTES = 0; + + // Each message will be written as JSON. Any :ref:`body ` + // data will be present in the :ref:`as_string + // ` field. This means that body data will be + // string encoded as per the `proto3 JSON mappings + // `_. This format type is + // useful when it is known that that body is human readable (e.g., JSON over HTTP) and the + // user wishes to view it directly without being forced to base64 decode the body. + JSON_BODY_AS_STRING = 1; + + // Binary proto format. Note that binary proto is not self-delimiting. If a sink writes + // multiple binary messages without any length information the data stream will not be + // useful. 
However, for certain sinks that are self-delimiting (e.g., one message per file) + // this output format makes consumption simpler. + PROTO_BINARY = 2; + + // Messages are written as a sequence tuples, where each tuple is the message length encoded + // as a `protobuf 32-bit varint + // `_ + // followed by the binary message. The messages can be read back using the language specific + // protobuf coded stream implementation to obtain the message length and the message. + PROTO_BINARY_LENGTH_DELIMITED = 3; + + // Text proto format. + PROTO_TEXT = 4; + } + + // Sink output format. + Format format = 1 [(validate.rules).enum = {defined_only: true}]; + + oneof output_sink_type { + option (validate.required) = true; + + // Tap output will be streamed out the :http:post:`/tap` admin endpoint. + // + // .. attention:: + // + // It is only allowed to specify the streaming admin output sink if the tap is being + // configured from the :http:post:`/tap` admin endpoint. Thus, if an extension has + // been configured to receive tap configuration from some other source (e.g., static + // file, XDS, etc.) configuring the streaming admin output type will fail. + StreamingAdminSink streaming_admin = 2; + + // Tap output will be written to a file per tap sink. + FilePerTapSink file_per_tap = 3; + + // [#not-implemented-hide:] + // GrpcService to stream data to. The format argument must be PROTO_BINARY. + // [#comment: TODO(samflattery): remove cleanup in uber_per_filter.cc once implemented] + StreamingGrpcSink streaming_grpc = 4; + } +} + +// Streaming admin sink configuration. +message StreamingAdminSink { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.tap.v3.StreamingAdminSink"; +} + +// The file per tap sink outputs a discrete file for every tapped stream. +message FilePerTapSink { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.tap.v3.FilePerTapSink"; + + // Path prefix. 
The output file will be of the form _.pb, where is an + // identifier distinguishing the recorded trace for stream instances (the Envoy + // connection ID, HTTP stream ID, etc.). + string path_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; +} + +// [#not-implemented-hide:] Streaming gRPC sink configuration sends the taps to an external gRPC +// server. +message StreamingGrpcSink { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.tap.v3.StreamingGrpcSink"; + + // Opaque identifier, that will be sent back to the streaming grpc server. + string tap_id = 1; + + // The gRPC server that hosts the Tap Sink Service. + core.v4alpha.GrpcService grpc_service = 2 [(validate.rules).message = {required: true}]; +} diff --git a/api/envoy/config/wasm/v2alpha/wasm.proto b/api/envoy/config/wasm/v2alpha/wasm.proto deleted file mode 100644 index b8f050a23d2b0..0000000000000 --- a/api/envoy/config/wasm/v2alpha/wasm.proto +++ /dev/null @@ -1,83 +0,0 @@ -syntax = "proto3"; - -package envoy.config.wasm.v2alpha; - -import "envoy/api/v2/core/base.proto"; - -import "google/protobuf/any.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.wasm.v2alpha"; -option java_outer_classname = "WasmProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.wasm.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Wasm service] - -// Configuration for a Wasm VM. -// [#next-free-field: 6] -// [#not-implemented-hide:] pending implementation. -message VmConfig { - // An ID which will be used along with a hash of the wasm code (or the name of the registered Null - // VM plugin) to determine which VM will be used for the plugin. All plugins which use the same - // *vm_id* and code will use the same VM. May be left blank. 
Sharing a VM between plugins can - // reduce memory utilization and make sharing of data easier which may have security implications. - // See ref: "TODO: add ref" for details. - string vm_id = 1; - - // The Wasm runtime type (either "v8" or "null" for code compiled into Envoy). - string runtime = 2 [(validate.rules).string = {min_bytes: 1}]; - - // The Wasm code that Envoy will execute. - api.v2.core.AsyncDataSource code = 3; - - // The Wasm configuration used in initialization of a new VM (proxy_on_start). - google.protobuf.Any configuration = 4; - - // Allow the wasm file to include pre-compiled code on VMs which support it. - // Warning: this should only be enable for trusted sources as the precompiled code is not - // verified. - bool allow_precompiled = 5; -} - -// Base Configuration for Wasm Plugins e.g. filters and services. -// [#next-free-field: 6] -// [#not-implemented-hide:] pending implementation. -message PluginConfig { - // A unique name for a filters/services in a VM for use in identifying the filter/service if - // multiple filters/services are handled by the same *vm_id* and *group_name* and for - // logging/debugging. - string name = 1; - - // A unique ID for a set of filters/services in a VM which will share a RootContext and Contexts - // if applicable (e.g. an Wasm HttpFilter and an Wasm AccessLog). If left blank, all - // filters/services with a blank group_name with the same *vm_id* will share Context(s). - string group_name = 2; - - // Configuration for finding or starting VM. - oneof vm_config { - VmConfig inline_vm_config = 3; - // In the future add referential VM configurations. - } - - // Filter/service configuration used to configure or reconfigure a plugin - // (proxy_on_configuration). - google.protobuf.Any configuration = 5; -} - -// WasmService is configured as a built-in *envoy.wasm_service* :ref:`ServiceConfig -// `. This opaque configuration will be used to -// create a Wasm Service. 
-// [#not-implemented-hide:] pending implementation. -message WasmService { - // General plugin configuration. - PluginConfig config = 1; - - // If true, create a single VM rather than creating one VM per worker. Such a singleton can - // not be used with filters. - bool singleton = 2; -} diff --git a/api/envoy/data/accesslog/v3/accesslog.proto b/api/envoy/data/accesslog/v3/accesslog.proto index 374569d937f28..c16b5be1ff0ed 100644 --- a/api/envoy/data/accesslog/v3/accesslog.proto +++ b/api/envoy/data/accesslog/v3/accesslog.proto @@ -186,7 +186,7 @@ message AccessLogCommon { } // Flags indicating occurrences during request/response processing. -// [#next-free-field: 20] +// [#next-free-field: 23] message ResponseFlags { option (udpa.annotations.versioning).previous_message_type = "envoy.data.accesslog.v2.ResponseFlags"; @@ -263,6 +263,15 @@ message ResponseFlags { // Indicates there was an HTTP protocol error on the downstream request. bool downstream_protocol_error = 19; + + // Indicates there was a max stream duration reached on the upstream request. + bool upstream_max_stream_duration_reached = 20; + + // Indicates the response was served from a cache filter. + bool response_from_cache_filter = 21; + + // Indicates that a filter configuration is not available. + bool no_filter_config_found = 22; } // Properties of a negotiated TLS connection. diff --git a/api/envoy/data/dns/v3/dns_table.proto b/api/envoy/data/dns/v3/dns_table.proto index a6457e118672d..fd68847b892f1 100644 --- a/api/envoy/data/dns/v3/dns_table.proto +++ b/api/envoy/data/dns/v3/dns_table.proto @@ -28,22 +28,19 @@ message DnsTable { option (udpa.annotations.versioning).previous_message_type = "envoy.data.dns.v2alpha.DnsTable.AddressList"; - // This field contains a well formed IP address that is returned - // in the answer for a name query. The address field can be an - // IPv4 or IPv6 address. Address family detection is done automatically - // when Envoy parses the string. 
Since this field is repeated, - // Envoy will return one randomly chosen entry from this list in the - // DNS response. The random index will vary per query so that we prevent - // clients pinning on a single address for a configured domain + // This field contains a well formed IP address that is returned in the answer for a + // name query. The address field can be an IPv4 or IPv6 address. Address family + // detection is done automatically when Envoy parses the string. Since this field is + // repeated, Envoy will return as many entries from this list in the DNS response while + // keeping the response under 512 bytes repeated string address = 1 [(validate.rules).repeated = { min_items: 1 items {string {min_len: 3}} }]; } - // This message type is extensible and can contain a list of addresses - // or dictate some other method for resolving the addresses for an - // endpoint + // This message type is extensible and can contain a list of addresses, clusters or + // dictate a different method for resolving the addresses for an endpoint message DnsEndpoint { option (udpa.annotations.versioning).previous_message_type = "envoy.data.dns.v2alpha.DnsTable.DnsEndpoint"; @@ -52,6 +49,8 @@ message DnsTable { option (validate.required) = true; AddressList address_list = 1; + + string cluster_name = 2; } } @@ -59,27 +58,25 @@ message DnsTable { option (udpa.annotations.versioning).previous_message_type = "envoy.data.dns.v2alpha.DnsTable.DnsVirtualDomain"; - // The domain name for which Envoy will respond to query requests - string name = 1 [(validate.rules).string = {min_len: 2 well_known_regex: HTTP_HEADER_NAME}]; + // A domain name for which Envoy will respond to query requests + string name = 1 [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME}]; - // The configuration containing the method to determine the address - // of this endpoint + // The configuration containing the method to determine the address of this endpoint DnsEndpoint endpoint = 2; 
- // Sets the TTL in dns answers from Envoy returned to the client - google.protobuf.Duration answer_ttl = 3 [(validate.rules).duration = {gt {}}]; + // Sets the TTL in DNS answers from Envoy returned to the client. The default TTL is 300s + google.protobuf.Duration answer_ttl = 3 [(validate.rules).duration = {gte {seconds: 30}}]; } - // Control how many times envoy makes an attempt to forward a query to - // an external server - uint32 external_retry_count = 1; + // Control how many times Envoy makes an attempt to forward a query to an external DNS server + uint32 external_retry_count = 1 [(validate.rules).uint32 = {lte: 3}]; - // Fully qualified domain names for which Envoy will respond to queries - repeated DnsVirtualDomain virtual_domains = 2 [(validate.rules).repeated = {min_items: 1}]; + // Fully qualified domain names for which Envoy will respond to DNS queries. By leaving this + // list empty, Envoy will forward all queries to external resolvers + repeated DnsVirtualDomain virtual_domains = 2; - // This field serves to help Envoy determine whether it can authoritatively - // answer a query for a name matching a suffix in this list. If the query - // name does not match a suffix in this list, Envoy will forward - // the query to an upstream DNS server + // This field serves to help Envoy determine whether it can authoritatively answer a query + // for a name matching a suffix in this list. If the query name does not match a suffix in + // this list, Envoy will forward the query to an upstream DNS server repeated type.matcher.v3.StringMatcher known_suffixes = 3; } diff --git a/api/envoy/data/dns/v4alpha/BUILD b/api/envoy/data/dns/v4alpha/BUILD new file mode 100644 index 0000000000000..bc8958ceab0bf --- /dev/null +++ b/api/envoy/data/dns/v4alpha/BUILD @@ -0,0 +1,13 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/data/dns/v3:pkg", + "//envoy/type/matcher/v4alpha:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/data/dns/v4alpha/dns_table.proto b/api/envoy/data/dns/v4alpha/dns_table.proto new file mode 100644 index 0000000000000..22fe377281ddf --- /dev/null +++ b/api/envoy/data/dns/v4alpha/dns_table.proto @@ -0,0 +1,82 @@ +syntax = "proto3"; + +package envoy.data.dns.v4alpha; + +import "envoy/type/matcher/v4alpha/string.proto"; + +import "google/protobuf/duration.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.data.dns.v4alpha"; +option java_outer_classname = "DnsTableProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: DNS Filter Table Data] +// :ref:`DNS Filter config overview `. + +// This message contains the configuration for the DNS Filter if populated +// from the control plane +message DnsTable { + option (udpa.annotations.versioning).previous_message_type = "envoy.data.dns.v3.DnsTable"; + + // This message contains a list of IP addresses returned for a query for a known name + message AddressList { + option (udpa.annotations.versioning).previous_message_type = + "envoy.data.dns.v3.DnsTable.AddressList"; + + // This field contains a well formed IP address that is returned in the answer for a + // name query. The address field can be an IPv4 or IPv6 address. Address family + // detection is done automatically when Envoy parses the string. 
Since this field is + // repeated, Envoy will return as many entries from this list in the DNS response while + // keeping the response under 512 bytes + repeated string address = 1 [(validate.rules).repeated = { + min_items: 1 + items {string {min_len: 3}} + }]; + } + + // This message type is extensible and can contain a list of addresses, clusters or + // dictate a different method for resolving the addresses for an endpoint + message DnsEndpoint { + option (udpa.annotations.versioning).previous_message_type = + "envoy.data.dns.v3.DnsTable.DnsEndpoint"; + + oneof endpoint_config { + option (validate.required) = true; + + AddressList address_list = 1; + + string cluster_name = 2; + } + } + + message DnsVirtualDomain { + option (udpa.annotations.versioning).previous_message_type = + "envoy.data.dns.v3.DnsTable.DnsVirtualDomain"; + + // A domain name for which Envoy will respond to query requests + string name = 1 [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME}]; + + // The configuration containing the method to determine the address of this endpoint + DnsEndpoint endpoint = 2; + + // Sets the TTL in DNS answers from Envoy returned to the client. The default TTL is 300s + google.protobuf.Duration answer_ttl = 3 [(validate.rules).duration = {gte {seconds: 30}}]; + } + + // Control how many times Envoy makes an attempt to forward a query to an external DNS server + uint32 external_retry_count = 1 [(validate.rules).uint32 = {lte: 3}]; + + // Fully qualified domain names for which Envoy will respond to DNS queries. By leaving this + // list empty, Envoy will forward all queries to external resolvers + repeated DnsVirtualDomain virtual_domains = 2; + + // This field serves to help Envoy determine whether it can authoritatively answer a query + // for a name matching a suffix in this list. 
If the query name does not match a suffix in + // this list, Envoy will forward the query to an upstream DNS server + repeated type.matcher.v4alpha.StringMatcher known_suffixes = 3; +} diff --git a/api/envoy/extensions/access_loggers/file/v3/BUILD b/api/envoy/extensions/access_loggers/file/v3/BUILD index db752e857c62e..3edacd3aafea1 100644 --- a/api/envoy/extensions/access_loggers/file/v3/BUILD +++ b/api/envoy/extensions/access_loggers/file/v3/BUILD @@ -7,6 +7,7 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/accesslog/v2:pkg", + "//envoy/config/core/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) diff --git a/api/envoy/extensions/access_loggers/file/v3/file.proto b/api/envoy/extensions/access_loggers/file/v3/file.proto index f3c9c0a11612f..de33623c207f9 100644 --- a/api/envoy/extensions/access_loggers/file/v3/file.proto +++ b/api/envoy/extensions/access_loggers/file/v3/file.proto @@ -2,6 +2,8 @@ syntax = "proto3"; package envoy.extensions.access_loggers.file.v3; +import "envoy/config/core/v3/substitution_format_string.proto"; + import "google/protobuf/struct.proto"; import "udpa/annotations/status.proto"; @@ -19,6 +21,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // Custom configuration for an :ref:`AccessLog ` // that writes log entries directly to a file. Configures the built-in *envoy.access_loggers.file* // AccessLog. +// [#next-free-field: 6] message FileAccessLog { option (udpa.annotations.versioning).previous_message_type = "envoy.config.accesslog.v2.FileAccessLog"; @@ -30,16 +33,27 @@ message FileAccessLog { // Access log :ref:`format string`. // Envoy supports :ref:`custom access log formats ` as well as a // :ref:`default format `. - string format = 2; + // This field is deprecated. + // Please use :ref:`log_format `. + string format = 2 [deprecated = true]; // Access log :ref:`format dictionary`. All values // are rendered as strings. 
- google.protobuf.Struct json_format = 3; + // This field is deprecated. + // Please use :ref:`log_format `. + google.protobuf.Struct json_format = 3 [deprecated = true]; // Access log :ref:`format dictionary`. Values are // rendered as strings, numbers, or boolean values as appropriate. Nested JSON objects may // be produced by some command operators (e.g.FILTER_STATE or DYNAMIC_METADATA). See the // documentation for a specific command operator for details. - google.protobuf.Struct typed_json_format = 4; + // This field is deprecated. + // Please use :ref:`log_format `. + google.protobuf.Struct typed_json_format = 4 [deprecated = true]; + + // Configuration to form access log data and format. + // If not specified, use :ref:`default format `. + config.core.v3.SubstitutionFormatString log_format = 5 + [(validate.rules).message = {required: true}]; } } diff --git a/api/envoy/extensions/access_loggers/file/v4alpha/BUILD b/api/envoy/extensions/access_loggers/file/v4alpha/BUILD new file mode 100644 index 0000000000000..ba8c3042328bb --- /dev/null +++ b/api/envoy/extensions/access_loggers/file/v4alpha/BUILD @@ -0,0 +1,13 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/core/v4alpha:pkg", + "//envoy/extensions/access_loggers/file/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/extensions/access_loggers/file/v4alpha/file.proto b/api/envoy/extensions/access_loggers/file/v4alpha/file.proto new file mode 100644 index 0000000000000..fa2ec9a504952 --- /dev/null +++ b/api/envoy/extensions/access_loggers/file/v4alpha/file.proto @@ -0,0 +1,42 @@ +syntax = "proto3"; + +package envoy.extensions.access_loggers.file.v4alpha; + +import "envoy/config/core/v4alpha/substitution_format_string.proto"; + +import "google/protobuf/struct.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.access_loggers.file.v4alpha"; +option java_outer_classname = "FileProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: File access log] +// [#extension: envoy.access_loggers.file] + +// Custom configuration for an :ref:`AccessLog ` +// that writes log entries directly to a file. Configures the built-in *envoy.access_loggers.file* +// AccessLog. +// [#next-free-field: 6] +message FileAccessLog { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.access_loggers.file.v3.FileAccessLog"; + + reserved 2, 3, 4; + + reserved "format", "json_format", "typed_json_format"; + + // A path to a local file to which to write the access log entries. + string path = 1 [(validate.rules).string = {min_bytes: 1}]; + + oneof access_log_format { + // Configuration to form access log data and format. + // If not specified, use :ref:`default format `. 
+ config.core.v4alpha.SubstitutionFormatString log_format = 5 + [(validate.rules).message = {required: true}]; + } +} diff --git a/api/envoy/extensions/access_loggers/grpc/v3/als.proto b/api/envoy/extensions/access_loggers/grpc/v3/als.proto index 3cc154416627e..4996a877a9c6a 100644 --- a/api/envoy/extensions/access_loggers/grpc/v3/als.proto +++ b/api/envoy/extensions/access_loggers/grpc/v3/als.proto @@ -2,6 +2,7 @@ syntax = "proto3"; package envoy.extensions.access_loggers.grpc.v3; +import "envoy/config/core/v3/config_source.proto"; import "envoy/config/core/v3/grpc_service.proto"; import "google/protobuf/duration.proto"; @@ -53,7 +54,7 @@ message TcpGrpcAccessLogConfig { } // Common configuration for gRPC access logs. -// [#next-free-field: 6] +// [#next-free-field: 7] message CommonGrpcAccessLogConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.config.accesslog.v2.CommonGrpcAccessLogConfig"; @@ -66,6 +67,11 @@ message CommonGrpcAccessLogConfig { // The gRPC service for the access log service. config.core.v3.GrpcService grpc_service = 2 [(validate.rules).message = {required: true}]; + // API version for access logs service transport protocol. This describes the access logs service + // gRPC endpoint and version of messages used on the wire. + config.core.v3.ApiVersion transport_api_version = 6 + [(validate.rules).enum = {defined_only: true}]; + // Interval for flushing access logs to the gRPC stream. Logger will flush requests every time // this interval is elapsed, or when batch size limit is hit, whichever comes first. Defaults to // 1 second. 
diff --git a/generated_api_shadow/envoy/config/filter/udp/dns_filter/v2alpha/BUILD b/api/envoy/extensions/access_loggers/wasm/v3/BUILD similarity index 77% rename from generated_api_shadow/envoy/config/filter/udp/dns_filter/v2alpha/BUILD rename to api/envoy/extensions/access_loggers/wasm/v3/BUILD index c6f01577c8283..8bad369e35113 100644 --- a/generated_api_shadow/envoy/config/filter/udp/dns_filter/v2alpha/BUILD +++ b/api/envoy/extensions/access_loggers/wasm/v3/BUILD @@ -6,8 +6,7 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ - "//envoy/api/v2/core:pkg", - "//envoy/data/dns/v2alpha:pkg", + "//envoy/extensions/wasm/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) diff --git a/api/envoy/extensions/access_loggers/wasm/v3/wasm.proto b/api/envoy/extensions/access_loggers/wasm/v3/wasm.proto new file mode 100644 index 0000000000000..cd9db5906436f --- /dev/null +++ b/api/envoy/extensions/access_loggers/wasm/v3/wasm.proto @@ -0,0 +1,20 @@ +syntax = "proto3"; + +package envoy.extensions.access_loggers.wasm.v3; + +import "envoy/extensions/wasm/v3/wasm.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.access_loggers.wasm.v3"; +option java_outer_classname = "WasmProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#not-implemented-hide:] +// Custom configuration for an :ref:`AccessLog ` +// that calls into a WASM VM. 
+message WasmAccessLog { + envoy.extensions.wasm.v3.PluginConfig config = 1; +} diff --git a/api/envoy/extensions/clusters/dynamic_forward_proxy/v3/cluster.proto b/api/envoy/extensions/clusters/dynamic_forward_proxy/v3/cluster.proto index 6f100d9dbb7e9..869e8c42caba1 100644 --- a/api/envoy/extensions/clusters/dynamic_forward_proxy/v3/cluster.proto +++ b/api/envoy/extensions/clusters/dynamic_forward_proxy/v3/cluster.proto @@ -27,4 +27,9 @@ message ClusterConfig { // `. common.dynamic_forward_proxy.v3.DnsCacheConfig dns_cache_config = 1 [(validate.rules).message = {required: true}]; + + // If true allow the cluster configuration to disable the auto_sni and auto_san_validation options + // in the :ref:`cluster's upstream_http_protocol_options + // ` + bool allow_insecure_cluster_options = 2; } diff --git a/api/envoy/extensions/clusters/redis/v3/redis_cluster.proto b/api/envoy/extensions/clusters/redis/v3/redis_cluster.proto index cf01359e55abb..afc19777edf2b 100644 --- a/api/envoy/extensions/clusters/redis/v3/redis_cluster.proto +++ b/api/envoy/extensions/clusters/redis/v3/redis_cluster.proto @@ -19,7 +19,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // of :ref:`Envoy's support for Redis Cluster `. // // Redis Cluster is an extension of Redis which supports sharding and high availability (where a -// shard that loses its master fails over to a replica, and designates it as the new master). +// shard that loses its primary fails over to a replica, and designates it as the new primary). // However, as there is no unified frontend or proxy service in front of Redis Cluster, the client // (in this case Envoy) must locally maintain the state of the Redis Cluster, specifically the // topology. 
A random node in the cluster is queried for the topology using the `CLUSTER SLOTS diff --git a/api/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto b/api/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto index 7c72af35af33e..79cd583486ac9 100644 --- a/api/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto +++ b/api/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto @@ -18,9 +18,16 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Dynamic forward proxy common configuration] +// Configuration of circuit breakers for resolver. +message DnsCacheCircuitBreakers { + // The maximum number of pending requests that Envoy will allow to the + // resolver. If not specified, the default is 1024. + google.protobuf.UInt32Value max_pending_requests = 1; +} + // Configuration for the dynamic forward proxy DNS cache. See the :ref:`architecture overview // ` for more information. -// [#next-free-field: 7] +// [#next-free-field: 9] message DnsCacheConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.config.common.dynamic_forward_proxy.v2alpha.DnsCacheConfig"; @@ -83,4 +90,13 @@ message DnsCacheConfig { // this is used as the cache's DNS refresh rate when DNS requests are failing. If this setting is // not specified, the failure refresh rate defaults to the dns_refresh_rate. config.cluster.v3.Cluster.RefreshRate dns_failure_refresh_rate = 6; + + // The config of circuit breakers for resolver. It provides a configurable threshold. + // If `envoy.reloadable_features.enable_dns_cache_circuit_breakers` is enabled, + // envoy will use dns cache circuit breakers with default settings even if this value is not set. + DnsCacheCircuitBreakers dns_cache_circuit_breaker = 7; + + // [#next-major-version: Reconcile DNS options in a single message.] + // Always use TCP queries instead of UDP queries for DNS lookups. 
+ bool use_tcp_for_dns_lookups = 8; } diff --git a/api/envoy/extensions/common/ratelimit/v3/BUILD b/api/envoy/extensions/common/ratelimit/v3/BUILD index ee90746aa30a5..256b1e65eda58 100644 --- a/api/envoy/extensions/common/ratelimit/v3/BUILD +++ b/api/envoy/extensions/common/ratelimit/v3/BUILD @@ -7,6 +7,7 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/api/v2/ratelimit:pkg", + "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) diff --git a/api/envoy/extensions/common/ratelimit/v3/ratelimit.proto b/api/envoy/extensions/common/ratelimit/v3/ratelimit.proto index 187ae3f229c46..9255deb4b64dc 100644 --- a/api/envoy/extensions/common/ratelimit/v3/ratelimit.proto +++ b/api/envoy/extensions/common/ratelimit/v3/ratelimit.proto @@ -2,6 +2,8 @@ syntax = "proto3"; package envoy.extensions.common.ratelimit.v3; +import "envoy/type/v3/ratelimit_unit.proto"; + import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -54,6 +56,10 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // // The idea behind the API is that (1)/(2)/(3) and (4)/(5) can be sent in 1 request if desired. // This enables building complex application scenarios with a generic backend. +// +// Optionally the descriptor can contain a limit override under a "limit" key, that specifies +// the number of requests per unit to use instead of the number configured in the +// rate limiting service. message RateLimitDescriptor { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.ratelimit.RateLimitDescriptor"; @@ -69,6 +75,20 @@ message RateLimitDescriptor { string value = 2 [(validate.rules).string = {min_bytes: 1}]; } + // Override rate limit to apply to this descriptor instead of the limit + // configured in the rate limit service. See :ref:`rate limit override + // ` for more information. 
+ message RateLimitOverride { + // The number of requests per unit of time. + uint32 requests_per_unit = 1; + + // The unit of time. + type.v3.RateLimitUnit unit = 2 [(validate.rules).enum = {defined_only: true}]; + } + // Descriptor entries. repeated Entry entries = 1 [(validate.rules).repeated = {min_items: 1}]; + + // Optional rate limit override to supply to the ratelimit service. + RateLimitOverride limit = 2; } diff --git a/api/envoy/extensions/common/tap/v3/BUILD b/api/envoy/extensions/common/tap/v3/BUILD index 64688f5bb438e..eb16b73a21112 100644 --- a/api/envoy/extensions/common/tap/v3/BUILD +++ b/api/envoy/extensions/common/tap/v3/BUILD @@ -10,5 +10,6 @@ api_proto_package( "//envoy/config/core/v3:pkg", "//envoy/config/tap/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//udpa/core/v1:pkg", ], ) diff --git a/api/envoy/extensions/common/tap/v3/common.proto b/api/envoy/extensions/common/tap/v3/common.proto index 46a25b164d67a..68e80dad76b4b 100644 --- a/api/envoy/extensions/common/tap/v3/common.proto +++ b/api/envoy/extensions/common/tap/v3/common.proto @@ -5,6 +5,9 @@ package envoy.extensions.common.tap.v3; import "envoy/config/core/v3/config_source.proto"; import "envoy/config/tap/v3/common.proto"; +import "udpa/core/v1/resource_locator.proto"; + +import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -30,7 +33,12 @@ message CommonExtensionConfig { config.core.v3.ConfigSource config_source = 1 [(validate.rules).message = {required: true}]; // Tap config to request from XDS server. - string name = 2 [(validate.rules).string = {min_bytes: 1}]; + string name = 2 [(udpa.annotations.field_migrate).oneof_promotion = "name_specifier"]; + + // Resource locator for TAP. This is mutually exclusive to *name*. 
+ // [#not-implemented-hide:] + udpa.core.v1.ResourceLocator tap_resource_locator = 3 + [(udpa.annotations.field_migrate).oneof_promotion = "name_specifier"]; } oneof config_type { diff --git a/api/envoy/extensions/common/tap/v4alpha/BUILD b/api/envoy/extensions/common/tap/v4alpha/BUILD index d1fe49142a8e2..351e64d868455 100644 --- a/api/envoy/extensions/common/tap/v4alpha/BUILD +++ b/api/envoy/extensions/common/tap/v4alpha/BUILD @@ -7,8 +7,9 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/core/v4alpha:pkg", - "//envoy/config/tap/v3:pkg", + "//envoy/config/tap/v4alpha:pkg", "//envoy/extensions/common/tap/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//udpa/core/v1:pkg", ], ) diff --git a/api/envoy/extensions/common/tap/v4alpha/common.proto b/api/envoy/extensions/common/tap/v4alpha/common.proto index 63de14a3d6f63..536f13d049c34 100644 --- a/api/envoy/extensions/common/tap/v4alpha/common.proto +++ b/api/envoy/extensions/common/tap/v4alpha/common.proto @@ -3,7 +3,9 @@ syntax = "proto3"; package envoy.extensions.common.tap.v4alpha; import "envoy/config/core/v4alpha/config_source.proto"; -import "envoy/config/tap/v3/common.proto"; +import "envoy/config/tap/v4alpha/common.proto"; + +import "udpa/core/v1/resource_locator.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; @@ -30,8 +32,14 @@ message CommonExtensionConfig { config.core.v4alpha.ConfigSource config_source = 1 [(validate.rules).message = {required: true}]; - // Tap config to request from XDS server. - string name = 2 [(validate.rules).string = {min_bytes: 1}]; + oneof name_specifier { + // Tap config to request from XDS server. + string name = 2; + + // Resource locator for TAP. This is mutually exclusive to *name*. 
+ // [#not-implemented-hide:] + udpa.core.v1.ResourceLocator tap_resource_locator = 3; + } } oneof config_type { @@ -42,7 +50,7 @@ message CommonExtensionConfig { // If specified, the tap filter will be configured via a static configuration that cannot be // changed. - config.tap.v3.TapConfig static_config = 2; + config.tap.v4alpha.TapConfig static_config = 2; // [#not-implemented-hide:] Configuration to use for TapDS updates for the filter. TapDSConfig tapds_config = 3; diff --git a/api/envoy/extensions/compression/gzip/compressor/v3/BUILD b/api/envoy/extensions/compression/gzip/compressor/v3/BUILD new file mode 100644 index 0000000000000..ef3541ebcb1df --- /dev/null +++ b/api/envoy/extensions/compression/gzip/compressor/v3/BUILD @@ -0,0 +1,9 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/api/envoy/extensions/compression/gzip/compressor/v3/gzip.proto b/api/envoy/extensions/compression/gzip/compressor/v3/gzip.proto new file mode 100644 index 0000000000000..d4d60eaa43ee2 --- /dev/null +++ b/api/envoy/extensions/compression/gzip/compressor/v3/gzip.proto @@ -0,0 +1,79 @@ +syntax = "proto3"; + +package envoy.extensions.compression.gzip.compressor.v3; + +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.compression.gzip.compressor.v3"; +option java_outer_classname = "GzipProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Gzip Compressor] +// [#extension: envoy.compression.gzip.compressor] + +// [#next-free-field: 6] +message Gzip { + // All the values of this enumeration translate directly to 
zlib's compression strategies. + // For more information about each strategy, please refer to zlib manual. + enum CompressionStrategy { + DEFAULT_STRATEGY = 0; + FILTERED = 1; + HUFFMAN_ONLY = 2; + RLE = 3; + FIXED = 4; + } + + enum CompressionLevel { + option allow_alias = true; + + DEFAULT_COMPRESSION = 0; + BEST_SPEED = 1; + COMPRESSION_LEVEL_1 = 1; + COMPRESSION_LEVEL_2 = 2; + COMPRESSION_LEVEL_3 = 3; + COMPRESSION_LEVEL_4 = 4; + COMPRESSION_LEVEL_5 = 5; + COMPRESSION_LEVEL_6 = 6; + COMPRESSION_LEVEL_7 = 7; + COMPRESSION_LEVEL_8 = 8; + COMPRESSION_LEVEL_9 = 9; + BEST_COMPRESSION = 9; + } + + // Value from 1 to 9 that controls the amount of internal memory used by zlib. Higher values + // use more memory, but are faster and produce better compression results. The default value is 5. + google.protobuf.UInt32Value memory_level = 1 [(validate.rules).uint32 = {lte: 9 gte: 1}]; + + // A value used for selecting the zlib compression level. This setting will affect speed and + // amount of compression applied to the content. "BEST_COMPRESSION" provides higher compression + // at the cost of higher latency and is equal to "COMPRESSION_LEVEL_9". "BEST_SPEED" provides + // lower compression with minimum impact on response time, the same as "COMPRESSION_LEVEL_1". + // "DEFAULT_COMPRESSION" provides an optimal result between speed and compression. According + // to zlib's manual this level gives the same result as "COMPRESSION_LEVEL_6". + // This field will be set to "DEFAULT_COMPRESSION" if not specified. + CompressionLevel compression_level = 2 [(validate.rules).enum = {defined_only: true}]; + + // A value used for selecting the zlib compression strategy which is directly related to the + // characteristics of the content. Most of the time "DEFAULT_STRATEGY" will be the best choice, + // which is also the default value for the parameter, though there are situations when + // changing this parameter might produce better results. 
For example, run-length encoding (RLE) + // is typically used when the content is known for having sequences which same data occurs many + // consecutive times. For more information about each strategy, please refer to zlib manual. + CompressionStrategy compression_strategy = 3 [(validate.rules).enum = {defined_only: true}]; + + // Value from 9 to 15 that represents the base two logarithmic of the compressor's window size. + // Larger window results in better compression at the expense of memory usage. The default is 12 + // which will produce a 4096 bytes window. For more details about this parameter, please refer to + // zlib manual > deflateInit2. + google.protobuf.UInt32Value window_bits = 4 [(validate.rules).uint32 = {lte: 15 gte: 9}]; + + // Value for Zlib's next output buffer. If not set, defaults to 4096. + // See https://www.zlib.net/manual.html for more details. Also see + // https://github.com/envoyproxy/envoy/issues/8448 for context on this filter's performance. + google.protobuf.UInt32Value chunk_size = 5 [(validate.rules).uint32 = {lte: 65536 gte: 4096}]; +} diff --git a/api/envoy/extensions/compression/gzip/decompressor/v3/BUILD b/api/envoy/extensions/compression/gzip/decompressor/v3/BUILD new file mode 100644 index 0000000000000..ef3541ebcb1df --- /dev/null +++ b/api/envoy/extensions/compression/gzip/decompressor/v3/BUILD @@ -0,0 +1,9 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/api/envoy/extensions/compression/gzip/decompressor/v3/gzip.proto b/api/envoy/extensions/compression/gzip/decompressor/v3/gzip.proto new file mode 100644 index 0000000000000..0ab0d947bd01d --- /dev/null +++ b/api/envoy/extensions/compression/gzip/decompressor/v3/gzip.proto @@ -0,0 +1,31 @@ +syntax = "proto3"; + +package envoy.extensions.compression.gzip.decompressor.v3; + +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.compression.gzip.decompressor.v3"; +option java_outer_classname = "GzipProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Gzip Decompressor] +// [#extension: envoy.compression.gzip.decompressor] + +message Gzip { + // Value from 9 to 15 that represents the base two logarithmic of the decompressor's window size. + // The decompression window size needs to be equal or larger than the compression window size. + // The default is 12 to match the default in the + // :ref:`gzip compressor `. + // For more details about this parameter, please refer to `zlib manual `_ > inflateInit2. + google.protobuf.UInt32Value window_bits = 1 [(validate.rules).uint32 = {lte: 15 gte: 9}]; + + // Value for zlib's decompressor output buffer. If not set, defaults to 4096. + // See https://www.zlib.net/manual.html for more details. 
+ google.protobuf.UInt32Value chunk_size = 2 [(validate.rules).uint32 = {lte: 65536 gte: 4096}]; +} diff --git a/api/envoy/extensions/filter/udp/dns_filter/v3alpha/dns_filter.proto b/api/envoy/extensions/filter/udp/dns_filter/v3alpha/dns_filter.proto deleted file mode 100644 index 38a8872d323e1..0000000000000 --- a/api/envoy/extensions/filter/udp/dns_filter/v3alpha/dns_filter.proto +++ /dev/null @@ -1,52 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filter.udp.dns_filter.v3alpha; - -import "envoy/config/core/v3/base.proto"; -import "envoy/data/dns/v3/dns_table.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filter.udp.dns_filter.v3alpha"; -option java_outer_classname = "DnsFilterProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: DNS Filter] -// DNS Filter :ref:`configuration overview `. -// [#extension: envoy.filters.udp_listener.dns_filter] - -// Configuration for the DNS filter. -message DnsFilterConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.udp.dns_filter.v2alpha.DnsFilterConfig"; - - // This message contains the configuration for the Dns Filter operating - // in a server context. This message will contain the virtual hosts and - // associated addresses with which Envoy will respond to queries - message ServerContextConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.udp.dns_filter.v2alpha.DnsFilterConfig.ServerContextConfig"; - - oneof config_source { - option (validate.required) = true; - - // Load the configuration specified from the control plane - data.dns.v3.DnsTable inline_dns_table = 1; - - // Seed the filter configuration from an external path. 
This source - // is a yaml formatted file that contains the DnsTable driving Envoy's - // responses to DNS queries - config.core.v3.DataSource external_dns_table = 2; - } - } - - // The stat prefix used when emitting DNS filter statistics - string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; - - // Server context configuration - ServerContextConfig server_config = 2; -} diff --git a/api/envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.proto b/api/envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.proto index 3d2ef3e96d968..8dd851f4020a5 100644 --- a/api/envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.proto +++ b/api/envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.proto @@ -5,7 +5,6 @@ package envoy.extensions.filters.http.adaptive_concurrency.v3; import "envoy/config/core/v3/base.proto"; import "envoy/type/v3/percent.proto"; -import "google/api/annotations.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; diff --git a/api/envoy/extensions/filters/http/admission_control/v3alpha/BUILD b/api/envoy/extensions/filters/http/admission_control/v3alpha/BUILD new file mode 100644 index 0000000000000..f139cce54af25 --- /dev/null +++ b/api/envoy/extensions/filters/http/admission_control/v3alpha/BUILD @@ -0,0 +1,13 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/core/v3:pkg", + "//envoy/type/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/extensions/filters/http/admission_control/v3alpha/admission_control.proto b/api/envoy/extensions/filters/http/admission_control/v3alpha/admission_control.proto new file mode 100644 index 0000000000000..6f01c88885f4e --- /dev/null +++ b/api/envoy/extensions/filters/http/admission_control/v3alpha/admission_control.proto @@ -0,0 +1,90 @@ +syntax = "proto3"; + +package envoy.extensions.filters.http.admission_control.v3alpha; + +import "envoy/config/core/v3/base.proto"; +import "envoy/type/v3/range.proto"; + +import "google/api/annotations.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; +import "google/rpc/status.proto"; + +import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.http.admission_control.v3alpha"; +option java_outer_classname = "AdmissionControlProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).work_in_progress = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Admission Control] +// [#extension: envoy.filters.http.admission_control] + +message AdmissionControl { + // Default method of specifying what constitutes a successful request. All status codes that + // indicate a successful request must be explicitly specified if not relying on the default + // values. + message SuccessCriteria { + message HttpCriteria { + // Status code ranges that constitute a successful request. Configurable codes are in the + // range [100, 600). 
+ repeated type.v3.Int32Range http_success_status = 1 + [(validate.rules).repeated = {min_items: 1}]; + } + + message GrpcCriteria { + // Status codes that constitute a successful request. + // Mappings can be found at: https://github.com/grpc/grpc/blob/master/doc/statuscodes.md. + repeated uint32 grpc_success_status = 1 [(validate.rules).repeated = {min_items: 1}]; + } + + // If HTTP criteria are unspecified, all HTTP status codes below 500 are treated as successful + // responses. + // + // .. note:: + // + // The default HTTP codes considered successful by the admission controller are done so due + // to the unlikelihood that sending fewer requests would change their behavior (for example: + // redirects, unauthorized access, or bad requests won't be alleviated by sending less + // traffic). + HttpCriteria http_criteria = 1; + + // GRPC status codes to consider as request successes. If unspecified, defaults to: Ok, + // Cancelled, Unknown, InvalidArgument, NotFound, AlreadyExists, Unauthenticated, + // FailedPrecondition, OutOfRange, PermissionDenied, and Unimplemented. + // + // .. note:: + // + // The default gRPC codes that are considered successful by the admission controller are + // chosen because of the unlikelihood that sending fewer requests will change the behavior. + GrpcCriteria grpc_criteria = 2; + } + + // If set to false, the admission control filter will operate as a pass-through filter. If the + // message is unspecified, the filter will be enabled. + config.core.v3.RuntimeFeatureFlag enabled = 1; + + // Defines how a request is considered a success/failure. + oneof evaluation_criteria { + option (validate.required) = true; + + SuccessCriteria success_criteria = 2; + } + + // The sliding time window over which the success rate is calculated. The window is rounded to the + // nearest second. Defaults to 120s. 
+ google.protobuf.Duration sampling_window = 3; + + // Rejection probability is defined by the formula:: + // + // max(0, (rq_count - aggression_coefficient * rq_success_count) / (rq_count + 1)) + // + // The coefficient dictates how aggressively the admission controller will throttle requests as + // the success rate drops. Lower values will cause throttling to kick in at higher success rates + // and result in more aggressive throttling. Any values less than 1.0, will be set to 1.0. If the + // message is unspecified, the coefficient is 2.0. + config.core.v3.RuntimeDouble aggression_coefficient = 4; +} diff --git a/api/envoy/extensions/filters/http/cache/v3alpha/cache.proto b/api/envoy/extensions/filters/http/cache/v3alpha/cache.proto index 1ff305bb0e279..f78b1d24ac2ce 100644 --- a/api/envoy/extensions/filters/http/cache/v3alpha/cache.proto +++ b/api/envoy/extensions/filters/http/cache/v3alpha/cache.proto @@ -61,7 +61,7 @@ message CacheConfig { // contents of a response, as described by // https://httpwg.org/specs/rfc7234.html#caching.negotiated.responses. // - // During insertion, *allowed_vary_headers* acts as a whitelist: if a + // During insertion, *allowed_vary_headers* acts as a allowlist: if a // response's *vary* header mentions any header names that aren't in // *allowed_vary_headers*, that response will not be cached. // diff --git a/api/envoy/extensions/filters/http/cache/v4alpha/BUILD b/api/envoy/extensions/filters/http/cache/v4alpha/BUILD new file mode 100644 index 0000000000000..63033acab5cf1 --- /dev/null +++ b/api/envoy/extensions/filters/http/cache/v4alpha/BUILD @@ -0,0 +1,14 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/route/v4alpha:pkg", + "//envoy/extensions/filters/http/cache/v3alpha:pkg", + "//envoy/type/matcher/v4alpha:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/extensions/filters/http/cache/v4alpha/cache.proto b/api/envoy/extensions/filters/http/cache/v4alpha/cache.proto new file mode 100644 index 0000000000000..19921edb0310b --- /dev/null +++ b/api/envoy/extensions/filters/http/cache/v4alpha/cache.proto @@ -0,0 +1,84 @@ +syntax = "proto3"; + +package envoy.extensions.filters.http.cache.v4alpha; + +import "envoy/config/route/v4alpha/route_components.proto"; +import "envoy/type/matcher/v4alpha/string.proto"; + +import "google/protobuf/any.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.http.cache.v4alpha"; +option java_outer_classname = "CacheProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).work_in_progress = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: HTTP Cache Filter] +// [#extension: envoy.filters.http.cache] + +message CacheConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.cache.v3alpha.CacheConfig"; + + // [#not-implemented-hide:] + // Modifies cache key creation by restricting which parts of the URL are included. + message KeyCreatorParams { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.cache.v3alpha.CacheConfig.KeyCreatorParams"; + + // If true, exclude the URL scheme from the cache key. Set to true if your origins always + // produce the same response for http and https requests. 
+ bool exclude_scheme = 1; + + // If true, exclude the host from the cache key. Set to true if your origins' responses don't + // ever depend on host. + bool exclude_host = 2; + + // If *query_parameters_included* is nonempty, only query parameters matched + // by one or more of its matchers are included in the cache key. Any other + // query params will not affect cache lookup. + repeated config.route.v4alpha.QueryParameterMatcher query_parameters_included = 3; + + // If *query_parameters_excluded* is nonempty, query parameters matched by one + // or more of its matchers are excluded from the cache key (even if also + // matched by *query_parameters_included*), and will not affect cache lookup. + repeated config.route.v4alpha.QueryParameterMatcher query_parameters_excluded = 4; + } + + // Config specific to the cache storage implementation. + google.protobuf.Any typed_config = 1 [(validate.rules).any = {required: true}]; + + // [#not-implemented-hide:] + // + // + // List of allowed *Vary* headers. + // + // The *vary* response header holds a list of header names that affect the + // contents of a response, as described by + // https://httpwg.org/specs/rfc7234.html#caching.negotiated.responses. + // + // During insertion, *allowed_vary_headers* acts as a allowlist: if a + // response's *vary* header mentions any header names that aren't in + // *allowed_vary_headers*, that response will not be cached. + // + // During lookup, *allowed_vary_headers* controls what request headers will be + // sent to the cache storage implementation. + repeated type.matcher.v4alpha.StringMatcher allowed_vary_headers = 2; + + // [#not-implemented-hide:] + // + // + // Modifies cache key creation by restricting which parts of the URL are included. + KeyCreatorParams key_creator_params = 3; + + // [#not-implemented-hide:] + // + // + // Max body size the cache filter will insert into a cache. 
0 means unlimited (though the cache + // storage implementation may have its own limit beyond which it will reject insertions). + uint32 max_body_bytes = 4; +} diff --git a/api/envoy/extensions/filters/http/compressor/v3/compressor.proto b/api/envoy/extensions/filters/http/compressor/v3/compressor.proto index 0eefe55140d26..0bfa5c1860d44 100644 --- a/api/envoy/extensions/filters/http/compressor/v3/compressor.proto +++ b/api/envoy/extensions/filters/http/compressor/v3/compressor.proto @@ -3,11 +3,14 @@ syntax = "proto3"; package envoy.extensions.filters.http.compressor.v3; import "envoy/config/core/v3/base.proto"; +import "envoy/config/core/v3/extension.proto"; +import "google/protobuf/any.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.compressor.v3"; option java_outer_classname = "CompressorProto"; @@ -15,8 +18,10 @@ option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Compressor] +// Compressor :ref:`configuration overview `. +// [#extension: envoy.filters.http.compressor] -// [#next-free-field: 6] +// [#next-free-field: 7] message Compressor { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.compressor.v2.Compressor"; @@ -46,4 +51,10 @@ message Compressor { // Runtime flag that controls whether the filter is enabled or not. If set to false, the // filter will operate as a pass-through filter. If not specified, defaults to enabled. config.core.v3.RuntimeFeatureFlag runtime_enabled = 5; + + // A compressor library to use for compression. Currently only + // :ref:`envoy.compression.gzip.compressor` + // is included in Envoy. + // This field is ignored if used in the context of the gzip http-filter, but is mandatory otherwise. 
+ config.core.v3.TypedExtensionConfig compressor_library = 6; } diff --git a/api/envoy/extensions/filters/http/csrf/v4alpha/BUILD b/api/envoy/extensions/filters/http/csrf/v4alpha/BUILD new file mode 100644 index 0000000000000..72211218ff525 --- /dev/null +++ b/api/envoy/extensions/filters/http/csrf/v4alpha/BUILD @@ -0,0 +1,14 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/core/v4alpha:pkg", + "//envoy/extensions/filters/http/csrf/v3:pkg", + "//envoy/type/matcher/v4alpha:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/extensions/filters/http/csrf/v4alpha/csrf.proto b/api/envoy/extensions/filters/http/csrf/v4alpha/csrf.proto new file mode 100644 index 0000000000000..dda915a059af5 --- /dev/null +++ b/api/envoy/extensions/filters/http/csrf/v4alpha/csrf.proto @@ -0,0 +1,54 @@ +syntax = "proto3"; + +package envoy.extensions.filters.http.csrf.v4alpha; + +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/type/matcher/v4alpha/string.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.http.csrf.v4alpha"; +option java_outer_classname = "CsrfProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: CSRF] +// Cross-Site Request Forgery :ref:`configuration overview `. +// [#extension: envoy.filters.http.csrf] + +// CSRF filter config. +message CsrfPolicy { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.csrf.v3.CsrfPolicy"; + + // Specifies the % of requests for which the CSRF filter is enabled. 
+ // + // If :ref:`runtime_key ` is specified, + // Envoy will lookup the runtime key to get the percentage of requests to filter. + // + // .. note:: + // + // This field defaults to 100/:ref:`HUNDRED + // `. + config.core.v4alpha.RuntimeFractionalPercent filter_enabled = 1 + [(validate.rules).message = {required: true}]; + + // Specifies that CSRF policies will be evaluated and tracked, but not enforced. + // + // This is intended to be used when ``filter_enabled`` is off and will be ignored otherwise. + // + // If :ref:`runtime_key ` is specified, + // Envoy will lookup the runtime key to get the percentage of requests for which it will evaluate + // and track the request's *Origin* and *Destination* to determine if it's valid, but will not + // enforce any policies. + config.core.v4alpha.RuntimeFractionalPercent shadow_enabled = 2; + + // Specifies additional source origins that will be allowed in addition to + // the destination origin. + // + // More information on how this can be configured via runtime can be found + // :ref:`here `. 
+ repeated type.matcher.v4alpha.StringMatcher additional_origins = 3; +} diff --git a/generated_api_shadow/envoy/config/wasm/v2alpha/BUILD b/api/envoy/extensions/filters/http/decompressor/v3/BUILD similarity index 87% rename from generated_api_shadow/envoy/config/wasm/v2alpha/BUILD rename to api/envoy/extensions/filters/http/decompressor/v3/BUILD index 69168ad0cf246..2c3dad6453b65 100644 --- a/generated_api_shadow/envoy/config/wasm/v2alpha/BUILD +++ b/api/envoy/extensions/filters/http/decompressor/v3/BUILD @@ -6,7 +6,7 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ - "//envoy/api/v2/core:pkg", + "//envoy/config/core/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) diff --git a/api/envoy/extensions/filters/http/decompressor/v3/decompressor.proto b/api/envoy/extensions/filters/http/decompressor/v3/decompressor.proto new file mode 100644 index 0000000000000..1e3d72766d054 --- /dev/null +++ b/api/envoy/extensions/filters/http/decompressor/v3/decompressor.proto @@ -0,0 +1,57 @@ +syntax = "proto3"; + +package envoy.extensions.filters.http.decompressor.v3; + +import "envoy/config/core/v3/base.proto"; +import "envoy/config/core/v3/extension.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.http.decompressor.v3"; +option java_outer_classname = "DecompressorProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Decompressor] +// [#extension: envoy.filters.http.decompressor] + +message Decompressor { + // Common configuration for filter behavior on both the request and response direction. 
+ message CommonDirectionConfig { + // Runtime flag that controls whether the filter is enabled for decompression or not. If set to false, the + // filter will operate as a pass-through filter. If the message is unspecified, the filter will be enabled. + config.core.v3.RuntimeFeatureFlag enabled = 1; + } + + // Configuration for filter behavior on the request direction. + message RequestDirectionConfig { + CommonDirectionConfig common_config = 1; + + // If set to true, and response decompression is enabled, the filter modifies the Accept-Encoding + // request header by appending the decompressor_library's encoding. Defaults to true. + google.protobuf.BoolValue advertise_accept_encoding = 2; + } + + // Configuration for filter behavior on the response direction. + message ResponseDirectionConfig { + CommonDirectionConfig common_config = 1; + } + + // A decompressor library to use for both request and response decompression. Currently only + // :ref:`envoy.compression.gzip.compressor` + // is included in Envoy. + config.core.v3.TypedExtensionConfig decompressor_library = 1 + [(validate.rules).message = {required: true}]; + + // Configuration for request decompression. Decompression is enabled by default if left empty. + RequestDirectionConfig request_direction_config = 2; + + // Configuration for response decompression. Decompression is enabled by default if left empty. 
+ ResponseDirectionConfig response_direction_config = 3; +} diff --git a/api/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto b/api/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto index 44673ad6ff263..d9264ca66b664 100644 --- a/api/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto +++ b/api/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto @@ -3,6 +3,7 @@ syntax = "proto3"; package envoy.extensions.filters.http.ext_authz.v3; import "envoy/config/core/v3/base.proto"; +import "envoy/config/core/v3/config_source.proto"; import "envoy/config/core/v3/grpc_service.proto"; import "envoy/config/core/v3/http_uri.proto"; import "envoy/type/matcher/v3/string.proto"; @@ -22,7 +23,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // External Authorization :ref:`configuration overview `. // [#extension: envoy.filters.http.ext_authz] -// [#next-free-field: 11] +// [#next-free-field: 13] message ExtAuthz { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.ext_authz.v2.ExtAuthz"; @@ -40,6 +41,11 @@ message ExtAuthz { HttpService http_service = 3; } + // API version for ext_authz transport protocol. This describes the ext_authz gRPC endpoint and + // version of messages used on the wire. + config.core.v3.ApiVersion transport_api_version = 12 + [(validate.rules).enum = {defined_only: true}]; + // Changes filter's behaviour on errors: // // 1. When set to true, the filter will *accept* client request even if the communication with @@ -97,6 +103,15 @@ message ExtAuthz { // If this field is not specified, the filter will be enabled for all requests. config.core.v3.RuntimeFractionalPercent filter_enabled = 9; + // Specifies whether to deny the requests, when the filter is disabled. + // If :ref:`runtime_key ` is specified, + // Envoy will lookup the runtime key to determine whether to deny request for + // filter protected path at filter disabling. 
If filter is disabled in + typed_per_filter_config for the path, requests will not be denied. + // + // If this field is not specified, all requests will be allowed when disabled. + config.core.v3.RuntimeFeatureFlag deny_at_disable = 11; + + // Specifies if the peer certificate is sent to the external service. // // When this field is true, Envoy will include the peer X.509 certificate, if available, in the @@ -198,6 +213,11 @@ message AuthorizationResponse { // Note that coexistent headers will be overridden. type.matcher.v3.ListStringMatcher allowed_upstream_headers = 1; + // When this :ref:`list ` is set, authorization + // response headers that have a correspondent match will be added to the original client request. Note + // that coexistent headers will be appended. + type.matcher.v3.ListStringMatcher allowed_upstream_headers_to_append = 3; + // When this :ref:`list ` is set, authorization // response headers that have a correspondent match will be added to the client's response. Note // that when this list is *not* set, all the authorization response headers, except *Authority diff --git a/api/envoy/extensions/filters/http/ext_authz/v4alpha/BUILD b/api/envoy/extensions/filters/http/ext_authz/v4alpha/BUILD new file mode 100644 index 0000000000000..9a3d8a574a9b0 --- /dev/null +++ b/api/envoy/extensions/filters/http/ext_authz/v4alpha/BUILD @@ -0,0 +1,16 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/annotations:pkg", + "//envoy/config/core/v4alpha:pkg", + "//envoy/extensions/filters/http/ext_authz/v3:pkg", + "//envoy/type/matcher/v4alpha:pkg", + "//envoy/type/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/extensions/filters/http/ext_authz/v4alpha/ext_authz.proto b/api/envoy/extensions/filters/http/ext_authz/v4alpha/ext_authz.proto new file mode 100644 index 0000000000000..7442715a0db34 --- /dev/null +++ b/api/envoy/extensions/filters/http/ext_authz/v4alpha/ext_authz.proto @@ -0,0 +1,265 @@ +syntax = "proto3"; + +package envoy.extensions.filters.http.ext_authz.v4alpha; + +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/config/core/v4alpha/config_source.proto"; +import "envoy/config/core/v4alpha/grpc_service.proto"; +import "envoy/config/core/v4alpha/http_uri.proto"; +import "envoy/type/matcher/v4alpha/string.proto"; +import "envoy/type/v3/http_status.proto"; + +import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.http.ext_authz.v4alpha"; +option java_outer_classname = "ExtAuthzProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: External Authorization] +// External Authorization :ref:`configuration overview `. +// [#extension: envoy.filters.http.ext_authz] + +// [#next-free-field: 13] +message ExtAuthz { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.ext_authz.v3.ExtAuthz"; + + reserved 4; + + reserved "use_alpha"; + + // External authorization service configuration. 
+ oneof services { + // gRPC service configuration (default timeout: 200ms). + config.core.v4alpha.GrpcService grpc_service = 1; + + // HTTP service configuration (default timeout: 200ms). + HttpService http_service = 3; + } + + // API version for ext_authz transport protocol. This describes the ext_authz gRPC endpoint and + // version of messages used on the wire. + config.core.v4alpha.ApiVersion transport_api_version = 12 + [(validate.rules).enum = {defined_only: true}]; + + // Changes filter's behaviour on errors: + // + // 1. When set to true, the filter will *accept* client request even if the communication with + // the authorization service has failed, or if the authorization service has returned a HTTP 5xx + // error. + // + // 2. When set to false, ext-authz will *reject* client requests and return a *Forbidden* + // response if the communication with the authorization service has failed, or if the + // authorization service has returned a HTTP 5xx error. + // + // Note that errors can be *always* tracked in the :ref:`stats + // `. + bool failure_mode_allow = 2; + + // Enables filter to buffer the client request body and send it within the authorization request. + // A ``x-envoy-auth-partial-body: false|true`` metadata header will be added to the authorization + // request message indicating if the body data is partial. + BufferSettings with_request_body = 5; + + // Clears route cache in order to allow the external authorization service to correctly affect + // routing decisions. Filter clears all cached routes when: + // + // 1. The field is set to *true*. + // + // 2. The status returned from the authorization service is a HTTP 200 or gRPC 0. + // + // 3. At least one *authorization response header* is added to the client request, or is used for + // altering another client request header. 
+ // + bool clear_route_cache = 6; + + // Sets the HTTP status that is returned to the client when there is a network error between the + // filter and the authorization server. The default status is HTTP 403 Forbidden. + type.v3.HttpStatus status_on_error = 7; + + // Specifies a list of metadata namespaces whose values, if present, will be passed to the + // ext_authz service as an opaque *protobuf::Struct*. + // + // For example, if the *jwt_authn* filter is used and :ref:`payload_in_metadata + // ` is set, + // then the following will pass the jwt payload to the authorization server. + // + // .. code-block:: yaml + // + // metadata_context_namespaces: + // - envoy.filters.http.jwt_authn + // + repeated string metadata_context_namespaces = 8; + + // Specifies if the filter is enabled. + // + // If :ref:`runtime_key ` is specified, + // Envoy will lookup the runtime key to get the percentage of requests to filter. + // + // If this field is not specified, the filter will be enabled for all requests. + config.core.v4alpha.RuntimeFractionalPercent filter_enabled = 9; + + // Specifies whether to deny the requests, when the filter is disabled. + // If :ref:`runtime_key ` is specified, + // Envoy will lookup the runtime key to determine whether to deny request for + // filter protected path at filter disabling. If filter is disabled in + // typed_per_filter_config for the path, requests will not be denied. + // + // If this field is not specified, all requests will be allowed when disabled. + config.core.v4alpha.RuntimeFeatureFlag deny_at_disable = 11; + + // Specifies if the peer certificate is sent to the external service. + // + // When this field is true, Envoy will include the peer X.509 certificate, if available, in the + // :ref:`certificate`. + bool include_peer_certificate = 10; +} + +// Configuration for buffering the request data. 
+message BufferSettings { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.ext_authz.v3.BufferSettings"; + + // Sets the maximum size of a message body that the filter will hold in memory. Envoy will return + // *HTTP 413* and will *not* initiate the authorization process when buffer reaches the number + // set in this field. Note that this setting will have precedence over :ref:`failure_mode_allow + // `. + uint32 max_request_bytes = 1 [(validate.rules).uint32 = {gt: 0}]; + + // When this field is true, Envoy will buffer the message until *max_request_bytes* is reached. + // The authorization request will be dispatched and no 413 HTTP error will be returned by the + // filter. + bool allow_partial_message = 2; +} + +// HttpService is used for raw HTTP communication between the filter and the authorization service. +// When configured, the filter will parse the client request and use these attributes to call the +// authorization server. Depending on the response, the filter may reject or accept the client +// request. Note that in any of these events, metadata can be added, removed or overridden by the +// filter: +// +// *On authorization request*, a list of allowed request headers may be supplied. See +// :ref:`allowed_headers +// ` +// for details. Additional headers metadata may be added to the authorization request. See +// :ref:`headers_to_add +// ` for +// details. +// +// On authorization response status HTTP 200 OK, the filter will allow traffic to the upstream and +// additional headers metadata may be added to the original client request. See +// :ref:`allowed_upstream_headers +// ` +// for details. +// +// On other authorization response statuses, the filter will not allow traffic. Additional headers +// metadata as well as body may be added to the client's response. See :ref:`allowed_client_headers +// ` +// for details. 
+// [#next-free-field: 9] +message HttpService { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.ext_authz.v3.HttpService"; + + reserved 3, 4, 5, 6; + + // Sets the HTTP server URI which the authorization requests must be sent to. + config.core.v4alpha.HttpUri server_uri = 1; + + // Sets a prefix to the value of authorization request header *Path*. + string path_prefix = 2; + + // Settings used for controlling authorization request metadata. + AuthorizationRequest authorization_request = 7; + + // Settings used for controlling authorization response metadata. + AuthorizationResponse authorization_response = 8; +} + +message AuthorizationRequest { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.ext_authz.v3.AuthorizationRequest"; + + // Authorization request will include the client request headers that have a correspondent match + // in the :ref:`list `. Note that in addition to the + // user's supplied matchers: + // + // 1. *Host*, *Method*, *Path* and *Content-Length* are automatically included to the list. + // + // 2. *Content-Length* will be set to 0 and the request to the authorization service will not have + // a message body. However, the authorization request can include the buffered client request body + // (controlled by :ref:`with_request_body + // ` setting), + // consequently the value of *Content-Length* of the authorization request reflects the size of + // its payload size. + // + type.matcher.v4alpha.ListStringMatcher allowed_headers = 1; + + // Sets a list of headers that will be included to the request to authorization service. Note that + // client request of the same key will be overridden. 
+ repeated config.core.v4alpha.HeaderValue headers_to_add = 2; +} + +message AuthorizationResponse { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.ext_authz.v3.AuthorizationResponse"; + + // When this :ref:`list ` is set, authorization + // response headers that have a correspondent match will be added to the original client request. + // Note that coexistent headers will be overridden. + type.matcher.v4alpha.ListStringMatcher allowed_upstream_headers = 1; + + // When this :ref:`list ` is set, authorization + // response headers that have a correspondent match will be added to the original client request. Note + // that coexistent headers will be appended. + type.matcher.v4alpha.ListStringMatcher allowed_upstream_headers_to_append = 3; + + // When this :ref:`list ` is set, authorization + // response headers that have a correspondent match will be added to the client's response. Note + // that when this list is *not* set, all the authorization response headers, except *Authority + // (Host)* will be in the response to the client. When a header is included in this list, *Path*, + // *Status*, *Content-Length*, *WWWAuthenticate* and *Location* are automatically added. + type.matcher.v4alpha.ListStringMatcher allowed_client_headers = 2; +} + +// Extra settings on a per virtualhost/route/weighted-cluster level. +message ExtAuthzPerRoute { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.ext_authz.v3.ExtAuthzPerRoute"; + + oneof override { + option (validate.required) = true; + + // Disable the ext auth filter for this particular vhost or route. + // If disabled is specified in multiple per-filter-configs, the most specific one will be used. + bool disabled = 1 [(validate.rules).bool = {const: true}]; + + // Check request settings for this route. + CheckSettings check_settings = 2 [(validate.rules).message = {required: true}]; + } +} + +// Extra settings for the check request. 
You can use this to provide extra context for the +// external authorization server on specific virtual hosts \ routes. For example, adding a context +// extension on the virtual host level can give the ext-authz server information on what virtual +// host is used without needing to parse the host header. If CheckSettings is specified in multiple +// per-filter-configs, they will be merged in order, and the result will be used. +message CheckSettings { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.ext_authz.v3.CheckSettings"; + + // Context extensions to set on the CheckRequest's + // :ref:`AttributeContext.context_extensions` + // + // Merge semantics for this field are such that keys from more specific configs override. + // + // .. note:: + // + // These settings are only applied to a filter configured with a + // :ref:`grpc_service`. + map context_extensions = 1; +} diff --git a/api/envoy/extensions/filters/http/fault/v3/fault.proto b/api/envoy/extensions/filters/http/fault/v3/fault.proto index 534a0da35b16c..d28ed28b11100 100644 --- a/api/envoy/extensions/filters/http/fault/v3/fault.proto +++ b/api/envoy/extensions/filters/http/fault/v3/fault.proto @@ -21,6 +21,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // Fault Injection :ref:`configuration overview `. // [#extension: envoy.filters.http.fault] +// [#next-free-field: 6] message FaultAbort { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.fault.v2.FaultAbort"; @@ -41,6 +42,9 @@ message FaultAbort { // HTTP status code to use to abort the HTTP request. uint32 http_status = 2 [(validate.rules).uint32 = {lt: 600 gte: 200}]; + // gRPC status code to use to abort the gRPC request. + uint32 grpc_status = 5; + // Fault aborts are controlled via an HTTP header (if applicable). 
HeaderAbort header_abort = 4; } @@ -50,7 +54,7 @@ message FaultAbort { type.v3.FractionalPercent percentage = 3; } -// [#next-free-field: 14] +// [#next-free-field: 15] message HTTPFault { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.fault.v2.HTTPFault"; @@ -133,4 +137,8 @@ message HTTPFault { // The runtime key to override the :ref:`default ` // runtime. The default is: fault.http.rate_limit.response_percent string response_rate_limit_percent_runtime = 13; + + // The runtime key to override the :ref:`default ` + // runtime. The default is: fault.http.abort.grpc_status + string abort_grpc_status_runtime = 14; } diff --git a/api/envoy/extensions/filters/http/fault/v4alpha/BUILD b/api/envoy/extensions/filters/http/fault/v4alpha/BUILD new file mode 100644 index 0000000000000..936ee4414038e --- /dev/null +++ b/api/envoy/extensions/filters/http/fault/v4alpha/BUILD @@ -0,0 +1,15 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/route/v4alpha:pkg", + "//envoy/extensions/filters/common/fault/v3:pkg", + "//envoy/extensions/filters/http/fault/v3:pkg", + "//envoy/type/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/extensions/filters/http/fault/v4alpha/fault.proto b/api/envoy/extensions/filters/http/fault/v4alpha/fault.proto new file mode 100644 index 0000000000000..7dd4f48aa476b --- /dev/null +++ b/api/envoy/extensions/filters/http/fault/v4alpha/fault.proto @@ -0,0 +1,144 @@ +syntax = "proto3"; + +package envoy.extensions.filters.http.fault.v4alpha; + +import "envoy/config/route/v4alpha/route_components.proto"; +import "envoy/extensions/filters/common/fault/v3/fault.proto"; +import "envoy/type/v3/percent.proto"; + +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import 
"udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.http.fault.v4alpha"; +option java_outer_classname = "FaultProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Fault Injection] +// Fault Injection :ref:`configuration overview `. +// [#extension: envoy.filters.http.fault] + +// [#next-free-field: 6] +message FaultAbort { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.fault.v3.FaultAbort"; + + // Fault aborts are controlled via an HTTP header (if applicable). See the + // :ref:`HTTP fault filter ` documentation for + // more information. + message HeaderAbort { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.fault.v3.FaultAbort.HeaderAbort"; + } + + reserved 1; + + oneof error_type { + option (validate.required) = true; + + // HTTP status code to use to abort the HTTP request. + uint32 http_status = 2 [(validate.rules).uint32 = {lt: 600 gte: 200}]; + + // gRPC status code to use to abort the gRPC request. + uint32 grpc_status = 5; + + // Fault aborts are controlled via an HTTP header (if applicable). + HeaderAbort header_abort = 4; + } + + // The percentage of requests/operations/connections that will be aborted with the error code + // provided. + type.v3.FractionalPercent percentage = 3; +} + +// [#next-free-field: 15] +message HTTPFault { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.fault.v3.HTTPFault"; + + // If specified, the filter will inject delays based on the values in the + // object. + common.fault.v3.FaultDelay delay = 1; + + // If specified, the filter will abort requests based on the values in + // the object. At least *abort* or *delay* must be specified. 
+ FaultAbort abort = 2; + + // Specifies the name of the (destination) upstream cluster that the + // filter should match on. Fault injection will be restricted to requests + // bound to the specific upstream cluster. + string upstream_cluster = 3; + + // Specifies a set of headers that the filter should match on. The fault + // injection filter can be applied selectively to requests that match a set of + // headers specified in the fault filter config. The chances of actual fault + // injection further depend on the value of the :ref:`percentage + // ` field. + // The filter will check the request's headers against all the specified + // headers in the filter config. A match will happen if all the headers in the + // config are present in the request with the same values (or based on + // presence if the *value* field is not in the config). + repeated config.route.v4alpha.HeaderMatcher headers = 4; + + // Faults are injected for the specified list of downstream hosts. If this + // setting is not set, faults are injected for all downstream nodes. + // Downstream node name is taken from :ref:`the HTTP + // x-envoy-downstream-service-node + // ` header and compared + // against downstream_nodes list. + repeated string downstream_nodes = 5; + + // The maximum number of faults that can be active at a single time via the configured fault + // filter. Note that because this setting can be overridden at the route level, it's possible + // for the number of active faults to be greater than this value (if injected via a different + // route). If not specified, defaults to unlimited. This setting can be overridden via + // `runtime ` and any faults that are not injected + // due to overflow will be indicated via the `faults_overflow + // ` stat. + // + // .. attention:: + // Like other :ref:`circuit breakers ` in Envoy, this is a fuzzy + // limit. 
It's possible for the number of active faults to rise slightly above the configured + // amount due to the implementation details. + google.protobuf.UInt32Value max_active_faults = 6; + + // The response rate limit to be applied to the response body of the stream. When configured, + // the percentage can be overridden by the :ref:`fault.http.rate_limit.response_percent + // ` runtime key. + // + // .. attention:: + // This is a per-stream limit versus a connection level limit. This means that concurrent streams + // will each get an independent limit. + common.fault.v3.FaultRateLimit response_rate_limit = 7; + + // The runtime key to override the :ref:`default ` + // runtime. The default is: fault.http.delay.fixed_delay_percent + string delay_percent_runtime = 8; + + // The runtime key to override the :ref:`default ` + // runtime. The default is: fault.http.abort.abort_percent + string abort_percent_runtime = 9; + + // The runtime key to override the :ref:`default ` + // runtime. The default is: fault.http.delay.fixed_duration_ms + string delay_duration_runtime = 10; + + // The runtime key to override the :ref:`default ` + // runtime. The default is: fault.http.abort.http_status + string abort_http_status_runtime = 11; + + // The runtime key to override the :ref:`default ` + // runtime. The default is: fault.http.max_active_faults + string max_active_faults_runtime = 12; + + // The runtime key to override the :ref:`default ` + // runtime. The default is: fault.http.rate_limit.response_percent + string response_rate_limit_percent_runtime = 13; + + // The runtime key to override the :ref:`default ` + // runtime. 
The default is: fault.http.abort.grpc_status + string abort_grpc_status_runtime = 14; +} diff --git a/api/envoy/extensions/filters/http/gzip/v3/gzip.proto b/api/envoy/extensions/filters/http/gzip/v3/gzip.proto index eb8a69f083ba1..20cae5c400d3a 100644 --- a/api/envoy/extensions/filters/http/gzip/v3/gzip.proto +++ b/api/envoy/extensions/filters/http/gzip/v3/gzip.proto @@ -19,7 +19,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // Gzip :ref:`configuration overview `. // [#extension: envoy.filters.http.gzip] -// [#next-free-field: 11] +// [#next-free-field: 12] message Gzip { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.gzip.v2.Gzip"; @@ -76,4 +76,9 @@ message Gzip { // the fields `content_length`, `content_type`, `disable_on_etag_header` and // `remove_accept_encoding_header` are ignored. compressor.v3.Compressor compressor = 10; + + // Value for Zlib's next output buffer. If not set, defaults to 4096. + // See https://www.zlib.net/manual.html for more details. Also see + // https://github.com/envoyproxy/envoy/issues/8448 for context on this filter's performance. 
+ google.protobuf.UInt32Value chunk_size = 11 [(validate.rules).uint32 = {lte: 65536 gte: 4096}]; } diff --git a/api/envoy/extensions/filters/http/header_to_metadata/v3/BUILD b/api/envoy/extensions/filters/http/header_to_metadata/v3/BUILD index a8dda77ddfc31..8253ea6dff83b 100644 --- a/api/envoy/extensions/filters/http/header_to_metadata/v3/BUILD +++ b/api/envoy/extensions/filters/http/header_to_metadata/v3/BUILD @@ -7,6 +7,7 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/filter/http/header_to_metadata/v2:pkg", + "//envoy/type/matcher/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) diff --git a/api/envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto b/api/envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto index 8e7c490f01b66..ace7c535069ac 100644 --- a/api/envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto +++ b/api/envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto @@ -2,6 +2,9 @@ syntax = "proto3"; package envoy.extensions.filters.http.header_to_metadata.v3; +import "envoy/type/matcher/v3/regex.proto"; + +import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -44,7 +47,7 @@ message Config { BASE64 = 1; } - // [#next-free-field: 6] + // [#next-free-field: 7] message KeyValuePair { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.header_to_metadata.v2.Config.KeyValuePair"; @@ -57,15 +60,25 @@ message Config { // The value to pair with the given key. // - // When used for a `on_header_present` case, if value is non-empty it'll be used - // instead of the header value. If both are empty, no metadata is added. + // When used for a + // :ref:`on_header_present ` + // case, if value is non-empty it'll be used instead of the header value. If both are empty, no metadata is added. 
+ // + // When used for a :ref:`on_header_missing ` + // case, a non-empty value must be provided otherwise no metadata is added. + string value = 3 [(udpa.annotations.field_migrate).oneof_promotion = "value_type"]; + + // If present, the header's value will be matched and substituted with this. If there is no match or substitution, the header value + // is used as-is. + // + // This is only used for :ref:`on_header_present `. // - // When used for a `on_header_missing` case, a non-empty value must be provided - // otherwise no metadata is added. - string value = 3; + // Note: if the `value` field is non-empty this field should be empty. + type.matcher.v3.RegexMatchAndSubstitute regex_value_rewrite = 6 + [(udpa.annotations.field_migrate).oneof_promotion = "value_type"]; // The value's type — defaults to string. - ValueType type = 4; + ValueType type = 4 [(validate.rules).enum = {defined_only: true}]; // How is the value encoded, default is NONE (not encoded). // The value will be decoded accordingly before storing to metadata. @@ -73,29 +86,41 @@ message Config { } // A Rule defines what metadata to apply when a header is present or missing. + // [#next-free-field: 6] message Rule { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.header_to_metadata.v2.Config.Rule"; - // The header that triggers this rule — required. - string header = 1 - [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; - - // If the header is present, apply this metadata KeyValuePair. + // Specifies that a match will be performed on the value of a header or a cookie. + // + // The header to be extracted. + string header = 1 [ + (validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}, + (udpa.annotations.field_migrate).oneof_promotion = "header_cookie_specifier" + ]; + + // The cookie to be extracted. 
+ string cookie = 5 [ + (validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}, + (udpa.annotations.field_migrate).oneof_promotion = "header_cookie_specifier" + ]; + + // If the header or cookie is present, apply this metadata KeyValuePair. // // If the value in the KeyValuePair is non-empty, it'll be used instead - // of the header value. - KeyValuePair on_header_present = 2; + // of the header or cookie value. + KeyValuePair on_header_present = 2 [(udpa.annotations.field_migrate).rename = "on_present"]; - // If the header is not present, apply this metadata KeyValuePair. + // If the header or cookie is not present, apply this metadata KeyValuePair. // // The value in the KeyValuePair must be set, since it'll be used in lieu - // of the missing header value. - KeyValuePair on_header_missing = 3; + // of the missing header or cookie value. + KeyValuePair on_header_missing = 3 [(udpa.annotations.field_migrate).rename = "on_missing"]; // Whether or not to remove the header after a rule is applied. // // This prevents headers from leaking. + // This field is not supported in case of a cookie. bool remove = 4; } diff --git a/api/envoy/extensions/filters/http/header_to_metadata/v4alpha/BUILD b/api/envoy/extensions/filters/http/header_to_metadata/v4alpha/BUILD new file mode 100644 index 0000000000000..285e2346e0ff7 --- /dev/null +++ b/api/envoy/extensions/filters/http/header_to_metadata/v4alpha/BUILD @@ -0,0 +1,13 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/extensions/filters/http/header_to_metadata/v3:pkg", + "//envoy/type/matcher/v4alpha:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/extensions/filters/http/header_to_metadata/v4alpha/header_to_metadata.proto b/api/envoy/extensions/filters/http/header_to_metadata/v4alpha/header_to_metadata.proto new file mode 100644 index 0000000000000..0d7c814584dce --- /dev/null +++ b/api/envoy/extensions/filters/http/header_to_metadata/v4alpha/header_to_metadata.proto @@ -0,0 +1,130 @@ +syntax = "proto3"; + +package envoy.extensions.filters.http.header_to_metadata.v4alpha; + +import "envoy/type/matcher/v4alpha/regex.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.http.header_to_metadata.v4alpha"; +option java_outer_classname = "HeaderToMetadataProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Header-To-Metadata Filter] +// +// The configuration for transforming headers into metadata. This is useful +// for matching load balancer subsets, logging, etc. +// +// Header to Metadata :ref:`configuration overview `. +// [#extension: envoy.filters.http.header_to_metadata] + +message Config { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.header_to_metadata.v3.Config"; + + enum ValueType { + STRING = 0; + + NUMBER = 1; + + // The value is a serialized `protobuf.Value + // `_. + PROTOBUF_VALUE = 2; + } + + // ValueEncode defines the encoding algorithm. + enum ValueEncode { + // The value is not encoded. + NONE = 0; + + // The value is encoded in `Base64 `_. 
+ // Note: this is mostly used for STRING and PROTOBUF_VALUE to escape the + // non-ASCII characters in the header. + BASE64 = 1; + } + + // [#next-free-field: 7] + message KeyValuePair { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.header_to_metadata.v3.Config.KeyValuePair"; + + // The namespace — if this is empty, the filter's namespace will be used. + string metadata_namespace = 1; + + // The key to use within the namespace. + string key = 2 [(validate.rules).string = {min_bytes: 1}]; + + oneof value_type { + // The value to pair with the given key. + // + // When used for a + // :ref:`on_header_present ` + // case, if value is non-empty it'll be used instead of the header value. If both are empty, no metadata is added. + // + // When used for a :ref:`on_header_missing ` + // case, a non-empty value must be provided otherwise no metadata is added. + string value = 3; + + // If present, the header's value will be matched and substituted with this. If there is no match or substitution, the header value + // is used as-is. + // + // This is only used for :ref:`on_header_present `. + // + // Note: if the `value` field is non-empty this field should be empty. + type.matcher.v4alpha.RegexMatchAndSubstitute regex_value_rewrite = 6; + } + + // The value's type — defaults to string. + ValueType type = 4 [(validate.rules).enum = {defined_only: true}]; + + // How is the value encoded, default is NONE (not encoded). + // The value will be decoded accordingly before storing to metadata. + ValueEncode encode = 5; + } + + // A Rule defines what metadata to apply when a header is present or missing. + // [#next-free-field: 6] + message Rule { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.header_to_metadata.v3.Config.Rule"; + + oneof header_cookie_specifier { + // Specifies that a match will be performed on the value of a header or a cookie. + // + // The header to be extracted. 
+ string header = 1 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; + + // The cookie to be extracted. + string cookie = 5 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; + } + + // If the header or cookie is present, apply this metadata KeyValuePair. + // + // If the value in the KeyValuePair is non-empty, it'll be used instead + // of the header or cookie value. + KeyValuePair on_present = 2; + + // If the header or cookie is not present, apply this metadata KeyValuePair. + // + // The value in the KeyValuePair must be set, since it'll be used in lieu + // of the missing header or cookie value. + KeyValuePair on_missing = 3; + + // Whether or not to remove the header after a rule is applied. + // + // This prevents headers from leaking. + // This field is not supported in case of a cookie. + bool remove = 4; + } + + // The list of rules to apply to requests. + repeated Rule request_rules = 1; + + // The list of rules to apply to responses. + repeated Rule response_rules = 2; +} diff --git a/api/envoy/extensions/filters/http/health_check/v3/health_check.proto b/api/envoy/extensions/filters/http/health_check/v3/health_check.proto index 1a5dbf1bb9006..f3a0c42c388c6 100644 --- a/api/envoy/extensions/filters/http/health_check/v3/health_check.proto +++ b/api/envoy/extensions/filters/http/health_check/v3/health_check.proto @@ -38,6 +38,11 @@ message HealthCheck { // If operating in non-pass-through mode, specifies a set of upstream cluster // names and the minimum percentage of servers in each of those clusters that // must be healthy or degraded in order for the filter to return a 200. + // + // .. note:: + // + // This value is interpreted as an integer by truncating, so 12.50% will be calculated + // as if it were 12%. map cluster_min_healthy_percentages = 4; // Specifies a set of health check request headers to match on. 
The health check filter will diff --git a/api/envoy/extensions/filters/http/health_check/v4alpha/BUILD b/api/envoy/extensions/filters/http/health_check/v4alpha/BUILD new file mode 100644 index 0000000000000..97b6ad2feb2d0 --- /dev/null +++ b/api/envoy/extensions/filters/http/health_check/v4alpha/BUILD @@ -0,0 +1,14 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/route/v4alpha:pkg", + "//envoy/extensions/filters/http/health_check/v3:pkg", + "//envoy/type/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/extensions/filters/http/health_check/v4alpha/health_check.proto b/api/envoy/extensions/filters/http/health_check/v4alpha/health_check.proto new file mode 100644 index 0000000000000..3725d085dd7b0 --- /dev/null +++ b/api/envoy/extensions/filters/http/health_check/v4alpha/health_check.proto @@ -0,0 +1,52 @@ +syntax = "proto3"; + +package envoy.extensions.filters.http.health_check.v4alpha; + +import "envoy/config/route/v4alpha/route_components.proto"; +import "envoy/type/v3/percent.proto"; + +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.http.health_check.v4alpha"; +option java_outer_classname = "HealthCheckProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Health check] +// Health check :ref:`configuration overview `. 
+// [#extension: envoy.filters.http.health_check] + +// [#next-free-field: 6] +message HealthCheck { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.health_check.v3.HealthCheck"; + + reserved 2; + + // Specifies whether the filter operates in pass through mode or not. + google.protobuf.BoolValue pass_through_mode = 1 [(validate.rules).message = {required: true}]; + + // If operating in pass through mode, the amount of time in milliseconds + // that the filter should cache the upstream response. + google.protobuf.Duration cache_time = 3; + + // If operating in non-pass-through mode, specifies a set of upstream cluster + // names and the minimum percentage of servers in each of those clusters that + // must be healthy or degraded in order for the filter to return a 200. + // + // .. note:: + // + // This value is interpreted as an integer by truncating, so 12.50% will be calculated + // as if it were 12%. + map cluster_min_healthy_percentages = 4; + + // Specifies a set of health check request headers to match on. The health check filter will + // check a request’s headers against all the specified headers. To specify the health check + // endpoint, set the ``:path`` header to match on. + repeated config.route.v4alpha.HeaderMatcher headers = 5; +} diff --git a/api/envoy/extensions/filters/http/jwt_authn/v4alpha/BUILD b/api/envoy/extensions/filters/http/jwt_authn/v4alpha/BUILD new file mode 100644 index 0000000000000..a9f9b8bc44c32 --- /dev/null +++ b/api/envoy/extensions/filters/http/jwt_authn/v4alpha/BUILD @@ -0,0 +1,14 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/core/v4alpha:pkg", + "//envoy/config/route/v4alpha:pkg", + "//envoy/extensions/filters/http/jwt_authn/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/extensions/filters/http/jwt_authn/v4alpha/config.proto b/api/envoy/extensions/filters/http/jwt_authn/v4alpha/config.proto new file mode 100644 index 0000000000000..302cf7253dde3 --- /dev/null +++ b/api/envoy/extensions/filters/http/jwt_authn/v4alpha/config.proto @@ -0,0 +1,531 @@ +syntax = "proto3"; + +package envoy.extensions.filters.http.jwt_authn.v4alpha; + +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/config/core/v4alpha/http_uri.proto"; +import "envoy/config/route/v4alpha/route_components.proto"; + +import "google/protobuf/duration.proto"; +import "google/protobuf/empty.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.http.jwt_authn.v4alpha"; +option java_outer_classname = "ConfigProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: JWT Authentication] +// JWT Authentication :ref:`configuration overview `. +// [#extension: envoy.filters.http.jwt_authn] + +// Please see following for JWT authentication flow: +// +// * `JSON Web Token (JWT) `_ +// * `The OAuth 2.0 Authorization Framework `_ +// * `OpenID Connect `_ +// +// A JwtProvider message specifies how a JSON Web Token (JWT) can be verified. It specifies: +// +// * issuer: the principal that issues the JWT. It has to match the one from the token. +// * allowed audiences: the ones in the token have to be listed here. +// * how to fetch public key JWKS to verify the token signature. 
+// * how to extract JWT token in the request. +// * how to pass successfully verified token payload. +// +// Example: +// +// .. code-block:: yaml +// +// issuer: https://example.com +// audiences: +// - bookstore_android.apps.googleusercontent.com +// - bookstore_web.apps.googleusercontent.com +// remote_jwks: +// http_uri: +// uri: https://example.com/.well-known/jwks.json +// cluster: example_jwks_cluster +// cache_duration: +// seconds: 300 +// +// [#next-free-field: 10] +message JwtProvider { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.jwt_authn.v3.JwtProvider"; + + // Specify the `principal `_ that issued + // the JWT, usually a URL or an email address. + // + // Example: https://securetoken.google.com + // Example: 1234567-compute@developer.gserviceaccount.com + // + string issuer = 1 [(validate.rules).string = {min_bytes: 1}]; + + // The list of JWT `audiences `_ are + // allowed to access. A JWT containing any of these audiences will be accepted. If not specified, + // will not check audiences in the token. + // + // Example: + // + // .. code-block:: yaml + // + // audiences: + // - bookstore_android.apps.googleusercontent.com + // - bookstore_web.apps.googleusercontent.com + // + repeated string audiences = 2; + + // `JSON Web Key Set (JWKS) `_ is needed to + // validate signature of a JWT. This field specifies where to fetch JWKS. + oneof jwks_source_specifier { + option (validate.required) = true; + + // JWKS can be fetched from remote server via HTTP/HTTPS. This field specifies the remote HTTP + // URI and how the fetched JWKS should be cached. + // + // Example: + // + // .. code-block:: yaml + // + // remote_jwks: + // http_uri: + // uri: https://www.googleapis.com/oauth2/v1/certs + // cluster: jwt.www.googleapis.com|443 + // cache_duration: + // seconds: 300 + // + RemoteJwks remote_jwks = 3; + + // JWKS is in local data source. 
It could be either in a local file or embedded in the + // inline_string. + // + // Example: local file + // + // .. code-block:: yaml + // + // local_jwks: + // filename: /etc/envoy/jwks/jwks1.txt + // + // Example: inline_string + // + // .. code-block:: yaml + // + // local_jwks: + // inline_string: ACADADADADA + // + config.core.v4alpha.DataSource local_jwks = 4; + } + + // If false, the JWT is removed in the request after a success verification. If true, the JWT is + // not removed in the request. Default value is false. + bool forward = 5; + + // Two fields below define where to extract the JWT from an HTTP request. + // + // If no explicit location is specified, the following default locations are tried in order: + // + // 1. The Authorization header using the `Bearer schema + // `_. Example:: + // + // Authorization: Bearer . + // + // 2. `access_token `_ query parameter. + // + // Multiple JWTs can be verified for a request. Each JWT has to be extracted from the locations + // its provider specified or from the default locations. + // + // Specify the HTTP headers to extract JWT token. For examples, following config: + // + // .. code-block:: yaml + // + // from_headers: + // - name: x-goog-iap-jwt-assertion + // + // can be used to extract token from header:: + // + // ``x-goog-iap-jwt-assertion: ``. + // + repeated JwtHeader from_headers = 6; + + // JWT is sent in a query parameter. `jwt_params` represents the query parameter names. + // + // For example, if config is: + // + // .. code-block:: yaml + // + // from_params: + // - jwt_token + // + // The JWT format in query parameter is:: + // + // /path?jwt_token= + // + repeated string from_params = 7; + + // This field specifies the header name to forward a successfully verified JWT payload to the + // backend. The forwarded data is:: + // + // base64url_encoded(jwt_payload_in_JSON) + // + // If it is not specified, the payload will not be forwarded. 
+ string forward_payload_header = 8 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; + + // If non empty, successfully verified JWT payloads will be written to StreamInfo DynamicMetadata + // in the format as: *namespace* is the jwt_authn filter name as **envoy.filters.http.jwt_authn** + // The value is the *protobuf::Struct*. The value of this field will be the key for its *fields* + // and the value is the *protobuf::Struct* converted from JWT JSON payload. + // + // For example, if payload_in_metadata is *my_payload*: + // + // .. code-block:: yaml + // + // envoy.filters.http.jwt_authn: + // my_payload: + // iss: https://example.com + // sub: test@example.com + // aud: https://example.com + // exp: 1501281058 + // + string payload_in_metadata = 9; +} + +// This message specifies how to fetch JWKS from remote and how to cache it. +message RemoteJwks { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.jwt_authn.v3.RemoteJwks"; + + // The HTTP URI to fetch the JWKS. For example: + // + // .. code-block:: yaml + // + // http_uri: + // uri: https://www.googleapis.com/oauth2/v1/certs + // cluster: jwt.www.googleapis.com|443 + // + config.core.v4alpha.HttpUri http_uri = 1; + + // Duration after which the cached JWKS should be expired. If not specified, default cache + // duration is 5 minutes. + google.protobuf.Duration cache_duration = 2; +} + +// This message specifies a header location to extract JWT token. +message JwtHeader { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.jwt_authn.v3.JwtHeader"; + + // The HTTP header name. + string name = 1 + [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; + + // The value prefix. The value format is "value_prefix" + // For example, for "Authorization: Bearer ", value_prefix="Bearer " with a space at the + // end. 
+ string value_prefix = 2 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; +} + +// Specify a required provider with audiences. +message ProviderWithAudiences { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.jwt_authn.v3.ProviderWithAudiences"; + + // Specify a required provider name. + string provider_name = 1; + + // This field overrides the one specified in the JwtProvider. + repeated string audiences = 2; +} + +// This message specifies a Jwt requirement. An empty message means JWT verification is not +// required. Here are some config examples: +// +// .. code-block:: yaml +// +// # Example 1: not required with an empty message +// +// # Example 2: require A +// provider_name: provider-A +// +// # Example 3: require A or B +// requires_any: +// requirements: +// - provider_name: provider-A +// - provider_name: provider-B +// +// # Example 4: require A and B +// requires_all: +// requirements: +// - provider_name: provider-A +// - provider_name: provider-B +// +// # Example 5: require A and (B or C) +// requires_all: +// requirements: +// - provider_name: provider-A +// - requires_any: +// requirements: +// - provider_name: provider-B +// - provider_name: provider-C +// +// # Example 6: require A or (B and C) +// requires_any: +// requirements: +// - provider_name: provider-A +// - requires_all: +// requirements: +// - provider_name: provider-B +// - provider_name: provider-C +// +// # Example 7: A is optional (if token from A is provided, it must be valid, but also allows +// missing token.) +// requires_any: +// requirements: +// - provider_name: provider-A +// - allow_missing: {} +// +// # Example 8: A is optional and B is required. 
+// requires_all: +// requirements: +// - requires_any: +// requirements: +// - provider_name: provider-A +// - allow_missing: {} +// - provider_name: provider-B +// +// [#next-free-field: 7] +message JwtRequirement { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.jwt_authn.v3.JwtRequirement"; + + oneof requires_type { + // Specify a required provider name. + string provider_name = 1; + + // Specify a required provider with audiences. + ProviderWithAudiences provider_and_audiences = 2; + + // Specify list of JwtRequirement. Their results are OR-ed. + // If any one of them passes, the result is passed. + JwtRequirementOrList requires_any = 3; + + // Specify list of JwtRequirement. Their results are AND-ed. + // All of them must pass, if one of them fails or missing, it fails. + JwtRequirementAndList requires_all = 4; + + // The requirement is always satisfied even if JWT is missing or the JWT + // verification fails. A typical usage is: this filter is used to only verify + // JWTs and pass the verified JWT payloads to another filter, the other filter + // will make decision. In this mode, all JWT tokens will be verified. + google.protobuf.Empty allow_missing_or_failed = 5; + + // The requirement is satisfied if JWT is missing, but failed if JWT is + // presented but invalid. Similar to allow_missing_or_failed, this is used + // to only verify JWTs and pass the verified payload to another filter. The + // different is this mode will reject requests with invalid tokens. + google.protobuf.Empty allow_missing = 6; + } +} + +// This message specifies a list of RequiredProvider. +// Their results are OR-ed; if any one of them passes, the result is passed +message JwtRequirementOrList { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.jwt_authn.v3.JwtRequirementOrList"; + + // Specify a list of JwtRequirement. 
+ repeated JwtRequirement requirements = 1 [(validate.rules).repeated = {min_items: 2}]; +} + +// This message specifies a list of RequiredProvider. +// Their results are AND-ed; all of them must pass, if one of them fails or missing, it fails. +message JwtRequirementAndList { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.jwt_authn.v3.JwtRequirementAndList"; + + // Specify a list of JwtRequirement. + repeated JwtRequirement requirements = 1 [(validate.rules).repeated = {min_items: 2}]; +} + +// This message specifies a Jwt requirement for a specific Route condition. +// Example 1: +// +// .. code-block:: yaml +// +// - match: +// prefix: /healthz +// +// In above example, "requires" field is empty for /healthz prefix match, +// it means that requests matching the path prefix don't require JWT authentication. +// +// Example 2: +// +// .. code-block:: yaml +// +// - match: +// prefix: / +// requires: { provider_name: provider-A } +// +// In above example, all requests matched the path prefix require jwt authentication +// from "provider-A". +message RequirementRule { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.jwt_authn.v3.RequirementRule"; + + // The route matching parameter. Only when the match is satisfied, the "requires" field will + // apply. + // + // For example: following match will match all requests. + // + // .. code-block:: yaml + // + // match: + // prefix: / + // + config.route.v4alpha.RouteMatch match = 1 [(validate.rules).message = {required: true}]; + + // Specify a Jwt Requirement. Please detail comment in message JwtRequirement. + JwtRequirement requires = 2; +} + +// This message specifies Jwt requirements based on stream_info.filterState. +// This FilterState should use `Router::StringAccessor` object to set a string value. +// Other HTTP filters can use it to specify Jwt requirements dynamically. +// +// Example: +// +// .. 
code-block:: yaml +// +// name: jwt_selector +// requires: +// issuer_1: +// provider_name: issuer1 +// issuer_2: +// provider_name: issuer2 +// +// If a filter set "jwt_selector" with "issuer_1" to FilterState for a request, +// jwt_authn filter will use JwtRequirement{"provider_name": "issuer1"} to verify. +message FilterStateRule { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.jwt_authn.v3.FilterStateRule"; + + // The filter state name to retrieve the `Router::StringAccessor` object. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // A map of string keys to requirements. The string key is the string value + // in the FilterState with the name specified in the *name* field above. + map requires = 3; +} + +// This is the Envoy HTTP filter config for JWT authentication. +// +// For example: +// +// .. code-block:: yaml +// +// providers: +// provider1: +// issuer: issuer1 +// audiences: +// - audience1 +// - audience2 +// remote_jwks: +// http_uri: +// uri: https://example.com/.well-known/jwks.json +// cluster: example_jwks_cluster +// provider2: +// issuer: issuer2 +// local_jwks: +// inline_string: jwks_string +// +// rules: +// # Not jwt verification is required for /health path +// - match: +// prefix: /health +// +// # Jwt verification for provider1 is required for path prefixed with "prefix" +// - match: +// prefix: /prefix +// requires: +// provider_name: provider1 +// +// # Jwt verification for either provider1 or provider2 is required for all other requests. +// - match: +// prefix: / +// requires: +// requires_any: +// requirements: +// - provider_name: provider1 +// - provider_name: provider2 +// +message JwtAuthentication { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.jwt_authn.v3.JwtAuthentication"; + + // Map of provider names to JwtProviders. + // + // .. 
code-block:: yaml + // + // providers: + // provider1: + // issuer: issuer1 + // audiences: + // - audience1 + // - audience2 + // remote_jwks: + // http_uri: + // uri: https://example.com/.well-known/jwks.json + // cluster: example_jwks_cluster + // provider2: + // issuer: provider2 + // local_jwks: + // inline_string: jwks_string + // + map providers = 1; + + // Specifies requirements based on the route matches. The first matched requirement will be + // applied. If there are overlapped match conditions, please put the most specific match first. + // + // Examples + // + // .. code-block:: yaml + // + // rules: + // - match: + // prefix: /healthz + // - match: + // prefix: /baz + // requires: + // provider_name: provider1 + // - match: + // prefix: /foo + // requires: + // requires_any: + // requirements: + // - provider_name: provider1 + // - provider_name: provider2 + // - match: + // prefix: /bar + // requires: + // requires_all: + // requirements: + // - provider_name: provider1 + // - provider_name: provider2 + // + repeated RequirementRule rules = 2; + + // This message specifies Jwt requirements based on stream_info.filterState. + // Other HTTP filters can use it to specify Jwt requirements dynamically. + // The *rules* field above is checked first, if it could not find any matches, + // check this one. + FilterStateRule filter_state_rules = 3; + + // When set to true, bypass the `CORS preflight request + // `_ regardless of JWT + // requirements specified in the rules. 
+ bool bypass_cors_preflight = 4; +} diff --git a/api/envoy/extensions/filters/http/lua/v3/BUILD b/api/envoy/extensions/filters/http/lua/v3/BUILD index 69390e69786a1..8878a585f46d2 100644 --- a/api/envoy/extensions/filters/http/lua/v3/BUILD +++ b/api/envoy/extensions/filters/http/lua/v3/BUILD @@ -6,6 +6,7 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ + "//envoy/config/core/v3:pkg", "//envoy/config/filter/http/lua/v2:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], diff --git a/api/envoy/extensions/filters/http/lua/v3/lua.proto b/api/envoy/extensions/filters/http/lua/v3/lua.proto index da6b0c09a0f61..622726744de6c 100644 --- a/api/envoy/extensions/filters/http/lua/v3/lua.proto +++ b/api/envoy/extensions/filters/http/lua/v3/lua.proto @@ -2,6 +2,8 @@ syntax = "proto3"; package envoy.extensions.filters.http.lua.v3; +import "envoy/config/core/v3/base.proto"; + import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -24,4 +26,37 @@ message Lua { // be properly escaped. YAML configuration may be easier to read since YAML supports multi-line // strings so complex scripts can be easily expressed inline in the configuration. string inline_code = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Map of named Lua source codes that can be referenced in :ref:`LuaPerRoute + // `. The Lua source codes can be + // loaded from inline string or local files. + // + // Example: + // + // .. code-block:: yaml + // + // source_codes: + // hello.lua: + // inline_string: | + // function envoy_on_response(response_handle) + // -- Do something. + // end + // world.lua: + // filename: /etc/lua/world.lua + // + map source_codes = 2; +} + +message LuaPerRoute { + oneof override { + option (validate.required) = true; + + // Disable the Lua filter for this particular vhost or route. If disabled is specified in + // multiple per-filter-configs, the most specific one will be used. 
+ bool disabled = 1 [(validate.rules).bool = {const: true}]; + + // A name of a Lua source code stored in + // :ref:`Lua.source_codes `. + string name = 2 [(validate.rules).string = {min_len: 1}]; + } } diff --git a/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto b/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto index 057b7c3d44032..781fddc1939c0 100644 --- a/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto +++ b/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto @@ -19,11 +19,20 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // Rate limit :ref:`configuration overview `. // [#extension: envoy.filters.http.ratelimit] -// [#next-free-field: 8] +// [#next-free-field: 9] message RateLimit { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.rate_limit.v2.RateLimit"; + // Defines the version of the standard to use for X-RateLimit headers. + enum XRateLimitHeadersRFCVersion { + // X-RateLimit headers disabled. + OFF = 0; + + // Use `draft RFC Version 03 `_. + DRAFT_VERSION_03 = 1; + } + // The rate limit domain to use when calling the rate limit service. string domain = 1 [(validate.rules).string = {min_bytes: 1}]; @@ -64,4 +73,30 @@ message RateLimit { // success. config.ratelimit.v3.RateLimitServiceConfig rate_limit_service = 7 [(validate.rules).message = {required: true}]; + + // Defines the standard version to use for X-RateLimit headers emitted by the filter: + // + // * ``X-RateLimit-Limit`` - indicates the request-quota associated to the + // client in the current time-window followed by the description of the + // quota policy. The values are returned by the rate limiting service in + // :ref:`current_limit` + // field. Example: `10, 10;w=1;name="per-ip", 1000;w=3600`. + // * ``X-RateLimit-Remaining`` - indicates the remaining requests in the + // current time-window. 
The values are returned by the rate limiting service + // in :ref:`limit_remaining` + // field. + // * ``X-RateLimit-Reset`` - indicates the number of seconds until reset of + // the current time-window. The values are returned by the rate limiting service + // in :ref:`duration_until_reset` + // field. + // + // In case rate limiting policy specifies more then one time window, the values + // above represent the window that is closest to reaching its limit. + // + // For more information about the headers specification see selected version of + // the `draft RFC `_. + // + // Disabled by default. + XRateLimitHeadersRFCVersion enable_x_ratelimit_headers = 8 + [(validate.rules).enum = {defined_only: true}]; } diff --git a/api/envoy/extensions/filters/http/router/v4alpha/BUILD b/api/envoy/extensions/filters/http/router/v4alpha/BUILD new file mode 100644 index 0000000000000..df329be542301 --- /dev/null +++ b/api/envoy/extensions/filters/http/router/v4alpha/BUILD @@ -0,0 +1,13 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/accesslog/v4alpha:pkg", + "//envoy/extensions/filters/http/router/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/extensions/filters/http/router/v4alpha/router.proto b/api/envoy/extensions/filters/http/router/v4alpha/router.proto new file mode 100644 index 0000000000000..d0baaab84a397 --- /dev/null +++ b/api/envoy/extensions/filters/http/router/v4alpha/router.proto @@ -0,0 +1,81 @@ +syntax = "proto3"; + +package envoy.extensions.filters.http.router.v4alpha; + +import "envoy/config/accesslog/v4alpha/accesslog.proto"; + +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.http.router.v4alpha"; +option java_outer_classname = "RouterProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Router] +// Router :ref:`configuration overview `. +// [#extension: envoy.filters.http.router] + +// [#next-free-field: 7] +message Router { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.router.v3.Router"; + + // Whether the router generates dynamic cluster statistics. Defaults to + // true. Can be disabled in high performance scenarios. + google.protobuf.BoolValue dynamic_stats = 1; + + // Whether to start a child span for egress routed calls. This can be + // useful in scenarios where other filters (auth, ratelimit, etc.) make + // outbound calls and have child spans rooted at the same ingress + // parent. Defaults to false. + bool start_child_span = 2; + + // Configuration for HTTP upstream logs emitted by the router. 
Upstream logs + // are configured in the same way as access logs, but each log entry represents + // an upstream request. Presuming retries are configured, multiple upstream + // requests may be made for each downstream (inbound) request. + repeated config.accesslog.v4alpha.AccessLog upstream_log = 3; + + // Do not add any additional *x-envoy-* headers to requests or responses. This + // only affects the :ref:`router filter generated *x-envoy-* headers + // `, other Envoy filters and the HTTP + // connection manager may continue to set *x-envoy-* headers. + bool suppress_envoy_headers = 4; + + // Specifies a list of HTTP headers to strictly validate. Envoy will reject a + // request and respond with HTTP status 400 if the request contains an invalid + // value for any of the headers listed in this field. Strict header checking + // is only supported for the following headers: + // + // Value must be a ','-delimited list (i.e. no spaces) of supported retry + // policy values: + // + // * :ref:`config_http_filters_router_x-envoy-retry-grpc-on` + // * :ref:`config_http_filters_router_x-envoy-retry-on` + // + // Value must be an integer: + // + // * :ref:`config_http_filters_router_x-envoy-max-retries` + // * :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms` + // * :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms` + repeated string strict_check_headers = 5 [(validate.rules).repeated = { + items { + string { + in: "x-envoy-upstream-rq-timeout-ms" + in: "x-envoy-upstream-rq-per-try-timeout-ms" + in: "x-envoy-max-retries" + in: "x-envoy-retry-grpc-on" + in: "x-envoy-retry-on" + } + } + }]; + + // If not set, ingress Envoy will ignore + // :ref:`config_http_filters_router_x-envoy-expected-rq-timeout-ms` header, populated by egress + // Envoy, when deriving timeout for upstream cluster. 
+ bool respect_expected_rq_timeout = 6; +} diff --git a/api/envoy/extensions/filters/http/wasm/v3/BUILD b/api/envoy/extensions/filters/http/wasm/v3/BUILD new file mode 100644 index 0000000000000..8bad369e35113 --- /dev/null +++ b/api/envoy/extensions/filters/http/wasm/v3/BUILD @@ -0,0 +1,12 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/extensions/wasm/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/extensions/filters/http/wasm/v3/wasm.proto b/api/envoy/extensions/filters/http/wasm/v3/wasm.proto new file mode 100644 index 0000000000000..a812992a5b84e --- /dev/null +++ b/api/envoy/extensions/filters/http/wasm/v3/wasm.proto @@ -0,0 +1,20 @@ +syntax = "proto3"; + +package envoy.extensions.filters.http.wasm.v3; + +import "envoy/extensions/wasm/v3/wasm.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.http.wasm.v3"; +option java_outer_classname = "WasmProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#not-implemented-hide:] +message Wasm { + // General Plugin configuration. 
+ envoy.extensions.wasm.v3.PluginConfig config = 1; +} diff --git a/api/envoy/extensions/filters/listener/proxy_protocol/v3/proxy_protocol.proto b/api/envoy/extensions/filters/listener/proxy_protocol/v3/proxy_protocol.proto index 63ad72945e280..8fd0c63d0c825 100644 --- a/api/envoy/extensions/filters/listener/proxy_protocol/v3/proxy_protocol.proto +++ b/api/envoy/extensions/filters/listener/proxy_protocol/v3/proxy_protocol.proto @@ -4,6 +4,7 @@ package envoy.extensions.filters.listener.proxy_protocol.v3; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.listener.proxy_protocol.v3"; option java_outer_classname = "ProxyProtocolProto"; @@ -17,4 +18,26 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; message ProxyProtocol { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.listener.proxy_protocol.v2.ProxyProtocol"; + + message KeyValuePair { + // The namespace — if this is empty, the filter's namespace will be used. + string metadata_namespace = 1; + + // The key to use within the namespace. + string key = 2 [(validate.rules).string = {min_bytes: 1}]; + } + + // A Rule defines what metadata to apply when a header is present or missing. + message Rule { + // The type that triggers the rule - required + // TLV type is defined as uint8_t in proxy protocol. See `the spec + // `_ for details. + uint32 tlv_type = 1 [(validate.rules).uint32 = {lt: 256}]; + + // If the TLV type is present, apply this metadata KeyValuePair. + KeyValuePair on_tlv_present = 2; + } + + // The list of rules to apply to requests. 
+ repeated Rule rules = 1; } diff --git a/api/envoy/extensions/filters/network/client_ssl_auth/v3/client_ssl_auth.proto b/api/envoy/extensions/filters/network/client_ssl_auth/v3/client_ssl_auth.proto index e2da157574f89..b3af267a77ad1 100644 --- a/api/envoy/extensions/filters/network/client_ssl_auth/v3/client_ssl_auth.proto +++ b/api/envoy/extensions/filters/network/client_ssl_auth/v3/client_ssl_auth.proto @@ -6,6 +6,7 @@ import "envoy/config/core/v3/address.proto"; import "google/protobuf/duration.proto"; +import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -28,7 +29,8 @@ message ClientSSLAuth { // the authentication service. The filter will connect to the service every 60s to fetch the list // of principals. The service must support the expected :ref:`REST API // `. - string auth_api_cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + string auth_api_cluster = 1 + [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_VALUE strict: false}]; // The prefix to use when emitting :ref:`statistics // `. @@ -42,6 +44,7 @@ message ClientSSLAuth { // An optional list of IP address and subnet masks that should be white // listed for access by the filter. If no list is provided, there is no - // IP white list. - repeated config.core.v3.CidrRange ip_white_list = 4; + // IP allowlist. + repeated config.core.v3.CidrRange ip_white_list = 4 + [(udpa.annotations.field_migrate).rename = "ip_allowlist"]; } diff --git a/api/envoy/extensions/filters/network/dubbo_proxy/v4alpha/BUILD b/api/envoy/extensions/filters/network/dubbo_proxy/v4alpha/BUILD new file mode 100644 index 0000000000000..663eb0d52d25c --- /dev/null +++ b/api/envoy/extensions/filters/network/dubbo_proxy/v4alpha/BUILD @@ -0,0 +1,15 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/route/v4alpha:pkg", + "//envoy/extensions/filters/network/dubbo_proxy/v3:pkg", + "//envoy/type/matcher/v4alpha:pkg", + "//envoy/type/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/extensions/filters/network/dubbo_proxy/v4alpha/dubbo_proxy.proto b/api/envoy/extensions/filters/network/dubbo_proxy/v4alpha/dubbo_proxy.proto new file mode 100644 index 0000000000000..4894c7693fd7a --- /dev/null +++ b/api/envoy/extensions/filters/network/dubbo_proxy/v4alpha/dubbo_proxy.proto @@ -0,0 +1,70 @@ +syntax = "proto3"; + +package envoy.extensions.filters.network.dubbo_proxy.v4alpha; + +import "envoy/extensions/filters/network/dubbo_proxy/v4alpha/route.proto"; + +import "google/protobuf/any.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.network.dubbo_proxy.v4alpha"; +option java_outer_classname = "DubboProxyProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Dubbo Proxy] +// Dubbo Proxy :ref:`configuration overview `. +// [#extension: envoy.filters.network.dubbo_proxy] + +// Dubbo Protocol types supported by Envoy. +enum ProtocolType { + // the default protocol. + Dubbo = 0; +} + +// Dubbo Serialization types supported by Envoy. +enum SerializationType { + // the default serialization protocol. + Hessian2 = 0; +} + +// [#next-free-field: 6] +message DubboProxy { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.dubbo_proxy.v3.DubboProxy"; + + // The human readable prefix to use when emitting statistics. 
+ string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Configure the protocol used. + ProtocolType protocol_type = 2 [(validate.rules).enum = {defined_only: true}]; + + // Configure the serialization protocol used. + SerializationType serialization_type = 3 [(validate.rules).enum = {defined_only: true}]; + + // The route table for the connection manager is static and is specified in this property. + repeated RouteConfiguration route_config = 4; + + // A list of individual Dubbo filters that make up the filter chain for requests made to the + // Dubbo proxy. Order matters as the filters are processed sequentially. For backwards + // compatibility, if no dubbo_filters are specified, a default Dubbo router filter + // (`envoy.filters.dubbo.router`) is used. + repeated DubboFilter dubbo_filters = 5; +} + +// DubboFilter configures a Dubbo filter. +message DubboFilter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.dubbo_proxy.v3.DubboFilter"; + + // The name of the filter to instantiate. The name must match a supported + // filter. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Filter specific configuration which depends on the filter being + // instantiated. See the supported filters for further documentation. 
+ google.protobuf.Any config = 2; +} diff --git a/api/envoy/extensions/filters/network/dubbo_proxy/v4alpha/route.proto b/api/envoy/extensions/filters/network/dubbo_proxy/v4alpha/route.proto new file mode 100644 index 0000000000000..c2ff03b33fb14 --- /dev/null +++ b/api/envoy/extensions/filters/network/dubbo_proxy/v4alpha/route.proto @@ -0,0 +1,121 @@ +syntax = "proto3"; + +package envoy.extensions.filters.network.dubbo_proxy.v4alpha; + +import "envoy/config/route/v4alpha/route_components.proto"; +import "envoy/type/matcher/v4alpha/string.proto"; +import "envoy/type/v3/range.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.network.dubbo_proxy.v4alpha"; +option java_outer_classname = "RouteProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Dubbo Proxy Route Configuration] +// Dubbo Proxy :ref:`configuration overview `. + +// [#next-free-field: 6] +message RouteConfiguration { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.dubbo_proxy.v3.RouteConfiguration"; + + // The name of the route configuration. Reserved for future use in asynchronous route discovery. + string name = 1; + + // The interface name of the service. + string interface = 2; + + // Which group does the interface belong to. + string group = 3; + + // The version number of the interface. + string version = 4; + + // The list of routes that will be matched, in order, against incoming requests. The first route + // that matches will be used. + repeated Route routes = 5; +} + +message Route { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.dubbo_proxy.v3.Route"; + + // Route matching parameters. 
+ RouteMatch match = 1 [(validate.rules).message = {required: true}]; + + // Route request to some upstream cluster. + RouteAction route = 2 [(validate.rules).message = {required: true}]; +} + +message RouteMatch { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.dubbo_proxy.v3.RouteMatch"; + + // Method level routing matching. + MethodMatch method = 1; + + // Specifies a set of headers that the route should match on. The router will check the request’s + // headers against all the specified headers in the route config. A match will happen if all the + // headers in the route are present in the request with the same values (or based on presence if + // the value field is not in the config). + repeated config.route.v4alpha.HeaderMatcher headers = 2; +} + +message RouteAction { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.dubbo_proxy.v3.RouteAction"; + + oneof cluster_specifier { + option (validate.required) = true; + + // Indicates the upstream cluster to which the request should be routed. + string cluster = 1; + + // Multiple upstream clusters can be specified for a given route. The + // request is routed to one of the upstream clusters based on weights + // assigned to each cluster. + // Currently ClusterWeight only supports the name and weight fields. + config.route.v4alpha.WeightedCluster weighted_clusters = 2; + } +} + +message MethodMatch { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.dubbo_proxy.v3.MethodMatch"; + + // The parameter matching type. + message ParameterMatchSpecifier { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.dubbo_proxy.v3.MethodMatch.ParameterMatchSpecifier"; + + oneof parameter_match_specifier { + // If specified, header match will be performed based on the value of the header. 
+ string exact_match = 3; + + // If specified, header match will be performed based on range. + // The rule will match if the request header value is within this range. + // The entire request header value must represent an integer in base 10 notation: consisting + // of an optional plus or minus sign followed by a sequence of digits. The rule will not match + // if the header value does not represent an integer. Match will fail for empty values, + // floating point numbers or if only a subsequence of the header value is an integer. + // + // Examples: + // + // * For range [-10,0), route will match for header value -1, but not for 0, + // "somestring", 10.9, "-1somestring" + type.v3.Int64Range range_match = 4; + } + } + + // The name of the method. + type.matcher.v4alpha.StringMatcher name = 1; + + // Method parameter definition. + // The key is the parameter index, starting from 0. + // The value is the parameter matching type. + map<uint32, MethodMatch.ParameterMatchSpecifier> params_match = 2; +} diff --git a/api/envoy/extensions/filters/network/ext_authz/v3/ext_authz.proto b/api/envoy/extensions/filters/network/ext_authz/v3/ext_authz.proto index c3a63ac0a4f65..50161f1cb92bc 100644 --- a/api/envoy/extensions/filters/network/ext_authz/v3/ext_authz.proto +++ b/api/envoy/extensions/filters/network/ext_authz/v3/ext_authz.proto @@ -2,6 +2,7 @@ syntax = "proto3"; package envoy.extensions.filters.network.ext_authz.v3; +import "envoy/config/core/v3/config_source.proto"; import "envoy/config/core/v3/grpc_service.proto"; import "udpa/annotations/status.proto"; @@ -22,6 +23,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // gRPC Authorization API defined by // :ref:`CheckRequest `. // A failed check will cause this filter to close the TCP connection. 
+// [#next-free-field: 6] message ExtAuthz { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.ext_authz.v2.ExtAuthz"; @@ -44,4 +46,9 @@ message ExtAuthz { // When this field is true, Envoy will include the peer X.509 certificate, if available, in the // :ref:`certificate`. bool include_peer_certificate = 4; + + // API version for ext_authz transport protocol. This describes the ext_authz gRPC endpoint and + // version of Check{Request,Response} used on the wire. + config.core.v3.ApiVersion transport_api_version = 5 + [(validate.rules).enum = {defined_only: true}]; } diff --git a/api/envoy/extensions/filters/network/http_connection_manager/v3/BUILD b/api/envoy/extensions/filters/network/http_connection_manager/v3/BUILD index bd07dbcbb020d..283fd11e5f09c 100644 --- a/api/envoy/extensions/filters/network/http_connection_manager/v3/BUILD +++ b/api/envoy/extensions/filters/network/http_connection_manager/v3/BUILD @@ -15,5 +15,6 @@ api_proto_package( "//envoy/type/tracing/v3:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//udpa/core/v1:pkg", ], ) diff --git a/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto b/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto index d802ec4ce7743..04a132ad26720 100644 --- a/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto +++ b/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto @@ -3,8 +3,11 @@ syntax = "proto3"; package envoy.extensions.filters.network.http_connection_manager.v3; import "envoy/config/accesslog/v3/accesslog.proto"; +import "envoy/config/core/v3/base.proto"; import "envoy/config/core/v3/config_source.proto"; +import "envoy/config/core/v3/extension.proto"; import "envoy/config/core/v3/protocol.proto"; +import 
"envoy/config/core/v3/substitution_format_string.proto"; import "envoy/config/route/v3/route.proto"; import "envoy/config/route/v3/scoped_route.proto"; import "envoy/config/trace/v3/http_tracer.proto"; @@ -16,7 +19,11 @@ import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/core/v1/resource_locator.proto"; + import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/migrate.proto"; +import "udpa/annotations/security.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -30,7 +37,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // HTTP connection manager :ref:`configuration overview `. // [#extension: envoy.filters.network.http_connection_manager] -// [#next-free-field: 37] +// [#next-free-field: 41] message HttpConnectionManager { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager"; @@ -231,7 +238,7 @@ message HttpConnectionManager { // Determines if upgrades are enabled or disabled by default. Defaults to true. // This can be overridden on a per-route basis with :ref:`cluster // ` as documented in the - // :ref:`upgrade documentation `. + // :ref:`upgrade documentation `. google.protobuf.BoolValue enabled = 3; } @@ -263,8 +270,8 @@ message HttpConnectionManager { } // A list of individual HTTP filters that make up the filter chain for - // requests made to the connection manager. Order matters as the filters are - // processed sequentially as request events happen. + // requests made to the connection manager. :ref:`Order matters ` + // as the filters are processed sequentially as request events happen. 
repeated HttpFilter http_filters = 5; // Whether the connection manager manipulates the :ref:`config_http_conn_man_headers_user-agent` @@ -279,13 +286,15 @@ message HttpConnectionManager { // Additional settings for HTTP requests handled by the connection manager. These will be // applicable to both HTTP1 and HTTP2 requests. - config.core.v3.HttpProtocolOptions common_http_protocol_options = 35; + config.core.v3.HttpProtocolOptions common_http_protocol_options = 35 + [(udpa.annotations.security).configure_for_untrusted_downstream = true]; // Additional HTTP/1 settings that are passed to the HTTP/1 codec. config.core.v3.Http1ProtocolOptions http_protocol_options = 8; // Additional HTTP/2 settings that are passed directly to the HTTP/2 codec. - config.core.v3.Http2ProtocolOptions http2_protocol_options = 9; + config.core.v3.Http2ProtocolOptions http2_protocol_options = 9 + [(udpa.annotations.security).configure_for_untrusted_downstream = true]; // An optional override that the connection manager will write to the server // header in responses. If not set, the default is *envoy*. @@ -322,6 +331,16 @@ message HttpConnectionManager { // is terminated with a 408 Request Timeout error code if no upstream response // header has been received, otherwise a stream reset occurs. // + // This timeout also specifies the amount of time that Envoy will wait for the peer to open enough + // window to write any remaining stream data once the entirety of stream data (local end stream is + // true) has been buffered pending available window. In other words, this timeout defends against + // a peer that does not release enough window to completely write the stream, even though all + // data has been proxied within available flow control windows. If the timeout is hit in this + // case, the :ref:`tx_flush_timeout ` counter will be + // incremented. Note that :ref:`max_stream_duration + // ` does not apply to + // this corner case. 
+ // // Note that it is possible to idle timeout even if the wire traffic for a stream is non-idle, due // to the granularity of events presented to the connection manager. For example, while receiving // very large request headers, it may be the case that there is traffic regularly arriving on the @@ -330,13 +349,15 @@ message HttpConnectionManager { // // A value of 0 will completely disable the connection manager stream idle // timeout, although per-route idle timeout overrides will continue to apply. - google.protobuf.Duration stream_idle_timeout = 24; + google.protobuf.Duration stream_idle_timeout = 24 + [(udpa.annotations.security).configure_for_untrusted_downstream = true]; // The amount of time that Envoy will wait for the entire request to be received. // The timer is activated when the request is initiated, and is disarmed when the last byte of the // request is sent upstream (i.e. all decoding filters have processed the request), OR when the // response is initiated. If not specified or set to 0, this timeout is disabled. - google.protobuf.Duration request_timeout = 28; + google.protobuf.Duration request_timeout = 28 + [(udpa.annotations.security).configure_for_untrusted_downstream = true]; // The time that Envoy will wait between sending an HTTP/2 “shutdown // notification” (GOAWAY frame with max stream ID) and a final GOAWAY frame. @@ -392,7 +413,8 @@ message HttpConnectionManager { // :ref:`config_http_conn_man_headers_x-forwarded-for`, // :ref:`config_http_conn_man_headers_x-envoy-internal`, and // :ref:`config_http_conn_man_headers_x-envoy-external-address` for more information. 
- google.protobuf.BoolValue use_remote_address = 14; + google.protobuf.BoolValue use_remote_address = 14 + [(udpa.annotations.security).configure_for_untrusted_downstream = true]; // The number of additional ingress proxy hops from the right side of the // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header to trust when @@ -433,6 +455,11 @@ message HttpConnectionManager { // is the current Envoy behaviour. This defaults to false. bool preserve_external_request_id = 32; + // If set, Envoy will always set :ref:`x-request-id ` header in response. + // If this is false or not set, the request ID is returned in responses only if tracing is forced using + // :ref:`x-envoy-force-trace ` header. + bool always_set_request_id_in_response = 37; + // How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP // header. ForwardClientCertDetails forward_client_cert_details = 16 @@ -477,17 +504,17 @@ message HttpConnectionManager { // true in the future. When not specified, this value may be overridden by the // runtime variable // :ref:`http_connection_manager.normalize_path`. - // See `Normalization and Comparison ` + // See `Normalization and Comparison `_ // for details of normalization. // Note that Envoy does not perform - // `case normalization ` + // `case normalization `_ google.protobuf.BoolValue normalize_path = 30; // Determines if adjacent slashes in the path are merged into one before any processing of // requests by HTTP filters or routing. This affects the upstream *:path* header as well. Without // setting this option, incoming requests with path `//dir///file` will not match against route // with `prefix` match set to `/dir`. Defaults to `false`. Note that slash merging is not part of - // `HTTP spec ` and is provided for convenience. + // `HTTP spec `_ and is provided for convenience. bool merge_slashes = 33; // The configuration of the request ID extension. 
This includes operations such as @@ -501,6 +528,104 @@ message HttpConnectionManager { // // 3. Tracing decision (sampled, forced, etc) is set in 14th byte of the UUID. RequestIDExtension request_id_extension = 36; + + // The configuration to customize local reply returned by Envoy. It can customize status code, + // body text and response content type. If not specified, status code and text body are hard + // coded in Envoy, the response content type is plain text. + LocalReplyConfig local_reply_config = 38; + + // Determines if the port part should be removed from host/authority header before any processing + // of request by HTTP filters or routing. The port would be removed only if it is equal to the :ref:`listener's` + // local port and request method is not CONNECT. This affects the upstream host header as well. + // Without setting this option, incoming requests with host `example:443` will not match against + // route with :ref:`domains` match set to `example`. Defaults to `false`. Note that port removal is not part + // of `HTTP spec `_ and is provided for convenience. + bool strip_matching_host_port = 39; + + // Governs Envoy's behavior when receiving invalid HTTP from downstream. + // If this option is false (default), Envoy will err on the conservative side handling HTTP + // errors, terminating both HTTP/1.1 and HTTP/2 connections when receiving an invalid request. + // If this option is set to true, Envoy will be more permissive, only resetting the invalid + // stream in the case of HTTP/2 and leaving the connection open where possible (if the entire + // request is read for HTTP/1.1) + // In general this should be true for deployments receiving trusted traffic (L2 Envoys, + // company-internal mesh) and false when receiving untrusted traffic (edge deployments). 
+ // + // If different behaviors for invalid_http_message for HTTP/1 and HTTP/2 are + // desired, one *must* use the new HTTP/2 option + // :ref:`override_stream_error_on_invalid_http_message + // ` + // *not* the deprecated but similarly named :ref:`stream_error_on_invalid_http_messaging + // ` + google.protobuf.BoolValue stream_error_on_invalid_http_message = 40; +} + +// The configuration to customize local reply returned by Envoy. +message LocalReplyConfig { + // Configuration of list of mappers which allows to filter and change local response. + // The mappers will be checked by the specified order until one is matched. + repeated ResponseMapper mappers = 1; + + // The configuration to form response body from the :ref:`command operators ` + // and to specify response content type as one of: plain/text or application/json. + // + // Example one: plain/text body_format. + // + // .. code-block:: + // + // text_format: %LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=$REQ(:path)% + // + // The following response body in `plain/text` format will be generated for a request with + // local reply body of "upstream connection error", response_code=503 and path=/foo. + // + // .. code-block:: + // + // upstream connect error:503:path=/foo + // + // Example two: application/json body_format. + // + // .. code-block:: + // + // json_format: + // status: %RESPONSE_CODE% + // message: %LOCAL_REPLY_BODY% + // path: $REQ(:path)% + // + // The following response body in "application/json" format would be generated for a request with + // local reply body of "upstream connection error", response_code=503 and path=/foo. + // + // .. code-block:: json + // + // { + // "status": 503, + // "message": "upstream connection error", + // "path": "/foo" + // } + // + config.core.v3.SubstitutionFormatString body_format = 2; +} + +// The configuration to filter and change local response. +// [#next-free-field: 6] +message ResponseMapper { + // Filter to determine if this mapper should apply. 
+ config.accesslog.v3.AccessLogFilter filter = 1 [(validate.rules).message = {required: true}]; + + // The new response status code if specified. + google.protobuf.UInt32Value status_code = 2 [(validate.rules).uint32 = {lt: 600 gte: 200}]; + + // The new local reply body text if specified. It will be used in the `%LOCAL_REPLY_BODY%` + // command operator in the `body_format`. + config.core.v3.DataSource body = 3; + + // A per mapper `body_format` to override the :ref:`body_format `. + // It will be used when this mapper is matched. + config.core.v3.SubstitutionFormatString body_format_override = 4; + + // HTTP headers to add to a local reply. This allows the response mapper to append, to add + // or to override headers of any local reply before it is sent to a downstream client. + repeated config.core.v3.HeaderValueOption headers_to_add = 5 + [(validate.rules).repeated = {max_items: 1000}]; } message Rds { @@ -514,7 +639,13 @@ message Rds { // API. This allows an Envoy configuration with multiple HTTP listeners (and // associated HTTP connection manager filters) to use different route // configurations. - string route_config_name = 2 [(validate.rules).string = {min_bytes: 1}]; + string route_config_name = 2 + [(udpa.annotations.field_migrate).oneof_promotion = "name_specifier"]; + + // Resource locator for RDS. This is mutually exclusive to *route_config_name*. + // [#not-implemented-hide:] + udpa.core.v1.ResourceLocator rds_resource_locator = 3 + [(udpa.annotations.field_migrate).oneof_promotion = "name_specifier"]; } // This message is used to work around the limitations with 'oneof' and repeated fields. 
@@ -662,6 +793,7 @@ message ScopedRds { [(validate.rules).message = {required: true}]; } +// [#next-free-field: 6] message HttpFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.http_connection_manager.v2.HttpFilter"; @@ -670,14 +802,20 @@ message HttpFilter { reserved "config"; - // The name of the filter to instantiate. The name must match a - // :ref:`supported filter `. + // The name of the filter configuration. The name is used as a fallback to + // select an extension if the type of the configuration proto is not + // sufficient. It also serves as a resource name in ExtensionConfigDS. string name = 1 [(validate.rules).string = {min_bytes: 1}]; // Filter specific configuration which depends on the filter being instantiated. See the supported // filters for further documentation. oneof config_type { google.protobuf.Any typed_config = 4; + + // Configuration source specifier for an extension configuration discovery service. + // In case of a failure and without the default configuration, the HTTP listener responds with 500. + // Extension configs delivered through this mechanism are not expected to require warming (see https://github.com/envoyproxy/envoy/issues/12061). 
+ config.core.v3.ExtensionConfigSource config_discovery = 5; } } diff --git a/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/BUILD b/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/BUILD index 792ccf7ab6772..837b7b898f265 100644 --- a/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/BUILD +++ b/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/BUILD @@ -7,7 +7,7 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/annotations:pkg", - "//envoy/config/accesslog/v3:pkg", + "//envoy/config/accesslog/v4alpha:pkg", "//envoy/config/core/v4alpha:pkg", "//envoy/config/route/v4alpha:pkg", "//envoy/config/trace/v4alpha:pkg", @@ -15,5 +15,6 @@ api_proto_package( "//envoy/type/tracing/v3:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//udpa/core/v1:pkg", ], ) diff --git a/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto b/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto index 975b71cc892f3..042a39863f810 100644 --- a/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto +++ b/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto @@ -2,9 +2,12 @@ syntax = "proto3"; package envoy.extensions.filters.network.http_connection_manager.v4alpha; -import "envoy/config/accesslog/v3/accesslog.proto"; +import "envoy/config/accesslog/v4alpha/accesslog.proto"; +import "envoy/config/core/v4alpha/base.proto"; import "envoy/config/core/v4alpha/config_source.proto"; +import "envoy/config/core/v4alpha/extension.proto"; import "envoy/config/core/v4alpha/protocol.proto"; +import "envoy/config/core/v4alpha/substitution_format_string.proto"; import "envoy/config/route/v4alpha/route.proto"; import "envoy/config/route/v4alpha/scoped_route.proto"; import 
"envoy/config/trace/v4alpha/http_tracer.proto"; @@ -16,7 +19,10 @@ import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/core/v1/resource_locator.proto"; + import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/security.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -30,7 +36,7 @@ option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSIO // HTTP connection manager :ref:`configuration overview `. // [#extension: envoy.filters.network.http_connection_manager] -// [#next-free-field: 37] +// [#next-free-field: 41] message HttpConnectionManager { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager"; @@ -231,7 +237,7 @@ message HttpConnectionManager { // Determines if upgrades are enabled or disabled by default. Defaults to true. // This can be overridden on a per-route basis with :ref:`cluster // ` as documented in the - // :ref:`upgrade documentation `. + // :ref:`upgrade documentation `. google.protobuf.BoolValue enabled = 3; } @@ -263,8 +269,8 @@ message HttpConnectionManager { } // A list of individual HTTP filters that make up the filter chain for - // requests made to the connection manager. Order matters as the filters are - // processed sequentially as request events happen. + // requests made to the connection manager. :ref:`Order matters ` + // as the filters are processed sequentially as request events happen. repeated HttpFilter http_filters = 5; // Whether the connection manager manipulates the :ref:`config_http_conn_man_headers_user-agent` @@ -279,13 +285,15 @@ message HttpConnectionManager { // Additional settings for HTTP requests handled by the connection manager. These will be // applicable to both HTTP1 and HTTP2 requests. 
- config.core.v4alpha.HttpProtocolOptions common_http_protocol_options = 35; + config.core.v4alpha.HttpProtocolOptions common_http_protocol_options = 35 + [(udpa.annotations.security).configure_for_untrusted_downstream = true]; // Additional HTTP/1 settings that are passed to the HTTP/1 codec. config.core.v4alpha.Http1ProtocolOptions http_protocol_options = 8; // Additional HTTP/2 settings that are passed directly to the HTTP/2 codec. - config.core.v4alpha.Http2ProtocolOptions http2_protocol_options = 9; + config.core.v4alpha.Http2ProtocolOptions http2_protocol_options = 9 + [(udpa.annotations.security).configure_for_untrusted_downstream = true]; // An optional override that the connection manager will write to the server // header in responses. If not set, the default is *envoy*. @@ -322,6 +330,16 @@ message HttpConnectionManager { // is terminated with a 408 Request Timeout error code if no upstream response // header has been received, otherwise a stream reset occurs. // + // This timeout also specifies the amount of time that Envoy will wait for the peer to open enough + // window to write any remaining stream data once the entirety of stream data (local end stream is + // true) has been buffered pending available window. In other words, this timeout defends against + // a peer that does not release enough window to completely write the stream, even though all + // data has been proxied within available flow control windows. If the timeout is hit in this + // case, the :ref:`tx_flush_timeout ` counter will be + // incremented. Note that :ref:`max_stream_duration + // ` does not apply to + // this corner case. + // // Note that it is possible to idle timeout even if the wire traffic for a stream is non-idle, due // to the granularity of events presented to the connection manager. 
For example, while receiving // very large request headers, it may be the case that there is traffic regularly arriving on the @@ -330,13 +348,15 @@ message HttpConnectionManager { // // A value of 0 will completely disable the connection manager stream idle // timeout, although per-route idle timeout overrides will continue to apply. - google.protobuf.Duration stream_idle_timeout = 24; + google.protobuf.Duration stream_idle_timeout = 24 + [(udpa.annotations.security).configure_for_untrusted_downstream = true]; // The amount of time that Envoy will wait for the entire request to be received. // The timer is activated when the request is initiated, and is disarmed when the last byte of the // request is sent upstream (i.e. all decoding filters have processed the request), OR when the // response is initiated. If not specified or set to 0, this timeout is disabled. - google.protobuf.Duration request_timeout = 28; + google.protobuf.Duration request_timeout = 28 + [(udpa.annotations.security).configure_for_untrusted_downstream = true]; // The time that Envoy will wait between sending an HTTP/2 “shutdown // notification” (GOAWAY frame with max stream ID) and a final GOAWAY frame. @@ -383,7 +403,7 @@ message HttpConnectionManager { // Configuration for :ref:`HTTP access logs ` // emitted by the connection manager. - repeated config.accesslog.v3.AccessLog access_log = 13; + repeated config.accesslog.v4alpha.AccessLog access_log = 13; // If set to true, the connection manager will use the real remote address // of the client connection when determining internal versus external origin and manipulating @@ -392,7 +412,8 @@ message HttpConnectionManager { // :ref:`config_http_conn_man_headers_x-forwarded-for`, // :ref:`config_http_conn_man_headers_x-envoy-internal`, and // :ref:`config_http_conn_man_headers_x-envoy-external-address` for more information. 
- google.protobuf.BoolValue use_remote_address = 14; + google.protobuf.BoolValue use_remote_address = 14 + [(udpa.annotations.security).configure_for_untrusted_downstream = true]; // The number of additional ingress proxy hops from the right side of the // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header to trust when @@ -433,6 +454,11 @@ message HttpConnectionManager { // is the current Envoy behaviour. This defaults to false. bool preserve_external_request_id = 32; + // If set, Envoy will always set :ref:`x-request-id ` header in response. + // If this is false or not set, the request ID is returned in responses only if tracing is forced using + // :ref:`x-envoy-force-trace ` header. + bool always_set_request_id_in_response = 37; + // How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP // header. ForwardClientCertDetails forward_client_cert_details = 16 @@ -477,17 +503,17 @@ message HttpConnectionManager { // true in the future. When not specified, this value may be overridden by the // runtime variable // :ref:`http_connection_manager.normalize_path`. - // See `Normalization and Comparison ` + // See `Normalization and Comparison `_ // for details of normalization. // Note that Envoy does not perform - // `case normalization ` + // `case normalization `_ google.protobuf.BoolValue normalize_path = 30; // Determines if adjacent slashes in the path are merged into one before any processing of // requests by HTTP filters or routing. This affects the upstream *:path* header as well. Without // setting this option, incoming requests with path `//dir///file` will not match against route // with `prefix` match set to `/dir`. Defaults to `false`. Note that slash merging is not part of - // `HTTP spec ` and is provided for convenience. + // `HTTP spec `_ and is provided for convenience. bool merge_slashes = 33; // The configuration of the request ID extension. 
This includes operations such as @@ -501,6 +527,110 @@ message HttpConnectionManager { // // 3. Tracing decision (sampled, forced, etc) is set in 14th byte of the UUID. RequestIDExtension request_id_extension = 36; + + // The configuration to customize local reply returned by Envoy. It can customize status code, + // body text and response content type. If not specified, status code and text body are hard + // coded in Envoy, the response content type is plain text. + LocalReplyConfig local_reply_config = 38; + + // Determines if the port part should be removed from host/authority header before any processing + // of request by HTTP filters or routing. The port would be removed only if it is equal to the :ref:`listener's` + // local port and request method is not CONNECT. This affects the upstream host header as well. + // Without setting this option, incoming requests with host `example:443` will not match against + // route with :ref:`domains` match set to `example`. Defaults to `false`. Note that port removal is not part + // of `HTTP spec `_ and is provided for convenience. + bool strip_matching_host_port = 39; + + // Governs Envoy's behavior when receiving invalid HTTP from downstream. + // If this option is false (default), Envoy will err on the conservative side handling HTTP + // errors, terminating both HTTP/1.1 and HTTP/2 connections when receiving an invalid request. + // If this option is set to true, Envoy will be more permissive, only resetting the invalid + // stream in the case of HTTP/2 and leaving the connection open where possible (if the entire + // request is read for HTTP/1.1) + // In general this should be true for deployments receiving trusted traffic (L2 Envoys, + // company-internal mesh) and false when receiving untrusted traffic (edge deployments). 
+ // + // If different behaviors for invalid_http_message for HTTP/1 and HTTP/2 are + // desired, one *must* use the new HTTP/2 option + // :ref:`override_stream_error_on_invalid_http_message + // ` + // *not* the deprecated but similarly named :ref:`stream_error_on_invalid_http_messaging + // ` + google.protobuf.BoolValue stream_error_on_invalid_http_message = 40; +} + +// The configuration to customize local reply returned by Envoy. +message LocalReplyConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.http_connection_manager.v3.LocalReplyConfig"; + + // Configuration of list of mappers which allows to filter and change local response. + // The mappers will be checked by the specified order until one is matched. + repeated ResponseMapper mappers = 1; + + // The configuration to form response body from the :ref:`command operators ` + // and to specify response content type as one of: plain/text or application/json. + // + // Example one: plain/text body_format. + // + // .. code-block:: + // + // text_format: %LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=$REQ(:path)% + // + // The following response body in `plain/text` format will be generated for a request with + // local reply body of "upstream connection error", response_code=503 and path=/foo. + // + // .. code-block:: + // + // upstream connect error:503:path=/foo + // + // Example two: application/json body_format. + // + // .. code-block:: + // + // json_format: + // status: %RESPONSE_CODE% + // message: %LOCAL_REPLY_BODY% + // path: $REQ(:path)% + // + // The following response body in "application/json" format would be generated for a request with + // local reply body of "upstream connection error", response_code=503 and path=/foo. + // + // .. 
code-block:: json + // + // { + // "status": 503, + // "message": "upstream connection error", + // "path": "/foo" + // } + // + config.core.v4alpha.SubstitutionFormatString body_format = 2; +} + +// The configuration to filter and change local response. +// [#next-free-field: 6] +message ResponseMapper { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.http_connection_manager.v3.ResponseMapper"; + + // Filter to determine if this mapper should apply. + config.accesslog.v4alpha.AccessLogFilter filter = 1 [(validate.rules).message = {required: true}]; + + // The new response status code if specified. + google.protobuf.UInt32Value status_code = 2 [(validate.rules).uint32 = {lt: 600 gte: 200}]; + + // The new local reply body text if specified. It will be used in the `%LOCAL_REPLY_BODY%` + // command operator in the `body_format`. + config.core.v4alpha.DataSource body = 3; + + // A per mapper `body_format` to override the :ref:`body_format `. + // It will be used when this mapper is matched. + config.core.v4alpha.SubstitutionFormatString body_format_override = 4; + + // HTTP headers to add to a local reply. This allows the response mapper to append, to add + // or to override headers of any local reply before it is sent to a downstream client. + repeated config.core.v4alpha.HeaderValueOption headers_to_add = 5 + [(validate.rules).repeated = {max_items: 1000}]; } message Rds { @@ -510,11 +640,17 @@ message Rds { // Configuration source specifier for RDS. config.core.v4alpha.ConfigSource config_source = 1 [(validate.rules).message = {required: true}]; - // The name of the route configuration. This name will be passed to the RDS - // API. This allows an Envoy configuration with multiple HTTP listeners (and - // associated HTTP connection manager filters) to use different route - // configurations. 
- string route_config_name = 2 [(validate.rules).string = {min_bytes: 1}]; + oneof name_specifier { + // The name of the route configuration. This name will be passed to the RDS + // API. This allows an Envoy configuration with multiple HTTP listeners (and + // associated HTTP connection manager filters) to use different route + // configurations. + string route_config_name = 2; + + // Resource locator for RDS. This is mutually exclusive to *route_config_name*. + // [#not-implemented-hide:] + udpa.core.v1.ResourceLocator rds_resource_locator = 3; + } } // This message is used to work around the limitations with 'oneof' and repeated fields. @@ -663,6 +799,7 @@ message ScopedRds { [(validate.rules).message = {required: true}]; } +// [#next-free-field: 6] message HttpFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.network.http_connection_manager.v3.HttpFilter"; @@ -671,14 +808,20 @@ message HttpFilter { reserved "config"; - // The name of the filter to instantiate. The name must match a - // :ref:`supported filter `. + // The name of the filter configuration. The name is used as a fallback to + // select an extension if the type of the configuration proto is not + // sufficient. It also serves as a resource name in ExtensionConfigDS. string name = 1 [(validate.rules).string = {min_bytes: 1}]; // Filter specific configuration which depends on the filter being instantiated. See the supported // filters for further documentation. oneof config_type { google.protobuf.Any typed_config = 4; + + // Configuration source specifier for an extension configuration discovery service. + // In case of a failure and without the default configuration, the HTTP listener responds with 500. + // Extension configs delivered through this mechanism are not expected to require warming (see https://github.com/envoyproxy/envoy/issues/12061). 
+ config.core.v4alpha.ExtensionConfigSource config_discovery = 5; } } diff --git a/api/envoy/extensions/filters/network/postgres_proxy/v3alpha/postgres_proxy.proto b/api/envoy/extensions/filters/network/postgres_proxy/v3alpha/postgres_proxy.proto index 61f3ec45c8838..aa8e0f5941bf8 100644 --- a/api/envoy/extensions/filters/network/postgres_proxy/v3alpha/postgres_proxy.proto +++ b/api/envoy/extensions/filters/network/postgres_proxy/v3alpha/postgres_proxy.proto @@ -2,6 +2,8 @@ syntax = "proto3"; package envoy.extensions.filters.network.postgres_proxy.v3alpha; +import "google/protobuf/wrappers.proto"; + import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; @@ -22,4 +24,9 @@ message PostgresProxy { // The human readable prefix to use when emitting :ref:`statistics // `. string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; + + // Controls whether SQL statements received in Frontend Query messages + // are parsed. Parsing is required to produce Postgres proxy filter + // metadata. Defaults to true. + google.protobuf.BoolValue enable_sql_parsing = 2; } diff --git a/api/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto b/api/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto index a3341b5ac6067..af69d33a63401 100644 --- a/api/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto +++ b/api/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto @@ -8,6 +8,7 @@ import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/migrate.proto"; import "udpa/annotations/sensitive.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; @@ -22,7 +23,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // Redis Proxy :ref:`configuration overview `. 
// [#extension: envoy.filters.network.redis_proxy] -// [#next-free-field: 7] +// [#next-free-field: 8] message RedisProxy { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.redis_proxy.v2.RedisProxy"; @@ -38,22 +39,22 @@ message RedisProxy { // because replication is asynchronous and requires some delay. You need to ensure that your // application can tolerate stale data. enum ReadPolicy { - // Default mode. Read from the current master node. - MASTER = 0; + // Default mode. Read from the current primary node. + MASTER = 0 [(udpa.annotations.enum_value_migrate).rename = "PRIMARY"]; - // Read from the master, but if it is unavailable, read from replica nodes. - PREFER_MASTER = 1; + // Read from the primary, but if it is unavailable, read from replica nodes. + PREFER_MASTER = 1 [(udpa.annotations.enum_value_migrate).rename = "PREFER_PRIMARY"]; // Read from replica nodes. If multiple replica nodes are present within a shard, a random // node is selected. Healthy nodes have precedent over unhealthy nodes. REPLICA = 2; // Read from the replica nodes (similar to REPLICA), but if all replicas are unavailable (not - // present or unhealthy), read from the master. + // present or unhealthy), read from the primary. PREFER_REPLICA = 3; - // Read from any node of the cluster. A random node is selected among the master and replicas, - // healthy nodes have precedent over unhealthy nodes. + // Read from any node of the cluster. A random node is selected among the primary and + // replicas, healthy nodes have precedent over unhealthy nodes. ANY = 4; } @@ -113,10 +114,10 @@ message RedisProxy { google.protobuf.UInt32Value max_upstream_unknown_connections = 6; // Enable per-command statistics per upstream cluster, in addition to the filter level aggregate - // count. + // count. These commands are measured in microseconds. bool enable_command_stats = 8; - // Read policy. The default is to read from the master. + // Read policy. 
The default is to read from the primary. ReadPolicy read_policy = 7 [(validate.rules).enum = {defined_only: true}]; } @@ -193,7 +194,7 @@ message RedisProxy { ConnPoolSettings settings = 3 [(validate.rules).message = {required: true}]; // Indicates that latency stat should be computed in microseconds. By default it is computed in - // milliseconds. + // milliseconds. This does not apply to upstream command stats currently. bool latency_in_micros = 4; // List of **unique** prefixes used to separate keys from different workloads to different @@ -234,6 +235,18 @@ message RedisProxy { // client. If an AUTH command is received when the password is not set, then an "ERR Client sent // AUTH, but no password is set" error will be returned. config.core.v3.DataSource downstream_auth_password = 6 [(udpa.annotations.sensitive) = true]; + + // If a username is provided an ACL style AUTH command will be required with a username and password. + // Authenticate Redis client connections locally by forcing downstream clients to issue a `Redis + // AUTH command `_ with this username and the *downstream_auth_password* + // before enabling any other command. If an AUTH command's username and password matches this username + // and the *downstream_auth_password* , an "OK" response will be returned to the client. If the AUTH + // command username or password does not match this username or the *downstream_auth_password*, then an + // "WRONGPASS invalid username-password pair" error will be returned. If any other command is received before AUTH when this + // password is set, then a "NOAUTH Authentication required." error response will be sent to the + // client. If an AUTH command is received when the password is not set, then an "ERR Client sent + // AUTH, but no ACL is set" error will be returned. + config.core.v3.DataSource downstream_auth_username = 7 [(udpa.annotations.sensitive) = true]; } // RedisProtocolOptions specifies Redis upstream protocol options. 
This object is used in @@ -246,4 +259,8 @@ message RedisProtocolOptions { // Upstream server password as defined by the `requirepass` directive // `_ in the server's configuration file. config.core.v3.DataSource auth_password = 1 [(udpa.annotations.sensitive) = true]; + + // Upstream server username as defined by the `user` directive + // `_ in the server's configuration file. + config.core.v3.DataSource auth_username = 2 [(udpa.annotations.sensitive) = true]; } diff --git a/api/envoy/extensions/filters/network/rocketmq_proxy/v3/BUILD b/api/envoy/extensions/filters/network/rocketmq_proxy/v3/BUILD new file mode 100644 index 0000000000000..e6bc5699efc45 --- /dev/null +++ b/api/envoy/extensions/filters/network/rocketmq_proxy/v3/BUILD @@ -0,0 +1,14 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/core/v3:pkg", + "//envoy/config/route/v3:pkg", + "//envoy/type/matcher/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/extensions/filters/network/rocketmq_proxy/v3/README.md b/api/envoy/extensions/filters/network/rocketmq_proxy/v3/README.md new file mode 100644 index 0000000000000..3bd849bc25303 --- /dev/null +++ b/api/envoy/extensions/filters/network/rocketmq_proxy/v3/README.md @@ -0,0 +1 @@ +Protocol buffer definitions for the Rocketmq proxy. 
\ No newline at end of file diff --git a/api/envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.proto b/api/envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.proto new file mode 100644 index 0000000000000..ee77ab9095924 --- /dev/null +++ b/api/envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.proto @@ -0,0 +1,36 @@ +syntax = "proto3"; + +package envoy.extensions.filters.network.rocketmq_proxy.v3; + +import "envoy/extensions/filters/network/rocketmq_proxy/v3/route.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/duration.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.network.rocketmq_proxy.v3"; +option java_outer_classname = "RocketmqProxyProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: RocketMQ Proxy] +// RocketMQ Proxy :ref:`configuration overview `. +// [#extension: envoy.filters.network.rocketmq_proxy] + +message RocketmqProxy { + // The human readable prefix to use when emitting statistics. + string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + + // The route table for the connection manager is specified in this property. + RouteConfiguration route_config = 2; + + // The largest duration transient object expected to live, more than 10s is recommended. + google.protobuf.Duration transient_object_life_span = 3; + + // If develop_mode is enabled, this proxy plugin may work without dedicated traffic intercepting + // facility without considering backward compatibility of existing RocketMQ client SDK. 
+ bool develop_mode = 4; +} diff --git a/api/envoy/extensions/filters/network/rocketmq_proxy/v3/route.proto b/api/envoy/extensions/filters/network/rocketmq_proxy/v3/route.proto new file mode 100644 index 0000000000000..5fe5d33ffacf4 --- /dev/null +++ b/api/envoy/extensions/filters/network/rocketmq_proxy/v3/route.proto @@ -0,0 +1,55 @@ +syntax = "proto3"; + +package envoy.extensions.filters.network.rocketmq_proxy.v3; + +import "envoy/config/core/v3/base.proto"; +import "envoy/config/route/v3/route_components.proto"; +import "envoy/type/matcher/v3/string.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.network.rocketmq_proxy.v3"; +option java_outer_classname = "RouteProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Rocketmq Proxy Route Configuration] +// Rocketmq Proxy :ref:`configuration overview `. + +message RouteConfiguration { + // The name of the route configuration. + string name = 1; + + // The list of routes that will be matched, in order, against incoming requests. The first route + // that matches will be used. + repeated Route routes = 2; +} + +message Route { + // Route matching parameters. + RouteMatch match = 1 [(validate.rules).message = {required: true}]; + + // Route request to some upstream cluster. + RouteAction route = 2 [(validate.rules).message = {required: true}]; +} + +message RouteMatch { + // The name of the topic. + type.matcher.v3.StringMatcher topic = 1 [(validate.rules).message = {required: true}]; + + // Specifies a set of headers that the route should match on. The router will check the request’s + // headers against all the specified headers in the route config. 
A match will happen if all the + // headers in the route are present in the request with the same values (or based on presence if + // the value field is not in the config). + repeated config.route.v3.HeaderMatcher headers = 2; +} + +message RouteAction { + // Indicates the upstream cluster to which the request should be routed. + string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Optional endpoint metadata match criteria used by the subset load balancer. + config.core.v3.Metadata metadata_match = 2; +} diff --git a/api/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/BUILD b/api/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/BUILD new file mode 100644 index 0000000000000..d8d88f7f3bb4f --- /dev/null +++ b/api/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/BUILD @@ -0,0 +1,15 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/core/v4alpha:pkg", + "//envoy/config/route/v4alpha:pkg", + "//envoy/extensions/filters/network/rocketmq_proxy/v3:pkg", + "//envoy/type/matcher/v4alpha:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/rocketmq_proxy.proto b/api/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/rocketmq_proxy.proto new file mode 100644 index 0000000000000..a765734e66db5 --- /dev/null +++ b/api/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/rocketmq_proxy.proto @@ -0,0 +1,39 @@ +syntax = "proto3"; + +package envoy.extensions.filters.network.rocketmq_proxy.v4alpha; + +import "envoy/extensions/filters/network/rocketmq_proxy/v4alpha/route.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/duration.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + 
+option java_package = "io.envoyproxy.envoy.extensions.filters.network.rocketmq_proxy.v4alpha"; +option java_outer_classname = "RocketmqProxyProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: RocketMQ Proxy] +// RocketMQ Proxy :ref:`configuration overview `. +// [#extension: envoy.filters.network.rocketmq_proxy] + +message RocketmqProxy { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.rocketmq_proxy.v3.RocketmqProxy"; + + // The human readable prefix to use when emitting statistics. + string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + + // The route table for the connection manager is specified in this property. + RouteConfiguration route_config = 2; + + // The largest duration transient object expected to live, more than 10s is recommended. + google.protobuf.Duration transient_object_life_span = 3; + + // If develop_mode is enabled, this proxy plugin may work without dedicated traffic intercepting + // facility without considering backward compatibility of existing RocketMQ client SDK. 
+ bool develop_mode = 4; +} diff --git a/api/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/route.proto b/api/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/route.proto new file mode 100644 index 0000000000000..995e8bcb05e36 --- /dev/null +++ b/api/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/route.proto @@ -0,0 +1,67 @@ +syntax = "proto3"; + +package envoy.extensions.filters.network.rocketmq_proxy.v4alpha; + +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/config/route/v4alpha/route_components.proto"; +import "envoy/type/matcher/v4alpha/string.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.network.rocketmq_proxy.v4alpha"; +option java_outer_classname = "RouteProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Rocketmq Proxy Route Configuration] +// Rocketmq Proxy :ref:`configuration overview `. + +message RouteConfiguration { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.rocketmq_proxy.v3.RouteConfiguration"; + + // The name of the route configuration. + string name = 1; + + // The list of routes that will be matched, in order, against incoming requests. The first route + // that matches will be used. + repeated Route routes = 2; +} + +message Route { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.rocketmq_proxy.v3.Route"; + + // Route matching parameters. + RouteMatch match = 1 [(validate.rules).message = {required: true}]; + + // Route request to some upstream cluster. 
+ RouteAction route = 2 [(validate.rules).message = {required: true}]; +} + +message RouteMatch { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.rocketmq_proxy.v3.RouteMatch"; + + // The name of the topic. + type.matcher.v4alpha.StringMatcher topic = 1 [(validate.rules).message = {required: true}]; + + // Specifies a set of headers that the route should match on. The router will check the request’s + // headers against all the specified headers in the route config. A match will happen if all the + // headers in the route are present in the request with the same values (or based on presence if + // the value field is not in the config). + repeated config.route.v4alpha.HeaderMatcher headers = 2; +} + +message RouteAction { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.rocketmq_proxy.v3.RouteAction"; + + // Indicates the upstream cluster to which the request should be routed. + string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Optional endpoint metadata match criteria used by the subset load balancer. + config.core.v4alpha.Metadata metadata_match = 2; +} diff --git a/api/envoy/extensions/filters/network/tcp_proxy/v4alpha/BUILD b/api/envoy/extensions/filters/network/tcp_proxy/v4alpha/BUILD new file mode 100644 index 0000000000000..3825be9a8afc9 --- /dev/null +++ b/api/envoy/extensions/filters/network/tcp_proxy/v4alpha/BUILD @@ -0,0 +1,15 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/accesslog/v4alpha:pkg", + "//envoy/config/core/v4alpha:pkg", + "//envoy/extensions/filters/network/tcp_proxy/v3:pkg", + "//envoy/type/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/extensions/filters/network/tcp_proxy/v4alpha/tcp_proxy.proto b/api/envoy/extensions/filters/network/tcp_proxy/v4alpha/tcp_proxy.proto new file mode 100644 index 0000000000000..1857f2abcd4e9 --- /dev/null +++ b/api/envoy/extensions/filters/network/tcp_proxy/v4alpha/tcp_proxy.proto @@ -0,0 +1,137 @@ +syntax = "proto3"; + +package envoy.extensions.filters.network.tcp_proxy.v4alpha; + +import "envoy/config/accesslog/v4alpha/accesslog.proto"; +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/type/v3/hash_policy.proto"; + +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.network.tcp_proxy.v4alpha"; +option java_outer_classname = "TcpProxyProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: TCP Proxy] +// TCP Proxy :ref:`configuration overview `. +// [#extension: envoy.filters.network.tcp_proxy] + +// [#next-free-field: 13] +message TcpProxy { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy"; + + // Allows for specification of multiple upstream clusters along with weights + // that indicate the percentage of traffic to be forwarded to each cluster. + // The router selects an upstream cluster based on these weights. 
+ message WeightedCluster { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy.WeightedCluster"; + + message ClusterWeight { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy.WeightedCluster.ClusterWeight"; + + // Name of the upstream cluster. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // When a request matches the route, the choice of an upstream cluster is + // determined by its weight. The sum of weights across all entries in the + // clusters array determines the total weight. + uint32 weight = 2 [(validate.rules).uint32 = {gte: 1}]; + + // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints + // in the upstream cluster with metadata matching what is set in this field will be considered + // for load balancing. Note that this will be merged with what's provided in + // :ref:`TcpProxy.metadata_match + // `, with values + // here taking precedence. The filter name should be specified as *envoy.lb*. + config.core.v4alpha.Metadata metadata_match = 3; + } + + // Specifies one or more upstream clusters associated with the route. + repeated ClusterWeight clusters = 1 [(validate.rules).repeated = {min_items: 1}]; + } + + // Configuration for tunneling TCP over other transports or application layers. + // Currently, only HTTP/2 is supported. When other options exist, HTTP/2 will + // remain the default. + message TunnelingConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy.TunnelingConfig"; + + // The hostname to send in the synthesized CONNECT headers to the upstream proxy. + string hostname = 1 [(validate.rules).string = {min_bytes: 1}]; + } + + reserved 6; + + reserved "deprecated_v1"; + + // The prefix to use when emitting :ref:`statistics + // `. 
+ string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + + oneof cluster_specifier { + option (validate.required) = true; + + // The upstream cluster to connect to. + string cluster = 2; + + // Multiple upstream clusters can be specified for a given route. The + // request is routed to one of the upstream clusters based on weights + // assigned to each cluster. + WeightedCluster weighted_clusters = 10; + } + + // Optional endpoint metadata match criteria. Only endpoints in the upstream + // cluster with metadata matching that set in metadata_match will be + // considered. The filter name should be specified as *envoy.lb*. + config.core.v4alpha.Metadata metadata_match = 9; + + // The idle timeout for connections managed by the TCP proxy filter. The idle timeout + // is defined as the period in which there are no bytes sent or received on either + // the upstream or downstream connection. If not set, the default idle timeout is 1 hour. If set + // to 0s, the timeout will be disabled. + // + // .. warning:: + // Disabling this timeout has a highly likelihood of yielding connection leaks due to lost TCP + // FIN packets, etc. + google.protobuf.Duration idle_timeout = 8; + + // [#not-implemented-hide:] The idle timeout for connections managed by the TCP proxy + // filter. The idle timeout is defined as the period in which there is no + // active traffic. If not set, there is no idle timeout. When the idle timeout + // is reached the connection will be closed. The distinction between + // downstream_idle_timeout/upstream_idle_timeout provides a means to set + // timeout based on the last byte sent on the downstream/upstream connection. + google.protobuf.Duration downstream_idle_timeout = 3; + + // [#not-implemented-hide:] + google.protobuf.Duration upstream_idle_timeout = 4; + + // Configuration for :ref:`access logs ` + // emitted by the this tcp_proxy. 
+ repeated config.accesslog.v4alpha.AccessLog access_log = 5; + + // The maximum number of unsuccessful connection attempts that will be made before + // giving up. If the parameter is not specified, 1 connection attempt will be made. + google.protobuf.UInt32Value max_connect_attempts = 7 [(validate.rules).uint32 = {gte: 1}]; + + // Optional configuration for TCP proxy hash policy. If hash_policy is not set, the hash-based + // load balancing algorithms will select a host randomly. Currently the number of hash policies is + // limited to 1. + repeated type.v3.HashPolicy hash_policy = 11 [(validate.rules).repeated = {max_items: 1}]; + + // [#not-implemented-hide:] feature in progress + // If set, this configures tunneling, e.g. configuration options to tunnel multiple TCP + // payloads over a shared HTTP/2 tunnel. If this message is absent, the payload + // will be proxied upstream as per usual. + TunnelingConfig tunneling_config = 12; +} diff --git a/api/envoy/extensions/filters/network/thrift_proxy/v3/route.proto b/api/envoy/extensions/filters/network/thrift_proxy/v3/route.proto index 5ce18fd06233a..b7afc4f0b8037 100644 --- a/api/envoy/extensions/filters/network/thrift_proxy/v3/route.proto +++ b/api/envoy/extensions/filters/network/thrift_proxy/v3/route.proto @@ -103,7 +103,9 @@ message RouteAction { // header is not found or the referenced cluster does not exist Envoy will // respond with an unknown method exception or an internal error exception, // respectively. - string cluster_header = 6 [(validate.rules).string = {min_bytes: 1}]; + string cluster_header = 6 [ + (validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_VALUE strict: false} + ]; } // Optional endpoint metadata match criteria used by the subset load balancer. 
Only endpoints in diff --git a/api/envoy/extensions/filters/network/thrift_proxy/v4alpha/BUILD b/api/envoy/extensions/filters/network/thrift_proxy/v4alpha/BUILD new file mode 100644 index 0000000000000..9ec74c0a9b83a --- /dev/null +++ b/api/envoy/extensions/filters/network/thrift_proxy/v4alpha/BUILD @@ -0,0 +1,14 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/core/v4alpha:pkg", + "//envoy/config/route/v4alpha:pkg", + "//envoy/extensions/filters/network/thrift_proxy/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/extensions/filters/network/thrift_proxy/v4alpha/route.proto b/api/envoy/extensions/filters/network/thrift_proxy/v4alpha/route.proto new file mode 100644 index 0000000000000..374cc131ddf83 --- /dev/null +++ b/api/envoy/extensions/filters/network/thrift_proxy/v4alpha/route.proto @@ -0,0 +1,159 @@ +syntax = "proto3"; + +package envoy.extensions.filters.network.thrift_proxy.v4alpha; + +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/config/route/v4alpha/route_components.proto"; + +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.network.thrift_proxy.v4alpha"; +option java_outer_classname = "RouteProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Thrift Proxy Route Configuration] +// Thrift Proxy :ref:`configuration overview `. + +message RouteConfiguration { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.thrift_proxy.v3.RouteConfiguration"; + + // The name of the route configuration. 
Reserved for future use in asynchronous route discovery. + string name = 1; + + // The list of routes that will be matched, in order, against incoming requests. The first route + // that matches will be used. + repeated Route routes = 2; +} + +message Route { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.thrift_proxy.v3.Route"; + + // Route matching parameters. + RouteMatch match = 1 [(validate.rules).message = {required: true}]; + + // Route request to some upstream cluster. + RouteAction route = 2 [(validate.rules).message = {required: true}]; +} + +message RouteMatch { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.thrift_proxy.v3.RouteMatch"; + + oneof match_specifier { + option (validate.required) = true; + + // If specified, the route must exactly match the request method name. As a special case, an + // empty string matches any request method name. + string method_name = 1; + + // If specified, the route must have the service name as the request method name prefix. As a + // special case, an empty string matches any service name. Only relevant when service + // multiplexing. + string service_name = 2; + } + + // Inverts whatever matching is done in the :ref:`method_name + // ` or + // :ref:`service_name + // ` fields. + // Cannot be combined with wildcard matching as that would result in routes never being matched. + // + // .. note:: + // + // This does not invert matching done as part of the :ref:`headers field + // ` field. To + // invert header matching, see :ref:`invert_match + // `. + bool invert = 3; + + // Specifies a set of headers that the route should match on. The router will check the request’s + // headers against all the specified headers in the route config. A match will happen if all the + // headers in the route are present in the request with the same values (or based on presence if + // the value field is not in the config). 
Note that this only applies for Thrift transports and/or + // protocols that support headers. + repeated config.route.v4alpha.HeaderMatcher headers = 4; +} + +// [#next-free-field: 7] +message RouteAction { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.thrift_proxy.v3.RouteAction"; + + oneof cluster_specifier { + option (validate.required) = true; + + // Indicates a single upstream cluster to which the request should be routed + // to. + string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Multiple upstream clusters can be specified for a given route. The + // request is routed to one of the upstream clusters based on weights + // assigned to each cluster. + WeightedCluster weighted_clusters = 2; + + // Envoy will determine the cluster to route to by reading the value of the + // Thrift header named by cluster_header from the request headers. If the + // header is not found or the referenced cluster does not exist Envoy will + // respond with an unknown method exception or an internal error exception, + // respectively. + string cluster_header = 6 [ + (validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_VALUE strict: false} + ]; + } + + // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in + // the upstream cluster with metadata matching what is set in this field will be considered. + // Note that this will be merged with what's provided in :ref:`WeightedCluster.metadata_match + // `, + // with values there taking precedence. Keys and values should be provided under the "envoy.lb" + // metadata key. + config.core.v4alpha.Metadata metadata_match = 3; + + // Specifies a set of rate limit configurations that could be applied to the route. + // N.B. Thrift service or method name matching can be achieved by specifying a RequestHeaders + // action with the header name ":method-name". 
+ repeated config.route.v4alpha.RateLimit rate_limits = 4; + + // Strip the service prefix from the method name, if there's a prefix. For + // example, the method call Service:method would end up being just method. + bool strip_service_name = 5; +} + +// Allows for specification of multiple upstream clusters along with weights that indicate the +// percentage of traffic to be forwarded to each cluster. The router selects an upstream cluster +// based on these weights. +message WeightedCluster { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.thrift_proxy.v3.WeightedCluster"; + + message ClusterWeight { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.thrift_proxy.v3.WeightedCluster.ClusterWeight"; + + // Name of the upstream cluster. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // When a request matches the route, the choice of an upstream cluster is determined by its + // weight. The sum of weights across all entries in the clusters array determines the total + // weight. + google.protobuf.UInt32Value weight = 2 [(validate.rules).uint32 = {gte: 1}]; + + // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in + // the upstream cluster with metadata matching what is set in this field, combined with what's + // provided in :ref:`RouteAction's metadata_match + // `, + // will be considered. Values here will take precedence. Keys and values should be provided + // under the "envoy.lb" metadata key. + config.core.v4alpha.Metadata metadata_match = 3; + } + + // Specifies one or more upstream clusters associated with the route. 
+ repeated ClusterWeight clusters = 1 [(validate.rules).repeated = {min_items: 1}]; +} diff --git a/api/envoy/extensions/filters/network/thrift_proxy/v4alpha/thrift_proxy.proto b/api/envoy/extensions/filters/network/thrift_proxy/v4alpha/thrift_proxy.proto new file mode 100644 index 0000000000000..6bf055da3ce65 --- /dev/null +++ b/api/envoy/extensions/filters/network/thrift_proxy/v4alpha/thrift_proxy.proto @@ -0,0 +1,130 @@ +syntax = "proto3"; + +package envoy.extensions.filters.network.thrift_proxy.v4alpha; + +import "envoy/extensions/filters/network/thrift_proxy/v4alpha/route.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/struct.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.network.thrift_proxy.v4alpha"; +option java_outer_classname = "ThriftProxyProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Thrift Proxy] +// Thrift Proxy :ref:`configuration overview `. +// [#extension: envoy.filters.network.thrift_proxy] + +// Thrift transport types supported by Envoy. +enum TransportType { + // For downstream connections, the Thrift proxy will attempt to determine which transport to use. + // For upstream connections, the Thrift proxy will use same transport as the downstream + // connection. + AUTO_TRANSPORT = 0; + + // The Thrift proxy will use the Thrift framed transport. + FRAMED = 1; + + // The Thrift proxy will use the Thrift unframed transport. + UNFRAMED = 2; + + // The Thrift proxy will assume the client is using the Thrift header transport. + HEADER = 3; +} + +// Thrift Protocol types supported by Envoy. +enum ProtocolType { + // For downstream connections, the Thrift proxy will attempt to determine which protocol to use. 
+ // Note that the older, non-strict (or lax) binary protocol is not included in automatic protocol + // detection. For upstream connections, the Thrift proxy will use the same protocol as the + // downstream connection. + AUTO_PROTOCOL = 0; + + // The Thrift proxy will use the Thrift binary protocol. + BINARY = 1; + + // The Thrift proxy will use Thrift non-strict binary protocol. + LAX_BINARY = 2; + + // The Thrift proxy will use the Thrift compact protocol. + COMPACT = 3; + + // The Thrift proxy will use the Thrift "Twitter" protocol implemented by the finagle library. + TWITTER = 4; +} + +// [#next-free-field: 6] +message ThriftProxy { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.thrift_proxy.v3.ThriftProxy"; + + // Supplies the type of transport that the Thrift proxy should use. Defaults to + // :ref:`AUTO_TRANSPORT`. + TransportType transport = 2 [(validate.rules).enum = {defined_only: true}]; + + // Supplies the type of protocol that the Thrift proxy should use. Defaults to + // :ref:`AUTO_PROTOCOL`. + ProtocolType protocol = 3 [(validate.rules).enum = {defined_only: true}]; + + // The human readable prefix to use when emitting statistics. + string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + + // The route table for the connection manager is static and is specified in this property. + RouteConfiguration route_config = 4; + + // A list of individual Thrift filters that make up the filter chain for requests made to the + // Thrift proxy. Order matters as the filters are processed sequentially. For backwards + // compatibility, if no thrift_filters are specified, a default Thrift router filter + // (`envoy.filters.thrift.router`) is used. + repeated ThriftFilter thrift_filters = 5; +} + +// ThriftFilter configures a Thrift filter. 
+message ThriftFilter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.thrift_proxy.v3.ThriftFilter"; + + reserved 2; + + reserved "config"; + + // The name of the filter to instantiate. The name must match a supported + // filter. The built-in filters are: + // + // [#comment:TODO(zuercher): Auto generate the following list] + // * :ref:`envoy.filters.thrift.router ` + // * :ref:`envoy.filters.thrift.rate_limit ` + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Filter specific configuration which depends on the filter being instantiated. See the supported + // filters for further documentation. + oneof config_type { + google.protobuf.Any typed_config = 3; + } +} + +// ThriftProtocolOptions specifies Thrift upstream protocol options. This object is used in +// in +// :ref:`typed_extension_protocol_options`, +// keyed by the name `envoy.filters.network.thrift_proxy`. +message ThriftProtocolOptions { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.thrift_proxy.v3.ThriftProtocolOptions"; + + // Supplies the type of transport that the Thrift proxy should use for upstream connections. + // Selecting + // :ref:`AUTO_TRANSPORT`, + // which is the default, causes the proxy to use the same transport as the downstream connection. + TransportType transport = 1 [(validate.rules).enum = {defined_only: true}]; + + // Supplies the type of protocol that the Thrift proxy should use for upstream connections. + // Selecting + // :ref:`AUTO_PROTOCOL`, + // which is the default, causes the proxy to use the same protocol as the downstream connection. 
+ ProtocolType protocol = 2 [(validate.rules).enum = {defined_only: true}]; +} diff --git a/api/envoy/extensions/filters/network/wasm/v3/BUILD b/api/envoy/extensions/filters/network/wasm/v3/BUILD new file mode 100644 index 0000000000000..8bad369e35113 --- /dev/null +++ b/api/envoy/extensions/filters/network/wasm/v3/BUILD @@ -0,0 +1,12 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/extensions/wasm/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/extensions/filters/network/wasm/v3/wasm.proto b/api/envoy/extensions/filters/network/wasm/v3/wasm.proto new file mode 100644 index 0000000000000..131582762b590 --- /dev/null +++ b/api/envoy/extensions/filters/network/wasm/v3/wasm.proto @@ -0,0 +1,20 @@ +syntax = "proto3"; + +package envoy.extensions.filters.network.wasm.v3; + +import "envoy/extensions/wasm/v3/wasm.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.network.wasm.v3"; +option java_outer_classname = "WasmProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [[#not-implemented-hide:] +message Wasm { + // General Plugin configuration. 
+ envoy.extensions.wasm.v3.PluginConfig config = 1; +} diff --git a/api/envoy/extensions/filter/udp/dns_filter/v3alpha/BUILD b/api/envoy/extensions/filters/udp/dns_filter/v3alpha/BUILD similarity index 84% rename from api/envoy/extensions/filter/udp/dns_filter/v3alpha/BUILD rename to api/envoy/extensions/filters/udp/dns_filter/v3alpha/BUILD index d011b4d830ad4..dbf0a33e662e9 100644 --- a/api/envoy/extensions/filter/udp/dns_filter/v3alpha/BUILD +++ b/api/envoy/extensions/filters/udp/dns_filter/v3alpha/BUILD @@ -7,7 +7,6 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/core/v3:pkg", - "//envoy/config/filter/udp/dns_filter/v2alpha:pkg", "//envoy/data/dns/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], diff --git a/api/envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.proto b/api/envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.proto new file mode 100644 index 0000000000000..32103540c1d2b --- /dev/null +++ b/api/envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.proto @@ -0,0 +1,76 @@ +syntax = "proto3"; + +package envoy.extensions.filters.udp.dns_filter.v3alpha; + +import "envoy/config/core/v3/address.proto"; +import "envoy/config/core/v3/base.proto"; +import "envoy/data/dns/v3/dns_table.proto"; + +import "google/protobuf/duration.proto"; + +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.udp.dns_filter.v3alpha"; +option java_outer_classname = "DnsFilterProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).work_in_progress = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: DNS Filter] +// DNS Filter :ref:`configuration overview `. +// [#extension: envoy.filters.udp_listener.dns_filter] + +// Configuration for the DNS filter. 
+message DnsFilterConfig { + // This message contains the configuration for the DNS Filter operating + // in a server context. This message will contain the virtual hosts and + // associated addresses with which Envoy will respond to queries + message ServerContextConfig { + oneof config_source { + option (validate.required) = true; + + // Load the configuration specified from the control plane + data.dns.v3.DnsTable inline_dns_table = 1; + + // Seed the filter configuration from an external path. This source + // is a yaml formatted file that contains the DnsTable driving Envoy's + // responses to DNS queries + config.core.v3.DataSource external_dns_table = 2; + } + } + + // This message contains the configuration for the DNS Filter operating + // in a client context. This message will contain the timeouts, retry, + // and forwarding configuration for Envoy to make DNS requests to other + // resolvers + message ClientContextConfig { + // Sets the maximum time we will wait for the upstream query to complete + // We allow 5s for the upstream resolution to complete, so the minimum + // value here is 1. Note that the total latency for a failed query is the + // number of retries multiplied by the resolver_timeout. + google.protobuf.Duration resolver_timeout = 1 [(validate.rules).duration = {gte {seconds: 1}}]; + + // A list of DNS servers to which we can forward queries. If not + // specified, Envoy will use the ambient DNS resolvers in the + // system. + repeated config.core.v3.Address upstream_resolvers = 2; + + // Controls how many outstanding external lookup contexts the filter tracks. 
+ // The context structure allows the filter to respond to every query even if the external + // resolution times out or is otherwise unsuccessful + uint64 max_pending_lookups = 3 [(validate.rules).uint64 = {gte: 1}]; + } + + // The stat prefix used when emitting DNS filter statistics + string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; + + // Server context configuration contains the data that the filter uses to respond + // to DNS requests. + ServerContextConfig server_config = 2; + + // Client context configuration controls Envoy's behavior when it must use external + // resolvers to answer a query. This object is optional and if omitted instructs + // the filter to resolve queries from the data in the server_config + ClientContextConfig client_config = 3; +} diff --git a/api/envoy/extensions/filters/udp/dns_filter/v4alpha/BUILD b/api/envoy/extensions/filters/udp/dns_filter/v4alpha/BUILD new file mode 100644 index 0000000000000..f869cf5ac123a --- /dev/null +++ b/api/envoy/extensions/filters/udp/dns_filter/v4alpha/BUILD @@ -0,0 +1,14 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/core/v4alpha:pkg", + "//envoy/data/dns/v4alpha:pkg", + "//envoy/extensions/filters/udp/dns_filter/v3alpha:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/extensions/filters/udp/dns_filter/v4alpha/dns_filter.proto b/api/envoy/extensions/filters/udp/dns_filter/v4alpha/dns_filter.proto new file mode 100644 index 0000000000000..54615b8b93ed8 --- /dev/null +++ b/api/envoy/extensions/filters/udp/dns_filter/v4alpha/dns_filter.proto @@ -0,0 +1,86 @@ +syntax = "proto3"; + +package envoy.extensions.filters.udp.dns_filter.v4alpha; + +import "envoy/config/core/v4alpha/address.proto"; +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/data/dns/v4alpha/dns_table.proto"; + +import "google/protobuf/duration.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.udp.dns_filter.v4alpha"; +option java_outer_classname = "DnsFilterProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).work_in_progress = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: DNS Filter] +// DNS Filter :ref:`configuration overview `. +// [#extension: envoy.filters.udp_listener.dns_filter] + +// Configuration for the DNS filter. +message DnsFilterConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.udp.dns_filter.v3alpha.DnsFilterConfig"; + + // This message contains the configuration for the DNS Filter operating + // in a server context. 
This message will contain the virtual hosts and + // associated addresses with which Envoy will respond to queries + message ServerContextConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.udp.dns_filter.v3alpha.DnsFilterConfig.ServerContextConfig"; + + oneof config_source { + option (validate.required) = true; + + // Load the configuration specified from the control plane + data.dns.v4alpha.DnsTable inline_dns_table = 1; + + // Seed the filter configuration from an external path. This source + // is a yaml formatted file that contains the DnsTable driving Envoy's + // responses to DNS queries + config.core.v4alpha.DataSource external_dns_table = 2; + } + } + + // This message contains the configuration for the DNS Filter operating + // in a client context. This message will contain the timeouts, retry, + // and forwarding configuration for Envoy to make DNS requests to other + // resolvers + message ClientContextConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.udp.dns_filter.v3alpha.DnsFilterConfig.ClientContextConfig"; + + // Sets the maximum time we will wait for the upstream query to complete + // We allow 5s for the upstream resolution to complete, so the minimum + // value here is 1. Note that the total latency for a failed query is the + // number of retries multiplied by the resolver_timeout. + google.protobuf.Duration resolver_timeout = 1 [(validate.rules).duration = {gte {seconds: 1}}]; + + // A list of DNS servers to which we can forward queries. If not + // specified, Envoy will use the ambient DNS resolvers in the + // system. + repeated config.core.v4alpha.Address upstream_resolvers = 2; + + // Controls how many outstanding external lookup contexts the filter tracks. 
+ // The context structure allows the filter to respond to every query even if the external + // resolution times out or is otherwise unsuccessful + uint64 max_pending_lookups = 3 [(validate.rules).uint64 = {gte: 1}]; + } + + // The stat prefix used when emitting DNS filter statistics + string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; + + // Server context configuration contains the data that the filter uses to respond + // to DNS requests. + ServerContextConfig server_config = 2; + + // Client context configuration controls Envoy's behavior when it must use external + // resolvers to answer a query. This object is optional and if omitted instructs + // the filter to resolve queries from the data in the server_config + ClientContextConfig client_config = 3; +} diff --git a/api/envoy/extensions/filters/udp/udp_proxy/v3/BUILD b/api/envoy/extensions/filters/udp/udp_proxy/v3/BUILD new file mode 100644 index 0000000000000..c9a0d31060397 --- /dev/null +++ b/api/envoy/extensions/filters/udp/udp_proxy/v3/BUILD @@ -0,0 +1,12 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/filter/udp/udp_proxy/v2alpha:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/extensions/filters/udp/udp_proxy/v3/udp_proxy.proto b/api/envoy/extensions/filters/udp/udp_proxy/v3/udp_proxy.proto new file mode 100644 index 0000000000000..43d2c56c06738 --- /dev/null +++ b/api/envoy/extensions/filters/udp/udp_proxy/v3/udp_proxy.proto @@ -0,0 +1,38 @@ +syntax = "proto3"; + +package envoy.extensions.filters.udp.udp_proxy.v3; + +import "google/protobuf/duration.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.udp.udp_proxy.v3"; +option java_outer_classname = "UdpProxyProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: UDP proxy] +// UDP proxy :ref:`configuration overview `. +// [#extension: envoy.filters.udp_listener.udp_proxy] + +// Configuration for the UDP proxy filter. +message UdpProxyConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.filter.udp.udp_proxy.v2alpha.UdpProxyConfig"; + + // The stat prefix used when emitting UDP proxy filter stats. + string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + + oneof route_specifier { + option (validate.required) = true; + + // The upstream cluster to connect to. + string cluster = 2 [(validate.rules).string = {min_bytes: 1}]; + } + + // The idle timeout for sessions. Idle is defined as no datagrams between received or sent by + // the session. The default if not specified is 1 minute. 
+ google.protobuf.Duration idle_timeout = 3; +} diff --git a/api/envoy/extensions/internal_redirect/allow_listed_routes/v3/BUILD b/api/envoy/extensions/internal_redirect/allow_listed_routes/v3/BUILD new file mode 100644 index 0000000000000..ef3541ebcb1df --- /dev/null +++ b/api/envoy/extensions/internal_redirect/allow_listed_routes/v3/BUILD @@ -0,0 +1,9 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/api/envoy/extensions/internal_redirect/allow_listed_routes/v3/allow_listed_routes_config.proto b/api/envoy/extensions/internal_redirect/allow_listed_routes/v3/allow_listed_routes_config.proto new file mode 100644 index 0000000000000..a6da5b0f5d9b6 --- /dev/null +++ b/api/envoy/extensions/internal_redirect/allow_listed_routes/v3/allow_listed_routes_config.proto @@ -0,0 +1,24 @@ +syntax = "proto3"; + +package envoy.extensions.internal_redirect.allow_listed_routes.v3; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.internal_redirect.allow_listed_routes.v3"; +option java_outer_classname = "AllowListedRoutesConfigProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Allow listed routes internal redirect predicate] + +// An internal redirect predicate that accepts only explicitly allowed target routes. +// [#extension: envoy.internal_redirect_predicates.allow_listed_routes] +message AllowListedRoutesConfig { + // The list of routes that's allowed as redirect target by this predicate, + // identified by the route's :ref:`name `. + // Empty route names are not allowed. 
+ repeated string allowed_route_names = 1 + [(validate.rules).repeated = {items {string {min_len: 1}}}]; +} diff --git a/api/envoy/extensions/internal_redirect/previous_routes/v3/BUILD b/api/envoy/extensions/internal_redirect/previous_routes/v3/BUILD new file mode 100644 index 0000000000000..ef3541ebcb1df --- /dev/null +++ b/api/envoy/extensions/internal_redirect/previous_routes/v3/BUILD @@ -0,0 +1,9 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/api/envoy/extensions/internal_redirect/previous_routes/v3/previous_routes_config.proto b/api/envoy/extensions/internal_redirect/previous_routes/v3/previous_routes_config.proto new file mode 100644 index 0000000000000..6cc5fba871ea0 --- /dev/null +++ b/api/envoy/extensions/internal_redirect/previous_routes/v3/previous_routes_config.proto @@ -0,0 +1,19 @@ +syntax = "proto3"; + +package envoy.extensions.internal_redirect.previous_routes.v3; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.internal_redirect.previous_routes.v3"; +option java_outer_classname = "PreviousRoutesConfigProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Previous routes internal redirect predicate] + +// An internal redirect predicate that rejects redirect targets that are pointing +// to a route that has been followed by a previous redirect from the current route. 
+// [#extension: envoy.internal_redirect_predicates.previous_routes] +message PreviousRoutesConfig { +} diff --git a/api/envoy/extensions/internal_redirect/safe_cross_scheme/v3/BUILD b/api/envoy/extensions/internal_redirect/safe_cross_scheme/v3/BUILD new file mode 100644 index 0000000000000..ef3541ebcb1df --- /dev/null +++ b/api/envoy/extensions/internal_redirect/safe_cross_scheme/v3/BUILD @@ -0,0 +1,9 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/api/envoy/extensions/internal_redirect/safe_cross_scheme/v3/safe_cross_scheme_config.proto b/api/envoy/extensions/internal_redirect/safe_cross_scheme/v3/safe_cross_scheme_config.proto new file mode 100644 index 0000000000000..54cec2f09bbba --- /dev/null +++ b/api/envoy/extensions/internal_redirect/safe_cross_scheme/v3/safe_cross_scheme_config.proto @@ -0,0 +1,24 @@ +syntax = "proto3"; + +package envoy.extensions.internal_redirect.safe_cross_scheme.v3; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.internal_redirect.safe_cross_scheme.v3"; +option java_outer_classname = "SafeCrossSchemeConfigProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: SafeCrossScheme internal redirect predicate] + +// An internal redirect predicate that checks the scheme between the +// downstream url and the redirect target url and allows a) same scheme +// redirect and b) safe cross scheme redirect, which means if the downstream +// scheme is HTTPS, both HTTPS and HTTP redirect targets are allowed, but if the +// downstream scheme is HTTP, only HTTP redirect targets are allowed. 
+// [#extension: +// envoy.internal_redirect_predicates.safe_cross_scheme] +message SafeCrossSchemeConfig { +} diff --git a/api/envoy/extensions/network/socket_interface/v3/BUILD b/api/envoy/extensions/network/socket_interface/v3/BUILD new file mode 100644 index 0000000000000..ef3541ebcb1df --- /dev/null +++ b/api/envoy/extensions/network/socket_interface/v3/BUILD @@ -0,0 +1,9 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/api/envoy/extensions/network/socket_interface/v3/default_socket_interface.proto b/api/envoy/extensions/network/socket_interface/v3/default_socket_interface.proto new file mode 100644 index 0000000000000..d2c747ec49fb1 --- /dev/null +++ b/api/envoy/extensions/network/socket_interface/v3/default_socket_interface.proto @@ -0,0 +1,17 @@ +syntax = "proto3"; + +package envoy.extensions.network.socket_interface.v3; + +import "udpa/annotations/status.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.network.socket_interface.v3"; +option java_outer_classname = "DefaultSocketInterfaceProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Default Socket Interface configuration] + +// Configuration for default socket interface that relies on OS dependent syscall to create +// sockets. 
+message DefaultSocketInterface { +} diff --git a/api/envoy/config/wasm/v2alpha/BUILD b/api/envoy/extensions/transport_sockets/proxy_protocol/v3/BUILD similarity index 87% rename from api/envoy/config/wasm/v2alpha/BUILD rename to api/envoy/extensions/transport_sockets/proxy_protocol/v3/BUILD index 69168ad0cf246..2c3dad6453b65 100644 --- a/api/envoy/config/wasm/v2alpha/BUILD +++ b/api/envoy/extensions/transport_sockets/proxy_protocol/v3/BUILD @@ -6,7 +6,7 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ - "//envoy/api/v2/core:pkg", + "//envoy/config/core/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) diff --git a/api/envoy/extensions/transport_sockets/proxy_protocol/v3/upstream_proxy_protocol.proto b/api/envoy/extensions/transport_sockets/proxy_protocol/v3/upstream_proxy_protocol.proto new file mode 100644 index 0000000000000..c6c2ee9798d6c --- /dev/null +++ b/api/envoy/extensions/transport_sockets/proxy_protocol/v3/upstream_proxy_protocol.proto @@ -0,0 +1,26 @@ +syntax = "proto3"; + +package envoy.extensions.transport_sockets.proxy_protocol.v3; + +import "envoy/config/core/v3/base.proto"; +import "envoy/config/core/v3/proxy_protocol.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.proxy_protocol.v3"; +option java_outer_classname = "UpstreamProxyProtocolProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Upstream Proxy Protocol] +// [#extension: envoy.transport_sockets.upstream_proxy_protocol] +// [#not-implemented-hide:] +// Configuration for PROXY protocol socket +message ProxyProtocolUpstreamTransport { + config.core.v3.ProxyProtocolConfig config = 1; + + // The underlying transport socket being wrapped. 
+ config.core.v3.TransportSocket transport_socket = 2 [(validate.rules).message = {required: true}]; +} diff --git a/api/envoy/extensions/transport_sockets/quic/v3/BUILD b/api/envoy/extensions/transport_sockets/quic/v3/BUILD new file mode 100644 index 0000000000000..e95e504f3caf3 --- /dev/null +++ b/api/envoy/extensions/transport_sockets/quic/v3/BUILD @@ -0,0 +1,12 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/extensions/transport_sockets/tls/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/extensions/transport_sockets/quic/v3/quic_transport.proto b/api/envoy/extensions/transport_sockets/quic/v3/quic_transport.proto new file mode 100644 index 0000000000000..b17e2262bc1e7 --- /dev/null +++ b/api/envoy/extensions/transport_sockets/quic/v3/quic_transport.proto @@ -0,0 +1,28 @@ +syntax = "proto3"; + +package envoy.extensions.transport_sockets.quic.v3; + +import "envoy/extensions/transport_sockets/tls/v3/tls.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.quic.v3"; +option java_outer_classname = "QuicTransportProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: quic transport] +// [#extension: envoy.transport_sockets.quic] + +// Configuration for Downstream QUIC transport socket. This provides Google's implementation of Google QUIC and IETF QUIC to Envoy. +message QuicDownstreamTransport { + tls.v3.DownstreamTlsContext downstream_tls_context = 1 + [(validate.rules).message = {required: true}]; +} + +// Configuration for Upstream QUIC transport socket. 
This provides Google's implementation of Google QUIC and IETF QUIC to Envoy. +message QuicUpstreamTransport { + tls.v3.UpstreamTlsContext upstream_tls_context = 1 [(validate.rules).message = {required: true}]; +} diff --git a/api/envoy/extensions/transport_sockets/quic/v4alpha/BUILD b/api/envoy/extensions/transport_sockets/quic/v4alpha/BUILD new file mode 100644 index 0000000000000..47c94aa706ee8 --- /dev/null +++ b/api/envoy/extensions/transport_sockets/quic/v4alpha/BUILD @@ -0,0 +1,13 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/extensions/transport_sockets/quic/v3:pkg", + "//envoy/extensions/transport_sockets/tls/v4alpha:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/extensions/transport_sockets/quic/v4alpha/quic_transport.proto b/api/envoy/extensions/transport_sockets/quic/v4alpha/quic_transport.proto new file mode 100644 index 0000000000000..255bfe627b74c --- /dev/null +++ b/api/envoy/extensions/transport_sockets/quic/v4alpha/quic_transport.proto @@ -0,0 +1,35 @@ +syntax = "proto3"; + +package envoy.extensions.transport_sockets.quic.v4alpha; + +import "envoy/extensions/transport_sockets/tls/v4alpha/tls.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.quic.v4alpha"; +option java_outer_classname = "QuicTransportProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: quic transport] +// [#extension: envoy.transport_sockets.quic] + +// Configuration for Downstream QUIC transport socket. This provides Google's implementation of Google QUIC and IETF QUIC to Envoy. 
+message QuicDownstreamTransport { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.transport_sockets.quic.v3.QuicDownstreamTransport"; + + tls.v4alpha.DownstreamTlsContext downstream_tls_context = 1 + [(validate.rules).message = {required: true}]; +} + +// Configuration for Upstream QUIC transport socket. This provides Google's implementation of Google QUIC and IETF QUIC to Envoy. +message QuicUpstreamTransport { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.transport_sockets.quic.v3.QuicUpstreamTransport"; + + tls.v4alpha.UpstreamTlsContext upstream_tls_context = 1 + [(validate.rules).message = {required: true}]; +} diff --git a/api/envoy/extensions/transport_sockets/tls/v3/BUILD b/api/envoy/extensions/transport_sockets/tls/v3/BUILD index 62b69636c78cc..14187bea65a7d 100644 --- a/api/envoy/extensions/transport_sockets/tls/v3/BUILD +++ b/api/envoy/extensions/transport_sockets/tls/v3/BUILD @@ -10,5 +10,6 @@ api_proto_package( "//envoy/config/core/v3:pkg", "//envoy/type/matcher/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//udpa/core/v1:pkg", ], ) diff --git a/api/envoy/extensions/transport_sockets/tls/v3/cert.proto b/api/envoy/extensions/transport_sockets/tls/v3/cert.proto index ea4bc1475c470..cf5dc597aafb7 100644 --- a/api/envoy/extensions/transport_sockets/tls/v3/cert.proto +++ b/api/envoy/extensions/transport_sockets/tls/v3/cert.proto @@ -2,510 +2,12 @@ syntax = "proto3"; package envoy.extensions.transport_sockets.tls.v3; -import "envoy/config/core/v3/base.proto"; -import "envoy/config/core/v3/config_source.proto"; -import "envoy/type/matcher/v3/string.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/sensitive.proto"; import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import 
"validate/validate.proto"; + +import public "envoy/extensions/transport_sockets/tls/v3/common.proto"; +import public "envoy/extensions/transport_sockets/tls/v3/secret.proto"; +import public "envoy/extensions/transport_sockets/tls/v3/tls.proto"; option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tls.v3"; option java_outer_classname = "CertProto"; option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Common TLS configuration] - -message TlsParameters { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.TlsParameters"; - - enum TlsProtocol { - // Envoy will choose the optimal TLS version. - TLS_AUTO = 0; - - // TLS 1.0 - TLSv1_0 = 1; - - // TLS 1.1 - TLSv1_1 = 2; - - // TLS 1.2 - TLSv1_2 = 3; - - // TLS 1.3 - TLSv1_3 = 4; - } - - // Minimum TLS protocol version. By default, it's ``TLSv1_2`` for clients and ``TLSv1_0`` for - // servers. - TlsProtocol tls_minimum_protocol_version = 1 [(validate.rules).enum = {defined_only: true}]; - - // Maximum TLS protocol version. By default, it's ``TLSv1_3`` for servers in non-FIPS builds, and - // ``TLSv1_2`` for clients and for servers using :ref:`BoringSSL FIPS `. - TlsProtocol tls_maximum_protocol_version = 2 [(validate.rules).enum = {defined_only: true}]; - - // If specified, the TLS listener will only support the specified `cipher list - // `_ - // when negotiating TLS 1.0-1.2 (this setting has no effect when negotiating TLS 1.3). If not - // specified, the default list will be used. - // - // In non-FIPS builds, the default cipher list is: - // - // .. 
code-block:: none - // - // [ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA-CHACHA20-POLY1305] - // [ECDHE-RSA-AES128-GCM-SHA256|ECDHE-RSA-CHACHA20-POLY1305] - // ECDHE-ECDSA-AES128-SHA - // ECDHE-RSA-AES128-SHA - // AES128-GCM-SHA256 - // AES128-SHA - // ECDHE-ECDSA-AES256-GCM-SHA384 - // ECDHE-RSA-AES256-GCM-SHA384 - // ECDHE-ECDSA-AES256-SHA - // ECDHE-RSA-AES256-SHA - // AES256-GCM-SHA384 - // AES256-SHA - // - // In builds using :ref:`BoringSSL FIPS `, the default cipher list is: - // - // .. code-block:: none - // - // ECDHE-ECDSA-AES128-GCM-SHA256 - // ECDHE-RSA-AES128-GCM-SHA256 - // ECDHE-ECDSA-AES128-SHA - // ECDHE-RSA-AES128-SHA - // AES128-GCM-SHA256 - // AES128-SHA - // ECDHE-ECDSA-AES256-GCM-SHA384 - // ECDHE-RSA-AES256-GCM-SHA384 - // ECDHE-ECDSA-AES256-SHA - // ECDHE-RSA-AES256-SHA - // AES256-GCM-SHA384 - // AES256-SHA - repeated string cipher_suites = 3; - - // If specified, the TLS connection will only support the specified ECDH - // curves. If not specified, the default curves will be used. - // - // In non-FIPS builds, the default curves are: - // - // .. code-block:: none - // - // X25519 - // P-256 - // - // In builds using :ref:`BoringSSL FIPS `, the default curve is: - // - // .. code-block:: none - // - // P-256 - repeated string ecdh_curves = 4; -} - -// BoringSSL private key method configuration. The private key methods are used for external -// (potentially asynchronous) signing and decryption operations. Some use cases for private key -// methods would be TPM support and TLS acceleration. -message PrivateKeyProvider { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.auth.PrivateKeyProvider"; - - reserved 2; - - reserved "config"; - - // Private key method provider name. The name must match a - // supported private key method provider type. - string provider_name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // Private key method provider specific configuration. 
- oneof config_type { - google.protobuf.Any typed_config = 3 [(udpa.annotations.sensitive) = true]; - } -} - -// [#next-free-field: 7] -message TlsCertificate { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.TlsCertificate"; - - // The TLS certificate chain. - config.core.v3.DataSource certificate_chain = 1; - - // The TLS private key. - config.core.v3.DataSource private_key = 2 [(udpa.annotations.sensitive) = true]; - - // BoringSSL private key method provider. This is an alternative to :ref:`private_key - // ` field. This can't be - // marked as ``oneof`` due to API compatibility reasons. Setting both :ref:`private_key - // ` and - // :ref:`private_key_provider - // ` fields will result in an - // error. - PrivateKeyProvider private_key_provider = 6; - - // The password to decrypt the TLS private key. If this field is not set, it is assumed that the - // TLS private key is not password encrypted. - config.core.v3.DataSource password = 3 [(udpa.annotations.sensitive) = true]; - - // [#not-implemented-hide:] - config.core.v3.DataSource ocsp_staple = 4; - - // [#not-implemented-hide:] - repeated config.core.v3.DataSource signed_certificate_timestamp = 5; -} - -message TlsSessionTicketKeys { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.auth.TlsSessionTicketKeys"; - - // Keys for encrypting and decrypting TLS session tickets. The - // first key in the array contains the key to encrypt all new sessions created by this context. - // All keys are candidates for decrypting received tickets. This allows for easy rotation of keys - // by, for example, putting the new key first, and the previous key second. - // - // If :ref:`session_ticket_keys ` - // is not specified, the TLS library will still support resuming sessions via tickets, but it will - // use an internally-generated and managed key, so sessions cannot be resumed across hot restarts - // or on different hosts. 
- // - // Each key must contain exactly 80 bytes of cryptographically-secure random data. For - // example, the output of ``openssl rand 80``. - // - // .. attention:: - // - // Using this feature has serious security considerations and risks. Improper handling of keys - // may result in loss of secrecy in connections, even if ciphers supporting perfect forward - // secrecy are used. See https://www.imperialviolet.org/2013/06/27/botchingpfs.html for some - // discussion. To minimize the risk, you must: - // - // * Keep the session ticket keys at least as secure as your TLS certificate private keys - // * Rotate session ticket keys at least daily, and preferably hourly - // * Always generate keys using a cryptographically-secure random data source - repeated config.core.v3.DataSource keys = 1 - [(validate.rules).repeated = {min_items: 1}, (udpa.annotations.sensitive) = true]; -} - -// [#next-free-field: 11] -message CertificateValidationContext { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.auth.CertificateValidationContext"; - - // Peer certificate verification mode. - enum TrustChainVerification { - // Perform default certificate verification (e.g., against CA / verification lists) - VERIFY_TRUST_CHAIN = 0; - - // Connections where the certificate fails verification will be permitted. - // For HTTP connections, the result of certificate verification can be used in route matching. ( - // see :ref:`validated ` ). - ACCEPT_UNTRUSTED = 1; - } - - reserved 4; - - reserved "verify_subject_alt_name"; - - // TLS certificate data containing certificate authority certificates to use in verifying - // a presented peer certificate (e.g. server certificate for clusters or client certificate - // for listeners). If not specified and a peer certificate is presented it will not be - // verified. 
By default, a client certificate is optional, unless one of the additional - // options (:ref:`require_client_certificate - // `, - // :ref:`verify_certificate_spki - // `, - // :ref:`verify_certificate_hash - // `, or - // :ref:`match_subject_alt_names - // `) is also - // specified. - // - // It can optionally contain certificate revocation lists, in which case Envoy will verify - // that the presented peer certificate has not been revoked by one of the included CRLs. - // - // See :ref:`the TLS overview ` for a list of common - // system CA locations. - config.core.v3.DataSource trusted_ca = 1; - - // An optional list of base64-encoded SHA-256 hashes. If specified, Envoy will verify that the - // SHA-256 of the DER-encoded Subject Public Key Information (SPKI) of the presented certificate - // matches one of the specified values. - // - // A base64-encoded SHA-256 of the Subject Public Key Information (SPKI) of the certificate - // can be generated with the following command: - // - // .. code-block:: bash - // - // $ openssl x509 -in path/to/client.crt -noout -pubkey - // | openssl pkey -pubin -outform DER - // | openssl dgst -sha256 -binary - // | openssl enc -base64 - // NvqYIYSbgK2vCJpQhObf77vv+bQWtc5ek5RIOwPiC9A= - // - // This is the format used in HTTP Public Key Pinning. - // - // When both: - // :ref:`verify_certificate_hash - // ` and - // :ref:`verify_certificate_spki - // ` are specified, - // a hash matching value from either of the lists will result in the certificate being accepted. - // - // .. attention:: - // - // This option is preferred over :ref:`verify_certificate_hash - // `, - // because SPKI is tied to a private key, so it doesn't change when the certificate - // is renewed using the same private key. - repeated string verify_certificate_spki = 3 - [(validate.rules).repeated = {items {string {min_bytes: 44 max_bytes: 44}}}]; - - // An optional list of hex-encoded SHA-256 hashes. 
If specified, Envoy will verify that - // the SHA-256 of the DER-encoded presented certificate matches one of the specified values. - // - // A hex-encoded SHA-256 of the certificate can be generated with the following command: - // - // .. code-block:: bash - // - // $ openssl x509 -in path/to/client.crt -outform DER | openssl dgst -sha256 | cut -d" " -f2 - // df6ff72fe9116521268f6f2dd4966f51df479883fe7037b39f75916ac3049d1a - // - // A long hex-encoded and colon-separated SHA-256 (a.k.a. "fingerprint") of the certificate - // can be generated with the following command: - // - // .. code-block:: bash - // - // $ openssl x509 -in path/to/client.crt -noout -fingerprint -sha256 | cut -d"=" -f2 - // DF:6F:F7:2F:E9:11:65:21:26:8F:6F:2D:D4:96:6F:51:DF:47:98:83:FE:70:37:B3:9F:75:91:6A:C3:04:9D:1A - // - // Both of those formats are acceptable. - // - // When both: - // :ref:`verify_certificate_hash - // ` and - // :ref:`verify_certificate_spki - // ` are specified, - // a hash matching value from either of the lists will result in the certificate being accepted. - repeated string verify_certificate_hash = 2 - [(validate.rules).repeated = {items {string {min_bytes: 64 max_bytes: 95}}}]; - - // An optional list of Subject Alternative name matchers. Envoy will verify that the - // Subject Alternative Name of the presented certificate matches one of the specified matches. - // - // When a certificate has wildcard DNS SAN entries, to match a specific client, it should be - // configured with exact match type in the :ref:`string matcher `. - // For example if the certificate has "\*.example.com" as DNS SAN entry, to allow only "api.example.com", - // it should be configured as shown below. - // - // .. code-block:: yaml - // - // match_subject_alt_names: - // exact: "api.example.com" - // - // .. 
attention:: - // - // Subject Alternative Names are easily spoofable and verifying only them is insecure, - // therefore this option must be used together with :ref:`trusted_ca - // `. - repeated type.matcher.v3.StringMatcher match_subject_alt_names = 9; - - // [#not-implemented-hide:] Must present a signed time-stamped OCSP response. - google.protobuf.BoolValue require_ocsp_staple = 5; - - // [#not-implemented-hide:] Must present signed certificate time-stamp. - google.protobuf.BoolValue require_signed_certificate_timestamp = 6; - - // An optional `certificate revocation list - // `_ - // (in PEM format). If specified, Envoy will verify that the presented peer - // certificate has not been revoked by this CRL. If this DataSource contains - // multiple CRLs, all of them will be used. - config.core.v3.DataSource crl = 7; - - // If specified, Envoy will not reject expired certificates. - bool allow_expired_certificate = 8; - - // Certificate trust chain verification mode. - TrustChainVerification trust_chain_verification = 10 - [(validate.rules).enum = {defined_only: true}]; -} - -// TLS context shared by both client and server TLS contexts. -// [#next-free-field: 9] -message CommonTlsContext { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.CommonTlsContext"; - - message CombinedCertificateValidationContext { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.auth.CommonTlsContext.CombinedCertificateValidationContext"; - - // How to validate peer certificates. - CertificateValidationContext default_validation_context = 1 - [(validate.rules).message = {required: true}]; - - // Config for fetching validation context via SDS API. - SdsSecretConfig validation_context_sds_secret_config = 2 - [(validate.rules).message = {required: true}]; - } - - reserved 5; - - // TLS protocol versions, cipher suites etc. 
- TlsParameters tls_params = 1; - - // :ref:`Multiple TLS certificates ` can be associated with the - // same context to allow both RSA and ECDSA certificates. - // - // Only a single TLS certificate is supported in client contexts. In server contexts, the first - // RSA certificate is used for clients that only support RSA and the first ECDSA certificate is - // used for clients that support ECDSA. - repeated TlsCertificate tls_certificates = 2; - - // Configs for fetching TLS certificates via SDS API. - repeated SdsSecretConfig tls_certificate_sds_secret_configs = 6 - [(validate.rules).repeated = {max_items: 1}]; - - oneof validation_context_type { - // How to validate peer certificates. - CertificateValidationContext validation_context = 3; - - // Config for fetching validation context via SDS API. - SdsSecretConfig validation_context_sds_secret_config = 7; - - // Combined certificate validation context holds a default CertificateValidationContext - // and SDS config. When SDS server returns dynamic CertificateValidationContext, both dynamic - // and default CertificateValidationContext are merged into a new CertificateValidationContext - // for validation. This merge is done by Message::MergeFrom(), so dynamic - // CertificateValidationContext overwrites singular fields in default - // CertificateValidationContext, and concatenates repeated fields to default - // CertificateValidationContext, and logical OR is applied to boolean fields. - CombinedCertificateValidationContext combined_validation_context = 8; - } - - // Supplies the list of ALPN protocols that the listener should expose. In - // practice this is likely to be set to one of two values (see the - // :ref:`codec_type - // ` - // parameter in the HTTP connection manager for more information): - // - // * "h2,http/1.1" If the listener is going to support both HTTP/2 and HTTP/1.1. - // * "http/1.1" If the listener is only going to support HTTP/1.1. - // - // There is no default for this parameter. 
If empty, Envoy will not expose ALPN. - repeated string alpn_protocols = 4; -} - -message UpstreamTlsContext { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.auth.UpstreamTlsContext"; - - // Common TLS context settings. - // - // .. attention:: - // - // Server certificate verification is not enabled by default. Configure - // :ref:`trusted_ca` to enable - // verification. - CommonTlsContext common_tls_context = 1; - - // SNI string to use when creating TLS backend connections. - string sni = 2 [(validate.rules).string = {max_bytes: 255}]; - - // If true, server-initiated TLS renegotiation will be allowed. - // - // .. attention:: - // - // TLS renegotiation is considered insecure and shouldn't be used unless absolutely necessary. - bool allow_renegotiation = 3; - - // Maximum number of session keys (Pre-Shared Keys for TLSv1.3+, Session IDs and Session Tickets - // for TLSv1.2 and older) to store for the purpose of session resumption. - // - // Defaults to 1, setting this to 0 disables session resumption. - google.protobuf.UInt32Value max_session_keys = 4; -} - -// [#next-free-field: 8] -message DownstreamTlsContext { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.auth.DownstreamTlsContext"; - - // Common TLS context settings. - CommonTlsContext common_tls_context = 1; - - // If specified, Envoy will reject connections without a valid client - // certificate. - google.protobuf.BoolValue require_client_certificate = 2; - - // If specified, Envoy will reject connections without a valid and matching SNI. - // [#not-implemented-hide:] - google.protobuf.BoolValue require_sni = 3; - - oneof session_ticket_keys_type { - // TLS session ticket key settings. - TlsSessionTicketKeys session_ticket_keys = 4; - - // Config for fetching TLS session ticket keys via SDS API. 
- SdsSecretConfig session_ticket_keys_sds_secret_config = 5; - - // Config for controlling stateless TLS session resumption: setting this to true will cause the TLS - // server to not issue TLS session tickets for the purposes of stateless TLS session resumption. - // If set to false, the TLS server will issue TLS session tickets and encrypt/decrypt them using - // the keys specified through either :ref:`session_ticket_keys ` - // or :ref:`session_ticket_keys_sds_secret_config `. - // If this config is set to false and no keys are explicitly configured, the TLS server will issue - // TLS session tickets and encrypt/decrypt them using an internally-generated and managed key, with the - // implication that sessions cannot be resumed across hot restarts or on different hosts. - bool disable_stateless_session_resumption = 7; - } - - // If specified, session_timeout will change maximum lifetime (in seconds) of TLS session - // Currently this value is used as a hint to `TLS session ticket lifetime (for TLSv1.2) - // ` - // only seconds could be specified (fractional seconds are going to be ignored). - google.protobuf.Duration session_timeout = 6 [(validate.rules).duration = { - lt {seconds: 4294967296} - gte {} - }]; -} - -message GenericSecret { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.GenericSecret"; - - // Secret of generic type and is available to filters. - config.core.v3.DataSource secret = 1 [(udpa.annotations.sensitive) = true]; -} - -message SdsSecretConfig { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.SdsSecretConfig"; - - // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. - // When both name and config are specified, then secret can be fetched and/or reloaded via - // SDS. When only name is specified, then secret will be loaded from static resources. 
- string name = 1; - - config.core.v3.ConfigSource sds_config = 2; -} - -// [#next-free-field: 6] -message Secret { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.Secret"; - - // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. - string name = 1; - - oneof type { - TlsCertificate tls_certificate = 2; - - TlsSessionTicketKeys session_ticket_keys = 3; - - CertificateValidationContext validation_context = 4; - - GenericSecret generic_secret = 5; - } -} diff --git a/api/envoy/extensions/transport_sockets/tls/v3/common.proto b/api/envoy/extensions/transport_sockets/tls/v3/common.proto new file mode 100644 index 0000000000000..115ecad72f992 --- /dev/null +++ b/api/envoy/extensions/transport_sockets/tls/v3/common.proto @@ -0,0 +1,334 @@ +syntax = "proto3"; + +package envoy.extensions.transport_sockets.tls.v3; + +import "envoy/config/core/v3/base.proto"; +import "envoy/type/matcher/v3/string.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/sensitive.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tls.v3"; +option java_outer_classname = "CommonProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Common TLS configuration] + +message TlsParameters { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.TlsParameters"; + + enum TlsProtocol { + // Envoy will choose the optimal TLS version. + TLS_AUTO = 0; + + // TLS 1.0 + TLSv1_0 = 1; + + // TLS 1.1 + TLSv1_1 = 2; + + // TLS 1.2 + TLSv1_2 = 3; + + // TLS 1.3 + TLSv1_3 = 4; + } + + // Minimum TLS protocol version. 
By default, it's ``TLSv1_2`` for clients and ``TLSv1_0`` for + // servers. + TlsProtocol tls_minimum_protocol_version = 1 [(validate.rules).enum = {defined_only: true}]; + + // Maximum TLS protocol version. By default, it's ``TLSv1_2`` for clients and ``TLSv1_3`` for + // servers. + TlsProtocol tls_maximum_protocol_version = 2 [(validate.rules).enum = {defined_only: true}]; + + // If specified, the TLS listener will only support the specified `cipher list + // `_ + // when negotiating TLS 1.0-1.2 (this setting has no effect when negotiating TLS 1.3). If not + // specified, the default list will be used. + // + // In non-FIPS builds, the default cipher list is: + // + // .. code-block:: none + // + // [ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA-CHACHA20-POLY1305] + // [ECDHE-RSA-AES128-GCM-SHA256|ECDHE-RSA-CHACHA20-POLY1305] + // ECDHE-ECDSA-AES128-SHA + // ECDHE-RSA-AES128-SHA + // AES128-GCM-SHA256 + // AES128-SHA + // ECDHE-ECDSA-AES256-GCM-SHA384 + // ECDHE-RSA-AES256-GCM-SHA384 + // ECDHE-ECDSA-AES256-SHA + // ECDHE-RSA-AES256-SHA + // AES256-GCM-SHA384 + // AES256-SHA + // + // In builds using :ref:`BoringSSL FIPS `, the default cipher list is: + // + // .. code-block:: none + // + // ECDHE-ECDSA-AES128-GCM-SHA256 + // ECDHE-RSA-AES128-GCM-SHA256 + // ECDHE-ECDSA-AES128-SHA + // ECDHE-RSA-AES128-SHA + // AES128-GCM-SHA256 + // AES128-SHA + // ECDHE-ECDSA-AES256-GCM-SHA384 + // ECDHE-RSA-AES256-GCM-SHA384 + // ECDHE-ECDSA-AES256-SHA + // ECDHE-RSA-AES256-SHA + // AES256-GCM-SHA384 + // AES256-SHA + repeated string cipher_suites = 3; + + // If specified, the TLS connection will only support the specified ECDH + // curves. If not specified, the default curves will be used. + // + // In non-FIPS builds, the default curves are: + // + // .. code-block:: none + // + // X25519 + // P-256 + // + // In builds using :ref:`BoringSSL FIPS `, the default curve is: + // + // .. 
code-block:: none + // + // P-256 + repeated string ecdh_curves = 4; +} + +// BoringSSL private key method configuration. The private key methods are used for external +// (potentially asynchronous) signing and decryption operations. Some use cases for private key +// methods would be TPM support and TLS acceleration. +message PrivateKeyProvider { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.auth.PrivateKeyProvider"; + + reserved 2; + + reserved "config"; + + // Private key method provider name. The name must match a + // supported private key method provider type. + string provider_name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Private key method provider specific configuration. + oneof config_type { + google.protobuf.Any typed_config = 3 [(udpa.annotations.sensitive) = true]; + } +} + +// [#next-free-field: 7] +message TlsCertificate { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.TlsCertificate"; + + // The TLS certificate chain. + config.core.v3.DataSource certificate_chain = 1; + + // The TLS private key. + config.core.v3.DataSource private_key = 2 [(udpa.annotations.sensitive) = true]; + + // BoringSSL private key method provider. This is an alternative to :ref:`private_key + // ` field. This can't be + // marked as ``oneof`` due to API compatibility reasons. Setting both :ref:`private_key + // ` and + // :ref:`private_key_provider + // ` fields will result in an + // error. + PrivateKeyProvider private_key_provider = 6; + + // The password to decrypt the TLS private key. If this field is not set, it is assumed that the + // TLS private key is not password encrypted. 
+ config.core.v3.DataSource password = 3 [(udpa.annotations.sensitive) = true]; + + // [#not-implemented-hide:] + config.core.v3.DataSource ocsp_staple = 4; + + // [#not-implemented-hide:] + repeated config.core.v3.DataSource signed_certificate_timestamp = 5; +} + +message TlsSessionTicketKeys { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.auth.TlsSessionTicketKeys"; + + // Keys for encrypting and decrypting TLS session tickets. The + // first key in the array contains the key to encrypt all new sessions created by this context. + // All keys are candidates for decrypting received tickets. This allows for easy rotation of keys + // by, for example, putting the new key first, and the previous key second. + // + // If :ref:`session_ticket_keys ` + // is not specified, the TLS library will still support resuming sessions via tickets, but it will + // use an internally-generated and managed key, so sessions cannot be resumed across hot restarts + // or on different hosts. + // + // Each key must contain exactly 80 bytes of cryptographically-secure random data. For + // example, the output of ``openssl rand 80``. + // + // .. attention:: + // + // Using this feature has serious security considerations and risks. Improper handling of keys + // may result in loss of secrecy in connections, even if ciphers supporting perfect forward + // secrecy are used. See https://www.imperialviolet.org/2013/06/27/botchingpfs.html for some + // discussion. 
To minimize the risk, you must: + // + // * Keep the session ticket keys at least as secure as your TLS certificate private keys + // * Rotate session ticket keys at least daily, and preferably hourly + // * Always generate keys using a cryptographically-secure random data source + repeated config.core.v3.DataSource keys = 1 + [(validate.rules).repeated = {min_items: 1}, (udpa.annotations.sensitive) = true]; +} + +// [#next-free-field: 11] +message CertificateValidationContext { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.auth.CertificateValidationContext"; + + // Peer certificate verification mode. + enum TrustChainVerification { + // Perform default certificate verification (e.g., against CA / verification lists) + VERIFY_TRUST_CHAIN = 0; + + // Connections where the certificate fails verification will be permitted. + // For HTTP connections, the result of certificate verification can be used in route matching. ( + // see :ref:`validated ` ). + ACCEPT_UNTRUSTED = 1; + } + + reserved 4; + + reserved "verify_subject_alt_name"; + + // TLS certificate data containing certificate authority certificates to use in verifying + // a presented peer certificate (e.g. server certificate for clusters or client certificate + // for listeners). If not specified and a peer certificate is presented it will not be + // verified. By default, a client certificate is optional, unless one of the additional + // options (:ref:`require_client_certificate + // `, + // :ref:`verify_certificate_spki + // `, + // :ref:`verify_certificate_hash + // `, or + // :ref:`match_subject_alt_names + // `) is also + // specified. + // + // It can optionally contain certificate revocation lists, in which case Envoy will verify + // that the presented peer certificate has not been revoked by one of the included CRLs. + // + // See :ref:`the TLS overview ` for a list of common + // system CA locations. 
+ config.core.v3.DataSource trusted_ca = 1; + + // An optional list of base64-encoded SHA-256 hashes. If specified, Envoy will verify that the + // SHA-256 of the DER-encoded Subject Public Key Information (SPKI) of the presented certificate + // matches one of the specified values. + // + // A base64-encoded SHA-256 of the Subject Public Key Information (SPKI) of the certificate + // can be generated with the following command: + // + // .. code-block:: bash + // + // $ openssl x509 -in path/to/client.crt -noout -pubkey + // | openssl pkey -pubin -outform DER + // | openssl dgst -sha256 -binary + // | openssl enc -base64 + // NvqYIYSbgK2vCJpQhObf77vv+bQWtc5ek5RIOwPiC9A= + // + // This is the format used in HTTP Public Key Pinning. + // + // When both: + // :ref:`verify_certificate_hash + // ` and + // :ref:`verify_certificate_spki + // ` are specified, + // a hash matching value from either of the lists will result in the certificate being accepted. + // + // .. attention:: + // + // This option is preferred over :ref:`verify_certificate_hash + // `, + // because SPKI is tied to a private key, so it doesn't change when the certificate + // is renewed using the same private key. + repeated string verify_certificate_spki = 3 + [(validate.rules).repeated = {items {string {min_bytes: 44 max_bytes: 44}}}]; + + // An optional list of hex-encoded SHA-256 hashes. If specified, Envoy will verify that + // the SHA-256 of the DER-encoded presented certificate matches one of the specified values. + // + // A hex-encoded SHA-256 of the certificate can be generated with the following command: + // + // .. code-block:: bash + // + // $ openssl x509 -in path/to/client.crt -outform DER | openssl dgst -sha256 | cut -d" " -f2 + // df6ff72fe9116521268f6f2dd4966f51df479883fe7037b39f75916ac3049d1a + // + // A long hex-encoded and colon-separated SHA-256 (a.k.a. "fingerprint") of the certificate + // can be generated with the following command: + // + // .. 
code-block:: bash + // + // $ openssl x509 -in path/to/client.crt -noout -fingerprint -sha256 | cut -d"=" -f2 + // DF:6F:F7:2F:E9:11:65:21:26:8F:6F:2D:D4:96:6F:51:DF:47:98:83:FE:70:37:B3:9F:75:91:6A:C3:04:9D:1A + // + // Both of those formats are acceptable. + // + // When both: + // :ref:`verify_certificate_hash + // ` and + // :ref:`verify_certificate_spki + // ` are specified, + // a hash matching value from either of the lists will result in the certificate being accepted. + repeated string verify_certificate_hash = 2 + [(validate.rules).repeated = {items {string {min_bytes: 64 max_bytes: 95}}}]; + + // An optional list of Subject Alternative name matchers. Envoy will verify that the + // Subject Alternative Name of the presented certificate matches one of the specified matches. + // + // When a certificate has wildcard DNS SAN entries, to match a specific client, it should be + // configured with exact match type in the :ref:`string matcher `. + // For example if the certificate has "\*.example.com" as DNS SAN entry, to allow only "api.example.com", + // it should be configured as shown below. + // + // .. code-block:: yaml + // + // match_subject_alt_names: + // exact: "api.example.com" + // + // .. attention:: + // + // Subject Alternative Names are easily spoofable and verifying only them is insecure, + // therefore this option must be used together with :ref:`trusted_ca + // `. + repeated type.matcher.v3.StringMatcher match_subject_alt_names = 9; + + // [#not-implemented-hide:] Must present a signed time-stamped OCSP response. + google.protobuf.BoolValue require_ocsp_staple = 5; + + // [#not-implemented-hide:] Must present signed certificate time-stamp. + google.protobuf.BoolValue require_signed_certificate_timestamp = 6; + + // An optional `certificate revocation list + // `_ + // (in PEM format). If specified, Envoy will verify that the presented peer + // certificate has not been revoked by this CRL. 
If this DataSource contains + // multiple CRLs, all of them will be used. + config.core.v3.DataSource crl = 7; + + // If specified, Envoy will not reject expired certificates. + bool allow_expired_certificate = 8; + + // Certificate trust chain verification mode. + TrustChainVerification trust_chain_verification = 10 + [(validate.rules).enum = {defined_only: true}]; +} diff --git a/api/envoy/extensions/transport_sockets/tls/v3/secret.proto b/api/envoy/extensions/transport_sockets/tls/v3/secret.proto new file mode 100644 index 0000000000000..80c68a56f5ce5 --- /dev/null +++ b/api/envoy/extensions/transport_sockets/tls/v3/secret.proto @@ -0,0 +1,62 @@ +syntax = "proto3"; + +package envoy.extensions.transport_sockets.tls.v3; + +import "envoy/config/core/v3/base.proto"; +import "envoy/config/core/v3/config_source.proto"; +import "envoy/extensions/transport_sockets/tls/v3/common.proto"; + +import "udpa/core/v1/resource_locator.proto"; + +import "udpa/annotations/migrate.proto"; +import "udpa/annotations/sensitive.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tls.v3"; +option java_outer_classname = "SecretProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Secrets configuration] + +message GenericSecret { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.GenericSecret"; + + // Secret of generic type and is available to filters. + config.core.v3.DataSource secret = 1 [(udpa.annotations.sensitive) = true]; +} + +message SdsSecretConfig { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.SdsSecretConfig"; + + // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. + // When both name and config are specified, then secret can be fetched and/or reloaded via + // SDS. 
When only name is specified, then secret will be loaded from static resources. + string name = 1 [(udpa.annotations.field_migrate).oneof_promotion = "name_specifier"]; + + // Resource locator for SDS. This is mutually exclusive to *name*. + // [#not-implemented-hide:] + udpa.core.v1.ResourceLocator sds_resource_locator = 3 + [(udpa.annotations.field_migrate).oneof_promotion = "name_specifier"]; + + config.core.v3.ConfigSource sds_config = 2; +} + +// [#next-free-field: 6] +message Secret { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.Secret"; + + // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. + string name = 1; + + oneof type { + TlsCertificate tls_certificate = 2; + + TlsSessionTicketKeys session_ticket_keys = 3; + + CertificateValidationContext validation_context = 4; + + GenericSecret generic_secret = 5; + } +} diff --git a/api/envoy/extensions/transport_sockets/tls/v3/tls.proto b/api/envoy/extensions/transport_sockets/tls/v3/tls.proto new file mode 100644 index 0000000000000..7ee7920c724d1 --- /dev/null +++ b/api/envoy/extensions/transport_sockets/tls/v3/tls.proto @@ -0,0 +1,241 @@ +syntax = "proto3"; + +package envoy.extensions.transport_sockets.tls.v3; + +import "envoy/config/core/v3/extension.proto"; +import "envoy/extensions/transport_sockets/tls/v3/common.proto"; +import "envoy/extensions/transport_sockets/tls/v3/secret.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tls.v3"; +option java_outer_classname = "TlsProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: TLS transport 
socket] +// [#extension: envoy.transport_sockets.tls] +// The TLS contexts below provide the transport socket configuration for upstream/downstream TLS. + +message UpstreamTlsContext { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.auth.UpstreamTlsContext"; + + // Common TLS context settings. + // + // .. attention:: + // + // Server certificate verification is not enabled by default. Configure + // :ref:`trusted_ca` to enable + // verification. + CommonTlsContext common_tls_context = 1; + + // SNI string to use when creating TLS backend connections. + string sni = 2 [(validate.rules).string = {max_bytes: 255}]; + + // If true, server-initiated TLS renegotiation will be allowed. + // + // .. attention:: + // + // TLS renegotiation is considered insecure and shouldn't be used unless absolutely necessary. + bool allow_renegotiation = 3; + + // Maximum number of session keys (Pre-Shared Keys for TLSv1.3+, Session IDs and Session Tickets + // for TLSv1.2 and older) to store for the purpose of session resumption. + // + // Defaults to 1, setting this to 0 disables session resumption. + google.protobuf.UInt32Value max_session_keys = 4; +} + +// [#next-free-field: 8] +message DownstreamTlsContext { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.auth.DownstreamTlsContext"; + + // Common TLS context settings. + CommonTlsContext common_tls_context = 1; + + // If specified, Envoy will reject connections without a valid client + // certificate. + google.protobuf.BoolValue require_client_certificate = 2; + + // If specified, Envoy will reject connections without a valid and matching SNI. + // [#not-implemented-hide:] + google.protobuf.BoolValue require_sni = 3; + + oneof session_ticket_keys_type { + // TLS session ticket key settings. + TlsSessionTicketKeys session_ticket_keys = 4; + + // Config for fetching TLS session ticket keys via SDS API. 
+ SdsSecretConfig session_ticket_keys_sds_secret_config = 5; + + // Config for controlling stateless TLS session resumption: setting this to true will cause the TLS + // server to not issue TLS session tickets for the purposes of stateless TLS session resumption. + // If set to false, the TLS server will issue TLS session tickets and encrypt/decrypt them using + // the keys specified through either :ref:`session_ticket_keys ` + // or :ref:`session_ticket_keys_sds_secret_config `. + // If this config is set to false and no keys are explicitly configured, the TLS server will issue + // TLS session tickets and encrypt/decrypt them using an internally-generated and managed key, with the + // implication that sessions cannot be resumed across hot restarts or on different hosts. + bool disable_stateless_session_resumption = 7; + } + + // If specified, session_timeout will change maximum lifetime (in seconds) of TLS session + // Currently this value is used as a hint to `TLS session ticket lifetime (for TLSv1.2) + // ` + // only seconds could be specified (fractional seconds are going to be ignored). + google.protobuf.Duration session_timeout = 6 [(validate.rules).duration = { + lt {seconds: 4294967296} + gte {} + }]; +} + +// TLS context shared by both client and server TLS contexts. +// [#next-free-field: 13] +message CommonTlsContext { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.CommonTlsContext"; + + // Config for Certificate provider to get certificates. This provider should allow certificates to be + // fetched/refreshed over the network asynchronously with respect to the TLS handshake. + message CertificateProvider { + // opaque name used to specify certificate instances or types. For example, "ROOTCA" to specify + // a root-certificate (validation context) or "TLS" to specify a new tls-certificate. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Provider specific config. 
+ // Note: an implementation is expected to dedup multiple instances of the same config + // to maintain a single certificate-provider instance. The sharing can happen, for + // example, among multiple clusters or between the tls_certificate and validation_context + // certificate providers of a cluster. + // This config could be supplied inline or (in future) a named xDS resource. + oneof config { + option (validate.required) = true; + + config.core.v3.TypedExtensionConfig typed_config = 2; + } + } + + // Similar to CertificateProvider above, but allows the provider instances to be configured on + // the client side instead of being sent from the control plane. + message CertificateProviderInstance { + // Provider instance name. This name must be defined in the client's configuration (e.g., a + // bootstrap file) to correspond to a provider instance (i.e., the same data in the typed_config + // field that would be sent in the CertificateProvider message if the config was sent by the + // control plane). If not present, defaults to "default". + // + // Instance names should generally be defined not in terms of the underlying provider + // implementation (e.g., "file_watcher") but rather in terms of the function of the + // certificates (e.g., "foo_deployment_identity"). + string instance_name = 1; + + // Opaque name used to specify certificate instances or types. For example, "ROOTCA" to specify + // a root-certificate (validation context) or "example.com" to specify a certificate for a + // particular domain. Not all provider instances will actually use this field, so the value + // defaults to the empty string. + string certificate_name = 2; + } + + message CombinedCertificateValidationContext { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.auth.CommonTlsContext.CombinedCertificateValidationContext"; + + // How to validate peer certificates. 
+ CertificateValidationContext default_validation_context = 1 + [(validate.rules).message = {required: true}]; + + // Config for fetching validation context via SDS API. Note SDS API allows certificates to be + // fetched/refreshed over the network asynchronously with respect to the TLS handshake. + // Only one of validation_context_sds_secret_config, validation_context_certificate_provider, + // or validation_context_certificate_provider_instance may be used. + SdsSecretConfig validation_context_sds_secret_config = 2 [ + (validate.rules).message = {required: true}, + (udpa.annotations.field_migrate).oneof_promotion = "dynamic_validation_context" + ]; + + // Certificate provider for fetching validation context. + // Only one of validation_context_sds_secret_config, validation_context_certificate_provider, + // or validation_context_certificate_provider_instance may be used. + // [#not-implemented-hide:] + CertificateProvider validation_context_certificate_provider = 3 + [(udpa.annotations.field_migrate).oneof_promotion = "dynamic_validation_context"]; + + // Certificate provider instance for fetching validation context. + // Only one of validation_context_sds_secret_config, validation_context_certificate_provider, + // or validation_context_certificate_provider_instance may be used. + // [#not-implemented-hide:] + CertificateProviderInstance validation_context_certificate_provider_instance = 4 + [(udpa.annotations.field_migrate).oneof_promotion = "dynamic_validation_context"]; + } + + reserved 5; + + // TLS protocol versions, cipher suites etc. + TlsParameters tls_params = 1; + + // :ref:`Multiple TLS certificates ` can be associated with the + // same context to allow both RSA and ECDSA certificates. + // + // Only a single TLS certificate is supported in client contexts. In server contexts, the first + // RSA certificate is used for clients that only support RSA and the first ECDSA certificate is + // used for clients that support ECDSA. 
+ repeated TlsCertificate tls_certificates = 2; + + // Configs for fetching TLS certificates via SDS API. Note SDS API allows certificates to be + // fetched/refreshed over the network asynchronously with respect to the TLS handshake. + repeated SdsSecretConfig tls_certificate_sds_secret_configs = 6 + [(validate.rules).repeated = {max_items: 1}]; + + // Certificate provider for fetching TLS certificates. + // [#not-implemented-hide:] + CertificateProvider tls_certificate_certificate_provider = 9; + + // Certificate provider instance for fetching TLS certificates. + // [#not-implemented-hide:] + CertificateProviderInstance tls_certificate_certificate_provider_instance = 11; + + oneof validation_context_type { + // How to validate peer certificates. + CertificateValidationContext validation_context = 3; + + // Config for fetching validation context via SDS API. Note SDS API allows certificates to be + // fetched/refreshed over the network asynchronously with respect to the TLS handshake. + SdsSecretConfig validation_context_sds_secret_config = 7; + + // Combined certificate validation context holds a default CertificateValidationContext + // and SDS config. When SDS server returns dynamic CertificateValidationContext, both dynamic + // and default CertificateValidationContext are merged into a new CertificateValidationContext + // for validation. This merge is done by Message::MergeFrom(), so dynamic + // CertificateValidationContext overwrites singular fields in default + // CertificateValidationContext, and concatenates repeated fields to default + // CertificateValidationContext, and logical OR is applied to boolean fields. + CombinedCertificateValidationContext combined_validation_context = 8; + + // Certificate provider for fetching validation context. + // [#not-implemented-hide:] + CertificateProvider validation_context_certificate_provider = 10; + + // Certificate provider instance for fetching validation context. 
+ // [#not-implemented-hide:] + CertificateProviderInstance validation_context_certificate_provider_instance = 12; + } + + // Supplies the list of ALPN protocols that the listener should expose. In + // practice this is likely to be set to one of two values (see the + // :ref:`codec_type + // ` + // parameter in the HTTP connection manager for more information): + // + // * "h2,http/1.1" If the listener is going to support both HTTP/2 and HTTP/1.1. + // * "http/1.1" If the listener is only going to support HTTP/1.1. + // + // There is no default for this parameter. If empty, Envoy will not expose ALPN. + repeated string alpn_protocols = 4; +} diff --git a/api/envoy/extensions/transport_sockets/tls/v4alpha/BUILD b/api/envoy/extensions/transport_sockets/tls/v4alpha/BUILD index e56544584bfe2..5471fdfbe0b1a 100644 --- a/api/envoy/extensions/transport_sockets/tls/v4alpha/BUILD +++ b/api/envoy/extensions/transport_sockets/tls/v4alpha/BUILD @@ -8,7 +8,8 @@ api_proto_package( deps = [ "//envoy/config/core/v4alpha:pkg", "//envoy/extensions/transport_sockets/tls/v3:pkg", - "//envoy/type/matcher/v3:pkg", + "//envoy/type/matcher/v4alpha:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//udpa/core/v1:pkg", ], ) diff --git a/api/envoy/extensions/transport_sockets/tls/v4alpha/cert.proto b/api/envoy/extensions/transport_sockets/tls/v4alpha/common.proto similarity index 63% rename from api/envoy/extensions/transport_sockets/tls/v4alpha/cert.proto rename to api/envoy/extensions/transport_sockets/tls/v4alpha/common.proto index febb6d665240b..0b63ade128d3c 100644 --- a/api/envoy/extensions/transport_sockets/tls/v4alpha/cert.proto +++ b/api/envoy/extensions/transport_sockets/tls/v4alpha/common.proto @@ -3,11 +3,9 @@ syntax = "proto3"; package envoy.extensions.transport_sockets.tls.v4alpha; import "envoy/config/core/v4alpha/base.proto"; -import "envoy/config/core/v4alpha/config_source.proto"; -import "envoy/type/matcher/v3/string.proto"; +import 
"envoy/type/matcher/v4alpha/string.proto"; import "google/protobuf/any.proto"; -import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; @@ -17,7 +15,7 @@ import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tls.v4alpha"; -option java_outer_classname = "CertProto"; +option java_outer_classname = "CommonProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; @@ -48,8 +46,8 @@ message TlsParameters { // servers. TlsProtocol tls_minimum_protocol_version = 1 [(validate.rules).enum = {defined_only: true}]; - // Maximum TLS protocol version. By default, it's ``TLSv1_3`` for servers in non-FIPS builds, and - // ``TLSv1_2`` for clients and for servers using :ref:`BoringSSL FIPS `. + // Maximum TLS protocol version. By default, it's ``TLSv1_2`` for clients and ``TLSv1_3`` for + // servers. TlsProtocol tls_maximum_protocol_version = 2 [(validate.rules).enum = {defined_only: true}]; // If specified, the TLS listener will only support the specified `cipher list @@ -300,7 +298,7 @@ message CertificateValidationContext { // Subject Alternative Name of the presented certificate matches one of the specified matches. // // When a certificate has wildcard DNS SAN entries, to match a specific client, it should be - // configured with exact match type in the :ref:`string matcher `. + // configured with exact match type in the :ref:`string matcher `. // For example if the certificate has "\*.example.com" as DNS SAN entry, to allow only "api.example.com", // it should be configured as shown below. // @@ -314,7 +312,7 @@ message CertificateValidationContext { // Subject Alternative Names are easily spoofable and verifying only them is insecure, // therefore this option must be used together with :ref:`trusted_ca // `. 
- repeated type.matcher.v3.StringMatcher match_subject_alt_names = 9; + repeated type.matcher.v4alpha.StringMatcher match_subject_alt_names = 9; // [#not-implemented-hide:] Must present a signed time-stamped OCSP response. google.protobuf.BoolValue require_ocsp_staple = 5; @@ -336,183 +334,3 @@ message CertificateValidationContext { TrustChainVerification trust_chain_verification = 10 [(validate.rules).enum = {defined_only: true}]; } - -// TLS context shared by both client and server TLS contexts. -// [#next-free-field: 9] -message CommonTlsContext { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.transport_sockets.tls.v3.CommonTlsContext"; - - message CombinedCertificateValidationContext { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.transport_sockets.tls.v3.CommonTlsContext." - "CombinedCertificateValidationContext"; - - // How to validate peer certificates. - CertificateValidationContext default_validation_context = 1 - [(validate.rules).message = {required: true}]; - - // Config for fetching validation context via SDS API. - SdsSecretConfig validation_context_sds_secret_config = 2 - [(validate.rules).message = {required: true}]; - } - - reserved 5; - - // TLS protocol versions, cipher suites etc. - TlsParameters tls_params = 1; - - // :ref:`Multiple TLS certificates ` can be associated with the - // same context to allow both RSA and ECDSA certificates. - // - // Only a single TLS certificate is supported in client contexts. In server contexts, the first - // RSA certificate is used for clients that only support RSA and the first ECDSA certificate is - // used for clients that support ECDSA. - repeated TlsCertificate tls_certificates = 2; - - // Configs for fetching TLS certificates via SDS API. - repeated SdsSecretConfig tls_certificate_sds_secret_configs = 6 - [(validate.rules).repeated = {max_items: 1}]; - - oneof validation_context_type { - // How to validate peer certificates. 
- CertificateValidationContext validation_context = 3; - - // Config for fetching validation context via SDS API. - SdsSecretConfig validation_context_sds_secret_config = 7; - - // Combined certificate validation context holds a default CertificateValidationContext - // and SDS config. When SDS server returns dynamic CertificateValidationContext, both dynamic - // and default CertificateValidationContext are merged into a new CertificateValidationContext - // for validation. This merge is done by Message::MergeFrom(), so dynamic - // CertificateValidationContext overwrites singular fields in default - // CertificateValidationContext, and concatenates repeated fields to default - // CertificateValidationContext, and logical OR is applied to boolean fields. - CombinedCertificateValidationContext combined_validation_context = 8; - } - - // Supplies the list of ALPN protocols that the listener should expose. In - // practice this is likely to be set to one of two values (see the - // :ref:`codec_type - // ` - // parameter in the HTTP connection manager for more information): - // - // * "h2,http/1.1" If the listener is going to support both HTTP/2 and HTTP/1.1. - // * "http/1.1" If the listener is only going to support HTTP/1.1. - // - // There is no default for this parameter. If empty, Envoy will not expose ALPN. - repeated string alpn_protocols = 4; -} - -message UpstreamTlsContext { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext"; - - // Common TLS context settings. - // - // .. attention:: - // - // Server certificate verification is not enabled by default. Configure - // :ref:`trusted_ca` to enable - // verification. - CommonTlsContext common_tls_context = 1; - - // SNI string to use when creating TLS backend connections. - string sni = 2 [(validate.rules).string = {max_bytes: 255}]; - - // If true, server-initiated TLS renegotiation will be allowed. - // - // .. 
attention:: - // - // TLS renegotiation is considered insecure and shouldn't be used unless absolutely necessary. - bool allow_renegotiation = 3; - - // Maximum number of session keys (Pre-Shared Keys for TLSv1.3+, Session IDs and Session Tickets - // for TLSv1.2 and older) to store for the purpose of session resumption. - // - // Defaults to 1, setting this to 0 disables session resumption. - google.protobuf.UInt32Value max_session_keys = 4; -} - -// [#next-free-field: 8] -message DownstreamTlsContext { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext"; - - // Common TLS context settings. - CommonTlsContext common_tls_context = 1; - - // If specified, Envoy will reject connections without a valid client - // certificate. - google.protobuf.BoolValue require_client_certificate = 2; - - // If specified, Envoy will reject connections without a valid and matching SNI. - // [#not-implemented-hide:] - google.protobuf.BoolValue require_sni = 3; - - oneof session_ticket_keys_type { - // TLS session ticket key settings. - TlsSessionTicketKeys session_ticket_keys = 4; - - // Config for fetching TLS session ticket keys via SDS API. - SdsSecretConfig session_ticket_keys_sds_secret_config = 5; - - // Config for controlling stateless TLS session resumption: setting this to true will cause the TLS - // server to not issue TLS session tickets for the purposes of stateless TLS session resumption. - // If set to false, the TLS server will issue TLS session tickets and encrypt/decrypt them using - // the keys specified through either :ref:`session_ticket_keys ` - // or :ref:`session_ticket_keys_sds_secret_config `. 
- // If this config is set to false and no keys are explicitly configured, the TLS server will issue - // TLS session tickets and encrypt/decrypt them using an internally-generated and managed key, with the - // implication that sessions cannot be resumed across hot restarts or on different hosts. - bool disable_stateless_session_resumption = 7; - } - - // If specified, session_timeout will change maximum lifetime (in seconds) of TLS session - // Currently this value is used as a hint to `TLS session ticket lifetime (for TLSv1.2) - // ` - // only seconds could be specified (fractional seconds are going to be ignored). - google.protobuf.Duration session_timeout = 6 [(validate.rules).duration = { - lt {seconds: 4294967296} - gte {} - }]; -} - -message GenericSecret { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.transport_sockets.tls.v3.GenericSecret"; - - // Secret of generic type and is available to filters. - config.core.v4alpha.DataSource secret = 1 [(udpa.annotations.sensitive) = true]; -} - -message SdsSecretConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.transport_sockets.tls.v3.SdsSecretConfig"; - - // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. - // When both name and config are specified, then secret can be fetched and/or reloaded via - // SDS. When only name is specified, then secret will be loaded from static resources. - string name = 1; - - config.core.v4alpha.ConfigSource sds_config = 2; -} - -// [#next-free-field: 6] -message Secret { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.transport_sockets.tls.v3.Secret"; - - // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. 
- string name = 1; - - oneof type { - TlsCertificate tls_certificate = 2; - - TlsSessionTicketKeys session_ticket_keys = 3; - - CertificateValidationContext validation_context = 4; - - GenericSecret generic_secret = 5; - } -} diff --git a/api/envoy/extensions/transport_sockets/tls/v4alpha/secret.proto b/api/envoy/extensions/transport_sockets/tls/v4alpha/secret.proto new file mode 100644 index 0000000000000..11306f21415a3 --- /dev/null +++ b/api/envoy/extensions/transport_sockets/tls/v4alpha/secret.proto @@ -0,0 +1,65 @@ +syntax = "proto3"; + +package envoy.extensions.transport_sockets.tls.v4alpha; + +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/config/core/v4alpha/config_source.proto"; +import "envoy/extensions/transport_sockets/tls/v4alpha/common.proto"; + +import "udpa/core/v1/resource_locator.proto"; + +import "udpa/annotations/sensitive.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tls.v4alpha"; +option java_outer_classname = "SecretProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Secrets configuration] + +message GenericSecret { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.transport_sockets.tls.v3.GenericSecret"; + + // Secret of generic type and is available to filters. + config.core.v4alpha.DataSource secret = 1 [(udpa.annotations.sensitive) = true]; +} + +message SdsSecretConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.transport_sockets.tls.v3.SdsSecretConfig"; + + oneof name_specifier { + // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. + // When both name and config are specified, then secret can be fetched and/or reloaded via + // SDS. 
When only name is specified, then secret will be loaded from static resources. + string name = 1; + + // Resource locator for SDS. This is mutually exclusive to *name*. + // [#not-implemented-hide:] + udpa.core.v1.ResourceLocator sds_resource_locator = 3; + } + + config.core.v4alpha.ConfigSource sds_config = 2; +} + +// [#next-free-field: 6] +message Secret { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.transport_sockets.tls.v3.Secret"; + + // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. + string name = 1; + + oneof type { + TlsCertificate tls_certificate = 2; + + TlsSessionTicketKeys session_ticket_keys = 3; + + CertificateValidationContext validation_context = 4; + + GenericSecret generic_secret = 5; + } +} diff --git a/api/envoy/extensions/transport_sockets/tls/v4alpha/tls.proto b/api/envoy/extensions/transport_sockets/tls/v4alpha/tls.proto new file mode 100644 index 0000000000000..a73ba6e002ba2 --- /dev/null +++ b/api/envoy/extensions/transport_sockets/tls/v4alpha/tls.proto @@ -0,0 +1,246 @@ +syntax = "proto3"; + +package envoy.extensions.transport_sockets.tls.v4alpha; + +import "envoy/config/core/v4alpha/extension.proto"; +import "envoy/extensions/transport_sockets/tls/v4alpha/common.proto"; +import "envoy/extensions/transport_sockets/tls/v4alpha/secret.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tls.v4alpha"; +option java_outer_classname = "TlsProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: TLS transport socket] +// [#extension: envoy.transport_sockets.tls] +// The TLS contexts below 
provide the transport socket configuration for upstream/downstream TLS. + +message UpstreamTlsContext { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext"; + + // Common TLS context settings. + // + // .. attention:: + // + // Server certificate verification is not enabled by default. Configure + // :ref:`trusted_ca` to enable + // verification. + CommonTlsContext common_tls_context = 1; + + // SNI string to use when creating TLS backend connections. + string sni = 2 [(validate.rules).string = {max_bytes: 255}]; + + // If true, server-initiated TLS renegotiation will be allowed. + // + // .. attention:: + // + // TLS renegotiation is considered insecure and shouldn't be used unless absolutely necessary. + bool allow_renegotiation = 3; + + // Maximum number of session keys (Pre-Shared Keys for TLSv1.3+, Session IDs and Session Tickets + // for TLSv1.2 and older) to store for the purpose of session resumption. + // + // Defaults to 1, setting this to 0 disables session resumption. + google.protobuf.UInt32Value max_session_keys = 4; +} + +// [#next-free-field: 8] +message DownstreamTlsContext { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext"; + + // Common TLS context settings. + CommonTlsContext common_tls_context = 1; + + // If specified, Envoy will reject connections without a valid client + // certificate. + google.protobuf.BoolValue require_client_certificate = 2; + + // If specified, Envoy will reject connections without a valid and matching SNI. + // [#not-implemented-hide:] + google.protobuf.BoolValue require_sni = 3; + + oneof session_ticket_keys_type { + // TLS session ticket key settings. + TlsSessionTicketKeys session_ticket_keys = 4; + + // Config for fetching TLS session ticket keys via SDS API. 
+ SdsSecretConfig session_ticket_keys_sds_secret_config = 5; + + // Config for controlling stateless TLS session resumption: setting this to true will cause the TLS + // server to not issue TLS session tickets for the purposes of stateless TLS session resumption. + // If set to false, the TLS server will issue TLS session tickets and encrypt/decrypt them using + // the keys specified through either :ref:`session_ticket_keys ` + // or :ref:`session_ticket_keys_sds_secret_config `. + // If this config is set to false and no keys are explicitly configured, the TLS server will issue + // TLS session tickets and encrypt/decrypt them using an internally-generated and managed key, with the + // implication that sessions cannot be resumed across hot restarts or on different hosts. + bool disable_stateless_session_resumption = 7; + } + + // If specified, session_timeout will change maximum lifetime (in seconds) of TLS session + // Currently this value is used as a hint to `TLS session ticket lifetime (for TLSv1.2) + // ` + // only seconds could be specified (fractional seconds are going to be ignored). + google.protobuf.Duration session_timeout = 6 [(validate.rules).duration = { + lt {seconds: 4294967296} + gte {} + }]; +} + +// TLS context shared by both client and server TLS contexts. +// [#next-free-field: 13] +message CommonTlsContext { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.transport_sockets.tls.v3.CommonTlsContext"; + + // Config for Certificate provider to get certificates. This provider should allow certificates to be + // fetched/refreshed over the network asynchronously with respect to the TLS handshake. + message CertificateProvider { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CertificateProvider"; + + // opaque name used to specify certificate instances or types. 
For example, "ROOTCA" to specify + // a root-certificate (validation context) or "TLS" to specify a new tls-certificate. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Provider specific config. + // Note: an implementation is expected to dedup multiple instances of the same config + // to maintain a single certificate-provider instance. The sharing can happen, for + // example, among multiple clusters or between the tls_certificate and validation_context + // certificate providers of a cluster. + // This config could be supplied inline or (in future) a named xDS resource. + oneof config { + option (validate.required) = true; + + config.core.v4alpha.TypedExtensionConfig typed_config = 2; + } + } + + // Similar to CertificateProvider above, but allows the provider instances to be configured on + // the client side instead of being sent from the control plane. + message CertificateProviderInstance { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CertificateProviderInstance"; + + // Provider instance name. This name must be defined in the client's configuration (e.g., a + // bootstrap file) to correspond to a provider instance (i.e., the same data in the typed_config + // field that would be sent in the CertificateProvider message if the config was sent by the + // control plane). If not present, defaults to "default". + // + // Instance names should generally be defined not in terms of the underlying provider + // implementation (e.g., "file_watcher") but rather in terms of the function of the + // certificates (e.g., "foo_deployment_identity"). + string instance_name = 1; + + // Opaque name used to specify certificate instances or types. For example, "ROOTCA" to specify + // a root-certificate (validation context) or "example.com" to specify a certificate for a + // particular domain. 
Not all provider instances will actually use this field, so the value + // defaults to the empty string. + string certificate_name = 2; + } + + message CombinedCertificateValidationContext { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.transport_sockets.tls.v3.CommonTlsContext." + "CombinedCertificateValidationContext"; + + // How to validate peer certificates. + CertificateValidationContext default_validation_context = 1 + [(validate.rules).message = {required: true}]; + + oneof dynamic_validation_context { + // Config for fetching validation context via SDS API. Note SDS API allows certificates to be + // fetched/refreshed over the network asynchronously with respect to the TLS handshake. + // Only one of validation_context_sds_secret_config, validation_context_certificate_provider, + // or validation_context_certificate_provider_instance may be used. + SdsSecretConfig validation_context_sds_secret_config = 2 + [(validate.rules).message = {required: true}]; + + // Certificate provider for fetching validation context. + // Only one of validation_context_sds_secret_config, validation_context_certificate_provider, + // or validation_context_certificate_provider_instance may be used. + // [#not-implemented-hide:] + CertificateProvider validation_context_certificate_provider = 3; + + // Certificate provider instance for fetching validation context. + // Only one of validation_context_sds_secret_config, validation_context_certificate_provider, + // or validation_context_certificate_provider_instance may be used. + // [#not-implemented-hide:] + CertificateProviderInstance validation_context_certificate_provider_instance = 4; + } + } + + reserved 5; + + // TLS protocol versions, cipher suites etc. + TlsParameters tls_params = 1; + + // :ref:`Multiple TLS certificates ` can be associated with the + // same context to allow both RSA and ECDSA certificates. + // + // Only a single TLS certificate is supported in client contexts. 
In server contexts, the first + // RSA certificate is used for clients that only support RSA and the first ECDSA certificate is + // used for clients that support ECDSA. + repeated TlsCertificate tls_certificates = 2; + + // Configs for fetching TLS certificates via SDS API. Note SDS API allows certificates to be + // fetched/refreshed over the network asynchronously with respect to the TLS handshake. + repeated SdsSecretConfig tls_certificate_sds_secret_configs = 6 + [(validate.rules).repeated = {max_items: 1}]; + + // Certificate provider for fetching TLS certificates. + // [#not-implemented-hide:] + CertificateProvider tls_certificate_certificate_provider = 9; + + // Certificate provider instance for fetching TLS certificates. + // [#not-implemented-hide:] + CertificateProviderInstance tls_certificate_certificate_provider_instance = 11; + + oneof validation_context_type { + // How to validate peer certificates. + CertificateValidationContext validation_context = 3; + + // Config for fetching validation context via SDS API. Note SDS API allows certificates to be + // fetched/refreshed over the network asynchronously with respect to the TLS handshake. + SdsSecretConfig validation_context_sds_secret_config = 7; + + // Combined certificate validation context holds a default CertificateValidationContext + // and SDS config. When SDS server returns dynamic CertificateValidationContext, both dynamic + // and default CertificateValidationContext are merged into a new CertificateValidationContext + // for validation. This merge is done by Message::MergeFrom(), so dynamic + // CertificateValidationContext overwrites singular fields in default + // CertificateValidationContext, and concatenates repeated fields to default + // CertificateValidationContext, and logical OR is applied to boolean fields. + CombinedCertificateValidationContext combined_validation_context = 8; + + // Certificate provider for fetching validation context. 
+ // [#not-implemented-hide:] + CertificateProvider validation_context_certificate_provider = 10; + + // Certificate provider instance for fetching validation context. + // [#not-implemented-hide:] + CertificateProviderInstance validation_context_certificate_provider_instance = 12; + } + + // Supplies the list of ALPN protocols that the listener should expose. In + // practice this is likely to be set to one of two values (see the + // :ref:`codec_type + // ` + // parameter in the HTTP connection manager for more information): + // + // * "h2,http/1.1" If the listener is going to support both HTTP/2 and HTTP/1.1. + // * "http/1.1" If the listener is only going to support HTTP/1.1. + // + // There is no default for this parameter. If empty, Envoy will not expose ALPN. + repeated string alpn_protocols = 4; +} diff --git a/api/envoy/extensions/upstreams/http/generic/v3/BUILD b/api/envoy/extensions/upstreams/http/generic/v3/BUILD new file mode 100644 index 0000000000000..ef3541ebcb1df --- /dev/null +++ b/api/envoy/extensions/upstreams/http/generic/v3/BUILD @@ -0,0 +1,9 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/api/envoy/extensions/upstreams/http/generic/v3/generic_connection_pool.proto b/api/envoy/extensions/upstreams/http/generic/v3/generic_connection_pool.proto new file mode 100644 index 0000000000000..c6b02364aa2d2 --- /dev/null +++ b/api/envoy/extensions/upstreams/http/generic/v3/generic_connection_pool.proto @@ -0,0 +1,18 @@ +syntax = "proto3"; + +package envoy.extensions.upstreams.http.generic.v3; + +import "udpa/annotations/status.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.upstreams.http.generic.v3"; +option java_outer_classname = "GenericConnectionPoolProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Generic Connection Pool] + +// A connection pool which forwards downstream HTTP as TCP or HTTP to upstream, +// based on CONNECT configuration. +// [#extension: envoy.upstreams.http.generic] +message GenericConnectionPoolProto { +} diff --git a/api/envoy/extensions/upstreams/http/http/v3/BUILD b/api/envoy/extensions/upstreams/http/http/v3/BUILD new file mode 100644 index 0000000000000..ef3541ebcb1df --- /dev/null +++ b/api/envoy/extensions/upstreams/http/http/v3/BUILD @@ -0,0 +1,9 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/api/envoy/extensions/upstreams/http/http/v3/http_connection_pool.proto b/api/envoy/extensions/upstreams/http/http/v3/http_connection_pool.proto new file mode 100644 index 0000000000000..e4c2d6ff9b84f --- /dev/null +++ b/api/envoy/extensions/upstreams/http/http/v3/http_connection_pool.proto @@ -0,0 +1,17 @@ +syntax = "proto3"; + +package envoy.extensions.upstreams.http.http.v3; + +import "udpa/annotations/status.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.upstreams.http.http.v3"; +option java_outer_classname = "HttpConnectionPoolProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Http Connection Pool] + +// A connection pool which forwards downstream HTTP as HTTP to upstream. +// [#extension: envoy.upstreams.http.http] +message HttpConnectionPoolProto { +} diff --git a/api/envoy/extensions/upstreams/http/tcp/v3/BUILD b/api/envoy/extensions/upstreams/http/tcp/v3/BUILD new file mode 100644 index 0000000000000..ef3541ebcb1df --- /dev/null +++ b/api/envoy/extensions/upstreams/http/tcp/v3/BUILD @@ -0,0 +1,9 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/api/envoy/extensions/upstreams/http/tcp/v3/tcp_connection_pool.proto b/api/envoy/extensions/upstreams/http/tcp/v3/tcp_connection_pool.proto new file mode 100644 index 0000000000000..5bc8734cb3f79 --- /dev/null +++ b/api/envoy/extensions/upstreams/http/tcp/v3/tcp_connection_pool.proto @@ -0,0 +1,17 @@ +syntax = "proto3"; + +package envoy.extensions.upstreams.http.tcp.v3; + +import "udpa/annotations/status.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.upstreams.http.tcp.v3"; +option java_outer_classname = "TcpConnectionPoolProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Tcp Connection Pool] + +// A connection pool which forwards downstream HTTP as TCP to upstream, +// [#extension: envoy.upstreams.http.tcp] +message TcpConnectionPoolProto { +} diff --git a/api/envoy/extensions/wasm/v3/BUILD b/api/envoy/extensions/wasm/v3/BUILD index d29790ff5e75b..2c3dad6453b65 100644 --- a/api/envoy/extensions/wasm/v3/BUILD +++ b/api/envoy/extensions/wasm/v3/BUILD @@ -7,7 +7,6 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/core/v3:pkg", - "//envoy/config/wasm/v2alpha:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) diff --git a/api/envoy/extensions/wasm/v3/wasm.proto b/api/envoy/extensions/wasm/v3/wasm.proto index 8cbaf20a39061..26f458214466e 100644 --- a/api/envoy/extensions/wasm/v3/wasm.proto +++ b/api/envoy/extensions/wasm/v3/wasm.proto @@ -15,14 +15,12 @@ option java_outer_classname = "WasmProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; -// [#protodoc-title: Wasm service] +// [#protodoc-title: Wasm] +// [[#not-implemented-hide:] // Configuration for a Wasm VM. 
-// [#next-free-field: 6] -// [#not-implemented-hide:] pending implementation. +// [#next-free-field: 7] message VmConfig { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.wasm.v2alpha.VmConfig"; - // An ID which will be used along with a hash of the wasm code (or the name of the registered Null // VM plugin) to determine which VM will be used for the plugin. All plugins which use the same // *vm_id* and code will use the same VM. May be left blank. Sharing a VM between plugins can @@ -36,31 +34,36 @@ message VmConfig { // The Wasm code that Envoy will execute. config.core.v3.AsyncDataSource code = 3; - // The Wasm configuration used in initialization of a new VM (proxy_on_start). + // The Wasm configuration used in initialization of a new VM + // (proxy_on_start). `google.protobuf.Struct` is serialized as JSON before + // passing it to the plugin. `google.protobuf.BytesValue` and + // `google.protobuf.StringValue` are passed directly without the wrapper. google.protobuf.Any configuration = 4; // Allow the wasm file to include pre-compiled code on VMs which support it. // Warning: this should only be enable for trusted sources as the precompiled code is not // verified. bool allow_precompiled = 5; + + // If true and the code needs to be remotely fetched and it is not in the cache then NACK the configuration + // update and do a background fetch to fill the cache, otherwise fetch the code asynchronously and enter + // warming state. + bool nack_on_code_cache_miss = 6; } +// [#not-implemented-hide:] // Base Configuration for Wasm Plugins e.g. filters and services. // [#next-free-field: 6] -// [#not-implemented-hide:] pending implementation.
message PluginConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.wasm.v2alpha.PluginConfig"; - // A unique name for a filters/services in a VM for use in identifying the filter/service if - // multiple filters/services are handled by the same *vm_id* and *group_name* and for + // multiple filters/services are handled by the same *vm_id* and *root_id* and for // logging/debugging. string name = 1; // A unique ID for a set of filters/services in a VM which will share a RootContext and Contexts // if applicable (e.g. an Wasm HttpFilter and an Wasm AccessLog). If left blank, all - // filters/services with a blank group_name with the same *vm_id* will share Context(s). - string group_name = 2; + // filters/services with a blank root_id with the same *vm_id* will share Context(s). + string root_id = 2; // Configuration for finding or starting VM. oneof vm_config { @@ -70,17 +73,23 @@ message PluginConfig { // Filter/service configuration used to configure or reconfigure a plugin // (proxy_on_configuration). - google.protobuf.Any configuration = 5; + // `google.protobuf.Struct` is serialized as JSON before + // passing it to the plugin. `google.protobuf.BytesValue` and + // `google.protobuf.StringValue` are passed directly without the wrapper. + google.protobuf.Any configuration = 4; + + // If there is a fatal error on the VM (e.g. exception, abort(), on_start or on_configure return false), + // then all plugins associated with the VM will either fail closed (by default), e.g. by returning an HTTP 503 error, + // or fail open (if 'fail_open' is set to true) by bypassing the filter. Note: when on_start or on_configure return false + // during xDS updates the xDS configuration will be rejected and when on_start or on_configuration return false on initial + // startup the proxy will not start. + bool fail_open = 5; } -// WasmService is configured as a built-in *envoy.wasm_service* :ref:`ServiceConfig -// `. 
This opaque configuration will be used to -// create a Wasm Service. -// [#not-implemented-hide:] pending implementation. +// [[#not-implemented-hide:] +// WasmService is configured as a built-in *envoy.wasm_service* :ref:`WasmService +// ` This opaque configuration will be used to create a Wasm Service. message WasmService { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.wasm.v2alpha.WasmService"; - // General plugin configuration. PluginConfig config = 1; diff --git a/api/envoy/service/auth/v2alpha/BUILD b/api/envoy/service/auth/v2alpha/BUILD index 0bd31fdc6ff8f..c75dabe1a8a00 100644 --- a/api/envoy/service/auth/v2alpha/BUILD +++ b/api/envoy/service/auth/v2alpha/BUILD @@ -1,9 +1,9 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + licenses(["notice"]) # Apache 2 # DO NOT EDIT. This file is generated by tools/proto_sync.py. -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - api_proto_package( has_services = True, deps = ["//envoy/service/auth/v2:pkg"], diff --git a/api/envoy/service/auth/v3/external_auth.proto b/api/envoy/service/auth/v3/external_auth.proto index b93b61a3bde95..e2ee274fdfdbe 100644 --- a/api/envoy/service/auth/v3/external_auth.proto +++ b/api/envoy/service/auth/v3/external_auth.proto @@ -6,6 +6,7 @@ import "envoy/config/core/v3/base.proto"; import "envoy/service/auth/v3/attribute_context.proto"; import "envoy/type/v3/http_status.proto"; +import "google/protobuf/struct.proto"; import "google/rpc/status.proto"; import "udpa/annotations/status.proto"; @@ -57,7 +58,7 @@ message DeniedHttpResponse { string body = 3; } -// HTTP attributes for an ok response. +// HTTP attributes for an OK response. 
message OkHttpResponse { option (udpa.annotations.versioning).previous_message_type = "envoy.service.auth.v2.OkHttpResponse"; @@ -69,6 +70,14 @@ message OkHttpResponse { // by Leaving `append` as false, the filter will either add a new header, or override an existing // one if there is a match. repeated config.core.v3.HeaderValueOption headers = 2; + + // Optional response metadata that will be emitted as dynamic metadata to be consumed by the next + // filter. This metadata lives in a namespace specified by the canonical name of extension filter + // that requires it: + // + // - :ref:`envoy.filters.http.ext_authz ` for HTTP filter. + // - :ref:`envoy.filters.network.ext_authz ` for network filter. + google.protobuf.Struct dynamic_metadata = 3; } // Intended for gRPC and Network Authorization servers `only`. diff --git a/api/envoy/service/discovery/v3/BUILD b/api/envoy/service/discovery/v3/BUILD index bfe0abc351dfa..d74aebc3424bc 100644 --- a/api/envoy/service/discovery/v3/BUILD +++ b/api/envoy/service/discovery/v3/BUILD @@ -11,5 +11,6 @@ api_proto_package( "//envoy/config/core/v3:pkg", "//envoy/service/discovery/v2:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//udpa/core/v1:pkg", ], ) diff --git a/api/envoy/service/discovery/v3/discovery.proto b/api/envoy/service/discovery/v3/discovery.proto index b8e31160a88b8..40479539213cf 100644 --- a/api/envoy/service/discovery/v3/discovery.proto +++ b/api/envoy/service/discovery/v3/discovery.proto @@ -7,6 +7,10 @@ import "envoy/config/core/v3/base.proto"; import "google/protobuf/any.proto"; import "google/rpc/status.proto"; +import "udpa/core/v1/resource_locator.proto"; +import "udpa/core/v1/resource_name.proto"; + +import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; @@ -140,7 +144,7 @@ message DiscoveryResponse { // In particular, initial_resource_versions being sent at the "start" of every // gRPC stream actually 
entails a message for each type_url, each with its own // initial_resource_versions. -// [#next-free-field: 8] +// [#next-free-field: 10] message DeltaDiscoveryRequest { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.DeltaDiscoveryRequest"; @@ -148,7 +152,9 @@ message DeltaDiscoveryRequest { config.core.v3.Node node = 1; // Type of the resource that is being requested, e.g. - // "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment". + // "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment". This does not need to be set if + // resources are only referenced via *udpa_resource_subscribe* and + // *udpa_resources_unsubscribe*. string type_url = 2; // DeltaDiscoveryRequests allow the client to add or remove individual @@ -174,9 +180,22 @@ message DeltaDiscoveryRequest { // A list of Resource names to add to the list of tracked resources. repeated string resource_names_subscribe = 3; + // As with *resource_names_subscribe* but used when subscribing to resources indicated + // by a *udpa.core.v1.ResourceLocator*. The directives in the resource locator + // are ignored and the context parameters are matched with + // *context_param_specifier* specific semantics. + // [#not-implemented-hide:] + repeated udpa.core.v1.ResourceLocator udpa_resources_subscribe = 8; + // A list of Resource names to remove from the list of tracked resources. repeated string resource_names_unsubscribe = 4; + // As with *resource_names_unsubscribe* but used when unsubscribing to resources indicated by a + // *udpa.core.v1.ResourceLocator*. This must match a previously subscribed + // resource locator provided in *udpa_resources_subscribe*. + // [#not-implemented-hide:] + repeated udpa.core.v1.ResourceLocator udpa_resources_unsubscribe = 9; + // Informs the server of the versions of the resources the xDS client knows of, to enable the // client to continue the same logical xDS session even in the face of gRPC stream reconnection. 
// It will not be populated: [1] in the very first stream of a session, since the client will @@ -199,7 +218,7 @@ message DeltaDiscoveryRequest { google.rpc.Status error_detail = 7; } -// [#next-free-field: 7] +// [#next-free-field: 8] message DeltaDiscoveryResponse { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.DeltaDiscoveryResponse"; @@ -215,22 +234,34 @@ message DeltaDiscoveryResponse { // Type URL for resources. Identifies the xDS API when muxing over ADS. // Must be consistent with the type_url in the Any within 'resources' if 'resources' is non-empty. + // This does not need to be set if *udpa_removed_resources* is used instead of + // *removed_resources*. string type_url = 4; // Resources names of resources that have be deleted and to be removed from the xDS Client. // Removed resources for missing resources can be ignored. repeated string removed_resources = 6; + // As with *removed_resources* but used when a removed resource was named in + // its *Resource*s with a *udpa.core.v1.ResourceName*. + // [#not-implemented-hide:] + repeated udpa.core.v1.ResourceName udpa_removed_resources = 7; + // The nonce provides a way for DeltaDiscoveryRequests to uniquely // reference a DeltaDiscoveryResponse when (N)ACKing. The nonce is required. string nonce = 5; } +// [#next-free-field: 6] message Resource { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Resource"; // The resource's name, to distinguish it from others of the same type of resource. - string name = 3; + string name = 3 [(udpa.annotations.field_migrate).oneof_promotion = "name_specifier"]; + + // Used instead of *name* when a resource with a *udpa.core.v1.ResourceName* is delivered. + udpa.core.v1.ResourceName udpa_resource_name = 5 + [(udpa.annotations.field_migrate).oneof_promotion = "name_specifier"]; // The aliases are a list of other names that this resource can go by. 
repeated string aliases = 4; diff --git a/api/envoy/service/extension/v3/BUILD b/api/envoy/service/extension/v3/BUILD new file mode 100644 index 0000000000000..6c68a071b8731 --- /dev/null +++ b/api/envoy/service/extension/v3/BUILD @@ -0,0 +1,14 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + has_services = True, + deps = [ + "//envoy/annotations:pkg", + "//envoy/service/discovery/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/service/extension/v3/config_discovery.proto b/api/envoy/service/extension/v3/config_discovery.proto new file mode 100644 index 0000000000000..652355b707e3d --- /dev/null +++ b/api/envoy/service/extension/v3/config_discovery.proto @@ -0,0 +1,44 @@ +syntax = "proto3"; + +package envoy.service.extension.v3; + +import "envoy/service/discovery/v3/discovery.proto"; + +import "google/api/annotations.proto"; + +import "envoy/annotations/resource.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.service.extension.v3"; +option java_outer_classname = "ConfigDiscoveryProto"; +option java_multiple_files = true; +option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: ExtensionConfigDS] + +// Return extension configurations. 
+service ExtensionConfigDiscoveryService { + option (envoy.annotations.resource).type = "envoy.config.core.v3.TypedExtensionConfig"; + + rpc StreamExtensionConfigs(stream discovery.v3.DiscoveryRequest) + returns (stream discovery.v3.DiscoveryResponse) { + } + + rpc DeltaExtensionConfigs(stream discovery.v3.DeltaDiscoveryRequest) + returns (stream discovery.v3.DeltaDiscoveryResponse) { + } + + rpc FetchExtensionConfigs(discovery.v3.DiscoveryRequest) + returns (discovery.v3.DiscoveryResponse) { + option (google.api.http).post = "/v3/discovery:extension_configs"; + option (google.api.http).body = "*"; + } +} + +// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue +// with importing services: https://github.com/google/protobuf/issues/4221 and +// protoxform to upgrade the file. +message EcdsDummy { +} diff --git a/api/envoy/service/health/v3/hds.proto b/api/envoy/service/health/v3/hds.proto index 0b09134709c82..484c0477ae466 100644 --- a/api/envoy/service/health/v3/hds.proto +++ b/api/envoy/service/health/v3/hds.proto @@ -9,6 +9,7 @@ import "envoy/config/endpoint/v3/endpoint_components.proto"; import "google/api/annotations.proto"; import "google/protobuf/duration.proto"; +import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; @@ -108,11 +109,32 @@ message EndpointHealth { config.core.v3.HealthStatus health_status = 2; } +// Group endpoint health by locality under each cluster. +message LocalityEndpointsHealth { + config.core.v3.Locality locality = 1; + + repeated EndpointHealth endpoints_health = 2; +} + +// The health status of endpoints in a cluster. The cluster name and locality +// should match the corresponding fields in ClusterHealthCheck message. 
+message ClusterEndpointsHealth { + string cluster_name = 1; + + repeated LocalityEndpointsHealth locality_endpoints_health = 2; +} + message EndpointHealthResponse { option (udpa.annotations.versioning).previous_message_type = "envoy.service.discovery.v2.EndpointHealthResponse"; + // [#comment:TODO(drewsortega): add deprecate annotation once cluster_endpoints_health is implemented] + // Deprecated - Flat list of endpoint health information. repeated EndpointHealth endpoints_health = 1; + + // [#not-implemented-hide:] + // Organize Endpoint health information by cluster. + repeated ClusterEndpointsHealth cluster_endpoints_health = 2; } message HealthCheckRequestOrEndpointHealthResponse { diff --git a/api/envoy/service/health/v4alpha/BUILD b/api/envoy/service/health/v4alpha/BUILD new file mode 100644 index 0000000000000..b7b2a13bd4958 --- /dev/null +++ b/api/envoy/service/health/v4alpha/BUILD @@ -0,0 +1,15 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + has_services = True, + deps = [ + "//envoy/config/core/v4alpha:pkg", + "//envoy/config/endpoint/v3:pkg", + "//envoy/service/health/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/service/health/v4alpha/hds.proto b/api/envoy/service/health/v4alpha/hds.proto new file mode 100644 index 0000000000000..957f058b9c576 --- /dev/null +++ b/api/envoy/service/health/v4alpha/hds.proto @@ -0,0 +1,187 @@ +syntax = "proto3"; + +package envoy.service.health.v4alpha; + +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/config/core/v4alpha/health_check.proto"; +import "envoy/config/endpoint/v3/endpoint_components.proto"; + +import "google/api/annotations.proto"; +import "google/protobuf/duration.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = 
"io.envoyproxy.envoy.service.health.v4alpha";
+option java_outer_classname = "HdsProto";
+option java_multiple_files = true;
+option java_generic_services = true;
+option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;
+
+// [#protodoc-title: Health Discovery Service (HDS)]
+
+// HDS is Health Discovery Service. It complements Envoy’s health checking
+// service by designating this Envoy to be a healthchecker for a subset of hosts
+// in the cluster. The status of these health checks will be reported to the
+// management server, where it can be aggregated etc and redistributed back to
+// Envoy through EDS.
+service HealthDiscoveryService {
+  // 1. Envoy starts up and if its can_healthcheck option in the static
+  // bootstrap config is enabled, sends HealthCheckRequest to the management
+  // server. It supplies its capabilities (which protocol it can health check
+  // with, what zone it resides in, etc.).
+  // 2. In response to (1), the management server designates this Envoy as a
+  // healthchecker to health check a subset of all upstream hosts for a given
+  // cluster (for example upstream Host 1 and Host 2). It streams
+  // HealthCheckSpecifier messages with cluster related configuration for all
+  // clusters this Envoy is designated to health check. Subsequent
+  // HealthCheckSpecifier message will be sent on changes to:
+  // a. Endpoints to health checks
+  // b. Per cluster configuration change
+  // 3. Envoy creates a health probe based on the HealthCheck config and sends
+  // it to endpoint(ip:port) of Host 1 and 2. Based on the HealthCheck
+  // configuration Envoy waits upon the arrival of the probe response and
+  // looks at the content of the response to decide whether the endpoint is
+  // healthy or not. If a response hasn't been received within the timeout
+  // interval, the endpoint health status is considered TIMEOUT.
+  // 4. Envoy reports results back in an EndpointHealthResponse message.
+ // Envoy streams responses as often as the interval configured by the + // management server in HealthCheckSpecifier. + // 5. The management Server collects health statuses for all endpoints in the + // cluster (for all clusters) and uses this information to construct + // EndpointDiscoveryResponse messages. + // 6. Once Envoy has a list of upstream endpoints to send traffic to, it load + // balances traffic to them without additional health checking. It may + // use inline healthcheck (i.e. consider endpoint UNHEALTHY if connection + // failed to a particular endpoint to account for health status propagation + // delay between HDS and EDS). + // By default, can_healthcheck is true. If can_healthcheck is false, Cluster + // configuration may not contain HealthCheck message. + // TODO(htuch): How is can_healthcheck communicated to CDS to ensure the above + // invariant? + // TODO(htuch): Add @amb67's diagram. + rpc StreamHealthCheck(stream HealthCheckRequestOrEndpointHealthResponse) + returns (stream HealthCheckSpecifier) { + } + + // TODO(htuch): Unlike the gRPC version, there is no stream-based binding of + // request/response. Should we add an identifier to the HealthCheckSpecifier + // to bind with the response? + rpc FetchHealthCheck(HealthCheckRequestOrEndpointHealthResponse) returns (HealthCheckSpecifier) { + option (google.api.http).post = "/v3/discovery:health_check"; + option (google.api.http).body = "*"; + } +} + +// Defines supported protocols etc, so the management server can assign proper +// endpoints to healthcheck. +message Capability { + option (udpa.annotations.versioning).previous_message_type = "envoy.service.health.v3.Capability"; + + // Different Envoy instances may have different capabilities (e.g. Redis) + // and/or have ports enabled for different protocols. 
+ enum Protocol { + HTTP = 0; + TCP = 1; + REDIS = 2; + } + + repeated Protocol health_check_protocols = 1; +} + +message HealthCheckRequest { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.health.v3.HealthCheckRequest"; + + config.core.v4alpha.Node node = 1; + + Capability capability = 2; +} + +message EndpointHealth { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.health.v3.EndpointHealth"; + + config.endpoint.v3.Endpoint endpoint = 1; + + config.core.v4alpha.HealthStatus health_status = 2; +} + +// Group endpoint health by locality under each cluster. +message LocalityEndpointsHealth { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.health.v3.LocalityEndpointsHealth"; + + config.core.v4alpha.Locality locality = 1; + + repeated EndpointHealth endpoints_health = 2; +} + +// The health status of endpoints in a cluster. The cluster name and locality +// should match the corresponding fields in ClusterHealthCheck message. +message ClusterEndpointsHealth { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.health.v3.ClusterEndpointsHealth"; + + string cluster_name = 1; + + repeated LocalityEndpointsHealth locality_endpoints_health = 2; +} + +message EndpointHealthResponse { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.health.v3.EndpointHealthResponse"; + + // [#comment:TODO(drewsortega): add deprecate annotation once cluster_endpoints_health is implemented] + // Deprecated - Flat list of endpoint health information. + repeated EndpointHealth endpoints_health = 1; + + // [#not-implemented-hide:] + // Organize Endpoint health information by cluster. 
+ repeated ClusterEndpointsHealth cluster_endpoints_health = 2; +} + +message HealthCheckRequestOrEndpointHealthResponse { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.health.v3.HealthCheckRequestOrEndpointHealthResponse"; + + oneof request_type { + HealthCheckRequest health_check_request = 1; + + EndpointHealthResponse endpoint_health_response = 2; + } +} + +message LocalityEndpoints { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.health.v3.LocalityEndpoints"; + + config.core.v4alpha.Locality locality = 1; + + repeated config.endpoint.v3.Endpoint endpoints = 2; +} + +// The cluster name and locality is provided to Envoy for the endpoints that it +// health checks to support statistics reporting, logging and debugging by the +// Envoy instance (outside of HDS). For maximum usefulness, it should match the +// same cluster structure as that provided by EDS. +message ClusterHealthCheck { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.health.v3.ClusterHealthCheck"; + + string cluster_name = 1; + + repeated config.core.v4alpha.HealthCheck health_checks = 2; + + repeated LocalityEndpoints locality_endpoints = 3; +} + +message HealthCheckSpecifier { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.health.v3.HealthCheckSpecifier"; + + repeated ClusterHealthCheck cluster_health_checks = 1; + + // The default is 1 second. + google.protobuf.Duration interval = 2; +} diff --git a/api/envoy/service/load_stats/v2/lrs.proto b/api/envoy/service/load_stats/v2/lrs.proto index a71039e7ceeb0..d8707bd62cb2a 100644 --- a/api/envoy/service/load_stats/v2/lrs.proto +++ b/api/envoy/service/load_stats/v2/lrs.proto @@ -66,7 +66,13 @@ message LoadStatsRequest { // [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. message LoadStatsResponse { // Clusters to report stats for. 
- repeated string clusters = 1 [(validate.rules).repeated = {min_items: 1}]; + // Not populated if *send_all_clusters* is true. + repeated string clusters = 1; + + // If true, the client should send all clusters it knows about. + // Only clients that advertise the "envoy.lrs.supports_send_all_clusters" capability in their + // :ref:`client_features` field will honor this field. + bool send_all_clusters = 4; // The minimum interval of time to collect stats over. This is only a minimum for two reasons: // 1. There may be some delay from when the timer fires until stats sampling occurs. diff --git a/api/envoy/service/load_stats/v3/lrs.proto b/api/envoy/service/load_stats/v3/lrs.proto index ce48574826a90..76705ba77771e 100644 --- a/api/envoy/service/load_stats/v3/lrs.proto +++ b/api/envoy/service/load_stats/v3/lrs.proto @@ -17,7 +17,15 @@ option java_multiple_files = true; option java_generic_services = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; -// [#protodoc-title: Load reporting service] +// [#protodoc-title: Load Reporting service (LRS)] + +// Load Reporting Service is an Envoy API to emit load reports. Envoy will initiate a bi-directional +// stream with a management server. Upon connecting, the management server can send a +// :ref:`LoadStatsResponse ` to a node it is +// interested in getting the load reports for. Envoy in this node will start sending +// :ref:`LoadStatsRequest `. This is done periodically +// based on the :ref:`load reporting interval ` +// For details, take a look at the :ref:`Load Reporting Service sandbox example `. service LoadReportingService { // Advanced API to allow for multi-dimensional load balancing by remote @@ -53,7 +61,6 @@ service LoadReportingService { } // A load report Envoy sends to the management server. -// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. 
message LoadStatsRequest { option (udpa.annotations.versioning).previous_message_type = "envoy.service.load_stats.v2.LoadStatsRequest"; @@ -67,15 +74,21 @@ message LoadStatsRequest { // The management server sends envoy a LoadStatsResponse with all clusters it // is interested in learning load stats about. -// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. message LoadStatsResponse { option (udpa.annotations.versioning).previous_message_type = "envoy.service.load_stats.v2.LoadStatsResponse"; // Clusters to report stats for. - repeated string clusters = 1 [(validate.rules).repeated = {min_items: 1}]; + // Not populated if *send_all_clusters* is true. + repeated string clusters = 1; + + // If true, the client should send all clusters it knows about. + // Only clients that advertise the "envoy.lrs.supports_send_all_clusters" capability in their + // :ref:`client_features` field will honor this field. + bool send_all_clusters = 4; // The minimum interval of time to collect stats over. This is only a minimum for two reasons: + // // 1. There may be some delay from when the timer fires until stats sampling occurs. // 2. 
For clusters that were already feature in the previous *LoadStatsResponse*, any traffic
 // that is observed in between the corresponding previous *LoadStatsRequest* and this
diff --git a/api/envoy/service/ratelimit/v3/rls.proto b/api/envoy/service/ratelimit/v3/rls.proto
index 4aad42fcaa813..42f24cfb0805c 100644
--- a/api/envoy/service/ratelimit/v3/rls.proto
+++ b/api/envoy/service/ratelimit/v3/rls.proto
@@ -5,6 +5,8 @@ package envoy.service.ratelimit.v3;
 import "envoy/config/core/v3/base.proto";
 import "envoy/extensions/common/ratelimit/v3/ratelimit.proto";
 
+import "google/protobuf/duration.proto";
+
 import "udpa/annotations/status.proto";
 import "udpa/annotations/versioning.proto";
 import "validate/validate.proto";
@@ -69,6 +71,8 @@ message RateLimitResponse {
   option (udpa.annotations.versioning).previous_message_type =
       "envoy.service.ratelimit.v2.RateLimitResponse.RateLimit";
 
+  // Identifies the unit of time for rate limit.
+  // [#comment: replace by envoy/type/v3/ratelimit_unit.proto in v4]
   enum Unit {
     // The time unit is not known.
     UNKNOWN = 0;
@@ -108,6 +112,9 @@ message RateLimitResponse {
 
     // The limit remaining in the current time unit.
     uint32 limit_remaining = 3;
+
+    // Duration until reset of the current limit window.
+    google.protobuf.Duration duration_until_reset = 4;
   }
 
   // The overall response code which takes into account all of the descriptors that were passed
diff --git a/api/envoy/service/status/v3/csds.proto b/api/envoy/service/status/v3/csds.proto
index 3347def21d8f9..beccfb8cb58ee 100644
--- a/api/envoy/service/status/v3/csds.proto
+++ b/api/envoy/service/status/v3/csds.proto
@@ -64,7 +64,7 @@ message ClientStatusRequest {
 }
 
 // Detailed config (per xDS) with status.
-// [#next-free-field: 6] +// [#next-free-field: 7] message PerXdsConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.service.status.v2.PerXdsConfig"; @@ -79,6 +79,9 @@ message PerXdsConfig { admin.v3.RoutesConfigDump route_config = 4; admin.v3.ScopedRoutesConfigDump scoped_route_config = 5; + + // [#not-implemented-hide:] + admin.v3.EndpointsConfigDump endpoint_config = 6; } } diff --git a/api/envoy/service/status/v4alpha/BUILD b/api/envoy/service/status/v4alpha/BUILD new file mode 100644 index 0000000000000..fb238648fbca1 --- /dev/null +++ b/api/envoy/service/status/v4alpha/BUILD @@ -0,0 +1,16 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + has_services = True, + deps = [ + "//envoy/admin/v4alpha:pkg", + "//envoy/config/core/v4alpha:pkg", + "//envoy/service/status/v3:pkg", + "//envoy/type/matcher/v4alpha:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/service/status/v4alpha/csds.proto b/api/envoy/service/status/v4alpha/csds.proto new file mode 100644 index 0000000000000..2286eb94a8a73 --- /dev/null +++ b/api/envoy/service/status/v4alpha/csds.proto @@ -0,0 +1,105 @@ +syntax = "proto3"; + +package envoy.service.status.v4alpha; + +import "envoy/admin/v4alpha/config_dump.proto"; +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/type/matcher/v4alpha/node.proto"; + +import "google/api/annotations.proto"; +import "google/protobuf/struct.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.service.status.v4alpha"; +option java_outer_classname = "CsdsProto"; +option java_multiple_files = true; +option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Client Status 
Discovery Service (CSDS)] + +// CSDS is Client Status Discovery Service. It can be used to get the status of +// an xDS-compliant client from the management server's point of view. In the +// future, it can potentially be used as an interface to get the current +// state directly from the client. +service ClientStatusDiscoveryService { + rpc StreamClientStatus(stream ClientStatusRequest) returns (stream ClientStatusResponse) { + } + + rpc FetchClientStatus(ClientStatusRequest) returns (ClientStatusResponse) { + option (google.api.http).post = "/v3/discovery:client_status"; + option (google.api.http).body = "*"; + } +} + +// Status of a config. +enum ConfigStatus { + // Status info is not available/unknown. + UNKNOWN = 0; + + // Management server has sent the config to client and received ACK. + SYNCED = 1; + + // Config is not sent. + NOT_SENT = 2; + + // Management server has sent the config to client but hasn’t received + // ACK/NACK. + STALE = 3; + + // Management server has sent the config to client but received NACK. + ERROR = 4; +} + +// Request for client status of clients identified by a list of NodeMatchers. +message ClientStatusRequest { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.status.v3.ClientStatusRequest"; + + // Management server can use these match criteria to identify clients. + // The match follows OR semantics. + repeated type.matcher.v4alpha.NodeMatcher node_matchers = 1; +} + +// Detailed config (per xDS) with status. 
+// [#next-free-field: 7] +message PerXdsConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.status.v3.PerXdsConfig"; + + ConfigStatus status = 1; + + oneof per_xds_config { + admin.v4alpha.ListenersConfigDump listener_config = 2; + + admin.v4alpha.ClustersConfigDump cluster_config = 3; + + admin.v4alpha.RoutesConfigDump route_config = 4; + + admin.v4alpha.ScopedRoutesConfigDump scoped_route_config = 5; + + // [#not-implemented-hide:] + admin.v4alpha.EndpointsConfigDump endpoint_config = 6; + } +} + +// All xds configs for a particular client. +message ClientConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.status.v3.ClientConfig"; + + // Node for a particular client. + config.core.v4alpha.Node node = 1; + + repeated PerXdsConfig xds_config = 2; +} + +message ClientStatusResponse { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.status.v3.ClientStatusResponse"; + + // Client configs for the clients specified in the ClientStatusRequest. + repeated ClientConfig config = 1; +} diff --git a/api/envoy/service/tap/v4alpha/BUILD b/api/envoy/service/tap/v4alpha/BUILD new file mode 100644 index 0000000000000..5f75886cd068a --- /dev/null +++ b/api/envoy/service/tap/v4alpha/BUILD @@ -0,0 +1,17 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + has_services = True, + deps = [ + "//envoy/config/core/v4alpha:pkg", + "//envoy/config/tap/v4alpha:pkg", + "//envoy/data/tap/v3:pkg", + "//envoy/service/discovery/v3:pkg", + "//envoy/service/tap/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/service/tap/v4alpha/tap.proto b/api/envoy/service/tap/v4alpha/tap.proto new file mode 100644 index 0000000000000..a1654d18bebbf --- /dev/null +++ b/api/envoy/service/tap/v4alpha/tap.proto @@ -0,0 +1,64 @@ +syntax = "proto3"; + +package envoy.service.tap.v4alpha; + +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/data/tap/v3/wrapper.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.service.tap.v4alpha"; +option java_outer_classname = "TapProto"; +option java_multiple_files = true; +option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Tap Sink Service] + +// [#not-implemented-hide:] A tap service to receive incoming taps. Envoy will call +// StreamTaps to deliver captured taps to the server +service TapSinkService { + // Envoy will connect and send StreamTapsRequest messages forever. It does not expect any + // response to be sent as nothing would be done in the case of failure. The server should + // disconnect if it expects Envoy to reconnect. + rpc StreamTaps(stream StreamTapsRequest) returns (StreamTapsResponse) { + } +} + +// [#not-implemented-hide:] Stream message for the Tap API. Envoy will open a stream to the server +// and stream taps without ever expecting a response. 
+message StreamTapsRequest { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.tap.v3.StreamTapsRequest"; + + message Identifier { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.tap.v3.StreamTapsRequest.Identifier"; + + // The node sending taps over the stream. + config.core.v4alpha.Node node = 1 [(validate.rules).message = {required: true}]; + + // The opaque identifier that was set in the :ref:`output config + // `. + string tap_id = 2; + } + + // Identifier data effectively is a structured metadata. As a performance optimization this will + // only be sent in the first message on the stream. + Identifier identifier = 1; + + // The trace id. this can be used to merge together a streaming trace. Note that the trace_id + // is not guaranteed to be spatially or temporally unique. + uint64 trace_id = 2; + + // The trace data. + data.tap.v3.TraceWrapper trace = 3; +} + +// [#not-implemented-hide:] +message StreamTapsResponse { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.tap.v3.StreamTapsResponse"; +} diff --git a/api/envoy/service/tap/v4alpha/tapds.proto b/api/envoy/service/tap/v4alpha/tapds.proto new file mode 100644 index 0000000000000..855fde8c8e63f --- /dev/null +++ b/api/envoy/service/tap/v4alpha/tapds.proto @@ -0,0 +1,48 @@ +syntax = "proto3"; + +package envoy.service.tap.v4alpha; + +import "envoy/config/tap/v4alpha/common.proto"; +import "envoy/service/discovery/v3/discovery.proto"; + +import "google/api/annotations.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.service.tap.v4alpha"; +option java_outer_classname = "TapdsProto"; +option java_multiple_files = true; +option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Tap discovery 
service] + +// [#not-implemented-hide:] Tap discovery service. +service TapDiscoveryService { + rpc StreamTapConfigs(stream discovery.v3.DiscoveryRequest) + returns (stream discovery.v3.DiscoveryResponse) { + } + + rpc DeltaTapConfigs(stream discovery.v3.DeltaDiscoveryRequest) + returns (stream discovery.v3.DeltaDiscoveryResponse) { + } + + rpc FetchTapConfigs(discovery.v3.DiscoveryRequest) returns (discovery.v3.DiscoveryResponse) { + option (google.api.http).post = "/v3/discovery:tap_configs"; + option (google.api.http).body = "*"; + } +} + +// [#not-implemented-hide:] A tap resource is essentially a tap configuration with a name +// The filter TapDS config references this name. +message TapResource { + option (udpa.annotations.versioning).previous_message_type = "envoy.service.tap.v3.TapResource"; + + // The name of the tap configuration. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Tap config to apply + config.tap.v4alpha.TapConfig config = 2; +} diff --git a/api/envoy/type/matcher/regex.proto b/api/envoy/type/matcher/regex.proto index 78b4a2c1d61e6..b23c0bff30750 100644 --- a/api/envoy/type/matcher/regex.proto +++ b/api/envoy/type/matcher/regex.proto @@ -19,12 +19,25 @@ message RegexMatcher { // Google's `RE2 `_ regex engine. The regex string must adhere to // the documented `syntax `_. The engine is designed // to complete execution in linear time as well as limit the amount of memory used. + // + // Envoy supports program size checking via runtime. The runtime keys `re2.max_program_size.error_level` + // and `re2.max_program_size.warn_level` can be set to integers as the maximum program size or + // complexity that a compiled regex can have before an exception is thrown or a warning is + // logged, respectively. `re2.max_program_size.error_level` defaults to 100, and + // `re2.max_program_size.warn_level` has no default if unset (will not check/log a warning). 
+ // + // Envoy emits two stats for tracking the program size of regexes: the histogram `re2.program_size`, + // which records the program size, and the counter `re2.exceeded_warn_level`, which is incremented + // each time the program size exceeds the warn level threshold. message GoogleRE2 { // This field controls the RE2 "program size" which is a rough estimate of how complex a // compiled regex is to evaluate. A regex that has a program size greater than the configured // value will fail to compile. In this case, the configured max program size can be increased // or the regex can be simplified. If not specified, the default is 100. - google.protobuf.UInt32Value max_program_size = 1; + // + // This field is deprecated; regexp validation should be performed on the management server + // instead of being done by each individual client. + google.protobuf.UInt32Value max_program_size = 1 [deprecated = true]; } oneof engine_type { diff --git a/api/envoy/type/matcher/v3/regex.proto b/api/envoy/type/matcher/v3/regex.proto index 393274794abf4..6087c6f90fadf 100644 --- a/api/envoy/type/matcher/v3/regex.proto +++ b/api/envoy/type/matcher/v3/regex.proto @@ -22,6 +22,16 @@ message RegexMatcher { // Google's `RE2 `_ regex engine. The regex string must adhere to // the documented `syntax `_. The engine is designed // to complete execution in linear time as well as limit the amount of memory used. + // + // Envoy supports program size checking via runtime. The runtime keys `re2.max_program_size.error_level` + // and `re2.max_program_size.warn_level` can be set to integers as the maximum program size or + // complexity that a compiled regex can have before an exception is thrown or a warning is + // logged, respectively. `re2.max_program_size.error_level` defaults to 100, and + // `re2.max_program_size.warn_level` has no default if unset (will not check/log a warning). 
+ // + // Envoy emits two stats for tracking the program size of regexes: the histogram `re2.program_size`, + // which records the program size, and the counter `re2.exceeded_warn_level`, which is incremented + // each time the program size exceeds the warn level threshold. message GoogleRE2 { option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.RegexMatcher.GoogleRE2"; @@ -30,7 +40,10 @@ message RegexMatcher { // compiled regex is to evaluate. A regex that has a program size greater than the configured // value will fail to compile. In this case, the configured max program size can be increased // or the regex can be simplified. If not specified, the default is 100. - google.protobuf.UInt32Value max_program_size = 1; + // + // This field is deprecated; regexp validation should be performed on the management server + // instead of being done by each individual client. + google.protobuf.UInt32Value max_program_size = 1 [deprecated = true]; } oneof engine_type { @@ -59,7 +72,7 @@ message RegexMatchAndSubstitute { // so as to replace just one occurrence of a pattern. Capture groups can be // used in the pattern to extract portions of the subject string, and then // referenced in the substitution string. - RegexMatcher pattern = 1; + RegexMatcher pattern = 1 [(validate.rules).message = {required: true}]; // The string that should be substituted into matching portions of the // subject string during a substitution operation to produce a new string. diff --git a/api/envoy/type/matcher/v4alpha/BUILD b/api/envoy/type/matcher/v4alpha/BUILD new file mode 100644 index 0000000000000..e63f52b2baa50 --- /dev/null +++ b/api/envoy/type/matcher/v4alpha/BUILD @@ -0,0 +1,14 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/annotations:pkg", + "//envoy/type/matcher/v3:pkg", + "//envoy/type/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/type/matcher/v4alpha/metadata.proto b/api/envoy/type/matcher/v4alpha/metadata.proto new file mode 100644 index 0000000000000..8abe14e7b6673 --- /dev/null +++ b/api/envoy/type/matcher/v4alpha/metadata.proto @@ -0,0 +1,105 @@ +syntax = "proto3"; + +package envoy.type.matcher.v4alpha; + +import "envoy/type/matcher/v4alpha/value.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.type.matcher.v4alpha"; +option java_outer_classname = "MetadataProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Metadata matcher] + +// MetadataMatcher provides a general interface to check if a given value is matched in +// :ref:`Metadata `. It uses `filter` and `path` to retrieve the value +// from the Metadata and then check if it's matched to the specified value. +// +// For example, for the following Metadata: +// +// .. code-block:: yaml +// +// filter_metadata: +// envoy.filters.http.rbac: +// fields: +// a: +// struct_value: +// fields: +// b: +// struct_value: +// fields: +// c: +// string_value: pro +// t: +// list_value: +// values: +// - string_value: m +// - string_value: n +// +// The following MetadataMatcher is matched as the path [a, b, c] will retrieve a string value "pro" +// from the Metadata which is matched to the specified prefix match. +// +// .. 
code-block:: yaml +// +// filter: envoy.filters.http.rbac +// path: +// - key: a +// - key: b +// - key: c +// value: +// string_match: +// prefix: pr +// +// The following MetadataMatcher is matched as the code will match one of the string values in the +// list at the path [a, t]. +// +// .. code-block:: yaml +// +// filter: envoy.filters.http.rbac +// path: +// - key: a +// - key: t +// value: +// list_match: +// one_of: +// string_match: +// exact: m +// +// An example use of MetadataMatcher is specifying additional metadata in envoy.filters.http.rbac to +// enforce access control based on dynamic metadata in a request. See :ref:`Permission +// ` and :ref:`Principal +// `. + +// [#next-major-version: MetadataMatcher should use StructMatcher] +message MetadataMatcher { + option (udpa.annotations.versioning).previous_message_type = + "envoy.type.matcher.v3.MetadataMatcher"; + + // Specifies the segment in a path to retrieve value from Metadata. + // Note: Currently it's not supported to retrieve a value from a list in Metadata. This means that + // if the segment key refers to a list, it has to be the last segment in a path. + message PathSegment { + option (udpa.annotations.versioning).previous_message_type = + "envoy.type.matcher.v3.MetadataMatcher.PathSegment"; + + oneof segment { + option (validate.required) = true; + + // If specified, use the key to retrieve the value in a Struct. + string key = 1 [(validate.rules).string = {min_bytes: 1}]; + } + } + + // The filter name to retrieve the Struct from the Metadata. + string filter = 1 [(validate.rules).string = {min_bytes: 1}]; + + // The path to retrieve the Value from the Struct. + repeated PathSegment path = 2 [(validate.rules).repeated = {min_items: 1}]; + + // The MetadataMatcher is matched if the value retrieved by path is matched to this value. 
+ ValueMatcher value = 3 [(validate.rules).message = {required: true}]; +} diff --git a/api/envoy/type/matcher/v4alpha/node.proto b/api/envoy/type/matcher/v4alpha/node.proto new file mode 100644 index 0000000000000..a74bf808f05ae --- /dev/null +++ b/api/envoy/type/matcher/v4alpha/node.proto @@ -0,0 +1,28 @@ +syntax = "proto3"; + +package envoy.type.matcher.v4alpha; + +import "envoy/type/matcher/v4alpha/string.proto"; +import "envoy/type/matcher/v4alpha/struct.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.type.matcher.v4alpha"; +option java_outer_classname = "NodeProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Node matcher] + +// Specifies the way to match a Node. +// The match follows AND semantics. +message NodeMatcher { + option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.v3.NodeMatcher"; + + // Specifies match criteria on the node id. + StringMatcher node_id = 1; + + // Specifies match criteria on the node metadata. 
+ repeated StructMatcher node_metadatas = 2; +} diff --git a/api/envoy/type/matcher/v4alpha/number.proto b/api/envoy/type/matcher/v4alpha/number.proto new file mode 100644 index 0000000000000..b168af19ab50c --- /dev/null +++ b/api/envoy/type/matcher/v4alpha/number.proto @@ -0,0 +1,33 @@ +syntax = "proto3"; + +package envoy.type.matcher.v4alpha; + +import "envoy/type/v3/range.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.type.matcher.v4alpha"; +option java_outer_classname = "NumberProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Number matcher] + +// Specifies the way to match a double value. +message DoubleMatcher { + option (udpa.annotations.versioning).previous_message_type = + "envoy.type.matcher.v3.DoubleMatcher"; + + oneof match_pattern { + option (validate.required) = true; + + // If specified, the input double value must be in the range specified here. + // Note: The range is using half-open interval semantics [start, end). + v3.DoubleRange range = 1; + + // If specified, the input double value must be equal to the value specified here. 
+ double exact = 2; + } +} diff --git a/api/envoy/type/matcher/v4alpha/path.proto b/api/envoy/type/matcher/v4alpha/path.proto new file mode 100644 index 0000000000000..9150939bf2eed --- /dev/null +++ b/api/envoy/type/matcher/v4alpha/path.proto @@ -0,0 +1,30 @@ +syntax = "proto3"; + +package envoy.type.matcher.v4alpha; + +import "envoy/type/matcher/v4alpha/string.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.type.matcher.v4alpha"; +option java_outer_classname = "PathProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Path matcher] + +// Specifies the way to match a path on HTTP request. +message PathMatcher { + option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.v3.PathMatcher"; + + oneof rule { + option (validate.required) = true; + + // The `path` must match the URL path portion of the :path header. The query and fragment + // string (if present) are removed in the URL path portion. + // For example, the path */data* will match the *:path* header */data#fragment?param=value*. 
+ StringMatcher path = 1 [(validate.rules).message = {required: true}]; + } +} diff --git a/api/envoy/type/matcher/v4alpha/regex.proto b/api/envoy/type/matcher/v4alpha/regex.proto new file mode 100644 index 0000000000000..087c5e3f72920 --- /dev/null +++ b/api/envoy/type/matcher/v4alpha/regex.proto @@ -0,0 +1,82 @@ +syntax = "proto3"; + +package envoy.type.matcher.v4alpha; + +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.type.matcher.v4alpha"; +option java_outer_classname = "RegexProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Regex matcher] + +// A regex matcher designed for safety when used with untrusted input. +message RegexMatcher { + option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.v3.RegexMatcher"; + + // Google's `RE2 `_ regex engine. The regex string must adhere to + // the documented `syntax `_. The engine is designed + // to complete execution in linear time as well as limit the amount of memory used. + // + // Envoy supports program size checking via runtime. The runtime keys `re2.max_program_size.error_level` + // and `re2.max_program_size.warn_level` can be set to integers as the maximum program size or + // complexity that a compiled regex can have before an exception is thrown or a warning is + // logged, respectively. `re2.max_program_size.error_level` defaults to 100, and + // `re2.max_program_size.warn_level` has no default if unset (will not check/log a warning). + // + // Envoy emits two stats for tracking the program size of regexes: the histogram `re2.program_size`, + // which records the program size, and the counter `re2.exceeded_warn_level`, which is incremented + // each time the program size exceeds the warn level threshold. 
+ message GoogleRE2 { + option (udpa.annotations.versioning).previous_message_type = + "envoy.type.matcher.v3.RegexMatcher.GoogleRE2"; + + reserved 1; + + reserved "max_program_size"; + } + + oneof engine_type { + option (validate.required) = true; + + // Google's RE2 regex engine. + GoogleRE2 google_re2 = 1 [(validate.rules).message = {required: true}]; + } + + // The regex match string. The string must be supported by the configured engine. + string regex = 2 [(validate.rules).string = {min_bytes: 1}]; +} + +// Describes how to match a string and then produce a new string using a regular +// expression and a substitution string. +message RegexMatchAndSubstitute { + option (udpa.annotations.versioning).previous_message_type = + "envoy.type.matcher.v3.RegexMatchAndSubstitute"; + + // The regular expression used to find portions of a string (hereafter called + // the "subject string") that should be replaced. When a new string is + // produced during the substitution operation, the new string is initially + // the same as the subject string, but then all matches in the subject string + // are replaced by the substitution string. If replacing all matches isn't + // desired, regular expression anchors can be used to ensure a single match, + // so as to replace just one occurrence of a pattern. Capture groups can be + // used in the pattern to extract portions of the subject string, and then + // referenced in the substitution string. + RegexMatcher pattern = 1 [(validate.rules).message = {required: true}]; + + // The string that should be substituted into matching portions of the + // subject string during a substitution operation to produce a new string. + // Capture groups in the pattern can be referenced in the substitution + // string. Note, however, that the syntax for referring to capture groups is + // defined by the chosen regular expression engine. 
Google's `RE2 + // `_ regular expression engine uses a + // backslash followed by the capture group number to denote a numbered + // capture group. E.g., ``\1`` refers to capture group 1, and ``\2`` refers + // to capture group 2. + string substitution = 2; +} diff --git a/api/envoy/type/matcher/v4alpha/string.proto b/api/envoy/type/matcher/v4alpha/string.proto new file mode 100644 index 0000000000000..8ce0b12f9e2a7 --- /dev/null +++ b/api/envoy/type/matcher/v4alpha/string.proto @@ -0,0 +1,71 @@ +syntax = "proto3"; + +package envoy.type.matcher.v4alpha; + +import "envoy/type/matcher/v4alpha/regex.proto"; + +import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.type.matcher.v4alpha"; +option java_outer_classname = "StringProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: String matcher] + +// Specifies the way to match a string. +// [#next-free-field: 7] +message StringMatcher { + option (udpa.annotations.versioning).previous_message_type = + "envoy.type.matcher.v3.StringMatcher"; + + reserved 4; + + reserved "regex"; + + oneof match_pattern { + option (validate.required) = true; + + // The input string must match exactly the string specified here. + // + // Examples: + // + // * *abc* only matches the value *abc*. + string exact = 1; + + // The input string must have the prefix specified here. + // Note: empty prefix is not allowed, please use regex instead. + // + // Examples: + // + // * *abc* matches the value *abc.xyz* + string prefix = 2 [(validate.rules).string = {min_bytes: 1}]; + + // The input string must have the suffix specified here. + // Note: empty suffix is not allowed, please use regex instead. 
+ // + // Examples: + // + // * *abc* matches the value *xyz.abc* + string suffix = 3 [(validate.rules).string = {min_bytes: 1}]; + + // The input string must match the regular expression specified here. + RegexMatcher safe_regex = 5 [(validate.rules).message = {required: true}]; + } + + // If true, indicates the exact/prefix/suffix matching should be case insensitive. This has no + // effect for the safe_regex match. + // For example, the matcher *data* will match both input string *Data* and *data* if set to true. + bool ignore_case = 6; +} + +// Specifies a list of ways to match a string. +message ListStringMatcher { + option (udpa.annotations.versioning).previous_message_type = + "envoy.type.matcher.v3.ListStringMatcher"; + + repeated StringMatcher patterns = 1 [(validate.rules).repeated = {min_items: 1}]; +} diff --git a/api/envoy/type/matcher/v4alpha/struct.proto b/api/envoy/type/matcher/v4alpha/struct.proto new file mode 100644 index 0000000000000..643cc5a475708 --- /dev/null +++ b/api/envoy/type/matcher/v4alpha/struct.proto @@ -0,0 +1,91 @@ +syntax = "proto3"; + +package envoy.type.matcher.v4alpha; + +import "envoy/type/matcher/v4alpha/value.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.type.matcher.v4alpha"; +option java_outer_classname = "StructProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Struct matcher] + +// StructMatcher provides a general interface to check if a given value is matched in +// google.protobuf.Struct. It uses `path` to retrieve the value +// from the struct and then check if it's matched to the specified value. +// +// For example, for the following Struct: +// +// .. 
code-block:: yaml +// +// fields: +// a: +// struct_value: +// fields: +// b: +// struct_value: +// fields: +// c: +// string_value: pro +// t: +// list_value: +// values: +// - string_value: m +// - string_value: n +// +// The following MetadataMatcher is matched as the path [a, b, c] will retrieve a string value "pro" +// from the Metadata which is matched to the specified prefix match. +// +// .. code-block:: yaml +// +// path: +// - key: a +// - key: b +// - key: c +// value: +// string_match: +// prefix: pr +// +// The following StructMatcher is matched as the code will match one of the string values in the +// list at the path [a, t]. +// +// .. code-block:: yaml +// +// path: +// - key: a +// - key: t +// value: +// list_match: +// one_of: +// string_match: +// exact: m +// +// An example use of StructMatcher is to match metadata in envoy.v*.core.Node. +message StructMatcher { + option (udpa.annotations.versioning).previous_message_type = + "envoy.type.matcher.v3.StructMatcher"; + + // Specifies the segment in a path to retrieve value from Struct. + message PathSegment { + option (udpa.annotations.versioning).previous_message_type = + "envoy.type.matcher.v3.StructMatcher.PathSegment"; + + oneof segment { + option (validate.required) = true; + + // If specified, use the key to retrieve the value in a Struct. + string key = 1 [(validate.rules).string = {min_bytes: 1}]; + } + } + + // The path to retrieve the Value from the Struct. + repeated PathSegment path = 2 [(validate.rules).repeated = {min_items: 1}]; + + // The StructMatcher is matched if the value retrieved by path is matched to this value. 
+ ValueMatcher value = 3 [(validate.rules).message = {required: true}]; +} diff --git a/api/envoy/type/matcher/v4alpha/value.proto b/api/envoy/type/matcher/v4alpha/value.proto new file mode 100644 index 0000000000000..6e509d4601099 --- /dev/null +++ b/api/envoy/type/matcher/v4alpha/value.proto @@ -0,0 +1,71 @@ +syntax = "proto3"; + +package envoy.type.matcher.v4alpha; + +import "envoy/type/matcher/v4alpha/number.proto"; +import "envoy/type/matcher/v4alpha/string.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.type.matcher.v4alpha"; +option java_outer_classname = "ValueProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Value matcher] + +// Specifies the way to match a ProtobufWkt::Value. Primitive values and ListValue are supported. +// StructValue is not supported and is always not matched. +// [#next-free-field: 7] +message ValueMatcher { + option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.v3.ValueMatcher"; + + // NullMatch is an empty message to specify a null value. + message NullMatch { + option (udpa.annotations.versioning).previous_message_type = + "envoy.type.matcher.v3.ValueMatcher.NullMatch"; + } + + // Specifies how to match a value. + oneof match_pattern { + option (validate.required) = true; + + // If specified, a match occurs if and only if the target value is a NullValue. + NullMatch null_match = 1; + + // If specified, a match occurs if and only if the target value is a double value and is + // matched to this field. + DoubleMatcher double_match = 2; + + // If specified, a match occurs if and only if the target value is a string value and is + // matched to this field. 
+ StringMatcher string_match = 3; + + // If specified, a match occurs if and only if the target value is a bool value and is equal + // to this field. + bool bool_match = 4; + + // If specified, value match will be performed based on whether the path is referring to a + // valid primitive value in the metadata. If the path is referring to a non-primitive value, + // the result is always not matched. + bool present_match = 5; + + // If specified, a match occurs if and only if the target value is a list value and + // is matched to this field. + ListMatcher list_match = 6; + } +} + +// Specifies the way to match a list value. +message ListMatcher { + option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.v3.ListMatcher"; + + oneof match_pattern { + option (validate.required) = true; + + // If specified, at least one of the values in the list must match the value specified. + ValueMatcher one_of = 1; + } +} diff --git a/api/envoy/type/v3/ratelimit_unit.proto b/api/envoy/type/v3/ratelimit_unit.proto new file mode 100644 index 0000000000000..a3fb27ff47ba0 --- /dev/null +++ b/api/envoy/type/v3/ratelimit_unit.proto @@ -0,0 +1,30 @@ +syntax = "proto3"; + +package envoy.type.v3; + +import "udpa/annotations/status.proto"; + +option java_package = "io.envoyproxy.envoy.type.v3"; +option java_outer_classname = "RatelimitUnitProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Ratelimit Time Unit] + +// Identifies the unit of time for rate limit. +enum RateLimitUnit { + // The time unit is not known. + UNKNOWN = 0; + + // The time unit representing a second. + SECOND = 1; + + // The time unit representing a minute. + MINUTE = 2; + + // The time unit representing an hour. + HOUR = 3; + + // The time unit representing a day. 
+ DAY = 4; +} diff --git a/api/test/build/BUILD b/api/test/build/BUILD index 59f0a36410118..2dae9fa0de03f 100644 --- a/api/test/build/BUILD +++ b/api/test/build/BUILD @@ -1,7 +1,7 @@ -licenses(["notice"]) # Apache 2 - load("@envoy_api//bazel:api_build_system.bzl", "api_cc_test", "api_go_test") +licenses(["notice"]) # Apache 2 + api_cc_test( name = "build_test", srcs = ["build_test.cc"], diff --git a/api/test/validate/BUILD b/api/test/validate/BUILD index 4398672c27aff..c9a7ba701f979 100644 --- a/api/test/validate/BUILD +++ b/api/test/validate/BUILD @@ -1,7 +1,7 @@ -licenses(["notice"]) # Apache 2 - load("@envoy_api//bazel:api_build_system.bzl", "api_cc_test") +licenses(["notice"]) # Apache 2 + api_cc_test( name = "pgv_test", srcs = ["pgv_test.cc"], diff --git a/api/tools/BUILD b/api/tools/BUILD index 8d2207b940703..2273a9b9dd0b6 100644 --- a/api/tools/BUILD +++ b/api/tools/BUILD @@ -1,3 +1,5 @@ +load("@rules_python//python:defs.bzl", "py_binary", "py_test") + licenses(["notice"]) # Apache 2 py_binary( diff --git a/api/versioning/BUILD b/api/versioning/BUILD index 0ffaf85a1cdd0..e0a67d2f3cb19 100644 --- a/api/versioning/BUILD +++ b/api/versioning/BUILD @@ -1,9 +1,9 @@ # DO NOT EDIT. This file is generated by tools/proto_format/active_protos_gen.py. -licenses(["notice"]) # Apache 2 - load("@rules_proto//proto:defs.bzl", "proto_library") +licenses(["notice"]) # Apache 2 + # This tracks active development versions of protos. 
proto_library( name = "active_protos", @@ -13,10 +13,10 @@ proto_library( "//envoy/config/accesslog/v3:pkg", "//envoy/config/bootstrap/v3:pkg", "//envoy/config/cluster/v3:pkg", + "//envoy/config/common/matcher/v3:pkg", "//envoy/config/core/v3:pkg", "//envoy/config/endpoint/v3:pkg", "//envoy/config/filter/thrift/router/v2alpha1:pkg", - "//envoy/config/filter/udp/udp_proxy/v2alpha:pkg", "//envoy/config/grpc_credential/v3:pkg", "//envoy/config/health_checker/redis/v2:pkg", "//envoy/config/listener/v3:pkg", @@ -38,15 +38,18 @@ proto_library( "//envoy/data/tap/v3:pkg", "//envoy/extensions/access_loggers/file/v3:pkg", "//envoy/extensions/access_loggers/grpc/v3:pkg", + "//envoy/extensions/access_loggers/wasm/v3:pkg", "//envoy/extensions/clusters/aggregate/v3:pkg", "//envoy/extensions/clusters/dynamic_forward_proxy/v3:pkg", "//envoy/extensions/clusters/redis/v3:pkg", "//envoy/extensions/common/dynamic_forward_proxy/v3:pkg", "//envoy/extensions/common/ratelimit/v3:pkg", "//envoy/extensions/common/tap/v3:pkg", - "//envoy/extensions/filter/udp/dns_filter/v3alpha:pkg", + "//envoy/extensions/compression/gzip/compressor/v3:pkg", + "//envoy/extensions/compression/gzip/decompressor/v3:pkg", "//envoy/extensions/filters/common/fault/v3:pkg", "//envoy/extensions/filters/http/adaptive_concurrency/v3:pkg", + "//envoy/extensions/filters/http/admission_control/v3alpha:pkg", "//envoy/extensions/filters/http/aws_lambda/v3:pkg", "//envoy/extensions/filters/http/aws_request_signing/v3:pkg", "//envoy/extensions/filters/http/buffer/v3:pkg", @@ -54,6 +57,7 @@ proto_library( "//envoy/extensions/filters/http/compressor/v3:pkg", "//envoy/extensions/filters/http/cors/v3:pkg", "//envoy/extensions/filters/http/csrf/v3:pkg", + "//envoy/extensions/filters/http/decompressor/v3:pkg", "//envoy/extensions/filters/http/dynamic_forward_proxy/v3:pkg", "//envoy/extensions/filters/http/dynamo/v3:pkg", "//envoy/extensions/filters/http/ext_authz/v3:pkg", @@ -76,6 +80,7 @@ proto_library( 
"//envoy/extensions/filters/http/router/v3:pkg", "//envoy/extensions/filters/http/squash/v3:pkg", "//envoy/extensions/filters/http/tap/v3:pkg", + "//envoy/extensions/filters/http/wasm/v3:pkg", "//envoy/extensions/filters/listener/http_inspector/v3:pkg", "//envoy/extensions/filters/listener/original_dst/v3:pkg", "//envoy/extensions/filters/listener/original_src/v3:pkg", @@ -96,18 +101,31 @@ proto_library( "//envoy/extensions/filters/network/ratelimit/v3:pkg", "//envoy/extensions/filters/network/rbac/v3:pkg", "//envoy/extensions/filters/network/redis_proxy/v3:pkg", + "//envoy/extensions/filters/network/rocketmq_proxy/v3:pkg", "//envoy/extensions/filters/network/sni_cluster/v3:pkg", "//envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha:pkg", "//envoy/extensions/filters/network/tcp_proxy/v3:pkg", "//envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3:pkg", "//envoy/extensions/filters/network/thrift_proxy/v3:pkg", + "//envoy/extensions/filters/network/wasm/v3:pkg", "//envoy/extensions/filters/network/zookeeper_proxy/v3:pkg", + "//envoy/extensions/filters/udp/dns_filter/v3alpha:pkg", + "//envoy/extensions/filters/udp/udp_proxy/v3:pkg", + "//envoy/extensions/internal_redirect/allow_listed_routes/v3:pkg", + "//envoy/extensions/internal_redirect/previous_routes/v3:pkg", + "//envoy/extensions/internal_redirect/safe_cross_scheme/v3:pkg", + "//envoy/extensions/network/socket_interface/v3:pkg", "//envoy/extensions/retry/host/omit_host_metadata/v3:pkg", "//envoy/extensions/retry/priority/previous_priorities/v3:pkg", "//envoy/extensions/transport_sockets/alts/v3:pkg", + "//envoy/extensions/transport_sockets/proxy_protocol/v3:pkg", + "//envoy/extensions/transport_sockets/quic/v3:pkg", "//envoy/extensions/transport_sockets/raw_buffer/v3:pkg", "//envoy/extensions/transport_sockets/tap/v3:pkg", "//envoy/extensions/transport_sockets/tls/v3:pkg", + "//envoy/extensions/upstreams/http/generic/v3:pkg", + "//envoy/extensions/upstreams/http/http/v3:pkg", + 
"//envoy/extensions/upstreams/http/tcp/v3:pkg", "//envoy/extensions/wasm/v3:pkg", "//envoy/service/accesslog/v3:pkg", "//envoy/service/auth/v3:pkg", @@ -115,6 +133,7 @@ proto_library( "//envoy/service/discovery/v3:pkg", "//envoy/service/endpoint/v3:pkg", "//envoy/service/event_reporting/v3:pkg", + "//envoy/service/extension/v3:pkg", "//envoy/service/health/v3:pkg", "//envoy/service/listener/v3:pkg", "//envoy/service/load_stats/v3:pkg", @@ -210,7 +229,7 @@ proto_library( "//envoy/config/filter/network/thrift_proxy/v2alpha1:pkg", "//envoy/config/filter/network/zookeeper_proxy/v1alpha1:pkg", "//envoy/config/filter/thrift/rate_limit/v2alpha1:pkg", - "//envoy/config/filter/udp/dns_filter/v2alpha:pkg", + "//envoy/config/filter/udp/udp_proxy/v2alpha:pkg", "//envoy/config/grpc_credential/v2alpha:pkg", "//envoy/config/listener/v2:pkg", "//envoy/config/metrics/v2:pkg", @@ -224,7 +243,6 @@ proto_library( "//envoy/config/transport_socket/alts/v2alpha:pkg", "//envoy/config/transport_socket/raw_buffer/v2:pkg", "//envoy/config/transport_socket/tap/v2alpha:pkg", - "//envoy/config/wasm/v2alpha:pkg", "//envoy/data/accesslog/v2:pkg", "//envoy/data/cluster/v2alpha:pkg", "//envoy/data/core/v2alpha:pkg", diff --git a/api/xds_protocol.rst b/api/xds_protocol.rst index 6b580ad3d4467..1b254ad7f1f34 100644 --- a/api/xds_protocol.rst +++ b/api/xds_protocol.rst @@ -77,7 +77,7 @@ API flow For typical HTTP routing scenarios, the core resource types for the client's configuration are `Listener`, `RouteConfiguration`, `Cluster`, and `ClusterLoadAssignment`. Each `Listener` resource may point to a `RouteConfiguration` resource, which may point to one or more `Cluster` resources, -and each Cluster` resource may point to a `ClusterLoadAssignment` resource. +and each `Cluster` resource may point to a `ClusterLoadAssignment` resource. Envoy fetches all `Listener` and `Cluster` resources at startup. 
It then fetches whatever `RouteConfiguration` and `ClusterLoadAssignment` resources that are required by the `Listener` and diff --git a/bazel/BUILD b/bazel/BUILD index 8c2b74a4428a1..ee7ad281d8090 100644 --- a/bazel/BUILD +++ b/bazel/BUILD @@ -1,13 +1,11 @@ load("@rules_cc//cc:defs.bzl", "cc_library", "cc_proto_library") +load("//bazel:envoy_build_system.bzl", "envoy_package") +load("//bazel:envoy_internal.bzl", "envoy_select_force_libcpp") licenses(["notice"]) # Apache 2 -load("//bazel:envoy_build_system.bzl", "envoy_package") - envoy_package() -load("//bazel:envoy_internal.bzl", "envoy_select_force_libcpp") - exports_files([ "gen_sh_test_runner.sh", "sh_test_wrapper.sh", @@ -43,12 +41,6 @@ genrule( stamp = 1, ) -sh_binary( - name = "fuzzit_wrapper", - srcs = ["fuzzit_wrapper.sh"], - data = ["@fuzzit_linux//:fuzzit"], -) - # A target to optionally link C++ standard library dynamically in sanitizer runs. # TSAN doesn't support libc/libstdc++ static linking per doc: # http://releases.llvm.org/8.0.1/tools/clang/docs/ThreadSanitizer.html @@ -107,6 +99,11 @@ config_setting( values = {"compilation_mode": "dbg"}, ) +config_setting( + name = "no_debug_info", + values = {"define": "no_debug_info=1"}, +) + config_setting( name = "asan_build", values = {"define": "ENVOY_CONFIG_ASAN=1"}, @@ -127,11 +124,6 @@ config_setting( values = {"define": "ENVOY_CONFIG_COVERAGE=1"}, ) -config_setting( - name = "compdb_build", - values = {"define": "ENVOY_CONFIG_COMPILATION_DATABASE=1"}, -) - config_setting( name = "clang_build", flag_values = { @@ -202,6 +194,11 @@ config_setting( values = {"define": "path_normalization_by_default=true"}, ) +config_setting( + name = "enable_new_codecs_in_integration_tests", + values = {"define": "use_new_codecs_in_integration_tests=true"}, +) + cc_proto_library( name = "grpc_health_proto", deps = ["@com_github_grpc_grpc//src/proto/grpc/health/v1:_health_proto_only"], @@ -344,6 +341,19 @@ config_setting( values = {"define": "logger=android"}, ) 
+config_setting( + name = "libfuzzer_coverage", + define_values = { + "FUZZING_ENGINE": "libfuzzer", + "ENVOY_CONFIG_COVERAGE": "1", + }, +) + +config_setting( + name = "libfuzzer", + values = {"define": "FUZZING_ENGINE=libfuzzer"}, +) + alias( name = "apple", actual = select( diff --git a/bazel/EXTERNAL_DEPS.md b/bazel/EXTERNAL_DEPS.md index 7793129376aaa..4f66ef80eac8b 100644 --- a/bazel/EXTERNAL_DEPS.md +++ b/bazel/EXTERNAL_DEPS.md @@ -6,7 +6,9 @@ values can change when Github change their tar/gzip libraries breaking builds. Maintainer provided tarballs are more stable and the maintainer can provide the SHA256. -# Adding external dependencies to Envoy (native Bazel) +# Adding external dependencies to Envoy (C++) + +## Native Bazel This is the preferred style of adding dependencies that use Bazel for their build process. @@ -17,19 +19,20 @@ build process. `external_deps` attribute. 3. `bazel test //test/...` -# Adding external dependencies to Envoy (external CMake) +## External CMake (preferred) This is the preferred style of adding dependencies that use CMake for their build system. 1. Define a the source Bazel repository in [`bazel/repositories.bzl`](repositories.bzl), in the `envoy_dependencies()` function. -2. Add a `cmake_external` rule to [`bazel/foreign_cc/BUILD`](bazel/foreign_cc/BUILD). This will - reference the source repository in step 1. +2. Add a `cmake_external` rule to [`bazel/foreign_cc/BUILD`](foreign_cc/BUILD). This will reference + the source repository in step 1. 3. Reference your new external dependency in some `envoy_cc_library` via the name bound in step 1 `external_deps` attribute. 4. `bazel test //test/...` -# Adding external dependencies to Envoy (genrule repository) + +## genrule repository This is the newer style of adding dependencies with no upstream Bazel configs. 
It wraps the dependency's native build tooling in a Bazel-aware shell script, @@ -54,6 +57,24 @@ Dependencies between external libraries can use the standard Bazel dependency resolution logic, using the `$(location)` shell extension to resolve paths to binaries, libraries, headers, etc. +# Adding external dependencies to Envoy (Python) + +Python dependencies should be added via `pip3` and `rules_python`. The process +is: + +1. Define a `pip3_import()` pointing at your target `requirements.txt` in + [`bazel/repositories_extra.bzl`](repositories_extra.bzl) + +2. Add a `pip_install()` invocation in + [`bazel/dependency_imports.bzl`](dependency_imports.bzl). + +3. Add a `requirements("> user.bazelrc ``` + Note: Either `libc++` or `libstdc++-7-dev` (or higher) must be installed. These are typically + available via a package manager, but may not be available in default repositories depending on + OS version. To build against `libc++` build with the `--config=libc++` instead of the + `--config=clang` flag. + + ### macOS On macOS, you'll need to install several dependencies. This can be accomplished via [Homebrew](https://brew.sh/): ``` brew install coreutils wget cmake libtool go bazel automake ninja clang-format autoconf aspell ``` _notes_: `coreutils` is used for `realpath`, `gmd5sum` and `gsha256sum` - Xcode is also required to build Envoy on macOS. + The full version of Xcode (not just Command Line Tools) is also required to build Envoy on macOS. Envoy compiles and passes tests with the version of clang installed by Xcode 11.1: Apple clang version 11.0.0 (clang-1100.0.33.8). @@ -97,36 +111,79 @@ for how to update or override dependencies. version of `ar` on the PATH, so if you run into issues building third party code like luajit consider uninstalling binutils. - On Windows, additional dependencies are required: + ### Windows + On Windows, you'll need to install several dependencies manually. 
- Install the [MSYS2 shell](https://msys2.github.io/) and install the `diffutils`, `patch`, - `unzip`, and `zip` packages using `pacman`. Set the `BAZEL_SH` environment variable to the path - of the installed MSYS2 `bash.exe` executable. Setting the `MSYS2_ARG_CONV_EXCL` environment - variable to a value of `*` is often advisable to ensure argument parsing in the MSYS2 shell - behaves as expected. - - `Git` is required. The version installable via MSYS2 is sufficient. + [python3](https://www.python.org/downloads/): Specifically, the Windows-native flavor. The POSIX flavor + available via MSYS2 will not work, nor will the Windows Store flavor. You need to add a symlink for `python3.exe` pointing to + the installed `python.exe` for Bazel rules which follow POSIX conventions. Be sure to add + `pip.exe` to the PATH and install the `wheel` package. + ``` + mklink %USERPROFILE%\Python38\python3.exe %USERPROFILE%\Python38\python.exe + set PATH=%PATH%;%USERPROFILE%\Python38 + set PATH=%PATH%;%USERPROFILE%\Python38\Scripts + pip install wheel + ``` - Install the Windows-native [python3](https://www.python.org/downloads/), the POSIX flavor - available via MSYS2 will not work. - - For building with MSVC (the `msvc-cl` config option), you must install at least the VC++ - workload from the - [Build Tools for Visual Studio 2019](https://visualstudio.microsoft.com/downloads/#build-tools-for-visual-studio-2019). + [Build Tools for Visual Studio 2019](https://visualstudio.microsoft.com/downloads/#build-tools-for-visual-studio-2019): + For building with MSVC (the `msvc-cl` config option), you must install at least the VC++ workload. You may also download Visual Studio 2019 and use the Build Tools packaged with that installation. Earlier versions of VC++ Build Tools/Visual Studio are not recommended at this time. 
If installed in a non-standard filesystem location, be sure to set the `BAZEL_VC` environment variable to the path of the VC++ package to allow Bazel to find your installation of VC++. Use caution to ensure the `link.exe` that resolves on your PATH is from VC++ Build Tools and not MSYS2. + ``` + set BAZEL_VC=%USERPROFILE%\VSBT2019\VC + set PATH=%PATH%;%USERPROFILE%\VSBT2019\VC\Tools\MSVC\14.26.28801\bin\Hostx64\x64 + ``` Ensure `CMake` and `ninja` binaries are on the PATH. The versions packaged with VC++ Build Tools are sufficient. + ``` + set PATH=%PATH%;%USERPROFILE%\VSBT2019\Common7\IDE\CommonExtensions\Microsoft\CMake\CMake\bin + set PATH=%PATH%;%USERPROFILE%\VSBT2019\Common7\IDE\CommonExtensions\Microsoft\CMake\Ninja + ``` + [MSYS2 shell](https://msys2.github.io/): Set the `BAZEL_SH` environment variable to the path + of the installed MSYS2 `bash.exe` executable. Additionally, setting the `MSYS2_ARG_CONV_EXCL` environment + variable to a value of `*` is often advisable to ensure argument parsing in the MSYS2 shell + behaves as expected. + ``` + set PATH=%PATH%;%USERPROFILE%\msys64\usr\bin + set BAZEL_SH=%USERPROFILE%\msys64\usr\bin\bash.exe + set MSYS2_ARG_CONV_EXCL=* + ``` In addition, because of the behavior of the `rules_foreign_cc` component of Bazel, set the `TMPDIR` environment variable to a path usable as a temporary directory (e.g. `C:\Windows\TEMP`). This variable is used frequently by `mktemp` from MSYS2 in the Envoy Bazel - build and can cause problems if not set to a value outside the MSYS2 filesystem. + build and can cause problems if not set to a value outside the MSYS2 filesystem. Note that + using the `ci/windows_ci_steps.sh` script (to build and run tests) will create a directory + symlink linking `C:\c` to `C:\` in order to enable build scripts run via MSYS2 to access + dependencies in the temporary directory specified above. If you are not using that script, you + will need to create that symlink manually. 
+ ``` + set TMPDIR=C:\Windows\TEMP + mklink /d C:\c C:\ + ``` + In the MSYS2 shell, install additional packages via pacman: + ``` + pacman -S diffutils patch unzip zip + ``` + + [Git](https://git-scm.com/downloads): The version installable via MSYS2 is also sufficient. + ``` + set PATH=%PATH%;%USERPROFILE%\Git\bin + ``` + + Lastly, persist environment variable changes. + ``` + setx PATH "%PATH%" + setx BAZEL_SH "%BAZEL_SH%" + setx MSYS2_ARG_CONV_EXCL "%MSYS2_ARG_CONV_EXCL%" + setx BAZEL_VC "%BAZEL_VC%" + setx TMPDIR "%TMPDIR%" + ``` 1. Install Golang on your machine. This is required as part of building [BoringSSL](https://boringssl.googlesource.com/boringssl/+/HEAD/BUILDING.md) and also for [Buildifer](https://github.com/bazelbuild/buildtools) which is used for formatting bazel BUILD files. @@ -138,12 +195,20 @@ for how to update or override dependencies. ## Building Envoy with the CI Docker image -Envoy can also be built with the Docker image used for CI, by installing Docker and executing: +Envoy can also be built with the Docker image used for CI, by installing Docker and executing the following. + +On Linux, run: ``` ./ci/run_envoy_docker.sh './ci/do_ci.sh bazel.dev' ``` +On Windows: + +``` +./ci/run_envoy_docker_windows.sh './ci/windows_ci_steps.sh' +``` + See also the [documentation](https://github.com/envoyproxy/envoy/tree/master/ci) for developer use of the CI Docker image. @@ -207,7 +272,7 @@ for more details. ## Supported compiler versions We now require Clang >= 5.0 due to known issues with std::string thread safety and C++14 support. GCC >= 7 is also -known to work. Currently the CI is running with Clang 9. +known to work. Currently the CI is running with Clang 10. ## Clang STL debug symbols @@ -215,6 +280,16 @@ By default Clang drops some debug symbols that are required for pretty printing More information can be found [here](https://bugs.llvm.org/show_bug.cgi?id=24202). 
The easy solution is to set ```--copt=-fno-limit-debug-info``` on the CLI or in your .bazelrc file. +## Removing debug info + +If you don't want your debug or release binaries to contain debug info +to reduce binary size, pass `--define=no_debug_info=1` when building. +This is primarily useful when building envoy as a static library. When +building a linked envoy binary you can build the implicit `.stripped` +target from [`cc_binary`](https://docs.bazel.build/versions/master/be/c-cpp.html#cc_binary) +or pass [`--strip=always`](https://docs.bazel.build/versions/master/command-line-reference.html#flag--strip) +instead. + # Testing Envoy with Bazel All the Envoy tests can be built and run with: @@ -448,6 +523,14 @@ bazel test -c dbg --config=macos-asan //test/... Log verbosity is controlled at runtime in all builds. +To obtain `nghttp2` traces, you can set `ENVOY_NGHTTP2_TRACE` in the environment for enhanced +logging at `-l trace`. For example, in tests: + +``` +bazel test //test/integration:protocol_integration_test --test_output=streamed \ + --test_arg="-l trace" --test_env="ENVOY_NGHTTP2_TRACE=" +``` + ## Disabling optional features The following optional features can be disabled on the Bazel build command-line: @@ -470,7 +553,7 @@ The following optional features can be enabled on the Bazel build command-line: * Perf annotation with `--define perf_annotation=enabled` (see source/common/common/perf_annotation.h for details). * BoringSSL can be built in a FIPS-compliant mode with `--define boringssl=fips` - (see [FIPS 140-2](https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/ssl.html#fips-140-2) for details). + (see [FIPS 140-2](https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/security/ssl#fips-140-2) for details). * ASSERT() can be configured to log failures and increment a stat counter in a release build with `--define log_debug_assert_in_release=enabled`. 
The default behavior is to compile debug assertions out of release builds so that the condition is not evaluated. This option has no effect in debug builds. @@ -528,6 +611,17 @@ local_repository( ... ``` +## Extra extensions + +If you are building your own Envoy extensions or custom Envoy builds and encounter visibility +problems, you may need to adjust the default visibility rules. +By default, Envoy extensions are set up to only be visible to code within the +[//source/extensions](../source/extensions/), or the Envoy server target. To adjust this, +add any additional targets you need to `ADDITIONAL_VISIBILITY` in +[extensions_build_config.bzl](../source/extensions/extensions_build_config.bzl). +See the instructions above about how to create your own custom version of +[extensions_build_config.bzl](../source/extensions/extensions_build_config.bzl). + # Release builds Release builds should be built in `opt` mode, processed with `strip` and have a @@ -555,21 +649,27 @@ test/run_envoy_bazel_coverage.sh The summary results are printed to the standard output and the full coverage report is available in `generated/coverage/coverage.html`. +To generate coverage results for fuzz targets, use the `FUZZ_COVERAGE` environment variable, e.g.: +``` +FUZZ_COVERAGE=true VALIDATE_COVERAGE=false test/run_envoy_bazel_coverage.sh +``` +This generates a coverage report for fuzz targets after running the target for one minute against fuzzing engine libfuzzer using its corpus as initial seed inputs. The full coverage report will be available in `generated/fuzz_coverage/coverage.html`. + Coverage for every PR is available in Circle in the "artifacts" tab of the coverage job. You will need to navigate down and open "coverage.html" but then you can navigate per normal. NOTE: We have seen some issues with seeing the artifacts tab. If you can't see it, log out of Circle, and then log back in and it should start working.
The latest coverage report for master is available -[here](https://storage.googleapis.com/envoy-coverage/report-master/index.html). +[here](https://storage.googleapis.com/envoy-postsubmit/master/coverage/index.html). The latest fuzz coverage report for master is available [here](https://storage.googleapis.com/envoy-postsubmit/master/fuzz_coverage/index.html). It's also possible to specialize the coverage build to a specified test or test dir. This is useful when doing things like exploring the coverage of a fuzzer over its corpus. This can be done by passing coverage targets as the command-line arguments and using the `VALIDATE_COVERAGE` environment -variable, e.g.: +variable, e.g. for a fuzz test: ``` -VALIDATE_COVERAGE=false test/run_envoy_bazel_coverage.sh //test/common/common:base64_fuzz_test +FUZZ_COVERAGE=true VALIDATE_COVERAGE=false test/run_envoy_bazel_coverage.sh //test/common/common:base64_fuzz_test ``` # Cleaning the build and test artifacts @@ -635,7 +735,7 @@ For example, you can use [You Complete Me](https://valloric.github.io/YouComplet For example, use following command to prepare a compilation database: ``` -TEST_TMPDIR=/tmp tools/gen_compilation_database.py --run_bazel_build +TEST_TMPDIR=/tmp tools/gen_compilation_database.py ``` @@ -655,7 +755,7 @@ also have 'buildifier' installed from the bazel distribution. 
Edit the paths shown here to reflect the installation locations on your system: ```shell -export CLANG_FORMAT="$HOME/ext/clang+llvm-9.0.0-x86_64-linux-gnu-ubuntu-16.04/bin/clang-format" +export CLANG_FORMAT="$HOME/ext/clang+llvm-10.0.0-x86_64-linux-gnu-ubuntu-18.04/bin/clang-format" export BUILDIFIER_BIN="/usr/bin/buildifier" ``` diff --git a/bazel/antlr.patch b/bazel/antlr.patch new file mode 100644 index 0000000000000..ad0efbc8642e2 --- /dev/null +++ b/bazel/antlr.patch @@ -0,0 +1,26 @@ +diff --git a/runtime/Cpp/runtime/src/atn/ATNDeserializer.cpp b/runtime/Cpp/runtime/src/atn/ATNDeserializer.cpp +index c6cceda13..e86533759 100755 +--- a/runtime/Cpp/runtime/src/atn/ATNDeserializer.cpp ++++ b/runtime/Cpp/runtime/src/atn/ATNDeserializer.cpp +@@ -104,7 +104,7 @@ void deserializeSets( + + } + +-ATNDeserializer::ATNDeserializer(): ATNDeserializer(ATNDeserializationOptions::getDefaultOptions()) { ++ATNDeserializer::ATNDeserializer(): ATNDeserializer(ATNDeserializationOptions()) { + } + + ATNDeserializer::ATNDeserializer(const ATNDeserializationOptions& dso): deserializationOptions(dso) { +diff --git a/runtime/Cpp/runtime/src/atn/LexerATNSimulator.cpp b/runtime/Cpp/runtime/src/atn/LexerATNSimulator.cpp +index 827c3d59f..62914cf55 100755 +--- a/runtime/Cpp/runtime/src/atn/LexerATNSimulator.cpp ++++ b/runtime/Cpp/runtime/src/atn/LexerATNSimulator.cpp +@@ -69,7 +69,7 @@ void LexerATNSimulator::copyState(LexerATNSimulator *simulator) { + } + + size_t LexerATNSimulator::match(CharStream *input, size_t mode) { +- match_calls++; ++ // match_calls++; + _mode = mode; + ssize_t mark = input->mark(); + diff --git a/bazel/coverage/BUILD b/bazel/coverage/BUILD new file mode 100644 index 0000000000000..9aa87d0869687 --- /dev/null +++ b/bazel/coverage/BUILD @@ -0,0 +1,9 @@ +licenses(["notice"]) # Apache 2 + +# TODO(lizan): Add test for this and upstream to upstream Bazel. 
+filegroup( + name = "coverage_support", + srcs = ["collect_cc_coverage.sh"], +) + +exports_files(["fuzz_coverage_wrapper.sh"]) diff --git a/bazel/coverage/collect_cc_coverage.sh b/bazel/coverage/collect_cc_coverage.sh new file mode 100755 index 0000000000000..53926e5cb6aff --- /dev/null +++ b/bazel/coverage/collect_cc_coverage.sh @@ -0,0 +1,175 @@ +#!/bin/bash -x +# +# This is a fork of https://github.com/bazelbuild/bazel/blob/3.1.0/tools/test/collect_cc_coverage.sh +# to cover most of use cases in Envoy. +# TODO(lizan): Move this to upstream Bazel +# +# Copyright 2016 The Bazel Authors. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This script collects code coverage data for C++ sources, after the tests +# were executed. +# +# Bazel C++ code coverage collection support is poor and limited. There is +# an ongoing effort to improve this (tracking issue #1118). +# +# Bazel uses the lcov tool for gathering coverage data. There is also +# an experimental support for clang llvm coverage, which uses the .profraw +# data files to compute the coverage report. +# +# This script assumes the following environment variables are set: +# - COVERAGE_DIR Directory containing metadata files needed for +# coverage collection (e.g. gcda files, profraw). +# - COVERAGE_MANIFEST Location of the instrumented file manifest. +# - COVERAGE_GCOV_PATH Location of gcov. This is set by the TestRunner. +# - COVERAGE_GCOV_OPTIONS Additional options to pass to gcov. 
+# - ROOT Location from where the code coverage collection +# was invoked. +# +# The script looks in $COVERAGE_DIR for the C++ metadata coverage files (either +# gcda or profraw) and uses either lcov or gcov to get the coverage data. +# The coverage data is placed in $COVERAGE_OUTPUT_FILE. + +# Checks if clang llvm coverage should be used instead of lcov. +function uses_llvm() { + if stat "${COVERAGE_DIR}"/*.profraw >/dev/null 2>&1; then + return 0 + fi + return 1 +} + +# Returns 0 if gcov must be used, 1 otherwise. +function uses_gcov() { + [[ "$GCOV_COVERAGE" -eq "1" ]] && return 0 + return 1 +} + +function init_gcov() { + # Symlink the gcov tool such with a link called gcov. Clang comes with a tool + # called llvm-cov, which behaves like gcov if symlinked in this way (otherwise + # we would need to invoke it with "llvm-cov gcov"). + # For more details see https://llvm.org/docs/CommandGuide/llvm-cov.html. + GCOV="${COVERAGE_DIR}/gcov" + ln -s "${COVERAGE_GCOV_PATH}" "${GCOV}" +} + +# Computes code coverage data using the clang generated metadata found under +# $COVERAGE_DIR. +# Writes the collected coverage into the given output file. +function llvm_coverage() { + local output_file="${1}"; shift + export LLVM_PROFILE_FILE="${COVERAGE_DIR}/%h-%p-%m.profraw" + "${COVERAGE_GCOV_PATH}" merge -output "${output_file}.data" \ + "${COVERAGE_DIR}"/*.profraw + + + local object_files="$(find -L "${RUNFILES_DIR}" -type f -exec file -L {} \; \ + | grep ELF | grep -v "LSB core" | sed 's,:.*,,')" + + local object_param="" + for object_file in ${object_files}; do + object_param+=" -object ${object_file}" + done + + llvm-cov export -instr-profile "${output_file}.data" -format=lcov \ + -ignore-filename-regex='.*external/.+' \ + -ignore-filename-regex='/tmp/.+' \ + ${object_param} | sed 's#/proc/self/cwd/##' > "${output_file}" +} + +# Generates a code coverage report in gcov intermediate text format by invoking +# gcov and using the profile data (.gcda) and notes (.gcno) files. 
+# +# The profile data files are expected to be found under $COVERAGE_DIR. +# The notes file are expected to be found under $ROOT. +# +# - output_file The location of the file where the generated code coverage +# report is written. +function gcov_coverage() { + local output_file="${1}"; shift + + # We'll save the standard output of each the gcov command in this log. + local gcov_log="$output_file.gcov.log" + + # Copy .gcno files next to their corresponding .gcda files in $COVERAGE_DIR + # because gcov expects them to be in the same directory. + while read -r line; do + if [[ ${line: -4} == "gcno" ]]; then + gcno_path=${line} + local gcda="${COVERAGE_DIR}/$(dirname ${gcno_path})/$(basename ${gcno_path} .gcno).gcda" + # If the gcda file was not found we skip generating coverage from the gcno + # file. + if [[ -f "$gcda" ]]; then + # gcov expects both gcno and gcda files to be in the same directory. + # We overcome this by copying the gcno to $COVERAGE_DIR where the gcda + # files are expected to be. + if [ ! -f "${COVERAGE_DIR}/${gcno_path}" ]; then + mkdir -p "${COVERAGE_DIR}/$(dirname ${gcno_path})" + cp "$ROOT/${gcno_path}" "${COVERAGE_DIR}/${gcno_path}" + fi + # Invoke gcov to generate a code coverage report with the flags: + # -i Output gcov file in an intermediate text format. + # The output is a single .gcov file per .gcda file. + # No source code is required. + # -o directory The directory containing the .gcno and + # .gcda data files. + # "${gcda"} The input file name. gcov is looking for data files + # named after the input filename without its extension. + # gcov produces files called .gcov in the current + # directory. These contain the coverage information of the source file + # they correspond to. One .gcov file is produced for each source + # (or header) file containing code which was compiled to produce the + # .gcda files. 
+ # Don't generate branch coverage (-b) because of a gcov issue that + # segfaults when both -i and -b are used (see + # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=84879). + "${GCOV}" -i $COVERAGE_GCOV_OPTIONS -o "$(dirname ${gcda})" "${gcda}" + + # Append all .gcov files in the current directory to the output file. + cat *.gcov >> "$output_file" + # Delete the .gcov files. + rm *.gcov + fi + fi + done < "${COVERAGE_MANIFEST}" +} + +function main() { + init_gcov + + # If llvm code coverage is used, we output the raw code coverage report in + # the $COVERAGE_OUTPUT_FILE. This report will not be converted to any other + # format by LcovMerger. + # TODO(#5881): Convert profdata reports to lcov. + if uses_llvm; then + BAZEL_CC_COVERAGE_TOOL="PROFDATA" + fi + + # When using either gcov or lcov, have an output file specific to the test + # and format used. For lcov we generate a ".dat" output file and for gcov + # a ".gcov" output file. It is important that these files are generated under + # COVERAGE_DIR. + # When this script is invoked by tools/test/collect_coverage.sh either of + # these two coverage reports will be picked up by LcovMerger and their + # content will be converted and/or merged with other reports to an lcov + # format, generating the final code coverage report. 
+ case "$BAZEL_CC_COVERAGE_TOOL" in + ("GCOV") gcov_coverage "$COVERAGE_DIR/_cc_coverage.gcov" ;; + ("PROFDATA") llvm_coverage "$COVERAGE_DIR/_cc_coverage.dat" ;; + (*) echo "Coverage tool $BAZEL_CC_COVERAGE_TOOL not supported" \ + && exit 1 + esac +} + +main diff --git a/bazel/coverage/fuzz_coverage_wrapper.sh b/bazel/coverage/fuzz_coverage_wrapper.sh new file mode 100755 index 0000000000000..0510befd60bc4 --- /dev/null +++ b/bazel/coverage/fuzz_coverage_wrapper.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +set -x + +TEST_BINARY=$1 +shift + +# Clear existing corpus if previous run wasn't in sandbox +rm -rf fuzz_corpus + +mkdir -p fuzz_corpus/seed_corpus +cp -r $@ fuzz_corpus/seed_corpus + +# TODO(asraa): When fuzz targets are stable, remove error suppression and run coverage while fuzzing. +LLVM_PROFILE_FILE= ${TEST_BINARY} fuzz_corpus -seed=${FUZZ_CORPUS_SEED:-1} -max_total_time=${FUZZ_CORPUS_TIME:-60} -max_len=2048 || true + +${TEST_BINARY} fuzz_corpus -runs=0 diff --git a/bazel/dependency_imports.bzl b/bazel/dependency_imports.bzl index 051923e315a4a..56aa348f4be00 100644 --- a/bazel/dependency_imports.bzl +++ b/bazel/dependency_imports.bzl @@ -1,12 +1,16 @@ load("@rules_foreign_cc//:workspace_definitions.bzl", "rules_foreign_cc_dependencies") load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_dependencies") load("@envoy_build_tools//toolchains:rbe_toolchains_config.bzl", "rbe_toolchains_config") +load("@bazel_toolchains//rules/exec_properties:exec_properties.bzl", "create_rbe_exec_properties_dict", "custom_exec_properties") load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies", "go_repository") load("@build_bazel_rules_apple//apple:repositories.bzl", "apple_rules_dependencies") load("@upb//bazel:repository_defs.bzl", upb_bazel_version_repository = "bazel_version_repository") +load("@config_validation_pip3//:requirements.bzl", config_validation_pip_install = "pip_install") +load("@protodoc_pip3//:requirements.bzl", protodoc_pip_install = 
"pip_install") +load("@rules_antlr//antlr:deps.bzl", "antlr_dependencies") # go version for rules_go -GO_VERSION = "1.13.5" +GO_VERSION = "1.14.7" def envoy_dependency_imports(go_version = GO_VERSION): rules_foreign_cc_dependencies() @@ -16,13 +20,21 @@ def envoy_dependency_imports(go_version = GO_VERSION): gazelle_dependencies() apple_rules_dependencies() upb_bazel_version_repository(name = "upb_bazel_version") + antlr_dependencies(471) + + custom_exec_properties( + name = "envoy_large_machine_exec_property", + constants = { + "LARGE_MACHINE": create_rbe_exec_properties_dict(labels = dict(size = "large")), + }, + ) go_repository( name = "org_golang_google_grpc", build_file_proto_mode = "disable", importpath = "google.golang.org/grpc", - sum = "h1:AzbTB6ux+okLTzP8Ru1Xs41C303zdcfEht7MQnYJt5A=", - version = "v1.23.0", + sum = "h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4=", + version = "v1.29.1", ) go_repository( @@ -38,3 +50,6 @@ def envoy_dependency_imports(go_version = GO_VERSION): sum = "h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=", version = "v0.3.0", ) + + config_validation_pip_install() + protodoc_pip_install() diff --git a/bazel/envoy_binary.bzl b/bazel/envoy_binary.bzl index ac15656af1d88..16adfb38a4396 100644 --- a/bazel/envoy_binary.bzl +++ b/bazel/envoy_binary.bzl @@ -1,3 +1,5 @@ +load("@rules_cc//cc:defs.bzl", "cc_binary") + # DO NOT LOAD THIS FILE. Load envoy_build_system.bzl instead. 
# Envoy binary targets load( @@ -27,7 +29,7 @@ def envoy_cc_binary( linkopts = linkopts + _envoy_stamped_linkopts() deps = deps + _envoy_stamped_deps() deps = deps + [envoy_external_dep_path(dep) for dep in external_deps] + envoy_stdlib_deps() - native.cc_binary( + cc_binary( name = name, srcs = srcs, data = data, @@ -61,6 +63,7 @@ def _envoy_linkopts(): "@envoy//bazel:windows_x86_64": [ "-DEFAULTLIB:advapi32.lib", "-DEFAULTLIB:ws2_32.lib", + "-DEFAULTLIB:iphlpapi.lib", "-WX", ], "//conditions:default": [ diff --git a/bazel/envoy_build_system.bzl b/bazel/envoy_build_system.bzl index 70ef3df4f1d22..548ffff4a7146 100644 --- a/bazel/envoy_build_system.bzl +++ b/bazel/envoy_build_system.bzl @@ -18,6 +18,7 @@ load( _envoy_select_boringssl = "envoy_select_boringssl", _envoy_select_google_grpc = "envoy_select_google_grpc", _envoy_select_hot_restart = "envoy_select_hot_restart", + _envoy_select_new_codecs_in_integration_tests = "envoy_select_new_codecs_in_integration_tests", ) load( ":envoy_test.bzl", @@ -35,6 +36,11 @@ load( def envoy_package(): native.package(default_visibility = ["//visibility:public"]) +def envoy_extension_package(): + # TODO(rgs1): revert this to //:extension_library once + # https://github.com/envoyproxy/envoy/issues/12444 is fixed. + native.package(default_visibility = ["//visibility:public"]) + # A genrule variant that can output a directory. This is useful when doing things like # generating a fuzz corpus mechanically. 
def _envoy_directory_genrule_impl(ctx): @@ -168,6 +174,7 @@ def envoy_google_grpc_external_deps(): envoy_select_boringssl = _envoy_select_boringssl envoy_select_google_grpc = _envoy_select_google_grpc envoy_select_hot_restart = _envoy_select_hot_restart +envoy_select_new_codecs_in_integration_tests = _envoy_select_new_codecs_in_integration_tests # Binary wrappers (from envoy_binary.bzl) envoy_cc_binary = _envoy_cc_binary diff --git a/bazel/envoy_internal.bzl b/bazel/envoy_internal.bzl index 4406f08c4eda3..07dad501e5a40 100644 --- a/bazel/envoy_internal.bzl +++ b/bazel/envoy_internal.bzl @@ -14,7 +14,6 @@ def envoy_copts(repository, test = False): "-Wformat", "-Wformat-security", "-Wvla", - "-std=c++14", ] # Windows options for cleanest service compilation; @@ -25,7 +24,6 @@ def envoy_copts(repository, test = False): msvc_options = [ "-WX", "-Zc:__cplusplus", - "-std:c++14", "-DWIN32", "-D_WIN32_WINNT=0x0A00", # _WIN32_WINNT_WIN10 "-DNTDDI_VERSION=0x0A000000", # NTDDI_WIN10 @@ -34,6 +32,9 @@ def envoy_copts(repository, test = False): "-DNOMCX", "-DNOIME", "-DNOCRYPT", + # this is to silence the incorrect MSVC compiler warning when trying to convert between + # std::optional data types while conversions between primitive types are producing no error + "-wd4244", ] return select({ @@ -48,9 +49,12 @@ def envoy_copts(repository, test = False): repository + "//bazel:windows_fastbuild_build": [], repository + "//bazel:windows_dbg_build": [], }) + select({ - repository + "//bazel:clang_build": ["-fno-limit-debug-info", "-Wgnu-conditional-omitted-operand"], + repository + "//bazel:clang_build": ["-fno-limit-debug-info", "-Wgnu-conditional-omitted-operand", "-Wc++2a-extensions"], repository + "//bazel:gcc_build": ["-Wno-maybe-uninitialized"], "//conditions:default": [], + }) + select({ + repository + "//bazel:no_debug_info": ["-g0"], + "//conditions:default": [], }) + select({ repository + "//bazel:disable_tcmalloc": ["-DABSL_MALLOC_HOOK_MMAP_DISABLE"], 
"//conditions:default": ["-DTCMALLOC"], diff --git a/bazel/envoy_library.bzl b/bazel/envoy_library.bzl index 69453fd1b6fdc..25b4c6ba17d75 100644 --- a/bazel/envoy_library.bzl +++ b/bazel/envoy_library.bzl @@ -1,3 +1,5 @@ +load("@rules_cc//cc:defs.bzl", "cc_library") + # DO NOT LOAD THIS FILE. Load envoy_build_system.bzl instead. # Envoy library targets load( @@ -6,7 +8,6 @@ load( "envoy_external_dep_path", "envoy_linkstatic", ) -load("@com_google_protobuf//:protobuf.bzl", "cc_proto_library", "py_proto_library") load("@envoy_api//bazel:api_build_system.bzl", "api_cc_py_proto_library") # As above, but wrapped in list form for adding to dep lists. This smell seems needed as @@ -20,10 +21,10 @@ def tcmalloc_external_deps(repository): # Envoy C++ library targets that need no transformations or additional dependencies before being # passed to cc_library should be specified with this function. Note: this exists to ensure that -# all envoy targets pass through an envoy-declared skylark function where they can be modified +# all envoy targets pass through an envoy-declared starlark function where they can be modified # before being passed to a native bazel function. def envoy_basic_cc_library(name, deps = [], external_deps = [], **kargs): - native.cc_library( + cc_library( name = name, deps = deps + [envoy_external_dep_path(dep) for dep in external_deps], **kargs @@ -69,12 +70,15 @@ def envoy_cc_extension( undocumented = False, status = "stable", tags = [], + # TODO(rgs1): revert this to //:extension_config once + # https://github.com/envoyproxy/envoy/issues/12444 is fixed. 
+ visibility = ["//visibility:public"], **kwargs): if security_posture not in EXTENSION_SECURITY_POSTURES: fail("Unknown extension security posture: " + security_posture) if status not in EXTENSION_STATUS_VALUES: fail("Unknown extension status: " + status) - envoy_cc_library(name, tags = tags, **kwargs) + envoy_cc_library(name, tags = tags, visibility = visibility, **kwargs) # Envoy C++ library targets should be specified with this function. def envoy_cc_library( @@ -86,7 +90,6 @@ def envoy_cc_library( external_deps = [], tcmalloc_dep = None, repository = "", - linkstamp = None, tags = [], deps = [], strip_include_prefix = None, @@ -94,15 +97,7 @@ def envoy_cc_library( if tcmalloc_dep: deps += tcmalloc_external_deps(repository) - # Intended for compilation database generation. This generates an empty cc - # source file so Bazel generates virtual includes and recognize them as C++. - # Workaround for https://github.com/bazelbuild/bazel/issues/10845. - srcs += select({ - "@envoy//bazel:compdb_build": ["@envoy//bazel/external:empty.cc"], - "//conditions:default": [], - }) - - native.cc_library( + cc_library( name = name, srcs = srcs, hdrs = hdrs, @@ -122,16 +117,12 @@ def envoy_cc_library( include_prefix = envoy_include_prefix(native.package_name()), alwayslink = 1, linkstatic = envoy_linkstatic(), - linkstamp = select({ - repository + "//bazel:windows_x86_64": None, - "//conditions:default": linkstamp, - }), strip_include_prefix = strip_include_prefix, ) # Intended for usage by external consumers. 
This allows them to disambiguate # include paths via `external/envoy...` - native.cc_library( + cc_library( name = name + "_with_external_headers", hdrs = hdrs, copts = envoy_copts(repository) + copts, diff --git a/bazel/envoy_select.bzl b/bazel/envoy_select.bzl index f2167f29bec43..107ad2a21bde7 100644 --- a/bazel/envoy_select.bzl +++ b/bazel/envoy_select.bzl @@ -31,3 +31,10 @@ def envoy_select_hot_restart(xs, repository = ""): repository + "//bazel:disable_hot_restart_or_apple": [], "//conditions:default": xs, }) + +# Select the given values if use legacy codecs in test is on in the current build. +def envoy_select_new_codecs_in_integration_tests(xs, repository = ""): + return select({ + repository + "//bazel:enable_new_codecs_in_integration_tests": xs, + "//conditions:default": [], + }) diff --git a/bazel/envoy_test.bzl b/bazel/envoy_test.bzl index ca0b430c16f0b..21f83d1980deb 100644 --- a/bazel/envoy_test.bzl +++ b/bazel/envoy_test.bzl @@ -1,3 +1,6 @@ +load("@rules_python//python:defs.bzl", "py_binary") +load("@rules_cc//cc:defs.bzl", "cc_binary", "cc_library", "cc_test") + # DO NOT LOAD THIS FILE. Load envoy_build_system.bzl instead. # Envoy test targets. This includes both test library and test binary targets. load("@bazel_tools//tools/build_defs/pkg:pkg.bzl", "pkg_tar") @@ -29,8 +32,7 @@ def _envoy_cc_test_infrastructure_library( **kargs): # Add implicit tcmalloc external dependency(if available) in order to enable CPU and heap profiling in tests. 
deps += tcmalloc_external_deps(repository) - - native.cc_library( + cc_library( name = name, srcs = srcs, hdrs = hdrs, @@ -58,6 +60,7 @@ def _envoy_test_linkopts(): "@envoy//bazel:windows_x86_64": [ "-DEFAULTLIB:advapi32.lib", "-DEFAULTLIB:ws2_32.lib", + "-DEFAULTLIB:iphlpapi.lib", "-WX", ], @@ -99,36 +102,47 @@ def envoy_cc_fuzz_test( name = test_lib_name, deps = deps + envoy_stdlib_deps() + [ repository + "//test/fuzz:fuzz_runner_lib", + repository + "//test/test_common:test_version_linkstamp", ], repository = repository, tags = tags, **kwargs ) - native.cc_test( + cc_test( name = name, copts = fuzz_copts + envoy_copts("@envoy", test = True), - linkopts = _envoy_test_linkopts(), - linkstatic = 1, - args = ["$(locations %s)" % corpus_name], + linkopts = _envoy_test_linkopts() + select({ + "@envoy//bazel:libfuzzer": ["-fsanitize=fuzzer"], + "//conditions:default": [], + }), + linkstatic = envoy_linkstatic(), + args = select({ + "@envoy//bazel:libfuzzer_coverage": ["$(locations %s)" % corpus_name], + "@envoy//bazel:libfuzzer": [], + "//conditions:default": ["$(locations %s)" % corpus_name], + }), data = [corpus_name], # No fuzzing on macOS or Windows deps = select({ "@envoy//bazel:apple": [repository + "//test:dummy_main"], "@envoy//bazel:windows_x86_64": [repository + "//test:dummy_main"], + "@envoy//bazel:libfuzzer": [ + ":" + test_lib_name, + ], "//conditions:default": [ ":" + test_lib_name, repository + "//test/fuzz:main", ], }), size = size, - tags = tags, + tags = ["fuzz_target"] + tags, ) # This target exists only for # https://github.com/google/oss-fuzz/blob/master/projects/envoy/build.sh. It won't yield # anything useful on its own, as it expects to be run in an environment where the linker options # provide a path to FuzzingEngine. 
- native.cc_binary( + cc_binary( name = name + "_driverless", copts = fuzz_copts + envoy_copts("@envoy", test = True), linkopts = ["-lFuzzingEngine"] + _envoy_test_linkopts(), @@ -138,17 +152,6 @@ def envoy_cc_fuzz_test( tags = ["manual"] + tags, ) - native.cc_test( - name = name + "_with_libfuzzer", - copts = fuzz_copts + envoy_copts("@envoy", test = True), - linkopts = ["-fsanitize=fuzzer"] + _envoy_test_linkopts(), - linkstatic = 1, - testonly = 1, - data = [corpus_name], - deps = [":" + test_lib_name], - tags = ["manual", "fuzzer"] + tags, - ) - # Envoy C++ test targets should be specified with this function. def envoy_cc_test( name, @@ -166,33 +169,18 @@ def envoy_cc_test( local = False, size = "medium", flaky = False): - if coverage: - coverage_tags = tags + ["coverage_test_lib"] - else: - coverage_tags = tags - _envoy_cc_test_infrastructure_library( - name = name + "_lib_internal_only", + coverage_tags = tags + ([] if coverage else ["nocoverage"]) + cc_test( + name = name, srcs = srcs, data = data, - external_deps = external_deps, - deps = deps + [repository + "//test/test_common:printers_includes"], - repository = repository, - tags = coverage_tags, - copts = copts, - # Allow public visibility so these can be consumed in coverage tests in external projects. 
- visibility = ["//visibility:public"], - ) - if coverage: - coverage_tags = tags + ["coverage_test"] - native.cc_test( - name = name, copts = envoy_copts(repository, test = True) + copts, linkopts = _envoy_test_linkopts(), linkstatic = envoy_linkstatic(), malloc = tcmalloc_external_dep(repository), - deps = envoy_stdlib_deps() + [ - ":" + name + "_lib_internal_only", + deps = envoy_stdlib_deps() + deps + [envoy_external_dep_path(dep) for dep in external_deps + ["googletest"]] + [ repository + "//test:main", + repository + "//test/test_common:test_version_linkstamp", ], # from https://github.com/google/googletest/blob/6e1970e2376c14bf658eb88f655a054030353f9f/googlemock/src/gmock.cc#L51 # 2 - by default, mocks act as StrictMocks. @@ -222,11 +210,6 @@ def envoy_cc_test_library( repository + "//test/test_common:printers_includes", ] - # Same as envoy_cc_library - srcs += select({ - "@envoy//bazel:compdb_build": ["@envoy//bazel/external:empty.cc"], - "//conditions:default": [], - }) _envoy_cc_test_infrastructure_library( name, srcs, @@ -246,16 +229,21 @@ def envoy_cc_test_library( def envoy_cc_test_binary( name, tags = [], + deps = [], **kargs): envoy_cc_binary( name, testonly = 1, linkopts = _envoy_test_linkopts(), tags = tags + ["compilation_db_dep"], + deps = deps + [ + "@envoy//test/test_common:test_version_linkstamp", + ], **kargs ) -# Envoy benchmark binaries should be specified with this function. +# Envoy benchmark binaries should be specified with this function. bazel run +# these targets to measure performance. def envoy_cc_benchmark_binary( name, deps = [], @@ -266,17 +254,21 @@ def envoy_cc_benchmark_binary( **kargs ) -# Tests to validate that Envoy benchmarks run successfully should be specified with this function. +# Tests to validate that Envoy benchmarks run successfully should be specified +# with this function. Not for actual performance measurements: iteratons and +# expensive benchmarks will be skipped in the interest of execution time. 
def envoy_benchmark_test( name, benchmark_binary, data = [], + tags = [], **kargs): native.sh_test( name = name, srcs = ["//bazel:test_for_benchmark_wrapper.sh"], data = [":" + benchmark_binary] + data, args = ["%s/%s" % (native.package_name(), benchmark_binary)], + tags = tags + ["nocoverage"], **kargs ) @@ -286,7 +278,7 @@ def envoy_py_test_binary( external_deps = [], deps = [], **kargs): - native.py_binary( + py_binary( name = name, deps = deps + [envoy_external_dep_path(dep) for dep in external_deps], **kargs @@ -302,9 +294,12 @@ def envoy_sh_test( srcs = [], data = [], coverage = True, + cc_binary = [], tags = [], **kargs): if coverage: + if cc_binary == []: + fail("cc_binary is required for coverage-enabled test.") test_runner_cc = name + "_test_runner.cc" native.genrule( name = name + "_gen_test_runner", @@ -313,18 +308,21 @@ def envoy_sh_test( cmd = "$(location //bazel:gen_sh_test_runner.sh) $(SRCS) >> $@", tools = ["//bazel:gen_sh_test_runner.sh"], ) - envoy_cc_test_library( - name = name + "_lib", + envoy_cc_test( + name = name, srcs = [test_runner_cc], - data = srcs + data, - tags = tags + ["coverage_test_lib"], - deps = ["//test/test_common:environment_lib"], + data = srcs + data + cc_binary, + tags = tags, + deps = ["//test/test_common:environment_lib"] + cc_binary, + **kargs + ) + + else: + native.sh_test( + name = name, + srcs = ["//bazel:sh_test_wrapper.sh"], + data = srcs + data + cc_binary, + args = srcs, + tags = tags + ["nocoverage"], + **kargs ) - native.sh_test( - name = name, - srcs = ["//bazel:sh_test_wrapper.sh"], - data = srcs + data, - args = srcs, - tags = tags, - **kargs - ) diff --git a/bazel/external/BUILD b/bazel/external/BUILD index 11dabbc90ceeb..719adb21855a8 100644 --- a/bazel/external/BUILD +++ b/bazel/external/BUILD @@ -1,3 +1,5 @@ +load("@rules_cc//cc:defs.bzl", "cc_library") + licenses(["notice"]) # Apache 2 # Use a wrapper cc_library with an empty source source file to force diff --git a/bazel/external/apache_thrift.BUILD 
b/bazel/external/apache_thrift.BUILD index 02cbf535514d6..db12d91f0b841 100644 --- a/bazel/external/apache_thrift.BUILD +++ b/bazel/external/apache_thrift.BUILD @@ -1,3 +1,5 @@ +load("@rules_python//python:defs.bzl", "py_library") + licenses(["notice"]) # Apache 2 # The apache-thrift distribution does not keep the thrift files in a directory with the diff --git a/bazel/external/boringssl_fips.BUILD b/bazel/external/boringssl_fips.BUILD index 6add632b3a347..7b913e4136144 100644 --- a/bazel/external/boringssl_fips.BUILD +++ b/bazel/external/boringssl_fips.BUILD @@ -1,7 +1,8 @@ -licenses(["notice"]) # Apache 2 - +load("@rules_cc//cc:defs.bzl", "cc_library") load(":genrule_cmd.bzl", "genrule_cmd") +licenses(["notice"]) # Apache 2 + cc_library( name = "crypto", srcs = [ diff --git a/bazel/external/boringssl_fips.genrule_cmd b/bazel/external/boringssl_fips.genrule_cmd index cff25f0f084ee..25455c91e564e 100644 --- a/bazel/external/boringssl_fips.genrule_cmd +++ b/bazel/external/boringssl_fips.genrule_cmd @@ -2,8 +2,8 @@ set -e -# BoringSSL build as described in the Security Policy for BoringCrypto module (2018-10-25): -# https://csrc.nist.gov/CSRC/media/projects/cryptographic-module-validation-program/documents/security-policies/140sp3318.pdf +# BoringSSL build as described in the Security Policy for BoringCrypto module (2020-07-02): +# https://csrc.nist.gov/CSRC/media/projects/cryptographic-module-validation-program/documents/security-policies/140sp3678.pdf # This works only on Linux-x86_64. if [[ `uname` != "Linux" || `uname -m` != "x86_64" ]]; then @@ -16,16 +16,16 @@ ROOT=$$(dirname $(rootpath boringssl/BUILDING.md))/.. 
pushd $$ROOT # Build tools requirements: -# - Clang compiler version 6.0.1 (https://releases.llvm.org/download.html) -# - Go programming language version 1.10.3 (https://golang.org/dl/) -# - Ninja build system version 1.8.2 (https://github.com/ninja-build/ninja/releases) +# - Clang compiler version 7.0.1 (https://releases.llvm.org/download.html) +# - Go programming language version 1.12.7 (https://golang.org/dl/) +# - Ninja build system version 1.9.0 (https://github.com/ninja-build/ninja/releases) # Override $$PATH for build tools, to avoid picking up anything else. export PATH="$$(dirname `which cmake`):/usr/bin:/bin" -# Clang 6.0.1 -VERSION=6.0.1 -SHA256=7ea204ecd78c39154d72dfc0d4a79f7cce1b2264da2551bb2eef10e266d54d91 +# Clang 7.0.1 +VERSION=7.0.1 +SHA256=02ad925add5b2b934d64c3dd5cbd1b2002258059f7d962993ba7f16524c3089c PLATFORM="x86_64-linux-gnu-ubuntu-16.04" curl -sLO https://releases.llvm.org/"$$VERSION"/clang+llvm-"$$VERSION"-"$$PLATFORM".tar.xz \ @@ -41,26 +41,27 @@ if [[ `clang --version | head -1 | awk '{print $$3}'` != "$$VERSION" ]]; then exit 1 fi -# Go 1.10.3 -VERSION=1.10.3 -SHA256=fa1b0e45d3b647c252f51f5e1204aba049cde4af177ef9f2181f43004f901035 +# Go 1.12.7 +VERSION=1.12.7 +SHA256=66d83bfb5a9ede000e33c6579a91a29e6b101829ad41fffb5c5bb6c900e109d9 PLATFORM="linux-amd64" curl -sLO https://dl.google.com/go/go"$$VERSION"."$$PLATFORM".tar.gz \ && echo "$$SHA256" go"$$VERSION"."$$PLATFORM".tar.gz | sha256sum --check tar xf go"$$VERSION"."$$PLATFORM".tar.gz +export GOPATH="$$PWD/gopath" export GOROOT="$$PWD/go" -export PATH="$$GOROOT/bin:$$PATH" +export PATH="$$GOPATH/bin:$$GOROOT/bin:$$PATH" if [[ `go version | awk '{print $$3}'` != "go$$VERSION" ]]; then echo "ERROR: Go version doesn't match." 
exit 1 fi -# Ninja 1.8.2 -VERSION=1.8.2 -SHA256=d2fea9ff33b3ef353161ed906f260d565ca55b8ca0568fa07b1d2cab90a84a07 +# Ninja 1.9.0 +VERSION=1.9.0 +SHA256=1b1235f2b0b4df55ac6d80bbe681ea3639c9d2c505c7ff2159a3daf63d196305 PLATFORM="linux" curl -sLO https://github.com/ninja-build/ninja/releases/download/v"$$VERSION"/ninja-"$$PLATFORM".zip \ diff --git a/bazel/external/boringssl_fips.patch b/bazel/external/boringssl_fips.patch new file mode 100644 index 0000000000000..37247dc2f5c53 --- /dev/null +++ b/bazel/external/boringssl_fips.patch @@ -0,0 +1,18 @@ +# Fix FIPS build (from BoringSSL commit 4ca15d5dcbe6e8051a4654df7c971ea8307abfe0). +# +# The modulewrapper is not a part of the FIPS module, so it can be patched without +# concern about breaking the FIPS validation. +--- boringssl/util/fipstools/acvp/modulewrapper/modulewrapper.cc ++++ boringssl/util/fipstools/acvp/modulewrapper/modulewrapper.cc +@@ -12,9 +12,11 @@ + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ + ++#include + #include + + #include ++#include + #include + #include + #include diff --git a/bazel/external/compiler_rt.BUILD b/bazel/external/compiler_rt.BUILD index 96d90b46ab23d..666c4eca06a0e 100644 --- a/bazel/external/compiler_rt.BUILD +++ b/bazel/external/compiler_rt.BUILD @@ -1,7 +1,40 @@ +load("@rules_cc//cc:defs.bzl", "cc_library") + licenses(["notice"]) # Apache 2 cc_library( name = "fuzzed_data_provider", - hdrs = ["fuzzer/utils/FuzzedDataProvider.h"], + hdrs = ["include/fuzzer/FuzzedDataProvider.h"], + strip_include_prefix = "include", + visibility = ["//visibility:public"], +) + +libfuzzer_copts = [ + "-fno-sanitize=address,thread,undefined", + "-fsanitize-coverage=0", + "-O3", +] + +cc_library( + name = "libfuzzer_main", + srcs = ["lib/fuzzer/FuzzerMain.cpp"], + copts = libfuzzer_copts, + visibility = ["//visibility:public"], + deps = [":libfuzzer_no_main"], + alwayslink = True, +) + +cc_library( + name = "libfuzzer_no_main", + srcs = glob( + ["lib/fuzzer/Fuzzer*.cpp"], + exclude = ["lib/fuzzer/FuzzerMain.cpp"], + ), + hdrs = glob([ + "lib/fuzzer/Fuzzer*.h", + "lib/fuzzer/Fuzzer*.def", + ]), + copts = libfuzzer_copts, visibility = ["//visibility:public"], + alwayslink = True, ) diff --git a/bazel/external/fmtlib.BUILD b/bazel/external/fmtlib.BUILD index 7ac5ecceffbd5..da85ce22ad8f0 100644 --- a/bazel/external/fmtlib.BUILD +++ b/bazel/external/fmtlib.BUILD @@ -1,10 +1,9 @@ +load("@rules_cc//cc:defs.bzl", "cc_library") + licenses(["notice"]) # Apache 2 cc_library( name = "fmtlib", - srcs = glob([ - "fmt/*.cc", - ]), hdrs = glob([ "include/fmt/*.h", ]), diff --git a/bazel/external/googleurl.patch b/bazel/external/googleurl.patch new file mode 100644 index 0000000000000..fb33ca4475fb1 --- /dev/null +++ b/bazel/external/googleurl.patch @@ -0,0 +1,52 @@ +# TODO(dio): Consider to remove this patch when we have the ability to compile the project using +# clang-cl. Tracked in https://github.com/envoyproxy/envoy/issues/11974. 
+ +diff --git a/base/compiler_specific.h b/base/compiler_specific.h +index 0cd36dc..8c4cbd4 100644 +--- a/base/compiler_specific.h ++++ b/base/compiler_specific.h +@@ -7,10 +7,6 @@ + + #include "build/build_config.h" + +-#if defined(COMPILER_MSVC) && !defined(__clang__) +-#error "Only clang-cl is supported on Windows, see https://crbug.com/988071" +-#endif +- + // Annotate a variable indicating it's ok if the variable is not used. + // (Typically used to silence a compiler warning when the assignment + // is important for some other reason.) +@@ -55,8 +51,12 @@ + // prevent code folding, see gurl_base::debug::Alias. + // Use like: + // void NOT_TAIL_CALLED FooBar(); +-#if defined(__clang__) && __has_attribute(not_tail_called) ++#if defined(__clang__) ++#if defined(__has_attribute) ++#if __has_attribute(not_tail_called) + #define NOT_TAIL_CALLED __attribute__((not_tail_called)) ++#endif ++#endif + #else + #define NOT_TAIL_CALLED + #endif +@@ -226,7 +226,9 @@ + #endif + #endif + +-#if defined(__clang__) && __has_attribute(uninitialized) ++#if defined(__clang__) ++#if defined(__has_attribute) ++#if __has_attribute(uninitialized) + // Attribute "uninitialized" disables -ftrivial-auto-var-init=pattern for + // the specified variable. + // Library-wide alternative is +@@ -257,6 +259,8 @@ + // E.g. platform, bot, benchmark or test name in patch description or next to + // the attribute. 
+ #define STACK_UNINITIALIZED __attribute__((uninitialized)) ++#endif ++#endif + #else + #define STACK_UNINITIALIZED + #endif diff --git a/bazel/external/http-parser.BUILD b/bazel/external/http-parser.BUILD index 303950d7c00b6..5fefacde47dcf 100644 --- a/bazel/external/http-parser.BUILD +++ b/bazel/external/http-parser.BUILD @@ -1,3 +1,5 @@ +load("@rules_cc//cc:defs.bzl", "cc_library") + licenses(["notice"]) # Apache 2 cc_library( diff --git a/bazel/external/icuuc.BUILD b/bazel/external/icuuc.BUILD new file mode 100644 index 0000000000000..305d0db952b1d --- /dev/null +++ b/bazel/external/icuuc.BUILD @@ -0,0 +1,55 @@ +load("@rules_cc//cc:defs.bzl", "cc_library") + +licenses(["notice"]) # Apache 2 + +exports_files(["LICENSE"]) + +icuuc_copts = [ + "-DU_STATIC_IMPLEMENTATION", + "-DU_COMMON_IMPLEMENTATION", + "-DU_HAVE_STD_ATOMICS", +] + select({ + "@envoy//bazel:apple": [ + "-Wno-shorten-64-to-32", + "-Wno-unused-variable", + ], + "@envoy//bazel:windows_x86_64": [ + "/utf-8", + "/DLOCALE_ALLOW_NEUTRAL_NAMES=0", + ], + # TODO(dio): Add "@envoy//bazel:android" when we have it. 
+ # "@envoy//bazel:android": [ + # "-fdata-sections", + # "-DU_HAVE_NL_LANGINFO_CODESET=0", + # "-Wno-deprecated-declarations", + # ], + "//conditions:default": [], +}) + +cc_library( + name = "headers", + hdrs = glob(["source/common/unicode/*.h"]), + includes = ["source/common"], + visibility = ["//visibility:public"], +) + +cc_library( + name = "common", + hdrs = glob(["source/common/unicode/*.h"]), + includes = ["source/common"], + visibility = ["//visibility:public"], + deps = [":icuuc"], +) + +cc_library( + name = "icuuc", + srcs = glob([ + "source/common/*.c", + "source/common/*.cpp", + "source/stubdata/*.cpp", + ]), + hdrs = glob(["source/common/*.h"]), + copts = icuuc_copts, + visibility = ["//visibility:private"], + deps = [":headers"], +) diff --git a/bazel/external/jinja.BUILD b/bazel/external/jinja.BUILD index f7ce6718caeb9..4ca60460e41df 100644 --- a/bazel/external/jinja.BUILD +++ b/bazel/external/jinja.BUILD @@ -1,3 +1,5 @@ +load("@rules_python//python:defs.bzl", "py_library") + licenses(["notice"]) # Apache 2 py_library( diff --git a/bazel/external/libcircllhist.BUILD b/bazel/external/libcircllhist.BUILD index a77269ef60b02..4dff51012671b 100644 --- a/bazel/external/libcircllhist.BUILD +++ b/bazel/external/libcircllhist.BUILD @@ -1,3 +1,5 @@ +load("@rules_cc//cc:defs.bzl", "cc_library") + licenses(["notice"]) # Apache 2 cc_library( diff --git a/bazel/external/libprotobuf_mutator.BUILD b/bazel/external/libprotobuf_mutator.BUILD index 12fd8b49b51f6..697a3c6334a4a 100644 --- a/bazel/external/libprotobuf_mutator.BUILD +++ b/bazel/external/libprotobuf_mutator.BUILD @@ -1,3 +1,5 @@ +load("@rules_cc//cc:defs.bzl", "cc_library") + licenses(["notice"]) # Apache 2 cc_library( diff --git a/bazel/external/markupsafe.BUILD b/bazel/external/markupsafe.BUILD index 4d792e1d4ad38..87e2871e9dfc1 100644 --- a/bazel/external/markupsafe.BUILD +++ b/bazel/external/markupsafe.BUILD @@ -1,3 +1,5 @@ +load("@rules_python//python:defs.bzl", "py_library") + licenses(["notice"]) 
# Apache 2 py_library( diff --git a/bazel/external/proxy_wasm_cpp_host.BUILD b/bazel/external/proxy_wasm_cpp_host.BUILD new file mode 100644 index 0000000000000..4cb87cf98ec15 --- /dev/null +++ b/bazel/external/proxy_wasm_cpp_host.BUILD @@ -0,0 +1,37 @@ +load("@rules_cc//cc:defs.bzl", "cc_library") + +licenses(["notice"]) # Apache 2 + +package(default_visibility = ["//visibility:public"]) + +cc_library( + name = "include", + hdrs = glob(["include/proxy-wasm/**/*.h"]), + deps = [ + "@proxy_wasm_cpp_sdk//:common_lib", + ], +) + +cc_library( + name = "lib", + srcs = glob( + [ + "src/**/*.h", + "src/**/*.cc", + ], + exclude = ["src/**/wavm*"], + ), + copts = ["-std=c++14"], + deps = [ + ":include", + "//external:abseil_flat_hash_map", + "//external:abseil_optional", + "//external:abseil_strings", + "//external:protobuf", + "//external:ssl", + "//external:wee8", + "//external:zlib", + "@proxy_wasm_cpp_sdk//:api_lib", + "@proxy_wasm_cpp_sdk//:common_lib", + ], +) diff --git a/bazel/external/quiche.BUILD b/bazel/external/quiche.BUILD index eefb54e2fcd25..b641e9d59e848 100644 --- a/bazel/external/quiche.BUILD +++ b/bazel/external/quiche.BUILD @@ -1,3 +1,13 @@ +load("@rules_cc//cc:defs.bzl", "cc_proto_library") +load("@rules_proto//proto:defs.bzl", "proto_library") +load(":genrule_cmd.bzl", "genrule_cmd") +load( + "@envoy//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_cc_test", + "envoy_cc_test_library", +) + licenses(["notice"]) # Apache 2 # QUICHE is Google's implementation of QUIC and related protocols. It is the @@ -25,16 +35,6 @@ licenses(["notice"]) # Apache 2 # QUICHE platform APIs in //source/extensions/quic_listeners/quiche/platform/, # should remain largely the same. 
-load("@rules_proto//proto:defs.bzl", "proto_library") -load(":genrule_cmd.bzl", "genrule_cmd") -load( - "@envoy//bazel:envoy_build_system.bzl", - "envoy_cc_library", - "envoy_cc_test", - "envoy_cc_test_library", - "envoy_proto_library", -) - src_files = glob([ "**/*.h", "**/*.c", @@ -59,15 +59,29 @@ quiche_copts = select({ # Remove these after upstream fix. "-Wno-unused-parameter", "-Wno-unused-function", - "-Wno-unused-const-variable", - "-Wno-type-limits", + "-Wno-return-type", + "-Wno-unknown-warning-option", + "-Wno-deprecated-copy", + "-Wno-ignored-qualifiers", + "-Wno-sign-compare", + "-Wno-inconsistent-missing-override", # quic_inlined_frame.h uses offsetof() to optimize memory usage in frames. "-Wno-invalid-offsetof", - "-Wno-type-limits", - "-Wno-return-type", + # to suppress errors re: size_t vs. int comparisons + "-Wno-sign-compare", ], }) +test_suite( + name = "ci_tests", + tests = [ + "http2_platform_api_test", + "quic_platform_api_test", + "quiche_common_test", + "spdy_platform_api_test", + ], +) + envoy_cc_test_library( name = "http2_test_tools_random", srcs = ["quiche/http2/test_tools/http2_random.cc"], @@ -1138,6 +1152,17 @@ envoy_cc_test_library( deps = ["@envoy//test/extensions/quic_listeners/quiche/platform:quic_platform_port_utils_impl_lib"], ) +envoy_cc_library( + name = "quic_platform_udp_socket", + hdrs = select({ + "@envoy//bazel:linux": ["quiche/quic/platform/api/quic_udp_socket_platform_api.h"], + "//conditions:default": [], + }), + repository = "@envoy", + tags = ["nofips"], + deps = ["@envoy//source/extensions/quic_listeners/quiche/platform:quic_platform_udp_socket_impl_lib"], +) + envoy_cc_test_library( name = "quic_platform_sleep", hdrs = ["quiche/quic/platform/api/quic_sleep.h"], @@ -1342,6 +1367,125 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "quic_core_batch_writer_batch_writer_buffer_lib", + srcs = select({ + "@envoy//bazel:linux": [ + "quiche/quic/core/batch_writer/quic_batch_writer_buffer.cc", + ], + 
"//conditions:default": [], + }), + hdrs = select({ + "@envoy//bazel:linux": [ + "quiche/quic/core/batch_writer/quic_batch_writer_buffer.h", + ], + "//conditions:default": [], + }), + copts = quiche_copts, + repository = "@envoy", + tags = ["nofips"], + visibility = ["//visibility:public"], + deps = [ + ":quic_core_circular_deque_lib", + ":quic_core_linux_socket_utils_lib", + ":quic_core_packet_writer_interface_lib", + ":quic_platform", + ], +) + +envoy_cc_library( + name = "quic_core_batch_writer_batch_writer_base_lib", + srcs = select({ + "@envoy//bazel:linux": [ + "quiche/quic/core/batch_writer/quic_batch_writer_base.cc", + ], + "//conditions:default": [], + }), + hdrs = select({ + "@envoy//bazel:linux": [ + "quiche/quic/core/batch_writer/quic_batch_writer_base.h", + ], + "//conditions:default": [], + }), + copts = quiche_copts, + repository = "@envoy", + tags = ["nofips"], + visibility = ["//visibility:public"], + deps = [ + ":quic_core_batch_writer_batch_writer_buffer_lib", + ":quic_core_packet_writer_interface_lib", + ":quic_core_types_lib", + ":quic_platform", + ], +) + +envoy_cc_test_library( + name = "quic_core_batch_writer_batch_writer_test_lib", + hdrs = select({ + "@envoy//bazel:linux": [ + "quiche/quic/core/batch_writer/quic_batch_writer_test.h", + ], + "//conditions:default": [], + }), + copts = quiche_copts, + repository = "@envoy", + tags = ["nofips"], + deps = [ + ":quic_core_batch_writer_batch_writer_base_lib", + ":quic_core_udp_socket_lib", + ":quic_platform_test", + ], +) + +envoy_cc_library( + name = "quic_core_batch_writer_gso_batch_writer_lib", + srcs = select({ + "@envoy//bazel:linux": [ + "quiche/quic/core/batch_writer/quic_gso_batch_writer.cc", + ], + "//conditions:default": [], + }), + hdrs = select({ + "@envoy//bazel:linux": [ + "quiche/quic/core/batch_writer/quic_gso_batch_writer.h", + ], + "//conditions:default": [], + }), + copts = quiche_copts, + repository = "@envoy", + tags = ["nofips"], + visibility = ["//visibility:public"], + 
deps = [ + ":quic_core_batch_writer_batch_writer_base_lib", + ":quic_core_linux_socket_utils_lib", + ":quic_platform", + ], +) + +envoy_cc_library( + name = "quic_core_batch_writer_sendmmsg_batch_writer_lib", + srcs = select({ + "@envoy//bazel:linux": [ + "quiche/quic/core/batch_writer/quic_sendmmsg_batch_writer.cc", + ], + "//conditions:default": [], + }), + hdrs = select({ + "@envoy//bazel:linux": [ + "quiche/quic/core/batch_writer/quic_sendmmsg_batch_writer.h", + ], + "//conditions:default": [], + }), + copts = quiche_copts, + repository = "@envoy", + tags = ["nofips"], + visibility = ["//visibility:public"], + deps = [ + ":quic_core_batch_writer_batch_writer_base_lib", + ":quic_core_linux_socket_utils_lib", + ], +) + envoy_cc_library( name = "quic_core_blocked_writer_interface_lib", hdrs = ["quiche/quic/core/quic_blocked_writer_interface.h"], @@ -1486,6 +1630,7 @@ envoy_cc_library( deps = [ ":quic_core_bandwidth_lib", ":quic_core_congestion_control_bandwidth_sampler_lib", + ":quic_core_congestion_control_bbr_lib", ":quic_core_congestion_control_congestion_control_interface_lib", ":quic_core_congestion_control_rtt_stats_lib", ":quic_core_congestion_control_windowed_filter_lib", @@ -1681,7 +1826,10 @@ envoy_cc_library( ":quic_core_crypto_crypto_handshake_lib", ":quic_core_crypto_encryption_lib", ":quic_core_framer_lib", + ":quic_core_idle_network_detector_lib", + ":quic_core_legacy_version_encapsulator_lib", ":quic_core_mtu_discovery_lib", + ":quic_core_network_blackhole_detector_lib", ":quic_core_one_block_arena_lib", ":quic_core_packet_creator_lib", ":quic_core_packet_writer_interface_lib", @@ -1808,6 +1956,37 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "quic_core_crypto_boring_utils_lib", + hdrs = ["quiche/quic/core/crypto/boring_utils.h"], + copts = quiche_copts, + external_deps = ["ssl"], + repository = "@envoy", + tags = ["nofips"], + deps = [ + ":quic_platform_export", + ":quiche_common_platform", + ], +) + +envoy_cc_library( + name = 
"quic_core_crypto_certificate_view_lib", + srcs = ["quiche/quic/core/crypto/certificate_view.cc"], + hdrs = ["quiche/quic/core/crypto/certificate_view.h"], + copts = quiche_copts, + external_deps = ["ssl"], + repository = "@envoy", + tags = ["nofips"], + visibility = ["//visibility:public"], + deps = [ + ":quic_core_crypto_boring_utils_lib", + ":quic_core_types_lib", + ":quic_platform", + ":quic_platform_ip_address", + ":quiche_common_platform", + ], +) + envoy_cc_library( name = "quic_core_crypto_encryption_lib", srcs = [ @@ -1935,6 +2114,7 @@ envoy_cc_library( repository = "@envoy", tags = ["nofips"], deps = [ + ":quic_core_crypto_proof_source_interface_lib", ":quic_core_types_lib", ":quic_platform_base", ], @@ -1979,10 +2159,14 @@ envoy_cc_library( srcs = ["quiche/quic/core/quic_error_codes.cc"], hdrs = ["quiche/quic/core/quic_error_codes.h"], copts = quiche_copts, + external_deps = ["ssl"], repository = "@envoy", tags = ["nofips"], visibility = ["//visibility:public"], - deps = [":quic_platform_export"], + deps = [ + ":quic_platform_base", + ":quic_platform_export", + ], ) envoy_cc_library( @@ -2012,6 +2196,7 @@ envoy_cc_library( name = "quic_core_frames_frames_lib", srcs = [ "quiche/quic/core/frames/quic_ack_frame.cc", + "quiche/quic/core/frames/quic_ack_frequency_frame.cc", "quiche/quic/core/frames/quic_blocked_frame.cc", "quiche/quic/core/frames/quic_connection_close_frame.cc", "quiche/quic/core/frames/quic_crypto_frame.cc", @@ -2036,6 +2221,7 @@ envoy_cc_library( ], hdrs = [ "quiche/quic/core/frames/quic_ack_frame.h", + "quiche/quic/core/frames/quic_ack_frequency_frame.h", "quiche/quic/core/frames/quic_blocked_frame.h", "quiche/quic/core/frames/quic_connection_close_frame.h", "quiche/quic/core/frames/quic_crypto_frame.h", @@ -2133,6 +2319,7 @@ envoy_cc_library( deps = [ ":quic_core_circular_deque_lib", ":quic_core_packets_lib", + ":quic_core_qpack_qpack_header_table_lib", ":quic_platform_base", ":spdy_core_header_block_lib", 
":spdy_core_headers_handler_interface_lib", @@ -2283,6 +2470,23 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "quic_core_idle_network_detector_lib", + srcs = ["quiche/quic/core/quic_idle_network_detector.cc"], + hdrs = ["quiche/quic/core/quic_idle_network_detector.h"], + copts = quiche_copts, + repository = "@envoy", + tags = ["nofips"], + deps = [ + ":quic_core_alarm_factory_interface_lib", + ":quic_core_alarm_interface_lib", + ":quic_core_constants_lib", + ":quic_core_one_block_arena_lib", + ":quic_core_time_lib", + ":quic_platform_export", + ], +) + envoy_cc_library( name = "quic_core_interval_lib", hdrs = ["quiche/quic/core/quic_interval.h"], @@ -2351,6 +2555,85 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "quic_core_syscall_wrapper_lib", + srcs = select({ + "@envoy//bazel:linux": ["quiche/quic/core/quic_syscall_wrapper.cc"], + "//conditions:default": [], + }), + hdrs = select({ + "@envoy//bazel:linux": ["quiche/quic/core/quic_syscall_wrapper.h"], + "//conditions:default": [], + }), + copts = quiche_copts, + repository = "@envoy", + tags = ["nofips"], + deps = [ + ":quic_platform_export", + ], +) + +envoy_cc_library( + name = "quic_core_legacy_version_encapsulator_lib", + srcs = [ + "quiche/quic/core/quic_legacy_version_encapsulator.cc", + ], + hdrs = [ + "quiche/quic/core/quic_legacy_version_encapsulator.h", + ], + copts = quiche_copts, + repository = "@envoy", + tags = ["nofips"], + deps = [ + ":quic_core_crypto_crypto_handshake_lib", + ":quic_core_crypto_encryption_lib", + ":quic_core_packet_creator_lib", + ":quic_core_packets_lib", + ":quic_core_types_lib", + ":quic_core_utils_lib", + ":quic_platform", + ":quiche_common_platform", + ], +) + +envoy_cc_library( + name = "quic_core_linux_socket_utils_lib", + srcs = select({ + "@envoy//bazel:linux": ["quiche/quic/core/quic_linux_socket_utils.cc"], + "//conditions:default": [], + }), + hdrs = select({ + "@envoy//bazel:linux": ["quiche/quic/core/quic_linux_socket_utils.h"], + 
"//conditions:default": [], + }), + copts = quiche_copts, + repository = "@envoy", + tags = ["nofips"], + deps = [ + ":quic_core_packet_writer_interface_lib", + ":quic_core_syscall_wrapper_lib", + ":quic_core_types_lib", + ":quic_platform", + ], +) + +envoy_cc_library( + name = "quic_core_network_blackhole_detector_lib", + srcs = ["quiche/quic/core/quic_network_blackhole_detector.cc"], + hdrs = ["quiche/quic/core/quic_network_blackhole_detector.h"], + repository = "@envoy", + tags = ["nofips"], + visibility = ["//visibility:public"], + deps = [ + ":quic_core_alarm_factory_interface_lib", + ":quic_core_alarm_interface_lib", + ":quic_core_constants_lib", + ":quic_core_one_block_arena_lib", + ":quic_core_time_lib", + ":quic_platform_export", + ], +) + envoy_cc_library( name = "quic_core_packet_creator_lib", srcs = ["quiche/quic/core/quic_packet_creator.cc"], @@ -2460,16 +2743,6 @@ envoy_cc_library( ], ) -envoy_cc_library( - name = "quic_core_qpack_qpack_instructions_lib", - srcs = ["quiche/quic/core/qpack/qpack_instructions.cc"], - hdrs = ["quiche/quic/core/qpack/qpack_instructions.h"], - copts = quiche_copts, - repository = "@envoy", - tags = ["nofips"], - deps = [":quic_platform_base"], -) - envoy_cc_library( name = "quic_core_qpack_qpack_decoder_lib", srcs = ["quiche/quic/core/qpack/qpack_decoder.cc"], @@ -2538,6 +2811,16 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "quic_core_qpack_qpack_instructions_lib", + srcs = ["quiche/quic/core/qpack/qpack_instructions.cc"], + hdrs = ["quiche/quic/core/qpack/qpack_instructions.h"], + copts = quiche_copts, + repository = "@envoy", + tags = ["nofips"], + deps = [":quic_platform_base"], +) + envoy_cc_library( name = "quic_core_qpack_qpack_instruction_encoder_lib", srcs = ["quiche/quic/core/qpack/qpack_instruction_encoder.cc"], @@ -2802,11 +3085,13 @@ envoy_cc_library( "quiche/quic/core/chlo_extractor.cc", "quiche/quic/core/quic_buffered_packet_store.cc", "quiche/quic/core/quic_dispatcher.cc", + 
"quiche/quic/core/tls_chlo_extractor.cc", ], hdrs = [ "quiche/quic/core/chlo_extractor.h", "quiche/quic/core/quic_buffered_packet_store.h", "quiche/quic/core/quic_dispatcher.h", + "quiche/quic/core/tls_chlo_extractor.h", ], copts = quiche_copts, repository = "@envoy", @@ -3097,6 +3382,32 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "quic_core_udp_socket_lib", + srcs = select({ + "@envoy//bazel:windows_x86_64": [], + "//conditions:default": ["quiche/quic/core/quic_udp_socket_posix.cc"], + }), + hdrs = select({ + "@envoy//bazel:windows_x86_64": [], + "//conditions:default": ["quiche/quic/core/quic_udp_socket.h"], + }), + copts = quiche_copts + select({ + # On OSX/iOS, condstants from RFC 3542 (e.g. IPV6_RECVPKTINFO) are not usable + # without this define. + "@envoy//bazel:apple": ["-D__APPLE_USE_RFC_3542"], + "//conditions:default": [], + }), + repository = "@envoy", + tags = ["nofips"], + deps = [ + ":quic_core_types_lib", + ":quic_core_utils_lib", + ":quic_platform", + ":quic_platform_udp_socket", + ], +) + envoy_cc_library( name = "quic_core_unacked_packet_map_lib", srcs = ["quiche/quic/core/quic_unacked_packet_map.cc"], @@ -3178,6 +3489,51 @@ envoy_cc_test_library( ], ) +envoy_cc_test_library( + name = "quic_test_tools_crypto_server_config_peer_lib", + srcs = [ + "quiche/quic/test_tools/quic_crypto_server_config_peer.cc", + ], + hdrs = [ + "quiche/quic/test_tools/quic_crypto_server_config_peer.h", + ], + copts = quiche_copts, + repository = "@envoy", + tags = ["nofips"], + deps = [ + ":quic_core_crypto_crypto_handshake_lib", + ":quic_test_tools_mock_clock_lib", + ":quic_test_tools_mock_random_lib", + ":quic_test_tools_test_utils_interface_lib", + ":quiche_common_platform", + ], +) + +envoy_cc_test_library( + name = "quic_test_tools_first_flight_lib", + srcs = [ + "quiche/quic/test_tools/first_flight.cc", + ], + hdrs = [ + "quiche/quic/test_tools/first_flight.h", + ], + copts = quiche_copts, + repository = "@envoy", + tags = ["nofips"], + deps = [ + 
":quic_core_config_lib", + ":quic_core_connection_lib", + ":quic_core_crypto_crypto_handshake_lib", + ":quic_core_http_client_lib", + ":quic_core_packet_writer_interface_lib", + ":quic_core_packets_lib", + ":quic_core_types_lib", + ":quic_core_versions_lib", + ":quic_platform", + ":quic_test_tools_test_utils_interface_lib", + ], +) + envoy_cc_test_library( name = "quic_test_tools_framer_peer_lib", srcs = ["quiche/quic/test_tools/quic_framer_peer.cc"], @@ -3228,6 +3584,20 @@ envoy_cc_test_library( deps = [":quic_core_crypto_random_lib"], ) +envoy_cc_test_library( + name = "quic_test_tools_mock_syscall_wrapper_lib", + srcs = ["quiche/quic/test_tools/quic_mock_syscall_wrapper.cc"], + hdrs = ["quiche/quic/test_tools/quic_mock_syscall_wrapper.h"], + copts = quiche_copts, + repository = "@envoy", + tags = ["nofips"], + deps = [ + ":quic_core_syscall_wrapper_lib", + ":quic_platform_base", + ":quic_platform_test", + ], +) + envoy_cc_test_library( name = "quic_test_tools_sent_packet_manager_peer_lib", srcs = ["quiche/quic/test_tools/quic_sent_packet_manager_peer.cc"], @@ -3301,6 +3671,19 @@ envoy_cc_test_library( ], ) +envoy_cc_test_library( + name = "quic_test_tools_test_certificates_lib", + srcs = ["quiche/quic/test_tools/test_certificates.cc"], + hdrs = ["quiche/quic/test_tools/test_certificates.h"], + copts = quiche_copts, + repository = "@envoy", + tags = ["nofips"], + deps = [ + ":quic_platform_base", + ":quiche_common_platform", + ], +) + envoy_cc_test_library( name = "quic_test_tools_test_utils_interface_lib", srcs = [ @@ -3362,6 +3745,25 @@ envoy_cc_test_library( ], ) +envoy_cc_test_library( + name = "quic_test_tools_session_peer_lib", + srcs = [ + "quiche/quic/test_tools/quic_session_peer.cc", + ], + hdrs = [ + "quiche/quic/test_tools/quic_session_peer.h", + ], + copts = quiche_copts, + repository = "@envoy", + tags = ["nofips"], + deps = [ + ":quic_core_packets_lib", + ":quic_core_session_lib", + ":quic_core_utils_lib", + ":quic_platform", + ], +) + 
envoy_cc_test_library( name = "quic_test_tools_unacked_packet_map_peer_lib", srcs = ["quiche/quic/test_tools/quic_unacked_packet_map_peer.cc"], @@ -3412,11 +3814,22 @@ envoy_cc_test_library( deps = [":epoll_server_platform"], ) +envoy_cc_library( + name = "quiche_common_platform_optional", + hdrs = ["quiche/common/platform/api/quiche_optional.h"], + repository = "@envoy", + tags = ["nofips"], + visibility = ["//visibility:public"], + deps = [ + ":quiche_common_platform_export", + "@envoy//source/extensions/quic_listeners/quiche/platform:quiche_common_platform_optional_impl_lib", + ], +) + envoy_cc_library( name = "quiche_common_platform", hdrs = [ "quiche/common/platform/api/quiche_arraysize.h", - "quiche/common/platform/api/quiche_export.h", "quiche/common/platform/api/quiche_logging.h", "quiche/common/platform/api/quiche_map_util.h", "quiche/common/platform/api/quiche_optional.h", @@ -3424,6 +3837,7 @@ envoy_cc_library( "quiche/common/platform/api/quiche_str_cat.h", "quiche/common/platform/api/quiche_string_piece.h", "quiche/common/platform/api/quiche_text_utils.h", + "quiche/common/platform/api/quiche_time_utils.h", "quiche/common/platform/api/quiche_unordered_containers.h", ], repository = "@envoy", @@ -3431,6 +3845,7 @@ envoy_cc_library( visibility = ["//visibility:public"], deps = [ ":quiche_common_platform_export", + ":quiche_common_platform_optional", "@envoy//source/extensions/quic_listeners/quiche/platform:quiche_common_platform_impl_lib", ], ) @@ -3441,6 +3856,7 @@ envoy_cc_test_library( "quiche/common/platform/api/quiche_endian_test.cc", "quiche/common/platform/api/quiche_str_cat_test.cc", "quiche/common/platform/api/quiche_text_utils_test.cc", + "quiche/common/platform/api/quiche_time_utils_test.cc", ], hdrs = ["quiche/common/platform/api/quiche_test.h"], repository = "@envoy", @@ -3553,6 +3969,7 @@ envoy_cc_test( name = "spdy_core_header_block_test", srcs = ["quiche/spdy/core/spdy_header_block_test.cc"], copts = quiche_copts, + coverage = False, 
repository = "@envoy", tags = ["nofips"], deps = [ @@ -3584,3 +4001,20 @@ envoy_cc_test( ":quic_platform_test_mem_slice_vector_lib", ], ) + +envoy_cc_test( + name = "quic_core_batch_writer_batch_writer_test", + srcs = select({ + "@envoy//bazel:linux": ["quiche/quic/core/batch_writer/quic_batch_writer_test.cc"], + "//conditions:default": [], + }), + copts = quiche_copts, + repository = "@envoy", + tags = ["nofips"], + deps = [ + ":quic_core_batch_writer_batch_writer_test_lib", + ":quic_core_batch_writer_gso_batch_writer_lib", + ":quic_core_batch_writer_sendmmsg_batch_writer_lib", + ":quic_platform", + ], +) diff --git a/bazel/external/rapidjson.BUILD b/bazel/external/rapidjson.BUILD index 97948eee70722..a74a0fe55d375 100644 --- a/bazel/external/rapidjson.BUILD +++ b/bazel/external/rapidjson.BUILD @@ -1,3 +1,5 @@ +load("@rules_cc//cc:defs.bzl", "cc_library") + licenses(["notice"]) # Apache 2 cc_library( diff --git a/bazel/external/spdlog.BUILD b/bazel/external/spdlog.BUILD index dec2ab43d3b66..41080ccda63fe 100644 --- a/bazel/external/spdlog.BUILD +++ b/bazel/external/spdlog.BUILD @@ -1,9 +1,10 @@ +load("@rules_cc//cc:defs.bzl", "cc_library") + licenses(["notice"]) # Apache 2 cc_library( name = "spdlog", hdrs = glob([ - "include/**/*.cc", "include/**/*.h", ]), defines = ["SPDLOG_FMT_EXTERNAL"], diff --git a/bazel/external/sqlparser.BUILD b/bazel/external/sqlparser.BUILD index 8e14f45e53605..5a12383074f53 100644 --- a/bazel/external/sqlparser.BUILD +++ b/bazel/external/sqlparser.BUILD @@ -1,3 +1,5 @@ +load("@rules_cc//cc:defs.bzl", "cc_library") + licenses(["notice"]) # Apache 2 cc_library( diff --git a/bazel/external/tclap.BUILD b/bazel/external/tclap.BUILD index fabf6c4c3f990..39bd270fd7490 100644 --- a/bazel/external/tclap.BUILD +++ b/bazel/external/tclap.BUILD @@ -1,3 +1,5 @@ +load("@rules_cc//cc:defs.bzl", "cc_library") + licenses(["notice"]) # Apache 2 cc_library( diff --git a/bazel/external/twitter_common_finagle_thrift.BUILD 
b/bazel/external/twitter_common_finagle_thrift.BUILD index ee1d121d77c9e..9121874d43508 100644 --- a/bazel/external/twitter_common_finagle_thrift.BUILD +++ b/bazel/external/twitter_common_finagle_thrift.BUILD @@ -1,3 +1,5 @@ +load("@rules_python//python:defs.bzl", "py_library") + licenses(["notice"]) # Apache 2 py_library( diff --git a/bazel/external/twitter_common_lang.BUILD b/bazel/external/twitter_common_lang.BUILD index 469ee3331cffc..40fac2f1d1ed5 100644 --- a/bazel/external/twitter_common_lang.BUILD +++ b/bazel/external/twitter_common_lang.BUILD @@ -1,3 +1,5 @@ +load("@rules_python//python:defs.bzl", "py_library") + licenses(["notice"]) # Apache 2 py_library( diff --git a/bazel/external/twitter_common_rpc.BUILD b/bazel/external/twitter_common_rpc.BUILD index 8e8622ebb4db6..df79842360bd7 100644 --- a/bazel/external/twitter_common_rpc.BUILD +++ b/bazel/external/twitter_common_rpc.BUILD @@ -1,3 +1,5 @@ +load("@rules_python//python:defs.bzl", "py_library") + licenses(["notice"]) # Apache 2 py_library( diff --git a/bazel/external/wee8.BUILD b/bazel/external/wee8.BUILD index c6e64f43bc3b9..b61f957486727 100644 --- a/bazel/external/wee8.BUILD +++ b/bazel/external/wee8.BUILD @@ -1,7 +1,9 @@ -licenses(["notice"]) # Apache 2 - +load("@rules_cc//cc:defs.bzl", "cc_library") +load("@envoy_large_machine_exec_property//:constants.bzl", "LARGE_MACHINE") load(":genrule_cmd.bzl", "genrule_cmd") +licenses(["notice"]) # Apache 2 + cc_library( name = "wee8", srcs = [ @@ -25,4 +27,5 @@ genrule( "libwee8.a", ], cmd = genrule_cmd("@envoy//bazel/external:wee8.genrule_cmd"), + exec_properties = LARGE_MACHINE, ) diff --git a/bazel/external/wee8.genrule_cmd b/bazel/external/wee8.genrule_cmd index f62997af1f0e8..8cb0e24c5f495 100644 --- a/bazel/external/wee8.genrule_cmd +++ b/bazel/external/wee8.genrule_cmd @@ -19,10 +19,12 @@ pushd $$ROOT/wee8 rm -rf out/wee8 # Export compiler configuration. 
+export CXXFLAGS="$${CXXFLAGS-} -Wno-deprecated-copy -Wno-unknown-warning-option" if [[ ( `uname` == "Darwin" && $${CXX-} == "" ) || $${CXX-} == *"clang"* ]]; then export IS_CLANG=true export CC=$${CC:-clang} export CXX=$${CXX:-clang++} + export CXXFLAGS="$${CXXFLAGS} -Wno-implicit-int-float-conversion -Wno-builtin-assume-aligned-alignment -Wno-final-dtor-non-final-class" else export IS_CLANG=false export CC=$${CC:-gcc} @@ -49,8 +51,17 @@ if [[ $${ENVOY_TSAN-} == "1" ]]; then WEE8_BUILD_ARGS+=" is_tsan=true" fi -# Release build. -WEE8_BUILD_ARGS+=" is_debug=false" +# Debug/release build. +if [[ $(COMPILATION_MODE) == "dbg" && $${ENVOY_UBSAN_VPTR-} != "1" && $${ENVOY_MSAN-} != "1" && $${ENVOY_TSAN-} != "1" ]]; then + WEE8_BUILD_ARGS+=" is_debug=true" + WEE8_BUILD_ARGS+=" v8_symbol_level=2" + WEE8_BUILD_ARGS+=" v8_optimized_debug=false" +else + WEE8_BUILD_ARGS+=" is_debug=false" + WEE8_BUILD_ARGS+=" v8_symbol_level=1" + WEE8_BUILD_ARGS+=" v8_enable_handle_zapping=false" +fi + # Clang or not Clang, that is the question. WEE8_BUILD_ARGS+=" is_clang=$$IS_CLANG" # Hack to disable bleeding-edge compiler flags. @@ -79,17 +90,28 @@ if [[ `uname -m` == "aarch64" ]]; then fi # Build wee8. 
-if [[ "$$(uname -s)" == "Darwin" ]]; then +if [[ -f /etc/centos-release ]] && [[ $$(cat /etc/centos-release) =~ "CentOS Linux release 7" ]] && [[ -x "$$(command -v gn)" ]]; then + # Using system default gn tools + # This is done only for CentOS 7, as it has an old version of GLIBC which is otherwise incompatible + gn=$$(command -v gn) +elif [[ "$$(uname -s)" == "Darwin" ]]; then gn=buildtools/mac/gn - ninja=third_party/depot_tools/ninja elif [[ "$$(uname -s)-$$(uname -m)" == "Linux-x86_64" ]]; then gn=buildtools/linux64/gn - ninja=third_party/depot_tools/ninja else - # Using system default ninja & gn tools + # Using system default gn tools gn=$$(command -v gn) +fi + +if [[ "$$(uname -s)" == "Darwin" ]]; then + ninja=third_party/depot_tools/ninja +elif [[ "$$(uname -s)-$$(uname -m)" == "Linux-x86_64" ]]; then + ninja=third_party/depot_tools/ninja +else + # Using system default ninja tools ninja=$$(command -v ninja) fi + "$$gn" gen out/wee8 --args="$$WEE8_BUILD_ARGS" "$$ninja" -C out/wee8 wee8 diff --git a/bazel/external/wee8.patch b/bazel/external/wee8.patch index 3f95bc83926a2..ad1c20b6c00b9 100644 --- a/bazel/external/wee8.patch +++ b/bazel/external/wee8.patch @@ -1,9 +1,9 @@ # 1. Fix linking with unbundled toolchain on macOS. -# 2. Increase VSZ limit to 4TiB (allows us to start up to 370 VMs). +# 2. Increase VSZ limit to 4TiB (allows us to start up to 409 VMs). # 3. Fix MSAN linking. --- wee8/build/toolchain/gcc_toolchain.gni +++ wee8/build/toolchain/gcc_toolchain.gni -@@ -355,6 +355,8 @@ template("gcc_toolchain") { +@@ -329,6 +329,8 @@ template("gcc_toolchain") { # AIX does not support either -D (deterministic output) or response # files. 
command = "$ar -X64 {{arflags}} -r -c -s {{output}} {{inputs}}" @@ -12,7 +12,7 @@ } else { rspfile = "{{output}}.rsp" rspfile_content = "{{inputs}}" -@@ -546,7 +548,7 @@ template("gcc_toolchain") { +@@ -507,7 +509,7 @@ template("gcc_toolchain") { start_group_flag = "" end_group_flag = "" @@ -21,9 +21,9 @@ # the "--start-group .. --end-group" feature isn't available on the aix ld. start_group_flag = "-Wl,--start-group" end_group_flag = "-Wl,--end-group " ---- wee8/src/wasm/wasm-memory.cc -+++ wee8/src/wasm/wasm-memory.cc -@@ -142,7 +142,7 @@ void* TryAllocateBackingStore(WasmMemoryTracker* memory_tracker, Heap* heap, +--- wee8/src/objects/backing-store.cc ++++ wee8/src/objects/backing-store.cc +@@ -34,7 +34,7 @@ constexpr bool kUseGuardRegions = false; // address space limits needs to be smaller. constexpr size_t kAddressSpaceLimit = 0x8000000000L; // 512 GiB #elif V8_TARGET_ARCH_64_BIT @@ -34,7 +34,7 @@ #endif --- wee8/build/config/sanitizers/sanitizers.gni +++ wee8/build/config/sanitizers/sanitizers.gni -@@ -145,7 +145,7 @@ if (current_toolchain != default_toolchain) { +@@ -147,7 +147,7 @@ if (!is_a_target_toolchain) { # standard system libraries. We have instrumented system libraries for msan, # which requires them to prevent false positives. # TODO(thakis): Maybe remove this variable. @@ -43,7 +43,7 @@ # Whether we are doing a fuzzer build. 
Normally this should be checked instead # of checking "use_libfuzzer || use_afl" because often developers forget to -@@ -185,8 +185,7 @@ assert(!using_sanitizer || is_clang, +@@ -195,8 +195,7 @@ assert(!using_sanitizer || is_clang, assert(!is_cfi || is_clang, "is_cfi requires setting is_clang = true in 'gn args'") diff --git a/bazel/external/xxhash.BUILD b/bazel/external/xxhash.BUILD index 5f8120dfee0f0..33f9bbe697054 100644 --- a/bazel/external/xxhash.BUILD +++ b/bazel/external/xxhash.BUILD @@ -1,3 +1,5 @@ +load("@rules_cc//cc:defs.bzl", "cc_library") + licenses(["notice"]) # Apache 2 cc_library( diff --git a/bazel/foreign_cc/BUILD b/bazel/foreign_cc/BUILD index b2ee9cb74d7bf..c4a59ab20bda5 100644 --- a/bazel/foreign_cc/BUILD +++ b/bazel/foreign_cc/BUILD @@ -1,8 +1,9 @@ -licenses(["notice"]) # Apache 2 - +load("@rules_cc//cc:defs.bzl", "cc_library") load("//bazel:envoy_build_system.bzl", "envoy_cmake_external", "envoy_package") load("@rules_foreign_cc//tools/build_defs:configure.bzl", "configure_make") +licenses(["notice"]) # Apache 2 + envoy_package() # autotools packages are unusable on Windows as-is @@ -44,6 +45,7 @@ configure_make( # https://github.com/envoyproxy/envoy/issues/6084 # TODO(htuch): Remove when #6084 is fixed "//bazel:asan_build": {"ENVOY_CONFIG_ASAN": "1"}, + "//bazel:msan_build": {"ENVOY_CONFIG_MSAN": "1"}, "//conditions:default": {}, }), lib_source = "@com_github_luajit_luajit//:all", @@ -53,6 +55,7 @@ configure_make( "//bazel:windows_x86_64": ["lua51.lib"], "//conditions:default": ["libluajit-5.1.a"], }), + tags = ["skip_on_windows"], ) configure_make( @@ -63,6 +66,7 @@ configure_make( # https://github.com/envoyproxy/envoy/issues/6084 # TODO(htuch): Remove when #6084 is fixed "//bazel:asan_build": {"ENVOY_CONFIG_ASAN": "1"}, + "//bazel:msan_build": {"ENVOY_CONFIG_MSAN": "1"}, "//conditions:default": {}, }), lib_source = "@com_github_moonjit_moonjit//:all", @@ -72,6 +76,7 @@ configure_make( "//bazel:windows_x86_64": ["lua51.lib"], 
"//conditions:default": ["libluajit-5.1.a"], }), + tags = ["skip_on_windows"], ) envoy_cmake_external( @@ -80,9 +85,18 @@ envoy_cmake_external( "CARES_SHARED": "no", "CARES_STATIC": "on", "CMAKE_CXX_COMPILER_FORCED": "on", + "CMAKE_INSTALL_LIBDIR": "lib", }, defines = ["CARES_STATICLIB"], lib_source = "@com_github_c_ares_c_ares//:all", + linkopts = select({ + "//bazel:apple": ["-lresolv"], + "//conditions:default": [], + }), + postfix_script = select({ + "//bazel:windows_x86_64": "cp -L $EXT_BUILD_ROOT/external/com_github_c_ares_c_ares/nameser.h $INSTALLDIR/include/nameser.h", + "//conditions:default": "", + }), static_libraries = select({ "//bazel:windows_x86_64": ["cares.lib"], "//conditions:default": ["libcares.a"], @@ -101,6 +115,9 @@ envoy_cmake_external( "CMAKE_USE_GSSAPI": "off", "HTTP_ONLY": "on", "CMAKE_INSTALL_LIBDIR": "lib", + # Explicitly enable Unix sockets and disable crypto for Windows + "USE_UNIX_SOCKETS": "on", + "CURL_DISABLE_CRYPTO_AUTH": "on", # C-Ares. "ENABLE_ARES": "on", "CARES_LIBRARY": "$EXT_BUILD_DEPS/ares", @@ -141,6 +158,7 @@ envoy_cmake_external( name = "event", cache_entries = { "EVENT__DISABLE_OPENSSL": "on", + "EVENT__DISABLE_MBEDTLS": "on", "EVENT__DISABLE_REGRESS": "on", "EVENT__DISABLE_TESTS": "on", "EVENT__LIBRARY_TYPE": "STATIC", @@ -188,8 +206,8 @@ envoy_cmake_external( defines = ["NGHTTP2_STATICLIB"], lib_source = "@com_github_nghttp2_nghttp2//:all", static_libraries = select({ - "//bazel:windows_x86_64": ["nghttp2_static.lib"], - "//conditions:default": ["libnghttp2_static.a"], + "//bazel:windows_x86_64": ["nghttp2.lib"], + "//conditions:default": ["libnghttp2.a"], }), ) @@ -198,11 +216,13 @@ envoy_cmake_external( cache_entries = { "YAML_CPP_BUILD_TESTS": "off", "YAML_CPP_BUILD_TOOLS": "off", + "YAML_BUILD_SHARED_LIBS": "off", "CMAKE_CXX_COMPILER_FORCED": "on", + "YAML_MSVC_SHARED_RT": "off", }, lib_source = "@com_github_jbeder_yaml_cpp//:all", static_libraries = select({ - "//bazel:windows_x86_64": ["libyaml-cpp.lib"], + 
"//bazel:windows_x86_64": ["yaml-cpp.lib"], "//conditions:default": ["libyaml-cpp.a"], }), ) @@ -212,6 +232,8 @@ envoy_cmake_external( cache_entries = { "BUILD_SHARED_LIBS": "off", "CMAKE_CXX_COMPILER_FORCED": "on", + "CMAKE_C_COMPILER_FORCED": "on", + "SKIP_BUILD_EXAMPLES": "on", }, lib_source = "@net_zlib//:all", static_libraries = select({ diff --git a/bazel/foreign_cc/cares-win32-nameser.patch b/bazel/foreign_cc/cares-win32-nameser.patch deleted file mode 100644 index 756c3933edcf6..0000000000000 --- a/bazel/foreign_cc/cares-win32-nameser.patch +++ /dev/null @@ -1,12 +0,0 @@ ---- CMakeLists.txt.orig 2020-02-19 14:42:47.978299400 -0500 -+++ CMakeLists.txt 2020-02-19 14:45:18.925903400 -0500 -@@ -652,6 +652,9 @@ - # Headers installation target - IF (CARES_INSTALL) - SET (CARES_HEADERS ares.h ares_version.h ares_dns.h "${PROJECT_BINARY_DIR}/ares_build.h" ares_rules.h) -+ IF (WIN32) -+ SET (CARES_HEADERS ${CARES_HEADERS} nameser.h) -+ ENDIF() - INSTALL (FILES ${CARES_HEADERS} DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}) - ENDIF () - diff --git a/bazel/foreign_cc/libevent_msvc.patch b/bazel/foreign_cc/libevent_msvc.patch deleted file mode 100644 index ebbd053c76519..0000000000000 --- a/bazel/foreign_cc/libevent_msvc.patch +++ /dev/null @@ -1,10 +0,0 @@ ---- CMakeLists.txt 2019-10-02 20:20:58.449181400 -0400 -+++ CMakeLists.txt 2019-10-02 20:21:19.390279100 -0400 -@@ -236,7 +236,6 @@ - - if (EVENT__MSVC_STATIC_RUNTIME) - foreach (flag_var -- CMAKE_C_FLAGS - CMAKE_C_FLAGS_DEBUG - CMAKE_C_FLAGS_RELEASE - CMAKE_C_FLAGS_MINSIZEREL diff --git a/bazel/foreign_cc/luajit.patch b/bazel/foreign_cc/luajit.patch index 7623ddafa49a0..5a6cefe29e096 100644 --- a/bazel/foreign_cc/luajit.patch +++ b/bazel/foreign_cc/luajit.patch @@ -58,7 +58,7 @@ index 0000000..9c71271 --- /dev/null +++ b/build.py @@ -0,0 +1,56 @@ -+#!/usr/bin/env python ++#!/usr/bin/env python3 + +import argparse +import os @@ -86,14 +86,14 @@ index 0000000..9c71271 + os.environ["HOST_CFLAGS"] = "-fno-sanitize=memory" + 
os.environ["HOST_LDFLAGS"] = "-fno-sanitize=memory" + -+ # Blacklist LuaJIT from ASAN for now. ++ # Remove LuaJIT from ASAN for now. + # TODO(htuch): Remove this when https://github.com/envoyproxy/envoy/issues/6084 is resolved. -+ if "ENVOY_CONFIG_ASAN" in os.environ: -+ os.environ["TARGET_CFLAGS"] += " -fsanitize-blacklist=%s/com_github_luajit_luajit/clang-asan-blacklist.txt" % os.environ["PWD"] -+ with open("clang-asan-blacklist.txt", "w") as f: ++ if "ENVOY_CONFIG_ASAN" in os.environ or "ENVOY_CONFIG_MSAN" in os.environ: ++ os.environ["TARGET_CFLAGS"] += " -fsanitize-blacklist=%s/com_github_luajit_luajit/clang-asan-blocklist.txt" % os.environ["PWD"] ++ with open("clang-asan-blocklist.txt", "w") as f: + f.write("fun:*\n") + -+ os.system('make V=1 PREFIX="{}" install'.format(args.prefix)) ++ os.system('make -j{} V=1 PREFIX="{}" install'.format(os.cpu_count(), args.prefix)) + +def win_main(): + src_dir = os.path.dirname(os.path.realpath(__file__)) @@ -108,7 +108,7 @@ index 0000000..9c71271 + shutil.copy(header, dst_dir + "/include/luajit-2.1") + os.makedirs(dst_dir + "/bin", exist_ok=True) + shutil.copy("luajit.exe", dst_dir + "/bin") -+ ++ +if os.name == 'nt': + win_main() +else: diff --git a/bazel/foreign_cc/moonjit.patch b/bazel/foreign_cc/moonjit.patch index c0d2c274eaae8..d7a67050f1702 100644 --- a/bazel/foreign_cc/moonjit.patch +++ b/bazel/foreign_cc/moonjit.patch @@ -49,7 +49,7 @@ index 0000000..9c71271 --- /dev/null +++ b/build.py @@ -0,0 +1,56 @@ -+#!/usr/bin/env python ++#!/usr/bin/env python3 + +import argparse +import os @@ -77,14 +77,14 @@ index 0000000..9c71271 + os.environ["HOST_CFLAGS"] = "-fno-sanitize=memory" + os.environ["HOST_LDFLAGS"] = "-fno-sanitize=memory" + -+ # Blacklist LuaJIT from ASAN for now. ++ # Remove LuaJIT from ASAN for now. + # TODO(htuch): Remove this when https://github.com/envoyproxy/envoy/issues/6084 is resolved. 
-+ if "ENVOY_CONFIG_ASAN" in os.environ: -+ os.environ["TARGET_CFLAGS"] += " -fsanitize-blacklist=%s/com_github_moonjit_moonjit/clang-asan-blacklist.txt" % os.environ["PWD"] -+ with open("clang-asan-blacklist.txt", "w") as f: ++ if "ENVOY_CONFIG_ASAN" in os.environ or "ENVOY_CONFIG_MSAN" in os.environ: ++ os.environ["TARGET_CFLAGS"] += " -fsanitize-blacklist=%s/com_github_moonjit_moonjit/clang-asan-blocklist.txt" % os.environ["PWD"] ++ with open("clang-asan-blocklist.txt", "w") as f: + f.write("fun:*\n") + -+ os.system('make V=1 PREFIX="{}" install'.format(args.prefix)) ++ os.system('make -j{} V=1 PREFIX="{}" install'.format(os.cpu_count(), args.prefix)) + +def win_main(): + src_dir = os.path.dirname(os.path.realpath(__file__)) @@ -99,7 +99,7 @@ index 0000000..9c71271 + shutil.copy(header, dst_dir + "/include/moonjit-2.2") + os.makedirs(dst_dir + "/bin", exist_ok=True) + shutil.copy("luajit.exe", dst_dir + "/bin") -+ ++ +if os.name == 'nt': + win_main() +else: diff --git a/bazel/foreign_cc/nghttp2.patch b/bazel/foreign_cc/nghttp2.patch index 55768dca20036..91ddf1898e45b 100644 --- a/bazel/foreign_cc/nghttp2.patch +++ b/bazel/foreign_cc/nghttp2.patch @@ -15,3 +15,73 @@ index 35c77d1d..47bd63f5 100644 endif() # AC_TYPE_UINT8_T # AC_TYPE_UINT16_T +# https://github.com/nghttp2/nghttp2/pull/1468 +diff --git a/lib/nghttp2_buf.c b/lib/nghttp2_buf.c +index 2a435bebf..92f97f7f2 100644 +--- a/lib/nghttp2_buf.c ++++ b/lib/nghttp2_buf.c +@@ -82,8 +82,10 @@ void nghttp2_buf_reset(nghttp2_buf *buf) { + } + + void nghttp2_buf_wrap_init(nghttp2_buf *buf, uint8_t *begin, size_t len) { +- buf->begin = buf->pos = buf->last = buf->mark = begin; +- buf->end = begin + len; ++ buf->begin = buf->pos = buf->last = buf->mark = buf->end = begin; ++ if (buf->end != NULL) { ++ buf->end += len; ++ } + } + + static int buf_chain_new(nghttp2_buf_chain **chain, size_t chunk_length, +diff --git a/lib/nghttp2_frame.c b/lib/nghttp2_frame.c +index 4821de408..940c723b0 100644 +--- a/lib/nghttp2_frame.c 
++++ b/lib/nghttp2_frame.c +@@ -818,8 +818,10 @@ int nghttp2_frame_unpack_origin_payload(nghttp2_extension *frame, + size_t len = 0; + + origin = frame->payload; +- p = payload; +- end = p + payloadlen; ++ p = end = payload; ++ if (end != NULL) { ++ end += payloadlen; ++ } + + for (; p != end;) { + if (end - p < 2) { +diff --git a/lib/nghttp2_session.c b/lib/nghttp2_session.c +index 563ccd7de..794f141a1 100644 +--- a/lib/nghttp2_session.c ++++ b/lib/nghttp2_session.c +@@ -5349,7 +5349,7 @@ static ssize_t inbound_frame_effective_readlen(nghttp2_inbound_frame *iframe, + + ssize_t nghttp2_session_mem_recv(nghttp2_session *session, const uint8_t *in, + size_t inlen) { +- const uint8_t *first = in, *last = in + inlen; ++ const uint8_t *first = in, *last = in; + nghttp2_inbound_frame *iframe = &session->iframe; + size_t readlen; + ssize_t padlen; +@@ -5360,6 +5360,10 @@ ssize_t nghttp2_session_mem_recv(nghttp2_session *session, const uint8_t *in, + size_t pri_fieldlen; + nghttp2_mem *mem; + ++ if (in != NULL) { ++ last += inlen; ++ } ++ + DEBUGF("recv: connection recv_window_size=%d, local_window=%d\n", + session->recv_window_size, session->local_window_size); + +@@ -5389,7 +5393,9 @@ ssize_t nghttp2_session_mem_recv(nghttp2_session *session, const uint8_t *in, + } + + iframe->payloadleft -= readlen; +- in += readlen; ++ if (in != NULL) { ++ in += readlen; ++ } + + if (iframe->payloadleft == 0) { + session_inbound_frame_reset(session); diff --git a/bazel/foreign_cc/zlib.patch b/bazel/foreign_cc/zlib.patch new file mode 100644 index 0000000000000..d8a7354dc6daa --- /dev/null +++ b/bazel/foreign_cc/zlib.patch @@ -0,0 +1,44 @@ +diff --git a/CMakeLists.txt b/CMakeLists.txt +index 0fe939d..2f0475a 100644 +--- a/CMakeLists.txt ++++ b/CMakeLists.txt +@@ -229,21 +229,22 @@ endif() + #============================================================================ + # Example binaries + #============================================================================ +- 
+-add_executable(example test/example.c) +-target_link_libraries(example zlib) +-add_test(example example) +- +-add_executable(minigzip test/minigzip.c) +-target_link_libraries(minigzip zlib) +- +-if(HAVE_OFF64_T) +- add_executable(example64 test/example.c) +- target_link_libraries(example64 zlib) +- set_target_properties(example64 PROPERTIES COMPILE_FLAGS "-D_FILE_OFFSET_BITS=64") +- add_test(example64 example64) +- +- add_executable(minigzip64 test/minigzip.c) +- target_link_libraries(minigzip64 zlib) +- set_target_properties(minigzip64 PROPERTIES COMPILE_FLAGS "-D_FILE_OFFSET_BITS=64") ++if(NOT SKIP_BUILD_EXAMPLES) ++ add_executable(example test/example.c) ++ target_link_libraries(example zlib) ++ add_test(example example) ++ ++ add_executable(minigzip test/minigzip.c) ++ target_link_libraries(minigzip zlib) ++ ++ if(HAVE_OFF64_T) ++ add_executable(example64 test/example.c) ++ target_link_libraries(example64 zlib) ++ set_target_properties(example64 PROPERTIES COMPILE_FLAGS "-D_FILE_OFFSET_BITS=64") ++ add_test(example64 example64) ++ ++ add_executable(minigzip64 test/minigzip.c) ++ target_link_libraries(minigzip64 zlib) ++ set_target_properties(minigzip64 PROPERTIES COMPILE_FLAGS "-D_FILE_OFFSET_BITS=64") ++ endif() + endif() diff --git a/bazel/fuzzit_wrapper.sh b/bazel/fuzzit_wrapper.sh deleted file mode 100755 index 5f66247bb867b..0000000000000 --- a/bazel/fuzzit_wrapper.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash - -set -e - -# run fuzzing regression or upload to Fuzzit for long running fuzzing job depending on whether FUZZIT_API_KEY is set - -FUZZIT="${TEST_SRCDIR}/fuzzit_linux/fuzzit" - -FUZZER_BINARY=$1 -FUZZIT_TARGET_NAME="$(basename $1 | sed -e s/_fuzz_test_with_libfuzzer$// -e s/_/-/g)" - -if [[ ! 
-z "${FUZZIT_API_KEY}" ]]; then - "${FUZZIT}" create target --skip-if-exists --public-corpus envoyproxy/"${FUZZIT_TARGET_NAME}" - - # Run fuzzing first so this is not affected by local-regression timeout - "${FUZZIT}" create job --skip-if-not-exists --host "${ENVOY_BUILD_IMAGE}" --type fuzzing envoyproxy/"${FUZZIT_TARGET_NAME}" "${FUZZER_BINARY}" -fi - -"${FUZZIT}" create job --skip-if-not-exists --host "${ENVOY_BUILD_IMAGE}" --type local-regression envoyproxy/"${FUZZIT_TARGET_NAME}" "${FUZZER_BINARY}" diff --git a/bazel/genrule_repository.bzl b/bazel/genrule_repository.bzl index 0689c39c88b0b..28f37adfe55c7 100644 --- a/bazel/genrule_repository.bzl +++ b/bazel/genrule_repository.bzl @@ -68,7 +68,7 @@ def _genrule_cc_deps(ctx): genrule_cc_deps = rule( attrs = { "deps": attr.label_list( - providers = [], # CcSkylarkApiProvider + providers = [], # CcStarlarkApiProvider mandatory = True, allow_empty = False, ), @@ -115,7 +115,7 @@ def _genrule_environment(ctx): ld_flags = [] ld_libs = [] if ctx.var.get("ENVOY_CONFIG_COVERAGE"): - ld_libs += ["-lgcov"] + ld_libs.append("-lgcov") if ctx.var.get("ENVOY_CONFIG_ASAN"): cc_flags += asan_flags ld_flags += asan_flags @@ -137,8 +137,8 @@ def _genrule_environment(ctx): lines.append("export ASAN_OPTIONS=detect_leaks=0") lines.append("") - out = ctx.new_file(ctx.attr.name + ".sh") - ctx.file_action(out, "\n".join(lines)) + out = ctx.actions.declare_file(ctx.attr.name + ".sh") + ctx.actions.write(out, "\n".join(lines)) return DefaultInfo(files = depset([out])) genrule_environment = rule( diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index 90cefccf20941..4e0293ef288b1 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -2,12 +2,13 @@ load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") load(":dev_binding.bzl", "envoy_dev_binding") load(":genrule_repository.bzl", "genrule_repository") load("@envoy_api//bazel:envoy_http_archive.bzl", "envoy_http_archive") 
-load(":repository_locations.bzl", "REPOSITORY_LOCATIONS") +load(":repository_locations.bzl", "DEPENDENCY_ANNOTATIONS", "DEPENDENCY_REPOSITORIES", "USE_CATEGORIES", "USE_CATEGORIES_WITH_CPE_OPTIONAL") load("@com_google_googleapis//:repository_rules.bzl", "switched_rules_by_language") PPC_SKIP_TARGETS = ["envoy.filters.http.lua"] WINDOWS_SKIP_TARGETS = [ + "envoy.filters.http.lua", "envoy.tracers.dynamic_ot", "envoy.tracers.lightstep", "envoy.tracers.datadog", @@ -18,6 +19,37 @@ WINDOWS_SKIP_TARGETS = [ # archives, e.g. cares. BUILD_ALL_CONTENT = """filegroup(name = "all", srcs = glob(["**"]), visibility = ["//visibility:public"])""" +# Method for verifying content of the DEPENDENCY_REPOSITORIES defined in bazel/repository_locations.bzl +# Verification is here so that bazel/repository_locations.bzl can be loaded into other tools written in Python, +# and as such needs to be free of bazel specific constructs. +def _repository_locations(): + locations = dict(DEPENDENCY_REPOSITORIES) + for key, location in locations.items(): + if "sha256" not in location or len(location["sha256"]) == 0: + fail("SHA256 missing for external dependency " + str(location["urls"])) + + if "use_category" not in location: + fail("The 'use_category' attribute must be defined for external dependecy " + str(location["urls"])) + + if "cpe" not in location and not [category for category in USE_CATEGORIES_WITH_CPE_OPTIONAL if category in location["use_category"]]: + fail("The 'cpe' attribute must be defined for external dependecy " + str(location["urls"])) + + for category in location["use_category"]: + if category not in USE_CATEGORIES: + fail("Unknown use_category value '" + category + "' for dependecy " + str(location["urls"])) + + return locations + +REPOSITORY_LOCATIONS = _repository_locations() + +# To initialize http_archive REPOSITORY_LOCATIONS dictionaries must be stripped of annotations. +# See repository_locations.bzl for the list of annotation attributes. 
+def _get_location(dependency): + stripped = dict(REPOSITORY_LOCATIONS[dependency]) + for attribute in DEPENDENCY_ANNOTATIONS: + stripped.pop(attribute, None) + return stripped + def _repository_impl(name, **kwargs): envoy_http_archive( name, @@ -37,9 +69,9 @@ _default_envoy_build_config = repository_rule( }, ) -# Python dependencies. If these become non-trivial, we might be better off using a virtualenv to -# wrap them, but for now we can treat them as first-class Bazel. +# Python dependencies. def _python_deps(): + # TODO(htuch): convert these to pip3_import. _repository_impl( name = "com_github_pallets_markupsafe", build_file = "@envoy//bazel/external:markupsafe.BUILD", @@ -93,7 +125,12 @@ def _go_deps(skip_targets): # Keep the skip_targets check around until Istio Proxy has stopped using # it to exclude the Go rules. if "io_bazel_rules_go" not in skip_targets: - _repository_impl("io_bazel_rules_go") + _repository_impl( + name = "io_bazel_rules_go", + # TODO(wrowe, sunjayBhatia): remove when Windows RBE supports batch file invocation + patch_args = ["-p1"], + patches = ["@envoy//bazel:rules_go.patch"], + ) _repository_impl("bazel_gazelle") def envoy_dependencies(skip_targets = []): @@ -154,15 +191,20 @@ def envoy_dependencies(skip_targets = []): _io_opentracing_cpp() _net_zlib() _upb() + _proxy_wasm_cpp_sdk() + _proxy_wasm_cpp_host() + _emscripten_toolchain() _repository_impl("com_googlesource_code_re2") _com_google_cel_cpp() + _repository_impl("com_github_google_flatbuffers") _repository_impl("bazel_toolchains") _repository_impl("bazel_compdb") _repository_impl("envoy_build_tools") + _repository_impl("rules_cc") + _org_unicode_icuuc() # Unconditional, since we use this only for compiler-agnostic fuzzing utils. 
_org_llvm_releases_compiler_rt() - _fuzzit_linux() _python_deps() _cc_deps() @@ -198,6 +240,7 @@ def _boringssl_fips(): sha256 = location["sha256"], genrule_cmd_file = "@envoy//bazel/external:boringssl_fips.genrule_cmd", build_file = "@envoy//bazel/external:boringssl_fips.BUILD", + patches = ["@envoy//bazel/external:boringssl_fips.patch"], ) def _com_github_circonus_labs_libcircllhist(): @@ -211,10 +254,9 @@ def _com_github_circonus_labs_libcircllhist(): ) def _com_github_c_ares_c_ares(): - location = REPOSITORY_LOCATIONS["com_github_c_ares_c_ares"] + location = _get_location("com_github_c_ares_c_ares") http_archive( name = "com_github_c_ares_c_ares", - patches = ["@envoy//bazel/foreign_cc:cares-win32-nameser.patch"], build_file_content = BUILD_ALL_CONTENT, **location ) @@ -280,7 +322,7 @@ def _com_github_gabime_spdlog(): ) def _com_github_google_benchmark(): - location = REPOSITORY_LOCATIONS["com_github_google_benchmark"] + location = _get_location("com_github_google_benchmark") http_archive( name = "com_github_google_benchmark", **location @@ -297,7 +339,7 @@ def _com_github_google_libprotobuf_mutator(): ) def _com_github_jbeder_yaml_cpp(): - location = REPOSITORY_LOCATIONS["com_github_jbeder_yaml_cpp"] + location = _get_location("com_github_jbeder_yaml_cpp") http_archive( name = "com_github_jbeder_yaml_cpp", build_file_content = BUILD_ALL_CONTENT, @@ -309,12 +351,10 @@ def _com_github_jbeder_yaml_cpp(): ) def _com_github_libevent_libevent(): - location = REPOSITORY_LOCATIONS["com_github_libevent_libevent"] + location = _get_location("com_github_libevent_libevent") http_archive( name = "com_github_libevent_libevent", build_file_content = BUILD_ALL_CONTENT, - patch_args = ["-p0"], - patches = ["@envoy//bazel/foreign_cc:libevent_msvc.patch"], **location ) native.bind( @@ -323,12 +363,11 @@ def _com_github_libevent_libevent(): ) def _net_zlib(): - location = REPOSITORY_LOCATIONS["net_zlib"] - - http_archive( + _repository_impl( name = "net_zlib", build_file_content 
= BUILD_ALL_CONTENT, - **location + patch_args = ["-p1"], + patches = ["@envoy//bazel/foreign_cc:zlib.patch"], ) native.bind( @@ -344,9 +383,27 @@ def _net_zlib(): def _com_google_cel_cpp(): _repository_impl("com_google_cel_cpp") + _repository_impl("rules_antlr") + location = _get_location("antlr4_runtimes") + http_archive( + name = "antlr4_runtimes", + build_file_content = """ +package(default_visibility = ["//visibility:public"]) +cc_library( + name = "cpp", + srcs = glob(["runtime/Cpp/runtime/src/**/*.cpp"]), + hdrs = glob(["runtime/Cpp/runtime/src/**/*.h"]), + includes = ["runtime/Cpp/runtime/src"], +) +""", + patch_args = ["-p1"], + # Patches ASAN violation of initialization fiasco + patches = ["@envoy//bazel:antlr.patch"], + **location + ) def _com_github_nghttp2_nghttp2(): - location = REPOSITORY_LOCATIONS["com_github_nghttp2_nghttp2"] + location = _get_location("com_github_nghttp2_nghttp2") http_archive( name = "com_github_nghttp2_nghttp2", build_file_content = BUILD_ALL_CONTENT, @@ -560,7 +617,7 @@ def _com_google_protobuf(): ) def _io_opencensus_cpp(): - location = REPOSITORY_LOCATIONS["io_opencensus_cpp"] + location = _get_location("io_opencensus_cpp") http_archive( name = "io_opencensus_cpp", **location @@ -604,7 +661,7 @@ def _io_opencensus_cpp(): def _com_github_curl(): # Used by OpenCensus Zipkin exporter. 
- location = REPOSITORY_LOCATIONS["com_github_curl"] + location = _get_location("com_github_curl") http_archive( name = "com_github_curl", build_file_content = BUILD_ALL_CONTENT + """ @@ -620,7 +677,7 @@ cc_library(name = "curl", visibility = ["//visibility:public"], deps = ["@envoy/ ) def _com_googlesource_chromium_v8(): - location = REPOSITORY_LOCATIONS["com_googlesource_chromium_v8"] + location = _get_location("com_googlesource_chromium_v8") genrule_repository( name = "com_googlesource_chromium_v8", genrule_cmd_file = "@envoy//bazel/external:wee8.genrule_cmd", @@ -666,6 +723,8 @@ def _com_googlesource_quiche(): def _com_googlesource_googleurl(): _repository_impl( name = "com_googlesource_googleurl", + patches = ["@envoy//bazel/external:googleurl.patch"], + patch_args = ["-p1"], ) native.bind( name = "googleurl", @@ -678,12 +737,6 @@ def _org_llvm_releases_compiler_rt(): build_file = "@envoy//bazel/external:compiler_rt.BUILD", ) -def _fuzzit_linux(): - _repository_impl( - name = "fuzzit_linux", - build_file_content = "exports_files([\"fuzzit\"])", - ) - def _com_github_grpc_grpc(): _repository_impl("com_github_grpc_grpc") _repository_impl("build_bazel_rules_apple") @@ -717,6 +770,16 @@ def _com_github_grpc_grpc(): actual = "@com_github_grpc_grpc//test/core/tsi/alts/fake_handshaker:fake_handshaker_lib", ) + native.bind( + name = "grpc_alts_handshaker_proto", + actual = "@com_github_grpc_grpc//test/core/tsi/alts/fake_handshaker:handshaker_proto", + ) + + native.bind( + name = "grpc_alts_transport_security_common_proto", + actual = "@com_github_grpc_grpc//test/core/tsi/alts/fake_handshaker:transport_security_common_proto", + ) + def _upb(): _repository_impl( name = "upb", @@ -729,6 +792,22 @@ def _upb(): actual = "@upb//:upb", ) +def _proxy_wasm_cpp_sdk(): + _repository_impl(name = "proxy_wasm_cpp_sdk") + +def _proxy_wasm_cpp_host(): + _repository_impl( + name = "proxy_wasm_cpp_host", + build_file = "@envoy//bazel/external:proxy_wasm_cpp_host.BUILD", + ) + +def 
_emscripten_toolchain(): + _repository_impl( + name = "emscripten_toolchain", + build_file_content = BUILD_ALL_CONTENT, + patch_cmds = REPOSITORY_LOCATIONS["emscripten_toolchain"]["patch_cmds"], + ) + def _com_github_google_jwt_verify(): _repository_impl("com_github_google_jwt_verify") @@ -738,7 +817,7 @@ def _com_github_google_jwt_verify(): ) def _com_github_luajit_luajit(): - location = REPOSITORY_LOCATIONS["com_github_luajit_luajit"] + location = _get_location("com_github_luajit_luajit") http_archive( name = "com_github_luajit_luajit", build_file_content = BUILD_ALL_CONTENT, @@ -754,7 +833,7 @@ def _com_github_luajit_luajit(): ) def _com_github_moonjit_moonjit(): - location = REPOSITORY_LOCATIONS["com_github_moonjit_moonjit"] + location = _get_location("com_github_moonjit_moonjit") http_archive( name = "com_github_moonjit_moonjit", build_file_content = BUILD_ALL_CONTENT, @@ -770,11 +849,10 @@ def _com_github_moonjit_moonjit(): ) def _com_github_gperftools_gperftools(): - location = REPOSITORY_LOCATIONS["com_github_gperftools_gperftools"] + location = _get_location("com_github_gperftools_gperftools") http_archive( name = "com_github_gperftools_gperftools", build_file_content = BUILD_ALL_CONTENT, - patch_cmds = ["./autogen.sh"], **location ) @@ -802,7 +880,7 @@ filegroup( name = "kafka_source", build_file_content = KAFKASOURCE_BUILD_CONTENT, patches = ["@envoy//bazel/external:kafka_int32.patch"], - **REPOSITORY_LOCATIONS["kafka_source"] + **_get_location("kafka_source") ) # This archive provides Kafka (and Zookeeper) binaries, that are used during Kafka integration @@ -810,7 +888,7 @@ filegroup( http_archive( name = "kafka_server_binary", build_file_content = BUILD_ALL_CONTENT, - **REPOSITORY_LOCATIONS["kafka_server_binary"] + **_get_location("kafka_server_binary") ) # This archive provides Kafka client in Python, so we can use it to interact with Kafka server @@ -818,7 +896,13 @@ filegroup( http_archive( name = "kafka_python_client", build_file_content = 
BUILD_ALL_CONTENT, - **REPOSITORY_LOCATIONS["kafka_python_client"] + **_get_location("kafka_python_client") + ) + +def _org_unicode_icuuc(): + _repository_impl( + name = "org_unicode_icuuc", + build_file = "@envoy//bazel/external:icuuc.BUILD", ) def _foreign_cc_dependencies(): diff --git a/bazel/repositories_extra.bzl b/bazel/repositories_extra.bzl new file mode 100644 index 0000000000000..aef6b8c69b24a --- /dev/null +++ b/bazel/repositories_extra.bzl @@ -0,0 +1,20 @@ +load("@rules_python//python:repositories.bzl", "py_repositories") +load("@rules_python//python:pip.bzl", "pip3_import", "pip_repositories") + +# Python dependencies. +def _python_deps(): + py_repositories() + pip_repositories() + + pip3_import( + name = "config_validation_pip3", + requirements = "@envoy//tools/config_validation:requirements.txt", + ) + pip3_import( + name = "protodoc_pip3", + requirements = "@envoy//tools/protodoc:requirements.txt", + ) + +# Envoy deps that rely on a first stage of dependency loading in envoy_dependencies(). +def envoy_dependencies_extra(): + _python_deps() diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index 58a5c88786084..11cf908a21c05 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -1,167 +1,254 @@ -REPOSITORY_LOCATIONS = dict( +# Validation of content in this file is done on the bazel/repositories.bzl file to make it free of bazel +# constructs. This is to allow this file to be loaded into Python based build and maintenance tools. + +# Envoy dependencies may be annotated with the following attributes: +DEPENDENCY_ANNOTATIONS = [ + # List of the categories describing how the dependency is being used. This attribute is used + # for automatic tracking of security posture of Envoy's dependencies. + # Possible values are documented in the USE_CATEGORIES list below. + # This attribute is mandatory for each dependecy. 
+ "use_category",
+
+ # Attribute specifying CPE (Common Platform Enumeration, see https://nvd.nist.gov/products/cpe) ID
+ # of the dependency. The ID may be in v2.3 or v2.2 format, although v2.3 is preferred. See
+ # https://nvd.nist.gov/products/cpe for CPE format. Use single wildcard '*' for version and vector elements
+ # i.e. 'cpe:2.3:a:nghttp2:nghttp2:*'. Use "N/A" for dependencies without CPE assigned.
+ # This attribute is optional for components with use categories listed in the
+ # USE_CATEGORIES_WITH_CPE_OPTIONAL
+ "cpe",
+]
+
+# NOTE: If a dependency use case is either dataplane or controlplane, the other uses are not needed
+# to be declared.
+USE_CATEGORIES = [
+ # This dependency is used in build process.
+ "build",
+ # This dependency is used for unit tests.
+ "test",
+ # This dependency is used in API protos.
+ "api",
+ # This dependency is used in processing downstream or upstream requests.
+ "dataplane",
+ # This dependency is used to process xDS requests.
+ "controlplane",
+ # This dependency is used for logging, metrics or tracing. It may process untrusted input.
+ "observability",
+ # This dependency does not handle untrusted data and is used for various utility purposes.
+ "other",
+]
+
+# Components with these use categories are not required to specify the 'cpe' annotation.
+USE_CATEGORIES_WITH_CPE_OPTIONAL = ["build", "test", "other"] + +DEPENDENCY_REPOSITORIES = dict( bazel_compdb = dict( - sha256 = "87e376a685eacfb27bcc0d0cdf5ded1d0b99d868390ac50f452ba6ed781caffe", - strip_prefix = "bazel-compilation-database-0.4.2", - urls = ["https://github.com/grailbio/bazel-compilation-database/archive/0.4.2.tar.gz"], + sha256 = "bcecfd622c4ef272fd4ba42726a52e140b961c4eac23025f18b346c968a8cfb4", + strip_prefix = "bazel-compilation-database-0.4.5", + urls = ["https://github.com/grailbio/bazel-compilation-database/archive/0.4.5.tar.gz"], + use_category = ["build"], ), bazel_gazelle = dict( sha256 = "86c6d481b3f7aedc1d60c1c211c6f76da282ae197c3b3160f54bd3a8f847896f", urls = ["https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.19.1/bazel-gazelle-v0.19.1.tar.gz"], + use_category = ["build"], ), bazel_toolchains = dict( - sha256 = "239a1a673861eabf988e9804f45da3b94da28d1aff05c373b013193c315d9d9e", - strip_prefix = "bazel-toolchains-3.0.1", + sha256 = "882fecfc88d3dc528f5c5681d95d730e213e39099abff2e637688a91a9619395", + strip_prefix = "bazel-toolchains-3.4.0", urls = [ - "https://github.com/bazelbuild/bazel-toolchains/releases/download/3.0.1/bazel-toolchains-3.0.1.tar.gz", - "https://mirror.bazel.build/github.com/bazelbuild/bazel-toolchains/archive/3.0.1.tar.gz", + "https://github.com/bazelbuild/bazel-toolchains/releases/download/3.4.0/bazel-toolchains-3.4.0.tar.gz", + "https://mirror.bazel.build/github.com/bazelbuild/bazel-toolchains/archive/3.4.0.tar.gz", ], + use_category = ["build"], ), build_bazel_rules_apple = dict( sha256 = "7a7afdd4869bb201c9352eed2daf37294d42b093579b70423490c1b4d4f6ce42", urls = ["https://github.com/bazelbuild/rules_apple/releases/download/0.19.0/rules_apple.0.19.0.tar.gz"], + use_category = ["build"], ), envoy_build_tools = dict( - sha256 = "9d348f92ae8fb2495393109aac28aea314ad1fb013cdec1ab7b1224f804be1b7", - strip_prefix = "envoy-build-tools-823c2e9386eee5117f7ef9e3d7c90e784cd0d047", - # 2020-04-07 - urls = 
["https://github.com/envoyproxy/envoy-build-tools/archive/823c2e9386eee5117f7ef9e3d7c90e784cd0d047.tar.gz"], + sha256 = "88e58fdb42021e64a0b35ae3554a82e92f5c37f630a4dab08a132fc77f8db4b7", + strip_prefix = "envoy-build-tools-1d6573e60207efaae6436b25ecc594360294f63a", + # 2020-07-18 + urls = ["https://github.com/envoyproxy/envoy-build-tools/archive/1d6573e60207efaae6436b25ecc594360294f63a.tar.gz"], + use_category = ["build"], ), boringssl = dict( - sha256 = "a3d4de4f03cb321ef943678d72a045c9a19d26b23d6f4e313f97600c65201a27", - strip_prefix = "boringssl-1c2769383f027befac5b75b6cedd25daf3bf4dcf", + sha256 = "07f1524766b9ed1543674b48e7fce7e3569b6e2b6c0c43ec124dedee9b60f641", + strip_prefix = "boringssl-a0899df79b3a63e606448c72d63a090d86bdb75b", # To update BoringSSL, which tracks Chromium releases: # 1. Open https://omahaproxy.appspot.com/ and note of linux/stable release. # 2. Open https://chromium.googlesource.com/chromium/src/+/refs/tags//DEPS and note . # 3. Find a commit in BoringSSL's "master-with-bazel" branch that merges . 
# - # chromium-81.0.4044.69 - urls = ["https://github.com/google/boringssl/archive/1c2769383f027befac5b75b6cedd25daf3bf4dcf.tar.gz"], + # chromium-84.0.4147.45(beta) + # 2020-05-14 + urls = ["https://github.com/google/boringssl/archive/a0899df79b3a63e606448c72d63a090d86bdb75b.tar.gz"], + use_category = ["dataplane"], + cpe = "N/A", ), boringssl_fips = dict( - sha256 = "b12ad676ee533824f698741bd127f6fbc82c46344398a6d78d25e62c6c418c73", - # fips-20180730 - urls = ["https://commondatastorage.googleapis.com/chromium-boringssl-docs/fips/boringssl-66005f41fbc3529ffe8d007708756720529da20d.tar.xz"], + sha256 = "3b5fdf23274d4179c2077b5e8fa625d9debd7a390aac1d165b7e47234f648bb8", + # fips-20190808 + urls = ["https://commondatastorage.googleapis.com/chromium-boringssl-fips/boringssl-ae223d6138807a13006342edfeef32e813246b39.tar.xz"], + use_category = ["dataplane"], + cpe = "N/A", ), com_google_absl = dict( - sha256 = "2693730730247afb0e7cb2d41664ac2af3ad75c79944efd266be40ba944179b9", - strip_prefix = "abseil-cpp-06f0e767d13d4d68071c4fc51e25724e0fc8bc74", - # 2020-03-03 - urls = ["https://github.com/abseil/abseil-cpp/archive/06f0e767d13d4d68071c4fc51e25724e0fc8bc74.tar.gz"], + sha256 = "573baccd67aa591b8c7209bfb0c77e0d15633d77ced39d1ccbb1232828f7f7d9", + strip_prefix = "abseil-cpp-ce4bc927755fdf0ed03d679d9c7fa041175bb3cb", + # 2020-08-08 + urls = ["https://github.com/abseil/abseil-cpp/archive/ce4bc927755fdf0ed03d679d9c7fa041175bb3cb.tar.gz"], + use_category = ["dataplane", "controlplane"], + cpe = "N/A", ), com_github_apache_thrift = dict( sha256 = "7d59ac4fdcb2c58037ebd4a9da5f9a49e3e034bf75b3f26d9fe48ba3d8806e6b", strip_prefix = "thrift-0.11.0", urls = ["https://files.pythonhosted.org/packages/c6/b4/510617906f8e0c5660e7d96fbc5585113f83ad547a3989b80297ac72a74c/thrift-0.11.0.tar.gz"], + use_category = ["dataplane"], + cpe = "cpe:2.3:a:apache:thrift:*", ), com_github_c_ares_c_ares = dict( - sha256 = "bbaab13d6ad399a278d476f533e4d88a7ec7d729507348bb9c2e3b207ba4c606", - strip_prefix 
= "c-ares-d7e070e7283f822b1d2787903cce3615536c5610", - # 2019-06-19 - # 27 new commits from release-1.15.0. Upgrade for commit 7d3591ee8a1a63e7748e68e6d880bd1763a32885 "getaddrinfo enhancements" and follow up fixes. - # Use getaddrinfo to query DNS record and TTL. - # TODO(crazyxy): Update to release-1.16.0 when it is released. - urls = ["https://github.com/c-ares/c-ares/archive/d7e070e7283f822b1d2787903cce3615536c5610.tar.gz"], + sha256 = "d08312d0ecc3bd48eee0a4cc0d2137c9f194e0a28de2028928c0f6cae85f86ce", + strip_prefix = "c-ares-1.16.1", + urls = ["https://github.com/c-ares/c-ares/releases/download/cares-1_16_1/c-ares-1.16.1.tar.gz"], + use_category = ["dataplane"], + cpe = "cpe:2.3:a:c-ares_project:c-ares:*", ), com_github_circonus_labs_libcircllhist = dict( sha256 = "8165aa25e529d7d4b9ae849d3bf30371255a99d6db0421516abcff23214cdc2c", strip_prefix = "libcircllhist-63a16dd6f2fc7bc841bb17ff92be8318df60e2e1", # 2019-02-11 urls = ["https://github.com/circonus-labs/libcircllhist/archive/63a16dd6f2fc7bc841bb17ff92be8318df60e2e1.tar.gz"], + use_category = ["observability"], + cpe = "N/A", ), com_github_cyan4973_xxhash = dict( sha256 = "952ebbf5b11fbf59ae5d760a562d1e9112278f244340ad7714e8556cbe54f7f7", strip_prefix = "xxHash-0.7.3", urls = ["https://github.com/Cyan4973/xxHash/archive/v0.7.3.tar.gz"], + use_category = ["dataplane", "controlplane"], + cpe = "N/A", ), com_github_envoyproxy_sqlparser = dict( - sha256 = "b2d3882698cf85b64c87121e208ce0b24d5fe2a00a5d058cf4571f1b25b45403", - strip_prefix = "sql-parser-b14d010afd4313f2372a1cc96aa2327e674cc798", - # 2020-01-10 - urls = ["https://github.com/envoyproxy/sql-parser/archive/b14d010afd4313f2372a1cc96aa2327e674cc798.tar.gz"], + sha256 = "96c10c8e950a141a32034f19b19cdeb1da48fe859cf96ae5e19f894f36c62c71", + strip_prefix = "sql-parser-3b40ba2d106587bdf053a292f7e3bb17e818a57f", + # 2020-06-10 + urls = ["https://github.com/envoyproxy/sql-parser/archive/3b40ba2d106587bdf053a292f7e3bb17e818a57f.tar.gz"], + use_category = 
["dataplane"], + cpe = "N/A", ), com_github_mirror_tclap = dict( sha256 = "f0ede0721dddbb5eba3a47385a6e8681b14f155e1129dd39d1a959411935098f", strip_prefix = "tclap-tclap-1-2-1-release-final", urls = ["https://github.com/mirror/tclap/archive/tclap-1-2-1-release-final.tar.gz"], + use_category = ["other"], ), com_github_fmtlib_fmt = dict( - sha256 = "f1907a58d5e86e6c382e51441d92ad9e23aea63827ba47fd647eacc0d3a16c78", - strip_prefix = "fmt-6.0.0", - urls = ["https://github.com/fmtlib/fmt/archive/6.0.0.tar.gz"], + sha256 = "5014aacf55285bf79654539791de0d6925063fddf4dfdd597ef76b53eb994f86", + strip_prefix = "fmt-e2ff910675c7800e5c4e28e1509ca6a50bdceafa", + # 2020-04-29 + urls = ["https://github.com/fmtlib/fmt/archive/e2ff910675c7800e5c4e28e1509ca6a50bdceafa.tar.gz"], + use_category = ["observability"], + cpe = "N/A", ), com_github_gabime_spdlog = dict( - sha256 = "afd18f62d1bc466c60bef088e6b637b0284be88c515cedc59ad4554150af6043", - strip_prefix = "spdlog-1.4.0", - urls = ["https://github.com/gabime/spdlog/archive/v1.4.0.tar.gz"], + sha256 = "378a040d91f787aec96d269b0c39189f58a6b852e4cbf9150ccfacbe85ebbbfc", + strip_prefix = "spdlog-1.6.1", + urls = ["https://github.com/gabime/spdlog/archive/v1.6.1.tar.gz"], + use_category = ["observability"], + cpe = "N/A", ), com_github_google_libprotobuf_mutator = dict( - sha256 = "", - strip_prefix = "libprotobuf-mutator-3521f47a2828da9ace403e4ecc4aece1a84feb36", - # 2020-02-04 - urls = ["https://github.com/google/libprotobuf-mutator/archive/3521f47a2828da9ace403e4ecc4aece1a84feb36.tar.gz"], + sha256 = "d51365191580c4bf5e9ff104eebcfe34f7ff5f471006d7a460c15dcb3657501c", + strip_prefix = "libprotobuf-mutator-7a2ed51a6b682a83e345ff49fc4cfd7ca47550db", + # 2020-06-25 + urls = ["https://github.com/google/libprotobuf-mutator/archive/7a2ed51a6b682a83e345ff49fc4cfd7ca47550db.tar.gz"], + use_category = ["test"], ), com_github_gperftools_gperftools = dict( - # TODO(cmluciano): Bump to release 2.8 - # The currently used version is specifically 
chosen to fix ppc64le builds that require inclusion - # of asm/ptrace.h, and also s390x builds that require special handling of mmap syscall. - sha256 = "97f0bc2b389c29305f5d1d8cc4d95e9212c33b55827ae65476fc761d78e3ec5d", - strip_prefix = "gperftools-gperftools-2.7.90", - urls = ["https://github.com/gperftools/gperftools/archive/gperftools-2.7.90.tar.gz"], + sha256 = "240deacdd628b6459671b83eb0c4db8e97baadf659f25b92e9a078d536bd513e", + strip_prefix = "gperftools-2.8", + urls = ["https://github.com/gperftools/gperftools/releases/download/gperftools-2.8/gperftools-2.8.tar.gz"], + use_category = ["test"], ), com_github_grpc_grpc = dict( # TODO(JimmyCYJ): Bump to release 1.27 # This sha on grpc:v1.25.x branch is specifically chosen to fix gRPC STS call credential options. sha256 = "bbc8f020f4e85ec029b047fab939b8c81f3d67254b5c724e1003a2bc49ddd123", strip_prefix = "grpc-d8f4928fa779f6005a7fe55a176bdb373b0f910f", + # 2020-02-11 urls = ["https://github.com/grpc/grpc/archive/d8f4928fa779f6005a7fe55a176bdb373b0f910f.tar.gz"], + use_category = ["dataplane", "controlplane"], + cpe = "cpe:2.3:a:grpc:grpc:*", ), com_github_luajit_luajit = dict( sha256 = "409f7fe570d3c16558e594421c47bdd130238323c9d6fd6c83dedd2aaeb082a8", strip_prefix = "LuaJIT-2.1.0-beta3", urls = ["https://github.com/LuaJIT/LuaJIT/archive/v2.1.0-beta3.tar.gz"], + use_category = ["dataplane"], + cpe = "N/A", ), com_github_moonjit_moonjit = dict( sha256 = "83deb2c880488dfe7dd8ebf09e3b1e7613ef4b8420de53de6f712f01aabca2b6", strip_prefix = "moonjit-2.2.0", urls = ["https://github.com/moonjit/moonjit/archive/2.2.0.tar.gz"], + use_category = ["dataplane"], + cpe = "N/A", ), com_github_nghttp2_nghttp2 = dict( - sha256 = "eb9d9046495a49dd40c7ef5d6c9907b51e5a6b320ea6e2add11eb8b52c982c47", - strip_prefix = "nghttp2-1.40.0", - urls = ["https://github.com/nghttp2/nghttp2/releases/download/v1.40.0/nghttp2-1.40.0.tar.gz"], + sha256 = "eacc6f0f8543583ecd659faf0a3f906ed03826f1d4157b536b4b385fe47c5bb8", + strip_prefix = 
"nghttp2-1.41.0", + urls = ["https://github.com/nghttp2/nghttp2/releases/download/v1.41.0/nghttp2-1.41.0.tar.gz"], + use_category = ["dataplane"], + cpe = "cpe:2.3:a:nghttp2:nghttp2:*", ), io_opentracing_cpp = dict( sha256 = "015c4187f7a6426a2b5196f0ccd982aa87f010cf61f507ae3ce5c90523f92301", strip_prefix = "opentracing-cpp-1.5.1", urls = ["https://github.com/opentracing/opentracing-cpp/archive/v1.5.1.tar.gz"], + use_category = ["observability"], + cpe = "N/A", ), com_lightstep_tracer_cpp = dict( sha256 = "0e99716598c010e56bc427ea3482be5ad2c534be8b039d172564deec1264a213", strip_prefix = "lightstep-tracer-cpp-3efe2372ee3d7c2138d6b26e542d757494a7938d", # 2020-03-24 urls = ["https://github.com/lightstep/lightstep-tracer-cpp/archive/3efe2372ee3d7c2138d6b26e542d757494a7938d.tar.gz"], + use_category = ["observability"], + cpe = "N/A", ), com_github_datadog_dd_opentracing_cpp = dict( - sha256 = "6dc1088ab7f788b6c849fbaa6300517c8fdf88991a70b778be79c284c36857bf", - strip_prefix = "dd-opentracing-cpp-1.1.3", - urls = ["https://github.com/DataDog/dd-opentracing-cpp/archive/v1.1.3.tar.gz"], + sha256 = "b84fd2fb0bb0578af4901db31d1c0ae909b532a1016fe6534cbe31a6c3ad6924", + strip_prefix = "dd-opentracing-cpp-1.1.5", + urls = ["https://github.com/DataDog/dd-opentracing-cpp/archive/v1.1.5.tar.gz"], + use_category = ["observability"], + cpe = "N/A", ), com_github_google_benchmark = dict( - sha256 = "3c6a165b6ecc948967a1ead710d4a181d7b0fbcaa183ef7ea84604994966221a", - strip_prefix = "benchmark-1.5.0", - urls = ["https://github.com/google/benchmark/archive/v1.5.0.tar.gz"], + sha256 = "23082937d1663a53b90cb5b61df4bcc312f6dee7018da78ba00dd6bd669dfef2", + strip_prefix = "benchmark-1.5.1", + urls = ["https://github.com/google/benchmark/archive/v1.5.1.tar.gz"], + use_category = ["test"], ), com_github_libevent_libevent = dict( - sha256 = "549d34065eb2485dfad6c8de638caaa6616ed130eec36dd978f73b6bdd5af113", + sha256 = "4c80e5fe044ce5f8055b20a2f141ee32ec2614000f3e95d2aa81611a4c8f5213", # This 
SHA includes the new "prepare" and "check" watchers, used for event loop performance # stats (see https://github.com/libevent/libevent/pull/793) and the fix for a race condition # in the watchers (see https://github.com/libevent/libevent/pull/802). # This also includes the fixes for https://github.com/libevent/libevent/issues/806 # and https://github.com/lyft/envoy-mobile/issues/215. - # TODO(mergeconflict): Update to v2.2 when it is released. - strip_prefix = "libevent-0d7d85c2083f7a4c9efe01c061486f332b576d28", - # 2019-07-02 - urls = ["https://github.com/libevent/libevent/archive/0d7d85c2083f7a4c9efe01c061486f332b576d28.tar.gz"], + # This also includes the fixes for Phantom events with EV_ET (see + # https://github.com/libevent/libevent/issues/984). + # This also includes the wepoll backend for Windows (see + # https://github.com/libevent/libevent/pull/1006) + # TODO(adip): Update to v2.2 when it is released. + strip_prefix = "libevent-62c152d9a7cd264b993dad730c4163c6ede2e0a3", + # 2020-07-31 + urls = ["https://github.com/libevent/libevent/archive/62c152d9a7cd264b993dad730c4163c6ede2e0a3.tar.gz"], + use_category = ["dataplane"], + cpe = "cpe:2.3:a:libevent_project:libevent:*", ), net_zlib = dict( # Use the dev branch of zlib to resolve fuzz bugs and out of bound @@ -171,163 +258,273 @@ REPOSITORY_LOCATIONS = dict( strip_prefix = "zlib-79baebe50e4d6b73ae1f8b603f0ef41300110aa3", # 2019-04-14 development branch urls = ["https://github.com/madler/zlib/archive/79baebe50e4d6b73ae1f8b603f0ef41300110aa3.tar.gz"], + use_category = ["dataplane"], + cpe = "cpe:2.3:a:gnu:zlib:*", ), com_github_jbeder_yaml_cpp = dict( - sha256 = "77ea1b90b3718aa0c324207cb29418f5bced2354c2e483a9523d98c3460af1ed", - strip_prefix = "yaml-cpp-yaml-cpp-0.6.3", - urls = ["https://github.com/jbeder/yaml-cpp/archive/yaml-cpp-0.6.3.tar.gz"], + sha256 = "79ab7069ef1c7c3632e7ffe095f7185d4c77b64d8035db3c085c239d4fe96d5f", + strip_prefix = "yaml-cpp-98acc5a8874faab28b82c28936f4b400b389f5d6", + # 
2020-07-28 + urls = ["https://github.com/greenhouse-org/yaml-cpp/archive/98acc5a8874faab28b82c28936f4b400b389f5d6.tar.gz"], + use_category = ["dataplane"], + cpe = "N/A", ), com_github_msgpack_msgpack_c = dict( sha256 = "433cbcd741e1813db9ae4b2e192b83ac7b1d2dd7968a3e11470eacc6f4ab58d2", strip_prefix = "msgpack-3.2.1", urls = ["https://github.com/msgpack/msgpack-c/releases/download/cpp-3.2.1/msgpack-3.2.1.tar.gz"], + use_category = ["observability"], + cpe = "N/A", ), com_github_google_jwt_verify = dict( - sha256 = "d422a6eadd4bcdd0f9b122cd843a4015f8b18aebea6e1deb004bd4d401a8ef92", - strip_prefix = "jwt_verify_lib-40e2cc938f4bcd059a97dc6c73f59ecfa5a71bac", - # 2020-02-11 - urls = ["https://github.com/google/jwt_verify_lib/archive/40e2cc938f4bcd059a97dc6c73f59ecfa5a71bac.tar.gz"], + sha256 = "f1fde4f3ebb3b2d841332c7a02a4b50e0529a19709934c63bc6208d1bbe28fb1", + strip_prefix = "jwt_verify_lib-7276a339af8426724b744216f619c99152f8c141", + # 2020-07-09 + urls = ["https://github.com/google/jwt_verify_lib/archive/7276a339af8426724b744216f619c99152f8c141.tar.gz"], + use_category = ["dataplane"], + cpe = "N/A", ), com_github_nodejs_http_parser = dict( sha256 = "8fa0ab8770fd8425a9b431fdbf91623c4d7a9cdb842b9339289bd2b0b01b0d3d", strip_prefix = "http-parser-2.9.3", urls = ["https://github.com/nodejs/http-parser/archive/v2.9.3.tar.gz"], + use_category = ["dataplane"], + cpe = "cpe:2.3:a:nodejs:node.js:*", ), com_github_pallets_jinja = dict( sha256 = "db49236731373e4f3118af880eb91bb0aa6978bc0cf8b35760f6a026f1a9ffc4", strip_prefix = "jinja-2.10.3", urls = ["https://github.com/pallets/jinja/archive/2.10.3.tar.gz"], + use_category = ["build"], ), com_github_pallets_markupsafe = dict( sha256 = "222a10e3237d92a9cd45ed5ea882626bc72bc5e0264d3ed0f2c9129fa69fc167", strip_prefix = "markupsafe-1.1.1/src", urls = ["https://github.com/pallets/markupsafe/archive/1.1.1.tar.gz"], + use_category = ["build"], ), com_github_tencent_rapidjson = dict( sha256 = 
"a2faafbc402394df0fa94602df4b5e4befd734aad6bb55dfef46f62fcaf1090b", strip_prefix = "rapidjson-dfbe1db9da455552f7a9ad5d2aea17dd9d832ac1", # Changes through 2019-12-02 urls = ["https://github.com/Tencent/rapidjson/archive/dfbe1db9da455552f7a9ad5d2aea17dd9d832ac1.tar.gz"], + use_category = ["dataplane"], + cpe = "cpe:2.3:a:tencent:rapidjson:*", ), com_github_twitter_common_lang = dict( sha256 = "56d1d266fd4767941d11c27061a57bc1266a3342e551bde3780f9e9eb5ad0ed1", strip_prefix = "twitter.common.lang-0.3.9/src", urls = ["https://files.pythonhosted.org/packages/08/bc/d6409a813a9dccd4920a6262eb6e5889e90381453a5f58938ba4cf1d9420/twitter.common.lang-0.3.9.tar.gz"], + use_category = ["dataplane"], + cpe = "N/A", ), com_github_twitter_common_rpc = dict( sha256 = "0792b63fb2fb32d970c2e9a409d3d00633190a22eb185145fe3d9067fdaa4514", strip_prefix = "twitter.common.rpc-0.3.9/src", urls = ["https://files.pythonhosted.org/packages/be/97/f5f701b703d0f25fbf148992cd58d55b4d08d3db785aad209255ee67e2d0/twitter.common.rpc-0.3.9.tar.gz"], + use_category = ["dataplane"], + cpe = "N/A", ), com_github_twitter_common_finagle_thrift = dict( sha256 = "1e3a57d11f94f58745e6b83348ecd4fa74194618704f45444a15bc391fde497a", strip_prefix = "twitter.common.finagle-thrift-0.3.9/src", urls = ["https://files.pythonhosted.org/packages/f9/e7/4f80d582578f8489226370762d2cf6bc9381175d1929eba1754e03f70708/twitter.common.finagle-thrift-0.3.9.tar.gz"], + use_category = ["dataplane"], + cpe = "N/A", ), com_google_googletest = dict( sha256 = "9dc9157a9a1551ec7a7e43daea9a694a0bb5fb8bec81235d8a1e6ef64c716dcb", strip_prefix = "googletest-release-1.10.0", urls = ["https://github.com/google/googletest/archive/release-1.10.0.tar.gz"], + use_category = ["test"], ), com_google_protobuf = dict( sha256 = "d7cfd31620a352b2ee8c1ed883222a0d77e44346643458e062e86b1d069ace3e", strip_prefix = "protobuf-3.10.1", urls = ["https://github.com/protocolbuffers/protobuf/releases/download/v3.10.1/protobuf-all-3.10.1.tar.gz"], + use_category = 
["dataplane", "controlplane"], + cpe = "N/A", ), grpc_httpjson_transcoding = dict( sha256 = "62c8cb5ea2cca1142cde9d4a0778c52c6022345c3268c60ef81666946b958ad5", strip_prefix = "grpc-httpjson-transcoding-faf8af1e9788cd4385b94c8f85edab5ea5d4b2d6", # 2020-03-02 urls = ["https://github.com/grpc-ecosystem/grpc-httpjson-transcoding/archive/faf8af1e9788cd4385b94c8f85edab5ea5d4b2d6.tar.gz"], + use_category = ["dataplane"], + cpe = "N/A", ), io_bazel_rules_go = dict( - sha256 = "e88471aea3a3a4f19ec1310a55ba94772d087e9ce46e41ae38ecebe17935de7b", - urls = ["https://github.com/bazelbuild/rules_go/releases/download/v0.20.3/rules_go-v0.20.3.tar.gz"], + sha256 = "0310e837aed522875791750de44408ec91046c630374990edd51827cb169f616", + urls = ["https://github.com/bazelbuild/rules_go/releases/download/v0.23.7/rules_go-v0.23.7.tar.gz"], + use_category = ["build"], + ), + rules_cc = dict( + sha256 = "9d48151ea71b3e225adfb6867e6d2c7d0dce46cbdc8710d9a9a628574dfd40a0", + strip_prefix = "rules_cc-818289e5613731ae410efb54218a4077fb9dbb03", + # 2020-05-13 + # TODO(lizan): pin to a point releases when there's a released version. 
+ urls = ["https://github.com/bazelbuild/rules_cc/archive/818289e5613731ae410efb54218a4077fb9dbb03.tar.gz"], + use_category = ["build"], ), rules_foreign_cc = dict( - sha256 = "3184c244b32e65637a74213fc448964b687390eeeca42a36286f874c046bba15", - strip_prefix = "rules_foreign_cc-7bc4be735b0560289f6b86ab6136ee25d20b65b7", - # 2019-09-26 - urls = ["https://github.com/bazelbuild/rules_foreign_cc/archive/7bc4be735b0560289f6b86ab6136ee25d20b65b7.tar.gz"], + sha256 = "7ca49ac5b0bc8f5a2c9a7e87b7f86aca604bda197259c9b96f8b7f0a4f38b57b", + strip_prefix = "rules_foreign_cc-f54b7ae56dcf1b81bcafed3a08d58fc08ac095a7", + # 2020-06-09 + urls = ["https://github.com/bazelbuild/rules_foreign_cc/archive/f54b7ae56dcf1b81bcafed3a08d58fc08ac095a7.tar.gz"], + use_category = ["build"], ), rules_python = dict( - sha256 = "aa96a691d3a8177f3215b14b0edc9641787abaaa30363a080165d06ab65e1161", - urls = ["https://github.com/bazelbuild/rules_python/releases/download/0.0.1/rules_python-0.0.1.tar.gz"], + sha256 = "76a8fd4e7eca2a3590f816958faa0d83c9b2ce9c32634c5c375bcccf161d3bb5", + strip_prefix = "rules_python-a0fbf98d4e3a232144df4d0d80b577c7a693b570", + # 2020-04-09 + # TODO(htuch): revert back to a point releases when pip3_import appears. 
+ urls = ["https://github.com/bazelbuild/rules_python/archive/a0fbf98d4e3a232144df4d0d80b577c7a693b570.tar.gz"], + use_category = ["build"], ), six = dict( sha256 = "d16a0141ec1a18405cd4ce8b4613101da75da0e9a7aec5bdd4fa804d0e0eba73", urls = ["https://files.pythonhosted.org/packages/dd/bf/4138e7bfb757de47d1f4b6994648ec67a51efe58fa907c1e11e350cddfca/six-1.12.0.tar.gz"], + use_category = ["other"], ), io_opencensus_cpp = dict( - sha256 = "193ffb4e13bd7886757fd22b61b7f7a400634412ad8e7e1071e73f57bedd7fc6", - strip_prefix = "opencensus-cpp-04ed0211931f12b03c1a76b3907248ca4db7bc90", - # 2020-03-24 - urls = ["https://github.com/census-instrumentation/opencensus-cpp/archive/04ed0211931f12b03c1a76b3907248ca4db7bc90.tar.gz"], + sha256 = "12ff300fa804f97bd07e2ff071d969e09d5f3d7bbffeac438c725fa52a51a212", + strip_prefix = "opencensus-cpp-7877337633466358ed680f9b26967da5b310d7aa", + # 2020-06-01 + urls = ["https://github.com/census-instrumentation/opencensus-cpp/archive/7877337633466358ed680f9b26967da5b310d7aa.tar.gz"], + use_category = ["observability"], + cpe = "N/A", ), com_github_curl = dict( sha256 = "01ae0c123dee45b01bbaef94c0bc00ed2aec89cb2ee0fd598e0d302a6b5e0a98", strip_prefix = "curl-7.69.1", urls = ["https://github.com/curl/curl/releases/download/curl-7_69_1/curl-7.69.1.tar.gz"], + use_category = ["dataplane"], + cpe = "N/A", ), com_googlesource_chromium_v8 = dict( # This archive was created using https://storage.googleapis.com/envoyproxy-wee8/wee8-archive.sh # and contains complete checkout of V8 with all dependencies necessary to build wee8. 
- sha256 = "03ff00e41cf259db473dfade9548493e4a2372c0b701a66cd7ff76215bd55a64", - urls = ["https://storage.googleapis.com/envoyproxy-wee8/wee8-8.1.307.28.tar.gz"], + sha256 = "cc6f5357cd10922bfcf667bd882624ad313e21b009b919ce00f322f390012476", + urls = ["https://storage.googleapis.com/envoyproxy-wee8/wee8-8.3.110.9.tar.gz"], + use_category = ["dataplane"], + cpe = "N/A", ), com_googlesource_quiche = dict( - # Static snapshot of https://quiche.googlesource.com/quiche/+archive/41c9fdead26b31deefae3c325a2cf1a873688ba3.tar.gz - sha256 = "75af53154402e1654cfd32d8aaeed5fab4dbb79d3cab8c9866019d5369c1889e", - urls = ["https://storage.googleapis.com/quiche-envoy-integration/41c9fdead26b31deefae3c325a2cf1a873688ba3.tar.gz"], + # Static snapshot of https://quiche.googlesource.com/quiche/+archive/96bd860bec207d4b722ab7f319fa47be129a85cd.tar.gz + sha256 = "d7129a2f41f2bd00a8a38b33f9b7b955d3e7de3dec20f69b70d7000d3a856360", + urls = ["https://storage.googleapis.com/quiche-envoy-integration/96bd860bec207d4b722ab7f319fa47be129a85cd.tar.gz"], + use_category = ["dataplane"], + cpe = "N/A", ), com_googlesource_googleurl = dict( - # Static snapshot of https://quiche.googlesource.com/quiche/+archive/googleurl_dbf5ad147f60afc125e99db7549402af49a5eae8.tar.gz - sha256 = "b40cd22cadba577b7281a76db66f6a66dd744edbad8cc2c861c2c976ef721e4d", - urls = ["https://storage.googleapis.com/quiche-envoy-integration/googleurl_dbf5ad147f60afc125e99db7549402af49a5eae8.tar.gz"], + # Static snapshot of https://quiche.googlesource.com/quiche/+archive/ef0d23689e240e6c8de4c3a5296b209128c87373.tar.gz. 
+ sha256 = "d769283fed1319bca68bae8bdd47fbc3a7933999329eee850eff1f1ea61ce176", + # 2020-08-05 + urls = ["https://storage.googleapis.com/quiche-envoy-integration/googleurl_ef0d23689e240e6c8de4c3a5296b209128c87373.tar.gz"], + use_category = ["dataplane"], + cpe = "N/A", ), com_google_cel_cpp = dict( - sha256 = "326ec397b55e39f48bd5380ccded1af5b04653ee96e769cd4d694f9a3bacef50", - strip_prefix = "cel-cpp-80e1cca533190d537a780ad007e8db64164c582e", - # 2020-02-26 - urls = ["https://github.com/google/cel-cpp/archive/80e1cca533190d537a780ad007e8db64164c582e.tar.gz"], + sha256 = "cad7d01139947d78e413d112cb8f7431fbb33cf66b0adf9c280824803fc2a72e", + strip_prefix = "cel-cpp-b9453a09b28a1531c4917e8792b3ea61f6b1a447", + # 2020-07-14 + urls = ["https://github.com/google/cel-cpp/archive/b9453a09b28a1531c4917e8792b3ea61f6b1a447.tar.gz"], + use_category = ["dataplane"], + cpe = "N/A", + ), + com_github_google_flatbuffers = dict( + sha256 = "b8efbc25721e76780752bad775a97c3f77a0250271e2db37fc747b20e8b0f24a", + strip_prefix = "flatbuffers-a83caf5910644ba1c421c002ef68e42f21c15f9f", + urls = ["https://github.com/google/flatbuffers/archive/a83caf5910644ba1c421c002ef68e42f21c15f9f.tar.gz"], + use_category = ["dataplane"], + cpe = "N/A", ), com_googlesource_code_re2 = dict( - sha256 = "04ee2aaebaa5038554683329afc494e684c30f82f2a1e47eb62450e59338f84d", - strip_prefix = "re2-2020-03-03", - urls = ["https://github.com/google/re2/archive/2020-03-03.tar.gz"], + sha256 = "2e9489a31ae007c81e90e8ec8a15d62d58a9c18d4fd1603f6441ef248556b41f", + strip_prefix = "re2-2020-07-06", + # 2020-07-06 + urls = ["https://github.com/google/re2/archive/2020-07-06.tar.gz"], + use_category = ["dataplane"], + cpe = "N/A", ), # Included to access FuzzedDataProvider.h. This is compiler agnostic but # provided as part of the compiler-rt source distribution. We can't use the # Clang variant as we are not a Clang-LLVM only shop today. 
org_llvm_releases_compiler_rt = dict( - sha256 = "56e4cd96dd1d8c346b07b4d6b255f976570c6f2389697347a6c3dcb9e820d10e", + sha256 = "6a7da64d3a0a7320577b68b9ca4933bdcab676e898b759850e827333c3282c75", # Only allow peeking at fuzzer related files for now. - strip_prefix = "compiler-rt-9.0.0.src/lib", - urls = ["http://releases.llvm.org/9.0.0/compiler-rt-9.0.0.src.tar.xz"], - ), - fuzzit_linux = dict( - sha256 = "9ca76ac1c22d9360936006efddf992977ebf8e4788ded8e5f9d511285c9ac774", - urls = ["https://github.com/fuzzitdev/fuzzit/releases/download/v2.4.76/fuzzit_Linux_x86_64.zip"], + strip_prefix = "compiler-rt-10.0.0.src", + urls = ["https://github.com/llvm/llvm-project/releases/download/llvmorg-10.0.0/compiler-rt-10.0.0.src.tar.xz"], + use_category = ["test"], ), upb = dict( sha256 = "e9f281c56ab1eb1f97a80ca8a83bb7ef73d230eabb8591f83876f4e7b85d9b47", strip_prefix = "upb-8a3ae1ef3e3e3f26b45dec735c5776737fc7247f", # 2019-11-19 urls = ["https://github.com/protocolbuffers/upb/archive/8a3ae1ef3e3e3f26b45dec735c5776737fc7247f.tar.gz"], + use_category = ["dataplane", "controlplane"], + cpe = "N/A", ), kafka_source = dict( sha256 = "e7b748a62e432b5770db6dbb3b034c68c0ea212812cb51603ee7f3a8a35f06be", strip_prefix = "kafka-2.4.0/clients/src/main/resources/common/message", urls = ["https://github.com/apache/kafka/archive/2.4.0.zip"], + use_category = ["dataplane"], + cpe = "cpe:2.3:a:apache:kafka:*", ), kafka_server_binary = dict( sha256 = "b9582bab0c3e8d131953b1afa72d6885ca1caae0061c2623071e7f396f2ccfee", strip_prefix = "kafka_2.12-2.4.0", urls = ["http://us.mirrors.quenda.co/apache/kafka/2.4.0/kafka_2.12-2.4.0.tgz"], + use_category = ["test"], ), kafka_python_client = dict( sha256 = "454bf3aafef9348017192417b7f0828a347ec2eaf3efba59336f3a3b68f10094", strip_prefix = "kafka-python-2.0.0", urls = ["https://github.com/dpkp/kafka-python/archive/2.0.0.tar.gz"], + use_category = ["test"], + ), + org_unicode_icuuc = dict( + strip_prefix = "icu", + sha256 = 
"94a80cd6f251a53bd2a997f6f1b5ac6653fe791dfab66e1eb0227740fb86d5dc", + urls = ["https://github.com/unicode-org/icu/releases/download/release-67-1/icu4c-67_1-src.tgz"], + use_category = ["dataplane"], + cpe = "cpe:2.3:a:icu-project:international_components_for_unicode", + ), + proxy_wasm_cpp_sdk = dict( + sha256 = "7d9e1f2e299215ed3e5fa8c8149740872b1100cfe3230fc639f967d9dcfd812e", + strip_prefix = "proxy-wasm-cpp-sdk-5cec30b448975e1fd3f4117311f0957309df5cb0", + urls = ["https://github.com/proxy-wasm/proxy-wasm-cpp-sdk/archive/5cec30b448975e1fd3f4117311f0957309df5cb0.tar.gz"], + use_category = ["dataplane"], + cpe = "N/A", + ), + proxy_wasm_cpp_host = dict( + sha256 = "494d3f81156b92bac640c26000497fbf3a7b1bc35f9789594280450c6e5d8129", + strip_prefix = "proxy-wasm-cpp-host-928db4d79ec7b90aea3ad13ea5df36dc60c9c31d", + urls = ["https://github.com/proxy-wasm/proxy-wasm-cpp-host/archive/928db4d79ec7b90aea3ad13ea5df36dc60c9c31d.tar.gz"], + use_category = ["dataplane"], + cpe = "N/A", + ), + emscripten_toolchain = dict( + sha256 = "2bdbee6947e32ad1e03cd075b48fda493ab16157b2b0225b445222cd528e1843", + patch_cmds = [ + "./emsdk install 1.39.19-upstream", + "./emsdk activate --embedded 1.39.19-upstream", + ], + strip_prefix = "emsdk-dec8a63594753fe5f4ad3b47850bf64d66c14a4e", + urls = ["https://github.com/emscripten-core/emsdk/archive/dec8a63594753fe5f4ad3b47850bf64d66c14a4e.tar.gz"], + use_category = ["build"], + ), + rules_antlr = dict( + sha256 = "7249d1569293d9b239e23c65f6b4c81a07da921738bde0dfeb231ed98be40429", + strip_prefix = "rules_antlr-3cc2f9502a54ceb7b79b37383316b23c4da66f9a", + urls = ["https://github.com/marcohu/rules_antlr/archive/3cc2f9502a54ceb7b79b37383316b23c4da66f9a.tar.gz"], + use_category = ["build"], + ), + antlr4_runtimes = dict( + sha256 = "4d0714f441333a63e50031c9e8e4890c78f3d21e053d46416949803e122a6574", + strip_prefix = "antlr4-4.7.1", + urls = ["https://github.com/antlr/antlr4/archive/4.7.1.tar.gz"], + use_category = ["build"], ), ) diff --git 
a/bazel/rules_go.patch b/bazel/rules_go.patch new file mode 100644 index 0000000000000..39f99ebb189bf --- /dev/null +++ b/bazel/rules_go.patch @@ -0,0 +1,30 @@ +# +# Bazel RBE on Windows GCP workers currently will not invoke cmd.exe batch files correctly +# +# Symptom is program not found 'bazel-out', because of the way that the CreateProcess command +# is constructed by bazel with actions.run with forward slashes, e.g. the command +# cmd.exe /c "bazel-out/host/bin/external/go_sdk/builder.exe.bat" +# where cmd.exe on GCP is treating 'bazel-out' as the target, and /host as a command line switch. +# This problem was not observed on Azure CI pipelines or locally by the developers. The eventual +# fix is not specific to rules_go; this patch simply addresses immediate breakage and can be removed +# once the underlying issue within Bazel/RBE is fixed. +# See: +# - https://github.com/bazelbuild/rules_go/pull/2542 +# - https://github.com/envoyproxy/envoy/issues/11657 +# +diff --git a/go/private/rules/binary.bzl b/go/private/rules/binary.bzl +index b88dfd96..e68b5ece 100644 +--- a/go/private/rules/binary.bzl ++++ b/go/private/rules/binary.bzl +@@ -128,8 +128,9 @@ def _go_tool_binary_impl(ctx): + content = cmd, + ) + ctx.actions.run( +- executable = bat, +- inputs = sdk.libs + sdk.headers + sdk.tools + ctx.files.srcs + [sdk.go], ++ executable = "cmd.exe", ++ arguments = ["/S", "/C", bat.path.replace("/", "\\")], ++ inputs = sdk.libs + sdk.headers + sdk.tools + ctx.files.srcs + [sdk.go, bat], + outputs = [cout], + env = {"GOROOT": sdk.root_file.dirname}, # NOTE(#2005): avoid realpath in sandbox + mnemonic = "GoToolchainBinaryCompile", diff --git a/bazel/setup_clang.sh b/bazel/setup_clang.sh index 4fd8a2bf8a5d4..0ed987b9d4d0b 100755 --- a/bazel/setup_clang.sh +++ b/bazel/setup_clang.sh @@ -1,6 +1,6 @@ #!/bin/bash -BAZELRC_FILE="$(bazel info workspace)/clang.bazelrc" +BAZELRC_FILE="${BAZELRC_FILE:-$(bazel info workspace)/clang.bazelrc}" LLVM_PREFIX=$1 @@ -14,20 +14,19 @@ export 
PATH="$(${LLVM_PREFIX}/bin/llvm-config --bindir):${PATH}" RT_LIBRARY_PATH="$(dirname $(find $(llvm-config --libdir) -name libclang_rt.ubsan_standalone_cxx-x86_64.a | head -1))" echo "# Generated file, do not edit. If you want to disable clang, just delete this file. -build:clang --action_env=PATH=${PATH} +build:clang --action_env='PATH=${PATH}' build:clang --action_env=CC=clang build:clang --action_env=CXX=clang++ -build:clang --action_env=LLVM_CONFIG=${LLVM_PREFIX}/bin/llvm-config -build:clang --repo_env=LLVM_CONFIG=${LLVM_PREFIX}/bin/llvm-config -build:clang --linkopt=-L$(llvm-config --libdir) -build:clang --linkopt=-Wl,-rpath,$(llvm-config --libdir) +build:clang --action_env='LLVM_CONFIG=${LLVM_PREFIX}/bin/llvm-config' +build:clang --repo_env='LLVM_CONFIG=${LLVM_PREFIX}/bin/llvm-config' +build:clang --linkopt='-L$(llvm-config --libdir)' +build:clang --linkopt='-Wl,-rpath,$(llvm-config --libdir)' build:clang-asan --action_env=ENVOY_UBSAN_VPTR=1 build:clang-asan --copt=-fsanitize=vptr,function build:clang-asan --linkopt=-fsanitize=vptr,function -build:clang-asan --linkopt=-L${RT_LIBRARY_PATH} +build:clang-asan --linkopt='-L${RT_LIBRARY_PATH}' build:clang-asan --linkopt=-l:libclang_rt.ubsan_standalone-x86_64.a build:clang-asan --linkopt=-l:libclang_rt.ubsan_standalone_cxx-x86_64.a - " > ${BAZELRC_FILE} diff --git a/bazel/sh_test_wrapper.sh b/bazel/sh_test_wrapper.sh index 262a122864633..9e2f1138dea73 100755 --- a/bazel/sh_test_wrapper.sh +++ b/bazel/sh_test_wrapper.sh @@ -6,4 +6,6 @@ cd $(dirname "$0") -"$@" +if [ $# -gt 0 ]; then + "./$@" +fi diff --git a/bazel/test/BUILD b/bazel/test/BUILD new file mode 100644 index 0000000000000..0a40c2f107a0a --- /dev/null +++ b/bazel/test/BUILD @@ -0,0 +1,3 @@ +licenses(["notice"]) # Apache 2 + +exports_files(["verify_tap_test.sh"]) diff --git a/bazel/test/verify_tap_test.sh b/bazel/test/verify_tap_test.sh new file mode 100755 index 0000000000000..4a047e27e9072 --- /dev/null +++ b/bazel/test/verify_tap_test.sh @@ -0,0 +1,16 @@ 
+#!/bin/bash + +set -ex + +# Clear existing tap directory if previous run wasn't in sandbox +rm -rf tap + +mkdir -p tap +TAP_TMP="$(realpath tap)" + +TAP_PATH="${TAP_TMP}/tap" "$@" + +# TODO(htuch): Check for pcap, now CI (with or without RBE) does have +# enough capabilities. +# Verify that some pb_text files have been created. +ls -l "${TAP_TMP}"/tap_*.pb_text > /dev/null diff --git a/bazel/test_for_benchmark_wrapper.sh b/bazel/test_for_benchmark_wrapper.sh index 7c1dc7a1def62..37de6d0d0d810 100755 --- a/bazel/test_for_benchmark_wrapper.sh +++ b/bazel/test_for_benchmark_wrapper.sh @@ -1,4 +1,6 @@ #!/bin/bash -# Set the benchmark time to 0 to just verify that the benchmark runs to completion. -"${TEST_SRCDIR}/envoy/$@" --benchmark_min_time=0 +# Set the benchmark time to 0 to just verify that the benchmark runs to +# completion. We're interacting with two different flag parsers, so the order +# of flags and the -- matters. +"${TEST_SRCDIR}/envoy/$@" --skip_expensive_benchmarks -- --benchmark_min_time=0 diff --git a/bazel/wasm/BUILD b/bazel/wasm/BUILD new file mode 100644 index 0000000000000..779d1695d3b7c --- /dev/null +++ b/bazel/wasm/BUILD @@ -0,0 +1 @@ +licenses(["notice"]) # Apache 2 diff --git a/bazel/wasm/wasm.bzl b/bazel/wasm/wasm.bzl new file mode 100644 index 0000000000000..65fefcb49e909 --- /dev/null +++ b/bazel/wasm/wasm.bzl @@ -0,0 +1,67 @@ +load("@rules_cc//cc:defs.bzl", "cc_binary") + +def _wasm_transition_impl(settings, attr): + return { + "//command_line_option:cpu": "wasm32", + "//command_line_option:crosstool_top": "@proxy_wasm_cpp_sdk//toolchain:emscripten", + + # Overriding copt/cxxopt/linkopt to prevent sanitizers/coverage options leak + # into WASM build configuration + "//command_line_option:copt": [], + "//command_line_option:cxxopt": [], + "//command_line_option:linkopt": [], + "//command_line_option:collect_code_coverage": "false", + } + +wasm_transition = transition( + implementation = _wasm_transition_impl, + inputs = [], + outputs = [ + 
"//command_line_option:cpu", + "//command_line_option:crosstool_top", + "//command_line_option:copt", + "//command_line_option:cxxopt", + "//command_line_option:linkopt", + "//command_line_option:collect_code_coverage", + ], +) + +def _wasm_binary_impl(ctx): + out = ctx.actions.declare_file(ctx.label.name) + ctx.actions.run_shell( + command = 'cp "{}" "{}"'.format(ctx.files.binary[0].path, out.path), + outputs = [out], + inputs = ctx.files.binary, + ) + + return [DefaultInfo(runfiles = ctx.runfiles([out]))] + +# WASM binary rule implementation. +# This copies the binary specified in binary attribute in WASM configuration to +# target configuration, so a binary in non-WASM configuration can depend on them. +wasm_binary = rule( + implementation = _wasm_binary_impl, + attrs = { + "binary": attr.label(mandatory = True, cfg = wasm_transition), + "_whitelist_function_transition": attr.label(default = "@bazel_tools//tools/whitelists/function_transition_whitelist"), + }, +) + +def wasm_cc_binary(name, **kwargs): + wasm_name = "_wasm_" + name + kwargs.setdefault("additional_linker_inputs", ["@proxy_wasm_cpp_sdk//:jslib"]) + kwargs.setdefault("linkopts", ["--js-library external/proxy_wasm_cpp_sdk/proxy_wasm_intrinsics.js"]) + kwargs.setdefault("visibility", ["//visibility:public"]) + cc_binary( + name = wasm_name, + # Adding manual tag it won't be built in non-WASM (e.g. x86_64 config) + # when an wildcard is specified, but it will be built in WASM configuration + # when the wasm_binary below is built. 
+ tags = ["manual"], + **kwargs + ) + + wasm_binary( + name = name, + binary = ":" + wasm_name, + ) diff --git a/ci/Dockerfile-envoy b/ci/Dockerfile-envoy index 8e65dcaf55f5d..ee6709912db9a 100644 --- a/ci/Dockerfile-envoy +++ b/ci/Dockerfile-envoy @@ -1,5 +1,22 @@ -FROM ubuntu:16.04 +ARG BUILD_FROM=ubuntu:18.04 +# Build stage +FROM $BUILD_FROM as build + +RUN apt-get update \ + && apt-get upgrade -y \ + && apt-get install --no-install-recommends -y ca-certificates curl gcc libc-dev \ + && echo "d6c40440609a23483f12eb6295b5191e94baf08298a856bab6e15b10c3b82891 /tmp/su-exec.c" > /tmp/checksum \ + && curl -o /tmp/su-exec.c https://raw.githubusercontent.com/ncopa/su-exec/212b75144bbc06722fbd7661f651390dc47a43d1/su-exec.c \ + && sha256sum -c /tmp/checksum \ + && gcc -Wall /tmp/su-exec.c -o/usr/local/bin/su-exec \ + && chown root:root /usr/local/bin/su-exec \ + && chmod 0755 /usr/local/bin/su-exec + + +# Final stage +FROM $BUILD_FROM +ARG TARGETPLATFORM RUN apt-get update \ && apt-get upgrade -y \ && apt-get install -y ca-certificates \ @@ -8,9 +25,12 @@ RUN apt-get update \ && rm -rf /tmp/* /var/tmp/* \ && rm -rf /var/lib/apt/lists/* +COPY --from=build /usr/local/bin/su-exec /usr/local/bin/su-exec +RUN adduser --group --system envoy + RUN mkdir -p /etc/envoy -ADD build_release_stripped/envoy /usr/local/bin/envoy +ADD ${TARGETPLATFORM}/build_release_stripped/envoy /usr/local/bin/envoy ADD configs/google_com_proxy.v2.yaml /etc/envoy/envoy.yaml EXPOSE 10000 diff --git a/ci/Dockerfile-envoy-alpine b/ci/Dockerfile-envoy-alpine index 6d993080d3b25..b9bf2320af230 100644 --- a/ci/Dockerfile-envoy-alpine +++ b/ci/Dockerfile-envoy-alpine @@ -1,9 +1,11 @@ -FROM frolvlad/alpine-glibc - +FROM frolvlad/alpine-glibc:alpine-3.12_glibc-2.31 RUN mkdir -p /etc/envoy -ADD build_release_stripped/envoy /usr/local/bin/envoy +ADD linux/amd64/build_release_stripped/envoy /usr/local/bin/envoy + ADD configs/google_com_proxy.v2.yaml /etc/envoy/envoy.yaml +RUN apk add --no-cache shadow su-exec \ + 
&& addgroup -S envoy && adduser --no-create-home -S envoy -G envoy EXPOSE 10000 diff --git a/ci/Dockerfile-envoy-alpine-debug b/ci/Dockerfile-envoy-alpine-debug index 56162717ae645..c58df8ccd211e 100644 --- a/ci/Dockerfile-envoy-alpine-debug +++ b/ci/Dockerfile-envoy-alpine-debug @@ -1,9 +1,10 @@ -FROM frolvlad/alpine-glibc - +FROM frolvlad/alpine-glibc:alpine-3.12_glibc-2.31 RUN mkdir -p /etc/envoy -ADD build_release/envoy /usr/local/bin/envoy +ADD linux/amd64/build_release/envoy /usr/local/bin/envoy ADD configs/google_com_proxy.v2.yaml /etc/envoy/envoy.yaml +RUN apk add --no-cache shadow su-exec \ + && addgroup -S envoy && adduser --no-create-home -S envoy -G envoy EXPOSE 10000 diff --git a/ci/Dockerfile-envoy-google-vrp b/ci/Dockerfile-envoy-google-vrp new file mode 100644 index 0000000000000..868b6a5840df9 --- /dev/null +++ b/ci/Dockerfile-envoy-google-vrp @@ -0,0 +1,22 @@ +FROM envoyproxy/envoy:local + +RUN apt-get update \ + && apt-get upgrade -y \ + && apt-get install -y libc++1 supervisor gdb strace tshark \ + && apt-get autoremove -y \ + && apt-get clean \ + && rm -rf /tmp/* /var/tmp/* \ + && rm -rf /var/lib/apt/lists/* + +ADD configs/google-vrp/envoy-edge.yaml /etc/envoy/envoy-edge.yaml +ADD configs/google-vrp/envoy-origin.yaml /etc/envoy/envoy-origin.yaml +ADD configs/google-vrp/launch_envoy.sh /usr/local/bin/launch_envoy.sh +ADD configs/google-vrp/supervisor.conf /etc/supervisor.conf +ADD test/config/integration/certs/serverkey.pem /etc/envoy/certs/serverkey.pem +ADD test/config/integration/certs/servercert.pem /etc/envoy/certs/servercert.pem +# ADD %local envoy bin% /usr/local/bin/envoy + +EXPOSE 10000 +EXPOSE 10001 + +CMD ["supervisord", "-c", "/etc/supervisor.conf"] diff --git a/ci/README.md b/ci/README.md index deecdb41cc1f4..b7d39ecab1943 100644 --- a/ci/README.md +++ b/ci/README.md @@ -1,14 +1,16 @@ # Developer use of CI Docker images -Two flavors of Envoy Docker images, based on Ubuntu and Alpine Linux, are built. 
+There are two available flavors of Envoy Docker images for Linux, based on Ubuntu and Alpine Linux +and an image based on Windows2019. ## Ubuntu Envoy image + The Ubuntu based Envoy Docker image at [`envoyproxy/envoy-build:`](https://hub.docker.com/r/envoyproxy/envoy-build/) is used for CircleCI checks, where `` is specified in [`envoy_build_sha.sh`](https://github.com/envoyproxy/envoy/blob/master/ci/envoy_build_sha.sh). Developers -may work with `envoyproxy/envoy-build:latest` to provide a self-contained environment for building Envoy binaries and -running tests that reflects the latest built Ubuntu Envoy image. Moreover, the Docker image -at [`envoyproxy/envoy:`](https://hub.docker.com/r/envoyproxy/envoy/) is an image that has an Envoy binary at `/usr/local/bin/envoy`. The `` -corresponds to the master commit at which the binary was compiled. Lastly, `envoyproxy/envoy-dev:latest` contains an Envoy +may work with the latest build image SHA in [envoy-build-tools](https://github.com/envoyproxy/envoy-build-tools/blob/master/toolchains/rbe_toolchains_config.bzl#L8) +repo to provide a self-contained environment for building Envoy binaries and running tests that reflects the latest built Ubuntu Envoy image. +Moreover, the Docker image at [`envoyproxy/envoy-dev:`](https://hub.docker.com/r/envoyproxy/envoy-dev/) is an image that has an Envoy binary at `/usr/local/bin/envoy`. +The `` corresponds to the master commit at which the binary was compiled. Lastly, `envoyproxy/envoy-dev:latest` contains an Envoy binary built from the latest tip of master that passed tests. ## Alpine Envoy image @@ -18,18 +20,27 @@ one with an Envoy binary with debug (`envoyproxy/envoy-alpine-debug`) symbols an Both images are pushed with two different tags: `` and `latest`. Parallel to the Ubuntu images above, `` corresponds to the master commit at which the binary was compiled, and `latest` corresponds to a binary built from the latest tip of master that passed tests. 
+## Windows 2019 Envoy image + +The Windows 2019 based Envoy Docker image at [`envoyproxy/envoy-build-windows2019:`](https://hub.docker.com/r/envoyproxy/envoy-build-windows2019/) +is used for CI checks, where `` is specified in [`envoy_build_sha.sh`](https://github.com/envoyproxy/envoy/blob/master/ci/envoy_build_sha.sh). +Developers may work with the most recent `envoyproxy/envoy-build-windows2019` image to provide a self-contained environment for building Envoy binaries and +running tests that reflects the latest built Windows 2019 Envoy image. + # Build image base and compiler versions -Currently there are three build images: +Currently there are three build images for Linux and one for Windows: * `envoyproxy/envoy-build` — alias to `envoyproxy/envoy-build-ubuntu`. -* `envoyproxy/envoy-build-ubuntu` — based on Ubuntu 16.04 (Xenial) with GCC 7 and Clang 9 compiler. -* `envoyproxy/envoy-build-centos` — based on CentOS 7 with GCC 7 and Clang 9 compiler, this image is experimental and not well tested. +* `envoyproxy/envoy-build-ubuntu` — based on Ubuntu 18.04 (Bionic) with GCC 9 and Clang 10 compiler. +* `envoyproxy/envoy-build-centos` — based on CentOS 7 with GCC 9 and Clang 10 compiler, this image is experimental and not well tested. +* `envoyproxy/envoy-build-windows2019` — based on Windows 2019 LTS with VS 2019 Build Tools. The source for these images is located in the [envoyproxy/envoy-build-tools](https://github.com/envoyproxy/envoy-build-tools) repository. -We use the Clang compiler for all CI runs with tests. We have an additional CI run with GCC which builds binary only. +We use the Clang compiler for all Linux CI runs with tests. We have an additional Linux CI run with GCC which builds binary only. +Currently, Windows CI builds the static Envoy binary only. # C++ standard library @@ -40,6 +51,8 @@ run `./ci/do_ci.sh` as described below. 
# Building and running tests as a developer +## On Linux + An example basic invocation to build a developer version of the Envoy static binary (using the Bazel `fastbuild` type) is: ```bash @@ -114,7 +127,7 @@ The `./ci/run_envoy_docker.sh './ci/do_ci.sh '` targets are: * `bazel.fuzz ` — build and run a specified fuzz test or test dir under `-c dbg --config=asan-fuzzer` with clang. If specifying a single fuzz test, must use the full target name with "_with_libfuzzer" for ``. * `bazel.compile_time_options` — build Envoy and run tests with various compile-time options toggled to their non-default state, to ensure they still build. * `bazel.compile_time_options ` — build Envoy and run a specified test or test dir with various compile-time options toggled to their non-default state, to ensure they still build. -* `bazel.clang_tidy` — build and run clang-tidy over all source files. +* `bazel.clang_tidy ` — build and run clang-tidy specified source files, if no files specified, runs against the diff with the last GitHub commit. * `check_format`— run `clang-format` and `buildifier` on entire source tree. * `fix_format`— run and enforce `clang-format` and `buildifier` on entire source tree. * `check_spelling`— run `misspell` on entire project. @@ -122,6 +135,24 @@ The `./ci/run_envoy_docker.sh './ci/do_ci.sh '` targets are: * `check_spelling_pedantic`— run `aspell` on C++ and proto comments. * `docs`— build documentation tree in `generated/docs`. +## On Windows + +An example basic invocation to build the Envoy static binary and run tests is: + +```bash +./ci/run_envoy_docker_windows.sh './ci/windows_ci_steps.sh' +``` + +You can modify `./ci/windows_ci_steps.sh` to modify `bazel` arguments, tests to run, etc. 
+ +If you would like to run an interactive session to keep the build container running (to persist your local build environment), run: + +```bash +./ci/run_envoy_docker_windows.sh 'bash' +``` + +From an interactive session, you can invoke `bazel` manually or use the `./ci/windows_ci_steps.sh` script to build and run tests. + # Testing changes to the build image as a developer While all changes to the build image should eventually be upstreamed, it can be useful to @@ -145,8 +176,7 @@ The macOS CI build is part of the [CircleCI](https://circleci.com/gh/envoyproxy/ Dependencies are installed by the `ci/mac_ci_setup.sh` script, via [Homebrew](https://brew.sh), which is pre-installed on the CircleCI macOS image. The dependencies are cached are re-installed on every build. The `ci/mac_ci_steps.sh` script executes the specific commands that -build and test Envoy. If Envoy cannot be built (`error: /Library/Developer/CommandLineTools/usr/bin/libtool: no output file specified (specify with -o output)`), -ensure that Xcode is installed. +build and test Envoy. Note that the full version of Xcode (not just Command Line Tools) is required. 
# Coverity Scan Build Flow diff --git a/ci/WORKSPACE.filter.example b/ci/WORKSPACE.filter.example index db20a3146ac9a..0159ddfd78214 100644 --- a/ci/WORKSPACE.filter.example +++ b/ci/WORKSPACE.filter.example @@ -17,6 +17,10 @@ load("@envoy//bazel:repositories.bzl", "envoy_dependencies") envoy_dependencies() +load("@envoy//bazel:repositories_extra.bzl", "envoy_dependencies_extra") + +envoy_dependencies_extra() + load("@envoy//bazel:dependency_imports.bzl", "envoy_dependency_imports") envoy_dependency_imports() diff --git a/ci/build_setup.sh b/ci/build_setup.sh index 29e3098bf1483..93330224137d9 100755 --- a/ci/build_setup.sh +++ b/ci/build_setup.sh @@ -9,8 +9,10 @@ export PPROF_PATH=/thirdparty_build/bin/pprof [ -z "${NUM_CPUS}" ] && NUM_CPUS=`grep -c ^processor /proc/cpuinfo` [ -z "${ENVOY_SRCDIR}" ] && export ENVOY_SRCDIR=/source [ -z "${ENVOY_BUILD_TARGET}" ] && export ENVOY_BUILD_TARGET=//source/exe:envoy-static +[ -z "${ENVOY_BUILD_ARCH}" ] && export ENVOY_BUILD_ARCH=$(uname -m) echo "ENVOY_SRCDIR=${ENVOY_SRCDIR}" echo "ENVOY_BUILD_TARGET=${ENVOY_BUILD_TARGET}" +echo "ENVOY_BUILD_ARCH=${ENVOY_BUILD_ARCH}" function setup_gcc_toolchain() { if [[ ! -z "${ENVOY_STDLIB}" && "${ENVOY_STDLIB}" != "libstdc++" ]]; then @@ -45,14 +47,6 @@ function setup_clang_toolchain() { echo "clang toolchain with ${ENVOY_STDLIB} configured" } -# Create a fake home. Python site libs tries to do getpwuid(3) if we don't and the CI -# Docker image gets confused as it has no passwd entry when running non-root -# unless we do this. -FAKE_HOME=/tmp/fake_home -mkdir -p "${FAKE_HOME}" -export HOME="${FAKE_HOME}" -export PYTHONUSERBASE="${FAKE_HOME}" - export BUILD_DIR=${BUILD_DIR:-/build} if [[ ! -d "${BUILD_DIR}" ]] then @@ -61,10 +55,8 @@ then fi # Environment setup. 
-export USER=bazel export TEST_TMPDIR=${BUILD_DIR}/tmp -export BAZEL="bazel" -export PATH=/opt/llvm/bin:$PATH +export PATH=/opt/llvm/bin:${PATH} export CLANG_FORMAT="${CLANG_FORMAT:-clang-format}" if [[ -f "/etc/redhat-release" ]]; then @@ -83,16 +75,19 @@ trap cleanup EXIT export LLVM_ROOT="${LLVM_ROOT:-/opt/llvm}" "$(dirname "$0")"/../bazel/setup_clang.sh "${LLVM_ROOT}" -[[ "${BUILD_REASON}" != "PullRequest" ]] && BAZEL_EXTRA_TEST_OPTIONS+=" --nocache_test_results --test_output=all" +[[ "${BUILD_REASON}" != "PullRequest" ]] && BAZEL_EXTRA_TEST_OPTIONS+=" --nocache_test_results" export BAZEL_QUERY_OPTIONS="${BAZEL_OPTIONS}" -export BAZEL_BUILD_OPTIONS="--verbose_failures ${BAZEL_OPTIONS} --action_env=HOME --action_env=PYTHONUSERBASE \ - --local_cpu_resources=${NUM_CPUS} --show_task_finish --experimental_generate_json_trace_profile \ - --test_env=HOME --test_env=PYTHONUSERBASE --test_output=errors \ - --repository_cache=${BUILD_DIR}/repository_cache --experimental_repository_cache_hardlinks \ +# Use https://docs.bazel.build/versions/master/command-line-reference.html#flag--experimental_repository_cache_hardlinks +# to save disk space. +export BAZEL_BUILD_OPTIONS=" ${BAZEL_OPTIONS} --verbose_failures --show_task_finish --experimental_generate_json_trace_profile \ + --build_event_json_file=${BUILD_DIR}/build_event.json \ + --test_output=errors --repository_cache=${BUILD_DIR}/repository_cache --experimental_repository_cache_hardlinks \ ${BAZEL_BUILD_EXTRA_OPTIONS} ${BAZEL_EXTRA_TEST_OPTIONS}" -[[ "${BAZEL_EXPUNGE}" == "1" ]] && "${BAZEL}" clean --expunge +[[ "${ENVOY_BUILD_ARCH}" == "aarch64" ]] && BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_OPTIONS} --test_env=HEAPCHECK=" + +[[ "${BAZEL_EXPUNGE}" == "1" ]] && bazel clean --expunge # Also setup some space for building Envoy standalone. 
export ENVOY_BUILD_DIR="${BUILD_DIR}"/envoy @@ -103,8 +98,10 @@ export ENVOY_DELIVERY_DIR="${ENVOY_BUILD_DIR}"/source/exe mkdir -p "${ENVOY_DELIVERY_DIR}" # This is where we copy the coverage report to. -export ENVOY_COVERAGE_DIR="${ENVOY_BUILD_DIR}"/generated/coverage -mkdir -p "${ENVOY_COVERAGE_DIR}" +export ENVOY_COVERAGE_ARTIFACT="${ENVOY_BUILD_DIR}"/generated/coverage.tar.gz + +# This is where we copy the fuzz coverage report to. +export ENVOY_FUZZ_COVERAGE_ARTIFACT="${ENVOY_BUILD_DIR}"/generated/fuzz_coverage.tar.gz # This is where we dump failed test logs for CI collection. export ENVOY_FAILED_TEST_LOGS="${ENVOY_BUILD_DIR}"/generated/failed-testlogs diff --git a/ci/coverage_publish.sh b/ci/coverage_publish.sh deleted file mode 100755 index c04eafff0323a..0000000000000 --- a/ci/coverage_publish.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/bash - -# Do not ever set -x here, it is a security hazard as it will place the credentials below in the -# CircleCI logs. -set -e - -if [ "${CIRCLECI}" != "true" ]; then - exit 0 -fi - -[[ -z "${ENVOY_BUILD_DIR}" ]] && ENVOY_BUILD_DIR=/build -COVERAGE_FILE="${ENVOY_BUILD_DIR}/envoy/generated/coverage/index.html" - -if [ ! -f "${COVERAGE_FILE}" ]; then - echo "ERROR: Coverage file not found." - exit 1 -fi - -# available for master builds -if [ -z "$CIRCLE_PR_NUMBER" ] -then - echo "Uploading coverage report..." - - BRANCH_NAME="${CIRCLE_BRANCH}" - COVERAGE_DIR="$(dirname "${COVERAGE_FILE}")" - GCS_LOCATION="envoy-coverage/report-${BRANCH_NAME}" - - echo ${GCP_SERVICE_ACCOUNT_KEY} | base64 --decode | gcloud auth activate-service-account --key-file=- - gsutil -m rsync -dr ${COVERAGE_DIR} gs://${GCS_LOCATION} - echo "Coverage report for branch '${BRANCH_NAME}': https://storage.googleapis.com/${GCS_LOCATION}/index.html" -else - echo "Coverage report will not be uploaded for this build." 
-fi diff --git a/ci/do_ci.sh b/ci/do_ci.sh index cc321a6dffca6..40fa5312b805c 100755 --- a/ci/do_ci.sh +++ b/ci/do_ci.sh @@ -16,10 +16,23 @@ SRCDIR="${PWD}" . "$(dirname "$0")"/build_setup.sh $build_setup_args cd "${SRCDIR}" +if [[ "${ENVOY_BUILD_ARCH}" == "x86_64" ]]; then + BUILD_ARCH_DIR="/linux/amd64" +elif [[ "${ENVOY_BUILD_ARCH}" == "aarch64" ]]; then + BUILD_ARCH_DIR="/linux/arm64" +else + # Fall back to use the ENVOY_BUILD_ARCH itself. + BUILD_ARCH_DIR="/linux/${ENVOY_BUILD_ARCH}" +fi + echo "building using ${NUM_CPUS} CPUs" +echo "building for ${ENVOY_BUILD_ARCH}" function collect_build_profile() { - cp -f "$(bazel info output_base)/command.profile" "${ENVOY_BUILD_PROFILE}/$1.profile" || true + declare -g build_profile_count=${build_profile_count:-1} + mv -f "$(bazel info output_base)/command.profile.gz" "${ENVOY_BUILD_PROFILE}/${build_profile_count}-$1.profile.gz" || true + mv -f ${BUILD_DIR}/build_event.json "${ENVOY_BUILD_PROFILE}/${build_profile_count}-$1.build_event.json" || true + ((build_profile_count++)) } function bazel_with_collection() { @@ -42,7 +55,6 @@ function bazel_with_collection() { function cp_binary_for_outside_access() { DELIVERY_LOCATION="$1" - ENVOY_BIN=$(echo "${ENVOY_BUILD_TARGET}" | sed -e 's#^@\([^/]*\)/#external/\1#;s#^//##;s#:#/#') cp -f \ bazel-bin/"${ENVOY_BIN}" \ "${ENVOY_DELIVERY_DIR}"/"${DELIVERY_LOCATION}" @@ -50,18 +62,19 @@ function cp_binary_for_outside_access() { function cp_binary_for_image_build() { # TODO(mattklein123): Replace this with caching and a different job which creates images. + local BASE_TARGET_DIR="${ENVOY_SRCDIR}${BUILD_ARCH_DIR}" echo "Copying binary for image build..." 
- mkdir -p "${ENVOY_SRCDIR}"/build_"$1" - cp -f "${ENVOY_DELIVERY_DIR}"/envoy "${ENVOY_SRCDIR}"/build_"$1" - mkdir -p "${ENVOY_SRCDIR}"/build_"$1"_stripped - strip "${ENVOY_DELIVERY_DIR}"/envoy -o "${ENVOY_SRCDIR}"/build_"$1"_stripped/envoy + mkdir -p "${BASE_TARGET_DIR}"/build_"$1" + cp -f "${ENVOY_DELIVERY_DIR}"/envoy "${BASE_TARGET_DIR}"/build_"$1" + mkdir -p "${BASE_TARGET_DIR}"/build_"$1"_stripped + strip "${ENVOY_DELIVERY_DIR}"/envoy -o "${BASE_TARGET_DIR}"/build_"$1"_stripped/envoy # Copy for azp which doesn't preserve permissions, creating a tar archive - tar czf "${ENVOY_BUILD_DIR}"/envoy_binary.tar.gz -C "${ENVOY_SRCDIR}" build_"$1" build_"$1"_stripped + tar czf "${ENVOY_BUILD_DIR}"/envoy_binary.tar.gz -C "${BASE_TARGET_DIR}" build_"$1" build_"$1"_stripped # Remove binaries to save space, only if BUILD_REASON exists (running in AZP) [[ -z "${BUILD_REASON}" ]] || \ - rm -rf "${ENVOY_SRCDIR}"/build_"$1" "${ENVOY_SRCDIR}"/build_"$1"_stripped "${ENVOY_DELIVERY_DIR}"/envoy \ + rm -rf "${BASE_TARGET_DIR}"/build_"$1" "${BASE_TARGET_DIR}"/build_"$1"_stripped "${ENVOY_DELIVERY_DIR}"/envoy \ bazel-bin/"${ENVOY_BIN}" } @@ -81,6 +94,11 @@ function bazel_binary_build() { fi echo "Building..." + ENVOY_BIN=$(echo "${ENVOY_BUILD_TARGET}" | sed -e 's#^@\([^/]*\)/#external/\1#;s#^//##;s#:#/#') + + # This is a workaround for https://github.com/bazelbuild/bazel/issues/11834 + [[ ! -z "${ENVOY_RBE}" ]] && rm -rf bazel-bin/"${ENVOY_BIN}"* + bazel build ${BAZEL_BUILD_OPTIONS} -c "${COMPILE_TYPE}" "${ENVOY_BUILD_TARGET}" ${CONFIG_ARGS} collect_build_profile "${BINARY_TYPE}"_build @@ -92,12 +110,15 @@ function bazel_binary_build() { } CI_TARGET=$1 +shift -if [[ $# -gt 1 ]]; then - shift - TEST_TARGETS=$* +if [[ $# -ge 1 ]]; then + COVERAGE_TEST_TARGETS=$* + TEST_TARGETS="$COVERAGE_TEST_TARGETS" else - TEST_TARGETS=//test/... + # Coverage test will add QUICHE tests by itself. + COVERAGE_TEST_TARGETS=//test/... 
+ TEST_TARGETS="${COVERAGE_TEST_TARGETS} @com_googlesource_quiche//:ci_tests" fi if [[ "$CI_TARGET" == "bazel.release" ]]; then @@ -107,14 +128,14 @@ if [[ "$CI_TARGET" == "bazel.release" ]]; then # toolchain is kept consistent. This ifdef is checked in # test/common/stats/stat_test_utility.cc when computing # Stats::TestUtil::MemoryTest::mode(). - BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_OPTIONS} --test_env=ENVOY_MEMORY_TEST_EXACT=true" + [[ "${ENVOY_BUILD_ARCH}" == "x86_64" ]] && BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_OPTIONS} --test_env=ENVOY_MEMORY_TEST_EXACT=true" setup_clang_toolchain - echo "bazel release build with tests..." - bazel_binary_build release - echo "Testing ${TEST_TARGETS}" bazel_with_collection test ${BAZEL_BUILD_OPTIONS} -c opt ${TEST_TARGETS} + + echo "bazel release build with tests..." + bazel_binary_build release exit 0 elif [[ "$CI_TARGET" == "bazel.release.server_only" ]]; then setup_clang_toolchain @@ -128,22 +149,29 @@ elif [[ "$CI_TARGET" == "bazel.sizeopt.server_only" ]]; then exit 0 elif [[ "$CI_TARGET" == "bazel.sizeopt" ]]; then setup_clang_toolchain + echo "Testing ${TEST_TARGETS}" + bazel_with_collection test ${BAZEL_BUILD_OPTIONS} --config=sizeopt ${TEST_TARGETS} + echo "bazel size optimized build with tests..." bazel_binary_build sizeopt - echo "Testing ${TEST_TARGETS}" - bazel test ${BAZEL_BUILD_OPTIONS} --config=sizeopt ${TEST_TARGETS} exit 0 elif [[ "$CI_TARGET" == "bazel.gcc" ]]; then + BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_OPTIONS} --test_env=HEAPCHECK=" setup_gcc_toolchain - echo "bazel fastbuild build..." - bazel_binary_build fastbuild + + echo "Testing ${TEST_TARGETS}" + bazel_with_collection test ${BAZEL_BUILD_OPTIONS} -c opt ${TEST_TARGETS} + + echo "bazel release build with gcc..." + bazel_binary_build release exit 0 elif [[ "$CI_TARGET" == "bazel.debug" ]]; then setup_clang_toolchain - echo "bazel debug build with tests..." 
- bazel_binary_build debug echo "Testing ${TEST_TARGETS}" bazel test ${BAZEL_BUILD_OPTIONS} -c dbg ${TEST_TARGETS} + + echo "bazel debug build with tests..." + bazel_binary_build debug exit 0 elif [[ "$CI_TARGET" == "bazel.debug.server_only" ]]; then setup_clang_toolchain @@ -166,23 +194,15 @@ elif [[ "$CI_TARGET" == "bazel.asan" ]]; then # works. This requires that we set TAP_PATH. We do this under bazel.asan to # ensure a debug build in CI. echo "Validating integration test traffic tapping..." - TAP_TMP=/tmp/tap/ - rm -rf "${TAP_TMP}" - mkdir -p "${TAP_TMP}" bazel_with_collection test ${BAZEL_BUILD_OPTIONS} \ - --strategy=TestRunner=local --test_env=TAP_PATH="${TAP_TMP}/tap" \ - --test_env=PATH="/usr/sbin:${PATH}" \ + --run_under=@envoy//bazel/test:verify_tap_test.sh \ //test/extensions/transport_sockets/tls/integration:ssl_integration_test - # Verify that some pb_text files have been created. We can't check for pcap, - # since tcpdump is not available in general due to CircleCI lack of support - # for privileged Docker executors. - ls -l "${TAP_TMP}"/tap_*.pb_text > /dev/null exit 0 elif [[ "$CI_TARGET" == "bazel.tsan" ]]; then setup_clang_toolchain echo "bazel TSAN debug build with tests" echo "Building and testing envoy tests ${TEST_TARGETS}" - bazel_with_collection test ${BAZEL_BUILD_OPTIONS} -c dbg --config=clang-tsan --build_tests_only ${TEST_TARGETS} + bazel_with_collection test --config=rbe-toolchain-tsan ${BAZEL_BUILD_OPTIONS} -c dbg --build_tests_only ${TEST_TARGETS} if [ "${ENVOY_BUILD_FILTER_EXAMPLE}" == "1" ]; then echo "Building and testing envoy-filter-example tests..." 
pushd "${ENVOY_FILTER_EXAMPLE_SRCDIR}" @@ -206,7 +226,7 @@ elif [[ "$CI_TARGET" == "bazel.dev" ]]; then bazel_binary_build fastbuild echo "Building and testing ${TEST_TARGETS}" - bazel test ${BAZEL_BUILD_OPTIONS} -c fastbuild ${TEST_TARGETS} + bazel_with_collection test ${BAZEL_BUILD_OPTIONS} -c fastbuild ${TEST_TARGETS} exit 0 elif [[ "$CI_TARGET" == "bazel.compile_time_options" ]]; then # Right now, none of the available compile-time options conflict with each other. If this @@ -221,6 +241,7 @@ elif [[ "$CI_TARGET" == "bazel.compile_time_options" ]]; then --define quiche=enabled \ --define path_normalization_by_default=true \ --define deprecated_features=disabled \ + --define use_new_codecs_in_integration_tests=true \ " ENVOY_STDLIB="${ENVOY_STDLIB:-libstdc++}" setup_clang_toolchain @@ -232,14 +253,20 @@ elif [[ "$CI_TARGET" == "bazel.compile_time_options" ]]; then TEST_TARGETS="@envoy//test/..." fi # Building all the dependencies from scratch to link them against libc++. - echo "Building..." - bazel build ${BAZEL_BUILD_OPTIONS} ${COMPILE_TIME_OPTIONS} -c dbg @envoy//source/exe:envoy-static --build_tag_filters=-nofips echo "Building and testing ${TEST_TARGETS}" - bazel test ${BAZEL_BUILD_OPTIONS} ${COMPILE_TIME_OPTIONS} -c dbg ${TEST_TARGETS} --test_tag_filters=-nofips --build_tests_only + bazel_with_collection test ${BAZEL_BUILD_OPTIONS} ${COMPILE_TIME_OPTIONS} -c dbg ${TEST_TARGETS} --test_tag_filters=-nofips --build_tests_only + + # Legacy codecs "--define legacy_codecs_in_integration_tests=true" should also be tested in + # integration tests with asan. + bazel_with_collection test ${BAZEL_BUILD_OPTIONS} ${COMPILE_TIME_OPTIONS} -c dbg @envoy//test/integration/... --config=clang-asan --build_tests_only # "--define log_debug_assert_in_release=enabled" must be tested with a release build, so run only # these tests under "-c opt" to save time in CI. 
- bazel test ${BAZEL_BUILD_OPTIONS} ${COMPILE_TIME_OPTIONS} -c opt @envoy//test/common/common:assert_test @envoy//test/server:server_test + bazel_with_collection test ${BAZEL_BUILD_OPTIONS} ${COMPILE_TIME_OPTIONS} -c opt @envoy//test/common/common:assert_test @envoy//test/server:server_test + + echo "Building binary..." + bazel build ${BAZEL_BUILD_OPTIONS} ${COMPILE_TIME_OPTIONS} -c dbg @envoy//source/exe:envoy-static --build_tag_filters=-nofips + collect_build_profile build exit 0 elif [[ "$CI_TARGET" == "bazel.api" ]]; then @@ -257,21 +284,20 @@ elif [[ "$CI_TARGET" == "bazel.api" ]]; then # We use custom BAZEL_BUILD_OPTIONS here; the API booster isn't capable of working with libc++ yet. LLVM_CONFIG="${LLVM_ROOT}"/bin/llvm-config BAZEL_BUILD_OPTIONS="--config=clang" python3.8 ./tools/api_boost/api_boost_test.py exit 0 -elif [[ "$CI_TARGET" == "bazel.coverage" ]]; then +elif [[ "$CI_TARGET" == "bazel.coverage" || "$CI_TARGET" == "bazel.fuzz_coverage" ]]; then setup_clang_toolchain - echo "bazel coverage build with tests ${TEST_TARGETS}" + echo "${CI_TARGET} build with tests ${COVERAGE_TEST_TARGETS}" - # Reduce the amount of memory Bazel tries to use to prevent it from launching too many subprocesses. - # This should prevent the system from running out of memory and killing tasks. See discussion on - # https://github.com/envoyproxy/envoy/pull/5611. 
- [ -z "$CIRCLECI" ] || export BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_OPTIONS} --local_ram_resources=12288" + [[ "$CI_TARGET" == "bazel.fuzz_coverage" ]] && export FUZZ_COVERAGE=true - test/run_envoy_bazel_coverage.sh ${TEST_TARGETS} + test/run_envoy_bazel_coverage.sh ${COVERAGE_TEST_TARGETS} collect_build_profile coverage exit 0 elif [[ "$CI_TARGET" == "bazel.clang_tidy" ]]; then + # clang-tidy will warn on standard library issues with libc++ + ENVOY_STDLIB="libstdc++" setup_clang_toolchain - NUM_CPUS=$NUM_CPUS ci/run_clang_tidy.sh + NUM_CPUS=$NUM_CPUS ci/run_clang_tidy.sh "$@" exit 0 elif [[ "$CI_TARGET" == "bazel.coverity" ]]; then # Coverity Scan version 2017.07 fails to analyze the entirely of the Envoy @@ -297,14 +323,6 @@ elif [[ "$CI_TARGET" == "bazel.fuzz" ]]; then echo "Building envoy fuzzers and executing 100 fuzz iterations..." bazel_with_collection test ${BAZEL_BUILD_OPTIONS} --config=asan-fuzzer ${FUZZ_TEST_TARGETS} --test_arg="-runs=10" exit 0 -elif [[ "$CI_TARGET" == "bazel.fuzzit" ]]; then - setup_clang_toolchain - FUZZ_TEST_TARGETS="$(bazel query "attr('tags','fuzzer',${TEST_TARGETS})")" - echo "bazel ASAN libFuzzer build with fuzz tests ${FUZZ_TEST_TARGETS}" - echo "Building fuzzers and run under Fuzzit" - bazel_with_collection test ${BAZEL_BUILD_OPTIONS} --config=asan-fuzzer ${FUZZ_TEST_TARGETS} \ - --test_env=FUZZIT_API_KEY --test_env=ENVOY_BUILD_IMAGE --test_timeout=1200 --run_under=//bazel:fuzzit_wrapper - exit 0 elif [[ "$CI_TARGET" == "fix_format" ]]; then # proto_format.sh needs to build protobuf. setup_clang_toolchain diff --git a/ci/do_circle_ci.sh b/ci/do_circle_ci.sh index 036a75b1b8cb1..29469a24b814a 100755 --- a/ci/do_circle_ci.sh +++ b/ci/do_circle_ci.sh @@ -11,6 +11,16 @@ if [[ -e "~/.gitconfig" ]]; then mv ~/.gitconfig ~/.gitconfig_save fi +# Workaround for not using ci/run_envoy_docker.sh +# Create a fake home. 
Python site libs tries to do getpwuid(3) if we don't and the CI +# Docker image gets confused as it has no passwd entry when running non-root +# unless we do this. +FAKE_HOME=/tmp/fake_home +mkdir -p "${FAKE_HOME}" +export HOME="${FAKE_HOME}" +export PYTHONUSERBASE="${FAKE_HOME}" +export USER=bazel + export ENVOY_SRCDIR="$(pwd)" # xlarge resource_class. @@ -20,7 +30,8 @@ export NUM_CPUS=6 # CircleCI doesn't support IPv6 by default, so we run all tests with IPv4 only. # IPv6 tests are run with Azure Pipelines. -export BAZEL_EXTRA_TEST_OPTIONS="--test_env=ENVOY_IP_TEST_VERSIONS=v4only" +export BAZEL_BUILD_EXTRA_OPTIONS+="--test_env=ENVOY_IP_TEST_VERSIONS=v4only --local_cpu_resources=${NUM_CPUS} \ + --action_env=HOME --action_env=PYTHONUSERBASE --test_env=HOME --test_env=PYTHONUSERBASE" function finish { echo "disk space at end of build:" diff --git a/ci/docker-entrypoint.sh b/ci/docker-entrypoint.sh index b731653319ad4..677e617e9fce3 100755 --- a/ci/docker-entrypoint.sh +++ b/ci/docker-entrypoint.sh @@ -13,4 +13,16 @@ if [ "$1" = 'envoy' ]; then fi fi -exec "$@" +if [ "$ENVOY_UID" != "0" ]; then + if [ -n "$ENVOY_UID" ]; then + usermod -u "$ENVOY_UID" envoy + fi + if [ -n "$ENVOY_GID" ]; then + groupmod -g "$ENVOY_GID" envoy + fi + # Ensure the envoy user is able to write to container logs + chown envoy:envoy /dev/stdout /dev/stderr + su-exec envoy "${@}" +else + exec "${@}" +fi diff --git a/ci/docker_ci.sh b/ci/docker_ci.sh index d4594df2ffca5..d91af54cda36f 100755 --- a/ci/docker_ci.sh +++ b/ci/docker_ci.sh @@ -4,13 +4,59 @@ # CI logs. 
set -e +# Setting environments for buildx tools +config_env(){ + # Qemu configurations + docker run --rm --privileged multiarch/qemu-user-static --reset -p yes + + # Remove older build instance + docker buildx rm multi-builder | true + docker buildx create --use --name multi-builder --platform linux/arm64,linux/amd64 +} + +build_images(){ + TYPE=$1 + BUILD_TAG=$2 + + # Only build/push envoyproxy/envoy multi-arch images since others still do not support. + if [ -z "${TYPE}" ]; then + docker buildx build --platform linux/arm64 -f ci/Dockerfile-envoy"${TYPE}" -t ${BUILD_TAG} . + # Export envoyproxy/envoy amd64 image which will be used for building envoyproxy/envoy-google-vrp + docker buildx build --platform linux/amd64 -f ci/Dockerfile-envoy"${TYPE}" -o type=docker -t ${BUILD_TAG} . + elif [ "${TYPE}" == "-google-vrp" ]; then + # The envoyproxy/envoy-google-vrp is based on envoyproxy/envoy image. So it is built from cache envoyproxy/envoy:local + docker build -f ci/Dockerfile-envoy"${TYPE}" --cache-from "${DOCKER_IMAGE_PREFIX}:local" -t ${BUILD_TAG} . + else + docker build -f ci/Dockerfile-envoy"${TYPE}" -t ${BUILD_TAG} . + fi +} + +push_images(){ + TYPE=$1 + BUILD_TAG=$2 + + if [ -z "${TYPE}" ]; then + # Only push envoyproxy/envoy multi-arch images since others still do not support. + docker buildx build --platform linux/arm64,linux/amd64 --push -f ci/Dockerfile-envoy"${TYPE}" -t ${BUILD_TAG} . + else + docker tag "${DOCKER_IMAGE_PREFIX}${TYPE}:local" ${BUILD_TAG} + docker push ${BUILD_TAG} + fi +} + # This prefix is altered for the private security images on setec builds. DOCKER_IMAGE_PREFIX="${DOCKER_IMAGE_PREFIX:-envoyproxy/envoy}" +# "-google-vrp" must come afer "" to ensure we rebuild the local base image dependency. +BUILD_TYPES=("" "-alpine" "-alpine-debug" "-google-vrp") + +# Configure docker-buildx tools +config_env + # Test the docker build in all cases, but use a local tag that we will overwrite before push in the # cases where we do push. 
-for BUILD_TYPE in "" "-alpine" "-alpine-debug"; do - docker build -f ci/Dockerfile-envoy"${BUILD_TYPE}" -t "${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}:local" . +for BUILD_TYPE in "${BUILD_TYPES[@]}"; do + build_images "${BUILD_TYPE}" "${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}:local" done MASTER_BRANCH="refs/heads/master" @@ -38,15 +84,17 @@ fi docker login -u "$DOCKERHUB_USERNAME" -p "$DOCKERHUB_PASSWORD" -for BUILD_TYPE in "" "-alpine" "-alpine-debug"; do - docker tag "${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}:local" "${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}${IMAGE_POSTFIX}:${IMAGE_NAME}" - docker push "${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}${IMAGE_POSTFIX}:${IMAGE_NAME}" +for BUILD_TYPE in "${BUILD_TYPES[@]}"; do + push_images "${BUILD_TYPE}" "${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}${IMAGE_POSTFIX}:${IMAGE_NAME}" # Only push latest on master builds. if [[ "${AZP_BRANCH}" == "${MASTER_BRANCH}" ]]; then - docker tag "${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}:local" "${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}${IMAGE_POSTFIX}:latest" - docker push "${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}${IMAGE_POSTFIX}:latest" + push_images "${BUILD_TYPE}" "${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}${IMAGE_POSTFIX}:latest" fi -done - + # Push vX.Y-latest to tag the latest image in a release line + if [[ "${AZP_BRANCH}" =~ ${RELEASE_TAG_REGEX} ]]; then + RELEASE_LINE=$(echo "$IMAGE_NAME" | sed -E 's/(v[0-9]+\.[0-9]+)\.[0-9]+/\1-latest/') + push_images "${BUILD_TYPE}" "${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}${IMAGE_POSTFIX}:${RELEASE_LINE}" + fi +done diff --git a/ci/docker_rebuild_google-vrp.sh b/ci/docker_rebuild_google-vrp.sh new file mode 100755 index 0000000000000..7a6656378d946 --- /dev/null +++ b/ci/docker_rebuild_google-vrp.sh @@ -0,0 +1,53 @@ +#!/bin/bash + +# Script to rebuild Dockerfile-envoy-google-vrp locally (i.e. not in CI) for development purposes. +# This makes use of the latest envoy-dev base image on Docker Hub as the base and takes an +# optional local path for an Envoy binary. 
When a custom local Envoy binary is used, the script +# switches to using ${BASE_DOCKER_IMAGE} for the build, which should be configured to provide +# compatibility with your local build environment (specifically glibc). +# +# Usage: +# +# Basic rebuild of Docker image (tagged envoy-google-vrp:local): +# +# ./ci/docker_rebuild_google-vrp.sh +# +# Basic rebuild of Docker image (tagged envoy-google-vrp:local) with some local Envoy binary: +# +# bazel build //source/exe:envoy-static --config=libc++ -copt +# ./ci/docker_rebuild_google-vrp.sh bazel-bin/source/exe/envoy-static + +set -e + +# This should match your local machine if you are building custom Envoy binaries outside of Docker. +BASE_DOCKER_IMAGE="ubuntu:20.04" + +declare -r BUILD_DIR="$(mktemp -d)" +cp ci/Dockerfile-envoy-google-vrp "${BUILD_DIR}" +declare -r DOCKER_BUILD_FILE="${BUILD_DIR}"/Dockerfile-envoy-google-vrp + +# If we have a local Envoy binary, use a variant of the build environment that supports it. +if [[ -n "$1" ]] +then + # Switch to a base image similar to the local build environment. This provides compatibility of + # locally built Envoy and glibc in the Docker env. + sed -i -e "s#envoyproxy/envoy:local#${BASE_DOCKER_IMAGE}#" "${DOCKER_BUILD_FILE}" + # Copy the binary to deal with symlinks in Bazel cache and Docker daemon confusion. + declare -r LOCAL_ENVOY="envoy-binary" + cp -f "$1" "${PWD}/${LOCAL_ENVOY}" + sed -i -e "s@# ADD %local envoy bin%@ADD ${LOCAL_ENVOY}@" "${DOCKER_BUILD_FILE}" +else + # Don't use the local envoy-dev, but pull from Docker Hub instead, this avoids having to rebuild + # this local dep which is fairly stable. + sed -i -e "s#envoyproxy/envoy:local#envoyproxy/envoy-dev:latest#" "${DOCKER_BUILD_FILE}" +fi + +cat "${DOCKER_BUILD_FILE}" + +docker build -t "envoy-google-vrp:local" -f "${DOCKER_BUILD_FILE}" . 
+ +if [[ -n "$1" ]] +then + rm -f "${LOCAL_ENVOY}" +fi +rm -r "${BUILD_DIR}" diff --git a/ci/filter_example_setup.sh b/ci/filter_example_setup.sh index df1ec083664f8..ade91f673b874 100644 --- a/ci/filter_example_setup.sh +++ b/ci/filter_example_setup.sh @@ -5,7 +5,7 @@ set -e # This is the hash on https://github.com/envoyproxy/envoy-filter-example.git we pin to. -ENVOY_FILTER_EXAMPLE_GITSHA="c6c986cca7ad676cc1c33f2df7515cbbd2e02502" +ENVOY_FILTER_EXAMPLE_GITSHA="777342f20d93b3a50b641556749ad41502a63d09" ENVOY_FILTER_EXAMPLE_SRCDIR="${BUILD_DIR}/envoy-filter-example" export ENVOY_FILTER_EXAMPLE_TESTS="//:echo2_integration_test //http-filter-example:http_filter_integration_test //:envoy_binary_test" diff --git a/ci/mac_ci_setup.sh b/ci/mac_ci_setup.sh index b58c3a3eeed44..b9870f9426968 100755 --- a/ci/mac_ci_setup.sh +++ b/ci/mac_ci_setup.sh @@ -6,6 +6,8 @@ # https://github.com/actions/virtual-environments/blob/master/images/macos/macos-10.15-Readme.md for # a list of pre-installed tools in the macOS image. +export HOMEBREW_NO_AUTO_UPDATE=1 + function is_installed { brew ls --versions "$1" >/dev/null } @@ -38,7 +40,8 @@ fi # Required as bazel and a foreign bazelisk are installed in the latest macos vm image, we have # to unlink/overwrite them to install bazelisk echo "Installing bazelbuild/tap/bazelisk" -brew install --force bazelbuild/tap/bazelisk +brew tap bazelbuild/tap +brew reinstall --force bazelbuild/tap/bazelisk if ! 
brew link --overwrite bazelbuild/tap/bazelisk; then echo "Failed to install and link bazelbuild/tap/bazelisk" exit 1 diff --git a/ci/repokitteh/modules/azure_pipelines.star b/ci/repokitteh/modules/azure_pipelines.star new file mode 100644 index 0000000000000..dc619e06d2263 --- /dev/null +++ b/ci/repokitteh/modules/azure_pipelines.star @@ -0,0 +1,49 @@ +load("github.com/repokitteh/modules/lib/utils.star", "react") + +_azp_context_prefix = "ci/azp: " + +def _retry_azp(organization, project, build_id, token): + """Makes an Azure Pipelines Build API request with retry""" + + url = "https://dev.azure.com/{organization}/{project}/_apis/build/builds/{buildId}?retry=true&api-version=5.1".format(organization = organization, project = project, buildId = build_id) + return http(url = url, method = "PATCH", headers = { + "authorization": "Basic " + token, + "content-type": "application/json;odata=verbose", + }) + +def _get_azp_checks(): + github_checks = github.check_list_runs()["check_runs"] + + check_ids = [] + checks = [] + for check in github_checks: + if check["app"]["slug"] == "azure-pipelines" and check["external_id"] not in check_ids: + check_ids.append(check["external_id"]) + checks.append(check) + + return checks + +def _retry(config, comment_id, command): + msgs = "Retrying Azure Pipelines, to retry CircleCI checks, use `/retest-circle`.\n" + checks = _get_azp_checks() + + retried_checks = [] + for check in checks: + name_with_link = "[{}]({})".format(check["name"], check["details_url"]) + if check["status"] != "completed": + msgs += "Cannot retry non-completed check: {}, please wait.\n".format(name_with_link) + elif check["conclusion"] != "failure": + msgs += "Check {} didn't fail.\n".format(name_with_link) + else: + _, build_id, project = check["external_id"].split("|") + _retry_azp("cncf", project, build_id, config["token"]) + retried_checks.append(name_with_link) + + if len(retried_checks) == 0: + react(comment_id, msgs) + else: + react(comment_id, None) + msgs 
+= "Retried failed jobs in: {}".format(", ".join(retried_checks)) + github.issue_create_comment(msgs) + +handlers.command(name = "retry-azp", func = _retry) diff --git a/ci/repokitteh/modules/ownerscheck.star b/ci/repokitteh/modules/ownerscheck.star new file mode 100644 index 0000000000000..e93010f89a7f6 --- /dev/null +++ b/ci/repokitteh/modules/ownerscheck.star @@ -0,0 +1,241 @@ +# Ownership specified by list of specs, like so: +# +# use( +# "github.com/repokitteh/modules/ownerscheck.star", +# paths=[ +# { +# "owner": "envoyproxy/api-shepherds!", +# "path": "api/", +# "label": "api", +# "allow_global_approval": True, +# "github_status_label" = "any API change", +# }, +# ], +# ) +# +# This module will maintain a commit status per specified path regex (also aka as spec). +# +# Two types of approvals: +# 1. Global approvals, done by approving the PR using Github's review approval feature. +# 2. Partial approval, done by commenting "/lgtm [label]" where label is the label +# associated with the path. This does not affect GitHub's PR approve status, only +# this module's maintained commit status. This approval is automatically revoked +# if any further changes are done to the relevant files in this spec. +# +# By default, 'allow_global_approval' is true and either (1) or (2) above can unblock +# merges. If 'allow_global_approval' is set false, then only (2) will unblock a merge. +# +# 'label' refers to a GitHub label applied to any matching PR. The GitHub check status +# can be customized with `github_status_label`. 
+ +load("text", "match") +load("github.com/repokitteh/modules/lib/utils.star", "react") + +def _store_partial_approval(who, files): + for f in files: + store_put('ownerscheck/partial/%s:%s' % (who, f['filename']), f['sha']) + + +def _is_partially_approved(who, files): + for f in files: + sha = store_get('ownerscheck/partial/%s:%s' % (who, f['filename'])) + if sha != f['sha']: + return False + + return True + + +def _get_relevant_specs(specs, changed_files): + if not specs: + print("no specs") + return [] + + relevant = [] + + for spec in specs: + path_match = spec["path"] + + files = [f for f in changed_files if match(path_match, f['filename'])] + allow_global_approval = spec.get("allow_global_approval", True) + status_label = spec.get("github_status_label", "") + if files: + relevant.append(struct(files=files, + owner=spec["owner"], + label=spec.get("label", None), + path_match=path_match, + allow_global_approval=allow_global_approval, + status_label=status_label)) + + print("specs: %s" % relevant) + + return relevant + + +def _get_global_approvers(): # -> List[str] (owners) + reviews = [{'login': r['user']['login'], 'state': r['state']} for r in github.pr_list_reviews()] + + print("reviews=%s" % reviews) + + return [r['login'] for r in reviews if r['state'] == 'APPROVED'] + + +def _is_approved(spec, approvers): + owner = spec.owner + + if owner[-1] == '!': + owner = owner[:-1] + + required = [owner] + + if '/' in owner: + team_name = owner.split('/')[1] + + # this is a team, parse it. 
+ team_id = github.team_get_by_name(team_name)['id'] + required = [m['login'] for m in github.team_list_members(team_id)] + + print("team %s(%d) = %s" % (team_name, team_id, required)) + + for r in required: + if spec.allow_global_approval and any([a for a in approvers if a == r]): + print("global approver: %s" % r) + return True + + if _is_partially_approved(r, spec.files): + print("partial approval: %s" % r) + return True + + return False + + +def _update_status(owner, status_label, path_match, approved): + changes_to = path_match or '/' + github.create_status( + state=approved and 'success' or 'pending', + context='%s must approve for %s' % (owner, status_label), + description='changes to %s' % changes_to, + ) + +def _get_specs(config): + return _get_relevant_specs(config.get('paths', []), github.pr_list_files()) + +def _reconcile(config, specs=None): + specs = specs or _get_specs(config) + + if not specs: + return [] + + approvers = _get_global_approvers() + + print("approvers: %s" % approvers) + + results = [] + + for spec in specs: + approved = _is_approved(spec, approvers) + + print("%s -> %s" % (spec, approved)) + + results.append((spec, approved)) + + if spec.owner[-1] == '!': + _update_status(spec.owner[:-1], spec.status_label, spec.path_match, approved) + + if spec.label: + if approved: + github.issue_unlabel(spec.label) + else: + github.issue_label(spec.label) + elif spec.label: # fyis + github.issue_label(spec.label) + + return results + + +def _comment(config, results, force=False): + lines = [] + + for spec, approved in results: + if approved: + continue + + mention = spec.owner + + if mention[0] != '@': + mention = '@' + mention + + if mention[-1] == '!': + mention = mention[:-1] + + match_description = spec.path_match + if match_description: + match_description = ' for changes made to `' + match_description + '`' + + mode = spec.owner[-1] == '!' 
and 'approval' or 'fyi' + + key = "ownerscheck/%s/%s" % (spec.owner, spec.path_match) + + if (not force) and (store_get(key) == mode): + mode = 'skip' + else: + store_put(key, mode) + + if mode == 'approval': + lines.append('CC %s: Your approval is needed%s.' % (mention, match_description)) + elif mode == 'fyi': + lines.append('CC %s: FYI only%s.' % (mention, match_description)) + + if lines: + github.issue_create_comment('\n'.join(lines)) + + +def _reconcile_and_comment(config): + _comment(config, _reconcile(config)) + + +def _force_reconcile_and_comment(config): + _comment(config, _reconcile(config), force=True) + + +def _pr(action, config): + if action in ['synchronize', 'opened']: + _reconcile_and_comment(config) + + +def _pr_review(action, review_state, config): + if action != 'submitted' or not review_state: + return + + _reconcile(config) + + +# Partial approvals are done by commenting "/lgtm [label]". +def _lgtm_by_comment(config, comment_id, command, sender, sha): + labels = command.args + + if len(labels) != 1: + react(comment_id, 'please specify a single label can be specified') + return + + label = labels[0] + + specs = [s for s in _get_specs(config) if s.label and s.label == label] + + if len(specs) == 0: + react(comment_id, 'no relevant owners for "%s"' % label) + return + + for spec in specs: + _store_partial_approval(sender, spec.files) + + react(comment_id, None) + + _reconcile(config, specs) + + +handlers.pull_request(func=_pr) +handlers.pull_request_review(func=_pr_review) + +handlers.command(name='checkowners', func=_reconcile) +handlers.command(name='checkowners!', func=_force_reconcile_and_comment) +handlers.command(name='lgtm', func=_lgtm_by_comment) diff --git a/ci/run_clang_tidy.sh b/ci/run_clang_tidy.sh index d5c2697e2b440..8114f4f32bb4a 100755 --- a/ci/run_clang_tidy.sh +++ b/ci/run_clang_tidy.sh @@ -2,7 +2,10 @@ set -eo pipefail -ENVOY_SRCDIR=${ENVOY_SRCDIR:-$(cd $(dirname $0)/.. 
&& pwd)} +# ENVOY_SRCDIR should point to where Envoy source lives, while SRCDIR could be a downstream build +# (for example envoy-filter-example). +[[ -z "${ENVOY_SRCDIR}" ]] && ENVOY_SRCDIR="${PWD}" +[[ -z "${SRCDIR}" ]] && SRCDIR="${ENVOY_SRCDIR}" export LLVM_CONFIG=${LLVM_CONFIG:-llvm-config} LLVM_PREFIX=${LLVM_PREFIX:-$(${LLVM_CONFIG} --prefix)} @@ -21,23 +24,20 @@ rm clang-tidy-config-errors.txt echo "Generating compilation database..." -cp -f .bazelrc .bazelrc.bak - -function cleanup() { - cp -f .bazelrc.bak .bazelrc - rm -f .bazelrc.bak -} -trap cleanup EXIT - # bazel build need to be run to setup virtual includes, generating files which are consumed # by clang-tidy -"${ENVOY_SRCDIR}/tools/gen_compilation_database.py" --run_bazel_build --include_headers +"${ENVOY_SRCDIR}/tools/gen_compilation_database.py" --include_headers # Do not run clang-tidy against win32 impl -# TODO(scw00): We should run clang-tidy against win32 impl. But currently we only have -# linux ci box. +# TODO(scw00): We should run clang-tidy against win32 impl once we have clang-cl support for Windows function exclude_win32_impl() { - grep -v source/common/filesystem/win32/ | grep -v source/common/common/win32 | grep -v source/exe/win32 + grep -v source/common/filesystem/win32/ | grep -v source/common/common/win32 | grep -v source/exe/win32 | grep -v source/common/api/win32 +} + +# Do not run clang-tidy against macOS impl +# TODO: We should run clang-tidy against macOS impl for completeness +function exclude_macos_impl() { + grep -v source/common/filesystem/kqueue/ } # Do not run incremental clang-tidy on check_format testdata files. @@ -45,47 +45,48 @@ function exclude_testdata() { grep -v tools/testdata/check_format/ } -# Do not run clang-tidy against Chromium URL import, this needs to largely -# reflect the upstream structure. -function exclude_chromium_url() { - grep -v source/common/chromium_url/ -} - # Exclude files in third_party which are temporary forks from other OSS projects. 
function exclude_third_party() { grep -v third_party/ } function filter_excludes() { - exclude_testdata | exclude_chromium_url | exclude_win32_impl | exclude_third_party + exclude_testdata | exclude_win32_impl | exclude_macos_impl | exclude_third_party } -if [[ -z "${DIFF_REF}" && "${BUILD_REASON}" != "PullRequest" ]]; then - DIFF_REF=HEAD^ -fi - -if [[ "${RUN_FULL_CLANG_TIDY}" == 1 ]]; then - echo "Running full clang-tidy..." +function run_clang_tidy() { python3 "${LLVM_PREFIX}/share/clang/run-clang-tidy.py" \ -clang-tidy-binary=${CLANG_TIDY} \ -clang-apply-replacements-binary=${CLANG_APPLY_REPLACEMENTS} \ - -export-fixes=${FIX_YAML} \ - -j ${NUM_CPUS:-0} -p 1 -quiet \ - ${APPLY_CLANG_TIDY_FIXES:+-fix} -elif [[ -n "${DIFF_REF}" ]]; then - echo "Running clang-tidy-diff against ref ${DIFF_REF}" - git diff ${DIFF_REF} | filter_excludes | \ + -export-fixes=${FIX_YAML} -j ${NUM_CPUS:-0} -p ${SRCDIR} -quiet \ + ${APPLY_CLANG_TIDY_FIXES:+-fix} $@ +} + +function run_clang_tidy_diff() { + git diff $1 | filter_excludes | \ python3 "${LLVM_PREFIX}/share/clang/clang-tidy-diff.py" \ -clang-tidy-binary=${CLANG_TIDY} \ - -export-fixes=${FIX_YAML} \ - -j ${NUM_CPUS:-0} -p 1 -quiet + -export-fixes=${FIX_YAML} -j ${NUM_CPUS:-0} -p 1 -quiet +} + +if [[ $# -gt 0 ]]; then + echo "Running clang-tidy on: $@" + run_clang_tidy $@ +elif [[ "${RUN_FULL_CLANG_TIDY}" == 1 ]]; then + echo "Running a full clang-tidy" + run_clang_tidy else - echo "Running clang-tidy-diff against master branch..." 
- git diff "remotes/origin/${SYSTEM_PULLREQUEST_TARGETBRANCH}" | filter_excludes | \ - python3 "${LLVM_PREFIX}/share/clang/clang-tidy-diff.py" \ - -clang-tidy-binary=${CLANG_TIDY} \ - -export-fixes=${FIX_YAML} \ - -j ${NUM_CPUS:-0} -p 1 -quiet + if [[ -z "${DIFF_REF}" ]]; then + if [[ "${BUILD_REASON}" == "PullRequest" ]]; then + DIFF_REF="remotes/origin/${SYSTEM_PULLREQUEST_TARGETBRANCH}" + elif [[ "${BUILD_REASON}" == *CI ]]; then + DIFF_REF="HEAD^" + else + DIFF_REF=$(${ENVOY_SRCDIR}/tools/git/last_github_commit.sh) + fi + fi + echo "Running clang-tidy-diff against ${DIFF_REF} ($(git rev-parse ${DIFF_REF})), current HEAD ($(git rev-parse HEAD))" + run_clang_tidy_diff ${DIFF_REF} fi if [[ -s "${FIX_YAML}" ]]; then diff --git a/ci/run_envoy_docker.sh b/ci/run_envoy_docker.sh index 3ac8671d5b2d7..ca29667c14c94 100755 --- a/ci/run_envoy_docker.sh +++ b/ci/run_envoy_docker.sh @@ -16,19 +16,19 @@ USER_GROUP=root [[ -z "${IMAGE_ID}" ]] && IMAGE_ID="${ENVOY_BUILD_SHA}" [[ -z "${ENVOY_DOCKER_BUILD_DIR}" ]] && ENVOY_DOCKER_BUILD_DIR=/tmp/envoy-docker-build -[[ -f .git ]] && [[ ! -d .git ]] && GIT_VOLUME_OPTION="-v $(git rev-parse --git-common-dir):$(git rev-parse --git-common-dir)" - -[[ -t 1 ]] && DOCKER_TTY_OPTION=-it +[[ -t 1 ]] && ENVOY_DOCKER_OPTIONS+=" -it" +[[ -f .git ]] && [[ ! -d .git ]] && ENVOY_DOCKER_OPTIONS+=" -v $(git rev-parse --git-common-dir):$(git rev-parse --git-common-dir)" export ENVOY_BUILD_IMAGE="${IMAGE_NAME}:${IMAGE_ID}" mkdir -p "${ENVOY_DOCKER_BUILD_DIR}" # Since we specify an explicit hash, docker-run will pull from the remote repo if missing. 
-docker run --rm ${DOCKER_TTY_OPTION} -e HTTP_PROXY=${http_proxy} -e HTTPS_PROXY=${https_proxy} \ - -u "${USER}":"${USER_GROUP}" -v "${ENVOY_DOCKER_BUILD_DIR}":/build -v /var/run/docker.sock:/var/run/docker.sock ${GIT_VOLUME_OPTION} \ +docker run --rm ${ENVOY_DOCKER_OPTIONS} -e HTTP_PROXY=${http_proxy} -e HTTPS_PROXY=${https_proxy} -e NO_PROXY=${no_proxy} \ + -u "${USER}":"${USER_GROUP}" -v "${ENVOY_DOCKER_BUILD_DIR}":/build -v /var/run/docker.sock:/var/run/docker.sock \ -e BAZEL_BUILD_EXTRA_OPTIONS -e BAZEL_EXTRA_TEST_OPTIONS -e BAZEL_REMOTE_CACHE -e ENVOY_STDLIB -e BUILD_REASON \ -e BAZEL_REMOTE_INSTANCE -e GCP_SERVICE_ACCOUNT_KEY -e NUM_CPUS -e ENVOY_RBE -e FUZZIT_API_KEY -e ENVOY_BUILD_IMAGE \ - -e ENVOY_SRCDIR -e ENVOY_BUILD_TARGET -e SYSTEM_PULLREQUEST_TARGETBRANCH \ + -e ENVOY_SRCDIR -e ENVOY_BUILD_TARGET -e SYSTEM_PULLREQUEST_TARGETBRANCH -e SYSTEM_PULLREQUEST_PULLREQUESTNUMBER \ + -e GCS_ARTIFACT_BUCKET -e BUILD_SOURCEBRANCHNAME -e BAZELISK_BASE_URL -e ENVOY_BUILD_ARCH \ -v "$PWD":/source --cap-add SYS_PTRACE --cap-add NET_RAW --cap-add NET_ADMIN "${ENVOY_BUILD_IMAGE}" \ /bin/bash -lc "groupadd --gid $(id -g) -f envoygroup && useradd -o --uid $(id -u) --gid $(id -g) --no-create-home \ - --home-dir /source envoybuild && usermod -a -G pcap envoybuild && su envoybuild -c \"cd source && $*\"" + --home-dir /build envoybuild && usermod -a -G pcap envoybuild && sudo -EHs -u envoybuild bash -c \"cd /source && $*\"" diff --git a/ci/run_envoy_docker_windows.sh b/ci/run_envoy_docker_windows.sh new file mode 100644 index 0000000000000..a1f4e7372b527 --- /dev/null +++ b/ci/run_envoy_docker_windows.sh @@ -0,0 +1,27 @@ +#!/bin/bash + +set -e + +# The image tag for the Windows image is the same as the Linux one so we use the same mechanism to find it +. 
$(dirname $0)/envoy_build_sha.sh + +[[ -z "${IMAGE_NAME}" ]] && IMAGE_NAME="envoyproxy/envoy-build-windows2019" +# The IMAGE_ID defaults to the CI hash but can be set to an arbitrary image ID (found with 'docker +# images'). +[[ -z "${IMAGE_ID}" ]] && IMAGE_ID="${ENVOY_BUILD_SHA}" + +ENVOY_SOURCE_DIR=$(echo "${PWD}" | sed -E "s#/([a-zA-Z])/#\1:/#") + +[[ -f .git ]] && [[ ! -d .git ]] && GIT_VOLUME_OPTION="-v $(git rev-parse --git-common-dir):$(git rev-parse --git-common-dir)" + +[[ -t 1 ]] && DOCKER_TTY_OPTION=-it + +export ENVOY_BUILD_IMAGE="${IMAGE_NAME}:${IMAGE_ID}" + +# Since we specify an explicit hash, docker-run will pull from the remote repo if missing. +docker run --rm ${DOCKER_TTY_OPTION} -e HTTP_PROXY=${http_proxy} -e HTTPS_PROXY=${https_proxy} \ + ${GIT_VOLUME_OPTION} -e BAZEL_BUILD_EXTRA_OPTIONS -e BAZEL_EXTRA_TEST_OPTIONS -e BAZEL_REMOTE_CACHE \ + -e ENVOY_STDLIB -e BUILD_REASON -e BAZEL_REMOTE_INSTANCE -e GCP_SERVICE_ACCOUNT_KEY -e NUM_CPUS -e ENVOY_RBE \ + -e ENVOY_BUILD_IMAGE -e ENVOY_SRCDIR -e ENVOY_BUILD_TARGET -e SYSTEM_PULLREQUEST_TARGETBRANCH -v ${ENVOY_SOURCE_DIR}:C:/source \ + "${ENVOY_BUILD_IMAGE}" \ + bash -c "cd source && $*" diff --git a/ci/setup_cache.sh b/ci/setup_cache.sh index 699961bbb082d..f615b8b41d5df 100755 --- a/ci/setup_cache.sh +++ b/ci/setup_cache.sh @@ -2,7 +2,7 @@ set -e -if [[ ! -z "${GCP_SERVICE_ACCOUNT_KEY}" ]]; then +if [[ ! -z "${GCP_SERVICE_ACCOUNT_KEY:0:1}" ]]; then # mktemp will create a tempfile with u+rw permission minus umask, it will not be readable by all # users by default. GCP_SERVICE_ACCOUNT_KEY_FILE=$(mktemp -t gcp_service_account.XXXXXX.json) @@ -14,27 +14,24 @@ if [[ ! 
-z "${GCP_SERVICE_ACCOUNT_KEY}" ]]; then trap gcp_service_account_cleanup EXIT - echo "${GCP_SERVICE_ACCOUNT_KEY}" | base64 --decode > "${GCP_SERVICE_ACCOUNT_KEY_FILE}" + bash -c 'echo "${GCP_SERVICE_ACCOUNT_KEY}"' | base64 --decode > "${GCP_SERVICE_ACCOUNT_KEY_FILE}" + + export BAZEL_BUILD_EXTRA_OPTIONS+=" --google_credentials=${GCP_SERVICE_ACCOUNT_KEY_FILE}" fi -if [[ "${BAZEL_REMOTE_CACHE}" =~ ^http ]]; then - if [[ ! -z "${GCP_SERVICE_ACCOUNT_KEY}" ]]; then - export BAZEL_BUILD_EXTRA_OPTIONS="${BAZEL_BUILD_EXTRA_OPTIONS} \ - --remote_http_cache=${BAZEL_REMOTE_CACHE} \ - --google_credentials=${GCP_SERVICE_ACCOUNT_KEY_FILE}" - echo "Set up bazel HTTP read/write cache at ${BAZEL_REMOTE_CACHE}." - else - export BAZEL_BUILD_EXTRA_OPTIONS="${BAZEL_BUILD_EXTRA_OPTIONS} \ - --remote_http_cache=${BAZEL_REMOTE_CACHE} --noremote_upload_local_results" - echo "Set up bazel HTTP read only cache at ${BAZEL_REMOTE_CACHE}." + +if [[ ! -z "${BAZEL_REMOTE_CACHE}" ]]; then + export BAZEL_BUILD_EXTRA_OPTIONS+=" --remote_cache=${BAZEL_REMOTE_CACHE}" + echo "Set up bazel remote read/write cache at ${BAZEL_REMOTE_CACHE}." + + if [[ ! -z "${BAZEL_REMOTE_INSTANCE}" ]]; then + export BAZEL_BUILD_EXTRA_OPTIONS+=" --remote_instance_name=${BAZEL_REMOTE_INSTANCE}" + echo "instance_name: ${BAZEL_REMOTE_INSTANCE}." + elif [[ -z "${ENVOY_RBE}" ]]; then + export BAZEL_BUILD_EXTRA_OPTIONS+=" --jobs=HOST_CPUS*.9 --remote_timeout=600" + echo "using local build cache." fi -elif [[ ! -z "${BAZEL_REMOTE_CACHE}" ]]; then - export BAZEL_BUILD_EXTRA_OPTIONS="${BAZEL_BUILD_EXTRA_OPTIONS} \ - --remote_cache=${BAZEL_REMOTE_CACHE} \ - --remote_instance_name=${BAZEL_REMOTE_INSTANCE} \ - --google_credentials=${GCP_SERVICE_ACCOUNT_KEY_FILE} \ - --auth_enabled=true" - echo "Set up bazel remote read/write cache at ${BAZEL_REMOTE_CACHE} instance: ${BAZEL_REMOTE_INSTANCE}." + else - echo "No remote cache bucket is set, skipping setup remote cache." + echo "No remote cache is set, skipping setup remote cache." 
fi diff --git a/ci/upload_gcs_artifact.sh b/ci/upload_gcs_artifact.sh new file mode 100755 index 0000000000000..7bd5b02013593 --- /dev/null +++ b/ci/upload_gcs_artifact.sh @@ -0,0 +1,26 @@ +#!/bin/bash + +set -e -o pipefail + +if [[ -z "${GCS_ARTIFACT_BUCKET}" ]]; then + echo "Artifact bucket is not set, not uploading artifacts." + exit 0 +fi + +# Fail when service account key is not specified +bash -c 'echo ${GCP_SERVICE_ACCOUNT_KEY}' | base64 --decode | gcloud auth activate-service-account --key-file=- + +SOURCE_DIRECTORY="$1" +TARGET_SUFFIX="$2" + +if [ ! -d "${SOURCE_DIRECTORY}" ]; then + echo "ERROR: ${SOURCE_DIRECTORY} is not found." + exit 1 +fi + +BRANCH=${SYSTEM_PULLREQUEST_PULLREQUESTNUMBER:-${BUILD_SOURCEBRANCHNAME}} +GCS_LOCATION="${GCS_ARTIFACT_BUCKET}/${BRANCH}/${TARGET_SUFFIX}" + +echo "Uploading to gs://${GCS_LOCATION} ..." +gsutil -mq rsync -dr ${SOURCE_DIRECTORY} gs://${GCS_LOCATION} +echo "Artifacts uploaded to: https://storage.googleapis.com/${GCS_LOCATION}/index.html" diff --git a/ci/verify_examples.sh b/ci/verify_examples.sh index 4b9273ee052a1..711ceb5f25a30 100755 --- a/ci/verify_examples.sh +++ b/ci/verify_examples.sh @@ -23,8 +23,9 @@ cd ../ # Test grpc bridge example # install go -curl -O https://storage.googleapis.com/golang/go1.13.5.linux-amd64.tar.gz -tar -xf go1.13.5.linux-amd64.tar.gz +GO_VERSION="1.14.7" +curl -O https://storage.googleapis.com/golang/go$GO_VERSION.linux-amd64.tar.gz +tar -xf go$GO_VERSION.linux-amd64.tar.gz sudo mv go /usr/local export PATH=$PATH:/usr/local/go/bin export GOPATH=$HOME/go diff --git a/ci/windows_ci_setup.ps1 b/ci/windows_ci_setup.ps1 deleted file mode 100644 index c39f2c083e22a..0000000000000 --- a/ci/windows_ci_setup.ps1 +++ /dev/null @@ -1,40 +0,0 @@ -# This file only installs dependencies needed in additio to Azure pipelines hosted image. 
-# The list of installed software can be found at: -# https://github.com/actions/virtual-environments/blob/master/images/win/Windows2019-Readme.md - -function Checksum -{ - param([string]$filepath, [string]$expected, [string]$algorithm) - - $actual = Get-FileHash -Path $filePath -Algorithm $algorithm; - if ($actual.Hash -eq $expected) { - Write-Host "$filepath is valid"; - } else { - Write-Host "$filepath is invalid, expected: $expected, but got: $actual"; - exit 1 - } -} - -mkdir "$env:TOOLS_BIN_DIR" -$wc = New-Object System.Net.WebClient -$wc.DownloadFile("https://github.com/bazelbuild/bazelisk/releases/download/v1.0/bazelisk-windows-amd64.exe", "$env:TOOLS_BIN_DIR\bazel.exe") -# See https://sourceforge.net/projects/msys2/files/Base/x86_64/ for msys2 download source -$wc.DownloadFile("http://repo.msys2.org/distrib/x86_64/msys2-base-x86_64-20190524.tar.xz", "$env:TEMP\msys2.tar.xz") - -# Check the SHA256 file hash of each downloaded file. -Checksum $env:TOOLS_BIN_DIR\bazel.exe 96395ee9e3fb9f4499fcaffa8a94dd72b0748f495f366bc4be44dbf09d6827fc SHA256 -Checksum $env:TEMP\msys2.tar.xz 168e156fa9f00d90a8445676c023c63be6e82f71487f4e2688ab5cb13b345383 SHA256 - -# Unpack and install msys2 and required packages -$tarpath="$env:ProgramFiles\Git\usr\bin\tar.exe" -$msys2TarPathClean = "/$env:TEMP/msys2.tar.xz".replace(':', '').replace('\', '/') -$outDirClean = "/$env:TOOLS_BIN_DIR".replace(':', '').replace('\', '/') -&"$tarpath" -Jxf $msys2TarPathClean -C $outDirClean --strip-components=1 -# Add utils to the path for msys2 setup -$env:PATH = "$env:TOOLS_BIN_DIR\usr\bin;$env:TOOLS_BIN_DIR\mingw64\bin;$env:PATH" -bash.exe -c "pacman-key --init 2>&1" -bash.exe -c "pacman-key --populate msys2 2>&1" -bash.exe -c "pacman.exe -Syyuu --noconfirm 2>&1" -bash.exe -c "pacman.exe -Syuu --noconfirm 2>&1" -bash.exe -c "pacman.exe -S --noconfirm --needed compression diffutils patch 2>&1" -bash.exe -c "pacman.exe -Scc --noconfirm 2>&1" diff --git a/ci/windows_ci_steps.sh 
b/ci/windows_ci_steps.sh index 0ded44c4dd82b..62b101d078473 100755 --- a/ci/windows_ci_steps.sh +++ b/ci/windows_ci_steps.sh @@ -26,18 +26,12 @@ fi BAZEL_STARTUP_OPTIONS="--output_base=c:/_eb" BAZEL_BUILD_OPTIONS="-c opt --config=msvc-cl --show_task_finish --verbose_failures \ - --test_output=all ${BAZEL_BUILD_EXTRA_OPTIONS} ${BAZEL_EXTRA_TEST_OPTIONS}" - -# With all envoy-static and //test/ tree building, no need to test compile externals -# bazel ${BAZEL_STARTUP_OPTIONS} build ${BAZEL_BUILD_OPTIONS} //bazel/... --build_tag_filters=-skip_on_windows + --test_output=errors ${BAZEL_BUILD_EXTRA_OPTIONS} ${BAZEL_EXTRA_TEST_OPTIONS}" bazel ${BAZEL_STARTUP_OPTIONS} build ${BAZEL_BUILD_OPTIONS} //source/exe:envoy-static --build_tag_filters=-skip_on_windows -# TODO(sunjayBhatia, wrowe): We are disabling building/running tests for now as the AZP pipelines -# workers do not provide enough resources for us to produce fast enough or reliable enough builds. -# Test compilation of known MSVC-compatible test sources -# bazel ${BAZEL_STARTUP_OPTIONS} build ${BAZEL_BUILD_OPTIONS} //test/... --test_tag_filters=-skip_on_windows --build_tests_only - # Test invocations of known-working tests on Windows -# bazel ${BAZEL_STARTUP_OPTIONS} test ${BAZEL_BUILD_OPTIONS} //test/... --test_tag_filters=-skip_on_windows,-fails_on_windows --build_tests_only --test_summary=terse --test_output=errors +bazel ${BAZEL_STARTUP_OPTIONS} test ${BAZEL_BUILD_OPTIONS} //test/... --test_tag_filters=-skip_on_windows,-fails_on_windows --build_tests_only +# Build tests that are failing to ensure no regressions +bazel ${BAZEL_STARTUP_OPTIONS} build ${BAZEL_BUILD_OPTIONS} //test/... 
--test_tag_filters=-skip_on_windows,fails_on_windows --build_tests_only diff --git a/configs/BUILD b/configs/BUILD index 9846609607e9e..7240a5cb34909 100644 --- a/configs/BUILD +++ b/configs/BUILD @@ -1,20 +1,18 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", + "envoy_py_test_binary", ) -envoy_package() +licenses(["notice"]) # Apache 2 -load("//bazel:envoy_build_system.bzl", "envoy_py_test_binary") +envoy_package() envoy_py_test_binary( name = "configgen", srcs = ["configgen.py"], data = glob([ "*.yaml", - "*.json", ]), external_deps = ["jinja2"], ) @@ -22,6 +20,8 @@ envoy_py_test_binary( filegroup( name = "configs", srcs = [ + "google-vrp/envoy-edge.yaml", + "google-vrp/envoy-origin.yaml", "original-dst-cluster/proxy_config.yaml", ] + select({ "//bazel:apple": [], diff --git a/configs/configgen.py b/configs/configgen.py index c255b0d4e2a18..557dd9ed2b1b2 100755 --- a/configs/configgen.py +++ b/configs/configgen.py @@ -134,3 +134,6 @@ def generate_config(template_path, template, output_file, **context): for google_ext in ['v2.yaml']: shutil.copy(os.path.join(SCRIPT_DIR, 'google_com_proxy.%s' % google_ext), OUT_DIR) + +shutil.copy(os.path.join(SCRIPT_DIR, 'encapsulate_in_connect.v3.yaml'), OUT_DIR) +shutil.copy(os.path.join(SCRIPT_DIR, 'terminate_connect.v3.yaml'), OUT_DIR) diff --git a/configs/encapsulate_in_connect.v3.yaml b/configs/encapsulate_in_connect.v3.yaml new file mode 100644 index 0000000000000..c6470b13ed58e --- /dev/null +++ b/configs/encapsulate_in_connect.v3.yaml @@ -0,0 +1,38 @@ +admin: + access_log_path: /tmp/admin_access.log + address: + socket_address: + protocol: TCP + address: 127.0.0.1 + port_value: 9903 +static_resources: + listeners: + - name: listener_0 + address: + socket_address: + protocol: TCP + address: 127.0.0.1 + port_value: 10000 + filter_chains: + - filters: + - name: tcp + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy + stat_prefix: 
tcp_stats + cluster: "cluster_0" + tunneling_config: + hostname: host.com + clusters: + - name: cluster_0 + connect_timeout: 5s + http2_protocol_options: + {} + load_assignment: + cluster_name: cluster_0 + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 10001 diff --git a/configs/google-vrp/envoy-edge.yaml b/configs/google-vrp/envoy-edge.yaml new file mode 100644 index 0000000000000..803b01116ad1c --- /dev/null +++ b/configs/google-vrp/envoy-edge.yaml @@ -0,0 +1,92 @@ +overload_manager: + refresh_interval: 0.25s + resource_monitors: + - name: "envoy.resource_monitors.fixed_heap" + typed_config: + "@type": type.googleapis.com/envoy.config.resource_monitor.fixed_heap.v2alpha.FixedHeapConfig + # TODO: Tune for your system. + max_heap_size_bytes: 1073741824 # 1 GiB + actions: + - name: "envoy.overload_actions.shrink_heap" + triggers: + - name: "envoy.resource_monitors.fixed_heap" + threshold: + value: 0.90 + - name: "envoy.overload_actions.stop_accepting_requests" + triggers: + - name: "envoy.resource_monitors.fixed_heap" + threshold: + value: 0.95 + +static_resources: + listeners: + - name: listener_https + address: + socket_address: + protocol: TCP + address: 0.0.0.0 + port_value: 10000 + per_connection_buffer_limit_bytes: 32768 # 32 KiB + filter_chains: + - transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + tls_certificates: + - certificate_chain: { filename: "certs/servercert.pem" } + private_key: { filename: "certs/serverkey.pem" } + # Uncomment if Envoy is behind a load balancer that exposes client IP address using the PROXY protocol. 
+ # use_proxy_proto: true + filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: ingress_http + use_remote_address: true + common_http_protocol_options: + idle_timeout: 3600s # 1 hour + headers_with_underscores_action: REJECT_REQUEST + http2_protocol_options: + max_concurrent_streams: 100 + initial_stream_window_size: 65536 # 64 KiB + initial_connection_window_size: 1048576 # 1 MiB + stream_idle_timeout: 300s # 5 mins, must be disabled for long-lived and streaming requests + request_timeout: 300s # 5 mins, must be disabled for long-lived and streaming requests + route_config: + name: local_route + virtual_hosts: + - name: local_service + domains: ["*"] + # The exact route table is not super important in this example (this is the model + # for the Google VRP scenario). + routes: + - match: + prefix: "/content" + route: + cluster: service_foo + idle_timeout: 15s # must be disabled for long-lived and streaming requests + - match: + prefix: "/" + direct_response: + status: 403 + body: + inline_string: "denied\n" + http_filters: + - name: envoy.filters.http.router + clusters: + name: service_foo + connect_timeout: 5s + per_connection_buffer_limit_bytes: 32768 # 32 KiB + load_assignment: + cluster_name: service_foo + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 10002 + http2_protocol_options: + initial_stream_window_size: 65536 # 64 KiB + initial_connection_window_size: 1048576 # 1 MiB diff --git a/configs/google-vrp/envoy-origin.yaml b/configs/google-vrp/envoy-origin.yaml new file mode 100644 index 0000000000000..283d347e5a276 --- /dev/null +++ b/configs/google-vrp/envoy-origin.yaml @@ -0,0 +1,64 @@ +overload_manager: + refresh_interval: 0.25s + resource_monitors: + - name: "envoy.resource_monitors.fixed_heap" + typed_config: + "@type": 
type.googleapis.com/envoy.config.resource_monitor.fixed_heap.v2alpha.FixedHeapConfig + max_heap_size_bytes: 1073741824 # 1 GiB + actions: + - name: "envoy.overload_actions.shrink_heap" + triggers: + - name: "envoy.resource_monitors.fixed_heap" + threshold: + value: 0.95 + - name: "envoy.overload_actions.stop_accepting_requests" + triggers: + - name: "envoy.resource_monitors.fixed_heap" + threshold: + value: 0.98 + +static_resources: + listeners: + - name: listener_0 + address: + socket_address: + protocol: TCP + address: 0.0.0.0 + port_value: 10002 + per_connection_buffer_limit_bytes: 32768 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: ingress_http + use_remote_address: true + common_http_protocol_options: + idle_timeout: 3600s # 1 hour + headers_with_underscores_action: REJECT_REQUEST + http2_protocol_options: + max_concurrent_streams: 100 + initial_stream_window_size: 65536 # 64 KiB + initial_connection_window_size: 1048576 # 1 MiB + stream_idle_timeout: 300s # 5 mins, must be disabled for long-lived and streaming requests + request_timeout: 300s # 5 mins, must be disabled for long-lived and streaming requests + route_config: + name: local_route + virtual_hosts: + - name: local_service + domains: ["*"] + routes: + - match: + path: "/blockedz" + direct_response: + status: 200 + body: + inline_string: "hidden treasure\n" + - match: + prefix: "/" + direct_response: + status: 200 + body: + inline_string: "normal\n" + http_filters: + - name: envoy.filters.http.router diff --git a/configs/google-vrp/launch_envoy.sh b/configs/google-vrp/launch_envoy.sh new file mode 100755 index 0000000000000..1d402df0cbef9 --- /dev/null +++ b/configs/google-vrp/launch_envoy.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +cd /etc/envoy +envoy "$@" diff --git a/configs/google-vrp/supervisor.conf 
b/configs/google-vrp/supervisor.conf new file mode 100644 index 0000000000000..e019581d079c2 --- /dev/null +++ b/configs/google-vrp/supervisor.conf @@ -0,0 +1,16 @@ +[supervisord] +nodaemon=true + +[program:envoy-edge] +command=launch_envoy.sh -c /etc/envoy/envoy-edge.yaml %(ENV_ENVOY_EDGE_EXTRA_ARGS)s + --log-format "(edge)[%%Y-%%m-%%d %%T.%%e][%%t][%%l][%%n] %%v" --base-id 0 +redirect_stderr=true +stdout_logfile_maxbytes=0 +stdout_logfile=/dev/stdout + +[program:envoy-origin] +command=launch_envoy.sh -c /etc/envoy/envoy-origin.yaml %(ENV_ENVOY_ORIGIN_EXTRA_ARGS)s + --log-format "(origin)[%%Y-%%m-%%d %%T.%%e][%%t][%%l][%%n] %%v" --base-id 1 +redirect_stderr=true +stdout_logfile_maxbytes=0 +stdout_logfile=/dev/stdout diff --git a/configs/terminate_connect.v3.yaml b/configs/terminate_connect.v3.yaml new file mode 100644 index 0000000000000..419bd80c6b8e5 --- /dev/null +++ b/configs/terminate_connect.v3.yaml @@ -0,0 +1,64 @@ +admin: + access_log_path: /tmp/admin_access.log + address: + socket_address: + protocol: TCP + address: 127.0.0.1 + port_value: 9902 +static_resources: + listeners: + - name: listener_0 + address: + socket_address: + protocol: TCP + address: 127.0.0.1 + port_value: 10001 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: ingress_http + route_config: + name: local_route + virtual_hosts: + - name: local_service + domains: + - "*" + routes: + - match: + connect_matcher: + {} + route: + cluster: service_google + upgrade_configs: + - upgrade_type: CONNECT + connect_config: + {} + http_filters: + - name: envoy.filters.http.router + http2_protocol_options: + allow_connect: true + upgrade_configs: + - upgrade_type: CONNECT + clusters: + - name: service_google + connect_timeout: 0.25s + type: LOGICAL_DNS + # Comment out the following line to test on v6 networks + 
dns_lookup_family: V4_ONLY + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: service_google + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: www.google.com + port_value: 443 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + sni: www.google.com diff --git a/docs/BUILD b/docs/BUILD new file mode 100644 index 0000000000000..ead7bddb9a7fa --- /dev/null +++ b/docs/BUILD @@ -0,0 +1,3 @@ +licenses(["notice"]) # Apache 2 + +exports_files(["protodoc_manifest.yaml"]) diff --git a/docs/README.md b/docs/README.md index 119596fec9805..b672f51c8a4f8 100644 --- a/docs/README.md +++ b/docs/README.md @@ -4,7 +4,13 @@ ./docs/build.sh ``` -The output can be found in `generated/docs`. +The output can be found in `generated/docs`. By default configuration examples are going to be validated during build. +To disable validation, set `SPHINX_SKIP_CONFIG_VALIDATION` environment variable to `true`: + +```bash +SPHINX_SKIP_CONFIG_VALIDATION=true docs/build.sh +``` + # How the Envoy website and docs are updated diff --git a/docs/_ext/validating_code_block.py b/docs/_ext/validating_code_block.py new file mode 100644 index 0000000000000..6220ae98618bb --- /dev/null +++ b/docs/_ext/validating_code_block.py @@ -0,0 +1,62 @@ +from typing import List +from docutils import nodes +from docutils.parsers.rst import Directive +from docutils.parsers.rst import directives +from sphinx.application import Sphinx +from sphinx.util.docutils import SphinxDirective +from sphinx.directives.code import CodeBlock +from sphinx.errors import ExtensionError + +import os +import subprocess + + +class ValidatingCodeBlock(CodeBlock): + """A directive that provides protobuf yaml formatting and validation. + + 'type-name' option is required and expected to conain full Envoy API type. + An ExtensionError is raised on validation failure. 
+ Validation will be skipped if SPHINX_SKIP_CONFIG_VALIDATION environment variable is set. + """ + has_content = True + required_arguments = CodeBlock.required_arguments + optional_arguments = CodeBlock.optional_arguments + final_argument_whitespace = CodeBlock.final_argument_whitespace + option_spec = { + 'type-name': directives.unchanged, + } + option_spec.update(CodeBlock.option_spec) + skip_validation = (os.getenv('SPHINX_SKIP_CONFIG_VALIDATION') or 'false').lower() == 'true' + + def run(self): + source, line = self.state_machine.get_source_and_line(self.lineno) + # built-in directives.unchanged_required option validator produces a confusing error message + if self.options.get('type-name') == None: + raise ExtensionError("Expected type name in: {0} line: {1}".format(source, line)) + + if not ValidatingCodeBlock.skip_validation: + args = [ + 'bazel-bin/tools/config_validation/validate_fragment', + self.options.get('type-name'), '-s', '\n'.join(self.content) + ] + completed = subprocess.run(args, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + encoding='utf-8') + if completed.returncode != 0: + raise ExtensionError( + "Failed config validation for type: '{0}' in: {1} line: {2}:\n {3}".format( + self.options.get('type-name'), source, line, completed.stderr)) + + self.options.pop('type-name', None) + return list(CodeBlock.run(self)) + + +def setup(app): + app.add_directive("validated-code-block", ValidatingCodeBlock) + + return { + 'version': '0.1', + 'parallel_read_safe': True, + 'parallel_write_safe': True, + } diff --git a/docs/build.sh b/docs/build.sh index 0ebb4d0854971..9ca1bec440ebc 100755 --- a/docs/build.sh +++ b/docs/build.sh @@ -1,5 +1,8 @@ #!/usr/bin/env bash +# set SPHINX_SKIP_CONFIG_VALIDATION environment variable to true to skip +# validation of configuration examples + . 
tools/shell_utils.sh set -e @@ -31,8 +34,10 @@ else export ENVOY_BLOB_SHA="$BUILD_SHA" fi -SCRIPT_DIR=$(dirname "$0") -API_DIR=$(dirname "$dir")/api +SCRIPT_DIR="$(dirname "$0")" +SRC_DIR="$(dirname "$dir")" +API_DIR="${SRC_DIR}"/api +CONFIGS_DIR="${SRC_DIR}"/configs BUILD_DIR=build_docs [[ -z "${DOCS_OUTPUT_DIR}" ]] && DOCS_OUTPUT_DIR=generated/docs [[ -z "${GENERATED_RST_DIR}" ]] && GENERATED_RST_DIR=generated/rst @@ -115,12 +120,18 @@ generate_api_rst v3 find "${GENERATED_RST_DIR}"/api-v3 -name "*.rst" -print0 | xargs -0 sed -i -e "s#envoy_api_#envoy_v3_api_#g" find "${GENERATED_RST_DIR}"/api-v3 -name "*.rst" -print0 | xargs -0 sed -i -e "s#config_resource_monitors#v3_config_resource_monitors#g" +# xDS protocol spec. mkdir -p ${GENERATED_RST_DIR}/api-docs - -cp -f $API_DIR/xds_protocol.rst "${GENERATED_RST_DIR}/api-docs/xds_protocol.rst" +cp -f "${API_DIR}"/xds_protocol.rst "${GENERATED_RST_DIR}/api-docs/xds_protocol.rst" +# Edge hardening example YAML. +mkdir -p "${GENERATED_RST_DIR}"/configuration/best_practices +cp -f "${CONFIGS_DIR}"/google-vrp/envoy-edge.yaml "${GENERATED_RST_DIR}"/configuration/best_practices rsync -rav $API_DIR/diagrams "${GENERATED_RST_DIR}/api-docs" -rsync -av "${SCRIPT_DIR}"/root/ "${SCRIPT_DIR}"/conf.py "${GENERATED_RST_DIR}" +rsync -av "${SCRIPT_DIR}"/root/ "${SCRIPT_DIR}"/conf.py "${SCRIPT_DIR}"/_ext "${GENERATED_RST_DIR}" + +# To speed up validate_fragment invocations in validating_code_block +bazel build ${BAZEL_BUILD_OPTIONS} //tools/config_validation:validate_fragment sphinx-build -W --keep-going -b html "${GENERATED_RST_DIR}" "${DOCS_OUTPUT_DIR}" diff --git a/docs/conf.py b/docs/conf.py index a2f4d250d939b..1eb5725b689b8 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -67,7 +67,13 @@ def setup(app): # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. 
-extensions = ['sphinxcontrib.httpdomain', 'sphinx.ext.extlinks', 'sphinx.ext.ifconfig'] + +sys.path.append(os.path.abspath("./_ext")) + +extensions = [ + 'sphinxcontrib.httpdomain', 'sphinx.ext.extlinks', 'sphinx.ext.ifconfig', + 'validating_code_block' +] extlinks = { 'repo': ('https://github.com/envoyproxy/envoy/blob/{}/%s'.format(blob_sha), ''), 'api': ('https://github.com/envoyproxy/envoy/blob/{}/api/%s'.format(blob_sha), ''), diff --git a/docs/generate_extension_db.py b/docs/generate_extension_db.py index d021b75e48ab1..fe7b5aa70c98d 100755 --- a/docs/generate_extension_db.py +++ b/docs/generate_extension_db.py @@ -40,7 +40,9 @@ def GetExtensionMetadata(target): stderr=subprocess.PIPE) security_posture, status, undocumented = r.stdout.decode('utf-8').strip().split(' ') if IsMissing(security_posture): - raise ExtensionDbError('Missing security posture for %s' % target) + raise ExtensionDbError( + 'Missing security posture for %s. Please make sure the target is an envoy_cc_extension and security_posture is set' + % target) return { 'security_posture': security_posture, 'undocumented': False if IsMissing(undocumented) else bool(undocumented), @@ -53,5 +55,11 @@ def GetExtensionMetadata(target): extension_db = {} for extension, target in extensions_build_config.EXTENSIONS.items(): extension_db[extension] = GetExtensionMetadata(target) + # The TLS and generic upstream extensions are hard-coded into the build, so + # not in source/extensions/extensions_build_config.bzl + extension_db['envoy.transport_sockets.tls'] = GetExtensionMetadata( + '//source/extensions/transport_sockets/tls:config') + extension_db['envoy.upstreams.http.generic'] = GetExtensionMetadata( + '//source/extensions/upstreams/http/generic:config') pathlib.Path(output_path).write_text(json.dumps(extension_db)) diff --git a/docs/protodoc_manifest.yaml b/docs/protodoc_manifest.yaml new file mode 100644 index 0000000000000..2e2afff3264da --- /dev/null +++ b/docs/protodoc_manifest.yaml @@ -0,0 +1,51 @@ 
+fields: + envoy.config.bootstrap.v3.Bootstrap.overload_manager: + edge_config: + example: + refresh_interval: 0.25s + resource_monitors: + - name: "envoy.resource_monitors.fixed_heap" + typed_config: + "@type": type.googleapis.com/envoy.config.resource_monitor.fixed_heap.v2alpha.FixedHeapConfig + max_heap_size_bytes: 1073741824 + actions: + - name: "envoy.overload_actions.shrink_heap" + triggers: + - name: "envoy.resource_monitors.fixed_heap" + threshold: + value: 0.90 + - name: "envoy.overload_actions.stop_accepting_requests" + triggers: + - name: "envoy.resource_monitors.fixed_heap" + threshold: + value: 0.95 + envoy.config.cluster.v3.Cluster.per_connection_buffer_limit_bytes: + edge_config: { example: 32768 } + envoy.config.cluster.v3.Cluster.http2_protocol_options: + edge_config: + example: + initial_stream_window_size: 65536 # 64 KiB + initial_connection_window_size: 1048576 # 1 MiB + envoy.config.listener.v3.Listener.per_connection_buffer_limit_bytes: + edge_config: { example: 32768 } + envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.common_http_protocol_options: + edge_config: + example: + idle_timeout: 900s # 15 mins + headers_with_underscores_action: REJECT_REQUEST + envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.http2_protocol_options: + edge_config: + example: + max_concurrent_streams: 100 + initial_stream_window_size: 65536 # 64 KiB + initial_connection_window_size: 1048576 # 1 MiB + envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.stream_idle_timeout: + edge_config: + example: 300s # 5 mins + envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.request_timeout: + edge_config: + note: > + This timeout is not compatible with streaming requests. 
+ example: 300s # 5 mins + envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.use_remote_address: + edge_config: { example: true } diff --git a/docs/root/_static/css/envoy.css b/docs/root/_static/css/envoy.css index bab090478464a..c65a71f052626 100644 --- a/docs/root/_static/css/envoy.css +++ b/docs/root/_static/css/envoy.css @@ -9,3 +9,8 @@ table.docutils div.line-block { margin-left: 0; } +/* Breaking long words */ +.wy-nav-content { + overflow-wrap: break-word; + max-width: 1000px; +} diff --git a/docs/root/_static/docker_compose_front_proxy.svg b/docs/root/_static/docker_compose_front_proxy.svg new file mode 100644 index 0000000000000..12dc03b13253b --- /dev/null +++ b/docs/root/_static/docker_compose_front_proxy.svg @@ -0,0 +1 @@ + diff --git a/docs/root/_static/docker_compose_v0.1.svg b/docs/root/_static/docker_compose_v0.1.svg deleted file mode 100644 index 55236771d5009..0000000000000 --- a/docs/root/_static/docker_compose_v0.1.svg +++ /dev/null @@ -1,4 +0,0 @@ - - - - diff --git a/docs/root/_static/lor-architecture.svg b/docs/root/_static/lor-architecture.svg new file mode 100644 index 0000000000000..7231fc674185c --- /dev/null +++ b/docs/root/_static/lor-architecture.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/root/_static/lor-client.svg b/docs/root/_static/lor-client.svg new file mode 100644 index 0000000000000..3542be34a2ef2 --- /dev/null +++ b/docs/root/_static/lor-client.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/root/_static/lor-filter-chain-match.svg b/docs/root/_static/lor-filter-chain-match.svg new file mode 100644 index 0000000000000..2f5d8ab1aad59 --- /dev/null +++ b/docs/root/_static/lor-filter-chain-match.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/root/_static/lor-http-decode.svg b/docs/root/_static/lor-http-decode.svg new file mode 100644 index 0000000000000..1ed02946fb7b7 --- /dev/null +++ b/docs/root/_static/lor-http-decode.svg @@ -0,0 +1 @@ + 
\ No newline at end of file diff --git a/docs/root/_static/lor-http-encode.svg b/docs/root/_static/lor-http-encode.svg new file mode 100644 index 0000000000000..bac0d585d0664 --- /dev/null +++ b/docs/root/_static/lor-http-encode.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/root/_static/lor-http-filters.svg b/docs/root/_static/lor-http-filters.svg new file mode 100644 index 0000000000000..7d27798a1acc0 --- /dev/null +++ b/docs/root/_static/lor-http-filters.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/root/_static/lor-http.svg b/docs/root/_static/lor-http.svg new file mode 100644 index 0000000000000..d48e243ac46cc --- /dev/null +++ b/docs/root/_static/lor-http.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/root/_static/lor-lb.svg b/docs/root/_static/lor-lb.svg new file mode 100644 index 0000000000000..94733ffd19049 --- /dev/null +++ b/docs/root/_static/lor-lb.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/root/_static/lor-listener-filters.svg b/docs/root/_static/lor-listener-filters.svg new file mode 100644 index 0000000000000..61171a3ca423d --- /dev/null +++ b/docs/root/_static/lor-listener-filters.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/root/_static/lor-listeners.svg b/docs/root/_static/lor-listeners.svg new file mode 100644 index 0000000000000..ccff9c40fbf6f --- /dev/null +++ b/docs/root/_static/lor-listeners.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/root/_static/lor-network-filters.svg b/docs/root/_static/lor-network-filters.svg new file mode 100644 index 0000000000000..04aac073759ec --- /dev/null +++ b/docs/root/_static/lor-network-filters.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/root/_static/lor-network-read.svg b/docs/root/_static/lor-network-read.svg new file mode 100644 index 0000000000000..5fcfa5cc38db5 --- /dev/null +++ b/docs/root/_static/lor-network-read.svg @@ -0,0 +1 @@ + \ No newline at end of file 
diff --git a/docs/root/_static/lor-network-write.svg b/docs/root/_static/lor-network-write.svg new file mode 100644 index 0000000000000..a719b6d58aef4 --- /dev/null +++ b/docs/root/_static/lor-network-write.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/root/_static/lor-route-config.svg b/docs/root/_static/lor-route-config.svg new file mode 100644 index 0000000000000..148113b59cc42 --- /dev/null +++ b/docs/root/_static/lor-route-config.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/root/_static/lor-topology-edge.svg b/docs/root/_static/lor-topology-edge.svg new file mode 100644 index 0000000000000..7ccc85858fc25 --- /dev/null +++ b/docs/root/_static/lor-topology-edge.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/root/_static/lor-topology-hybrid.svg b/docs/root/_static/lor-topology-hybrid.svg new file mode 100644 index 0000000000000..e210d8506854e --- /dev/null +++ b/docs/root/_static/lor-topology-hybrid.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/root/_static/lor-topology-ilb.svg b/docs/root/_static/lor-topology-ilb.svg new file mode 100644 index 0000000000000..e0112d94ef970 --- /dev/null +++ b/docs/root/_static/lor-topology-ilb.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/root/_static/lor-topology-service-mesh-node.svg b/docs/root/_static/lor-topology-service-mesh-node.svg new file mode 100644 index 0000000000000..7622445478e93 --- /dev/null +++ b/docs/root/_static/lor-topology-service-mesh-node.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/root/_static/lor-topology-service-mesh.svg b/docs/root/_static/lor-topology-service-mesh.svg new file mode 100644 index 0000000000000..e7446e68e4e18 --- /dev/null +++ b/docs/root/_static/lor-topology-service-mesh.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/root/_static/lor-topology-tiered.svg b/docs/root/_static/lor-topology-tiered.svg new file mode 100644 index 
0000000000000..0a212d597d714 --- /dev/null +++ b/docs/root/_static/lor-topology-tiered.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/root/_static/lor-transport-socket.svg b/docs/root/_static/lor-transport-socket.svg new file mode 100644 index 0000000000000..b14f96209b0fc --- /dev/null +++ b/docs/root/_static/lor-transport-socket.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/root/_static/service_to_service_egress_listener.svg b/docs/root/_static/service_to_service_egress_listener.svg new file mode 100644 index 0000000000000..ef0bbe70b8068 --- /dev/null +++ b/docs/root/_static/service_to_service_egress_listener.svg @@ -0,0 +1,237 @@ + + + + + + image/svg+xml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + :9901 + diff --git a/docs/root/_static/service_to_service_ingress_listener.svg b/docs/root/_static/service_to_service_ingress_listener.svg new file mode 100644 index 0000000000000..5b9109239ab0a --- /dev/null +++ b/docs/root/_static/service_to_service_ingress_listener.svg @@ -0,0 +1,309 @@ + + + + + + image/svg+xml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + :9221 + diff --git a/docs/root/api-v2/common_messages/common_messages.rst b/docs/root/api-v2/common_messages/common_messages.rst index 853c1604f8ccd..d2d2a0a8ac628 100644 --- a/docs/root/api-v2/common_messages/common_messages.rst +++ b/docs/root/api-v2/common_messages/common_messages.rst @@ -15,5 +15,6 @@ Common messages ../api/v2/core/grpc_method_list.proto ../api/v2/core/http_uri.proto ../api/v2/core/socket_option.proto - ../api/v2/auth/cert.proto + ../api/v2/auth/common.proto + ../api/v2/auth/secret.proto ../api/v2/ratelimit/ratelimit.proto diff --git a/docs/root/api-v2/config/config.rst b/docs/root/api-v2/config/config.rst index feaa9c5b0c9aa..8fe20069ebc7f 100644 --- a/docs/root/api-v2/config/config.rst +++ b/docs/root/api-v2/config/config.rst @@ -17,4 +17,3 @@ Extensions 
grpc_credential/grpc_credential retry/retry trace/trace - wasm/wasm diff --git a/docs/root/api-v2/config/transport_socket/transport_socket.rst b/docs/root/api-v2/config/transport_socket/transport_socket.rst index defee4f8cc815..f664acf6d29ec 100644 --- a/docs/root/api-v2/config/transport_socket/transport_socket.rst +++ b/docs/root/api-v2/config/transport_socket/transport_socket.rst @@ -7,3 +7,4 @@ Transport sockets */v2alpha/* */v2/* + ../../api/v2/auth/tls.proto diff --git a/docs/root/api-v3/common_messages/common_messages.rst b/docs/root/api-v3/common_messages/common_messages.rst index faea72f757d66..ceff6d6681ee9 100644 --- a/docs/root/api-v3/common_messages/common_messages.rst +++ b/docs/root/api-v3/common_messages/common_messages.rst @@ -6,14 +6,18 @@ Common messages :maxdepth: 2 ../config/core/v3/base.proto + ../config/core/v3/extension.proto ../config/core/v3/address.proto ../config/core/v3/backoff.proto ../config/core/v3/protocol.proto + ../config/core/v3/proxy_protocol.proto ../service/discovery/v3/discovery.proto ../config/core/v3/config_source.proto ../config/core/v3/grpc_service.proto ../config/core/v3/grpc_method_list.proto ../config/core/v3/http_uri.proto ../config/core/v3/socket_option.proto + ../config/core/v3/substitution_format_string.proto ../extensions/common/ratelimit/v3/ratelimit.proto ../extensions/filters/common/fault/v3/fault.proto + ../extensions/network/socket_interface/v3/default_socket_interface.proto diff --git a/docs/root/api-v3/config/common/common.rst b/docs/root/api-v3/config/common/common.rst index 5739dffe3676e..bb6965a5f1497 100644 --- a/docs/root/api-v3/config/common/common.rst +++ b/docs/root/api-v3/config/common/common.rst @@ -5,5 +5,6 @@ Common :glob: :maxdepth: 2 + matcher/v3/* ../../extensions/common/dynamic_forward_proxy/v3/* ../../extensions/common/tap/v3/* diff --git a/docs/root/api-v3/config/compression/compression.rst b/docs/root/api-v3/config/compression/compression.rst new file mode 100644 index 
0000000000000..80aa0ba927ccb --- /dev/null +++ b/docs/root/api-v3/config/compression/compression.rst @@ -0,0 +1,8 @@ +Compression +=========== + +.. toctree:: + :glob: + :maxdepth: 2 + + ../../extensions/compression/gzip/*/v3/* diff --git a/docs/root/api-v3/config/config.rst b/docs/root/api-v3/config/config.rst index d7e6e6edd43c5..536bd94689794 100644 --- a/docs/root/api-v3/config/config.rst +++ b/docs/root/api-v3/config/config.rst @@ -12,7 +12,12 @@ Extensions transport_socket/transport_socket resource_monitor/resource_monitor common/common + compression/compression cluster/cluster grpc_credential/grpc_credential retry/retry trace/trace + internal_redirect/internal_redirect + endpoint/endpoint + upstream/upstream + wasm/wasm diff --git a/docs/root/api-v3/config/endpoint/endpoint.rst b/docs/root/api-v3/config/endpoint/endpoint.rst new file mode 100644 index 0000000000000..c1b64b1e46511 --- /dev/null +++ b/docs/root/api-v3/config/endpoint/endpoint.rst @@ -0,0 +1,8 @@ +Endpoint +======== + +.. toctree:: + :glob: + :maxdepth: 2 + + v3/* \ No newline at end of file diff --git a/docs/root/api-v3/config/filter/udp/udp.rst b/docs/root/api-v3/config/filter/udp/udp.rst index beaeaf857fc65..c430280ca06a9 100644 --- a/docs/root/api-v3/config/filter/udp/udp.rst +++ b/docs/root/api-v3/config/filter/udp/udp.rst @@ -5,5 +5,5 @@ UDP listener filters :glob: :maxdepth: 2 - */v2alpha/* - ../../../extensions/filter/udp/*/v3alpha/* + ../../../extensions/filters/udp/*/v3/* + ../../../extensions/filters/udp/*/v3alpha/* diff --git a/docs/root/api-v3/config/internal_redirect/internal_redirect.rst b/docs/root/api-v3/config/internal_redirect/internal_redirect.rst new file mode 100644 index 0000000000000..5452e8accee74 --- /dev/null +++ b/docs/root/api-v3/config/internal_redirect/internal_redirect.rst @@ -0,0 +1,8 @@ +Internal Redirect Predicates +============================ + +.. 
toctree:: + :glob: + :maxdepth: 2 + + ../../extensions/internal_redirect/** diff --git a/docs/root/api-v3/config/upstream/upstream.rst b/docs/root/api-v3/config/upstream/upstream.rst new file mode 100644 index 0000000000000..5047eaa92b289 --- /dev/null +++ b/docs/root/api-v3/config/upstream/upstream.rst @@ -0,0 +1,8 @@ +Upstream Configuration +====================== + +.. toctree:: + :glob: + :maxdepth: 3 + + ../../extensions/upstreams/http/*/v3/** diff --git a/docs/root/api-v2/config/wasm/wasm.rst b/docs/root/api-v3/config/wasm/wasm.rst similarity index 62% rename from docs/root/api-v2/config/wasm/wasm.rst rename to docs/root/api-v3/config/wasm/wasm.rst index 8ce884b18ba5e..efdb96212478f 100644 --- a/docs/root/api-v2/config/wasm/wasm.rst +++ b/docs/root/api-v3/config/wasm/wasm.rst @@ -5,4 +5,4 @@ WASM :glob: :maxdepth: 2 - v2alpha/* + ../../extensions/wasm/v3/* diff --git a/docs/root/api-v3/service/service.rst b/docs/root/api-v3/service/service.rst index 6ad5674d4bde4..d651856c678b7 100644 --- a/docs/root/api-v3/service/service.rst +++ b/docs/root/api-v3/service/service.rst @@ -6,6 +6,7 @@ Services :maxdepth: 2 accesslog/v3/* + load_stats/v3/* auth/v3/* health/v3/* metrics/v3/* @@ -15,3 +16,4 @@ Services tap/v3/* ../config/tap/v3/* trace/v3/* + extension/v3/* diff --git a/docs/root/api-v3/types/types.rst b/docs/root/api-v3/types/types.rst index f9c1cad3ea820..3e6af53865bde 100644 --- a/docs/root/api-v3/types/types.rst +++ b/docs/root/api-v3/types/types.rst @@ -10,6 +10,7 @@ Types ../type/v3/http_status.proto ../type/v3/percent.proto ../type/v3/range.proto + ../type/v3/ratelimit_unit.proto ../type/v3/semantic_version.proto ../type/v3/token_bucket.proto ../type/matcher/v3/metadata.proto diff --git a/docs/root/api/client_features.rst b/docs/root/api/client_features.rst index a233f7e7448de..4cd6594f0bdc1 100644 --- a/docs/root/api/client_features.rst +++ b/docs/root/api/client_features.rst @@ -10,6 +10,8 @@ Client features use reverse DNS naming scheme, for example 
`com.acme.feature`. Currently Defined Client Features --------------------------------- +.. It would be nice to use an RST ref here for service.load_stats.v2.LoadStatsResponse.send_all_clusters, but we can't due to https://github.com/envoyproxy/envoy/issues/3091. + - **envoy.config.require-any-fields-contain-struct**: This feature indicates that xDS client requires that the configuration entries of type *google.protobuf.Any* contain messages of type *udpa.type.v1.TypedStruct* only. @@ -18,3 +20,6 @@ Currently Defined Client Features :ref:`overprovisioning_factor` field. If graceful failover functionality is required, it must be supplied by the management server. +- **envoy.lrs.supports_send_all_clusters**: This feature indicates that the client supports + the *envoy_api_field_service.load_stats.v2.LoadStatsResponse.send_all_clusters* + field in the LRS response. diff --git a/docs/root/configuration/advanced/well_known_dynamic_metadata.rst b/docs/root/configuration/advanced/well_known_dynamic_metadata.rst index 73215617e46db..4d7f8ed3872c6 100644 --- a/docs/root/configuration/advanced/well_known_dynamic_metadata.rst +++ b/docs/root/configuration/advanced/well_known_dynamic_metadata.rst @@ -13,8 +13,38 @@ by looking at the operational metadata emitted by the MongoDB filter. The following Envoy filters emit dynamic metadata that other filters can leverage. +* :ref:`External Authorization Filter ` +* :ref:`External Authorization Network Filter ` * :ref:`Mongo Proxy Filter ` * :ref:`MySQL Proxy Filter ` +* :ref:`Postgres Proxy Filter ` * :ref:`Role Based Access Control (RBAC) Filter ` * :ref:`Role Based Access Control (RBAC) Network Filter ` * :ref:`ZooKeeper Proxy Filter ` + +The following Envoy filters can be configured to consume dynamic metadata emitted by other filters. + +* :ref:`External Authorization Filter via the metadata context namespaces + ` +* :ref:`RateLimit Filter limit override ` + +.. 
_shared_dynamic_metadata: + +Shared Dynamic Metadata +----------------------- +Dynamic metadata that is set by multiple filters is placed in the common key namespace `envoy.common`. Refer to the corresponding rules when setting this metadata. + +.. csv-table:: + :header: Name, Type, Description, Rules + :widths: 1, 1, 3, 3 + + access_log_hint, boolean, Whether access loggers should log the request., "When this metadata is already set: A `true` value should not be overwritten by a `false` value, while a `false` value can be overwritten by a `true` value." + +The following Envoy filters emit shared dynamic metadata. + +* :ref:`Role Based Access Control (RBAC) Filter ` +* :ref:`Role Based Access Control (RBAC) Network Filter ` + +The following filters consume shared dynamic metadata. + +* :ref:`Metadata Access Log Filter` diff --git a/docs/root/configuration/best_practices/edge.rst b/docs/root/configuration/best_practices/edge.rst index d70345971f01b..fc717a5f9235d 100644 --- a/docs/root/configuration/best_practices/edge.rst +++ b/docs/root/configuration/best_practices/edge.rst @@ -11,21 +11,24 @@ TCP proxies should configure: * restrict access to the admin endpoint, * :ref:`overload_manager `, -* :ref:`listener buffer limits ` to 32 KiB, -* :ref:`cluster buffer limits ` to 32 KiB. +* :ref:`listener buffer limits ` to 32 KiB, +* :ref:`cluster buffer limits ` to 32 KiB. HTTP proxies should additionally configure: -* :ref:`use_remote_address ` +* :ref:`use_remote_address ` to true (to avoid consuming HTTP headers from external clients, see :ref:`HTTP header sanitizing ` for details), * :ref:`connection and stream timeouts `, -* :ref:`HTTP/2 maximum concurrent streams limit ` to 100, -* :ref:`HTTP/2 initial stream window size limit ` to 64 KiB, -* :ref:`HTTP/2 initial connection window size limit ` to 1 MiB. -* :ref:`headers_with_underscores_action setting ` to REJECT_REQUEST, to protect upstream services that treat '_' and '-' as interchangeable. 
+* :ref:`HTTP/2 maximum concurrent streams limit ` to 100, +* :ref:`HTTP/2 initial stream window size limit ` to 64 KiB, +* :ref:`HTTP/2 initial connection window size limit ` to 1 MiB. +* :ref:`headers_with_underscores_action setting ` to REJECT_REQUEST, to protect upstream services that treat '_' and '-' as interchangeable. +* :ref:`Listener connection limits. ` +* :ref:`Global downstream connection limits `. -The following is a YAML example of the above recommendation. +The following is a YAML example of the above recommendation (taken from the :ref:`Google VRP +` edge server configuration): .. code-block:: yaml @@ -69,17 +72,20 @@ The following is a YAML example of the above recommendation. filter_chains: - filter_chain_match: server_names: ["example.com", "www.example.com"] - tls_context: - common_tls_context: - tls_certificates: - - certificate_chain: { filename: "example_com_cert.pem" } - private_key: { filename: "example_com_key.pem" } + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + tls_certificates: + - certificate_chain: { filename: "example_com_cert.pem" } + private_key: { filename: "example_com_key.pem" } # Uncomment if Envoy is behind a load balancer that exposes client IP address using the PROXY protocol. # use_proxy_proto: true filters: - name: envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager stat_prefix: ingress_http use_remote_address: true common_http_protocol_options: @@ -104,10 +110,27 @@ The following is a YAML example of the above recommendation. 
name: service_foo connect_timeout: 15s per_connection_buffer_limit_bytes: 32768 # 32 KiB - hosts: - socket_address: - address: 127.0.0.1 - port_value: 8080 + load_assignment: + cluster_name: some_service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 8080 http2_protocol_options: initial_stream_window_size: 65536 # 64 KiB initial_connection_window_size: 1048576 # 1 MiB + + layered_runtime: + layers: + - name: static_layer_0 + static_layer: + envoy: + resource_limits: + listener: + example_listener_name: + connection_limit: 10000 + overload: + global_downstream_max_connections: 50000 diff --git a/docs/root/configuration/best_practices/level_two.rst b/docs/root/configuration/best_practices/level_two.rst index a7a0b6e7e49d0..44c52ace8a8e2 100644 --- a/docs/root/configuration/best_practices/level_two.rst +++ b/docs/root/configuration/best_practices/level_two.rst @@ -5,14 +5,14 @@ Configuring Envoy as a level two proxy Envoy is a production-ready proxy, however, the default settings that are tailored for the edge use case may need to be adjusted when using Envoy in a multi-level deployment as a -"level two" HTTP/2 proxy. +"level two" proxy. .. image:: /_static/multilevel_deployment.svg **In summary, if you run level two Envoy version 1.11.1 or greater which terminates -HTTP/2, we strongly advise you to change the HTTP/2 configuration of your level +HTTP/2, we strongly advise you to change the HttpConnectionManager configuration of your level two Envoy, by setting its downstream** -:ref:`validation of HTTP/2 messaging option ` +:ref:`validation of HTTP messaging option ` **to true.** If there is an invalid HTTP/2 request and this option is not set, the Envoy in @@ -29,9 +29,7 @@ user has insight into what traffic will bypass level one checks, they could spra “bad” traffic across the level one fleet, causing serious disruption to other users’ traffic. 
-Please note that the -:ref:`validation of HTTP/2 messaging option ` -is planned to be deprecated and replaced with mandatory configuration in the HttpConnectionManager, to ensure -that what is now an easily overlooked option would need to be configured, ideally -appropriately for the given Envoy deployment. Please refer to the -https://github.com/envoyproxy/envoy/issues/9285 for more information. +This configuration option also has implications for invalid HTTP/1.1 though slightly less +severe ones. For Envoy L1s, invalid HTTP/1 requests will also result in connection +reset. If the option is set to true, and the request is completely read, the connection +will persist and can be reused for a subsequent request. diff --git a/docs/root/configuration/http/http_conn_man/header_casing.rst b/docs/root/configuration/http/http_conn_man/header_casing.rst index 7bdc116162847..e5476513810ec 100644 --- a/docs/root/configuration/http/http_conn_man/header_casing.rst +++ b/docs/root/configuration/http/http_conn_man/header_casing.rst @@ -7,5 +7,5 @@ existing systems that might rely on specific header casing. To support these use cases, Envoy allows configuring a formatting scheme for the headers, which will have Envoy transform the header keys during serialization. To configure this formatting on -response headers, specify the format in the :ref:`http_protocol_options `. -To configure this for upstream request headers, specify the formatting on the :ref:`Cluster `. +response headers, specify the format in the :ref:`http_protocol_options `. +To configure this for upstream request headers, specify the formatting on the :ref:`Cluster `. 
diff --git a/docs/root/configuration/http/http_conn_man/header_sanitizing.rst b/docs/root/configuration/http/http_conn_man/header_sanitizing.rst index db0a55f886fa3..47040620f7578 100644 --- a/docs/root/configuration/http/http_conn_man/header_sanitizing.rst +++ b/docs/root/configuration/http/http_conn_man/header_sanitizing.rst @@ -9,10 +9,10 @@ result in addition, removal, or modification. Ultimately, whether the request is or external is governed by the :ref:`x-forwarded-for ` header (please read the linked section carefully as how Envoy populates the header is complex and depends on the :ref:`use_remote_address -` +` setting). In addition, the :ref:`internal_address_config -` +` setting can be used to configure the internal/external determination. Envoy will potentially sanitize the following headers: diff --git a/docs/root/configuration/http/http_conn_man/headers.rst b/docs/root/configuration/http/http_conn_man/headers.rst index 44764759a3bc3..0ef3e630c5e05 100644 --- a/docs/root/configuration/http/http_conn_man/headers.rst +++ b/docs/root/configuration/http/http_conn_man/headers.rst @@ -15,7 +15,7 @@ user-agent ---------- The *user-agent* header may be set by the connection manager during decoding if the :ref:`add_user_agent -` option is +` option is enabled. The header is only modified if it is not already set. If the connection manager does set the header, the value is determined by the :option:`--service-cluster` command line option. @@ -25,7 +25,7 @@ server ------ The *server* header will be set during encoding to the value in the :ref:`server_name -` option. +` option. .. _config_http_conn_man_headers_x-client-trace-id: @@ -49,7 +49,7 @@ that in the current implementation, this should be considered a hint as it is se could be easily spoofed by any internal entity. In the future Envoy will support a mutual authentication TLS mesh which will make this header fully secure. 
Like *user-agent*, the value is determined by the :option:`--service-cluster` command line option. In order to enable this -feature you need to set the :ref:`user_agent ` option to true. +feature you need to set the :ref:`user_agent ` option to true. .. _config_http_conn_man_headers_downstream-service-node: @@ -108,7 +108,7 @@ The header used to override destination address when using the load balancing policy. It is ignored, unless the use of it is enabled via -:ref:`use_http_header `. +:ref:`use_http_header `. .. _config_http_conn_man_headers_x-forwarded-client-cert: @@ -149,9 +149,9 @@ Some examples of the XFCC header are: 3. For one client certificate with both URI type and DNS type Subject Alternative Name: ``x-forwarded-client-cert: By=http://frontend.lyft.com;Hash=468ed33be74eee6556d90c0149c1309e9ba61d6425303443c0748a02dd8de688;Subject="/C=US/ST=CA/L=San Francisco/OU=Lyft/CN=Test Client";URI=http://testclient.lyft.com;DNS=lyft.com;DNS=www.lyft.com`` How Envoy processes XFCC is specified by the -:ref:`forward_client_cert_details` +:ref:`forward_client_cert_details` and the -:ref:`set_current_client_cert_details` +:ref:`set_current_client_cert_details` HTTP connection manager options. If *forward_client_cert_details* is unset, the XFCC header will be sanitized by default. @@ -169,9 +169,9 @@ address of the nearest client to the XFF list before proxying the request. Some 3. ``x-forwarded-for: 50.0.0.1, 10.0.0.1`` (internal proxy hop) Envoy will only append to XFF if the :ref:`use_remote_address -` +` HTTP connection manager option is set to true and the :ref:`skip_xff_append -` +` is set false. This means that if *use_remote_address* is false (which is the default) or *skip_xff_append* is true, the connection manager operates in a transparent mode where it does not modify XFF. 
@@ -318,7 +318,7 @@ A few very important notes about XFF: * **NOTE**: If an internal service proxies an external request to another internal service, and includes the original XFF header, Envoy will append to it on egress if - :ref:`use_remote_address ` is set. This will cause + :ref:`use_remote_address ` is set. This will cause the other side to think the request is external. Generally, this is what is intended if XFF is being forwarded. If it is not intended, do not forward XFF, and forward :ref:`config_http_conn_man_headers_x-envoy-internal` instead. @@ -352,7 +352,7 @@ is out of scope for this documentation. If *x-request-id* is propagated across a following features are available: * Stable :ref:`access logging ` via the - :ref:`v2 API runtime filter`. + :ref:`v3 API runtime filter`. * Stable tracing when performing random sampling via the :ref:`tracing.random_sampling ` runtime setting or via forced tracing using the :ref:`config_http_conn_man_headers_x-envoy-force-trace` and @@ -467,13 +467,13 @@ Custom request/response headers Custom request/response headers can be added to a request/response at the weighted cluster, route, virtual host, and/or global route configuration level. See the -:ref:`v2 ` API documentation. +:ref:`v3 ` API documentation. No *:-prefixed* pseudo-header may be modified via this mechanism. The *:path* and *:authority* headers may instead be modified via mechanisms such as -:ref:`prefix_rewrite `, -:ref:`regex_rewrite `, and -:ref:`host_rewrite `. +:ref:`prefix_rewrite `, +:ref:`regex_rewrite `, and +:ref:`host_rewrite `. Headers are appended to requests/responses in the following order: weighted cluster level headers, route level headers, virtual host level headers and finally global level headers. @@ -496,7 +496,7 @@ Supported variable names are: .. 
note:: This may not be the physical remote address of the peer if the address has been inferred from - :ref:`proxy proto ` or :ref:`x-forwarded-for + :ref:`proxy proto ` or :ref:`x-forwarded-for `. %DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT% @@ -571,6 +571,12 @@ Supported variable names are: TCP The hex-encoded SHA256 fingerprint of the client certificate used to establish the downstream TLS connection. +%DOWNSTREAM_PEER_FINGERPRINT_1% + HTTP + The hex-encoded SHA1 fingerprint of the client certificate used to establish the downstream TLS connection. + TCP + The hex-encoded SHA1 fingerprint of the client certificate used to establish the downstream TLS connection. + %DOWNSTREAM_PEER_SERIAL% HTTP The serial number of the client certificate used to establish the downstream TLS connection. @@ -603,7 +609,7 @@ Supported variable names are: :ref:`x-forwarded-proto ` request header. %UPSTREAM_METADATA(["namespace", "key", ...])% - Populates the header with :ref:`EDS endpoint metadata ` from the + Populates the header with :ref:`EDS endpoint metadata ` from the upstream host selected by the router. Metadata may be selected from any namespace. In general, metadata values may be strings, numbers, booleans, lists, nested structures, or null. Upstream metadata values may be selected from nested structs by specifying multiple keys. Otherwise, @@ -644,3 +650,11 @@ Supported variable names are: key: "x-request-start" value: "%START_TIME(%s.%3f)%" append: true + +%RESPONSE_FLAGS% + Additional details about the response or connection, if any. Possible values and their meanings + are listed in the access log formatter :ref:`documentation`. + +%RESPONSE_CODE_DETAILS% + Response code details provides additional information about the HTTP response code, such as + who set it (the upstream or envoy) and why. 
\ No newline at end of file diff --git a/docs/root/configuration/http/http_conn_man/http_conn_man.rst b/docs/root/configuration/http/http_conn_man/http_conn_man.rst index f75ebacae09de..a726c3983a7aa 100644 --- a/docs/root/configuration/http/http_conn_man/http_conn_man.rst +++ b/docs/root/configuration/http/http_conn_man/http_conn_man.rst @@ -12,6 +12,7 @@ HTTP connection manager header_casing headers header_sanitizing + local_reply stats runtime rds diff --git a/docs/root/configuration/http/http_conn_man/local_reply.rst b/docs/root/configuration/http/http_conn_man/local_reply.rst new file mode 100644 index 0000000000000..5b87d9e3ef5ca --- /dev/null +++ b/docs/root/configuration/http/http_conn_man/local_reply.rst @@ -0,0 +1,78 @@ +.. _config_http_conn_man_local_reply: + +Local reply modification +======================== + +The :ref:`HTTP connection manager ` supports modification of the local reply, which is the response returned by Envoy itself. + +Features: + +* :ref:`Local reply content modification`. +* :ref:`Local reply format modification`. + +.. _config_http_conn_man_local_reply_modification: + +Local reply content modification +-------------------------------- + +The local response content returned by Envoy can be customized. A list of :ref:`mappers ` can be specified. Each mapper must have a :ref:`filter `. It may have the following rewrite rules: a :ref:`status_code ` rule to rewrite the response code, a :ref:`headers_to_add ` rule to add/override/append response HTTP headers, a :ref:`body ` rule to rewrite the local reply body and a :ref:`body_format_override ` to specify the response body format. Envoy checks each `mapper` according to the specified order until the first one is matched. If a `mapper` is matched, all its rewrite rules will apply. + +Example of a LocalReplyConfig + +.. 
code-block:: + + mappers: + - filter: + status_code_filter: + comparison: + op: EQ + value: + default_value: 400 + runtime_key: key_b + headers_to_add: + - header: + key: "foo" + value: "bar" + append: false + status_code: 401 + body: + inline_string: "not allowed" + +In the above example, if the status_code is 400, it will be rewritten to 401, and the response body will be rewritten as "not allowed". + +.. _config_http_conn_man_local_reply_format: + +Local reply format modification +------------------------------- + +The response body content type can be customized. If not specified, the content type is text/plain. There are two `body_format` fields; one is the :ref:`body_format ` field in the :ref:`LocalReplyConfig ` message and the other :ref:`body_format_override ` field in the `mapper`. The latter is only used when its mapper is matched. The former is used if there are no matched mappers, or the matched mapper doesn't have the `body_format` specified. + +Local reply format can be specified as :ref:`SubstitutionFormatString `. It supports :ref:`text_format ` and :ref:`json_format `. + +Example of a LocalReplyConfig with the `body_format` field. + +.. code-block:: + + mappers: + - filter: + status_code_filter: + comparison: + op: EQ + value: + default_value: 400 + runtime_key: key_b + status_code: 401 + body_format_override: + text_format: "%LOCAL_REPLY_BODY% %REQ(:path)%" + - filter: + status_code_filter: + comparison: + op: EQ + value: + default_value: 500 + runtime_key: key_b + status_code: 501 + body_format: + text_format: "%LOCAL_REPLY_BODY% %RESPONSE_CODE%" + +In the above example, there is a `body_format_override` inside the first `mapper` with a filter matching `status_code == 400`. It generates the response body in plain text format by concatenating %LOCAL_REPLY_BODY% with the `:path` request header. It is only used when the first mapper is matched. There is a `body_format` at the bottom of the config and at the same level as field `mappers`. 
It is used when none of the mappers is matched or the matched mapper doesn't have its own `body_format_override` specified. diff --git a/docs/root/configuration/http/http_conn_man/overview.rst b/docs/root/configuration/http/http_conn_man/overview.rst index dbb8fbc8c46d7..280008b4f77bc 100644 --- a/docs/root/configuration/http/http_conn_man/overview.rst +++ b/docs/root/configuration/http/http_conn_man/overview.rst @@ -3,5 +3,5 @@ Overview * HTTP connection manager :ref:`architecture overview ` * HTTP protocols :ref:`architecture overview ` -* :ref:`v2 API reference - ` +* :ref:`v3 API reference + ` diff --git a/docs/root/configuration/http/http_conn_man/rds.rst b/docs/root/configuration/http/http_conn_man/rds.rst index 516d0832868df..11f8e367a04dd 100644 --- a/docs/root/configuration/http/http_conn_man/rds.rst +++ b/docs/root/configuration/http/http_conn_man/rds.rst @@ -4,7 +4,7 @@ Route discovery service (RDS) ============================= The route discovery service (RDS) API is an optional API that Envoy will call to dynamically fetch -:ref:`route configurations `. A route configuration includes both +:ref:`route configurations `. A route configuration includes both HTTP header modifications, virtual hosts, and the individual route entries contained within each virtual host. Each :ref:`HTTP connection manager filter ` can independently fetch its own route configuration via the API. Optionally, the diff --git a/docs/root/configuration/http/http_conn_man/route_matching.rst b/docs/root/configuration/http/http_conn_man/route_matching.rst index 5f425fb31d8ee..9cd71e2b2bbb1 100644 --- a/docs/root/configuration/http/http_conn_man/route_matching.rst +++ b/docs/root/configuration/http/http_conn_man/route_matching.rst @@ -6,9 +6,9 @@ Route matching When Envoy matches a route, it uses the following procedure: #. The HTTP request's *host* or *:authority* header is matched to a :ref:`virtual host - `. -#. Each :ref:`route entry ` in the virtual host is checked, + `. +#. 
Each :ref:`route entry ` in the virtual host is checked, *in order*. If there is a match, the route is used and no further route checks are made. -#. Independently, each :ref:`virtual cluster ` in the +#. Independently, each :ref:`virtual cluster ` in the virtual host is checked, *in order*. If there is a match, the virtual cluster is used and no further virtual cluster checks are made. diff --git a/docs/root/configuration/http/http_conn_man/runtime.rst b/docs/root/configuration/http/http_conn_man/runtime.rst index dcc85412c6315..2c104c8065080 100644 --- a/docs/root/configuration/http/http_conn_man/runtime.rst +++ b/docs/root/configuration/http/http_conn_man/runtime.rst @@ -9,7 +9,7 @@ The HTTP connection manager supports the following runtime settings: http_connection_manager.normalize_path % of requests that will have path normalization applied if not already configured in - :ref:`normalize_path `. + :ref:`normalize_path `. This is evaluated at configuration load time and will apply to all requests for a given configuration. diff --git a/docs/root/configuration/http/http_conn_man/stats.rst b/docs/root/configuration/http/http_conn_man/stats.rst index d68abc8ce8e64..b8d4bf23591f8 100644 --- a/docs/root/configuration/http/http_conn_man/stats.rst +++ b/docs/root/configuration/http/http_conn_man/stats.rst @@ -111,10 +111,10 @@ All http1 statistics are rooted at *http1.* :header: Name, Type, Description :widths: 1, 1, 2 - dropped_headers_with_underscores, Counter, Total number of dropped headers with names containing underscores. This action is configured by setting the :ref:`headers_with_underscores_action config setting `. + dropped_headers_with_underscores, Counter, Total number of dropped headers with names containing underscores. This action is configured by setting the :ref:`headers_with_underscores_action config setting `. 
metadata_not_supported_error, Counter, Total number of metadata dropped during HTTP/1 encoding response_flood, Counter, Total number of connections closed due to response flooding - requests_rejected_with_underscores_in_headers, Counter, Total numbers of rejected requests due to header names containing underscores. This action is configured by setting the :ref:`headers_with_underscores_action config setting `. + requests_rejected_with_underscores_in_headers, Counter, Total numbers of rejected requests due to header names containing underscores. This action is configured by setting the :ref:`headers_with_underscores_action config setting `. Http2 codec statistics ~~~~~~~~~~~~~~~~~~~~~~ @@ -125,20 +125,28 @@ All http2 statistics are rooted at *http2.* :header: Name, Type, Description :widths: 1, 1, 2 - dropped_headers_with_underscores, Counter, Total number of dropped headers with names containing underscores. This action is configured by setting the :ref:`headers_with_underscores_action config setting `. - header_overflow, Counter, Total number of connections reset due to the headers being larger than the :ref:`configured value `. + dropped_headers_with_underscores, Counter, Total number of dropped headers with names containing underscores. This action is configured by setting the :ref:`headers_with_underscores_action config setting `. + header_overflow, Counter, Total number of connections reset due to the headers being larger than the :ref:`configured value `. headers_cb_no_stream, Counter, Total number of errors where a header callback is called without an associated stream. This tracks an unexpected occurrence due to an as yet undiagnosed bug - inbound_empty_frames_flood, Counter, Total number of connections terminated for exceeding the limit on consecutive inbound frames with an empty payload and no end stream flag. The limit is configured by setting the :ref:`max_consecutive_inbound_frames_with_empty_payload config setting `. 
- inbound_priority_frames_flood, Counter, Total number of connections terminated for exceeding the limit on inbound frames of type PRIORITY. The limit is configured by setting the :ref:`max_inbound_priority_frames_per_stream config setting `. - inbound_window_update_frames_flood, Counter, Total number of connections terminated for exceeding the limit on inbound frames of type WINDOW_UPDATE. The limit is configured by setting the :ref:`max_inbound_window_updateframes_per_data_frame_sent config setting `. - outbound_flood, Counter, Total number of connections terminated for exceeding the limit on outbound frames of all types. The limit is configured by setting the :ref:`max_outbound_frames config setting `. - outbound_control_flood, Counter, "Total number of connections terminated for exceeding the limit on outbound frames of types PING, SETTINGS and RST_STREAM. The limit is configured by setting the :ref:`max_outbound_control_frames config setting `." - requests_rejected_with_underscores_in_headers, Counter, Total numbers of rejected requests due to header names containing underscores. This action is configured by setting the :ref:`headers_with_underscores_action config setting `. + inbound_empty_frames_flood, Counter, Total number of connections terminated for exceeding the limit on consecutive inbound frames with an empty payload and no end stream flag. The limit is configured by setting the :ref:`max_consecutive_inbound_frames_with_empty_payload config setting `. + inbound_priority_frames_flood, Counter, Total number of connections terminated for exceeding the limit on inbound frames of type PRIORITY. The limit is configured by setting the :ref:`max_inbound_priority_frames_per_stream config setting `. + inbound_window_update_frames_flood, Counter, Total number of connections terminated for exceeding the limit on inbound frames of type WINDOW_UPDATE. The limit is configured by setting the :ref:`max_inbound_window_updateframes_per_data_frame_sent config setting `. 
+ outbound_flood, Counter, Total number of connections terminated for exceeding the limit on outbound frames of all types. The limit is configured by setting the :ref:`max_outbound_frames config setting `. + outbound_control_flood, Counter, "Total number of connections terminated for exceeding the limit on outbound frames of types PING, SETTINGS and RST_STREAM. The limit is configured by setting the :ref:`max_outbound_control_frames config setting `." + requests_rejected_with_underscores_in_headers, Counter, Total numbers of rejected requests due to header names containing underscores. This action is configured by setting the :ref:`headers_with_underscores_action config setting `. rx_messaging_error, Counter, Total number of invalid received frames that violated `section 8 `_ of the HTTP/2 spec. This will result in a *tx_reset* rx_reset, Counter, Total number of reset stream frames received by Envoy - too_many_header_frames, Counter, Total number of times an HTTP2 connection is reset due to receiving too many headers frames. Envoy currently supports proxying at most one header frame for 100-Continue one non-100 response code header frame and one frame with trailers trailers, Counter, Total number of trailers seen on requests coming from downstream + tx_flush_timeout, Counter, Total number of :ref:`stream idle timeouts ` waiting for open stream window to flush the remainder of a stream tx_reset, Counter, Total number of reset stream frames transmitted by Envoy + streams_active, Gauge, Active streams as observed by the codec + pending_send_bytes, Gauge, Currently buffered body data in bytes waiting to be written when stream/connection window is opened. + +.. attention:: + + The HTTP/2 `streams_active` gauge may be greater than the HTTP connection manager + `downstream_rq_active` gauge due to differences in stream accounting between the codec and the + HTTP connection manager. 
Tracing statistics ------------------ diff --git a/docs/root/configuration/http/http_conn_man/traffic_splitting.rst b/docs/root/configuration/http/http_conn_man/traffic_splitting.rst index bfbe0c1919865..eab4577f53016 100644 --- a/docs/root/configuration/http/http_conn_man/traffic_splitting.rst +++ b/docs/root/configuration/http/http_conn_man/traffic_splitting.rst @@ -26,7 +26,7 @@ section describes this scenario in more detail. Traffic shifting between two upstreams -------------------------------------- -The :ref:`runtime ` object +The :ref:`runtime ` object in the route configuration determines the probability of selecting a particular route (and hence its cluster). By using the *runtime_fraction* configuration, traffic to a particular route in a virtual host can be @@ -59,7 +59,7 @@ envoy configuration file. Envoy matches routes with a :ref:`first match ` policy. If the route has a runtime_fraction object, the request will be additionally matched based on the runtime_fraction -:ref:`value ` +:ref:`value ` (or the default, if no value is specified). Thus, by placing routes back-to-back in the above example and specifying a runtime_fraction object in the first route, traffic shifting can be accomplished by changing the runtime_fraction @@ -93,8 +93,8 @@ v3) instead of two. To split traffic evenly across the three versions specify the weight for each upstream cluster. Unlike the previous example, a **single** :ref:`route -` entry is sufficient. The -:ref:`weighted_clusters ` +` entry is sufficient. The +:ref:`weighted_clusters ` configuration block in a route can be used to specify multiple upstream clusters along with weights that indicate the **percentage** of traffic to be sent to each upstream cluster. @@ -120,7 +120,7 @@ to each upstream cluster. By default, the weights must sum to exactly 100. In the V2 API, the -:ref:`total weight ` defaults to 100, but can +:ref:`total weight ` defaults to 100, but can be modified to allow finer granularity. 
The weights assigned to each cluster can be dynamically adjusted using the diff --git a/docs/root/configuration/http/http_conn_man/vhds.rst b/docs/root/configuration/http/http_conn_man/vhds.rst index 73d9a14f0ec90..f9bcdf517bec2 100644 --- a/docs/root/configuration/http/http_conn_man/vhds.rst +++ b/docs/root/configuration/http/http_conn_man/vhds.rst @@ -4,7 +4,7 @@ Virtual Host Discovery Service (VHDS) ===================================== The virtual host discovery service (VHDS) API is an optional API that Envoy will call to -dynamically fetch :ref:`virtual hosts `. A virtual host includes +dynamically fetch :ref:`virtual hosts `. A virtual host includes a name and set of domains that get routed to it based on the incoming request's host header. By default in RDS, all routes for a cluster are sent to every Envoy instance in the mesh. This @@ -32,20 +32,20 @@ a route configuration name can. Subscribing to Resources ^^^^^^^^^^^^^^^^^^^^^^^^ VHDS allows resources to be :ref:`subscribed ` to using a -:ref:`DeltaDiscoveryRequest ` with the -:ref:`type_url ` set to -`type.googleapis.com/envoy.api.v2.route.VirtualHost` -and :ref:`resource_names_subscribe ` +:ref:`DeltaDiscoveryRequest ` with the +:ref:`type_url ` set to +`type.googleapis.com/envoy.config.route.v3.VirtualHost` +and :ref:`resource_names_subscribe ` set to a list of virtual host resource names for which it would like configuration. If a route for the contents of a host/authority header cannot be resolved, the active stream is paused while a -:ref:`DeltaDiscoveryRequest ` is sent. -When a :ref:`DeltaDiscoveryResponse ` is received where one of -the :ref:`aliases ` or the -:ref:`name ` in the response exactly matches the -:ref:`resource_names_subscribe ` -entry from the :ref:`DeltaDiscoveryRequest `, the route +:ref:`DeltaDiscoveryRequest ` is sent. 
+When a :ref:`DeltaDiscoveryResponse ` is received where one of +the :ref:`aliases ` or the +:ref:`name ` in the response exactly matches the +:ref:`resource_names_subscribe ` +entry from the :ref:`DeltaDiscoveryRequest `, the route configuration is updated, the stream is resumed, and processing of the filter chain continues. Updates to virtual hosts occur in two ways. If a virtual host was originally sent over RDS, then the @@ -53,19 +53,19 @@ virtual host should be updated over RDS. If a virtual host was subscribed to ove will take place over VHDS. When a route configuration entry is updated, if the -:ref:`vhds field ` has changed, the virtual host table for +:ref:`vhds field ` has changed, the virtual host table for that route configuration is cleared, which will require that all virtual hosts be sent again. Compatibility with Scoped RDS ----------------------------- VHDS shouldn't present any compatibility issues with -:ref:`scoped RDS `. +:ref:`scoped RDS `. Route configuration names can still be used for virtual host matching, but with scoped RDS configured it would point to a scoped route configuration. However, it is important to note that using -on-demand :ref:`scoped RDS ` +on-demand :ref:`scoped RDS ` and VHDS together will require two on-demand subscriptions per routing scope. diff --git a/docs/root/configuration/http/http_filters/adaptive_concurrency_filter.rst b/docs/root/configuration/http/http_filters/adaptive_concurrency_filter.rst index f5f1467a738e0..19c7d0c588721 100644 --- a/docs/root/configuration/http/http_filters/adaptive_concurrency_filter.rst +++ b/docs/root/configuration/http/http_filters/adaptive_concurrency_filter.rst @@ -9,7 +9,7 @@ Adaptive Concurrency This filter should be configured with the name `envoy.filters.http.adaptive_concurrency`. -See the :ref:`v2 API reference ` for details on each configuration parameter. +See the :ref:`v3 API reference ` for details on each configuration parameter. 
Overview -------- @@ -28,7 +28,7 @@ Gradient Controller The gradient controller makes forwarding decisions based on a periodically measured ideal round-trip time (minRTT) for an upstream. -:ref:`v2 API reference ` +:ref:`v3 API reference ` Calculating the minRTT ^^^^^^^^^^^^^^^^^^^^^^ @@ -73,7 +73,7 @@ Notice that *B*, the buffer value added to the minRTT, allows for normal varianc latencies by requiring the sampled latencies the exceed the minRTT by some configurable threshold before decreasing the gradient value. -The buffer will be a percentage of the measured minRTT value whose value is modified via the buffer field in the :ref:`minRTT calculation parameters `. The buffer is calculated as follows: +The buffer will be a percentage of the measured minRTT value whose value is modified via the buffer field in the :ref:`minRTT calculation parameters `. The buffer is calculated as follows: .. math:: @@ -118,7 +118,7 @@ fields can be overridden via runtime settings. name: envoy.filters.http.adaptive_concurrency typed_config: - "@type": type.googleapis.com/envoy.config.filter.http.adaptive_concurrency.v2alpha.AdaptiveConcurrency + "@type": type.googleapis.com/envoy.extensions.filters.http.adaptive_concurrency.v3.AdaptiveConcurrency gradient_controller_config: sample_aggregate_percentile: value: 90 @@ -191,7 +191,7 @@ Statistics ---------- The adaptive concurrency filter outputs statistics in the *http..adaptive_concurrency.* namespace. The :ref:`stat prefix -` +` comes from the owning HTTP connection manager. Statistics are specific to the concurrency controllers. 
diff --git a/docs/root/configuration/http/http_filters/aws_lambda_filter.rst b/docs/root/configuration/http/http_filters/aws_lambda_filter.rst index 28bfe645ea935..d281de9b0ab7c 100644 --- a/docs/root/configuration/http/http_filters/aws_lambda_filter.rst +++ b/docs/root/configuration/http/http_filters/aws_lambda_filter.rst @@ -4,7 +4,7 @@ AWS Lambda ========== -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.http.aws_lambda*. .. attention:: @@ -15,11 +15,11 @@ The HTTP AWS Lambda filter is used to trigger an AWS Lambda function from a stan It supports a few options to control whether to pass through the HTTP request payload as is or to wrap it in a JSON schema. -If :ref:`payload_passthrough ` is set to +If :ref:`payload_passthrough ` is set to ``true``, then the payload is sent to Lambda without any transformations. *Note*: This means you lose access to all the HTTP headers in the Lambda function. -However, if :ref:`payload_passthrough ` +However, if :ref:`payload_passthrough ` is set to ``false``, then the HTTP request is transformed to a JSON payload with the following schema: .. code-block:: @@ -81,7 +81,7 @@ On the other end, the response of the Lambda function must conform to the follow .. _regional Lambda endpoint: https://docs.aws.amazon.com/general/latest/gr/lambda-service.html The filter supports :ref:`per-filter configuration -`. +`. 
If you use the per-filter configuration, the target cluster _must_ have the following metadata: @@ -132,7 +132,7 @@ in us-west-2: transport_socket: name: envoy.transport_sockets.tls typed_config: - "@type": type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext sni: "*.amazonaws.com" @@ -179,7 +179,7 @@ An example with the Lambda metadata applied to a weighted-cluster: transport_socket: name: envoy.transport_sockets.tls typed_config: - "@type": type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext sni: "*.amazonaws.com" diff --git a/docs/root/configuration/http/http_filters/aws_request_signing_filter.rst b/docs/root/configuration/http/http_filters/aws_request_signing_filter.rst index 4c9e097b879fd..0280a012a05dc 100644 --- a/docs/root/configuration/http/http_filters/aws_request_signing_filter.rst +++ b/docs/root/configuration/http/http_filters/aws_request_signing_filter.rst @@ -4,7 +4,7 @@ AWS Request Signing =================== -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.http.aws_request_signing*. .. attention:: @@ -24,7 +24,7 @@ Example filter configuration: name: envoy.filters.http.aws_request_signing typed_config: - "@type": type.googleapis.com/envoy.config.filter.http.aws_request_signing.v2alpha.AwsRequestSigning + "@type": type.googleapis.com/envoy.extensions.filters.http.aws_request_signing.v3.AwsRequestSigning service_name: s3 region: us-west-2 @@ -33,7 +33,7 @@ Statistics ---------- The AWS request signing filter outputs statistics in the *http..aws_request_signing.* namespace. The -:ref:`stat prefix ` +:ref:`stat prefix ` comes from the owning HTTP connection manager. .. 
csv-table:: diff --git a/docs/root/configuration/http/http_filters/buffer_filter.rst b/docs/root/configuration/http/http_filters/buffer_filter.rst index d1e55ac3138ed..d2665c58c1372 100644 --- a/docs/root/configuration/http/http_filters/buffer_filter.rst +++ b/docs/root/configuration/http/http_filters/buffer_filter.rst @@ -11,12 +11,12 @@ If enabled the buffer filter populates content-length header if it is not presen already. The behavior can be disabled using the runtime feature `envoy.reloadable_features.buffer_filter_populate_content_length`. -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.http.buffer*. Per-Route Configuration ----------------------- The buffer filter configuration can be overridden or disabled on a per-route basis by providing a -:ref:`BufferPerRoute ` configuration on +:ref:`BufferPerRoute ` configuration on the virtual host, route, or weighted cluster. diff --git a/docs/root/configuration/http/http_filters/compressor_filter.rst b/docs/root/configuration/http/http_filters/compressor_filter.rst new file mode 100644 index 0000000000000..862af5304065b --- /dev/null +++ b/docs/root/configuration/http/http_filters/compressor_filter.rst @@ -0,0 +1,115 @@ +.. _config_http_filters_compressor: + +Compressor +========== +Compressor is an HTTP filter which enables Envoy to compress dispatched data +from an upstream service upon client request. Compression is useful in +situations when bandwidth is scarce and large payloads can be effectively compressed +at the expense of higher CPU load or offloading it to a compression accelerator. + +.. note:: + + This filter deprecates the :ref:`HTTP Gzip filter `. + +Configuration +------------- +* :ref:`v3 API reference ` +* This filter should be configured with the name *envoy.filters.http.compressor*. 
+ +How it works +------------ +When the compressor filter is enabled, request and response headers are inspected to +determine whether or not the content should be compressed. The content is +compressed and then sent to the client with the appropriate headers, if +response and request allow. + +Currently the filter supports :ref:`gzip compression ` +only. Other compression libraries can be supported as extensions. + +An example configuration of the filter may look like the following: + +.. code-block:: yaml + + http_filters: + - name: envoy.filters.http.compressor + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.compressor.v3.Compressor + disable_on_etag_header: true + content_length: 100 + content_type: + - text/html + - application/json + compressor_library: + name: text_optimized + typed_config: + "@type": type.googleapis.com/envoy.extensions.compression.gzip.compressor.v3.Gzip + memory_level: 3 + window_bits: 10 + compression_level: best_compression + compression_strategy: default_strategy + +By *default* compression will be *skipped* when: + +- A request does NOT contain *accept-encoding* header. +- A request includes *accept-encoding* header, but it does not contain "gzip" or "\*". +- A request includes *accept-encoding* with "gzip" or "\*" with the weight "q=0". Note + that the "gzip" will have a higher weight than "\*". For example, if *accept-encoding* + is "gzip;q=0,\*;q=1", the filter will not compress. But if the header is set to + "\*;q=0,gzip;q=1", the filter will compress. +- A request whose *accept-encoding* header includes any encoding type with a higher + weight than "gzip"'s given the corresponding compression filter is present in the chain. +- A response contains a *content-encoding* header. +- A response contains a *cache-control* header whose value includes "no-transform". +- A response contains a *transfer-encoding* header whose value includes "gzip". 
+- A response does not contain a *content-type* value that matches one of the selected + mime-types, which default to *application/javascript*, *application/json*, + *application/xhtml+xml*, *image/svg+xml*, *text/css*, *text/html*, *text/plain*, + *text/xml*. +- Neither *content-length* nor *transfer-encoding* headers are present in + the response. +- Response size is smaller than 30 bytes (only applicable when *transfer-encoding* + is not chunked). + +Please note that in case the filter is configured to use a compression library extension +other than gzip it looks for content encoding in the *accept-encoding* header provided by +the extension. + +When compression is *applied*: + +- The *content-length* is removed from response headers. +- Response headers contain "*transfer-encoding: chunked*" and do not contain + "*content-encoding*" header. +- The "*vary: accept-encoding*" header is inserted on every response. + +Also the "*vary: accept-encoding*" header may be inserted even if compression is *not* +applied due to incompatible "*accept-encoding*" header in a request. This happens +when the requested resource still can be compressed given compatible "*accept-encoding*". +Otherwise, if an uncompressed response is cached by a caching proxy in front of Envoy, +the proxy won't know to fetch a new incoming request with compatible "*accept-encoding*" +from upstream. + +.. _compressor-statistics: + +Statistics +---------- + +Every configured Compressor filter has statistics rooted at +.compressor...* +with the following: + +.. csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + compressed, Counter, Number of requests compressed. + not_compressed, Counter, Number of requests not compressed. + no_accept_header, Counter, Number of requests with no accept header sent. + header_identity, Counter, Number of requests sent with "identity" set as the *accept-encoding*. 
+ header_compressor_used, Counter, Number of requests sent with "gzip" set as the *accept-encoding*. + header_compressor_overshadowed, Counter, Number of requests skipped by this filter instance because they were handled by another filter in the same filter chain. + header_wildcard, Counter, Number of requests sent with "\*" set as the *accept-encoding*. + header_not_valid, Counter, Number of requests sent with an invalid *accept-encoding* header (aka "q=0" or an unsupported encoding type). + total_uncompressed_bytes, Counter, The total uncompressed bytes of all the requests that were marked for compression. + total_compressed_bytes, Counter, The total compressed bytes of all the requests that were marked for compression. + content_length_too_small, Counter, Number of requests that accepted gzip encoding but did not compress because the payload was too small. + not_compressed_etag, Counter, Number of requests that were not compressed due to the etag header. *disable_on_etag_header* must be turned on for this to happen. diff --git a/docs/root/configuration/http/http_filters/cors_filter.rst b/docs/root/configuration/http/http_filters/cors_filter.rst index d51cbd923b54b..f7109ef6eaa91 100644 --- a/docs/root/configuration/http/http_filters/cors_filter.rst +++ b/docs/root/configuration/http/http_filters/cors_filter.rst @@ -8,7 +8,7 @@ For the meaning of the headers please refer to the pages below. * https://developer.mozilla.org/en-US/docs/Web/HTTP/Access_control_CORS * https://www.w3.org/TR/cors/ -* :ref:`v2 API reference ` +* :ref:`v2 API reference ` * This filter should be configured with the name *envoy.filters.http.cors*. .. _cors-runtime: @@ -16,12 +16,12 @@ For the meaning of the headers please refer to the pages below. Runtime ------- The fraction of requests for which the filter is enabled can be configured via the :ref:`runtime_key -` value of the :ref:`filter_enabled -` field. +` value of the :ref:`filter_enabled +` field. 
The fraction of requests for which the filter is enabled in shadow-only mode can be configured via -the :ref:`runtime_key ` value of the -:ref:`shadow_enabled ` field. When enabled in +the :ref:`runtime_key ` value of the +:ref:`shadow_enabled ` field. When enabled in shadow-only mode, the filter will evaluate the request's *Origin* to determine if it's valid but will not enforce any policies. diff --git a/docs/root/configuration/http/http_filters/csrf_filter.rst b/docs/root/configuration/http/http_filters/csrf_filter.rst index fb8c770d03dab..0295b5f9fa59c 100644 --- a/docs/root/configuration/http/http_filters/csrf_filter.rst +++ b/docs/root/configuration/http/http_filters/csrf_filter.rst @@ -41,7 +41,7 @@ For more information on CSRF please refer to the pages below. * https://www.owasp.org/index.php/Cross-Site_Request_Forgery_%28CSRF%29 * https://seclab.stanford.edu/websec/csrf/csrf.pdf -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` .. note:: @@ -57,7 +57,7 @@ valid. The reason it is able to do this while still mitigating cross-site reques forgery attempts is because the target origin has already been reached by the time front-envoy is applying the filter. This means that while endpoints may support cross-origin requests they are still protected from malicious third-parties who -have not been whitelisted. +have not been allowlisted. It's important to note that requests should generally originate from the same origin as the target but there are use cases where that may not be possible. @@ -76,12 +76,12 @@ Runtime ------- The fraction of requests for which the filter is enabled can be configured via the :ref:`runtime_key -` value of the :ref:`filter_enabled -` field. +` value of the :ref:`filter_enabled +` field. The fraction of requests for which the filter is enabled in shadow-only mode can be configured via -the :ref:`runtime_key ` value of the -:ref:`shadow_enabled ` field. +the :ref:`runtime_key ` value of the +:ref:`shadow_enabled ` field. 
When enabled in shadow-only mode, the filter will evaluate the request's *Origin* and *Destination* to determine if it's valid but will not enforce any policies. diff --git a/docs/root/configuration/http/http_filters/decompressor_filter.rst b/docs/root/configuration/http/http_filters/decompressor_filter.rst new file mode 100644 index 0000000000000..eb71d1c2df666 --- /dev/null +++ b/docs/root/configuration/http/http_filters/decompressor_filter.rst @@ -0,0 +1,115 @@ +.. _config_http_filters_decompressor: + +Decompressor +============ +Decompressor is an HTTP filter which enables Envoy to bidirectionally decompress data. + + +Configuration +------------- +* :ref:`v3 API reference ` + +How it works +------------ +When the decompressor filter is enabled, headers are inspected to +determine whether or not the content should be decompressed. The content is +decompressed and passed on to the rest of the filter chain. Note that decompression happens +independently for request and responses based on the rules described below. + +Currently the filter supports :ref:`gzip compression ` +only. Other compression libraries can be supported as extensions. + +An example configuration of the filter may look like the following: + +.. code-block:: yaml + + http_filters: + - name: decompressor + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.decompressor.v3.Decompressor + decompressor_library: + name: basic + typed_config: + "@type": type.googleapis.com/envoy.extensions.compression.gzip.decompressor.v3.Gzip + window_bits: 10 + +By *default* decompression will be *skipped* when: + +- A request/response does NOT contain *content-encoding* header. +- A request/response includes *content-encoding* header, but it does not contain the configured + decompressor's content-encoding. +- A request/response contains a *cache-control* header whose value includes "no-transform". + +When decompression is *applied*: + +- The *content-length* is removed from headers. + + .. 
note:: + + If an updated *content-length* header is desired, the buffer filter can be installed as part + of the filter chain to buffer decompressed frames, and ultimately update the header. Due to + :ref:`filter ordering ` a buffer filter needs to be + installed after the decompressor for requests and prior to the decompressor for responses. + +- The *content-encoding* header is modified to remove the decompression that was applied. + +.. _decompressor-statistics: + +Using different decompressors for requests and responses +-------------------------------------------------------- + +If different compression libraries are desired for requests and responses, it is possible to install +multiple decompressor filters enabled only for requests or responses. For instance: + +.. code-block:: yaml + + http_filters: + # This filter is only enabled for requests. + - name: envoy.filters.http.decompressor + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.decompressor.v3.Decompressor + decompressor_library: + name: small + typed_config: + "@type": "type.googleapis.com/envoy.extensions.compression.gzip.decompressor.v3.Gzip" + window_bits: 9 + chunk_size: 8192 + response_direction_config: + common_config: + enabled: + default_value: false + runtime_key: response_decompressor_enabled + # This filter is only enabled for responses. + - name: envoy.filters.http.decompressor + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.decompressor.v3.Decompressor + decompressor_library: + name: large + typed_config: + "@type": "type.googleapis.com/envoy.extensions.compression.gzip.decompressor.v3.Gzip" + window_bits: 12 + chunk_size: 16384 + request_direction_config: + common_config: + enabled: + default_value: false + runtime_key: request_decompressor_enabled + +Statistics +---------- + +Every configured Decompressor filter has statistics rooted at +.decompressor...* +with the following: + +.. 
csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + decompressed, Counter, Number of request/responses decompressed. + not_decompressed, Counter, Number of request/responses not decompressed. + total_uncompressed_bytes, Counter, The total uncompressed bytes of all the request/responses that were marked for decompression. + total_compressed_bytes, Counter, The total compressed bytes of all the request/responses that were marked for decompression. + +Additional stats for the decompressor library are rooted at +.decompressor...decompressor_library. diff --git a/docs/root/configuration/http/http_filters/dynamic_forward_proxy_filter.rst b/docs/root/configuration/http/http_filters/dynamic_forward_proxy_filter.rst index 3767964230289..d3f3d57dc215c 100644 --- a/docs/root/configuration/http/http_filters/dynamic_forward_proxy_filter.rst +++ b/docs/root/configuration/http/http_filters/dynamic_forward_proxy_filter.rst @@ -8,30 +8,37 @@ Dynamic forward proxy HTTP dynamic forward proxy support should be considered alpha and not production ready. * HTTP dynamic forward proxy :ref:`architecture overview ` -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.http.dynamic_forward_proxy* The following is a complete configuration that configures both the :ref:`dynamic forward proxy HTTP filter -` +` as well as the :ref:`dynamic forward proxy cluster -`. Both filter and cluster +`. Both filter and cluster must be configured together and point to the same DNS cache parameters for Envoy to operate as an HTTP dynamic forward proxy. -This filter supports :ref:`host rewrite ` -via the :ref:`virtual host's per_filter_config ` or the -:ref:`route's per_filter_config `. This can be used to rewrite +This filter supports :ref:`host rewrite ` +via the :ref:`virtual host's typed_per_filter_config ` or the +:ref:`route's typed_per_filter_config `. 
This can be used to rewrite the host header with the provided value before DNS lookup, thus allowing to route traffic to the rewritten host when forwarding. See the example below within the configured routes. .. note:: - Configuring a :ref:`tls_context ` on the cluster with + Configuring a :ref:`transport_socket with name envoy.transport_sockets.tls ` on the cluster with *trusted_ca* certificates instructs Envoy to use TLS when connecting to upstream hosts and verify the certificate chain. Additionally, Envoy will automatically perform SAN verification for the resolved host name as well as specify the host name via SNI. +.. _dns_cache_circuit_breakers: + + Dynamic forward proxy uses circuit breakers built in to the DNS cache with the configuration + of :ref:`DNS cache circuit breakers `. By default, this behavior is enabled by the runtime feature `envoy.reloadable_features.enable_dns_cache_circuit_breakers`. + If this runtime feature is disabled, cluster circuit breakers will be used even when setting the configuration + of :ref:`DNS cache circuit breakers `. + .. code-block:: yaml admin: @@ -53,7 +60,7 @@ host when forwarding. See the example below within the configured routes. - filters: - name: envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager stat_prefix: ingress_http route_config: name: local_route @@ -65,9 +72,10 @@ host when forwarding. See the example below within the configured routes. 
prefix: "/force-host-rewrite" route: cluster: dynamic_forward_proxy_cluster - per_filter_config: + typed_per_filter_config: envoy.filters.http.dynamic_forward_proxy: - host_rewrite: www.example.org + "@type": type.googleapis.com/envoy.extensions.filters.http.dynamic_forward_proxy.v3.PerRouteConfig + host_rewrite_literal: www.example.org - match: prefix: "/" route: @@ -80,6 +88,8 @@ host when forwarding. See the example below within the configured routes. name: dynamic_forward_proxy_cache_config dns_lookup_family: V4_ONLY - name: envoy.filters.http.router + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router clusters: - name: dynamic_forward_proxy_cluster connect_timeout: 1s @@ -87,14 +97,14 @@ host when forwarding. See the example below within the configured routes. cluster_type: name: envoy.clusters.dynamic_forward_proxy typed_config: - "@type": type.googleapis.com/envoy.config.cluster.dynamic_forward_proxy.v2alpha.ClusterConfig + "@type": type.googleapis.com/envoy.extensions.clusters.dynamic_forward_proxy.v3.ClusterConfig dns_cache_config: name: dynamic_forward_proxy_cache_config dns_lookup_family: V4_ONLY transport_socket: name: envoy.transport_sockets.tls typed_config: - "@type": type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext common_tls_context: validation_context: trusted_ca: {filename: /etc/ssl/certs/ca-certificates.crt} @@ -116,3 +126,14 @@ namespace. host_added, Counter, Number of hosts that have been added to the cache. host_removed, Counter, Number of hosts that have been removed from the cache. num_hosts, Gauge, Number of hosts that are currently in the cache. + dns_rq_pending_overflow, Counter, Number of dns pending request overflow. + +The dynamic forward proxy DNS cache circuit breakers outputs statistics in the dns_cache..circuit_breakers* +namespace. + +.. 
csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + rq_pending_open, Gauge, Whether the requests circuit breaker is closed (0) or open (1) + rq_pending_remaining, Gauge, Number of remaining requests until the circuit breaker opens \ No newline at end of file diff --git a/docs/root/configuration/http/http_filters/dynamodb_filter.rst b/docs/root/configuration/http/http_filters/dynamodb_filter.rst index df06e05ef6c0e..c66c474e0bc7a 100644 --- a/docs/root/configuration/http/http_filters/dynamodb_filter.rst +++ b/docs/root/configuration/http/http_filters/dynamodb_filter.rst @@ -4,14 +4,14 @@ DynamoDB ======== * DynamoDB :ref:`architecture overview ` -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.http.dynamo*. Statistics ---------- The DynamoDB filter outputs statistics in the *http..dynamodb.* namespace. The :ref:`stat prefix -` comes from the +` comes from the owning HTTP connection manager. Per operation stats can be found in the *http..dynamodb.operation..* diff --git a/docs/root/configuration/http/http_filters/ext_authz_filter.rst b/docs/root/configuration/http/http_filters/ext_authz_filter.rst index bc1c69c611c87..ba7bfe0b87512 100644 --- a/docs/root/configuration/http/http_filters/ext_authz_filter.rst +++ b/docs/root/configuration/http/http_filters/ext_authz_filter.rst @@ -3,23 +3,23 @@ External Authorization ====================== * External authorization :ref:`architecture overview ` -* :ref:`HTTP filter v2 API reference ` +* :ref:`HTTP filter v3 API reference ` * This filter should be configured with the name *envoy.filters.http.ext_authz*. The external authorization filter calls an external gRPC or HTTP service to check whether an incoming HTTP request is authorized or not. If the request is deemed unauthorized, then the request will be denied normally with 403 (Forbidden) response. 
Note that sending additional custom metadata from the authorization service to the upstream, to the downstream or to the authorization service is -also possible. This is explained in more details at :ref:`HTTP filter `. +also possible. This is explained in more details at :ref:`HTTP filter `. The content of the requests that are passed to an authorization service is specified by -:ref:`CheckRequest `. +:ref:`CheckRequest `. .. _config_http_filters_ext_authz_http_configuration: The HTTP filter, using a gRPC/HTTP service, can be configured as follows. You can see all the configuration options at -:ref:`HTTP filter `. +:ref:`HTTP filter `. Configuration Examples ----------------------------- @@ -31,7 +31,7 @@ A sample filter configuration for a gRPC authorization server: http_filters: - name: envoy.filters.http.ext_authz typed_config: - "@type": type.googleapis.com/envoy.config.filter.http.ext_authz.v2.ExtAuthz + "@type": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz grpc_service: envoy_grpc: cluster_name: ext-authz @@ -67,7 +67,7 @@ A sample filter configuration for a raw HTTP authorization server: http_filters: - name: envoy.filters.http.ext_authz typed_config: - "@type": type.googleapis.com/envoy.config.filter.http.ext_authz.v2.ExtAuthz + "@type": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz http_service: server_uri: uri: 127.0.0.1:10003 @@ -106,16 +106,18 @@ In this example we add additional context on the virtual host, and disabled the virtual_hosts: - name: local_service domains: ["*"] - per_filter_config: + typed_per_filter_config: envoy.filters.http.ext_authz: + "@type": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthzPerRoute check_settings: context_extensions: virtual_host: local_service routes: - match: { prefix: "/static" } route: { cluster: some_service } - per_filter_config: + typed_per_filter_config: envoy.filters.http.ext_authz: + "@type": 
type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthzPerRoute disabled: true - match: { prefix: "/" } route: { cluster: some_service } @@ -136,8 +138,23 @@ The HTTP filter outputs statistics in the *cluster..ext_au failure_mode_allowed, Counter, "Total requests that were error(s) but were allowed through because of failure_mode_allow set to true." +Dynamic Metadata +---------------- +.. _config_http_filters_ext_authz_dynamic_metadata: + +.. note:: + + The External Authorization filter emits dynamic metadata only when it is configured to use + gRPC service as the authorization server. + +The External Authorization filter emits dynamic metadata as an opaque ``google.protobuf.Struct`` +*only* when the gRPC authorization server returns an :ref:`OK +` :ref:`CheckResponse +` with a filled :ref:`dynamic_metadata +` field. + Runtime ------- The fraction of requests for which the filter is enabled can be configured via the :ref:`runtime_key -` value of the :ref:`filter_enabled -` field. +` value of the :ref:`filter_enabled +` field. diff --git a/docs/root/configuration/http/http_filters/fault_filter.rst b/docs/root/configuration/http/http_filters/fault_filter.rst index 80678714db699..62b9cd9e28c82 100644 --- a/docs/root/configuration/http/http_filters/fault_filter.rst +++ b/docs/root/configuration/http/http_filters/fault_filter.rst @@ -24,7 +24,7 @@ Configuration The fault injection filter must be inserted before any other filter, including the router filter. -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.http.fault*. .. _config_http_filters_fault_injection_http_header: @@ -40,59 +40,69 @@ x-envoy-fault-abort-request HTTP status code to abort a request with. The header value should be an integer that specifies the HTTP status code to return in response to a request and must be in the range [200, 600). In order for the header to work, :ref:`header_abort - ` needs to be set. 
+ ` needs to be set. + +x-envoy-fault-abort-grpc-request + gRPC status code to abort a request with. The header value should be a non-negative integer that specifies + the gRPC status code to return in response to a request. Its value range is [0, UInt32.Max] instead of [0, 16] + to allow testing even not well-defined gRPC status codes. When this header is set, the HTTP response status code + will be set to 200. In order for the header to work, :ref:`header_abort + ` needs to be set. If both + *x-envoy-fault-abort-request* and *x-envoy-fault-abort-grpc-request* headers are set then + *x-envoy-fault-abort-grpc-request* header will be **ignored** and fault response http status code will be + set to *x-envoy-fault-abort-request* header value. x-envoy-fault-abort-request-percentage The percentage of requests that should be failed with a status code that's defined - by the value of *x-envoy-fault-abort-request* HTTP header. The header value should be an integer - that specifies the numerator of the percentage of request to apply aborts to and must be greater - or equal to 0 and its maximum value is capped by the value of the numerator of - :ref:`percentage ` field. + by the value of *x-envoy-fault-abort-request* or *x-envoy-fault-abort-grpc-request* HTTP headers. + The header value should be an integer that specifies the numerator of the percentage of request to apply aborts + to and must be greater or equal to 0 and its maximum value is capped by the value of the numerator of + :ref:`percentage ` field. Percentage's denominator is equal to default percentage's denominator - :ref:`percentage ` field. + :ref:`percentage ` field. In order for the header to work, :ref:`header_abort - ` needs to be set and - *x-envoy-fault-abort-request* HTTP header needs to be a part of a request. + ` needs to be set and + either *x-envoy-fault-abort-request* or *x-envoy-fault-abort-grpc-request* HTTP header needs to be a part of the request. 
x-envoy-fault-delay-request The duration to delay a request by. The header value should be an integer that specifies the number of milliseconds to throttle the latency for. In order for the header to work, :ref:`header_delay - ` needs to be set. + ` needs to be set. x-envoy-fault-delay-request-percentage The percentage of requests that should be delayed by a duration that's defined by the value of *x-envoy-fault-delay-request* HTTP header. The header value should be an integer that specifies the percentage of request to apply delays to and must be greater or equal to 0 and its maximum value is capped by the value of the numerator of - :ref:`percentage ` field. + :ref:`percentage ` field. Percentage's denominator is equal to default percentage's denominator - :ref:`percentage ` field. + :ref:`percentage ` field. In order for the header to work, :ref:`header_delay - ` needs to be set and + ` needs to be set and *x-envoy-fault-delay-request* HTTP header needs to be a part of a request. x-envoy-fault-throughput-response The rate limit to use when a response to a caller is sent. The header value should be an integer that specifies the limit in KiB/s and must be > 0. In order for the header to work, :ref:`header_limit - ` needs to be set. + ` needs to be set. x-envoy-fault-throughput-response-percentage The percentage of requests whose response rate should be limited to the value of *x-envoy-fault-throughput-response* HTTP header. The header value should be an integer that specifies the percentage of request to apply delays to and must be greater or equal to 0 and its maximum value is capped by the value of the numerator of - :ref:`percentage ` field. + :ref:`percentage ` field. Percentage's denominator is equal to default percentage's denominator - :ref:`percentage ` field. + :ref:`percentage ` field. 
In order for the header to work, :ref:`header_limit - ` needs to be set and + ` needs to be set and *x-envoy-fault-delay-request* HTTP header needs to be a part of a request. .. attention:: Allowing header control is inherently dangerous if exposed to untrusted clients. In this case, it is suggested to use the :ref:`max_active_faults - ` setting to limit the + ` setting to limit the maximum concurrent faults that can be active at any given time. The following is an example configuration that enables header control for both of the above @@ -102,7 +112,7 @@ options: name: envoy.filters.http.fault typed_config: - "@type": type.googleapis.com/envoy.config.filter.http.fault.v2.HTTPFault + "@type": type.googleapis.com/envoy.extensions.filters.http.fault.v3.HTTPFault max_active_faults: 100 abort: header_abort: {} @@ -134,7 +144,7 @@ fault.http.abort.abort_percent *abort_percent* specified in config. If the config does not contain an *abort* block, then *abort_percent* defaults to 0. For historic reasons, this runtime key is available regardless of whether the filter is :ref:`configured for abort - `. + `. fault.http.abort.http_status HTTP status code that will be used as the response status code of requests that will be @@ -142,33 +152,40 @@ fault.http.abort.http_status in the config. If the config does not contain an *abort* block, then *http_status* defaults to 0. For historic reasons, this runtime key is available regardless of whether the filter is :ref:`configured for abort - `. + `. + +fault.http.abort.grpc_status + gRPC status code that will be used as the response status code of requests that will be + aborted if the headers match. Defaults to the gRPC status code specified in the config. + If this field is missing from both the runtime and the config, gRPC status code in the response + will be derived from *fault.http.abort.http_status* field. This runtime key is only available when + the filter is :ref:`configured for abort `. 
fault.http.delay.fixed_delay_percent % of requests that will be delayed if the headers match. Defaults to the *delay_percent* specified in the config or 0 otherwise. This runtime key is only available when the filter is :ref:`configured for delay - `. + `. fault.http.delay.fixed_duration_ms The delay duration in milliseconds. If not specified, the *fixed_duration_ms* specified in the config will be used. If this field is missing from both the runtime and the config, no delays will be injected. This runtime key is only available when the filter is :ref:`configured for delay - `. + `. fault.http.max_active_faults The maximum number of active faults (of all types) that Envoy will will inject via the fault filter. This can be used in cases where it is desired that faults are 100% injected, but the user wants to avoid a situation in which too many unexpected concurrent faulting requests cause resource constraint issues. If not specified, the :ref:`max_active_faults - ` setting will be used. + ` setting will be used. fault.http.rate_limit.response_percent % of requests which will have a response rate limit fault injected. Defaults to the value set in - the :ref:`percentage ` field. + the :ref:`percentage ` field. This runtime key is only available when the filter is :ref:`configured for response rate limiting - `. + `. *Note*, fault filter runtime settings for the specific downstream cluster override the default ones if present. The following are downstream specific @@ -190,7 +207,7 @@ Statistics ---------- The fault filter outputs statistics in the *http..fault.* namespace. The :ref:`stat prefix -` comes from the +` comes from the owning HTTP connection manager. .. csv-table:: @@ -200,7 +217,7 @@ owning HTTP connection manager. 
delays_injected, Counter, Total requests that were delayed aborts_injected, Counter, Total requests that were aborted response_rl_injected, Counter, "Total requests that had a response rate limit selected for injection (actually injection may not occur due to disconnect, reset, no body, etc.)" - faults_overflow, Counter, Total number of faults that were not injected due to overflowing the :ref:`max_active_faults ` setting + faults_overflow, Counter, Total number of faults that were not injected due to overflowing the :ref:`max_active_faults ` setting active_faults, Gauge, Total number of faults active at the current time .delays_injected, Counter, Total delayed requests for the given downstream cluster .aborts_injected, Counter, Total aborted requests for the given downstream cluster diff --git a/docs/root/configuration/http/http_filters/grpc_http1_bridge_filter.rst b/docs/root/configuration/http/http_filters/grpc_http1_bridge_filter.rst index 47039f2b6fcd3..5454468c5ac24 100644 --- a/docs/root/configuration/http/http_filters/grpc_http1_bridge_filter.rst +++ b/docs/root/configuration/http/http_filters/grpc_http1_bridge_filter.rst @@ -4,7 +4,7 @@ gRPC HTTP/1.1 bridge ==================== * gRPC :ref:`architecture overview ` -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.http.grpc_http1_bridge*. 
This is a simple filter which enables the bridging of an HTTP/1.1 client which does not support diff --git a/docs/root/configuration/http/http_filters/grpc_http1_reverse_bridge_filter.rst b/docs/root/configuration/http/http_filters/grpc_http1_reverse_bridge_filter.rst index ace79b2813ade..ed668b936a3f8 100644 --- a/docs/root/configuration/http/http_filters/grpc_http1_reverse_bridge_filter.rst +++ b/docs/root/configuration/http/http_filters/grpc_http1_reverse_bridge_filter.rst @@ -4,7 +4,7 @@ gRPC HTTP/1.1 reverse bridge ============================ * gRPC :ref:`architecture overview ` -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.http.grpc_http1_reverse_bridge*. This is a filter that enables converting an incoming gRPC request into a HTTP/1.1 request to allow @@ -61,11 +61,11 @@ How to disable HTTP/1.1 reverse bridge filter per route - filters: - name: envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager access_log: - name: envoy.access_loggers.file typed_config: - "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog + "@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog path: /dev/stdout stat_prefix: ingress_http route_config: @@ -81,8 +81,9 @@ How to disable HTTP/1.1 reverse bridge filter per route cluster: grpc timeout: 5.00s # per_filter_config disables the filter for this route - per_filter_config: + typed_per_filter_config: envoy.filters.http.grpc_http1_reverse_bridge: + "@type": type.googleapis.com/envoy.extensions.filters.http.grpc_http1_reverse_bridge.v3.FilterConfigPerRoute disabled: true - match: prefix: "/route-with-filter-enabled" @@ -93,7 +94,7 @@ How to disable HTTP/1.1 reverse bridge filter per route 
http_filters: - name: envoy.filters.http.grpc_http1_reverse_bridge typed_config: - "@type": type.googleapis.com/envoy.config.filter.http.grpc_http1_reverse_bridge.v2alpha1.FilterConfig + "@type": type.googleapis.com/envoy.extensions.filters.http.grpc_http1_reverse_bridge.v3.FilterConfig content_type: application/grpc+proto withhold_grpc_frames: true - name: envoy.filters.http.router @@ -104,10 +105,15 @@ How to disable HTTP/1.1 reverse bridge filter per route type: LOGICAL_DNS dns_lookup_family: V4_ONLY lb_policy: ROUND_ROBIN - hosts: - - socket_address: - address: localhost - port_value: 4630 + load_assignment: + cluster_name: some_service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: localhost + port_value: 4630 - name: grpc connect_timeout: 5.00s type: strict_dns diff --git a/docs/root/configuration/http/http_filters/grpc_json_transcoder_filter.rst b/docs/root/configuration/http/http_filters/grpc_json_transcoder_filter.rst index a1fdfdcccdf57..a8c796b5bcb43 100644 --- a/docs/root/configuration/http/http_filters/grpc_json_transcoder_filter.rst +++ b/docs/root/configuration/http/http_filters/grpc_json_transcoder_filter.rst @@ -4,7 +4,7 @@ gRPC-JSON transcoder ==================== * gRPC :ref:`architecture overview ` -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.http.grpc_json_transcoder*. This is a filter which allows a RESTful JSON API client to send requests to Envoy over HTTP @@ -85,7 +85,16 @@ as its output message type. The implementation needs to set Multiple `google.api.HttpBody `_ can be send by the gRPC server in the server streaming case. In this case, HTTP response header `Content-Type` will use the `content-type` from the first -`google.api.HttpBody `. +`google.api.HttpBody `_. 
+ +Headers +-------- + +gRPC-JSON forwards the following headers to the gRPC server: + +* `x-envoy-original-path`, containing the value of the original path of HTTP request +* `x-envoy-original-method`, containing the value of the original method of HTTP request + Sample Envoy configuration -------------------------- @@ -110,7 +119,7 @@ gRPC or RESTful JSON requests to localhost:51051. - filters: - name: envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager stat_prefix: grpc_json codec_type: AUTO route_config: @@ -126,7 +135,7 @@ gRPC or RESTful JSON requests to localhost:51051. http_filters: - name: envoy.filters.http.grpc_json_transcoder typed_config: - "@type": type.googleapis.com/envoy.config.filter.http.transcoder.v2.GrpcJsonTranscoder + "@type": type.googleapis.com/envoy.extensions.filters.http.grpc_json_transcoder.v3.GrpcJsonTranscoder proto_descriptor: "/tmp/envoy/proto.pb" services: ["helloworld.Greeter"] print_options: diff --git a/docs/root/configuration/http/http_filters/grpc_stats_filter.rst b/docs/root/configuration/http/http_filters/grpc_stats_filter.rst index 984a2f9348d73..80458525b72b1 100644 --- a/docs/root/configuration/http/http_filters/grpc_stats_filter.rst +++ b/docs/root/configuration/http/http_filters/grpc_stats_filter.rst @@ -4,10 +4,10 @@ gRPC Statistics =============== * gRPC :ref:`architecture overview ` -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.http.grpc_stats*. * This filter can be enabled to emit a :ref:`filter state object - ` + ` This is a filter which enables telemetry of gRPC calls. 
Additionally, the filter detects message boundaries in streaming gRPC calls and emits the message @@ -18,8 +18,8 @@ More info: wire format in `gRPC over HTTP/2 .grpc.* namespace. Depending on the configuration, the stats may be prefixed with `..`; the stats in the table below are shown in this form. See the documentation for -:ref:`individual_method_stats_allowlist ` -and :ref:`stats_for_all_methods `. +:ref:`individual_method_stats_allowlist ` +and :ref:`stats_for_all_methods `. To enable *upstream_rq_time* (v3 API only) see :ref:`enable_upstream_stats `. diff --git a/docs/root/configuration/http/http_filters/grpc_web_filter.rst b/docs/root/configuration/http/http_filters/grpc_web_filter.rst index fe4dea6f4b092..8241262a7c027 100644 --- a/docs/root/configuration/http/http_filters/grpc_web_filter.rst +++ b/docs/root/configuration/http/http_filters/grpc_web_filter.rst @@ -4,7 +4,7 @@ gRPC-Web ======== * gRPC :ref:`architecture overview ` -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.http.grpc_web*. This is a filter which enables the bridging of a gRPC-Web client to a compliant gRPC server by diff --git a/docs/root/configuration/http/http_filters/gzip_filter.rst b/docs/root/configuration/http/http_filters/gzip_filter.rst index 0251012244f16..71947d2510ac9 100644 --- a/docs/root/configuration/http/http_filters/gzip_filter.rst +++ b/docs/root/configuration/http/http_filters/gzip_filter.rst @@ -1,5 +1,10 @@ .. _config_http_filters_gzip: +.. warning:: + + This filter has been deprecated in favor of the + :ref:`HTTP Compressor filter `. + Gzip ==== Gzip is an HTTP filter which enables Envoy to compress dispatched data @@ -9,7 +14,7 @@ compromising the response time. Configuration ------------- -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.http.gzip*. ..
attention:: @@ -24,11 +29,9 @@ Configuration Runtime ------- -The Gzip filter supports the following runtime settings: - -gzip.filter_enabled - The % of requests for which the filter is enabled. Default is 100. - +The Gzip filter can be runtime feature flagged via the :ref:`runtime_enabled +` +configuration field within the compressor field. How it works ------------ diff --git a/docs/root/configuration/http/http_filters/header_to_metadata_filter.rst b/docs/root/configuration/http/http_filters/header_to_metadata_filter.rst index 38a55736861ec..bdf2cecc63fe3 100644 --- a/docs/root/configuration/http/http_filters/header_to_metadata_filter.rst +++ b/docs/root/configuration/http/http_filters/header_to_metadata_filter.rst @@ -2,12 +2,18 @@ Envoy Header-To-Metadata Filter =============================== -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.http.header_to_metadata*. This filter is configured with rules that will be matched against requests and responses. -Each rule has a header and can be triggered either when the header is present or missing. When -a rule is triggered, dynamic metadata will be added based on the configuration of the rule. +Each rule has either a cookie or a header and can be triggered either when the header +or cookie is present or missing. + +When a rule is triggered, dynamic metadata will be added based on the configuration of the rule. +If the header or cookie is present, its value is extracted and used along with the specified +key as metadata. If the header or cookie is missing, the on missing case is triggered and the value +specified is used for adding metadata. + The metadata can then be used for load balancing decisions, consumed from logs, etc.
A typical use case for this filter is to dynamically match requests with load balancer @@ -25,7 +31,7 @@ absence of a version header could be: http_filters: - name: envoy.filters.http.header_to_metadata typed_config: - "@type": type.googleapis.com/envoy.config.filter.http.header_to_metadata.v2.Config + "@type": type.googleapis.com/envoy.extensions.filters.http.header_to_metadata.v3.Config request_rules: - header: x-version on_header_present: @@ -39,6 +45,29 @@ absence of a version header could be: type: STRING remove: false +As with headers, the value of the specified cookie will be extracted from the request +and added as metadata with the key specified. +Removing a cookie when a rule matches is unsupported. + +.. code-block:: yaml + + http_filters: + - name: envoy.filters.http.header_to_metadata + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.header_to_metadata.v3.Config + request_rules: + - cookie: cookie + on_header_present: + metadata_namespace: envoy.lb + key: version + type: STRING + on_header_missing: + metadata_namespace: envoy.lb + key: default + value: 'true' + type: STRING + remove: false + A corresponding upstream cluster configuration could be: @@ -60,6 +89,26 @@ This would then allow requests with the `x-version` header set to be matched aga endpoints with the corresponding version. Whereas requests with that header missing would be matched with the default endpoints. +If the header's value needs to be transformed before it's added to the request as +dynamic metadata, this filter supports regex matching and substitution: + +.. 
code-block:: yaml + + http_filters: + - name: envoy.filters.http.header_to_metadata + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.header_to_metadata.v3.Config + request_rules: + - header: ":path" + on_header_present: + metadata_namespace: envoy.lb + key: cluster + regex_value_rewrite: + pattern: + google_re2: {} + regex: "^/(cluster[\\d\\w-]+)/?.*$" + substitution: "\\1" + Note that this filter also supports per route configuration: .. code-block:: yaml @@ -72,8 +121,9 @@ Note that this filter also supports per route configuration: routes: - match: { prefix: "/version-to-metadata" } route: { cluster: service } - per_filter_config: + typed_per_filter_config: envoy.filters.http.header_to_metadata: + "@type": type.googleapis.com/envoy.extensions.filters.http.header_to_metadata.v3.Config request_rules: - header: x-version on_header_present: diff --git a/docs/root/configuration/http/http_filters/health_check_filter.rst b/docs/root/configuration/http/http_filters/health_check_filter.rst index 14b35114adb49..809b1fd42e986 100644 --- a/docs/root/configuration/http/http_filters/health_check_filter.rst +++ b/docs/root/configuration/http/http_filters/health_check_filter.rst @@ -4,7 +4,7 @@ Health check ============ * Health check filter :ref:`architecture overview ` -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.http.health_check*. .. 
note:: diff --git a/docs/root/configuration/http/http_filters/http_filters.rst b/docs/root/configuration/http/http_filters/http_filters.rst index aa435d6d5a9fe..97626448d2495 100644 --- a/docs/root/configuration/http/http_filters/http_filters.rst +++ b/docs/root/configuration/http/http_filters/http_filters.rst @@ -10,8 +10,10 @@ HTTP filters aws_lambda_filter aws_request_signing_filter buffer_filter + compressor_filter cors_filter csrf_filter + decompressor_filter dynamic_forward_proxy_filter dynamodb_filter ext_authz_filter @@ -39,4 +41,5 @@ HTTP filters .. toctree:: :hidden: + ../../../api-v3/extensions/filters/http/admission_control/v3alpha/admission_control.proto ../../../api-v3/extensions/filters/http/cache/v3alpha/cache.proto diff --git a/docs/root/configuration/http/http_filters/ip_tagging_filter.rst b/docs/root/configuration/http/http_filters/ip_tagging_filter.rst index 45bb1efefcfcd..a991c1f65f242 100644 --- a/docs/root/configuration/http/http_filters/ip_tagging_filter.rst +++ b/docs/root/configuration/http/http_filters/ip_tagging_filter.rst @@ -16,7 +16,7 @@ G. Karlsson. Configuration ------------- -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.http.ip_tagging*. Statistics diff --git a/docs/root/configuration/http/http_filters/jwt_authn_filter.rst b/docs/root/configuration/http/http_filters/jwt_authn_filter.rst index 8fe9b9c16f7cb..50790a2309052 100644 --- a/docs/root/configuration/http/http_filters/jwt_authn_filter.rst +++ b/docs/root/configuration/http/http_filters/jwt_authn_filter.rst @@ -7,15 +7,22 @@ This HTTP filter can be used to verify JSON Web Token (JWT). It will verify its JWKS is needed to verify JWT signatures. They can be specified in the filter config or can be fetched remotely from a JWKS server. -.. attention:: - ES256, ES384, ES512, HS256, HS384, HS512, RS256, RS384 and RS512 are supported for the JWT alg. +Following are supported JWT alg: + +.. 
code-block:: + + ES256, ES384, ES512, + HS256, HS384, HS512, + RS256, RS384, RS512, + PS256, PS384, PS512, + EdDSA Configuration ------------- This filter should be configured with the name *envoy.filters.http.jwt_authn*. -This HTTP :ref:`filter config ` has two fields: +This HTTP :ref:`filter config ` has two fields: * Field *providers* specifies how a JWT should be verified, such as where to extract the token, where to fetch the public key (JWKS) and where to output its payload. * Field *rules* specifies matching rules and their requirements. If a request matches a rule, its requirement applies. The requirement specifies which JWT providers should be used. @@ -23,7 +30,7 @@ This HTTP :ref:`filter config ` specifies how a JWT should be verified. It has the following fields: +:ref:`JwtProvider ` specifies how a JWT should be verified. It has the following fields: * *issuer*: the principal that issued the JWT, usually a URL or an email address. * *audiences*: a list of JWT audiences allowed to access. A JWT containing any of these audiences will be accepted. @@ -42,11 +49,13 @@ If *from_headers* and *from_params* is empty, the default location to extract J Authorization: Bearer -If fails to extract a JWT from above header, then check query parameter key *access_token* as in this example:: +and query parameter key *access_token* as:: /path?access_token= -In the :ref:`filter config `, *providers* is a map, to map *provider_name* to a :ref:`JwtProvider `. The *provider_name* must be unique, it is referred in the `JwtRequirement ` in its *provider_name* field. +If a request has two tokens, one from the header and the other from the query parameter, all of them must be valid. + +In the :ref:`filter config `, *providers* is a map, to map *provider_name* to a :ref:`JwtProvider `. The *provider_name* must be unique, it is referred in the `JwtRequirement ` in its *provider_name* field. .. important:: For *remote_jwks*, a **jwks_cluster** cluster is required. 
@@ -119,7 +128,7 @@ JWT payload will be added to the request header as following format:: RequirementRule ~~~~~~~~~~~~~~~ -:ref:`RequirementRule ` has two fields: +:ref:`RequirementRule ` has two fields: * Field *match* specifies how a request can be matched; e.g. by HTTP headers, or by query parameters, or by path prefixes. * Field *requires* specifies the JWT requirement, e.g. which provider is required. diff --git a/docs/root/configuration/http/http_filters/lua_filter.rst b/docs/root/configuration/http/http_filters/lua_filter.rst index 8868a39d15b71..8e6f8eeffef8e 100644 --- a/docs/root/configuration/http/http_filters/lua_filter.rst +++ b/docs/root/configuration/http/http_filters/lua_filter.rst @@ -23,10 +23,6 @@ supported Lua version is mostly 5.1 with some 5.2 features. See the `LuaJIT docu supports more 5.2 features and additional architectures. Envoy can be built with moonjit support by using the following bazel option: ``--//source/extensions/filters/common/lua:moonjit=1``. -The filter only supports loading Lua code in-line in the configuration. If local filesystem code -is desired, a trivial in-line script can be used to load the rest of the code from the local -environment. - The design of the filter and Lua support at a high level is as follows: * All Lua environments are :ref:`per worker thread `. This means that @@ -35,7 +31,7 @@ The design of the filter and Lua support at a high level is as follows: * All scripts are run as coroutines. This means that they are written in a synchronous style even though they may perform complex asynchronous tasks. This makes the scripts substantially easier to write. All network/async processing is performed by Envoy via a set of APIs. Envoy will - yield the script as appropriate and resume it when async tasks are complete. + suspend execution of the script as appropriate and resume it when async tasks are complete. 
* **Do not perform blocking operations from scripts.** It is critical for performance that Envoy APIs are used for all IO. @@ -60,9 +56,86 @@ API. Configuration ------------- -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.http.lua*. +A simple example of configuring Lua HTTP filter that contains only :ref:`inline_code +` is as follow: + +.. code-block:: yaml + + name: envoy.filters.http.lua + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.lua.v3.Lua + inline_code: | + -- Called on the request path. + function envoy_on_request(request_handle) + -- Do something. + end + -- Called on the response path. + function envoy_on_response(response_handle) + -- Do something. + end + +By default, Lua script defined in ``inline_code`` will be treated as a ``GLOBAL`` script. Envoy will +execute it for every HTTP request. + +Per-Route Configuration +----------------------- + +The Lua HTTP filter also can be disabled or overridden on a per-route basis by providing a +:ref:`LuaPerRoute ` configuration +on the virtual host, route, or weighted cluster. + +As a concrete example, given the following Lua filter configuration: + +.. code-block:: yaml + + name: envoy.filters.http.lua + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.lua.v3.Lua + inline_code: | + function envoy_on_request(request_handle) + -- do something + end + source_codes: + hello.lua: + inline_string: | + function envoy_on_request(request_handle) + request_handle:logInfo("Hello World.") + end + bye.lua: + inline_string: | + function envoy_on_response(response_handle) + response_handle:logInfo("Bye Bye.") + end + +The HTTP Lua filter can be disabled on some virtual host, route, or weighted cluster by the +:ref:`LuaPerRoute ` configuration as +follow: + +.. 
code-block:: yaml + + per_filter_config: + envoy.filters.http.lua: + disabled: true + +We can also refer to a Lua script in the filter configuration by specifying a name in LuaPerRoute. +The ``GLOBAL`` Lua script will be overridden by the referenced script: + +.. code-block:: yaml + + per_filter_config: + envoy.filters.http.lua: + name: hello.lua + +.. attention:: + + The name ``GLOBAL`` is reserved for :ref:`Lua.inline_code + `. Therefore, do not use + ``GLOBAL`` as name for other Lua scripts. + + Script examples --------------- @@ -151,8 +224,9 @@ script defines: end A script can define either or both of these functions. During the request path, Envoy will -run *envoy_on_request* as a coroutine, passing an API handle. During the response path, Envoy will -run *envoy_on_response* as a coroutine, passing an API handle. +run *envoy_on_request* as a coroutine, passing a handle to the request API. During the +response path, Envoy will run *envoy_on_response* as a coroutine, passing handle to the +response API. .. attention:: @@ -167,7 +241,7 @@ headers() .. code-block:: lua - headers = handle:headers() + local headers = handle:headers() Returns the stream's headers. The headers can be modified as long as they have not been sent to the next filter in the header chain. For example, they can be modified after an *httpCall()* or @@ -181,11 +255,12 @@ body() .. code-block:: lua - body = handle:body() + local body = handle:body() -Returns the stream's body. This call will cause Envoy to yield the script until the entire body -has been buffered. Note that all buffering must adhere to the flow control policies in place. -Envoy will not buffer more data than is allowed by the connection manager. +Returns the stream's body. This call will cause Envoy to suspend execution of the script until +the entire body has been received in a buffer. Note that all buffering must adhere to the +flow-control policies in place. 
Envoy will not buffer more data than is allowed by the connection +manager. Returns a :ref:`buffer object `. @@ -194,11 +269,11 @@ bodyChunks() .. code-block:: lua - iterator = handle:bodyChunks() + local iterator = handle:bodyChunks() Returns an iterator that can be used to iterate through all received body chunks as they arrive. -Envoy will yield the script in between chunks, but *will not buffer* them. This can be used by -a script to inspect data as it is streaming by. +Envoy will suspend executing the script in between chunks, but *will not buffer* them. This can be +used by a script to inspect data as it is streaming by. .. code-block:: lua @@ -213,7 +288,7 @@ trailers() .. code-block:: lua - trailers = handle:trailers() + local trailers = handle:trailers() Returns the stream's trailers. May return nil if there are no trailers. The trailers may be modified before they are sent to the next filter. @@ -239,7 +314,7 @@ httpCall() .. code-block:: lua - headers, body = handle:httpCall(cluster, headers, body, timeout, asynchronous) + local headers, body = handle:httpCall(cluster, headers, body, timeout, asynchronous) Makes an HTTP call to an upstream host. *cluster* is a string which maps to a configured cluster manager cluster. *headers* is a table of key/value pairs to send (the value can be a string or table of strings). Note that @@ -247,7 +322,7 @@ the *:method*, *:path*, and *:authority* headers must be set. *body* is an optio data to send. *timeout* is an integer that specifies the call timeout in milliseconds. *asynchronous* is a boolean flag. If asynchronous is set to true, Envoy will make the HTTP request and continue, -regardless of response success or failure. If this is set to false, or not set, Envoy will yield the script +regardless of response success or failure. If this is set to false, or not set, Envoy will suspend executing the script until the call completes or has an error. Returns *headers* which is a table of response headers. 
Returns *body* which is the string response @@ -283,11 +358,11 @@ metadata() .. code-block:: lua - metadata = handle:metadata() + local metadata = handle:metadata() Returns the current route entry metadata. Note that the metadata should be specified under the filter name i.e. *envoy.filters.http.lua*. Below is an example of a *metadata* in a -:ref:`route entry `. +:ref:`route entry `. .. code-block:: yaml @@ -306,7 +381,7 @@ streamInfo() .. code-block:: lua - streamInfo = handle:streamInfo() + local streamInfo = handle:streamInfo() Returns :repo:`information ` related to the current request. @@ -317,7 +392,7 @@ connection() .. code-block:: lua - connection = handle:connection() + local connection = handle:connection() Returns the current request's underlying :repo:`connection `. @@ -328,7 +403,7 @@ importPublicKey() .. code-block:: lua - pubkey = handle:importPublicKey(keyder, keyderLength) + local pubkey = handle:importPublicKey(keyder, keyderLength) Returns public key which is used by :ref:`verifySignature ` to verify digital signature. @@ -339,7 +414,7 @@ verifySignature() .. code-block:: lua - ok, error = verifySignature(hashFunction, pubkey, signature, signatureLength, data, dataLength) + local ok, error = verifySignature(hashFunction, pubkey, signature, signatureLength, data, dataLength) Verify signature using provided parameters. *hashFunction* is the variable for hash function which be used for verifying signature. *SHA1*, *SHA224*, *SHA256*, *SHA384* and *SHA512* are supported. @@ -420,7 +495,7 @@ length() .. code-block:: lua - size = buffer:length() + local size = buffer:length() Gets the size of the buffer in bytes. Returns an integer. @@ -487,6 +562,17 @@ dynamicMetadata() Returns a :ref:`dynamic metadata object `. +downstreamSslConnection() +^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: lua + + streamInfo:downstreamSslConnection() + +Returns :repo:`information ` related to the current SSL connection. 
+ +Returns a downstream :ref:`SSL connection info object `. + .. _config_http_filters_lua_stream_info_dynamic_metadata_wrapper: Dynamic metadata object API @@ -522,13 +608,13 @@ its keys can only be *string* or *numeric*. function envoy_on_request(request_handle) local headers = request_handle:headers() request_handle:streamInfo():dynamicMetadata():set("envoy.filters.http.lua", "request.info", { - auth: headers:get("authorization), + auth: headers:get("authorization"), token: headers:get("x-request-token"), }) end function envoy_on_response(response_handle) - local meta = response_handle:streamInfo():dynamicMetadata()["request.info"] + local meta = response_handle:streamInfo():dynamicMetadata():get("envoy.filters.http.lua")["request.info"] response_handle:logInfo("Auth: "..meta.auth..", token: "..meta.token) end @@ -550,7 +636,7 @@ Connection object API --------------------- ssl() -^^^^^^^^ +^^^^^ .. code-block:: lua @@ -563,6 +649,207 @@ ssl() Returns :repo:`SSL connection ` object when the connection is secured and *nil* when it is not. -.. note:: +Returns an :ref:`SSL connection info object `. + +.. _config_http_filters_lua_ssl_socket_info: + +SSL connection object API +------------------------- + +peerCertificatePresented() +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: lua + + if downstreamSslConnection:peerCertificatePresented() then + print("peer certificate is presented") + end + +Returns bool whether the peer certificate is presented. + +peerCertificateValidated() +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: lua + + if downstreamSslConnection:peerCertificateValidated() then + print("peer certificate is validated") + end + +Returns bool whether the peer certificate was validated. + +uriSanLocalCertificate() +^^^^^^^^^^^^^^^^^^^^^^^^ + +..
code-block:: lua + + -- For example, uriSanLocalCertificate contains {"san1", "san2"} + local certs = downstreamSslConnection:uriSanLocalCertificate() + + -- The following prints san1,san2 + handle:logTrace(table.concat(certs, ",")) + +Returns the URIs (as a table) in the SAN field of the local certificate. Returns an empty table if +there is no local certificate, or no SAN field, or no URI SAN entries. + +sha256PeerCertificateDigest() +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: lua + + downstreamSslConnection:sha256PeerCertificateDigest() + +Returns the SHA256 digest of the peer certificate. Returns ``""`` if there is no peer certificate +which can happen in TLS (non-mTLS) connections. + +serialNumberPeerCertificate() +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: lua + + downstreamSslConnection:serialNumberPeerCertificate() + +Returns the serial number field of the peer certificate. Returns ``""`` if there is no peer +certificate, or no serial number. + +issuerPeerCertificate() +^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: lua + + downstreamSslConnection:issuerPeerCertificate() + +Returns the issuer field of the peer certificate in RFC 2253 format. Returns ``""`` if there is no +peer certificate, or no issuer. + +subjectPeerCertificate() +^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: lua + + downstreamSslConnection:subjectPeerCertificate() + +Returns the subject field of the peer certificate in RFC 2253 format. Returns ``""`` if there is no +peer certificate, or no subject. + +uriSanPeerCertificate() +^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: lua + + downstreamSslConnection:uriSanPeerCertificate() + +Returns the URIs (as a table) in the SAN field of the peer certificate. Returns an empty table if +there is no peer certificate, or no SAN field, or no URI SAN entries. + +subjectLocalCertificate() +^^^^^^^^^^^^^^^^^^^^^^^^^ + +..
code-block:: lua + + downstreamSslConnection:subjectLocalCertificate() + +Returns the subject field of the local certificate in RFC 2253 format. Returns ``""`` if there is no +local certificate, or no subject. + +urlEncodedPemEncodedPeerCertificate() +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: lua + + downstreamSslConnection:urlEncodedPemEncodedPeerCertificate() + +Returns the URL-encoded PEM-encoded representation of the peer certificate. Returns ``""`` if there +is no peer certificate or encoding fails. + +urlEncodedPemEncodedPeerCertificateChain() +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: lua + + downstreamSslConnection:urlEncodedPemEncodedPeerCertificateChain() + +Returns the URL-encoded PEM-encoded representation of the full peer certificate chain including the +leaf certificate. Returns ``""`` if there is no peer certificate or encoding fails. + +dnsSansPeerCertificate() +^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: lua + + downstreamSslConnection:dnsSansPeerCertificate() + +Returns the DNS entries (as a table) in the SAN field of the peer certificate. Returns an empty +table if there is no peer certificate, or no SAN field, or no DNS SAN entries. + +dnsSansLocalCertificate() +^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: lua + + downstreamSslConnection:dnsSansLocalCertificate() + +Returns the DNS entries (as a table) in the SAN field of the local certificate. Returns an empty +table if there is no local certificate, or no SAN field, or no DNS SAN entries. + +validFromPeerCertificate() +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: lua + + downstreamSslConnection:validFromPeerCertificate() + +Returns the time (timestamp-since-epoch in seconds) that the peer certificate was issued and should +be considered valid from. Returns ``0`` if there is no peer certificate. + +In Lua, we usually use ``os.time(os.date("!*t"))`` to get current timestamp-since-epoch in seconds.
+ +expirationPeerCertificate() +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: lua + + downstreamSslConnection:expirationPeerCertificate() + +Returns the time (timestamp-since-epoch in seconds) that the peer certificate expires and should not +be considered valid after. Returns ``0`` if there is no peer certificate. + +In Lua, we usually use ``os.time(os.date("!*t"))`` to get current timestamp-since-epoch in seconds. + +sessionId() +^^^^^^^^^^^ + +.. code-block:: lua + + downstreamSslConnection:sessionId() + +Returns the hex-encoded TLS session ID as defined in RFC 5246. + +ciphersuiteId() +^^^^^^^^^^^^^^^^ + +.. code-block:: lua + + downstreamSslConnection:ciphersuiteId() + +Returns the standard ID (hex-encoded) for the ciphers used in the established TLS connection. +Returns ``"0xffff"`` if there is no current negotiated ciphersuite. + +ciphersuiteString() +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: lua + + downstreamSslConnection:ciphersuiteString() + +Returns the OpenSSL name for the set of ciphers used in the established TLS connection. Returns +``""`` if there is no current negotiated ciphersuite. + +tlsVersion() +^^^^^^^^^^^^ + +.. code-block:: lua + + downstreamSslConnection:tlsVersion() - Currently the SSL connection object has no exposed APIs. +Returns the TLS version (e.g., TLSv1.2, TLSv1.3) used in the established TLS connection. diff --git a/docs/root/configuration/http/http_filters/on_demand_updates_filter.rst b/docs/root/configuration/http/http_filters/on_demand_updates_filter.rst index ee39fa06aa5f3..d856d3e7597c4 100644 --- a/docs/root/configuration/http/http_filters/on_demand_updates_filter.rst +++ b/docs/root/configuration/http/http_filters/on_demand_updates_filter.rst @@ -3,16 +3,16 @@ On-demand VHDS Updates ====================== -The on-demand VHDS filter is used to request a :ref:`virtual host ` -data if it's not already present in the :ref:`Route Configuration `.
The +The on-demand VHDS filter is used to request a :ref:`virtual host ` +data if it's not already present in the :ref:`Route Configuration `. The contents of the *Host* or *:authority* header is used to create the on-demand request. For an on-demand -request to be created, :ref:`VHDS ` must be enabled and either *Host* +request to be created, :ref:`VHDS ` must be enabled and either *Host* or *:authority* header be present. On-demand VHDS cannot be used with SRDS at this point. Configuration ------------- -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.http.on_demand*. * The filter should be placed before *envoy.filters.http.router* filter in the HttpConnectionManager's filter chain. diff --git a/docs/root/configuration/http/http_filters/original_src_filter.rst b/docs/root/configuration/http/http_filters/original_src_filter.rst index 0dc13c6506065..2103d7e5e97de 100644 --- a/docs/root/configuration/http/http_filters/original_src_filter.rst +++ b/docs/root/configuration/http/http_filters/original_src_filter.rst @@ -3,7 +3,7 @@ Original Source =============== -* :ref:`HTTP filter v2 API reference ` +* :ref:`HTTP filter v3 API reference ` * This filter should be configured with the name *envoy.filters.http.original_src*. The original source http filter replicates the downstream remote address of the connection on @@ -32,10 +32,10 @@ to forcefully route any traffic whose IP was replicated by Envoy back through th If Envoy and the upstream are on the same host -- e.g. in an sidecar deployment --, then iptables and routing rules can be used to ensure correct behaviour. The filter has an unsigned integer configuration, -:ref:`mark `. Setting +:ref:`mark `. Setting this to *X* causes Envoy to *mark* all upstream packets originating from this http with value *X*. Note that if -:ref:`mark ` is set +:ref:`mark ` is set to 0, Envoy will not mark upstream packets. 
We can use the following set of commands to ensure that all ipv4 and ipv6 traffic marked with *X* @@ -66,7 +66,7 @@ The following example configures Envoy to use the original source for all connec http_filters: - name: envoy.filters.http.original_src typed_config: - "@type": type.googleapis.com/envoy.config.filter.http.original_src.v2alpha1.OriginalSrc + "@type": type.googleapis.com/envoy.extensions.filters.listener.original_src.v3.OriginalSrc mark: 123 - name: envoy.filters.http.router typed_config: {} diff --git a/docs/root/configuration/http/http_filters/rate_limit_filter.rst b/docs/root/configuration/http/http_filters/rate_limit_filter.rst index e76dda3d6f02c..91ce997c72cde 100644 --- a/docs/root/configuration/http/http_filters/rate_limit_filter.rst +++ b/docs/root/configuration/http/http_filters/rate_limit_filter.rst @@ -4,19 +4,19 @@ Rate limit ========== * Global rate limiting :ref:`architecture overview ` -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.http.ratelimit*. The HTTP rate limit filter will call the rate limit service when the request's route or virtual host -has one or more :ref:`rate limit configurations` -that match the filter stage setting. The :ref:`route` +has one or more :ref:`rate limit configurations` +that match the filter stage setting. The :ref:`route` can optionally include the virtual host rate limit configurations. More than one configuration can apply to a request. Each configuration results in a descriptor being sent to the rate limit service. If the rate limit service is called, and the response for any of the descriptors is over limit, a 429 response is returned. The rate limit filter also sets the :ref:`x-envoy-ratelimited` header. 
-If there is an error in calling rate limit service or rate limit service returns an error and :ref:`failure_mode_deny ` is +If there is an error in calling rate limit service or rate limit service returns an error and :ref:`failure_mode_deny ` is set to true, a 500 response is returned. .. _config_http_filters_rate_limit_composing_actions: @@ -24,7 +24,7 @@ set to true, a 500 response is returned. Composing Actions ----------------- -Each :ref:`rate limit action ` on the route or +Each :ref:`rate limit action ` on the route or virtual host populates a descriptor entry. A vector of descriptor entries compose a descriptor. To create more complex rate limit descriptors, actions can be composed in any order. The descriptor will be populated in the order the actions are specified in the configuration. @@ -75,6 +75,50 @@ the following descriptor is generated: ("remote_address", "") ("source_cluster", "from_cluster") +.. _config_http_filters_rate_limit_rate_limit_override: + +Rate Limit Override +------------------- + +A :ref:`rate limit action ` can optionally contain +a :ref:`limit override `. The limit value +will be appended to the descriptor produced by the action and sent to the ratelimit service, +overriding the static service configuration. + +The override can be configured to be taken from the :ref:`Dynamic Metadata +` under a specified :ref: `key +`. If the value is misconfigured +or key does not exist, the override configuration is ignored. + +Example 3 +^^^^^^^^^ + +The following configuration + +.. code-block:: yaml + + actions: + - {generic_key: {descriptor_value: some_value}} + limit: + metadata_key: + key: test.filter.key + path: + - key: test + +.. _config_http_filters_rate_limit_override_dynamic_metadata: + +Will lookup the value of the dynamic metadata. The value must be a structure with integer field +"requests_per_unit" and a string field "unit" which is parseable to :ref:`RateLimitUnit enum +`. 
For example, with the following dynamic metadata +the rate limit override of 42 requests per hour will be appended to the rate limit descriptor. + +.. code-block:: yaml + + test.filter.key: + test: + requests_per_unit: 42 + unit: HOUR + Statistics ---------- @@ -90,7 +134,7 @@ The rate limit filter outputs statistics in the *cluster.. error, Counter, Total errors contacting the rate limit service over_limit, Counter, total over limit responses from the rate limit service failure_mode_allowed, Counter, "Total requests that were error(s) but were allowed through because - of :ref:`failure_mode_deny ` set to false." + of :ref:`failure_mode_deny ` set to false." Runtime ------- @@ -106,4 +150,4 @@ ratelimit.http_filter_enforcing ratelimit..http_filter_enabled % of requests that will call the rate limit service for a given *route_key* specified in the - :ref:`rate limit configuration `. Defaults to 100. + :ref:`rate limit configuration `. Defaults to 100. diff --git a/docs/root/configuration/http/http_filters/rbac_filter.rst b/docs/root/configuration/http/http_filters/rbac_filter.rst index a905b2a6c99fa..5db112d924eff 100644 --- a/docs/root/configuration/http/http_filters/rbac_filter.rst +++ b/docs/root/configuration/http/http_filters/rbac_filter.rst @@ -11,21 +11,21 @@ as well as the incoming request's HTTP headers. This filter also supports policy and shadow mode, shadow mode won't effect real users, it is used to test that a new set of policies work before rolling out to production. -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.http.rbac*. Per-Route Configuration ----------------------- The RBAC filter configuration can be overridden or disabled on a per-route basis by providing a -:ref:`RBACPerRoute ` configuration on +:ref:`RBACPerRoute ` configuration on the virtual host, route, or weighted cluster. Statistics ---------- The RBAC filter outputs statistics in the *http..rbac.* namespace. 
The :ref:`stat prefix -` comes from the +` comes from the owning HTTP connection manager. .. csv-table:: @@ -36,6 +36,8 @@ owning HTTP connection manager. denied, Counter, Total requests that were denied access shadow_allowed, Counter, Total requests that would be allowed access by the filter's shadow rules shadow_denied, Counter, Total requests that would be denied access by the filter's shadow rules + logged, Counter, Total requests that should be logged + not_logged, Counter, Total requests that should not be logged .. _config_http_filters_rbac_dynamic_metadata: @@ -50,3 +52,4 @@ The RBAC filter emits the following dynamic metadata. shadow_effective_policy_id, string, The effective shadow policy ID matching the action (if any). shadow_engine_result, string, The engine result for the shadow rules (i.e. either `allowed` or `denied`). + access_log_hint, boolean, Whether the request should be logged. This metadata is shared and set under the key namespace 'envoy.common' (See :ref:`Shared Dynamic Metadata`). diff --git a/docs/root/configuration/http/http_filters/router_filter.rst b/docs/root/configuration/http/http_filters/router_filter.rst index 6575e0ed23362..4ca285e9eda75 100644 --- a/docs/root/configuration/http/http_filters/router_filter.rst +++ b/docs/root/configuration/http/http_filters/router_filter.rst @@ -5,16 +5,16 @@ Router The router filter implements HTTP forwarding. It will be used in almost all HTTP proxy scenarios that Envoy is deployed for. The filter's main job is to follow the instructions specified in the -configured :ref:`route table `. In addition to forwarding and +configured :ref:`route table `. In addition to forwarding and redirection, the filter also handles retry, statistics, etc. -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.http.router*. .. 
_config_http_filters_router_headers_consumed: -HTTP headers (consumed) ------------------------ +HTTP headers (consumed from downstreams) +---------------------------------------- The router consumes and sets various HTTP headers both on the egress/request path as well as on the ingress/response path. They are documented in this section. @@ -26,8 +26,8 @@ ingress/response path. They are documented in this section. x-envoy-max-retries ^^^^^^^^^^^^^^^^^^^ -If a :ref:`route config retry policy ` or a -:ref:`virtual host retry policy ` is in place, Envoy will default to retrying +If a :ref:`route config retry policy ` or a +:ref:`virtual host retry policy ` is in place, Envoy will default to retrying one time unless explicitly specified. The number of retries can be explicitly set in the virtual host retry config, the route retry config, or by using this header. If this header is used, its value takes precedence over the number of retries set in either retry policy. If a retry policy is not configured and :ref:`config_http_filters_router_x-envoy-retry-on` @@ -36,9 +36,9 @@ or :ref:`config_http_filters_router_x-envoy-retry-grpc-on` headers are not speci A few notes on how Envoy does retries: * The route timeout (set via :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms` or the - :ref:`timeout ` in route configuration or set via + :ref:`timeout ` in route configuration or set via `grpc-timeout header `_ by specifying - :ref:`max_grpc_timeout ` in route configuration) **includes** all + :ref:`max_grpc_timeout ` in route configuration) **includes** all retries. Thus if the request timeout is set to 3s, and the first request attempt takes 2.7s, the retry (including back-off) has .3s to complete. This is by design to avoid an exponential retry/timeout explosion. 
@@ -50,19 +50,17 @@ A few notes on how Envoy does retries: The default base interval (and therefore the maximum interval) can be manipulated by setting the upstream.base_retry_backoff_ms runtime parameter. The back-off intervals can also be modified by configuring the retry policy's - :ref:`retry back-off `. -* If max retries is set both by header as well as in the route configuration, the maximum value is - taken when determining the max retries to use for the request. + :ref:`retry back-off `. .. _config_http_filters_router_x-envoy-retry-on: x-envoy-retry-on ^^^^^^^^^^^^^^^^ -Setting this header on egress requests will cause Envoy to attempt to retry failed requests (number +Setting this header will cause Envoy to attempt to retry failed requests (number of retries defaults to 1 and can be controlled by :ref:`x-envoy-max-retries ` header or the :ref:`route config retry policy -` or the :ref:`virtual host retry policy `). +` or the :ref:`virtual host retry policy `). The value to which the x-envoy-retry-on header is set indicates the retry policy. One or more policies can be specified using a ',' delimited list. The supported policies are: @@ -91,8 +89,14 @@ connect-failure * **NOTE:** A connection failure/timeout is a the TCP level, not the request level. This does not include upstream request timeouts specified via :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms` or via :ref:`route - configuration ` or via - :ref:`virtual host retry policy `. + configuration ` or via + :ref:`virtual host retry policy `. + +.. _config_http_filters_router_retry_policy-envoy-ratelimited: + +envoy-ratelimited + Envoy will retry if the header :ref:`x-envoy-ratelimited` + is present. retriable-4xx Envoy will attempt a retry if the upstream server responds with a retriable 4xx response code. 
@@ -109,22 +113,22 @@ refused-stream retriable-status-codes Envoy will attempt a retry if the upstream server responds with any response code matching one defined - in either :ref:`the retry policy ` + in either :ref:`the retry policy ` or in the :ref:`config_http_filters_router_x-envoy-retriable-status-codes` header. retriable-headers Envoy will attempt a retry if the upstream server response includes any headers matching in either - :ref:`the retry policy ` or in the + :ref:`the retry policy ` or in the :ref:`config_http_filters_router_x-envoy-retriable-header-names` header. The number of retries can be controlled via the :ref:`config_http_filters_router_x-envoy-max-retries` header or via the :ref:`route -configuration ` or via the -:ref:`virtual host retry policy `. +configuration ` or via the +:ref:`virtual host retry policy `. Note that retry policies can also be applied at the :ref:`route level -` or the -:ref:`virtual host level `. +` or the +:ref:`virtual host level `. By default, Envoy will *not* perform retries unless you've configured them per above. @@ -132,11 +136,10 @@ By default, Envoy will *not* perform retries unless you've configured them per a x-envoy-retry-grpc-on ^^^^^^^^^^^^^^^^^^^^^ -Setting this header on egress requests will cause Envoy to attempt to retry failed requests (number of -retries defaults to 1, and can be controlled by -:ref:`x-envoy-max-retries ` -header or the :ref:`route config retry policy `) or the -:ref:`virtual host retry policy `. +Setting this header will cause Envoy to attempt to retry failed requests (number of retries defaults +to 1, and can be controlled by :ref:`x-envoy-max-retries ` +header or the :ref:`route config retry policy `) or the +:ref:`virtual host retry policy `. gRPC retries are currently only supported for gRPC status codes in response headers. gRPC status codes in trailers will not trigger retry logic. One or more policies can be specified using a ',' delimited list. 
The supported policies are: @@ -160,8 +163,8 @@ As with the x-envoy-retry-grpc-on header, the number of retries can be controlle :ref:`config_http_filters_router_x-envoy-max-retries` header Note that retry policies can also be applied at the :ref:`route level -` or the -:ref:`virtual host level `. +` or the +:ref:`virtual host level `. By default, Envoy will *not* perform retries unless you've configured them per above. @@ -180,7 +183,7 @@ is enabled. Header names are case-insensitive. Only the names of retriable response headers can be specified via the request header. A more sophisticated retry policy based on the response headers can be specified by using arbitrary header matching rules -via :ref:`retry policy configuration `. +via :ref:`retry policy configuration `. This header will only be honored for requests from internal clients. @@ -203,38 +206,32 @@ This header will only be honored for requests from internal clients. x-envoy-upstream-alt-stat-name ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Setting this header on egress requests will cause Envoy to emit upstream response code/timing -statistics to a dual stat tree. This can be useful for application level categories that Envoy -doesn't know about. The output tree is documented :ref:`here `. +Setting this header will cause Envoy to emit upstream response code/timing statistics to a dual stat tree. +This can be useful for application level categories that Envoy doesn't know about. The output tree +is documented :ref:`here `. -This should not be confused with :ref:`alt_stat_name ` which +This should not be confused with :ref:`alt_stat_name ` which is specified while defining the cluster and when provided specifies an alternative name for the cluster at the root of the statistic tree. -x-envoy-upstream-canary -^^^^^^^^^^^^^^^^^^^^^^^ - -If an upstream host sets this header, the router will use it to generate canary specific statistics. -The output tree is documented :ref:`here `. - .. 
_config_http_filters_router_x-envoy-upstream-rq-timeout-alt-response: x-envoy-upstream-rq-timeout-alt-response ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Setting this header on egress requests will cause Envoy to set a 204 response code (instead of 504) -in the event of a request timeout. The actual value of the header is ignored; only its presence -is considered. See also :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`. +Setting this header will cause Envoy to set a 204 response code (instead of 504) in the event of a request timeout. +The actual value of the header is ignored; only its presence is considered. See also +:ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`. .. _config_http_filters_router_x-envoy-upstream-rq-timeout-ms: x-envoy-upstream-rq-timeout-ms ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Setting this header on egress requests will cause Envoy to override the :ref:`route configuration timeout -` or gRPC client timeout set via `grpc-timeout header +Setting this header will cause Envoy to override the :ref:`route configuration timeout +` or gRPC client timeout set via `grpc-timeout header `_ by specifying :ref:`max_grpc_timeout -`. The timeout must be specified in millisecond +`. The timeout must be specified in millisecond units. See also :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`. .. _config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms: @@ -242,8 +239,8 @@ units. See also :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-tim x-envoy-upstream-rq-per-try-timeout-ms ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Setting this header on egress requests will cause Envoy to set a *per try* timeout on routed -requests. If a global route timeout is configured, this timeout must be less than the global route +Setting this header will cause Envoy to set a *per try* timeout on routed requests. 
+If a global route timeout is configured, this timeout must be less than the global route timeout (see :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`) or it is ignored. This allows a caller to set a tight per try timeout to allow for retries while maintaining a reasonable overall timeout. This timeout only applies before any part of the response is sent to @@ -252,15 +249,37 @@ the downstream, which normally happens after the upstream has sent response head x-envoy-hedge-on-per-try-timeout ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Setting this header on egress requests will cause Envoy to use a request -hedging strategy in the case of a per try timeout. This overrides the value set -in the :ref:`route configuration -`. This means that a retry +Setting this header will cause Envoy to use a request hedging strategy in the case of a per try timeout. +This overrides the value set in the :ref:`route configuration +`. This means that a retry will be issued without resetting the original request, leaving multiple upstream requests in flight. The value of the header should be "true" or "false", and is ignored if invalid. +.. _config_http_filters_router_x-envoy-decorator-operation: + +x-envoy-decorator-operation +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The value of this header will override any locally defined operation (span) name on the +server span generated by the tracing mechanism. + +HTTP response headers consumed from upstream +-------------------------------------------- + +x-envoy-decorator-operation +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The value of this header will override any locally defined operation (span) name on the +client span generated by the tracing mechanism. + +x-envoy-upstream-canary +^^^^^^^^^^^^^^^^^^^^^^^ + +If an upstream host sets this header, the router will use it to generate canary specific statistics. +The output tree is documented :ref:`here `. + .. 
_config_http_filters_router_x-envoy-immediate-health-check-fail: x-envoy-immediate-health-check-fail @@ -274,37 +293,21 @@ for the next health check interval. The host can become healthy again via standa checks. See the :ref:`health checking overview ` for more information. -.. _config_http_filters_router_x-envoy-overloaded_consumed: - -x-envoy-overloaded -^^^^^^^^^^^^^^^^^^ - -If this header is set by upstream, Envoy will not retry. Currently the value of the header is not -looked at, only its presence. - .. _config_http_filters_router_x-envoy-ratelimited: x-envoy-ratelimited ^^^^^^^^^^^^^^^^^^^ -If this header is set by upstream, Envoy will not retry. Currently the value of the header is not -looked at, only its presence. This header is set by :ref:`rate limit filter` -when the request is rate limited. - -.. _config_http_filters_router_x-envoy-decorator-operation: - -x-envoy-decorator-operation -^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -If this header is present on ingress requests, its value will override any locally defined -operation (span) name on the server span generated by the tracing mechanism. Similarly, if -this header is present on an egress response, its value will override any locally defined -operation (span) name on the client span. +If this header is set by upstream, Envoy will not retry unless the retry policy +:ref:`envoy-ratelimited` +is enabled. Currently, the value of the header is not looked at, only its +presence. This header is set by :ref:`rate limit +filter` when the request is rate limited. .. _config_http_filters_router_headers_set: -HTTP headers (set) ------------------- +HTTP request headers set on upstream calls +------------------------------------------ The router sets various HTTP headers both on the egress/request path as well as on the ingress/response path. They are documented in this section. @@ -319,13 +322,13 @@ x-envoy-attempt-count Sent to the upstream to indicate which attempt the current request is in a series of retries. 
The value will be "1" on the initial request, incrementing by one for each retry. Only set if the -:ref:`include_request_attempt_count ` +:ref:`include_request_attempt_count ` flag is set to true. Sent to the downstream to indicate how many upstream requests took place. The header will be absent if the router did not send any upstream requests. The value will be "1" if only the original upstream request was sent, incrementing by one for each retry. Only set if the -:ref:`include_attempt_count_in_response ` +:ref:`include_attempt_count_in_response ` flag is set to true. .. _config_http_filters_router_x-envoy-expected-rq-timeout-ms: @@ -337,25 +340,30 @@ This is the time in milliseconds the router expects the request to be completed. header so that the upstream host receiving the request can make decisions based on the request timeout, e.g., early exit. This is set on internal requests and is either taken from the :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms` header or the :ref:`route timeout -`, in that order. - -x-envoy-upstream-service-time -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Contains the time in milliseconds spent by the upstream host processing the request. This is useful -if the client wants to determine service time compared to network latency. This header is set on -responses. +`, in that order. .. _config_http_filters_router_x-envoy-original-path: x-envoy-original-path ^^^^^^^^^^^^^^^^^^^^^ -If the route utilizes :ref:`prefix_rewrite ` -or :ref:`regex_rewrite `, +If the route utilizes :ref:`prefix_rewrite ` +or :ref:`regex_rewrite `, Envoy will put the original path header in this header. This can be useful for logging and debugging. +HTTP response headers set on downstream responses +------------------------------------------------- + +.. 
_config_http_filters_router_x-envoy-upstream-service-time: + +x-envoy-upstream-service-time +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Contains the time in milliseconds spent by the upstream host processing the request and the network +latency between Envoy and upstream host. This is useful if the client wants to determine service time +compared to network latency between client and Envoy. This header is set on responses. + .. _config_http_filters_router_x-envoy-overloaded_set: x-envoy-overloaded @@ -375,7 +383,7 @@ The router outputs many statistics in the cluster namespace (depending on the cl the chosen route). See :ref:`here ` for more information. The router filter outputs statistics in the *http..* namespace. The :ref:`stat prefix -` comes from the +` comes from the owning HTTP connection manager. .. csv-table:: @@ -406,7 +414,7 @@ statistics: upstream_rq_<\*>, Counter, "Specific HTTP response codes (e.g., 201, 302, etc.)" upstream_rq_retry, Counter, Total request retries upstream_rq_retry_limit_exceeded, Counter, Total requests not retried due to exceeding :ref:`the configured number of maximum retries ` - upstream_rq_retry_overflow, Counter, Total requests not retried due to circuit breaking or exceeding the :ref:`retry budgets ` + upstream_rq_retry_overflow, Counter, Total requests not retried due to circuit breaking or exceeding the :ref:`retry budgets ` upstream_rq_retry_success, Counter, Total request retry successes upstream_rq_time, Histogram, Request time milliseconds upstream_rq_timeout, Counter, Total requests that timed out waiting for a response diff --git a/docs/root/configuration/http/http_filters/squash_filter.rst b/docs/root/configuration/http/http_filters/squash_filter.rst index 006d7612417f6..494f05f03b432 100644 --- a/docs/root/configuration/http/http_filters/squash_filter.rst +++ b/docs/root/configuration/http/http_filters/squash_filter.rst @@ -20,7 +20,7 @@ request, before the request arrive to the application code, without any changes 
Configuration ------------- -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.http.squash*. How it works diff --git a/docs/root/configuration/http/http_filters/tap_filter.rst b/docs/root/configuration/http/http_filters/tap_filter.rst index 5bcae0a77659b..f5ed7c7a32ea7 100644 --- a/docs/root/configuration/http/http_filters/tap_filter.rst +++ b/docs/root/configuration/http/http_filters/tap_filter.rst @@ -3,7 +3,7 @@ Tap === -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.http.tap*. .. attention:: @@ -15,9 +15,9 @@ Tap The HTTP tap filter is used to interpose on and record HTTP traffic. At a high level, the configuration is composed of two pieces: -1. :ref:`Match configuration `: a list of +1. :ref:`Match configuration `: a list of conditions under which the filter will match an HTTP request and begin a tap session. -2. :ref:`Output configuration `: a list of output +2. :ref:`Output configuration `: a list of output sinks that the filter will write the matched and tapped data to. Each of these concepts will be covered incrementally over the course of several example @@ -32,7 +32,7 @@ Example filter configuration: name: envoy.filters.http.tap typed_config: - "@type": type.googleapis.com/envoy.config.filter.http.tap.v2alpha.Tap + "@type": type.googleapis.com/envoy.extensions.filters.http.tap.v3.Tap common_config: admin_config: config_id: test_config_id @@ -46,15 +46,15 @@ Admin handler ------------- When the HTTP filter specifies an :ref:`admin_config -`, it is configured for admin control and +`, it is configured for admin control and the :http:post:`/tap` admin handler will be installed. The admin handler can be used for live tapping and debugging of HTTP traffic. It works as follows: 1. A POST request is used to provide a valid tap configuration. 
The POST request body can be either the JSON or YAML representation of the :ref:`TapConfig - ` message. + ` message. 2. If the POST request is accepted, Envoy will stream :ref:`HttpBufferedTrace - ` messages (serialized to JSON) until the admin + ` messages (serialized to JSON) until the admin request is terminated. An example POST body: @@ -122,20 +122,57 @@ Another example POST body: The preceding configuration instructs the tap filter to match any HTTP requests. All requests will be tapped and streamed out the admin endpoint. +Another example POST body: + +.. code-block:: yaml + + config_id: test_config_id + tap_config: + match_config: + and_match: + rules: + - http_request_headers_match: + headers: + - name: foo + exact_match: bar + - http_request_generic_body_match: + patterns: + - string_match: test + - binary_match: 3q2+7w== + bytes_limit: 128 + - http_response_generic_body_match: + patterns: + - binary_match: vu8= + bytes_limit: 64 + output_config: + sinks: + - streaming_admin: {} + +The preceding configuration instructs the tap filter to match any HTTP requests in which a request +header ``foo: bar`` is present AND request body contains string ``test`` and hex bytes ``deadbeef`` (``3q2+7w==`` in base64 format) +in the first 128 bytes AND response body contains hex bytes ``beef`` (``vu8=`` in base64 format) in the first 64 bytes. If all of these +conditions are met, the request will be tapped and streamed out to the admin endpoint. + +.. attention:: + + Searching for patterns in HTTP body is potentially cpu intensive. For each specified pattern, http body is scanned byte by byte to find a match. + If multiple patterns are specified, the process is repeated for each pattern. If location of a pattern is known, ``bytes_limit`` should be specified + to scan only part of the http body. + Output format ------------- Each output sink has an associated :ref:`format -`. The default format is +`. The default format is :ref:`JSON_BODY_AS_BYTES -`. This format is +`. 
This format is easy to read JSON, but has the downside that body data is base64 encoded. In the case that the tap is known to be on human readable data, the :ref:`JSON_BODY_AS_STRING -` format may be +` format may be more user friendly. See the reference documentation for more information on other available formats. An example of a streaming admin tap configuration that uses the :ref:`JSON_BODY_AS_STRING -` format: +` format: .. code-block:: yaml @@ -154,9 +191,9 @@ Buffered body limits For buffered taps, Envoy will limit the amount of body data that is tapped to avoid OOM situations. The default limit is 1KiB for both received (request) and transmitted (response) data. This is configurable via the :ref:`max_buffered_rx_bytes -` and +` and :ref:`max_buffered_tx_bytes -` settings. +` settings. .. _config_http_filters_tap_streaming: @@ -169,18 +206,18 @@ first the request headers will be matched, then the request body if present, the trailers if present, then the response headers if present, etc. The filter additionally supports optional streamed output which is governed by the :ref:`streaming -` setting. If this setting is false +` setting. If this setting is false (the default), Envoy will emit :ref:`fully buffered traces -`. Users are likely to find this format easier +`. Users are likely to find this format easier to interact with for simple cases. In cases where fully buffered traces are not practical (e.g., very large request and responses, long lived streaming APIs, etc.), the streaming setting can be set to true, and Envoy will emit -multiple :ref:`streamed trace segments ` for +multiple :ref:`streamed trace segments ` for each tap. In this case, it is required that post-processing is performed to stitch all of the trace segments back together into a usable form. Also note that binary protobuf is not a self-delimiting format. If binary protobuf output is desired, the :ref:`PROTO_BINARY_LENGTH_DELIMITED -` output +` output format should be used. 
An static filter configuration to enable streaming output looks like: @@ -189,7 +226,7 @@ An static filter configuration to enable streaming output looks like: name: envoy.filters.http.tap typed_config: - "@type": type.googleapis.com/envoy.config.filter.http.tap.v2alpha.Tap + "@type": type.googleapis.com/envoy.extensions.filters.http.tap.v3.Tap common_config: static_config: match_config: @@ -232,7 +269,7 @@ Statistics ---------- The tap filter outputs statistics in the *http..tap.* namespace. The :ref:`stat prefix -` +` comes from the owning HTTP connection manager. .. csv-table:: diff --git a/docs/root/configuration/listeners/lds.rst b/docs/root/configuration/listeners/lds.rst index 94511a1cb2213..a54c9ab89b0f6 100644 --- a/docs/root/configuration/listeners/lds.rst +++ b/docs/root/configuration/listeners/lds.rst @@ -9,7 +9,7 @@ depending on what is required. The semantics of listener updates are as follows: -* Every listener must have a unique :ref:`name `. If a name is not +* Every listener must have a unique :ref:`name `. If a name is not provided, Envoy will create a UUID. Listeners that are to be dynamically updated should have a unique name supplied by the management server. * When a listener is added, it will be "warmed" before taking traffic. 
For example, if the listener @@ -31,9 +31,9 @@ The semantics of listener updates are as follows: Configuration ------------- -* :ref:`v2 LDS API ` +* :ref:`v3 LDS API ` Statistics ---------- -LDS has a :ref:`statistics ` tree rooted at *listener_manager.lds.* \ No newline at end of file +LDS has a :ref:`statistics ` tree rooted at *listener_manager.lds.* diff --git a/docs/root/configuration/listeners/listener_filters/http_inspector.rst b/docs/root/configuration/listeners/listener_filters/http_inspector.rst index 7fc1b620414e8..0c744531c9141 100644 --- a/docs/root/configuration/listeners/listener_filters/http_inspector.rst +++ b/docs/root/configuration/listeners/listener_filters/http_inspector.rst @@ -5,10 +5,10 @@ HTTP Inspector HTTP Inspector listener filter allows detecting whether the application protocol appears to be HTTP, and if it is HTTP, it detects the HTTP protocol (HTTP/1.x or HTTP/2) further. This can be used to select a -:ref:`FilterChain ` via the :ref:`application_protocols ` -of a :ref:`FilterChainMatch `. +:ref:`FilterChain ` via the :ref:`application_protocols ` +of a :ref:`FilterChainMatch `. -* :ref:`Listener filter v2 API reference ` +* :ref:`Listener filter v3 API reference ` * This filter should be configured with the name *envoy.filters.listener.http_inspector*. Example diff --git a/docs/root/configuration/listeners/listener_filters/original_dst_filter.rst b/docs/root/configuration/listeners/listener_filters/original_dst_filter.rst index fba1d5cb48178..5d764068a518d 100644 --- a/docs/root/configuration/listeners/listener_filters/original_dst_filter.rst +++ b/docs/root/configuration/listeners/listener_filters/original_dst_filter.rst @@ -5,11 +5,11 @@ Original Destination Original destination listener filter reads the SO_ORIGINAL_DST socket option set when a connection has been redirected by an iptables REDIRECT target, or by an iptables TPROXY target in combination -with setting the listener's :ref:`transparent ` option. 
+with setting the listener's :ref:`transparent ` option. Later processing in Envoy sees the restored destination address as the connection's local address, rather than the address at which the listener is listening at. Furthermore, :ref:`an original destination cluster ` may be used to forward HTTP requests or TCP connections to the restored destination address. -* :ref:`v2 API reference ` +* :ref:`v2 API reference ` * This filter should be configured with the name *envoy.filters.listener.original_dst*. diff --git a/docs/root/configuration/listeners/listener_filters/original_src_filter.rst b/docs/root/configuration/listeners/listener_filters/original_src_filter.rst index 72f98dd97e187..8aa4a679678d6 100644 --- a/docs/root/configuration/listeners/listener_filters/original_src_filter.rst +++ b/docs/root/configuration/listeners/listener_filters/original_src_filter.rst @@ -3,7 +3,7 @@ Original Source =============== -* :ref:`Listener filter v2 API reference ` +* :ref:`Listener filter v3 API reference ` * This filter should be configured with the name *envoy.filters.listener.original_src*. The original source listener filter replicates the downstream remote address of the connection on @@ -33,10 +33,10 @@ to forcefully route any traffic whose IP was replicated by Envoy back through th If Envoy and the upstream are on the same host -- e.g. in an sidecar deployment --, then iptables and routing rules can be used to ensure correct behaviour. The filter has an unsigned integer configuration, -:ref:`mark `. Setting +:ref:`mark `. Setting this to *X* causes Envoy to *mark* all upstream packets originating from this listener with value *X*. Note that if -:ref:`mark ` is set +:ref:`mark ` is set to 0, Envoy will not mark upstream packets. We can use the following set of commands to ensure that all ipv4 and ipv6 traffic marked with *X* @@ -72,7 +72,9 @@ marked with 123. 
port_value: 8888 listener_filters: - name: envoy.filters.listener.proxy_protocol + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.listener.proxy_protocol.v3.ProxyProtocol - name: envoy.filters.listener.original_src typed_config: - "@type": type.googleapis.com/envoy.config.filter.listener.original_src.v2alpha1.OriginalSrc + "@type": type.googleapis.com/envoy.extensions.filters.listener.original_src.v3.OriginalSrc mark: 123 diff --git a/docs/root/configuration/listeners/listener_filters/proxy_protocol.rst b/docs/root/configuration/listeners/listener_filters/proxy_protocol.rst index e679607debb7a..4848c364308f6 100644 --- a/docs/root/configuration/listeners/listener_filters/proxy_protocol.rst +++ b/docs/root/configuration/listeners/listener_filters/proxy_protocol.rst @@ -11,8 +11,8 @@ which places the original coordinates (IP, PORT) into a connection-string. Envoy then extracts these and uses them as the remote address. In Proxy Protocol v2 there exists the concept of extensions (TLV) -tags that are optional. This implementation skips over these without -using them. +tags that are optional. If the type of the TLV is added to the filter's configuration, +the TLV will be emitted as dynamic metadata with user-specified key. This implementation supports both version 1 and version 2, it automatically determines on a per-connection basis which of the two @@ -23,7 +23,7 @@ the standard does not allow parsing to determine if it is present or not. If there is a protocol error or an unsupported address family (e.g. AF_UNIX) the connection will be closed and an error thrown. -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.listener.proxy_protocol*. 
Statistics @@ -35,4 +35,4 @@ This filter emits the following statistics: :header: Name, Type, Description :widths: 1, 1, 2 - downstream_cx_proxy_proto_error, Counter, Total proxy protocol errors + downstream_cx_proxy_proto_error, Counter, Total proxy protocol errors \ No newline at end of file diff --git a/docs/root/configuration/listeners/listener_filters/tls_inspector.rst b/docs/root/configuration/listeners/listener_filters/tls_inspector.rst index 5aba1cea0b2c1..e9897435e8809 100644 --- a/docs/root/configuration/listeners/listener_filters/tls_inspector.rst +++ b/docs/root/configuration/listeners/listener_filters/tls_inspector.rst @@ -9,13 +9,13 @@ TLS or plaintext, and if it is TLS, it detects the and/or `Application-Layer Protocol Negotiation `_ from the client. This can be used to select a -:ref:`FilterChain ` via the -:ref:`server_names ` and/or -:ref:`application_protocols ` -of a :ref:`FilterChainMatch `. +:ref:`FilterChain ` via the +:ref:`server_names ` and/or +:ref:`application_protocols ` +of a :ref:`FilterChainMatch `. * :ref:`SNI ` -* :ref:`v2 API reference ` +* :ref:`v2 API reference ` * This filter should be configured with the name *envoy.filters.listener.tls_inspector*. Example @@ -26,8 +26,8 @@ A sample filter configuration could be: .. 
code-block:: yaml listener_filters: - - name: "envoy.filters.listener.tls_inspector" - typed_config: {} + - name: "envoy.filters.listener.tls_inspector" + typed_config: {} Statistics ---------- diff --git a/docs/root/configuration/listeners/listeners.rst b/docs/root/configuration/listeners/listeners.rst index 9b3e2161ef0c8..5e4cc6b22c5e4 100644 --- a/docs/root/configuration/listeners/listeners.rst +++ b/docs/root/configuration/listeners/listeners.rst @@ -8,6 +8,7 @@ Listeners overview stats + runtime listener_filters/listener_filters network_filters/network_filters udp_filters/udp_filters diff --git a/docs/root/configuration/listeners/network_filters/client_ssl_auth_filter.rst b/docs/root/configuration/listeners/network_filters/client_ssl_auth_filter.rst index d42a235953afd..d2243f21cc44a 100644 --- a/docs/root/configuration/listeners/network_filters/client_ssl_auth_filter.rst +++ b/docs/root/configuration/listeners/network_filters/client_ssl_auth_filter.rst @@ -4,7 +4,7 @@ Client TLS authentication ========================= * Client TLS authentication filter :ref:`architecture overview ` -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.network.client_ssl_auth*. .. 
_config_network_filters_client_ssl_auth_stats: @@ -22,7 +22,7 @@ Every configured client TLS authentication filter has statistics rooted at update_success, Counter, Total principal update successes update_failure, Counter, Total principal update failures auth_no_ssl, Counter, Total connections ignored due to no TLS - auth_ip_white_list, Counter, Total connections allowed due to the IP white list + auth_ip_allowlist, Counter, Total connections allowed due to the IP allowlist auth_digest_match, Counter, Total connections allowed due to certificate match auth_digest_no_match, Counter, Total connections denied due to no certificate match total_principals, Gauge, Total loaded principals diff --git a/docs/root/configuration/listeners/network_filters/direct_response_filter.rst b/docs/root/configuration/listeners/network_filters/direct_response_filter.rst index de8ae26e3a8ae..c8d4750123f4f 100644 --- a/docs/root/configuration/listeners/network_filters/direct_response_filter.rst +++ b/docs/root/configuration/listeners/network_filters/direct_response_filter.rst @@ -9,4 +9,4 @@ can be used, for example, as a terminal filter in filter chains to collect telemetry for blocked traffic. This filter should be configured with the name *envoy.filters.network.direct_response*. -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` diff --git a/docs/root/configuration/listeners/network_filters/dubbo_proxy_filter.rst b/docs/root/configuration/listeners/network_filters/dubbo_proxy_filter.rst index 28e730cd37e8d..fd8c449cc0e65 100644 --- a/docs/root/configuration/listeners/network_filters/dubbo_proxy_filter.rst +++ b/docs/root/configuration/listeners/network_filters/dubbo_proxy_filter.rst @@ -9,7 +9,7 @@ the metadata includes the basic request ID, request type, serialization type, and the required service name, method name, parameter name, and parameter value for routing. 
-* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.network.dubbo_proxy*. .. _config_network_filters_dubbo_proxy_stats: @@ -17,7 +17,7 @@ and parameter value for routing. Statistics ---------- -Every configured dubbo proxy filter has statistics rooted at *redis..* with the +Every configured dubbo proxy filter has statistics rooted at *dubbo..* with the following statistics: .. csv-table:: @@ -60,7 +60,7 @@ the second step is to add your configuration, configuration method refer to the - filters: - name: envoy.filters.network.dubbo_proxy typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.dubbo_proxy.v2alpha1.DubboProxy + "@type": type.googleapis.com/envoy.extensions.filters.network.dubbo_proxy.v3.DubboProxy stat_prefix: dubbo_incomming_stats protocol_type: Dubbo serialization_type: Hessian2 @@ -80,4 +80,4 @@ the second step is to add your configuration, configuration method refer to the "@type": type.googleapis.com/google.protobuf.Struct value: name: test_service - - name: envoy.filters.dubbo.router \ No newline at end of file + - name: envoy.filters.dubbo.router diff --git a/docs/root/configuration/listeners/network_filters/echo_filter.rst b/docs/root/configuration/listeners/network_filters/echo_filter.rst index ff1fdfa70166c..7d9dc21e5fe7b 100644 --- a/docs/root/configuration/listeners/network_filters/echo_filter.rst +++ b/docs/root/configuration/listeners/network_filters/echo_filter.rst @@ -7,4 +7,4 @@ The echo is a trivial network filter mainly meant to demonstrate the network fil installed it will echo (write) all received data back to the connected downstream client. This filter should be configured with the name *envoy.filters.network.echo*. 
-* :ref:`v2 API reference ` +* :ref:`v3 API reference ` diff --git a/docs/root/configuration/listeners/network_filters/ext_authz_filter.rst b/docs/root/configuration/listeners/network_filters/ext_authz_filter.rst index 65c25788a3c79..83dffae8a7a18 100644 --- a/docs/root/configuration/listeners/network_filters/ext_authz_filter.rst +++ b/docs/root/configuration/listeners/network_filters/ext_authz_filter.rst @@ -4,7 +4,7 @@ External Authorization ====================== * External authorization :ref:`architecture overview ` -* :ref:`Network filter v2 API reference ` +* :ref:`Network filter v3 API reference ` * This filter should be configured with the name *envoy.filters.network.ext_authz*. The external authorization network filter calls an external authorization service to check if the @@ -16,12 +16,12 @@ then the connection will be closed. authorized prior to rest of the filters processing the request. The content of the request that are passed to an authorization service is specified by -:ref:`CheckRequest `. +:ref:`CheckRequest `. .. _config_network_filters_ext_authz_network_configuration: The network filter, gRPC service, can be configured as follows. You can see all the configuration -options at :ref:`Network filter `. +options at :ref:`Network filter `. Example ------- @@ -33,7 +33,7 @@ A sample filter configuration could be: filters: - name: envoy.filters.network.ext_authz typed_config: - "@type": type.googleapis.com/envoy.config.filter.http.ext_authz.v2.ExtAuthz + "@type": type.googleapis.com/envoy.extensions.filters.network.ext_authz.v3.ExtAuthz stat_prefix: ext_authz grpc_service: envoy_grpc: @@ -65,9 +65,19 @@ The network filter outputs statistics in the *config.ext_authz.* namespace. total, Counter, Total responses from the filter. error, Counter, Total errors contacting the external service. - denied, Counter, Total responses from the authorizations service that were to deny the traffic. 
+ denied, Counter, Total responses from the authorizations service that were to deny the traffic. failure_mode_allowed, Counter, "Total requests that were error(s) but were allowed through because of failure_mode_allow set to true." ok, Counter, Total responses from the authorization service that were to allow the traffic. cx_closed, Counter, Total connections that were closed. active, Gauge, Total currently active requests in transit to the authorization service. + +Dynamic Metadata +---------------- +.. _config_network_filters_ext_authz_dynamic_metadata: + +The External Authorization filter emits dynamic metadata as an opaque ``google.protobuf.Struct`` +*only* when the gRPC authorization server returns an :ref:`OK +` :ref:`CheckResponse +` with a filled :ref:`dynamic_metadata +` field. diff --git a/docs/root/configuration/listeners/network_filters/kafka_broker_filter.rst b/docs/root/configuration/listeners/network_filters/kafka_broker_filter.rst index 8f7ef37427fee..4753f3845a78c 100644 --- a/docs/root/configuration/listeners/network_filters/kafka_broker_filter.rst +++ b/docs/root/configuration/listeners/network_filters/kafka_broker_filter.rst @@ -11,7 +11,7 @@ The filter attempts not to influence the communication between client and broker that could not be decoded (due to Kafka client or broker running a newer version than supported by this filter) are forwarded as-is. -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.network.kafka_broker*. .. 
attention:: @@ -38,11 +38,11 @@ in the configuration snippet below: - filters: - name: envoy.filters.network.kafka_broker typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.kafka_broker.v2alpha1.KafkaBroker + "@type": type.googleapis.com/envoy.extensions.filters.network.kafka_broker.v3.KafkaBroker stat_prefix: exampleprefix - name: envoy.filters.network.tcp_proxy typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.tcp_proxy.v2.TcpProxy + "@type": type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy stat_prefix: tcp cluster: localkafka clusters: @@ -50,10 +50,15 @@ in the configuration snippet below: connect_timeout: 0.25s type: strict_dns lb_policy: round_robin - hosts: - - socket_address: - address: 127.0.0.1 # Kafka broker's host. - port_value: 9092 # Kafka broker's port. + load_assignment: + cluster_name: some_service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 # Kafka broker's host + port_value: 9092 # Kafka broker's port. The Kafka broker needs to advertise the Envoy listener port instead of its own. diff --git a/docs/root/configuration/listeners/network_filters/local_rate_limit_filter.rst b/docs/root/configuration/listeners/network_filters/local_rate_limit_filter.rst index 5939a63ae7d12..4ab02eb7145a9 100644 --- a/docs/root/configuration/listeners/network_filters/local_rate_limit_filter.rst +++ b/docs/root/configuration/listeners/network_filters/local_rate_limit_filter.rst @@ -4,8 +4,8 @@ Local rate limit ================ * Local rate limiting :ref:`architecture overview ` -* :ref:`v2 API reference - ` +* :ref:`v3 API reference + ` * This filter should be configured with the name *envoy.filters.network.local_ratelimit*. .. note:: @@ -16,7 +16,7 @@ Overview -------- The local rate limit filter applies a :ref:`token bucket -` rate +` rate limit to incoming connections that are processed by the filter's filter chain. 
Each connection processed by the filter utilizes a single token, and if no tokens are available, the connection will be immediately closed without further filter iteration. @@ -42,5 +42,5 @@ Runtime ------- The local rate limit filter can be runtime feature flagged via the :ref:`enabled -` +` configuration field. diff --git a/docs/root/configuration/listeners/network_filters/mongo_proxy_filter.rst b/docs/root/configuration/listeners/network_filters/mongo_proxy_filter.rst index 9aa734c69ec52..8c5a451ba4ceb 100644 --- a/docs/root/configuration/listeners/network_filters/mongo_proxy_filter.rst +++ b/docs/root/configuration/listeners/network_filters/mongo_proxy_filter.rst @@ -4,7 +4,7 @@ Mongo proxy =========== * MongoDB :ref:`architecture overview ` -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.network.mongo_proxy*. .. _config_network_filters_mongo_proxy_fault_injection: @@ -12,7 +12,7 @@ Mongo proxy Fault injection --------------- -The Mongo proxy filter supports fault injection. See the v2 API reference for how to +The Mongo proxy filter supports fault injection. See the v3 API reference for how to configure. .. _config_network_filters_mongo_proxy_stats: @@ -181,7 +181,7 @@ Dynamic Metadata ---------------- The Mongo filter emits the following dynamic metadata when enabled via the -:ref:`configuration `. +:ref:`configuration `. This dynamic metadata is available as key-value pairs where the key represents the database and the collection being accessed, and the value is a list of operations performed on the collection. 
diff --git a/docs/root/configuration/listeners/network_filters/mysql_proxy_filter.rst b/docs/root/configuration/listeners/network_filters/mysql_proxy_filter.rst index 750e3b30e31ef..24eb5cb12878a 100644 --- a/docs/root/configuration/listeners/network_filters/mysql_proxy_filter.rst +++ b/docs/root/configuration/listeners/network_filters/mysql_proxy_filter.rst @@ -34,11 +34,11 @@ in the configuration snippet below: - filters: - name: envoy.filters.network.mysql_proxy typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.mysql_proxy.v1alpha1.MySQLProxy + "@type": type.googleapis.com/envoy.extensions.filters.network.mysql_proxy.v3.MySQLProxy stat_prefix: mysql - name: envoy.filters.network.tcp_proxy typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.tcp_proxy.v2.TcpProxy + "@type": type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy stat_prefix: tcp cluster: ... @@ -96,11 +96,11 @@ _catalog_ table in the _productdb_ database. - filters: - name: envoy.filters.network.mysql_proxy typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.mysql_proxy.v1alpha1.MySQLProxy + "@type": type.googleapis.com/envoy.extensions.filters.network.mysql_proxy.v3.MySQLProxy stat_prefix: mysql - name: envoy.filters.network.rbac typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.rbac.v2.RBAC + "@type": type.googleapis.com/envoy.extensions.filters.network.rbac.v3.RBAC stat_prefix: rbac rules: action: DENY @@ -120,6 +120,6 @@ _catalog_ table in the _productdb_ database. 
- any: true - name: envoy.filters.network.tcp_proxy typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.tcp_proxy.v2.TcpProxy + "@type": type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy stat_prefix: tcp cluster: mysql diff --git a/docs/root/configuration/listeners/network_filters/network_filters.rst b/docs/root/configuration/listeners/network_filters/network_filters.rst index 65511250f84bb..4c29a385acad6 100644 --- a/docs/root/configuration/listeners/network_filters/network_filters.rst +++ b/docs/root/configuration/listeners/network_filters/network_filters.rst @@ -23,6 +23,7 @@ filters. rate_limit_filter rbac_filter redis_proxy_filter + rocketmq_proxy_filter tcp_proxy_filter thrift_proxy_filter sni_cluster_filter diff --git a/docs/root/configuration/listeners/network_filters/postgres_proxy_filter.rst b/docs/root/configuration/listeners/network_filters/postgres_proxy_filter.rst index dd7b489fd344d..eb9ffb93c79df 100644 --- a/docs/root/configuration/listeners/network_filters/postgres_proxy_filter.rst +++ b/docs/root/configuration/listeners/network_filters/postgres_proxy_filter.rst @@ -4,9 +4,11 @@ Postgres proxy ================ The Postgres proxy filter decodes the wire protocol between a Postgres client (downstream) and a Postgres server -(upstream). The decoded information is currently used only to produce Postgres level statistics like sesions, -statements or transactions executed, among others. This current version does not decode SQL queries. Future versions may -add more statistics and more advanced capabilities. When the Postgres filter detects that a session is encrypted, the messages are ignored and no decoding takes +(upstream). The decoded information is used to produce Postgres level statistics like sessions, +statements or transactions executed, among others. The Postgres proxy filter parses SQL queries carried in ``Query`` and ``Parse`` messages. 
+When SQL query has been parsed successfully, the :ref:`metadata ` is created, +which may be used by other filters like :ref:`RBAC `. +When the Postgres filter detects that a session is encrypted, the messages are ignored and no decoding takes place. More information: * Postgres :ref:`architecture overview ` @@ -78,6 +80,8 @@ Every configured Postgres proxy filter has statistics rooted at postgres., string, The resource name in *table.db* format. + [], list, A list of strings representing the operations executed on the resource. Operations can be one of insert/update/select/drop/delete/create/alter/show. + +.. attention:: + + Currently used parser does not successfully parse all SQL statements and it cannot be assumed that all SQL queries will successfully produce Dynamic Metadata. + Creating Dynamic Metadata from SQL queries is on best-effort basis at the moment. If parsing of an SQL query fails, ``statements_parse_error`` counter is increased, log message is created, Dynamic Metadata is not + produced, but the Postgres message is still forwarded to upstream Postgres server. + +Parsing SQL statements and emitting Dynamic Metadata can be disabled by setting :ref:`enable_sql_parsing` to false. diff --git a/docs/root/configuration/listeners/network_filters/rate_limit_filter.rst b/docs/root/configuration/listeners/network_filters/rate_limit_filter.rst index 4196956ff00cd..1a94053f0c5af 100644 --- a/docs/root/configuration/listeners/network_filters/rate_limit_filter.rst +++ b/docs/root/configuration/listeners/network_filters/rate_limit_filter.rst @@ -4,7 +4,7 @@ Rate limit ========== * Global rate limiting :ref:`architecture overview ` -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.network.ratelimit*. .. 
note:: @@ -30,7 +30,7 @@ following statistics: cx_closed, Counter, Total connections closed due to an over limit response from the rate limit service active, Gauge, Total active requests to the rate limit service failure_mode_allowed, Counter, "Total requests that were error(s) but were allowed through because - of :ref:`failure_mode_deny ` set to false." + of :ref:`failure_mode_deny ` set to false." Runtime ------- diff --git a/docs/root/configuration/listeners/network_filters/rbac_filter.rst b/docs/root/configuration/listeners/network_filters/rbac_filter.rst index 9d9821c41af51..68ae9f2172d46 100644 --- a/docs/root/configuration/listeners/network_filters/rbac_filter.rst +++ b/docs/root/configuration/listeners/network_filters/rbac_filter.rst @@ -10,7 +10,7 @@ block-list (DENY) set of policies based on properties of the connection (IPs, po This filter also supports policy in both enforcement and shadow modes. Shadow mode won't effect real users, it is used to test that a new set of policies work before rolling out to production. -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.network.rbac*. Statistics @@ -26,6 +26,8 @@ The RBAC network filter outputs statistics in the *.rbac.* namespac denied, Counter, Total requests that were denied access shadow_allowed, Counter, Total requests that would be allowed access by the filter's shadow rules shadow_denied, Counter, Total requests that would be denied access by the filter's shadow rules + logged, Counter, Total requests that should be logged + not_logged, Counter, Total requests that should not be logged .. _config_network_filters_rbac_dynamic_metadata: @@ -40,3 +42,4 @@ The RBAC filter emits the following dynamic metadata. shadow_effective_policy_id, string, The effective shadow policy ID matching the action (if any). shadow_engine_result, string, The engine result for the shadow rules (i.e. either `allowed` or `denied`). 
+ access_log_hint, boolean, Whether the request should be logged. This metadata is shared and set under the key namespace 'envoy.common' (See :ref:`Shared Dynamic Metadata`). diff --git a/docs/root/configuration/listeners/network_filters/redis_proxy_filter.rst b/docs/root/configuration/listeners/network_filters/redis_proxy_filter.rst index 666bd3bb1b023..3c3fb77f3861f 100644 --- a/docs/root/configuration/listeners/network_filters/redis_proxy_filter.rst +++ b/docs/root/configuration/listeners/network_filters/redis_proxy_filter.rst @@ -4,7 +4,7 @@ Redis proxy =========== * Redis :ref:`architecture overview ` -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.network.redis_proxy*. .. _config_network_filters_redis_proxy_stats: @@ -49,7 +49,7 @@ Per command statistics The Redis filter will gather statistics for commands in the *redis..command..* namespace. By default latency stats are in milliseconds and can be -changed to microseconds by setting the configuration parameter :ref:`latency_in_micros ` to true. +changed to microseconds by setting the configuration parameter :ref:`latency_in_micros ` to true. .. csv-table:: :header: Name, Type, Description diff --git a/docs/root/configuration/listeners/network_filters/rocketmq_proxy_filter.rst b/docs/root/configuration/listeners/network_filters/rocketmq_proxy_filter.rst new file mode 100644 index 0000000000000..50033efc899ce --- /dev/null +++ b/docs/root/configuration/listeners/network_filters/rocketmq_proxy_filter.rst @@ -0,0 +1,76 @@ +.. _config_network_filters_rocketmq_proxy: + +RocketMQ proxy +============== + +Apache RocketMQ is a distributed messaging system, which is composed of four types of roles: producer, consumer, name +server and broker server. The former two are embedded into user application in form of SDK; whilst the latter are +standalone servers. 
+ +A message in RocketMQ carries a topic as its destination and optionally one or more tags as application-specific labels. + +Producers are used to send messages to brokers according to their topics. Similar to many distributed systems, +producers need to know how to connect to these serving brokers. To achieve this goal, RocketMQ provides name server +clusters for producers to look up. Namely, when producers attempt to send messages with a new topic, they first +try to look up the addresses (called route info) of brokers that serve the topic from name servers. Once producers +get the route info of the topic, they actively cache them in memory and renew them periodically thereafter. This +mechanism, though simple, effectively keeps service availability high without demanding availability of name server +service. + +Brokers provide messaging services to end users. In addition to various messaging services, they also periodically +report health status and route info of topics currently served to name servers. + +The major role of the name server is to serve querying of route info for a topic. Additionally, it also purges route info +entries once the belonging brokers fail to report their health info for a configured period of time. This ensures +clients almost always connect to brokers that are online and ready to serve. + +Consumers are used by applications to pull messages from brokers. They perform similar heartbeats to maintain alive +status. RocketMQ brokers support two message-fetch approaches: long-pulling and pop. + +Using the first approach, consumers have to implement a load-balancing algorithm. The pop approach, in the perspective of +consumers, is stateless. + +The Envoy RocketMQ filter proxies requests and responses between producers/consumers and brokers. Various statistical items +are collected to enhance observability. + +At present, pop-based message fetching is implemented. Long-pulling will be implemented in the next pull request. + +..
_config_network_filters_rocketmq_proxy_stats: + +Statistics +---------- + +Every configured rocketmq proxy filter has statistics rooted at *rocketmq..* with the +following statistics: + +.. csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + request, Counter, Total requests + request_decoding_error, Counter, Total decoding error requests + request_decoding_success, Counter, Total decoding success requests + response, Counter, Total responses + response_decoding_error, Counter, Total decoding error responses + response_decoding_success, Counter, Total decoding success responses + response_error, Counter, Total error responses + response_success, Counter, Total success responses + heartbeat, Counter, Total heartbeat requests + unregister, Counter, Total unregister requests + get_topic_route, Counter, Total getting topic route requests + send_message_v1, Counter, Total sending message v1 requests + send_message_v2, Counter, Total sending message v2 requests + pop_message, Counter, Total popping message requests + ack_message, Counter, Total acking message requests + get_consumer_list, Counter, Total getting consumer list requests + maintenance_failure, Counter, Total maintenance failures + request_active, Gauge, Total active requests + send_message_v1_active, Gauge, Total active sending message v1 requests + send_message_v2_active, Gauge, Total active sending message v2 requests + pop_message_active, Gauge, Total active popping message requests + get_topic_route_active, Gauge, Total active getting topic route requests + send_message_pending, Gauge, Total pending sending message requests + pop_message_pending, Gauge, Total pending popping message requests + get_topic_route_pending, Gauge, Total pending getting topic route requests + total_pending, Gauge, Total pending requests + request_time_ms, Histogram, Request time in milliseconds diff --git a/docs/root/configuration/listeners/network_filters/sni_cluster_filter.rst
b/docs/root/configuration/listeners/network_filters/sni_cluster_filter.rst index 1ad5d26f946ff..207b5932b7d5a 100644 --- a/docs/root/configuration/listeners/network_filters/sni_cluster_filter.rst +++ b/docs/root/configuration/listeners/network_filters/sni_cluster_filter.rst @@ -11,4 +11,4 @@ with the name *envoy.filters.network.sni_cluster*. This filter has no configuration. It must be installed before the :ref:`tcp_proxy ` filter. -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` diff --git a/docs/root/configuration/listeners/network_filters/sni_dynamic_forward_proxy_filter.rst b/docs/root/configuration/listeners/network_filters/sni_dynamic_forward_proxy_filter.rst index 4751b3a614e58..1e01ec5922400 100644 --- a/docs/root/configuration/listeners/network_filters/sni_dynamic_forward_proxy_filter.rst +++ b/docs/root/configuration/listeners/network_filters/sni_dynamic_forward_proxy_filter.rst @@ -48,14 +48,14 @@ SNI dynamic forward proxy. - filters: - name: envoy.filters.network.sni_dynamic_forward_proxy typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.sni_dynamic_forward_proxy.v2alpha.FilterConfig + "@type": type.googleapis.com/envoy.extensions.filters.network.sni_dynamic_forward_proxy.v3alpha.FilterConfig port_value: 443 dns_cache_config: name: dynamic_forward_proxy_cache_config dns_lookup_family: V4_ONLY - name: envoy.tcp_proxy typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.tcp_proxy.v2.TcpProxy + "@type": type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy stat_prefix: tcp cluster: dynamic_forward_proxy_cluster clusters: @@ -65,7 +65,7 @@ SNI dynamic forward proxy. 
cluster_type: name: envoy.clusters.dynamic_forward_proxy typed_config: - "@type": type.googleapis.com/envoy.config.cluster.dynamic_forward_proxy.v2alpha.ClusterConfig + "@type": type.googleapis.com/envoy.extensions.clusters.dynamic_forward_proxy.v3.ClusterConfig dns_cache_config: name: dynamic_forward_proxy_cache_config dns_lookup_family: V4_ONLY diff --git a/docs/root/configuration/listeners/network_filters/tcp_proxy_filter.rst b/docs/root/configuration/listeners/network_filters/tcp_proxy_filter.rst index e137dfc58ff6a..1822e08715c9d 100644 --- a/docs/root/configuration/listeners/network_filters/tcp_proxy_filter.rst +++ b/docs/root/configuration/listeners/network_filters/tcp_proxy_filter.rst @@ -4,7 +4,7 @@ TCP proxy ========= * TCP proxy :ref:`architecture overview ` -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.network.tcp_proxy*. .. _config_network_filters_tcp_proxy_dynamic_cluster: @@ -26,12 +26,12 @@ TCP proxy can be configured to route to a subset of hosts within an upstream clu To define metadata that a suitable upstream host must match, use one of the following fields: -#. Use :ref:`TcpProxy.metadata_match` +#. Use :ref:`TcpProxy.metadata_match` to define required metadata for a single upstream cluster. -#. Use :ref:`ClusterWeight.metadata_match` +#. Use :ref:`ClusterWeight.metadata_match` to define required metadata for a weighted upstream cluster. -#. Use combination of :ref:`TcpProxy.metadata_match` - and :ref:`ClusterWeight.metadata_match` +#. Use combination of :ref:`TcpProxy.metadata_match` + and :ref:`ClusterWeight.metadata_match` to define required metadata for a weighted upstream cluster (metadata from the latter will be merged on top of the former). .. 
_config_network_filters_tcp_proxy_stats: diff --git a/docs/root/configuration/listeners/network_filters/thrift_proxy_filter.rst b/docs/root/configuration/listeners/network_filters/thrift_proxy_filter.rst index ba7060160a1b1..504f6f8737529 100644 --- a/docs/root/configuration/listeners/network_filters/thrift_proxy_filter.rst +++ b/docs/root/configuration/listeners/network_filters/thrift_proxy_filter.rst @@ -3,23 +3,23 @@ Thrift proxy ============ -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.network.thrift_proxy*. Cluster Protocol Options ------------------------ Thrift connections to upstream hosts can be configured by adding an entry to the appropriate -Cluster's :ref:`extension_protocol_options` +Cluster's :ref:`extension_protocol_options` keyed by `envoy.filters.network.thrift_proxy`. The -:ref:`ThriftProtocolOptions` +:ref:`ThriftProtocolOptions` message describes the available options. Thrift Request Metadata ----------------------- -The :ref:`HEADER transport` -and :ref:`TWITTER protocol` +The :ref:`HEADER transport` +and :ref:`TWITTER protocol` support metadata. In particular, the `Header transport `_ supports informational key/value pairs and the Twitter protocol transmits @@ -29,13 +29,13 @@ Header Transport Metadata ~~~~~~~~~~~~~~~~~~~~~~~~~ Header transport key/value pairs are available for routing as -:ref:`headers `. +:ref:`headers `. Twitter Protocol Metadata ~~~~~~~~~~~~~~~~~~~~~~~~~ Twitter protocol request contexts are converted into headers which are available for routing as -:ref:`headers `. +:ref:`headers `. 
In addition, the following fields are presented as headers: Client Identifier diff --git a/docs/root/configuration/listeners/network_filters/zookeeper_proxy_filter.rst b/docs/root/configuration/listeners/network_filters/zookeeper_proxy_filter.rst index 587ebc7f77306..b0c85ecd7857a 100644 --- a/docs/root/configuration/listeners/network_filters/zookeeper_proxy_filter.rst +++ b/docs/root/configuration/listeners/network_filters/zookeeper_proxy_filter.rst @@ -29,11 +29,11 @@ in the configuration snippet below: - filters: - name: envoy.filters.network.zookeeper_proxy typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.zookeeper_proxy.v1alpha1.ZooKeeperProxy + "@type": type.googleapis.com/envoy.extensions.filters.network.zookeeper_proxy.v3.ZooKeeperProxy stat_prefix: zookeeper - name: envoy.filters.network.tcp_proxy typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.tcp_proxy.v2.TcpProxy + "@type": type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy stat_prefix: tcp cluster: ... @@ -43,8 +43,8 @@ in the configuration snippet below: Statistics ---------- -Every configured ZooKeeper proxy filter has statistics rooted at *zookeeper..* with the -following statistics: +Every configured ZooKeeper proxy filter has statistics rooted at *.zookeeper.*. The +following counters are available: .. csv-table:: :header: Name, Type, Description @@ -103,6 +103,48 @@ following statistics: removewatches_resp, Counter, Number of removewatches responses check_resp, Counter, Number of check responses + +.. _config_network_filters_zookeeper_proxy_latency_stats: + +Per opcode latency statistics +----------------------------- + +The filter will gather latency statistics in the *.zookeeper._response_latency* namespace. +Latency stats are in milliseconds: + +.. 
csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + connect_response_latency, Histogram, Opcode execution time in milliseconds + ping_response_latency, Histogram, Opcode execution time in milliseconds + auth_response_latency, Histogram, Opcode execution time in milliseconds + watch_event, Histogram, Opcode execution time in milliseconds + getdata_response_latency, Histogram, Opcode execution time in milliseconds + create_response_latency, Histogram, Opcode execution time in milliseconds + create2_response_latency, Histogram, Opcode execution time in milliseconds + createcontainer_response_latency, Histogram, Opcode execution time in milliseconds + createttl_response_latency, Histogram, Opcode execution time in milliseconds + setdata_response_latency, Histogram, Opcode execution time in milliseconds + getchildren_response_latency, Histogram, Opcode execution time in milliseconds + getchildren2_response_latency, Histogram, Opcode execution time in milliseconds + getephemerals_response_latency, Histogram, Opcode execution time in milliseconds + getallchildrennumber_response_latency, Histogram, Opcode execution time in milliseconds + remove_response_latency, Histogram, Opcode execution time in milliseconds + exists_response_latency, Histogram, Opcode execution time in milliseconds + getacl_response_latency, Histogram, Opcode execution time in milliseconds + setacl_response_latency, Histogram, Opcode execution time in milliseconds + sync_response_latency, Histogram, Opcode execution time in milliseconds + multi_response_latency, Histogram, Opcode execution time in milliseconds + reconfig_response_latency, Histogram, Opcode execution time in milliseconds + close_response_latency, Histogram, Opcode execution time in milliseconds + setauth_response_latency, Histogram, Opcode execution time in milliseconds + setwatches_response_latency, Histogram, Opcode execution time in milliseconds + checkwatches_response_latency, Histogram, Opcode execution time in 
milliseconds + removewatches_response_latency, Histogram, Opcode execution time in milliseconds + check_response_latency, Histogram, Opcode execution time in milliseconds + + .. _config_network_filters_zookeeper_proxy_dynamic_metadata: Dynamic Metadata diff --git a/docs/root/configuration/listeners/overview.rst b/docs/root/configuration/listeners/overview.rst index 8b4549b456481..06c19f698ae80 100644 --- a/docs/root/configuration/listeners/overview.rst +++ b/docs/root/configuration/listeners/overview.rst @@ -4,4 +4,4 @@ Overview The top level Envoy configuration contains a list of :ref:`listeners `. Each individual listener configuration has the following format: -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` diff --git a/docs/root/configuration/listeners/runtime.rst b/docs/root/configuration/listeners/runtime.rst new file mode 100644 index 0000000000000..b42b6aa5fa3ff --- /dev/null +++ b/docs/root/configuration/listeners/runtime.rst @@ -0,0 +1,8 @@ +.. _config_listeners_runtime: + +Runtime +------- +The following runtime settings are supported: + +envoy.resource_limits.listener..connection_limit + Sets a limit on the number of active connections to the specified listener. diff --git a/docs/root/configuration/listeners/stats.rst b/docs/root/configuration/listeners/stats.rst index 58bc2f57e2977..ff70567aac8ed 100644 --- a/docs/root/configuration/listeners/stats.rst +++ b/docs/root/configuration/listeners/stats.rst @@ -16,8 +16,10 @@ Every listener has a statistics tree rooted at *listener.
.* with the fo downstream_cx_destroy, Counter, Total destroyed connections downstream_cx_active, Gauge, Total active connections downstream_cx_length_ms, Histogram, Connection length milliseconds + downstream_cx_overflow, Counter, Total connections rejected due to enforcement of listener connection limit downstream_pre_cx_timeout, Counter, Sockets that timed out during listener filter processing downstream_pre_cx_active, Gauge, Sockets currently undergoing listener filter processing + global_cx_overflow, Counter, Total connections rejected due to enforcement of the global connection limit no_filter_chain_match, Counter, Total connections that didn't match any filter chain ssl.connection_error, Counter, Total TLS connection errors not including failed certificate verifications ssl.handshake, Counter, Total successful TLS connection handshakes @@ -64,13 +66,15 @@ statistics. Any ``:`` character in the stats name is replaced with ``_``. :header: Name, Type, Description :widths: 1, 1, 2 - listener_added, Counter, Total listeners added (either via static config or LDS) - listener_modified, Counter, Total listeners modified (via LDS) - listener_removed, Counter, Total listeners removed (via LDS) - listener_stopped, Counter, Total listeners stopped - listener_create_success, Counter, Total listener objects successfully added to workers - listener_create_failure, Counter, Total failed listener object additions to workers - total_listeners_warming, Gauge, Number of currently warming listeners - total_listeners_active, Gauge, Number of currently active listeners - total_listeners_draining, Gauge, Number of currently draining listeners + listener_added, Counter, Total listeners added (either via static config or LDS). + listener_modified, Counter, Total listeners modified (via LDS). + listener_removed, Counter, Total listeners removed (via LDS). + listener_stopped, Counter, Total listeners stopped. 
+ listener_create_success, Counter, Total listener objects successfully added to workers. + listener_create_failure, Counter, Total failed listener object additions to workers. + listener_in_place_updated, Counter, Total listener objects created to execute filter chain update path. + total_filter_chains_draining, Gauge, Number of currently draining filter chains. + total_listeners_warming, Gauge, Number of currently warming listeners. + total_listeners_active, Gauge, Number of currently active listeners. + total_listeners_draining, Gauge, Number of currently draining listeners. workers_started, Gauge, A boolean (1 if started and 0 otherwise) that indicates whether listeners have been initialized on workers. diff --git a/docs/root/configuration/listeners/udp_filters/dns_filter.rst b/docs/root/configuration/listeners/udp_filters/dns_filter.rst index 25b667ff40f25..da0780d780f4f 100644 --- a/docs/root/configuration/listeners/udp_filters/dns_filter.rst +++ b/docs/root/configuration/listeners/udp_filters/dns_filter.rst @@ -7,18 +7,24 @@ DNS Filter DNS Filter is under active development and should be considered alpha and not production ready. -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.udp_listener.dns_filter* Overview -------- -The DNS filter allows Envoy to respond to DNS queries as an authoritative server for any configured -domains. The filter's configuration specifies the names and addresses for which Envoy will answer -as well as the configuration needed to send queries externally for unknown domains. +The DNS filter allows Envoy to resolve forward DNS queries as an authoritative server for any +configured domains. The filter's configuration specifies the names and addresses for which Envoy +will answer as well as the configuration needed to send queries externally for unknown domains. + +The filter supports local and external DNS resolution. 
If a lookup for a name does not match a +statically configured domain, or a provisioned cluster name, Envoy can refer the query to an +external resolver for an answer. Users have the option of specifying the DNS servers that Envoy +will use for external resolution. Users can disable external DNS resolution by omitting the +client configuration object. The filter supports :ref:`per-filter configuration -`. +`. An Example configuration follows that illustrates how the filter can be used. Example Configuration @@ -27,24 +33,33 @@ Example Configuration .. code-block:: yaml listener_filters: - name: "envoy.filters.udp.dns_filter" + name: envoy.filters.udp.dns_filter typed_config: - "@type": "type.googleapis.com/envoy.config.filter.udp.dns_filter.v2alpha.DnsFilterConfig" + "@type": "type.googleapis.com/envoy.extensions.filters.udp.dns_filter.v3alpha.DnsFilterConfig" stat_prefix: "dns_filter_prefix" + client_config: + resolution_timeout: 5s + upstream_resolvers: + - socket_address: + address: "8.8.8.8" + port_value: 53 + - socket_address: + address: "8.8.4.4" + port_value: 53 + max_pending_lookups: 256 server_config: inline_dns_table: - external_retry_count: 3 known_suffixes: - - suffix: "domain1.com" - - suffix: "domain2.com" - - suffix: "domain3.com" + - suffix: "domain1.com" + - suffix: "domain2.com" + - suffix: "domain3.com" virtual_domains: - - name: "www.domain1.com" - endpoint: - address_list: - address: - - 10.0.0.1 - - 10.0.0.2 + - name: "www.domain1.com" + endpoint: + address_list: + address: + - 10.0.0.1 + - 10.0.0.2 - name: "www.domain2.com" endpoint: address_list: @@ -54,8 +69,73 @@ Example Configuration endpoint: address_list: address: - - 10.0.3.1 + - 10.0.3.1 + - name: "www.domain4.com" + endpoint: + cluster_name: cluster_0 + + +In this example, Envoy is configured to respond to client queries for four domains. For any +other query, it will forward upstream to external resolvers. The filter will return an address +matching the input query type. 
If the query is for type A records and no A records are configured, +Envoy will return no addresses and set the response code appropriately. Conversely, if there are +matching records for the query type, each configured address is returned. This is also true for +AAAA records. Only A and AAAA records are supported. If the filter parses other queries for other +record types, the filter immediately responds indicating that the query is not supported. The +filter can also redirect a query for a DNS name to the endpoints of a cluster. The last domain +in the configuration demonstrates this. Along with an address list, a cluster name is a valid +endpoint for a DNS name. + +The filter can also consume its domain configuration from an external DNS table. The same entities +appearing in the static configuration can be stored as JSON or YAML in a separate file and referenced +using the :ref:`external_dns_table DataSource ` directive: + +Example External DnsTable Configuration +--------------------------------------- + +.. code-block:: yaml + + listener_filters: + name: "envoy.filters.udp.dns_filter" + typed_config: + '@type': 'type.googleapis.com/envoy.extensions.filters.udp.dns_filter.v3alpha.DnsFilterConfig' + stat_prefix: "my_prefix" + server_config: + external_dns_table: + filename: "/home/ubuntu/configs/dns_table.json" + +In the file, the table can be defined as follows: + +DnsTable JSON Configuration +--------------------------- + +.. code-block:: json + + { + "known_suffixes": [ + { "suffix": "suffix1.com" }, + { "suffix": "suffix2.com" } + ], + "virtual_domains": [ + { + "name": "www.suffix1.com", + "endpoint": { + "address_list": { + "address": [ "10.0.0.1", "10.0.0.2" ] + } + } + }, + { + "name": "www.suffix2.com", + "endpoint": { + "address_list": { + "address": [ "2001:8a:c1::2800:7" ] + } + } + } + ] + } -In this example, Envoy is configured to respond to client queries for three domains. For any -other query, it will forward upstream to external resolvers. 
+By utilizing this configuration, the DNS responses can be configured separately from the Envoy +configuration. diff --git a/docs/root/configuration/listeners/udp_filters/udp_proxy.rst b/docs/root/configuration/listeners/udp_filters/udp_proxy.rst index e5a4bfdb245e4..1ea17b0e830a2 100644 --- a/docs/root/configuration/listeners/udp_filters/udp_proxy.rst +++ b/docs/root/configuration/listeners/udp_filters/udp_proxy.rst @@ -3,11 +3,7 @@ UDP proxy ========= -.. attention:: - - UDP proxy support should be considered alpha and not production ready. - -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.udp_listener.udp_proxy* Overview @@ -22,7 +18,7 @@ Because UDP is not a connection oriented protocol, Envoy must keep track of a cl such that the response datagrams from an upstream server can be routed back to the correct client. Each session is index by the 4-tuple consisting of source IP/port and local IP/port that the datagram is received on. Sessions last until the :ref:`idle timeout -` is reached. +` is reached. Load balancing and unhealthy host handling ------------------------------------------ @@ -69,7 +65,7 @@ server listening on port 1235. listener_filters: name: envoy.filters.udp_listener.udp_proxy typed_config: - '@type': type.googleapis.com/envoy.config.filter.udp.udp_proxy.v2alpha.UdpProxyConfig + '@type': type.googleapis.com/envoy.extensions.filters.udp.udp_proxy.v3.UdpProxyConfig stat_prefix: service cluster: service_udp clusters: diff --git a/docs/root/configuration/observability/access_log/access_log.rst b/docs/root/configuration/observability/access_log/access_log.rst new file mode 100644 index 0000000000000..f1d24152257aa --- /dev/null +++ b/docs/root/configuration/observability/access_log/access_log.rst @@ -0,0 +1,9 @@ +Access Logs +=========== + +.. 
toctree:: + :maxdepth: 2 + + overview + stats + usage diff --git a/docs/root/configuration/observability/access_log/overview.rst b/docs/root/configuration/observability/access_log/overview.rst new file mode 100644 index 0000000000000..33b29018b9127 --- /dev/null +++ b/docs/root/configuration/observability/access_log/overview.rst @@ -0,0 +1,6 @@ +Overview +======== + +* Access logging :ref:`architecture overview ` +* :ref:`Configuration overview ` +* :ref:`v2 API reference ` diff --git a/docs/root/configuration/observability/access_log/stats.rst b/docs/root/configuration/observability/access_log/stats.rst new file mode 100644 index 0000000000000..9ea5d26ccec0b --- /dev/null +++ b/docs/root/configuration/observability/access_log/stats.rst @@ -0,0 +1,35 @@ +.. _config_access_log_stats: + +Statistics +========== + +Currently only the gRPC and file based access logs have statistics. + +gRPC access log statistics +-------------------------- + +The gRPC access log has statistics rooted at *access_logs.grpc_access_log.* with the following statistics: + +.. csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + logs_written, Counter, Total log entries sent to the logger which were not dropped. This does not imply the logs have been flushed to the gRPC endpoint yet. + logs_dropped, Counter, Total log entries dropped due to network or HTTP/2 back up. + + +File access log statistics +-------------------------- + +The file access log has statistics rooted at the *filesystem.* namespace. + +.. 
csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + write_buffered, Counter, Total number of times file data is moved to Envoy's internal flush buffer + write_completed, Counter, Total number of times a file was successfully written + write_failed, Counter, Total number of times an error occurred during a file write operation + flushed_by_timer, Counter, Total number of times internal flush buffers are written to a file due to flush timeout + reopen_failed, Counter, Total number of times a file failed to be opened + write_total_buffered, Gauge, Current total size of internal flush buffer in bytes diff --git a/docs/root/configuration/observability/access_log.rst b/docs/root/configuration/observability/access_log/usage.rst similarity index 93% rename from docs/root/configuration/observability/access_log.rst rename to docs/root/configuration/observability/access_log/usage.rst index 46637c05ec4b5..894c8ac61c362 100644 --- a/docs/root/configuration/observability/access_log.rst +++ b/docs/root/configuration/observability/access_log/usage.rst @@ -9,7 +9,7 @@ Configuration Access logs are configured as part of the :ref:`HTTP connection manager config ` or :ref:`TCP Proxy `. -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` .. _config_access_log_format: @@ -104,6 +104,8 @@ Format dictionaries have the following restrictions: When using the ``typed_json_format``, integer values that exceed :math:`2^{53}` will be represented with reduced precision as they must be converted to floating point numbers. +.. _config_access_log_command_operators: + Command Operators ----------------- @@ -257,7 +259,7 @@ The following command operators are supported: * **UF**: Upstream connection failure in addition to 503 response code. * **UO**: Upstream overflow (:ref:`circuit breaking `) in addition to 503 response code. * **NR**: No :ref:`route configured ` for a given request in addition to 404 response code, or no matching filter chain for a downstream connection. 
- * **URX**: The request was rejected because the :ref:`upstream retry limit (HTTP) ` or :ref:`maximum connect attempts (TCP) ` was reached. + * **URX**: The request was rejected because the :ref:`upstream retry limit (HTTP) ` or :ref:`maximum connect attempts (TCP) ` was reached. HTTP only * **DC**: Downstream connection termination. * **LH**: Local service failed :ref:`health check request ` in addition to 503 response code. @@ -271,9 +273,10 @@ The following command operators are supported: * **UAEX**: The request was denied by the external authorization service. * **RLSE**: The request was rejected because there was an error in rate limit service. * **IH**: The request was rejected because it set an invalid value for a - :ref:`strictly-checked header ` in addition to 400 response code. + :ref:`strictly-checked header ` in addition to 400 response code. * **SI**: Stream idle timeout in addition to 408 response code. * **DPE**: The downstream request had an HTTP protocol error. + * **UMSDR**: The upstream request reached the max stream duration. %ROUTE_NAME% Name of the route. @@ -306,7 +309,7 @@ The following command operators are supported: .. note:: This may not be the physical remote address of the peer if the address has been inferred from - :ref:`proxy proto ` or :ref:`x-forwarded-for + :ref:`proxy proto ` or :ref:`x-forwarded-for `. %DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT% @@ -316,7 +319,7 @@ The following command operators are supported: .. note:: This may not be the physical remote address of the peer if the address has been inferred from - :ref:`proxy proto ` or :ref:`x-forwarded-for + :ref:`proxy proto ` or :ref:`x-forwarded-for `. %DOWNSTREAM_DIRECT_REMOTE_ADDRESS% @@ -326,7 +329,7 @@ The following command operators are supported: .. note:: This is always the physical remote address of the peer even if the downstream remote address has - been inferred from :ref:`proxy proto ` + been inferred from :ref:`proxy proto ` or :ref:`x-forwarded-for `. 
%DOWNSTREAM_DIRECT_REMOTE_ADDRESS_WITHOUT_PORT% @@ -336,7 +339,7 @@ The following command operators are supported: .. note:: This is always the physical remote address of the peer even if the downstream remote address has - been inferred from :ref:`proxy proto ` + been inferred from :ref:`proxy proto ` or :ref:`x-forwarded-for `. %DOWNSTREAM_LOCAL_ADDRESS% @@ -383,7 +386,7 @@ The following command operators are supported: %DYNAMIC_METADATA(NAMESPACE:KEY*):Z% HTTP - :ref:`Dynamic Metadata ` info, + :ref:`Dynamic Metadata ` info, where NAMESPACE is the filter namespace used when setting the metadata, KEY is an optional lookup up key in the namespace with the option of specifying nested keys separated by ':', and Z is an optional parameter denoting string truncation up to Z characters long. Dynamic Metadata @@ -492,6 +495,12 @@ The following command operators are supported: TCP The hex-encoded SHA256 fingerprint of the client certificate used to establish the downstream TLS connection. +%DOWNSTREAM_PEER_FINGERPRINT_1% + HTTP + The hex-encoded SHA1 fingerprint of the client certificate used to establish the downstream TLS connection. + TCP + The hex-encoded SHA1 fingerprint of the client certificate used to establish the downstream TLS connection. + %DOWNSTREAM_PEER_SERIAL% HTTP The serial number of the client certificate used to establish the downstream TLS connection. @@ -518,3 +527,6 @@ The following command operators are supported: %HOSTNAME% The system hostname. + +%LOCAL_REPLY_BODY% + The body text for the requests rejected by Envoy. 
diff --git a/docs/root/configuration/observability/observability.rst b/docs/root/configuration/observability/observability.rst index ae77507e99f3a..1e314881d6bad 100644 --- a/docs/root/configuration/observability/observability.rst +++ b/docs/root/configuration/observability/observability.rst @@ -6,4 +6,4 @@ Observability statistics application_logging - access_log + access_log/access_log.rst diff --git a/docs/root/configuration/observability/statistics.rst b/docs/root/configuration/observability/statistics.rst index a3a3feab1a9c3..b531c0583b61c 100644 --- a/docs/root/configuration/observability/statistics.rst +++ b/docs/root/configuration/observability/statistics.rst @@ -20,32 +20,16 @@ Server related statistics are rooted at *server.* with following statistics: memory_heap_size, Gauge, Current reserved heap size in bytes. New Envoy process heap size on hot restart. memory_physical_size, Gauge, Current estimate of total bytes of the physical memory. New Envoy process physical memory size on hot restart. live, Gauge, "1 if the server is not currently draining, 0 otherwise" - state, Gauge, Current :ref:`State ` of the Server. + state, Gauge, Current :ref:`State ` of the Server. parent_connections, Gauge, Total connections of the old Envoy process on hot restart total_connections, Gauge, Total connections of both new and old Envoy processes - version, Gauge, Integer represented version number based on SCM revision or :ref:`stats_server_version_override ` if set. + version, Gauge, Integer represented version number based on SCM revision or :ref:`stats_server_version_override ` if set. days_until_first_cert_expiring, Gauge, Number of days until the next certificate being managed will expire hot_restart_epoch, Gauge, Current hot restart epoch -- an integer passed via command line flag `--restart-epoch` usually indicating generation. 
hot_restart_generation, Gauge, Current hot restart generation -- like hot_restart_epoch but computed automatically by incrementing from parent. initialization_time_ms, Histogram, Total time taken for Envoy initialization in milliseconds. This is the time from server start-up until the worker threads are ready to accept new connections debug_assertion_failures, Counter, Number of debug assertion failures detected in a release build if compiled with `--define log_debug_assert_in_release=enabled` or zero otherwise + envoy_bug_failures, Counter, Number of envoy bug failures detected in a release build. File or report the issue if this increments as this may be serious. static_unknown_fields, Counter, Number of messages in static configuration with unknown fields dynamic_unknown_fields, Counter, Number of messages in dynamic configuration with unknown fields -.. _filesystem_stats: - -File system ------------ - -Statistics related to file system are emitted in the *filesystem.* namespace. - -.. 
csv-table:: - :header: Name, Type, Description - :widths: 1, 1, 2 - - write_buffered, Counter, Total number of times file data is moved to Envoy's internal flush buffer - write_completed, Counter, Total number of times a file was successfully written - write_failed, Counter, Total number of times an error occurred during a file write operation - flushed_by_timer, Counter, Total number of times internal flush buffers are written to a file due to flush timeout - reopen_failed, Counter, Total number of times a file was failed to be opened - write_total_buffered, Gauge, Current total size of internal flush buffer in bytes diff --git a/docs/root/configuration/operations/overload_manager/overload_manager.rst b/docs/root/configuration/operations/overload_manager/overload_manager.rst index c1eb8fc7a96a3..2dd2e7fe5cc77 100644 --- a/docs/root/configuration/operations/overload_manager/overload_manager.rst +++ b/docs/root/configuration/operations/overload_manager/overload_manager.rst @@ -4,7 +4,7 @@ Overload manager ================ The :ref:`overload manager ` is configured in the Bootstrap -:ref:`overload_manager ` +:ref:`overload_manager ` field. An example configuration of the overload manager is shown below. It shows a configuration to @@ -54,6 +54,30 @@ The following overload actions are supported: envoy.overload_actions.stop_accepting_connections, Envoy will stop accepting new network connections on its configured listeners envoy.overload_actions.shrink_heap, Envoy will periodically try to shrink the heap by releasing free memory to the system +Limiting Active Connections +--------------------------- + +Currently, the only supported way to limit the total number of active connections allowed across all +listeners is via specifying an integer through the runtime key +``overload.global_downstream_max_connections``. 
The connection limit is recommended to be less than +half of the system's file descriptor limit, to account for upstream connections, files, and other +usage of file descriptors. +If the value is unspecified, there is no global limit on the number of active downstream connections +and Envoy will emit a warning indicating this at startup. To disable the warning without setting a +limit on the number of active downstream connections, the runtime value may be set to a very large +limit (~2e9). + +If it is desired to only limit the number of downstream connections for a particular listener, +per-listener limits can be set via the :ref:`listener configuration `. + +One may simultaneously specify both per-listener and global downstream connection limits and the +conditions will be enforced independently. For instance, if it is known that a particular listener +should have a smaller number of open connections than others, one may specify a smaller connection +limit for that specific listener and allow the global limit to enforce resource utilization among +all listeners. + +An example configuration can be found in the :ref:`edge best practices document `. + Statistics ---------- diff --git a/docs/root/configuration/operations/runtime.rst b/docs/root/configuration/operations/runtime.rst index 9546f1970b08a..2e72e52bb953b 100644 --- a/docs/root/configuration/operations/runtime.rst +++ b/docs/root/configuration/operations/runtime.rst @@ -7,7 +7,7 @@ The :ref:`runtime configuration ` specifies a virtual fil contains re-loadable configuration elements. This virtual file system can be realized via a series of local file system, static bootstrap configuration, RTDS and admin console derived overlays. -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` .. _config_virtual_filesystem: @@ -20,11 +20,12 @@ Layering ++++++++ The runtime can be viewed as a virtual file system consisting of multiple layers. 
The :ref:`layered -runtime ` bootstrap configuration specifies this +runtime ` bootstrap configuration specifies this layering. Runtime settings in later layers override earlier layers. A typical configuration might be: -.. code-block:: yaml +.. validated-code-block:: yaml + :type-name: envoy.config.bootstrap.v3.LayeredRuntime layers: - name: static_layer_0 @@ -38,7 +39,7 @@ be: - name: admin_layer_0 admin_layer: {} -In the deprecated :ref:`runtime ` bootstrap +In the deprecated :ref:`runtime ` bootstrap configuration, the layering was implicit and fixed: 1. :ref:`Static bootstrap configuration ` @@ -69,7 +70,7 @@ Static bootstrap ++++++++++++++++ A static base runtime may be specified in the :ref:`bootstrap configuration -` via a :ref:`protobuf JSON representation +` via a :ref:`protobuf JSON representation `. .. _config_runtime_local_disk: @@ -90,9 +91,9 @@ Overrides ~~~~~~~~~ An arbitrary number of disk file system layers can be overlaid in the :ref:`layered -runtime ` bootstrap configuration. +runtime ` bootstrap configuration. -In the deprecated :ref:`runtime ` bootstrap configuration, +In the deprecated :ref:`runtime ` bootstrap configuration, there was a distinguished file system override. Assume that the folder ``/srv/runtime/v1`` points to the actual file system path where global runtime configurations are stored. The following would be a typical configuration setting for runtime: @@ -108,7 +109,7 @@ Where ``/srv/runtime/current`` is a symbolic link to ``/srv/runtime/v1``. Cluster-specific subdirectories ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -In the deprecated :ref:`runtime ` bootstrap configuration, +In the deprecated :ref:`runtime ` bootstrap configuration, the *override_subdirectory* is used along with the :option:`--service-cluster` CLI option. Assume that :option:`--service-cluster` has been set to ``my-cluster``. 
Envoy will first look for the *health_check.min_interval* key in the following full file system path: @@ -118,9 +119,9 @@ that :option:`--service-cluster` has been set to ``my-cluster``. Envoy will firs If found, the value will override any value found in the primary lookup path. This allows the user to customize the runtime values for individual clusters on top of global defaults. -With the :ref:`layered runtime ` bootstrap +With the :ref:`layered runtime ` bootstrap configuration, it is possible to specialize on service cluster via the :ref:`append_service_cluster -` option at any +` option at any disk layer. .. _config_runtime_symbolic_link_swap: @@ -144,10 +145,10 @@ Runtime Discovery Service (RTDS) ++++++++++++++++++++++++++++++++ One or more runtime layers may be specified and delivered by specifying a :ref:`rtds_layer -`. This points the runtime layer at a +`. This points the runtime layer at a regular :ref:`xDS ` endpoint, subscribing to a single xDS resource for the given layer. The resource type for these layers is a :ref:`Runtime message -`. +`. .. _config_runtime_admin: @@ -167,7 +168,7 @@ built into the code, except for any values added via `/runtime_modify`. secured `. At most one admin layer may be specified. If a non-empty :ref:`layered runtime -` bootstrap configuration is specified with an +` bootstrap configuration is specified with an absent admin layer, any mutating admin console actions will elicit a 503 response. .. _config_runtime_atomicity: @@ -201,7 +202,7 @@ modeling a JSON object with the following rules: * Dot separators map to tree edges. * Scalar leaves (integer, strings, booleans, doubles) are represented with their respective JSON type. -* :ref:`FractionalPercent ` is represented with via its +* :ref:`FractionalPercent ` is represented with via its `canonical JSON encoding `_. 
An example representation of a setting for the *health_check.min_interval* key in YAML is: @@ -274,6 +275,7 @@ The file system runtime provider emits some statistics in the *runtime.* namespa admin_overrides_active, Gauge, 1 if any admin overrides are active otherwise 0 deprecated_feature_use, Counter, Total number of times deprecated features were used. Detailed information about the feature used will be logged to warning logs in the form "Using deprecated option 'X' from file Y". + deprecated_feature_seen_since_process_start, Gauge, Number of times deprecated features were used. This is not carried over during hot restarts. load_error, Counter, Total number of load attempts that resulted in an error in any layer load_success, Counter, Total number of load attempts that were successful at all layers num_keys, Gauge, Number of keys currently loaded diff --git a/docs/root/configuration/operations/tools/router_check.rst b/docs/root/configuration/operations/tools/router_check.rst index 1752b084b55f7..5ac7902eeaeae 100644 --- a/docs/root/configuration/operations/tools/router_check.rst +++ b/docs/root/configuration/operations/tools/router_check.rst @@ -10,7 +10,7 @@ Route table check tool file. The following specifies input to the route table check tool. The route table check tool checks if -the route returned by a :ref:`router ` matches what is expected. +the route returned by a :ref:`router ` matches what is expected. The tool can be used to check cluster name, virtual cluster name, virtual host name, manual path rewrite, manual host rewrite, path redirect, and header field matches. Extensions for other test cases can be added. 
Details about installing the tool diff --git a/docs/root/configuration/other_features/rate_limit.rst b/docs/root/configuration/other_features/rate_limit.rst index a4c456257a2ad..d3503a899878f 100644 --- a/docs/root/configuration/other_features/rate_limit.rst +++ b/docs/root/configuration/other_features/rate_limit.rst @@ -7,12 +7,12 @@ The :ref:`rate limit service ` configuration sp limit service Envoy should talk to when it needs to make global rate limit decisions. If no rate limit service is configured, a "null" service will be used which will always return OK if called. -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` gRPC service IDL ---------------- Envoy expects the rate limit service to support the gRPC IDL specified in -:ref:`rls.proto `. See the IDL documentation +:ref:`rls.proto `. See the IDL documentation for more information on how the API works. See Lyft's reference implementation `here `_. diff --git a/docs/root/configuration/other_protocols/dubbo_filters/router_filter.rst b/docs/root/configuration/other_protocols/dubbo_filters/router_filter.rst index b51a7dd455b4e..615a0b03da271 100644 --- a/docs/root/configuration/other_protocols/dubbo_filters/router_filter.rst +++ b/docs/root/configuration/other_protocols/dubbo_filters/router_filter.rst @@ -5,7 +5,7 @@ Router The router filter implements Dubbo forwarding. It will be used in almost all Dubbo proxying scenarios. The filter's main job is to follow the instructions specified in the configured -:ref:`route table `. +:ref:`route table `. -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.dubbo.router*. 
diff --git a/docs/root/configuration/other_protocols/thrift_filters/rate_limit_filter.rst b/docs/root/configuration/other_protocols/thrift_filters/rate_limit_filter.rst index 4fa27e08febd1..366059c65a0d6 100644 --- a/docs/root/configuration/other_protocols/thrift_filters/rate_limit_filter.rst +++ b/docs/root/configuration/other_protocols/thrift_filters/rate_limit_filter.rst @@ -4,12 +4,12 @@ Rate limit ========== * Global rate limiting :ref:`architecture overview ` -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.thrift.rate_limit*. The Thrift rate limit filter will call the rate limit service when the request's route has one or more :ref:`rate limit configurations -` that +` that match the filter's stage setting. More than one configuration can apply to a request. Each configuration results in a descriptor being sent to the rate limit service. @@ -18,7 +18,7 @@ application exception indicating an internal error is returned. If there is an error in calling the rate limit service or it returns an error and :ref:`failure_mode_deny -` is set to +` is set to true, an application exception indicating an internal error is returned. .. _config_thrift_filters_rate_limit_stats: @@ -37,5 +37,5 @@ The filter outputs statistics in the *cluster..ratelimit.* over_limit, Counter, Total over limit responses from the rate limit service. failure_mode_allowed, Counter, "Total requests that were error(s) but were allowed through because of :ref:`failure_mode_deny - ` set to + ` set to false." 
diff --git a/docs/root/configuration/other_protocols/thrift_filters/router_filter.rst b/docs/root/configuration/other_protocols/thrift_filters/router_filter.rst index c9ced73dd5d3a..22ce7bcbf1376 100644 --- a/docs/root/configuration/other_protocols/thrift_filters/router_filter.rst +++ b/docs/root/configuration/other_protocols/thrift_filters/router_filter.rst @@ -5,9 +5,9 @@ Router The router filter implements Thrift forwarding. It will be used in almost all Thrift proxying scenarios. The filter's main job is to follow the instructions specified in the configured -:ref:`route table `. +:ref:`route table `. -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.thrift.router*. Statistics diff --git a/docs/root/configuration/overview/bootstrap.rst b/docs/root/configuration/overview/bootstrap.rst index 03019bfc86aee..ec75f71f7a4a8 100644 --- a/docs/root/configuration/overview/bootstrap.rst +++ b/docs/root/configuration/overview/bootstrap.rst @@ -14,12 +14,12 @@ the :option:`-c` flag, i.e.: where the extension reflects the underlying config representation. -The :ref:`Bootstrap ` message is the root of the -configuration. A key concept in the :ref:`Bootstrap ` +The :ref:`Bootstrap ` message is the root of the +configuration. A key concept in the :ref:`Bootstrap ` message is the distinction between static and dynamic resources. Resources such -as a :ref:`Listener ` or :ref:`Cluster -` may be supplied either statically in -:ref:`static_resources ` or have +as a :ref:`Listener ` or :ref:`Cluster +` may be supplied either statically in +:ref:`static_resources ` or have an xDS service such as :ref:`LDS ` or :ref:`CDS ` configured in -:ref:`dynamic_resources `. +:ref:`dynamic_resources `. 
diff --git a/docs/root/configuration/overview/examples.rst b/docs/root/configuration/overview/examples.rst index f26345a3138ef..bc8124c488823 100644 --- a/docs/root/configuration/overview/examples.rst +++ b/docs/root/configuration/overview/examples.rst @@ -9,7 +9,8 @@ Static A minimal fully static bootstrap config is provided below: -.. code-block:: yaml +.. validated-code-block:: yaml + :type-name: envoy.config.bootstrap.v3.Bootstrap admin: access_log_path: /tmp/admin_access.log @@ -25,7 +26,7 @@ A minimal fully static bootstrap config is provided below: - filters: - name: envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager stat_prefix: ingress_http codec_type: AUTO route_config: @@ -58,10 +59,11 @@ Mostly static with dynamic EDS A bootstrap config that continues from the above example with :ref:`dynamic endpoint discovery ` via an -:ref:`EDS` gRPC management server listening +:ref:`EDS` gRPC management server listening on 127.0.0.1:5678 is provided below: -.. code-block:: yaml +.. 
validated-code-block:: yaml + :type-name: envoy.config.bootstrap.v3.Bootstrap admin: access_log_path: /tmp/admin_access.log @@ -77,7 +79,7 @@ on 127.0.0.1:5678 is provided below: - filters: - name: envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager stat_prefix: ingress_http codec_type: AUTO route_config: @@ -100,8 +102,8 @@ on 127.0.0.1:5678 is provided below: api_config_source: api_type: GRPC grpc_services: - envoy_grpc: - cluster_name: xds_cluster + - envoy_grpc: + cluster_name: xds_cluster - name: xds_cluster connect_timeout: 0.25s type: STATIC @@ -125,18 +127,18 @@ Notice above that *xds_cluster* is defined to point Envoy at the management serv an otherwise completely dynamic configurations, some static resources need to be defined to point Envoy at its xDS management server(s). -It's important to set appropriate :ref:`TCP Keep-Alive options ` +It's important to set appropriate :ref:`TCP Keep-Alive options ` in the `tcp_keepalive` block. This will help detect TCP half open connections to the xDS management server and re-establish a full connection. In the above example, the EDS management server could then return a proto encoding of a -:ref:`DiscoveryResponse `: +:ref:`DiscoveryResponse `: .. code-block:: yaml version_info: "0" resources: - - "@type": type.googleapis.com/envoy.api.v2.ClusterLoadAssignment + - "@type": type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment cluster_name: some_service endpoints: - lb_endpoints: @@ -159,7 +161,8 @@ A fully dynamic bootstrap configuration, in which all resources other than those belonging to the management server are discovered via xDS is provided below: -.. code-block:: yaml +.. 
validated-code-block:: yaml + :type-name: envoy.config.bootstrap.v3.Bootstrap admin: access_log_path: /tmp/admin_access.log @@ -171,14 +174,14 @@ below: api_config_source: api_type: GRPC grpc_services: - envoy_grpc: - cluster_name: xds_cluster + - envoy_grpc: + cluster_name: xds_cluster cds_config: api_config_source: api_type: GRPC grpc_services: - envoy_grpc: - cluster_name: xds_cluster + - envoy_grpc: + cluster_name: xds_cluster static_resources: clusters: @@ -207,7 +210,7 @@ The management server could respond to LDS requests with: version_info: "0" resources: - - "@type": type.googleapis.com/envoy.api.v2.Listener + - "@type": type.googleapis.com/envoy.config.listener.v3.Listener name: listener_0 address: socket_address: @@ -217,7 +220,7 @@ The management server could respond to LDS requests with: - filters: - name: envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager stat_prefix: ingress_http codec_type: AUTO rds: @@ -226,8 +229,8 @@ The management server could respond to LDS requests with: api_config_source: api_type: GRPC grpc_services: - envoy_grpc: - cluster_name: xds_cluster + - envoy_grpc: + cluster_name: xds_cluster http_filters: - name: envoy.filters.http.router @@ -237,7 +240,7 @@ The management server could respond to RDS requests with: version_info: "0" resources: - - "@type": type.googleapis.com/envoy.api.v2.RouteConfiguration + - "@type": type.googleapis.com/envoy.config.route.v3.RouteConfiguration name: local_route virtual_hosts: - name: local_service @@ -252,7 +255,7 @@ The management server could respond to CDS requests with: version_info: "0" resources: - - "@type": type.googleapis.com/envoy.api.v2.Cluster + - "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster name: some_service connect_timeout: 0.25s 
lb_policy: ROUND_ROBIN @@ -262,8 +265,8 @@ The management server could respond to CDS requests with: api_config_source: api_type: GRPC grpc_services: - envoy_grpc: - cluster_name: xds_cluster + - envoy_grpc: + cluster_name: xds_cluster The management server could respond to EDS requests with: @@ -271,7 +274,7 @@ The management server could respond to EDS requests with: version_info: "0" resources: - - "@type": type.googleapis.com/envoy.api.v2.ClusterLoadAssignment + - "@type": type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment cluster_name: some_service endpoints: - lb_endpoints: diff --git a/docs/root/configuration/overview/extension.rst b/docs/root/configuration/overview/extension.rst index e131a7515cc3a..dab59eaf6b977 100644 --- a/docs/root/configuration/overview/extension.rst +++ b/docs/root/configuration/overview/extension.rst @@ -15,7 +15,7 @@ filter configuration snippet is permitted: name: front-http-proxy typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager stat_prefix: ingress_http codec_type: AUTO rds: @@ -29,7 +29,7 @@ filter configuration snippet is permitted: http_filters: - name: front-router typed_config: - "@type": type.googleapis.com/envoy.config.filter.http.router.v2.Router + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router dynamic_stats: true In case the control plane lacks the schema definitions for an extension, @@ -43,7 +43,7 @@ follows: name: front-http-proxy typed_config: "@type": type.googleapis.com/udpa.type.v1.TypedStruct - type_url: type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + type_url: type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager value: stat_prefix: ingress_http codec_type: AUTO @@ -59,5 +59,27 @@ follows: 
- name: front-router typed_config: "@type": type.googleapis.com/udpa.type.v1.TypedStruct - type_url: type.googleapis.com/envoy.config.filter.http.router.v2.Router + type_url: type.googleapis.com/envoy.extensions.filters.http.router.v3Router +Discovery service +^^^^^^^^^^^^^^^^^ + +Extension configuration can be supplied dynamically from a :ref:`an xDS +management server` using :ref:`ExtensionConfiguration discovery +service`. +The name field in the extension configuration acts as the resource identifier. +For example, HTTP connection manager supports :ref:`dynamic filter +re-configuration` +for HTTP filters. + +Extension config discovery service has a :ref:`statistics +` tree rooted at +*.extension_config_discovery..*. In addition +to the common subscription statistics, it also provides the following: + +.. csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + config_reload, Counter, Total number of successful configuration updates + config_fail, Counter, Total number of failed configuration updates diff --git a/docs/root/configuration/overview/mgmt_server.rst b/docs/root/configuration/overview/mgmt_server.rst index 3004ff329163a..68d1f3b1d958d 100644 --- a/docs/root/configuration/overview/mgmt_server.rst +++ b/docs/root/configuration/overview/mgmt_server.rst @@ -29,6 +29,7 @@ Management Server has a statistics tree rooted at *control_plane.* with the foll connected_state, Gauge, A boolean (1 for connected and 0 for disconnected) that indicates the current connection state with management server rate_limit_enforced, Counter, Total number of times rate limit was enforced for management server requests pending_requests, Gauge, Total number of pending requests when the rate limit was enforced + identifier, TextReadout, The identifier of the control plane instance that sent the last discovery response .. _subscription_statistics: @@ -46,11 +47,12 @@ The following statistics are generated for all subscriptions. 
:widths: 1, 1, 2 config_reload, Counter, Total API fetches that resulted in a config reload due to a different config - init_fetch_timeout, Counter, Total :ref:`initial fetch timeouts ` + init_fetch_timeout, Counter, Total :ref:`initial fetch timeouts ` update_attempt, Counter, Total API fetches attempted update_success, Counter, Total API fetches completed successfully update_failure, Counter, Total API fetches that failed because of network errors update_rejected, Counter, Total API fetches that failed because of schema/validation errors update_time, Gauge, Timestamp of the last successful API fetch attempt as milliseconds since the epoch. Refreshed even after a trivial configuration reload that contained no configuration changes. version, Gauge, Hash of the contents from the last successful API fetch + version_text, TextReadout, The version text from the last successful API fetch control_plane.connected_state, Gauge, A boolean (1 for connected and 0 for disconnected) that indicates the current connection state with management server diff --git a/docs/root/configuration/overview/xds_api.rst b/docs/root/configuration/overview/xds_api.rst index dc28631d87c58..428575afda8fa 100644 --- a/docs/root/configuration/overview/xds_api.rst +++ b/docs/root/configuration/overview/xds_api.rst @@ -5,8 +5,8 @@ xDS API endpoints An xDS management server will implement the below endpoints as required for gRPC and/or REST serving. In both streaming gRPC and -REST-JSON cases, a :ref:`DiscoveryRequest ` is sent and a -:ref:`DiscoveryResponse ` received following the +REST-JSON cases, a :ref:`DiscoveryRequest ` is sent and a +:ref:`DiscoveryResponse ` received following the :ref:`xDS protocol `. Below we describe endpoints for the v2 and v3 transport API versions. @@ -19,7 +19,7 @@ gRPC streaming endpoints .. http:post:: /envoy.api.v2.ClusterDiscoveryService/StreamClusters .. 
http:post:: /envoy.service.cluster.v3.ClusterDiscoveryService/StreamClusters -See :repo:`cds.proto ` for the service definition. This is used by Envoy +See :repo:`cds.proto ` for the service definition. This is used by Envoy as a client when .. code-block:: yaml @@ -33,14 +33,14 @@ as a client when cluster_name: some_xds_cluster is set in the :ref:`dynamic_resources -` of the :ref:`Bootstrap -` config. +` of the :ref:`Bootstrap +` config. .. http:post:: /envoy.api.v2.EndpointDiscoveryService/StreamEndpoints .. http:post:: /envoy.service.endpoint.v3.EndpointDiscoveryService/StreamEndpoints See :repo:`eds.proto -` +` for the service definition. This is used by Envoy as a client when .. code-block:: yaml @@ -54,14 +54,14 @@ for the service definition. This is used by Envoy as a client when cluster_name: some_xds_cluster is set in the :ref:`eds_cluster_config -` field of the :ref:`Cluster -` config. +` field of the :ref:`Cluster +` config. .. http:post:: /envoy.api.v2.ListenerDiscoveryService/StreamListeners .. http:post:: /envoy.service.listener.v3.ListenerDiscoveryService/StreamListeners See :repo:`lds.proto -` +` for the service definition. This is used by Envoy as a client when .. code-block:: yaml @@ -75,14 +75,14 @@ for the service definition. This is used by Envoy as a client when cluster_name: some_xds_cluster is set in the :ref:`dynamic_resources -` of the :ref:`Bootstrap -` config. +` of the :ref:`Bootstrap +` config. .. http:post:: /envoy.api.v2.RouteDiscoveryService/StreamRoutes .. http:post:: /envoy.service.route.v3.RouteDiscoveryService/StreamRoutes See :repo:`rds.proto -` +` for the service definition. This is used by Envoy as a client when .. code-block:: yaml @@ -97,15 +97,15 @@ for the service definition. This is used by Envoy as a client when cluster_name: some_xds_cluster is set in the :ref:`rds -` field +` field of the :ref:`HttpConnectionManager -` config. +` config. .. http:post:: /envoy.api.v2.ScopedRoutesDiscoveryService/StreamScopedRoutes .. 
http:post:: /envoy.service.route.v3.ScopedRoutesDiscoveryService/StreamScopedRoutes See :repo:`srds.proto -` +` for the service definition. This is used by Envoy as a client when .. code-block:: yaml @@ -121,15 +121,15 @@ for the service definition. This is used by Envoy as a client when cluster_name: some_xds_cluster is set in the :ref:`scoped_routes -` +` field of the :ref:`HttpConnectionManager -` config. +` config. .. http:post:: /envoy.service.discovery.v2.SecretDiscoveryService/StreamSecrets .. http:post:: /envoy.service.secret.v3.SecretDiscoveryService/StreamSecrets See :repo:`sds.proto -` +` for the service definition. This is used by Envoy as a client when .. code-block:: yaml @@ -143,14 +143,14 @@ for the service definition. This is used by Envoy as a client when envoy_grpc: cluster_name: some_xds_cluster -is set inside a :ref:`SdsSecretConfig ` message. This message -is used in various places such as the :ref:`CommonTlsContext `. +is set inside a :ref:`SdsSecretConfig ` message. This message +is used in various places such as the :ref:`CommonTlsContext `. .. http:post:: /envoy.service.discovery.v2.RuntimeDiscoveryService/StreamRuntime .. http:post:: /envoy.service.runtime.v3.RuntimeDiscoveryService/StreamRuntime See :repo:`rtds.proto -` +` for the service definition. This is used by Envoy as a client when .. code-block:: yaml @@ -164,7 +164,7 @@ for the service definition. This is used by Envoy as a client when envoy_grpc: cluster_name: some_xds_cluster -is set inside the :ref:`rtds_layer ` +is set inside the :ref:`rtds_layer ` field. REST endpoints @@ -174,7 +174,7 @@ REST endpoints .. http:post:: /v3/discovery:clusters See :repo:`cds.proto -` +` for the service definition. This is used by Envoy as a client when .. code-block:: yaml @@ -186,14 +186,14 @@ for the service definition. This is used by Envoy as a client when cluster_names: [some_xds_cluster] is set in the :ref:`dynamic_resources -` of the :ref:`Bootstrap -` config. 
+` of the :ref:`Bootstrap +` config. .. http:post:: /v2/discovery:endpoints .. http:post:: /v3/discovery:endpoints See :repo:`eds.proto -` +` for the service definition. This is used by Envoy as a client when .. code-block:: yaml @@ -205,14 +205,14 @@ for the service definition. This is used by Envoy as a client when cluster_names: [some_xds_cluster] is set in the :ref:`eds_cluster_config -` field of the :ref:`Cluster -` config. +` field of the :ref:`Cluster +` config. .. http:post:: /v2/discovery:listeners .. http:post:: /v3/discovery:listeners See :repo:`lds.proto -` +` for the service definition. This is used by Envoy as a client when .. code-block:: yaml @@ -224,14 +224,14 @@ for the service definition. This is used by Envoy as a client when cluster_names: [some_xds_cluster] is set in the :ref:`dynamic_resources -` of the :ref:`Bootstrap -` config. +` of the :ref:`Bootstrap +` config. .. http:post:: /v2/discovery:routes .. http:post:: /v3/discovery:routes See :repo:`rds.proto -` +` for the service definition. This is used by Envoy as a client when .. code-block:: yaml @@ -244,8 +244,8 @@ for the service definition. This is used by Envoy as a client when cluster_names: [some_xds_cluster] is set in the :ref:`rds -` field of the :ref:`HttpConnectionManager -` config. +` field of the :ref:`HttpConnectionManager +` config. .. note:: @@ -288,7 +288,7 @@ document. The gRPC endpoint is: .. http:post:: /envoy.service.discovery.v3.AggregatedDiscoveryService/StreamAggregatedResources See :repo:`discovery.proto -` +` for the service definition. This is used by Envoy as a client when .. code-block:: yaml @@ -301,8 +301,8 @@ for the service definition. This is used by Envoy as a client when cluster_name: some_ads_cluster is set in the :ref:`dynamic_resources -` of the :ref:`Bootstrap -` config. +` of the :ref:`Bootstrap +` config. When this is set, any of the configuration sources :ref:`above ` can be set to use the ADS channel. 
For example, a LDS config could be changed from @@ -336,7 +336,7 @@ churn, these state-of-the-world updates can be cumbersome. As of 1.12.0, Envoy supports a "delta" variant of xDS (including ADS), where updates only contain resources added/changed/removed. Delta xDS is a gRPC (only) protocol. Delta uses different request/response protos than SotW (DeltaDiscovery{Request,Response}); see -:repo:`discovery.proto `. Conceptually, delta should be viewed as +:repo:`discovery.proto `. Conceptually, delta should be viewed as a new xDS transport type: there is static, filesystem, REST, gRPC-SotW, and now gRPC-delta. (Envoy's implementation of the gRPC-SotW/delta client happens to share most of its code between the two, and something similar is likely possible on the server side. However, they are in fact @@ -344,7 +344,7 @@ incompatible protocols. :ref:`The specification of the delta xDS protocol's behavior is here `.) To use delta, simply set the api_type field of your -:ref:`ApiConfigSource ` proto(s) to DELTA_GRPC. +:ref:`ApiConfigSource ` proto(s) to DELTA_GRPC. That works for both xDS and ADS; for ADS, it's the api_type field of -:ref:`DynamicResources.ads_config `, +:ref:`DynamicResources.ads_config `, as described in the previous section. diff --git a/docs/root/configuration/security/secret.rst b/docs/root/configuration/security/secret.rst index b1b3e1ec33fc5..060fcb79b53fc 100644 --- a/docs/root/configuration/security/secret.rst +++ b/docs/root/configuration/security/secret.rst @@ -4,7 +4,7 @@ Secret discovery service (SDS) ============================== TLS certificates, the secrets, can be specified in the bootstrap.static_resource -:ref:`secrets `. +:ref:`secrets `. But they can also be fetched remotely by secret discovery service (SDS). The most important benefit of SDS is to simplify the certificate management. Without this feature, in k8s deployment, certificates must be created as secrets and mounted into the proxy containers. 
If certificates are expired, the secrets need to be updated and the proxy containers need to be re-deployed. With SDS, a central SDS server will push certificates to all Envoy instances. If certificates are expired, the server just pushes new certificates to Envoy instances, Envoy will use the new ones right away without re-deployment. @@ -23,15 +23,15 @@ The connection between Envoy proxy and SDS server has to be secure. One option i SDS server ---------- -A SDS server needs to implement the gRPC service :repo:`SecretDiscoveryService `. +A SDS server needs to implement the gRPC service :repo:`SecretDiscoveryService `. It follows the same protocol as other :ref:`xDS `. SDS Configuration ----------------- -:ref:`SdsSecretConfig ` is used to specify the secret. Its field *name* is a required field. If its *sds_config* field is empty, the *name* field specifies the secret in the bootstrap static_resource :ref:`secrets `. Otherwise, it specifies the SDS server as :ref:`ConfigSource `. Only gRPC is supported for the SDS service so its *api_config_source* must specify a **grpc_service**. +:ref:`SdsSecretConfig ` is used to specify the secret. Its field *name* is a required field. If its *sds_config* field is empty, the *name* field specifies the secret in the bootstrap static_resource :ref:`secrets `. Otherwise, it specifies the SDS server as :ref:`ConfigSource `. Only gRPC is supported for the SDS service so its *api_config_source* must specify a **grpc_service**. -*SdsSecretConfig* is used in two fields in :ref:`CommonTlsContext `. The first field is *tls_certificate_sds_secret_configs* to use SDS to get :ref:`TlsCertificate `. The second field is *validation_context_sds_secret_config* to use SDS to get :ref:`CertificateValidationContext `. +*SdsSecretConfig* is used in two fields in :ref:`CommonTlsContext `. The first field is *tls_certificate_sds_secret_configs* to use SDS to get :ref:`TlsCertificate `. 
The second field is *validation_context_sds_secret_config* to use SDS to get :ref:`CertificateValidationContext `. Example one: static_resource ----------------------------- @@ -68,7 +68,7 @@ This example show how to configure secrets in the static_resource: transport_socket: name: envoy.transport_sockets.tls typed_config: - "@type": type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext common_tls_context: tls_certificate_sds_secret_configs: - name: client_cert @@ -78,7 +78,7 @@ This example show how to configure secrets in the static_resource: transport_socket: name: envoy.transport_sockets.tls typed_config: - "@type": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext common_tls_context: tls_certificate_sds_secret_configs: - name: server_cert @@ -112,7 +112,7 @@ This example shows how to configure secrets fetched from remote SDS servers: transport_socket: name: envoy.transport_sockets.tls typed_config: - "@type": type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext common_tls_context: - tls_certificate: certificate_chain: @@ -137,7 +137,7 @@ This example shows how to configure secrets fetched from remote SDS servers: transport_socket: name: envoy.transport_sockets.tls typed_config: - "@type": type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext common_tls_context: tls_certificate_sds_secret_configs: - name: client_cert @@ -153,7 +153,7 @@ This example shows how to configure secrets fetched from remote SDS servers: - transport_socket: name: envoy.transport_sockets.tls typed_config: - "@type": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext + "@type": 
type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext common_tls_context: tls_certificate_sds_secret_configs: - name: server_cert @@ -205,7 +205,7 @@ In contrast, :ref:`sds_server_example` requires a restart to reload xDS certific transport_socket: name: "envoy.transport_sockets.tls" typed_config: - "@type": "type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext" + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext" common_tls_context: tls_certificate_sds_secret_configs: sds_config: @@ -219,7 +219,7 @@ Paths to client certificate, including client's certificate chain and private ke .. code-block:: yaml resources: - - "@type": "type.googleapis.com/envoy.api.v2.auth.Secret" + - "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.Secret" tls_certificate: certificate_chain: filename: /certs/sds_cert.pem @@ -231,7 +231,7 @@ Path to CA certificate bundle for validating the xDS server certificate is given .. code-block:: yaml resources: - - "@type": "type.googleapis.com/envoy.api.v2.auth.Secret" + - "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.Secret" validation_context: trusted_ca: filename: /certs/cacert.pem diff --git a/docs/root/configuration/upstream/cluster_manager/cds.rst b/docs/root/configuration/upstream/cluster_manager/cds.rst index dcea74d79710c..9d747f4c8349c 100644 --- a/docs/root/configuration/upstream/cluster_manager/cds.rst +++ b/docs/root/configuration/upstream/cluster_manager/cds.rst @@ -12,7 +12,7 @@ clusters depending on what is required. Any clusters that are statically defined within the Envoy configuration cannot be modified or removed via the CDS API. 
-* :ref:`v2 CDS API ` +* :ref:`v3 CDS API ` Statistics ---------- diff --git a/docs/root/configuration/upstream/cluster_manager/cluster_circuit_breakers.rst b/docs/root/configuration/upstream/cluster_manager/cluster_circuit_breakers.rst index 53a08ca497e56..6c0ca34773c07 100644 --- a/docs/root/configuration/upstream/cluster_manager/cluster_circuit_breakers.rst +++ b/docs/root/configuration/upstream/cluster_manager/cluster_circuit_breakers.rst @@ -4,14 +4,14 @@ Circuit breaking ================ * Circuit Breaking :ref:`architecture overview `. -* :ref:`v2 API documentation `. +* :ref:`v3 API documentation `. The following is an example circuit breaker configuration: .. code-block:: yaml circuit_breakers: - thresholds: + thresholds: - priority: "DEFAULT" max_requests: 75 max_pending_requests: 35 @@ -26,5 +26,5 @@ Runtime All circuit breaking settings are runtime configurable for all defined priorities based on cluster name. They follow the following naming scheme ``circuit_breakers...``. ``cluster_name`` is the name field in each cluster's configuration, which is set in the Envoy -:ref:`config file `. Available runtime settings will override +:ref:`config file `. Available runtime settings will override settings set in the Envoy config file. diff --git a/docs/root/configuration/upstream/cluster_manager/cluster_hc.rst b/docs/root/configuration/upstream/cluster_manager/cluster_hc.rst index 683a89cad8004..5c73695597f27 100644 --- a/docs/root/configuration/upstream/cluster_manager/cluster_hc.rst +++ b/docs/root/configuration/upstream/cluster_manager/cluster_hc.rst @@ -6,7 +6,7 @@ Health checking * Health checking :ref:`architecture overview `. * If health checking is configured for a cluster, additional statistics are emitted. They are documented :ref:`here `. -* :ref:`v2 API documentation `. +* :ref:`v3 API documentation `. .. 
_config_cluster_manager_cluster_hc_tcp_health_checking: diff --git a/docs/root/configuration/upstream/cluster_manager/cluster_runtime.rst b/docs/root/configuration/upstream/cluster_manager/cluster_runtime.rst index 34e0cb9058ebe..ae138196d1417 100644 --- a/docs/root/configuration/upstream/cluster_manager/cluster_runtime.rst +++ b/docs/root/configuration/upstream/cluster_manager/cluster_runtime.rst @@ -9,18 +9,18 @@ Active health checking ---------------------- health_check.min_interval - Min value for the health checking :ref:`interval `. + Min value for the health checking :ref:`interval `. Default value is 1 ms. The effective health check interval will be no less than 1ms. The health checking interval will be between *min_interval* and *max_interval*. health_check.max_interval - Max value for the health checking :ref:`interval `. + Max value for the health checking :ref:`interval `. Default value is MAX_INT. The effective health check interval will be no less than 1ms. The health checking interval will be between *min_interval* and *max_interval*. health_check.verify_cluster What % of health check requests will be verified against the :ref:`expected upstream service - ` as the :ref:`health check filter + ` as the :ref:`health check filter ` will write the remote service cluster into the response. .. _config_cluster_manager_cluster_runtime_outlier_detection: @@ -30,101 +30,101 @@ Outlier detection See the outlier detection :ref:`architecture overview ` for more information on outlier detection. 
The runtime parameters supported by outlier detection are the -same as the :ref:`static configuration parameters `, namely: +same as the :ref:`static configuration parameters `, namely: outlier_detection.consecutive_5xx :ref:`consecutive_5XX - ` + ` setting in outlier detection outlier_detection.consecutive_gateway_failure :ref:`consecutive_gateway_failure - ` + ` setting in outlier detection outlier_detection.consecutive_local_origin_failure :ref:`consecutive_local_origin_failure - ` + ` setting in outlier detection outlier_detection.interval_ms :ref:`interval_ms - ` + ` setting in outlier detection outlier_detection.base_ejection_time_ms :ref:`base_ejection_time_ms - ` + ` setting in outlier detection outlier_detection.max_ejection_percent :ref:`max_ejection_percent - ` + ` setting in outlier detection outlier_detection.enforcing_consecutive_5xx :ref:`enforcing_consecutive_5xx - ` + ` setting in outlier detection outlier_detection.enforcing_consecutive_gateway_failure :ref:`enforcing_consecutive_gateway_failure - ` + ` setting in outlier detection outlier_detection.enforcing_consecutive_local_origin_failure :ref:`enforcing_consecutive_local_origin_failure - ` + ` setting in outlier detection outlier_detection.enforcing_success_rate :ref:`enforcing_success_rate - ` + ` setting in outlier detection outlier_detection.enforcing_local_origin_success_rate :ref:`enforcing_local_origin_success_rate - ` + ` setting in outlier detection outlier_detection.success_rate_minimum_hosts :ref:`success_rate_minimum_hosts - ` + ` setting in outlier detection outlier_detection.success_rate_request_volume :ref:`success_rate_request_volume - ` + ` setting in outlier detection outlier_detection.success_rate_stdev_factor :ref:`success_rate_stdev_factor - ` + ` setting in outlier detection outlier_detection.enforcing_failure_percentage :ref:`enforcing_failure_percentage - ` + ` setting in outlier detection outlier_detection.enforcing_failure_percentage_local_origin 
:ref:`enforcing_failure_percentage_local_origin - ` + ` setting in outlier detection outlier_detection.failure_percentage_request_volume :ref:`failure_percentage_request_volume - ` + ` setting in outlier detection outlier_detection.failure_percentage_minimum_hosts :ref:`failure_percentage_minimum_hosts - ` + ` setting in outlier detection outlier_detection.failure_percentage_threshold :ref:`failure_percentage_threshold - ` + ` setting in outlier detection Core @@ -135,7 +135,7 @@ upstream.healthy_panic_threshold Defaults to 50%. upstream.use_http2 - Whether the cluster utilizes the *http2* :ref:`protocol options ` + Whether the cluster utilizes the *http2* :ref:`protocol options ` if configured. Set to 0 to disable HTTP/2 even if the feature is configured. Defaults to enabled. .. _config_cluster_manager_cluster_runtime_zone_routing: @@ -155,19 +155,19 @@ Circuit breaking ---------------- circuit_breakers...max_connections - :ref:`Max connections circuit breaker setting ` + :ref:`Max connections circuit breaker setting ` circuit_breakers...max_pending_requests - :ref:`Max pending requests circuit breaker setting ` + :ref:`Max pending requests circuit breaker setting ` circuit_breakers...max_requests - :ref:`Max requests circuit breaker setting ` + :ref:`Max requests circuit breaker setting ` circuit_breakers...max_retries - :ref:`Max retries circuit breaker setting ` + :ref:`Max retries circuit breaker setting ` circuit_breakers...retry_budget.budget_percent - :ref:`Max retries circuit breaker setting ` + :ref:`Max retries circuit breaker setting ` circuit_breakers...retry_budget.min_retry_concurrency - :ref:`Max retries circuit breaker setting ` + :ref:`Max retries circuit breaker setting ` diff --git a/docs/root/configuration/upstream/cluster_manager/cluster_stats.rst b/docs/root/configuration/upstream/cluster_manager/cluster_stats.rst index e58a1d32c90c5..5d956c28d2b35 100644 --- a/docs/root/configuration/upstream/cluster_manager/cluster_stats.rst +++ 
b/docs/root/configuration/upstream/cluster_manager/cluster_stats.rst @@ -63,19 +63,20 @@ Every cluster has a statistics tree rooted at *cluster..* with the followi upstream_rq_total, Counter, Total requests upstream_rq_active, Gauge, Total active requests upstream_rq_pending_total, Counter, Total requests pending a connection pool connection - upstream_rq_pending_overflow, Counter, Total requests that overflowed connection pool circuit breaking and were failed - upstream_rq_pending_failure_eject, Counter, Total requests that were failed due to a connection pool connection failure + upstream_rq_pending_overflow, Counter, Total requests that overflowed connection pool or requests (mainly for HTTP/2) circuit breaking and were failed + upstream_rq_pending_failure_eject, Counter, Total requests that were failed due to a connection pool connection failure or remote connection termination upstream_rq_pending_active, Gauge, Total active requests pending a connection pool connection upstream_rq_cancelled, Counter, Total requests cancelled before obtaining a connection pool connection upstream_rq_maintenance_mode, Counter, Total requests that resulted in an immediate 503 due to :ref:`maintenance mode` upstream_rq_timeout, Counter, Total requests that timed out waiting for a response + upstream_rq_max_duration_reached, Counter, Total requests closed due to max duration reached upstream_rq_per_try_timeout, Counter, Total requests that hit the per try timeout upstream_rq_rx_reset, Counter, Total requests that were reset remotely upstream_rq_tx_reset, Counter, Total requests that were reset locally upstream_rq_retry, Counter, Total request retries upstream_rq_retry_limit_exceeded, Counter, Total requests not retried due to exceeding :ref:`the configured number of maximum retries ` upstream_rq_retry_success, Counter, Total request retry successes - upstream_rq_retry_overflow, Counter, Total requests not retried due to circuit breaking or exceeding the :ref:`retry budget ` + 
upstream_rq_retry_overflow, Counter, Total requests not retried due to circuit breaking or exceeding the :ref:`retry budget ` upstream_flow_control_paused_reading_total, Counter, Total number of times flow control paused reading from upstream upstream_flow_control_resumed_reading_total, Counter, Total number of times flow control resumed reading from upstream upstream_flow_control_backed_up_total, Counter, Total number of times the upstream connection backed up and paused reads from downstream @@ -88,9 +89,9 @@ Every cluster has a statistics tree rooted at *cluster..* with the followi membership_total, Gauge, Current cluster membership total retry_or_shadow_abandoned, Counter, Total number of times shadowing or retry buffering was canceled due to buffer limits config_reload, Counter, Total API fetches that resulted in a config reload due to a different config - update_attempt, Counter, Total cluster membership update attempts - update_success, Counter, Total cluster membership update successes - update_failure, Counter, Total cluster membership update failures + update_attempt, Counter, Total attempted cluster membership updates by service discovery + update_success, Counter, Total successful cluster membership updates by service discovery + update_failure, Counter, Total failed cluster membership updates by service discovery update_empty, Counter, Total cluster membership updates ending with empty cluster load assignment and continuing with previous config update_no_rebuild, Counter, Total successful cluster membership updates that didn't result in any cluster load balancing structure rebuilds version, Gauge, Hash of the contents from the last successful API fetch @@ -134,16 +135,16 @@ statistics will be rooted at *cluster..outlier_detection.* and contain the ejections_overflow, Counter, Number of ejections aborted due to the max ejection % ejections_enforced_consecutive_5xx, Counter, Number of enforced consecutive 5xx ejections ejections_detected_consecutive_5xx, 
Counter, Number of detected consecutive 5xx ejections (even if unenforced) - ejections_enforced_success_rate, Counter, Number of enforced success rate outlier ejections. Exact meaning of this counter depends on :ref:`outlier_detection.split_external_local_origin_errors` config item. Refer to :ref:`Outlier Detection documentation` for details. - ejections_detected_success_rate, Counter, Number of detected success rate outlier ejections (even if unenforced). Exact meaning of this counter depends on :ref:`outlier_detection.split_external_local_origin_errors` config item. Refer to :ref:`Outlier Detection documentation` for details. + ejections_enforced_success_rate, Counter, Number of enforced success rate outlier ejections. Exact meaning of this counter depends on :ref:`outlier_detection.split_external_local_origin_errors` config item. Refer to :ref:`Outlier Detection documentation` for details. + ejections_detected_success_rate, Counter, Number of detected success rate outlier ejections (even if unenforced). Exact meaning of this counter depends on :ref:`outlier_detection.split_external_local_origin_errors` config item. Refer to :ref:`Outlier Detection documentation` for details. 
ejections_enforced_consecutive_gateway_failure, Counter, Number of enforced consecutive gateway failure ejections ejections_detected_consecutive_gateway_failure, Counter, Number of detected consecutive gateway failure ejections (even if unenforced) ejections_enforced_consecutive_local_origin_failure, Counter, Number of enforced consecutive local origin failure ejections ejections_detected_consecutive_local_origin_failure, Counter, Number of detected consecutive local origin failure ejections (even if unenforced) ejections_enforced_local_origin_success_rate, Counter, Number of enforced success rate outlier ejections for locally originated failures ejections_detected_local_origin_success_rate, Counter, Number of detected success rate outlier ejections for locally originated failures (even if unenforced) - ejections_enforced_failure_percentage, Counter, Number of enforced failure percentage outlier ejections. Exact meaning of this counter depends on :ref:`outlier_detection.split_external_local_origin_errors` config item. Refer to :ref:`Outlier Detection documentation` for details. - ejections_detected_failure_percentage, Counter, Number of detected failure percentage outlier ejections (even if unenforced). Exact meaning of this counter depends on :ref:`outlier_detection.split_external_local_origin_errors` config item. Refer to :ref:`Outlier Detection documentation` for details. + ejections_enforced_failure_percentage, Counter, Number of enforced failure percentage outlier ejections. Exact meaning of this counter depends on :ref:`outlier_detection.split_external_local_origin_errors` config item. Refer to :ref:`Outlier Detection documentation` for details. + ejections_detected_failure_percentage, Counter, Number of detected failure percentage outlier ejections (even if unenforced). Exact meaning of this counter depends on :ref:`outlier_detection.split_external_local_origin_errors` config item. Refer to :ref:`Outlier Detection documentation` for details. 
ejections_enforced_failure_percentage_local_origin, Counter, Number of enforced failure percentage outlier ejections for locally originated failures ejections_detected_failure_percentage_local_origin, Counter, Number of detected failure percentage outlier ejections for locally originated failures (even if unenforced) ejections_total, Counter, Deprecated. Number of ejections due to any outlier type (even if unenforced) @@ -175,7 +176,7 @@ Circuit breakers statistics will be rooted at *cluster..circuit_breakers.< Timeout budget statistics ------------------------- -If :ref:`timeout budget statistic tracking ` is +If :ref:`timeout budget statistic tracking ` is turned on, statistics will be added to *cluster.* and contain the following: .. csv-table:: @@ -313,3 +314,20 @@ Statistics for monitoring effective host weights when using the min_entries_per_host, Gauge, Minimum number of entries for a single host max_entries_per_host, Gauge, Maximum number of entries for a single host + +.. _config_cluster_manager_cluster_stats_request_response_sizes: + +Request Response Size statistics +-------------------------------- + +If :ref:`request response size statistics ` are tracked, +statistics will be added to *cluster.* and contain the following: + +.. 
csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + upstream_rq_headers_size, Histogram, Request headers size in bytes per upstream + upstream_rq_body_size, Histogram, Request body size in bytes per upstream + upstream_rs_headers_size, Histogram, Response headers size in bytes per upstream + upstream_rs_body_size, Histogram, Response body size in bytes per upstream diff --git a/docs/root/configuration/upstream/cluster_manager/overview.rst b/docs/root/configuration/upstream/cluster_manager/overview.rst index d54fc5806c154..899c16b27bb83 100644 --- a/docs/root/configuration/upstream/cluster_manager/overview.rst +++ b/docs/root/configuration/upstream/cluster_manager/overview.rst @@ -2,4 +2,4 @@ Overview ======== * Cluster manager :ref:`architecture overview ` -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` diff --git a/docs/root/configuration/upstream/health_checkers/redis.rst b/docs/root/configuration/upstream/health_checkers/redis.rst index 6fb7112327b60..03ad07741e673 100644 --- a/docs/root/configuration/upstream/health_checkers/redis.rst +++ b/docs/root/configuration/upstream/health_checkers/redis.rst @@ -8,10 +8,10 @@ which checks Redis upstream hosts. It sends a Redis PING command and expect a PO Redis server can respond with anything other than PONG to cause an immediate active health check failure. Optionally, Envoy can perform EXISTS on a user-specified key. If the key does not exist it is considered a passing health check. This allows the user to mark a Redis instance for maintenance by setting the -specified :ref:`key ` to any value and waiting +specified :ref:`key ` to any value and waiting for traffic to drain. -An example setting for :ref:`custom_health_check ` as a +An example setting for :ref:`custom_health_check ` as a Redis health checker is shown below: .. 
code-block:: yaml @@ -19,7 +19,7 @@ Redis health checker is shown below: custom_health_check: name: envoy.health_checkers.redis typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.redis_proxy.v2.RedisProxy + "@type": type.googleapis.com/envoy.extensions.filters.network.redis_proxy.v3.RedisProxy key: foo -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` diff --git a/docs/root/faq/api/control_plane.rst b/docs/root/faq/api/control_plane.rst index bc717e3939132..c489eab5648ca 100644 --- a/docs/root/faq/api/control_plane.rst +++ b/docs/root/faq/api/control_plane.rst @@ -1,9 +1,12 @@ +.. _control_plane: + How do I support multiple xDS API major versions in my control plane? ===================================================================== Where possible, it is highly recommended that control planes support a single major version at a given point in time for simplicity. This works in situations where control planes need to only -support a window of Envoy versions which spans less than a year. +support a window of Envoy versions which spans less than a year. Temporary support for multiple +versions during rollout in this scenario is described :ref:`here `. For control planes that need to support a wider range of versions, there are a few approaches: diff --git a/docs/root/faq/api/control_plane_version_support.rst b/docs/root/faq/api/control_plane_version_support.rst new file mode 100644 index 0000000000000..599ec8d7d8d8d --- /dev/null +++ b/docs/root/faq/api/control_plane_version_support.rst @@ -0,0 +1,36 @@ +.. _control_plane_version_support: + +Which xDS transport and resource versions does my control plane need to support? +================================================================================ + +If a control plane is serving a well known set of clients at a given API major version, it only +needs to support that version (both transport and resource version). 
However, even in this +relatively basic scenario, if the set of clients straddles a major version drop or the control plane +wishes to move from v2 to v3, there are considerations around rollout of client and server binaries. + +One approach to this problem is to add temporary support to the management server for both v2 and v3 +transport versions (see https://github.com/envoyproxy/go-control-plane). For resources, messages +are binary compatible modulo deprecated or new fields between API major versions. If the control +plane no longer emits resources with deprecated fields, this allows for a trivial replacement of +type URL based on the requested resource from the client to serve the same resource for v2 and v3. A +typical rollout sequence might look like: + +1. Clients with a mix of v2 and v3 support are in operation, with a v2 management server. The + client bootstraps will reference v2 API transport endpoints. + +2. A management server with dual v2/v3 API support is rolled out. Both v2 and v3 transport endpoints + are supported, while a trivial type URL replacement in the returned resource is sufficient for + matching the requested v2 or v3 resource type URL with the existing v2 resource in the control + plane. When returning resources with embedded `ConfigSource` messages pointing at xDS resources + for a v3 request, it will be necessary to set the `transport_api_version` and + `resource_api_version` to v3. No deprecated v2 fields or new v3 fields can be used at this point. + +3. Client bootstraps are upgraded to v3 API transport endpoints and v3 API resource versions. + +4. Support for v2 is removed in the management server. The management server moves to v3 exclusively + internally and can support newer fields. + +If you are operating a managed control plane as-a-service, you will likely need to support a wide +range of client versions. In this scenario, you will require long term support for multiple major +API transport and resource versions. 
Strategies for managing this support are described :ref:`here +`. diff --git a/docs/root/faq/api/envoy_v2_support.rst b/docs/root/faq/api/envoy_v2_support.rst new file mode 100644 index 0000000000000..f9a2f97786461 --- /dev/null +++ b/docs/root/faq/api/envoy_v2_support.rst @@ -0,0 +1,6 @@ +How long will the v2 APIs be supported? +======================================= + +The v2 xDS APIs are deprecated and will be removed from Envoy at the end of 2020, as per the +:repo:`API versioning policy `. + diff --git a/docs/root/faq/api/incremental.rst b/docs/root/faq/api/incremental.rst new file mode 100644 index 0000000000000..e6d4d3f11bbee --- /dev/null +++ b/docs/root/faq/api/incremental.rst @@ -0,0 +1,11 @@ +What is the status of incremental xDS support? +============================================== + +The :ref:`incremental xDS ` protocol is designed to improve efficiency, +scalability and functional use of xDS updates via two mechanisms: + +* Delta xDS. Resource deltas are delivered rather than state-of-the-world. +* On-demand xDS. Resource can be lazy loaded depending on request contents. + +Currently, all xDS protocols (including ADS) support delta xDS. On-demand xDS is supported for +:ref:`VHDS ` only. diff --git a/docs/root/faq/api/why_versioning.rst b/docs/root/faq/api/why_versioning.rst index 5a0b027e8bf6e..917a16ae2afec 100644 --- a/docs/root/faq/api/why_versioning.rst +++ b/docs/root/faq/api/why_versioning.rst @@ -12,3 +12,24 @@ We had previously put in place policies around :repo:`breaking changes ` takes this a step further, articulating a guaranteed multi-year support window for APIs that provides control plane authors a predictable clock when considering support for a range of Envoy versions. 
+ +For the v3 xDS APIs, a brief list of the key improvements that were made with a clean break from v2: + +* Packages organization was improved to reflect a more logical grouping of related APIs: + + - The legacy `envoy.api.v2` tree was eliminated, with protos moved to their logical groupings, + e.g. `envoy.config.core.v3`, `envoy.server.listener.v3`. + - All packages are now versioned with a `vN` at the end. This allows for type-level identification + of major version. + - xDS service endpoints/transport and configuration are split between `envoy.service` and + `envoy.config`. + - Extensions now reflect the Envoy source tree layout under `envoy.extensions`. +* `std::regex` regular expressions were dropped from the API, in favor of RE2. The former have dangerous + security implications. +* `google.protobuf.Struct` configuration of extensions was dropped from the API, in favor of + typed configuration. This provides for better support for multiple instances of extensions, e.g. + in filter chains, and more flexible naming of extension instances. +* Over 60 deprecated fields were removed from the API. +* Tooling and processes were established for API versioning support. This has now been reflected in + the bootstrap `Node`, providing a long term notion of API support that control planes can depend + upon for client negotiation. diff --git a/docs/root/faq/configuration/deprecation.rst b/docs/root/faq/configuration/deprecation.rst index c71ee63645a84..7d503bde03c11 100644 --- a/docs/root/faq/configuration/deprecation.rst +++ b/docs/root/faq/configuration/deprecation.rst @@ -11,5 +11,5 @@ annotated in the API proto itself and explained in detail in the For the first 3 months following deprecation, use of deprecated fields will result in a logged warning and incrementing the :ref:`deprecated_feature_use ` counter. 
After that point, the field will be annotated as fatal-by-default and further use of the field -will will be treated as invalid configuration unless +will be treated as invalid configuration unless :ref:`runtime overrides ` are employed to re-enable use. diff --git a/docs/root/faq/configuration/flow_control.rst b/docs/root/faq/configuration/flow_control.rst index 7d3f9e775fed7..9bbce146a95ec 100644 --- a/docs/root/faq/configuration/flow_control.rst +++ b/docs/root/faq/configuration/flow_control.rst @@ -1,3 +1,5 @@ +.. _faq_flow_control: + How do I configure flow control? ================================ @@ -10,9 +12,9 @@ response body must be buffered and exceeds the limit, Envoy will increment the (if headers have already been sent downstream) or send a 500 response. There are three knobs for configuring Envoy flow control: -:ref:`listener limits `, -:ref:`cluster limits ` and -:ref:`http2 stream limits ` +:ref:`listener limits `, +:ref:`cluster limits ` and +:ref:`http2 stream limits ` The listener limits apply to how much raw data will be read per read() call from downstream, as well as how much data may be buffered in userspace between Envoy @@ -22,7 +24,7 @@ The listener limits are also propogated to the HttpConnectionManager, and applie basis to HTTP/1.1 L7 buffers described below. As such they limit the size of HTTP/1 requests and response bodies that can be buffered. For HTTP/2, as many streams can be multiplexed over one TCP connection, the L7 and L4 buffer limits can be tuned separately, and the configuration option -:ref:`http2 stream limits ` +:ref:`http2 stream limits ` is applied to all of the L7 buffers. Note that for both HTTP/1 and HTTP/2 Envoy can and will proxy arbitrarily large bodies on routes where all L7 filters are streaming, but many filters such as the transcoder or buffer filters require the full HTTP body to @@ -33,7 +35,7 @@ well as how much data may be buffered in userspace between Envoy and upstream. 
The following code block shows how to adjust all three fields mentioned above, though generally the only one which needs to be amended is the listener -:ref:`per_connection_buffer_limit_bytes ` +:ref:`per_connection_buffer_limit_bytes ` .. code-block:: yaml @@ -48,7 +50,7 @@ the only one which needs to be amended is the listener filters: name: envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager http2_protocol_options: initial_stream_window_size: 65535 route_config: {} @@ -60,7 +62,12 @@ the only one which needs to be amended is the listener name: cluster_0 connect_timeout: 5s per_connection_buffer_limit_bytes: 1024 - hosts: - socket_address: - address: '::1' - port_value: 46685 + load_assignment: + cluster_name: some_service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: ::1 + port_value: 46685 diff --git a/docs/root/faq/configuration/resource_limits.rst b/docs/root/faq/configuration/resource_limits.rst new file mode 100644 index 0000000000000..214096486eb6f --- /dev/null +++ b/docs/root/faq/configuration/resource_limits.rst @@ -0,0 +1,20 @@ +.. _faq_resource_limits: + +How does Envoy prevent file descriptor exhaustion? +================================================== + +:ref:`Per-listener connection limits ` may be configured as an upper bound +on the number of active connections a particular listener will accept. The listener may accept more +connections than the configured value on the order of the number of worker threads. + +In addition, one may configure a :ref:`global limit ` on the number of +connections that will apply across all listeners. 
+ +On Unix-based systems, it is recommended to keep the sum of all connection limits less than half of +the system's file descriptor limit to account for upstream connections, files, and other usage of +file descriptors. + +.. note:: + + This per-listener connection limiting will eventually be handled by the :ref:`overload manager + `. diff --git a/docs/root/faq/configuration/sni.rst b/docs/root/faq/configuration/sni.rst index 1e8da3f9c6d5c..7ef61ef565e32 100644 --- a/docs/root/faq/configuration/sni.rst +++ b/docs/root/faq/configuration/sni.rst @@ -3,7 +3,7 @@ How do I configure SNI for listeners? ===================================== -`SNI `_ is only supported in the :ref:`v2 +`SNI `_ is only supported in the :ref:`v3 configuration/API `. .. attention:: @@ -26,7 +26,7 @@ The following is a YAML example of the above requirement. transport_socket: name: envoy.transport_sockets.tls typed_config: - "@type": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext common_tls_context: tls_certificates: - certificate_chain: { filename: "example_com_cert.pem" } @@ -34,7 +34,7 @@ The following is a YAML example of the above requirement. filters: - name: envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager stat_prefix: ingress_http route_config: virtual_hosts: @@ -48,7 +48,7 @@ The following is a YAML example of the above requirement. 
transport_socket: name: envoy.transport_sockets.tls typed_config: - "@type": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext common_tls_context: tls_certificates: - certificate_chain: { filename: "api_example_com_cert.pem" } @@ -56,7 +56,7 @@ The following is a YAML example of the above requirement. filters: - name: envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager stat_prefix: ingress_http route_config: virtual_hosts: @@ -70,9 +70,9 @@ The following is a YAML example of the above requirement. How do I configure SNI for clusters? ==================================== -For clusters, a fixed SNI can be set in :ref:`UpstreamTlsContext `. +For clusters, a fixed SNI can be set in :ref:`UpstreamTlsContext `. To derive SNI from HTTP `host` or `:authority` header, turn on -:ref:`auto_sni ` to override the fixed SNI in +:ref:`auto_sni ` to override the fixed SNI in `UpstreamTlsContext`. If upstream will present certificates with the hostname in SAN, turn on -:ref:`auto_san_validation ` too. +:ref:`auto_san_validation ` too. It still needs a trust CA in validation context in `UpstreamTlsContext` for trust anchor. diff --git a/docs/root/faq/configuration/timeouts.rst b/docs/root/faq/configuration/timeouts.rst index 4cdca3a57167e..3c87cca44d9db 100644 --- a/docs/root/faq/configuration/timeouts.rst +++ b/docs/root/faq/configuration/timeouts.rst @@ -19,16 +19,16 @@ Connection timeouts Connection timeouts apply to the entire HTTP connection and all streams the connection carries. 
-* The HTTP protocol :ref:`idle timeout ` +* The HTTP protocol :ref:`idle timeout ` is defined in a generic message used by both the HTTP connection manager as well as upstream cluster HTTP connections. The idle timeout is the time at which a downstream or upstream connection will be terminated if there are no active streams. The default idle timeout if not otherwise specified is *1 hour*. To modify the idle timeout for downstream connections use the :ref:`common_http_protocol_options - ` + ` field in the HTTP connection manager configuration. To modify the idle timeout for upstream connections use the - :ref:`common_http_protocol_options ` field + :ref:`common_http_protocol_options ` field in the cluster configuration. Stream timeouts @@ -39,7 +39,7 @@ an HTTP/2 and HTTP/3 concept, however internally Envoy maps HTTP/1 requests to s context request/stream is interchangeable. * The HTTP connection manager :ref:`request_timeout - ` + ` is the amount of time the connection manager will allow for the *entire request stream* to be received from the client. @@ -49,20 +49,19 @@ context request/stream is interchangeable. (requests that never end). See the stream idle timeout that follows. However, if using the :ref:`buffer filter `, it is recommended to configure this timeout. * The HTTP connection manager :ref:`stream_idle_timeout - ` + ` is the amount of time that the connection manager will allow a stream to exist with no upstream or downstream activity. The default stream idle timeout is *5 minutes*. This timeout is strongly - recommended for streaming APIs (requests or responses that never end). -* The HTTP protocol :ref:`max_stream_duration ` + recommended for all requests (not just streaming requests/responses) as it additionally defends + against an HTTP/2 peer that does not open stream window once an entire response has been buffered + to be sent to a downstream client. 
+* The HTTP protocol :ref:`max_stream_duration ` is defined in a generic message used by the HTTP connection manager. The max stream duration is the maximum time that a stream's lifetime will span. You can use this functionality when you want to reset HTTP request/response streams periodically. You can't use :ref:`request_timeout - ` + ` in this situation because this timer will be disarmed if a response header is received on the request/response streams. - - .. attention:: - - The current implementation implements this timeout on downstream connections only. + This timeout is available on both upstream and downstream connections. Route timeouts ^^^^^^^^^^^^^^ @@ -70,7 +69,7 @@ Route timeouts Envoy supports additional stream timeouts at the route level, as well as overriding some of the stream timeouts already introduced above. -* A route :ref:`timeout ` is the amount of time that +* A route :ref:`timeout ` is the amount of time that Envoy will wait for the upstream to respond with a complete response. *This timeout does not start until the entire downstream request stream has been received*. @@ -79,11 +78,11 @@ stream timeouts already introduced above. This timeout defaults to *15 seconds*, however, it is not compatible with streaming responses (responses that never end), and will need to be disabled. Stream idle timeouts should be used in the case of streaming APIs as described elsewhere on this page. -* The route :ref:`idle_timeout ` allows overriding +* The route :ref:`idle_timeout ` allows overriding of the HTTP connection manager :ref:`stream_idle_timeout - ` + ` and does the same thing. -* The route :ref:`per_try_timeout ` can be +* The route :ref:`per_try_timeout ` can be configured when using retries so that individual tries using a shorter timeout than the overall request timeout described above. This timeout only applies before any part of the response is sent to the downstream, which normally happens after the upstream has sent response headers. 
@@ -93,7 +92,7 @@ stream timeouts already introduced above. TCP --- -* The cluster :ref:`connect_timeout ` specifies the amount +* The cluster :ref:`connect_timeout ` specifies the amount of time Envoy will wait for an upstream TCP connection to be established. This timeout has no default, but is required in the configuration. @@ -101,6 +100,6 @@ TCP For TLS connections, the connect timeout includes the TLS handshake. * The TCP proxy :ref:`idle_timeout - ` + ` is the amount of time that the TCP proxy will allow a connection to exist with no upstream or downstream activity. The default idle timeout if not otherwise specified is *1 hour*. diff --git a/docs/root/faq/configuration/zone_aware_routing.rst b/docs/root/faq/configuration/zone_aware_routing.rst index 65c7967e0142b..78b502a02ea16 100644 --- a/docs/root/faq/configuration/zone_aware_routing.rst +++ b/docs/root/faq/configuration/zone_aware_routing.rst @@ -12,8 +12,8 @@ This section describes the specific configuration for the Envoy running side by These are the requirements: * Envoy must be launched with :option:`--service-zone` option which defines the zone for the current host. -* Both definitions of the source and the destination clusters must have :ref:`EDS ` type. -* :ref:`local_cluster_name ` must be set to the +* Both definitions of the source and the destination clusters must have :ref:`EDS ` type. +* :ref:`local_cluster_name ` must be set to the source cluster. Only essential parts are listed in the configuration below for the cluster manager. @@ -35,7 +35,7 @@ Envoy configuration on the destination service ---------------------------------------------- It's not necessary to run Envoy side by side with the destination service, but it's important that each host in the destination cluster registers with the discovery service :ref:`queried by the source service Envoy -`. :ref:`Zone ` +`. :ref:`Zone ` information must be available as part of that response. 
Only zone related data is listed in the response below. diff --git a/docs/root/faq/debugging/why_is_envoy_404ing_connect_requests.rst b/docs/root/faq/debugging/why_is_envoy_404ing_connect_requests.rst new file mode 100644 index 0000000000000..ef7ce6fddfbbb --- /dev/null +++ b/docs/root/faq/debugging/why_is_envoy_404ing_connect_requests.rst @@ -0,0 +1,6 @@ +.. _faq_why_is_envoy_404ing_connect_requests: + +Why is Envoy sending 404s to CONNECT requests? +============================================== + +Envoy's default matchers match based on host and path. Because CONNECT requests (generally) do not have a path, most matchers will fail to match CONNECT requests, and Envoy will send a 404 because the route is not found. The solution for HTTP/1.1 CONNECT requests is to use a :ref:`connect_matcher ` as described in the CONNECT section of the :ref:`upgrade documentation`. diff --git a/docs/root/faq/debugging/why_is_envoy_sending_413s.rst b/docs/root/faq/debugging/why_is_envoy_sending_413s.rst new file mode 100644 index 0000000000000..39769282ea0a5 --- /dev/null +++ b/docs/root/faq/debugging/why_is_envoy_sending_413s.rst @@ -0,0 +1,6 @@ +.. _faq_why_is_envoy_sending_413: + +Why is Envoy sending 413s? +========================== + +Envoy by default imposes limits to how much it will buffer for a given request. Generally, Envoy filters are designed to be streaming, and will pass data from downstream to upstream, or will simply pause processing while waiting for an external event (e.g. doing auth checks). Some filters, for example the buffer filter, require buffering the full request or response. If a request body is too large to buffer, but buffering is required by the filter, Envoy will send a 413. The buffer limits can be increased at the risk of making OOMs more possible. Please see the :ref:`flow control docs ` for details.
diff --git a/docs/root/faq/debugging/why_is_envoy_sending_http2_resets.rst b/docs/root/faq/debugging/why_is_envoy_sending_http2_resets.rst new file mode 100644 index 0000000000000..8427ed9ba7615 --- /dev/null +++ b/docs/root/faq/debugging/why_is_envoy_sending_http2_resets.rst @@ -0,0 +1,24 @@ +.. _why_is_envoy_sending_http2_resets: + +Why is Envoy sending HTTP/2 resets? +=================================== + +The HTTP/2 reset path is mostly governed by the codec Envoy uses to frame HTTP/2, nghttp2. nghttp2 has +extremely good adherence to the HTTP/2 spec, but as many clients are not exactly as compliant, this +mismatch can cause unexpected resets. Unfortunately, unlike debugging the +:ref:`internal response path `, Envoy has limited visibility into +the specific reason nghttp2 reset a given stream. + +If you have a reproducible failure case, you can run it against a debug Envoy with "-l trace" to get +detailed nghttp2 error logs, which often indicate which header failed compliance checks. Alternately, +if you can afford to run with "-l trace" on a machine encountering the errors, you can look for logs +from the file "source/common/http/http2/codec_impl.cc" of the form +`invalid http2: [nghttp2 error detail]` +for example: +`invalid http2: Invalid HTTP header field was received: frame type: 1, stream: 1, name: [content-length], value: [3]` + +You can also check :ref:`HTTP/2 stats `: in many cases where +Envoy resets streams, for example if there are more headers than allowed by configuration or flood +detection kicks in, http2 counters will be incremented as the streams are reset. + + diff --git a/docs/root/faq/debugging/why_is_envoy_sending_internal_responses.rst b/docs/root/faq/debugging/why_is_envoy_sending_internal_responses.rst new file mode 100644 index 0000000000000..eaaeca31c2907 --- /dev/null +++ b/docs/root/faq/debugging/why_is_envoy_sending_internal_responses.rst @@ -0,0 +1,9 @@ +..
_why_is_envoy_sending_internal_responses: + +Why is Envoy sending internal responses? +======================================== + +One of the easiest ways to get an understanding of why Envoy sends a given local response is to turn on trace logging. If you can run your instance with “-l trace” you will slow Envoy down significantly, but get detailed information on various events in the lifetime of each stream and connection. Any time Envoy sends an internally generated response it will log to the *debug* level “Sending local reply with details [unique reason]” which gives you information about why the local response was sent. Each individual response detail is used at one point in the code base, be it a codec validation check or a failed route match. + +If turning on debug logging is not plausible, the response details can be added to the access logs using *%RESPONSE_CODE_DETAILS%*, and again it will let you pinpoint the exact reason a given response was generated. + diff --git a/docs/root/faq/debugging/why_is_my_route_not_found.rst b/docs/root/faq/debugging/why_is_my_route_not_found.rst new file mode 100644 index 0000000000000..17b6447c2f7eb --- /dev/null +++ b/docs/root/faq/debugging/why_is_my_route_not_found.rst @@ -0,0 +1,33 @@ +.. _why_is_my_route_not_found: + +Why is my route not found? +========================== + +Once you've drilled down into Envoy responses and discovered Envoy generating local responses with the message +"Sending local reply with details route_not_found" the next question is *why*? + +Often you can look at your route configuration and the headers sent, and see what is missing. +One often overlooked problem is host:port matching. If your route configuration matches the domain +www.host.com but the client is sending requests to www.host.com:443, it will not match. + +If this is the problem you are encountering you can solve it one of two ways. First by changing your +configuration to match host:port pairs, going from + +..
code-block:: yaml + + domains: + - "www.host.com" + +to + +.. code-block:: yaml + + domains: + - "www.host.com" + - "www.host.com:80" + - "www.host.com:443" + +The other is to strip ports entirely using :ref:`stripping port from host header `. Note that this will only strip port 80 from insecure requests and 443 from secure requests. It does +not just strip ports when routes are matched, but changes
the host sent downstream to also not include the port. + diff --git a/docs/root/faq/extensions/contract.rst b/docs/root/faq/extensions/contract.rst new file mode 100644 index 0000000000000..35e9a05f06ba3 --- /dev/null +++ b/docs/root/faq/extensions/contract.rst @@ -0,0 +1,48 @@ +.. _faq_filter_contract: + +Is there a contract my HTTP filter must adhere to? +-------------------------------------------------- + +* Headers encoding/decoding + + * During encoding/decoding of headers if a filter returns ``FilterHeadersStatus::StopIteration``, + the processing can be resumed if ``encodeData()``/``decodeData()`` return + ``FilterDataStatus::Continue`` or by explicitly calling + ``continueEncoding()``/``continueDecoding()``. + + * During encoding/decoding of headers if a filter returns + ``FilterHeadersStatus::StopAllIterationAndBuffer`` or + ``FilterHeadersStatus::StopAllIterationAndWatermark``, the processing can be resumed by calling + ``continueEncoding()``/``continueDecoding()``. + + * A filter's ``decodeHeaders()`` implementation must not return + ``FilterHeadersStatus::ContinueAndEndStream`` when called with ``end_stream`` set to *true*. In this case + ``FilterHeadersStatus::Continue`` should be returned. + + * A filter's ``encode100ContinueHeaders()`` must return ``FilterHeadersStatus::Continue`` or + ``FilterHeadersStatus::StopIteration``.
+ +* Data encoding/decoding + + * During encoding/decoding of data if a filter returns + ``FilterDataStatus::StopIterationAndBuffer``, ``FilterDataStatus::StopIterationAndWatermark``, + or ``FilterDataStatus::StopIterationNoBuffer``, the processing can be resumed if + ``encodeData()``/``decodeData()`` return ``FilterDataStatus::Continue`` or by explicitly + calling ``continueEncoding()``/``continueDecoding()``. + +* Trailers encoding/decoding + + * During encoding/decoding of trailers if a filter returns ``FilterTrailersStatus::StopIteration``, + the processing can be resumed by explicitly calling ``continueEncoding()``/``continueDecoding()``. + +Are there well-known headers that will appear in the given headers map of ``decodeHeaders()``? +---------------------------------------------------------------------------------------------- + +The first filter of the decoding filter chain will have the following headers in the map: + +* ``Host`` +* ``Path`` (this might be omitted for CONNECT requests). + +Although these headers may be omitted by one of the filters on the decoding filter chain, +they should be reinserted before the terminal filter is triggered. + diff --git a/docs/root/faq/load_balancing/disable_circuit_breaking.rst b/docs/root/faq/load_balancing/disable_circuit_breaking.rst index 764de5158f712..338c30caf0c78 100644 --- a/docs/root/faq/load_balancing/disable_circuit_breaking.rst +++ b/docs/root/faq/load_balancing/disable_circuit_breaking.rst @@ -1,7 +1,9 @@ +.. _faq_disable_circuit_breaking: + Is there a way to disable circuit breaking? =========================================== -Envoy comes with :ref:`certain defaults ` +Envoy comes with :ref:`certain defaults ` for each kind of circuit breaking. Currently, there isn't a switch to turn circuit breaking off completely; however, you could achieve a similar behavior by setting these thresholds very high, for example, to `std::numeric_limits::max()`. 
@@ -13,8 +15,15 @@ of circuit breaking by setting the thresholds to a value of `1000000000`. circuit_breakers: thresholds: - priority: HIGH - max_connections: 1000000000 - max_pending_requests: 1000000000 - max_requests: 1000000000 - max_retries: 1000000000 + - priority: DEFAULT + max_connections: 1000000000 + max_pending_requests: 1000000000 + max_requests: 1000000000 + max_retries: 1000000000 + - priority: HIGH + max_connections: 1000000000 + max_pending_requests: 1000000000 + max_requests: 1000000000 + max_retries: 1000000000 + +Envoy supports priority routing at the route level. You may adjust the thresholds accordingly. diff --git a/docs/root/faq/overview.rst b/docs/root/faq/overview.rst index e6f4eaa275e6a..d8225f95933e9 100644 --- a/docs/root/faq/overview.rst +++ b/docs/root/faq/overview.rst @@ -12,17 +12,33 @@ Build build/binaries build/boringssl -API versioning --------------- +API +--- .. toctree:: :maxdepth: 2 + api/envoy_v2_support api/envoy_v3 api/envoy_upgrade_v3 api/extensions + api/control_plane_version_support api/control_plane api/package_naming api/why_versioning + api/incremental + +.. _faq_overview_debug: + +Debugging +--------- +.. toctree:: + :maxdepth: 2 + + debugging/why_is_envoy_sending_internal_responses + debugging/why_is_envoy_sending_http2_resets + debugging/why_is_envoy_404ing_connect_requests + debugging/why_is_envoy_sending_413s + debugging/why_is_my_route_not_found Performance ----------- @@ -31,6 +47,7 @@ Performance :maxdepth: 2 performance/how_fast_is_envoy + performance/how_to_benchmark_envoy Configuration ------------- @@ -46,6 +63,7 @@ Configuration configuration/flow_control configuration/timeouts configuration/deprecation + configuration/resource_limits Load balancing -------------- @@ -58,3 +76,11 @@ Load balancing load_balancing/disable_circuit_breaking load_balancing/transient_failures load_balancing/region_failover + +Extensions +---------- + +.. 
toctree:: + :maxdepth: 2 + + extensions/contract diff --git a/docs/root/faq/performance/how_fast_is_envoy.rst b/docs/root/faq/performance/how_fast_is_envoy.rst index 78b1dd4d20bc0..f2d7ceadaa91b 100644 --- a/docs/root/faq/performance/how_fast_is_envoy.rst +++ b/docs/root/faq/performance/how_fast_is_envoy.rst @@ -1,3 +1,5 @@ +.. _faq_how_fast_is_envoy: + How fast is Envoy? ================== diff --git a/docs/root/faq/performance/how_to_benchmark_envoy.rst b/docs/root/faq/performance/how_to_benchmark_envoy.rst new file mode 100644 index 0000000000000..4152cf6d2fa3e --- /dev/null +++ b/docs/root/faq/performance/how_to_benchmark_envoy.rst @@ -0,0 +1,83 @@ +What are best practices for benchmarking Envoy? +=============================================== + +There is :ref:`no single QPS, latency or throughput overhead ` that can +characterize a network proxy such as Envoy. Instead, any measurements need to be contextually aware, +ensuring an apples-to-apples comparison with other systems by configuring and load testing Envoy +appropriately. As a result, we can't provide a canonical benchmark configuration, but instead offer +the following guidance: + +* A release Envoy binary should be used. If building, please ensure that `-c opt` + is used on the Bazel command line. When consuming Envoy point releases, make + sure you are using the latest point release; given the pace of Envoy development + it's not reasonable to pick older versions when making a statement about Envoy + performance. Similarly, if working on a master build, please perform due diligence + and ensure no regressions or performance improvements have landed proximal to your + benchmark work and that you are close to HEAD. + +* The :option:`--concurrency` Envoy CLI flag should be unset (providing one worker thread per + logical core on your machine) or set to match the number of cores/threads made available to other + network proxies in your comparison. + +* Disable :ref:`circuit breaking `.
A common issue during benchmarking + is that Envoy's default circuit breaker limits are low, leading to connection and request queuing. + +* Disable :ref:`generate_request_id + `. + +* Disable :ref:`dynamic_stats + `. If you are measuring + the overhead vs. a direct connection, you might want to consider disabling all stats via + :ref:`reject_all `. + +* Ensure that the networking and HTTP filter chains are reflective of comparable features + in the systems that Envoy is being compared with. + +* Ensure that TLS settings (if any) are realistic and that consistent ciphers are used in + any comparison. Session reuse may have a significant impact on results and should be tracked via + :ref:`listener SSL stats `. + +* Ensure that :ref:`HTTP/2 settings `, in + particular those that affect flow control and stream concurrency, are consistent in any + comparison. Ideally taking into account BDP and network link latencies when optimizing any + HTTP/2 settings. + +* Verify in the listener and cluster stats that the number of streams, connections and errors + matches what is expected in any given experiment. + +* Make sure you are aware of how connections created by your load generator are + distributed across Envoy worker threads. This is especially important for + benchmarks that use low connection counts and perfect keep-alive. You should be aware that + Envoy will allocate all streams for a given connection to a single worker thread. This means, + for example, that if you have 72 logical cores and worker threads, but only a single HTTP/2 + connection from your load generator, then only 1 worker thread will be active. + +* Make sure request-release timing expectations line up with what is intended. + Some load generators produce naturally jittery and/or batchy timings. This + might end up being an unintended dominant factor in certain tests. + +* The specifics of how your load generator reuses connections is an important factor (e.g. MRU, + random, LRU, etc.)
as this impacts work distribution. + +* If you're trying to measure small (say < 1ms) latencies, make sure the measurement tool and + environment have the required sensitivity and the noise floor is sufficiently low. + +* Be critical of your bootstrap or xDS configuration. Ideally every line has a motivation and is + necessary for the benchmark under consideration. + +* Consider using `Nighthawk `_ as your + load generator and measurement tool. We are committed to building out + benchmarking and latency measurement best practices in this tool. + +* Examine `perf` profiles of Envoy during the benchmark run, e.g. with `flame graphs + `_. Verify that Envoy is spending its time + doing the expected essential work under test, rather than some unrelated or tangential + work. + +* Familiarize yourself with `latency measurement best practices + `_. In particular, never measure latency at + max load, this is not generally meaningful or reflecting of real system performance; aim + to measure below the knee of the QPS-latency curve. Prefer open vs. closed loop load + generators. + +* Avoid `benchmarking crimes `_. diff --git a/docs/root/img/envoy-logo.png b/docs/root/img/envoy-logo.png new file mode 100644 index 0000000000000..5c5b78ebba162 Binary files /dev/null and b/docs/root/img/envoy-logo.png differ diff --git a/docs/root/install/building.rst b/docs/root/install/building.rst index ada695fa53b57..7031a93ca05af 100644 --- a/docs/root/install/building.rst +++ b/docs/root/install/building.rst @@ -15,8 +15,8 @@ In order to build manually, follow the instructions at :repo:`bazel/README.md`. Requirements ------------ -Envoy was initially developed and deployed on Ubuntu 14 LTS. It should work on any reasonably -recent Linux including Ubuntu 16 LTS. +Envoy was initially developed and deployed on Ubuntu 14.04 LTS. It should work on any reasonably +recent Linux including Ubuntu 18.04 LTS. 
Building Envoy has the following requirements: @@ -35,7 +35,7 @@ We build and tag Docker images with release versions when we do official release be found in the following repositories: * `envoyproxy/envoy `_: Release binary with - symbols stripped on top of an Ubuntu Xenial base. + symbols stripped on top of an Ubuntu Bionic base. * `envoyproxy/envoy-alpine `_: Release binary with symbols stripped on top of a **glibc** alpine base. * `envoyproxy/envoy-alpine-debug `_: @@ -43,14 +43,13 @@ be found in the following repositories: .. note:: - In the above repositories, we do **not** tag a *latest* image. As we now do security/stable - releases, *latest* has no good meaning and users should pin to a specific tag. + In the above repositories, we tag a *vX.Y-latest* image for each security/stable release line. On every master commit we additionally create a set of development Docker images. These images can be found in the following repositories: * `envoyproxy/envoy-dev `_: Release binary with - symbols stripped on top of an Ubuntu Xenial base. + symbols stripped on top of an Ubuntu Bionic base. * `envoyproxy/envoy-alpine-dev `_: Release binary with symbols stripped on top of a **glibc** alpine base. * `envoyproxy/envoy-alpine-debug-dev `_: @@ -72,6 +71,16 @@ We will consider producing additional binary types depending on community intere CI, packaging, etc. Please open an `issue in GetEnvoy `_ for pre-built binaries for different platforms. +.. _arm_binaries: + +ARM64 binaries +^^^^^^^^^^^^^^ + +`envoyproxy/envoy `_ and +`envoyproxy/envoy-dev `_ are Docker +`multi-arch `_ images +and should run transparently on compatible ARM64 hosts. 
+ Modifying Envoy --------------- diff --git a/docs/root/install/sandboxes/local_docker_build.rst b/docs/root/install/sandboxes/local_docker_build.rst index d5dda8c03191a..fe275ec8aef71 100644 --- a/docs/root/install/sandboxes/local_docker_build.rst +++ b/docs/root/install/sandboxes/local_docker_build.rst @@ -26,7 +26,7 @@ of the software used to build it.:: $ pwd src/envoy/ - $ docker build -f ci/Dockerfile-envoy-image -t envoy . + $ docker build -f ci/Dockerfile-envoy -t envoy . Now you can use this ``envoy`` image to build the any of the sandboxes if you change the ``FROM`` line in any Dockerfile. diff --git a/docs/root/install/tools/schema_validator_check_tool.rst b/docs/root/install/tools/schema_validator_check_tool.rst index 067ebabc128f0..a3e1a7c7bc417 100644 --- a/docs/root/install/tools/schema_validator_check_tool.rst +++ b/docs/root/install/tools/schema_validator_check_tool.rst @@ -13,8 +13,8 @@ Input 1. The schema type to check the passed in configuration against. The supported types are: - * `route` - for :ref:`route configuration` validation. - * `discovery_response` for :ref:`discovery response` validation. + * `route` - for :ref:`route configuration` validation. + * `discovery_response` for :ref:`discovery response` validation. 2. The path to the configuration file. diff --git a/docs/root/intro/arch_overview/advanced/data_sharing_between_filters.rst b/docs/root/intro/arch_overview/advanced/data_sharing_between_filters.rst index 3ef220bb89a52..326d9e1bc9bc7 100644 --- a/docs/root/intro/arch_overview/advanced/data_sharing_between_filters.rst +++ b/docs/root/intro/arch_overview/advanced/data_sharing_between_filters.rst @@ -17,7 +17,7 @@ Metadata -------- Several parts of Envoy configuration (e.g. listeners, routes, clusters) -contain a :ref:`metadata ` where arbitrary +contain a :ref:`metadata ` where arbitrary key-value pairs can be encoded. 
The typical pattern is to use the filter names in reverse DNS format as the key and encode filter specific configuration metadata in the value. This metadata is immutable and shared @@ -31,7 +31,7 @@ weighted cluster to select appropriate endpoints in a cluster Typed Metadata -------------- -:ref:`Metadata ` as such is untyped. Before +:ref:`Metadata ` as such is untyped. Before acting on the metadata, callers typically convert it to a typed class object. The cost of conversion becomes non-negligible when performed repeatedly (e.g., for each request stream or connection). Typed Metadata @@ -57,8 +57,8 @@ is specified as part of the configuration. A `FilterState::Object` implements HTTP Per-Route Filter Configuration ----------------------------------- -In HTTP routes, :ref:`per_filter_config -` allows HTTP filters +In HTTP routes, :ref:`typed_per_filter_config +` allows HTTP filters to have virtualhost/route-specific configuration in addition to a global filter config common to all virtual hosts. This configuration is converted and embedded into the route table. It is up to the HTTP filter @@ -66,9 +66,9 @@ implementation to treat the route-specific filter config as a replacement to global config or an enhancement. For example, the HTTP fault filter uses this technique to provide per-route fault configuration. -`per_filter_config` is a `map`. The Connection +`typed_per_filter_config` is a `map`. The Connection manager iterates over this map and invokes the filter factory interface -`createRouteSpecificFilterConfig` to parse/validate the struct value and +`createRouteSpecificFilterConfigTyped` to parse/validate the struct value and convert it into a typed class object that’s stored with the route itself. HTTP filters can then query the route-specific filter config during request processing. 
diff --git a/docs/root/intro/arch_overview/http/http.rst b/docs/root/intro/arch_overview/http/http.rst index f5729560e0f6c..33b7ebffa6fa2 100644 --- a/docs/root/intro/arch_overview/http/http.rst +++ b/docs/root/intro/arch_overview/http/http.rst @@ -7,5 +7,5 @@ HTTP http_connection_management http_filters http_routing - websocket + upgrades http_proxy diff --git a/docs/root/intro/arch_overview/http/http_connection_management.rst b/docs/root/intro/arch_overview/http/http_connection_management.rst index 93c4ebb336008..74e8d90b99e82 100644 --- a/docs/root/intro/arch_overview/http/http_connection_management.rst +++ b/docs/root/intro/arch_overview/http/http_connection_management.rst @@ -51,7 +51,7 @@ Retry plugin configuration Normally during retries, host selection follows the same process as the original request. Retry plugins can be used to modify this behavior, and they fall into two categories: -* :ref:`Host Predicates `: +* :ref:`Host Predicates `: These predicates can be used to "reject" a host, which will cause host selection to be reattempted. Any number of these predicates can be specified, and the host will be rejected if any of the predicates reject the host. @@ -62,22 +62,22 @@ can be used to modify this behavior, and they fall into two categories: * *envoy.retry_host_predicates.omit_canary_hosts*: This will reject any host that is a marked as canary host. Hosts are marked by setting ``canary: true`` for the ``envoy.lb`` filter in the endpoint's filter metadata. - See :ref:`LbEndpoint ` for more details. + See :ref:`LbEndpoint ` for more details. * *envoy.retry_host_predicates.omit_host_metadata*: This will reject any host based on predefined metadata match criteria. See the configuration example below for more details. -* :ref:`Priority Predicates`: These predicates can +* :ref:`Priority Predicates`: These predicates can be used to adjust the priority load used when selecting a priority for a retry attempt. Only one such predicate may be specified. 
Envoy supports the following built-in priority predicates - * *envoy.retry_priority.previous_priorities*: This will keep track of previously attempted priorities, + * *envoy.retry_priorities.previous_priorities*: This will keep track of previously attempted priorities, and adjust the priority load such that other priorities will be targeted in subsequent retry attempts. Host selection will continue until either the configured predicates accept the host or a configurable -:ref:`max attempts ` has been reached. +:ref:`max attempts ` has been reached. These plugins can be combined to affect both host selection and priority load. Envoy can also be extended with custom retry plugins similar to how custom filters can be added. @@ -108,7 +108,7 @@ To reject a host based on its metadata, ``envoy.retry_host_predicates.omit_host_ retry_host_predicate: - name: envoy.retry_host_predicates.omit_host_metadata typed_config: - "@type": type.googleapis.com/envoy.config.retry.omit_host_metadata.v2.OmitHostMetadataConfig + "@type": type.googleapis.com/envoy.extensions.retry.host.omit_host_metadata.v3.OmitHostMetadataConfig metadata_match: filter_metadata: envoy.lb: @@ -117,7 +117,7 @@ To reject a host based on its metadata, ``envoy.retry_host_predicates.omit_host_ This will reject any host with matching (key, value) in its metadata. To configure retries to attempt other priorities during retries, the built-in -``envoy.retry_priority.previous_priorities`` can be used. +``envoy.retry_priorities.previous_priorities`` can be used. .. 
code-block:: yaml @@ -125,7 +125,7 @@ To configure retries to attempt other priorities during retries, the built-in retry_priority: name: envoy.retry_priorities.previous_priorities typed_config: - "@type": type.googleapis.com/envoy.config.retry.previous_priorities.PreviousPrioritiesConfig + "@type": type.googleapis.com/envoy.extensions.retry.priority.previous_priorities.v3.PreviousPrioritiesConfig update_frequency: 2 This will target priorities in subsequent retry attempts that haven't been already used. The ``update_frequency`` parameter decides how @@ -143,7 +143,7 @@ previously attempted priorities. retry_priority: name: envoy.retry_priorities.previous_priorities typed_config: - "@type": type.googleapis.com/envoy.config.retry.previous_priorities.PreviousPrioritiesConfig + "@type": type.googleapis.com/envoy.extensions.retry.priority.previous_priorities.v3.PreviousPrioritiesConfig update_frequency: 2 .. _arch_overview_internal_redirects: @@ -151,37 +151,60 @@ previously attempted priorities. Internal redirects -------------------------- -Envoy supports handling 302 redirects internally, that is capturing a 302 redirect response, -synthesizing a new request, sending it to the upstream specified by the new route match, and -returning the redirected response as the response to the original request. +Envoy supports handling 3xx redirects internally, that is capturing a configurable 3xx redirect +response, synthesizing a new request, sending it to the upstream specified by the new route match, +and returning the redirected response as the response to the original request. -Internal redirects are configured via the ref:`internal redirect action -` field and -`max internal redirects ` field in -route configuration. When redirect handling is on, any 302 response from upstream is -subject to the redirect being handled by Envoy. +Internal redirects are configured via the :ref:`internal redirect policy +` field in route configuration. 
+When redirect handling is on, any 3xx response from upstream, that matches +:ref:`redirect_response_codes +` +is subject to the redirect being handled by Envoy. For a redirect to be handled successfully it must pass the following checks: -1. Be a 302 response. -2. Have a *location* header with a valid, fully qualified URL matching the scheme of the original request. +1. Have a response code matching one of :ref:`redirect_response_codes + `, which is + either 302 (by default), or a set of 3xx codes (301, 302, 303, 307, 308). +2. Have a *location* header with a valid, fully qualified URL. 3. The request must have been fully processed by Envoy. 4. The request must not have a body. -5. The number of previously handled internal redirect within a given downstream request does not exceed - `max internal redirects ` of the route - that the request or redirected request is hitting. +5. :ref:`allow_cross_scheme_redirect + ` is true (default to false), + or the scheme of the downstream request and the *location* header are the same. +6. The number of previously handled internal redirect within a given downstream request does not + exceed :ref:`max internal redirects + ` + of the route that the request or redirected request is hitting. +7. All :ref:`predicates ` accept + the target route. Any failure will result in redirect being passed downstream instead. Since a redirected request may be bounced between different routes, any route in the chain of redirects that 1. does not have internal redirect enabled -2. or has a `max internal redirects - ` +2. or has a :ref:`max internal redirects + ` smaller or equal to the redirect chain length when the redirect chain hits it +3. or is disallowed by any of the :ref:`predicates + ` will cause the redirect to be passed downstream. +Two predicates can be used to create a DAG that defines the redirect chain, the :ref:`previous routes +` predicate, and +the :ref:`allow_listed_routes +`. 
+Specifically, the *allow listed routes* predicate defines edges of individual node in the DAG +and the *previous routes* predicate defines "visited" state of the edges, so that loop can be avoided +if so desired. + +A third predicate :ref:`safe_cross_scheme +` +can be used to prevent HTTP -> HTTPS redirect. + Once the redirect has passed these checks, the request headers which were shipped to the original upstream will be modified by: diff --git a/docs/root/intro/arch_overview/http/http_filters.rst b/docs/root/intro/arch_overview/http/http_filters.rst index 36f2ab3228a0a..32f177d8e5f7a 100644 --- a/docs/root/intro/arch_overview/http/http_filters.rst +++ b/docs/root/intro/arch_overview/http/http_filters.rst @@ -25,3 +25,26 @@ themselves within the context of a single request stream. Refer to :ref:`data sh between filters ` for more details. Envoy already includes several HTTP level filters that are documented in this architecture overview as well as the :ref:`configuration reference `. + +.. _arch_overview_http_filters_ordering: + +Filter ordering +--------------- + +Filter ordering in the :ref:`http_filters field ` +matters. If filters are configured in the following order (and assuming all three filters are +decoder/encoder filters): + +.. code-block:: yaml + + http_filters: + - A + - B + # The last configured filter has to be a terminal filter, as determined by the + # NamedHttpFilterConfigFactory::isTerminalFilter() function. This is most likely the router + # filter. + - C + +The connection manager will invoke decoder filters in the order: ``A``, ``B``, ``C``. +On the other hand, the connection manager will invoke encoder filters in the **reverse** +order: ``C``, ``B``, ``A``. 
diff --git a/docs/root/intro/arch_overview/http/http_proxy.rst b/docs/root/intro/arch_overview/http/http_proxy.rst index 2ed691203abb4..50dc9ce2ec01d 100644 --- a/docs/root/intro/arch_overview/http/http_proxy.rst +++ b/docs/root/intro/arch_overview/http/http_proxy.rst @@ -8,7 +8,7 @@ HTTP dynamic forward proxy HTTP dynamic forward proxy support should be considered alpha and not production ready. Through the combination of both an :ref:`HTTP filter ` and -:ref:`custom cluster `, +:ref:`custom cluster `, Envoy supports HTTP dynamic forward proxy. This means that Envoy can perform the role of an HTTP proxy without prior knowledge of all configured DNS addresses, while still retaining the vast majority of Envoy's benefits including asynchronous DNS resolution. The implementation works as @@ -49,15 +49,15 @@ Memory usage detail's for Envoy's dynamic forward proxy support are as follows: * Hosts removed via TTL are purged once all active connections stop referring to them and all used memory is regained. * The :ref:`max_hosts - ` field can + ` field can be used to limit the number of hosts that the DNS cache will store at any given time. * The cluster's :ref:`max_pending_requests - ` circuit breaker can + ` circuit breaker can be used to limit the number of requests that are pending waiting for the DNS cache to load a host. * Long lived upstream connections can have the underlying logical host expire via TTL while the connection is still open. Upstream requests and connections are still bound by other cluster circuit breakers such as :ref:`max_requests - `. The current assumption is that + `. The current assumption is that host data shared between connections uses a marginal amount of memory compared to the connections and requests themselves, making it not worth controlling independently. 
diff --git a/docs/root/intro/arch_overview/http/http_routing.rst b/docs/root/intro/arch_overview/http/http_routing.rst index 95ce2b6ed796f..ff24b0fd512e7 100644 --- a/docs/root/intro/arch_overview/http/http_routing.rst +++ b/docs/root/intro/arch_overview/http/http_routing.rst @@ -15,41 +15,41 @@ request. The router filter supports the following features: * Virtual hosts that map domains/authorities to a set of routing rules. * Prefix and exact path matching rules (both :ref:`case sensitive - ` and case insensitive). Regex/slug + ` and case insensitive). Regex/slug matching is not currently supported, mainly because it makes it difficult/impossible to programmatically determine whether routing rules conflict with each other. For this reason we don’t recommend regex/slug routing at the reverse proxy level, however we may add support in the future depending on demand. -* :ref:`TLS redirection ` at the virtual host +* :ref:`TLS redirection ` at the virtual host level. -* :ref:`Path `/:ref:`host - ` redirection at the route level. +* :ref:`Path `/:ref:`host + ` redirection at the route level. * :ref:`Direct (non-proxied) HTTP responses ` at the route level. -* :ref:`Explicit host rewriting `. -* :ref:`Automatic host rewriting ` based on +* :ref:`Explicit host rewriting `. +* :ref:`Automatic host rewriting ` based on the DNS name of the selected upstream host. -* :ref:`Prefix rewriting `. -* :ref:`Path rewriting using a regular expression and capture groups `. +* :ref:`Prefix rewriting `. +* :ref:`Path rewriting using a regular expression and capture groups `. * :ref:`Request retries ` specified either via HTTP header or via route configuration. * Request timeout specified either via :ref:`HTTP header ` or via :ref:`route configuration - `. + `. * :ref:`Request hedging ` for retries in response to a request (per try) timeout. 
* Traffic shifting from one upstream cluster to another via :ref:`runtime values - ` (see :ref:`traffic shifting/splitting + ` (see :ref:`traffic shifting/splitting `). * Traffic splitting across multiple upstream clusters using :ref:`weight/percentage-based routing - ` (see :ref:`traffic shifting/splitting + ` (see :ref:`traffic shifting/splitting `). -* Arbitrary header matching :ref:`routing rules `. +* Arbitrary header matching :ref:`routing rules `. * Virtual cluster specifications. A virtual cluster is specified at the virtual host level and is used by Envoy to generate additional statistics on top of the standard cluster level ones. Virtual clusters can use regex matching. * :ref:`Priority ` based routing. -* :ref:`Hash policy ` based routing. -* :ref:`Absolute urls ` are supported for non-tls forward proxies. +* :ref:`Hash policy ` based routing. +* :ref:`Absolute urls ` are supported for non-tls forward proxies. .. _arch_overview_http_routing_route_scope: @@ -60,8 +60,8 @@ Scoped routing enables Envoy to put constraints on search space of domains and r A :ref:`Route Scope` associates a key with a :ref:`route table `. For each request, a scope key is computed dynamically by the HTTP connection manager to pick the :ref:`route table`. -The Scoped RDS (SRDS) API contains a set of :ref:`Scopes ` resources, each defining independent routing configuration, -along with a :ref:`ScopeKeyBuilder ` +The Scoped RDS (SRDS) API contains a set of :ref:`Scopes ` resources, each defining independent routing configuration, +along with a :ref:`ScopeKeyBuilder ` defining the key construction algorithm used by Envoy to look up the scope corresponding to each request. For example, for the following scoped route configuration, Envoy will look into the "addr" header value, split the header value by ";" first, and use the first value for key 'x-foo-key' as the scope key. @@ -80,8 +80,8 @@ If the "addr" header value is "foo=1;x-foo-key=127.0.0.1;x-bar-key=1.1.1.1", the .. 
_arch_overview_http_routing_route_table: -For a key to match a :ref:`ScopedRouteConfiguration`, the number of fragments in the computed key has to match that of -the :ref:`ScopedRouteConfiguration`. +For a key to match a :ref:`ScopedRouteConfiguration`, the number of fragments in the computed key has to match that of +the :ref:`ScopedRouteConfiguration`. Then fragments are matched in order. A missing fragment(treated as NULL) in the built key makes the request unable to match any scope, i.e. no route entry can be found for the request. @@ -89,7 +89,7 @@ Route table ----------- The :ref:`configuration ` for the HTTP connection manager owns the :ref:`route -table ` that is used by all configured HTTP filters. Although the +table ` that is used by all configured HTTP filters. Although the router filter is the primary consumer of the route table, other filters also have access in case they want to make decisions based on the ultimate destination of the request. For example, the built in rate limit filter consults the route table to determine whether the global rate limit service @@ -103,7 +103,7 @@ Retry semantics --------------- Envoy allows retries to be configured both in the :ref:`route configuration -` as well as for specific requests via :ref:`request +` as well as for specific requests via :ref:`request headers `. The following configurations are possible: * **Maximum number of retries**: Envoy will continue to retry any number of times. An exponential @@ -112,18 +112,20 @@ headers `. The following configurat * **Retry conditions**: Envoy can retry on different types of conditions depending on application requirements. For example, network failure, all 5xx response codes, idempotent 4xx response codes, etc. 
-* **Retry budgets**: Envoy can limit the proportion of active requests via :ref:`retry budgets ` that can be retries to +* **Retry budgets**: Envoy can limit the proportion of active requests via :ref:`retry budgets ` that can be retries to prevent their contribution to large increases in traffic volume. * **Host selection retry plugins**: Envoy can be configured to apply additional logic to the host selection logic when selecting hosts for retries. Specifying a - :ref:`retry host predicate ` + :ref:`retry host predicate ` allows for reattempting host selection when certain hosts are selected (e.g. when an already attempted host is selected), while a - :ref:`retry priority ` can be + :ref:`retry priority ` can be configured to adjust the priority load used when selecting a priority for retries. -Note that retries may be disabled depending on the contents of the :ref:`x-envoy-overloaded -`. +Note that Envoy retries requests when :ref:`x-envoy-overloaded +` is present. It is recommended to either configure +:ref:`retry budgets (preferred) ` or set +:ref:`maximum active retries circuit breaker ` to an appropriate value to avoid retry storms. .. _arch_overview_http_routing_hedging: @@ -131,7 +133,7 @@ Request Hedging --------------- Envoy supports request hedging which can be enabled by specifying a :ref:`hedge -policy `. This means that Envoy will race +policy `. This means that Envoy will race multiple simultaneous upstream requests and return the response associated with the first acceptable response headers to the downstream. The retry policy is used to determine whether a response should be returned or whether more @@ -151,7 +153,7 @@ response, creating two retriable events. Priority routing ---------------- -Envoy supports priority routing at the :ref:`route ` level. +Envoy supports priority routing at the :ref:`route ` level. The current priority implementation uses different :ref:`connection pool ` and :ref:`circuit breaking ` settings for each priority level. 
This means that even for HTTP/2 requests, two physical connections will be used to @@ -170,9 +172,9 @@ that do not require proxying to an upstream server. There are two ways to specify a direct response in a Route: -* Set the :ref:`direct_response ` field. +* Set the :ref:`direct_response ` field. This works for all HTTP response statuses. -* Set the :ref:`redirect ` field. This works for +* Set the :ref:`redirect ` field. This works for redirect response statuses only, but it simplifies the setting of the *Location* header. A direct response has an HTTP status code and an optional body. The Route configuration diff --git a/docs/root/intro/arch_overview/http/upgrades.rst b/docs/root/intro/arch_overview/http/upgrades.rst new file mode 100644 index 0000000000000..a00b43d15d0dd --- /dev/null +++ b/docs/root/intro/arch_overview/http/upgrades.rst @@ -0,0 +1,104 @@ +.. _arch_overview_upgrades: + +HTTP upgrades +=========================== + +Envoy Upgrade support is intended mainly for WebSocket and CONNECT support, but may be used for +arbitrary upgrades as well. Upgrades pass both the HTTP headers and the upgrade payload +through an HTTP filter chain. One may configure the +:ref:`upgrade_configs ` +with or without custom filter chains. If only the +:ref:`upgrade_type ` +is specified, both the upgrade headers, any request and response body, and HTTP data payload will +pass through the default HTTP filter chain. To avoid the use of HTTP-only filters for upgrade payload, +one can set up custom +:ref:`filters ` +for the given upgrade type, up to and including only using the router filter to send the HTTP +data upstream. + +Upgrades can be enabled or disabled on a :ref:`per-route ` basis. +Any per-route enabling/disabling automatically overrides HttpConnectionManager configuration as +laid out below, but custom filter chains can only be configured on a per-HttpConnectionManager basis. 
+ ++-----------------------+-------------------------+-------------------+ +| *HCM Upgrade Enabled* | *Route Upgrade Enabled* | *Upgrade Enabled* | ++=======================+=========================+===================+ +| T (Default) | T (Default) | T | ++-----------------------+-------------------------+-------------------+ +| T (Default) | F | F | ++-----------------------+-------------------------+-------------------+ +| F | T (Default) | T | ++-----------------------+-------------------------+-------------------+ +| F | F | F | ++-----------------------+-------------------------+-------------------+ + +Note that the statistics for upgrades are all bundled together so WebSocket and other upgrades +:ref:`statistics ` are tracked by stats such as +downstream_cx_upgrades_total and downstream_cx_upgrades_active + +Websocket over HTTP/2 hops +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +While HTTP/2 support for WebSockets is off by default, Envoy does support tunneling WebSockets over +HTTP/2 streams for deployments that prefer a uniform HTTP/2 mesh throughout; this enables, for example, +a deployment of the form: + +[Client] ---- HTTP/1.1 ---- [Front Envoy] ---- HTTP/2 ---- [Sidecar Envoy ---- H1 ---- App] + +In this case, if a client is for example using WebSocket, we want the Websocket to arrive at the +upstream server functionally intact, which means it needs to traverse the HTTP/2 hop. + +This is accomplished via `extended CONNECT `_ support, +turned on by setting :ref:`allow_connect ` +true at the second layer Envoy. The +WebSocket request will be transformed into an HTTP/2 CONNECT stream, with :protocol header +indicating the original upgrade, traverse the HTTP/2 hop, and be downgraded back into an HTTP/1 +WebSocket Upgrade. This same Upgrade-CONNECT-Upgrade transformation will be performed on any +HTTP/2 hop, with the documented flaw that the HTTP/1.1 method is always assumed to be GET. +Non-WebSocket upgrades are allowed to use any valid HTTP method (i.e. 
POST) and the current +upgrade/downgrade mechanism will drop the original method and transform the Upgrade request to +a GET method on the final Envoy-Upstream hop. + +Note that the HTTP/2 upgrade path has very strict HTTP/1.1 compliance, so will not proxy WebSocket +upgrade requests or responses with bodies. + +CONNECT support +^^^^^^^^^^^^^^^ + +Envoy CONNECT support is off by default (Envoy will send an internally generated 403 in response to +CONNECT requests). CONNECT support can be enabled via the upgrade options described above, setting +the upgrade value to the special keyword "CONNECT". + +While for HTTP/2, CONNECT requests may have a path, in general and for HTTP/1.1 CONNECT requests do +not have a path, and can only be matched using a +:ref:`connect_matcher ` + +Envoy can handle CONNECT in one of two ways, either proxying the CONNECT headers through as if they +were any other request, and letting the upstream terminate the CONNECT request, or by terminating the +CONNECT request, and forwarding the payload as raw TCP data. When CONNECT upgrade configuration is +set up, the default behavior is to proxy the CONNECT request, treating it like any other request using +the upgrade path. +If termination is desired, this can be accomplished by setting +:ref:`connect_config ` +If that message is present for CONNECT requests, the router filter will strip the request headers, +and forward the HTTP payload upstream. On receipt of initial TCP data from upstream, the router +will synthesize 200 response headers, and then forward the TCP data as the HTTP response body. + +.. warning:: + This mode of CONNECT support can create major security holes if not configured correctly, as the upstream + will be forwarded *unsanitized* headers if they are in the body payload. Please use with caution. + +Tunneling TCP over HTTP/2 +^^^^^^^^^^^^^^^^^^^^^^^^^ +Envoy also has support for transforming raw TCP into HTTP/2 CONNECT requests.
This can be used to +proxy multiplexed TCP over pre-warmed secure connections and amortize the cost of any TLS handshake. +An example set up proxying SMTP would look something like this + +[SMTP Upstream] --- raw SMTP --- [L2 Envoy] --- SMTP tunneled over HTTP/2 --- [L1 Envoy] --- raw SMTP --- [Client] + +Examples of such a set up can be found in the Envoy example config :repo:`directory ` +If you run `bazel-bin/source/exe/envoy-static --config-path configs/encapsulate_in_connect.v3.yaml --base-id 1` +and `bazel-bin/source/exe/envoy-static --config-path configs/terminate_connect.v3.yaml` +you will be running two Envoys, the first listening for TCP traffic on port 10000 and encapsulating it in an HTTP/2 +CONNECT request, and the second listening for HTTP/2 on 10001, stripping the CONNECT headers, and forwarding the +original TCP upstream, in this case to google.com. diff --git a/docs/root/intro/arch_overview/http/websocket.rst b/docs/root/intro/arch_overview/http/websocket.rst deleted file mode 100644 index fa4e0b1f055d9..0000000000000 --- a/docs/root/intro/arch_overview/http/websocket.rst +++ /dev/null @@ -1,63 +0,0 @@ -.. _arch_overview_websocket: - -WebSocket and HTTP upgrades -=========================== - -Envoy Upgrade support is intended mainly for WebSocket but may be used for non-WebSocket -upgrades as well. Upgrades pass both the HTTP headers and the upgrade payload -through an HTTP filter chain. One may configure the -:ref:`upgrade_configs ` -with or without custom filter chains. If only the -:ref:`upgrade_type ` -is specified, both the upgrade headers, any request and response body, and WebSocket payload will -pass through the default HTTP filter chain. To avoid the use of HTTP-only filters for upgrade payload, -one can set up custom -:ref:`filters ` -for the given upgrade type, up to and including only using the router filter to send the WebSocket -data upstream. - -Upgrades can be enabled or disabled on a :ref:`per-route ` basis. 
-Any per-route enabling/disabling automatically overrides HttpConnectionManager configuration as -laid out below, but custom filter chains can only be configured on a per-HttpConnectionManager basis. - -+-----------------------+-------------------------+-------------------+ -| *HCM Upgrade Enabled* | *Route Upgrade Enabled* | *Upgrade Enabled* | -+=======================+=========================+===================+ -| T (Default) | T (Default) | T | -+-----------------------+-------------------------+-------------------+ -| T (Default) | F | F | -+-----------------------+-------------------------+-------------------+ -| F | T (Default) | T | -+-----------------------+-------------------------+-------------------+ -| F | F | F | -+-----------------------+-------------------------+-------------------+ - -Note that the statistics for upgrades are all bundled together so WebSocket -:ref:`statistics ` are tracked by stats such as -downstream_cx_upgrades_total and downstream_cx_upgrades_active - -Handling HTTP/2 hops -^^^^^^^^^^^^^^^^^^^^ - -While HTTP/2 support for WebSockets is off by default, Envoy does support tunneling WebSockets over -HTTP/2 streams for deployments that prefer a uniform HTTP/2 mesh throughout; this enables, for example, -a deployment of the form: - -[Client] ---- HTTP/1.1 ---- [Front Envoy] ---- HTTP/2 ---- [Sidecar Envoy ---- H1 ---- App] - -In this case, if a client is for example using WebSocket, we want the Websocket to arrive at the -upstream server functionally intact, which means it needs to traverse the HTTP/2 hop. - -This is accomplished via `extended CONNECT `_ support, -turned on by setting :ref:`allow_connect ` -true at the second layer Envoy. The -WebSocket request will be transformed into an HTTP/2 CONNECT stream, with :protocol header -indicating the original upgrade, traverse the HTTP/2 hop, and be downgraded back into an HTTP/1 -WebSocket Upgrade. 
This same Upgrade-CONNECT-Upgrade transformation will be performed on any -HTTP/2 hop, with the documented flaw that the HTTP/1.1 method is always assumed to be GET. -Non-WebSocket upgrades are allowed to use any valid HTTP method (i.e. POST) and the current -upgrade/downgrade mechanism will drop the original method and transform the Upgrade request to -a GET method on the final Envoy-Upstream hop. - -Note that the HTTP/2 upgrade path has very strict HTTP/1.1 compliance, so will not proxy WebSocket -upgrade requests or responses with bodies. diff --git a/docs/root/intro/arch_overview/intro/threading_model.rst b/docs/root/intro/arch_overview/intro/threading_model.rst index 110660c80c73e..ca83cb92e92c5 100644 --- a/docs/root/intro/arch_overview/intro/threading_model.rst +++ b/docs/root/intro/arch_overview/intro/threading_model.rst @@ -3,7 +3,7 @@ Threading model =============== -Envoy uses a single process with multiple threads architecture. A single *master* thread controls +Envoy uses a single process with multiple threads architecture. A single *primary* thread controls various sporadic coordination tasks while some number of *worker* threads perform listening, filtering, and forwarding. Once a connection is accepted by a listener, the connection spends the rest of its lifetime bound to a single worker thread. This allows the majority of Envoy to be @@ -22,5 +22,5 @@ balancing incoming connections. However, for some workloads, particularly those number of very long lived connections (e.g., service mesh HTTP2/gRPC egress), it may be desirable to have Envoy forcibly balance connections between worker threads. To support this behavior, Envoy allows for different types of :ref:`connection balancing -` to be configured on each :ref:`listener +` to be configured on each :ref:`listener `. 
diff --git a/docs/root/intro/arch_overview/listeners/dns_filter.rst b/docs/root/intro/arch_overview/listeners/dns_filter.rst index f6090c577ff98..87295a8d09507 100644 --- a/docs/root/intro/arch_overview/listeners/dns_filter.rst +++ b/docs/root/intro/arch_overview/listeners/dns_filter.rst @@ -1,5 +1,11 @@ DNS Filter ========== -Envoy supports DNS responses via a :ref:`UDP listener DNS Filter +Envoy supports responding to DNS requests by configuring a :ref:`UDP listener DNS Filter `. + +The DNS filter supports responding to forward queries for A and AAAA records. The answers are +discovered from statically configured resources, clusters, or external DNS servers. The filter +will return DNS responses up to 512 bytes. If domains are configured with multiple addresses, +or clusters with multiple endpoints, Envoy will return each discovered address up to the +aforementioned size limit. diff --git a/docs/root/intro/arch_overview/listeners/listeners.rst b/docs/root/intro/arch_overview/listeners/listeners.rst index a6e201fdb1168..153d344f57db0 100644 --- a/docs/root/intro/arch_overview/listeners/listeners.rst +++ b/docs/root/intro/arch_overview/listeners/listeners.rst @@ -12,8 +12,8 @@ TCP --- Each listener is independently configured with some number :ref:`filter chains -`, where an individual chain is selected based on its -:ref:`match criteria `. An individual filter chain is +`, where an individual chain is selected based on its -:ref:`match criteria `. An individual filter chain is composed of one or more network level (L3/L4) :ref:`filters `. When a new connection is received on a listener, the appropriate filter chain is selected, and the configured connection local filter stack is instantiated and begins processing subsequent events. @@ -33,6 +33,8 @@ Listeners can also be fetched dynamically via the :ref:`listener discovery servi Listener :ref:`configuration `. +..
_arch_overview_listeners_udp: + UDP --- diff --git a/docs/root/intro/arch_overview/listeners/tcp_proxy.rst b/docs/root/intro/arch_overview/listeners/tcp_proxy.rst index a8f5a9686f296..cceb6abe80641 100644 --- a/docs/root/intro/arch_overview/listeners/tcp_proxy.rst +++ b/docs/root/intro/arch_overview/listeners/tcp_proxy.rst @@ -10,7 +10,7 @@ such as the :ref:`MongoDB filter ` or the :ref:`rate limit ` filter. The TCP proxy filter will respect the -:ref:`connection limits ` +:ref:`connection limits ` imposed by each upstream cluster's global resource manager. The TCP proxy filter checks with the upstream cluster's resource manager if it can create a connection without going over that cluster's maximum number of connections, if it can't the TCP proxy will not make the connection. diff --git a/docs/root/intro/arch_overview/observability/access_logging.rst b/docs/root/intro/arch_overview/observability/access_logging.rst index fa41bd0d377b6..afaacda8630c5 100644 --- a/docs/root/intro/arch_overview/observability/access_logging.rst +++ b/docs/root/intro/arch_overview/observability/access_logging.rst @@ -12,7 +12,7 @@ features: to different access logs. Downstream connection access logging can be enabled using :ref:`listener access -logs`. The listener access logs complement +logs`. The listener access logs complement HTTP request access logging and can be enabled separately and independently from filter access logs. @@ -22,10 +22,12 @@ Access log filters ------------------ Envoy supports several built-in -:ref:`access log filters` and -:ref:`extension filters` +:ref:`access log filters` and +:ref:`extension filters` that are registered at runtime. +.. _arch_overview_access_logs_sinks: + Access logging sinks -------------------- @@ -48,6 +50,6 @@ Further reading --------------- * Access log :ref:`configuration `. -* File :ref:`access log sink `. -* gRPC :ref:`Access Log Service (ALS) ` +* File :ref:`access log sink `. +* gRPC :ref:`Access Log Service (ALS) ` sink. 
diff --git a/docs/root/intro/arch_overview/observability/statistics.rst b/docs/root/intro/arch_overview/observability/statistics.rst index 8cab36f9c98f3..1ff0d8957811a 100644 --- a/docs/root/intro/arch_overview/observability/statistics.rst +++ b/docs/root/intro/arch_overview/observability/statistics.rst @@ -19,12 +19,12 @@ mesh give a very detailed picture of each hop and overall network health. The st documented in detail in the operations guide. As of the v2 API, Envoy has the ability to support custom, pluggable sinks. :ref:`A -few standard sink implementations` are included in Envoy. +few standard sink implementations` are included in Envoy. Some sinks also support emitting statistics with tags/dimensions. Within Envoy and throughout the documentation, statistics are identified by a canonical string representation. The dynamic portions of these strings are stripped to become tags. Users can -configure this behavior via :ref:`the Tag Specifier configuration `. +configure this behavior via :ref:`the Tag Specifier configuration `. Envoy emits three types of values as statistics: @@ -37,4 +37,4 @@ Internally, counters and gauges are batched and periodically flushed to improve Histograms are written as they are received. Note: what were previously referred to as timers have become histograms as the only difference between the two representations was the units. -* :ref:`v2 API reference `. +* :ref:`v3 API reference `. diff --git a/docs/root/intro/arch_overview/observability/tracing.rst b/docs/root/intro/arch_overview/observability/tracing.rst index 74657ff4ca996..958b003a5d9a3 100644 --- a/docs/root/intro/arch_overview/observability/tracing.rst +++ b/docs/root/intro/arch_overview/observability/tracing.rst @@ -11,7 +11,7 @@ sources of latency. Envoy supports three features related to system wide tracing * **Request ID generation**: Envoy will generate UUIDs when needed and populate the :ref:`config_http_conn_man_headers_x-request-id` HTTP header. 
Applications can forward the - x-request-id header for unified logging as well as tracing. The behavior can be configured on per :ref:`HTTP connection manager` basis using an extension. + x-request-id header for unified logging as well as tracing. The behavior can be configured on per :ref:`HTTP connection manager` basis using an extension. * **Client trace ID joining**: The :ref:`config_http_conn_man_headers_x-client-trace-id` header can be used to join untrusted request IDs to the trusted internal :ref:`config_http_conn_man_headers_x-request-id`. @@ -28,7 +28,7 @@ Support for other tracing providers would not be difficult to add. How to initiate a trace ----------------------- The HTTP connection manager that handles the request must have the :ref:`tracing -` object set. There are several ways tracing can be +` object set. There are several ways tracing can be initiated: * By an external client via the :ref:`config_http_conn_man_headers_x-client-trace-id` @@ -39,7 +39,7 @@ initiated: runtime setting. The router filter is also capable of creating a child span for egress calls via the -:ref:`start_child_span ` option. +:ref:`start_child_span ` option. Trace context propagation ------------------------- @@ -95,7 +95,7 @@ associated with it. Each span generated by Envoy contains the following data: header. * HTTP request URL, method, protocol and user-agent. * Additional custom tags set via :ref:`custom_tags - `. + `. * Upstream cluster name and address. * HTTP response status code. * GRPC response status and message (if available). @@ -103,7 +103,7 @@ associated with it. Each span generated by Envoy contains the following data: * Tracing system-specific metadata. The span also includes a name (or operation) which by default is defined as the host of the invoked -service. However this can be customized using a :ref:`envoy_api_msg_route.Decorator` on +service. However this can be customized using a :ref:`envoy_v3_api_msg_config.route.v3.Decorator` on the route. 
The name can also be overridden using the :ref:`config_http_filters_router_x-envoy-decorator-operation` header. @@ -111,5 +111,19 @@ Envoy automatically sends spans to tracing collectors. Depending on the tracing multiple spans are stitched together using common information such as the globally unique request ID :ref:`config_http_conn_man_headers_x-request-id` (LightStep) or the trace ID configuration (Zipkin and Datadog). See -:ref:`v2 API reference ` +:ref:`v3 API reference ` for more information on how to setup tracing in Envoy. + +Baggage +----------------------------- +Baggage provides a mechanism for data to be available throughout the entirety of a trace. +While metadata such as tags are usually communicated to collectors out-of-band, baggage data is injected into the actual +request context and available to applications during the duration of the request. This enables metadata to transparently +travel from the beginning of the request throughout your entire mesh without relying on application-specific modifications for +propagation. See `OpenTracing's documentation `_ for more information about baggage. + +Tracing providers have varying level of support for getting and setting baggage: + +* Lightstep (and any OpenTracing-compliant tracer) can read/write baggage +* Zipkin support is not yet implemented +* X-Ray and OpenCensus don't support baggage diff --git a/docs/root/intro/arch_overview/operations/draining.rst b/docs/root/intro/arch_overview/operations/draining.rst index 2c9045a853952..18003197c8448 100644 --- a/docs/root/intro/arch_overview/operations/draining.rst +++ b/docs/root/intro/arch_overview/operations/draining.rst @@ -3,18 +3,44 @@ Draining ======== -Draining is the process by which Envoy attempts to gracefully shed connections in response to -various events. Draining occurs at the following times: +In a few different scenarios, Envoy will attempt to gracefully shed connections. 
For instance, +during server shutdown, existing requests can be discouraged and listeners set to stop accepting, +to reduce the number of open connections when the server shuts down. Draining behaviour is defined +by the server options in addition to individual listener configs. +Draining occurs at the following times: + +* The server is being :ref:`hot restarted `. +* The server begins the graceful drain sequence via the :ref:`drain_listeners?graceful + ` admin endpoint. * The server has been manually health check failed via the :ref:`healthcheck/fail ` admin endpoint. See the :ref:`health check filter ` architecture overview for more information. -* The server is being :ref:`hot restarted `. * Individual listeners are being modified or removed via :ref:`LDS `. +By default, the Envoy server will close listeners immediately on server shutdown. To drain listeners +for some duration of time prior to server shutdown, use :ref:`drain_listeners ` +before shutting down the server. The listeners will be directly stopped without any graceful draining behaviour, +and cease accepting new connections immediately. + +To add a graceful drain period prior to listeners being closed, use the query parameter +:ref:`drain_listeners?graceful `. By default, Envoy +will discourage requests for some period of time (as determined by :option:`--drain-time-s`). +The behaviour of request discouraging is determined by the drain manager. + +Note that although draining is a per-listener concept, it must be supported at the network filter +level. Currently the only filters that support graceful draining are +:ref:`Redis `, +:ref:`Mongo `, +and :ref:`HTTP connection manager `. + +By default, the :ref:`HTTP connection manager ` filter will +add "Connection: close" to HTTP1 requests, send HTTP2 GOAWAY, and terminate connections +on request completion (after the delayed close period). + Each :ref:`configured listener ` has a :ref:`drain_type -` setting which controls when draining takes place. 
The currently +` setting which controls when draining takes place. The currently supported values are: default @@ -27,13 +53,3 @@ modify_only It may be desirable to set *modify_only* on egress listeners so they only drain during modifications while relying on ingress listener draining to perform full server draining when attempting to do a controlled shutdown. - -Note that although draining is a per-listener concept, it must be supported at the network filter -level. Currently the only filters that support graceful draining are -:ref:`HTTP connection manager `, -:ref:`Redis `, and -:ref:`Mongo `. - -Listeners can also be stopped via :ref:`drain_listeners `. In this case, -they are directly stopped (with out going through the actual draining process) on worker threads, -so that they will not accept any new requests. diff --git a/docs/root/intro/arch_overview/operations/dynamic_configuration.rst b/docs/root/intro/arch_overview/operations/dynamic_configuration.rst index e2c0a00ea14dc..458a4589d008d 100644 --- a/docs/root/intro/arch_overview/operations/dynamic_configuration.rst +++ b/docs/root/intro/arch_overview/operations/dynamic_configuration.rst @@ -13,14 +13,14 @@ overview of the options currently available. * Top level configuration :ref:`reference `. * :ref:`Reference configurations `. -* Envoy :ref:`v2 API overview `. +* Envoy :ref:`v3 API overview `. * :ref:`xDS API endpoints `. Fully static ------------ In a fully static configuration, the implementor provides a set of :ref:`listeners -` (and :ref:`filter chains `), :ref:`clusters +` (and :ref:`filter chains `), :ref:`clusters `, etc. Dynamic host discovery is only possible via DNS based :ref:`service discovery `. Configuration reloads must take place via the built in :ref:`hot restart ` mechanism. @@ -50,7 +50,7 @@ and remove clusters as specified by the API. This API allows implementors to bui which Envoy does not need to be aware of all upstream clusters at initial configuration time. 
Typically, when doing HTTP routing along with CDS (but without route discovery service), implementors will make use of the router's ability to forward requests to a cluster specified in an -:ref:`HTTP request header `. +:ref:`HTTP request header `. Although it is possible to use CDS without EDS by specifying fully static clusters, we recommend still using the EDS API for clusters specified via CDS. Internally, when a cluster definition is diff --git a/docs/root/intro/arch_overview/operations/hot_restart.rst b/docs/root/intro/arch_overview/operations/hot_restart.rst index 0add1f3fb2f19..38a4dc35c0ae1 100644 --- a/docs/root/intro/arch_overview/operations/hot_restart.rst +++ b/docs/root/intro/arch_overview/operations/hot_restart.rst @@ -26,3 +26,9 @@ hot restart functionality has the following general architecture: the processes takes place only using unix domain sockets. * An example restarter/parent process written in Python is included in the source distribution. This parent process is usable with standard process control utilities such as monit/runit/etc. + +Envoy's default command line options assume that only a single set of Envoy processes is running on +a given host: an active Envoy server process and, potentially, a draining Envoy server process that +will exit as described above. The :option:`--base-id` or :option:`--use-dynamic-base-id` options +may be used to allow multiple, distinctly configured Envoys to run on the same host and hot restart +independently. diff --git a/docs/root/intro/arch_overview/operations/init.rst b/docs/root/intro/arch_overview/operations/init.rst index 4ce245d78f51b..51effe697b3a2 100644 --- a/docs/root/intro/arch_overview/operations/init.rst +++ b/docs/root/intro/arch_overview/operations/init.rst @@ -11,16 +11,16 @@ accepting new connections. multi-phase initialization where it first initializes static/DNS clusters, then predefined :ref:`EDS ` clusters. 
Then it initializes :ref:`CDS ` if applicable, waits for one response (or failure) - for a :ref:`bounded period of time `, + for a :ref:`bounded period of time `, and does the same primary/secondary initialization of CDS provided clusters. * If clusters use :ref:`active health checking `, Envoy also does a single active health check round. * Once cluster manager initialization is done, :ref:`RDS ` and :ref:`LDS ` initialize (if applicable). The server waits - for a :ref:`bounded period of time ` + for a :ref:`bounded period of time ` for at least one response (or failure) for LDS/RDS requests. After which, it starts accepting connections. * If LDS itself returns a listener that needs an RDS response, Envoy further waits for - a :ref:`bounded period of time ` until an RDS + a :ref:`bounded period of time ` until an RDS response (or failure) is received. Note that this process takes place on every future listener addition via LDS and is known as :ref:`listener warming `. * After all of the previous steps have taken place, the listeners start accepting new connections. @@ -28,6 +28,6 @@ accepting new connections. processing new connections before the draining of the old process begins. A key design principle of initialization is that an Envoy is always guaranteed to initialize within -:ref:`initial_fetch_timeout `, +:ref:`initial_fetch_timeout `, with a best effort made to obtain the complete set of xDS configuration within that subject to the management server availability. diff --git a/docs/root/intro/arch_overview/other_protocols/grpc.rst b/docs/root/intro/arch_overview/other_protocols/grpc.rst index 87ae32984c88a..01275183b1553 100644 --- a/docs/root/intro/arch_overview/other_protocols/grpc.rst +++ b/docs/root/intro/arch_overview/other_protocols/grpc.rst @@ -47,8 +47,8 @@ control plane, where it :ref:`fetches configuration from management server(s) *gRPC services*. 
When specifying gRPC services, it's necessary to specify the use of either the -:ref:`Envoy gRPC client ` or the -:ref:`Google C++ gRPC client `. We +:ref:`Envoy gRPC client ` or the +:ref:`Google C++ gRPC client `. We discuss the tradeoffs in this choice below. The Envoy gRPC client is a minimal custom implementation of gRPC that makes use diff --git a/docs/root/intro/arch_overview/other_protocols/redis.rst b/docs/root/intro/arch_overview/other_protocols/redis.rst index d95fc35db179a..5c670dea5d451 100644 --- a/docs/root/intro/arch_overview/other_protocols/redis.rst +++ b/docs/root/intro/arch_overview/other_protocols/redis.rst @@ -27,7 +27,7 @@ The Redis project offers a thorough reference on partitioning as it relates to R * Prefix routing. * Separate downstream client and upstream server authentication. * Request mirroring for all requests or write requests only. -* Control :ref:`read requests routing`. This only works with Redis Cluster. +* Control :ref:`read requests routing`. This only works with Redis Cluster. **Planned future enhancements**: @@ -47,11 +47,11 @@ For filter configuration details, see the Redis proxy filter :ref:`configuration reference `. The corresponding cluster definition should be configured with -:ref:`ring hash load balancing `. +:ref:`ring hash load balancing `. If :ref:`active health checking ` is desired, the cluster should be configured with a :ref:`custom health check -` which configured as a +` which configured as a :ref:`Redis health checker `. If passive healthchecking is desired, also configure @@ -78,11 +78,11 @@ Envoy proxy tracks the topology of the cluster by sending periodic following information: * List of known nodes. -* The masters for each shard. +* The primaries for each shard. * Nodes entering or leaving the cluster. For topology configuration details, see the Redis Cluster -:ref:`v2 API reference `. +:ref:`v2 API reference `. 
Every Redis cluster has its own extra statistics tree rooted at *cluster..redis_cluster.* with the following statistics: @@ -96,7 +96,7 @@ Every Redis cluster has its own extra statistics tree rooted at *cluster.. .. _arch_overview_redis_cluster_command_stats: -Per-cluster command statistics can be enabled via the setting :ref:`enable_command_stats `: +Per-cluster command statistics can be enabled via the setting :ref:`enable_command_stats `.: .. csv-table:: :header: Name, Type, Description @@ -106,7 +106,7 @@ Per-cluster command statistics can be enabled via the setting :ref:`enable_comma upstream_commands.[command].failure, Counter, Total number of failed or cancelled requests for a specific Redis command upstream_commands.[command].total, Counter, Total number of requests for a specific Redis command (sum of success and failure) upstream_commands.[command].latency, Histogram, Latency of requests for a specific Redis command - + Supported commands ------------------ diff --git a/docs/root/intro/arch_overview/security/ext_authz_filter.rst b/docs/root/intro/arch_overview/security/ext_authz_filter.rst index aaa8b2a3610c9..b6935e96412d4 100644 --- a/docs/root/intro/arch_overview/security/ext_authz_filter.rst +++ b/docs/root/intro/arch_overview/security/ext_authz_filter.rst @@ -21,8 +21,8 @@ The external authorization service cluster may be either statically configured o the :ref:`Cluster Discovery Service `. If the external service is not available when a request comes in then whether the request is authorized or not is defined by the configuration setting of *failure_mode_allow* configuration in the applicable -:ref:`network filter ` or -:ref:`HTTP filter `. If it is set to +:ref:`network filter ` or +:ref:`HTTP filter `. If it is set to true then the request will be permitted (fail open) otherwise it will be denied. The default setting is *false*. 
@@ -32,7 +32,7 @@ Service Definition The context of the traffic is passed on to an external authorization service using the service definition listed here. The content of the request that are passed to an authorization service is specified by -:ref:`CheckRequest `. +:ref:`CheckRequest `. .. toctree:: :glob: diff --git a/docs/root/intro/arch_overview/security/google_vrp.rst b/docs/root/intro/arch_overview/security/google_vrp.rst new file mode 100644 index 0000000000000..b05adc3aaf32d --- /dev/null +++ b/docs/root/intro/arch_overview/security/google_vrp.rst @@ -0,0 +1,185 @@ +.. _arch_overview_google_vrp: + +Google Vulnerability Reward Program (VRP) +========================================= + +Envoy is a participant in `Google's Vulnerability Reward Program (VRP) +`_. This is open to all security +researchers and will provide rewards for vulnerabilities discovered and reported according to the +rules below. + +.. _arch_overview_google_vrp_rules: + +Rules +----- + +The goal of the VRP is to provide a formal process to honor contributions from external +security researchers to Envoy's security. Vulnerabilities should meet the following conditions +to be eligible for the program: + +1. Vulnerabilities must meet one of the below :ref:`objectives + `, demonstrated with the supplied Docker-based + :ref:`execution environment ` and be consistent with the + program's :ref:`threat model `. + +2. Vulnerabilities must be reported to envoy-security@googlegroups.com and be kept under embargo + while triage and potential security releases occur. Please follow the :repo:`disclosure guidance + ` when submitting reports. Disclosure SLOs are documented :repo:`here + `. In general, security disclosures are subject to the + `Linux Foundation's privacy policy `_ with the added + proviso that VRP reports (including reporter e-mail address and name) may be freely shared with + Google for VRP purposes. + +3. Vulnerabilities must not be previously known in a public forum, e.g. 
GitHub issues trackers, + CVE databases (when previously associated with Envoy), etc. Existing CVEs that have not been + previously associated with an Envoy vulnerability are fair game. + +4. Vulnerabilities must not be also submitted to a parallel reward program run by Google or + `Lyft `_. + +Rewards are at the discretion of the Envoy OSS security team and Google. They will be conditioned on +the above criteria. If multiple instances of the same vulnerability are reported at the same time by +independent researchers or the vulnerability is already tracked under embargo by the OSS Envoy +security team, we will aim to fairly divide the reward amongst reporters. + +.. _arch_overview_google_vrp_threat_model: + +Threat model +------------ + +The base threat model matches that of Envoy's :ref:`OSS security posture +`. We add a number of temporary restrictions to provide a constrained +attack surface for the initial stages of this program. We exclude any threat from: + +* Untrusted control planes. +* Runtime services such as access logging, external authorization, etc. +* Untrusted upstreams. +* DoS attacks except as stipulated below. +* Any filters apart from the HTTP connection manager network filter and HTTP router filter. +* Admin console; this is disabled in the execution environment. + +We also explicitly exclude any local attacks (e.g. via local processes, shells, etc.) against +the Envoy process. All attacks must occur via the network data plane on port 10000. Similarly, +kernel and Docker vulnerabilities are outside the threat model. + +In the future we may relax some of these restrictions as we increase the sophistication of the +program's execution environment. + +.. _arch_overview_google_vrp_ee: + +Execution environment +--------------------- + +We supply Docker images that act as the reference environment for this program: + +* `envoyproxy/envoy-google-vrp `_ images + are based on Envoy point releases. 
Only the latest point release at the time of vulnerability + submission is eligible for the program. The first point release available for VRP will be the + 1.15.0 Envoy release. + +* `envoyproxy/envoy-google-vrp-dev `_ + images are based on Envoy master builds. Only builds within the last 5 days at the time of + vulnerability submission are eligible for the program. They must not be subject to any + publicly disclosed vulnerability at that point in time. + +Two Envoy processes are available when these images are launched via `docker run`: + +* The *edge* Envoy is listening on ports 10000 (HTTPS). It has a :repo:`static configuration + ` that is configured according to Envoy's :ref:`edge hardening + principles `. It has sinkhole, direct response and request forwarding routing rules (in + order): + + 1. `/content/*`: route to the origin Envoy server. + 2. `/*`: return 403 (denied). + + +* The *origin* Envoy is an upstream of the edge Envoy. It has a :repo:`static configuration + ` that features only direct responses, effectively acting + as an HTTP origin server. There are two route rules (in order): + + 1. `/blockedz`: return 200 `hidden treasure`. It should never be possible to have + traffic on the Envoy edge server's 10000 port receive this response unless a + qualifying vulnerability is present. + 2. `/*`: return 200 `normal`. + +When running the Docker images, the following command line options should be supplied: + +* `-m 3g` to ensure that memory is bounded to 3GB. At least this much memory should be available + to the execution environment. Each Envoy process has an overload manager configured to limit + at 1GB. + +* `-e ENVOY_EDGE_EXTRA_ARGS="<...>"` supplies additional CLI args for the edge Envoy. This + needs to be set but can be empty. + +* `-e ENVOY_ORIGIN_EXTRA_ARGS="<...>"` supplies additional CLI args for the origin Envoy. This + needs to be set but can be empty. + +.. 
_arch_overview_google_vrp_objectives: + +Objectives +---------- + +Vulnerabilities will be evidenced by requests on 10000 that trigger a failure mode +that falls into one of these categories: + +* Query-of-death: requests that cause the Envoy process to segfault or abort + in some immediate way. +* OOM: requests that cause the edge Envoy process to OOM. There should be no more than + 100 connections and streams in total involved to cause this to happen (i.e. brute force + connection/stream DoS is excluded). +* Routing rule bypass: requests that are able to access `hidden treasure`. +* TLS certificate exfiltration: requests that are able to obtain the edge Envoy's + `serverkey.pem`. +* Remote code exploits: any root shell obtained via the network data plane. +* At the discretion of the OSS Envoy security team, sufficiently interesting vulnerabilities that + don't fit the above categories but are likely to fall into the category of high or critical + vulnerabilities. + +Working with the Docker images +------------------------------ + +A basic invocation of the execution environment that will bring up the edge Envoy on local +port 10000 looks like: + +.. code-block:: bash + + docker run -m 3g -p 10000:10000 --name envoy-google-vrp \ + -e ENVOY_EDGE_EXTRA_ARGS="" \ + -e ENVOY_ORIGIN_EXTRA_ARGS="" \ + envoyproxy/envoy-google-vrp-dev:latest + +When debugging, additional args may prove useful, e.g. in order to obtain trace logs, make +use of `wireshark` and `gdb`: + +.. code-block:: bash + + docker run -m 3g -p 10000:10000 --name envoy-google-vrp \ + -e ENVOY_EDGE_EXTRA_ARGS="-l trace" \ + -e ENVOY_ORIGIN_EXTRA_ARGS="-l trace" \ + --cap-add SYS_PTRACE --cap-add NET_RAW --cap-add NET_ADMIN \ + envoyproxy/envoy-google-vrp-dev:latest + +You can obtain a shell in the Docker container with: + +.. 
code-block:: bash + + docker exec -it envoy-google-vrp /bin/bash + +The Docker images include `gdb`, `strace`, `tshark` (feel free to contribute other +suggestions via PRs updating the :repo:`Docker build file `). + +Rebuilding the Docker image +--------------------------- + +It's helpful to be able to regenerate your own Docker base image for research purposes. +To do this without relying on CI, follow the instructions at the top of +:repo:`ci/docker_rebuild_google-vrp.sh`. An example of this flow looks like: + +.. code-block:: bash + + bazel build //source/exe:envoy-static + ./ci/docker_rebuild_google-vrp.sh bazel-bin/source/exe/envoy-static + docker run -m 3g -p 10000:10000 --name envoy-google-vrp \ + -e ENVOY_EDGE_EXTRA_ARGS="" \ + -e ENVOY_ORIGIN_EXTRA_ARGS="" \ + envoy-google-vrp:local diff --git a/docs/root/intro/arch_overview/security/jwt_authn_filter.rst b/docs/root/intro/arch_overview/security/jwt_authn_filter.rst index 848d172989747..9c53106ab146f 100644 --- a/docs/root/intro/arch_overview/security/jwt_authn_filter.rst +++ b/docs/root/intro/arch_overview/security/jwt_authn_filter.rst @@ -13,7 +13,7 @@ could be configured to either reject the request with invalid JWT immediately or to later filters by passing the JWT payload to other filters. The JWT Authentication filter supports to check the JWT under various conditions of the request, it -could be configured to check JWT only on specific paths so that you could whitelist some paths from +could be configured to check JWT only on specific paths so that you could allowlist some paths from the JWT authentication, which is useful if a path is accessible publicly and doesn't require any JWT authentication. 
diff --git a/docs/root/intro/arch_overview/security/rbac_filter.rst b/docs/root/intro/arch_overview/security/rbac_filter.rst index f5c22ebfef819..fc98580e4f84f 100644 --- a/docs/root/intro/arch_overview/security/rbac_filter.rst +++ b/docs/root/intro/arch_overview/security/rbac_filter.rst @@ -19,9 +19,9 @@ Policy ------ The RBAC filter checks the request based on a list of -:ref:`policies `. A policy consists of a list of -:ref:`permissions ` and -:ref:`principals `. The permission specifies the actions of +:ref:`policies `. A policy consists of a list of +:ref:`permissions ` and +:ref:`principals `. The permission specifies the actions of the request, for example, the method and path of a HTTP request. The principal specifies the downstream client identities of the request, for example, the URI SAN of the downstream client certificate. A policy is matched if its permissions and principals are matched at the same time. @@ -30,7 +30,7 @@ Shadow Policy ------------- The filter can be configured with a -:ref:`shadow policy ` that doesn't +:ref:`shadow policy ` that doesn't have any effect (i.e. not deny the request) but only emit stats and log the result. This is useful for testing a rule before applying in production. @@ -108,6 +108,8 @@ The following attributes are exposed to the language runtime: upstream.dns_san_peer_certificate, string, The first DNS entry in the SAN field of the peer certificate in the upstream TLS connection upstream.uri_san_local_certificate, string, The first URI entry in the SAN field of the local certificate in the upstream TLS connection upstream.uri_san_peer_certificate, string, The first URI entry in the SAN field of the peer certificate in the upstream TLS connection + upstream.local_address, string, The local address of the upstream connection + upstream.transport_failure_reason, string, The upstream transport failure reason e.g. 
certificate validation failed Most attributes are optional and provide the default value based on the type of the attribute. diff --git a/docs/root/intro/arch_overview/security/security.rst b/docs/root/intro/arch_overview/security/security.rst index 16409d759de14..4c19cdf54a28e 100644 --- a/docs/root/intro/arch_overview/security/security.rst +++ b/docs/root/intro/arch_overview/security/security.rst @@ -5,6 +5,7 @@ Security :maxdepth: 2 threat_model + google_vrp ssl jwt_authn_filter ext_authz_filter diff --git a/docs/root/intro/arch_overview/security/ssl.rst b/docs/root/intro/arch_overview/security/ssl.rst index ebcb2e8f6838e..4a5d4f0ea2469 100644 --- a/docs/root/intro/arch_overview/security/ssl.rst +++ b/docs/root/intro/arch_overview/security/ssl.rst @@ -3,8 +3,8 @@ TLS === -Envoy supports both :ref:`TLS termination ` in listeners as well as -:ref:`TLS origination ` when making connections to upstream +Envoy supports both :ref:`TLS termination ` in listeners as well as +:ref:`TLS origination ` when making connections to upstream clusters. Support is sufficient for Envoy to perform standard edge proxy duties for modern web services as well as to initiate connections with external services that have advanced TLS requirements (TLS1.2, SNI, etc.). Envoy supports the following TLS features: @@ -15,7 +15,7 @@ requirements (TLS1.2, SNI, etc.). Envoy supports the following TLS features: * **Certificate verification and pinning**: Certificate verification options include basic chain verification, subject name verification, and hash pinning. * **Certificate revocation**: Envoy can check peer certificates against a certificate revocation list - (CRL) if one is :ref:`provided `. + (CRL) if one is :ref:`provided `. * **ALPN**: TLS listeners support ALPN. The HTTP connection manager uses this information (in addition to protocol inference) to determine whether a client is speaking HTTP/1.1 or HTTP/2. 
* **SNI**: SNI is supported for both server (listener) and client (upstream) connections. @@ -42,7 +42,7 @@ FIPS 140-2 BoringSSL can be built in a `FIPS-compliant mode `_, following the build instructions from the `Security Policy for BoringCrypto module -`_, +`_, using ``--define boringssl=fips`` Bazel option. Currently, this option is only available on Linux-x86_64. The correctness of the FIPS build can be verified by checking the presence of ``BoringSSL-FIPS`` @@ -53,11 +53,11 @@ it's not sufficient by itself, and depending on the context, additional steps mi The extra requirements may include using only approved algorithms and/or using only private keys generated by a module operating in FIPS-approved mode. For more information, please refer to the `Security Policy for BoringCrypto module -`_ +`_ and/or an `accredited CMVP laboratory `_. Please note that the FIPS-compliant build is based on an older version of BoringSSL than -the non-FIPS build, and it predates the final version of TLS 1.3. +the non-FIPS build, and it doesn't support the most recent QUIC APIs. .. 
_arch_overview_ssl_enabling_verification: @@ -83,7 +83,7 @@ Example configuration transport_socket: name: envoy.transport_sockets.tls typed_config: - "@type": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext common_tls_context: validation_context: trusted_ca: @@ -105,19 +105,20 @@ Example configuration transport_socket: name: envoy.transport_sockets.tls typed_config: - "@type": type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext common_tls_context: tls_certificates: certificate_chain: { "filename": "/cert.crt" } private_key: { "filename": "/cert.key" } validation_context: - verify_subject_alt_name: [ foo ] + match_subject_alt_names: + exact: "foo" trusted_ca: filename: /etc/ssl/certs/ca-certificates.crt */etc/ssl/certs/ca-certificates.crt* is the default path for the system CA bundle on Debian systems. -:ref:`trusted_ca ` along with -:ref:`verify_subject_alt_name ` +:ref:`trusted_ca ` along with +:ref:`match_subject_alt_names ` makes Envoy verify the server identity of *127.0.0.2:1234* as "foo" in the same way as e.g. cURL does on standard Debian installations. Common paths for system CA bundles on Linux and BSD are: @@ -128,12 +129,12 @@ does on standard Debian installations. Common paths for system CA bundles on Lin * /usr/local/etc/ssl/cert.pem (FreeBSD) * /etc/ssl/cert.pem (OpenBSD) -See the reference for :ref:`UpstreamTlsContexts ` and -:ref:`DownstreamTlsContexts ` for other TLS options. +See the reference for :ref:`UpstreamTlsContexts ` and +:ref:`DownstreamTlsContexts ` for other TLS options. .. attention:: - If only :ref:`trusted_ca ` is + If only :ref:`trusted_ca ` is specified, Envoy will verify the certificate chain of the presented certificate, but not its subject name, hash, etc. 
Other validation context configuration is typically required depending on the deployment. @@ -143,22 +144,22 @@ See the reference for :ref:`UpstreamTlsContexts ` support multiple TLS +:ref:`DownstreamTlsContexts ` support multiple TLS certificates. These may be a mix of RSA and P-256 ECDSA certificates. The following rules apply: * Only one certificate of a particular type (RSA or ECDSA) may be specified. * Non-P-256 server ECDSA certificates are rejected. * If the client supports P-256 ECDSA, a P-256 ECDSA certificate will be selected if present in the - :ref:`DownstreamTlsContext `. + :ref:`DownstreamTlsContext `. * If the client only supports RSA certificates, a RSA certificate will be selected if present in the - :ref:`DownstreamTlsContext `. + :ref:`DownstreamTlsContext `. * Otherwise, the first certificate listed is used. This will result in a failed handshake if the client only supports RSA certificates and the server only has ECDSA certificates. * Static and SDS certificates may not be mixed in a given :ref:`DownstreamTlsContext - `. + `. Only a single TLS certificate is supported today for :ref:`UpstreamTlsContexts -`. +`. Secret discovery service (SDS) ------------------------------ @@ -174,7 +175,7 @@ Authentication filter Envoy provides a network filter that performs TLS client authentication via principals fetched from a REST VPN service. This filter matches the presented client certificate hash against the principal -list to determine whether the connection should be allowed or not. Optional IP white listing can +list to determine whether the connection should be allowed or not. Optional IP allowlisting can also be configured. This functionality can be used to build edge proxy VPN support for web infrastructure. 
@@ -188,7 +189,7 @@ Trouble shooting When Envoy originates TLS when making connections to upstream clusters, any errors will be logged into :ref:`UPSTREAM_TRANSPORT_FAILURE_REASON` field or -:ref:`AccessLogCommon.upstream_transport_failure_reason` field. +:ref:`AccessLogCommon.upstream_transport_failure_reason` field. Common errors are: * ``Secret is not supplied by SDS``: Envoy is still waiting SDS to deliver key/cert or root CA. diff --git a/docs/root/intro/arch_overview/security/threat_model.rst b/docs/root/intro/arch_overview/security/threat_model.rst index 765d1ba0d3a74..f0e4713e672be 100644 --- a/docs/root/intro/arch_overview/security/threat_model.rst +++ b/docs/root/intro/arch_overview/security/threat_model.rst @@ -15,16 +15,34 @@ highest priority concerns. Availability, in particular in areas relating to DoS exhaustion, is also a serious security concern for Envoy operators, in particular those utilizing Envoy in edge deployments. -The Envoy availability stance around CPU and memory DoS, as well as Query-of-Death (QoD), is still -evolving. We will continue to iterate and fix well known resource issues in the open, e.g. overload -manager and watermark improvements. We will activate the security process for disclosures that -appear to present a risk profile that is significantly greater than the current Envoy availability -hardening status quo. Examples of disclosures that would elicit this response: - -* QoD; where a single query from a client can bring down an Envoy server. - -* Highly asymmetric resource exhaustion attacks, where very little traffic can cause resource exhaustion, - e.g. that delivered by a single client. +We will activate the security release process for disclosures that meet the following criteria: + +* All issues that lead to loss of data confidentiality or integrity trigger the security release process. 
+* An availability issue, such as Query-of-Death (QoD) or resource exhaustion, needs to meet all of the + following criteria to trigger the security release process: + + - A component tagged as hardened is affected (see `Core and extensions`_ for the list of hardened components). + + - The type of traffic (upstream or downstream) that exhibits the issue matches the component's hardening tag. + I.e. component tagged as “hardened to untrusted downstream” is affected by downstream request. + + - A resource exhaustion issue needs to meet these additional criteria: + + + Not covered by an existing timeout or where applying short timeout values is impractical and either + + + Memory exhaustion, including out of memory conditions, where per-request memory use is 100x or more above + the configured header or high watermark limit. I.e. 10 KiB client request leading to 1 MiB of + memory consumed by Envoy; + + + Highly asymmetric CPU utilization where Envoy uses 100x or more CPU compared to client. + + +The Envoy availability stance around CPU and memory DoS is still evolving, especially for brute force +attacks. We acknowledge that brute force (i.e. those with amplification factor less than 100) attacks are +likely for Envoy deployments as part of cloud infrastructure or with the use of botnets. We will continue +to iterate and fix well known resource issues in the open, e.g. overload manager and watermark improvements. +We will activate the security process for brute force disclosures that appear to present a risk to +existing Envoy deployments. Note that we do not currently consider the default settings for Envoy to be safe from an availability perspective. 
It is necessary for operators to explicitly :ref:`configure ` diff --git a/docs/root/intro/arch_overview/upstream/aggregate_cluster.rst b/docs/root/intro/arch_overview/upstream/aggregate_cluster.rst index 96216c5651457..e51ea4a1a8297 100644 --- a/docs/root/intro/arch_overview/upstream/aggregate_cluster.rst +++ b/docs/root/intro/arch_overview/upstream/aggregate_cluster.rst @@ -7,8 +7,8 @@ Aggregate cluster is used for failover between clusters with different configura upstream cluster to STRICT_DNS upstream cluster, from cluster using ROUND_ROBIN load balancing policy to cluster using MAGLEV, from cluster with 0.1s connection timeout to cluster with 1s connection timeout, etc. Aggregate cluster loosely couples multiple clusters by referencing their -name in the :ref:`configuration `. The -fallback priority is defined implicitly by the ordering in the :ref:`clusters list `. +name in the :ref:`configuration `. The +fallback priority is defined implicitly by the ordering in the :ref:`clusters list `. Aggregate cluster uses tiered load balancing. The load balancer chooses cluster and priority first and then delegates the load balancing to the load balancer of the selected cluster. The top level load balancer reuses the existing load balancing algorithm by linearizing the priority set of @@ -54,14 +54,14 @@ A sample aggregate cluster configuration could be: cluster_type: name: envoy.clusters.aggregate typed_config: - "@type": type.googleapis.com/envoy.config.cluster.aggregate.v2alpha.ClusterConfig + "@type": type.googleapis.com/envoy.extensions.clusters.aggregate.v3.ClusterConfig clusters: # cluster primary, secondary and tertiary should be defined outside. - primary - secondary - tertiary -Note: :ref:`PriorityLoad retry plugins ` won't +Note: :ref:`PriorityLoad retry plugins ` won't work for aggregate cluster because the aggregate load balancer will override the *PriorityLoad* during load balancing. 
diff --git a/docs/root/intro/arch_overview/upstream/circuit_breaking.rst b/docs/root/intro/arch_overview/upstream/circuit_breaking.rst index 532d9645bd56c..5c808e4d4c06c 100644 --- a/docs/root/intro/arch_overview/upstream/circuit_breaking.rst +++ b/docs/root/intro/arch_overview/upstream/circuit_breaking.rst @@ -13,12 +13,21 @@ configure and code each application independently. Envoy supports various types * **Cluster maximum connections**: The maximum number of connections that Envoy will establish to all hosts in an upstream cluster. If this circuit breaker overflows the :ref:`upstream_cx_overflow - ` counter for the cluster will increment. + ` counter for the cluster will increment. All connections, + whether active or draining, count against this limit. Even if this circuit breaker has overflowed, + Envoy will ensure that a host selected by cluster load balancing has at least one connection + allocated. This has the implication that the :ref:`upstream_cx_active + ` count for a cluster may be higher than the cluster maximum + connection circuit breaker, with an upper bound of + `cluster maximum connections + (number of endpoints in a cluster) * (connection pools for the + cluster)`. This bound applies to the sum of connections across all workers threads. See + :ref:`connection pooling ` for details on how many connection + pools a cluster may have. * **Cluster maximum pending requests**: The maximum number of requests that will be queued while waiting for a ready connection pool connection. Requests are added to the list of pending requests whenever there aren't enough upstream connections available to immediately dispatch - the request. For HTTP/2 connections, if :ref:`max concurrent streams ` - and :ref:`max requests per connection ` are not + the request. 
For HTTP/2 connections, if :ref:`max concurrent streams ` + and :ref:`max requests per connection ` are not + configured, all requests will be multiplexed over the same connection so this circuit breaker will only be hit when no connection is already established. If this circuit breaker overflows the :ref:`upstream_rq_pending_overflow ` counter for the cluster will @@ -27,7 +36,7 @@ configure and code each application independently. Envoy supports various types in a cluster at any given time. If this circuit breaker overflows the :ref:`upstream_rq_pending_overflow ` counter for the cluster will increment. * **Cluster maximum active retries**: The maximum number of retries that can be outstanding to all - hosts in a cluster at any given time. In general we recommend using :ref:`retry budgets `; however, if static circuit breaking is preferred it should aggressively circuit break + hosts in a cluster at any given time. In general we recommend using :ref:`retry budgets `; however, if static circuit breaking is preferred it should aggressively circuit break retries. This is so that retries for sporadic failures are allowed, but the overall retry volume cannot explode and cause large scale cascading failure. If this circuit breaker overflows the :ref:`upstream_rq_retry_overflow ` counter for the cluster @@ -57,6 +66,15 @@ the distributed system to be tuned independently and have different limits. The circuit breakers, including the number of resources remaining until a circuit breaker opens, can be observed via :ref:`statistics `. +Worker threads share circuit breaker limits, i.e. if the active connection threshold is 500, worker +thread 1 has 498 connections active, then worker thread 2 can only allocate 2 more connections. +Since the implementation is eventually consistent, races between threads may allow limits to be +potentially exceeded. + +Circuit breakers are enabled by default and have modest default values, e.g. 1024 connections per +cluster. 
To disable circuit breakers, set the :ref:`thresholds ` to +the highest allowed values. + Note that circuit breaking will cause the :ref:`x-envoy-overloaded ` header to be set by the router filter in the case of HTTP requests. diff --git a/docs/root/intro/arch_overview/upstream/connection_pooling.rst b/docs/root/intro/arch_overview/upstream/connection_pooling.rst index ebb0031e4e971..2b239f479b3b0 100644 --- a/docs/root/intro/arch_overview/upstream/connection_pooling.rst +++ b/docs/root/intro/arch_overview/upstream/connection_pooling.rst @@ -21,14 +21,36 @@ HTTP/2 ------ The HTTP/2 connection pool multiplexes multiple requests over a single connection, up to the limits -imposed by :ref:`max concurrent streams ` -and :ref:`max requests per connection `. -The HTTP/2 connection pool establishes only as many connections as are needed to serve the current -requests. With no limits, this will be only a single connection. If a GOAWAY frame is received or -if the connection reaches the maximum stream limit, the connection pool will drain the existing one. -New connections are established anytime there is a pending request without a connection that it can -be dispatched to (up to circuit breaker limits for connections). -HTTP/2 is the preferred communication protocol as connections rarely if ever get severed. +imposed by :ref:`max concurrent streams +` and :ref:`max +requests per connection `. +The HTTP/2 connection pool establishes as many connections as are needed to serve requests. With no +limits, this will be only a single connection. If a GOAWAY frame is received or if the connection +reaches the :ref:`maximum requests per connection +` limit, the connection +pool will drain the affected connection. Once a connection reaches its :ref:`maximum concurrent +stream limit `, it +will be marked as busy until a stream is available. 
New connections are established anytime there is +a pending request without a connection that can be dispatched to (up to circuit breaker limits for +connections). HTTP/2 is the preferred communication protocol, as connections rarely, if ever, get +severed. + +.. _arch_overview_conn_pool_how_many: + +Number of connection pools +-------------------------- + +Each host in each cluster will have one or more connection pools. If the cluster is HTTP/1 or HTTP/2 +only, then the host may have only a single connection pool. However, if the cluster supports multiple +upstream protocols, then at least one connection pool per protocol will be allocated. Separate +connection pools are also allocated for each of the following features: + +* :ref:`Routing priority ` +* :ref:`Socket options ` +* :ref:`Transport socket (e.g. TLS) options ` + +Each worker thread maintains its own connection pools for each cluster, so if an Envoy has two +threads and a cluster with both HTTP/1 and HTTP/2 support, there will be at least 4 connection pools. .. _arch_overview_conn_pool_health_checking: diff --git a/docs/root/intro/arch_overview/upstream/health_checking.rst b/docs/root/intro/arch_overview/upstream/health_checking.rst index c63b59ece132b..d6b7bf9cc6187 100644 --- a/docs/root/intro/arch_overview/upstream/health_checking.rst +++ b/docs/root/intro/arch_overview/upstream/health_checking.rst @@ -13,7 +13,7 @@ unhealthy, successes required before marking a host healthy, etc.): * **HTTP**: During HTTP health checking Envoy will send an HTTP request to the upstream host. By default, it expects a 200 response if the host is healthy. Expected response codes are - :ref:`configurable `. The + :ref:`configurable `. The upstream host can return 503 if it wants to immediately notify downstream hosts to no longer forward traffic to it. 
* **L3/L4**: During L3/L4 health checking, Envoy will send a configurable byte buffer to the @@ -24,13 +24,13 @@ unhealthy, successes required before marking a host healthy, etc.): failure. Optionally, Envoy can perform EXISTS on a user-specified key. If the key does not exist it is considered a passing healthcheck. This allows the user to mark a Redis instance for maintenance by setting the specified key to any value and waiting for traffic to drain. See - :ref:`redis_key `. + :ref:`redis_key `. Health checks occur over the transport socket specified for the cluster. This implies that if a cluster is using a TLS-enabled transport socket, the health check will also occur over TLS. The -:ref:`TLS options ` used for health check connections +:ref:`TLS options ` used for health check connections can be specified, which is useful if the corresponding upstream is using ALPN-based -:ref:`FilterChainMatch ` with different protocols for +:ref:`FilterChainMatch ` with different protocols for health checks versus data connections. .. _arch_overview_per_cluster_health_check_config: @@ -40,14 +40,14 @@ Per cluster member health check config If active health checking is configured for an upstream cluster, a specific additional configuration for each registered member can be specified by setting the -:ref:`HealthCheckConfig` -in the :ref:`Endpoint` of an :ref:`LbEndpoint` -of each defined :ref:`LocalityLbEndpoints` in a -:ref:`ClusterLoadAssignment`. +:ref:`HealthCheckConfig` +in the :ref:`Endpoint` of an :ref:`LbEndpoint` +of each defined :ref:`LocalityLbEndpoints` in a +:ref:`ClusterLoadAssignment`. -An example of setting up :ref:`health check config` -to set a :ref:`cluster member`'s alternative health check -:ref:`port` is: +An example of setting up :ref:`health check config` +to set a :ref:`cluster member`'s alternative health check +:ref:`port` is: .. 
code-block:: yaml @@ -68,12 +68,12 @@ Health check event logging -------------------------- A per-healthchecker log of ejection and addition events can optionally be produced by Envoy by -specifying a log file path in :ref:`the HealthCheck config `. +specifying a log file path in :ref:`the HealthCheck config `. The log is structured as JSON dumps of -:ref:`HealthCheckEvent messages `. +:ref:`HealthCheckEvent messages `. Envoy can be configured to log all health check failure events by setting the :ref:`always_log_health_check_failures -flag ` to true. +flag ` to true. Passive health checking ----------------------- @@ -100,7 +100,7 @@ operation: Envoy will respond with a 200 or a 503 depending on the current draining state of the server. * **No pass through, computed from upstream cluster health**: In this mode, the health checking filter will return a 200 or a 503 depending on whether at least a :ref:`specified percentage - ` + ` of the servers are available (healthy + degraded) in one or more upstream clusters. (If the Envoy server is in a draining state, though, it will respond with a 503 regardless of the upstream cluster health.) @@ -151,7 +151,7 @@ is having a different HTTP health checking URL for every service type. The downs is that overall configuration becomes more complicated as every health check URL is fully custom. The Envoy HTTP health checker supports the :ref:`service_name_matcher -` option. If this option is set, +` option. If this option is set, the health checker additionally compares the value of the *x-envoy-upstream-healthchecked-cluster* response header to *service_name_matcher*. If the values do not match, the health check does not pass. The upstream health check filter appends *x-envoy-upstream-healthchecked-cluster* to the response headers. 
diff --git a/docs/root/intro/arch_overview/upstream/load_balancing/load_balancers.rst b/docs/root/intro/arch_overview/upstream/load_balancing/load_balancers.rst index 48605a7ad6d13..38b6c6fae88ab 100644 --- a/docs/root/intro/arch_overview/upstream/load_balancing/load_balancers.rst +++ b/docs/root/intro/arch_overview/upstream/load_balancing/load_balancers.rst @@ -6,10 +6,10 @@ Supported load balancers When a filter needs to acquire a connection to a host in an upstream cluster, the cluster manager uses a load balancing policy to determine which host is selected. The load balancing policies are pluggable and are specified on a per upstream cluster basis in the :ref:`configuration -`. Note that if no active health checking policy is :ref:`configured +`. Note that if no active health checking policy is :ref:`configured ` for a cluster, all upstream cluster members are considered healthy, unless otherwise specified through -:ref:`health_status `. +:ref:`health_status `. .. _arch_overview_load_balancing_types_round_robin: @@ -18,7 +18,7 @@ Weighted round robin This is a simple policy in which each available upstream host is selected in round robin order. If :ref:`weights -` are assigned to +` are assigned to endpoints in a locality, then a weighted round robin schedule is used, where higher weighted endpoints will appear more often in the rotation to achieve the effective weighting. @@ -32,7 +32,7 @@ The least request load balancer uses different algorithms depending on whether h same or different weights. * *all weights equal*: An O(1) algorithm which selects N random available hosts as specified in the - :ref:`configuration ` (2 by default) and picks the + :ref:`configuration ` (2 by default) and picks the host which has the fewest active requests (`Mitzenmacher et al. `_ has shown that this approach is nearly as good as an O(N) full scan). This is also known as P2C (power of two @@ -41,11 +41,25 @@ same or different weights. 
less than or equal to all of the other hosts. * *all weights not equal*: If two or more hosts in the cluster have different load balancing weights, the load balancer shifts into a mode where it uses a weighted round robin schedule in - which weights are dynamically adjusted based on the host's request load at the time of selection - (weight is divided by the current active request count. For example, a host with weight 2 and an - active request count of 4 will have a synthetic weight of 2 / 4 = 0.5). This algorithm provides - good balance at steady state but may not adapt to load imbalance as quickly. Additionally, unlike - P2C, a host will never truly drain, though it will receive fewer requests over time. + which weights are dynamically adjusted based on the host's request load at the time of selection. + + In this case the weights are calculated at the time a host is picked using the following formula: + + `weight = load_balancing_weight / (active_requests + 1)^active_request_bias`. + + :ref:`active_request_bias` + can be configured via runtime and defaults to 1.0. It must be greater than or equal to 0.0. + + The larger the active request bias is, the more aggressively active requests will lower the + effective weight. + + If `active_request_bias` is set to 0.0, the least request load balancer behaves like the round + robin load balancer and ignores the active request count at the time of picking. + + For example, if active_request_bias is 1.0, a host with weight 2 and an active request count of 4 + will have an effective weight of 2 / (4 + 1)^1 = 0.4. This algorithm provides good balance at + steady state but may not adapt to load imbalance as quickly. Additionally, unlike P2C, a host will + never truly drain, though it will receive fewer requests over time. .. 
_arch_overview_load_balancing_types_ring_hash: @@ -66,8 +80,8 @@ partitioning of the circle, however, since the computed hashes could be coincide one another; so it is necessary to multiply the number of hashes per host---for example inserting 100 entries on the ring for host A and 200 entries for host B---to better approximate the desired distribution. Best practice is to explicitly set -:ref:`minimum_ring_size` and -:ref:`maximum_ring_size`, and monitor +:ref:`minimum_ring_size` and +:ref:`maximum_ring_size`, and monitor the :ref:`min_hashes_per_host and max_hashes_per_host gauges` to ensure good distribution. With the ring partitioned appropriately, the addition or removal of one host from a set of N hosts will diff --git a/docs/root/intro/arch_overview/upstream/load_balancing/locality_weight.rst b/docs/root/intro/arch_overview/upstream/load_balancing/locality_weight.rst index 1003d98418ed8..d5abaa4c82ed4 100644 --- a/docs/root/intro/arch_overview/upstream/load_balancing/locality_weight.rst +++ b/docs/root/intro/arch_overview/upstream/load_balancing/locality_weight.rst @@ -5,7 +5,7 @@ Locality weighted load balancing One approach to determining how to weight assignments across different zones and geographical locations is by using explicit weights supplied via EDS in the -:ref:`LocalityLbEndpoints ` message. +:ref:`LocalityLbEndpoints ` message. This approach is mutually exclusive with :ref:`zone aware routing `, since in the case of locality aware LB, we rely on the management server to provide the @@ -59,10 +59,12 @@ picked. The load balancer follows these steps: Locality weighted load balancing is configured by setting :ref:`locality_weighted_lb_config -` in the -cluster configuration and providing weights in :ref:`LocalityLbEndpoints -` via :ref:`load_balancing_weight -`. 
+` in the +cluster configuration and by providing weights via :ref:`load_balancing_weight +` and +identifying the location of the upstream hosts via :ref:`locality +` in +:ref:`LocalityLbEndpoints `. This feature is not compatible with :ref:`load balancer subsetting `, since it is not straightforward to diff --git a/docs/root/intro/arch_overview/upstream/load_balancing/original_dst.rst b/docs/root/intro/arch_overview/upstream/load_balancing/original_dst.rst index 212a7e9e645b7..38b489476442a 100644 --- a/docs/root/intro/arch_overview/upstream/load_balancing/original_dst.rst +++ b/docs/root/intro/arch_overview/upstream/load_balancing/original_dst.rst @@ -8,8 +8,8 @@ cluster `. Upstream based on the downstream connection metadata, i.e., connections are opened to the same address as the destination address of the incoming connection was before the connection was redirected to Envoy. New destinations are added to the cluster by the load balancer on-demand, and the cluster -:ref:`periodically ` cleans out unused hosts -from the cluster. No other :ref:`load balancing policy ` can +:ref:`periodically ` cleans out unused hosts +from the cluster. No other :ref:`load balancing policy ` can be used with original destination clusters. .. _arch_overview_load_balancing_types_original_destination_request_header: diff --git a/docs/root/intro/arch_overview/upstream/load_balancing/overprovisioning.rst b/docs/root/intro/arch_overview/upstream/load_balancing/overprovisioning.rst index e04cf55722131..e8fa36127fdf1 100644 --- a/docs/root/intro/arch_overview/upstream/load_balancing/overprovisioning.rst +++ b/docs/root/intro/arch_overview/upstream/load_balancing/overprovisioning.rst @@ -3,8 +3,8 @@ Overprovisioning Factor ----------------------- Priority levels and localities are considered overprovisioned with -:ref:`this percentage `. +:ref:`this percentage `. 
Envoy doesn't consider a priority level or locality unavailable until the -percentage of available hosts multiplied by the overprovisioning factor drops -below 100. The default value is 1.4, so a priority level or locality will not be +fraction of available hosts multiplied by the overprovisioning factor drops +below 100. The default value is 140 (in percentage, which means 140%), so a priority level or locality will not be considered unavailable until the percentage of available endpoints goes below 72%. diff --git a/docs/root/intro/arch_overview/upstream/load_balancing/panic_threshold.rst b/docs/root/intro/arch_overview/upstream/load_balancing/panic_threshold.rst index 2fcfafb4e0573..86996cfb891a3 100644 --- a/docs/root/intro/arch_overview/upstream/load_balancing/panic_threshold.rst +++ b/docs/root/intro/arch_overview/upstream/load_balancing/panic_threshold.rst @@ -8,13 +8,13 @@ an upstream cluster. However, if the percentage of available hosts in the cluste Envoy will disregard health status and balance either amongst all hosts or no hosts. This is known as the *panic threshold*. The default panic threshold is 50%. This is :ref:`configurable ` via runtime as well as in the -:ref:`cluster configuration `. +:ref:`cluster configuration `. The panic threshold is used to avoid a situation in which host failures cascade throughout the cluster as load increases. There are two modes Envoy can choose from when in a panic state: traffic will either be sent to all hosts, or will be sent to no hosts (and therefore will always fail). This is configured in the -:ref:`cluster configuration `. +:ref:`cluster configuration `. Choosing to fail traffic during panic scenarios can help avoid overwhelming potentially failing upstream services, as it will reduce the load on the upstream service before all hosts have been determined to be unhealthy. 
However, it eliminates the possibility of _some_ requests succeeding diff --git a/docs/root/intro/arch_overview/upstream/load_balancing/priority.rst b/docs/root/intro/arch_overview/upstream/load_balancing/priority.rst index 23bc5fb6f22dd..0ce7f090ca877 100644 --- a/docs/root/intro/arch_overview/upstream/load_balancing/priority.rst +++ b/docs/root/intro/arch_overview/upstream/load_balancing/priority.rst @@ -4,7 +4,7 @@ Priority levels ------------------ During load balancing, Envoy will generally only consider hosts configured at the highest priority -level. For each EDS :ref:`LocalityLbEndpoints` an optional +level. For each EDS :ref:`LocalityLbEndpoints` an optional priority may also be specified. When endpoints at the highest priority level (P=0) are healthy, all traffic will land on endpoints in that priority level. As endpoints for the highest priority level become unhealthy, traffic will begin to trickle to lower priority levels. diff --git a/docs/root/intro/arch_overview/upstream/load_balancing/subsets.rst b/docs/root/intro/arch_overview/upstream/load_balancing/subsets.rst index 6543ca31ad691..789bf822e187b 100644 --- a/docs/root/intro/arch_overview/upstream/load_balancing/subsets.rst +++ b/docs/root/intro/arch_overview/upstream/load_balancing/subsets.rst @@ -13,7 +13,7 @@ not be used with subsets because the upstream hosts are not known in advance. Su with zone aware routing, but be aware that the use of subsets may easily violate the minimum hosts condition described above. -If subsets are :ref:`configured ` and a route +If subsets are :ref:`configured ` and a route specifies no metadata or no subset matching the metadata exists, the subset load balancer initiates its fallback policy. The default policy is ``NO_FALLBACK``, in which case the request fails as if the cluster had no hosts. Conversely, the ``ANY_ENDPOINT`` fallback policy load balances across all @@ -36,8 +36,8 @@ balancing to occur. 
This feature can only be enabled using the V2 configuration API. Furthermore, host metadata is only supported when hosts are defined using -:ref:`ClusterLoadAssignments `. ClusterLoadAssignments are -available via EDS or the Cluster :ref:`load_assignment ` +:ref:`ClusterLoadAssignments `. ClusterLoadAssignments are +available via EDS or the Cluster :ref:`load_assignment ` field. Host metadata for subset load balancing must be placed under the filter name ``"envoy.lb"``. Similarly, route metadata match criteria use ``"envoy.lb"`` filter name. Host metadata may be hierarchical (e.g., the value for a top-level key may be a structured value or list), but the @@ -46,8 +46,7 @@ values, a route's match criteria will only match if an identical structured valu host's metadata. Finally, note that subset load balancing is not available for the -:ref:`ORIGINAL_DST_LB ` or -:ref:`CLUSTER_PROVIDED ` load balancer +:ref:`CLUSTER_PROVIDED ` load balancer policies. Examples diff --git a/docs/root/intro/arch_overview/upstream/load_reporting_service.rst b/docs/root/intro/arch_overview/upstream/load_reporting_service.rst new file mode 100644 index 0000000000000..669bce1605924 --- /dev/null +++ b/docs/root/intro/arch_overview/upstream/load_reporting_service.rst @@ -0,0 +1,15 @@ +.. _arch_overview_load_reporting_service: + +Load Reporting Service (LRS) +============================ + +The Load Reporting Service provides a mechanism by which Envoy can emit Load Reports to a management +server at a regular cadence. + +This will initiate a bi-directional stream with a management server. Upon connecting, the management +server can send a :ref:`LoadStatsResponse ` +to a node it is interested in getting the load reports for. Envoy in this node will start sending +:ref:`LoadStatsRequest `. This is done periodically +based on the :ref:`load reporting interval ` + +Envoy config with LRS can be found at :repo:`/examples/load-reporting-service/service-envoy-w-lrs.yaml`. 
diff --git a/docs/root/intro/arch_overview/upstream/outlier.rst b/docs/root/intro/arch_overview/upstream/outlier.rst index a2ac3ed5fceeb..fd9dc7158a744 100644 --- a/docs/root/intro/arch_overview/upstream/outlier.rst +++ b/docs/root/intro/arch_overview/upstream/outlier.rst @@ -10,35 +10,35 @@ such as consecutive failures, temporal success rate, temporal latency, etc. Outl form of *passive* health checking. Envoy also supports :ref:`active health checking `. *Passive* and *active* health checking can be enabled together or independently, and form the basis for an overall upstream health checking solution. -Outlier detection is part of :ref:`cluster configuration ` -and it needs filters to report errors, timeouts, resets. Currently the following filters support +Outlier detection is part of the :ref:`cluster configuration ` +and it needs filters to report errors, timeouts, and resets. Currently, the following filters support outlier detection: :ref:`http router `, :ref:`tcp proxy ` and :ref:`redis proxy `. Detected errors fall into two categories: externally and locally originated errors. Externally generated errors -are transaction specific and occur on the upstream server in response to the received request. For example, HTTP server returning error code 500 or redis server returning payload which cannot be decoded. Those errors are generated on the upstream host after Envoy has successfully connected to it. -Locally originated errors are generated by Envoy in response to an event which interrupted or prevented communication with the upstream host. Examples of locally originated errors are timeout, TCP reset, inability to connect to a specified port, etc. +are transaction specific and occur on the upstream server in response to the received request. For example, an HTTP server returning error code 500 or a redis server returning a payload which cannot be decoded. Those errors are generated on the upstream host after Envoy has connected to it successfully. 
+Locally originated errors are generated by Envoy in response to an event which interrupted or prevented communication with the upstream host. Examples of locally originated errors are timeout, TCP reset, inability to connect to a specified port, etc. -Type of detected errors depends on filter type. :ref:`http router ` filter for example +The type of detected errors depends on the filter type. The :ref:`http router ` filter, for example, detects locally originated errors (timeouts, resets - errors related to connection to upstream host) and because it -also understands HTTP protocol it reports -errors returned by HTTP server (externally generated errors). In such scenario, even when connection to upstream HTTP server is successful, -transaction with the server may fail. -On the contrary, :ref:`tcp proxy ` filter does not understand any protocol above -TCP layer and reports only locally originated errors. - -In default configuration (:ref:`outlier_detection.split_external_local_origin_errors` is *false*) -locally originated errors are not distinguished from externally generated (transaction) errors and all end up -in the same bucket and are compared against -:ref:`outlier_detection.consecutive_5xx`, -:ref:`outlier_detection.consecutive_gateway_failure` and -:ref:`outlier_detection.success_rate_stdev_factor` +also understands the HTTP protocol it reports +errors returned by the HTTP server (externally generated errors). In such a scenario, even when the connection to the upstream HTTP server is successful, +the transaction with the server may fail. +By contrast, the :ref:`tcp proxy ` filter does not understand any protocol above +the TCP layer and reports only locally originated errors. 
+ +Under the default configuration (:ref:`outlier_detection.split_external_local_origin_errors` is *false*) +locally originated errors are not distinguished from externally generated (transaction) errors, all end up +in the same bucket, and are compared against the +:ref:`outlier_detection.consecutive_5xx`, +:ref:`outlier_detection.consecutive_gateway_failure` and +:ref:`outlier_detection.success_rate_stdev_factor` configuration items. For example, if connection to an upstream HTTP server fails twice because of timeout and -then, after successful connection, the server returns error code 500, the total error count will be 3. +then, after successful connection establishment, the server returns error code 500, then the total error count will be 3. Outlier detection may also be configured to distinguish locally originated errors from externally originated (transaction) errors. -It is done via -:ref:`outlier_detection.split_external_local_origin_errors` configuration item. +It is done via the +:ref:`outlier_detection.split_external_local_origin_errors` configuration item. In that mode locally originated errors are tracked in separate counters from externally originated (transaction) errors and the outlier detector may be configured to react to locally originated errors and ignore externally originated errors @@ -46,7 +46,7 @@ or vice-versa. It is important to understand that a cluster may be shared among several filter chains. If one filter chain ejects a host based on its outlier detection type, other filter chains will also be affected even though their -outlier detection type would not eject that host. +outlier detection type would not have ejected that host. Ejection algorithm ------------------ @@ -58,13 +58,13 @@ ejection algorithm works as follows: #. A host is determined to be an outlier. #. If no hosts have been ejected, Envoy will eject the host immediately. 
Otherwise, it checks to make sure the number of ejected hosts is below the allowed threshold (specified via the - :ref:`outlier_detection.max_ejection_percent` + :ref:`outlier_detection.max_ejection_percent` setting). If the number of ejected hosts is above the threshold, the host is not ejected. #. The host is ejected for some number of milliseconds. Ejection means that the host is marked unhealthy and will not be used during load balancing unless the load balancer is in a :ref:`panic ` scenario. The number of milliseconds is equal to the :ref:`outlier_detection.base_ejection_time_ms - ` value + ` value multiplied by the number of times the host has been ejected. This causes hosts to get ejected for longer and longer periods if they continue to fail. #. An ejected host will automatically be brought back into service after the ejection time has @@ -79,97 +79,97 @@ Envoy supports the following outlier detection types: Consecutive 5xx ^^^^^^^^^^^^^^^ -In default mode (:ref:`outlier_detection.split_external_local_origin_errors` is *false*) this detection type takes into account all generated errors: locally -originated and externally originated (transaction) type of errors. +In the default mode (:ref:`outlier_detection.split_external_local_origin_errors` is *false*) this detection type takes into account all generated errors: locally +originated and externally originated (transaction) errors. Errors generated by non-HTTP filters, like :ref:`tcp proxy ` or :ref:`redis proxy ` are internally mapped to HTTP 5xx codes and treated as such. -In split mode (:ref:`outlier_detection.split_external_local_origin_errors` is *true*) this detection type takes into account only externally originated (transaction) errors ignoring locally originated errors. -If an upstream host is HTTP-server, only 5xx types of error are taken into account (see :ref:`Consecutive Gateway Failure` for exceptions). 
+In split mode (:ref:`outlier_detection.split_external_local_origin_errors` is *true*) this detection type takes into account only externally originated (transaction) errors, ignoring locally originated errors. +If an upstream host is an HTTP-server, only 5xx types of error are taken into account (see :ref:`Consecutive Gateway Failure` for exceptions). For redis servers, served via :ref:`redis proxy ` only malformed responses from the server are taken into account. -Properly formatted responses, even when they carry operational error (like index not found, access denied) are not taken into account. +Properly formatted responses, even when they carry an operational error (like index not found, access denied) are not taken into account. If an upstream host returns some number of errors which are treated as consecutive 5xx type errors, it will be ejected. The number of consecutive 5xx required for ejection is controlled by -the :ref:`outlier_detection.consecutive_5xx` value. +the :ref:`outlier_detection.consecutive_5xx` value. .. _consecutive_gateway_failure: Consecutive Gateway Failure ^^^^^^^^^^^^^^^^^^^^^^^^^^^ -This detection type takes into account subset of 5xx errors, called "gateway errors" (502, 503 or 504 status code) -and is supported only by :ref:`http router `. +This detection type takes into account a subset of 5xx errors, called "gateway errors" (502, 503 or 504 status code) +and is supported only by the :ref:`http router `. If an upstream host returns some number of consecutive "gateway errors" (502, 503 or 504 status code), it will be ejected. The number of consecutive gateway failures required for ejection is controlled by the :ref:`outlier_detection.consecutive_gateway_failure -` value. +` value. Consecutive Local Origin Failure ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -This detection type is enabled only when :ref:`outlier_detection.split_external_local_origin_errors` is *true* and takes into account only locally originated errors (timeout, reset, etc). 
+This detection type is enabled only when :ref:`outlier_detection.split_external_local_origin_errors` is *true* and takes into account only locally originated errors (timeout, reset, etc). If Envoy repeatedly cannot connect to an upstream host or communication with the upstream host is repeatedly interrupted, it will be ejected. Various locally originated problems are detected: timeout, TCP reset, ICMP errors, etc. The number of consecutive locally originated failures required for ejection is controlled by the :ref:`outlier_detection.consecutive_local_origin_failure -` value. +` value. This detection type is supported by :ref:`http router `, :ref:`tcp proxy ` and :ref:`redis proxy `. Success Rate ^^^^^^^^^^^^ -Success Rate based outlier ejection aggregates success rate data from every host in a cluster. Then at given -intervals ejects hosts based on statistical outlier detection. Success Rate outlier ejection will not be +Success Rate based outlier detection aggregates success rate data from every host in a cluster. Then at given +intervals ejects hosts based on statistical outlier detection. Success Rate outlier detection will not be calculated for a host if its request volume over the aggregation interval is less than the -:ref:`outlier_detection.success_rate_request_volume` +:ref:`outlier_detection.success_rate_request_volume` value. Moreover, detection will not be performed for a cluster if the number of hosts with the minimum required request volume in an interval is less than the -:ref:`outlier_detection.success_rate_minimum_hosts` +:ref:`outlier_detection.success_rate_minimum_hosts` value. -In default configuration mode (:ref:`outlier_detection.split_external_local_origin_errors` is *false*) -this detection type takes into account all type of errors: locally and externally originated. -:ref:`outlier_detection.enforcing_local_origin_success` config item is ignored. 
+In the default configuration mode (:ref:`outlier_detection.split_external_local_origin_errors` is *false*) +this detection type takes into account all types of errors: locally and externally originated. The +:ref:`outlier_detection.enforcing_local_origin_success` config item is ignored. -In split mode (:ref:`outlier_detection.split_external_local_origin_errors` is *true*), +In split mode (:ref:`outlier_detection.split_external_local_origin_errors` is *true*), locally originated errors and externally originated (transaction) errors are counted and treated separately. Most configuration items, namely -:ref:`outlier_detection.success_rate_minimum_hosts`, -:ref:`outlier_detection.success_rate_request_volume`, -:ref:`outlier_detection.success_rate_stdev_factor` apply to both -types of errors, but :ref:`outlier_detection.enforcing_success_rate` applies -to externally originated errors only and :ref:`outlier_detection.enforcing_local_origin_success_rate` applies to locally originated errors only. +:ref:`outlier_detection.success_rate_minimum_hosts`, +:ref:`outlier_detection.success_rate_request_volume`, +:ref:`outlier_detection.success_rate_stdev_factor` apply to both +types of errors, but :ref:`outlier_detection.enforcing_success_rate` applies +to externally originated errors only and :ref:`outlier_detection.enforcing_local_origin_success_rate` applies to locally originated errors only. .. _arch_overview_outlier_detection_failure_percentage: Failure Percentage ^^^^^^^^^^^^^^^^^^ -Failure Percentage based outlier ejection functions similarly to the success rate detecion type, in +Failure Percentage based outlier detection functions similarly to success rate detection, in that it relies on success rate data from each host in a cluster. However, rather than compare those values to the mean success rate of the cluster as a whole, they are compared to a flat user-configured threshold. 
This threshold is configured via the -:ref:`outlier_detection.failure_percentage_threshold` +:ref:`outlier_detection.failure_percentage_threshold` field. -The other configuration fields for failure percentage based ejection are similar to the fields for -success rate ejection. Failure percentage based ejection also obeys -:ref:`outlier_detection.split_external_local_origin_errors`; +The other configuration fields for failure percentage based detection are similar to the fields for +success rate detection. Failure percentage based detection also obeys +:ref:`outlier_detection.split_external_local_origin_errors`; the enforcement percentages for externally- and locally-originated errors are controlled by -:ref:`outlier_detection.enforcing_failure_percentage` +:ref:`outlier_detection.enforcing_failure_percentage` and -:ref:`outlier_detection.enforcing_failure_percentage_local_origin`, +:ref:`outlier_detection.enforcing_failure_percentage_local_origin`, respectively. As with success rate detection, detection will not be performed for a host if its request volume over the aggregation interval is less than the -:ref:`outlier_detection.failure_percentage_request_volume` +:ref:`outlier_detection.failure_percentage_request_volume` value. Detection also will not be performed for a cluster if the number of hosts with the minimum required request volume in an interval is less than the -:ref:`outlier_detection.failure_percentage_minimum_hosts` +:ref:`outlier_detection.failure_percentage_minimum_hosts` value. .. _arch_overview_outlier_detection_grpc: @@ -188,13 +188,13 @@ Ejection event logging A log of outlier ejection events can optionally be produced by Envoy. This is extremely useful during daily operations since global stats do not provide enough information on which hosts are being ejected and for what reasons. The log is structured as protobuf-based dumps of -:ref:`OutlierDetectionEvent messages `. 
-Ejection event logging is configured in the Cluster manager :ref:`outlier detection configuration `. +:ref:`OutlierDetectionEvent messages `. +Ejection event logging is configured in the Cluster manager :ref:`outlier detection configuration `. Configuration reference ----------------------- -* Cluster manager :ref:`global configuration ` -* Per cluster :ref:`configuration ` +* Cluster manager :ref:`global configuration ` +* Per cluster :ref:`configuration ` * Runtime :ref:`settings ` * Statistics :ref:`reference ` diff --git a/docs/root/intro/arch_overview/upstream/service_discovery.rst b/docs/root/intro/arch_overview/upstream/service_discovery.rst index 678877ff81de2..ee3c6c4db13e0 100644 --- a/docs/root/intro/arch_overview/upstream/service_discovery.rst +++ b/docs/root/intro/arch_overview/upstream/service_discovery.rst @@ -3,7 +3,7 @@ Service discovery ================= -When an upstream cluster is defined in the :ref:`configuration `, +When an upstream cluster is defined in the :ref:`configuration `, Envoy needs to know how to resolve the members of the cluster. This is known as *service discovery*. .. _arch_overview_service_discovery_types: @@ -41,13 +41,15 @@ This means that care should be taken if active health checking is used with DNS to the same IPs: if an IP is repeated many times between DNS names it might cause undue load on the upstream host. -If :ref:`respect_dns_ttl ` is enabled, DNS record TTLs and -:ref:`dns_refresh_rate ` are used to control DNS refresh rate. -For strict DNS cluster, if the minimum of all record TTLs is 0, :ref:`dns_refresh_rate ` -will be used as the cluster's DNS refresh rate. :ref:`dns_refresh_rate ` -defaults to 5000ms if not specified. The :ref:`dns_failure_refresh_rate ` +If :ref:`respect_dns_ttl ` is enabled, DNS record TTLs and +:ref:`dns_refresh_rate ` are used to control DNS refresh rate. 
+For strict DNS cluster, if the minimum of all record TTLs is 0, :ref:`dns_refresh_rate ` +will be used as the cluster's DNS refresh rate. :ref:`dns_refresh_rate ` +defaults to 5000ms if not specified. The :ref:`dns_failure_refresh_rate ` controls the refresh frequency during failures, and, if not configured, the DNS refresh rate will be used. +DNS resolving emits :ref:`cluster statistics ` fields *update_attempt*, *update_success* and *update_failure*. + .. _arch_overview_service_discovery_types_logical_dns: Logical DNS @@ -70,13 +72,15 @@ When interacting with large scale web services, this is the best of all possible asynchronous/eventually consistent DNS resolution, long lived connections, and zero blocking in the forwarding path. -If :ref:`respect_dns_ttl ` is enabled, DNS record TTLs and -:ref:`dns_refresh_rate ` are used to control DNS refresh rate. -For logical DNS cluster, if the TTL of first record is 0, :ref:`dns_refresh_rate ` -will be used as the cluster's DNS refresh rate. :ref:`dns_refresh_rate ` -defaults to 5000ms if not specified. The :ref:`dns_failure_refresh_rate ` +If :ref:`respect_dns_ttl ` is enabled, DNS record TTLs and +:ref:`dns_refresh_rate ` are used to control DNS refresh rate. +For logical DNS cluster, if the TTL of first record is 0, :ref:`dns_refresh_rate ` +will be used as the cluster's DNS refresh rate. :ref:`dns_refresh_rate ` +defaults to 5000ms if not specified. The :ref:`dns_failure_refresh_rate ` controls the refresh frequency during failures, and, if not configured, the DNS refresh rate will be used. +DNS resolving emits :ref:`cluster statistics ` fields *update_attempt*, *update_success* and *update_failure*. + .. _arch_overview_service_discovery_types_original_destination: Original destination @@ -87,7 +91,7 @@ via an iptables REDIRECT or TPROXY target or with Proxy Protocol. 
In these cases requests routed to an original destination cluster are forwarded to upstream hosts as addressed by the redirection metadata, without any explicit host configuration or upstream host discovery. Connections to upstream hosts are pooled and unused hosts are flushed out when they have been idle longer than -:ref:`cleanup_interval `, which defaults to +:ref:`cleanup_interval `, which defaults to 5000ms. If the original destination address is not available, no upstream connection is opened. Envoy can also pick up the original destination from an :ref:`HTTP header `. @@ -121,7 +125,7 @@ Custom cluster ^^^^^^^^^^^^^^ Envoy also supports a custom cluster discovery mechanism. Custom clusters are specified using -:ref:`cluster_type field ` on the cluster configuration. +:ref:`cluster_type field ` on the cluster configuration. Generally active health checking is used in conjunction with the eventually consistent service discovery service data to make load balancing and routing decisions. This is discussed further in diff --git a/docs/root/intro/arch_overview/upstream/upstream.rst b/docs/root/intro/arch_overview/upstream/upstream.rst index 112dc78854466..3c976f0212c31 100644 --- a/docs/root/intro/arch_overview/upstream/upstream.rst +++ b/docs/root/intro/arch_overview/upstream/upstream.rst @@ -13,3 +13,4 @@ Upstream clusters outlier circuit_breaking upstream_filters + load_reporting_service diff --git a/docs/root/intro/arch_overview/upstream/upstream_filters.rst b/docs/root/intro/arch_overview/upstream/upstream_filters.rst index 1fe902dcf9197..4a2b4da0d3b35 100644 --- a/docs/root/intro/arch_overview/upstream/upstream_filters.rst +++ b/docs/root/intro/arch_overview/upstream/upstream_filters.rst @@ -4,8 +4,10 @@ Upstream network filters ======================== Upstream clusters provide an ability to inject network level (L3/L4) -:ref:`filters `. The filters apply to the -connection to the upstream hosts, using the same API presented by listeners for -the downstream connections. 
The write callbacks are invoked for any chunk of -data sent to the upstream host, and the read callbacks are invoked for data +filters. It should be noted that a network filter needs to +be registered in code as an upstream filter before usage. Currently, +there are no upstream filters available in Envoy out of the box. +The filters apply to the connection to the upstream hosts, using the same API presented by listeners for +the downstream connections. The write-callbacks are invoked for any chunk of +data sent to the upstream host, and the read-callbacks are invoked for data received from the upstream host. diff --git a/docs/root/intro/deployment_types/service_to_service.rst b/docs/root/intro/deployment_types/service_to_service.rst index 9f16d8063e1e5..a4200a607ab3a 100644 --- a/docs/root/intro/deployment_types/service_to_service.rst +++ b/docs/root/intro/deployment_types/service_to_service.rst @@ -8,7 +8,7 @@ Service to service only The above diagram shows the simplest Envoy deployment which uses Envoy as a communication bus for all traffic internal to a service oriented architecture (SOA). In this scenario, Envoy exposes -several listeners that are used for local origin traffic as well as service to service traffic. +several listeners that are used for local origin traffic as well as service-to-service traffic. Service to service egress listener ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -22,13 +22,16 @@ themselves with network topology, whether they are running in development or pro This listener supports both HTTP/1.1 or HTTP/2 depending on the capabilities of the application. +.. image:: /_static/service_to_service_egress_listener.svg + :width: 40% + .. _deployment_type_service_to_service_ingress: Service to service ingress listener ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ This is the port used by remote Envoys when they want to talk to the local Envoy. For example, -*http://localhost:9211*. 
Incoming requests are routed to the local service on the configured +*http://servicename:9211*. Envoy routes incoming requests to the local service on the configured port(s). Multiple application ports may be involved depending on application or load balancing needs (for example if the service needs both an HTTP port and a gRPC port). The local Envoy performs buffering, circuit breaking, etc. as needed. @@ -37,6 +40,10 @@ Our default configurations use HTTP/2 for all Envoy to Envoy communication, rega the application uses HTTP/1.1 or HTTP/2 when egressing out of a local Envoy. HTTP/2 provides better performance via long lived connections and explicit reset notifications. +.. image:: /_static/service_to_service_ingress_listener.svg + :width: 55% + + Optional external service egress listeners ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -50,13 +57,12 @@ being consistent and using local port routing for all external services. Discovery service integration ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -The recommended service to service configuration uses an external discovery service for all cluster +The recommended service-to-service configuration uses an external discovery service for all cluster lookups. This provides Envoy with the most detailed information possible for use when performing load balancing, statistics gathering, etc. Configuration template ^^^^^^^^^^^^^^^^^^^^^^ -The source distribution includes an example service to service configuration that is very similar to -the version that Lyft runs in production. See :ref:`here ` for more -information. +The source distribution includes :ref:`an example service-to-service configuration` +that is very similar to the version that Lyft runs in production. 
diff --git a/docs/root/intro/deprecated.rst b/docs/root/intro/deprecated.rst new file mode 100644 index 0000000000000..e630390e94e53 --- /dev/null +++ b/docs/root/intro/deprecated.rst @@ -0,0 +1,6 @@ +Deprecated +========== + +The deprecations for each version have moved :ref:`here `. + +.. This page only exists because previous versions of Envoy link here. diff --git a/docs/root/intro/intro.rst b/docs/root/intro/intro.rst index 055bca7276795..4fd158e49f7f4 100644 --- a/docs/root/intro/intro.rst +++ b/docs/root/intro/intro.rst @@ -8,5 +8,13 @@ Introduction what_is_envoy arch_overview/arch_overview + life_of_a_request deployment_types/deployment_types getting_help + +.. These pages are only here for redirects from log lines from shipping versions of Envoy, so hide them. +.. toctree:: + :hidden: + + version_history + deprecated diff --git a/docs/root/intro/life_of_a_request.rst b/docs/root/intro/life_of_a_request.rst new file mode 100644 index 0000000000000..ac6e5334689bf --- /dev/null +++ b/docs/root/intro/life_of_a_request.rst @@ -0,0 +1,647 @@ +.. _life_of_a_request: + +Life of a Request +================= + +Below we describe the events in the life of a request passing through an Envoy proxy. We first +describe how Envoy fits into the request path for a request and then the internal events that take +place following the arrival of a request at the Envoy proxy from downstream. We follow the request +until the corresponding dispatch upstream and the response path. + + +Terminology +----------- + +Envoy uses the following terms through its codebase and documentation: + +* *Cluster*: a logical service with a set of endpoints that Envoy forwards requests to. +* *Downstream*: an entity connecting to Envoy. This may be a local application (in a sidecar model) or + a network node. In non-sidecar models, this is a remote client. +* *Endpoints*: network nodes that implement a logical service. They are grouped into clusters. 
+ Endpoints in a cluster are *upstream* of an Envoy proxy. +* *Filter*: a module in the connection or request processing pipeline providing some aspect of + request handling. An analogy from Unix is the composition of small utilities (filters) with Unix + pipes (filter chains). +* *Filter chain*: a series of filters. +* *Listeners*: Envoy module responsible for binding to an IP/port, accepting new TCP connections (or + UDP datagrams) and orchestrating the downstream facing aspects of request processing. +* *Upstream*: an endpoint (network node) that Envoy connects to when forwarding requests for a + service. This may be a local application (in a sidecar model) or a network node. In non-sidecar + models, this corresponds with a remote backend. + +Network topology +---------------- + +How a request flows through the components in a network (including Envoy) depends on the network’s +topology. Envoy can be used in a wide variety of networking topologies. We focus on the inner +operation of Envoy below, but briefly we address how Envoy relates to the rest of the network in +this section. + +Envoy originated as a `service mesh +`_ sidecar proxy, +factoring out load balancing, routing, observability, security and discovery services from +applications. In the service mesh model, requests flow through Envoys as a gateway to the network. +Requests arrive at an Envoy via either ingress or egress listeners: + +* Ingress listeners take requests from other nodes in the service mesh and forward them to the + local application. Responses from the local application flow back through Envoy to the downstream. +* Egress listeners take requests from the local application and forward them to other nodes in the + network. These receiving nodes will also be typically running Envoy and accepting the request via + their ingress listeners. + +.. image:: /_static/lor-topology-service-mesh.svg + :width: 80% + :align: center + +.. 
image:: /_static/lor-topology-service-mesh-node.svg + :width: 40% + :align: center + + +Envoy is used in a variety of configurations beyond the service mesh. For example, it can also act +as an internal load balancer: + +.. image:: /_static/lor-topology-ilb.svg + :width: 65% + :align: center + +Or as an ingress/egress proxy on the network edge: + +.. image:: /_static/lor-topology-edge.svg + :width: 90% + :align: center + +In practice, a hybrid of these is often used, where Envoy features in a service mesh, on the edge +and as an internal load balancer. A request path may traverse multiple Envoys. + +.. image:: /_static/lor-topology-hybrid.svg + :width: 90% + :align: center + +Envoy may be configured in multi-tier topologies for scalability and reliability, with a request +first passing through an edge Envoy prior to passing through a second Envoy tier: + +.. image:: /_static/lor-topology-tiered.svg + :width: 80% + :align: center + +In all the above cases, a request will arrive at a specific Envoy via TCP, UDP or Unix domain +sockets from downstream. Envoy will forward requests upstream via TCP, UDP or Unix domain sockets. +We focus on a single Envoy proxy below. + +Configuration +------------- + +Envoy is a very extensible platform. This results in a combinatorial explosion of possible request +paths, depending on: + +* L3/4 protocol, e.g. TCP, UDP, Unix domain sockets. +* L7 protocol, e.g. HTTP/1, HTTP/2, HTTP/3, gRPC, Thrift, Dubbo, Kafka, Redis and various databases. +* Transport socket, e.g. plain text, TLS, ALTS. +* Connection routing, e.g. PROXY protocol, original destination, dynamic forwarding. +* Authentication and authorization. +* Circuit breakers and outlier detection configuration and activation state. +* Many other configurations for networking, HTTP, listener, access logging, health checking, tracing + and stats extensions. 
+ +It's helpful to focus on one at a time, so this example covers the following: + +* An HTTP/2 request with :ref:`TLS ` over a TCP connection for both downstream + and upstream. +* The :ref:`HTTP connection manager ` as the only :ref:`network filter + `. +* A hypothetical CustomFilter and the `router ` filter as the :ref:`HTTP + filter ` chain. +* :ref:`Filesystem access logging `. +* :ref:`Statsd sink `. +* A single :ref:`cluster ` with static endpoints. + +We assume a static bootstrap configuration file for simplicity: + +.. code-block:: yaml + + static_resources: + listeners: + # There is a single listener bound to port 443. + - name: listener_https + address: + socket_address: + protocol: TCP + address: 0.0.0.0 + port_value: 443 + # A single listener filter exists for TLS inspector. + listener_filters: + - name: "envoy.filters.listener.tls_inspector" + typed_config: {} + # On the listener, there is a single filter chain that matches SNI for acme.com. + filter_chains: + - filter_chain_match: + # This will match the SNI extracted by the TLS Inspector filter. + server_names: ["acme.com"] + # Downstream TLS configuration. + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + tls_certificates: + - certificate_chain: { filename: "certs/servercert.pem" } + private_key: { filename: "certs/serverkey.pem" } + filters: + # The HTTP connection manager is the only network filter. + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: ingress_http + use_remote_address: true + http2_protocol_options: + max_concurrent_streams: 100 + # File system based access logging. 
+ access_log: + - name: envoy.access_loggers.file + typed_config: + "@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: "/var/log/envoy/access.log" + # The route table, mapping /foo to some_service. + route_config: + name: local_route + virtual_hosts: + - name: local_service + domains: ["acme.com"] + routes: + - match: + path: "/foo" + route: + cluster: some_service + # CustomFilter and the HTTP router filter are the HTTP filter chain. + http_filters: + - name: some.customer.filter + - name: envoy.filters.http.router + clusters: + - name: some_service + connect_timeout: 5s + # Upstream TLS configuration. + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + load_assignment: + cluster_name: some_service + # Static endpoint assignment. + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 10.1.2.10 + port_value: 10002 + - endpoint: + address: + socket_address: + address: 10.1.2.11 + port_value: 10002 + http2_protocol_options: + max_concurrent_streams: 100 + - name: some_statsd_sink + connect_timeout: 5s + # The rest of the configuration for statsd sink cluster. + # statsd sink. + stats_sinks: + - name: envoy.stat_sinks.statsd + typed_config: + "@type": type.googleapis.com/envoy.config.metrics.v3.StatsdSink + tcp_cluster_name: some_statsd_cluster + +High level architecture +----------------------- + +The request processing path in Envoy has two main parts: + +* :ref:`Listener subsystem ` which handles **downstream** request + processing. It is also responsible for managing the downstream request lifecycle and for the + response path to the client. The downstream HTTP/2 codec lives here. +* :ref:`Cluster subsystem ` which is responsible for selecting and + configuring the **upstream** connection to an endpoint. 
This is where knowledge of cluster and + endpoint health, load balancing and connection pooling exists. The upstream HTTP/2 codec lives + here. + +The two subsystems are bridged with the HTTP router filter, which forwards the HTTP request from +downstream to upstream. + +.. image:: /_static/lor-architecture.svg + :width: 80% + :align: center + +We use the terms :ref:`listener subsystem ` and :ref:`cluster subsystem +` above to refer to the group of modules and instance classes that +are created by the top level `ListenerManager` and `ClusterManager` classes. There are many +components that we discuss below that are instantiated before and during the course of a request by +these management systems, for example listeners, filter chains, codecs, connection pools and load +balancing data structures. + +Envoy has an `event-based thread model +`_. A main thread is responsible for +the server lifecycle, configuration processing, stats, etc. and some number of :ref:`worker threads +` process requests. All threads operate around an event loop (`libevent +`_) and any given downstream TCP connection (including all the multiplexed +streams on it) will be handled by exactly one worker thread for its lifetime. Each worker thread +maintains its own pool of TCP connections to upstream endpoints. :ref:`UDP +` handling makes use of SO_REUSEPORT to have the kernel consistently +hash the source/destination IP:port tuples to the same worker thread. UDP filter state is shared for +a given worker thread, with the filter responsible for providing session semantics as needed. This +is in contrast to the connection oriented TCP filters we discuss below, where filter state exists on +a per connection and, in the case of HTTP filters, per-request basis. + +Worker threads rarely share state and operate in a trivially parallel fashion. This threading model +enables scaling to very high core count CPUs. 
+ +Request flow +------------ + +Overview +^^^^^^^^ + +A brief outline of the life cycle of a request and response using the example configuration above: + +1. A TCP connection from downstream is accepted by an Envoy :ref:`listener + ` running on a :ref:`worker thread `. +2. The :ref:`listener filter ` chain is created and runs. It can + provide SNI and other pre-TLS info. Once completed, the listener will match a network filter + chain. Each listener may have multiple filter chains which match on some combination of + destination IP CIDR range, SNI, ALPN, source ports, etc. A transport socket, in our case the TLS + transport socket, is associated with this filter chain. +3. On network reads, the :ref:`TLS ` transport socket decrypts the data read from + the TCP connection to a decrypted data stream for further processing. +4. The :ref:`network filter ` chain is created and runs. The most + important filter for HTTP is the HTTP connection manager, which is the last network filter in the + chain. +5. The HTTP/2 codec in :ref:`HTTP connection manager ` deframes and + demultiplexes the decrypted data stream from the TLS connection to a number of independent + streams. Each stream handles a single request and response. +6. For each HTTP stream, an :ref:`HTTP filter ` chain is created and + runs. The request first passes through CustomFilter which may read and modify the request. The + most important HTTP filter is the router filter which sits at the end of the HTTP filter chain. + When `decodeHeaders` is invoked on the router filter, the route is selected and a cluster is + picked. The request headers on the stream are forwarded to an upstream endpoint in that cluster. + The :ref:`router ` filter obtains an HTTP :ref:`connection pool + ` from the cluster manager for the matched cluster to do this. +7. Cluster specific :ref:`load balancing ` is performed to find an + endpoint. The cluster’s circuit breakers are checked to determine if a new stream is allowed. 
A + new connection to the endpoint is created if the endpoint's connection pool is empty or lacks + capacity. +8. The upstream endpoint connection's HTTP/2 codec multiplexes and frames the request’s stream with + any other streams going to that upstream over a single TCP connection. +9. The upstream endpoint connection's TLS transport socket encrypts these bytes and writes them to a + TCP socket for the upstream connection. +10. The request, consisting of headers, and optional body and trailers, is proxied upstream, and the + response is proxied downstream. The response passes through the HTTP filters in the + :ref:`opposite order ` from the request, starting at the + router filter and passing through CustomFilter, before being sent downstream. +11. When the response is complete, the stream is destroyed. Post-request processing will update + stats, write to the access log and finalize trace spans. + +We elaborate on each of these steps in the sections below. + +1. Listener TCP accept +^^^^^^^^^^^^^^^^^^^^^^ + +.. image:: /_static/lor-listeners.svg + :width: 90% + :align: center + +The *ListenerManager* is responsible for taking configuration representing :ref:`listeners +` and instantiating a number of *Listener* instances bound to their +respective IP/ports. Listeners may be in one of three states: + +* *Warming*: the listener is waiting for configuration dependencies (e.g. route configuration, + dynamic secrets). The listener is not yet ready to accept TCP connections. +* *Active*: the listener is bound to its IP/port and accepts TCP connections. +* *Draining*: the listener no longer accepts new TCP connections while its existing TCP connections + are allowed to continue for a drain period. + +Each :ref:`worker thread ` maintains its own *Listener* instance for each +of the configured listeners. Each listener may bind to the same port via SO_REUSEPORT or share a +single socket bound to this port. 
When a new TCP connection arrives, the kernel decides which +worker thread will accept the connection and the *Listener* for this worker thread will have its +``Server::ConnectionHandlerImpl::ActiveTcpListener::onAccept()`` callback invoked. + +2. Listener filter chains and network filter chain matching +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The worker thread’s *Listener* then creates and runs the :ref:`listener filter +` chain. Filter chains are created by applying each filter’s *filter +factory*. The factory is aware of the filter’s configuration and creates a new instance of the +filter for each connection or stream. + +In the case of our TLS listener configuration, the listener filter chain consists of the :ref:`TLS +inspector ` filter +(``envoy.filters.listener.tls_inspector``). This filter examines the initial TLS handshake and +extracts the server name (SNI). The SNI is then made available for filter chain matching. While the +TLS inspector appears explicitly in the listener filter chain configuration, Envoy is also capable +of inserting this automatically whenever there is a need for SNI (or ALPN) in a listener’s filter +chain. + +.. image:: /_static/lor-listener-filters.svg + :width: 80% + :align: center + +The TLS inspector filter implements the :repo:`ListenerFilter ` +interface. All filter interfaces, whether listener or network/HTTP, require that filters implement +callbacks for specific connection or stream events. In the case of `ListenerFilter`, this is: + + +.. code-block:: cpp + + virtual FilterStatus onAccept(ListenerFilterCallbacks& cb) PURE; + +``onAccept()`` allows a filter to run during the TCP accept processing. The ``FilterStatus`` +returned by the callback controls how the listener filter chain will continue. Listener filters may +pause the filter chain and then later resume, e.g. in response to an RPC made to another service. 
+ +Information extracted from the listener filters and connection properties is then used to match a +filter chain, giving the network filter chain and transport socket that will be used to handle the +connection. + +.. image:: /_static/lor-filter-chain-match.svg + :width: 50% + :align: center + +.. _life_of_a_request_tls_decryption: + +3. TLS transport socket decryption +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Envoy offers pluggable transport sockets via the +:repo:`TransportSocket ` +extension interface. Transport sockets follow the lifecycle events of a TCP connection and +read/write into network buffers. Some key methods that transport sockets must implement are: + +.. code-block:: cpp + + virtual void onConnected() PURE; + virtual IoResult doRead(Buffer::Instance& buffer) PURE; + virtual IoResult doWrite(Buffer::Instance& buffer, bool end_stream) PURE; + virtual void closeSocket(Network::ConnectionEvent event) PURE; + +When data is available on a TCP connection, ``Network::ConnectionImpl::onReadReady()`` invokes the +:ref:`TLS ` transport socket via ``SslSocket::doRead()``. The transport socket +then performs a TLS handshake on the TCP connection. When the handshake completes, +``SslSocket::doRead()`` provides a decrypted byte stream to an instance of +``Network::FilterManagerImpl``, responsible for managing the network filter chain. + +.. image:: /_static/lor-transport-socket.svg + :width: 80% + :align: center + +It’s important to note that no operation, whether it’s a TLS handshake or a pause of a filter +pipeline is truly blocking. Since Envoy is event-based, any situation in which processing requires +additional data will lead to early event completion and yielding of the CPU to another event. When +the network makes more data available to read, a read event will trigger the resumption of a TLS +handshake. + +4. 
Network filter chain processing +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +As with the listener filter chain, Envoy, via `Network::FilterManagerImpl`, will instantiate a +series of :ref:`network filters ` from their filter factories. The +instance is fresh for each new connection. Network filters, like transport sockets, follow TCP +lifecycle events and are invoked as data becomes available from the transport socket. + +.. image:: /_static/lor-network-filters.svg + :width: 80% + :align: center + +Network filters are composed as a pipeline, unlike transport sockets which are one-per-connection. +Network filters come in three varieties: + +* :repo:`ReadFilter ` implementing ``onData()``, called when data is + available from the connection (due to some request). +* :repo:`WriteFilter ` implementing ``onWrite()``, called when data + is about to be written to the connection (due to some response). +* :repo:`Filter ` implementing both *ReadFilter* and *WriteFilter*. + +The method signatures for the key filter methods are: + +.. code-block:: cpp + + virtual FilterStatus onNewConnection() PURE; + virtual FilterStatus onData(Buffer::Instance& data, bool end_stream) PURE; + virtual FilterStatus onWrite(Buffer::Instance& data, bool end_stream) PURE; + +As with the listener filter, the ``FilterStatus`` allows filters to pause execution of the filter +chain. For example, if a rate limiting service needs to be queried, a rate limiting network filter +would return ``Network::FilterStatus::StopIteration`` from ``onData()`` and later invoke +``continueReading()`` when the query completes. + +The last network filter for a listener dealing with HTTP is :ref:`HTTP connection manager +` (HCM). This is responsible for creating the HTTP/2 codec and managing +the HTTP filter chain. In our example, this is the only network filter. An example network filter +chain making use of multiple network filters would look like: + +.. 
image:: /_static/lor-network-read.svg + :width: 80% + :align: center + +On the response path, the network filter chain is executed in the reverse order to the request path. + +.. image:: /_static/lor-network-write.svg + :width: 80% + :align: center + +.. _life_of_a_request_http2_decoding: + +5. HTTP/2 codec decoding +^^^^^^^^^^^^^^^^^^^^^^^^ + +The HTTP/2 codec in Envoy is based on `nghttp2 `_. It is invoked by the HCM +with plaintext bytes from the TCP connection (after network filter chain transformation). The codec +decodes the byte stream as a series of HTTP/2 frames and demultiplexes the connection into a number +of independent HTTP streams. Stream multiplexing is a key feature in HTTP/2, providing significant +performance advantages over HTTP/1. Each HTTP stream handles a single request and response. + +The codec is also responsible for handling HTTP/2 setting frames and both stream and connection +level :repo:`flow control `. + +The codecs are responsible for abstracting the specifics of the HTTP connection, presenting a +standard view to the HTTP connection manager and HTTP filter chain of a connection split into +streams, each with request/response headers/body/trailers. This is true regardless of whether the +protocol is HTTP/1, HTTP/2 or HTTP/3. + +6. HTTP filter chain processing +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +For each HTTP stream, the HCM instantiates an :ref:`HTTP filter ` chain, +following the pattern established above for listener and network filter chains. + +.. image:: /_static/lor-http-filters.svg + :width: 80% + :align: center + +There are three kinds of HTTP filter interfaces: + +* :repo:`StreamDecoderFilter ` with callbacks for request processing. +* :repo:`StreamEncoderFilter ` with callbacks for response processing. +* :repo:`StreamFilter ` implementing both `StreamDecoderFilter` and + `StreamEncoderFilter`. + +Looking at the decoder filter interface: + +.. 
code-block:: cpp
+
+  virtual FilterHeadersStatus decodeHeaders(RequestHeaderMap& headers, bool end_stream) PURE;
+  virtual FilterDataStatus decodeData(Buffer::Instance& data, bool end_stream) PURE;
+  virtual FilterTrailersStatus decodeTrailers(RequestTrailerMap& trailers) PURE;
+
+Rather than operating on connection buffers and events, HTTP filters follow the lifecycle of an HTTP
+request, e.g. ``decodeHeaders()`` takes HTTP headers as an argument rather than a byte buffer. The
+returned ``FilterStatus`` provides, as with network and listener filters, the ability to manage filter
+chain control flow.
+
+When the HTTP/2 codec makes available the HTTP request headers, these are first passed to
+``decodeHeaders()`` in CustomFilter. If the returned ``FilterHeadersStatus`` is ``Continue``, HCM
+then passes the headers (possibly mutated by CustomFilter) to the router filter.
+
+Decoder and encoder-decoder filters are executed on the request path. Encoder and encoder-decoder
+filters are executed on the response path, in :ref:`reverse direction
+`. Consider the following example filter chain:
+
+.. image:: /_static/lor-http.svg
+   :width: 80%
+   :align: center
+
+The request path will look like:
+
+.. image:: /_static/lor-http-decode.svg
+   :width: 80%
+   :align: center
+
+While the response path will look like:
+
+.. image:: /_static/lor-http-encode.svg
+   :width: 80%
+   :align: center
+
+When ``decodeHeaders()`` is invoked on the :ref:`router ` filter, the
+route selection is finalized and a cluster is picked. The HCM selects a route from its
+``RouteConfiguration`` at the start of HTTP filter chain execution. This is referred to as the
+*cached route*. Filters may modify headers and cause a new route to be selected, by asking HCM to
+clear the route cache and requesting HCM to reevaluate the route selection. When the router filter
+is invoked, the route is finalized. The selected route’s configuration will point at an upstream
+cluster name.
The router filter then asks the `ClusterManager` for an HTTP :ref:`connection pool
+` for the cluster. This involves load balancing and the connection pool,
+discussed in the next section.
+
+.. image:: /_static/lor-route-config.svg
+   :width: 70%
+   :align: center
+
+The resulting HTTP connection pool is used to build an `UpstreamRequest` object in the router, which
+encapsulates the HTTP encoding and decoding callback methods for the upstream HTTP request. Once a
+stream is allocated on a connection in the HTTP connection pool, the request headers are forwarded
+to the upstream endpoint by the invocation of ``UpstreamRequest::encodeHeaders()``.
+
+The router filter is responsible for all aspects of upstream request lifecycle management on the
+stream allocated from the HTTP connection pool. It also is responsible for request timeouts, retries
+and affinity.
+
+7. Load balancing
+^^^^^^^^^^^^^^^^^
+
+Each cluster has a :ref:`load balancer ` which picks an endpoint when
+a new request arrives. Envoy supports a variety of load balancing algorithms, e.g. weighted
+round-robin, Maglev, least-loaded, random. Load balancers obtain their effective assignments from a
+combination of static bootstrap configuration, DNS, dynamic xDS (the CDS and EDS discovery services)
+and active/passive health checks. Further details on how load balancing works in Envoy are provided
+in the :ref:`load balancing documentation `.
+
+Once an endpoint is selected, the :ref:`connection pool ` for this endpoint
+is used to find a connection to forward the request on. If no connection to the host exists, or all
+connections are at their maximum concurrent stream limit, a new connection is established and placed
+in the connection pool, unless the circuit breaker for maximum connections for the cluster has
+tripped. If a maximum lifetime stream limit for a connection is configured and reached, a new
+connection is allocated in the pool and the affected HTTP/2 connection is drained.
Other circuit
+breakers, e.g. maximum concurrent requests to a cluster are also checked. See :repo:`circuit
+breakers ` and :ref:`connection pools ` for
+further details.
+
+.. image:: /_static/lor-lb.svg
+   :width: 80%
+   :align: center
+
+8. HTTP/2 codec encoding
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+The selected connection's HTTP/2 codec multiplexes the request stream with any other streams going
+to the same upstream over a single TCP connection. This is the reverse of :ref:`HTTP/2 codec
+decoding `.
+
+As with the downstream HTTP/2 codec, the upstream codec is responsible for taking Envoy’s standard
+abstraction of HTTP, i.e. multiple streams multiplexed on a single connection with request/response
+headers/body/trailers, and mapping this to the specifics of HTTP/2 by generating a series of HTTP/2
+frames.
+
+9. TLS transport socket encryption
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The upstream endpoint connection's TLS transport socket encrypts the bytes from the HTTP/2 codec
+output and writes them to a TCP socket for the upstream connection. As with :ref:`TLS transport
+socket decryption `, in our example the cluster has a transport
+socket configured that provides TLS transport security. The same interfaces exist for upstream and
+downstream transport socket extensions.
+
+.. image:: /_static/lor-client.svg
+   :width: 70%
+   :align: center
+
+10. Response path and HTTP lifecycle
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The request, consisting of headers, and optional body and trailers, is proxied upstream, and the
+response is proxied downstream. The response passes through the HTTP and network filters in the
+:ref:`opposite order ` from the request.
+
+Various callbacks for decoder/encoder request lifecycle events will be invoked in HTTP filters, e.g.
+when response trailers are being forwarded or the request body is streamed.
Similarly, read/write
+network filters will also have their respective callbacks invoked as data continues to flow in both
+directions during a request.
+
+:ref:`Outlier detection ` status for the endpoint is revised as the
+request progresses.
+
+A request completes when the upstream response reaches its end-of-stream, i.e. when trailers or the
+response header/body with end-stream set are received. This is handled in
+``Router::Filter::onUpstreamComplete()``.
+
+It is possible for a request to terminate early. This may be due to (but not limited to):
+
+* Request timeout.
+* Upstream endpoint stream reset.
+* HTTP filter stream reset.
+* Circuit breaking.
+* Unavailability of upstream resources, e.g. missing a cluster for a route.
+* No healthy endpoints.
+* DoS protection.
+* HTTP protocol violations.
+* Local reply from either the HCM or an HTTP filter. E.g. a rate limit HTTP filter returning a 429
+  response.
+
+If any of these occur, Envoy may either send an internally generated response, if upstream response
+headers have not yet been sent, or will reset the stream, if response headers have already been
+forwarded downstream. The Envoy :ref:`debugging FAQ ` has further information on
+interpreting these early stream terminations.
+
+11. Post-request processing
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Once a request completes, the stream is destroyed. The following also takes place:
+
+* The post-request :ref:`statistics ` are updated (e.g. timing, active
+  requests, upgrades, health checks). Some statistics are updated earlier however, during request
+  processing. Stats are not written to the stats :ref:`sink
+  ` at this point, they are batched
+  and written by the main thread periodically. In our example this is a statsd sink.
+* :ref:`Access logs ` are written to the access log :ref:`sinks
+  `. In our example this is a file access log.
+* :ref:`Trace ` spans are finalized.
If our example request was traced, a + trace span, describing the duration and details of the request would be created by the HCM when + processing request headers and then finalized by the HCM during post-request processing. diff --git a/docs/root/intro/version_history.rst b/docs/root/intro/version_history.rst new file mode 100644 index 0000000000000..307b7bb140e8d --- /dev/null +++ b/docs/root/intro/version_history.rst @@ -0,0 +1,6 @@ +Version History +=============== + +The changes for each version have moved :ref:`here `. + +.. This page only exists because previous versions of Envoy link here. diff --git a/docs/root/intro/what_is_envoy.rst b/docs/root/intro/what_is_envoy.rst index 2de7b36d775c1..d9a6e62a3b644 100644 --- a/docs/root/intro/what_is_envoy.rst +++ b/docs/root/intro/what_is_envoy.rst @@ -97,7 +97,7 @@ instead of a library, it is able to implement advanced load balancing techniques and have them be accessible to any application. Currently Envoy includes support for :ref:`automatic retries `, :ref:`circuit breaking `, :ref:`global rate limiting ` via an external rate limiting service, -:ref:`request shadowing `, and +:ref:`request shadowing `, and :ref:`outlier detection `. Future support is planned for request racing. diff --git a/docs/root/operations/admin.rst b/docs/root/operations/admin.rst index 3ce7ff5725c63..b2ad2e7c6391c 100644 --- a/docs/root/operations/admin.rst +++ b/docs/root/operations/admin.rst @@ -6,7 +6,7 @@ Administration interface Envoy exposes a local administration interface that can be used to query and modify different aspects of the server: -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` .. _operations_admin_interface_security: @@ -48,7 +48,7 @@ modify different aspects of the server: .. http:get:: /certs List out all loaded TLS certificates, including file name, serial number, subject alternate names and days until - expiration in JSON format conforming to the :ref:`certificate proto definition `. 
+ expiration in JSON format conforming to the :ref:`certificate proto definition `.

.. _operations_admin_interface_clusters:

@@ -68,10 +68,10 @@ modify different aspects of the server:

- :ref:`circuit breakers` settings for all priority settings.

- Information about :ref:`outlier detection` if a detector is installed. Currently
- :ref:`average success rate `,
- and :ref:`ejection threshold`
+ :ref:`average success rate `,
+ and :ref:`ejection threshold`
are presented. Both of these values could be ``-1`` if there was not enough data to calculate them in the last
- :ref:`interval`.
+ :ref:`interval`.

- ``added_via_api`` flag -- ``false`` if the cluster was added via static configuration, ``true`` if it was
added via the :ref:`CDS` api.

@@ -94,8 +94,8 @@ modify different aspects of the server:
zone, String, Service zone
canary, Boolean, Whether the host is a canary
success_rate, Double, "Request success rate (0-100). -1 if there was not enough
- :ref:`request volume`
- in the :ref:`interval`
+ :ref:`request volume`
+ in the :ref:`interval`
to calculate it"

Host health status

@@ -115,18 +115,18 @@ modify different aspects of the server:
.. http:get:: /clusters?format=json

Dump the */clusters* output in a JSON-serialized proto. See the
- :ref:`definition ` for more information.
+ :ref:`definition ` for more information.

.. _operations_admin_interface_config_dump:

.. http:get:: /config_dump

Dump currently loaded configuration from various Envoy components as JSON-serialized proto
- messages. See the :ref:`response definition ` for more
+ messages. See the :ref:`response definition ` for more
information.

.. warning::

- Configuration may include :ref:`TLS certificates `. Before
+ Configuration may include :ref:`TLS certificates `. Before
dumping the configuration, Envoy will attempt to redact the ``private_key`` and ``password``
fields from any certificates it finds. This relies on the configuration being a strongly-typed
protobuf message.
If your Envoy configuration uses deprecated ``config`` fields (of type @@ -137,14 +137,21 @@ modify different aspects of the server: The underlying proto is marked v2alpha and hence its contents, including the JSON representation, are not guaranteed to be stable. +.. _operations_admin_interface_config_dump_include_eds: + +.. http:get:: /config_dump?include_eds + + Dump currently loaded configuration including EDS. See the :ref:`response definition ` for more + information. + .. _operations_admin_interface_config_dump_by_mask: .. http:get:: /config_dump?mask={} Specify a subset of fields that you would like to be returned. The mask is parsed as a ``ProtobufWkt::FieldMask`` and applied to each top level dump such as - :ref:`BootstrapConfigDump ` and - :ref:`ClustersConfigDump `. + :ref:`BootstrapConfigDump ` and + :ref:`ClustersConfigDump `. This behavior changes if both resource and mask query parameters are specified. See below for details. @@ -154,10 +161,10 @@ modify different aspects of the server: Dump only the currently loaded configuration that matches the specified resource. The resource must be a repeated field in one of the top level config dumps such as - :ref:`static_listeners ` from - :ref:`ListenersConfigDump ` or - :ref:`dynamic_active_clusters ` from - :ref:`ClustersConfigDump `. If you need a non-repeated + :ref:`static_listeners ` from + :ref:`ListenersConfigDump ` or + :ref:`dynamic_active_clusters ` from + :ref:`ClustersConfigDump `. If you need a non-repeated field, use the mask query parameter documented above. If you want only a subset of fields from the repeated resource, use both as documented below. @@ -174,7 +181,7 @@ modify different aspects of the server: .. http:get:: /contention - Dump current Envoy mutex contention stats (:ref:`MutexStats `) in JSON + Dump current Envoy mutex contention stats (:ref:`MutexStats `) in JSON format, if mutex tracing is enabled. See :option:`--enable-mutex-tracing`. .. 
http:post:: /cpuprofiler @@ -216,7 +223,7 @@ modify different aspects of the server: .. http:get:: /listeners?format=json Dump the */listeners* output in a JSON-serialized proto. See the - :ref:`definition ` for more information. + :ref:`definition ` for more information. .. _operations_admin_interface_logging: @@ -255,9 +262,15 @@ modify different aspects of the server: .. http:post:: /drain_listeners?inboundonly :ref:`Drains ` all inbound listeners. `traffic_direction` field in - :ref:`Listener ` is used to determine whether a listener + :ref:`Listener ` is used to determine whether a listener is inbound or outbound. + .. http:post:: /drain_listeners?graceful + + When draining listeners, enter a graceful drain period prior to closing listeners. + This behaviour and duration is configurable via server options or CLI + (:option:`--drain-time-s` and :option:`--drain-strategy`). + .. attention:: This operation directly stops the matched listeners on workers. Once listeners in a given @@ -304,7 +317,7 @@ modify different aspects of the server: "uptime_all_epochs": "6s" } - See the :ref:`ServerInfo proto ` for an + See the :ref:`ServerInfo proto ` for an explanation of the output. .. http:get:: /ready @@ -318,7 +331,7 @@ modify different aspects of the server: LIVE - See the `state` field of the :ref:`ServerInfo proto ` for an + See the `state` field of the :ref:`ServerInfo proto ` for an explanation of the output. .. _operations_admin_interface_stats: @@ -527,11 +540,11 @@ modify different aspects of the server: format, as expected by the Hystrix dashboard. If invoked from a browser or a terminal, the response will be shown as a continuous stream, - sent in intervals defined by the :ref:`Bootstrap ` - :ref:`stats_flush_interval ` + sent in intervals defined by the :ref:`Bootstrap ` + :ref:`stats_flush_interval ` This handler is enabled only when a Hystrix sink is enabled in the config file as documented - :ref:`here `. + :ref:`here `. 
As Envoy's and Hystrix resiliency mechanisms differ, some of the statistics shown in the dashboard had to be adapted: diff --git a/docs/root/operations/certificates.rst b/docs/root/operations/certificates.rst new file mode 100644 index 0000000000000..0dff05aa93602 --- /dev/null +++ b/docs/root/operations/certificates.rst @@ -0,0 +1,15 @@ +.. _operations_certificates: + +Certificate Management +====================== + +Envoy provides several mechanisms for cert management. At a high level they can be broken into + +1. Static :ref:`CommonTlsContext ` referenced certificates. + These will *not* reload automatically, and requires either a restart of the proxy or + reloading the clusters/listeners that reference them. + :ref:`Hot restarting ` can be used here to pick up the new + certificates without dropping traffic. +2. :ref:`Secret Discovery Service ` referenced certificates. + By using SDS, certificates can either be referenced as files (reloading the certs when the + parent directory is moved) or through an external SDS server that can push new certificates. diff --git a/docs/root/operations/cli.rst b/docs/root/operations/cli.rst index a4729f1e3f078..5cbd5b9110513 100644 --- a/docs/root/operations/cli.rst +++ b/docs/root/operations/cli.rst @@ -21,7 +21,7 @@ following are the command line options that Envoy supports. .. option:: --config-yaml - *(optional)* The YAML string for a v2 bootstrap configuration. If :option:`--config-path` is also set, + *(optional)* The YAML string for a bootstrap configuration. If :option:`--config-path` is also set, the values in this YAML string will override and merge with the bootstrap loaded from :option:`--config-path`. Because YAML is a superset of JSON, a JSON string may also be passed to :option:`--config-yaml`. @@ -31,6 +31,13 @@ following are the command line options that Envoy supports. ./envoy -c bootstrap.yaml --config-yaml "node: {id: 'node1'}" +.. 
option:: --bootstrap-version + + *(optional)* The API version to load the bootstrap as. The value should be a single integer, e.g. + to parse the bootstrap configuration as V3, specify ``--bootstrap-version 3``. If unset, Envoy will + attempt to load the bootstrap as the previous API version and upgrade it to the latest. If that fails, + Envoy will attempt to load the configuration as the latest version. + .. option:: --mode *(optional)* One of the operating modes for Envoy: @@ -59,10 +66,25 @@ following are the command line options that Envoy supports. set this option. However, if Envoy needs to be run multiple times on the same machine, each running Envoy will need a unique base ID so that the shared memory regions do not conflict. +.. option:: --use-dynamic-base-id + + *(optional)* Selects an unused base ID to use when allocating shared memory regions. Using + preselected values with :option:`--base-id` is preferred, however. If this option is enabled, + it supersedes the :option:`--base-id` value. This flag may not be used when the value of + :option:`--restart-epoch` is non-zero. Instead, for subsequent hot restarts, set + :option:`--base-id` option with the selected base ID. See :option:`--base-id-path`. + +.. option:: --base-id-path + + *(optional)* Writes the base ID to the given path. While this option is compatible with + :option:`--base-id`, its intended use is to provide access to the dynamic base ID selected by + :option:`--use-dynamic-base-id`. + .. option:: --concurrency *(optional)* The number of :ref:`worker threads ` to run. If not - specified defaults to the number of hardware threads on the machine. + specified defaults to the number of hardware threads on the machine. If set to zero, Envoy will + still run one worker thread. .. option:: -l , --log-level @@ -71,9 +93,9 @@ following are the command line options that Envoy supports. .. option:: --component-log-level - *(optional)* The comma separated list of logging level per component. 
Non developers should generally - never set this option. For example, if you want `upstream` component to run at `debug` level and - `connection` component to run at `trace` level, you should pass ``upstream:debug,connection:trace`` to + *(optional)* The comma separated list of logging level per component. Non developers should generally + never set this option. For example, if you want `upstream` component to run at `debug` level and + `connection` component to run at `trace` level, you should pass ``upstream:debug,connection:trace`` to this flag. See ``ALL_LOGGER_IDS`` in :repo:`/source/common/common/logger.h` for a list of components. .. option:: --cpuset-threads @@ -92,13 +114,10 @@ following are the command line options that Envoy supports. .. option:: --log-format *(optional)* The format string to use for laying out the log message metadata. If this is not - set, a default format string ``"[%Y-%m-%d %T.%e][%t][%l][%n] %v"`` is used. - - When used in conjunction with :option:`--log-format-prefix-with-location` set to 0, the logger can be - configured to not prefix ``%v`` by a file path and a line number. + set, a default format string ``"[%Y-%m-%d %T.%e][%t][%l][%n] [%g:%#] %v"`` is used. - **NOTE**: The default log format will be changed to ``"[%Y-%m-%d %T.%e][%t][%l][%n] [%g:%#] %v"`` - together with the default value of :option:`--log-format-prefix-with-location` to 0 at 1.16.0 release. + When used in conjunction with :option:`--log-format-prefix-with-location` set to 1, the logger can be + configured to prefix ``%v`` by a file path and a line number. When used in conjunction with :option:`--log-format-escaped`, the logger can be configured to log in a format that is parsable by log viewers. Known integrations are documented @@ -145,10 +164,9 @@ following are the command line options that Envoy supports. *(optional)* This temporary flag allows replacing all entries of ``"%v"`` in the log format by ``"[%g:%#] %v"``. 
This flag is provided for migration purposes only. If this is not set, a - default value 1 is used. + default value 0 is used. - **NOTE**: The default value will be changed to 0 at 1.16.0 release and the flag will be - removed at 1.17.0 release. + **NOTE**: The flag will be removed at 1.17.0 release. .. option:: --log-format-escaped @@ -176,15 +194,15 @@ following are the command line options that Envoy supports. *(optional)* Defines the local service cluster name where Envoy is running. The local service cluster name is first sourced from the :ref:`Bootstrap node - ` message's :ref:`cluster - ` field. This CLI option provides an alternative + ` message's :ref:`cluster + ` field. This CLI option provides an alternative method for specifying this value and will override any value set in bootstrap configuration. It should be set if any of the following features are used: :ref:`statsd `, :ref:`health check cluster - verification `, - :ref:`runtime override directory `, + verification `, + :ref:`runtime override directory `, :ref:`user agent addition - `, + `, :ref:`HTTP global rate limiting `, :ref:`CDS `, and :ref:`HTTP tracing `, either via this CLI option or in the bootstrap @@ -194,8 +212,8 @@ following are the command line options that Envoy supports. *(optional)* Defines the local service node name where Envoy is running. The local service node name is first sourced from the :ref:`Bootstrap node - ` message's :ref:`id - ` field. This CLI option provides an alternative + ` message's :ref:`id + ` field. This CLI option provides an alternative method for specifying this value and will override any value set in bootstrap configuration. It should be set if any of the following features are used: :ref:`statsd `, :ref:`CDS @@ -207,12 +225,12 @@ following are the command line options that Envoy supports. *(optional)* Defines the local service zone where Envoy is running. 
The local service zone is first sourced from the :ref:`Bootstrap node - ` message's :ref:`locality.zone - ` field. This CLI option provides an + ` message's :ref:`locality.zone + ` field. This CLI option provides an alternative method for specifying this value and will override any value set in bootstrap configuration. It should be set if discovery service routing is used and the discovery service exposes :ref:`zone data - `, either via this CLI option or in + `, either via this CLI option or in the bootstrap configuration. The meaning of zone is context dependent, e.g. `Availability Zone (AZ) `_ @@ -231,15 +249,23 @@ following are the command line options that Envoy supports. .. option:: --drain-time-s - *(optional)* The time in seconds that Envoy will drain connections during + *(optional)* The time in seconds that Envoy will drain connections during a :ref:`hot restart ` or when individual listeners are being - modified or removed via :ref:`LDS `. - Defaults to 600 seconds (10 minutes). Generally the drain time should be less than - the parent shutdown time set via the :option:`--parent-shutdown-time-s` option. How the two + modified or removed via :ref:`LDS `. + Defaults to 600 seconds (10 minutes). Generally the drain time should be less than + the parent shutdown time set via the :option:`--parent-shutdown-time-s` option. How the two settings are configured depends on the specific deployment. In edge scenarios, it might be desirable to have a very long drain time. In service to service scenarios, it might be possible to make the drain and shutdown time much shorter (e.g., 60s/90s). +.. option:: --drain-strategy + + *(optional)* Determine behaviour of Envoy during the hot restart drain sequence. During the drain sequence, the drain manager encourages draining through terminating connections on request completion, sending "Connection: CLOSE" on HTTP1, and sending GOAWAY on HTTP2. 
+ + * ``gradual``: *(default)* The percentage of requests encouraged to drain increases to 100% as the drain time elapses. + + * ``immediate``: All requests are encouraged to drain as soon as the drain sequence begins. + .. option:: --parent-shutdown-time-s *(optional)* The time in seconds that Envoy will wait before shutting down the parent process @@ -254,7 +280,7 @@ following are the command line options that Envoy supports. .. option:: --enable-mutex-tracing *(optional)* This flag enables the collection of mutex contention statistics - (:ref:`MutexStats `) as well as a contention endpoint + (:ref:`MutexStats `) as well as a contention endpoint (:http:get:`/contention`). Mutex tracing is not enabled by default, since it incurs a slight performance penalty for those Envoys which already experience mutex contention. @@ -280,6 +306,13 @@ following are the command line options that Envoy supports. and these occurrences are counted in the :ref:`server.dynamic_unknown_fields ` statistic. +.. option:: --ignore-unknown-dynamic-fields + + *(optional)* This flag disables validation of protobuf configuration for unknown fields in dynamic + configuration. Unlike setting :option:`--reject-unknown-dynamic-fields` to false, it does not log warnings + or count occurrences of unknown fields, in the interest of configuration processing speed. If + :option:`--reject-unknown-dynamic-fields` is set to true, this flag has no effect. + .. option:: --disable-extensions *(optional)* This flag disabled the provided list of comma-separated extension names. Disabled diff --git a/docs/root/operations/fs_flags.rst b/docs/root/operations/fs_flags.rst index a4c154bd207a6..ba322a31ef328 100644 --- a/docs/root/operations/fs_flags.rst +++ b/docs/root/operations/fs_flags.rst @@ -6,7 +6,7 @@ File system flags Envoy supports file system "flags" that alter state at startup. This is used to persist changes between restarts if necessary. 
The flag files should be placed in the directory specified in the :ref:`flags_path -` configuration +` configuration option. The currently supported flag files are: drain diff --git a/docs/root/operations/hot_restarter.rst b/docs/root/operations/hot_restarter.rst index 72e09b0976869..3cb902dedca0e 100644 --- a/docs/root/operations/hot_restarter.rst +++ b/docs/root/operations/hot_restarter.rst @@ -21,15 +21,22 @@ The restarter is invoked like so: ulimit -n {{ pillar.get('envoy_max_open_files', '102400') }} sysctl fs.inotify.max_user_watches={{ pillar.get('envoy_max_inotify_watches', '524288') }} - + exec /usr/sbin/envoy -c /etc/envoy/envoy.cfg --restart-epoch $RESTART_EPOCH --service-cluster {{ grains['cluster_name'] }} --service-node {{ grains['service_node'] }} --service-zone {{ grains.get('ec2_availability-zone', 'unknown') }} Note on `inotify.max_user_watches`: If Envoy is being configured to watch many files for configuration in a directory on a Linux machine, increase this value as Linux enforces limits on the maximum number of files that can be watched. - -The *RESTART_EPOCH* environment variable is set by the restarter on each restart and can be passed + +The *RESTART_EPOCH* environment variable is set by the restarter on each restart and must be passed to the :option:`--restart-epoch` option. +.. warning:: + + Special care must be taken if you wish to use the :option:`--use-dynamic-base-id` option. That + flag may only be set when the *RESTART_EPOCH* is 0 and your *start_envoy.sh* must obtain the + chosen base ID (via :option:`--base-id-path`), store it, and use it as the :option:`--base-id` + value on subsequent invocations (when *RESTART_EPOCH* is greater than 0). + The restarter handles the following signals: * **SIGTERM** or **SIGINT** (Ctrl-C): Will cleanly terminate all child processes and exit. 
diff --git a/docs/root/operations/operations.rst b/docs/root/operations/operations.rst index 3f1ada49c1ae5..c4dee98ce876a 100644 --- a/docs/root/operations/operations.rst +++ b/docs/root/operations/operations.rst @@ -13,4 +13,5 @@ Operations and administration runtime fs_flags traffic_tapping + certificates performance diff --git a/docs/root/operations/performance.rst b/docs/root/operations/performance.rst index 555e7a03b5eb1..01acce4acc1fb 100644 --- a/docs/root/operations/performance.rst +++ b/docs/root/operations/performance.rst @@ -23,14 +23,14 @@ Envoy exposes two statistics to monitor performance of the event loops on all th running---but if this number elevates substantially above its normal observed baseline, it likely indicates kernel scheduler delays. -These statistics can be enabled by setting :ref:`enable_dispatcher_stats ` +These statistics can be enabled by setting :ref:`enable_dispatcher_stats ` to true. .. warning:: Note that enabling dispatcher stats records a value for each iteration of the event loop on every thread. This should normally be minimal overhead, but when using - :ref:`statsd `, it will send each observed value over + :ref:`statsd `, it will send each observed value over the wire individually because the statsd protocol doesn't have any way to represent a histogram summary. Be aware that this can be a very large volume of data. @@ -56,7 +56,7 @@ Watchdog -------- In addition to event loop statistics, Envoy also include a configurable -:ref:`watchdog ` system that can increment +:ref:`watchdog ` system that can increment statistics when Envoy is not responsive and optionally kill the server. The statistics are useful for understanding at a high level whether Envoy's event loop is not responsive either because it is doing too much work, blocking, or not being scheduled by the OS. 
diff --git a/docs/root/operations/runtime.rst b/docs/root/operations/runtime.rst
index 4fdb15ddf70dc..fae342182a660 100644
--- a/docs/root/operations/runtime.rst
+++ b/docs/root/operations/runtime.rst
@@ -6,3 +6,13 @@ Runtime
 :ref:`Runtime configuration ` can be used to modify various server settings without restarting Envoy.
 The runtime settings that are available depend on how the server is configured. They are
 documented in the relevant sections of the :ref:`configuration guide `.
+
+Runtime guards are also used as a mechanism to disable new behavior or risky changes not otherwise
+guarded by configuration. Such changes will tend to introduce a runtime guard that can be used to
+disable the new behavior/code path. The names of these runtime guards will be included in the
+release notes alongside an explanation of the change that warranted the runtime guard.
+
+Due to this usage of runtime guards, some deployments might find it useful to set up
+dynamic runtime configuration as a safety measure to be able to quickly disable the new behavior
+without having to revert to an older version of Envoy or redeploy it with a new set of static
+runtime flags.
diff --git a/docs/root/operations/traffic_tapping.rst b/docs/root/operations/traffic_tapping.rst
index c67e3848ff0c5..d15eee5cffc95 100644
--- a/docs/root/operations/traffic_tapping.rst
+++ b/docs/root/operations/traffic_tapping.rst
@@ -7,9 +7,9 @@ Envoy currently provides two experimental extensions that can tap traffic:
 * :ref:`HTTP tap filter `. See the linked filter
   documentation for more information.
- * :ref:`Tap transport socket extension ` that can intercept
+ * :ref:`Tap transport socket extension ` that can intercept
   traffic and write to a :ref:`protobuf trace file
-  `. The remainder of this document describes
+  `. The remainder of this document describes
   the configuration of the tap transport socket.
Tap transport socket configuration @@ -22,12 +22,12 @@ Tap transport socket configuration Capabilities will be expanded over time and the configuration structures are likely to change. Tapping can be configured on :ref:`Listener -` and :ref:`Cluster -` transport sockets, providing the ability to interpose on +` and :ref:`Cluster +` transport sockets, providing the ability to interpose on downstream and upstream L4 connections respectively. To configure traffic tapping, add an `envoy.transport_sockets.tap` transport socket -:ref:`configuration ` to the listener +:ref:`configuration ` to the listener or cluster. For a plain text socket this might look like: .. code-block:: yaml @@ -35,7 +35,7 @@ or cluster. For a plain text socket this might look like: transport_socket: name: envoy.transport_sockets.tap typed_config: - "@type": type.googleapis.com/envoy.config.transport_socket.tap.v2alpha.Tap + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tap.v3.Tap common_config: static_config: match_config: @@ -55,7 +55,7 @@ For a TLS socket, this will be: transport_socket: name: envoy.transport_sockets.tap typed_config: - "@type": type.googleapis.com/envoy.config.transport_socket.tap.v2alpha.Tap + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tap.v3.Tap common_config: static_config: match_config: @@ -70,8 +70,8 @@ For a TLS socket, this will be: typed_config: where the TLS context configuration replaces any existing :ref:`downstream -` or :ref:`upstream -` +` or :ref:`upstream +` TLS configuration on the listener or cluster, respectively. Each unique socket instance will generate a trace file prefixed with `path_prefix`. E.g. @@ -83,22 +83,22 @@ Buffered data limits For buffered socket taps, Envoy will limit the amount of body data that is tapped to avoid OOM situations. The default limit is 1KiB for both received and transmitted data. 
This is configurable via the :ref:`max_buffered_rx_bytes -` and +` and :ref:`max_buffered_tx_bytes -` settings. When a buffered +` settings. When a buffered socket tap is truncated, the trace will indicate truncation via the :ref:`read_truncated -` and :ref:`write_truncated -` fields as well as the body -:ref:`truncated ` field. +` and :ref:`write_truncated +` fields as well as the body +:ref:`truncated ` field. Streaming --------- The tap transport socket supports both buffered and streaming, controlled by the :ref:`streaming -` setting. When buffering, -:ref:`SocketBufferedTrace ` messages are +` setting. When buffering, +:ref:`SocketBufferedTrace ` messages are emitted. When streaming, a series of :ref:`SocketStreamedTraceSegment -` are emitted. +` are emitted. See the :ref:`HTTP tap filter streaming ` documentation for more information. Most of the concepts overlap between the HTTP filter and the transport socket. diff --git a/docs/root/start/sandboxes/cors.rst b/docs/root/start/sandboxes/cors.rst index 3225cb0e81f53..8e3ac24996ee1 100644 --- a/docs/root/start/sandboxes/cors.rst +++ b/docs/root/start/sandboxes/cors.rst @@ -53,9 +53,9 @@ Terminal 1 $ docker-compose ps Name Command State Ports - ---------------------------------------------------------------------------------------------------------------------------- - frontend_front-envoy_1 /docker-entrypoint.sh /bin ... Up 10000/tcp, 0.0.0.0:8000->80/tcp, 0.0.0.0:8001->8001/tcp - frontend_frontend-service_1 /bin/sh -c /usr/local/bin/ ... Up 10000/tcp, 80/tcp + ------------------------------------------------------------------------------------------------------------------------------ + frontend_front-envoy_1 /docker-entrypoint.sh /bin ... Up 10000/tcp, 0.0.0.0:8000->8000/tcp, 0.0.0.0:8001->8001/tcp + frontend_frontend-service_1 /bin/sh -c /usr/local/bin/ ... 
Up 10000/tcp, 8000/tcp Terminal 2 @@ -67,9 +67,9 @@ Terminal 2 $ docker-compose ps Name Command State Ports - -------------------------------------------------------------------------------------------------------------------------- - backend_backend-service_1 /bin/sh -c /usr/local/bin/ ... Up 10000/tcp, 80/tcp - backend_front-envoy_1 /docker-entrypoint.sh /bin ... Up 10000/tcp, 0.0.0.0:8002->80/tcp, 0.0.0.0:8003->8001/tcp + ---------------------------------------------------------------------------------------------------------------------------- + backend_backend-service_1 /bin/sh -c /usr/local/bin/ ... Up 10000/tcp, 8000/tcp + backend_front-envoy_1 /docker-entrypoint.sh /bin ... Up 10000/tcp, 0.0.0.0:8002->8000/tcp, 0.0.0.0:8003->8001/tcp **Step 3: Test Envoy's CORS capabilities** diff --git a/docs/root/start/sandboxes/csrf.rst b/docs/root/start/sandboxes/csrf.rst index 5b6defcae9e34..66268dd1e50ac 100644 --- a/docs/root/start/sandboxes/csrf.rst +++ b/docs/root/start/sandboxes/csrf.rst @@ -55,8 +55,8 @@ Terminal 1 (samesite) Name Command State Ports ---------------------------------------------------------------------------------------------------------------------- - samesite_front-envoy_1 /docker-entrypoint.sh /bin ... Up 10000/tcp, 0.0.0.0:8000->80/tcp, 0.0.0.0:8001->8001/tcp - samesite_service_1 /bin/sh -c /usr/local/bin/ ... Up 10000/tcp, 80/tcp + samesite_front-envoy_1 /docker-entrypoint.sh /bin ... Up 10000/tcp, 0.0.0.0:8000->8000/tcp, 0.0.0.0:8001->8001/tcp + samesite_service_1 /bin/sh -c /usr/local/bin/ ... Up 10000/tcp, 8000/tcp Terminal 2 (crosssite) @@ -69,8 +69,8 @@ Terminal 2 (crosssite) Name Command State Ports ---------------------------------------------------------------------------------------------------------------------- - crosssite_front-envoy_1 /bin/sh -c /usr/local/bin/ ... Up 10000/tcp, 0.0.0.0:8002->80/tcp, 0.0.0.0:8003->8001/tcp - crosssite_service_1 /docker-entrypoint.sh /bin ... 
Up 10000/tcp, 80/tcp
+ crosssite_front-envoy_1 /bin/sh -c /usr/local/bin/ ... Up 10000/tcp, 0.0.0.0:8002->8000/tcp, 0.0.0.0:8003->8001/tcp
+ crosssite_service_1 /docker-entrypoint.sh /bin ... Up 10000/tcp, 8000/tcp

 **Step 3: Test Envoy's CSRF capabilities**

diff --git a/docs/root/start/sandboxes/ext_authz.rst b/docs/root/start/sandboxes/ext_authz.rst
new file mode 100644
index 0000000000000..fd890c5562996
--- /dev/null
+++ b/docs/root/start/sandboxes/ext_authz.rst
@@ -0,0 +1,197 @@
+.. _install_sandboxes_ext_authz:
+
+External Authorization Filter
+=============================
+
+The External Authorization sandbox demonstrates Envoy's :ref:`ext_authz filter `
+capability to delegate authorization of incoming requests through Envoy to an external service.
+
+While ext_authz can also be employed as a network filter, this sandbox is limited to exhibiting the
+ext_authz HTTP Filter, which supports calling an HTTP or gRPC service.
+
+The setup of this sandbox is very similar to the front-proxy deployment, however calls to the upstream
+service behind the proxy will be checked by an external HTTP or gRPC service. In this sandbox,
+for every authorized call, the external authorization service adds an additional ``x-current-user``
+header entry to the original request headers to be forwarded to the upstream service.
+
+Running the Sandbox
+~~~~~~~~~~~~~~~~~~~
+
+**Step 1: Install Docker**
+
+Ensure that you have recent versions of ``docker`` and ``docker-compose``.
+
+A simple way to achieve this is via the `Docker Desktop `_.
+
+**Step 2: Clone the Envoy repository and start all of our containers**
+
+If you have not cloned the Envoy repository, clone it with ``git clone git@github.com:envoyproxy/envoy``
+or ``git clone https://github.com/envoyproxy/envoy.git``.
+
+To build this sandbox example and start the example services, run the following commands::
+
+ $ pwd
+ envoy/examples/ext_authz
+ $ docker-compose pull
+ $ docker-compose up --build -d
+ $ docker-compose ps
+
+ Name Command State Ports
+ ---------------------------------------------------------------------------------------------------------------------------------------
+ ext_authz_ext_authz-grpc-service_1 /app/server -users /etc/us Up
+ ext_authz_ext_authz-http-service_1 docker-entrypoint.sh node Up
+ ext_authz_front-envoy_1 /docker-entrypoint.sh /bin Up 10000/tcp, 0.0.0.0:8000->8000/tcp, 0.0.0.0:8001->8001/tcp
+ ext_authz_upstream-service_1 python3 /app/service/server.py Up
+
+.. note::
+ This sandbox has multiple setups controlled by the ``FRONT_ENVOY_YAML`` environment variable which
+ points to the effective Envoy configuration to be used. The default value of ``FRONT_ENVOY_YAML``
+ can be defined in the ``.env`` file or provided inline when running the ``docker-compose up``
+ command. For more information, please take a look at `environment variables in Compose documentation `_.
+
+By default, ``FRONT_ENVOY_YAML`` points to ``config/grpc-service/v3.yaml`` file which bootstraps
+front-envoy with ext_authz HTTP filter with gRPC service ``V3`` (this is specified by :ref:`transport_api_version field`).
+The possible values of ``FRONT_ENVOY_YAML`` can be found inside the ``envoy/examples/ext_authz/config``
+directory.
+
+For example, to run Envoy with the ext_authz HTTP filter using an HTTP service, the commands will be::
+
+ $ pwd
+ envoy/examples/ext_authz
+ $ docker-compose pull
+ $ # Tearing down the currently running setup
+ $ docker-compose down
+ $ FRONT_ENVOY_YAML=config/http-service.yaml docker-compose up --build -d
+ $ # Or you can update the .env file with the above FRONT_ENVOY_YAML value, so you don't have to specify it when running the "up" command.
+ +**Step 3: Access the upstream-service behind the Front Envoy** + +You can now try to send a request to upstream-service via the front-envoy as follows:: + + $ curl -v localhost:8000/service + * Trying 127.0.0.1... + * TCP_NODELAY set + * Connected to localhost (127.0.0.1) port 8000 (#0) + > GET /service HTTP/1.1 + > Host: localhost:8000 + > User-Agent: curl/7.58.0 + > Accept: */* + > + < HTTP/1.1 403 Forbidden + < date: Fri, 19 Jun 2020 15:02:24 GMT + < server: envoy + < content-length: 0 + +As observed, the request failed with ``403 Forbidden`` status code. This happened since the ext_authz +filter employed by Envoy rejected the call. To let the request reach the upstream service, you need +to provide a ``Bearer`` token via the ``Authorization`` header. + +.. note:: + A complete list of users is defined in ``envoy/examples/ext_authz/auth/users.json`` file. For + example, the ``token1`` used in the below example is corresponding to ``user1``. + +An example of successful requests can be observed as follows:: + + $ curl -v -H "Authorization: Bearer token1" localhost:8000/service + * Trying 127.0.0.1... + * TCP_NODELAY set + * Connected to localhost (127.0.0.1) port 8000 (#0) + > GET /service HTTP/1.1 + > Host: localhost:8000 + > User-Agent: curl/7.58.0 + > Accept: */* + > Authorization: Bearer token1 + > + < HTTP/1.1 200 OK + < content-type: text/html; charset=utf-8 + < content-length: 24 + < server: envoy + < date: Fri, 19 Jun 2020 15:04:29 GMT + < x-envoy-upstream-service-time: 2 + < + * Connection #0 to host localhost left intact + Hello user1 from behind Envoy! + +We can also employ `Open Policy Agent `_ server +(with `envoy_ext_authz_grpc `_ plugin enabled) +as the authorization server. 
To run this example:: + + $ pwd + envoy/examples/ext_authz + $ docker-compose pull + $ # Tearing down the currently running setup + $ docker-compose down + $ FRONT_ENVOY_YAML=config/opa-service/v2.yaml docker-compose up --build -d + +And sending a request to the upstream service (via the Front Envoy) gives:: + + $ curl localhost:8000/service --verbose + * Trying ::1... + * TCP_NODELAY set + * Connected to localhost (::1) port 8000 (#0) + > GET /service HTTP/1.1 + > Host: localhost:8000 + > User-Agent: curl/7.64.1 + > Accept: */* + > + < HTTP/1.1 200 OK + < content-type: text/html; charset=utf-8 + < content-length: 28 + < server: envoy + < date: Thu, 02 Jul 2020 06:29:58 GMT + < x-envoy-upstream-service-time: 2 + < + * Connection #0 to host localhost left intact + Hello OPA from behind Envoy! + +From the logs, we can observe the policy decision message from the Open Policy Agent server (for +the above request against the defined policy in ``config/opa-service/policy.rego``):: + + $ docker-compose logs ext_authz-opa-service | grep decision_id -A 30 + ext_authz-opa-service_1 | "decision_id": "8143ca68-42d8-43e6-ade6-d1169bf69110", + ext_authz-opa-service_1 | "input": { + ext_authz-opa-service_1 | "attributes": { + ext_authz-opa-service_1 | "destination": { + ext_authz-opa-service_1 | "address": { + ext_authz-opa-service_1 | "Address": { + ext_authz-opa-service_1 | "SocketAddress": { + ext_authz-opa-service_1 | "PortSpecifier": { + ext_authz-opa-service_1 | "PortValue": 8000 + ext_authz-opa-service_1 | }, + ext_authz-opa-service_1 | "address": "172.28.0.6" + ext_authz-opa-service_1 | } + ext_authz-opa-service_1 | } + ext_authz-opa-service_1 | } + ext_authz-opa-service_1 | }, + ext_authz-opa-service_1 | "metadata_context": {}, + ext_authz-opa-service_1 | "request": { + ext_authz-opa-service_1 | "http": { + ext_authz-opa-service_1 | "headers": { + ext_authz-opa-service_1 | ":authority": "localhost:8000", + ext_authz-opa-service_1 | ":method": "GET", + 
ext_authz-opa-service_1 | ":path": "/service",
+ ext_authz-opa-service_1 | "accept": "*/*",
+ ext_authz-opa-service_1 | "user-agent": "curl/7.64.1",
+ ext_authz-opa-service_1 | "x-forwarded-proto": "http",
+ ext_authz-opa-service_1 | "x-request-id": "b77919c0-f1d4-4b06-b444-5a8b32d5daf4"
+ ext_authz-opa-service_1 | },
+ ext_authz-opa-service_1 | "host": "localhost:8000",
+ ext_authz-opa-service_1 | "id": "16617514055874272263",
+ ext_authz-opa-service_1 | "method": "GET",
+ ext_authz-opa-service_1 | "path": "/service",
+
+Trying to send a request with method other than ``GET`` gives a rejection::
+
+ $ curl -X POST localhost:8000/service --verbose
+ * Trying ::1...
+ * TCP_NODELAY set
+ * Connected to localhost (::1) port 8000 (#0)
+ > POST /service HTTP/1.1
+ > Host: localhost:8000
+ > User-Agent: curl/7.64.1
+ > Accept: */*
+ >
+ < HTTP/1.1 403 Forbidden
+ < date: Thu, 02 Jul 2020 06:46:13 GMT
+ < server: envoy
+ < content-length: 0
diff --git a/docs/root/start/sandboxes/fault_injection.rst b/docs/root/start/sandboxes/fault_injection.rst
index 237c52c972865..a091c2ada258a 100644
--- a/docs/root/start/sandboxes/fault_injection.rst
+++ b/docs/root/start/sandboxes/fault_injection.rst
@@ -48,7 +48,7 @@ Terminal 2
 $ docker-compose exec envoy bash
 $ bash send_request.sh
-The script above (``send_request.sh``) sends a continuous stream of HTTP requests to Envoy, which in turn forwards the requests to the backend container. Fauilt injection is configured in Evoy but turned off (i.e. affects 0% of requests). Consequently, you should see a continuous sequence of HTTP 200 response codes.
+The script above (``send_request.sh``) sends a continuous stream of HTTP requests to Envoy, which in turn forwards the requests to the backend container. Fault injection is configured in Envoy but turned off (i.e. affects 0% of requests). Consequently, you should see a continuous sequence of HTTP 200 response codes.
**Step 4: Test Envoy's abort fault injection** diff --git a/docs/root/start/sandboxes/front_proxy.rst b/docs/root/start/sandboxes/front_proxy.rst index df191abb25186..41baf801a5090 100644 --- a/docs/root/start/sandboxes/front_proxy.rst +++ b/docs/root/start/sandboxes/front_proxy.rst @@ -3,26 +3,25 @@ Front Proxy =========== -To get a flavor of what Envoy has to offer as a front proxy, we are releasing a -`docker compose `_ sandbox that deploys a front -envoy and a couple of services (simple flask apps) colocated with a running -service Envoy. The three containers will be deployed inside a virtual network -called ``envoymesh``. +To get a flavor of what Envoy has to offer as a front proxy, we are releasing a `docker compose `_ +sandbox that deploys a front Envoy and a couple of services (simple Flask apps) colocated with a +running service Envoy. The three containers will be deployed inside a virtual network called +``envoymesh``. Below you can see a graphic showing the docker compose deployment: -.. image:: /_static/docker_compose_v0.1.svg +.. image:: /_static/docker_compose_front_proxy.svg :width: 100% All incoming requests are routed via the front Envoy, which is acting as a reverse proxy sitting on -the edge of the ``envoymesh`` network. Port ``80`` is mapped to port ``8000`` by docker compose -(see :repo:`/examples/front-proxy/docker-compose.yaml`). Moreover, notice -that all traffic routed by the front Envoy to the service containers is actually routed to the -service Envoys (routes setup in :repo:`/examples/front-proxy/front-envoy.yaml`). In turn the service -envoys route the request to the flask app via the loopback address (routes setup in -:repo:`/examples/front-proxy/service-envoy.yaml`). This setup -illustrates the advantage of running service Envoys collocated with your services: all requests are -handled by the service Envoy, and efficiently routed to your services. +the edge of the ``envoymesh`` network. 
Port ``8080``, ``8443``, and ``8001`` are exposed by docker +compose (see :repo:`/examples/front-proxy/docker-compose.yaml`) to handle ``HTTP``, ``HTTPS`` calls +to the services and requests to ``/admin`` respectively. Moreover, notice that all traffic routed +by the front Envoy to the service containers is actually routed to the service Envoys +(routes setup in :repo:`/examples/front-proxy/front-envoy.yaml`). In turn the service Envoys route +the request to the Flask app via the loopback address (routes setup in :repo:`/examples/front-proxy/service-envoy.yaml`). +This setup illustrates the advantage of running service Envoys collocated with your services: all +requests are handled by the service Envoy, and efficiently routed to your services. Running the Sandbox ~~~~~~~~~~~~~~~~~~~ @@ -43,98 +42,139 @@ or ``git clone https://github.com/envoyproxy/envoy.git``:: $ pwd envoy/examples/front-proxy - $ docker-compose pull - $ docker-compose up --build -d + $ docker-compose build --pull + $ docker-compose up -d $ docker-compose ps - Name Command State Ports - -------------------------------------------------------------------------------------------------------------------------- - front-proxy_front-envoy_1 /docker-entrypoint.sh /bin ... Up 10000/tcp, 0.0.0.0:8000->80/tcp, 0.0.0.0:8001->8001/tcp - front-proxy_service1_1 /bin/sh -c /usr/local/bin/ ... Up 10000/tcp, 80/tcp - front-proxy_service2_1 /bin/sh -c /usr/local/bin/ ... Up 10000/tcp, 80/tcp + Name Command State Ports + ------------------------------------------------------------------------------------------------------------------------------------------------------ + front-proxy_front-envoy_1 /docker-entrypoint.sh /bin ... Up 10000/tcp, 0.0.0.0:8080->8080/tcp, 0.0.0.0:8001->8001/tcp, 0.0.0.0:8443->8443/tcp + front-proxy_service1_1 /bin/sh -c /usr/local/bin/ ... Up 10000/tcp, 8000/tcp + front-proxy_service2_1 /bin/sh -c /usr/local/bin/ ... 
Up 10000/tcp, 8000/tcp **Step 3: Test Envoy's routing capabilities** -You can now send a request to both services via the front-envoy. +You can now send a request to both services via the ``front-envoy``. -For service1:: +For ``service1``:: - $ curl -v localhost:8000/service/1 - * Trying 192.168.99.100... - * Connected to 192.168.99.100 (192.168.99.100) port 8000 (#0) + $ curl -v localhost:8080/service/1 + * Trying ::1... + * TCP_NODELAY set + * Connected to localhost (::1) port 8080 (#0) > GET /service/1 HTTP/1.1 - > Host: 192.168.99.100:8000 - > User-Agent: curl/7.54.0 + > Host: localhost:8080 + > User-Agent: curl/7.64.1 > Accept: */* > < HTTP/1.1 200 OK < content-type: text/html; charset=utf-8 - < content-length: 89 - < x-envoy-upstream-service-time: 1 + < content-length: 92 < server: envoy - < date: Fri, 26 Aug 2018 19:39:19 GMT + < date: Mon, 06 Jul 2020 06:20:00 GMT + < x-envoy-upstream-service-time: 2 < - Hello from behind Envoy (service 1)! hostname: f26027f1ce28 resolvedhostname: 172.19.0.6 - * Connection #0 to host 192.168.99.100 left intact + Hello from behind Envoy (service 1)! hostname: 36418bc3c824 resolvedhostname: 192.168.160.4 -For service2:: +For ``service2``:: - $ curl -v localhost:8000/service/2 - * Trying 192.168.99.100... - * Connected to 192.168.99.100 (192.168.99.100) port 8000 (#0) + $ curl -v localhost:8080/service/2 + * Trying ::1... + * TCP_NODELAY set + * Connected to localhost (::1) port 8080 (#0) > GET /service/2 HTTP/1.1 - > Host: 192.168.99.100:8000 - > User-Agent: curl/7.54.0 + > Host: localhost:8080 + > User-Agent: curl/7.64.1 > Accept: */* > < HTTP/1.1 200 OK < content-type: text/html; charset=utf-8 - < content-length: 89 + < content-length: 92 + < server: envoy + < date: Mon, 06 Jul 2020 06:23:13 GMT < x-envoy-upstream-service-time: 2 + < + Hello from behind Envoy (service 2)! 
hostname: ea6165ee4fee resolvedhostname: 192.168.160.2 + +Notice that each request, while sent to the front Envoy, was correctly routed to the respective +application. + +We can also use ``HTTPS`` to call services behind the front Envoy. For example, calling ``service1``:: + + $ curl https://localhost:8443/service/1 -k -v + * Trying ::1... + * TCP_NODELAY set + * Connected to localhost (::1) port 8443 (#0) + * ALPN, offering h2 + * ALPN, offering http/1.1 + * successfully set certificate verify locations: + * CAfile: /etc/ssl/cert.pem + CApath: none + * TLSv1.2 (OUT), TLS handshake, Client hello (1): + * TLSv1.2 (IN), TLS handshake, Server hello (2): + * TLSv1.2 (IN), TLS handshake, Certificate (11): + * TLSv1.2 (IN), TLS handshake, Server key exchange (12): + * TLSv1.2 (IN), TLS handshake, Server finished (14): + * TLSv1.2 (OUT), TLS handshake, Client key exchange (16): + * TLSv1.2 (OUT), TLS change cipher, Change cipher spec (1): + * TLSv1.2 (OUT), TLS handshake, Finished (20): + * TLSv1.2 (IN), TLS change cipher, Change cipher spec (1): + * TLSv1.2 (IN), TLS handshake, Finished (20): + * SSL connection using TLSv1.2 / ECDHE-RSA-CHACHA20-POLY1305 + * ALPN, server did not agree to a protocol + * Server certificate: + * subject: CN=front-envoy + * start date: Jul 5 15:18:44 2020 GMT + * expire date: Jul 5 15:18:44 2021 GMT + * issuer: CN=front-envoy + * SSL certificate verify result: self signed certificate (18), continuing anyway. + > GET /service/1 HTTP/1.1 + > Host: localhost:8443 + > User-Agent: curl/7.64.1 + > Accept: */* + > + < HTTP/1.1 200 OK + < content-type: text/html; charset=utf-8 + < content-length: 92 < server: envoy - < date: Fri, 26 Aug 2018 19:39:23 GMT + < date: Mon, 06 Jul 2020 06:17:14 GMT + < x-envoy-upstream-service-time: 3 < - Hello from behind Envoy (service 2)! 
hostname: 92f4a3737bbc resolvedhostname: 172.19.0.2 - * Connection #0 to host 192.168.99.100 left intact - -Notice that each request, while sent to the front Envoy, was correctly routed -to the respective application. + Hello from behind Envoy (service 1)! hostname: 36418bc3c824 resolvedhostname: 192.168.160.4 **Step 4: Test Envoy's load balancing capabilities** -Now let's scale up our service1 nodes to demonstrate the load balancing abilities -of Envoy.:: +Now let's scale up our ``service1`` nodes to demonstrate the load balancing abilities of Envoy:: $ docker-compose scale service1=3 Creating and starting example_service1_2 ... done Creating and starting example_service1_3 ... done -Now if we send a request to service1 multiple times, the front Envoy will load balance the -requests by doing a round robin of the three service1 machines:: +Now if we send a request to ``service1`` multiple times, the front Envoy will load balance the +requests by doing a round robin of the three ``service1`` machines:: - $ curl -v localhost:8000/service/1 - * Trying 192.168.99.100... - * Connected to 192.168.99.100 (192.168.99.100) port 8000 (#0) + $ curl -v localhost:8080/service/1 + * Trying ::1... + * TCP_NODELAY set + * Connected to localhost (::1) port 8080 (#0) > GET /service/1 HTTP/1.1 - > Host: 192.168.99.100:8000 - > User-Agent: curl/7.43.0 + > Host: localhost:8080 + > User-Agent: curl/7.64.1 > Accept: */* > < HTTP/1.1 200 OK < content-type: text/html; charset=utf-8 - < content-length: 89 - < x-envoy-upstream-service-time: 1 + < content-length: 92 < server: envoy - < date: Fri, 26 Aug 2018 19:40:21 GMT - < x-envoy-protocol-version: HTTP/1.1 + < date: Mon, 06 Jul 2020 06:21:47 GMT + < x-envoy-upstream-service-time: 6 < - Hello from behind Envoy (service 1)! hostname: 85ac151715c6 resolvedhostname: 172.19.0.3 - * Connection #0 to host 192.168.99.100 left intact - $ curl -v localhost:8000/service/1 + Hello from behind Envoy (service 1)! 
hostname: 3dc787578c23 resolvedhostname: 192.168.160.6 + $ curl -v localhost:8080/service/1 * Trying 192.168.99.100... - * Connected to 192.168.99.100 (192.168.99.100) port 8000 (#0) + * Connected to 192.168.99.100 (192.168.99.100) port 8080 (#0) > GET /service/1 HTTP/1.1 - > Host: 192.168.99.100:8000 + > Host: 192.168.99.100:8080 > User-Agent: curl/7.54.0 > Accept: */* > @@ -145,13 +185,12 @@ requests by doing a round robin of the three service1 machines:: < server: envoy < date: Fri, 26 Aug 2018 19:40:22 GMT < - Hello from behind Envoy (service 1)! hostname: 20da22cfc955 resolvedhostname: 172.19.0.5 - * Connection #0 to host 192.168.99.100 left intact - $ curl -v localhost:8000/service/1 + Hello from behind Envoy (service 1)! hostname: 3a93ece62129 resolvedhostname: 192.168.160.5 + $ curl -v localhost:8080/service/1 * Trying 192.168.99.100... - * Connected to 192.168.99.100 (192.168.99.100) port 8000 (#0) + * Connected to 192.168.99.100 (192.168.99.100) port 8080 (#0) > GET /service/1 HTTP/1.1 - > Host: 192.168.99.100:8000 + > Host: 192.168.99.100:8080 > User-Agent: curl/7.43.0 > Accept: */* > @@ -163,8 +202,7 @@ requests by doing a round robin of the three service1 machines:: < date: Fri, 26 Aug 2018 19:40:24 GMT < x-envoy-protocol-version: HTTP/1.1 < - Hello from behind Envoy (service 1)! hostname: f26027f1ce28 resolvedhostname: 172.19.0.6 - * Connection #0 to host 192.168.99.100 left intact + Hello from behind Envoy (service 1)! hostname: 36418bc3c824 resolvedhostname: 192.168.160.4 **Step 5: enter containers and curl services** @@ -174,13 +212,13 @@ can use ``docker-compose exec /bin/bash``. For example we can enter the ``front-envoy`` container, and ``curl`` for services locally:: $ docker-compose exec front-envoy /bin/bash - root@81288499f9d7:/# curl localhost:80/service/1 + root@81288499f9d7:/# curl localhost:8080/service/1 Hello from behind Envoy (service 1)! 
hostname: 85ac151715c6 resolvedhostname: 172.19.0.3 - root@81288499f9d7:/# curl localhost:80/service/1 + root@81288499f9d7:/# curl localhost:8080/service/1 Hello from behind Envoy (service 1)! hostname: 20da22cfc955 resolvedhostname: 172.19.0.5 - root@81288499f9d7:/# curl localhost:80/service/1 + root@81288499f9d7:/# curl localhost:8080/service/1 Hello from behind Envoy (service 1)! hostname: f26027f1ce28 resolvedhostname: 172.19.0.6 - root@81288499f9d7:/# curl localhost:80/service/2 + root@81288499f9d7:/# curl localhost:8080/service/2 Hello from behind Envoy (service 2)! hostname: 92f4a3737bbc resolvedhostname: 172.19.0.2 **Step 6: enter containers and curl admin** @@ -188,8 +226,8 @@ enter the ``front-envoy`` container, and ``curl`` for services locally:: When Envoy runs it also attaches an ``admin`` to your desired port. In the example configs the admin is bound to port ``8001``. We can ``curl`` it to gain useful information. For example you can ``curl`` ``/server_info`` to get information about the -envoy version you are running. Additionally you can ``curl`` ``/stats`` to get -statistics. For example inside ``frontenvoy`` we can get:: +Envoy version you are running. Additionally you can ``curl`` ``/stats`` to get +statistics. For example inside ``front-envoy`` we can get:: $ docker-compose exec front-envoy /bin/bash root@e654c2c83277:/# curl localhost:8001/server_info @@ -197,37 +235,47 @@ statistics. For example inside ``frontenvoy`` we can get:: .. 
code-block:: json { - "version": "3ba949a9cb5b0b1cccd61e76159969a49377fd7d/1.10.0-dev/Clean/RELEASE/BoringSSL", + "version": "093e2ffe046313242144d0431f1bb5cf18d82544/1.15.0-dev/Clean/RELEASE/BoringSSL", "state": "LIVE", + "hot_restart_version": "11.104", "command_line_options": { "base_id": "0", - "concurrency": 4, + "use_dynamic_base_id": false, + "base_id_path": "", + "concurrency": 8, "config_path": "/etc/front-envoy.yaml", "config_yaml": "", "allow_unknown_static_fields": false, + "reject_unknown_dynamic_fields": false, + "ignore_unknown_dynamic_fields": false, "admin_address_path": "", "local_address_ip_version": "v4", "log_level": "info", "component_log_level": "", - "log_format": "[%Y-%m-%d %T.%e][%t][%l][%n] %v", + "log_format": "[%Y-%m-%d %T.%e][%t][%l][%n] [%g:%#] %v", + "log_format_escaped": false, "log_path": "", - "hot_restart_version": false, "service_cluster": "front-proxy", "service_node": "", "service_zone": "", + "drain_strategy": "Gradual", "mode": "Serve", "disable_hot_restart": false, "enable_mutex_tracing": false, "restart_epoch": 0, "cpuset_threads": false, + "disabled_extensions": [], + "bootstrap_version": 0, + "hidden_envoy_deprecated_max_stats": "0", + "hidden_envoy_deprecated_max_obj_name_len": "0", "file_flush_interval": "10s", "drain_time": "600s", "parent_shutdown_time": "900s" }, - "uptime_current_epoch": "401s", - "uptime_all_epochs": "401s" + "uptime_current_epoch": "188s", + "uptime_all_epochs": "188s" } - + .. code-block:: text root@e654c2c83277:/# curl localhost:8001/stats @@ -250,6 +298,5 @@ statistics. For example inside ``frontenvoy`` we can get:: cluster.service2.upstream_rq_total: 2 ... -Notice that we can get the number of members of upstream clusters, number of requests -fulfilled by them, information about http ingress, and a plethora of other useful -stats. 
+Notice that we can get the number of members of upstream clusters, number of requests fulfilled by +them, information about http ingress, and a plethora of other useful stats. diff --git a/docs/root/start/sandboxes/grpc_bridge.rst b/docs/root/start/sandboxes/grpc_bridge.rst index 3382a075bf537..aa61e60742699 100644 --- a/docs/root/start/sandboxes/grpc_bridge.rst +++ b/docs/root/start/sandboxes/grpc_bridge.rst @@ -32,7 +32,7 @@ Docker compose ~~~~~~~~~~~~~~ To run the docker compose file, and set up both the Python and the gRPC containers -run: +run:: $ pwd envoy/examples/grpc-bridge diff --git a/docs/root/start/sandboxes/jaeger_native_tracing.rst b/docs/root/start/sandboxes/jaeger_native_tracing.rst index 07193e03f9740..5c41560d96c41 100644 --- a/docs/root/start/sandboxes/jaeger_native_tracing.rst +++ b/docs/root/start/sandboxes/jaeger_native_tracing.rst @@ -21,7 +21,7 @@ The three containers will be deployed inside a virtual network called ``envoymes only works on x86-64). All incoming requests are routed via the front Envoy, which is acting as a reverse proxy -sitting on the edge of the ``envoymesh`` network. Port ``80`` is mapped to port ``8000`` +sitting on the edge of the ``envoymesh`` network. Port ``8000`` is exposed by docker compose (see :repo:`/examples/jaeger-native-tracing/docker-compose.yaml`). 
Notice that all Envoys are configured to collect request traces (e.g., http_connection_manager/config/tracing setup in :repo:`/examples/jaeger-native-tracing/front-envoy-jaeger.yaml`) and setup to propagate the spans generated @@ -59,10 +59,10 @@ To build this sandbox example, and start the example apps run the following comm Name Command State Ports ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - jaeger-native-tracing_front-envoy_1 /start-front.sh Up 10000/tcp, 0.0.0.0:8000->80/tcp, 0.0.0.0:8001->8001/tcp + jaeger-native-tracing_front-envoy_1 /start-front.sh Up 10000/tcp, 0.0.0.0:8000->8000/tcp, 0.0.0.0:8001->8001/tcp jaeger-native-tracing_jaeger_1 /go/bin/all-in-one-linux - ... Up 14250/tcp, 14268/tcp, 0.0.0.0:16686->16686/tcp, 5775/udp, 5778/tcp, 6831/udp, 6832/udp, 0.0.0.0:9411->9411/tcp - jaeger-native-tracing_service1_1 /start-service.sh Up 10000/tcp, 80/tcp - jaeger-native-tracing_service2_1 /start-service.sh Up 10000/tcp, 80/tcp + jaeger-native-tracing_service1_1 /start-service.sh Up 10000/tcp, 8000/tcp + jaeger-native-tracing_service2_1 /start-service.sh Up 10000/tcp, 8000/tcp **Step 2: Generate some load** diff --git a/docs/root/start/sandboxes/jaeger_tracing.rst b/docs/root/start/sandboxes/jaeger_tracing.rst index bad25e5bd26fc..ce73e6679ddb0 100644 --- a/docs/root/start/sandboxes/jaeger_tracing.rst +++ b/docs/root/start/sandboxes/jaeger_tracing.rst @@ -10,7 +10,7 @@ service1 makes an API call to service2 before returning a response. The three containers will be deployed inside a virtual network called ``envoymesh``. All incoming requests are routed via the front Envoy, which is acting as a reverse proxy -sitting on the edge of the ``envoymesh`` network. Port ``80`` is mapped to port ``8000`` +sitting on the edge of the ``envoymesh`` network. 
Port ``8000`` is exposed by docker compose (see :repo:`/examples/jaeger-tracing/docker-compose.yaml`). Notice that all Envoys are configured to collect request traces (e.g., http_connection_manager/config/tracing setup in :repo:`/examples/jaeger-tracing/front-envoy-jaeger.yaml`) and setup to propagate the spans generated @@ -48,10 +48,10 @@ To build this sandbox example, and start the example apps run the following comm Name Command State Ports ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ - jaeger-tracing_front-envoy_1 /docker-entrypoint.sh /bin ... Up 10000/tcp, 0.0.0.0:8000->80/tcp, 0.0.0.0:8001->8001/tcp + jaeger-tracing_front-envoy_1 /docker-entrypoint.sh /bin ... Up 10000/tcp, 0.0.0.0:8000->8000/tcp, 0.0.0.0:8001->8001/tcp jaeger-tracing_jaeger_1 /go/bin/all-in-one-linux - ... Up 14250/tcp, 14268/tcp, 0.0.0.0:16686->16686/tcp, 5775/udp, 5778/tcp, 6831/udp, 6832/udp, 0.0.0.0:9411->9411/tcp - jaeger-tracing_service1_1 /bin/sh -c /usr/local/bin/ ... Up 10000/tcp, 80/tcp - jaeger-tracing_service2_1 /bin/sh -c /usr/local/bin/ ... Up 10000/tcp, 80/tcp + jaeger-tracing_service1_1 /bin/sh -c /usr/local/bin/ ... Up 10000/tcp, 8000/tcp + jaeger-tracing_service2_1 /bin/sh -c /usr/local/bin/ ... 
Up 10000/tcp, 8000/tcp **Step 2: Generate some load** diff --git a/docs/root/start/sandboxes/lua.rst b/docs/root/start/sandboxes/lua.rst index 874711617aa1a..3a9b5c75cf91f 100644 --- a/docs/root/start/sandboxes/lua.rst +++ b/docs/root/start/sandboxes/lua.rst @@ -37,7 +37,7 @@ Terminal 1 Name Command State Ports -------------------------------------------------------------------------------------------------------------------- - lua_proxy_1 /docker-entrypoint.sh /bin Up 10000/tcp, 0.0.0.0:8000->80/tcp, 0.0.0.0:8001->8001/tcp + lua_proxy_1 /docker-entrypoint.sh /bin Up 10000/tcp, 0.0.0.0:8000->8000/tcp, 0.0.0.0:8001->8001/tcp lua_web_service_1 node ./index.js Up 0.0.0.0:8080->80/tcp **Step 3: Send a request to the service** diff --git a/docs/root/start/sandboxes/zipkin_tracing.rst b/docs/root/start/sandboxes/zipkin_tracing.rst index c64ce82e9f39a..649e78bffacd5 100644 --- a/docs/root/start/sandboxes/zipkin_tracing.rst +++ b/docs/root/start/sandboxes/zipkin_tracing.rst @@ -10,7 +10,7 @@ service1 makes an API call to service2 before returning a response. The three containers will be deployed inside a virtual network called ``envoymesh``. All incoming requests are routed via the front Envoy, which is acting as a reverse proxy -sitting on the edge of the ``envoymesh`` network. Port ``80`` is mapped to port ``8000`` +sitting on the edge of the ``envoymesh`` network. Port ``8000`` is exposed by docker compose (see :repo:`/examples/zipkin-tracing/docker-compose.yaml`). 
Notice that all Envoys are configured to collect request traces (e.g., http_connection_manager/config/tracing setup in :repo:`/examples/zipkin-tracing/front-envoy-zipkin.yaml`) and setup to propagate the spans generated @@ -48,9 +48,9 @@ To build this sandbox example, and start the example apps run the following comm Name Command State Ports ----------------------------------------------------------------------------------------------------------------------------- - zipkin-tracing_front-envoy_1 /docker-entrypoint.sh /bin ... Up 10000/tcp, 0.0.0.0:8000->80/tcp, 0.0.0.0:8001->8001/tcp - zipkin-tracing_service1_1 /bin/sh -c /usr/local/bin/ ... Up 10000/tcp, 80/tcp - zipkin-tracing_service2_1 /bin/sh -c /usr/local/bin/ ... Up 10000/tcp, 80/tcp + zipkin-tracing_front-envoy_1 /docker-entrypoint.sh /bin ... Up 10000/tcp, 0.0.0.0:8000->8000/tcp, 0.0.0.0:8001->8001/tcp + zipkin-tracing_service1_1 /bin/sh -c /usr/local/bin/ ... Up 10000/tcp, 8000/tcp + zipkin-tracing_service2_1 /bin/sh -c /usr/local/bin/ ... Up 10000/tcp, 8000/tcp zipkin-tracing_zipkin_1 /busybox/sh run.sh Up 9410/tcp, 0.0.0.0:9411->9411/tcp **Step 2: Generate some load** diff --git a/docs/root/start/start.rst b/docs/root/start/start.rst index a8aa1d24139cf..2d3e81951aff6 100644 --- a/docs/root/start/start.rst +++ b/docs/root/start/start.rst @@ -8,7 +8,7 @@ This section gets you started with a very simple configuration and provides some The fastest way to get started using Envoy is :ref:`installing pre-built binaries `. You can also :ref:`build it ` from source. -These examples use the :ref:`v2 Envoy API `, but use only the static configuration +These examples use the :ref:`v3 Envoy API `, but use only the static configuration feature of the API, which is most useful for simple requirements. For more complex requirements :ref:`Dynamic Configuration ` is supported. @@ -38,9 +38,9 @@ Simple Configuration Envoy can be configured using a single YAML file passed in as an argument on the command line. 
-The :ref:`admin message ` is required to configure +The :ref:`admin message ` is required to configure the administration server. The `address` key specifies the -listening :ref:`address ` +listening :ref:`address ` which in this case is simply `0.0.0.0:9901`. .. code-block:: yaml @@ -50,7 +50,7 @@ which in this case is simply `0.0.0.0:9901`. address: socket_address: { address: 0.0.0.0, port_value: 9901 } -The :ref:`static_resources ` contains everything that is configured statically when Envoy starts, +The :ref:`static_resources ` contains everything that is configured statically when Envoy starts, as opposed to the means of configuring resources dynamically when Envoy is running. The :ref:`v2 API Overview ` describes this. @@ -58,7 +58,7 @@ The :ref:`v2 API Overview ` describes this. static_resources: -The specification of the :ref:`listeners `. +The specification of the :ref:`listeners `. .. code-block:: yaml @@ -70,7 +70,7 @@ The specification of the :ref:`listeners `. +The specification of the :ref:`clusters `. .. code-block:: yaml @@ -107,7 +107,7 @@ The specification of the :ref:`clusters ` transport_socket: name: envoy.transport_sockets.tls typed_config: - "@type": type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext sni: www.google.com @@ -148,6 +148,51 @@ by using a volume. volumes: - ./envoy.yaml:/etc/envoy/envoy.yaml +By default the Docker image will run as the ``envoy`` user created at build time. + +The ``uid`` and ``gid`` of this user can be set at runtime using the ``ENVOY_UID`` and ``ENVOY_GID`` +environment variables. 
This can be done, for example, on the Docker command line:: + + $ docker run -d --name envoy -e ENVOY_UID=777 -e ENVOY_GID=777 -p 9901:9901 -p 10000:10000 envoy:v1 + +This can be useful if you wish to restrict or provide access to ``unix`` sockets inside the container, or +for controlling access to an ``envoy`` socket from outside of the container. + +If you wish to run the container as the ``root`` user you can set ``ENVOY_UID`` to ``0``. + +The ``envoy`` image sends application logs to ``/dev/stdout`` and ``/dev/stderr`` by default, and these +can be viewed in the container log. + +If you send application, admin or access logs to a file output, the ``envoy`` user will require the +necessary permissions to write to this file. This can be achieved by setting the ``ENVOY_UID`` and/or +by making the file writable by the envoy user. + +For example, to mount a log folder from the host and make it writable, you can: + +.. substitution-code-block:: none + + $ mkdir logs + $ chown 777 logs + $ docker run -d -v `pwd`/logs:/var/log --name envoy -e ENVOY_UID=777 -p 9901:9901 -p 10000:10000 envoy:v1 + +You can then configure ``envoy`` to log to files in ``/var/log``. + +The default ``envoy`` ``uid`` and ``gid`` are ``101``. + +The ``envoy`` user also needs to have permission to access any required configuration files mounted +into the container. + +If you are running in an environment with a strict ``umask`` setting, you may need to provide envoy with +access either by setting the ``uid`` or ``gid`` of the file, or by making the configuration file readable +by the envoy user. + +One method of doing this without changing any file permissions or running as root inside the container +is to start the container with the host user's ``uid``, for example: + +.. substitution-code-block:: none + + $ docker run -d --name envoy -e ENVOY_UID=`id -u` -p 9901:9901 -p 10000:10000 envoy:v1 + Sandboxes --------- @@ -162,6 +207,7 @@ features.
The following sandboxes are available: sandboxes/cors sandboxes/csrf + sandboxes/ext_authz sandboxes/fault_injection sandboxes/front_proxy sandboxes/grpc_bridge diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 7cb8b8f7bdfa8..4a6ac04a2576d 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -1,30 +1,81 @@ -1.15.0 (Pending) +1.16.0 (Pending) ================ -Changes -------- - -* access loggers: added GRPC_STATUS operator on logging format. -* access loggers: extened specifier for FilterStateFormatter to output :ref:`unstructured log string `. -* dynamic forward proxy: added :ref:`SNI based dynamic forward proxy ` support. -* fault: added support for controlling the percentage of requests that abort, delay and response rate limits faults - are applied to using :ref:`HTTP headers ` to the HTTP fault filter. -* filter: add `upstram_rq_time` stats to the GPRC stats filter. - Disabled by default and can be enabled via :ref:`enable_upstream_stats `. -* grpc-json: added support for streaming response using - `google.api.HttpBody `_. -* http: fixed a bug where the upgrade header was not cleared on responses to non-upgrade requests. - Can be reverted temporarily by setting runtime feature `envoy.reloadable_features.fix_upgrade_response` to false. -* logger: added :ref:`--log-format-prefix-with-location ` command line option to prefix '%v' with file path and line number. -* network filters: added a :ref:`postgres proxy filter `. -* router: allow retries of streaming or incomplete requests. This removes stat `rq_retry_skipped_request_not_complete`. -* tracing: tracing configuration has been made fully dynamic and every HTTP connection manager - can now have a separate :ref:`tracing provider `. -* upstream: fixed a bug where Envoy would panic when receiving a GRPC SERVICE_UNKNOWN status on the health check. 
+Incompatible Behavior Changes +----------------------------- +*Changes that are expected to cause an incompatibility if applicable; deployment changes are likely required* + +* build: added visibility rules for upstream. If these cause visibility related breakage, see notes in //BUILD. + +Minor Behavior Changes +---------------------- +*Changes that may cause incompatibilities for some users, but should not for most* + +* compressor: always insert `Vary` headers for compressible resources even if it's decided not to compress a response due to incompatible `Accept-Encoding` value. The `Vary` header needs to be inserted to let a caching proxy in front of Envoy know that the requested resource still can be served with compression applied. +* decompressor: headers-only requests were incorrectly not advertising accept-encoding when configured to do so. This is now fixed. +* http: added :ref:`headers_to_add ` to :ref:`local reply mapper ` to allow its users to add/append/override response HTTP headers to local replies. +* http: added HCM level configuration of :ref:`error handling on invalid messaging ` which substantially changes Envoy's behavior when encountering invalid HTTP/1.1 defaulting to closing the connection instead of allowing reuse. This can temporarily be reverted by setting `envoy.reloadable_features.hcm_stream_error_on_invalid_message` to false, or permanently reverted by setting the :ref:`HCM option ` to true to restore prior HTTP/1.1 behavior and setting the *new* HTTP/2 configuration :ref:`override_stream_error_on_invalid_http_message ` to false to retain prior HTTP/2 behavior. +* http: changed Envoy to send error headers and body when possible. This behavior may be temporarily reverted by setting `envoy.reloadable_features.allow_response_for_timeout` to false. +* http: changed empty trailers encoding behavior by sending empty data with ``end_stream`` true (instead of sending empty trailers) for HTTP/2.
This behavior can be reverted temporarily by setting runtime feature ``envoy.reloadable_features.http2_skip_encoding_empty_trailers`` to false. +* http: clarified and enforced 1xx handling. Multiple 100-continue headers are coalesced when proxying. 1xx headers other than {100, 101} are dropped. +* http: fixed the 100-continue response path to properly handle upstream failure by sending 5xx responses. This behavior can be temporarily reverted by setting `envoy.reloadable_features.allow_500_after_100` to false. +* http: the per-stream FilterState maintained by the HTTP connection manager will now provide read/write access to the downstream connection FilterState. As such, code that relies on interacting with this might + see a change in behavior. +* logging: change default log format to `"[%Y-%m-%d %T.%e][%t][%l][%n] [%g:%#] %v"` and default value of :option:`--log-format-prefix-with-location` to `0`. +* logging: nghttp2 log messages no longer appear at trace level unless `ENVOY_NGHTTP2_TRACE` is set + in the environment. +* router: added transport failure reason to response body when upstream reset happens. After this change, the response body will be of the form `upstream connect error or disconnect/reset before headers. reset reason:{}, transport failure reason:{}`. This behavior may be reverted by setting runtime feature `envoy.reloadable_features.http_transport_failure_reason_in_body` to false. +* router: now consumes all retry related headers to prevent them from being propagated to the upstream. This behavior may be reverted by setting runtime feature `envoy.reloadable_features.consume_all_retry_headers` to false. +* thrift_proxy: special characters {'\0', '\r', '\n'} will be stripped from thrift headers. + +Bug Fixes +--------- +*Changes expected to improve the state of the world and are unlikely to have negative effects* + +* csrf: fixed issues with regard to origin and host header parsing.
+* dynamic_forward_proxy: only perform DNS lookups for routes to Dynamic Forward Proxy clusters since other cluster types handle DNS lookup themselves. +* fault: fixed an issue with `active_faults` gauge not being decremented when abort faults were injected. +* grpc-web: fixed an issue with failing HTTP/2 requests on some browsers. Notably, WebKit-based browsers (https://bugs.webkit.org/show_bug.cgi?id=210108), Internet Explorer 11, and Edge (pre-Chromium). +* rocketmq_proxy network-level filter: fixed an issue involving incorrect header lengths. In debug mode it causes a crash and in release mode it causes an underflow. + +Removed Config or Runtime +------------------------- +*Normally occurs at the end of the* :ref:`deprecation period ` + +* http: removed legacy header sanitization and the runtime guard `envoy.reloadable_features.strict_header_validation`. +* http: removed legacy transfer-encoding enforcement and runtime guard `envoy.reloadable_features.reject_unsupported_transfer_encodings`. +* http: removed configurable strict host validation and runtime guard `envoy.reloadable_features.strict_authority_validation`. + +New Features +------------ +* access log: added a :ref:`dynamic metadata filter` for access logs, which filters whether to log based on matching dynamic metadata. +* access log: added support for :ref:`%DOWNSTREAM_PEER_FINGERPRINT_1% ` as a response flag. +* build: enable building envoy :ref:`arm64 images ` by buildx tool in x86 CI platform. +* dynamic_forward_proxy: added :ref:`use_tcp_for_dns_lookups` option to use TCP for DNS lookups in order to match the DNS options for :ref:`Clusters`. +* ext_authz filter: added support for emitting dynamic metadata for both :ref:`HTTP ` and :ref:`network ` filters. +* grpc-json: support specifying `response_body` field for `google.api.HttpBody` message. +* http: added support for :ref:`%DOWNSTREAM_PEER_FINGERPRINT_1% ` as custom header.
+* http: introduced new HTTP/1 and HTTP/2 codec implementations that will remove the use of exceptions for control flow due to high risk factors and instead use error statuses. The old behavior is used by default, but the new codecs can be enabled for testing by setting the runtime feature `envoy.reloadable_features.new_codec_behavior` to true. The new codecs will be in development for one month, and then enabled by default while the old codecs are deprecated. +* load balancer: added a :ref:`configuration` option to specify the active request bias used by the least request load balancer. +* lua: added Lua APIs to access :ref:`SSL connection info ` object. +* postgres network filter: :ref:`metadata ` is produced based on SQL query. +* ratelimit: added :ref:`enable_x_ratelimit_headers ` option to enable `X-RateLimit-*` headers as defined in `draft RFC `_. +* rbac filter: added a log action to the :ref:`RBAC filter ` which sets dynamic metadata to inform access loggers whether to log. +* router: added new + :ref:`envoy-ratelimited` + retry policy, which allows retrying envoy's own rate limited responses. +* signal: added support for calling fatal error handlers without envoy's signal handler, via FatalErrorHandler::callFatalErrorHandlers(). +* stats: added optional histograms to :ref:`cluster stats ` + that track headers and body sizes of requests and responses. +* stats: allow configuring histogram buckets for stats sinks and admin endpoints that support it. +* tap: added :ref:`generic body matcher` to scan http requests and responses for text or hex patterns. +* tcp: switched the TCP connection pool to the new "shared" connection pool, sharing a common code base with HTTP and HTTP/2. Any unexpected behavioral changes can be temporarily reverted by setting `envoy.reloadable_features.new_tcp_connection_pool` to false. 
+* watchdog: support randomizing the watchdog's kill timeout to prevent synchronized kills via a maximum jitter parameter :ref:`max_kill_timeout_jitter`. +* xds: added :ref:`extension config discovery` support for HTTP filters. Deprecated ---------- - -* Tracing provider configuration as part of :ref:`bootstrap config ` - has been deprecated in favor of configuration as part of :ref:`HTTP connection manager - `. +* The :ref:`track_timeout_budgets ` + field has been deprecated in favor of `timeout_budgets` part of an :ref:`Optional Configuration `. +* tap: the :ref:`match_config ` field has been deprecated in favor of + :ref:`match ` field. diff --git a/docs/root/version_history/v1.11.0.rst b/docs/root/version_history/v1.11.0.rst index 78f761f99671c..1bc4051b7da4a 100644 --- a/docs/root/version_history/v1.11.0.rst +++ b/docs/root/version_history/v1.11.0.rst @@ -19,7 +19,7 @@ Changes * build: releases are built with Clang and linked with LLD. * config: added :ref:stats_server_version_override` ` in bootstrap, that can be used to override :ref:`server.version statistic `. * control-plane: management servers can respond with HTTP 304 to indicate that config is up to date for Envoy proxies polling a :ref:`REST API Config Type ` -* csrf: added support for whitelisting additional source origins. +* csrf: added support for allowlisting additional source origins. * dns: added support for getting DNS record TTL which is used by STRICT_DNS/LOGICAL_DNS cluster as DNS refresh rate. * dubbo_proxy: support the :ref:`dubbo proxy filter `. * dynamo_request_parser: adding support for transactions. Adds check for new types of dynamodb operations (TransactWriteItems, TransactGetItems) and awareness for new types of dynamodb errors (IdempotentParameterMismatchException, TransactionCanceledException, TransactionInProgressException).
diff --git a/docs/root/version_history/v1.12.0.rst b/docs/root/version_history/v1.12.0.rst index da2930e8e479a..9bc7510639ec6 100644 --- a/docs/root/version_history/v1.12.0.rst +++ b/docs/root/version_history/v1.12.0.rst @@ -8,7 +8,7 @@ Changes * access log: added :ref:`buffering ` and :ref:`periodical flushing ` support to gRPC access logger. Defaults to 16KB buffer and flushing every 1 second. * access log: added DOWNSTREAM_DIRECT_REMOTE_ADDRESS and DOWNSTREAM_DIRECT_REMOTE_ADDRESS_WITHOUT_PORT :ref:`access log formatters ` and gRPC access logger. * access log: gRPC Access Log Service (ALS) support added for :ref:`TCP access logs `. -* access log: reintroduced :ref:`filesystem ` stats and added the `write_failed` counter to track failed log writes. +* access log: reintroduced :ref:`filesystem ` stats and added the `write_failed` counter to track failed log writes. * admin: added ability to configure listener :ref:`socket options `. * admin: added config dump support for Secret Discovery Service :ref:`SecretConfigDump `. * admin: added support for :ref:`draining ` listeners via admin interface. diff --git a/docs/root/version_history/v1.12.4.rst b/docs/root/version_history/v1.12.4.rst new file mode 100644 index 0000000000000..1635bbb5f0000 --- /dev/null +++ b/docs/root/version_history/v1.12.4.rst @@ -0,0 +1,8 @@ +1.12.4 (June 8, 2020) +===================== + +Changes +------- + +* http: added :ref:`headers_with_underscores_action setting ` to control how client requests with header names containing underscore characters are handled. The options are to allow such headers, reject request or drop headers. The default is to allow headers, preserving existing behavior. +* http: fixed CVE-2020-11080 by rejecting HTTP/2 SETTINGS frames with too many parameters. 
diff --git a/docs/root/version_history/v1.12.5.rst b/docs/root/version_history/v1.12.5.rst new file mode 100644 index 0000000000000..b246e20d885b6 --- /dev/null +++ b/docs/root/version_history/v1.12.5.rst @@ -0,0 +1,11 @@ +1.12.5 (June 30, 2020) +====================== + +Changes +------- +* buffer: fixed CVE-2020-12603 by avoiding fragmentation, and tracking of HTTP/2 data and control frames in the output buffer. +* http: fixed CVE-2020-12604 by changing :ref:`stream_idle_timeout ` + to also defend against an HTTP/2 peer that does not open stream window once an entire response has been buffered to be sent to a downstream client. +* http: fixed CVE-2020-12605 by including request URL in request header size computation, and rejecting partial headers that exceed configured limits. +* listener: fixed CVE-2020-8663 by adding runtime support for :ref:`per-listener limits ` on active/accepted connections. +* overload management: fixed CVE-2020-8663 by adding runtime support for :ref:`global limits ` on active/accepted connections. diff --git a/docs/root/version_history/v1.13.2.rst b/docs/root/version_history/v1.13.2.rst new file mode 100644 index 0000000000000..641bbaa451d46 --- /dev/null +++ b/docs/root/version_history/v1.13.2.rst @@ -0,0 +1,8 @@ +1.13.2 (June 8, 2020) +===================== + +Changes +------- + +* http: added :ref:`headers_with_underscores_action setting ` to control how client requests with header names containing underscore characters are handled. The options are to allow such headers, reject request or drop headers. The default is to allow headers, preserving existing behavior. +* http: fixed CVE-2020-11080 by rejecting HTTP/2 SETTINGS frames with too many parameters. 
diff --git a/docs/root/version_history/v1.13.3.rst b/docs/root/version_history/v1.13.3.rst new file mode 100644 index 0000000000000..6002a62c496b0 --- /dev/null +++ b/docs/root/version_history/v1.13.3.rst @@ -0,0 +1,12 @@ +1.13.3 (June 30, 2020) +====================== + +Changes +------- + +* buffer: fixed CVE-2020-12603 by avoiding fragmentation, and tracking of HTTP/2 data and control frames in the output buffer. +* http: fixed CVE-2020-12604 by changing :ref:`stream_idle_timeout ` + to also defend against an HTTP/2 peer that does not open stream window once an entire response has been buffered to be sent to a downstream client. +* http: fixed CVE-2020-12605 by including request URL in request header size computation, and rejecting partial headers that exceed configured limits. +* listener: fixed CVE-2020-8663 by adding runtime support for :ref:`per-listener limits ` on active/accepted connections. +* overload management: fixed CVE-2020-8663 by adding runtime support for :ref:`global limits ` on active/accepted connections. diff --git a/docs/root/version_history/v1.14.2.rst b/docs/root/version_history/v1.14.2.rst new file mode 100644 index 0000000000000..c20f93650dcaa --- /dev/null +++ b/docs/root/version_history/v1.14.2.rst @@ -0,0 +1,14 @@ +1.14.2 (June 8, 2020) +===================== + +Changes +------- + +* http: fixed CVE-2020-11080 by rejecting HTTP/2 SETTINGS frames with too many parameters. +* http: the :ref:`stream_idle_timeout ` + now also defends against an HTTP/2 peer that does not open stream window once an entire response + has been buffered to be sent to a downstream client. +* listener: Add runtime support for `per-listener limits ` on + active/accepted connections. +* overload management: Add runtime support for :ref:`global limits ` + on active/accepted connections. 
diff --git a/docs/root/version_history/v1.14.3.rst b/docs/root/version_history/v1.14.3.rst new file mode 100644 index 0000000000000..8a3a3d91da089 --- /dev/null +++ b/docs/root/version_history/v1.14.3.rst @@ -0,0 +1,11 @@ +1.14.3 (June 30, 2020) +====================== + +Changes +------- +* buffer: fixed CVE-2020-12603 by avoiding fragmentation, and tracking of HTTP/2 data and control frames in the output buffer. +* http: fixed CVE-2020-12604 by changing :ref:`stream_idle_timeout ` + to also defend against an HTTP/2 peer that does not open stream window once an entire response has been buffered to be sent to a downstream client. +* http: fixed CVE-2020-12605 by including request URL in request header size computation, and rejecting partial headers that exceed configured limits. +* listener: fixed CVE-2020-8663 by adding runtime support for :ref:`per-listener limits ` on active/accepted connections. +* overload management: fixed CVE-2020-8663 by adding runtime support for :ref:`global limits ` on active/accepted connections. diff --git a/docs/root/version_history/v1.15.0.rst b/docs/root/version_history/v1.15.0.rst new file mode 100644 index 0000000000000..eb214a33cc404 --- /dev/null +++ b/docs/root/version_history/v1.15.0.rst @@ -0,0 +1,166 @@ +1.15.0 (July 7, 2020) +===================== + + +Incompatible Behavior Changes +----------------------------- +*Changes that are expected to cause an incompatibility if applicable; deployment changes are likely required* + +* build: official released binary is now built on Ubuntu 18.04, requires glibc >= 2.27. +* client_ssl_auth: the `auth_ip_white_list` stat has been renamed to + :ref:`auth_ip_allowlist `. +* header to metadata: on_header_missing rules with empty values are now rejected (they were skipped before). +* router: path_redirect now keeps query string by default. This behavior may be reverted by setting runtime feature `envoy.reloadable_features.preserve_query_string_in_path_redirects` to false. 
+* tls: fixed a bug where wildcard matching for "\*.foo.com" also matched domains of the form "a.b.foo.com". This behavior can be temporarily reverted by setting runtime feature `envoy.reloadable_features.fix_wildcard_matching` to false. + +Minor Behavior Changes +---------------------- +*Changes that may cause incompatibilities for some users, but should not for most* + +* access loggers: applied existing buffer limits to access logs, as well as :ref:`stats ` for logged / dropped logs. This can be reverted temporarily by setting runtime feature `envoy.reloadable_features.disallow_unbounded_access_logs` to false. +* build: runs as non-root inside Docker containers. Existing behaviour can be restored by setting the environment variable `ENVOY_UID` to `0`. `ENVOY_UID` and `ENVOY_GID` can be used to set the envoy user's `uid` and `gid` respectively. +* health check: in the health check filter the :ref:`percentage of healthy servers in upstream clusters ` is now interpreted as an integer. +* hot restart: added the option :option:`--use-dynamic-base-id` to select an unused base ID at startup and the option :option:`--base-id-path` to write the base id to a file (for reuse with later hot restarts). +* http: changed early error path for HTTP/1.1 so that responses consistently flow through the http connection manager, and the http filter chains. This behavior may be temporarily reverted by setting runtime feature `envoy.reloadable_features.early_errors_via_hcm` to false. +* http: fixed several bugs with applying correct connection close behavior across the http connection manager, health checker, and connection pool. This behavior may be temporarily reverted by setting runtime feature `envoy.reloadable_features.fix_connection_close` to false. +* http: fixed a bug where the upgrade header was not cleared on responses to non-upgrade requests. + Can be reverted temporarily by setting runtime feature `envoy.reloadable_features.fix_upgrade_response` to false. 
+* http: stopped overwriting `date` response headers. Responses without a `date` header will still have the header properly set. This behavior can be temporarily reverted by setting `envoy.reloadable_features.preserve_upstream_date` to false. +* http: stopped adding a synthetic path to CONNECT requests, meaning unconfigured CONNECT requests will now return 404 instead of 403. This behavior can be temporarily reverted by setting `envoy.reloadable_features.stop_faking_paths` to false. +* http: stopped allowing upstream 1xx or 204 responses with Transfer-Encoding or non-zero Content-Length headers. Content-Length of 0 is allowed, but stripped. This behavior can be temporarily reverted by setting `envoy.reloadable_features.strict_1xx_and_204_response_headers` to false. +* http: upstream connections will now automatically set ALPN when this value is not explicitly set elsewhere (e.g. on the upstream TLS config). This behavior may be temporarily reverted by setting runtime feature `envoy.reloadable_features.http_default_alpn` to false. +* listener: fixed a bug where when a static listener fails to be added to a worker, the listener was not removed from the active listener list. +* router: extended to allow retries of streaming or incomplete requests. This removes stat `rq_retry_skipped_request_not_complete`. +* router: extended to allow retries by default when upstream responds with :ref:`x-envoy-overloaded `. + +Bug Fixes +--------- +*Changes expected to improve the state of the world and are unlikely to have negative effects* + +* adaptive concurrency: fixed a minRTT calculation bug where requests started before the concurrency + limit was pinned to the minimum would skew the new minRTT value if the replies arrived after the + start of the new minRTT window. +* buffer: fixed CVE-2020-12603 by avoiding fragmentation, and tracking of HTTP/2 data and control frames in the output buffer. +* grpc-json: fixed a bug when in trailers only gRPC response (e.g. 
error) HTTP status code is not being re-written. +* http: fixed a bug in the grpc_http1_reverse_bridge filter where header-only requests were forwarded with a non-zero content length. +* http: fixed a bug where in some cases slash was moved from path to query string when :ref:`merging of adjacent slashes` is enabled. +* http: fixed CVE-2020-12604 by changing :ref:`stream_idle_timeout ` + to also defend against an HTTP/2 peer that does not open stream window once an entire response has been buffered to be sent to a downstream client. +* http: fixed CVE-2020-12605 by including request URL in request header size computation, and rejecting partial headers that exceed configured limits. +* http: fixed several bugs with applying correct connection close behavior across the http connection manager, health checker, and connection pool. This behavior may be temporarily reverted by setting runtime feature `envoy.reloadable_features.fix_connection_close` to false. +* listener: fixed CVE-2020-8663 by adding runtime support for :ref:`per-listener limits ` on active/accepted connections. +* overload management: fixed CVE-2020-8663 by adding runtime support for :ref:`global limits ` on active/accepted connections. +* prometheus stats: fixed the sort order of output lines to comply with the standard. +* udp: the :ref:`reuse_port ` listener option must now be + specified for UDP listeners if concurrency is > 1. This previously crashed so is considered a + bug fix. +* upstream: fixed a bug where Envoy would panic when receiving a GRPC SERVICE_UNKNOWN status on the health check. + +Removed Config or Runtime +------------------------- +*Normally occurs at the end of the* :ref:`deprecation period ` + +* http: removed legacy connection pool code and their runtime features: `envoy.reloadable_features.new_http1_connection_pool_behavior` and + `envoy.reloadable_features.new_http2_connection_pool_behavior`. 
+ +New Features +------------ + +* access loggers: added file access logger config :ref:`log_format `. +* access loggers: added GRPC_STATUS operator on logging format. +* access loggers: added gRPC access logger config added :ref:`API version ` to explicitly set the version of gRPC service endpoint and message to be used. +* access loggers: extended specifier for FilterStateFormatter to output :ref:`unstructured log string `. +* admin: added support for dumping EDS config at :ref:`/config_dump?include_eds `. +* aggregate cluster: made route :ref:`retry_priority ` predicates work with :ref:`this cluster type `. +* build: official released binary is now built on Ubuntu 18.04, requires glibc >= 2.27. +* build: official released binary is now built with Clang 10.0.0. +* cluster: added an extension point for configurable :ref:`upstreams `. +* compressor: exposed generic :ref:`compressor ` filter to users. +* config: added :ref:`identifier ` stat that reflects control plane identifier. +* config: added :ref:`version_text ` stat that reflects xDS version. +* decompressor: exposed generic :ref:`decompressor ` filter to users. +* dynamic forward proxy: added :ref:`SNI based dynamic forward proxy ` support. +* dynamic forward proxy: added configurable :ref:`circuit breakers ` for resolver on DNS cache. + This behavior can be temporarily disabled by the runtime feature `envoy.reloadable_features.enable_dns_cache_circuit_breakers`. + If this runtime feature is disabled, the upstream circuit breakers for the cluster will be used even if the :ref:`DNS Cache circuit breakers ` are configured. +* dynamic forward proxy: added :ref:`allow_insecure_cluster_options` to allow disabling of auto_san_validation and auto_sni. +* ext_authz filter: added :ref:`v2 deny_at_disable `, :ref:`v3 deny_at_disable `. This allows force denying protected paths while filter gets disabled, by setting this key to true. 
+* ext_authz filter: added API version field for both :ref:`HTTP ` + and :ref:`Network ` filters to explicitly set the version of gRPC service endpoint and message to be used. +* ext_authz filter: added :ref:`v3 allowed_upstream_headers_to_append ` to allow appending multiple header entries (returned by the authorization server) with the same key to the original request headers. +* fault: added support for controlling the percentage of requests that abort, delay and response rate limits faults + are applied to using :ref:`HTTP headers ` to the HTTP fault filter. +* fault: added support for specifying grpc_status code in abort faults using + :ref:`HTTP header ` or abort fault configuration in HTTP fault filter. +* filter: added `upstream_rq_time` stats to the gRPC stats filter. + Disabled by default and can be enabled via :ref:`enable_upstream_stats `. +* grpc: added support for Google gRPC :ref:`custom channel arguments `. +* grpc-json: added support for streaming response using + `google.api.HttpBody `_. +* grpc-json: send a `x-envoy-original-method` header to grpc services. +* gzip filter: added option to set zlib's next output buffer size. +* hds: updated to allow to explicitly set the API version of gRPC service endpoint and message to be used. +* header to metadata: added support for regex substitutions on header values. +* health checks: allowed configuring health check transport sockets by specifying :ref:`transport socket match criteria `. +* http: added :ref:`local_reply config ` to http_connection_manager to customize :ref:`local reply `. +* http: added :ref:`stripping port from host header ` support. +* http: added support for proxying CONNECT requests, terminating CONNECT requests, and converting raw TCP streams into HTTP/2 CONNECT requests. See :ref:`upgrade documentation` for details. 
+* listener: added in place filter chain update flow for tcp listener update which doesn't close connections if the corresponding network filter chain is equivalent during the listener update. + Can be disabled by setting runtime feature `envoy.reloadable_features.listener_in_place_filterchain_update` to false. + Also added additional draining filter chain stat for :ref:`listener manager ` to track the number of draining filter chains and the number of in place update attempts. +* logger: added :option:`--log-format-prefix-with-location` command line option to prefix '%v' with file path and line number. +* lrs: added new *envoy_api_field_service.load_stats.v2.LoadStatsResponse.send_all_clusters* field + in LRS response, which allows management servers to avoid explicitly listing all clusters it is + interested in; behavior is allowed based on new "envoy.lrs.supports_send_all_clusters" capability + in :ref:`client_features` field. +* lrs: updated to allow to explicitly set the API version of gRPC service endpoint and message to be used. +* lua: added :ref:`per route config ` for Lua filter. +* lua: added tracing to the ``httpCall()`` API. +* metrics service: added :ref:`API version ` to explicitly set the version of gRPC service endpoint and message to be used. +* network filters: added a :ref:`postgres proxy filter `. +* network filters: added a :ref:`rocketmq proxy filter `. +* performance: enabled stats symbol table implementation by default. To disable it, add + `--use-fake-symbol-table 1` to the command-line arguments when starting Envoy. +* ratelimit: added support for use of dynamic metadata :ref:`dynamic_metadata ` as a ratelimit action. +* ratelimit: added :ref:`API version ` to explicitly set the version of gRPC service endpoint and message to be used. +* ratelimit: support specifying dynamic overrides in rate limit descriptors using :ref:`limit override ` config. 
+* redis: added acl support :ref:`downstream_auth_username ` for downstream client ACL authentication, and :ref:`auth_username ` to configure authentication usernames for upstream Redis 6+ server clusters with ACL enabled. +* regex: added support for enforcing max program size via runtime and stats to monitor program size for :ref:`Google RE2 `. +* request_id: added to :ref:`always_set_request_id_in_response setting ` + to set :ref:`x-request-id ` header in response even if + tracing is not forced. +* router: added more fine grained internal redirect configs to the :ref:`internal_redirect_policy + ` field. +* router: added regex substitution support for header based hashing. +* router: added support for RESPONSE_FLAGS and RESPONSE_CODE_DETAILS :ref:`header formatters + `. +* router: allow Rate Limiting Service to be called in case of missing request header for a descriptor if the :ref:`skip_if_absent ` field is set to true. +* runtime: added new gauge :ref:`deprecated_feature_seen_since_process_start ` that gets reset across hot restarts. +* server: added the option :option:`--drain-strategy` to enable different drain strategies for DrainManager::drainClose(). +* server: added :ref:`server.envoy_bug_failures ` statistic to count ENVOY_BUG failures. +* stats: added the option to :ref:`report counters as deltas ` to the metrics service stats sink. +* tracing: made tracing configuration fully dynamic and every HTTP connection manager + can now have a separate :ref:`tracing provider `. +* udp: upgraded :ref:`udp_proxy ` filter to v3 and promoted it out of alpha. + +Deprecated +---------- + +* Tracing provider configuration as part of :ref:`bootstrap config ` + has been deprecated in favor of configuration as part of :ref:`HTTP connection manager + `. +* The :ref:`HTTP Gzip filter ` has been deprecated in favor of + :ref:`Compressor `. +* The * :ref:`GoogleRE2.max_program_size` + field is now deprecated. 
Management servers are expected to validate regexp program sizes + instead of expecting the client to do it. Alternatively, the max program size can be enforced by Envoy via runtime. +* The :ref:`internal_redirect_action ` + field and :ref:`max_internal_redirects ` field + are now deprecated. This changes the implemented default cross scheme redirect behavior. + All cross scheme redirects are disallowed by default. To restore + the previous behavior, set allow_cross_scheme_redirect=true and use + :ref:`safe_cross_scheme`, + in :ref:`predicates `. +* File access logger fields :ref:`format `, :ref:`json_format ` and :ref:`typed_json_format ` are deprecated in favor of :ref:`log_format `. +* A warning is now logged when v2 xDS api is used. This behavior can be temporarily disabled by setting `envoy.reloadable_features.enable_deprecated_v2_api_warning` to `false`. +* Using cluster circuit breakers for DNS Cache is now deprecated in favor of :ref:`DNS cache circuit breakers `. This behavior can be temporarily disabled by setting `envoy.reloadable_features.enable_dns_cache_circuit_breakers` to `false`. diff --git a/docs/root/version_history/v1.4.0.rst b/docs/root/version_history/v1.4.0.rst index 1acf5011305ed..f940deb1b5a65 100644 --- a/docs/root/version_history/v1.4.0.rst +++ b/docs/root/version_history/v1.4.0.rst @@ -14,7 +14,7 @@ Changes * Hot restart :repo:`compile time flag ` added. * Original destination :ref:`cluster ` and :ref:`load balancer ` added. -* :ref:`WebSocket ` is now supported. +* :ref:`WebSocket ` is now supported. * Virtual cluster priorities have been hard removed without deprecation as we are reasonably sure no one is using this feature. * Route `validate_clusters` option added. 
diff --git a/docs/root/version_history/v1.6.0.rst b/docs/root/version_history/v1.6.0.rst index cdcbbcf447f2f..879eb2f8df771 100644 --- a/docs/root/version_history/v1.6.0.rst +++ b/docs/root/version_history/v1.6.0.rst @@ -68,7 +68,7 @@ Changes * lua: extended to support :ref:`metadata object ` API. * redis: added local `PING` support to the :ref:`Redis filter `. * redis: added `GEORADIUS_RO` and `GEORADIUSBYMEMBER_RO` to the :ref:`Redis command splitter - ` whitelist. + ` allowlist. * router: added DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT, DOWNSTREAM_LOCAL_ADDRESS, DOWNSTREAM_LOCAL_ADDRESS_WITHOUT_PORT, PROTOCOL, and UPSTREAM_METADATA :ref:`header formatters `. The CLIENT_IP header formatter diff --git a/docs/root/version_history/version_history.rst b/docs/root/version_history/version_history.rst index b869b08080e05..07db664892d4e 100644 --- a/docs/root/version_history/version_history.rst +++ b/docs/root/version_history/version_history.rst @@ -1,3 +1,5 @@ +.. _version_history: + Version history --------------- @@ -5,10 +7,17 @@ Version history :titlesonly: current + v1.15.0 + v1.14.3 + v1.14.2 v1.14.1 v1.14.0 + v1.13.3 + v1.13.2 v1.13.1 v1.13.0 + v1.12.5 + v1.12.4 v1.12.3 v1.12.2 v1.12.1 diff --git a/examples/BUILD b/examples/BUILD index 341a86be76007..72c67907b8793 100644 --- a/examples/BUILD +++ b/examples/BUILD @@ -1,10 +1,10 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() filegroup( @@ -14,6 +14,14 @@ filegroup( "cors/backend/service-envoy.yaml", "cors/frontend/front-envoy.yaml", "cors/frontend/service-envoy.yaml", + "csrf/crosssite/front-envoy.yaml", + "csrf/samesite/front-envoy.yaml", + "csrf/service-envoy.yaml", + "ext_authz/config/grpc-service/v2.yaml", + "ext_authz/config/grpc-service/v3.yaml", + "ext_authz/config/http-service.yaml", + "ext_authz/config/opa-service/v2.yaml", + "fault-injection/envoy.yaml", "front-proxy/front-envoy.yaml", 
"front-proxy/service-envoy.yaml", "grpc-bridge/client/envoy-proxy.yaml", @@ -24,6 +32,8 @@ filegroup( "load-reporting-service/service-envoy-w-lrs.yaml", "lua/envoy.yaml", "lua/lib/mylibrary.lua", + "mysql/envoy.yaml", + "redis/envoy.yaml", "zipkin-tracing/front-envoy-zipkin.yaml", "zipkin-tracing/service1-envoy-zipkin.yaml", "zipkin-tracing/service2-envoy-zipkin.yaml", diff --git a/examples/cors/backend/Dockerfile-frontenvoy b/examples/cors/backend/Dockerfile-frontenvoy index 83b5ba806c6a1..0b2e25a0de1bd 100644 --- a/examples/cors/backend/Dockerfile-frontenvoy +++ b/examples/cors/backend/Dockerfile-frontenvoy @@ -2,4 +2,6 @@ FROM envoyproxy/envoy-dev:latest RUN apt-get update && apt-get -q install -y \ curl +COPY ./front-envoy.yaml /etc/front-envoy.yaml +RUN chmod go+r /etc/front-envoy.yaml CMD /usr/local/bin/envoy -c /etc/front-envoy.yaml --service-cluster front-proxy diff --git a/examples/cors/backend/Dockerfile-service b/examples/cors/backend/Dockerfile-service index 89b5fc12736ec..37c253fa81f19 100644 --- a/examples/cors/backend/Dockerfile-service +++ b/examples/cors/backend/Dockerfile-service @@ -1,6 +1,6 @@ FROM envoyproxy/envoy-alpine-dev:latest -RUN apk update && apk add python3 bash +RUN apk update && apk add py3-pip bash RUN pip3 install -q Flask==0.11.1 RUN mkdir /code ADD ./service.py /code/ diff --git a/examples/cors/backend/docker-compose.yaml b/examples/cors/backend/docker-compose.yaml index 987b4ef157bab..af233b442c410 100644 --- a/examples/cors/backend/docker-compose.yaml +++ b/examples/cors/backend/docker-compose.yaml @@ -5,15 +5,13 @@ services: build: context: . 
dockerfile: Dockerfile-frontenvoy - volumes: - - ./front-envoy.yaml:/etc/front-envoy.yaml networks: - envoymesh expose: - - "80" + - "8000" - "8001" ports: - - "8002:80" + - "8002:8000" - "8003:8001" backend-service: @@ -27,7 +25,7 @@ services: aliases: - backendservice expose: - - "80" + - "8000" networks: envoymesh: {} diff --git a/examples/cors/backend/front-envoy.yaml b/examples/cors/backend/front-envoy.yaml index d11e7f11a6294..0dd81339f5785 100644 --- a/examples/cors/backend/front-envoy.yaml +++ b/examples/cors/backend/front-envoy.yaml @@ -3,7 +3,7 @@ static_resources: - address: socket_address: address: 0.0.0.0 - port_value: 80 + port_value: 8000 filter_chains: - filters: - name: envoy.filters.network.http_connection_manager @@ -15,7 +15,7 @@ static_resources: - name: envoy.access_loggers.file typed_config: "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog - path: "/var/log/access.log" + path: /dev/stdout route_config: name: local_route virtual_hosts: @@ -85,7 +85,7 @@ static_resources: address: socket_address: address: backendservice - port_value: 80 + port_value: 8000 admin: access_log_path: "/dev/null" address: diff --git a/examples/cors/backend/service-envoy.yaml b/examples/cors/backend/service-envoy.yaml index 49be77b85953b..c49e69ccd0023 100644 --- a/examples/cors/backend/service-envoy.yaml +++ b/examples/cors/backend/service-envoy.yaml @@ -3,7 +3,7 @@ static_resources: - address: socket_address: address: 0.0.0.0 - port_value: 80 + port_value: 8000 filter_chains: - filters: - name: envoy.filters.network.http_connection_manager diff --git a/examples/cors/frontend/Dockerfile-frontenvoy b/examples/cors/frontend/Dockerfile-frontenvoy index 83b5ba806c6a1..0b2e25a0de1bd 100644 --- a/examples/cors/frontend/Dockerfile-frontenvoy +++ b/examples/cors/frontend/Dockerfile-frontenvoy @@ -2,4 +2,6 @@ FROM envoyproxy/envoy-dev:latest RUN apt-get update && apt-get -q install -y \ curl +COPY ./front-envoy.yaml /etc/front-envoy.yaml +RUN chmod go+r 
/etc/front-envoy.yaml CMD /usr/local/bin/envoy -c /etc/front-envoy.yaml --service-cluster front-proxy diff --git a/examples/cors/frontend/Dockerfile-service b/examples/cors/frontend/Dockerfile-service index 8d882faa172fc..735aaf42a095f 100644 --- a/examples/cors/frontend/Dockerfile-service +++ b/examples/cors/frontend/Dockerfile-service @@ -1,6 +1,6 @@ FROM envoyproxy/envoy-alpine-dev:latest -RUN apk update && apk add python3 bash +RUN apk update && apk add py3-pip bash RUN pip3 install -q Flask==0.11.1 RUN mkdir /code ADD ./service.py ./index.html /code/ diff --git a/examples/cors/frontend/docker-compose.yaml b/examples/cors/frontend/docker-compose.yaml index 96b19d222e431..7872d92ae83df 100644 --- a/examples/cors/frontend/docker-compose.yaml +++ b/examples/cors/frontend/docker-compose.yaml @@ -5,15 +5,13 @@ services: build: context: . dockerfile: Dockerfile-frontenvoy - volumes: - - ./front-envoy.yaml:/etc/front-envoy.yaml networks: - envoymesh expose: - - "80" + - "8000" - "8001" ports: - - "8000:80" + - "8000:8000" - "8001:8001" frontend-service: @@ -27,7 +25,7 @@ services: aliases: - frontendservice expose: - - "80" + - "8000" networks: envoymesh: {} diff --git a/examples/cors/frontend/front-envoy.yaml b/examples/cors/frontend/front-envoy.yaml index 31174a20174d2..e871ebea1e91a 100644 --- a/examples/cors/frontend/front-envoy.yaml +++ b/examples/cors/frontend/front-envoy.yaml @@ -3,7 +3,7 @@ static_resources: - address: socket_address: address: 0.0.0.0 - port_value: 80 + port_value: 8000 filter_chains: - filters: - name: envoy.filters.network.http_connection_manager @@ -15,7 +15,7 @@ static_resources: - name: envoy.access_loggers.file typed_config: "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog - path: "/var/log/access.log" + path: /dev/stdout route_config: name: local_route virtual_hosts: @@ -46,7 +46,7 @@ static_resources: address: socket_address: address: frontendservice - port_value: 80 + port_value: 8000 admin: access_log_path: 
"/dev/null" address: diff --git a/examples/cors/frontend/service-envoy.yaml b/examples/cors/frontend/service-envoy.yaml index 49be77b85953b..c49e69ccd0023 100644 --- a/examples/cors/frontend/service-envoy.yaml +++ b/examples/cors/frontend/service-envoy.yaml @@ -3,7 +3,7 @@ static_resources: - address: socket_address: address: 0.0.0.0 - port_value: 80 + port_value: 8000 filter_chains: - filters: - name: envoy.filters.network.http_connection_manager diff --git a/examples/csrf/Dockerfile-frontenvoy b/examples/csrf/crosssite/Dockerfile-frontenvoy similarity index 67% rename from examples/csrf/Dockerfile-frontenvoy rename to examples/csrf/crosssite/Dockerfile-frontenvoy index 83b5ba806c6a1..0b2e25a0de1bd 100644 --- a/examples/csrf/Dockerfile-frontenvoy +++ b/examples/csrf/crosssite/Dockerfile-frontenvoy @@ -2,4 +2,6 @@ FROM envoyproxy/envoy-dev:latest RUN apt-get update && apt-get -q install -y \ curl +COPY ./front-envoy.yaml /etc/front-envoy.yaml +RUN chmod go+r /etc/front-envoy.yaml CMD /usr/local/bin/envoy -c /etc/front-envoy.yaml --service-cluster front-proxy diff --git a/examples/csrf/crosssite/Dockerfile-service b/examples/csrf/crosssite/Dockerfile-service index 63ff61ef75cef..37c5296aeefcc 100644 --- a/examples/csrf/crosssite/Dockerfile-service +++ b/examples/csrf/crosssite/Dockerfile-service @@ -1,6 +1,6 @@ FROM envoyproxy/envoy-alpine-dev:latest -RUN apk update && apk add python3 bash +RUN apk update && apk add py3-pip bash RUN pip3 install -q Flask==0.11.1 RUN mkdir /code ADD ./crosssite/service.py ./index.html /code/ diff --git a/examples/csrf/crosssite/docker-compose.yml b/examples/csrf/crosssite/docker-compose.yml index 5d25ea0d6d1a2..4a2f3fdbf43e4 100644 --- a/examples/csrf/crosssite/docker-compose.yml +++ b/examples/csrf/crosssite/docker-compose.yml @@ -3,17 +3,15 @@ services: front-envoy: build: - context: .. + context: . 
dockerfile: Dockerfile-frontenvoy - volumes: - - ./front-envoy.yaml:/etc/front-envoy.yaml networks: - envoymesh expose: - - "80" + - "8000" - "8001" ports: - - "8002:80" + - "8002:8000" - "8003:8001" service: @@ -27,7 +25,7 @@ services: aliases: - service expose: - - "80" + - "8000" networks: envoymesh: {} diff --git a/examples/csrf/crosssite/front-envoy.yaml b/examples/csrf/crosssite/front-envoy.yaml index 56449447686d0..879a0fa665761 100644 --- a/examples/csrf/crosssite/front-envoy.yaml +++ b/examples/csrf/crosssite/front-envoy.yaml @@ -3,7 +3,7 @@ static_resources: - address: socket_address: address: 0.0.0.0 - port_value: 80 + port_value: 8000 filter_chains: - filters: - name: envoy.filters.network.http_connection_manager @@ -15,7 +15,7 @@ static_resources: - name: envoy.access_loggers.file typed_config: "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog - path: "/var/log/access.log" + path: "/dev/stdout" route_config: name: local_route virtual_hosts: @@ -44,7 +44,7 @@ static_resources: address: socket_address: address: service - port_value: 80 + port_value: 8000 admin: access_log_path: "/dev/null" address: diff --git a/examples/csrf/samesite/Dockerfile-frontenvoy b/examples/csrf/samesite/Dockerfile-frontenvoy new file mode 100644 index 0000000000000..0b2e25a0de1bd --- /dev/null +++ b/examples/csrf/samesite/Dockerfile-frontenvoy @@ -0,0 +1,7 @@ +FROM envoyproxy/envoy-dev:latest + +RUN apt-get update && apt-get -q install -y \ + curl +COPY ./front-envoy.yaml /etc/front-envoy.yaml +RUN chmod go+r /etc/front-envoy.yaml +CMD /usr/local/bin/envoy -c /etc/front-envoy.yaml --service-cluster front-proxy diff --git a/examples/csrf/samesite/Dockerfile-service b/examples/csrf/samesite/Dockerfile-service index d97322f8acbdf..f2413d895618f 100644 --- a/examples/csrf/samesite/Dockerfile-service +++ b/examples/csrf/samesite/Dockerfile-service @@ -1,6 +1,6 @@ FROM envoyproxy/envoy-alpine-dev:latest -RUN apk update && apk add python3 bash +RUN apk update && apk 
add py3-pip bash RUN pip3 install -q Flask==0.11.1 RUN mkdir /code ADD ./samesite/service.py ./index.html /code/ diff --git a/examples/csrf/samesite/docker-compose.yml b/examples/csrf/samesite/docker-compose.yml index 490a4896417fa..2fcac143f6f69 100644 --- a/examples/csrf/samesite/docker-compose.yml +++ b/examples/csrf/samesite/docker-compose.yml @@ -3,17 +3,15 @@ services: front-envoy: build: - context: .. + context: . dockerfile: Dockerfile-frontenvoy - volumes: - - ./front-envoy.yaml:/etc/front-envoy.yaml networks: - envoymesh expose: - - "80" + - "8000" - "8001" ports: - - "8000:80" + - "8000:8000" - "8001:8001" service: @@ -27,7 +25,7 @@ services: aliases: - service expose: - - "80" + - "8000" networks: envoymesh: {} diff --git a/examples/csrf/samesite/front-envoy.yaml b/examples/csrf/samesite/front-envoy.yaml index 479f4d7485123..cc18e2080a244 100644 --- a/examples/csrf/samesite/front-envoy.yaml +++ b/examples/csrf/samesite/front-envoy.yaml @@ -3,7 +3,7 @@ static_resources: - address: socket_address: address: 0.0.0.0 - port_value: 80 + port_value: 8000 filter_chains: - filters: - name: envoy.filters.network.http_connection_manager @@ -15,7 +15,7 @@ static_resources: - name: envoy.access_loggers.file typed_config: "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog - path: "/var/log/access.log" + path: "/dev/stdout" route_config: name: local_route virtual_hosts: @@ -23,14 +23,17 @@ static_resources: domains: - "*" cors: - allow_origin: - - "*" + allow_origin_string_match: + - safe_regex: + google_re2: {} + regex: \* filter_enabled: default_value: numerator: 100 denominator: HUNDRED - per_filter_config: + typed_per_filter_config: envoy.filters.http.csrf: + "@type": type.googleapis.com/envoy.extensions.filters.http.csrf.v3.CsrfPolicy filter_enabled: default_value: numerator: 100 @@ -46,8 +49,9 @@ static_resources: prefix: "/csrf/disabled" route: cluster: generic_service - per_filter_config: + typed_per_filter_config: envoy.filters.http.csrf: + 
"@type": type.googleapis.com/envoy.extensions.filters.http.csrf.v3.CsrfPolicy filter_enabled: default_value: numerator: 0 @@ -56,8 +60,9 @@ static_resources: prefix: "/csrf/shadow" route: cluster: generic_service - per_filter_config: + typed_per_filter_config: envoy.filters.http.csrf: + "@type": type.googleapis.com/envoy.extensions.filters.http.csrf.v3.CsrfPolicy filter_enabled: default_value: numerator: 0 @@ -70,14 +75,17 @@ static_resources: prefix: "/csrf/additional_origin" route: cluster: generic_service - per_filter_config: + typed_per_filter_config: envoy.filters.http.csrf: + "@type": type.googleapis.com/envoy.extensions.filters.http.csrf.v3.CsrfPolicy filter_enabled: default_value: numerator: 100 denominator: HUNDRED additional_origins: - - regex: .* + - safe_regex: + google_re2: {} + regex: .* - match: prefix: "/" route: @@ -86,7 +94,8 @@ static_resources: - name: envoy.filters.http.cors typed_config: {} - name: envoy.filters.http.csrf - config: + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.csrf.v3.CsrfPolicy filter_enabled: default_value: numerator: 0 @@ -107,7 +116,7 @@ static_resources: address: socket_address: address: service - port_value: 80 + port_value: 8000 admin: access_log_path: "/dev/null" address: diff --git a/examples/csrf/service-envoy.yaml b/examples/csrf/service-envoy.yaml index 49be77b85953b..c49e69ccd0023 100644 --- a/examples/csrf/service-envoy.yaml +++ b/examples/csrf/service-envoy.yaml @@ -3,7 +3,7 @@ static_resources: - address: socket_address: address: 0.0.0.0 - port_value: 80 + port_value: 8000 filter_chains: - filters: - name: envoy.filters.network.http_connection_manager diff --git a/examples/ext_authz/.env b/examples/ext_authz/.env new file mode 100644 index 0000000000000..0a7d4cb0eaf01 --- /dev/null +++ b/examples/ext_authz/.env @@ -0,0 +1 @@ +FRONT_ENVOY_YAML=config/grpc-service/v3.yaml diff --git a/examples/ext_authz/Dockerfile-frontenvoy b/examples/ext_authz/Dockerfile-frontenvoy new file mode 
100644 index 0000000000000..f329c86ce6559 --- /dev/null +++ b/examples/ext_authz/Dockerfile-frontenvoy @@ -0,0 +1,9 @@ +FROM envoyproxy/envoy-dev:latest + +RUN apt-get update && apt-get -q install -y \ + curl +COPY ./config /etc/envoy-config +COPY ./run_envoy.sh /run_envoy.sh +RUN chmod go+r -R /etc/envoy-config \ + && chmod go+rx /run_envoy.sh /etc/envoy-config /etc/envoy-config/* +CMD /run_envoy.sh diff --git a/examples/ext_authz/README.md b/examples/ext_authz/README.md new file mode 100644 index 0000000000000..c0a121144d07a --- /dev/null +++ b/examples/ext_authz/README.md @@ -0,0 +1,2 @@ +To learn about this sandbox and for instructions on how to run it please head over +to the [envoy docs](https://www.envoyproxy.io/docs/envoy/latest/start/sandboxes/ext_authz) diff --git a/examples/ext_authz/auth/grpc-service/Dockerfile b/examples/ext_authz/auth/grpc-service/Dockerfile new file mode 100644 index 0000000000000..f77cdd69e39fd --- /dev/null +++ b/examples/ext_authz/auth/grpc-service/Dockerfile @@ -0,0 +1,10 @@ +FROM golang:alpine AS builder + +RUN apk --no-cache add make +COPY . 
/app +RUN make -C /app/grpc-service + +FROM alpine + +COPY --from=builder /app/grpc-service/server /app/server +CMD ["/app/server", "-users", "/etc/users.json"] diff --git a/examples/ext_authz/auth/grpc-service/Makefile b/examples/ext_authz/auth/grpc-service/Makefile new file mode 100644 index 0000000000000..e9ee1e9581cff --- /dev/null +++ b/examples/ext_authz/auth/grpc-service/Makefile @@ -0,0 +1,8 @@ +all: server + +server: + @CGO_ENABLED=0 GOOS=linux go build -a --ldflags '-extldflags "-static"' \ + -tags "netgo" -installsuffix netgo \ + -o server +clean: + @rm -fr server diff --git a/examples/ext_authz/auth/grpc-service/go.mod b/examples/ext_authz/auth/grpc-service/go.mod new file mode 100644 index 0000000000000..e1eebc33626ad --- /dev/null +++ b/examples/ext_authz/auth/grpc-service/go.mod @@ -0,0 +1,10 @@ +module github.com/envoyproxy/envoy/examples/ext_authz/auth/grpc-service + +go 1.14 + +require ( + github.com/envoyproxy/go-control-plane v0.9.5 + github.com/golang/protobuf v1.3.2 + google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 + google.golang.org/grpc v1.25.1 +) diff --git a/examples/ext_authz/auth/grpc-service/go.sum b/examples/ext_authz/auth/grpc-service/go.sum new file mode 100644 index 0000000000000..e5921d26237e0 --- /dev/null +++ b/examples/ext_authz/auth/grpc-service/go.sum @@ -0,0 +1,74 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20200313221541-5f7e5dd04533 h1:8wZizuKuZVu5COB7EsBYxBQz8nRcXXn5d4Gt91eJLvU= +github.com/cncf/udpa/go v0.0.0-20200313221541-5f7e5dd04533/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/envoyproxy/go-control-plane v0.9.0 
h1:67WMNTvGrl7V1dWdKCeTwxDr7nio9clKoTlLhwIPnT4= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.5 h1:lRJIqDD8yjV1YyPRqecMdytjDLs2fTXq363aCib5xPU= +github.com/envoyproxy/go-control-plane v0.9.5/go.mod h1:OXl5to++W0ctG+EHWTFUjiypVxC/Y4VLc/KFU+al13s= +github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= 
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1 h1:wdKvqQk7IttEw92GoRyKG2IDrUIpgpj6H6m81yfeMW0= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/protobuf 
v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/examples/ext_authz/auth/grpc-service/main.go b/examples/ext_authz/auth/grpc-service/main.go new file mode 100644 index 0000000000000..6861bce060552 --- /dev/null +++ b/examples/ext_authz/auth/grpc-service/main.go @@ -0,0 +1,42 @@ +package main + +import ( + "flag" + "fmt" + "log" + "net" + + envoy_service_auth_v2 "github.com/envoyproxy/go-control-plane/envoy/service/auth/v2" + envoy_service_auth_v3 "github.com/envoyproxy/go-control-plane/envoy/service/auth/v3" + "google.golang.org/grpc" + + "github.com/envoyproxy/envoy/examples/ext_authz/auth/grpc-service/pkg/auth" + auth_v2 "github.com/envoyproxy/envoy/examples/ext_authz/auth/grpc-service/pkg/auth/v2" + auth_v3 "github.com/envoyproxy/envoy/examples/ext_authz/auth/grpc-service/pkg/auth/v3" +) + +func main() { + port := flag.Int("port", 9001, "gRPC port") + data := flag.String("users", "../../users.json", "users file") + + flag.Parse() + + lis, err := net.Listen("tcp", fmt.Sprintf(":%d", *port)) + if err != nil { + log.Fatalf("failed to listen to %d: %v", *port, err) + } + + users, err := 
auth.LoadUsers(*data) + if err != nil { + log.Fatalf("failed to load user data:%s %v", *data, err) + } + gs := grpc.NewServer() + + // Serve v3 and v2. + envoy_service_auth_v3.RegisterAuthorizationServer(gs, auth_v3.New(users)) + envoy_service_auth_v2.RegisterAuthorizationServer(gs, auth_v2.New(users)) + + log.Printf("starting gRPC server on: %d\n", *port) + + gs.Serve(lis) +} diff --git a/examples/ext_authz/auth/grpc-service/pkg/auth/users.go b/examples/ext_authz/auth/grpc-service/pkg/auth/users.go new file mode 100644 index 0000000000000..507c3560bdf12 --- /dev/null +++ b/examples/ext_authz/auth/grpc-service/pkg/auth/users.go @@ -0,0 +1,32 @@ +package auth + +import ( + "encoding/json" + "io/ioutil" +) + +// Users holds a list of users. +type Users map[string]string + +// Check checks if a key could retrieve a user from a list of users. +func (u Users) Check(key string) (bool, string) { + value, ok := u[key] + if !ok { + return false, "" + } + return ok, value +} + +// LoadUsers load users data from a JSON file. 
+func LoadUsers(jsonFile string) (Users, error) { + var users Users + data, err := ioutil.ReadFile(jsonFile) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(data, &users); err != nil { + return nil, err + } + return users, nil +} diff --git a/examples/ext_authz/auth/grpc-service/pkg/auth/v2/auth.go b/examples/ext_authz/auth/grpc-service/pkg/auth/v2/auth.go new file mode 100644 index 0000000000000..3b16c6c5cb9f8 --- /dev/null +++ b/examples/ext_authz/auth/grpc-service/pkg/auth/v2/auth.go @@ -0,0 +1,68 @@ +package v2 + +import ( + "context" + "log" + "strings" + + envoy_api_v2_core "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" + envoy_service_auth_v2 "github.com/envoyproxy/go-control-plane/envoy/service/auth/v2" + "github.com/golang/protobuf/ptypes/wrappers" + "google.golang.org/genproto/googleapis/rpc/code" + "google.golang.org/genproto/googleapis/rpc/status" + + "github.com/envoyproxy/envoy/examples/ext_authz/auth/grpc-service/pkg/auth" +) + +type server struct { + users auth.Users +} + +var _ envoy_service_auth_v2.AuthorizationServer = &server{} + +// New creates a new authorization server. +func New(users auth.Users) envoy_service_auth_v2.AuthorizationServer { + return &server{users} +} + +// Check implements authorization's Check interface which performs authorization check based on the +// attributes associated with the incoming request. 
+func (s *server) Check( + ctx context.Context, + req *envoy_service_auth_v2.CheckRequest) (*envoy_service_auth_v2.CheckResponse, error) { + authorization := req.Attributes.Request.Http.Headers["authorization"] + log.Println(authorization) + + extracted := strings.Fields(authorization) + if len(extracted) == 2 && extracted[0] == "Bearer" { + valid, user := s.users.Check(extracted[1]) + if valid { + return &envoy_service_auth_v2.CheckResponse{ + HttpResponse: &envoy_service_auth_v2.CheckResponse_OkResponse{ + OkResponse: &envoy_service_auth_v2.OkHttpResponse{ + Headers: []*envoy_api_v2_core.HeaderValueOption{ + { + Append: &wrappers.BoolValue{Value: false}, + Header: &envoy_api_v2_core.HeaderValue{ + // For a successful request, the authorization server sets the + // x-current-user value. + Key: "x-current-user", + Value: user, + }, + }, + }, + }, + }, + Status: &status.Status{ + Code: int32(code.Code_OK), + }, + }, nil + } + } + + return &envoy_service_auth_v2.CheckResponse{ + Status: &status.Status{ + Code: int32(code.Code_PERMISSION_DENIED), + }, + }, nil +} diff --git a/examples/ext_authz/auth/grpc-service/pkg/auth/v3/auth.go b/examples/ext_authz/auth/grpc-service/pkg/auth/v3/auth.go new file mode 100644 index 0000000000000..1cae7cbd8d43d --- /dev/null +++ b/examples/ext_authz/auth/grpc-service/pkg/auth/v3/auth.go @@ -0,0 +1,68 @@ +package v3 + +import ( + "context" + "log" + "strings" + + envoy_api_v3_core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + envoy_service_auth_v3 "github.com/envoyproxy/go-control-plane/envoy/service/auth/v3" + "github.com/golang/protobuf/ptypes/wrappers" + "google.golang.org/genproto/googleapis/rpc/code" + "google.golang.org/genproto/googleapis/rpc/status" + + "github.com/envoyproxy/envoy/examples/ext_authz/auth/grpc-service/pkg/auth" +) + +type server struct { + users auth.Users +} + +var _ envoy_service_auth_v3.AuthorizationServer = &server{} + +// New creates a new authorization server. 
+func New(users auth.Users) envoy_service_auth_v3.AuthorizationServer { + return &server{users} +} + +// Check implements authorization's Check interface which performs authorization check based on the +// attributes associated with the incoming request. +func (s *server) Check( + ctx context.Context, + req *envoy_service_auth_v3.CheckRequest) (*envoy_service_auth_v3.CheckResponse, error) { + authorization := req.Attributes.Request.Http.Headers["authorization"] + log.Println(authorization) + + extracted := strings.Fields(authorization) + if len(extracted) == 2 && extracted[0] == "Bearer" { + valid, user := s.users.Check(extracted[1]) + if valid { + return &envoy_service_auth_v3.CheckResponse{ + HttpResponse: &envoy_service_auth_v3.CheckResponse_OkResponse{ + OkResponse: &envoy_service_auth_v3.OkHttpResponse{ + Headers: []*envoy_api_v3_core.HeaderValueOption{ + { + Append: &wrappers.BoolValue{Value: false}, + Header: &envoy_api_v3_core.HeaderValue{ + // For a successful request, the authorization server sets the + // x-current-user value. + Key: "x-current-user", + Value: user, + }, + }, + }, + }, + }, + Status: &status.Status{ + Code: int32(code.Code_OK), + }, + }, nil + } + } + + return &envoy_service_auth_v3.CheckResponse{ + Status: &status.Status{ + Code: int32(code.Code_PERMISSION_DENIED), + }, + }, nil +} diff --git a/examples/ext_authz/auth/http-service/Dockerfile b/examples/ext_authz/auth/http-service/Dockerfile new file mode 100644 index 0000000000000..d0bcbc91f8b07 --- /dev/null +++ b/examples/ext_authz/auth/http-service/Dockerfile @@ -0,0 +1,4 @@ +FROM node:alpine + +COPY . 
/app +CMD ["node", "/app/http-service/server"] diff --git a/examples/ext_authz/auth/http-service/server.js b/examples/ext_authz/auth/http-service/server.js new file mode 100644 index 0000000000000..9c890d75226a0 --- /dev/null +++ b/examples/ext_authz/auth/http-service/server.js @@ -0,0 +1,29 @@ +const Http = require("http"); +const path = require("path"); + +const tokens = require(process.env.USERS || + path.join(__dirname, "..", "users.json")); + +const server = new Http.Server((req, res) => { + const authorization = req.headers["authorization"] || ""; + const extracted = authorization.split(" "); + if (extracted.length === 2 && extracted[0] === "Bearer") { + const user = checkToken(extracted[1]); + if (user !== undefined) { + // The authorization server returns a response with "x-current-user" header for a successful + // request. + res.writeHead(200, { "x-current-user": user }); + return res.end(); + } + } + res.writeHead(403); + res.end(); +}); + +const port = process.env.PORT || 9002; +server.listen(port); +console.log(`starting HTTP server on: ${port}`); + +function checkToken(token) { + return tokens[token]; +} diff --git a/examples/ext_authz/auth/users.json b/examples/ext_authz/auth/users.json new file mode 100644 index 0000000000000..4068bcb7628e5 --- /dev/null +++ b/examples/ext_authz/auth/users.json @@ -0,0 +1,5 @@ +{ + "token1": "user1", + "token2": "user2", + "token3": "user3" +} diff --git a/examples/ext_authz/config/grpc-service/v2.yaml b/examples/ext_authz/config/grpc-service/v2.yaml new file mode 100644 index 0000000000000..bd1a6eee7f6dd --- /dev/null +++ b/examples/ext_authz/config/grpc-service/v2.yaml @@ -0,0 +1,72 @@ +static_resources: + listeners: + - address: + socket_address: + address: 0.0.0.0 + port_value: 8000 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + codec_type: auto + 
stat_prefix: ingress_http + route_config: + name: local_route + virtual_hosts: + - name: upstream + domains: + - "*" + routes: + - match: + prefix: "/" + route: + cluster: upstream-service + http_filters: + - name: envoy.filters.http.ext_authz + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz + grpc_service: + envoy_grpc: + cluster_name: ext_authz-grpc-service + timeout: 0.250s + transport_api_version: V2 + - name: envoy.filters.http.router + typed_config: {} + + clusters: + - name: upstream-service + connect_timeout: 0.250s + type: STRICT_DNS + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: upstream-service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: upstream-service + port_value: 8080 + + - name: ext_authz-grpc-service + connect_timeout: 0.250s + type: STRICT_DNS + lb_policy: ROUND_ROBIN + http2_protocol_options: {} + load_assignment: + cluster_name: ext_authz-grpc-service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: ext_authz-grpc-service + port_value: 9001 + +admin: + access_log_path: "/dev/null" + address: + socket_address: + address: 0.0.0.0 + port_value: 8001 diff --git a/examples/ext_authz/config/grpc-service/v3.yaml b/examples/ext_authz/config/grpc-service/v3.yaml new file mode 100644 index 0000000000000..2b4829e2c90ce --- /dev/null +++ b/examples/ext_authz/config/grpc-service/v3.yaml @@ -0,0 +1,72 @@ +static_resources: + listeners: + - address: + socket_address: + address: 0.0.0.0 + port_value: 8000 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + codec_type: auto + stat_prefix: ingress_http + route_config: + name: local_route + virtual_hosts: + - name: upstream + domains: + - "*" + routes: + - match: + prefix: "/" + route: + cluster: upstream-service + 
http_filters: + - name: envoy.filters.http.ext_authz + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz + grpc_service: + envoy_grpc: + cluster_name: ext_authz-grpc-service + timeout: 0.250s + transport_api_version: V3 + - name: envoy.filters.http.router + typed_config: {} + + clusters: + - name: upstream-service + connect_timeout: 0.250s + type: STRICT_DNS + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: upstream-service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: upstream-service + port_value: 8080 + + - name: ext_authz-grpc-service + connect_timeout: 0.250s + type: STRICT_DNS + lb_policy: ROUND_ROBIN + http2_protocol_options: {} + load_assignment: + cluster_name: ext_authz-grpc-service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: ext_authz-grpc-service + port_value: 9001 + +admin: + access_log_path: "/dev/null" + address: + socket_address: + address: 0.0.0.0 + port_value: 8001 diff --git a/examples/ext_authz/config/http-service.yaml b/examples/ext_authz/config/http-service.yaml new file mode 100644 index 0000000000000..85065d99806c4 --- /dev/null +++ b/examples/ext_authz/config/http-service.yaml @@ -0,0 +1,75 @@ +static_resources: + listeners: + - address: + socket_address: + address: 0.0.0.0 + port_value: 8000 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + codec_type: auto + stat_prefix: ingress_http + route_config: + name: local_route + virtual_hosts: + - name: upstream + domains: + - "*" + routes: + - match: + prefix: "/" + route: + cluster: upstream-service + http_filters: + - name: envoy.filters.http.ext_authz + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz + http_service: + server_uri: + uri: ext_authz + 
cluster: ext_authz-http-service + timeout: 0.250s + authorization_response: + allowed_upstream_headers: + patterns: + - exact: x-current-user + - name: envoy.filters.http.router + typed_config: {} + + clusters: + - name: upstream-service + connect_timeout: 0.250s + type: STRICT_DNS + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: upstream-service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: upstream-service + port_value: 8080 + + - name: ext_authz-http-service + connect_timeout: 0.250s + type: STRICT_DNS + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: ext_authz-http-service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: ext_authz-http-service + port_value: 9002 + +admin: + access_log_path: "/dev/null" + address: + socket_address: + address: 0.0.0.0 + port_value: 8001 diff --git a/examples/ext_authz/config/opa-service/policy.rego b/examples/ext_authz/config/opa-service/policy.rego new file mode 100644 index 0000000000000..2f9bdf5d2db99 --- /dev/null +++ b/examples/ext_authz/config/opa-service/policy.rego @@ -0,0 +1,11 @@ +package istio.authz + +default allow = false + +allow = response { + input.attributes.request.http.method == "GET" + response := { + "allowed": true, + "headers": {"x-current-user": "OPA"} + } +} diff --git a/examples/ext_authz/config/opa-service/v2.yaml b/examples/ext_authz/config/opa-service/v2.yaml new file mode 100644 index 0000000000000..401c6df468323 --- /dev/null +++ b/examples/ext_authz/config/opa-service/v2.yaml @@ -0,0 +1,72 @@ +static_resources: + listeners: + - address: + socket_address: + address: 0.0.0.0 + port_value: 8000 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + codec_type: auto + stat_prefix: ingress_http + route_config: + name: local_route + virtual_hosts: + - 
name: upstream + domains: + - "*" + routes: + - match: + prefix: "/" + route: + cluster: upstream-service + http_filters: + - name: envoy.filters.http.ext_authz + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz + grpc_service: + envoy_grpc: + cluster_name: ext_authz-opa-service + timeout: 0.250s + transport_api_version: V2 + - name: envoy.filters.http.router + typed_config: {} + + clusters: + - name: upstream-service + connect_timeout: 0.250s + type: STRICT_DNS + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: upstream-service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: upstream-service + port_value: 8080 + + - name: ext_authz-opa-service + connect_timeout: 0.250s + type: STRICT_DNS + lb_policy: ROUND_ROBIN + http2_protocol_options: {} + load_assignment: + cluster_name: ext_authz-opa-service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: ext_authz-opa-service + port_value: 9002 + +admin: + access_log_path: "/dev/null" + address: + socket_address: + address: 0.0.0.0 + port_value: 8001 diff --git a/examples/ext_authz/docker-compose.yaml b/examples/ext_authz/docker-compose.yaml new file mode 100644 index 0000000000000..148ecc489f8e7 --- /dev/null +++ b/examples/ext_authz/docker-compose.yaml @@ -0,0 +1,70 @@ +version: "3.7" +services: + + front-envoy: + build: + context: . 
+ dockerfile: Dockerfile-frontenvoy + environment: + - FRONT_ENVOY_YAML + networks: + - envoymesh + expose: + - "8000" + - "8001" + ports: + - "8000:8000" + - "8001:8001" + + ext_authz-http-service: + build: + context: ./auth + dockerfile: http-service/Dockerfile + volumes: + - ./users.json:/etc/users.json + environment: + - USERS=/etc/users.json + networks: + envoymesh: + aliases: + - ext_authz-http-service + + ext_authz-grpc-service: + build: + context: ./auth + dockerfile: grpc-service/Dockerfile + volumes: + - ./users.json:/etc/users.json + networks: + envoymesh: + aliases: + - ext_authz-grpc-service + + ext_authz-opa-service: + image: openpolicyagent/opa:0.21.0-istio + volumes: + - ./config/opa-service/policy.rego:/etc/policy.rego + command: + - run + - --log-level=debug + - --server + - --log-format=json-pretty + - --set=plugins.envoy_ext_authz_grpc.addr=:9002 + - --set=decision_logs.console=true + - /etc/policy.rego + networks: + envoymesh: + aliases: + - ext_authz-opa-service + + upstream-service: + build: + context: ./upstream + dockerfile: service/Dockerfile + networks: + envoymesh: + aliases: + - upstream-service + +networks: + envoymesh: {} diff --git a/examples/ext_authz/run_envoy.sh b/examples/ext_authz/run_envoy.sh new file mode 100755 index 0000000000000..c9bb7ca58b4d3 --- /dev/null +++ b/examples/ext_authz/run_envoy.sh @@ -0,0 +1,3 @@ +#!/bin/sh + +/usr/local/bin/envoy -c "/etc/envoy-${FRONT_ENVOY_YAML}" --service-cluster front-proxy diff --git a/examples/ext_authz/upstream/service/Dockerfile b/examples/ext_authz/upstream/service/Dockerfile new file mode 100644 index 0000000000000..5f70f40aca7c6 --- /dev/null +++ b/examples/ext_authz/upstream/service/Dockerfile @@ -0,0 +1,5 @@ +FROM python:3-alpine + +RUN pip3 install -q Flask==0.11.1 +COPY . 
./app +CMD ["python3", "/app/service/server.py"] diff --git a/examples/ext_authz/upstream/service/server.py b/examples/ext_authz/upstream/service/server.py new file mode 100644 index 0000000000000..a3d539f195abb --- /dev/null +++ b/examples/ext_authz/upstream/service/server.py @@ -0,0 +1,12 @@ +from flask import Flask, request + +app = Flask(__name__) + + +@app.route('/service') +def hello(): + return 'Hello ' + request.headers.get('x-current-user') + ' from behind Envoy!' + + +if __name__ == "__main__": + app.run(host='0.0.0.0', port=8080, debug=False) diff --git a/examples/ext_authz/users.json b/examples/ext_authz/users.json new file mode 100644 index 0000000000000..4068bcb7628e5 --- /dev/null +++ b/examples/ext_authz/users.json @@ -0,0 +1,5 @@ +{ + "token1": "user1", + "token2": "user2", + "token3": "user3" +} diff --git a/examples/fault-injection/Dockerfile-envoy b/examples/fault-injection/Dockerfile-envoy index f4c09bae67c5e..13dec2521a999 100644 --- a/examples/fault-injection/Dockerfile-envoy +++ b/examples/fault-injection/Dockerfile-envoy @@ -1,4 +1,6 @@ FROM envoyproxy/envoy-dev:latest RUN apt-get update && apt-get install -y curl tree +COPY ./envoy.yaml /etc/envoy.yaml +RUN chmod go+r /etc/envoy.yaml COPY enable_delay_fault_injection.sh disable_delay_fault_injection.sh enable_abort_fault_injection.sh disable_abort_fault_injection.sh send_request.sh / diff --git a/examples/fault-injection/docker-compose.yaml b/examples/fault-injection/docker-compose.yaml index fe8ec0c9d68fb..50daad870d67a 100644 --- a/examples/fault-injection/docker-compose.yaml +++ b/examples/fault-injection/docker-compose.yaml @@ -6,7 +6,6 @@ services: dockerfile: Dockerfile-envoy command: /usr/local/bin/envoy -c /etc/envoy.yaml volumes: - - ./envoy.yaml:/etc/envoy.yaml - ./runtime:/srv/runtime networks: - envoymesh diff --git a/examples/fault-injection/envoy.yaml b/examples/fault-injection/envoy.yaml index 9ba70be94ba95..ac0bd82b3568a 100644 --- a/examples/fault-injection/envoy.yaml +++ 
b/examples/fault-injection/envoy.yaml @@ -36,6 +36,11 @@ static_resources: percentage: numerator: 0 denominator: HUNDRED + delay: + fixed_delay: 3s + percentage: + numerator: 0 + denominator: HUNDRED - name: envoy.filters.http.router typed_config: {} clusters: @@ -58,6 +63,9 @@ admin: socket_address: address: 0.0.0.0 port_value: 9901 -runtime: - symlink_root: /srv/runtime/current - subdirectory: envoy +layered_runtime: + layers: + - name: disk_layer_0 + disk_layer: + symlink_root: /srv/runtime/current + subdirectory: envoy diff --git a/examples/front-proxy/Dockerfile-frontenvoy b/examples/front-proxy/Dockerfile-frontenvoy index 83b5ba806c6a1..0b2e25a0de1bd 100644 --- a/examples/front-proxy/Dockerfile-frontenvoy +++ b/examples/front-proxy/Dockerfile-frontenvoy @@ -2,4 +2,6 @@ FROM envoyproxy/envoy-dev:latest RUN apt-get update && apt-get -q install -y \ curl +COPY ./front-envoy.yaml /etc/front-envoy.yaml +RUN chmod go+r /etc/front-envoy.yaml CMD /usr/local/bin/envoy -c /etc/front-envoy.yaml --service-cluster front-proxy diff --git a/examples/front-proxy/Dockerfile-service b/examples/front-proxy/Dockerfile-service index c3f5bafefc19b..03a6a9422ea2e 100644 --- a/examples/front-proxy/Dockerfile-service +++ b/examples/front-proxy/Dockerfile-service @@ -1,6 +1,6 @@ FROM envoyproxy/envoy-alpine-dev:latest -RUN apk update && apk add python3 bash curl +RUN apk update && apk add py3-pip bash curl RUN pip3 install -q Flask==0.11.1 requests==2.18.4 RUN mkdir /code ADD ./service.py /code diff --git a/examples/front-proxy/docker-compose.yaml b/examples/front-proxy/docker-compose.yaml index 34491c3636ce0..cac5826a46d21 100644 --- a/examples/front-proxy/docker-compose.yaml +++ b/examples/front-proxy/docker-compose.yaml @@ -5,15 +5,15 @@ services: build: context: . 
dockerfile: Dockerfile-frontenvoy - volumes: - - ./front-envoy.yaml:/etc/front-envoy.yaml networks: - envoymesh expose: - - "80" + - "8080" + - "8443" - "8001" ports: - - "8000:80" + - "8080:8080" + - "8443:8443" - "8001:8001" service1: @@ -29,7 +29,7 @@ services: environment: - SERVICE_NAME=1 expose: - - "80" + - "8000" service2: build: @@ -44,7 +44,7 @@ services: environment: - SERVICE_NAME=2 expose: - - "80" + - "8000" networks: envoymesh: {} diff --git a/examples/front-proxy/front-envoy.yaml b/examples/front-proxy/front-envoy.yaml index a612487c8d6b4..c266022e68067 100644 --- a/examples/front-proxy/front-envoy.yaml +++ b/examples/front-proxy/front-envoy.yaml @@ -3,7 +3,7 @@ static_resources: - address: socket_address: address: 0.0.0.0 - port_value: 80 + port_value: 8080 filter_chains: - filters: - name: envoy.filters.network.http_connection_manager @@ -29,6 +29,101 @@ static_resources: http_filters: - name: envoy.filters.http.router typed_config: {} + + - address: + socket_address: + address: 0.0.0.0 + port_value: 8443 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + codec_type: auto + stat_prefix: ingress_http + route_config: + name: local_route + virtual_hosts: + - name: backend + domains: + - "*" + routes: + - match: + prefix: "/service/1" + route: + cluster: service1 + - match: + prefix: "/service/2" + route: + cluster: service2 + http_filters: + - name: envoy.filters.http.router + typed_config: {} + + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + tls_certificates: + # The following self-signed certificate pair is generated using: + # $ openssl req -x509 -newkey rsa:2048 -keyout a/front-proxy-key.pem -out a/front-proxy-crt.pem -days 3650 -nodes -subj 
'/CN=front-envoy' + # + # Instead of feeding it as an inline_string, certificate pair can also be fed to Envoy + # via filename. Reference: https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/core/v3/base.proto#config-core-v3-datasource. + # + # Or in a dynamic configuration scenario, certificate pair can be fetched remotely via + # Secret Discovery Service (SDS). Reference: https://www.envoyproxy.io/docs/envoy/latest/configuration/security/secret. + certificate_chain: + inline_string: | + -----BEGIN CERTIFICATE----- + MIICqDCCAZACCQCquzpHNpqBcDANBgkqhkiG9w0BAQsFADAWMRQwEgYDVQQDDAtm + cm9udC1lbnZveTAeFw0yMDA3MDgwMTMxNDZaFw0zMDA3MDYwMTMxNDZaMBYxFDAS + BgNVBAMMC2Zyb250LWVudm95MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC + AQEAthnYkqVQBX+Wg7aQWyCCb87hBce1hAFhbRM8Y9dQTqxoMXZiA2n8G089hUou + oQpEdJgitXVS6YMFPFUUWfwcqxYAynLK4X5im26Yfa1eO8La8sZUS+4Bjao1gF5/ + VJxSEo2yZ7fFBo8M4E44ZehIIocipCRS+YZehFs6dmHoq/MGvh2eAHIa+O9xssPt + ofFcQMR8rwBHVbKy484O10tNCouX4yUkyQXqCRy6HRu7kSjOjNKSGtjfG+h5M8bh + 10W7ZrsJ1hWhzBulSaMZaUY3vh5ngpws1JATQVSK1Jm/dmMRciwlTK7KfzgxHlSX + 58ENpS7yPTISkEICcLbXkkKGEQIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQCmj6Hg + vwOxWz0xu+6fSfRL6PGJUGq6wghCfUvjfwZ7zppDUqU47fk+yqPIOzuGZMdAqi7N + v1DXkeO4A3hnMD22Rlqt25vfogAaZVToBeQxCPd/ALBLFrvLUFYuSlS3zXSBpQqQ + Ny2IKFYsMllz5RSROONHBjaJOn5OwqenJ91MPmTAG7ujXKN6INSBM0PjX9Jy4Xb9 + zT+I85jRDQHnTFce1WICBDCYidTIvJtdSSokGSuy4/xyxAAc/BpZAfOjBQ4G1QRe + 9XwOi790LyNUYFJVyeOvNJwveloWuPLHb9idmY5YABwikUY6QNcXwyHTbRCkPB2I + m+/R4XnmL4cKQ+5Z + -----END CERTIFICATE----- + private_key: + inline_string: | + -----BEGIN PRIVATE KEY----- + MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC2GdiSpVAFf5aD + tpBbIIJvzuEFx7WEAWFtEzxj11BOrGgxdmIDafwbTz2FSi6hCkR0mCK1dVLpgwU8 + VRRZ/ByrFgDKcsrhfmKbbph9rV47wtryxlRL7gGNqjWAXn9UnFISjbJnt8UGjwzg + Tjhl6EgihyKkJFL5hl6EWzp2Yeir8wa+HZ4Achr473Gyw+2h8VxAxHyvAEdVsrLj + zg7XS00Ki5fjJSTJBeoJHLodG7uRKM6M0pIa2N8b6HkzxuHXRbtmuwnWFaHMG6VJ + oxlpRje+HmeCnCzUkBNBVIrUmb92YxFyLCVMrsp/ODEeVJfnwQ2lLvI9MhKQQgJw + 
tteSQoYRAgMBAAECggEAeDGdEkYNCGQLe8pvg8Z0ccoSGpeTxpqGrNEKhjfi6NrB + NwyVav10iq4FxEmPd3nobzDPkAftfvWc6hKaCT7vyTkPspCMOsQJ39/ixOk+jqFx + lNa1YxyoZ9IV2DIHR1iaj2Z5gB367PZUoGTgstrbafbaNY9IOSyojCIO935ubbcx + DWwL24XAf51ez6sXnI8V5tXmrFlNXhbhJdH8iIxNyM45HrnlUlOk0lCK4gmLJjy9 + 10IS2H2Wh3M5zsTpihH1JvM56oAH1ahrhMXs/rVFXXkg50yD1KV+HQiEbglYKUxO + eMYtfaY9i2CuLwhDnWp3oxP3HfgQQhD09OEN3e0IlQKBgQDZ/3poG9TiMZSjfKqL + xnCABMXGVQsfFWNC8THoW6RRx5Rqi8q08yJrmhCu32YKvccsOljDQJQQJdQO1g09 + e/adJmCnTrqxNtjPkX9txV23Lp6Ak7emjiQ5ICu7iWxrcO3zf7hmKtj7z+av8sjO + mDI7NkX5vnlE74nztBEjp3eC0wKBgQDV2GeJV028RW3b/QyP3Gwmax2+cKLR9PKR + nJnmO5bxAT0nQ3xuJEAqMIss/Rfb/macWc2N/6CWJCRT6a2vgy6xBW+bqG6RdQMB + xEZXFZl+sSKhXPkc5Wjb4lQ14YWyRPrTjMlwez3k4UolIJhJmwl+D7OkMRrOUERO + EtUvc7odCwKBgBi+nhdZKWXveM7B5N3uzXBKmmRz3MpPdC/yDtcwJ8u8msUpTv4R + JxQNrd0bsIqBli0YBmFLYEMg+BwjAee7vXeDFq+HCTv6XMva2RsNryCO4yD3I359 + XfE6DJzB8ZOUgv4Dvluie3TB2Y6ZQV/p+LGt7G13yG4hvofyJYvlg3RPAoGAcjDg + +OH5zLN2eqah8qBN0CYa9/rFt0AJ19+7/smLTJ7QvQq4g0gwS1couplcCEnNGWiK + 72y1n/ckvvplmPeAE19HveMvR9UoCeV5ej86fACy8V/oVpnaaLBvL2aCMjPLjPP9 + DWeCIZp8MV86cvOrGfngf6kJG2qZTueXl4NAuwkCgYEArKkhlZVXjwBoVvtHYmN2 + o+F6cGMlRJTLhNc391WApsgDZfTZSdeJsBsvvzS/Nc0burrufJg0wYioTlpReSy4 + ohhtprnQQAddfjHP7rh2LGt+irFzhdXXQ1ybGaGM9D764KUNCXLuwdly0vzXU4HU + q5sGxGrC1RECGB5Zwx2S2ZY= + -----END PRIVATE KEY----- + clusters: - name: service1 connect_timeout: 0.25s @@ -43,7 +138,7 @@ static_resources: address: socket_address: address: service1 - port_value: 80 + port_value: 8000 - name: service2 connect_timeout: 0.25s type: strict_dns @@ -57,10 +152,19 @@ static_resources: address: socket_address: address: service2 - port_value: 80 + port_value: 8000 admin: access_log_path: "/dev/null" address: socket_address: address: 0.0.0.0 port_value: 8001 +layered_runtime: + layers: + - name: static_layer_0 + static_layer: + envoy: + resource_limits: + listener: + example_listener_name: + connection_limit: 10000 diff --git a/examples/front-proxy/service-envoy.yaml 
b/examples/front-proxy/service-envoy.yaml index df0dfd199a971..67ac03d7287f7 100644 --- a/examples/front-proxy/service-envoy.yaml +++ b/examples/front-proxy/service-envoy.yaml @@ -3,7 +3,7 @@ static_resources: - address: socket_address: address: 0.0.0.0 - port_value: 80 + port_value: 8000 filter_chains: - filters: - name: envoy.filters.network.http_connection_manager diff --git a/examples/front-proxy/service.py b/examples/front-proxy/service.py index 30e8d5219b313..1d5d5920a8e32 100644 --- a/examples/front-proxy/service.py +++ b/examples/front-proxy/service.py @@ -38,7 +38,7 @@ def trace(service_number): for header in TRACE_HEADERS_TO_PROPAGATE: if header in request.headers: headers[header] = request.headers[header] - ret = requests.get("http://localhost:9000/trace/2", headers=headers) + requests.get("http://localhost:9000/trace/2", headers=headers) return ('Hello from behind Envoy (service {})! hostname: {} resolved' 'hostname: {}\n'.format(os.environ['SERVICE_NAME'], socket.gethostname(), socket.gethostbyname(socket.gethostname()))) diff --git a/examples/grpc-bridge/Dockerfile-client b/examples/grpc-bridge/Dockerfile-client new file mode 100644 index 0000000000000..da27eecaf6891 --- /dev/null +++ b/examples/grpc-bridge/Dockerfile-client @@ -0,0 +1,5 @@ +FROM envoyproxy/envoy-dev:latest + +COPY ./client/envoy-proxy.yaml /etc/client-envoy-proxy.yaml +RUN chmod go+r /etc/client-envoy-proxy.yaml +CMD /usr/local/bin/envoy -c /etc/client-envoy-proxy.yaml diff --git a/examples/grpc-bridge/Dockerfile-server b/examples/grpc-bridge/Dockerfile-server new file mode 100644 index 0000000000000..a59690934ede4 --- /dev/null +++ b/examples/grpc-bridge/Dockerfile-server @@ -0,0 +1,5 @@ +FROM envoyproxy/envoy-dev:latest + +COPY ./server/envoy-proxy.yaml /etc/server-envoy-proxy.yaml +RUN chmod go+r /etc/server-envoy-proxy.yaml +CMD /usr/local/bin/envoy -c /etc/server-envoy-proxy.yaml --service-cluster backend-proxy diff --git a/examples/grpc-bridge/docker-compose.yaml 
b/examples/grpc-bridge/docker-compose.yaml index c09707a310e55..3ffaa58447c6b 100644 --- a/examples/grpc-bridge/docker-compose.yaml +++ b/examples/grpc-bridge/docker-compose.yaml @@ -17,10 +17,9 @@ services: - kv-backend-service grpc-server-proxy: - image: envoyproxy/envoy:latest - command: /usr/local/bin/envoy -c /etc/server-envoy-proxy.yaml --service-cluster backend-proxy - volumes: - - ./server/envoy-proxy.yaml:/etc/server-envoy-proxy.yaml + build: + context: . + dockerfile: Dockerfile-server networks: envoymesh: aliases: @@ -45,10 +44,9 @@ services: - grpc-client grpc-client-proxy: - image: envoyproxy/envoy:latest - command: /usr/local/bin/envoy -c /etc/client-envoy-proxy.yaml - volumes: - - ./client/envoy-proxy.yaml:/etc/client-envoy-proxy.yaml + build: + context: . + dockerfile: Dockerfile-client networks: envoymesh: aliases: diff --git a/examples/jaeger-native-tracing/Dockerfile-frontenvoy b/examples/jaeger-native-tracing/Dockerfile-frontenvoy new file mode 100644 index 0000000000000..5379dfe5e242c --- /dev/null +++ b/examples/jaeger-native-tracing/Dockerfile-frontenvoy @@ -0,0 +1,17 @@ +FROM envoyproxy/envoy-dev:latest + +RUN apt-get update && apt-get -q install -y \ + curl +COPY ./front-envoy-jaeger.yaml /etc/front-envoy.yaml +# +# for discussion on jaeger binary compatibility, and the source of the file, see here: +# https://github.com/envoyproxy/envoy/issues/11382#issuecomment-638012072 +# +RUN echo "4a7d17d4724ee890490bcd6cfdedb12a02316a3d33214348d30979abd201f1ca /usr/local/lib/libjaegertracing_plugin.so" > /tmp/checksum \ + && curl -Ls https://github.com/tetratelabs/getenvoy-package/files/3518103/getenvoy-centos-jaegertracing-plugin.tar.gz \ + | tar zxf - -C /usr/local/lib \ + && mv /usr/local/lib/libjaegertracing.so.0.4.2 /usr/local/lib/libjaegertracing_plugin.so \ + && sha256sum -c /tmp/checksum \ + && rm /tmp/checksum \ + && chmod go+r /etc/front-envoy.yaml +CMD /usr/local/bin/envoy -c /etc/front-envoy.yaml --service-cluster front-proxy diff --git 
a/examples/jaeger-native-tracing/docker-compose.yaml b/examples/jaeger-native-tracing/docker-compose.yaml index 3321e110cbb83..b0060928551aa 100644 --- a/examples/jaeger-native-tracing/docker-compose.yaml +++ b/examples/jaeger-native-tracing/docker-compose.yaml @@ -3,20 +3,15 @@ services: front-envoy: build: - context: ../ - dockerfile: front-proxy/Dockerfile-frontenvoy - volumes: - - ./front-envoy-jaeger.yaml:/etc/front-envoy.yaml - - ./install-jaeger-plugin.sh:/install-jaeger-plugin.sh - - ./start-front.sh:/start-front.sh - entrypoint: /start-front.sh + context: . + dockerfile: Dockerfile-frontenvoy networks: - envoymesh expose: - - "80" + - "8000" - "8001" ports: - - "8000:80" + - "8000:8000" - "8001:8001" dns: - 8.8.8.8 @@ -28,9 +23,7 @@ services: dockerfile: Dockerfile-service volumes: - ./service1-envoy-jaeger.yaml:/etc/service-envoy.yaml - - ./install-jaeger-plugin.sh:/install-jaeger-plugin.sh - - ./start-service.sh:/start-service.sh - entrypoint: /start-service.sh + - ./libjaegertracing.so.0.4.2:/usr/local/lib/libjaegertracing_plugin.so networks: envoymesh: aliases: @@ -38,7 +31,7 @@ services: environment: - SERVICE_NAME=1 expose: - - "80" + - "8000" dns: - 8.8.8.8 - 8.8.4.4 @@ -49,9 +42,7 @@ services: dockerfile: Dockerfile-service volumes: - ./service2-envoy-jaeger.yaml:/etc/service-envoy.yaml - - ./install-jaeger-plugin.sh:/install-jaeger-plugin.sh - - ./start-service.sh:/start-service.sh - entrypoint: /start-service.sh + - ./libjaegertracing.so.0.4.2:/usr/local/lib/libjaegertracing_plugin.so networks: envoymesh: aliases: @@ -59,7 +50,7 @@ services: environment: - SERVICE_NAME=2 expose: - - "80" + - "8000" dns: - 8.8.8.8 - 8.8.4.4 diff --git a/examples/jaeger-native-tracing/front-envoy-jaeger.yaml b/examples/jaeger-native-tracing/front-envoy-jaeger.yaml index 79e82af2f8aa2..b2f3430a3aaa2 100644 --- a/examples/jaeger-native-tracing/front-envoy-jaeger.yaml +++ b/examples/jaeger-native-tracing/front-envoy-jaeger.yaml @@ -3,7 +3,7 @@ static_resources: - 
address: socket_address: address: 0.0.0.0 - port_value: 80 + port_value: 8000 traffic_direction: OUTBOUND filter_chains: - filters: @@ -64,7 +64,7 @@ static_resources: address: socket_address: address: service1 - port_value: 80 + port_value: 8000 admin: access_log_path: "/dev/null" address: diff --git a/examples/jaeger-native-tracing/service1-envoy-jaeger.yaml b/examples/jaeger-native-tracing/service1-envoy-jaeger.yaml index d64a6ea33af14..9be0a80c1fe17 100644 --- a/examples/jaeger-native-tracing/service1-envoy-jaeger.yaml +++ b/examples/jaeger-native-tracing/service1-envoy-jaeger.yaml @@ -3,7 +3,7 @@ static_resources: - address: socket_address: address: 0.0.0.0 - port_value: 80 + port_value: 8000 traffic_direction: INBOUND filter_chains: - filters: @@ -103,7 +103,7 @@ static_resources: address: socket_address: address: service2 - port_value: 80 + port_value: 8000 admin: access_log_path: "/dev/null" address: diff --git a/examples/jaeger-native-tracing/service2-envoy-jaeger.yaml b/examples/jaeger-native-tracing/service2-envoy-jaeger.yaml index b04970c9738f0..88d4f54ec5946 100644 --- a/examples/jaeger-native-tracing/service2-envoy-jaeger.yaml +++ b/examples/jaeger-native-tracing/service2-envoy-jaeger.yaml @@ -3,7 +3,7 @@ static_resources: - address: socket_address: address: 0.0.0.0 - port_value: 80 + port_value: 8000 traffic_direction: INBOUND filter_chains: - filters: diff --git a/examples/jaeger-native-tracing/start-front.sh b/examples/jaeger-native-tracing/start-front.sh deleted file mode 100755 index 0f2eff4030211..0000000000000 --- a/examples/jaeger-native-tracing/start-front.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/usr/bin/env bash -/install-jaeger-plugin.sh -/usr/local/bin/envoy -c /etc/front-envoy.yaml --service-cluster front-proxy diff --git a/examples/jaeger-native-tracing/start-service.sh b/examples/jaeger-native-tracing/start-service.sh deleted file mode 100755 index e4d9643215e07..0000000000000 --- a/examples/jaeger-native-tracing/start-service.sh +++ 
/dev/null @@ -1,3 +0,0 @@ -#!/usr/bin/env bash -/install-jaeger-plugin.sh -/usr/local/bin/start_service.sh diff --git a/examples/jaeger-tracing/Dockerfile-frontenvoy b/examples/jaeger-tracing/Dockerfile-frontenvoy new file mode 100644 index 0000000000000..e955e76bb9b85 --- /dev/null +++ b/examples/jaeger-tracing/Dockerfile-frontenvoy @@ -0,0 +1,7 @@ +FROM envoyproxy/envoy-dev:latest + +RUN apt-get update && apt-get -q install -y \ + curl +COPY ./front-envoy-jaeger.yaml /etc/front-envoy.yaml +RUN chmod go+r /etc/front-envoy.yaml +CMD /usr/local/bin/envoy -c /etc/front-envoy.yaml --service-cluster front-proxy diff --git a/examples/jaeger-tracing/docker-compose.yaml b/examples/jaeger-tracing/docker-compose.yaml index 6c353fada6f49..22026eac8befa 100644 --- a/examples/jaeger-tracing/docker-compose.yaml +++ b/examples/jaeger-tracing/docker-compose.yaml @@ -3,17 +3,15 @@ services: front-envoy: build: - context: ../ - dockerfile: front-proxy/Dockerfile-frontenvoy - volumes: - - ./front-envoy-jaeger.yaml:/etc/front-envoy.yaml + context: . 
+ dockerfile: Dockerfile-frontenvoy networks: - envoymesh expose: - - "80" + - "8000" - "8001" ports: - - "8000:80" + - "8000:8000" - "8001:8001" service1: @@ -29,7 +27,7 @@ services: environment: - SERVICE_NAME=1 expose: - - "80" + - "8000" service2: build: @@ -44,7 +42,7 @@ services: environment: - SERVICE_NAME=2 expose: - - "80" + - "8000" jaeger: image: jaegertracing/all-in-one diff --git a/examples/jaeger-tracing/front-envoy-jaeger.yaml b/examples/jaeger-tracing/front-envoy-jaeger.yaml index f23bffdff5bf0..07c1575994991 100644 --- a/examples/jaeger-tracing/front-envoy-jaeger.yaml +++ b/examples/jaeger-tracing/front-envoy-jaeger.yaml @@ -3,7 +3,7 @@ static_resources: - address: socket_address: address: 0.0.0.0 - port_value: 80 + port_value: 8000 traffic_direction: OUTBOUND filter_chains: - filters: @@ -53,7 +53,7 @@ static_resources: address: socket_address: address: service1 - port_value: 80 + port_value: 8000 - name: jaeger connect_timeout: 1s type: strict_dns diff --git a/examples/jaeger-tracing/service1-envoy-jaeger.yaml b/examples/jaeger-tracing/service1-envoy-jaeger.yaml index f5ff3b046132b..b40ec8b8f1c9a 100644 --- a/examples/jaeger-tracing/service1-envoy-jaeger.yaml +++ b/examples/jaeger-tracing/service1-envoy-jaeger.yaml @@ -3,7 +3,7 @@ static_resources: - address: socket_address: address: 0.0.0.0 - port_value: 80 + port_value: 8000 traffic_direction: INBOUND filter_chains: - filters: @@ -101,7 +101,7 @@ static_resources: address: socket_address: address: service2 - port_value: 80 + port_value: 8000 - name: jaeger connect_timeout: 1s type: strict_dns diff --git a/examples/jaeger-tracing/service2-envoy-jaeger.yaml b/examples/jaeger-tracing/service2-envoy-jaeger.yaml index 24a38c9fb03e2..5b6a7d93b65ae 100644 --- a/examples/jaeger-tracing/service2-envoy-jaeger.yaml +++ b/examples/jaeger-tracing/service2-envoy-jaeger.yaml @@ -3,7 +3,7 @@ static_resources: - address: socket_address: address: 0.0.0.0 - port_value: 80 + port_value: 8000 traffic_direction: 
INBOUND filter_chains: - filters: diff --git a/examples/load-reporting-service/Dockerfile-http-server b/examples/load-reporting-service/Dockerfile-http-server index 6139ee948402e..3ae32015fb8dd 100644 --- a/examples/load-reporting-service/Dockerfile-http-server +++ b/examples/load-reporting-service/Dockerfile-http-server @@ -1,6 +1,6 @@ FROM envoyproxy/envoy-alpine-dev:latest -RUN apk update && apk add python3 bash curl +RUN apk update && apk add py3-pip bash curl RUN mkdir /code ADD ./start_service.sh /usr/local/bin/start_service.sh COPY . ./code diff --git a/examples/load-reporting-service/docker-compose.yaml b/examples/load-reporting-service/docker-compose.yaml index 4ed40f3a33fab..ca7f40334dcf0 100644 --- a/examples/load-reporting-service/docker-compose.yaml +++ b/examples/load-reporting-service/docker-compose.yaml @@ -7,6 +7,8 @@ services: dockerfile: Dockerfile-http-server volumes: - ./service-envoy-w-lrs.yaml:/etc/service-envoy-w-lrs.yaml + environment: + ENVOY_UID: 0 networks: envoymesh: aliases: @@ -17,7 +19,7 @@ services: ports: - "80-81:80" - "8081-8082:8081" - + lrs_server: build: context: . @@ -32,6 +34,6 @@ services: - "18000" ports: - "18000:18000" - + networks: - envoymesh: {} \ No newline at end of file + envoymesh: {} diff --git a/examples/lua/Dockerfile-proxy b/examples/lua/Dockerfile-proxy index 5ba5c9c33a0db..03cb54ac245bf 100644 --- a/examples/lua/Dockerfile-proxy +++ b/examples/lua/Dockerfile-proxy @@ -1,3 +1,5 @@ FROM envoyproxy/envoy-dev:latest ADD ./lib/mylibrary.lua /lib/mylibrary.lua +COPY ./envoy.yaml /etc/envoy.yaml +RUN chmod go+r /etc/envoy.yaml /lib/mylibrary.lua CMD /usr/local/bin/envoy -c /etc/envoy.yaml -l debug --service-cluster proxy diff --git a/examples/lua/docker-compose.yaml b/examples/lua/docker-compose.yaml index 2ee4860cfc48d..c5472e4aa8fd1 100644 --- a/examples/lua/docker-compose.yaml +++ b/examples/lua/docker-compose.yaml @@ -5,15 +5,13 @@ services: build: context: . 
dockerfile: Dockerfile-proxy - volumes: - - ./envoy.yaml:/etc/envoy.yaml networks: - envoymesh expose: - - "80" + - "8000" - "8001" ports: - - "8000:80" + - "8000:8000" - "8001:8001" web_service: diff --git a/examples/lua/envoy.yaml b/examples/lua/envoy.yaml index ab154f528d479..a106bc2feee09 100644 --- a/examples/lua/envoy.yaml +++ b/examples/lua/envoy.yaml @@ -4,7 +4,7 @@ static_resources: address: socket_address: address: 0.0.0.0 - port_value: 80 + port_value: 8000 filter_chains: - filters: - name: envoy.filters.network.http_connection_manager @@ -21,7 +21,7 @@ static_resources: routes: - match: prefix: "/" - route: + route: cluster: web_service http_filters: - name: envoy.filters.http.lua @@ -29,7 +29,7 @@ static_resources: "@type": type.googleapis.com/envoy.config.filter.http.lua.v2.Lua inline_code: | local mylibrary = require("lib.mylibrary") - + function envoy_on_request(request_handle) request_handle:headers():add("foo", mylibrary.foobar()) end diff --git a/examples/mysql/Dockerfile-proxy b/examples/mysql/Dockerfile-proxy index ad18604cd0c78..09595e6e6279e 100644 --- a/examples/mysql/Dockerfile-proxy +++ b/examples/mysql/Dockerfile-proxy @@ -1,3 +1,5 @@ FROM envoyproxy/envoy-dev:latest +COPY ./envoy.yaml /etc/envoy.yaml +RUN chmod go+r /etc/envoy.yaml CMD /usr/local/bin/envoy -c /etc/envoy.yaml -l debug diff --git a/examples/mysql/docker-compose.yaml b/examples/mysql/docker-compose.yaml index d4b8b13e13c1e..720a05acb2a2c 100644 --- a/examples/mysql/docker-compose.yaml +++ b/examples/mysql/docker-compose.yaml @@ -5,8 +5,6 @@ services: build: context: . 
dockerfile: Dockerfile-proxy - volumes: - - ./envoy.yaml:/etc/envoy.yaml networks: envoymesh: aliases: diff --git a/examples/zipkin-tracing/Dockerfile-frontenvoy b/examples/zipkin-tracing/Dockerfile-frontenvoy new file mode 100644 index 0000000000000..87040962caf2f --- /dev/null +++ b/examples/zipkin-tracing/Dockerfile-frontenvoy @@ -0,0 +1,7 @@ +FROM envoyproxy/envoy-dev:latest + +RUN apt-get update && apt-get -q install -y \ + curl +COPY ./front-envoy-zipkin.yaml /etc/front-envoy.yaml +RUN chmod go+r /etc/front-envoy.yaml +CMD /usr/local/bin/envoy -c /etc/front-envoy.yaml --service-cluster front-proxy diff --git a/examples/zipkin-tracing/docker-compose.yaml b/examples/zipkin-tracing/docker-compose.yaml index 132e752793058..dc82e926ef5a9 100644 --- a/examples/zipkin-tracing/docker-compose.yaml +++ b/examples/zipkin-tracing/docker-compose.yaml @@ -3,17 +3,15 @@ services: front-envoy: build: - context: ../ - dockerfile: front-proxy/Dockerfile-frontenvoy - volumes: - - ./front-envoy-zipkin.yaml:/etc/front-envoy.yaml + context: . 
+ dockerfile: Dockerfile-frontenvoy networks: - envoymesh expose: - - "80" + - "8000" - "8001" ports: - - "8000:80" + - "8000:8000" - "8001:8001" service1: @@ -29,7 +27,7 @@ services: environment: - SERVICE_NAME=1 expose: - - "80" + - "8000" service2: build: @@ -44,7 +42,7 @@ services: environment: - SERVICE_NAME=2 expose: - - "80" + - "8000" zipkin: image: openzipkin/zipkin diff --git a/examples/zipkin-tracing/front-envoy-zipkin.yaml b/examples/zipkin-tracing/front-envoy-zipkin.yaml index 8af5504e6d3c7..41e864552c3cf 100644 --- a/examples/zipkin-tracing/front-envoy-zipkin.yaml +++ b/examples/zipkin-tracing/front-envoy-zipkin.yaml @@ -3,7 +3,7 @@ static_resources: - address: socket_address: address: 0.0.0.0 - port_value: 80 + port_value: 8000 traffic_direction: OUTBOUND filter_chains: - filters: @@ -58,7 +58,7 @@ static_resources: address: socket_address: address: service1 - port_value: 80 + port_value: 8000 - name: zipkin connect_timeout: 1s type: strict_dns diff --git a/examples/zipkin-tracing/service1-envoy-zipkin.yaml b/examples/zipkin-tracing/service1-envoy-zipkin.yaml index 99a9a2df207e0..fe7318366db6c 100644 --- a/examples/zipkin-tracing/service1-envoy-zipkin.yaml +++ b/examples/zipkin-tracing/service1-envoy-zipkin.yaml @@ -3,7 +3,7 @@ static_resources: - address: socket_address: address: 0.0.0.0 - port_value: 80 + port_value: 8000 traffic_direction: INBOUND filter_chains: - filters: @@ -99,7 +99,7 @@ static_resources: address: socket_address: address: service2 - port_value: 80 + port_value: 8000 - name: zipkin connect_timeout: 1s type: strict_dns diff --git a/examples/zipkin-tracing/service2-envoy-zipkin.yaml b/examples/zipkin-tracing/service2-envoy-zipkin.yaml index 27b7e322149e6..ceebbcf609176 100644 --- a/examples/zipkin-tracing/service2-envoy-zipkin.yaml +++ b/examples/zipkin-tracing/service2-envoy-zipkin.yaml @@ -3,7 +3,7 @@ static_resources: - address: socket_address: address: 0.0.0.0 - port_value: 80 + port_value: 8000 traffic_direction: INBOUND 
filter_chains: - filters: diff --git a/generated_api_shadow/BUILD b/generated_api_shadow/BUILD index a028250022dd9..9ae658780070b 100644 --- a/generated_api_shadow/BUILD +++ b/generated_api_shadow/BUILD @@ -34,6 +34,7 @@ proto_library( "//envoy/config/filter/dubbo/router/v2alpha1:pkg", "//envoy/config/filter/fault/v2:pkg", "//envoy/config/filter/http/adaptive_concurrency/v2alpha:pkg", + "//envoy/config/filter/http/admission_control/v2alpha:pkg", "//envoy/config/filter/http/buffer/v2:pkg", "//envoy/config/filter/http/compressor/v2:pkg", "//envoy/config/filter/http/cors/v2:pkg", @@ -77,6 +78,7 @@ proto_library( "//envoy/config/filter/network/rate_limit/v2:pkg", "//envoy/config/filter/network/rbac/v2:pkg", "//envoy/config/filter/network/redis_proxy/v2:pkg", + "//envoy/config/filter/network/rocketmq_proxy/v3:pkg", "//envoy/config/filter/network/sni_cluster/v2:pkg", "//envoy/config/filter/network/tcp_proxy/v2:pkg", "//envoy/config/filter/network/thrift_proxy/v2alpha1:pkg", @@ -111,7 +113,6 @@ proto_library( "//envoy/config/transport_socket/alts/v2alpha:pkg", "//envoy/config/transport_socket/raw_buffer/v2:pkg", "//envoy/config/transport_socket/tap/v2alpha:pkg", - "//envoy/config/wasm/v2alpha:pkg", "//envoy/data/accesslog/v2:pkg", "//envoy/data/accesslog/v3:pkg", "//envoy/data/cluster/v2alpha:pkg", @@ -129,6 +130,7 @@ proto_library( "//envoy/extensions/common/tap/v3:pkg", "//envoy/extensions/filters/common/fault/v3:pkg", "//envoy/extensions/filters/http/adaptive_concurrency/v3:pkg", + "//envoy/extensions/filters/http/admission_control/v3alpha:pkg", "//envoy/extensions/filters/http/buffer/v3:pkg", "//envoy/extensions/filters/http/compressor/v3:pkg", "//envoy/extensions/filters/http/cors/v3:pkg", diff --git a/generated_api_shadow/README.md b/generated_api_shadow/README.md index 23442d8c17522..04633c218a7c4 100644 --- a/generated_api_shadow/README.md +++ b/generated_api_shadow/README.md @@ -1,6 +1,6 @@ This directory is for generated Envoy internal artifacts (via 
`proto_format`). -Do not hand edit any file under `envoy/`. This shadow API may only be used be -used in the Envoy source tree. +Do not hand edit any file under `envoy/`. This shadow API may only be used in +the Envoy source tree. -The `bazel/` tree is an symlink back to the official API Bazel rules. +The `bazel/` tree is a symlink back to the official API Bazel rules. diff --git a/generated_api_shadow/bazel/BUILD b/generated_api_shadow/bazel/BUILD index 279c7c9e6a9b0..4b582bb8be3f7 100644 --- a/generated_api_shadow/bazel/BUILD +++ b/generated_api_shadow/bazel/BUILD @@ -1,7 +1,7 @@ -licenses(["notice"]) # Apache 2 - load("@io_bazel_rules_go//proto:compiler.bzl", "go_proto_compiler") +licenses(["notice"]) # Apache 2 + go_proto_compiler( name = "pgv_plugin_go", options = ["lang=go"], diff --git a/generated_api_shadow/bazel/api_build_system.bzl b/generated_api_shadow/bazel/api_build_system.bzl index 7e88ab2bf9e56..c0269d161f805 100644 --- a/generated_api_shadow/bazel/api_build_system.bzl +++ b/generated_api_shadow/bazel/api_build_system.bzl @@ -1,7 +1,8 @@ +load("@rules_cc//cc:defs.bzl", "cc_test") load("@com_envoyproxy_protoc_gen_validate//bazel:pgv_proto_library.bzl", "pgv_cc_proto_library") load("@com_github_grpc_grpc//bazel:cc_grpc_library.bzl", "cc_grpc_library") load("@com_google_protobuf//:protobuf.bzl", _py_proto_library = "py_proto_library") -load("@io_bazel_rules_go//proto:def.bzl", "go_grpc_library", "go_proto_library") +load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") load("@io_bazel_rules_go//go:def.bzl", "go_test") load("@rules_proto//proto:defs.bzl", "proto_library") load( @@ -79,6 +80,10 @@ def py_proto_library(name, deps = [], plugin = None): if name == "annotations_py_proto": proto_deps = proto_deps + [":http_py_proto"] + # checked.proto depends on syntax.proto, we have to add this dependency manually as well. 
+ if name == "checked_py_proto": + proto_deps = proto_deps + [":syntax_py_proto"] + # py_proto_library does not support plugin as an argument yet at gRPC v1.25.0: # https://github.com/grpc/grpc/blob/v1.25.0/bazel/python_rules.bzl#L72. # plugin should also be passed in here when gRPC version is greater than v1.25.x. @@ -138,7 +143,7 @@ def api_cc_py_proto_library( _api_cc_grpc_library(name = cc_grpc_name, proto = relative_name, deps = cc_proto_deps) def api_cc_test(name, **kwargs): - native.cc_test( + cc_test( name = name, **kwargs ) @@ -171,13 +176,16 @@ def api_proto_package( if has_services: compilers = ["@io_bazel_rules_go//proto:go_grpc", "@envoy_api//bazel:pgv_plugin_go"] + # Because RBAC proro depends on googleapis syntax.proto and checked.proto, + # which share the same go proto library, it causes duplicative dependencies. + # Thus, we use depset().to_list() to remove duplicated depenencies. go_proto_library( name = name + _GO_PROTO_SUFFIX, compilers = compilers, importpath = _GO_IMPORTPATH_PREFIX + native.package_name(), proto = name, visibility = ["//visibility:public"], - deps = [_go_proto_mapping(dep) for dep in deps] + [ + deps = depset([_go_proto_mapping(dep) for dep in deps] + [ "@com_github_golang_protobuf//ptypes:go_default_library", "@com_github_golang_protobuf//ptypes/any:go_default_library", "@com_github_golang_protobuf//ptypes/duration:go_default_library", @@ -187,5 +195,5 @@ def api_proto_package( "@com_envoyproxy_protoc_gen_validate//validate:go_default_library", "@com_google_googleapis//google/api:annotations_go_proto", "@com_google_googleapis//google/rpc:status_go_proto", - ], + ]).to_list(), ) diff --git a/generated_api_shadow/bazel/external_proto_deps.bzl b/generated_api_shadow/bazel/external_proto_deps.bzl index 514093abef90a..659c7a72d73e0 100644 --- a/generated_api_shadow/bazel/external_proto_deps.bzl +++ b/generated_api_shadow/bazel/external_proto_deps.bzl @@ -9,6 +9,7 @@ # external dependencies. 
Since BUILD files are generated, this is the canonical # place to define this mapping. EXTERNAL_PROTO_IMPORT_BAZEL_DEP_MAP = { + "google/api/expr/v1alpha1/checked.proto": "@com_google_googleapis//google/api/expr/v1alpha1:checked_proto", "google/api/expr/v1alpha1/syntax.proto": "@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto", "metrics.proto": "@prometheus_metrics_model//:client_model", "opencensus/proto/trace/v1/trace.proto": "@opencensus_proto//opencensus/proto/trace/v1:trace_proto", @@ -17,6 +18,7 @@ EXTERNAL_PROTO_IMPORT_BAZEL_DEP_MAP = { # This maps from the Bazel proto_library target to the Go language binding target for external dependencies. EXTERNAL_PROTO_GO_BAZEL_DEP_MAP = { + "@com_google_googleapis//google/api/expr/v1alpha1:checked_proto": "@com_google_googleapis//google/api/expr/v1alpha1:expr_go_proto", "@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto": "@com_google_googleapis//google/api/expr/v1alpha1:expr_go_proto", "@opencensus_proto//opencensus/proto/trace/v1:trace_proto": "@opencensus_proto//opencensus/proto/trace/v1:trace_proto_go", "@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto": "@opencensus_proto//opencensus/proto/trace/v1:trace_and_config_proto_go", @@ -24,6 +26,7 @@ EXTERNAL_PROTO_GO_BAZEL_DEP_MAP = { # This maps from the Bazel proto_library target to the C++ language binding target for external dependencies. 
EXTERNAL_PROTO_CC_BAZEL_DEP_MAP = { + "@com_google_googleapis//google/api/expr/v1alpha1:checked_proto": "@com_google_googleapis//google/api/expr/v1alpha1:checked_cc_proto", "@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto": "@com_google_googleapis//google/api/expr/v1alpha1:syntax_cc_proto", "@opencensus_proto//opencensus/proto/trace/v1:trace_proto": "@opencensus_proto//opencensus/proto/trace/v1:trace_proto_cc", "@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto": "@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto_cc", @@ -31,6 +34,7 @@ EXTERNAL_PROTO_CC_BAZEL_DEP_MAP = { # This maps from the Bazel proto_library target to the Python language binding target for external dependencies. EXTERNAL_PROTO_PY_BAZEL_DEP_MAP = { + "@com_google_googleapis//google/api/expr/v1alpha1:checked_proto": "@com_google_googleapis//google/api/expr/v1alpha1:checked_py_proto", "@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto": "@com_google_googleapis//google/api/expr/v1alpha1:syntax_py_proto", "@opencensus_proto//opencensus/proto/trace/v1:trace_proto": "@opencensus_proto//opencensus/proto/trace/v1:trace_proto_py", "@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto": "@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto_py", diff --git a/generated_api_shadow/bazel/repositories.bzl b/generated_api_shadow/bazel/repositories.bzl index af1f11331d013..a64e733cf74a9 100644 --- a/generated_api_shadow/bazel/repositories.bzl +++ b/generated_api_shadow/bazel/repositories.bzl @@ -1,4 +1,3 @@ -load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") load(":envoy_http_archive.bzl", "envoy_http_archive") load(":repository_locations.bzl", "REPOSITORY_LOCATIONS") diff --git a/generated_api_shadow/bazel/repository_locations.bzl b/generated_api_shadow/bazel/repository_locations.bzl index c275a8c658353..0a0379f7685e3 100644 --- a/generated_api_shadow/bazel/repository_locations.bzl +++ 
b/generated_api_shadow/bazel/repository_locations.bzl @@ -4,8 +4,8 @@ BAZEL_SKYLIB_SHA256 = "1dde365491125a3db70731e25658dfdd3bc5dbdfd11b840b3e987ecf0 OPENCENSUS_PROTO_GIT_SHA = "be218fb6bd674af7519b1850cdf8410d8cbd48e8" # Dec 20, 2019 OPENCENSUS_PROTO_SHA256 = "e3bbdc94375e86c0edfb2fc5851507e08a3f26ee725ffff7c5c0e73264bdfcde" -PGV_GIT_SHA = "ab56c3dd1cf9b516b62c5087e1ec1471bd63631e" # Mar 11, 2020 -PGV_SHA256 = "3be12077affd1ebf8787001f5fba545cc5f1b914964dab4e0cc77c43fba03b41" +PGV_GIT_SHA = "278964a8052f96a2f514add0298098f63fb7f47f" # June 9, 2020 +PGV_SHA256 = "e368733c9fb7f8489591ffaf269170d7658cc0cd1ee322b601512b769446d3c8" GOOGLEAPIS_GIT_SHA = "82944da21578a53b74e547774cf62ed31a05b841" # Dec 2, 2019 GOOGLEAPIS_SHA = "a45019af4d3290f02eaeb1ce10990166978c807cb33a9692141a076ba46d1405" @@ -13,8 +13,8 @@ GOOGLEAPIS_SHA = "a45019af4d3290f02eaeb1ce10990166978c807cb33a9692141a076ba46d14 PROMETHEUS_GIT_SHA = "99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c" # Nov 17, 2017 PROMETHEUS_SHA = "783bdaf8ee0464b35ec0c8704871e1e72afa0005c3f3587f65d9d6694bf3911b" -UDPA_GIT_SHA = "e8cd3a4bb307e2c810cffff99f93e96e6d7fee85" # Mar 27, 2020 -UDPA_SHA256 = "1fd7857cb61daee7726fca8f4d55e4923774a8d00a53007a4093830dc0482685" +UDPA_GIT_SHA = "efcf912fb35470672231c7b7bef620f3d17f655a" # June 29, 2020 +UDPA_SHA256 = "0f8179fbe3d27b89a4c34b2fbd55832f3b27b6810ea9b03b36d18da2629cc871" ZIPKINAPI_RELEASE = "0.2.2" # Aug 23, 2019 ZIPKINAPI_SHA256 = "688c4fe170821dd589f36ec45aaadc03a618a40283bc1f97da8fa11686fc816b" @@ -33,7 +33,7 @@ REPOSITORY_LOCATIONS = dict( urls = ["https://github.com/envoyproxy/protoc-gen-validate/archive/" + PGV_GIT_SHA + ".tar.gz"], ), com_google_googleapis = dict( - # TODO(dio): Consider writing a Skylark macro for importing Google API proto. + # TODO(dio): Consider writing a Starlark macro for importing Google API proto. 
sha256 = GOOGLEAPIS_SHA, strip_prefix = "googleapis-" + GOOGLEAPIS_GIT_SHA, urls = ["https://github.com/googleapis/googleapis/archive/" + GOOGLEAPIS_GIT_SHA + ".tar.gz"], diff --git a/generated_api_shadow/envoy/admin/v3/config_dump.proto b/generated_api_shadow/envoy/admin/v3/config_dump.proto index b3c3836a8cc00..73156697fdb21 100644 --- a/generated_api_shadow/envoy/admin/v3/config_dump.proto +++ b/generated_api_shadow/envoy/admin/v3/config_dump.proto @@ -30,9 +30,12 @@ message ConfigDump { // // * *bootstrap*: :ref:`BootstrapConfigDump ` // * *clusters*: :ref:`ClustersConfigDump ` + // * *endpoints*: :ref:`EndpointsConfigDump ` // * *listeners*: :ref:`ListenersConfigDump ` // * *routes*: :ref:`RoutesConfigDump ` // + // EDS Configuration will only be dumped by using parameter `?include_eds` + // // You can filter output with the resource and mask query parameters. // See :ref:`/config_dump?resource={} `, // :ref:`/config_dump?mask={} `, @@ -346,3 +349,35 @@ message SecretsConfigDump { // warming in preparation to service clusters or listeners. repeated DynamicSecret dynamic_warming_secrets = 3; } + +// Envoy's admin fill this message with all currently known endpoints. Endpoint +// configuration information can be used to recreate an Envoy configuration by populating all +// endpoints as static endpoints or by returning them in an EDS response. +message EndpointsConfigDump { + message StaticEndpointConfig { + // The endpoint config. + google.protobuf.Any endpoint_config = 1; + + // [#not-implemented-hide:] The timestamp when the Endpoint was last updated. + google.protobuf.Timestamp last_updated = 2; + } + + message DynamicEndpointConfig { + // [#not-implemented-hide:] This is the per-resource version information. This version is currently taken from the + // :ref:`version_info ` field at the time that + // the endpoint configuration was loaded. + string version_info = 1; + + // The endpoint config. 
+ google.protobuf.Any endpoint_config = 2; + + // [#not-implemented-hide:] The timestamp when the Endpoint was last updated. + google.protobuf.Timestamp last_updated = 3; + } + + // The statically loaded endpoint configs. + repeated StaticEndpointConfig static_endpoint_configs = 2; + + // The dynamically loaded endpoint configs. + repeated DynamicEndpointConfig dynamic_endpoint_configs = 3; +} diff --git a/generated_api_shadow/envoy/admin/v3/server_info.proto b/generated_api_shadow/envoy/admin/v3/server_info.proto index d412a7f011de0..480ce862c6a5f 100644 --- a/generated_api_shadow/envoy/admin/v3/server_info.proto +++ b/generated_api_shadow/envoy/admin/v3/server_info.proto @@ -54,7 +54,7 @@ message ServerInfo { CommandLineOptions command_line_options = 6; } -// [#next-free-field: 29] +// [#next-free-field: 34] message CommandLineOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.CommandLineOptions"; @@ -75,11 +75,25 @@ message CommandLineOptions { InitOnly = 2; } + enum DrainStrategy { + // Gradually discourage connections over the course of the drain period. + Gradual = 0; + + // Discourage all connections for the duration of the drain sequence. + Immediate = 1; + } + reserved 12; // See :option:`--base-id` for details. uint64 base_id = 1; + // See :option:`--use-dynamic-base-id` for details. + bool use_dynamic_base_id = 31; + + // See :option:`--base-id-path` for details. + string base_id_path = 32; + // See :option:`--concurrency` for details. uint32 concurrency = 2; @@ -95,6 +109,9 @@ message CommandLineOptions { // See :option:`--reject-unknown-dynamic-fields` for details. bool reject_unknown_dynamic_fields = 26; + // See :option:`--ignore-unknown-dynamic-fields` for details. + bool ignore_unknown_dynamic_fields = 30; + // See :option:`--admin-address-path` for details. string admin_address_path = 6; @@ -131,6 +148,9 @@ message CommandLineOptions { // See :option:`--drain-time-s` for details. 
google.protobuf.Duration drain_time = 17; + // See :option:`--drain-strategy` for details. + DrainStrategy drain_strategy = 33; + // See :option:`--parent-shutdown-time-s` for details. google.protobuf.Duration parent_shutdown_time = 18; @@ -152,6 +172,9 @@ message CommandLineOptions { // See :option:`--disable-extensions` for details. repeated string disabled_extensions = 28; + // See :option:`--bootstrap-version` for details. + uint32 bootstrap_version = 29; + uint64 hidden_envoy_deprecated_max_stats = 20 [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; diff --git a/generated_api_shadow/envoy/admin/v4alpha/BUILD b/generated_api_shadow/envoy/admin/v4alpha/BUILD index 6da5b60bad287..d64c4f6a08167 100644 --- a/generated_api_shadow/envoy/admin/v4alpha/BUILD +++ b/generated_api_shadow/envoy/admin/v4alpha/BUILD @@ -10,7 +10,7 @@ api_proto_package( "//envoy/annotations:pkg", "//envoy/config/bootstrap/v4alpha:pkg", "//envoy/config/core/v4alpha:pkg", - "//envoy/config/tap/v3:pkg", + "//envoy/config/tap/v4alpha:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], diff --git a/generated_api_shadow/envoy/admin/v4alpha/config_dump.proto b/generated_api_shadow/envoy/admin/v4alpha/config_dump.proto index 02709a4145063..8bbd5743219da 100644 --- a/generated_api_shadow/envoy/admin/v4alpha/config_dump.proto +++ b/generated_api_shadow/envoy/admin/v4alpha/config_dump.proto @@ -30,9 +30,12 @@ message ConfigDump { // // * *bootstrap*: :ref:`BootstrapConfigDump ` // * *clusters*: :ref:`ClustersConfigDump ` + // * *endpoints*: :ref:`EndpointsConfigDump ` // * *listeners*: :ref:`ListenersConfigDump ` // * *routes*: :ref:`RoutesConfigDump ` // + // EDS Configuration will only be dumped by using parameter `?include_eds` + // // You can filter output with the resource and mask query parameters. 
// See :ref:`/config_dump?resource={} `, // :ref:`/config_dump?mask={} `, @@ -340,3 +343,43 @@ message SecretsConfigDump { // warming in preparation to service clusters or listeners. repeated DynamicSecret dynamic_warming_secrets = 3; } + +// Envoy's admin fill this message with all currently known endpoints. Endpoint +// configuration information can be used to recreate an Envoy configuration by populating all +// endpoints as static endpoints or by returning them in an EDS response. +message EndpointsConfigDump { + option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.EndpointsConfigDump"; + + message StaticEndpointConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.admin.v3.EndpointsConfigDump.StaticEndpointConfig"; + + // The endpoint config. + google.protobuf.Any endpoint_config = 1; + + // [#not-implemented-hide:] The timestamp when the Endpoint was last updated. + google.protobuf.Timestamp last_updated = 2; + } + + message DynamicEndpointConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.admin.v3.EndpointsConfigDump.DynamicEndpointConfig"; + + // [#not-implemented-hide:] This is the per-resource version information. This version is currently taken from the + // :ref:`version_info ` field at the time that + // the endpoint configuration was loaded. + string version_info = 1; + + // The endpoint config. + google.protobuf.Any endpoint_config = 2; + + // [#not-implemented-hide:] The timestamp when the Endpoint was last updated. + google.protobuf.Timestamp last_updated = 3; + } + + // The statically loaded endpoint configs. + repeated StaticEndpointConfig static_endpoint_configs = 2; + + // The dynamically loaded endpoint configs. 
+ repeated DynamicEndpointConfig dynamic_endpoint_configs = 3; +} diff --git a/generated_api_shadow/envoy/admin/v4alpha/server_info.proto b/generated_api_shadow/envoy/admin/v4alpha/server_info.proto index 867a9255bc51f..e3e40ac2eabc1 100644 --- a/generated_api_shadow/envoy/admin/v4alpha/server_info.proto +++ b/generated_api_shadow/envoy/admin/v4alpha/server_info.proto @@ -54,7 +54,7 @@ message ServerInfo { CommandLineOptions command_line_options = 6; } -// [#next-free-field: 29] +// [#next-free-field: 34] message CommandLineOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.CommandLineOptions"; @@ -74,6 +74,14 @@ message CommandLineOptions { InitOnly = 2; } + enum DrainStrategy { + // Gradually discourage connections over the course of the drain period. + Gradual = 0; + + // Discourage all connections for the duration of the drain sequence. + Immediate = 1; + } + reserved 12, 20, 21; reserved "max_stats", "max_obj_name_len"; @@ -81,6 +89,12 @@ message CommandLineOptions { // See :option:`--base-id` for details. uint64 base_id = 1; + // See :option:`--use-dynamic-base-id` for details. + bool use_dynamic_base_id = 31; + + // See :option:`--base-id-path` for details. + string base_id_path = 32; + // See :option:`--concurrency` for details. uint32 concurrency = 2; @@ -96,6 +110,9 @@ message CommandLineOptions { // See :option:`--reject-unknown-dynamic-fields` for details. bool reject_unknown_dynamic_fields = 26; + // See :option:`--ignore-unknown-dynamic-fields` for details. + bool ignore_unknown_dynamic_fields = 30; + // See :option:`--admin-address-path` for details. string admin_address_path = 6; @@ -132,6 +149,9 @@ message CommandLineOptions { // See :option:`--drain-time-s` for details. google.protobuf.Duration drain_time = 17; + // See :option:`--drain-strategy` for details. + DrainStrategy drain_strategy = 33; + // See :option:`--parent-shutdown-time-s` for details. 
google.protobuf.Duration parent_shutdown_time = 18; @@ -152,4 +172,7 @@ message CommandLineOptions { // See :option:`--disable-extensions` for details. repeated string disabled_extensions = 28; + + // See :option:`--bootstrap-version` for details. + uint32 bootstrap_version = 29; } diff --git a/generated_api_shadow/envoy/admin/v4alpha/tap.proto b/generated_api_shadow/envoy/admin/v4alpha/tap.proto index c47b308d6ee6d..039dfcfeb8120 100644 --- a/generated_api_shadow/envoy/admin/v4alpha/tap.proto +++ b/generated_api_shadow/envoy/admin/v4alpha/tap.proto @@ -2,7 +2,7 @@ syntax = "proto3"; package envoy.admin.v4alpha; -import "envoy/config/tap/v3/common.proto"; +import "envoy/config/tap/v4alpha/common.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; @@ -24,5 +24,5 @@ message TapRequest { string config_id = 1 [(validate.rules).string = {min_bytes: 1}]; // The tap configuration to load. - config.tap.v3.TapConfig tap_config = 2 [(validate.rules).message = {required: true}]; + config.tap.v4alpha.TapConfig tap_config = 2 [(validate.rules).message = {required: true}]; } diff --git a/generated_api_shadow/envoy/api/v2/auth/cert.proto b/generated_api_shadow/envoy/api/v2/auth/cert.proto index a1642318e0438..49e8b8c70fa2e 100644 --- a/generated_api_shadow/envoy/api/v2/auth/cert.proto +++ b/generated_api_shadow/envoy/api/v2/auth/cert.proto @@ -2,486 +2,15 @@ syntax = "proto3"; package envoy.api.v2.auth; -import "envoy/api/v2/core/base.proto"; -import "envoy/api/v2/core/config_source.proto"; -import "envoy/type/matcher/string.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/wrappers.proto"; - import "udpa/annotations/migrate.proto"; -import "udpa/annotations/sensitive.proto"; import "udpa/annotations/status.proto"; -import "validate/validate.proto"; + +import public "envoy/api/v2/auth/common.proto"; +import public 
"envoy/api/v2/auth/secret.proto"; +import public "envoy/api/v2/auth/tls.proto"; option java_package = "io.envoyproxy.envoy.api.v2.auth"; option java_outer_classname = "CertProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.transport_sockets.tls.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Common TLS configuration] - -message TlsParameters { - enum TlsProtocol { - // Envoy will choose the optimal TLS version. - TLS_AUTO = 0; - - // TLS 1.0 - TLSv1_0 = 1; - - // TLS 1.1 - TLSv1_1 = 2; - - // TLS 1.2 - TLSv1_2 = 3; - - // TLS 1.3 - TLSv1_3 = 4; - } - - // Minimum TLS protocol version. By default, it's ``TLSv1_2`` for clients and ``TLSv1_0`` for - // servers. - TlsProtocol tls_minimum_protocol_version = 1 [(validate.rules).enum = {defined_only: true}]; - - // Maximum TLS protocol version. By default, it's ``TLSv1_3`` for servers in non-FIPS builds, and - // ``TLSv1_2`` for clients and for servers using :ref:`BoringSSL FIPS `. - TlsProtocol tls_maximum_protocol_version = 2 [(validate.rules).enum = {defined_only: true}]; - - // If specified, the TLS listener will only support the specified `cipher list - // `_ - // when negotiating TLS 1.0-1.2 (this setting has no effect when negotiating TLS 1.3). If not - // specified, the default list will be used. - // - // In non-FIPS builds, the default cipher list is: - // - // .. code-block:: none - // - // [ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA-CHACHA20-POLY1305] - // [ECDHE-RSA-AES128-GCM-SHA256|ECDHE-RSA-CHACHA20-POLY1305] - // ECDHE-ECDSA-AES128-SHA - // ECDHE-RSA-AES128-SHA - // AES128-GCM-SHA256 - // AES128-SHA - // ECDHE-ECDSA-AES256-GCM-SHA384 - // ECDHE-RSA-AES256-GCM-SHA384 - // ECDHE-ECDSA-AES256-SHA - // ECDHE-RSA-AES256-SHA - // AES256-GCM-SHA384 - // AES256-SHA - // - // In builds using :ref:`BoringSSL FIPS `, the default cipher list is: - // - // .. 
code-block:: none - // - // ECDHE-ECDSA-AES128-GCM-SHA256 - // ECDHE-RSA-AES128-GCM-SHA256 - // ECDHE-ECDSA-AES128-SHA - // ECDHE-RSA-AES128-SHA - // AES128-GCM-SHA256 - // AES128-SHA - // ECDHE-ECDSA-AES256-GCM-SHA384 - // ECDHE-RSA-AES256-GCM-SHA384 - // ECDHE-ECDSA-AES256-SHA - // ECDHE-RSA-AES256-SHA - // AES256-GCM-SHA384 - // AES256-SHA - repeated string cipher_suites = 3; - - // If specified, the TLS connection will only support the specified ECDH - // curves. If not specified, the default curves will be used. - // - // In non-FIPS builds, the default curves are: - // - // .. code-block:: none - // - // X25519 - // P-256 - // - // In builds using :ref:`BoringSSL FIPS `, the default curve is: - // - // .. code-block:: none - // - // P-256 - repeated string ecdh_curves = 4; -} - -// BoringSSL private key method configuration. The private key methods are used for external -// (potentially asynchronous) signing and decryption operations. Some use cases for private key -// methods would be TPM support and TLS acceleration. -message PrivateKeyProvider { - // Private key method provider name. The name must match a - // supported private key method provider type. - string provider_name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // Private key method provider specific configuration. - oneof config_type { - google.protobuf.Struct config = 2 [deprecated = true, (udpa.annotations.sensitive) = true]; - - google.protobuf.Any typed_config = 3 [(udpa.annotations.sensitive) = true]; - } -} - -// [#next-free-field: 7] -message TlsCertificate { - // The TLS certificate chain. - core.DataSource certificate_chain = 1; - - // The TLS private key. - core.DataSource private_key = 2 [(udpa.annotations.sensitive) = true]; - - // BoringSSL private key method provider. This is an alternative to :ref:`private_key - // ` field. This can't be - // marked as ``oneof`` due to API compatibility reasons. 
Setting both :ref:`private_key - // ` and - // :ref:`private_key_provider - // ` fields will result in an - // error. - PrivateKeyProvider private_key_provider = 6; - - // The password to decrypt the TLS private key. If this field is not set, it is assumed that the - // TLS private key is not password encrypted. - core.DataSource password = 3 [(udpa.annotations.sensitive) = true]; - - // [#not-implemented-hide:] - core.DataSource ocsp_staple = 4; - - // [#not-implemented-hide:] - repeated core.DataSource signed_certificate_timestamp = 5; -} - -message TlsSessionTicketKeys { - // Keys for encrypting and decrypting TLS session tickets. The - // first key in the array contains the key to encrypt all new sessions created by this context. - // All keys are candidates for decrypting received tickets. This allows for easy rotation of keys - // by, for example, putting the new key first, and the previous key second. - // - // If :ref:`session_ticket_keys ` - // is not specified, the TLS library will still support resuming sessions via tickets, but it will - // use an internally-generated and managed key, so sessions cannot be resumed across hot restarts - // or on different hosts. - // - // Each key must contain exactly 80 bytes of cryptographically-secure random data. For - // example, the output of ``openssl rand 80``. - // - // .. attention:: - // - // Using this feature has serious security considerations and risks. Improper handling of keys - // may result in loss of secrecy in connections, even if ciphers supporting perfect forward - // secrecy are used. See https://www.imperialviolet.org/2013/06/27/botchingpfs.html for some - // discussion. 
To minimize the risk, you must: - // - // * Keep the session ticket keys at least as secure as your TLS certificate private keys - // * Rotate session ticket keys at least daily, and preferably hourly - // * Always generate keys using a cryptographically-secure random data source - repeated core.DataSource keys = 1 - [(validate.rules).repeated = {min_items: 1}, (udpa.annotations.sensitive) = true]; -} - -// [#next-free-field: 11] -message CertificateValidationContext { - // Peer certificate verification mode. - enum TrustChainVerification { - // Perform default certificate verification (e.g., against CA / verification lists) - VERIFY_TRUST_CHAIN = 0; - - // Connections where the certificate fails verification will be permitted. - // For HTTP connections, the result of certificate verification can be used in route matching. ( - // see :ref:`validated ` ). - ACCEPT_UNTRUSTED = 1; - } - - // TLS certificate data containing certificate authority certificates to use in verifying - // a presented peer certificate (e.g. server certificate for clusters or client certificate - // for listeners). If not specified and a peer certificate is presented it will not be - // verified. By default, a client certificate is optional, unless one of the additional - // options (:ref:`require_client_certificate - // `, - // :ref:`verify_certificate_spki - // `, - // :ref:`verify_certificate_hash - // `, or - // :ref:`match_subject_alt_names - // `) is also - // specified. - // - // It can optionally contain certificate revocation lists, in which case Envoy will verify - // that the presented peer certificate has not been revoked by one of the included CRLs. - // - // See :ref:`the TLS overview ` for a list of common - // system CA locations. - core.DataSource trusted_ca = 1; - - // An optional list of base64-encoded SHA-256 hashes. 
If specified, Envoy will verify that the - // SHA-256 of the DER-encoded Subject Public Key Information (SPKI) of the presented certificate - // matches one of the specified values. - // - // A base64-encoded SHA-256 of the Subject Public Key Information (SPKI) of the certificate - // can be generated with the following command: - // - // .. code-block:: bash - // - // $ openssl x509 -in path/to/client.crt -noout -pubkey - // | openssl pkey -pubin -outform DER - // | openssl dgst -sha256 -binary - // | openssl enc -base64 - // NvqYIYSbgK2vCJpQhObf77vv+bQWtc5ek5RIOwPiC9A= - // - // This is the format used in HTTP Public Key Pinning. - // - // When both: - // :ref:`verify_certificate_hash - // ` and - // :ref:`verify_certificate_spki - // ` are specified, - // a hash matching value from either of the lists will result in the certificate being accepted. - // - // .. attention:: - // - // This option is preferred over :ref:`verify_certificate_hash - // `, - // because SPKI is tied to a private key, so it doesn't change when the certificate - // is renewed using the same private key. - repeated string verify_certificate_spki = 3 - [(validate.rules).repeated = {items {string {min_bytes: 44 max_bytes: 44}}}]; - - // An optional list of hex-encoded SHA-256 hashes. If specified, Envoy will verify that - // the SHA-256 of the DER-encoded presented certificate matches one of the specified values. - // - // A hex-encoded SHA-256 of the certificate can be generated with the following command: - // - // .. code-block:: bash - // - // $ openssl x509 -in path/to/client.crt -outform DER | openssl dgst -sha256 | cut -d" " -f2 - // df6ff72fe9116521268f6f2dd4966f51df479883fe7037b39f75916ac3049d1a - // - // A long hex-encoded and colon-separated SHA-256 (a.k.a. "fingerprint") of the certificate - // can be generated with the following command: - // - // .. 
code-block:: bash - // - // $ openssl x509 -in path/to/client.crt -noout -fingerprint -sha256 | cut -d"=" -f2 - // DF:6F:F7:2F:E9:11:65:21:26:8F:6F:2D:D4:96:6F:51:DF:47:98:83:FE:70:37:B3:9F:75:91:6A:C3:04:9D:1A - // - // Both of those formats are acceptable. - // - // When both: - // :ref:`verify_certificate_hash - // ` and - // :ref:`verify_certificate_spki - // ` are specified, - // a hash matching value from either of the lists will result in the certificate being accepted. - repeated string verify_certificate_hash = 2 - [(validate.rules).repeated = {items {string {min_bytes: 64 max_bytes: 95}}}]; - - // An optional list of Subject Alternative Names. If specified, Envoy will verify that the - // Subject Alternative Name of the presented certificate matches one of the specified values. - // - // .. attention:: - // - // Subject Alternative Names are easily spoofable and verifying only them is insecure, - // therefore this option must be used together with :ref:`trusted_ca - // `. - repeated string verify_subject_alt_name = 4 [deprecated = true]; - - // An optional list of Subject Alternative name matchers. Envoy will verify that the - // Subject Alternative Name of the presented certificate matches one of the specified matches. - // - // When a certificate has wildcard DNS SAN entries, to match a specific client, it should be - // configured with exact match type in the :ref:`string matcher `. - // For example if the certificate has "\*.example.com" as DNS SAN entry, to allow only "api.example.com", - // it should be configured as shown below. - // - // .. code-block:: yaml - // - // match_subject_alt_names: - // exact: "api.example.com" - // - // .. attention:: - // - // Subject Alternative Names are easily spoofable and verifying only them is insecure, - // therefore this option must be used together with :ref:`trusted_ca - // `. 
- repeated type.matcher.StringMatcher match_subject_alt_names = 9; - - // [#not-implemented-hide:] Must present a signed time-stamped OCSP response. - google.protobuf.BoolValue require_ocsp_staple = 5; - - // [#not-implemented-hide:] Must present signed certificate time-stamp. - google.protobuf.BoolValue require_signed_certificate_timestamp = 6; - - // An optional `certificate revocation list - // `_ - // (in PEM format). If specified, Envoy will verify that the presented peer - // certificate has not been revoked by this CRL. If this DataSource contains - // multiple CRLs, all of them will be used. - core.DataSource crl = 7; - - // If specified, Envoy will not reject expired certificates. - bool allow_expired_certificate = 8; - - // Certificate trust chain verification mode. - TrustChainVerification trust_chain_verification = 10 - [(validate.rules).enum = {defined_only: true}]; -} - -// TLS context shared by both client and server TLS contexts. -// [#next-free-field: 9] -message CommonTlsContext { - message CombinedCertificateValidationContext { - // How to validate peer certificates. - CertificateValidationContext default_validation_context = 1 - [(validate.rules).message = {required: true}]; - - // Config for fetching validation context via SDS API. - SdsSecretConfig validation_context_sds_secret_config = 2 - [(validate.rules).message = {required: true}]; - } - - reserved 5; - - // TLS protocol versions, cipher suites etc. - TlsParameters tls_params = 1; - - // :ref:`Multiple TLS certificates ` can be associated with the - // same context to allow both RSA and ECDSA certificates. - // - // Only a single TLS certificate is supported in client contexts. In server contexts, the first - // RSA certificate is used for clients that only support RSA and the first ECDSA certificate is - // used for clients that support ECDSA. - repeated TlsCertificate tls_certificates = 2; - - // Configs for fetching TLS certificates via SDS API. 
- repeated SdsSecretConfig tls_certificate_sds_secret_configs = 6 - [(validate.rules).repeated = {max_items: 1}]; - - oneof validation_context_type { - // How to validate peer certificates. - CertificateValidationContext validation_context = 3; - - // Config for fetching validation context via SDS API. - SdsSecretConfig validation_context_sds_secret_config = 7; - - // Combined certificate validation context holds a default CertificateValidationContext - // and SDS config. When SDS server returns dynamic CertificateValidationContext, both dynamic - // and default CertificateValidationContext are merged into a new CertificateValidationContext - // for validation. This merge is done by Message::MergeFrom(), so dynamic - // CertificateValidationContext overwrites singular fields in default - // CertificateValidationContext, and concatenates repeated fields to default - // CertificateValidationContext, and logical OR is applied to boolean fields. - CombinedCertificateValidationContext combined_validation_context = 8; - } - - // Supplies the list of ALPN protocols that the listener should expose. In - // practice this is likely to be set to one of two values (see the - // :ref:`codec_type - // ` - // parameter in the HTTP connection manager for more information): - // - // * "h2,http/1.1" If the listener is going to support both HTTP/2 and HTTP/1.1. - // * "http/1.1" If the listener is only going to support HTTP/1.1. - // - // There is no default for this parameter. If empty, Envoy will not expose ALPN. - repeated string alpn_protocols = 4; -} - -message UpstreamTlsContext { - // Common TLS context settings. - // - // .. attention:: - // - // Server certificate verification is not enabled by default. Configure - // :ref:`trusted_ca` to enable - // verification. - CommonTlsContext common_tls_context = 1; - - // SNI string to use when creating TLS backend connections. 
- string sni = 2 [(validate.rules).string = {max_bytes: 255}]; - - // If true, server-initiated TLS renegotiation will be allowed. - // - // .. attention:: - // - // TLS renegotiation is considered insecure and shouldn't be used unless absolutely necessary. - bool allow_renegotiation = 3; - - // Maximum number of session keys (Pre-Shared Keys for TLSv1.3+, Session IDs and Session Tickets - // for TLSv1.2 and older) to store for the purpose of session resumption. - // - // Defaults to 1, setting this to 0 disables session resumption. - google.protobuf.UInt32Value max_session_keys = 4; -} - -// [#next-free-field: 8] -message DownstreamTlsContext { - // Common TLS context settings. - CommonTlsContext common_tls_context = 1; - - // If specified, Envoy will reject connections without a valid client - // certificate. - google.protobuf.BoolValue require_client_certificate = 2; - - // If specified, Envoy will reject connections without a valid and matching SNI. - // [#not-implemented-hide:] - google.protobuf.BoolValue require_sni = 3; - - oneof session_ticket_keys_type { - // TLS session ticket key settings. - TlsSessionTicketKeys session_ticket_keys = 4; - - // Config for fetching TLS session ticket keys via SDS API. - SdsSecretConfig session_ticket_keys_sds_secret_config = 5; - - // Config for controlling stateless TLS session resumption: setting this to true will cause the TLS - // server to not issue TLS session tickets for the purposes of stateless TLS session resumption. - // If set to false, the TLS server will issue TLS session tickets and encrypt/decrypt them using - // the keys specified through either :ref:`session_ticket_keys ` - // or :ref:`session_ticket_keys_sds_secret_config `. 
- // If this config is set to false and no keys are explicitly configured, the TLS server will issue - // TLS session tickets and encrypt/decrypt them using an internally-generated and managed key, with the - // implication that sessions cannot be resumed across hot restarts or on different hosts. - bool disable_stateless_session_resumption = 7; - } - - // If specified, session_timeout will change maximum lifetime (in seconds) of TLS session - // Currently this value is used as a hint to `TLS session ticket lifetime (for TLSv1.2) - // ` - // only seconds could be specified (fractional seconds are going to be ignored). - google.protobuf.Duration session_timeout = 6 [(validate.rules).duration = { - lt {seconds: 4294967296} - gte {} - }]; -} - -message GenericSecret { - // Secret of generic type and is available to filters. - core.DataSource secret = 1 [(udpa.annotations.sensitive) = true]; -} - -message SdsSecretConfig { - // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. - // When both name and config are specified, then secret can be fetched and/or reloaded via - // SDS. When only name is specified, then secret will be loaded from static resources. - string name = 1; - - core.ConfigSource sds_config = 2; -} - -// [#next-free-field: 6] -message Secret { - // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. 
- string name = 1; - - oneof type { - TlsCertificate tls_certificate = 2; - - TlsSessionTicketKeys session_ticket_keys = 3; - - CertificateValidationContext validation_context = 4; - - GenericSecret generic_secret = 5; - } -} diff --git a/generated_api_shadow/envoy/api/v2/auth/common.proto b/generated_api_shadow/envoy/api/v2/auth/common.proto new file mode 100644 index 0000000000000..c8122f4010297 --- /dev/null +++ b/generated_api_shadow/envoy/api/v2/auth/common.proto @@ -0,0 +1,327 @@ +syntax = "proto3"; + +package envoy.api.v2.auth; + +import "envoy/api/v2/core/base.proto"; +import "envoy/type/matcher/string.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/migrate.proto"; +import "udpa/annotations/sensitive.proto"; +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.api.v2.auth"; +option java_outer_classname = "CommonProto"; +option java_multiple_files = true; +option (udpa.annotations.file_migrate).move_to_package = + "envoy.extensions.transport_sockets.tls.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; + +// [#protodoc-title: Common TLS configuration] + +message TlsParameters { + enum TlsProtocol { + // Envoy will choose the optimal TLS version. + TLS_AUTO = 0; + + // TLS 1.0 + TLSv1_0 = 1; + + // TLS 1.1 + TLSv1_1 = 2; + + // TLS 1.2 + TLSv1_2 = 3; + + // TLS 1.3 + TLSv1_3 = 4; + } + + // Minimum TLS protocol version. By default, it's ``TLSv1_2`` for clients and ``TLSv1_0`` for + // servers. + TlsProtocol tls_minimum_protocol_version = 1 [(validate.rules).enum = {defined_only: true}]; + + // Maximum TLS protocol version. By default, it's ``TLSv1_2`` for clients and ``TLSv1_3`` for + // servers. 
+ TlsProtocol tls_maximum_protocol_version = 2 [(validate.rules).enum = {defined_only: true}]; + + // If specified, the TLS listener will only support the specified `cipher list + // `_ + // when negotiating TLS 1.0-1.2 (this setting has no effect when negotiating TLS 1.3). If not + // specified, the default list will be used. + // + // In non-FIPS builds, the default cipher list is: + // + // .. code-block:: none + // + // [ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA-CHACHA20-POLY1305] + // [ECDHE-RSA-AES128-GCM-SHA256|ECDHE-RSA-CHACHA20-POLY1305] + // ECDHE-ECDSA-AES128-SHA + // ECDHE-RSA-AES128-SHA + // AES128-GCM-SHA256 + // AES128-SHA + // ECDHE-ECDSA-AES256-GCM-SHA384 + // ECDHE-RSA-AES256-GCM-SHA384 + // ECDHE-ECDSA-AES256-SHA + // ECDHE-RSA-AES256-SHA + // AES256-GCM-SHA384 + // AES256-SHA + // + // In builds using :ref:`BoringSSL FIPS `, the default cipher list is: + // + // .. code-block:: none + // + // ECDHE-ECDSA-AES128-GCM-SHA256 + // ECDHE-RSA-AES128-GCM-SHA256 + // ECDHE-ECDSA-AES128-SHA + // ECDHE-RSA-AES128-SHA + // AES128-GCM-SHA256 + // AES128-SHA + // ECDHE-ECDSA-AES256-GCM-SHA384 + // ECDHE-RSA-AES256-GCM-SHA384 + // ECDHE-ECDSA-AES256-SHA + // ECDHE-RSA-AES256-SHA + // AES256-GCM-SHA384 + // AES256-SHA + repeated string cipher_suites = 3; + + // If specified, the TLS connection will only support the specified ECDH + // curves. If not specified, the default curves will be used. + // + // In non-FIPS builds, the default curves are: + // + // .. code-block:: none + // + // X25519 + // P-256 + // + // In builds using :ref:`BoringSSL FIPS `, the default curve is: + // + // .. code-block:: none + // + // P-256 + repeated string ecdh_curves = 4; +} + +// BoringSSL private key method configuration. The private key methods are used for external +// (potentially asynchronous) signing and decryption operations. Some use cases for private key +// methods would be TPM support and TLS acceleration. 
+message PrivateKeyProvider { + // Private key method provider name. The name must match a + // supported private key method provider type. + string provider_name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Private key method provider specific configuration. + oneof config_type { + google.protobuf.Struct config = 2 [deprecated = true, (udpa.annotations.sensitive) = true]; + + google.protobuf.Any typed_config = 3 [(udpa.annotations.sensitive) = true]; + } +} + +// [#next-free-field: 7] +message TlsCertificate { + // The TLS certificate chain. + core.DataSource certificate_chain = 1; + + // The TLS private key. + core.DataSource private_key = 2 [(udpa.annotations.sensitive) = true]; + + // BoringSSL private key method provider. This is an alternative to :ref:`private_key + // ` field. This can't be + // marked as ``oneof`` due to API compatibility reasons. Setting both :ref:`private_key + // ` and + // :ref:`private_key_provider + // ` fields will result in an + // error. + PrivateKeyProvider private_key_provider = 6; + + // The password to decrypt the TLS private key. If this field is not set, it is assumed that the + // TLS private key is not password encrypted. + core.DataSource password = 3 [(udpa.annotations.sensitive) = true]; + + // [#not-implemented-hide:] + core.DataSource ocsp_staple = 4; + + // [#not-implemented-hide:] + repeated core.DataSource signed_certificate_timestamp = 5; +} + +message TlsSessionTicketKeys { + // Keys for encrypting and decrypting TLS session tickets. The + // first key in the array contains the key to encrypt all new sessions created by this context. + // All keys are candidates for decrypting received tickets. This allows for easy rotation of keys + // by, for example, putting the new key first, and the previous key second. 
+ // + // If :ref:`session_ticket_keys ` + // is not specified, the TLS library will still support resuming sessions via tickets, but it will + // use an internally-generated and managed key, so sessions cannot be resumed across hot restarts + // or on different hosts. + // + // Each key must contain exactly 80 bytes of cryptographically-secure random data. For + // example, the output of ``openssl rand 80``. + // + // .. attention:: + // + // Using this feature has serious security considerations and risks. Improper handling of keys + // may result in loss of secrecy in connections, even if ciphers supporting perfect forward + // secrecy are used. See https://www.imperialviolet.org/2013/06/27/botchingpfs.html for some + // discussion. To minimize the risk, you must: + // + // * Keep the session ticket keys at least as secure as your TLS certificate private keys + // * Rotate session ticket keys at least daily, and preferably hourly + // * Always generate keys using a cryptographically-secure random data source + repeated core.DataSource keys = 1 + [(validate.rules).repeated = {min_items: 1}, (udpa.annotations.sensitive) = true]; +} + +// [#next-free-field: 11] +message CertificateValidationContext { + // Peer certificate verification mode. + enum TrustChainVerification { + // Perform default certificate verification (e.g., against CA / verification lists) + VERIFY_TRUST_CHAIN = 0; + + // Connections where the certificate fails verification will be permitted. + // For HTTP connections, the result of certificate verification can be used in route matching. ( + // see :ref:`validated ` ). + ACCEPT_UNTRUSTED = 1; + } + + // TLS certificate data containing certificate authority certificates to use in verifying + // a presented peer certificate (e.g. server certificate for clusters or client certificate + // for listeners). If not specified and a peer certificate is presented it will not be + // verified. 
By default, a client certificate is optional, unless one of the additional + // options (:ref:`require_client_certificate + // `, + // :ref:`verify_certificate_spki + // `, + // :ref:`verify_certificate_hash + // `, or + // :ref:`match_subject_alt_names + // `) is also + // specified. + // + // It can optionally contain certificate revocation lists, in which case Envoy will verify + // that the presented peer certificate has not been revoked by one of the included CRLs. + // + // See :ref:`the TLS overview ` for a list of common + // system CA locations. + core.DataSource trusted_ca = 1; + + // An optional list of base64-encoded SHA-256 hashes. If specified, Envoy will verify that the + // SHA-256 of the DER-encoded Subject Public Key Information (SPKI) of the presented certificate + // matches one of the specified values. + // + // A base64-encoded SHA-256 of the Subject Public Key Information (SPKI) of the certificate + // can be generated with the following command: + // + // .. code-block:: bash + // + // $ openssl x509 -in path/to/client.crt -noout -pubkey + // | openssl pkey -pubin -outform DER + // | openssl dgst -sha256 -binary + // | openssl enc -base64 + // NvqYIYSbgK2vCJpQhObf77vv+bQWtc5ek5RIOwPiC9A= + // + // This is the format used in HTTP Public Key Pinning. + // + // When both: + // :ref:`verify_certificate_hash + // ` and + // :ref:`verify_certificate_spki + // ` are specified, + // a hash matching value from either of the lists will result in the certificate being accepted. + // + // .. attention:: + // + // This option is preferred over :ref:`verify_certificate_hash + // `, + // because SPKI is tied to a private key, so it doesn't change when the certificate + // is renewed using the same private key. + repeated string verify_certificate_spki = 3 + [(validate.rules).repeated = {items {string {min_bytes: 44 max_bytes: 44}}}]; + + // An optional list of hex-encoded SHA-256 hashes. 
If specified, Envoy will verify that + // the SHA-256 of the DER-encoded presented certificate matches one of the specified values. + // + // A hex-encoded SHA-256 of the certificate can be generated with the following command: + // + // .. code-block:: bash + // + // $ openssl x509 -in path/to/client.crt -outform DER | openssl dgst -sha256 | cut -d" " -f2 + // df6ff72fe9116521268f6f2dd4966f51df479883fe7037b39f75916ac3049d1a + // + // A long hex-encoded and colon-separated SHA-256 (a.k.a. "fingerprint") of the certificate + // can be generated with the following command: + // + // .. code-block:: bash + // + // $ openssl x509 -in path/to/client.crt -noout -fingerprint -sha256 | cut -d"=" -f2 + // DF:6F:F7:2F:E9:11:65:21:26:8F:6F:2D:D4:96:6F:51:DF:47:98:83:FE:70:37:B3:9F:75:91:6A:C3:04:9D:1A + // + // Both of those formats are acceptable. + // + // When both: + // :ref:`verify_certificate_hash + // ` and + // :ref:`verify_certificate_spki + // ` are specified, + // a hash matching value from either of the lists will result in the certificate being accepted. + repeated string verify_certificate_hash = 2 + [(validate.rules).repeated = {items {string {min_bytes: 64 max_bytes: 95}}}]; + + // An optional list of Subject Alternative Names. If specified, Envoy will verify that the + // Subject Alternative Name of the presented certificate matches one of the specified values. + // + // .. attention:: + // + // Subject Alternative Names are easily spoofable and verifying only them is insecure, + // therefore this option must be used together with :ref:`trusted_ca + // `. + repeated string verify_subject_alt_name = 4 [deprecated = true]; + + // An optional list of Subject Alternative name matchers. Envoy will verify that the + // Subject Alternative Name of the presented certificate matches one of the specified matches. 
+ // + // When a certificate has wildcard DNS SAN entries, to match a specific client, it should be + // configured with exact match type in the :ref:`string matcher `. + // For example if the certificate has "\*.example.com" as DNS SAN entry, to allow only "api.example.com", + // it should be configured as shown below. + // + // .. code-block:: yaml + // + // match_subject_alt_names: + // exact: "api.example.com" + // + // .. attention:: + // + // Subject Alternative Names are easily spoofable and verifying only them is insecure, + // therefore this option must be used together with :ref:`trusted_ca + // `. + repeated type.matcher.StringMatcher match_subject_alt_names = 9; + + // [#not-implemented-hide:] Must present a signed time-stamped OCSP response. + google.protobuf.BoolValue require_ocsp_staple = 5; + + // [#not-implemented-hide:] Must present signed certificate time-stamp. + google.protobuf.BoolValue require_signed_certificate_timestamp = 6; + + // An optional `certificate revocation list + // `_ + // (in PEM format). If specified, Envoy will verify that the presented peer + // certificate has not been revoked by this CRL. If this DataSource contains + // multiple CRLs, all of them will be used. + core.DataSource crl = 7; + + // If specified, Envoy will not reject expired certificates. + bool allow_expired_certificate = 8; + + // Certificate trust chain verification mode. 
+ TrustChainVerification trust_chain_verification = 10 + [(validate.rules).enum = {defined_only: true}]; +} diff --git a/generated_api_shadow/envoy/api/v2/auth/secret.proto b/generated_api_shadow/envoy/api/v2/auth/secret.proto new file mode 100644 index 0000000000000..3a6d8cf7dcb67 --- /dev/null +++ b/generated_api_shadow/envoy/api/v2/auth/secret.proto @@ -0,0 +1,50 @@ +syntax = "proto3"; + +package envoy.api.v2.auth; + +import "envoy/api/v2/auth/common.proto"; +import "envoy/api/v2/core/base.proto"; +import "envoy/api/v2/core/config_source.proto"; + +import "udpa/annotations/migrate.proto"; +import "udpa/annotations/sensitive.proto"; +import "udpa/annotations/status.proto"; + +option java_package = "io.envoyproxy.envoy.api.v2.auth"; +option java_outer_classname = "SecretProto"; +option java_multiple_files = true; +option (udpa.annotations.file_migrate).move_to_package = + "envoy.extensions.transport_sockets.tls.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; + +// [#protodoc-title: Secrets configuration] + +message GenericSecret { + // Secret of generic type and is available to filters. + core.DataSource secret = 1 [(udpa.annotations.sensitive) = true]; +} + +message SdsSecretConfig { + // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. + // When both name and config are specified, then secret can be fetched and/or reloaded via + // SDS. When only name is specified, then secret will be loaded from static resources. + string name = 1; + + core.ConfigSource sds_config = 2; +} + +// [#next-free-field: 6] +message Secret { + // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. 
+ string name = 1; + + oneof type { + TlsCertificate tls_certificate = 2; + + TlsSessionTicketKeys session_ticket_keys = 3; + + CertificateValidationContext validation_context = 4; + + GenericSecret generic_secret = 5; + } +} diff --git a/generated_api_shadow/envoy/api/v2/auth/tls.proto b/generated_api_shadow/envoy/api/v2/auth/tls.proto new file mode 100644 index 0000000000000..201973a2b9de8 --- /dev/null +++ b/generated_api_shadow/envoy/api/v2/auth/tls.proto @@ -0,0 +1,152 @@ +syntax = "proto3"; + +package envoy.api.v2.auth; + +import "envoy/api/v2/auth/common.proto"; +import "envoy/api/v2/auth/secret.proto"; + +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.api.v2.auth"; +option java_outer_classname = "TlsProto"; +option java_multiple_files = true; +option (udpa.annotations.file_migrate).move_to_package = + "envoy.extensions.transport_sockets.tls.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; + +// [#protodoc-title: TLS transport socket] +// [#extension: envoy.transport_sockets.tls] +// The TLS contexts below provide the transport socket configuration for upstream/downstream TLS. + +message UpstreamTlsContext { + // Common TLS context settings. + // + // .. attention:: + // + // Server certificate verification is not enabled by default. Configure + // :ref:`trusted_ca` to enable + // verification. + CommonTlsContext common_tls_context = 1; + + // SNI string to use when creating TLS backend connections. + string sni = 2 [(validate.rules).string = {max_bytes: 255}]; + + // If true, server-initiated TLS renegotiation will be allowed. + // + // .. attention:: + // + // TLS renegotiation is considered insecure and shouldn't be used unless absolutely necessary. 
+ bool allow_renegotiation = 3; + + // Maximum number of session keys (Pre-Shared Keys for TLSv1.3+, Session IDs and Session Tickets + // for TLSv1.2 and older) to store for the purpose of session resumption. + // + // Defaults to 1, setting this to 0 disables session resumption. + google.protobuf.UInt32Value max_session_keys = 4; +} + +// [#next-free-field: 8] +message DownstreamTlsContext { + // Common TLS context settings. + CommonTlsContext common_tls_context = 1; + + // If specified, Envoy will reject connections without a valid client + // certificate. + google.protobuf.BoolValue require_client_certificate = 2; + + // If specified, Envoy will reject connections without a valid and matching SNI. + // [#not-implemented-hide:] + google.protobuf.BoolValue require_sni = 3; + + oneof session_ticket_keys_type { + // TLS session ticket key settings. + TlsSessionTicketKeys session_ticket_keys = 4; + + // Config for fetching TLS session ticket keys via SDS API. + SdsSecretConfig session_ticket_keys_sds_secret_config = 5; + + // Config for controlling stateless TLS session resumption: setting this to true will cause the TLS + // server to not issue TLS session tickets for the purposes of stateless TLS session resumption. + // If set to false, the TLS server will issue TLS session tickets and encrypt/decrypt them using + // the keys specified through either :ref:`session_ticket_keys ` + // or :ref:`session_ticket_keys_sds_secret_config `. + // If this config is set to false and no keys are explicitly configured, the TLS server will issue + // TLS session tickets and encrypt/decrypt them using an internally-generated and managed key, with the + // implication that sessions cannot be resumed across hot restarts or on different hosts. 
+ bool disable_stateless_session_resumption = 7; + } + + // If specified, session_timeout will change maximum lifetime (in seconds) of TLS session + // Currently this value is used as a hint to `TLS session ticket lifetime (for TLSv1.2) + // ` + // only seconds could be specified (fractional seconds are going to be ignored). + google.protobuf.Duration session_timeout = 6 [(validate.rules).duration = { + lt {seconds: 4294967296} + gte {} + }]; +} + +// TLS context shared by both client and server TLS contexts. +// [#next-free-field: 9] +message CommonTlsContext { + message CombinedCertificateValidationContext { + // How to validate peer certificates. + CertificateValidationContext default_validation_context = 1 + [(validate.rules).message = {required: true}]; + + // Config for fetching validation context via SDS API. + SdsSecretConfig validation_context_sds_secret_config = 2 + [(validate.rules).message = {required: true}]; + } + + reserved 5; + + // TLS protocol versions, cipher suites etc. + TlsParameters tls_params = 1; + + // :ref:`Multiple TLS certificates ` can be associated with the + // same context to allow both RSA and ECDSA certificates. + // + // Only a single TLS certificate is supported in client contexts. In server contexts, the first + // RSA certificate is used for clients that only support RSA and the first ECDSA certificate is + // used for clients that support ECDSA. + repeated TlsCertificate tls_certificates = 2; + + // Configs for fetching TLS certificates via SDS API. + repeated SdsSecretConfig tls_certificate_sds_secret_configs = 6 + [(validate.rules).repeated = {max_items: 1}]; + + oneof validation_context_type { + // How to validate peer certificates. + CertificateValidationContext validation_context = 3; + + // Config for fetching validation context via SDS API. + SdsSecretConfig validation_context_sds_secret_config = 7; + + // Combined certificate validation context holds a default CertificateValidationContext + // and SDS config. 
When SDS server returns dynamic CertificateValidationContext, both dynamic + // and default CertificateValidationContext are merged into a new CertificateValidationContext + // for validation. This merge is done by Message::MergeFrom(), so dynamic + // CertificateValidationContext overwrites singular fields in default + // CertificateValidationContext, and concatenates repeated fields to default + // CertificateValidationContext, and logical OR is applied to boolean fields. + CombinedCertificateValidationContext combined_validation_context = 8; + } + + // Supplies the list of ALPN protocols that the listener should expose. In + // practice this is likely to be set to one of two values (see the + // :ref:`codec_type + // ` + // parameter in the HTTP connection manager for more information): + // + // * "h2,http/1.1" If the listener is going to support both HTTP/2 and HTTP/1.1. + // * "http/1.1" If the listener is only going to support HTTP/1.1. + // + // There is no default for this parameter. If empty, Envoy will not expose ALPN. + repeated string alpn_protocols = 4; +} diff --git a/generated_api_shadow/envoy/api/v2/cluster.proto b/generated_api_shadow/envoy/api/v2/cluster.proto index 5de5c20df570d..c95de62c128d4 100644 --- a/generated_api_shadow/envoy/api/v2/cluster.proto +++ b/generated_api_shadow/envoy/api/v2/cluster.proto @@ -2,7 +2,7 @@ syntax = "proto3"; package envoy.api.v2; -import "envoy/api/v2/auth/cert.proto"; +import "envoy/api/v2/auth/tls.proto"; import "envoy/api/v2/cluster/circuit_breaker.proto"; import "envoy/api/v2/cluster/filter.proto"; import "envoy/api/v2/cluster/outlier_detection.proto"; @@ -471,7 +471,7 @@ message Cluster { reserved 12, 15; // Configuration to use different transport sockets for different endpoints. - // The entry of *envoy.transport_socket* in the + // The entry of *envoy.transport_socket_match* in the // :ref:`LbEndpoint.Metadata ` // is used to match against the transport sockets as they appear in the list. 
The first // :ref:`match ` is used. @@ -491,14 +491,14 @@ message Cluster { // transport_socket: // name: envoy.transport_sockets.raw_buffer // - // Connections to the endpoints whose metadata value under *envoy.transport_socket* + // Connections to the endpoints whose metadata value under *envoy.transport_socket_match* // having "acceptMTLS"/"true" key/value pair use the "enableMTLS" socket configuration. // // If a :ref:`socket match ` with empty match // criteria is provided, that always match any endpoint. For example, the "defaultToPlaintext" // socket match in case above. // - // If an endpoint metadata's value under *envoy.transport_socket* does not match any + // If an endpoint metadata's value under *envoy.transport_socket_match* does not match any // *TransportSocketMatch*, socket configuration fallbacks to use the *tls_context* or // *transport_socket* specified in this cluster. // diff --git a/generated_api_shadow/envoy/api/v2/core/base.proto b/generated_api_shadow/envoy/api/v2/core/base.proto index b7145d77efd3c..39846bc658a88 100644 --- a/generated_api_shadow/envoy/api/v2/core/base.proto +++ b/generated_api_shadow/envoy/api/v2/core/base.proto @@ -93,7 +93,7 @@ message BuildVersion { type.SemanticVersion version = 1; // Free-form build information. - // Envoy defines several well known keys in the source/common/common/version.h file + // Envoy defines several well known keys in the source/common/version/version.h file google.protobuf.Struct metadata = 2; } diff --git a/generated_api_shadow/envoy/api/v2/core/config_source.proto b/generated_api_shadow/envoy/api/v2/core/config_source.proto index fa42a7aeec1ce..7032b2c10d878 100644 --- a/generated_api_shadow/envoy/api/v2/core/config_source.proto +++ b/generated_api_shadow/envoy/api/v2/core/config_source.proto @@ -57,10 +57,6 @@ message ApiConfigSource { // Using the delta xDS gRPC service, i.e. DeltaDiscovery{Request,Response} // rather than Discovery{Request,Response}. 
Rather than sending Envoy the entire state // with every update, the xDS server only sends what has changed since the last update. - // - // DELTA_GRPC is not yet entirely implemented! Initially, only CDS is available. - // Do not use for other xDSes. - // [#comment:TODO(fredlas) update/remove this warning when appropriate.] DELTA_GRPC = 3; } @@ -110,6 +106,9 @@ message AggregatedConfigSource { // set in :ref:`ConfigSource ` can be used to // specify that other data can be obtained from the same server. message SelfConfigSource { + // API version for xDS transport protocol. This describes the xDS gRPC/REST + // endpoint and version of [Delta]DiscoveryRequest/Response used on the wire. + ApiVersion transport_api_version = 1 [(validate.rules).enum = {defined_only: true}]; } // Rate Limit settings to be applied for discovery requests made by Envoy. diff --git a/generated_api_shadow/envoy/api/v2/core/protocol.proto b/generated_api_shadow/envoy/api/v2/core/protocol.proto index 5838ca7440759..9c47e388ee1af 100644 --- a/generated_api_shadow/envoy/api/v2/core/protocol.proto +++ b/generated_api_shadow/envoy/api/v2/core/protocol.proto @@ -85,8 +85,6 @@ message HttpProtocolOptions { // Total duration to keep alive an HTTP request/response stream. If the time limit is reached the stream will be // reset independent of any other timeouts. If not specified, this value is not set. - // The current implementation implements this timeout on downstream connections only. - // [#comment:TODO(shikugawa): add this functionality to upstream.] google.protobuf.Duration max_stream_duration = 4; // Action to take when a client request with a header name containing underscore characters is received. 
diff --git a/generated_api_shadow/envoy/api/v2/endpoint.proto b/generated_api_shadow/envoy/api/v2/endpoint.proto index e233b0e7d34ea..92a2b13a8947e 100644 --- a/generated_api_shadow/envoy/api/v2/endpoint.proto +++ b/generated_api_shadow/envoy/api/v2/endpoint.proto @@ -36,6 +36,7 @@ message ClusterLoadAssignment { // Load balancing policy settings. // [#next-free-field: 6] message Policy { + // [#not-implemented-hide:] message DropOverload { // Identifier for the policy specifying the drop. string category = 1 [(validate.rules).string = {min_bytes: 1}]; @@ -65,6 +66,7 @@ message ClusterLoadAssignment { // "throttle"_drop = 60% // "lb"_drop = 20% // 50% of the remaining 'actual' load, which is 40%. // actual_outgoing_load = 20% // remaining after applying all categories. + // [#not-implemented-hide:] repeated DropOverload drop_overloads = 2; // Priority levels and localities are considered overprovisioned with this diff --git a/generated_api_shadow/envoy/api/v2/listener/listener_components.proto b/generated_api_shadow/envoy/api/v2/listener/listener_components.proto index fe449c63358a1..a6791c86cd0be 100644 --- a/generated_api_shadow/envoy/api/v2/listener/listener_components.proto +++ b/generated_api_shadow/envoy/api/v2/listener/listener_components.proto @@ -2,7 +2,7 @@ syntax = "proto3"; package envoy.api.v2.listener; -import "envoy/api/v2/auth/cert.proto"; +import "envoy/api/v2/auth/tls.proto"; import "envoy/api/v2/core/address.proto"; import "envoy/api/v2/core/base.proto"; import "envoy/type/range.proto"; diff --git a/generated_api_shadow/envoy/api/v2/route/route_components.proto b/generated_api_shadow/envoy/api/v2/route/route_components.proto index c890134414e55..007f71d57cb51 100644 --- a/generated_api_shadow/envoy/api/v2/route/route_components.proto +++ b/generated_api_shadow/envoy/api/v2/route/route_components.proto @@ -1177,6 +1177,21 @@ message RedirectAction { oneof path_rewrite_specifier { // The path portion of the URL will be swapped with this value. 
+ // Please note that query string in path_redirect will override the + // request's query string and will not be stripped. + // + // For example, let's say we have the following routes: + // + // - match: { path: "/old-path-1" } + // redirect: { path_redirect: "/new-path-1" } + // - match: { path: "/old-path-2" } + // redirect: { path_redirect: "/new-path-2", strip-query: "true" } + // - match: { path: "/old-path-3" } + // redirect: { path_redirect: "/new-path-3?foo=1", strip_query: "true" } + // + // 1. if request uri is "/old-path-1?bar=1", users will be redirected to "/new-path-1?bar=1" + // 2. if request uri is "/old-path-2?bar=1", users will be redirected to "/new-path-2" + // 3. if request uri is "/old-path-3?bar=1", users will be redirected to "/new-path-3?foo=1" string path_redirect = 2 [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; diff --git a/generated_api_shadow/envoy/config/accesslog/v3/BUILD b/generated_api_shadow/envoy/config/accesslog/v3/BUILD index 92e9f39492511..518ca23126cd1 100644 --- a/generated_api_shadow/envoy/config/accesslog/v3/BUILD +++ b/generated_api_shadow/envoy/config/accesslog/v3/BUILD @@ -9,6 +9,7 @@ api_proto_package( "//envoy/config/core/v3:pkg", "//envoy/config/filter/accesslog/v2:pkg", "//envoy/config/route/v3:pkg", + "//envoy/type/matcher/v3:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], diff --git a/generated_api_shadow/envoy/config/accesslog/v3/accesslog.proto b/generated_api_shadow/envoy/config/accesslog/v3/accesslog.proto index da29f198802f2..f1a8c29a49218 100644 --- a/generated_api_shadow/envoy/config/accesslog/v3/accesslog.proto +++ b/generated_api_shadow/envoy/config/accesslog/v3/accesslog.proto @@ -4,10 +4,12 @@ package envoy.config.accesslog.v3; import "envoy/config/core/v3/base.proto"; import "envoy/config/route/v3/route_components.proto"; +import "envoy/type/matcher/v3/metadata.proto"; import "envoy/type/v3/percent.proto"; import 
"google/protobuf/any.proto"; import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; @@ -35,8 +37,8 @@ message AccessLog { // Filter which is used to determine if the access log needs to be written. AccessLogFilter filter = 2; - // Custom configuration that depends on the access log being instantiated. Built-in - // configurations include: + // Custom configuration that depends on the access log being instantiated. + // Built-in configurations include: // // #. "envoy.access_loggers.file": :ref:`FileAccessLog // ` @@ -51,7 +53,7 @@ message AccessLog { } } -// [#next-free-field: 12] +// [#next-free-field: 13] message AccessLogFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.accesslog.v2.AccessLogFilter"; @@ -91,6 +93,9 @@ message AccessLogFilter { // Extension filter. ExtensionFilter extension_filter = 11; + + // Metadata Filter + MetadataFilter metadata_filter = 12; } } @@ -154,25 +159,30 @@ message RuntimeFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.accesslog.v2.RuntimeFilter"; - // Runtime key to get an optional overridden numerator for use in the *percent_sampled* field. - // If found in runtime, this value will replace the default numerator. + // Runtime key to get an optional overridden numerator for use in the + // *percent_sampled* field. If found in runtime, this value will replace the + // default numerator. string runtime_key = 1 [(validate.rules).string = {min_bytes: 1}]; - // The default sampling percentage. If not specified, defaults to 0% with denominator of 100. + // The default sampling percentage. If not specified, defaults to 0% with + // denominator of 100. type.v3.FractionalPercent percent_sampled = 2; // By default, sampling pivots on the header - // :ref:`x-request-id` being present. 
If - // :ref:`x-request-id` is present, the filter will - // consistently sample across multiple hosts based on the runtime key value and the value - // extracted from :ref:`x-request-id`. If it is - // missing, or *use_independent_randomness* is set to true, the filter will randomly sample based - // on the runtime key value alone. *use_independent_randomness* can be used for logging kill - // switches within complex nested :ref:`AndFilter + // :ref:`x-request-id` being + // present. If :ref:`x-request-id` + // is present, the filter will consistently sample across multiple hosts based + // on the runtime key value and the value extracted from + // :ref:`x-request-id`. If it is + // missing, or *use_independent_randomness* is set to true, the filter will + // randomly sample based on the runtime key value alone. + // *use_independent_randomness* can be used for logging kill switches within + // complex nested :ref:`AndFilter // ` and :ref:`OrFilter - // ` blocks that are easier to reason about - // from a probability perspective (i.e., setting to true will cause the filter to behave like - // an independent random variable when composed within logical operator filters). + // ` blocks that are easier to + // reason about from a probability perspective (i.e., setting to true will + // cause the filter to behave like an independent random variable when + // composed within logical operator filters). bool use_independent_randomness = 3; } @@ -201,21 +211,22 @@ message HeaderFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.accesslog.v2.HeaderFilter"; - // Only requests with a header which matches the specified HeaderMatcher will pass the filter - // check. + // Only requests with a header which matches the specified HeaderMatcher will + // pass the filter check. route.v3.HeaderMatcher header = 1 [(validate.rules).message = {required: true}]; } // Filters requests that received responses with an Envoy response flag set. 
// A list of the response flags can be found -// in the access log formatter :ref:`documentation`. +// in the access log formatter +// :ref:`documentation`. message ResponseFlagFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.accesslog.v2.ResponseFlagFilter"; - // Only responses with the any of the flags listed in this field will be logged. - // This field is optional. If it is not specified, then any response flag will pass - // the filter check. + // Only responses with the any of the flags listed in this field will be + // logged. This field is optional. If it is not specified, then any response + // flag will pass the filter check. repeated string flags = 1 [(validate.rules).repeated = { items { string { @@ -238,13 +249,16 @@ message ResponseFlagFilter { in: "SI" in: "IH" in: "DPE" + in: "UMSDR" + in: "RFCF" + in: "NFCF" } } }]; } -// Filters gRPC requests based on their response status. If a gRPC status is not provided, the -// filter will infer the status from the HTTP status code. +// Filters gRPC requests based on their response status. If a gRPC status is not +// provided, the filter will infer the status from the HTTP status code. message GrpcStatusFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.accesslog.v2.GrpcStatusFilter"; @@ -272,11 +286,32 @@ message GrpcStatusFilter { // Logs only responses that have any one of the gRPC statuses in this field. repeated Status statuses = 1 [(validate.rules).repeated = {items {enum {defined_only: true}}}]; - // If included and set to true, the filter will instead block all responses with a gRPC status or - // inferred gRPC status enumerated in statuses, and allow all other responses. + // If included and set to true, the filter will instead block all responses + // with a gRPC status or inferred gRPC status enumerated in statuses, and + // allow all other responses. bool exclude = 2; } +// Filters based on matching dynamic metadata. 
+// If the matcher path and key correspond to an existing key in dynamic +// metadata, the request is logged only if the matcher value is equal to the +// metadata value. If the matcher path and key *do not* correspond to an +// existing key in dynamic metadata, the request is logged only if +// match_if_key_not_found is "true" or unset. +message MetadataFilter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.filter.accesslog.v2.MetadataFilter"; + + // Matcher to check metadata for specified value. For example, to match on the + // access_log_hint metadata, set the filter to "envoy.common" and the path to + // "access_log_hint", and the value to "true". + type.matcher.v3.MetadataMatcher matcher = 1; + + // Default result if the key does not exist in dynamic metadata: if unset or + // true, then log; if false, then don't log. + google.protobuf.BoolValue match_if_key_not_found = 2; +} + // Extension filter is statically registered at runtime. message ExtensionFilter { option (udpa.annotations.versioning).previous_message_type = diff --git a/generated_api_shadow/envoy/config/accesslog/v4alpha/BUILD b/generated_api_shadow/envoy/config/accesslog/v4alpha/BUILD new file mode 100644 index 0000000000000..e426e922fa726 --- /dev/null +++ b/generated_api_shadow/envoy/config/accesslog/v4alpha/BUILD @@ -0,0 +1,16 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/accesslog/v3:pkg", + "//envoy/config/core/v4alpha:pkg", + "//envoy/config/route/v4alpha:pkg", + "//envoy/type/matcher/v4alpha:pkg", + "//envoy/type/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/config/accesslog/v4alpha/accesslog.proto b/generated_api_shadow/envoy/config/accesslog/v4alpha/accesslog.proto new file mode 100644 index 0000000000000..bd4bcd48c4b4a --- /dev/null +++ b/generated_api_shadow/envoy/config/accesslog/v4alpha/accesslog.proto @@ -0,0 +1,333 @@ +syntax = "proto3"; + +package envoy.config.accesslog.v4alpha; + +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/config/route/v4alpha/route_components.proto"; +import "envoy/type/matcher/v4alpha/metadata.proto"; +import "envoy/type/v3/percent.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.accesslog.v4alpha"; +option java_outer_classname = "AccesslogProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Common access log types] + +message AccessLog { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.accesslog.v3.AccessLog"; + + reserved 3; + + reserved "config"; + + // The name of the access log implementation to instantiate. The name must + // match a statically registered access log. Current built-in loggers include: + // + // #. "envoy.access_loggers.file" + // #. "envoy.access_loggers.http_grpc" + // #. 
"envoy.access_loggers.tcp_grpc" + string name = 1; + + // Filter which is used to determine if the access log needs to be written. + AccessLogFilter filter = 2; + + // Custom configuration that depends on the access log being instantiated. + // Built-in configurations include: + // + // #. "envoy.access_loggers.file": :ref:`FileAccessLog + // ` + // #. "envoy.access_loggers.http_grpc": :ref:`HttpGrpcAccessLogConfig + // ` + // #. "envoy.access_loggers.tcp_grpc": :ref:`TcpGrpcAccessLogConfig + // ` + oneof config_type { + google.protobuf.Any typed_config = 4; + } +} + +// [#next-free-field: 13] +message AccessLogFilter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.accesslog.v3.AccessLogFilter"; + + oneof filter_specifier { + option (validate.required) = true; + + // Status code filter. + StatusCodeFilter status_code_filter = 1; + + // Duration filter. + DurationFilter duration_filter = 2; + + // Not health check filter. + NotHealthCheckFilter not_health_check_filter = 3; + + // Traceable filter. + TraceableFilter traceable_filter = 4; + + // Runtime filter. + RuntimeFilter runtime_filter = 5; + + // And filter. + AndFilter and_filter = 6; + + // Or filter. + OrFilter or_filter = 7; + + // Header filter. + HeaderFilter header_filter = 8; + + // Response flag filter. + ResponseFlagFilter response_flag_filter = 9; + + // gRPC status filter. + GrpcStatusFilter grpc_status_filter = 10; + + // Extension filter. + ExtensionFilter extension_filter = 11; + + // Metadata Filter + MetadataFilter metadata_filter = 12; + } +} + +// Filter on an integer comparison. +message ComparisonFilter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.accesslog.v3.ComparisonFilter"; + + enum Op { + // = + EQ = 0; + + // >= + GE = 1; + + // <= + LE = 2; + } + + // Comparison operator. + Op op = 1 [(validate.rules).enum = {defined_only: true}]; + + // Value to compare against. 
+ core.v4alpha.RuntimeUInt32 value = 2; +} + +// Filters on HTTP response/status code. +message StatusCodeFilter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.accesslog.v3.StatusCodeFilter"; + + // Comparison. + ComparisonFilter comparison = 1 [(validate.rules).message = {required: true}]; +} + +// Filters on total request duration in milliseconds. +message DurationFilter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.accesslog.v3.DurationFilter"; + + // Comparison. + ComparisonFilter comparison = 1 [(validate.rules).message = {required: true}]; +} + +// Filters for requests that are not health check requests. A health check +// request is marked by the health check filter. +message NotHealthCheckFilter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.accesslog.v3.NotHealthCheckFilter"; +} + +// Filters for requests that are traceable. See the tracing overview for more +// information on how a request becomes traceable. +message TraceableFilter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.accesslog.v3.TraceableFilter"; +} + +// Filters for random sampling of requests. +message RuntimeFilter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.accesslog.v3.RuntimeFilter"; + + // Runtime key to get an optional overridden numerator for use in the + // *percent_sampled* field. If found in runtime, this value will replace the + // default numerator. + string runtime_key = 1 [(validate.rules).string = {min_bytes: 1}]; + + // The default sampling percentage. If not specified, defaults to 0% with + // denominator of 100. + type.v3.FractionalPercent percent_sampled = 2; + + // By default, sampling pivots on the header + // :ref:`x-request-id` being + // present. 
If :ref:`x-request-id` + // is present, the filter will consistently sample across multiple hosts based + // on the runtime key value and the value extracted from + // :ref:`x-request-id`. If it is + // missing, or *use_independent_randomness* is set to true, the filter will + // randomly sample based on the runtime key value alone. + // *use_independent_randomness* can be used for logging kill switches within + // complex nested :ref:`AndFilter + // ` and :ref:`OrFilter + // ` blocks that are easier to + // reason about from a probability perspective (i.e., setting to true will + // cause the filter to behave like an independent random variable when + // composed within logical operator filters). + bool use_independent_randomness = 3; +} + +// Performs a logical “and” operation on the result of each filter in filters. +// Filters are evaluated sequentially and if one of them returns false, the +// filter returns false immediately. +message AndFilter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.accesslog.v3.AndFilter"; + + repeated AccessLogFilter filters = 1 [(validate.rules).repeated = {min_items: 2}]; +} + +// Performs a logical “or” operation on the result of each individual filter. +// Filters are evaluated sequentially and if one of them returns true, the +// filter returns true immediately. +message OrFilter { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.accesslog.v3.OrFilter"; + + repeated AccessLogFilter filters = 2 [(validate.rules).repeated = {min_items: 2}]; +} + +// Filters requests based on the presence or value of a request header. +message HeaderFilter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.accesslog.v3.HeaderFilter"; + + // Only requests with a header which matches the specified HeaderMatcher will + // pass the filter check. 
+ route.v4alpha.HeaderMatcher header = 1 [(validate.rules).message = {required: true}]; +} + +// Filters requests that received responses with an Envoy response flag set. +// A list of the response flags can be found +// in the access log formatter +// :ref:`documentation`. +message ResponseFlagFilter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.accesslog.v3.ResponseFlagFilter"; + + // Only responses with the any of the flags listed in this field will be + // logged. This field is optional. If it is not specified, then any response + // flag will pass the filter check. + repeated string flags = 1 [(validate.rules).repeated = { + items { + string { + in: "LH" + in: "UH" + in: "UT" + in: "LR" + in: "UR" + in: "UF" + in: "UC" + in: "UO" + in: "NR" + in: "DI" + in: "FI" + in: "RL" + in: "UAEX" + in: "RLSE" + in: "DC" + in: "URX" + in: "SI" + in: "IH" + in: "DPE" + in: "UMSDR" + in: "RFCF" + in: "NFCF" + } + } + }]; +} + +// Filters gRPC requests based on their response status. If a gRPC status is not +// provided, the filter will infer the status from the HTTP status code. +message GrpcStatusFilter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.accesslog.v3.GrpcStatusFilter"; + + enum Status { + OK = 0; + CANCELED = 1; + UNKNOWN = 2; + INVALID_ARGUMENT = 3; + DEADLINE_EXCEEDED = 4; + NOT_FOUND = 5; + ALREADY_EXISTS = 6; + PERMISSION_DENIED = 7; + RESOURCE_EXHAUSTED = 8; + FAILED_PRECONDITION = 9; + ABORTED = 10; + OUT_OF_RANGE = 11; + UNIMPLEMENTED = 12; + INTERNAL = 13; + UNAVAILABLE = 14; + DATA_LOSS = 15; + UNAUTHENTICATED = 16; + } + + // Logs only responses that have any one of the gRPC statuses in this field. + repeated Status statuses = 1 [(validate.rules).repeated = {items {enum {defined_only: true}}}]; + + // If included and set to true, the filter will instead block all responses + // with a gRPC status or inferred gRPC status enumerated in statuses, and + // allow all other responses. 
+ bool exclude = 2; +} + +// Filters based on matching dynamic metadata. +// If the matcher path and key correspond to an existing key in dynamic +// metadata, the request is logged only if the matcher value is equal to the +// metadata value. If the matcher path and key *do not* correspond to an +// existing key in dynamic metadata, the request is logged only if +// match_if_key_not_found is "true" or unset. +message MetadataFilter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.accesslog.v3.MetadataFilter"; + + // Matcher to check metadata for specified value. For example, to match on the + // access_log_hint metadata, set the filter to "envoy.common" and the path to + // "access_log_hint", and the value to "true". + type.matcher.v4alpha.MetadataMatcher matcher = 1; + + // Default result if the key does not exist in dynamic metadata: if unset or + // true, then log; if false, then don't log. + google.protobuf.BoolValue match_if_key_not_found = 2; +} + +// Extension filter is statically registered at runtime. +message ExtensionFilter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.accesslog.v3.ExtensionFilter"; + + reserved 2; + + reserved "config"; + + // The name of the filter implementation to instantiate. The name must + // match a statically registered filter. + string name = 1; + + // Custom configuration that depends on the filter being instantiated. 
+ oneof config_type { + google.protobuf.Any typed_config = 3; + } +} diff --git a/generated_api_shadow/envoy/config/bootstrap/v2/bootstrap.proto b/generated_api_shadow/envoy/config/bootstrap/v2/bootstrap.proto index 622304483eb2d..da88dce786ae7 100644 --- a/generated_api_shadow/envoy/config/bootstrap/v2/bootstrap.proto +++ b/generated_api_shadow/envoy/config/bootstrap/v2/bootstrap.proto @@ -2,7 +2,7 @@ syntax = "proto3"; package envoy.config.bootstrap.v2; -import "envoy/api/v2/auth/cert.proto"; +import "envoy/api/v2/auth/secret.proto"; import "envoy/api/v2/cluster.proto"; import "envoy/api/v2/core/address.proto"; import "envoy/api/v2/core/base.proto"; diff --git a/generated_api_shadow/envoy/config/bootstrap/v3/BUILD b/generated_api_shadow/envoy/config/bootstrap/v3/BUILD index 645d50d891a2f..63eb22d36ea0c 100644 --- a/generated_api_shadow/envoy/config/bootstrap/v3/BUILD +++ b/generated_api_shadow/envoy/config/bootstrap/v3/BUILD @@ -15,6 +15,8 @@ api_proto_package( "//envoy/config/overload/v3:pkg", "//envoy/config/trace/v3:pkg", "//envoy/extensions/transport_sockets/tls/v3:pkg", + "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//udpa/core/v1:pkg", ], ) diff --git a/generated_api_shadow/envoy/config/bootstrap/v3/bootstrap.proto b/generated_api_shadow/envoy/config/bootstrap/v3/bootstrap.proto index 3b0861d818509..d3cf6d6947cf6 100644 --- a/generated_api_shadow/envoy/config/bootstrap/v3/bootstrap.proto +++ b/generated_api_shadow/envoy/config/bootstrap/v3/bootstrap.proto @@ -7,18 +7,24 @@ import "envoy/config/core/v3/address.proto"; import "envoy/config/core/v3/base.proto"; import "envoy/config/core/v3/config_source.proto"; import "envoy/config/core/v3/event_service_config.proto"; +import "envoy/config/core/v3/extension.proto"; import "envoy/config/core/v3/socket_option.proto"; import "envoy/config/listener/v3/listener.proto"; import "envoy/config/metrics/v3/stats.proto"; import "envoy/config/overload/v3/overload.proto"; 
import "envoy/config/trace/v3/http_tracer.proto"; -import "envoy/extensions/transport_sockets/tls/v3/cert.proto"; +import "envoy/extensions/transport_sockets/tls/v3/secret.proto"; +import "envoy/type/v3/percent.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/core/v1/resource_locator.proto"; + import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/migrate.proto"; +import "udpa/annotations/security.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -34,7 +40,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // ` for more detail. // Bootstrap :ref:`configuration overview `. -// [#next-free-field: 21] +// [#next-free-field: 26] message Bootstrap { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v2.Bootstrap"; @@ -60,6 +66,7 @@ message Bootstrap { repeated envoy.extensions.transport_sockets.tls.v3.Secret secrets = 3; } + // [#next-free-field: 7] message DynamicResources { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v2.Bootstrap.DynamicResources"; @@ -70,11 +77,19 @@ message Bootstrap { // :ref:`LDS ` configuration source. core.v3.ConfigSource lds_config = 1; + // Resource locator for listener collection. + // [#not-implemented-hide:] + udpa.core.v1.ResourceLocator lds_resources_locator = 5; + // All post-bootstrap :ref:`Cluster ` definitions are // provided by a single :ref:`CDS ` // configuration source. core.v3.ConfigSource cds_config = 2; + // Resource locator for cluster collection. + // [#not-implemented-hide:] + udpa.core.v1.ResourceLocator cds_resources_locator = 6; + // A single :ref:`ADS ` source may be optionally // specified. 
This must have :ref:`api_type // ` :ref:`GRPC @@ -143,7 +158,10 @@ message Bootstrap { Admin admin = 12; // Optional overload manager configuration. - overload.v3.OverloadManager overload_manager = 15; + overload.v3.OverloadManager overload_manager = 15 [ + (udpa.annotations.security).configure_for_untrusted_downstream = true, + (udpa.annotations.security).configure_for_untrusted_upstream = true + ]; // Enable :ref:`stats for event dispatcher `, defaults to false. // Note that this records a value for each iteration of the event loop on every thread. This @@ -176,6 +194,45 @@ message Bootstrap { // specified. bool use_tcp_for_dns_lookups = 20; + // Specifies optional bootstrap extensions to be instantiated at startup time. + // Each item contains extension specific configuration. + repeated core.v3.TypedExtensionConfig bootstrap_extensions = 21; + + // Configuration sources that will participate in + // *udpa.core.v1.ResourceLocator* authority resolution. The algorithm is as + // follows: + // 1. The authority field is taken from the *udpa.core.v1.ResourceLocator*, call + // this *resource_authority*. + // 2. *resource_authority* is compared against the authorities in any peer + // *ConfigSource*. The peer *ConfigSource* is the configuration source + // message which would have been used unconditionally for resolution + // with opaque resource names. If there is a match with an authority, the + // peer *ConfigSource* message is used. + // 3. *resource_authority* is compared sequentially with the authorities in + // each configuration source in *config_sources*. The first *ConfigSource* + // to match wins. + // 4. As a fallback, if no configuration source matches, then + // *default_config_source* is used. + // 5. If *default_config_source* is not specified, resolution fails. + // [#not-implemented-hide:] + repeated core.v3.ConfigSource config_sources = 22; + + // Default configuration source for *udpa.core.v1.ResourceLocator* if all + // other resolution fails. 
+ // [#not-implemented-hide:] + core.v3.ConfigSource default_config_source = 23; + + // Optional overriding of default socket interface. The value must be the name of one of the + // socket interface factories initialized through a bootstrap extension + string default_socket_interface = 24; + + // Global map of CertificateProvider instances. These instances are referred to by name in the + // :ref:`CommonTlsContext.CertificateProviderInstance.instance_name + // ` + // field. + // [#not-implemented-hide:] + map certificate_provider_instances = 25; + Runtime hidden_envoy_deprecated_runtime = 11 [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; } @@ -249,6 +306,7 @@ message ClusterManager { // Envoy process watchdog configuration. When configured, this monitors for // nonresponsive threads and kills the process after the configured thresholds. // See the :ref:`watchdog documentation ` for more information. +// [#next-free-field: 7] message Watchdog { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v2.Watchdog"; @@ -266,10 +324,22 @@ message Watchdog { // kill behavior. If not specified the default is 0 (disabled). google.protobuf.Duration kill_timeout = 3; - // If at least two watched threads have been nonresponsive for at least this - // duration assume a true deadlock and kill the entire Envoy process. Set to 0 - // to disable this behavior. If not specified the default is 0 (disabled). + // Defines the maximum jitter used to adjust the *kill_timeout* if *kill_timeout* is + // enabled. Enabling this feature would help to reduce risk of synchronized + // watchdog kill events across proxies due to external triggers. Set to 0 to + // disable. If not specified the default is 0 (disabled). 
+ google.protobuf.Duration max_kill_timeout_jitter = 6 [(validate.rules).duration = {gte {}}]; + + // If max(2, ceil(registered_threads * Fraction(*multikill_threshold*))) + // threads have been nonresponsive for at least this duration kill the entire + // Envoy process. Set to 0 to disable this behavior. If not specified the + // default is 0 (disabled). google.protobuf.Duration multikill_timeout = 4; + + // Sets the threshold for *multikill_timeout* in terms of the percentage of + // nonresponsive threads required for the *multikill_timeout*. + // If not specified the default is 0. + type.v3.Percent multikill_threshold = 5; } // Runtime :ref:`configuration overview ` (deprecated). @@ -345,7 +415,12 @@ message RuntimeLayer { "envoy.config.bootstrap.v2.RuntimeLayer.RtdsLayer"; // Resource to subscribe to at *rtds_config* for the RTDS layer. - string name = 1; + string name = 1 [(udpa.annotations.field_migrate).oneof_promotion = "name_specifier"]; + + // Resource locator for RTDS layer. This is mutually exclusive to *name*. + // [#not-implemented-hide:] + udpa.core.v1.ResourceLocator rtds_resource_locator = 3 + [(udpa.annotations.field_migrate).oneof_promotion = "name_specifier"]; // RTDS configuration source. 
core.v3.ConfigSource rtds_config = 2; diff --git a/generated_api_shadow/envoy/config/bootstrap/v4alpha/BUILD b/generated_api_shadow/envoy/config/bootstrap/v4alpha/BUILD index 005603632b4c5..b5609e3cc43f2 100644 --- a/generated_api_shadow/envoy/config/bootstrap/v4alpha/BUILD +++ b/generated_api_shadow/envoy/config/bootstrap/v4alpha/BUILD @@ -10,11 +10,13 @@ api_proto_package( "//envoy/config/bootstrap/v3:pkg", "//envoy/config/cluster/v4alpha:pkg", "//envoy/config/core/v4alpha:pkg", - "//envoy/config/listener/v3:pkg", - "//envoy/config/metrics/v3:pkg", + "//envoy/config/listener/v4alpha:pkg", + "//envoy/config/metrics/v4alpha:pkg", "//envoy/config/overload/v3:pkg", "//envoy/config/trace/v4alpha:pkg", "//envoy/extensions/transport_sockets/tls/v4alpha:pkg", + "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//udpa/core/v1:pkg", ], ) diff --git a/generated_api_shadow/envoy/config/bootstrap/v4alpha/bootstrap.proto b/generated_api_shadow/envoy/config/bootstrap/v4alpha/bootstrap.proto index 9177f186f6b52..89dd0d7f7d0d2 100644 --- a/generated_api_shadow/envoy/config/bootstrap/v4alpha/bootstrap.proto +++ b/generated_api_shadow/envoy/config/bootstrap/v4alpha/bootstrap.proto @@ -7,18 +7,23 @@ import "envoy/config/core/v4alpha/address.proto"; import "envoy/config/core/v4alpha/base.proto"; import "envoy/config/core/v4alpha/config_source.proto"; import "envoy/config/core/v4alpha/event_service_config.proto"; +import "envoy/config/core/v4alpha/extension.proto"; import "envoy/config/core/v4alpha/socket_option.proto"; -import "envoy/config/listener/v3/listener.proto"; -import "envoy/config/metrics/v3/stats.proto"; +import "envoy/config/listener/v4alpha/listener.proto"; +import "envoy/config/metrics/v4alpha/stats.proto"; import "envoy/config/overload/v3/overload.proto"; import "envoy/config/trace/v4alpha/http_tracer.proto"; -import "envoy/extensions/transport_sockets/tls/v4alpha/cert.proto"; +import 
"envoy/extensions/transport_sockets/tls/v4alpha/secret.proto"; +import "envoy/type/v3/percent.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/core/v1/resource_locator.proto"; + import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/security.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -34,7 +39,7 @@ option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSIO // ` for more detail. // Bootstrap :ref:`configuration overview `. -// [#next-free-field: 21] +// [#next-free-field: 26] message Bootstrap { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v3.Bootstrap"; @@ -43,9 +48,9 @@ message Bootstrap { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v3.Bootstrap.StaticResources"; - // Static :ref:`Listeners `. These listeners are + // Static :ref:`Listeners `. These listeners are // available regardless of LDS configuration. - repeated listener.v3.Listener listeners = 1; + repeated listener.v4alpha.Listener listeners = 1; // If a network based configuration source is specified for :ref:`cds_config // `, it's necessary @@ -60,21 +65,30 @@ message Bootstrap { repeated envoy.extensions.transport_sockets.tls.v4alpha.Secret secrets = 3; } + // [#next-free-field: 7] message DynamicResources { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v3.Bootstrap.DynamicResources"; reserved 4; - // All :ref:`Listeners ` are provided by a single + // All :ref:`Listeners ` are provided by a single // :ref:`LDS ` configuration source. core.v4alpha.ConfigSource lds_config = 1; + // Resource locator for listener collection. 
+ // [#not-implemented-hide:] + udpa.core.v1.ResourceLocator lds_resources_locator = 5; + // All post-bootstrap :ref:`Cluster ` definitions are // provided by a single :ref:`CDS ` // configuration source. core.v4alpha.ConfigSource cds_config = 2; + // Resource locator for cluster collection. + // [#not-implemented-hide:] + udpa.core.v1.ResourceLocator cds_resources_locator = 6; + // A single :ref:`ADS ` source may be optionally // specified. This must have :ref:`api_type // ` :ref:`GRPC @@ -111,10 +125,10 @@ message Bootstrap { string flags_path = 5; // Optional set of stats sinks. - repeated metrics.v3.StatsSink stats_sinks = 6; + repeated metrics.v4alpha.StatsSink stats_sinks = 6; // Configuration for internal processing of stats. - metrics.v3.StatsConfig stats_config = 13; + metrics.v4alpha.StatsConfig stats_config = 13; // Optional duration between flushes to configured stats sinks. For // performance reasons Envoy latches counters and only flushes counters and @@ -145,12 +159,15 @@ message Bootstrap { Admin admin = 12; // Optional overload manager configuration. - overload.v3.OverloadManager overload_manager = 15; + overload.v3.OverloadManager overload_manager = 15 [ + (udpa.annotations.security).configure_for_untrusted_downstream = true, + (udpa.annotations.security).configure_for_untrusted_upstream = true + ]; // Enable :ref:`stats for event dispatcher `, defaults to false. // Note that this records a value for each iteration of the event loop on every thread. This // should normally be minimal overhead, but when using - // :ref:`statsd `, it will send each observed value + // :ref:`statsd `, it will send each observed value // over the wire individually because the statsd protocol doesn't have any way to represent a // histogram summary. Be aware that this can be a very large volume of data. 
bool enable_dispatcher_stats = 16; @@ -168,7 +185,7 @@ message Bootstrap { // Optional proxy version which will be used to set the value of :ref:`server.version statistic // ` if specified. Envoy will not process this value, it will be sent as is to - // :ref:`stats sinks `. + // :ref:`stats sinks `. google.protobuf.UInt64Value stats_server_version_override = 19; // Always use TCP queries instead of UDP queries for DNS lookups. @@ -177,6 +194,45 @@ message Bootstrap { // :ref:`use_tcp_for_dns_lookups ` are // specified. bool use_tcp_for_dns_lookups = 20; + + // Specifies optional bootstrap extensions to be instantiated at startup time. + // Each item contains extension specific configuration. + repeated core.v4alpha.TypedExtensionConfig bootstrap_extensions = 21; + + // Configuration sources that will participate in + // *udpa.core.v1.ResourceLocator* authority resolution. The algorithm is as + // follows: + // 1. The authority field is taken from the *udpa.core.v1.ResourceLocator*, call + // this *resource_authority*. + // 2. *resource_authority* is compared against the authorities in any peer + // *ConfigSource*. The peer *ConfigSource* is the configuration source + // message which would have been used unconditionally for resolution + // with opaque resource names. If there is a match with an authority, the + // peer *ConfigSource* message is used. + // 3. *resource_authority* is compared sequentially with the authorities in + // each configuration source in *config_sources*. The first *ConfigSource* + // to match wins. + // 4. As a fallback, if no configuration source matches, then + // *default_config_source* is used. + // 5. If *default_config_source* is not specified, resolution fails. + // [#not-implemented-hide:] + repeated core.v4alpha.ConfigSource config_sources = 22; + + // Default configuration source for *udpa.core.v1.ResourceLocator* if all + // other resolution fails. 
+ // [#not-implemented-hide:] + core.v4alpha.ConfigSource default_config_source = 23; + + // Optional overriding of default socket interface. The value must be the name of one of the + // socket interface factories initialized through a bootstrap extension + string default_socket_interface = 24; + + // Global map of CertificateProvider instances. These instances are referred to by name in the + // :ref:`CommonTlsContext.CertificateProviderInstance.instance_name + // ` + // field. + // [#not-implemented-hide:] + map certificate_provider_instances = 25; } // Administration interface :ref:`operations documentation @@ -248,6 +304,7 @@ message ClusterManager { // Envoy process watchdog configuration. When configured, this monitors for // nonresponsive threads and kills the process after the configured thresholds. // See the :ref:`watchdog documentation ` for more information. +// [#next-free-field: 7] message Watchdog { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v3.Watchdog"; @@ -265,10 +322,22 @@ message Watchdog { // kill behavior. If not specified the default is 0 (disabled). google.protobuf.Duration kill_timeout = 3; - // If at least two watched threads have been nonresponsive for at least this - // duration assume a true deadlock and kill the entire Envoy process. Set to 0 - // to disable this behavior. If not specified the default is 0 (disabled). + // Defines the maximum jitter used to adjust the *kill_timeout* if *kill_timeout* is + // enabled. Enabling this feature would help to reduce risk of synchronized + // watchdog kill events across proxies due to external triggers. Set to 0 to + // disable. If not specified the default is 0 (disabled). + google.protobuf.Duration max_kill_timeout_jitter = 6 [(validate.rules).duration = {gte {}}]; + + // If max(2, ceil(registered_threads * Fraction(*multikill_threshold*))) + // threads have been nonresponsive for at least this duration kill the entire + // Envoy process. 
Set to 0 to disable this behavior. If not specified the + // default is 0 (disabled). google.protobuf.Duration multikill_timeout = 4; + + // Sets the threshold for *multikill_timeout* in terms of the percentage of + // nonresponsive threads required for the *multikill_timeout*. + // If not specified the default is 0. + type.v3.Percent multikill_threshold = 5; } // Runtime :ref:`configuration overview ` (deprecated). @@ -343,8 +412,14 @@ message RuntimeLayer { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v3.RuntimeLayer.RtdsLayer"; - // Resource to subscribe to at *rtds_config* for the RTDS layer. - string name = 1; + oneof name_specifier { + // Resource to subscribe to at *rtds_config* for the RTDS layer. + string name = 1; + + // Resource locator for RTDS layer. This is mutually exclusive to *name*. + // [#not-implemented-hide:] + udpa.core.v1.ResourceLocator rtds_resource_locator = 3; + } // RTDS configuration source. core.v4alpha.ConfigSource rtds_config = 2; diff --git a/generated_api_shadow/envoy/config/cluster/redis/redis_cluster.proto b/generated_api_shadow/envoy/config/cluster/redis/redis_cluster.proto index b1872501e8eb2..abe88f76a6ff8 100644 --- a/generated_api_shadow/envoy/config/cluster/redis/redis_cluster.proto +++ b/generated_api_shadow/envoy/config/cluster/redis/redis_cluster.proto @@ -18,7 +18,7 @@ option (udpa.annotations.file_status).package_version_status = FROZEN; // of :ref:`Envoy's support for Redis Cluster `. // // Redis Cluster is an extension of Redis which supports sharding and high availability (where a -// shard that loses its master fails over to a replica, and designates it as the new master). +// shard that loses its primary fails over to a replica, and designates it as the new primary). // However, as there is no unified frontend or proxy service in front of Redis Cluster, the client // (in this case Envoy) must locally maintain the state of the Redis Cluster, specifically the // topology. 
A random node in the cluster is queried for the topology using the `CLUSTER SLOTS diff --git a/generated_api_shadow/envoy/config/cluster/v3/BUILD b/generated_api_shadow/envoy/config/cluster/v3/BUILD index 2c229a6ac8cc0..7bbe1aa145be5 100644 --- a/generated_api_shadow/envoy/config/cluster/v3/BUILD +++ b/generated_api_shadow/envoy/config/cluster/v3/BUILD @@ -14,5 +14,6 @@ api_proto_package( "//envoy/extensions/transport_sockets/tls/v3:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//udpa/core/v1:pkg", ], ) diff --git a/generated_api_shadow/envoy/config/cluster/v3/cluster.proto b/generated_api_shadow/envoy/config/cluster/v3/cluster.proto index e8e451de8e6b7..1f501359733b8 100644 --- a/generated_api_shadow/envoy/config/cluster/v3/cluster.proto +++ b/generated_api_shadow/envoy/config/cluster/v3/cluster.proto @@ -8,10 +8,11 @@ import "envoy/config/cluster/v3/outlier_detection.proto"; import "envoy/config/core/v3/address.proto"; import "envoy/config/core/v3/base.proto"; import "envoy/config/core/v3/config_source.proto"; +import "envoy/config/core/v3/extension.proto"; import "envoy/config/core/v3/health_check.proto"; import "envoy/config/core/v3/protocol.proto"; import "envoy/config/endpoint/v3/endpoint.proto"; -import "envoy/extensions/transport_sockets/tls/v3/cert.proto"; +import "envoy/extensions/transport_sockets/tls/v3/tls.proto"; import "envoy/type/v3/percent.proto"; import "google/protobuf/any.proto"; @@ -19,7 +20,12 @@ import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/core/v1/collection_entry.proto"; +import "udpa/core/v1/resource_locator.proto"; + import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/migrate.proto"; +import "udpa/annotations/security.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -31,8 +37,14 @@ option 
(udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Cluster configuration] +// Cluster list collections. Entries are *Cluster* resources or references. +// [#not-implemented-hide:] +message ClusterCollection { + udpa.core.v1.CollectionEntry entries = 1; +} + // Configuration for a single upstream cluster. -// [#next-free-field: 48] +// [#next-free-field: 51] message Cluster { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Cluster"; @@ -176,7 +188,12 @@ message Cluster { // Optional alternative to cluster name to present to EDS. This does not // have the same restrictions as cluster name, i.e. it may be arbitrary // length. - string service_name = 2; + string service_name = 2 [(udpa.annotations.field_migrate).oneof_promotion = "name_specifier"]; + + // Resource locator for EDS. This is mutually exclusive to *service_name*. + // [#not-implemented-hide:] + udpa.core.v1.ResourceLocator eds_resource_locator = 3 + [(udpa.annotations.field_migrate).oneof_promotion = "name_specifier"]; } // Optionally divide the endpoints in this cluster into subsets defined by @@ -317,6 +334,31 @@ message Cluster { // The number of random healthy hosts from which the host with the fewest active requests will // be chosen. Defaults to 2 so that we perform two-choice selection if the field is not set. google.protobuf.UInt32Value choice_count = 1 [(validate.rules).uint32 = {gte: 2}]; + + // The following formula is used to calculate the dynamic weights when hosts have different load + // balancing weights: + // + // `weight = load_balancing_weight / (active_requests + 1)^active_request_bias` + // + // The larger the active request bias is, the more aggressively active requests will lower the + // effective weight when all host weights are not equal. + // + // `active_request_bias` must be greater than or equal to 0.0. 
+ // + // When `active_request_bias == 0.0` the Least Request Load Balancer doesn't consider the number + // of active requests at the time it picks a host and behaves like the Round Robin Load + // Balancer. + // + // When `active_request_bias > 0.0` the Least Request Load Balancer scales the load balancing + // weight by the number of active requests at the time it does a pick. + // + // The value is cached for performance reasons and refreshed whenever one of the Load Balancer's + // host sets changes, e.g., whenever there is a host membership update or a host load balancing + // weight change. + // + // .. note:: + // This setting only takes effect if all host weights are not equal. + core.v3.RuntimeDouble active_request_bias = 2; } // Specific configuration for the :ref:`RingHash` @@ -430,9 +472,11 @@ message Cluster { // The specified percent will be truncated to the nearest 1%. type.v3.Percent healthy_panic_threshold = 1; - google.protobuf.Duration update_merge_window = 4; + oneof locality_config_specifier { + ZoneAwareLbConfig zone_aware_lb_config = 2; - bool ignore_new_hosts_until_first_hc = 5; + LocalityWeightedLbConfig locality_weighted_lb_config = 3; + } // If set, all health check/weight/metadata updates that happen within this duration will be // merged and delivered in one shot when the duration expires. The start of the duration is when @@ -448,7 +492,7 @@ message Cluster { // Note: merging does not apply to cluster membership changes (e.g.: adds/removes); this is // because merging those updates isn't currently safe. See // https://github.com/envoyproxy/envoy/pull/3941. - bool close_connections_on_host_set_change = 6; + google.protobuf.Duration update_merge_window = 4; // If set to true, Envoy will not consider new hosts when computing load balancing weights until // they have been health checked for the first time. 
This will have no effect unless @@ -469,16 +513,14 @@ message Cluster { // // If panic mode is triggered, new hosts are still eligible for traffic; they simply do not // contribute to the calculation when deciding whether panic mode is enabled or not. - ConsistentHashingLbConfig consistent_hashing_lb_config = 7; + bool ignore_new_hosts_until_first_hc = 5; - oneof locality_config_specifier { - // If set to `true`, the cluster manager will drain all existing - // connections to upstream hosts whenever hosts are added or removed from the cluster. - ZoneAwareLbConfig zone_aware_lb_config = 2; + // If set to `true`, the cluster manager will drain all existing + // connections to upstream hosts whenever hosts are added or removed from the cluster. + bool close_connections_on_host_set_change = 6; - //Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.) - LocalityWeightedLbConfig locality_weighted_lb_config = 3; - } + //Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.) + ConsistentHashingLbConfig consistent_hashing_lb_config = 7; } message RefreshRate { @@ -499,10 +541,39 @@ message Cluster { google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {nanos: 1000000}}]; } + // [#not-implemented-hide:] + message PrefetchPolicy { + // Indicates how many many streams (rounded up) can be anticipated per-upstream for each + // stream, useful for high-QPS or latency-sensitive services. + // + // For example if this is 2, for an incoming HTTP/1.1 stream, 2 connections will be + // established, one for the new incoming stream, and one for a presumed follow-up stream. For + // HTTP/2, only one connection would be established by default as one connection can + // serve both the original and presumed follow-up stream. 
+ // + // In steady state for non-multiplexed connections a value of 1.5 would mean if there were 100 + // active streams, there would be 100 connections in use, and 50 connections prefetched. + // This might be a useful value for something like short lived single-use connections, + // for example proxying HTTP/1.1 if keep-alive were false and each stream resulted in connection + // termination. It would likely be overkill for long lived connections, such as TCP proxying SMTP + // or regular HTTP/1.1 with keep-alive. For long lived traffic, a value of 1.05 would be more + // reasonable, where for every 100 connections, 5 prefetched connections would be in the queue + // in case of unexpected disconnects where the connection could not be reused. + // + // If this value is not set, or set explicitly to one, Envoy will fetch as many connections + // as needed to serve streams in flight. This means in steady state if a connection is torn down, + // a subsequent streams will pay an upstream-rtt latency penalty waiting for streams to be + // prefetched. + // + // This is limited somewhat arbitrarily to 3 because prefetching connections too aggressively can + // harm latency more than the prefetching helps. + google.protobuf.DoubleValue prefetch_ratio = 1 [(validate.rules).double = {lte: 3.0 gte: 1.0}]; + } + reserved 12, 15; // Configuration to use different transport sockets for different endpoints. - // The entry of *envoy.transport_socket* in the + // The entry of *envoy.transport_socket_match* in the // :ref:`LbEndpoint.Metadata ` // is used to match against the transport sockets as they appear in the list. The first // :ref:`match ` is used. 
@@ -522,14 +593,14 @@ message Cluster { // transport_socket: // name: envoy.transport_sockets.raw_buffer // - // Connections to the endpoints whose metadata value under *envoy.transport_socket* + // Connections to the endpoints whose metadata value under *envoy.transport_socket_match* // having "acceptMTLS"/"true" key/value pair use the "enableMTLS" socket configuration. // // If a :ref:`socket match ` with empty match // criteria is provided, that always match any endpoint. For example, the "defaultToPlaintext" // socket match in case above. // - // If an endpoint metadata's value under *envoy.transport_socket* does not match any + // If an endpoint metadata's value under *envoy.transport_socket_match* does not match any // *TransportSocketMatch*, socket configuration fallbacks to use the *tls_context* or // *transport_socket* specified in this cluster. // @@ -545,6 +616,10 @@ message Cluster { // *TransportSocketMatch* in this field. Other client Envoys receive CDS without // *transport_socket_match* set, and still send plain text traffic to the same cluster. // + // This field can be used to specify custom transport socket configurations for health + // checks by adding matching key/value pairs in a health check's + // :ref:`transport socket match criteria ` field. + // // [#comment:TODO(incfly): add a detailed architecture doc on intended usage.] repeated TransportSocketMatch transport_socket_matches = 43; @@ -561,26 +636,30 @@ message Cluster { // `. string alt_stat_name = 28; - // The :ref:`service discovery type ` - // to use for resolving the cluster. - EdsClusterConfig eds_cluster_config = 3; + oneof cluster_discovery_type { + // The :ref:`service discovery type ` + // to use for resolving the cluster. + DiscoveryType type = 2 [(validate.rules).enum = {defined_only: true}]; - // The custom cluster type. - google.protobuf.Duration connect_timeout = 4 [(validate.rules).duration = {gt {}}]; + // The custom cluster type. 
+ CustomClusterType cluster_type = 38; + } // Configuration to use for EDS updates for the Cluster. - google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5; + EdsClusterConfig eds_cluster_config = 3; // The timeout for new network connections to hosts in the cluster. - LbPolicy lb_policy = 6 [(validate.rules).enum = {defined_only: true}]; + google.protobuf.Duration connect_timeout = 4 [(validate.rules).duration = {gt {}}]; // Soft limit on size of the cluster’s connections read and write buffers. If // unspecified, an implementation defined default is applied (1MiB). - endpoint.v3.ClusterLoadAssignment load_assignment = 33; + google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5 + [(udpa.annotations.security).configure_for_untrusted_upstream = true]; // The :ref:`load balancer type ` to use // when picking a host in the cluster. - repeated core.v3.HealthCheck health_checks = 8; + // [#comment:TODO: Remove enum constraint :ref:`LOAD_BALANCING_POLICY_CONFIG` when implemented.] + LbPolicy lb_policy = 6 [(validate.rules).enum = {defined_only: true not_in: 7}]; // Setting this is required for specifying members of // :ref:`STATIC`, @@ -593,33 +672,33 @@ message Cluster { // Setting this allows non-EDS cluster types to contain embedded EDS equivalent // :ref:`endpoint assignments`. // - google.protobuf.UInt32Value max_requests_per_connection = 9; + endpoint.v3.ClusterLoadAssignment load_assignment = 33; // Optional :ref:`active health checking ` // configuration for the cluster. If no // configuration is specified no health checking will be done and all cluster // members will be considered healthy at all times. - CircuitBreakers circuit_breakers = 10; + repeated core.v3.HealthCheck health_checks = 8; // Optional maximum requests for a single upstream connection. This parameter // is respected by both the HTTP/1.1 and HTTP/2 connection pool // implementations. If not specified, there is no limit. 
Setting this // parameter to 1 will effectively disable keep alive. - core.v3.UpstreamHttpProtocolOptions upstream_http_protocol_options = 46; + google.protobuf.UInt32Value max_requests_per_connection = 9; // Optional :ref:`circuit breaking ` for the cluster. - core.v3.HttpProtocolOptions common_http_protocol_options = 29; + CircuitBreakers circuit_breakers = 10; // HTTP protocol options that are applied only to upstream HTTP connections. // These options apply to all HTTP versions. - core.v3.Http1ProtocolOptions http_protocol_options = 13; + core.v3.UpstreamHttpProtocolOptions upstream_http_protocol_options = 46; // Additional options when handling HTTP requests upstream. These options will be applicable to // both HTTP1 and HTTP2 requests. - core.v3.Http2ProtocolOptions http2_protocol_options = 14; + core.v3.HttpProtocolOptions common_http_protocol_options = 29; // Additional options when handling HTTP1 requests. - map typed_extension_protocol_options = 36; + core.v3.Http1ProtocolOptions http_protocol_options = 13; // Even if default HTTP2 protocol options are desired, this field must be // set so that Envoy will assume that the upstream supports HTTP/2 when @@ -627,14 +706,14 @@ message Cluster { // supports prior knowledge for upstream connections. Even if TLS is used // with ALPN, `http2_protocol_options` must be specified. As an aside this allows HTTP/2 // connections to happen over plain text. - google.protobuf.Duration dns_refresh_rate = 16 - [(validate.rules).duration = {gt {nanos: 1000000}}]; + core.v3.Http2ProtocolOptions http2_protocol_options = 14 + [(udpa.annotations.security).configure_for_untrusted_upstream = true]; // The extension_protocol_options field is used to provide extension-specific protocol options // for upstream connections. The key should match the extension filter name, such as // "envoy.filters.network.thrift_proxy". See the extension's documentation for details on // specific options. 
- RefreshRate dns_failure_refresh_rate = 44; + map typed_extension_protocol_options = 36; // If the DNS refresh rate is specified and the cluster type is either // :ref:`STRICT_DNS`, @@ -645,7 +724,8 @@ message Cluster { // :ref:`STRICT_DNS` // and :ref:`LOGICAL_DNS` // this setting is ignored. - bool respect_dns_ttl = 39; + google.protobuf.Duration dns_refresh_rate = 16 + [(validate.rules).duration = {gt {nanos: 1000000}}]; // If the DNS failure refresh rate is specified and the cluster type is either // :ref:`STRICT_DNS`, @@ -655,17 +735,17 @@ message Cluster { // other than :ref:`STRICT_DNS` and // :ref:`LOGICAL_DNS` this setting is // ignored. - DnsLookupFamily dns_lookup_family = 17 [(validate.rules).enum = {defined_only: true}]; + RefreshRate dns_failure_refresh_rate = 44; // Optional configuration for setting cluster's DNS refresh rate. If the value is set to true, // cluster's DNS refresh rate will be set to resource record's TTL which comes from DNS // resolution. - repeated core.v3.Address dns_resolvers = 18; + bool respect_dns_ttl = 39; // The DNS IP address resolution policy. If this setting is not specified, the // value defaults to // :ref:`AUTO`. - bool use_tcp_for_dns_lookups = 45; + DnsLookupFamily dns_lookup_family = 17 [(validate.rules).enum = {defined_only: true}]; // If DNS resolvers are specified and the cluster type is either // :ref:`STRICT_DNS`, @@ -677,16 +757,16 @@ message Cluster { // :ref:`STRICT_DNS` // and :ref:`LOGICAL_DNS` // this setting is ignored. - OutlierDetection outlier_detection = 19; + repeated core.v3.Address dns_resolvers = 18; // [#next-major-version: Reconcile DNS options in a single message.] // Always use TCP queries instead of UDP queries for DNS lookups. - google.protobuf.Duration cleanup_interval = 20 [(validate.rules).duration = {gt {}}]; + bool use_tcp_for_dns_lookups = 45; // If specified, outlier detection will be enabled for this upstream cluster. 
// Each of the configuration values can be overridden via // :ref:`runtime values `. - core.v3.BindConfig upstream_bind_config = 21; + OutlierDetection outlier_detection = 19; // The interval for removing stale hosts from a cluster type // :ref:`ORIGINAL_DST`. @@ -701,47 +781,56 @@ message Cluster { // value defaults to 5000ms. For cluster types other than // :ref:`ORIGINAL_DST` // this setting is ignored. - LbSubsetConfig lb_subset_config = 22; + google.protobuf.Duration cleanup_interval = 20 [(validate.rules).duration = {gt {}}]; // Optional configuration used to bind newly established upstream connections. // This overrides any bind_config specified in the bootstrap proto. // If the address and port are empty, no bind will be performed. - CommonLbConfig common_lb_config = 27; + core.v3.BindConfig upstream_bind_config = 21; // Configuration for load balancing subsetting. - core.v3.TransportSocket transport_socket = 24; + LbSubsetConfig lb_subset_config = 22; - // Optional configuration for the Ring Hash load balancing policy. - core.v3.Metadata metadata = 25; + // Optional configuration for the load balancing algorithm selected by + // LbPolicy. Currently only + // :ref:`RING_HASH` and + // :ref:`LEAST_REQUEST` + // has additional configuration options. + // Specifying ring_hash_lb_config or least_request_lb_config without setting the corresponding + // LbPolicy will generate an error at runtime. + oneof lb_config { + // Optional configuration for the Ring Hash load balancing policy. + RingHashLbConfig ring_hash_lb_config = 23; - // Optional configuration for the Original Destination load balancing policy. - ClusterProtocolSelection protocol_selection = 26; + // Optional configuration for the Original Destination load balancing policy. + OriginalDstLbConfig original_dst_lb_config = 34; - // Optional configuration for the LeastRequest load balancing policy. 
- UpstreamConnectionOptions upstream_connection_options = 30; + // Optional configuration for the LeastRequest load balancing policy. + LeastRequestLbConfig least_request_lb_config = 37; + } // Common configuration for all load balancer implementations. - bool close_connections_on_host_health_failure = 31; + CommonLbConfig common_lb_config = 27; // Optional custom transport socket implementation to use for upstream connections. // To setup TLS, set a transport socket with name `tls` and // :ref:`UpstreamTlsContexts ` in the `typed_config`. // If no transport socket configuration is specified, new connections // will be set up with plaintext. - bool ignore_health_on_host_removal = 32; + core.v3.TransportSocket transport_socket = 24; // The Metadata field can be used to provide additional information about the // cluster. It can be used for stats, logging, and varying filter behavior. // Fields should use reverse DNS notation to denote which entity within Envoy // will need the information. For instance, if the metadata is intended for // the Router filter, the filter name should be specified as *envoy.filters.http.router*. - repeated Filter filters = 40; + core.v3.Metadata metadata = 25; // Determines how Envoy selects the protocol used to speak to upstream hosts. - LoadBalancingPolicy load_balancing_policy = 41; + ClusterProtocolSelection protocol_selection = 26; // Optional options for upstream connections. - core.v3.ConfigSource lrs_server = 42; + UpstreamConnectionOptions upstream_connection_options = 30; // If an upstream host becomes unhealthy (as determined by the configured health checks // or outlier detection), immediately close all connections to the failed host. @@ -756,64 +845,84 @@ message Cluster { // the unhealthy status is detected. If there are a large number of connections open // to an upstream host that becomes unhealthy, Envoy may spend a substantial amount of // time exclusively closing these connections, and not processing any other traffic. 
- bool track_timeout_budgets = 47; + bool close_connections_on_host_health_failure = 31; // If set to true, Envoy will ignore the health value of a host when processing its removal // from service discovery. This means that if active health checking is used, Envoy will *not* // wait for the endpoint to go unhealthy before removing it. - repeated core.v3.Address hidden_envoy_deprecated_hosts = 7 [deprecated = true]; + bool ignore_health_on_host_removal = 32; // An (optional) network filter chain, listed in the order the filters should be applied. // The chain will be applied to all outgoing connections that Envoy makes to the upstream // servers of this cluster. - envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext hidden_envoy_deprecated_tls_context = - 11 [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; + repeated Filter filters = 40; // [#not-implemented-hide:] New mechanism for LB policy configuration. Used only if the // :ref:`lb_policy` field has the value // :ref:`LOAD_BALANCING_POLICY_CONFIG`. - map hidden_envoy_deprecated_extension_protocol_options = 35 - [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; + LoadBalancingPolicy load_balancing_policy = 41; - oneof cluster_discovery_type { - // [#not-implemented-hide:] - // If present, tells the client where to send load reports via LRS. If not present, the - // client will fall back to a client-side default, which may be either (a) don't send any - // load reports or (b) send load reports for all clusters to a single default server - // (which may be configured in the bootstrap file). - // - // Note that if multiple clusters point to the same LRS server, the client may choose to - // create a separate stream for each cluster or it may choose to coalesce the data for - // multiple clusters onto a single stream. Either way, the client must make sure to send - // the data for any given cluster on no more than one stream. 
- // - // [#next-major-version: In the v3 API, we should consider restructuring this somehow, - // maybe by allowing LRS to go on the ADS stream, or maybe by moving some of the negotiation - // from the LRS stream here.] - DiscoveryType type = 2 [(validate.rules).enum = {defined_only: true}]; + // [#not-implemented-hide:] + // If present, tells the client where to send load reports via LRS. If not present, the + // client will fall back to a client-side default, which may be either (a) don't send any + // load reports or (b) send load reports for all clusters to a single default server + // (which may be configured in the bootstrap file). + // + // Note that if multiple clusters point to the same LRS server, the client may choose to + // create a separate stream for each cluster or it may choose to coalesce the data for + // multiple clusters onto a single stream. Either way, the client must make sure to send + // the data for any given cluster on no more than one stream. + // + // [#next-major-version: In the v3 API, we should consider restructuring this somehow, + // maybe by allowing LRS to go on the ADS stream, or maybe by moving some of the negotiation + // from the LRS stream here.] + core.v3.ConfigSource lrs_server = 42; - // If track_timeout_budgets is true, the :ref:`timeout budget histograms - // ` will be published for each - // request. These show what percentage of a request's per try and global timeout was used. A value - // of 0 would indicate that none of the timeout was used or that the timeout was infinite. A value - // of 100 would indicate that the request took the entirety of the timeout given to it. - CustomClusterType cluster_type = 38; - } + // If track_timeout_budgets is true, the :ref:`timeout budget histograms + // ` will be published for each + // request. These show what percentage of a request's per try and global timeout was used. A value + // of 0 would indicate that none of the timeout was used or that the timeout was infinite. 
A value + // of 100 would indicate that the request took the entirety of the timeout given to it. + // + // .. attention:: + // + // This field has been deprecated in favor of `timeout_budgets`, part of + // :ref:`track_cluster_stats `. + bool track_timeout_budgets = 47 [deprecated = true]; - // Optional configuration for the load balancing algorithm selected by - // LbPolicy. Currently only - // :ref:`RING_HASH` and - // :ref:`LEAST_REQUEST` - // has additional configuration options. - // Specifying ring_hash_lb_config or least_request_lb_config without setting the corresponding - // LbPolicy will generate an error at runtime. - oneof lb_config { - RingHashLbConfig ring_hash_lb_config = 23; + // Optional customization and configuration of upstream connection pool, and upstream type. + // + // Currently this field only applies for HTTP traffic but is designed for eventual use for custom + // TCP upstreams. + // + // For HTTP traffic, Envoy will generally take downstream HTTP and send it upstream as upstream + // HTTP, using the http connection pool and the codec from `http2_protocol_options` + // + // For routes where CONNECT termination is configured, Envoy will take downstream CONNECT + // requests and forward the CONNECT payload upstream over raw TCP using the tcp connection pool. + // + // The default pool used is the generic connection pool which creates the HTTP upstream for most + // HTTP requests, and the TCP upstream if CONNECT termination is configured. + // + // If users desire custom connection pool or upstream behavior, for example terminating + // CONNECT only if a custom filter indicates it is appropriate, the custom factories + // can be registered and configured here. + core.v3.TypedExtensionConfig upstream_config = 48; - OriginalDstLbConfig original_dst_lb_config = 34; + // Configuration to track optional cluster stats. 
+ TrackClusterStats track_cluster_stats = 49; - LeastRequestLbConfig least_request_lb_config = 37; - } + // [#not-implemented-hide:] + // Prefetch configuration for this cluster. + PrefetchPolicy prefetch_policy = 50; + + repeated core.v3.Address hidden_envoy_deprecated_hosts = 7 [deprecated = true]; + + envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext hidden_envoy_deprecated_tls_context = + 11 [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; + + map hidden_envoy_deprecated_extension_protocol_options = 35 + [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; } // [#not-implemented-hide:] Extensible load balancing policy configuration. @@ -872,3 +981,17 @@ message UpstreamConnectionOptions { // If set then set SO_KEEPALIVE on the socket to enable TCP Keepalives. core.v3.TcpKeepalive tcp_keepalive = 1; } + +message TrackClusterStats { + // If timeout_budgets is true, the :ref:`timeout budget histograms + // ` will be published for each + // request. These show what percentage of a request's per try and global timeout was used. A value + // of 0 would indicate that none of the timeout was used or that the timeout was infinite. A value + // of 100 would indicate that the request took the entirety of the timeout given to it. + bool timeout_budgets = 1; + + // If request_response_sizes is true, then the :ref:`histograms + // ` tracking header and body sizes + // of requests and responses will be published. 
+ bool request_response_sizes = 2; +} diff --git a/generated_api_shadow/envoy/config/cluster/v4alpha/BUILD b/generated_api_shadow/envoy/config/cluster/v4alpha/BUILD index 3aff84b82faa4..196ea73f908a9 100644 --- a/generated_api_shadow/envoy/config/cluster/v4alpha/BUILD +++ b/generated_api_shadow/envoy/config/cluster/v4alpha/BUILD @@ -12,5 +12,6 @@ api_proto_package( "//envoy/config/endpoint/v3:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//udpa/core/v1:pkg", ], ) diff --git a/generated_api_shadow/envoy/config/cluster/v4alpha/cluster.proto b/generated_api_shadow/envoy/config/cluster/v4alpha/cluster.proto index 887ef9c3fe337..87e35b70009c6 100644 --- a/generated_api_shadow/envoy/config/cluster/v4alpha/cluster.proto +++ b/generated_api_shadow/envoy/config/cluster/v4alpha/cluster.proto @@ -8,6 +8,7 @@ import "envoy/config/cluster/v4alpha/outlier_detection.proto"; import "envoy/config/core/v4alpha/address.proto"; import "envoy/config/core/v4alpha/base.proto"; import "envoy/config/core/v4alpha/config_source.proto"; +import "envoy/config/core/v4alpha/extension.proto"; import "envoy/config/core/v4alpha/health_check.proto"; import "envoy/config/core/v4alpha/protocol.proto"; import "envoy/config/endpoint/v3/endpoint.proto"; @@ -18,7 +19,11 @@ import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/core/v1/collection_entry.proto"; +import "udpa/core/v1/resource_locator.proto"; + import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/security.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -30,8 +35,17 @@ option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSIO // [#protodoc-title: Cluster configuration] +// Cluster list collections. Entries are *Cluster* resources or references. 
+// [#not-implemented-hide:] +message ClusterCollection { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.cluster.v3.ClusterCollection"; + + udpa.core.v1.CollectionEntry entries = 1; +} + // Configuration for a single upstream cluster. -// [#next-free-field: 48] +// [#next-free-field: 51] message Cluster { option (udpa.annotations.versioning).previous_message_type = "envoy.config.cluster.v3.Cluster"; @@ -173,10 +187,16 @@ message Cluster { // Configuration for the source of EDS updates for this Cluster. core.v4alpha.ConfigSource eds_config = 1; - // Optional alternative to cluster name to present to EDS. This does not - // have the same restrictions as cluster name, i.e. it may be arbitrary - // length. - string service_name = 2; + oneof name_specifier { + // Optional alternative to cluster name to present to EDS. This does not + // have the same restrictions as cluster name, i.e. it may be arbitrary + // length. + string service_name = 2; + + // Resource locator for EDS. This is mutually exclusive to *service_name*. + // [#not-implemented-hide:] + udpa.core.v1.ResourceLocator eds_resource_locator = 3; + } } // Optionally divide the endpoints in this cluster into subsets defined by @@ -317,6 +337,31 @@ message Cluster { // The number of random healthy hosts from which the host with the fewest active requests will // be chosen. Defaults to 2 so that we perform two-choice selection if the field is not set. google.protobuf.UInt32Value choice_count = 1 [(validate.rules).uint32 = {gte: 2}]; + + // The following formula is used to calculate the dynamic weights when hosts have different load + // balancing weights: + // + // `weight = load_balancing_weight / (active_requests + 1)^active_request_bias` + // + // The larger the active request bias is, the more aggressively active requests will lower the + // effective weight when all host weights are not equal. + // + // `active_request_bias` must be greater than or equal to 0.0. 
+ // + // When `active_request_bias == 0.0` the Least Request Load Balancer doesn't consider the number + // of active requests at the time it picks a host and behaves like the Round Robin Load + // Balancer. + // + // When `active_request_bias > 0.0` the Least Request Load Balancer scales the load balancing + // weight by the number of active requests at the time it does a pick. + // + // The value is cached for performance reasons and refreshed whenever one of the Load Balancer's + // host sets changes, e.g., whenever there is a host membership update or a host load balancing + // weight change. + // + // .. note:: + // This setting only takes effect if all host weights are not equal. + core.v4alpha.RuntimeDouble active_request_bias = 2; } // Specific configuration for the :ref:`RingHash` @@ -500,12 +545,44 @@ message Cluster { google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {nanos: 1000000}}]; } + // [#not-implemented-hide:] + message PrefetchPolicy { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.cluster.v3.Cluster.PrefetchPolicy"; + + // Indicates how many many streams (rounded up) can be anticipated per-upstream for each + // stream, useful for high-QPS or latency-sensitive services. + // + // For example if this is 2, for an incoming HTTP/1.1 stream, 2 connections will be + // established, one for the new incoming stream, and one for a presumed follow-up stream. For + // HTTP/2, only one connection would be established by default as one connection can + // serve both the original and presumed follow-up stream. + // + // In steady state for non-multiplexed connections a value of 1.5 would mean if there were 100 + // active streams, there would be 100 connections in use, and 50 connections prefetched. 
+ // This might be a useful value for something like short lived single-use connections, + // for example proxying HTTP/1.1 if keep-alive were false and each stream resulted in connection + // termination. It would likely be overkill for long lived connections, such as TCP proxying SMTP + // or regular HTTP/1.1 with keep-alive. For long lived traffic, a value of 1.05 would be more + // reasonable, where for every 100 connections, 5 prefetched connections would be in the queue + // in case of unexpected disconnects where the connection could not be reused. + // + // If this value is not set, or set explicitly to one, Envoy will fetch as many connections + // as needed to serve streams in flight. This means in steady state if a connection is torn down, + // a subsequent streams will pay an upstream-rtt latency penalty waiting for streams to be + // prefetched. + // + // This is limited somewhat arbitrarily to 3 because prefetching connections too aggressively can + // harm latency more than the prefetching helps. + google.protobuf.DoubleValue prefetch_ratio = 1 [(validate.rules).double = {lte: 3.0 gte: 1.0}]; + } + reserved 12, 15, 7, 11, 35; reserved "hosts", "tls_context", "extension_protocol_options"; // Configuration to use different transport sockets for different endpoints. - // The entry of *envoy.transport_socket* in the + // The entry of *envoy.transport_socket_match* in the // :ref:`LbEndpoint.Metadata ` // is used to match against the transport sockets as they appear in the list. The first // :ref:`match ` is used. @@ -525,14 +602,14 @@ message Cluster { // transport_socket: // name: envoy.transport_sockets.raw_buffer // - // Connections to the endpoints whose metadata value under *envoy.transport_socket* + // Connections to the endpoints whose metadata value under *envoy.transport_socket_match* // having "acceptMTLS"/"true" key/value pair use the "enableMTLS" socket configuration. 
// // If a :ref:`socket match ` with empty match // criteria is provided, that always match any endpoint. For example, the "defaultToPlaintext" // socket match in case above. // - // If an endpoint metadata's value under *envoy.transport_socket* does not match any + // If an endpoint metadata's value under *envoy.transport_socket_match* does not match any // *TransportSocketMatch*, socket configuration fallbacks to use the *tls_context* or // *transport_socket* specified in this cluster. // @@ -548,6 +625,10 @@ message Cluster { // *TransportSocketMatch* in this field. Other client Envoys receive CDS without // *transport_socket_match* set, and still send plain text traffic to the same cluster. // + // This field can be used to specify custom transport socket configurations for health + // checks by adding matching key/value pairs in a health check's + // :ref:`transport socket match criteria ` field. + // // [#comment:TODO(incfly): add a detailed architecture doc on intended usage.] repeated TransportSocketMatch transport_socket_matches = 43; @@ -581,11 +662,13 @@ message Cluster { // Soft limit on size of the cluster’s connections read and write buffers. If // unspecified, an implementation defined default is applied (1MiB). - google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5; + google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5 + [(udpa.annotations.security).configure_for_untrusted_upstream = true]; // The :ref:`load balancer type ` to use // when picking a host in the cluster. - LbPolicy lb_policy = 6 [(validate.rules).enum = {defined_only: true}]; + // [#comment:TODO: Remove enum constraint :ref:`LOAD_BALANCING_POLICY_CONFIG` when implemented.] + LbPolicy lb_policy = 6 [(validate.rules).enum = {defined_only: true not_in: 7}]; // Setting this is required for specifying members of // :ref:`STATIC`, @@ -632,7 +715,8 @@ message Cluster { // supports prior knowledge for upstream connections. 
Even if TLS is used // with ALPN, `http2_protocol_options` must be specified. As an aside this allows HTTP/2 // connections to happen over plain text. - core.v4alpha.Http2ProtocolOptions http2_protocol_options = 14; + core.v4alpha.Http2ProtocolOptions http2_protocol_options = 14 + [(udpa.annotations.security).configure_for_untrusted_upstream = true]; // The extension_protocol_options field is used to provide extension-specific protocol options // for upstream connections. The key should match the extension filter name, such as @@ -808,7 +892,38 @@ message Cluster { // request. These show what percentage of a request's per try and global timeout was used. A value // of 0 would indicate that none of the timeout was used or that the timeout was infinite. A value // of 100 would indicate that the request took the entirety of the timeout given to it. - bool track_timeout_budgets = 47; + // + // .. attention:: + // + // This field has been deprecated in favor of `timeout_budgets`, part of + // :ref:`track_cluster_stats `. + bool hidden_envoy_deprecated_track_timeout_budgets = 47 [deprecated = true]; + + // Optional customization and configuration of upstream connection pool, and upstream type. + // + // Currently this field only applies for HTTP traffic but is designed for eventual use for custom + // TCP upstreams. + // + // For HTTP traffic, Envoy will generally take downstream HTTP and send it upstream as upstream + // HTTP, using the http connection pool and the codec from `http2_protocol_options` + // + // For routes where CONNECT termination is configured, Envoy will take downstream CONNECT + // requests and forward the CONNECT payload upstream over raw TCP using the tcp connection pool. + // + // The default pool used is the generic connection pool which creates the HTTP upstream for most + // HTTP requests, and the TCP upstream if CONNECT termination is configured. 
+ // + // If users desire custom connection pool or upstream behavior, for example terminating + // CONNECT only if a custom filter indicates it is appropriate, the custom factories + // can be registered and configured here. + core.v4alpha.TypedExtensionConfig upstream_config = 48; + + // Configuration to track optional cluster stats. + TrackClusterStats track_cluster_stats = 49; + + // [#not-implemented-hide:] + // Prefetch configuration for this cluster. + PrefetchPolicy prefetch_policy = 50; } // [#not-implemented-hide:] Extensible load balancing policy configuration. @@ -871,3 +986,20 @@ message UpstreamConnectionOptions { // If set then set SO_KEEPALIVE on the socket to enable TCP Keepalives. core.v4alpha.TcpKeepalive tcp_keepalive = 1; } + +message TrackClusterStats { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.cluster.v3.TrackClusterStats"; + + // If timeout_budgets is true, the :ref:`timeout budget histograms + // ` will be published for each + // request. These show what percentage of a request's per try and global timeout was used. A value + // of 0 would indicate that none of the timeout was used or that the timeout was infinite. A value + // of 100 would indicate that the request took the entirety of the timeout given to it. + bool timeout_budgets = 1; + + // If request_response_sizes is true, then the :ref:`histograms + // ` tracking header and body sizes + // of requests and responses will be published. + bool request_response_sizes = 2; +} diff --git a/generated_api_shadow/envoy/config/common/matcher/v3/BUILD b/generated_api_shadow/envoy/config/common/matcher/v3/BUILD new file mode 100644 index 0000000000000..c312b8eb6a613 --- /dev/null +++ b/generated_api_shadow/envoy/config/common/matcher/v3/BUILD @@ -0,0 +1,12 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/route/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/config/common/matcher/v3/matcher.proto b/generated_api_shadow/envoy/config/common/matcher/v3/matcher.proto new file mode 100644 index 0000000000000..d0955e7a1f8c1 --- /dev/null +++ b/generated_api_shadow/envoy/config/common/matcher/v3/matcher.proto @@ -0,0 +1,100 @@ +syntax = "proto3"; + +package envoy.config.common.matcher.v3; + +import "envoy/config/route/v3/route_components.proto"; + +import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.common.matcher.v3"; +option java_outer_classname = "MatcherProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Unified Matcher API] + +// Match configuration. This is a recursive structure which allows complex nested match +// configurations to be built using various logical operators. +// [#next-free-field: 11] +message MatchPredicate { + // A set of match configurations used for logical operations. + message MatchSet { + // The list of rules that make up the set. + repeated MatchPredicate rules = 1 [(validate.rules).repeated = {min_items: 2}]; + } + + oneof rule { + option (validate.required) = true; + + // A set that describes a logical OR. If any member of the set matches, the match configuration + // matches. + MatchSet or_match = 1; + + // A set that describes a logical AND. If all members of the set match, the match configuration + // matches. + MatchSet and_match = 2; + + // A negation match. The match configuration will match if the negated match condition matches. 
+ MatchPredicate not_match = 3; + + // The match configuration will always match. + bool any_match = 4 [(validate.rules).bool = {const: true}]; + + // HTTP request headers match configuration. + HttpHeadersMatch http_request_headers_match = 5; + + // HTTP request trailers match configuration. + HttpHeadersMatch http_request_trailers_match = 6; + + // HTTP response headers match configuration. + HttpHeadersMatch http_response_headers_match = 7; + + // HTTP response trailers match configuration. + HttpHeadersMatch http_response_trailers_match = 8; + + // HTTP request generic body match configuration. + HttpGenericBodyMatch http_request_generic_body_match = 9; + + // HTTP response generic body match configuration. + HttpGenericBodyMatch http_response_generic_body_match = 10; + } +} + +// HTTP headers match configuration. +message HttpHeadersMatch { + // HTTP headers to match. + repeated route.v3.HeaderMatcher headers = 1; +} + +// HTTP generic body match configuration. +// List of text strings and hex strings to be located in HTTP body. +// All specified strings must be found in the HTTP body for positive match. +// The search may be limited to specified number of bytes from the body start. +// +// .. attention:: +// +// Searching for patterns in HTTP body is potentially cpu intensive. For each specified pattern, http body is scanned byte by byte to find a match. +// If multiple patterns are specified, the process is repeated for each pattern. If location of a pattern is known, ``bytes_limit`` should be specified +// to scan only part of the http body. +message HttpGenericBodyMatch { + message GenericTextMatch { + oneof rule { + option (validate.required) = true; + + // Text string to be located in HTTP body. + string string_match = 1; + + // Sequence of bytes to be located in HTTP body. + bytes binary_match = 2; + } + } + + // Limits search to specified number of bytes - default zero (no limit - match entire captured buffer). 
+ uint32 bytes_limit = 1; + + // List of patterns to match. + repeated GenericTextMatch patterns = 2 [(validate.rules).repeated = {min_items: 1}]; +} diff --git a/generated_api_shadow/envoy/config/common/matcher/v4alpha/BUILD b/generated_api_shadow/envoy/config/common/matcher/v4alpha/BUILD new file mode 100644 index 0000000000000..7028ce1a2aea3 --- /dev/null +++ b/generated_api_shadow/envoy/config/common/matcher/v4alpha/BUILD @@ -0,0 +1,13 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/common/matcher/v3:pkg", + "//envoy/config/route/v4alpha:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/config/common/matcher/v4alpha/matcher.proto b/generated_api_shadow/envoy/config/common/matcher/v4alpha/matcher.proto new file mode 100644 index 0000000000000..685ae03a1878f --- /dev/null +++ b/generated_api_shadow/envoy/config/common/matcher/v4alpha/matcher.proto @@ -0,0 +1,114 @@ +syntax = "proto3"; + +package envoy.config.common.matcher.v4alpha; + +import "envoy/config/route/v4alpha/route_components.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.common.matcher.v4alpha"; +option java_outer_classname = "MatcherProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Unified Matcher API] + +// Match configuration. This is a recursive structure which allows complex nested match +// configurations to be built using various logical operators. 
+// [#next-free-field: 11] +message MatchPredicate { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.common.matcher.v3.MatchPredicate"; + + // A set of match configurations used for logical operations. + message MatchSet { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.common.matcher.v3.MatchPredicate.MatchSet"; + + // The list of rules that make up the set. + repeated MatchPredicate rules = 1 [(validate.rules).repeated = {min_items: 2}]; + } + + oneof rule { + option (validate.required) = true; + + // A set that describes a logical OR. If any member of the set matches, the match configuration + // matches. + MatchSet or_match = 1; + + // A set that describes a logical AND. If all members of the set match, the match configuration + // matches. + MatchSet and_match = 2; + + // A negation match. The match configuration will match if the negated match condition matches. + MatchPredicate not_match = 3; + + // The match configuration will always match. + bool any_match = 4 [(validate.rules).bool = {const: true}]; + + // HTTP request headers match configuration. + HttpHeadersMatch http_request_headers_match = 5; + + // HTTP request trailers match configuration. + HttpHeadersMatch http_request_trailers_match = 6; + + // HTTP response headers match configuration. + HttpHeadersMatch http_response_headers_match = 7; + + // HTTP response trailers match configuration. + HttpHeadersMatch http_response_trailers_match = 8; + + // HTTP request generic body match configuration. + HttpGenericBodyMatch http_request_generic_body_match = 9; + + // HTTP response generic body match configuration. + HttpGenericBodyMatch http_response_generic_body_match = 10; + } +} + +// HTTP headers match configuration. +message HttpHeadersMatch { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.common.matcher.v3.HttpHeadersMatch"; + + // HTTP headers to match. 
+ repeated route.v4alpha.HeaderMatcher headers = 1; +} + +// HTTP generic body match configuration. +// List of text strings and hex strings to be located in HTTP body. +// All specified strings must be found in the HTTP body for positive match. +// The search may be limited to specified number of bytes from the body start. +// +// .. attention:: +// +// Searching for patterns in HTTP body is potentially cpu intensive. For each specified pattern, http body is scanned byte by byte to find a match. +// If multiple patterns are specified, the process is repeated for each pattern. If location of a pattern is known, ``bytes_limit`` should be specified +// to scan only part of the http body. +message HttpGenericBodyMatch { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.common.matcher.v3.HttpGenericBodyMatch"; + + message GenericTextMatch { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.common.matcher.v3.HttpGenericBodyMatch.GenericTextMatch"; + + oneof rule { + option (validate.required) = true; + + // Text string to be located in HTTP body. + string string_match = 1; + + // Sequence of bytes to be located in HTTP body. + bytes binary_match = 2; + } + } + + // Limits search to specified number of bytes - default zero (no limit - match entire captured buffer). + uint32 bytes_limit = 1; + + // List of patterns to match. 
+ repeated GenericTextMatch patterns = 2 [(validate.rules).repeated = {min_items: 1}]; +} diff --git a/generated_api_shadow/envoy/config/core/v3/BUILD b/generated_api_shadow/envoy/config/core/v3/BUILD index e52b984a61c73..60461220c20c4 100644 --- a/generated_api_shadow/envoy/config/core/v3/BUILD +++ b/generated_api_shadow/envoy/config/core/v3/BUILD @@ -11,5 +11,6 @@ api_proto_package( "//envoy/type/matcher/v3:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//udpa/core/v1:pkg", ], ) diff --git a/generated_api_shadow/envoy/config/core/v3/address.proto b/generated_api_shadow/envoy/config/core/v3/address.proto index a9dc3c6e1e300..5102c2d575911 100644 --- a/generated_api_shadow/envoy/config/core/v3/address.proto +++ b/generated_api_shadow/envoy/config/core/v3/address.proto @@ -54,29 +54,29 @@ message SocketAddress { // via :ref:`resolver_name `. string address = 2 [(validate.rules).string = {min_bytes: 1}]; - string resolver_name = 5; - - // This is only valid if :ref:`resolver_name - // ` is specified below and the - // named resolver is capable of named port resolution. - bool ipv4_compat = 6; - oneof port_specifier { option (validate.required) = true; - // The name of the custom resolver. This must have been registered with Envoy. If - // this is empty, a context dependent default applies. If the address is a concrete - // IP address, no resolution will occur. If address is a hostname this - // should be set for resolution other than DNS. Specifying a custom resolver with - // *STRICT_DNS* or *LOGICAL_DNS* will generate an error at runtime. uint32 port_value = 3 [(validate.rules).uint32 = {lte: 65535}]; - // When binding to an IPv6 address above, this enables `IPv4 compatibility - // `_. Binding to ``::`` will - // allow both IPv4 and IPv6 connections, with peer IPv4 addresses mapped into - // IPv6 space as ``::FFFF:``. 
+ // This is only valid if :ref:`resolver_name + // ` is specified below and the + // named resolver is capable of named port resolution. string named_port = 4; } + + // The name of the custom resolver. This must have been registered with Envoy. If + // this is empty, a context dependent default applies. If the address is a concrete + // IP address, no resolution will occur. If address is a hostname this + // should be set for resolution other than DNS. Specifying a custom resolver with + // *STRICT_DNS* or *LOGICAL_DNS* will generate an error at runtime. + string resolver_name = 5; + + // When binding to an IPv6 address above, this enables `IPv4 compatibility + // `_. Binding to ``::`` will + // allow both IPv4 and IPv6 connections, with peer IPv4 addresses mapped into + // IPv6 space as ``::FFFF:``. + bool ipv4_compat = 6; } message TcpKeepalive { diff --git a/generated_api_shadow/envoy/config/core/v3/base.proto b/generated_api_shadow/envoy/config/core/v3/base.proto index f9d7759cc7fa4..9b2db0d99fdad 100644 --- a/generated_api_shadow/envoy/config/core/v3/base.proto +++ b/generated_api_shadow/envoy/config/core/v3/base.proto @@ -13,6 +13,7 @@ import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -94,7 +95,7 @@ message BuildVersion { type.v3.SemanticVersion version = 1; // Free-form build information. - // Envoy defines several well known keys in the source/common/common/version.h file + // Envoy defines several well known keys in the source/common/version/version.h file google.protobuf.Struct metadata = 2; } @@ -168,32 +169,32 @@ message Node { // E.g. "envoy" or "grpc" string user_agent_name = 6; - // Free-form string that identifies the version of the entity requesting config. - // E.g. 
"1.12.2" or "abcd1234", or "SpecialEnvoyBuild" - repeated Extension extensions = 9; + oneof user_agent_version_type { + // Free-form string that identifies the version of the entity requesting config. + // E.g. "1.12.2" or "abcd1234", or "SpecialEnvoyBuild" + string user_agent_version = 7; - // Structured version of the entity requesting config. - repeated string client_features = 10; + // Structured version of the entity requesting config. + BuildVersion user_agent_build_version = 8; + } // List of extensions and their versions supported by the node. - repeated Address listening_addresses = 11; + repeated Extension extensions = 9; // Client feature support list. These are well known features described // in the Envoy API repository for a given major version of an API. Client features // use reverse DNS naming scheme, for example `com.acme.feature`. // See :ref:`the list of features ` that xDS client may // support. - string hidden_envoy_deprecated_build_version = 5 [deprecated = true]; + repeated string client_features = 10; - oneof user_agent_version_type { - // Known listening ports on the node as a generic hint to the management server - // for filtering :ref:`listeners ` to be returned. For example, - // if there is a listener bound to port 80, the list can optionally contain the - // SocketAddress `(0.0.0.0,80)`. The field is optional and just a hint. - string user_agent_version = 7; + // Known listening ports on the node as a generic hint to the management server + // for filtering :ref:`listeners ` to be returned. For example, + // if there is a listener bound to port 80, the list can optionally contain the + // SocketAddress `(0.0.0.0,80)`. The field is optional and just a hint. 
+ repeated Address listening_addresses = 11; - BuildVersion user_agent_build_version = 8; - } + string hidden_envoy_deprecated_build_version = 5 [deprecated = true]; } // Metadata provides additional inputs to filters based on matched listeners, @@ -330,7 +331,8 @@ message RetryPolicy { // Specifies the allowed number of retries. This parameter is optional and // defaults to 1. - google.protobuf.UInt32Value num_retries = 2; + google.protobuf.UInt32Value num_retries = 2 + [(udpa.annotations.field_migrate).rename = "max_retries"]; } // The message specifies how to fetch data from remote and how to verify it. diff --git a/generated_api_shadow/envoy/config/core/v3/config_source.proto b/generated_api_shadow/envoy/config/core/v3/config_source.proto index 159542a3e909a..9e7b8b777ec77 100644 --- a/generated_api_shadow/envoy/config/core/v3/config_source.proto +++ b/generated_api_shadow/envoy/config/core/v3/config_source.proto @@ -7,6 +7,8 @@ import "envoy/config/core/v3/grpc_service.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/core/v1/authority.proto"; + import "envoy/annotations/deprecation.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; @@ -19,7 +21,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Configuration sources] -// xDS API version. This is used to describe both resource and transport +// xDS API and non-xDS services version. This is used to describe both resource and transport // protocol versions (in distinct configuration fields). enum ApiVersion { // When not specified, we assume v2, to ease migration to Envoy's stable API @@ -52,17 +54,23 @@ message ApiConfigSource { // the v2 protos is used. REST = 1; - // gRPC v2 API. + // SotW gRPC service. GRPC = 2; // Using the delta xDS gRPC service, i.e. DeltaDiscovery{Request,Response} // rather than Discovery{Request,Response}. 
Rather than sending Envoy the entire state // with every update, the xDS server only sends what has changed since the last update. - // - // DELTA_GRPC is not yet entirely implemented! Initially, only CDS is available. - // Do not use for other xDSes. - // [#comment:TODO(fredlas) update/remove this warning when appropriate.] DELTA_GRPC = 3; + + // SotW xDS gRPC with ADS. All resources which resolve to this configuration source will be + // multiplexed on a single connection to an ADS endpoint. + // [#not-implemented-hide:] + AGGREGATED_GRPC = 5; + + // Delta xDS gRPC with ADS. All resources which resolve to this configuration source will be + // multiplexed on a single connection to an ADS endpoint. + // [#not-implemented-hide:] + AGGREGATED_DELTA_GRPC = 6; } // API type (gRPC, REST, delta gRPC) @@ -114,6 +122,10 @@ message AggregatedConfigSource { // specify that other data can be obtained from the same server. message SelfConfigSource { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.SelfConfigSource"; + + // API version for xDS transport protocol. This describes the xDS gRPC/REST + // endpoint and version of [Delta]DiscoveryRequest/Response used on the wire. + ApiVersion transport_api_version = 1 [(validate.rules).enum = {defined_only: true}]; } // Rate Limit settings to be applied for discovery requests made by Envoy. @@ -136,34 +148,41 @@ message RateLimitSettings { // ` etc. may either be sourced from the // filesystem or from an xDS API source. Filesystem configs are watched with // inotify for updates. -// [#next-free-field: 7] +// [#next-free-field: 8] message ConfigSource { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.ConfigSource"; - // Path on the filesystem to source and watch for configuration updates. - // When sourcing configuration for :ref:`secret `, - // the certificate and key files are also watched for updates. - // - // .. 
note:: - // - // The path to the source must exist at config load time. - // - // .. note:: - // - // Envoy will only watch the file path for *moves.* This is because in general only moves - // are atomic. The same method of swapping files as is demonstrated in the - // :ref:`runtime documentation ` can be used here also. - google.protobuf.Duration initial_fetch_timeout = 4; - - // API configuration source. - ApiVersion resource_api_version = 6 [(validate.rules).enum = {defined_only: true}]; + // Authorities that this config source may be used for. An authority specified + // in a *udpa.core.v1.ResourceLocator* is resolved to a *ConfigSource* prior + // to configuration fetch. This field provides the association between + // authority name and configuration source. + // [#not-implemented-hide:] + repeated udpa.core.v1.Authority authorities = 7; oneof config_source_specifier { option (validate.required) = true; + // Path on the filesystem to source and watch for configuration updates. + // When sourcing configuration for :ref:`secret `, + // the certificate and key files are also watched for updates. + // + // .. note:: + // + // The path to the source must exist at config load time. + // + // .. note:: + // + // Envoy will only watch the file path for *moves.* This is because in general only moves + // are atomic. The same method of swapping files as is demonstrated in the + // :ref:`runtime documentation ` can be used here also. + string path = 1; + + // API configuration source. + ApiConfigSource api_config_source = 2; + // When set, ADS will be used to fetch resources. The ADS API configuration // source in the bootstrap configuration is used. 
- string path = 1; + AggregatedConfigSource ads = 3; // [#not-implemented-hide:] // When set, the client will access the resources from the same server it got the @@ -176,20 +195,20 @@ message ConfigSource { // [#next-major-version: In xDS v3, consider replacing the ads field with this one, since // this field can implicitly mean to use the same stream in the case where the ConfigSource // is provided via ADS and the specified data can also be obtained via ADS.] - ApiConfigSource api_config_source = 2; - - // When this timeout is specified, Envoy will wait no longer than the specified time for first - // config response on this xDS subscription during the :ref:`initialization process - // `. After reaching the timeout, Envoy will move to the next - // initialization phase, even if the first config is not delivered yet. The timer is activated - // when the xDS API subscription starts, and is disarmed on first config update or on error. 0 - // means no timeout - Envoy will wait indefinitely for the first xDS config (unless another - // timeout applies). The default is 15s. - AggregatedConfigSource ads = 3; - - // API version for xDS resources. This implies the type URLs that the client - // will request for resources and the resource type that the client will in - // turn expect to be delivered. SelfConfigSource self = 5; } + + // When this timeout is specified, Envoy will wait no longer than the specified time for first + // config response on this xDS subscription during the :ref:`initialization process + // `. After reaching the timeout, Envoy will move to the next + // initialization phase, even if the first config is not delivered yet. The timer is activated + // when the xDS API subscription starts, and is disarmed on first config update or on error. 0 + // means no timeout - Envoy will wait indefinitely for the first xDS config (unless another + // timeout applies). The default is 15s. 
+ google.protobuf.Duration initial_fetch_timeout = 4; + + // API version for xDS resources. This implies the type URLs that the client + // will request for resources and the resource type that the client will in + // turn expect to be delivered. + ApiVersion resource_api_version = 6 [(validate.rules).enum = {defined_only: true}]; } diff --git a/generated_api_shadow/envoy/config/core/v3/extension.proto b/generated_api_shadow/envoy/config/core/v3/extension.proto new file mode 100644 index 0000000000000..ba66da6a8e363 --- /dev/null +++ b/generated_api_shadow/envoy/config/core/v3/extension.proto @@ -0,0 +1,61 @@ +syntax = "proto3"; + +package envoy.config.core.v3; + +import "envoy/config/core/v3/config_source.proto"; + +import "google/protobuf/any.proto"; + +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.core.v3"; +option java_outer_classname = "ExtensionProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Extension configuration] + +// Message type for extension configuration. +// [#next-major-version: revisit all existing typed_config that doesn't use this wrapper.]. +message TypedExtensionConfig { + // The name of an extension. This is not used to select the extension, instead + // it serves the role of an opaque identifier. + string name = 1 [(validate.rules).string = {min_len: 1}]; + + // The typed config for the extension. The type URL will be used to identify + // the extension. In the case that the type URL is *udpa.type.v1.TypedStruct*, + // the inner type URL of *TypedStruct* will be utilized. See the + // :ref:`extension configuration overview + // ` for further details. + google.protobuf.Any typed_config = 2 [(validate.rules).any = {required: true}]; +} + +// Configuration source specifier for a late-bound extension configuration. 
The +// parent resource is warmed until all the initial extension configurations are +// received, unless the flag to apply the default configuration is set. +// Subsequent extension updates are atomic on a per-worker basis. Once an +// extension configuration is applied to a request or a connection, it remains +// constant for the duration of processing. If the initial delivery of the +// extension configuration fails, due to a timeout for example, the optional +// default configuration is applied. Without a default configuration, the +// extension is disabled, until an extension configuration is received. The +// behavior of a disabled extension depends on the context. For example, a +// filter chain with a disabled extension filter rejects all incoming streams. +message ExtensionConfigSource { + ConfigSource config_source = 1 [(validate.rules).any = {required: true}]; + + // Optional default configuration to use as the initial configuration if + // there is a failure to receive the initial extension configuration or if + // `apply_default_config_without_warming` flag is set. + google.protobuf.Any default_config = 2; + + // Use the default config as the initial configuration without warming and + // waiting for the first discovery response. Requires the default configuration + // to be supplied. + bool apply_default_config_without_warming = 3; + + // A set of permitted extension type URLs. Extension configuration updates are rejected + // if they do not match any type URL in the set. 
+ repeated string type_urls = 4 [(validate.rules).repeated = {min_items: 1}]; +} diff --git a/generated_api_shadow/envoy/config/core/v3/grpc_service.proto b/generated_api_shadow/envoy/config/core/v3/grpc_service.proto index 654d3ed81b561..552817ffd06fa 100644 --- a/generated_api_shadow/envoy/config/core/v3/grpc_service.proto +++ b/generated_api_shadow/envoy/config/core/v3/grpc_service.proto @@ -8,6 +8,7 @@ import "google/protobuf/any.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/empty.proto"; import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; import "udpa/annotations/sensitive.proto"; import "udpa/annotations/status.proto"; @@ -35,9 +36,15 @@ message GrpcService { // in the :ref:`Cluster ` :ref:`transport_socket // `. string cluster_name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // The `:authority` header in the grpc request. If this field is not set, the authority header value will be `cluster_name`. + // Note that this authority does not override the SNI. The SNI is provided by the transport socket of the cluster. + string authority = 2 + [(validate.rules).string = + {min_bytes: 0 max_bytes: 16384 well_known_regex: HTTP_HEADER_VALUE strict: false}]; } - // [#next-free-field: 7] + // [#next-free-field: 9] message GoogleGrpc { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.GrpcService.GoogleGrpc"; @@ -200,6 +207,24 @@ message GrpcService { } } + // Channel arguments. + message ChannelArgs { + message Value { + // Pointer values are not supported, since they don't make any sense when + // delivered via the API. + oneof value_specifier { + option (validate.required) = true; + + string string_value = 1; + + int64 int_value = 2; + } + } + + // See grpc_types.h GRPC_ARG #defines for keys that work here. + map args = 1; + } + // The target URI when using the `Google C++ gRPC client // `_. SSL credentials will be supplied in // :ref:`channel_credentials `. 
@@ -230,30 +255,37 @@ message GrpcService { // Additional configuration for site-specific customizations of the Google // gRPC library. google.protobuf.Struct config = 6; - } - reserved 4; + // How many bytes each stream can buffer internally. + // If not set an implementation defined default is applied (1MiB). + google.protobuf.UInt32Value per_stream_buffer_limit_bytes = 7; - // Envoy's in-built gRPC client. - // See the :ref:`gRPC services overview ` - // documentation for discussion on gRPC client selection. - google.protobuf.Duration timeout = 3; + // Custom channels args. + ChannelArgs channel_args = 8; + } - // `Google C++ gRPC client `_ - // See the :ref:`gRPC services overview ` - // documentation for discussion on gRPC client selection. - repeated HeaderValue initial_metadata = 5; + reserved 4; oneof target_specifier { option (validate.required) = true; - // The timeout for the gRPC request. This is the timeout for a specific - // request. + // Envoy's in-built gRPC client. + // See the :ref:`gRPC services overview ` + // documentation for discussion on gRPC client selection. EnvoyGrpc envoy_grpc = 1; - // Additional metadata to include in streams initiated to the GrpcService. - // This can be used for scenarios in which additional ad hoc authorization - // headers (e.g. ``x-foo-bar: baz-key``) are to be injected. + // `Google C++ gRPC client `_ + // See the :ref:`gRPC services overview ` + // documentation for discussion on gRPC client selection. GoogleGrpc google_grpc = 2; } + + // The timeout for the gRPC request. This is the timeout for a specific + // request. + google.protobuf.Duration timeout = 3; + + // Additional metadata to include in streams initiated to the GrpcService. + // This can be used for scenarios in which additional ad hoc authorization + // headers (e.g. ``x-foo-bar: baz-key``) are to be injected. 
+ repeated HeaderValue initial_metadata = 5; } diff --git a/generated_api_shadow/envoy/config/core/v3/health_check.proto b/generated_api_shadow/envoy/config/core/v3/health_check.proto index 5b95ebe39de38..05af0a8cef064 100644 --- a/generated_api_shadow/envoy/config/core/v3/health_check.proto +++ b/generated_api_shadow/envoy/config/core/v3/health_check.proto @@ -54,7 +54,7 @@ enum HealthStatus { DEGRADED = 5; } -// [#next-free-field: 23] +// [#next-free-field: 24] message HealthCheck { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.HealthCheck"; @@ -258,17 +258,21 @@ message HealthCheck { // Reuse health check connection between health checks. Default is true. google.protobuf.BoolValue reuse_connection = 7; - // HTTP health check. - google.protobuf.Duration no_traffic_interval = 12 [(validate.rules).duration = {gt {}}]; + oneof health_checker { + option (validate.required) = true; - // TCP health check. - google.protobuf.Duration unhealthy_interval = 14 [(validate.rules).duration = {gt {}}]; + // HTTP health check. + HttpHealthCheck http_health_check = 8; - // gRPC health check. - google.protobuf.Duration unhealthy_edge_interval = 15 [(validate.rules).duration = {gt {}}]; + // TCP health check. + TcpHealthCheck tcp_health_check = 9; - // Custom health check. - google.protobuf.Duration healthy_edge_interval = 16 [(validate.rules).duration = {gt {}}]; + // gRPC health check. + GrpcHealthCheck grpc_health_check = 11; + + // Custom health check. + CustomHealthCheck custom_health_check = 13; + } // The "no traffic interval" is a special health check interval that is used when a cluster has // never had traffic routed to it. This lower interval allows cluster information to be kept up to @@ -278,14 +282,14 @@ message HealthCheck { // any other. // // The default value for "no traffic interval" is 60 seconds. 
- string event_log_path = 17; + google.protobuf.Duration no_traffic_interval = 12 [(validate.rules).duration = {gt {}}]; // The "unhealthy interval" is a health check interval that is used for hosts that are marked as // unhealthy. As soon as the host is marked as healthy, Envoy will shift back to using the // standard health check interval that is defined. // // The default value for "unhealthy interval" is the same as "interval". - EventServiceConfig event_service = 22; + google.protobuf.Duration unhealthy_interval = 14 [(validate.rules).duration = {gt {}}]; // The "unhealthy edge interval" is a special health check interval that is used for the first // health check right after a host is marked as unhealthy. For subsequent health checks @@ -293,33 +297,61 @@ message HealthCheck { // check interval that is defined. // // The default value for "unhealthy edge interval" is the same as "unhealthy interval". - bool always_log_health_check_failures = 19; + google.protobuf.Duration unhealthy_edge_interval = 15 [(validate.rules).duration = {gt {}}]; // The "healthy edge interval" is a special health check interval that is used for the first // health check right after a host is marked as healthy. For subsequent health checks // Envoy will shift back to using the standard health check interval that is defined. // // The default value for "healthy edge interval" is the same as the default interval. - TlsOptions tls_options = 21; + google.protobuf.Duration healthy_edge_interval = 16 [(validate.rules).duration = {gt {}}]; - oneof health_checker { - option (validate.required) = true; + // Specifies the path to the :ref:`health check event log `. + // If empty, no event log will be written. + string event_log_path = 17; - // Specifies the path to the :ref:`health check event log `. - // If empty, no event log will be written. - HttpHealthCheck http_health_check = 8; + // [#not-implemented-hide:] + // The gRPC service for the health check event service. 
+ // If empty, health check events won't be sent to a remote endpoint. + EventServiceConfig event_service = 22; - // [#not-implemented-hide:] - // The gRPC service for the health check event service. - // If empty, health check events won't be sent to a remote endpoint. - TcpHealthCheck tcp_health_check = 9; + // If set to true, health check failure events will always be logged. If set to false, only the + // initial health check failure event will be logged. + // The default value is false. + bool always_log_health_check_failures = 19; - // If set to true, health check failure events will always be logged. If set to false, only the - // initial health check failure event will be logged. - // The default value is false. - GrpcHealthCheck grpc_health_check = 11; + // This allows overriding the cluster TLS settings, just for health check connections. + TlsOptions tls_options = 21; - // This allows overriding the cluster TLS settings, just for health check connections. - CustomHealthCheck custom_health_check = 13; - } + // Optional key/value pairs that will be used to match a transport socket from those specified in the cluster's + // :ref:`tranport socket matches `. + // For example, the following match criteria + // + // .. code-block:: yaml + // + // transport_socket_match_criteria: + // useMTLS: true + // + // Will match the following :ref:`cluster socket match ` + // + // .. code-block:: yaml + // + // transport_socket_matches: + // - name: "useMTLS" + // match: + // useMTLS: true + // transport_socket: + // name: envoy.transport_sockets.tls + // config: { ... } # tls socket configuration + // + // If this field is set, then for health checks it will supersede an entry of *envoy.transport_socket* in the + // :ref:`LbEndpoint.Metadata `. + // This allows using different transport socket capabilities for health checking versus proxying to the + // endpoint. 
+ // + // If the key/values pairs specified do not match any + // :ref:`transport socket matches `, + // the cluster's :ref:`transport socket ` + // will be used for health check socket configuration. + google.protobuf.Struct transport_socket_match_criteria = 23; } diff --git a/generated_api_shadow/envoy/config/core/v3/http_uri.proto b/generated_api_shadow/envoy/config/core/v3/http_uri.proto index 6cc4d36d3944e..42bcd4f615724 100644 --- a/generated_api_shadow/envoy/config/core/v3/http_uri.proto +++ b/generated_api_shadow/envoy/config/core/v3/http_uri.proto @@ -29,20 +29,6 @@ message HttpUri { // string uri = 1 [(validate.rules).string = {min_bytes: 1}]; - // A cluster is created in the Envoy "cluster_manager" config - // section. This field specifies the cluster name. - // - // Example: - // - // .. code-block:: yaml - // - // cluster: jwks_cluster - // - google.protobuf.Duration timeout = 3 [(validate.rules).duration = { - required: true - gte {} - }]; - // Specify how `uri` is to be fetched. Today, this requires an explicit // cluster, but in the future we may support dynamic cluster creation or // inline DNS resolution. See `issue @@ -50,7 +36,21 @@ message HttpUri { oneof http_upstream_type { option (validate.required) = true; - // Sets the maximum duration in milliseconds that a response can take to arrive upon request. + // A cluster is created in the Envoy "cluster_manager" config + // section. This field specifies the cluster name. + // + // Example: + // + // .. code-block:: yaml + // + // cluster: jwks_cluster + // string cluster = 2 [(validate.rules).string = {min_bytes: 1}]; } + + // Sets the maximum duration in milliseconds that a response can take to arrive upon request. 
+ google.protobuf.Duration timeout = 3 [(validate.rules).duration = { + required: true + gte {} + }]; } diff --git a/generated_api_shadow/envoy/config/core/v3/protocol.proto b/generated_api_shadow/envoy/config/core/v3/protocol.proto index 400b0dd95a940..0ab6289e9659d 100644 --- a/generated_api_shadow/envoy/config/core/v3/protocol.proto +++ b/generated_api_shadow/envoy/config/core/v3/protocol.proto @@ -92,8 +92,6 @@ message HttpProtocolOptions { // Total duration to keep alive an HTTP request/response stream. If the time limit is reached the stream will be // reset independent of any other timeouts. If not specified, this value is not set. - // The current implementation implements this timeout on downstream connections only. - // [#comment:TODO(shikugawa): add this functionality to upstream.] google.protobuf.Duration max_stream_duration = 4; // Action to take when a client request with a header name containing underscore characters is received. @@ -161,7 +159,7 @@ message Http1ProtocolOptions { bool enable_trailers = 5; } -// [#next-free-field: 14] +// [#next-free-field: 15] message Http2ProtocolOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.Http2ProtocolOptions"; @@ -174,7 +172,7 @@ message Http2ProtocolOptions { // The 16 bit parameter identifier. google.protobuf.UInt32Value identifier = 1 [ - (validate.rules).uint32 = {lte: 65536 gte: 1}, + (validate.rules).uint32 = {lte: 65535 gte: 0}, (validate.rules).message = {required: true} ]; @@ -282,8 +280,25 @@ message Http2ProtocolOptions { // the whole HTTP/2 connection is terminated upon receiving invalid HEADERS frame. However, // when this option is enabled, only the offending stream is terminated. // + // This is overridden by HCM :ref:`stream_error_on_invalid_http_messaging + // ` + // iff present. + // + // This is deprecated in favor of :ref:`override_stream_error_on_invalid_http_message + // ` + // + // See `RFC7540, sec. 8.1 `_ for details. 
+ bool stream_error_on_invalid_http_messaging = 12 [deprecated = true]; + + // Allows invalid HTTP messaging and headers. When this option is disabled (default), then + // the whole HTTP/2 connection is terminated upon receiving invalid HEADERS frame. However, + // when this option is enabled, only the offending stream is terminated. + // + // This overrides any HCM :ref:`stream_error_on_invalid_http_messaging + // ` + // // See `RFC7540, sec. 8.1 `_ for details. - bool stream_error_on_invalid_http_messaging = 12; + google.protobuf.BoolValue override_stream_error_on_invalid_http_message = 14; // [#not-implemented-hide:] // Specifies SETTINGS frame parameters to be sent to the peer, with two exceptions: diff --git a/generated_api_shadow/envoy/config/core/v3/proxy_protocol.proto b/generated_api_shadow/envoy/config/core/v3/proxy_protocol.proto new file mode 100644 index 0000000000000..225a8971f23a6 --- /dev/null +++ b/generated_api_shadow/envoy/config/core/v3/proxy_protocol.proto @@ -0,0 +1,26 @@ +syntax = "proto3"; + +package envoy.config.core.v3; + +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.core.v3"; +option java_outer_classname = "ProxyProtocolProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Proxy Protocol] + +message ProxyProtocolConfig { + enum Version { + // PROXY protocol version 1. Human readable format. + V1 = 0; + + // PROXY protocol version 2. Binary format. + V2 = 1; + } + + // The PROXY protocol version to use. 
See https://www.haproxy.org/download/2.1/doc/proxy-protocol.txt for details + Version version = 1; +} diff --git a/generated_api_shadow/envoy/config/core/v3/socket_option.proto b/generated_api_shadow/envoy/config/core/v3/socket_option.proto index 836b8f5538139..b22169b86aeb8 100644 --- a/generated_api_shadow/envoy/config/core/v3/socket_option.proto +++ b/generated_api_shadow/envoy/config/core/v3/socket_option.proto @@ -40,17 +40,17 @@ message SocketOption { // The numeric name as passed to setsockopt int64 name = 3; - // Because many sockopts take an int value. - SocketState state = 6 [(validate.rules).enum = {defined_only: true}]; - oneof value { option (validate.required) = true; - // Otherwise it's a byte buffer. + // Because many sockopts take an int value. int64 int_value = 4; - // The state in which the option will be applied. When used in BindConfig - // STATE_PREBIND is currently the only valid value. + // Otherwise it's a byte buffer. bytes buf_value = 5; } + + // The state in which the option will be applied. When used in BindConfig + // STATE_PREBIND is currently the only valid value. 
+ SocketState state = 6 [(validate.rules).enum = {defined_only: true}]; } diff --git a/generated_api_shadow/envoy/config/core/v3/substitution_format_string.proto b/generated_api_shadow/envoy/config/core/v3/substitution_format_string.proto new file mode 100644 index 0000000000000..7537a1178b645 --- /dev/null +++ b/generated_api_shadow/envoy/config/core/v3/substitution_format_string.proto @@ -0,0 +1,61 @@ +syntax = "proto3"; + +package envoy.config.core.v3; + +import "google/protobuf/struct.proto"; + +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.core.v3"; +option java_outer_classname = "SubstitutionFormatStringProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Substitution format string] + +// Configuration to use multiple :ref:`command operators ` +// to generate a new string in either plain text or JSON format. +message SubstitutionFormatString { + oneof format { + option (validate.required) = true; + + // Specify a format with command operators to form a text string. + // Its details is described in :ref:`format string`. + // + // .. code-block:: + // + // text_format: %LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=$REQ(:path)% + // + // The following plain text will be created: + // + // .. code-block:: + // + // upstream connect error:204:path=/foo + // + string text_format = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Specify a format with command operators to form a JSON string. + // Its details is described in :ref:`format dictionary`. + // Values are rendered as strings, numbers, or boolean values as appropriate. + // Nested JSON objects may be produced by some command operators (e.g. FILTER_STATE or DYNAMIC_METADATA). + // See the documentation for a specific command operator for details. + // + // .. 
code-block:: + // + // json_format: + // status: %RESPONSE_CODE% + // message: %LOCAL_REPLY_BODY% + // + // The following JSON object would be created: + // + // .. code-block:: json + // + // { + // "status": 500, + // "message": "My error message" + // } + // + google.protobuf.Struct json_format = 2 [(validate.rules).message = {required: true}]; + } +} diff --git a/generated_api_shadow/envoy/config/core/v4alpha/BUILD b/generated_api_shadow/envoy/config/core/v4alpha/BUILD index aeac38ac2833c..a4aa06ce9b44b 100644 --- a/generated_api_shadow/envoy/config/core/v4alpha/BUILD +++ b/generated_api_shadow/envoy/config/core/v4alpha/BUILD @@ -8,8 +8,9 @@ api_proto_package( deps = [ "//envoy/annotations:pkg", "//envoy/config/core/v3:pkg", - "//envoy/type/matcher/v3:pkg", + "//envoy/type/matcher/v4alpha:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//udpa/core/v1:pkg", ], ) diff --git a/generated_api_shadow/envoy/config/core/v4alpha/address.proto b/generated_api_shadow/envoy/config/core/v4alpha/address.proto index a2e6070103aef..ffade4bed75b4 100644 --- a/generated_api_shadow/envoy/config/core/v4alpha/address.proto +++ b/generated_api_shadow/envoy/config/core/v4alpha/address.proto @@ -45,7 +45,7 @@ message SocketAddress { // to the address. An empty address is not allowed. Specify ``0.0.0.0`` or ``::`` // to bind to any address. [#comment:TODO(zuercher) reinstate when implemented: // It is possible to distinguish a Listener address via the prefix/suffix matching - // in :ref:`FilterChainMatch `.] When used + // in :ref:`FilterChainMatch `.] When used // within an upstream :ref:`BindConfig `, the address // controls the source address of outbound connections. 
For :ref:`clusters // `, the cluster type determines whether the diff --git a/generated_api_shadow/envoy/config/core/v4alpha/base.proto b/generated_api_shadow/envoy/config/core/v4alpha/base.proto index dbc3c31e40e44..d7b5fd5836ff9 100644 --- a/generated_api_shadow/envoy/config/core/v4alpha/base.proto +++ b/generated_api_shadow/envoy/config/core/v4alpha/base.proto @@ -94,7 +94,7 @@ message BuildVersion { type.v3.SemanticVersion version = 1; // Free-form build information. - // Envoy defines several well known keys in the source/common/common/version.h file + // Envoy defines several well known keys in the source/common/version/version.h file google.protobuf.Struct metadata = 2; } @@ -332,7 +332,7 @@ message RetryPolicy { // Specifies the allowed number of retries. This parameter is optional and // defaults to 1. - google.protobuf.UInt32Value num_retries = 2; + google.protobuf.UInt32Value max_retries = 2; } // The message specifies how to fetch data from remote and how to verify it. diff --git a/generated_api_shadow/envoy/config/core/v4alpha/config_source.proto b/generated_api_shadow/envoy/config/core/v4alpha/config_source.proto index 0cfc7fc59b94b..b10e0377be65a 100644 --- a/generated_api_shadow/envoy/config/core/v4alpha/config_source.proto +++ b/generated_api_shadow/envoy/config/core/v4alpha/config_source.proto @@ -7,6 +7,8 @@ import "envoy/config/core/v4alpha/grpc_service.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/core/v1/authority.proto"; + import "envoy/annotations/deprecation.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; @@ -19,7 +21,7 @@ option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSIO // [#protodoc-title: Configuration sources] -// xDS API version. This is used to describe both resource and transport +// xDS API and non-xDS services version. 
This is used to describe both resource and transport // protocol versions (in distinct configuration fields). enum ApiVersion { // When not specified, we assume v2, to ease migration to Envoy's stable API @@ -53,17 +55,23 @@ message ApiConfigSource { // the v2 protos is used. REST = 1; - // gRPC v2 API. + // SotW gRPC service. GRPC = 2; // Using the delta xDS gRPC service, i.e. DeltaDiscovery{Request,Response} // rather than Discovery{Request,Response}. Rather than sending Envoy the entire state // with every update, the xDS server only sends what has changed since the last update. - // - // DELTA_GRPC is not yet entirely implemented! Initially, only CDS is available. - // Do not use for other xDSes. - // [#comment:TODO(fredlas) update/remove this warning when appropriate.] DELTA_GRPC = 3; + + // SotW xDS gRPC with ADS. All resources which resolve to this configuration source will be + // multiplexed on a single connection to an ADS endpoint. + // [#not-implemented-hide:] + AGGREGATED_GRPC = 5; + + // Delta xDS gRPC with ADS. All resources which resolve to this configuration source will be + // multiplexed on a single connection to an ADS endpoint. + // [#not-implemented-hide:] + AGGREGATED_DELTA_GRPC = 6; } // API type (gRPC, REST, delta gRPC) @@ -116,6 +124,10 @@ message AggregatedConfigSource { message SelfConfigSource { option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.SelfConfigSource"; + + // API version for xDS transport protocol. This describes the xDS gRPC/REST + // endpoint and version of [Delta]DiscoveryRequest/Response used on the wire. + ApiVersion transport_api_version = 1 [(validate.rules).enum = {defined_only: true}]; } // Rate Limit settings to be applied for discovery requests made by Envoy. @@ -138,10 +150,17 @@ message RateLimitSettings { // ` etc. may either be sourced from the // filesystem or from an xDS API source. Filesystem configs are watched with // inotify for updates. 
-// [#next-free-field: 7] +// [#next-free-field: 8] message ConfigSource { option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.ConfigSource"; + // Authorities that this config source may be used for. An authority specified + // in a *udpa.core.v1.ResourceLocator* is resolved to a *ConfigSource* prior + // to configuration fetch. This field provides the association between + // authority name and configuration source. + // [#not-implemented-hide:] + repeated udpa.core.v1.Authority authorities = 7; + oneof config_source_specifier { option (validate.required) = true; diff --git a/generated_api_shadow/envoy/config/core/v4alpha/extension.proto b/generated_api_shadow/envoy/config/core/v4alpha/extension.proto new file mode 100644 index 0000000000000..4de107580d072 --- /dev/null +++ b/generated_api_shadow/envoy/config/core/v4alpha/extension.proto @@ -0,0 +1,68 @@ +syntax = "proto3"; + +package envoy.config.core.v4alpha; + +import "envoy/config/core/v4alpha/config_source.proto"; + +import "google/protobuf/any.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.core.v4alpha"; +option java_outer_classname = "ExtensionProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Extension configuration] + +// Message type for extension configuration. +// [#next-major-version: revisit all existing typed_config that doesn't use this wrapper.]. +message TypedExtensionConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.TypedExtensionConfig"; + + // The name of an extension. This is not used to select the extension, instead + // it serves the role of an opaque identifier. + string name = 1 [(validate.rules).string = {min_len: 1}]; + + // The typed config for the extension. 
The type URL will be used to identify + // the extension. In the case that the type URL is *udpa.type.v1.TypedStruct*, + // the inner type URL of *TypedStruct* will be utilized. See the + // :ref:`extension configuration overview + // ` for further details. + google.protobuf.Any typed_config = 2 [(validate.rules).any = {required: true}]; +} + +// Configuration source specifier for a late-bound extension configuration. The +// parent resource is warmed until all the initial extension configurations are +// received, unless the flag to apply the default configuration is set. +// Subsequent extension updates are atomic on a per-worker basis. Once an +// extension configuration is applied to a request or a connection, it remains +// constant for the duration of processing. If the initial delivery of the +// extension configuration fails, due to a timeout for example, the optional +// default configuration is applied. Without a default configuration, the +// extension is disabled, until an extension configuration is received. The +// behavior of a disabled extension depends on the context. For example, a +// filter chain with a disabled extension filter rejects all incoming streams. +message ExtensionConfigSource { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.ExtensionConfigSource"; + + ConfigSource config_source = 1 [(validate.rules).any = {required: true}]; + + // Optional default configuration to use as the initial configuration if + // there is a failure to receive the initial extension configuration or if + // `apply_default_config_without_warming` flag is set. + google.protobuf.Any default_config = 2; + + // Use the default config as the initial configuration without warming and + // waiting for the first discovery response. Requires the default configuration + // to be supplied. + bool apply_default_config_without_warming = 3; + + // A set of permitted extension type URLs. 
Extension configuration updates are rejected + // if they do not match any type URL in the set. + repeated string type_urls = 4 [(validate.rules).repeated = {min_items: 1}]; +} diff --git a/generated_api_shadow/envoy/config/core/v4alpha/grpc_service.proto b/generated_api_shadow/envoy/config/core/v4alpha/grpc_service.proto index 64bbc6b5f0778..51f11fa1f3467 100644 --- a/generated_api_shadow/envoy/config/core/v4alpha/grpc_service.proto +++ b/generated_api_shadow/envoy/config/core/v4alpha/grpc_service.proto @@ -8,6 +8,7 @@ import "google/protobuf/any.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/empty.proto"; import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; import "udpa/annotations/sensitive.proto"; import "udpa/annotations/status.proto"; @@ -35,9 +36,15 @@ message GrpcService { // in the :ref:`Cluster ` :ref:`transport_socket // `. string cluster_name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // The `:authority` header in the grpc request. If this field is not set, the authority header value will be `cluster_name`. + // Note that this authority does not override the SNI. The SNI is provided by the transport socket of the cluster. + string authority = 2 + [(validate.rules).string = + {min_bytes: 0 max_bytes: 16384 well_known_regex: HTTP_HEADER_VALUE strict: false}]; } - // [#next-free-field: 7] + // [#next-free-field: 9] message GoogleGrpc { option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.GrpcService.GoogleGrpc"; @@ -202,6 +209,30 @@ message GrpcService { } } + // Channel arguments. 
+ message ChannelArgs { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs"; + + message Value { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs.Value"; + + // Pointer values are not supported, since they don't make any sense when + // delivered via the API. + oneof value_specifier { + option (validate.required) = true; + + string string_value = 1; + + int64 int_value = 2; + } + } + + // See grpc_types.h GRPC_ARG #defines for keys that work here. + map args = 1; + } + // The target URI when using the `Google C++ gRPC client // `_. SSL credentials will be supplied in // :ref:`channel_credentials `. @@ -232,6 +263,13 @@ message GrpcService { // Additional configuration for site-specific customizations of the Google // gRPC library. google.protobuf.Struct config = 6; + + // How many bytes each stream can buffer internally. + // If not set an implementation defined default is applied (1MiB). + google.protobuf.UInt32Value per_stream_buffer_limit_bytes = 7; + + // Custom channel args. 
+ ChannelArgs channel_args = 8; } reserved 4; diff --git a/generated_api_shadow/envoy/config/core/v4alpha/health_check.proto b/generated_api_shadow/envoy/config/core/v4alpha/health_check.proto index 1975c309a7ded..39badc334b01c 100644 --- a/generated_api_shadow/envoy/config/core/v4alpha/health_check.proto +++ b/generated_api_shadow/envoy/config/core/v4alpha/health_check.proto @@ -4,7 +4,7 @@ package envoy.config.core.v4alpha; import "envoy/config/core/v4alpha/base.proto"; import "envoy/config/core/v4alpha/event_service_config.proto"; -import "envoy/type/matcher/v3/string.proto"; +import "envoy/type/matcher/v4alpha/string.proto"; import "envoy/type/v3/http.proto"; import "envoy/type/v3/range.proto"; @@ -54,7 +54,7 @@ enum HealthStatus { DEGRADED = 5; } -// [#next-free-field: 23] +// [#next-free-field: 24] message HealthCheck { option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.HealthCheck"; @@ -125,9 +125,9 @@ message HealthCheck { // An optional service name parameter which is used to validate the identity of // the health checked cluster using a :ref:`StringMatcher - // `. See the :ref:`architecture overview + // `. See the :ref:`architecture overview // ` for more information. - type.matcher.v3.StringMatcher service_name_matcher = 11; + type.matcher.v4alpha.StringMatcher service_name_matcher = 11; } message TcpHealthCheck { @@ -206,7 +206,7 @@ message HealthCheck { // Specifies the ALPN protocols for health check connections. This is useful if the // corresponding upstream is using ALPN-based :ref:`FilterChainMatch - // ` along with different protocols for health checks + // ` along with different protocols for health checks // versus data connections. If empty, no ALPN protocols will be set on health check connections. repeated string alpn_protocols = 1; } @@ -323,4 +323,36 @@ message HealthCheck { // This allows overriding the cluster TLS settings, just for health check connections. 
TlsOptions tls_options = 21; + + // Optional key/value pairs that will be used to match a transport socket from those specified in the cluster's + // :ref:`transport socket matches `. + // For example, the following match criteria + // + // .. code-block:: yaml + // + // transport_socket_match_criteria: + // useMTLS: true + // + // Will match the following :ref:`cluster socket match ` + // + // .. code-block:: yaml + // + // transport_socket_matches: + // - name: "useMTLS" + // match: + // useMTLS: true + // transport_socket: + // name: envoy.transport_sockets.tls + // config: { ... } # tls socket configuration + // + // If this field is set, then for health checks it will supersede an entry of *envoy.transport_socket* in the + // :ref:`LbEndpoint.Metadata `. + // This allows using different transport socket capabilities for health checking versus proxying to the + // endpoint. + // + // If the key/value pairs specified do not match any + // :ref:`transport socket matches `, + // the cluster's :ref:`transport socket ` + // will be used for health check socket configuration. + google.protobuf.Struct transport_socket_match_criteria = 23; } diff --git a/generated_api_shadow/envoy/config/core/v4alpha/protocol.proto b/generated_api_shadow/envoy/config/core/v4alpha/protocol.proto index dcb205444524f..cc1b99d0a0485 100644 --- a/generated_api_shadow/envoy/config/core/v4alpha/protocol.proto +++ b/generated_api_shadow/envoy/config/core/v4alpha/protocol.proto @@ -92,8 +92,6 @@ message HttpProtocolOptions { // Total duration to keep alive an HTTP request/response stream. If the time limit is reached the stream will be // reset independent of any other timeouts. If not specified, this value is not set. - // The current implementation implements this timeout on downstream connections only. - // [#comment:TODO(shikugawa): add this functionality to upstream.] 
google.protobuf.Duration max_stream_duration = 4; // Action to take when a client request with a header name containing underscore characters is received. @@ -161,7 +159,7 @@ message Http1ProtocolOptions { bool enable_trailers = 5; } -// [#next-free-field: 14] +// [#next-free-field: 15] message Http2ProtocolOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.Http2ProtocolOptions"; @@ -174,7 +172,7 @@ message Http2ProtocolOptions { // The 16 bit parameter identifier. google.protobuf.UInt32Value identifier = 1 [ - (validate.rules).uint32 = {lte: 65536 gte: 1}, + (validate.rules).uint32 = {lte: 65535 gte: 0}, (validate.rules).message = {required: true} ]; @@ -282,8 +280,25 @@ message Http2ProtocolOptions { // the whole HTTP/2 connection is terminated upon receiving invalid HEADERS frame. However, // when this option is enabled, only the offending stream is terminated. // + // This is overridden by HCM :ref:`stream_error_on_invalid_http_messaging + // ` + // iff present. + // + // This is deprecated in favor of :ref:`override_stream_error_on_invalid_http_message + // ` + // + // See `RFC7540, sec. 8.1 `_ for details. + bool hidden_envoy_deprecated_stream_error_on_invalid_http_messaging = 12 [deprecated = true]; + + // Allows invalid HTTP messaging and headers. When this option is disabled (default), then + // the whole HTTP/2 connection is terminated upon receiving invalid HEADERS frame. However, + // when this option is enabled, only the offending stream is terminated. + // + // This overrides any HCM :ref:`stream_error_on_invalid_http_messaging + // ` + // // See `RFC7540, sec. 8.1 `_ for details. 
- bool stream_error_on_invalid_http_messaging = 12; + google.protobuf.BoolValue override_stream_error_on_invalid_http_message = 14; // [#not-implemented-hide:] // Specifies SETTINGS frame parameters to be sent to the peer, with two exceptions: diff --git a/generated_api_shadow/envoy/config/core/v4alpha/proxy_protocol.proto b/generated_api_shadow/envoy/config/core/v4alpha/proxy_protocol.proto new file mode 100644 index 0000000000000..c7a8d1f454ddf --- /dev/null +++ b/generated_api_shadow/envoy/config/core/v4alpha/proxy_protocol.proto @@ -0,0 +1,30 @@ +syntax = "proto3"; + +package envoy.config.core.v4alpha; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.core.v4alpha"; +option java_outer_classname = "ProxyProtocolProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Proxy Protocol] + +message ProxyProtocolConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.ProxyProtocolConfig"; + + enum Version { + // PROXY protocol version 1. Human readable format. + V1 = 0; + + // PROXY protocol version 2. Binary format. + V2 = 1; + } + + // The PROXY protocol version to use. 
See https://www.haproxy.org/download/2.1/doc/proxy-protocol.txt for details + Version version = 1; +} diff --git a/generated_api_shadow/envoy/config/core/v4alpha/substitution_format_string.proto b/generated_api_shadow/envoy/config/core/v4alpha/substitution_format_string.proto new file mode 100644 index 0000000000000..2d3e0a21b7905 --- /dev/null +++ b/generated_api_shadow/envoy/config/core/v4alpha/substitution_format_string.proto @@ -0,0 +1,65 @@ +syntax = "proto3"; + +package envoy.config.core.v4alpha; + +import "google/protobuf/struct.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.core.v4alpha"; +option java_outer_classname = "SubstitutionFormatStringProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Substitution format string] + +// Configuration to use multiple :ref:`command operators ` +// to generate a new string in either plain text or JSON format. +message SubstitutionFormatString { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.SubstitutionFormatString"; + + oneof format { + option (validate.required) = true; + + // Specify a format with command operators to form a text string. + // Its details are described in :ref:`format string`. + // + // .. code-block:: + // + // text_format: %LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=$REQ(:path)% + // + // The following plain text will be created: + // + // .. code-block:: + // + // upstream connect error:204:path=/foo + // + string text_format = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Specify a format with command operators to form a JSON string. + // Its details are described in :ref:`format dictionary`. + // Values are rendered as strings, numbers, or boolean values as appropriate. 
+ // Nested JSON objects may be produced by some command operators (e.g. FILTER_STATE or DYNAMIC_METADATA). + // See the documentation for a specific command operator for details. + // + // .. code-block:: + // + // json_format: + // status: %RESPONSE_CODE% + // message: %LOCAL_REPLY_BODY% + // + // The following JSON object would be created: + // + // .. code-block:: json + // + // { + // "status": 500, + // "message": "My error message" + // } + // + google.protobuf.Struct json_format = 2 [(validate.rules).message = {required: true}]; + } +} diff --git a/generated_api_shadow/envoy/config/endpoint/v3/endpoint.proto b/generated_api_shadow/envoy/config/endpoint/v3/endpoint.proto index a65db5e7d7d8e..7233d5f9561a8 100644 --- a/generated_api_shadow/envoy/config/endpoint/v3/endpoint.proto +++ b/generated_api_shadow/envoy/config/endpoint/v3/endpoint.proto @@ -40,6 +40,7 @@ message ClusterLoadAssignment { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.ClusterLoadAssignment.Policy"; + // [#not-implemented-hide:] message DropOverload { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.ClusterLoadAssignment.Policy.DropOverload"; @@ -72,11 +73,12 @@ message ClusterLoadAssignment { // "throttle"_drop = 60% // "lb"_drop = 20% // 50% of the remaining 'actual' load, which is 40%. // actual_outgoing_load = 20% // remaining after applying all categories. + // [#not-implemented-hide:] repeated DropOverload drop_overloads = 2; // Priority levels and localities are considered overprovisioned with this // factor (in percentage). This means that we don't consider a priority - // level or locality unhealthy until the percentage of healthy hosts + // level or locality unhealthy until the fraction of healthy hosts // multiplied by the overprovisioning factor drops below 100. 
// With the default value 140(1.4), Envoy doesn't consider a priority level // or a locality unhealthy until their percentage of healthy hosts drops diff --git a/generated_api_shadow/envoy/config/endpoint/v3/endpoint_components.proto b/generated_api_shadow/envoy/config/endpoint/v3/endpoint_components.proto index 8e800745df3f6..b880a38d1a3ea 100644 --- a/generated_api_shadow/envoy/config/endpoint/v3/endpoint_components.proto +++ b/generated_api_shadow/envoy/config/endpoint/v3/endpoint_components.proto @@ -76,36 +76,36 @@ message Endpoint { message LbEndpoint { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.endpoint.LbEndpoint"; - core.v3.HealthStatus health_status = 2; - - // [#not-implemented-hide:] - core.v3.Metadata metadata = 3; - - // Optional health status when known and supplied by EDS server. - google.protobuf.UInt32Value load_balancing_weight = 4 [(validate.rules).uint32 = {gte: 1}]; - // Upstream host identifier or a named reference. oneof host_identifier { - // The endpoint metadata specifies values that may be used by the load - // balancer to select endpoints in a cluster for a given request. The filter - // name should be specified as *envoy.lb*. An example boolean key-value pair - // is *canary*, providing the optional canary status of the upstream host. - // This may be matched against in a route's - // :ref:`RouteAction ` metadata_match field - // to subset the endpoints considered in cluster load balancing. Endpoint endpoint = 1; - // The optional load balancing weight of the upstream host; at least 1. - // Envoy uses the load balancing weight in some of the built in load - // balancers. The load balancing weight for an endpoint is divided by the sum - // of the weights of all endpoints in the endpoint's locality to produce a - // percentage of traffic for the endpoint. This percentage is then further - // weighted by the endpoint's locality's load balancing weight from - // LocalityLbEndpoints. 
If unspecified, each host is presumed to have equal - // weight in a locality. The sum of the weights of all endpoints in the - // endpoint's locality must not exceed uint32_t maximal value (4294967295). + // [#not-implemented-hide:] string endpoint_name = 5; } + + // Optional health status when known and supplied by EDS server. + core.v3.HealthStatus health_status = 2; + + // The endpoint metadata specifies values that may be used by the load + // balancer to select endpoints in a cluster for a given request. The filter + // name should be specified as *envoy.lb*. An example boolean key-value pair + // is *canary*, providing the optional canary status of the upstream host. + // This may be matched against in a route's + // :ref:`RouteAction ` metadata_match field + // to subset the endpoints considered in cluster load balancing. + core.v3.Metadata metadata = 3; + + // The optional load balancing weight of the upstream host; at least 1. + // Envoy uses the load balancing weight in some of the built in load + // balancers. The load balancing weight for an endpoint is divided by the sum + // of the weights of all endpoints in the endpoint's locality to produce a + // percentage of traffic for the endpoint. This percentage is then further + // weighted by the endpoint's locality's load balancing weight from + // LocalityLbEndpoints. If unspecified, each host is presumed to have equal + // weight in a locality. The sum of the weights of all endpoints in the + // endpoint's locality must not exceed uint32_t maximal value (4294967295). + google.protobuf.UInt32Value load_balancing_weight = 4 [(validate.rules).uint32 = {gte: 1}]; } // A group of endpoints belonging to a Locality. 
diff --git a/generated_api_shadow/envoy/config/endpoint/v3/load_report.proto b/generated_api_shadow/envoy/config/endpoint/v3/load_report.proto index 01eb7b12cf1aa..3f067737ec25d 100644 --- a/generated_api_shadow/envoy/config/endpoint/v3/load_report.proto +++ b/generated_api_shadow/envoy/config/endpoint/v3/load_report.proto @@ -17,11 +17,11 @@ option java_outer_classname = "LoadReportProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; -// These are stats Envoy reports to GLB every so often. Report frequency is -// defined by +// [#protodoc-title: Load Report] + +// These are stats Envoy reports to the management server at a frequency defined by // :ref:`LoadStatsResponse.load_reporting_interval`. // Stats per upstream region/zone and optionally per subzone. -// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. // [#next-free-field: 9] message UpstreamLocalityStats { option (udpa.annotations.versioning).previous_message_type = @@ -60,7 +60,6 @@ message UpstreamLocalityStats { uint32 priority = 6; } -// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. // [#next-free-field: 8] message UpstreamEndpointStats { option (udpa.annotations.versioning).previous_message_type = @@ -103,7 +102,6 @@ message UpstreamEndpointStats { repeated EndpointLoadMetricStats load_metric_stats = 5; } -// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. message EndpointLoadMetricStats { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.endpoint.EndpointLoadMetricStats"; @@ -121,7 +119,6 @@ message EndpointLoadMetricStats { // Per cluster load stats. Envoy reports these stats a management server in a // :ref:`LoadStatsRequest` -// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. 
// Next ID: 7 // [#next-free-field: 7] message ClusterStats { diff --git a/generated_api_shadow/envoy/config/filter/http/cache/v2alpha/cache.proto b/generated_api_shadow/envoy/config/filter/http/cache/v2alpha/cache.proto index a9e51cf56a103..d08b5462fd88e 100644 --- a/generated_api_shadow/envoy/config/filter/http/cache/v2alpha/cache.proto +++ b/generated_api_shadow/envoy/config/filter/http/cache/v2alpha/cache.proto @@ -57,7 +57,7 @@ message CacheConfig { // contents of a response, as described by // https://httpwg.org/specs/rfc7234.html#caching.negotiated.responses. // - // During insertion, *allowed_vary_headers* acts as a whitelist: if a + // During insertion, *allowed_vary_headers* acts as an allowlist: if a // response's *vary* header mentions any header names that aren't in // *allowed_vary_headers*, that response will not be cached. // diff --git a/generated_api_shadow/envoy/config/filter/http/ext_authz/v2/ext_authz.proto b/generated_api_shadow/envoy/config/filter/http/ext_authz/v2/ext_authz.proto index a407f4628d2e4..db188a572ae09 100644 --- a/generated_api_shadow/envoy/config/filter/http/ext_authz/v2/ext_authz.proto +++ b/generated_api_shadow/envoy/config/filter/http/ext_authz/v2/ext_authz.proto @@ -24,7 +24,7 @@ option (udpa.annotations.file_status).package_version_status = FROZEN; // External Authorization :ref:`configuration overview `. // [#extension: envoy.filters.http.ext_authz] -// [#next-free-field: 11] +// [#next-free-field: 12] message ExtAuthz { // External authorization service configuration. oneof services { @@ -98,6 +98,15 @@ message ExtAuthz { // If this field is not specified, the filter will be enabled for all requests. api.v2.core.RuntimeFractionalPercent filter_enabled = 9; + // Specifies whether to deny the requests, when the filter is disabled. + // If :ref:`runtime_key ` is specified, + // Envoy will lookup the runtime key to determine whether to deny request for + // filter protected path at filter disabling. 
If filter is disabled in + // typed_per_filter_config for the path, requests will not be denied. + // + // If this field is not specified, all requests will be allowed when disabled. + api.v2.core.RuntimeFeatureFlag deny_at_disable = 11; + // Specifies if the peer certificate is sent to the external service. // // When this field is true, Envoy will include the peer X.509 certificate, if available, in the diff --git a/generated_api_shadow/envoy/config/filter/http/health_check/v2/health_check.proto b/generated_api_shadow/envoy/config/filter/http/health_check/v2/health_check.proto index d7f6da8c82d43..7f2a486b26188 100644 --- a/generated_api_shadow/envoy/config/filter/http/health_check/v2/health_check.proto +++ b/generated_api_shadow/envoy/config/filter/http/health_check/v2/health_check.proto @@ -37,6 +37,11 @@ message HealthCheck { // If operating in non-pass-through mode, specifies a set of upstream cluster // names and the minimum percentage of servers in each of those clusters that // must be healthy or degraded in order for the filter to return a 200. + // + // .. note:: + // + // This value is interpreted as an integer by truncating, so 12.50% will be calculated + // as if it were 12%. map cluster_min_healthy_percentages = 4; // Specifies a set of health check request headers to match on. The health check filter will diff --git a/generated_api_shadow/envoy/config/filter/network/client_ssl_auth/v2/client_ssl_auth.proto b/generated_api_shadow/envoy/config/filter/network/client_ssl_auth/v2/client_ssl_auth.proto index d1f459078f20e..4da6d97ca2992 100644 --- a/generated_api_shadow/envoy/config/filter/network/client_ssl_auth/v2/client_ssl_auth.proto +++ b/generated_api_shadow/envoy/config/filter/network/client_ssl_auth/v2/client_ssl_auth.proto @@ -41,6 +41,6 @@ message ClientSSLAuth { // An optional list of IP address and subnet masks that should be white // listed for access by the filter. If no list is provided, there is no - // IP white list. + // IP allowlist. 
repeated api.v2.core.CidrRange ip_white_list = 4; } diff --git a/generated_api_shadow/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto b/generated_api_shadow/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto index c78e69b2ae309..06b13acb2f632 100644 --- a/generated_api_shadow/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto +++ b/generated_api_shadow/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto @@ -232,7 +232,7 @@ message HttpConnectionManager { // Determines if upgrades are enabled or disabled by default. Defaults to true. // This can be overridden on a per-route basis with :ref:`cluster // ` as documented in the - // :ref:`upgrade documentation `. + // :ref:`upgrade documentation `. google.protobuf.BoolValue enabled = 3; } @@ -262,8 +262,8 @@ message HttpConnectionManager { } // A list of individual HTTP filters that make up the filter chain for - // requests made to the connection manager. Order matters as the filters are - // processed sequentially as request events happen. + // requests made to the connection manager. :ref:`Order matters ` + // as the filters are processed sequentially as request events happen. repeated HttpFilter http_filters = 5; // Whether the connection manager manipulates the :ref:`config_http_conn_man_headers_user-agent` @@ -332,6 +332,16 @@ message HttpConnectionManager { // is terminated with a 408 Request Timeout error code if no upstream response // header has been received, otherwise a stream reset occurs. // + // This timeout also specifies the amount of time that Envoy will wait for the peer to open enough + // window to write any remaining stream data once the entirety of stream data (local end stream is + // true) has been buffered pending available window. 
In other words, this timeout defends against + // a peer that does not release enough window to completely write the stream, even though all + // data has been proxied within available flow control windows. If the timeout is hit in this + // case, the :ref:`tx_flush_timeout ` counter will be + // incremented. Note that :ref:`max_stream_duration + // ` does not apply to this corner + // case. + // // Note that it is possible to idle timeout even if the wire traffic for a stream is non-idle, due // to the granularity of events presented to the connection manager. For example, while receiving // very large request headers, it may be the case that there is traffic regularly arriving on the @@ -487,17 +497,17 @@ message HttpConnectionManager { // true in the future. When not specified, this value may be overridden by the // runtime variable // :ref:`http_connection_manager.normalize_path`. - // See `Normalization and Comparison ` + // See `Normalization and Comparison `_ // for details of normalization. // Note that Envoy does not perform - // `case normalization ` + // `case normalization `_ google.protobuf.BoolValue normalize_path = 30; // Determines if adjacent slashes in the path are merged into one before any processing of // requests by HTTP filters or routing. This affects the upstream *:path* header as well. Without // setting this option, incoming requests with path `//dir///file` will not match against route // with `prefix` match set to `/dir`. Defaults to `false`. Note that slash merging is not part of - // `HTTP spec ` and is provided for convenience. + // `HTTP spec `_ and is provided for convenience. bool merge_slashes = 33; // The configuration of the request ID extension. 
This includes operations such as diff --git a/generated_api_shadow/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto b/generated_api_shadow/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto index caca630fd297d..948d7c349ff00 100644 --- a/generated_api_shadow/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto +++ b/generated_api_shadow/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto @@ -34,10 +34,10 @@ message RedisProxy { // because replication is asynchronous and requires some delay. You need to ensure that your // application can tolerate stale data. enum ReadPolicy { - // Default mode. Read from the current master node. + // Default mode. Read from the current primary node. MASTER = 0; - // Read from the master, but if it is unavailable, read from replica nodes. + // Read from the primary, but if it is unavailable, read from replica nodes. PREFER_MASTER = 1; // Read from replica nodes. If multiple replica nodes are present within a shard, a random @@ -45,11 +45,11 @@ message RedisProxy { REPLICA = 2; // Read from the replica nodes (similar to REPLICA), but if all replicas are unavailable (not - // present or unhealthy), read from the master. + // present or unhealthy), read from the primary. PREFER_REPLICA = 3; - // Read from any node of the cluster. A random node is selected among the master and replicas, - // healthy nodes have precedent over unhealthy nodes. + // Read from any node of the cluster. A random node is selected among the primary and + // replicas, healthy nodes have precedent over unhealthy nodes. ANY = 4; } @@ -112,7 +112,7 @@ message RedisProxy { // count. bool enable_command_stats = 8; - // Read policy. The default is to read from the master. + // Read policy. The default is to read from the primary. 
ReadPolicy read_policy = 7 [(validate.rules).enum = {defined_only: true}]; } diff --git a/generated_api_shadow/envoy/config/filter/udp/dns_filter/v2alpha/dns_filter.proto b/generated_api_shadow/envoy/config/filter/udp/dns_filter/v2alpha/dns_filter.proto deleted file mode 100644 index de2608d44306c..0000000000000 --- a/generated_api_shadow/envoy/config/filter/udp/dns_filter/v2alpha/dns_filter.proto +++ /dev/null @@ -1,48 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.udp.dns_filter.v2alpha; - -import "envoy/api/v2/core/base.proto"; -import "envoy/data/dns/v2alpha/dns_table.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.udp.dns_filter.v2alpha"; -option java_outer_classname = "DnsFilterProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filter.udp.dns_filter.v3alpha"; -option (udpa.annotations.file_status).work_in_progress = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: DNS Filter] -// DNS Filter :ref:`configuration overview `. -// [#extension: envoy.filters.udp_listener.dns_filter] - -// Configuration for the DNS filter. -message DnsFilterConfig { - // This message contains the configuration for the Dns Filter operating - // in a server context. This message will contain the virtual hosts and - // associated addresses with which Envoy will respond to queries - message ServerContextConfig { - oneof config_source { - option (validate.required) = true; - - // Load the configuration specified from the control plane - data.dns.v2alpha.DnsTable inline_dns_table = 1; - - // Seed the filter configuration from an external path. 
This source - // is a yaml formatted file that contains the DnsTable driving Envoy's - // responses to DNS queries - api.v2.core.DataSource external_dns_table = 2; - } - } - - // The stat prefix used when emitting DNS filter statistics - string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; - - // Server context configuration - ServerContextConfig server_config = 2; -} diff --git a/generated_api_shadow/envoy/config/filter/udp/udp_proxy/v2alpha/udp_proxy.proto b/generated_api_shadow/envoy/config/filter/udp/udp_proxy/v2alpha/udp_proxy.proto index 5079c1f0df484..06dc150d5c70b 100644 --- a/generated_api_shadow/envoy/config/filter/udp/udp_proxy/v2alpha/udp_proxy.proto +++ b/generated_api_shadow/envoy/config/filter/udp/udp_proxy/v2alpha/udp_proxy.proto @@ -4,13 +4,16 @@ package envoy.config.filter.udp.udp_proxy.v2alpha; import "google/protobuf/duration.proto"; +import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.udp.udp_proxy.v2alpha"; option java_outer_classname = "UdpProxyProto"; option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; +option (udpa.annotations.file_migrate).move_to_package = + "envoy.extensions.filters.udp.udp_proxy.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: UDP proxy] // UDP proxy :ref:`configuration overview `. 
diff --git a/generated_api_shadow/envoy/config/listener/v3/BUILD b/generated_api_shadow/envoy/config/listener/v3/BUILD index e67314794940d..2ae77584b1194 100644 --- a/generated_api_shadow/envoy/config/listener/v3/BUILD +++ b/generated_api_shadow/envoy/config/listener/v3/BUILD @@ -14,5 +14,6 @@ api_proto_package( "//envoy/extensions/transport_sockets/tls/v3:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//udpa/core/v1:pkg", ], ) diff --git a/generated_api_shadow/envoy/config/listener/v3/listener.proto b/generated_api_shadow/envoy/config/listener/v3/listener.proto index 2b4ecb826d86e..0d0dc5d817a99 100644 --- a/generated_api_shadow/envoy/config/listener/v3/listener.proto +++ b/generated_api_shadow/envoy/config/listener/v3/listener.proto @@ -5,6 +5,7 @@ package envoy.config.listener.v3; import "envoy/config/accesslog/v3/accesslog.proto"; import "envoy/config/core/v3/address.proto"; import "envoy/config/core/v3/base.proto"; +import "envoy/config/core/v3/extension.proto"; import "envoy/config/core/v3/socket_option.proto"; import "envoy/config/listener/v3/api_listener.proto"; import "envoy/config/listener/v3/listener_components.proto"; @@ -14,6 +15,9 @@ import "google/api/annotations.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/core/v1/collection_entry.proto"; + +import "udpa/annotations/security.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -26,7 +30,13 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Listener configuration] // Listener :ref:`configuration overview ` -// [#next-free-field: 23] +// Listener list collections. Entries are *Listener* resources or references. 
+// [#not-implemented-hide:] +message ListenerCollection { + udpa.core.v1.CollectionEntry entries = 1; +} + +// [#next-free-field: 24] message Listener { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Listener"; @@ -106,7 +116,8 @@ message Listener { // Soft limit on size of the listener’s new connection read and write buffers. // If unspecified, an implementation defined default is applied (1MiB). - google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5; + google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5 + [(udpa.annotations.security).configure_for_untrusted_downstream = true]; // Listener metadata. core.v3.Metadata metadata = 6; @@ -237,5 +248,15 @@ message Listener { // emitted by this listener. repeated accesslog.v3.AccessLog access_log = 22; + // If the protocol in the listener socket address in :ref:`protocol + // ` is :ref:`UDP + // `, this field specifies the actual udp + // writer to create, i.e. :ref:`name ` + // = "udp_default_writer" for creating a udp writer with writing in passthrough mode, + // = "udp_gso_batch_writer" for creating a udp writer with writing in batch mode. + // If not present, treat it as "udp_default_writer". 
+ // [#not-implemented-hide:] + core.v3.TypedExtensionConfig udp_writer_config = 23; + google.protobuf.BoolValue hidden_envoy_deprecated_use_original_dst = 4 [deprecated = true]; } diff --git a/generated_api_shadow/envoy/config/listener/v3/listener_components.proto b/generated_api_shadow/envoy/config/listener/v3/listener_components.proto index 25d39e24620eb..138b168ce5d47 100644 --- a/generated_api_shadow/envoy/config/listener/v3/listener_components.proto +++ b/generated_api_shadow/envoy/config/listener/v3/listener_components.proto @@ -4,7 +4,7 @@ package envoy.config.listener.v3; import "envoy/config/core/v3/address.proto"; import "envoy/config/core/v3/base.proto"; -import "envoy/extensions/transport_sockets/tls/v3/cert.proto"; +import "envoy/extensions/transport_sockets/tls/v3/tls.proto"; import "envoy/type/v3/range.proto"; import "google/protobuf/any.proto"; @@ -280,16 +280,16 @@ message ListenerFilter { // :ref:`supported filter `. string name = 1 [(validate.rules).string = {min_bytes: 1}]; - ListenerFilterChainMatchPredicate filter_disabled = 4; - // Filter specific configuration which depends on the filter being instantiated. // See the supported filters for further documentation. oneof config_type { - // Optional match predicate used to disable the filter. The filter is enabled when this field is empty. - // See :ref:`ListenerFilterChainMatchPredicate ` - // for further examples. google.protobuf.Any typed_config = 3; google.protobuf.Struct hidden_envoy_deprecated_config = 2 [deprecated = true]; } + + // Optional match predicate used to disable the filter. The filter is enabled when this field is empty. + // See :ref:`ListenerFilterChainMatchPredicate ` + // for further examples. 
+ ListenerFilterChainMatchPredicate filter_disabled = 4; } diff --git a/generated_api_shadow/envoy/config/listener/v3/quic_config.proto b/generated_api_shadow/envoy/config/listener/v3/quic_config.proto index 9949da2e0d708..c024be95bacee 100644 --- a/generated_api_shadow/envoy/config/listener/v3/quic_config.proto +++ b/generated_api_shadow/envoy/config/listener/v3/quic_config.proto @@ -2,6 +2,8 @@ syntax = "proto3"; package envoy.config.listener.v3; +import "envoy/config/core/v3/base.proto"; + import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; @@ -16,7 +18,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: QUIC listener Config] // Configuration specific to the QUIC protocol. -// Next id: 4 +// Next id: 5 message QuicProtocolOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.listener.QuicProtocolOptions"; @@ -32,4 +34,8 @@ message QuicProtocolOptions { // Connection timeout in milliseconds before the crypto handshake is finished. // 20000ms if not specified. google.protobuf.Duration crypto_handshake_timeout = 3; + + // Runtime flag that controls whether the listener is enabled or not. If not specified, defaults + // to enabled. 
+ core.v3.RuntimeFeatureFlag enabled = 4; } diff --git a/generated_api_shadow/envoy/config/listener/v3/udp_default_writer_config.proto b/generated_api_shadow/envoy/config/listener/v3/udp_default_writer_config.proto new file mode 100644 index 0000000000000..707a66c7b5c48 --- /dev/null +++ b/generated_api_shadow/envoy/config/listener/v3/udp_default_writer_config.proto @@ -0,0 +1,21 @@ +syntax = "proto3"; + +package envoy.config.listener.v3; + +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.config.listener.v3"; +option java_outer_classname = "UdpDefaultWriterConfigProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Udp Default Writer Config] + +// [#not-implemented-hide:] +// Configuration specific to the Udp Default Writer. +message UdpDefaultWriterOptions { +} diff --git a/generated_api_shadow/envoy/config/listener/v3/udp_gso_batch_writer_config.proto b/generated_api_shadow/envoy/config/listener/v3/udp_gso_batch_writer_config.proto new file mode 100644 index 0000000000000..134cb6a42dd22 --- /dev/null +++ b/generated_api_shadow/envoy/config/listener/v3/udp_gso_batch_writer_config.proto @@ -0,0 +1,21 @@ +syntax = "proto3"; + +package envoy.config.listener.v3; + +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.config.listener.v3"; +option java_outer_classname = "UdpGsoBatchWriterConfigProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Udp Gso Batch Writer Config] + +// [#not-implemented-hide:] +// Configuration specific to the Udp Gso Batch Writer. 
+message UdpGsoBatchWriterOptions { +} diff --git a/generated_api_shadow/envoy/config/listener/v4alpha/BUILD b/generated_api_shadow/envoy/config/listener/v4alpha/BUILD new file mode 100644 index 0000000000000..cde02c9329192 --- /dev/null +++ b/generated_api_shadow/envoy/config/listener/v4alpha/BUILD @@ -0,0 +1,16 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/accesslog/v4alpha:pkg", + "//envoy/config/core/v4alpha:pkg", + "//envoy/config/listener/v3:pkg", + "//envoy/type/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//udpa/core/v1:pkg", + ], +) diff --git a/generated_api_shadow/envoy/config/listener/v4alpha/api_listener.proto b/generated_api_shadow/envoy/config/listener/v4alpha/api_listener.proto new file mode 100644 index 0000000000000..b8d076c365832 --- /dev/null +++ b/generated_api_shadow/envoy/config/listener/v4alpha/api_listener.proto @@ -0,0 +1,32 @@ +syntax = "proto3"; + +package envoy.config.listener.v4alpha; + +import "google/protobuf/any.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.config.listener.v4alpha"; +option java_outer_classname = "ApiListenerProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: API listener] + +// Describes a type of API listener, which is used in non-proxy clients. The type of API +// exposed to the non-proxy application depends on the type of API listener. +message ApiListener { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.listener.v3.ApiListener"; + + // The type in this field determines the type of API listener. 
At present, the following + // types are supported: + // envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager (HTTP) + // [#next-major-version: In the v3 API, replace this Any field with a oneof containing the + // specific config message for each type of API listener. We could not do this in v2 because + // it would have caused circular dependencies for go protos: lds.proto depends on this file, + // and http_connection_manager.proto depends on rds.proto, which is in the same directory as + // lds.proto, so lds.proto cannot depend on this file.] + google.protobuf.Any api_listener = 1; +} diff --git a/generated_api_shadow/envoy/config/listener/v4alpha/listener.proto b/generated_api_shadow/envoy/config/listener/v4alpha/listener.proto new file mode 100644 index 0000000000000..c188ecb244904 --- /dev/null +++ b/generated_api_shadow/envoy/config/listener/v4alpha/listener.proto @@ -0,0 +1,265 @@ +syntax = "proto3"; + +package envoy.config.listener.v4alpha; + +import "envoy/config/accesslog/v4alpha/accesslog.proto"; +import "envoy/config/core/v4alpha/address.proto"; +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/config/core/v4alpha/extension.proto"; +import "envoy/config/core/v4alpha/socket_option.proto"; +import "envoy/config/listener/v4alpha/api_listener.proto"; +import "envoy/config/listener/v4alpha/listener_components.proto"; +import "envoy/config/listener/v4alpha/udp_listener_config.proto"; + +import "google/api/annotations.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/core/v1/collection_entry.proto"; + +import "udpa/annotations/security.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.listener.v4alpha"; +option java_outer_classname = "ListenerProto"; +option java_multiple_files = true; +option 
(udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Listener configuration] +// Listener :ref:`configuration overview ` + +// Listener list collections. Entries are *Listener* resources or references. +// [#not-implemented-hide:] +message ListenerCollection { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.listener.v3.ListenerCollection"; + + udpa.core.v1.CollectionEntry entries = 1; +} + +// [#next-free-field: 24] +message Listener { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.listener.v3.Listener"; + + enum DrainType { + // Drain in response to calling /healthcheck/fail admin endpoint (along with the health check + // filter), listener removal/modification, and hot restart. + DEFAULT = 0; + + // Drain in response to listener removal/modification and hot restart. This setting does not + // include /healthcheck/fail. This setting may be desirable if Envoy is hosting both ingress + // and egress listeners. + MODIFY_ONLY = 1; + } + + // [#not-implemented-hide:] + message DeprecatedV1 { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.listener.v3.Listener.DeprecatedV1"; + + // Whether the listener should bind to the port. A listener that doesn't + // bind can only receive connections redirected from other listeners that + // set use_original_dst parameter to true. Default is true. + // + // This is deprecated in v2, all Listeners will bind to their port. An + // additional filter chain must be created for every original destination + // port this listener may redirect to in v2, with the original port + // specified in the FilterChainMatch destination_port field. + // + // [#comment:TODO(PiotrSikora): Remove this once verified that we no longer need it.] + google.protobuf.BoolValue bind_to_port = 1; + } + + // Configuration for listener connection balancing. 
+ message ConnectionBalanceConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.listener.v3.Listener.ConnectionBalanceConfig"; + + // A connection balancer implementation that does exact balancing. This means that a lock is + // held during balancing so that connection counts are nearly exactly balanced between worker + // threads. This is "nearly" exact in the sense that a connection might close in parallel thus + // making the counts incorrect, but this should be rectified on the next accept. This balancer + // sacrifices accept throughput for accuracy and should be used when there are a small number of + // connections that rarely cycle (e.g., service mesh gRPC egress). + message ExactBalance { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.listener.v3.Listener.ConnectionBalanceConfig.ExactBalance"; + } + + oneof balance_type { + option (validate.required) = true; + + // If specified, the listener will use the exact connection balancer. + ExactBalance exact_balance = 1; + } + } + + reserved 14, 4; + + reserved "use_original_dst"; + + // The unique name by which this listener is known. If no name is provided, + // Envoy will allocate an internal UUID for the listener. If the listener is to be dynamically + // updated or removed via :ref:`LDS ` a unique name must be provided. + string name = 1; + + // The address that the listener should listen on. In general, the address must be unique, though + // that is governed by the bind rules of the OS. E.g., multiple listeners can listen on port 0 on + // Linux as the actual port will be allocated by the OS. + core.v4alpha.Address address = 2 [(validate.rules).message = {required: true}]; + + // A list of filter chains to consider for this listener. The + // :ref:`FilterChain ` with the most specific + // :ref:`FilterChainMatch ` criteria is used on a + // connection. 
+ // + // Example using SNI for filter chain selection can be found in the + // :ref:`FAQ entry `. + repeated FilterChain filter_chains = 3; + + // Soft limit on size of the listener’s new connection read and write buffers. + // If unspecified, an implementation defined default is applied (1MiB). + google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5 + [(udpa.annotations.security).configure_for_untrusted_downstream = true]; + + // Listener metadata. + core.v4alpha.Metadata metadata = 6; + + // [#not-implemented-hide:] + DeprecatedV1 deprecated_v1 = 7; + + // The type of draining to perform at a listener-wide level. + DrainType drain_type = 8; + + // Listener filters have the opportunity to manipulate and augment the connection metadata that + // is used in connection filter chain matching, for example. These filters are run before any in + // :ref:`filter_chains `. Order matters as the + // filters are processed sequentially right after a socket has been accepted by the listener, and + // before a connection is created. + // UDP Listener filters can be specified when the protocol in the listener socket address in + // :ref:`protocol ` is :ref:`UDP + // `. + // UDP listeners currently support a single filter. + repeated ListenerFilter listener_filters = 9; + + // The timeout to wait for all listener filters to complete operation. If the timeout is reached, + // the accepted socket is closed without a connection being created unless + // `continue_on_listener_filters_timeout` is set to true. Specify 0 to disable the + // timeout. If not specified, a default timeout of 15s is used. + google.protobuf.Duration listener_filters_timeout = 15; + + // Whether a connection should be created when listener filters timeout. Default is false. + // + // .. attention:: + // + // Some listener filters, such as :ref:`Proxy Protocol filter + // `, should not be used with this option. It will cause + // unexpected behavior when a connection is created. 
+ bool continue_on_listener_filters_timeout = 17; + + // Whether the listener should be set as a transparent socket. + // When this flag is set to true, connections can be redirected to the listener using an + // *iptables* *TPROXY* target, in which case the original source and destination addresses and + // ports are preserved on accepted connections. This flag should be used in combination with + // :ref:`an original_dst ` :ref:`listener filter + // ` to mark the connections' local addresses as + // "restored." This can be used to hand off each redirected connection to another listener + // associated with the connection's destination address. Direct connections to the socket without + // using *TPROXY* cannot be distinguished from connections redirected using *TPROXY* and are + // therefore treated as if they were redirected. + // When this flag is set to false, the listener's socket is explicitly reset as non-transparent. + // Setting this flag requires Envoy to run with the *CAP_NET_ADMIN* capability. + // When this flag is not set (default), the socket is not modified, i.e. the transparent option + // is neither set nor reset. + google.protobuf.BoolValue transparent = 10; + + // Whether the listener should set the *IP_FREEBIND* socket option. When this + // flag is set to true, listeners can be bound to an IP address that is not + // configured on the system running Envoy. When this flag is set to false, the + // option *IP_FREEBIND* is disabled on the socket. When this flag is not set + // (default), the socket is not modified, i.e. the option is neither enabled + // nor disabled. + google.protobuf.BoolValue freebind = 11; + + // Additional socket options that may not be present in Envoy source code or + // precompiled binaries. + repeated core.v4alpha.SocketOption socket_options = 13; + + // Whether the listener should accept TCP Fast Open (TFO) connections. 
+ // When this flag is set to a value greater than 0, the option TCP_FASTOPEN is enabled on + // the socket, with a queue length of the specified size + // (see `details in RFC7413 `_). + // When this flag is set to 0, the option TCP_FASTOPEN is disabled on the socket. + // When this flag is not set (default), the socket is not modified, + // i.e. the option is neither enabled nor disabled. + // + // On Linux, the net.ipv4.tcp_fastopen kernel parameter must include flag 0x2 to enable + // TCP_FASTOPEN. + // See `ip-sysctl.txt `_. + // + // On macOS, only values of 0, 1, and unset are valid; other values may result in an error. + // To set the queue length on macOS, set the net.inet.tcp.fastopen_backlog kernel parameter. + google.protobuf.UInt32Value tcp_fast_open_queue_length = 12; + + // Specifies the intended direction of the traffic relative to the local Envoy. + core.v4alpha.TrafficDirection traffic_direction = 16; + + // If the protocol in the listener socket address in :ref:`protocol + // ` is :ref:`UDP + // `, this field specifies the actual udp + // listener to create, i.e. :ref:`udp_listener_name + // ` = "raw_udp_listener" for + // creating a packet-oriented UDP listener. If not present, treat it as "raw_udp_listener". + UdpListenerConfig udp_listener_config = 18; + + // Used to represent an API listener, which is used in non-proxy clients. The type of API + // exposed to the non-proxy application depends on the type of API listener. + // When this field is set, no other field except for :ref:`name` + // should be set. + // + // .. note:: + // + // Currently only one ApiListener can be installed; and it can only be done via bootstrap config, + // not LDS. 
+ // + // [#next-major-version: In the v3 API, instead of this messy approach where the socket + // listener fields are directly in the top-level Listener message and the API listener types + // are in the ApiListener message, the socket listener messages should be in their own message, + // and the top-level Listener should essentially be a oneof that selects between the + // socket listener and the various types of API listener. That way, a given Listener message + // can structurally only contain the fields of the relevant type.] + ApiListener api_listener = 19; + + // The listener's connection balancer configuration, currently only applicable to TCP listeners. + // If no configuration is specified, Envoy will not attempt to balance active connections between + // worker threads. + ConnectionBalanceConfig connection_balance_config = 20; + + // When this flag is set to true, listeners set the *SO_REUSEPORT* socket option and + // create one socket for each worker thread. This makes inbound connections + // distribute among worker threads roughly evenly in cases where there are a high number + // of connections. When this flag is set to false, all worker threads share one socket. + // + // Before Linux v4.19-rc1, new TCP connections may be rejected during hot restart + // (see `3rd paragraph in 'soreuseport' commit message + // `_). + // This issue was fixed by `tcp: Avoid TCP syncookie rejected by SO_REUSEPORT socket + // `_. + bool reuse_port = 21; + + // Configuration for :ref:`access logs ` + // emitted by this listener. + repeated accesslog.v4alpha.AccessLog access_log = 22; + + // If the protocol in the listener socket address in :ref:`protocol + // ` is :ref:`UDP + // `, this field specifies the actual udp + // writer to create, i.e. :ref:`name ` + // = "udp_default_writer" for creating a udp writer with writing in passthrough mode, + // = "udp_gso_batch_writer" for creating a udp writer with writing in batch mode. 
+ // If not present, treat it as "udp_default_writer". + // [#not-implemented-hide:] + core.v4alpha.TypedExtensionConfig udp_writer_config = 23; +} diff --git a/generated_api_shadow/envoy/config/listener/v4alpha/listener_components.proto b/generated_api_shadow/envoy/config/listener/v4alpha/listener_components.proto new file mode 100644 index 0000000000000..6900cde390162 --- /dev/null +++ b/generated_api_shadow/envoy/config/listener/v4alpha/listener_components.proto @@ -0,0 +1,298 @@ +syntax = "proto3"; + +package envoy.config.listener.v4alpha; + +import "envoy/config/core/v4alpha/address.proto"; +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/type/v3/range.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.listener.v4alpha"; +option java_outer_classname = "ListenerComponentsProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Listener components] +// Listener :ref:`configuration overview ` + +message Filter { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.listener.v3.Filter"; + + reserved 3, 2; + + reserved "config"; + + // The name of the filter to instantiate. The name must match a + // :ref:`supported filter `. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Filter specific configuration which depends on the filter being + // instantiated. See the supported filters for further documentation. + oneof config_type { + google.protobuf.Any typed_config = 4; + } +} + +// Specifies the match criteria for selecting a specific filter chain for a +// listener. 
+// +// In order for a filter chain to be selected, *ALL* of its criteria must be +// fulfilled by the incoming connection, properties of which are set by the +// networking stack and/or listener filters. +// +// The following order applies: +// +// 1. Destination port. +// 2. Destination IP address. +// 3. Server name (e.g. SNI for TLS protocol), +// 4. Transport protocol. +// 5. Application protocols (e.g. ALPN for TLS protocol). +// 6. Source type (e.g. any, local or external network). +// 7. Source IP address. +// 8. Source port. +// +// For criteria that allow ranges or wildcards, the most specific value in any +// of the configured filter chains that matches the incoming connection is going +// to be used (e.g. for SNI ``www.example.com`` the most specific match would be +// ``www.example.com``, then ``*.example.com``, then ``*.com``, then any filter +// chain without ``server_names`` requirements). +// +// [#comment: Implemented rules are kept in the preference order, with deprecated fields +// listed at the end, because that's how we want to list them in the docs. +// +// [#comment:TODO(PiotrSikora): Add support for configurable precedence of the rules] +// [#next-free-field: 13] +message FilterChainMatch { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.listener.v3.FilterChainMatch"; + + enum ConnectionSourceType { + // Any connection source matches. + ANY = 0; + + // Match a connection originating from the same host. + SAME_IP_OR_LOOPBACK = 1; + + // Match a connection originating from a different host. + EXTERNAL = 2; + } + + reserved 1; + + // Optional destination port to consider when use_original_dst is set on the + // listener in determining a filter chain match. + google.protobuf.UInt32Value destination_port = 8 [(validate.rules).uint32 = {lte: 65535 gte: 1}]; + + // If non-empty, an IP address and prefix length to match addresses when the + // listener is bound to 0.0.0.0/:: or when use_original_dst is specified. 
+ repeated core.v4alpha.CidrRange prefix_ranges = 3; + + // If non-empty, an IP address and suffix length to match addresses when the + // listener is bound to 0.0.0.0/:: or when use_original_dst is specified. + // [#not-implemented-hide:] + string address_suffix = 4; + + // [#not-implemented-hide:] + google.protobuf.UInt32Value suffix_len = 5; + + // Specifies the connection source IP match type. Can be any, local or external network. + ConnectionSourceType source_type = 12 [(validate.rules).enum = {defined_only: true}]; + + // The criteria is satisfied if the source IP address of the downstream + // connection is contained in at least one of the specified subnets. If the + // parameter is not specified or the list is empty, the source IP address is + // ignored. + repeated core.v4alpha.CidrRange source_prefix_ranges = 6; + + // The criteria is satisfied if the source port of the downstream connection + // is contained in at least one of the specified ports. If the parameter is + // not specified, the source port is ignored. + repeated uint32 source_ports = 7 + [(validate.rules).repeated = {items {uint32 {lte: 65535 gte: 1}}}]; + + // If non-empty, a list of server names (e.g. SNI for TLS protocol) to consider when determining + // a filter chain match. Those values will be compared against the server names of a new + // connection, when detected by one of the listener filters. + // + // The server name will be matched against all wildcard domains, i.e. ``www.example.com`` + // will be first matched against ``www.example.com``, then ``*.example.com``, then ``*.com``. + // + // Note that partial wildcards are not supported, and values like ``*w.example.com`` are invalid. + // + // .. attention:: + // + // See the :ref:`FAQ entry ` on how to configure SNI for more + // information. + repeated string server_names = 11; + + // If non-empty, a transport protocol to consider when determining a filter chain match. 
+ // This value will be compared against the transport protocol of a new connection, when + // it's detected by one of the listener filters. + // + // Suggested values include: + // + // * ``raw_buffer`` - default, used when no transport protocol is detected, + // * ``tls`` - set by :ref:`envoy.filters.listener.tls_inspector ` + // when TLS protocol is detected. + string transport_protocol = 9; + + // If non-empty, a list of application protocols (e.g. ALPN for TLS protocol) to consider when + // determining a filter chain match. Those values will be compared against the application + // protocols of a new connection, when detected by one of the listener filters. + // + // Suggested values include: + // + // * ``http/1.1`` - set by :ref:`envoy.filters.listener.tls_inspector + // `, + // * ``h2`` - set by :ref:`envoy.filters.listener.tls_inspector ` + // + // .. attention:: + // + // Currently, only :ref:`TLS Inspector ` provides + // application protocol detection based on the requested + // `ALPN `_ values. + // + // However, the use of ALPN is pretty much limited to the HTTP/2 traffic on the Internet, + // and matching on values other than ``h2`` is going to lead to a lot of false negatives, + // unless all connecting clients are known to use ALPN. + repeated string application_protocols = 10; +} + +// A filter chain wraps a set of match criteria, an option TLS context, a set of filters, and +// various other parameters. +// [#next-free-field: 8] +message FilterChain { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.listener.v3.FilterChain"; + + reserved 2; + + reserved "tls_context"; + + // The criteria to use when matching a connection to this filter chain. + FilterChainMatch filter_chain_match = 1; + + // A list of individual network filters that make up the filter chain for + // connections established with the listener. Order matters as the filters are + // processed sequentially as connection events happen. 
Note: If the filter + // list is empty, the connection will close by default. + repeated Filter filters = 3; + + // Whether the listener should expect a PROXY protocol V1 header on new + // connections. If this option is enabled, the listener will assume that that + // remote address of the connection is the one specified in the header. Some + // load balancers including the AWS ELB support this option. If the option is + // absent or set to false, Envoy will use the physical peer address of the + // connection as the remote address. + google.protobuf.BoolValue use_proxy_proto = 4; + + // [#not-implemented-hide:] filter chain metadata. + core.v4alpha.Metadata metadata = 5; + + // Optional custom transport socket implementation to use for downstream connections. + // To setup TLS, set a transport socket with name `tls` and + // :ref:`DownstreamTlsContext ` in the `typed_config`. + // If no transport socket configuration is specified, new connections + // will be set up with plaintext. + core.v4alpha.TransportSocket transport_socket = 6; + + // [#not-implemented-hide:] The unique name (or empty) by which this filter chain is known. If no + // name is provided, Envoy will allocate an internal UUID for the filter chain. If the filter + // chain is to be dynamically updated or removed via FCDS a unique name must be provided. + string name = 7; +} + +// Listener filter chain match configuration. This is a recursive structure which allows complex +// nested match configurations to be built using various logical operators. +// +// Examples: +// +// * Matches if the destination port is 3306. +// +// .. code-block:: yaml +// +// destination_port_range: +// start: 3306 +// end: 3307 +// +// * Matches if the destination port is 3306 or 15000. +// +// .. 
code-block:: yaml +// +// or_match: +// rules: +// - destination_port_range: +// start: 3306 +// end: 3306 +// - destination_port_range: +// start: 15000 +// end: 15001 +// +// [#next-free-field: 6] +message ListenerFilterChainMatchPredicate { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.listener.v3.ListenerFilterChainMatchPredicate"; + + // A set of match configurations used for logical operations. + message MatchSet { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.listener.v3.ListenerFilterChainMatchPredicate.MatchSet"; + + // The list of rules that make up the set. + repeated ListenerFilterChainMatchPredicate rules = 1 + [(validate.rules).repeated = {min_items: 2}]; + } + + oneof rule { + option (validate.required) = true; + + // A set that describes a logical OR. If any member of the set matches, the match configuration + // matches. + MatchSet or_match = 1; + + // A set that describes a logical AND. If all members of the set match, the match configuration + // matches. + MatchSet and_match = 2; + + // A negation match. The match configuration will match if the negated match condition matches. + ListenerFilterChainMatchPredicate not_match = 3; + + // The match configuration will always match. + bool any_match = 4 [(validate.rules).bool = {const: true}]; + + // Match destination port. Particularly, the match evaluation must use the recovered local port if + // the owning listener filter is after :ref:`an original_dst listener filter `. + type.v3.Int32Range destination_port_range = 5; + } +} + +message ListenerFilter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.listener.v3.ListenerFilter"; + + reserved 2; + + reserved "config"; + + // The name of the filter to instantiate. The name must match a + // :ref:`supported filter `. 
+ string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Filter specific configuration which depends on the filter being instantiated. + // See the supported filters for further documentation. + oneof config_type { + google.protobuf.Any typed_config = 3; + } + + // Optional match predicate used to disable the filter. The filter is enabled when this field is empty. + // See :ref:`ListenerFilterChainMatchPredicate ` + // for further examples. + ListenerFilterChainMatchPredicate filter_disabled = 4; +} diff --git a/generated_api_shadow/envoy/config/listener/v4alpha/quic_config.proto b/generated_api_shadow/envoy/config/listener/v4alpha/quic_config.proto new file mode 100644 index 0000000000000..b2b1df1e374f6 --- /dev/null +++ b/generated_api_shadow/envoy/config/listener/v4alpha/quic_config.proto @@ -0,0 +1,41 @@ +syntax = "proto3"; + +package envoy.config.listener.v4alpha; + +import "envoy/config/core/v4alpha/base.proto"; + +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.config.listener.v4alpha"; +option java_outer_classname = "QuicConfigProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: QUIC listener Config] + +// Configuration specific to the QUIC protocol. +// Next id: 5 +message QuicProtocolOptions { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.listener.v3.QuicProtocolOptions"; + + // Maximum number of streams that the client can negotiate per connection. 100 + // if not specified. + google.protobuf.UInt32Value max_concurrent_streams = 1; + + // Maximum number of milliseconds that connection will be alive when there is + // no network activity. 300000ms if not specified. 
+ google.protobuf.Duration idle_timeout = 2; + + // Connection timeout in milliseconds before the crypto handshake is finished. + // 20000ms if not specified. + google.protobuf.Duration crypto_handshake_timeout = 3; + + // Runtime flag that controls whether the listener is enabled or not. If not specified, defaults + // to enabled. + core.v4alpha.RuntimeFeatureFlag enabled = 4; +} diff --git a/generated_api_shadow/envoy/config/listener/v4alpha/udp_default_writer_config.proto b/generated_api_shadow/envoy/config/listener/v4alpha/udp_default_writer_config.proto new file mode 100644 index 0000000000000..02660a7b49f4d --- /dev/null +++ b/generated_api_shadow/envoy/config/listener/v4alpha/udp_default_writer_config.proto @@ -0,0 +1,23 @@ +syntax = "proto3"; + +package envoy.config.listener.v4alpha; + +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.config.listener.v4alpha"; +option java_outer_classname = "UdpDefaultWriterConfigProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Udp Default Writer Config] + +// [#not-implemented-hide:] +// Configuration specific to the Udp Default Writer. 
+message UdpDefaultWriterOptions { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.listener.v3.UdpDefaultWriterOptions"; +} diff --git a/generated_api_shadow/envoy/config/listener/v4alpha/udp_gso_batch_writer_config.proto b/generated_api_shadow/envoy/config/listener/v4alpha/udp_gso_batch_writer_config.proto new file mode 100644 index 0000000000000..5427fe19e7e13 --- /dev/null +++ b/generated_api_shadow/envoy/config/listener/v4alpha/udp_gso_batch_writer_config.proto @@ -0,0 +1,23 @@ +syntax = "proto3"; + +package envoy.config.listener.v4alpha; + +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.config.listener.v4alpha"; +option java_outer_classname = "UdpGsoBatchWriterConfigProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Udp Gso Batch Writer Config] + +// [#not-implemented-hide:] +// Configuration specific to the Udp Gso Batch Writer. 
+message UdpGsoBatchWriterOptions { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.listener.v3.UdpGsoBatchWriterOptions"; +} diff --git a/generated_api_shadow/envoy/config/listener/v4alpha/udp_listener_config.proto b/generated_api_shadow/envoy/config/listener/v4alpha/udp_listener_config.proto new file mode 100644 index 0000000000000..7e40e9529f99c --- /dev/null +++ b/generated_api_shadow/envoy/config/listener/v4alpha/udp_listener_config.proto @@ -0,0 +1,42 @@ +syntax = "proto3"; + +package envoy.config.listener.v4alpha; + +import "google/protobuf/any.proto"; +import "google/protobuf/struct.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.config.listener.v4alpha"; +option java_outer_classname = "UdpListenerConfigProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: UDP Listener Config] +// Listener :ref:`configuration overview ` + +message UdpListenerConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.listener.v3.UdpListenerConfig"; + + reserved 2; + + reserved "config"; + + // Used to look up UDP listener factory, matches "raw_udp_listener" or + // "quic_listener" to create a specific udp listener. + // If not specified, treat as "raw_udp_listener". + string udp_listener_name = 1; + + // Used to create a specific listener factory. To some factory, e.g. + // "raw_udp_listener", config is not needed. 
+ oneof config_type { + google.protobuf.Any typed_config = 3; + } +} + +message ActiveRawUdpListenerConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.listener.v3.ActiveRawUdpListenerConfig"; +} diff --git a/generated_api_shadow/envoy/config/metrics/v3/metrics_service.proto b/generated_api_shadow/envoy/config/metrics/v3/metrics_service.proto index ad9879055ba3c..4bb6c77e66c23 100644 --- a/generated_api_shadow/envoy/config/metrics/v3/metrics_service.proto +++ b/generated_api_shadow/envoy/config/metrics/v3/metrics_service.proto @@ -2,8 +2,11 @@ syntax = "proto3"; package envoy.config.metrics.v3; +import "envoy/config/core/v3/config_source.proto"; import "envoy/config/core/v3/grpc_service.proto"; +import "google/protobuf/wrappers.proto"; + import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -25,4 +28,14 @@ message MetricsServiceConfig { // The upstream gRPC cluster that hosts the metrics service. core.v3.GrpcService grpc_service = 1 [(validate.rules).message = {required: true}]; + + // API version for metric service transport protocol. This describes the metric service gRPC + // endpoint and version of messages used on the wire. + core.v3.ApiVersion transport_api_version = 3 [(validate.rules).enum = {defined_only: true}]; + + // If true, counters are reported as the delta between flushing intervals. Otherwise, the current + // counter value is reported. Defaults to false. + // Eventually (https://github.com/envoyproxy/envoy/issues/10968) if this value is not set, the + // sink will take updates from the :ref:`MetricsResponse `. 
+ google.protobuf.BoolValue report_counters_as_deltas = 2; } diff --git a/generated_api_shadow/envoy/config/metrics/v3/stats.proto b/generated_api_shadow/envoy/config/metrics/v3/stats.proto index bd5e0e8c49732..8b66a83a55e6d 100644 --- a/generated_api_shadow/envoy/config/metrics/v3/stats.proto +++ b/generated_api_shadow/envoy/config/metrics/v3/stats.proto @@ -81,6 +81,35 @@ message StatsConfig { // `issue #8771 `_ for more information. // If any unexpected behavior changes are observed, please open a new issue immediately. StatsMatcher stats_matcher = 3; + + // Defines rules for setting the histogram buckets. Rules are evaluated in order, and the first + // match is applied. If no match is found (or if no rules are set), the following default buckets + // are used: + // + // .. code-block:: json + // + // [ + // 0.5, + // 1, + // 5, + // 10, + // 25, + // 50, + // 100, + // 250, + // 500, + // 1000, + // 2500, + // 5000, + // 10000, + // 30000, + // 60000, + // 300000, + // 600000, + // 1800000, + // 3600000 + // ] + repeated HistogramBucketSettings histogram_bucket_settings = 4; } // Configuration for disabling stat instantiation. @@ -257,53 +286,68 @@ message TagSpecifier { } } +// Specifies a matcher for stats and the buckets that matching stats should use. +message HistogramBucketSettings { + // The stats that this rule applies to. The match is applied to the original stat name + // before tag-extraction, for example `cluster.exampleclustername.upstream_cx_length_ms`. + type.matcher.v3.StringMatcher match = 1 [(validate.rules).message = {required: true}]; + + // Each value is the upper bound of a bucket. Each bucket must be greater than 0 and unique. + // The order of the buckets does not matter. + repeated double buckets = 2 [(validate.rules).repeated = { + min_items: 1 + unique: true + items {double {gt: 0.0}} + }]; +} + // Stats configuration proto schema for built-in *envoy.stat_sinks.statsd* sink. This sink does not support // tagged metrics. 
// [#extension: envoy.stat_sinks.statsd] message StatsdSink { option (udpa.annotations.versioning).previous_message_type = "envoy.config.metrics.v2.StatsdSink"; - // The UDP address of a running `statsd `_ - // compliant listener. If specified, statistics will be flushed to this - // address. - string prefix = 3; - oneof statsd_specifier { option (validate.required) = true; + // The UDP address of a running `statsd `_ + // compliant listener. If specified, statistics will be flushed to this + // address. + core.v3.Address address = 1; + // The name of a cluster that is running a TCP `statsd // `_ compliant listener. If specified, // Envoy will connect to this cluster to flush statistics. - core.v3.Address address = 1; - - // Optional custom prefix for StatsdSink. If - // specified, this will override the default prefix. - // For example: - // - // .. code-block:: json - // - // { - // "prefix" : "envoy-prod" - // } - // - // will change emitted stats to - // - // .. code-block:: cpp - // - // envoy-prod.test_counter:1|c - // envoy-prod.test_timer:5|ms - // - // Note that the default prefix, "envoy", will be used if a prefix is not - // specified. - // - // Stats with default prefix: - // - // .. code-block:: cpp - // - // envoy.test_counter:1|c - // envoy.test_timer:5|ms string tcp_cluster_name = 2; } + + // Optional custom prefix for StatsdSink. If + // specified, this will override the default prefix. + // For example: + // + // .. code-block:: json + // + // { + // "prefix" : "envoy-prod" + // } + // + // will change emitted stats to + // + // .. code-block:: cpp + // + // envoy-prod.test_counter:1|c + // envoy-prod.test_timer:5|ms + // + // Note that the default prefix, "envoy", will be used if a prefix is not + // specified. + // + // Stats with default prefix: + // + // .. code-block:: cpp + // + // envoy.test_counter:1|c + // envoy.test_timer:5|ms + string prefix = 3; } // Stats configuration proto schema for built-in *envoy.stat_sinks.dog_statsd* sink. 
@@ -317,17 +361,25 @@ message DogStatsdSink { reserved 2; - // The UDP address of a running DogStatsD compliant listener. If specified, - // statistics will be flushed to this address. - string prefix = 3; - oneof dog_statsd_specifier { option (validate.required) = true; - // Optional custom metric name prefix. See :ref:`StatsdSink's prefix field - // ` for more details. + // The UDP address of a running DogStatsD compliant listener. If specified, + // statistics will be flushed to this address. core.v3.Address address = 1; } + + // Optional custom metric name prefix. See :ref:`StatsdSink's prefix field + // ` for more details. + string prefix = 3; + + // Optional max datagram size to use when sending UDP messages. By default Envoy + // will emit one metric per datagram. By specifying a max-size larger than a single + // metric, Envoy will emit multiple, new-line separated metrics. The max datagram + // size should not exceed your network's MTU. + // + // Note that this value may not be respected if smaller than a single metric. + google.protobuf.UInt64Value max_bytes_per_datagram = 4 [(validate.rules).uint64 = {gt: 0}]; } // Stats configuration proto schema for built-in *envoy.stat_sinks.hystrix* sink. diff --git a/generated_api_shadow/envoy/config/metrics/v4alpha/BUILD b/generated_api_shadow/envoy/config/metrics/v4alpha/BUILD new file mode 100644 index 0000000000000..4b70ffb4110a5 --- /dev/null +++ b/generated_api_shadow/envoy/config/metrics/v4alpha/BUILD @@ -0,0 +1,14 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/core/v4alpha:pkg", + "//envoy/config/metrics/v3:pkg", + "//envoy/type/matcher/v4alpha:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/config/metrics/v4alpha/metrics_service.proto b/generated_api_shadow/envoy/config/metrics/v4alpha/metrics_service.proto new file mode 100644 index 0000000000000..e2d83ce4c1c97 --- /dev/null +++ b/generated_api_shadow/envoy/config/metrics/v4alpha/metrics_service.proto @@ -0,0 +1,41 @@ +syntax = "proto3"; + +package envoy.config.metrics.v4alpha; + +import "envoy/config/core/v4alpha/config_source.proto"; +import "envoy/config/core/v4alpha/grpc_service.proto"; + +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.metrics.v4alpha"; +option java_outer_classname = "MetricsServiceProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Metrics service] + +// Metrics Service is configured as a built-in *envoy.stat_sinks.metrics_service* :ref:`StatsSink +// `. This opaque configuration will be used to create +// Metrics Service. +// [#extension: envoy.stat_sinks.metrics_service] +message MetricsServiceConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.metrics.v3.MetricsServiceConfig"; + + // The upstream gRPC cluster that hosts the metrics service. + core.v4alpha.GrpcService grpc_service = 1 [(validate.rules).message = {required: true}]; + + // API version for metric service transport protocol. This describes the metric service gRPC + // endpoint and version of messages used on the wire. 
+ core.v4alpha.ApiVersion transport_api_version = 3 [(validate.rules).enum = {defined_only: true}]; + + // If true, counters are reported as the delta between flushing intervals. Otherwise, the current + // counter value is reported. Defaults to false. + // Eventually (https://github.com/envoyproxy/envoy/issues/10968) if this value is not set, the + // sink will take updates from the :ref:`MetricsResponse `. + google.protobuf.BoolValue report_counters_as_deltas = 2; +} diff --git a/generated_api_shadow/envoy/config/metrics/v4alpha/stats.proto b/generated_api_shadow/envoy/config/metrics/v4alpha/stats.proto new file mode 100644 index 0000000000000..6265118cf9b87 --- /dev/null +++ b/generated_api_shadow/envoy/config/metrics/v4alpha/stats.proto @@ -0,0 +1,416 @@ +syntax = "proto3"; + +package envoy.config.metrics.v4alpha; + +import "envoy/config/core/v4alpha/address.proto"; +import "envoy/type/matcher/v4alpha/string.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.metrics.v4alpha"; +option java_outer_classname = "StatsProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Stats] +// Statistics :ref:`architecture overview `. + +// Configuration for pluggable stats sinks. +message StatsSink { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.metrics.v3.StatsSink"; + + reserved 2; + + reserved "config"; + + // The name of the stats sink to instantiate. The name must match a supported + // stats sink. 
The built-in stats sinks are: + // + // * :ref:`envoy.stat_sinks.statsd ` + // * :ref:`envoy.stat_sinks.dog_statsd ` + // * :ref:`envoy.stat_sinks.metrics_service ` + // * :ref:`envoy.stat_sinks.hystrix ` + // + // Sinks optionally support tagged/multiple dimensional metrics. + string name = 1; + + // Stats sink specific configuration which depends on the sink being instantiated. See + // :ref:`StatsdSink ` for an example. + oneof config_type { + google.protobuf.Any typed_config = 3; + } +} + +// Statistics configuration such as tagging. +message StatsConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.metrics.v3.StatsConfig"; + + // Each stat name is iteratively processed through these tag specifiers. + // When a tag is matched, the first capture group is removed from the name so + // later :ref:`TagSpecifiers ` cannot match that + // same portion of the match. + repeated TagSpecifier stats_tags = 1; + + // Use all default tag regexes specified in Envoy. These can be combined with + // custom tags specified in :ref:`stats_tags + // `. They will be processed before + // the custom tags. + // + // .. note:: + // + // If any default tags are specified twice, the config will be considered + // invalid. + // + // See :repo:`well_known_names.h ` for a list of the + // default tags in Envoy. + // + // If not provided, the value is assumed to be true. + google.protobuf.BoolValue use_all_default_tags = 2; + + // Inclusion/exclusion matcher for stat name creation. If not provided, all stats are instantiated + // as normal. Preventing the instantiation of certain families of stats can improve memory + // performance for Envoys running especially large configs. + // + // .. warning:: + // Excluding stats may affect Envoy's behavior in undocumented ways. See + // `issue #8771 `_ for more information. + // If any unexpected behavior changes are observed, please open a new issue immediately. 
+ StatsMatcher stats_matcher = 3; + + // Defines rules for setting the histogram buckets. Rules are evaluated in order, and the first + // match is applied. If no match is found (or if no rules are set), the following default buckets + // are used: + // + // .. code-block:: json + // + // [ + // 0.5, + // 1, + // 5, + // 10, + // 25, + // 50, + // 100, + // 250, + // 500, + // 1000, + // 2500, + // 5000, + // 10000, + // 30000, + // 60000, + // 300000, + // 600000, + // 1800000, + // 3600000 + // ] + repeated HistogramBucketSettings histogram_bucket_settings = 4; +} + +// Configuration for disabling stat instantiation. +message StatsMatcher { + // The instantiation of stats is unrestricted by default. If the goal is to configure Envoy to + // instantiate all stats, there is no need to construct a StatsMatcher. + // + // However, StatsMatcher can be used to limit the creation of families of stats in order to + // conserve memory. Stats can either be disabled entirely, or they can be + // limited by either an exclusion or an inclusion list of :ref:`StringMatcher + // ` protos: + // + // * If `reject_all` is set to `true`, no stats will be instantiated. If `reject_all` is set to + // `false`, all stats will be instantiated. + // + // * If an exclusion list is supplied, any stat name matching *any* of the StringMatchers in the + // list will not instantiate. + // + // * If an inclusion list is supplied, no stats will instantiate, except those matching *any* of + // the StringMatchers in the list. + // + // + // A StringMatcher can be used to match against an exact string, a suffix / prefix, or a regex. + // **NB:** For performance reasons, it is highly recommended to use a prefix- or suffix-based + // matcher rather than a regex-based matcher. + // + // Example 1. Excluding all stats. + // + // .. code-block:: json + // + // { + // "statsMatcher": { + // "rejectAll": "true" + // } + // } + // + // Example 2. 
Excluding all cluster-specific stats, but not cluster-manager stats: + // + // .. code-block:: json + // + // { + // "statsMatcher": { + // "exclusionList": { + // "patterns": [ + // { + // "prefix": "cluster." + // } + // ] + // } + // } + // } + // + // Example 3. Including only manager-related stats: + // + // .. code-block:: json + // + // { + // "statsMatcher": { + // "inclusionList": { + // "patterns": [ + // { + // "prefix": "cluster_manager." + // }, + // { + // "prefix": "listener_manager." + // } + // ] + // } + // } + // } + // + + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.metrics.v3.StatsMatcher"; + + oneof stats_matcher { + option (validate.required) = true; + + // If `reject_all` is true, then all stats are disabled. If `reject_all` is false, then all + // stats are enabled. + bool reject_all = 1; + + // Exclusive match. All stats are enabled except for those matching one of the supplied + // StringMatcher protos. + type.matcher.v4alpha.ListStringMatcher exclusion_list = 2; + + // Inclusive match. No stats are enabled except for those matching one of the supplied + // StringMatcher protos. + type.matcher.v4alpha.ListStringMatcher inclusion_list = 3; + } +} + +// Designates a tag name and value pair. The value may be either a fixed value +// or a regex providing the value via capture groups. The specified tag will be +// unconditionally set if a fixed value, otherwise it will only be set if one +// or more capture groups in the regex match. +message TagSpecifier { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.metrics.v3.TagSpecifier"; + + // Attaches an identifier to the tag values to identify the tag being in the + // sink. Envoy has a set of default names and regexes to extract dynamic + // portions of existing stats, which can be found in :repo:`well_known_names.h + // ` in the Envoy repository. 
If a :ref:`tag_name + // ` is provided in the config and + // neither :ref:`regex ` or + // :ref:`fixed_value ` were specified, + // Envoy will attempt to find that name in its set of defaults and use the accompanying regex. + // + // .. note:: + // + // It is invalid to specify the same tag name twice in a config. + string tag_name = 1; + + oneof tag_value { + // Designates a tag to strip from the tag extracted name and provide as a named + // tag value for all statistics. This will only occur if any part of the name + // matches the regex provided with one or more capture groups. + // + // The first capture group identifies the portion of the name to remove. The + // second capture group (which will normally be nested inside the first) will + // designate the value of the tag for the statistic. If no second capture + // group is provided, the first will also be used to set the value of the tag. + // All other capture groups will be ignored. + // + // Example 1. a stat name ``cluster.foo_cluster.upstream_rq_timeout`` and + // one tag specifier: + // + // .. code-block:: json + // + // { + // "tag_name": "envoy.cluster_name", + // "regex": "^cluster\.((.+?)\.)" + // } + // + // Note that the regex will remove ``foo_cluster.`` making the tag extracted + // name ``cluster.upstream_rq_timeout`` and the tag value for + // ``envoy.cluster_name`` will be ``foo_cluster`` (note: there will be no + // ``.`` character because of the second capture group). + // + // Example 2. a stat name + // ``http.connection_manager_1.user_agent.ios.downstream_cx_total`` and two + // tag specifiers: + // + // .. code-block:: json + // + // [ + // { + // "tag_name": "envoy.http_user_agent", + // "regex": "^http(?=\.).*?\.user_agent\.((.+?)\.)\w+?$" + // }, + // { + // "tag_name": "envoy.http_conn_manager_prefix", + // "regex": "^http\.((.*?)\.)" + // } + // ] + // + // The two regexes of the specifiers will be processed in the definition order. 
+ // + // The first regex will remove ``ios.``, leaving the tag extracted name + // ``http.connection_manager_1.user_agent.downstream_cx_total``. The tag + // ``envoy.http_user_agent`` will be added with tag value ``ios``. + // + // The second regex will remove ``connection_manager_1.`` from the tag + // extracted name produced by the first regex + // ``http.connection_manager_1.user_agent.downstream_cx_total``, leaving + // ``http.user_agent.downstream_cx_total`` as the tag extracted name. The tag + // ``envoy.http_conn_manager_prefix`` will be added with the tag value + // ``connection_manager_1``. + string regex = 2 [(validate.rules).string = {max_bytes: 1024}]; + + // Specifies a fixed tag value for the ``tag_name``. + string fixed_value = 3; + } +} + +// Specifies a matcher for stats and the buckets that matching stats should use. +message HistogramBucketSettings { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.metrics.v3.HistogramBucketSettings"; + + // The stats that this rule applies to. The match is applied to the original stat name + // before tag-extraction, for example `cluster.exampleclustername.upstream_cx_length_ms`. + type.matcher.v4alpha.StringMatcher match = 1 [(validate.rules).message = {required: true}]; + + // Each value is the upper bound of a bucket. Each bucket must be greater than 0 and unique. + // The order of the buckets does not matter. + repeated double buckets = 2 [(validate.rules).repeated = { + min_items: 1 + unique: true + items {double {gt: 0.0}} + }]; +} + +// Stats configuration proto schema for built-in *envoy.stat_sinks.statsd* sink. This sink does not support +// tagged metrics. +// [#extension: envoy.stat_sinks.statsd] +message StatsdSink { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.metrics.v3.StatsdSink"; + + oneof statsd_specifier { + option (validate.required) = true; + + // The UDP address of a running `statsd `_ + // compliant listener. 
If specified, statistics will be flushed to this + // address. + core.v4alpha.Address address = 1; + + // The name of a cluster that is running a TCP `statsd + // `_ compliant listener. If specified, + // Envoy will connect to this cluster to flush statistics. + string tcp_cluster_name = 2; + } + + // Optional custom prefix for StatsdSink. If + // specified, this will override the default prefix. + // For example: + // + // .. code-block:: json + // + // { + // "prefix" : "envoy-prod" + // } + // + // will change emitted stats to + // + // .. code-block:: cpp + // + // envoy-prod.test_counter:1|c + // envoy-prod.test_timer:5|ms + // + // Note that the default prefix, "envoy", will be used if a prefix is not + // specified. + // + // Stats with default prefix: + // + // .. code-block:: cpp + // + // envoy.test_counter:1|c + // envoy.test_timer:5|ms + string prefix = 3; +} + +// Stats configuration proto schema for built-in *envoy.stat_sinks.dog_statsd* sink. +// The sink emits stats with `DogStatsD `_ +// compatible tags. Tags are configurable via :ref:`StatsConfig +// `. +// [#extension: envoy.stat_sinks.dog_statsd] +message DogStatsdSink { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.metrics.v3.DogStatsdSink"; + + reserved 2; + + oneof dog_statsd_specifier { + option (validate.required) = true; + + // The UDP address of a running DogStatsD compliant listener. If specified, + // statistics will be flushed to this address. + core.v4alpha.Address address = 1; + } + + // Optional custom metric name prefix. See :ref:`StatsdSink's prefix field + // ` for more details. + string prefix = 3; + + // Optional max datagram size to use when sending UDP messages. By default Envoy + // will emit one metric per datagram. By specifying a max-size larger than a single + // metric, Envoy will emit multiple, new-line separated metrics. The max datagram + // size should not exceed your network's MTU. 
+ // + // Note that this value may not be respected if smaller than a single metric. + google.protobuf.UInt64Value max_bytes_per_datagram = 4 [(validate.rules).uint64 = {gt: 0}]; +} + +// Stats configuration proto schema for built-in *envoy.stat_sinks.hystrix* sink. +// The sink emits stats in `text/event-stream +// `_ +// formatted stream for use by `Hystrix dashboard +// `_. +// +// Note that only a single HystrixSink should be configured. +// +// Streaming is started through an admin endpoint :http:get:`/hystrix_event_stream`. +// [#extension: envoy.stat_sinks.hystrix] +message HystrixSink { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.metrics.v3.HystrixSink"; + + // The number of buckets the rolling statistical window is divided into. + // + // Each time the sink is flushed, all relevant Envoy statistics are sampled and + // added to the rolling window (removing the oldest samples in the window + // in the process). The sink then outputs the aggregate statistics across the + // current rolling window to the event stream(s). + // + // rolling_window(ms) = stats_flush_interval(ms) * num_of_buckets + // + // More detailed explanation can be found in `Hystrix wiki + // `_. + int64 num_buckets = 1; +} diff --git a/generated_api_shadow/envoy/config/ratelimit/v3/rls.proto b/generated_api_shadow/envoy/config/ratelimit/v3/rls.proto index bb3c538bbabff..98889b1e28825 100644 --- a/generated_api_shadow/envoy/config/ratelimit/v3/rls.proto +++ b/generated_api_shadow/envoy/config/ratelimit/v3/rls.proto @@ -2,6 +2,7 @@ syntax = "proto3"; package envoy.config.ratelimit.v3; +import "envoy/config/core/v3/config_source.proto"; import "envoy/config/core/v3/grpc_service.proto"; import "udpa/annotations/status.proto"; @@ -26,4 +27,8 @@ message RateLimitServiceConfig { // will connect to this cluster when it needs to make rate limit service // requests. 
core.v3.GrpcService grpc_service = 2 [(validate.rules).message = {required: true}]; + + // API version for rate limit transport protocol. This describes the rate limit gRPC endpoint and + // version of messages used on the wire. + core.v3.ApiVersion transport_api_version = 4 [(validate.rules).enum = {defined_only: true}]; } diff --git a/generated_api_shadow/envoy/config/rbac/v3/BUILD b/generated_api_shadow/envoy/config/rbac/v3/BUILD index bef4331a1e651..ce88bd5e6c626 100644 --- a/generated_api_shadow/envoy/config/rbac/v3/BUILD +++ b/generated_api_shadow/envoy/config/rbac/v3/BUILD @@ -11,6 +11,7 @@ api_proto_package( "//envoy/config/route/v3:pkg", "//envoy/type/matcher/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_google_googleapis//google/api/expr/v1alpha1:checked_proto", "@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto", ], ) diff --git a/generated_api_shadow/envoy/config/rbac/v3/rbac.proto b/generated_api_shadow/envoy/config/rbac/v3/rbac.proto index 040f537d1f5c8..278e6857603fe 100644 --- a/generated_api_shadow/envoy/config/rbac/v3/rbac.proto +++ b/generated_api_shadow/envoy/config/rbac/v3/rbac.proto @@ -8,8 +8,10 @@ import "envoy/type/matcher/v3/metadata.proto"; import "envoy/type/matcher/v3/path.proto"; import "envoy/type/matcher/v3/string.proto"; +import "google/api/expr/v1alpha1/checked.proto"; import "google/api/expr/v1alpha1/syntax.proto"; +import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -22,8 +24,14 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Role Based Access Control (RBAC)] // Role Based Access Control (RBAC) provides service-level and method-level access control for a -// service. RBAC policies are additive. The policies are examined in order. A request is allowed -// once a matching policy is found (suppose the `action` is ALLOW). +// service. 
RBAC policies are additive. The policies are examined in order. Requests are allowed +// or denied based on the `action` and whether a matching policy is found. For instance, if the +// action is ALLOW and a matching policy is found the request should be allowed. +// +// RBAC can also be used to make access logging decisions by communicating with access loggers +// through dynamic metadata. When the action is LOG and at least one policy matches, the +// `access_log_hint` value in the shared key namespace 'envoy.common' is set to `true` indicating +// the request should be logged. // // Here is an example of RBAC configuration. It has two policies: // @@ -66,45 +74,69 @@ message RBAC { // Should we do safe-list or block-list style access control? enum Action { - // The policies grant access to principals. The rest is denied. This is safe-list style + // The policies grant access to principals. The rest are denied. This is safe-list style // access control. This is the default type. ALLOW = 0; - // The policies deny access to principals. The rest is allowed. This is block-list style + // The policies deny access to principals. The rest are allowed. This is block-list style // access control. DENY = 1; + + // The policies set the `access_log_hint` dynamic metadata key based on if requests match. + // All requests are allowed. + LOG = 2; } - // The action to take if a policy matches. The request is allowed if and only if: + // The action to take if a policy matches. Every action either allows or denies a request, + // and can also carry out action-specific operations. + // + // Actions: + // + // * ALLOW: Allows the request if and only if there is a policy that matches + // the request. + // * DENY: Allows the request if and only if there are no policies that + // match the request. + // * LOG: Allows all requests. 
If at least one policy matches, the dynamic + // metadata key `access_log_hint` is set to the value `true` under the shared + // key namespace 'envoy.common'. If no policies match, it is set to `false`. + // Other actions do not modify this key. // - // * `action` is "ALLOWED" and at least one policy matches - // * `action` is "DENY" and none of the policies match Action action = 1; // Maps from policy name to policy. A match occurs when at least one policy matches the request. map policies = 2; } -// Policy specifies a role and the principals that are assigned/denied the role. A policy matches if -// and only if at least one of its permissions match the action taking place AND at least one of its -// principals match the downstream AND the condition is true if specified. +// Policy specifies a role and the principals that are assigned/denied the role. +// A policy matches if and only if at least one of its permissions match the +// action taking place AND at least one of its principals match the downstream +// AND the condition is true if specified. message Policy { option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v2.Policy"; - // Required. The set of permissions that define a role. Each permission is matched with OR - // semantics. To match all actions for this policy, a single Permission with the `any` field set - // to true should be used. + // Required. The set of permissions that define a role. Each permission is + // matched with OR semantics. To match all actions for this policy, a single + // Permission with the `any` field set to true should be used. repeated Permission permissions = 1 [(validate.rules).repeated = {min_items: 1}]; - // Required. The set of principals that are assigned/denied the role based on “action”. Each - // principal is matched with OR semantics. To match all downstreams for this policy, a single - // Principal with the `any` field set to true should be used. + // Required. 
The set of principals that are assigned/denied the role based on + // “action”. Each principal is matched with OR semantics. To match all + // downstreams for this policy, a single Principal with the `any` field set to + // true should be used. repeated Principal principals = 2 [(validate.rules).repeated = {min_items: 1}]; // An optional symbolic expression specifying an access control // :ref:`condition `. The condition is combined // with the permissions and the principals as a clause with AND semantics. - google.api.expr.v1alpha1.Expr condition = 3; + // Only be used when checked_condition is not used. + google.api.expr.v1alpha1.Expr condition = 3 + [(udpa.annotations.field_migrate).oneof_promotion = "expression_specifier"]; + + // [#not-implemented-hide:] + // An optional symbolic expression that has been successfully type checked. + // Only be used when condition is not used. + google.api.expr.v1alpha1.CheckedExpr checked_condition = 4 + [(udpa.annotations.field_migrate).oneof_promotion = "expression_specifier"]; } // Permission defines an action (or actions) that a principal can take. @@ -151,9 +183,9 @@ message Permission { // Metadata that describes additional information about the action. type.matcher.v3.MetadataMatcher metadata = 7; - // Negates matching the provided permission. For instance, if the value of `not_rule` would - // match, this permission would not match. Conversely, if the value of `not_rule` would not - // match, this permission would match. + // Negates matching the provided permission. For instance, if the value of + // `not_rule` would match, this permission would not match. Conversely, if + // the value of `not_rule` would not match, this permission would match. Permission not_rule = 8; // The request server from the client's connection request. 
This is @@ -166,7 +198,8 @@ message Permission { // // * If the :ref:`TLS Inspector ` // filter is not added, and if a `FilterChainMatch` is not defined for - // the :ref:`server name `, + // the :ref:`server name + // `, // a TLS connection's requested SNI server name will be treated as if it // wasn't present. // @@ -179,13 +212,14 @@ message Permission { } } -// Principal defines an identity or a group of identities for a downstream subject. +// Principal defines an identity or a group of identities for a downstream +// subject. // [#next-free-field: 12] message Principal { option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v2.Principal"; - // Used in the `and_ids` and `or_ids` fields in the `identifier` oneof. Depending on the context, - // each are applied with the associated behavior. + // Used in the `and_ids` and `or_ids` fields in the `identifier` oneof. + // Depending on the context, each are applied with the associated behavior. message Set { option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v2.Principal.Set"; @@ -200,19 +234,21 @@ message Principal { reserved 1; - // The name of the principal. If set, The URI SAN or DNS SAN in that order is used from the - // certificate, otherwise the subject field is used. If unset, it applies to any user that is - // authenticated. + // The name of the principal. If set, The URI SAN or DNS SAN in that order + // is used from the certificate, otherwise the subject field is used. If + // unset, it applies to any user that is authenticated. type.matcher.v3.StringMatcher principal_name = 2; } oneof identifier { option (validate.required) = true; - // A set of identifiers that all must match in order to define the downstream. + // A set of identifiers that all must match in order to define the + // downstream. Set and_ids = 1; - // A set of identifiers at least one must match in order to define the downstream. 
+ // A set of identifiers at least one must match in order to define the + // downstream. Set or_ids = 2; // When any is set, it matches any downstream. @@ -227,21 +263,23 @@ message Principal { // A CIDR block that describes the downstream remote/origin address. // Note: This is always the physical peer even if the - // :ref:`remote_ip ` is inferred - // from for example the x-forwarder-for header, proxy protocol, etc. + // :ref:`remote_ip ` is + // inferred from for example the x-forwarder-for header, proxy protocol, + // etc. core.v3.CidrRange direct_remote_ip = 10; // A CIDR block that describes the downstream remote/origin address. // Note: This may not be the physical peer and could be different from the - // :ref:`direct_remote_ip `. - // E.g, if the remote ip is inferred from for example the x-forwarder-for header, - // proxy protocol, etc. + // :ref:`direct_remote_ip + // `. E.g, if the + // remote ip is inferred from for example the x-forwarder-for header, proxy + // protocol, etc. core.v3.CidrRange remote_ip = 11; - // A header (or pseudo-header such as :path or :method) on the incoming HTTP request. Only - // available for HTTP request. - // Note: the pseudo-header :path includes the query and fragment string. Use the `url_path` - // field if you want to match the URL path without the query and fragment string. + // A header (or pseudo-header such as :path or :method) on the incoming HTTP + // request. Only available for HTTP request. Note: the pseudo-header :path + // includes the query and fragment string. Use the `url_path` field if you + // want to match the URL path without the query and fragment string. route.v3.HeaderMatcher header = 6; // A URL path on the incoming HTTP request. Only available for HTTP. @@ -250,9 +288,9 @@ message Principal { // Metadata that describes additional information about the principal. type.matcher.v3.MetadataMatcher metadata = 7; - // Negates matching the provided principal. 
For instance, if the value of `not_id` would match, - // this principal would not match. Conversely, if the value of `not_id` would not match, this - // principal would match. + // Negates matching the provided principal. For instance, if the value of + // `not_id` would match, this principal would not match. Conversely, if the + // value of `not_id` would not match, this principal would match. Principal not_id = 8; } } diff --git a/generated_api_shadow/envoy/config/rbac/v4alpha/BUILD b/generated_api_shadow/envoy/config/rbac/v4alpha/BUILD index dbfa8be4f36f2..be78d751372e6 100644 --- a/generated_api_shadow/envoy/config/rbac/v4alpha/BUILD +++ b/generated_api_shadow/envoy/config/rbac/v4alpha/BUILD @@ -9,8 +9,9 @@ api_proto_package( "//envoy/config/core/v4alpha:pkg", "//envoy/config/rbac/v3:pkg", "//envoy/config/route/v4alpha:pkg", - "//envoy/type/matcher/v3:pkg", + "//envoy/type/matcher/v4alpha:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_google_googleapis//google/api/expr/v1alpha1:checked_proto", "@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto", ], ) diff --git a/generated_api_shadow/envoy/config/rbac/v4alpha/rbac.proto b/generated_api_shadow/envoy/config/rbac/v4alpha/rbac.proto index cdbeb5bf2eef8..7139dfaa1485d 100644 --- a/generated_api_shadow/envoy/config/rbac/v4alpha/rbac.proto +++ b/generated_api_shadow/envoy/config/rbac/v4alpha/rbac.proto @@ -4,10 +4,11 @@ package envoy.config.rbac.v4alpha; import "envoy/config/core/v4alpha/address.proto"; import "envoy/config/route/v4alpha/route_components.proto"; -import "envoy/type/matcher/v3/metadata.proto"; -import "envoy/type/matcher/v3/path.proto"; -import "envoy/type/matcher/v3/string.proto"; +import "envoy/type/matcher/v4alpha/metadata.proto"; +import "envoy/type/matcher/v4alpha/path.proto"; +import "envoy/type/matcher/v4alpha/string.proto"; +import "google/api/expr/v1alpha1/checked.proto"; import "google/api/expr/v1alpha1/syntax.proto"; import "udpa/annotations/status.proto"; @@ -22,8 
+23,14 @@ option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSIO // [#protodoc-title: Role Based Access Control (RBAC)] // Role Based Access Control (RBAC) provides service-level and method-level access control for a -// service. RBAC policies are additive. The policies are examined in order. A request is allowed -// once a matching policy is found (suppose the `action` is ALLOW). +// service. RBAC policies are additive. The policies are examined in order. Requests are allowed +// or denied based on the `action` and whether a matching policy is found. For instance, if the +// action is ALLOW and a matching policy is found the request should be allowed. +// +// RBAC can also be used to make access logging decisions by communicating with access loggers +// through dynamic metadata. When the action is LOG and at least one policy matches, the +// `access_log_hint` value in the shared key namespace 'envoy.common' is set to `true` indicating +// the request should be logged. // // Here is an example of RBAC configuration. It has two policies: // @@ -66,45 +73,69 @@ message RBAC { // Should we do safe-list or block-list style access control? enum Action { - // The policies grant access to principals. The rest is denied. This is safe-list style + // The policies grant access to principals. The rest are denied. This is safe-list style // access control. This is the default type. ALLOW = 0; - // The policies deny access to principals. The rest is allowed. This is block-list style + // The policies deny access to principals. The rest are allowed. This is block-list style // access control. DENY = 1; + + // The policies set the `access_log_hint` dynamic metadata key based on if requests match. + // All requests are allowed. + LOG = 2; } - // The action to take if a policy matches. The request is allowed if and only if: + // The action to take if a policy matches. 
Every action either allows or denies a request, + // and can also carry out action-specific operations. + // + // Actions: + // + // * ALLOW: Allows the request if and only if there is a policy that matches + // the request. + // * DENY: Allows the request if and only if there are no policies that + // match the request. + // * LOG: Allows all requests. If at least one policy matches, the dynamic + // metadata key `access_log_hint` is set to the value `true` under the shared + // key namespace 'envoy.common'. If no policies match, it is set to `false`. + // Other actions do not modify this key. // - // * `action` is "ALLOWED" and at least one policy matches - // * `action` is "DENY" and none of the policies match Action action = 1; // Maps from policy name to policy. A match occurs when at least one policy matches the request. map policies = 2; } -// Policy specifies a role and the principals that are assigned/denied the role. A policy matches if -// and only if at least one of its permissions match the action taking place AND at least one of its -// principals match the downstream AND the condition is true if specified. +// Policy specifies a role and the principals that are assigned/denied the role. +// A policy matches if and only if at least one of its permissions match the +// action taking place AND at least one of its principals match the downstream +// AND the condition is true if specified. message Policy { option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v3.Policy"; - // Required. The set of permissions that define a role. Each permission is matched with OR - // semantics. To match all actions for this policy, a single Permission with the `any` field set - // to true should be used. + // Required. The set of permissions that define a role. Each permission is + // matched with OR semantics. To match all actions for this policy, a single + // Permission with the `any` field set to true should be used. 
repeated Permission permissions = 1 [(validate.rules).repeated = {min_items: 1}]; - // Required. The set of principals that are assigned/denied the role based on “action”. Each - // principal is matched with OR semantics. To match all downstreams for this policy, a single - // Principal with the `any` field set to true should be used. + // Required. The set of principals that are assigned/denied the role based on + // “action”. Each principal is matched with OR semantics. To match all + // downstreams for this policy, a single Principal with the `any` field set to + // true should be used. repeated Principal principals = 2 [(validate.rules).repeated = {min_items: 1}]; - // An optional symbolic expression specifying an access control - // :ref:`condition `. The condition is combined - // with the permissions and the principals as a clause with AND semantics. - google.api.expr.v1alpha1.Expr condition = 3; + oneof expression_specifier { + // An optional symbolic expression specifying an access control + // :ref:`condition `. The condition is combined + // with the permissions and the principals as a clause with AND semantics. + // Only be used when checked_condition is not used. + google.api.expr.v1alpha1.Expr condition = 3; + + // [#not-implemented-hide:] + // An optional symbolic expression that has been successfully type checked. + // Only be used when condition is not used. + google.api.expr.v1alpha1.CheckedExpr checked_condition = 4; + } } // Permission defines an action (or actions) that a principal can take. @@ -140,7 +171,7 @@ message Permission { route.v4alpha.HeaderMatcher header = 4; // A URL path on the incoming HTTP request. Only available for HTTP. - type.matcher.v3.PathMatcher url_path = 10; + type.matcher.v4alpha.PathMatcher url_path = 10; // A CIDR block that describes the destination IP. 
core.v4alpha.CidrRange destination_ip = 5; @@ -149,11 +180,11 @@ message Permission { uint32 destination_port = 6 [(validate.rules).uint32 = {lte: 65535}]; // Metadata that describes additional information about the action. - type.matcher.v3.MetadataMatcher metadata = 7; + type.matcher.v4alpha.MetadataMatcher metadata = 7; - // Negates matching the provided permission. For instance, if the value of `not_rule` would - // match, this permission would not match. Conversely, if the value of `not_rule` would not - // match, this permission would match. + // Negates matching the provided permission. For instance, if the value of + // `not_rule` would match, this permission would not match. Conversely, if + // the value of `not_rule` would not match, this permission would match. Permission not_rule = 8; // The request server from the client's connection request. This is @@ -166,7 +197,8 @@ message Permission { // // * If the :ref:`TLS Inspector ` // filter is not added, and if a `FilterChainMatch` is not defined for - // the :ref:`server name `, + // the :ref:`server name + // `, // a TLS connection's requested SNI server name will be treated as if it // wasn't present. // @@ -175,17 +207,18 @@ message Permission { // // Please refer to :ref:`this FAQ entry ` to learn to // setup SNI. - type.matcher.v3.StringMatcher requested_server_name = 9; + type.matcher.v4alpha.StringMatcher requested_server_name = 9; } } -// Principal defines an identity or a group of identities for a downstream subject. +// Principal defines an identity or a group of identities for a downstream +// subject. // [#next-free-field: 12] message Principal { option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v3.Principal"; - // Used in the `and_ids` and `or_ids` fields in the `identifier` oneof. Depending on the context, - // each are applied with the associated behavior. + // Used in the `and_ids` and `or_ids` fields in the `identifier` oneof. 
+ // Depending on the context, each are applied with the associated behavior. message Set { option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v3.Principal.Set"; @@ -200,19 +233,21 @@ message Principal { reserved 1; - // The name of the principal. If set, The URI SAN or DNS SAN in that order is used from the - // certificate, otherwise the subject field is used. If unset, it applies to any user that is - // authenticated. - type.matcher.v3.StringMatcher principal_name = 2; + // The name of the principal. If set, The URI SAN or DNS SAN in that order + // is used from the certificate, otherwise the subject field is used. If + // unset, it applies to any user that is authenticated. + type.matcher.v4alpha.StringMatcher principal_name = 2; } oneof identifier { option (validate.required) = true; - // A set of identifiers that all must match in order to define the downstream. + // A set of identifiers that all must match in order to define the + // downstream. Set and_ids = 1; - // A set of identifiers at least one must match in order to define the downstream. + // A set of identifiers at least one must match in order to define the + // downstream. Set or_ids = 2; // When any is set, it matches any downstream. @@ -227,32 +262,34 @@ message Principal { // A CIDR block that describes the downstream remote/origin address. // Note: This is always the physical peer even if the - // :ref:`remote_ip ` is inferred - // from for example the x-forwarder-for header, proxy protocol, etc. + // :ref:`remote_ip ` is + // inferred from for example the x-forwarder-for header, proxy protocol, + // etc. core.v4alpha.CidrRange direct_remote_ip = 10; // A CIDR block that describes the downstream remote/origin address. // Note: This may not be the physical peer and could be different from the - // :ref:`direct_remote_ip `. - // E.g, if the remote ip is inferred from for example the x-forwarder-for header, - // proxy protocol, etc. 
+ // :ref:`direct_remote_ip + // `. E.g, if the + // remote ip is inferred from for example the x-forwarder-for header, proxy + // protocol, etc. core.v4alpha.CidrRange remote_ip = 11; - // A header (or pseudo-header such as :path or :method) on the incoming HTTP request. Only - // available for HTTP request. - // Note: the pseudo-header :path includes the query and fragment string. Use the `url_path` - // field if you want to match the URL path without the query and fragment string. + // A header (or pseudo-header such as :path or :method) on the incoming HTTP + // request. Only available for HTTP request. Note: the pseudo-header :path + // includes the query and fragment string. Use the `url_path` field if you + // want to match the URL path without the query and fragment string. route.v4alpha.HeaderMatcher header = 6; // A URL path on the incoming HTTP request. Only available for HTTP. - type.matcher.v3.PathMatcher url_path = 9; + type.matcher.v4alpha.PathMatcher url_path = 9; // Metadata that describes additional information about the principal. - type.matcher.v3.MetadataMatcher metadata = 7; + type.matcher.v4alpha.MetadataMatcher metadata = 7; - // Negates matching the provided principal. For instance, if the value of `not_id` would match, - // this principal would not match. Conversely, if the value of `not_id` would not match, this - // principal would match. + // Negates matching the provided principal. For instance, if the value of + // `not_id` would match, this principal would not match. Conversely, if the + // value of `not_id` would not match, this principal would match. 
Principal not_id = 8; } } diff --git a/generated_api_shadow/envoy/config/route/v3/BUILD b/generated_api_shadow/envoy/config/route/v3/BUILD index 019cf27528c6a..6f653723e5ae3 100644 --- a/generated_api_shadow/envoy/config/route/v3/BUILD +++ b/generated_api_shadow/envoy/config/route/v3/BUILD @@ -11,6 +11,7 @@ api_proto_package( "//envoy/api/v2/route:pkg", "//envoy/config/core/v3:pkg", "//envoy/type/matcher/v3:pkg", + "//envoy/type/metadata/v3:pkg", "//envoy/type/tracing/v3:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", diff --git a/generated_api_shadow/envoy/config/route/v3/route_components.proto b/generated_api_shadow/envoy/config/route/v3/route_components.proto index 616e76af302e1..f79f399d2140c 100644 --- a/generated_api_shadow/envoy/config/route/v3/route_components.proto +++ b/generated_api_shadow/envoy/config/route/v3/route_components.proto @@ -3,8 +3,11 @@ syntax = "proto3"; package envoy.config.route.v3; import "envoy/config/core/v3/base.proto"; +import "envoy/config/core/v3/extension.proto"; +import "envoy/config/core/v3/proxy_protocol.proto"; import "envoy/type/matcher/v3/regex.proto"; import "envoy/type/matcher/v3/string.proto"; +import "envoy/type/metadata/v3/metadata.proto"; import "envoy/type/tracing/v3/custom_tag.proto"; import "envoy/type/v3/percent.proto"; import "envoy/type/v3/range.proto"; @@ -15,6 +18,7 @@ import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -121,7 +125,9 @@ message VirtualHost { // Specifies a list of HTTP headers that should be removed from each response // handled by this virtual host. 
- repeated string response_headers_to_remove = 11; + repeated string response_headers_to_remove = 11 [(validate.rules).repeated = { + items {string {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} + }]; // Indicates that the virtual host has a CORS policy. CorsPolicy cors = 8; @@ -207,40 +213,42 @@ message Route { // Route matching parameters. RouteMatch match = 1 [(validate.rules).message = {required: true}]; - // Route request to some upstream cluster. - core.v3.Metadata metadata = 4; + oneof action { + option (validate.required) = true; - // Return a redirect. - Decorator decorator = 5; + // Route request to some upstream cluster. + RouteAction route = 2; - // Return an arbitrary HTTP response directly, without proxying. - map typed_per_filter_config = 13; + // Return a redirect. + RedirectAction redirect = 3; - // [#not-implemented-hide:] - // If true, a filter will define the action (e.g., it could dynamically generate the - // RouteAction). - repeated core.v3.HeaderValueOption request_headers_to_add = 9 - [(validate.rules).repeated = {max_items: 1000}]; + // Return an arbitrary HTTP response directly, without proxying. + DirectResponseAction direct_response = 7; + + // [#not-implemented-hide:] + // If true, a filter will define the action (e.g., it could dynamically generate the + // RouteAction). + // [#comment: TODO(samflattery): Remove cleanup in route_fuzz_test.cc when + // implemented] + FilterAction filter_action = 17; + } // The Metadata field can be used to provide additional information // about the route. It can be used for configuration, stats, and logging. // The metadata should go under the filter namespace that will need it. // For instance, if the metadata is intended for the Router filter, // the filter name should be specified as *envoy.filters.http.router*. 
- repeated string request_headers_to_remove = 12 [(validate.rules).repeated = { - items {string {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} - }]; + core.v3.Metadata metadata = 4; // Decorator for the matched route. - repeated core.v3.HeaderValueOption response_headers_to_add = 10 - [(validate.rules).repeated = {max_items: 1000}]; + Decorator decorator = 5; // The typed_per_filter_config field can be used to provide route-specific // configurations for filters. The key should match the filter name, such as // *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter // specific; see the :ref:`HTTP filter documentation ` for // if and how it is utilized. - repeated string response_headers_to_remove = 11; + map typed_per_filter_config = 13; // Specifies a set of headers that will be added to requests matching this // route. Headers specified at this level are applied before headers from the @@ -248,11 +256,14 @@ message Route { // :ref:`envoy_api_msg_config.route.v3.RouteConfiguration`. For more information, including details on // header value syntax, see the documentation on :ref:`custom request headers // `. - Tracing tracing = 15; + repeated core.v3.HeaderValueOption request_headers_to_add = 9 + [(validate.rules).repeated = {max_items: 1000}]; // Specifies a list of HTTP headers that should be removed from each request // matching this route. - google.protobuf.UInt32Value per_request_buffer_limit_bytes = 16; + repeated string request_headers_to_remove = 12 [(validate.rules).repeated = { + items {string {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} + }]; // Specifies a set of headers that will be added to responses to requests // matching this route. Headers specified at this level are applied before @@ -260,27 +271,26 @@ message Route { // :ref:`envoy_api_msg_config.route.v3.RouteConfiguration`. 
For more information, including // details on header value syntax, see the documentation on // :ref:`custom request headers `. - map hidden_envoy_deprecated_per_filter_config = 8 - [deprecated = true]; - - oneof action { - option (validate.required) = true; + repeated core.v3.HeaderValueOption response_headers_to_add = 10 + [(validate.rules).repeated = {max_items: 1000}]; - // Specifies a list of HTTP headers that should be removed from each response - // to requests matching this route. - RouteAction route = 2; + // Specifies a list of HTTP headers that should be removed from each response + // to requests matching this route. + repeated string response_headers_to_remove = 11 [(validate.rules).repeated = { + items {string {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} + }]; - // Presence of the object defines whether the connection manager's tracing configuration - // is overridden by this route specific instance. - RedirectAction redirect = 3; + // Presence of the object defines whether the connection manager's tracing configuration + // is overridden by this route specific instance. + Tracing tracing = 15; - // The maximum bytes which will be buffered for retries and shadowing. - // If set, the bytes actually buffered will be the minimum value of this and the - // listener per_connection_buffer_limit_bytes. - DirectResponseAction direct_response = 7; + // The maximum bytes which will be buffered for retries and shadowing. + // If set, the bytes actually buffered will be the minimum value of this and the + // listener per_connection_buffer_limit_bytes. 
+ google.protobuf.UInt32Value per_request_buffer_limit_bytes = 16; - FilterAction filter_action = 17; - } + map hidden_envoy_deprecated_per_filter_config = 8 + [deprecated = true]; } // Compared to the :ref:`cluster ` field that specifies a @@ -373,7 +383,7 @@ message WeightedCluster { string runtime_key_prefix = 2; } -// [#next-free-field: 12] +// [#next-free-field: 13] message RouteMatch { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RouteMatch"; @@ -395,33 +405,58 @@ message RouteMatch { google.protobuf.BoolValue validated = 2; } + // An extensible message for matching CONNECT requests. + message ConnectMatcher { + } + reserved 5; - // If specified, the route is a prefix rule meaning that the prefix must - // match the beginning of the *:path* header. - google.protobuf.BoolValue case_sensitive = 4; + oneof path_specifier { + option (validate.required) = true; - // If specified, the route is an exact path rule meaning that the path must - // exactly match the *:path* header once the query string is removed. - core.v3.RuntimeFractionalPercent runtime_fraction = 9; + // If specified, the route is a prefix rule meaning that the prefix must + // match the beginning of the *:path* header. + string prefix = 1; - // If specified, the route is a regular expression rule meaning that the - // regex must match the *:path* header once the query string is removed. The entire path - // (without the query string) must match the regex. The rule will not match if only a - // subsequence of the *:path* header matches the regex. - // - // [#next-major-version: In the v3 API we should redo how path specification works such - // that we utilize StringMatcher, and additionally have consistent options around whether we - // strip query strings, do a case sensitive match, etc. In the interim it will be too disruptive - // to deprecate the existing options. 
We should even consider whether we want to do away with - // path_specifier entirely and just rely on a set of header matchers which can already match - // on :path, etc. The issue with that is it is unclear how to generically deal with query string - // stripping. This needs more thought.] - repeated HeaderMatcher headers = 6; + // If specified, the route is an exact path rule meaning that the path must + // exactly match the *:path* header once the query string is removed. + string path = 2; + + // If specified, the route is a regular expression rule meaning that the + // regex must match the *:path* header once the query string is removed. The entire path + // (without the query string) must match the regex. The rule will not match if only a + // subsequence of the *:path* header matches the regex. + // + // [#next-major-version: In the v3 API we should redo how path specification works such + // that we utilize StringMatcher, and additionally have consistent options around whether we + // strip query strings, do a case sensitive match, etc. In the interim it will be too disruptive + // to deprecate the existing options. We should even consider whether we want to do away with + // path_specifier entirely and just rely on a set of header matchers which can already match + // on :path, etc. The issue with that is it is unclear how to generically deal with query string + // stripping. This needs more thought.] + type.matcher.v3.RegexMatcher safe_regex = 10 [(validate.rules).message = {required: true}]; + + // If this is used as the matcher, the matcher will only match CONNECT requests. + // Note that this will not match HTTP/2 upgrade-style CONNECT requests + // (WebSocket and the like) as they are normalized in Envoy as HTTP/1.1 style + // upgrades. + // This is the only way to match CONNECT requests for HTTP/1.1. For HTTP/2, + // where CONNECT requests may have a path, the path matchers will work if + // there is a path present. 
+ // Note that CONNECT support is currently considered alpha in Envoy. + // [#comment:TODO(htuch): Replace the above comment with an alpha tag. + ConnectMatcher connect_matcher = 12; + + string hidden_envoy_deprecated_regex = 3 [ + deprecated = true, + (validate.rules).string = {max_bytes: 1024}, + (envoy.annotations.disallowed_by_default) = true + ]; + } // Indicates that prefix/path matching should be case insensitive. The default // is true. - repeated QueryParameterMatcher query_parameters = 7; + google.protobuf.BoolValue case_sensitive = 4; // Indicates that the route should additionally match on a runtime key. Every time the route // is considered for a match, it must also fall under the percentage of matches indicated by @@ -439,42 +474,32 @@ message RouteMatch { // integer with the assumption that the value is an integral percentage out of 100. For // instance, a runtime key lookup returning the value "42" would parse as a FractionalPercent // whose numerator is 42 and denominator is HUNDRED. This preserves legacy semantics. - GrpcRouteMatchOptions grpc = 8; + core.v3.RuntimeFractionalPercent runtime_fraction = 9; // Specifies a set of headers that the route should match on. The router will // check the request’s headers against all the specified headers in the route // config. A match will happen if all the headers in the route are present in // the request with the same values (or based on presence if the value field // is not in the config). - TlsContextMatchOptions tls_context = 11; - - oneof path_specifier { - option (validate.required) = true; - - // Specifies a set of URL query parameters on which the route should - // match. The router will check the query string from the *path* header - // against all the specified query parameters. If the number of specified - // query parameters is nonzero, they all must match the *path* header's - // query string for a match to occur. 
- string prefix = 1; + repeated HeaderMatcher headers = 6; - // If specified, only gRPC requests will be matched. The router will check - // that the content-type header has a application/grpc or one of the various - // application/grpc+ values. - string path = 2; + // Specifies a set of URL query parameters on which the route should + // match. The router will check the query string from the *path* header + // against all the specified query parameters. If the number of specified + // query parameters is nonzero, they all must match the *path* header's + // query string for a match to occur. + repeated QueryParameterMatcher query_parameters = 7; - // If specified, the client tls context will be matched against the defined - // match options. - // - // [#next-major-version: unify with RBAC] - type.matcher.v3.RegexMatcher safe_regex = 10 [(validate.rules).message = {required: true}]; + // If specified, only gRPC requests will be matched. The router will check + // that the content-type header has a application/grpc or one of the various + // application/grpc+ values. + GrpcRouteMatchOptions grpc = 8; - string hidden_envoy_deprecated_regex = 3 [ - deprecated = true, - (validate.rules).string = {max_bytes: 1024}, - (envoy.annotations.disallowed_by_default) = true - ]; - } + // If specified, the client tls context will be matched against the defined + // match options. + // + // [#next-major-version: unify with RBAC] + TlsContextMatchOptions tls_context = 11; } // [#next-free-field: 12] @@ -500,14 +525,19 @@ message CorsPolicy { // Specifies whether the resource allows credentials. google.protobuf.BoolValue allow_credentials = 6; - // Specifies the % of requests for which the CORS filter is enabled. - // - // If neither ``enabled``, ``filter_enabled``, nor ``shadow_enabled`` are specified, the CORS - // filter will be enabled for 100% of the requests. 
- // - // If :ref:`runtime_key ` is - // specified, Envoy will lookup the runtime key to get the percentage of requests to filter. - core.v3.RuntimeFractionalPercent shadow_enabled = 10; + oneof enabled_specifier { + // Specifies the % of requests for which the CORS filter is enabled. + // + // If neither ``enabled``, ``filter_enabled``, nor ``shadow_enabled`` are specified, the CORS + // filter will be enabled for 100% of the requests. + // + // If :ref:`runtime_key ` is + // specified, Envoy will lookup the runtime key to get the percentage of requests to filter. + core.v3.RuntimeFractionalPercent filter_enabled = 9; + + google.protobuf.BoolValue hidden_envoy_deprecated_enabled = 7 + [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; + } // Specifies the % of requests for which the CORS policies will be evaluated and tracked, but not // enforced. @@ -518,21 +548,16 @@ message CorsPolicy { // If :ref:`runtime_key ` is specified, // Envoy will lookup the runtime key to get the percentage of requests for which it will evaluate // and track the request's *Origin* to determine if it's valid but will not enforce any policies. + core.v3.RuntimeFractionalPercent shadow_enabled = 10; + repeated string hidden_envoy_deprecated_allow_origin = 1 [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; repeated string hidden_envoy_deprecated_allow_origin_regex = 8 [deprecated = true, (validate.rules).repeated = {items {string {max_bytes: 1024}}}]; - - oneof enabled_specifier { - core.v3.RuntimeFractionalPercent filter_enabled = 9; - - google.protobuf.BoolValue hidden_envoy_deprecated_enabled = 7 - [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; - } } -// [#next-free-field: 34] +// [#next-free-field: 35] message RouteAction { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RouteAction"; @@ -545,7 +570,10 @@ message RouteAction { } // Configures :ref:`internal redirect ` behavior. 
+ // [#next-major-version: remove this definition - it's defined in the InternalRedirectPolicy message.] enum InternalRedirectAction { + option deprecated = true; + PASS_THROUGH_INTERNAL_REDIRECT = 0; HANDLE_INTERNAL_REDIRECT = 1; } @@ -602,6 +630,10 @@ message RouteAction { string header_name = 1 [ (validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false} ]; + + // If specified, the request header value will be rewritten and used + // to produce the hash key. + type.matcher.v3.RegexMatchAndSubstitute regex_rewrite = 2; } // Envoy supports two types of cookie affinity: @@ -665,45 +697,45 @@ message RouteAction { string key = 1 [(validate.rules).string = {min_bytes: 1}]; } - // Header hash policy. - bool terminal = 4; - oneof policy_specifier { option (validate.required) = true; - // Cookie hash policy. + // Header hash policy. Header header = 1; - // Connection properties hash policy. + // Cookie hash policy. Cookie cookie = 2; - // Query parameter hash policy. + // Connection properties hash policy. ConnectionProperties connection_properties = 3; - // Filter state hash policy. + // Query parameter hash policy. QueryParameter query_parameter = 5; - // The flag that short-circuits the hash computing. This field provides a - // 'fallback' style of configuration: "if a terminal policy doesn't work, - // fallback to rest of the policy list", it saves time when the terminal - // policy works. - // - // If true, and there is already a hash computed, ignore rest of the - // list of hash polices. - // For example, if the following hash methods are configured: - // - // ========= ======== - // specifier terminal - // ========= ======== - // Header A true - // Header B false - // Header C false - // ========= ======== - // - // The generateHash process ends if policy "header A" generates a hash, as - // it's a terminal policy. + // Filter state hash policy. FilterState filter_state = 6; } + + // The flag that short-circuits the hash computing. 
This field provides a + // 'fallback' style of configuration: "if a terminal policy doesn't work, + // fallback to rest of the policy list", it saves time when the terminal + // policy works. + // + // If true, and there is already a hash computed, ignore rest of the + // list of hash polices. + // For example, if the following hash methods are configured: + // + // ========= ======== + // specifier terminal + // ========= ======== + // Header A true + // Header B false + // Header C false + // ========= ======== + // + // The generateHash process ends if policy "header A" generates a hash, as + // it's a terminal policy. + bool terminal = 4; } // Allows enabling and disabling upgrades on a per-route basis. @@ -716,6 +748,13 @@ message RouteAction { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RouteAction.UpgradeConfig"; + // Configuration for sending data upstream as a raw data payload. This is used for + // CONNECT requests, when forwarding CONNECT payload as raw TCP. + message ConnectConfig { + // If present, the proxy protocol header will be prepended to the CONNECT payload sent upstream. + core.v3.ProxyProtocolConfig proxy_protocol_config = 1; + } + // The case-insensitive name of this upgrade, e.g. "websocket". // For each upgrade type present in upgrade_configs, requests with // Upgrade: [upgrade_type] will be proxied upstream. @@ -724,44 +763,54 @@ message RouteAction { // Determines if upgrades are available on this route. Defaults to true. google.protobuf.BoolValue enabled = 2; + + // Configuration for sending data upstream as a raw data payload. This is used for + // CONNECT requests, when forwarding CONNECT payload as raw TCP. + // Note that CONNECT support is currently considered alpha in Envoy. + // [#comment:TODO(htuch): Replace the above comment with an alpha tag. 
+ ConnectConfig connect_config = 3; } reserved 12, 18, 19, 16, 22, 21; - // Indicates the upstream cluster to which the request should be routed - // to. - ClusterNotFoundResponseCode cluster_not_found_response_code = 20 - [(validate.rules).enum = {defined_only: true}]; + oneof cluster_specifier { + option (validate.required) = true; - // Envoy will determine the cluster to route to by reading the value of the - // HTTP header named by cluster_header from the request headers. If the - // header is not found or the referenced cluster does not exist, Envoy will - // return a 404 response. - // - // .. attention:: - // - // Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1 - // *Host* header. Thus, if attempting to match on *Host*, match on *:authority* instead. - core.v3.Metadata metadata_match = 4; + // Indicates the upstream cluster to which the request should be routed + // to. + string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; - // Multiple upstream clusters can be specified for a given route. The - // request is routed to one of the upstream clusters based on weights - // assigned to each cluster. See - // :ref:`traffic splitting ` - // for additional documentation. - string prefix_rewrite = 5 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; + // Envoy will determine the cluster to route to by reading the value of the + // HTTP header named by cluster_header from the request headers. If the + // header is not found or the referenced cluster does not exist, Envoy will + // return a 404 response. + // + // .. attention:: + // + // Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1 + // *Host* header. Thus, if attempting to match on *Host*, match on *:authority* instead. 
+ string cluster_header = 2 + [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; + + // Multiple upstream clusters can be specified for a given route. The + // request is routed to one of the upstream clusters based on weights + // assigned to each cluster. See + // :ref:`traffic splitting ` + // for additional documentation. + WeightedCluster weighted_clusters = 3; + } // The HTTP status code to use when configured cluster is not found. // The default response code is 503 Service Unavailable. - type.matcher.v3.RegexMatchAndSubstitute regex_rewrite = 32; + ClusterNotFoundResponseCode cluster_not_found_response_code = 20 + [(validate.rules).enum = {defined_only: true}]; // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints // in the upstream cluster with metadata matching what's set in this field will be considered // for load balancing. If using :ref:`weighted_clusters // `, metadata will be merged, with values // provided there taking precedence. The filter name should be specified as *envoy.lb*. - google.protobuf.Duration timeout = 8; + core.v3.Metadata metadata_match = 4; // Indicates that during forwarding, the matched prefix (or path) should be // swapped with this value. This option allows application URLs to be rooted @@ -794,7 +843,8 @@ message RouteAction { // // Having above entries in the config, requests to */prefix* will be stripped to */*, while // requests to */prefix/etc* will be stripped to */etc*. 
- google.protobuf.Duration idle_timeout = 24; + string prefix_rewrite = 5 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; // Indicates that during forwarding, portions of the path that match the // pattern should be rewritten, even allowing the substitution of capture @@ -824,28 +874,32 @@ message RouteAction { // * The pattern ``(?i)/xxx/`` paired with a substitution string of ``/yyy/`` // would do a case-insensitive match and transform path ``/aaa/XxX/bbb`` to // ``/aaa/yyy/bbb``. - RetryPolicy retry_policy = 9; + type.matcher.v3.RegexMatchAndSubstitute regex_rewrite = 32; - // Indicates that during forwarding, the host header will be swapped with - // this value. - google.protobuf.Any retry_policy_typed_config = 33; + oneof host_rewrite_specifier { + // Indicates that during forwarding, the host header will be swapped with + // this value. + string host_rewrite_literal = 6 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; - // Indicates that during forwarding, the host header will be swapped with - // the hostname of the upstream host chosen by the cluster manager. This - // option is applicable only when the destination cluster for a route is of - // type *strict_dns* or *logical_dns*. Setting this to true with other cluster - // types has no effect. - repeated RequestMirrorPolicy request_mirror_policies = 30; + // Indicates that during forwarding, the host header will be swapped with + // the hostname of the upstream host chosen by the cluster manager. This + // option is applicable only when the destination cluster for a route is of + // type *strict_dns* or *logical_dns*. Setting this to true with other cluster + // types has no effect. + google.protobuf.BoolValue auto_host_rewrite = 7; - // Indicates that during forwarding, the host header will be swapped with the content of given - // downstream or :ref:`custom ` header. - // If header value is empty, host header is left intact. 
- // - // .. attention:: - // - // Pay attention to the potential security implications of using this option. Provided header - // must come from trusted source. - core.v3.RoutingPriority priority = 11 [(validate.rules).enum = {defined_only: true}]; + // Indicates that during forwarding, the host header will be swapped with the content of given + // downstream or :ref:`custom ` header. + // If header value is empty, host header is left intact. + // + // .. attention:: + // + // Pay attention to the potential security implications of using this option. Provided header + // must come from trusted source. + string host_rewrite_header = 29 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; + } // Specifies the upstream timeout for the route. If not specified, the default is 15s. This // spans between the point at which the entire downstream request (i.e. end-of-stream) has been @@ -858,7 +912,7 @@ message RouteAction { // :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, // :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`, and the // :ref:`retry overview `. - repeated RateLimit rate_limits = 13; + google.protobuf.Duration timeout = 8; // Specifies the idle timeout for the route. If not specified, there is no per-route idle timeout, // although the connection manager wide :ref:`stream_idle_timeout @@ -878,35 +932,35 @@ message RouteAction { // fires, the stream is terminated with a 408 Request Timeout error code if no // upstream response header has been received, otherwise a stream reset // occurs. - google.protobuf.BoolValue include_vh_rate_limits = 14; + google.protobuf.Duration idle_timeout = 24; // Indicates that the route has a retry policy. Note that if this is set, // it'll take precedence over the virtual host level retry policy entirely // (e.g.: policies are not merged, most internal one becomes the enforced policy). 
- repeated HashPolicy hash_policy = 15; + RetryPolicy retry_policy = 9; // [#not-implemented-hide:] // Specifies the configuration for retry policy extension. Note that if this is set, it'll take // precedence over the virtual host level retry policy entirely (e.g.: policies are not merged, // most internal one becomes the enforced policy). :ref:`Retry policy ` // should not be set if this field is used. - CorsPolicy cors = 17; + google.protobuf.Any retry_policy_typed_config = 33; // Indicates that the route has request mirroring policies. - google.protobuf.Duration max_grpc_timeout = 23; + repeated RequestMirrorPolicy request_mirror_policies = 30; // Optionally specifies the :ref:`routing priority `. - google.protobuf.Duration grpc_timeout_offset = 28; + core.v3.RoutingPriority priority = 11 [(validate.rules).enum = {defined_only: true}]; // Specifies a set of rate limit configurations that could be applied to the // route. - repeated UpgradeConfig upgrade_configs = 25; + repeated RateLimit rate_limits = 13; // Specifies if the rate limit filter should include the virtual host rate // limits. By default, if the route configured rate limits, the virtual host // :ref:`rate_limits ` are not applied to the // request. - InternalRedirectAction internal_redirect_action = 26; + google.protobuf.BoolValue include_vh_rate_limits = 14; // Specifies a list of hash policies to use for ring hash load balancing. Each // hash policy is evaluated individually and the combined result is used to @@ -920,10 +974,10 @@ message RouteAction { // backend). If a hash policy has the "terminal" attribute set to true, and // there is already a hash generated, the hash is returned immediately, // ignoring the rest of the hash policy list. - google.protobuf.UInt32Value max_internal_redirects = 31; + repeated HashPolicy hash_policy = 15; // Indicates that the route has a CORS policy. 
- HedgePolicy hedge_policy = 27; + CorsPolicy cors = 17; // If present, and the request is a gRPC request, use the // `grpc-timeout header `_, @@ -944,52 +998,49 @@ message RouteAction { // :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, // :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`, and the // :ref:`retry overview `. - RequestMirrorPolicy hidden_envoy_deprecated_request_mirror_policy = 10 [deprecated = true]; - - oneof cluster_specifier { - option (validate.required) = true; - - // If present, Envoy will adjust the timeout provided by the `grpc-timeout` header by subtracting - // the provided duration from the header. This is useful in allowing Envoy to set its global - // timeout to be less than that of the deadline imposed by the calling client, which makes it more - // likely that Envoy will handle the timeout instead of having the call canceled by the client. - // The offset will only be applied if the provided grpc_timeout is greater than the offset. This - // ensures that the offset will only ever decrease the timeout and never set it to 0 (meaning - // infinity). - string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + google.protobuf.Duration max_grpc_timeout = 23; - string cluster_header = 2 - [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; + // If present, Envoy will adjust the timeout provided by the `grpc-timeout` header by subtracting + // the provided duration from the header. This is useful in allowing Envoy to set its global + // timeout to be less than that of the deadline imposed by the calling client, which makes it more + // likely that Envoy will handle the timeout instead of having the call canceled by the client. + // The offset will only be applied if the provided grpc_timeout is greater than the offset. This + // ensures that the offset will only ever decrease the timeout and never set it to 0 (meaning + // infinity). 
+ google.protobuf.Duration grpc_timeout_offset = 28; - WeightedCluster weighted_clusters = 3; - } + repeated UpgradeConfig upgrade_configs = 25; - oneof host_rewrite_specifier { - // An internal redirect is handled, iff the number of previous internal redirects that a - // downstream request has encountered is lower than this value, and - // :ref:`internal_redirect_action ` - // is set to :ref:`HANDLE_INTERNAL_REDIRECT - // ` - // In the case where a downstream request is bounced among multiple routes by internal redirect, - // the first route that hits this threshold, or has - // :ref:`internal_redirect_action ` - // set to - // :ref:`PASS_THROUGH_INTERNAL_REDIRECT - // ` - // will pass the redirect back to downstream. - // - // If not specified, at most one redirect will be followed. - string host_rewrite_literal = 6 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; + // If present, Envoy will try to follow an upstream redirect response instead of proxying the + // response back to the downstream. An upstream redirect response is defined + // by :ref:`redirect_response_codes + // `. + InternalRedirectPolicy internal_redirect_policy = 34; + + InternalRedirectAction internal_redirect_action = 26 [deprecated = true]; + + // An internal redirect is handled, iff the number of previous internal redirects that a + // downstream request has encountered is lower than this value, and + // :ref:`internal_redirect_action ` + // is set to :ref:`HANDLE_INTERNAL_REDIRECT + // ` + // In the case where a downstream request is bounced among multiple routes by internal redirect, + // the first route that hits this threshold, or has + // :ref:`internal_redirect_action ` + // set to + // :ref:`PASS_THROUGH_INTERNAL_REDIRECT + // ` + // will pass the redirect back to downstream. + // + // If not specified, at most one redirect will be followed. 
+ google.protobuf.UInt32Value max_internal_redirects = 31 [deprecated = true]; - // Indicates that the route has a hedge policy. Note that if this is set, - // it'll take precedence over the virtual host level hedge policy entirely - // (e.g.: policies are not merged, most internal one becomes the enforced policy). - google.protobuf.BoolValue auto_host_rewrite = 7; + // Indicates that the route has a hedge policy. Note that if this is set, + // it'll take precedence over the virtual host level hedge policy entirely + // (e.g.: policies are not merged, most internal one becomes the enforced policy). + HedgePolicy hedge_policy = 27; - string host_rewrite_header = 29 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; - } + RequestMirrorPolicy hidden_envoy_deprecated_request_mirror_policy = 10 [deprecated = true]; } // HTTP retry :ref:`architecture overview `. @@ -1051,7 +1102,8 @@ message RetryPolicy { // Specifies the allowed number of retries. This parameter is optional and // defaults to 1. These are the same conditions documented for // :ref:`config_http_filters_router_x-envoy-max-retries`. - google.protobuf.UInt32Value num_retries = 2; + google.protobuf.UInt32Value num_retries = 2 + [(udpa.annotations.field_migrate).rename = "max_retries"]; // Specifies a non-zero upstream timeout per retry attempt. This parameter is optional. The // same conditions documented for @@ -1149,28 +1201,46 @@ message RedirectAction { PERMANENT_REDIRECT = 4; } - // The scheme portion of the URL will be swapped with "https". - string host_redirect = 1 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; - - // The scheme portion of the URL will be swapped with this value. - uint32 port_redirect = 8; - - // The host portion of the URL will be swapped with this value. - RedirectResponseCode response_code = 3 [(validate.rules).enum = {defined_only: true}]; - - // The port value of the URL will be swapped with this value. 
- bool strip_query = 6; - // When the scheme redirection take place, the following rules apply: // 1. If the source URI scheme is `http` and the port is explicitly // set to `:80`, the port will be removed after the redirection // 2. If the source URI scheme is `https` and the port is explicitly // set to `:443`, the port will be removed after the redirection oneof scheme_rewrite_specifier { - // The path portion of the URL will be swapped with this value. + // The scheme portion of the URL will be swapped with "https". bool https_redirect = 4; + // The scheme portion of the URL will be swapped with this value. + string scheme_redirect = 7; + } + + // The host portion of the URL will be swapped with this value. + string host_redirect = 1 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; + + // The port value of the URL will be swapped with this value. + uint32 port_redirect = 8; + + oneof path_rewrite_specifier { + // The path portion of the URL will be swapped with this value. + // Please note that query string in path_redirect will override the + // request's query string and will not be stripped. + // + // For example, let's say we have the following routes: + // + // - match: { path: "/old-path-1" } + // redirect: { path_redirect: "/new-path-1" } + // - match: { path: "/old-path-2" } + // redirect: { path_redirect: "/new-path-2", strip-query: "true" } + // - match: { path: "/old-path-3" } + // redirect: { path_redirect: "/new-path-3?foo=1", strip_query: "true" } + // + // 1. if request uri is "/old-path-1?bar=1", users will be redirected to "/new-path-1?bar=1" + // 2. if request uri is "/old-path-2?bar=1", users will be redirected to "/new-path-2" + // 3. 
if request uri is "/old-path-3?bar=1", users will be redirected to "/new-path-3?foo=1" + string path_redirect = 2 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; + // Indicates that during redirection, the matched prefix (or path) // should be swapped with this value. This option allows redirect URLs be dynamically created // based on the request. @@ -1179,20 +1249,17 @@ message RedirectAction { // // Pay attention to the use of trailing slashes as mentioned in // :ref:`RouteAction's prefix_rewrite `. - string scheme_redirect = 7; - } - - oneof path_rewrite_specifier { - // The HTTP status code to use in the redirect response. The default response - // code is MOVED_PERMANENTLY (301). - string path_redirect = 2 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; - - // Indicates that during redirection, the query portion of the URL will - // be removed. Default value is false. string prefix_rewrite = 5 [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; } + + // The HTTP status code to use in the redirect response. The default response + // code is MOVED_PERMANENTLY (301). + RedirectResponseCode response_code = 3 [(validate.rules).enum = {defined_only: true}]; + + // Indicates that during redirection, the query portion of the URL will + // be removed. Default value is false. + bool strip_query = 6; } message DirectResponseAction { @@ -1311,7 +1378,7 @@ message VirtualCluster { message RateLimit { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RateLimit"; - // [#next-free-field: 7] + // [#next-free-field: 8] message Action { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RateLimit.Action"; @@ -1368,6 +1435,11 @@ message RateLimit { // The key to use in the descriptor entry. 
string descriptor_key = 2 [(validate.rules).string = {min_bytes: 1}]; + + // If set to true, Envoy skips the descriptor while calling rate limiting service + // when header is not present in the request. By default it skips calling the + // rate limiting service if this header is not present in the request. + bool skip_if_absent = 3; } // The following descriptor entry is appended to the descriptor and is populated using the @@ -1420,6 +1492,24 @@ message RateLimit { repeated HeaderMatcher headers = 3 [(validate.rules).repeated = {min_items: 1}]; } + // The following descriptor entry is appended when the dynamic metadata contains a key value: + // + // .. code-block:: cpp + // + // ("", "") + message DynamicMetaData { + // The key to use in the descriptor entry. + string descriptor_key = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Metadata struct that defines the key and path to retrieve the string value. A match will + // only happen if the value in the dynamic metadata is of type string. + type.metadata.v3.MetadataKey metadata_key = 2 [(validate.rules).message = {required: true}]; + + // An optional value to use if *metadata_key* is empty. If not set and + // no value is present under the metadata_key then no descriptor is generated. + string default_value = 3; + } + oneof action_specifier { option (validate.required) = true; @@ -1440,6 +1530,27 @@ message RateLimit { // Rate limit on the existence of request headers. HeaderValueMatch header_value_match = 6; + + // Rate limit on dynamic metadata. + DynamicMetaData dynamic_metadata = 7; + } + } + + message Override { + // Fetches the override from the dynamic metadata. + message DynamicMetadata { + // Metadata struct that defines the key and path to retrieve the struct value. 
+ // The value must be a struct containing an integer "requests_per_unit" property + // and a "unit" property with a value parseable to :ref:`RateLimitUnit + // enum ` + type.metadata.v3.MetadataKey metadata_key = 1 [(validate.rules).message = {required: true}]; + } + + oneof override_specifier { + option (validate.required) = true; + + // Limit override from dynamic metadata. + DynamicMetadata dynamic_metadata = 1; } } @@ -1462,6 +1573,12 @@ message RateLimit { // configuration. See :ref:`composing actions // ` for additional documentation. repeated Action actions = 3 [(validate.rules).repeated = {min_items: 1}]; + + // An optional limit override to be appended to the descriptor produced by this + // rate limit configuration. If the override value is invalid or cannot be resolved + // from metadata, no override is provided. See :ref:`rate limit override + // ` for more information. + Override limit = 4; } // .. attention:: @@ -1498,15 +1615,15 @@ message HeaderMatcher { string name = 1 [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; - // If specified, header match will be performed based on the value of the header. - bool invert_match = 8; - // Specifies how the header match will be performed to route the request. oneof header_match_specifier { + // If specified, header match will be performed based on the value of the header. + string exact_match = 4; + // If specified, this regex string is a regular expression rule which implies the entire request // header value must match the regex. The rule will not match if only a subsequence of the // request header value matches the regex. - string exact_match = 4; + type.matcher.v3.RegexMatcher safe_regex_match = 11; // If specified, header match will be performed based on range. // The rule will match if the request header value is within this range. 
@@ -1519,11 +1636,11 @@ message HeaderMatcher { // // * For range [-10,0), route will match for header value -1, but not for 0, "somestring", 10.9, // "-1somestring" - type.matcher.v3.RegexMatcher safe_regex_match = 11; + type.v3.Int64Range range_match = 6; // If specified, header match will be performed based on whether the header is in the // request. - type.v3.Int64Range range_match = 6; + bool present_match = 7; // If specified, header match will be performed based on the prefix of the header value. // Note: empty prefix is not allowed, please use present_match instead. @@ -1531,7 +1648,7 @@ message HeaderMatcher { // Examples: // // * The prefix *abcd* matches the value *abcdxyz*, but not for *abcxyz*. - bool present_match = 7; + string prefix_match = 9 [(validate.rules).string = {min_bytes: 1}]; // If specified, header match will be performed based on the suffix of the header value. // Note: empty suffix is not allowed, please use present_match instead. @@ -1539,14 +1656,6 @@ message HeaderMatcher { // Examples: // // * The suffix *abcd* matches the value *xyzabcd*, but not for *xyzbcd*. - string prefix_match = 9 [(validate.rules).string = {min_bytes: 1}]; - - // If specified, the match result will be inverted before checking. Defaults to false. - // - // Examples: - // - // * The regex ``\d{3}`` does not match the value *1234*, so it will match when inverted. - // * The range [-10,0) will match the value -1, so it will not match when inverted. string suffix_match = 10 [(validate.rules).string = {min_bytes: 1}]; string hidden_envoy_deprecated_regex_match = 5 [ @@ -1555,6 +1664,14 @@ message HeaderMatcher { (envoy.annotations.disallowed_by_default) = true ]; } + + // If specified, the match result will be inverted before checking. Defaults to false. + // + // Examples: + // + // * The regex ``\d{3}`` does not match the value *1234*, so it will match when inverted. + // * The range [-10,0) will match the value -1, so it will not match when inverted. 
+ bool invert_match = 8; } // Query parameter matching treats the query string of a request's :path header @@ -1568,17 +1685,44 @@ message QueryParameterMatcher { // *path*'s query string. string name = 1 [(validate.rules).string = {min_bytes: 1 max_bytes: 1024}]; - // Specifies whether a query parameter value should match against a string. + oneof query_parameter_match_specifier { + // Specifies whether a query parameter value should match against a string. + type.matcher.v3.StringMatcher string_match = 5 [(validate.rules).message = {required: true}]; + + // Specifies whether a query parameter should be present. + bool present_match = 6; + } + string hidden_envoy_deprecated_value = 3 [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; - // Specifies whether a query parameter should be present. google.protobuf.BoolValue hidden_envoy_deprecated_regex = 4 [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; +} - oneof query_parameter_match_specifier { - type.matcher.v3.StringMatcher string_match = 5 [(validate.rules).message = {required: true}]; - - bool present_match = 6; - } +// HTTP Internal Redirect :ref:`architecture overview `. +message InternalRedirectPolicy { + // An internal redirect is not handled, unless the number of previous internal redirects that a + // downstream request has encountered is lower than this value. + // In the case where a downstream request is bounced among multiple routes by internal redirect, + // the first route that hits this threshold, or does not set :ref:`internal_redirect_policy + // ` + // will pass the redirect back to downstream. + // + // If not specified, at most one redirect will be followed. + google.protobuf.UInt32Value max_internal_redirects = 1; + + // Defines what upstream response codes are allowed to trigger internal redirect. If unspecified, + // only 302 will be treated as internal redirect. + // Only 301, 302, 303, 307 and 308 are valid values. Any other codes will be ignored. 
+ repeated uint32 redirect_response_codes = 2 [(validate.rules).repeated = {max_items: 5}]; + + // Specifies a list of predicates that are queried when an upstream response is deemed + // to trigger an internal redirect by all other criteria. Any predicate in the list can reject + // the redirect, causing the response to be proxied to downstream. + repeated core.v3.TypedExtensionConfig predicates = 3; + + // Allow internal redirect to follow a target URI with a different scheme than the value of + // x-forwarded-proto. The default is false. + bool allow_cross_scheme_redirect = 4; } diff --git a/generated_api_shadow/envoy/config/route/v4alpha/BUILD b/generated_api_shadow/envoy/config/route/v4alpha/BUILD index 507bedd76bdf3..c72b7030b9fbb 100644 --- a/generated_api_shadow/envoy/config/route/v4alpha/BUILD +++ b/generated_api_shadow/envoy/config/route/v4alpha/BUILD @@ -9,7 +9,8 @@ api_proto_package( "//envoy/annotations:pkg", "//envoy/config/core/v4alpha:pkg", "//envoy/config/route/v3:pkg", - "//envoy/type/matcher/v3:pkg", + "//envoy/type/matcher/v4alpha:pkg", + "//envoy/type/metadata/v3:pkg", "//envoy/type/tracing/v3:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", diff --git a/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto b/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto index e813b632edb04..a8b6ae4459cee 100644 --- a/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto +++ b/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto @@ -3,8 +3,11 @@ syntax = "proto3"; package envoy.config.route.v4alpha; import "envoy/config/core/v4alpha/base.proto"; -import "envoy/type/matcher/v3/regex.proto"; -import "envoy/type/matcher/v3/string.proto"; +import "envoy/config/core/v4alpha/extension.proto"; +import "envoy/config/core/v4alpha/proxy_protocol.proto"; +import "envoy/type/matcher/v4alpha/regex.proto"; +import "envoy/type/matcher/v4alpha/string.proto"; +import 
"envoy/type/metadata/v3/metadata.proto"; import "envoy/type/tracing/v3/custom_tag.proto"; import "envoy/type/v3/percent.proto"; import "envoy/type/v3/range.proto"; @@ -123,7 +126,9 @@ message VirtualHost { // Specifies a list of HTTP headers that should be removed from each response // handled by this virtual host. - repeated string response_headers_to_remove = 11; + repeated string response_headers_to_remove = 11 [(validate.rules).repeated = { + items {string {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} + }]; // Indicates that the virtual host has a CORS policy. CorsPolicy cors = 8; @@ -142,7 +147,7 @@ message VirtualHost { // will see the attempt count as perceived by the second Envoy. Defaults to false. // This header is unaffected by the // :ref:`suppress_envoy_headers - // ` flag. + // ` flag. // // [#next-major-version: rename to include_attempt_count_in_request.] bool include_request_attempt_count = 14; @@ -154,7 +159,7 @@ message VirtualHost { // will see the attempt count as perceived by the Envoy closest upstream from itself. Defaults to false. // This header is unaffected by the // :ref:`suppress_envoy_headers - // ` flag. + // ` flag. bool include_attempt_count_in_response = 19; // Indicates the retry policy for all routes in this virtual host. Note that setting a @@ -223,6 +228,8 @@ message Route { // [#not-implemented-hide:] // If true, a filter will define the action (e.g., it could dynamically generate the // RouteAction). + // [#comment: TODO(samflattery): Remove cleanup in route_fuzz_test.cc when + // implemented] FilterAction filter_action = 17; } @@ -269,7 +276,9 @@ message Route { // Specifies a list of HTTP headers that should be removed from each response // to requests matching this route. 
- repeated string response_headers_to_remove = 11; + repeated string response_headers_to_remove = 11 [(validate.rules).repeated = { + items {string {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} + }]; // Presence of the object defines whether the connection manager's tracing configuration // is overridden by this route specific instance. @@ -371,7 +380,7 @@ message WeightedCluster { string runtime_key_prefix = 2; } -// [#next-free-field: 12] +// [#next-free-field: 13] message RouteMatch { option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.RouteMatch"; @@ -393,6 +402,12 @@ message RouteMatch { google.protobuf.BoolValue validated = 2; } + // An extensible message for matching CONNECT requests. + message ConnectMatcher { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.RouteMatch.ConnectMatcher"; + } + reserved 5, 3; reserved "regex"; @@ -420,7 +435,18 @@ message RouteMatch { // path_specifier entirely and just rely on a set of header matchers which can already match // on :path, etc. The issue with that is it is unclear how to generically deal with query string // stripping. This needs more thought.] - type.matcher.v3.RegexMatcher safe_regex = 10 [(validate.rules).message = {required: true}]; + type.matcher.v4alpha.RegexMatcher safe_regex = 10 [(validate.rules).message = {required: true}]; + + // If this is used as the matcher, the matcher will only match CONNECT requests. + // Note that this will not match HTTP/2 upgrade-style CONNECT requests + // (WebSocket and the like) as they are normalized in Envoy as HTTP/1.1 style + // upgrades. + // This is the only way to match CONNECT requests for HTTP/1.1. For HTTP/2, + // where CONNECT requests may have a path, the path matchers will work if + // there is a path present. + // Note that CONNECT support is currently considered alpha in Envoy. + // [#comment:TODO(htuch): Replace the above comment with an alpha tag. 
+ ConnectMatcher connect_matcher = 12; } // Indicates that prefix/path matching should be case insensitive. The default @@ -481,7 +507,7 @@ message CorsPolicy { // Specifies string patterns that match allowed origins. An origin is allowed if any of the // string matchers match. - repeated type.matcher.v3.StringMatcher allow_origin_string_match = 11; + repeated type.matcher.v4alpha.StringMatcher allow_origin_string_match = 11; // Specifies the content for the *access-control-allow-methods* header. string allow_methods = 2; @@ -521,7 +547,7 @@ message CorsPolicy { core.v4alpha.RuntimeFractionalPercent shadow_enabled = 10; } -// [#next-free-field: 34] +// [#next-free-field: 35] message RouteAction { option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.RouteAction"; @@ -534,7 +560,10 @@ message RouteAction { } // Configures :ref:`internal redirect ` behavior. + // [#next-major-version: remove this definition - it's defined in the InternalRedirectPolicy message.] enum InternalRedirectAction { + option deprecated = true; + PASS_THROUGH_INTERNAL_REDIRECT = 0; HANDLE_INTERNAL_REDIRECT = 1; } @@ -592,6 +621,10 @@ message RouteAction { string header_name = 1 [ (validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false} ]; + + // If specified, the request header value will be rewritten and used + // to produce the hash key. + type.matcher.v4alpha.RegexMatchAndSubstitute regex_rewrite = 2; } // Envoy supports two types of cookie affinity: @@ -706,6 +739,16 @@ message RouteAction { option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.RouteAction.UpgradeConfig"; + // Configuration for sending data upstream as a raw data payload. This is used for + // CONNECT requests, when forwarding CONNECT payload as raw TCP. 
+ message ConnectConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.RouteAction.UpgradeConfig.ConnectConfig"; + + // If present, the proxy protocol header will be prepended to the CONNECT payload sent upstream. + core.v4alpha.ProxyProtocolConfig proxy_protocol_config = 1; + } + // The case-insensitive name of this upgrade, e.g. "websocket". // For each upgrade type present in upgrade_configs, requests with // Upgrade: [upgrade_type] will be proxied upstream. @@ -714,6 +757,12 @@ message RouteAction { // Determines if upgrades are available on this route. Defaults to true. google.protobuf.BoolValue enabled = 2; + + // Configuration for sending data upstream as a raw data payload. This is used for + // CONNECT requests, when forwarding CONNECT payload as raw TCP. + // Note that CONNECT support is currently considered alpha in Envoy. + // [#comment:TODO(htuch): Replace the above comment with an alpha tag. + ConnectConfig connect_config = 3; } reserved 12, 18, 19, 16, 22, 21, 10; @@ -821,7 +870,7 @@ message RouteAction { // * The pattern ``(?i)/xxx/`` paired with a substitution string of ``/yyy/`` // would do a case-insensitive match and transform path ``/aaa/XxX/bbb`` to // ``/aaa/yyy/bbb``. - type.matcher.v3.RegexMatchAndSubstitute regex_rewrite = 32; + type.matcher.v4alpha.RegexMatchAndSubstitute regex_rewrite = 32; oneof host_rewrite_specifier { // Indicates that during forwarding, the host header will be swapped with @@ -958,7 +1007,13 @@ message RouteAction { repeated UpgradeConfig upgrade_configs = 25; - InternalRedirectAction internal_redirect_action = 26; + // If present, Envoy will try to follow an upstream redirect response instead of proxying the + // response back to the downstream. An upstream redirect response is defined + // by :ref:`redirect_response_codes + // `. 
+ InternalRedirectPolicy internal_redirect_policy = 34; + + InternalRedirectAction hidden_envoy_deprecated_internal_redirect_action = 26 [deprecated = true]; // An internal redirect is handled, iff the number of previous internal redirects that a // downstream request has encountered is lower than this value, and @@ -974,7 +1029,8 @@ message RouteAction { // will pass the redirect back to downstream. // // If not specified, at most one redirect will be followed. - google.protobuf.UInt32Value max_internal_redirects = 31; + google.protobuf.UInt32Value hidden_envoy_deprecated_max_internal_redirects = 31 + [deprecated = true]; // Indicates that the route has a hedge policy. Note that if this is set, // it'll take precedence over the virtual host level hedge policy entirely @@ -1045,7 +1101,7 @@ message RetryPolicy { // Specifies the allowed number of retries. This parameter is optional and // defaults to 1. These are the same conditions documented for // :ref:`config_http_filters_router_x-envoy-max-retries`. - google.protobuf.UInt32Value num_retries = 2; + google.protobuf.UInt32Value max_retries = 2; // Specifies a non-zero upstream timeout per retry attempt. This parameter is optional. The // same conditions documented for @@ -1166,6 +1222,21 @@ message RedirectAction { oneof path_rewrite_specifier { // The path portion of the URL will be swapped with this value. + // Please note that query string in path_redirect will override the + // request's query string and will not be stripped. + // + // For example, let's say we have the following routes: + // + // - match: { path: "/old-path-1" } + // redirect: { path_redirect: "/new-path-1" } + // - match: { path: "/old-path-2" } + // redirect: { path_redirect: "/new-path-2", strip-query: "true" } + // - match: { path: "/old-path-3" } + // redirect: { path_redirect: "/new-path-3?foo=1", strip_query: "true" } + // + // 1. if request uri is "/old-path-1?bar=1", users will be redirected to "/new-path-1?bar=1" + // 2. 
if request uri is "/old-path-2?bar=1", users will be redirected to "/new-path-2" + // 3. if request uri is "/old-path-3?bar=1", users will be redirected to "/new-path-3?foo=1" string path_redirect = 2 [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; @@ -1302,7 +1373,7 @@ message VirtualCluster { message RateLimit { option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.RateLimit"; - // [#next-free-field: 7] + // [#next-free-field: 8] message Action { option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.RateLimit.Action"; @@ -1359,6 +1430,11 @@ message RateLimit { // The key to use in the descriptor entry. string descriptor_key = 2 [(validate.rules).string = {min_bytes: 1}]; + + // If set to true, Envoy skips the descriptor while calling rate limiting service + // when header is not present in the request. By default it skips calling the + // rate limiting service if this header is not present in the request. + bool skip_if_absent = 3; } // The following descriptor entry is appended to the descriptor and is populated using the @@ -1411,6 +1487,27 @@ message RateLimit { repeated HeaderMatcher headers = 3 [(validate.rules).repeated = {min_items: 1}]; } + // The following descriptor entry is appended when the dynamic metadata contains a key value: + // + // .. code-block:: cpp + // + // ("", "") + message DynamicMetaData { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.RateLimit.Action.DynamicMetaData"; + + // The key to use in the descriptor entry. + string descriptor_key = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Metadata struct that defines the key and path to retrieve the string value. A match will + // only happen if the value in the dynamic metadata is of type string. + type.metadata.v3.MetadataKey metadata_key = 2 [(validate.rules).message = {required: true}]; + + // An optional value to use if *metadata_key* is empty. 
If not set and + // no value is present under the metadata_key then no descriptor is generated. + string default_value = 3; + } + oneof action_specifier { option (validate.required) = true; @@ -1431,6 +1528,33 @@ message RateLimit { // Rate limit on the existence of request headers. HeaderValueMatch header_value_match = 6; + + // Rate limit on dynamic metadata. + DynamicMetaData dynamic_metadata = 7; + } + } + + message Override { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.RateLimit.Override"; + + // Fetches the override from the dynamic metadata. + message DynamicMetadata { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.RateLimit.Override.DynamicMetadata"; + + // Metadata struct that defines the key and path to retrieve the struct value. + // The value must be a struct containing an integer "requests_per_unit" property + // and a "unit" property with a value parseable to :ref:`RateLimitUnit + // enum ` + type.metadata.v3.MetadataKey metadata_key = 1 [(validate.rules).message = {required: true}]; + } + + oneof override_specifier { + option (validate.required) = true; + + // Limit override from dynamic metadata. + DynamicMetadata dynamic_metadata = 1; } } @@ -1453,6 +1577,12 @@ message RateLimit { // configuration. See :ref:`composing actions // ` for additional documentation. repeated Action actions = 3 [(validate.rules).repeated = {min_items: 1}]; + + // An optional limit override to be appended to the descriptor produced by this + // rate limit configuration. If the override value is invalid or cannot be resolved + // from metadata, no override is provided. See :ref:`rate limit override + // ` for more information. + Override limit = 4; } // .. attention:: @@ -1500,7 +1630,7 @@ message HeaderMatcher { // If specified, this regex string is a regular expression rule which implies the entire request // header value must match the regex. 
The rule will not match if only a subsequence of the // request header value matches the regex. - type.matcher.v3.RegexMatcher safe_regex_match = 11; + type.matcher.v4alpha.RegexMatcher safe_regex_match = 11; // If specified, header match will be performed based on range. // The rule will match if the request header value is within this range. @@ -1562,9 +1692,40 @@ message QueryParameterMatcher { oneof query_parameter_match_specifier { // Specifies whether a query parameter value should match against a string. - type.matcher.v3.StringMatcher string_match = 5 [(validate.rules).message = {required: true}]; + type.matcher.v4alpha.StringMatcher string_match = 5 + [(validate.rules).message = {required: true}]; // Specifies whether a query parameter should be present. bool present_match = 6; } } + +// HTTP Internal Redirect :ref:`architecture overview `. +message InternalRedirectPolicy { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.InternalRedirectPolicy"; + + // An internal redirect is not handled, unless the number of previous internal redirects that a + // downstream request has encountered is lower than this value. + // In the case where a downstream request is bounced among multiple routes by internal redirect, + // the first route that hits this threshold, or does not set :ref:`internal_redirect_policy + // ` + // will pass the redirect back to downstream. + // + // If not specified, at most one redirect will be followed. + google.protobuf.UInt32Value max_internal_redirects = 1; + + // Defines what upstream response codes are allowed to trigger internal redirect. If unspecified, + // only 302 will be treated as internal redirect. + // Only 301, 302, 303, 307 and 308 are valid values. Any other codes will be ignored. 
+ repeated uint32 redirect_response_codes = 2 [(validate.rules).repeated = {max_items: 5}]; + + // Specifies a list of predicates that are queried when an upstream response is deemed + // to trigger an internal redirect by all other criteria. Any predicate in the list can reject + // the redirect, causing the response to be proxied to downstream. + repeated core.v4alpha.TypedExtensionConfig predicates = 3; + + // Allow internal redirect to follow a target URI with a different scheme than the value of + // x-forwarded-proto. The default is false. + bool allow_cross_scheme_redirect = 4; +} diff --git a/generated_api_shadow/envoy/config/tap/v3/BUILD b/generated_api_shadow/envoy/config/tap/v3/BUILD index f266efc592a2a..6fd3142264d9f 100644 --- a/generated_api_shadow/envoy/config/tap/v3/BUILD +++ b/generated_api_shadow/envoy/config/tap/v3/BUILD @@ -6,6 +6,7 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ + "//envoy/config/common/matcher/v3:pkg", "//envoy/config/core/v3:pkg", "//envoy/config/route/v3:pkg", "//envoy/service/tap/v2alpha:pkg", diff --git a/generated_api_shadow/envoy/config/tap/v3/common.proto b/generated_api_shadow/envoy/config/tap/v3/common.proto index 0fea8f88a638e..42783115f8719 100644 --- a/generated_api_shadow/envoy/config/tap/v3/common.proto +++ b/generated_api_shadow/envoy/config/tap/v3/common.proto @@ -2,6 +2,7 @@ syntax = "proto3"; package envoy.config.tap.v3; +import "envoy/config/common/matcher/v3/matcher.proto"; import "envoy/config/core/v3/base.proto"; import "envoy/config/core/v3/grpc_service.proto"; import "envoy/config/route/v3/route_components.proto"; @@ -28,7 +29,17 @@ message TapConfig { // The match configuration. If the configuration matches the data source being tapped, a tap will // occur, with the result written to the configured output. - MatchPredicate match_config = 1 [(validate.rules).message = {required: true}]; + // Exactly one of :ref:`match ` and + // :ref:`match_config ` must be set. 
If both + // are set, the :ref:`match ` will be used. + MatchPredicate match_config = 1 [deprecated = true]; + + // The match configuration. If the configuration matches the data source being tapped, a tap will + // occur, with the result written to the configured output. + // Exactly one of :ref:`match ` and + // :ref:`match_config ` must be set. If both + // are set, the :ref:`match ` will be used. + common.matcher.v3.MatchPredicate match = 4; // The tap output configuration. If a match configuration matches a data source being tapped, // a tap will occur and the data will be written to the configured output. @@ -47,7 +58,7 @@ message TapConfig { // Tap match configuration. This is a recursive structure which allows complex nested match // configurations to be built using various logical operators. -// [#next-free-field: 9] +// [#next-free-field: 11] message MatchPredicate { option (udpa.annotations.versioning).previous_message_type = "envoy.service.tap.v2alpha.MatchPredicate"; @@ -89,6 +100,12 @@ message MatchPredicate { // HTTP response trailers match configuration. HttpHeadersMatch http_response_trailers_match = 8; + + // HTTP request generic body match configuration. + HttpGenericBodyMatch http_request_generic_body_match = 9; + + // HTTP response generic body match configuration. + HttpGenericBodyMatch http_response_generic_body_match = 10; } } @@ -101,6 +118,36 @@ message HttpHeadersMatch { repeated route.v3.HeaderMatcher headers = 1; } +// HTTP generic body match configuration. +// List of text strings and hex strings to be located in HTTP body. +// All specified strings must be found in the HTTP body for positive match. +// The search may be limited to specified number of bytes from the body start. +// +// .. attention:: +// +// Searching for patterns in HTTP body is potentially cpu intensive. For each specified pattern, http body is scanned byte by byte to find a match. +// If multiple patterns are specified, the process is repeated for each pattern. 
If location of a pattern is known, ``bytes_limit`` should be specified +// to scan only part of the http body. +message HttpGenericBodyMatch { + message GenericTextMatch { + oneof rule { + option (validate.required) = true; + + // Text string to be located in HTTP body. + string string_match = 1 [(validate.rules).string = {min_len: 1}]; + + // Sequence of bytes to be located in HTTP body. + bytes binary_match = 2 [(validate.rules).bytes = {min_len: 1}]; + } + } + + // Limits search to specified number of bytes - default zero (no limit - match entire captured buffer). + uint32 bytes_limit = 1; + + // List of patterns to match. + repeated GenericTextMatch patterns = 2 [(validate.rules).repeated = {min_items: 1}]; +} + // Tap output configuration. message OutputConfig { option (udpa.annotations.versioning).previous_message_type = @@ -195,6 +242,7 @@ message OutputSink { // [#not-implemented-hide:] // GrpcService to stream data to. The format argument must be PROTO_BINARY. + // [#comment: TODO(samflattery): remove cleanup in uber_per_filter.cc once implemented] StreamingGrpcSink streaming_grpc = 4; } } diff --git a/generated_api_shadow/envoy/config/tap/v4alpha/BUILD b/generated_api_shadow/envoy/config/tap/v4alpha/BUILD new file mode 100644 index 0000000000000..be8b1c3a17e31 --- /dev/null +++ b/generated_api_shadow/envoy/config/tap/v4alpha/BUILD @@ -0,0 +1,15 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/common/matcher/v4alpha:pkg", + "//envoy/config/core/v4alpha:pkg", + "//envoy/config/route/v4alpha:pkg", + "//envoy/config/tap/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/config/tap/v4alpha/common.proto b/generated_api_shadow/envoy/config/tap/v4alpha/common.proto new file mode 100644 index 0000000000000..d18ba1db94c1f --- /dev/null +++ b/generated_api_shadow/envoy/config/tap/v4alpha/common.proto @@ -0,0 +1,279 @@ +syntax = "proto3"; + +package envoy.config.tap.v4alpha; + +import "envoy/config/common/matcher/v4alpha/matcher.proto"; +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/config/core/v4alpha/grpc_service.proto"; +import "envoy/config/route/v4alpha/route_components.proto"; + +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.tap.v4alpha"; +option java_outer_classname = "CommonProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Common tap configuration] + +// Tap configuration. +message TapConfig { + // [#comment:TODO(mattklein123): Rate limiting] + + option (udpa.annotations.versioning).previous_message_type = "envoy.config.tap.v3.TapConfig"; + + // The match configuration. If the configuration matches the data source being tapped, a tap will + // occur, with the result written to the configured output. + // Exactly one of :ref:`match ` and + // :ref:`match_config ` must be set. If both + // are set, the :ref:`match ` will be used. + MatchPredicate hidden_envoy_deprecated_match_config = 1 [deprecated = true]; + + // The match configuration. 
If the configuration matches the data source being tapped, a tap will + // occur, with the result written to the configured output. + // Exactly one of :ref:`match ` and + // :ref:`match_config ` must be set. If both + // are set, the :ref:`match ` will be used. + common.matcher.v4alpha.MatchPredicate match = 4; + + // The tap output configuration. If a match configuration matches a data source being tapped, + // a tap will occur and the data will be written to the configured output. + OutputConfig output_config = 2 [(validate.rules).message = {required: true}]; + + // [#not-implemented-hide:] Specify if Tap matching is enabled. The % of requests\connections for + // which the tap matching is enabled. When not enabled, the request\connection will not be + // recorded. + // + // .. note:: + // + // This field defaults to 100/:ref:`HUNDRED + // `. + core.v4alpha.RuntimeFractionalPercent tap_enabled = 3; +} + +// Tap match configuration. This is a recursive structure which allows complex nested match +// configurations to be built using various logical operators. +// [#next-free-field: 11] +message MatchPredicate { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.tap.v3.MatchPredicate"; + + // A set of match configurations used for logical operations. + message MatchSet { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.tap.v3.MatchPredicate.MatchSet"; + + // The list of rules that make up the set. + repeated MatchPredicate rules = 1 [(validate.rules).repeated = {min_items: 2}]; + } + + oneof rule { + option (validate.required) = true; + + // A set that describes a logical OR. If any member of the set matches, the match configuration + // matches. + MatchSet or_match = 1; + + // A set that describes a logical AND. If all members of the set match, the match configuration + // matches. + MatchSet and_match = 2; + + // A negation match. The match configuration will match if the negated match condition matches. 
+ MatchPredicate not_match = 3; + + // The match configuration will always match. + bool any_match = 4 [(validate.rules).bool = {const: true}]; + + // HTTP request headers match configuration. + HttpHeadersMatch http_request_headers_match = 5; + + // HTTP request trailers match configuration. + HttpHeadersMatch http_request_trailers_match = 6; + + // HTTP response headers match configuration. + HttpHeadersMatch http_response_headers_match = 7; + + // HTTP response trailers match configuration. + HttpHeadersMatch http_response_trailers_match = 8; + + // HTTP request generic body match configuration. + HttpGenericBodyMatch http_request_generic_body_match = 9; + + // HTTP response generic body match configuration. + HttpGenericBodyMatch http_response_generic_body_match = 10; + } +} + +// HTTP headers match configuration. +message HttpHeadersMatch { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.tap.v3.HttpHeadersMatch"; + + // HTTP headers to match. + repeated route.v4alpha.HeaderMatcher headers = 1; +} + +// HTTP generic body match configuration. +// List of text strings and hex strings to be located in HTTP body. +// All specified strings must be found in the HTTP body for positive match. +// The search may be limited to specified number of bytes from the body start. +// +// .. attention:: +// +// Searching for patterns in HTTP body is potentially cpu intensive. For each specified pattern, http body is scanned byte by byte to find a match. +// If multiple patterns are specified, the process is repeated for each pattern. If location of a pattern is known, ``bytes_limit`` should be specified +// to scan only part of the http body. 
+message HttpGenericBodyMatch { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.tap.v3.HttpGenericBodyMatch"; + + message GenericTextMatch { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.tap.v3.HttpGenericBodyMatch.GenericTextMatch"; + + oneof rule { + option (validate.required) = true; + + // Text string to be located in HTTP body. + string string_match = 1 [(validate.rules).string = {min_len: 1}]; + + // Sequence of bytes to be located in HTTP body. + bytes binary_match = 2 [(validate.rules).bytes = {min_len: 1}]; + } + } + + // Limits search to specified number of bytes - default zero (no limit - match entire captured buffer). + uint32 bytes_limit = 1; + + // List of patterns to match. + repeated GenericTextMatch patterns = 2 [(validate.rules).repeated = {min_items: 1}]; +} + +// Tap output configuration. +message OutputConfig { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.tap.v3.OutputConfig"; + + // Output sinks for tap data. Currently a single sink is allowed in the list. Once multiple + // sink types are supported this constraint will be relaxed. + repeated OutputSink sinks = 1 [(validate.rules).repeated = {min_items: 1 max_items: 1}]; + + // For buffered tapping, the maximum amount of received body that will be buffered prior to + // truncation. If truncation occurs, the :ref:`truncated + // ` field will be set. If not specified, the + // default is 1KiB. + google.protobuf.UInt32Value max_buffered_rx_bytes = 2; + + // For buffered tapping, the maximum amount of transmitted body that will be buffered prior to + // truncation. If truncation occurs, the :ref:`truncated + // ` field will be set. If not specified, the + // default is 1KiB. + google.protobuf.UInt32Value max_buffered_tx_bytes = 3; + + // Indicates whether taps produce a single buffered message per tap, or multiple streamed + // messages per tap in the emitted :ref:`TraceWrapper + // ` messages. 
Note that streamed tapping does not + // mean that no buffering takes place. Buffering may be required if data is processed before a + // match can be determined. See the HTTP tap filter :ref:`streaming + // ` documentation for more information. + bool streaming = 4; +} + +// Tap output sink configuration. +message OutputSink { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.tap.v3.OutputSink"; + + // Output format. All output is in the form of one or more :ref:`TraceWrapper + // ` messages. This enumeration indicates + // how those messages are written. Note that not all sinks support all output formats. See + // individual sink documentation for more information. + enum Format { + // Each message will be written as JSON. Any :ref:`body ` + // data will be present in the :ref:`as_bytes + // ` field. This means that body data will be + // base64 encoded as per the `proto3 JSON mappings + // `_. + JSON_BODY_AS_BYTES = 0; + + // Each message will be written as JSON. Any :ref:`body ` + // data will be present in the :ref:`as_string + // ` field. This means that body data will be + // string encoded as per the `proto3 JSON mappings + // `_. This format type is + // useful when it is known that that body is human readable (e.g., JSON over HTTP) and the + // user wishes to view it directly without being forced to base64 decode the body. + JSON_BODY_AS_STRING = 1; + + // Binary proto format. Note that binary proto is not self-delimiting. If a sink writes + // multiple binary messages without any length information the data stream will not be + // useful. However, for certain sinks that are self-delimiting (e.g., one message per file) + // this output format makes consumption simpler. + PROTO_BINARY = 2; + + // Messages are written as a sequence tuples, where each tuple is the message length encoded + // as a `protobuf 32-bit varint + // `_ + // followed by the binary message. 
The messages can be read back using the language specific + // protobuf coded stream implementation to obtain the message length and the message. + PROTO_BINARY_LENGTH_DELIMITED = 3; + + // Text proto format. + PROTO_TEXT = 4; + } + + // Sink output format. + Format format = 1 [(validate.rules).enum = {defined_only: true}]; + + oneof output_sink_type { + option (validate.required) = true; + + // Tap output will be streamed out the :http:post:`/tap` admin endpoint. + // + // .. attention:: + // + // It is only allowed to specify the streaming admin output sink if the tap is being + // configured from the :http:post:`/tap` admin endpoint. Thus, if an extension has + // been configured to receive tap configuration from some other source (e.g., static + // file, XDS, etc.) configuring the streaming admin output type will fail. + StreamingAdminSink streaming_admin = 2; + + // Tap output will be written to a file per tap sink. + FilePerTapSink file_per_tap = 3; + + // [#not-implemented-hide:] + // GrpcService to stream data to. The format argument must be PROTO_BINARY. + // [#comment: TODO(samflattery): remove cleanup in uber_per_filter.cc once implemented] + StreamingGrpcSink streaming_grpc = 4; + } +} + +// Streaming admin sink configuration. +message StreamingAdminSink { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.tap.v3.StreamingAdminSink"; +} + +// The file per tap sink outputs a discrete file for every tapped stream. +message FilePerTapSink { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.tap.v3.FilePerTapSink"; + + // Path prefix. The output file will be of the form _.pb, where is an + // identifier distinguishing the recorded trace for stream instances (the Envoy + // connection ID, HTTP stream ID, etc.). + string path_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; +} + +// [#not-implemented-hide:] Streaming gRPC sink configuration sends the taps to an external gRPC +// server. 
+message StreamingGrpcSink { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.tap.v3.StreamingGrpcSink"; + + // Opaque identifier, that will be sent back to the streaming grpc server. + string tap_id = 1; + + // The gRPC server that hosts the Tap Sink Service. + core.v4alpha.GrpcService grpc_service = 2 [(validate.rules).message = {required: true}]; +} diff --git a/generated_api_shadow/envoy/config/wasm/v2alpha/wasm.proto b/generated_api_shadow/envoy/config/wasm/v2alpha/wasm.proto deleted file mode 100644 index b8f050a23d2b0..0000000000000 --- a/generated_api_shadow/envoy/config/wasm/v2alpha/wasm.proto +++ /dev/null @@ -1,83 +0,0 @@ -syntax = "proto3"; - -package envoy.config.wasm.v2alpha; - -import "envoy/api/v2/core/base.proto"; - -import "google/protobuf/any.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.wasm.v2alpha"; -option java_outer_classname = "WasmProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.wasm.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Wasm service] - -// Configuration for a Wasm VM. -// [#next-free-field: 6] -// [#not-implemented-hide:] pending implementation. -message VmConfig { - // An ID which will be used along with a hash of the wasm code (or the name of the registered Null - // VM plugin) to determine which VM will be used for the plugin. All plugins which use the same - // *vm_id* and code will use the same VM. May be left blank. Sharing a VM between plugins can - // reduce memory utilization and make sharing of data easier which may have security implications. - // See ref: "TODO: add ref" for details. - string vm_id = 1; - - // The Wasm runtime type (either "v8" or "null" for code compiled into Envoy). 
- string runtime = 2 [(validate.rules).string = {min_bytes: 1}]; - - // The Wasm code that Envoy will execute. - api.v2.core.AsyncDataSource code = 3; - - // The Wasm configuration used in initialization of a new VM (proxy_on_start). - google.protobuf.Any configuration = 4; - - // Allow the wasm file to include pre-compiled code on VMs which support it. - // Warning: this should only be enable for trusted sources as the precompiled code is not - // verified. - bool allow_precompiled = 5; -} - -// Base Configuration for Wasm Plugins e.g. filters and services. -// [#next-free-field: 6] -// [#not-implemented-hide:] pending implementation. -message PluginConfig { - // A unique name for a filters/services in a VM for use in identifying the filter/service if - // multiple filters/services are handled by the same *vm_id* and *group_name* and for - // logging/debugging. - string name = 1; - - // A unique ID for a set of filters/services in a VM which will share a RootContext and Contexts - // if applicable (e.g. an Wasm HttpFilter and an Wasm AccessLog). If left blank, all - // filters/services with a blank group_name with the same *vm_id* will share Context(s). - string group_name = 2; - - // Configuration for finding or starting VM. - oneof vm_config { - VmConfig inline_vm_config = 3; - // In the future add referential VM configurations. - } - - // Filter/service configuration used to configure or reconfigure a plugin - // (proxy_on_configuration). - google.protobuf.Any configuration = 5; -} - -// WasmService is configured as a built-in *envoy.wasm_service* :ref:`ServiceConfig -// `. This opaque configuration will be used to -// create a Wasm Service. -// [#not-implemented-hide:] pending implementation. -message WasmService { - // General plugin configuration. - PluginConfig config = 1; - - // If true, create a single VM rather than creating one VM per worker. Such a singleton can - // not be used with filters. 
- bool singleton = 2; -} diff --git a/generated_api_shadow/envoy/data/accesslog/v3/accesslog.proto b/generated_api_shadow/envoy/data/accesslog/v3/accesslog.proto index 374569d937f28..c16b5be1ff0ed 100644 --- a/generated_api_shadow/envoy/data/accesslog/v3/accesslog.proto +++ b/generated_api_shadow/envoy/data/accesslog/v3/accesslog.proto @@ -186,7 +186,7 @@ message AccessLogCommon { } // Flags indicating occurrences during request/response processing. -// [#next-free-field: 20] +// [#next-free-field: 23] message ResponseFlags { option (udpa.annotations.versioning).previous_message_type = "envoy.data.accesslog.v2.ResponseFlags"; @@ -263,6 +263,15 @@ message ResponseFlags { // Indicates there was an HTTP protocol error on the downstream request. bool downstream_protocol_error = 19; + + // Indicates there was a max stream duration reached on the upstream request. + bool upstream_max_stream_duration_reached = 20; + + // Indicates the response was served from a cache filter. + bool response_from_cache_filter = 21; + + // Indicates that a filter configuration is not available. + bool no_filter_config_found = 22; } // Properties of a negotiated TLS connection. diff --git a/generated_api_shadow/envoy/data/core/v3/health_check_event.proto b/generated_api_shadow/envoy/data/core/v3/health_check_event.proto index cff0e381bd193..88b195b92b3df 100644 --- a/generated_api_shadow/envoy/data/core/v3/health_check_event.proto +++ b/generated_api_shadow/envoy/data/core/v3/health_check_event.proto @@ -42,27 +42,27 @@ message HealthCheckEvent { string cluster_name = 3 [(validate.rules).string = {min_bytes: 1}]; - // Host ejection. - google.protobuf.Timestamp timestamp = 6; - oneof event { option (validate.required) = true; - // Host addition. + // Host ejection. HealthCheckEjectUnhealthy eject_unhealthy_event = 4; - // Host failure. + // Host addition. HealthCheckAddHealthy add_healthy_event = 5; - // Healthy host became degraded. + // Host failure. 
HealthCheckFailure health_check_failure_event = 7; - // A degraded host returned to being healthy. + // Healthy host became degraded. DegradedHealthyHost degraded_healthy_host = 8; - // Timestamp for event. + // A degraded host returned to being healthy. NoLongerDegradedHost no_longer_degraded_host = 9; } + + // Timestamp for event. + google.protobuf.Timestamp timestamp = 6; } message HealthCheckEjectUnhealthy { diff --git a/generated_api_shadow/envoy/data/dns/v3/dns_table.proto b/generated_api_shadow/envoy/data/dns/v3/dns_table.proto index a6457e118672d..fd68847b892f1 100644 --- a/generated_api_shadow/envoy/data/dns/v3/dns_table.proto +++ b/generated_api_shadow/envoy/data/dns/v3/dns_table.proto @@ -28,22 +28,19 @@ message DnsTable { option (udpa.annotations.versioning).previous_message_type = "envoy.data.dns.v2alpha.DnsTable.AddressList"; - // This field contains a well formed IP address that is returned - // in the answer for a name query. The address field can be an - // IPv4 or IPv6 address. Address family detection is done automatically - // when Envoy parses the string. Since this field is repeated, - // Envoy will return one randomly chosen entry from this list in the - // DNS response. The random index will vary per query so that we prevent - // clients pinning on a single address for a configured domain + // This field contains a well formed IP address that is returned in the answer for a + // name query. The address field can be an IPv4 or IPv6 address. Address family + // detection is done automatically when Envoy parses the string. 
Since this field is + // repeated, Envoy will return as many entries from this list in the DNS response while + // keeping the response under 512 bytes repeated string address = 1 [(validate.rules).repeated = { min_items: 1 items {string {min_len: 3}} }]; } - // This message type is extensible and can contain a list of addresses - // or dictate some other method for resolving the addresses for an - // endpoint + // This message type is extensible and can contain a list of addresses, clusters or + // dictate a different method for resolving the addresses for an endpoint message DnsEndpoint { option (udpa.annotations.versioning).previous_message_type = "envoy.data.dns.v2alpha.DnsTable.DnsEndpoint"; @@ -52,6 +49,8 @@ message DnsTable { option (validate.required) = true; AddressList address_list = 1; + + string cluster_name = 2; } } @@ -59,27 +58,25 @@ message DnsTable { option (udpa.annotations.versioning).previous_message_type = "envoy.data.dns.v2alpha.DnsTable.DnsVirtualDomain"; - // The domain name for which Envoy will respond to query requests - string name = 1 [(validate.rules).string = {min_len: 2 well_known_regex: HTTP_HEADER_NAME}]; + // A domain name for which Envoy will respond to query requests + string name = 1 [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME}]; - // The configuration containing the method to determine the address - // of this endpoint + // The configuration containing the method to determine the address of this endpoint DnsEndpoint endpoint = 2; - // Sets the TTL in dns answers from Envoy returned to the client - google.protobuf.Duration answer_ttl = 3 [(validate.rules).duration = {gt {}}]; + // Sets the TTL in DNS answers from Envoy returned to the client. 
The default TTL is 300s + google.protobuf.Duration answer_ttl = 3 [(validate.rules).duration = {gte {seconds: 30}}]; } - // Control how many times envoy makes an attempt to forward a query to - // an external server - uint32 external_retry_count = 1; + // Control how many times Envoy makes an attempt to forward a query to an external DNS server + uint32 external_retry_count = 1 [(validate.rules).uint32 = {lte: 3}]; - // Fully qualified domain names for which Envoy will respond to queries - repeated DnsVirtualDomain virtual_domains = 2 [(validate.rules).repeated = {min_items: 1}]; + // Fully qualified domain names for which Envoy will respond to DNS queries. By leaving this + // list empty, Envoy will forward all queries to external resolvers + repeated DnsVirtualDomain virtual_domains = 2; - // This field serves to help Envoy determine whether it can authoritatively - // answer a query for a name matching a suffix in this list. If the query - // name does not match a suffix in this list, Envoy will forward - // the query to an upstream DNS server + // This field serves to help Envoy determine whether it can authoritatively answer a query + // for a name matching a suffix in this list. If the query name does not match a suffix in + // this list, Envoy will forward the query to an upstream DNS server repeated type.matcher.v3.StringMatcher known_suffixes = 3; } diff --git a/generated_api_shadow/envoy/data/dns/v4alpha/BUILD b/generated_api_shadow/envoy/data/dns/v4alpha/BUILD new file mode 100644 index 0000000000000..bc8958ceab0bf --- /dev/null +++ b/generated_api_shadow/envoy/data/dns/v4alpha/BUILD @@ -0,0 +1,13 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/data/dns/v3:pkg", + "//envoy/type/matcher/v4alpha:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/data/dns/v4alpha/dns_table.proto b/generated_api_shadow/envoy/data/dns/v4alpha/dns_table.proto new file mode 100644 index 0000000000000..22fe377281ddf --- /dev/null +++ b/generated_api_shadow/envoy/data/dns/v4alpha/dns_table.proto @@ -0,0 +1,82 @@ +syntax = "proto3"; + +package envoy.data.dns.v4alpha; + +import "envoy/type/matcher/v4alpha/string.proto"; + +import "google/protobuf/duration.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.data.dns.v4alpha"; +option java_outer_classname = "DnsTableProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: DNS Filter Table Data] +// :ref:`DNS Filter config overview `. + +// This message contains the configuration for the DNS Filter if populated +// from the control plane +message DnsTable { + option (udpa.annotations.versioning).previous_message_type = "envoy.data.dns.v3.DnsTable"; + + // This message contains a list of IP addresses returned for a query for a known name + message AddressList { + option (udpa.annotations.versioning).previous_message_type = + "envoy.data.dns.v3.DnsTable.AddressList"; + + // This field contains a well formed IP address that is returned in the answer for a + // name query. The address field can be an IPv4 or IPv6 address. Address family + // detection is done automatically when Envoy parses the string. 
Since this field is + // repeated, Envoy will return as many entries from this list in the DNS response while + // keeping the response under 512 bytes + repeated string address = 1 [(validate.rules).repeated = { + min_items: 1 + items {string {min_len: 3}} + }]; + } + + // This message type is extensible and can contain a list of addresses, clusters or + // dictate a different method for resolving the addresses for an endpoint + message DnsEndpoint { + option (udpa.annotations.versioning).previous_message_type = + "envoy.data.dns.v3.DnsTable.DnsEndpoint"; + + oneof endpoint_config { + option (validate.required) = true; + + AddressList address_list = 1; + + string cluster_name = 2; + } + } + + message DnsVirtualDomain { + option (udpa.annotations.versioning).previous_message_type = + "envoy.data.dns.v3.DnsTable.DnsVirtualDomain"; + + // A domain name for which Envoy will respond to query requests + string name = 1 [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME}]; + + // The configuration containing the method to determine the address of this endpoint + DnsEndpoint endpoint = 2; + + // Sets the TTL in DNS answers from Envoy returned to the client. The default TTL is 300s + google.protobuf.Duration answer_ttl = 3 [(validate.rules).duration = {gte {seconds: 30}}]; + } + + // Control how many times Envoy makes an attempt to forward a query to an external DNS server + uint32 external_retry_count = 1 [(validate.rules).uint32 = {lte: 3}]; + + // Fully qualified domain names for which Envoy will respond to DNS queries. By leaving this + // list empty, Envoy will forward all queries to external resolvers + repeated DnsVirtualDomain virtual_domains = 2; + + // This field serves to help Envoy determine whether it can authoritatively answer a query + // for a name matching a suffix in this list. 
If the query name does not match a suffix in + // this list, Envoy will forward the query to an upstream DNS server + repeated type.matcher.v4alpha.StringMatcher known_suffixes = 3; +} diff --git a/generated_api_shadow/envoy/data/tap/v3/common.proto b/generated_api_shadow/envoy/data/tap/v3/common.proto index c954b1b6747d7..861da12e20c1b 100644 --- a/generated_api_shadow/envoy/data/tap/v3/common.proto +++ b/generated_api_shadow/envoy/data/tap/v3/common.proto @@ -17,21 +17,21 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; message Body { option (udpa.annotations.versioning).previous_message_type = "envoy.data.tap.v2alpha.Body"; - // Body data as bytes. By default, tap body data will be present in this field, as the proto - // `bytes` type can contain any valid byte. - bool truncated = 3; - oneof body_type { + // Body data as bytes. By default, tap body data will be present in this field, as the proto + // `bytes` type can contain any valid byte. + bytes as_bytes = 1; + // Body data as string. This field is only used when the :ref:`JSON_BODY_AS_STRING // ` sink // format type is selected. See the documentation for that option for why this is useful. - bytes as_bytes = 1; - - // Specifies whether body data has been truncated to fit within the specified - // :ref:`max_buffered_rx_bytes - // ` and - // :ref:`max_buffered_tx_bytes - // ` settings. string as_string = 2; } + + // Specifies whether body data has been truncated to fit within the specified + // :ref:`max_buffered_rx_bytes + // ` and + // :ref:`max_buffered_tx_bytes + // ` settings. 
+ bool truncated = 3; } diff --git a/generated_api_shadow/envoy/extensions/access_loggers/file/v3/BUILD b/generated_api_shadow/envoy/extensions/access_loggers/file/v3/BUILD index db752e857c62e..3edacd3aafea1 100644 --- a/generated_api_shadow/envoy/extensions/access_loggers/file/v3/BUILD +++ b/generated_api_shadow/envoy/extensions/access_loggers/file/v3/BUILD @@ -7,6 +7,7 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/accesslog/v2:pkg", + "//envoy/config/core/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) diff --git a/generated_api_shadow/envoy/extensions/access_loggers/file/v3/file.proto b/generated_api_shadow/envoy/extensions/access_loggers/file/v3/file.proto index f3c9c0a11612f..de33623c207f9 100644 --- a/generated_api_shadow/envoy/extensions/access_loggers/file/v3/file.proto +++ b/generated_api_shadow/envoy/extensions/access_loggers/file/v3/file.proto @@ -2,6 +2,8 @@ syntax = "proto3"; package envoy.extensions.access_loggers.file.v3; +import "envoy/config/core/v3/substitution_format_string.proto"; + import "google/protobuf/struct.proto"; import "udpa/annotations/status.proto"; @@ -19,6 +21,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // Custom configuration for an :ref:`AccessLog ` // that writes log entries directly to a file. Configures the built-in *envoy.access_loggers.file* // AccessLog. +// [#next-free-field: 6] message FileAccessLog { option (udpa.annotations.versioning).previous_message_type = "envoy.config.accesslog.v2.FileAccessLog"; @@ -30,16 +33,27 @@ message FileAccessLog { // Access log :ref:`format string`. // Envoy supports :ref:`custom access log formats ` as well as a // :ref:`default format `. - string format = 2; + // This field is deprecated. + // Please use :ref:`log_format `. + string format = 2 [deprecated = true]; // Access log :ref:`format dictionary`. All values // are rendered as strings. 
- google.protobuf.Struct json_format = 3; + // This field is deprecated. + // Please use :ref:`log_format `. + google.protobuf.Struct json_format = 3 [deprecated = true]; // Access log :ref:`format dictionary`. Values are // rendered as strings, numbers, or boolean values as appropriate. Nested JSON objects may // be produced by some command operators (e.g.FILTER_STATE or DYNAMIC_METADATA). See the // documentation for a specific command operator for details. - google.protobuf.Struct typed_json_format = 4; + // This field is deprecated. + // Please use :ref:`log_format `. + google.protobuf.Struct typed_json_format = 4 [deprecated = true]; + + // Configuration to form access log data and format. + // If not specified, use :ref:`default format `. + config.core.v3.SubstitutionFormatString log_format = 5 + [(validate.rules).message = {required: true}]; } } diff --git a/generated_api_shadow/envoy/extensions/access_loggers/file/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/access_loggers/file/v4alpha/BUILD new file mode 100644 index 0000000000000..ba8c3042328bb --- /dev/null +++ b/generated_api_shadow/envoy/extensions/access_loggers/file/v4alpha/BUILD @@ -0,0 +1,13 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/core/v4alpha:pkg", + "//envoy/extensions/access_loggers/file/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/extensions/access_loggers/file/v4alpha/file.proto b/generated_api_shadow/envoy/extensions/access_loggers/file/v4alpha/file.proto new file mode 100644 index 0000000000000..c2a2c753f5bb6 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/access_loggers/file/v4alpha/file.proto @@ -0,0 +1,59 @@ +syntax = "proto3"; + +package envoy.extensions.access_loggers.file.v4alpha; + +import "envoy/config/core/v4alpha/substitution_format_string.proto"; + +import "google/protobuf/struct.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.access_loggers.file.v4alpha"; +option java_outer_classname = "FileProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: File access log] +// [#extension: envoy.access_loggers.file] + +// Custom configuration for an :ref:`AccessLog ` +// that writes log entries directly to a file. Configures the built-in *envoy.access_loggers.file* +// AccessLog. +// [#next-free-field: 6] +message FileAccessLog { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.access_loggers.file.v3.FileAccessLog"; + + // A path to a local file to which to write the access log entries. + string path = 1 [(validate.rules).string = {min_bytes: 1}]; + + oneof access_log_format { + // Access log :ref:`format string`. + // Envoy supports :ref:`custom access log formats ` as well as a + // :ref:`default format `. + // This field is deprecated. + // Please use :ref:`log_format `. 
+ string hidden_envoy_deprecated_format = 2 [deprecated = true]; + + // Access log :ref:`format dictionary`. All values + // are rendered as strings. + // This field is deprecated. + // Please use :ref:`log_format `. + google.protobuf.Struct hidden_envoy_deprecated_json_format = 3 [deprecated = true]; + + // Access log :ref:`format dictionary`. Values are + // rendered as strings, numbers, or boolean values as appropriate. Nested JSON objects may + // be produced by some command operators (e.g.FILTER_STATE or DYNAMIC_METADATA). See the + // documentation for a specific command operator for details. + // This field is deprecated. + // Please use :ref:`log_format `. + google.protobuf.Struct hidden_envoy_deprecated_typed_json_format = 4 [deprecated = true]; + + // Configuration to form access log data and format. + // If not specified, use :ref:`default format `. + config.core.v4alpha.SubstitutionFormatString log_format = 5 + [(validate.rules).message = {required: true}]; + } +} diff --git a/generated_api_shadow/envoy/extensions/access_loggers/grpc/v3/als.proto b/generated_api_shadow/envoy/extensions/access_loggers/grpc/v3/als.proto index 3cc154416627e..4996a877a9c6a 100644 --- a/generated_api_shadow/envoy/extensions/access_loggers/grpc/v3/als.proto +++ b/generated_api_shadow/envoy/extensions/access_loggers/grpc/v3/als.proto @@ -2,6 +2,7 @@ syntax = "proto3"; package envoy.extensions.access_loggers.grpc.v3; +import "envoy/config/core/v3/config_source.proto"; import "envoy/config/core/v3/grpc_service.proto"; import "google/protobuf/duration.proto"; @@ -53,7 +54,7 @@ message TcpGrpcAccessLogConfig { } // Common configuration for gRPC access logs. -// [#next-free-field: 6] +// [#next-free-field: 7] message CommonGrpcAccessLogConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.config.accesslog.v2.CommonGrpcAccessLogConfig"; @@ -66,6 +67,11 @@ message CommonGrpcAccessLogConfig { // The gRPC service for the access log service. 
config.core.v3.GrpcService grpc_service = 2 [(validate.rules).message = {required: true}]; + // API version for access logs service transport protocol. This describes the access logs service + // gRPC endpoint and version of messages used on the wire. + config.core.v3.ApiVersion transport_api_version = 6 + [(validate.rules).enum = {defined_only: true}]; + // Interval for flushing access logs to the gRPC stream. Logger will flush requests every time // this interval is elapsed, or when batch size limit is hit, whichever comes first. Defaults to // 1 second. diff --git a/generated_api_shadow/envoy/extensions/access_loggers/wasm/v3/BUILD b/generated_api_shadow/envoy/extensions/access_loggers/wasm/v3/BUILD new file mode 100644 index 0000000000000..8bad369e35113 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/access_loggers/wasm/v3/BUILD @@ -0,0 +1,12 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/extensions/wasm/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/extensions/access_loggers/wasm/v3/wasm.proto b/generated_api_shadow/envoy/extensions/access_loggers/wasm/v3/wasm.proto new file mode 100644 index 0000000000000..cd9db5906436f --- /dev/null +++ b/generated_api_shadow/envoy/extensions/access_loggers/wasm/v3/wasm.proto @@ -0,0 +1,20 @@ +syntax = "proto3"; + +package envoy.extensions.access_loggers.wasm.v3; + +import "envoy/extensions/wasm/v3/wasm.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.access_loggers.wasm.v3"; +option java_outer_classname = "WasmProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#not-implemented-hide:] +// Custom configuration for an 
:ref:`AccessLog ` +// that calls into a WASM VM. +message WasmAccessLog { + envoy.extensions.wasm.v3.PluginConfig config = 1; +} diff --git a/generated_api_shadow/envoy/extensions/clusters/dynamic_forward_proxy/v3/cluster.proto b/generated_api_shadow/envoy/extensions/clusters/dynamic_forward_proxy/v3/cluster.proto index 6f100d9dbb7e9..869e8c42caba1 100644 --- a/generated_api_shadow/envoy/extensions/clusters/dynamic_forward_proxy/v3/cluster.proto +++ b/generated_api_shadow/envoy/extensions/clusters/dynamic_forward_proxy/v3/cluster.proto @@ -27,4 +27,9 @@ message ClusterConfig { // `. common.dynamic_forward_proxy.v3.DnsCacheConfig dns_cache_config = 1 [(validate.rules).message = {required: true}]; + + // If true allow the cluster configuration to disable the auto_sni and auto_san_validation options + // in the :ref:`cluster's upstream_http_protocol_options + // ` + bool allow_insecure_cluster_options = 2; } diff --git a/generated_api_shadow/envoy/extensions/clusters/redis/v3/redis_cluster.proto b/generated_api_shadow/envoy/extensions/clusters/redis/v3/redis_cluster.proto index cf01359e55abb..afc19777edf2b 100644 --- a/generated_api_shadow/envoy/extensions/clusters/redis/v3/redis_cluster.proto +++ b/generated_api_shadow/envoy/extensions/clusters/redis/v3/redis_cluster.proto @@ -19,7 +19,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // of :ref:`Envoy's support for Redis Cluster `. // // Redis Cluster is an extension of Redis which supports sharding and high availability (where a -// shard that loses its master fails over to a replica, and designates it as the new master). +// shard that loses its primary fails over to a replica, and designates it as the new primary). // However, as there is no unified frontend or proxy service in front of Redis Cluster, the client // (in this case Envoy) must locally maintain the state of the Redis Cluster, specifically the // topology. 
A random node in the cluster is queried for the topology using the `CLUSTER SLOTS diff --git a/generated_api_shadow/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto b/generated_api_shadow/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto index 7c72af35af33e..79cd583486ac9 100644 --- a/generated_api_shadow/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto +++ b/generated_api_shadow/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto @@ -18,9 +18,16 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Dynamic forward proxy common configuration] +// Configuration of circuit breakers for resolver. +message DnsCacheCircuitBreakers { + // The maximum number of pending requests that Envoy will allow to the + // resolver. If not specified, the default is 1024. + google.protobuf.UInt32Value max_pending_requests = 1; +} + // Configuration for the dynamic forward proxy DNS cache. See the :ref:`architecture overview // ` for more information. -// [#next-free-field: 7] +// [#next-free-field: 9] message DnsCacheConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.config.common.dynamic_forward_proxy.v2alpha.DnsCacheConfig"; @@ -83,4 +90,13 @@ message DnsCacheConfig { // this is used as the cache's DNS refresh rate when DNS requests are failing. If this setting is // not specified, the failure refresh rate defaults to the dns_refresh_rate. config.cluster.v3.Cluster.RefreshRate dns_failure_refresh_rate = 6; + + // The config of circuit breakers for resolver. It provides a configurable threshold. + // If `envoy.reloadable_features.enable_dns_cache_circuit_breakers` is enabled, + // envoy will use dns cache circuit breakers with default settings even if this value is not set. + DnsCacheCircuitBreakers dns_cache_circuit_breaker = 7; + + // [#next-major-version: Reconcile DNS options in a single message.] 
+ // Always use TCP queries instead of UDP queries for DNS lookups. + bool use_tcp_for_dns_lookups = 8; } diff --git a/generated_api_shadow/envoy/extensions/common/ratelimit/v3/BUILD b/generated_api_shadow/envoy/extensions/common/ratelimit/v3/BUILD index ee90746aa30a5..256b1e65eda58 100644 --- a/generated_api_shadow/envoy/extensions/common/ratelimit/v3/BUILD +++ b/generated_api_shadow/envoy/extensions/common/ratelimit/v3/BUILD @@ -7,6 +7,7 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/api/v2/ratelimit:pkg", + "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) diff --git a/generated_api_shadow/envoy/extensions/common/ratelimit/v3/ratelimit.proto b/generated_api_shadow/envoy/extensions/common/ratelimit/v3/ratelimit.proto index 187ae3f229c46..9255deb4b64dc 100644 --- a/generated_api_shadow/envoy/extensions/common/ratelimit/v3/ratelimit.proto +++ b/generated_api_shadow/envoy/extensions/common/ratelimit/v3/ratelimit.proto @@ -2,6 +2,8 @@ syntax = "proto3"; package envoy.extensions.common.ratelimit.v3; +import "envoy/type/v3/ratelimit_unit.proto"; + import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -54,6 +56,10 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // // The idea behind the API is that (1)/(2)/(3) and (4)/(5) can be sent in 1 request if desired. // This enables building complex application scenarios with a generic backend. +// +// Optionally the descriptor can contain a limit override under a "limit" key, that specifies +// the number of requests per unit to use instead of the number configured in the +// rate limiting service. 
message RateLimitDescriptor { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.ratelimit.RateLimitDescriptor"; @@ -69,6 +75,20 @@ message RateLimitDescriptor { string value = 2 [(validate.rules).string = {min_bytes: 1}]; } + // Override rate limit to apply to this descriptor instead of the limit + // configured in the rate limit service. See :ref:`rate limit override + // ` for more information. + message RateLimitOverride { + // The number of requests per unit of time. + uint32 requests_per_unit = 1; + + // The unit of time. + type.v3.RateLimitUnit unit = 2 [(validate.rules).enum = {defined_only: true}]; + } + // Descriptor entries. repeated Entry entries = 1 [(validate.rules).repeated = {min_items: 1}]; + + // Optional rate limit override to supply to the ratelimit service. + RateLimitOverride limit = 2; } diff --git a/generated_api_shadow/envoy/extensions/common/tap/v3/BUILD b/generated_api_shadow/envoy/extensions/common/tap/v3/BUILD index 64688f5bb438e..eb16b73a21112 100644 --- a/generated_api_shadow/envoy/extensions/common/tap/v3/BUILD +++ b/generated_api_shadow/envoy/extensions/common/tap/v3/BUILD @@ -10,5 +10,6 @@ api_proto_package( "//envoy/config/core/v3:pkg", "//envoy/config/tap/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//udpa/core/v1:pkg", ], ) diff --git a/generated_api_shadow/envoy/extensions/common/tap/v3/common.proto b/generated_api_shadow/envoy/extensions/common/tap/v3/common.proto index 46a25b164d67a..68e80dad76b4b 100644 --- a/generated_api_shadow/envoy/extensions/common/tap/v3/common.proto +++ b/generated_api_shadow/envoy/extensions/common/tap/v3/common.proto @@ -5,6 +5,9 @@ package envoy.extensions.common.tap.v3; import "envoy/config/core/v3/config_source.proto"; import "envoy/config/tap/v3/common.proto"; +import "udpa/core/v1/resource_locator.proto"; + +import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; 
import "validate/validate.proto"; @@ -30,7 +33,12 @@ message CommonExtensionConfig { config.core.v3.ConfigSource config_source = 1 [(validate.rules).message = {required: true}]; // Tap config to request from XDS server. - string name = 2 [(validate.rules).string = {min_bytes: 1}]; + string name = 2 [(udpa.annotations.field_migrate).oneof_promotion = "name_specifier"]; + + // Resource locator for TAP. This is mutually exclusive to *name*. + // [#not-implemented-hide:] + udpa.core.v1.ResourceLocator tap_resource_locator = 3 + [(udpa.annotations.field_migrate).oneof_promotion = "name_specifier"]; } oneof config_type { diff --git a/generated_api_shadow/envoy/extensions/common/tap/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/common/tap/v4alpha/BUILD index d1fe49142a8e2..351e64d868455 100644 --- a/generated_api_shadow/envoy/extensions/common/tap/v4alpha/BUILD +++ b/generated_api_shadow/envoy/extensions/common/tap/v4alpha/BUILD @@ -7,8 +7,9 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/core/v4alpha:pkg", - "//envoy/config/tap/v3:pkg", + "//envoy/config/tap/v4alpha:pkg", "//envoy/extensions/common/tap/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//udpa/core/v1:pkg", ], ) diff --git a/generated_api_shadow/envoy/extensions/common/tap/v4alpha/common.proto b/generated_api_shadow/envoy/extensions/common/tap/v4alpha/common.proto index 63de14a3d6f63..536f13d049c34 100644 --- a/generated_api_shadow/envoy/extensions/common/tap/v4alpha/common.proto +++ b/generated_api_shadow/envoy/extensions/common/tap/v4alpha/common.proto @@ -3,7 +3,9 @@ syntax = "proto3"; package envoy.extensions.common.tap.v4alpha; import "envoy/config/core/v4alpha/config_source.proto"; -import "envoy/config/tap/v3/common.proto"; +import "envoy/config/tap/v4alpha/common.proto"; + +import "udpa/core/v1/resource_locator.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; @@ -30,8 +32,14 @@ message 
CommonExtensionConfig { config.core.v4alpha.ConfigSource config_source = 1 [(validate.rules).message = {required: true}]; - // Tap config to request from XDS server. - string name = 2 [(validate.rules).string = {min_bytes: 1}]; + oneof name_specifier { + // Tap config to request from XDS server. + string name = 2; + + // Resource locator for TAP. This is mutually exclusive to *name*. + // [#not-implemented-hide:] + udpa.core.v1.ResourceLocator tap_resource_locator = 3; + } } oneof config_type { @@ -42,7 +50,7 @@ message CommonExtensionConfig { // If specified, the tap filter will be configured via a static configuration that cannot be // changed. - config.tap.v3.TapConfig static_config = 2; + config.tap.v4alpha.TapConfig static_config = 2; // [#not-implemented-hide:] Configuration to use for TapDS updates for the filter. TapDSConfig tapds_config = 3; diff --git a/generated_api_shadow/envoy/extensions/compression/gzip/compressor/v3/BUILD b/generated_api_shadow/envoy/extensions/compression/gzip/compressor/v3/BUILD new file mode 100644 index 0000000000000..ef3541ebcb1df --- /dev/null +++ b/generated_api_shadow/envoy/extensions/compression/gzip/compressor/v3/BUILD @@ -0,0 +1,9 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/generated_api_shadow/envoy/extensions/compression/gzip/compressor/v3/gzip.proto b/generated_api_shadow/envoy/extensions/compression/gzip/compressor/v3/gzip.proto new file mode 100644 index 0000000000000..d4d60eaa43ee2 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/compression/gzip/compressor/v3/gzip.proto @@ -0,0 +1,79 @@ +syntax = "proto3"; + +package envoy.extensions.compression.gzip.compressor.v3; + +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.compression.gzip.compressor.v3"; +option java_outer_classname = "GzipProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Gzip Compressor] +// [#extension: envoy.compression.gzip.compressor] + +// [#next-free-field: 6] +message Gzip { + // All the values of this enumeration translate directly to zlib's compression strategies. + // For more information about each strategy, please refer to zlib manual. + enum CompressionStrategy { + DEFAULT_STRATEGY = 0; + FILTERED = 1; + HUFFMAN_ONLY = 2; + RLE = 3; + FIXED = 4; + } + + enum CompressionLevel { + option allow_alias = true; + + DEFAULT_COMPRESSION = 0; + BEST_SPEED = 1; + COMPRESSION_LEVEL_1 = 1; + COMPRESSION_LEVEL_2 = 2; + COMPRESSION_LEVEL_3 = 3; + COMPRESSION_LEVEL_4 = 4; + COMPRESSION_LEVEL_5 = 5; + COMPRESSION_LEVEL_6 = 6; + COMPRESSION_LEVEL_7 = 7; + COMPRESSION_LEVEL_8 = 8; + COMPRESSION_LEVEL_9 = 9; + BEST_COMPRESSION = 9; + } + + // Value from 1 to 9 that controls the amount of internal memory used by zlib. Higher values + // use more memory, but are faster and produce better compression results. 
The default value is 5. + google.protobuf.UInt32Value memory_level = 1 [(validate.rules).uint32 = {lte: 9 gte: 1}]; + + // A value used for selecting the zlib compression level. This setting will affect speed and + // amount of compression applied to the content. "BEST_COMPRESSION" provides higher compression + // at the cost of higher latency and is equal to "COMPRESSION_LEVEL_9". "BEST_SPEED" provides + // lower compression with minimum impact on response time, the same as "COMPRESSION_LEVEL_1". + // "DEFAULT_COMPRESSION" provides an optimal result between speed and compression. According + // to zlib's manual this level gives the same result as "COMPRESSION_LEVEL_6". + // This field will be set to "DEFAULT_COMPRESSION" if not specified. + CompressionLevel compression_level = 2 [(validate.rules).enum = {defined_only: true}]; + + // A value used for selecting the zlib compression strategy which is directly related to the + // characteristics of the content. Most of the time "DEFAULT_STRATEGY" will be the best choice, + // which is also the default value for the parameter, though there are situations when + // changing this parameter might produce better results. For example, run-length encoding (RLE) + // is typically used when the content is known for having sequences which same data occurs many + // consecutive times. For more information about each strategy, please refer to zlib manual. + CompressionStrategy compression_strategy = 3 [(validate.rules).enum = {defined_only: true}]; + + // Value from 9 to 15 that represents the base two logarithmic of the compressor's window size. + // Larger window results in better compression at the expense of memory usage. The default is 12 + // which will produce a 4096 bytes window. For more details about this parameter, please refer to + // zlib manual > deflateInit2. + google.protobuf.UInt32Value window_bits = 4 [(validate.rules).uint32 = {lte: 15 gte: 9}]; + + // Value for Zlib's next output buffer. 
If not set, defaults to 4096. + // See https://www.zlib.net/manual.html for more details. Also see + // https://github.com/envoyproxy/envoy/issues/8448 for context on this filter's performance. + google.protobuf.UInt32Value chunk_size = 5 [(validate.rules).uint32 = {lte: 65536 gte: 4096}]; +} diff --git a/generated_api_shadow/envoy/extensions/compression/gzip/decompressor/v3/BUILD b/generated_api_shadow/envoy/extensions/compression/gzip/decompressor/v3/BUILD new file mode 100644 index 0000000000000..ef3541ebcb1df --- /dev/null +++ b/generated_api_shadow/envoy/extensions/compression/gzip/decompressor/v3/BUILD @@ -0,0 +1,9 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/generated_api_shadow/envoy/extensions/compression/gzip/decompressor/v3/gzip.proto b/generated_api_shadow/envoy/extensions/compression/gzip/decompressor/v3/gzip.proto new file mode 100644 index 0000000000000..0ab0d947bd01d --- /dev/null +++ b/generated_api_shadow/envoy/extensions/compression/gzip/decompressor/v3/gzip.proto @@ -0,0 +1,31 @@ +syntax = "proto3"; + +package envoy.extensions.compression.gzip.decompressor.v3; + +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.compression.gzip.decompressor.v3"; +option java_outer_classname = "GzipProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Gzip Decompressor] +// [#extension: envoy.compression.gzip.decompressor] + +message Gzip { + // Value from 9 to 15 that represents the base two logarithmic of the decompressor's window size. 
+ // The decompression window size needs to be equal or larger than the compression window size. + // The default is 12 to match the default in the + // :ref:`gzip compressor `. + // For more details about this parameter, please refer to `zlib manual `_ > inflateInit2. + google.protobuf.UInt32Value window_bits = 1 [(validate.rules).uint32 = {lte: 15 gte: 9}]; + + // Value for zlib's decompressor output buffer. If not set, defaults to 4096. + // See https://www.zlib.net/manual.html for more details. + google.protobuf.UInt32Value chunk_size = 2 [(validate.rules).uint32 = {lte: 65536 gte: 4096}]; +} diff --git a/generated_api_shadow/envoy/extensions/filter/udp/dns_filter/v3alpha/dns_filter.proto b/generated_api_shadow/envoy/extensions/filter/udp/dns_filter/v3alpha/dns_filter.proto deleted file mode 100644 index 38a8872d323e1..0000000000000 --- a/generated_api_shadow/envoy/extensions/filter/udp/dns_filter/v3alpha/dns_filter.proto +++ /dev/null @@ -1,52 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filter.udp.dns_filter.v3alpha; - -import "envoy/config/core/v3/base.proto"; -import "envoy/data/dns/v3/dns_table.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filter.udp.dns_filter.v3alpha"; -option java_outer_classname = "DnsFilterProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: DNS Filter] -// DNS Filter :ref:`configuration overview `. -// [#extension: envoy.filters.udp_listener.dns_filter] - -// Configuration for the DNS filter. 
-message DnsFilterConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.udp.dns_filter.v2alpha.DnsFilterConfig"; - - // This message contains the configuration for the Dns Filter operating - // in a server context. This message will contain the virtual hosts and - // associated addresses with which Envoy will respond to queries - message ServerContextConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.udp.dns_filter.v2alpha.DnsFilterConfig.ServerContextConfig"; - - oneof config_source { - option (validate.required) = true; - - // Load the configuration specified from the control plane - data.dns.v3.DnsTable inline_dns_table = 1; - - // Seed the filter configuration from an external path. This source - // is a yaml formatted file that contains the DnsTable driving Envoy's - // responses to DNS queries - config.core.v3.DataSource external_dns_table = 2; - } - } - - // The stat prefix used when emitting DNS filter statistics - string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; - - // Server context configuration - ServerContextConfig server_config = 2; -} diff --git a/generated_api_shadow/envoy/extensions/filters/common/fault/v3/fault.proto b/generated_api_shadow/envoy/extensions/filters/common/fault/v3/fault.proto index a5a688468fb4d..f8df4c3d16e62 100644 --- a/generated_api_shadow/envoy/extensions/filters/common/fault/v3/fault.proto +++ b/generated_api_shadow/envoy/extensions/filters/common/fault/v3/fault.proto @@ -40,26 +40,26 @@ message FaultDelay { reserved 2; - // Add a fixed delay before forwarding the operation upstream. See - // https://developers.google.com/protocol-buffers/docs/proto3#json for - // the JSON/YAML Duration mapping. For HTTP/Mongo/Redis, the specified - // delay will be injected before a new request/operation. For TCP - // connections, the proxying of the connection upstream will be delayed - // for the specified period. 
This is required if type is FIXED. - type.v3.FractionalPercent percentage = 4; - - // Fault delays are controlled via an HTTP header (if applicable). - FaultDelayType hidden_envoy_deprecated_type = 1 - [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; - oneof fault_delay_secifier { option (validate.required) = true; - // The percentage of operations/connections/requests on which the delay will be injected. + // Add a fixed delay before forwarding the operation upstream. See + // https://developers.google.com/protocol-buffers/docs/proto3#json for + // the JSON/YAML Duration mapping. For HTTP/Mongo/Redis, the specified + // delay will be injected before a new request/operation. For TCP + // connections, the proxying of the connection upstream will be delayed + // for the specified period. This is required if type is FIXED. google.protobuf.Duration fixed_delay = 3 [(validate.rules).duration = {gt {}}]; + // Fault delays are controlled via an HTTP header (if applicable). HeaderDelay header_delay = 5; } + + // The percentage of operations/connections/requests on which the delay will be injected. + type.v3.FractionalPercent percentage = 4; + + FaultDelayType hidden_envoy_deprecated_type = 1 + [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; } // Describes a rate limit to be applied. @@ -84,16 +84,16 @@ message FaultRateLimit { "envoy.config.filter.fault.v2.FaultRateLimit.HeaderLimit"; } - // A fixed rate limit. - type.v3.FractionalPercent percentage = 2; - oneof limit_type { option (validate.required) = true; - // Rate limits are controlled via an HTTP header (if applicable). + // A fixed rate limit. FixedLimit fixed_limit = 1; - // The percentage of operations/connections/requests on which the rate limit will be injected. + // Rate limits are controlled via an HTTP header (if applicable). HeaderLimit header_limit = 3; } + + // The percentage of operations/connections/requests on which the rate limit will be injected. 
+ type.v3.FractionalPercent percentage = 2; } diff --git a/generated_api_shadow/envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.proto b/generated_api_shadow/envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.proto index 7ff9bb6a0f5f1..8dd851f4020a5 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.proto @@ -5,7 +5,6 @@ package envoy.extensions.filters.http.adaptive_concurrency.v3; import "envoy/config/core/v3/base.proto"; import "envoy/type/v3/percent.proto"; -import "google/api/annotations.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; @@ -93,15 +92,15 @@ message AdaptiveConcurrency { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.adaptive_concurrency.v2alpha.AdaptiveConcurrency"; - // Gradient concurrency control will be used. - config.core.v3.RuntimeFeatureFlag enabled = 2; - oneof concurrency_controller_config { option (validate.required) = true; - // If set to false, the adaptive concurrency filter will operate as a pass-through filter. If the - // message is unspecified, the filter will be enabled. + // Gradient concurrency control will be used. GradientControllerConfig gradient_controller_config = 1 [(validate.rules).message = {required: true}]; } + + // If set to false, the adaptive concurrency filter will operate as a pass-through filter. If the + // message is unspecified, the filter will be enabled. 
+ config.core.v3.RuntimeFeatureFlag enabled = 2; } diff --git a/generated_api_shadow/envoy/extensions/filters/http/admission_control/v3alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/http/admission_control/v3alpha/BUILD new file mode 100644 index 0000000000000..f139cce54af25 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/http/admission_control/v3alpha/BUILD @@ -0,0 +1,13 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/core/v3:pkg", + "//envoy/type/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/extensions/filters/http/admission_control/v3alpha/admission_control.proto b/generated_api_shadow/envoy/extensions/filters/http/admission_control/v3alpha/admission_control.proto new file mode 100644 index 0000000000000..6f01c88885f4e --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/http/admission_control/v3alpha/admission_control.proto @@ -0,0 +1,90 @@ +syntax = "proto3"; + +package envoy.extensions.filters.http.admission_control.v3alpha; + +import "envoy/config/core/v3/base.proto"; +import "envoy/type/v3/range.proto"; + +import "google/api/annotations.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; +import "google/rpc/status.proto"; + +import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.http.admission_control.v3alpha"; +option java_outer_classname = "AdmissionControlProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).work_in_progress = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Admission Control] +// [#extension: 
envoy.filters.http.admission_control] + +message AdmissionControl { + // Default method of specifying what constitutes a successful request. All status codes that + // indicate a successful request must be explicitly specified if not relying on the default + // values. + message SuccessCriteria { + message HttpCriteria { + // Status code ranges that constitute a successful request. Configurable codes are in the + // range [100, 600). + repeated type.v3.Int32Range http_success_status = 1 + [(validate.rules).repeated = {min_items: 1}]; + } + + message GrpcCriteria { + // Status codes that constitute a successful request. + // Mappings can be found at: https://github.com/grpc/grpc/blob/master/doc/statuscodes.md. + repeated uint32 grpc_success_status = 1 [(validate.rules).repeated = {min_items: 1}]; + } + + // If HTTP criteria are unspecified, all HTTP status codes below 500 are treated as successful + // responses. + // + // .. note:: + // + // The default HTTP codes considered successful by the admission controller are done so due + // to the unlikelihood that sending fewer requests would change their behavior (for example: + // redirects, unauthorized access, or bad requests won't be alleviated by sending less + // traffic). + HttpCriteria http_criteria = 1; + + // GRPC status codes to consider as request successes. If unspecified, defaults to: Ok, + // Cancelled, Unknown, InvalidArgument, NotFound, AlreadyExists, Unauthenticated, + // FailedPrecondition, OutOfRange, PermissionDenied, and Unimplemented. + // + // .. note:: + // + // The default gRPC codes that are considered successful by the admission controller are + // chosen because of the unlikelihood that sending fewer requests will change the behavior. + GrpcCriteria grpc_criteria = 2; + } + + // If set to false, the admission control filter will operate as a pass-through filter. If the + // message is unspecified, the filter will be enabled. 
+ config.core.v3.RuntimeFeatureFlag enabled = 1; + + // Defines how a request is considered a success/failure. + oneof evaluation_criteria { + option (validate.required) = true; + + SuccessCriteria success_criteria = 2; + } + + // The sliding time window over which the success rate is calculated. The window is rounded to the + // nearest second. Defaults to 120s. + google.protobuf.Duration sampling_window = 3; + + // Rejection probability is defined by the formula:: + // + // max(0, (rq_count - aggression_coefficient * rq_success_count) / (rq_count + 1)) + // + // The coefficient dictates how aggressively the admission controller will throttle requests as + // the success rate drops. Lower values will cause throttling to kick in at higher success rates + // and result in more aggressive throttling. Any values less than 1.0, will be set to 1.0. If the + // message is unspecified, the coefficient is 2.0. + config.core.v3.RuntimeDouble aggression_coefficient = 4; +} diff --git a/generated_api_shadow/envoy/extensions/filters/http/cache/v3alpha/cache.proto b/generated_api_shadow/envoy/extensions/filters/http/cache/v3alpha/cache.proto index 1ff305bb0e279..f78b1d24ac2ce 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/cache/v3alpha/cache.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/cache/v3alpha/cache.proto @@ -61,7 +61,7 @@ message CacheConfig { // contents of a response, as described by // https://httpwg.org/specs/rfc7234.html#caching.negotiated.responses. // - // During insertion, *allowed_vary_headers* acts as a whitelist: if a + // During insertion, *allowed_vary_headers* acts as an allowlist: if a // response's *vary* header mentions any header names that aren't in // *allowed_vary_headers*, that response will not be cached. 
// diff --git a/generated_api_shadow/envoy/extensions/filters/http/cache/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/http/cache/v4alpha/BUILD new file mode 100644 index 0000000000000..63033acab5cf1 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/http/cache/v4alpha/BUILD @@ -0,0 +1,14 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/route/v4alpha:pkg", + "//envoy/extensions/filters/http/cache/v3alpha:pkg", + "//envoy/type/matcher/v4alpha:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/extensions/filters/http/cache/v4alpha/cache.proto b/generated_api_shadow/envoy/extensions/filters/http/cache/v4alpha/cache.proto new file mode 100644 index 0000000000000..19921edb0310b --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/http/cache/v4alpha/cache.proto @@ -0,0 +1,84 @@ +syntax = "proto3"; + +package envoy.extensions.filters.http.cache.v4alpha; + +import "envoy/config/route/v4alpha/route_components.proto"; +import "envoy/type/matcher/v4alpha/string.proto"; + +import "google/protobuf/any.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.http.cache.v4alpha"; +option java_outer_classname = "CacheProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).work_in_progress = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: HTTP Cache Filter] +// [#extension: envoy.filters.http.cache] + +message CacheConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.cache.v3alpha.CacheConfig"; + + // [#not-implemented-hide:] + 
// Modifies cache key creation by restricting which parts of the URL are included. + message KeyCreatorParams { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.cache.v3alpha.CacheConfig.KeyCreatorParams"; + + // If true, exclude the URL scheme from the cache key. Set to true if your origins always + // produce the same response for http and https requests. + bool exclude_scheme = 1; + + // If true, exclude the host from the cache key. Set to true if your origins' responses don't + // ever depend on host. + bool exclude_host = 2; + + // If *query_parameters_included* is nonempty, only query parameters matched + // by one or more of its matchers are included in the cache key. Any other + // query params will not affect cache lookup. + repeated config.route.v4alpha.QueryParameterMatcher query_parameters_included = 3; + + // If *query_parameters_excluded* is nonempty, query parameters matched by one + // or more of its matchers are excluded from the cache key (even if also + // matched by *query_parameters_included*), and will not affect cache lookup. + repeated config.route.v4alpha.QueryParameterMatcher query_parameters_excluded = 4; + } + + // Config specific to the cache storage implementation. + google.protobuf.Any typed_config = 1 [(validate.rules).any = {required: true}]; + + // [#not-implemented-hide:] + // + // + // List of allowed *Vary* headers. + // + // The *vary* response header holds a list of header names that affect the + // contents of a response, as described by + // https://httpwg.org/specs/rfc7234.html#caching.negotiated.responses. + // + // During insertion, *allowed_vary_headers* acts as a allowlist: if a + // response's *vary* header mentions any header names that aren't in + // *allowed_vary_headers*, that response will not be cached. + // + // During lookup, *allowed_vary_headers* controls what request headers will be + // sent to the cache storage implementation. 
+ repeated type.matcher.v4alpha.StringMatcher allowed_vary_headers = 2; + + // [#not-implemented-hide:] + // + // + // Modifies cache key creation by restricting which parts of the URL are included. + KeyCreatorParams key_creator_params = 3; + + // [#not-implemented-hide:] + // + // + // Max body size the cache filter will insert into a cache. 0 means unlimited (though the cache + // storage implementation may have its own limit beyond which it will reject insertions). + uint32 max_body_bytes = 4; +} diff --git a/generated_api_shadow/envoy/extensions/filters/http/compressor/v3/compressor.proto b/generated_api_shadow/envoy/extensions/filters/http/compressor/v3/compressor.proto index 0eefe55140d26..0bfa5c1860d44 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/compressor/v3/compressor.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/compressor/v3/compressor.proto @@ -3,11 +3,14 @@ syntax = "proto3"; package envoy.extensions.filters.http.compressor.v3; import "envoy/config/core/v3/base.proto"; +import "envoy/config/core/v3/extension.proto"; +import "google/protobuf/any.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.compressor.v3"; option java_outer_classname = "CompressorProto"; @@ -15,8 +18,10 @@ option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Compressor] +// Compressor :ref:`configuration overview `. +// [#extension: envoy.filters.http.compressor] -// [#next-free-field: 6] +// [#next-free-field: 7] message Compressor { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.compressor.v2.Compressor"; @@ -46,4 +51,10 @@ message Compressor { // Runtime flag that controls whether the filter is enabled or not. 
If set to false, the // filter will operate as a pass-through filter. If not specified, defaults to enabled. config.core.v3.RuntimeFeatureFlag runtime_enabled = 5; + + // A compressor library to use for compression. Currently only + // :ref:`envoy.compression.gzip.compressor` + // is included in Envoy. + // This field is ignored if used in the context of the gzip http-filter, but is mandatory otherwise. + config.core.v3.TypedExtensionConfig compressor_library = 6; } diff --git a/generated_api_shadow/envoy/extensions/filters/http/csrf/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/http/csrf/v4alpha/BUILD new file mode 100644 index 0000000000000..72211218ff525 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/http/csrf/v4alpha/BUILD @@ -0,0 +1,14 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/core/v4alpha:pkg", + "//envoy/extensions/filters/http/csrf/v3:pkg", + "//envoy/type/matcher/v4alpha:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/extensions/filters/http/csrf/v4alpha/csrf.proto b/generated_api_shadow/envoy/extensions/filters/http/csrf/v4alpha/csrf.proto new file mode 100644 index 0000000000000..dda915a059af5 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/http/csrf/v4alpha/csrf.proto @@ -0,0 +1,54 @@ +syntax = "proto3"; + +package envoy.extensions.filters.http.csrf.v4alpha; + +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/type/matcher/v4alpha/string.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.http.csrf.v4alpha"; +option java_outer_classname = "CsrfProto"; +option java_multiple_files = true; +option 
(udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: CSRF] +// Cross-Site Request Forgery :ref:`configuration overview `. +// [#extension: envoy.filters.http.csrf] + +// CSRF filter config. +message CsrfPolicy { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.csrf.v3.CsrfPolicy"; + + // Specifies the % of requests for which the CSRF filter is enabled. + // + // If :ref:`runtime_key ` is specified, + // Envoy will lookup the runtime key to get the percentage of requests to filter. + // + // .. note:: + // + // This field defaults to 100/:ref:`HUNDRED + // `. + config.core.v4alpha.RuntimeFractionalPercent filter_enabled = 1 + [(validate.rules).message = {required: true}]; + + // Specifies that CSRF policies will be evaluated and tracked, but not enforced. + // + // This is intended to be used when ``filter_enabled`` is off and will be ignored otherwise. + // + // If :ref:`runtime_key ` is specified, + // Envoy will lookup the runtime key to get the percentage of requests for which it will evaluate + // and track the request's *Origin* and *Destination* to determine if it's valid, but will not + // enforce any policies. + config.core.v4alpha.RuntimeFractionalPercent shadow_enabled = 2; + + // Specifies additional source origins that will be allowed in addition to + // the destination origin. + // + // More information on how this can be configured via runtime can be found + // :ref:`here `. + repeated type.matcher.v4alpha.StringMatcher additional_origins = 3; +} diff --git a/generated_api_shadow/envoy/extensions/filters/http/decompressor/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/decompressor/v3/BUILD new file mode 100644 index 0000000000000..2c3dad6453b65 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/http/decompressor/v3/BUILD @@ -0,0 +1,12 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/core/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/extensions/filters/http/decompressor/v3/decompressor.proto b/generated_api_shadow/envoy/extensions/filters/http/decompressor/v3/decompressor.proto new file mode 100644 index 0000000000000..1e3d72766d054 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/http/decompressor/v3/decompressor.proto @@ -0,0 +1,57 @@ +syntax = "proto3"; + +package envoy.extensions.filters.http.decompressor.v3; + +import "envoy/config/core/v3/base.proto"; +import "envoy/config/core/v3/extension.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.http.decompressor.v3"; +option java_outer_classname = "DecompressorProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Decompressor] +// [#extension: envoy.filters.http.decompressor] + +message Decompressor { + // Common configuration for filter behavior on both the request and response direction. + message CommonDirectionConfig { + // Runtime flag that controls whether the filter is enabled for decompression or not. If set to false, the + // filter will operate as a pass-through filter. If the message is unspecified, the filter will be enabled. + config.core.v3.RuntimeFeatureFlag enabled = 1; + } + + // Configuration for filter behavior on the request direction. 
+ message RequestDirectionConfig { + CommonDirectionConfig common_config = 1; + + // If set to true, and response decompression is enabled, the filter modifies the Accept-Encoding + // request header by appending the decompressor_library's encoding. Defaults to true. + google.protobuf.BoolValue advertise_accept_encoding = 2; + } + + // Configuration for filter behavior on the response direction. + message ResponseDirectionConfig { + CommonDirectionConfig common_config = 1; + } + + // A decompressor library to use for both request and response decompression. Currently only + // :ref:`envoy.compression.gzip.compressor` + // is included in Envoy. + config.core.v3.TypedExtensionConfig decompressor_library = 1 + [(validate.rules).message = {required: true}]; + + // Configuration for request decompression. Decompression is enabled by default if left empty. + RequestDirectionConfig request_direction_config = 2; + + // Configuration for response decompression. Decompression is enabled by default if left empty. + ResponseDirectionConfig response_direction_config = 3; +} diff --git a/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto b/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto index 64e82c7b16145..0c99cb6997f8c 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto @@ -3,6 +3,7 @@ syntax = "proto3"; package envoy.extensions.filters.http.ext_authz.v3; import "envoy/config/core/v3/base.proto"; +import "envoy/config/core/v3/config_source.proto"; import "envoy/config/core/v3/grpc_service.proto"; import "envoy/config/core/v3/http_uri.proto"; import "envoy/type/matcher/v3/string.proto"; @@ -22,16 +23,24 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // External Authorization :ref:`configuration overview `. 
// [#extension: envoy.filters.http.ext_authz] -// [#next-free-field: 11] +// [#next-free-field: 13] message ExtAuthz { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.ext_authz.v2.ExtAuthz"; - // gRPC service configuration (default timeout: 200ms). - bool failure_mode_allow = 2; + // External authorization service configuration. + oneof services { + // gRPC service configuration (default timeout: 200ms). + config.core.v3.GrpcService grpc_service = 1; - // HTTP service configuration (default timeout: 200ms). - BufferSettings with_request_body = 5; + // HTTP service configuration (default timeout: 200ms). + HttpService http_service = 3; + } + + // API version for ext_authz transport protocol. This describes the ext_authz gRPC endpoint and + // version of messages used on the wire. + config.core.v3.ApiVersion transport_api_version = 12 + [(validate.rules).enum = {defined_only: true}]; // Changes filter's behaviour on errors: // @@ -45,12 +54,12 @@ message ExtAuthz { // // Note that errors can be *always* tracked in the :ref:`stats // `. - bool clear_route_cache = 6; + bool failure_mode_allow = 2; // Enables filter to buffer the client request body and send it within the authorization request. // A ``x-envoy-auth-partial-body: false|true`` metadata header will be added to the authorization // request message indicating if the body data is partial. - type.v3.HttpStatus status_on_error = 7; + BufferSettings with_request_body = 5; // Clears route cache in order to allow the external authorization service to correctly affect // routing decisions. Filter clears all cached routes when: @@ -62,11 +71,11 @@ message ExtAuthz { // 3. At least one *authorization response header* is added to the client request, or is used for // altering another client request header. 
// - repeated string metadata_context_namespaces = 8; + bool clear_route_cache = 6; // Sets the HTTP status that is returned to the client when there is a network error between the // filter and the authorization server. The default status is HTTP 403 Forbidden. - config.core.v3.RuntimeFractionalPercent filter_enabled = 9; + type.v3.HttpStatus status_on_error = 7; // Specifies a list of metadata namespaces whose values, if present, will be passed to the // ext_authz service as an opaque *protobuf::Struct*. @@ -80,7 +89,7 @@ message ExtAuthz { // metadata_context_namespaces: // - envoy.filters.http.jwt_authn // - bool include_peer_certificate = 10; + repeated string metadata_context_namespaces = 8; // Specifies if the filter is enabled. // @@ -88,19 +97,25 @@ message ExtAuthz { // Envoy will lookup the runtime key to get the percentage of requests to filter. // // If this field is not specified, the filter will be enabled for all requests. - bool hidden_envoy_deprecated_use_alpha = 4 - [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; + config.core.v3.RuntimeFractionalPercent filter_enabled = 9; - // External authorization service configuration. - oneof services { - // Specifies if the peer certificate is sent to the external service. - // - // When this field is true, Envoy will include the peer X.509 certificate, if available, in the - // :ref:`certificate`. - config.core.v3.GrpcService grpc_service = 1; + // Specifies whether to deny the requests, when the filter is disabled. + // If :ref:`runtime_key ` is specified, + // Envoy will lookup the runtime key to determine whether to deny request for + // filter protected path at filter disabling. If filter is disabled in + // typed_per_filter_config for the path, requests will not be denied. + // + // If this field is not specified, all requests will be allowed when disabled. 
+ config.core.v3.RuntimeFeatureFlag deny_at_disable = 11; - HttpService http_service = 3; - } + // Specifies if the peer certificate is sent to the external service. + // + // When this field is true, Envoy will include the peer X.509 certificate, if available, in the + // :ref:`certificate`. + bool include_peer_certificate = 10; + + bool hidden_envoy_deprecated_use_alpha = 4 + [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; } // Configuration for buffering the request data. @@ -197,6 +212,11 @@ message AuthorizationResponse { // Note that coexistent headers will be overridden. type.matcher.v3.ListStringMatcher allowed_upstream_headers = 1; + // When this :ref:`list ` is set, authorization + // response headers that have a correspondent match will be added to the client's response. Note + // that coexistent headers will be appended. + type.matcher.v3.ListStringMatcher allowed_upstream_headers_to_append = 3; + // When this :ref:`list `. is set, authorization // response headers that have a correspondent match will be added to the client's response. Note // that when this list is *not* set, all the authorization response headers, except *Authority diff --git a/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v4alpha/BUILD new file mode 100644 index 0000000000000..9a3d8a574a9b0 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v4alpha/BUILD @@ -0,0 +1,16 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/annotations:pkg", + "//envoy/config/core/v4alpha:pkg", + "//envoy/extensions/filters/http/ext_authz/v3:pkg", + "//envoy/type/matcher/v4alpha:pkg", + "//envoy/type/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v4alpha/ext_authz.proto b/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v4alpha/ext_authz.proto new file mode 100644 index 0000000000000..7442715a0db34 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v4alpha/ext_authz.proto @@ -0,0 +1,265 @@ +syntax = "proto3"; + +package envoy.extensions.filters.http.ext_authz.v4alpha; + +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/config/core/v4alpha/config_source.proto"; +import "envoy/config/core/v4alpha/grpc_service.proto"; +import "envoy/config/core/v4alpha/http_uri.proto"; +import "envoy/type/matcher/v4alpha/string.proto"; +import "envoy/type/v3/http_status.proto"; + +import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.http.ext_authz.v4alpha"; +option java_outer_classname = "ExtAuthzProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: External Authorization] +// External Authorization :ref:`configuration overview `. +// [#extension: envoy.filters.http.ext_authz] + +// [#next-free-field: 13] +message ExtAuthz { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.ext_authz.v3.ExtAuthz"; + + reserved 4; + + reserved "use_alpha"; + + // External authorization service configuration. 
+ oneof services { + // gRPC service configuration (default timeout: 200ms). + config.core.v4alpha.GrpcService grpc_service = 1; + + // HTTP service configuration (default timeout: 200ms). + HttpService http_service = 3; + } + + // API version for ext_authz transport protocol. This describes the ext_authz gRPC endpoint and + // version of messages used on the wire. + config.core.v4alpha.ApiVersion transport_api_version = 12 + [(validate.rules).enum = {defined_only: true}]; + + // Changes filter's behaviour on errors: + // + // 1. When set to true, the filter will *accept* client request even if the communication with + // the authorization service has failed, or if the authorization service has returned a HTTP 5xx + // error. + // + // 2. When set to false, ext-authz will *reject* client requests and return a *Forbidden* + // response if the communication with the authorization service has failed, or if the + // authorization service has returned a HTTP 5xx error. + // + // Note that errors can be *always* tracked in the :ref:`stats + // `. + bool failure_mode_allow = 2; + + // Enables filter to buffer the client request body and send it within the authorization request. + // A ``x-envoy-auth-partial-body: false|true`` metadata header will be added to the authorization + // request message indicating if the body data is partial. + BufferSettings with_request_body = 5; + + // Clears route cache in order to allow the external authorization service to correctly affect + // routing decisions. Filter clears all cached routes when: + // + // 1. The field is set to *true*. + // + // 2. The status returned from the authorization service is a HTTP 200 or gRPC 0. + // + // 3. At least one *authorization response header* is added to the client request, or is used for + // altering another client request header. 
+ // + bool clear_route_cache = 6; + + // Sets the HTTP status that is returned to the client when there is a network error between the + // filter and the authorization server. The default status is HTTP 403 Forbidden. + type.v3.HttpStatus status_on_error = 7; + + // Specifies a list of metadata namespaces whose values, if present, will be passed to the + // ext_authz service as an opaque *protobuf::Struct*. + // + // For example, if the *jwt_authn* filter is used and :ref:`payload_in_metadata + // ` is set, + // then the following will pass the jwt payload to the authorization server. + // + // .. code-block:: yaml + // + // metadata_context_namespaces: + // - envoy.filters.http.jwt_authn + // + repeated string metadata_context_namespaces = 8; + + // Specifies if the filter is enabled. + // + // If :ref:`runtime_key ` is specified, + // Envoy will lookup the runtime key to get the percentage of requests to filter. + // + // If this field is not specified, the filter will be enabled for all requests. + config.core.v4alpha.RuntimeFractionalPercent filter_enabled = 9; + + // Specifies whether to deny the requests, when the filter is disabled. + // If :ref:`runtime_key ` is specified, + // Envoy will lookup the runtime key to determine whether to deny request for + // filter protected path at filter disabling. If filter is disabled in + // typed_per_filter_config for the path, requests will not be denied. + // + // If this field is not specified, all requests will be allowed when disabled. + config.core.v4alpha.RuntimeFeatureFlag deny_at_disable = 11; + + // Specifies if the peer certificate is sent to the external service. + // + // When this field is true, Envoy will include the peer X.509 certificate, if available, in the + // :ref:`certificate`. + bool include_peer_certificate = 10; +} + +// Configuration for buffering the request data. 
+message BufferSettings { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.ext_authz.v3.BufferSettings"; + + // Sets the maximum size of a message body that the filter will hold in memory. Envoy will return + // *HTTP 413* and will *not* initiate the authorization process when buffer reaches the number + // set in this field. Note that this setting will have precedence over :ref:`failure_mode_allow + // `. + uint32 max_request_bytes = 1 [(validate.rules).uint32 = {gt: 0}]; + + // When this field is true, Envoy will buffer the message until *max_request_bytes* is reached. + // The authorization request will be dispatched and no 413 HTTP error will be returned by the + // filter. + bool allow_partial_message = 2; +} + +// HttpService is used for raw HTTP communication between the filter and the authorization service. +// When configured, the filter will parse the client request and use these attributes to call the +// authorization server. Depending on the response, the filter may reject or accept the client +// request. Note that in any of these events, metadata can be added, removed or overridden by the +// filter: +// +// *On authorization request*, a list of allowed request headers may be supplied. See +// :ref:`allowed_headers +// ` +// for details. Additional headers metadata may be added to the authorization request. See +// :ref:`headers_to_add +// ` for +// details. +// +// On authorization response status HTTP 200 OK, the filter will allow traffic to the upstream and +// additional headers metadata may be added to the original client request. See +// :ref:`allowed_upstream_headers +// ` +// for details. +// +// On other authorization response statuses, the filter will not allow traffic. Additional headers +// metadata as well as body may be added to the client's response. See :ref:`allowed_client_headers +// ` +// for details. 
+// [#next-free-field: 9] +message HttpService { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.ext_authz.v3.HttpService"; + + reserved 3, 4, 5, 6; + + // Sets the HTTP server URI which the authorization requests must be sent to. + config.core.v4alpha.HttpUri server_uri = 1; + + // Sets a prefix to the value of authorization request header *Path*. + string path_prefix = 2; + + // Settings used for controlling authorization request metadata. + AuthorizationRequest authorization_request = 7; + + // Settings used for controlling authorization response metadata. + AuthorizationResponse authorization_response = 8; +} + +message AuthorizationRequest { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.ext_authz.v3.AuthorizationRequest"; + + // Authorization request will include the client request headers that have a correspondent match + // in the :ref:`list `. Note that in addition to the + // user's supplied matchers: + // + // 1. *Host*, *Method*, *Path* and *Content-Length* are automatically included to the list. + // + // 2. *Content-Length* will be set to 0 and the request to the authorization service will not have + // a message body. However, the authorization request can include the buffered client request body + // (controlled by :ref:`with_request_body + // ` setting), + // consequently the value of *Content-Length* of the authorization request reflects the size of + // its payload size. + // + type.matcher.v4alpha.ListStringMatcher allowed_headers = 1; + + // Sets a list of headers that will be included to the request to authorization service. Note that + // client request of the same key will be overridden. 
+ repeated config.core.v4alpha.HeaderValue headers_to_add = 2; +} + +message AuthorizationResponse { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.ext_authz.v3.AuthorizationResponse"; + + // When this :ref:`list ` is set, authorization + // response headers that have a correspondent match will be added to the original client request. + // Note that coexistent headers will be overridden. + type.matcher.v4alpha.ListStringMatcher allowed_upstream_headers = 1; + + // When this :ref:`list ` is set, authorization + // response headers that have a correspondent match will be added to the client's response. Note + // that coexistent headers will be appended. + type.matcher.v4alpha.ListStringMatcher allowed_upstream_headers_to_append = 3; + + // When this :ref:`list `. is set, authorization + // response headers that have a correspondent match will be added to the client's response. Note + // that when this list is *not* set, all the authorization response headers, except *Authority + // (Host)* will be in the response to the client. When a header is included in this list, *Path*, + // *Status*, *Content-Length*, *WWWAuthenticate* and *Location* are automatically added. + type.matcher.v4alpha.ListStringMatcher allowed_client_headers = 2; +} + +// Extra settings on a per virtualhost/route/weighted-cluster level. +message ExtAuthzPerRoute { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.ext_authz.v3.ExtAuthzPerRoute"; + + oneof override { + option (validate.required) = true; + + // Disable the ext auth filter for this particular vhost or route. + // If disabled is specified in multiple per-filter-configs, the most specific one will be used. + bool disabled = 1 [(validate.rules).bool = {const: true}]; + + // Check request settings for this route. + CheckSettings check_settings = 2 [(validate.rules).message = {required: true}]; + } +} + +// Extra settings for the check request. 
You can use this to provide extra context for the +// external authorization server on specific virtual hosts \ routes. For example, adding a context +// extension on the virtual host level can give the ext-authz server information on what virtual +// host is used without needing to parse the host header. If CheckSettings is specified in multiple +// per-filter-configs, they will be merged in order, and the result will be used. +message CheckSettings { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.ext_authz.v3.CheckSettings"; + + // Context extensions to set on the CheckRequest's + // :ref:`AttributeContext.context_extensions` + // + // Merge semantics for this field are such that keys from more specific configs override. + // + // .. note:: + // + // These settings are only applied to a filter configured with a + // :ref:`grpc_service`. + map context_extensions = 1; +} diff --git a/generated_api_shadow/envoy/extensions/filters/http/fault/v3/fault.proto b/generated_api_shadow/envoy/extensions/filters/http/fault/v3/fault.proto index 07996a9507ff9..d28ed28b11100 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/fault/v3/fault.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/fault/v3/fault.proto @@ -21,6 +21,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // Fault Injection :ref:`configuration overview `. // [#extension: envoy.filters.http.fault] +// [#next-free-field: 6] message FaultAbort { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.fault.v2.FaultAbort"; @@ -35,22 +36,25 @@ message FaultAbort { reserved 1; - // HTTP status code to use to abort the HTTP request. - type.v3.FractionalPercent percentage = 3; - oneof error_type { option (validate.required) = true; - // Fault aborts are controlled via an HTTP header (if applicable). + // HTTP status code to use to abort the HTTP request. 
uint32 http_status = 2 [(validate.rules).uint32 = {lt: 600 gte: 200}]; - // The percentage of requests/operations/connections that will be aborted with the error code - // provided. + // gRPC status code to use to abort the gRPC request. + uint32 grpc_status = 5; + + // Fault aborts are controlled via an HTTP header (if applicable). HeaderAbort header_abort = 4; } + + // The percentage of requests/operations/connections that will be aborted with the error code + // provided. + type.v3.FractionalPercent percentage = 3; } -// [#next-free-field: 14] +// [#next-free-field: 15] message HTTPFault { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.fault.v2.HTTPFault"; @@ -133,4 +137,8 @@ message HTTPFault { // The runtime key to override the :ref:`default ` // runtime. The default is: fault.http.rate_limit.response_percent string response_rate_limit_percent_runtime = 13; + + // The runtime key to override the :ref:`default ` + // runtime. The default is: fault.http.abort.grpc_status + string abort_grpc_status_runtime = 14; } diff --git a/generated_api_shadow/envoy/extensions/filters/http/fault/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/http/fault/v4alpha/BUILD new file mode 100644 index 0000000000000..936ee4414038e --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/http/fault/v4alpha/BUILD @@ -0,0 +1,15 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/route/v4alpha:pkg", + "//envoy/extensions/filters/common/fault/v3:pkg", + "//envoy/extensions/filters/http/fault/v3:pkg", + "//envoy/type/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/extensions/filters/http/fault/v4alpha/fault.proto b/generated_api_shadow/envoy/extensions/filters/http/fault/v4alpha/fault.proto new file mode 100644 index 0000000000000..7dd4f48aa476b --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/http/fault/v4alpha/fault.proto @@ -0,0 +1,144 @@ +syntax = "proto3"; + +package envoy.extensions.filters.http.fault.v4alpha; + +import "envoy/config/route/v4alpha/route_components.proto"; +import "envoy/extensions/filters/common/fault/v3/fault.proto"; +import "envoy/type/v3/percent.proto"; + +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.http.fault.v4alpha"; +option java_outer_classname = "FaultProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Fault Injection] +// Fault Injection :ref:`configuration overview `. +// [#extension: envoy.filters.http.fault] + +// [#next-free-field: 6] +message FaultAbort { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.fault.v3.FaultAbort"; + + // Fault aborts are controlled via an HTTP header (if applicable). See the + // :ref:`HTTP fault filter ` documentation for + // more information. 
+ message HeaderAbort { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.fault.v3.FaultAbort.HeaderAbort"; + } + + reserved 1; + + oneof error_type { + option (validate.required) = true; + + // HTTP status code to use to abort the HTTP request. + uint32 http_status = 2 [(validate.rules).uint32 = {lt: 600 gte: 200}]; + + // gRPC status code to use to abort the gRPC request. + uint32 grpc_status = 5; + + // Fault aborts are controlled via an HTTP header (if applicable). + HeaderAbort header_abort = 4; + } + + // The percentage of requests/operations/connections that will be aborted with the error code + // provided. + type.v3.FractionalPercent percentage = 3; +} + +// [#next-free-field: 15] +message HTTPFault { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.fault.v3.HTTPFault"; + + // If specified, the filter will inject delays based on the values in the + // object. + common.fault.v3.FaultDelay delay = 1; + + // If specified, the filter will abort requests based on the values in + // the object. At least *abort* or *delay* must be specified. + FaultAbort abort = 2; + + // Specifies the name of the (destination) upstream cluster that the + // filter should match on. Fault injection will be restricted to requests + // bound to the specific upstream cluster. + string upstream_cluster = 3; + + // Specifies a set of headers that the filter should match on. The fault + // injection filter can be applied selectively to requests that match a set of + // headers specified in the fault filter config. The chances of actual fault + // injection further depend on the value of the :ref:`percentage + // ` field. + // The filter will check the request's headers against all the specified + // headers in the filter config. 
A match will happen if all the headers in the + // config are present in the request with the same values (or based on + // presence if the *value* field is not in the config). + repeated config.route.v4alpha.HeaderMatcher headers = 4; + + // Faults are injected for the specified list of downstream hosts. If this + // setting is not set, faults are injected for all downstream nodes. + // Downstream node name is taken from :ref:`the HTTP + // x-envoy-downstream-service-node + // ` header and compared + // against downstream_nodes list. + repeated string downstream_nodes = 5; + + // The maximum number of faults that can be active at a single time via the configured fault + // filter. Note that because this setting can be overridden at the route level, it's possible + // for the number of active faults to be greater than this value (if injected via a different + // route). If not specified, defaults to unlimited. This setting can be overridden via + // `runtime ` and any faults that are not injected + // due to overflow will be indicated via the `faults_overflow + // ` stat. + // + // .. attention:: + // Like other :ref:`circuit breakers ` in Envoy, this is a fuzzy + // limit. It's possible for the number of active faults to rise slightly above the configured + // amount due to the implementation details. + google.protobuf.UInt32Value max_active_faults = 6; + + // The response rate limit to be applied to the response body of the stream. When configured, + // the percentage can be overridden by the :ref:`fault.http.rate_limit.response_percent + // ` runtime key. + // + // .. attention:: + // This is a per-stream limit versus a connection level limit. This means that concurrent streams + // will each get an independent limit. + common.fault.v3.FaultRateLimit response_rate_limit = 7; + + // The runtime key to override the :ref:`default ` + // runtime. 
The default is: fault.http.delay.fixed_delay_percent + string delay_percent_runtime = 8; + + // The runtime key to override the :ref:`default ` + // runtime. The default is: fault.http.abort.abort_percent + string abort_percent_runtime = 9; + + // The runtime key to override the :ref:`default ` + // runtime. The default is: fault.http.delay.fixed_duration_ms + string delay_duration_runtime = 10; + + // The runtime key to override the :ref:`default ` + // runtime. The default is: fault.http.abort.http_status + string abort_http_status_runtime = 11; + + // The runtime key to override the :ref:`default ` + // runtime. The default is: fault.http.max_active_faults + string max_active_faults_runtime = 12; + + // The runtime key to override the :ref:`default ` + // runtime. The default is: fault.http.rate_limit.response_percent + string response_rate_limit_percent_runtime = 13; + + // The runtime key to override the :ref:`default ` + // runtime. The default is: fault.http.abort.grpc_status + string abort_grpc_status_runtime = 14; +} diff --git a/generated_api_shadow/envoy/extensions/filters/http/grpc_json_transcoder/v3/transcoder.proto b/generated_api_shadow/envoy/extensions/filters/http/grpc_json_transcoder/v3/transcoder.proto index da27441f2acab..3082089202eef 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/grpc_json_transcoder/v3/transcoder.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/grpc_json_transcoder/v3/transcoder.proto @@ -45,32 +45,36 @@ message GrpcJsonTranscoder { bool preserve_proto_field_names = 4; } - // Supplies the filename of - // :ref:`the proto descriptor set ` for the gRPC - // services. - repeated string services = 2 [(validate.rules).repeated = {min_items: 1}]; + oneof descriptor_set { + option (validate.required) = true; - // Supplies the binary content of - // :ref:`the proto descriptor set ` for the gRPC - // services. 
- PrintOptions print_options = 3; + // Supplies the filename of + // :ref:`the proto descriptor set ` for the gRPC + // services. + string proto_descriptor = 1; + + // Supplies the binary content of + // :ref:`the proto descriptor set ` for the gRPC + // services. + bytes proto_descriptor_bin = 4; + } // A list of strings that // supplies the fully qualified service names (i.e. "package_name.service_name") that // the transcoder will translate. If the service name doesn't exist in ``proto_descriptor``, // Envoy will fail at startup. The ``proto_descriptor`` may contain more services than // the service names specified here, but they won't be translated. - bool match_incoming_request_route = 5; + repeated string services = 2 [(validate.rules).repeated = {min_items: 1}]; // Control options for response JSON. These options are passed directly to // `JsonPrintOptions `_. - repeated string ignored_query_parameters = 6; + PrintOptions print_options = 3; // Whether to keep the incoming request route after the outgoing headers have been transformed to // the match the upstream gRPC service. Note: This means that routes for gRPC services that are // not transcoded cannot be used in combination with *match_incoming_request_route*. - bool auto_mapping = 7; + bool match_incoming_request_route = 5; // A list of query parameters to be ignored for transcoding method mapping. // By default, the transcoder filter will not transcode a request if there are any @@ -97,7 +101,7 @@ message GrpcJsonTranscoder { // The request ``/shelves/100?foo=bar`` will not be mapped to ``GetShelf``` because variable // binding for ``foo`` is not defined. Adding ``foo`` to ``ignored_query_parameters`` will allow // the same request to be mapped to ``GetShelf``. - bool ignore_unknown_query_parameters = 8; + repeated string ignored_query_parameters = 6; // Whether to route methods without the ``google.api.http`` option. 
// @@ -119,45 +123,41 @@ message GrpcJsonTranscoder { // // The client could ``post`` a json body ``{"shelf": 1234}`` with the path of // ``/bookstore.Bookstore/GetShelfRequest`` to call ``GetShelfRequest``. - bool convert_grpc_status = 9; - - oneof descriptor_set { - option (validate.required) = true; + bool auto_mapping = 7; - // Whether to ignore query parameters that cannot be mapped to a corresponding - // protobuf field. Use this if you cannot control the query parameters and do - // not know them beforehand. Otherwise use ``ignored_query_parameters``. - // Defaults to false. - string proto_descriptor = 1; + // Whether to ignore query parameters that cannot be mapped to a corresponding + // protobuf field. Use this if you cannot control the query parameters and do + // not know them beforehand. Otherwise use ``ignored_query_parameters``. + // Defaults to false. + bool ignore_unknown_query_parameters = 8; - // Whether to convert gRPC status headers to JSON. - // When trailer indicates a gRPC error and there was no HTTP body, take ``google.rpc.Status`` - // from the ``grpc-status-details-bin`` header and use it as JSON body. - // If there was no such header, make ``google.rpc.Status`` out of the ``grpc-status`` and - // ``grpc-message`` headers. - // The error details types must be present in the ``proto_descriptor``. - // - // For example, if an upstream server replies with headers: - // - // .. code-block:: none - // - // grpc-status: 5 - // grpc-status-details-bin: - // CAUaMwoqdHlwZS5nb29nbGVhcGlzLmNvbS9nb29nbGUucnBjLlJlcXVlc3RJbmZvEgUKA3ItMQ - // - // The ``grpc-status-details-bin`` header contains a base64-encoded protobuf message - // ``google.rpc.Status``. It will be transcoded into: - // - // .. 
code-block:: none - // - // HTTP/1.1 404 Not Found - // content-type: application/json - // - // {"code":5,"details":[{"@type":"type.googleapis.com/google.rpc.RequestInfo","requestId":"r-1"}]} - // - // In order to transcode the message, the ``google.rpc.RequestInfo`` type from - // the ``google/rpc/error_details.proto`` should be included in the configured - // :ref:`proto descriptor set `. - bytes proto_descriptor_bin = 4; - } + // Whether to convert gRPC status headers to JSON. + // When trailer indicates a gRPC error and there was no HTTP body, take ``google.rpc.Status`` + // from the ``grpc-status-details-bin`` header and use it as JSON body. + // If there was no such header, make ``google.rpc.Status`` out of the ``grpc-status`` and + // ``grpc-message`` headers. + // The error details types must be present in the ``proto_descriptor``. + // + // For example, if an upstream server replies with headers: + // + // .. code-block:: none + // + // grpc-status: 5 + // grpc-status-details-bin: + // CAUaMwoqdHlwZS5nb29nbGVhcGlzLmNvbS9nb29nbGUucnBjLlJlcXVlc3RJbmZvEgUKA3ItMQ + // + // The ``grpc-status-details-bin`` header contains a base64-encoded protobuf message + // ``google.rpc.Status``. It will be transcoded into: + // + // .. code-block:: none + // + // HTTP/1.1 404 Not Found + // content-type: application/json + // + // {"code":5,"details":[{"@type":"type.googleapis.com/google.rpc.RequestInfo","requestId":"r-1"}]} + // + // In order to transcode the message, the ``google.rpc.RequestInfo`` type from + // the ``google/rpc/error_details.proto`` should be included in the configured + // :ref:`proto descriptor set `. 
+ bool convert_grpc_status = 9; } diff --git a/generated_api_shadow/envoy/extensions/filters/http/grpc_stats/v3/config.proto b/generated_api_shadow/envoy/extensions/filters/http/grpc_stats/v3/config.proto index d5aca14ea5308..ff56066410cb0 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/grpc_stats/v3/config.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/grpc_stats/v3/config.proto @@ -28,12 +28,12 @@ message FilterConfig { // counts. bool emit_filter_state = 1; - // If set, specifies an allowlist of service/methods that will have individual stats - // emitted for them. Any call that does not match the allowlist will be counted - // in a stat with no method specifier: `cluster..grpc.*`. - bool enable_upstream_stats = 4; - oneof per_method_stat_specifier { + // If set, specifies an allowlist of service/methods that will have individual stats + // emitted for them. Any call that does not match the allowlist will be counted + // in a stat with no method specifier: `cluster..grpc.*`. + config.core.v3.GrpcMethodList individual_method_stats_allowlist = 2; + // If set to true, emit stats for all service/method names. // // If set to false, emit stats for all service/message types to the same stats without including @@ -52,16 +52,16 @@ message FilterConfig { // `stats_for_all_methods=false` in order to be safe by default. This behavior can be // controlled with runtime override // `envoy.deprecated_features.grpc_stats_filter_enable_stats_for_all_methods_by_default`. - config.core.v3.GrpcMethodList individual_method_stats_allowlist = 2; - - // If true, the filter will gather a histogram for the request time of the upstream. - // It works with :ref:`stats_for_all_methods - // ` - // and :ref:`individual_method_stats_allowlist - // ` the same way - // request_message_count and response_message_count works. 
google.protobuf.BoolValue stats_for_all_methods = 3; } + + // If true, the filter will gather a histogram for the request time of the upstream. + // It works with :ref:`stats_for_all_methods + // ` + // and :ref:`individual_method_stats_allowlist + // ` the same way + // request_message_count and response_message_count works. + bool enable_upstream_stats = 4; } // gRPC statistics filter state object in protobuf form. diff --git a/generated_api_shadow/envoy/extensions/filters/http/gzip/v3/gzip.proto b/generated_api_shadow/envoy/extensions/filters/http/gzip/v3/gzip.proto index 3206037723de0..e711827481a3a 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/gzip/v3/gzip.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/gzip/v3/gzip.proto @@ -19,7 +19,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // Gzip :ref:`configuration overview `. // [#extension: envoy.filters.http.gzip] -// [#next-free-field: 11] +// [#next-free-field: 12] message Gzip { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.gzip.v2.Gzip"; @@ -72,6 +72,11 @@ message Gzip { // `remove_accept_encoding_header` are ignored. compressor.v3.Compressor compressor = 10; + // Value for Zlib's next output buffer. If not set, defaults to 4096. + // See https://www.zlib.net/manual.html for more details. Also see + // https://github.com/envoyproxy/envoy/issues/8448 for context on this filter's performance. 
+ google.protobuf.UInt32Value chunk_size = 11 [(validate.rules).uint32 = {lte: 65536 gte: 4096}]; + google.protobuf.UInt32Value hidden_envoy_deprecated_content_length = 2 [deprecated = true]; repeated string hidden_envoy_deprecated_content_type = 6 [deprecated = true]; diff --git a/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v3/BUILD index a8dda77ddfc31..8253ea6dff83b 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v3/BUILD +++ b/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v3/BUILD @@ -7,6 +7,7 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/filter/http/header_to_metadata/v2:pkg", + "//envoy/type/matcher/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) diff --git a/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto b/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto index 8e7c490f01b66..ace7c535069ac 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto @@ -2,6 +2,9 @@ syntax = "proto3"; package envoy.extensions.filters.http.header_to_metadata.v3; +import "envoy/type/matcher/v3/regex.proto"; + +import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -44,7 +47,7 @@ message Config { BASE64 = 1; } - // [#next-free-field: 6] + // [#next-free-field: 7] message KeyValuePair { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.header_to_metadata.v2.Config.KeyValuePair"; @@ -57,15 +60,25 @@ message Config { // The value to pair with the given key. 
// - // When used for a `on_header_present` case, if value is non-empty it'll be used - // instead of the header value. If both are empty, no metadata is added. + // When used for a + // :ref:`on_header_present ` + // case, if value is non-empty it'll be used instead of the header value. If both are empty, no metadata is added. + // + // When used for a :ref:`on_header_missing ` + // case, a non-empty value must be provided otherwise no metadata is added. + string value = 3 [(udpa.annotations.field_migrate).oneof_promotion = "value_type"]; + + // If present, the header's value will be matched and substituted with this. If there is no match or substitution, the header value + // is used as-is. + // + // This is only used for :ref:`on_header_present `. // - // When used for a `on_header_missing` case, a non-empty value must be provided - // otherwise no metadata is added. - string value = 3; + // Note: if the `value` field is non-empty this field should be empty. + type.matcher.v3.RegexMatchAndSubstitute regex_value_rewrite = 6 + [(udpa.annotations.field_migrate).oneof_promotion = "value_type"]; // The value's type — defaults to string. - ValueType type = 4; + ValueType type = 4 [(validate.rules).enum = {defined_only: true}]; // How is the value encoded, default is NONE (not encoded). // The value will be decoded accordingly before storing to metadata. @@ -73,29 +86,41 @@ message Config { } // A Rule defines what metadata to apply when a header is present or missing. + // [#next-free-field: 6] message Rule { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.header_to_metadata.v2.Config.Rule"; - // The header that triggers this rule — required. - string header = 1 - [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; - - // If the header is present, apply this metadata KeyValuePair. + // Specifies that a match will be performed on the value of a header or a cookie. 
+ // + // The header to be extracted. + string header = 1 [ + (validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}, + (udpa.annotations.field_migrate).oneof_promotion = "header_cookie_specifier" + ]; + + // The cookie to be extracted. + string cookie = 5 [ + (validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}, + (udpa.annotations.field_migrate).oneof_promotion = "header_cookie_specifier" + ]; + + // If the header or cookie is present, apply this metadata KeyValuePair. // // If the value in the KeyValuePair is non-empty, it'll be used instead - // of the header value. - KeyValuePair on_header_present = 2; + // of the header or cookie value. + KeyValuePair on_header_present = 2 [(udpa.annotations.field_migrate).rename = "on_present"]; - // If the header is not present, apply this metadata KeyValuePair. + // If the header or cookie is not present, apply this metadata KeyValuePair. // // The value in the KeyValuePair must be set, since it'll be used in lieu - // of the missing header value. - KeyValuePair on_header_missing = 3; + // of the missing header or cookie value. + KeyValuePair on_header_missing = 3 [(udpa.annotations.field_migrate).rename = "on_missing"]; // Whether or not to remove the header after a rule is applied. // // This prevents headers from leaking. + // This field is not supported in case of a cookie. bool remove = 4; } diff --git a/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v4alpha/BUILD new file mode 100644 index 0000000000000..285e2346e0ff7 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v4alpha/BUILD @@ -0,0 +1,13 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/extensions/filters/http/header_to_metadata/v3:pkg", + "//envoy/type/matcher/v4alpha:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v4alpha/header_to_metadata.proto b/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v4alpha/header_to_metadata.proto new file mode 100644 index 0000000000000..0d7c814584dce --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v4alpha/header_to_metadata.proto @@ -0,0 +1,130 @@ +syntax = "proto3"; + +package envoy.extensions.filters.http.header_to_metadata.v4alpha; + +import "envoy/type/matcher/v4alpha/regex.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.http.header_to_metadata.v4alpha"; +option java_outer_classname = "HeaderToMetadataProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Header-To-Metadata Filter] +// +// The configuration for transforming headers into metadata. This is useful +// for matching load balancer subsets, logging, etc. +// +// Header to Metadata :ref:`configuration overview `. +// [#extension: envoy.filters.http.header_to_metadata] + +message Config { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.header_to_metadata.v3.Config"; + + enum ValueType { + STRING = 0; + + NUMBER = 1; + + // The value is a serialized `protobuf.Value + // `_. + PROTOBUF_VALUE = 2; + } + + // ValueEncode defines the encoding algorithm. + enum ValueEncode { + // The value is not encoded. 
+ NONE = 0; + + // The value is encoded in `Base64 `_. + // Note: this is mostly used for STRING and PROTOBUF_VALUE to escape the + // non-ASCII characters in the header. + BASE64 = 1; + } + + // [#next-free-field: 7] + message KeyValuePair { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.header_to_metadata.v3.Config.KeyValuePair"; + + // The namespace — if this is empty, the filter's namespace will be used. + string metadata_namespace = 1; + + // The key to use within the namespace. + string key = 2 [(validate.rules).string = {min_bytes: 1}]; + + oneof value_type { + // The value to pair with the given key. + // + // When used for a + // :ref:`on_header_present ` + // case, if value is non-empty it'll be used instead of the header value. If both are empty, no metadata is added. + // + // When used for a :ref:`on_header_missing ` + // case, a non-empty value must be provided otherwise no metadata is added. + string value = 3; + + // If present, the header's value will be matched and substituted with this. If there is no match or substitution, the header value + // is used as-is. + // + // This is only used for :ref:`on_header_present `. + // + // Note: if the `value` field is non-empty this field should be empty. + type.matcher.v4alpha.RegexMatchAndSubstitute regex_value_rewrite = 6; + } + + // The value's type — defaults to string. + ValueType type = 4 [(validate.rules).enum = {defined_only: true}]; + + // How is the value encoded, default is NONE (not encoded). + // The value will be decoded accordingly before storing to metadata. + ValueEncode encode = 5; + } + + // A Rule defines what metadata to apply when a header is present or missing. 
+ // [#next-free-field: 6] + message Rule { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.header_to_metadata.v3.Config.Rule"; + + oneof header_cookie_specifier { + // Specifies that a match will be performed on the value of a header or a cookie. + // + // The header to be extracted. + string header = 1 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; + + // The cookie to be extracted. + string cookie = 5 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; + } + + // If the header or cookie is present, apply this metadata KeyValuePair. + // + // If the value in the KeyValuePair is non-empty, it'll be used instead + // of the header or cookie value. + KeyValuePair on_present = 2; + + // If the header or cookie is not present, apply this metadata KeyValuePair. + // + // The value in the KeyValuePair must be set, since it'll be used in lieu + // of the missing header or cookie value. + KeyValuePair on_missing = 3; + + // Whether or not to remove the header after a rule is applied. + // + // This prevents headers from leaking. + // This field is not supported in case of a cookie. + bool remove = 4; + } + + // The list of rules to apply to requests. + repeated Rule request_rules = 1; + + // The list of rules to apply to responses. 
+ repeated Rule response_rules = 2; +} diff --git a/generated_api_shadow/envoy/extensions/filters/http/health_check/v3/health_check.proto b/generated_api_shadow/envoy/extensions/filters/http/health_check/v3/health_check.proto index 1a5dbf1bb9006..f3a0c42c388c6 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/health_check/v3/health_check.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/health_check/v3/health_check.proto @@ -38,6 +38,11 @@ message HealthCheck { // If operating in non-pass-through mode, specifies a set of upstream cluster // names and the minimum percentage of servers in each of those clusters that // must be healthy or degraded in order for the filter to return a 200. + // + // .. note:: + // + // This value is interpreted as an integer by truncating, so 12.50% will be calculated + // as if it were 12%. map cluster_min_healthy_percentages = 4; // Specifies a set of health check request headers to match on. The health check filter will diff --git a/generated_api_shadow/envoy/extensions/filters/http/health_check/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/http/health_check/v4alpha/BUILD new file mode 100644 index 0000000000000..97b6ad2feb2d0 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/http/health_check/v4alpha/BUILD @@ -0,0 +1,14 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/route/v4alpha:pkg", + "//envoy/extensions/filters/http/health_check/v3:pkg", + "//envoy/type/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/extensions/filters/http/health_check/v4alpha/health_check.proto b/generated_api_shadow/envoy/extensions/filters/http/health_check/v4alpha/health_check.proto new file mode 100644 index 0000000000000..3725d085dd7b0 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/http/health_check/v4alpha/health_check.proto @@ -0,0 +1,52 @@ +syntax = "proto3"; + +package envoy.extensions.filters.http.health_check.v4alpha; + +import "envoy/config/route/v4alpha/route_components.proto"; +import "envoy/type/v3/percent.proto"; + +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.http.health_check.v4alpha"; +option java_outer_classname = "HealthCheckProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Health check] +// Health check :ref:`configuration overview `. +// [#extension: envoy.filters.http.health_check] + +// [#next-free-field: 6] +message HealthCheck { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.health_check.v3.HealthCheck"; + + reserved 2; + + // Specifies whether the filter operates in pass through mode or not. + google.protobuf.BoolValue pass_through_mode = 1 [(validate.rules).message = {required: true}]; + + // If operating in pass through mode, the amount of time in milliseconds + // that the filter should cache the upstream response. 
+ google.protobuf.Duration cache_time = 3; + + // If operating in non-pass-through mode, specifies a set of upstream cluster + // names and the minimum percentage of servers in each of those clusters that + // must be healthy or degraded in order for the filter to return a 200. + // + // .. note:: + // + // This value is interpreted as an integer by truncating, so 12.50% will be calculated + // as if it were 12%. + map cluster_min_healthy_percentages = 4; + + // Specifies a set of health check request headers to match on. The health check filter will + // check a request’s headers against all the specified headers. To specify the health check + // endpoint, set the ``:path`` header to match on. + repeated config.route.v4alpha.HeaderMatcher headers = 5; +} diff --git a/generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v3/config.proto b/generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v3/config.proto index 592610819bdca..39fe6187f64fa 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v3/config.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v3/config.proto @@ -78,44 +78,50 @@ message JwtProvider { // repeated string audiences = 2; - // JWKS can be fetched from remote server via HTTP/HTTPS. This field specifies the remote HTTP - // URI and how the fetched JWKS should be cached. - // - // Example: - // - // .. code-block:: yaml - // - // remote_jwks: - // http_uri: - // uri: https://www.googleapis.com/oauth2/v1/certs - // cluster: jwt.www.googleapis.com|443 - // cache_duration: - // seconds: 300 - // - bool forward = 5; + // `JSON Web Key Set (JWKS) `_ is needed to + // validate signature of a JWT. This field specifies where to fetch JWKS. + oneof jwks_source_specifier { + option (validate.required) = true; - // JWKS is in local data source. It could be either in a local file or embedded in the - // inline_string. - // - // Example: local file - // - // .. 
code-block:: yaml - // - // local_jwks: - // filename: /etc/envoy/jwks/jwks1.txt - // - // Example: inline_string - // - // .. code-block:: yaml - // - // local_jwks: - // inline_string: ACADADADADA - // - repeated JwtHeader from_headers = 6; + // JWKS can be fetched from remote server via HTTP/HTTPS. This field specifies the remote HTTP + // URI and how the fetched JWKS should be cached. + // + // Example: + // + // .. code-block:: yaml + // + // remote_jwks: + // http_uri: + // uri: https://www.googleapis.com/oauth2/v1/certs + // cluster: jwt.www.googleapis.com|443 + // cache_duration: + // seconds: 300 + // + RemoteJwks remote_jwks = 3; + + // JWKS is in local data source. It could be either in a local file or embedded in the + // inline_string. + // + // Example: local file + // + // .. code-block:: yaml + // + // local_jwks: + // filename: /etc/envoy/jwks/jwks1.txt + // + // Example: inline_string + // + // .. code-block:: yaml + // + // local_jwks: + // inline_string: ACADADADADA + // + config.core.v3.DataSource local_jwks = 4; + } // If false, the JWT is removed in the request after a success verification. If true, the JWT is // not removed in the request. Default value is false. - repeated string from_params = 7; + bool forward = 5; // Two fields below define where to extract the JWT from an HTTP request. // @@ -142,8 +148,7 @@ message JwtProvider { // // ``x-goog-iap-jwt-assertion: ``. // - string forward_payload_header = 8 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; + repeated JwtHeader from_headers = 6; // JWT is sent in a query parameter. `jwt_params` represents the query parameter names. // @@ -158,39 +163,34 @@ message JwtProvider { // // /path?jwt_token= // - string payload_in_metadata = 9; - - // `JSON Web Key Set (JWKS) `_ is needed to - // validate signature of a JWT. This field specifies where to fetch JWKS. 
- oneof jwks_source_specifier { - option (validate.required) = true; + repeated string from_params = 7; - // This field specifies the header name to forward a successfully verified JWT payload to the - // backend. The forwarded data is:: - // - // base64url_encoded(jwt_payload_in_JSON) - // - // If it is not specified, the payload will not be forwarded. - RemoteJwks remote_jwks = 3; + // This field specifies the header name to forward a successfully verified JWT payload to the + // backend. The forwarded data is:: + // + // base64url_encoded(jwt_payload_in_JSON) + // + // If it is not specified, the payload will not be forwarded. + string forward_payload_header = 8 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; - // If non empty, successfully verified JWT payloads will be written to StreamInfo DynamicMetadata - // in the format as: *namespace* is the jwt_authn filter name as **envoy.filters.http.jwt_authn** - // The value is the *protobuf::Struct*. The value of this field will be the key for its *fields* - // and the value is the *protobuf::Struct* converted from JWT JSON payload. - // - // For example, if payload_in_metadata is *my_payload*: - // - // .. code-block:: yaml - // - // envoy.filters.http.jwt_authn: - // my_payload: - // iss: https://example.com - // sub: test@example.com - // aud: https://example.com - // exp: 1501281058 - // - config.core.v3.DataSource local_jwks = 4; - } + // If non empty, successfully verified JWT payloads will be written to StreamInfo DynamicMetadata + // in the format as: *namespace* is the jwt_authn filter name as **envoy.filters.http.jwt_authn** + // The value is the *protobuf::Struct*. The value of this field will be the key for its *fields* + // and the value is the *protobuf::Struct* converted from JWT JSON payload. + // + // For example, if payload_in_metadata is *my_payload*: + // + // .. 
code-block:: yaml + // + // envoy.filters.http.jwt_authn: + // my_payload: + // iss: https://example.com + // sub: test@example.com + // aud: https://example.com + // exp: 1501281058 + // + string payload_in_metadata = 9; } // This message specifies how to fetch JWKS from remote and how to cache it. diff --git a/generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v4alpha/BUILD new file mode 100644 index 0000000000000..a9f9b8bc44c32 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v4alpha/BUILD @@ -0,0 +1,14 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/core/v4alpha:pkg", + "//envoy/config/route/v4alpha:pkg", + "//envoy/extensions/filters/http/jwt_authn/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v4alpha/config.proto b/generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v4alpha/config.proto new file mode 100644 index 0000000000000..302cf7253dde3 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v4alpha/config.proto @@ -0,0 +1,531 @@ +syntax = "proto3"; + +package envoy.extensions.filters.http.jwt_authn.v4alpha; + +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/config/core/v4alpha/http_uri.proto"; +import "envoy/config/route/v4alpha/route_components.proto"; + +import "google/protobuf/duration.proto"; +import "google/protobuf/empty.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.http.jwt_authn.v4alpha"; +option java_outer_classname = "ConfigProto"; +option java_multiple_files = true; 
+option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: JWT Authentication] +// JWT Authentication :ref:`configuration overview `. +// [#extension: envoy.filters.http.jwt_authn] + +// Please see following for JWT authentication flow: +// +// * `JSON Web Token (JWT) `_ +// * `The OAuth 2.0 Authorization Framework `_ +// * `OpenID Connect `_ +// +// A JwtProvider message specifies how a JSON Web Token (JWT) can be verified. It specifies: +// +// * issuer: the principal that issues the JWT. It has to match the one from the token. +// * allowed audiences: the ones in the token have to be listed here. +// * how to fetch public key JWKS to verify the token signature. +// * how to extract JWT token in the request. +// * how to pass successfully verified token payload. +// +// Example: +// +// .. code-block:: yaml +// +// issuer: https://example.com +// audiences: +// - bookstore_android.apps.googleusercontent.com +// - bookstore_web.apps.googleusercontent.com +// remote_jwks: +// http_uri: +// uri: https://example.com/.well-known/jwks.json +// cluster: example_jwks_cluster +// cache_duration: +// seconds: 300 +// +// [#next-free-field: 10] +message JwtProvider { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.jwt_authn.v3.JwtProvider"; + + // Specify the `principal `_ that issued + // the JWT, usually a URL or an email address. + // + // Example: https://securetoken.google.com + // Example: 1234567-compute@developer.gserviceaccount.com + // + string issuer = 1 [(validate.rules).string = {min_bytes: 1}]; + + // The list of JWT `audiences `_ are + // allowed to access. A JWT containing any of these audiences will be accepted. If not specified, + // will not check audiences in the token. + // + // Example: + // + // .. 
code-block:: yaml + // + // audiences: + // - bookstore_android.apps.googleusercontent.com + // - bookstore_web.apps.googleusercontent.com + // + repeated string audiences = 2; + + // `JSON Web Key Set (JWKS) `_ is needed to + // validate signature of a JWT. This field specifies where to fetch JWKS. + oneof jwks_source_specifier { + option (validate.required) = true; + + // JWKS can be fetched from remote server via HTTP/HTTPS. This field specifies the remote HTTP + // URI and how the fetched JWKS should be cached. + // + // Example: + // + // .. code-block:: yaml + // + // remote_jwks: + // http_uri: + // uri: https://www.googleapis.com/oauth2/v1/certs + // cluster: jwt.www.googleapis.com|443 + // cache_duration: + // seconds: 300 + // + RemoteJwks remote_jwks = 3; + + // JWKS is in local data source. It could be either in a local file or embedded in the + // inline_string. + // + // Example: local file + // + // .. code-block:: yaml + // + // local_jwks: + // filename: /etc/envoy/jwks/jwks1.txt + // + // Example: inline_string + // + // .. code-block:: yaml + // + // local_jwks: + // inline_string: ACADADADADA + // + config.core.v4alpha.DataSource local_jwks = 4; + } + + // If false, the JWT is removed in the request after a success verification. If true, the JWT is + // not removed in the request. Default value is false. + bool forward = 5; + + // Two fields below define where to extract the JWT from an HTTP request. + // + // If no explicit location is specified, the following default locations are tried in order: + // + // 1. The Authorization header using the `Bearer schema + // `_. Example:: + // + // Authorization: Bearer . + // + // 2. `access_token `_ query parameter. + // + // Multiple JWTs can be verified for a request. Each JWT has to be extracted from the locations + // its provider specified or from the default locations. + // + // Specify the HTTP headers to extract JWT token. For examples, following config: + // + // .. 
code-block:: yaml + // + // from_headers: + // - name: x-goog-iap-jwt-assertion + // + // can be used to extract token from header:: + // + // ``x-goog-iap-jwt-assertion: ``. + // + repeated JwtHeader from_headers = 6; + + // JWT is sent in a query parameter. `jwt_params` represents the query parameter names. + // + // For example, if config is: + // + // .. code-block:: yaml + // + // from_params: + // - jwt_token + // + // The JWT format in query parameter is:: + // + // /path?jwt_token= + // + repeated string from_params = 7; + + // This field specifies the header name to forward a successfully verified JWT payload to the + // backend. The forwarded data is:: + // + // base64url_encoded(jwt_payload_in_JSON) + // + // If it is not specified, the payload will not be forwarded. + string forward_payload_header = 8 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; + + // If non empty, successfully verified JWT payloads will be written to StreamInfo DynamicMetadata + // in the format as: *namespace* is the jwt_authn filter name as **envoy.filters.http.jwt_authn** + // The value is the *protobuf::Struct*. The value of this field will be the key for its *fields* + // and the value is the *protobuf::Struct* converted from JWT JSON payload. + // + // For example, if payload_in_metadata is *my_payload*: + // + // .. code-block:: yaml + // + // envoy.filters.http.jwt_authn: + // my_payload: + // iss: https://example.com + // sub: test@example.com + // aud: https://example.com + // exp: 1501281058 + // + string payload_in_metadata = 9; +} + +// This message specifies how to fetch JWKS from remote and how to cache it. +message RemoteJwks { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.jwt_authn.v3.RemoteJwks"; + + // The HTTP URI to fetch the JWKS. For example: + // + // .. 
code-block:: yaml + // + // http_uri: + // uri: https://www.googleapis.com/oauth2/v1/certs + // cluster: jwt.www.googleapis.com|443 + // + config.core.v4alpha.HttpUri http_uri = 1; + + // Duration after which the cached JWKS should be expired. If not specified, default cache + // duration is 5 minutes. + google.protobuf.Duration cache_duration = 2; +} + +// This message specifies a header location to extract JWT token. +message JwtHeader { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.jwt_authn.v3.JwtHeader"; + + // The HTTP header name. + string name = 1 + [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; + + // The value prefix. The value format is "value_prefix" + // For example, for "Authorization: Bearer ", value_prefix="Bearer " with a space at the + // end. + string value_prefix = 2 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; +} + +// Specify a required provider with audiences. +message ProviderWithAudiences { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.jwt_authn.v3.ProviderWithAudiences"; + + // Specify a required provider name. + string provider_name = 1; + + // This field overrides the one specified in the JwtProvider. + repeated string audiences = 2; +} + +// This message specifies a Jwt requirement. An empty message means JWT verification is not +// required. Here are some config examples: +// +// .. 
code-block:: yaml +// +// # Example 1: not required with an empty message +// +// # Example 2: require A +// provider_name: provider-A +// +// # Example 3: require A or B +// requires_any: +// requirements: +// - provider_name: provider-A +// - provider_name: provider-B +// +// # Example 4: require A and B +// requires_all: +// requirements: +// - provider_name: provider-A +// - provider_name: provider-B +// +// # Example 5: require A and (B or C) +// requires_all: +// requirements: +// - provider_name: provider-A +// - requires_any: +// requirements: +// - provider_name: provider-B +// - provider_name: provider-C +// +// # Example 6: require A or (B and C) +// requires_any: +// requirements: +// - provider_name: provider-A +// - requires_all: +// requirements: +// - provider_name: provider-B +// - provider_name: provider-C +// +// # Example 7: A is optional (if token from A is provided, it must be valid, but also allows +// missing token.) +// requires_any: +// requirements: +// - provider_name: provider-A +// - allow_missing: {} +// +// # Example 8: A is optional and B is required. +// requires_all: +// requirements: +// - requires_any: +// requirements: +// - provider_name: provider-A +// - allow_missing: {} +// - provider_name: provider-B +// +// [#next-free-field: 7] +message JwtRequirement { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.jwt_authn.v3.JwtRequirement"; + + oneof requires_type { + // Specify a required provider name. + string provider_name = 1; + + // Specify a required provider with audiences. + ProviderWithAudiences provider_and_audiences = 2; + + // Specify list of JwtRequirement. Their results are OR-ed. + // If any one of them passes, the result is passed. + JwtRequirementOrList requires_any = 3; + + // Specify list of JwtRequirement. Their results are AND-ed. + // All of them must pass, if one of them fails or missing, it fails. 
+ JwtRequirementAndList requires_all = 4; + + // The requirement is always satisfied even if JWT is missing or the JWT + // verification fails. A typical usage is: this filter is used to only verify + // JWTs and pass the verified JWT payloads to another filter, the other filter + // will make decision. In this mode, all JWT tokens will be verified. + google.protobuf.Empty allow_missing_or_failed = 5; + + // The requirement is satisfied if JWT is missing, but failed if JWT is + // presented but invalid. Similar to allow_missing_or_failed, this is used + // to only verify JWTs and pass the verified payload to another filter. The + // different is this mode will reject requests with invalid tokens. + google.protobuf.Empty allow_missing = 6; + } +} + +// This message specifies a list of RequiredProvider. +// Their results are OR-ed; if any one of them passes, the result is passed +message JwtRequirementOrList { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.jwt_authn.v3.JwtRequirementOrList"; + + // Specify a list of JwtRequirement. + repeated JwtRequirement requirements = 1 [(validate.rules).repeated = {min_items: 2}]; +} + +// This message specifies a list of RequiredProvider. +// Their results are AND-ed; all of them must pass, if one of them fails or missing, it fails. +message JwtRequirementAndList { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.jwt_authn.v3.JwtRequirementAndList"; + + // Specify a list of JwtRequirement. + repeated JwtRequirement requirements = 1 [(validate.rules).repeated = {min_items: 2}]; +} + +// This message specifies a Jwt requirement for a specific Route condition. +// Example 1: +// +// .. code-block:: yaml +// +// - match: +// prefix: /healthz +// +// In above example, "requires" field is empty for /healthz prefix match, +// it means that requests matching the path prefix don't require JWT authentication. +// +// Example 2: +// +// .. 
code-block:: yaml +// +// - match: +// prefix: / +// requires: { provider_name: provider-A } +// +// In above example, all requests matched the path prefix require jwt authentication +// from "provider-A". +message RequirementRule { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.jwt_authn.v3.RequirementRule"; + + // The route matching parameter. Only when the match is satisfied, the "requires" field will + // apply. + // + // For example: following match will match all requests. + // + // .. code-block:: yaml + // + // match: + // prefix: / + // + config.route.v4alpha.RouteMatch match = 1 [(validate.rules).message = {required: true}]; + + // Specify a Jwt Requirement. Please detail comment in message JwtRequirement. + JwtRequirement requires = 2; +} + +// This message specifies Jwt requirements based on stream_info.filterState. +// This FilterState should use `Router::StringAccessor` object to set a string value. +// Other HTTP filters can use it to specify Jwt requirements dynamically. +// +// Example: +// +// .. code-block:: yaml +// +// name: jwt_selector +// requires: +// issuer_1: +// provider_name: issuer1 +// issuer_2: +// provider_name: issuer2 +// +// If a filter set "jwt_selector" with "issuer_1" to FilterState for a request, +// jwt_authn filter will use JwtRequirement{"provider_name": "issuer1"} to verify. +message FilterStateRule { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.jwt_authn.v3.FilterStateRule"; + + // The filter state name to retrieve the `Router::StringAccessor` object. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // A map of string keys to requirements. The string key is the string value + // in the FilterState with the name specified in the *name* field above. + map requires = 3; +} + +// This is the Envoy HTTP filter config for JWT authentication. +// +// For example: +// +// .. 
code-block:: yaml +// +// providers: +// provider1: +// issuer: issuer1 +// audiences: +// - audience1 +// - audience2 +// remote_jwks: +// http_uri: +// uri: https://example.com/.well-known/jwks.json +// cluster: example_jwks_cluster +// provider2: +// issuer: issuer2 +// local_jwks: +// inline_string: jwks_string +// +// rules: +// # Not jwt verification is required for /health path +// - match: +// prefix: /health +// +// # Jwt verification for provider1 is required for path prefixed with "prefix" +// - match: +// prefix: /prefix +// requires: +// provider_name: provider1 +// +// # Jwt verification for either provider1 or provider2 is required for all other requests. +// - match: +// prefix: / +// requires: +// requires_any: +// requirements: +// - provider_name: provider1 +// - provider_name: provider2 +// +message JwtAuthentication { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.jwt_authn.v3.JwtAuthentication"; + + // Map of provider names to JwtProviders. + // + // .. code-block:: yaml + // + // providers: + // provider1: + // issuer: issuer1 + // audiences: + // - audience1 + // - audience2 + // remote_jwks: + // http_uri: + // uri: https://example.com/.well-known/jwks.json + // cluster: example_jwks_cluster + // provider2: + // issuer: provider2 + // local_jwks: + // inline_string: jwks_string + // + map providers = 1; + + // Specifies requirements based on the route matches. The first matched requirement will be + // applied. If there are overlapped match conditions, please put the most specific match first. + // + // Examples + // + // .. 
code-block:: yaml + // + // rules: + // - match: + // prefix: /healthz + // - match: + // prefix: /baz + // requires: + // provider_name: provider1 + // - match: + // prefix: /foo + // requires: + // requires_any: + // requirements: + // - provider_name: provider1 + // - provider_name: provider2 + // - match: + // prefix: /bar + // requires: + // requires_all: + // requirements: + // - provider_name: provider1 + // - provider_name: provider2 + // + repeated RequirementRule rules = 2; + + // This message specifies Jwt requirements based on stream_info.filterState. + // Other HTTP filters can use it to specify Jwt requirements dynamically. + // The *rules* field above is checked first, if it could not find any matches, + // check this one. + FilterStateRule filter_state_rules = 3; + + // When set to true, bypass the `CORS preflight request + // `_ regardless of JWT + // requirements specified in the rules. + bool bypass_cors_preflight = 4; +} diff --git a/generated_api_shadow/envoy/extensions/filters/http/lua/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/lua/v3/BUILD index 69390e69786a1..8878a585f46d2 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/lua/v3/BUILD +++ b/generated_api_shadow/envoy/extensions/filters/http/lua/v3/BUILD @@ -6,6 +6,7 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ + "//envoy/config/core/v3:pkg", "//envoy/config/filter/http/lua/v2:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], diff --git a/generated_api_shadow/envoy/extensions/filters/http/lua/v3/lua.proto b/generated_api_shadow/envoy/extensions/filters/http/lua/v3/lua.proto index da6b0c09a0f61..622726744de6c 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/lua/v3/lua.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/lua/v3/lua.proto @@ -2,6 +2,8 @@ syntax = "proto3"; package envoy.extensions.filters.http.lua.v3; +import "envoy/config/core/v3/base.proto"; + import "udpa/annotations/status.proto"; import 
"udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -24,4 +26,37 @@ message Lua { // be properly escaped. YAML configuration may be easier to read since YAML supports multi-line // strings so complex scripts can be easily expressed inline in the configuration. string inline_code = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Map of named Lua source codes that can be referenced in :ref:`LuaPerRoute + // `. The Lua source codes can be + // loaded from inline string or local files. + // + // Example: + // + // .. code-block:: yaml + // + // source_codes: + // hello.lua: + // inline_string: | + // function envoy_on_response(response_handle) + // -- Do something. + // end + // world.lua: + // filename: /etc/lua/world.lua + // + map source_codes = 2; +} + +message LuaPerRoute { + oneof override { + option (validate.required) = true; + + // Disable the Lua filter for this particular vhost or route. If disabled is specified in + // multiple per-filter-configs, the most specific one will be used. + bool disabled = 1 [(validate.rules).bool = {const: true}]; + + // A name of a Lua source code stored in + // :ref:`Lua.source_codes `. + string name = 2 [(validate.rules).string = {min_len: 1}]; + } } diff --git a/generated_api_shadow/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto b/generated_api_shadow/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto index 057b7c3d44032..781fddc1939c0 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto @@ -19,11 +19,20 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // Rate limit :ref:`configuration overview `. 
// [#extension: envoy.filters.http.ratelimit] -// [#next-free-field: 8] +// [#next-free-field: 9] message RateLimit { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.rate_limit.v2.RateLimit"; + // Defines the version of the standard to use for X-RateLimit headers. + enum XRateLimitHeadersRFCVersion { + // X-RateLimit headers disabled. + OFF = 0; + + // Use `draft RFC Version 03 `_. + DRAFT_VERSION_03 = 1; + } + // The rate limit domain to use when calling the rate limit service. string domain = 1 [(validate.rules).string = {min_bytes: 1}]; @@ -64,4 +73,30 @@ message RateLimit { // success. config.ratelimit.v3.RateLimitServiceConfig rate_limit_service = 7 [(validate.rules).message = {required: true}]; + + // Defines the standard version to use for X-RateLimit headers emitted by the filter: + // + // * ``X-RateLimit-Limit`` - indicates the request-quota associated to the + // client in the current time-window followed by the description of the + // quota policy. The values are returned by the rate limiting service in + // :ref:`current_limit` + // field. Example: `10, 10;w=1;name="per-ip", 1000;w=3600`. + // * ``X-RateLimit-Remaining`` - indicates the remaining requests in the + // current time-window. The values are returned by the rate limiting service + // in :ref:`limit_remaining` + // field. + // * ``X-RateLimit-Reset`` - indicates the number of seconds until reset of + // the current time-window. The values are returned by the rate limiting service + // in :ref:`duration_until_reset` + // field. + // + // In case rate limiting policy specifies more then one time window, the values + // above represent the window that is closest to reaching its limit. + // + // For more information about the headers specification see selected version of + // the `draft RFC `_. + // + // Disabled by default. 
+ XRateLimitHeadersRFCVersion enable_x_ratelimit_headers = 8 + [(validate.rules).enum = {defined_only: true}]; } diff --git a/generated_api_shadow/envoy/extensions/filters/http/router/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/http/router/v4alpha/BUILD new file mode 100644 index 0000000000000..df329be542301 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/http/router/v4alpha/BUILD @@ -0,0 +1,13 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/accesslog/v4alpha:pkg", + "//envoy/extensions/filters/http/router/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/extensions/filters/http/router/v4alpha/router.proto b/generated_api_shadow/envoy/extensions/filters/http/router/v4alpha/router.proto new file mode 100644 index 0000000000000..d0baaab84a397 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/http/router/v4alpha/router.proto @@ -0,0 +1,81 @@ +syntax = "proto3"; + +package envoy.extensions.filters.http.router.v4alpha; + +import "envoy/config/accesslog/v4alpha/accesslog.proto"; + +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.http.router.v4alpha"; +option java_outer_classname = "RouterProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Router] +// Router :ref:`configuration overview `. 
+// [#extension: envoy.filters.http.router] + +// [#next-free-field: 7] +message Router { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.router.v3.Router"; + + // Whether the router generates dynamic cluster statistics. Defaults to + // true. Can be disabled in high performance scenarios. + google.protobuf.BoolValue dynamic_stats = 1; + + // Whether to start a child span for egress routed calls. This can be + // useful in scenarios where other filters (auth, ratelimit, etc.) make + // outbound calls and have child spans rooted at the same ingress + // parent. Defaults to false. + bool start_child_span = 2; + + // Configuration for HTTP upstream logs emitted by the router. Upstream logs + // are configured in the same way as access logs, but each log entry represents + // an upstream request. Presuming retries are configured, multiple upstream + // requests may be made for each downstream (inbound) request. + repeated config.accesslog.v4alpha.AccessLog upstream_log = 3; + + // Do not add any additional *x-envoy-* headers to requests or responses. This + // only affects the :ref:`router filter generated *x-envoy-* headers + // `, other Envoy filters and the HTTP + // connection manager may continue to set *x-envoy-* headers. + bool suppress_envoy_headers = 4; + + // Specifies a list of HTTP headers to strictly validate. Envoy will reject a + // request and respond with HTTP status 400 if the request contains an invalid + // value for any of the headers listed in this field. Strict header checking + // is only supported for the following headers: + // + // Value must be a ','-delimited list (i.e. 
no spaces) of supported retry + // policy values: + // + // * :ref:`config_http_filters_router_x-envoy-retry-grpc-on` + // * :ref:`config_http_filters_router_x-envoy-retry-on` + // + // Value must be an integer: + // + // * :ref:`config_http_filters_router_x-envoy-max-retries` + // * :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms` + // * :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms` + repeated string strict_check_headers = 5 [(validate.rules).repeated = { + items { + string { + in: "x-envoy-upstream-rq-timeout-ms" + in: "x-envoy-upstream-rq-per-try-timeout-ms" + in: "x-envoy-max-retries" + in: "x-envoy-retry-grpc-on" + in: "x-envoy-retry-on" + } + } + }]; + + // If not set, ingress Envoy will ignore + // :ref:`config_http_filters_router_x-envoy-expected-rq-timeout-ms` header, populated by egress + // Envoy, when deriving timeout for upstream cluster. + bool respect_expected_rq_timeout = 6; +} diff --git a/generated_api_shadow/envoy/extensions/filters/http/wasm/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/wasm/v3/BUILD new file mode 100644 index 0000000000000..8bad369e35113 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/http/wasm/v3/BUILD @@ -0,0 +1,12 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/extensions/wasm/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/extensions/filters/http/wasm/v3/wasm.proto b/generated_api_shadow/envoy/extensions/filters/http/wasm/v3/wasm.proto new file mode 100644 index 0000000000000..a812992a5b84e --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/http/wasm/v3/wasm.proto @@ -0,0 +1,20 @@ +syntax = "proto3"; + +package envoy.extensions.filters.http.wasm.v3; + +import "envoy/extensions/wasm/v3/wasm.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.http.wasm.v3"; +option java_outer_classname = "WasmProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [[#not-implemented-hide:] +message Wasm { + // General Plugin configuration. 
+ envoy.extensions.wasm.v3.PluginConfig config = 1; +} diff --git a/generated_api_shadow/envoy/extensions/filters/listener/proxy_protocol/v3/proxy_protocol.proto b/generated_api_shadow/envoy/extensions/filters/listener/proxy_protocol/v3/proxy_protocol.proto index 63ad72945e280..8fd0c63d0c825 100644 --- a/generated_api_shadow/envoy/extensions/filters/listener/proxy_protocol/v3/proxy_protocol.proto +++ b/generated_api_shadow/envoy/extensions/filters/listener/proxy_protocol/v3/proxy_protocol.proto @@ -4,6 +4,7 @@ package envoy.extensions.filters.listener.proxy_protocol.v3; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.listener.proxy_protocol.v3"; option java_outer_classname = "ProxyProtocolProto"; @@ -17,4 +18,26 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; message ProxyProtocol { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.listener.proxy_protocol.v2.ProxyProtocol"; + + message KeyValuePair { + // The namespace — if this is empty, the filter's namespace will be used. + string metadata_namespace = 1; + + // The key to use within the namespace. + string key = 2 [(validate.rules).string = {min_bytes: 1}]; + } + + // A Rule defines what metadata to apply when a header is present or missing. + message Rule { + // The type that triggers the rule - required + // TLV type is defined as uint8_t in proxy protocol. See `the spec + // `_ for details. + uint32 tlv_type = 1 [(validate.rules).uint32 = {lt: 256}]; + + // If the TLV type is present, apply this metadata KeyValuePair. + KeyValuePair on_tlv_present = 2; + } + + // The list of rules to apply to requests. 
+ repeated Rule rules = 1; } diff --git a/generated_api_shadow/envoy/extensions/filters/network/client_ssl_auth/v3/client_ssl_auth.proto b/generated_api_shadow/envoy/extensions/filters/network/client_ssl_auth/v3/client_ssl_auth.proto index e2da157574f89..b3af267a77ad1 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/client_ssl_auth/v3/client_ssl_auth.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/client_ssl_auth/v3/client_ssl_auth.proto @@ -6,6 +6,7 @@ import "envoy/config/core/v3/address.proto"; import "google/protobuf/duration.proto"; +import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -28,7 +29,8 @@ message ClientSSLAuth { // the authentication service. The filter will connect to the service every 60s to fetch the list // of principals. The service must support the expected :ref:`REST API // `. - string auth_api_cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + string auth_api_cluster = 1 + [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_VALUE strict: false}]; // The prefix to use when emitting :ref:`statistics // `. @@ -42,6 +44,7 @@ message ClientSSLAuth { // An optional list of IP address and subnet masks that should be white // listed for access by the filter. If no list is provided, there is no - // IP white list. - repeated config.core.v3.CidrRange ip_white_list = 4; + // IP allowlist. + repeated config.core.v3.CidrRange ip_white_list = 4 + [(udpa.annotations.field_migrate).rename = "ip_allowlist"]; } diff --git a/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v4alpha/BUILD new file mode 100644 index 0000000000000..663eb0d52d25c --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v4alpha/BUILD @@ -0,0 +1,15 @@ +# DO NOT EDIT. 
This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/route/v4alpha:pkg", + "//envoy/extensions/filters/network/dubbo_proxy/v3:pkg", + "//envoy/type/matcher/v4alpha:pkg", + "//envoy/type/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v4alpha/dubbo_proxy.proto b/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v4alpha/dubbo_proxy.proto new file mode 100644 index 0000000000000..4894c7693fd7a --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v4alpha/dubbo_proxy.proto @@ -0,0 +1,70 @@ +syntax = "proto3"; + +package envoy.extensions.filters.network.dubbo_proxy.v4alpha; + +import "envoy/extensions/filters/network/dubbo_proxy/v4alpha/route.proto"; + +import "google/protobuf/any.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.network.dubbo_proxy.v4alpha"; +option java_outer_classname = "DubboProxyProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Dubbo Proxy] +// Dubbo Proxy :ref:`configuration overview `. +// [#extension: envoy.filters.network.dubbo_proxy] + +// Dubbo Protocol types supported by Envoy. +enum ProtocolType { + // the default protocol. + Dubbo = 0; +} + +// Dubbo Serialization types supported by Envoy. +enum SerializationType { + // the default serialization protocol. 
+ Hessian2 = 0; +} + +// [#next-free-field: 6] +message DubboProxy { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.dubbo_proxy.v3.DubboProxy"; + + // The human readable prefix to use when emitting statistics. + string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Configure the protocol used. + ProtocolType protocol_type = 2 [(validate.rules).enum = {defined_only: true}]; + + // Configure the serialization protocol used. + SerializationType serialization_type = 3 [(validate.rules).enum = {defined_only: true}]; + + // The route table for the connection manager is static and is specified in this property. + repeated RouteConfiguration route_config = 4; + + // A list of individual Dubbo filters that make up the filter chain for requests made to the + // Dubbo proxy. Order matters as the filters are processed sequentially. For backwards + // compatibility, if no dubbo_filters are specified, a default Dubbo router filter + // (`envoy.filters.dubbo.router`) is used. + repeated DubboFilter dubbo_filters = 5; +} + +// DubboFilter configures a Dubbo filter. +message DubboFilter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.dubbo_proxy.v3.DubboFilter"; + + // The name of the filter to instantiate. The name must match a supported + // filter. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Filter specific configuration which depends on the filter being + // instantiated. See the supported filters for further documentation. 
+ google.protobuf.Any config = 2; +} diff --git a/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v4alpha/route.proto b/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v4alpha/route.proto new file mode 100644 index 0000000000000..c2ff03b33fb14 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v4alpha/route.proto @@ -0,0 +1,121 @@ +syntax = "proto3"; + +package envoy.extensions.filters.network.dubbo_proxy.v4alpha; + +import "envoy/config/route/v4alpha/route_components.proto"; +import "envoy/type/matcher/v4alpha/string.proto"; +import "envoy/type/v3/range.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.network.dubbo_proxy.v4alpha"; +option java_outer_classname = "RouteProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Dubbo Proxy Route Configuration] +// Dubbo Proxy :ref:`configuration overview `. + +// [#next-free-field: 6] +message RouteConfiguration { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.dubbo_proxy.v3.RouteConfiguration"; + + // The name of the route configuration. Reserved for future use in asynchronous route discovery. + string name = 1; + + // The interface name of the service. + string interface = 2; + + // Which group does the interface belong to. + string group = 3; + + // The version number of the interface. + string version = 4; + + // The list of routes that will be matched, in order, against incoming requests. The first route + // that matches will be used. + repeated Route routes = 5; +} + +message Route { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.dubbo_proxy.v3.Route"; + + // Route matching parameters. 
+ RouteMatch match = 1 [(validate.rules).message = {required: true}]; + + // Route request to some upstream cluster. + RouteAction route = 2 [(validate.rules).message = {required: true}]; +} + +message RouteMatch { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.dubbo_proxy.v3.RouteMatch"; + + // Method level routing matching. + MethodMatch method = 1; + + // Specifies a set of headers that the route should match on. The router will check the request’s + // headers against all the specified headers in the route config. A match will happen if all the + // headers in the route are present in the request with the same values (or based on presence if + // the value field is not in the config). + repeated config.route.v4alpha.HeaderMatcher headers = 2; +} + +message RouteAction { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.dubbo_proxy.v3.RouteAction"; + + oneof cluster_specifier { + option (validate.required) = true; + + // Indicates the upstream cluster to which the request should be routed. + string cluster = 1; + + // Multiple upstream clusters can be specified for a given route. The + // request is routed to one of the upstream clusters based on weights + // assigned to each cluster. + // Currently ClusterWeight only supports the name and weight fields. + config.route.v4alpha.WeightedCluster weighted_clusters = 2; + } +} + +message MethodMatch { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.dubbo_proxy.v3.MethodMatch"; + + // The parameter matching type. + message ParameterMatchSpecifier { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.dubbo_proxy.v3.MethodMatch.ParameterMatchSpecifier"; + + oneof parameter_match_specifier { + // If specified, header match will be performed based on the value of the header. 
+ string exact_match = 3; + + // If specified, header match will be performed based on range. + // The rule will match if the request header value is within this range. + // The entire request header value must represent an integer in base 10 notation: consisting + // of an optional plus or minus sign followed by a sequence of digits. The rule will not match + // if the header value does not represent an integer. Match will fail for empty values, + // floating point numbers or if only a subsequence of the header value is an integer. + // + // Examples: + // + // * For range [-10,0), route will match for header value -1, but not for 0, + // "somestring", 10.9, "-1somestring" + type.v3.Int64Range range_match = 4; + } + } + + // The name of the method. + type.matcher.v4alpha.StringMatcher name = 1; + + // Method parameter definition. + // The key is the parameter index, starting from 0. + // The value is the parameter matching type. + map params_match = 2; +} diff --git a/generated_api_shadow/envoy/extensions/filters/network/ext_authz/v3/ext_authz.proto b/generated_api_shadow/envoy/extensions/filters/network/ext_authz/v3/ext_authz.proto index c3a63ac0a4f65..50161f1cb92bc 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/ext_authz/v3/ext_authz.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/ext_authz/v3/ext_authz.proto @@ -2,6 +2,7 @@ syntax = "proto3"; package envoy.extensions.filters.network.ext_authz.v3; +import "envoy/config/core/v3/config_source.proto"; import "envoy/config/core/v3/grpc_service.proto"; import "udpa/annotations/status.proto"; @@ -22,6 +23,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // gRPC Authorization API defined by // :ref:`CheckRequest `. // A failed check will cause this filter to close the TCP connection. 
+// [#next-free-field: 6] message ExtAuthz { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.ext_authz.v2.ExtAuthz"; @@ -44,4 +46,9 @@ message ExtAuthz { // When this field is true, Envoy will include the peer X.509 certificate, if available, in the // :ref:`certificate`. bool include_peer_certificate = 4; + + // API version for ext_authz transport protocol. This describes the ext_authz gRPC endpoint and + // version of Check{Request,Response} used on the wire. + config.core.v3.ApiVersion transport_api_version = 5 + [(validate.rules).enum = {defined_only: true}]; } diff --git a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/BUILD index bd07dbcbb020d..283fd11e5f09c 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/BUILD +++ b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/BUILD @@ -15,5 +15,6 @@ api_proto_package( "//envoy/type/tracing/v3:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//udpa/core/v1:pkg", ], ) diff --git a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto index b04d0861c9539..0439633d6e6e4 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto @@ -3,8 +3,11 @@ syntax = "proto3"; package envoy.extensions.filters.network.http_connection_manager.v3; import "envoy/config/accesslog/v3/accesslog.proto"; +import "envoy/config/core/v3/base.proto"; import "envoy/config/core/v3/config_source.proto"; +import 
"envoy/config/core/v3/extension.proto"; import "envoy/config/core/v3/protocol.proto"; +import "envoy/config/core/v3/substitution_format_string.proto"; import "envoy/config/route/v3/route.proto"; import "envoy/config/route/v3/scoped_route.proto"; import "envoy/config/trace/v3/http_tracer.proto"; @@ -16,7 +19,11 @@ import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/core/v1/resource_locator.proto"; + import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/migrate.proto"; +import "udpa/annotations/security.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -30,7 +37,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // HTTP connection manager :ref:`configuration overview `. // [#extension: envoy.filters.network.http_connection_manager] -// [#next-free-field: 37] +// [#next-free-field: 41] message HttpConnectionManager { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager"; @@ -235,7 +242,7 @@ message HttpConnectionManager { // Determines if upgrades are enabled or disabled by default. Defaults to true. // This can be overridden on a per-route basis with :ref:`cluster // ` as documented in the - // :ref:`upgrade documentation `. + // :ref:`upgrade documentation `. google.protobuf.BoolValue enabled = 3; } @@ -249,59 +256,65 @@ message HttpConnectionManager { // more information. string stat_prefix = 2 [(validate.rules).string = {min_bytes: 1}]; - // The connection manager’s route table will be dynamically loaded via the RDS API. - repeated HttpFilter http_filters = 5; + oneof route_specifier { + option (validate.required) = true; - // The route table for the connection manager is static and is specified in this property. 
- google.protobuf.BoolValue add_user_agent = 6; + // The connection manager’s route table will be dynamically loaded via the RDS API. + Rds rds = 3; - // A route table will be dynamically assigned to each request based on request attributes - // (e.g., the value of a header). The "routing scopes" (i.e., route tables) and "scope keys" are - // specified in this message. - Tracing tracing = 7; + // The route table for the connection manager is static and is specified in this property. + config.route.v3.RouteConfiguration route_config = 4; + + // A route table will be dynamically assigned to each request based on request attributes + // (e.g., the value of a header). The "routing scopes" (i.e., route tables) and "scope keys" are + // specified in this message. + ScopedRoutes scoped_routes = 31; + } // A list of individual HTTP filters that make up the filter chain for - // requests made to the connection manager. Order matters as the filters are - // processed sequentially as request events happen. - config.core.v3.HttpProtocolOptions common_http_protocol_options = 35; + // requests made to the connection manager. :ref:`Order matters ` + // as the filters are processed sequentially as request events happen. + repeated HttpFilter http_filters = 5; // Whether the connection manager manipulates the :ref:`config_http_conn_man_headers_user-agent` // and :ref:`config_http_conn_man_headers_downstream-service-cluster` headers. See the linked // documentation for more information. Defaults to false. - config.core.v3.Http1ProtocolOptions http_protocol_options = 8; + google.protobuf.BoolValue add_user_agent = 6; // Presence of the object defines whether the connection manager // emits :ref:`tracing ` data to the :ref:`configured tracing provider // `. - config.core.v3.Http2ProtocolOptions http2_protocol_options = 9; + Tracing tracing = 7; // Additional settings for HTTP requests handled by the connection manager. These will be // applicable to both HTTP1 and HTTP2 requests. 
- string server_name = 10; + config.core.v3.HttpProtocolOptions common_http_protocol_options = 35 + [(udpa.annotations.security).configure_for_untrusted_downstream = true]; // Additional HTTP/1 settings that are passed to the HTTP/1 codec. - ServerHeaderTransformation server_header_transformation = 34 - [(validate.rules).enum = {defined_only: true}]; + config.core.v3.Http1ProtocolOptions http_protocol_options = 8; // Additional HTTP/2 settings that are passed directly to the HTTP/2 codec. - google.protobuf.UInt32Value max_request_headers_kb = 29 - [(validate.rules).uint32 = {lte: 96 gt: 0}]; + config.core.v3.Http2ProtocolOptions http2_protocol_options = 9 + [(udpa.annotations.security).configure_for_untrusted_downstream = true]; // An optional override that the connection manager will write to the server // header in responses. If not set, the default is *envoy*. - google.protobuf.Duration stream_idle_timeout = 24; + string server_name = 10; // Defines the action to be applied to the Server header on the response path. // By default, Envoy will overwrite the header with the value specified in // server_name. - google.protobuf.Duration request_timeout = 28; + ServerHeaderTransformation server_header_transformation = 34 + [(validate.rules).enum = {defined_only: true}]; // The maximum request headers size for incoming connections. // If unconfigured, the default max request headers allowed is 60 KiB. // Requests that exceed this limit will receive a 431 response. // The max configurable limit is 96 KiB, based on current implementation // constraints. - google.protobuf.Duration drain_timeout = 12; + google.protobuf.UInt32Value max_request_headers_kb = 29 + [(validate.rules).uint32 = {lte: 96 gt: 0}]; // The stream idle timeout for connections managed by the connection manager. // If not specified, this defaults to 5 minutes. 
The default value was selected @@ -320,6 +333,16 @@ message HttpConnectionManager { // is terminated with a 408 Request Timeout error code if no upstream response // header has been received, otherwise a stream reset occurs. // + // This timeout also specifies the amount of time that Envoy will wait for the peer to open enough + // window to write any remaining stream data once the entirety of stream data (local end stream is + // true) has been buffered pending available window. In other words, this timeout defends against + // a peer that does not release enough window to completely write the stream, even though all + // data has been proxied within available flow control windows. If the timeout is hit in this + // case, the :ref:`tx_flush_timeout ` counter will be + // incremented. Note that :ref:`max_stream_duration + // ` does not apply to + // this corner case. + // // Note that it is possible to idle timeout even if the wire traffic for a stream is non-idle, due // to the granularity of events presented to the connection manager. For example, while receiving // very large request headers, it may be the case that there is traffic regularly arriving on the @@ -328,13 +351,15 @@ message HttpConnectionManager { // // A value of 0 will completely disable the connection manager stream idle // timeout, although per-route idle timeout overrides will continue to apply. - google.protobuf.Duration delayed_close_timeout = 26; + google.protobuf.Duration stream_idle_timeout = 24 + [(udpa.annotations.security).configure_for_untrusted_downstream = true]; // The amount of time that Envoy will wait for the entire request to be received. // The timer is activated when the request is initiated, and is disarmed when the last byte of the // request is sent upstream (i.e. all decoding filters have processed the request), OR when the // response is initiated. If not specified or set to 0, this timeout is disabled. 
- repeated config.accesslog.v3.AccessLog access_log = 13; + google.protobuf.Duration request_timeout = 28 + [(udpa.annotations.security).configure_for_untrusted_downstream = true]; // The time that Envoy will wait between sending an HTTP/2 “shutdown // notification” (GOAWAY frame with max stream ID) and a final GOAWAY frame. @@ -345,7 +370,7 @@ message HttpConnectionManager { // both when a connection hits the idle timeout or during general server // draining. The default grace period is 5000 milliseconds (5 seconds) if this // option is not specified. - google.protobuf.BoolValue use_remote_address = 14; + google.protobuf.Duration drain_timeout = 12; // The delayed close timeout is for downstream connections managed by the HTTP connection manager. // It is defined as a grace period after connection close processing has been locally initiated @@ -377,11 +402,11 @@ message HttpConnectionManager { // A value of 0 will completely disable delayed close processing. When disabled, the downstream // connection's socket will be closed immediately after the write flush is completed or will // never close if the write flush does not complete. - uint32 xff_num_trusted_hops = 19; + google.protobuf.Duration delayed_close_timeout = 26; // Configuration for :ref:`HTTP access logs ` // emitted by the connection manager. - InternalAddressConfig internal_address_config = 25; + repeated config.accesslog.v3.AccessLog access_log = 13; // If set to true, the connection manager will use the real remote address // of the client connection when determining internal versus external origin and manipulating @@ -390,20 +415,21 @@ message HttpConnectionManager { // :ref:`config_http_conn_man_headers_x-forwarded-for`, // :ref:`config_http_conn_man_headers_x-envoy-internal`, and // :ref:`config_http_conn_man_headers_x-envoy-external-address` for more information. 
- bool skip_xff_append = 21; + google.protobuf.BoolValue use_remote_address = 14 + [(udpa.annotations.security).configure_for_untrusted_downstream = true]; // The number of additional ingress proxy hops from the right side of the // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header to trust when // determining the origin client's IP address. The default is zero if this option // is not specified. See the documentation for // :ref:`config_http_conn_man_headers_x-forwarded-for` for more information. - string via = 22; + uint32 xff_num_trusted_hops = 19; // Configures what network addresses are considered internal for stats and header sanitation // purposes. If unspecified, only RFC1918 IP addresses will be considered internal. // See the documentation for :ref:`config_http_conn_man_headers_x-envoy-internal` for more // information about internal/external addresses. - google.protobuf.BoolValue generate_request_id = 15; + InternalAddressConfig internal_address_config = 25; // If set, Envoy will not append the remote address to the // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header. This may be used in @@ -413,28 +439,33 @@ message HttpConnectionManager { // will also suppress XFF addition, it has consequences for logging and other // Envoy uses of the remote address, so *skip_xff_append* should be used // when only an elision of XFF addition is intended. - bool preserve_external_request_id = 32; + bool skip_xff_append = 21; // Via header value to append to request and response headers. If this is // empty, no via header will be appended. - ForwardClientCertDetails forward_client_cert_details = 16 - [(validate.rules).enum = {defined_only: true}]; + string via = 22; // Whether the connection manager will generate the :ref:`x-request-id // ` header if it does not exist. This defaults to // true. Generating a random UUID4 is expensive so in high throughput scenarios where this feature // is not desired it can be disabled. 
- SetCurrentClientCertDetails set_current_client_cert_details = 17; + google.protobuf.BoolValue generate_request_id = 15; // Whether the connection manager will keep the :ref:`x-request-id // ` header if passed for a request that is edge // (Edge request is the request from external clients to front Envoy) and not reset it, which // is the current Envoy behaviour. This defaults to false. - bool proxy_100_continue = 18; + bool preserve_external_request_id = 32; + + // If set, Envoy will always set :ref:`x-request-id ` header in response. + // If this is false or not set, the request ID is returned in responses only if tracing is forced using + // :ref:`x-envoy-force-trace ` header. + bool always_set_request_id_in_response = 37; // How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP // header. - bool represent_ipv4_remote_address_as_ipv4_mapped_ipv6 = 20; + ForwardClientCertDetails forward_client_cert_details = 16 + [(validate.rules).enum = {defined_only: true}]; // This field is valid only when :ref:`forward_client_cert_details // ` @@ -443,13 +474,13 @@ message HttpConnectionManager { // :ref:`config_http_conn_man_headers_x-forwarded-client-cert` header, *Hash* is always set, and // *By* is always set when the client certificate presents the URI type Subject Alternative Name // value. - repeated UpgradeConfig upgrade_configs = 23; + SetCurrentClientCertDetails set_current_client_cert_details = 17; // If proxy_100_continue is true, Envoy will proxy incoming "Expect: // 100-continue" headers upstream, and forward "100 Continue" responses // downstream. If this is false or not set, Envoy will instead strip the // "Expect: 100-continue" header, and send a "100 Continue" response itself. - google.protobuf.BoolValue normalize_path = 30; + bool proxy_100_continue = 18; // If // :ref:`use_remote_address @@ -464,9 +495,9 @@ message HttpConnectionManager { // ` for runtime // control. 
// [#not-implemented-hide:] - bool merge_slashes = 33; + bool represent_ipv4_remote_address_as_ipv4_mapped_ipv6 = 20; - RequestIDExtension request_id_extension = 36; + repeated UpgradeConfig upgrade_configs = 23; // Should paths be normalized according to RFC 3986 before any processing of // requests by HTTP filters or routing? This affects the upstream *:path* header @@ -475,37 +506,131 @@ message HttpConnectionManager { // true in the future. When not specified, this value may be overridden by the // runtime variable // :ref:`http_connection_manager.normalize_path`. - // See `Normalization and Comparison ` + // See `Normalization and Comparison `_ // for details of normalization. // Note that Envoy does not perform - // `case normalization ` + // `case normalization `_ + google.protobuf.BoolValue normalize_path = 30; + + // Determines if adjacent slashes in the path are merged into one before any processing of + // requests by HTTP filters or routing. This affects the upstream *:path* header as well. Without + // setting this option, incoming requests with path `//dir///file` will not match against route + // with `prefix` match set to `/dir`. Defaults to `false`. Note that slash merging is not part of + // `HTTP spec `_ and is provided for convenience. + bool merge_slashes = 33; + + // The configuration of the request ID extension. This includes operations such as + // generation, validation, and associated tracing operations. + // + // If not set, Envoy uses the default UUID-based behavior: + // + // 1. Request ID is propagated using *x-request-id* header. + // + // 2. Request ID is a universally unique identifier (UUID). + // + // 3. Tracing decision (sampled, forced, etc) is set in 14th byte of the UUID. + RequestIDExtension request_id_extension = 36; + + // The configuration to customize local reply returned by Envoy. It can customize status code, + // body text and response content type. 
If not specified, status code and text body are hard + // coded in Envoy, the response content type is plain text. + LocalReplyConfig local_reply_config = 38; + + // Determines if the port part should be removed from host/authority header before any processing + // of request by HTTP filters or routing. The port would be removed only if it is equal to the :ref:`listener's` + // local port and request method is not CONNECT. This affects the upstream host header as well. + // Without setting this option, incoming requests with host `example:443` will not match against + // route with :ref:`domains` match set to `example`. Defaults to `false`. Note that port removal is not part + // of `HTTP spec `_ and is provided for convenience. + bool strip_matching_host_port = 39; + + // Governs Envoy's behavior when receiving invalid HTTP from downstream. + // If this option is false (default), Envoy will err on the conservative side handling HTTP + // errors, terminating both HTTP/1.1 and HTTP/2 connections when receiving an invalid request. + // If this option is set to true, Envoy will be more permissive, only resetting the invalid + // stream in the case of HTTP/2 and leaving the connection open where possible (if the entire + // request is read for HTTP/1.1) + // In general this should be true for deployments receiving trusted traffic (L2 Envoys, + // company-internal mesh) and false when receiving untrusted traffic (edge deployments). 
+ // + // If different behaviors for invalid_http_message for HTTP/1 and HTTP/2 are + // desired, one *must* use the new HTTP/2 option + // :ref:`override_stream_error_on_invalid_http_message + // ` + // *not* the deprecated but similarly named :ref:`stream_error_on_invalid_http_messaging + // ` + google.protobuf.BoolValue stream_error_on_invalid_http_message = 40; + google.protobuf.Duration hidden_envoy_deprecated_idle_timeout = 11 [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; +} - oneof route_specifier { - option (validate.required) = true; +// The configuration to customize local reply returned by Envoy. +message LocalReplyConfig { + // Configuration of list of mappers which allows to filter and change local response. + // The mappers will be checked by the specified order until one is matched. + repeated ResponseMapper mappers = 1; - // Determines if adjacent slashes in the path are merged into one before any processing of - // requests by HTTP filters or routing. This affects the upstream *:path* header as well. Without - // setting this option, incoming requests with path `//dir///file` will not match against route - // with `prefix` match set to `/dir`. Defaults to `false`. Note that slash merging is not part of - // `HTTP spec ` and is provided for convenience. - Rds rds = 3; + // The configuration to form response body from the :ref:`command operators ` + // and to specify response content type as one of: plain/text or application/json. + // + // Example one: plain/text body_format. + // + // .. code-block:: + // + // text_format: %LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=$REQ(:path)% + // + // The following response body in `plain/text` format will be generated for a request with + // local reply body of "upstream connection error", response_code=503 and path=/foo. + // + // .. code-block:: + // + // upstream connect error:503:path=/foo + // + // Example two: application/json body_format. + // + // .. 
code-block:: + // + // json_format: + // status: %RESPONSE_CODE% + // message: %LOCAL_REPLY_BODY% + // path: $REQ(:path)% + // + // The following response body in "application/json" format would be generated for a request with + // local reply body of "upstream connection error", response_code=503 and path=/foo. + // + // .. code-block:: json + // + // { + // "status": 503, + // "message": "upstream connection error", + // "path": "/foo" + // } + // + config.core.v3.SubstitutionFormatString body_format = 2; +} - // The configuration of the request ID extension. This includes operations such as - // generation, validation, and associated tracing operations. - // - // If not set, Envoy uses the default UUID-based behavior: - // - // 1. Request ID is propagated using *x-request-id* header. - // - // 2. Request ID is a universally unique identifier (UUID). - // - // 3. Tracing decision (sampled, forced, etc) is set in 14th byte of the UUID. - config.route.v3.RouteConfiguration route_config = 4; +// The configuration to filter and change local response. +// [#next-free-field: 6] +message ResponseMapper { + // Filter to determine if this mapper should apply. + config.accesslog.v3.AccessLogFilter filter = 1 [(validate.rules).message = {required: true}]; - ScopedRoutes scoped_routes = 31; - } + // The new response status code if specified. + google.protobuf.UInt32Value status_code = 2 [(validate.rules).uint32 = {lt: 600 gte: 200}]; + + // The new local reply body text if specified. It will be used in the `%LOCAL_REPLY_BODY%` + // command operator in the `body_format`. + config.core.v3.DataSource body = 3; + + // A per mapper `body_format` to override the :ref:`body_format `. + // It will be used when this mapper is matched. + config.core.v3.SubstitutionFormatString body_format_override = 4; + + // HTTP headers to add to a local reply. This allows the response mapper to append, to add + // or to override headers of any local reply before it is sent to a downstream client. 
+ repeated config.core.v3.HeaderValueOption headers_to_add = 5 + [(validate.rules).repeated = {max_items: 1000}]; } message Rds { @@ -519,7 +644,13 @@ message Rds { // API. This allows an Envoy configuration with multiple HTTP listeners (and // associated HTTP connection manager filters) to use different route // configurations. - string route_config_name = 2 [(validate.rules).string = {min_bytes: 1}]; + string route_config_name = 2 + [(udpa.annotations.field_migrate).oneof_promotion = "name_specifier"]; + + // Resource locator for RDS. This is mutually exclusive to *route_config_name*. + // [#not-implemented-hide:] + udpa.core.v1.ResourceLocator rds_resource_locator = 3 + [(udpa.annotations.field_migrate).oneof_promotion = "name_specifier"]; } // This message is used to work around the limitations with 'oneof' and repeated fields. @@ -667,14 +798,16 @@ message ScopedRds { [(validate.rules).message = {required: true}]; } +// [#next-free-field: 6] message HttpFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.http_connection_manager.v2.HttpFilter"; reserved 3; - // The name of the filter to instantiate. The name must match a - // :ref:`supported filter `. + // The name of the filter configuration. The name is used as a fallback to + // select an extension if the type of the configuration proto is not + // sufficient. It also serves as a resource name in ExtensionConfigDS. string name = 1 [(validate.rules).string = {min_bytes: 1}]; // Filter specific configuration which depends on the filter being instantiated. See the supported @@ -682,6 +815,11 @@ message HttpFilter { oneof config_type { google.protobuf.Any typed_config = 4; + // Configuration source specifier for an extension configuration discovery service. + // In case of a failure and without the default configuration, the HTTP listener responds with 500. 
+ // Extension configs delivered through this mechanism are not expected to require warming (see https://github.com/envoyproxy/envoy/issues/12061). + config.core.v3.ExtensionConfigSource config_discovery = 5; + google.protobuf.Struct hidden_envoy_deprecated_config = 2 [deprecated = true]; } } diff --git a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/BUILD index 792ccf7ab6772..837b7b898f265 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/BUILD +++ b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/BUILD @@ -7,7 +7,7 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/annotations:pkg", - "//envoy/config/accesslog/v3:pkg", + "//envoy/config/accesslog/v4alpha:pkg", "//envoy/config/core/v4alpha:pkg", "//envoy/config/route/v4alpha:pkg", "//envoy/config/trace/v4alpha:pkg", @@ -15,5 +15,6 @@ api_proto_package( "//envoy/type/tracing/v3:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//udpa/core/v1:pkg", ], ) diff --git a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto index 975b71cc892f3..042a39863f810 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto @@ -2,9 +2,12 @@ syntax = "proto3"; package envoy.extensions.filters.network.http_connection_manager.v4alpha; -import "envoy/config/accesslog/v3/accesslog.proto"; +import "envoy/config/accesslog/v4alpha/accesslog.proto"; +import 
"envoy/config/core/v4alpha/base.proto"; import "envoy/config/core/v4alpha/config_source.proto"; +import "envoy/config/core/v4alpha/extension.proto"; import "envoy/config/core/v4alpha/protocol.proto"; +import "envoy/config/core/v4alpha/substitution_format_string.proto"; import "envoy/config/route/v4alpha/route.proto"; import "envoy/config/route/v4alpha/scoped_route.proto"; import "envoy/config/trace/v4alpha/http_tracer.proto"; @@ -16,7 +19,10 @@ import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/core/v1/resource_locator.proto"; + import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/security.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -30,7 +36,7 @@ option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSIO // HTTP connection manager :ref:`configuration overview `. // [#extension: envoy.filters.network.http_connection_manager] -// [#next-free-field: 37] +// [#next-free-field: 41] message HttpConnectionManager { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager"; @@ -231,7 +237,7 @@ message HttpConnectionManager { // Determines if upgrades are enabled or disabled by default. Defaults to true. // This can be overridden on a per-route basis with :ref:`cluster // ` as documented in the - // :ref:`upgrade documentation `. + // :ref:`upgrade documentation `. google.protobuf.BoolValue enabled = 3; } @@ -263,8 +269,8 @@ message HttpConnectionManager { } // A list of individual HTTP filters that make up the filter chain for - // requests made to the connection manager. Order matters as the filters are - // processed sequentially as request events happen. + // requests made to the connection manager. 
:ref:`Order matters ` + // as the filters are processed sequentially as request events happen. repeated HttpFilter http_filters = 5; // Whether the connection manager manipulates the :ref:`config_http_conn_man_headers_user-agent` @@ -279,13 +285,15 @@ message HttpConnectionManager { // Additional settings for HTTP requests handled by the connection manager. These will be // applicable to both HTTP1 and HTTP2 requests. - config.core.v4alpha.HttpProtocolOptions common_http_protocol_options = 35; + config.core.v4alpha.HttpProtocolOptions common_http_protocol_options = 35 + [(udpa.annotations.security).configure_for_untrusted_downstream = true]; // Additional HTTP/1 settings that are passed to the HTTP/1 codec. config.core.v4alpha.Http1ProtocolOptions http_protocol_options = 8; // Additional HTTP/2 settings that are passed directly to the HTTP/2 codec. - config.core.v4alpha.Http2ProtocolOptions http2_protocol_options = 9; + config.core.v4alpha.Http2ProtocolOptions http2_protocol_options = 9 + [(udpa.annotations.security).configure_for_untrusted_downstream = true]; // An optional override that the connection manager will write to the server // header in responses. If not set, the default is *envoy*. @@ -322,6 +330,16 @@ message HttpConnectionManager { // is terminated with a 408 Request Timeout error code if no upstream response // header has been received, otherwise a stream reset occurs. // + // This timeout also specifies the amount of time that Envoy will wait for the peer to open enough + // window to write any remaining stream data once the entirety of stream data (local end stream is + // true) has been buffered pending available window. In other words, this timeout defends against + // a peer that does not release enough window to completely write the stream, even though all + // data has been proxied within available flow control windows. If the timeout is hit in this + // case, the :ref:`tx_flush_timeout ` counter will be + // incremented. 
Note that :ref:`max_stream_duration + // ` does not apply to + // this corner case. + // // Note that it is possible to idle timeout even if the wire traffic for a stream is non-idle, due // to the granularity of events presented to the connection manager. For example, while receiving // very large request headers, it may be the case that there is traffic regularly arriving on the @@ -330,13 +348,15 @@ message HttpConnectionManager { // // A value of 0 will completely disable the connection manager stream idle // timeout, although per-route idle timeout overrides will continue to apply. - google.protobuf.Duration stream_idle_timeout = 24; + google.protobuf.Duration stream_idle_timeout = 24 + [(udpa.annotations.security).configure_for_untrusted_downstream = true]; // The amount of time that Envoy will wait for the entire request to be received. // The timer is activated when the request is initiated, and is disarmed when the last byte of the // request is sent upstream (i.e. all decoding filters have processed the request), OR when the // response is initiated. If not specified or set to 0, this timeout is disabled. - google.protobuf.Duration request_timeout = 28; + google.protobuf.Duration request_timeout = 28 + [(udpa.annotations.security).configure_for_untrusted_downstream = true]; // The time that Envoy will wait between sending an HTTP/2 “shutdown // notification” (GOAWAY frame with max stream ID) and a final GOAWAY frame. @@ -383,7 +403,7 @@ message HttpConnectionManager { // Configuration for :ref:`HTTP access logs ` // emitted by the connection manager. 
- repeated config.accesslog.v3.AccessLog access_log = 13; + repeated config.accesslog.v4alpha.AccessLog access_log = 13; // If set to true, the connection manager will use the real remote address // of the client connection when determining internal versus external origin and manipulating @@ -392,7 +412,8 @@ message HttpConnectionManager { // :ref:`config_http_conn_man_headers_x-forwarded-for`, // :ref:`config_http_conn_man_headers_x-envoy-internal`, and // :ref:`config_http_conn_man_headers_x-envoy-external-address` for more information. - google.protobuf.BoolValue use_remote_address = 14; + google.protobuf.BoolValue use_remote_address = 14 + [(udpa.annotations.security).configure_for_untrusted_downstream = true]; // The number of additional ingress proxy hops from the right side of the // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header to trust when @@ -433,6 +454,11 @@ message HttpConnectionManager { // is the current Envoy behaviour. This defaults to false. bool preserve_external_request_id = 32; + // If set, Envoy will always set :ref:`x-request-id ` header in response. + // If this is false or not set, the request ID is returned in responses only if tracing is forced using + // :ref:`x-envoy-force-trace ` header. + bool always_set_request_id_in_response = 37; + // How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP // header. ForwardClientCertDetails forward_client_cert_details = 16 @@ -477,17 +503,17 @@ message HttpConnectionManager { // true in the future. When not specified, this value may be overridden by the // runtime variable // :ref:`http_connection_manager.normalize_path`. - // See `Normalization and Comparison ` + // See `Normalization and Comparison `_ // for details of normalization. 
// Note that Envoy does not perform - // `case normalization ` + // `case normalization `_ google.protobuf.BoolValue normalize_path = 30; // Determines if adjacent slashes in the path are merged into one before any processing of // requests by HTTP filters or routing. This affects the upstream *:path* header as well. Without // setting this option, incoming requests with path `//dir///file` will not match against route // with `prefix` match set to `/dir`. Defaults to `false`. Note that slash merging is not part of - // `HTTP spec ` and is provided for convenience. + // `HTTP spec `_ and is provided for convenience. bool merge_slashes = 33; // The configuration of the request ID extension. This includes operations such as @@ -501,6 +527,110 @@ message HttpConnectionManager { // // 3. Tracing decision (sampled, forced, etc) is set in 14th byte of the UUID. RequestIDExtension request_id_extension = 36; + + // The configuration to customize local reply returned by Envoy. It can customize status code, + // body text and response content type. If not specified, status code and text body are hard + // coded in Envoy, the response content type is plain text. + LocalReplyConfig local_reply_config = 38; + + // Determines if the port part should be removed from host/authority header before any processing + // of request by HTTP filters or routing. The port would be removed only if it is equal to the :ref:`listener's` + // local port and request method is not CONNECT. This affects the upstream host header as well. + // Without setting this option, incoming requests with host `example:443` will not match against + // route with :ref:`domains` match set to `example`. Defaults to `false`. Note that port removal is not part + // of `HTTP spec `_ and is provided for convenience. + bool strip_matching_host_port = 39; + + // Governs Envoy's behavior when receiving invalid HTTP from downstream. 
+ // If this option is false (default), Envoy will err on the conservative side handling HTTP + // errors, terminating both HTTP/1.1 and HTTP/2 connections when receiving an invalid request. + // If this option is set to true, Envoy will be more permissive, only resetting the invalid + // stream in the case of HTTP/2 and leaving the connection open where possible (if the entire + // request is read for HTTP/1.1) + // In general this should be true for deployments receiving trusted traffic (L2 Envoys, + // company-internal mesh) and false when receiving untrusted traffic (edge deployments). + // + // If different behaviors for invalid_http_message for HTTP/1 and HTTP/2 are + // desired, one *must* use the new HTTP/2 option + // :ref:`override_stream_error_on_invalid_http_message + // ` + // *not* the deprecated but similarly named :ref:`stream_error_on_invalid_http_messaging + // ` + google.protobuf.BoolValue stream_error_on_invalid_http_message = 40; +} + +// The configuration to customize local reply returned by Envoy. +message LocalReplyConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.http_connection_manager.v3.LocalReplyConfig"; + + // Configuration of list of mappers which allows to filter and change local response. + // The mappers will be checked by the specified order until one is matched. + repeated ResponseMapper mappers = 1; + + // The configuration to form response body from the :ref:`command operators ` + // and to specify response content type as one of: plain/text or application/json. + // + // Example one: plain/text body_format. + // + // .. code-block:: + // + // text_format: %LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=$REQ(:path)% + // + // The following response body in `plain/text` format will be generated for a request with + // local reply body of "upstream connection error", response_code=503 and path=/foo. + // + // .. 
code-block:: + // + // upstream connect error:503:path=/foo + // + // Example two: application/json body_format. + // + // .. code-block:: + // + // json_format: + // status: %RESPONSE_CODE% + // message: %LOCAL_REPLY_BODY% + // path: $REQ(:path)% + // + // The following response body in "application/json" format would be generated for a request with + // local reply body of "upstream connection error", response_code=503 and path=/foo. + // + // .. code-block:: json + // + // { + // "status": 503, + // "message": "upstream connection error", + // "path": "/foo" + // } + // + config.core.v4alpha.SubstitutionFormatString body_format = 2; +} + +// The configuration to filter and change local response. +// [#next-free-field: 6] +message ResponseMapper { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.http_connection_manager.v3.ResponseMapper"; + + // Filter to determine if this mapper should apply. + config.accesslog.v4alpha.AccessLogFilter filter = 1 [(validate.rules).message = {required: true}]; + + // The new response status code if specified. + google.protobuf.UInt32Value status_code = 2 [(validate.rules).uint32 = {lt: 600 gte: 200}]; + + // The new local reply body text if specified. It will be used in the `%LOCAL_REPLY_BODY%` + // command operator in the `body_format`. + config.core.v4alpha.DataSource body = 3; + + // A per mapper `body_format` to override the :ref:`body_format `. + // It will be used when this mapper is matched. + config.core.v4alpha.SubstitutionFormatString body_format_override = 4; + + // HTTP headers to add to a local reply. This allows the response mapper to append, to add + // or to override headers of any local reply before it is sent to a downstream client. + repeated config.core.v4alpha.HeaderValueOption headers_to_add = 5 + [(validate.rules).repeated = {max_items: 1000}]; } message Rds { @@ -510,11 +640,17 @@ message Rds { // Configuration source specifier for RDS. 
config.core.v4alpha.ConfigSource config_source = 1 [(validate.rules).message = {required: true}]; - // The name of the route configuration. This name will be passed to the RDS - // API. This allows an Envoy configuration with multiple HTTP listeners (and - // associated HTTP connection manager filters) to use different route - // configurations. - string route_config_name = 2 [(validate.rules).string = {min_bytes: 1}]; + oneof name_specifier { + // The name of the route configuration. This name will be passed to the RDS + // API. This allows an Envoy configuration with multiple HTTP listeners (and + // associated HTTP connection manager filters) to use different route + // configurations. + string route_config_name = 2; + + // Resource locator for RDS. This is mutually exclusive to *route_config_name*. + // [#not-implemented-hide:] + udpa.core.v1.ResourceLocator rds_resource_locator = 3; + } } // This message is used to work around the limitations with 'oneof' and repeated fields. @@ -663,6 +799,7 @@ message ScopedRds { [(validate.rules).message = {required: true}]; } +// [#next-free-field: 6] message HttpFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.network.http_connection_manager.v3.HttpFilter"; @@ -671,14 +808,20 @@ message HttpFilter { reserved "config"; - // The name of the filter to instantiate. The name must match a - // :ref:`supported filter `. + // The name of the filter configuration. The name is used as a fallback to + // select an extension if the type of the configuration proto is not + // sufficient. It also serves as a resource name in ExtensionConfigDS. string name = 1 [(validate.rules).string = {min_bytes: 1}]; // Filter specific configuration which depends on the filter being instantiated. See the supported // filters for further documentation. oneof config_type { google.protobuf.Any typed_config = 4; + + // Configuration source specifier for an extension configuration discovery service. 
+ // In case of a failure and without the default configuration, the HTTP listener responds with 500. + // Extension configs delivered through this mechanism are not expected to require warming (see https://github.com/envoyproxy/envoy/issues/12061). + config.core.v4alpha.ExtensionConfigSource config_discovery = 5; } } diff --git a/generated_api_shadow/envoy/extensions/filters/network/postgres_proxy/v3alpha/postgres_proxy.proto b/generated_api_shadow/envoy/extensions/filters/network/postgres_proxy/v3alpha/postgres_proxy.proto index 61f3ec45c8838..aa8e0f5941bf8 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/postgres_proxy/v3alpha/postgres_proxy.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/postgres_proxy/v3alpha/postgres_proxy.proto @@ -2,6 +2,8 @@ syntax = "proto3"; package envoy.extensions.filters.network.postgres_proxy.v3alpha; +import "google/protobuf/wrappers.proto"; + import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; @@ -22,4 +24,9 @@ message PostgresProxy { // The human readable prefix to use when emitting :ref:`statistics // `. string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; + + // Controls whether SQL statements received in Frontend Query messages + // are parsed. Parsing is required to produce Postgres proxy filter + // metadata. Defaults to true. 
+ google.protobuf.BoolValue enable_sql_parsing = 2; } diff --git a/generated_api_shadow/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto b/generated_api_shadow/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto index 60ab28cfcf1fb..8f996c30f9ae4 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto @@ -8,6 +8,7 @@ import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/migrate.proto"; import "udpa/annotations/sensitive.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; @@ -22,7 +23,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // Redis Proxy :ref:`configuration overview `. // [#extension: envoy.filters.network.redis_proxy] -// [#next-free-field: 7] +// [#next-free-field: 8] message RedisProxy { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.redis_proxy.v2.RedisProxy"; @@ -38,22 +39,22 @@ message RedisProxy { // because replication is asynchronous and requires some delay. You need to ensure that your // application can tolerate stale data. enum ReadPolicy { - // Default mode. Read from the current master node. - MASTER = 0; + // Default mode. Read from the current primary node. + MASTER = 0 [(udpa.annotations.enum_value_migrate).rename = "PRIMARY"]; - // Read from the master, but if it is unavailable, read from replica nodes. - PREFER_MASTER = 1; + // Read from the primary, but if it is unavailable, read from replica nodes. + PREFER_MASTER = 1 [(udpa.annotations.enum_value_migrate).rename = "PREFER_PRIMARY"]; // Read from replica nodes. If multiple replica nodes are present within a shard, a random // node is selected. Healthy nodes have precedent over unhealthy nodes. 
REPLICA = 2; // Read from the replica nodes (similar to REPLICA), but if all replicas are unavailable (not - // present or unhealthy), read from the master. + // present or unhealthy), read from the primary. PREFER_REPLICA = 3; - // Read from any node of the cluster. A random node is selected among the master and replicas, - // healthy nodes have precedent over unhealthy nodes. + // Read from any node of the cluster. A random node is selected among the primary and + // replicas, healthy nodes have precedent over unhealthy nodes. ANY = 4; } @@ -113,10 +114,10 @@ message RedisProxy { google.protobuf.UInt32Value max_upstream_unknown_connections = 6; // Enable per-command statistics per upstream cluster, in addition to the filter level aggregate - // count. + // count. These commands are measured in microseconds. bool enable_command_stats = 8; - // Read policy. The default is to read from the master. + // Read policy. The default is to read from the primary. ReadPolicy read_policy = 7 [(validate.rules).enum = {defined_only: true}]; } @@ -188,7 +189,7 @@ message RedisProxy { ConnPoolSettings settings = 3 [(validate.rules).message = {required: true}]; // Indicates that latency stat should be computed in microseconds. By default it is computed in - // milliseconds. + // milliseconds. This does not apply to upstream command stats currently. bool latency_in_micros = 4; // List of **unique** prefixes used to separate keys from different workloads to different @@ -230,6 +231,18 @@ message RedisProxy { // AUTH, but no password is set" error will be returned. config.core.v3.DataSource downstream_auth_password = 6 [(udpa.annotations.sensitive) = true]; + // If a username is provided an ACL style AUTH command will be required with a username and password. + // Authenticate Redis client connections locally by forcing downstream clients to issue a `Redis + // AUTH command `_ with this username and the *downstream_auth_password* + // before enabling any other command. 
If an AUTH command's username and password matches this username + // and the *downstream_auth_password* , an "OK" response will be returned to the client. If the AUTH + // command username or password does not match this username or the *downstream_auth_password*, then an + // "WRONGPASS invalid username-password pair" error will be returned. If any other command is received before AUTH when this + // password is set, then a "NOAUTH Authentication required." error response will be sent to the + // client. If an AUTH command is received when the password is not set, then an "ERR Client sent + // AUTH, but no ACL is set" error will be returned. + config.core.v3.DataSource downstream_auth_username = 7 [(udpa.annotations.sensitive) = true]; + string hidden_envoy_deprecated_cluster = 2 [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; } @@ -244,4 +257,8 @@ message RedisProtocolOptions { // Upstream server password as defined by the `requirepass` directive // `_ in the server's configuration file. config.core.v3.DataSource auth_password = 1 [(udpa.annotations.sensitive) = true]; + + // Upstream server username as defined by the `user` directive + // `_ in the server's configuration file. + config.core.v3.DataSource auth_username = 2 [(udpa.annotations.sensitive) = true]; } diff --git a/generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v3/BUILD new file mode 100644 index 0000000000000..e6bc5699efc45 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v3/BUILD @@ -0,0 +1,14 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/core/v3:pkg", + "//envoy/config/route/v3:pkg", + "//envoy/type/matcher/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.proto b/generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.proto new file mode 100644 index 0000000000000..ee77ab9095924 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.proto @@ -0,0 +1,36 @@ +syntax = "proto3"; + +package envoy.extensions.filters.network.rocketmq_proxy.v3; + +import "envoy/extensions/filters/network/rocketmq_proxy/v3/route.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/duration.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.network.rocketmq_proxy.v3"; +option java_outer_classname = "RocketmqProxyProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: RocketMQ Proxy] +// RocketMQ Proxy :ref:`configuration overview `. +// [#extension: envoy.filters.network.rocketmq_proxy] + +message RocketmqProxy { + // The human readable prefix to use when emitting statistics. + string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + + // The route table for the connection manager is specified in this property. + RouteConfiguration route_config = 2; + + // The largest duration transient object expected to live, more than 10s is recommended. 
+ google.protobuf.Duration transient_object_life_span = 3; + + // If develop_mode is enabled, this proxy plugin may work without dedicated traffic intercepting + // facility without considering backward compatibility of exiting RocketMQ client SDK. + bool develop_mode = 4; +} diff --git a/generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v3/route.proto b/generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v3/route.proto new file mode 100644 index 0000000000000..5fe5d33ffacf4 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v3/route.proto @@ -0,0 +1,55 @@ +syntax = "proto3"; + +package envoy.extensions.filters.network.rocketmq_proxy.v3; + +import "envoy/config/core/v3/base.proto"; +import "envoy/config/route/v3/route_components.proto"; +import "envoy/type/matcher/v3/string.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.network.rocketmq_proxy.v3"; +option java_outer_classname = "RouteProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Rocketmq Proxy Route Configuration] +// Rocketmq Proxy :ref:`configuration overview `. + +message RouteConfiguration { + // The name of the route configuration. + string name = 1; + + // The list of routes that will be matched, in order, against incoming requests. The first route + // that matches will be used. + repeated Route routes = 2; +} + +message Route { + // Route matching parameters. + RouteMatch match = 1 [(validate.rules).message = {required: true}]; + + // Route request to some upstream cluster. + RouteAction route = 2 [(validate.rules).message = {required: true}]; +} + +message RouteMatch { + // The name of the topic. 
+ type.matcher.v3.StringMatcher topic = 1 [(validate.rules).message = {required: true}]; + + // Specifies a set of headers that the route should match on. The router will check the request’s + // headers against all the specified headers in the route config. A match will happen if all the + // headers in the route are present in the request with the same values (or based on presence if + // the value field is not in the config). + repeated config.route.v3.HeaderMatcher headers = 2; +} + +message RouteAction { + // Indicates the upstream cluster to which the request should be routed. + string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Optional endpoint metadata match criteria used by the subset load balancer. + config.core.v3.Metadata metadata_match = 2; +} diff --git a/generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/BUILD new file mode 100644 index 0000000000000..d8d88f7f3bb4f --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/BUILD @@ -0,0 +1,15 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/core/v4alpha:pkg", + "//envoy/config/route/v4alpha:pkg", + "//envoy/extensions/filters/network/rocketmq_proxy/v3:pkg", + "//envoy/type/matcher/v4alpha:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/rocketmq_proxy.proto b/generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/rocketmq_proxy.proto new file mode 100644 index 0000000000000..a765734e66db5 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/rocketmq_proxy.proto @@ -0,0 +1,39 @@ +syntax = "proto3"; + +package envoy.extensions.filters.network.rocketmq_proxy.v4alpha; + +import "envoy/extensions/filters/network/rocketmq_proxy/v4alpha/route.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/duration.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.network.rocketmq_proxy.v4alpha"; +option java_outer_classname = "RocketmqProxyProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: RocketMQ Proxy] +// RocketMQ Proxy :ref:`configuration overview `. +// [#extension: envoy.filters.network.rocketmq_proxy] + +message RocketmqProxy { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.rocketmq_proxy.v3.RocketmqProxy"; + + // The human readable prefix to use when emitting statistics. + string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + + // The route table for the connection manager is specified in this property. 
+ RouteConfiguration route_config = 2; + + // The largest duration transient object expected to live, more than 10s is recommended. + google.protobuf.Duration transient_object_life_span = 3; + + // If develop_mode is enabled, this proxy plugin may work without dedicated traffic intercepting + // facility without considering backward compatibility of exiting RocketMQ client SDK. + bool develop_mode = 4; +} diff --git a/generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/route.proto b/generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/route.proto new file mode 100644 index 0000000000000..995e8bcb05e36 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/route.proto @@ -0,0 +1,67 @@ +syntax = "proto3"; + +package envoy.extensions.filters.network.rocketmq_proxy.v4alpha; + +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/config/route/v4alpha/route_components.proto"; +import "envoy/type/matcher/v4alpha/string.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.network.rocketmq_proxy.v4alpha"; +option java_outer_classname = "RouteProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Rocketmq Proxy Route Configuration] +// Rocketmq Proxy :ref:`configuration overview `. + +message RouteConfiguration { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.rocketmq_proxy.v3.RouteConfiguration"; + + // The name of the route configuration. + string name = 1; + + // The list of routes that will be matched, in order, against incoming requests. The first route + // that matches will be used. 
+ repeated Route routes = 2; +} + +message Route { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.rocketmq_proxy.v3.Route"; + + // Route matching parameters. + RouteMatch match = 1 [(validate.rules).message = {required: true}]; + + // Route request to some upstream cluster. + RouteAction route = 2 [(validate.rules).message = {required: true}]; +} + +message RouteMatch { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.rocketmq_proxy.v3.RouteMatch"; + + // The name of the topic. + type.matcher.v4alpha.StringMatcher topic = 1 [(validate.rules).message = {required: true}]; + + // Specifies a set of headers that the route should match on. The router will check the request’s + // headers against all the specified headers in the route config. A match will happen if all the + // headers in the route are present in the request with the same values (or based on presence if + // the value field is not in the config). + repeated config.route.v4alpha.HeaderMatcher headers = 2; +} + +message RouteAction { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.rocketmq_proxy.v3.RouteAction"; + + // Indicates the upstream cluster to which the request should be routed. + string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Optional endpoint metadata match criteria used by the subset load balancer. + config.core.v4alpha.Metadata metadata_match = 2; +} diff --git a/generated_api_shadow/envoy/extensions/filters/network/tcp_proxy/v3/tcp_proxy.proto b/generated_api_shadow/envoy/extensions/filters/network/tcp_proxy/v3/tcp_proxy.proto index 6024a6d552bcd..27d187ed2c338 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/tcp_proxy/v3/tcp_proxy.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/tcp_proxy/v3/tcp_proxy.proto @@ -99,18 +99,22 @@ message TcpProxy { // `. 
string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; - // The upstream cluster to connect to. - config.core.v3.Metadata metadata_match = 9; + oneof cluster_specifier { + option (validate.required) = true; - // Multiple upstream clusters can be specified for a given route. The - // request is routed to one of the upstream clusters based on weights - // assigned to each cluster. - google.protobuf.Duration idle_timeout = 8; + // The upstream cluster to connect to. + string cluster = 2; + + // Multiple upstream clusters can be specified for a given route. The + // request is routed to one of the upstream clusters based on weights + // assigned to each cluster. + WeightedCluster weighted_clusters = 10; + } // Optional endpoint metadata match criteria. Only endpoints in the upstream // cluster with metadata matching that set in metadata_match will be // considered. The filter name should be specified as *envoy.lb*. - google.protobuf.Duration downstream_idle_timeout = 3; + config.core.v3.Metadata metadata_match = 9; // The idle timeout for connections managed by the TCP proxy filter. The idle timeout // is defined as the period in which there are no bytes sent or received on either @@ -120,7 +124,7 @@ message TcpProxy { // .. warning:: // Disabling this timeout has a highly likelihood of yielding connection leaks due to lost TCP // FIN packets, etc. - google.protobuf.Duration upstream_idle_timeout = 4; + google.protobuf.Duration idle_timeout = 8; // [#not-implemented-hide:] The idle timeout for connections managed by the TCP proxy // filter. The idle timeout is defined as the period in which there is no @@ -128,33 +132,29 @@ message TcpProxy { // is reached the connection will be closed. The distinction between // downstream_idle_timeout/upstream_idle_timeout provides a means to set // timeout based on the last byte sent on the downstream/upstream connection. 
- repeated config.accesslog.v3.AccessLog access_log = 5; + google.protobuf.Duration downstream_idle_timeout = 3; // [#not-implemented-hide:] - google.protobuf.UInt32Value max_connect_attempts = 7 [(validate.rules).uint32 = {gte: 1}]; + google.protobuf.Duration upstream_idle_timeout = 4; // Configuration for :ref:`access logs ` // emitted by the this tcp_proxy. - repeated type.v3.HashPolicy hash_policy = 11 [(validate.rules).repeated = {max_items: 1}]; + repeated config.accesslog.v3.AccessLog access_log = 5; // The maximum number of unsuccessful connection attempts that will be made before // giving up. If the parameter is not specified, 1 connection attempt will be made. - TunnelingConfig tunneling_config = 12; + google.protobuf.UInt32Value max_connect_attempts = 7 [(validate.rules).uint32 = {gte: 1}]; // Optional configuration for TCP proxy hash policy. If hash_policy is not set, the hash-based // load balancing algorithms will select a host randomly. Currently the number of hash policies is // limited to 1. - DeprecatedV1 hidden_envoy_deprecated_deprecated_v1 = 6 [deprecated = true]; - - oneof cluster_specifier { - option (validate.required) = true; + repeated type.v3.HashPolicy hash_policy = 11 [(validate.rules).repeated = {max_items: 1}]; - // [#not-implemented-hide:] feature in progress - // If set, this configures tunneling, e.g. configuration options to tunnel multiple TCP - // payloads over a shared HTTP/2 tunnel. If this message is absent, the payload - // will be proxied upstream as per usual. - string cluster = 2; + // [#not-implemented-hide:] feature in progress + // If set, this configures tunneling, e.g. configuration options to tunnel multiple TCP + // payloads over a shared HTTP/2 tunnel. If this message is absent, the payload + // will be proxied upstream as per usual. 
+ TunnelingConfig tunneling_config = 12; - WeightedCluster weighted_clusters = 10; - } + DeprecatedV1 hidden_envoy_deprecated_deprecated_v1 = 6 [deprecated = true]; } diff --git a/generated_api_shadow/envoy/extensions/filters/network/tcp_proxy/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/network/tcp_proxy/v4alpha/BUILD new file mode 100644 index 0000000000000..3825be9a8afc9 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/network/tcp_proxy/v4alpha/BUILD @@ -0,0 +1,15 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/accesslog/v4alpha:pkg", + "//envoy/config/core/v4alpha:pkg", + "//envoy/extensions/filters/network/tcp_proxy/v3:pkg", + "//envoy/type/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/extensions/filters/network/tcp_proxy/v4alpha/tcp_proxy.proto b/generated_api_shadow/envoy/extensions/filters/network/tcp_proxy/v4alpha/tcp_proxy.proto new file mode 100644 index 0000000000000..1857f2abcd4e9 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/network/tcp_proxy/v4alpha/tcp_proxy.proto @@ -0,0 +1,137 @@ +syntax = "proto3"; + +package envoy.extensions.filters.network.tcp_proxy.v4alpha; + +import "envoy/config/accesslog/v4alpha/accesslog.proto"; +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/type/v3/hash_policy.proto"; + +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.network.tcp_proxy.v4alpha"; +option java_outer_classname = "TcpProxyProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = 
NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: TCP Proxy] +// TCP Proxy :ref:`configuration overview `. +// [#extension: envoy.filters.network.tcp_proxy] + +// [#next-free-field: 13] +message TcpProxy { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy"; + + // Allows for specification of multiple upstream clusters along with weights + // that indicate the percentage of traffic to be forwarded to each cluster. + // The router selects an upstream cluster based on these weights. + message WeightedCluster { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy.WeightedCluster"; + + message ClusterWeight { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy.WeightedCluster.ClusterWeight"; + + // Name of the upstream cluster. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // When a request matches the route, the choice of an upstream cluster is + // determined by its weight. The sum of weights across all entries in the + // clusters array determines the total weight. + uint32 weight = 2 [(validate.rules).uint32 = {gte: 1}]; + + // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints + // in the upstream cluster with metadata matching what is set in this field will be considered + // for load balancing. Note that this will be merged with what's provided in + // :ref:`TcpProxy.metadata_match + // `, with values + // here taking precedence. The filter name should be specified as *envoy.lb*. + config.core.v4alpha.Metadata metadata_match = 3; + } + + // Specifies one or more upstream clusters associated with the route. + repeated ClusterWeight clusters = 1 [(validate.rules).repeated = {min_items: 1}]; + } + + // Configuration for tunneling TCP over other transports or application layers. 
+ // Currently, only HTTP/2 is supported. When other options exist, HTTP/2 will + // remain the default. + message TunnelingConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy.TunnelingConfig"; + + // The hostname to send in the synthesized CONNECT headers to the upstream proxy. + string hostname = 1 [(validate.rules).string = {min_bytes: 1}]; + } + + reserved 6; + + reserved "deprecated_v1"; + + // The prefix to use when emitting :ref:`statistics + // `. + string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + + oneof cluster_specifier { + option (validate.required) = true; + + // The upstream cluster to connect to. + string cluster = 2; + + // Multiple upstream clusters can be specified for a given route. The + // request is routed to one of the upstream clusters based on weights + // assigned to each cluster. + WeightedCluster weighted_clusters = 10; + } + + // Optional endpoint metadata match criteria. Only endpoints in the upstream + // cluster with metadata matching that set in metadata_match will be + // considered. The filter name should be specified as *envoy.lb*. + config.core.v4alpha.Metadata metadata_match = 9; + + // The idle timeout for connections managed by the TCP proxy filter. The idle timeout + // is defined as the period in which there are no bytes sent or received on either + // the upstream or downstream connection. If not set, the default idle timeout is 1 hour. If set + // to 0s, the timeout will be disabled. + // + // .. warning:: + // Disabling this timeout has a highly likelihood of yielding connection leaks due to lost TCP + // FIN packets, etc. + google.protobuf.Duration idle_timeout = 8; + + // [#not-implemented-hide:] The idle timeout for connections managed by the TCP proxy + // filter. The idle timeout is defined as the period in which there is no + // active traffic. If not set, there is no idle timeout. 
When the idle timeout + // is reached the connection will be closed. The distinction between + // downstream_idle_timeout/upstream_idle_timeout provides a means to set + // timeout based on the last byte sent on the downstream/upstream connection. + google.protobuf.Duration downstream_idle_timeout = 3; + + // [#not-implemented-hide:] + google.protobuf.Duration upstream_idle_timeout = 4; + + // Configuration for :ref:`access logs ` + // emitted by the this tcp_proxy. + repeated config.accesslog.v4alpha.AccessLog access_log = 5; + + // The maximum number of unsuccessful connection attempts that will be made before + // giving up. If the parameter is not specified, 1 connection attempt will be made. + google.protobuf.UInt32Value max_connect_attempts = 7 [(validate.rules).uint32 = {gte: 1}]; + + // Optional configuration for TCP proxy hash policy. If hash_policy is not set, the hash-based + // load balancing algorithms will select a host randomly. Currently the number of hash policies is + // limited to 1. + repeated type.v3.HashPolicy hash_policy = 11 [(validate.rules).repeated = {max_items: 1}]; + + // [#not-implemented-hide:] feature in progress + // If set, this configures tunneling, e.g. configuration options to tunnel multiple TCP + // payloads over a shared HTTP/2 tunnel. If this message is absent, the payload + // will be proxied upstream as per usual. 
+ TunnelingConfig tunneling_config = 12; +} diff --git a/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v3/route.proto b/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v3/route.proto index 3eeae0cba594d..b7afc4f0b8037 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v3/route.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v3/route.proto @@ -46,39 +46,39 @@ message RouteMatch { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.thrift_proxy.v2alpha1.RouteMatch"; - // If specified, the route must exactly match the request method name. As a special case, an - // empty string matches any request method name. - bool invert = 3; - - // If specified, the route must have the service name as the request method name prefix. As a - // special case, an empty string matches any service name. Only relevant when service - // multiplexing. - repeated config.route.v3.HeaderMatcher headers = 4; - oneof match_specifier { option (validate.required) = true; - // Inverts whatever matching is done in the :ref:`method_name - // ` or - // :ref:`service_name - // ` fields. - // Cannot be combined with wildcard matching as that would result in routes never being matched. - // - // .. note:: - // - // This does not invert matching done as part of the :ref:`headers field - // ` field. To - // invert header matching, see :ref:`invert_match - // `. + // If specified, the route must exactly match the request method name. As a special case, an + // empty string matches any request method name. string method_name = 1; - // Specifies a set of headers that the route should match on. The router will check the request’s - // headers against all the specified headers in the route config. A match will happen if all the - // headers in the route are present in the request with the same values (or based on presence if - // the value field is not in the config). 
Note that this only applies for Thrift transports and/or - // protocols that support headers. + // If specified, the route must have the service name as the request method name prefix. As a + // special case, an empty string matches any service name. Only relevant when service + // multiplexing. string service_name = 2; } + + // Inverts whatever matching is done in the :ref:`method_name + // ` or + // :ref:`service_name + // ` fields. + // Cannot be combined with wildcard matching as that would result in routes never being matched. + // + // .. note:: + // + // This does not invert matching done as part of the :ref:`headers field + // ` field. To + // invert header matching, see :ref:`invert_match + // `. + bool invert = 3; + + // Specifies a set of headers that the route should match on. The router will check the request’s + // headers against all the specified headers in the route config. A match will happen if all the + // headers in the route are present in the request with the same values (or based on presence if + // the value field is not in the config). Note that this only applies for Thrift transports and/or + // protocols that support headers. + repeated config.route.v3.HeaderMatcher headers = 4; } // [#next-free-field: 7] @@ -86,42 +86,44 @@ message RouteAction { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.thrift_proxy.v2alpha1.RouteAction"; - // Indicates a single upstream cluster to which the request should be routed - // to. - config.core.v3.Metadata metadata_match = 3; - - // Multiple upstream clusters can be specified for a given route. The - // request is routed to one of the upstream clusters based on weights - // assigned to each cluster. - repeated config.route.v3.RateLimit rate_limits = 4; - - // Envoy will determine the cluster to route to by reading the value of the - // Thrift header named by cluster_header from the request headers. 
If the - // header is not found or the referenced cluster does not exist Envoy will - // respond with an unknown method exception or an internal error exception, - // respectively. - bool strip_service_name = 5; - oneof cluster_specifier { option (validate.required) = true; - // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in - // the upstream cluster with metadata matching what is set in this field will be considered. - // Note that this will be merged with what's provided in :ref:`WeightedCluster.metadata_match - // `, - // with values there taking precedence. Keys and values should be provided under the "envoy.lb" - // metadata key. + // Indicates a single upstream cluster to which the request should be routed + // to. string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; - // Specifies a set of rate limit configurations that could be applied to the route. - // N.B. Thrift service or method name matching can be achieved by specifying a RequestHeaders - // action with the header name ":method-name". + // Multiple upstream clusters can be specified for a given route. The + // request is routed to one of the upstream clusters based on weights + // assigned to each cluster. WeightedCluster weighted_clusters = 2; - // Strip the service prefix from the method name, if there's a prefix. For - // example, the method call Service:method would end up being just method. - string cluster_header = 6 [(validate.rules).string = {min_bytes: 1}]; + // Envoy will determine the cluster to route to by reading the value of the + // Thrift header named by cluster_header from the request headers. If the + // header is not found or the referenced cluster does not exist Envoy will + // respond with an unknown method exception or an internal error exception, + // respectively. 
+ string cluster_header = 6 [ + (validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_VALUE strict: false} + ]; } + + // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in + // the upstream cluster with metadata matching what is set in this field will be considered. + // Note that this will be merged with what's provided in :ref:`WeightedCluster.metadata_match + // `, + // with values there taking precedence. Keys and values should be provided under the "envoy.lb" + // metadata key. + config.core.v3.Metadata metadata_match = 3; + + // Specifies a set of rate limit configurations that could be applied to the route. + // N.B. Thrift service or method name matching can be achieved by specifying a RequestHeaders + // action with the header name ":method-name". + repeated config.route.v3.RateLimit rate_limits = 4; + + // Strip the service prefix from the method name, if there's a prefix. For + // example, the method call Service:method would end up being just method. + bool strip_service_name = 5; } // Allows for specification of multiple upstream clusters along with weights that indicate the diff --git a/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v4alpha/BUILD new file mode 100644 index 0000000000000..9ec74c0a9b83a --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v4alpha/BUILD @@ -0,0 +1,14 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/core/v4alpha:pkg", + "//envoy/config/route/v4alpha:pkg", + "//envoy/extensions/filters/network/thrift_proxy/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v4alpha/route.proto b/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v4alpha/route.proto new file mode 100644 index 0000000000000..374cc131ddf83 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v4alpha/route.proto @@ -0,0 +1,159 @@ +syntax = "proto3"; + +package envoy.extensions.filters.network.thrift_proxy.v4alpha; + +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/config/route/v4alpha/route_components.proto"; + +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.network.thrift_proxy.v4alpha"; +option java_outer_classname = "RouteProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Thrift Proxy Route Configuration] +// Thrift Proxy :ref:`configuration overview `. + +message RouteConfiguration { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.thrift_proxy.v3.RouteConfiguration"; + + // The name of the route configuration. Reserved for future use in asynchronous route discovery. + string name = 1; + + // The list of routes that will be matched, in order, against incoming requests. The first route + // that matches will be used. 
+ repeated Route routes = 2; +} + +message Route { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.thrift_proxy.v3.Route"; + + // Route matching parameters. + RouteMatch match = 1 [(validate.rules).message = {required: true}]; + + // Route request to some upstream cluster. + RouteAction route = 2 [(validate.rules).message = {required: true}]; +} + +message RouteMatch { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.thrift_proxy.v3.RouteMatch"; + + oneof match_specifier { + option (validate.required) = true; + + // If specified, the route must exactly match the request method name. As a special case, an + // empty string matches any request method name. + string method_name = 1; + + // If specified, the route must have the service name as the request method name prefix. As a + // special case, an empty string matches any service name. Only relevant when service + // multiplexing. + string service_name = 2; + } + + // Inverts whatever matching is done in the :ref:`method_name + // ` or + // :ref:`service_name + // ` fields. + // Cannot be combined with wildcard matching as that would result in routes never being matched. + // + // .. note:: + // + // This does not invert matching done as part of the :ref:`headers field + // ` field. To + // invert header matching, see :ref:`invert_match + // `. + bool invert = 3; + + // Specifies a set of headers that the route should match on. The router will check the request’s + // headers against all the specified headers in the route config. A match will happen if all the + // headers in the route are present in the request with the same values (or based on presence if + // the value field is not in the config). Note that this only applies for Thrift transports and/or + // protocols that support headers. 
+ repeated config.route.v4alpha.HeaderMatcher headers = 4; +} + +// [#next-free-field: 7] +message RouteAction { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.thrift_proxy.v3.RouteAction"; + + oneof cluster_specifier { + option (validate.required) = true; + + // Indicates a single upstream cluster to which the request should be routed + // to. + string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Multiple upstream clusters can be specified for a given route. The + // request is routed to one of the upstream clusters based on weights + // assigned to each cluster. + WeightedCluster weighted_clusters = 2; + + // Envoy will determine the cluster to route to by reading the value of the + // Thrift header named by cluster_header from the request headers. If the + // header is not found or the referenced cluster does not exist Envoy will + // respond with an unknown method exception or an internal error exception, + // respectively. + string cluster_header = 6 [ + (validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_VALUE strict: false} + ]; + } + + // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in + // the upstream cluster with metadata matching what is set in this field will be considered. + // Note that this will be merged with what's provided in :ref:`WeightedCluster.metadata_match + // `, + // with values there taking precedence. Keys and values should be provided under the "envoy.lb" + // metadata key. + config.core.v4alpha.Metadata metadata_match = 3; + + // Specifies a set of rate limit configurations that could be applied to the route. + // N.B. Thrift service or method name matching can be achieved by specifying a RequestHeaders + // action with the header name ":method-name". + repeated config.route.v4alpha.RateLimit rate_limits = 4; + + // Strip the service prefix from the method name, if there's a prefix. 
For + // example, the method call Service:method would end up being just method. + bool strip_service_name = 5; +} + +// Allows for specification of multiple upstream clusters along with weights that indicate the +// percentage of traffic to be forwarded to each cluster. The router selects an upstream cluster +// based on these weights. +message WeightedCluster { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.thrift_proxy.v3.WeightedCluster"; + + message ClusterWeight { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.thrift_proxy.v3.WeightedCluster.ClusterWeight"; + + // Name of the upstream cluster. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // When a request matches the route, the choice of an upstream cluster is determined by its + // weight. The sum of weights across all entries in the clusters array determines the total + // weight. + google.protobuf.UInt32Value weight = 2 [(validate.rules).uint32 = {gte: 1}]; + + // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in + // the upstream cluster with metadata matching what is set in this field, combined with what's + // provided in :ref:`RouteAction's metadata_match + // `, + // will be considered. Values here will take precedence. Keys and values should be provided + // under the "envoy.lb" metadata key. + config.core.v4alpha.Metadata metadata_match = 3; + } + + // Specifies one or more upstream clusters associated with the route. 
+ repeated ClusterWeight clusters = 1 [(validate.rules).repeated = {min_items: 1}]; +} diff --git a/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v4alpha/thrift_proxy.proto b/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v4alpha/thrift_proxy.proto new file mode 100644 index 0000000000000..6bf055da3ce65 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v4alpha/thrift_proxy.proto @@ -0,0 +1,130 @@ +syntax = "proto3"; + +package envoy.extensions.filters.network.thrift_proxy.v4alpha; + +import "envoy/extensions/filters/network/thrift_proxy/v4alpha/route.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/struct.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.network.thrift_proxy.v4alpha"; +option java_outer_classname = "ThriftProxyProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Thrift Proxy] +// Thrift Proxy :ref:`configuration overview `. +// [#extension: envoy.filters.network.thrift_proxy] + +// Thrift transport types supported by Envoy. +enum TransportType { + // For downstream connections, the Thrift proxy will attempt to determine which transport to use. + // For upstream connections, the Thrift proxy will use same transport as the downstream + // connection. + AUTO_TRANSPORT = 0; + + // The Thrift proxy will use the Thrift framed transport. + FRAMED = 1; + + // The Thrift proxy will use the Thrift unframed transport. + UNFRAMED = 2; + + // The Thrift proxy will assume the client is using the Thrift header transport. + HEADER = 3; +} + +// Thrift Protocol types supported by Envoy. +enum ProtocolType { + // For downstream connections, the Thrift proxy will attempt to determine which protocol to use. 
+ // Note that the older, non-strict (or lax) binary protocol is not included in automatic protocol + // detection. For upstream connections, the Thrift proxy will use the same protocol as the + // downstream connection. + AUTO_PROTOCOL = 0; + + // The Thrift proxy will use the Thrift binary protocol. + BINARY = 1; + + // The Thrift proxy will use Thrift non-strict binary protocol. + LAX_BINARY = 2; + + // The Thrift proxy will use the Thrift compact protocol. + COMPACT = 3; + + // The Thrift proxy will use the Thrift "Twitter" protocol implemented by the finagle library. + TWITTER = 4; +} + +// [#next-free-field: 6] +message ThriftProxy { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.thrift_proxy.v3.ThriftProxy"; + + // Supplies the type of transport that the Thrift proxy should use. Defaults to + // :ref:`AUTO_TRANSPORT`. + TransportType transport = 2 [(validate.rules).enum = {defined_only: true}]; + + // Supplies the type of protocol that the Thrift proxy should use. Defaults to + // :ref:`AUTO_PROTOCOL`. + ProtocolType protocol = 3 [(validate.rules).enum = {defined_only: true}]; + + // The human readable prefix to use when emitting statistics. + string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + + // The route table for the connection manager is static and is specified in this property. + RouteConfiguration route_config = 4; + + // A list of individual Thrift filters that make up the filter chain for requests made to the + // Thrift proxy. Order matters as the filters are processed sequentially. For backwards + // compatibility, if no thrift_filters are specified, a default Thrift router filter + // (`envoy.filters.thrift.router`) is used. + repeated ThriftFilter thrift_filters = 5; +} + +// ThriftFilter configures a Thrift filter. 
+message ThriftFilter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.thrift_proxy.v3.ThriftFilter"; + + reserved 2; + + reserved "config"; + + // The name of the filter to instantiate. The name must match a supported + // filter. The built-in filters are: + // + // [#comment:TODO(zuercher): Auto generate the following list] + // * :ref:`envoy.filters.thrift.router ` + // * :ref:`envoy.filters.thrift.rate_limit ` + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Filter specific configuration which depends on the filter being instantiated. See the supported + // filters for further documentation. + oneof config_type { + google.protobuf.Any typed_config = 3; + } +} + +// ThriftProtocolOptions specifies Thrift upstream protocol options. This object is used in +// in +// :ref:`typed_extension_protocol_options`, +// keyed by the name `envoy.filters.network.thrift_proxy`. +message ThriftProtocolOptions { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.thrift_proxy.v3.ThriftProtocolOptions"; + + // Supplies the type of transport that the Thrift proxy should use for upstream connections. + // Selecting + // :ref:`AUTO_TRANSPORT`, + // which is the default, causes the proxy to use the same transport as the downstream connection. + TransportType transport = 1 [(validate.rules).enum = {defined_only: true}]; + + // Supplies the type of protocol that the Thrift proxy should use for upstream connections. + // Selecting + // :ref:`AUTO_PROTOCOL`, + // which is the default, causes the proxy to use the same protocol as the downstream connection. 
+ ProtocolType protocol = 2 [(validate.rules).enum = {defined_only: true}]; +} diff --git a/generated_api_shadow/envoy/extensions/filters/network/wasm/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/network/wasm/v3/BUILD new file mode 100644 index 0000000000000..8bad369e35113 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/network/wasm/v3/BUILD @@ -0,0 +1,12 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/extensions/wasm/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/extensions/filters/network/wasm/v3/wasm.proto b/generated_api_shadow/envoy/extensions/filters/network/wasm/v3/wasm.proto new file mode 100644 index 0000000000000..131582762b590 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/network/wasm/v3/wasm.proto @@ -0,0 +1,20 @@ +syntax = "proto3"; + +package envoy.extensions.filters.network.wasm.v3; + +import "envoy/extensions/wasm/v3/wasm.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.network.wasm.v3"; +option java_outer_classname = "WasmProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [[#not-implemented-hide:] +message Wasm { + // General Plugin configuration. 
+ envoy.extensions.wasm.v3.PluginConfig config = 1; +} diff --git a/generated_api_shadow/envoy/extensions/filter/udp/dns_filter/v3alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/udp/dns_filter/v3alpha/BUILD similarity index 84% rename from generated_api_shadow/envoy/extensions/filter/udp/dns_filter/v3alpha/BUILD rename to generated_api_shadow/envoy/extensions/filters/udp/dns_filter/v3alpha/BUILD index d011b4d830ad4..dbf0a33e662e9 100644 --- a/generated_api_shadow/envoy/extensions/filter/udp/dns_filter/v3alpha/BUILD +++ b/generated_api_shadow/envoy/extensions/filters/udp/dns_filter/v3alpha/BUILD @@ -7,7 +7,6 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/core/v3:pkg", - "//envoy/config/filter/udp/dns_filter/v2alpha:pkg", "//envoy/data/dns/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], diff --git a/generated_api_shadow/envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.proto b/generated_api_shadow/envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.proto new file mode 100644 index 0000000000000..32103540c1d2b --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.proto @@ -0,0 +1,76 @@ +syntax = "proto3"; + +package envoy.extensions.filters.udp.dns_filter.v3alpha; + +import "envoy/config/core/v3/address.proto"; +import "envoy/config/core/v3/base.proto"; +import "envoy/data/dns/v3/dns_table.proto"; + +import "google/protobuf/duration.proto"; + +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.udp.dns_filter.v3alpha"; +option java_outer_classname = "DnsFilterProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).work_in_progress = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: DNS Filter] +// DNS Filter :ref:`configuration overview `. 
+// [#extension: envoy.filters.udp_listener.dns_filter] + +// Configuration for the DNS filter. +message DnsFilterConfig { + // This message contains the configuration for the DNS Filter operating + // in a server context. This message will contain the virtual hosts and + // associated addresses with which Envoy will respond to queries + message ServerContextConfig { + oneof config_source { + option (validate.required) = true; + + // Load the configuration specified from the control plane + data.dns.v3.DnsTable inline_dns_table = 1; + + // Seed the filter configuration from an external path. This source + // is a yaml formatted file that contains the DnsTable driving Envoy's + // responses to DNS queries + config.core.v3.DataSource external_dns_table = 2; + } + } + + // This message contains the configuration for the DNS Filter operating + // in a client context. This message will contain the timeouts, retry, + // and forwarding configuration for Envoy to make DNS requests to other + // resolvers + message ClientContextConfig { + // Sets the maximum time we will wait for the upstream query to complete + // We allow 5s for the upstream resolution to complete, so the minimum + // value here is 1. Note that the total latency for a failed query is the + // number of retries multiplied by the resolver_timeout. + google.protobuf.Duration resolver_timeout = 1 [(validate.rules).duration = {gte {seconds: 1}}]; + + // A list of DNS servers to which we can forward queries. If not + // specified, Envoy will use the ambient DNS resolvers in the + // system. + repeated config.core.v3.Address upstream_resolvers = 2; + + // Controls how many outstanding external lookup contexts the filter tracks. 
+ // The context structure allows the filter to respond to every query even if the external + // resolution times out or is otherwise unsuccessful + uint64 max_pending_lookups = 3 [(validate.rules).uint64 = {gte: 1}]; + } + + // The stat prefix used when emitting DNS filter statistics + string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; + + // Server context configuration contains the data that the filter uses to respond + // to DNS requests. + ServerContextConfig server_config = 2; + + // Client context configuration controls Envoy's behavior when it must use external + // resolvers to answer a query. This object is optional and if omitted instructs + // the filter to resolve queries from the data in the server_config + ClientContextConfig client_config = 3; +} diff --git a/generated_api_shadow/envoy/extensions/filters/udp/dns_filter/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/udp/dns_filter/v4alpha/BUILD new file mode 100644 index 0000000000000..f869cf5ac123a --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/udp/dns_filter/v4alpha/BUILD @@ -0,0 +1,14 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/core/v4alpha:pkg", + "//envoy/data/dns/v4alpha:pkg", + "//envoy/extensions/filters/udp/dns_filter/v3alpha:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/extensions/filters/udp/dns_filter/v4alpha/dns_filter.proto b/generated_api_shadow/envoy/extensions/filters/udp/dns_filter/v4alpha/dns_filter.proto new file mode 100644 index 0000000000000..54615b8b93ed8 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/udp/dns_filter/v4alpha/dns_filter.proto @@ -0,0 +1,86 @@ +syntax = "proto3"; + +package envoy.extensions.filters.udp.dns_filter.v4alpha; + +import "envoy/config/core/v4alpha/address.proto"; +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/data/dns/v4alpha/dns_table.proto"; + +import "google/protobuf/duration.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.udp.dns_filter.v4alpha"; +option java_outer_classname = "DnsFilterProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).work_in_progress = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: DNS Filter] +// DNS Filter :ref:`configuration overview `. +// [#extension: envoy.filters.udp_listener.dns_filter] + +// Configuration for the DNS filter. +message DnsFilterConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.udp.dns_filter.v3alpha.DnsFilterConfig"; + + // This message contains the configuration for the DNS Filter operating + // in a server context. 
This message will contain the virtual hosts and + // associated addresses with which Envoy will respond to queries + message ServerContextConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.udp.dns_filter.v3alpha.DnsFilterConfig.ServerContextConfig"; + + oneof config_source { + option (validate.required) = true; + + // Load the configuration specified from the control plane + data.dns.v4alpha.DnsTable inline_dns_table = 1; + + // Seed the filter configuration from an external path. This source + // is a yaml formatted file that contains the DnsTable driving Envoy's + // responses to DNS queries + config.core.v4alpha.DataSource external_dns_table = 2; + } + } + + // This message contains the configuration for the DNS Filter operating + // in a client context. This message will contain the timeouts, retry, + // and forwarding configuration for Envoy to make DNS requests to other + // resolvers + message ClientContextConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.udp.dns_filter.v3alpha.DnsFilterConfig.ClientContextConfig"; + + // Sets the maximum time we will wait for the upstream query to complete + // We allow 5s for the upstream resolution to complete, so the minimum + // value here is 1. Note that the total latency for a failed query is the + // number of retries multiplied by the resolver_timeout. + google.protobuf.Duration resolver_timeout = 1 [(validate.rules).duration = {gte {seconds: 1}}]; + + // A list of DNS servers to which we can forward queries. If not + // specified, Envoy will use the ambient DNS resolvers in the + // system. + repeated config.core.v4alpha.Address upstream_resolvers = 2; + + // Controls how many outstanding external lookup contexts the filter tracks. 
+ // The context structure allows the filter to respond to every query even if the external + // resolution times out or is otherwise unsuccessful + uint64 max_pending_lookups = 3 [(validate.rules).uint64 = {gte: 1}]; + } + + // The stat prefix used when emitting DNS filter statistics + string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; + + // Server context configuration contains the data that the filter uses to respond + // to DNS requests. + ServerContextConfig server_config = 2; + + // Client context configuration controls Envoy's behavior when it must use external + // resolvers to answer a query. This object is optional and if omitted instructs + // the filter to resolve queries from the data in the server_config + ClientContextConfig client_config = 3; +} diff --git a/generated_api_shadow/envoy/extensions/filters/udp/udp_proxy/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/udp/udp_proxy/v3/BUILD new file mode 100644 index 0000000000000..c9a0d31060397 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/udp/udp_proxy/v3/BUILD @@ -0,0 +1,12 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/filter/udp/udp_proxy/v2alpha:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/extensions/filters/udp/udp_proxy/v3/udp_proxy.proto b/generated_api_shadow/envoy/extensions/filters/udp/udp_proxy/v3/udp_proxy.proto new file mode 100644 index 0000000000000..43d2c56c06738 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/udp/udp_proxy/v3/udp_proxy.proto @@ -0,0 +1,38 @@ +syntax = "proto3"; + +package envoy.extensions.filters.udp.udp_proxy.v3; + +import "google/protobuf/duration.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.udp.udp_proxy.v3"; +option java_outer_classname = "UdpProxyProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: UDP proxy] +// UDP proxy :ref:`configuration overview `. +// [#extension: envoy.filters.udp_listener.udp_proxy] + +// Configuration for the UDP proxy filter. +message UdpProxyConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.filter.udp.udp_proxy.v2alpha.UdpProxyConfig"; + + // The stat prefix used when emitting UDP proxy filter stats. + string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + + oneof route_specifier { + option (validate.required) = true; + + // The upstream cluster to connect to. + string cluster = 2 [(validate.rules).string = {min_bytes: 1}]; + } + + // The idle timeout for sessions. Idle is defined as no datagrams between received or sent by + // the session. The default if not specified is 1 minute. 
+ google.protobuf.Duration idle_timeout = 3; +} diff --git a/generated_api_shadow/envoy/extensions/internal_redirect/allow_listed_routes/v3/BUILD b/generated_api_shadow/envoy/extensions/internal_redirect/allow_listed_routes/v3/BUILD new file mode 100644 index 0000000000000..ef3541ebcb1df --- /dev/null +++ b/generated_api_shadow/envoy/extensions/internal_redirect/allow_listed_routes/v3/BUILD @@ -0,0 +1,9 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/generated_api_shadow/envoy/extensions/internal_redirect/allow_listed_routes/v3/allow_listed_routes_config.proto b/generated_api_shadow/envoy/extensions/internal_redirect/allow_listed_routes/v3/allow_listed_routes_config.proto new file mode 100644 index 0000000000000..a6da5b0f5d9b6 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/internal_redirect/allow_listed_routes/v3/allow_listed_routes_config.proto @@ -0,0 +1,24 @@ +syntax = "proto3"; + +package envoy.extensions.internal_redirect.allow_listed_routes.v3; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.internal_redirect.allow_listed_routes.v3"; +option java_outer_classname = "AllowListedRoutesConfigProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Allow listed routes internal redirect predicate] + +// An internal redirect predicate that accepts only explicitly allowed target routes. +// [#extension: envoy.internal_redirect_predicates.allow_listed_routes] +message AllowListedRoutesConfig { + // The list of routes that's allowed as redirect target by this predicate, + // identified by the route's :ref:`name `. 
+ // Empty route names are not allowed. + repeated string allowed_route_names = 1 + [(validate.rules).repeated = {items {string {min_len: 1}}}]; +} diff --git a/generated_api_shadow/envoy/extensions/internal_redirect/previous_routes/v3/BUILD b/generated_api_shadow/envoy/extensions/internal_redirect/previous_routes/v3/BUILD new file mode 100644 index 0000000000000..ef3541ebcb1df --- /dev/null +++ b/generated_api_shadow/envoy/extensions/internal_redirect/previous_routes/v3/BUILD @@ -0,0 +1,9 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/generated_api_shadow/envoy/extensions/internal_redirect/previous_routes/v3/previous_routes_config.proto b/generated_api_shadow/envoy/extensions/internal_redirect/previous_routes/v3/previous_routes_config.proto new file mode 100644 index 0000000000000..6cc5fba871ea0 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/internal_redirect/previous_routes/v3/previous_routes_config.proto @@ -0,0 +1,19 @@ +syntax = "proto3"; + +package envoy.extensions.internal_redirect.previous_routes.v3; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.internal_redirect.previous_routes.v3"; +option java_outer_classname = "PreviousRoutesConfigProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Previous routes internal redirect predicate] + +// An internal redirect predicate that rejects redirect targets that are pointing +// to a route that has been followed by a previous redirect from the current route. 
+// [#extension: envoy.internal_redirect_predicates.previous_routes] +message PreviousRoutesConfig { +} diff --git a/generated_api_shadow/envoy/extensions/internal_redirect/safe_cross_scheme/v3/BUILD b/generated_api_shadow/envoy/extensions/internal_redirect/safe_cross_scheme/v3/BUILD new file mode 100644 index 0000000000000..ef3541ebcb1df --- /dev/null +++ b/generated_api_shadow/envoy/extensions/internal_redirect/safe_cross_scheme/v3/BUILD @@ -0,0 +1,9 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/generated_api_shadow/envoy/extensions/internal_redirect/safe_cross_scheme/v3/safe_cross_scheme_config.proto b/generated_api_shadow/envoy/extensions/internal_redirect/safe_cross_scheme/v3/safe_cross_scheme_config.proto new file mode 100644 index 0000000000000..54cec2f09bbba --- /dev/null +++ b/generated_api_shadow/envoy/extensions/internal_redirect/safe_cross_scheme/v3/safe_cross_scheme_config.proto @@ -0,0 +1,24 @@ +syntax = "proto3"; + +package envoy.extensions.internal_redirect.safe_cross_scheme.v3; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.internal_redirect.safe_cross_scheme.v3"; +option java_outer_classname = "SafeCrossSchemeConfigProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: SafeCrossScheme internal redirect predicate] + +// An internal redirect predicate that checks the scheme between the +// downstream url and the redirect target url and allows a) same scheme +// redirect and b) safe cross scheme redirect, which means if the downstream +// scheme is HTTPS, both HTTPS and HTTP redirect targets are allowed, but if the +// 
downstream scheme is HTTP, only HTTP redirect targets are allowed. +// [#extension: +// envoy.internal_redirect_predicates.safe_cross_scheme] +message SafeCrossSchemeConfig { +} diff --git a/generated_api_shadow/envoy/extensions/network/socket_interface/v3/BUILD b/generated_api_shadow/envoy/extensions/network/socket_interface/v3/BUILD new file mode 100644 index 0000000000000..ef3541ebcb1df --- /dev/null +++ b/generated_api_shadow/envoy/extensions/network/socket_interface/v3/BUILD @@ -0,0 +1,9 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/generated_api_shadow/envoy/extensions/network/socket_interface/v3/default_socket_interface.proto b/generated_api_shadow/envoy/extensions/network/socket_interface/v3/default_socket_interface.proto new file mode 100644 index 0000000000000..d2c747ec49fb1 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/network/socket_interface/v3/default_socket_interface.proto @@ -0,0 +1,17 @@ +syntax = "proto3"; + +package envoy.extensions.network.socket_interface.v3; + +import "udpa/annotations/status.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.network.socket_interface.v3"; +option java_outer_classname = "DefaultSocketInterfaceProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Default Socket Interface configuration] + +// Configuration for default socket interface that relies on OS dependent syscall to create +// sockets. 
+message DefaultSocketInterface { +} diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/proxy_protocol/v3/BUILD b/generated_api_shadow/envoy/extensions/transport_sockets/proxy_protocol/v3/BUILD new file mode 100644 index 0000000000000..2c3dad6453b65 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/transport_sockets/proxy_protocol/v3/BUILD @@ -0,0 +1,12 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/core/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/proxy_protocol/v3/upstream_proxy_protocol.proto b/generated_api_shadow/envoy/extensions/transport_sockets/proxy_protocol/v3/upstream_proxy_protocol.proto new file mode 100644 index 0000000000000..c6c2ee9798d6c --- /dev/null +++ b/generated_api_shadow/envoy/extensions/transport_sockets/proxy_protocol/v3/upstream_proxy_protocol.proto @@ -0,0 +1,26 @@ +syntax = "proto3"; + +package envoy.extensions.transport_sockets.proxy_protocol.v3; + +import "envoy/config/core/v3/base.proto"; +import "envoy/config/core/v3/proxy_protocol.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.proxy_protocol.v3"; +option java_outer_classname = "UpstreamProxyProtocolProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Upstream Proxy Protocol] +// [#extension: envoy.transport_sockets.upstream_proxy_protocol] +// [#not-implemented-hide:] +// Configuration for PROXY protocol socket +message ProxyProtocolUpstreamTransport { + config.core.v3.ProxyProtocolConfig config = 1; + + // The underlying transport socket being wrapped. 
+ config.core.v3.TransportSocket transport_socket = 2 [(validate.rules).message = {required: true}]; +} diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/quic/v3/BUILD b/generated_api_shadow/envoy/extensions/transport_sockets/quic/v3/BUILD new file mode 100644 index 0000000000000..e95e504f3caf3 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/transport_sockets/quic/v3/BUILD @@ -0,0 +1,12 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/extensions/transport_sockets/tls/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/quic/v3/quic_transport.proto b/generated_api_shadow/envoy/extensions/transport_sockets/quic/v3/quic_transport.proto new file mode 100644 index 0000000000000..b17e2262bc1e7 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/transport_sockets/quic/v3/quic_transport.proto @@ -0,0 +1,28 @@ +syntax = "proto3"; + +package envoy.extensions.transport_sockets.quic.v3; + +import "envoy/extensions/transport_sockets/tls/v3/tls.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.quic.v3"; +option java_outer_classname = "QuicTransportProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: quic transport] +// [#extension: envoy.transport_sockets.quic] + +// Configuration for Downstream QUIC transport socket. This provides Google's implementation of Google QUIC and IETF QUIC to Envoy. 
+message QuicDownstreamTransport { + tls.v3.DownstreamTlsContext downstream_tls_context = 1 + [(validate.rules).message = {required: true}]; +} + +// Configuration for Upstream QUIC transport socket. This provides Google's implementation of Google QUIC and IETF QUIC to Envoy. +message QuicUpstreamTransport { + tls.v3.UpstreamTlsContext upstream_tls_context = 1 [(validate.rules).message = {required: true}]; +} diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/quic/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/transport_sockets/quic/v4alpha/BUILD new file mode 100644 index 0000000000000..47c94aa706ee8 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/transport_sockets/quic/v4alpha/BUILD @@ -0,0 +1,13 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/extensions/transport_sockets/quic/v3:pkg", + "//envoy/extensions/transport_sockets/tls/v4alpha:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/quic/v4alpha/quic_transport.proto b/generated_api_shadow/envoy/extensions/transport_sockets/quic/v4alpha/quic_transport.proto new file mode 100644 index 0000000000000..255bfe627b74c --- /dev/null +++ b/generated_api_shadow/envoy/extensions/transport_sockets/quic/v4alpha/quic_transport.proto @@ -0,0 +1,35 @@ +syntax = "proto3"; + +package envoy.extensions.transport_sockets.quic.v4alpha; + +import "envoy/extensions/transport_sockets/tls/v4alpha/tls.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.quic.v4alpha"; +option java_outer_classname = "QuicTransportProto"; +option java_multiple_files = true; +option 
(udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: quic transport] +// [#extension: envoy.transport_sockets.quic] + +// Configuration for Downstream QUIC transport socket. This provides Google's implementation of Google QUIC and IETF QUIC to Envoy. +message QuicDownstreamTransport { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.transport_sockets.quic.v3.QuicDownstreamTransport"; + + tls.v4alpha.DownstreamTlsContext downstream_tls_context = 1 + [(validate.rules).message = {required: true}]; +} + +// Configuration for Upstream QUIC transport socket. This provides Google's implementation of Google QUIC and IETF QUIC to Envoy. +message QuicUpstreamTransport { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.transport_sockets.quic.v3.QuicUpstreamTransport"; + + tls.v4alpha.UpstreamTlsContext upstream_tls_context = 1 + [(validate.rules).message = {required: true}]; +} diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/BUILD b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/BUILD index 62b69636c78cc..14187bea65a7d 100644 --- a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/BUILD +++ b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/BUILD @@ -10,5 +10,6 @@ api_proto_package( "//envoy/config/core/v3:pkg", "//envoy/type/matcher/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//udpa/core/v1:pkg", ], ) diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/cert.proto b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/cert.proto index 4121297ec1c33..cf5dc597aafb7 100644 --- a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/cert.proto +++ b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/cert.proto @@ -2,507 +2,12 @@ syntax = "proto3"; package envoy.extensions.transport_sockets.tls.v3; 
-import "envoy/config/core/v3/base.proto"; -import "envoy/config/core/v3/config_source.proto"; -import "envoy/type/matcher/v3/string.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/sensitive.proto"; import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; + +import public "envoy/extensions/transport_sockets/tls/v3/common.proto"; +import public "envoy/extensions/transport_sockets/tls/v3/secret.proto"; +import public "envoy/extensions/transport_sockets/tls/v3/tls.proto"; option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tls.v3"; option java_outer_classname = "CertProto"; option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Common TLS configuration] - -message TlsParameters { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.TlsParameters"; - - enum TlsProtocol { - // Envoy will choose the optimal TLS version. - TLS_AUTO = 0; - - // TLS 1.0 - TLSv1_0 = 1; - - // TLS 1.1 - TLSv1_1 = 2; - - // TLS 1.2 - TLSv1_2 = 3; - - // TLS 1.3 - TLSv1_3 = 4; - } - - // Minimum TLS protocol version. By default, it's ``TLSv1_2`` for clients and ``TLSv1_0`` for - // servers. - TlsProtocol tls_minimum_protocol_version = 1 [(validate.rules).enum = {defined_only: true}]; - - // Maximum TLS protocol version. By default, it's ``TLSv1_3`` for servers in non-FIPS builds, and - // ``TLSv1_2`` for clients and for servers using :ref:`BoringSSL FIPS `. - TlsProtocol tls_maximum_protocol_version = 2 [(validate.rules).enum = {defined_only: true}]; - - // If specified, the TLS listener will only support the specified `cipher list - // `_ - // when negotiating TLS 1.0-1.2 (this setting has no effect when negotiating TLS 1.3). 
If not - // specified, the default list will be used. - // - // In non-FIPS builds, the default cipher list is: - // - // .. code-block:: none - // - // [ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA-CHACHA20-POLY1305] - // [ECDHE-RSA-AES128-GCM-SHA256|ECDHE-RSA-CHACHA20-POLY1305] - // ECDHE-ECDSA-AES128-SHA - // ECDHE-RSA-AES128-SHA - // AES128-GCM-SHA256 - // AES128-SHA - // ECDHE-ECDSA-AES256-GCM-SHA384 - // ECDHE-RSA-AES256-GCM-SHA384 - // ECDHE-ECDSA-AES256-SHA - // ECDHE-RSA-AES256-SHA - // AES256-GCM-SHA384 - // AES256-SHA - // - // In builds using :ref:`BoringSSL FIPS `, the default cipher list is: - // - // .. code-block:: none - // - // ECDHE-ECDSA-AES128-GCM-SHA256 - // ECDHE-RSA-AES128-GCM-SHA256 - // ECDHE-ECDSA-AES128-SHA - // ECDHE-RSA-AES128-SHA - // AES128-GCM-SHA256 - // AES128-SHA - // ECDHE-ECDSA-AES256-GCM-SHA384 - // ECDHE-RSA-AES256-GCM-SHA384 - // ECDHE-ECDSA-AES256-SHA - // ECDHE-RSA-AES256-SHA - // AES256-GCM-SHA384 - // AES256-SHA - repeated string cipher_suites = 3; - - // If specified, the TLS connection will only support the specified ECDH - // curves. If not specified, the default curves will be used. - // - // In non-FIPS builds, the default curves are: - // - // .. code-block:: none - // - // X25519 - // P-256 - // - // In builds using :ref:`BoringSSL FIPS `, the default curve is: - // - // .. code-block:: none - // - // P-256 - repeated string ecdh_curves = 4; -} - -// BoringSSL private key method configuration. The private key methods are used for external -// (potentially asynchronous) signing and decryption operations. Some use cases for private key -// methods would be TPM support and TLS acceleration. -message PrivateKeyProvider { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.auth.PrivateKeyProvider"; - - // Private key method provider name. The name must match a - // supported private key method provider type. 
- string provider_name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // Private key method provider specific configuration. - oneof config_type { - google.protobuf.Any typed_config = 3 [(udpa.annotations.sensitive) = true]; - - google.protobuf.Struct hidden_envoy_deprecated_config = 2 - [deprecated = true, (udpa.annotations.sensitive) = true]; - } -} - -// [#next-free-field: 7] -message TlsCertificate { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.TlsCertificate"; - - // The TLS certificate chain. - config.core.v3.DataSource certificate_chain = 1; - - // The TLS private key. - config.core.v3.DataSource private_key = 2 [(udpa.annotations.sensitive) = true]; - - // BoringSSL private key method provider. This is an alternative to :ref:`private_key - // ` field. This can't be - // marked as ``oneof`` due to API compatibility reasons. Setting both :ref:`private_key - // ` and - // :ref:`private_key_provider - // ` fields will result in an - // error. - PrivateKeyProvider private_key_provider = 6; - - // The password to decrypt the TLS private key. If this field is not set, it is assumed that the - // TLS private key is not password encrypted. - config.core.v3.DataSource password = 3 [(udpa.annotations.sensitive) = true]; - - // [#not-implemented-hide:] - config.core.v3.DataSource ocsp_staple = 4; - - // [#not-implemented-hide:] - repeated config.core.v3.DataSource signed_certificate_timestamp = 5; -} - -message TlsSessionTicketKeys { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.auth.TlsSessionTicketKeys"; - - // Keys for encrypting and decrypting TLS session tickets. The - // first key in the array contains the key to encrypt all new sessions created by this context. - // All keys are candidates for decrypting received tickets. This allows for easy rotation of keys - // by, for example, putting the new key first, and the previous key second. 
- // - // If :ref:`session_ticket_keys ` - // is not specified, the TLS library will still support resuming sessions via tickets, but it will - // use an internally-generated and managed key, so sessions cannot be resumed across hot restarts - // or on different hosts. - // - // Each key must contain exactly 80 bytes of cryptographically-secure random data. For - // example, the output of ``openssl rand 80``. - // - // .. attention:: - // - // Using this feature has serious security considerations and risks. Improper handling of keys - // may result in loss of secrecy in connections, even if ciphers supporting perfect forward - // secrecy are used. See https://www.imperialviolet.org/2013/06/27/botchingpfs.html for some - // discussion. To minimize the risk, you must: - // - // * Keep the session ticket keys at least as secure as your TLS certificate private keys - // * Rotate session ticket keys at least daily, and preferably hourly - // * Always generate keys using a cryptographically-secure random data source - repeated config.core.v3.DataSource keys = 1 - [(validate.rules).repeated = {min_items: 1}, (udpa.annotations.sensitive) = true]; -} - -// [#next-free-field: 11] -message CertificateValidationContext { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.auth.CertificateValidationContext"; - - // Peer certificate verification mode. - enum TrustChainVerification { - // Perform default certificate verification (e.g., against CA / verification lists) - VERIFY_TRUST_CHAIN = 0; - - // Connections where the certificate fails verification will be permitted. - // For HTTP connections, the result of certificate verification can be used in route matching. ( - // see :ref:`validated ` ). - ACCEPT_UNTRUSTED = 1; - } - - // TLS certificate data containing certificate authority certificates to use in verifying - // a presented peer certificate (e.g. server certificate for clusters or client certificate - // for listeners). 
If not specified and a peer certificate is presented it will not be - // verified. By default, a client certificate is optional, unless one of the additional - // options (:ref:`require_client_certificate - // `, - // :ref:`verify_certificate_spki - // `, - // :ref:`verify_certificate_hash - // `, or - // :ref:`match_subject_alt_names - // `) is also - // specified. - // - // It can optionally contain certificate revocation lists, in which case Envoy will verify - // that the presented peer certificate has not been revoked by one of the included CRLs. - // - // See :ref:`the TLS overview ` for a list of common - // system CA locations. - config.core.v3.DataSource trusted_ca = 1; - - // An optional list of base64-encoded SHA-256 hashes. If specified, Envoy will verify that the - // SHA-256 of the DER-encoded Subject Public Key Information (SPKI) of the presented certificate - // matches one of the specified values. - // - // A base64-encoded SHA-256 of the Subject Public Key Information (SPKI) of the certificate - // can be generated with the following command: - // - // .. code-block:: bash - // - // $ openssl x509 -in path/to/client.crt -noout -pubkey - // | openssl pkey -pubin -outform DER - // | openssl dgst -sha256 -binary - // | openssl enc -base64 - // NvqYIYSbgK2vCJpQhObf77vv+bQWtc5ek5RIOwPiC9A= - // - // This is the format used in HTTP Public Key Pinning. - // - // When both: - // :ref:`verify_certificate_hash - // ` and - // :ref:`verify_certificate_spki - // ` are specified, - // a hash matching value from either of the lists will result in the certificate being accepted. - // - // .. attention:: - // - // This option is preferred over :ref:`verify_certificate_hash - // `, - // because SPKI is tied to a private key, so it doesn't change when the certificate - // is renewed using the same private key. 
- repeated string verify_certificate_spki = 3 - [(validate.rules).repeated = {items {string {min_bytes: 44 max_bytes: 44}}}]; - - // An optional list of hex-encoded SHA-256 hashes. If specified, Envoy will verify that - // the SHA-256 of the DER-encoded presented certificate matches one of the specified values. - // - // A hex-encoded SHA-256 of the certificate can be generated with the following command: - // - // .. code-block:: bash - // - // $ openssl x509 -in path/to/client.crt -outform DER | openssl dgst -sha256 | cut -d" " -f2 - // df6ff72fe9116521268f6f2dd4966f51df479883fe7037b39f75916ac3049d1a - // - // A long hex-encoded and colon-separated SHA-256 (a.k.a. "fingerprint") of the certificate - // can be generated with the following command: - // - // .. code-block:: bash - // - // $ openssl x509 -in path/to/client.crt -noout -fingerprint -sha256 | cut -d"=" -f2 - // DF:6F:F7:2F:E9:11:65:21:26:8F:6F:2D:D4:96:6F:51:DF:47:98:83:FE:70:37:B3:9F:75:91:6A:C3:04:9D:1A - // - // Both of those formats are acceptable. - // - // When both: - // :ref:`verify_certificate_hash - // ` and - // :ref:`verify_certificate_spki - // ` are specified, - // a hash matching value from either of the lists will result in the certificate being accepted. - repeated string verify_certificate_hash = 2 - [(validate.rules).repeated = {items {string {min_bytes: 64 max_bytes: 95}}}]; - - // An optional list of Subject Alternative name matchers. Envoy will verify that the - // Subject Alternative Name of the presented certificate matches one of the specified matches. - // - // When a certificate has wildcard DNS SAN entries, to match a specific client, it should be - // configured with exact match type in the :ref:`string matcher `. - // For example if the certificate has "\*.example.com" as DNS SAN entry, to allow only "api.example.com", - // it should be configured as shown below. - // - // .. code-block:: yaml - // - // match_subject_alt_names: - // exact: "api.example.com" - // - // .. 
attention:: - // - // Subject Alternative Names are easily spoofable and verifying only them is insecure, - // therefore this option must be used together with :ref:`trusted_ca - // `. - repeated type.matcher.v3.StringMatcher match_subject_alt_names = 9; - - // [#not-implemented-hide:] Must present a signed time-stamped OCSP response. - google.protobuf.BoolValue require_ocsp_staple = 5; - - // [#not-implemented-hide:] Must present signed certificate time-stamp. - google.protobuf.BoolValue require_signed_certificate_timestamp = 6; - - // An optional `certificate revocation list - // `_ - // (in PEM format). If specified, Envoy will verify that the presented peer - // certificate has not been revoked by this CRL. If this DataSource contains - // multiple CRLs, all of them will be used. - config.core.v3.DataSource crl = 7; - - // If specified, Envoy will not reject expired certificates. - bool allow_expired_certificate = 8; - - // Certificate trust chain verification mode. - TrustChainVerification trust_chain_verification = 10 - [(validate.rules).enum = {defined_only: true}]; - - repeated string hidden_envoy_deprecated_verify_subject_alt_name = 4 [deprecated = true]; -} - -// TLS context shared by both client and server TLS contexts. -// [#next-free-field: 9] -message CommonTlsContext { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.CommonTlsContext"; - - message CombinedCertificateValidationContext { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.auth.CommonTlsContext.CombinedCertificateValidationContext"; - - // How to validate peer certificates. - CertificateValidationContext default_validation_context = 1 - [(validate.rules).message = {required: true}]; - - // Config for fetching validation context via SDS API. - SdsSecretConfig validation_context_sds_secret_config = 2 - [(validate.rules).message = {required: true}]; - } - - reserved 5; - - // TLS protocol versions, cipher suites etc. 
- TlsParameters tls_params = 1; - - // :ref:`Multiple TLS certificates ` can be associated with the - // same context to allow both RSA and ECDSA certificates. - // - // Only a single TLS certificate is supported in client contexts. In server contexts, the first - // RSA certificate is used for clients that only support RSA and the first ECDSA certificate is - // used for clients that support ECDSA. - repeated TlsCertificate tls_certificates = 2; - - // Configs for fetching TLS certificates via SDS API. - repeated SdsSecretConfig tls_certificate_sds_secret_configs = 6 - [(validate.rules).repeated = {max_items: 1}]; - - // How to validate peer certificates. - repeated string alpn_protocols = 4; - - oneof validation_context_type { - // Config for fetching validation context via SDS API. - CertificateValidationContext validation_context = 3; - - // Combined certificate validation context holds a default CertificateValidationContext - // and SDS config. When SDS server returns dynamic CertificateValidationContext, both dynamic - // and default CertificateValidationContext are merged into a new CertificateValidationContext - // for validation. This merge is done by Message::MergeFrom(), so dynamic - // CertificateValidationContext overwrites singular fields in default - // CertificateValidationContext, and concatenates repeated fields to default - // CertificateValidationContext, and logical OR is applied to boolean fields. - SdsSecretConfig validation_context_sds_secret_config = 7; - - // Supplies the list of ALPN protocols that the listener should expose. In - // practice this is likely to be set to one of two values (see the - // :ref:`codec_type - // ` - // parameter in the HTTP connection manager for more information): - // - // * "h2,http/1.1" If the listener is going to support both HTTP/2 and HTTP/1.1. - // * "http/1.1" If the listener is only going to support HTTP/1.1. - // - // There is no default for this parameter. If empty, Envoy will not expose ALPN. 
- CombinedCertificateValidationContext combined_validation_context = 8; - } -} - -message UpstreamTlsContext { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.auth.UpstreamTlsContext"; - - // Common TLS context settings. - // - // .. attention:: - // - // Server certificate verification is not enabled by default. Configure - // :ref:`trusted_ca` to enable - // verification. - CommonTlsContext common_tls_context = 1; - - // SNI string to use when creating TLS backend connections. - string sni = 2 [(validate.rules).string = {max_bytes: 255}]; - - // If true, server-initiated TLS renegotiation will be allowed. - // - // .. attention:: - // - // TLS renegotiation is considered insecure and shouldn't be used unless absolutely necessary. - bool allow_renegotiation = 3; - - // Maximum number of session keys (Pre-Shared Keys for TLSv1.3+, Session IDs and Session Tickets - // for TLSv1.2 and older) to store for the purpose of session resumption. - // - // Defaults to 1, setting this to 0 disables session resumption. - google.protobuf.UInt32Value max_session_keys = 4; -} - -// [#next-free-field: 8] -message DownstreamTlsContext { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.auth.DownstreamTlsContext"; - - // Common TLS context settings. - CommonTlsContext common_tls_context = 1; - - // If specified, Envoy will reject connections without a valid client - // certificate. - google.protobuf.BoolValue require_client_certificate = 2; - - // If specified, Envoy will reject connections without a valid and matching SNI. - // [#not-implemented-hide:] - google.protobuf.BoolValue require_sni = 3; - - // TLS session ticket key settings. - google.protobuf.Duration session_timeout = 6 [(validate.rules).duration = { - lt {seconds: 4294967296} - gte {} - }]; - - oneof session_ticket_keys_type { - // Config for fetching TLS session ticket keys via SDS API. 
- TlsSessionTicketKeys session_ticket_keys = 4; - - // Config for controlling stateless TLS session resumption: setting this to true will cause the TLS - // server to not issue TLS session tickets for the purposes of stateless TLS session resumption. - // If set to false, the TLS server will issue TLS session tickets and encrypt/decrypt them using - // the keys specified through either :ref:`session_ticket_keys ` - // or :ref:`session_ticket_keys_sds_secret_config `. - // If this config is set to false and no keys are explicitly configured, the TLS server will issue - // TLS session tickets and encrypt/decrypt them using an internally-generated and managed key, with the - // implication that sessions cannot be resumed across hot restarts or on different hosts. - SdsSecretConfig session_ticket_keys_sds_secret_config = 5; - - // If specified, session_timeout will change maximum lifetime (in seconds) of TLS session - // Currently this value is used as a hint to `TLS session ticket lifetime (for TLSv1.2) - // ` - // only seconds could be specified (fractional seconds are going to be ignored). - bool disable_stateless_session_resumption = 7; - } -} - -message GenericSecret { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.GenericSecret"; - - // Secret of generic type and is available to filters. - config.core.v3.DataSource secret = 1 [(udpa.annotations.sensitive) = true]; -} - -message SdsSecretConfig { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.SdsSecretConfig"; - - // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. - // When both name and config are specified, then secret can be fetched and/or reloaded via - // SDS. When only name is specified, then secret will be loaded from static resources. 
- string name = 1; - - config.core.v3.ConfigSource sds_config = 2; -} - -// [#next-free-field: 6] -message Secret { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.Secret"; - - // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. - string name = 1; - - oneof type { - TlsCertificate tls_certificate = 2; - - TlsSessionTicketKeys session_ticket_keys = 3; - - CertificateValidationContext validation_context = 4; - - GenericSecret generic_secret = 5; - } -} diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/common.proto b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/common.proto new file mode 100644 index 0000000000000..417cf0054df42 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/common.proto @@ -0,0 +1,331 @@ +syntax = "proto3"; + +package envoy.extensions.transport_sockets.tls.v3; + +import "envoy/config/core/v3/base.proto"; +import "envoy/type/matcher/v3/string.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/sensitive.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tls.v3"; +option java_outer_classname = "CommonProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Common TLS configuration] + +message TlsParameters { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.TlsParameters"; + + enum TlsProtocol { + // Envoy will choose the optimal TLS version. + TLS_AUTO = 0; + + // TLS 1.0 + TLSv1_0 = 1; + + // TLS 1.1 + TLSv1_1 = 2; + + // TLS 1.2 + TLSv1_2 = 3; + + // TLS 1.3 + TLSv1_3 = 4; + } + + // Minimum TLS protocol version. 
By default, it's ``TLSv1_2`` for clients and ``TLSv1_0`` for + // servers. + TlsProtocol tls_minimum_protocol_version = 1 [(validate.rules).enum = {defined_only: true}]; + + // Maximum TLS protocol version. By default, it's ``TLSv1_2`` for clients and ``TLSv1_3`` for + // servers. + TlsProtocol tls_maximum_protocol_version = 2 [(validate.rules).enum = {defined_only: true}]; + + // If specified, the TLS listener will only support the specified `cipher list + // `_ + // when negotiating TLS 1.0-1.2 (this setting has no effect when negotiating TLS 1.3). If not + // specified, the default list will be used. + // + // In non-FIPS builds, the default cipher list is: + // + // .. code-block:: none + // + // [ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA-CHACHA20-POLY1305] + // [ECDHE-RSA-AES128-GCM-SHA256|ECDHE-RSA-CHACHA20-POLY1305] + // ECDHE-ECDSA-AES128-SHA + // ECDHE-RSA-AES128-SHA + // AES128-GCM-SHA256 + // AES128-SHA + // ECDHE-ECDSA-AES256-GCM-SHA384 + // ECDHE-RSA-AES256-GCM-SHA384 + // ECDHE-ECDSA-AES256-SHA + // ECDHE-RSA-AES256-SHA + // AES256-GCM-SHA384 + // AES256-SHA + // + // In builds using :ref:`BoringSSL FIPS `, the default cipher list is: + // + // .. code-block:: none + // + // ECDHE-ECDSA-AES128-GCM-SHA256 + // ECDHE-RSA-AES128-GCM-SHA256 + // ECDHE-ECDSA-AES128-SHA + // ECDHE-RSA-AES128-SHA + // AES128-GCM-SHA256 + // AES128-SHA + // ECDHE-ECDSA-AES256-GCM-SHA384 + // ECDHE-RSA-AES256-GCM-SHA384 + // ECDHE-ECDSA-AES256-SHA + // ECDHE-RSA-AES256-SHA + // AES256-GCM-SHA384 + // AES256-SHA + repeated string cipher_suites = 3; + + // If specified, the TLS connection will only support the specified ECDH + // curves. If not specified, the default curves will be used. + // + // In non-FIPS builds, the default curves are: + // + // .. code-block:: none + // + // X25519 + // P-256 + // + // In builds using :ref:`BoringSSL FIPS `, the default curve is: + // + // .. 
code-block:: none + // + // P-256 + repeated string ecdh_curves = 4; +} + +// BoringSSL private key method configuration. The private key methods are used for external +// (potentially asynchronous) signing and decryption operations. Some use cases for private key +// methods would be TPM support and TLS acceleration. +message PrivateKeyProvider { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.auth.PrivateKeyProvider"; + + // Private key method provider name. The name must match a + // supported private key method provider type. + string provider_name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Private key method provider specific configuration. + oneof config_type { + google.protobuf.Any typed_config = 3 [(udpa.annotations.sensitive) = true]; + + google.protobuf.Struct hidden_envoy_deprecated_config = 2 + [deprecated = true, (udpa.annotations.sensitive) = true]; + } +} + +// [#next-free-field: 7] +message TlsCertificate { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.TlsCertificate"; + + // The TLS certificate chain. + config.core.v3.DataSource certificate_chain = 1; + + // The TLS private key. + config.core.v3.DataSource private_key = 2 [(udpa.annotations.sensitive) = true]; + + // BoringSSL private key method provider. This is an alternative to :ref:`private_key + // ` field. This can't be + // marked as ``oneof`` due to API compatibility reasons. Setting both :ref:`private_key + // ` and + // :ref:`private_key_provider + // ` fields will result in an + // error. + PrivateKeyProvider private_key_provider = 6; + + // The password to decrypt the TLS private key. If this field is not set, it is assumed that the + // TLS private key is not password encrypted. 
+ config.core.v3.DataSource password = 3 [(udpa.annotations.sensitive) = true]; + + // [#not-implemented-hide:] + config.core.v3.DataSource ocsp_staple = 4; + + // [#not-implemented-hide:] + repeated config.core.v3.DataSource signed_certificate_timestamp = 5; +} + +message TlsSessionTicketKeys { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.auth.TlsSessionTicketKeys"; + + // Keys for encrypting and decrypting TLS session tickets. The + // first key in the array contains the key to encrypt all new sessions created by this context. + // All keys are candidates for decrypting received tickets. This allows for easy rotation of keys + // by, for example, putting the new key first, and the previous key second. + // + // If :ref:`session_ticket_keys ` + // is not specified, the TLS library will still support resuming sessions via tickets, but it will + // use an internally-generated and managed key, so sessions cannot be resumed across hot restarts + // or on different hosts. + // + // Each key must contain exactly 80 bytes of cryptographically-secure random data. For + // example, the output of ``openssl rand 80``. + // + // .. attention:: + // + // Using this feature has serious security considerations and risks. Improper handling of keys + // may result in loss of secrecy in connections, even if ciphers supporting perfect forward + // secrecy are used. See https://www.imperialviolet.org/2013/06/27/botchingpfs.html for some + // discussion. 
To minimize the risk, you must: + // + // * Keep the session ticket keys at least as secure as your TLS certificate private keys + // * Rotate session ticket keys at least daily, and preferably hourly + // * Always generate keys using a cryptographically-secure random data source + repeated config.core.v3.DataSource keys = 1 + [(validate.rules).repeated = {min_items: 1}, (udpa.annotations.sensitive) = true]; +} + +// [#next-free-field: 11] +message CertificateValidationContext { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.auth.CertificateValidationContext"; + + // Peer certificate verification mode. + enum TrustChainVerification { + // Perform default certificate verification (e.g., against CA / verification lists) + VERIFY_TRUST_CHAIN = 0; + + // Connections where the certificate fails verification will be permitted. + // For HTTP connections, the result of certificate verification can be used in route matching. ( + // see :ref:`validated ` ). + ACCEPT_UNTRUSTED = 1; + } + + // TLS certificate data containing certificate authority certificates to use in verifying + // a presented peer certificate (e.g. server certificate for clusters or client certificate + // for listeners). If not specified and a peer certificate is presented it will not be + // verified. By default, a client certificate is optional, unless one of the additional + // options (:ref:`require_client_certificate + // `, + // :ref:`verify_certificate_spki + // `, + // :ref:`verify_certificate_hash + // `, or + // :ref:`match_subject_alt_names + // `) is also + // specified. + // + // It can optionally contain certificate revocation lists, in which case Envoy will verify + // that the presented peer certificate has not been revoked by one of the included CRLs. + // + // See :ref:`the TLS overview ` for a list of common + // system CA locations. + config.core.v3.DataSource trusted_ca = 1; + + // An optional list of base64-encoded SHA-256 hashes. 
If specified, Envoy will verify that the + // SHA-256 of the DER-encoded Subject Public Key Information (SPKI) of the presented certificate + // matches one of the specified values. + // + // A base64-encoded SHA-256 of the Subject Public Key Information (SPKI) of the certificate + // can be generated with the following command: + // + // .. code-block:: bash + // + // $ openssl x509 -in path/to/client.crt -noout -pubkey + // | openssl pkey -pubin -outform DER + // | openssl dgst -sha256 -binary + // | openssl enc -base64 + // NvqYIYSbgK2vCJpQhObf77vv+bQWtc5ek5RIOwPiC9A= + // + // This is the format used in HTTP Public Key Pinning. + // + // When both: + // :ref:`verify_certificate_hash + // ` and + // :ref:`verify_certificate_spki + // ` are specified, + // a hash matching value from either of the lists will result in the certificate being accepted. + // + // .. attention:: + // + // This option is preferred over :ref:`verify_certificate_hash + // `, + // because SPKI is tied to a private key, so it doesn't change when the certificate + // is renewed using the same private key. + repeated string verify_certificate_spki = 3 + [(validate.rules).repeated = {items {string {min_bytes: 44 max_bytes: 44}}}]; + + // An optional list of hex-encoded SHA-256 hashes. If specified, Envoy will verify that + // the SHA-256 of the DER-encoded presented certificate matches one of the specified values. + // + // A hex-encoded SHA-256 of the certificate can be generated with the following command: + // + // .. code-block:: bash + // + // $ openssl x509 -in path/to/client.crt -outform DER | openssl dgst -sha256 | cut -d" " -f2 + // df6ff72fe9116521268f6f2dd4966f51df479883fe7037b39f75916ac3049d1a + // + // A long hex-encoded and colon-separated SHA-256 (a.k.a. "fingerprint") of the certificate + // can be generated with the following command: + // + // .. 
code-block:: bash + // + // $ openssl x509 -in path/to/client.crt -noout -fingerprint -sha256 | cut -d"=" -f2 + // DF:6F:F7:2F:E9:11:65:21:26:8F:6F:2D:D4:96:6F:51:DF:47:98:83:FE:70:37:B3:9F:75:91:6A:C3:04:9D:1A + // + // Both of those formats are acceptable. + // + // When both: + // :ref:`verify_certificate_hash + // ` and + // :ref:`verify_certificate_spki + // ` are specified, + // a hash matching value from either of the lists will result in the certificate being accepted. + repeated string verify_certificate_hash = 2 + [(validate.rules).repeated = {items {string {min_bytes: 64 max_bytes: 95}}}]; + + // An optional list of Subject Alternative name matchers. Envoy will verify that the + // Subject Alternative Name of the presented certificate matches one of the specified matches. + // + // When a certificate has wildcard DNS SAN entries, to match a specific client, it should be + // configured with exact match type in the :ref:`string matcher `. + // For example if the certificate has "\*.example.com" as DNS SAN entry, to allow only "api.example.com", + // it should be configured as shown below. + // + // .. code-block:: yaml + // + // match_subject_alt_names: + // exact: "api.example.com" + // + // .. attention:: + // + // Subject Alternative Names are easily spoofable and verifying only them is insecure, + // therefore this option must be used together with :ref:`trusted_ca + // `. + repeated type.matcher.v3.StringMatcher match_subject_alt_names = 9; + + // [#not-implemented-hide:] Must present a signed time-stamped OCSP response. + google.protobuf.BoolValue require_ocsp_staple = 5; + + // [#not-implemented-hide:] Must present signed certificate time-stamp. + google.protobuf.BoolValue require_signed_certificate_timestamp = 6; + + // An optional `certificate revocation list + // `_ + // (in PEM format). If specified, Envoy will verify that the presented peer + // certificate has not been revoked by this CRL. 
If this DataSource contains + // multiple CRLs, all of them will be used. + config.core.v3.DataSource crl = 7; + + // If specified, Envoy will not reject expired certificates. + bool allow_expired_certificate = 8; + + // Certificate trust chain verification mode. + TrustChainVerification trust_chain_verification = 10 + [(validate.rules).enum = {defined_only: true}]; + + repeated string hidden_envoy_deprecated_verify_subject_alt_name = 4 [deprecated = true]; +} diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/secret.proto b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/secret.proto new file mode 100644 index 0000000000000..80c68a56f5ce5 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/secret.proto @@ -0,0 +1,62 @@ +syntax = "proto3"; + +package envoy.extensions.transport_sockets.tls.v3; + +import "envoy/config/core/v3/base.proto"; +import "envoy/config/core/v3/config_source.proto"; +import "envoy/extensions/transport_sockets/tls/v3/common.proto"; + +import "udpa/core/v1/resource_locator.proto"; + +import "udpa/annotations/migrate.proto"; +import "udpa/annotations/sensitive.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tls.v3"; +option java_outer_classname = "SecretProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Secrets configuration] + +message GenericSecret { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.GenericSecret"; + + // Secret of generic type and is available to filters. + config.core.v3.DataSource secret = 1 [(udpa.annotations.sensitive) = true]; +} + +message SdsSecretConfig { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.SdsSecretConfig"; + + // Name (FQDN, UUID, SPKI, SHA256, etc.) 
by which the secret can be uniquely referred to. + // When both name and config are specified, then secret can be fetched and/or reloaded via + // SDS. When only name is specified, then secret will be loaded from static resources. + string name = 1 [(udpa.annotations.field_migrate).oneof_promotion = "name_specifier"]; + + // Resource locator for SDS. This is mutually exclusive to *name*. + // [#not-implemented-hide:] + udpa.core.v1.ResourceLocator sds_resource_locator = 3 + [(udpa.annotations.field_migrate).oneof_promotion = "name_specifier"]; + + config.core.v3.ConfigSource sds_config = 2; +} + +// [#next-free-field: 6] +message Secret { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.Secret"; + + // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. + string name = 1; + + oneof type { + TlsCertificate tls_certificate = 2; + + TlsSessionTicketKeys session_ticket_keys = 3; + + CertificateValidationContext validation_context = 4; + + GenericSecret generic_secret = 5; + } +} diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/tls.proto b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/tls.proto new file mode 100644 index 0000000000000..7ee7920c724d1 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/tls.proto @@ -0,0 +1,241 @@ +syntax = "proto3"; + +package envoy.extensions.transport_sockets.tls.v3; + +import "envoy/config/core/v3/extension.proto"; +import "envoy/extensions/transport_sockets/tls/v3/common.proto"; +import "envoy/extensions/transport_sockets/tls/v3/secret.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = 
"io.envoyproxy.envoy.extensions.transport_sockets.tls.v3"; +option java_outer_classname = "TlsProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: TLS transport socket] +// [#extension: envoy.transport_sockets.tls] +// The TLS contexts below provide the transport socket configuration for upstream/downstream TLS. + +message UpstreamTlsContext { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.auth.UpstreamTlsContext"; + + // Common TLS context settings. + // + // .. attention:: + // + // Server certificate verification is not enabled by default. Configure + // :ref:`trusted_ca` to enable + // verification. + CommonTlsContext common_tls_context = 1; + + // SNI string to use when creating TLS backend connections. + string sni = 2 [(validate.rules).string = {max_bytes: 255}]; + + // If true, server-initiated TLS renegotiation will be allowed. + // + // .. attention:: + // + // TLS renegotiation is considered insecure and shouldn't be used unless absolutely necessary. + bool allow_renegotiation = 3; + + // Maximum number of session keys (Pre-Shared Keys for TLSv1.3+, Session IDs and Session Tickets + // for TLSv1.2 and older) to store for the purpose of session resumption. + // + // Defaults to 1, setting this to 0 disables session resumption. + google.protobuf.UInt32Value max_session_keys = 4; +} + +// [#next-free-field: 8] +message DownstreamTlsContext { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.auth.DownstreamTlsContext"; + + // Common TLS context settings. + CommonTlsContext common_tls_context = 1; + + // If specified, Envoy will reject connections without a valid client + // certificate. + google.protobuf.BoolValue require_client_certificate = 2; + + // If specified, Envoy will reject connections without a valid and matching SNI. 
+ // [#not-implemented-hide:] + google.protobuf.BoolValue require_sni = 3; + + oneof session_ticket_keys_type { + // TLS session ticket key settings. + TlsSessionTicketKeys session_ticket_keys = 4; + + // Config for fetching TLS session ticket keys via SDS API. + SdsSecretConfig session_ticket_keys_sds_secret_config = 5; + + // Config for controlling stateless TLS session resumption: setting this to true will cause the TLS + // server to not issue TLS session tickets for the purposes of stateless TLS session resumption. + // If set to false, the TLS server will issue TLS session tickets and encrypt/decrypt them using + // the keys specified through either :ref:`session_ticket_keys ` + // or :ref:`session_ticket_keys_sds_secret_config `. + // If this config is set to false and no keys are explicitly configured, the TLS server will issue + // TLS session tickets and encrypt/decrypt them using an internally-generated and managed key, with the + // implication that sessions cannot be resumed across hot restarts or on different hosts. + bool disable_stateless_session_resumption = 7; + } + + // If specified, session_timeout will change maximum lifetime (in seconds) of TLS session + // Currently this value is used as a hint to `TLS session ticket lifetime (for TLSv1.2) + // ` + // only seconds could be specified (fractional seconds are going to be ignored). + google.protobuf.Duration session_timeout = 6 [(validate.rules).duration = { + lt {seconds: 4294967296} + gte {} + }]; +} + +// TLS context shared by both client and server TLS contexts. +// [#next-free-field: 13] +message CommonTlsContext { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.CommonTlsContext"; + + // Config for Certificate provider to get certificates. This provider should allow certificates to be + // fetched/refreshed over the network asynchronously with respect to the TLS handshake. 
+ message CertificateProvider { + // opaque name used to specify certificate instances or types. For example, "ROOTCA" to specify + // a root-certificate (validation context) or "TLS" to specify a new tls-certificate. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Provider specific config. + // Note: an implementation is expected to dedup multiple instances of the same config + // to maintain a single certificate-provider instance. The sharing can happen, for + // example, among multiple clusters or between the tls_certificate and validation_context + // certificate providers of a cluster. + // This config could be supplied inline or (in future) a named xDS resource. + oneof config { + option (validate.required) = true; + + config.core.v3.TypedExtensionConfig typed_config = 2; + } + } + + // Similar to CertificateProvider above, but allows the provider instances to be configured on + // the client side instead of being sent from the control plane. + message CertificateProviderInstance { + // Provider instance name. This name must be defined in the client's configuration (e.g., a + // bootstrap file) to correspond to a provider instance (i.e., the same data in the typed_config + // field that would be sent in the CertificateProvider message if the config was sent by the + // control plane). If not present, defaults to "default". + // + // Instance names should generally be defined not in terms of the underlying provider + // implementation (e.g., "file_watcher") but rather in terms of the function of the + // certificates (e.g., "foo_deployment_identity"). + string instance_name = 1; + + // Opaque name used to specify certificate instances or types. For example, "ROOTCA" to specify + // a root-certificate (validation context) or "example.com" to specify a certificate for a + // particular domain. Not all provider instances will actually use this field, so the value + // defaults to the empty string. 
+ string certificate_name = 2; + } + + message CombinedCertificateValidationContext { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.auth.CommonTlsContext.CombinedCertificateValidationContext"; + + // How to validate peer certificates. + CertificateValidationContext default_validation_context = 1 + [(validate.rules).message = {required: true}]; + + // Config for fetching validation context via SDS API. Note SDS API allows certificates to be + // fetched/refreshed over the network asynchronously with respect to the TLS handshake. + // Only one of validation_context_sds_secret_config, validation_context_certificate_provider, + // or validation_context_certificate_provider_instance may be used. + SdsSecretConfig validation_context_sds_secret_config = 2 [ + (validate.rules).message = {required: true}, + (udpa.annotations.field_migrate).oneof_promotion = "dynamic_validation_context" + ]; + + // Certificate provider for fetching validation context. + // Only one of validation_context_sds_secret_config, validation_context_certificate_provider, + // or validation_context_certificate_provider_instance may be used. + // [#not-implemented-hide:] + CertificateProvider validation_context_certificate_provider = 3 + [(udpa.annotations.field_migrate).oneof_promotion = "dynamic_validation_context"]; + + // Certificate provider instance for fetching validation context. + // Only one of validation_context_sds_secret_config, validation_context_certificate_provider, + // or validation_context_certificate_provider_instance may be used. + // [#not-implemented-hide:] + CertificateProviderInstance validation_context_certificate_provider_instance = 4 + [(udpa.annotations.field_migrate).oneof_promotion = "dynamic_validation_context"]; + } + + reserved 5; + + // TLS protocol versions, cipher suites etc. + TlsParameters tls_params = 1; + + // :ref:`Multiple TLS certificates ` can be associated with the + // same context to allow both RSA and ECDSA certificates. 
+ // + // Only a single TLS certificate is supported in client contexts. In server contexts, the first + // RSA certificate is used for clients that only support RSA and the first ECDSA certificate is + // used for clients that support ECDSA. + repeated TlsCertificate tls_certificates = 2; + + // Configs for fetching TLS certificates via SDS API. Note SDS API allows certificates to be + // fetched/refreshed over the network asynchronously with respect to the TLS handshake. + repeated SdsSecretConfig tls_certificate_sds_secret_configs = 6 + [(validate.rules).repeated = {max_items: 1}]; + + // Certificate provider for fetching TLS certificates. + // [#not-implemented-hide:] + CertificateProvider tls_certificate_certificate_provider = 9; + + // Certificate provider instance for fetching TLS certificates. + // [#not-implemented-hide:] + CertificateProviderInstance tls_certificate_certificate_provider_instance = 11; + + oneof validation_context_type { + // How to validate peer certificates. + CertificateValidationContext validation_context = 3; + + // Config for fetching validation context via SDS API. Note SDS API allows certificates to be + // fetched/refreshed over the network asynchronously with respect to the TLS handshake. + SdsSecretConfig validation_context_sds_secret_config = 7; + + // Combined certificate validation context holds a default CertificateValidationContext + // and SDS config. When SDS server returns dynamic CertificateValidationContext, both dynamic + // and default CertificateValidationContext are merged into a new CertificateValidationContext + // for validation. This merge is done by Message::MergeFrom(), so dynamic + // CertificateValidationContext overwrites singular fields in default + // CertificateValidationContext, and concatenates repeated fields to default + // CertificateValidationContext, and logical OR is applied to boolean fields. 
+ CombinedCertificateValidationContext combined_validation_context = 8; + + // Certificate provider for fetching validation context. + // [#not-implemented-hide:] + CertificateProvider validation_context_certificate_provider = 10; + + // Certificate provider instance for fetching validation context. + // [#not-implemented-hide:] + CertificateProviderInstance validation_context_certificate_provider_instance = 12; + } + + // Supplies the list of ALPN protocols that the listener should expose. In + // practice this is likely to be set to one of two values (see the + // :ref:`codec_type + // ` + // parameter in the HTTP connection manager for more information): + // + // * "h2,http/1.1" If the listener is going to support both HTTP/2 and HTTP/1.1. + // * "http/1.1" If the listener is only going to support HTTP/1.1. + // + // There is no default for this parameter. If empty, Envoy will not expose ALPN. + repeated string alpn_protocols = 4; +} diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/BUILD index e56544584bfe2..5471fdfbe0b1a 100644 --- a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/BUILD +++ b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/BUILD @@ -8,7 +8,8 @@ api_proto_package( deps = [ "//envoy/config/core/v4alpha:pkg", "//envoy/extensions/transport_sockets/tls/v3:pkg", - "//envoy/type/matcher/v3:pkg", + "//envoy/type/matcher/v4alpha:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//udpa/core/v1:pkg", ], ) diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/cert.proto b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/common.proto similarity index 63% rename from generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/cert.proto rename to generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/common.proto index 
febb6d665240b..0b63ade128d3c 100644 --- a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/cert.proto +++ b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/common.proto @@ -3,11 +3,9 @@ syntax = "proto3"; package envoy.extensions.transport_sockets.tls.v4alpha; import "envoy/config/core/v4alpha/base.proto"; -import "envoy/config/core/v4alpha/config_source.proto"; -import "envoy/type/matcher/v3/string.proto"; +import "envoy/type/matcher/v4alpha/string.proto"; import "google/protobuf/any.proto"; -import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; @@ -17,7 +15,7 @@ import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tls.v4alpha"; -option java_outer_classname = "CertProto"; +option java_outer_classname = "CommonProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; @@ -48,8 +46,8 @@ message TlsParameters { // servers. TlsProtocol tls_minimum_protocol_version = 1 [(validate.rules).enum = {defined_only: true}]; - // Maximum TLS protocol version. By default, it's ``TLSv1_3`` for servers in non-FIPS builds, and - // ``TLSv1_2`` for clients and for servers using :ref:`BoringSSL FIPS `. + // Maximum TLS protocol version. By default, it's ``TLSv1_2`` for clients and ``TLSv1_3`` for + // servers. TlsProtocol tls_maximum_protocol_version = 2 [(validate.rules).enum = {defined_only: true}]; // If specified, the TLS listener will only support the specified `cipher list @@ -300,7 +298,7 @@ message CertificateValidationContext { // Subject Alternative Name of the presented certificate matches one of the specified matches. // // When a certificate has wildcard DNS SAN entries, to match a specific client, it should be - // configured with exact match type in the :ref:`string matcher `. 
+ // configured with exact match type in the :ref:`string matcher `. // For example if the certificate has "\*.example.com" as DNS SAN entry, to allow only "api.example.com", // it should be configured as shown below. // @@ -314,7 +312,7 @@ message CertificateValidationContext { // Subject Alternative Names are easily spoofable and verifying only them is insecure, // therefore this option must be used together with :ref:`trusted_ca // `. - repeated type.matcher.v3.StringMatcher match_subject_alt_names = 9; + repeated type.matcher.v4alpha.StringMatcher match_subject_alt_names = 9; // [#not-implemented-hide:] Must present a signed time-stamped OCSP response. google.protobuf.BoolValue require_ocsp_staple = 5; @@ -336,183 +334,3 @@ message CertificateValidationContext { TrustChainVerification trust_chain_verification = 10 [(validate.rules).enum = {defined_only: true}]; } - -// TLS context shared by both client and server TLS contexts. -// [#next-free-field: 9] -message CommonTlsContext { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.transport_sockets.tls.v3.CommonTlsContext"; - - message CombinedCertificateValidationContext { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.transport_sockets.tls.v3.CommonTlsContext." - "CombinedCertificateValidationContext"; - - // How to validate peer certificates. - CertificateValidationContext default_validation_context = 1 - [(validate.rules).message = {required: true}]; - - // Config for fetching validation context via SDS API. - SdsSecretConfig validation_context_sds_secret_config = 2 - [(validate.rules).message = {required: true}]; - } - - reserved 5; - - // TLS protocol versions, cipher suites etc. - TlsParameters tls_params = 1; - - // :ref:`Multiple TLS certificates ` can be associated with the - // same context to allow both RSA and ECDSA certificates. - // - // Only a single TLS certificate is supported in client contexts. 
In server contexts, the first - // RSA certificate is used for clients that only support RSA and the first ECDSA certificate is - // used for clients that support ECDSA. - repeated TlsCertificate tls_certificates = 2; - - // Configs for fetching TLS certificates via SDS API. - repeated SdsSecretConfig tls_certificate_sds_secret_configs = 6 - [(validate.rules).repeated = {max_items: 1}]; - - oneof validation_context_type { - // How to validate peer certificates. - CertificateValidationContext validation_context = 3; - - // Config for fetching validation context via SDS API. - SdsSecretConfig validation_context_sds_secret_config = 7; - - // Combined certificate validation context holds a default CertificateValidationContext - // and SDS config. When SDS server returns dynamic CertificateValidationContext, both dynamic - // and default CertificateValidationContext are merged into a new CertificateValidationContext - // for validation. This merge is done by Message::MergeFrom(), so dynamic - // CertificateValidationContext overwrites singular fields in default - // CertificateValidationContext, and concatenates repeated fields to default - // CertificateValidationContext, and logical OR is applied to boolean fields. - CombinedCertificateValidationContext combined_validation_context = 8; - } - - // Supplies the list of ALPN protocols that the listener should expose. In - // practice this is likely to be set to one of two values (see the - // :ref:`codec_type - // ` - // parameter in the HTTP connection manager for more information): - // - // * "h2,http/1.1" If the listener is going to support both HTTP/2 and HTTP/1.1. - // * "http/1.1" If the listener is only going to support HTTP/1.1. - // - // There is no default for this parameter. If empty, Envoy will not expose ALPN. 
- repeated string alpn_protocols = 4; -} - -message UpstreamTlsContext { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext"; - - // Common TLS context settings. - // - // .. attention:: - // - // Server certificate verification is not enabled by default. Configure - // :ref:`trusted_ca` to enable - // verification. - CommonTlsContext common_tls_context = 1; - - // SNI string to use when creating TLS backend connections. - string sni = 2 [(validate.rules).string = {max_bytes: 255}]; - - // If true, server-initiated TLS renegotiation will be allowed. - // - // .. attention:: - // - // TLS renegotiation is considered insecure and shouldn't be used unless absolutely necessary. - bool allow_renegotiation = 3; - - // Maximum number of session keys (Pre-Shared Keys for TLSv1.3+, Session IDs and Session Tickets - // for TLSv1.2 and older) to store for the purpose of session resumption. - // - // Defaults to 1, setting this to 0 disables session resumption. - google.protobuf.UInt32Value max_session_keys = 4; -} - -// [#next-free-field: 8] -message DownstreamTlsContext { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext"; - - // Common TLS context settings. - CommonTlsContext common_tls_context = 1; - - // If specified, Envoy will reject connections without a valid client - // certificate. - google.protobuf.BoolValue require_client_certificate = 2; - - // If specified, Envoy will reject connections without a valid and matching SNI. - // [#not-implemented-hide:] - google.protobuf.BoolValue require_sni = 3; - - oneof session_ticket_keys_type { - // TLS session ticket key settings. - TlsSessionTicketKeys session_ticket_keys = 4; - - // Config for fetching TLS session ticket keys via SDS API. 
- SdsSecretConfig session_ticket_keys_sds_secret_config = 5; - - // Config for controlling stateless TLS session resumption: setting this to true will cause the TLS - // server to not issue TLS session tickets for the purposes of stateless TLS session resumption. - // If set to false, the TLS server will issue TLS session tickets and encrypt/decrypt them using - // the keys specified through either :ref:`session_ticket_keys ` - // or :ref:`session_ticket_keys_sds_secret_config `. - // If this config is set to false and no keys are explicitly configured, the TLS server will issue - // TLS session tickets and encrypt/decrypt them using an internally-generated and managed key, with the - // implication that sessions cannot be resumed across hot restarts or on different hosts. - bool disable_stateless_session_resumption = 7; - } - - // If specified, session_timeout will change maximum lifetime (in seconds) of TLS session - // Currently this value is used as a hint to `TLS session ticket lifetime (for TLSv1.2) - // ` - // only seconds could be specified (fractional seconds are going to be ignored). - google.protobuf.Duration session_timeout = 6 [(validate.rules).duration = { - lt {seconds: 4294967296} - gte {} - }]; -} - -message GenericSecret { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.transport_sockets.tls.v3.GenericSecret"; - - // Secret of generic type and is available to filters. - config.core.v4alpha.DataSource secret = 1 [(udpa.annotations.sensitive) = true]; -} - -message SdsSecretConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.transport_sockets.tls.v3.SdsSecretConfig"; - - // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. - // When both name and config are specified, then secret can be fetched and/or reloaded via - // SDS. When only name is specified, then secret will be loaded from static resources. 
- string name = 1; - - config.core.v4alpha.ConfigSource sds_config = 2; -} - -// [#next-free-field: 6] -message Secret { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.transport_sockets.tls.v3.Secret"; - - // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. - string name = 1; - - oneof type { - TlsCertificate tls_certificate = 2; - - TlsSessionTicketKeys session_ticket_keys = 3; - - CertificateValidationContext validation_context = 4; - - GenericSecret generic_secret = 5; - } -} diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/secret.proto b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/secret.proto new file mode 100644 index 0000000000000..11306f21415a3 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/secret.proto @@ -0,0 +1,65 @@ +syntax = "proto3"; + +package envoy.extensions.transport_sockets.tls.v4alpha; + +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/config/core/v4alpha/config_source.proto"; +import "envoy/extensions/transport_sockets/tls/v4alpha/common.proto"; + +import "udpa/core/v1/resource_locator.proto"; + +import "udpa/annotations/sensitive.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tls.v4alpha"; +option java_outer_classname = "SecretProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Secrets configuration] + +message GenericSecret { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.transport_sockets.tls.v3.GenericSecret"; + + // Secret of generic type and is available to filters. 
+ config.core.v4alpha.DataSource secret = 1 [(udpa.annotations.sensitive) = true]; +} + +message SdsSecretConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.transport_sockets.tls.v3.SdsSecretConfig"; + + oneof name_specifier { + // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. + // When both name and config are specified, then secret can be fetched and/or reloaded via + // SDS. When only name is specified, then secret will be loaded from static resources. + string name = 1; + + // Resource locator for SDS. This is mutually exclusive to *name*. + // [#not-implemented-hide:] + udpa.core.v1.ResourceLocator sds_resource_locator = 3; + } + + config.core.v4alpha.ConfigSource sds_config = 2; +} + +// [#next-free-field: 6] +message Secret { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.transport_sockets.tls.v3.Secret"; + + // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. 
+ string name = 1; + + oneof type { + TlsCertificate tls_certificate = 2; + + TlsSessionTicketKeys session_ticket_keys = 3; + + CertificateValidationContext validation_context = 4; + + GenericSecret generic_secret = 5; + } +} diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/tls.proto b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/tls.proto new file mode 100644 index 0000000000000..a73ba6e002ba2 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/tls.proto @@ -0,0 +1,246 @@ +syntax = "proto3"; + +package envoy.extensions.transport_sockets.tls.v4alpha; + +import "envoy/config/core/v4alpha/extension.proto"; +import "envoy/extensions/transport_sockets/tls/v4alpha/common.proto"; +import "envoy/extensions/transport_sockets/tls/v4alpha/secret.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tls.v4alpha"; +option java_outer_classname = "TlsProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: TLS transport socket] +// [#extension: envoy.transport_sockets.tls] +// The TLS contexts below provide the transport socket configuration for upstream/downstream TLS. + +message UpstreamTlsContext { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext"; + + // Common TLS context settings. + // + // .. attention:: + // + // Server certificate verification is not enabled by default. Configure + // :ref:`trusted_ca` to enable + // verification. 
+ CommonTlsContext common_tls_context = 1; + + // SNI string to use when creating TLS backend connections. + string sni = 2 [(validate.rules).string = {max_bytes: 255}]; + + // If true, server-initiated TLS renegotiation will be allowed. + // + // .. attention:: + // + // TLS renegotiation is considered insecure and shouldn't be used unless absolutely necessary. + bool allow_renegotiation = 3; + + // Maximum number of session keys (Pre-Shared Keys for TLSv1.3+, Session IDs and Session Tickets + // for TLSv1.2 and older) to store for the purpose of session resumption. + // + // Defaults to 1, setting this to 0 disables session resumption. + google.protobuf.UInt32Value max_session_keys = 4; +} + +// [#next-free-field: 8] +message DownstreamTlsContext { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext"; + + // Common TLS context settings. + CommonTlsContext common_tls_context = 1; + + // If specified, Envoy will reject connections without a valid client + // certificate. + google.protobuf.BoolValue require_client_certificate = 2; + + // If specified, Envoy will reject connections without a valid and matching SNI. + // [#not-implemented-hide:] + google.protobuf.BoolValue require_sni = 3; + + oneof session_ticket_keys_type { + // TLS session ticket key settings. + TlsSessionTicketKeys session_ticket_keys = 4; + + // Config for fetching TLS session ticket keys via SDS API. + SdsSecretConfig session_ticket_keys_sds_secret_config = 5; + + // Config for controlling stateless TLS session resumption: setting this to true will cause the TLS + // server to not issue TLS session tickets for the purposes of stateless TLS session resumption. + // If set to false, the TLS server will issue TLS session tickets and encrypt/decrypt them using + // the keys specified through either :ref:`session_ticket_keys ` + // or :ref:`session_ticket_keys_sds_secret_config `. 
+ // If this config is set to false and no keys are explicitly configured, the TLS server will issue + // TLS session tickets and encrypt/decrypt them using an internally-generated and managed key, with the + // implication that sessions cannot be resumed across hot restarts or on different hosts. + bool disable_stateless_session_resumption = 7; + } + + // If specified, session_timeout will change maximum lifetime (in seconds) of TLS session + // Currently this value is used as a hint to `TLS session ticket lifetime (for TLSv1.2) + // ` + // only seconds could be specified (fractional seconds are going to be ignored). + google.protobuf.Duration session_timeout = 6 [(validate.rules).duration = { + lt {seconds: 4294967296} + gte {} + }]; +} + +// TLS context shared by both client and server TLS contexts. +// [#next-free-field: 13] +message CommonTlsContext { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.transport_sockets.tls.v3.CommonTlsContext"; + + // Config for Certificate provider to get certificates. This provider should allow certificates to be + // fetched/refreshed over the network asynchronously with respect to the TLS handshake. + message CertificateProvider { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CertificateProvider"; + + // opaque name used to specify certificate instances or types. For example, "ROOTCA" to specify + // a root-certificate (validation context) or "TLS" to specify a new tls-certificate. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Provider specific config. + // Note: an implementation is expected to dedup multiple instances of the same config + // to maintain a single certificate-provider instance. The sharing can happen, for + // example, among multiple clusters or between the tls_certificate and validation_context + // certificate providers of a cluster. 
+ // This config could be supplied inline or (in future) a named xDS resource. + oneof config { + option (validate.required) = true; + + config.core.v4alpha.TypedExtensionConfig typed_config = 2; + } + } + + // Similar to CertificateProvider above, but allows the provider instances to be configured on + // the client side instead of being sent from the control plane. + message CertificateProviderInstance { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CertificateProviderInstance"; + + // Provider instance name. This name must be defined in the client's configuration (e.g., a + // bootstrap file) to correspond to a provider instance (i.e., the same data in the typed_config + // field that would be sent in the CertificateProvider message if the config was sent by the + // control plane). If not present, defaults to "default". + // + // Instance names should generally be defined not in terms of the underlying provider + // implementation (e.g., "file_watcher") but rather in terms of the function of the + // certificates (e.g., "foo_deployment_identity"). + string instance_name = 1; + + // Opaque name used to specify certificate instances or types. For example, "ROOTCA" to specify + // a root-certificate (validation context) or "example.com" to specify a certificate for a + // particular domain. Not all provider instances will actually use this field, so the value + // defaults to the empty string. + string certificate_name = 2; + } + + message CombinedCertificateValidationContext { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.transport_sockets.tls.v3.CommonTlsContext." + "CombinedCertificateValidationContext"; + + // How to validate peer certificates. 
+ CertificateValidationContext default_validation_context = 1 + [(validate.rules).message = {required: true}]; + + oneof dynamic_validation_context { + // Config for fetching validation context via SDS API. Note SDS API allows certificates to be + // fetched/refreshed over the network asynchronously with respect to the TLS handshake. + // Only one of validation_context_sds_secret_config, validation_context_certificate_provider, + // or validation_context_certificate_provider_instance may be used. + SdsSecretConfig validation_context_sds_secret_config = 2 + [(validate.rules).message = {required: true}]; + + // Certificate provider for fetching validation context. + // Only one of validation_context_sds_secret_config, validation_context_certificate_provider, + // or validation_context_certificate_provider_instance may be used. + // [#not-implemented-hide:] + CertificateProvider validation_context_certificate_provider = 3; + + // Certificate provider instance for fetching validation context. + // Only one of validation_context_sds_secret_config, validation_context_certificate_provider, + // or validation_context_certificate_provider_instance may be used. + // [#not-implemented-hide:] + CertificateProviderInstance validation_context_certificate_provider_instance = 4; + } + } + + reserved 5; + + // TLS protocol versions, cipher suites etc. + TlsParameters tls_params = 1; + + // :ref:`Multiple TLS certificates ` can be associated with the + // same context to allow both RSA and ECDSA certificates. + // + // Only a single TLS certificate is supported in client contexts. In server contexts, the first + // RSA certificate is used for clients that only support RSA and the first ECDSA certificate is + // used for clients that support ECDSA. + repeated TlsCertificate tls_certificates = 2; + + // Configs for fetching TLS certificates via SDS API. Note SDS API allows certificates to be + // fetched/refreshed over the network asynchronously with respect to the TLS handshake. 
+ repeated SdsSecretConfig tls_certificate_sds_secret_configs = 6 + [(validate.rules).repeated = {max_items: 1}]; + + // Certificate provider for fetching TLS certificates. + // [#not-implemented-hide:] + CertificateProvider tls_certificate_certificate_provider = 9; + + // Certificate provider instance for fetching TLS certificates. + // [#not-implemented-hide:] + CertificateProviderInstance tls_certificate_certificate_provider_instance = 11; + + oneof validation_context_type { + // How to validate peer certificates. + CertificateValidationContext validation_context = 3; + + // Config for fetching validation context via SDS API. Note SDS API allows certificates to be + // fetched/refreshed over the network asynchronously with respect to the TLS handshake. + SdsSecretConfig validation_context_sds_secret_config = 7; + + // Combined certificate validation context holds a default CertificateValidationContext + // and SDS config. When SDS server returns dynamic CertificateValidationContext, both dynamic + // and default CertificateValidationContext are merged into a new CertificateValidationContext + // for validation. This merge is done by Message::MergeFrom(), so dynamic + // CertificateValidationContext overwrites singular fields in default + // CertificateValidationContext, and concatenates repeated fields to default + // CertificateValidationContext, and logical OR is applied to boolean fields. + CombinedCertificateValidationContext combined_validation_context = 8; + + // Certificate provider for fetching validation context. + // [#not-implemented-hide:] + CertificateProvider validation_context_certificate_provider = 10; + + // Certificate provider instance for fetching validation context. + // [#not-implemented-hide:] + CertificateProviderInstance validation_context_certificate_provider_instance = 12; + } + + // Supplies the list of ALPN protocols that the listener should expose. 
In + // practice this is likely to be set to one of two values (see the + // :ref:`codec_type + // ` + // parameter in the HTTP connection manager for more information): + // + // * "h2,http/1.1" If the listener is going to support both HTTP/2 and HTTP/1.1. + // * "http/1.1" If the listener is only going to support HTTP/1.1. + // + // There is no default for this parameter. If empty, Envoy will not expose ALPN. + repeated string alpn_protocols = 4; +} diff --git a/generated_api_shadow/envoy/extensions/upstreams/http/generic/v3/BUILD b/generated_api_shadow/envoy/extensions/upstreams/http/generic/v3/BUILD new file mode 100644 index 0000000000000..ef3541ebcb1df --- /dev/null +++ b/generated_api_shadow/envoy/extensions/upstreams/http/generic/v3/BUILD @@ -0,0 +1,9 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/generated_api_shadow/envoy/extensions/upstreams/http/generic/v3/generic_connection_pool.proto b/generated_api_shadow/envoy/extensions/upstreams/http/generic/v3/generic_connection_pool.proto new file mode 100644 index 0000000000000..c6b02364aa2d2 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/upstreams/http/generic/v3/generic_connection_pool.proto @@ -0,0 +1,18 @@ +syntax = "proto3"; + +package envoy.extensions.upstreams.http.generic.v3; + +import "udpa/annotations/status.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.upstreams.http.generic.v3"; +option java_outer_classname = "GenericConnectionPoolProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Generic Connection Pool] + +// A connection pool which forwards downstream HTTP as TCP or HTTP to upstream, +// based on CONNECT configuration. 
+// [#extension: envoy.upstreams.http.generic] +message GenericConnectionPoolProto { +} diff --git a/generated_api_shadow/envoy/extensions/upstreams/http/http/v3/BUILD b/generated_api_shadow/envoy/extensions/upstreams/http/http/v3/BUILD new file mode 100644 index 0000000000000..ef3541ebcb1df --- /dev/null +++ b/generated_api_shadow/envoy/extensions/upstreams/http/http/v3/BUILD @@ -0,0 +1,9 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/generated_api_shadow/envoy/extensions/upstreams/http/http/v3/http_connection_pool.proto b/generated_api_shadow/envoy/extensions/upstreams/http/http/v3/http_connection_pool.proto new file mode 100644 index 0000000000000..e4c2d6ff9b84f --- /dev/null +++ b/generated_api_shadow/envoy/extensions/upstreams/http/http/v3/http_connection_pool.proto @@ -0,0 +1,17 @@ +syntax = "proto3"; + +package envoy.extensions.upstreams.http.http.v3; + +import "udpa/annotations/status.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.upstreams.http.http.v3"; +option java_outer_classname = "HttpConnectionPoolProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Http Connection Pool] + +// A connection pool which forwards downstream HTTP as HTTP to upstream. +// [#extension: envoy.upstreams.http.http] +message HttpConnectionPoolProto { +} diff --git a/generated_api_shadow/envoy/extensions/upstreams/http/tcp/v3/BUILD b/generated_api_shadow/envoy/extensions/upstreams/http/tcp/v3/BUILD new file mode 100644 index 0000000000000..ef3541ebcb1df --- /dev/null +++ b/generated_api_shadow/envoy/extensions/upstreams/http/tcp/v3/BUILD @@ -0,0 +1,9 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/generated_api_shadow/envoy/extensions/upstreams/http/tcp/v3/tcp_connection_pool.proto b/generated_api_shadow/envoy/extensions/upstreams/http/tcp/v3/tcp_connection_pool.proto new file mode 100644 index 0000000000000..5bc8734cb3f79 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/upstreams/http/tcp/v3/tcp_connection_pool.proto @@ -0,0 +1,17 @@ +syntax = "proto3"; + +package envoy.extensions.upstreams.http.tcp.v3; + +import "udpa/annotations/status.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.upstreams.http.tcp.v3"; +option java_outer_classname = "TcpConnectionPoolProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Tcp Connection Pool] + +// A connection pool which forwards downstream HTTP as TCP to upstream, +// [#extension: envoy.upstreams.http.tcp] +message TcpConnectionPoolProto { +} diff --git a/generated_api_shadow/envoy/extensions/wasm/v3/BUILD b/generated_api_shadow/envoy/extensions/wasm/v3/BUILD index d29790ff5e75b..2c3dad6453b65 100644 --- a/generated_api_shadow/envoy/extensions/wasm/v3/BUILD +++ b/generated_api_shadow/envoy/extensions/wasm/v3/BUILD @@ -7,7 +7,6 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/core/v3:pkg", - "//envoy/config/wasm/v2alpha:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) diff --git a/generated_api_shadow/envoy/extensions/wasm/v3/wasm.proto b/generated_api_shadow/envoy/extensions/wasm/v3/wasm.proto index 16cae01897e03..26f458214466e 100644 --- a/generated_api_shadow/envoy/extensions/wasm/v3/wasm.proto +++ b/generated_api_shadow/envoy/extensions/wasm/v3/wasm.proto @@ -15,14 +15,12 @@ option java_outer_classname = "WasmProto"; option java_multiple_files = true; option 
(udpa.annotations.file_status).package_version_status = ACTIVE; -// [#protodoc-title: Wasm service] +// [#protodoc-title: Wasm] +// [[#not-implemented-hide:] // Configuration for a Wasm VM. -// [#next-free-field: 6] -// [#not-implemented-hide:] pending implementation. +// [#next-free-field: 7] message VmConfig { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.wasm.v2alpha.VmConfig"; - // An ID which will be used along with a hash of the wasm code (or the name of the registered Null // VM plugin) to determine which VM will be used for the plugin. All plugins which use the same // *vm_id* and code will use the same VM. May be left blank. Sharing a VM between plugins can @@ -36,51 +34,62 @@ message VmConfig { // The Wasm code that Envoy will execute. config.core.v3.AsyncDataSource code = 3; - // The Wasm configuration used in initialization of a new VM (proxy_on_start). + // The Wasm configuration used in initialization of a new VM + // (proxy_on_start). `google.protobuf.Struct` is serialized as JSON before + // passing it to the plugin. `google.protobuf.BytesValue` and + // `google.protobuf.StringValue` are passed directly without the wrapper. google.protobuf.Any configuration = 4; // Allow the wasm file to include pre-compiled code on VMs which support it. // Warning: this should only be enable for trusted sources as the precompiled code is not // verified. bool allow_precompiled = 5; + + // If true and the code needs to be remotely fetched and it is not in the cache then NACK the configuration + // update and do a background fetch to fill the cache, otherwise fetch the code asynchronously and enter + // warming state. + bool nack_on_code_cache_miss = 6; } +// [[#not-implemented-hide:] // Base Configuration for Wasm Plugins e.g. filters and services. // [#next-free-field: 6] -// [#not-implemented-hide:] pending implementation. 
message PluginConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.wasm.v2alpha.PluginConfig"; - // A unique name for a filters/services in a VM for use in identifying the filter/service if - // multiple filters/services are handled by the same *vm_id* and *group_name* and for + // multiple filters/services are handled by the same *vm_id* and *root_id* and for // logging/debugging. string name = 1; // A unique ID for a set of filters/services in a VM which will share a RootContext and Contexts // if applicable (e.g. an Wasm HttpFilter and an Wasm AccessLog). If left blank, all - // filters/services with a blank group_name with the same *vm_id* will share Context(s). - string group_name = 2; - - google.protobuf.Any configuration = 5; - // In the future add referential VM configurations. + // filters/services with a blank root_id with the same *vm_id* will share Context(s). + string root_id = 2; // Configuration for finding or starting VM. oneof vm_config { - // Filter/service configuration used to configure or reconfigure a plugin - // (proxy_on_configuration). VmConfig inline_vm_config = 3; + // In the future add referential VM configurations. } + + // Filter/service configuration used to configure or reconfigure a plugin + // (proxy_on_configuration). + // `google.protobuf.Struct` is serialized as JSON before + // passing it to the plugin. `google.protobuf.BytesValue` and + // `google.protobuf.StringValue` are passed directly without the wrapper. + google.protobuf.Any configuration = 4; + + // If there is a fatal error on the VM (e.g. exception, abort(), on_start or on_configure return false), + // then all plugins associated with the VM will either fail closed (by default), e.g. by returning an HTTP 503 error, + // or fail open (if 'fail_open' is set to true) by bypassing the filter. 
Note: when on_start or on_configure return false + // during xDS updates the xDS configuration will be rejected and when on_start or on_configuration return false on initial + // startup the proxy will not start. + bool fail_open = 5; } -// WasmService is configured as a built-in *envoy.wasm_service* :ref:`ServiceConfig -// `. This opaque configuration will be used to -// create a Wasm Service. -// [#not-implemented-hide:] pending implementation. +// [[#not-implemented-hide:] +// WasmService is configured as a built-in *envoy.wasm_service* :ref:`WasmService +// ` This opaque configuration will be used to create a Wasm Service. message WasmService { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.wasm.v2alpha.WasmService"; - // General plugin configuration. PluginConfig config = 1; diff --git a/generated_api_shadow/envoy/service/auth/v2alpha/BUILD b/generated_api_shadow/envoy/service/auth/v2alpha/BUILD index 0bd31fdc6ff8f..c75dabe1a8a00 100644 --- a/generated_api_shadow/envoy/service/auth/v2alpha/BUILD +++ b/generated_api_shadow/envoy/service/auth/v2alpha/BUILD @@ -1,9 +1,9 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + licenses(["notice"]) # Apache 2 # DO NOT EDIT. This file is generated by tools/proto_sync.py. 
-load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - api_proto_package( has_services = True, deps = ["//envoy/service/auth/v2:pkg"], diff --git a/generated_api_shadow/envoy/service/auth/v3/external_auth.proto b/generated_api_shadow/envoy/service/auth/v3/external_auth.proto index b93b61a3bde95..e2ee274fdfdbe 100644 --- a/generated_api_shadow/envoy/service/auth/v3/external_auth.proto +++ b/generated_api_shadow/envoy/service/auth/v3/external_auth.proto @@ -6,6 +6,7 @@ import "envoy/config/core/v3/base.proto"; import "envoy/service/auth/v3/attribute_context.proto"; import "envoy/type/v3/http_status.proto"; +import "google/protobuf/struct.proto"; import "google/rpc/status.proto"; import "udpa/annotations/status.proto"; @@ -57,7 +58,7 @@ message DeniedHttpResponse { string body = 3; } -// HTTP attributes for an ok response. +// HTTP attributes for an OK response. message OkHttpResponse { option (udpa.annotations.versioning).previous_message_type = "envoy.service.auth.v2.OkHttpResponse"; @@ -69,6 +70,14 @@ message OkHttpResponse { // by Leaving `append` as false, the filter will either add a new header, or override an existing // one if there is a match. repeated config.core.v3.HeaderValueOption headers = 2; + + // Optional response metadata that will be emitted as dynamic metadata to be consumed by the next + // filter. This metadata lives in a namespace specified by the canonical name of extension filter + // that requires it: + // + // - :ref:`envoy.filters.http.ext_authz ` for HTTP filter. + // - :ref:`envoy.filters.network.ext_authz ` for network filter. + google.protobuf.Struct dynamic_metadata = 3; } // Intended for gRPC and Network Authorization servers `only`. 
diff --git a/generated_api_shadow/envoy/service/discovery/v3/BUILD b/generated_api_shadow/envoy/service/discovery/v3/BUILD index bfe0abc351dfa..d74aebc3424bc 100644 --- a/generated_api_shadow/envoy/service/discovery/v3/BUILD +++ b/generated_api_shadow/envoy/service/discovery/v3/BUILD @@ -11,5 +11,6 @@ api_proto_package( "//envoy/config/core/v3:pkg", "//envoy/service/discovery/v2:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//udpa/core/v1:pkg", ], ) diff --git a/generated_api_shadow/envoy/service/discovery/v3/discovery.proto b/generated_api_shadow/envoy/service/discovery/v3/discovery.proto index b8e31160a88b8..40479539213cf 100644 --- a/generated_api_shadow/envoy/service/discovery/v3/discovery.proto +++ b/generated_api_shadow/envoy/service/discovery/v3/discovery.proto @@ -7,6 +7,10 @@ import "envoy/config/core/v3/base.proto"; import "google/protobuf/any.proto"; import "google/rpc/status.proto"; +import "udpa/core/v1/resource_locator.proto"; +import "udpa/core/v1/resource_name.proto"; + +import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; @@ -140,7 +144,7 @@ message DiscoveryResponse { // In particular, initial_resource_versions being sent at the "start" of every // gRPC stream actually entails a message for each type_url, each with its own // initial_resource_versions. -// [#next-free-field: 8] +// [#next-free-field: 10] message DeltaDiscoveryRequest { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.DeltaDiscoveryRequest"; @@ -148,7 +152,9 @@ message DeltaDiscoveryRequest { config.core.v3.Node node = 1; // Type of the resource that is being requested, e.g. - // "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment". + // "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment". This does not need to be set if + // resources are only referenced via *udpa_resource_subscribe* and + // *udpa_resources_unsubscribe*. 
string type_url = 2; // DeltaDiscoveryRequests allow the client to add or remove individual @@ -174,9 +180,22 @@ message DeltaDiscoveryRequest { // A list of Resource names to add to the list of tracked resources. repeated string resource_names_subscribe = 3; + // As with *resource_names_subscribe* but used when subscribing to resources indicated + // by a *udpa.core.v1.ResourceLocator*. The directives in the resource locator + // are ignored and the context parameters are matched with + // *context_param_specifier* specific semantics. + // [#not-implemented-hide:] + repeated udpa.core.v1.ResourceLocator udpa_resources_subscribe = 8; + // A list of Resource names to remove from the list of tracked resources. repeated string resource_names_unsubscribe = 4; + // As with *resource_names_unsubscribe* but used when unsubscribing to resources indicated by a + // *udpa.core.v1.ResourceLocator*. This must match a previously subscribed + // resource locator provided in *udpa_resources_subscribe*. + // [#not-implemented-hide:] + repeated udpa.core.v1.ResourceLocator udpa_resources_unsubscribe = 9; + // Informs the server of the versions of the resources the xDS client knows of, to enable the // client to continue the same logical xDS session even in the face of gRPC stream reconnection. // It will not be populated: [1] in the very first stream of a session, since the client will @@ -199,7 +218,7 @@ message DeltaDiscoveryRequest { google.rpc.Status error_detail = 7; } -// [#next-free-field: 7] +// [#next-free-field: 8] message DeltaDiscoveryResponse { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.DeltaDiscoveryResponse"; @@ -215,22 +234,34 @@ message DeltaDiscoveryResponse { // Type URL for resources. Identifies the xDS API when muxing over ADS. // Must be consistent with the type_url in the Any within 'resources' if 'resources' is non-empty. + // This does not need to be set if *udpa_removed_resources* is used instead of + // *removed_resources*. 
string type_url = 4; // Resources names of resources that have be deleted and to be removed from the xDS Client. // Removed resources for missing resources can be ignored. repeated string removed_resources = 6; + // As with *removed_resources* but used when a removed resource was named in + // its *Resource*s with a *udpa.core.v1.ResourceName*. + // [#not-implemented-hide:] + repeated udpa.core.v1.ResourceName udpa_removed_resources = 7; + // The nonce provides a way for DeltaDiscoveryRequests to uniquely // reference a DeltaDiscoveryResponse when (N)ACKing. The nonce is required. string nonce = 5; } +// [#next-free-field: 6] message Resource { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Resource"; // The resource's name, to distinguish it from others of the same type of resource. - string name = 3; + string name = 3 [(udpa.annotations.field_migrate).oneof_promotion = "name_specifier"]; + + // Used instead of *name* when a resource with a *udpa.core.v1.ResourceName* is delivered. + udpa.core.v1.ResourceName udpa_resource_name = 5 + [(udpa.annotations.field_migrate).oneof_promotion = "name_specifier"]; // The aliases are a list of other names that this resource can go by. repeated string aliases = 4; diff --git a/generated_api_shadow/envoy/service/extension/v3/BUILD b/generated_api_shadow/envoy/service/extension/v3/BUILD new file mode 100644 index 0000000000000..6c68a071b8731 --- /dev/null +++ b/generated_api_shadow/envoy/service/extension/v3/BUILD @@ -0,0 +1,14 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + has_services = True, + deps = [ + "//envoy/annotations:pkg", + "//envoy/service/discovery/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/service/extension/v3/config_discovery.proto b/generated_api_shadow/envoy/service/extension/v3/config_discovery.proto new file mode 100644 index 0000000000000..652355b707e3d --- /dev/null +++ b/generated_api_shadow/envoy/service/extension/v3/config_discovery.proto @@ -0,0 +1,44 @@ +syntax = "proto3"; + +package envoy.service.extension.v3; + +import "envoy/service/discovery/v3/discovery.proto"; + +import "google/api/annotations.proto"; + +import "envoy/annotations/resource.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.service.extension.v3"; +option java_outer_classname = "ConfigDiscoveryProto"; +option java_multiple_files = true; +option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: ExtensionConfigDS] + +// Return extension configurations. +service ExtensionConfigDiscoveryService { + option (envoy.annotations.resource).type = "envoy.config.core.v3.TypedExtensionConfig"; + + rpc StreamExtensionConfigs(stream discovery.v3.DiscoveryRequest) + returns (stream discovery.v3.DiscoveryResponse) { + } + + rpc DeltaExtensionConfigs(stream discovery.v3.DeltaDiscoveryRequest) + returns (stream discovery.v3.DeltaDiscoveryResponse) { + } + + rpc FetchExtensionConfigs(discovery.v3.DiscoveryRequest) + returns (discovery.v3.DiscoveryResponse) { + option (google.api.http).post = "/v3/discovery:extension_configs"; + option (google.api.http).body = "*"; + } +} + +// [#not-implemented-hide:] Not configuration. 
Workaround c++ protobuf issue +// with importing services: https://github.com/google/protobuf/issues/4221 and +// protoxform to upgrade the file. +message EcdsDummy { +} diff --git a/generated_api_shadow/envoy/service/health/v3/hds.proto b/generated_api_shadow/envoy/service/health/v3/hds.proto index 0b09134709c82..484c0477ae466 100644 --- a/generated_api_shadow/envoy/service/health/v3/hds.proto +++ b/generated_api_shadow/envoy/service/health/v3/hds.proto @@ -9,6 +9,7 @@ import "envoy/config/endpoint/v3/endpoint_components.proto"; import "google/api/annotations.proto"; import "google/protobuf/duration.proto"; +import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; @@ -108,11 +109,32 @@ message EndpointHealth { config.core.v3.HealthStatus health_status = 2; } +// Group endpoint health by locality under each cluster. +message LocalityEndpointsHealth { + config.core.v3.Locality locality = 1; + + repeated EndpointHealth endpoints_health = 2; +} + +// The health status of endpoints in a cluster. The cluster name and locality +// should match the corresponding fields in ClusterHealthCheck message. +message ClusterEndpointsHealth { + string cluster_name = 1; + + repeated LocalityEndpointsHealth locality_endpoints_health = 2; +} + message EndpointHealthResponse { option (udpa.annotations.versioning).previous_message_type = "envoy.service.discovery.v2.EndpointHealthResponse"; + // [#comment:TODO(drewsortega): add deprecate annotation once cluster_endpoints_health is implemented] + // Deprecated - Flat list of endpoint health information. repeated EndpointHealth endpoints_health = 1; + + // [#not-implemented-hide:] + // Organize Endpoint health information by cluster. 
+ repeated ClusterEndpointsHealth cluster_endpoints_health = 2; } message HealthCheckRequestOrEndpointHealthResponse { diff --git a/generated_api_shadow/envoy/service/health/v4alpha/BUILD b/generated_api_shadow/envoy/service/health/v4alpha/BUILD new file mode 100644 index 0000000000000..b7b2a13bd4958 --- /dev/null +++ b/generated_api_shadow/envoy/service/health/v4alpha/BUILD @@ -0,0 +1,15 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + has_services = True, + deps = [ + "//envoy/config/core/v4alpha:pkg", + "//envoy/config/endpoint/v3:pkg", + "//envoy/service/health/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/service/health/v4alpha/hds.proto b/generated_api_shadow/envoy/service/health/v4alpha/hds.proto new file mode 100644 index 0000000000000..957f058b9c576 --- /dev/null +++ b/generated_api_shadow/envoy/service/health/v4alpha/hds.proto @@ -0,0 +1,187 @@ +syntax = "proto3"; + +package envoy.service.health.v4alpha; + +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/config/core/v4alpha/health_check.proto"; +import "envoy/config/endpoint/v3/endpoint_components.proto"; + +import "google/api/annotations.proto"; +import "google/protobuf/duration.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.service.health.v4alpha"; +option java_outer_classname = "HdsProto"; +option java_multiple_files = true; +option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Health Discovery Service (HDS)] + +// HDS is Health Discovery Service. It compliments Envoy’s health checking +// service by designating this Envoy to be a healthchecker for a subset of hosts +// in the cluster. 
The status of these health checks will be reported to the +// management server, where it can be aggregated etc and redistributed back to +// Envoy through EDS. +service HealthDiscoveryService { + // 1. Envoy starts up and if its can_healthcheck option in the static + // bootstrap config is enabled, sends HealthCheckRequest to the management + // server. It supplies its capabilities (which protocol it can health check + // with, what zone it resides in, etc.). + // 2. In response to (1), the management server designates this Envoy as a + // healthchecker to health check a subset of all upstream hosts for a given + // cluster (for example upstream Host 1 and Host 2). It streams + // HealthCheckSpecifier messages with cluster related configuration for all + // clusters this Envoy is designated to health check. Subsequent + // HealthCheckSpecifier message will be sent on changes to: + // a. Endpoints to health checks + // b. Per cluster configuration change + // 3. Envoy creates a health probe based on the HealthCheck config and sends + // it to endpoint(ip:port) of Host 1 and 2. Based on the HealthCheck + // configuration Envoy waits upon the arrival of the probe response and + // looks at the content of the response to decide whether the endpoint is + // healthy or not. If a response hasn't been received within the timeout + // interval, the endpoint health status is considered TIMEOUT. + // 4. Envoy reports results back in an EndpointHealthResponse message. + // Envoy streams responses as often as the interval configured by the + // management server in HealthCheckSpecifier. + // 5. The management Server collects health statuses for all endpoints in the + // cluster (for all clusters) and uses this information to construct + // EndpointDiscoveryResponse messages. + // 6. Once Envoy has a list of upstream endpoints to send traffic to, it load + // balances traffic to them without additional health checking. It may + // use inline healthcheck (i.e. 
consider endpoint UNHEALTHY if connection + // failed to a particular endpoint to account for health status propagation + // delay between HDS and EDS). + // By default, can_healthcheck is true. If can_healthcheck is false, Cluster + // configuration may not contain HealthCheck message. + // TODO(htuch): How is can_healthcheck communicated to CDS to ensure the above + // invariant? + // TODO(htuch): Add @amb67's diagram. + rpc StreamHealthCheck(stream HealthCheckRequestOrEndpointHealthResponse) + returns (stream HealthCheckSpecifier) { + } + + // TODO(htuch): Unlike the gRPC version, there is no stream-based binding of + // request/response. Should we add an identifier to the HealthCheckSpecifier + // to bind with the response? + rpc FetchHealthCheck(HealthCheckRequestOrEndpointHealthResponse) returns (HealthCheckSpecifier) { + option (google.api.http).post = "/v3/discovery:health_check"; + option (google.api.http).body = "*"; + } +} + +// Defines supported protocols etc, so the management server can assign proper +// endpoints to healthcheck. +message Capability { + option (udpa.annotations.versioning).previous_message_type = "envoy.service.health.v3.Capability"; + + // Different Envoy instances may have different capabilities (e.g. Redis) + // and/or have ports enabled for different protocols. + enum Protocol { + HTTP = 0; + TCP = 1; + REDIS = 2; + } + + repeated Protocol health_check_protocols = 1; +} + +message HealthCheckRequest { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.health.v3.HealthCheckRequest"; + + config.core.v4alpha.Node node = 1; + + Capability capability = 2; +} + +message EndpointHealth { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.health.v3.EndpointHealth"; + + config.endpoint.v3.Endpoint endpoint = 1; + + config.core.v4alpha.HealthStatus health_status = 2; +} + +// Group endpoint health by locality under each cluster. 
+message LocalityEndpointsHealth { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.health.v3.LocalityEndpointsHealth"; + + config.core.v4alpha.Locality locality = 1; + + repeated EndpointHealth endpoints_health = 2; +} + +// The health status of endpoints in a cluster. The cluster name and locality +// should match the corresponding fields in ClusterHealthCheck message. +message ClusterEndpointsHealth { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.health.v3.ClusterEndpointsHealth"; + + string cluster_name = 1; + + repeated LocalityEndpointsHealth locality_endpoints_health = 2; +} + +message EndpointHealthResponse { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.health.v3.EndpointHealthResponse"; + + // [#comment:TODO(drewsortega): add deprecate annotation once cluster_endpoints_health is implemented] + // Deprecated - Flat list of endpoint health information. + repeated EndpointHealth endpoints_health = 1; + + // [#not-implemented-hide:] + // Organize Endpoint health information by cluster. + repeated ClusterEndpointsHealth cluster_endpoints_health = 2; +} + +message HealthCheckRequestOrEndpointHealthResponse { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.health.v3.HealthCheckRequestOrEndpointHealthResponse"; + + oneof request_type { + HealthCheckRequest health_check_request = 1; + + EndpointHealthResponse endpoint_health_response = 2; + } +} + +message LocalityEndpoints { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.health.v3.LocalityEndpoints"; + + config.core.v4alpha.Locality locality = 1; + + repeated config.endpoint.v3.Endpoint endpoints = 2; +} + +// The cluster name and locality is provided to Envoy for the endpoints that it +// health checks to support statistics reporting, logging and debugging by the +// Envoy instance (outside of HDS). 
For maximum usefulness, it should match the +// same cluster structure as that provided by EDS. +message ClusterHealthCheck { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.health.v3.ClusterHealthCheck"; + + string cluster_name = 1; + + repeated config.core.v4alpha.HealthCheck health_checks = 2; + + repeated LocalityEndpoints locality_endpoints = 3; +} + +message HealthCheckSpecifier { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.health.v3.HealthCheckSpecifier"; + + repeated ClusterHealthCheck cluster_health_checks = 1; + + // The default is 1 second. + google.protobuf.Duration interval = 2; +} diff --git a/generated_api_shadow/envoy/service/load_stats/v2/lrs.proto b/generated_api_shadow/envoy/service/load_stats/v2/lrs.proto index a71039e7ceeb0..d8707bd62cb2a 100644 --- a/generated_api_shadow/envoy/service/load_stats/v2/lrs.proto +++ b/generated_api_shadow/envoy/service/load_stats/v2/lrs.proto @@ -66,7 +66,13 @@ message LoadStatsRequest { // [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. message LoadStatsResponse { // Clusters to report stats for. - repeated string clusters = 1 [(validate.rules).repeated = {min_items: 1}]; + // Not populated if *send_all_clusters* is true. + repeated string clusters = 1; + + // If true, the client should send all clusters it knows about. + // Only clients that advertise the "envoy.lrs.supports_send_all_clusters" capability in their + // :ref:`client_features` field will honor this field. + bool send_all_clusters = 4; // The minimum interval of time to collect stats over. This is only a minimum for two reasons: // 1. There may be some delay from when the timer fires until stats sampling occurs. 
diff --git a/generated_api_shadow/envoy/service/load_stats/v3/lrs.proto b/generated_api_shadow/envoy/service/load_stats/v3/lrs.proto index ce48574826a90..76705ba77771e 100644 --- a/generated_api_shadow/envoy/service/load_stats/v3/lrs.proto +++ b/generated_api_shadow/envoy/service/load_stats/v3/lrs.proto @@ -17,7 +17,15 @@ option java_multiple_files = true; option java_generic_services = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; -// [#protodoc-title: Load reporting service] +// [#protodoc-title: Load Reporting service (LRS)] + +// Load Reporting Service is an Envoy API to emit load reports. Envoy will initiate a bi-directional +// stream with a management server. Upon connecting, the management server can send a +// :ref:`LoadStatsResponse ` to a node it is +// interested in getting the load reports for. Envoy in this node will start sending +// :ref:`LoadStatsRequest `. This is done periodically +// based on the :ref:`load reporting interval ` +// For details, take a look at the :ref:`Load Reporting Service sandbox example `. service LoadReportingService { // Advanced API to allow for multi-dimensional load balancing by remote @@ -53,7 +61,6 @@ service LoadReportingService { } // A load report Envoy sends to the management server. -// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. message LoadStatsRequest { option (udpa.annotations.versioning).previous_message_type = "envoy.service.load_stats.v2.LoadStatsRequest"; @@ -67,15 +74,21 @@ message LoadStatsRequest { // The management server sends envoy a LoadStatsResponse with all clusters it // is interested in learning load stats about. -// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. message LoadStatsResponse { option (udpa.annotations.versioning).previous_message_type = "envoy.service.load_stats.v2.LoadStatsResponse"; // Clusters to report stats for. 
- repeated string clusters = 1 [(validate.rules).repeated = {min_items: 1}]; + // Not populated if *send_all_clusters* is true. + repeated string clusters = 1; + + // If true, the client should send all clusters it knows about. + // Only clients that advertise the "envoy.lrs.supports_send_all_clusters" capability in their + // :ref:`client_features` field will honor this field. + bool send_all_clusters = 4; // The minimum interval of time to collect stats over. This is only a minimum for two reasons: + // // 1. There may be some delay from when the timer fires until stats sampling occurs. // 2. For clusters that were already featured in the previous *LoadStatsResponse*, any traffic // that is observed in between the corresponding previous *LoadStatsRequest* and this diff --git a/generated_api_shadow/envoy/service/ratelimit/v3/rls.proto b/generated_api_shadow/envoy/service/ratelimit/v3/rls.proto index 4aad42fcaa813..42f24cfb0805c 100644 --- a/generated_api_shadow/envoy/service/ratelimit/v3/rls.proto +++ b/generated_api_shadow/envoy/service/ratelimit/v3/rls.proto @@ -5,6 +5,8 @@ package envoy.service.ratelimit.v3; import "envoy/config/core/v3/base.proto"; import "envoy/extensions/common/ratelimit/v3/ratelimit.proto"; +import "google/protobuf/duration.proto"; + import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -69,6 +71,8 @@ message RateLimitResponse { option (udpa.annotations.versioning).previous_message_type = "envoy.service.ratelimit.v2.RateLimitResponse.RateLimit"; + // Identifies the unit of time for rate limit. + // [#comment: replace by envoy/type/v3/ratelimit_unit.proto in v4] enum Unit { // The time unit is not known. UNKNOWN = 0; @@ -108,6 +112,9 @@ message RateLimitResponse { // The limit remaining in the current time unit. uint32 limit_remaining = 3; + + // Duration until reset of the current limit window. 
+ google.protobuf.Duration duration_until_reset = 4; } // The overall response code which takes into account all of the descriptors that were passed diff --git a/generated_api_shadow/envoy/service/status/v3/csds.proto b/generated_api_shadow/envoy/service/status/v3/csds.proto index 3347def21d8f9..beccfb8cb58ee 100644 --- a/generated_api_shadow/envoy/service/status/v3/csds.proto +++ b/generated_api_shadow/envoy/service/status/v3/csds.proto @@ -64,7 +64,7 @@ message ClientStatusRequest { } // Detailed config (per xDS) with status. -// [#next-free-field: 6] +// [#next-free-field: 7] message PerXdsConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.service.status.v2.PerXdsConfig"; @@ -79,6 +79,9 @@ message PerXdsConfig { admin.v3.RoutesConfigDump route_config = 4; admin.v3.ScopedRoutesConfigDump scoped_route_config = 5; + + // [#not-implemented-hide:] + admin.v3.EndpointsConfigDump endpoint_config = 6; } } diff --git a/generated_api_shadow/envoy/service/status/v4alpha/BUILD b/generated_api_shadow/envoy/service/status/v4alpha/BUILD new file mode 100644 index 0000000000000..fb238648fbca1 --- /dev/null +++ b/generated_api_shadow/envoy/service/status/v4alpha/BUILD @@ -0,0 +1,16 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + has_services = True, + deps = [ + "//envoy/admin/v4alpha:pkg", + "//envoy/config/core/v4alpha:pkg", + "//envoy/service/status/v3:pkg", + "//envoy/type/matcher/v4alpha:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/service/status/v4alpha/csds.proto b/generated_api_shadow/envoy/service/status/v4alpha/csds.proto new file mode 100644 index 0000000000000..2286eb94a8a73 --- /dev/null +++ b/generated_api_shadow/envoy/service/status/v4alpha/csds.proto @@ -0,0 +1,105 @@ +syntax = "proto3"; + +package envoy.service.status.v4alpha; + +import "envoy/admin/v4alpha/config_dump.proto"; +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/type/matcher/v4alpha/node.proto"; + +import "google/api/annotations.proto"; +import "google/protobuf/struct.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.service.status.v4alpha"; +option java_outer_classname = "CsdsProto"; +option java_multiple_files = true; +option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Client Status Discovery Service (CSDS)] + +// CSDS is Client Status Discovery Service. It can be used to get the status of +// an xDS-compliant client from the management server's point of view. In the +// future, it can potentially be used as an interface to get the current +// state directly from the client. 
+service ClientStatusDiscoveryService { + rpc StreamClientStatus(stream ClientStatusRequest) returns (stream ClientStatusResponse) { + } + + rpc FetchClientStatus(ClientStatusRequest) returns (ClientStatusResponse) { + option (google.api.http).post = "/v3/discovery:client_status"; + option (google.api.http).body = "*"; + } +} + +// Status of a config. +enum ConfigStatus { + // Status info is not available/unknown. + UNKNOWN = 0; + + // Management server has sent the config to client and received ACK. + SYNCED = 1; + + // Config is not sent. + NOT_SENT = 2; + + // Management server has sent the config to client but hasn’t received + // ACK/NACK. + STALE = 3; + + // Management server has sent the config to client but received NACK. + ERROR = 4; +} + +// Request for client status of clients identified by a list of NodeMatchers. +message ClientStatusRequest { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.status.v3.ClientStatusRequest"; + + // Management server can use these match criteria to identify clients. + // The match follows OR semantics. + repeated type.matcher.v4alpha.NodeMatcher node_matchers = 1; +} + +// Detailed config (per xDS) with status. +// [#next-free-field: 7] +message PerXdsConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.status.v3.PerXdsConfig"; + + ConfigStatus status = 1; + + oneof per_xds_config { + admin.v4alpha.ListenersConfigDump listener_config = 2; + + admin.v4alpha.ClustersConfigDump cluster_config = 3; + + admin.v4alpha.RoutesConfigDump route_config = 4; + + admin.v4alpha.ScopedRoutesConfigDump scoped_route_config = 5; + + // [#not-implemented-hide:] + admin.v4alpha.EndpointsConfigDump endpoint_config = 6; + } +} + +// All xds configs for a particular client. +message ClientConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.status.v3.ClientConfig"; + + // Node for a particular client. 
+ config.core.v4alpha.Node node = 1; + + repeated PerXdsConfig xds_config = 2; +} + +message ClientStatusResponse { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.status.v3.ClientStatusResponse"; + + // Client configs for the clients specified in the ClientStatusRequest. + repeated ClientConfig config = 1; +} diff --git a/generated_api_shadow/envoy/service/tap/v4alpha/BUILD b/generated_api_shadow/envoy/service/tap/v4alpha/BUILD new file mode 100644 index 0000000000000..5f75886cd068a --- /dev/null +++ b/generated_api_shadow/envoy/service/tap/v4alpha/BUILD @@ -0,0 +1,17 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + has_services = True, + deps = [ + "//envoy/config/core/v4alpha:pkg", + "//envoy/config/tap/v4alpha:pkg", + "//envoy/data/tap/v3:pkg", + "//envoy/service/discovery/v3:pkg", + "//envoy/service/tap/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/service/tap/v4alpha/tap.proto b/generated_api_shadow/envoy/service/tap/v4alpha/tap.proto new file mode 100644 index 0000000000000..a1654d18bebbf --- /dev/null +++ b/generated_api_shadow/envoy/service/tap/v4alpha/tap.proto @@ -0,0 +1,64 @@ +syntax = "proto3"; + +package envoy.service.tap.v4alpha; + +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/data/tap/v3/wrapper.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.service.tap.v4alpha"; +option java_outer_classname = "TapProto"; +option java_multiple_files = true; +option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Tap Sink Service] + +// [#not-implemented-hide:] A tap service to receive 
incoming taps. Envoy will call +// StreamTaps to deliver captured taps to the server +service TapSinkService { + // Envoy will connect and send StreamTapsRequest messages forever. It does not expect any + // response to be sent as nothing would be done in the case of failure. The server should + // disconnect if it expects Envoy to reconnect. + rpc StreamTaps(stream StreamTapsRequest) returns (StreamTapsResponse) { + } +} + +// [#not-implemented-hide:] Stream message for the Tap API. Envoy will open a stream to the server +// and stream taps without ever expecting a response. +message StreamTapsRequest { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.tap.v3.StreamTapsRequest"; + + message Identifier { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.tap.v3.StreamTapsRequest.Identifier"; + + // The node sending taps over the stream. + config.core.v4alpha.Node node = 1 [(validate.rules).message = {required: true}]; + + // The opaque identifier that was set in the :ref:`output config + // `. + string tap_id = 2; + } + + // Identifier data effectively is a structured metadata. As a performance optimization this will + // only be sent in the first message on the stream. + Identifier identifier = 1; + + // The trace id. this can be used to merge together a streaming trace. Note that the trace_id + // is not guaranteed to be spatially or temporally unique. + uint64 trace_id = 2; + + // The trace data. 
+ data.tap.v3.TraceWrapper trace = 3; +} + +// [#not-implemented-hide:] +message StreamTapsResponse { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.tap.v3.StreamTapsResponse"; +} diff --git a/generated_api_shadow/envoy/service/tap/v4alpha/tapds.proto b/generated_api_shadow/envoy/service/tap/v4alpha/tapds.proto new file mode 100644 index 0000000000000..855fde8c8e63f --- /dev/null +++ b/generated_api_shadow/envoy/service/tap/v4alpha/tapds.proto @@ -0,0 +1,48 @@ +syntax = "proto3"; + +package envoy.service.tap.v4alpha; + +import "envoy/config/tap/v4alpha/common.proto"; +import "envoy/service/discovery/v3/discovery.proto"; + +import "google/api/annotations.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.service.tap.v4alpha"; +option java_outer_classname = "TapdsProto"; +option java_multiple_files = true; +option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Tap discovery service] + +// [#not-implemented-hide:] Tap discovery service. +service TapDiscoveryService { + rpc StreamTapConfigs(stream discovery.v3.DiscoveryRequest) + returns (stream discovery.v3.DiscoveryResponse) { + } + + rpc DeltaTapConfigs(stream discovery.v3.DeltaDiscoveryRequest) + returns (stream discovery.v3.DeltaDiscoveryResponse) { + } + + rpc FetchTapConfigs(discovery.v3.DiscoveryRequest) returns (discovery.v3.DiscoveryResponse) { + option (google.api.http).post = "/v3/discovery:tap_configs"; + option (google.api.http).body = "*"; + } +} + +// [#not-implemented-hide:] A tap resource is essentially a tap configuration with a name +// The filter TapDS config references this name. +message TapResource { + option (udpa.annotations.versioning).previous_message_type = "envoy.service.tap.v3.TapResource"; + + // The name of the tap configuration. 
+ string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Tap config to apply + config.tap.v4alpha.TapConfig config = 2; +} diff --git a/generated_api_shadow/envoy/type/matcher/regex.proto b/generated_api_shadow/envoy/type/matcher/regex.proto index 78b4a2c1d61e6..b23c0bff30750 100644 --- a/generated_api_shadow/envoy/type/matcher/regex.proto +++ b/generated_api_shadow/envoy/type/matcher/regex.proto @@ -19,12 +19,25 @@ message RegexMatcher { // Google's `RE2 `_ regex engine. The regex string must adhere to // the documented `syntax `_. The engine is designed // to complete execution in linear time as well as limit the amount of memory used. + // + // Envoy supports program size checking via runtime. The runtime keys `re2.max_program_size.error_level` + // and `re2.max_program_size.warn_level` can be set to integers as the maximum program size or + // complexity that a compiled regex can have before an exception is thrown or a warning is + // logged, respectively. `re2.max_program_size.error_level` defaults to 100, and + // `re2.max_program_size.warn_level` has no default if unset (will not check/log a warning). + // + // Envoy emits two stats for tracking the program size of regexes: the histogram `re2.program_size`, + // which records the program size, and the counter `re2.exceeded_warn_level`, which is incremented + // each time the program size exceeds the warn level threshold. message GoogleRE2 { // This field controls the RE2 "program size" which is a rough estimate of how complex a // compiled regex is to evaluate. A regex that has a program size greater than the configured // value will fail to compile. In this case, the configured max program size can be increased // or the regex can be simplified. If not specified, the default is 100. - google.protobuf.UInt32Value max_program_size = 1; + // + // This field is deprecated; regexp validation should be performed on the management server + // instead of being done by each individual client. 
+ google.protobuf.UInt32Value max_program_size = 1 [deprecated = true]; } oneof engine_type { diff --git a/generated_api_shadow/envoy/type/matcher/v3/regex.proto b/generated_api_shadow/envoy/type/matcher/v3/regex.proto index 1b10df3ff1bac..6087c6f90fadf 100644 --- a/generated_api_shadow/envoy/type/matcher/v3/regex.proto +++ b/generated_api_shadow/envoy/type/matcher/v3/regex.proto @@ -22,6 +22,16 @@ message RegexMatcher { // Google's `RE2 `_ regex engine. The regex string must adhere to // the documented `syntax `_. The engine is designed // to complete execution in linear time as well as limit the amount of memory used. + // + // Envoy supports program size checking via runtime. The runtime keys `re2.max_program_size.error_level` + // and `re2.max_program_size.warn_level` can be set to integers as the maximum program size or + // complexity that a compiled regex can have before an exception is thrown or a warning is + // logged, respectively. `re2.max_program_size.error_level` defaults to 100, and + // `re2.max_program_size.warn_level` has no default if unset (will not check/log a warning). + // + // Envoy emits two stats for tracking the program size of regexes: the histogram `re2.program_size`, + // which records the program size, and the counter `re2.exceeded_warn_level`, which is incremented + // each time the program size exceeds the warn level threshold. message GoogleRE2 { option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.RegexMatcher.GoogleRE2"; @@ -30,18 +40,21 @@ message RegexMatcher { // compiled regex is to evaluate. A regex that has a program size greater than the configured // value will fail to compile. In this case, the configured max program size can be increased // or the regex can be simplified. If not specified, the default is 100. 
- google.protobuf.UInt32Value max_program_size = 1; + // + // This field is deprecated; regexp validation should be performed on the management server + // instead of being done by each individual client. + google.protobuf.UInt32Value max_program_size = 1 [deprecated = true]; } - // Google's RE2 regex engine. - string regex = 2 [(validate.rules).string = {min_bytes: 1}]; - oneof engine_type { option (validate.required) = true; - // The regex match string. The string must be supported by the configured engine. + // Google's RE2 regex engine. GoogleRE2 google_re2 = 1 [(validate.rules).message = {required: true}]; } + + // The regex match string. The string must be supported by the configured engine. + string regex = 2 [(validate.rules).string = {min_bytes: 1}]; } // Describes how to match a string and then produce a new string using a regular @@ -59,7 +72,7 @@ message RegexMatchAndSubstitute { // so as to replace just one occurrence of a pattern. Capture groups can be // used in the pattern to extract portions of the subject string, and then // referenced in the substitution string. - RegexMatcher pattern = 1; + RegexMatcher pattern = 1 [(validate.rules).message = {required: true}]; // The string that should be substituted into matching portions of the // subject string during a substitution operation to produce a new string. diff --git a/generated_api_shadow/envoy/type/matcher/v3/string.proto b/generated_api_shadow/envoy/type/matcher/v3/string.proto index 2f9d43de40dcd..1c55202a7b778 100644 --- a/generated_api_shadow/envoy/type/matcher/v3/string.proto +++ b/generated_api_shadow/envoy/type/matcher/v3/string.proto @@ -21,23 +21,23 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; message StringMatcher { option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.StringMatcher"; - // The input string must match exactly the string specified here. - // - // Examples: - // - // * *abc* only matches the value *abc*. 
- bool ignore_case = 6; - oneof match_pattern { option (validate.required) = true; + // The input string must match exactly the string specified here. + // + // Examples: + // + // * *abc* only matches the value *abc*. + string exact = 1; + // The input string must have the prefix specified here. // Note: empty prefix is not allowed, please use regex instead. // // Examples: // // * *abc* matches the value *abc.xyz* - string exact = 1; + string prefix = 2 [(validate.rules).string = {min_bytes: 1}]; // The input string must have the suffix specified here. // Note: empty prefix is not allowed, please use regex instead. @@ -45,14 +45,9 @@ message StringMatcher { // Examples: // // * *abc* matches the value *xyz.abc* - string prefix = 2 [(validate.rules).string = {min_bytes: 1}]; - - // The input string must match the regular expression specified here. string suffix = 3 [(validate.rules).string = {min_bytes: 1}]; - // If true, indicates the exact/prefix/suffix matching should be case insensitive. This has no - // effect for the safe_regex match. - // For example, the matcher *data* will match both input string *Data* and *data* if set to true. + // The input string must match the regular expression specified here. RegexMatcher safe_regex = 5 [(validate.rules).message = {required: true}]; string hidden_envoy_deprecated_regex = 4 [ @@ -61,6 +56,11 @@ message StringMatcher { (envoy.annotations.disallowed_by_default) = true ]; } + + // If true, indicates the exact/prefix/suffix matching should be case insensitive. This has no + // effect for the safe_regex match. + // For example, the matcher *data* will match both input string *Data* and *data* if set to true. + bool ignore_case = 6; } // Specifies a list of ways to match a string. 
diff --git a/generated_api_shadow/envoy/type/matcher/v4alpha/BUILD b/generated_api_shadow/envoy/type/matcher/v4alpha/BUILD new file mode 100644 index 0000000000000..e63f52b2baa50 --- /dev/null +++ b/generated_api_shadow/envoy/type/matcher/v4alpha/BUILD @@ -0,0 +1,14 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/annotations:pkg", + "//envoy/type/matcher/v3:pkg", + "//envoy/type/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/type/matcher/v4alpha/metadata.proto b/generated_api_shadow/envoy/type/matcher/v4alpha/metadata.proto new file mode 100644 index 0000000000000..8abe14e7b6673 --- /dev/null +++ b/generated_api_shadow/envoy/type/matcher/v4alpha/metadata.proto @@ -0,0 +1,105 @@ +syntax = "proto3"; + +package envoy.type.matcher.v4alpha; + +import "envoy/type/matcher/v4alpha/value.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.type.matcher.v4alpha"; +option java_outer_classname = "MetadataProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Metadata matcher] + +// MetadataMatcher provides a general interface to check if a given value is matched in +// :ref:`Metadata `. It uses `filter` and `path` to retrieve the value +// from the Metadata and then check if it's matched to the specified value. +// +// For example, for the following Metadata: +// +// .. 
code-block:: yaml +// +// filter_metadata: +// envoy.filters.http.rbac: +// fields: +// a: +// struct_value: +// fields: +// b: +// struct_value: +// fields: +// c: +// string_value: pro +// t: +// list_value: +// values: +// - string_value: m +// - string_value: n +// +// The following MetadataMatcher is matched as the path [a, b, c] will retrieve a string value "pro" +// from the Metadata which is matched to the specified prefix match. +// +// .. code-block:: yaml +// +// filter: envoy.filters.http.rbac +// path: +// - key: a +// - key: b +// - key: c +// value: +// string_match: +// prefix: pr +// +// The following MetadataMatcher is matched as the code will match one of the string values in the +// list at the path [a, t]. +// +// .. code-block:: yaml +// +// filter: envoy.filters.http.rbac +// path: +// - key: a +// - key: t +// value: +// list_match: +// one_of: +// string_match: +// exact: m +// +// An example use of MetadataMatcher is specifying additional metadata in envoy.filters.http.rbac to +// enforce access control based on dynamic metadata in a request. See :ref:`Permission +// ` and :ref:`Principal +// `. + +// [#next-major-version: MetadataMatcher should use StructMatcher] +message MetadataMatcher { + option (udpa.annotations.versioning).previous_message_type = + "envoy.type.matcher.v3.MetadataMatcher"; + + // Specifies the segment in a path to retrieve value from Metadata. + // Note: Currently it's not supported to retrieve a value from a list in Metadata. This means that + // if the segment key refers to a list, it has to be the last segment in a path. + message PathSegment { + option (udpa.annotations.versioning).previous_message_type = + "envoy.type.matcher.v3.MetadataMatcher.PathSegment"; + + oneof segment { + option (validate.required) = true; + + // If specified, use the key to retrieve the value in a Struct. + string key = 1 [(validate.rules).string = {min_bytes: 1}]; + } + } + + // The filter name to retrieve the Struct from the Metadata. 
+ string filter = 1 [(validate.rules).string = {min_bytes: 1}]; + + // The path to retrieve the Value from the Struct. + repeated PathSegment path = 2 [(validate.rules).repeated = {min_items: 1}]; + + // The MetadataMatcher is matched if the value retrieved by path is matched to this value. + ValueMatcher value = 3 [(validate.rules).message = {required: true}]; +} diff --git a/generated_api_shadow/envoy/type/matcher/v4alpha/node.proto b/generated_api_shadow/envoy/type/matcher/v4alpha/node.proto new file mode 100644 index 0000000000000..a74bf808f05ae --- /dev/null +++ b/generated_api_shadow/envoy/type/matcher/v4alpha/node.proto @@ -0,0 +1,28 @@ +syntax = "proto3"; + +package envoy.type.matcher.v4alpha; + +import "envoy/type/matcher/v4alpha/string.proto"; +import "envoy/type/matcher/v4alpha/struct.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.type.matcher.v4alpha"; +option java_outer_classname = "NodeProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Node matcher] + +// Specifies the way to match a Node. +// The match follows AND semantics. +message NodeMatcher { + option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.v3.NodeMatcher"; + + // Specifies match criteria on the node id. + StringMatcher node_id = 1; + + // Specifies match criteria on the node metadata. 
+ repeated StructMatcher node_metadatas = 2; +} diff --git a/generated_api_shadow/envoy/type/matcher/v4alpha/number.proto b/generated_api_shadow/envoy/type/matcher/v4alpha/number.proto new file mode 100644 index 0000000000000..b168af19ab50c --- /dev/null +++ b/generated_api_shadow/envoy/type/matcher/v4alpha/number.proto @@ -0,0 +1,33 @@ +syntax = "proto3"; + +package envoy.type.matcher.v4alpha; + +import "envoy/type/v3/range.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.type.matcher.v4alpha"; +option java_outer_classname = "NumberProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Number matcher] + +// Specifies the way to match a double value. +message DoubleMatcher { + option (udpa.annotations.versioning).previous_message_type = + "envoy.type.matcher.v3.DoubleMatcher"; + + oneof match_pattern { + option (validate.required) = true; + + // If specified, the input double value must be in the range specified here. + // Note: The range is using half-open interval semantics [start, end). + v3.DoubleRange range = 1; + + // If specified, the input double value must be equal to the value specified here. 
+ double exact = 2; + } +} diff --git a/generated_api_shadow/envoy/type/matcher/v4alpha/path.proto b/generated_api_shadow/envoy/type/matcher/v4alpha/path.proto new file mode 100644 index 0000000000000..9150939bf2eed --- /dev/null +++ b/generated_api_shadow/envoy/type/matcher/v4alpha/path.proto @@ -0,0 +1,30 @@ +syntax = "proto3"; + +package envoy.type.matcher.v4alpha; + +import "envoy/type/matcher/v4alpha/string.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.type.matcher.v4alpha"; +option java_outer_classname = "PathProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Path matcher] + +// Specifies the way to match a path on HTTP request. +message PathMatcher { + option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.v3.PathMatcher"; + + oneof rule { + option (validate.required) = true; + + // The `path` must match the URL path portion of the :path header. The query and fragment + // string (if present) are removed in the URL path portion. + // For example, the path */data* will match the *:path* header */data#fragment?param=value*. 
+ StringMatcher path = 1 [(validate.rules).message = {required: true}]; + } +} diff --git a/generated_api_shadow/envoy/type/matcher/v4alpha/regex.proto b/generated_api_shadow/envoy/type/matcher/v4alpha/regex.proto new file mode 100644 index 0000000000000..f646147287330 --- /dev/null +++ b/generated_api_shadow/envoy/type/matcher/v4alpha/regex.proto @@ -0,0 +1,87 @@ +syntax = "proto3"; + +package envoy.type.matcher.v4alpha; + +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.type.matcher.v4alpha"; +option java_outer_classname = "RegexProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Regex matcher] + +// A regex matcher designed for safety when used with untrusted input. +message RegexMatcher { + option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.v3.RegexMatcher"; + + // Google's `RE2 `_ regex engine. The regex string must adhere to + // the documented `syntax `_. The engine is designed + // to complete execution in linear time as well as limit the amount of memory used. + // + // Envoy supports program size checking via runtime. The runtime keys `re2.max_program_size.error_level` + // and `re2.max_program_size.warn_level` can be set to integers as the maximum program size or + // complexity that a compiled regex can have before an exception is thrown or a warning is + // logged, respectively. `re2.max_program_size.error_level` defaults to 100, and + // `re2.max_program_size.warn_level` has no default if unset (will not check/log a warning). 
+ // + // Envoy emits two stats for tracking the program size of regexes: the histogram `re2.program_size`, + // which records the program size, and the counter `re2.exceeded_warn_level`, which is incremented + // each time the program size exceeds the warn level threshold. + message GoogleRE2 { + option (udpa.annotations.versioning).previous_message_type = + "envoy.type.matcher.v3.RegexMatcher.GoogleRE2"; + + // This field controls the RE2 "program size" which is a rough estimate of how complex a + // compiled regex is to evaluate. A regex that has a program size greater than the configured + // value will fail to compile. In this case, the configured max program size can be increased + // or the regex can be simplified. If not specified, the default is 100. + // + // This field is deprecated; regexp validation should be performed on the management server + // instead of being done by each individual client. + google.protobuf.UInt32Value hidden_envoy_deprecated_max_program_size = 1 [deprecated = true]; + } + + oneof engine_type { + option (validate.required) = true; + + // Google's RE2 regex engine. + GoogleRE2 google_re2 = 1 [(validate.rules).message = {required: true}]; + } + + // The regex match string. The string must be supported by the configured engine. + string regex = 2 [(validate.rules).string = {min_bytes: 1}]; +} + +// Describes how to match a string and then produce a new string using a regular +// expression and a substitution string. +message RegexMatchAndSubstitute { + option (udpa.annotations.versioning).previous_message_type = + "envoy.type.matcher.v3.RegexMatchAndSubstitute"; + + // The regular expression used to find portions of a string (hereafter called + // the "subject string") that should be replaced. When a new string is + // produced during the substitution operation, the new string is initially + // the same as the subject string, but then all matches in the subject string + // are replaced by the substitution string. 
If replacing all matches isn't + // desired, regular expression anchors can be used to ensure a single match, + // so as to replace just one occurrence of a pattern. Capture groups can be + // used in the pattern to extract portions of the subject string, and then + // referenced in the substitution string. + RegexMatcher pattern = 1 [(validate.rules).message = {required: true}]; + + // The string that should be substituted into matching portions of the + // subject string during a substitution operation to produce a new string. + // Capture groups in the pattern can be referenced in the substitution + // string. Note, however, that the syntax for referring to capture groups is + // defined by the chosen regular expression engine. Google's `RE2 + // `_ regular expression engine uses a + // backslash followed by the capture group number to denote a numbered + // capture group. E.g., ``\1`` refers to capture group 1, and ``\2`` refers + // to capture group 2. + string substitution = 2; +} diff --git a/generated_api_shadow/envoy/type/matcher/v4alpha/string.proto b/generated_api_shadow/envoy/type/matcher/v4alpha/string.proto new file mode 100644 index 0000000000000..8ce0b12f9e2a7 --- /dev/null +++ b/generated_api_shadow/envoy/type/matcher/v4alpha/string.proto @@ -0,0 +1,71 @@ +syntax = "proto3"; + +package envoy.type.matcher.v4alpha; + +import "envoy/type/matcher/v4alpha/regex.proto"; + +import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.type.matcher.v4alpha"; +option java_outer_classname = "StringProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: String matcher] + +// Specifies the way to match a string. 
+// [#next-free-field: 7] +message StringMatcher { + option (udpa.annotations.versioning).previous_message_type = + "envoy.type.matcher.v3.StringMatcher"; + + reserved 4; + + reserved "regex"; + + oneof match_pattern { + option (validate.required) = true; + + // The input string must match exactly the string specified here. + // + // Examples: + // + // * *abc* only matches the value *abc*. + string exact = 1; + + // The input string must have the prefix specified here. + // Note: empty prefix is not allowed, please use regex instead. + // + // Examples: + // + // * *abc* matches the value *abc.xyz* + string prefix = 2 [(validate.rules).string = {min_bytes: 1}]; + + // The input string must have the suffix specified here. + // Note: empty prefix is not allowed, please use regex instead. + // + // Examples: + // + // * *abc* matches the value *xyz.abc* + string suffix = 3 [(validate.rules).string = {min_bytes: 1}]; + + // The input string must match the regular expression specified here. + RegexMatcher safe_regex = 5 [(validate.rules).message = {required: true}]; + } + + // If true, indicates the exact/prefix/suffix matching should be case insensitive. This has no + // effect for the safe_regex match. + // For example, the matcher *data* will match both input string *Data* and *data* if set to true. + bool ignore_case = 6; +} + +// Specifies a list of ways to match a string. 
+message ListStringMatcher { + option (udpa.annotations.versioning).previous_message_type = + "envoy.type.matcher.v3.ListStringMatcher"; + + repeated StringMatcher patterns = 1 [(validate.rules).repeated = {min_items: 1}]; +} diff --git a/generated_api_shadow/envoy/type/matcher/v4alpha/struct.proto b/generated_api_shadow/envoy/type/matcher/v4alpha/struct.proto new file mode 100644 index 0000000000000..643cc5a475708 --- /dev/null +++ b/generated_api_shadow/envoy/type/matcher/v4alpha/struct.proto @@ -0,0 +1,91 @@ +syntax = "proto3"; + +package envoy.type.matcher.v4alpha; + +import "envoy/type/matcher/v4alpha/value.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.type.matcher.v4alpha"; +option java_outer_classname = "StructProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Struct matcher] + +// StructMatcher provides a general interface to check if a given value is matched in +// google.protobuf.Struct. It uses `path` to retrieve the value +// from the struct and then check if it's matched to the specified value. +// +// For example, for the following Struct: +// +// .. code-block:: yaml +// +// fields: +// a: +// struct_value: +// fields: +// b: +// struct_value: +// fields: +// c: +// string_value: pro +// t: +// list_value: +// values: +// - string_value: m +// - string_value: n +// +// The following MetadataMatcher is matched as the path [a, b, c] will retrieve a string value "pro" +// from the Metadata which is matched to the specified prefix match. +// +// .. code-block:: yaml +// +// path: +// - key: a +// - key: b +// - key: c +// value: +// string_match: +// prefix: pr +// +// The following StructMatcher is matched as the code will match one of the string values in the +// list at the path [a, t]. +// +// .. 
code-block:: yaml +// +// path: +// - key: a +// - key: t +// value: +// list_match: +// one_of: +// string_match: +// exact: m +// +// An example use of StructMatcher is to match metadata in envoy.v*.core.Node. +message StructMatcher { + option (udpa.annotations.versioning).previous_message_type = + "envoy.type.matcher.v3.StructMatcher"; + + // Specifies the segment in a path to retrieve value from Struct. + message PathSegment { + option (udpa.annotations.versioning).previous_message_type = + "envoy.type.matcher.v3.StructMatcher.PathSegment"; + + oneof segment { + option (validate.required) = true; + + // If specified, use the key to retrieve the value in a Struct. + string key = 1 [(validate.rules).string = {min_bytes: 1}]; + } + } + + // The path to retrieve the Value from the Struct. + repeated PathSegment path = 2 [(validate.rules).repeated = {min_items: 1}]; + + // The StructMatcher is matched if the value retrieved by path is matched to this value. + ValueMatcher value = 3 [(validate.rules).message = {required: true}]; +} diff --git a/generated_api_shadow/envoy/type/matcher/v4alpha/value.proto b/generated_api_shadow/envoy/type/matcher/v4alpha/value.proto new file mode 100644 index 0000000000000..6e509d4601099 --- /dev/null +++ b/generated_api_shadow/envoy/type/matcher/v4alpha/value.proto @@ -0,0 +1,71 @@ +syntax = "proto3"; + +package envoy.type.matcher.v4alpha; + +import "envoy/type/matcher/v4alpha/number.proto"; +import "envoy/type/matcher/v4alpha/string.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.type.matcher.v4alpha"; +option java_outer_classname = "ValueProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Value matcher] + +// Specifies the way to match a ProtobufWkt::Value. 
Primitive values and ListValue are supported. +// StructValue is not supported and is always not matched. +// [#next-free-field: 7] +message ValueMatcher { + option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.v3.ValueMatcher"; + + // NullMatch is an empty message to specify a null value. + message NullMatch { + option (udpa.annotations.versioning).previous_message_type = + "envoy.type.matcher.v3.ValueMatcher.NullMatch"; + } + + // Specifies how to match a value. + oneof match_pattern { + option (validate.required) = true; + + // If specified, a match occurs if and only if the target value is a NullValue. + NullMatch null_match = 1; + + // If specified, a match occurs if and only if the target value is a double value and is + // matched to this field. + DoubleMatcher double_match = 2; + + // If specified, a match occurs if and only if the target value is a string value and is + // matched to this field. + StringMatcher string_match = 3; + + // If specified, a match occurs if and only if the target value is a bool value and is equal + // to this field. + bool bool_match = 4; + + // If specified, value match will be performed based on whether the path is referring to a + // valid primitive value in the metadata. If the path is referring to a non-primitive value, + // the result is always not matched. + bool present_match = 5; + + // If specified, a match occurs if and only if the target value is a list value and + // is matched to this field. + ListMatcher list_match = 6; + } +} + +// Specifies the way to match a list value. +message ListMatcher { + option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.v3.ListMatcher"; + + oneof match_pattern { + option (validate.required) = true; + + // If specified, at least one of the values in the list must match the value specified. 
+ ValueMatcher one_of = 1; + } +} diff --git a/generated_api_shadow/envoy/type/v3/ratelimit_unit.proto b/generated_api_shadow/envoy/type/v3/ratelimit_unit.proto new file mode 100644 index 0000000000000..a3fb27ff47ba0 --- /dev/null +++ b/generated_api_shadow/envoy/type/v3/ratelimit_unit.proto @@ -0,0 +1,30 @@ +syntax = "proto3"; + +package envoy.type.v3; + +import "udpa/annotations/status.proto"; + +option java_package = "io.envoyproxy.envoy.type.v3"; +option java_outer_classname = "RatelimitUnitProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Ratelimit Time Unit] + +// Identifies the unit of of time for rate limit. +enum RateLimitUnit { + // The time unit is not known. + UNKNOWN = 0; + + // The time unit representing a second. + SECOND = 1; + + // The time unit representing a minute. + MINUTE = 2; + + // The time unit representing an hour. + HOUR = 3; + + // The time unit representing a day. + DAY = 4; +} diff --git a/include/envoy/access_log/BUILD b/include/envoy/access_log/BUILD index 991715a6a830e..c2ba9dba547b1 100644 --- a/include/envoy/access_log/BUILD +++ b/include/envoy/access_log/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/include/envoy/access_log/access_log.h b/include/envoy/access_log/access_log.h index ec58fb541f967..eb84ff64cebce 100644 --- a/include/envoy/access_log/access_log.h +++ b/include/envoy/access_log/access_log.h @@ -67,7 +67,7 @@ class Filter { virtual bool evaluate(const StreamInfo::StreamInfo& info, const Http::RequestHeaderMap& request_headers, const Http::ResponseHeaderMap& response_headers, - const Http::ResponseTrailerMap& response_trailers) PURE; + const Http::ResponseTrailerMap& response_trailers) const PURE; }; using FilterPtr = std::unique_ptr; @@ -95,66 +95,5 @@ class 
Instance { using InstanceSharedPtr = std::shared_ptr; -/** - * Interface for access log formatter. - * Formatters provide a complete access log output line for the given headers/trailers/stream. - */ -class Formatter { -public: - virtual ~Formatter() = default; - - /** - * Return a formatted access log line. - * @param request_headers supplies the request headers. - * @param response_headers supplies the response headers. - * @param response_trailers supplies the response trailers. - * @param stream_info supplies the stream info. - * @return std::string string containing the complete formatted access log line. - */ - virtual std::string format(const Http::RequestHeaderMap& request_headers, - const Http::ResponseHeaderMap& response_headers, - const Http::ResponseTrailerMap& response_trailers, - const StreamInfo::StreamInfo& stream_info) const PURE; -}; - -using FormatterPtr = std::unique_ptr; - -/** - * Interface for access log provider. - * FormatterProviders extract information from the given headers/trailers/stream. - */ -class FormatterProvider { -public: - virtual ~FormatterProvider() = default; - - /** - * Extract a value from the provided headers/trailers/stream. - * @param request_headers supplies the request headers. - * @param response_headers supplies the response headers. - * @param response_trailers supplies the response trailers. - * @param stream_info supplies the stream info. - * @return std::string containing a single value extracted from the given headers/trailers/stream. - */ - virtual std::string format(const Http::RequestHeaderMap& request_headers, - const Http::ResponseHeaderMap& response_headers, - const Http::ResponseTrailerMap& response_trailers, - const StreamInfo::StreamInfo& stream_info) const PURE; - /** - * Extract a value from the provided headers/trailers/stream, preserving the value's type. - * @param request_headers supplies the request headers. - * @param response_headers supplies the response headers. 
- * @param response_trailers supplies the response trailers. - * @param stream_info supplies the stream info. - * @return ProtobufWkt::Value containing a single value extracted from the given - * headers/trailers/stream. - */ - virtual ProtobufWkt::Value formatValue(const Http::RequestHeaderMap& request_headers, - const Http::ResponseHeaderMap& response_headers, - const Http::ResponseTrailerMap& response_trailers, - const StreamInfo::StreamInfo& stream_info) const PURE; -}; - -using FormatterProviderPtr = std::unique_ptr; - } // namespace AccessLog } // namespace Envoy diff --git a/include/envoy/api/BUILD b/include/envoy/api/BUILD index 0bdbc5a875886..6855cc6b86885 100644 --- a/include/envoy/api/BUILD +++ b/include/envoy/api/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/include/envoy/api/os_sys_calls.h b/include/envoy/api/os_sys_calls.h index 9fab9c1cd01b6..071a5465d5b5a 100644 --- a/include/envoy/api/os_sys_calls.h +++ b/include/envoy/api/os_sys_calls.h @@ -62,6 +62,16 @@ class OsSysCalls { */ virtual bool supportsMmsg() const PURE; + /** + * return true if the OS supports UDP GRO. + */ + virtual bool supportsUdpGro() const PURE; + + /** + * return true if the OS supports UDP GSO + */ + virtual bool supportsUdpGso() const PURE; + /** * Release all resources allocated for fd. * @return zero on success, -1 returned otherwise. 
diff --git a/include/envoy/buffer/BUILD b/include/envoy/buffer/BUILD index e22d136b17dec..3f2880cb720ed 100644 --- a/include/envoy/buffer/BUILD +++ b/include/envoy/buffer/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/include/envoy/buffer/buffer.h b/include/envoy/buffer/buffer.h index 61d803c2d2048..6e4f52644e37a 100644 --- a/include/envoy/buffer/buffer.h +++ b/include/envoy/buffer/buffer.h @@ -16,6 +16,7 @@ #include "absl/container/inlined_vector.h" #include "absl/strings/string_view.h" #include "absl/types/optional.h" +#include "absl/types/span.h" namespace Envoy { namespace Buffer { @@ -55,6 +56,21 @@ class BufferFragment { virtual void done() PURE; }; +/** + * A class to facilitate extracting buffer slices from a buffer instance. + */ +class SliceData { +public: + virtual ~SliceData() = default; + + /** + * @return a mutable view of the slice data. + */ + virtual absl::Span getMutableData() PURE; +}; + +using SliceDataPtr = std::unique_ptr; + /** * A basic buffer abstraction. */ @@ -62,6 +78,15 @@ class Instance { public: virtual ~Instance() = default; + /** + * Register function to call when the last byte in the last slice of this + * buffer has fully drained. Note that slices may be transferred to + * downstream buffers, drain trackers are transferred along with the bytes + * they track so the function is called only after the last byte is drained + * from all buffers. + */ + virtual void addDrainTracker(std::function drain_tracker) PURE; + /** * Copy data into the buffer (deprecated, use absl::string_view variant * instead). @@ -135,6 +160,15 @@ class Instance { virtual RawSliceVector getRawSlices(absl::optional max_slices = absl::nullopt) const PURE; + /** + * Transfer ownership of the front slice to the caller. 
Must only be called if the + * buffer is not empty otherwise the implementation will have undefined behavior. + * If the underlying slice is immutable then the implementation must create and return + * a mutable slice that has a copy of the immutable data. + * @return pointer to SliceData object that wraps the front slice + */ + virtual SliceDataPtr extractMutableFrontSlice() PURE; + /** * @return uint64_t the total length of the buffer (not necessarily contiguous in memory). */ @@ -181,9 +215,22 @@ class Instance { * @param data supplies the data to search for. * @param size supplies the length of the data to search for. * @param start supplies the starting index to search from. + * @param length limits the search to specified number of bytes starting from start index. + * When length value is zero, entire length of data from starting index to the end is searched. * @return the index where the match starts or -1 if there is no match. */ - virtual ssize_t search(const void* data, uint64_t size, size_t start) const PURE; + virtual ssize_t search(const void* data, uint64_t size, size_t start, size_t length) const PURE; + + /** + * Search for an occurrence of data within entire buffer. + * @param data supplies the data to search for. + * @param size supplies the length of the data to search for. + * @param start supplies the starting index to search from. + * @return the index where the match starts or -1 if there is no match. + */ + ssize_t search(const void* data, uint64_t size, size_t start) const { + return search(data, size, start, 0); + } /** * Search for an occurrence of data at the start of a buffer. @@ -376,7 +423,8 @@ class WatermarkFactory { * @return a newly created InstancePtr. 
*/ virtual InstancePtr create(std::function below_low_watermark, - std::function above_high_watermark) PURE; + std::function above_high_watermark, + std::function above_overflow_watermark) PURE; }; using WatermarkFactoryPtr = std::unique_ptr; diff --git a/include/envoy/common/BUILD b/include/envoy/common/BUILD index 47dd8e1549ef3..13bce53792c1a 100644 --- a/include/envoy/common/BUILD +++ b/include/envoy/common/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_basic_cc_library", @@ -7,6 +5,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_basic_cc_library( @@ -22,6 +22,10 @@ envoy_basic_cc_library( envoy_cc_library( name = "conn_pool_interface", hdrs = ["conn_pool.h"], + deps = [ + "//include/envoy/event:deferred_deletable", + "//include/envoy/upstream:upstream_interface", + ], ) envoy_cc_library( @@ -29,6 +33,16 @@ envoy_cc_library( hdrs = ["mutex_tracer.h"], ) +envoy_cc_library( + name = "random_generator_interface", + hdrs = ["random_generator.h"], +) + +envoy_cc_library( + name = "resource_interface", + hdrs = ["resource.h"], +) + envoy_cc_library( name = "time_interface", hdrs = ["time.h"], diff --git a/include/envoy/common/conn_pool.h b/include/envoy/common/conn_pool.h index c8a988b547943..9e42b40255380 100644 --- a/include/envoy/common/conn_pool.h +++ b/include/envoy/common/conn_pool.h @@ -1,8 +1,75 @@ #pragma once +#include "envoy/common/pure.h" +#include "envoy/event/deferred_deletable.h" +#include "envoy/upstream/upstream.h" + namespace Envoy { namespace ConnectionPool { +/** + * Controls the behavior of a canceled request. + */ +enum class CancelPolicy { + // By default, canceled requests allow a pending connection to complete and become + // available for a future request. + Default, + // When a request is canceled, closes a pending connection if there will still be sufficient + // connections to serve pending requests. 
CloseExcess is largely useful for callers that never + // re-use connections (e.g. by closing rather than releasing connections). Using CloseExcess in + // this situation guarantees that no idle connections will be held open by the conn pool awaiting + // a connection request. + CloseExcess, +}; + +/** + * Handle that allows a pending connection or stream request to be canceled before it is completed. + */ +class Cancellable { +public: + virtual ~Cancellable() = default; + + /** + * Cancel the pending connection or stream request. + * @param cancel_policy a CancelPolicy that controls the behavior of this cancellation. + */ + virtual void cancel(CancelPolicy cancel_policy) PURE; +}; + +/** + * An instance of a generic connection pool. + */ +class Instance { +public: + virtual ~Instance() = default; + + /** + * Called when a connection pool has been drained of pending requests, busy connections, and + * ready connections. + */ + using DrainedCb = std::function; + + /** + * Register a callback that gets called when the connection pool is fully drained. No actual + * draining is done. The owner of the connection pool is responsible for not creating any + * new streams. + */ + virtual void addDrainedCallback(DrainedCb cb) PURE; + + /** + * Actively drain all existing connection pool connections. This method can be used in cases + * where the connection pool is not being destroyed, but the caller wishes to make sure that + * all new streams take place on a new connection. For example, when a health check failure + * occurs. + */ + virtual void drainConnections() PURE; + + /** + * @return Upstream::HostDescriptionConstSharedPtr the host for which connections are pooled. + */ + virtual Upstream::HostDescriptionConstSharedPtr host() const PURE; +}; + enum class PoolFailureReason { // A resource overflowed and policy prevented a new connection from being created. 
Overflow, diff --git a/include/envoy/common/crypto/BUILD b/include/envoy/common/crypto/BUILD index 80d0fbb3971c8..db3e738b80ffa 100644 --- a/include/envoy/common/crypto/BUILD +++ b/include/envoy/common/crypto/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/include/envoy/common/platform.h b/include/envoy/common/platform.h index 1d7c58fd3b2a8..71e0795c9a552 100644 --- a/include/envoy/common/platform.h +++ b/include/envoy/common/platform.h @@ -53,13 +53,13 @@ __pragma(pack(push, 1)) definition, ##__VA_ARGS__; \ __pragma(pack(pop)) -using ssize_t = ptrdiff_t; +typedef ptrdiff_t ssize_t; // This is needed so the OsSysCalls interface compiles on Windows, // shmOpen takes mode_t as an argument. -using mode_t = uint32_t; +typedef uint32_t mode_t; -using os_fd_t = SOCKET; +typedef SOCKET os_fd_t; typedef unsigned int sa_family_t; @@ -125,6 +125,23 @@ struct msghdr { #define ENVOY_SHUT_WR SD_SEND #define ENVOY_SHUT_RDWR SD_BOTH +// winsock2 functions return distinct set of error codes, disjoint from POSIX errors (that are +// also available on Windows and set by POSIX function invocations). Here we map winsock2 error +// codes with platform agnostic macros that correspond to the same or roughly similar errors on +// POSIX systems for use in cross-platform socket error handling. 
+#define SOCKET_ERROR_AGAIN WSAEWOULDBLOCK +#define SOCKET_ERROR_NOT_SUP WSAEOPNOTSUPP +#define SOCKET_ERROR_AF_NO_SUP WSAEAFNOSUPPORT +#define SOCKET_ERROR_IN_PROGRESS WSAEINPROGRESS +// winsock2 does not differentiate between PERM and ACCESS violations +#define SOCKET_ERROR_PERM WSAEACCES +#define SOCKET_ERROR_ACCESS WSAEACCES +#define SOCKET_ERROR_MSG_SIZE WSAEMSGSIZE +#define SOCKET_ERROR_INTR WSAEINTR +#define SOCKET_ERROR_ADDR_NOT_AVAIL WSAEADDRNOTAVAIL +#define SOCKET_ERROR_INVAL WSAEINVAL +#define SOCKET_ERROR_ADDR_IN_USE WSAEADDRINUSE + #else // POSIX #include @@ -133,6 +150,7 @@ struct msghdr { #include #include #include +#include // for UDP_GRO #include #include // for mode_t #include @@ -171,7 +189,19 @@ struct msghdr { #define IP6T_SO_ORIGINAL_DST 80 #endif -using os_fd_t = int; +#ifndef SOL_UDP +#define SOL_UDP 17 +#endif + +#ifndef UDP_GRO +#define UDP_GRO 104 +#endif + +#ifndef UDP_SEGMENT +#define UDP_SEGMENT 103 +#endif + +typedef int os_fd_t; #define INVALID_SOCKET -1 #define SOCKET_VALID(sock) ((sock) >= 0) @@ -184,6 +214,19 @@ using os_fd_t = int; #define ENVOY_SHUT_WR SHUT_WR #define ENVOY_SHUT_RDWR SHUT_RDWR +// Mapping POSIX socket errors to common error names +#define SOCKET_ERROR_AGAIN EAGAIN +#define SOCKET_ERROR_NOT_SUP ENOTSUP +#define SOCKET_ERROR_AF_NO_SUP EAFNOSUPPORT +#define SOCKET_ERROR_IN_PROGRESS EINPROGRESS +#define SOCKET_ERROR_PERM EPERM +#define SOCKET_ERROR_ACCESS EACCES +#define SOCKET_ERROR_MSG_SIZE EMSGSIZE +#define SOCKET_ERROR_INTR EINTR +#define SOCKET_ERROR_ADDR_NOT_AVAIL EADDRNOTAVAIL +#define SOCKET_ERROR_INVAL EINVAL +#define SOCKET_ERROR_ADDR_IN_USE EADDRINUSE + #endif // Note: chromium disabled recvmmsg regardless of ndk version. 
However, the only Android target @@ -205,3 +248,28 @@ struct mmsghdr { unsigned int msg_len; }; #endif + +#define SUPPORTS_GETIFADDRS +#ifdef WIN32 +#undef SUPPORTS_GETIFADDRS +#endif + +// https://android.googlesource.com/platform/prebuilts/ndk/+/dev/platform/sysroot/usr/include/ifaddrs.h +#ifdef __ANDROID_API__ +#if __ANDROID_API__ < 24 +#undef SUPPORTS_GETIFADDRS +#endif // __ANDROID_API__ < 24 +#endif // ifdef __ANDROID_API__ + +// https://android.googlesource.com/platform/bionic/+/master/docs/status.md +// ``pthread_getname_np`` is introduced in API 26 +#define SUPPORTS_PTHREAD_NAMING 0 +#if defined(__ANDROID_API__) +#if __ANDROID_API__ >= 26 +#undef SUPPORTS_PTHREAD_NAMING +#define SUPPORTS_PTHREAD_NAMING 1 +#endif // __ANDROID_API__ >= 26 +#elif defined(__linux__) +#undef SUPPORTS_PTHREAD_NAMING +#define SUPPORTS_PTHREAD_NAMING 1 +#endif // defined(__ANDROID_API__) diff --git a/include/envoy/common/random_generator.h b/include/envoy/common/random_generator.h new file mode 100644 index 0000000000000..90fb1b7c15432 --- /dev/null +++ b/include/envoy/common/random_generator.h @@ -0,0 +1,54 @@ +#pragma once + +#include +#include +#include + +#include "envoy/common/pure.h" + +namespace Envoy { +namespace Random { + +/** + * Random number generator. Implementations should be thread safe. + */ +class RandomGenerator { +public: + virtual ~RandomGenerator() = default; + + using result_type = uint64_t; // NOLINT(readability-identifier-naming) + + /** + * @return uint64_t a new random number. + */ + virtual result_type random() PURE; + + /* + * @return the smallest value that `operator()` may return. The value is + * strictly less than `max()`. + */ + constexpr static result_type min() noexcept { return std::numeric_limits::min(); }; + + /* + * @return the largest value that `operator()` may return. The value is + * strictly greater than `min()`. 
+ */ + constexpr static result_type max() noexcept { return std::numeric_limits::max(); }; + + /* + * @return a value in the closed interval `[min(), max()]`. Has amortized + * constant complexity. + */ + result_type operator()() { return result_type(random()); }; + + /** + * @return std::string containing uuid4 of 36 char length. + * for example, 7c25513b-0466-4558-a64c-12c6704f37ed + */ + virtual std::string uuid() PURE; +}; + +using RandomGeneratorPtr = std::unique_ptr; + +} // namespace Random +} // namespace Envoy diff --git a/include/envoy/common/resource.h b/include/envoy/common/resource.h new file mode 100644 index 0000000000000..ef65d35c71157 --- /dev/null +++ b/include/envoy/common/resource.h @@ -0,0 +1,51 @@ +#include + +#include "envoy/common/pure.h" + +#include "absl/types/optional.h" + +#pragma once + +namespace Envoy { + +/** + * A handle for use by any resource managers. + */ +class ResourceLimit { +public: + virtual ~ResourceLimit() = default; + + /** + * @return true if the resource can be created. + */ + virtual bool canCreate() PURE; + + /** + * Increment the resource count. + */ + virtual void inc() PURE; + + /** + * Decrement the resource count. + */ + virtual void dec() PURE; + + /** + * Decrement the resource count by a specific amount. + */ + virtual void decBy(uint64_t amount) PURE; + + /** + * @return the current maximum allowed number of this resource. + */ + virtual uint64_t max() PURE; + + /** + * @return the current resource count. 
+ */ + virtual uint64_t count() const PURE; +}; + +using ResourceLimitOptRef = absl::optional>; + +} // namespace Envoy diff --git a/include/envoy/compression/compressor/BUILD b/include/envoy/compression/compressor/BUILD new file mode 100644 index 0000000000000..6632229aaed46 --- /dev/null +++ b/include/envoy/compression/compressor/BUILD @@ -0,0 +1,35 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_cc_library( + name = "compressor_interface", + hdrs = ["compressor.h"], + deps = [ + "//include/envoy/buffer:buffer_interface", + ], +) + +envoy_cc_library( + name = "compressor_factory_interface", + hdrs = ["factory.h"], + deps = [ + ":compressor_interface", + ], +) + +envoy_cc_library( + name = "compressor_config_interface", + hdrs = ["config.h"], + deps = [ + ":compressor_factory_interface", + "//include/envoy/config:typed_config_interface", + "//include/envoy/server:filter_config_interface", + ], +) diff --git a/include/envoy/compressor/compressor.h b/include/envoy/compression/compressor/compressor.h similarity index 86% rename from include/envoy/compressor/compressor.h rename to include/envoy/compression/compressor/compressor.h index d25204a3ead46..f236586d4ddf5 100644 --- a/include/envoy/compressor/compressor.h +++ b/include/envoy/compression/compressor/compressor.h @@ -3,6 +3,7 @@ #include "envoy/buffer/buffer.h" namespace Envoy { +namespace Compression { namespace Compressor { /** @@ -26,5 +27,8 @@ class Compressor { virtual void compress(Buffer::Instance& buffer, State state) PURE; }; +using CompressorPtr = std::unique_ptr; + } // namespace Compressor +} // namespace Compression } // namespace Envoy diff --git a/include/envoy/compression/compressor/config.h b/include/envoy/compression/compressor/config.h new file mode 100644 index 0000000000000..3ef89c9f0d557 --- /dev/null +++ b/include/envoy/compression/compressor/config.h @@ -0,0 +1,24 @@ +#pragma once + 
+#include "envoy/compression/compressor/factory.h" +#include "envoy/config/typed_config.h" +#include "envoy/server/filter_config.h" + +namespace Envoy { +namespace Compression { +namespace Compressor { + +class NamedCompressorLibraryConfigFactory : public Config::TypedFactory { +public: + ~NamedCompressorLibraryConfigFactory() override = default; + + virtual CompressorFactoryPtr + createCompressorFactoryFromProto(const Protobuf::Message& config, + Server::Configuration::FactoryContext& context) PURE; + + std::string category() const override { return "envoy.compression.compressor"; } +}; + +} // namespace Compressor +} // namespace Compression +} // namespace Envoy diff --git a/include/envoy/compression/compressor/factory.h b/include/envoy/compression/compressor/factory.h new file mode 100644 index 0000000000000..4587e3a297b36 --- /dev/null +++ b/include/envoy/compression/compressor/factory.h @@ -0,0 +1,22 @@ +#pragma once + +#include "envoy/compression/compressor/compressor.h" + +namespace Envoy { +namespace Compression { +namespace Compressor { + +class CompressorFactory { +public: + virtual ~CompressorFactory() = default; + + virtual CompressorPtr createCompressor() PURE; + virtual const std::string& statsPrefix() const PURE; + virtual const std::string& contentEncoding() const PURE; +}; + +using CompressorFactoryPtr = std::unique_ptr; + +} // namespace Compressor +} // namespace Compression +} // namespace Envoy diff --git a/include/envoy/compression/decompressor/BUILD b/include/envoy/compression/decompressor/BUILD new file mode 100644 index 0000000000000..156d81d52356f --- /dev/null +++ b/include/envoy/compression/decompressor/BUILD @@ -0,0 +1,35 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_cc_library( + name = "decompressor_config_interface", + hdrs = ["config.h"], + deps = [ + ":decompressor_factory_interface", + 
"//include/envoy/config:typed_config_interface", + "//include/envoy/server:filter_config_interface", + ], +) + +envoy_cc_library( + name = "decompressor_factory_interface", + hdrs = ["factory.h"], + deps = [ + ":decompressor_interface", + ], +) + +envoy_cc_library( + name = "decompressor_interface", + hdrs = ["decompressor.h"], + deps = [ + "//include/envoy/buffer:buffer_interface", + ], +) diff --git a/include/envoy/compression/decompressor/config.h b/include/envoy/compression/decompressor/config.h new file mode 100644 index 0000000000000..15ecd1255d6a0 --- /dev/null +++ b/include/envoy/compression/decompressor/config.h @@ -0,0 +1,24 @@ +#pragma once + +#include "envoy/compression/decompressor/factory.h" +#include "envoy/config/typed_config.h" +#include "envoy/server/filter_config.h" + +namespace Envoy { +namespace Compression { +namespace Decompressor { + +class NamedDecompressorLibraryConfigFactory : public Config::TypedFactory { +public: + ~NamedDecompressorLibraryConfigFactory() override = default; + + virtual DecompressorFactoryPtr + createDecompressorFactoryFromProto(const Protobuf::Message& config, + Server::Configuration::FactoryContext& context) PURE; + + std::string category() const override { return "envoy.compression.decompressor"; } +}; + +} // namespace Decompressor +} // namespace Compression +} // namespace Envoy \ No newline at end of file diff --git a/include/envoy/decompressor/decompressor.h b/include/envoy/compression/decompressor/decompressor.h similarity index 84% rename from include/envoy/decompressor/decompressor.h rename to include/envoy/compression/decompressor/decompressor.h index d694aa50ca1cf..c0518a5789b3c 100644 --- a/include/envoy/decompressor/decompressor.h +++ b/include/envoy/compression/decompressor/decompressor.h @@ -3,6 +3,7 @@ #include "envoy/buffer/buffer.h" namespace Envoy { +namespace Compression { namespace Decompressor { /** @@ -21,5 +22,8 @@ class Decompressor { Buffer::Instance& output_buffer) PURE; }; +using 
DecompressorPtr = std::unique_ptr; + } // namespace Decompressor +} // namespace Compression } // namespace Envoy diff --git a/include/envoy/compression/decompressor/factory.h b/include/envoy/compression/decompressor/factory.h new file mode 100644 index 0000000000000..8e3692f56ede5 --- /dev/null +++ b/include/envoy/compression/decompressor/factory.h @@ -0,0 +1,25 @@ +#pragma once + +#include "envoy/compression/decompressor/decompressor.h" + +namespace Envoy { +namespace Compression { +namespace Decompressor { + +class DecompressorFactory { +public: + virtual ~DecompressorFactory() = default; + + virtual DecompressorPtr createDecompressor(const std::string& stats_prefix) PURE; + virtual const std::string& statsPrefix() const PURE; + // TODO(junr03): this method assumes that decompressors are used on http messages. + // A more generic method might be `hint()` which gives the user of the decompressor a hint about + // the type of decompression that it can perform. + virtual const std::string& contentEncoding() const PURE; +}; + +using DecompressorFactoryPtr = std::unique_ptr; + +} // namespace Decompressor +} // namespace Compression +} // namespace Envoy \ No newline at end of file diff --git a/include/envoy/compressor/BUILD b/include/envoy/compressor/BUILD deleted file mode 100644 index 9b3b8f43e47d0..0000000000000 --- a/include/envoy/compressor/BUILD +++ /dev/null @@ -1,17 +0,0 @@ -licenses(["notice"]) # Apache 2 - -load( - "//bazel:envoy_build_system.bzl", - "envoy_cc_library", - "envoy_package", -) - -envoy_package() - -envoy_cc_library( - name = "compressor_interface", - hdrs = ["compressor.h"], - deps = [ - "//include/envoy/buffer:buffer_interface", - ], -) diff --git a/include/envoy/config/BUILD b/include/envoy/config/BUILD index 50fca3a730079..96140621aa6b2 100644 --- a/include/envoy/config/BUILD +++ b/include/envoy/config/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) 
+licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( @@ -29,12 +29,19 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "extension_config_provider_interface", + hdrs = ["extension_config_provider.h"], + deps = ["//source/common/protobuf"], +) + envoy_cc_library( name = "grpc_mux_interface", hdrs = ["grpc_mux.h"], deps = [ ":subscription_interface", "//include/envoy/stats:stats_macros", + "//source/common/common:cleanup_lib", "//source/common/protobuf", ], ) diff --git a/include/envoy/config/extension_config_provider.h b/include/envoy/config/extension_config_provider.h new file mode 100644 index 0000000000000..5dc3ee3a65d55 --- /dev/null +++ b/include/envoy/config/extension_config_provider.h @@ -0,0 +1,57 @@ +#pragma once + +#include "envoy/common/pure.h" + +#include "common/protobuf/protobuf.h" + +#include "absl/types/optional.h" + +namespace Envoy { +namespace Config { + +using ConfigAppliedCb = std::function; + +/** + * A provider for extension configurations obtained either statically or via + * the extension configuration discovery service. Dynamically updated extension + * configurations may share subscriptions across extension config providers. + */ +template class ExtensionConfigProvider { +public: + virtual ~ExtensionConfigProvider() = default; + + /** + * Get the extension configuration resource name. + **/ + virtual const std::string& name() PURE; + + /** + * @return FactoryCallback an extension factory callback. Note that if the + * provider has not yet performed an initial configuration load and no + * default is provided, an empty optional will be returned. The factory + * callback is the latest version of the extension configuration, and should + * generally apply only to new requests and connections. + */ + virtual absl::optional config() PURE; + + /** + * Validate that the configuration is applicable in the context of the provider. 
If an exception + * is thrown by any of the config providers for an update, the extension configuration update is + * rejected. + * @param proto_config is the candidate configuration update. + * @param factory used to instantiate an extension config. + */ + virtual void validateConfig(const ProtobufWkt::Any& proto_config, Factory& factory) PURE; + + /** + * Update the provider with a new configuration. + * @param config is an extension factory callback to replace the existing configuration. + * @param version_info is the version of the new extension configuration. + * @param cb the continuation callback for a completed configuration application. + */ + virtual void onConfigUpdate(FactoryCallback config, const std::string& version_info, + ConfigAppliedCb cb) PURE; +}; + +} // namespace Config +} // namespace Envoy diff --git a/include/envoy/config/grpc_mux.h b/include/envoy/config/grpc_mux.h index ff2c8d3c7a31d..0f20aae3cfc5a 100644 --- a/include/envoy/config/grpc_mux.h +++ b/include/envoy/config/grpc_mux.h @@ -1,28 +1,34 @@ #pragma once +#include + #include "envoy/common/exception.h" #include "envoy/common/pure.h" #include "envoy/config/subscription.h" #include "envoy/stats/stats_macros.h" +#include "common/common/cleanup.h" #include "common/protobuf/protobuf.h" namespace Envoy { namespace Config { +using ScopedResume = std::unique_ptr; /** * All control plane related stats. @see stats_macros.h */ -#define ALL_CONTROL_PLANE_STATS(COUNTER, GAUGE) \ +#define ALL_CONTROL_PLANE_STATS(COUNTER, GAUGE, TEXT_READOUT) \ COUNTER(rate_limit_enforced) \ GAUGE(connected_state, NeverImport) \ - GAUGE(pending_requests, Accumulate) + GAUGE(pending_requests, Accumulate) \ + TEXT_READOUT(identifier) /** * Struct definition for all control plane stats. 
@see stats_macros.h */ struct ControlPlaneStats { - ALL_CONTROL_PLANE_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT) + ALL_CONTROL_PLANE_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT, + GENERATE_TEXT_READOUT_STRUCT) }; /** @@ -60,23 +66,23 @@ class GrpcMux { * requests may later be resumed with resume(). * @param type_url type URL corresponding to xDS API, e.g. * type.googleapis.com/envoy.api.v2.Cluster. + * + * @return a ScopedResume object, which when destructed, resumes the paused discovery requests. + * A discovery request will be sent if one would have been sent during the pause. */ - virtual void pause(const std::string& type_url) PURE; - - /** - * Resume discovery requests for a given API type. This will send a discovery request if one would - * have been sent during the pause. - * @param type_url type URL corresponding to xDS API e.g. type.googleapis.com/envoy.api.v2.Cluster - */ - virtual void resume(const std::string& type_url) PURE; + ABSL_MUST_USE_RESULT virtual ScopedResume pause(const std::string& type_url) PURE; /** - * Retrieves the current pause state as set by pause()/resume(). - * @param type_url type URL corresponding to xDS API, e.g. - * type.googleapis.com/envoy.api.v2.Cluster - * @return bool whether the API is paused. + * Pause discovery requests for given API types. This is useful when we're processing an update + * for LDS or CDS and don't want a flood of updates for RDS or EDS respectively. Discovery + * requests may later be resumed with resume(). + * @param type_urls type URLs corresponding to xDS API, e.g. + * type.googleapis.com/envoy.api.v2.Cluster. + * + * @return a ScopedResume object, which when destructed, resumes the paused discovery requests. + * A discovery request will be sent if one would have been sent during the pause. 
*/ - virtual bool paused(const std::string& type_url) const PURE; + ABSL_MUST_USE_RESULT virtual ScopedResume pause(const std::vector type_urls) PURE; /** * Start a configuration subscription asynchronously for some API type and resources. @@ -86,17 +92,20 @@ class GrpcMux { * resources for type_url will result in callbacks. * @param callbacks the callbacks to be notified of configuration updates. These must be valid * until GrpcMuxWatch is destroyed. + * @param resource_decoder how incoming opaque resource objects are to be decoded. * @return GrpcMuxWatchPtr a handle to cancel the subscription with. E.g. when a cluster goes * away, its EDS updates should be cancelled by destroying the GrpcMuxWatchPtr. */ virtual GrpcMuxWatchPtr addWatch(const std::string& type_url, const std::set& resources, - SubscriptionCallbacks& callbacks) PURE; + SubscriptionCallbacks& callbacks, + OpaqueResourceDecoder& resource_decoder) PURE; }; using GrpcMuxPtr = std::unique_ptr; using GrpcMuxSharedPtr = std::shared_ptr; +template using ResponseProtoPtr = std::unique_ptr; /** * A grouping of callbacks that a GrpcMux should provide to its GrpcStream. */ @@ -119,7 +128,8 @@ template class GrpcStreamCallbacks { /** * For the GrpcStream to pass received protos to the context. */ - virtual void onDiscoveryResponse(std::unique_ptr&& message) PURE; + virtual void onDiscoveryResponse(ResponseProtoPtr&& message, + ControlPlaneStats& control_plane_stats) PURE; /** * For the GrpcStream to call when its rate limiting logic allows more requests to be sent. diff --git a/include/envoy/config/subscription.h b/include/envoy/config/subscription.h index d56242600758c..c05a6d567d700 100644 --- a/include/envoy/config/subscription.h +++ b/include/envoy/config/subscription.h @@ -25,10 +25,109 @@ enum class ConfigUpdateFailureReason { UpdateRejected }; +/** + * A wrapper for xDS resources that have been deserialized from the wire. 
+ */ +class DecodedResource { +public: + virtual ~DecodedResource() = default; + + /** + * @return const std::string& resource name. + */ + virtual const std::string& name() const PURE; + + /** + * @return const std::vector& aliases() const PURE; + + /** + * @return const std::string& resource version. + */ + virtual const std::string& version() const PURE; + + /** + * @return const Protobuf::Message& resource message reference. If hasResource() is false, this + * will be the empty message. + */ + virtual const Protobuf::Message& resource() const PURE; + + /** + * @return bool does the xDS discovery response have a set resource payload? + */ + virtual bool hasResource() const PURE; +}; + +using DecodedResourcePtr = std::unique_ptr; +using DecodedResourceRef = std::reference_wrapper; + +class OpaqueResourceDecoder { +public: + virtual ~OpaqueResourceDecoder() = default; + + /** + * @param resource some opaque resource (ProtobufWkt::Any). + * @return ProtobufTypes::MessagePtr decoded protobuf message in the opaque resource, e.g. the + * RouteConfiguration for an Any containing envoy.config.route.v3.RouteConfiguration. + */ + virtual ProtobufTypes::MessagePtr decodeResource(const ProtobufWkt::Any& resource) PURE; + + /** + * @param resource some opaque resource (Protobuf::Message). + * @return std::String the resource name in a Protobuf::Message returned by decodeResource(), e.g. + * the route config name for a envoy.config.route.v3.RouteConfiguration message. + */ + virtual std::string resourceName(const Protobuf::Message& resource) PURE; +}; + +/** + * Subscription to DecodedResources. + */ class SubscriptionCallbacks { public: virtual ~SubscriptionCallbacks() = default; + /** + * Called when a state-of-the-world configuration update is received. (State-of-the-world is + * everything other than delta gRPC - filesystem, HTTP, non-delta gRPC). + * @param resources vector of fetched resources corresponding to the configuration update. 
+ * @param version_info supplies the version information as supplied by the xDS discovery response. + * @throw EnvoyException with reason if the configuration is rejected. Otherwise the configuration + * is accepted. Accepted configurations have their version_info reflected in subsequent + * requests. + */ + virtual void onConfigUpdate(const std::vector& resources, + const std::string& version_info) PURE; + + /** + * Called when a delta configuration update is received. + * @param added_resources resources newly added since the previous fetch. + * @param removed_resources names of resources that this fetch instructed to be removed. + * @param system_version_info aggregate response data "version", for debugging. + * @throw EnvoyException with reason if the config changes are rejected. Otherwise the changes + * are accepted. Accepted changes have their version_info reflected in subsequent requests. + */ + virtual void onConfigUpdate(const std::vector& added_resources, + const Protobuf::RepeatedPtrField& removed_resources, + const std::string& system_version_info) PURE; + + /** + * Called when either the Subscription is unable to fetch a config update or when onConfigUpdate + * invokes an exception. + * @param reason supplies the update failure reason. + * @param e supplies any exception data on why the fetch failed. May be nullptr. + */ + virtual void onConfigUpdateFailed(ConfigUpdateFailureReason reason, const EnvoyException* e) PURE; +}; + +/** + * Invoked when raw config received from xDS wire. + */ +class UntypedConfigUpdateCallbacks { +public: + virtual ~UntypedConfigUpdateCallbacks() = default; + /** * Called when a state-of-the-world configuration update is received. (State-of-the-world is * everything other than delta gRPC - filesystem, HTTP, non-delta gRPC). @@ -61,12 +160,6 @@ class SubscriptionCallbacks { * @param e supplies any exception data on why the fetch failed. May be nullptr. 
*/ virtual void onConfigUpdateFailed(ConfigUpdateFailureReason reason, const EnvoyException* e) PURE; - - /** - * Obtain the "name" of a v2 API resource in a google.protobuf.Any, e.g. the route config name for - * a RouteConfiguration, based on the underlying resource type. - */ - virtual std::string resourceName(const ProtobufWkt::Any& resource) PURE; }; /** @@ -97,20 +190,22 @@ using SubscriptionPtr = std::unique_ptr; /** * Per subscription stats. @see stats_macros.h */ -#define ALL_SUBSCRIPTION_STATS(COUNTER, GAUGE) \ +#define ALL_SUBSCRIPTION_STATS(COUNTER, GAUGE, TEXT_READOUT) \ COUNTER(init_fetch_timeout) \ COUNTER(update_attempt) \ COUNTER(update_failure) \ COUNTER(update_rejected) \ COUNTER(update_success) \ GAUGE(update_time, NeverImport) \ - GAUGE(version, NeverImport) + GAUGE(version, NeverImport) \ + TEXT_READOUT(version_text) /** * Struct definition for per subscription stats. @see stats_macros.h */ struct SubscriptionStats { - ALL_SUBSCRIPTION_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT) + ALL_SUBSCRIPTION_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT, + GENERATE_TEXT_READOUT_STRUCT) }; } // namespace Config diff --git a/include/envoy/config/subscription_factory.h b/include/envoy/config/subscription_factory.h index 0676c7368837f..eb08360e7ddae 100644 --- a/include/envoy/config/subscription_factory.h +++ b/include/envoy/config/subscription_factory.h @@ -14,17 +14,20 @@ class SubscriptionFactory { /** * Subscription factory interface. * - * @param config envoy::api::v2::core::ConfigSource to construct from. + * @param config envoy::config::core::v3::ConfigSource to construct from. * @param type_url type URL for the resource being subscribed to. * @param scope stats scope for any stats tracked by the subscription. * @param callbacks the callbacks needed by all Subscription objects, to deliver config updates. * The callbacks must not result in the deletion of the Subscription object. 
+ * @param resource_decoder how incoming opaque resource objects are to be decoded. + * * @return SubscriptionPtr subscription object corresponding for config and type_url. */ virtual SubscriptionPtr subscriptionFromConfigSource(const envoy::config::core::v3::ConfigSource& config, absl::string_view type_url, Stats::Scope& scope, - SubscriptionCallbacks& callbacks) PURE; + SubscriptionCallbacks& callbacks, + OpaqueResourceDecoder& resource_decoder) PURE; }; } // namespace Config diff --git a/include/envoy/decompressor/BUILD b/include/envoy/decompressor/BUILD deleted file mode 100644 index 4dbcfec52980f..0000000000000 --- a/include/envoy/decompressor/BUILD +++ /dev/null @@ -1,17 +0,0 @@ -licenses(["notice"]) # Apache 2 - -load( - "//bazel:envoy_build_system.bzl", - "envoy_cc_library", - "envoy_package", -) - -envoy_package() - -envoy_cc_library( - name = "decompressor_interface", - hdrs = ["decompressor.h"], - deps = [ - "//include/envoy/buffer:buffer_interface", - ], -) diff --git a/include/envoy/event/BUILD b/include/envoy/event/BUILD index 05ea911bf8cc2..ad215d6cc1331 100644 --- a/include/envoy/event/BUILD +++ b/include/envoy/event/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( @@ -19,6 +19,7 @@ envoy_cc_library( deps = [ ":deferred_deletable", ":file_event_interface", + ":schedulable_cb_interface", ":signal_interface", "//include/envoy/common:scope_tracker_interface", "//include/envoy/common:time_interface", @@ -39,6 +40,11 @@ envoy_cc_library( hdrs = ["file_event.h"], ) +envoy_cc_library( + name = "schedulable_cb_interface", + hdrs = ["schedulable_cb.h"], +) + envoy_cc_library( name = "signal_interface", hdrs = ["signal.h"], @@ -48,6 +54,7 @@ envoy_cc_library( name = "timer_interface", hdrs = ["timer.h"], deps = [ + ":schedulable_cb_interface", "//include/envoy/common:time_interface", ], ) diff --git 
a/include/envoy/event/dispatcher.h b/include/envoy/event/dispatcher.h index 1cad39e3d1f5c..eca8369801026 100644 --- a/include/envoy/event/dispatcher.h +++ b/include/envoy/event/dispatcher.h @@ -9,6 +9,7 @@ #include "envoy/common/scope_tracker.h" #include "envoy/common/time.h" #include "envoy/event/file_event.h" +#include "envoy/event/schedulable_cb.h" #include "envoy/event/signal.h" #include "envoy/event/timer.h" #include "envoy/filesystem/watcher.h" @@ -40,11 +41,15 @@ struct DispatcherStats { ALL_DISPATCHER_STATS(GENERATE_HISTOGRAM_STRUCT) }; +using DispatcherStatsPtr = std::unique_ptr; + /** * Callback invoked when a dispatcher post() runs. */ using PostCb = std::function; +using PostCbSharedPtr = std::shared_ptr; + /** * Abstract event dispatching loop. */ @@ -163,6 +168,14 @@ class Dispatcher { */ virtual Event::TimerPtr createTimer(TimerCb cb) PURE; + /** + * Allocates a schedulable callback. @see SchedulableCallback for docs on how to use the wrapped + * callback. + * @param cb supplies the callback to invoke when the SchedulableCallback is triggered on the + * event loop. + */ + virtual Event::SchedulableCallbackPtr createSchedulableCallback(std::function cb) PURE; + /** * Submits an item for deferred delete. @see DeferredDeletable. */ diff --git a/include/envoy/event/schedulable_cb.h b/include/envoy/event/schedulable_cb.h new file mode 100644 index 0000000000000..3b7bc3d1bb4a0 --- /dev/null +++ b/include/envoy/event/schedulable_cb.h @@ -0,0 +1,58 @@ +#pragma once + +#include +#include + +#include "envoy/common/pure.h" + +namespace Envoy { +namespace Event { + +/** + * Callback wrapper that allows direct scheduling of callbacks in the event loop. + */ +class SchedulableCallback { +public: + virtual ~SchedulableCallback() = default; + + /** + * Schedule the callback so it runs in the current iteration of the event loop after all events + * scheduled in the current event loop have had a chance to execute. 
+ */ + virtual void scheduleCallbackCurrentIteration() PURE; + + /** + * Schedule the callback so it runs in the next iteration of the event loop. There are no + * ordering guarantees for callbacks scheduled for the next iteration, not even among + * next-iteration callbacks. + */ + virtual void scheduleCallbackNextIteration() PURE; + + /** + * Cancel pending execution of the callback. + */ + virtual void cancel() PURE; + + /** + * Return true whether the SchedulableCallback is scheduled for execution. + */ + virtual bool enabled() PURE; +}; + +using SchedulableCallbackPtr = std::unique_ptr; + +/** + * SchedulableCallback factory. + */ +class CallbackScheduler { +public: + virtual ~CallbackScheduler() = default; + + /** + * Create a schedulable callback. + */ + virtual SchedulableCallbackPtr createSchedulableCallback(const std::function& cb) PURE; +}; + +} // namespace Event +} // namespace Envoy diff --git a/include/envoy/event/timer.h b/include/envoy/event/timer.h index 629fcdf102406..c02a6a648b655 100644 --- a/include/envoy/event/timer.h +++ b/include/envoy/event/timer.h @@ -6,6 +6,7 @@ #include "envoy/common/pure.h" #include "envoy/common/time.h" +#include "envoy/event/schedulable_cb.h" namespace Envoy { @@ -80,12 +81,17 @@ class TimeSystem : public TimeSource { ~TimeSystem() override = default; using Duration = MonotonicTime::duration; + using Nanoseconds = std::chrono::nanoseconds; + using Microseconds = std::chrono::microseconds; + using Milliseconds = std::chrono::milliseconds; + using Seconds = std::chrono::seconds; /** * Creates a timer factory. This indirection enables thread-local timer-queue management, * so servers can have a separate timer-factory in each thread. 
*/ - virtual SchedulerPtr createScheduler(Scheduler& base_scheduler) PURE; + virtual SchedulerPtr createScheduler(Scheduler& base_scheduler, + CallbackScheduler& cb_scheduler) PURE; }; } // namespace Event diff --git a/include/envoy/filesystem/BUILD b/include/envoy/filesystem/BUILD index e740d2ee25e40..6a95240457a9d 100644 --- a/include/envoy/filesystem/BUILD +++ b/include/envoy/filesystem/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/include/envoy/filesystem/filesystem.h b/include/envoy/filesystem/filesystem.h index 503eb4d87d9c2..09764415b6a99 100644 --- a/include/envoy/filesystem/filesystem.h +++ b/include/envoy/filesystem/filesystem.h @@ -120,16 +120,18 @@ class Instance { /** * Determine if the path is on a list of paths Envoy will refuse to access. This - * is a basic sanity check for users, blacklisting some clearly bad paths. Paths + * is a basic sanity check for users, denying some clearly bad paths. Paths * may still be problematic (e.g. indirectly leading to /dev/mem) even if this * returns false, it is up to the user to validate that supplied paths are * valid. * @param path some filesystem path. - * @return is the path on the blacklist? + * @return is the path on the deny list? 
*/ virtual bool illegalPath(const std::string& path) PURE; }; +using InstancePtr = std::unique_ptr; + enum class FileType { Regular, Directory, Other }; struct DirectoryEntry { diff --git a/include/envoy/filter/http/BUILD b/include/envoy/filter/http/BUILD new file mode 100644 index 0000000000000..5a76c4ba7b9dc --- /dev/null +++ b/include/envoy/filter/http/BUILD @@ -0,0 +1,21 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_cc_library( + name = "filter_config_provider_interface", + hdrs = ["filter_config_provider.h"], + deps = [ + "//include/envoy/config:extension_config_provider_interface", + "//include/envoy/http:filter_interface", + "//include/envoy/init:manager_interface", + "//include/envoy/server:filter_config_interface", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + ], +) diff --git a/include/envoy/filter/http/filter_config_provider.h b/include/envoy/filter/http/filter_config_provider.h new file mode 100644 index 0000000000000..e1c3f58c125a6 --- /dev/null +++ b/include/envoy/filter/http/filter_config_provider.h @@ -0,0 +1,57 @@ +#pragma once + +#include "envoy/config/core/v3/config_source.pb.h" +#include "envoy/config/extension_config_provider.h" +#include "envoy/http/filter.h" +#include "envoy/init/manager.h" +#include "envoy/server/filter_config.h" + +#include "absl/types/optional.h" + +namespace Envoy { +namespace Filter { +namespace Http { + +using FilterConfigProvider = + Envoy::Config::ExtensionConfigProvider; +using FilterConfigProviderPtr = std::unique_ptr; + +/** + * The FilterConfigProviderManager exposes the ability to get an FilterConfigProvider + * for both static and dynamic filter config providers. + */ +class FilterConfigProviderManager { +public: + virtual ~FilterConfigProviderManager() = default; + + /** + * Get an FilterConfigProviderPtr for a filter config. 
The config providers may share + * the underlying subscriptions to the filter config discovery service. + * @param config_source supplies the configuration source for the filter configs. + * @param filter_config_name the filter config resource name. + * @param require_type_urls enforces that the typed filter config must have a certain type URL. + * @param factory_context is the context to use for the filter config provider. + * @param stat_prefix supplies the stat_prefix to use for the provider stats. + * @param apply_without_warming initializes immediately with the default config and starts the + * subscription. + */ + virtual FilterConfigProviderPtr createDynamicFilterConfigProvider( + const envoy::config::core::v3::ConfigSource& config_source, + const std::string& filter_config_name, const std::set& require_type_urls, + Server::Configuration::FactoryContext& factory_context, const std::string& stat_prefix, + bool apply_without_warming) PURE; + + /** + * Get an FilterConfigProviderPtr for a statically inlined filter config. + * @param config is a fully resolved filter instantiation factory. + * @param filter_config_name is the name of the filter configuration resource. 
+ */ + virtual FilterConfigProviderPtr + createStaticFilterConfigProvider(const Envoy::Http::FilterFactoryCb& config, + const std::string& filter_config_name) PURE; +}; + +} // namespace Http +} // namespace Filter +} // namespace Envoy diff --git a/include/envoy/formatter/BUILD b/include/envoy/formatter/BUILD new file mode 100644 index 0000000000000..df87c4cbac10b --- /dev/null +++ b/include/envoy/formatter/BUILD @@ -0,0 +1,19 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_cc_library( + name = "substitution_formatter_interface", + hdrs = ["substitution_formatter.h"], + deps = [ + "//include/envoy/config:typed_config_interface", + "//include/envoy/http:header_map_interface", + "//include/envoy/stream_info:stream_info_interface", + ], +) diff --git a/include/envoy/formatter/substitution_formatter.h b/include/envoy/formatter/substitution_formatter.h new file mode 100644 index 0000000000000..ec17e692f73cd --- /dev/null +++ b/include/envoy/formatter/substitution_formatter.h @@ -0,0 +1,81 @@ +#pragma once + +#include +#include + +#include "envoy/common/pure.h" +#include "envoy/http/header_map.h" +#include "envoy/stream_info/stream_info.h" + +namespace Envoy { +namespace Formatter { + +/** + * Interface for substitution formatter. + * Formatters provide a complete substitution output line for the given headers/trailers/stream. + */ +class Formatter { +public: + virtual ~Formatter() = default; + + /** + * Return a formatted substitution line. + * @param request_headers supplies the request headers. + * @param response_headers supplies the response headers. + * @param response_trailers supplies the response trailers. + * @param stream_info supplies the stream info. + * @param local_reply_body supplies the local reply body. + * @return std::string string containing the complete formatted substitution line. 
+ */ + virtual std::string format(const Http::RequestHeaderMap& request_headers, + const Http::ResponseHeaderMap& response_headers, + const Http::ResponseTrailerMap& response_trailers, + const StreamInfo::StreamInfo& stream_info, + absl::string_view local_reply_body) const PURE; +}; + +using FormatterPtr = std::unique_ptr; + +/** + * Interface for substitution provider. + * FormatterProviders extract information from the given headers/trailers/stream. + */ +class FormatterProvider { +public: + virtual ~FormatterProvider() = default; + + /** + * Extract a value from the provided headers/trailers/stream. + * @param request_headers supplies the request headers. + * @param response_headers supplies the response headers. + * @param response_trailers supplies the response trailers. + * @param stream_info supplies the stream info. + * @param local_reply_body supplies the local reply body. + * @return std::string containing a single value extracted from the given headers/trailers/stream. + */ + virtual std::string format(const Http::RequestHeaderMap& request_headers, + const Http::ResponseHeaderMap& response_headers, + const Http::ResponseTrailerMap& response_trailers, + const StreamInfo::StreamInfo& stream_info, + absl::string_view local_reply_body) const PURE; + /** + * Extract a value from the provided headers/trailers/stream, preserving the value's type. + * @param request_headers supplies the request headers. + * @param response_headers supplies the response headers. + * @param response_trailers supplies the response trailers. + * @param stream_info supplies the stream info. + * @param local_reply_body supplies the local reply body. + * @return ProtobufWkt::Value containing a single value extracted from the given + * headers/trailers/stream. 
+ */ + virtual ProtobufWkt::Value formatValue(const Http::RequestHeaderMap& request_headers, + const Http::ResponseHeaderMap& response_headers, + const Http::ResponseTrailerMap& response_trailers, + const StreamInfo::StreamInfo& stream_info, + absl::string_view local_reply_body) const PURE; +}; + +using FormatterProviderPtr = std::unique_ptr; + +} // namespace Formatter +} // namespace Envoy diff --git a/include/envoy/grpc/BUILD b/include/envoy/grpc/BUILD index a2beb3e61e7ec..07d87ce33d193 100644 --- a/include/envoy/grpc/BUILD +++ b/include/envoy/grpc/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/include/envoy/grpc/async_client.h b/include/envoy/grpc/async_client.h index c7ab0d219b89c..b2005723fab29 100644 --- a/include/envoy/grpc/async_client.h +++ b/include/envoy/grpc/async_client.h @@ -58,6 +58,12 @@ class RawAsyncStream { * stream object and no further callbacks will be invoked. */ virtual void resetStream() PURE; + + /*** + * @returns if the stream has enough buffered outbound data to be over the configured buffer + * limits + */ + virtual bool isAboveWriteBufferHighWatermark() const PURE; }; class RawAsyncRequestCallbacks { diff --git a/include/envoy/grpc/async_client_manager.h b/include/envoy/grpc/async_client_manager.h index 8494204cf8ade..9b036480018f6 100644 --- a/include/envoy/grpc/async_client_manager.h +++ b/include/envoy/grpc/async_client_manager.h @@ -32,7 +32,7 @@ class AsyncClientManager { /** * Create a Grpc::AsyncClients factory for a service. Validation of the service is performed and * will raise an exception on failure. - * @param grpc_service envoy::api::v2::core::GrpcService configuration. + * @param grpc_service envoy::config::core::v3::GrpcService configuration. * @param scope stats scope. 
* @param skip_cluster_check if set to true skips checks for cluster presence and being statically * configured. diff --git a/include/envoy/grpc/status.h b/include/envoy/grpc/status.h index b967d3e291645..3715571bbb66d 100644 --- a/include/envoy/grpc/status.h +++ b/include/envoy/grpc/status.h @@ -9,8 +9,6 @@ class Status { public: using GrpcStatus = int64_t; - // If this enum is changed, then the std::unordered_map in Envoy::Grpc::Utility::nameToGrpcStatus - // located at: //source/common/access_log/grpc/status.cc must also be changed. enum WellKnownGrpcStatus { // The RPC completed successfully. Ok = 0, diff --git a/include/envoy/http/BUILD b/include/envoy/http/BUILD index e0bc196ac5b8e..f17ce1cb5e146 100644 --- a/include/envoy/http/BUILD +++ b/include/envoy/http/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( @@ -35,7 +35,9 @@ envoy_cc_library( ":metadata_interface", ":protocol_interface", "//include/envoy/buffer:buffer_interface", + "//include/envoy/grpc:status", "//include/envoy/network:address_interface", + "//source/common/http:status_lib", ], ) @@ -89,6 +91,7 @@ envoy_cc_library( deps = [ ":header_map_interface", "//include/envoy/network:address_interface", + "//include/envoy/stream_info:filter_state_interface", ], ) @@ -126,6 +129,7 @@ envoy_cc_library( envoy_cc_library( name = "metadata_interface", hdrs = ["metadata_interface.h"], + external_deps = ["abseil_node_hash_map"], ) envoy_cc_library( diff --git a/include/envoy/http/async_client.h b/include/envoy/http/async_client.h index 9e95df1cc2f76..066ccb04e7169 100644 --- a/include/envoy/http/async_client.h +++ b/include/envoy/http/async_client.h @@ -70,6 +70,14 @@ class AsyncClient { * @param reason failure reason */ virtual void onFailure(const Request& request, FailureReason reason) PURE; + + /** + * Called before finalizing upstream span when the 
request is complete or reset. + * @param span a tracing span to fill with extra tags. + * @param response_headers the response headers. + */ + virtual void onBeforeFinalizeUpstreamSpan(Envoy::Tracing::Span& span, + const Http::ResponseHeaderMap* response_headers) PURE; }; /** @@ -150,6 +158,12 @@ class AsyncClient { * Reset the stream. */ virtual void reset() PURE; + + /*** + * @returns if the stream has enough buffered outbound data to be over the configured buffer + * limits + */ + virtual bool isAboveWriteBufferHighWatermark() const PURE; }; virtual ~AsyncClient() = default; diff --git a/include/envoy/http/codec.h b/include/envoy/http/codec.h index 39e11b1954496..ccc04af094a67 100644 --- a/include/envoy/http/codec.h +++ b/include/envoy/http/codec.h @@ -6,14 +6,25 @@ #include "envoy/buffer/buffer.h" #include "envoy/common/pure.h" +#include "envoy/grpc/status.h" #include "envoy/http/header_map.h" #include "envoy/http/metadata_interface.h" #include "envoy/http/protocol.h" #include "envoy/network/address.h" +#include "common/http/status.h" + namespace Envoy { namespace Http { +namespace Http1 { +struct CodecStats; +} + +namespace Http2 { +struct CodecStats; +} + // Legacy default value of 60K is safely under both codec default limits. static const uint32_t DEFAULT_MAX_REQUEST_HEADERS_KB = 60; // Default maximum number of headers. @@ -26,6 +37,14 @@ const char MaxResponseHeadersCountOverrideKey[] = class Stream; +/** + * Error codes used to convey the reason for a GOAWAY. + */ +enum class GoAwayErrorCode { + NoError, + Other, +}; + /** * Stream encoder options specific to HTTP/1. */ @@ -85,8 +104,7 @@ class StreamEncoder { class RequestEncoder : public virtual StreamEncoder { public: /** - * Encode headers, optionally indicating end of stream. Response headers must - * have a valid :status set. + * Encode headers, optionally indicating end of stream. * @param headers supplies the header map to encode. 
* @param end_stream supplies whether this is a header only request. */ @@ -168,6 +186,20 @@ class RequestDecoder : public virtual StreamDecoder { * @param trailers supplies the decoded trailers. */ virtual void decodeTrailers(RequestTrailerMapPtr&& trailers) PURE; + + /** + * Called if the codec needs to send a protocol error. + * @param is_grpc_request indicates if the request is a gRPC request + * @param code supplies the HTTP error code to send. + * @param body supplies an optional body to send with the local reply. + * @param modify_headers supplies a way to edit headers before they are sent downstream. + * @param grpc_status an optional gRPC status for gRPC requests + * @param details details about the source of the error, for debug purposes + */ + virtual void sendLocalReply(bool is_grpc_request, Code code, absl::string_view body, + const std::function& modify_headers, + const absl::optional grpc_status, + absl::string_view details) PURE; }; /** @@ -303,6 +335,13 @@ class Stream { * with the stream. */ virtual const Network::Address::InstanceConstSharedPtr& connectionLocalAddress() PURE; + + /** + * Set the flush timeout for the stream. At the codec level this is used to bound the amount of + * time the codec will wait to flush body data pending open stream window. It does *not* count + * small window updates as satisfying the idle timeout as this is a potential DoS vector. + */ + virtual void setFlushTimeout(std::chrono::milliseconds timeout) PURE; }; /** @@ -315,7 +354,7 @@ class ConnectionCallbacks { /** * Fires when the remote indicates "go away." No new streams should be created. */ - virtual void onGoAway() PURE; + virtual void onGoAway(GoAwayErrorCode error_code) PURE; }; /** @@ -359,8 +398,10 @@ class Connection { /** * Dispatch incoming connection data. * @param data supplies the data to dispatch. The codec will drain as many bytes as it processes. + * @return Status indicating the status of the codec. 
Holds any errors encountered while + * processing the incoming data. */ - virtual void dispatch(Buffer::Instance& data) PURE; + virtual Status dispatch(Buffer::Instance& data) PURE; /** * Indicate "go away" to the remote. No new streams can be created beyond this point. diff --git a/include/envoy/http/conn_pool.h b/include/envoy/http/conn_pool.h index c41fe57641201..7539562435248 100644 --- a/include/envoy/http/conn_pool.h +++ b/include/envoy/http/conn_pool.h @@ -13,20 +13,8 @@ namespace Envoy { namespace Http { namespace ConnectionPool { -/** - * Handle that allows a pending request to be cancelled before it is bound to a connection. - */ -class Cancellable { -public: - virtual ~Cancellable() = default; - - /** - * Cancel the pending request. - */ - virtual void cancel() PURE; -}; - using PoolFailureReason = ::Envoy::ConnectionPool::PoolFailureReason; +using Cancellable = ::Envoy::ConnectionPool::Cancellable; /** * Pool callbacks invoked in the context of a newStream() call, either synchronously or @@ -60,7 +48,7 @@ class Callbacks { /** * An instance of a generic connection pool. */ -class Instance : public Event::DeferredDeletable { +class Instance : public Envoy::ConnectionPool::Instance, public Event::DeferredDeletable { public: ~Instance() override = default; @@ -69,27 +57,6 @@ class Instance : public Event::DeferredDeletable { */ virtual Http::Protocol protocol() const PURE; - /** - * Called when a connection pool has been drained of pending requests, busy connections, and - * ready connections. - */ - using DrainedCb = std::function; - - /** - * Register a callback that gets called when the connection pool is fully drained. No actual - * draining is done. The owner of the connection pool is responsible for not creating any - * new streams. - */ - virtual void addDrainedCallback(DrainedCb cb) PURE; - - /** - * Actively drain all existing connection pool connections. 
This method can be used in cases - * where the connection pool is not being destroyed, but the caller wishes to make sure that - * all new streams take place on a new connection. For example, when a health check failure - * occurs. - */ - virtual void drainConnections() PURE; - /** * Determines whether the connection pool is actively processing any requests. * @return true if the connection pool has any pending requests or any active requests. @@ -113,11 +80,6 @@ class Instance : public Event::DeferredDeletable { */ virtual Cancellable* newStream(Http::ResponseDecoder& response_decoder, Callbacks& callbacks) PURE; - - /** - * @return Upstream::HostDescriptionConstSharedPtr the host for which connections are pooled. - */ - virtual Upstream::HostDescriptionConstSharedPtr host() const PURE; }; using InstancePtr = std::unique_ptr; diff --git a/include/envoy/http/filter.h b/include/envoy/http/filter.h index c97589d3cb690..cee23a153616a 100644 --- a/include/envoy/http/filter.h +++ b/include/envoy/http/filter.h @@ -34,6 +34,7 @@ enum class FilterHeadersStatus { StopIteration, // Continue iteration to remaining filters, but ignore any subsequent data or trailers. This // results in creating a header only request/response. + // This status MUST NOT be returned by decodeHeaders() when end_stream is set to true. ContinueAndEndStream, // Do not iterate for headers as well as data and trailers for the current filter and the filters // following, and buffer body data for later dispatching. ContinueDecoding() MUST @@ -147,10 +148,28 @@ class StreamFilterCallbacks { * caching where applicable to avoid multiple lookups. If a filter has modified the headers in * a way that affects routing, clearRouteCache() must be called to clear the cache. * - * NOTE: In the future we may want to allow the filter to override the route entry. + * NOTE: In the future we want to split route() into 2 methods, one that just + * returns current route and another that actually resolve the route. 
*/ virtual Router::RouteConstSharedPtr route() PURE; + /** + * Invokes callback with a matched route, callback can choose to accept this route by returning + * Router::RouteMatchStatus::Accept or continue route match from last matched route by returning + * Router::RouteMatchStatus::Continue, if there are more routes available. + * + * Returns route accepted by the callback or nullptr if no match found or none of route is + * accepted by the callback. + * + * NOTE: clearRouteCache() must be called before invoking this method otherwise cached route will + * be returned directly to the caller and the callback will not be invoked. + * + * Currently a route callback's decision is overridden by clearRouteCache() / route() call in the + * subsequent filters. We may want to persist callbacks so they always participate in later route + * resolution or make it an independent entity like filters that gets called on route resolution. + */ + virtual Router::RouteConstSharedPtr route(const Router::RouteCallback& cb) PURE; + /** * Returns the clusterInfo for the cached route. * This method is to avoid multiple look ups in the filter chain, it also provides a consistent @@ -307,10 +326,13 @@ class StreamDecoderFilterCallbacks : public virtual StreamFilterCallbacks { virtual RequestTrailerMap& addDecodedTrailers() PURE; /** - * Create a locally generated response using the provided response_code and body_text parameters. - * If the request was a gRPC request the local reply will be encoded as a gRPC response with a 200 - * HTTP response code and grpc-status and grpc-message headers mapped from the provided - * parameters. + * Attempts to create a locally generated response using the provided response_code and body_text + * parameters. If the request was a gRPC request the local reply will be encoded as a gRPC + * response with a 200 HTTP response code and grpc-status and grpc-message headers mapped from the + * provided parameters. + * + * If a response has already started (e.g. 
if the router calls sendSendLocalReply after encoding + * headers) this will either ship the reply directly to the downstream codec, or reset the stream. * * @param response_code supplies the HTTP response code. * @param body_text supplies the optional body text which is sent using the text/plain content @@ -337,9 +359,10 @@ class StreamDecoderFilterCallbacks : public virtual StreamFilterCallbacks { /** * Called with 100-Continue headers to be encoded. * - * This is not folded into encodeHeaders because most Envoy users and filters - * will not be proxying 100-continue and with it split out, can ignore the - * complexity of multiple encodeHeaders calls. + * This is not folded into encodeHeaders because most Envoy users and filters will not be proxying + * 100-continue and with it split out, can ignore the complexity of multiple encodeHeaders calls. + * + * This must not be invoked more than once per request. * * @param headers supplies the headers to be encoded. */ @@ -351,6 +374,9 @@ class StreamDecoderFilterCallbacks : public virtual StreamFilterCallbacks { * The connection manager inspects certain pseudo headers that are not actually sent downstream. * - See source/common/http/headers.h * + * The only 1xx that may be provided to encodeHeaders() is a 101 upgrade, which will be the final + * encodeHeaders() for a response. + * * @param headers supplies the headers to be encoded. * @param end_stream supplies whether this is a header only request/response. */ @@ -527,7 +553,7 @@ class StreamDecoderFilter : public StreamFilterBase { * should consider using StopAllIterationAndBuffer or StopAllIterationAndWatermark in * decodeHeaders() to prevent metadata passing to the following filters. * - * @param metadata supplies the decoded metadata. + * @param metadata_map supplies the decoded metadata. 
*/ virtual FilterMetadataStatus decodeMetadata(MetadataMap& /* metadata_map */) { return Http::FilterMetadataStatus::Continue; @@ -696,6 +722,8 @@ class StreamEncoderFilter : public StreamFilterBase { * will not be proxying 100-continue and with it split out, can ignore the * complexity of multiple encodeHeaders calls. * + * This will only be invoked once per request. + * * @param headers supplies the 100-continue response headers to be encoded. * @return FilterHeadersStatus determines how filter chain iteration proceeds. * @@ -704,6 +732,10 @@ class StreamEncoderFilter : public StreamFilterBase { /** * Called with headers to be encoded, optionally indicating end of stream. + * + * The only 1xx that may be provided to encodeHeaders() is a 101 upgrade, which will be the final + * encodeHeaders() for a response. + * * @param headers supplies the headers to be encoded. * @param end_stream supplies whether this is a header only request/response. * @return FilterHeadersStatus determines how filter chain iteration proceeds. diff --git a/include/envoy/http/header_map.h b/include/envoy/http/header_map.h index 0c8ddb6adcfd0..bc5e9338a2dc7 100644 --- a/include/envoy/http/header_map.h +++ b/include/envoy/http/header_map.h @@ -6,7 +6,6 @@ #include #include #include -#include #include #include "envoy/common/pure.h" @@ -70,18 +69,6 @@ class LowerCaseString { std::string string_; }; -/** - * Lower case string hasher. - */ -struct LowerCaseStringHash { - size_t operator()(const LowerCaseString& value) const { return HashUtil::xxHash64(value.get()); } -}; - -/** - * Convenient type for unordered set of lower case string. - */ -using LowerCaseStrUnorderedSet = std::unordered_set; - /** * Convenient type for a vector of lower case string and string pair. */ @@ -148,6 +135,12 @@ class HeaderString { absl::get(buffer_).begin(), unary_op); } + /** + * Trim trailing whitespaces from the HeaderString. Only supported by the "Inline" HeaderString + * representation. 
+ */ + void rtrim(); + /** * Get an absl::string_view. It will NOT be NUL terminated! * @@ -264,15 +257,11 @@ class HeaderEntry { }; /** - * The following defines all request headers that Envoy allows direct access to inside of the - * header map. In practice, these are all headers used during normal Envoy request flow + * The following defines all default request headers that Envoy allows direct access to inside of + * the header map. In practice, these are all headers used during normal Envoy request flow * processing. This allows O(1) access to these headers without even a hash lookup. */ #define INLINE_REQ_HEADERS(HEADER_FUNC) \ - HEADER_FUNC(Accept) \ - HEADER_FUNC(AcceptEncoding) \ - HEADER_FUNC(AccessControlRequestMethod) \ - HEADER_FUNC(Authorization) \ HEADER_FUNC(ClientTraceId) \ HEADER_FUNC(EnvoyDownstreamServiceCluster) \ HEADER_FUNC(EnvoyDownstreamServiceNode) \ @@ -297,49 +286,34 @@ class HeaderEntry { HEADER_FUNC(ForwardedClientCert) \ HEADER_FUNC(ForwardedFor) \ HEADER_FUNC(ForwardedProto) \ - HEADER_FUNC(GrpcAcceptEncoding) \ HEADER_FUNC(GrpcTimeout) \ HEADER_FUNC(Host) \ HEADER_FUNC(Method) \ - HEADER_FUNC(OtSpanContext) \ - HEADER_FUNC(Origin) \ HEADER_FUNC(Path) \ HEADER_FUNC(Protocol) \ - HEADER_FUNC(Referer) \ HEADER_FUNC(Scheme) \ HEADER_FUNC(TE) \ HEADER_FUNC(UserAgent) /** - * O(1) response headers. + * Default O(1) response headers. 
*/ #define INLINE_RESP_HEADERS(HEADER_FUNC) \ - HEADER_FUNC(AccessControlAllowCredentials) \ - HEADER_FUNC(AccessControlAllowHeaders) \ - HEADER_FUNC(AccessControlAllowMethods) \ - HEADER_FUNC(AccessControlAllowOrigin) \ - HEADER_FUNC(AccessControlExposeHeaders) \ - HEADER_FUNC(AccessControlMaxAge) \ - HEADER_FUNC(ContentEncoding) \ HEADER_FUNC(Date) \ - HEADER_FUNC(Etag) \ HEADER_FUNC(EnvoyDegraded) \ HEADER_FUNC(EnvoyImmediateHealthCheckFail) \ - HEADER_FUNC(EnvoyOverloaded) \ HEADER_FUNC(EnvoyRateLimited) \ HEADER_FUNC(EnvoyUpstreamCanary) \ HEADER_FUNC(EnvoyUpstreamHealthCheckedCluster) \ HEADER_FUNC(EnvoyUpstreamServiceTime) \ HEADER_FUNC(Location) \ HEADER_FUNC(Server) \ - HEADER_FUNC(Status) \ - HEADER_FUNC(Vary) + HEADER_FUNC(Status) /** - * O(1) request and response headers. + * Default O(1) request and response headers. */ #define INLINE_REQ_RESP_HEADERS(HEADER_FUNC) \ - HEADER_FUNC(CacheControl) \ HEADER_FUNC(Connection) \ HEADER_FUNC(ContentLength) \ HEADER_FUNC(ContentType) \ @@ -353,7 +327,7 @@ class HeaderEntry { HEADER_FUNC(Via) /** - * O(1) response headers and trailers. + * Default O(1) response headers and trailers. */ #define INLINE_RESP_HEADERS_TRAILERS(HEADER_FUNC) \ HEADER_FUNC(GrpcMessage) \ @@ -380,7 +354,8 @@ class HeaderEntry { virtual void setReference##name(absl::string_view value) PURE; \ virtual void set##name(absl::string_view value) PURE; \ virtual void set##name(uint64_t value) PURE; \ - virtual size_t remove##name() PURE; + virtual size_t remove##name() PURE; \ + virtual absl::string_view get##name##Value() const PURE; /** * Wraps a set of HTTP headers. @@ -543,36 +518,21 @@ class HeaderMap { /** * Callback when calling iterate() over a const header map. * @param header supplies the header entry. - * @param context supplies the context passed to iterate(). - * @return Iterate::Continue to continue iteration. 
+ * @return Iterate::Continue to continue iteration, or Iterate::Break to stop; */ - using ConstIterateCb = Iterate (*)(const HeaderEntry&, void*); + using ConstIterateCb = std::function; /** * Iterate over a constant header map. * @param cb supplies the iteration callback. - * @param context supplies the context that will be passed to the callback. */ - virtual void iterate(ConstIterateCb cb, void* context) const PURE; + virtual void iterate(ConstIterateCb cb) const PURE; /** * Iterate over a constant header map in reverse order. * @param cb supplies the iteration callback. - * @param context supplies the context that will be passed to the callback. - */ - virtual void iterateReverse(ConstIterateCb cb, void* context) const PURE; - - enum class Lookup { Found, NotFound, NotSupported }; - - /** - * Lookup one of the predefined inline headers (see ALL_INLINE_HEADERS below) by key. - * @param key supplies the header key. - * @param entry is set to the header entry if it exists and if key is one of the predefined inline - * headers; otherwise, nullptr. - * @return Lookup::Found if lookup was successful, Lookup::NotFound if the header entry doesn't - * exist, or Lookup::NotSupported if key is not one of the predefined inline headers. */ - virtual Lookup lookup(const LowerCaseString& key, const HeaderEntry** entry) const PURE; + virtual void iterateReverse(ConstIterateCb cb) const PURE; /** * Clears the headers in the map. @@ -586,6 +546,14 @@ class HeaderMap { */ virtual size_t remove(const LowerCaseString& key) PURE; + /** + * Remove all instances of headers where the header matches the predicate. + * @param predicate supplies the predicate to match headers against. + * @return the number of headers removed. + */ + using HeaderMatchPredicate = std::function; + virtual size_t removeIf(const HeaderMatchPredicate& predicate) PURE; + /** * Remove all instances of headers where the key begins with the supplied prefix. 
* @param prefix supplies the prefix to match header keys against. @@ -626,42 +594,171 @@ class HeaderMap { using HeaderMapPtr = std::unique_ptr; +/** + * Registry for custom headers. Headers can be registered multiple times in independent + * compilation units and will still point to the same slot. Headers are registered independently + * for each concrete header map type and do not overlap. Handles are strongly typed and do not + * allow mixing. + */ +class CustomInlineHeaderRegistry { +public: + enum class Type { RequestHeaders, RequestTrailers, ResponseHeaders, ResponseTrailers }; + using RegistrationMap = std::map; + + // A "phantom" type is used here to force the compiler to verify that handles are not mixed + // between concrete header map types. + template struct Handle { + Handle(RegistrationMap::const_iterator it) : it_(it) {} + bool operator==(const Handle& rhs) const { return it_ == rhs.it_; } + + RegistrationMap::const_iterator it_; + }; + + /** + * Register an inline header and return a handle for use in inline header calls. Must be called + * prior to finalize(). + */ + template + static Handle registerInlineHeader(const LowerCaseString& header_name) { + static size_t inline_header_index = 0; + + ASSERT(!mutableFinalized()); + auto& map = mutableRegistrationMap(); + auto entry = map.find(header_name); + if (entry == map.end()) { + map[header_name] = inline_header_index++; + } + return Handle(map.find(header_name)); + } + + /** + * Fetch the handle for a registered inline header. May only be called after finalized(). + */ + template + static absl::optional> getInlineHeader(const LowerCaseString& header_name) { + ASSERT(mutableFinalized()); + auto& map = mutableRegistrationMap(); + auto entry = map.find(header_name); + if (entry != map.end()) { + return Handle(entry); + } + return absl::nullopt; + } + + /** + * Fetch all registered headers. May only be called after finalized(). 
+ */ + template static const RegistrationMap& headers() { + ASSERT(mutableFinalized()); + return mutableRegistrationMap(); + } + + /** + * Finalize the custom header registrations. No further changes are allowed after this point. + * This guaranteed that all header maps created by the process have the same variable size and + * custom registrations. + */ + template static void finalize() { + ASSERT(!mutableFinalized()); + mutableFinalized() = true; + } + +private: + template static RegistrationMap& mutableRegistrationMap() { + MUTABLE_CONSTRUCT_ON_FIRST_USE(RegistrationMap); + } + template static bool& mutableFinalized() { MUTABLE_CONSTRUCT_ON_FIRST_USE(bool); } +}; + +/** + * Static initializer to register a custom header in a compilation unit. This can be used by + * extensions to register custom headers. + */ +template class RegisterCustomInlineHeader { +public: + RegisterCustomInlineHeader(const LowerCaseString& header) + : handle_(CustomInlineHeaderRegistry::registerInlineHeader(header)) {} + + typename CustomInlineHeaderRegistry::Handle handle() { return handle_; } + +private: + const typename CustomInlineHeaderRegistry::Handle handle_; +}; + +/** + * The following functions allow O(1) access for custom inline headers. 
+ */ +template class CustomInlineHeaderBase { +public: + virtual ~CustomInlineHeaderBase() = default; + + static constexpr CustomInlineHeaderRegistry::Type header_map_type = type; + using Handle = CustomInlineHeaderRegistry::Handle; + + virtual const HeaderEntry* getInline(Handle handle) const PURE; + virtual void appendInline(Handle handle, absl::string_view data, + absl::string_view delimiter) PURE; + virtual void setReferenceInline(Handle, absl::string_view value) PURE; + virtual void setInline(Handle, absl::string_view value) PURE; + virtual void setInline(Handle, uint64_t value) PURE; + virtual size_t removeInline(Handle handle) PURE; + absl::string_view getInlineValue(Handle handle) const { + const auto header = getInline(handle); + if (header != nullptr) { + return header->value().getStringView(); + } + return {}; + } +}; + /** * Typed derived classes for all header map types. */ // Base class for both request and response headers. -class RequestOrResponseHeaderMap : public virtual HeaderMap { +class RequestOrResponseHeaderMap : public HeaderMap { public: INLINE_REQ_RESP_HEADERS(DEFINE_INLINE_HEADER) }; // Request headers. -class RequestHeaderMap : public RequestOrResponseHeaderMap { +class RequestHeaderMap + : public RequestOrResponseHeaderMap, + public CustomInlineHeaderBase { public: INLINE_REQ_HEADERS(DEFINE_INLINE_HEADER) }; using RequestHeaderMapPtr = std::unique_ptr; // Request trailers. -class RequestTrailerMap : public virtual HeaderMap {}; +class RequestTrailerMap + : public HeaderMap, + public CustomInlineHeaderBase {}; using RequestTrailerMapPtr = std::unique_ptr; // Base class for both response headers and trailers. -class ResponseHeaderOrTrailerMap : public virtual HeaderMap { +class ResponseHeaderOrTrailerMap { public: + virtual ~ResponseHeaderOrTrailerMap() = default; + INLINE_RESP_HEADERS_TRAILERS(DEFINE_INLINE_HEADER) }; // Response headers. 
-class ResponseHeaderMap : public RequestOrResponseHeaderMap, public ResponseHeaderOrTrailerMap { +class ResponseHeaderMap + : public RequestOrResponseHeaderMap, + public ResponseHeaderOrTrailerMap, + public CustomInlineHeaderBase { public: INLINE_RESP_HEADERS(DEFINE_INLINE_HEADER) }; using ResponseHeaderMapPtr = std::unique_ptr; // Response trailers. -class ResponseTrailerMap : public virtual HeaderMap, public ResponseHeaderOrTrailerMap {}; +class ResponseTrailerMap + : public ResponseHeaderOrTrailerMap, + public HeaderMap, + public CustomInlineHeaderBase {}; using ResponseTrailerMapPtr = std::unique_ptr; /** diff --git a/include/envoy/http/metadata_interface.h b/include/envoy/http/metadata_interface.h index dc8dc0e4e65c5..3874aa905a490 100644 --- a/include/envoy/http/metadata_interface.h +++ b/include/envoy/http/metadata_interface.h @@ -3,9 +3,10 @@ #include #include #include -#include #include +#include "absl/container/node_hash_map.h" + namespace Envoy { namespace Http { @@ -20,7 +21,7 @@ constexpr uint8_t END_METADATA_FLAG = 0x4; // TODO(soya3129): Respect max_frame_size after nghttp2 #1250 is resolved. 
constexpr uint64_t METADATA_MAX_PAYLOAD_SIZE = 16384; -using UnorderedStringMap = std::unordered_map; +using UnorderedStringMap = absl::node_hash_map; class MetadataMap : public UnorderedStringMap { public: diff --git a/include/envoy/init/BUILD b/include/envoy/init/BUILD index 2229d7c7a12e4..4bbc0d18f6826 100644 --- a/include/envoy/init/BUILD +++ b/include/envoy/init/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/include/envoy/init/target.h b/include/envoy/init/target.h index 9ab46d38aff48..75397ad2f991a 100644 --- a/include/envoy/init/target.h +++ b/include/envoy/init/target.h @@ -25,6 +25,12 @@ struct TargetHandle { * @return true if the target received this call, false if the target was already destroyed. */ virtual bool initialize(const Watcher& watcher) const PURE; + + /** + * @return a human-readable target name, for logging / debugging / tracking target names. + * The target name has to be unique. + */ + virtual absl::string_view name() const PURE; }; using TargetHandlePtr = std::unique_ptr; diff --git a/include/envoy/json/BUILD b/include/envoy/json/BUILD index 89c21942185a4..4bbab2712a149 100644 --- a/include/envoy/json/BUILD +++ b/include/envoy/json/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/include/envoy/json/json_object.h b/include/envoy/json/json_object.h index a5161ccfa91b8..7df162540ff58 100644 --- a/include/envoy/json/json_object.h +++ b/include/envoy/json/json_object.h @@ -147,8 +147,13 @@ class Object { virtual double getDouble(const std::string& name, double default_value) const PURE; /** - * @return a hash of the JSON object. This is a hash of each nested element in stable order. 
- * It does not consider white space that was originally in the parsed JSON. + * @return a hash of the JSON object. + * Per RFC 7159: + * An object is an unordered collection of zero or more name/value + * pairs, where a name is a string and a value is a string, number, + * boolean, null, object, or array. + * Objects with fields in different orders are equivalent and produce the same hash. + * It does not consider white space that was originally in the parsed JSON. */ virtual uint64_t hash() const PURE; diff --git a/include/envoy/local_info/BUILD b/include/envoy/local_info/BUILD index 52372ccf334f9..749ad670563b5 100644 --- a/include/envoy/local_info/BUILD +++ b/include/envoy/local_info/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/include/envoy/network/BUILD b/include/envoy/network/BUILD index 229ad30195231..3a8e67613c587 100644 --- a/include/envoy/network/BUILD +++ b/include/envoy/network/BUILD @@ -1,18 +1,17 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( name = "address_interface", hdrs = ["address.h"], deps = [ - ":io_handle_interface", "//include/envoy/api:os_sys_calls_interface", ], ) @@ -42,6 +41,20 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "udp_packet_writer_handler_interface", + hdrs = ["udp_packet_writer_handler.h"], + deps = [ + ":address_interface", + ":io_handle_interface", + ":socket_interface", + "//include/envoy/api:io_error_interface", + "//include/envoy/buffer:buffer_interface", + "//include/envoy/stats:stats_interface", + "//include/envoy/stats:stats_macros", + ], +) + envoy_cc_library( name = "dns_interface", hdrs = ["dns.h"], @@ -53,6 +66,11 @@ envoy_cc_library( hdrs = ["drain_decision.h"], ) +envoy_cc_library( + name = 
"exception_interface", + hdrs = ["exception.h"], +) + envoy_cc_library( name = "filter_interface", hdrs = ["filter.h"], @@ -61,6 +79,7 @@ envoy_cc_library( ":transport_socket_interface", "//include/envoy/buffer:buffer_interface", "//include/envoy/upstream:host_description_interface", + "//source/common/protobuf", ], ) @@ -76,18 +95,32 @@ envoy_cc_library( envoy_cc_library( name = "io_handle_interface", hdrs = ["io_handle.h"], + external_deps = ["abseil_optional"], deps = [ + ":address_interface", "//include/envoy/api:io_error_interface", + "//include/envoy/api:os_sys_calls_interface", "//source/common/common:assert_lib", ], ) +envoy_cc_library( + name = "socket_interface", + hdrs = ["socket.h"], + deps = [ + ":address_interface", + ":io_handle_interface", + "//include/envoy/config:typed_config_interface", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + ], +) + envoy_cc_library( name = "listen_socket_interface", hdrs = ["listen_socket.h"], deps = [ - ":address_interface", ":io_handle_interface", + ":socket_interface", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], ) @@ -97,6 +130,7 @@ envoy_cc_library( hdrs = ["transport_socket.h"], deps = [ ":io_handle_interface", + ":proxy_protocol_options_lib", "//include/envoy/buffer:buffer_interface", "//include/envoy/ssl:connection_interface", ], @@ -117,7 +151,9 @@ envoy_cc_library( ":connection_balancer_interface", ":connection_interface", ":listen_socket_interface", + ":udp_packet_writer_handler_interface", "//include/envoy/access_log:access_log_interface", + "//include/envoy/common:resource_interface", "//include/envoy/stats:stats_interface", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], @@ -132,3 +168,20 @@ envoy_cc_library( "@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], ) + +envoy_cc_library( + name = "udp_packet_writer_config_interface", + hdrs = ["udp_packet_writer_config.h"], + deps = [ + "//include/envoy/config:typed_config_interface", + 
"//include/envoy/network:udp_packet_writer_handler_interface", + ], +) + +envoy_cc_library( + name = "proxy_protocol_options_lib", + hdrs = ["proxy_protocol.h"], + deps = [ + ":address_interface", + ], +) diff --git a/include/envoy/network/address.h b/include/envoy/network/address.h index fd5a3b4563ccd..94793f12c1555 100644 --- a/include/envoy/network/address.h +++ b/include/envoy/network/address.h @@ -10,7 +10,6 @@ #include "envoy/api/os_sys_calls.h" #include "envoy/common/platform.h" #include "envoy/common/pure.h" -#include "envoy/network/io_handle.h" #include "absl/numeric/int128.h" #include "absl/strings/string_view.h" @@ -43,6 +42,11 @@ class Ipv6 { * @return the absl::uint128 IPv6 address in network byte order. */ virtual absl::uint128 address() const PURE; + + /** + * @return true if address is Ipv6 and Ipv4 compatibility is disabled, false otherwise + */ + virtual bool v6only() const PURE; }; enum class IpVersion { v4, v6 }; // NOLINT(readability-identifier-naming) @@ -93,8 +97,24 @@ class Ip { virtual IpVersion version() const PURE; }; +/** + * Interface for a generic Pipe address + */ +class Pipe { +public: + virtual ~Pipe() = default; + /** + * @return abstract namespace flag + */ + virtual bool abstractNamespace() const PURE; + + /** + * @return pipe mode + */ + virtual mode_t mode() const PURE; +}; + enum class Type { Ip, Pipe }; -enum class SocketType { Stream, Datagram }; /** * Interface for all network addresses. @@ -133,40 +153,34 @@ class Instance { virtual const std::string& logicalName() const PURE; /** - * Bind a socket to this address. The socket should have been created with a call to socket() on - * an Instance of the same address family. - * @param fd supplies the platform socket handle. - * @return a Api::SysCallIntResult with rc_ = 0 for success and rc_ = -1 for failure. If the call - * is successful, errno_ shouldn't be used. + * @return the IP address information IFF type() == Type::Ip, otherwise nullptr. 
*/ - virtual Api::SysCallIntResult bind(os_fd_t fd) const PURE; + virtual const Ip* ip() const PURE; /** - * Connect a socket to this address. The socket should have been created with a call to socket() - * on this object. - * @param fd supplies the platform socket handle. - * @return a Api::SysCallIntResult with rc_ = 0 for success and rc_ = -1 for failure. If the call - * is successful, errno_ shouldn't be used. + * @return the pipe address information IFF type() == Type::Pipe, otherwise nullptr. */ - virtual Api::SysCallIntResult connect(os_fd_t fd) const PURE; + virtual const Pipe* pipe() const PURE; /** - * @return the IP address information IFF type() == Type::Ip, otherwise nullptr. + * @return the underlying structure wherein the address is stored */ - virtual const Ip* ip() const PURE; + virtual const sockaddr* sockAddr() const PURE; /** - * Create a socket for this address. - * @param type supplies the socket type to create. - * @return the IoHandlePtr naming the socket. In case of a failure, the program would be - * aborted. + * @return length of the address container */ - virtual IoHandlePtr socket(SocketType type) const PURE; + virtual socklen_t sockAddrLen() const PURE; /** * @return the type of address. 
*/ virtual Type type() const PURE; + + /** + * @return name of socket interface that should be used with this address + */ + virtual const std::string& socketInterface() const PURE; }; using InstanceConstSharedPtr = std::shared_ptr; diff --git a/include/envoy/network/connection_handler.h b/include/envoy/network/connection_handler.h index b8787df14ef8e..58f672c04641a 100644 --- a/include/envoy/network/connection_handler.h +++ b/include/envoy/network/connection_handler.h @@ -159,4 +159,4 @@ class ActiveUdpListenerFactory { using ActiveUdpListenerFactoryPtr = std::unique_ptr; } // namespace Network -} // namespace Envoy +} // namespace Envoy \ No newline at end of file diff --git a/include/envoy/network/exception.h b/include/envoy/network/exception.h new file mode 100644 index 0000000000000..54ba28bca2909 --- /dev/null +++ b/include/envoy/network/exception.h @@ -0,0 +1,32 @@ +#pragma once + +#include "envoy/common/exception.h" + +namespace Envoy { +namespace Network { + +/** + * Thrown when there is a runtime error creating/binding a listener. + */ +class CreateListenerException : public EnvoyException { +public: + CreateListenerException(const std::string& what) : EnvoyException(what) {} +}; + +/** + * Thrown when there is a runtime error binding a socket. + */ +class SocketBindException : public CreateListenerException { +public: + SocketBindException(const std::string& what, int error_number) + : CreateListenerException(what), error_number_(error_number) {} + + // This can't be called errno because otherwise the standard errno macro expansion replaces it. 
+ int errorNumber() const { return error_number_; } + +private: + const int error_number_; +}; + +} // namespace Network +} // namespace Envoy \ No newline at end of file diff --git a/include/envoy/network/filter.h b/include/envoy/network/filter.h index f929f0472afda..a111b1a22ed47 100644 --- a/include/envoy/network/filter.h +++ b/include/envoy/network/filter.h @@ -7,6 +7,8 @@ #include "envoy/network/transport_socket.h" #include "envoy/upstream/host_description.h" +#include "common/protobuf/protobuf.h" + namespace Envoy { namespace Event { @@ -269,6 +271,21 @@ class ListenerFilterCallbacks { * @param success boolean telling whether the filter execution was successful or not. */ virtual void continueFilterChain(bool success) PURE; + + /** + * @param name the namespace used in the metadata in reverse DNS format, for example: + * envoy.test.my_filter. + * @param value the struct to set on the namespace. A merge will be performed with new values for + * the same key overriding existing. + */ + virtual void setDynamicMetadata(const std::string& name, const ProtobufWkt::Struct& value) PURE; + + /** + * @return const envoy::config::core::v3::Metadata& the dynamic metadata associated with this + * connection. + */ + virtual envoy::config::core::v3::Metadata& dynamicMetadata() PURE; + virtual const envoy::config::core::v3::Metadata& dynamicMetadata() const PURE; }; /** @@ -356,6 +373,8 @@ class DrainableFilterChain : public FilterChain { virtual void startDraining() PURE; }; +using DrainableFilterChainSharedPtr = std::shared_ptr; + /** * Interface for searching through configured filter chains. 
*/ diff --git a/include/envoy/network/io_handle.h b/include/envoy/network/io_handle.h index 132912218c528..f5d18b5323329 100644 --- a/include/envoy/network/io_handle.h +++ b/include/envoy/network/io_handle.h @@ -5,8 +5,10 @@ #include "envoy/api/io_error.h" #include "envoy/common/platform.h" #include "envoy/common/pure.h" +#include "envoy/network/address.h" #include "absl/container/fixed_array.h" +#include "absl/types/optional.h" namespace Envoy { namespace Buffer { @@ -16,12 +18,6 @@ struct RawSlice; using RawSliceArrays = absl::FixedArray>; namespace Network { -namespace Address { -class Instance; -class Ip; - -using InstanceConstSharedPtr = std::shared_ptr; -} // namespace Address /** * IoHandle: an abstract interface for all I/O operations @@ -89,6 +85,8 @@ class IoHandle { Address::InstanceConstSharedPtr peer_address_; // The payload length of this packet. unsigned int msg_len_{0}; + // The gso_size, if specified in the transport header + unsigned int gso_size_{0}; }; /** @@ -144,6 +142,78 @@ class IoHandle { * return true if the platform supports recvmmsg() and sendmmsg(). */ virtual bool supportsMmsg() const PURE; + + /** + * return true if the platform supports udp_gro + */ + virtual bool supportsUdpGro() const PURE; + + /** + * Bind to address. The handle should have been created with a call to socket() + * @param address address to bind to. + * @param addrlen address length + * @return a Api::SysCallIntResult with rc_ = 0 for success and rc_ = -1 for failure. If the call + * is successful, errno_ shouldn't be used. + */ + virtual Api::SysCallIntResult bind(Address::InstanceConstSharedPtr address) PURE; + + /** + * Listen on bound handle. + * @param backlog maximum number of pending connections for listener + * @return a Api::SysCallIntResult with rc_ = 0 for success and rc_ = -1 for failure. If the call + * is successful, errno_ shouldn't be used. + */ + virtual Api::SysCallIntResult listen(int backlog) PURE; + + /** + * Connect to address. 
The handle should have been created with a call to socket() + * on this object. + * @param address remote address to connect to. + * @param addrlen remote address length + * @return a Api::SysCallIntResult with rc_ = 0 for success and rc_ = -1 for failure. If the call + * is successful, errno_ shouldn't be used. + */ + virtual Api::SysCallIntResult connect(Address::InstanceConstSharedPtr address) PURE; + + /** + * Set option (see man 2 setsockopt) + */ + virtual Api::SysCallIntResult setOption(int level, int optname, const void* optval, + socklen_t optlen) PURE; + + /** + * Get option (see man 2 getsockopt) + */ + virtual Api::SysCallIntResult getOption(int level, int optname, void* optval, + socklen_t* optlen) PURE; + + /** + * Toggle blocking behavior + * @param blocking flag to set/unset blocking state + * @return a Api::SysCallIntResult with rc_ = 0 for success and rc_ = -1 for failure. If the call + * is successful, errno_ shouldn't be used. + */ + virtual Api::SysCallIntResult setBlocking(bool blocking) PURE; + + /** + * Get domain used by underlying socket (see man 2 socket) + * @param domain updated to the underlying socket's domain if call is successful + * @return a Api::SysCallIntResult with rc_ = 0 for success and rc_ = -1 for failure. If the call + * is successful, errno_ shouldn't be used. 
+ */ + virtual absl::optional domain() PURE; + + /** + * Get local address (ip:port pair) + * @return local address as @ref Address::InstanceConstSharedPtr + */ + virtual Address::InstanceConstSharedPtr localAddress() PURE; + + /** + * Get peer's address (ip:port pair) + * @return peer's address as @ref Address::InstanceConstSharedPtr + */ + virtual Address::InstanceConstSharedPtr peerAddress() PURE; }; using IoHandlePtr = std::unique_ptr; diff --git a/include/envoy/network/listen_socket.h b/include/envoy/network/listen_socket.h index 89b8cdbebb23b..bc0c736589ee0 100644 --- a/include/envoy/network/listen_socket.h +++ b/include/envoy/network/listen_socket.h @@ -9,6 +9,7 @@ #include "envoy/config/core/v3/base.pb.h" #include "envoy/network/address.h" #include "envoy/network/io_handle.h" +#include "envoy/network/socket.h" #include "absl/strings/string_view.h" #include "absl/types/optional.h" @@ -16,165 +17,6 @@ namespace Envoy { namespace Network { -// SocketOptionName is an optional value that captures the setsockopt(2) -// arguments. The idea here is that if a socket option is not supported -// on a platform, we can make this the empty value, which allows us to -// avoid #ifdef proliferation. -struct SocketOptionName { - SocketOptionName() = default; - SocketOptionName(const SocketOptionName&) = default; - SocketOptionName(int level, int option, const std::string& name) - : value_(std::make_tuple(level, option, name)) {} - - int level() const { return std::get<0>(value_.value()); } - int option() const { return std::get<1>(value_.value()); } - const std::string& name() const { return std::get<2>(value_.value()); } - - bool has_value() const { return value_.has_value(); } - bool operator==(const SocketOptionName& rhs) const { return value_ == rhs.value_; } - -private: - absl::optional> value_; -}; - -// ENVOY_MAKE_SOCKET_OPTION_NAME is a helper macro to generate a -// SocketOptionName with a descriptive string name. 
-#define ENVOY_MAKE_SOCKET_OPTION_NAME(level, option) \ - Network::SocketOptionName(level, option, #level "/" #option) - -/** - * Base class for Sockets - */ -class Socket { -public: - virtual ~Socket() = default; - - /** - * @return the local address of the socket. - */ - virtual const Address::InstanceConstSharedPtr& localAddress() const PURE; - - /** - * Set the local address of the socket. On accepted sockets the local address defaults to the - * one at which the connection was received at, which is the same as the listener's address, if - * the listener is bound to a specific address. - * - * @param local_address the new local address. - */ - virtual void setLocalAddress(const Address::InstanceConstSharedPtr& local_address) PURE; - - /** - * @return IoHandle for the underlying connection - */ - virtual IoHandle& ioHandle() PURE; - - /** - * @return const IoHandle for the underlying connection - */ - virtual const IoHandle& ioHandle() const PURE; - - /** - * @return the type (stream or datagram) of the socket. - */ - virtual Address::SocketType socketType() const PURE; - - /** - * Close the underlying socket. - */ - virtual void close() PURE; - - /** - * Return true if close() hasn't been called. - */ - virtual bool isOpen() const PURE; - - /** - * Visitor class for setting socket options. - */ - class Option { - public: - virtual ~Option() = default; - - /** - * @param socket the socket on which to apply options. - * @param state the current state of the socket. Significant for options that can only be - * set for some particular state of the socket. - * @return true if succeeded, false otherwise. - */ - virtual bool setOption(Socket& socket, - envoy::config::core::v3::SocketOption::SocketState state) const PURE; - - /** - * @param vector of bytes to which the option should append hash key data that will be used - * to separate connections based on the option. Any data already in the key vector must - * not be modified. 
- */ - virtual void hashKey(std::vector& key) const PURE; - - /** - * Contains details about what this option applies to a socket. - */ - struct Details { - SocketOptionName name_; - std::string value_; ///< Binary string representation of an option's value. - - bool operator==(const Details& other) const { - return name_ == other.name_ && value_ == other.value_; - } - }; - - /** - * @param socket The socket for which we want to know the options that would be applied. - * @param state The state at which we would apply the options. - * @return What we would apply to the socket at the provided state. Empty if we'd apply nothing. - */ - virtual absl::optional
- getOptionDetails(const Socket& socket, - envoy::config::core::v3::SocketOption::SocketState state) const PURE; - }; - - using OptionConstSharedPtr = std::shared_ptr; - using Options = std::vector; - using OptionsSharedPtr = std::shared_ptr; - - static OptionsSharedPtr& appendOptions(OptionsSharedPtr& to, const OptionsSharedPtr& from) { - to->insert(to->end(), from->begin(), from->end()); - return to; - } - - static bool applyOptions(const OptionsSharedPtr& options, Socket& socket, - envoy::config::core::v3::SocketOption::SocketState state) { - if (options == nullptr) { - return true; - } - for (const auto& option : *options) { - if (!option->setOption(socket, state)) { - return false; - } - } - return true; - } - - /** - * Add a socket option visitor for later retrieval with options(). - */ - virtual void addOption(const OptionConstSharedPtr&) PURE; - - /** - * Add socket option visitors for later retrieval with options(). - */ - virtual void addOptions(const OptionsSharedPtr&) PURE; - - /** - * @return the socket options stored earlier with addOption() and addOptions() calls, if any. - */ - virtual const OptionsSharedPtr& options() const PURE; -}; - -using SocketPtr = std::unique_ptr; -using SocketSharedPtr = std::shared_ptr; -using SocketOptRef = absl::optional>; - /** * A socket passed to a connection. For server connections this represents the accepted socket, and * for client connections this represents the socket being connected to a remote address. @@ -254,20 +96,5 @@ class ConnectionSocket : public virtual Socket { using ConnectionSocketPtr = std::unique_ptr; -/** - * Thrown when there is a runtime error binding a socket. - */ -class SocketBindException : public EnvoyException { -public: - SocketBindException(const std::string& what, int error_number) - : EnvoyException(what), error_number_(error_number) {} - - // This can't be called errno because otherwise the standard errno macro expansion replaces it. 
- int errorNumber() const { return error_number_; } - -private: - const int error_number_; -}; - } // namespace Network } // namespace Envoy diff --git a/include/envoy/network/listener.h b/include/envoy/network/listener.h index 2f511eb99a77a..3d8257e69c5f7 100644 --- a/include/envoy/network/listener.h +++ b/include/envoy/network/listener.h @@ -7,10 +7,12 @@ #include "envoy/access_log/access_log.h" #include "envoy/api/io_error.h" #include "envoy/common/exception.h" +#include "envoy/common/resource.h" #include "envoy/config/core/v3/base.pb.h" #include "envoy/network/connection.h" #include "envoy/network/connection_balancer.h" #include "envoy/network/listen_socket.h" +#include "envoy/network/udp_packet_writer_handler.h" #include "envoy/stats/scope.h" namespace Envoy { @@ -37,7 +39,7 @@ class ListenSocketFactory { /** * @return the type of the socket getListenSocket() returns. */ - virtual Address::SocketType socketType() const PURE; + virtual Socket::Type socketType() const PURE; /** * @return the listening address of the socket getListenSocket() returns. Before getListenSocket() @@ -133,6 +135,12 @@ class ListenerConfig { */ virtual ActiveUdpListenerFactory* udpListenerFactory() PURE; + /** + * @return factory pointer if writing on UDP socket, otherwise return + * nullptr. + */ + virtual UdpPacketWriterFactoryOptRef udpPacketWriterFactory() PURE; + /** * @return traffic direction of the listener. */ @@ -144,6 +152,11 @@ class ListenerConfig { */ virtual ConnectionBalancer& connectionBalancer() PURE; + /** + * Open connection resources for this listener. + */ + virtual ResourceLimit& openConnections() PURE; + /** * @return std::vector access logs emitted by the listener. */ @@ -162,6 +175,11 @@ class ListenerCallbacks { * @param socket supplies the socket that is moved into the callee. */ virtual void onAccept(ConnectionSocketPtr&& socket) PURE; + + /** + * Called when a new connection is rejected. 
+ */ + virtual void onReject() PURE; }; /** @@ -243,6 +261,12 @@ class UdpListenerCallbacks { * @param error_code supplies the received error on the listener. */ virtual void onReceiveError(Api::IoError::IoErrorCode error_code) PURE; + + /** + * Returns the pointer to the udp_packet_writer associated with the + * UdpListenerCallback + */ + virtual UdpPacketWriter& udpPacketWriter() PURE; }; /** @@ -294,17 +318,17 @@ class UdpListener : public virtual Listener { * sender. */ virtual Api::IoCallUint64Result send(const UdpSendData& data) PURE; + + /** + * Flushes out remaining buffered data since last call of send(). + * This is a no-op if the implementation doesn't buffer data while sending. + * + * @return the error code of the underlying flush api. + */ + virtual Api::IoCallUint64Result flush() PURE; }; using UdpListenerPtr = std::unique_ptr; -/** - * Thrown when there is a runtime error creating/binding a listener. - */ -class CreateListenerException : public EnvoyException { -public: - CreateListenerException(const std::string& what) : EnvoyException(what) {} -}; - } // namespace Network } // namespace Envoy diff --git a/include/envoy/network/proxy_protocol.h b/include/envoy/network/proxy_protocol.h new file mode 100644 index 0000000000000..52c111859b115 --- /dev/null +++ b/include/envoy/network/proxy_protocol.h @@ -0,0 +1,14 @@ +#pragma once + +#include "envoy/network/address.h" + +namespace Envoy { +namespace Network { + +struct ProxyProtocolData { + const Network::Address::InstanceConstSharedPtr src_addr_; + const Network::Address::InstanceConstSharedPtr dst_addr_; +}; + +} // namespace Network +} // namespace Envoy \ No newline at end of file diff --git a/include/envoy/network/socket.h b/include/envoy/network/socket.h new file mode 100644 index 0000000000000..74c805e79785f --- /dev/null +++ b/include/envoy/network/socket.h @@ -0,0 +1,278 @@ +#pragma once + +#include +#include +#include + +#include "envoy/common/platform.h" +#include "envoy/common/pure.h" 
+#include "envoy/config/core/v3/base.pb.h" +#include "envoy/network/address.h" +#include "envoy/network/io_handle.h" + +#include "absl/strings/string_view.h" +#include "absl/types/optional.h" + +namespace Envoy { +namespace Network { + +// SocketOptionName is an optional value that captures the setsockopt(2) +// arguments. The idea here is that if a socket option is not supported +// on a platform, we can make this the empty value, which allows us to +// avoid #ifdef proliferation. +struct SocketOptionName { + SocketOptionName() = default; + SocketOptionName(const SocketOptionName&) = default; + SocketOptionName(int level, int option, const std::string& name) + : value_(std::make_tuple(level, option, name)) {} + + int level() const { return std::get<0>(value_.value()); } + int option() const { return std::get<1>(value_.value()); } + const std::string& name() const { return std::get<2>(value_.value()); } + + bool hasValue() const { return value_.has_value(); } + bool operator==(const SocketOptionName& rhs) const { return value_ == rhs.value_; } + +private: + absl::optional> value_; +}; + +// ENVOY_MAKE_SOCKET_OPTION_NAME is a helper macro to generate a +// SocketOptionName with a descriptive string name. +#define ENVOY_MAKE_SOCKET_OPTION_NAME(level, option) \ + Network::SocketOptionName(level, option, #level "/" #option) + +/** + * Base class for Sockets + */ +class Socket { +public: + virtual ~Socket() = default; + + /** + * Type of sockets supported. See man 2 socket for more details + */ + enum class Type { Stream, Datagram }; + + /** + * @return the local address of the socket. + */ + virtual const Address::InstanceConstSharedPtr& localAddress() const PURE; + + /** + * Set the local address of the socket. On accepted sockets the local address defaults to the + * one at which the connection was received at, which is the same as the listener's address, if + * the listener is bound to a specific address. + * + * @param local_address the new local address. 
+ */ + virtual void setLocalAddress(const Address::InstanceConstSharedPtr& local_address) PURE; + + /** + * @return IoHandle for the underlying connection + */ + virtual IoHandle& ioHandle() PURE; + + /** + * @return const IoHandle for the underlying connection + */ + virtual const IoHandle& ioHandle() const PURE; + + /** + * @return the type (stream or datagram) of the socket. + */ + virtual Socket::Type socketType() const PURE; + + /** + * @return the type (IP or pipe) of addresses used by the socket (subset of socket domain) + */ + virtual Address::Type addressType() const PURE; + + /** + * @return the IP version used by the socket if address type is IP, absl::nullopt otherwise + */ + virtual absl::optional ipVersion() const PURE; + + /** + * Close the underlying socket. + */ + virtual void close() PURE; + + /** + * Return true if close() hasn't been called. + */ + virtual bool isOpen() const PURE; + + /** + * Bind a socket to this address. The socket should have been created with a call to socket() + * @param address address to bind the socket to. + * @return a Api::SysCallIntResult with rc_ = 0 for success and rc_ = -1 for failure. If the call + * is successful, errno_ shouldn't be used. + */ + virtual Api::SysCallIntResult bind(const Address::InstanceConstSharedPtr address) PURE; + + /** + * Listen on bound socket. + * @param backlog maximum number of pending connections for listener + * @return a Api::SysCallIntResult with rc_ = 0 for success and rc_ = -1 for failure. If the call + * is successful, errno_ shouldn't be used. + */ + virtual Api::SysCallIntResult listen(int backlog) PURE; + + /** + * Connect a socket to this address. The socket should have been created with a call to socket() + * on this object. + * @param address remote address to connect to. + * @return a Api::SysCallIntResult with rc_ = 0 for success and rc_ = -1 for failure. If the call + * is successful, errno_ shouldn't be used. 
+ */ + virtual Api::SysCallIntResult connect(const Address::InstanceConstSharedPtr address) PURE; + + /** + * Propagates option to underlying socket (@see man 2 setsockopt) + */ + virtual Api::SysCallIntResult setSocketOption(int level, int optname, const void* optval, + socklen_t optlen) PURE; + + /** + * Retrieves option from underlying socket (@see man 2 getsockopt) + */ + virtual Api::SysCallIntResult getSocketOption(int level, int optname, void* optval, + socklen_t* optlen) const PURE; + + /** + * Toggle socket blocking state + */ + virtual Api::SysCallIntResult setBlockingForTest(bool blocking) PURE; + + /** + * Visitor class for setting socket options. + */ + class Option { + public: + virtual ~Option() = default; + + /** + * @param socket the socket on which to apply options. + * @param state the current state of the socket. Significant for options that can only be + * set for some particular state of the socket. + * @return true if succeeded, false otherwise. + */ + virtual bool setOption(Socket& socket, + envoy::config::core::v3::SocketOption::SocketState state) const PURE; + + /** + * @param vector of bytes to which the option should append hash key data that will be used + * to separate connections based on the option. Any data already in the key vector must + * not be modified. + */ + virtual void hashKey(std::vector& key) const PURE; + + /** + * Contains details about what this option applies to a socket. + */ + struct Details { + SocketOptionName name_; + std::string value_; ///< Binary string representation of an option's value. + + bool operator==(const Details& other) const { + return name_ == other.name_ && value_ == other.value_; + } + }; + + /** + * @param socket The socket for which we want to know the options that would be applied. + * @param state The state at which we would apply the options. + * @return What we would apply to the socket at the provided state. Empty if we'd apply nothing. + */ + virtual absl::optional
+ getOptionDetails(const Socket& socket, + envoy::config::core::v3::SocketOption::SocketState state) const PURE; + }; + + using OptionConstSharedPtr = std::shared_ptr; + using Options = std::vector; + using OptionsSharedPtr = std::shared_ptr; + + static OptionsSharedPtr& appendOptions(OptionsSharedPtr& to, const OptionsSharedPtr& from) { + to->insert(to->end(), from->begin(), from->end()); + return to; + } + + static bool applyOptions(const OptionsSharedPtr& options, Socket& socket, + envoy::config::core::v3::SocketOption::SocketState state) { + if (options == nullptr) { + return true; + } + for (const auto& option : *options) { + if (!option->setOption(socket, state)) { + return false; + } + } + return true; + } + + /** + * Add a socket option visitor for later retrieval with options(). + */ + virtual void addOption(const OptionConstSharedPtr&) PURE; + + /** + * Add socket option visitors for later retrieval with options(). + */ + virtual void addOptions(const OptionsSharedPtr&) PURE; + + /** + * @return the socket options stored earlier with addOption() and addOptions() calls, if any. + */ + virtual const OptionsSharedPtr& options() const PURE; +}; + +using SocketPtr = std::unique_ptr; +using SocketSharedPtr = std::shared_ptr; +using SocketOptRef = absl::optional>; + +class SocketInterface { +public: + virtual ~SocketInterface() = default; + + /** + * Low level api to create a socket in the underlying host stack. Does not create a + * @ref Network::SocketImpl + * @param type type of socket requested + * @param addr_type type of address used with the socket + * @param version IP version if address type is IP + * @param socket_v6only if the socket is ipv6 version only + * @return @ref Network::IoHandlePtr that wraps the underlying socket file descriptor + */ + virtual IoHandlePtr socket(Socket::Type type, Address::Type addr_type, Address::IpVersion version, + bool socket_v6only) const PURE; + + /** + * Low level api to create a socket in the underlying host stack. 
Does not create an + * @ref Network::SocketImpl + * @param socket_type type of socket requested + * @param addr address that is gleaned for address type and version if needed + * @return @ref Network::IoHandlePtr that wraps the underlying socket file descriptor + */ + virtual IoHandlePtr socket(Socket::Type socket_type, + const Address::InstanceConstSharedPtr addr) const PURE; + + /** + * Wrap socket file descriptor in IoHandle + * @param fd socket file descriptor to be wrapped + * @return @ref Network::IoHandlePtr that wraps the socket file descriptor + */ + virtual IoHandlePtr socket(os_fd_t fd) PURE; + + /** + * Returns true if the given family is supported on this machine. + * @param domain the IP family. + */ + virtual bool ipFamilySupported(int domain) PURE; +}; + +using SocketInterfacePtr = std::unique_ptr; + +} // namespace Network +} // namespace Envoy \ No newline at end of file diff --git a/include/envoy/network/transport_socket.h b/include/envoy/network/transport_socket.h index e303248fd471a..9e117b1161343 100644 --- a/include/envoy/network/transport_socket.h +++ b/include/envoy/network/transport_socket.h @@ -5,6 +5,7 @@ #include "envoy/buffer/buffer.h" #include "envoy/common/pure.h" #include "envoy/network/io_handle.h" +#include "envoy/network/proxy_protocol.h" #include "envoy/ssl/connection.h" #include "absl/types/optional.h" @@ -180,10 +181,31 @@ class TransportSocketOptions { virtual const std::vector& verifySubjectAltNameListOverride() const PURE; /** + * The application protocols to use when negotiating an upstream connection. When an application + * protocol override is provided, it will *always* be used. * @return the optional overridden application protocols. */ virtual const std::vector& applicationProtocolListOverride() const PURE; + /** + * The application protocol to use when negotiating an upstream connection and no other + * application protocol has been configured. 
Both + * TransportSocketOptions::applicationProtocolListOverride and application protocols configured + * in the CommonTlsContext on the Cluster will take precedence. + * + * Note that this option is intended for intermediate code (e.g. the HTTP connection pools) to + * specify a default ALPN when no specific values are specified elsewhere. As such, providing a + * value here might not make sense prior to load balancing. + * @return the optional fallback for application protocols, for when they are not specified in the + * TLS configuration. + */ + virtual const absl::optional& applicationProtocolFallback() const PURE; + + /** + * @return optional PROXY protocol address information. + */ + virtual absl::optional proxyProtocolOptions() const PURE; + /** * @param vector of bytes to which the option should append hash key data that will be used * to separate connections based on the option. Any data already in the key vector must diff --git a/include/envoy/network/udp_packet_writer_config.h b/include/envoy/network/udp_packet_writer_config.h new file mode 100644 index 0000000000000..dee4487e21983 --- /dev/null +++ b/include/envoy/network/udp_packet_writer_config.h @@ -0,0 +1,26 @@ +#pragma once + +#include "envoy/config/typed_config.h" +#include "envoy/network/udp_packet_writer_handler.h" + +#include "common/protobuf/protobuf.h" + +namespace Envoy { +namespace Network { + +class UdpPacketWriterConfigFactory : public Config::TypedFactory { +public: + ~UdpPacketWriterConfigFactory() override = default; + + /** + * Create an UdpPacketWriterFactory object according to given message. + * @param message specifies Udp Packet Writer options in a protobuf. 
+ */ + virtual Network::UdpPacketWriterFactoryPtr + createUdpPacketWriterFactory(const Protobuf::Message& message) PURE; + + std::string category() const override { return "envoy.udp_packet_writers"; } +}; + +} // namespace Network +} // namespace Envoy diff --git a/include/envoy/network/udp_packet_writer_handler.h b/include/envoy/network/udp_packet_writer_handler.h new file mode 100644 index 0000000000000..dc82e54d8c346 --- /dev/null +++ b/include/envoy/network/udp_packet_writer_handler.h @@ -0,0 +1,120 @@ +#pragma once + +#include +#include + +#include "envoy/api/io_error.h" +#include "envoy/buffer/buffer.h" +#include "envoy/network/address.h" +#include "envoy/network/socket.h" +#include "envoy/stats/scope.h" +#include "envoy/stats/stats_macros.h" + +namespace Envoy { +namespace Network { + +/** + * Max v6 packet size, excluding IP and UDP headers. + */ +constexpr uint64_t UdpMaxOutgoingPacketSize = 1452; + +/** + * UdpPacketWriterBuffer bundles a buffer and a function that + * releases it. + */ +struct UdpPacketWriterBuffer { + UdpPacketWriterBuffer() = default; + UdpPacketWriterBuffer(uint8_t* buffer, size_t length, + std::function release_buffer) + : buffer_(buffer), length_(length), release_buffer_(std::move(release_buffer)) {} + + uint8_t* buffer_ = nullptr; + size_t length_ = 0; + std::function release_buffer_; +}; + +class UdpPacketWriter { +public: + virtual ~UdpPacketWriter() = default; + + /** + * @brief Sends a packet via given UDP socket with specific source address. + * + * @param buffer points to the buffer containing the packet + * @param local_ip is the source address to be used to send. If it is null, + * picks up the default network interface ip address. + * @param peer_address is the destination address to send to. 
+ * @return result with number of bytes written, and write status + */ + virtual Api::IoCallUint64Result writePacket(const Buffer::Instance& buffer, + const Address::Ip* local_ip, + const Address::Instance& peer_address) PURE; + + /** + * @returns true if the network socket is not writable. + */ + virtual bool isWriteBlocked() const PURE; + + /** + * @brief mark the socket as writable when the socket is unblocked. + */ + virtual void setWritable() PURE; + + /** + * @brief Get the maximum size of the packet which can be written using this + * writer for the supplied peer address. + * + * @param peer_address is the destination address to send to. + * @return the max packet size + */ + virtual uint64_t getMaxPacketSize(const Address::Instance& peer_address) const PURE; + + /** + * @return true if Batch Mode + * @return false if PassThroughMode + */ + virtual bool isBatchMode() const PURE; + + /** + * @brief Get pointer to the next write location in internal buffer, + * it should be called iff the caller does not call writePacket + * for the returned buffer. The caller is expected to call writePacket + * with the buffer returned from this function to save a memcpy. + * + * @param local_ip is the source address to be used to send. + * @param peer_address is the destination address to send to. + * @return { char* to the next write location, + * func to release buffer } + */ + virtual UdpPacketWriterBuffer getNextWriteLocation(const Address::Ip* local_ip, + const Address::Instance& peer_address) PURE; + + /** + * @brief Batch Mode: Try to send all buffered packets + * PassThrough Mode: NULL operation + * + * @return Api::IoCallUint64Result + */ + virtual Api::IoCallUint64Result flush() PURE; +}; + +using UdpPacketWriterPtr = std::unique_ptr; + +class UdpPacketWriterFactory { +public: + virtual ~UdpPacketWriterFactory() = default; + + /** + * Creates an UdpPacketWriter object for the given Udp Socket + * @param socket UDP socket used to send packets. 
+ * @return the UdpPacketWriter created. + */ + virtual UdpPacketWriterPtr createUdpPacketWriter(Network::IoHandle& io_handle, + Stats::Scope& scope) PURE; +}; + +using UdpPacketWriterFactoryPtr = std::unique_ptr; +using UdpPacketWriterFactoryOptRef = absl::optional>; + +} // namespace Network +} // namespace Envoy diff --git a/include/envoy/protobuf/BUILD b/include/envoy/protobuf/BUILD index c23eccce45ced..76eff507352dc 100644 --- a/include/envoy/protobuf/BUILD +++ b/include/envoy/protobuf/BUILD @@ -1,15 +1,17 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( name = "message_validator_interface", hdrs = ["message_validator.h"], - deps = ["//source/common/protobuf"], + deps = [ + "//source/common/protobuf", + ], ) diff --git a/include/envoy/protobuf/message_validator.h b/include/envoy/protobuf/message_validator.h index 613b8c964249e..8ec4dccb46e3b 100644 --- a/include/envoy/protobuf/message_validator.h +++ b/include/envoy/protobuf/message_validator.h @@ -12,13 +12,22 @@ namespace ProtobufMessage { /** * Exception class for reporting validation errors due to the presence of unknown - * fields in a protobuf + * fields in a protobuf. */ class UnknownProtoFieldException : public EnvoyException { public: UnknownProtoFieldException(const std::string& message) : EnvoyException(message) {} }; +/** + * Exception class for reporting validation errors due to the presence of deprecated + * fields in a protobuf. + */ +class DeprecatedProtoFieldException : public EnvoyException { +public: + DeprecatedProtoFieldException(const std::string& message) : EnvoyException(message) {} +}; + /** * Visitor interface for a Protobuf::Message. 
The methods of ValidationVisitor are invoked to * perform validation based on events encountered during or after the parsing of proto binary @@ -30,9 +39,23 @@ class ValidationVisitor { /** * Invoked when an unknown field is encountered. - * @param description human readable description of the field + * @param description human readable description of the field. */ virtual void onUnknownField(absl::string_view description) PURE; + + /** + * If true, skip this validation visitor in the interest of speed when + * possible. + **/ + virtual bool skipValidation() PURE; + + /** + * Invoked when deprecated field is encountered. + * @param description human readable description of the field. + * @param soft_deprecation is set to true, visitor would log a warning message, otherwise would + * throw an exception. + */ + virtual void onDeprecatedField(absl::string_view description, bool soft_deprecation) PURE; }; class ValidationContext { diff --git a/include/envoy/ratelimit/BUILD b/include/envoy/ratelimit/BUILD index 38b7e7bf4f0a9..615b69fa31073 100644 --- a/include/envoy/ratelimit/BUILD +++ b/include/envoy/ratelimit/BUILD @@ -1,14 +1,17 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( name = "ratelimit_interface", hdrs = ["ratelimit.h"], + deps = [ + "@envoy_api//envoy/type/v3:pkg_cc_proto", + ], ) diff --git a/include/envoy/ratelimit/ratelimit.h b/include/envoy/ratelimit/ratelimit.h index 4e122b6863f15..f23c8170ef684 100644 --- a/include/envoy/ratelimit/ratelimit.h +++ b/include/envoy/ratelimit/ratelimit.h @@ -3,9 +3,21 @@ #include #include +#include "envoy/type/v3/ratelimit_unit.pb.h" + +#include "absl/types/optional.h" + namespace Envoy { namespace RateLimit { +/** + * An optional dynamic override for the rate limit. 
See ratelimit.proto + */ +struct RateLimitOverride { + uint32_t requests_per_unit_; + envoy::type::v3::RateLimitUnit unit_; +}; + /** * A single rate limit request descriptor entry. See ratelimit.proto. */ @@ -19,6 +31,7 @@ struct DescriptorEntry { */ struct Descriptor { std::vector entries_; + absl::optional limit_ = absl::nullopt; }; } // namespace RateLimit diff --git a/include/envoy/registry/BUILD b/include/envoy/registry/BUILD index 7e2c9e38fd677..de34bccd492fa 100644 --- a/include/envoy/registry/BUILD +++ b/include/envoy/registry/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/include/envoy/registry/registry.h b/include/envoy/registry/registry.h index d48a48dd0b665..2b85df27c2e93 100644 --- a/include/envoy/registry/registry.h +++ b/include/envoy/registry/registry.h @@ -164,9 +164,9 @@ template class FactoryRegistry : public Logger::Loggable class FactoryRegistry : public Logger::Loggable& - versioned_factories() { + versionedFactories() { using VersionedFactoryMap = absl::flat_hash_map; MUTABLE_CONSTRUCT_ON_FIRST_USE(VersionedFactoryMap); @@ -236,7 +236,7 @@ template class FactoryRegistry : public Logger::Loggable class FactoryRegistry : public Logger::Loggable class FactoryRegistry : public Logger::Loggable getFactoryVersion(absl::string_view name) { - auto it = versioned_factories().find(name); - if (it == versioned_factories().end()) { + auto it = versionedFactories().find(name); + if (it == versionedFactories().end()) { return absl::nullopt; } return it->second; @@ -342,13 +342,13 @@ template class FactoryRegistry : public Logger::Loggable> buildFactoriesByType() { auto mapping = std::make_unique>(); - for (const auto& factory : factories()) { - if (factory.second == nullptr) { + for (const auto& [factory_name, factory] : factories()) { + if (factory == nullptr) { continue; } // Skip 
untyped factories. - std::string config_type = factory.second->configType(); + std::string config_type = factory->configType(); if (config_type.empty()) { continue; } @@ -356,14 +356,14 @@ template class FactoryRegistry : public Logger::Loggablefind(config_type); - if (it != mapping->end() && it->second != factory.second) { + if (it != mapping->end() && it->second != factory) { // Mark double-registered types with a nullptr. // See issue https://github.com/envoyproxy/envoy/issues/9643. ENVOY_LOG(warn, "Double registration for type: '{}' by '{}' and '{}'", config_type, - factory.second->name(), it->second ? it->second->name() : ""); + factory->name(), it->second ? it->second->name() : ""); it->second = nullptr; } else { - mapping->emplace(std::make_pair(config_type, factory.second)); + mapping->emplace(std::make_pair(config_type, factory)); } const Protobuf::Descriptor* previous = @@ -464,21 +464,22 @@ template class FactoryRegistry : public Logger::Loggablename(), prev_by_name->configType()); } - for (auto mapping : prev_deprecated_names) { - deprecatedFactoryNames().erase(mapping.first); + for (auto [prev_deprecated_name, mapped_canonical_name] : prev_deprecated_names) { + deprecatedFactoryNames().erase(prev_deprecated_name); - ENVOY_LOG(info, "Removed deprecated name '{}'", mapping.first); + ENVOY_LOG(info, "Removed deprecated name '{}'", prev_deprecated_name); - if (!mapping.second.empty()) { - deprecatedFactoryNames().emplace(std::make_pair(mapping.first, mapping.second)); + if (!mapped_canonical_name.empty()) { + deprecatedFactoryNames().emplace( + std::make_pair(prev_deprecated_name, mapped_canonical_name)); - auto* deprecated_factory = getFactory(mapping.second); + auto* deprecated_factory = getFactory(mapped_canonical_name); RELEASE_ASSERT(deprecated_factory != nullptr, "failed to restore deprecated factory name"); - factories().emplace(mapping.second, deprecated_factory); + factories().emplace(mapped_canonical_name, deprecated_factory); - ENVOY_LOG(info, 
"Restored deprecated name '{}' (mapped to '{}'", mapping.first, - mapping.second); + ENVOY_LOG(info, "Restored deprecated name '{}' (mapped to '{}'", prev_deprecated_name, + mapped_canonical_name); } } @@ -528,7 +529,8 @@ template class RegisterFactory { if (!instance_.name().empty()) { FactoryRegistry::registerFactory(instance_, instance_.name()); } else { - ASSERT(deprecated_names.size() != 0); + ASSERT(deprecated_names.size() != 0, + "Attempted to register a factory without a name or deprecated name"); } for (auto deprecated_name : deprecated_names) { diff --git a/include/envoy/router/BUILD b/include/envoy/router/BUILD index 6ed49171af71f..85b6058ed878e 100644 --- a/include/envoy/router/BUILD +++ b/include/envoy/router/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( @@ -13,6 +13,7 @@ envoy_cc_library( hdrs = ["rds.h"], deps = [ ":router_interface", + "//include/envoy/http:filter_interface", "@envoy_api//envoy/config/route/v3:pkg_cc_proto", ], ) @@ -53,19 +54,24 @@ envoy_cc_library( hdrs = ["router.h"], external_deps = ["abseil_optional"], deps = [ + ":internal_redirect_interface", "//include/envoy/access_log:access_log_interface", + "//include/envoy/common:conn_pool_interface", "//include/envoy/common:matchers_interface", "//include/envoy/config:typed_metadata_interface", "//include/envoy/http:codec_interface", "//include/envoy/http:codes_interface", + "//include/envoy/http:conn_pool_interface", "//include/envoy/http:hash_policy_interface", "//include/envoy/http:header_map_interface", + "//include/envoy/tcp:conn_pool_interface", "//include/envoy/tracing:http_tracer_interface", "//include/envoy/upstream:resource_manager_interface", "//include/envoy/upstream:retry_interface", "//source/common/protobuf", "//source/common/protobuf:utility_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + 
"@envoy_api//envoy/config/route/v3:pkg_cc_proto", "@envoy_api//envoy/type/v3:pkg_cc_proto", ], ) @@ -87,6 +93,7 @@ envoy_cc_library( "//include/envoy/http:filter_interface", "//include/envoy/http:header_map_interface", "//include/envoy/ratelimit:ratelimit_interface", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], ) @@ -107,3 +114,13 @@ envoy_cc_library( "//include/envoy/stream_info:filter_state_interface", ], ) + +envoy_cc_library( + name = "internal_redirect_interface", + hdrs = ["internal_redirect.h"], + deps = [ + "//include/envoy/config:typed_config_interface", + "//include/envoy/stream_info:filter_state_interface", + "//source/common/common:minimal_logger_lib", + ], +) diff --git a/include/envoy/router/internal_redirect.h b/include/envoy/router/internal_redirect.h new file mode 100644 index 0000000000000..95f624255ace6 --- /dev/null +++ b/include/envoy/router/internal_redirect.h @@ -0,0 +1,66 @@ +#pragma once + +#include "envoy/config/typed_config.h" +#include "envoy/stream_info/filter_state.h" + +#include "common/common/logger.h" + +#include "absl/strings/string_view.h" + +namespace Envoy { +namespace Router { + +/** + * Used to decide if an internal redirect is allowed to be followed based on the target route. + * Subclassing Logger::Loggable so that implementations can log details. + */ +class InternalRedirectPredicate : Logger::Loggable { +public: + virtual ~InternalRedirectPredicate() = default; + + /** + * A FilterState is provided so that predicate implementation can use it to preserve state across + * internal redirects. + * @param filter_state supplies the filter state associated with the current request so that the + * predicates can use it to persist states across filter chains. + * @param target_route_name indicates the route that an internal redirect is targeting. + * @param downstream_is_https indicates the downstream request is using https. + * @param target_is_https indicates the internal redirect target url has https in the url. 
+ * @return whether the route specified by target_route_name is allowed to be followed. Any + * predicate returning false will prevent the redirect from being followed, causing the + * response to be proxied downstream. + */ + virtual bool acceptTargetRoute(StreamInfo::FilterState& filter_state, + absl::string_view target_route_name, bool downstream_is_https, + bool target_is_https) PURE; + + /** + * @return the name of the current predicate. + */ + virtual absl::string_view name() const PURE; +}; + +using InternalRedirectPredicateSharedPtr = std::shared_ptr; + +/** + * Factory for InternalRedirectPredicate. + */ +class InternalRedirectPredicateFactory : public Config::TypedFactory { +public: + ~InternalRedirectPredicateFactory() override = default; + + /** + * @param config contains the proto stored in TypedExtensionConfig.typed_config for the predicate. + * @param current_route_name stores the route name of the route where the predicate is installed. + * @return an InternalRedirectPredicate. The given current_route_name is useful for predicates + * that need to create per-route FilterState. 
+ */ + virtual InternalRedirectPredicateSharedPtr + createInternalRedirectPredicate(const Protobuf::Message& config, + absl::string_view current_route_name) PURE; + + std::string category() const override { return "envoy.internal_redirect_predicates"; } +}; + +} // namespace Router +} // namespace Envoy diff --git a/include/envoy/router/route_config_provider_manager.h b/include/envoy/router/route_config_provider_manager.h index f266407a36515..67a184f2ba8e6 100644 --- a/include/envoy/router/route_config_provider_manager.h +++ b/include/envoy/router/route_config_provider_manager.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include "envoy/config/route/v3/route.pb.h" @@ -55,5 +56,8 @@ class RouteConfigProviderManager { ProtobufMessage::ValidationVisitor& validator) PURE; }; +using RouteConfigProviderManagerPtr = std::unique_ptr; +using RouteConfigProviderManagerSharedPtr = std::shared_ptr; + } // namespace Router } // namespace Envoy diff --git a/include/envoy/router/route_config_update_receiver.h b/include/envoy/router/route_config_update_receiver.h index 717f3c017758e..d18c6d5542529 100644 --- a/include/envoy/router/route_config_update_receiver.h +++ b/include/envoy/router/route_config_update_receiver.h @@ -31,18 +31,21 @@ class RouteConfigUpdateReceiver { virtual bool onRdsUpdate(const envoy::config::route::v3::RouteConfiguration& rc, const std::string& version_info) PURE; + using VirtualHostRefVector = + std::vector>; + /** * Called on updates via VHDS. - * @param added_resources supplies Resources (each containing a VirtualHost) that have been - * added. + * @param added_vhosts supplies VirtualHosts that have been added. + * @param added_resource_ids set of resources IDs (names + aliases) added. * @param removed_resources supplies names of VirtualHosts that have been removed. * @param version_info supplies RouteConfiguration version. * @return bool whether RouteConfiguration has been updated. 
*/ - virtual bool onVhdsUpdate( - const Protobuf::RepeatedPtrField& added_resources, - const Protobuf::RepeatedPtrField& removed_resources, - const std::string& version_info) PURE; + virtual bool onVhdsUpdate(const VirtualHostRefVector& added_vhosts, + const std::set& added_resource_ids, + const Protobuf::RepeatedPtrField& removed_resources, + const std::string& version_info) PURE; /** * @return std::string& the name of RouteConfiguration. @@ -75,7 +78,7 @@ class RouteConfigUpdateReceiver { virtual absl::optional configInfo() const PURE; /** - * @return envoy::api::v2::RouteConfiguration& current RouteConfiguration. + * @return envoy::config::route::v3::RouteConfiguration& current RouteConfiguration. */ virtual const envoy::config::route::v3::RouteConfiguration& routeConfiguration() PURE; diff --git a/include/envoy/router/router.h b/include/envoy/router/router.h index 13032173d929f..7d37cf02cf32a 100644 --- a/include/envoy/router/router.h +++ b/include/envoy/router/router.h @@ -9,13 +9,18 @@ #include #include "envoy/access_log/access_log.h" +#include "envoy/common/conn_pool.h" #include "envoy/common/matchers.h" #include "envoy/config/core/v3/base.pb.h" +#include "envoy/config/route/v3/route_components.pb.h" #include "envoy/config/typed_metadata.h" #include "envoy/http/codec.h" #include "envoy/http/codes.h" +#include "envoy/http/conn_pool.h" #include "envoy/http/hash_policy.h" #include "envoy/http/header_map.h" +#include "envoy/router/internal_redirect.h" +#include "envoy/tcp/conn_pool.h" #include "envoy/tracing/http_tracer.h" #include "envoy/type/v3/percent.pb.h" #include "envoy/upstream/resource_manager.h" @@ -30,7 +35,8 @@ namespace Envoy { namespace Upstream { class ClusterManager; -} +class LoadBalancerContext; +} // namespace Upstream namespace Router { @@ -163,6 +169,7 @@ class RetryPolicy { static const uint32_t RETRY_ON_RETRIABLE_STATUS_CODES = 0x400; static const uint32_t RETRY_ON_RESET = 0x800; static const uint32_t RETRY_ON_RETRIABLE_HEADERS = 0x1000; + 
static const uint32_t RETRY_ON_ENVOY_RATE_LIMITED = 0x2000; // clang-format on virtual ~RetryPolicy() = default; @@ -236,9 +243,42 @@ class RetryPolicy { enum class RetryStatus { No, NoOverflow, NoRetryLimitExceeded, Yes }; /** - * InternalRedirectAction from the route configuration. + * InternalRedirectPolicy from the route configuration. */ -enum class InternalRedirectAction { PassThrough, Handle }; +class InternalRedirectPolicy { +public: + virtual ~InternalRedirectPolicy() = default; + + /** + * @return whether internal redirect is enabled on this route. + */ + virtual bool enabled() const PURE; + + /** + * @param response_code the response code from the upstream. + * @return whether the given response_code should trigger an internal redirect on this route. + */ + virtual bool shouldRedirectForResponseCode(const Http::Code& response_code) const PURE; + + /** + * Creates the target route predicates. This should really be called only once for each upstream + * redirect response. Creating the predicates lazily to avoid wasting CPU cycles on non-redirect + * responses, which should be the most common case. + * @return a vector of newly constructed InternalRedirectPredicate instances. + */ + virtual std::vector predicates() const PURE; + + /** + * @return the maximum number of allowed internal redirects on this route. + */ + virtual uint32_t maxInternalRedirects() const PURE; + + /** + * @return if it is allowed to follow the redirect with a different scheme in + * the target URI than the downstream request. + */ + virtual bool isCrossSchemeRedirectAllowed() const PURE; +}; /** * Wraps retry state for an active routed request. @@ -322,11 +362,13 @@ class RetryState { * Returns a reference to the PriorityLoad that should be used for the next retry. * @param priority_set current priority set. * @param original_priority_load original priority load. + * @param priority_mapping_func see @Upstream::RetryPriority::PriorityMappingFunc. 
* @return HealthyAndDegradedLoad that should be used to select a priority for the next retry. */ - virtual const Upstream::HealthyAndDegradedLoad& - priorityLoadForRetry(const Upstream::PrioritySet& priority_set, - const Upstream::HealthyAndDegradedLoad& original_priority_load) PURE; + virtual const Upstream::HealthyAndDegradedLoad& priorityLoadForRetry( + const Upstream::PrioritySet& priority_set, + const Upstream::HealthyAndDegradedLoad& original_priority_load, + const Upstream::RetryPriority::PriorityMappingFunc& priority_mapping_func) PURE; /** * return how many times host selection should be reattempted during host selection. */ @@ -683,6 +725,13 @@ class RouteEntry : public ResponseEntry { */ virtual const RetryPolicy& retryPolicy() const PURE; + /** + * @return const InternalRedirectPolicy& the internal redirect policy for the route. All routes + * have a internal redirect policy even if it is not enabled, which means redirects are + * simply proxied as normal responses. + */ + virtual const InternalRedirectPolicy& internalRedirectPolicy() const PURE; + /** * @return uint32_t any route cap on bytes which should be buffered for shadowing or retries. * This is an upper bound so does not necessarily reflect the bytes which will be buffered @@ -764,7 +813,7 @@ class RouteEntry : public ResponseEntry { virtual const Envoy::Config::TypedMetadata& typedMetadata() const PURE; /** - * @return const envoy::api::v2::core::Metadata& return the metadata provided in the config for + * @return const envoy::config::core::v3::Metadata& return the metadata provided in the config for * this route. */ virtual const envoy::config::core::v3::Metadata& metadata() const PURE; @@ -825,16 +874,11 @@ class RouteEntry : public ResponseEntry { */ virtual const UpgradeMap& upgradeMap() const PURE; + using ConnectConfig = envoy::config::route::v3::RouteAction::UpgradeConfig::ConnectConfig; /** - * @returns the internal redirect action which should be taken on this route. 
- */ - virtual InternalRedirectAction internalRedirectAction() const PURE; - - /** - * @returns the threshold of number of previously handled internal redirects, for this route to - * stop handle internal redirects. + * If present, informs how to handle proxying CONNECT requests on this route. */ - virtual uint32_t maxInternalRedirects() const PURE; + virtual const absl::optional& connectConfig() const PURE; /** * @return std::string& the name of the route. @@ -950,6 +994,44 @@ class Route { using RouteConstSharedPtr = std::shared_ptr; +/** + * RouteCallback, returns one of these enums to the route matcher to indicate + * if the matched route has been accepted or it wants the route matching to + * continue. + */ +enum class RouteMatchStatus { + // Continue matching route + Continue, + // Accept matched route + Accept +}; + +/** + * RouteCallback is passed this enum to indicate if more routes are available for evaluation. + */ +enum class RouteEvalStatus { + // Has more routes that can be evaluated for match. + HasMoreRoutes, + // All routes have been evaluated for match. + NoMoreRoutes +}; + +/** + * RouteCallback can be used to override routing decision made by the Route::Config::route, + * this callback is passed the RouteConstSharedPtr, when a matching route is found, and + * RouteEvalStatus indicating whether there are more routes available for evaluation. + * + * RouteCallback will be called back only when at least one matching route is found, if no matching + * routes are found RouteCallback will not be invoked. RouteCallback can return one of the + * RouteMatchStatus enum to indicate if the match has been accepted or should the route match + * evaluation continue. + * + * Returning RouteMatchStatus::Continue, when no more routes available for evaluation will result in + * no further callbacks and no route is deemed to be accepted and nullptr is returned to the caller + * of Route::Config::route. 
+ */ +using RouteCallback = std::function; + /** * The router configuration. */ @@ -969,6 +1051,25 @@ class Config { const StreamInfo::StreamInfo& stream_info, uint64_t random_value) const PURE; + /** + * Based on the incoming HTTP request headers, determine the target route (containing either a + * route entry or a direct response entry) for the request. + * + * Invokes callback with matched route, callback can choose to accept the route by returning + * RouteStatus::Stop or continue route match from last matched route by returning + * RouteMatchStatus::Continue, when more routes are available. + * + * @param cb supplies callback to be invoked upon route match. + * @param headers supplies the request headers. + * @param random_value supplies the random seed to use if a runtime choice is required. This + * allows stable choices between calls if desired. + * @return the route accepted by the callback or nullptr if no match found or none of route is + * accepted by the callback. + */ + virtual RouteConstSharedPtr route(const RouteCallback& cb, const Http::RequestHeaderMap& headers, + const StreamInfo::StreamInfo& stream_info, + uint64_t random_value) const PURE; + /** * Return a list of headers that will be cleaned from any requests that are not from an internal * (RFC1918) source. @@ -995,5 +1096,170 @@ class Config { using ConfigConstSharedPtr = std::shared_ptr; +class GenericConnectionPoolCallbacks; +class GenericUpstream; + +/** + * An API for wrapping either an HTTP or a TCP connection pool. + * + * The GenericConnPool exists to create a GenericUpstream handle via a call to + * newStream resulting in an eventual call to onPoolReady + */ +class GenericConnPool { +public: + virtual ~GenericConnPool() = default; + + /** + * Called to create a new HTTP stream or TCP connection for "CONNECT streams". 
+ * + * The implementation of the GenericConnPool will either call + * GenericConnectionPoolCallbacks::onPoolReady + * when a stream is available or GenericConnectionPoolCallbacks::onPoolFailure + * if stream creation fails. + * + * The caller is responsible for calling cancelAnyPendingRequest() if stream + * creation is no longer desired. newStream may only be called once per + * GenericConnPool. + * + * @param callbacks callbacks to communicate stream failure or creation on. + */ + virtual void newStream(GenericConnectionPoolCallbacks* callbacks) PURE; + /** + * Called to cancel any pending newStream request, + */ + virtual bool cancelAnyPendingRequest() PURE; + /** + * @return optionally returns the protocol for the connection pool. + */ + virtual absl::optional protocol() const PURE; + /** + * @return optionally returns the host for the connection pool. + */ + virtual Upstream::HostDescriptionConstSharedPtr host() const PURE; +}; + +/** + * An API for the interactions the upstream stream needs to have with the downstream stream + * and/or router components + */ +class UpstreamToDownstream : public Http::ResponseDecoder, public Http::StreamCallbacks { +public: + /** + * @return return the routeEntry for the downstream stream. + */ + virtual const RouteEntry& routeEntry() const PURE; + /** + * @return return the connection for the downstream stream. + */ + virtual const Network::Connection& connection() const PURE; +}; + +/** + * An API for wrapping callbacks from either an HTTP or a TCP connection pool. + * + * Just like the connection pool callbacks, the GenericConnectionPoolCallbacks + * will either call onPoolReady when a GenericUpstream is ready, or + * onPoolFailure if a connection/stream can not be established. + */ +class GenericConnectionPoolCallbacks { +public: + virtual ~GenericConnectionPoolCallbacks() = default; + + /** + * Called to indicate a failure for GenericConnPool::newStream to establish a stream. 
+ * + * @param reason supplies the failure reason. + * @param transport_failure_reason supplies the details of the transport failure reason. + * @param host supplies the description of the host that caused the failure. This may be nullptr + * if no host was involved in the failure (for example overflow). + */ + virtual void onPoolFailure(ConnectionPool::PoolFailureReason reason, + absl::string_view transport_failure_reason, + Upstream::HostDescriptionConstSharedPtr host) PURE; + /** + * Called when GenericConnPool::newStream has established a new stream. + * + * @param upstream supplies the generic upstream for the stream. + * @param host supplies the description of the host that will carry the request. For logical + * connection pools the description may be different each time this is called. + * @param upstream_local_address supplies the local address of the upstream connection. + * @param info supplies the stream info object associated with the upstream connection. + */ + virtual void onPoolReady(std::unique_ptr&& upstream, + Upstream::HostDescriptionConstSharedPtr host, + const Network::Address::InstanceConstSharedPtr& upstream_local_address, + const StreamInfo::StreamInfo& info) PURE; + + // @return the UpstreamToDownstream interface for this stream. + // + // This is the interface for all interactions the upstream stream needs to have with the + // downstream stream. It is in the GenericConnectionPoolCallbacks as the GenericConnectionPool + // creates the GenericUpstream, and the GenericUpstream will need this interface. + virtual UpstreamToDownstream& upstreamToDownstream() PURE; +}; + +/** + * An API for sending information to either a TCP or HTTP upstream. + * + * It is similar logically to RequestEncoder, only without the getStream interface. + */ +class GenericUpstream { +public: + virtual ~GenericUpstream() = default; + /** + * Encode a data frame. + * @param data supplies the data to encode. The data may be moved by the encoder. 
+ * @param end_stream supplies whether this is the last data frame. + */ + virtual void encodeData(Buffer::Instance& data, bool end_stream) PURE; + /** + * Encode metadata. + * @param metadata_map_vector is the vector of metadata maps to encode. + */ + virtual void encodeMetadata(const Http::MetadataMapVector& metadata_map_vector) PURE; + /** + * Encode headers, optionally indicating end of stream. + * @param headers supplies the header map to encode. + * @param end_stream supplies whether this is a header only request. + */ + virtual void encodeHeaders(const Http::RequestHeaderMap& headers, bool end_stream) PURE; + /** + * Encode trailers. This implicitly ends the stream. + * @param trailers supplies the trailers to encode. + */ + virtual void encodeTrailers(const Http::RequestTrailerMap& trailers) PURE; + /** + * Enable/disable further data from this stream. + */ + virtual void readDisable(bool disable) PURE; + /** + * Reset the stream. No events will fire beyond this point. + * @param reason supplies the reset reason. + */ + virtual void resetStream() PURE; +}; + +using GenericConnPoolPtr = std::unique_ptr; + +/* + * A factory for creating generic connection pools. 
+ */ +class GenericConnPoolFactory : public Envoy::Config::TypedFactory { +public: + ~GenericConnPoolFactory() override = default; + + /* + * @param options for creating the transport socket + * @return may be null + */ + virtual GenericConnPoolPtr + createGenericConnPool(Upstream::ClusterManager& cm, bool is_connect, + const RouteEntry& route_entry, + absl::optional downstream_protocol, + Upstream::LoadBalancerContext* ctx) const PURE; +}; + +using GenericConnPoolFactoryPtr = std::unique_ptr; + } // namespace Router } // namespace Envoy diff --git a/include/envoy/router/router_ratelimit.h b/include/envoy/router/router_ratelimit.h index 246c177bd47b7..1e6910c3b9ba2 100644 --- a/include/envoy/router/router_ratelimit.h +++ b/include/envoy/router/router_ratelimit.h @@ -5,12 +5,33 @@ #include #include +#include "envoy/config/core/v3/base.pb.h" #include "envoy/http/filter.h" #include "envoy/http/header_map.h" #include "envoy/ratelimit/ratelimit.h" namespace Envoy { namespace Router { + +/** + * Base interface for generic rate limit override action. + */ +class RateLimitOverrideAction { +public: + virtual ~RateLimitOverrideAction() = default; + + /** + * Potentially populate the descriptors 'limit' property with a RateLimitOverride instance + * @param descriptor supplies the descriptor to optionally fill. + * @param metadata supplies the dynamic metadata for the request. + * @return true if RateLimitOverride was set in the descriptor. + */ + virtual bool populateOverride(RateLimit::Descriptor& descriptor, + const envoy::config::core::v3::Metadata* metadata) const PURE; +}; + +using RateLimitOverrideActionPtr = std::unique_ptr; + /** * Base interface for generic rate limit action. */ @@ -25,12 +46,14 @@ class RateLimitAction { * @param local_service_cluster supplies the name of the local service cluster. * @param headers supplies the header for the request. * @param remote_address supplies the trusted downstream address for the connection. 
+ * @param dynamic_metadata supplies the dynamic metadata for the request * @return true if the RateLimitAction populated the descriptor. */ - virtual bool populateDescriptor(const RouteEntry& route, RateLimit::Descriptor& descriptor, - const std::string& local_service_cluster, - const Http::HeaderMap& headers, - const Network::Address::Instance& remote_address) const PURE; + virtual bool + populateDescriptor(const RouteEntry& route, RateLimit::Descriptor& descriptor, + const std::string& local_service_cluster, const Http::HeaderMap& headers, + const Network::Address::Instance& remote_address, + const envoy::config::core::v3::Metadata* dynamic_metadata) const PURE; }; using RateLimitActionPtr = std::unique_ptr; @@ -59,12 +82,13 @@ class RateLimitPolicyEntry { * @param local_service_cluster supplies the name of the local service cluster. * @param headers supplies the header for the request. * @param remote_address supplies the trusted downstream address for the connection. + * @param dynamic_metadata supplies the dynamic metadata for the request. 
*/ - virtual void populateDescriptors(const RouteEntry& route, - std::vector& descriptors, - const std::string& local_service_cluster, - const Http::HeaderMap& headers, - const Network::Address::Instance& remote_address) const PURE; + virtual void + populateDescriptors(const RouteEntry& route, std::vector& descriptors, + const std::string& local_service_cluster, const Http::HeaderMap& headers, + const Network::Address::Instance& remote_address, + const envoy::config::core::v3::Metadata* dynamic_metadata) const PURE; }; /** diff --git a/include/envoy/runtime/BUILD b/include/envoy/runtime/BUILD index 5118a04457c80..b80d180dedaaa 100644 --- a/include/envoy/runtime/BUILD +++ b/include/envoy/runtime/BUILD @@ -1,18 +1,23 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( name = "runtime_interface", hdrs = ["runtime.h"], - external_deps = ["abseil_optional"], + external_deps = [ + "abseil_node_hash_map", + "abseil_optional", + ], deps = [ + "//include/envoy/stats:stats_interface", + "//include/envoy/thread_local:thread_local_interface", "//source/common/common:assert_lib", "//source/common/singleton:threadsafe_singleton", "@envoy_api//envoy/type/v3:pkg_cc_proto", diff --git a/include/envoy/runtime/runtime.h b/include/envoy/runtime/runtime.h index 52abc0e506163..35737b6d07452 100644 --- a/include/envoy/runtime/runtime.h +++ b/include/envoy/runtime/runtime.h @@ -5,16 +5,18 @@ #include #include #include -#include #include #include "envoy/common/pure.h" +#include "envoy/stats/store.h" +#include "envoy/thread_local/thread_local.h" #include "envoy/type/v3/percent.pb.h" #include "common/common/assert.h" #include "common/singleton/threadsafe_singleton.h" #include "absl/container/flat_hash_map.h" +#include "absl/container/node_hash_map.h" #include "absl/types/optional.h" namespace Envoy { @@ -25,54 +27,11 @@ class ClusterManager; namespace Runtime 
{ -/** - * Random number generator. Implementations should be thread safe. - */ -class RandomGenerator { -public: - virtual ~RandomGenerator() = default; - - using result_type = uint64_t; // NOLINT(readability-identifier-naming) - - /** - * @return uint64_t a new random number. - */ - virtual result_type random() PURE; - - /* - * @return the smallest value that `operator()` may return. The value is - * strictly less than `max()`. - */ - constexpr static result_type min() noexcept { return std::numeric_limits::min(); }; - - /* - * @return the largest value that `operator()` may return. The value is - * strictly greater than `min()`. - */ - constexpr static result_type max() noexcept { return std::numeric_limits::max(); }; - - /* - * @return a value in the closed interval `[min(), max()]`. Has amortized - * constant complexity. - */ - result_type operator()() { return result_type(random()); }; - - /** - * @return std::string containing uuid4 of 36 char length. - * for example, 7c25513b-0466-4558-a64c-12c6704f37ed - */ - virtual std::string uuid() PURE; -}; - -using RandomGeneratorPtr = std::unique_ptr; - /** * A snapshot of runtime data. */ -class Snapshot { +class Snapshot : public ThreadLocal::ThreadLocalObject { public: - virtual ~Snapshot() = default; - struct Entry { std::string raw_string_value_; absl::optional uint_value_; @@ -104,6 +63,11 @@ class Snapshot { using OverrideLayerConstPtr = std::unique_ptr; + /** + * Updates deprecated feature use stats. + */ + virtual void countDeprecatedFeatureUse() const PURE; + /** * Returns true if a deprecated feature is allowed. * @@ -252,6 +216,8 @@ class Snapshot { virtual const std::vector& getLayers() const PURE; }; +using SnapshotConstSharedPtr = std::shared_ptr; + /** * Loads runtime snapshots from storage (local disk, etc.). */ @@ -280,20 +246,25 @@ class Loader { * @return shared_ptr the current snapshot. This function may safely be called * from non-worker threads. 
*/ - virtual std::shared_ptr threadsafeSnapshot() PURE; + virtual SnapshotConstSharedPtr threadsafeSnapshot() PURE; /** * Merge the given map of key-value pairs into the runtime's state. To remove a previous merge for * a key, use an empty string as the value. * @param values the values to merge */ - virtual void mergeValues(const std::unordered_map& values) PURE; + virtual void mergeValues(const absl::node_hash_map& values) PURE; /** * Initiate all RTDS subscriptions. The `on_done` callback is invoked when all RTDS requests * have either received and applied their responses or timed out. */ virtual void startRtdsSubscriptions(ReadyCallback on_done) PURE; + + /** + * @return Stats::Scope& the root scope. + */ + virtual Stats::Scope& getRootScope() PURE; }; using LoaderPtr = std::unique_ptr; diff --git a/include/envoy/secret/BUILD b/include/envoy/secret/BUILD index 5f16335ecba37..219884c19e812 100644 --- a/include/envoy/secret/BUILD +++ b/include/envoy/secret/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/include/envoy/secret/secret_manager.h b/include/envoy/secret/secret_manager.h index 666ce325244c4..ce13f6eba2145 100644 --- a/include/envoy/secret/secret_manager.h +++ b/include/envoy/secret/secret_manager.h @@ -24,7 +24,7 @@ class SecretManager { virtual ~SecretManager() = default; /** - * @param add a static secret from envoy::api::v2::auth::Secret. + * @param add a static secret from envoy::extensions::transport_sockets::tls::v3::Secret. * @throw an EnvoyException if the secret is invalid or not supported, or there is duplicate. 
*/ virtual void diff --git a/include/envoy/server/BUILD b/include/envoy/server/BUILD index 0b6b538b40b14..534270f24e74c 100644 --- a/include/envoy/server/BUILD +++ b/include/envoy/server/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( @@ -78,6 +78,7 @@ envoy_cc_library( name = "health_checker_config_interface", hdrs = ["health_checker_config.h"], deps = [ + "//include/envoy/common:random_generator_interface", "//include/envoy/config:typed_config_interface", "//include/envoy/upstream:health_checker_interface", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", @@ -154,8 +155,8 @@ envoy_cc_library( ) envoy_cc_library( - name = "filter_config_interface", - hdrs = ["filter_config.h"], + name = "factory_context_interface", + hdrs = ["factory_context.h"], deps = [ ":admin_interface", ":drain_manager_interface", @@ -173,7 +174,6 @@ envoy_cc_library( "//include/envoy/network:drain_decision_interface", "//include/envoy/runtime:runtime_interface", "//include/envoy/server:overload_manager_interface", - "//include/envoy/server:transport_socket_config_interface", "//include/envoy/singleton:manager_interface", "//include/envoy/thread_local:thread_local_interface", "//include/envoy/tracing:http_tracer_interface", @@ -185,6 +185,30 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "filter_config_interface", + hdrs = ["filter_config.h"], + deps = [ + ":drain_manager_interface", + ":factory_context_interface", + ":lifecycle_notifier_interface", + ":process_context_interface", + "//include/envoy/access_log:access_log_interface", + "//include/envoy/config:typed_config_interface", + "//include/envoy/http:codes_interface", + "//include/envoy/http:filter_interface", + "//include/envoy/server:overload_manager_interface", + "//include/envoy/server:transport_socket_config_interface", + "//include/envoy/singleton:manager_interface", 
+ "//include/envoy/thread_local:thread_local_interface", + "//include/envoy/tracing:http_tracer_interface", + "//include/envoy/upstream:cluster_manager_interface", + "//source/common/common:assert_lib", + "//source/common/common:macros", + "//source/common/protobuf", + ], +) + envoy_cc_library( name = "lifecycle_notifier_interface", hdrs = ["lifecycle_notifier.h"], @@ -220,6 +244,7 @@ envoy_cc_library( name = "transport_socket_config_interface", hdrs = ["transport_socket_config.h"], deps = [ + ":factory_context_interface", "//include/envoy/config:typed_config_interface", "//include/envoy/event:dispatcher_interface", "//include/envoy/init:manager_interface", @@ -233,6 +258,7 @@ envoy_cc_library( "//include/envoy/thread_local:thread_local_interface", "//include/envoy/upstream:cluster_manager_interface", "//source/common/protobuf", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], ) @@ -293,3 +319,12 @@ envoy_cc_library( "//include/envoy/network:connection_handler_interface", ], ) + +envoy_cc_library( + name = "bootstrap_extension_config_interface", + hdrs = ["bootstrap_extension_config.h"], + deps = [ + ":factory_context_interface", + "//include/envoy/config:typed_config_interface", + ], +) diff --git a/include/envoy/server/active_udp_listener_config.h b/include/envoy/server/active_udp_listener_config.h index 2e027dc4d7472..ae387dcfe9d63 100644 --- a/include/envoy/server/active_udp_listener_config.h +++ b/include/envoy/server/active_udp_listener_config.h @@ -10,7 +10,7 @@ namespace Server { /** * Interface to create udp listener according to - * envoy::api::v2::listener::UdpListenerConfig.udp_listener_name. + * envoy::config::listener::v3::UdpListenerConfig.udp_listener_name. 
*/ class ActiveUdpListenerConfigFactory : public Config::UntypedFactory { public: diff --git a/include/envoy/server/admin.h b/include/envoy/server/admin.h index 62b2604fda788..41578bd3cb70d 100644 --- a/include/envoy/server/admin.h +++ b/include/envoy/server/admin.h @@ -60,7 +60,7 @@ class AdminStream { /** * This macro is used to add handlers to the Admin HTTP Endpoint. It builds * a callback that executes X when the specified admin handler is hit. This macro can be - * used to add static handlers as in source/server/http/admin.cc and also dynamic handlers as + * used to add static handlers as in source/server/admin/admin.cc and also dynamic handlers as * done in the RouteConfigProviderManagerImpl constructor in source/common/router/rds_impl.cc. */ #define MAKE_ADMIN_HANDLER(X) \ @@ -153,6 +153,11 @@ class Admin { * @param handler the handler that will receive this Admin's listener. */ virtual void addListenerToHandler(Network::ConnectionHandler* handler) PURE; + + /** + * @return the number of worker threads to run in the server. + */ + virtual uint32_t concurrency() const PURE; }; } // namespace Server diff --git a/include/envoy/server/bootstrap_extension_config.h b/include/envoy/server/bootstrap_extension_config.h new file mode 100644 index 0000000000000..7eaf4dcb25302 --- /dev/null +++ b/include/envoy/server/bootstrap_extension_config.h @@ -0,0 +1,47 @@ +#pragma once + +#include + +#include "envoy/server/factory_context.h" + +#include "common/protobuf/protobuf.h" + +namespace Envoy { +namespace Server { + +/** + * Parent class for bootstrap extensions. + */ +class BootstrapExtension { +public: + virtual ~BootstrapExtension() = default; +}; + +using BootstrapExtensionPtr = std::unique_ptr; + +namespace Configuration { + +/** + * Implemented for each bootstrap extension and registered via Registry::registerFactory or the + * convenience class RegisterFactory. 
+ */ +class BootstrapExtensionFactory : public Config::TypedFactory { +public: + ~BootstrapExtensionFactory() override = default; + + /** + * Create a particular bootstrap extension implementation from a config proto. If the + * implementation is unable to produce a factory with the provided parameters, it should throw an + * EnvoyException. The returned pointer should never be nullptr. + * @param config the custom configuration for this bootstrap extension type. + * @param context general filter context through which persistent resources can be accessed. + */ + virtual BootstrapExtensionPtr createBootstrapExtension(const Protobuf::Message& config, + ServerFactoryContext& context) PURE; + + std::string category() const override { return "envoy.bootstrap"; } +}; + +} // namespace Configuration +} // namespace Server +} // namespace Envoy diff --git a/include/envoy/server/configuration.h b/include/envoy/server/configuration.h index 88a167566e806..aee4ecf01c04b 100644 --- a/include/envoy/server/configuration.h +++ b/include/envoy/server/configuration.h @@ -64,6 +64,15 @@ class Main { * multiple nonresponsive threads. */ virtual std::chrono::milliseconds wdMultiKillTimeout() const PURE; + + /** + * @return double the percentage of threads that need to meet the MultiKillTimeout before we + * kill the process. This is used in the calculation below + * Max(2, ceil(registered_threads * Fraction(MultiKillThreshold))) + * which computes the number of threads that need to be be nonresponsive + * for at least MultiKillTimeout before we kill the process. + */ + virtual double wdMultiKillThreshold() const PURE; }; /** diff --git a/include/envoy/server/drain_manager.h b/include/envoy/server/drain_manager.h index 214ed65c0f93b..49ecc194166a1 100644 --- a/include/envoy/server/drain_manager.h +++ b/include/envoy/server/drain_manager.h @@ -16,10 +16,15 @@ class DrainManager : public Network::DrainDecision { public: /** * Invoked to begin the drain procedure. 
(Making drain close operations more likely). - * @param completion supplies the completion that will be called when the drain sequence is - * finished. The parameter is optional and can be an unassigned function. + * @param drain_complete_cb will be invoked once the drain sequence is finished. The parameter is + * optional and can be an unassigned function. */ - virtual void startDrainSequence(std::function completion) PURE; + virtual void startDrainSequence(std::function drain_complete_cb) PURE; + + /** + * @return whether the drain sequence has started. + */ + virtual bool draining() const PURE; /** * Invoked in the newly launched primary process to begin the parent shutdown sequence. At the end diff --git a/include/envoy/server/factory_context.h b/include/envoy/server/factory_context.h new file mode 100644 index 0000000000000..08f67e31cc3bd --- /dev/null +++ b/include/envoy/server/factory_context.h @@ -0,0 +1,278 @@ +#pragma once + +#include +#include +#include + +#include "envoy/access_log/access_log.h" +#include "envoy/common/random_generator.h" +#include "envoy/config/core/v3/base.pb.h" +#include "envoy/config/typed_config.h" +#include "envoy/grpc/context.h" +#include "envoy/http/codes.h" +#include "envoy/http/context.h" +#include "envoy/http/filter.h" +#include "envoy/init/manager.h" +#include "envoy/network/drain_decision.h" +#include "envoy/network/filter.h" +#include "envoy/runtime/runtime.h" +#include "envoy/server/admin.h" +#include "envoy/server/drain_manager.h" +#include "envoy/server/lifecycle_notifier.h" +#include "envoy/server/overload_manager.h" +#include "envoy/server/process_context.h" +#include "envoy/singleton/manager.h" +#include "envoy/stats/scope.h" +#include "envoy/thread_local/thread_local.h" +#include "envoy/tracing/http_tracer.h" +#include "envoy/upstream/cluster_manager.h" + +#include "common/common/assert.h" +#include "common/common/macros.h" +#include "common/protobuf/protobuf.h" + +namespace Envoy { +namespace Server { +namespace 
Configuration { + +/** + * Common interface for downstream and upstream network filters. + */ +class CommonFactoryContext { +public: + virtual ~CommonFactoryContext() = default; + + /** + * @return Upstream::ClusterManager& singleton for use by the entire server. + */ + virtual Upstream::ClusterManager& clusterManager() PURE; + + /** + * @return Event::Dispatcher& the main thread's dispatcher. This dispatcher should be used + * for all singleton processing. + */ + virtual Event::Dispatcher& dispatcher() PURE; + + /** + * @return information about the local environment the server is running in. + */ + virtual const LocalInfo::LocalInfo& localInfo() const PURE; + + /** + * @return ProtobufMessage::ValidationContext& validation visitor for xDS and static configuration + * messages. + */ + virtual ProtobufMessage::ValidationContext& messageValidationContext() PURE; + + /** + * @return RandomGenerator& the random generator for the server. + */ + virtual Envoy::Random::RandomGenerator& random() PURE; + + /** + * @return Runtime::Loader& the singleton runtime loader for the server. + */ + virtual Envoy::Runtime::Loader& runtime() PURE; + + /** + * @return Stats::Scope& the filter's stats scope. + */ + virtual Stats::Scope& scope() PURE; + + /** + * @return Singleton::Manager& the server-wide singleton manager. + */ + virtual Singleton::Manager& singletonManager() PURE; + + /** + * @return ThreadLocal::SlotAllocator& the thread local storage engine for the server. This is + * used to allow runtime lockless updates to configuration, etc. across multiple threads. + */ + virtual ThreadLocal::SlotAllocator& threadLocal() PURE; + + /** + * @return Server::Admin& the server's global admin HTTP endpoint. + */ + virtual Server::Admin& admin() PURE; + + /** + * @return TimeSource& a reference to the time source. + */ + virtual TimeSource& timeSource() PURE; + + /** + * @return Api::Api& a reference to the api object. 
+ */ + virtual Api::Api& api() PURE; +}; + +/** + * ServerFactoryContext is an specialization of common interface for downstream and upstream network + * filters. The implementation guarantees the lifetime is no shorter than server. It could be used + * across listeners. + */ +class ServerFactoryContext : public virtual CommonFactoryContext { +public: + ~ServerFactoryContext() override = default; + + /** + * @return the server-wide grpc context. + */ + virtual Grpc::Context& grpcContext() PURE; + + /** + * @return DrainManager& the server-wide drain manager. + */ + virtual Envoy::Server::DrainManager& drainManager() PURE; + + /** + * @return the server's init manager. This can be used for extensions that need to initialize + * after cluster manager init but before the server starts listening. All extensions + * should register themselves during configuration load. initialize() will be called on + * each registered target after cluster manager init but before the server starts + * listening. Once all targets have initialized and invoked their callbacks, the server + * will start listening. + */ + virtual Init::Manager& initManager() PURE; + + /** + * @return ServerLifecycleNotifier& the lifecycle notifier for the server. + */ + virtual ServerLifecycleNotifier& lifecycleNotifier() PURE; + + /** + * @return std::chrono::milliseconds the flush interval of stats sinks. + */ + virtual std::chrono::milliseconds statsFlushInterval() const PURE; +}; + +/** + * Context passed to network and HTTP filters to access server resources. + * TODO(mattklein123): When we lock down visibility of the rest of the code, filters should only + * access the rest of the server via interfaces exposed here. + */ +class FactoryContext : public virtual CommonFactoryContext { +public: + ~FactoryContext() override = default; + + /** + * @return ServerFactoryContext which lifetime is no shorter than the server. 
+ */ + virtual ServerFactoryContext& getServerFactoryContext() const PURE; + + /** + * @return TransportSocketFactoryContext which lifetime is no shorter than the server. + */ + virtual TransportSocketFactoryContext& getTransportSocketFactoryContext() const PURE; + + /** + * @return AccessLogManager for use by the entire server. + */ + virtual AccessLog::AccessLogManager& accessLogManager() PURE; + + /** + * @return envoy::config::core::v3::TrafficDirection the direction of the traffic relative to + * the local proxy. + */ + virtual envoy::config::core::v3::TrafficDirection direction() const PURE; + + /** + * @return const Network::DrainDecision& a drain decision that filters can use to determine if + * they should be doing graceful closes on connections when possible. + */ + virtual const Network::DrainDecision& drainDecision() PURE; + + /** + * @return whether external healthchecks are currently failed or not. + */ + virtual bool healthCheckFailed() PURE; + + /** + * @return the server's init manager. This can be used for extensions that need to initialize + * after cluster manager init but before the server starts listening. All extensions + * should register themselves during configuration load. initialize() will be called on + * each registered target after cluster manager init but before the server starts + * listening. Once all targets have initialized and invoked their callbacks, the server + * will start listening. + */ + virtual Init::Manager& initManager() PURE; + + /** + * @return ServerLifecycleNotifier& the lifecycle notifier for the server. + */ + virtual ServerLifecycleNotifier& lifecycleNotifier() PURE; + + /** + * @return Stats::Scope& the listener's stats scope. + */ + virtual Stats::Scope& listenerScope() PURE; + + /** + * @return const envoy::config::core::v3::Metadata& the config metadata associated with this + * listener. 
+ */ + virtual const envoy::config::core::v3::Metadata& listenerMetadata() const PURE; + + /** + * @return OverloadManager& the overload manager for the server. + */ + virtual OverloadManager& overloadManager() PURE; + + /** + * @return Http::Context& a reference to the http context. + */ + virtual Http::Context& httpContext() PURE; + + /** + * @return Grpc::Context& a reference to the grpc context. + */ + virtual Grpc::Context& grpcContext() PURE; + + /** + * @return ProcessContextOptRef an optional reference to the + * process context. Will be unset when running in validation mode. + */ + virtual ProcessContextOptRef processContext() PURE; + + /** + * @return ProtobufMessage::ValidationVisitor& validation visitor for filter configuration + * messages. + */ + virtual ProtobufMessage::ValidationVisitor& messageValidationVisitor() PURE; +}; + +/** + * An implementation of FactoryContext. The life time is no shorter than the created filter chains. + * The life time is no longer than the owning listener. It should be used to create + * NetworkFilterChain. + */ +class FilterChainFactoryContext : public virtual FactoryContext { +public: + /** + * Set the flag that all attached filter chains will be destroyed. + */ + virtual void startDraining() PURE; +}; + +using FilterChainFactoryContextPtr = std::unique_ptr; + +/** + * An implementation of FactoryContext. The life time should cover the lifetime of the filter chains + * and connections. It can be used to create ListenerFilterChain. + */ +class ListenerFactoryContext : public virtual FactoryContext { +public: + /** + * Give access to the listener configuration + */ + virtual const Network::ListenerConfig& listenerConfig() const PURE; +}; + +/** + * FactoryContext for ProtocolOptionsFactory. 
+ */ +using ProtocolOptionsFactoryContext = Server::Configuration::TransportSocketFactoryContext; + +} // namespace Configuration +} // namespace Server +} // namespace Envoy diff --git a/include/envoy/server/filter_config.h b/include/envoy/server/filter_config.h index 75d8dd24d371f..97b2b3fe51b08 100644 --- a/include/envoy/server/filter_config.h +++ b/include/envoy/server/filter_config.h @@ -2,28 +2,12 @@ #include -#include "envoy/access_log/access_log.h" -#include "envoy/config/core/v3/base.pb.h" #include "envoy/config/typed_config.h" -#include "envoy/grpc/context.h" -#include "envoy/http/codes.h" -#include "envoy/http/context.h" #include "envoy/http/filter.h" #include "envoy/init/manager.h" -#include "envoy/network/drain_decision.h" #include "envoy/network/filter.h" -#include "envoy/runtime/runtime.h" -#include "envoy/server/admin.h" #include "envoy/server/drain_manager.h" -#include "envoy/server/lifecycle_notifier.h" -#include "envoy/server/overload_manager.h" -#include "envoy/server/process_context.h" -#include "envoy/server/transport_socket_config.h" -#include "envoy/singleton/manager.h" -#include "envoy/stats/scope.h" -#include "envoy/thread_local/thread_local.h" -#include "envoy/tracing/http_tracer.h" -#include "envoy/upstream/cluster_manager.h" +#include "envoy/server/factory_context.h" #include "common/common/assert.h" #include "common/common/macros.h" @@ -33,219 +17,6 @@ namespace Envoy { namespace Server { namespace Configuration { -/** - * Common interface for downstream and upstream network filters. - */ -class CommonFactoryContext { -public: - virtual ~CommonFactoryContext() = default; - - /** - * @return Upstream::ClusterManager& singleton for use by the entire server. - */ - virtual Upstream::ClusterManager& clusterManager() PURE; - - /** - * @return Event::Dispatcher& the main thread's dispatcher. This dispatcher should be used - * for all singleton processing. 
- */ - virtual Event::Dispatcher& dispatcher() PURE; - - /** - * @return information about the local environment the server is running in. - */ - virtual const LocalInfo::LocalInfo& localInfo() const PURE; - - /** - * @return ProtobufMessage::ValidationContext& validation visitor for xDS and static configuration - * messages. - */ - virtual ProtobufMessage::ValidationContext& messageValidationContext() PURE; - - /** - * @return RandomGenerator& the random generator for the server. - */ - virtual Envoy::Runtime::RandomGenerator& random() PURE; - - /** - * @return Runtime::Loader& the singleton runtime loader for the server. - */ - virtual Envoy::Runtime::Loader& runtime() PURE; - - /** - * @return Stats::Scope& the filter's stats scope. - */ - virtual Stats::Scope& scope() PURE; - - /** - * @return Singleton::Manager& the server-wide singleton manager. - */ - virtual Singleton::Manager& singletonManager() PURE; - - /** - * @return ThreadLocal::SlotAllocator& the thread local storage engine for the server. This is - * used to allow runtime lockless updates to configuration, etc. across multiple threads. - */ - virtual ThreadLocal::SlotAllocator& threadLocal() PURE; - - /** - * @return Server::Admin& the server's global admin HTTP endpoint. - */ - virtual Server::Admin& admin() PURE; - - /** - * @return TimeSource& a reference to the time source. - */ - virtual TimeSource& timeSource() PURE; - - /** - * @return Api::Api& a reference to the api object. - */ - virtual Api::Api& api() PURE; -}; - -/** - * ServerFactoryContext is an specialization of common interface for downstream and upstream network - * filters. The implementation guarantees the lifetime is no shorter than server. It could be used - * across listeners. - */ -class ServerFactoryContext : public virtual CommonFactoryContext { -public: - ~ServerFactoryContext() override = default; - - /** - * @return the server-wide grpc context. 
- */ - virtual Grpc::Context& grpcContext() PURE; - - /** - * @return DrainManager& the server-wide drain manager. - */ - virtual Envoy::Server::DrainManager& drainManager() PURE; -}; - -/** - * Context passed to network and HTTP filters to access server resources. - * TODO(mattklein123): When we lock down visibility of the rest of the code, filters should only - * access the rest of the server via interfaces exposed here. - */ -class FactoryContext : public virtual CommonFactoryContext { -public: - ~FactoryContext() override = default; - - /** - * @return ServerFactoryContext which lifetime is no shorter than the server. - */ - virtual ServerFactoryContext& getServerFactoryContext() const PURE; - - /** - * @return TransportSocketFactoryContext which lifetime is no shorter than the server. - */ - virtual TransportSocketFactoryContext& getTransportSocketFactoryContext() const PURE; - - /** - * @return AccessLogManager for use by the entire server. - */ - virtual AccessLog::AccessLogManager& accessLogManager() PURE; - - /** - * @return envoy::config::core::v3::TrafficDirection the direction of the traffic relative to - * the local proxy. - */ - virtual envoy::config::core::v3::TrafficDirection direction() const PURE; - - /** - * @return const Network::DrainDecision& a drain decision that filters can use to determine if - * they should be doing graceful closes on connections when possible. - */ - virtual const Network::DrainDecision& drainDecision() PURE; - - /** - * @return whether external healthchecks are currently failed or not. - */ - virtual bool healthCheckFailed() PURE; - - /** - * @return the server's init manager. This can be used for extensions that need to initialize - * after cluster manager init but before the server starts listening. All extensions - * should register themselves during configuration load. initialize() will be called on - * each registered target after cluster manager init but before the server starts - * listening. 
Once all targets have initialized and invoked their callbacks, the server - * will start listening. - */ - virtual Init::Manager& initManager() PURE; - - /** - * @return ServerLifecycleNotifier& the lifecycle notifier for the server. - */ - virtual ServerLifecycleNotifier& lifecycleNotifier() PURE; - - /** - * @return Stats::Scope& the listener's stats scope. - */ - virtual Stats::Scope& listenerScope() PURE; - - /** - * @return const envoy::config::core::v3::Metadata& the config metadata associated with this - * listener. - */ - virtual const envoy::config::core::v3::Metadata& listenerMetadata() const PURE; - - /** - * @return OverloadManager& the overload manager for the server. - */ - virtual OverloadManager& overloadManager() PURE; - - /** - * @return Http::Context& a reference to the http context. - */ - virtual Http::Context& httpContext() PURE; - - /** - * @return Grpc::Context& a reference to the grpc context. - */ - virtual Grpc::Context& grpcContext() PURE; - - /** - * @return ProcessContextOptRef an optional reference to the - * process context. Will be unset when running in validation mode. - */ - virtual ProcessContextOptRef processContext() PURE; - - /** - * @return ProtobufMessage::ValidationVisitor& validation visitor for filter configuration - * messages. - */ - virtual ProtobufMessage::ValidationVisitor& messageValidationVisitor() PURE; -}; - -/** - * An implementation of FactoryContext. The life time is no shorter than the created filter chains. - * The life time is no longer than the owning listener. It should be used to create - * NetworkFilterChain. - */ -class FilterChainFactoryContext : public virtual FactoryContext { -public: - /** - * Set the flag that all attached filter chains will be destroyed. - */ - virtual void startDraining() PURE; -}; - -using FilterChainFactoryContextPtr = std::unique_ptr; - -/** - * An implementation of FactoryContext. The life time should cover the lifetime of the filter chains - * and connections. 
It can be used to create ListenerFilterChain. - */ -class ListenerFactoryContext : public virtual FactoryContext { -public: - /** - * Give access to the listener configuration - */ - virtual const Network::ListenerConfig& listenerConfig() const PURE; -}; - /** * Common interface for listener filters and UDP listener filters */ @@ -321,9 +92,9 @@ class ProtocolOptionsFactory : public Config::TypedFactory { */ virtual Upstream::ProtocolOptionsConfigConstSharedPtr createProtocolOptionsConfig(const Protobuf::Message& config, - ProtobufMessage::ValidationVisitor& validation_visitor) { + ProtocolOptionsFactoryContext& factory_context) { UNREFERENCED_PARAMETER(config); - UNREFERENCED_PARAMETER(validation_visitor); + UNREFERENCED_PARAMETER(factory_context); return nullptr; } diff --git a/include/envoy/server/health_checker_config.h b/include/envoy/server/health_checker_config.h index 5994e37f231bf..5bb6344b8907c 100644 --- a/include/envoy/server/health_checker_config.h +++ b/include/envoy/server/health_checker_config.h @@ -1,5 +1,6 @@ #pragma once +#include "envoy/common/random_generator.h" #include "envoy/config/core/v3/health_check.pb.h" #include "envoy/config/typed_config.h" #include "envoy/runtime/runtime.h" @@ -26,7 +27,7 @@ class HealthCheckerFactoryContext { /** * @return RandomGenerator& the random generator for the server. */ - virtual Envoy::Runtime::RandomGenerator& random() PURE; + virtual Envoy::Random::RandomGenerator& random() PURE; /** * @return Event::Dispatcher& the main thread's dispatcher. This dispatcher should be used diff --git a/include/envoy/server/hot_restart.h b/include/envoy/server/hot_restart.h index 16c182c8da3cc..7525fa6a0f4fb 100644 --- a/include/envoy/server/hot_restart.h +++ b/include/envoy/server/hot_restart.h @@ -79,6 +79,11 @@ class HotRestart { */ virtual void shutdown() PURE; + /** + * Return the base id used to generate a domain socket name. 
+ */ + virtual uint32_t baseId() PURE; + /** * Return the hot restart compatibility version so that operations code can decide whether to * perform a full or hot restart. @@ -96,5 +101,14 @@ class HotRestart { virtual Thread::BasicLockable& accessLogLock() PURE; }; +/** + * HotRestartDomainSocketInUseException is thrown during HotRestart construction only when the + * underlying domain socket is in use. + */ +class HotRestartDomainSocketInUseException : public EnvoyException { +public: + HotRestartDomainSocketInUseException(const std::string& what) : EnvoyException(what) {} +}; + } // namespace Server } // namespace Envoy diff --git a/include/envoy/server/instance.h b/include/envoy/server/instance.h index 1e6335962e715..c2d294ac3cb8a 100644 --- a/include/envoy/server/instance.h +++ b/include/envoy/server/instance.h @@ -7,6 +7,7 @@ #include "envoy/access_log/access_log.h" #include "envoy/api/api.h" #include "envoy/common/mutex_tracer.h" +#include "envoy/common/random_generator.h" #include "envoy/config/trace/v3/http_tracer.pb.h" #include "envoy/event/timer.h" #include "envoy/grpc/context.h" @@ -137,7 +138,7 @@ class Instance { /** * @return RandomGenerator& the random generator for the server. */ - virtual Runtime::RandomGenerator& random() PURE; + virtual Random::RandomGenerator& random() PURE; /** * @return Runtime::Loader& the singleton runtime loader for the server. 
diff --git a/include/envoy/server/listener_manager.h b/include/envoy/server/listener_manager.h index 57a7e97549a27..e01551414def6 100644 --- a/include/envoy/server/listener_manager.h +++ b/include/envoy/server/listener_manager.h @@ -71,7 +71,7 @@ class ListenerComponentFactory { */ virtual Network::SocketSharedPtr createListenSocket(Network::Address::InstanceConstSharedPtr address, - Network::Address::SocketType socket_type, + Network::Socket::Type socket_type, const Network::Socket::OptionsSharedPtr& options, const ListenSocketCreationParams& params) PURE; @@ -131,6 +131,16 @@ class ListenerManager { All, }; + // The types of listeners to be returned from listeners(ListenerState). + // An enum instead of enum class so the underlying type is an int and bitwise operations can be + // used without casting. + enum ListenerState : uint8_t { + ACTIVE = 1 << 0, + WARMING = 1 << 1, + DRAINING = 1 << 2, + ALL = ACTIVE | WARMING | DRAINING + }; + virtual ~ListenerManager() = default; /** @@ -161,11 +171,15 @@ class ListenerManager { virtual void createLdsApi(const envoy::config::core::v3::ConfigSource& lds_config) PURE; /** - * @return std::vector> a list of the currently - * loaded listeners. Note that this routine returns references to the existing listeners. The - * references are only valid in the context of the current call stack and should not be stored. + * @param state the type of listener to be returned (defaults to ACTIVE), states can be OR'd + * together to return multiple different types + * @return std::vector> a list of currently known + * listeners in the requested state. Note that this routine returns references to the existing + * listeners. The references are only valid in the context of the current call stack and should + * not be stored. 
*/ - virtual std::vector> listeners() PURE; + virtual std::vector> + listeners(ListenerState state = ListenerState::ACTIVE) PURE; /** * @return uint64_t the total number of connections owned by all listeners across all workers. @@ -223,5 +237,13 @@ class ListenerManager { virtual ApiListenerOptRef apiListener() PURE; }; +// overload operator| to allow ListenerManager::listeners(ListenerState) to be called using a +// combination of flags, such as listeners(ListenerState::WARMING|ListenerState::ACTIVE) +constexpr ListenerManager::ListenerState operator|(const ListenerManager::ListenerState lhs, + const ListenerManager::ListenerState rhs) { + return static_cast(static_cast(lhs) | + static_cast(rhs)); +} + } // namespace Server } // namespace Envoy diff --git a/include/envoy/server/options.h b/include/envoy/server/options.h index 3a9ad7545ef7e..98ea52e2ce6c0 100644 --- a/include/envoy/server/options.h +++ b/include/envoy/server/options.h @@ -9,6 +9,7 @@ #include "envoy/config/bootstrap/v3/bootstrap.pb.h" #include "envoy/network/address.h" +#include "absl/types/optional.h" #include "spdlog/spdlog.h" namespace Envoy { @@ -41,6 +42,24 @@ enum class Mode { // to be validated in a non-prod environment. }; +/** + * During the drain sequence, different components ask the DrainManager + * whether to drain via drainClose(). This enum dictates the behaviour of + * drainClose() calls. + */ +enum class DrainStrategy { + /** + * The probability of drainClose() returning true increases from 0 to 100% + * over the duration of the drain period. + */ + Gradual, + + /** + * drainClose() will return true as soon as the drain sequence is initiated. + */ + Immediate, +}; + using CommandLineOptionsPtr = std::unique_ptr; /** @@ -58,16 +77,38 @@ class Options { */ virtual uint64_t baseId() const PURE; + /** + * @return bool choose an unused base ID dynamically. The chosen base id can be written to a + * a file using the baseIdPath option. 
+ */ + virtual bool useDynamicBaseId() const PURE; + + /** + * @return const std::string& the dynamic base id output file. + */ + virtual const std::string& baseIdPath() const PURE; + /** * @return the number of worker threads to run in the server. */ virtual uint32_t concurrency() const PURE; /** - * @return the number of seconds that envoy will perform draining during a hot restart. + * @return the duration of the drain period in seconds. */ virtual std::chrono::seconds drainTime() const PURE; + /** + * @return the strategy that defines behaviour of DrainManager::drainClose(); + */ + virtual DrainStrategy drainStrategy() const PURE; + + /** + * @return the delay before shutting down the parent envoy in a hot restart, + * generally longer than drainTime(). + */ + virtual std::chrono::seconds parentShutdownTime() const PURE; + /** * @return const std::string& the path to the configuration file. */ @@ -85,6 +126,11 @@ class Options { */ virtual const envoy::config::bootstrap::v3::Bootstrap& configProto() const PURE; + /** + * @return const absl::optional& the bootstrap version to use, if specified. + */ + virtual const absl::optional& bootstrapVersion() const PURE; + /** * @return bool allow unknown fields in the static configuration? */ @@ -95,6 +141,11 @@ class Options { */ virtual bool rejectUnknownDynamicFields() const PURE; + /** + * @return bool ignore unknown fields in the dynamic configuration? + **/ + virtual bool ignoreUnknownDynamicFields() const PURE; + /** * @return const std::string& the admin address output file. */ @@ -132,12 +183,6 @@ class Options { */ virtual const std::string& logPath() const PURE; - /** - * @return the number of seconds that envoy will wait before shutting down the parent envoy during - * a host restart. Generally this will be longer than the drainTime() option. - */ - virtual std::chrono::seconds parentShutdownTime() const PURE; - /** * @return the restart epoch. 0 indicates the first server start, 1 the second, and so on. 
*/ diff --git a/include/envoy/server/overload_manager.h b/include/envoy/server/overload_manager.h index 010ac8ee94686..24ddd16cfd6c9 100644 --- a/include/envoy/server/overload_manager.h +++ b/include/envoy/server/overload_manager.h @@ -1,7 +1,6 @@ #pragma once #include -#include #include "envoy/common/pure.h" #include "envoy/thread_local/thread_local.h" @@ -33,25 +32,8 @@ using OverloadActionCb = std::function; */ class ThreadLocalOverloadState : public ThreadLocal::ThreadLocalObject { public: - const OverloadActionState& getState(const std::string& action) { - auto it = actions_.find(action); - if (it == actions_.end()) { - it = actions_.insert(std::make_pair(action, OverloadActionState::Inactive)).first; - } - return it->second; - } - - void setState(const std::string& action, OverloadActionState state) { - auto it = actions_.find(action); - if (it == actions_.end()) { - actions_[action] = state; - } else { - it->second = state; - } - } - -private: - std::unordered_map actions_; + // Get a thread-local reference to the value for the given action key. + virtual const OverloadActionState& getState(const std::string& action) PURE; }; /** @@ -106,17 +88,6 @@ class OverloadManager { * an alternative to registering a callback for overload action state changes. */ virtual ThreadLocalOverloadState& getThreadLocalOverloadState() PURE; - - /** - * Convenience method to get a statically allocated reference to the inactive overload - * action state. Useful for code that needs to initialize a reference either to an - * entry in the ThreadLocalOverloadState map (if overload behavior is enabled) or to - * some other static memory location set to the inactive state (if overload behavior - * is disabled). 
- */ - static const OverloadActionState& getInactiveState() { - CONSTRUCT_ON_FIRST_USE(OverloadActionState, OverloadActionState::Inactive); - } }; } // namespace Server diff --git a/include/envoy/server/transport_socket_config.h b/include/envoy/server/transport_socket_config.h index a3dd4d5dac6da..e08405f9b4ab5 100644 --- a/include/envoy/server/transport_socket_config.h +++ b/include/envoy/server/transport_socket_config.h @@ -2,13 +2,14 @@ #include +#include "envoy/config/core/v3/health_check.pb.h" #include "envoy/config/typed_config.h" #include "envoy/event/dispatcher.h" #include "envoy/init/manager.h" #include "envoy/local_info/local_info.h" #include "envoy/network/transport_socket.h" -#include "envoy/runtime/runtime.h" #include "envoy/secret/secret_manager.h" +#include "envoy/server/factory_context.h" #include "envoy/singleton/manager.h" #include "envoy/ssl/context_manager.h" #include "envoy/stats/scope.h" @@ -66,7 +67,7 @@ class TransportSocketFactoryContext { /** * @return RandomGenerator& the random generator for the server. */ - virtual Envoy::Runtime::RandomGenerator& random() PURE; + virtual Envoy::Random::RandomGenerator& random() PURE; /** * @return the server-wide stats store. @@ -74,10 +75,9 @@ class TransportSocketFactoryContext { virtual Stats::Store& stats() PURE; /** - * @return a pointer pointing to the instance of an init manager, or nullptr - * if not set. + * @return a reference to the instance of an init manager. */ - virtual Init::Manager* initManager() PURE; + virtual Init::Manager& initManager() PURE; /** * @return the server's singleton manager. 
diff --git a/include/envoy/singleton/BUILD b/include/envoy/singleton/BUILD index f47887a062440..a0eb2536c45ea 100644 --- a/include/envoy/singleton/BUILD +++ b/include/envoy/singleton/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/include/envoy/ssl/BUILD b/include/envoy/ssl/BUILD index fb14af1a211cd..b8e7d530174fe 100644 --- a/include/envoy/ssl/BUILD +++ b/include/envoy/ssl/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/include/envoy/ssl/connection.h b/include/envoy/ssl/connection.h index b58d9511698ed..8241c48ad8d7c 100644 --- a/include/envoy/ssl/connection.h +++ b/include/envoy/ssl/connection.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include "envoy/common/pure.h" @@ -30,8 +31,8 @@ class ConnectionInfo { virtual bool peerCertificateValidated() const PURE; /** - * @return std::string the URIs in the SAN field of the local certificate. Returns {} if there is - * no local certificate, or no SAN field, or no URI. + * @return absl::Spanthe URIs in the SAN field of the local certificate. + * Returns {} if there is no local certificate, or no SAN field, or no URI. **/ virtual absl::Span uriSanLocalCertificate() const PURE; @@ -47,6 +48,12 @@ class ConnectionInfo { */ virtual const std::string& sha256PeerCertificateDigest() const PURE; + /** + * @return std::string the SHA1 digest of the peer certificate. Returns "" if there is no peer + * certificate which can happen in TLS (non mTLS) connections. + */ + virtual const std::string& sha1PeerCertificateDigest() const PURE; + /** * @return std::string the serial number field of the peer certificate. Returns "" if * there is no peer certificate, or no serial number. 
@@ -66,8 +73,8 @@ class ConnectionInfo { virtual const std::string& subjectPeerCertificate() const PURE; /** - * @return std::string the URIs in the SAN field of the peer certificate. Returns {} if there is - *no peer certificate, or no SAN field, or no URI. + * @return absl::Span the URIs in the SAN field of the peer certificate. + * Returns {} if there is no peer certificate, or no SAN field, or no URI. **/ virtual absl::Span uriSanPeerCertificate() const PURE; @@ -136,7 +143,7 @@ class ConnectionInfo { * if a peer cert exists and it contains the specified extension. * * Note: This is used out of tree, check with @snowp before removing. - * @param extension_name name of extension to look up + * @param extension_name name of extension to look up. * @return absl::optional the raw octets of the extension ``ASN.1`` object, if it * exists. */ diff --git a/include/envoy/ssl/private_key/BUILD b/include/envoy/ssl/private_key/BUILD index bf8a908421dfd..51ecf0198ec26 100644 --- a/include/envoy/ssl/private_key/BUILD +++ b/include/envoy/ssl/private_key/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/include/envoy/stats/BUILD b/include/envoy/stats/BUILD index fcba981cd242c..c810ac7ad30cb 100644 --- a/include/envoy/stats/BUILD +++ b/include/envoy/stats/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( @@ -29,6 +29,7 @@ envoy_cc_library( "tag_extractor.h", "tag_producer.h", ], + external_deps = ["abseil_inlined_vector"], deps = [ ":refcount_ptr_interface", ":symbol_table_interface", @@ -50,6 +51,7 @@ envoy_cc_library( envoy_cc_library( name = "symbol_table_interface", hdrs = ["symbol_table.h"], + external_deps = ["abseil_inlined_vector"], deps = [ 
"//source/common/common:hash_lib", ], diff --git a/include/envoy/stats/histogram.h b/include/envoy/stats/histogram.h index 719a83fea6e27..aa87d59e7360d 100644 --- a/include/envoy/stats/histogram.h +++ b/include/envoy/stats/histogram.h @@ -11,6 +11,23 @@ namespace Envoy { namespace Stats { +using ConstSupportedBuckets = const std::vector; + +class HistogramSettings { +public: + virtual ~HistogramSettings() = default; + + /** + * For formats like Prometheus where the entire histogram is published (but not + * like statsd where each value to include in the histogram is emitted separately), + * get the limits for each histogram bucket. + * @return The buckets for the histogram. Each value is an upper bound of a bucket. + */ + virtual ConstSupportedBuckets& buckets(absl::string_view stat_name) const PURE; +}; + +using HistogramSettingsConstPtr = std::unique_ptr; + /** * Holds the computed statistics for a histogram. */ @@ -43,7 +60,7 @@ class HistogramStatistics { * with 0 as the implicit lower bound. For timers, these bucket thresholds * are in milliseconds but the thresholds are applicable to all types of data. */ - virtual const std::vector& supportedBuckets() const PURE; + virtual ConstSupportedBuckets& supportedBuckets() const PURE; /** * Returns computed bucket values during the period. The vector contains an approximation diff --git a/include/envoy/stats/scope.h b/include/envoy/stats/scope.h index 408655bbb8a5c..93e5f00c7c5f1 100644 --- a/include/envoy/stats/scope.h +++ b/include/envoy/stats/scope.h @@ -28,6 +28,8 @@ using TextReadoutOptConstRef = absl::optional; using ScopeSharedPtr = std::shared_ptr; +template using IterateFn = std::function&)>; + /** * A named scope for stats. Scopes are a grouping of stats that can be acted on as a unit if needed * (for example to free/delete all of them). 
@@ -194,6 +196,47 @@ class Scope { */ virtual const SymbolTable& constSymbolTable() const PURE; virtual SymbolTable& symbolTable() PURE; + + /** + * Calls 'fn' for every counter. Note that in the case of overlapping scopes, + * the implementation may call fn more than one time for each counter. Iteration + * stops if `fn` returns false; + * + * @param fn Function to be run for every counter, or until fn return false. + * @return false if fn(counter) return false during iteration, true if every counter was hit. + */ + virtual bool iterate(const IterateFn& fn) const PURE; + + /** + * Calls 'fn' for every gauge. Note that in the case of overlapping scopes, + * the implementation may call fn more than one time for each gauge. Iteration + * stops if `fn` returns false; + * + * @param fn Function to be run for every gauge, or until fn return false. + * @return false if fn(gauge) return false during iteration, true if every gauge was hit. + */ + virtual bool iterate(const IterateFn& fn) const PURE; + + /** + * Calls 'fn' for every histogram. Note that in the case of overlapping + * scopes, the implementation may call fn more than one time for each + * histogram. Iteration stops if `fn` returns false; + * + * @param fn Function to be run for every histogram, or until fn return false. + * @return false if fn(histogram) return false during iteration, true if every histogram was hit. + */ + virtual bool iterate(const IterateFn& fn) const PURE; + + /** + * Calls 'fn' for every text readout. Note that in the case of overlapping + * scopes, the implementation may call fn more than one time for each + * text readout. Iteration stops if `fn` returns false; + * + * @param fn Function to be run for every text readout, or until fn return false. + * @return false if fn(text_readout) return false during iteration, true if every text readout + * was hit. 
+ */ + virtual bool iterate(const IterateFn& fn) const PURE; }; } // namespace Stats diff --git a/include/envoy/stats/sink.h b/include/envoy/stats/sink.h index f0ce08c1dd05f..1303c9fd67b81 100644 --- a/include/envoy/stats/sink.h +++ b/include/envoy/stats/sink.h @@ -35,7 +35,11 @@ class MetricSnapshot { * @return a snapshot of all histograms. */ virtual const std::vector>& histograms() PURE; - // TODO(efimki): Add support of text readouts stats. + + /** + * @return a snapshot of all text readouts. + */ + virtual const std::vector>& textReadouts() PURE; }; /** diff --git a/include/envoy/stats/stats.h b/include/envoy/stats/stats.h index c723152ab5493..c03b1d58ad0bc 100644 --- a/include/envoy/stats/stats.h +++ b/include/envoy/stats/stats.h @@ -15,8 +15,6 @@ namespace Envoy { namespace Stats { -class Allocator; - /** * General interface for all stats objects. */ @@ -137,6 +135,15 @@ class Gauge : public Metric { virtual void sub(uint64_t amount) PURE; virtual uint64_t value() const PURE; + /** + * Sets a value from a hot-restart parent. This parent contribution must be + * kept distinct from the child value, so that when we erase the value it + * is not commingled with the child value, which may have been set() directly. + * + * @param parent_value the value from the hot-restart parent. + */ + virtual void setParentValue(uint64_t parent_value) PURE; + /** * @return the import mode, dictating behavior of the gauge across hot restarts. */ @@ -174,7 +181,7 @@ class TextReadout : public virtual Metric { * Sets the value of this TextReadout by moving the input |value| to minimize * buffer copies under the lock. */ - virtual void set(std::string&& value) PURE; + virtual void set(absl::string_view value) PURE; /** * @return the copy of this TextReadout value. 
*/ diff --git a/include/envoy/stats/store.h b/include/envoy/stats/store.h index 158f00518a51c..191ed0f8589c9 100644 --- a/include/envoy/stats/store.h +++ b/include/envoy/stats/store.h @@ -5,6 +5,7 @@ #include #include "envoy/common/pure.h" +#include "envoy/stats/histogram.h" #include "envoy/stats/scope.h" #include "envoy/stats/stats_matcher.h" #include "envoy/stats/tag_producer.h" @@ -78,6 +79,12 @@ class StoreRoot : public Store { */ virtual void setStatsMatcher(StatsMatcherPtr&& stats_matcher) PURE; + /** + * Attach a HistogramSettings to this StoreRoot to generate histogram configurations + * according to some ruleset. + */ + virtual void setHistogramSettings(HistogramSettingsConstPtr&& histogram_settings) PURE; + /** * Initialize the store for threading. This will be called once after all worker threads have * been initialized. At this point the store can initialize itself for multi-threaded operation. diff --git a/include/envoy/stats/symbol_table.h b/include/envoy/stats/symbol_table.h index 3463e5c7688cf..b84d340f79d10 100644 --- a/include/envoy/stats/symbol_table.h +++ b/include/envoy/stats/symbol_table.h @@ -7,6 +7,7 @@ #include "envoy/common/pure.h" +#include "absl/container/inlined_vector.h" #include "absl/strings/string_view.h" namespace Envoy { @@ -20,7 +21,7 @@ namespace Stats { * declaration for StatName is in source/common/stats/symbol_table_impl.h */ class StatName; -using StatNameVec = std::vector; +using StatNameVec = absl::InlinedVector; class StatNameList; class StatNameSet; diff --git a/include/envoy/stream_info/BUILD b/include/envoy/stream_info/BUILD index 63fa4b47ba5a6..e491ce4233329 100644 --- a/include/envoy/stream_info/BUILD +++ b/include/envoy/stream_info/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/include/envoy/stream_info/filter_state.h 
b/include/envoy/stream_info/filter_state.h index f68fca790ab2f..20377176b56f5 100644 --- a/include/envoy/stream_info/filter_state.h +++ b/include/envoy/stream_info/filter_state.h @@ -15,6 +15,10 @@ namespace Envoy { namespace StreamInfo { +class FilterState; + +using FilterStateSharedPtr = std::shared_ptr; + /** * FilterState represents dynamically generated information regarding a stream (TCP or HTTP level) * or a connection by various filters in Envoy. FilterState can be write-once or write-many. @@ -146,14 +150,12 @@ class FilterState { * @return the pointer of the parent FilterState that has longer life span. nullptr means this is * either the top LifeSpan or the parent is not yet created. */ - virtual std::shared_ptr parent() const PURE; + virtual FilterStateSharedPtr parent() const PURE; protected: virtual const Object* getDataReadOnlyGeneric(absl::string_view data_name) const PURE; virtual Object* getDataMutableGeneric(absl::string_view data_name) PURE; }; -using FilterStateSharedPtr = std::shared_ptr; - } // namespace StreamInfo } // namespace Envoy diff --git a/include/envoy/stream_info/stream_info.h b/include/envoy/stream_info/stream_info.h index 89824f4190f49..c64e0837266de 100644 --- a/include/envoy/stream_info/stream_info.h +++ b/include/envoy/stream_info/stream_info.h @@ -72,8 +72,14 @@ enum ResponseFlag { InvalidEnvoyRequestHeaders = 0x20000, // Downstream request had an HTTP protocol error DownstreamProtocolError = 0x40000, + // Upstream request reached to user defined max stream duration. + UpstreamMaxStreamDurationReached = 0x80000, + // True if the response was served from an Envoy cache filter. + ResponseFromCacheFilter = 0x100000, + // Filter config was not received within the permitted warming deadline. + NoFilterConfigFound = 0x200000, // ATTENTION: MAKE SURE THIS REMAINS EQUAL TO THE LAST FLAG. 
- LastFlag = DownstreamProtocolError + LastFlag = NoFilterConfigFound }; /** @@ -91,6 +97,9 @@ struct ResponseCodeDetailValues { // Envoy is doing non-streaming proxying, and the request payload exceeded // configured limits. const std::string RequestPayloadTooLarge = "request_payload_too_large"; + // Envoy is doing non-streaming proxying, and the response payload exceeded + // configured limits. + const std::string ResponsePayloadTooLarge = "response_payload_too_large"; // Envoy is doing streaming proxying, but too much data arrived while waiting // to attempt a retry. const std::string RequestPayloadExceededRetryBufferLimit = @@ -100,6 +109,8 @@ struct ResponseCodeDetailValues { const std::string ResponsePayloadTooLArge = "response_payload_too_large"; // The per-stream keepalive timeout was exceeded. const std::string StreamIdleTimeout = "stream_idle_timeout"; + // The per-stream max duration timeout was exceeded. + const std::string MaxDurationTimeout = "max_duration_timeout"; // The per-stream total request timeout was exceeded const std::string RequestOverallTimeout = "request_overall_timeout"; // The request was rejected due to the Overload Manager reaching configured resource limits. @@ -108,8 +119,6 @@ struct ResponseCodeDetailValues { const std::string LowVersion = "low_version"; // The request was rejected due to the Host: or :authority field missing const std::string MissingHost = "missing_host_header"; - // The request was rejected due to the request headers being larger than the configured limit. - const std::string RequestHeadersTooLarge = "request_headers_too_large"; // The request was rejected due to x-envoy-* headers failing strict header validation. const std::string InvalidEnvoyRequestHeaders = "request_headers_failed_strict_check"; // The request was rejected due to the Path or :path header field missing. 
@@ -139,6 +148,8 @@ struct ResponseCodeDetailValues { const std::string UpstreamTimeout = "upstream_response_timeout"; // The final upstream try timed out const std::string UpstreamPerTryTimeout = "upstream_per_try_timeout"; + // The request was destroyed because of user defined max stream duration. + const std::string UpstreamMaxStreamDurationReached = "upstream_max_stream_duration_reached"; // The upstream connection was reset before a response was started. This // will generally be accompanied by details about why the reset occurred. const std::string EarlyUpstreamReset = "upstream_reset_before_response_started"; @@ -149,6 +160,12 @@ struct ResponseCodeDetailValues { const std::string LateUpstreamReset = "upstream_reset_after_response_started"; // The connection is rejected due to no matching filter chain. const std::string FilterChainNotFound = "filter_chain_not_found"; + // The client disconnected unexpectedly. + const std::string DownstreamRemoteDisconnect = "downstream_remote_disconnect"; + // The response was generated by the admin filter. + const std::string AdminFilterResponse = "admin_filter_response"; + // The original stream was replaced with an internal redirect. 
+ const std::string InternalRedirect = "internal_redirect"; }; using ResponseCodeDetails = ConstSingleton; @@ -472,7 +489,8 @@ class StreamInfo { virtual const Router::RouteEntry* routeEntry() const PURE; /** - * @return const envoy::api::v2::core::Metadata& the dynamic metadata associated with this request + * @return const envoy::config::core::v3::Metadata& the dynamic metadata associated with this + * request */ virtual envoy::config::core::v3::Metadata& dynamicMetadata() PURE; virtual const envoy::config::core::v3::Metadata& dynamicMetadata() const PURE; diff --git a/include/envoy/tcp/BUILD b/include/envoy/tcp/BUILD index 991ccbd75e139..bbf9905810037 100644 --- a/include/envoy/tcp/BUILD +++ b/include/envoy/tcp/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/include/envoy/tcp/conn_pool.h b/include/envoy/tcp/conn_pool.h index 5cdcd617daf7e..14dd5677907f9 100644 --- a/include/envoy/tcp/conn_pool.h +++ b/include/envoy/tcp/conn_pool.h @@ -13,36 +13,6 @@ namespace Envoy { namespace Tcp { namespace ConnectionPool { -/** - * Controls the behavior of a canceled connection request. - */ -enum class CancelPolicy { - // By default, canceled connection requests allow a pending connection to complete and become - // available for a future connection request. - Default, - // When a connection request is canceled, closes a pending connection if there are more pending - // connections than pending connection requests. CloseExcess is useful for callers that never - // re-use connections (e.g. by closing rather than releasing connections). Using CloseExcess in - // this situation guarantees that no idle connections will be held open by the conn pool awaiting - // a connection request. - CloseExcess, -}; - -/** - * Handle that allows a pending connection request to be canceled before it is completed. 
- */ -class Cancellable { -public: - virtual ~Cancellable() = default; - - /** - * Cancel the pending connection request. - * @param cancel_policy a CancelPolicy that controls the behavior of this connection request - * cancellation. - */ - virtual void cancel(CancelPolicy cancel_policy) PURE; -}; - /* * UpstreamCallbacks for connection pool upstream connection callbacks and data. Note that * onEvent(Connected) is never triggered since the event always occurs before a ConnectionPool @@ -119,6 +89,8 @@ class ConnectionData { using ConnectionDataPtr = std::unique_ptr; using PoolFailureReason = ::Envoy::ConnectionPool::PoolFailureReason; +using Cancellable = ::Envoy::ConnectionPool::Cancellable; +using CancelPolicy = ::Envoy::ConnectionPool::CancelPolicy; /** * Pool callbacks invoked in the context of a newConnection() call, either synchronously or @@ -154,30 +126,14 @@ class Callbacks { /** * An instance of a generic connection pool. */ -class Instance : public Event::DeferredDeletable { +class Instance : public Envoy::ConnectionPool::Instance, public Event::DeferredDeletable { public: - ~Instance() override = default; - - /** - * Called when a connection pool has been drained of pending requests, busy connections, and - * ready connections. - */ - using DrainedCb = std::function; - /** - * Register a callback that gets called when the connection pool is fully drained. No actual - * draining is done. The owner of the connection pool is responsible for not creating any - * new connections. + * Immediately close all existing connection pool connections. This method can be used in cases + * where the connection pool is not being destroyed, but the caller wishes to terminate all + * existing connections. For example, when a health check failure occurs. */ - virtual void addDrainedCallback(DrainedCb cb) PURE; - - /** - * Actively drain all existing connection pool connections. 
This method can be used in cases - * where the connection pool is not being destroyed, but the caller wishes to make sure that - * all new requests take place on a new connection. For example, when a health check failure - * occurs. - */ - virtual void drainConnections() PURE; + virtual void closeConnections() PURE; /** * Create a new connection on the pool. @@ -191,11 +147,6 @@ class Instance : public Event::DeferredDeletable { * should be done by resetting the connection. */ virtual Cancellable* newConnection(Callbacks& callbacks) PURE; - - /** - * @return the description of the host this connection pool is for. - */ - virtual Upstream::HostDescriptionConstSharedPtr host() const PURE; }; using InstancePtr = std::unique_ptr; diff --git a/include/envoy/thread/BUILD b/include/envoy/thread/BUILD index ef8f2450a237a..d239377668505 100644 --- a/include/envoy/thread/BUILD +++ b/include/envoy/thread/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/include/envoy/thread/thread.h b/include/envoy/thread/thread.h index 70452ca5d29ab..bcc6864d14664 100644 --- a/include/envoy/thread/thread.h +++ b/include/envoy/thread/thread.h @@ -9,6 +9,9 @@ #include "common/common/thread_annotations.h" +#include "absl/strings/string_view.h" +#include "absl/types/optional.h" + namespace Envoy { namespace Thread { @@ -37,13 +40,25 @@ class Thread { virtual ~Thread() = default; /** - * Join on thread exit. + * @return the name of the thread. + */ + virtual std::string name() const PURE; + + /** + * Blocks until the thread exits. */ virtual void join() PURE; }; using ThreadPtr = std::unique_ptr; +// Options specified during thread creation. +struct Options { + std::string name_; // A name supplied for the thread. On Linux this is limited to 15 chars. 
+}; + +using OptionsOptConstRef = const absl::optional&; + /** * Interface providing a mechanism for creating threads. */ @@ -52,10 +67,13 @@ class ThreadFactory { virtual ~ThreadFactory() = default; /** - * Create a thread. + * Creates a thread, immediately starting the thread_routine. + * * @param thread_routine supplies the function to invoke in the thread. + * @param options supplies options specified on thread creation. */ - virtual ThreadPtr createThread(std::function thread_routine) PURE; + virtual ThreadPtr createThread(std::function thread_routine, + OptionsOptConstRef options = absl::nullopt) PURE; /** * Return the current system thread ID @@ -63,6 +81,8 @@ class ThreadFactory { virtual ThreadId currentThreadId() PURE; }; +using ThreadFactoryPtr = std::unique_ptr; + /** * Like the C++11 "basic lockable concept" but a pure virtual interface vs. a template, and * with thread annotations. diff --git a/include/envoy/thread_local/BUILD b/include/envoy/thread_local/BUILD index d5fbfb396718b..3b23de4e01750 100644 --- a/include/envoy/thread_local/BUILD +++ b/include/envoy/thread_local/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/include/envoy/thread_local/thread_local.h b/include/envoy/thread_local/thread_local.h index 41c77d730d191..683617634a203 100644 --- a/include/envoy/thread_local/thread_local.h +++ b/include/envoy/thread_local/thread_local.h @@ -45,10 +45,14 @@ class Slot { /** * This is a helper on top of get() that casts the object stored in the slot to the specified - * type. Since the slot only stores pointers to the base interface, dynamic_cast provides some - * level of protection via RTTI. + * type. 
Since the slot only stores pointers to the base interface, the static_cast operates + * in production for performance, and the dynamic_cast validates correctness in tests and debug + * builds. */ - template T& getTyped() { return *std::dynamic_pointer_cast(get()); } + template T& getTyped() { + ASSERT(std::dynamic_pointer_cast(get()) != nullptr); + return *static_cast(get().get()); + } /** * Run a callback on all registered threads. diff --git a/include/envoy/tracing/BUILD b/include/envoy/tracing/BUILD index 1a6e82e018802..bc50ea3769fc3 100644 --- a/include/envoy/tracing/BUILD +++ b/include/envoy/tracing/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/include/envoy/tracing/http_tracer.h b/include/envoy/tracing/http_tracer.h index 63da639e84ee8..22b024ac97e0c 100644 --- a/include/envoy/tracing/http_tracer.h +++ b/include/envoy/tracing/http_tracer.h @@ -158,6 +158,22 @@ class Span { * @param sampled whether the span and any subsequent child spans should be sampled */ virtual void setSampled(bool sampled) PURE; + + /** + * Retrieve a key's value from the span's baggage. + * This baggage data could've been set by this span or any parent spans. + * @param key baggage key + * @return the baggage's value for the given input key + */ + virtual std::string getBaggage(absl::string_view key) PURE; + + /** + * Set a key/value pair in the current span's baggage. + * All subsequent child spans will have access to this baggage. 
+ * @param key baggage key + * @param value baggage value + */ + virtual void setBaggage(absl::string_view key, absl::string_view value) PURE; }; /** diff --git a/include/envoy/upstream/BUILD b/include/envoy/upstream/BUILD index 2ecf8e601b7cf..d67f7b242f402 100644 --- a/include/envoy/upstream/BUILD +++ b/include/envoy/upstream/BUILD @@ -1,22 +1,26 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( name = "cluster_manager_interface", hdrs = ["cluster_manager.h"], + external_deps = [ + "abseil_node_hash_map", + ], deps = [ ":health_checker_interface", ":load_balancer_interface", ":thread_local_cluster_interface", ":upstream_interface", "//include/envoy/access_log:access_log_interface", + "//include/envoy/common:random_generator_interface", "//include/envoy/config:grpc_mux_interface", "//include/envoy/config:subscription_factory_interface", "//include/envoy/grpc:async_client_manager_interface", @@ -114,6 +118,7 @@ envoy_cc_library( envoy_cc_library( name = "resource_manager_interface", hdrs = ["resource_manager.h"], + deps = ["//include/envoy/common:resource_interface"], ) envoy_cc_library( diff --git a/include/envoy/upstream/cluster_factory.h b/include/envoy/upstream/cluster_factory.h index 389a804ba0441..68bbff008baf1 100644 --- a/include/envoy/upstream/cluster_factory.h +++ b/include/envoy/upstream/cluster_factory.h @@ -10,6 +10,7 @@ #include "envoy/access_log/access_log.h" #include "envoy/api/api.h" +#include "envoy/common/random_generator.h" #include "envoy/config/cluster/v3/cluster.pb.h" #include "envoy/config/typed_config.h" #include "envoy/event/dispatcher.h" @@ -81,7 +82,7 @@ class ClusterFactoryContext { /** * @return RandomGenerator& the random generator for the server. 
*/ - virtual Runtime::RandomGenerator& random() PURE; + virtual Random::RandomGenerator& random() PURE; /** * @return Runtime::Loader& the singleton runtime loader for the server. diff --git a/include/envoy/upstream/cluster_manager.h b/include/envoy/upstream/cluster_manager.h index 4bfe98beee6b6..8389eb94a96dd 100644 --- a/include/envoy/upstream/cluster_manager.h +++ b/include/envoy/upstream/cluster_manager.h @@ -4,10 +4,10 @@ #include #include #include -#include #include "envoy/access_log/access_log.h" #include "envoy/api/api.h" +#include "envoy/common/random_generator.h" #include "envoy/config/bootstrap/v3/bootstrap.pb.h" #include "envoy/config/cluster/v3/cluster.pb.h" #include "envoy/config/core/v3/address.pb.h" @@ -31,6 +31,9 @@ #include "envoy/upstream/thread_local_cluster.h" #include "envoy/upstream/upstream.h" +#include "absl/container/flat_hash_set.h" +#include "absl/container/node_hash_map.h" + namespace Envoy { namespace Upstream { @@ -75,7 +78,10 @@ class ClusterManagerFactory; * persistent and shared among multiple ongoing requests/connections. * Cluster manager is initialized in two phases. In the first phase which begins at the construction * all primary clusters (i.e. with endpoint assignments provisioned statically in bootstrap, - * discovered through DNS or file based CDS) are initialized. + * discovered through DNS or file based CDS) are initialized. This phase may complete synchronously + * with cluster manager construction iff all clusters are STATIC and without health checks + * configured. At the completion of the first phase cluster manager invokes callback set through the + * `setPrimaryClustersInitializedCb` method. * After the first phase has completed the server instance initializes services (i.e. RTDS) needed * to successfully deploy the rest of dynamic configuration. 
* In the second phase all secondary clusters (with endpoint assignments provisioned by xDS servers) @@ -83,6 +89,9 @@ class ClusterManagerFactory; */ class ClusterManager { public: + using PrimaryClustersReadyCallback = std::function; + using InitializationCompleteCallback = std::function; + virtual ~ClusterManager() = default; /** @@ -98,10 +107,15 @@ class ClusterManager { virtual bool addOrUpdateCluster(const envoy::config::cluster::v3::Cluster& cluster, const std::string& version_info) PURE; + /** + * Set a callback that will be invoked when all primary clusters have been initialized. + */ + virtual void setPrimaryClustersInitializedCb(PrimaryClustersReadyCallback callback) PURE; + /** * Set a callback that will be invoked when all owned clusters have been initialized. */ - virtual void setInitializedCb(std::function callback) PURE; + virtual void setInitializedCb(InitializationCompleteCallback callback) PURE; /** * Start initialization of secondary clusters and then dynamically configured clusters. @@ -111,7 +125,7 @@ class ClusterManager { virtual void initializeSecondaryClusters(const envoy::config::bootstrap::v3::Bootstrap& bootstrap) PURE; - using ClusterInfoMap = std::unordered_map>; + using ClusterInfoMap = absl::node_hash_map>; /** * @return ClusterInfoMap all current clusters. These are the primary (not thread local) @@ -119,6 +133,15 @@ class ClusterManager { */ virtual ClusterInfoMap clusters() PURE; + using ClusterSet = absl::flat_hash_set; + + /** + * @return const ClusterSet& providing the cluster names that are eligible as + * xDS API config sources. These must be static (i.e. in the + * bootstrap) and non-EDS. + */ + virtual const ClusterSet& primaryClusters() PURE; + /** * @return ThreadLocalCluster* the thread local cluster with the given name or nullptr if it * does not exist. This is thread safe. 
@@ -138,11 +161,13 @@ class ClusterManager { * * Can return nullptr if there is no host available in the cluster or if the cluster does not * exist. + * + * To resolve the protocol to use, we provide the downstream protocol (if one exists). */ - virtual Http::ConnectionPool::Instance* httpConnPoolForCluster(const std::string& cluster, - ResourcePriority priority, - Http::Protocol protocol, - LoadBalancerContext* context) PURE; + virtual Http::ConnectionPool::Instance* + httpConnPoolForCluster(const std::string& cluster, ResourcePriority priority, + absl::optional downstream_protocol, + LoadBalancerContext* context) PURE; /** * Allocate a load balanced TCP connection pool for a cluster. This is *per-thread* so that @@ -189,8 +214,8 @@ class ClusterManager { virtual void shutdown() PURE; /** - * @return const envoy::api::v2::core::BindConfig& cluster manager wide bind configuration for new - * upstream connections. + * @return const envoy::config::core::v3::BindConfig& cluster manager wide bind configuration for + * new upstream connections. */ virtual const envoy::config::core::v3::BindConfig& bindConfig() const PURE; @@ -345,7 +370,7 @@ class ClusterInfoFactory { ClusterManager& cm_; const LocalInfo::LocalInfo& local_info_; Event::Dispatcher& dispatcher_; - Runtime::RandomGenerator& random_; + Random::RandomGenerator& random_; Singleton::Manager& singleton_manager_; ThreadLocal::SlotAllocator& tls_; ProtobufMessage::ValidationVisitor& validation_visitor_; diff --git a/include/envoy/upstream/load_balancer.h b/include/envoy/upstream/load_balancer.h index cfddc2b61e0a7..031daffc8ad20 100644 --- a/include/envoy/upstream/load_balancer.h +++ b/include/envoy/upstream/load_balancer.h @@ -53,12 +53,13 @@ class LoadBalancerContext { * * @param priority_state current priority state of the cluster being being load balanced. * @param original_priority_load the cached priority load for the cluster being load balanced. 
+ * @param priority_mapping_func see @Upstream::RetryPriority::PriorityMappingFunc. * @return a reference to the priority load data that should be used to select a priority. * */ - virtual const HealthyAndDegradedLoad& - determinePriorityLoad(const PrioritySet& priority_set, - const HealthyAndDegradedLoad& original_priority_load) PURE; + virtual const HealthyAndDegradedLoad& determinePriorityLoad( + const PrioritySet& priority_set, const HealthyAndDegradedLoad& original_priority_load, + const Upstream::RetryPriority::PriorityMappingFunc& priority_mapping_func) PURE; /** * Called to determine whether we should reperform host selection. The load balancer diff --git a/include/envoy/upstream/resource_manager.h b/include/envoy/upstream/resource_manager.h index c10ff89c033f8..5cac59a1a0ad6 100644 --- a/include/envoy/upstream/resource_manager.h +++ b/include/envoy/upstream/resource_manager.h @@ -5,6 +5,7 @@ #include #include "envoy/common/pure.h" +#include "envoy/common/resource.h" namespace Envoy { namespace Upstream { @@ -16,54 +17,16 @@ namespace Upstream { enum class ResourcePriority { Default, High }; const size_t NumResourcePriorities = 2; -/** - * An individual resource tracked by the resource manager. - */ -class Resource { -public: - virtual ~Resource() = default; - - /** - * @return true if the resource can be created. - */ - virtual bool canCreate() PURE; - - /** - * Increment the resource count. - */ - virtual void inc() PURE; - - /** - * Decrement the resource count. - */ - virtual void dec() PURE; - - /** - * Decrement the resource count by a specific amount. - */ - virtual void decBy(uint64_t amount) PURE; - - /** - * @return the current maximum allowed number of this resource. - */ - virtual uint64_t max() PURE; - - /** - * @return the current resource count. - */ - virtual uint64_t count() const PURE; -}; - /** * RAII wrapper that increments a resource on construction and decrements it on destruction. 
*/ class ResourceAutoIncDec { public: - ResourceAutoIncDec(Resource& resource) : resource_(resource) { resource_.inc(); } + ResourceAutoIncDec(ResourceLimit& resource) : resource_(resource) { resource_.inc(); } ~ResourceAutoIncDec() { resource_.dec(); } private: - Resource& resource_; + ResourceLimit& resource_; }; using ResourceAutoIncDecPtr = std::unique_ptr; @@ -78,31 +41,31 @@ class ResourceManager { virtual ~ResourceManager() = default; /** - * @return Resource& active TCP connections and UDP sessions. + * @return ResourceLimit& active TCP connections and UDP sessions. */ - virtual Resource& connections() PURE; + virtual ResourceLimit& connections() PURE; /** - * @return Resource& active pending requests (requests that have not yet been attached to a + * @return ResourceLimit& active pending requests (requests that have not yet been attached to a * connection pool connection). */ - virtual Resource& pendingRequests() PURE; + virtual ResourceLimit& pendingRequests() PURE; /** - * @return Resource& active requests (requests that are currently bound to a connection pool + * @return ResourceLimit& active requests (requests that are currently bound to a connection pool * connection and are awaiting response). */ - virtual Resource& requests() PURE; + virtual ResourceLimit& requests() PURE; /** - * @return Resource& active retries. + * @return ResourceLimit& active retries. */ - virtual Resource& retries() PURE; + virtual ResourceLimit& retries() PURE; /** - * @return Resource& active connection pools. + * @return ResourceLimit& active connection pools. 
*/ - virtual Resource& connectionPools() PURE; + virtual ResourceLimit& connectionPools() PURE; }; } // namespace Upstream diff --git a/include/envoy/upstream/retry.h b/include/envoy/upstream/retry.h index 4a7af89201bd7..f772d54029179 100644 --- a/include/envoy/upstream/retry.h +++ b/include/envoy/upstream/retry.h @@ -18,18 +18,37 @@ class RetryPriority { public: virtual ~RetryPriority() = default; + /** + * Function that maps a HostDescription to its effective priority level in a cluster. + * For most cluster types, the mapping is simply `return host.priority()`, but some + * cluster types require more complex mapping. + * @return either the effective priority, or absl::nullopt if the mapping cannot be determined, + * which can happen if the host has been removed from the configurations since it was + * used. + */ + using PriorityMappingFunc = + std::function(const Upstream::HostDescription&)>; + + static absl::optional defaultPriorityMapping(const Upstream::HostDescription& host) { + return host.priority(); + } + /** * Determines what PriorityLoad to use. * * @param priority_set current priority set of cluster. * @param original_priority_load the unmodified HealthAndDegradedLoad. + * @param priority_mapping_func a callback to get the priority of a host that has + * been attempted. This function may only be called on hosts that were + * passed to calls to `onHostAttempted()` on this object. * @return HealthAndDegradedLoad load that should be used for the next retry. Return * original_priority_load if the original load should be used. a pointer to original_priority, * original_degraded_priority if no changes should be made. 
*/ virtual const HealthyAndDegradedLoad& determinePriorityLoad(const PrioritySet& priority_set, - const HealthyAndDegradedLoad& original_priority_load) PURE; + const HealthyAndDegradedLoad& original_priority_load, + const PriorityMappingFunc& priority_mapping_func) PURE; /** * Called after a host has been attempted but before host selection for the next attempt has diff --git a/include/envoy/upstream/upstream.h b/include/envoy/upstream/upstream.h index 553f9b33fd8bb..ebdc1575eb8f5 100644 --- a/include/envoy/upstream/upstream.h +++ b/include/envoy/upstream/upstream.h @@ -107,9 +107,10 @@ class Host : virtual public HostDescription { * connection. * @return the connection data. */ - virtual CreateConnectionData createHealthCheckConnection( - Event::Dispatcher& dispatcher, - Network::TransportSocketOptionsSharedPtr transport_socket_options) const PURE; + virtual CreateConnectionData + createHealthCheckConnection(Event::Dispatcher& dispatcher, + Network::TransportSocketOptionsSharedPtr transport_socket_options, + const envoy::config::core::v3::Metadata* metadata) const PURE; /** * @return host specific gauges. 
@@ -210,7 +211,7 @@ using HostVector = std::vector; using HealthyHostVector = Phantom; using DegradedHostVector = Phantom; using ExcludedHostVector = Phantom; -using HostMap = std::unordered_map; +using HostMap = absl::node_hash_map; using HostVectorSharedPtr = std::shared_ptr; using HostVectorConstSharedPtr = std::shared_ptr; @@ -220,7 +221,7 @@ using ExcludedHostVectorConstSharedPtr = std::shared_ptr; using LocalityWeightsMap = - std::unordered_map; + absl::node_hash_map; using PriorityState = std::vector>; /** @@ -570,6 +571,7 @@ class PrioritySet { COUNTER(upstream_rq_cancelled) \ COUNTER(upstream_rq_completed) \ COUNTER(upstream_rq_maintenance_mode) \ + COUNTER(upstream_rq_max_duration_reached) \ COUNTER(upstream_rq_pending_failure_eject) \ COUNTER(upstream_rq_pending_overflow) \ COUNTER(upstream_rq_pending_total) \ @@ -620,6 +622,15 @@ class PrioritySet { REMAINING_GAUGE(remaining_retries, Accumulate) \ REMAINING_GAUGE(remaining_rq, Accumulate) +/** + * All stats tracking request/response headers and body sizes. Not used by default. + */ +#define ALL_CLUSTER_REQUEST_RESPONSE_SIZE_STATS(HISTOGRAM) \ + HISTOGRAM(upstream_rq_headers_size, Bytes) \ + HISTOGRAM(upstream_rq_body_size, Bytes) \ + HISTOGRAM(upstream_rs_headers_size, Bytes) \ + HISTOGRAM(upstream_rs_body_size, Bytes) + /** * All stats around timeout budgets. Not used by default. */ @@ -648,6 +659,17 @@ struct ClusterCircuitBreakersStats { ALL_CLUSTER_CIRCUIT_BREAKERS_STATS(GENERATE_GAUGE_STRUCT, GENERATE_GAUGE_STRUCT) }; +/** + * Struct definition for cluster request/response size stats. @see stats_macros.h + */ +struct ClusterRequestResponseSizeStats { + ALL_CLUSTER_REQUEST_RESPONSE_SIZE_STATS(GENERATE_HISTOGRAM_STRUCT) +}; + +using ClusterRequestResponseSizeStatsPtr = std::unique_ptr; +using ClusterRequestResponseSizeStatsOptRef = + absl::optional>; + /** * Struct definition for cluster timeout budget stats. 
@see stats_macros.h */ @@ -655,6 +677,10 @@ struct ClusterTimeoutBudgetStats { ALL_CLUSTER_TIMEOUT_BUDGET_STATS(GENERATE_HISTOGRAM_STRUCT) }; +using ClusterTimeoutBudgetStatsPtr = std::unique_ptr; +using ClusterTimeoutBudgetStatsOptRef = + absl::optional>; + /** * All extension protocol specific options returned by the method at * NamedNetworkFilterConfigFactory::createProtocolOptions @@ -704,6 +730,11 @@ class ClusterInfo { */ virtual const absl::optional idleTimeout() const PURE; + /** + * @return how many streams should be anticipated per each current stream. + */ + virtual float prefetchRatio() const PURE; + /** * @return soft limit on size of the cluster's connections read and write buffers. */ @@ -727,6 +758,12 @@ class ClusterInfo { */ virtual const envoy::config::core::v3::Http2ProtocolOptions& http2Options() const PURE; + /** + * @return const envoy::config::core::v3::HttpProtocolOptions for all of HTTP versions. + */ + virtual const envoy::config::core::v3::HttpProtocolOptions& + commonHttpProtocolOptions() const PURE; + /** * @param name std::string containing the well-known name of the extension for which protocol * options are desired @@ -740,8 +777,8 @@ class ClusterInfo { } /** - * @return const envoy::api::v2::Cluster::CommonLbConfig& the common configuration for all - * load balancers for this cluster. + * @return const envoy::config::cluster::v3::Cluster::CommonLbConfig& the common configuration for + * all load balancers for this cluster. */ virtual const envoy::config::cluster::v3::Cluster::CommonLbConfig& lbConfig() const PURE; @@ -774,13 +811,20 @@ class ClusterInfo { lbRingHashConfig() const PURE; /** - * @return const absl::optional& the configuration - * for the Original Destination load balancing policy, only used if type is set to + * @return const absl::optional& the + * configuration for the Original Destination load balancing policy, only used if type is set to * ORIGINAL_DST_LB. 
*/ virtual const absl::optional& lbOriginalDstConfig() const PURE; + /** + * @return const absl::optional& the configuration + * for the upstream, if a custom upstream is configured. + */ + virtual const absl::optional& + upstreamConfig() const PURE; + /** * @return Whether the cluster is currently in maintenance mode and should not be routed to. * Different filters may handle this situation in different ways. The implementation @@ -836,9 +880,16 @@ class ClusterInfo { virtual ClusterLoadReportStats& loadReportStats() const PURE; /** - * @return absl::optional& stats on timeout budgets for this cluster. + * @return absl::optional> stats to track + * headers/body sizes of request/response for this cluster. + */ + virtual ClusterRequestResponseSizeStatsOptRef requestResponseSizeStats() const PURE; + + /** + * @return absl::optional> stats on timeout + * budgets for this cluster. */ - virtual const absl::optional& timeoutBudgetStats() const PURE; + virtual ClusterTimeoutBudgetStatsOptRef timeoutBudgetStats() const PURE; /** * Returns an optional source address for upstream connections to bind to. @@ -853,7 +904,7 @@ class ClusterInfo { virtual const LoadBalancerSubsetInfo& lbSubsetInfo() const PURE; /** - * @return const envoy::api::v2::core::Metadata& the configuration metadata for this cluster. + * @return const envoy::config::core::v3::Metadata& the configuration metadata for this cluster. */ virtual const envoy::config::core::v3::Metadata& metadata() const PURE; @@ -884,7 +935,7 @@ class ClusterInfo { /** * @return eds cluster service_name of the cluster. */ - virtual absl::optional eds_service_name() const PURE; + virtual absl::optional edsServiceName() const PURE; /** * Create network filters on a new upstream connection. @@ -903,6 +954,16 @@ class ClusterInfo { virtual const absl::optional& upstreamHttpProtocolOptions() const PURE; + /** + * @return the Http1 Codec Stats. 
+ */ + virtual Http::Http1::CodecStats& http1CodecStats() const PURE; + + /** + * @return the Http2 Codec Stats. + */ + virtual Http::Http2::CodecStats& http2CodecStats() const PURE; + protected: /** * Invoked by extensionProtocolOptionsTyped. diff --git a/repokitteh.star b/repokitteh.star index 79f6bbee624bb..cf2385c1dfdee 100644 --- a/repokitteh.star +++ b/repokitteh.star @@ -4,18 +4,33 @@ use("github.com/repokitteh/modules/assign.star") use("github.com/repokitteh/modules/review.star") use("github.com/repokitteh/modules/wait.star") use("github.com/repokitteh/modules/circleci.star", secret_token=get_secret('circle_token')) +use("github.com/envoyproxy/envoy/ci/repokitteh/modules/azure_pipelines.star", secret_token=get_secret('azp_token')) use( - "github.com/repokitteh/modules/ownerscheck.star", + "github.com/envoyproxy/envoy/ci/repokitteh/modules/ownerscheck.star", paths=[ { "owner": "envoyproxy/api-shepherds!", - "path": "api/", + "path": + "(api/envoy[\w/]*/(v1alpha\d?|v1|v2alpha\d?|v2))|(api/envoy/type/(matcher/)?\w+.proto)", + "label": "v2-freeze", + "allow_global_approval": False, + "github_status_label": "v2 freeze violations", + }, + { + "owner": "envoyproxy/api-shepherds!", + "path": "api/envoy/", "label": "api", + "github_status_label": "any API change", + }, + { + "owner": "envoyproxy/api-watchers", + "path": "api/envoy/", }, ], ) -alias('retest', 'retry-circle') +alias('retest-circle', 'retry-circle') +alias('retest', 'retry-azp') def _backport(): github.issue_label('backport/review') diff --git a/restarter/BUILD b/restarter/BUILD index af4b8c78558d2..811a10b6d098e 100644 --- a/restarter/BUILD +++ b/restarter/BUILD @@ -1,10 +1,10 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() exports_files([ diff --git a/security/email-templates.md b/security/email-templates.md index e58dfdc91747e..ffd0232c7798c 100644 --- a/security/email-templates.md +++ 
b/security/email-templates.md @@ -50,6 +50,7 @@ Envoy maintainers on the Envoy GitHub. We will address the following CVE(s): * CVE-YEAR-ABCDEF (CVSS score $CVSS, $SEVERITY): $CVESUMMARY + - Link to the appropriate section of the CVE writeup document with gh-cve-template.md content. ... We intend to make candidates release patches available under embargo on the diff --git a/source/common/access_log/BUILD b/source/common/access_log/BUILD index 9dd85e03aa618..00bfcb101f798 100644 --- a/source/common/access_log/BUILD +++ b/source/common/access_log/BUILD @@ -1,18 +1,20 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( name = "access_log_lib", srcs = ["access_log_impl.cc"], hdrs = ["access_log_impl.h"], - external_deps = ["abseil_hash"], + external_deps = [ + "abseil_hash", + ], deps = [ "//include/envoy/access_log:access_log_interface", "//include/envoy/config:typed_config_interface", @@ -36,25 +38,6 @@ envoy_cc_library( ], ) -envoy_cc_library( - name = "access_log_formatter_lib", - srcs = ["access_log_formatter.cc"], - hdrs = ["access_log_formatter.h"], - external_deps = ["abseil_str_format"], - deps = [ - "//include/envoy/access_log:access_log_interface", - "//include/envoy/stream_info:stream_info_interface", - "//source/common/common:assert_lib", - "//source/common/common:utility_lib", - "//source/common/config:metadata_lib", - "//source/common/grpc:common_lib", - "//source/common/http:utility_lib", - "//source/common/protobuf:message_validator_lib", - "//source/common/stream_info:utility_lib", - "@envoy_api//envoy/config/core/v3:pkg_cc_proto", - ], -) - envoy_cc_library( name = "access_log_manager_lib", srcs = ["access_log_manager_impl.cc"], diff --git a/source/common/access_log/access_log_impl.cc b/source/common/access_log/access_log_impl.cc index a0f69d1cf90e8..447f951bc2f89 100644 --- a/source/common/access_log/access_log_impl.cc 
+++ b/source/common/access_log/access_log_impl.cc @@ -11,9 +11,9 @@ #include "envoy/runtime/runtime.h" #include "envoy/upstream/upstream.h" -#include "common/access_log/access_log_formatter.h" #include "common/common/assert.h" #include "common/common/utility.h" +#include "common/config/metadata.h" #include "common/config/utility.h" #include "common/http/header_map_impl.h" #include "common/http/header_utility.h" @@ -32,7 +32,7 @@ ComparisonFilter::ComparisonFilter(const envoy::config::accesslog::v3::Compariso Runtime::Loader& runtime) : config_(config), runtime_(runtime) {} -bool ComparisonFilter::compareAgainstValue(uint64_t lhs) { +bool ComparisonFilter::compareAgainstValue(uint64_t lhs) const { uint64_t value = config_.value().default_value(); if (!config_.value().runtime_key().empty()) { @@ -52,7 +52,7 @@ bool ComparisonFilter::compareAgainstValue(uint64_t lhs) { } FilterPtr FilterFactory::fromProto(const envoy::config::accesslog::v3::AccessLogFilter& config, - Runtime::Loader& runtime, Runtime::RandomGenerator& random, + Runtime::Loader& runtime, Random::RandomGenerator& random, ProtobufMessage::ValidationVisitor& validation_visitor) { switch (config.filter_specifier_case()) { case envoy::config::accesslog::v3::AccessLogFilter::FilterSpecifierCase::kStatusCodeFilter: @@ -77,6 +77,8 @@ FilterPtr FilterFactory::fromProto(const envoy::config::accesslog::v3::AccessLog case envoy::config::accesslog::v3::AccessLogFilter::FilterSpecifierCase::kGrpcStatusFilter: MessageUtil::validate(config, validation_visitor); return FilterPtr{new GrpcStatusFilter(config.grpc_status_filter())}; + case envoy::config::accesslog::v3::AccessLogFilter::FilterSpecifierCase::kMetadataFilter: + return FilterPtr{new MetadataFilter(config.metadata_filter())}; case envoy::config::accesslog::v3::AccessLogFilter::FilterSpecifierCase::kExtensionFilter: MessageUtil::validate(config, validation_visitor); { @@ -92,14 +94,15 @@ FilterPtr FilterFactory::fromProto(const 
envoy::config::accesslog::v3::AccessLog bool TraceableRequestFilter::evaluate(const StreamInfo::StreamInfo& info, const Http::RequestHeaderMap& request_headers, const Http::ResponseHeaderMap&, - const Http::ResponseTrailerMap&) { + const Http::ResponseTrailerMap&) const { Tracing::Decision decision = Tracing::HttpTracerUtility::isTracing(info, request_headers); return decision.traced && decision.reason == Tracing::Reason::ServiceForced; } bool StatusCodeFilter::evaluate(const StreamInfo::StreamInfo& info, const Http::RequestHeaderMap&, - const Http::ResponseHeaderMap&, const Http::ResponseTrailerMap&) { + const Http::ResponseHeaderMap&, + const Http::ResponseTrailerMap&) const { if (!info.responseCode()) { return compareAgainstValue(0ULL); } @@ -108,7 +111,8 @@ bool StatusCodeFilter::evaluate(const StreamInfo::StreamInfo& info, const Http:: } bool DurationFilter::evaluate(const StreamInfo::StreamInfo& info, const Http::RequestHeaderMap&, - const Http::ResponseHeaderMap&, const Http::ResponseTrailerMap&) { + const Http::ResponseHeaderMap&, + const Http::ResponseTrailerMap&) const { absl::optional final = info.requestComplete(); ASSERT(final); @@ -117,14 +121,15 @@ bool DurationFilter::evaluate(const StreamInfo::StreamInfo& info, const Http::Re } RuntimeFilter::RuntimeFilter(const envoy::config::accesslog::v3::RuntimeFilter& config, - Runtime::Loader& runtime, Runtime::RandomGenerator& random) + Runtime::Loader& runtime, Random::RandomGenerator& random) : runtime_(runtime), random_(random), runtime_key_(config.runtime_key()), percent_(config.percent_sampled()), use_independent_randomness_(config.use_independent_randomness()) {} bool RuntimeFilter::evaluate(const StreamInfo::StreamInfo& stream_info, const Http::RequestHeaderMap& request_headers, - const Http::ResponseHeaderMap&, const Http::ResponseTrailerMap&) { + const Http::ResponseHeaderMap&, + const Http::ResponseTrailerMap&) const { auto rid_extension = stream_info.getRequestIDExtension(); uint64_t random_value; 
if (use_independent_randomness_ || @@ -141,7 +146,7 @@ bool RuntimeFilter::evaluate(const StreamInfo::StreamInfo& stream_info, OperatorFilter::OperatorFilter( const Protobuf::RepeatedPtrField& configs, - Runtime::Loader& runtime, Runtime::RandomGenerator& random, + Runtime::Loader& runtime, Random::RandomGenerator& random, ProtobufMessage::ValidationVisitor& validation_visitor) { for (const auto& config : configs) { filters_.emplace_back(FilterFactory::fromProto(config, runtime, random, validation_visitor)); @@ -149,19 +154,19 @@ OperatorFilter::OperatorFilter( } OrFilter::OrFilter(const envoy::config::accesslog::v3::OrFilter& config, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, + Random::RandomGenerator& random, ProtobufMessage::ValidationVisitor& validation_visitor) : OperatorFilter(config.filters(), runtime, random, validation_visitor) {} AndFilter::AndFilter(const envoy::config::accesslog::v3::AndFilter& config, - Runtime::Loader& runtime, Runtime::RandomGenerator& random, + Runtime::Loader& runtime, Random::RandomGenerator& random, ProtobufMessage::ValidationVisitor& validation_visitor) : OperatorFilter(config.filters(), runtime, random, validation_visitor) {} bool OrFilter::evaluate(const StreamInfo::StreamInfo& info, const Http::RequestHeaderMap& request_headers, const Http::ResponseHeaderMap& response_headers, - const Http::ResponseTrailerMap& response_trailers) { + const Http::ResponseTrailerMap& response_trailers) const { bool result = false; for (auto& filter : filters_) { result |= filter->evaluate(info, request_headers, response_headers, response_trailers); @@ -177,7 +182,7 @@ bool OrFilter::evaluate(const StreamInfo::StreamInfo& info, bool AndFilter::evaluate(const StreamInfo::StreamInfo& info, const Http::RequestHeaderMap& request_headers, const Http::ResponseHeaderMap& response_headers, - const Http::ResponseTrailerMap& response_trailers) { + const Http::ResponseTrailerMap& response_trailers) const { bool result = true; for (auto& 
filter : filters_) { result &= filter->evaluate(info, request_headers, response_headers, response_trailers); @@ -192,7 +197,7 @@ bool AndFilter::evaluate(const StreamInfo::StreamInfo& info, bool NotHealthCheckFilter::evaluate(const StreamInfo::StreamInfo& info, const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&, - const Http::ResponseTrailerMap&) { + const Http::ResponseTrailerMap&) const { return !info.healthCheck(); } @@ -201,7 +206,7 @@ HeaderFilter::HeaderFilter(const envoy::config::accesslog::v3::HeaderFilter& con bool HeaderFilter::evaluate(const StreamInfo::StreamInfo&, const Http::RequestHeaderMap& request_headers, - const Http::ResponseHeaderMap&, const Http::ResponseTrailerMap&) { + const Http::ResponseHeaderMap&, const Http::ResponseTrailerMap&) const { return Http::HeaderUtility::matchHeaders(request_headers, *header_data_); } @@ -217,7 +222,8 @@ ResponseFlagFilter::ResponseFlagFilter( } bool ResponseFlagFilter::evaluate(const StreamInfo::StreamInfo& info, const Http::RequestHeaderMap&, - const Http::ResponseHeaderMap&, const Http::ResponseTrailerMap&) { + const Http::ResponseHeaderMap&, + const Http::ResponseTrailerMap&) const { if (configured_flags_ != 0) { return info.intersectResponseFlags(configured_flags_); } @@ -234,7 +240,7 @@ GrpcStatusFilter::GrpcStatusFilter(const envoy::config::accesslog::v3::GrpcStatu bool GrpcStatusFilter::evaluate(const StreamInfo::StreamInfo& info, const Http::RequestHeaderMap&, const Http::ResponseHeaderMap& response_headers, - const Http::ResponseTrailerMap& response_trailers) { + const Http::ResponseTrailerMap& response_trailers) const { Grpc::Status::GrpcStatus status = Grpc::Status::WellKnownGrpcStatus::Unknown; const auto& optional_status = @@ -252,6 +258,44 @@ Grpc::Status::GrpcStatus GrpcStatusFilter::protoToGrpcStatus( return static_cast(status); } +MetadataFilter::MetadataFilter(const envoy::config::accesslog::v3::MetadataFilter& filter_config) + : 
default_match_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(filter_config, match_if_key_not_found, true)), + filter_(filter_config.matcher().filter()) { + + if (filter_config.has_matcher()) { + auto& matcher_config = filter_config.matcher(); + + for (const auto& seg : matcher_config.path()) { + path_.push_back(seg.key()); + } + + // Matches if the value equals the configured 'MetadataMatcher' value. + const auto& val = matcher_config.value(); + value_matcher_ = Matchers::ValueMatcher::create(val); + } + + // Matches if the value is present in dynamic metadata + auto present_val = envoy::type::matcher::v3::ValueMatcher(); + present_val.set_present_match(true); + present_matcher_ = Matchers::ValueMatcher::create(present_val); +} + +bool MetadataFilter::evaluate(const StreamInfo::StreamInfo& info, const Http::RequestHeaderMap&, + const Http::ResponseHeaderMap&, + const Http::ResponseTrailerMap&) const { + const auto& value = + Envoy::Config::Metadata::metadataValue(&info.dynamicMetadata(), filter_, path_); + // If the key corresponds to a set value in dynamic metadata, return true if the value matches the + // the configured 'MetadataMatcher' value and false otherwise + if (present_matcher_->match(value)) { + return value_matcher_ && value_matcher_->match(value); + } + + // If the key does not correspond to a set value in dynamic metadata, return true if + // 'match_if_key_not_found' is set to true and false otherwise + return default_match_; +} + InstanceSharedPtr AccessLogFactory::fromProto(const envoy::config::accesslog::v3::AccessLog& config, Server::Configuration::FactoryContext& context) { FilterPtr filter; diff --git a/source/common/access_log/access_log_impl.h b/source/common/access_log/access_log_impl.h index 512a957e1dc21..5aef64a40e2c6 100644 --- a/source/common/access_log/access_log_impl.h +++ b/source/common/access_log/access_log_impl.h @@ -2,20 +2,22 @@ #include #include -#include #include #include "envoy/access_log/access_log.h" +#include 
"envoy/common/random_generator.h" #include "envoy/config/accesslog/v3/accesslog.pb.h" #include "envoy/config/typed_config.h" #include "envoy/runtime/runtime.h" #include "envoy/server/access_log_config.h" #include "envoy/type/v3/percent.pb.h" +#include "common/common/matchers.h" #include "common/grpc/status.h" #include "common/http/header_utility.h" #include "common/protobuf/protobuf.h" +#include "absl/container/node_hash_set.h" #include "absl/hash/hash.h" namespace Envoy { @@ -30,7 +32,7 @@ class FilterFactory { * Read a filter definition from proto and instantiate a concrete filter class. */ static FilterPtr fromProto(const envoy::config::accesslog::v3::AccessLogFilter& config, - Runtime::Loader& runtime, Runtime::RandomGenerator& random, + Runtime::Loader& runtime, Random::RandomGenerator& random, ProtobufMessage::ValidationVisitor& validation_visitor); }; @@ -42,7 +44,7 @@ class ComparisonFilter : public Filter { ComparisonFilter(const envoy::config::accesslog::v3::ComparisonFilter& config, Runtime::Loader& runtime); - bool compareAgainstValue(uint64_t lhs); + bool compareAgainstValue(uint64_t lhs) const; envoy::config::accesslog::v3::ComparisonFilter config_; Runtime::Loader& runtime_; @@ -60,7 +62,7 @@ class StatusCodeFilter : public ComparisonFilter { // AccessLog::Filter bool evaluate(const StreamInfo::StreamInfo& info, const Http::RequestHeaderMap& request_headers, const Http::ResponseHeaderMap& response_headers, - const Http::ResponseTrailerMap& response_trailers) override; + const Http::ResponseTrailerMap& response_trailers) const override; }; /** @@ -75,7 +77,7 @@ class DurationFilter : public ComparisonFilter { // AccessLog::Filter bool evaluate(const StreamInfo::StreamInfo& info, const Http::RequestHeaderMap& request_headers, const Http::ResponseHeaderMap& response_headers, - const Http::ResponseTrailerMap& response_trailers) override; + const Http::ResponseTrailerMap& response_trailers) const override; }; /** @@ -85,7 +87,7 @@ class OperatorFilter : 
public Filter { public: OperatorFilter( const Protobuf::RepeatedPtrField& configs, - Runtime::Loader& runtime, Runtime::RandomGenerator& random, + Runtime::Loader& runtime, Random::RandomGenerator& random, ProtobufMessage::ValidationVisitor& validation_visitor); protected: @@ -98,13 +100,13 @@ class OperatorFilter : public Filter { class AndFilter : public OperatorFilter { public: AndFilter(const envoy::config::accesslog::v3::AndFilter& config, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, + Random::RandomGenerator& random, ProtobufMessage::ValidationVisitor& validation_visitor); // AccessLog::Filter bool evaluate(const StreamInfo::StreamInfo& info, const Http::RequestHeaderMap& request_headers, const Http::ResponseHeaderMap& response_headers, - const Http::ResponseTrailerMap& response_trailers) override; + const Http::ResponseTrailerMap& response_trailers) const override; }; /** @@ -113,13 +115,12 @@ class AndFilter : public OperatorFilter { class OrFilter : public OperatorFilter { public: OrFilter(const envoy::config::accesslog::v3::OrFilter& config, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, - ProtobufMessage::ValidationVisitor& validation_visitor); + Random::RandomGenerator& random, ProtobufMessage::ValidationVisitor& validation_visitor); // AccessLog::Filter bool evaluate(const StreamInfo::StreamInfo& info, const Http::RequestHeaderMap& request_headers, const Http::ResponseHeaderMap& response_headers, - const Http::ResponseTrailerMap& response_trailers) override; + const Http::ResponseTrailerMap& response_trailers) const override; }; /** @@ -132,7 +133,7 @@ class NotHealthCheckFilter : public Filter { // AccessLog::Filter bool evaluate(const StreamInfo::StreamInfo& info, const Http::RequestHeaderMap& request_headers, const Http::ResponseHeaderMap& response_headers, - const Http::ResponseTrailerMap& response_trailers) override; + const Http::ResponseTrailerMap& response_trailers) const override; }; /** @@ -143,7 +144,7 @@ 
class TraceableRequestFilter : public Filter { // AccessLog::Filter bool evaluate(const StreamInfo::StreamInfo& info, const Http::RequestHeaderMap& request_headers, const Http::ResponseHeaderMap& response_headers, - const Http::ResponseTrailerMap& response_trailers) override; + const Http::ResponseTrailerMap& response_trailers) const override; }; /** @@ -152,16 +153,16 @@ class TraceableRequestFilter : public Filter { class RuntimeFilter : public Filter { public: RuntimeFilter(const envoy::config::accesslog::v3::RuntimeFilter& config, Runtime::Loader& runtime, - Runtime::RandomGenerator& random); + Random::RandomGenerator& random); // AccessLog::Filter bool evaluate(const StreamInfo::StreamInfo& info, const Http::RequestHeaderMap& request_headers, const Http::ResponseHeaderMap& response_headers, - const Http::ResponseTrailerMap& response_trailers) override; + const Http::ResponseTrailerMap& response_trailers) const override; private: Runtime::Loader& runtime_; - Runtime::RandomGenerator& random_; + Random::RandomGenerator& random_; const std::string runtime_key_; const envoy::type::v3::FractionalPercent percent_; const bool use_independent_randomness_; @@ -177,7 +178,7 @@ class HeaderFilter : public Filter { // AccessLog::Filter bool evaluate(const StreamInfo::StreamInfo& info, const Http::RequestHeaderMap& request_headers, const Http::ResponseHeaderMap& response_headers, - const Http::ResponseTrailerMap& response_trailers) override; + const Http::ResponseTrailerMap& response_trailers) const override; private: const Http::HeaderUtility::HeaderDataPtr header_data_; @@ -193,7 +194,7 @@ class ResponseFlagFilter : public Filter { // AccessLog::Filter bool evaluate(const StreamInfo::StreamInfo& info, const Http::RequestHeaderMap& request_headers, const Http::ResponseHeaderMap& response_headers, - const Http::ResponseTrailerMap& response_trailers) override; + const Http::ResponseTrailerMap& response_trailers) const override; private: uint64_t configured_flags_{}; @@ 
-207,14 +208,14 @@ class ResponseFlagFilter : public Filter { class GrpcStatusFilter : public Filter { public: using GrpcStatusHashSet = - std::unordered_set>; + absl::node_hash_set>; GrpcStatusFilter(const envoy::config::accesslog::v3::GrpcStatusFilter& config); // AccessLog::Filter bool evaluate(const StreamInfo::StreamInfo& info, const Http::RequestHeaderMap& request_headers, const Http::ResponseHeaderMap& response_headers, - const Http::ResponseTrailerMap& response_trailers) override; + const Http::ResponseTrailerMap& response_trailers) const override; private: GrpcStatusHashSet statuses_; @@ -228,6 +229,27 @@ class GrpcStatusFilter : public Filter { protoToGrpcStatus(envoy::config::accesslog::v3::GrpcStatusFilter::Status status) const; }; +/** + * Filters requests based on dynamic metadata + */ +class MetadataFilter : public Filter { +public: + MetadataFilter(const envoy::config::accesslog::v3::MetadataFilter& filter_config); + + bool evaluate(const StreamInfo::StreamInfo& info, const Http::RequestHeaderMap& request_headers, + const Http::ResponseHeaderMap& response_headers, + const Http::ResponseTrailerMap& response_trailers) const override; + +private: + Matchers::ValueMatcherConstSharedPtr present_matcher_; + Matchers::ValueMatcherConstSharedPtr value_matcher_; + + std::vector path_; + + const bool default_match_; + const std::string filter_; +}; + /** * Extension filter factory that reads from ExtensionFilter proto. */ @@ -245,7 +267,7 @@ class ExtensionFilterFactory : public Config::TypedFactory { * @return an instance of extension filter implementation from a config proto. 
*/ virtual FilterPtr createFilter(const envoy::config::accesslog::v3::ExtensionFilter& config, - Runtime::Loader& runtime, Runtime::RandomGenerator& random) PURE; + Runtime::Loader& runtime, Random::RandomGenerator& random) PURE; std::string category() const override { return "envoy.access_logger.extension_filters"; } }; diff --git a/source/common/access_log/access_log_manager_impl.cc b/source/common/access_log/access_log_manager_impl.cc index 534b4be0b5478..4393fe94c0b8f 100644 --- a/source/common/access_log/access_log_manager_impl.cc +++ b/source/common/access_log/access_log_manager_impl.cc @@ -12,39 +12,28 @@ namespace Envoy { namespace AccessLog { AccessLogManagerImpl::~AccessLogManagerImpl() { - for (auto& access_log : access_logs_) { - ENVOY_LOG(debug, "destroying access logger {}", access_log.first); - access_log.second.reset(); + for (auto& [log_key, log_file_ptr] : access_logs_) { + ENVOY_LOG(debug, "destroying access logger {}", log_key); + log_file_ptr.reset(); } ENVOY_LOG(debug, "destroyed access loggers"); } void AccessLogManagerImpl::reopen() { - for (auto& access_log : access_logs_) { - access_log.second->reopen(); + for (auto& [log_key, log_file_ptr] : access_logs_) { + log_file_ptr->reopen(); } } -AccessLogFileSharedPtr AccessLogManagerImpl::createAccessLog(const std::string& file_name_arg) { - const std::string* file_name = &file_name_arg; -#ifdef WIN32 - // Preserve the expected behavior of specifying path: /dev/null on Windows - static const std::string windows_dev_null("NUL"); - if (file_name_arg.compare("/dev/null") == 0) { - file_name = static_cast(&windows_dev_null); +AccessLogFileSharedPtr AccessLogManagerImpl::createAccessLog(const std::string& file_name) { + if (access_logs_.count(file_name)) { + return access_logs_[file_name]; } -#endif - std::unordered_map::const_iterator access_log = - access_logs_.find(*file_name); - if (access_log != access_logs_.end()) { - return access_log->second; - } - - access_logs_[*file_name] = 
std::make_shared( - api_.fileSystem().createFile(*file_name), dispatcher_, lock_, file_stats_, + access_logs_[file_name] = std::make_shared( + api_.fileSystem().createFile(file_name), dispatcher_, lock_, file_stats_, file_flush_interval_msec_, api_.threadFactory()); - return access_logs_[*file_name]; + return access_logs_[file_name]; } AccessLogFileImpl::AccessLogFileImpl(Filesystem::FilePtr&& file, Event::Dispatcher& dispatcher, @@ -214,7 +203,8 @@ void AccessLogFileImpl::write(absl::string_view data) { } void AccessLogFileImpl::createFlushStructures() { - flush_thread_ = thread_factory_.createThread([this]() -> void { flushThreadFunc(); }); + flush_thread_ = thread_factory_.createThread([this]() -> void { flushThreadFunc(); }, + Thread::Options{"AccessLogFlush"}); flush_timer_->enableTimer(flush_interval_msec_); } diff --git a/source/common/access_log/access_log_manager_impl.h b/source/common/access_log/access_log_manager_impl.h index 2bf745cf41bfe..1727a0bbf053b 100644 --- a/source/common/access_log/access_log_manager_impl.h +++ b/source/common/access_log/access_log_manager_impl.h @@ -1,7 +1,6 @@ #pragma once #include -#include #include "envoy/access_log/access_log.h" #include "envoy/api/api.h" @@ -14,6 +13,8 @@ #include "common/common/logger.h" #include "common/common/thread.h" +#include "absl/container/node_hash_map.h" + namespace Envoy { #define ACCESS_LOG_FILE_STATS(COUNTER, GAUGE) \ @@ -51,7 +52,7 @@ class AccessLogManagerImpl : public AccessLogManager, Logger::Loggable access_logs_; + absl::node_hash_map access_logs_; }; /** diff --git a/source/common/api/BUILD b/source/common/api/BUILD index 25c142ce784a2..7e7264f8f0ff3 100644 --- a/source/common/api/BUILD +++ b/source/common/api/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", @@ -7,6 +5,8 @@ load( "envoy_select_hot_restart", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( @@ -17,6 +17,7 @@ envoy_cc_library( 
"//include/envoy/api:api_interface", "//source/common/common:thread_lib", "//source/common/event:dispatcher_lib", + "//source/common/network:socket_lib", ], ) diff --git a/source/common/api/api_impl.h b/source/common/api/api_impl.h index b3318f4686373..0096da46ec4b1 100644 --- a/source/common/api/api_impl.h +++ b/source/common/api/api_impl.h @@ -6,6 +6,7 @@ #include "envoy/api/api.h" #include "envoy/event/timer.h" #include "envoy/filesystem/filesystem.h" +#include "envoy/network/socket.h" #include "envoy/thread/thread.h" namespace Envoy { diff --git a/source/common/api/posix/os_sys_calls_impl.cc b/source/common/api/posix/os_sys_calls_impl.cc index 2c00e69987722..546015123bc07 100644 --- a/source/common/api/posix/os_sys_calls_impl.cc +++ b/source/common/api/posix/os_sys_calls_impl.cc @@ -73,6 +73,43 @@ bool OsSysCallsImpl::supportsMmsg() const { #endif } +bool OsSysCallsImpl::supportsUdpGro() const { +#if !defined(__linux__) + return false; +#else + static const bool is_supported = [] { + int fd = ::socket(AF_INET, SOCK_DGRAM | SOCK_NONBLOCK, IPPROTO_UDP); + if (fd < 0) { + return false; + } + int val = 1; + bool result = (0 == ::setsockopt(fd, IPPROTO_UDP, UDP_GRO, &val, sizeof(val))); + ::close(fd); + return result; + }(); + return is_supported; +#endif +} + +bool OsSysCallsImpl::supportsUdpGso() const { +#if !defined(__linux__) + return false; +#else + static const bool is_supported = [] { + int fd = ::socket(AF_INET, SOCK_DGRAM | SOCK_NONBLOCK, IPPROTO_UDP); + if (fd < 0) { + return false; + } + int optval; + socklen_t optlen = sizeof(optval); + bool result = (0 <= ::getsockopt(fd, IPPROTO_UDP, UDP_SEGMENT, &optval, &optlen)); + ::close(fd); + return result; + }(); + return is_supported; +#endif +} + SysCallIntResult OsSysCallsImpl::ftruncate(int fd, off_t length) { const int rc = ::ftruncate(fd, length); return {rc, rc != -1 ? 
0 : errno}; diff --git a/source/common/api/posix/os_sys_calls_impl.h b/source/common/api/posix/os_sys_calls_impl.h index fc63bbc07ca44..036604eb40c10 100644 --- a/source/common/api/posix/os_sys_calls_impl.h +++ b/source/common/api/posix/os_sys_calls_impl.h @@ -22,6 +22,8 @@ class OsSysCallsImpl : public OsSysCalls { SysCallIntResult recvmmsg(os_fd_t sockfd, struct mmsghdr* msgvec, unsigned int vlen, int flags, struct timespec* timeout) override; bool supportsMmsg() const override; + bool supportsUdpGro() const override; + bool supportsUdpGso() const override; SysCallIntResult close(os_fd_t fd) override; SysCallIntResult ftruncate(int fd, off_t length) override; SysCallPtrResult mmap(void* addr, size_t length, int prot, int flags, int fd, diff --git a/source/common/api/win32/os_sys_calls_impl.cc b/source/common/api/win32/os_sys_calls_impl.cc index fa8af1a137cb8..86519612a2537 100644 --- a/source/common/api/win32/os_sys_calls_impl.cc +++ b/source/common/api/win32/os_sys_calls_impl.cc @@ -170,6 +170,16 @@ bool OsSysCallsImpl::supportsMmsg() const { return false; } +bool OsSysCallsImpl::supportsUdpGro() const { + // Windows doesn't support it. + return false; +} + +bool OsSysCallsImpl::supportsUdpGso() const { + // Windows doesn't support it. + return false; +} + SysCallIntResult OsSysCallsImpl::ftruncate(int fd, off_t length) { const int rc = ::_chsize_s(fd, length); return {rc, rc == 0 ? 
0 : errno}; @@ -246,7 +256,7 @@ SysCallIntResult OsSysCallsImpl::shutdown(os_fd_t sockfd, int how) { SysCallIntResult OsSysCallsImpl::socketpair(int domain, int type, int protocol, os_fd_t sv[2]) { if (sv == nullptr) { - return {SOCKET_ERROR, WSAEINVAL}; + return {SOCKET_ERROR, SOCKET_ERROR_INVAL}; } sv[0] = sv[1] = INVALID_SOCKET; @@ -274,7 +284,7 @@ SysCallIntResult OsSysCallsImpl::socketpair(int domain, int type, int protocol, a.in6.sin6_addr = in6addr_loopback; a.in6.sin6_port = 0; } else { - return {SOCKET_ERROR, WSAEINVAL}; + return {SOCKET_ERROR, SOCKET_ERROR_INVAL}; } auto onErr = [this, listener, sv]() -> void { diff --git a/source/common/api/win32/os_sys_calls_impl.h b/source/common/api/win32/os_sys_calls_impl.h index 1f6b56608b2b1..3a2ca378d658b 100644 --- a/source/common/api/win32/os_sys_calls_impl.h +++ b/source/common/api/win32/os_sys_calls_impl.h @@ -22,6 +22,8 @@ class OsSysCallsImpl : public OsSysCalls { SysCallIntResult recvmmsg(os_fd_t sockfd, struct mmsghdr* msgvec, unsigned int vlen, int flags, struct timespec* timeout) override; bool supportsMmsg() const override; + bool supportsUdpGro() const override; + bool supportsUdpGso() const override; SysCallIntResult close(os_fd_t fd) override; SysCallIntResult ftruncate(int fd, off_t length) override; SysCallPtrResult mmap(void* addr, size_t length, int prot, int flags, int fd, diff --git a/source/common/buffer/BUILD b/source/common/buffer/BUILD index 0b9acc3732abd..171aa8a089872 100644 --- a/source/common/buffer/BUILD +++ b/source/common/buffer/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( @@ -15,6 +15,7 @@ envoy_cc_library( deps = [ "//source/common/buffer:buffer_lib", "//source/common/common:assert_lib", + "//source/common/runtime:runtime_features_lib", ], ) diff --git a/source/common/buffer/buffer_impl.cc 
b/source/common/buffer/buffer_impl.cc index c53a51c02bd03..0b92c7a426f5a 100644 --- a/source/common/buffer/buffer_impl.cc +++ b/source/common/buffer/buffer_impl.cc @@ -33,6 +33,11 @@ void OwnedImpl::addImpl(const void* data, uint64_t size) { } } +void OwnedImpl::addDrainTracker(std::function drain_tracker) { + ASSERT(!slices_.empty()); + slices_.back()->addDrainTracker(std::move(drain_tracker)); +} + void OwnedImpl::add(const void* data, uint64_t size) { addImpl(data, size); } void OwnedImpl::addBufferFragment(BufferFragment& fragment) { @@ -143,7 +148,9 @@ void OwnedImpl::copyOut(size_t start, uint64_t size, void* data) const { ASSERT(size == 0); } -void OwnedImpl::drain(uint64_t size) { +void OwnedImpl::drain(uint64_t size) { drainImpl(size); } + +void OwnedImpl::drainImpl(uint64_t size) { while (size != 0) { if (slices_.empty()) { break; @@ -194,6 +201,34 @@ RawSliceVector OwnedImpl::getRawSlices(absl::optional max_slices) cons return raw_slices; } +SliceDataPtr OwnedImpl::extractMutableFrontSlice() { + RELEASE_ASSERT(length_ > 0, "Extract called on empty buffer"); + // Remove zero byte fragments from the front of the queue to ensure + // that the extracted slice has data. + while (!slices_.empty() && slices_.front()->dataSize() == 0) { + slices_.pop_front(); + } + ASSERT(!slices_.empty()); + ASSERT(slices_.front()); + auto slice = std::move(slices_.front()); + auto size = slice->dataSize(); + length_ -= size; + slices_.pop_front(); + if (!slice->isMutable()) { + // Create a mutable copy of the immutable slice data. + auto mutable_slice = OwnedSlice::create(size); + auto copy_size = mutable_slice->append(slice->data(), size); + ASSERT(copy_size == size); + // Drain trackers for the immutable slice will be called as part of the slice destructor. + return mutable_slice; + } else { + // Make sure drain trackers are called before ownership of the slice is transferred from + // the buffer to the caller. 
+ slice->callAndClearDrainTrackers(); + return slice; + } +} + uint64_t OwnedImpl::length() const { #ifndef NDEBUG // When running in debug mode, verify that the precomputed length matches the sum @@ -213,32 +248,20 @@ void* OwnedImpl::linearize(uint32_t size) { if (slices_.empty()) { return nullptr; } - uint64_t linearized_size = 0; - uint64_t num_slices_to_linearize = 0; - for (const auto& slice : slices_) { - num_slices_to_linearize++; - linearized_size += slice->dataSize(); - if (linearized_size >= size) { - break; - } - } - if (num_slices_to_linearize > 1) { - auto new_slice = OwnedSlice::create(linearized_size); - uint64_t bytes_copied = 0; - Slice::Reservation reservation = new_slice->reserve(linearized_size); + if (slices_[0]->dataSize() < size) { + auto new_slice = OwnedSlice::create(size); + Slice::Reservation reservation = new_slice->reserve(size); ASSERT(reservation.mem_ != nullptr); - ASSERT(reservation.len_ == linearized_size); - auto dest = static_cast(reservation.mem_); - do { - uint64_t data_size = slices_.front()->dataSize(); - memcpy(dest, slices_.front()->data(), data_size); - bytes_copied += data_size; - dest += data_size; - slices_.pop_front(); - } while (bytes_copied < linearized_size); - ASSERT(dest == static_cast(reservation.mem_) + linearized_size); + ASSERT(reservation.len_ == size); + copyOut(0, size, reservation.mem_); new_slice->commit(reservation); + + // Replace the first 'size' bytes in the buffer with the new slice. Since new_slice re-adds the + // drained bytes, avoid use of the overridable 'drain' method to avoid incorrectly checking if + // we dipped below low-watermark. + drainImpl(size); slices_.emplace_front(std::move(new_slice)); + length_ += size; } return slices_.front()->data(); } @@ -256,6 +279,7 @@ void OwnedImpl::coalesceOrAddSlice(SlicePtr&& other_slice) { // Copy content of the `other_slice`. The `move` methods which call this method effectively // drain the source buffer. 
addImpl(other_slice->data(), slice_size); + other_slice->transferDrainTrackersTo(*slices_.back()); } else { // Take ownership of the slice. slices_.emplace_back(std::move(other_slice)); @@ -372,7 +396,7 @@ uint64_t OwnedImpl::reserve(uint64_t length, RawSlice* iovecs, uint64_t num_iove return num_slices_used; } -ssize_t OwnedImpl::search(const void* data, uint64_t size, size_t start) const { +ssize_t OwnedImpl::search(const void* data, uint64_t size, size_t start, size_t length) const { // This implementation uses the same search algorithm as evbuffer_search(), a naive // scan that requires O(M*N) comparisons in the worst case. // TODO(brian-pane): replace this with a more efficient search if it shows up @@ -380,9 +404,17 @@ ssize_t OwnedImpl::search(const void* data, uint64_t size, size_t start) const { if (size == 0) { return (start <= length_) ? start : -1; } + + // length equal to zero means that entire buffer must be searched. + // Adjust the length to buffer length taking the staring index into account. + size_t left_to_search = length; + if (0 == length) { + left_to_search = length_ - start; + } ssize_t offset = 0; const uint8_t* needle = static_cast(data); - for (size_t slice_index = 0; slice_index < slices_.size(); slice_index++) { + for (size_t slice_index = 0; slice_index < slices_.size() && (left_to_search > 0); + slice_index++) { const auto& slice = slices_[slice_index]; uint64_t slice_size = slice->dataSize(); if (slice_size <= start) { @@ -395,20 +427,28 @@ ssize_t OwnedImpl::search(const void* data, uint64_t size, size_t start) const { const uint8_t* haystack_end = haystack + slice_size; haystack += start; while (haystack < haystack_end) { + const size_t slice_search_limit = + std::min(static_cast(haystack_end - haystack), left_to_search); // Search within this slice for the first byte of the needle. 
const uint8_t* first_byte_match = - static_cast(memchr(haystack, needle[0], haystack_end - haystack)); + static_cast(memchr(haystack, needle[0], slice_search_limit)); if (first_byte_match == nullptr) { + left_to_search -= slice_search_limit; break; } // After finding a match for the first byte of the needle, check whether the following // bytes in the buffer match the remainder of the needle. Note that the match can span // two or more slices. + left_to_search -= static_cast(first_byte_match - haystack + 1); + // Save the current number of bytes left to search. + // If the pattern is not found, the search will resume from the next byte + // and left_to_search value must be restored. + const size_t saved_left_to_search = left_to_search; size_t i = 1; size_t match_index = slice_index; const uint8_t* match_next = first_byte_match + 1; const uint8_t* match_end = haystack_end; - while (i < size) { + while ((i < size) && (0 < left_to_search)) { if (match_next >= match_end) { // We've hit the end of this slice, so continue checking against the next slice. match_index++; @@ -421,6 +461,7 @@ ssize_t OwnedImpl::search(const void* data, uint64_t size, size_t start) const { match_end = match_next + match_slice->dataSize(); continue; } + left_to_search--; if (*match_next++ != needle[i]) { break; } @@ -432,6 +473,7 @@ ssize_t OwnedImpl::search(const void* data, uint64_t size, size_t start) const { } // If this wasn't a successful match, start scanning again at the next byte. 
haystack = first_byte_match + 1; + left_to_search = saved_left_to_search; } start = 0; offset += slice_size; diff --git a/source/common/buffer/buffer_impl.h b/source/common/buffer/buffer_impl.h index 7da3adb821956..f5cea76504210 100644 --- a/source/common/buffer/buffer_impl.h +++ b/source/common/buffer/buffer_impl.h @@ -31,11 +31,22 @@ namespace Buffer { * | * data() */ -class Slice { +class Slice : public SliceData { public: using Reservation = RawSlice; - virtual ~Slice() = default; + ~Slice() override { callAndClearDrainTrackers(); } + + // SliceData + absl::Span getMutableData() override { + RELEASE_ASSERT(isMutable(), "Not allowed to call getMutableData if slice is immutable"); + return {base_ + data_, static_cast::size_type>(reservable_ - data_)}; + } + + /** + * @return true if the data in the slice is mutable + */ + virtual bool isMutable() const { return false; } /** * @return a pointer to the start of the usable content. @@ -113,10 +124,10 @@ class Slice { * @param reservation a reservation obtained from a previous call to reserve(). * If the reservation is not from this Slice, commit() will return false. * If the caller is committing fewer bytes than provided by reserve(), it - * should change the mem_ field of the reservation before calling commit(). + * should change the len_ field of the reservation before calling commit(). * For example, if a caller reserve()s 4KB to do a nonblocking socket read, * and the read only returns two bytes, the caller should set - * reservation.mem_ = 2 and then call `commit(reservation)`. + * reservation.len_ = 2 and then call `commit(reservation)`. * @return whether the Reservation was successfully committed to the Slice. 
*/ bool commit(const Reservation& reservation) { @@ -137,6 +148,9 @@ class Slice { */ uint64_t append(const void* data, uint64_t size) { uint64_t copy_size = std::min(size, reservableSize()); + if (copy_size == 0) { + return 0; + } uint8_t* dest = base_ + reservable_; reservable_ += copy_size; // NOLINTNEXTLINE(clang-analyzer-core.NullDereference) @@ -193,6 +207,32 @@ class Slice { return SliceRepresentation{dataSize(), reservableSize(), capacity_}; } + /** + * Move all drain trackers from the current slice to the destination slice. + */ + void transferDrainTrackersTo(Slice& destination) { + destination.drain_trackers_.splice(destination.drain_trackers_.end(), drain_trackers_); + ASSERT(drain_trackers_.empty()); + } + + /** + * Add a drain tracker to the slice. + */ + void addDrainTracker(std::function drain_tracker) { + drain_trackers_.emplace_back(std::move(drain_tracker)); + } + + /** + * Call all drain trackers associated with the slice, then clear + * the drain tracker list. + */ + void callAndClearDrainTrackers() { + for (const auto& drain_tracker : drain_trackers_) { + drain_tracker(); + } + drain_trackers_.clear(); + } + protected: Slice(uint64_t data, uint64_t reservable, uint64_t capacity) : data_(data), reservable_(reservable), capacity_(capacity) {} @@ -208,6 +248,8 @@ class Slice { /** Total number of bytes in the slice */ uint64_t capacity_; + + std::list> drain_trackers_; }; using SlicePtr = std::unique_ptr; @@ -243,6 +285,8 @@ class OwnedSlice final : public Slice, public InlineStorage { private: OwnedSlice(uint64_t size) : Slice(0, 0, size) { base_ = storage_; } + bool isMutable() const override { return true; } + /** * Compute a slice size big enough to hold a specified amount of data. * @param data_size the minimum amount of data the slice must be able to store, in bytes. 
@@ -510,6 +554,7 @@ class OwnedImpl : public LibEventInstance { OwnedImpl(const void* data, uint64_t size); // Buffer::Instance + void addDrainTracker(std::function drain_tracker) override; void add(const void* data, uint64_t size) override; void addBufferFragment(BufferFragment& fragment) override; void add(absl::string_view data) override; @@ -520,13 +565,14 @@ class OwnedImpl : public LibEventInstance { void copyOut(size_t start, uint64_t size, void* data) const override; void drain(uint64_t size) override; RawSliceVector getRawSlices(absl::optional max_slices = absl::nullopt) const override; + SliceDataPtr extractMutableFrontSlice() override; uint64_t length() const override; void* linearize(uint32_t size) override; void move(Instance& rhs) override; void move(Instance& rhs, uint64_t length) override; Api::IoCallUint64Result read(Network::IoHandle& io_handle, uint64_t max_length) override; uint64_t reserve(uint64_t length, RawSlice* iovecs, uint64_t num_iovecs) override; - ssize_t search(const void* data, uint64_t size, size_t start) const override; + ssize_t search(const void* data, uint64_t size, size_t start, size_t length) const override; bool startsWith(absl::string_view data) const override; Api::IoCallUint64Result write(Network::IoHandle& io_handle) override; std::string toString() const override; @@ -539,13 +585,13 @@ class OwnedImpl : public LibEventInstance { * @param data start of the content to copy. * */ - void appendSliceForTest(const void* data, uint64_t size); + virtual void appendSliceForTest(const void* data, uint64_t size); /** * Create a new slice at the end of the buffer, and copy the supplied string into it. * @param data the string to append to the buffer. */ - void appendSliceForTest(absl::string_view data); + virtual void appendSliceForTest(absl::string_view data); /** * Describe the in-memory representation of the slices in the buffer. 
For use @@ -563,6 +609,7 @@ class OwnedImpl : public LibEventInstance { bool isSameBufferImpl(const Instance& rhs) const; void addImpl(const void* data, uint64_t size); + void drainImpl(uint64_t size); /** * Moves contents of the `other_slice` by either taking its ownership or coalescing it diff --git a/source/common/buffer/watermark_buffer.cc b/source/common/buffer/watermark_buffer.cc index 4f891d7c80293..9d566be1965d8 100644 --- a/source/common/buffer/watermark_buffer.cc +++ b/source/common/buffer/watermark_buffer.cc @@ -1,38 +1,39 @@ #include "common/buffer/watermark_buffer.h" #include "common/common/assert.h" +#include "common/runtime/runtime_features.h" namespace Envoy { namespace Buffer { void WatermarkBuffer::add(const void* data, uint64_t size) { OwnedImpl::add(data, size); - checkHighWatermark(); + checkHighAndOverflowWatermarks(); } void WatermarkBuffer::add(absl::string_view data) { OwnedImpl::add(data); - checkHighWatermark(); + checkHighAndOverflowWatermarks(); } void WatermarkBuffer::add(const Instance& data) { OwnedImpl::add(data); - checkHighWatermark(); + checkHighAndOverflowWatermarks(); } void WatermarkBuffer::prepend(absl::string_view data) { OwnedImpl::prepend(data); - checkHighWatermark(); + checkHighAndOverflowWatermarks(); } void WatermarkBuffer::prepend(Instance& data) { OwnedImpl::prepend(data); - checkHighWatermark(); + checkHighAndOverflowWatermarks(); } void WatermarkBuffer::commit(RawSlice* iovecs, uint64_t num_iovecs) { OwnedImpl::commit(iovecs, num_iovecs); - checkHighWatermark(); + checkHighAndOverflowWatermarks(); } void WatermarkBuffer::drain(uint64_t size) { @@ -42,23 +43,29 @@ void WatermarkBuffer::drain(uint64_t size) { void WatermarkBuffer::move(Instance& rhs) { OwnedImpl::move(rhs); - checkHighWatermark(); + checkHighAndOverflowWatermarks(); } void WatermarkBuffer::move(Instance& rhs, uint64_t length) { OwnedImpl::move(rhs, length); - checkHighWatermark(); + checkHighAndOverflowWatermarks(); +} + +SliceDataPtr 
WatermarkBuffer::extractMutableFrontSlice() { + auto result = OwnedImpl::extractMutableFrontSlice(); + checkLowWatermark(); + return result; } Api::IoCallUint64Result WatermarkBuffer::read(Network::IoHandle& io_handle, uint64_t max_length) { Api::IoCallUint64Result result = OwnedImpl::read(io_handle, max_length); - checkHighWatermark(); + checkHighAndOverflowWatermarks(); return result; } uint64_t WatermarkBuffer::reserve(uint64_t length, RawSlice* iovecs, uint64_t num_iovecs) { uint64_t bytes_reserved = OwnedImpl::reserve(length, iovecs, num_iovecs); - checkHighWatermark(); + checkHighAndOverflowWatermarks(); return bytes_reserved; } @@ -68,11 +75,30 @@ Api::IoCallUint64Result WatermarkBuffer::write(Network::IoHandle& io_handle) { return result; } +void WatermarkBuffer::appendSliceForTest(const void* data, uint64_t size) { + OwnedImpl::appendSliceForTest(data, size); + checkHighAndOverflowWatermarks(); +} + +void WatermarkBuffer::appendSliceForTest(absl::string_view data) { + appendSliceForTest(data.data(), data.size()); +} + void WatermarkBuffer::setWatermarks(uint32_t low_watermark, uint32_t high_watermark) { ASSERT(low_watermark < high_watermark || (high_watermark == 0 && low_watermark == 0)); + uint32_t overflow_watermark_multiplier = + Runtime::getInteger("envoy.buffer.overflow_multiplier", 0); + if (overflow_watermark_multiplier > 0 && + (static_cast(overflow_watermark_multiplier) * high_watermark) > + std::numeric_limits::max()) { + ENVOY_LOG_MISC(debug, "Error setting overflow threshold: envoy.buffer.overflow_multiplier * " + "high_watermark is overflowing. 
Disabling overflow watermark."); + overflow_watermark_multiplier = 0; + } low_watermark_ = low_watermark; high_watermark_ = high_watermark; - checkHighWatermark(); + overflow_watermark_ = overflow_watermark_multiplier * high_watermark; + checkHighAndOverflowWatermarks(); checkLowWatermark(); } @@ -86,14 +112,23 @@ void WatermarkBuffer::checkLowWatermark() { below_low_watermark_(); } -void WatermarkBuffer::checkHighWatermark() { - if (above_high_watermark_called_ || high_watermark_ == 0 || - OwnedImpl::length() <= high_watermark_) { +void WatermarkBuffer::checkHighAndOverflowWatermarks() { + if (high_watermark_ == 0 || OwnedImpl::length() <= high_watermark_) { return; } - above_high_watermark_called_ = true; - above_high_watermark_(); + if (!above_high_watermark_called_) { + above_high_watermark_called_ = true; + above_high_watermark_(); + } + + // Check if overflow watermark is enabled, wasn't previously triggered, + // and the buffer size is above the threshold + if (overflow_watermark_ != 0 && !above_overflow_watermark_called_ && + OwnedImpl::length() > overflow_watermark_) { + above_overflow_watermark_called_ = true; + above_overflow_watermark_(); + } } } // namespace Buffer diff --git a/source/common/buffer/watermark_buffer.h b/source/common/buffer/watermark_buffer.h index 827d1a51bccfd..de44822a56ab7 100644 --- a/source/common/buffer/watermark_buffer.h +++ b/source/common/buffer/watermark_buffer.h @@ -13,11 +13,15 @@ namespace Buffer { // buffer size transitions from under the low watermark to above the high watermark, the // above_high_watermark function is called one time. It will not be called again until the buffer // is drained below the low watermark, at which point the below_low_watermark function is called. +// If the buffer size is above the overflow watermark, above_overflow_watermark is called. +// It is only called on the first time the buffer overflows. 
class WatermarkBuffer : public OwnedImpl { public: WatermarkBuffer(std::function below_low_watermark, - std::function above_high_watermark) - : below_low_watermark_(below_low_watermark), above_high_watermark_(above_high_watermark) {} + std::function above_high_watermark, + std::function above_overflow_watermark) + : below_low_watermark_(below_low_watermark), above_high_watermark_(above_high_watermark), + above_overflow_watermark_(above_overflow_watermark) {} // Override all functions from Instance which can result in changing the size // of the underlying buffer. @@ -30,30 +34,40 @@ class WatermarkBuffer : public OwnedImpl { void drain(uint64_t size) override; void move(Instance& rhs) override; void move(Instance& rhs, uint64_t length) override; + SliceDataPtr extractMutableFrontSlice() override; Api::IoCallUint64Result read(Network::IoHandle& io_handle, uint64_t max_length) override; uint64_t reserve(uint64_t length, RawSlice* iovecs, uint64_t num_iovecs) override; Api::IoCallUint64Result write(Network::IoHandle& io_handle) override; void postProcess() override { checkLowWatermark(); } + void appendSliceForTest(const void* data, uint64_t size) override; + void appendSliceForTest(absl::string_view data) override; void setWatermarks(uint32_t watermark) { setWatermarks(watermark / 2, watermark); } void setWatermarks(uint32_t low_watermark, uint32_t high_watermark); uint32_t highWatermark() const { return high_watermark_; } + // Returns true if the high watermark callbacks have been called more recently + // than the low watermark callbacks. + bool highWatermarkTriggered() const { return above_high_watermark_called_; } private: - void checkHighWatermark(); + void checkHighAndOverflowWatermarks(); void checkLowWatermark(); std::function below_low_watermark_; std::function above_high_watermark_; + std::function above_overflow_watermark_; // Used for enforcing buffer limits (off by default). 
If these are set to non-zero by a call to // setWatermarks() the watermark callbacks will be called as described above. uint32_t high_watermark_{0}; uint32_t low_watermark_{0}; + uint32_t overflow_watermark_{0}; // Tracks the latest state of watermark callbacks. // True between the time above_high_watermark_ has been called until above_high_watermark_ has // been called. bool above_high_watermark_called_{false}; + // Set to true when above_overflow_watermark_ is called (and isn't cleared). + bool above_overflow_watermark_called_{false}; }; using WatermarkBufferPtr = std::unique_ptr; @@ -62,8 +76,10 @@ class WatermarkBufferFactory : public WatermarkFactory { public: // Buffer::WatermarkFactory InstancePtr create(std::function below_low_watermark, - std::function above_high_watermark) override { - return InstancePtr{new WatermarkBuffer(below_low_watermark, above_high_watermark)}; + std::function above_high_watermark, + std::function above_overflow_watermark) override { + return std::make_unique(below_low_watermark, above_high_watermark, + above_overflow_watermark); } }; diff --git a/source/common/buffer/zero_copy_input_stream_impl.cc b/source/common/buffer/zero_copy_input_stream_impl.cc index e94e36799b524..6b805eaf01a02 100644 --- a/source/common/buffer/zero_copy_input_stream_impl.cc +++ b/source/common/buffer/zero_copy_input_stream_impl.cc @@ -19,11 +19,15 @@ void ZeroCopyInputStreamImpl::move(Buffer::Instance& instance) { buffer_->move(instance); } -bool ZeroCopyInputStreamImpl::Next(const void** data, int* size) { +void ZeroCopyInputStreamImpl::drainLastSlice() { if (position_ != 0) { buffer_->drain(position_); position_ = 0; } +} + +bool ZeroCopyInputStreamImpl::Next(const void** data, int* size) { + drainLastSlice(); Buffer::RawSliceVector slices = buffer_->getRawSlices(1); @@ -44,7 +48,19 @@ bool ZeroCopyInputStreamImpl::Next(const void** data, int* size) { return false; } -bool ZeroCopyInputStreamImpl::Skip(int) { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } +bool 
ZeroCopyInputStreamImpl::Skip(int count) { + ASSERT(count >= 0); + drainLastSlice(); + + // Could not skip more than buffer length. + if (static_cast(count) > buffer_->length()) { + return false; + } + + buffer_->drain(count); + byte_count_ += count; + return true; +} void ZeroCopyInputStreamImpl::BackUp(int count) { ASSERT(count >= 0); diff --git a/source/common/buffer/zero_copy_input_stream_impl.h b/source/common/buffer/zero_copy_input_stream_impl.h index 96bdea0be9ea7..23304d06e34f6 100644 --- a/source/common/buffer/zero_copy_input_stream_impl.h +++ b/source/common/buffer/zero_copy_input_stream_impl.h @@ -36,10 +36,14 @@ class ZeroCopyInputStreamImpl : public virtual Protobuf::io::ZeroCopyInputStream // LimitingInputStream before passing to protobuf code to avoid a spin loop. bool Next(const void** data, int* size) override; void BackUp(int count) override; - bool Skip(int count) override; // Not implemented + bool Skip(int count) override; ProtobufTypes::Int64 ByteCount() const override { return byte_count_; } protected: + // The last slice is kept to support limited BackUp() calls. + // This function will drain it. 
+ void drainLastSlice(); + Buffer::InstancePtr buffer_; uint64_t position_{0}; bool finished_{false}; diff --git a/source/common/chromium_url/BUILD b/source/common/chromium_url/BUILD deleted file mode 100644 index 9b07e76b00130..0000000000000 --- a/source/common/chromium_url/BUILD +++ /dev/null @@ -1,28 +0,0 @@ -licenses(["notice"]) # Apache 2 - -load( - "//bazel:envoy_build_system.bzl", - "envoy_cc_library", - "envoy_package", -) - -envoy_package() - -envoy_cc_library( - name = "chromium_url", - srcs = [ - "url_canon.cc", - "url_canon_internal.cc", - "url_canon_path.cc", - "url_canon_stdstring.cc", - ], - hdrs = [ - "envoy_shim.h", - "url_canon.h", - "url_canon_internal.h", - "url_canon_stdstring.h", - "url_parse.h", - "url_parse_internal.h", - ], - deps = ["//source/common/common:assert_lib"], -) diff --git a/source/common/chromium_url/LICENSE b/source/common/chromium_url/LICENSE deleted file mode 100644 index a32e00ce6be36..0000000000000 --- a/source/common/chromium_url/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2015 The Chromium Authors. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/source/common/chromium_url/README.md b/source/common/chromium_url/README.md deleted file mode 100644 index 32e251c82d4d2..0000000000000 --- a/source/common/chromium_url/README.md +++ /dev/null @@ -1,16 +0,0 @@ -This is a manually minified variant of -https://chromium.googlesource.com/chromium/src.git/+archive/74.0.3729.15/url.tar.gz, -providing just the parts needed for `url::CanonicalizePath()`. This is intended -to support a security release fix for CVE-2019-9901. Long term we need this to -be moved to absl or QUICHE for upgrades and long-term support. - -Some specific transforms of interest: -* The namespace `url` was changed to `chromium_url`. -* `url_parse.h` is minified to just `Component` and flattened back into the URL - directory. It does not contain any non-Chromium authored code any longer and - so does not have a separate LICENSE. -* `envoy_shim.h` adapts various macros to the Envoy context. -* Anything not reachable from `url::CanonicalizePath()` has been dropped. -* Header include paths have changed as needed. -* BUILD was manually written. -* Various clang-tidy and format fixes. 
diff --git a/source/common/chromium_url/envoy_shim.h b/source/common/chromium_url/envoy_shim.h deleted file mode 100644 index 2b7443926c1f5..0000000000000 --- a/source/common/chromium_url/envoy_shim.h +++ /dev/null @@ -1,17 +0,0 @@ -#pragma once - -#include "common/common/assert.h" - -// This is a minimal Envoy adaptation layer for the Chromium URL library. -// NOLINT(namespace-envoy) - -#define DISALLOW_COPY_AND_ASSIGN(TypeName) \ - TypeName(const TypeName&) = delete; \ - TypeName& operator=(const TypeName&) = delete - -#define EXPORT_TEMPLATE_DECLARE(x) -#define EXPORT_TEMPLATE_DEFINE(x) -#define COMPONENT_EXPORT(x) - -#define DCHECK(x) ASSERT(x) -#define NOTREACHED() NOT_REACHED_GCOVR_EXCL_LINE diff --git a/source/common/chromium_url/url_canon.cc b/source/common/chromium_url/url_canon.cc deleted file mode 100644 index b9ad1b829726c..0000000000000 --- a/source/common/chromium_url/url_canon.cc +++ /dev/null @@ -1,16 +0,0 @@ -// Envoy snapshot of Chromium URL path normalization, see README.md. -// NOLINT(namespace-envoy) - -// Copyright 2017 The Chromium Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include "common/chromium_url/url_canon.h" - -#include "common/chromium_url/envoy_shim.h" - -namespace chromium_url { - -template class EXPORT_TEMPLATE_DEFINE(COMPONENT_EXPORT(URL)) CanonOutputT; - -} // namespace chromium_url diff --git a/source/common/chromium_url/url_canon.h b/source/common/chromium_url/url_canon.h deleted file mode 100644 index 0280de643ac86..0000000000000 --- a/source/common/chromium_url/url_canon.h +++ /dev/null @@ -1,186 +0,0 @@ -// Envoy snapshot of Chromium URL path normalization, see README.md. -// NOLINT(namespace-envoy) - -// Copyright 2013 The Chromium Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -#ifndef URL_URL_CANON_H_ -#define URL_URL_CANON_H_ - -#include -#include - -#include "common/chromium_url/envoy_shim.h" -#include "common/chromium_url/url_parse.h" - -namespace chromium_url { - -// Canonicalizer output ------------------------------------------------------- - -// Base class for the canonicalizer output, this maintains a buffer and -// supports simple resizing and append operations on it. -// -// It is VERY IMPORTANT that no virtual function calls be made on the common -// code path. We only have two virtual function calls, the destructor and a -// resize function that is called when the existing buffer is not big enough. -// The derived class is then in charge of setting up our buffer which we will -// manage. -template class CanonOutputT { -public: - CanonOutputT() : buffer_(NULL), buffer_len_(0), cur_len_(0) {} - virtual ~CanonOutputT() = default; - - // Implemented to resize the buffer. This function should update the buffer - // pointer to point to the new buffer, and any old data up to |cur_len_| in - // the buffer must be copied over. - // - // The new size |sz| must be larger than buffer_len_. - virtual void Resize(int sz) = 0; - - // Accessor for returning a character at a given position. The input offset - // must be in the valid range. - inline T at(int offset) const { return buffer_[offset]; } - - // Sets the character at the given position. The given position MUST be less - // than the length(). - inline void set(int offset, T ch) { buffer_[offset] = ch; } - - // Returns the number of characters currently in the buffer. - inline int length() const { return cur_len_; } - - // Returns the current capacity of the buffer. The length() is the number of - // characters that have been declared to be written, but the capacity() is - // the number that can be written without reallocation. 
If the caller must - // write many characters at once, it can make sure there is enough capacity, - // write the data, then use set_size() to declare the new length(). - int capacity() const { return buffer_len_; } - - // Called by the user of this class to get the output. The output will NOT - // be NULL-terminated. Call length() to get the - // length. - const T* data() const { return buffer_; } - T* data() { return buffer_; } - - // Shortens the URL to the new length. Used for "backing up" when processing - // relative paths. This can also be used if an external function writes a lot - // of data to the buffer (when using the "Raw" version below) beyond the end, - // to declare the new length. - // - // This MUST NOT be used to expand the size of the buffer beyond capacity(). - void set_length(int new_len) { cur_len_ = new_len; } - - // This is the most performance critical function, since it is called for - // every character. - void push_back(T ch) { - // In VC2005, putting this common case first speeds up execution - // dramatically because this branch is predicted as taken. - if (cur_len_ < buffer_len_) { - buffer_[cur_len_] = ch; - cur_len_++; - return; - } - - // Grow the buffer to hold at least one more item. Hopefully we won't have - // to do this very often. - if (!Grow(1)) - return; - - // Actually do the insertion. - buffer_[cur_len_] = ch; - cur_len_++; - } - - // Appends the given string to the output. - void Append(const T* str, int str_len) { - if (cur_len_ + str_len > buffer_len_) { - if (!Grow(cur_len_ + str_len - buffer_len_)) - return; - } - for (int i = 0; i < str_len; i++) - buffer_[cur_len_ + i] = str[i]; - cur_len_ += str_len; - } - - void ReserveSizeIfNeeded(int estimated_size) { - // Reserve a bit extra to account for escaped chars. - if (estimated_size > buffer_len_) - Resize(estimated_size + 8); - } - -protected: - // Grows the given buffer so that it can fit at least |min_additional| - // characters. 
Returns true if the buffer could be resized, false on OOM. - bool Grow(int min_additional) { - static const int kMinBufferLen = 16; - int new_len = (buffer_len_ == 0) ? kMinBufferLen : buffer_len_; - do { - if (new_len >= (1 << 30)) // Prevent overflow below. - return false; - new_len *= 2; - } while (new_len < buffer_len_ + min_additional); - Resize(new_len); - return true; - } - - T* buffer_; - int buffer_len_; - - // Used characters in the buffer. - int cur_len_; -}; - -// Simple implementation of the CanonOutput using new[]. This class -// also supports a static buffer so if it is allocated on the stack, most -// URLs can be canonicalized with no heap allocations. -template class RawCanonOutputT : public CanonOutputT { -public: - RawCanonOutputT() : CanonOutputT() { - this->buffer_ = fixed_buffer_; - this->buffer_len_ = fixed_capacity; - } - ~RawCanonOutputT() override { - if (this->buffer_ != fixed_buffer_) - delete[] this->buffer_; - } - - void Resize(int sz) override { - T* new_buf = new T[sz]; - memcpy(new_buf, this->buffer_, sizeof(T) * (this->cur_len_ < sz ? this->cur_len_ : sz)); - if (this->buffer_ != fixed_buffer_) - delete[] this->buffer_; - this->buffer_ = new_buf; - this->buffer_len_ = sz; - } - -protected: - T fixed_buffer_[fixed_capacity]; -}; - -// Explicitly instantiate commonly used instantiations. -extern template class EXPORT_TEMPLATE_DECLARE(COMPONENT_EXPORT(URL)) CanonOutputT; - -// Normally, all canonicalization output is in narrow characters. We support -// the templates so it can also be used internally if a wide buffer is -// required. -using CanonOutput = CanonOutputT; - -template -class RawCanonOutput : public RawCanonOutputT {}; - -// Path. If the input does not begin in a slash (including if the input is -// empty), we'll prepend a slash to the path to make it canonical. 
-// -// The 8-bit version assumes UTF-8 encoding, but does not verify the validity -// of the UTF-8 (i.e., you can have invalid UTF-8 sequences, invalid -// characters, etc.). Normally, URLs will come in as UTF-16, so this isn't -// an issue. Somebody giving us an 8-bit path is responsible for generating -// the path that the server expects (we'll escape high-bit characters), so -// if something is invalid, it's their problem. -COMPONENT_EXPORT(URL) -bool CanonicalizePath(const char* spec, const Component& path, CanonOutput* output, - Component* out_path); - -} // namespace chromium_url - -#endif // URL_URL_CANON_H_ diff --git a/source/common/chromium_url/url_canon_internal.cc b/source/common/chromium_url/url_canon_internal.cc deleted file mode 100644 index 38c932cad5b47..0000000000000 --- a/source/common/chromium_url/url_canon_internal.cc +++ /dev/null @@ -1,295 +0,0 @@ -// Envoy snapshot of Chromium URL path normalization, see README.md. -// NOLINT(namespace-envoy) - -// Copyright 2013 The Chromium Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include "common/chromium_url/url_canon_internal.h" - -namespace chromium_url { - -// See the header file for this array's declaration. -const unsigned char kSharedCharTypeTable[0x100] = { - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, // 0x00 - 0x0f - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, // 0x10 - 0x1f - 0, // 0x20 ' ' (escape spaces in queries) - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x21 ! - 0, // 0x22 " - 0, // 0x23 # (invalid in query since it marks the ref) - CHAR_QUERY | CHAR_USERINFO, // 0x24 $ - CHAR_QUERY | CHAR_USERINFO, // 0x25 % - CHAR_QUERY | CHAR_USERINFO, // 0x26 & - 0, // 0x27 ' (Try to prevent XSS.) 
- CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x28 ( - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x29 ) - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x2a * - CHAR_QUERY | CHAR_USERINFO, // 0x2b + - CHAR_QUERY | CHAR_USERINFO, // 0x2c , - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x2d - - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_COMPONENT, // 0x2e . - CHAR_QUERY, // 0x2f / - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_DEC | CHAR_OCT | - CHAR_COMPONENT, // 0x30 0 - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_DEC | CHAR_OCT | - CHAR_COMPONENT, // 0x31 1 - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_DEC | CHAR_OCT | - CHAR_COMPONENT, // 0x32 2 - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_DEC | CHAR_OCT | - CHAR_COMPONENT, // 0x33 3 - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_DEC | CHAR_OCT | - CHAR_COMPONENT, // 0x34 4 - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_DEC | CHAR_OCT | - CHAR_COMPONENT, // 0x35 5 - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_DEC | CHAR_OCT | - CHAR_COMPONENT, // 0x36 6 - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_DEC | CHAR_OCT | - CHAR_COMPONENT, // 0x37 7 - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_DEC | CHAR_COMPONENT, // 0x38 8 - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_DEC | CHAR_COMPONENT, // 0x39 9 - CHAR_QUERY, // 0x3a : - CHAR_QUERY, // 0x3b ; - 0, // 0x3c < (Try to prevent certain types of XSS.) - CHAR_QUERY, // 0x3d = - 0, // 0x3e > (Try to prevent certain types of XSS.) - CHAR_QUERY, // 0x3f ? 
- CHAR_QUERY, // 0x40 @ - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x41 A - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x42 B - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x43 C - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x44 D - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x45 E - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x46 F - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x47 G - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x48 H - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x49 I - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x4a J - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x4b K - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x4c L - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x4d M - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x4e N - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x4f O - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x50 P - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x51 Q - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x52 R - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x53 S - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x54 T - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x55 U - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x56 V - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x57 W - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_COMPONENT, // 0x58 X - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x59 Y - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x5a Z - CHAR_QUERY, // 0x5b [ - CHAR_QUERY, // 0x5c '\' - CHAR_QUERY, // 0x5d ] - CHAR_QUERY, // 0x5e ^ - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x5f _ - CHAR_QUERY, // 0x60 ` - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x61 a - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x62 b - 
CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x63 c - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x64 d - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x65 e - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x66 f - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x67 g - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x68 h - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x69 i - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x6a j - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x6b k - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x6c l - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x6d m - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x6e n - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x6f o - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x70 p - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x71 q - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x72 r - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x73 s - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x74 t - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x75 u - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x76 v - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x77 w - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_COMPONENT, // 0x78 x - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x79 y - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x7a z - CHAR_QUERY, // 0x7b { - CHAR_QUERY, // 0x7c | - CHAR_QUERY, // 0x7d } - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x7e ~ - 0, // 0x7f - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, // 0x80 - 0x8f - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, // 0x90 - 0x9f - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, // 0xa0 - 0xaf - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, // 0xb0 - 0xbf - 0, - 
0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, // 0xc0 - 0xcf - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, // 0xd0 - 0xdf - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, // 0xe0 - 0xef - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, // 0xf0 - 0xff -}; - -const char kHexCharLookup[0x10] = { - '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F', -}; - -const char kCharToHexLookup[8] = { - 0, // 0x00 - 0x1f - '0', // 0x20 - 0x3f: digits 0 - 9 are 0x30 - 0x39 - 'A' - 10, // 0x40 - 0x5f: letters A - F are 0x41 - 0x46 - 'a' - 10, // 0x60 - 0x7f: letters a - f are 0x61 - 0x66 - 0, // 0x80 - 0x9F - 0, // 0xA0 - 0xBF - 0, // 0xC0 - 0xDF - 0, // 0xE0 - 0xFF -}; - -} // namespace chromium_url diff --git a/source/common/chromium_url/url_canon_internal.h b/source/common/chromium_url/url_canon_internal.h deleted file mode 100644 index 8c405b49814a0..0000000000000 --- a/source/common/chromium_url/url_canon_internal.h +++ /dev/null @@ -1,204 +0,0 @@ -// Envoy snapshot of Chromium URL path normalization, see README.md. -// NOLINT(namespace-envoy) - -// Copyright 2013 The Chromium Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef URL_URL_CANON_INTERNAL_H_ -#define URL_URL_CANON_INTERNAL_H_ - -// This file is intended to be included in another C++ file where the character -// types are defined. This allows us to write mostly generic code, but not have -// template bloat because everything is inlined when anybody calls any of our -// functions. - -#include -#include - -#include "common/chromium_url/envoy_shim.h" -#include "common/chromium_url/url_canon.h" - -namespace chromium_url { - -// Character type handling ----------------------------------------------------- - -// Bits that identify different character types. 
These types identify different -// bits that are set for each 8-bit character in the kSharedCharTypeTable. -enum SharedCharTypes { - // Characters that do not require escaping in queries. Characters that do - // not have this flag will be escaped; see url_canon_query.cc - CHAR_QUERY = 1, - - // Valid in the username/password field. - CHAR_USERINFO = 2, - - // Valid in a IPv4 address (digits plus dot and 'x' for hex). - CHAR_IPV4 = 4, - - // Valid in an ASCII-representation of a hex digit (as in %-escaped). - CHAR_HEX = 8, - - // Valid in an ASCII-representation of a decimal digit. - CHAR_DEC = 16, - - // Valid in an ASCII-representation of an octal digit. - CHAR_OCT = 32, - - // Characters that do not require escaping in encodeURIComponent. Characters - // that do not have this flag will be escaped; see url_util.cc. - CHAR_COMPONENT = 64, -}; - -// This table contains the flags in SharedCharTypes for each 8-bit character. -// Some canonicalization functions have their own specialized lookup table. -// For those with simple requirements, we have collected the flags in one -// place so there are fewer lookup tables to load into the CPU cache. -// -// Using an unsigned char type has a small but measurable performance benefit -// over using a 32-bit number. -extern const unsigned char kSharedCharTypeTable[0x100]; - -// More readable wrappers around the character type lookup table. -inline bool IsCharOfType(unsigned char c, SharedCharTypes type) { - return !!(kSharedCharTypeTable[c] & type); -} -inline bool IsQueryChar(unsigned char c) { return IsCharOfType(c, CHAR_QUERY); } -inline bool IsIPv4Char(unsigned char c) { return IsCharOfType(c, CHAR_IPV4); } -inline bool IsHexChar(unsigned char c) { return IsCharOfType(c, CHAR_HEX); } -inline bool IsComponentChar(unsigned char c) { return IsCharOfType(c, CHAR_COMPONENT); } - -// Maps the hex numerical values 0x0 to 0xf to the corresponding ASCII digit -// that will be used to represent it. 
-COMPONENT_EXPORT(URL) extern const char kHexCharLookup[0x10]; - -// This lookup table allows fast conversion between ASCII hex letters and their -// corresponding numerical value. The 8-bit range is divided up into 8 -// regions of 0x20 characters each. Each of the three character types (numbers, -// uppercase, lowercase) falls into different regions of this range. The table -// contains the amount to subtract from characters in that range to get at -// the corresponding numerical value. -// -// See HexDigitToValue for the lookup. -extern const char kCharToHexLookup[8]; - -// Assumes the input is a valid hex digit! Call IsHexChar before using this. -inline unsigned char HexCharToValue(unsigned char c) { return c - kCharToHexLookup[c / 0x20]; } - -// Indicates if the given character is a dot or dot equivalent, returning the -// number of characters taken by it. This will be one for a literal dot, 3 for -// an escaped dot. If the character is not a dot, this will return 0. -template inline int IsDot(const CHAR* spec, int offset, int end) { - if (spec[offset] == '.') { - return 1; - } else if (spec[offset] == '%' && offset + 3 <= end && spec[offset + 1] == '2' && - (spec[offset + 2] == 'e' || spec[offset + 2] == 'E')) { - // Found "%2e" - return 3; - } - return 0; -} - -// Write a single character, escaped, to the output. This always escapes: it -// does no checking that thee character requires escaping. -// Escaping makes sense only 8 bit chars, so code works in all cases of -// input parameters (8/16bit). -template -inline void AppendEscapedChar(UINCHAR ch, CanonOutputT* output) { - output->push_back('%'); - output->push_back(kHexCharLookup[(ch >> 4) & 0xf]); - output->push_back(kHexCharLookup[ch & 0xf]); -} - -// UTF-8 functions ------------------------------------------------------------ - -// Generic To-UTF-8 converter. This will call the given append method for each -// character that should be appended, with the given output method. 
Wrappers -// are provided below for escaped and non-escaped versions of this. -// -// The char_value must have already been checked that it's a valid Unicode -// character. -template -inline void DoAppendUTF8(unsigned char_value, Output* output) { - if (char_value <= 0x7f) { - Appender(static_cast(char_value), output); - } else if (char_value <= 0x7ff) { - // 110xxxxx 10xxxxxx - Appender(static_cast(0xC0 | (char_value >> 6)), output); - Appender(static_cast(0x80 | (char_value & 0x3f)), output); - } else if (char_value <= 0xffff) { - // 1110xxxx 10xxxxxx 10xxxxxx - Appender(static_cast(0xe0 | (char_value >> 12)), output); - Appender(static_cast(0x80 | ((char_value >> 6) & 0x3f)), output); - Appender(static_cast(0x80 | (char_value & 0x3f)), output); - } else if (char_value <= 0x10FFFF) { // Max Unicode code point. - // 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx - Appender(static_cast(0xf0 | (char_value >> 18)), output); - Appender(static_cast(0x80 | ((char_value >> 12) & 0x3f)), output); - Appender(static_cast(0x80 | ((char_value >> 6) & 0x3f)), output); - Appender(static_cast(0x80 | (char_value & 0x3f)), output); - } else { - // Invalid UTF-8 character (>20 bits). - NOTREACHED(); - } -} - -// Helper used by AppendUTF8Value below. We use an unsigned parameter so there -// are no funny sign problems with the input, but then have to convert it to -// a regular char for appending. -inline void AppendCharToOutput(unsigned char ch, CanonOutput* output) { - output->push_back(static_cast(ch)); -} - -// Writes the given character to the output as UTF-8. This does NO checking -// of the validity of the Unicode characters; the caller should ensure that -// the value it is appending is valid to append. -inline void AppendUTF8Value(unsigned char_value, CanonOutput* output) { - DoAppendUTF8(char_value, output); -} - -// Writes the given character to the output as UTF-8, escaping ALL -// characters (even when they are ASCII). 
This does NO checking of the -// validity of the Unicode characters; the caller should ensure that the value -// it is appending is valid to append. -inline void AppendUTF8EscapedValue(unsigned char_value, CanonOutput* output) { - DoAppendUTF8(char_value, output); -} - -// Given a '%' character at |*begin| in the string |spec|, this will decode -// the escaped value and put it into |*unescaped_value| on success (returns -// true). On failure, this will return false, and will not write into -// |*unescaped_value|. -// -// |*begin| will be updated to point to the last character of the escape -// sequence so that when called with the index of a for loop, the next time -// through it will point to the next character to be considered. On failure, -// |*begin| will be unchanged. -inline bool Is8BitChar(char /*c*/) { - return true; // this case is specialized to avoid a warning -} - -template -inline bool DecodeEscaped(const CHAR* spec, int* begin, int end, unsigned char* unescaped_value) { - if (*begin + 3 > end || !Is8BitChar(spec[*begin + 1]) || !Is8BitChar(spec[*begin + 2])) { - // Invalid escape sequence because there's not enough room, or the - // digits are not ASCII. - return false; - } - - unsigned char first = static_cast(spec[*begin + 1]); - unsigned char second = static_cast(spec[*begin + 2]); - if (!IsHexChar(first) || !IsHexChar(second)) { - // Invalid hex digits, fail. - return false; - } - - // Valid escape sequence. - *unescaped_value = (HexCharToValue(first) << 4) + HexCharToValue(second); - *begin += 2; - return true; -} - -} // namespace chromium_url - -#endif // URL_URL_CANON_INTERNAL_H_ diff --git a/source/common/chromium_url/url_canon_path.cc b/source/common/chromium_url/url_canon_path.cc deleted file mode 100644 index 22587c0ab8a10..0000000000000 --- a/source/common/chromium_url/url_canon_path.cc +++ /dev/null @@ -1,413 +0,0 @@ -// Envoy snapshot of Chromium URL path normalization, see README.md. 
-// NOLINT(namespace-envoy) - -// Copyright 2013 The Chromium Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include - -#include "common/chromium_url/url_canon.h" -#include "common/chromium_url/url_canon_internal.h" -#include "common/chromium_url/url_parse_internal.h" - -namespace chromium_url { - -namespace { - -enum CharacterFlags { - // Pass through unchanged, whether escaped or unescaped. This doesn't - // actually set anything so you can't OR it to check, it's just to make the - // table below more clear when neither ESCAPE or UNESCAPE is set. - PASS = 0, - - // This character requires special handling in DoPartialPath. Doing this test - // first allows us to filter out the common cases of regular characters that - // can be directly copied. - SPECIAL = 1, - - // This character must be escaped in the canonical output. Note that all - // escaped chars also have the "special" bit set so that the code that looks - // for this is triggered. Not valid with PASS or ESCAPE - ESCAPE_BIT = 2, - ESCAPE = ESCAPE_BIT | SPECIAL, - - // This character must be unescaped in canonical output. Not valid with - // ESCAPE or PASS. We DON'T set the SPECIAL flag since if we encounter these - // characters unescaped, they should just be copied. - UNESCAPE = 4, - - // This character is disallowed in URLs. Note that the "special" bit is also - // set to trigger handling. - INVALID_BIT = 8, - INVALID = INVALID_BIT | SPECIAL, -}; - -// This table contains one of the above flag values. Note some flags are more -// than one bits because they also turn on the "special" flag. Special is the -// only flag that may be combined with others. -// -// This table is designed to match exactly what IE does with the characters. -// -// Dot is even more special, and the escaped version is handled specially by -// IsDot. 
Therefore, we don't need the "escape" flag, and even the "unescape" -// bit is never handled (we just need the "special") bit. -const unsigned char kPathCharLookup[0x100] = { - // NULL control chars... - INVALID, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, - ESCAPE, ESCAPE, ESCAPE, ESCAPE, - // control chars... - ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, - ESCAPE, ESCAPE, ESCAPE, ESCAPE, - // ' ' ! " # $ % & ' ( ) * - // + , - . / - ESCAPE, PASS, ESCAPE, ESCAPE, PASS, ESCAPE, PASS, PASS, PASS, PASS, PASS, PASS, PASS, UNESCAPE, - SPECIAL, PASS, - // 0 1 2 3 4 5 6 7 8 9 : - // ; < = > ? - UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, - UNESCAPE, PASS, PASS, ESCAPE, PASS, ESCAPE, ESCAPE, - // @ A B C D E F G H I J - // K L M N O - PASS, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, - UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, - // P Q R S T U V W X Y Z - // [ \ ] ^ _ - UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, - UNESCAPE, UNESCAPE, PASS, ESCAPE, PASS, ESCAPE, UNESCAPE, - // ` a b c d e f g h i j - // k l m n o - ESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, - UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, - // p q r s t u v w x y z - // { | } ~ - UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, - UNESCAPE, UNESCAPE, ESCAPE, ESCAPE, ESCAPE, UNESCAPE, ESCAPE, - // ...all the high-bit characters are escaped - ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, - ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, - ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, - ESCAPE, ESCAPE, ESCAPE, 
ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, - ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, - ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, - ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, - ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, - ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, - ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, - ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE}; - -enum DotDisposition { - // The given dot is just part of a filename and is not special. - NOT_A_DIRECTORY, - - // The given dot is the current directory. - DIRECTORY_CUR, - - // The given dot is the first of a double dot that should take us up one. - DIRECTORY_UP -}; - -// When the path resolver finds a dot, this function is called with the -// character following that dot to see what it is. The return value -// indicates what type this dot is (see above). This code handles the case -// where the dot is at the end of the input. -// -// |*consumed_len| will contain the number of characters in the input that -// express what we found. -// -// If the input is "../foo", |after_dot| = 1, |end| = 6, and -// at the end, |*consumed_len| = 2 for the "./" this function consumed. The -// original dot length should be handled by the caller. -template -DotDisposition ClassifyAfterDot(const CHAR* spec, int after_dot, int end, int* consumed_len) { - if (after_dot == end) { - // Single dot at the end. - *consumed_len = 0; - return DIRECTORY_CUR; - } - if (IsURLSlash(spec[after_dot])) { - // Single dot followed by a slash. 
- *consumed_len = 1; // Consume the slash - return DIRECTORY_CUR; - } - - int second_dot_len = IsDot(spec, after_dot, end); - if (second_dot_len) { - int after_second_dot = after_dot + second_dot_len; - if (after_second_dot == end) { - // Double dot at the end. - *consumed_len = second_dot_len; - return DIRECTORY_UP; - } - if (IsURLSlash(spec[after_second_dot])) { - // Double dot followed by a slash. - *consumed_len = second_dot_len + 1; - return DIRECTORY_UP; - } - } - - // The dots are followed by something else, not a directory. - *consumed_len = 0; - return NOT_A_DIRECTORY; -} - -// Rewinds the output to the previous slash. It is assumed that the output -// ends with a slash and this doesn't count (we call this when we are -// appending directory paths, so the previous path component has and ending -// slash). -// -// This will stop at the first slash (assumed to be at position -// |path_begin_in_output| and not go any higher than that. Some web pages -// do ".." too many times, so we need to handle that brokenness. -// -// It searches for a literal slash rather than including a backslash as well -// because it is run only on the canonical output. -// -// The output is guaranteed to end in a slash when this function completes. -void BackUpToPreviousSlash(int path_begin_in_output, CanonOutput* output) { - DCHECK(output->length() > 0); - - int i = output->length() - 1; - DCHECK(output->at(i) == '/'); - if (i == path_begin_in_output) - return; // We're at the first slash, nothing to do. - - // Now back up (skipping the trailing slash) until we find another slash. - i--; - while (output->at(i) != '/' && i > path_begin_in_output) - i--; - - // Now shrink the output to just include that last slash we found. - output->set_length(i + 1); -} - -// Looks for problematic nested escape sequences and escapes the output as -// needed to ensure they can't be misinterpreted. 
-// -// Our concern is that in input escape sequence that's invalid because it -// contains nested escape sequences might look valid once those are unescaped. -// For example, "%%300" is not a valid escape sequence, but after unescaping the -// inner "%30" this becomes "%00" which is valid. Leaving this in the output -// string can result in callers re-canonicalizing the string and unescaping this -// sequence, thus resulting in something fundamentally different than the -// original input here. This can cause a variety of problems. -// -// This function is called after we've just unescaped a sequence that's within -// two output characters of a previous '%' that we know didn't begin a valid -// escape sequence in the input string. We look for whether the output is going -// to turn into a valid escape sequence, and if so, convert the initial '%' into -// an escaped "%25" so the output can't be misinterpreted. -// -// |spec| is the input string we're canonicalizing. -// |next_input_index| is the index of the next unprocessed character in |spec|. -// |input_len| is the length of |spec|. -// |last_invalid_percent_index| is the index in |output| of a previously-seen -// '%' character. The caller knows this '%' character isn't followed by a valid -// escape sequence in the input string. -// |output| is the canonicalized output thus far. The caller guarantees this -// ends with a '%' followed by one or two characters, and the '%' is the one -// pointed to by |last_invalid_percent_index|. The last character in the string -// was just unescaped. -template -void CheckForNestedEscapes(const CHAR* spec, int next_input_index, int input_len, - int last_invalid_percent_index, CanonOutput* output) { - const int length = output->length(); - const char last_unescaped_char = output->at(length - 1); - - // If |output| currently looks like "%c", we need to try appending the next - // input character to see if this will result in a problematic escape - // sequence. 
Note that this won't trigger on the first nested escape of a - // two-escape sequence like "%%30%30" -- we'll allow the conversion to - // "%0%30" -- but the second nested escape will be caught by this function - // when it's called again in that case. - const bool append_next_char = last_invalid_percent_index == length - 2; - if (append_next_char) { - // If the input doesn't contain a 7-bit character next, this case won't be a - // problem. - if ((next_input_index == input_len) || (spec[next_input_index] >= 0x80)) - return; - output->push_back(static_cast(spec[next_input_index])); - } - - // Now output ends like "%cc". Try to unescape this. - int begin = last_invalid_percent_index; - unsigned char temp; - if (DecodeEscaped(output->data(), &begin, output->length(), &temp)) { - // New escape sequence found. Overwrite the characters following the '%' - // with "25", and push_back() the one or two characters that were following - // the '%' when we were called. - if (!append_next_char) - output->push_back(output->at(last_invalid_percent_index + 1)); - output->set(last_invalid_percent_index + 1, '2'); - output->set(last_invalid_percent_index + 2, '5'); - output->push_back(last_unescaped_char); - } else if (append_next_char) { - // Not a valid escape sequence, but we still need to undo appending the next - // source character so the caller can process it normally. - output->set_length(length); - } -} - -// Appends the given path to the output. It assumes that if the input path -// starts with a slash, it should be copied to the output. If no path has -// already been appended to the output (the case when not resolving -// relative URLs), the path should begin with a slash. -// -// If there are already path components (this mode is used when appending -// relative paths for resolving), it assumes that the output already has -// a trailing slash and that if the input begins with a slash, it should be -// copied to the output. 
-// -// We do not collapse multiple slashes in a row to a single slash. It seems -// no web browsers do this, and we don't want incompatibilities, even though -// it would be correct for most systems. -template -bool DoPartialPath(const CHAR* spec, const Component& path, int path_begin_in_output, - CanonOutput* output) { - int end = path.end(); - - // We use this variable to minimize the amount of work done when unescaping -- - // we'll only call CheckForNestedEscapes() when this points at one of the last - // couple of characters in |output|. - int last_invalid_percent_index = INT_MIN; - - bool success = true; - for (int i = path.begin; i < end; i++) { - UCHAR uch = static_cast(spec[i]); - // Chromium UTF8 logic is unneeded, as the missing templated result - // refers only to char const* (single-byte) characters at this time. - // This only trips up MSVC, since linux gcc seems to optimize it away. - // Indention is to avoid gratuitous diffs to origin source - { - unsigned char out_ch = static_cast(uch); - unsigned char flags = kPathCharLookup[out_ch]; - if (flags & SPECIAL) { - // Needs special handling of some sort. - int dotlen; - if ((dotlen = IsDot(spec, i, end)) > 0) { - // See if this dot was preceded by a slash in the output. We - // assume that when canonicalizing paths, they will always - // start with a slash and not a dot, so we don't have to - // bounds check the output. - // - // Note that we check this in the case of dots so we don't have to - // special case slashes. Since slashes are much more common than - // dots, this actually increases performance measurably (though - // slightly). 
- DCHECK(output->length() > path_begin_in_output); - if (output->length() > path_begin_in_output && output->at(output->length() - 1) == '/') { - // Slash followed by a dot, check to see if this is means relative - int consumed_len; - switch (ClassifyAfterDot(spec, i + dotlen, end, &consumed_len)) { - case NOT_A_DIRECTORY: - // Copy the dot to the output, it means nothing special. - output->push_back('.'); - i += dotlen - 1; - break; - case DIRECTORY_CUR: // Current directory, just skip the input. - i += dotlen + consumed_len - 1; - break; - case DIRECTORY_UP: - BackUpToPreviousSlash(path_begin_in_output, output); - i += dotlen + consumed_len - 1; - break; - } - } else { - // This dot is not preceded by a slash, it is just part of some - // file name. - output->push_back('.'); - i += dotlen - 1; - } - - } else if (out_ch == '\\') { - // Convert backslashes to forward slashes - output->push_back('/'); - - } else if (out_ch == '%') { - // Handle escape sequences. - unsigned char unescaped_value; - if (DecodeEscaped(spec, &i, end, &unescaped_value)) { - // Valid escape sequence, see if we keep, reject, or unescape it. - // Note that at this point DecodeEscape() will have advanced |i| to - // the last character of the escape sequence. - char unescaped_flags = kPathCharLookup[unescaped_value]; - - if (unescaped_flags & UNESCAPE) { - // This escaped value shouldn't be escaped. Try to copy it. - output->push_back(unescaped_value); - // If we just unescaped a value within 2 output characters of the - // '%' from a previously-detected invalid escape sequence, we - // might have an input string with problematic nested escape - // sequences; detect and fix them. - if (last_invalid_percent_index >= (output->length() - 3)) { - CheckForNestedEscapes(spec, i + 1, end, last_invalid_percent_index, output); - } - } else { - // Either this is an invalid escaped character, or it's a valid - // escaped character we should keep escaped. 
In the first case we - // should just copy it exactly and remember the error. In the - // second we also copy exactly in case the server is sensitive to - // changing the case of any hex letters. - output->push_back('%'); - output->push_back(static_cast(spec[i - 1])); - output->push_back(static_cast(spec[i])); - if (unescaped_flags & INVALID_BIT) - success = false; - } - } else { - // Invalid escape sequence. IE7+ rejects any URLs with such - // sequences, while other browsers pass them through unchanged. We - // use the permissive behavior. - // TODO(brettw): Consider testing IE's strict behavior, which would - // allow removing the code to handle nested escapes above. - last_invalid_percent_index = output->length(); - output->push_back('%'); - } - - } else if (flags & INVALID_BIT) { - // For NULLs, etc. fail. - AppendEscapedChar(out_ch, output); - success = false; - - } else if (flags & ESCAPE_BIT) { - // This character should be escaped. - AppendEscapedChar(out_ch, output); - } - } else { - // Nothing special about this character, just append it. - output->push_back(out_ch); - } - } - } - return success; -} - -template -bool DoPath(const CHAR* spec, const Component& path, CanonOutput* output, Component* out_path) { - bool success = true; - out_path->begin = output->length(); - if (path.len > 0) { - // Write out an initial slash if the input has none. If we just parse a URL - // and then canonicalize it, it will of course have a slash already. This - // check is for the replacement and relative URL resolving cases of file - // URLs. - if (!IsURLSlash(spec[path.begin])) - output->push_back('/'); - - success = DoPartialPath(spec, path, out_path->begin, output); - } else { - // No input, canonical path is a slash. 
- output->push_back('/'); - } - out_path->len = output->length() - out_path->begin; - return success; -} - -} // namespace - -bool CanonicalizePath(const char* spec, const Component& path, CanonOutput* output, - Component* out_path) { - return DoPath(spec, path, output, out_path); -} - -} // namespace chromium_url diff --git a/source/common/chromium_url/url_canon_stdstring.cc b/source/common/chromium_url/url_canon_stdstring.cc deleted file mode 100644 index 0c61831e5f1ac..0000000000000 --- a/source/common/chromium_url/url_canon_stdstring.cc +++ /dev/null @@ -1,33 +0,0 @@ -// Envoy snapshot of Chromium URL path normalization, see README.md. -// NOLINT(namespace-envoy) - -// Copyright 2013 The Chromium Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include "common/chromium_url/url_canon_stdstring.h" - -namespace chromium_url { - -StdStringCanonOutput::StdStringCanonOutput(std::string* str) : CanonOutput(), str_(str) { - cur_len_ = static_cast(str_->size()); // Append to existing data. - buffer_ = str_->empty() ? NULL : &(*str_)[0]; - buffer_len_ = static_cast(str_->size()); -} - -StdStringCanonOutput::~StdStringCanonOutput() { - // Nothing to do, we don't own the string. -} - -void StdStringCanonOutput::Complete() { - str_->resize(cur_len_); - buffer_len_ = cur_len_; -} - -void StdStringCanonOutput::Resize(int sz) { - str_->resize(sz); - buffer_ = str_->empty() ? NULL : &(*str_)[0]; - buffer_len_ = sz; -} - -} // namespace chromium_url diff --git a/source/common/chromium_url/url_canon_stdstring.h b/source/common/chromium_url/url_canon_stdstring.h deleted file mode 100644 index e14d6c22e74e8..0000000000000 --- a/source/common/chromium_url/url_canon_stdstring.h +++ /dev/null @@ -1,58 +0,0 @@ -// Envoy snapshot of Chromium URL path normalization, see README.md. -// NOLINT(namespace-envoy) - -// Copyright 2013 The Chromium Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef URL_URL_CANON_STDSTRING_H_ -#define URL_URL_CANON_STDSTRING_H_ - -// This header file defines a canonicalizer output method class for STL -// strings. Because the canonicalizer tries not to be dependent on the STL, -// we have segregated it here. - -#include - -#include "common/chromium_url/envoy_shim.h" -#include "common/chromium_url/url_canon.h" - -#define DISALLOW_COPY_AND_ASSIGN(TypeName) \ - TypeName(const TypeName&) = delete; \ - TypeName& operator=(const TypeName&) = delete - -namespace chromium_url { - -// Write into a std::string given in the constructor. This object does not own -// the string itself, and the user must ensure that the string stays alive -// throughout the lifetime of this object. -// -// The given string will be appended to; any existing data in the string will -// be preserved. -// -// Note that when canonicalization is complete, the string will likely have -// unused space at the end because we make the string very big to start out -// with (by |initial_size|). This ends up being important because resize -// operations are slow, and because the base class needs to write directly -// into the buffer. -// -// Therefore, the user should call Complete() before using the string that -// this class wrote into. -class COMPONENT_EXPORT(URL) StdStringCanonOutput : public CanonOutput { -public: - StdStringCanonOutput(std::string* str); - ~StdStringCanonOutput() override; - - // Must be called after writing has completed but before the string is used. 
- void Complete(); - - void Resize(int sz) override; - -protected: - std::string* str_; - DISALLOW_COPY_AND_ASSIGN(StdStringCanonOutput); -}; - -} // namespace chromium_url - -#endif // URL_URL_CANON_STDSTRING_H_ diff --git a/source/common/chromium_url/url_parse.h b/source/common/chromium_url/url_parse.h deleted file mode 100644 index b840af60438d1..0000000000000 --- a/source/common/chromium_url/url_parse.h +++ /dev/null @@ -1,49 +0,0 @@ -// Envoy snapshot of Chromium URL path normalization, see README.md. -// NOLINT(namespace-envoy) - -// Copyright 2013 The Chromium Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef URL_PARSE_H_ -#define URL_PARSE_H_ - -namespace chromium_url { - -// Component ------------------------------------------------------------------ - -// Represents a substring for URL parsing. -struct Component { - Component() : begin(0), len(-1) {} - - // Normal constructor: takes an offset and a length. - Component(int b, int l) : begin(b), len(l) {} - - int end() const { return begin + len; } - - // Returns true if this component is valid, meaning the length is given. Even - // valid components may be empty to record the fact that they exist. - bool is_valid() const { return (len != -1); } - - // Returns true if the given component is specified on false, the component - // is either empty or invalid. - bool is_nonempty() const { return (len > 0); } - - void reset() { - begin = 0; - len = -1; - } - - bool operator==(const Component& other) const { return begin == other.begin && len == other.len; } - - int begin; // Byte offset in the string of this component. - int len; // Will be -1 if the component is unspecified. -}; - -// Helper that returns a component created with the given begin and ending -// points. The ending point is non-inclusive. 
-inline Component MakeRange(int begin, int end) { return Component(begin, end - begin); } - -} // namespace chromium_url - -#endif // URL_PARSE_H_ diff --git a/source/common/chromium_url/url_parse_internal.h b/source/common/chromium_url/url_parse_internal.h deleted file mode 100644 index 0ca47bc488461..0000000000000 --- a/source/common/chromium_url/url_parse_internal.h +++ /dev/null @@ -1,18 +0,0 @@ -// Envoy snapshot of Chromium URL path normalization, see README.md. -// NOLINT(namespace-envoy) - -// Copyright 2013 The Chromium Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef URL_URL_PARSE_INTERNAL_H_ -#define URL_URL_PARSE_INTERNAL_H_ - -namespace chromium_url { - -// We treat slashes and backslashes the same for IE compatibility. -inline bool IsURLSlash(char ch) { return ch == '/' || ch == '\\'; } - -} // namespace chromium_url - -#endif // URL_URL_PARSE_INTERNAL_H_ diff --git a/source/common/common/BUILD b/source/common/common/BUILD index af25042d18c50..b1c7c2d132406 100644 --- a/source/common/common/BUILD +++ b/source/common/common/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_basic_cc_library", @@ -9,15 +7,20 @@ load( "envoy_cc_win32_library", "envoy_include_prefix", "envoy_package", - "envoy_select_boringssl", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( name = "assert_lib", srcs = ["assert.cc"], hdrs = ["assert.h"], + external_deps = [ + "abseil_base", + "abseil_synchronization", + ], deps = [":minimal_logger_lib"], ) @@ -34,7 +37,7 @@ envoy_cc_library( deps = [ ":assert_lib", "//include/envoy/common:backoff_strategy_interface", - "//include/envoy/runtime:runtime_interface", + "//include/envoy/common:random_generator_interface", ], ) @@ -72,6 +75,11 @@ envoy_cc_library( hdrs = ["compiler_requirements.h"], ) +envoy_cc_library( + name = "documentation_url_lib", + hdrs = 
["documentation_url.h"], +) + envoy_cc_library( name = "empty_string", hdrs = ["empty_string.h"], @@ -143,6 +151,23 @@ envoy_cc_library( }), ) +envoy_cc_library( + name = "fancy_logger_lib", + srcs = ["fancy_logger.cc"], + hdrs = ["fancy_logger.h"], + external_deps = ["abseil_synchronization"], + deps = [ + ":base_logger_lib", + ":lock_guard_lib", + ":macros", + ":minimal_logger_lib", + ":non_copyable", + ] + select({ + "//bazel:android_logger": ["logger_impl_lib_android"], + "//conditions:default": ["logger_impl_lib_standard"], + }), +) + envoy_cc_library( name = "base_logger_lib", srcs = ["base_logger.cc"], @@ -195,6 +220,15 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "basic_resource_lib", + hdrs = ["basic_resource_impl.h"], + deps = [ + "//include/envoy/common:resource_interface", + "//include/envoy/runtime:runtime_interface", + ], +) + envoy_cc_library( name = "macros", hdrs = ["macros.h"], @@ -217,6 +251,21 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "random_generator_lib", + srcs = [ + "random_generator.cc", + ], + hdrs = [ + "random_generator.h", + ], + external_deps = ["ssl"], + deps = [ + ":assert_lib", + "//include/envoy/common:random_generator_interface", + ], +) + envoy_cc_library( name = "regex_lib", srcs = ["regex.cc"], @@ -225,6 +274,7 @@ envoy_cc_library( ":assert_lib", "//include/envoy/common:regex_interface", "//source/common/protobuf:utility_lib", + "//source/common/stats:symbol_table_lib", "@com_googlesource_code_re2//:re2", "@envoy_api//envoy/type/matcher/v3:pkg_cc_proto", ], @@ -274,7 +324,9 @@ envoy_cc_library( name = "thread_lib", hdrs = ["thread.h"], external_deps = ["abseil_synchronization"], - deps = envoy_cc_platform_dep("thread_impl_lib"), + deps = envoy_cc_platform_dep("thread_impl_lib") + [ + ":non_copyable", + ], ) envoy_cc_posix_library( @@ -312,6 +364,7 @@ envoy_cc_library( name = "utility_lib", srcs = ["utility.cc"], hdrs = ["utility.h"], + external_deps = ["abseil_node_hash_map"], deps = [ ":assert_lib", 
":hash_lib", @@ -322,67 +375,6 @@ envoy_cc_library( ], ) -genrule( - name = "generate_version_number", - srcs = ["//:VERSION"], - outs = ["version_number.h"], - cmd = """echo "#define BUILD_VERSION_NUMBER \\"$$(cat $<)\\"" >$@""", -) - -genrule( - name = "generate_version_linkstamp", - outs = ["lib/version_linkstamp.h"], - cmd = "$(location :generate_version_linkstamp.sh) >> $@", - # Undocumented attr to depend on workspace status files. - # https://github.com/bazelbuild/bazel/issues/4942 - # Used here because generate_version_linkstamp.sh depends on the workspace status files. - stamp = 1, - tools = [":generate_version_linkstamp.sh"], -) - -genrule( - name = "generate_version_linkstamp_empty", - outs = ["empty/version_linkstamp.h"], - cmd = """>$@""", -) - -envoy_cc_library( - name = "version_includes", - hdrs = [ - "version.h", - ":generate_version_number", - ], - deps = [ - "//source/common/singleton:const_singleton", - "@envoy_api//envoy/config/core/v3:pkg_cc_proto", - ], -) - -envoy_cc_library( - name = "version_lib", - srcs = ["version.cc"], - hdrs = select({ - "//bazel:manual_stamp": [":generate_version_linkstamp"], - # By default the header file is empty. - # This is done so that the definitions linked via the linkstamp rule don't cause collisions. 
- "//conditions:default": [":generate_version_linkstamp_empty"], - }), - copts = envoy_select_boringssl( - ["-DENVOY_SSL_VERSION=\\\"BoringSSL-FIPS\\\""], - ["-DENVOY_SSL_VERSION=\\\"BoringSSL\\\""], - ), - linkstamp = "version_linkstamp.cc", - strip_include_prefix = select({ - "//bazel:manual_stamp": "lib", - "//conditions:default": "empty", - }), - deps = [ - ":version_includes", - "//source/common/common:macros", - "//source/common/protobuf:utility_lib", - ], -) - envoy_cc_library( name = "callback_impl_lib", hdrs = ["callback_impl.h"], @@ -420,16 +412,6 @@ envoy_cc_library( ], ) -envoy_cc_library( - name = "zlib_base_lib", - srcs = ["zlib/base.cc"], - hdrs = ["zlib/base.h"], - external_deps = ["zlib"], - deps = [ - "//source/common/buffer:buffer_lib", - ], -) - envoy_cc_library( name = "statusor_lib", hdrs = ["statusor.h"], diff --git a/source/common/common/assert.cc b/source/common/common/assert.cc index ab4b1b8776a43..1daf69b9abb90 100644 --- a/source/common/common/assert.cc +++ b/source/common/common/assert.cc @@ -1,5 +1,9 @@ #include "common/common/assert.h" +#include "absl/container/flat_hash_map.h" +#include "absl/strings/str_join.h" +#include "absl/synchronization/mutex.h" + namespace Envoy { namespace Assert { @@ -28,15 +32,84 @@ class ActionRegistrationImpl : public ActionRegistration { static std::function debug_assertion_failure_record_action_; }; +// This class implements the logic for triggering ENVOY_BUG logs and actions. Logging and actions +// will be triggered with exponential back-off per file and line bug. +class EnvoyBugRegistrationImpl : public ActionRegistration { +public: + EnvoyBugRegistrationImpl(std::function action) { + ASSERT(envoy_bug_failure_record_action_ == nullptr, + "An ENVOY_BUG action was already set. 
Currently only a single action is supported."); + envoy_bug_failure_record_action_ = action; + counters_.clear(); + } + + ~EnvoyBugRegistrationImpl() override { + ASSERT(envoy_bug_failure_record_action_ != nullptr); + envoy_bug_failure_record_action_ = nullptr; + } + + // This method is invoked when an ENVOY_BUG condition fails. It increments a per file and line + // counter for every ENVOY_BUG hit in a mutex guarded map. + // The implementation may also be a inline static counter per-file and line. There is no benchmark + // to show that the performance of this mutex is any worse than atomic counters. Acquiring and + // releasing a mutex is cheaper than a cache miss, but the mutex here is contended for every + // ENVOY_BUG failure rather than per individual bug. Logging ENVOY_BUGs is not a performance + // critical path, and mutex contention would indicate that there is a serious failure. + // Currently, this choice reduces code size and has the advantage that behavior is easier to + // understand and debug, and test behavior is predictable. + static bool shouldLogAndInvoke(absl::string_view bug_name) { + // Increment counter, inserting first if counter does not exist. + uint64_t counter_value = 0; + { + absl::MutexLock lock(&mutex_); + counter_value = ++counters_[bug_name]; + } + + // Check if counter is power of two by its bitwise representation. + return (counter_value & (counter_value - 1)) == 0; + } + + static void invokeAction() { + if (envoy_bug_failure_record_action_ != nullptr) { + envoy_bug_failure_record_action_(); + } + } + +private: + // This implementation currently only handles one action being set at a time. This is currently + // sufficient. If multiple actions are ever needed, the actions should be chained when + // additional actions are registered. 
+ static std::function envoy_bug_failure_record_action_; + + using EnvoyBugMap = absl::flat_hash_map; + static absl::Mutex mutex_; + static EnvoyBugMap counters_ GUARDED_BY(mutex_); +}; + std::function ActionRegistrationImpl::debug_assertion_failure_record_action_; +std::function EnvoyBugRegistrationImpl::envoy_bug_failure_record_action_; +EnvoyBugRegistrationImpl::EnvoyBugMap EnvoyBugRegistrationImpl::counters_; +absl::Mutex EnvoyBugRegistrationImpl::mutex_; ActionRegistrationPtr setDebugAssertionFailureRecordAction(const std::function& action) { return std::make_unique(action); } -void invokeDebugAssertionFailureRecordAction_ForAssertMacroUseOnly() { +ActionRegistrationPtr setEnvoyBugFailureRecordAction(const std::function& action) { + return std::make_unique(action); +} + +void invokeDebugAssertionFailureRecordActionForAssertMacroUseOnly() { ActionRegistrationImpl::invokeAction(); } +void invokeEnvoyBugFailureRecordActionForEnvoyBugMacroUseOnly() { + EnvoyBugRegistrationImpl::invokeAction(); +} + +bool shouldLogAndInvokeEnvoyBugForEnvoyBugMacroUseOnly(absl::string_view bug_name) { + return EnvoyBugRegistrationImpl::shouldLogAndInvoke(bug_name); +} + } // namespace Assert } // namespace Envoy diff --git a/source/common/common/assert.h b/source/common/common/assert.h index e4c395ca48515..f4683ede34dda 100644 --- a/source/common/common/assert.h +++ b/source/common/common/assert.h @@ -32,13 +32,44 @@ using ActionRegistrationPtr = std::unique_ptr; */ ActionRegistrationPtr setDebugAssertionFailureRecordAction(const std::function& action); +/** + * Sets an action to be invoked when an ENVOY_BUG failure is detected in a release build. This + * action will be invoked each time an ENVOY_BUG failure is detected. + * + * This function is not thread-safe; concurrent calls to set the action are not allowed. + * + * The action may be invoked concurrently if two ENVOY_BUGs in different threads fail at the + * same time, so the action must be thread-safe. 
+ * + * This has no effect in debug builds (envoy bug failure aborts the process). + * + * @param action The action to take when an envoy bug fails. + * @return A registration object. The registration is removed when the object is destructed. + */ +ActionRegistrationPtr setEnvoyBugFailureRecordAction(const std::function& action); + /** * Invokes the action set by setDebugAssertionFailureRecordAction, or does nothing if * no action has been set. * * This should only be called by ASSERT macros in this file. */ -void invokeDebugAssertionFailureRecordAction_ForAssertMacroUseOnly(); +void invokeDebugAssertionFailureRecordActionForAssertMacroUseOnly(); + +/** + * Invokes the action set by setEnvoyBugFailureRecordAction, or does nothing if + * no action has been set. + * + * This should only be called by ENVOY_BUG macros in this file. + */ +void invokeEnvoyBugFailureRecordActionForEnvoyBugMacroUseOnly(); + +/** + * Increments power of two counter for EnvoyBugRegistrationImpl. + * + * This should only be called by ENVOY_BUG macros in this file. + */ +bool shouldLogAndInvokeEnvoyBugForEnvoyBugMacroUseOnly(absl::string_view bug_name); // CONDITION_STR is needed to prevent macros in condition from being expected, which obfuscates // the logged failure, e.g., "EAGAIN" vs "11". @@ -87,7 +118,7 @@ void invokeDebugAssertionFailureRecordAction_ForAssertMacroUseOnly(); #if !defined(NDEBUG) // If this is a debug build. #define ASSERT_ACTION abort() #else // If this is not a debug build, but ENVOY_LOG_DEBUG_ASSERT_IN_RELEASE is defined. 
-#define ASSERT_ACTION Envoy::Assert::invokeDebugAssertionFailureRecordAction_ForAssertMacroUseOnly() +#define ASSERT_ACTION Envoy::Assert::invokeDebugAssertionFailureRecordActionForAssertMacroUseOnly() #endif // !defined(NDEBUG) #define _ASSERT_ORIGINAL(X) _ASSERT_IMPL(X, #X, ASSERT_ACTION, "") @@ -111,7 +142,7 @@ void invokeDebugAssertionFailureRecordAction_ForAssertMacroUseOnly(); // This non-implementation ensures that its argument is a valid expression that can be statically // casted to a bool, but the expression is never evaluated and will be compiled away. #define KNOWN_ISSUE_ASSERT _NULL_ASSERT_IMPL -#endif // defined(ENVOY_DEBUG_KNOWN_ISSUES) +#endif // defined(ENVOY_DISABLE_KNOWN_ISSUE_ASSERTS) // If ASSERT is called with one argument, the ASSERT_SELECTOR will return // _ASSERT_ORIGINAL and this will call _ASSERT_ORIGINAL(__VA_ARGS__). @@ -134,6 +165,47 @@ void invokeDebugAssertionFailureRecordAction_ForAssertMacroUseOnly(); abort(); \ } while (false) +#if !defined(NDEBUG) +#define ENVOY_BUG_ACTION abort() +#else +#define ENVOY_BUG_ACTION Envoy::Assert::invokeEnvoyBugFailureRecordActionForEnvoyBugMacroUseOnly() +#endif + +// These macros are needed to stringify __LINE__ correctly. +#define STRINGIFY(X) #X +#define TOSTRING(X) STRINGIFY(X) + +// CONDITION_STR is needed to prevent macros in condition from being expected, which obfuscates +// the logged failure, e.g., "EAGAIN" vs "11". +// ENVOY_BUG logging and actions are invoked only on power-of-two instances per log line. +#define _ENVOY_BUG_IMPL(CONDITION, CONDITION_STR, ACTION, DETAILS) \ + do { \ + if (!(CONDITION) && Envoy::Assert::shouldLogAndInvokeEnvoyBugForEnvoyBugMacroUseOnly( \ + __FILE__ ":" TOSTRING(__LINE__))) { \ + const std::string& details = (DETAILS); \ + ENVOY_LOG_TO_LOGGER(Envoy::Logger::Registry::getLog(Envoy::Logger::Id::envoy_bug), error, \ + "envoy bug failure: {}.{}{}", CONDITION_STR, \ + details.empty() ? 
"" : " Details: ", details); \ + ACTION; \ + } \ + } while (false) + +#define _ENVOY_BUG_VERBOSE(X, Y) _ENVOY_BUG_IMPL(X, #X, ENVOY_BUG_ACTION, Y) + +// This macro is needed to help to remove: "warning C4003: not enough arguments for function-like +// macro invocation ''" when expanding __VA_ARGS__. In our setup, MSVC treats this +// warning as an error. A sample code to reproduce the case: https://godbolt.org/z/M4zZNG. +#define PASS_ON(...) __VA_ARGS__ + +/** + * Indicate a failure condition that should never be met in normal circumstances. In contrast + * with ASSERT, an ENVOY_BUG is compiled in release mode. If a failure condition is met in release + * mode, it is logged and a stat is incremented with exponential back-off per ENVOY_BUG. In debug + * mode, it will crash if the condition is not met. ENVOY_BUG must be called with two arguments for + * verbose logging. + */ +#define ENVOY_BUG(...) PASS_ON(PASS_ON(_ENVOY_BUG_VERBOSE)(__VA_ARGS__)) + // NOT_IMPLEMENTED_GCOVR_EXCL_LINE is for overridden functions that are expressly not implemented. // The macro name includes "GCOVR_EXCL_LINE" to exclude the macro's usage from code coverage // reports. 
diff --git a/source/common/common/backoff_strategy.cc b/source/common/common/backoff_strategy.cc index b8241436aa212..c9b5b61b733be 100644 --- a/source/common/common/backoff_strategy.cc +++ b/source/common/common/backoff_strategy.cc @@ -3,7 +3,7 @@ namespace Envoy { JitteredBackOffStrategy::JitteredBackOffStrategy(uint64_t base_interval, uint64_t max_interval, - Runtime::RandomGenerator& random) + Random::RandomGenerator& random) : base_interval_(base_interval), max_interval_(max_interval), next_interval_(base_interval), random_(random) { ASSERT(base_interval_ > 0); diff --git a/source/common/common/backoff_strategy.h b/source/common/common/backoff_strategy.h index be84ec5e865e8..2484f3e11b201 100644 --- a/source/common/common/backoff_strategy.h +++ b/source/common/common/backoff_strategy.h @@ -4,7 +4,7 @@ #include #include "envoy/common/backoff_strategy.h" -#include "envoy/runtime/runtime.h" +#include "envoy/common/random_generator.h" #include "common/common/assert.h" @@ -24,7 +24,7 @@ class JitteredBackOffStrategy : public BackOffStrategy { * @param random the random generator. 
*/ JitteredBackOffStrategy(uint64_t base_interval, uint64_t max_interval, - Runtime::RandomGenerator& random); + Random::RandomGenerator& random); // BackOffStrategy methods uint64_t nextBackOffMs() override; @@ -34,7 +34,7 @@ class JitteredBackOffStrategy : public BackOffStrategy { const uint64_t base_interval_; const uint64_t max_interval_{}; uint64_t next_interval_; - Runtime::RandomGenerator& random_; + Random::RandomGenerator& random_; }; /** diff --git a/source/common/common/base_logger.cc b/source/common/common/base_logger.cc index 912c649337d65..2491ab389dc33 100644 --- a/source/common/common/base_logger.cc +++ b/source/common/common/base_logger.cc @@ -3,7 +3,7 @@ namespace Envoy { namespace Logger { -const char* Logger::DEFAULT_LOG_FORMAT = "[%Y-%m-%d %T.%e][%t][%l][%n] %v"; +const char* Logger::DEFAULT_LOG_FORMAT = "[%Y-%m-%d %T.%e][%t][%l][%n] [%g:%#] %v"; Logger::Logger(std::shared_ptr logger) : logger_(logger) { logger_->set_pattern(DEFAULT_LOG_FORMAT); diff --git a/source/common/common/basic_resource_impl.h b/source/common/common/basic_resource_impl.h new file mode 100644 index 0000000000000..820412e04a88a --- /dev/null +++ b/source/common/common/basic_resource_impl.h @@ -0,0 +1,60 @@ +#pragma once + +#include + +#include "envoy/common/resource.h" +#include "envoy/runtime/runtime.h" + +#include "common/common/assert.h" + +#include "absl/types/optional.h" + +namespace Envoy { + +/** + * A handle to track some limited resource. + * + * NOTE: + * This implementation makes some assumptions which favor simplicity over correctness. Though + * atomics are used, it is possible for resources to temporarily go above the supplied maximums. + * This should not effect overall behavior. 
+ */ +class BasicResourceLimitImpl : public ResourceLimit { +public: + BasicResourceLimitImpl(uint64_t max, Runtime::Loader& runtime, const std::string& runtime_key) + : max_(max), runtime_(&runtime), runtime_key_(runtime_key) {} + BasicResourceLimitImpl(uint64_t max) : max_(max) {} + BasicResourceLimitImpl() : max_(std::numeric_limits::max()) {} + + bool canCreate() override { return current_.load() < max(); } + + void inc() override { ++current_; } + + void dec() override { decBy(1); } + + void decBy(uint64_t amount) override { + ASSERT(current_ >= amount); + current_ -= amount; + } + + uint64_t max() override { + return (runtime_ != nullptr && runtime_key_.has_value()) + ? runtime_->snapshot().getInteger(runtime_key_.value(), max_) + : max_; + } + + uint64_t count() const override { return current_.load(); } + + void setMax(uint64_t new_max) { max_ = new_max; } + void resetMax() { max_ = std::numeric_limits::max(); } + +protected: + std::atomic current_{}; + +private: + uint64_t max_; + Runtime::Loader* runtime_{nullptr}; + const absl::optional runtime_key_; +}; + +} // namespace Envoy diff --git a/source/common/common/callback_impl.h b/source/common/common/callback_impl.h index 3e9b554df7e2d..d1d3ebdbe4962 100644 --- a/source/common/common/callback_impl.h +++ b/source/common/common/callback_impl.h @@ -24,6 +24,9 @@ template class CallbackManager { */ CallbackHandle* add(Callback callback) { callbacks_.emplace_back(*this, callback); + // get the list iterator of added callback handle, which will be used to remove itself from + // callbacks_ list. 
+ callbacks_.back().it_ = (--callbacks_.end()); return &callbacks_.back(); } @@ -46,24 +49,21 @@ template class CallbackManager { CallbackHolder(CallbackManager& parent, Callback cb) : parent_(parent), cb_(cb) {} // CallbackHandle - void remove() override { parent_.remove(this); } + void remove() override { parent_.remove(it_); } CallbackManager& parent_; Callback cb_; + + // the iterator of this callback holder inside callbacks_ list + // upon removal, use this iterator to delete callback holder in O(1) + typename std::list::iterator it_; }; /** * Remove a member update callback added via add(). * @param handle supplies the callback handle to remove. */ - void remove(CallbackHandle* handle) { - ASSERT(std::find_if(callbacks_.begin(), callbacks_.end(), - [handle](const CallbackHolder& holder) -> bool { - return handle == &holder; - }) != callbacks_.end()); - callbacks_.remove_if( - [handle](const CallbackHolder& holder) -> bool { return handle == &holder; }); - } + void remove(typename std::list::iterator& it) { callbacks_.erase(it); } std::list callbacks_; }; diff --git a/source/common/common/documentation_url.h b/source/common/common/documentation_url.h new file mode 100644 index 0000000000000..dc3e0e352209a --- /dev/null +++ b/source/common/common/documentation_url.h @@ -0,0 +1,13 @@ +namespace Envoy { + +// TODO(ggreenway): replace 'latest' with the current version, pulled from the VERSION file at +// the root of the repo. 
+#define ENVOY_DOC_URL_ROOT "https://www.envoyproxy.io/docs/envoy/latest" + +#define ENVOY_DOC_URL_VERSION_HISTORY ENVOY_DOC_URL_ROOT "/version_history/version_history" + +#define ENVOY_DOC_URL_RUNTIME_OVERRIDE_DEPRECATED \ + ENVOY_DOC_URL_ROOT \ + "/configuration/operations/runtime#using-runtime-overrides-for-deprecated-features" + +} // namespace Envoy diff --git a/source/common/common/fancy_logger.cc b/source/common/common/fancy_logger.cc new file mode 100644 index 0000000000000..ef90afeefb982 --- /dev/null +++ b/source/common/common/fancy_logger.cc @@ -0,0 +1,102 @@ +#include "common/common/fancy_logger.h" + +#include +#include + +#include "common/common/logger.h" + +using spdlog::level::level_enum; + +namespace Envoy { + +/** + * Implements a lock from BasicLockable, to avoid dependency problem of thread.h. + */ +class FancyBasicLockable : public Thread::BasicLockable { +public: + // BasicLockable + void lock() ABSL_EXCLUSIVE_LOCK_FUNCTION() override { mutex_.Lock(); } + bool tryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) override { return mutex_.TryLock(); } + void unlock() ABSL_UNLOCK_FUNCTION() override { mutex_.Unlock(); } + +private: + absl::Mutex mutex_; +}; + +SpdLoggerSharedPtr FancyContext::getFancyLogEntry(std::string key) + ABSL_LOCKS_EXCLUDED(fancy_log_lock_) { + absl::ReaderMutexLock l(&fancy_log_lock_); + return fancy_log_map_->find(key)->second; +} + +void FancyContext::initFancyLogger(std::string key, std::atomic& logger) + ABSL_LOCKS_EXCLUDED(fancy_log_lock_) { + absl::WriterMutexLock l(&fancy_log_lock_); + auto it = fancy_log_map_->find(key); + spdlog::logger* target; + if (it == fancy_log_map_->end()) { + target = createLogger(key); + } else { + target = it->second.get(); + } + logger.store(target); +} + +bool FancyContext::setFancyLogger(std::string key, level_enum log_level) + ABSL_LOCKS_EXCLUDED(fancy_log_lock_) { + absl::ReaderMutexLock l(&fancy_log_lock_); + auto it = fancy_log_map_->find(key); + if (it != fancy_log_map_->end()) { + 
it->second->set_level(log_level); + return true; + } + return false; +} + +void FancyContext::setDefaultFancyLevelFormat(spdlog::level::level_enum level, std::string format) + ABSL_LOCKS_EXCLUDED(fancy_log_lock_) { + if (level == Logger::Context::getFancyDefaultLevel() && + format == Logger::Context::getFancyLogFormat()) { + return; + } + absl::ReaderMutexLock l(&fancy_log_lock_); + for (const auto& it : *fancy_log_map_) { + if (it.second->level() == Logger::Context::getFancyDefaultLevel()) { + // if logger is default level now + it.second->set_level(level); + } + it.second->set_pattern(format); + } +} + +void FancyContext::initSink() { + spdlog::sink_ptr sink = Logger::Registry::getSink(); + Logger::DelegatingLogSinkSharedPtr sp = std::static_pointer_cast(sink); + if (!sp->hasLock()) { + static FancyBasicLockable tlock; + sp->setLock(tlock); + sp->setShouldEscape(false); + } +} + +spdlog::logger* FancyContext::createLogger(std::string key, int level) + ABSL_EXCLUSIVE_LOCKS_REQUIRED(fancy_log_lock_) { + SpdLoggerSharedPtr new_logger = + std::make_shared(key, Logger::Registry::getSink()); + if (!Logger::Registry::getSink()->hasLock()) { // occurs in benchmark test + initSink(); + } + level_enum lv = Logger::Context::getFancyDefaultLevel(); + if (level > -1) { + lv = static_cast(level); + } + new_logger->set_level(lv); + new_logger->set_pattern(Logger::Context::getFancyLogFormat()); + new_logger->flush_on(level_enum::critical); + fancy_log_map_->insert(std::make_pair(key, new_logger)); + return new_logger.get(); +} + +FancyContext& getFancyContext() { MUTABLE_CONSTRUCT_ON_FIRST_USE(FancyContext); } + +} // namespace Envoy diff --git a/source/common/common/fancy_logger.h b/source/common/common/fancy_logger.h new file mode 100644 index 0000000000000..dee92922d029f --- /dev/null +++ b/source/common/common/fancy_logger.h @@ -0,0 +1,116 @@ +#pragma once + +#include + +#include "common/common/macros.h" + +#include "absl/container/flat_hash_map.h" +#include 
"absl/synchronization/mutex.h" +#include "spdlog/spdlog.h" + +namespace Envoy { + +using SpdLoggerSharedPtr = std::shared_ptr; +using FancyMap = absl::flat_hash_map; +using FancyMapPtr = std::shared_ptr; + +/** + * Stores the lock and functions used by Fancy Logger's macro so that we don't need to declare + * them globally. Functions are provided to initialize a logger, set log level, flush a logger. + */ +class FancyContext { +public: + /** + * Gets a logger from map given a key (e.g. file name). + */ + SpdLoggerSharedPtr getFancyLogEntry(std::string key) ABSL_LOCKS_EXCLUDED(fancy_log_lock_); + + /** + * Initializes Fancy Logger and register it in global map if not done. + */ + void initFancyLogger(std::string key, std::atomic& logger) + ABSL_LOCKS_EXCLUDED(fancy_log_lock_); + + /** + * Sets log level. If not found, return false. + */ + bool setFancyLogger(std::string key, spdlog::level::level_enum log_level) + ABSL_LOCKS_EXCLUDED(fancy_log_lock_); + + /** + * Sets the default logger level and format when updating context. + */ + void setDefaultFancyLevelFormat(spdlog::level::level_enum level, std::string format) + ABSL_LOCKS_EXCLUDED(fancy_log_lock_); + +private: + /** + * Initializes sink for the initialization of loggers, needed only in benchmark test. + */ + void initSink(); + + /** + * Creates a logger given key and log level, and add it to map. + * Key is the log component name, e.g. file name now. + */ + spdlog::logger* createLogger(std::string key, int level = -1) + ABSL_EXCLUSIVE_LOCKS_REQUIRED(fancy_log_lock_); + + /** + * Lock for the following map (not for the corresponding loggers). + */ + absl::Mutex fancy_log_lock_; + + /** + * Map that stores pairs, key can be the file name. + */ + FancyMapPtr fancy_log_map_ ABSL_GUARDED_BY(fancy_log_lock_) = std::make_shared(); +}; + +FancyContext& getFancyContext(); + +#define FANCY_KEY std::string(__FILE__) + +/** + * Macro for fancy logger. 
+ * Uses a global map to store logger and take use of thread-safe spdlog::logger. + * The local pointer is used to avoid another load() when logging. Here we use + * spdlog::logger* as atomic is a C++20 feature. + */ +#define FANCY_LOG(LEVEL, ...) \ + do { \ + static std::atomic flogger{0}; \ + spdlog::logger* local_flogger = flogger.load(std::memory_order_relaxed); \ + if (!local_flogger) { \ + getFancyContext().initFancyLogger(FANCY_KEY, flogger); \ + local_flogger = flogger.load(std::memory_order_relaxed); \ + } \ + local_flogger->log(spdlog::source_loc{__FILE__, __LINE__, __func__}, \ + ENVOY_SPDLOG_LEVEL(LEVEL), __VA_ARGS__); \ + } while (0) + +/** + * Convenient macro for connection log. + */ +#define FANCY_CONN_LOG(LEVEL, FORMAT, CONNECTION, ...) \ + FANCY_LOG(LEVEL, "[C{}] " FORMAT, (CONNECTION).id(), ##__VA_ARGS__) + +/** + * Convenient macro for stream log. + */ +#define FANCY_STREAM_LOG(LEVEL, FORMAT, STREAM, ...) \ + FANCY_LOG(LEVEL, "[C{}][S{}] " FORMAT, (STREAM).connection() ? (STREAM).connection()->id() : 0, \ + (STREAM).streamId(), ##__VA_ARGS__) + +/** + * Convenient macro for log flush. + */ +#define FANCY_FLUSH_LOG() \ + do { \ + SpdLoggerSharedPtr p = getFancyContext().getFancyLogEntry(FANCY_KEY); \ + if (p) { \ + p->flush(); \ + } \ + } while (0) + +} // namespace Envoy diff --git a/source/common/common/hash.cc b/source/common/common/hash.cc index 76fcf9a1df59d..c39ec996bf836 100644 --- a/source/common/common/hash.cc +++ b/source/common/common/hash.cc @@ -8,7 +8,7 @@ namespace Envoy { // platforms are needed. 
// from // (https://gcc.gnu.org/git/?p=gcc.git;a=blob_plain;f=libstdc%2b%2b-v3/libsupc%2b%2b/hash_bytes.cc) -uint64_t MurmurHash::murmurHash2_64(absl::string_view key, uint64_t seed) { +uint64_t MurmurHash::murmurHash2(absl::string_view key, uint64_t seed) { static const uint64_t mul = 0xc6a4a7935bd1e995UL; const char* const buf = static_cast(key.data()); uint64_t len = key.size(); @@ -19,18 +19,18 @@ uint64_t MurmurHash::murmurHash2_64(absl::string_view key, uint64_t seed) { const char* const end = buf + len_aligned; uint64_t hash = seed ^ (len * mul); for (const char* p = buf; p != end; p += 8) { - const uint64_t data = shift_mix(unaligned_load(p) * mul) * mul; + const uint64_t data = shiftMix(unalignedLoad(p) * mul) * mul; hash ^= data; hash *= mul; } if ((len & 0x7) != 0) { - const uint64_t data = load_bytes(end, len & 0x7); + const uint64_t data = loadBytes(end, len & 0x7); hash ^= data; hash *= mul; } - hash = shift_mix(hash) * mul; - hash = shift_mix(hash); + hash = shiftMix(hash) * mul; + hash = shiftMix(hash); return hash; } diff --git a/source/common/common/hash.h b/source/common/common/hash.h index 29c0072742082..c29b9effa89d2 100644 --- a/source/common/common/hash.h +++ b/source/common/common/hash.h @@ -1,8 +1,6 @@ #pragma once #include -#include -#include #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" @@ -55,17 +53,17 @@ class MurmurHash { * @param seed the seed to use for the hash * @return 64-bit hash representation of the supplied string view */ - static uint64_t murmurHash2_64(absl::string_view key, uint64_t seed = STD_HASH_SEED); + static uint64_t murmurHash2(absl::string_view key, uint64_t seed = STD_HASH_SEED); private: - static inline uint64_t unaligned_load(const char* p) { + static inline uint64_t unalignedLoad(const char* p) { uint64_t result; memcpy(&result, p, sizeof(result)); return result; } // Loads n bytes, where 1 <= n < 8. 
- static inline uint64_t load_bytes(const char* p, int n) { + static inline uint64_t loadBytes(const char* p, int n) { uint64_t result = 0; --n; do { @@ -74,23 +72,9 @@ class MurmurHash { return result; } - static inline uint64_t shift_mix(uint64_t v) { return v ^ (v >> 47); } + static inline uint64_t shiftMix(uint64_t v) { return v ^ (v >> 47); } }; -struct ConstCharStarHash { - size_t operator()(const char* a) const { return HashUtil::xxHash64(a); } -}; - -struct ConstCharStarEqual { - size_t operator()(const char* a, const char* b) const { return strcmp(a, b) == 0; } -}; - -template -using ConstCharStarHashMap = - absl::flat_hash_map; -using ConstCharStarHashSet = - absl::flat_hash_set; - using SharedString = std::shared_ptr; struct HeterogeneousStringHash { diff --git a/source/common/common/hex.cc b/source/common/common/hex.cc index 1fc6b603133f6..7c0b7f8c2e132 100644 --- a/source/common/common/hex.cc +++ b/source/common/common/hex.cc @@ -73,4 +73,15 @@ std::string Hex::uint32ToHex(uint32_t value) { return encode(data.data(), data.size()); } + +std::string Hex::uint16ToHex(uint16_t value) { + std::array data; + + // This is explicitly done for performance reasons + // using std::stringstream with std::hex is ~3 orders of magnitude slower. 
+ data[1] = (value & 0x00FF); + data[0] = (value & 0xFF00) >> 8; + + return encode(data.data(), data.size()); +} } // namespace Envoy diff --git a/source/common/common/hex.h b/source/common/common/hex.h index d72fa77a7c953..aba722a4fc06a 100644 --- a/source/common/common/hex.h +++ b/source/common/common/hex.h @@ -16,7 +16,7 @@ class Hex final { * @return the hex encoded string representing data */ static std::string encode(const std::vector& data) { - return encode(&data[0], data.size()); + return encode(data.data(), data.size()); } /** @@ -49,5 +49,13 @@ class Hex final { * @return value as hexadecimal string */ static std::string uint32ToHex(uint32_t value); + + /** + * Converts the given 16-bit unsigned integer into a hexadecimal string. + * The result is always a string of 4 characters left padded with zeroes. + * @param value The unsigned integer to be converted. + * @return value as hexadecimal string + */ + static std::string uint16ToHex(uint16_t value); }; } // namespace Envoy diff --git a/source/common/common/linked_object.h b/source/common/common/linked_object.h index 13fc6d491567b..9c65e085b76f7 100644 --- a/source/common/common/linked_object.h +++ b/source/common/common/linked_object.h @@ -6,6 +6,40 @@ #include "common/common/assert.h" namespace Envoy { + +/** + * Helper methods for placing LinkedObject into a list. + */ +namespace LinkedList { + +/** + * Move an item into a linked list at the front. + * @param item supplies the item to move in. + * @param list supplies the list to move the item into. + */ +template +void moveIntoList(std::unique_ptr&& item, std::list>& list) { + ASSERT(!item->inserted_); + item->inserted_ = true; + auto position = list.emplace(list.begin(), std::move(item)); + (*position)->entry_ = position; +} + +/** + * Move an item into a linked list at the back. + * @param item supplies the item to move in. + * @param list supplies the list to move the item into. 
+ */ +template +void moveIntoListBack(std::unique_ptr&& item, std::list>& list) { + ASSERT(!item->inserted_); + item->inserted_ = true; + auto position = list.emplace(list.end(), std::move(item)); + (*position)->entry_ = position; +} + +} // namespace LinkedList + /** * Mixin class that allows an object contained in a unique pointer to be easily linked and unlinked * from lists. @@ -39,28 +73,6 @@ template class LinkedObject { dst.splice(dst.begin(), src, entry_); } - /** - * Move an item into a linked list at the front. - * @param item supplies the item to move in. - * @param list supplies the list to move the item into. - */ - void moveIntoList(std::unique_ptr&& item, ListType& list) { - ASSERT(!inserted_); - inserted_ = true; - entry_ = list.emplace(list.begin(), std::move(item)); - } - - /** - * Move an item into a linked list at the back. - * @param item supplies the item to move in. - * @param list supplies the list to move the item into. - */ - void moveIntoListBack(std::unique_ptr&& item, ListType& list) { - ASSERT(!inserted_); - inserted_ = true; - entry_ = list.emplace(list.end(), std::move(item)); - } - /** * Remove this item from a list. * @param list supplies the list to remove from. This item should be in this list. @@ -79,6 +91,11 @@ template class LinkedObject { LinkedObject() = default; private: + template + friend void LinkedList::moveIntoList(std::unique_ptr&&, std::list>&); + template + friend void LinkedList::moveIntoListBack(std::unique_ptr&&, std::list>&); + typename ListType::iterator entry_; bool inserted_{false}; // iterators do not have any "invalid" value so we need this boolean for // sanity checking. 
diff --git a/source/common/common/lock_guard.h b/source/common/common/lock_guard.h index d3025a333999e..f1524469067dc 100644 --- a/source/common/common/lock_guard.h +++ b/source/common/common/lock_guard.h @@ -39,7 +39,7 @@ class ABSL_SCOPED_LOCKABLE OptionalLockGuard { // At the moment, TryLockGuard is very hard to annotate correctly, I // believe due to limitations in clang. At the moment there are no -// GUARDED_BY variables for any tryLocks in the codebase, so it's +// ABSL_GUARDED_BY variables for any tryLocks in the codebase, so it's // easiest just to leave it out. In a future clang release it's // possible we can enable this. See also the commented-out block // in ThreadTest.TestTryLockGuard in test/common/common/thread_test.cc. @@ -64,7 +64,7 @@ class ABSL_SCOPED_LOCKABLE TryLockGuard { /** * Destruction of the TryLockGuard unlocks the lock, if it was locked. */ - ~TryLockGuard() DISABLE_TRYLOCKGUARD_ANNOTATION(UNLOCK_FUNCTION()) { + ~TryLockGuard() DISABLE_TRYLOCKGUARD_ANNOTATION(ABSL_UNLOCK_FUNCTION()) { if (is_locked_) { lock_.unlock(); } @@ -73,7 +73,7 @@ class ABSL_SCOPED_LOCKABLE TryLockGuard { /** * @return bool whether the lock was successfully acquired. 
*/ - bool tryLock() DISABLE_TRYLOCKGUARD_ANNOTATION(EXCLUSIVE_TRYLOCK_FUNCTION(true)) { + bool tryLock() DISABLE_TRYLOCKGUARD_ANNOTATION(ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true)) { is_locked_ = lock_.tryLock(); return is_locked_; } diff --git a/source/common/common/logger.cc b/source/common/common/logger.cc index 3816c0f61ca79..5304848762027 100644 --- a/source/common/common/logger.cc +++ b/source/common/common/logger.cc @@ -19,18 +19,34 @@ namespace Logger { StandardLogger::StandardLogger(const std::string& name) : Logger(std::make_shared(name, Registry::getSink())) {} -SinkDelegate::SinkDelegate(DelegatingLogSinkSharedPtr log_sink) - : previous_delegate_(log_sink->delegate()), log_sink_(log_sink) { - log_sink->setDelegate(this); -} +SinkDelegate::SinkDelegate(DelegatingLogSinkSharedPtr log_sink) : log_sink_(log_sink) {} SinkDelegate::~SinkDelegate() { - assert(log_sink_->delegate() == this); // Ensures stacked allocation of delegates. + // The previous delegate should have never been set or should have been reset by now via + // restoreDelegate(); + assert(previous_delegate_ == nullptr); +} + +void SinkDelegate::setDelegate() { + // There should be no previous delegate before this call. + assert(previous_delegate_ == nullptr); + previous_delegate_ = log_sink_->delegate(); + log_sink_->setDelegate(this); +} + +void SinkDelegate::restoreDelegate() { + // Ensures stacked allocation of delegates. 
+ assert(log_sink_->delegate() == this); log_sink_->setDelegate(previous_delegate_); + previous_delegate_ = nullptr; } StderrSinkDelegate::StderrSinkDelegate(DelegatingLogSinkSharedPtr log_sink) - : SinkDelegate(log_sink) {} + : SinkDelegate(log_sink) { + setDelegate(); +} + +StderrSinkDelegate::~StderrSinkDelegate() { restoreDelegate(); } void StderrSinkDelegate::log(absl::string_view msg) { Thread::OptionalLockGuard guard(lock_); @@ -60,6 +76,13 @@ void DelegatingLogSink::log(const spdlog::details::log_msg& msg) { } lock.Release(); + // Hold the sink mutex while performing the actual logging. This prevents the sink from being + // swapped during an individual log event. + // TODO(mattklein123): In production this lock will never be contended. In practice, thread + // protection is really only needed in tests. It would be nice to figure out a test-only + // mechanism for this that does not require extra locking that we don't explicitly need in the + // prod code. + absl::ReaderMutexLock sink_lock(&sink_mutex_); if (should_escape_) { sink_->log(escapeLogLine(msg_view)); } else { @@ -103,11 +126,30 @@ Context::~Context() { } } -void Context::activate() { +void Context::activate(LoggerMode mode) { Registry::getSink()->setLock(lock_); - Registry::getSink()->set_should_escape(should_escape_); + Registry::getSink()->setShouldEscape(should_escape_); Registry::setLogLevel(log_level_); Registry::setLogFormat(log_format_); + + if (mode == LoggerMode::Fancy) { + fancy_default_level_ = log_level_; + fancy_log_format_ = log_format_; + } +} + +std::string Context::getFancyLogFormat() { + if (!current_context) { // Context is not instantiated in benchmark test + return "[%Y-%m-%d %T.%e][%t][%l][%n] %v"; + } + return current_context->fancy_log_format_; +} + +spdlog::level::level_enum Context::getFancyDefaultLevel() { + if (!current_context) { + return spdlog::level::info; + } + return current_context->fancy_default_level_; } std::vector& Registry::allLoggers() { diff --git 
a/source/common/common/logger.h b/source/common/common/logger.h index 30b44628076dc..5c2ed08f497c4 100644 --- a/source/common/common/logger.h +++ b/source/common/common/logger.h @@ -13,6 +13,7 @@ #include "common/common/macros.h" #include "common/common/non_copyable.h" +#include "absl/container/flat_hash_map.h" #include "absl/strings/string_view.h" #include "absl/synchronization/mutex.h" #include "fmt/ostream.h" @@ -34,6 +35,9 @@ namespace Logger { FUNCTION(conn_handler) \ FUNCTION(decompression) \ FUNCTION(dubbo) \ + FUNCTION(envoy_bug) \ + FUNCTION(ext_authz) \ + FUNCTION(rocketmq) \ FUNCTION(file) \ FUNCTION(filter) \ FUNCTION(forward_proxy) \ @@ -101,10 +105,21 @@ class SinkDelegate : NonCopyable { virtual void flush() PURE; protected: - SinkDelegate* previous_delegate() { return previous_delegate_; } + // Swap the current log sink delegate for this one. This should be called by the derived class + // constructor immediately before returning. This is required to match restoreDelegate(), + // otherwise it's possible for the previous delegate to get set in the base class constructor, + // the derived class constructor throws, and cleanup becomes broken. + void setDelegate(); + + // Swap the current log sink (this) for the previous one. This should be called by the derived + // class destructor in the body. This is critical as otherwise it's possible for a log message + // to get routed to a partially destructed sink. 
+ void restoreDelegate(); + + SinkDelegate* previousDelegate() { return previous_delegate_; } private: - SinkDelegate* previous_delegate_; + SinkDelegate* previous_delegate_{nullptr}; DelegatingLogSinkSharedPtr log_sink_; }; @@ -114,6 +129,7 @@ class SinkDelegate : NonCopyable { class StderrSinkDelegate : public SinkDelegate { public: explicit StderrSinkDelegate(DelegatingLogSinkSharedPtr log_sink); + ~StderrSinkDelegate() override; // SinkDelegate void log(absl::string_view msg) override; @@ -122,7 +138,6 @@ class StderrSinkDelegate : public SinkDelegate { bool hasLock() const { return lock_ != nullptr; } void setLock(Thread::BasicLockable& lock) { lock_ = &lock; } void clearLock() { lock_ = nullptr; } - Thread::BasicLockable* lock() { return lock_; } private: Thread::BasicLockable* lock_{}; @@ -139,12 +154,15 @@ class DelegatingLogSink : public spdlog::sinks::sink { // spdlog::sinks::sink void log(const spdlog::details::log_msg& msg) override; - void flush() override { sink_->flush(); } + void flush() override { + absl::ReaderMutexLock lock(&sink_mutex_); + sink_->flush(); + } void set_pattern(const std::string& pattern) override { set_formatter(spdlog::details::make_unique(pattern)); } void set_formatter(std::unique_ptr formatter) override; - void set_should_escape(bool should_escape) { should_escape_ = should_escape; } + void setShouldEscape(bool should_escape) { should_escape_ = should_escape; } /** * @return bool whether a lock has been established. 
@@ -178,16 +196,25 @@ class DelegatingLogSink : public spdlog::sinks::sink { DelegatingLogSink() = default; - void setDelegate(SinkDelegate* sink) { sink_ = sink; } - SinkDelegate* delegate() { return sink_; } + void setDelegate(SinkDelegate* sink) { + absl::WriterMutexLock lock(&sink_mutex_); + sink_ = sink; + } + SinkDelegate* delegate() { + absl::ReaderMutexLock lock(&sink_mutex_); + return sink_; + } - SinkDelegate* sink_{nullptr}; + SinkDelegate* sink_ ABSL_GUARDED_BY(sink_mutex_){nullptr}; + absl::Mutex sink_mutex_; std::unique_ptr stderr_sink_; // Builtin sink to use as a last resort. std::unique_ptr formatter_ ABSL_GUARDED_BY(format_mutex_); - absl::Mutex format_mutex_; // direct absl reference to break build cycle. + absl::Mutex format_mutex_; bool should_escape_{false}; }; +enum class LoggerMode { Envoy, Fancy }; + /** * Defines a scope for the logging system with the specified lock and log level. * This is equivalent to setLogLevel, setLogFormat, and setLock, which can be @@ -205,14 +232,20 @@ class Context { Thread::BasicLockable& lock, bool should_escape); ~Context(); + static std::string getFancyLogFormat(); + static spdlog::level::level_enum getFancyDefaultLevel(); + private: - void activate(); + void activate(LoggerMode mode = LoggerMode::Envoy); const spdlog::level::level_enum log_level_; const std::string log_format_; Thread::BasicLockable& lock_; bool should_escape_; Context* const save_context_; + + std::string fancy_log_format_ = "[%Y-%m-%d %T.%e][%t][%l][%n] %v"; + spdlog::level::level_enum fancy_default_level_ = spdlog::level::info; }; /** diff --git a/source/common/common/logger_delegates.cc b/source/common/common/logger_delegates.cc index 31685fd2671f2..2fedc7838edb0 100644 --- a/source/common/common/logger_delegates.cc +++ b/source/common/common/logger_delegates.cc @@ -13,7 +13,11 @@ namespace Logger { FileSinkDelegate::FileSinkDelegate(const std::string& log_path, AccessLog::AccessLogManager& log_manager, DelegatingLogSinkSharedPtr 
log_sink) - : SinkDelegate(log_sink), log_file_(log_manager.createAccessLog(log_path)) {} + : SinkDelegate(log_sink), log_file_(log_manager.createAccessLog(log_path)) { + setDelegate(); +} + +FileSinkDelegate::~FileSinkDelegate() { restoreDelegate(); } void FileSinkDelegate::log(absl::string_view msg) { // Log files have internal locking to ensure serial, non-interleaved diff --git a/source/common/common/logger_delegates.h b/source/common/common/logger_delegates.h index 504855d58f442..f6058fcd507fa 100644 --- a/source/common/common/logger_delegates.h +++ b/source/common/common/logger_delegates.h @@ -14,9 +14,6 @@ namespace Envoy { namespace Logger { -class DelegatingLogSink; -using DelegatingLogSinkSharedPtr = std::shared_ptr; - /** * SinkDelegate that writes log messages to a file. */ @@ -24,6 +21,7 @@ class FileSinkDelegate : public SinkDelegate { public: FileSinkDelegate(const std::string& log_path, AccessLog::AccessLogManager& log_manager, DelegatingLogSinkSharedPtr log_sink); + ~FileSinkDelegate() override; // SinkDelegate void log(absl::string_view msg) override; diff --git a/source/common/common/macros.h b/source/common/common/macros.h index 386f54d384c68..f2b06b84f340b 100644 --- a/source/common/common/macros.h +++ b/source/common/common/macros.h @@ -54,4 +54,7 @@ namespace Envoy { #define FALLTHRU #endif +#if (defined(__GNUC__) && !defined(__clang__)) +#define GCC_COMPILER +#endif } // namespace Envoy diff --git a/source/common/common/perf_annotation.cc b/source/common/common/perf_annotation.cc index 4c1f08379f0ab..daa39ff0ca31a 100644 --- a/source/common/common/perf_annotation.cc +++ b/source/common/common/perf_annotation.cc @@ -34,7 +34,7 @@ PerfAnnotationContext::PerfAnnotationContext() = default; void PerfAnnotationContext::record(std::chrono::nanoseconds duration, absl::string_view category, absl::string_view description) { - CategoryDescription key((std::string(category)), (std::string(description))); + CategoryDescription key = 
{std::string(category), std::string(description)}; { #if PERF_THREAD_SAFE Thread::LockGuard lock(mutex_); @@ -112,8 +112,8 @@ std::string PerfAnnotationContext::toString() { columns[4].push_back(nanoseconds_string(stats.min_)); columns[5].push_back(nanoseconds_string(stats.max_)); const CategoryDescription& category_description = p->first; - columns[6].push_back(category_description.first); - columns[7].push_back(category_description.second); + columns[6].push_back(category_description.category); + columns[7].push_back(category_description.description); for (size_t i = 0; i < num_columns; ++i) { widths[i] = std::max(widths[i], columns[i].back().size()); } diff --git a/source/common/common/perf_annotation.h b/source/common/common/perf_annotation.h index e21b012d0a75f..7187244322d08 100644 --- a/source/common/common/perf_annotation.h +++ b/source/common/common/perf_annotation.h @@ -4,11 +4,11 @@ #include #include -#include #include "common/common/thread.h" #include "common/common/utility.h" +#include "absl/container/node_hash_map.h" #include "absl/strings/string_view.h" // Performance Annotation system, enabled with @@ -117,7 +117,14 @@ class PerfAnnotationContext { */ PerfAnnotationContext(); - using CategoryDescription = std::pair; + struct CategoryDescription { + std::string category; + std::string description; + + bool operator==(const CategoryDescription& other) const { + return category == other.category && description == other.description; + } + }; struct DurationStats { std::chrono::nanoseconds total_{0}; @@ -128,11 +135,11 @@ class PerfAnnotationContext { struct Hash { size_t operator()(const CategoryDescription& a) const { - return std::hash()(a.first) + 13 * std::hash()(a.second); + return std::hash()(a.category) + 13 * std::hash()(a.description); } }; - using DurationStatsMap = std::unordered_map; + using DurationStatsMap = absl::node_hash_map; // Maps {category, description} to DurationStats. 
#if PERF_THREAD_SAFE diff --git a/source/common/common/posix/thread_impl.cc b/source/common/common/posix/thread_impl.cc index 324230ade176b..71cbf2b02eb6b 100644 --- a/source/common/common/posix/thread_impl.cc +++ b/source/common/common/posix/thread_impl.cc @@ -1,6 +1,8 @@ #include "common/common/assert.h" #include "common/common/thread_impl.h" +#include "absl/strings/str_cat.h" + #if defined(__linux__) #include #endif @@ -24,26 +26,99 @@ int64_t getCurrentThreadId() { } // namespace -ThreadImplPosix::ThreadImplPosix(std::function thread_routine) - : thread_routine_(std::move(thread_routine)) { - RELEASE_ASSERT(Logger::Registry::initialized(), ""); - const int rc = pthread_create( - &thread_handle_, nullptr, - [](void* arg) -> void* { - static_cast(arg)->thread_routine_(); - return nullptr; - }, - this); - RELEASE_ASSERT(rc == 0, ""); -} +// See https://www.man7.org/linux/man-pages/man3/pthread_setname_np.3.html. +// The maximum thread name is 16 bytes including the terminating nul byte, +// so we need to truncate the string_view to 15 bytes. +#define PTHREAD_MAX_THREADNAME_LEN_INCLUDING_NULL_BYTE 16 -void ThreadImplPosix::join() { - const int rc = pthread_join(thread_handle_, nullptr); - RELEASE_ASSERT(rc == 0, ""); -} +/** + * Wrapper for a pthread thread. We don't use std::thread because it eats exceptions and leads to + * unusable stack traces. + */ +class ThreadImplPosix : public Thread { +public: + ThreadImplPosix(std::function thread_routine, OptionsOptConstRef options) + : thread_routine_(std::move(thread_routine)) { + if (options) { + name_ = options->name_.substr(0, PTHREAD_MAX_THREADNAME_LEN_INCLUDING_NULL_BYTE - 1); + } + RELEASE_ASSERT(Logger::Registry::initialized(), ""); + const int rc = pthread_create( + &thread_handle_, nullptr, + [](void* arg) -> void* { + static_cast(arg)->thread_routine_(); + return nullptr; + }, + this); + RELEASE_ASSERT(rc == 0, ""); + +#if SUPPORTS_PTHREAD_NAMING + // If the name was not specified, get it from the OS. 
If the name was + // specified, write it into the thread, and assert that the OS sees it the + // same way. + if (name_.empty()) { + getNameFromOS(name_); + } else { + const int set_name_rc = pthread_setname_np(thread_handle_, name_.c_str()); + if (set_name_rc != 0) { + ENVOY_LOG_MISC(trace, "Error {} setting name `{}'", set_name_rc, name_); + } else { + // When compiling in debug mode, read back the thread-name from the OS, + // and verify it's what we asked for. This ensures the truncation is as + // expected, and that the OS will actually retain all the bytes of the + // name we expect. + // + // Note that the system-call to read the thread name may fail in case + // the thread exits after the call to set the name above, and before the + // call to get the name, so we can only do the assert if that call + // succeeded. + std::string check_name; + ASSERT(!getNameFromOS(check_name) || check_name == name_, + absl::StrCat("configured name=", name_, " os name=", check_name)); + } + } +#endif + } + + ~ThreadImplPosix() override { ASSERT(joined_); } + + std::string name() const override { return name_; } + + // Thread::Thread + void join() override { + ASSERT(!joined_); + joined_ = true; + const int rc = pthread_join(thread_handle_, nullptr); + RELEASE_ASSERT(rc == 0, ""); + } + +private: +#if SUPPORTS_PTHREAD_NAMING + // Attempts to get the name from the operating system, returning true and + // updating 'name' if successful. Note that during normal operation this + // may fail, if the thread exits prior to the system call. + bool getNameFromOS(std::string& name) { + // Verify that the name got written into the thread as expected. 
+ char buf[PTHREAD_MAX_THREADNAME_LEN_INCLUDING_NULL_BYTE]; + const int get_name_rc = pthread_getname_np(thread_handle_, buf, sizeof(buf)); + if (get_name_rc != 0) { + ENVOY_LOG_MISC(trace, "Error {} getting name", get_name_rc); + return false; + } + name = buf; + return true; + } +#endif + + std::function thread_routine_; + pthread_t thread_handle_; + std::string name_; + bool joined_{false}; +}; -ThreadPtr ThreadFactoryImplPosix::createThread(std::function thread_routine) { - return std::make_unique(thread_routine); +ThreadPtr ThreadFactoryImplPosix::createThread(std::function thread_routine, + OptionsOptConstRef options) { + return std::make_unique(thread_routine, options); } ThreadId ThreadFactoryImplPosix::currentThreadId() { return ThreadId(getCurrentThreadId()); } diff --git a/source/common/common/posix/thread_impl.h b/source/common/common/posix/thread_impl.h index 81c81d3be3fc5..9b373ecaceb60 100644 --- a/source/common/common/posix/thread_impl.h +++ b/source/common/common/posix/thread_impl.h @@ -9,29 +9,13 @@ namespace Envoy { namespace Thread { -/** - * Wrapper for a pthread thread. We don't use std::thread because it eats exceptions and leads to - * unusable stack traces. 
- */ -class ThreadImplPosix : public Thread { -public: - ThreadImplPosix(std::function thread_routine); - - // Thread::Thread - void join() override; - -private: - std::function thread_routine_; - pthread_t thread_handle_; -}; - /** * Implementation of ThreadFactory */ class ThreadFactoryImplPosix : public ThreadFactory { public: // Thread::ThreadFactory - ThreadPtr createThread(std::function thread_routine) override; + ThreadPtr createThread(std::function thread_routine, OptionsOptConstRef options) override; ThreadId currentThreadId() override; }; diff --git a/source/common/common/random_generator.cc b/source/common/common/random_generator.cc new file mode 100644 index 0000000000000..69eabc5e6c078 --- /dev/null +++ b/source/common/common/random_generator.cc @@ -0,0 +1,135 @@ +#include "common/common/random_generator.h" + +#include "common/common/assert.h" + +#include "openssl/rand.h" + +namespace Envoy { +namespace Random { + +const size_t RandomGeneratorImpl::UUID_LENGTH = 36; + +uint64_t RandomGeneratorImpl::random() { + // Prefetch 256 * sizeof(uint64_t) bytes of randomness. buffered_idx is initialized to 256, + // i.e. out-of-range value, so the buffer will be filled with randomness on the first call + // to this function. + // + // There is a diminishing return when increasing the prefetch size, as illustrated below in + // a test that generates 1,000,000,000 uint64_t numbers (results on Intel Xeon E5-1650v3). 
+ // + // //test/common/runtime:runtime_impl_test - Random.DISABLED_benchmarkRandom + // + // prefetch | time | improvement + // (uint64_t) | (ms) | (% vs prev) + // --------------------------------- + // 32 | 25,931 | + // 64 | 15,124 | 42% faster + // 128 | 9,653 | 36% faster + // 256 | 6,930 | 28% faster <-- used right now + // 512 | 5,571 | 20% faster + // 1024 | 4,888 | 12% faster + // 2048 | 4,594 | 6% faster + // 4096 | 4,424 | 4% faster + // 8192 | 4,386 | 1% faster + + const size_t prefetch = 256; + static thread_local uint64_t buffered[prefetch]; + static thread_local size_t buffered_idx = prefetch; + + if (buffered_idx >= prefetch) { + int rc = RAND_bytes(reinterpret_cast(buffered), sizeof(buffered)); + ASSERT(rc == 1); + buffered_idx = 0; + } + + // Consume uint64_t from the buffer. + return buffered[buffered_idx++]; +} + +std::string RandomGeneratorImpl::uuid() { + // Prefetch 2048 bytes of randomness. buffered_idx is initialized to sizeof(buffered), + // i.e. out-of-range value, so the buffer will be filled with randomness on the first + // call to this function. + // + // There is a diminishing return when increasing the prefetch size, as illustrated below + // in a test that generates 100,000,000 UUIDs (results on Intel Xeon E5-1650v3). + // + // //test/common/runtime:uuid_util_test - UUIDUtilsTest.DISABLED_benchmark + // + // prefetch | time | improvement + // (bytes) | (ms) | (% vs prev) + // --------------------------------- + // 128 | 16,353 | + // 256 | 11,827 | 28% faster + // 512 | 9,676 | 18% faster + // 1024 | 8,594 | 11% faster + // 2048 | 8,097 | 6% faster <-- used right now + // 4096 | 7,790 | 4% faster + // 8192 | 7,737 | 1% faster + + static thread_local uint8_t buffered[2048]; + static thread_local size_t buffered_idx = sizeof(buffered); + + if (buffered_idx + 16 > sizeof(buffered)) { + int rc = RAND_bytes(buffered, sizeof(buffered)); + ASSERT(rc == 1); + buffered_idx = 0; + } + + // Consume 16 bytes from the buffer. 
+ ASSERT(buffered_idx + 16 <= sizeof(buffered)); + uint8_t* rand = &buffered[buffered_idx]; + buffered_idx += 16; + + // Create UUID from Truly Random or Pseudo-Random Numbers. + // See: https://tools.ietf.org/html/rfc4122#section-4.4 + rand[6] = (rand[6] & 0x0f) | 0x40; // UUID version 4 (random) + rand[8] = (rand[8] & 0x3f) | 0x80; // UUID variant 1 (RFC4122) + + // Convert UUID to a string representation, e.g. a121e9e1-feae-4136-9e0e-6fac343d56c9. + static const char* const hex = "0123456789abcdef"; + char uuid[UUID_LENGTH]; + + for (uint8_t i = 0; i < 4; i++) { + const uint8_t d = rand[i]; + uuid[2 * i] = hex[d >> 4]; + uuid[2 * i + 1] = hex[d & 0x0f]; + } + + uuid[8] = '-'; + + for (uint8_t i = 4; i < 6; i++) { + const uint8_t d = rand[i]; + uuid[2 * i + 1] = hex[d >> 4]; + uuid[2 * i + 2] = hex[d & 0x0f]; + } + + uuid[13] = '-'; + + for (uint8_t i = 6; i < 8; i++) { + const uint8_t d = rand[i]; + uuid[2 * i + 2] = hex[d >> 4]; + uuid[2 * i + 3] = hex[d & 0x0f]; + } + + uuid[18] = '-'; + + for (uint8_t i = 8; i < 10; i++) { + const uint8_t d = rand[i]; + uuid[2 * i + 3] = hex[d >> 4]; + uuid[2 * i + 4] = hex[d & 0x0f]; + } + + uuid[23] = '-'; + + for (uint8_t i = 10; i < 16; i++) { + const uint8_t d = rand[i]; + uuid[2 * i + 4] = hex[d >> 4]; + uuid[2 * i + 5] = hex[d & 0x0f]; + } + + return std::string(uuid, UUID_LENGTH); +} + +} // namespace Random +} // namespace Envoy diff --git a/source/common/common/random_generator.h b/source/common/common/random_generator.h new file mode 100644 index 0000000000000..47c5da6b1f9d2 --- /dev/null +++ b/source/common/common/random_generator.h @@ -0,0 +1,21 @@ +#pragma once + +#include "envoy/common/random_generator.h" + +namespace Envoy { +namespace Random { +/** + * Implementation of RandomGenerator that uses per-thread RANLUX generators seeded with current + * time. 
+ */ +class RandomGeneratorImpl : public RandomGenerator { +public: + // Random::RandomGenerator + uint64_t random() override; + std::string uuid() override; + + static const size_t UUID_LENGTH; +}; + +} // namespace Random +} // namespace Envoy diff --git a/source/common/common/regex.cc b/source/common/common/regex.cc index 5f7faa7d728b2..735a24d25b8d7 100644 --- a/source/common/common/regex.cc +++ b/source/common/common/regex.cc @@ -1,11 +1,13 @@ #include "common/common/regex.h" #include "envoy/common/exception.h" +#include "envoy/runtime/runtime.h" #include "envoy/type/matcher/v3/regex.pb.h" #include "common/common/assert.h" #include "common/common/fmt.h" #include "common/protobuf/utility.h" +#include "common/stats/symbol_table_impl.h" #include "re2/re2.h" @@ -47,12 +49,56 @@ class CompiledGoogleReMatcher : public CompiledMatcher { throw EnvoyException(regex_.error()); } - const uint32_t max_program_size = - PROTOBUF_GET_WRAPPED_OR_DEFAULT(config.google_re2(), max_program_size, 100); - if (static_cast(regex_.ProgramSize()) > max_program_size) { - throw EnvoyException(fmt::format("regex '{}' RE2 program size of {} > max program size of " - "{}. Increase configured max program size if necessary.", - config.regex(), regex_.ProgramSize(), max_program_size)); + const uint32_t regex_program_size = static_cast(regex_.ProgramSize()); + + // Check if the deprecated field max_program_size is set first, and follow the old logic if so. + if (config.google_re2().has_max_program_size()) { + const uint32_t max_program_size = + PROTOBUF_GET_WRAPPED_OR_DEFAULT(config.google_re2(), max_program_size, 100); + if (regex_program_size > max_program_size) { + throw EnvoyException(fmt::format("regex '{}' RE2 program size of {} > max program size of " + "{}. 
Increase configured max program size if necessary.", + config.regex(), regex_program_size, max_program_size)); + } + return; + } + + Runtime::Loader* runtime = Runtime::LoaderSingleton::getExisting(); + if (runtime) { + Stats::Scope& root_scope = runtime->getRootScope(); + + // TODO(perf): It would be more efficient to create the stats (program size histogram, warning + // counter) on startup and not with each regex match. + Stats::StatNameManagedStorage program_size_stat_name("re2.program_size", + root_scope.symbolTable()); + Stats::Histogram& program_size_stat = root_scope.histogramFromStatName( + program_size_stat_name.statName(), Stats::Histogram::Unit::Unspecified); + program_size_stat.recordValue(regex_program_size); + + Stats::StatNameManagedStorage warn_count_stat_name("re2.exceeded_warn_level", + root_scope.symbolTable()); + Stats::Counter& warn_count = root_scope.counterFromStatName(warn_count_stat_name.statName()); + + const uint32_t max_program_size_error_level = + runtime->snapshot().getInteger("re2.max_program_size.error_level", 100); + if (regex_program_size > max_program_size_error_level) { + throw EnvoyException(fmt::format("regex '{}' RE2 program size of {} > max program size of " + "{} set for the error level threshold. Increase " + "configured max program size if necessary.", + config.regex(), regex_program_size, + max_program_size_error_level)); + } + + const uint32_t max_program_size_warn_level = + runtime->snapshot().getInteger("re2.max_program_size.warn_level", UINT32_MAX); + if (regex_program_size > max_program_size_warn_level) { + warn_count.inc(); + ENVOY_LOG_MISC( + warn, + "regex '{}' RE2 program size of {} > max program size of {} set for the warn " + "level threshold. 
Increase configured max program size if necessary.", + config.regex(), regex_program_size, max_program_size_warn_level); + } } } diff --git a/source/common/common/thread.h b/source/common/common/thread.h index cfd40c8a4083c..4808d391dfbdc 100644 --- a/source/common/common/thread.h +++ b/source/common/common/thread.h @@ -1,10 +1,14 @@ #pragma once +#include +#include #include #include #include "envoy/thread/thread.h" +#include "common/common/non_copyable.h" + #include "absl/synchronization/mutex.h" namespace Envoy { @@ -60,9 +64,9 @@ class CondVar { * @return WaitStatus whether the condition timed out or not. */ template - WaitStatus waitFor( - MutexBasicLockable& mutex, - std::chrono::duration duration) noexcept ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex) { + WaitStatus waitFor(MutexBasicLockable& mutex, + std::chrono::duration duration) noexcept + ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex) { return condvar_.WaitWithTimeout(&mutex.mutex_, absl::FromChrono(duration)) ? WaitStatus::Timeout : WaitStatus::NoTimeout; @@ -75,5 +79,94 @@ class CondVar { absl::CondVar condvar_; }; +enum class AtomicPtrAllocMode { DoNotDelete, DeleteOnDestruct }; + +// Manages an array of atomic pointers to T, providing a relatively +// contention-free mechanism to lazily get a T* at an index, where the caller +// provides a mechanism to instantiate a T* under lock, if one has not already +// been stored at that index. +// +// alloc_mode controls whether allocated T* entries should be deleted on +// destruction of the array. This should be set to AtomicPtrAllocMode::DoNotDelete +// if the T* returned from MakeObject are managed by the caller. 
+template +class AtomicPtrArray : NonCopyable { +public: + AtomicPtrArray() { + for (std::atomic& atomic_ref : data_) { + atomic_ref = nullptr; + } + } + + ~AtomicPtrArray() { + if (alloc_mode == AtomicPtrAllocMode::DeleteOnDestruct) { + for (std::atomic& atomic_ref : data_) { + T* ptr = atomic_ref.load(); + if (ptr != nullptr) { + delete ptr; + } + } + } + } + + // User-defined function for allocating an object. This will be called + // under a lock controlled by this class, so MakeObject will not race + // against itself. MakeObject is allowed to return nullptr, in which + // case the next call to get() will call MakeObject again. + using MakeObject = std::function; + + /* + * Returns an already existing T* at index, or calls make_object to + * instantiate and save the T* under lock. + * + * @param index the Index to look up. + * @param make_object function to call under lock to make a T*. + * @return The new or already-existing T*, possibly nullptr if make_object returns nullptr. + */ + T* get(uint32_t index, const MakeObject& make_object) { + std::atomic& atomic_ref = data_[index]; + + // First, use an atomic load to see if the object has already been allocated. + if (atomic_ref.load() == nullptr) { + absl::MutexLock lock(&mutex_); + + // If that fails, check again under lock as two threads might have raced + // to create the object. + if (atomic_ref.load() == nullptr) { + atomic_ref = make_object(); + } + } + return atomic_ref.load(); + } + +private: + std::atomic data_[size]; + absl::Mutex mutex_; +}; + +// Manages a pointer to T, providing a relatively contention-free mechanism to +// lazily create a T*, where the caller provides a mechanism to instantiate a +// T* under lock, if one has not already been stored. +// +// alloc_mode controls whether allocated T* objects should be deleted on +// destruction of the AtomicObject. This should be set to +// AtomicPtrAllocMode::DoNotDelete if the T* returned from MakeObject are managed +// by the caller. 
+template +class AtomicPtr : private AtomicPtrArray { +public: + using BaseClass = AtomicPtrArray; + using typename BaseClass::MakeObject; + + /* + * Returns an already existing T*, or calls make_object to instantiate and + * save the T* under lock. + * + * @param make_object function to call under lock to make a T*. + * @return The new or already-existing T*, possibly nullptr if make_object returns nullptr. + */ + T* get(const MakeObject& make_object) { return BaseClass::get(0, make_object); } +}; + } // namespace Thread } // namespace Envoy diff --git a/source/common/common/utility.cc b/source/common/common/utility.cc index 7a3656f47ba72..1d7a5005129a4 100644 --- a/source/common/common/utility.cc +++ b/source/common/common/utility.cc @@ -15,6 +15,7 @@ #include "common/common/hash.h" #include "common/singleton/const_singleton.h" +#include "absl/container/node_hash_map.h" #include "absl/strings/ascii.h" #include "absl/strings/match.h" #include "absl/strings/str_join.h" @@ -38,6 +39,31 @@ using UnsignedMilliseconds = std::chrono::duration; } // namespace +const std::string errorDetails(int error_code) { +#ifndef WIN32 + // clang-format off + return strerror(error_code); + // clang-format on +#else + // Windows error codes do not correspond to POSIX errno values + // Use FormatMessage, strip trailing newline, and return "Unknown error" on failure (as on POSIX). + // Failures will usually be due to the error message not being found. 
+ char* buffer = NULL; + DWORD msg_size = FormatMessage( + FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS | FORMAT_MESSAGE_ALLOCATE_BUFFER, + NULL, error_code, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (LPTSTR)&buffer, 0, NULL); + if (msg_size == 0) { + return "Unknown error"; + } + if (msg_size > 1 && buffer[msg_size - 2] == '\r' && buffer[msg_size - 1] == '\n') { + msg_size -= 2; + } + std::string error_details(buffer, msg_size); + ASSERT(LocalFree(buffer) == NULL); + return error_details; +#endif +} + std::string DateFormatter::fromTime(const SystemTime& time) const { struct CachedTime { // The string length of a number of seconds since the Epoch. E.g. for "1528270093", the length @@ -60,7 +86,7 @@ std::string DateFormatter::fromTime(const SystemTime& time) const { SpecifierOffsets specifier_offsets; }; // A map is used to keep different formatted format strings at a given second. - std::unordered_map formatted; + absl::node_hash_map formatted; }; static thread_local CachedTime cached_time; @@ -76,9 +102,11 @@ std::string DateFormatter::fromTime(const SystemTime& time) const { // Remove all the expired cached items. for (auto it = cached_time.formatted.cbegin(); it != cached_time.formatted.cend();) { if (it->second.epoch_time_seconds != epoch_time_seconds) { - it = cached_time.formatted.erase(it); + auto next_it = std::next(it); + cached_time.formatted.erase(it); + it = next_it; } else { - it++; + ++it; } } @@ -127,10 +155,13 @@ std::string DateFormatter::fromTime(const SystemTime& time) const { } void DateFormatter::parse(const std::string& format_string) { - std::string new_format_string = format_string; + std::string suffix = format_string; std::smatch matched; + // "step" is the last specifier's position + the last specifier's width. It's not the current + // position in "format_string" because the length has changed. It is actually the index which + // points to the end of the last specifier in formatted string (generated in the future). 
size_t step = 0; - while (regex_search(new_format_string, matched, SpecifierConstants::get().PATTERN)) { + while (regex_search(suffix, matched, SpecifierConstants::get().PATTERN)) { // The std::smatch matched for (%([1-9])?f)|(%s): [all, subsecond-specifier, subsecond-specifier // width, second-specifier]. const std::string& width_specifier = matched[2]; @@ -139,27 +170,22 @@ void DateFormatter::parse(const std::string& format_string) { // In the template string to be used in runtime substitution, the width is the number of // characters to be replaced. const size_t width = width_specifier.empty() ? 9 : width_specifier.at(0) - '0'; - new_format_string.replace(matched.position(), matched.length(), - std::string(second_specifier.empty() ? width : 2, '?')); - - ASSERT(step < new_format_string.size()); + ASSERT(!suffix.empty()); // This records matched position, the width of current subsecond pattern, and also the string // segment before the matched position. These values will be used later at data path. specifiers_.emplace_back( second_specifier.empty() - ? Specifier(matched.position(), width, - new_format_string.substr(step, matched.position() - step)) - : Specifier(matched.position(), - new_format_string.substr(step, matched.position() - step))); - + ? Specifier(step + matched.position(), width, suffix.substr(0, matched.position())) + : Specifier(step + matched.position(), suffix.substr(0, matched.position()))); step = specifiers_.back().position_ + specifiers_.back().width_; + suffix = matched.suffix(); } // To capture the segment after the last specifier pattern of a format string by creating a zero // width specifier. E.g. %3f-this-is-the-last-%s-segment-%Y-until-this. 
- if (step < new_format_string.size()) { - Specifier specifier(step, 0, new_format_string.substr(step)); + if (!suffix.empty()) { + Specifier specifier(step, 0, suffix); specifiers_.emplace_back(specifier); } } @@ -481,12 +507,12 @@ std::string StringUtil::removeCharacters(const absl::string_view& str, const auto intervals = remove_characters.toVector(); std::vector pieces; pieces.reserve(intervals.size()); - for (const auto& interval : intervals) { - if (interval.first != pos) { - ASSERT(interval.second <= str.size()); - pieces.push_back(str.substr(pos, interval.first - pos)); + for (const auto& [left_bound, right_bound] : intervals) { + if (left_bound != pos) { + ASSERT(right_bound <= str.size()); + pieces.push_back(str.substr(pos, left_bound - pos)); } - pos = interval.second; + pos = right_bound; } if (pos != str.size()) { pieces.push_back(str.substr(pos)); diff --git a/source/common/common/utility.h b/source/common/common/utility.h index f1214148f3d73..0101fd3d9fd9d 100644 --- a/source/common/common/utility.h +++ b/source/common/common/utility.h @@ -5,7 +5,6 @@ #include #include #include -#include #include #include "envoy/common/interval_set.h" @@ -18,6 +17,14 @@ #include "absl/strings/string_view.h" namespace Envoy { + +/** + * Retrieve string description of error code + * @param int error code + * @return const std::string error detail description + */ +const std::string errorDetails(int error_code); + /** * Utility class for formatting dates given an absl::FormatTime style format string. */ @@ -700,16 +707,6 @@ class InlineString : public InlineStorage { */ absl::string_view toStringView() const { return {data_, size_}; } - /** - * @return the number of bytes in the string - */ - size_t size() const { return size_; } - - /** - * @return a pointer to the first byte of the string. - */ - const char* data() const { return data_; } - private: // Constructor is declared private so that no one constructs one without the // proper size allocation. 
to accommodate the variable-size buffer. diff --git a/source/common/common/win32/thread_impl.cc b/source/common/common/win32/thread_impl.cc index 1d3eca9689570..8f26d63e0eb39 100644 --- a/source/common/common/win32/thread_impl.cc +++ b/source/common/common/win32/thread_impl.cc @@ -6,8 +6,14 @@ namespace Envoy { namespace Thread { -ThreadImplWin32::ThreadImplWin32(std::function thread_routine) +ThreadImplWin32::ThreadImplWin32(std::function thread_routine, OptionsOptConstRef options) : thread_routine_(thread_routine) { + if (options) { + name_ = options->name_; + // TODO(jmarantz): set the thread name for task manager, etc, or pull the + // auto-generated name from the OS if options is not present. + } + RELEASE_ASSERT(Logger::Registry::initialized(), ""); thread_handle_ = reinterpret_cast(::_beginthreadex( nullptr, 0, @@ -26,8 +32,9 @@ void ThreadImplWin32::join() { RELEASE_ASSERT(rc == WAIT_OBJECT_0, ""); } -ThreadPtr ThreadFactoryImplWin32::createThread(std::function thread_routine) { - return std::make_unique(thread_routine); +ThreadPtr ThreadFactoryImplWin32::createThread(std::function thread_routine, + OptionsOptConstRef options) { + return std::make_unique(thread_routine, options); } ThreadId ThreadFactoryImplWin32::currentThreadId() { diff --git a/source/common/common/win32/thread_impl.h b/source/common/common/win32/thread_impl.h index 8b5d0fe37e15b..87be085291c86 100644 --- a/source/common/common/win32/thread_impl.h +++ b/source/common/common/win32/thread_impl.h @@ -14,11 +14,12 @@ namespace Thread { */ class ThreadImplWin32 : public Thread { public: - ThreadImplWin32(std::function thread_routine); + ThreadImplWin32(std::function thread_routine, OptionsOptConstRef options); ~ThreadImplWin32(); // Thread::Thread void join() override; + std::string name() const override { return name_; } // Needed for WatcherImpl for the QueueUserAPC callback context HANDLE handle() const { return thread_handle_; } @@ -26,6 +27,7 @@ class ThreadImplWin32 : public Thread { 
private: std::function thread_routine_; HANDLE thread_handle_; + std::string name_; }; /** @@ -34,7 +36,7 @@ class ThreadImplWin32 : public Thread { class ThreadFactoryImplWin32 : public ThreadFactory { public: // Thread::ThreadFactory - ThreadPtr createThread(std::function thread_routine) override; + ThreadPtr createThread(std::function thread_routine, OptionsOptConstRef options) override; ThreadId currentThreadId() override; }; diff --git a/source/common/compressor/BUILD b/source/common/compressor/BUILD deleted file mode 100644 index d452e1c968f76..0000000000000 --- a/source/common/compressor/BUILD +++ /dev/null @@ -1,22 +0,0 @@ -licenses(["notice"]) # Apache 2 - -load( - "//bazel:envoy_build_system.bzl", - "envoy_cc_library", - "envoy_package", -) - -envoy_package() - -envoy_cc_library( - name = "compressor_lib", - srcs = ["zlib_compressor_impl.cc"], - hdrs = ["zlib_compressor_impl.h"], - external_deps = ["zlib"], - deps = [ - "//include/envoy/compressor:compressor_interface", - "//source/common/buffer:buffer_lib", - "//source/common/common:assert_lib", - "//source/common/common:zlib_base_lib", - ], -) diff --git a/source/common/config/BUILD b/source/common/config/BUILD index 611a8cd791cc5..55b8cce10d0cb 100644 --- a/source/common/config/BUILD +++ b/source/common/config/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( @@ -60,6 +60,15 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "decoded_resource_lib", + hdrs = ["decoded_resource_impl.h"], + deps = [ + "//include/envoy/config:subscription_interface", + "//source/common/protobuf:utility_lib", + ], +) + envoy_cc_library( name = "delta_subscription_state_lib", srcs = ["delta_subscription_state.cc"], @@ -68,6 +77,7 @@ envoy_cc_library( ":api_version_lib", ":pausable_ack_queue_lib", ":utility_lib", + ":watch_map_lib", 
"//include/envoy/config:subscription_interface", "//include/envoy/event:dispatcher_interface", "//source/common/common:assert_lib", @@ -86,6 +96,7 @@ envoy_cc_library( srcs = ["filesystem_subscription_impl.cc"], hdrs = ["filesystem_subscription_impl.h"], deps = [ + ":decoded_resource_lib", "//include/envoy/config:subscription_interface", "//include/envoy/event:dispatcher_interface", "//include/envoy/filesystem:filesystem_interface", @@ -122,15 +133,18 @@ envoy_cc_library( hdrs = ["grpc_mux_impl.h"], deps = [ ":api_version_lib", + ":decoded_resource_lib", ":grpc_stream_lib", ":utility_lib", "//include/envoy/config:grpc_mux_interface", "//include/envoy/config:subscription_interface", "//include/envoy/upstream:cluster_manager_interface", + "//source/common/common:cleanup_lib", "//source/common/common:minimal_logger_lib", "//source/common/common:utility_lib", "//source/common/memory:utils_lib", "//source/common/protobuf", + "@com_google_absl//absl/container:btree", "@envoy_api//envoy/api/v2:pkg_cc_proto", "@envoy_api//envoy/service/discovery/v3:pkg_cc_proto", ], @@ -175,6 +189,7 @@ envoy_cc_library( ], deps = [ ":api_version_lib", + ":decoded_resource_lib", ":version_converter_lib", "//include/envoy/config:subscription_interface", "//include/envoy/event:dispatcher_interface", @@ -209,6 +224,15 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "opaque_resource_decoder_lib", + hdrs = ["opaque_resource_decoder_impl.h"], + deps = [ + "//include/envoy/config:subscription_interface", + "//source/common/protobuf:utility_lib", + ], +) + envoy_cc_library( name = "pausable_ack_queue_lib", srcs = ["pausable_ack_queue.cc"], @@ -229,6 +253,7 @@ envoy_cc_library( "@envoy_api//envoy/service/discovery/v2:pkg_cc_proto", "@envoy_api//envoy/service/discovery/v3:pkg_cc_proto", "@envoy_api//envoy/service/endpoint/v3:pkg_cc_proto", + "@envoy_api//envoy/service/extension/v3:pkg_cc_proto", "@envoy_api//envoy/service/listener/v3:pkg_cc_proto", 
"@envoy_api//envoy/service/ratelimit/v2:pkg_cc_proto", "@envoy_api//envoy/service/ratelimit/v3:pkg_cc_proto", @@ -282,6 +307,7 @@ envoy_cc_library( "//include/envoy/config:subscription_factory_interface", "//include/envoy/config:subscription_interface", "//include/envoy/upstream:cluster_manager_interface", + "//source/common/common:minimal_logger_lib", "//source/common/protobuf", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], @@ -299,6 +325,16 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "udpa_resource_lib", + srcs = ["udpa_resource.cc"], + hdrs = ["udpa_resource.h"], + deps = [ + "//source/common/http:utility_lib", + "@com_github_cncf_udpa//udpa/core/v1:pkg_cc_proto", + ], +) + envoy_cc_library( name = "update_ack_lib", hdrs = ["update_ack.h"], @@ -326,6 +362,7 @@ envoy_cc_library( "//source/common/protobuf", "//source/common/protobuf:utility_lib", "//source/common/singleton:const_singleton", + "//source/common/stats:histogram_lib", "//source/common/stats:stats_lib", "//source/common/stats:stats_matcher_lib", "//source/common/stats:tag_producer_lib", @@ -356,8 +393,10 @@ envoy_cc_library( srcs = ["watch_map.cc"], hdrs = ["watch_map.h"], deps = [ + ":decoded_resource_lib", "//include/envoy/config:subscription_interface", "//source/common/common:assert_lib", + "//source/common/common:cleanup_lib", "//source/common/common:minimal_logger_lib", "//source/common/protobuf", "@envoy_api//envoy/service/discovery/v3:pkg_cc_proto", @@ -368,6 +407,7 @@ envoy_cc_library( name = "subscription_base_interface", hdrs = ["subscription_base.h"], deps = [ + ":opaque_resource_decoder_lib", ":resource_name_lib", "//include/envoy/config:subscription_interface", ], diff --git a/source/common/config/config_provider_impl.h b/source/common/config/config_provider_impl.h index 157941124d29f..144332fe23b0f 100644 --- a/source/common/config/config_provider_impl.h +++ b/source/common/config/config_provider_impl.h @@ -391,10 +391,10 @@ class ConfigProviderManagerImplBase : public 
ConfigProviderManager, public Singl protected: // Ordered set for deterministic config dump output. using ConfigProviderSet = std::set; - using ConfigProviderMap = std::unordered_map, EnumClassHash>; + using ConfigProviderMap = absl::node_hash_map, EnumClassHash>; using ConfigSubscriptionMap = - std::unordered_map>; + absl::node_hash_map>; ConfigProviderManagerImplBase(Server::Admin& admin, const std::string& config_name); diff --git a/source/common/config/datasource.cc b/source/common/config/datasource.cc index 7c089ebfe5655..d3e286d0b27af 100644 --- a/source/common/config/datasource.cc +++ b/source/common/config/datasource.cc @@ -40,7 +40,7 @@ absl::optional getPath(const envoy::config::core::v3::DataSource& s RemoteAsyncDataProvider::RemoteAsyncDataProvider( Upstream::ClusterManager& cm, Init::Manager& manager, const envoy::config::core::v3::RemoteDataSource& source, Event::Dispatcher& dispatcher, - Runtime::RandomGenerator& random, bool allow_empty, AsyncDataSourceCb&& callback) + Random::RandomGenerator& random, bool allow_empty, AsyncDataSourceCb&& callback) : allow_empty_(allow_empty), callback_(std::move(callback)), fetcher_(std::make_unique(cm, source.http_uri(), source.sha256(), *this)), diff --git a/source/common/config/datasource.h b/source/common/config/datasource.h index 1e35e119518b0..4b3ccdb17ffda 100644 --- a/source/common/config/datasource.h +++ b/source/common/config/datasource.h @@ -1,6 +1,7 @@ #pragma once #include "envoy/api/api.h" +#include "envoy/common/random_generator.h" #include "envoy/config/core/v3/base.pb.h" #include "envoy/init/manager.h" #include "envoy/upstream/cluster_manager.h" @@ -63,7 +64,7 @@ class RemoteAsyncDataProvider : public Config::DataFetcher::RemoteDataFetcherCal public: RemoteAsyncDataProvider(Upstream::ClusterManager& cm, Init::Manager& manager, const envoy::config::core::v3::RemoteDataSource& source, - Event::Dispatcher& dispatcher, Runtime::RandomGenerator& random, + Event::Dispatcher& dispatcher, 
Random::RandomGenerator& random, bool allow_empty, AsyncDataSourceCb&& callback); ~RemoteAsyncDataProvider() override { diff --git a/source/common/config/decoded_resource_impl.h b/source/common/config/decoded_resource_impl.h new file mode 100644 index 0000000000000..6698780203028 --- /dev/null +++ b/source/common/config/decoded_resource_impl.h @@ -0,0 +1,77 @@ +#pragma once + +#include "envoy/config/subscription.h" + +#include "common/protobuf/utility.h" + +namespace Envoy { +namespace Config { + +namespace { + +std::vector +repeatedPtrFieldToVector(const Protobuf::RepeatedPtrField& xs) { + std::vector ys; + std::copy(xs.begin(), xs.end(), std::back_inserter(ys)); + return ys; +} + +} // namespace + +class DecodedResourceImpl : public DecodedResource { +public: + DecodedResourceImpl(OpaqueResourceDecoder& resource_decoder, const ProtobufWkt::Any& resource, + const std::string& version) + : DecodedResourceImpl(resource_decoder, {}, Protobuf::RepeatedPtrField(), + resource, true, version) {} + DecodedResourceImpl(OpaqueResourceDecoder& resource_decoder, + const envoy::service::discovery::v3::Resource& resource) + : DecodedResourceImpl(resource_decoder, resource.name(), resource.aliases(), + resource.resource(), resource.has_resource(), resource.version()) {} + DecodedResourceImpl(ProtobufTypes::MessagePtr resource, const std::string& name, + const std::vector& aliases, const std::string& version) + : resource_(std::move(resource)), has_resource_(true), name_(name), aliases_(aliases), + version_(version) {} + + // Config::DecodedResource + const std::string& name() const override { return name_; } + const std::vector& aliases() const override { return aliases_; } + const std::string& version() const override { return version_; }; + const Protobuf::Message& resource() const override { return *resource_; }; + bool hasResource() const override { return has_resource_; } + +private: + DecodedResourceImpl(OpaqueResourceDecoder& resource_decoder, absl::optional name, + const 
Protobuf::RepeatedPtrField& aliases, + const ProtobufWkt::Any& resource, bool has_resource, + const std::string& version) + : resource_(resource_decoder.decodeResource(resource)), has_resource_(has_resource), + name_(name ? *name : resource_decoder.resourceName(*resource_)), + aliases_(repeatedPtrFieldToVector(aliases)), version_(version) {} + + const ProtobufTypes::MessagePtr resource_; + const bool has_resource_; + const std::string name_; + const std::vector aliases_; + const std::string version_; +}; + +using DecodedResourceImplPtr = std::unique_ptr; + +struct DecodedResourcesWrapper { + DecodedResourcesWrapper() = default; + DecodedResourcesWrapper(OpaqueResourceDecoder& resource_decoder, + const Protobuf::RepeatedPtrField& resources, + const std::string& version) { + for (const auto& resource : resources) { + owned_resources_.emplace_back(new DecodedResourceImpl(resource_decoder, resource, version)); + refvec_.emplace_back(*owned_resources_.back()); + } + } + + std::vector owned_resources_; + std::vector refvec_; +}; + +} // namespace Config +} // namespace Envoy diff --git a/source/common/config/delta_subscription_state.cc b/source/common/config/delta_subscription_state.cc index 5db0ce6f0bca5..c0a6a5502cb09 100644 --- a/source/common/config/delta_subscription_state.cc +++ b/source/common/config/delta_subscription_state.cc @@ -10,9 +10,9 @@ namespace Envoy { namespace Config { DeltaSubscriptionState::DeltaSubscriptionState(std::string type_url, - SubscriptionCallbacks& callbacks, + UntypedConfigUpdateCallbacks& watch_map, const LocalInfo::LocalInfo& local_info) - : type_url_(std::move(type_url)), callbacks_(callbacks), local_info_(local_info) {} + : type_url_(std::move(type_url)), watch_map_(watch_map), local_info_(local_info) {} void DeltaSubscriptionState::updateSubscriptionInterest(const std::set& cur_added, const std::set& cur_removed) { @@ -81,7 +81,7 @@ void DeltaSubscriptionState::handleGoodResponse( fmt::format("duplicate name {} found in the union of 
added+removed resources", name)); } } - callbacks_.onConfigUpdate(message.resources(), message.removed_resources(), + watch_map_.onConfigUpdate(message.resources(), message.removed_resources(), message.system_version_info()); for (const auto& resource : message.resources()) { setResourceVersion(resource.name(), resource.version()); @@ -108,11 +108,11 @@ void DeltaSubscriptionState::handleBadResponse(const EnvoyException& e, UpdateAc ack.error_detail_.set_code(Grpc::Status::WellKnownGrpcStatus::Internal); ack.error_detail_.set_message(Config::Utility::truncateGrpcStatusMessage(e.what())); ENVOY_LOG(warn, "delta config for {} rejected: {}", type_url_, e.what()); - callbacks_.onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::UpdateRejected, &e); + watch_map_.onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::UpdateRejected, &e); } void DeltaSubscriptionState::handleEstablishmentFailure() { - callbacks_.onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::ConnectionFailure, + watch_map_.onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::ConnectionFailure, nullptr); } @@ -124,16 +124,16 @@ DeltaSubscriptionState::getNextRequestAckless() { // initial_resource_versions "must be populated for first request in a stream". // Also, since this might be a new server, we must explicitly state *all* of our subscription // interest. - for (auto const& resource : resource_versions_) { + for (auto const& [resource_name, resource_version] : resource_versions_) { // Populate initial_resource_versions with the resource versions we currently have. // Resources we are interested in, but are still waiting to get any version of from the // server, do not belong in initial_resource_versions. (But do belong in new subscriptions!) 
- if (!resource.second.waitingForServer()) { - (*request.mutable_initial_resource_versions())[resource.first] = resource.second.version(); + if (!resource_version.waitingForServer()) { + (*request.mutable_initial_resource_versions())[resource_name] = resource_version.version(); } // As mentioned above, fill resource_names_subscribe with everything, including names we // have yet to receive any resource for. - names_added_.insert(resource.first); + names_added_.insert(resource_name); } names_removed_.clear(); } diff --git a/source/common/config/delta_subscription_state.h b/source/common/config/delta_subscription_state.h index 166b29608982c..1e21ba3a8efde 100644 --- a/source/common/config/delta_subscription_state.h +++ b/source/common/config/delta_subscription_state.h @@ -11,6 +11,9 @@ #include "common/common/logger.h" #include "common/config/api_version.h" #include "common/config/pausable_ack_queue.h" +#include "common/config/watch_map.h" + +#include "absl/container/node_hash_map.h" namespace Envoy { namespace Config { @@ -21,7 +24,7 @@ namespace Config { // being multiplexed together by ADS. class DeltaSubscriptionState : public Logger::Loggable { public: - DeltaSubscriptionState(std::string type_url, SubscriptionCallbacks& callbacks, + DeltaSubscriptionState(std::string type_url, UntypedConfigUpdateCallbacks& watch_map, const LocalInfo::LocalInfo& local_info); // Update which resources we're interested in subscribing to. @@ -80,22 +83,21 @@ class DeltaSubscriptionState : public Logger::Loggable { // names we are currently interested in. Those in the waitingForServer state currently don't have // any version for that resource: we need to inform the server if we lose interest in them, but we // also need to *not* include them in the initial_resource_versions map upon a reconnect. - std::unordered_map resource_versions_; + absl::node_hash_map resource_versions_; // The keys of resource_versions_. 
Only tracked separately because std::map does not provide an // iterator into just its keys, e.g. for use in std::set_difference. std::set resource_names_; const std::string type_url_; - // callbacks_ is expected to be a WatchMap. - SubscriptionCallbacks& callbacks_; + UntypedConfigUpdateCallbacks& watch_map_; const LocalInfo::LocalInfo& local_info_; std::chrono::milliseconds init_fetch_timeout_; bool any_request_sent_yet_in_current_stream_{}; // Tracks changes in our subscription interest since the previous DeltaDiscoveryRequest we sent. - // Can't use unordered_set due to ordering issues in gTest expectation matching. - // Feel free to change to unordered if you can figure out how to make it work. + // TODO: Can't use absl::flat_hash_set due to ordering issues in gTest expectation matching. + // Feel free to change to an unordered container once we figure out how to make it work. std::set names_added_; std::set names_removed_; }; diff --git a/source/common/config/filesystem_subscription_impl.cc b/source/common/config/filesystem_subscription_impl.cc index ea9b8173bac4b..1373dc34c92e4 100644 --- a/source/common/config/filesystem_subscription_impl.cc +++ b/source/common/config/filesystem_subscription_impl.cc @@ -4,6 +4,7 @@ #include "common/common/macros.h" #include "common/common/utility.h" +#include "common/config/decoded_resource_impl.h" #include "common/config/utility.h" #include "common/protobuf/protobuf.h" #include "common/protobuf/utility.h" @@ -13,9 +14,11 @@ namespace Config { FilesystemSubscriptionImpl::FilesystemSubscriptionImpl( Event::Dispatcher& dispatcher, absl::string_view path, SubscriptionCallbacks& callbacks, - SubscriptionStats stats, ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api) + OpaqueResourceDecoder& resource_decoder, SubscriptionStats stats, + ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api) : path_(path), watcher_(dispatcher.createFilesystemWatcher()), callbacks_(callbacks), - stats_(stats), 
api_(api), validation_visitor_(validation_visitor) { + resource_decoder_(resource_decoder), stats_(stats), api_(api), + validation_visitor_(validation_visitor) { watcher_->addWatch(path_, Filesystem::Watcher::Events::MovedTo, [this](uint32_t) { if (started_) { refresh(); @@ -51,9 +54,12 @@ void FilesystemSubscriptionImpl::refresh() { try { MessageUtil::loadFromFile(path_, message, validation_visitor_, api_); config_update_available = true; - callbacks_.onConfigUpdate(message.resources(), message.version_info()); + const auto decoded_resources = + DecodedResourcesWrapper(resource_decoder_, message.resources(), message.version_info()); + callbacks_.onConfigUpdate(decoded_resources.refvec_, message.version_info()); stats_.update_time_.set(DateUtil::nowToMilliseconds(api_.timeSource())); stats_.version_.set(HashUtil::xxHash64(message.version_info())); + stats_.version_text_.set(message.version_info()); stats_.update_success_.inc(); ENVOY_LOG(debug, "Filesystem config update accepted for {}: {}", path_, message.DebugString()); } catch (const ProtobufMessage::UnknownProtoFieldException& e) { diff --git a/source/common/config/filesystem_subscription_impl.h b/source/common/config/filesystem_subscription_impl.h index 39c86f1654da2..75dd5f25b1e47 100644 --- a/source/common/config/filesystem_subscription_impl.h +++ b/source/common/config/filesystem_subscription_impl.h @@ -20,7 +20,8 @@ class FilesystemSubscriptionImpl : public Config::Subscription, Logger::Loggable { public: FilesystemSubscriptionImpl(Event::Dispatcher& dispatcher, absl::string_view path, - SubscriptionCallbacks& callbacks, SubscriptionStats stats, + SubscriptionCallbacks& callbacks, + OpaqueResourceDecoder& resource_decoder, SubscriptionStats stats, ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api); // Config::Subscription @@ -37,6 +38,7 @@ class FilesystemSubscriptionImpl : public Config::Subscription, const std::string path_; std::unique_ptr watcher_; SubscriptionCallbacks& callbacks_; 
+ OpaqueResourceDecoder& resource_decoder_; SubscriptionStats stats_; Api::Api& api_; ProtobufMessage::ValidationVisitor& validation_visitor_; diff --git a/source/common/config/grpc_mux_impl.cc b/source/common/config/grpc_mux_impl.cc index 1e495a2a7f94c..907bf9148adf2 100644 --- a/source/common/config/grpc_mux_impl.cc +++ b/source/common/config/grpc_mux_impl.cc @@ -1,14 +1,16 @@ #include "common/config/grpc_mux_impl.h" -#include - #include "envoy/service/discovery/v3/discovery.pb.h" +#include "common/config/decoded_resource_impl.h" #include "common/config/utility.h" #include "common/config/version_converter.h" #include "common/memory/utils.h" #include "common/protobuf/protobuf.h" +#include "absl/container/btree_map.h" +#include "absl/container/node_hash_set.h" + namespace Envoy { namespace Config { @@ -16,7 +18,7 @@ GrpcMuxImpl::GrpcMuxImpl(const LocalInfo::LocalInfo& local_info, Grpc::RawAsyncClientPtr async_client, Event::Dispatcher& dispatcher, const Protobuf::MethodDescriptor& service_method, envoy::config::core::v3::ApiVersion transport_api_version, - Runtime::RandomGenerator& random, Stats::Scope& scope, + Random::RandomGenerator& random, Stats::Scope& scope, const RateLimitSettings& rate_limit_settings, bool skip_subsequent_node) : grpc_stream_(this, std::move(async_client), service_method, random, dispatcher, scope, rate_limit_settings), @@ -28,23 +30,12 @@ GrpcMuxImpl::GrpcMuxImpl(const LocalInfo::LocalInfo& local_info, void GrpcMuxImpl::start() { grpc_stream_.establishNewStream(); } void GrpcMuxImpl::sendDiscoveryRequest(const std::string& type_url) { - if (!grpc_stream_.grpcStreamAvailable()) { - ENVOY_LOG(debug, "No stream available to sendDiscoveryRequest for {}", type_url); - return; // Drop this request; the reconnect will enqueue a new one. 
- } - ApiState& api_state = api_state_[type_url]; - if (api_state.paused_) { - ENVOY_LOG(trace, "API {} paused during sendDiscoveryRequest(), setting pending.", type_url); - api_state.pending_ = true; - return; // Drop this request; the unpause will enqueue a new one. - } - auto& request = api_state.request_; request.mutable_resource_names()->Clear(); // Maintain a set to avoid dupes. - std::unordered_set resources; + absl::node_hash_set resources; for (const auto* watch : api_state.watches_) { for (const std::string& resource : watch->resources_) { if (resources.count(resource) == 0) { @@ -70,8 +61,10 @@ void GrpcMuxImpl::sendDiscoveryRequest(const std::string& type_url) { GrpcMuxWatchPtr GrpcMuxImpl::addWatch(const std::string& type_url, const std::set& resources, - SubscriptionCallbacks& callbacks) { - auto watch = std::make_unique(resources, callbacks, type_url, *this); + SubscriptionCallbacks& callbacks, + OpaqueResourceDecoder& resource_decoder) { + auto watch = + std::make_unique(resources, callbacks, resource_decoder, type_url, *this); ENVOY_LOG(debug, "gRPC mux addWatch for " + type_url); // Lazily kick off the requests based on first subscription. 
This has the @@ -94,39 +87,40 @@ GrpcMuxWatchPtr GrpcMuxImpl::addWatch(const std::string& type_url, return watch; } -void GrpcMuxImpl::pause(const std::string& type_url) { - ENVOY_LOG(debug, "Pausing discovery requests for {}", type_url); - ApiState& api_state = api_state_[type_url]; - ASSERT(!api_state.paused_); - ASSERT(!api_state.pending_); - api_state.paused_ = true; +ScopedResume GrpcMuxImpl::pause(const std::string& type_url) { + return pause(std::vector{type_url}); } -void GrpcMuxImpl::resume(const std::string& type_url) { - ENVOY_LOG(debug, "Resuming discovery requests for {}", type_url); - ApiState& api_state = api_state_[type_url]; - ASSERT(api_state.paused_); - api_state.paused_ = false; - - if (api_state.pending_) { - ASSERT(api_state.subscribed_); - queueDiscoveryRequest(type_url); - api_state.pending_ = false; +ScopedResume GrpcMuxImpl::pause(const std::vector type_urls) { + for (const auto& type_url : type_urls) { + ApiState& api_state = api_state_[type_url]; + ENVOY_LOG(debug, "Pausing discovery requests for {} (previous count {})", type_url, + api_state.pauses_); + ++api_state.pauses_; } -} + return std::make_unique([this, type_urls]() { + for (const auto& type_url : type_urls) { + ApiState& api_state = api_state_[type_url]; + ENVOY_LOG(debug, "Resuming discovery requests for {} (previous count {})", type_url, + api_state.pauses_); + ASSERT(api_state.paused()); -bool GrpcMuxImpl::paused(const std::string& type_url) const { - auto entry = api_state_.find(type_url); - if (entry == api_state_.end()) { - return false; - } - return entry->second.paused_; + if (--api_state.pauses_ == 0 && api_state.pending_ && api_state.subscribed_) { + queueDiscoveryRequest(type_url); + api_state.pending_ = false; + } + } + }); } void GrpcMuxImpl::onDiscoveryResponse( - std::unique_ptr&& message) { + std::unique_ptr&& message, + ControlPlaneStats& control_plane_stats) { const std::string& type_url = message->type_url(); ENVOY_LOG(debug, "Received gRPC message for {} at 
version {}", type_url, message->version_info()); + if (message->has_control_plane()) { + control_plane_stats.identifier_.set(message->control_plane().identifier()); + } if (api_state_.count(type_url) == 0) { ENVOY_LOG(warn, "Ignoring the message for type URL {} as it has no current subscribers.", type_url); @@ -152,35 +146,47 @@ void GrpcMuxImpl::onDiscoveryResponse( } return; } + ScopedResume same_type_resume; + // We pause updates of the same type. This is necessary for SotW and GrpcMuxImpl, since unlike + // delta and NewGRpcMuxImpl, independent watch additions/removals trigger updates regardless of + // the delta state. The proper fix for this is to converge these implementations, + // see https://github.com/envoyproxy/envoy/issues/11477. + same_type_resume = pause(type_url); try { // To avoid O(n^2) explosion (e.g. when we have 1000s of EDS watches), we // build a map here from resource name to resource and then walk watches_. // We have to walk all watches (and need an efficient map as a result) to - // ensure we deliver empty config updates when a resource is dropped. - std::unordered_map resources; - SubscriptionCallbacks& callbacks = api_state_[type_url].watches_.front()->callbacks_; + // ensure we deliver empty config updates when a resource is dropped. We make the map ordered + // for test determinism. 
+ std::vector resources; + absl::btree_map resource_ref_map; + std::vector all_resource_refs; + OpaqueResourceDecoder& resource_decoder = + api_state_[type_url].watches_.front()->resource_decoder_; for (const auto& resource : message->resources()) { if (type_url != resource.type_url()) { throw EnvoyException( fmt::format("{} does not match the message-wide type URL {} in DiscoveryResponse {}", resource.type_url(), type_url, message->DebugString())); } - const std::string resource_name = callbacks.resourceName(resource); - resources.emplace(resource_name, resource); + resources.emplace_back( + new DecodedResourceImpl(resource_decoder, resource, message->version_info())); + all_resource_refs.emplace_back(*resources.back()); + resource_ref_map.emplace(resources.back()->name(), *resources.back()); } for (auto watch : api_state_[type_url].watches_) { // onConfigUpdate should be called in all cases for single watch xDS (Cluster and // Listener) even if the message does not have resources so that update_empty stat // is properly incremented and state-of-the-world semantics are maintained. 
if (watch->resources_.empty()) { - watch->callbacks_.onConfigUpdate(message->resources(), message->version_info()); + watch->callbacks_.onConfigUpdate(all_resource_refs, message->version_info()); continue; } - Protobuf::RepeatedPtrField found_resources; + std::vector found_resources; for (const auto& watched_resource_name : watch->resources_) { - auto it = resources.find(watched_resource_name); - if (it != resources.end()) { - found_resources.Add()->MergeFrom(it->second); + auto it = resource_ref_map.find(watched_resource_name); + if (it != resource_ref_map.end()) { + found_resources.emplace_back(it->second); } } // onConfigUpdate should be called only on watches(clusters/routes) that have @@ -203,6 +209,7 @@ void GrpcMuxImpl::onDiscoveryResponse( error_detail->set_message(Config::Utility::truncateGrpcStatusMessage(e.what())); } api_state_[type_url].request_.set_response_nonce(message->nonce()); + ASSERT(api_state_[type_url].paused()); queueDiscoveryRequest(type_url); } @@ -210,6 +217,8 @@ void GrpcMuxImpl::onWriteable() { drainRequests(); } void GrpcMuxImpl::onStreamEstablished() { first_stream_request_ = true; + grpc_stream_.maybeUpdateQueueSizeStat(0); + request_queue_ = std::make_unique>(); for (const auto& type_url : subscriptions_) { queueDiscoveryRequest(type_url); } @@ -225,23 +234,28 @@ void GrpcMuxImpl::onEstablishmentFailure() { } void GrpcMuxImpl::queueDiscoveryRequest(const std::string& queue_item) { - request_queue_.push(queue_item); + if (!grpc_stream_.grpcStreamAvailable()) { + ENVOY_LOG(debug, "No stream available to queueDiscoveryRequest for {}", queue_item); + return; // Drop this request; the reconnect will enqueue a new one. + } + ApiState& api_state = api_state_[queue_item]; + if (api_state.paused()) { + ENVOY_LOG(trace, "API {} paused during queueDiscoveryRequest(), setting pending.", queue_item); + api_state.pending_ = true; + return; // Drop this request; the unpause will enqueue a new one. 
+ } + request_queue_->push(queue_item); drainRequests(); } -void GrpcMuxImpl::clearRequestQueue() { - grpc_stream_.maybeUpdateQueueSizeStat(0); - request_queue_ = {}; -} - void GrpcMuxImpl::drainRequests() { - while (!request_queue_.empty() && grpc_stream_.checkRateLimitAllowsDrain()) { + while (!request_queue_->empty() && grpc_stream_.checkRateLimitAllowsDrain()) { // Process the request, if rate limiting is not enabled at all or if it is under rate limit. - sendDiscoveryRequest(request_queue_.front()); - request_queue_.pop(); + sendDiscoveryRequest(request_queue_->front()); + request_queue_->pop(); } - grpc_stream_.maybeUpdateQueueSizeStat(request_queue_.size()); + grpc_stream_.maybeUpdateQueueSizeStat(request_queue_->size()); } } // namespace Config -} // namespace Envoy \ No newline at end of file +} // namespace Envoy diff --git a/source/common/config/grpc_mux_impl.h b/source/common/config/grpc_mux_impl.h index d3572bf71f097..d735bc12c1cf9 100644 --- a/source/common/config/grpc_mux_impl.h +++ b/source/common/config/grpc_mux_impl.h @@ -1,9 +1,11 @@ #pragma once +#include +#include #include -#include #include "envoy/api/v2/discovery.pb.h" +#include "envoy/common/random_generator.h" #include "envoy/common/time.h" #include "envoy/config/grpc_mux.h" #include "envoy/config/subscription.h" @@ -18,9 +20,10 @@ #include "common/config/grpc_stream.h" #include "common/config/utility.h" +#include "absl/container/node_hash_map.h" + namespace Envoy { namespace Config { - /** * ADS API implementation that fetches via gRPC. 
*/ @@ -31,30 +34,29 @@ class GrpcMuxImpl : public GrpcMux, GrpcMuxImpl(const LocalInfo::LocalInfo& local_info, Grpc::RawAsyncClientPtr async_client, Event::Dispatcher& dispatcher, const Protobuf::MethodDescriptor& service_method, envoy::config::core::v3::ApiVersion transport_api_version, - Runtime::RandomGenerator& random, Stats::Scope& scope, + Random::RandomGenerator& random, Stats::Scope& scope, const RateLimitSettings& rate_limit_settings, bool skip_subsequent_node); ~GrpcMuxImpl() override = default; void start() override; // GrpcMux - void pause(const std::string& type_url) override; - void resume(const std::string& type_url) override; - bool paused(const std::string& type_url) const override; + ScopedResume pause(const std::string& type_url) override; + ScopedResume pause(const std::vector type_urls) override; GrpcMuxWatchPtr addWatch(const std::string& type_url, const std::set& resources, - SubscriptionCallbacks& callbacks) override; + SubscriptionCallbacks& callbacks, + OpaqueResourceDecoder& resource_decoder) override; void handleDiscoveryResponse( std::unique_ptr&& message); - void sendDiscoveryRequest(const std::string& type_url); - // Config::GrpcStreamCallbacks void onStreamEstablished() override; void onEstablishmentFailure() override; - void onDiscoveryResponse( - std::unique_ptr&& message) override; + void + onDiscoveryResponse(std::unique_ptr&& message, + ControlPlaneStats& control_plane_stats) override; void onWriteable() override; GrpcStream& resources, SubscriptionCallbacks& callbacks, - const std::string& type_url, GrpcMuxImpl& parent) - : resources_(resources), callbacks_(callbacks), type_url_(type_url), parent_(parent), - watches_(parent.api_state_[type_url].watches_) { + OpaqueResourceDecoder& resource_decoder, const std::string& type_url, + GrpcMuxImpl& parent) + : resources_(resources), callbacks_(callbacks), resource_decoder_(resource_decoder), + type_url_(type_url), parent_(parent), watches_(parent.api_state_[type_url].watches_) { 
watches_.emplace(watches_.begin(), this); } ~GrpcMuxWatchImpl() override { watches_.remove(this); if (!resources_.empty()) { - parent_.sendDiscoveryRequest(type_url_); + parent_.queueDiscoveryRequest(type_url_); } } void update(const std::set& resources) override { watches_.remove(this); if (!resources_.empty()) { - parent_.sendDiscoveryRequest(type_url_); + parent_.queueDiscoveryRequest(type_url_); } resources_ = resources; // move this watch to the beginning of the list @@ -95,6 +99,7 @@ class GrpcMuxImpl : public GrpcMux, std::set resources_; SubscriptionCallbacks& callbacks_; + OpaqueResourceDecoder& resource_decoder_; const std::string type_url_; GrpcMuxImpl& parent_; @@ -104,12 +109,14 @@ class GrpcMuxImpl : public GrpcMux, // Per muxed API state. struct ApiState { + bool paused() const { return pauses_ > 0; } + // Watches on the returned resources for the API; std::list watches_; // Current DiscoveryRequest for API. envoy::service::discovery::v3::DiscoveryRequest request_; - // Paused via pause()? - bool paused_{}; + // Count of unresumed pause() invocations. + uint32_t pauses_{}; // Was a DiscoveryRequest elided during a pause? bool pending_{}; // Has this API been tracked in subscriptions_? @@ -118,7 +125,6 @@ class GrpcMuxImpl : public GrpcMux, // Request queue management logic. void queueDiscoveryRequest(const std::string& queue_item); - void clearRequestQueue(); GrpcStream @@ -126,35 +132,41 @@ class GrpcMuxImpl : public GrpcMux, const LocalInfo::LocalInfo& local_info_; const bool skip_subsequent_node_; bool first_stream_request_; - std::unordered_map api_state_; + absl::node_hash_map api_state_; // Envoy's dependency ordering. std::list subscriptions_; // A queue to store requests while rate limited. Note that when requests cannot be sent due to the // gRPC stream being down, this queue does not store them; rather, they are simply dropped. // This string is a type URL. 
- std::queue request_queue_; + std::unique_ptr> request_queue_; const envoy::config::core::v3::ApiVersion transport_api_version_; }; +using GrpcMuxImplPtr = std::unique_ptr; +using GrpcMuxImplSharedPtr = std::shared_ptr; + class NullGrpcMuxImpl : public GrpcMux, GrpcStreamCallbacks { public: void start() override {} - void pause(const std::string&) override {} - void resume(const std::string&) override {} - bool paused(const std::string&) const override { return false; } + ScopedResume pause(const std::string&) override { + return std::make_unique([] {}); + } + ScopedResume pause(const std::vector) override { + return std::make_unique([] {}); + } - GrpcMuxWatchPtr addWatch(const std::string&, const std::set&, - SubscriptionCallbacks&) override { + GrpcMuxWatchPtr addWatch(const std::string&, const std::set&, SubscriptionCallbacks&, + OpaqueResourceDecoder&) override { throw EnvoyException("ADS must be configured to support an ADS config source"); } void onWriteable() override {} void onStreamEstablished() override {} void onEstablishmentFailure() override {} - void onDiscoveryResponse( - std::unique_ptr&&) override {} + void onDiscoveryResponse(std::unique_ptr&&, + ControlPlaneStats&) override {} }; } // namespace Config diff --git a/source/common/config/grpc_stream.h b/source/common/config/grpc_stream.h index d1b80ad6a38bb..b922d8c09f0f3 100644 --- a/source/common/config/grpc_stream.h +++ b/source/common/config/grpc_stream.h @@ -1,7 +1,9 @@ #pragma once #include +#include +#include "envoy/common/random_generator.h" #include "envoy/config/grpc_mux.h" #include "envoy/grpc/async_client.h" @@ -13,6 +15,8 @@ namespace Envoy { namespace Config { +template using ResponseProtoPtr = std::unique_ptr; + // Oversees communication for gRPC xDS implementations (parent to both regular xDS and delta // xDS variants). Reestablishes the gRPC channel when necessary, and provides rate limiting of // requests. 
@@ -21,19 +25,24 @@ class GrpcStream : public Grpc::AsyncStreamCallbacks, public Logger::Loggable { public: GrpcStream(GrpcStreamCallbacks* callbacks, Grpc::RawAsyncClientPtr async_client, - const Protobuf::MethodDescriptor& service_method, Runtime::RandomGenerator& random, + const Protobuf::MethodDescriptor& service_method, Random::RandomGenerator& random, Event::Dispatcher& dispatcher, Stats::Scope& scope, const RateLimitSettings& rate_limit_settings) : callbacks_(callbacks), async_client_(std::move(async_client)), - service_method_(service_method), control_plane_stats_(generateControlPlaneStats(scope)), - random_(random), time_source_(dispatcher.timeSource()), + service_method_(service_method), + control_plane_stats_(Utility::generateControlPlaneStats(scope)), random_(random), + time_source_(dispatcher.timeSource()), rate_limiting_enabled_(rate_limit_settings.enabled_) { retry_timer_ = dispatcher.createTimer([this]() -> void { establishNewStream(); }); if (rate_limiting_enabled_) { // Default Bucket contains 100 tokens maximum and refills at 10 tokens/sec. limit_request_ = std::make_unique( rate_limit_settings.max_tokens_, time_source_, rate_limit_settings.fill_rate_); - drain_request_timer_ = dispatcher.createTimer([this]() { callbacks_->onWriteable(); }); + drain_request_timer_ = dispatcher.createTimer([this]() { + if (stream_ != nullptr) { + callbacks_->onWriteable(); + } + }); } // TODO(htuch): Make this configurable. @@ -73,14 +82,14 @@ class GrpcStream : public Grpc::AsyncStreamCallbacks, UNREFERENCED_PARAMETER(metadata); } - void onReceiveMessage(std::unique_ptr&& message) override { + void onReceiveMessage(ResponseProtoPtr&& message) override { // Reset here so that it starts with fresh backoff interval on next disconnect. backoff_strategy_->reset(); // Sometimes during hot restarts this stat's value becomes inconsistent and will continue to // have 0 until it is reconnected. 
Setting here ensures that it is consistent with the state of // management server connection. control_plane_stats_.connected_state_.set(1); - callbacks_->onDiscoveryResponse(std::move(message)); + callbacks_->onDiscoveryResponse(std::move(message), control_plane_stats_); } void onReceiveTrailingMetadata(Http::ResponseTrailerMapPtr&& metadata) override { @@ -116,7 +125,9 @@ class GrpcStream : public Grpc::AsyncStreamCallbacks, ASSERT(drain_request_timer_ != nullptr); control_plane_stats_.rate_limit_enforced_.inc(); // Enable the drain request timer. - drain_request_timer_->enableTimer(limit_request_->nextTokenAvailable()); + if (!drain_request_timer_->enabled()) { + drain_request_timer_->enableTimer(limit_request_->nextTokenAvailable()); + } return false; } @@ -125,12 +136,6 @@ class GrpcStream : public Grpc::AsyncStreamCallbacks, retry_timer_->enableTimer(std::chrono::milliseconds(backoff_strategy_->nextBackOffMs())); } - ControlPlaneStats generateControlPlaneStats(Stats::Scope& scope) { - const std::string control_plane_prefix = "control_plane."; - return {ALL_CONTROL_PLANE_STATS(POOL_COUNTER_PREFIX(scope, control_plane_prefix), - POOL_GAUGE_PREFIX(scope, control_plane_prefix))}; - } - GrpcStreamCallbacks* const callbacks_; Grpc::AsyncClient async_client_; @@ -140,7 +145,7 @@ class GrpcStream : public Grpc::AsyncStreamCallbacks, // Reestablishes the gRPC channel when necessary, with some backoff politeness. 
Event::TimerPtr retry_timer_; - Runtime::RandomGenerator& random_; + Random::RandomGenerator& random_; TimeSource& time_source_; BackOffStrategyPtr backoff_strategy_; diff --git a/source/common/config/grpc_subscription_impl.cc b/source/common/config/grpc_subscription_impl.cc index 22b9a468af996..ef8037f250064 100644 --- a/source/common/config/grpc_subscription_impl.cc +++ b/source/common/config/grpc_subscription_impl.cc @@ -10,15 +10,13 @@ namespace Envoy { namespace Config { -GrpcSubscriptionImpl::GrpcSubscriptionImpl(GrpcMuxSharedPtr grpc_mux, - SubscriptionCallbacks& callbacks, - SubscriptionStats stats, absl::string_view type_url, - Event::Dispatcher& dispatcher, - std::chrono::milliseconds init_fetch_timeout, - bool is_aggregated) - : grpc_mux_(grpc_mux), callbacks_(callbacks), stats_(stats), type_url_(type_url), - dispatcher_(dispatcher), init_fetch_timeout_(init_fetch_timeout), - is_aggregated_(is_aggregated) {} +GrpcSubscriptionImpl::GrpcSubscriptionImpl( + GrpcMuxSharedPtr grpc_mux, SubscriptionCallbacks& callbacks, + OpaqueResourceDecoder& resource_decoder, SubscriptionStats stats, absl::string_view type_url, + Event::Dispatcher& dispatcher, std::chrono::milliseconds init_fetch_timeout, bool is_aggregated) + : grpc_mux_(grpc_mux), callbacks_(callbacks), resource_decoder_(resource_decoder), + stats_(stats), type_url_(type_url), dispatcher_(dispatcher), + init_fetch_timeout_(init_fetch_timeout), is_aggregated_(is_aggregated) {} // Config::Subscription void GrpcSubscriptionImpl::start(const std::set& resources) { @@ -30,7 +28,7 @@ void GrpcSubscriptionImpl::start(const std::set& resources) { init_fetch_timeout_timer_->enableTimer(init_fetch_timeout_); } - watch_ = grpc_mux_->addWatch(type_url_, resources, *this); + watch_ = grpc_mux_->addWatch(type_url_, resources, *this, resource_decoder_); // The attempt stat here is maintained for the purposes of having consistency between ADS and // gRPC/filesystem/REST Subscriptions. 
Since ADS is push based and muxed, the notion of an @@ -51,9 +49,8 @@ void GrpcSubscriptionImpl::updateResourceInterest( } // Config::SubscriptionCallbacks -void GrpcSubscriptionImpl::onConfigUpdate( - const Protobuf::RepeatedPtrField& resources, - const std::string& version_info) { +void GrpcSubscriptionImpl::onConfigUpdate(const std::vector& resources, + const std::string& version_info) { disableInitFetchTimeoutTimer(); // TODO(mattklein123): In the future if we start tracking per-resource versions, we need to // supply those versions to onConfigUpdate() along with the xDS response ("system") @@ -64,12 +61,13 @@ void GrpcSubscriptionImpl::onConfigUpdate( stats_.update_attempt_.inc(); stats_.update_time_.set(DateUtil::nowToMilliseconds(dispatcher_.timeSource())); stats_.version_.set(HashUtil::xxHash64(version_info)); + stats_.version_text_.set(version_info); ENVOY_LOG(debug, "gRPC config for {} accepted with {} resources with version {}", type_url_, resources.size(), version_info); } void GrpcSubscriptionImpl::onConfigUpdate( - const Protobuf::RepeatedPtrField& added_resources, + const std::vector& added_resources, const Protobuf::RepeatedPtrField& removed_resources, const std::string& system_version_info) { disableInitFetchTimeoutTimer(); @@ -78,6 +76,7 @@ void GrpcSubscriptionImpl::onConfigUpdate( stats_.update_success_.inc(); stats_.update_time_.set(DateUtil::nowToMilliseconds(dispatcher_.timeSource())); stats_.version_.set(HashUtil::xxHash64(system_version_info)); + stats_.version_text_.set(system_version_info); } void GrpcSubscriptionImpl::onConfigUpdateFailed(ConfigUpdateFailureReason reason, @@ -106,13 +105,7 @@ void GrpcSubscriptionImpl::onConfigUpdateFailed(ConfigUpdateFailureReason reason stats_.update_attempt_.inc(); } -std::string GrpcSubscriptionImpl::resourceName(const ProtobufWkt::Any& resource) { - return callbacks_.resourceName(resource); -} - -void GrpcSubscriptionImpl::pause() { grpc_mux_->pause(type_url_); } - -void 
GrpcSubscriptionImpl::resume() { grpc_mux_->resume(type_url_); } +ScopedResume GrpcSubscriptionImpl::pause() { return grpc_mux_->pause(type_url_); } void GrpcSubscriptionImpl::disableInitFetchTimeoutTimer() { if (init_fetch_timeout_timer_) { diff --git a/source/common/config/grpc_subscription_impl.h b/source/common/config/grpc_subscription_impl.h index ffc179f15bf81..a5102055a08ce 100644 --- a/source/common/config/grpc_subscription_impl.h +++ b/source/common/config/grpc_subscription_impl.h @@ -1,5 +1,7 @@ #pragma once +#include + #include "envoy/config/grpc_mux.h" #include "envoy/config/subscription.h" #include "envoy/event/dispatcher.h" @@ -17,37 +19,32 @@ class GrpcSubscriptionImpl : public Subscription, Logger::Loggable { public: GrpcSubscriptionImpl(GrpcMuxSharedPtr grpc_mux, SubscriptionCallbacks& callbacks, - SubscriptionStats stats, absl::string_view type_url, - Event::Dispatcher& dispatcher, std::chrono::milliseconds init_fetch_timeout, - bool is_aggregated); + OpaqueResourceDecoder& resource_decoder, SubscriptionStats stats, + absl::string_view type_url, Event::Dispatcher& dispatcher, + std::chrono::milliseconds init_fetch_timeout, bool is_aggregated); // Config::Subscription void start(const std::set& resource_names) override; void updateResourceInterest(const std::set& update_to_these_names) override; // Config::SubscriptionCallbacks (all pass through to callbacks_!) 
- void onConfigUpdate(const Protobuf::RepeatedPtrField& resources, + void onConfigUpdate(const std::vector& resources, const std::string& version_info) override; - - void onConfigUpdate( - const Protobuf::RepeatedPtrField& added_resources, - const Protobuf::RepeatedPtrField& removed_resources, - const std::string& system_version_info) override; - + void onConfigUpdate(const std::vector& added_resources, + const Protobuf::RepeatedPtrField& removed_resources, + const std::string& system_version_info) override; void onConfigUpdateFailed(ConfigUpdateFailureReason reason, const EnvoyException* e) override; - std::string resourceName(const ProtobufWkt::Any& resource) override; - GrpcMuxSharedPtr grpcMux() { return grpc_mux_; } - void pause(); - void resume(); + ScopedResume pause(); private: void disableInitFetchTimeoutTimer(); GrpcMuxSharedPtr grpc_mux_; SubscriptionCallbacks& callbacks_; + OpaqueResourceDecoder& resource_decoder_; SubscriptionStats stats_; const std::string type_url_; GrpcMuxWatchPtr watch_; @@ -59,5 +56,8 @@ class GrpcSubscriptionImpl : public Subscription, const bool is_aggregated_; }; +using GrpcSubscriptionImplPtr = std::unique_ptr; +using GrpcSubscriptionImplSharedPtr = std::shared_ptr; + } // namespace Config } // namespace Envoy diff --git a/source/common/config/http_subscription_impl.cc b/source/common/config/http_subscription_impl.cc index ab3974f9880e4..8c0d55d5e7494 100644 --- a/source/common/config/http_subscription_impl.cc +++ b/source/common/config/http_subscription_impl.cc @@ -8,6 +8,7 @@ #include "common/common/assert.h" #include "common/common/macros.h" #include "common/common/utility.h" +#include "common/config/decoded_resource_impl.h" #include "common/config/utility.h" #include "common/config/version_converter.h" #include "common/http/headers.h" @@ -22,17 +23,17 @@ namespace Config { HttpSubscriptionImpl::HttpSubscriptionImpl( const LocalInfo::LocalInfo& local_info, Upstream::ClusterManager& cm, const std::string& 
remote_cluster_name, Event::Dispatcher& dispatcher, - Runtime::RandomGenerator& random, std::chrono::milliseconds refresh_interval, + Random::RandomGenerator& random, std::chrono::milliseconds refresh_interval, std::chrono::milliseconds request_timeout, const Protobuf::MethodDescriptor& service_method, absl::string_view type_url, envoy::config::core::v3::ApiVersion transport_api_version, - SubscriptionCallbacks& callbacks, SubscriptionStats stats, - std::chrono::milliseconds init_fetch_timeout, + SubscriptionCallbacks& callbacks, OpaqueResourceDecoder& resource_decoder, + SubscriptionStats stats, std::chrono::milliseconds init_fetch_timeout, ProtobufMessage::ValidationVisitor& validation_visitor) : Http::RestApiFetcher(cm, remote_cluster_name, dispatcher, random, refresh_interval, request_timeout), - callbacks_(callbacks), stats_(stats), dispatcher_(dispatcher), - init_fetch_timeout_(init_fetch_timeout), validation_visitor_(validation_visitor), - transport_api_version_(transport_api_version) { + callbacks_(callbacks), resource_decoder_(resource_decoder), stats_(stats), + dispatcher_(dispatcher), init_fetch_timeout_(init_fetch_timeout), + validation_visitor_(validation_visitor), transport_api_version_(transport_api_version) { request_.mutable_node()->CopyFrom(local_info.node()); request_.set_type_url(std::string(type_url)); ASSERT(service_method.options().HasExtension(google::api::http)); @@ -85,10 +86,13 @@ void HttpSubscriptionImpl::parseResponse(const Http::ResponseMessage& response) return; } try { - callbacks_.onConfigUpdate(message.resources(), message.version_info()); + const auto decoded_resources = + DecodedResourcesWrapper(resource_decoder_, message.resources(), message.version_info()); + callbacks_.onConfigUpdate(decoded_resources.refvec_, message.version_info()); request_.set_version_info(message.version_info()); stats_.update_time_.set(DateUtil::nowToMilliseconds(dispatcher_.timeSource())); 
stats_.version_.set(HashUtil::xxHash64(request_.version_info())); + stats_.version_text_.set(request_.version_info()); stats_.update_success_.inc(); } catch (const EnvoyException& e) { handleFailure(Config::ConfigUpdateFailureReason::UpdateRejected, &e); diff --git a/source/common/config/http_subscription_impl.h b/source/common/config/http_subscription_impl.h index b5e8b33e94eab..ec3d2e6ad0de3 100644 --- a/source/common/config/http_subscription_impl.h +++ b/source/common/config/http_subscription_impl.h @@ -1,6 +1,7 @@ #pragma once #include "envoy/api/v2/discovery.pb.h" +#include "envoy/common/random_generator.h" #include "envoy/config/subscription.h" #include "envoy/event/dispatcher.h" #include "envoy/service/discovery/v3/discovery.pb.h" @@ -24,12 +25,12 @@ class HttpSubscriptionImpl : public Http::RestApiFetcher, public: HttpSubscriptionImpl(const LocalInfo::LocalInfo& local_info, Upstream::ClusterManager& cm, const std::string& remote_cluster_name, Event::Dispatcher& dispatcher, - Runtime::RandomGenerator& random, std::chrono::milliseconds refresh_interval, + Random::RandomGenerator& random, std::chrono::milliseconds refresh_interval, std::chrono::milliseconds request_timeout, const Protobuf::MethodDescriptor& service_method, absl::string_view type_url, envoy::config::core::v3::ApiVersion transport_api_version, - SubscriptionCallbacks& callbacks, SubscriptionStats stats, - std::chrono::milliseconds init_fetch_timeout, + SubscriptionCallbacks& callbacks, OpaqueResourceDecoder& resource_decoder, + SubscriptionStats stats, std::chrono::milliseconds init_fetch_timeout, ProtobufMessage::ValidationVisitor& validation_visitor); // Config::Subscription @@ -50,6 +51,7 @@ class HttpSubscriptionImpl : public Http::RestApiFetcher, Protobuf::RepeatedPtrField resources_; envoy::service::discovery::v3::DiscoveryRequest request_; Config::SubscriptionCallbacks& callbacks_; + Config::OpaqueResourceDecoder& resource_decoder_; SubscriptionStats stats_; Event::Dispatcher& 
dispatcher_; std::chrono::milliseconds init_fetch_timeout_; diff --git a/source/common/config/metadata.h b/source/common/config/metadata.h index 3c59c77a20836..efac4eff7e59d 100644 --- a/source/common/config/metadata.h +++ b/source/common/config/metadata.h @@ -2,7 +2,6 @@ #include #include -#include #include "envoy/config/core/v3/base.pb.h" #include "envoy/config/typed_metadata.h" @@ -14,6 +13,8 @@ #include "common/protobuf/protobuf.h" #include "common/shared_pool/shared_pool.h" +#include "absl/container/node_hash_map.h" + namespace Envoy { namespace Config { @@ -115,15 +116,16 @@ template class TypedMetadataImpl : public TypedMetadata */ void populateFrom(const envoy::config::core::v3::Metadata& metadata) { auto& data_by_key = metadata.filter_metadata(); - for (const auto& it : Registry::FactoryRegistry::factories()) { - const auto& meta_iter = data_by_key.find(it.first); + for (const auto& [factory_name, factory] : + Registry::FactoryRegistry::factories()) { + const auto& meta_iter = data_by_key.find(factory_name); if (meta_iter != data_by_key.end()) { - data_[it.second->name()] = it.second->parse(meta_iter->second); + data_[factory->name()] = factory->parse(meta_iter->second); } } } - std::unordered_map> data_; + absl::node_hash_map> data_; }; } // namespace Config diff --git a/source/common/config/new_grpc_mux_impl.cc b/source/common/config/new_grpc_mux_impl.cc index fcec95a0b4cc6..131ccd24db51b 100644 --- a/source/common/config/new_grpc_mux_impl.cc +++ b/source/common/config/new_grpc_mux_impl.cc @@ -18,26 +18,35 @@ NewGrpcMuxImpl::NewGrpcMuxImpl(Grpc::RawAsyncClientPtr&& async_client, Event::Dispatcher& dispatcher, const Protobuf::MethodDescriptor& service_method, envoy::config::core::v3::ApiVersion transport_api_version, - Runtime::RandomGenerator& random, Stats::Scope& scope, + Random::RandomGenerator& random, Stats::Scope& scope, const RateLimitSettings& rate_limit_settings, const LocalInfo::LocalInfo& local_info) : grpc_stream_(this, 
std::move(async_client), service_method, random, dispatcher, scope, rate_limit_settings), local_info_(local_info), transport_api_version_(transport_api_version) {} -void NewGrpcMuxImpl::pause(const std::string& type_url) { pausable_ack_queue_.pause(type_url); } - -void NewGrpcMuxImpl::resume(const std::string& type_url) { - pausable_ack_queue_.resume(type_url); - trySendDiscoveryRequests(); +ScopedResume NewGrpcMuxImpl::pause(const std::string& type_url) { + return pause(std::vector{type_url}); } -bool NewGrpcMuxImpl::paused(const std::string& type_url) const { - return pausable_ack_queue_.paused(type_url); +ScopedResume NewGrpcMuxImpl::pause(const std::vector type_urls) { + for (const auto& type_url : type_urls) { + pausable_ack_queue_.pause(type_url); + } + + return std::make_unique([this, type_urls]() { + for (const auto& type_url : type_urls) { + pausable_ack_queue_.resume(type_url); + if (!pausable_ack_queue_.paused(type_url)) { + trySendDiscoveryRequests(); + } + } + }); } void NewGrpcMuxImpl::onDiscoveryResponse( - std::unique_ptr&& message) { + std::unique_ptr&& message, + ControlPlaneStats&) { ENVOY_LOG(debug, "Received DeltaDiscoveryResponse for {} at version {}", message->type_url(), message->system_version_info()); auto sub = subscriptions_.find(message->type_url()); @@ -64,8 +73,8 @@ void NewGrpcMuxImpl::onDiscoveryResponse( } void NewGrpcMuxImpl::onStreamEstablished() { - for (auto& sub : subscriptions_) { - sub.second->sub_state_.markStreamFresh(); + for (auto& [type_url, subscription] : subscriptions_) { + subscription->sub_state_.markStreamFresh(); } trySendDiscoveryRequests(); } @@ -79,8 +88,8 @@ void NewGrpcMuxImpl::onEstablishmentFailure() { absl::flat_hash_map all_subscribed; absl::flat_hash_map already_called; do { - for (auto& sub : subscriptions_) { - all_subscribed[sub.first] = &sub.second->sub_state_; + for (auto& [type_url, subscription] : subscriptions_) { + all_subscribed[type_url] = &subscription->sub_state_; } for (auto& sub : 
all_subscribed) { if (already_called.insert(sub).second) { // insert succeeded ==> not already called @@ -102,15 +111,16 @@ void NewGrpcMuxImpl::start() { grpc_stream_.establishNewStream(); } GrpcMuxWatchPtr NewGrpcMuxImpl::addWatch(const std::string& type_url, const std::set& resources, - SubscriptionCallbacks& callbacks) { + SubscriptionCallbacks& callbacks, + OpaqueResourceDecoder& resource_decoder) { auto entry = subscriptions_.find(type_url); if (entry == subscriptions_.end()) { // We don't yet have a subscription for type_url! Make one! addSubscription(type_url); - return addWatch(type_url, resources, callbacks); + return addWatch(type_url, resources, callbacks, resource_decoder); } - Watch* watch = entry->second->watch_map_.addWatch(callbacks); + Watch* watch = entry->second->watch_map_.addWatch(callbacks, resource_decoder); // updateWatch() queues a discovery request if any of 'resources' are not yet subscribed. updateWatch(type_url, watch, resources); return std::make_unique(type_url, watch, *this); diff --git a/source/common/config/new_grpc_mux_impl.h b/source/common/config/new_grpc_mux_impl.h index c7d63a93d01d1..431106a4dd399 100644 --- a/source/common/config/new_grpc_mux_impl.h +++ b/source/common/config/new_grpc_mux_impl.h @@ -1,6 +1,9 @@ #pragma once +#include + #include "envoy/api/v2/discovery.pb.h" +#include "envoy/common/random_generator.h" #include "envoy/common/token_bucket.h" #include "envoy/config/grpc_mux.h" #include "envoy/config/subscription.h" @@ -30,18 +33,20 @@ class NewGrpcMuxImpl NewGrpcMuxImpl(Grpc::RawAsyncClientPtr&& async_client, Event::Dispatcher& dispatcher, const Protobuf::MethodDescriptor& service_method, envoy::config::core::v3::ApiVersion transport_api_version, - Runtime::RandomGenerator& random, Stats::Scope& scope, + Random::RandomGenerator& random, Stats::Scope& scope, const RateLimitSettings& rate_limit_settings, const LocalInfo::LocalInfo& local_info); GrpcMuxWatchPtr addWatch(const std::string& type_url, const std::set& 
resources, - SubscriptionCallbacks& callbacks) override; + SubscriptionCallbacks& callbacks, + OpaqueResourceDecoder& resource_decoder) override; + + ScopedResume pause(const std::string& type_url) override; + ScopedResume pause(const std::vector type_urls) override; - void pause(const std::string& type_url) override; - void resume(const std::string& type_url) override; - bool paused(const std::string& type_url) const override; void onDiscoveryResponse( - std::unique_ptr&& message) override; + std::unique_ptr&& message, + ControlPlaneStats& control_plane_stats) override; void onStreamEstablished() override; @@ -65,8 +70,10 @@ class NewGrpcMuxImpl SubscriptionStuff& operator=(const SubscriptionStuff&) = delete; }; + using SubscriptionStuffPtr = std::unique_ptr; + // for use in tests only - const absl::flat_hash_map>& subscriptions() { + const absl::flat_hash_map& subscriptions() { return subscriptions_; } @@ -125,7 +132,7 @@ class NewGrpcMuxImpl PausableAckQueue pausable_ack_queue_; // Map key is type_url. - absl::flat_hash_map> subscriptions_; + absl::flat_hash_map subscriptions_; // Determines the order of initial discovery requests. (Assumes that subscriptions are added in // the order of Envoy's dependency ordering). 
@@ -140,6 +147,7 @@ class NewGrpcMuxImpl const envoy::config::core::v3::ApiVersion transport_api_version_; }; +using NewGrpcMuxImplPtr = std::unique_ptr; using NewGrpcMuxImplSharedPtr = std::shared_ptr; } // namespace Config diff --git a/source/common/config/opaque_resource_decoder_impl.h b/source/common/config/opaque_resource_decoder_impl.h new file mode 100644 index 0000000000000..ef066101952b2 --- /dev/null +++ b/source/common/config/opaque_resource_decoder_impl.h @@ -0,0 +1,37 @@ +#pragma once + +#include "envoy/config/subscription.h" + +#include "common/protobuf/utility.h" + +namespace Envoy { +namespace Config { + +template class OpaqueResourceDecoderImpl : public Config::OpaqueResourceDecoder { +public: + OpaqueResourceDecoderImpl(ProtobufMessage::ValidationVisitor& validation_visitor, + absl::string_view name_field) + : validation_visitor_(validation_visitor), name_field_(name_field) {} + + // Config::OpaqueResourceDecoder + ProtobufTypes::MessagePtr decodeResource(const ProtobufWkt::Any& resource) override { + auto typed_message = std::make_unique(); + // If the Any is a synthetic empty message (e.g. because the resource field was not set in + // Resource, this might be empty, so we shouldn't decode. 
+ if (!resource.type_url().empty()) { + MessageUtil::anyConvertAndValidate(resource, *typed_message, validation_visitor_); + } + return typed_message; + } + + std::string resourceName(const Protobuf::Message& resource) override { + return MessageUtil::getStringField(resource, name_field_); + } + +private: + ProtobufMessage::ValidationVisitor& validation_visitor_; + const std::string name_field_; +}; + +} // namespace Config +} // namespace Envoy diff --git a/source/common/config/pausable_ack_queue.cc b/source/common/config/pausable_ack_queue.cc index daec7587acb2a..dc6f01773f6a1 100644 --- a/source/common/config/pausable_ack_queue.cc +++ b/source/common/config/pausable_ack_queue.cc @@ -13,7 +13,7 @@ size_t PausableAckQueue::size() const { return storage_.size(); } bool PausableAckQueue::empty() { for (const auto& entry : storage_) { - if (!paused_[entry.type_url_]) { + if (pauses_[entry.type_url_] == 0) { return false; } } @@ -22,7 +22,7 @@ bool PausableAckQueue::empty() { const UpdateAck& PausableAckQueue::front() { for (const auto& entry : storage_) { - if (!paused_[entry.type_url_]) { + if (pauses_[entry.type_url_] == 0) { return entry; } } @@ -32,7 +32,7 @@ const UpdateAck& PausableAckQueue::front() { UpdateAck PausableAckQueue::popFront() { for (auto it = storage_.begin(); it != storage_.end(); ++it) { - if (!paused_[it->type_url_]) { + if (pauses_[it->type_url_] == 0) { UpdateAck ret = *it; storage_.erase(it); return ret; @@ -44,23 +44,22 @@ UpdateAck PausableAckQueue::popFront() { void PausableAckQueue::pause(const std::string& type_url) { // It's ok to pause a subscription that doesn't exist yet. 
- auto& pause_entry = paused_[type_url]; - ASSERT(!pause_entry); - pause_entry = true; + auto& pause_entry = pauses_[type_url]; + ++pause_entry; } void PausableAckQueue::resume(const std::string& type_url) { - auto& pause_entry = paused_[type_url]; - ASSERT(pause_entry); - pause_entry = false; + auto& pause_entry = pauses_[type_url]; + ASSERT(pause_entry > 0); + --pause_entry; } bool PausableAckQueue::paused(const std::string& type_url) const { - auto entry = paused_.find(type_url); - if (entry == paused_.end()) { + auto entry = pauses_.find(type_url); + if (entry == pauses_.end()) { return false; } - return entry->second; + return entry->second > 0; } } // namespace Config diff --git a/source/common/config/pausable_ack_queue.h b/source/common/config/pausable_ack_queue.h index 011f3ed479a7a..5535e262598f5 100644 --- a/source/common/config/pausable_ack_queue.h +++ b/source/common/config/pausable_ack_queue.h @@ -27,7 +27,7 @@ class PausableAckQueue { private: // It's ok for non-existent subs to be paused/resumed. The cleanest way to support that is to give // the pause state its own map. (Map key is type_url.) 
- absl::flat_hash_map paused_; + absl::flat_hash_map pauses_; std::list storage_; }; diff --git a/source/common/config/protobuf_link_hacks.h b/source/common/config/protobuf_link_hacks.h index efcfa08f0c231..b613d60ff84c7 100644 --- a/source/common/config/protobuf_link_hacks.h +++ b/source/common/config/protobuf_link_hacks.h @@ -13,6 +13,7 @@ #include "envoy/service/discovery/v2/sds.pb.h" #include "envoy/service/discovery/v3/ads.pb.h" #include "envoy/service/endpoint/v3/eds.pb.h" +#include "envoy/service/extension/v3/config_discovery.pb.h" #include "envoy/service/listener/v3/lds.pb.h" #include "envoy/service/ratelimit/v2/rls.pb.h" #include "envoy/service/ratelimit/v3/rls.pb.h" diff --git a/source/common/config/remote_data_fetcher.h b/source/common/config/remote_data_fetcher.h index 34a7863ff2f0e..6ffe0b052ef08 100644 --- a/source/common/config/remote_data_fetcher.h +++ b/source/common/config/remote_data_fetcher.h @@ -53,6 +53,8 @@ class RemoteDataFetcher : public Logger::Loggable, void onSuccess(const Http::AsyncClient::Request&, Http::ResponseMessagePtr&& response) override; void onFailure(const Http::AsyncClient::Request&, Http::AsyncClient::FailureReason reason) override; + void onBeforeFinalizeUpstreamSpan(Envoy::Tracing::Span&, + const Http::ResponseHeaderMap*) override {} /** * Fetch data from remote. 
diff --git a/source/common/config/subscription_base.h b/source/common/config/subscription_base.h index dd5686f2ffafd..765e28934ebcb 100644 --- a/source/common/config/subscription_base.h +++ b/source/common/config/subscription_base.h @@ -2,6 +2,7 @@ #include "envoy/config/subscription.h" +#include "common/config/opaque_resource_decoder_impl.h" #include "common/config/resource_name.h" namespace Envoy { @@ -9,16 +10,21 @@ namespace Config { template struct SubscriptionBase : public Config::SubscriptionCallbacks { public: - SubscriptionBase(const envoy::config::core::v3::ApiVersion api_version) - : api_version_(api_version) {} + SubscriptionBase(const envoy::config::core::v3::ApiVersion api_version, + ProtobufMessage::ValidationVisitor& validation_visitor, + absl::string_view name_field) + : resource_decoder_(validation_visitor, name_field), api_version_(api_version) {} std::string getResourceName() const { return Envoy::Config::getResourceName(api_version_); } +protected: + Config::OpaqueResourceDecoderImpl resource_decoder_; + private: const envoy::config::core::v3::ApiVersion api_version_; }; } // namespace Config -} // namespace Envoy \ No newline at end of file +} // namespace Envoy diff --git a/source/common/config/subscription_factory_impl.cc b/source/common/config/subscription_factory_impl.cc index 96ce4e07a81d7..6495688add618 100644 --- a/source/common/config/subscription_factory_impl.cc +++ b/source/common/config/subscription_factory_impl.cc @@ -16,27 +16,38 @@ namespace Config { SubscriptionFactoryImpl::SubscriptionFactoryImpl( const LocalInfo::LocalInfo& local_info, Event::Dispatcher& dispatcher, - Upstream::ClusterManager& cm, Runtime::RandomGenerator& random, - ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api) + Upstream::ClusterManager& cm, Random::RandomGenerator& random, + ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api, Runtime::Loader& runtime) : local_info_(local_info), dispatcher_(dispatcher), cm_(cm), 
random_(random), - validation_visitor_(validation_visitor), api_(api) {} + validation_visitor_(validation_visitor), api_(api), runtime_(runtime) {} SubscriptionPtr SubscriptionFactoryImpl::subscriptionFromConfigSource( const envoy::config::core::v3::ConfigSource& config, absl::string_view type_url, - Stats::Scope& scope, SubscriptionCallbacks& callbacks) { + Stats::Scope& scope, SubscriptionCallbacks& callbacks, + OpaqueResourceDecoder& resource_decoder) { Config::Utility::checkLocalInfo(type_url, local_info_); std::unique_ptr result; SubscriptionStats stats = Utility::generateStats(scope); + const auto transport_api_version = config.api_config_source().transport_api_version(); + if (transport_api_version == envoy::config::core::v3::ApiVersion::V2 && + runtime_.snapshot().runtimeFeatureEnabled( + "envoy.reloadable_features.enable_deprecated_v2_api_warning")) { + runtime_.snapshot().countDeprecatedFeatureUse(); + ENVOY_LOG(warn, + "xDS of version v2 has been deprecated and will be removed in subsequent versions"); + } + switch (config.config_source_specifier_case()) { case envoy::config::core::v3::ConfigSource::ConfigSourceSpecifierCase::kPath: { Utility::checkFilesystemSubscriptionBackingPath(config.path(), api_); return std::make_unique( - dispatcher_, config.path(), callbacks, stats, validation_visitor_, api_); + dispatcher_, config.path(), callbacks, resource_decoder, stats, validation_visitor_, api_); } case envoy::config::core::v3::ConfigSource::ConfigSourceSpecifierCase::kApiConfigSource: { const envoy::config::core::v3::ApiConfigSource& api_config_source = config.api_config_source(); - Utility::checkApiConfigSourceSubscriptionBackingCluster(cm_.clusters(), api_config_source); + Utility::checkApiConfigSourceSubscriptionBackingCluster(cm_.primaryClusters(), + api_config_source); switch (api_config_source.api_type()) { case envoy::config::core::v3::ApiConfigSource::hidden_envoy_deprecated_UNSUPPORTED_REST_LEGACY: @@ -48,8 +59,9 @@ SubscriptionPtr 
SubscriptionFactoryImpl::subscriptionFromConfigSource( return std::make_unique( local_info_, cm_, api_config_source.cluster_names()[0], dispatcher_, random_, Utility::apiConfigSourceRefreshDelay(api_config_source), - Utility::apiConfigSourceRequestTimeout(api_config_source), restMethod(type_url), type_url, - api_config_source.transport_api_version(), callbacks, stats, + Utility::apiConfigSourceRequestTimeout(api_config_source), + restMethod(type_url, api_config_source.transport_api_version()), type_url, + api_config_source.transport_api_version(), callbacks, resource_decoder, stats, Utility::configSourceInitialFetchTimeout(config), validation_visitor_); case envoy::config::core::v3::ApiConfigSource::GRPC: return std::make_unique( @@ -58,10 +70,12 @@ SubscriptionPtr SubscriptionFactoryImpl::subscriptionFromConfigSource( Utility::factoryForGrpcApiConfigSource(cm_.grpcAsyncClientManager(), api_config_source, scope, true) ->create(), - dispatcher_, sotwGrpcMethod(type_url), api_config_source.transport_api_version(), - random_, scope, Utility::parseRateLimitSettings(api_config_source), + dispatcher_, sotwGrpcMethod(type_url, api_config_source.transport_api_version()), + api_config_source.transport_api_version(), random_, scope, + Utility::parseRateLimitSettings(api_config_source), api_config_source.set_node_on_first_message_only()), - callbacks, stats, type_url, dispatcher_, Utility::configSourceInitialFetchTimeout(config), + callbacks, resource_decoder, stats, type_url, dispatcher_, + Utility::configSourceInitialFetchTimeout(config), /*is_aggregated*/ false); case envoy::config::core::v3::ApiConfigSource::DELTA_GRPC: { return std::make_unique( @@ -69,10 +83,11 @@ SubscriptionPtr SubscriptionFactoryImpl::subscriptionFromConfigSource( Config::Utility::factoryForGrpcApiConfigSource(cm_.grpcAsyncClientManager(), api_config_source, scope, true) ->create(), - dispatcher_, deltaGrpcMethod(type_url), api_config_source.transport_api_version(), - random_, scope, 
Utility::parseRateLimitSettings(api_config_source), local_info_), - callbacks, stats, type_url, dispatcher_, Utility::configSourceInitialFetchTimeout(config), - false); + dispatcher_, deltaGrpcMethod(type_url, api_config_source.transport_api_version()), + api_config_source.transport_api_version(), random_, scope, + Utility::parseRateLimitSettings(api_config_source), local_info_), + callbacks, resource_decoder, stats, type_url, dispatcher_, + Utility::configSourceInitialFetchTimeout(config), false); } default: NOT_REACHED_GCOVR_EXCL_LINE; @@ -80,11 +95,12 @@ SubscriptionPtr SubscriptionFactoryImpl::subscriptionFromConfigSource( } case envoy::config::core::v3::ConfigSource::ConfigSourceSpecifierCase::kAds: { return std::make_unique( - cm_.adsMux(), callbacks, stats, type_url, dispatcher_, + cm_.adsMux(), callbacks, resource_decoder, stats, type_url, dispatcher_, Utility::configSourceInitialFetchTimeout(config), true); } default: - throw EnvoyException("Missing config source specifier in envoy::api::v2::core::ConfigSource"); + throw EnvoyException( + "Missing config source specifier in envoy::config::core::v3::ConfigSource"); } NOT_REACHED_GCOVR_EXCL_LINE; } diff --git a/source/common/config/subscription_factory_impl.h b/source/common/config/subscription_factory_impl.h index 8d31b8682aa8b..1241229861d4a 100644 --- a/source/common/config/subscription_factory_impl.h +++ b/source/common/config/subscription_factory_impl.h @@ -1,33 +1,39 @@ #pragma once #include "envoy/api/api.h" +#include "envoy/common/random_generator.h" #include "envoy/config/core/v3/config_source.pb.h" #include "envoy/config/subscription.h" #include "envoy/config/subscription_factory.h" #include "envoy/stats/scope.h" #include "envoy/upstream/cluster_manager.h" +#include "common/common/logger.h" + namespace Envoy { namespace Config { -class SubscriptionFactoryImpl : public SubscriptionFactory { +class SubscriptionFactoryImpl : public SubscriptionFactory, Logger::Loggable { public: 
SubscriptionFactoryImpl(const LocalInfo::LocalInfo& local_info, Event::Dispatcher& dispatcher, - Upstream::ClusterManager& cm, Runtime::RandomGenerator& random, - ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api); + Upstream::ClusterManager& cm, Random::RandomGenerator& random, + ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api, + Runtime::Loader& runtime); // Config::SubscriptionFactory SubscriptionPtr subscriptionFromConfigSource(const envoy::config::core::v3::ConfigSource& config, absl::string_view type_url, Stats::Scope& scope, - SubscriptionCallbacks& callbacks) override; + SubscriptionCallbacks& callbacks, + OpaqueResourceDecoder& resource_decoder) override; private: const LocalInfo::LocalInfo& local_info_; Event::Dispatcher& dispatcher_; Upstream::ClusterManager& cm_; - Runtime::RandomGenerator& random_; + Random::RandomGenerator& random_; ProtobufMessage::ValidationVisitor& validation_visitor_; Api::Api& api_; + Runtime::Loader& runtime_; }; } // namespace Config diff --git a/source/common/config/type_to_endpoint.cc b/source/common/config/type_to_endpoint.cc index 7fd3dd55ec2cf..1c32fe47ad2c5 100644 --- a/source/common/config/type_to_endpoint.cc +++ b/source/common/config/type_to_endpoint.cc @@ -6,96 +6,257 @@ // API_NO_BOOST_FILE +#define SERVICE_VERSION_INFO(v2, v3) \ + createServiceVersionInfoMap(v2, {v2, v3}), createServiceVersionInfoMap(v3, {v2, v3}) + namespace Envoy { namespace Config { namespace { -// service RPC method fully qualified names. -struct Service { - std::string sotw_grpc_method_; - std::string delta_grpc_method_; - std::string rest_method_; +// A service's name, e.g. "envoy.api.v2.RouteDiscoveryService", +// "envoy.service.route.v3.RouteDiscoveryService". 
+using ServiceName = std::string; + +struct ServiceVersionInfo { + // This hold a name for each transport_api_version, for example for + // "envoy.api.v2.RouteDiscoveryService": + // { + // "V2": "envoy.api.v2.RouteDiscoveryService", + // "V3": "envoy.service.route.v3.RouteDiscoveryService" + // } + absl::flat_hash_map names_; +}; + +// A ServiceVersionInfoMap holds a service's transport_api_version and possible names for each +// available transport_api_version. For examples: +// +// Given "envoy.api.v2.RouteDiscoveryService" as the service name: +// { +// "envoy.api.v2.RouteDiscoveryService": { +// "names_": { +// "V2": "envoy.api.v2.RouteDiscoveryService", +// "V3": "envoy.service.route.v3.RouteDiscoveryService" +// } +// } +// } +// +// And for "envoy.service.route.v3.RouteDiscoveryService": +// { +// "envoy.service.route.v3.RouteDiscoveryService": +// "names_": { +// "V2": "envoy.api.v2.RouteDiscoveryService", +// "V3": "envoy.service.route.v3.RouteDiscoveryService" +// } +// } +// } +using ServiceVersionInfoMap = absl::flat_hash_map; + +// This creates a ServiceVersionInfoMap, with service name (For example: +// "envoy.api.v2.RouteDiscoveryService") as the key. +ServiceVersionInfoMap +createServiceVersionInfoMap(absl::string_view service_name, + const std::array& versioned_service_names) { + const auto key = static_cast(service_name); + return ServiceVersionInfoMap{{ + // ServiceName as the key. + key, + + // ServiceVersionInfo as the value. + ServiceVersionInfo{{ + {envoy::config::core::v3::ApiVersion::V2, versioned_service_names[0]}, + {envoy::config::core::v3::ApiVersion::V3, versioned_service_names[1]}, + }}, + }}; +} + +// A resource type URL. For example: "type.googleapis.com/envoy.api.v2.RouteConfiguration". 
+using TypeUrl = std::string; + +TypeUrl getResourceTypeUrl(absl::string_view service_name) { + const auto* service_desc = Protobuf::DescriptorPool::generated_pool()->FindServiceByName( + static_cast(service_name)); + ASSERT(service_desc != nullptr, fmt::format("{} missing", service_name)); + ASSERT(service_desc->options().HasExtension(envoy::annotations::resource)); + + return Grpc::Common::typeUrl( + service_desc->options().GetExtension(envoy::annotations::resource).type()); +} + +// A method name, e.g. "envoy.api.v2.RouteDiscoveryService.StreamRoutes". +using MethodName = std::string; + +struct VersionedDiscoveryType { + // A map of transport_api_version to discovery service RPC method fully qualified names. e.g. + // { + // "V2": "envoy.api.v2.RouteDiscoveryService.StreamRoutes", + // "V3": "envoy.service.route.v3.RouteDiscoveryService.StreamRoutes" + // } + absl::flat_hash_map methods_; +}; + +// This holds versioned discovery types. +struct VersionedService { + VersionedDiscoveryType sotw_grpc_; + VersionedDiscoveryType delta_grpc_; + VersionedDiscoveryType rest_; }; -// Map from resource type URL to service RPC methods. -using TypeUrlToServiceMap = std::unordered_map; +using TypeUrlToVersionedServiceMap = absl::flat_hash_map; + +// buildTypeUrlToServiceMap() builds a reverse map from a resource type URLs to a versioned service +// (by transport_api_version). +// +// The way we build it is by firstly constructing a list of ServiceVersionInfoMap: +// [ +// { +// "envoy.api.v2.RouteDiscoveryService": { +// "names_": { +// "V2": "envoy.api.v2.RouteDiscoveryService", +// "V3": "envoy.service.route.v3.RouteDiscoveryService" +// } +// } +// }, +// { +// "envoy.service.route.v3.RouteDiscoveryService": { +// "names_": { +// "V2": "envoy.api.v2.RouteDiscoveryService", +// "V3": "envoy.service.route.v3.RouteDiscoveryService" +// } +// } +// } +// ... 
+// ] +// +// Then we convert it into the following map, with the inferred resource type URL as the key: +// +// { +// "type.googleapis.com/envoy.api.v2.RouteConfiguration": { +// "sotw_grpc_": { +// "methods_": { +// "V2": "envoy.api.v2.RouteDiscoveryService.StreamRoutes", +// "V3": "envoy.service.route.v3.RouteDiscoveryService.StreamRoutes" +// } +// }, +// ... +// }, +// "type.googleapis.com/envoy.config.route.v3.RouteConfiguration": { +// "sotw_grpc_": { +// "methods_": { +// "V2": "envoy.api.v2.RouteDiscoveryService.StreamRoutes", +// "V3": "envoy.service.route.v3.RouteDiscoveryService.StreamRoutes" +// } +// }, +// ... +// } +// } +// +TypeUrlToVersionedServiceMap* buildTypeUrlToServiceMap() { + auto* type_url_to_versioned_service_map = new TypeUrlToVersionedServiceMap(); -TypeUrlToServiceMap* buildTypeUrlToServiceMap() { - auto* type_url_to_service_map = new TypeUrlToServiceMap(); // This happens once in the lifetime of Envoy. We build a reverse map from resource type URL to - // service methods. We explicitly enumerate all services, since DescriptorPool doesn't support - // iterating over all descriptors, due its lazy load design, see - // https://www.mail-archive.com/protobuf@googlegroups.com/msg04540.html. 
- for (const std::string& service_name : { - "envoy.api.v2.RouteDiscoveryService", - "envoy.service.route.v3.RouteDiscoveryService", - "envoy.api.v2.ScopedRoutesDiscoveryService", - "envoy.service.route.v3.ScopedRoutesDiscoveryService", - "envoy.api.v2.VirtualHostDiscoveryService", - "envoy.service.route.v3.VirtualHostDiscoveryService", - "envoy.service.discovery.v2.SecretDiscoveryService", - "envoy.service.secret.v3.SecretDiscoveryService", - "envoy.api.v2.ClusterDiscoveryService", - "envoy.service.cluster.v3.ClusterDiscoveryService", - "envoy.api.v2.EndpointDiscoveryService", - "envoy.service.endpoint.v3.EndpointDiscoveryService", - "envoy.api.v2.ListenerDiscoveryService", - "envoy.service.listener.v3.ListenerDiscoveryService", - "envoy.service.discovery.v2.RuntimeDiscoveryService", - "envoy.service.runtime.v3.RuntimeDiscoveryService", + // service methods (versioned by transport_api_version). We explicitly enumerate all services, + // since DescriptorPool doesn't support iterating over all descriptors, due its lazy load design, + // see https://www.mail-archive.com/protobuf@googlegroups.com/msg04540.html. 
+ for (const ServiceVersionInfoMap& registered : { + SERVICE_VERSION_INFO("envoy.api.v2.RouteDiscoveryService", + "envoy.service.route.v3.RouteDiscoveryService"), + SERVICE_VERSION_INFO("envoy.api.v2.ScopedRoutesDiscoveryService", + "envoy.service.route.v3.ScopedRoutesDiscoveryService"), + SERVICE_VERSION_INFO("envoy.api.v2.ScopedRoutesDiscoveryService", + "envoy.service.route.v3.ScopedRoutesDiscoveryService"), + SERVICE_VERSION_INFO("envoy.api.v2.VirtualHostDiscoveryService", + "envoy.service.route.v3.VirtualHostDiscoveryService"), + SERVICE_VERSION_INFO("envoy.service.discovery.v2.SecretDiscoveryService", + "envoy.service.secret.v3.SecretDiscoveryService"), + SERVICE_VERSION_INFO("envoy.api.v2.ClusterDiscoveryService", + "envoy.service.cluster.v3.ClusterDiscoveryService"), + SERVICE_VERSION_INFO("envoy.api.v2.EndpointDiscoveryService", + "envoy.service.endpoint.v3.EndpointDiscoveryService"), + SERVICE_VERSION_INFO("envoy.api.v2.ListenerDiscoveryService", + "envoy.service.listener.v3.ListenerDiscoveryService"), + SERVICE_VERSION_INFO("envoy.service.discovery.v2.RuntimeDiscoveryService", + "envoy.service.runtime.v3.RuntimeDiscoveryService"), + ServiceVersionInfoMap{{ + "envoy.service.extension.v3.ExtensionConfigDiscoveryService", + ServiceVersionInfo{{ + {envoy::config::core::v3::ApiVersion::V3, + "envoy.service.extension.v3.ExtensionConfigDiscoveryService"}, + }}, + }}, }) { - const auto* service_desc = - Protobuf::DescriptorPool::generated_pool()->FindServiceByName(service_name); - // TODO(htuch): this should become an ASSERT once all v3 descriptors are linked in. 
- ASSERT(service_desc != nullptr, fmt::format("{} missing", service_name)); - ASSERT(service_desc->options().HasExtension(envoy::annotations::resource)); - const std::string resource_type_url = Grpc::Common::typeUrl( - service_desc->options().GetExtension(envoy::annotations::resource).type()); - Service& service = (*type_url_to_service_map)[resource_type_url]; - // We populate the service methods that are known below, but it's possible that some services - // don't implement all, e.g. VHDS doesn't support SotW or REST. - for (int method_index = 0; method_index < service_desc->method_count(); ++method_index) { - const auto& method_desc = *service_desc->method(method_index); - if (absl::StartsWith(method_desc.name(), "Stream")) { - service.sotw_grpc_method_ = method_desc.full_name(); - } else if (absl::StartsWith(method_desc.name(), "Delta")) { - service.delta_grpc_method_ = method_desc.full_name(); - } else if (absl::StartsWith(method_desc.name(), "Fetch")) { - service.rest_method_ = method_desc.full_name(); - } else { - ASSERT(false, "Unknown xDS service method"); + for (const auto& [registered_service_name, registered_service_info] : registered) { + const TypeUrl resource_type_url = getResourceTypeUrl(registered_service_name); + VersionedService& service = (*type_url_to_versioned_service_map)[resource_type_url]; + + for (const auto& [transport_api_version, service_name] : registered_service_info.names_) { + const auto* service_desc = + Protobuf::DescriptorPool::generated_pool()->FindServiceByName(service_name); + ASSERT(service_desc != nullptr, fmt::format("{} missing", service_name)); + ASSERT(service_desc->options().HasExtension(envoy::annotations::resource)); + + // We populate the service methods that are known below, but it's possible that some + // services don't implement all, e.g. VHDS doesn't support SotW or REST. 
+ for (int method_index = 0; method_index < service_desc->method_count(); ++method_index) { + const auto& method_desc = *service_desc->method(method_index); + if (absl::StartsWith(method_desc.name(), "Stream")) { + service.sotw_grpc_.methods_[transport_api_version] = method_desc.full_name(); + } else if (absl::StartsWith(method_desc.name(), "Delta")) { + service.delta_grpc_.methods_[transport_api_version] = method_desc.full_name(); + } else if (absl::StartsWith(method_desc.name(), "Fetch")) { + service.rest_.methods_[transport_api_version] = method_desc.full_name(); + } else { + ASSERT(false, "Unknown xDS service method"); + } + } } } } - return type_url_to_service_map; + return type_url_to_versioned_service_map; } -TypeUrlToServiceMap& typeUrlToServiceMap() { - static TypeUrlToServiceMap* type_url_to_service_map = buildTypeUrlToServiceMap(); - return *type_url_to_service_map; +TypeUrlToVersionedServiceMap& typeUrlToVersionedServiceMap() { + static TypeUrlToVersionedServiceMap* type_url_to_versioned_service_map = + buildTypeUrlToServiceMap(); + return *type_url_to_versioned_service_map; +} + +envoy::config::core::v3::ApiVersion +effectiveTransportApiVersion(envoy::config::core::v3::ApiVersion transport_api_version) { + // By default (when the transport_api_version is "AUTO"), the effective transport_api_version is + // envoy::config::core::v3::ApiVersion::V2. 
+ if (transport_api_version == envoy::config::core::v3::ApiVersion::AUTO) { + return envoy::config::core::v3::ApiVersion::V2; + } + return transport_api_version; } } // namespace -const Protobuf::MethodDescriptor& deltaGrpcMethod(absl::string_view type_url) { - const auto it = typeUrlToServiceMap().find(static_cast(type_url)); - ASSERT(it != typeUrlToServiceMap().cend()); +const Protobuf::MethodDescriptor& +deltaGrpcMethod(absl::string_view type_url, + envoy::config::core::v3::ApiVersion transport_api_version) { + const auto it = typeUrlToVersionedServiceMap().find(static_cast(type_url)); + ASSERT(it != typeUrlToVersionedServiceMap().cend()); return *Protobuf::DescriptorPool::generated_pool()->FindMethodByName( - it->second.delta_grpc_method_); + it->second.delta_grpc_.methods_[effectiveTransportApiVersion(transport_api_version)]); } -const Protobuf::MethodDescriptor& sotwGrpcMethod(absl::string_view type_url) { - const auto it = typeUrlToServiceMap().find(static_cast(type_url)); - ASSERT(it != typeUrlToServiceMap().cend()); +const Protobuf::MethodDescriptor& +sotwGrpcMethod(absl::string_view type_url, + envoy::config::core::v3::ApiVersion transport_api_version) { + const auto it = typeUrlToVersionedServiceMap().find(static_cast(type_url)); + ASSERT(it != typeUrlToVersionedServiceMap().cend()); return *Protobuf::DescriptorPool::generated_pool()->FindMethodByName( - it->second.sotw_grpc_method_); + it->second.sotw_grpc_.methods_[effectiveTransportApiVersion(transport_api_version)]); } -const Protobuf::MethodDescriptor& restMethod(absl::string_view type_url) { - const auto it = typeUrlToServiceMap().find(static_cast(type_url)); - ASSERT(it != typeUrlToServiceMap().cend()); - return *Protobuf::DescriptorPool::generated_pool()->FindMethodByName(it->second.rest_method_); +const Protobuf::MethodDescriptor& +restMethod(absl::string_view type_url, envoy::config::core::v3::ApiVersion transport_api_version) { + const auto it = 
typeUrlToVersionedServiceMap().find(static_cast(type_url)); + ASSERT(it != typeUrlToVersionedServiceMap().cend()); + return *Protobuf::DescriptorPool::generated_pool()->FindMethodByName( + it->second.rest_.methods_[effectiveTransportApiVersion(transport_api_version)]); } } // namespace Config diff --git a/source/common/config/type_to_endpoint.h b/source/common/config/type_to_endpoint.h index 0f06d2007977c..ed9f9e6e5c89d 100644 --- a/source/common/config/type_to_endpoint.h +++ b/source/common/config/type_to_endpoint.h @@ -10,12 +10,18 @@ namespace Envoy { namespace Config { // Translates an xDS resource type_url to the name of the delta gRPC service that carries it. -const Protobuf::MethodDescriptor& deltaGrpcMethod(absl::string_view resource_type_url); +const Protobuf::MethodDescriptor& +deltaGrpcMethod(absl::string_view resource_type_url, + envoy::config::core::v3::ApiVersion transport_api_version); // Translates an xDS resource type_url to the name of the state-of-the-world gRPC service that // carries it. -const Protobuf::MethodDescriptor& sotwGrpcMethod(absl::string_view resource_type_url); +const Protobuf::MethodDescriptor& +sotwGrpcMethod(absl::string_view resource_type_url, + envoy::config::core::v3::ApiVersion transport_api_version); // Translates an xDS resource type_url to the name of the REST service that carries it. 
-const Protobuf::MethodDescriptor& restMethod(absl::string_view resource_type_url); +const Protobuf::MethodDescriptor& +restMethod(absl::string_view resource_type_url, + envoy::config::core::v3::ApiVersion transport_api_version); } // namespace Config } // namespace Envoy diff --git a/source/common/config/udpa_resource.cc b/source/common/config/udpa_resource.cc new file mode 100644 index 0000000000000..d990ea8751056 --- /dev/null +++ b/source/common/config/udpa_resource.cc @@ -0,0 +1,215 @@ +#include "common/config/udpa_resource.h" + +#include + +#include "common/common/fmt.h" +#include "common/http/utility.h" + +#include "absl/strings/str_cat.h" +#include "absl/strings/str_split.h" + +// TODO(htuch): This file has a bunch of ad hoc URI encoding/decoding based on Envoy's HTTP util +// functions. Once https://github.com/envoyproxy/envoy/issues/6588 lands, we can replace with GURL. + +namespace Envoy { +namespace Config { + +using PercentEncoding = Http::Utility::PercentEncoding; + +namespace { + +// We need to percent-encode authority, id, path and query params. Resource types should not have +// reserved characters. + +std::string encodeAuthority(const std::string& authority) { + return PercentEncoding::encode(authority, "%/?#"); +} + +std::string encodeIdPath(const Protobuf::RepeatedPtrField& id) { + std::vector path_components; + for (const auto& id_component : id) { + path_components.emplace_back(PercentEncoding::encode(id_component, "%:/?#[]")); + } + const std::string path = absl::StrJoin(path_components, "/"); + return path.empty() ? 
"" : absl::StrCat("/", path); +} + +std::string encodeContextParams(const udpa::core::v1::ContextParams& context_params, + bool sort_context_params) { + std::vector query_param_components; + for (const auto& context_param : context_params.params()) { + query_param_components.emplace_back( + absl::StrCat(PercentEncoding::encode(context_param.first, "%#[]&="), "=", + PercentEncoding::encode(context_param.second, "%#[]&="))); + } + if (sort_context_params) { + std::sort(query_param_components.begin(), query_param_components.end()); + } + return query_param_components.empty() ? "" : "?" + absl::StrJoin(query_param_components, "&"); +} + +std::string encodeDirectives( + const Protobuf::RepeatedPtrField& directives) { + std::vector fragment_components; + const std::string DirectiveEscapeChars = "%#[],"; + for (const auto& directive : directives) { + switch (directive.directive_case()) { + case udpa::core::v1::ResourceLocator::Directive::DirectiveCase::kAlt: + fragment_components.emplace_back(absl::StrCat( + "alt=", PercentEncoding::encode(UdpaResourceIdentifier::encodeUrl(directive.alt()), + DirectiveEscapeChars))); + break; + case udpa::core::v1::ResourceLocator::Directive::DirectiveCase::kEntry: + fragment_components.emplace_back( + absl::StrCat("entry=", PercentEncoding::encode(directive.entry(), DirectiveEscapeChars))); + break; + default: + NOT_REACHED_GCOVR_EXCL_LINE; + } + } + return fragment_components.empty() ? 
"" : "#" + absl::StrJoin(fragment_components, ","); +} + +} // namespace + +std::string UdpaResourceIdentifier::encodeUrn(const udpa::core::v1::ResourceName& resource_name, + const EncodeOptions& options) { + const std::string authority = encodeAuthority(resource_name.authority()); + const std::string id_path = encodeIdPath(resource_name.id()); + const std::string query_params = + encodeContextParams(resource_name.context(), options.sort_context_params_); + return absl::StrCat("udpa://", authority, "/", resource_name.resource_type(), id_path, + query_params); +} + +std::string +UdpaResourceIdentifier::encodeUrl(const udpa::core::v1::ResourceLocator& resource_locator, + const EncodeOptions& options) { + const std::string id_path = encodeIdPath(resource_locator.id()); + const std::string fragment = encodeDirectives(resource_locator.directives()); + std::string scheme = "udpa:"; + switch (resource_locator.scheme()) { + case udpa::core::v1::ResourceLocator::HTTP: + scheme = "http:"; + FALLTHRU; + case udpa::core::v1::ResourceLocator::UDPA: { + const std::string authority = encodeAuthority(resource_locator.authority()); + const std::string query_params = + encodeContextParams(resource_locator.exact_context(), options.sort_context_params_); + return absl::StrCat(scheme, "//", authority, "/", resource_locator.resource_type(), id_path, + query_params, fragment); + } + case udpa::core::v1::ResourceLocator::FILE: { + return absl::StrCat("file://", id_path, fragment); + } + default: + NOT_REACHED_GCOVR_EXCL_LINE; + } +} + +namespace { + +void decodePath(absl::string_view path, std::string* resource_type, + Protobuf::RepeatedPtrField& id) { + // This is guaranteed by Http::Utility::extractHostPathFromUrn. 
+ ASSERT(absl::StartsWith(path, "/")); + const std::vector path_components = absl::StrSplit(path.substr(1), '/'); + auto id_it = path_components.cbegin(); + if (resource_type != nullptr) { + *resource_type = std::string(path_components[0]); + if (resource_type->empty()) { + throw UdpaResourceIdentifier::DecodeException( + fmt::format("Resource type missing from {}", path)); + } + id_it = std::next(id_it); + } + for (; id_it != path_components.cend(); id_it++) { + *id.Add() = PercentEncoding::decode(*id_it); + } +} + +void decodeQueryParams(absl::string_view query_params, + udpa::core::v1::ContextParams& context_params) { + Http::Utility::QueryParams query_params_components = + Http::Utility::parseQueryString(query_params); + for (const auto& it : query_params_components) { + (*context_params.mutable_params())[PercentEncoding::decode(it.first)] = + PercentEncoding::decode(it.second); + } +} + +void decodeFragment( + absl::string_view fragment, + Protobuf::RepeatedPtrField& directives) { + const std::vector fragment_components = absl::StrSplit(fragment, ','); + for (const absl::string_view& fragment_component : fragment_components) { + if (absl::StartsWith(fragment_component, "alt=")) { + directives.Add()->mutable_alt()->MergeFrom( + UdpaResourceIdentifier::decodeUrl(PercentEncoding::decode(fragment_component.substr(4)))); + } else if (absl::StartsWith(fragment_component, "entry=")) { + directives.Add()->set_entry(PercentEncoding::decode(fragment_component.substr(6))); + } else { + throw UdpaResourceIdentifier::DecodeException( + fmt::format("Unknown fragment component {}", fragment_component)); + ; + } + } +} + +} // namespace + +udpa::core::v1::ResourceName UdpaResourceIdentifier::decodeUrn(absl::string_view resource_urn) { + if (!absl::StartsWith(resource_urn, "udpa:")) { + throw UdpaResourceIdentifier::DecodeException( + fmt::format("{} does not have an udpa: scheme", resource_urn)); + } + absl::string_view host, path; + 
Http::Utility::extractHostPathFromUri(resource_urn, host, path); + udpa::core::v1::ResourceName decoded_resource_name; + decoded_resource_name.set_authority(PercentEncoding::decode(host)); + const size_t query_params_start = path.find('?'); + if (query_params_start != absl::string_view::npos) { + decodeQueryParams(path.substr(query_params_start), *decoded_resource_name.mutable_context()); + path = path.substr(0, query_params_start); + } + decodePath(path, decoded_resource_name.mutable_resource_type(), + *decoded_resource_name.mutable_id()); + return decoded_resource_name; +} + +udpa::core::v1::ResourceLocator UdpaResourceIdentifier::decodeUrl(absl::string_view resource_url) { + absl::string_view host, path; + Http::Utility::extractHostPathFromUri(resource_url, host, path); + udpa::core::v1::ResourceLocator decoded_resource_locator; + const size_t fragment_start = path.find('#'); + if (fragment_start != absl::string_view::npos) { + decodeFragment(path.substr(fragment_start + 1), *decoded_resource_locator.mutable_directives()); + path = path.substr(0, fragment_start); + } + if (absl::StartsWith(resource_url, "udpa:")) { + decoded_resource_locator.set_scheme(udpa::core::v1::ResourceLocator::UDPA); + } else if (absl::StartsWith(resource_url, "http:")) { + decoded_resource_locator.set_scheme(udpa::core::v1::ResourceLocator::HTTP); + } else if (absl::StartsWith(resource_url, "file:")) { + decoded_resource_locator.set_scheme(udpa::core::v1::ResourceLocator::FILE); + // File URLs only have a path and fragment. 
+ decodePath(path, nullptr, *decoded_resource_locator.mutable_id()); + return decoded_resource_locator; + } else { + throw UdpaResourceIdentifier::DecodeException( + fmt::format("{} does not have a udpa:, http: or file: scheme", resource_url)); + } + decoded_resource_locator.set_authority(PercentEncoding::decode(host)); + const size_t query_params_start = path.find('?'); + if (query_params_start != absl::string_view::npos) { + decodeQueryParams(path.substr(query_params_start), + *decoded_resource_locator.mutable_exact_context()); + path = path.substr(0, query_params_start); + } + decodePath(path, decoded_resource_locator.mutable_resource_type(), + *decoded_resource_locator.mutable_id()); + return decoded_resource_locator; +} + +} // namespace Config +} // namespace Envoy diff --git a/source/common/config/udpa_resource.h b/source/common/config/udpa_resource.h new file mode 100644 index 0000000000000..5f90dcf1b0428 --- /dev/null +++ b/source/common/config/udpa_resource.h @@ -0,0 +1,71 @@ +#include "envoy/common/exception.h" + +#include "absl/strings/string_view.h" +#include "udpa/core/v1/resource_locator.pb.h" +#include "udpa/core/v1/resource_name.pb.h" + +namespace Envoy { +namespace Config { + +// Utilities for URI encoding/decoding of udpa::core::v1::Resource{Name,Locator}. +class UdpaResourceIdentifier { +public: + // Options for encoded URIs. + struct EncodeOptions { + // Should the context params be sorted by key? This provides deterministic encoding. + bool sort_context_params_{}; + }; + + /** + * Encode a udpa::core::v1::ResourceName message as a udpa:// URN string. + * + * @param resource_name resource name message. + * @param options encoding options. + * @return std::string udpa:// URN for resource_name. 
+ */ + static std::string encodeUrn(const udpa::core::v1::ResourceName& resource_name, + const EncodeOptions& options); + static std::string encodeUrn(const udpa::core::v1::ResourceName& resource_name) { + return encodeUrn(resource_name, {}); + } + + /** + * Encode a udpa::core::v1::ResourceLocator message as a udpa:// URL string. + * + * @param resource_name resource name message. + * @param options encoding options. + * @return std::string udpa:// URL for resource_name. + */ + static std::string encodeUrl(const udpa::core::v1::ResourceLocator& resource_locator, + const EncodeOptions& options); + static std::string encodeUrl(const udpa::core::v1::ResourceLocator& resource_locator) { + return encodeUrl(resource_locator, {}); + } + + // Thrown when an exception occurs during URI decoding. + class DecodeException : public EnvoyException { + public: + DecodeException(const std::string& what) : EnvoyException(what) {} + }; + + /** + * Decode a udpa:// URN string to a udpa::core::v1::ResourceName. + * + * @param resource_urn udpa:// resource URN. + * @return udpa::core::v1::ResourceName resource name message for resource_urn. + * @throws DecodeException when parsing fails. + */ + static udpa::core::v1::ResourceName decodeUrn(absl::string_view resource_urn); + + /** + * Decode a udpa:// URL string to a udpa::core::v1::ResourceLocator. + * + * @param resource_url udpa:// resource URL. + * @return udpa::core::v1::ResourceLocator resource name message for resource_url. + * @throws DecodeException when parsing fails. 
+ */ + static udpa::core::v1::ResourceLocator decodeUrl(absl::string_view resource_url); +}; + +} // namespace Config +} // namespace Envoy diff --git a/source/common/config/utility.cc b/source/common/config/utility.cc index 1d4a55a52fe13..e42ee777a1561 100644 --- a/source/common/config/utility.cc +++ b/source/common/config/utility.cc @@ -1,7 +1,5 @@ #include "common/config/utility.h" -#include - #include "envoy/config/bootstrap/v3/bootstrap.pb.h" #include "envoy/config/cluster/v3/cluster.pb.h" #include "envoy/config/core/v3/address.pb.h" @@ -20,6 +18,7 @@ #include "common/config/well_known_names.h" #include "common/protobuf/protobuf.h" #include "common/protobuf/utility.h" +#include "common/stats/histogram_impl.h" #include "common/stats/stats_matcher_impl.h" #include "common/stats/tag_producer_impl.h" @@ -136,13 +135,11 @@ void Utility::checkApiConfigSourceNames( } } -void Utility::validateClusterName(const Upstream::ClusterManager::ClusterInfoMap& clusters, +void Utility::validateClusterName(const Upstream::ClusterManager::ClusterSet& primary_clusters, const std::string& cluster_name, const std::string& config_source) { - const auto& it = clusters.find(cluster_name); - - if (it == clusters.end() || it->second.get().info()->addedViaApi() || - it->second.get().info()->type() == envoy::config::cluster::v3::Cluster::EDS) { + const auto& it = primary_clusters.find(cluster_name); + if (it == primary_clusters.end()) { throw EnvoyException(fmt::format("{} must have a statically defined non-EDS cluster: '{}' does " "not exist, was added via api, or is an EDS cluster", config_source, cluster_name)); @@ -150,7 +147,7 @@ void Utility::validateClusterName(const Upstream::ClusterManager::ClusterInfoMap } void Utility::checkApiConfigSourceSubscriptionBackingCluster( - const Upstream::ClusterManager::ClusterInfoMap& clusters, + const Upstream::ClusterManager::ClusterSet& primary_clusters, const envoy::config::core::v3::ApiConfigSource& api_config_source) { 
Utility::checkApiConfigSourceNames(api_config_source); @@ -161,14 +158,14 @@ void Utility::checkApiConfigSourceSubscriptionBackingCluster( // All API configs of type REST and UNSUPPORTED_REST_LEGACY should have cluster names. // Additionally, some gRPC API configs might have a cluster name set instead // of an envoy gRPC. - Utility::validateClusterName(clusters, api_config_source.cluster_names()[0], + Utility::validateClusterName(primary_clusters, api_config_source.cluster_names()[0], api_config_source.GetTypeName()); } else if (is_grpc) { // Some ApiConfigSources of type GRPC won't have a cluster name, such as if // they've been configured with google_grpc. if (api_config_source.grpc_services()[0].has_envoy_grpc()) { // If an Envoy gRPC exists, we take its cluster name. - Utility::validateClusterName(clusters, + Utility::validateClusterName(primary_clusters, api_config_source.grpc_services()[0].envoy_grpc().cluster_name(), api_config_source.GetTypeName()); } @@ -223,6 +220,11 @@ Utility::createStatsMatcher(const envoy::config::bootstrap::v3::Bootstrap& boots return std::make_unique(bootstrap.stats_config()); } +Stats::HistogramSettingsConstPtr +Utility::createHistogramSettings(const envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + return std::make_unique(bootstrap.stats_config()); +} + Grpc::AsyncClientFactoryPtr Utility::factoryForGrpcApiConfigSource( Grpc::AsyncClientManager& async_client_manager, const envoy::config::core::v3::ApiConfigSource& api_config_source, Stats::Scope& scope, diff --git a/source/common/config/utility.h b/source/common/config/utility.h index 363e12ab1982a..d19026386c532 100644 --- a/source/common/config/utility.h +++ b/source/common/config/utility.h @@ -1,6 +1,7 @@ #pragma once #include "envoy/api/api.h" +#include "envoy/common/random_generator.h" #include "envoy/config/bootstrap/v3/bootstrap.pb.h" #include "envoy/config/cluster/v3/cluster.pb.h" #include "envoy/config/core/v3/address.pb.h" @@ -12,6 +13,7 @@ #include 
"envoy/local_info/local_info.h" #include "envoy/registry/registry.h" #include "envoy/server/filter_config.h" +#include "envoy/stats/histogram.h" #include "envoy/stats/scope.h" #include "envoy/stats/stats_matcher.h" #include "envoy/stats/tag_producer.h" @@ -32,7 +34,7 @@ namespace Envoy { namespace Config { /** - * Constant Api Type Values, used by envoy::api::v2::core::ApiConfigSource. + * Constant Api Type Values, used by envoy::config::core::v3::ApiConfigSource. */ class ApiTypeValues { public: @@ -76,14 +78,14 @@ class Utility { /** * Extract refresh_delay as a std::chrono::milliseconds from - * envoy::api::v2::core::ApiConfigSource. + * envoy::config::core::v3::ApiConfigSource. */ static std::chrono::milliseconds apiConfigSourceRefreshDelay(const envoy::config::core::v3::ApiConfigSource& api_config_source); /** * Extract request_timeout as a std::chrono::milliseconds from - * envoy::api::v2::core::ApiConfigSource. If request_timeout isn't set in the config source, a + * envoy::config::core::v3::ApiConfigSource. If request_timeout isn't set in the config source, a * default value of 1s will be returned. */ static std::chrono::milliseconds @@ -91,18 +93,18 @@ class Utility { /** * Extract initial_fetch_timeout as a std::chrono::milliseconds from - * envoy::api::v2::core::ConfigSource. If request_timeout isn't set in the config source, a + * envoy::config::core::v3::ApiConfigSource. If request_timeout isn't set in the config source, a * default value of 0s will be returned. */ static std::chrono::milliseconds configSourceInitialFetchTimeout(const envoy::config::core::v3::ConfigSource& config_source); /** - * Populate an envoy::api::v2::core::ApiConfigSource. + * Populate an envoy::config::core::v3::ApiConfigSource. * @param cluster supplies the cluster name for the ApiConfigSource. * @param refresh_delay_ms supplies the refresh delay for the ApiConfigSource in ms. * @param api_type supplies the type of subscription to use for the ApiConfigSource. 
- * @param api_config_source a reference to the envoy::api::v2::core::ApiConfigSource object to + * @param api_config_source a reference to the envoy::config::core::v3::ApiConfigSource object to * populate. */ static void translateApiConfigSource(const std::string& cluster, uint32_t refresh_delay_ms, @@ -157,40 +159,54 @@ class Utility { /** * Check the validity of a cluster backing an api config source. Throws on error. - * @param clusters the clusters currently loaded in the cluster manager. + * @param primary_clusters the API config source eligible clusters. * @param cluster_name the cluster name to validate. * @param config_source the config source typed name. * @throws EnvoyException when an API config doesn't have a statically defined non-EDS cluster. */ - static void validateClusterName(const Upstream::ClusterManager::ClusterInfoMap& clusters, + static void validateClusterName(const Upstream::ClusterManager::ClusterSet& primary_clusters, const std::string& cluster_name, const std::string& config_source); /** * Potentially calls Utility::validateClusterName, if a cluster name can be found. - * @param clusters the clusters currently loaded in the cluster manager. + * @param primary_clusters the API config source eligible clusters. * @param api_config_source the config source to validate. * @throws EnvoyException when an API config doesn't have a statically defined non-EDS cluster. */ static void checkApiConfigSourceSubscriptionBackingCluster( - const Upstream::ClusterManager::ClusterInfoMap& clusters, + const Upstream::ClusterManager::ClusterSet& primary_clusters, const envoy::config::core::v3::ApiConfigSource& api_config_source); /** - * Parses RateLimit configuration from envoy::api::v2::core::ApiConfigSource to RateLimitSettings. + * Parses RateLimit configuration from envoy::config::core::v3::ApiConfigSource to + * RateLimitSettings. * @param api_config_source ApiConfigSource. * @return RateLimitSettings. 
*/ static RateLimitSettings parseRateLimitSettings(const envoy::config::core::v3::ApiConfigSource& api_config_source); + /** + * Generate a ControlPlaneStats object from stats scope. + * @param scope for stats. + * @return ControlPlaneStats for scope. + */ + static ControlPlaneStats generateControlPlaneStats(Stats::Scope& scope) { + const std::string control_plane_prefix = "control_plane."; + return {ALL_CONTROL_PLANE_STATS(POOL_COUNTER_PREFIX(scope, control_plane_prefix), + POOL_GAUGE_PREFIX(scope, control_plane_prefix), + POOL_TEXT_READOUT_PREFIX(scope, control_plane_prefix))}; + } + /** * Generate a SubscriptionStats object from stats scope. * @param scope for stats. * @return SubscriptionStats for scope. */ static SubscriptionStats generateStats(Stats::Scope& scope) { - return {ALL_SUBSCRIPTION_STATS(POOL_COUNTER(scope), POOL_GAUGE(scope))}; + return { + ALL_SUBSCRIPTION_STATS(POOL_COUNTER(scope), POOL_GAUGE(scope), POOL_TEXT_READOUT(scope))}; } /** @@ -221,27 +237,42 @@ class Utility { */ template static Factory& getAndCheckFactory(const ProtoMessage& message) { - const ProtobufWkt::Any& typed_config = message.typed_config(); + Factory* factory = Utility::getFactoryByType(message.typed_config()); + if (factory != nullptr) { + return *factory; + } + + return Utility::getAndCheckFactoryByName(message.name()); + } + + /** + * Get type URL from a typed config. + * @param typed_config for the extension config. + */ + static std::string getFactoryType(const ProtobufWkt::Any& typed_config) { static const std::string& typed_struct_type = udpa::type::v1::TypedStruct::default_instance().GetDescriptor()->full_name(); - - if (!typed_config.type_url().empty()) { - // Unpack methods will only use the fully qualified type name after the last '/'. 
- // https://github.com/protocolbuffers/protobuf/blob/3.6.x/src/google/protobuf/any.proto#L87 - auto type = std::string(TypeUtil::typeUrlToDescriptorFullName(typed_config.type_url())); - if (type == typed_struct_type) { - udpa::type::v1::TypedStruct typed_struct; - MessageUtil::unpackTo(typed_config, typed_struct); - // Not handling nested structs or typed structs in typed structs - type = std::string(TypeUtil::typeUrlToDescriptorFullName(typed_struct.type_url())); - } - Factory* factory = Registry::FactoryRegistry::getFactoryByType(type); - if (factory != nullptr) { - return *factory; - } + // Unpack methods will only use the fully qualified type name after the last '/'. + // https://github.com/protocolbuffers/protobuf/blob/3.6.x/src/google/protobuf/any.proto#L87 + auto type = std::string(TypeUtil::typeUrlToDescriptorFullName(typed_config.type_url())); + if (type == typed_struct_type) { + udpa::type::v1::TypedStruct typed_struct; + MessageUtil::unpackTo(typed_config, typed_struct); + // Not handling nested structs or typed structs in typed structs + return std::string(TypeUtil::typeUrlToDescriptorFullName(typed_struct.type_url())); } + return type; + } - return Utility::getAndCheckFactoryByName(message.name()); + /** + * Get a Factory from the registry by type URL. + * @param typed_config for the extension config. + */ + template static Factory* getFactoryByType(const ProtobufWkt::Any& typed_config) { + if (typed_config.type_url().empty()) { + return nullptr; + } + return Registry::FactoryRegistry::getFactoryByType(getFactoryType(typed_config)); } /** @@ -317,9 +348,15 @@ class Utility { createStatsMatcher(const envoy::config::bootstrap::v3::Bootstrap& bootstrap); /** - * Obtain gRPC async client factory from a envoy::api::v2::core::ApiConfigSource. + * Create HistogramSettings instance. 
+ */ + static Stats::HistogramSettingsConstPtr + createHistogramSettings(const envoy::config::bootstrap::v3::Bootstrap& bootstrap); + + /** + * Obtain gRPC async client factory from a envoy::config::core::v3::ApiConfigSource. * @param async_client_manager gRPC async client manager. - * @param api_config_source envoy::api::v3::core::ApiConfigSource. Must have config type GRPC. + * @param api_config_source envoy::config::core::v3::ApiConfigSource. Must have config type GRPC. * @param skip_cluster_check whether to skip cluster validation. * @return Grpc::AsyncClientFactoryPtr gRPC async client factory. */ @@ -331,7 +368,7 @@ class Utility { /** * Translate a set of cluster's hosts into a load assignment configuration. * @param hosts cluster's list of hosts. - * @return envoy::api::v2::ClusterLoadAssignment a load assignment configuration. + * @return envoy::config::endpoint::v3::ClusterLoadAssignment a load assignment configuration. */ static envoy::config::endpoint::v3::ClusterLoadAssignment translateClusterHosts(const Protobuf::RepeatedPtrField& hosts); @@ -381,7 +418,7 @@ class Utility { */ template static BackOffStrategyPtr prepareDnsRefreshStrategy(const T& config, uint64_t dns_refresh_rate_ms, - Runtime::RandomGenerator& random) { + Random::RandomGenerator& random) { if (config.has_dns_failure_refresh_rate()) { uint64_t base_interval_ms = PROTOBUF_GET_MS_REQUIRED(config.dns_failure_refresh_rate(), base_interval); diff --git a/source/common/config/version_converter.cc b/source/common/config/version_converter.cc index 2c4949bff071e..db2bd1cfc2162 100644 --- a/source/common/config/version_converter.cc +++ b/source/common/config/version_converter.cc @@ -3,6 +3,7 @@ #include "envoy/common/exception.h" #include "common/common/assert.h" +#include "common/common/macros.h" #include "common/config/api_type_oracle.h" #include "common/protobuf/visitor.h" #include "common/protobuf/well_known.h" @@ -14,8 +15,6 @@ namespace Config { namespace { -const char 
DeprecatedFieldShadowPrefix[] = "hidden_envoy_deprecated_"; - class ProtoVisitor { public: virtual ~ProtoVisitor() = default; @@ -61,10 +60,19 @@ DynamicMessagePtr createForDescriptorWithCast(const Protobuf::Message& message, return dynamic_message; } +} // namespace + +void VersionConverter::upgrade(const Protobuf::Message& prev_message, + Protobuf::Message& next_message) { + wireCast(prev_message, next_message); + // Track original type to support recoverOriginal(). + annotateWithOriginalType(*prev_message.GetDescriptor(), next_message); +} + // This needs to be recursive, since sub-messages are consumed and stored // internally, we later want to recover their original types. -void annotateWithOriginalType(const Protobuf::Descriptor& prev_descriptor, - Protobuf::Message& next_message) { +void VersionConverter::annotateWithOriginalType(const Protobuf::Descriptor& prev_descriptor, + Protobuf::Message& upgraded_message) { class TypeAnnotatingProtoVisitor : public ProtobufMessage::ProtoVisitor { public: void onMessage(Protobuf::Message& message, const void* ctxt) override { @@ -104,16 +112,7 @@ void annotateWithOriginalType(const Protobuf::Descriptor& prev_descriptor, } }; TypeAnnotatingProtoVisitor proto_visitor; - ProtobufMessage::traverseMutableMessage(proto_visitor, next_message, &prev_descriptor); -} - -} // namespace - -void VersionConverter::upgrade(const Protobuf::Message& prev_message, - Protobuf::Message& next_message) { - wireCast(prev_message, next_message); - // Track original type to support recoverOriginal(). 
- annotateWithOriginalType(*prev_message.GetDescriptor(), next_message); + ProtobufMessage::traverseMutableMessage(proto_visitor, upgraded_message, &prev_descriptor); } void VersionConverter::eraseOriginalTypeInformation(Protobuf::Message& message) { @@ -160,6 +159,7 @@ VersionConverter::getJsonStringFromMessage(const Protobuf::Message& message, DynamicMessagePtr dynamic_message; switch (api_version) { case envoy::config::core::v3::ApiVersion::AUTO: + FALLTHRU; case envoy::config::core::v3::ApiVersion::V2: { // TODO(htuch): this works as long as there are no new fields in the v3+ // DiscoveryRequest. When they are added, we need to do a full v2 conversion @@ -218,5 +218,7 @@ void VersionUtil::scrubHiddenEnvoyDeprecated(Protobuf::Message& message) { ProtobufMessage::traverseMutableMessage(proto_visitor, message, nullptr); } +const char VersionUtil::DeprecatedFieldShadowPrefix[] = "hidden_envoy_deprecated_"; + } // namespace Config } // namespace Envoy diff --git a/source/common/config/version_converter.h b/source/common/config/version_converter.h index cd7ee29afdc78..db9c765239318 100644 --- a/source/common/config/version_converter.h +++ b/source/common/config/version_converter.h @@ -91,6 +91,15 @@ class VersionConverter { static void prepareMessageForGrpcWire(Protobuf::Message& message, envoy::config::core::v3::ApiVersion api_version); + /** + * Annotate an upgraded message with original message type information. + * + * @param prev_descriptor descriptor for original type. + * @param upgraded_message upgraded message. + */ + static void annotateWithOriginalType(const Protobuf::Descriptor& prev_descriptor, + Protobuf::Message& upgraded_message); + /** * For a message that may have been upgraded, recover the original message. * This is useful for config dump, debug output etc. @@ -115,6 +124,9 @@ class VersionUtil { public: // Some helpers for working with earlier message version deprecated fields. 
static void scrubHiddenEnvoyDeprecated(Protobuf::Message& message); + + // A prefix that is added to deprecated fields names upon shadowing. + static const char DeprecatedFieldShadowPrefix[]; }; } // namespace Config diff --git a/source/common/config/watch_map.cc b/source/common/config/watch_map.cc index 257f42e4ccae2..51e73e06344d9 100644 --- a/source/common/config/watch_map.cc +++ b/source/common/config/watch_map.cc @@ -2,11 +2,15 @@ #include "envoy/service/discovery/v3/discovery.pb.h" +#include "common/common/cleanup.h" +#include "common/config/decoded_resource_impl.h" + namespace Envoy { namespace Config { -Watch* WatchMap::addWatch(SubscriptionCallbacks& callbacks) { - auto watch = std::make_unique(callbacks); +Watch* WatchMap::addWatch(SubscriptionCallbacks& callbacks, + OpaqueResourceDecoder& resource_decoder) { + auto watch = std::make_unique(callbacks, resource_decoder); Watch* watch_ptr = watch.get(); wildcard_watches_.insert(watch_ptr); watches_.insert(std::move(watch)); @@ -14,8 +18,20 @@ Watch* WatchMap::addWatch(SubscriptionCallbacks& callbacks) { } void WatchMap::removeWatch(Watch* watch) { - wildcard_watches_.erase(watch); // may or may not be in there, but we want it gone. - watches_.erase(watch); + if (deferred_removed_during_update_ != nullptr) { + deferred_removed_during_update_->insert(watch); + } else { + wildcard_watches_.erase(watch); // may or may not be in there, but we want it gone. + watches_.erase(watch); + } +} + +void WatchMap::removeDeferredWatches() { + for (auto& watch : *deferred_removed_during_update_) { + wildcard_watches_.erase(watch); // may or may not be in there, but we want it gone. 
+ watches_.erase(watch); + } + deferred_removed_during_update_ = nullptr; } AddedRemoved WatchMap::updateWatchInterest(Watch* watch, @@ -58,23 +74,32 @@ void WatchMap::onConfigUpdate(const Protobuf::RepeatedPtrField if (watches_.empty()) { return; } - SubscriptionCallbacks& name_getter = (*watches_.begin())->callbacks_; + // Track any removals triggered by earlier watch updates. + ASSERT(deferred_removed_during_update_ == nullptr); + deferred_removed_during_update_ = std::make_unique>(); + Cleanup cleanup([this] { removeDeferredWatches(); }); // Build a map from watches, to the set of updated resources that each watch cares about. Each // entry in the map is then a nice little bundle that can be fed directly into the individual // onConfigUpdate()s. - absl::flat_hash_map> per_watch_updates; + std::vector decoded_resources; + absl::flat_hash_map> per_watch_updates; for (const auto& r : resources) { + decoded_resources.emplace_back( + new DecodedResourceImpl((*watches_.begin())->resource_decoder_, r, version_info)); const absl::flat_hash_set& interested_in_r = - watchesInterestedIn(name_getter.resourceName(r)); + watchesInterestedIn(decoded_resources.back()->name()); for (const auto& interested_watch : interested_in_r) { - per_watch_updates[interested_watch].Add()->CopyFrom(r); + per_watch_updates[interested_watch].emplace_back(*decoded_resources.back()); } } const bool map_is_single_wildcard = (watches_.size() == 1 && wildcard_watches_.size() == 1); // We just bundled up the updates into nice per-watch packages. Now, deliver them. for (auto& watch : watches_) { + if (deferred_removed_during_update_->count(watch.get()) > 0) { + continue; + } const auto this_watch_updates = per_watch_updates.find(watch); if (this_watch_updates == per_watch_updates.end()) { // This update included no resources this watch cares about. 
@@ -85,12 +110,12 @@ void WatchMap::onConfigUpdate(const Protobuf::RepeatedPtrField // of this watch's resources, so the watch must be informed with an onConfigUpdate. // 3) Otherwise, we can skip onConfigUpdate for this watch. if (map_is_single_wildcard || !watch->state_of_the_world_empty_) { - watch->callbacks_.onConfigUpdate({}, version_info); watch->state_of_the_world_empty_ = true; + watch->callbacks_.onConfigUpdate({}, version_info); } } else { - watch->callbacks_.onConfigUpdate(this_watch_updates->second, version_info); watch->state_of_the_world_empty_ = false; + watch->callbacks_.onConfigUpdate(this_watch_updates->second, version_info); } } } @@ -125,15 +150,26 @@ void WatchMap::onConfigUpdate( const Protobuf::RepeatedPtrField& added_resources, const Protobuf::RepeatedPtrField& removed_resources, const std::string& system_version_info) { + // Track any removals triggered by earlier watch updates. + ASSERT(deferred_removed_during_update_ == nullptr); + deferred_removed_during_update_ = std::make_unique>(); + Cleanup cleanup([this] { removeDeferredWatches(); }); // Build a pair of maps: from watches, to the set of resources {added,removed} that each watch // cares about. Each entry in the map-pair is then a nice little bundle that can be fed directly // into the individual onConfigUpdate()s. - absl::flat_hash_map> - per_watch_added; + std::vector decoded_resources; + absl::flat_hash_map> per_watch_added; for (const auto& r : added_resources) { const absl::flat_hash_set& interested_in_r = watchesInterestedIn(r.name()); + // If there are no watches, then we don't need to decode. If there are watches, they should all + // be for the same resource type, so we can just use the callbacks of the first watch to decode. 
+ if (interested_in_r.empty()) { + continue; + } + decoded_resources.emplace_back( + new DecodedResourceImpl((*interested_in_r.begin())->resource_decoder_, r)); for (const auto& interested_watch : interested_in_r) { - per_watch_added[interested_watch].Add()->CopyFrom(r); + per_watch_added[interested_watch].emplace_back(*decoded_resources.back()); } } absl::flat_hash_map> per_watch_removed; @@ -145,22 +181,27 @@ void WatchMap::onConfigUpdate( } // We just bundled up the updates into nice per-watch packages. Now, deliver them. - for (const auto& added : per_watch_added) { - const Watch* cur_watch = added.first; + for (const auto& [cur_watch, resource_to_add] : per_watch_added) { + if (deferred_removed_during_update_->count(cur_watch) > 0) { + continue; + } const auto removed = per_watch_removed.find(cur_watch); if (removed == per_watch_removed.end()) { // additions only, no removals - cur_watch->callbacks_.onConfigUpdate(added.second, {}, system_version_info); + cur_watch->callbacks_.onConfigUpdate(resource_to_add, {}, system_version_info); } else { // both additions and removals - cur_watch->callbacks_.onConfigUpdate(added.second, removed->second, system_version_info); + cur_watch->callbacks_.onConfigUpdate(resource_to_add, removed->second, system_version_info); // Drop the removals now, so the final removals-only pass won't use them. per_watch_removed.erase(removed); } } // Any removals-only updates will not have been picked up in the per_watch_added loop. 
- for (auto& removed : per_watch_removed) { - removed.first->callbacks_.onConfigUpdate({}, removed.second, system_version_info); + for (auto& [cur_watch, resource_to_remove] : per_watch_removed) { + if (deferred_removed_during_update_->count(cur_watch) > 0) { + continue; + } + cur_watch->callbacks_.onConfigUpdate({}, resource_to_remove, system_version_info); } } diff --git a/source/common/config/watch_map.h b/source/common/config/watch_map.h index 36bcf23f88ea1..f1f7d09294ed7 100644 --- a/source/common/config/watch_map.h +++ b/source/common/config/watch_map.h @@ -24,8 +24,10 @@ struct AddedRemoved { }; struct Watch { - Watch(SubscriptionCallbacks& callbacks) : callbacks_(callbacks) {} + Watch(SubscriptionCallbacks& callbacks, OpaqueResourceDecoder& resource_decoder) + : callbacks_(callbacks), resource_decoder_(resource_decoder) {} SubscriptionCallbacks& callbacks_; + OpaqueResourceDecoder& resource_decoder_; std::set resource_names_; // must be sorted set, for set_difference. // Needed only for state-of-the-world. // Whether the most recent update contained any resources this watch cares about. @@ -56,14 +58,14 @@ struct Watch { // update the subscription accordingly. // // A WatchMap is assumed to be dedicated to a single type_url type of resource (EDS, CDS, etc). -class WatchMap : public SubscriptionCallbacks, public Logger::Loggable { +class WatchMap : public UntypedConfigUpdateCallbacks, public Logger::Loggable { public: WatchMap() = default; // Adds 'callbacks' to the WatchMap, with every possible resource being watched. // (Use updateWatchInterest() to narrow it down to some specific names). // Returns the newly added watch, to be used with updateWatchInterest and removeWatch. - Watch* addWatch(SubscriptionCallbacks& callbacks); + Watch* addWatch(SubscriptionCallbacks& callbacks, OpaqueResourceDecoder& resource_decoder); // Updates the set of resource names that the given watch should watch. 
// Returns any resource name additions/removals that are unique across all watches. That is: @@ -81,22 +83,21 @@ class WatchMap : public SubscriptionCallbacks, public Logger::Loggable& resources, const std::string& version_info) override; void onConfigUpdate( const Protobuf::RepeatedPtrField& added_resources, const Protobuf::RepeatedPtrField& removed_resources, const std::string& system_version_info) override; - void onConfigUpdateFailed(ConfigUpdateFailureReason reason, const EnvoyException* e) override; - std::string resourceName(const ProtobufWkt::Any&) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } - WatchMap(const WatchMap&) = delete; WatchMap& operator=(const WatchMap&) = delete; private: + void removeDeferredWatches(); + // Given a list of names that are new to an individual watch, returns those names that are in fact // new to the entire subscription. std::set findAdditions(const std::vector& newly_added_to_watch, @@ -115,6 +116,11 @@ class WatchMap : public SubscriptionCallbacks, public Logger::Loggable wildcard_watches_; + // Watches that have been removed inside the call stack of the WatchMap's onConfigUpdate(). This + // can happen when a watch's onConfigUpdate() results in another watch being removed via + // removeWatch(). + std::unique_ptr> deferred_removed_during_update_; + // Maps a resource name to the set of watches interested in that resource. Has two purposes: // 1) Acts as a reference count; no watches care anymore ==> the resource can be removed. // 2) Enables efficient lookup of all interested watches when a resource has been updated. 
diff --git a/source/common/config/well_known_names.h b/source/common/config/well_known_names.h index a2cd01dfcdf8d..30698815f9bac 100644 --- a/source/common/config/well_known_names.h +++ b/source/common/config/well_known_names.h @@ -1,7 +1,6 @@ #pragma once #include -#include #include #include "envoy/common/exception.h" diff --git a/source/common/conn_pool/BUILD b/source/common/conn_pool/BUILD new file mode 100644 index 0000000000000..fafa208adc168 --- /dev/null +++ b/source/common/conn_pool/BUILD @@ -0,0 +1,21 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_cc_library( + name = "conn_pool_base_lib", + srcs = ["conn_pool_base.cc"], + hdrs = ["conn_pool_base.h"], + deps = [ + "//include/envoy/stats:timespan_interface", + "//source/common/common:linked_object", + "//source/common/stats:timespan_lib", + "//source/common/upstream:upstream_lib", + ], +) diff --git a/source/common/conn_pool/conn_pool_base.cc b/source/common/conn_pool/conn_pool_base.cc new file mode 100644 index 0000000000000..e8319ab816191 --- /dev/null +++ b/source/common/conn_pool/conn_pool_base.cc @@ -0,0 +1,479 @@ +#include "common/conn_pool/conn_pool_base.h" + +#include "common/common/assert.h" +#include "common/network/transport_socket_options_impl.h" +#include "common/runtime/runtime_features.h" +#include "common/stats/timespan_impl.h" +#include "common/upstream/upstream_impl.h" + +namespace Envoy { +namespace ConnectionPool { + +ConnPoolImplBase::ConnPoolImplBase( + Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, + Event::Dispatcher& dispatcher, const Network::ConnectionSocket::OptionsSharedPtr& options, + const Network::TransportSocketOptionsSharedPtr& transport_socket_options) + : host_(host), priority_(priority), dispatcher_(dispatcher), socket_options_(options), + transport_socket_options_(transport_socket_options) {} + +ConnPoolImplBase::~ConnPoolImplBase() { + 
ASSERT(ready_clients_.empty()); + ASSERT(busy_clients_.empty()); + ASSERT(connecting_clients_.empty()); +} + +void ConnPoolImplBase::destructAllConnections() { + for (auto* list : {&ready_clients_, &busy_clients_, &connecting_clients_}) { + while (!list->empty()) { + list->front()->close(); + } + } + + // Make sure all clients are destroyed before we are destroyed. + dispatcher_.clearDeferredDeleteList(); +} + +bool ConnPoolImplBase::shouldCreateNewConnection() const { + // The number of streams we want to be provisioned for is the number of + // pending and active streams times the prefetch ratio. + // The number of streams we are (theoretically) provisioned for is the + // connecting stream capacity plus the number of active streams. + // + // If prefetch ratio is not set, it defaults to 1, and this simplifies to the + // legacy value of pending_streams_.size() > connecting_stream_capacity_ + return (pending_streams_.size() + num_active_streams_) * prefetchRatio() > + (connecting_stream_capacity_ + num_active_streams_); +} + +float ConnPoolImplBase::prefetchRatio() const { + if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.allow_prefetch")) { + return host_->cluster().prefetchRatio(); + } else { + return 1.0; + } +} + +void ConnPoolImplBase::tryCreateNewConnections() { + // Somewhat arbitrarily cap the number of connections prefetched due to new + // incoming connections. The prefetch ratio is capped at 3, so in steady + // state, no more than 3 connections should be prefetched. If hosts go + // unhealthy, and connections are not immediately prefetched, it could be that + // many connections are desired when the host becomes healthy again, but + // overwhelming it with connections is not desirable. + for (int i = 0; i < 3; ++i) { + if (!tryCreateNewConnection()) { + return; + } + } +} + +bool ConnPoolImplBase::tryCreateNewConnection() { + // There are already enough CONNECTING connections for the number of queued streams. 
+ if (!shouldCreateNewConnection()) { + return false; + } + + const bool can_create_connection = + host_->cluster().resourceManager(priority_).connections().canCreate(); + if (!can_create_connection) { + host_->cluster().stats().upstream_cx_overflow_.inc(); + } + // If we are at the connection circuit-breaker limit due to other upstreams having + // too many open connections, and this upstream has no connections, always create one, to + // prevent pending streams being queued to this upstream with no way to be processed. + if (can_create_connection || + (ready_clients_.empty() && busy_clients_.empty() && connecting_clients_.empty())) { + ENVOY_LOG(debug, "creating a new connection"); + ActiveClientPtr client = instantiateActiveClient(); + ASSERT(client->state_ == ActiveClient::State::CONNECTING); + ASSERT(std::numeric_limits::max() - connecting_stream_capacity_ >= + client->effectiveConcurrentRequestLimit()); + ASSERT(client->real_host_description_); + connecting_stream_capacity_ += client->effectiveConcurrentRequestLimit(); + LinkedList::moveIntoList(std::move(client), owningList(client->state_)); + } + return can_create_connection; +} + +void ConnPoolImplBase::attachRequestToClient(Envoy::ConnectionPool::ActiveClient& client, + AttachContext& context) { + ASSERT(client.state_ == Envoy::ConnectionPool::ActiveClient::State::READY); + + if (!host_->cluster().resourceManager(priority_).requests().canCreate()) { + ENVOY_LOG(debug, "max streams overflow"); + onPoolFailure(client.real_host_description_, absl::string_view(), + ConnectionPool::PoolFailureReason::Overflow, context); + host_->cluster().stats().upstream_rq_pending_overflow_.inc(); + } else { + ENVOY_CONN_LOG(debug, "creating stream", client); + + client.remaining_streams_--; + if (client.remaining_streams_ == 0) { + ENVOY_CONN_LOG(debug, "maximum streams per connection, DRAINING", client); + host_->cluster().stats().upstream_cx_max_requests_.inc(); + transitionActiveClientState(client, 
Envoy::ConnectionPool::ActiveClient::State::DRAINING); + } else if (client.numActiveRequests() + 1 >= client.concurrent_stream_limit_) { + // As soon as the new stream is created, the client will be maxed out. + transitionActiveClientState(client, Envoy::ConnectionPool::ActiveClient::State::BUSY); + } + + num_active_streams_++; + host_->stats().rq_total_.inc(); + host_->stats().rq_active_.inc(); + host_->cluster().stats().upstream_rq_total_.inc(); + host_->cluster().stats().upstream_rq_active_.inc(); + host_->cluster().resourceManager(priority_).requests().inc(); + + onPoolReady(client, context); + } +} + +void ConnPoolImplBase::onRequestClosed(Envoy::ConnectionPool::ActiveClient& client, + bool delay_attaching_stream) { + ENVOY_CONN_LOG(debug, "destroying stream: {} remaining", client, client.numActiveRequests()); + ASSERT(num_active_streams_ > 0); + num_active_streams_--; + host_->stats().rq_active_.dec(); + host_->cluster().stats().upstream_rq_active_.dec(); + host_->cluster().resourceManager(priority_).requests().dec(); + if (client.state_ == ActiveClient::State::DRAINING && client.numActiveRequests() == 0) { + // Close out the draining client if we no longer have active streams. + client.close(); + } else if (client.state_ == ActiveClient::State::BUSY) { + // A stream was just ended, so we should be below the limit now. + ASSERT(client.numActiveRequests() < client.concurrent_stream_limit_); + + transitionActiveClientState(client, ActiveClient::State::READY); + if (!delay_attaching_stream) { + onUpstreamReady(); + } + } +} + +ConnectionPool::Cancellable* ConnPoolImplBase::newStream(AttachContext& context) { + if (!ready_clients_.empty()) { + ActiveClient& client = *ready_clients_.front(); + ENVOY_CONN_LOG(debug, "using existing connection", client); + attachRequestToClient(client, context); + // Even if there's a ready client, we may want to prefetch a new connection + // to handle the next incoming stream. 
+ tryCreateNewConnections(); + return nullptr; + } + + if (host_->cluster().resourceManager(priority_).pendingRequests().canCreate()) { + ConnectionPool::Cancellable* pending = newPendingRequest(context); + + // This must come after newPendingRequest() because this function uses the + // length of pending_streams_ to determine if a new connection is needed. + tryCreateNewConnections(); + + return pending; + } else { + ENVOY_LOG(debug, "max pending streams overflow"); + onPoolFailure(nullptr, absl::string_view(), ConnectionPool::PoolFailureReason::Overflow, + context); + host_->cluster().stats().upstream_rq_pending_overflow_.inc(); + return nullptr; + } +} + +void ConnPoolImplBase::onUpstreamReady() { + while (!pending_streams_.empty() && !ready_clients_.empty()) { + ActiveClientPtr& client = ready_clients_.front(); + ENVOY_CONN_LOG(debug, "attaching to next stream", *client); + // Pending streams are pushed onto the front, so pull from the back. + attachRequestToClient(*client, pending_streams_.back()->context()); + pending_streams_.pop_back(); + } +} + +std::list& ConnPoolImplBase::owningList(ActiveClient::State state) { + switch (state) { + case ActiveClient::State::CONNECTING: + return connecting_clients_; + case ActiveClient::State::READY: + return ready_clients_; + case ActiveClient::State::BUSY: + return busy_clients_; + case ActiveClient::State::DRAINING: + return busy_clients_; + case ActiveClient::State::CLOSED: + NOT_REACHED_GCOVR_EXCL_LINE; + } + NOT_REACHED_GCOVR_EXCL_LINE; +} + +void ConnPoolImplBase::transitionActiveClientState(ActiveClient& client, + ActiveClient::State new_state) { + auto& old_list = owningList(client.state_); + auto& new_list = owningList(new_state); + client.state_ = new_state; + + // old_list and new_list can be equal when transitioning from BUSY to DRAINING. 
+ // + // The documentation for list.splice() (which is what moveBetweenLists() calls) is + // unclear whether it is allowed for src and dst to be the same, so check here + // since it is a no-op anyways. + if (&old_list != &new_list) { + client.moveBetweenLists(old_list, new_list); + } +} + +void ConnPoolImplBase::addDrainedCallbackImpl(Instance::DrainedCb cb) { + drained_callbacks_.push_back(cb); + checkForDrained(); +} + +void ConnPoolImplBase::closeIdleConnections() { + // Create a separate list of elements to close to avoid mutate-while-iterating problems. + std::list to_close; + + for (auto& client : ready_clients_) { + if (client->numActiveRequests() == 0) { + to_close.push_back(client.get()); + } + } + + if (pending_streams_.empty()) { + for (auto& client : connecting_clients_) { + to_close.push_back(client.get()); + } + } + + for (auto& entry : to_close) { + entry->close(); + } +} + +void ConnPoolImplBase::drainConnectionsImpl() { + closeIdleConnections(); + + // closeIdleConnections() closes all connections in ready_clients_ with no active streams, + // so all remaining entries in ready_clients_ are serving streams. Move them and all entries + // in busy_clients_ to draining. + while (!ready_clients_.empty()) { + transitionActiveClientState(*ready_clients_.front(), ActiveClient::State::DRAINING); + } + + // Changing busy_clients_ to DRAINING does not move them between lists, + // so use a for-loop since the list is not mutated. 
+ ASSERT(&owningList(ActiveClient::State::DRAINING) == &busy_clients_); + for (auto& busy_client : busy_clients_) { + transitionActiveClientState(*busy_client, ActiveClient::State::DRAINING); + } +} + +void ConnPoolImplBase::checkForDrained() { + if (drained_callbacks_.empty()) { + return; + } + + closeIdleConnections(); + + if (pending_streams_.empty() && ready_clients_.empty() && busy_clients_.empty() && + connecting_clients_.empty()) { + ENVOY_LOG(debug, "invoking drained callbacks"); + for (const Instance::DrainedCb& cb : drained_callbacks_) { + cb(); + } + } +} + +void ConnPoolImplBase::onConnectionEvent(ActiveClient& client, absl::string_view failure_reason, + Network::ConnectionEvent event) { + if (client.state_ == ActiveClient::State::CONNECTING) { + ASSERT(connecting_stream_capacity_ >= client.effectiveConcurrentRequestLimit()); + connecting_stream_capacity_ -= client.effectiveConcurrentRequestLimit(); + } + + if (event == Network::ConnectionEvent::RemoteClose || + event == Network::ConnectionEvent::LocalClose) { + // The client died. + ENVOY_CONN_LOG(debug, "client disconnected, failure reason: {}", client, failure_reason); + + Envoy::Upstream::reportUpstreamCxDestroy(host_, event); + const bool incomplete_stream = client.closingWithIncompleteRequest(); + if (incomplete_stream) { + Envoy::Upstream::reportUpstreamCxDestroyActiveRequest(host_, event); + } + + if (client.state_ == ActiveClient::State::CONNECTING) { + host_->cluster().stats().upstream_cx_connect_fail_.inc(); + host_->stats().cx_connect_fail_.inc(); + + ConnectionPool::PoolFailureReason reason; + if (client.timed_out_) { + reason = ConnectionPool::PoolFailureReason::Timeout; + } else if (event == Network::ConnectionEvent::RemoteClose) { + reason = ConnectionPool::PoolFailureReason::RemoteConnectionFailure; + } else { + reason = ConnectionPool::PoolFailureReason::LocalConnectionFailure; + } + + // Raw connect failures should never happen under normal circumstances. 
If we have an upstream + // that is behaving badly, streams can get stuck here in the pending state. If we see a + // connect failure, we purge all pending streams so that calling code can determine what to + // do with the stream. + // NOTE: We move the existing pending streams to a temporary list. This is done so that + // if retry logic submits a new stream to the pool, we don't fail it inline. + purgePendingRequests(client.real_host_description_, failure_reason, reason); + // TODO(alyssawilk) only iff upstream is healthy. + // See if we should prefetch another connection based on active connections. + tryCreateNewConnections(); + } + + // We need to release our resourceManager() resources before checking below for + // whether we can create a new connection. Normally this would happen when + // client's destructor runs, but this object needs to be deferredDelete'd(), so + // this forces part of its cleanup to happen now. + client.releaseResources(); + + dispatcher_.deferredDelete(client.removeFromList(owningList(client.state_))); + if (incomplete_stream) { + checkForDrained(); + } + + client.state_ = ActiveClient::State::CLOSED; + + // If we have pending streams and we just lost a connection we should make a new one. 
+ if (!pending_streams_.empty()) { + tryCreateNewConnections(); + } + } else if (event == Network::ConnectionEvent::Connected) { + client.conn_connect_ms_->complete(); + client.conn_connect_ms_.reset(); + + ASSERT(client.state_ == ActiveClient::State::CONNECTING); + transitionActiveClientState(client, ActiveClient::State::READY); + + onUpstreamReady(); + checkForDrained(); + } + + if (client.connect_timer_) { + client.connect_timer_->disableTimer(); + client.connect_timer_.reset(); + } +} + +PendingRequest::PendingRequest(ConnPoolImplBase& parent) : parent_(parent) { + parent_.host()->cluster().stats().upstream_rq_pending_total_.inc(); + parent_.host()->cluster().stats().upstream_rq_pending_active_.inc(); + parent_.host()->cluster().resourceManager(parent_.priority()).pendingRequests().inc(); +} + +PendingRequest::~PendingRequest() { + parent_.host()->cluster().stats().upstream_rq_pending_active_.dec(); + parent_.host()->cluster().resourceManager(parent_.priority()).pendingRequests().dec(); +} + +void PendingRequest::cancel(Envoy::ConnectionPool::CancelPolicy policy) { + parent_.onPendingRequestCancel(*this, policy); +} + +void ConnPoolImplBase::purgePendingRequests( + const Upstream::HostDescriptionConstSharedPtr& host_description, + absl::string_view failure_reason, ConnectionPool::PoolFailureReason reason) { + // NOTE: We move the existing pending streams to a temporary list. This is done so that + // if retry logic submits a new stream to the pool, we don't fail it inline. 
+ pending_streams_to_purge_ = std::move(pending_streams_); + while (!pending_streams_to_purge_.empty()) { + PendingRequestPtr stream = + pending_streams_to_purge_.front()->removeFromList(pending_streams_to_purge_); + host_->cluster().stats().upstream_rq_pending_failure_eject_.inc(); + onPoolFailure(host_description, failure_reason, reason, stream->context()); + } +} + +bool ConnPoolImplBase::connectingConnectionIsExcess() const { + ASSERT(connecting_stream_capacity_ >= + connecting_clients_.front()->effectiveConcurrentRequestLimit()); + // If prefetchRatio is one, this simplifies to checking if there would still be sufficient + // connecting stream capacity to serve all pending streams if the most recent client were + // removed from the picture. + // + // If prefetch ratio is set, it also factors in the anticipated load based on both queued streams + // and active streams, and makes sure the connecting capacity would still be sufficient to serve + // that even with the most recent client removed. + return (pending_streams_.size() + num_active_streams_) * prefetchRatio() <= + (connecting_stream_capacity_ - + connecting_clients_.front()->effectiveConcurrentRequestLimit() + num_active_streams_); +} + +void ConnPoolImplBase::onPendingRequestCancel(PendingRequest& stream, + Envoy::ConnectionPool::CancelPolicy policy) { + ENVOY_LOG(debug, "cancelling pending stream"); + if (!pending_streams_to_purge_.empty()) { + // If pending_streams_to_purge_ is not empty, it means that we are called from + // with-in a onPoolFailure callback invoked in purgePendingRequests (i.e. purgePendingRequests + // is down in the call stack). Remove this stream from the list as it is cancelled, + // and there is no need to call its onPoolFailure callback. 
+ stream.removeFromList(pending_streams_to_purge_); + } else { + stream.removeFromList(pending_streams_); + } + if (policy == Envoy::ConnectionPool::CancelPolicy::CloseExcess && !connecting_clients_.empty() && + connectingConnectionIsExcess()) { + auto& client = *connecting_clients_.front(); + transitionActiveClientState(client, ActiveClient::State::DRAINING); + client.close(); + } + + host_->cluster().stats().upstream_rq_cancelled_.inc(); + checkForDrained(); +} + +namespace { +// Translate zero to UINT64_MAX so that the zero/unlimited case doesn't +// have to be handled specially. +uint64_t translateZeroToUnlimited(uint64_t limit) { + return (limit != 0) ? limit : std::numeric_limits::max(); +} +} // namespace + +ActiveClient::ActiveClient(ConnPoolImplBase& parent, uint64_t lifetime_stream_limit, + uint64_t concurrent_stream_limit) + : parent_(parent), remaining_streams_(translateZeroToUnlimited(lifetime_stream_limit)), + concurrent_stream_limit_(translateZeroToUnlimited(concurrent_stream_limit)), + connect_timer_(parent_.dispatcher().createTimer([this]() -> void { onConnectTimeout(); })) { + conn_connect_ms_ = std::make_unique( + parent_.host()->cluster().stats().upstream_cx_connect_ms_, parent_.dispatcher().timeSource()); + conn_length_ = std::make_unique( + parent_.host()->cluster().stats().upstream_cx_length_ms_, parent_.dispatcher().timeSource()); + connect_timer_->enableTimer(parent_.host()->cluster().connectTimeout()); + + parent_.host()->stats().cx_total_.inc(); + parent_.host()->stats().cx_active_.inc(); + parent_.host()->cluster().stats().upstream_cx_total_.inc(); + parent_.host()->cluster().stats().upstream_cx_active_.inc(); + parent_.host()->cluster().resourceManager(parent_.priority()).connections().inc(); +} + +ActiveClient::~ActiveClient() { releaseResources(); } + +void ActiveClient::onEvent(Network::ConnectionEvent event) { + parent_.onConnectionEvent(*this, "", event); +} + +void ActiveClient::releaseResources() { + if (!resources_released_) { + 
resources_released_ = true; + + conn_length_->complete(); + + parent_.host()->cluster().stats().upstream_cx_active_.dec(); + parent_.host()->stats().cx_active_.dec(); + parent_.host()->cluster().resourceManager(parent_.priority()).connections().dec(); + } +} + +void ActiveClient::onConnectTimeout() { + ENVOY_CONN_LOG(debug, "connect timeout", *this); + parent_.host()->cluster().stats().upstream_cx_connect_timeout_.inc(); + timed_out_ = true; + close(); +} + +} // namespace ConnectionPool +} // namespace Envoy diff --git a/source/common/conn_pool/conn_pool_base.h b/source/common/conn_pool/conn_pool_base.h new file mode 100644 index 0000000000000..acce470ed5593 --- /dev/null +++ b/source/common/conn_pool/conn_pool_base.h @@ -0,0 +1,224 @@ +#pragma once + +#include "envoy/common/conn_pool.h" +#include "envoy/event/dispatcher.h" +#include "envoy/network/connection.h" +#include "envoy/stats/timespan.h" + +#include "common/common/linked_object.h" + +#include "absl/strings/string_view.h" + +namespace Envoy { +namespace ConnectionPool { + +class ConnPoolImplBase; + +// A placeholder struct for whatever data a given connection pool needs to +// successfully attach and upstream connection to a downstream connection. +struct AttachContext { + // Add a virtual destructor to allow for the dynamic_cast ASSERT in typedContext. + virtual ~AttachContext() = default; +}; + +// ActiveClient provides a base class for connection pool clients that handles connection timings +// as well as managing the connection timeout. 
+class ActiveClient : public LinkedObject, + public Network::ConnectionCallbacks, + public Event::DeferredDeletable, + protected Logger::Loggable { +public: + ActiveClient(ConnPoolImplBase& parent, uint64_t lifetime_stream_limit, + uint64_t concurrent_stream_limit); + ~ActiveClient() override; + + void releaseResources(); + + // Network::ConnectionCallbacks + void onEvent(Network::ConnectionEvent event) override; + void onAboveWriteBufferHighWatermark() override {} + void onBelowWriteBufferLowWatermark() override {} + + // Called if the connection does not complete within the cluster's connectTimeout() + void onConnectTimeout(); + + // Returns the concurrent stream limit, accounting for if the total stream limit + // is less than the concurrent stream limit. + uint64_t effectiveConcurrentRequestLimit() const { + return std::min(remaining_streams_, concurrent_stream_limit_); + } + + // Closes the underlying connection. + virtual void close() PURE; + // Returns the ID of the underlying connection. + virtual uint64_t id() const PURE; + // Returns true if this closed with an incomplete stream, for stats tracking/ purposes. + virtual bool closingWithIncompleteRequest() const PURE; + // Returns the number of active streams on this connection. + virtual size_t numActiveRequests() const PURE; + + enum class State { + CONNECTING, // Connection is not yet established. + READY, // Additional streams may be immediately dispatched to this connection. + BUSY, // Connection is at its concurrent stream limit. + DRAINING, // No more streams can be dispatched to this connection, and it will be closed + // when all streams complete. + CLOSED // Connection is closed and object is queued for destruction. 
+ }; + + ConnPoolImplBase& parent_; + uint64_t remaining_streams_; + const uint64_t concurrent_stream_limit_; + State state_{State::CONNECTING}; + Upstream::HostDescriptionConstSharedPtr real_host_description_; + Stats::TimespanPtr conn_connect_ms_; + Stats::TimespanPtr conn_length_; + Event::TimerPtr connect_timer_; + bool resources_released_{false}; + bool timed_out_{false}; +}; + +// TODO(alyssawilk) renames for Request classes and functions -> Stream classes and functions. +// PendingRequest is the base class for a connection which has been created but not yet established. +class PendingRequest : public LinkedObject, public ConnectionPool::Cancellable { +public: + PendingRequest(ConnPoolImplBase& parent); + ~PendingRequest() override; + + // ConnectionPool::Cancellable + void cancel(Envoy::ConnectionPool::CancelPolicy policy) override; + + // The context here returns a pointer to whatever context is provided with newStream(), + // which will be passed back to the parent in onPoolReady or onPoolFailure. + virtual AttachContext& context() PURE; + + ConnPoolImplBase& parent_; +}; + +using PendingRequestPtr = std::unique_ptr; + +using ActiveClientPtr = std::unique_ptr; + +// Base class that handles stream queueing logic shared between connection pool implementations. +class ConnPoolImplBase : protected Logger::Loggable { +public: + ConnPoolImplBase(Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, + Event::Dispatcher& dispatcher, + const Network::ConnectionSocket::OptionsSharedPtr& options, + const Network::TransportSocketOptionsSharedPtr& transport_socket_options); + virtual ~ConnPoolImplBase(); + + // A helper function to get the specific context type from the base class context. + template T& typedContext(AttachContext& context) { + ASSERT(dynamic_cast(&context) != nullptr); + return *static_cast(&context); + } + + void addDrainedCallbackImpl(Instance::DrainedCb cb); + void drainConnectionsImpl(); + + // Closes and destroys all connections. 
This must be called in the destructor of + // derived classes because the derived ActiveClient will downcast parent_ to a more + // specific type of ConnPoolImplBase, but if the more specific part is already destructed + // (due to bottom-up destructor ordering in c++) that access will be invalid. + void destructAllConnections(); + + // Returns a new instance of ActiveClient. + virtual ActiveClientPtr instantiateActiveClient() PURE; + + // Gets a pointer to the list that currently owns this client. + std::list& owningList(ActiveClient::State state); + + // Removes the PendingRequest from the list of streams. Called when the PendingRequest is + // cancelled, e.g. when the stream is reset before a connection has been established. + void onPendingRequestCancel(PendingRequest& stream, Envoy::ConnectionPool::CancelPolicy policy); + + // Fails all pending streams, calling onPoolFailure on the associated callbacks. + void purgePendingRequests(const Upstream::HostDescriptionConstSharedPtr& host_description, + absl::string_view failure_reason, + ConnectionPool::PoolFailureReason pool_failure_reason); + + // Closes any idle connections. + void closeIdleConnections(); + + // Changes the state_ of an ActiveClient and moves to the appropriate list. 
+ void transitionActiveClientState(ActiveClient& client, ActiveClient::State new_state); + + void onConnectionEvent(ActiveClient& client, absl::string_view failure_reason, + Network::ConnectionEvent event); + void checkForDrained(); + void onUpstreamReady(); + ConnectionPool::Cancellable* newStream(AttachContext& context); + + virtual ConnectionPool::Cancellable* newPendingRequest(AttachContext& context) PURE; + + void attachRequestToClient(Envoy::ConnectionPool::ActiveClient& client, AttachContext& context); + + virtual void onPoolFailure(const Upstream::HostDescriptionConstSharedPtr& host_description, + absl::string_view failure_reason, + ConnectionPool::PoolFailureReason pool_failure_reason, + AttachContext& context) PURE; + virtual void onPoolReady(ActiveClient& client, AttachContext& context) PURE; + // Called by derived classes any time a stream is completed or destroyed for any reason. + void onRequestClosed(Envoy::ConnectionPool::ActiveClient& client, bool delay_attaching_stream); + + const Upstream::HostConstSharedPtr& host() const { return host_; } + Event::Dispatcher& dispatcher() { return dispatcher_; } + Upstream::ResourcePriority priority() const { return priority_; } + const Network::ConnectionSocket::OptionsSharedPtr& socketOptions() { return socket_options_; } + const Network::TransportSocketOptionsSharedPtr& transportSocketOptions() { + return transport_socket_options_; + } + +protected: + // Creates up to 3 connections, based on the prefetch ratio. + void tryCreateNewConnections(); + + // Creates a new connection if there is sufficient demand, it is allowed by resourceManager, or + // to avoid starving this pool. + bool tryCreateNewConnection(); + + // A helper function which determines if a canceled pending connection should + // be closed as excess or not. + bool connectingConnectionIsExcess() const; + + // A helper function which determines if a new incoming stream should trigger + // connection prefetch. 
+ bool shouldCreateNewConnection() const; + + float prefetchRatio() const; + + const Upstream::HostConstSharedPtr host_; + const Upstream::ResourcePriority priority_; + + Event::Dispatcher& dispatcher_; + const Network::ConnectionSocket::OptionsSharedPtr socket_options_; + const Network::TransportSocketOptionsSharedPtr transport_socket_options_; + + std::list drained_callbacks_; + std::list pending_streams_; + + // When calling purgePendingRequests, this list will be used to hold the streams we are about + // to purge. We need this if one cancelled streams cancels a different pending stream + std::list pending_streams_to_purge_; + + // Clients that are ready to handle additional streams. + // All entries are in state READY. + std::list ready_clients_; + + // Clients that are not ready to handle additional streams due to being BUSY or DRAINING. + std::list busy_clients_; + + // Clients that are not ready to handle additional streams because they are CONNECTING. + std::list connecting_clients_; + + // The number of streams currently attached to clients. + uint64_t num_active_streams_{0}; + + // The number of streams that can be immediately dispatched + // if all CONNECTING connections become connected. 
+ uint64_t connecting_stream_capacity_{0}; +}; + +} // namespace ConnectionPool +} // namespace Envoy diff --git a/source/common/crypto/BUILD b/source/common/crypto/BUILD index d7431432f6eb4..e47c843fe72a6 100644 --- a/source/common/crypto/BUILD +++ b/source/common/crypto/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/common/decompressor/BUILD b/source/common/decompressor/BUILD deleted file mode 100644 index dfdf8f9b90ed0..0000000000000 --- a/source/common/decompressor/BUILD +++ /dev/null @@ -1,23 +0,0 @@ -licenses(["notice"]) # Apache 2 - -load( - "//bazel:envoy_build_system.bzl", - "envoy_cc_library", - "envoy_package", -) - -envoy_package() - -envoy_cc_library( - name = "decompressor_lib", - srcs = ["zlib_decompressor_impl.cc"], - hdrs = ["zlib_decompressor_impl.h"], - external_deps = ["zlib"], - deps = [ - "//include/envoy/decompressor:decompressor_interface", - "//source/common/buffer:buffer_lib", - "//source/common/common:assert_lib", - "//source/common/common:minimal_logger_lib", - "//source/common/common:zlib_base_lib", - ], -) diff --git a/source/common/decompressor/zlib_decompressor_impl.h b/source/common/decompressor/zlib_decompressor_impl.h deleted file mode 100644 index 8d5627fc6c311..0000000000000 --- a/source/common/decompressor/zlib_decompressor_impl.h +++ /dev/null @@ -1,52 +0,0 @@ -#pragma once - -#include "envoy/decompressor/decompressor.h" - -#include "common/common/logger.h" -#include "common/common/zlib/base.h" - -#include "zlib.h" - -namespace Envoy { -namespace Decompressor { - -/** - * Implementation of decompressor's interface. - */ -class ZlibDecompressorImpl : public Zlib::Base, - public Decompressor, - public Logger::Loggable { -public: - ZlibDecompressorImpl(); - - /** - * Constructor that allows setting the size of decompressor's output buffer. 
It - * should be called whenever a buffer size different than the 4096 bytes, normally set by the - * default constructor, is desired. If memory is available and it makes sense to output large - * chunks of compressed data, zlib documentation suggests buffers sizes on the order of 128K or - * 256K bytes. @see http://zlib.net/zlib_how.html - * @param chunk_size amount of memory reserved for the decompressor output. - */ - ZlibDecompressorImpl(uint64_t chunk_size); - - /** - * Init must be called in order to initialize the decompressor. Once decompressor is initialized, - * it cannot be initialized again. Init should run before decompressing any data. - * @param window_bits sets the size of the history buffer. It must be greater than or equal to - * the window_bits value provided when data was compressed (zlib manual). - */ - void init(int64_t window_bits); - - // Decompressor - void decompress(const Buffer::Instance& input_buffer, Buffer::Instance& output_buffer) override; - - // Flag to track whether error occurred during decompression. - // When an error occurs, the error code (a negative int) will be stored in this variable. 
- int decompression_error_{0}; - -private: - bool inflateNext(); -}; - -} // namespace Decompressor -} // namespace Envoy diff --git a/source/common/event/BUILD b/source/common/event/BUILD index 33fd947b8d776..23ccee3b5ff7e 100644 --- a/source/common/event/BUILD +++ b/source/common/event/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( @@ -33,6 +33,7 @@ envoy_cc_library( "//source/common/network:connection_lib", "//source/common/network:dns_lib", "//source/common/network:listener_lib", + "//source/common/runtime:runtime_features_lib", ], ) @@ -64,6 +65,7 @@ envoy_cc_library( "dispatcher_impl.h", "event_impl_base.h", "file_event_impl.h", + "schedulable_cb_impl.h", ], deps = [ ":libevent_lib", @@ -104,6 +106,7 @@ envoy_cc_library( external_deps = ["event"], deps = [ ":libevent_lib", + ":schedulable_cb_lib", ":timer_lib", "//include/envoy/event:dispatcher_interface", "//include/envoy/event:timer_interface", @@ -111,6 +114,18 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "schedulable_cb_lib", + srcs = ["schedulable_cb_impl.cc"], + hdrs = ["schedulable_cb_impl.h"], + external_deps = ["event"], + deps = [ + ":event_impl_base_lib", + ":libevent_lib", + "//include/envoy/event:schedulable_cb_interface", + ], +) + envoy_cc_library( name = "timer_lib", srcs = ["timer_impl.cc"], @@ -121,6 +136,7 @@ envoy_cc_library( ":libevent_lib", "//include/envoy/event:timer_interface", "//source/common/common:scope_tracker", + "//source/common/runtime:runtime_features_lib", ], ) diff --git a/source/common/event/dispatcher_impl.cc b/source/common/event/dispatcher_impl.cc index c58b9076682c9..76f4a109039fd 100644 --- a/source/common/event/dispatcher_impl.cc +++ b/source/common/event/dispatcher_impl.cc @@ -39,24 +39,19 @@ DispatcherImpl::DispatcherImpl(const std::string& name, Api::Api& api, DispatcherImpl::DispatcherImpl(const 
std::string& name, Buffer::WatermarkFactoryPtr&& factory, Api::Api& api, Event::TimeSystem& time_system) : name_(name), api_(api), buffer_factory_(std::move(factory)), - scheduler_(time_system.createScheduler(base_scheduler_)), - deferred_delete_timer_(createTimerInternal([this]() -> void { clearDeferredDeleteList(); })), - post_timer_(createTimerInternal([this]() -> void { runPostCallbacks(); })), + scheduler_(time_system.createScheduler(base_scheduler_, base_scheduler_)), + deferred_delete_cb_(base_scheduler_.createSchedulableCallback( + [this]() -> void { clearDeferredDeleteList(); })), + post_cb_(base_scheduler_.createSchedulableCallback([this]() -> void { runPostCallbacks(); })), current_to_delete_(&to_delete_1_) { ASSERT(!name_.empty()); -#ifdef ENVOY_HANDLE_SIGNALS - SignalAction::registerFatalErrorHandler(*this); -#endif + FatalErrorHandler::registerFatalErrorHandler(*this); updateApproximateMonotonicTimeInternal(); base_scheduler_.registerOnPrepareCallback( std::bind(&DispatcherImpl::updateApproximateMonotonicTime, this)); } -DispatcherImpl::~DispatcherImpl() { -#ifdef ENVOY_HANDLE_SIGNALS - SignalAction::removeFatalErrorHandler(*this); -#endif -} +DispatcherImpl::~DispatcherImpl() { FatalErrorHandler::removeFatalErrorHandler(*this); } void DispatcherImpl::initializeStats(Stats::Scope& scope, const absl::optional& prefix) { @@ -159,6 +154,11 @@ TimerPtr DispatcherImpl::createTimer(TimerCb cb) { return createTimerInternal(cb); } +Event::SchedulableCallbackPtr DispatcherImpl::createSchedulableCallback(std::function cb) { + ASSERT(isThreadSafe()); + return base_scheduler_.createSchedulableCallback(cb); +} + TimerPtr DispatcherImpl::createTimerInternal(TimerCb cb) { return scheduler_->createTimer(cb, *this); } @@ -168,7 +168,7 @@ void DispatcherImpl::deferredDelete(DeferredDeletablePtr&& to_delete) { current_to_delete_->emplace_back(std::move(to_delete)); ENVOY_LOG(trace, "item added to deferred deletion list (size={})", current_to_delete_->size()); if (1 == 
current_to_delete_->size()) { - deferred_delete_timer_->enableTimer(std::chrono::milliseconds(0)); + deferred_delete_cb_->scheduleCallbackCurrentIteration(); } } @@ -188,7 +188,7 @@ void DispatcherImpl::post(std::function callback) { } if (do_post) { - post_timer_->enableTimer(std::chrono::milliseconds(0)); + post_cb_->scheduleCallbackCurrentIteration(); } } diff --git a/source/common/event/dispatcher_impl.h b/source/common/event/dispatcher_impl.h index 41be86039ad23..0db663dd985b7 100644 --- a/source/common/event/dispatcher_impl.h +++ b/source/common/event/dispatcher_impl.h @@ -64,6 +64,7 @@ class DispatcherImpl : Logger::Loggable, Network::UdpListenerPtr createUdpListener(Network::SocketSharedPtr&& socket, Network::UdpListenerCallbacks& cb) override; TimerPtr createTimer(TimerCb cb) override; + Event::SchedulableCallbackPtr createSchedulableCallback(std::function cb) override; void deferredDelete(DeferredDeletablePtr&& to_delete) override; void exit() override; SignalEventPtr listenForSignal(int signal_num, SignalCb cb) override; @@ -79,12 +80,12 @@ class DispatcherImpl : Logger::Loggable, void updateApproximateMonotonicTime() override; // FatalErrorInterface - void onFatalError() const override { + void onFatalError(std::ostream& os) const override { // Dump the state of the tracked object if it is in the current thread. This generally results // in dumping the active state only for the thread which caused the fatal error. 
if (isThreadSafe()) { if (current_object_) { - current_object_->dumpState(std::cerr); + current_object_->dumpState(os); } } } @@ -104,13 +105,13 @@ class DispatcherImpl : Logger::Loggable, const std::string name_; Api::Api& api_; std::string stats_prefix_; - std::unique_ptr stats_; + DispatcherStatsPtr stats_; Thread::ThreadId run_tid_; Buffer::WatermarkFactoryPtr buffer_factory_; LibeventScheduler base_scheduler_; SchedulerPtr scheduler_; - TimerPtr deferred_delete_timer_; - TimerPtr post_timer_; + SchedulableCallbackPtr deferred_delete_cb_; + SchedulableCallbackPtr post_cb_; std::vector to_delete_1_; std::vector to_delete_2_; std::vector* current_to_delete_; diff --git a/source/common/event/file_event_impl.cc b/source/common/event/file_event_impl.cc index 7607551fc99a4..a4ad6c419814c 100644 --- a/source/common/event/file_event_impl.cc +++ b/source/common/event/file_event_impl.cc @@ -4,6 +4,7 @@ #include "common/common/assert.h" #include "common/event/dispatcher_impl.h" +#include "common/runtime/runtime_features.h" #include "event2/event.h" @@ -12,31 +13,67 @@ namespace Event { FileEventImpl::FileEventImpl(DispatcherImpl& dispatcher, os_fd_t fd, FileReadyCb cb, FileTriggerType trigger, uint32_t events) - : cb_(cb), fd_(fd), trigger_(trigger) { + : cb_(cb), fd_(fd), trigger_(trigger), + activate_fd_events_next_event_loop_( + // Only read the runtime feature if the runtime loader singleton has already been created. + // Attempts to access runtime features too early in the initialization sequence triggers + // some spurious, scary-looking logs about not being able to read runtime feature config + // from the singleton. These warnings are caused by creation of filesystem watchers as + // part of the process of loading the runtime configuration from disk. + Runtime::LoaderSingleton::getExisting() + ? 
Runtime::runtimeFeatureEnabled( + "envoy.reloadable_features.activate_fds_next_event_loop") + : true) { #ifdef WIN32 RELEASE_ASSERT(trigger_ == FileTriggerType::Level, "libevent does not support edge triggers on Windows"); #endif assignEvents(events, &dispatcher.base()); event_add(&raw_event_, nullptr); + if (activate_fd_events_next_event_loop_) { + activation_cb_ = dispatcher.createSchedulableCallback([this]() { + ASSERT(injected_activation_events_ != 0); + mergeInjectedEventsAndRunCb(0); + }); + } } void FileEventImpl::activate(uint32_t events) { - int libevent_events = 0; - if (events & FileReadyType::Read) { - libevent_events |= EV_READ; - } + // events is not empty. + ASSERT(events != 0); + // Only supported event types are set. + ASSERT((events & (FileReadyType::Read | FileReadyType::Write | FileReadyType::Closed)) == events); + + if (!activate_fd_events_next_event_loop_) { + // Legacy implementation + int libevent_events = 0; + if (events & FileReadyType::Read) { + libevent_events |= EV_READ; + } - if (events & FileReadyType::Write) { - libevent_events |= EV_WRITE; + if (events & FileReadyType::Write) { + libevent_events |= EV_WRITE; + } + + if (events & FileReadyType::Closed) { + libevent_events |= EV_CLOSED; + } + + ASSERT(libevent_events); + event_active(&raw_event_, libevent_events, 0); + return; } - if (events & FileReadyType::Closed) { - libevent_events |= EV_CLOSED; + // Schedule the activation callback so it runs as part of the next loop iteration if it is not + // already scheduled. + if (injected_activation_events_ == 0) { + ASSERT(!activation_cb_->enabled()); + activation_cb_->scheduleCallbackNextIteration(); } + ASSERT(activation_cb_->enabled()); - ASSERT(libevent_events); - event_active(&raw_event_, libevent_events, 0); + // Merge new events with pending injected events. 
+ injected_activation_events_ |= events; } void FileEventImpl::assignEvents(uint32_t events, event_base* base) { @@ -62,22 +99,36 @@ void FileEventImpl::assignEvents(uint32_t events, event_base* base) { events |= FileReadyType::Closed; } - // TODO(htuch): this should be ASSERT(events), but - // https://github.com/libevent/libevent/issues/984 seems to be producing unexpected - // behavior. The ASSERT should be restored once this issue is resolved. - if (events) { - event->cb_(events); - } + ASSERT(events != 0); + event->mergeInjectedEventsAndRunCb(events); }, this); } void FileEventImpl::setEnabled(uint32_t events) { + if (activate_fd_events_next_event_loop_ && injected_activation_events_ != 0) { + // Clear pending events on updates to the fd event mask to avoid delivering events that are no + // longer relevant. Updating the event mask will reset the fd edge trigger state so the proxy + // will be able to determine the fd read/write state without need for the injected activation + // events. 
+ injected_activation_events_ = 0; + activation_cb_->cancel(); + } + auto* base = event_get_base(&raw_event_); event_del(&raw_event_); assignEvents(events, base); event_add(&raw_event_, nullptr); } +void FileEventImpl::mergeInjectedEventsAndRunCb(uint32_t events) { + if (activate_fd_events_next_event_loop_ && injected_activation_events_ != 0) { + events |= injected_activation_events_; + injected_activation_events_ = 0; + activation_cb_->cancel(); + } + cb_(events); +} + } // namespace Event } // namespace Envoy diff --git a/source/common/event/file_event_impl.h b/source/common/event/file_event_impl.h index 918b237fb6c1a..e4044fd25194a 100644 --- a/source/common/event/file_event_impl.h +++ b/source/common/event/file_event_impl.h @@ -25,10 +25,21 @@ class FileEventImpl : public FileEvent, ImplBase { private: void assignEvents(uint32_t events, event_base* base); + void mergeInjectedEventsAndRunCb(uint32_t events); FileReadyCb cb_; os_fd_t fd_; FileTriggerType trigger_; + + // Injected FileReadyType events that were scheduled by recent calls to activate() and are pending + // delivery. + uint32_t injected_activation_events_{}; + // Used to schedule delayed event activation. Armed iff pending_activation_events_ != 0. + SchedulableCallbackPtr activation_cb_; + // Latched "envoy.reloadable_features.activate_fds_next_event_loop" runtime feature. If true, fd + // events scheduled via activate are evaluated in the next iteration of the event loop after + // polling and activating new fd events. 
+ const bool activate_fd_events_next_event_loop_; }; } // namespace Event diff --git a/source/common/event/libevent_scheduler.cc b/source/common/event/libevent_scheduler.cc index db5f44a306cc3..dda0380cb4d84 100644 --- a/source/common/event/libevent_scheduler.cc +++ b/source/common/event/libevent_scheduler.cc @@ -1,6 +1,7 @@ #include "common/event/libevent_scheduler.h" #include "common/common/assert.h" +#include "common/event/schedulable_cb_impl.h" #include "common/event/timer_impl.h" #include "event2/util.h" @@ -14,7 +15,22 @@ void recordTimeval(Stats::Histogram& histogram, const timeval& tv) { } } // namespace -LibeventScheduler::LibeventScheduler() : libevent_(event_base_new()) { +LibeventScheduler::LibeventScheduler() { +#ifdef WIN32 + event_config* event_config = event_config_new(); + RELEASE_ASSERT(event_config != nullptr, + "Failed to initialize libevent event_base: event_config_new"); + // Request wepoll backend by avoiding win32 backend. + int error = event_config_avoid_method(event_config, "win32"); + RELEASE_ASSERT(error == 0, "Failed to initialize libevent event_base: event_config_avoid_method"); + event_base* event_base = event_base_new_with_config(event_config); + event_config_free(event_config); +#else + event_base* event_base = event_base_new(); +#endif + RELEASE_ASSERT(event_base != nullptr, "Failed to initialize libevent event_base"); + libevent_ = Libevent::BasePtr(event_base); + // The dispatcher won't work as expected if libevent hasn't been configured to use threads. 
RELEASE_ASSERT(Libevent::Global::initialized(), ""); } @@ -23,6 +39,11 @@ TimerPtr LibeventScheduler::createTimer(const TimerCb& cb, Dispatcher& dispatche return std::make_unique(libevent_, cb, dispatcher); }; +SchedulableCallbackPtr +LibeventScheduler::createSchedulableCallback(const std::function& cb) { + return std::make_unique(libevent_, cb); +}; + void LibeventScheduler::run(Dispatcher::RunType mode) { int flag = 0; switch (mode) { @@ -33,7 +54,7 @@ void LibeventScheduler::run(Dispatcher::RunType mode) { // This is because libevent only supports level triggering on Windows, and so the write // event callbacks will trigger every time through the loop. Adding EVLOOP_ONCE ensures the // loop will run at most once - flag |= EVLOOP_NONBLOCK | EVLOOP_ONCE; + flag |= EVLOOP_ONCE; #endif break; case Dispatcher::RunType::Block: diff --git a/source/common/event/libevent_scheduler.h b/source/common/event/libevent_scheduler.h index f67d185636e7e..6059a0017baef 100644 --- a/source/common/event/libevent_scheduler.h +++ b/source/common/event/libevent_scheduler.h @@ -3,6 +3,7 @@ #include #include "envoy/event/dispatcher.h" +#include "envoy/event/schedulable_cb.h" #include "envoy/event/timer.h" #include "common/event/libevent.h" @@ -14,13 +15,57 @@ namespace Envoy { namespace Event { // Implements Scheduler based on libevent. -class LibeventScheduler : public Scheduler { +// +// Here is a rough summary of operations that libevent performs in each event loop iteration, in +// order. Note that the invocation order for "same-iteration" operations that execute as a group +// can be surprising and invocation order of expired timers is non-deterministic. +// Whenever possible, it is preferable to avoid making event invocation ordering assumptions. +// +// 1. Calculate the poll timeout by comparing the current time to the deadline of the closest +// timer (the one at head of the priority queue). +// 2. Run registered "prepare" callbacks. +// 3. 
Poll for fd events using the closest timer as timeout, add active fds to the work list. +// 4. Run registered "check" callbacks. +// 5. Check timer deadlines against current time and move expired timers from the timer priority +// queue to the work list. Expired timers are moved to the work list is a non-deterministic order. +// 6. Execute items in the work list until the list is empty. Note that additional work +// items could be added to the work list during execution of this step, more details below. +// 7. Goto 1 if the loop termination condition has not been reached +// +// The following "same-iteration" work items are added directly to the work list when they are +// scheduled so they execute in the current iteration of the event loop. Note that there are no +// ordering guarantees when mixing the mechanisms below. Specifically, it is unsafe to assume that +// calling post followed by deferredDelete will result in the post callback being invoked before the +// deferredDelete; deferredDelete will run first if there is a pending deferredDeletion at the time +// the post callback is scheduled because deferredDelete invocation is grouped. +// - Event::Dispatcher::post(cb). Post callbacks are invoked as a group. +// - Event::Dispatcher::deferredDelete(object) and Event::DeferredTaskUtil::deferredRun(...). +// The same mechanism implements both of these operations, so they are invoked as a group. +// - Event::SchedulableCallback::scheduleCallbackCurrentIteration(). Each of these callbacks is +// scheduled and invoked independently. +// - Event::FileEvent::activate() if "envoy.reloadable_features.activate_fds_next_event_loop" +// runtime feature is disabled. +// - Event::Timer::enableTimer(0) if "envoy.reloadable_features.activate_timers_next_event_loop" +// runtime feature is disabled. +// +// Event::FileEvent::activate and Event::SchedulableCallback::scheduleCallbackNextIteration are +// implemented as libevent timers with a deadline of 0. 
Both of these actions are moved to the work +// list while checking for expired timers during step 5. +// +// Events execute in the following order, derived from the order in which items were added to the +// work list: +// 0. Events added via event_active prior to the start of the event loop (in tests) +// 1. Fd events +// 2. Timers, FileEvent::activate and SchedulableCallback::scheduleCallbackNextIteration +// 3. "Same-iteration" work items described above, including Event::Dispatcher::post callbacks +class LibeventScheduler : public Scheduler, public CallbackScheduler { public: using OnPrepareCallback = std::function; LibeventScheduler(); // Scheduler TimerPtr createTimer(const TimerCb& cb, Dispatcher& dispatcher) override; + SchedulableCallbackPtr createSchedulableCallback(const std::function& cb) override; /** * Runs the event loop. diff --git a/source/common/event/real_time_system.cc b/source/common/event/real_time_system.cc index c528b58b4e8cc..7f022c23a0bb8 100644 --- a/source/common/event/real_time_system.cc +++ b/source/common/event/real_time_system.cc @@ -22,7 +22,7 @@ class RealScheduler : public Scheduler { } // namespace -SchedulerPtr RealTimeSystem::createScheduler(Scheduler& base_scheduler) { +SchedulerPtr RealTimeSystem::createScheduler(Scheduler& base_scheduler, CallbackScheduler&) { return std::make_unique(base_scheduler); } diff --git a/source/common/event/real_time_system.h b/source/common/event/real_time_system.h index 5323da8bfac37..a5b86466eec1f 100644 --- a/source/common/event/real_time_system.h +++ b/source/common/event/real_time_system.h @@ -13,7 +13,7 @@ namespace Event { class RealTimeSystem : public TimeSystem { public: // TimeSystem - SchedulerPtr createScheduler(Scheduler&) override; + SchedulerPtr createScheduler(Scheduler&, CallbackScheduler&) override; // TimeSource SystemTime systemTime() override { return time_source_.systemTime(); } diff --git a/source/common/event/schedulable_cb_impl.cc 
b/source/common/event/schedulable_cb_impl.cc new file mode 100644 index 0000000000000..797e5bb004e1a --- /dev/null +++ b/source/common/event/schedulable_cb_impl.cc @@ -0,0 +1,42 @@ +#include "common/event/schedulable_cb_impl.h" + +#include "common/common/assert.h" + +#include "event2/event.h" + +namespace Envoy { +namespace Event { + +SchedulableCallbackImpl::SchedulableCallbackImpl(Libevent::BasePtr& libevent, + std::function cb) + : cb_(cb) { + ASSERT(cb_); + evtimer_assign( + &raw_event_, libevent.get(), + [](evutil_socket_t, short, void* arg) -> void { + SchedulableCallbackImpl* cb = static_cast(arg); + cb->cb_(); + }, + this); +} + +void SchedulableCallbackImpl::scheduleCallbackCurrentIteration() { + // event_active directly adds the event to the end of the work queue so it executes in the current + // iteration of the event loop. + event_active(&raw_event_, EV_TIMEOUT, 0); +} + +void SchedulableCallbackImpl::scheduleCallbackNextIteration() { + // libevent computes the list of timers to move to the work list after polling for fd events, but + // iteration through the work list starts. Zero delay timers added while iterating through the + // work list execute on the next iteration of the event loop. + const timeval zero_tv{}; + event_add(&raw_event_, &zero_tv); +} + +void SchedulableCallbackImpl::cancel() { event_del(&raw_event_); } + +bool SchedulableCallbackImpl::enabled() { return 0 != evtimer_pending(&raw_event_, nullptr); } + +} // namespace Event +} // namespace Envoy diff --git a/source/common/event/schedulable_cb_impl.h b/source/common/event/schedulable_cb_impl.h new file mode 100644 index 0000000000000..48c6224f6a967 --- /dev/null +++ b/source/common/event/schedulable_cb_impl.h @@ -0,0 +1,31 @@ +#pragma once + +#include "envoy/event/schedulable_cb.h" + +#include "common/event/event_impl_base.h" +#include "common/event/libevent.h" + +namespace Envoy { +namespace Event { + +class DispatcherImpl; + +/** + * libevent implementation of SchedulableCallback. 
+ */ +class SchedulableCallbackImpl : public SchedulableCallback, ImplBase { +public: + SchedulableCallbackImpl(Libevent::BasePtr& libevent, std::function cb); + + // SchedulableCallback implementation. + void scheduleCallbackCurrentIteration() override; + void scheduleCallbackNextIteration() override; + void cancel() override; + bool enabled() override; + +private: + std::function cb_; +}; + +} // namespace Event +} // namespace Envoy diff --git a/source/common/event/timer_impl.cc b/source/common/event/timer_impl.cc index 6c71f3cfe5acc..56137dc8b2e37 100644 --- a/source/common/event/timer_impl.cc +++ b/source/common/event/timer_impl.cc @@ -3,6 +3,7 @@ #include #include "common/common/assert.h" +#include "common/runtime/runtime_features.h" #include "event2/event.h" @@ -10,7 +11,16 @@ namespace Envoy { namespace Event { TimerImpl::TimerImpl(Libevent::BasePtr& libevent, TimerCb cb, Dispatcher& dispatcher) - : cb_(cb), dispatcher_(dispatcher) { + : cb_(cb), dispatcher_(dispatcher), + activate_timers_next_event_loop_( + // Only read the runtime feature if the runtime loader singleton has already been created. + // Accessing runtime features too early in the initialization sequence triggers logging + // and the logging code itself depends on the use of timers. Attempts to log while + // initializing the logging subsystem will result in a crash. + Runtime::LoaderSingleton::getExisting() + ? 
Runtime::runtimeFeatureEnabled( + "envoy.reloadable_features.activate_timers_next_event_loop") + : true) { ASSERT(cb_); evtimer_assign( &raw_event_, libevent.get(), @@ -44,7 +54,8 @@ void TimerImpl::enableHRTimer(const std::chrono::microseconds& d, void TimerImpl::internalEnableTimer(const timeval& tv, const ScopeTrackedObject* object) { object_ = object; - if (tv.tv_sec == 0 && tv.tv_usec == 0) { + + if (!activate_timers_next_event_loop_ && tv.tv_sec == 0 && tv.tv_usec == 0) { event_active(&raw_event_, EV_TIMEOUT, 0); } else { event_add(&raw_event_, &tv); diff --git a/source/common/event/timer_impl.h b/source/common/event/timer_impl.h index f9e9808242692..307fb3fe80d76 100644 --- a/source/common/event/timer_impl.h +++ b/source/common/event/timer_impl.h @@ -70,6 +70,11 @@ class TimerImpl : public Timer, ImplBase { // example if the DispatcherImpl::post is called by two threads, they race to // both set this to null. std::atomic object_{}; + + // Latched "envoy.reloadable_features.activate_timers_next_event_loop" runtime feature. If true, + // timers scheduled with a 0 time delta are evaluated in the next iteration of the event loop + // after polling and activating new fd events. 
+ const bool activate_timers_next_event_loop_; }; } // namespace Event diff --git a/source/common/filesystem/BUILD b/source/common/filesystem/BUILD index 7aa299d43d74c..4059eb96df389 100644 --- a/source/common/filesystem/BUILD +++ b/source/common/filesystem/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", @@ -9,6 +7,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( @@ -35,6 +35,7 @@ envoy_cc_posix_library( deps = [ "//include/envoy/filesystem:filesystem_interface", "//source/common/api:os_sys_calls_lib", + "//source/common/common:utility_lib", ], ) @@ -70,6 +71,7 @@ envoy_cc_library( deps = [ "//include/envoy/filesystem:filesystem_interface", "//source/common/common:assert_lib", + "//source/common/common:utility_lib", ], ) diff --git a/source/common/filesystem/file_shared_impl.cc b/source/common/filesystem/file_shared_impl.cc index dc0e8bfcdc328..56601badb01c9 100644 --- a/source/common/filesystem/file_shared_impl.cc +++ b/source/common/filesystem/file_shared_impl.cc @@ -7,7 +7,13 @@ namespace Filesystem { Api::IoError::IoErrorCode IoFileError::getErrorCode() const { return IoErrorCode::UnknownError; } -std::string IoFileError::getErrorDetails() const { return ::strerror(errno_); } +std::string IoFileError::getErrorDetails() const { + // TODO(sunjayBhatia, wrowe): Disable clang-format until win32 implementation no longer uses POSIX + // subsystem, see https://github.com/envoyproxy/envoy/issues/11655 + // clang-format off + return ::strerror(errno_); + // clang-format on +} Api::IoCallBoolResult FileSharedImpl::open(FlagSet in) { if (isOpen()) { diff --git a/source/common/filesystem/inotify/watcher_impl.cc b/source/common/filesystem/inotify/watcher_impl.cc index d3e6bd48f69ce..2aeb9b6ef30b7 100644 --- a/source/common/filesystem/inotify/watcher_impl.cc +++ b/source/common/filesystem/inotify/watcher_impl.cc @@ -40,7 +40,7 @@ void 
WatcherImpl::addWatch(absl::string_view path, uint32_t events, OnChangedCb int watch_fd = inotify_add_watch(inotify_fd_, std::string(result.directory_).c_str(), watch_mask); if (watch_fd == -1) { throw EnvoyException( - fmt::format("unable to add filesystem watch for file {}: {}", path, strerror(errno))); + fmt::format("unable to add filesystem watch for file {}: {}", path, errorDetails(errno))); } ENVOY_LOG(debug, "added watch for directory: '{}' file: '{}' fd: {}", result.directory_, diff --git a/source/common/filesystem/inotify/watcher_impl.h b/source/common/filesystem/inotify/watcher_impl.h index 40f903f43e4a1..9b416f5c9c031 100644 --- a/source/common/filesystem/inotify/watcher_impl.h +++ b/source/common/filesystem/inotify/watcher_impl.h @@ -3,7 +3,6 @@ #include #include #include -#include #include "envoy/api/api.h" #include "envoy/event/dispatcher.h" @@ -11,6 +10,8 @@ #include "common/common/logger.h" +#include "absl/container/node_hash_map.h" + namespace Envoy { namespace Filesystem { @@ -43,7 +44,7 @@ class WatcherImpl : public Watcher, Logger::Loggable { Api::Api& api_; int inotify_fd_; Event::FileEventPtr inotify_event_; - std::unordered_map callback_map_; + absl::node_hash_map callback_map_; }; } // namespace Filesystem diff --git a/source/common/filesystem/kqueue/watcher_impl.cc b/source/common/filesystem/kqueue/watcher_impl.cc index aa1589f0cb8a1..2452eeb688b29 100644 --- a/source/common/filesystem/kqueue/watcher_impl.cc +++ b/source/common/filesystem/kqueue/watcher_impl.cc @@ -72,7 +72,7 @@ WatcherImpl::FileWatchPtr WatcherImpl::addWatch(absl::string_view path, uint32_t if (kevent(queue_, &event, 1, nullptr, 0, nullptr) == -1 || event.flags & EV_ERROR) { throw EnvoyException( - fmt::format("unable to add filesystem watch for file {}: {}", path, strerror(errno))); + fmt::format("unable to add filesystem watch for file {}: {}", path, errorDetails(errno))); } ENVOY_LOG(debug, "added watch for file: '{}' fd: {}", path, watch_fd); diff --git 
a/source/common/filesystem/kqueue/watcher_impl.h b/source/common/filesystem/kqueue/watcher_impl.h index b61ba721b531d..e34d905489792 100644 --- a/source/common/filesystem/kqueue/watcher_impl.h +++ b/source/common/filesystem/kqueue/watcher_impl.h @@ -11,6 +11,8 @@ #include "common/common/linked_object.h" #include "common/common/logger.h" +#include "absl/container/node_hash_map.h" + namespace Envoy { namespace Filesystem { @@ -47,7 +49,7 @@ class WatcherImpl : public Watcher, Logger::Loggable { Api::Api& api_; int queue_; - std::unordered_map watches_; + absl::node_hash_map watches_; Event::FileEventPtr kqueue_event_; }; diff --git a/source/common/filesystem/posix/directory_iterator_impl.cc b/source/common/filesystem/posix/directory_iterator_impl.cc index 06b4a910e12a5..6e8906d5c3d14 100644 --- a/source/common/filesystem/posix/directory_iterator_impl.cc +++ b/source/common/filesystem/posix/directory_iterator_impl.cc @@ -1,14 +1,14 @@ #include "envoy/common/exception.h" #include "common/common/fmt.h" +#include "common/common/utility.h" #include "common/filesystem/directory_iterator_impl.h" namespace Envoy { namespace Filesystem { DirectoryIteratorImpl::DirectoryIteratorImpl(const std::string& directory_path) - : directory_path_(directory_path), dir_(nullptr), - os_sys_calls_(Api::OsSysCallsSingleton::get()) { + : directory_path_(directory_path), os_sys_calls_(Api::OsSysCallsSingleton::get()) { openDirectory(); nextEntry(); } @@ -29,7 +29,7 @@ void DirectoryIteratorImpl::openDirectory() { dir_ = temp_dir; if (!dir_) { throw EnvoyException( - fmt::format("unable to open directory {}: {}", directory_path_, strerror(errno))); + fmt::format("unable to open directory {}: {}", directory_path_, errorDetails(errno))); } } @@ -38,7 +38,7 @@ void DirectoryIteratorImpl::nextEntry() { dirent* entry = ::readdir(dir_); if (entry == nullptr && errno != 0) { throw EnvoyException( - fmt::format("unable to iterate directory {}: {}", directory_path_, strerror(errno))); + 
fmt::format("unable to iterate directory {}: {}", directory_path_, errorDetails(errno))); } if (entry == nullptr) { diff --git a/source/common/filesystem/posix/filesystem_impl.cc b/source/common/filesystem/posix/filesystem_impl.cc index 70ddf0ecf98b6..e24814d0ca700 100644 --- a/source/common/filesystem/posix/filesystem_impl.cc +++ b/source/common/filesystem/posix/filesystem_impl.cc @@ -14,6 +14,7 @@ #include "common/common/assert.h" #include "common/common/fmt.h" #include "common/common/logger.h" +#include "common/common/utility.h" #include "common/filesystem/filesystem_impl.h" #include "absl/strings/match.h" @@ -95,8 +96,6 @@ std::string InstanceImplPosix::fileReadToEnd(const std::string& path) { throw EnvoyException(absl::StrCat("Invalid path: ", path)); } - std::ios::sync_with_stdio(false); - std::ifstream file(path); if (file.fail()) { throw EnvoyException(absl::StrCat("unable to read file: ", path)); @@ -127,7 +126,7 @@ bool InstanceImplPosix::illegalPath(const std::string& path) { // _before_ canonicalizing the path is that different unix flavors implement // /dev/fd/* differently, for example on linux they are symlinks to /dev/pts/* // which are symlinks to /proc/self/fds/. On BSD (and darwin) they are not - // symlinks at all. To avoid lots of platform, specifics, we whitelist + // symlinks at all. To avoid lots of platform, specifics, we allowlist // /dev/fd/* _before_ resolving the canonical path. 
if (absl::StartsWith(path, "/dev/fd/")) { return false; @@ -136,7 +135,7 @@ bool InstanceImplPosix::illegalPath(const std::string& path) { const Api::SysCallStringResult canonical_path = canonicalPath(path); if (canonical_path.rc_.empty()) { ENVOY_LOG_MISC(debug, "Unable to determine canonical path for {}: {}", path, - ::strerror(canonical_path.errno_)); + errorDetails(canonical_path.errno_)); return true; } diff --git a/source/common/filesystem/win32/filesystem_impl.cc b/source/common/filesystem/win32/filesystem_impl.cc index ca9e246b13b7d..d868fe567c008 100644 --- a/source/common/filesystem/win32/filesystem_impl.cc +++ b/source/common/filesystem/win32/filesystem_impl.cc @@ -12,6 +12,7 @@ #include "common/common/fmt.h" #include "common/filesystem/filesystem_impl.h" +#include "absl/container/node_hash_map.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_split.h" @@ -156,7 +157,7 @@ static const char filename_char_table[] = { // The "COM#" and "LPT#" names below have boolean flag requiring a [1-9] suffix. // This list can be avoided by observing dwFileAttributes & FILE_ATTRIBUTE_DEVICE // within WIN32_FILE_ATTRIBUTE_DATA or WIN32_FIND_DATA results. -std::unordered_map pathelt_table = { +absl::node_hash_map pathelt_table = { {"CON", false}, {"NUL", false}, {"AUX", false}, {"PRN", false}, {"COM", true}, {"LPT", true} }; diff --git a/source/common/filesystem/win32/watcher_impl.cc b/source/common/filesystem/win32/watcher_impl.cc index 5bc400639109f..80531f78d54ea 100644 --- a/source/common/filesystem/win32/watcher_impl.cc +++ b/source/common/filesystem/win32/watcher_impl.cc @@ -31,7 +31,10 @@ WatcherImpl::WatcherImpl(Event::Dispatcher& dispatcher, Api::Api& api) thread_exit_event_ = ::CreateEvent(nullptr, false, false, nullptr); ASSERT(thread_exit_event_ != NULL); keep_watching_ = true; - watch_thread_ = thread_factory_.createThread([this]() -> void { watchLoop(); }); + + // See comments in WorkerImpl::start for the naming convention. 
+ Thread::Options options{absl::StrCat("wat:", dispatcher.name())}; + watch_thread_ = thread_factory_.createThread([this]() -> void { watchLoop(); }, options); } WatcherImpl::~WatcherImpl() { diff --git a/source/common/filesystem/win32/watcher_impl.h b/source/common/filesystem/win32/watcher_impl.h index f107f541eea7c..1eccf7aba5c69 100644 --- a/source/common/filesystem/win32/watcher_impl.h +++ b/source/common/filesystem/win32/watcher_impl.h @@ -7,7 +7,6 @@ #include #include #include -#include #include "envoy/api/api.h" #include "envoy/event/dispatcher.h" @@ -18,6 +17,8 @@ #include "common/common/logger.h" #include "common/common/thread_impl.h" +#include "absl/container/node_hash_map.h" + namespace Envoy { namespace Filesystem { @@ -56,7 +57,7 @@ class WatcherImpl : public Watcher, Logger::Loggable { typedef std::unique_ptr DirectoryWatchPtr; Api::Api& api_; - std::unordered_map callback_map_; + absl::node_hash_map callback_map_; Event::FileEventPtr directory_event_; os_fd_t event_write_; os_fd_t event_read_; diff --git a/source/common/filter/http/BUILD b/source/common/filter/http/BUILD new file mode 100644 index 0000000000000..888c2fd44b125 --- /dev/null +++ b/source/common/filter/http/BUILD @@ -0,0 +1,31 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_cc_library( + name = "filter_config_discovery_lib", + srcs = ["filter_config_discovery_impl.cc"], + hdrs = ["filter_config_discovery_impl.h"], + deps = [ + "//include/envoy/config:subscription_interface", + "//include/envoy/filter/http:filter_config_provider_interface", + "//include/envoy/singleton:instance_interface", + "//include/envoy/stats:stats_macros", + "//include/envoy/thread_local:thread_local_interface", + "//source/common/config:subscription_base_interface", + "//source/common/config:subscription_factory_lib", + "//source/common/config:utility_lib", + "//source/common/grpc:common_lib", + 
"//source/common/init:manager_lib", + "//source/common/init:target_lib", + "//source/common/init:watcher_lib", + "//source/common/protobuf:utility_lib", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + ], +) diff --git a/source/common/filter/http/filter_config_discovery_impl.cc b/source/common/filter/http/filter_config_discovery_impl.cc new file mode 100644 index 0000000000000..32b7e3e8b0386 --- /dev/null +++ b/source/common/filter/http/filter_config_discovery_impl.cc @@ -0,0 +1,226 @@ +#include "common/filter/http/filter_config_discovery_impl.h" + +#include "envoy/config/core/v3/extension.pb.validate.h" +#include "envoy/server/filter_config.h" + +#include "common/config/utility.h" +#include "common/grpc/common.h" +#include "common/protobuf/utility.h" + +#include "absl/strings/str_join.h" + +namespace Envoy { +namespace Filter { +namespace Http { + +DynamicFilterConfigProviderImpl::DynamicFilterConfigProviderImpl( + FilterConfigSubscriptionSharedPtr&& subscription, + const std::set& require_type_urls, + Server::Configuration::FactoryContext& factory_context) + : subscription_(std::move(subscription)), require_type_urls_(require_type_urls), + tls_(factory_context.threadLocal().allocateSlot()), + init_target_("DynamicFilterConfigProviderImpl", [this]() { + subscription_->start(); + // This init target is used to activate the subscription but not wait + // for a response. It is used whenever a default config is provided to be + // used while waiting for a response. 
+ init_target_.ready(); + }) { + subscription_->filter_config_providers_.insert(this); + tls_->set([](Event::Dispatcher&) -> ThreadLocal::ThreadLocalObjectSharedPtr { + return std::make_shared(); + }); +} + +DynamicFilterConfigProviderImpl::~DynamicFilterConfigProviderImpl() { + subscription_->filter_config_providers_.erase(this); +} + +const std::string& DynamicFilterConfigProviderImpl::name() { return subscription_->name(); } + +absl::optional DynamicFilterConfigProviderImpl::config() { + return tls_->getTyped().config_; +} + +void DynamicFilterConfigProviderImpl::validateConfig( + const ProtobufWkt::Any& proto_config, Server::Configuration::NamedHttpFilterConfigFactory&) { + auto type_url = Config::Utility::getFactoryType(proto_config); + if (require_type_urls_.count(type_url) == 0) { + throw EnvoyException(fmt::format("Error: filter config has type URL {} but expect {}.", + type_url, absl::StrJoin(require_type_urls_, ", "))); + } +} + +void DynamicFilterConfigProviderImpl::onConfigUpdate(Envoy::Http::FilterFactoryCb config, + const std::string&, + Config::ConfigAppliedCb cb) { + tls_->runOnAllThreads( + [config, cb](ThreadLocal::ThreadLocalObjectSharedPtr previous) + -> ThreadLocal::ThreadLocalObjectSharedPtr { + auto prev_config = std::dynamic_pointer_cast(previous); + prev_config->config_ = config; + if (cb) { + cb(); + } + return previous; + }, + [this, config]() { + // This happens after all workers have discarded the previous config so it can be safely + // deleted on the main thread by an update with the new config. 
+ this->current_config_ = config; + }); +} + +FilterConfigSubscription::FilterConfigSubscription( + const envoy::config::core::v3::ConfigSource& config_source, + const std::string& filter_config_name, Server::Configuration::FactoryContext& factory_context, + const std::string& stat_prefix, FilterConfigProviderManagerImpl& filter_config_provider_manager, + const std::string& subscription_id) + : Config::SubscriptionBase( + envoy::config::core::v3::ApiVersion::V3, + factory_context.messageValidationContext().dynamicValidationVisitor(), "name"), + filter_config_name_(filter_config_name), factory_context_(factory_context), + validator_(factory_context.messageValidationContext().dynamicValidationVisitor()), + init_target_(fmt::format("FilterConfigSubscription init {}", filter_config_name_), + [this]() { start(); }), + scope_(factory_context.scope().createScope(stat_prefix + "extension_config_discovery." + + filter_config_name_ + ".")), + stat_prefix_(stat_prefix), + stats_({ALL_EXTENSION_CONFIG_DISCOVERY_STATS(POOL_COUNTER(*scope_))}), + filter_config_provider_manager_(filter_config_provider_manager), + subscription_id_(subscription_id) { + const auto resource_name = getResourceName(); + subscription_ = + factory_context.clusterManager().subscriptionFactory().subscriptionFromConfigSource( + config_source, Grpc::Common::typeUrl(resource_name), *scope_, *this, resource_decoder_); +} + +void FilterConfigSubscription::start() { + if (!started_) { + started_ = true; + subscription_->start({filter_config_name_}); + } +} + +void FilterConfigSubscription::onConfigUpdate( + const std::vector& resources, const std::string& version_info) { + // Make sure to make progress in case the control plane is temporarily inconsistent. 
+ init_target_.ready(); + + if (resources.size() != 1) { + throw EnvoyException(fmt::format( + "Unexpected number of resources in ExtensionConfigDS response: {}", resources.size())); + } + const auto& filter_config = dynamic_cast( + resources[0].get().resource()); + if (filter_config.name() != filter_config_name_) { + throw EnvoyException(fmt::format("Unexpected resource name in ExtensionConfigDS response: {}", + filter_config.name())); + } + // Skip update if hash matches + const uint64_t new_hash = MessageUtil::hash(filter_config.typed_config()); + if (new_hash == last_config_hash_) { + return; + } + auto& factory = + Config::Utility::getAndCheckFactory( + filter_config); + // Ensure that the filter config is valid in the filter chain context once the proto is processed. + // Validation happens before updating to prevent a partial update application. It might be + // possible that the providers have distinct type URL constraints. + for (auto* provider : filter_config_providers_) { + provider->validateConfig(filter_config.typed_config(), factory); + } + ProtobufTypes::MessagePtr message = Config::Utility::translateAnyToFactoryConfig( + filter_config.typed_config(), validator_, factory); + Envoy::Http::FilterFactoryCb factory_callback = + factory.createFilterFactoryFromProto(*message, stat_prefix_, factory_context_); + ENVOY_LOG(debug, "Updating filter config {}", filter_config_name_); + const auto pending_update = std::make_shared>( + (factory_context_.admin().concurrency() + 1) * filter_config_providers_.size()); + for (auto* provider : filter_config_providers_) { + provider->onConfigUpdate(factory_callback, version_info, [this, pending_update]() { + if (--(*pending_update) == 0) { + stats_.config_reload_.inc(); + } + }); + } + last_config_hash_ = new_hash; +} + +void FilterConfigSubscription::onConfigUpdate( + const std::vector& added_resources, + const Protobuf::RepeatedPtrField& removed_resources, const std::string&) { + if (!removed_resources.empty()) { + 
ENVOY_LOG(error, + "Server sent a delta ExtensionConfigDS update attempting to remove a resource (name: " + "{}). Ignoring.", + removed_resources[0]); + } + if (!added_resources.empty()) { + onConfigUpdate(added_resources, added_resources[0].get().version()); + } +} + +void FilterConfigSubscription::onConfigUpdateFailed(Config::ConfigUpdateFailureReason reason, + const EnvoyException*) { + ENVOY_LOG(debug, "Updating filter config {} failed due to {}", filter_config_name_, reason); + stats_.config_fail_.inc(); + // Make sure to make progress in case the control plane is temporarily failing. + init_target_.ready(); +} + +FilterConfigSubscription::~FilterConfigSubscription() { + // If we get destroyed during initialization, make sure we signal that we "initialized". + init_target_.ready(); + // Remove the subscription from the provider manager. + filter_config_provider_manager_.subscriptions_.erase(subscription_id_); +} + +std::shared_ptr FilterConfigProviderManagerImpl::getSubscription( + const envoy::config::core::v3::ConfigSource& config_source, const std::string& name, + Server::Configuration::FactoryContext& factory_context, const std::string& stat_prefix) { + // FilterConfigSubscriptions are unique based on their config source and filter config name + // combination. + // TODO(https://github.com/envoyproxy/envoy/issues/11967) Hash collision can cause subscription + // aliasing. 
+ const std::string subscription_id = absl::StrCat(MessageUtil::hash(config_source), ".", name); + auto it = subscriptions_.find(subscription_id); + if (it == subscriptions_.end()) { + auto subscription = std::make_shared( + config_source, name, factory_context, stat_prefix, *this, subscription_id); + subscriptions_.insert({subscription_id, std::weak_ptr(subscription)}); + return subscription; + } else { + auto existing = it->second.lock(); + ASSERT(existing != nullptr, + absl::StrCat("Cannot find subscribed filter config resource ", name)); + return existing; + } +} + +FilterConfigProviderPtr FilterConfigProviderManagerImpl::createDynamicFilterConfigProvider( + const envoy::config::core::v3::ConfigSource& config_source, + const std::string& filter_config_name, const std::set& require_type_urls, + Server::Configuration::FactoryContext& factory_context, const std::string& stat_prefix, + bool apply_without_warming) { + auto subscription = + getSubscription(config_source, filter_config_name, factory_context, stat_prefix); + // For warming, wait until the subscription receives the first response to indicate readiness. + // Otherwise, mark ready immediately and start the subscription on initialization. A default + // config is expected in the latter case. + if (!apply_without_warming) { + factory_context.initManager().add(subscription->initTarget()); + } + auto provider = std::make_unique( + std::move(subscription), require_type_urls, factory_context); + // Ensure the subscription starts if it has not already. 
+ if (apply_without_warming) { + factory_context.initManager().add(provider->init_target_); + } + return provider; +} + +} // namespace Http +} // namespace Filter +} // namespace Envoy diff --git a/source/common/filter/http/filter_config_discovery_impl.h b/source/common/filter/http/filter_config_discovery_impl.h new file mode 100644 index 0000000000000..43a75542d1387 --- /dev/null +++ b/source/common/filter/http/filter_config_discovery_impl.h @@ -0,0 +1,192 @@ +#pragma once + +#include "envoy/config/core/v3/extension.pb.h" +#include "envoy/config/core/v3/extension.pb.validate.h" +#include "envoy/config/subscription.h" +#include "envoy/filter/http/filter_config_provider.h" +#include "envoy/protobuf/message_validator.h" +#include "envoy/server/factory_context.h" +#include "envoy/singleton/instance.h" +#include "envoy/stats/scope.h" +#include "envoy/stats/stats_macros.h" + +#include "common/config/subscription_base.h" +#include "common/init/manager_impl.h" +#include "common/init/target_impl.h" + +#include "absl/container/flat_hash_map.h" +#include "absl/container/flat_hash_set.h" + +namespace Envoy { +namespace Filter { +namespace Http { + +class FilterConfigProviderManagerImpl; +class FilterConfigSubscription; + +using FilterConfigSubscriptionSharedPtr = std::shared_ptr; + +/** + * Implementation of a filter config provider using discovery subscriptions. 
+ **/ +class DynamicFilterConfigProviderImpl : public FilterConfigProvider { +public: + DynamicFilterConfigProviderImpl(FilterConfigSubscriptionSharedPtr&& subscription, + const std::set& require_type_urls, + Server::Configuration::FactoryContext& factory_context); + ~DynamicFilterConfigProviderImpl() override; + + // Config::ExtensionConfigProvider + const std::string& name() override; + absl::optional config() override; + void validateConfig(const ProtobufWkt::Any& proto_config, + Server::Configuration::NamedHttpFilterConfigFactory&) override; + void onConfigUpdate(Envoy::Http::FilterFactoryCb config, const std::string&, + Config::ConfigAppliedCb cb) override; + +private: + struct ThreadLocalConfig : public ThreadLocal::ThreadLocalObject { + ThreadLocalConfig() : config_{absl::nullopt} {} + absl::optional config_{}; + }; + + FilterConfigSubscriptionSharedPtr subscription_; + const std::set require_type_urls_; + // Currently applied configuration to ensure that the main thread deletes the last reference to + // it. + absl::optional current_config_{absl::nullopt}; + ThreadLocal::SlotPtr tls_; + + // Local initialization target to ensure that the subscription starts in + // case no warming is requested by any other filter config provider. + Init::TargetImpl init_target_; + + friend class FilterConfigProviderManagerImpl; +}; + +/** + * All extension config discovery stats. @see stats_macros.h + */ +#define ALL_EXTENSION_CONFIG_DISCOVERY_STATS(COUNTER) \ + COUNTER(config_reload) \ + COUNTER(config_fail) + +/** + * Struct definition for all extension config discovery stats. @see stats_macros.h + */ +struct ExtensionConfigDiscoveryStats { + ALL_EXTENSION_CONFIG_DISCOVERY_STATS(GENERATE_COUNTER_STRUCT) +}; + +/** + * A class that fetches the filter configuration dynamically using the filter config discovery API. + * Subscriptions are shared between the filter config providers. The filter config providers are + * notified when a new config is accepted. 
+ */ +class FilterConfigSubscription + : Config::SubscriptionBase, + Logger::Loggable { +public: + FilterConfigSubscription(const envoy::config::core::v3::ConfigSource& config_source, + const std::string& filter_config_name, + Server::Configuration::FactoryContext& factory_context, + const std::string& stat_prefix, + FilterConfigProviderManagerImpl& filter_config_provider_manager, + const std::string& subscription_id); + + ~FilterConfigSubscription() override; + + const Init::SharedTargetImpl& initTarget() { return init_target_; } + const std::string& name() { return filter_config_name_; } + +private: + void start(); + + // Config::SubscriptionCallbacks + void onConfigUpdate(const std::vector& resources, + const std::string& version_info) override; + void onConfigUpdate(const std::vector& added_resources, + const Protobuf::RepeatedPtrField& removed_resources, + const std::string&) override; + void onConfigUpdateFailed(Config::ConfigUpdateFailureReason reason, + const EnvoyException*) override; + + const std::string filter_config_name_; + uint64_t last_config_hash_{0ul}; + Server::Configuration::FactoryContext& factory_context_; + ProtobufMessage::ValidationVisitor& validator_; + + Init::SharedTargetImpl init_target_; + bool started_{false}; + + Stats::ScopePtr scope_; + const std::string stat_prefix_; + ExtensionConfigDiscoveryStats stats_; + + // FilterConfigProviderManagerImpl maintains active subscriptions in a map. + FilterConfigProviderManagerImpl& filter_config_provider_manager_; + const std::string subscription_id_; + absl::flat_hash_set filter_config_providers_; + friend class DynamicFilterConfigProviderImpl; + + // This must be the last since its destructor may call out to stats to report + // on draining requests. + std::unique_ptr subscription_; +}; + +/** + * Provider implementation of a static filter config. 
+ **/ +class StaticFilterConfigProviderImpl : public FilterConfigProvider { +public: + StaticFilterConfigProviderImpl(const Envoy::Http::FilterFactoryCb& config, + const std::string filter_config_name) + : config_(config), filter_config_name_(filter_config_name) {} + + // Config::ExtensionConfigProvider + const std::string& name() override { return filter_config_name_; } + absl::optional config() override { return config_; } + void validateConfig(const ProtobufWkt::Any&, + Server::Configuration::NamedHttpFilterConfigFactory&) override { + NOT_REACHED_GCOVR_EXCL_LINE; + } + void onConfigUpdate(Envoy::Http::FilterFactoryCb, const std::string&, + Config::ConfigAppliedCb) override { + NOT_REACHED_GCOVR_EXCL_LINE; + } + +private: + Envoy::Http::FilterFactoryCb config_; + const std::string filter_config_name_; +}; + +/** + * An implementation of FilterConfigProviderManager. + */ +class FilterConfigProviderManagerImpl : public FilterConfigProviderManager, + public Singleton::Instance { +public: + FilterConfigProviderPtr createDynamicFilterConfigProvider( + const envoy::config::core::v3::ConfigSource& config_source, + const std::string& filter_config_name, const std::set& require_type_urls, + Server::Configuration::FactoryContext& factory_context, const std::string& stat_prefix, + bool apply_without_warming) override; + + FilterConfigProviderPtr + createStaticFilterConfigProvider(const Envoy::Http::FilterFactoryCb& config, + const std::string& filter_config_name) override { + return std::make_unique(config, filter_config_name); + } + +private: + std::shared_ptr + getSubscription(const envoy::config::core::v3::ConfigSource& config_source, + const std::string& name, Server::Configuration::FactoryContext& factory_context, + const std::string& stat_prefix); + absl::flat_hash_map> subscriptions_; + friend class FilterConfigSubscription; +}; + +} // namespace Http +} // namespace Filter +} // namespace Envoy diff --git a/source/common/formatter/BUILD 
b/source/common/formatter/BUILD new file mode 100644 index 0000000000000..d4eb45228abf1 --- /dev/null +++ b/source/common/formatter/BUILD @@ -0,0 +1,39 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_cc_library( + name = "substitution_formatter_lib", + srcs = ["substitution_formatter.cc"], + hdrs = ["substitution_formatter.h"], + external_deps = ["abseil_str_format"], + deps = [ + "//include/envoy/formatter:substitution_formatter_interface", + "//include/envoy/stream_info:stream_info_interface", + "//source/common/common:assert_lib", + "//source/common/common:utility_lib", + "//source/common/config:metadata_lib", + "//source/common/grpc:common_lib", + "//source/common/http:utility_lib", + "//source/common/protobuf:message_validator_lib", + "//source/common/stream_info:utility_lib", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + ], +) + +envoy_cc_library( + name = "substitution_format_string_lib", + srcs = ["substitution_format_string.cc"], + hdrs = ["substitution_format_string.h"], + deps = [ + ":substitution_formatter_lib", + "//source/common/protobuf", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + ], +) diff --git a/source/common/formatter/substitution_format_string.cc b/source/common/formatter/substitution_format_string.cc new file mode 100644 index 0000000000000..ec9e2db968656 --- /dev/null +++ b/source/common/formatter/substitution_format_string.cc @@ -0,0 +1,45 @@ +#include "common/formatter/substitution_format_string.h" + +#include "common/formatter/substitution_formatter.h" + +namespace Envoy { +namespace Formatter { +namespace { + +absl::flat_hash_map +convertJsonFormatToMap(const ProtobufWkt::Struct& json_format) { + absl::flat_hash_map output; + for (const auto& pair : json_format.fields()) { + if (pair.second.kind_case() != ProtobufWkt::Value::kStringValue) { + throw EnvoyException("Only string values are supported in the JSON access log 
format."); + } + output.emplace(pair.first, pair.second.string_value()); + } + return output; +} + +} // namespace + +FormatterPtr +SubstitutionFormatStringUtils::createJsonFormatter(const ProtobufWkt::Struct& struct_format, + bool preserve_types) { + auto json_format_map = convertJsonFormatToMap(struct_format); + return std::make_unique(json_format_map, preserve_types); +} + +FormatterPtr SubstitutionFormatStringUtils::fromProtoConfig( + const envoy::config::core::v3::SubstitutionFormatString& config) { + switch (config.format_case()) { + case envoy::config::core::v3::SubstitutionFormatString::FormatCase::kTextFormat: + return std::make_unique(config.text_format()); + case envoy::config::core::v3::SubstitutionFormatString::FormatCase::kJsonFormat: { + return createJsonFormatter(config.json_format(), true); + } + default: + NOT_REACHED_GCOVR_EXCL_LINE; + } + return nullptr; +} + +} // namespace Formatter +} // namespace Envoy diff --git a/source/common/formatter/substitution_format_string.h b/source/common/formatter/substitution_format_string.h new file mode 100644 index 0000000000000..6d514cecc47dc --- /dev/null +++ b/source/common/formatter/substitution_format_string.h @@ -0,0 +1,32 @@ +#pragma once + +#include + +#include "envoy/config/core/v3/substitution_format_string.pb.h" +#include "envoy/formatter/substitution_formatter.h" + +#include "common/protobuf/protobuf.h" + +namespace Envoy { +namespace Formatter { + +/** + * Utilities for using envoy::config::core::v3::SubstitutionFormatString + */ +class SubstitutionFormatStringUtils { +public: + /** + * Generate a formatter object from config SubstitutionFormatString. 
+ */ + static FormatterPtr + fromProtoConfig(const envoy::config::core::v3::SubstitutionFormatString& config); + + /** + * Generate a Json formatter object from proto::Struct config + */ + static FormatterPtr createJsonFormatter(const ProtobufWkt::Struct& struct_format, + bool preserve_types); +}; + +} // namespace Formatter +} // namespace Envoy diff --git a/source/common/access_log/access_log_formatter.cc b/source/common/formatter/substitution_formatter.cc similarity index 86% rename from source/common/access_log/access_log_formatter.cc rename to source/common/formatter/substitution_formatter.cc index 6ea62a7dd1a81..4a3bb8a90cf8b 100644 --- a/source/common/access_log/access_log_formatter.cc +++ b/source/common/formatter/substitution_formatter.cc @@ -1,4 +1,4 @@ -#include "common/access_log/access_log_formatter.h" +#include "common/formatter/substitution_formatter.h" #include #include @@ -27,7 +27,7 @@ using Envoy::Config::Metadata; namespace Envoy { -namespace AccessLog { +namespace Formatter { static const std::string UnspecifiedValueString = "-"; @@ -51,26 +51,26 @@ const std::regex& getNewlinePattern() { CONSTRUCT_ON_FIRST_USE(std::regex, "\n") } // namespace -const std::string AccessLogFormatUtils::DEFAULT_FORMAT = +const std::string SubstitutionFormatUtils::DEFAULT_FORMAT = "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\" " "%RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% " "%RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% " "\"%REQ(X-FORWARDED-FOR)%\" \"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" " "\"%REQ(:AUTHORITY)%\" \"%UPSTREAM_HOST%\"\n"; -FormatterPtr AccessLogFormatUtils::defaultAccessLogFormatter() { +FormatterPtr SubstitutionFormatUtils::defaultSubstitutionFormatter() { return FormatterPtr{new FormatterImpl(DEFAULT_FORMAT)}; } const std::string& -AccessLogFormatUtils::protocolToString(const absl::optional& protocol) { +SubstitutionFormatUtils::protocolToString(const absl::optional& protocol) { if 
(protocol) { return Http::Utility::getProtocolString(protocol.value()); } return UnspecifiedValueString; } -const std::string AccessLogFormatUtils::getHostname() { +const std::string SubstitutionFormatUtils::getHostname() { #ifdef HOST_NAME_MAX const size_t len = HOST_NAME_MAX; #else @@ -90,37 +90,40 @@ const std::string AccessLogFormatUtils::getHostname() { } FormatterImpl::FormatterImpl(const std::string& format) { - providers_ = AccessLogFormatParser::parse(format); + providers_ = SubstitutionFormatParser::parse(format); } std::string FormatterImpl::format(const Http::RequestHeaderMap& request_headers, const Http::ResponseHeaderMap& response_headers, const Http::ResponseTrailerMap& response_trailers, - const StreamInfo::StreamInfo& stream_info) const { + const StreamInfo::StreamInfo& stream_info, + absl::string_view local_reply_body) const { std::string log_line; log_line.reserve(256); for (const FormatterProviderPtr& provider : providers_) { - log_line += provider->format(request_headers, response_headers, response_trailers, stream_info); + log_line += provider->format(request_headers, response_headers, response_trailers, stream_info, + local_reply_body); } return log_line; } -JsonFormatterImpl::JsonFormatterImpl(std::unordered_map& format_mapping, - bool preserve_types) +JsonFormatterImpl::JsonFormatterImpl( + const absl::flat_hash_map& format_mapping, bool preserve_types) : preserve_types_(preserve_types) { for (const auto& pair : format_mapping) { - json_output_format_.emplace(pair.first, AccessLogFormatParser::parse(pair.second)); + json_output_format_.emplace(pair.first, SubstitutionFormatParser::parse(pair.second)); } } std::string JsonFormatterImpl::format(const Http::RequestHeaderMap& request_headers, const Http::ResponseHeaderMap& response_headers, const Http::ResponseTrailerMap& response_trailers, - const StreamInfo::StreamInfo& stream_info) const { + const StreamInfo::StreamInfo& stream_info, + absl::string_view local_reply_body) const { const auto 
output_struct = - toStruct(request_headers, response_headers, response_trailers, stream_info); + toStruct(request_headers, response_headers, response_trailers, stream_info, local_reply_body); const std::string log_line = MessageUtil::getJsonStringFromMessage(output_struct, false, true); return absl::StrCat(log_line, "\n"); @@ -129,7 +132,8 @@ std::string JsonFormatterImpl::format(const Http::RequestHeaderMap& request_head ProtobufWkt::Struct JsonFormatterImpl::toStruct(const Http::RequestHeaderMap& request_headers, const Http::ResponseHeaderMap& response_headers, const Http::ResponseTrailerMap& response_trailers, - const StreamInfo::StreamInfo& stream_info) const { + const StreamInfo::StreamInfo& stream_info, + absl::string_view local_reply_body) const { ProtobufWkt::Struct output; auto* fields = output.mutable_fields(); for (const auto& pair : json_output_format_) { @@ -140,16 +144,17 @@ ProtobufWkt::Struct JsonFormatterImpl::toStruct(const Http::RequestHeaderMap& re const auto& provider = providers.front(); const auto val = preserve_types_ ? provider->formatValue(request_headers, response_headers, - response_trailers, stream_info) - : ValueUtil::stringValue(provider->format( - request_headers, response_headers, response_trailers, stream_info)); - + response_trailers, stream_info, local_reply_body) + : ValueUtil::stringValue( + provider->format(request_headers, response_headers, + response_trailers, stream_info, local_reply_body)); (*fields)[pair.first] = val; } else { // Multiple providers forces string output. 
std::string str; for (const auto& provider : providers) { - str += provider->format(request_headers, response_headers, response_trailers, stream_info); + str += provider->format(request_headers, response_headers, response_trailers, stream_info, + local_reply_body); } (*fields)[pair.first] = ValueUtil::stringValue(str); } @@ -157,10 +162,10 @@ ProtobufWkt::Struct JsonFormatterImpl::toStruct(const Http::RequestHeaderMap& re return output; } -void AccessLogFormatParser::parseCommandHeader(const std::string& token, const size_t start, - std::string& main_header, - std::string& alternative_header, - absl::optional& max_length) { +void SubstitutionFormatParser::parseCommandHeader(const std::string& token, const size_t start, + std::string& main_header, + std::string& alternative_header, + absl::optional& max_length) { std::vector subs; parseCommand(token, start, "?", main_header, subs, max_length); if (subs.size() > 1) { @@ -181,10 +186,10 @@ void AccessLogFormatParser::parseCommandHeader(const std::string& token, const s } } -void AccessLogFormatParser::parseCommand(const std::string& token, const size_t start, - const std::string& separator, std::string& main, - std::vector& sub_items, - absl::optional& max_length) { +void SubstitutionFormatParser::parseCommand(const std::string& token, const size_t start, + const std::string& separator, std::string& main, + std::vector& sub_items, + absl::optional& max_length) { // TODO(dnoe): Convert this to use string_view throughout. 
const size_t end_request = token.find(')', start); sub_items.clear(); @@ -225,13 +230,13 @@ void AccessLogFormatParser::parseCommand(const std::string& token, const size_t } } -// TODO(derekargueta): #2967 - Rewrite AccessLogFormatter with parser library & formal grammar -std::vector AccessLogFormatParser::parse(const std::string& format) { +// TODO(derekargueta): #2967 - Rewrite SubstitutionFormatter with parser library & formal grammar +std::vector SubstitutionFormatParser::parse(const std::string& format) { std::string current_token; std::vector formatters; static constexpr absl::string_view DYNAMIC_META_TOKEN{"DYNAMIC_METADATA("}; static constexpr absl::string_view FILTER_STATE_TOKEN{"FILTER_STATE("}; - const std::regex command_w_args_regex(R"EOF(%([A-Z]|_)+(\([^\)]*\))?(:[0-9]+)?(%))EOF"); + const std::regex command_w_args_regex(R"EOF(^%([A-Z]|_)+(\([^\)]*\))?(:[0-9]+)?(%))EOF"); static constexpr absl::string_view PLAIN_SERIALIZATION{"PLAIN"}; static constexpr absl::string_view TYPED_SERIALIZATION{"TYPED"}; @@ -245,7 +250,7 @@ std::vector AccessLogFormatParser::parse(const std::string std::smatch m; const std::string search_space = format.substr(pos); - if (!(std::regex_search(search_space, m, command_w_args_regex) || m.position() == 0)) { + if (!std::regex_search(search_space, m, command_w_args_regex)) { throw EnvoyException( fmt::format("Incorrect configuration: {}. 
Couldn't find valid command at position {}", format, pos)); @@ -280,6 +285,8 @@ std::vector AccessLogFormatParser::parse(const std::string formatters.emplace_back(FormatterProviderPtr{ new ResponseTrailerFormatter(main_header, alternative_header, max_length)}); + } else if (absl::StartsWith(token, "LOCAL_REPLY_BODY")) { + formatters.emplace_back(std::make_unique()); } else if (absl::StartsWith(token, DYNAMIC_META_TOKEN)) { std::string filter_namespace; absl::optional max_length; @@ -417,7 +424,7 @@ class StreamInfoDurationFieldExtractor : public StreamInfoFormatter::FieldExtrac } private: - absl::optional extractMillis(const StreamInfo::StreamInfo& stream_info) const { + absl::optional extractMillis(const StreamInfo::StreamInfo& stream_info) const { const auto time = field_extractor_(stream_info); if (time) { return std::chrono::duration_cast(time.value()).count(); @@ -574,7 +581,7 @@ StreamInfoFormatter::StreamInfoFormatter(const std::string& field_name) { } else if (field_name == "PROTOCOL") { field_extractor_ = std::make_unique( [](const StreamInfo::StreamInfo& stream_info) { - return AccessLogFormatUtils::protocolToString(stream_info.protocol()); + return SubstitutionFormatUtils::protocolToString(stream_info.protocol()); }); } else if (field_name == "RESPONSE_CODE") { field_extractor_ = std::make_unique( @@ -709,6 +716,11 @@ StreamInfoFormatter::StreamInfoFormatter(const std::string& field_name) { [](const Ssl::ConnectionInfo& connection_info) { return connection_info.sha256PeerCertificateDigest(); }); + } else if (field_name == "DOWNSTREAM_PEER_FINGERPRINT_1") { + field_extractor_ = std::make_unique( + [](const Ssl::ConnectionInfo& connection_info) { + return connection_info.sha1PeerCertificateDigest(); + }); } else if (field_name == "DOWNSTREAM_PEER_SERIAL") { field_extractor_ = std::make_unique( [](const Ssl::ConnectionInfo& connection_info) { @@ -752,7 +764,7 @@ StreamInfoFormatter::StreamInfoFormatter(const std::string& field_name) { return result; }); } 
else if (field_name == "HOSTNAME") { - std::string hostname = AccessLogFormatUtils::getHostname(); + std::string hostname = SubstitutionFormatUtils::getHostname(); field_extractor_ = std::make_unique( [hostname](const StreamInfo::StreamInfo&) { return hostname; }); } else { @@ -763,14 +775,16 @@ StreamInfoFormatter::StreamInfoFormatter(const std::string& field_name) { std::string StreamInfoFormatter::format(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&, const Http::ResponseTrailerMap&, - const StreamInfo::StreamInfo& stream_info) const { + const StreamInfo::StreamInfo& stream_info, + absl::string_view) const { return field_extractor_->extract(stream_info); } -ProtobufWkt::Value -StreamInfoFormatter::formatValue(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&, - const Http::ResponseTrailerMap&, - const StreamInfo::StreamInfo& stream_info) const { +ProtobufWkt::Value StreamInfoFormatter::formatValue(const Http::RequestHeaderMap&, + const Http::ResponseHeaderMap&, + const Http::ResponseTrailerMap&, + const StreamInfo::StreamInfo& stream_info, + absl::string_view) const { return field_extractor_->extractValue(stream_info); } @@ -779,17 +793,34 @@ PlainStringFormatter::PlainStringFormatter(const std::string& str) { str_.set_st std::string PlainStringFormatter::format(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&, const Http::ResponseTrailerMap&, - const StreamInfo::StreamInfo&) const { + const StreamInfo::StreamInfo&, absl::string_view) const { return str_.string_value(); } ProtobufWkt::Value PlainStringFormatter::formatValue(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&, const Http::ResponseTrailerMap&, - const StreamInfo::StreamInfo&) const { + const StreamInfo::StreamInfo&, + absl::string_view) const { return str_; } +std::string LocalReplyBodyFormatter::format(const Http::RequestHeaderMap&, + const Http::ResponseHeaderMap&, + const Http::ResponseTrailerMap&, + const StreamInfo::StreamInfo&, + 
absl::string_view local_reply_body) const { + return std::string(local_reply_body); +} + +ProtobufWkt::Value LocalReplyBodyFormatter::formatValue(const Http::RequestHeaderMap&, + const Http::ResponseHeaderMap&, + const Http::ResponseTrailerMap&, + const StreamInfo::StreamInfo&, + absl::string_view local_reply_body) const { + return ValueUtil::stringValue(std::string(local_reply_body)); +} + HeaderFormatter::HeaderFormatter(const std::string& main_header, const std::string& alternative_header, absl::optional max_length) @@ -835,13 +866,14 @@ ResponseHeaderFormatter::ResponseHeaderFormatter(const std::string& main_header, std::string ResponseHeaderFormatter::format(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap& response_headers, const Http::ResponseTrailerMap&, - const StreamInfo::StreamInfo&) const { + const StreamInfo::StreamInfo&, + absl::string_view) const { return HeaderFormatter::format(response_headers); } ProtobufWkt::Value ResponseHeaderFormatter::formatValue( const Http::RequestHeaderMap&, const Http::ResponseHeaderMap& response_headers, - const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&) const { + const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&, absl::string_view) const { return HeaderFormatter::formatValue(response_headers); } @@ -853,14 +885,14 @@ RequestHeaderFormatter::RequestHeaderFormatter(const std::string& main_header, std::string RequestHeaderFormatter::format(const Http::RequestHeaderMap& request_headers, const Http::ResponseHeaderMap&, const Http::ResponseTrailerMap&, - const StreamInfo::StreamInfo&) const { + const StreamInfo::StreamInfo&, absl::string_view) const { return HeaderFormatter::format(request_headers); } ProtobufWkt::Value RequestHeaderFormatter::formatValue(const Http::RequestHeaderMap& request_headers, const Http::ResponseHeaderMap&, const Http::ResponseTrailerMap&, - const StreamInfo::StreamInfo&) const { + const StreamInfo::StreamInfo&, absl::string_view) const { return 
HeaderFormatter::formatValue(request_headers); } @@ -872,14 +904,15 @@ ResponseTrailerFormatter::ResponseTrailerFormatter(const std::string& main_heade std::string ResponseTrailerFormatter::format(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&, const Http::ResponseTrailerMap& response_trailers, - const StreamInfo::StreamInfo&) const { + const StreamInfo::StreamInfo&, + absl::string_view) const { return HeaderFormatter::format(response_trailers); } ProtobufWkt::Value ResponseTrailerFormatter::formatValue(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&, const Http::ResponseTrailerMap& response_trailers, - const StreamInfo::StreamInfo&) const { + const StreamInfo::StreamInfo&, absl::string_view) const { return HeaderFormatter::formatValue(response_trailers); } @@ -891,7 +924,8 @@ GrpcStatusFormatter::GrpcStatusFormatter(const std::string& main_header, std::string GrpcStatusFormatter::format(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap& response_headers, const Http::ResponseTrailerMap& response_trailers, - const StreamInfo::StreamInfo& info) const { + const StreamInfo::StreamInfo& info, + absl::string_view) const { const auto grpc_status = Grpc::Common::getGrpcStatus(response_trailers, response_headers, info, true); if (!grpc_status.has_value()) { @@ -904,9 +938,11 @@ std::string GrpcStatusFormatter::format(const Http::RequestHeaderMap&, return grpc_status_message; } -ProtobufWkt::Value GrpcStatusFormatter::formatValue( - const Http::RequestHeaderMap&, const Http::ResponseHeaderMap& response_headers, - const Http::ResponseTrailerMap& response_trailers, const StreamInfo::StreamInfo& info) const { +ProtobufWkt::Value +GrpcStatusFormatter::formatValue(const Http::RequestHeaderMap&, + const Http::ResponseHeaderMap& response_headers, + const Http::ResponseTrailerMap& response_trailers, + const StreamInfo::StreamInfo& info, absl::string_view) const { const auto grpc_status = Grpc::Common::getGrpcStatus(response_trailers, 
response_headers, info, true); if (!grpc_status.has_value()) { @@ -966,14 +1002,16 @@ DynamicMetadataFormatter::DynamicMetadataFormatter(const std::string& filter_nam std::string DynamicMetadataFormatter::format(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&, const Http::ResponseTrailerMap&, - const StreamInfo::StreamInfo& stream_info) const { + const StreamInfo::StreamInfo& stream_info, + absl::string_view) const { return MetadataFormatter::formatMetadata(stream_info.dynamicMetadata()); } -ProtobufWkt::Value -DynamicMetadataFormatter::formatValue(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&, - const Http::ResponseTrailerMap&, - const StreamInfo::StreamInfo& stream_info) const { +ProtobufWkt::Value DynamicMetadataFormatter::formatValue(const Http::RequestHeaderMap&, + const Http::ResponseHeaderMap&, + const Http::ResponseTrailerMap&, + const StreamInfo::StreamInfo& stream_info, + absl::string_view) const { return MetadataFormatter::formatMetadataValue(stream_info.dynamicMetadata()); } @@ -994,7 +1032,8 @@ FilterStateFormatter::filterState(const StreamInfo::StreamInfo& stream_info) con std::string FilterStateFormatter::format(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&, const Http::ResponseTrailerMap&, - const StreamInfo::StreamInfo& stream_info) const { + const StreamInfo::StreamInfo& stream_info, + absl::string_view) const { const Envoy::StreamInfo::FilterState::Object* state = filterState(stream_info); if (!state) { return UnspecifiedValueString; @@ -1026,10 +1065,11 @@ std::string FilterStateFormatter::format(const Http::RequestHeaderMap&, return value; } -ProtobufWkt::Value -FilterStateFormatter::formatValue(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&, - const Http::ResponseTrailerMap&, - const StreamInfo::StreamInfo& stream_info) const { +ProtobufWkt::Value FilterStateFormatter::formatValue(const Http::RequestHeaderMap&, + const Http::ResponseHeaderMap&, + const Http::ResponseTrailerMap&, + 
const StreamInfo::StreamInfo& stream_info, + absl::string_view) const { const Envoy::StreamInfo::FilterState::Object* state = filterState(stream_info); if (!state) { return unspecifiedValue(); @@ -1063,7 +1103,8 @@ StartTimeFormatter::StartTimeFormatter(const std::string& format) : date_formatt std::string StartTimeFormatter::format(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&, const Http::ResponseTrailerMap&, - const StreamInfo::StreamInfo& stream_info) const { + const StreamInfo::StreamInfo& stream_info, + absl::string_view) const { if (date_formatter_.formatString().empty()) { return AccessLogDateTimeFormatter::fromTime(stream_info.startTime()); } else { @@ -1071,14 +1112,13 @@ std::string StartTimeFormatter::format(const Http::RequestHeaderMap&, } } -ProtobufWkt::Value -StartTimeFormatter::formatValue(const Http::RequestHeaderMap& request_headers, - const Http::ResponseHeaderMap& response_headers, - const Http::ResponseTrailerMap& response_trailers, - const StreamInfo::StreamInfo& stream_info) const { +ProtobufWkt::Value StartTimeFormatter::formatValue( + const Http::RequestHeaderMap& request_headers, const Http::ResponseHeaderMap& response_headers, + const Http::ResponseTrailerMap& response_trailers, const StreamInfo::StreamInfo& stream_info, + absl::string_view local_reply_body) const { return ValueUtil::stringValue( - format(request_headers, response_headers, response_trailers, stream_info)); + format(request_headers, response_headers, response_trailers, stream_info, local_reply_body)); } -} // namespace AccessLog +} // namespace Formatter } // namespace Envoy diff --git a/source/common/access_log/access_log_formatter.h b/source/common/formatter/substitution_formatter.h similarity index 78% rename from source/common/access_log/access_log_formatter.h rename to source/common/formatter/substitution_formatter.h index 408eb49b3eaba..8336f3274f855 100644 --- a/source/common/access_log/access_log_formatter.h +++ 
b/source/common/formatter/substitution_formatter.h @@ -2,25 +2,25 @@ #include #include -#include #include -#include "envoy/access_log/access_log.h" #include "envoy/common/time.h" #include "envoy/config/core/v3/base.pb.h" +#include "envoy/formatter/substitution_formatter.h" #include "envoy/stream_info/stream_info.h" #include "common/common/utility.h" +#include "absl/container/flat_hash_map.h" #include "absl/types/optional.h" namespace Envoy { -namespace AccessLog { +namespace Formatter { /** * Access log format parser. */ -class AccessLogFormatParser { +class SubstitutionFormatParser { public: static std::vector parse(const std::string& format); @@ -70,14 +70,14 @@ class AccessLogFormatParser { /** * Util class for access log format. */ -class AccessLogFormatUtils { +class SubstitutionFormatUtils { public: - static FormatterPtr defaultAccessLogFormatter(); + static FormatterPtr defaultSubstitutionFormatter(); static const std::string& protocolToString(const absl::optional& protocol); static const std::string getHostname(); private: - AccessLogFormatUtils(); + SubstitutionFormatUtils(); static const std::string DEFAULT_FORMAT; }; @@ -93,7 +93,8 @@ class FormatterImpl : public Formatter { std::string format(const Http::RequestHeaderMap& request_headers, const Http::ResponseHeaderMap& response_headers, const Http::ResponseTrailerMap& response_trailers, - const StreamInfo::StreamInfo& stream_info) const override; + const StreamInfo::StreamInfo& stream_info, + absl::string_view local_reply_body) const override; private: std::vector providers_; @@ -101,14 +102,15 @@ class FormatterImpl : public Formatter { class JsonFormatterImpl : public Formatter { public: - JsonFormatterImpl(std::unordered_map& format_mapping, + JsonFormatterImpl(const absl::flat_hash_map& format_mapping, bool preserve_types); // Formatter::format std::string format(const Http::RequestHeaderMap& request_headers, const Http::ResponseHeaderMap& response_headers, const Http::ResponseTrailerMap& 
response_trailers, - const StreamInfo::StreamInfo& stream_info) const override; + const StreamInfo::StreamInfo& stream_info, + absl::string_view local_reply_body) const override; private: const bool preserve_types_; @@ -117,7 +119,8 @@ class JsonFormatterImpl : public Formatter { ProtobufWkt::Struct toStruct(const Http::RequestHeaderMap& request_headers, const Http::ResponseHeaderMap& response_headers, const Http::ResponseTrailerMap& response_trailers, - const StreamInfo::StreamInfo& stream_info) const; + const StreamInfo::StreamInfo& stream_info, + absl::string_view local_reply_body) const; }; /** @@ -130,18 +133,32 @@ class PlainStringFormatter : public FormatterProvider { // FormatterProvider std::string format(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&, - const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&) const override; + const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&, + absl::string_view) const override; ProtobufWkt::Value formatValue(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&, - const Http::ResponseTrailerMap&, - const StreamInfo::StreamInfo&) const override; + const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&, + absl::string_view) const override; private: ProtobufWkt::Value str_; }; /** - * Base formatter for headers. + * FormatterProvider for local_reply_body. It returns the string from `local_reply_body` argument. 
*/ +class LocalReplyBodyFormatter : public FormatterProvider { +public: + LocalReplyBodyFormatter() = default; + + // Formatter::format + std::string format(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&, + const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&, + absl::string_view local_reply_body) const override; + ProtobufWkt::Value formatValue(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&, + const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&, + absl::string_view local_reply_body) const override; +}; + class HeaderFormatter { public: HeaderFormatter(const std::string& main_header, const std::string& alternative_header, @@ -169,10 +186,11 @@ class RequestHeaderFormatter : public FormatterProvider, HeaderFormatter { // FormatterProvider std::string format(const Http::RequestHeaderMap& request_headers, const Http::ResponseHeaderMap&, - const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&) const override; + const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&, + absl::string_view) const override; ProtobufWkt::Value formatValue(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&, - const Http::ResponseTrailerMap&, - const StreamInfo::StreamInfo&) const override; + const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&, + absl::string_view) const override; }; /** @@ -185,10 +203,11 @@ class ResponseHeaderFormatter : public FormatterProvider, HeaderFormatter { // FormatterProvider std::string format(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap& response_headers, - const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&) const override; + const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&, + absl::string_view) const override; ProtobufWkt::Value formatValue(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&, - const Http::ResponseTrailerMap&, - const StreamInfo::StreamInfo&) const override; + const Http::ResponseTrailerMap&, const 
StreamInfo::StreamInfo&, + absl::string_view) const override; }; /** @@ -202,10 +221,10 @@ class ResponseTrailerFormatter : public FormatterProvider, HeaderFormatter { // FormatterProvider std::string format(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&, const Http::ResponseTrailerMap& response_trailers, - const StreamInfo::StreamInfo&) const override; + const StreamInfo::StreamInfo&, absl::string_view) const override; ProtobufWkt::Value formatValue(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&, - const Http::ResponseTrailerMap&, - const StreamInfo::StreamInfo&) const override; + const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&, + absl::string_view) const override; }; /** @@ -219,10 +238,10 @@ class GrpcStatusFormatter : public FormatterProvider, HeaderFormatter { // FormatterProvider std::string format(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap& response_headers, const Http::ResponseTrailerMap& response_trailers, - const StreamInfo::StreamInfo&) const override; + const StreamInfo::StreamInfo&, absl::string_view) const override; ProtobufWkt::Value formatValue(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&, - const Http::ResponseTrailerMap&, - const StreamInfo::StreamInfo&) const override; + const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&, + absl::string_view) const override; }; /** @@ -234,10 +253,11 @@ class StreamInfoFormatter : public FormatterProvider { // FormatterProvider std::string format(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&, - const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&) const override; + const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&, + absl::string_view) const override; ProtobufWkt::Value formatValue(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&, - const Http::ResponseTrailerMap&, - const StreamInfo::StreamInfo&) const override; + const Http::ResponseTrailerMap&, const 
StreamInfo::StreamInfo&, + absl::string_view) const override; class FieldExtractor { public: @@ -282,10 +302,11 @@ class DynamicMetadataFormatter : public FormatterProvider, MetadataFormatter { // FormatterProvider std::string format(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&, - const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&) const override; + const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&, + absl::string_view) const override; ProtobufWkt::Value formatValue(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&, - const Http::ResponseTrailerMap&, - const StreamInfo::StreamInfo&) const override; + const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&, + absl::string_view) const override; }; /** @@ -298,10 +319,11 @@ class FilterStateFormatter : public FormatterProvider { // FormatterProvider std::string format(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&, - const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&) const override; + const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&, + absl::string_view) const override; ProtobufWkt::Value formatValue(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&, - const Http::ResponseTrailerMap&, - const StreamInfo::StreamInfo&) const override; + const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&, + absl::string_view) const override; private: const Envoy::StreamInfo::FilterState::Object* @@ -322,14 +344,15 @@ class StartTimeFormatter : public FormatterProvider { // FormatterProvider std::string format(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&, - const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&) const override; + const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&, + absl::string_view) const override; ProtobufWkt::Value formatValue(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&, - const Http::ResponseTrailerMap&, - const StreamInfo::StreamInfo&) 
const override; + const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&, + absl::string_view) const override; private: const Envoy::DateFormatter date_formatter_; }; -} // namespace AccessLog +} // namespace Formatter } // namespace Envoy diff --git a/source/common/grpc/BUILD b/source/common/grpc/BUILD index 29f31e66d4445..3daea1ce4395f 100644 --- a/source/common/grpc/BUILD +++ b/source/common/grpc/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", @@ -8,6 +6,8 @@ load( "envoy_select_google_grpc", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( @@ -33,6 +33,7 @@ envoy_cc_library( ":typed_async_client_lib", "//include/envoy/grpc:async_client_interface", "//source/common/buffer:zero_copy_input_stream_lib", + "//source/common/config:version_converter_lib", "//source/common/http:async_client_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], @@ -94,6 +95,7 @@ envoy_cc_library( "//source/common/common:macros", "//source/common/common:utility_lib", "//source/common/grpc:status_lib", + "//source/common/http:header_utility_lib", "//source/common/http:headers_lib", "//source/common/http:message_lib", "//source/common/http:utility_lib", @@ -114,6 +116,7 @@ envoy_cc_library( "//include/envoy/stats:stats_interface", "//source/common/common:hash_lib", "//source/common/stats:symbol_table_lib", + "//source/common/stats:utility_lib", ], ) diff --git a/source/common/grpc/async_client_impl.cc b/source/common/grpc/async_client_impl.cc index 19aa96ed460f0..55e4fa75b23be 100644 --- a/source/common/grpc/async_client_impl.cc +++ b/source/common/grpc/async_client_impl.cc @@ -16,7 +16,8 @@ AsyncClientImpl::AsyncClientImpl(Upstream::ClusterManager& cm, const envoy::config::core::v3::GrpcService& config, TimeSource& time_source) : cm_(cm), remote_cluster_name_(config.envoy_grpc().cluster_name()), - initial_metadata_(config.initial_metadata()), time_source_(time_source) {} + 
host_name_(config.envoy_grpc().authority()), initial_metadata_(config.initial_metadata()), + time_source_(time_source) {} AsyncClientImpl::~AsyncClientImpl() { while (!active_streams_.empty()) { @@ -31,14 +32,14 @@ AsyncRequest* AsyncClientImpl::sendRaw(absl::string_view service_full_name, const Http::AsyncClient::RequestOptions& options) { auto* const async_request = new AsyncRequestImpl( *this, service_full_name, method_name, std::move(request), callbacks, parent_span, options); - std::unique_ptr grpc_stream{async_request}; + AsyncStreamImplPtr grpc_stream{async_request}; grpc_stream->initialize(true); if (grpc_stream->hasResetStream()) { return nullptr; } - grpc_stream->moveIntoList(std::move(grpc_stream), active_streams_); + LinkedList::moveIntoList(std::move(grpc_stream), active_streams_); return async_request; } @@ -54,7 +55,7 @@ RawAsyncStream* AsyncClientImpl::startRaw(absl::string_view service_full_name, return nullptr; } - grpc_stream->moveIntoList(std::move(grpc_stream), active_streams_); + LinkedList::moveIntoList(std::move(grpc_stream), active_streams_); return active_streams_.front().get(); } @@ -83,9 +84,9 @@ void AsyncStreamImpl::initialize(bool buffer_body_for_retry) { // TODO(htuch): match Google gRPC base64 encoding behavior for *-bin headers, see // https://github.com/envoyproxy/envoy/pull/2444#discussion_r163914459. - headers_message_ = - Common::prepareHeaders(parent_.remote_cluster_name_, service_full_name_, method_name_, - absl::optional(options_.timeout)); + headers_message_ = Common::prepareHeaders( + parent_.host_name_.empty() ? parent_.remote_cluster_name_ : parent_.host_name_, + service_full_name_, method_name_, options_.timeout); // Fill service-wide initial metadata. 
for (const auto& header_value : parent_.initial_metadata_) { headers_message_->headers().addCopy(Http::LowerCaseString(header_value.key()), @@ -100,7 +101,7 @@ void AsyncStreamImpl::initialize(bool buffer_body_for_retry) { void AsyncStreamImpl::onHeaders(Http::ResponseHeaderMapPtr&& headers, bool end_stream) { const auto http_response_status = Http::Utility::getResponseStatus(*headers); const auto grpc_status = Common::getGrpcStatus(*headers); - callbacks_.onReceiveInitialMetadata(end_stream ? std::make_unique() + callbacks_.onReceiveInitialMetadata(end_stream ? Http::ResponseHeaderMapImpl::create() : std::move(headers)); if (http_response_status != enumToInt(Http::Code::OK)) { // https://github.com/grpc/grpc/blob/master/doc/http-grpc-status-mapping.md requires that @@ -108,6 +109,10 @@ void AsyncStreamImpl::onHeaders(Http::ResponseHeaderMapPtr&& headers, bool end_s if (end_stream && grpc_status) { // Due to headers/trailers type differences we need to copy here. This is an uncommon case but // we can potentially optimize in the future. + + // TODO(mattklein123): clang-tidy is showing a use after move when passing to + // onReceiveInitialMetadata() above. This looks like an actual bug that I will fix in a + // follow up. 
onTrailers(Http::createHeaderMap(*headers)); return; } @@ -163,7 +168,7 @@ void AsyncStreamImpl::onTrailers(Http::ResponseTrailerMapPtr&& trailers) { } void AsyncStreamImpl::streamError(Status::GrpcStatus grpc_status, const std::string& message) { - callbacks_.onReceiveTrailingMetadata(std::make_unique()); + callbacks_.onReceiveTrailingMetadata(Http::ResponseTrailerMapImpl::create()); callbacks_.onRemoteClose(grpc_status, message); resetStream(); } diff --git a/source/common/grpc/async_client_impl.h b/source/common/grpc/async_client_impl.h index f27cf84364317..ae0e2c7782ab9 100644 --- a/source/common/grpc/async_client_impl.h +++ b/source/common/grpc/async_client_impl.h @@ -1,5 +1,7 @@ #pragma once +#include + #include "envoy/config/core/v3/base.pb.h" #include "envoy/config/core/v3/grpc_service.pb.h" #include "envoy/grpc/async_client.h" @@ -13,7 +15,9 @@ namespace Envoy { namespace Grpc { class AsyncRequestImpl; + class AsyncStreamImpl; +using AsyncStreamImplPtr = std::unique_ptr; class AsyncClientImpl final : public RawAsyncClient { public: @@ -33,8 +37,10 @@ class AsyncClientImpl final : public RawAsyncClient { private: Upstream::ClusterManager& cm_; const std::string remote_cluster_name_; + // The host header value in the http transport. 
+ const std::string host_name_; const Protobuf::RepeatedPtrField initial_metadata_; - std::list> active_streams_; + std::list active_streams_; TimeSource& time_source_; friend class AsyncRequestImpl; @@ -44,7 +50,7 @@ class AsyncClientImpl final : public RawAsyncClient { class AsyncStreamImpl : public RawAsyncStream, Http::AsyncClient::StreamCallbacks, public Event::DeferredDeletable, - LinkedObject { + public LinkedObject { public: AsyncStreamImpl(AsyncClientImpl& parent, absl::string_view service_full_name, absl::string_view method_name, RawAsyncStreamCallbacks& callbacks, @@ -65,6 +71,9 @@ class AsyncStreamImpl : public RawAsyncStream, void sendMessageRaw(Buffer::InstancePtr&& request, bool end_stream) override; void closeStream() override; void resetStream() override; + bool isAboveWriteBufferHighWatermark() const override { + return stream_ && stream_->isAboveWriteBufferHighWatermark(); + } bool hasResetStream() const { return http_reset_; } diff --git a/source/common/grpc/common.cc b/source/common/grpc/common.cc index 2019f54ac874e..4322df957916e 100644 --- a/source/common/grpc/common.cc +++ b/source/common/grpc/common.cc @@ -14,6 +14,7 @@ #include "common/common/fmt.h" #include "common/common/macros.h" #include "common/common/utility.h" +#include "common/http/header_utility.h" #include "common/http/headers.h" #include "common/http/message_impl.h" #include "common/http/utility.h" @@ -26,18 +27,22 @@ namespace Envoy { namespace Grpc { bool Common::hasGrpcContentType(const Http::RequestOrResponseHeaderMap& headers) { - const Http::HeaderEntry* content_type = headers.ContentType(); + const absl::string_view content_type = headers.getContentTypeValue(); // Content type is gRPC if it is exactly "application/grpc" or starts with // "application/grpc+". Specifically, something like application/grpc-web is not gRPC. 
- return content_type != nullptr && - absl::StartsWith(content_type->value().getStringView(), - Http::Headers::get().ContentTypeValues.Grpc) && - (content_type->value().size() == Http::Headers::get().ContentTypeValues.Grpc.size() || - content_type->value() - .getStringView()[Http::Headers::get().ContentTypeValues.Grpc.size()] == '+'); + return absl::StartsWith(content_type, Http::Headers::get().ContentTypeValues.Grpc) && + (content_type.size() == Http::Headers::get().ContentTypeValues.Grpc.size() || + content_type[Http::Headers::get().ContentTypeValues.Grpc.size()] == '+'); } -bool Common::isGrpcResponseHeader(const Http::ResponseHeaderMap& headers, bool end_stream) { +bool Common::isGrpcRequestHeaders(const Http::RequestHeaderMap& headers) { + if (!headers.Path()) { + return false; + } + return hasGrpcContentType(headers); +} + +bool Common::isGrpcResponseHeaders(const Http::ResponseHeaderMap& headers, bool end_stream) { if (end_stream) { // Trailers-only response, only grpc-status is required. 
return headers.GrpcStatus() != nullptr; @@ -50,13 +55,13 @@ bool Common::isGrpcResponseHeader(const Http::ResponseHeaderMap& headers, bool e absl::optional Common::getGrpcStatus(const Http::ResponseHeaderOrTrailerMap& trailers, bool allow_user_defined) { - const Http::HeaderEntry* grpc_status_header = trailers.GrpcStatus(); + const absl::string_view grpc_status_header = trailers.getGrpcStatusValue(); uint64_t grpc_status_code; - if (!grpc_status_header || grpc_status_header->value().empty()) { + if (grpc_status_header.empty()) { return absl::nullopt; } - if (!absl::SimpleAtoi(grpc_status_header->value().getStringView(), &grpc_status_code) || + if (!absl::SimpleAtoi(grpc_status_header, &grpc_status_code) || (grpc_status_code > Status::WellKnownGrpcStatus::MaximumKnown && !allow_user_defined)) { return {Status::WellKnownGrpcStatus::InvalidCode}; } @@ -220,13 +225,13 @@ void Common::toGrpcTimeout(const std::chrono::milliseconds& timeout, } Http::RequestMessagePtr -Common::prepareHeaders(const std::string& upstream_cluster, const std::string& service_full_name, +Common::prepareHeaders(const std::string& host_name, const std::string& service_full_name, const std::string& method_name, const absl::optional& timeout) { Http::RequestMessagePtr message(new Http::RequestMessageImpl()); message->headers().setReferenceMethod(Http::Headers::get().MethodValues.Post); message->headers().setPath(absl::StrCat("/", service_full_name, "/", method_name)); - message->headers().setHost(upstream_cluster); + message->headers().setHost(host_name); // According to https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md TE should appear // before Timeout and ContentType. 
message->headers().setReferenceTE(Http::Headers::get().TEValues.Trailers); diff --git a/source/common/grpc/common.h b/source/common/grpc/common.h index b450e7817e54e..cd94fe450568b 100644 --- a/source/common/grpc/common.h +++ b/source/common/grpc/common.h @@ -36,12 +36,20 @@ class Common { */ static bool hasGrpcContentType(const Http::RequestOrResponseHeaderMap& headers); + /** + * @param headers the headers to parse. + * @return bool indicating whether the header is a gRPC request header. + * Currently headers are considered gRPC request headers if they have the gRPC + * content type, and have a path header. + */ + static bool isGrpcRequestHeaders(const Http::RequestHeaderMap& headers); + /** * @param headers the headers to parse. * @param bool indicating whether the header is at end_stream. * @return bool indicating whether the header is a gRPC response header */ - static bool isGrpcResponseHeader(const Http::ResponseHeaderMap& headers, bool end_stream); + static bool isGrpcResponseHeaders(const Http::ResponseHeaderMap& headers, bool end_stream); /** * Returns the GrpcStatus code from a given set of trailers, if present. 
diff --git a/source/common/grpc/context_impl.cc b/source/common/grpc/context_impl.cc index f612f71cc0740..4c0e2f91ebc3e 100644 --- a/source/common/grpc/context_impl.cc +++ b/source/common/grpc/context_impl.cc @@ -4,48 +4,29 @@ #include #include "common/grpc/common.h" +#include "common/stats/utility.h" namespace Envoy { namespace Grpc { ContextImpl::ContextImpl(Stats::SymbolTable& symbol_table) - : symbol_table_(symbol_table), stat_name_pool_(symbol_table), - grpc_(stat_name_pool_.add("grpc")), grpc_web_(stat_name_pool_.add("grpc-web")), - success_(stat_name_pool_.add("success")), failure_(stat_name_pool_.add("failure")), - total_(stat_name_pool_.add("total")), zero_(stat_name_pool_.add("0")), + : stat_name_pool_(symbol_table), grpc_(stat_name_pool_.add("grpc")), + grpc_web_(stat_name_pool_.add("grpc-web")), success_(stat_name_pool_.add("success")), + failure_(stat_name_pool_.add("failure")), total_(stat_name_pool_.add("total")), + zero_(stat_name_pool_.add("0")), request_message_count_(stat_name_pool_.add("request_message_count")), response_message_count_(stat_name_pool_.add("response_message_count")), upstream_rq_time_(stat_name_pool_.add("upstream_rq_time")), stat_names_(symbol_table) {} -// Makes a stat name from a string, if we don't already have one for it. -// This always takes a lock on mutex_, and if we haven't seen the name -// before, it also takes a lock on the symbol table. -// -// TODO(jmarantz): See https://github.com/envoyproxy/envoy/pull/7008 for -// a lock-free approach to creating dynamic stat-names based on requests. 
-Stats::StatName ContextImpl::makeDynamicStatName(absl::string_view name) { - Thread::LockGuard lock(mutex_); - auto iter = stat_name_map_.find(name); - if (iter != stat_name_map_.end()) { - return iter->second; - } - const Stats::StatName stat_name = stat_name_pool_.add(name); - stat_name_map_[std::string(name)] = stat_name; - return stat_name; -} - // Gets the stat prefix and underlying storage, depending on whether request_names is empty -std::pair -ContextImpl::getPrefix(Protocol protocol, const absl::optional& request_names) { +Stats::ElementVec ContextImpl::statElements(Protocol protocol, + const absl::optional& request_names, + Stats::Element suffix) { const Stats::StatName protocolName = protocolStatName(protocol); if (request_names) { - Stats::SymbolTable::StoragePtr prefix_storage = - symbol_table_.join({protocolName, request_names->service_, request_names->method_}); - Stats::StatName prefix = Stats::StatName(prefix_storage.get()); - return {prefix, std::move(prefix_storage)}; - } else { - return {protocolName, nullptr}; + return Stats::ElementVec{protocolName, request_names->service_, request_names->method_, suffix}; } + return Stats::ElementVec{protocolName, suffix}; } void ContextImpl::chargeStat(const Upstream::ClusterInfo& cluster, Protocol protocol, @@ -57,28 +38,20 @@ void ContextImpl::chargeStat(const Upstream::ClusterInfo& cluster, Protocol prot absl::string_view status_str = grpc_status->value().getStringView(); auto iter = stat_names_.status_names_.find(status_str); - const Stats::StatName status_stat_name = - (iter != stat_names_.status_names_.end()) ? iter->second : makeDynamicStatName(status_str); - const Stats::SymbolTable::StoragePtr stat_name_storage = - request_names ? 
symbol_table_.join({protocolStatName(protocol), request_names->service_, - request_names->method_, status_stat_name}) - : symbol_table_.join({protocolStatName(protocol), status_stat_name}); - - cluster.statsScope().counterFromStatName(Stats::StatName(stat_name_storage.get())).inc(); + Stats::ElementVec elements = + statElements(protocol, request_names, + (iter != stat_names_.status_names_.end()) ? Stats::Element(iter->second) + : Stats::DynamicName(status_str)); + Stats::Utility::counterFromElements(cluster.statsScope(), elements).inc(); chargeStat(cluster, protocol, request_names, (status_str == "0")); } void ContextImpl::chargeStat(const Upstream::ClusterInfo& cluster, Protocol protocol, const absl::optional& request_names, bool success) { - auto prefix_and_storage = getPrefix(protocol, request_names); - Stats::StatName prefix = prefix_and_storage.first; - - const Stats::SymbolTable::StoragePtr status = - symbol_table_.join({prefix, successStatName(success)}); - const Stats::SymbolTable::StoragePtr total = symbol_table_.join({prefix, total_}); - - cluster.statsScope().counterFromStatName(Stats::StatName(status.get())).inc(); - cluster.statsScope().counterFromStatName(Stats::StatName(total.get())).inc(); + Stats::ElementVec elements = statElements(protocol, request_names, successStatName(success)); + Stats::Utility::counterFromElements(cluster.statsScope(), elements).inc(); + elements.back() = total_; + Stats::Utility::counterFromElements(cluster.statsScope(), elements).inc(); } void ContextImpl::chargeStat(const Upstream::ClusterInfo& cluster, @@ -89,43 +62,23 @@ void ContextImpl::chargeStat(const Upstream::ClusterInfo& cluster, void ContextImpl::chargeRequestMessageStat(const Upstream::ClusterInfo& cluster, const absl::optional& request_names, uint64_t amount) { - auto prefix_and_storage = getPrefix(Protocol::Grpc, request_names); - Stats::StatName prefix = prefix_and_storage.first; - - const Stats::SymbolTable::StoragePtr request_message_count = - 
symbol_table_.join({prefix, request_message_count_}); - - cluster.statsScope() - .counterFromStatName(Stats::StatName(request_message_count.get())) - .add(amount); + Stats::ElementVec elements = statElements(Protocol::Grpc, request_names, request_message_count_); + Stats::Utility::counterFromElements(cluster.statsScope(), elements).add(amount); } void ContextImpl::chargeResponseMessageStat(const Upstream::ClusterInfo& cluster, const absl::optional& request_names, uint64_t amount) { - auto prefix_and_storage = getPrefix(Protocol::Grpc, request_names); - Stats::StatName prefix = prefix_and_storage.first; - - const Stats::SymbolTable::StoragePtr response_message_count = - symbol_table_.join({prefix, response_message_count_}); - - cluster.statsScope() - .counterFromStatName(Stats::StatName(response_message_count.get())) - .add(amount); + Stats::ElementVec elements = statElements(Protocol::Grpc, request_names, response_message_count_); + Stats::Utility::counterFromElements(cluster.statsScope(), elements).add(amount); } void ContextImpl::chargeUpstreamStat(const Upstream::ClusterInfo& cluster, const absl::optional& request_names, std::chrono::milliseconds duration) { - auto prefix_and_storage = getPrefix(Protocol::Grpc, request_names); - Stats::StatName prefix = prefix_and_storage.first; - - const Stats::SymbolTable::StoragePtr upstream_rq_time = - symbol_table_.join({prefix, upstream_rq_time_}); - - cluster.statsScope() - .histogramFromStatName(Stats::StatName(upstream_rq_time.get()), - Stats::Histogram::Unit::Milliseconds) + Stats::ElementVec elements = statElements(Protocol::Grpc, request_names, upstream_rq_time_); + Stats::Utility::histogramFromElements(cluster.statsScope(), elements, + Stats::Histogram::Unit::Milliseconds) .recordValue(duration.count()); } @@ -136,8 +89,8 @@ ContextImpl::resolveDynamicServiceAndMethod(const Http::HeaderEntry* path) { return {}; } - const Stats::StatName service = makeDynamicStatName(request_names->service_); - const Stats::StatName 
method = makeDynamicStatName(request_names->method_); + Stats::Element service = Stats::DynamicName(request_names->service_); + Stats::Element method = Stats::DynamicName(request_names->method_); return RequestStatNames{service, method}; } diff --git a/source/common/grpc/context_impl.h b/source/common/grpc/context_impl.h index 9d3ddc731458b..98a34695235bb 100644 --- a/source/common/grpc/context_impl.h +++ b/source/common/grpc/context_impl.h @@ -9,6 +9,7 @@ #include "common/common/hash.h" #include "common/grpc/stat_names.h" #include "common/stats/symbol_table_impl.h" +#include "common/stats/utility.h" #include "absl/types/optional.h" @@ -16,8 +17,8 @@ namespace Envoy { namespace Grpc { struct Context::RequestStatNames { - Stats::StatName service_; // supplies the service name. - Stats::StatName method_; // supplies the method name. + Stats::Element service_; // supplies the service name. + Stats::Element method_; // supplies the method name. }; class ContextImpl : public Context { @@ -59,25 +60,13 @@ class ContextImpl : public Context { StatNames& statNames() override { return stat_names_; } private: - // Makes a stat name from a string, if we don't already have one for it. - // This always takes a lock on mutex_, and if we haven't seen the name - // before, it also takes a lock on the symbol table. - // - // TODO(jmarantz): See https://github.com/envoyproxy/envoy/pull/7008 for - // a lock-free approach to creating dynamic stat-names based on requests. - Stats::StatName makeDynamicStatName(absl::string_view name); - - // Gets the stat prefix and underlying storage, depending on whether request_names is empty - // or not. - // Prefix will be "" if request_names is empty, or - // ".." if it is not empty. - std::pair - getPrefix(Protocol protocol, const absl::optional& request_names); + // Creates an array of stat-name elements, comprising the protocol, optional + // service and method, and a suffix. 
+ Stats::ElementVec statElements(Protocol protocol, + const absl::optional& request_names, + Stats::Element suffix); - Stats::SymbolTable& symbol_table_; - mutable Thread::MutexBasicLockable mutex_; - Stats::StatNamePool stat_name_pool_ ABSL_GUARDED_BY(mutex_); - StringMap stat_name_map_ ABSL_GUARDED_BY(mutex_); + Stats::StatNamePool stat_name_pool_; const Stats::StatName grpc_; const Stats::StatName grpc_web_; const Stats::StatName success_; diff --git a/source/common/grpc/google_async_client_impl.cc b/source/common/grpc/google_async_client_impl.cc index ea22f07c94518..e4b329d3e67ee 100644 --- a/source/common/grpc/google_async_client_impl.cc +++ b/source/common/grpc/google_async_client_impl.cc @@ -16,9 +16,13 @@ namespace Envoy { namespace Grpc { +namespace { +static constexpr int DefaultBufferLimitBytes = 1024 * 1024; +} GoogleAsyncClientThreadLocal::GoogleAsyncClientThreadLocal(Api::Api& api) - : completion_thread_(api.threadFactory().createThread([this] { completionThread(); })) {} + : completion_thread_(api.threadFactory().createThread([this] { completionThread(); }, + Thread::Options{"GrpcGoogClient"})) {} GoogleAsyncClientThreadLocal::~GoogleAsyncClientThreadLocal() { // Force streams to shutdown and invoke TryCancel() to start the drain of @@ -75,7 +79,9 @@ GoogleAsyncClientImpl::GoogleAsyncClientImpl(Event::Dispatcher& dispatcher, const envoy::config::core::v3::GrpcService& config, Api::Api& api, const StatNames& stat_names) : dispatcher_(dispatcher), tls_(tls), stat_prefix_(config.google_grpc().stat_prefix()), - initial_metadata_(config.initial_metadata()), scope_(scope) { + initial_metadata_(config.initial_metadata()), scope_(scope), + per_stream_buffer_limit_bytes_(PROTOBUF_GET_WRAPPED_OR_DEFAULT( + config.google_grpc(), per_stream_buffer_limit_bytes, DefaultBufferLimitBytes)) { // We rebuild the channel each time we construct the channel. 
It appears that the gRPC library is // smart enough to do connection pooling and reuse with identical channel args, so this should // have comparable overhead to what we are doing in Grpc::AsyncClientImpl, i.e. no expensive @@ -106,14 +112,14 @@ AsyncRequest* GoogleAsyncClientImpl::sendRaw(absl::string_view service_full_name const Http::AsyncClient::RequestOptions& options) { auto* const async_request = new GoogleAsyncRequestImpl( *this, service_full_name, method_name, std::move(request), callbacks, parent_span, options); - std::unique_ptr grpc_stream{async_request}; + GoogleAsyncStreamImplPtr grpc_stream{async_request}; grpc_stream->initialize(true); - if (grpc_stream->call_failed()) { + if (grpc_stream->callFailed()) { return nullptr; } - grpc_stream->moveIntoList(std::move(grpc_stream), active_streams_); + LinkedList::moveIntoList(std::move(grpc_stream), active_streams_); return async_request; } @@ -125,11 +131,11 @@ RawAsyncStream* GoogleAsyncClientImpl::startRaw(absl::string_view service_full_n callbacks, options); grpc_stream->initialize(false); - if (grpc_stream->call_failed()) { + if (grpc_stream->callFailed()) { return nullptr; } - grpc_stream->moveIntoList(std::move(grpc_stream), active_streams_); + LinkedList::moveIntoList(std::move(grpc_stream), active_streams_); return active_streams_.front().get(); } @@ -165,16 +171,13 @@ void GoogleAsyncStreamImpl::initialize(bool /*buffer_body_for_retry*/) { } // Due to the different HTTP header implementations, we effectively double // copy headers here. 
- Http::RequestHeaderMapImpl initial_metadata; - callbacks_.onCreateInitialMetadata(initial_metadata); - initial_metadata.iterate( - [](const Http::HeaderEntry& header, void* ctxt) { - auto* client_context = static_cast(ctxt); - client_context->AddMetadata(std::string(header.key().getStringView()), - std::string(header.value().getStringView())); - return Http::HeaderMap::Iterate::Continue; - }, - &ctxt_); + auto initial_metadata = Http::RequestHeaderMapImpl::create(); + callbacks_.onCreateInitialMetadata(*initial_metadata); + initial_metadata->iterate([this](const Http::HeaderEntry& header) { + ctxt_.AddMetadata(std::string(header.key().getStringView()), + std::string(header.value().getStringView())); + return Http::HeaderMap::Iterate::Continue; + }); // Invoke stub call. rw_ = parent_.stub_->PrepareCall(&ctxt_, "/" + service_full_name_ + "/" + method_name_, &parent_.tls_.completionQueue()); @@ -201,9 +204,8 @@ void GoogleAsyncStreamImpl::notifyRemoteClose(Status::GrpcStatus grpc_status, parent_.stats_.streams_closed_[grpc_status]->inc(); } ENVOY_LOG(debug, "notifyRemoteClose {} {}", grpc_status, message); - callbacks_.onReceiveTrailingMetadata(trailing_metadata - ? std::move(trailing_metadata) - : std::make_unique()); + callbacks_.onReceiveTrailingMetadata(trailing_metadata ? 
std::move(trailing_metadata) + : Http::ResponseTrailerMapImpl::create()); callbacks_.onRemoteClose(grpc_status, message); } @@ -211,6 +213,7 @@ void GoogleAsyncStreamImpl::sendMessageRaw(Buffer::InstancePtr&& request, bool e write_pending_queue_.emplace(std::move(request), end_stream); ENVOY_LOG(trace, "Queued message to write ({} bytes)", write_pending_queue_.back().buf_.value().Length()); + bytes_in_write_pending_queue_ += write_pending_queue_.back().buf_.value().Length(); writeQueued(); } @@ -305,7 +308,7 @@ void GoogleAsyncStreamImpl::handleOpCompletion(GoogleAsyncTag::Operation op, boo ASSERT(call_initialized_); rw_->Read(&read_buf_, &read_tag_); ++inflight_tags_; - Http::ResponseHeaderMapPtr initial_metadata = std::make_unique(); + Http::ResponseHeaderMapPtr initial_metadata = Http::ResponseHeaderMapImpl::create(); metadataTranslate(ctxt_.GetServerInitialMetadata(), *initial_metadata); callbacks_.onReceiveInitialMetadata(std::move(initial_metadata)); break; @@ -313,6 +316,7 @@ void GoogleAsyncStreamImpl::handleOpCompletion(GoogleAsyncTag::Operation op, boo case GoogleAsyncTag::Operation::Write: { ASSERT(ok); write_pending_ = false; + bytes_in_write_pending_queue_ -= write_pending_queue_.front().buf_.value().Length(); write_pending_queue_.pop(); writeQueued(); break; @@ -338,8 +342,7 @@ void GoogleAsyncStreamImpl::handleOpCompletion(GoogleAsyncTag::Operation op, boo case GoogleAsyncTag::Operation::Finish: { ASSERT(finish_pending_); ENVOY_LOG(debug, "Finish with grpc-status code {}", status_.error_code()); - Http::ResponseTrailerMapPtr trailing_metadata = - std::make_unique(); + Http::ResponseTrailerMapPtr trailing_metadata = Http::ResponseTrailerMapImpl::create(); metadataTranslate(ctxt_.GetServerTrailingMetadata(), *trailing_metadata); notifyRemoteClose(static_cast(status_.error_code()), std::move(trailing_metadata), status_.error_message()); @@ -375,7 +378,7 @@ void GoogleAsyncStreamImpl::deferredDelete() { // Hence, it is safe here to create a unique_ptr to 
this and transfer // ownership to dispatcher_.deferredDelete(). After this call, no further // methods may be invoked on this object. - dispatcher_.deferredDelete(std::unique_ptr(this)); + dispatcher_.deferredDelete(GoogleAsyncStreamImplPtr(this)); } void GoogleAsyncStreamImpl::cleanup() { @@ -412,16 +415,16 @@ GoogleAsyncRequestImpl::GoogleAsyncRequestImpl( void GoogleAsyncRequestImpl::initialize(bool buffer_body_for_retry) { GoogleAsyncStreamImpl::initialize(buffer_body_for_retry); - if (this->call_failed()) { + if (callFailed()) { return; } - this->sendMessageRaw(std::move(request_), true); + sendMessageRaw(std::move(request_), true); } void GoogleAsyncRequestImpl::cancel() { current_span_->setTag(Tracing::Tags::get().Status, Tracing::Tags::get().Canceled); current_span_->finishSpan(); - this->resetStream(); + resetStream(); } void GoogleAsyncRequestImpl::onCreateInitialMetadata(Http::RequestHeaderMap& metadata) { diff --git a/source/common/grpc/google_async_client_impl.h b/source/common/grpc/google_async_client_impl.h index 19b2059420d6b..8e946ce5c0cb4 100644 --- a/source/common/grpc/google_async_client_impl.h +++ b/source/common/grpc/google_async_client_impl.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include "envoy/api/api.h" @@ -20,6 +21,7 @@ #include "common/grpc/typed_async_client.h" #include "common/tracing/http_tracer_impl.h" +#include "absl/container/node_hash_set.h" #include "grpcpp/generic/generic_stub.h" #include "grpcpp/grpcpp.h" #include "grpcpp/support/proto_buffer_writer.h" @@ -28,6 +30,9 @@ namespace Envoy { namespace Grpc { class GoogleAsyncStreamImpl; + +using GoogleAsyncStreamImplPtr = std::unique_ptr; + class GoogleAsyncRequestImpl; struct GoogleAsyncTag { @@ -58,13 +63,6 @@ struct GoogleAsyncTag { GoogleAsyncStreamImpl& stream_; const Operation op_; - - // Generate a void* tag for a given Operation. - static void* tag(Operation op) { return reinterpret_cast(op); } - // Extract Operation from void* tag. 
- static Operation operation(void* tag) { - return static_cast(reinterpret_cast(tag)); - } }; class GoogleAsyncClientThreadLocal : public ThreadLocal::ThreadLocalObject, @@ -112,9 +110,11 @@ class GoogleAsyncClientThreadLocal : public ThreadLocal::ThreadLocalObject, Thread::ThreadPtr completion_thread_; // Track all streams that are currently using this CQ, so we can notify them // on shutdown. - std::unordered_set streams_; + absl::node_hash_set streams_; }; +using GoogleAsyncClientThreadLocalPtr = std::unique_ptr; + // Google gRPC client stats. TODO(htuch): consider how a wider set of stats collected by the // library, such as the census related ones, can be externalized as needed. struct GoogleAsyncClientStats { @@ -135,6 +135,8 @@ class GoogleStub { grpc::CompletionQueue* cq) PURE; }; +using GoogleStubSharedPtr = std::shared_ptr; + class GoogleGenericStub : public GoogleStub { public: GoogleGenericStub(std::shared_ptr channel) : stub_(channel) {} @@ -155,12 +157,12 @@ class GoogleStubFactory { virtual ~GoogleStubFactory() = default; // Create a stub from a given channel. 
- virtual std::shared_ptr createStub(std::shared_ptr channel) PURE; + virtual GoogleStubSharedPtr createStub(std::shared_ptr channel) PURE; }; class GoogleGenericStubFactory : public GoogleStubFactory { public: - std::shared_ptr createStub(std::shared_ptr channel) override { + GoogleStubSharedPtr createStub(std::shared_ptr channel) override { return std::make_shared(channel); } }; @@ -184,6 +186,7 @@ class GoogleAsyncClientImpl final : public RawAsyncClient, Logger::Loggable stub_; - std::list> active_streams_; + GoogleStubSharedPtr stub_; + std::list active_streams_; const std::string stat_prefix_; const Protobuf::RepeatedPtrField initial_metadata_; Stats::ScopeSharedPtr scope_; GoogleAsyncClientStats stats_; + uint64_t per_stream_buffer_limit_bytes_; friend class GoogleAsyncClientThreadLocal; friend class GoogleAsyncRequestImpl; @@ -206,7 +210,7 @@ class GoogleAsyncClientImpl final : public RawAsyncClient, Logger::Loggable, - LinkedObject { + public LinkedObject { public: GoogleAsyncStreamImpl(GoogleAsyncClientImpl& parent, absl::string_view service_full_name, absl::string_view method_name, RawAsyncStreamCallbacks& callbacks, @@ -219,9 +223,15 @@ class GoogleAsyncStreamImpl : public RawAsyncStream, void sendMessageRaw(Buffer::InstancePtr&& request, bool end_stream) override; void closeStream() override; void resetStream() override; + // While the Google-gRPC code doesn't use Envoy watermark buffers, the logical + // analog is to make sure that there aren't too many bytes in the pending write + // queue. + bool isAboveWriteBufferHighWatermark() const override { + return bytes_in_write_pending_queue_ > parent_.perStreamBufferLimitBytes(); + } protected: - bool call_failed() const { return call_failed_; } + bool callFailed() const { return call_failed_; } private: // Process queued events in completed_ops_ with handleOpCompletion() on @@ -251,7 +261,7 @@ class GoogleAsyncStreamImpl : public RawAsyncStream, // End-of-stream with no additional message. 
PendingMessage() = default; - const absl::optional buf_; + const absl::optional buf_{}; const bool end_stream_{true}; }; @@ -271,7 +281,7 @@ class GoogleAsyncStreamImpl : public RawAsyncStream, Event::Dispatcher& dispatcher_; // We hold a ref count on the stub_ to allow the stream to wait for its tags // to drain from the CQ on cleanup. - std::shared_ptr stub_; + GoogleStubSharedPtr stub_; std::string service_full_name_; std::string method_name_; RawAsyncStreamCallbacks& callbacks_; @@ -279,6 +289,7 @@ class GoogleAsyncStreamImpl : public RawAsyncStream, grpc::ClientContext ctxt_; std::unique_ptr rw_; std::queue write_pending_queue_; + uint64_t bytes_in_write_pending_queue_{}; grpc::ByteBuffer read_buf_; grpc::Status status_; // Has Operation::Init completed? diff --git a/source/common/grpc/google_grpc_context.h b/source/common/grpc/google_grpc_context.h index b83f29a122148..2ec235161f2e0 100644 --- a/source/common/grpc/google_grpc_context.h +++ b/source/common/grpc/google_grpc_context.h @@ -21,7 +21,7 @@ class GoogleGrpcContext { private: struct InstanceTracker { Thread::MutexBasicLockable mutex_; - uint64_t live_instances_ GUARDED_BY(mutex_) = 0; + uint64_t live_instances_ ABSL_GUARDED_BY(mutex_) = 0; }; static InstanceTracker& instanceTracker(); diff --git a/source/common/grpc/google_grpc_creds_impl.h b/source/common/grpc/google_grpc_creds_impl.h index 8e2bd2b672882..e36083432aec8 100644 --- a/source/common/grpc/google_grpc_creds_impl.h +++ b/source/common/grpc/google_grpc_creds_impl.h @@ -19,7 +19,7 @@ getGoogleGrpcChannelCredentials(const envoy::config::core::v3::GrpcService& grpc class CredsUtility { public: /** - * Translation from envoy::api::v2::core::GrpcService to grpc::ChannelCredentials + * Translation from envoy::config::core::v3::GrpcService::GoogleGrpc to grpc::ChannelCredentials * for channel credentials. * @param google_grpc Google gRPC config. 
* @param api reference to the Api object @@ -31,8 +31,8 @@ class CredsUtility { Api::Api& api); /** - * Static translation from envoy::api::v2::core::GrpcService to a vector of grpc::CallCredentials. - * Any plugin based call credentials will be elided. + * Static translation from envoy::config::core::v3::GrpcService::GoogleGrpc to a vector of + * grpc::CallCredentials. Any plugin based call credentials will be elided. * @param grpc_service Google gRPC config. * @return std::vector> call credentials. */ @@ -40,8 +40,8 @@ class CredsUtility { callCredentials(const envoy::config::core::v3::GrpcService::GoogleGrpc& google_grpc); /** - * Default translation from envoy::api::v2::core::GrpcService to grpc::ChannelCredentials for SSL - * channel credentials. + * Default translation from envoy::config::core::v3::GrpcService::GoogleGrpc to + * grpc::ChannelCredentials for SSL channel credentials. * @param grpc_service_config gRPC service config. * @param api reference to the Api object * @return std::shared_ptr SSL channel credentials. Empty SSL @@ -53,8 +53,8 @@ class CredsUtility { Api::Api& api); /** - * Default static translation from envoy::api::v2::core::GrpcService to grpc::ChannelCredentials - * for all non-plugin based channel and call credentials. + * Default static translation from envoy::config::core::v3::GrpcService::GoogleGrpc to + * grpc::ChannelCredentials for all non-plugin based channel and call credentials. * @param grpc_service_config gRPC service config. * @param api reference to the Api object * @return std::shared_ptr composite channel and call credentials. 
diff --git a/source/common/grpc/google_grpc_utils.cc b/source/common/grpc/google_grpc_utils.cc index 395ad33151f29..b3fe1e20a320b 100644 --- a/source/common/grpc/google_grpc_utils.cc +++ b/source/common/grpc/google_grpc_utils.cc @@ -113,10 +113,31 @@ Buffer::InstancePtr GoogleGrpcUtils::makeBufferInstance(const grpc::ByteBuffer& return buffer; } +grpc::ChannelArguments +GoogleGrpcUtils::channelArgsFromConfig(const envoy::config::core::v3::GrpcService& config) { + grpc::ChannelArguments args; + for (const auto& channel_arg : config.google_grpc().channel_args().args()) { + switch (channel_arg.second.value_specifier_case()) { + case envoy::config::core::v3::GrpcService::GoogleGrpc::ChannelArgs::Value::kStringValue: { + args.SetString(channel_arg.first, channel_arg.second.string_value()); + break; + } + case envoy::config::core::v3::GrpcService::GoogleGrpc::ChannelArgs::Value::kIntValue: { + args.SetInt(channel_arg.first, channel_arg.second.int_value()); + break; + } + default: + NOT_REACHED_GCOVR_EXCL_LINE; + } + } + return args; +} + std::shared_ptr GoogleGrpcUtils::createChannel(const envoy::config::core::v3::GrpcService& config, Api::Api& api) { std::shared_ptr creds = getGoogleGrpcChannelCredentials(config, api); - return CreateChannel(config.google_grpc().target_uri(), creds); + const grpc::ChannelArguments args = channelArgsFromConfig(config); + return CreateCustomChannel(config.google_grpc().target_uri(), creds, args); } } // namespace Grpc diff --git a/source/common/grpc/google_grpc_utils.h b/source/common/grpc/google_grpc_utils.h index 03e7c6f618cb0..859a61ccfff9f 100644 --- a/source/common/grpc/google_grpc_utils.h +++ b/source/common/grpc/google_grpc_utils.h @@ -31,9 +31,17 @@ class GoogleGrpcUtils { */ static Buffer::InstancePtr makeBufferInstance(const grpc::ByteBuffer& buffer); + /** + * Build grpc::ChannelArguments from gRPC service config. + * @param config Google gRPC config. + * @return grpc::ChannelArguments corresponding to config. 
+ */ + static grpc::ChannelArguments + channelArgsFromConfig(const envoy::config::core::v3::GrpcService& config); + /** * Build gRPC channel based on the given GrpcService configuration. - * @param config Google gRPC config. + * @param config Google gRPC config. * @param api reference to the Api object * @return static std::shared_ptr a gRPC channel. */ diff --git a/source/common/grpc/typed_async_client.cc b/source/common/grpc/typed_async_client.cc index 9c06dc6b701a9..465bde6e139e0 100644 --- a/source/common/grpc/typed_async_client.cc +++ b/source/common/grpc/typed_async_client.cc @@ -1,6 +1,8 @@ #include "common/grpc/typed_async_client.h" #include "common/buffer/zero_copy_input_stream_impl.h" +#include "common/common/assert.h" +#include "common/common/macros.h" #include "common/common/utility.h" #include "common/grpc/common.h" #include "common/http/utility.h" diff --git a/source/common/grpc/typed_async_client.h b/source/common/grpc/typed_async_client.h index 72907e42e6117..241926ee4ed70 100644 --- a/source/common/grpc/typed_async_client.h +++ b/source/common/grpc/typed_async_client.h @@ -1,9 +1,13 @@ #pragma once #include +#include #include "envoy/grpc/async_client.h" +#include "common/common/empty_string.h" +#include "common/config/version_converter.h" + namespace Envoy { namespace Grpc { namespace Internal { @@ -33,11 +37,20 @@ template class AsyncStream /* : public RawAsyncStream */ { AsyncStream() = default; AsyncStream(RawAsyncStream* stream) : stream_(stream) {} AsyncStream(const AsyncStream& other) = default; - void sendMessage(const Request& request, bool end_stream) { + void sendMessage(const Protobuf::Message& request, bool end_stream) { + Internal::sendMessageUntyped(stream_, std::move(request), end_stream); + } + void sendMessage(const Protobuf::Message& request, + envoy::config::core::v3::ApiVersion transport_api_version, bool end_stream) { + Config::VersionConverter::prepareMessageForGrpcWire(const_cast(request), + transport_api_version); 
Internal::sendMessageUntyped(stream_, std::move(request), end_stream); } void closeStream() { stream_->closeStream(); } void resetStream() { stream_->resetStream(); } + bool isAboveWriteBufferHighWatermark() const { + return stream_->isAboveWriteBufferHighWatermark(); + } AsyncStream* operator->() { return this; } AsyncStream operator=(RawAsyncStream* stream) { stream_ = stream; @@ -50,17 +63,19 @@ template class AsyncStream /* : public RawAsyncStream */ { RawAsyncStream* stream_{}; }; +template using ResponsePtr = std::unique_ptr; + /** * Convenience subclasses for AsyncRequestCallbacks. */ template class AsyncRequestCallbacks : public RawAsyncRequestCallbacks { public: ~AsyncRequestCallbacks() override = default; - virtual void onSuccess(std::unique_ptr&& response, Tracing::Span& span) PURE; + virtual void onSuccess(ResponsePtr&& response, Tracing::Span& span) PURE; private: void onSuccessRaw(Buffer::InstancePtr&& response, Tracing::Span& span) override { - auto message = std::unique_ptr(dynamic_cast( + auto message = ResponsePtr(dynamic_cast( Internal::parseMessageUntyped(std::make_unique(), std::move(response)) .release())); if (!message) { @@ -71,17 +86,66 @@ template class AsyncRequestCallbacks : public RawAsyncReques } }; +/** + * Versioned methods wrapper. + */ +class VersionedMethods { +public: + VersionedMethods(const std::string& v3, const std::string& v2, const std::string& v2_alpha = "") + : v3_(Protobuf::DescriptorPool::generated_pool()->FindMethodByName(v3)), + v2_(Protobuf::DescriptorPool::generated_pool()->FindMethodByName(v2)), + v2_alpha_(v2_alpha.empty() + ? nullptr + : Protobuf::DescriptorPool::generated_pool()->FindMethodByName(v2_alpha)) {} + + /** + * Given a version, return the method descriptor for a specific version. + * + * @param api_version target API version. + * @param use_alpha if this is an alpha version of an API method. + * + * @return Protobuf::MethodDescriptor& of a method for a specific version. 
+ */ + const Protobuf::MethodDescriptor& + getMethodDescriptorForVersion(envoy::config::core::v3::ApiVersion api_version, + bool use_alpha = false) const { + switch (api_version) { + case envoy::config::core::v3::ApiVersion::AUTO: + FALLTHRU; + case envoy::config::core::v3::ApiVersion::V2: { + const auto* descriptor = use_alpha ? v2_alpha_ : v2_; + ASSERT(descriptor != nullptr); + return *descriptor; + } + + case envoy::config::core::v3::ApiVersion::V3: { + const auto* descriptor = v3_; + ASSERT(descriptor != nullptr); + return *descriptor; + } + + default: + NOT_REACHED_GCOVR_EXCL_LINE; + } + } + +private: + const Protobuf::MethodDescriptor* v3_{nullptr}; + const Protobuf::MethodDescriptor* v2_{nullptr}; + const Protobuf::MethodDescriptor* v2_alpha_{nullptr}; +}; + /** * Convenience subclasses for AsyncStreamCallbacks. */ template class AsyncStreamCallbacks : public RawAsyncStreamCallbacks { public: ~AsyncStreamCallbacks() override = default; - virtual void onReceiveMessage(std::unique_ptr&& message) PURE; + virtual void onReceiveMessage(ResponsePtr&& message) PURE; private: bool onReceiveMessageRaw(Buffer::InstancePtr&& response) override { - auto message = std::unique_ptr(dynamic_cast( + auto message = ResponsePtr(dynamic_cast( Internal::parseMessageUntyped(std::make_unique(), std::move(response)) .release())); if (!message) { @@ -105,6 +169,17 @@ template class AsyncClient /* : public Raw return Internal::sendUntyped(client_.get(), service_method, request, callbacks, parent_span, options); } + virtual AsyncRequest* send(const Protobuf::MethodDescriptor& service_method, + const Protobuf::Message& request, + AsyncRequestCallbacks& callbacks, Tracing::Span& parent_span, + const Http::AsyncClient::RequestOptions& options, + envoy::config::core::v3::ApiVersion transport_api_version) { + Config::VersionConverter::prepareMessageForGrpcWire(const_cast(request), + transport_api_version); + return Internal::sendUntyped(client_.get(), service_method, request, callbacks, 
parent_span, + options); + } + virtual AsyncStream start(const Protobuf::MethodDescriptor& service_method, AsyncStreamCallbacks& callbacks, const Http::AsyncClient::StreamOptions& options) { diff --git a/source/common/html/BUILD b/source/common/html/BUILD index 42c5fc06a7efc..fc2b6c391ad1e 100644 --- a/source/common/html/BUILD +++ b/source/common/html/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/common/http/BUILD b/source/common/http/BUILD index 02521f87ea7a6..3cc2fec337372 100644 --- a/source/common/http/BUILD +++ b/source/common/http/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( @@ -51,21 +51,27 @@ envoy_cc_library( deps = [ ":codec_wrappers_lib", ":exception_lib", + ":status_lib", ":utility_lib", "//include/envoy/event:deferred_deletable", "//include/envoy/http:codec_interface", "//include/envoy/network:connection_interface", "//include/envoy/network:filter_interface", + "//include/envoy/runtime:runtime_interface", "//source/common/common:assert_lib", "//source/common/common:enum_to_int", "//source/common/common:linked_object", "//source/common/common:minimal_logger_lib", "//source/common/config:utility_lib", + "//source/common/http/http1:codec_legacy_lib", "//source/common/http/http1:codec_lib", + "//source/common/http/http2:codec_legacy_lib", "//source/common/http/http2:codec_lib", "//source/common/http/http3:quic_codec_factory_lib", "//source/common/http/http3:well_known_names", "//source/common/network:filter_lib", + "//source/common/runtime:runtime_features_lib", + "//source/common/runtime:runtime_lib", ], ) @@ -128,23 +134,12 @@ envoy_cc_library( "//include/envoy/http:conn_pool_interface", 
"//include/envoy/stats:timespan_interface", "//source/common/common:linked_object", + "//source/common/conn_pool:conn_pool_base_lib", "//source/common/stats:timespan_lib", "//source/common/upstream:upstream_lib", ], ) -envoy_cc_library( - name = "conn_pool_base_legacy_lib", - srcs = ["conn_pool_base_legacy.cc"], - hdrs = ["conn_pool_base_legacy.h"], - deps = [ - "//include/envoy/http:conn_pool_interface", - "//include/envoy/stats:timespan_interface", - "//source/common/common:linked_object", - "//source/common/stats:timespan_lib", - ], -) - envoy_cc_library( name = "conn_manager_config_interface", hdrs = ["conn_manager_config.h"], @@ -154,6 +149,7 @@ envoy_cc_library( "//include/envoy/http:filter_interface", "//include/envoy/http:request_id_extension_interface", "//include/envoy/router:rds_interface", + "//source/common/local_reply:local_reply_lib", "//source/common/network:utility_lib", "//source/common/stats:symbol_table_lib", "@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto", @@ -179,6 +175,7 @@ envoy_cc_library( ":header_utility_lib", ":headers_lib", ":path_utility_lib", + ":status_lib", ":user_agent_lib", ":utility_lib", "//include/envoy/access_log:access_log_interface", @@ -205,7 +202,6 @@ envoy_cc_library( "//include/envoy/stats:stats_macros", "//include/envoy/stats:timespan_interface", "//include/envoy/upstream:upstream_interface", - "//source/common/access_log:access_log_formatter_lib", "//source/common/buffer:buffer_lib", "//source/common/common:assert_lib", "//source/common/common:dump_state_utils", @@ -216,7 +212,9 @@ envoy_cc_library( "//source/common/common:scope_tracker", "//source/common/common:utility_lib", "//source/common/config:utility_lib", + "//source/common/http/http1:codec_legacy_lib", "//source/common/http/http1:codec_lib", + "//source/common/http/http2:codec_legacy_lib", "//source/common/http/http2:codec_lib", "//source/common/http/http3:quic_codec_factory_lib", 
"//source/common/http/http3:well_known_names", @@ -249,7 +247,10 @@ envoy_cc_library( envoy_cc_library( name = "exception_lib", hdrs = ["exception.h"], - deps = ["//include/envoy/http:header_map_interface"], + deps = [ + "//include/envoy/http:codes_interface", + "//include/envoy/http:header_map_interface", + ], ) envoy_cc_library( @@ -263,6 +264,15 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "header_list_view_lib", + srcs = ["header_list_view.cc"], + hdrs = ["header_list_view.h"], + deps = [ + "//include/envoy/http:header_map_interface", + ], +) + envoy_cc_library( name = "header_map_lib", srcs = ["header_map_impl.cc"], @@ -328,6 +338,7 @@ envoy_cc_library( "//include/envoy/stats:stats_macros", "//include/envoy/stats:timespan_interface", "//source/common/stats:symbol_table_lib", + "//source/common/stats:utility_lib", ], ) @@ -336,8 +347,8 @@ envoy_cc_library( srcs = ["utility.cc"], hdrs = ["utility.h"], external_deps = [ + "abseil_node_hash_set", "abseil_optional", - "http_parser", "nghttp2", ], deps = [ @@ -358,6 +369,7 @@ envoy_cc_library( "//source/common/json:json_loader_lib", "//source/common/network:utility_lib", "//source/common/protobuf:utility_lib", + "//source/common/runtime:runtime_features_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], ) @@ -371,6 +383,7 @@ envoy_cc_library( ], deps = [ ":header_map_lib", + ":utility_lib", "//include/envoy/common:regex_interface", "//include/envoy/http:header_map_interface", "//include/envoy/json:json_object_interface", @@ -387,10 +400,12 @@ envoy_cc_library( name = "path_utility_lib", srcs = ["path_utility.cc"], hdrs = ["path_utility.h"], - external_deps = ["abseil_optional"], + external_deps = [ + "abseil_optional", + "googleurl", + ], deps = [ "//include/envoy/http:header_map_interface", - "//source/common/chromium_url", "//source/common/common:logger_lib", ], ) @@ -423,5 +438,20 @@ envoy_cc_library( ], deps = [ "//include/envoy/http:codes_interface", + "//source/common/common:assert_lib", + ], 
+) + +envoy_cc_library( + name = "url_utility_lib", + srcs = ["url_utility.cc"], + hdrs = ["url_utility.h"], + external_deps = [ + "googleurl", + ], + deps = [ + "//source/common/common:assert_lib", + "//source/common/common:empty_string", + "//source/common/common:utility_lib", ], ) diff --git a/source/common/http/async_client_impl.cc b/source/common/http/async_client_impl.cc index cc5659da7885a..a1eaecc78fc45 100644 --- a/source/common/http/async_client_impl.cc +++ b/source/common/http/async_client_impl.cc @@ -20,6 +20,7 @@ const std::vector> const AsyncStreamImpl::NullHedgePolicy AsyncStreamImpl::RouteEntryImpl::hedge_policy_; const AsyncStreamImpl::NullRateLimitPolicy AsyncStreamImpl::RouteEntryImpl::rate_limit_policy_; const AsyncStreamImpl::NullRetryPolicy AsyncStreamImpl::RouteEntryImpl::retry_policy_; +const Router::InternalRedirectPolicyImpl AsyncStreamImpl::RouteEntryImpl::internal_redirect_policy_; const std::vector AsyncStreamImpl::RouteEntryImpl::shadow_policies_; const AsyncStreamImpl::NullVirtualHost AsyncStreamImpl::RouteEntryImpl::virtual_host_; const AsyncStreamImpl::NullRateLimitPolicy AsyncStreamImpl::NullVirtualHost::rate_limit_policy_; @@ -30,13 +31,15 @@ const Config::TypedMetadataImpl AsyncStreamImpl::RouteEntryImpl::typed_metadata_({}); const AsyncStreamImpl::NullPathMatchCriterion AsyncStreamImpl::RouteEntryImpl::path_match_criterion_; +const absl::optional + AsyncStreamImpl::RouteEntryImpl::connect_config_nullopt_; const std::list AsyncStreamImpl::NullConfig::internal_only_headers_; AsyncClientImpl::AsyncClientImpl(Upstream::ClusterInfoConstSharedPtr cluster, Stats::Store& stats_store, Event::Dispatcher& dispatcher, const LocalInfo::LocalInfo& local_info, Upstream::ClusterManager& cm, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, + Random::RandomGenerator& random, Router::ShadowWriterPtr&& shadow_writer, Http::Context& http_context) : cluster_(cluster), config_("http.async-client.", local_info, stats_store, cm, runtime, 
random, @@ -60,7 +63,7 @@ AsyncClient::Request* AsyncClientImpl::send(RequestMessagePtr&& request, // The request may get immediately failed. If so, we will return nullptr. if (!new_request->remote_closed_) { - new_request->moveIntoList(std::move(new_request), active_streams_); + LinkedList::moveIntoList(std::move(new_request), active_streams_); return async_request; } else { new_request->cleanup(); @@ -71,7 +74,7 @@ AsyncClient::Request* AsyncClientImpl::send(RequestMessagePtr&& request, AsyncClient::Stream* AsyncClientImpl::start(AsyncClient::StreamCallbacks& callbacks, const AsyncClient::StreamOptions& options) { std::unique_ptr new_stream{new AsyncStreamImpl(*this, callbacks, options)}; - new_stream->moveIntoList(std::move(new_stream), active_streams_); + LinkedList::moveIntoList(std::move(new_stream), active_streams_); return active_streams_.front().get(); } @@ -95,6 +98,7 @@ void AsyncStreamImpl::encodeHeaders(ResponseHeaderMapPtr&& headers, bool end_str ENVOY_LOG(debug, "async http request response headers (end_stream={}):\n{}", end_stream, *headers); ASSERT(!remote_closed_); + encoded_response_headers_ = true; stream_callbacks_.onHeaders(std::move(headers), end_stream); closeRemote(end_stream); // At present, the router cleans up stream state as soon as the remote is closed, making a @@ -129,11 +133,11 @@ void AsyncStreamImpl::encodeTrailers(ResponseTrailerMapPtr&& trailers) { } void AsyncStreamImpl::sendHeaders(RequestHeaderMap& headers, bool end_stream) { - if (Http::Headers::get().MethodValues.Head == headers.Method()->value().getStringView()) { + if (Http::Headers::get().MethodValues.Head == headers.getMethodValue()) { is_head_request_ = true; } - is_grpc_request_ = Grpc::Common::hasGrpcContentType(headers); + is_grpc_request_ = Grpc::Common::isGrpcRequestHeaders(headers); headers.setReferenceEnvoyInternalRequest(Headers::get().EnvoyInternalRequestValues.True); if (send_xff_) { Utility::appendXff(headers, *parent_.config_.local_info_.address()); @@ 
-237,7 +241,6 @@ AsyncRequestImpl::AsyncRequestImpl(RequestMessagePtr&& request, AsyncClientImpl& AsyncClient::Callbacks& callbacks, const AsyncClient::RequestOptions& options) : AsyncStreamImpl(parent, *this, options), request_(std::move(request)), callbacks_(callbacks) { - if (nullptr != options.parent_span_) { const std::string child_span_name = options.child_span_name_.empty() @@ -263,6 +266,8 @@ void AsyncRequestImpl::initialize() { } void AsyncRequestImpl::onComplete() { + callbacks_.onBeforeFinalizeUpstreamSpan(*child_span_, &response_->headers()); + Tracing::HttpTracerUtility::finalizeUpstreamSpan(*child_span_, &response_->headers(), response_->trailers(), streamInfo(), Tracing::EgressConfig::get()); @@ -290,12 +295,15 @@ void AsyncRequestImpl::onTrailers(ResponseTrailerMapPtr&& trailers) { void AsyncRequestImpl::onReset() { if (!cancelled_) { - // Add tags about reset. - child_span_->setTag(Tracing::Tags::get().Error, Tracing::Tags::get().True); + // Set "error reason" tag related to reset. The tagging for "error true" is done inside the + // Tracing::HttpTracerUtility::finalizeUpstreamSpan. child_span_->setTag(Tracing::Tags::get().ErrorReason, "Reset"); } - // Finalize the span based on whether we received a response or not + callbacks_.onBeforeFinalizeUpstreamSpan(*child_span_, + remoteClosed() ? &response_->headers() : nullptr); + + // Finalize the span based on whether we received a response or not. Tracing::HttpTracerUtility::finalizeUpstreamSpan( *child_span_, remoteClosed() ? &response_->headers() : nullptr, remoteClosed() ? response_->trailers() : nullptr, streamInfo(), Tracing::EgressConfig::get()); @@ -309,7 +317,7 @@ void AsyncRequestImpl::onReset() { void AsyncRequestImpl::cancel() { cancelled_ = true; - // Add tags about the cancellation + // Add tags about the cancellation. 
child_span_->setTag(Tracing::Tags::get().Canceled, Tracing::Tags::get().True); reset(); diff --git a/source/common/http/async_client_impl.h b/source/common/http/async_client_impl.h index 608b2188dc1bd..a4e2e7c86b84a 100644 --- a/source/common/http/async_client_impl.h +++ b/source/common/http/async_client_impl.h @@ -9,6 +9,7 @@ #include #include +#include "envoy/common/random_generator.h" #include "envoy/common/scope_tracker.h" #include "envoy/config/core/v3/base.pb.h" #include "envoy/config/route/v3/route_components.pb.h" @@ -47,16 +48,14 @@ class AsyncClientImpl final : public AsyncClient { AsyncClientImpl(Upstream::ClusterInfoConstSharedPtr cluster, Stats::Store& stats_store, Event::Dispatcher& dispatcher, const LocalInfo::LocalInfo& local_info, Upstream::ClusterManager& cm, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, Router::ShadowWriterPtr&& shadow_writer, + Random::RandomGenerator& random, Router::ShadowWriterPtr&& shadow_writer, Http::Context& http_context); ~AsyncClientImpl() override; // Http::AsyncClient Request* send(RequestMessagePtr&& request, Callbacks& callbacks, const AsyncClient::RequestOptions& options) override; - Stream* start(StreamCallbacks& callbacks, const AsyncClient::StreamOptions& options) override; - Event::Dispatcher& dispatcher() override { return dispatcher_; } private: @@ -77,7 +76,7 @@ class AsyncStreamImpl : public AsyncClient::Stream, public StreamDecoderFilterCallbacks, public Event::DeferredDeletable, Logger::Loggable, - LinkedObject, + public LinkedObject, public ScopeTrackedObject { public: AsyncStreamImpl(AsyncClientImpl& parent, AsyncClient::StreamCallbacks& callbacks, @@ -94,6 +93,7 @@ class AsyncStreamImpl : public AsyncClient::Stream, void sendData(Buffer::Instance& data, bool end_stream) override; void sendTrailers(RequestTrailerMap& trailers) override; void reset() override; + bool isAboveWriteBufferHighWatermark() const override { return high_watermark_calls_ > 0; } protected: bool remoteClosed() { 
return remote_closed_; } @@ -164,6 +164,11 @@ class AsyncStreamImpl : public AsyncClient::Stream, return nullptr; } + Router::RouteConstSharedPtr route(const Router::RouteCallback&, const Http::RequestHeaderMap&, + const StreamInfo::StreamInfo&, uint64_t) const override { + NOT_IMPLEMENTED_GCOVR_EXCL_LINE; + } + const std::list& internalOnlyHeaders() const override { return internal_only_headers_; } @@ -227,6 +232,9 @@ class AsyncStreamImpl : public AsyncClient::Stream, } const Router::RateLimitPolicy& rateLimitPolicy() const override { return rate_limit_policy_; } const Router::RetryPolicy& retryPolicy() const override { return retry_policy_; } + const Router::InternalRedirectPolicy& internalRedirectPolicy() const override { + return internal_redirect_policy_; + } uint32_t retryShadowBufferLimit() const override { return std::numeric_limits::max(); } @@ -268,19 +276,19 @@ class AsyncStreamImpl : public AsyncClient::Stream, const Router::RouteSpecificFilterConfig* perFilterConfig(const std::string&) const override { return nullptr; } + const absl::optional& connectConfig() const override { + return connect_config_nullopt_; + } bool includeAttemptCountInRequest() const override { return false; } bool includeAttemptCountInResponse() const override { return false; } const Router::RouteEntry::UpgradeMap& upgradeMap() const override { return upgrade_map_; } - Router::InternalRedirectAction internalRedirectAction() const override { - return Router::InternalRedirectAction::PassThrough; - } - uint32_t maxInternalRedirects() const override { return 1; } const std::string& routeName() const override { return route_name_; } std::unique_ptr hash_policy_; static const NullHedgePolicy hedge_policy_; static const NullRateLimitPolicy rate_limit_policy_; static const NullRetryPolicy retry_policy_; + static const Router::InternalRedirectPolicyImpl internal_redirect_policy_; static const std::vector shadow_policies_; static const NullVirtualHost virtual_host_; static const 
std::multimap opaque_config_; @@ -292,6 +300,7 @@ class AsyncStreamImpl : public AsyncClient::Stream, Router::RouteEntry::UpgradeMap upgrade_map_; const std::string& cluster_name_; absl::optional timeout_; + static const absl::optional connect_config_nullopt_; const std::string route_name_; }; @@ -323,6 +332,9 @@ class AsyncStreamImpl : public AsyncClient::Stream, Event::Dispatcher& dispatcher() override { return parent_.dispatcher_; } void resetStream() override; Router::RouteConstSharedPtr route() override { return route_; } + Router::RouteConstSharedPtr route(const Router::RouteCallback&) override { + NOT_IMPLEMENTED_GCOVR_EXCL_LINE; + } Upstream::ClusterInfoConstSharedPtr clusterInfo() override { return parent_.cluster_; } void clearRouteCache() override {} uint64_t streamId() const override { return stream_id_; } @@ -349,16 +361,24 @@ class AsyncStreamImpl : public AsyncClient::Stream, const absl::optional grpc_status, absl::string_view details) override { stream_info_.setResponseCodeDetails(details); + if (encoded_response_headers_) { + resetStream(); + return; + } Utility::sendLocalReply( - is_grpc_request_, - [this, modify_headers](ResponseHeaderMapPtr&& headers, bool end_stream) -> void { - if (modify_headers != nullptr) { - modify_headers(*headers); - } - encodeHeaders(std::move(headers), end_stream); - }, - [this](Buffer::Instance& data, bool end_stream) -> void { encodeData(data, end_stream); }, - remote_closed_, code, body, grpc_status, is_head_request_); + remote_closed_, + Utility::EncodeFunctions{ + nullptr, + [this, modify_headers](ResponseHeaderMapPtr&& headers, bool end_stream) -> void { + if (modify_headers != nullptr) { + modify_headers(*headers); + } + encodeHeaders(std::move(headers), end_stream); + }, + [this](Buffer::Instance& data, bool end_stream) -> void { + encodeData(data, end_stream); + }}, + Utility::LocalReplyData{is_grpc_request_, code, body, grpc_status, is_head_request_}); } // The async client won't pause if sending an Expect: 
100-Continue so simply // swallows any incoming encode100Continue. @@ -367,8 +387,11 @@ class AsyncStreamImpl : public AsyncClient::Stream, void encodeData(Buffer::Instance& data, bool end_stream) override; void encodeTrailers(ResponseTrailerMapPtr&& trailers) override; void encodeMetadata(MetadataMapPtr&&) override {} - void onDecoderFilterAboveWriteBufferHighWatermark() override {} - void onDecoderFilterBelowWriteBufferLowWatermark() override {} + void onDecoderFilterAboveWriteBufferHighWatermark() override { ++high_watermark_calls_; } + void onDecoderFilterBelowWriteBufferLowWatermark() override { + ASSERT(high_watermark_calls_ != 0); + --high_watermark_calls_; + } void addDownstreamWatermarkCallbacks(DownstreamWatermarkCallbacks&) override {} void removeDownstreamWatermarkCallbacks(DownstreamWatermarkCallbacks&) override {} void setDecoderBufferLimit(uint32_t) override {} @@ -392,9 +415,11 @@ class AsyncStreamImpl : public AsyncClient::Stream, Tracing::NullSpan active_span_; const Tracing::Config& tracing_config_; std::shared_ptr route_; + uint32_t high_watermark_calls_{}; bool local_closed_{}; bool remote_closed_{}; Buffer::InstancePtr buffered_body_; + bool encoded_response_headers_{}; bool is_grpc_request_{}; bool is_head_request_{false}; bool send_xff_{true}; diff --git a/source/common/http/async_client_utility.cc b/source/common/http/async_client_utility.cc index 17124f06fb348..664a0fc0c651a 100644 --- a/source/common/http/async_client_utility.cc +++ b/source/common/http/async_client_utility.cc @@ -1,5 +1,7 @@ #include "common/http/async_client_utility.h" +#include "common/common/assert.h" + namespace Envoy { namespace Http { diff --git a/source/common/http/codec_client.cc b/source/common/http/codec_client.cc index 6d8011e224857..2353eba5be366 100644 --- a/source/common/http/codec_client.cc +++ b/source/common/http/codec_client.cc @@ -9,10 +9,15 @@ #include "common/config/utility.h" #include "common/http/exception.h" #include 
"common/http/http1/codec_impl.h" +#include "common/http/http1/codec_impl_legacy.h" #include "common/http/http2/codec_impl.h" +#include "common/http/http2/codec_impl_legacy.h" #include "common/http/http3/quic_codec_factory.h" #include "common/http/http3/well_known_names.h" +#include "common/http/status.h" #include "common/http/utility.h" +#include "common/runtime/runtime_features.h" +#include "common/runtime/runtime_impl.h" namespace Envoy { namespace Http { @@ -20,7 +25,7 @@ namespace Http { CodecClient::CodecClient(Type type, Network::ClientConnectionPtr&& connection, Upstream::HostDescriptionConstSharedPtr host, Event::Dispatcher& dispatcher) - : type_(type), connection_(std::move(connection)), host_(host), + : type_(type), host_(host), connection_(std::move(connection)), idle_timeout_(host_->cluster().idleTimeout()) { if (type_ != Type::HTTP3) { // Make sure upstream connections process data and then the FIN, rather than processing @@ -62,7 +67,7 @@ RequestEncoder& CodecClient::newStream(ResponseDecoder& response_decoder) { ActiveRequestPtr request(new ActiveRequest(*this, response_decoder)); request->encoder_ = &codec_->newStream(*request); request->encoder_->getStream().addCallbacks(*request); - request->moveIntoList(std::move(request), active_requests_); + LinkedList::moveIntoList(std::move(request), active_requests_); disableIdleTimer(); return *active_requests_.front()->encoder_; } @@ -121,18 +126,18 @@ void CodecClient::onReset(ActiveRequest& request, StreamResetReason reason) { void CodecClient::onData(Buffer::Instance& data) { bool protocol_error = false; - try { - codec_->dispatch(data); - } catch (CodecProtocolException& e) { - ENVOY_CONN_LOG(debug, "protocol error: {}", *connection_, e.what()); + const Status status = codec_->dispatch(data); + + if (isCodecProtocolError(status)) { + ENVOY_CONN_LOG(debug, "protocol error: {}", *connection_, status.message()); close(); protocol_error = true; - } catch (PrematureResponseException& e) { + } else if 
(isPrematureResponseError(status)) { ENVOY_CONN_LOG(debug, "premature response", *connection_); close(); // Don't count 408 responses where we have no active requests as protocol errors - if (!active_requests_.empty() || e.responseCode() != Code::RequestTimeout) { + if (!active_requests_.empty() || getPrematureResponseHttpCode(status) != Code::RequestTimeout) { protocol_error = true; } } @@ -146,18 +151,32 @@ CodecClientProd::CodecClientProd(Type type, Network::ClientConnectionPtr&& conne Upstream::HostDescriptionConstSharedPtr host, Event::Dispatcher& dispatcher) : CodecClient(type, std::move(connection), host, dispatcher) { + switch (type) { case Type::HTTP1: { - codec_ = std::make_unique( - *connection_, host->cluster().statsScope(), *this, host->cluster().http1Settings(), - host->cluster().maxResponseHeadersCount()); + if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.new_codec_behavior")) { + codec_ = std::make_unique( + *connection_, host->cluster().http1CodecStats(), *this, host->cluster().http1Settings(), + host->cluster().maxResponseHeadersCount()); + } else { + codec_ = std::make_unique( + *connection_, host->cluster().http1CodecStats(), *this, host->cluster().http1Settings(), + host->cluster().maxResponseHeadersCount()); + } break; } case Type::HTTP2: { - codec_ = std::make_unique( - *connection_, *this, host->cluster().statsScope(), host->cluster().http2Options(), - Http::DEFAULT_MAX_REQUEST_HEADERS_KB, host->cluster().maxResponseHeadersCount(), - Http2::ProdNghttp2SessionFactory::get()); + if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.new_codec_behavior")) { + codec_ = std::make_unique( + *connection_, *this, host->cluster().http2CodecStats(), host->cluster().http2Options(), + Http::DEFAULT_MAX_REQUEST_HEADERS_KB, host->cluster().maxResponseHeadersCount(), + Http2::ProdNghttp2SessionFactory::get()); + } else { + codec_ = std::make_unique( + *connection_, *this, host->cluster().http2CodecStats(), 
host->cluster().http2Options(), + Http::DEFAULT_MAX_REQUEST_HEADERS_KB, host->cluster().maxResponseHeadersCount(), + Http2::ProdNghttp2SessionFactory::get()); + } break; } case Type::HTTP3: { @@ -165,6 +184,7 @@ CodecClientProd::CodecClientProd(Type type, Network::ClientConnectionPtr&& conne Config::Utility::getAndCheckFactoryByName( Http::QuicCodecNames::get().Quiche) .createQuicClientConnection(*connection_, *this)); + break; } } } diff --git a/source/common/http/codec_client.h b/source/common/http/codec_client.h index 63d16b93ddb93..895b274737158 100644 --- a/source/common/http/codec_client.h +++ b/source/common/http/codec_client.h @@ -76,7 +76,7 @@ class CodecClient : Logger::Loggable, /** * @return the underlying connection ID. */ - uint64_t id() { return connection_->id(); } + uint64_t id() const { return connection_->id(); } /** * @return the underlying codec protocol. @@ -131,9 +131,9 @@ class CodecClient : Logger::Loggable, Upstream::HostDescriptionConstSharedPtr host, Event::Dispatcher& dispatcher); // Http::ConnectionCallbacks - void onGoAway() override { + void onGoAway(GoAwayErrorCode error_code) override { if (codec_callbacks_) { - codec_callbacks_->onGoAway(); + codec_callbacks_->onGoAway(error_code); } } @@ -155,9 +155,11 @@ class CodecClient : Logger::Loggable, } const Type type_; - ClientConnectionPtr codec_; - Network::ClientConnectionPtr connection_; + // The order of host_, connection_, and codec_ matter as during destruction each can refer to + // the previous, at least in tests. 
Upstream::HostDescriptionConstSharedPtr host_; + Network::ClientConnectionPtr connection_; + ClientConnectionPtr codec_; Event::TimerPtr idle_timer_; const absl::optional idle_timeout_; diff --git a/source/common/http/codec_helper.h b/source/common/http/codec_helper.h index 3cc6d5bd6580d..5128891e41055 100644 --- a/source/common/http/codec_helper.h +++ b/source/common/http/codec_helper.h @@ -1,11 +1,11 @@ #pragma once -#include - #include "envoy/http/codec.h" #include "common/common/assert.h" +#include "absl/container/inlined_vector.h" + namespace Envoy { namespace Http { @@ -54,12 +54,7 @@ class StreamCallbackHelper { bool local_end_stream_{}; protected: - StreamCallbackHelper() { - // Set space for 8 callbacks (64 bytes). - callbacks_.reserve(8); - } - - void addCallbacks_(StreamCallbacks& callbacks) { + void addCallbacksHelper(StreamCallbacks& callbacks) { ASSERT(!reset_callbacks_started_ && !local_end_stream_); callbacks_.push_back(&callbacks); for (uint32_t i = 0; i < high_watermark_callbacks_; ++i) { @@ -67,12 +62,12 @@ class StreamCallbackHelper { } } - void removeCallbacks_(StreamCallbacks& callbacks) { + void removeCallbacksHelper(StreamCallbacks& callbacks) { // For performance reasons we just clear the callback and do not resize the vector. // Reset callbacks scale with the number of filters per request and do not get added and // removed multiple times. // The vector may not be safely resized without making sure the run.*Callbacks() helper - // functions above still handle removeCallbacks_() calls mid-loop. + // functions above still handle removeCallbacksHelper() calls mid-loop. 
for (auto& callback : callbacks_) { if (callback == &callbacks) { callback = nullptr; @@ -82,7 +77,7 @@ class StreamCallbackHelper { } private: - std::vector callbacks_; + absl::InlinedVector callbacks_; bool reset_callbacks_started_{}; uint32_t high_watermark_callbacks_{}; }; diff --git a/source/common/http/codes.cc b/source/common/http/codes.cc index cf291af32dfba..37273856a1c60 100644 --- a/source/common/http/codes.cc +++ b/source/common/http/codes.cc @@ -34,10 +34,6 @@ CodeStatsImpl::CodeStatsImpl(Stats::SymbolTable& symbol_table) vcluster_(stat_name_pool_.add("vcluster")), vhost_(stat_name_pool_.add("vhost")), zone_(stat_name_pool_.add("zone")) { - for (auto& rc_stat_name : rc_stat_names_) { - rc_stat_name = nullptr; - } - // Pre-allocate response codes 200, 404, and 503, as those seem quite likely. // We don't pre-allocate all the HTTP codes because the first 127 allocations // are likely to be encoded in one byte, and we would rather spend those on @@ -180,18 +176,10 @@ Stats::StatName CodeStatsImpl::upstreamRqStatName(Code response_code) const { if (rc_index >= NumHttpCodes) { return upstream_rq_unknown_; } - std::atomic& atomic_ref = rc_stat_names_[rc_index]; - if (atomic_ref.load() == nullptr) { - absl::MutexLock lock(&mutex_); - - // Check again under lock as two threads might have raced to add a StatName - // for the same code. 
- if (atomic_ref.load() == nullptr) { - atomic_ref = stat_name_pool_.addReturningStorage( - absl::StrCat("upstream_rq_", enumToInt(response_code))); - } - } - return Stats::StatName(atomic_ref.load()); + return Stats::StatName(rc_stat_names_.get(rc_index, [this, response_code]() -> const uint8_t* { + return stat_name_pool_.addReturningStorage( + absl::StrCat("upstream_rq_", enumToInt(response_code))); + })); } std::string CodeUtility::groupStringForResponseCode(Code response_code) { diff --git a/source/common/http/codes.h b/source/common/http/codes.h index dcfa4e37df507..3957377aa2c2f 100644 --- a/source/common/http/codes.h +++ b/source/common/http/codes.h @@ -8,6 +8,7 @@ #include "envoy/http/header_map.h" #include "envoy/stats/scope.h" +#include "common/common/thread.h" #include "common/stats/symbol_table_impl.h" namespace Envoy { @@ -62,8 +63,7 @@ class CodeStatsImpl : public CodeStats { Stats::StatName upstreamRqGroup(Code response_code) const; Stats::StatName upstreamRqStatName(Code response_code) const; - mutable Stats::StatNamePool stat_name_pool_ ABSL_GUARDED_BY(mutex_); - mutable absl::Mutex mutex_; + mutable Stats::StatNamePool stat_name_pool_; Stats::SymbolTable& symbol_table_; const Stats::StatName canary_; @@ -108,7 +108,9 @@ class CodeStatsImpl : public CodeStats { static constexpr uint32_t NumHttpCodes = 500; static constexpr uint32_t HttpCodeOffset = 100; // code 100 is at index 0. 
- mutable std::atomic rc_stat_names_[NumHttpCodes]; + mutable Thread::AtomicPtrArray + rc_stat_names_; }; /** diff --git a/source/common/http/conn_manager_config.h b/source/common/http/conn_manager_config.h index 774b5e9f47c58..b67afc95a64c7 100644 --- a/source/common/http/conn_manager_config.h +++ b/source/common/http/conn_manager_config.h @@ -10,6 +10,7 @@ #include "envoy/type/v3/percent.pb.h" #include "common/http/date_provider.h" +#include "common/local_reply/local_reply.h" #include "common/network/utility.h" #include "common/stats/symbol_table_impl.h" @@ -245,6 +246,11 @@ class ConnectionManagerConfig { */ virtual bool preserveExternalRequestId() const PURE; + /** + * @return whether the x-request-id should always be set in the response. + */ + virtual bool alwaysSetRequestIdInResponse() const PURE; + /** * @return optional idle timeout for incoming connection manager connections. */ @@ -403,6 +409,12 @@ class ConnectionManagerConfig { */ virtual bool proxy100Continue() const PURE; + /** + * @return bool supplies if the HttpConnectionManager should handle invalid HTTP with a stream + * error or connection error. + */ + virtual bool streamErrorOnInvalidHttpMessaging() const PURE; + /** * @return supplies the http1 settings. */ @@ -419,12 +431,22 @@ class ConnectionManagerConfig { */ virtual bool shouldMergeSlashes() const PURE; + /** + * @return if the HttpConnectionManager should remove the port from host/authority header + */ + virtual bool shouldStripMatchingPort() const PURE; + /** * @return the action HttpConnectionManager should take when receiving client request * headers containing underscore characters. */ virtual envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction headersWithUnderscoresAction() const PURE; + + /** + * @return LocalReply configuration which supplies mapping for local reply generated by Envoy. 
+ */ + virtual const LocalReply::LocalReply& localReply() const PURE; }; } // namespace Http } // namespace Envoy diff --git a/source/common/http/conn_manager_impl.cc b/source/common/http/conn_manager_impl.cc index d6e4a55eced76..f00afb45cf7bd 100644 --- a/source/common/http/conn_manager_impl.cc +++ b/source/common/http/conn_manager_impl.cc @@ -11,6 +11,7 @@ #include "envoy/common/time.h" #include "envoy/event/dispatcher.h" #include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" +#include "envoy/http/header_map.h" #include "envoy/network/drain_decision.h" #include "envoy/router/router.h" #include "envoy/ssl/connection.h" @@ -30,13 +31,16 @@ #include "common/http/conn_manager_utility.h" #include "common/http/exception.h" #include "common/http/header_map_impl.h" +#include "common/http/header_utility.h" #include "common/http/headers.h" #include "common/http/http1/codec_impl.h" #include "common/http/http2/codec_impl.h" #include "common/http/path_utility.h" +#include "common/http/status.h" #include "common/http/utility.h" #include "common/network/utility.h" #include "common/router/config_impl.h" +#include "common/runtime/runtime_features.h" #include "common/runtime/runtime_impl.h" #include "common/stats/timespan_impl.h" @@ -98,11 +102,11 @@ ConnectionManagerImpl::generateListenerStats(const std::string& prefix, Stats::S ConnectionManagerImpl::ConnectionManagerImpl(ConnectionManagerConfig& config, const Network::DrainDecision& drain_close, - Runtime::RandomGenerator& random_generator, + Random::RandomGenerator& random_generator, Http::Context& http_context, Runtime::Loader& runtime, const LocalInfo::LocalInfo& local_info, Upstream::ClusterManager& cluster_manager, - Server::OverloadManager* overload_manager, + Server::OverloadManager& overload_manager, TimeSource& time_source) : config_(config), stats_(config_.stats()), conn_length_(new Stats::HistogramCompletableTimespanImpl( @@ -111,14 +115,10 @@ 
ConnectionManagerImpl::ConnectionManagerImpl(ConnectionManagerConfig& config, random_generator_(random_generator), http_context_(http_context), runtime_(runtime), local_info_(local_info), cluster_manager_(cluster_manager), listener_stats_(config_.listenerStats()), - overload_stop_accepting_requests_ref_( - overload_manager ? overload_manager->getThreadLocalOverloadState().getState( - Server::OverloadActionNames::get().StopAcceptingRequests) - : Server::OverloadManager::getInactiveState()), - overload_disable_keepalive_ref_( - overload_manager ? overload_manager->getThreadLocalOverloadState().getState( - Server::OverloadActionNames::get().DisableHttpKeepAlive) - : Server::OverloadManager::getInactiveState()), + overload_stop_accepting_requests_ref_(overload_manager.getThreadLocalOverloadState().getState( + Server::OverloadActionNames::get().StopAcceptingRequests)), + overload_disable_keepalive_ref_(overload_manager.getThreadLocalOverloadState().getState( + Server::OverloadActionNames::get().DisableHttpKeepAlive)), time_source_(time_source) {} const ResponseHeaderMap& ConnectionManagerImpl::continueHeader() { @@ -199,11 +199,12 @@ void ConnectionManagerImpl::doEndStream(ActiveStream& stream) { // explicitly nulls out response_encoder to avoid the downstream being notified of the // Envoy-internal stream instance being ended. if (stream.response_encoder_ != nullptr && - (!stream.state_.remote_complete_ || !stream.state_.codec_saw_local_complete_)) { + (!stream.filter_manager_.remoteComplete() || !stream.state_.codec_saw_local_complete_)) { // Indicate local is complete at this point so that if we reset during a continuation, we don't // raise further data or trailers. ENVOY_STREAM_LOG(debug, "doEndStream() resetting stream", stream); - stream.state_.local_complete_ = true; + // TODO(snowp): This call might not be necessary, try to clean up + remove setter function. 
+ stream.filter_manager_.setLocalComplete(); stream.state_.codec_saw_local_complete_ = true; stream.response_encoder_->getStream().resetStream(StreamResetReason::LocalReset); reset_stream = true; @@ -218,15 +219,6 @@ void ConnectionManagerImpl::doEndStream(ActiveStream& stream) { } checkForDeferredClose(); - - // Reading may have been disabled for the non-multiplexing case, so enable it again. - // Also be sure to unwind any read-disable done by the prior downstream - // connection. - if (drain_state_ != DrainState::Closing && codec_->protocol() < Protocol::Http2) { - while (!read_callbacks_->connection().readEnabled()) { - read_callbacks_->connection().readDisable(false); - } - } } void ConnectionManagerImpl::doDeferredStreamDestroy(ActiveStream& stream) { @@ -238,19 +230,9 @@ void ConnectionManagerImpl::doDeferredStreamDestroy(ActiveStream& stream) { stream.stream_idle_timer_->disableTimer(); stream.stream_idle_timer_ = nullptr; } - stream.disarmRequestTimeout(); - - stream.state_.destroyed_ = true; - for (auto& filter : stream.decoder_filters_) { - filter->handle_->onDestroy(); - } + stream.filter_manager_.disarmRequestTimeout(); - for (auto& filter : stream.encoder_filters_) { - // Do not call on destroy twice for dual registered filters. 
- if (!filter->dual_filter_) { - filter->handle_->onDestroy(); - } - } + stream.filter_manager_.destroyFilters(); read_callbacks_->connection().dispatcher().deferredDelete(stream.removeFromList(streams_)); @@ -266,20 +248,20 @@ RequestDecoder& ConnectionManagerImpl::newStream(ResponseEncoder& response_encod } ENVOY_CONN_LOG(debug, "new stream", read_callbacks_->connection()); - ActiveStreamPtr new_stream(new ActiveStream(*this)); + ActiveStreamPtr new_stream(new ActiveStream(*this, response_encoder.getStream().bufferLimit())); new_stream->state_.is_internally_created_ = is_internally_created; new_stream->response_encoder_ = &response_encoder; new_stream->response_encoder_->getStream().addCallbacks(*new_stream); - new_stream->buffer_limit_ = new_stream->response_encoder_->getStream().bufferLimit(); + new_stream->response_encoder_->getStream().setFlushTimeout(new_stream->idle_timeout_ms_); // If the network connection is backed up, the stream should be made aware of it on creation. - // Both HTTP/1.x and HTTP/2 codecs handle this in StreamCallbackHelper::addCallbacks_. + // Both HTTP/1.x and HTTP/2 codecs handle this in StreamCallbackHelper::addCallbacksHelper. 
ASSERT(read_callbacks_->connection().aboveHighWatermark() == false || - new_stream->high_watermark_count_ > 0); - new_stream->moveIntoList(std::move(new_stream), streams_); + new_stream->filter_manager_.aboveHighWatermark()); + LinkedList::moveIntoList(std::move(new_stream), streams_); return **streams_.begin(); } -void ConnectionManagerImpl::handleCodecException(const char* error) { +void ConnectionManagerImpl::handleCodecError(absl::string_view error) { ENVOY_CONN_LOG(debug, "dispatch error: {}", read_callbacks_->connection(), error); read_callbacks_->connection().streamInfo().setResponseCodeDetails( absl::StrCat("codec error: ", error)); @@ -323,14 +305,15 @@ Network::FilterStatus ConnectionManagerImpl::onData(Buffer::Instance& data, bool do { redispatch = false; - try { - codec_->dispatch(data); - } catch (const FrameFloodException& e) { - handleCodecException(e.what()); + const Status status = codec_->dispatch(data); + + ASSERT(!isPrematureResponseError(status)); + if (isBufferFloodError(status)) { + handleCodecError(status.message()); return Network::FilterStatus::StopIteration; - } catch (const CodecProtocolException& e) { + } else if (isCodecProtocolError(status)) { stats_.named_.downstream_cx_protocol_error_.inc(); - handleCodecException(e.what()); + handleCodecError(status.message()); return Network::FilterStatus::StopIteration; } @@ -346,10 +329,6 @@ Network::FilterStatus ConnectionManagerImpl::onData(Buffer::Instance& data, bool data.length() > 0 && streams_.empty()) { redispatch = true; } - - if (!streams_.empty() && streams_.front()->state_.remote_complete_) { - read_callbacks_->connection().readDisable(true); - } } } while (redispatch); @@ -399,9 +378,9 @@ void ConnectionManagerImpl::resetAllStreams( // of the form: if parameter is nonempty, use that; else if the // codec details are nonempty, use those. This hack does not // seem better than the code duplication, so punt for now. 
- stream.stream_info_.setResponseFlag(response_flag.value()); + stream.filter_manager_.streamInfo().setResponseFlag(response_flag.value()); if (*response_flag == StreamInfo::ResponseFlag::DownstreamProtocolError) { - stream.stream_info_.setResponseCodeDetails( + stream.filter_manager_.streamInfo().setResponseCodeDetails( stream.response_encoder_->getStream().responseDetails()); } } @@ -413,12 +392,12 @@ void ConnectionManagerImpl::onEvent(Network::ConnectionEvent event) { stats_.named_.downstream_cx_destroy_local_.inc(); } - if (event == Network::ConnectionEvent::RemoteClose) { - stats_.named_.downstream_cx_destroy_remote_.inc(); - } - if (event == Network::ConnectionEvent::RemoteClose || event == Network::ConnectionEvent::LocalClose) { + if (event == Network::ConnectionEvent::RemoteClose) { + remote_close_ = true; + stats_.named_.downstream_cx_destroy_remote_.inc(); + } // TODO(mattklein123): It is technically possible that something outside of the filter causes // a local connection close, so we still guard against that here. A better solution would be to // have some type of "pre-close" callback that we could hook for cleanup that would get called @@ -474,7 +453,7 @@ void ConnectionManagerImpl::doConnectionClose( } } -void ConnectionManagerImpl::onGoAway() { +void ConnectionManagerImpl::onGoAway(GoAwayErrorCode) { // Currently we do nothing with remote go away frames. In the future we can decide to no longer // push resources if applicable. 
} @@ -537,13 +516,17 @@ void ConnectionManagerImpl::RdsRouteConfigUpdateRequester::requestRouteConfigUpd std::move(route_config_updated_cb)); } -ConnectionManagerImpl::ActiveStream::ActiveStream(ConnectionManagerImpl& connection_manager) +ConnectionManagerImpl::ActiveStream::ActiveStream(ConnectionManagerImpl& connection_manager, + uint32_t buffer_limit) : connection_manager_(connection_manager), + filter_manager_(*this, *this, buffer_limit, connection_manager_.config_.filterFactory(), + connection_manager_.config_.localReply(), + connection_manager_.codec_->protocol(), connection_manager_.timeSource(), + connection_manager_.read_callbacks_->connection().streamInfo().filterState(), + StreamInfo::FilterState::LifeSpan::Connection), stream_id_(connection_manager.random_generator_.random()), request_response_timespan_(new Stats::HistogramCompletableTimespanImpl( connection_manager_.stats_.named_.downstream_rq_time_, connection_manager_.timeSource())), - stream_info_(connection_manager_.codec_->protocol(), connection_manager_.timeSource(), - connection_manager.filterState()), upstream_options_(std::make_shared()) { ASSERT(!connection_manager.config_.isRoutable() || ((connection_manager.config_.routeConfigProvider() == nullptr && @@ -552,8 +535,12 @@ ConnectionManagerImpl::ActiveStream::ActiveStream(ConnectionManagerImpl& connect connection_manager.config_.scopedRouteConfigProvider() == nullptr)), "Either routeConfigProvider or scopedRouteConfigProvider should be set in " "ConnectionManagerImpl."); + for (const AccessLog::InstanceSharedPtr& access_log : connection_manager_.config_.accessLogs()) { + filter_manager_.addAccessLogHandler(access_log); + } - stream_info_.setRequestIDExtension(connection_manager.config_.requestIDExtension()); + filter_manager_.streamInfo().setRequestIDExtension( + connection_manager.config_.requestIDExtension()); if (connection_manager_.config_.isRoutable() && connection_manager.config_.routeConfigProvider() != nullptr) { @@ -577,18 +564,19 @@ 
ConnectionManagerImpl::ActiveStream::ActiveStream(ConnectionManagerImpl& connect } else { connection_manager_.stats_.named_.downstream_rq_http1_total_.inc(); } - stream_info_.setDownstreamLocalAddress( + filter_manager_.streamInfo().setDownstreamLocalAddress( connection_manager_.read_callbacks_->connection().localAddress()); - stream_info_.setDownstreamDirectRemoteAddress( + filter_manager_.streamInfo().setDownstreamDirectRemoteAddress( connection_manager_.read_callbacks_->connection().directRemoteAddress()); // Initially, the downstream remote address is the source address of the // downstream connection. That can change later in the request's lifecycle, // based on XFF processing, but setting the downstream remote address here // prevents surprises for logging code in edge cases. - stream_info_.setDownstreamRemoteAddress( + filter_manager_.streamInfo().setDownstreamRemoteAddress( connection_manager_.read_callbacks_->connection().remoteAddress()); - stream_info_.setDownstreamSslConnection(connection_manager_.read_callbacks_->connection().ssl()); + filter_manager_.streamInfo().setDownstreamSslConnection( + connection_manager_.read_callbacks_->connection().ssl()); if (connection_manager_.config_.streamIdleTimeout().count()) { idle_timeout_ms_ = connection_manager_.config_.streamIdleTimeout(); @@ -613,43 +601,58 @@ ConnectionManagerImpl::ActiveStream::ActiveStream(ConnectionManagerImpl& connect this); } - stream_info_.setRequestedServerName( + filter_manager_.streamInfo().setRequestedServerName( connection_manager_.read_callbacks_->connection().requestedServerName()); } ConnectionManagerImpl::ActiveStream::~ActiveStream() { - stream_info_.onRequestComplete(); + filter_manager_.streamInfo().onRequestComplete(); + Upstream::HostDescriptionConstSharedPtr upstream_host = + connection_manager_.read_callbacks_->upstreamHost(); + + if (upstream_host != nullptr) { + Upstream::ClusterRequestResponseSizeStatsOptRef req_resp_stats = + 
upstream_host->cluster().requestResponseSizeStats(); + if (req_resp_stats.has_value()) { + req_resp_stats->get().upstream_rq_body_size_.recordValue( + filter_manager_.streamInfo().bytesReceived()); + req_resp_stats->get().upstream_rs_body_size_.recordValue( + filter_manager_.streamInfo().bytesSent()); + } + } + // TODO(alyssawilk) this is not true. Fix. // A downstream disconnect can be identified for HTTP requests when the upstream returns with a 0 // response code and when no other response flags are set. - if (!stream_info_.hasAnyResponseFlag() && !stream_info_.responseCode()) { - stream_info_.setResponseFlag(StreamInfo::ResponseFlag::DownstreamConnectionTermination); + if (!filter_manager_.streamInfo().hasAnyResponseFlag() && + !filter_manager_.streamInfo().responseCode()) { + filter_manager_.streamInfo().setResponseFlag( + StreamInfo::ResponseFlag::DownstreamConnectionTermination); } - - connection_manager_.stats_.named_.downstream_rq_active_.dec(); - for (const AccessLog::InstanceSharedPtr& access_log : connection_manager_.config_.accessLogs()) { - access_log->log(request_headers_.get(), response_headers_.get(), response_trailers_.get(), - stream_info_); + if (connection_manager_.remote_close_) { + filter_manager_.streamInfo().setResponseCodeDetails( + StreamInfo::ResponseCodeDetails::get().DownstreamRemoteDisconnect); } - for (const auto& log_handler : access_log_handlers_) { - log_handler->log(request_headers_.get(), response_headers_.get(), response_trailers_.get(), - stream_info_); + + if (connection_manager_.codec_->protocol() < Protocol::Http2) { + // For HTTP/2 there are still some reset cases where details are not set. + // For HTTP/1 there shouldn't be any. Regression-proof this. 
+ ASSERT(filter_manager_.streamInfo().responseCodeDetails().has_value()); } - if (stream_info_.healthCheck()) { + connection_manager_.stats_.named_.downstream_rq_active_.dec(); + if (filter_manager_.streamInfo().healthCheck()) { connection_manager_.config_.tracingStats().health_check_.inc(); } if (active_span_) { Tracing::HttpTracerUtility::finalizeDownstreamSpan( - *active_span_, request_headers_.get(), response_headers_.get(), response_trailers_.get(), - stream_info_, *this); + *active_span_, filter_manager_.requestHeaders(), filter_manager_.responseHeaders(), + filter_manager_.responseTrailers(), filter_manager_.streamInfo(), *this); } if (state_.successful_upgrade_) { connection_manager_.stats_.named_.downstream_cx_upgrades_active_.dec(); } - - ASSERT(state_.filter_call_state_ == 0); } void ConnectionManagerImpl::ActiveStream::resetIdleTimer() { @@ -664,59 +667,100 @@ void ConnectionManagerImpl::ActiveStream::resetIdleTimer() { void ConnectionManagerImpl::ActiveStream::onIdleTimeout() { connection_manager_.stats_.named_.downstream_rq_idle_timeout_.inc(); // If headers have not been sent to the user, send a 408. - if (response_headers_ != nullptr) { + if (filter_manager_.responseHeaders() != nullptr && + !Runtime::runtimeFeatureEnabled("envoy.reloadable_features.allow_response_for_timeout")) { // TODO(htuch): We could send trailers here with an x-envoy timeout header // or gRPC status code, and/or set H2 RST_STREAM error. 
+ filter_manager_.streamInfo().setResponseCodeDetails( + StreamInfo::ResponseCodeDetails::get().StreamIdleTimeout); connection_manager_.doEndStream(*this); } else { - stream_info_.setResponseFlag(StreamInfo::ResponseFlag::StreamIdleTimeout); - sendLocalReply(request_headers_ != nullptr && - Grpc::Common::hasGrpcContentType(*request_headers_), - Http::Code::RequestTimeout, "stream timeout", nullptr, state_.is_head_request_, - absl::nullopt, StreamInfo::ResponseCodeDetails::get().StreamIdleTimeout); + // TODO(mattklein) this may result in multiple flags. This Ok? + filter_manager_.streamInfo().setResponseFlag(StreamInfo::ResponseFlag::StreamIdleTimeout); + sendLocalReply(filter_manager_.requestHeaders() != nullptr && + Grpc::Common::isGrpcRequestHeaders(*filter_manager_.requestHeaders()), + Http::Code::RequestTimeout, "stream timeout", nullptr, absl::nullopt, + StreamInfo::ResponseCodeDetails::get().StreamIdleTimeout); } } void ConnectionManagerImpl::ActiveStream::onRequestTimeout() { connection_manager_.stats_.named_.downstream_rq_timeout_.inc(); - sendLocalReply(request_headers_ != nullptr && Grpc::Common::hasGrpcContentType(*request_headers_), - Http::Code::RequestTimeout, "request timeout", nullptr, state_.is_head_request_, - absl::nullopt, StreamInfo::ResponseCodeDetails::get().RequestOverallTimeout); + sendLocalReply(filter_manager_.requestHeaders() != nullptr && + Grpc::Common::isGrpcRequestHeaders(*filter_manager_.requestHeaders()), + Http::Code::RequestTimeout, "request timeout", nullptr, absl::nullopt, + StreamInfo::ResponseCodeDetails::get().RequestOverallTimeout); } void ConnectionManagerImpl::ActiveStream::onStreamMaxDurationReached() { ENVOY_STREAM_LOG(debug, "Stream max duration time reached", *this); connection_manager_.stats_.named_.downstream_rq_max_duration_reached_.inc(); - connection_manager_.doEndStream(*this); + if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.allow_response_for_timeout")) { + 
sendLocalReply(filter_manager_.requestHeaders() != nullptr && + Grpc::Common::isGrpcRequestHeaders(*filter_manager_.requestHeaders()), + Http::Code::RequestTimeout, "downstream duration timeout", nullptr, + absl::nullopt, StreamInfo::ResponseCodeDetails::get().MaxDurationTimeout); + } else { + filter_manager_.streamInfo().setResponseCodeDetails( + StreamInfo::ResponseCodeDetails::get().MaxDurationTimeout); + connection_manager_.doEndStream(*this); + } } -void ConnectionManagerImpl::ActiveStream::addStreamDecoderFilterWorker( +void ConnectionManagerImpl::FilterManager::addStreamDecoderFilterWorker( StreamDecoderFilterSharedPtr filter, bool dual_filter) { ActiveStreamDecoderFilterPtr wrapper(new ActiveStreamDecoderFilter(*this, filter, dual_filter)); filter->setDecoderFilterCallbacks(*wrapper); - wrapper->moveIntoListBack(std::move(wrapper), decoder_filters_); -} - -void ConnectionManagerImpl::ActiveStream::addStreamEncoderFilterWorker( + // Note: configured decoder filters are appended to decoder_filters_. + // This means that if filters are configured in the following order (assume all three filters are + // both decoder/encoder filters): + // http_filters: + // - A + // - B + // - C + // The decoder filter chain will iterate through filters A, B, C. + LinkedList::moveIntoListBack(std::move(wrapper), decoder_filters_); +} + +void ConnectionManagerImpl::FilterManager::addStreamEncoderFilterWorker( StreamEncoderFilterSharedPtr filter, bool dual_filter) { ActiveStreamEncoderFilterPtr wrapper(new ActiveStreamEncoderFilter(*this, filter, dual_filter)); filter->setEncoderFilterCallbacks(*wrapper); - wrapper->moveIntoList(std::move(wrapper), encoder_filters_); -} - -void ConnectionManagerImpl::ActiveStream::addAccessLogHandler( + // Note: configured encoder filters are prepended to encoder_filters_. 
+ // This means that if filters are configured in the following order (assume all three filters are + // both decoder/encoder filters): + // http_filters: + // - A + // - B + // - C + // The encoder filter chain will iterate through filters C, B, A. + LinkedList::moveIntoList(std::move(wrapper), encoder_filters_); +} + +void ConnectionManagerImpl::FilterManager::addAccessLogHandler( AccessLog::InstanceSharedPtr handler) { access_log_handlers_.push_back(handler); } void ConnectionManagerImpl::ActiveStream::chargeStats(const ResponseHeaderMap& headers) { uint64_t response_code = Utility::getResponseStatus(headers); - stream_info_.response_code_ = response_code; + filter_manager_.streamInfo().response_code_ = response_code; - if (stream_info_.health_check_request_) { + if (filter_manager_.streamInfo().health_check_request_) { return; } + Upstream::HostDescriptionConstSharedPtr upstream_host = + connection_manager_.read_callbacks_->upstreamHost(); + + if (upstream_host != nullptr) { + Upstream::ClusterRequestResponseSizeStatsOptRef req_resp_stats = + upstream_host->cluster().requestResponseSizeStats(); + if (req_resp_stats.has_value()) { + req_resp_stats->get().upstream_rs_headers_size_.recordValue(headers.byteSize()); + } + } + connection_manager_.stats_.named_.downstream_rq_completed_.inc(); connection_manager_.listener_stats_.downstream_rq_completed_.inc(); if (CodeUtility::is1xx(response_code)) { @@ -741,6 +785,14 @@ const Network::Connection* ConnectionManagerImpl::ActiveStream::connection() { return &connection_manager_.read_callbacks_->connection(); } +uint32_t ConnectionManagerImpl::ActiveStream::localPort() { + auto ip = connection()->localAddress()->ip(); + if (ip == nullptr) { + return 0; + } + return ip->port(); +} + // Ordering in this function is complicated, but important. 
// // We want to do minimal work before selecting route and creating a filter @@ -751,20 +803,41 @@ const Network::Connection* ConnectionManagerImpl::ActiveStream::connection() { // can't route select properly without full headers), checking state required to // serve error responses (connection close, head requests, etc), and // modifications which may themselves affect route selection. -// -// TODO(alyssawilk) all the calls here should be audited for order priority, -// e.g. many early returns do not currently handle connection: close properly. void ConnectionManagerImpl::ActiveStream::decodeHeaders(RequestHeaderMapPtr&& headers, bool end_stream) { ScopeTrackerScopeState scope(this, connection_manager_.read_callbacks_->connection().dispatcher()); - request_headers_ = std::move(headers); + filter_manager_.setRequestHeaders(std::move(headers)); + Upstream::HostDescriptionConstSharedPtr upstream_host = + connection_manager_.read_callbacks_->upstreamHost(); + + if (upstream_host != nullptr) { + Upstream::ClusterRequestResponseSizeStatsOptRef req_resp_stats = + upstream_host->cluster().requestResponseSizeStats(); + if (req_resp_stats.has_value()) { + req_resp_stats->get().upstream_rq_headers_size_.recordValue( + filter_manager_.requestHeaders()->byteSize()); + } + } - // TODO(alyssawilk) remove this synthetic path in a follow-up PR, including - // auditing of empty path headers. We check for path because HTTP/2 connect requests may have a - // path. - if (HeaderUtility::isConnect(*request_headers_) && !request_headers_->Path()) { - request_headers_->setPath("/"); + // Both saw_connection_close_ and is_head_request_ affect local replies: set + // them as early as possible. 
+ const Protocol protocol = connection_manager_.codec_->protocol(); + const bool fixed_connection_close = + Runtime::runtimeFeatureEnabled("envoy.reloadable_features.fixed_connection_close"); + if (fixed_connection_close) { + state_.saw_connection_close_ = + HeaderUtility::shouldCloseConnection(protocol, *filter_manager_.requestHeaders()); + } + if (Http::Headers::get().MethodValues.Head == + filter_manager_.requestHeaders()->getMethodValue()) { + state_.is_head_request_ = true; + } + + if (HeaderUtility::isConnect(*filter_manager_.requestHeaders()) && + !filter_manager_.requestHeaders()->Path() && + !Runtime::runtimeFeatureEnabled("envoy.reloadable_features.stop_faking_paths")) { + filter_manager_.requestHeaders()->setPath("/"); } // We need to snap snapped_route_config_ here as it's used in mutateRequestHeaders later. @@ -780,151 +853,155 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(RequestHeaderMapPtr&& he snapped_route_config_ = connection_manager_.config_.routeConfigProvider()->config(); } - if (Http::Headers::get().MethodValues.Head == - request_headers_->Method()->value().getStringView()) { - state_.is_head_request_ = true; - } ENVOY_STREAM_LOG(debug, "request headers complete (end_stream={}):\n{}", *this, end_stream, - *request_headers_); + *filter_manager_.requestHeaders()); // We end the decode here only if the request is header only. If we convert the request to a // header only, the stream will be marked as done once a subsequent decodeData/decodeTrailers is // called with end_stream=true. - maybeEndDecode(end_stream); + filter_manager_.maybeEndDecode(end_stream); // Drop new requests when overloaded as soon as we have decoded the headers. if (connection_manager_.overload_stop_accepting_requests_ref_ == Server::OverloadActionState::Active) { // In this one special case, do not create the filter chain. If there is a risk of memory // overload it is more important to avoid unnecessary allocation than to create the filters. 
- state_.created_filter_chain_ = true; + filter_manager_.skipFilterChainCreation(); connection_manager_.stats_.named_.downstream_rq_overload_close_.inc(); - sendLocalReply(Grpc::Common::hasGrpcContentType(*request_headers_), - Http::Code::ServiceUnavailable, "envoy overloaded", nullptr, - state_.is_head_request_, absl::nullopt, + sendLocalReply(Grpc::Common::isGrpcRequestHeaders(*filter_manager_.requestHeaders()), + Http::Code::ServiceUnavailable, "envoy overloaded", nullptr, absl::nullopt, StreamInfo::ResponseCodeDetails::get().Overload); return; } - if (!connection_manager_.config_.proxy100Continue() && request_headers_->Expect() && - request_headers_->Expect()->value() == Headers::get().ExpectValues._100Continue.c_str()) { + if (!connection_manager_.config_.proxy100Continue() && + filter_manager_.requestHeaders()->Expect() && + filter_manager_.requestHeaders()->Expect()->value() == + Headers::get().ExpectValues._100Continue.c_str()) { // Note in the case Envoy is handling 100-Continue complexity, it skips the filter chain // and sends the 100-Continue directly to the encoder. chargeStats(continueHeader()); response_encoder_->encode100ContinueHeaders(continueHeader()); // Remove the Expect header so it won't be handled again upstream. - request_headers_->removeExpect(); + filter_manager_.requestHeaders()->removeExpect(); } - connection_manager_.user_agent_.initializeFromHeaders(*request_headers_, + connection_manager_.user_agent_.initializeFromHeaders(*filter_manager_.requestHeaders(), connection_manager_.stats_.prefixStatName(), connection_manager_.stats_.scope_); // Make sure we are getting a codec version we support. - Protocol protocol = connection_manager_.codec_->protocol(); if (protocol == Protocol::Http10) { // Assume this is HTTP/1.0. This is fine for HTTP/0.9 but this code will also affect any // requests with non-standard version numbers (0.9, 1.3), basically anything which is not // HTTP/1.1. 
// // The protocol may have shifted in the HTTP/1.0 case so reset it. - stream_info_.protocol(protocol); + filter_manager_.streamInfo().protocol(protocol); if (!connection_manager_.config_.http1Settings().accept_http_10_) { // Send "Upgrade Required" if HTTP/1.0 support is not explicitly configured on. - sendLocalReply(false, Code::UpgradeRequired, "", nullptr, state_.is_head_request_, - absl::nullopt, StreamInfo::ResponseCodeDetails::get().LowVersion); + sendLocalReply(false, Code::UpgradeRequired, "", nullptr, absl::nullopt, + StreamInfo::ResponseCodeDetails::get().LowVersion); return; - } else { + } else if (!fixed_connection_close) { // HTTP/1.0 defaults to single-use connections. Make sure the connection // will be closed unless Keep-Alive is present. state_.saw_connection_close_ = true; - if (request_headers_->Connection() && - absl::EqualsIgnoreCase(request_headers_->Connection()->value().getStringView(), + if (absl::EqualsIgnoreCase(filter_manager_.requestHeaders()->getConnectionValue(), Http::Headers::get().ConnectionValues.KeepAlive)) { state_.saw_connection_close_ = false; } } - } - - if (!request_headers_->Host()) { - if ((protocol == Protocol::Http10) && + if (!filter_manager_.requestHeaders()->Host() && !connection_manager_.config_.http1Settings().default_host_for_http_10_.empty()) { // Add a default host if configured to do so. - request_headers_->setHost( + filter_manager_.requestHeaders()->setHost( connection_manager_.config_.http1Settings().default_host_for_http_10_); - } else { - // Require host header. For HTTP/1.1 Host has already been translated to :authority. - sendLocalReply(Grpc::Common::hasGrpcContentType(*request_headers_), Code::BadRequest, "", - nullptr, state_.is_head_request_, absl::nullopt, - StreamInfo::ResponseCodeDetails::get().MissingHost); - return; } } + if (!filter_manager_.requestHeaders()->Host()) { + // Require host header. For HTTP/1.1 Host has already been translated to :authority. 
+ sendLocalReply(Grpc::Common::hasGrpcContentType(*filter_manager_.requestHeaders()), + Code::BadRequest, "", nullptr, absl::nullopt, + StreamInfo::ResponseCodeDetails::get().MissingHost); + return; + } + // Verify header sanity checks which should have been performed by the codec. - ASSERT(HeaderUtility::requestHeadersValid(*request_headers_).has_value() == false); - - // Currently we only support relative paths at the application layer. We expect the codec to have - // broken the path into pieces if applicable. NOTE: Currently the HTTP/1.1 codec only does this - // when the allow_absolute_url flag is enabled on the HCM. - // https://tools.ietf.org/html/rfc7230#section-5.3 We also need to check for the existence of - // :path because CONNECT does not have a path, and we don't support that currently. - if (!request_headers_->Path() || request_headers_->Path()->value().getStringView().empty() || - request_headers_->Path()->value().getStringView()[0] != '/') { - const bool has_path = - request_headers_->Path() && !request_headers_->Path()->value().getStringView().empty(); + ASSERT(HeaderUtility::requestHeadersValid(*filter_manager_.requestHeaders()).has_value() == + false); + + // Check for the existence of the :path header for non-CONNECT requests, or present-but-empty + // :path header for CONNECT requests. We expect the codec to have broken the path into pieces if + // applicable. NOTE: Currently the HTTP/1.1 codec only does this when the allow_absolute_url flag + // is enabled on the HCM. + if ((!HeaderUtility::isConnect(*filter_manager_.requestHeaders()) || + filter_manager_.requestHeaders()->Path()) && + filter_manager_.requestHeaders()->getPathValue().empty()) { + sendLocalReply(Grpc::Common::hasGrpcContentType(*filter_manager_.requestHeaders()), + Code::NotFound, "", nullptr, absl::nullopt, + StreamInfo::ResponseCodeDetails::get().MissingPath); + return; + } + + // Currently we only support relative paths at the application layer. 
+ if (!filter_manager_.requestHeaders()->getPathValue().empty() && + filter_manager_.requestHeaders()->getPathValue()[0] != '/') { connection_manager_.stats_.named_.downstream_rq_non_relative_path_.inc(); - sendLocalReply(Grpc::Common::hasGrpcContentType(*request_headers_), Code::NotFound, "", nullptr, - state_.is_head_request_, absl::nullopt, - has_path ? StreamInfo::ResponseCodeDetails::get().AbsolutePath - : StreamInfo::ResponseCodeDetails::get().MissingPath); + sendLocalReply(Grpc::Common::hasGrpcContentType(*filter_manager_.requestHeaders()), + Code::NotFound, "", nullptr, absl::nullopt, + StreamInfo::ResponseCodeDetails::get().AbsolutePath); return; } // Path sanitization should happen before any path access other than the above sanity check. - if (!ConnectionManagerUtility::maybeNormalizePath(*request_headers_, + if (!ConnectionManagerUtility::maybeNormalizePath(*filter_manager_.requestHeaders(), connection_manager_.config_)) { - sendLocalReply(Grpc::Common::hasGrpcContentType(*request_headers_), Code::BadRequest, "", - nullptr, state_.is_head_request_, absl::nullopt, + sendLocalReply(Grpc::Common::hasGrpcContentType(*filter_manager_.requestHeaders()), + Code::BadRequest, "", nullptr, absl::nullopt, StreamInfo::ResponseCodeDetails::get().PathNormalizationFailed); return; } - if (protocol == Protocol::Http11 && request_headers_->Connection() && - absl::EqualsIgnoreCase(request_headers_->Connection()->value().getStringView(), + ConnectionManagerUtility::maybeNormalizeHost(*filter_manager_.requestHeaders(), + connection_manager_.config_, localPort()); + + if (!fixed_connection_close && protocol == Protocol::Http11 && + absl::EqualsIgnoreCase(filter_manager_.requestHeaders()->getConnectionValue(), Http::Headers::get().ConnectionValues.Close)) { state_.saw_connection_close_ = true; } // Note: Proxy-Connection is not a standard header, but is supported here // since it is supported by http-parser the underlying parser for http // requests. 
- if (protocol < Protocol::Http2 && !state_.saw_connection_close_ && - request_headers_->ProxyConnection() && - absl::EqualsIgnoreCase(request_headers_->ProxyConnection()->value().getStringView(), + if (!fixed_connection_close && protocol < Protocol::Http2 && !state_.saw_connection_close_ && + absl::EqualsIgnoreCase(filter_manager_.requestHeaders()->getProxyConnectionValue(), Http::Headers::get().ConnectionValues.Close)) { state_.saw_connection_close_ = true; } if (!state_.is_internally_created_) { // Only sanitize headers on first pass. // Modify the downstream remote address depending on configuration and headers. - stream_info_.setDownstreamRemoteAddress(ConnectionManagerUtility::mutateRequestHeaders( - *request_headers_, connection_manager_.read_callbacks_->connection(), - connection_manager_.config_, *snapped_route_config_, connection_manager_.local_info_)); + filter_manager_.streamInfo().setDownstreamRemoteAddress( + ConnectionManagerUtility::mutateRequestHeaders( + *filter_manager_.requestHeaders(), connection_manager_.read_callbacks_->connection(), + connection_manager_.config_, *snapped_route_config_, connection_manager_.local_info_)); } - ASSERT(stream_info_.downstreamRemoteAddress() != nullptr); + ASSERT(filter_manager_.streamInfo().downstreamRemoteAddress() != nullptr); ASSERT(!cached_route_); refreshCachedRoute(); if (!state_.is_internally_created_) { // Only mutate tracing headers on first pass. 
ConnectionManagerUtility::mutateTracingRequestHeader( - *request_headers_, connection_manager_.runtime_, connection_manager_.config_, - cached_route_.value().get()); + *filter_manager_.requestHeaders(), connection_manager_.runtime_, + connection_manager_.config_, cached_route_.value().get()); } - stream_info_.setRequestHeaders(*request_headers_); + filter_manager_.streamInfo().setRequestHeaders(*filter_manager_.requestHeaders()); - const bool upgrade_rejected = createFilterChain() == false; + const bool upgrade_rejected = filter_manager_.createFilterChain() == false; // TODO if there are no filters when starting a filter iteration, the connection manager // should return 404. The current returns no response if there is no router filter. @@ -937,8 +1014,8 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(RequestHeaderMapPtr&& he // contains a smuggled HTTP request. state_.saw_connection_close_ = true; connection_manager_.stats_.named_.downstream_rq_ws_on_non_ws_route_.inc(); - sendLocalReply(Grpc::Common::hasGrpcContentType(*request_headers_), Code::Forbidden, "", - nullptr, state_.is_head_request_, absl::nullopt, + sendLocalReply(Grpc::Common::hasGrpcContentType(*filter_manager_.requestHeaders()), + Code::Forbidden, "", nullptr, absl::nullopt, StreamInfo::ResponseCodeDetails::get().UpgradeFailed); return; } @@ -948,7 +1025,10 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(RequestHeaderMapPtr&& he if (hasCachedRoute()) { const Router::RouteEntry* route_entry = cached_route_.value()->routeEntry(); if (route_entry != nullptr && route_entry->idleTimeout()) { + // TODO(mattklein123): Technically if the cached route changes, we should also see if the + // route idle timeout has changed and update the value. idle_timeout_ms_ = route_entry->idleTimeout().value(); + response_encoder_->getStream().setFlushTimeout(idle_timeout_ms_); if (idle_timeout_ms_.count()) { // If we have a route-level idle timeout but no global stream idle timeout, create a timer. 
if (stream_idle_timer_ == nullptr) { @@ -970,20 +1050,20 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(RequestHeaderMapPtr&& he traceRequest(); } - decodeHeaders(nullptr, *request_headers_, end_stream); + filter_manager_.decodeHeaders(*filter_manager_.requestHeaders(), end_stream); // Reset it here for both global and overridden cases. resetIdleTimer(); } void ConnectionManagerImpl::ActiveStream::traceRequest() { - Tracing::Decision tracing_decision = - Tracing::HttpTracerUtility::isTracing(stream_info_, *request_headers_); + Tracing::Decision tracing_decision = Tracing::HttpTracerUtility::isTracing( + filter_manager_.streamInfo(), *filter_manager_.requestHeaders()); ConnectionManagerImpl::chargeTracingStats(tracing_decision.reason, connection_manager_.config_.tracingStats()); - active_span_ = connection_manager_.tracer().startSpan(*this, *request_headers_, stream_info_, - tracing_decision); + active_span_ = connection_manager_.tracer().startSpan( + *this, *filter_manager_.requestHeaders(), filter_manager_.streamInfo(), tracing_decision); if (!active_span_) { return; @@ -1012,10 +1092,11 @@ void ConnectionManagerImpl::ActiveStream::traceRequest() { // propagation enabled) as a request header to enable the receiving service to use it in its // server span. if (decorated_operation_ && state_.decorated_propagate_) { - request_headers_->setEnvoyDecoratorOperation(*decorated_operation_); + filter_manager_.requestHeaders()->setEnvoyDecoratorOperation(*decorated_operation_); } } else { - const HeaderEntry* req_operation_override = request_headers_->EnvoyDecoratorOperation(); + const HeaderEntry* req_operation_override = + filter_manager_.requestHeaders()->EnvoyDecoratorOperation(); // For ingress (inbound) requests, if a decorator operation name has been provided, it // should be used to override the active span's operation. 
@@ -1028,14 +1109,27 @@ void ConnectionManagerImpl::ActiveStream::traceRequest() { decorated_operation_ = nullptr; } // Remove header so not propagated to service - request_headers_->removeEnvoyDecoratorOperation(); + filter_manager_.requestHeaders()->removeEnvoyDecoratorOperation(); } } } -void ConnectionManagerImpl::ActiveStream::decodeHeaders(ActiveStreamDecoderFilter* filter, - RequestHeaderMap& headers, - bool end_stream) { +void ConnectionManagerImpl::FilterManager::maybeContinueDecoding( + const std::list::iterator& continue_data_entry) { + if (continue_data_entry != decoder_filters_.end()) { + // We use the continueDecoding() code since it will correctly handle not calling + // decodeHeaders() again. Fake setting StopSingleIteration since the continueDecoding() code + // expects it. + ASSERT(buffered_request_data_); + (*continue_data_entry)->iteration_state_ = + ActiveStreamFilterBase::IterationState::StopSingleIteration; + (*continue_data_entry)->continueDecoding(); + } +} + +void ConnectionManagerImpl::FilterManager::decodeHeaders(ActiveStreamDecoderFilter* filter, + RequestHeaderMap& headers, + bool end_stream) { // Headers filter iteration should always start with the next filter if available. std::list::iterator entry = commonDecodePrefix(filter, FilterIterationStartState::AlwaysStartFromNext); @@ -1050,7 +1144,7 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(ActiveStreamDecoderFilte ASSERT(!(status == FilterHeadersStatus::ContinueAndEndStream && (*entry)->end_stream_)); state_.filter_call_state_ &= ~FilterCallState::DecodeHeaders; - ENVOY_STREAM_LOG(trace, "decode headers called: filter={} status={}", *this, + ENVOY_STREAM_LOG(trace, "decode headers called: filter={} status={}", active_stream_, static_cast((*entry).get()), static_cast(status)); const bool new_metadata_added = processNewlyAddedMetadata(); @@ -1059,8 +1153,9 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(ActiveStreamDecoderFilte // after the new metadata. 
if ((*entry)->end_stream_ && new_metadata_added && !buffered_request_data_) { Buffer::OwnedImpl empty_data(""); - ENVOY_STREAM_LOG( - trace, "inserting an empty data frame for end_stream due metadata being added.", *this); + ENVOY_STREAM_LOG(trace, + "inserting an empty data frame for end_stream due metadata being added.", + active_stream_); // Metadata frame doesn't carry end of stream bit. We need an empty data frame to end the // stream. addDecodedData(*((*entry).get()), empty_data, true); @@ -1072,6 +1167,7 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(ActiveStreamDecoderFilte // Stop iteration IFF this is not the last filter. If it is the last filter, continue with // processing since we need to handle the case where a terminal filter wants to buffer, but // a previous filter has added body. + maybeContinueDecoding(continue_data_entry); return; } @@ -1082,15 +1178,7 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(ActiveStreamDecoderFilte } } - if (continue_data_entry != decoder_filters_.end()) { - // We use the continueDecoding() code since it will correctly handle not calling - // decodeHeaders() again. Fake setting StopSingleIteration since the continueDecoding() code - // expects it. 
- ASSERT(buffered_request_data_); - (*continue_data_entry)->iteration_state_ = - ActiveStreamFilterBase::IterationState::StopSingleIteration; - (*continue_data_entry)->continueDecoding(); - } + maybeContinueDecoding(continue_data_entry); if (end_stream) { disarmRequestTimeout(); @@ -1100,18 +1188,19 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(ActiveStreamDecoderFilte void ConnectionManagerImpl::ActiveStream::decodeData(Buffer::Instance& data, bool end_stream) { ScopeTrackerScopeState scope(this, connection_manager_.read_callbacks_->connection().dispatcher()); - maybeEndDecode(end_stream); - stream_info_.addBytesReceived(data.length()); + filter_manager_.maybeEndDecode(end_stream); + filter_manager_.streamInfo().addBytesReceived(data.length()); - decodeData(nullptr, data, end_stream, FilterIterationStartState::CanStartFromCurrent); + filter_manager_.decodeData(data, end_stream); } -void ConnectionManagerImpl::ActiveStream::decodeData( +void ConnectionManagerImpl::FilterManager::decodeData( ActiveStreamDecoderFilter* filter, Buffer::Instance& data, bool end_stream, FilterIterationStartState filter_iteration_start_state) { - ScopeTrackerScopeState scope(this, - connection_manager_.read_callbacks_->connection().dispatcher()); - resetIdleTimer(); + ScopeTrackerScopeState scope( + &active_stream_, + active_stream_.connection_manager_.read_callbacks_->connection().dispatcher()); + active_stream_.resetIdleTimer(); // If we previously decided to decode only the headers, do nothing here. 
if (state_.decoding_headers_only_) { @@ -1189,7 +1278,7 @@ void ConnectionManagerImpl::ActiveStream::decodeData( if (end_stream) { state_.filter_call_state_ &= ~FilterCallState::LastDataFrame; } - ENVOY_STREAM_LOG(trace, "decode data called: filter={} status={}", *this, + ENVOY_STREAM_LOG(trace, "decode data called: filter={} status={}", active_stream_, static_cast((*entry).get()), static_cast(status)); processNewlyAddedMetadata(); @@ -1219,19 +1308,19 @@ void ConnectionManagerImpl::ActiveStream::decodeData( } } -RequestTrailerMap& ConnectionManagerImpl::ActiveStream::addDecodedTrailers() { +RequestTrailerMap& ConnectionManagerImpl::FilterManager::addDecodedTrailers() { // Trailers can only be added during the last data frame (i.e. end_stream = true). ASSERT(state_.filter_call_state_ & FilterCallState::LastDataFrame); // Trailers can only be added once. ASSERT(!request_trailers_); - request_trailers_ = std::make_unique(); + request_trailers_ = RequestTrailerMapImpl::create(); return *request_trailers_; } -void ConnectionManagerImpl::ActiveStream::addDecodedData(ActiveStreamDecoderFilter& filter, - Buffer::Instance& data, bool streaming) { +void ConnectionManagerImpl::FilterManager::addDecodedData(ActiveStreamDecoderFilter& filter, + Buffer::Instance& data, bool streaming) { if (state_.filter_call_state_ == 0 || (state_.filter_call_state_ & FilterCallState::DecodeHeaders) || (state_.filter_call_state_ & FilterCallState::DecodeData) || @@ -1252,7 +1341,7 @@ void ConnectionManagerImpl::ActiveStream::addDecodedData(ActiveStreamDecoderFilt } } -MetadataMapVector& ConnectionManagerImpl::ActiveStream::addDecodedMetadata() { +MetadataMapVector& ConnectionManagerImpl::FilterManager::addDecodedMetadata() { return *getRequestMetadataMapVector(); } @@ -1260,13 +1349,12 @@ void ConnectionManagerImpl::ActiveStream::decodeTrailers(RequestTrailerMapPtr&& ScopeTrackerScopeState scope(this, connection_manager_.read_callbacks_->connection().dispatcher()); resetIdleTimer(); - 
maybeEndDecode(true); - request_trailers_ = std::move(trailers); - decodeTrailers(nullptr, *request_trailers_); + filter_manager_.maybeEndDecode(true); + filter_manager_.decodeTrailers(std::move(trailers)); } -void ConnectionManagerImpl::ActiveStream::decodeTrailers(ActiveStreamDecoderFilter* filter, - RequestTrailerMap& trailers) { +void ConnectionManagerImpl::FilterManager::decodeTrailers(ActiveStreamDecoderFilter* filter, + RequestTrailerMap& trailers) { // If we previously decided to decode only the headers, do nothing here. if (state_.decoding_headers_only_) { return; @@ -1293,7 +1381,7 @@ void ConnectionManagerImpl::ActiveStream::decodeTrailers(ActiveStreamDecoderFilt (*entry)->handle_->decodeComplete(); (*entry)->end_stream_ = true; state_.filter_call_state_ &= ~FilterCallState::DecodeTrailers; - ENVOY_STREAM_LOG(trace, "decode trailers called: filter={} status={}", *this, + ENVOY_STREAM_LOG(trace, "decode trailers called: filter={} status={}", active_stream_, static_cast((*entry).get()), static_cast(status)); processNewlyAddedMetadata(); @@ -1310,11 +1398,11 @@ void ConnectionManagerImpl::ActiveStream::decodeMetadata(MetadataMapPtr&& metada // After going through filters, the ownership of metadata_map will be passed to terminal filter. // The terminal filter may encode metadata_map to the next hop immediately or store metadata_map // and encode later when connection pool is ready. - decodeMetadata(nullptr, *metadata_map); + filter_manager_.decodeMetadata(*metadata_map); } -void ConnectionManagerImpl::ActiveStream::decodeMetadata(ActiveStreamDecoderFilter* filter, - MetadataMap& metadata_map) { +void ConnectionManagerImpl::FilterManager::decodeMetadata(ActiveStreamDecoderFilter* filter, + MetadataMap& metadata_map) { // Filter iteration may start at the current filter. 
std::list::iterator entry = commonDecodePrefix(filter, FilterIterationStartState::CanStartFromCurrent); @@ -1331,29 +1419,29 @@ void ConnectionManagerImpl::ActiveStream::decodeMetadata(ActiveStreamDecoderFilt } FilterMetadataStatus status = (*entry)->handle_->decodeMetadata(metadata_map); - ENVOY_STREAM_LOG(trace, "decode metadata called: filter={} status={}, metadata: {}", *this, - static_cast((*entry).get()), static_cast(status), - metadata_map); + ENVOY_STREAM_LOG(trace, "decode metadata called: filter={} status={}, metadata: {}", + active_stream_, static_cast((*entry).get()), + static_cast(status), metadata_map); } } -void ConnectionManagerImpl::ActiveStream::maybeEndDecode(bool end_stream) { +void ConnectionManagerImpl::FilterManager::maybeEndDecode(bool end_stream) { ASSERT(!state_.remote_complete_); state_.remote_complete_ = end_stream; if (end_stream) { stream_info_.onLastDownstreamRxByteReceived(); - ENVOY_STREAM_LOG(debug, "request end stream", *this); + ENVOY_STREAM_LOG(debug, "request end stream", active_stream_); } } -void ConnectionManagerImpl::ActiveStream::disarmRequestTimeout() { - if (request_timer_) { - request_timer_->disableTimer(); +void ConnectionManagerImpl::FilterManager::disarmRequestTimeout() { + if (active_stream_.request_timer_) { + active_stream_.request_timer_->disableTimer(); } } std::list::iterator -ConnectionManagerImpl::ActiveStream::commonEncodePrefix( +ConnectionManagerImpl::FilterManager::commonEncodePrefix( ActiveStreamEncoderFilter* filter, bool end_stream, FilterIterationStartState filter_iteration_start_state) { // Only do base state setting on the initial call. 
Subsequent calls for filtering do not touch @@ -1374,7 +1462,7 @@ ConnectionManagerImpl::ActiveStream::commonEncodePrefix( } std::list::iterator -ConnectionManagerImpl::ActiveStream::commonDecodePrefix( +ConnectionManagerImpl::FilterManager::commonDecodePrefix( ActiveStreamDecoderFilter* filter, FilterIterationStartState filter_iteration_start_state) { if (!filter) { return decoder_filters_.begin(); @@ -1398,12 +1486,10 @@ void ConnectionManagerImpl::startDrainSequence() { } void ConnectionManagerImpl::ActiveStream::snapScopedRouteConfig() { - ASSERT(request_headers_ != nullptr, - "Try to snap scoped route config when there is no request headers."); - // NOTE: if a RDS subscription hasn't got a RouteConfiguration back, a Router::NullConfigImpl is // returned, in that case we let it pass. - snapped_route_config_ = snapped_scoped_routes_config_->getRouteConfig(*request_headers_); + snapped_route_config_ = + snapped_scoped_routes_config_->getRouteConfig(*filter_manager_.requestHeaders()); if (snapped_route_config_ == nullptr) { ENVOY_STREAM_LOG(trace, "can't find SRDS scope.", *this); // TODO(stevenzzzz): Consider to pass an error message to router filter, so that it can @@ -1412,29 +1498,32 @@ void ConnectionManagerImpl::ActiveStream::snapScopedRouteConfig() { } } -void ConnectionManagerImpl::ActiveStream::refreshCachedRoute() { +void ConnectionManagerImpl::ActiveStream::refreshCachedRoute() { refreshCachedRoute(nullptr); } + +void ConnectionManagerImpl::ActiveStream::refreshCachedRoute(const Router::RouteCallback& cb) { Router::RouteConstSharedPtr route; - if (request_headers_ != nullptr) { + if (filter_manager_.requestHeaders() != nullptr) { if (connection_manager_.config_.isRoutable() && connection_manager_.config_.scopedRouteConfigProvider() != nullptr) { // NOTE: re-select scope as well in case the scope key header has been changed by a filter. 
snapScopedRouteConfig(); } if (snapped_route_config_ != nullptr) { - route = snapped_route_config_->route(*request_headers_, stream_info_, stream_id_); + route = snapped_route_config_->route(cb, *filter_manager_.requestHeaders(), + filter_manager_.streamInfo(), stream_id_); } } - stream_info_.route_entry_ = route ? route->routeEntry() : nullptr; + filter_manager_.streamInfo().route_entry_ = route ? route->routeEntry() : nullptr; cached_route_ = std::move(route); - if (nullptr == stream_info_.route_entry_) { + if (nullptr == filter_manager_.streamInfo().route_entry_) { cached_cluster_info_ = nullptr; } else { - Upstream::ThreadLocalCluster* local_cluster = - connection_manager_.cluster_manager_.get(stream_info_.route_entry_->clusterName()); + Upstream::ThreadLocalCluster* local_cluster = connection_manager_.cluster_manager_.get( + filter_manager_.streamInfo().route_entry_->clusterName()); cached_cluster_info_ = (nullptr == local_cluster) ? nullptr : local_cluster->info(); } - stream_info_.setUpstreamClusterInfo(cached_cluster_info_.value()); + filter_manager_.streamInfo().setUpstreamClusterInfo(cached_cluster_info_.value()); refreshCachedTracingCustomTags(); } @@ -1465,9 +1554,8 @@ void ConnectionManagerImpl::ActiveStream::refreshCachedTracingCustomTags() { void ConnectionManagerImpl::ActiveStream::requestRouteConfigUpdate( Event::Dispatcher& thread_local_dispatcher, Http::RouteConfigUpdatedCallbackSharedPtr route_config_updated_cb) { - ASSERT(!request_headers_->Host()->value().empty()); - const auto& host_header = - absl::AsciiStrToLower(request_headers_->Host()->value().getStringView()); + ASSERT(!filter_manager_.requestHeaders()->Host()->value().empty()); + const auto& host_header = absl::AsciiStrToLower(filter_manager_.requestHeaders()->getHostValue()); route_config_update_requester_->requestRouteConfigUpdate(host_header, thread_local_dispatcher, std::move(route_config_updated_cb)); } @@ -1481,40 +1569,122 @@ absl::optional ConnectionManagerImpl::ActiveStream } 
void ConnectionManagerImpl::ActiveStream::sendLocalReply( + bool is_grpc_request, Code code, absl::string_view body, + const std::function& modify_headers, + const absl::optional grpc_status, absl::string_view details) { + const bool is_head_request = state_.is_head_request_; + filter_manager_.streamInfo().setResponseCodeDetails(details); + + // The BadRequest error code indicates there has been a messaging error. + if (Runtime::runtimeFeatureEnabled( + "envoy.reloadable_features.hcm_stream_error_on_invalid_message") && + !connection_manager_.config_.streamErrorOnInvalidHttpMessaging() && + code == Http::Code::BadRequest && connection_manager_.codec_->protocol() < Protocol::Http2) { + state_.saw_connection_close_ = true; + } + + if (filter_manager_.responseHeaders() == nullptr) { + // If the response has not started at all, send the response through the filter chain. + filter_manager_.sendLocalReplyViaFilterChain(is_grpc_request, code, body, modify_headers, + is_head_request, grpc_status, details); + } else if (!state_.non_100_response_headers_encoded_) { + ENVOY_STREAM_LOG(debug, "Sending local reply with details {} directly to the encoder", *this, + details); + // In this case, at least the header and possibly the body has started + // processing through the filter chain, but no non-informational headers + // have been sent downstream. To ensure that filters don't get their + // state machine screwed up, bypass the filter chain and send the local + // reply directly to the codec. + // + filter_manager_.sendDirectLocalReply(code, body, modify_headers, state_.is_head_request_, + grpc_status); + } else { + filter_manager_.streamInfo().setResponseCodeDetails(details); + // If we land in this branch, response headers have already been sent to the client. + // All we can do at this point is reset the stream. + ENVOY_STREAM_LOG(debug, "Resetting stream due to {}. 
Prior headers have already been sent", + *this, details); + connection_manager_.doEndStream(*this); + } +} + +void ConnectionManagerImpl::FilterManager::sendLocalReplyViaFilterChain( bool is_grpc_request, Code code, absl::string_view body, const std::function& modify_headers, bool is_head_request, const absl::optional grpc_status, absl::string_view details) { - ENVOY_STREAM_LOG(debug, "Sending local reply with details {}", *this, details); + ENVOY_STREAM_LOG(debug, "Sending local reply with details {}", active_stream_, details); ASSERT(response_headers_ == nullptr); // For early error handling, do a best-effort attempt to create a filter chain - // to ensure access logging. - if (!state_.created_filter_chain_) { - createFilterChain(); - } - stream_info_.setResponseCodeDetails(details); + // to ensure access logging. If the filter chain already exists this will be + // a no-op. + createFilterChain(); + Utility::sendLocalReply( - is_grpc_request, - [this, modify_headers](ResponseHeaderMapPtr&& headers, bool end_stream) -> void { - if (modify_headers != nullptr) { - modify_headers(*headers); - } - response_headers_ = std::move(headers); - // TODO: Start encoding from the last decoder filter that saw the - // request instead. - encodeHeaders(nullptr, *response_headers_, end_stream); - }, - [this](Buffer::Instance& data, bool end_stream) -> void { - // TODO: Start encoding from the last decoder filter that saw the - // request instead. 
- encodeData(nullptr, data, end_stream, FilterIterationStartState::CanStartFromCurrent); - }, - state_.destroyed_, code, body, grpc_status, is_head_request); + state_.destroyed_, + Utility::EncodeFunctions{ + [this](ResponseHeaderMap& response_headers, Code& code, std::string& body, + absl::string_view& content_type) -> void { + active_stream_.connection_manager_.config_.localReply().rewrite( + request_headers_.get(), response_headers, stream_info_, code, body, content_type); + }, + [this, modify_headers](ResponseHeaderMapPtr&& headers, bool end_stream) -> void { + if (modify_headers != nullptr) { + modify_headers(*headers); + } + response_headers_ = std::move(headers); + // TODO: Start encoding from the last decoder filter that saw the + // request instead. + encodeHeaders(nullptr, *response_headers_, end_stream); + }, + [this](Buffer::Instance& data, bool end_stream) -> void { + // TODO: Start encoding from the last decoder filter that saw the + // request instead. + encodeData(nullptr, data, end_stream, + FilterManager::FilterIterationStartState::CanStartFromCurrent); + }}, + Utility::LocalReplyData{is_grpc_request, code, body, grpc_status, is_head_request}); +} + +void ConnectionManagerImpl::FilterManager::sendDirectLocalReply( + Code code, absl::string_view body, + const std::function& modify_headers, bool is_head_request, + const absl::optional grpc_status) { + // Make sure we won't end up with nested watermark calls from the body buffer. 
+ state_.encoder_filters_streaming_ = true; + Http::Utility::sendLocalReply( + state_.destroyed_, + Utility::EncodeFunctions{ + [&](ResponseHeaderMap& response_headers, Code& code, std::string& body, + absl::string_view& content_type) -> void { + local_reply_.rewrite(request_headers_.get(), response_headers, stream_info_, code, body, + content_type); + }, + [&](ResponseHeaderMapPtr&& response_headers, bool end_stream) -> void { + if (modify_headers != nullptr) { + modify_headers(*response_headers); + } + + // Move the response headers into the FilterManager to make sure they're visible to + // access logs. + response_headers_ = std::move(response_headers); + filter_manager_callbacks_.encodeHeaders(*response_headers_, end_stream); + maybeEndEncode(end_stream); + }, + [&](Buffer::Instance& data, bool end_stream) -> void { + filter_manager_callbacks_.encodeData(data, end_stream); + maybeEndEncode(end_stream); + }}, + Utility::LocalReplyData{Grpc::Common::hasGrpcContentType(*request_headers_), code, body, + grpc_status, is_head_request}); + maybeEndEncode(state_.local_complete_); } -void ConnectionManagerImpl::ActiveStream::encode100ContinueHeaders( +void ConnectionManagerImpl::FilterManager::encode100ContinueHeaders( ActiveStreamEncoderFilter* filter, ResponseHeaderMap& headers) { - resetIdleTimer(); - ASSERT(connection_manager_.config_.proxy100Continue()); + active_stream_.resetIdleTimer(); + ASSERT(active_stream_.connection_manager_.config_.proxy100Continue()); + // The caller must guarantee that encode100ContinueHeaders() is invoked at most once. + ASSERT(!state_.has_continue_headers_ || filter != nullptr); // Make sure commonContinue continues encode100ContinueHeaders. 
state_.has_continue_headers_ = true; @@ -1530,32 +1700,54 @@ void ConnectionManagerImpl::ActiveStream::encode100ContinueHeaders( state_.filter_call_state_ |= FilterCallState::Encode100ContinueHeaders; FilterHeadersStatus status = (*entry)->handle_->encode100ContinueHeaders(headers); state_.filter_call_state_ &= ~FilterCallState::Encode100ContinueHeaders; - ENVOY_STREAM_LOG(trace, "encode 100 continue headers called: filter={} status={}", *this, - static_cast((*entry).get()), static_cast(status)); + ENVOY_STREAM_LOG(trace, "encode 100 continue headers called: filter={} status={}", + active_stream_, static_cast((*entry).get()), + static_cast(status)); if (!(*entry)->commonHandleAfter100ContinueHeadersCallback(status)) { return; } } + filter_manager_callbacks_.encode100ContinueHeaders(headers); +} + +void ConnectionManagerImpl::ActiveStream::encode100ContinueHeaders( + ResponseHeaderMap& response_headers) { // Strip the T-E headers etc. Defer other header additions as well as drain-close logic to the // continuation headers. - ConnectionManagerUtility::mutateResponseHeaders(headers, request_headers_.get(), - connection_manager_.config_.requestIDExtension(), - EMPTY_STRING); + ConnectionManagerUtility::mutateResponseHeaders(response_headers, + filter_manager_.requestHeaders(), + connection_manager_.config_, EMPTY_STRING); // Count both the 1xx and follow-up response code in stats. - chargeStats(headers); + chargeStats(response_headers); - ENVOY_STREAM_LOG(debug, "encoding 100 continue headers via codec:\n{}", *this, headers); + ENVOY_STREAM_LOG(debug, "encoding 100 continue headers via codec:\n{}", *this, response_headers); // Now actually encode via the codec. 
- response_encoder_->encode100ContinueHeaders(headers); + response_encoder_->encode100ContinueHeaders(response_headers); } -void ConnectionManagerImpl::ActiveStream::encodeHeaders(ActiveStreamEncoderFilter* filter, - ResponseHeaderMap& headers, - bool end_stream) { - resetIdleTimer(); +void ConnectionManagerImpl::FilterManager::maybeContinueEncoding( + const std::list::iterator& continue_data_entry) { + if (continue_data_entry != encoder_filters_.end()) { + // We use the continueEncoding() code since it will correctly handle not calling + // encodeHeaders() again. Fake setting StopSingleIteration since the continueEncoding() code + // expects it. + ASSERT(buffered_response_data_); + (*continue_data_entry)->iteration_state_ = + ActiveStreamFilterBase::IterationState::StopSingleIteration; + (*continue_data_entry)->continueEncoding(); + } +} + +void ConnectionManagerImpl::FilterManager::encodeHeaders(ActiveStreamEncoderFilter* filter, + ResponseHeaderMap& headers, + bool end_stream) { + // See encodeHeaders() comments in include/envoy/http/filter.h for why the 1xx precondition holds. + ASSERT(!CodeUtility::is1xx(Utility::getResponseStatus(headers)) || + Utility::getResponseStatus(headers) == enumToInt(Http::Code::SwitchingProtocols)); + active_stream_.resetIdleTimer(); disarmRequestTimeout(); // Headers filter iteration should always start with the next filter if available. 
@@ -1573,7 +1765,7 @@ void ConnectionManagerImpl::ActiveStream::encodeHeaders(ActiveStreamEncoderFilte (*entry)->handle_->encodeComplete(); } state_.filter_call_state_ &= ~FilterCallState::EncodeHeaders; - ENVOY_STREAM_LOG(trace, "encode headers called: filter={} status={}", *this, + ENVOY_STREAM_LOG(trace, "encode headers called: filter={} status={}", active_stream_, static_cast((*entry).get()), static_cast(status)); (*entry)->encode_headers_called_ = true; @@ -1587,6 +1779,9 @@ void ConnectionManagerImpl::ActiveStream::encodeHeaders(ActiveStreamEncoderFilte } if (!continue_iteration) { + if (!(*entry)->end_stream_) { + maybeContinueEncoding(continue_data_entry); + } return; } @@ -1599,23 +1794,29 @@ void ConnectionManagerImpl::ActiveStream::encodeHeaders(ActiveStreamEncoderFilte const bool modified_end_stream = state_.encoding_headers_only_ || (end_stream && continue_data_entry == encoder_filters_.end()); - encodeHeadersInternal(headers, modified_end_stream); + filter_manager_callbacks_.encodeHeaders(headers, modified_end_stream); + maybeEndEncode(modified_end_stream); - if (continue_data_entry != encoder_filters_.end() && !modified_end_stream) { - // We use the continueEncoding() code since it will correctly handle not calling - // encodeHeaders() again. Fake setting StopSingleIteration since the continueEncoding() code - // expects it. - ASSERT(buffered_response_data_); - (*continue_data_entry)->iteration_state_ = - ActiveStreamFilterBase::IterationState::StopSingleIteration; - (*continue_data_entry)->continueEncoding(); + if (!modified_end_stream) { + maybeContinueEncoding(continue_data_entry); } } -void ConnectionManagerImpl::ActiveStream::encodeHeadersInternal(ResponseHeaderMap& headers, - bool end_stream) { +void ConnectionManagerImpl::ActiveStream::encodeHeaders(ResponseHeaderMap& headers, + bool end_stream) { // Base headers. 
- connection_manager_.config_.dateProvider().setDateHeader(headers); + + // By default, always preserve the upstream date response header if present. If we choose to + // overwrite the upstream date unconditionally (a previous behavior), only do so if the response + // is not from cache + const bool should_preserve_upstream_date = + Runtime::runtimeFeatureEnabled("envoy.reloadable_features.preserve_upstream_date") || + filter_manager_.streamInfo().hasResponseFlag( + StreamInfo::ResponseFlag::ResponseFromCacheFilter); + if (!should_preserve_upstream_date || !headers.Date()) { + connection_manager_.config_.dateProvider().setDateHeader(headers); + } + // Following setReference() is safe because serverName() is constant for the life of the listener. const auto transformation = connection_manager_.config_.serverHeaderTransformation(); if (transformation == ConnectionManagerConfig::HttpConnectionManagerProto::OVERWRITE || @@ -1623,8 +1824,8 @@ void ConnectionManagerImpl::ActiveStream::encodeHeadersInternal(ResponseHeaderMa headers.Server() == nullptr)) { headers.setReferenceServer(connection_manager_.config_.serverName()); } - ConnectionManagerUtility::mutateResponseHeaders(headers, request_headers_.get(), - connection_manager_.config_.requestIDExtension(), + ConnectionManagerUtility::mutateResponseHeaders(headers, filter_manager_.requestHeaders(), + connection_manager_.config_, connection_manager_.config_.via()); // See if we want to drain/close the connection. Send the go away frame prior to encoding the @@ -1668,7 +1869,7 @@ void ConnectionManagerImpl::ActiveStream::encodeHeadersInternal(ResponseHeaderMa // If we are destroying a stream before remote is complete and the connection does not support // multiplexing, we should disconnect since we don't want to wait around for the request to // finish. 
- if (!state_.remote_complete_) { + if (!filter_manager_.remoteComplete()) { if (connection_manager_.codec_->protocol() < Protocol::Http2) { connection_manager_.drain_state_ = DrainState::Closing; } @@ -1679,9 +1880,11 @@ void ConnectionManagerImpl::ActiveStream::encodeHeadersInternal(ResponseHeaderMa if (connection_manager_.drain_state_ != DrainState::NotDraining && connection_manager_.codec_->protocol() < Protocol::Http2) { // If the connection manager is draining send "Connection: Close" on HTTP/1.1 connections. - // Do not do this for H2 (which drains via GOAWAY) or Upgrade (as the upgrade + // Do not do this for H2 (which drains via GOAWAY) or Upgrade or CONNECT (as the // payload is no longer HTTP/1.1) - if (!Utility::isUpgrade(headers)) { + if (!Utility::isUpgrade(headers) && + !HeaderUtility::isConnectResponse(filter_manager_.requestHeaders(), + *filter_manager_.responseHeaders())) { headers.setReferenceConnection(Headers::get().ConnectionValues.Close); } } @@ -1712,20 +1915,21 @@ void ConnectionManagerImpl::ActiveStream::encodeHeadersInternal(ResponseHeaderMa } } + // 100-continue headers are handled via encode100ContinueHeaders. + state_.non_100_response_headers_encoded_ = true; chargeStats(headers); ENVOY_STREAM_LOG(debug, "encoding headers via codec (end_stream={}):\n{}", *this, end_stream, headers); // Now actually encode via the codec. 
- stream_info_.onFirstDownstreamTxByteSent(); + filter_manager_.streamInfo().onFirstDownstreamTxByteSent(); response_encoder_->encodeHeaders(headers, end_stream); - maybeEndEncode(end_stream); } -void ConnectionManagerImpl::ActiveStream::encodeMetadata(ActiveStreamEncoderFilter* filter, - MetadataMapPtr&& metadata_map_ptr) { - resetIdleTimer(); +void ConnectionManagerImpl::FilterManager::encodeMetadata(ActiveStreamEncoderFilter* filter, + MetadataMapPtr&& metadata_map_ptr) { + active_stream_.resetIdleTimer(); std::list::iterator entry = commonEncodePrefix(filter, false, FilterIterationStartState::CanStartFromCurrent); @@ -1741,33 +1945,39 @@ void ConnectionManagerImpl::ActiveStream::encodeMetadata(ActiveStreamEncoderFilt } FilterMetadataStatus status = (*entry)->handle_->encodeMetadata(*metadata_map_ptr); - ENVOY_STREAM_LOG(trace, "encode metadata called: filter={} status={}", *this, + ENVOY_STREAM_LOG(trace, "encode metadata called: filter={} status={}", active_stream_, static_cast((*entry).get()), static_cast(status)); } // TODO(soya3129): update stats with metadata. // Now encode metadata via the codec. if (!metadata_map_ptr->empty()) { - ENVOY_STREAM_LOG(debug, "encoding metadata via codec:\n{}", *this, *metadata_map_ptr); MetadataMapVector metadata_map_vector; metadata_map_vector.emplace_back(std::move(metadata_map_ptr)); - response_encoder_->encodeMetadata(metadata_map_vector); + filter_manager_callbacks_.encodeMetadata(metadata_map_vector); } } -ResponseTrailerMap& ConnectionManagerImpl::ActiveStream::addEncodedTrailers() { +ResponseTrailerMap& ConnectionManagerImpl::FilterManager::addEncodedTrailers() { // Trailers can only be added during the last data frame (i.e. end_stream = true). ASSERT(state_.filter_call_state_ & FilterCallState::LastDataFrame); // Trailers can only be added once. 
ASSERT(!response_trailers_); - response_trailers_ = std::make_unique(); + response_trailers_ = ResponseTrailerMapImpl::create(); return *response_trailers_; } -void ConnectionManagerImpl::ActiveStream::addEncodedData(ActiveStreamEncoderFilter& filter, - Buffer::Instance& data, bool streaming) { +void ConnectionManagerImpl::FilterManager::sendLocalReply( + bool is_grpc_request, Code code, absl::string_view body, + const std::function& modify_headers, + const absl::optional grpc_status, absl::string_view details) { + active_stream_.sendLocalReply(is_grpc_request, code, body, modify_headers, grpc_status, details); +} + +void ConnectionManagerImpl::FilterManager::addEncodedData(ActiveStreamEncoderFilter& filter, + Buffer::Instance& data, bool streaming) { if (state_.filter_call_state_ == 0 || (state_.filter_call_state_ & FilterCallState::EncodeHeaders) || (state_.filter_call_state_ & FilterCallState::EncodeData) || @@ -1788,10 +1998,10 @@ void ConnectionManagerImpl::ActiveStream::addEncodedData(ActiveStreamEncoderFilt } } -void ConnectionManagerImpl::ActiveStream::encodeData( +void ConnectionManagerImpl::FilterManager::encodeData( ActiveStreamEncoderFilter* filter, Buffer::Instance& data, bool end_stream, FilterIterationStartState filter_iteration_start_state) { - resetIdleTimer(); + active_stream_.resetIdleTimer(); // If we previously decided to encode only the headers, do nothing here. 
if (state_.encoding_headers_only_) { @@ -1835,7 +2045,7 @@ void ConnectionManagerImpl::ActiveStream::encodeData( if (end_stream) { state_.filter_call_state_ &= ~FilterCallState::LastDataFrame; } - ENVOY_STREAM_LOG(trace, "encode data called: filter={} status={}", *this, + ENVOY_STREAM_LOG(trace, "encode data called: filter={} status={}", active_stream_, static_cast((*entry).get()), static_cast(status)); if (!trailers_exists_at_start && response_trailers_ && @@ -1849,7 +2059,9 @@ void ConnectionManagerImpl::ActiveStream::encodeData( } const bool modified_end_stream = end_stream && trailers_added_entry == encoder_filters_.end(); - encodeDataInternal(data, modified_end_stream); + ASSERT(!state_.encoding_headers_only_); + filter_manager_callbacks_.encodeData(data, modified_end_stream); + maybeEndEncode(modified_end_stream); // If trailers were adding during encodeData we need to trigger decodeTrailers in order // to allow filters to process the trailers. @@ -1858,20 +2070,44 @@ void ConnectionManagerImpl::ActiveStream::encodeData( } } -void ConnectionManagerImpl::ActiveStream::encodeDataInternal(Buffer::Instance& data, - bool end_stream) { - ASSERT(!state_.encoding_headers_only_); +void ConnectionManagerImpl::ActiveStream::encodeData(Buffer::Instance& data, bool end_stream) { ENVOY_STREAM_LOG(trace, "encoding data via codec (size={} end_stream={})", *this, data.length(), end_stream); - stream_info_.addBytesSent(data.length()); + filter_manager_.streamInfo().addBytesSent(data.length()); response_encoder_->encodeData(data, end_stream); - maybeEndEncode(end_stream); } -void ConnectionManagerImpl::ActiveStream::encodeTrailers(ActiveStreamEncoderFilter* filter, - ResponseTrailerMap& trailers) { - resetIdleTimer(); +void ConnectionManagerImpl::ActiveStream::encodeTrailers(ResponseTrailerMap& trailers) { + ENVOY_STREAM_LOG(debug, "encoding trailers via codec:\n{}", *this, trailers); + + response_encoder_->encodeTrailers(trailers); +} + +void 
ConnectionManagerImpl::ActiveStream::encodeMetadata(MetadataMapVector& metadata) { + ENVOY_STREAM_LOG(debug, "encoding metadata via codec:\n{}", *this, metadata); + response_encoder_->encodeMetadata(metadata); +} + +void ConnectionManagerImpl::ActiveStream::onDecoderFilterBelowWriteBufferLowWatermark() { + ENVOY_STREAM_LOG(debug, "Read-enabling downstream stream due to filter callbacks.", *this); + // If the state is destroyed, the codec's stream is already torn down. On + // teardown the codec will unwind any remaining read disable calls. + if (!filter_manager_.destroyed()) { + response_encoder_->getStream().readDisable(false); + } + connection_manager_.stats_.named_.downstream_flow_control_resumed_reading_total_.inc(); +} + +void ConnectionManagerImpl::ActiveStream::onDecoderFilterAboveWriteBufferHighWatermark() { + ENVOY_STREAM_LOG(debug, "Read-disabling downstream stream due to filter callbacks.", *this); + response_encoder_->getStream().readDisable(true); + connection_manager_.stats_.named_.downstream_flow_control_paused_reading_total_.inc(); +} + +void ConnectionManagerImpl::FilterManager::encodeTrailers(ActiveStreamEncoderFilter* filter, + ResponseTrailerMap& trailers) { + active_stream_.resetIdleTimer(); // If we previously decided to encode only the headers, do nothing here. 
if (state_.encoding_headers_only_) { @@ -1892,30 +2128,24 @@ void ConnectionManagerImpl::ActiveStream::encodeTrailers(ActiveStreamEncoderFilt (*entry)->handle_->encodeComplete(); (*entry)->end_stream_ = true; state_.filter_call_state_ &= ~FilterCallState::EncodeTrailers; - ENVOY_STREAM_LOG(trace, "encode trailers called: filter={} status={}", *this, + ENVOY_STREAM_LOG(trace, "encode trailers called: filter={} status={}", active_stream_, static_cast((*entry).get()), static_cast(status)); if (!(*entry)->commonHandleAfterTrailersCallback(status)) { return; } } - ENVOY_STREAM_LOG(debug, "encoding trailers via codec:\n{}", *this, trailers); - - response_encoder_->encodeTrailers(trailers); + filter_manager_callbacks_.encodeTrailers(trailers); maybeEndEncode(true); } -void ConnectionManagerImpl::ActiveStream::maybeEndEncode(bool end_stream) { +void ConnectionManagerImpl::FilterManager::maybeEndEncode(bool end_stream) { if (end_stream) { - ASSERT(!state_.codec_saw_local_complete_); - state_.codec_saw_local_complete_ = true; - stream_info_.onLastDownstreamTxByteSent(); - request_response_timespan_->complete(); - connection_manager_.doEndStream(*this); + filter_manager_callbacks_.endStream(); } } -bool ConnectionManagerImpl::ActiveStream::processNewlyAddedMetadata() { +bool ConnectionManagerImpl::FilterManager::processNewlyAddedMetadata() { if (request_metadata_map_vector_ == nullptr) { return false; } @@ -1926,9 +2156,9 @@ bool ConnectionManagerImpl::ActiveStream::processNewlyAddedMetadata() { return true; } -bool ConnectionManagerImpl::ActiveStream::handleDataIfStopAll(ActiveStreamFilterBase& filter, - Buffer::Instance& data, - bool& filter_streaming) { +bool ConnectionManagerImpl::FilterManager::handleDataIfStopAll(ActiveStreamFilterBase& filter, + Buffer::Instance& data, + bool& filter_streaming) { if (filter.stoppedAll()) { ASSERT(!filter.canIterate()); filter_streaming = @@ -1953,19 +2183,19 @@ void ConnectionManagerImpl::ActiveStream::onResetStream(StreamResetReason, 
absl: // DownstreamProtocolError and propagate the details upwards. const absl::string_view encoder_details = response_encoder_->getStream().responseDetails(); if (!encoder_details.empty()) { - stream_info_.setResponseFlag(StreamInfo::ResponseFlag::DownstreamProtocolError); - stream_info_.setResponseCodeDetails(encoder_details); + filter_manager_.streamInfo().setResponseFlag(StreamInfo::ResponseFlag::DownstreamProtocolError); + filter_manager_.streamInfo().setResponseCodeDetails(encoder_details); } } void ConnectionManagerImpl::ActiveStream::onAboveWriteBufferHighWatermark() { ENVOY_STREAM_LOG(debug, "Disabling upstream stream due to downstream stream watermark.", *this); - callHighWatermarkCallbacks(); + filter_manager_.callHighWatermarkCallbacks(); } void ConnectionManagerImpl::ActiveStream::onBelowWriteBufferLowWatermark() { ENVOY_STREAM_LOG(debug, "Enabling upstream stream due to downstream stream watermark.", *this); - callLowWatermarkCallbacks(); + filter_manager_.callLowWatermarkCallbacks(); } Tracing::OperationName ConnectionManagerImpl::ActiveStream::operationName() const { @@ -1984,14 +2214,14 @@ uint32_t ConnectionManagerImpl::ActiveStream::maxPathTagLength() const { return connection_manager_.config_.tracingConfig()->max_path_tag_length_; } -void ConnectionManagerImpl::ActiveStream::callHighWatermarkCallbacks() { +void ConnectionManagerImpl::FilterManager::callHighWatermarkCallbacks() { ++high_watermark_count_; for (auto watermark_callbacks : watermark_callbacks_) { watermark_callbacks->onAboveWriteBufferHighWatermark(); } } -void ConnectionManagerImpl::ActiveStream::callLowWatermarkCallbacks() { +void ConnectionManagerImpl::FilterManager::callLowWatermarkCallbacks() { ASSERT(high_watermark_count_ > 0); --high_watermark_count_; for (auto watermark_callbacks : watermark_callbacks_) { @@ -1999,8 +2229,8 @@ void ConnectionManagerImpl::ActiveStream::callLowWatermarkCallbacks() { } } -void ConnectionManagerImpl::ActiveStream::setBufferLimit(uint32_t 
new_limit) { - ENVOY_STREAM_LOG(debug, "setting buffer limit to {}", *this, new_limit); +void ConnectionManagerImpl::FilterManager::setBufferLimit(uint32_t new_limit) { + ENVOY_STREAM_LOG(debug, "setting buffer limit to {}", active_stream_, new_limit); buffer_limit_ = new_limit; if (buffered_request_data_) { buffered_request_data_->setWatermarks(buffer_limit_); @@ -2010,32 +2240,34 @@ void ConnectionManagerImpl::ActiveStream::setBufferLimit(uint32_t new_limit) { } } -bool ConnectionManagerImpl::ActiveStream::createFilterChain() { +bool ConnectionManagerImpl::FilterManager::createFilterChain() { if (state_.created_filter_chain_) { return false; } bool upgrade_rejected = false; - const Envoy::Http::HeaderEntry* upgrade = - request_headers_ ? request_headers_->Upgrade() : nullptr; - // Treat CONNECT requests as a special upgrade case. - if (!upgrade && request_headers_ && HeaderUtility::isConnect(*request_headers_)) { - upgrade = request_headers_->Method(); + const HeaderEntry* upgrade = nullptr; + if (request_headers_) { + upgrade = request_headers_->Upgrade(); + + // Treat CONNECT requests as a special upgrade case. + if (!upgrade && HeaderUtility::isConnect(*request_headers_)) { + upgrade = request_headers_->Method(); + } } + state_.created_filter_chain_ = true; if (upgrade != nullptr) { const Router::RouteEntry::UpgradeMap* upgrade_map = nullptr; // We must check if the 'cached_route_' optional is populated since this function can be called // early via sendLocalReply(), before the cached route is populated. 
- if (hasCachedRoute() && cached_route_.value()->routeEntry()) { - upgrade_map = &cached_route_.value()->routeEntry()->upgradeMap(); + if (active_stream_.hasCachedRoute() && active_stream_.cached_route_.value()->routeEntry()) { + upgrade_map = &active_stream_.cached_route_.value()->routeEntry()->upgradeMap(); } - if (connection_manager_.config_.filterFactory().createUpgradeFilterChain( - upgrade->value().getStringView(), upgrade_map, *this)) { - state_.successful_upgrade_ = true; - connection_manager_.stats_.named_.downstream_cx_upgrades_total_.inc(); - connection_manager_.stats_.named_.downstream_cx_upgrades_active_.inc(); + if (filter_chain_factory_.createUpgradeFilterChain(upgrade->value().getStringView(), + upgrade_map, *this)) { + filter_manager_callbacks_.upgradeFilterChainCreated(); return true; } else { upgrade_rejected = true; @@ -2044,19 +2276,19 @@ bool ConnectionManagerImpl::ActiveStream::createFilterChain() { } } - connection_manager_.config_.filterFactory().createFilterChain(*this); + filter_chain_factory_.createFilterChain(*this); return !upgrade_rejected; } void ConnectionManagerImpl::ActiveStreamFilterBase::commonContinue() { // TODO(mattklein123): Raise an error if this is called during a callback. if (!canContinue()) { - ENVOY_STREAM_LOG(trace, "cannot continue filter chain: filter={}", parent_, + ENVOY_STREAM_LOG(trace, "cannot continue filter chain: filter={}", parent_.active_stream_, static_cast(this)); return; } - ENVOY_STREAM_LOG(trace, "continuing filter chain: filter={}", parent_, + ENVOY_STREAM_LOG(trace, "continuing filter chain: filter={}", parent_.active_stream_, static_cast(this)); ASSERT(!canIterate()); // If iteration has stopped for all frame types, set iterate_from_current_filter_ to true so the @@ -2067,7 +2299,7 @@ void ConnectionManagerImpl::ActiveStreamFilterBase::commonContinue() { allowIteration(); // Only resume with do100ContinueHeaders() if we've actually seen a 100-Continue. 
- if (parent_.state_.has_continue_headers_ && !continue_headers_continued_) { + if (has100Continueheaders()) { continue_headers_continued_ = true; do100ContinueHeaders(); // If the response headers have not yet come in, don't continue on with @@ -2129,7 +2361,7 @@ bool ConnectionManagerImpl::ActiveStreamFilterBase::commonHandleAfterHeadersCall // Set headers_only to true so we know to end early if necessary, // but continue filter iteration so we actually write the headers/run the cleanup code. headers_only = true; - ENVOY_STREAM_LOG(debug, "converting to headers only", parent_); + ENVOY_STREAM_LOG(debug, "converting to headers only", parent_.active_stream_); } else { ASSERT(status == FilterHeadersStatus::Continue); headers_continued_ = true; @@ -2210,11 +2442,11 @@ bool ConnectionManagerImpl::ActiveStreamFilterBase::commonHandleAfterTrailersCal } const Network::Connection* ConnectionManagerImpl::ActiveStreamFilterBase::connection() { - return parent_.connection(); + return parent_.active_stream_.connection(); } Event::Dispatcher& ConnectionManagerImpl::ActiveStreamFilterBase::dispatcher() { - return parent_.connection_manager_.read_callbacks_->connection().dispatcher(); + return parent_.active_stream_.connection_manager_.read_callbacks_->connection().dispatcher(); } StreamInfo::StreamInfo& ConnectionManagerImpl::ActiveStreamFilterBase::streamInfo() { @@ -2222,48 +2454,102 @@ StreamInfo::StreamInfo& ConnectionManagerImpl::ActiveStreamFilterBase::streamInf } Tracing::Span& ConnectionManagerImpl::ActiveStreamFilterBase::activeSpan() { - if (parent_.active_span_) { - return *parent_.active_span_; + if (parent_.active_stream_.active_span_) { + return *parent_.active_stream_.active_span_; } else { return Tracing::NullSpan::instance(); } } -Tracing::Config& ConnectionManagerImpl::ActiveStreamFilterBase::tracingConfig() { return parent_; } +Tracing::Config& ConnectionManagerImpl::ActiveStreamFilterBase::tracingConfig() { + return parent_.active_stream_; +} + +const 
ScopeTrackedObject& ConnectionManagerImpl::ActiveStreamFilterBase::scope() { + return parent_.active_stream_; +} Upstream::ClusterInfoConstSharedPtr ConnectionManagerImpl::ActiveStreamFilterBase::clusterInfo() { // NOTE: Refreshing route caches clusterInfo as well. - if (!parent_.cached_route_.has_value()) { - parent_.refreshCachedRoute(); + if (!parent_.active_stream_.cached_route_.has_value()) { + parent_.active_stream_.refreshCachedRoute(); } - return parent_.cached_cluster_info_.value(); + return parent_.active_stream_.cached_cluster_info_.value(); } Router::RouteConstSharedPtr ConnectionManagerImpl::ActiveStreamFilterBase::route() { - if (!parent_.cached_route_.has_value()) { - parent_.refreshCachedRoute(); - } + return route(nullptr); +} - return parent_.cached_route_.value(); +Router::RouteConstSharedPtr +ConnectionManagerImpl::ActiveStreamFilterBase::route(const Router::RouteCallback& cb) { + if (parent_.active_stream_.cached_route_.has_value()) { + return parent_.active_stream_.cached_route_.value(); + } + parent_.active_stream_.refreshCachedRoute(cb); + return parent_.active_stream_.cached_route_.value(); } void ConnectionManagerImpl::ActiveStreamFilterBase::clearRouteCache() { - parent_.cached_route_ = absl::optional(); - parent_.cached_cluster_info_ = absl::optional(); - if (parent_.tracing_custom_tags_) { - parent_.tracing_custom_tags_->clear(); + parent_.active_stream_.cached_route_ = absl::optional(); + parent_.active_stream_.cached_cluster_info_ = + absl::optional(); + if (parent_.active_stream_.tracing_custom_tags_) { + parent_.active_stream_.tracing_custom_tags_->clear(); } } +bool ConnectionManagerImpl::ActiveStreamDecoderFilter::canContinue() { + // It is possible for the connection manager to respond directly to a request even while + // a filter is trying to continue. If a response has already happened, we should not + // continue to further filters. 
A concrete example of this is a filter buffering data, the + // last data frame comes in and the filter continues, but the final buffering takes the stream + // over the high watermark such that a 413 is returned. + return !parent_.state_.local_complete_; +} + Buffer::WatermarkBufferPtr ConnectionManagerImpl::ActiveStreamDecoderFilter::createBuffer() { - auto buffer = - std::make_unique([this]() -> void { this->requestDataDrained(); }, - [this]() -> void { this->requestDataTooLarge(); }); + auto buffer = std::make_unique( + [this]() -> void { this->requestDataDrained(); }, + [this]() -> void { this->requestDataTooLarge(); }, + []() -> void { /* TODO(adisuissa): Handle overflow watermark */ }); buffer->setWatermarks(parent_.buffer_limit_); return buffer; } +Buffer::WatermarkBufferPtr& ConnectionManagerImpl::ActiveStreamDecoderFilter::bufferedData() { + return parent_.buffered_request_data_; +} + +bool ConnectionManagerImpl::ActiveStreamDecoderFilter::complete() { + return parent_.state_.remote_complete_; +} + +void ConnectionManagerImpl::ActiveStreamDecoderFilter::doHeaders(bool end_stream) { + parent_.decodeHeaders(this, *parent_.request_headers_, end_stream); +} + +void ConnectionManagerImpl::ActiveStreamDecoderFilter::doData(bool end_stream) { + parent_.decodeData(this, *parent_.buffered_request_data_, end_stream, + FilterManager::FilterIterationStartState::CanStartFromCurrent); +} + +void ConnectionManagerImpl::ActiveStreamDecoderFilter::doTrailers() { + parent_.decodeTrailers(this, *parent_.request_trailers_); +} +bool ConnectionManagerImpl::ActiveStreamDecoderFilter::hasTrailers() { + return parent_.request_trailers_ != nullptr; +} + +void ConnectionManagerImpl::ActiveStreamDecoderFilter::drainSavedRequestMetadata() { + ASSERT(saved_request_metadata_ != nullptr); + for (auto& metadata_map : *getSavedRequestMetadata()) { + parent_.decodeMetadata(this, *metadata_map); + } + getSavedRequestMetadata()->clear(); +} + void 
ConnectionManagerImpl::ActiveStreamDecoderFilter::handleMetadataAfterHeadersCallback() { // If we drain accumulated metadata, the iteration must start with the current filter. const bool saved_state = iterate_from_current_filter_; @@ -2293,17 +2579,34 @@ MetadataMapVector& ConnectionManagerImpl::ActiveStreamDecoderFilter::addDecodedM void ConnectionManagerImpl::ActiveStreamDecoderFilter::injectDecodedDataToFilterChain( Buffer::Instance& data, bool end_stream) { parent_.decodeData(this, data, end_stream, - ActiveStream::FilterIterationStartState::CanStartFromCurrent); + FilterManager::FilterIterationStartState::CanStartFromCurrent); } void ConnectionManagerImpl::ActiveStreamDecoderFilter::continueDecoding() { commonContinue(); } +const Buffer::Instance* ConnectionManagerImpl::ActiveStreamDecoderFilter::decodingBuffer() { + return parent_.buffered_request_data_.get(); +} + +void ConnectionManagerImpl::ActiveStreamDecoderFilter::modifyDecodingBuffer( + std::function callback) { + ASSERT(parent_.state_.latest_data_decoding_filter_ == this); + callback(*parent_.buffered_request_data_.get()); +} + +void ConnectionManagerImpl::ActiveStreamDecoderFilter::sendLocalReply( + Code code, absl::string_view body, + std::function modify_headers, + const absl::optional grpc_status, absl::string_view details) { + parent_.stream_info_.setResponseCodeDetails(details); + parent_.sendLocalReply(is_grpc_request_, code, body, modify_headers, grpc_status, details); +} void ConnectionManagerImpl::ActiveStreamDecoderFilter::encode100ContinueHeaders( ResponseHeaderMapPtr&& headers) { // If Envoy is not configured to proxy 100-Continue responses, swallow the 100 Continue // here. This avoids the potential situation where Envoy strips Expect: 100-Continue and sends a // 100-Continue, then proxies a duplicate 100 Continue from upstream. 
- if (parent_.connection_manager_.config_.proxy100Continue()) { + if (parent_.active_stream_.connection_manager_.config_.proxy100Continue()) { parent_.continue_headers_ = std::move(headers); parent_.encode100ContinueHeaders(nullptr, *parent_.continue_headers_); } @@ -2318,7 +2621,7 @@ void ConnectionManagerImpl::ActiveStreamDecoderFilter::encodeHeaders(ResponseHea void ConnectionManagerImpl::ActiveStreamDecoderFilter::encodeData(Buffer::Instance& data, bool end_stream) { parent_.encodeData(nullptr, data, end_stream, - ActiveStream::FilterIterationStartState::CanStartFromCurrent); + FilterManager::FilterIterationStartState::CanStartFromCurrent); } void ConnectionManagerImpl::ActiveStreamDecoderFilter::encodeTrailers( @@ -2334,17 +2637,15 @@ void ConnectionManagerImpl::ActiveStreamDecoderFilter::encodeMetadata( void ConnectionManagerImpl::ActiveStreamDecoderFilter:: onDecoderFilterAboveWriteBufferHighWatermark() { - ENVOY_STREAM_LOG(debug, "Read-disabling downstream stream due to filter callbacks.", parent_); - parent_.response_encoder_->getStream().readDisable(true); - parent_.connection_manager_.stats_.named_.downstream_flow_control_paused_reading_total_.inc(); + parent_.filter_manager_callbacks_.onDecoderFilterAboveWriteBufferHighWatermark(); } void ConnectionManagerImpl::ActiveStreamDecoderFilter::requestDataTooLarge() { - ENVOY_STREAM_LOG(debug, "request data too large watermark exceeded", parent_); + ENVOY_STREAM_LOG(debug, "request data too large watermark exceeded", parent_.active_stream_); if (parent_.state_.decoder_filters_streaming_) { onDecoderFilterAboveWriteBufferHighWatermark(); } else { - parent_.connection_manager_.stats_.named_.downstream_rq_too_large_.inc(); + parent_.active_stream_.connection_manager_.stats_.named_.downstream_rq_too_large_.inc(); sendLocalReply(Code::PayloadTooLarge, CodeUtility::toString(Code::PayloadTooLarge), nullptr, absl::nullopt, StreamInfo::ResponseCodeDetails::get().RequestPayloadTooLarge); } @@ -2358,9 +2659,7 @@ void 
ConnectionManagerImpl::ActiveStreamDecoderFilter::requestDataDrained() { void ConnectionManagerImpl::ActiveStreamDecoderFilter:: onDecoderFilterBelowWriteBufferLowWatermark() { - ENVOY_STREAM_LOG(debug, "Read-enabling downstream stream due to filter callbacks.", parent_); - parent_.response_encoder_->getStream().readDisable(false); - parent_.connection_manager_.stats_.named_.downstream_flow_control_resumed_reading_total_.inc(); + parent_.filter_manager_callbacks_.onDecoderFilterBelowWriteBufferLowWatermark(); } void ConnectionManagerImpl::ActiveStreamDecoderFilter::addDownstreamWatermarkCallbacks( @@ -2381,6 +2680,14 @@ void ConnectionManagerImpl::ActiveStreamDecoderFilter::removeDownstreamWatermark parent_.watermark_callbacks_.remove(&watermark_callbacks); } +void ConnectionManagerImpl::ActiveStreamDecoderFilter::setDecoderBufferLimit(uint32_t limit) { + parent_.setBufferLimit(limit); +} + +uint32_t ConnectionManagerImpl::ActiveStreamDecoderFilter::decoderBufferLimit() { + return parent_.buffer_limit_; +} + bool ConnectionManagerImpl::ActiveStreamDecoderFilter::recreateStream() { // Because the filter's and the HCM view of if the stream has a body and if // the stream is complete may differ, re-check bytesReceived() to make sure @@ -2388,49 +2695,96 @@ bool ConnectionManagerImpl::ActiveStreamDecoderFilter::recreateStream() { if (!complete() || parent_.stream_info_.bytesReceived() != 0) { return false; } + + parent_.stream_info_.setResponseCodeDetails( + StreamInfo::ResponseCodeDetails::get().InternalRedirect); // n.b. we do not currently change the codecs to point at the new stream // decoder because the decoder callbacks are complete. It would be good to // null out that pointer but should not be necessary. 
RequestHeaderMapPtr request_headers(std::move(parent_.request_headers_)); - ResponseEncoder* response_encoder = parent_.response_encoder_; - parent_.response_encoder_ = nullptr; - response_encoder->getStream().removeCallbacks(parent_); + ResponseEncoder* response_encoder = parent_.active_stream_.response_encoder_; + parent_.active_stream_.response_encoder_ = nullptr; + response_encoder->getStream().removeCallbacks(parent_.active_stream_); // This functionally deletes the stream (via deferred delete) so do not // reference anything beyond this point. - parent_.connection_manager_.doEndStream(this->parent_); + parent_.active_stream_.connection_manager_.doEndStream(parent_.active_stream_); - RequestDecoder& new_stream = parent_.connection_manager_.newStream(*response_encoder, true); + RequestDecoder& new_stream = + parent_.active_stream_.connection_manager_.newStream(*response_encoder, true); // We don't need to copy over the old parent FilterState from the old StreamInfo if it did not // store any objects with a LifeSpan at or above DownstreamRequest. This is to avoid unnecessary // heap allocation. + // TODO(snowp): In the case where connection level filter state has been set on the connection + // FilterState that we inherit, we'll end up copying this every time even though we could get + // away with just resetting it to the HCM filter_state_. 
if (parent_.stream_info_.filter_state_->hasDataAtOrAboveLifeSpan( StreamInfo::FilterState::LifeSpan::Request)) { - (*parent_.connection_manager_.streams_.begin())->stream_info_.filter_state_ = - std::make_shared( - parent_.stream_info_.filter_state_->parent(), - StreamInfo::FilterState::LifeSpan::FilterChain); + (*parent_.active_stream_.connection_manager_.streams_.begin()) + ->filter_manager_.streamInfo() + .filter_state_ = std::make_shared( + parent_.stream_info_.filter_state_->parent(), + StreamInfo::FilterState::LifeSpan::FilterChain); } new_stream.decodeHeaders(std::move(request_headers), true); return true; } +void ConnectionManagerImpl::ActiveStreamDecoderFilter::addUpstreamSocketOptions( + const Network::Socket::OptionsSharedPtr& options) { + + Network::Socket::appendOptions(parent_.active_stream_.upstream_options_, options); +} + +Network::Socket::OptionsSharedPtr +ConnectionManagerImpl::ActiveStreamDecoderFilter::getUpstreamSocketOptions() const { + return parent_.active_stream_.upstream_options_; +} + void ConnectionManagerImpl::ActiveStreamDecoderFilter::requestRouteConfigUpdate( Http::RouteConfigUpdatedCallbackSharedPtr route_config_updated_cb) { - parent_.requestRouteConfigUpdate(dispatcher(), std::move(route_config_updated_cb)); + parent_.active_stream_.requestRouteConfigUpdate(dispatcher(), std::move(route_config_updated_cb)); } absl::optional ConnectionManagerImpl::ActiveStreamDecoderFilter::routeConfig() { - return parent_.routeConfig(); + return parent_.active_stream_.routeConfig(); } Buffer::WatermarkBufferPtr ConnectionManagerImpl::ActiveStreamEncoderFilter::createBuffer() { - auto buffer = new Buffer::WatermarkBuffer([this]() -> void { this->responseDataDrained(); }, - [this]() -> void { this->responseDataTooLarge(); }); + auto buffer = new Buffer::WatermarkBuffer( + [this]() -> void { this->responseDataDrained(); }, + [this]() -> void { this->responseDataTooLarge(); }, + []() -> void { /* TODO(adisuissa): Handle overflow watermark */ }); 
buffer->setWatermarks(parent_.buffer_limit_); return Buffer::WatermarkBufferPtr{buffer}; } +Buffer::WatermarkBufferPtr& ConnectionManagerImpl::ActiveStreamEncoderFilter::bufferedData() { + return parent_.buffered_response_data_; +} +bool ConnectionManagerImpl::ActiveStreamEncoderFilter::complete() { + return parent_.state_.local_complete_; +} +bool ConnectionManagerImpl::ActiveStreamEncoderFilter::has100Continueheaders() { + return parent_.state_.has_continue_headers_ && !continue_headers_continued_; +} +void ConnectionManagerImpl::ActiveStreamEncoderFilter::do100ContinueHeaders() { + parent_.encode100ContinueHeaders(this, *parent_.continue_headers_); +} +void ConnectionManagerImpl::ActiveStreamEncoderFilter::doHeaders(bool end_stream) { + parent_.encodeHeaders(this, *parent_.response_headers_, end_stream); +} +void ConnectionManagerImpl::ActiveStreamEncoderFilter::doData(bool end_stream) { + parent_.encodeData(this, *parent_.buffered_response_data_, end_stream, + FilterManager::FilterIterationStartState::CanStartFromCurrent); +} +void ConnectionManagerImpl::ActiveStreamEncoderFilter::drainSavedResponseMetadata() { + ASSERT(saved_response_metadata_ != nullptr); + for (auto& metadata_map : *getSavedResponseMetadata()) { + parent_.encodeMetadata(this, std::move(metadata_map)); + } + getSavedResponseMetadata()->clear(); +} void ConnectionManagerImpl::ActiveStreamEncoderFilter::handleMetadataAfterHeadersCallback() { // If we drain accumulated metadata, the iteration must start with the current filter. @@ -2445,6 +2799,12 @@ void ConnectionManagerImpl::ActiveStreamEncoderFilter::handleMetadataAfterHeader // Restores the original value of iterate_from_current_filter_. 
iterate_from_current_filter_ = saved_state; } +void ConnectionManagerImpl::ActiveStreamEncoderFilter::doTrailers() { + parent_.encodeTrailers(this, *parent_.response_trailers_); +} +bool ConnectionManagerImpl::ActiveStreamEncoderFilter::hasTrailers() { + return parent_.response_trailers_ != nullptr; +} void ConnectionManagerImpl::ActiveStreamEncoderFilter::addEncodedData(Buffer::Instance& data, bool streaming) { return parent_.addEncodedData(*this, data, streaming); @@ -2453,7 +2813,7 @@ void ConnectionManagerImpl::ActiveStreamEncoderFilter::addEncodedData(Buffer::In void ConnectionManagerImpl::ActiveStreamEncoderFilter::injectEncodedDataToFilterChain( Buffer::Instance& data, bool end_stream) { parent_.encodeData(this, data, end_stream, - ActiveStream::FilterIterationStartState::CanStartFromCurrent); + FilterManager::FilterIterationStartState::CanStartFromCurrent); } ResponseTrailerMap& ConnectionManagerImpl::ActiveStreamEncoderFilter::addEncodedTrailers() { @@ -2467,55 +2827,57 @@ void ConnectionManagerImpl::ActiveStreamEncoderFilter::addEncodedMetadata( void ConnectionManagerImpl::ActiveStreamEncoderFilter:: onEncoderFilterAboveWriteBufferHighWatermark() { - ENVOY_STREAM_LOG(debug, "Disabling upstream stream due to filter callbacks.", parent_); + ENVOY_STREAM_LOG(debug, "Disabling upstream stream due to filter callbacks.", + parent_.active_stream_); parent_.callHighWatermarkCallbacks(); } void ConnectionManagerImpl::ActiveStreamEncoderFilter:: onEncoderFilterBelowWriteBufferLowWatermark() { - ENVOY_STREAM_LOG(debug, "Enabling upstream stream due to filter callbacks.", parent_); + ENVOY_STREAM_LOG(debug, "Enabling upstream stream due to filter callbacks.", + parent_.active_stream_); parent_.callLowWatermarkCallbacks(); } +void ConnectionManagerImpl::ActiveStreamEncoderFilter::setEncoderBufferLimit(uint32_t limit) { + parent_.setBufferLimit(limit); +} + +uint32_t ConnectionManagerImpl::ActiveStreamEncoderFilter::encoderBufferLimit() { + return 
parent_.buffer_limit_; +} + void ConnectionManagerImpl::ActiveStreamEncoderFilter::continueEncoding() { commonContinue(); } +const Buffer::Instance* ConnectionManagerImpl::ActiveStreamEncoderFilter::encodingBuffer() { + return parent_.buffered_response_data_.get(); +} + +void ConnectionManagerImpl::ActiveStreamEncoderFilter::modifyEncodingBuffer( + std::function callback) { + ASSERT(parent_.state_.latest_data_encoding_filter_ == this); + callback(*parent_.buffered_response_data_.get()); +} + +Http1StreamEncoderOptionsOptRef +ConnectionManagerImpl::ActiveStreamEncoderFilter::http1StreamEncoderOptions() { + // TODO(mattklein123): At some point we might want to actually wrap this interface but for now + // we give the filter direct access to the encoder options. + return parent_.active_stream_.response_encoder_->http1StreamEncoderOptions(); +} + void ConnectionManagerImpl::ActiveStreamEncoderFilter::responseDataTooLarge() { if (parent_.state_.encoder_filters_streaming_) { onEncoderFilterAboveWriteBufferHighWatermark(); } else { - parent_.connection_manager_.stats_.named_.rs_too_large_.inc(); - - // If headers have not been sent to the user, send a 500. - if (!headers_continued_) { - // Make sure we won't end up with nested watermark calls from the body buffer. - parent_.state_.encoder_filters_streaming_ = true; - allowIteration(); - - parent_.stream_info_.setResponseCodeDetails( - StreamInfo::ResponseCodeDetails::get().RequestHeadersTooLarge); - // This does not call the standard sendLocalReply because if there is already response data - // we do not want to pass a second set of response headers through the filter chain. - // Instead, call the encodeHeadersInternal / encodeDataInternal helpers - // directly, which maximizes shared code with the normal response path. 
- Http::Utility::sendLocalReply( - Grpc::Common::hasGrpcContentType(*parent_.request_headers_), - [&](ResponseHeaderMapPtr&& response_headers, bool end_stream) -> void { - parent_.response_headers_ = std::move(response_headers); - parent_.encodeHeadersInternal(*parent_.response_headers_, end_stream); - }, - [&](Buffer::Instance& data, bool end_stream) -> void { - parent_.encodeDataInternal(data, end_stream); - }, - parent_.state_.destroyed_, Http::Code::InternalServerError, - CodeUtility::toString(Http::Code::InternalServerError), absl::nullopt, - parent_.state_.is_head_request_); - parent_.maybeEndEncode(parent_.state_.local_complete_); - } else { - ENVOY_STREAM_LOG( - debug, "Resetting stream. Response data too large and headers have already been sent", - *this); - resetStream(); - } + parent_.active_stream_.connection_manager_.stats_.named_.rs_too_large_.inc(); + + // In this case, sendLocalReply will either send a response directly to the encoder, or + // reset the stream. + parent_.sendLocalReply( + parent_.request_headers_ && Grpc::Common::isGrpcRequestHeaders(*parent_.request_headers_), + Http::Code::InternalServerError, CodeUtility::toString(Http::Code::InternalServerError), + nullptr, absl::nullopt, StreamInfo::ResponseCodeDetails::get().ResponsePayloadTooLarge); } } @@ -2524,12 +2886,12 @@ void ConnectionManagerImpl::ActiveStreamEncoderFilter::responseDataDrained() { } void ConnectionManagerImpl::ActiveStreamFilterBase::resetStream() { - parent_.connection_manager_.stats_.named_.downstream_rq_tx_reset_.inc(); - parent_.connection_manager_.doEndStream(this->parent_); + parent_.active_stream_.connection_manager_.stats_.named_.downstream_rq_tx_reset_.inc(); + parent_.active_stream_.connection_manager_.doEndStream(parent_.active_stream_); } uint64_t ConnectionManagerImpl::ActiveStreamFilterBase::streamId() const { - return parent_.stream_id_; + return parent_.active_stream_.stream_id_; } } // namespace Http diff --git a/source/common/http/conn_manager_impl.h 
b/source/common/http/conn_manager_impl.h index 10bcc7522bd61..e453d0271df0d 100644 --- a/source/common/http/conn_manager_impl.h +++ b/source/common/http/conn_manager_impl.h @@ -9,13 +9,16 @@ #include #include "envoy/access_log/access_log.h" +#include "envoy/common/random_generator.h" #include "envoy/common/scope_tracker.h" +#include "envoy/common/time.h" #include "envoy/event/deferred_deletable.h" #include "envoy/http/api_listener.h" #include "envoy/http/codec.h" #include "envoy/http/codes.h" #include "envoy/http/context.h" #include "envoy/http/filter.h" +#include "envoy/http/header_map.h" #include "envoy/network/connection.h" #include "envoy/network/drain_decision.h" #include "envoy/network/filter.h" @@ -26,6 +29,7 @@ #include "envoy/ssl/connection.h" #include "envoy/stats/scope.h" #include "envoy/stats/stats_macros.h" +#include "envoy/stream_info/filter_state.h" #include "envoy/tracing/http_tracer.h" #include "envoy/upstream/upstream.h" @@ -36,6 +40,7 @@ #include "common/http/conn_manager_config.h" #include "common/http/user_agent.h" #include "common/http/utility.h" +#include "common/local_reply/local_reply.h" #include "common/stream_info/stream_info_impl.h" #include "common/tracing/http_tracer_impl.h" @@ -54,10 +59,10 @@ class ConnectionManagerImpl : Logger::Loggable, public Http::ApiListener { public: ConnectionManagerImpl(ConnectionManagerConfig& config, const Network::DrainDecision& drain_close, - Runtime::RandomGenerator& random_generator, Http::Context& http_context, + Random::RandomGenerator& random_generator, Http::Context& http_context, Runtime::Loader& runtime, const LocalInfo::LocalInfo& local_info, Upstream::ClusterManager& cluster_manager, - Server::OverloadManager* overload_manager, TimeSource& time_system); + Server::OverloadManager& overload_manager, TimeSource& time_system); ~ConnectionManagerImpl() override; static ConnectionManagerStats generateStats(const std::string& prefix, Stats::Scope& scope); @@ -84,7 +89,7 @@ class ConnectionManagerImpl 
: Logger::Loggable, void initializeReadFilterCallbacks(Network::ReadFilterCallbacks& callbacks) override; // Http::ConnectionCallbacks - void onGoAway() override; + void onGoAway(GoAwayErrorCode error_code) override; // Http::ServerConnectionCallbacks RequestDecoder& newStream(ResponseEncoder& response_encoder, @@ -102,17 +107,15 @@ class ConnectionManagerImpl : Logger::Loggable, TimeSource& timeSource() { return time_source_; } - // Return a reference to the shared_ptr so that it can be lazy created on demand. - std::shared_ptr& filterState() { return filter_state_; } - private: struct ActiveStream; + class FilterManager; /** * Base class wrapper for both stream encoder and decoder filters. */ struct ActiveStreamFilterBase : public virtual StreamFilterCallbacks { - ActiveStreamFilterBase(ActiveStream& parent, bool dual_filter) + ActiveStreamFilterBase(FilterManager& parent, bool dual_filter) : parent_(parent), iteration_state_(IterationState::Continue), iterate_from_current_filter_(false), headers_continued_(false), continue_headers_continued_(false), end_stream_(false), dual_filter_(dual_filter), @@ -139,6 +142,7 @@ class ConnectionManagerImpl : Logger::Loggable, virtual Buffer::WatermarkBufferPtr createBuffer() PURE; virtual Buffer::WatermarkBufferPtr& bufferedData() PURE; virtual bool complete() PURE; + virtual bool has100Continueheaders() PURE; virtual void do100ContinueHeaders() PURE; virtual void doHeaders(bool end_stream) PURE; virtual void doData(bool end_stream) PURE; @@ -153,13 +157,14 @@ class ConnectionManagerImpl : Logger::Loggable, Event::Dispatcher& dispatcher() override; void resetStream() override; Router::RouteConstSharedPtr route() override; + Router::RouteConstSharedPtr route(const Router::RouteCallback& cb) override; Upstream::ClusterInfoConstSharedPtr clusterInfo() override; void clearRouteCache() override; uint64_t streamId() const override; StreamInfo::StreamInfo& streamInfo() override; Tracing::Span& activeSpan() override; Tracing::Config& 
tracingConfig() override; - const ScopeTrackedObject& scope() override { return parent_; } + const ScopeTrackedObject& scope() override; // Functions to set or get iteration state. bool canIterate() { return iteration_state_ == IterationState::Continue; } @@ -199,7 +204,7 @@ class ConnectionManagerImpl : Logger::Loggable, StopAllWatermark, // Iteration has stopped for all frame types, and following data should // be buffered until high watermark is reached. }; - ActiveStream& parent_; + FilterManager& parent_; IterationState iteration_state_; // If the filter resumes iteration from a StopAllBuffer/Watermark state, the current filter // hasn't parsed data and trailers. As a result, the filter iteration should start with the @@ -221,45 +226,28 @@ class ConnectionManagerImpl : Logger::Loggable, struct ActiveStreamDecoderFilter : public ActiveStreamFilterBase, public StreamDecoderFilterCallbacks, LinkedObject { - ActiveStreamDecoderFilter(ActiveStream& parent, StreamDecoderFilterSharedPtr filter, + ActiveStreamDecoderFilter(FilterManager& parent, StreamDecoderFilterSharedPtr filter, bool dual_filter) : ActiveStreamFilterBase(parent, dual_filter), handle_(filter) {} // ActiveStreamFilterBase - bool canContinue() override { - // It is possible for the connection manager to respond directly to a request even while - // a filter is trying to continue. If a response has already happened, we should not - // continue to further filters. A concrete example of this is a filter buffering data, the - // last data frame comes in and the filter continues, but the final buffering takes the stream - // over the high watermark such that a 413 is returned. 
- return !parent_.state_.local_complete_; - } + bool canContinue() override; Buffer::WatermarkBufferPtr createBuffer() override; - Buffer::WatermarkBufferPtr& bufferedData() override { return parent_.buffered_request_data_; } - bool complete() override { return parent_.state_.remote_complete_; } + Buffer::WatermarkBufferPtr& bufferedData() override; + bool complete() override; + bool has100Continueheaders() override { return false; } void do100ContinueHeaders() override { NOT_REACHED_GCOVR_EXCL_LINE; } - void doHeaders(bool end_stream) override { - parent_.decodeHeaders(this, *parent_.request_headers_, end_stream); - } - void doData(bool end_stream) override { - parent_.decodeData(this, *parent_.buffered_request_data_, end_stream, - ActiveStream::FilterIterationStartState::CanStartFromCurrent); - } + void doHeaders(bool end_stream) override; + void doData(bool end_stream) override; void doMetadata() override { if (saved_request_metadata_ != nullptr) { drainSavedRequestMetadata(); } } - void doTrailers() override { parent_.decodeTrailers(this, *parent_.request_trailers_); } - bool hasTrailers() override { return parent_.request_trailers_ != nullptr; } + void doTrailers() override; + bool hasTrailers() override; - void drainSavedRequestMetadata() { - ASSERT(saved_request_metadata_ != nullptr); - for (auto& metadata_map : *getSavedRequestMetadata()) { - parent_.decodeMetadata(this, *metadata_map); - } - getSavedRequestMetadata()->clear(); - } + void drainSavedRequestMetadata(); // This function is called after the filter calls decodeHeaders() to drain accumulated metadata. 
void handleMetadataAfterHeadersCallback() override; @@ -269,23 +257,14 @@ class ConnectionManagerImpl : Logger::Loggable, RequestTrailerMap& addDecodedTrailers() override; MetadataMapVector& addDecodedMetadata() override; void continueDecoding() override; - const Buffer::Instance* decodingBuffer() override { - return parent_.buffered_request_data_.get(); - } + const Buffer::Instance* decodingBuffer() override; - void modifyDecodingBuffer(std::function callback) override { - ASSERT(parent_.state_.latest_data_decoding_filter_ == this); - callback(*parent_.buffered_request_data_.get()); - } + void modifyDecodingBuffer(std::function callback) override; void sendLocalReply(Code code, absl::string_view body, std::function modify_headers, const absl::optional grpc_status, - absl::string_view details) override { - parent_.stream_info_.setResponseCodeDetails(details); - parent_.sendLocalReply(is_grpc_request_, code, body, modify_headers, - parent_.state_.is_head_request_, grpc_status, details); - } + absl::string_view details) override; void encode100ContinueHeaders(ResponseHeaderMapPtr&& headers) override; void encodeHeaders(ResponseHeaderMapPtr&& headers, bool end_stream) override; void encodeData(Buffer::Instance& data, bool end_stream) override; @@ -297,23 +276,19 @@ class ConnectionManagerImpl : Logger::Loggable, addDownstreamWatermarkCallbacks(DownstreamWatermarkCallbacks& watermark_callbacks) override; void removeDownstreamWatermarkCallbacks(DownstreamWatermarkCallbacks& watermark_callbacks) override; - void setDecoderBufferLimit(uint32_t limit) override { parent_.setBufferLimit(limit); } - uint32_t decoderBufferLimit() override { return parent_.buffer_limit_; } + void setDecoderBufferLimit(uint32_t limit) override; + uint32_t decoderBufferLimit() override; bool recreateStream() override; - void addUpstreamSocketOptions(const Network::Socket::OptionsSharedPtr& options) override { - Network::Socket::appendOptions(parent_.upstream_options_, options); - } + void 
addUpstreamSocketOptions(const Network::Socket::OptionsSharedPtr& options) override; - Network::Socket::OptionsSharedPtr getUpstreamSocketOptions() const override { - return parent_.upstream_options_; - } + Network::Socket::OptionsSharedPtr getUpstreamSocketOptions() const override; // Each decoder filter instance checks if the request passed to the filter is gRPC // so that we can issue gRPC local responses to gRPC requests. Filter's decodeHeaders() // called here may change the content type, so we must check it before the call. FilterHeadersStatus decodeHeaders(RequestHeaderMap& headers, bool end_stream) { - is_grpc_request_ = Grpc::Common::hasGrpcContentType(headers); + is_grpc_request_ = Grpc::Common::isGrpcRequestHeaders(headers); FilterHeadersStatus status = handle_->decodeHeaders(headers, end_stream); if (end_stream) { handle_->decodeComplete(); @@ -340,32 +315,20 @@ class ConnectionManagerImpl : Logger::Loggable, struct ActiveStreamEncoderFilter : public ActiveStreamFilterBase, public StreamEncoderFilterCallbacks, LinkedObject { - ActiveStreamEncoderFilter(ActiveStream& parent, StreamEncoderFilterSharedPtr filter, + ActiveStreamEncoderFilter(FilterManager& parent, StreamEncoderFilterSharedPtr filter, bool dual_filter) : ActiveStreamFilterBase(parent, dual_filter), handle_(filter) {} // ActiveStreamFilterBase bool canContinue() override { return true; } Buffer::WatermarkBufferPtr createBuffer() override; - Buffer::WatermarkBufferPtr& bufferedData() override { return parent_.buffered_response_data_; } - bool complete() override { return parent_.state_.local_complete_; } - void do100ContinueHeaders() override { - parent_.encode100ContinueHeaders(this, *parent_.continue_headers_); - } - void doHeaders(bool end_stream) override { - parent_.encodeHeaders(this, *parent_.response_headers_, end_stream); - } - void doData(bool end_stream) override { - parent_.encodeData(this, *parent_.buffered_response_data_, end_stream, - 
ActiveStream::FilterIterationStartState::CanStartFromCurrent); - } - void drainSavedResponseMetadata() { - ASSERT(saved_response_metadata_ != nullptr); - for (auto& metadata_map : *getSavedResponseMetadata()) { - parent_.encodeMetadata(this, std::move(metadata_map)); - } - getSavedResponseMetadata()->clear(); - } + Buffer::WatermarkBufferPtr& bufferedData() override; + bool complete() override; + bool has100Continueheaders() override; + void do100ContinueHeaders() override; + void doHeaders(bool end_stream) override; + void doData(bool end_stream) override; + void drainSavedResponseMetadata(); void handleMetadataAfterHeadersCallback() override; void doMetadata() override { @@ -373,8 +336,8 @@ class ConnectionManagerImpl : Logger::Loggable, drainSavedResponseMetadata(); } } - void doTrailers() override { parent_.encodeTrailers(this, *parent_.response_trailers_); } - bool hasTrailers() override { return parent_.response_trailers_ != nullptr; } + void doTrailers() override; + bool hasTrailers() override; // Http::StreamEncoderFilterCallbacks void addEncodedData(Buffer::Instance& data, bool streaming) override; @@ -383,21 +346,12 @@ class ConnectionManagerImpl : Logger::Loggable, void addEncodedMetadata(MetadataMapPtr&& metadata_map) override; void onEncoderFilterAboveWriteBufferHighWatermark() override; void onEncoderFilterBelowWriteBufferLowWatermark() override; - void setEncoderBufferLimit(uint32_t limit) override { parent_.setBufferLimit(limit); } - uint32_t encoderBufferLimit() override { return parent_.buffer_limit_; } + void setEncoderBufferLimit(uint32_t limit) override; + uint32_t encoderBufferLimit() override; void continueEncoding() override; - const Buffer::Instance* encodingBuffer() override { - return parent_.buffered_response_data_.get(); - } - void modifyEncodingBuffer(std::function callback) override { - ASSERT(parent_.state_.latest_data_encoding_filter_ == this); - callback(*parent_.buffered_response_data_.get()); - } - Http1StreamEncoderOptionsOptRef 
http1StreamEncoderOptions() override { - // TODO(mattklein123): At some point we might want to actually wrap this interface but for now - // we give the filter direct access to the encoder options. - return parent_.response_encoder_->http1StreamEncoderOptions(); - } + const Buffer::Instance* encodingBuffer() override; + void modifyEncodingBuffer(std::function callback) override; + Http1StreamEncoderOptionsOptRef http1StreamEncoderOptions() override; void responseDataTooLarge(); void responseDataDrained(); @@ -438,25 +392,281 @@ class ConnectionManagerImpl : Logger::Loggable, }; /** - * Wraps a single active stream on the connection. These are either full request/response pairs - * or pushes. + * Callbacks invoked by the FilterManager to pass filter data/events back to the caller. */ - struct ActiveStream : LinkedObject, - public Event::DeferredDeletable, - public StreamCallbacks, - public RequestDecoder, - public FilterChainFactoryCallbacks, - public Tracing::Config, - public ScopeTrackedObject { - ActiveStream(ConnectionManagerImpl& connection_manager); - ~ActiveStream() override; + class FilterManagerCallbacks { + public: + virtual ~FilterManagerCallbacks() = default; - // Indicates which filter to start the iteration with. - enum class FilterIterationStartState { AlwaysStartFromNext, CanStartFromCurrent }; + /** + * Called when the provided headers have been encoded by all the filters in the chain. + * @param response_headers the encoded headers. + * @param end_stream whether this is a header only response. + */ + virtual void encodeHeaders(ResponseHeaderMap& response_headers, bool end_stream) PURE; + /** + * Called when the provided 100 Continue headers have been encoded by all the filters in the + * chain. + * @param response_headers the encoded headers. + */ + virtual void encode100ContinueHeaders(ResponseHeaderMap& response_headers) PURE; + + /** + * Called when the provided data has been encoded by all filters in the chain. + * @param data the encoded data. 
+ * @param end_stream whether this is the end of the response. + */ + virtual void encodeData(Buffer::Instance& data, bool end_stream) PURE; + + /** + * Called when the provided trailers have been encoded by all filters in the chain. + * @param trailers the encoded trailers. + */ + virtual void encodeTrailers(ResponseTrailerMap& trailers) PURE; + + /** + * Called when the provided metadata has been encoded by all filters in the chain. + * @param trailers the encoded trailers. + */ + virtual void encodeMetadata(MetadataMapVector& metadata) PURE; + + /** + * Called after encoding has completed. + */ + virtual void endStream() PURE; + + /** + * Called when the stream write buffer is no longer above the low watermark. + */ + virtual void onDecoderFilterBelowWriteBufferLowWatermark() PURE; + + /** + * Called when the stream write buffer is above above the high watermark. + */ + virtual void onDecoderFilterAboveWriteBufferHighWatermark() PURE; + + /** + * Called when the FilterManager creates an Upgrade filter chain. + */ + virtual void upgradeFilterChainCreated() PURE; + }; + + /** + * FilterManager manages decoding a request through a series of decoding filter and the encoding + * of the resulting response. 
+ */ + class FilterManager : public ScopeTrackedObject, FilterChainFactoryCallbacks { + public: + FilterManager(ActiveStream& active_stream, FilterManagerCallbacks& filter_manager_callbacks, + uint32_t buffer_limit, FilterChainFactory& filter_chain_factory, + const LocalReply::LocalReply& local_reply, Http::Protocol protocol, + TimeSource& time_source, StreamInfo::FilterStateSharedPtr parent_filter_state, + StreamInfo::FilterState::LifeSpan filter_state_life_span) + : active_stream_(active_stream), filter_manager_callbacks_(filter_manager_callbacks), + buffer_limit_(buffer_limit), filter_chain_factory_(filter_chain_factory), + local_reply_(local_reply), + stream_info_(protocol, time_source, parent_filter_state, filter_state_life_span) {} + ~FilterManager() override { + for (const auto& log_handler : access_log_handlers_) { + log_handler->log(request_headers_.get(), response_headers_.get(), response_trailers_.get(), + stream_info_); + } + + ASSERT(state_.filter_call_state_ == 0); + } + + // ScopeTrackedObject + void dumpState(std::ostream& os, int indent_level = 0) const override { + const char* spaces = spacesForLevel(indent_level); + os << spaces << "FilterManager " << this << DUMP_MEMBER(state_.has_continue_headers_) + << DUMP_MEMBER(state_.decoding_headers_only_) << DUMP_MEMBER(state_.encoding_headers_only_) + << "\n"; + + DUMP_DETAILS(request_headers_); + DUMP_DETAILS(request_trailers_); + DUMP_DETAILS(response_headers_); + DUMP_DETAILS(response_trailers_); + DUMP_DETAILS(&stream_info_); + } + + // Http::FilterChainFactoryCallbacks + void addStreamDecoderFilter(StreamDecoderFilterSharedPtr filter) override { + addStreamDecoderFilterWorker(filter, false); + } + void addStreamEncoderFilter(StreamEncoderFilterSharedPtr filter) override { + addStreamEncoderFilterWorker(filter, false); + } + void addStreamFilter(StreamFilterSharedPtr filter) override { + addStreamDecoderFilterWorker(filter, true); + addStreamEncoderFilterWorker(filter, true); + } + void 
addAccessLogHandler(AccessLog::InstanceSharedPtr handler) override; + + void destroyFilters() { + state_.destroyed_ = true; + + for (auto& filter : decoder_filters_) { + filter->handle_->onDestroy(); + } + + for (auto& filter : encoder_filters_) { + // Do not call on destroy twice for dual registered filters. + if (!filter->dual_filter_) { + filter->handle_->onDestroy(); + } + } + } + + /** + * Decodes the provided headers starting at the first filter in the chain. + * @param headers the headers to decode. + * @param end_stream whether the request is header only. + */ + void decodeHeaders(RequestHeaderMap& headers, bool end_stream) { + decodeHeaders(nullptr, headers, end_stream); + } + + /** + * Decodes the provided data starting at the first filter in the chain. + * @param data the data to decode. + * @param end_stream whether this data is the end of the request. + */ + void decodeData(Buffer::Instance& data, bool end_stream) { + decodeData(nullptr, data, end_stream, FilterIterationStartState::CanStartFromCurrent); + } + + /** + * Decodes the provided trailers starting at the first filter in the chain. + * @param trailers the trailers to decode. + */ + void decodeTrailers(RequestTrailerMapPtr&& trailers) { + ASSERT(request_trailers_ == nullptr); + + request_trailers_ = std::move(trailers); + decodeTrailers(nullptr, *request_trailers_); + } + + /** + * Decodes the provided metadata starting at the first filter in the chain. + * @param metadata_map the metadata to decode. + */ + void decodeMetadata(MetadataMap& metadata_map) { decodeMetadata(nullptr, metadata_map); } + + // TODO(snowp): Make private as filter chain construction is moved into FM. 
void addStreamDecoderFilterWorker(StreamDecoderFilterSharedPtr filter, bool dual_filter); void addStreamEncoderFilterWorker(StreamEncoderFilterSharedPtr filter, bool dual_filter); - void chargeStats(const ResponseHeaderMap& headers); + + void disarmRequestTimeout(); + + /** + * If end_stream is true, marks decoding as complete. This is a noop if end_stream is false. + * @param end_stream whether decoding is complete. + */ + void maybeEndDecode(bool end_stream); + + /** + * If end_stream is true, marks encoding as complete. This is a noop if end_stream is false. + * @param end_stream whether encoding is complete. + */ + void maybeEndEncode(bool end_stream); + + /** + * Sends a local reply by constructing a response and passing it through all the encoder + * filters. The resulting response will be passed out via the FilterManagerCallbacks. + */ + void sendLocalReplyViaFilterChain( + bool is_grpc_request, Code code, absl::string_view body, + const std::function& modify_headers, bool is_head_request, + const absl::optional grpc_status, absl::string_view details); + + /** + * Sends a local reply by constructing a response and skipping the encoder filters. The + * resulting response will be passed out via the FilterManagerCallbacks. + */ + void sendDirectLocalReply(Code code, absl::string_view body, + const std::function& modify_headers, + bool is_head_request, + const absl::optional grpc_status); + + // Possibly increases buffer_limit_ to the value of limit. + void setBufferLimit(uint32_t limit); + + /** + * @return bool whether any above high watermark triggers are currently active + */ + bool aboveHighWatermark() { return high_watermark_count_ != 0; } + + // Pass on watermark callbacks to watermark subscribers. This boils down to passing watermark + // events for this stream and the downstream connection to the router filter. 
+ void callHighWatermarkCallbacks(); + void callLowWatermarkCallbacks(); + + void setRequestHeaders(RequestHeaderMapPtr&& request_headers) { + // TODO(snowp): Ideally we don't need this function, but during decodeHeaders we might issue + // local replies before the FilterManager::decodeData has been called. We could likely get rid + // of this by updating the calls to sendLocalReply to pass ownership over the headers + adding + // asserts that we don't call the overload that doesn't pass ownership unless decodeData has + // been called. + ASSERT(request_headers_ == nullptr); + request_headers_ = std::move(request_headers); + } + + /** + * Marks local processing as complete. + */ + void setLocalComplete() { state_.local_complete_ = true; } + + /** + * Whether the filters have been destroyed. + */ + bool destroyed() const { return state_.destroyed_; } + + /** + * Whether remote processing has been marked as complete. + */ + bool remoteComplete() const { return state_.remote_complete_; } + + /** + * Instructs the FilterManager to not create a filter chain. This makes it possible to issue + * a local reply without the overhead of creating and traversing the filters. + */ + void skipFilterChainCreation() { + ASSERT(!state_.created_filter_chain_); + state_.created_filter_chain_ = true; + } + + /** + * Returns the current request headers, or nullptr if header decoding hasn't started yet. + */ + RequestHeaderMap* requestHeaders() const { return request_headers_.get(); } + + /** + * Returns the current request trailers, or nullptr if trailer decoding hasn't started yet. + */ + RequestTrailerMap* requestTrailers() const { return request_trailers_.get(); } + + /** + * Returns the current response headers, or nullptr if header encoding hasn't started yet. + */ + ResponseHeaderMap* responseHeaders() const { return response_headers_.get(); } + + /** + * Returns the current response trailers, or nullptr if trailer encoding hasn't started yet. 
+ */ + ResponseTrailerMap* responseTrailers() const { return response_trailers_.get(); } + + // TODO(snowp): This should probably return a StreamInfo instead of the impl. + StreamInfo::StreamInfoImpl& streamInfo() { return stream_info_; } + const StreamInfo::StreamInfoImpl& streamInfo() const { return stream_info_; } + + // Set up the Encoder/Decoder filter chain. + bool createFilterChain(); + + private: + // Indicates which filter to start the iteration with. + enum class FilterIterationStartState { AlwaysStartFromNext, CanStartFromCurrent }; + // Returns the encoder filter to start iteration with. std::list::iterator commonEncodePrefix(ActiveStreamEncoderFilter* filter, bool end_stream, @@ -465,10 +675,13 @@ class ConnectionManagerImpl : Logger::Loggable, std::list::iterator commonDecodePrefix(ActiveStreamDecoderFilter* filter, FilterIterationStartState filter_iteration_start_state); - const Network::Connection* connection(); void addDecodedData(ActiveStreamDecoderFilter& filter, Buffer::Instance& data, bool streaming); RequestTrailerMap& addDecodedTrailers(); MetadataMapVector& addDecodedMetadata(); + // Helper function for the case where we have a header only request, but a filter adds a body + // to it. + void maybeContinueDecoding( + const std::list::iterator& maybe_continue_data_entry); void decodeHeaders(ActiveStreamDecoderFilter* filter, RequestHeaderMap& headers, bool end_stream); // Sends data through decoding filter chains. 
filter_iteration_start_state indicates which @@ -477,19 +690,18 @@ class ConnectionManagerImpl : Logger::Loggable, FilterIterationStartState filter_iteration_start_state); void decodeTrailers(ActiveStreamDecoderFilter* filter, RequestTrailerMap& trailers); void decodeMetadata(ActiveStreamDecoderFilter* filter, MetadataMap& metadata_map); - void disarmRequestTimeout(); - void maybeEndDecode(bool end_stream); void addEncodedData(ActiveStreamEncoderFilter& filter, Buffer::Instance& data, bool streaming); ResponseTrailerMap& addEncodedTrailers(); void sendLocalReply(bool is_grpc_request, Code code, absl::string_view body, const std::function& modify_headers, - bool is_head_request, const absl::optional grpc_status, absl::string_view details); void encode100ContinueHeaders(ActiveStreamEncoderFilter* filter, ResponseHeaderMap& headers); // As with most of the encode functions, this runs encodeHeaders on various // filters before calling encodeHeadersInternal which does final header munging and passes the // headers to the encoder. + void maybeContinueEncoding( + const std::list::iterator& maybe_continue_data_entry); void encodeHeaders(ActiveStreamEncoderFilter* filter, ResponseHeaderMap& headers, bool end_stream); // Sends data through encoding filter chains. filter_iteration_start_state indicates which @@ -500,25 +712,144 @@ class ConnectionManagerImpl : Logger::Loggable, void encodeTrailers(ActiveStreamEncoderFilter* filter, ResponseTrailerMap& trailers); void encodeMetadata(ActiveStreamEncoderFilter* filter, MetadataMapPtr&& metadata_map_ptr); - // This is a helper function for encodeHeaders and responseDataTooLarge which allows for shared - // code for the two headers encoding paths. It does header munging, updates timing stats, and - // sends the headers to the encoder. 
- void encodeHeadersInternal(ResponseHeaderMap& headers, bool end_stream); - // This is a helper function for encodeData and responseDataTooLarge which allows for shared - // code for the two data encoding paths. It does stats updates and tracks potential end of - // stream. - void encodeDataInternal(Buffer::Instance& data, bool end_stream); - - void maybeEndEncode(bool end_stream); // Returns true if new metadata is decoded. Otherwise, returns false. bool processNewlyAddedMetadata(); - uint64_t streamId() { return stream_id_; } + // Returns true if filter has stopped iteration for all frame types. Otherwise, returns false. // filter_streaming is the variable to indicate if stream is streaming, and its value may be // changed by the function. bool handleDataIfStopAll(ActiveStreamFilterBase& filter, Buffer::Instance& data, bool& filter_streaming); + MetadataMapVector* getRequestMetadataMapVector() { + if (request_metadata_map_vector_ == nullptr) { + request_metadata_map_vector_ = std::make_unique(); + } + return request_metadata_map_vector_.get(); + } + + ActiveStream& active_stream_; + + FilterManagerCallbacks& filter_manager_callbacks_; + + std::list decoder_filters_; + std::list encoder_filters_; + std::list access_log_handlers_; + + ResponseHeaderMapPtr continue_headers_; + ResponseHeaderMapPtr response_headers_; + ResponseTrailerMapPtr response_trailers_; + RequestHeaderMapPtr request_headers_; + RequestTrailerMapPtr request_trailers_; + // Stores metadata added in the decoding filter that is being processed. Will be cleared before + // processing the next filter. The storage is created on demand. We need to store metadata + // temporarily in the filter in case the filter has stopped all while processing headers. 
+ std::unique_ptr request_metadata_map_vector_; + Buffer::WatermarkBufferPtr buffered_response_data_; + Buffer::WatermarkBufferPtr buffered_request_data_; + uint32_t buffer_limit_{0}; + uint32_t high_watermark_count_{0}; + std::list watermark_callbacks_; + + FilterChainFactory& filter_chain_factory_; + const LocalReply::LocalReply& local_reply_; + StreamInfo::StreamInfoImpl stream_info_; + // TODO(snowp): Once FM has been moved to its own file we'll make these private classes of FM, + // at which point they no longer need to be friends. + friend ActiveStreamFilterBase; + friend ActiveStreamDecoderFilter; + friend ActiveStreamEncoderFilter; + + /** + * Flags that keep track of which filter calls are currently in progress. + */ + // clang-format off + struct FilterCallState { + static constexpr uint32_t DecodeHeaders = 0x01; + static constexpr uint32_t DecodeData = 0x02; + static constexpr uint32_t DecodeTrailers = 0x04; + static constexpr uint32_t EncodeHeaders = 0x08; + static constexpr uint32_t EncodeData = 0x10; + static constexpr uint32_t EncodeTrailers = 0x20; + // Encode100ContinueHeaders is a bit of a special state as 100 continue + // headers may be sent during request processing. This state is only used + // to verify we do not encode100Continue headers more than once per + // filter. + static constexpr uint32_t Encode100ContinueHeaders = 0x40; + // Used to indicate that we're processing the final [En|De]codeData frame, + // i.e. end_stream = true + static constexpr uint32_t LastDataFrame = 0x80; + }; + // clang-format on + + struct State { + State() + : remote_complete_(false), local_complete_(false), has_continue_headers_(false), + created_filter_chain_(false) {} + + uint32_t filter_call_state_{0}; + + bool remote_complete_ : 1; + bool local_complete_ : 1; // This indicates that local is complete prior to filter processing. + // A filter can still stop the stream from being complete as seen + // by the codec. 
+ // By default, we will assume there are no 100-Continue headers. If encode100ContinueHeaders + // is ever called, this is set to true so commonContinue resumes processing the 100-Continue. + bool has_continue_headers_ : 1; + bool created_filter_chain_ : 1; + + // The following 3 members are booleans rather than part of the space-saving bitfield as they + // are passed as arguments to functions expecting bools. Extend State using the bitfield + // where possible. + bool encoder_filters_streaming_{true}; + bool decoder_filters_streaming_{true}; + bool destroyed_{false}; + // Whether a filter has indicated that the response should be treated as a headers only + // response. + bool encoding_headers_only_{false}; + // Whether a filter has indicated that the request should be treated as a headers only + // request. + bool decoding_headers_only_{false}; + + // Used to track which filter is the latest filter that has received data. + ActiveStreamEncoderFilter* latest_data_encoding_filter_{}; + ActiveStreamDecoderFilter* latest_data_decoding_filter_{}; + }; + + State state_; + }; + + /** + * Wraps a single active stream on the connection. These are either full request/response pairs + * or pushes. 
+ */ + struct ActiveStream : LinkedObject, + public Event::DeferredDeletable, + public StreamCallbacks, + public RequestDecoder, + public Tracing::Config, + public ScopeTrackedObject, + public FilterManagerCallbacks { + ActiveStream(ConnectionManagerImpl& connection_manager, uint32_t buffer_limit); + ~ActiveStream() override; + + void chargeStats(const ResponseHeaderMap& headers); + const Network::Connection* connection(); + void sendLocalReply(bool is_grpc_request, Code code, absl::string_view body, + const std::function& modify_headers, + const absl::optional grpc_status, + absl::string_view details) override; + uint64_t streamId() { return stream_id_; } + + // This is a helper function for encodeHeaders and responseDataTooLarge which allows for + // shared code for the two headers encoding paths. It does header munging, updates timing + // stats, and sends the headers to the encoder. + void encodeHeadersInternal(ResponseHeaderMap& headers, bool end_stream); + // This is a helper function for encodeData and responseDataTooLarge which allows for shared + // code for the two data encoding paths. It does stats updates and tracks potential end of + // stream. 
+ void encodeDataInternal(Buffer::Instance& data, bool end_stream); + // Http::StreamCallbacks void onResetStream(StreamResetReason reason, absl::string_view transport_failure_reason) override; @@ -533,19 +864,6 @@ class ConnectionManagerImpl : Logger::Loggable, void decodeHeaders(RequestHeaderMapPtr&& headers, bool end_stream) override; void decodeTrailers(RequestTrailerMapPtr&& trailers) override; - // Http::FilterChainFactoryCallbacks - void addStreamDecoderFilter(StreamDecoderFilterSharedPtr filter) override { - addStreamDecoderFilterWorker(filter, false); - } - void addStreamEncoderFilter(StreamEncoderFilterSharedPtr filter) override { - addStreamEncoderFilterWorker(filter, false); - } - void addStreamFilter(StreamFilterSharedPtr filter) override { - addStreamDecoderFilterWorker(filter, true); - addStreamEncoderFilterWorker(filter, true); - } - void addAccessLogHandler(AccessLog::InstanceSharedPtr handler) override; - // Tracing::TracingConfig Tracing::OperationName operationName() const override; const Tracing::CustomTagMap* customTags() const override; @@ -556,15 +874,30 @@ class ConnectionManagerImpl : Logger::Loggable, void dumpState(std::ostream& os, int indent_level = 0) const override { const char* spaces = spacesForLevel(indent_level); os << spaces << "ActiveStream " << this << DUMP_MEMBER(stream_id_) - << DUMP_MEMBER(state_.has_continue_headers_) << DUMP_MEMBER(state_.is_head_request_) - << DUMP_MEMBER(state_.decoding_headers_only_) << DUMP_MEMBER(state_.encoding_headers_only_) - << "\n"; + << DUMP_MEMBER(state_.is_head_request_); - DUMP_DETAILS(request_headers_); - DUMP_DETAILS(request_trailers_); - DUMP_DETAILS(response_headers_); - DUMP_DETAILS(response_trailers_); - DUMP_DETAILS(&stream_info_); + DUMP_DETAILS(&filter_manager_); + } + + // FilterManagerCallbacks + void encodeHeaders(ResponseHeaderMap& response_headers, bool end_stream) override; + void encode100ContinueHeaders(ResponseHeaderMap& response_headers) override; + void 
encodeData(Buffer::Instance& data, bool end_stream) override; + void encodeTrailers(ResponseTrailerMap& trailers) override; + void encodeMetadata(MetadataMapVector& metadata) override; + void endStream() override { + ASSERT(!state_.codec_saw_local_complete_); + state_.codec_saw_local_complete_ = true; + filter_manager_.streamInfo().onLastDownstreamTxByteSent(); + request_response_timespan_->complete(); + connection_manager_.doEndStream(*this); + } + void onDecoderFilterBelowWriteBufferLowWatermark() override; + void onDecoderFilterAboveWriteBufferHighWatermark() override; + void upgradeFilterChainCreated() override { + connection_manager_.stats_.named_.downstream_cx_upgrades_total_.inc(); + connection_manager_.stats_.named_.downstream_cx_upgrades_active_.inc(); + state_.successful_upgrade_ = true; } void traceRequest(); @@ -574,6 +907,7 @@ class ConnectionManagerImpl : Logger::Loggable, void snapScopedRouteConfig(); void refreshCachedRoute(); + void refreshCachedRoute(const Router::RouteCallback& cb); void requestRouteConfigUpdate(Event::Dispatcher& thread_local_dispatcher, Http::RouteConfigUpdatedCallbackSharedPtr route_config_updated_cb); @@ -581,105 +915,46 @@ class ConnectionManagerImpl : Logger::Loggable, void refreshCachedTracingCustomTags(); - // Pass on watermark callbacks to watermark subscribers. This boils down to passing watermark - // events for this stream and the downstream connection to the router filter. - void callHighWatermarkCallbacks(); - void callLowWatermarkCallbacks(); - - /** - * Flags that keep track of which filter calls are currently in progress. 
- */ - // clang-format off - struct FilterCallState { - static constexpr uint32_t DecodeHeaders = 0x01; - static constexpr uint32_t DecodeData = 0x02; - static constexpr uint32_t DecodeTrailers = 0x04; - static constexpr uint32_t EncodeHeaders = 0x08; - static constexpr uint32_t EncodeData = 0x10; - static constexpr uint32_t EncodeTrailers = 0x20; - // Encode100ContinueHeaders is a bit of a special state as 100 continue - // headers may be sent during request processing. This state is only used - // to verify we do not encode100Continue headers more than once per - // filter. - static constexpr uint32_t Encode100ContinueHeaders = 0x40; - // Used to indicate that we're processing the final [En|De]codeData frame, - // i.e. end_stream = true - static constexpr uint32_t LastDataFrame = 0x80; - }; - // clang-format on - // All state for the stream. Put here for readability. struct State { State() - : remote_complete_(false), local_complete_(false), codec_saw_local_complete_(false), - saw_connection_close_(false), successful_upgrade_(false), created_filter_chain_(false), - is_internally_created_(false), decorated_propagate_(true), has_continue_headers_(false), - is_head_request_(false) {} + : codec_saw_local_complete_(false), saw_connection_close_(false), + successful_upgrade_(false), is_internally_created_(false), decorated_propagate_(true), + is_head_request_(false), non_100_response_headers_encoded_(false) {} - uint32_t filter_call_state_{0}; - // The following 3 members are booleans rather than part of the space-saving bitfield as they - // are passed as arguments to functions expecting bools. Extend State using the bitfield - // where possible. - bool encoder_filters_streaming_{true}; - bool decoder_filters_streaming_{true}; - bool destroyed_{false}; - bool remote_complete_ : 1; - bool local_complete_ : 1; // This indicates that local is complete prior to filter processing. - // A filter can still stop the stream from being complete as seen - // by the codec. 
bool codec_saw_local_complete_ : 1; // This indicates that local is complete as written all // the way through to the codec. bool saw_connection_close_ : 1; bool successful_upgrade_ : 1; - bool created_filter_chain_ : 1; // True if this stream is internally created. Currently only used for // internal redirects or other streams created via recreateStream(). bool is_internally_created_ : 1; bool decorated_propagate_ : 1; - // By default, we will assume there are no 100-Continue headers. If encode100ContinueHeaders - // is ever called, this is set to true so commonContinue resumes processing the 100-Continue. - bool has_continue_headers_ : 1; bool is_head_request_ : 1; - // Whether a filter has indicated that the request should be treated as a headers only - // request. - bool decoding_headers_only_{false}; - // Whether a filter has indicated that the response should be treated as a headers only - // response. - bool encoding_headers_only_{false}; - - // Used to track which filter is the latest filter that has received data. - ActiveStreamEncoderFilter* latest_data_encoding_filter_{}; - ActiveStreamDecoderFilter* latest_data_decoding_filter_{}; + // Tracks if headers other than 100-Continue have been encoded to the codec. + bool non_100_response_headers_encoded_ : 1; }; - // Possibly increases buffer_limit_ to the value of limit. - void setBufferLimit(uint32_t limit); - // Set up the Encoder/Decoder filter chain. - bool createFilterChain(); // Per-stream idle timeout callback. void onIdleTimeout(); // Reset per-stream idle timer. void resetIdleTimer(); - // Per-stream request timeout callback + // Per-stream request timeout callback. void onRequestTimeout(); // Per-stream alive duration reached. void onStreamMaxDurationReached(); bool hasCachedRoute() { return cached_route_.has_value() && cached_route_.value(); } + // Return local port of the connection. 
+ uint32_t localPort(); + friend std::ostream& operator<<(std::ostream& os, const ActiveStream& s) { s.dumpState(os); return os; } - MetadataMapVector* getRequestMetadataMapVector() { - if (request_metadata_map_vector_ == nullptr) { - request_metadata_map_vector_ = std::make_unique(); - } - return request_metadata_map_vector_.get(); - } - Tracing::CustomTagMap& getOrMakeTracingCustomTagMap() { if (tracing_custom_tags_ == nullptr) { tracing_custom_tags_ = std::make_unique(); @@ -688,21 +963,12 @@ class ConnectionManagerImpl : Logger::Loggable, } ConnectionManagerImpl& connection_manager_; + FilterManager filter_manager_; Router::ConfigConstSharedPtr snapped_route_config_; Router::ScopedConfigConstSharedPtr snapped_scoped_routes_config_; Tracing::SpanPtr active_span_; const uint64_t stream_id_; ResponseEncoder* response_encoder_{}; - ResponseHeaderMapPtr continue_headers_; - ResponseHeaderMapPtr response_headers_; - Buffer::WatermarkBufferPtr buffered_response_data_; - ResponseTrailerMapPtr response_trailers_{}; - RequestHeaderMapPtr request_headers_; - Buffer::WatermarkBufferPtr buffered_request_data_; - RequestTrailerMapPtr request_trailers_; - std::list decoder_filters_; - std::list encoder_filters_; - std::list access_log_handlers_; Stats::TimespanPtr request_response_timespan_; // Per-stream idle timeout. Event::TimerPtr stream_idle_timer_; @@ -712,20 +978,14 @@ class ConnectionManagerImpl : Logger::Loggable, Event::TimerPtr max_stream_duration_timer_; std::chrono::milliseconds idle_timeout_ms_{}; State state_; - StreamInfo::StreamInfoImpl stream_info_; absl::optional cached_route_; absl::optional cached_cluster_info_; - std::list watermark_callbacks_{}; - // Stores metadata added in the decoding filter that is being processed. Will be cleared before - // processing the next filter. The storage is created on demand. We need to store metadata - // temporarily in the filter in case the filter has stopped all while processing headers. 
- std::unique_ptr request_metadata_map_vector_{nullptr}; - uint32_t buffer_limit_{0}; - uint32_t high_watermark_count_{0}; const std::string* decorated_operation_{nullptr}; Network::Socket::OptionsSharedPtr upstream_options_; std::unique_ptr route_config_update_requester_; std::unique_ptr tracing_custom_tags_{nullptr}; + + friend FilterManager; }; using ActiveStreamPtr = std::unique_ptr; @@ -753,7 +1013,7 @@ class ConnectionManagerImpl : Logger::Loggable, void onDrainTimeout(); void startDrainSequence(); Tracing::HttpTracer& tracer() { return *config_.tracer(); } - void handleCodecException(const char* error); + void handleCodecError(absl::string_view error); void doConnectionClose(absl::optional close_type, absl::optional response_flag); @@ -775,7 +1035,7 @@ class ConnectionManagerImpl : Logger::Loggable, // A connection duration timer. Armed during handling new connection if enabled in config. Event::TimerPtr connection_duration_timer_; Event::TimerPtr drain_timer_; - Runtime::RandomGenerator& random_generator_; + Random::RandomGenerator& random_generator_; Http::Context& http_context_; Runtime::Loader& runtime_; const LocalInfo::LocalInfo& local_info_; @@ -787,7 +1047,7 @@ class ConnectionManagerImpl : Logger::Loggable, const Server::OverloadActionState& overload_stop_accepting_requests_ref_; const Server::OverloadActionState& overload_disable_keepalive_ref_; TimeSource& time_source_; - std::shared_ptr filter_state_; + bool remote_close_{}; }; } // namespace Http diff --git a/source/common/http/conn_manager_utility.cc b/source/common/http/conn_manager_utility.cc index b4a97bfa8b053..c8ce01993cfae 100644 --- a/source/common/http/conn_manager_utility.cc +++ b/source/common/http/conn_manager_utility.cc @@ -6,12 +6,14 @@ #include "envoy/type/v3/percent.pb.h" -#include "common/access_log/access_log_formatter.h" #include "common/common/empty_string.h" #include "common/common/utility.h" +#include "common/http/header_utility.h" #include "common/http/headers.h" #include 
"common/http/http1/codec_impl.h" +#include "common/http/http1/codec_impl_legacy.h" #include "common/http/http2/codec_impl.h" +#include "common/http/http2/codec_impl_legacy.h" #include "common/http/path_utility.h" #include "common/http/utility.h" #include "common/network/utility.h" @@ -34,7 +36,7 @@ std::string ConnectionManagerUtility::determineNextProtocol(Network::Connection& // us the first few bytes of the HTTP/2 prefix since in all public cases we use SSL/ALPN. For // internal cases this should practically never happen. if (data.startsWith(Http2::CLIENT_MAGIC_PREFIX)) { - return Http2::ALPN_STRING; + return Utility::AlpnNames::get().Http2; } return ""; @@ -42,19 +44,35 @@ std::string ConnectionManagerUtility::determineNextProtocol(Network::Connection& ServerConnectionPtr ConnectionManagerUtility::autoCreateCodec( Network::Connection& connection, const Buffer::Instance& data, - ServerConnectionCallbacks& callbacks, Stats::Scope& scope, const Http1Settings& http1_settings, + ServerConnectionCallbacks& callbacks, Stats::Scope& scope, + Http1::CodecStats::AtomicPtr& http1_codec_stats, + Http2::CodecStats::AtomicPtr& http2_codec_stats, const Http1Settings& http1_settings, const envoy::config::core::v3::Http2ProtocolOptions& http2_options, uint32_t max_request_headers_kb, uint32_t max_request_headers_count, envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction headers_with_underscores_action) { - if (determineNextProtocol(connection, data) == Http2::ALPN_STRING) { - return std::make_unique( - connection, callbacks, scope, http2_options, max_request_headers_kb, - max_request_headers_count, headers_with_underscores_action); + if (determineNextProtocol(connection, data) == Utility::AlpnNames::get().Http2) { + Http2::CodecStats& stats = Http2::CodecStats::atomicGet(http2_codec_stats, scope); + if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.new_codec_behavior")) { + return std::make_unique( + connection, callbacks, stats, 
http2_options, max_request_headers_kb, + max_request_headers_count, headers_with_underscores_action); + } else { + return std::make_unique( + connection, callbacks, stats, http2_options, max_request_headers_kb, + max_request_headers_count, headers_with_underscores_action); + } } else { - return std::make_unique( - connection, scope, callbacks, http1_settings, max_request_headers_kb, - max_request_headers_count, headers_with_underscores_action); + Http1::CodecStats& stats = Http1::CodecStats::atomicGet(http1_codec_stats, scope); + if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.new_codec_behavior")) { + return std::make_unique( + connection, stats, callbacks, http1_settings, max_request_headers_kb, + max_request_headers_count, headers_with_underscores_action); + } else { + return std::make_unique( + connection, stats, callbacks, http1_settings, max_request_headers_kb, + max_request_headers_count, headers_with_underscores_action); + } } } @@ -363,9 +381,10 @@ void ConnectionManagerUtility::mutateXfccRequestHeader(RequestHeaderMap& request } } -void ConnectionManagerUtility::mutateResponseHeaders( - ResponseHeaderMap& response_headers, const RequestHeaderMap* request_headers, - const RequestIDExtensionSharedPtr& rid_extension, const std::string& via) { +void ConnectionManagerUtility::mutateResponseHeaders(ResponseHeaderMap& response_headers, + const RequestHeaderMap* request_headers, + ConnectionManagerConfig& config, + const std::string& via) { if (request_headers != nullptr && Utility::isUpgrade(*request_headers) && Utility::isUpgrade(response_headers)) { // As in mutateRequestHeaders, Upgrade responses have special handling. 
@@ -391,8 +410,9 @@ void ConnectionManagerUtility::mutateResponseHeaders( response_headers.removeTransferEncoding(); - if (request_headers != nullptr && request_headers->EnvoyForceTrace()) { - rid_extension->setInResponse(response_headers, *request_headers); + if (request_headers != nullptr && + (config.alwaysSetRequestIdInResponse() || request_headers->EnvoyForceTrace())) { + config.requestIDExtension()->setInResponse(response_headers, *request_headers); } response_headers.removeKeepAlive(); response_headers.removeProxyConnection(); @@ -404,7 +424,9 @@ void ConnectionManagerUtility::mutateResponseHeaders( bool ConnectionManagerUtility::maybeNormalizePath(RequestHeaderMap& request_headers, const ConnectionManagerConfig& config) { - ASSERT(request_headers.Path()); + if (!request_headers.Path()) { + return true; // It's as valid as it is going to get. + } bool is_valid_path = true; if (config.shouldNormalizePath()) { is_valid_path = PathUtil::canonicalPath(request_headers); @@ -416,5 +438,13 @@ bool ConnectionManagerUtility::maybeNormalizePath(RequestHeaderMap& request_head return is_valid_path; } +void ConnectionManagerUtility::maybeNormalizeHost(RequestHeaderMap& request_headers, + const ConnectionManagerConfig& config, + uint32_t port) { + if (config.shouldStripMatchingPort()) { + HeaderUtility::stripPortFromHost(request_headers, port); + } +} + } // namespace Http } // namespace Envoy diff --git a/source/common/http/conn_manager_utility.h b/source/common/http/conn_manager_utility.h index 20381116162f1..b46a98c2f0a75 100644 --- a/source/common/http/conn_manager_utility.h +++ b/source/common/http/conn_manager_utility.h @@ -7,6 +7,8 @@ #include "envoy/network/connection.h" #include "common/http/conn_manager_impl.h" +#include "common/http/http1/codec_stats.h" +#include "common/http/http2/codec_stats.h" namespace Envoy { namespace Http { @@ -36,6 +38,8 @@ class ConnectionManagerUtility { static ServerConnectionPtr autoCreateCodec(Network::Connection& connection, const 
Buffer::Instance& data, ServerConnectionCallbacks& callbacks, Stats::Scope& scope, + Http1::CodecStats::AtomicPtr& http1_codec_stats, + Http2::CodecStats::AtomicPtr& http2_codec_stats, const Http1Settings& http1_settings, const envoy::config::core::v3::Http2ProtocolOptions& http2_options, uint32_t max_request_headers_kb, uint32_t max_request_headers_count, @@ -60,15 +64,18 @@ class ConnectionManagerUtility { static void mutateResponseHeaders(ResponseHeaderMap& response_headers, const RequestHeaderMap* request_headers, - const RequestIDExtensionSharedPtr& rid_extension, - const std::string& via); + ConnectionManagerConfig& config, const std::string& via); - // Sanitize the path in the header map if forced by config. + // Sanitize the path in the header map if the path exists and it is forced by config. // Side affect: the string view of Path header is invalidated. // Return false if error happens during the sanitization. + // Returns true if there is no path. static bool maybeNormalizePath(RequestHeaderMap& request_headers, const ConnectionManagerConfig& config); + static void maybeNormalizeHost(RequestHeaderMap& request_headers, + const ConnectionManagerConfig& config, uint32_t port); + /** * Mutate request headers if request needs to be traced. 
*/ diff --git a/source/common/http/conn_pool_base.cc b/source/common/http/conn_pool_base.cc index b59c45a5beef1..6a3b2362a1edd 100644 --- a/source/common/http/conn_pool_base.cc +++ b/source/common/http/conn_pool_base.cc @@ -1,443 +1,89 @@ #include "common/http/conn_pool_base.h" +#include "common/common/assert.h" +#include "common/http/utility.h" +#include "common/network/transport_socket_options_impl.h" +#include "common/runtime/runtime_features.h" #include "common/stats/timespan_impl.h" #include "common/upstream/upstream_impl.h" namespace Envoy { namespace Http { -ConnPoolImplBase::ConnPoolImplBase( - Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, - Event::Dispatcher& dispatcher, const Network::ConnectionSocket::OptionsSharedPtr& options, - const Network::TransportSocketOptionsSharedPtr& transport_socket_options) - : host_(host), priority_(priority), dispatcher_(dispatcher), socket_options_(options), - transport_socket_options_(transport_socket_options) {} - -ConnPoolImplBase::~ConnPoolImplBase() { - ASSERT(ready_clients_.empty()); - ASSERT(busy_clients_.empty()); -} - -void ConnPoolImplBase::destructAllConnections() { - for (auto* list : {&ready_clients_, &busy_clients_}) { - while (!list->empty()) { - list->front()->close(); - } - } - - // Make sure all clients are destroyed before we are destroyed. - dispatcher_.clearDeferredDeleteList(); -} - -void ConnPoolImplBase::tryCreateNewConnection() { - if (pending_requests_.size() <= connecting_request_capacity_) { - // There are already enough CONNECTING connections for the number - // of queued requests. 
- return; - } - const bool can_create_connection = - host_->cluster().resourceManager(priority_).connections().canCreate(); - if (!can_create_connection) { - host_->cluster().stats().upstream_cx_overflow_.inc(); - } - // If we are at the connection circuit-breaker limit due to other upstreams having - // too many open connections, and this upstream has no connections, always create one, to - // prevent pending requests being queued to this upstream with no way to be processed. - if (can_create_connection || (ready_clients_.empty() && busy_clients_.empty())) { - ENVOY_LOG(debug, "creating a new connection"); - ActiveClientPtr client = instantiateActiveClient(); - ASSERT(client->state_ == ActiveClient::State::CONNECTING); - ASSERT(std::numeric_limits::max() - connecting_request_capacity_ >= - client->effectiveConcurrentRequestLimit()); - connecting_request_capacity_ += client->effectiveConcurrentRequestLimit(); - client->moveIntoList(std::move(client), owningList(client->state_)); +Network::TransportSocketOptionsSharedPtr +wrapTransportSocketOptions(Network::TransportSocketOptionsSharedPtr transport_socket_options, + Protocol protocol) { + if (!Runtime::runtimeFeatureEnabled("envoy.reloadable_features.http_default_alpn")) { + return transport_socket_options; } -} - -void ConnPoolImplBase::attachRequestToClient(ActiveClient& client, - ResponseDecoder& response_decoder, - ConnectionPool::Callbacks& callbacks) { - ASSERT(client.state_ == ActiveClient::State::READY); - - if (!host_->cluster().resourceManager(priority_).requests().canCreate()) { - ENVOY_LOG(debug, "max requests overflow"); - callbacks.onPoolFailure(ConnectionPool::PoolFailureReason::Overflow, absl::string_view(), - nullptr); - host_->cluster().stats().upstream_rq_pending_overflow_.inc(); - } else { - ENVOY_CONN_LOG(debug, "creating stream", *client.codec_client_); - RequestEncoder& new_encoder = client.newStreamEncoder(response_decoder); - - client.remaining_requests_--; - if (client.remaining_requests_ 
== 0) { - ENVOY_CONN_LOG(debug, "maximum requests per connection, DRAINING", *client.codec_client_); - host_->cluster().stats().upstream_cx_max_requests_.inc(); - transitionActiveClientState(client, ActiveClient::State::DRAINING); - } else if (client.codec_client_->numActiveRequests() >= client.concurrent_request_limit_) { - transitionActiveClientState(client, ActiveClient::State::BUSY); - } - - num_active_requests_++; - host_->stats().rq_total_.inc(); - host_->stats().rq_active_.inc(); - host_->cluster().stats().upstream_rq_total_.inc(); - host_->cluster().stats().upstream_rq_active_.inc(); - host_->cluster().resourceManager(priority_).requests().inc(); - callbacks.onPoolReady(new_encoder, client.real_host_description_, - client.codec_client_->streamInfo()); - } -} - -void ConnPoolImplBase::onRequestClosed(ActiveClient& client, bool delay_attaching_request) { - ENVOY_CONN_LOG(debug, "destroying stream: {} remaining", *client.codec_client_, - client.codec_client_->numActiveRequests()); - ASSERT(num_active_requests_ > 0); - num_active_requests_--; - host_->stats().rq_active_.dec(); - host_->cluster().stats().upstream_rq_active_.dec(); - host_->cluster().resourceManager(priority_).requests().dec(); - if (client.state_ == ActiveClient::State::DRAINING && - client.codec_client_->numActiveRequests() == 0) { - // Close out the draining client if we no longer have active requests. - client.codec_client_->close(); - } else if (client.state_ == ActiveClient::State::BUSY) { - // A request was just ended, so we should be below the limit now. 
- ASSERT(client.codec_client_->numActiveRequests() < client.concurrent_request_limit_); - - transitionActiveClientState(client, ActiveClient::State::READY); - if (!delay_attaching_request) { - onUpstreamReady(); - } - } -} - -ConnectionPool::Cancellable* ConnPoolImplBase::newStream(ResponseDecoder& response_decoder, - ConnectionPool::Callbacks& callbacks) { - if (!ready_clients_.empty()) { - ActiveClient& client = *ready_clients_.front(); - ENVOY_CONN_LOG(debug, "using existing connection", *client.codec_client_); - attachRequestToClient(client, response_decoder, callbacks); - return nullptr; - } - - if (host_->cluster().resourceManager(priority_).pendingRequests().canCreate()) { - ConnectionPool::Cancellable* pending = newPendingRequest(response_decoder, callbacks); - - // This must come after newPendingRequest() because this function uses the - // length of pending_requests_ to determine if a new connection is needed. - tryCreateNewConnection(); - return pending; - } else { - ENVOY_LOG(debug, "max pending requests overflow"); - callbacks.onPoolFailure(ConnectionPool::PoolFailureReason::Overflow, absl::string_view(), - nullptr); - host_->cluster().stats().upstream_rq_pending_overflow_.inc(); - return nullptr; - } -} - -void ConnPoolImplBase::onUpstreamReady() { - while (!pending_requests_.empty() && !ready_clients_.empty()) { - ActiveClientPtr& client = ready_clients_.front(); - ENVOY_CONN_LOG(debug, "attaching to next request", *client->codec_client_); - // Pending requests are pushed onto the front, so pull from the back. 
- attachRequestToClient(*client, pending_requests_.back()->decoder_, - pending_requests_.back()->callbacks_); - pending_requests_.pop_back(); - } -} - -bool ConnPoolImplBase::hasActiveConnections() const { - return (!pending_requests_.empty() || (num_active_requests_ > 0)); -} - -std::list& -ConnPoolImplBase::owningList(ActiveClient::State state) { - switch (state) { - case ActiveClient::State::CONNECTING: - return busy_clients_; - case ActiveClient::State::READY: - return ready_clients_; - case ActiveClient::State::BUSY: - return busy_clients_; - case ActiveClient::State::DRAINING: - return busy_clients_; - case ActiveClient::State::CLOSED: + // If configured to do so, we override the ALPN to use for the upstream connection to match the + // selected protocol. + std::string alpn; + switch (protocol) { + case Http::Protocol::Http10: NOT_REACHED_GCOVR_EXCL_LINE; - } - NOT_REACHED_GCOVR_EXCL_LINE; -} - -void ConnPoolImplBase::transitionActiveClientState(ActiveClient& client, - ActiveClient::State new_state) { - auto& old_list = owningList(client.state_); - auto& new_list = owningList(new_state); - client.state_ = new_state; - - // old_list and new_list can be equal when transitioning from BUSY to DRAINING. - // - // The documentation for list.splice() (which is what moveBetweenLists() calls) is - // unclear whether it is allowed for src and dst to be the same, so check here - // since it is a no-op anyways. - if (&old_list != &new_list) { - client.moveBetweenLists(old_list, new_list); - } -} - -void ConnPoolImplBase::addDrainedCallback(DrainedCb cb) { - drained_callbacks_.push_back(cb); - checkForDrained(); -} - -void ConnPoolImplBase::closeIdleConnections() { - // Create a separate list of elements to close to avoid mutate-while-iterating problems. 
- std::list to_close; - - for (auto& client : ready_clients_) { - if (!client->hasActiveRequests()) { - to_close.push_back(client.get()); - } - } - - if (pending_requests_.empty()) { - for (auto& client : busy_clients_) { - if (client->state_ == ActiveClient::State::CONNECTING) { - to_close.push_back(client.get()); - } - } - } - - for (auto& entry : to_close) { - entry->close(); - } -} - -void ConnPoolImplBase::drainConnections() { - closeIdleConnections(); - - // closeIdleConnections() closes all connections in ready_clients_ with no active requests, - // so all remaining entries in ready_clients_ are serving requests. Move them and all entries - // in busy_clients_ to draining. - while (!ready_clients_.empty()) { - transitionActiveClientState(*ready_clients_.front(), ActiveClient::State::DRAINING); - } - - // Changing busy_clients_ to DRAINING does not move them between lists, - // so use a for-loop since the list is not mutated. - ASSERT(&owningList(ActiveClient::State::DRAINING) == &busy_clients_); - for (auto& busy_client : busy_clients_) { - // Moving a CONNECTING client to DRAINING would violate state assumptions, namely that DRAINING - // connections have active requests (otherwise they would be closed) and that clients receiving - // a Connected event are in state CONNECTING. 
- if (busy_client->state_ != ActiveClient::State::CONNECTING) { - transitionActiveClientState(*busy_client, ActiveClient::State::DRAINING); - } - } -} - -void ConnPoolImplBase::checkForDrained() { - if (drained_callbacks_.empty()) { - return; - } - - closeIdleConnections(); - - if (pending_requests_.empty() && ready_clients_.empty() && busy_clients_.empty()) { - ENVOY_LOG(debug, "invoking drained callbacks"); - for (const DrainedCb& cb : drained_callbacks_) { - cb(); - } - } -} - -void ConnPoolImplBase::onConnectionEvent(ConnPoolImplBase::ActiveClient& client, - Network::ConnectionEvent event) { - if (client.state_ == ActiveClient::State::CONNECTING) { - ASSERT(connecting_request_capacity_ >= client.effectiveConcurrentRequestLimit()); - connecting_request_capacity_ -= client.effectiveConcurrentRequestLimit(); - } - - if (event == Network::ConnectionEvent::RemoteClose || - event == Network::ConnectionEvent::LocalClose) { - // The client died. - ENVOY_CONN_LOG(debug, "client disconnected, failure reason: {}", *client.codec_client_, - client.codec_client_->connectionFailureReason()); - - Envoy::Upstream::reportUpstreamCxDestroy(host_, event); - const bool incomplete_request = client.closingWithIncompleteRequest(); - if (incomplete_request) { - Envoy::Upstream::reportUpstreamCxDestroyActiveRequest(host_, event); - } - - if (client.state_ == ActiveClient::State::CONNECTING) { - host_->cluster().stats().upstream_cx_connect_fail_.inc(); - host_->stats().cx_connect_fail_.inc(); - - ConnectionPool::PoolFailureReason reason; - if (client.timed_out_) { - reason = ConnectionPool::PoolFailureReason::Timeout; - } else if (event == Network::ConnectionEvent::RemoteClose) { - reason = ConnectionPool::PoolFailureReason::RemoteConnectionFailure; - } else { - reason = ConnectionPool::PoolFailureReason::LocalConnectionFailure; - } - - // Raw connect failures should never happen under normal circumstances. 
If we have an upstream - // that is behaving badly, requests can get stuck here in the pending state. If we see a - // connect failure, we purge all pending requests so that calling code can determine what to - // do with the request. - // NOTE: We move the existing pending requests to a temporary list. This is done so that - // if retry logic submits a new request to the pool, we don't fail it inline. - purgePendingRequests(client.real_host_description_, - client.codec_client_->connectionFailureReason(), reason); - } - - // We need to release our resourceManager() resources before checking below for - // whether we can create a new connection. Normally this would happen when - // client's destructor runs, but this object needs to be deferredDelete'd(), so - // this forces part of its cleanup to happen now. - client.releaseResources(); - - dispatcher_.deferredDelete(client.removeFromList(owningList(client.state_))); - if (incomplete_request) { - checkForDrained(); - } - - client.state_ = ActiveClient::State::CLOSED; - - // If we have pending requests and we just lost a connection we should make a new one. 
- if (!pending_requests_.empty()) { - tryCreateNewConnection(); - } - } else if (event == Network::ConnectionEvent::Connected) { - client.conn_connect_ms_->complete(); - client.conn_connect_ms_.reset(); - - ASSERT(client.state_ == ActiveClient::State::CONNECTING); - transitionActiveClientState(client, ActiveClient::State::READY); - - onUpstreamReady(); - checkForDrained(); - } - - if (client.connect_timer_) { - client.connect_timer_->disableTimer(); - client.connect_timer_.reset(); - } -} - -ConnPoolImplBase::PendingRequest::PendingRequest(ConnPoolImplBase& parent, ResponseDecoder& decoder, - ConnectionPool::Callbacks& callbacks) - : parent_(parent), decoder_(decoder), callbacks_(callbacks) { - parent_.host_->cluster().stats().upstream_rq_pending_total_.inc(); - parent_.host_->cluster().stats().upstream_rq_pending_active_.inc(); - parent_.host_->cluster().resourceManager(parent_.priority_).pendingRequests().inc(); -} - -ConnPoolImplBase::PendingRequest::~PendingRequest() { - parent_.host_->cluster().stats().upstream_rq_pending_active_.dec(); - parent_.host_->cluster().resourceManager(parent_.priority_).pendingRequests().dec(); -} - -ConnectionPool::Cancellable* -ConnPoolImplBase::newPendingRequest(ResponseDecoder& decoder, - ConnectionPool::Callbacks& callbacks) { - ENVOY_LOG(debug, "queueing request due to no available connections"); - PendingRequestPtr pending_request(new PendingRequest(*this, decoder, callbacks)); - pending_request->moveIntoList(std::move(pending_request), pending_requests_); - return pending_requests_.front().get(); -} - -void ConnPoolImplBase::purgePendingRequests( - const Upstream::HostDescriptionConstSharedPtr& host_description, - absl::string_view failure_reason, ConnectionPool::PoolFailureReason reason) { - // NOTE: We move the existing pending requests to a temporary list. This is done so that - // if retry logic submits a new request to the pool, we don't fail it inline. 
- pending_requests_to_purge_ = std::move(pending_requests_); - while (!pending_requests_to_purge_.empty()) { - PendingRequestPtr request = - pending_requests_to_purge_.front()->removeFromList(pending_requests_to_purge_); - host_->cluster().stats().upstream_rq_pending_failure_eject_.inc(); - request->callbacks_.onPoolFailure(reason, failure_reason, host_description); - } -} - -void ConnPoolImplBase::onPendingRequestCancel(PendingRequest& request) { - ENVOY_LOG(debug, "cancelling pending request"); - if (!pending_requests_to_purge_.empty()) { - // If pending_requests_to_purge_ is not empty, it means that we are called from - // with-in a onPoolFailure callback invoked in purgePendingRequests (i.e. purgePendingRequests - // is down in the call stack). Remove this request from the list as it is cancelled, - // and there is no need to call its onPoolFailure callback. - request.removeFromList(pending_requests_to_purge_); + case Http::Protocol::Http11: + alpn = Http::Utility::AlpnNames::get().Http11; + break; + case Http::Protocol::Http2: + alpn = Http::Utility::AlpnNames::get().Http2; + break; + case Http::Protocol::Http3: + // TODO(snowp): Add once HTTP/3 upstream support is added. + NOT_IMPLEMENTED_GCOVR_EXCL_LINE; + break; + } + + if (transport_socket_options) { + return std::make_shared( + std::move(alpn), transport_socket_options); } else { - request.removeFromList(pending_requests_); + return std::make_shared( + "", std::vector{}, std::vector{}, std::move(alpn)); } - - host_->cluster().stats().upstream_rq_cancelled_.inc(); - checkForDrained(); -} - -namespace { -// Translate zero to UINT64_MAX so that the zero/unlimited case doesn't -// have to be handled specially. -uint64_t translateZeroToUnlimited(uint64_t limit) { - return (limit != 0) ? 
limit : std::numeric_limits::max(); } -} // namespace - -ConnPoolImplBase::ActiveClient::ActiveClient(ConnPoolImplBase& parent, - uint64_t lifetime_request_limit, - uint64_t concurrent_request_limit) - : parent_(parent), remaining_requests_(translateZeroToUnlimited(lifetime_request_limit)), - concurrent_request_limit_(translateZeroToUnlimited(concurrent_request_limit)), - connect_timer_(parent_.dispatcher_.createTimer([this]() -> void { onConnectTimeout(); })) { - Upstream::Host::CreateConnectionData data = parent_.host_->createConnection( - parent_.dispatcher_, parent_.socket_options_, parent_.transport_socket_options_); - real_host_description_ = data.host_description_; - codec_client_ = parent_.createCodecClient(data); - codec_client_->addConnectionCallbacks(*this); - conn_connect_ms_ = std::make_unique( - parent_.host_->cluster().stats().upstream_cx_connect_ms_, parent_.dispatcher_.timeSource()); - conn_length_ = std::make_unique( - parent_.host_->cluster().stats().upstream_cx_length_ms_, parent_.dispatcher_.timeSource()); - connect_timer_->enableTimer(parent_.host_->cluster().connectTimeout()); - - parent_.host_->stats().cx_total_.inc(); - parent_.host_->stats().cx_active_.inc(); - parent_.host_->cluster().stats().upstream_cx_total_.inc(); - parent_.host_->cluster().stats().upstream_cx_active_.inc(); - parent_.host_->cluster().resourceManager(parent_.priority_).connections().inc(); +HttpConnPoolImplBase::HttpConnPoolImplBase( + Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, + Event::Dispatcher& dispatcher, const Network::ConnectionSocket::OptionsSharedPtr& options, + const Network::TransportSocketOptionsSharedPtr& transport_socket_options, + Http::Protocol protocol) + : Envoy::ConnectionPool::ConnPoolImplBase( + host, priority, dispatcher, options, + wrapTransportSocketOptions(transport_socket_options, protocol)) {} - codec_client_->setConnectionStats( - {parent_.host_->cluster().stats().upstream_cx_rx_bytes_total_, - 
parent_.host_->cluster().stats().upstream_cx_rx_bytes_buffered_, - parent_.host_->cluster().stats().upstream_cx_tx_bytes_total_, - parent_.host_->cluster().stats().upstream_cx_tx_bytes_buffered_, - &parent_.host_->cluster().stats().bind_errors_, nullptr}); +ConnectionPool::Cancellable* +HttpConnPoolImplBase::newStream(Http::ResponseDecoder& response_decoder, + Http::ConnectionPool::Callbacks& callbacks) { + HttpAttachContext context({&response_decoder, &callbacks}); + return Envoy::ConnectionPool::ConnPoolImplBase::newStream(context); } -ConnPoolImplBase::ActiveClient::~ActiveClient() { releaseResources(); } - -void ConnPoolImplBase::ActiveClient::releaseResources() { - if (!resources_released_) { - resources_released_ = true; - - conn_length_->complete(); - - parent_.host_->cluster().stats().upstream_cx_active_.dec(); - parent_.host_->stats().cx_active_.dec(); - parent_.host_->cluster().resourceManager(parent_.priority_).connections().dec(); - } +bool HttpConnPoolImplBase::hasActiveConnections() const { + return (!pending_streams_.empty() || (num_active_streams_ > 0)); } -void ConnPoolImplBase::ActiveClient::onConnectTimeout() { - ENVOY_CONN_LOG(debug, "connect timeout", *codec_client_); - parent_.host_->cluster().stats().upstream_cx_connect_timeout_.inc(); - timed_out_ = true; - close(); +ConnectionPool::Cancellable* +HttpConnPoolImplBase::newPendingRequest(Envoy::ConnectionPool::AttachContext& context) { + Http::ResponseDecoder& decoder = *typedContext(context).decoder_; + Http::ConnectionPool::Callbacks& callbacks = *typedContext(context).callbacks_; + ENVOY_LOG(debug, "queueing stream due to no available connections"); + Envoy::ConnectionPool::PendingRequestPtr pending_stream( + new HttpPendingRequest(*this, decoder, callbacks)); + LinkedList::moveIntoList(std::move(pending_stream), pending_streams_); + return pending_streams_.front().get(); +} + +void HttpConnPoolImplBase::onPoolReady(Envoy::ConnectionPool::ActiveClient& client, + 
Envoy::ConnectionPool::AttachContext& context) { + ActiveClient* http_client = static_cast(&client); + auto& http_context = typedContext(context); + Http::ResponseDecoder& response_decoder = *http_context.decoder_; + Http::ConnectionPool::Callbacks& callbacks = *http_context.callbacks_; + Http::RequestEncoder& new_encoder = http_client->newStreamEncoder(response_decoder); + callbacks.onPoolReady(new_encoder, client.real_host_description_, + http_client->codec_client_->streamInfo()); } } // namespace Http diff --git a/source/common/http/conn_pool_base.h b/source/common/http/conn_pool_base.h index e702f2a057da8..45f3951570e9c 100644 --- a/source/common/http/conn_pool_base.h +++ b/source/common/http/conn_pool_base.h @@ -6,6 +6,7 @@ #include "envoy/stats/timespan.h" #include "common/common/linked_object.h" +#include "common/conn_pool/conn_pool_base.h" #include "common/http/codec_client.h" #include "absl/strings/string_view.h" @@ -13,172 +14,90 @@ namespace Envoy { namespace Http { -// Base class that handles request queueing logic shared between connection pool implementations. -class ConnPoolImplBase : public ConnectionPool::Instance, - protected Logger::Loggable { -public: - // ConnectionPool::Instance - ConnectionPool::Cancellable* newStream(ResponseDecoder& response_decoder, - ConnectionPool::Callbacks& callbacks) override; - void addDrainedCallback(DrainedCb cb) override; - bool hasActiveConnections() const override; - void drainConnections() override; - Upstream::HostDescriptionConstSharedPtr host() const override { return host_; }; - -protected: - ConnPoolImplBase(Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, - Event::Dispatcher& dispatcher, - const Network::ConnectionSocket::OptionsSharedPtr& options, - const Network::TransportSocketOptionsSharedPtr& transport_socket_options); - ~ConnPoolImplBase() override; - - // Closes and destroys all connections. 
This must be called in the destructor of - // derived classes because the derived ActiveClient will downcast parent_ to a more - // specific type of ConnPoolImplBase, but if the more specific part is already destructed - // (due to bottom-up destructor ordering in c++) that access will be invalid. - void destructAllConnections(); - - // ActiveClient provides a base class for connection pool clients that handles connection timings - // as well as managing the connection timeout. - class ActiveClient : public LinkedObject, - public Network::ConnectionCallbacks, - public Event::DeferredDeletable { - public: - ActiveClient(ConnPoolImplBase& parent, uint64_t lifetime_request_limit, - uint64_t concurrent_request_limit); - ~ActiveClient() override; - - void releaseResources(); - - // Network::ConnectionCallbacks - void onEvent(Network::ConnectionEvent event) override { - parent_.onConnectionEvent(*this, event); - } - void onAboveWriteBufferHighWatermark() override {} - void onBelowWriteBufferLowWatermark() override {} - - void onConnectTimeout(); - void close() { codec_client_->close(); } - - // Returns the concurrent request limit, accounting for if the total request limit - // is less than the concurrent request limit. - uint64_t effectiveConcurrentRequestLimit() const { - return std::min(remaining_requests_, concurrent_request_limit_); - } - - virtual bool hasActiveRequests() const PURE; - virtual bool closingWithIncompleteRequest() const PURE; - virtual RequestEncoder& newStreamEncoder(ResponseDecoder& response_decoder) PURE; - - enum class State { - CONNECTING, // Connection is not yet established. - READY, // Additional requests may be immediately dispatched to this connection. - BUSY, // Connection is at its concurrent request limit. - DRAINING, // No more requests can be dispatched to this connection, and it will be closed - // when all requests complete. - CLOSED // Connection is closed and object is queued for destruction. 
- }; - - ConnPoolImplBase& parent_; - uint64_t remaining_requests_; - const uint64_t concurrent_request_limit_; - State state_{State::CONNECTING}; - CodecClientPtr codec_client_; - Upstream::HostDescriptionConstSharedPtr real_host_description_; - Stats::TimespanPtr conn_connect_ms_; - Stats::TimespanPtr conn_length_; - Event::TimerPtr connect_timer_; - bool resources_released_{false}; - bool timed_out_{false}; - }; - - using ActiveClientPtr = std::unique_ptr; - - struct PendingRequest : LinkedObject, public ConnectionPool::Cancellable { - PendingRequest(ConnPoolImplBase& parent, ResponseDecoder& decoder, - ConnectionPool::Callbacks& callbacks); - ~PendingRequest() override; - - // ConnectionPool::Cancellable - void cancel() override { parent_.onPendingRequestCancel(*this); } - - ConnPoolImplBase& parent_; - ResponseDecoder& decoder_; - ConnectionPool::Callbacks& callbacks_; - }; - - using PendingRequestPtr = std::unique_ptr; +struct HttpAttachContext : public Envoy::ConnectionPool::AttachContext { + HttpAttachContext(Http::ResponseDecoder* d, Http::ConnectionPool::Callbacks* c) + : decoder_(d), callbacks_(c) {} + Http::ResponseDecoder* decoder_; + Http::ConnectionPool::Callbacks* callbacks_; +}; - // Create a new CodecClient. - virtual CodecClientPtr createCodecClient(Upstream::Host::CreateConnectionData& data) PURE; +// An implementation of Envoy::ConnectionPool::PendingRequest for HTTP/1.1 and HTTP/2 +class HttpPendingRequest : public Envoy::ConnectionPool::PendingRequest { +public: + // OnPoolSuccess for HTTP requires both the decoder and callbacks. OnPoolFailure + // requires only the callbacks, but passes both for consistency. 
+ HttpPendingRequest(Envoy::ConnectionPool::ConnPoolImplBase& parent, + Http::ResponseDecoder& decoder, Http::ConnectionPool::Callbacks& callbacks) + : Envoy::ConnectionPool::PendingRequest(parent), context_(&decoder, &callbacks) {} + + Envoy::ConnectionPool::AttachContext& context() override { return context_; } + HttpAttachContext context_; +}; - // Returns a new instance of ActiveClient. - virtual ActiveClientPtr instantiateActiveClient() PURE; +// An implementation of Envoy::ConnectionPool::ConnPoolImplBase for shared code +// between HTTP/1.1 and HTTP/2 +class HttpConnPoolImplBase : public Envoy::ConnectionPool::ConnPoolImplBase, + public Http::ConnectionPool::Instance { +public: + HttpConnPoolImplBase(Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, + Event::Dispatcher& dispatcher, + const Network::ConnectionSocket::OptionsSharedPtr& options, + const Network::TransportSocketOptionsSharedPtr& transport_socket_options, + Http::Protocol protocol); - // Gets a pointer to the list that currently owns this client. - std::list& owningList(ActiveClient::State state); + // ConnectionPool::Instance + void addDrainedCallback(DrainedCb cb) override { addDrainedCallbackImpl(cb); } + void drainConnections() override { drainConnectionsImpl(); } + Upstream::HostDescriptionConstSharedPtr host() const override { return host_; } + ConnectionPool::Cancellable* newStream(Http::ResponseDecoder& response_decoder, + Http::ConnectionPool::Callbacks& callbacks) override; + bool hasActiveConnections() const override; // Creates a new PendingRequest and enqueues it into the request queue. - ConnectionPool::Cancellable* newPendingRequest(ResponseDecoder& decoder, - ConnectionPool::Callbacks& callbacks); - // Removes the PendingRequest from the list of requests. Called when the PendingRequest is - // cancelled, e.g. when the stream is reset before a connection has been established. 
- void onPendingRequestCancel(PendingRequest& request); - - // Fails all pending requests, calling onPoolFailure on the associated callbacks. - void purgePendingRequests(const Upstream::HostDescriptionConstSharedPtr& host_description, - absl::string_view failure_reason, - ConnectionPool::PoolFailureReason pool_failure_reason); + ConnectionPool::Cancellable* + newPendingRequest(Envoy::ConnectionPool::AttachContext& context) override; + void onPoolFailure(const Upstream::HostDescriptionConstSharedPtr& host_description, + absl::string_view failure_reason, ConnectionPool::PoolFailureReason reason, + Envoy::ConnectionPool::AttachContext& context) override { + auto* callbacks = typedContext(context).callbacks_; + callbacks->onPoolFailure(reason, failure_reason, host_description); + } + void onPoolReady(Envoy::ConnectionPool::ActiveClient& client, + Envoy::ConnectionPool::AttachContext& context) override; - // Closes any idle connections. - void closeIdleConnections(); - - // Called by derived classes any time a request is completed or destroyed for any reason. - void onRequestClosed(ActiveClient& client, bool delay_attaching_request); - - // Changes the state_ of an ActiveClient and moves to the appropriate list. - void transitionActiveClientState(ActiveClient& client, ActiveClient::State new_state); - - void onConnectionEvent(ActiveClient& client, Network::ConnectionEvent event); - void checkForDrained(); - void onUpstreamReady(); - void attachRequestToClient(ActiveClient& client, ResponseDecoder& response_decoder, - ConnectionPool::Callbacks& callbacks); - - // Creates a new connection if allowed by resourceManager, or if created to avoid - // starving this pool. 
- void tryCreateNewConnection(); + virtual CodecClientPtr createCodecClient(Upstream::Host::CreateConnectionData& data) PURE; +}; +// An implementation of Envoy::ConnectionPool::ActiveClient for HTTP/1.1 and HTTP/2 +class ActiveClient : public Envoy::ConnectionPool::ActiveClient { public: - const Upstream::HostConstSharedPtr host_; - const Upstream::ResourcePriority priority_; - -protected: - Event::Dispatcher& dispatcher_; - const Network::ConnectionSocket::OptionsSharedPtr socket_options_; - const Network::TransportSocketOptionsSharedPtr transport_socket_options_; - - std::list drained_callbacks_; - std::list pending_requests_; - - // When calling purgePendingRequests, this list will be used to hold the requests we are about - // to purge. We need this if one cancelled requests cancels a different pending request - std::list pending_requests_to_purge_; - - // Clients that are ready to handle additional requests. - // All entries are in state READY. - std::list ready_clients_; - - // Clients that are not ready to handle additional requests. - // Entries are in possible states CONNECTING, BUSY, or DRAINING. - std::list busy_clients_; - - // The number of requests currently attached to clients. - uint64_t num_active_requests_{0}; - - // The number of requests that can be immediately dispatched - // if all CONNECTING connections become connected. 
- uint64_t connecting_request_capacity_{0}; + ActiveClient(HttpConnPoolImplBase& parent, uint64_t lifetime_request_limit, + uint64_t concurrent_request_limit) + : Envoy::ConnectionPool::ActiveClient(parent, lifetime_request_limit, + concurrent_request_limit) { + Upstream::Host::CreateConnectionData data = parent_.host()->createConnection( + parent_.dispatcher(), parent_.socketOptions(), parent_.transportSocketOptions()); + real_host_description_ = data.host_description_; + codec_client_ = parent.createCodecClient(data); + codec_client_->addConnectionCallbacks(*this); + codec_client_->setConnectionStats( + {parent_.host()->cluster().stats().upstream_cx_rx_bytes_total_, + parent_.host()->cluster().stats().upstream_cx_rx_bytes_buffered_, + parent_.host()->cluster().stats().upstream_cx_tx_bytes_total_, + parent_.host()->cluster().stats().upstream_cx_tx_bytes_buffered_, + &parent_.host()->cluster().stats().bind_errors_, nullptr}); + } + void close() override { codec_client_->close(); } + virtual Http::RequestEncoder& newStreamEncoder(Http::ResponseDecoder& response_decoder) PURE; + void onEvent(Network::ConnectionEvent event) override { + parent_.onConnectionEvent(*this, codec_client_->connectionFailureReason(), event); + } + size_t numActiveRequests() const override { return codec_client_->numActiveRequests(); } + uint64_t id() const override { return codec_client_->id(); } + + Http::CodecClientPtr codec_client_; }; + } // namespace Http + } // namespace Envoy diff --git a/source/common/http/conn_pool_base_legacy.cc b/source/common/http/conn_pool_base_legacy.cc deleted file mode 100644 index d50cb871bff3a..0000000000000 --- a/source/common/http/conn_pool_base_legacy.cc +++ /dev/null @@ -1,98 +0,0 @@ -#include "common/http/conn_pool_base_legacy.h" - -#include "common/stats/timespan_impl.h" - -namespace Envoy { -namespace Http { -namespace Legacy { -ConnPoolImplBase::ActiveClient::ActiveClient(Event::Dispatcher& dispatcher, - const Upstream::ClusterInfo& cluster) - : 
connect_timer_(dispatcher.createTimer([this]() -> void { onConnectTimeout(); })) { - - conn_connect_ms_ = std::make_unique( - cluster.stats().upstream_cx_connect_ms_, dispatcher.timeSource()); - conn_length_ = std::make_unique( - cluster.stats().upstream_cx_length_ms_, dispatcher.timeSource()); - connect_timer_->enableTimer(cluster.connectTimeout()); -} - -void ConnPoolImplBase::ActiveClient::recordConnectionSetup() { - conn_connect_ms_->complete(); - conn_connect_ms_.reset(); -} - -void ConnPoolImplBase::ActiveClient::disarmConnectTimeout() { - if (connect_timer_) { - connect_timer_->disableTimer(); - connect_timer_.reset(); - } -} - -ConnPoolImplBase::ActiveClient::ConnectionState ConnPoolImplBase::ActiveClient::connectionState() { - // We don't track any failure state, as the client should be deferred destroyed once a failure - // event is handled. - if (connect_timer_) { - return Connecting; - } - - return Connected; -} - -ConnPoolImplBase::PendingRequest::PendingRequest(ConnPoolImplBase& parent, ResponseDecoder& decoder, - ConnectionPool::Callbacks& callbacks) - : parent_(parent), decoder_(decoder), callbacks_(callbacks) { - parent_.host_->cluster().stats().upstream_rq_pending_total_.inc(); - parent_.host_->cluster().stats().upstream_rq_pending_active_.inc(); - parent_.host_->cluster().resourceManager(parent_.priority_).pendingRequests().inc(); -} - -ConnPoolImplBase::PendingRequest::~PendingRequest() { - parent_.host_->cluster().stats().upstream_rq_pending_active_.dec(); - parent_.host_->cluster().resourceManager(parent_.priority_).pendingRequests().dec(); -} - -ConnectionPool::Cancellable* -ConnPoolImplBase::newPendingRequest(ResponseDecoder& decoder, - ConnectionPool::Callbacks& callbacks) { - ENVOY_LOG(debug, "queueing request due to no available connections"); - PendingRequestPtr pending_request(new PendingRequest(*this, decoder, callbacks)); - pending_request->moveIntoList(std::move(pending_request), pending_requests_); - return 
pending_requests_.front().get(); -} - -void ConnPoolImplBase::purgePendingRequests( - const Upstream::HostDescriptionConstSharedPtr& host_description, - absl::string_view failure_reason, bool was_remote_close) { - // NOTE: We move the existing pending requests to a temporary list. This is done so that - // if retry logic submits a new request to the pool, we don't fail it inline. - pending_requests_to_purge_ = std::move(pending_requests_); - while (!pending_requests_to_purge_.empty()) { - PendingRequestPtr request = - pending_requests_to_purge_.front()->removeFromList(pending_requests_to_purge_); - host_->cluster().stats().upstream_rq_pending_failure_eject_.inc(); - request->callbacks_.onPoolFailure( - was_remote_close ? ConnectionPool::PoolFailureReason::RemoteConnectionFailure - : ConnectionPool::PoolFailureReason::LocalConnectionFailure, - failure_reason, host_description); - } -} - -void ConnPoolImplBase::onPendingRequestCancel(PendingRequest& request) { - ENVOY_LOG(debug, "cancelling pending request"); - if (!pending_requests_to_purge_.empty()) { - // If pending_requests_to_purge_ is not empty, it means that we are called from - // with-in a onPoolFailure callback invoked in purgePendingRequests (i.e. purgePendingRequests - // is down in the call stack). Remove this request from the list as it is cancelled, - // and there is no need to call its onPoolFailure callback. 
- request.removeFromList(pending_requests_to_purge_); - } else { - request.removeFromList(pending_requests_); - } - - host_->cluster().stats().upstream_rq_cancelled_.inc(); - checkForDrained(); -} - -} // namespace Legacy -} // namespace Http -} // namespace Envoy diff --git a/source/common/http/conn_pool_base_legacy.h b/source/common/http/conn_pool_base_legacy.h deleted file mode 100644 index 7c96ec3a1aafd..0000000000000 --- a/source/common/http/conn_pool_base_legacy.h +++ /dev/null @@ -1,80 +0,0 @@ -#pragma once - -#include "envoy/http/conn_pool.h" -#include "envoy/stats/timespan.h" - -#include "common/common/linked_object.h" - -#include "absl/strings/string_view.h" - -namespace Envoy { -namespace Http { -namespace Legacy { - -// Base class that handles request queueing logic shared between connection pool implementations. -class ConnPoolImplBase : protected Logger::Loggable { -protected: - ConnPoolImplBase(Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority) - : host_(host), priority_(priority) {} - virtual ~ConnPoolImplBase() = default; - - // ActiveClient provides a base class for connection pool clients that handles connection timings - // as well as managing the connection timeout. 
- class ActiveClient { - public: - ActiveClient(Event::Dispatcher& dispatcher, const Upstream::ClusterInfo& cluster); - virtual ~ActiveClient() { conn_length_->complete(); } - - virtual void onConnectTimeout() PURE; - - void recordConnectionSetup(); - void disarmConnectTimeout(); - - enum ConnectionState { Connecting, Connected }; - ConnectionState connectionState(); - - private: - Event::TimerPtr connect_timer_; - Stats::TimespanPtr conn_connect_ms_; - Stats::TimespanPtr conn_length_; - }; - - struct PendingRequest : LinkedObject, public ConnectionPool::Cancellable { - PendingRequest(ConnPoolImplBase& parent, ResponseDecoder& decoder, - ConnectionPool::Callbacks& callbacks); - ~PendingRequest() override; - - // ConnectionPool::Cancellable - void cancel() override { parent_.onPendingRequestCancel(*this); } - - ConnPoolImplBase& parent_; - ResponseDecoder& decoder_; - ConnectionPool::Callbacks& callbacks_; - }; - - using PendingRequestPtr = std::unique_ptr; - - // Creates a new PendingRequest and enqueues it into the request queue. - ConnectionPool::Cancellable* newPendingRequest(ResponseDecoder& decoder, - ConnectionPool::Callbacks& callbacks); - // Removes the PendingRequest from the list of requests. Called when the PendingRequest is - // cancelled, e.g. when the stream is reset before a connection has been established. - void onPendingRequestCancel(PendingRequest& request); - - // Fails all pending requests, calling onPoolFailure on the associated callbacks. - void purgePendingRequests(const Upstream::HostDescriptionConstSharedPtr& host_description, - absl::string_view failure_reason, bool was_remote); - - // Must be implemented by sub class. Attempts to drain inactive clients. - virtual void checkForDrained() PURE; - - const Upstream::HostConstSharedPtr host_; - const Upstream::ResourcePriority priority_; - std::list pending_requests_; - // When calling purgePendingRequests, this list will be used to hold the requests we are about - // to purge. 
We need this if one cancelled requests cancels a different pending request - std::list pending_requests_to_purge_; -}; -} // namespace Legacy -} // namespace Http -} // namespace Envoy diff --git a/source/common/http/hash_policy.cc b/source/common/http/hash_policy.cc index bb7e4211c658c..d00dbb99fed7d 100644 --- a/source/common/http/hash_policy.cc +++ b/source/common/http/hash_policy.cc @@ -2,6 +2,8 @@ #include "envoy/config/route/v3/route_components.pb.h" +#include "common/common/matchers.h" +#include "common/common/regex.h" #include "common/http/utility.h" #include "absl/strings/str_cat.h" @@ -21,8 +23,15 @@ class HashMethodImplBase : public HashPolicyImpl::HashMethod { class HeaderHashMethod : public HashMethodImplBase { public: - HeaderHashMethod(const std::string& header_name, bool terminal) - : HashMethodImplBase(terminal), header_name_(header_name) {} + HeaderHashMethod(const envoy::config::route::v3::RouteAction::HashPolicy::Header& header, + bool terminal) + : HashMethodImplBase(terminal), header_name_(header.header_name()) { + if (header.has_regex_rewrite()) { + const auto& rewrite_spec = header.regex_rewrite(); + regex_rewrite_ = Regex::Utility::parseRegex(rewrite_spec.pattern()); + regex_rewrite_substitution_ = rewrite_spec.substitution(); + } + } absl::optional evaluate(const Network::Address::Instance*, const RequestHeaderMap& headers, @@ -32,13 +41,20 @@ class HeaderHashMethod : public HashMethodImplBase { const HeaderEntry* header = headers.get(header_name_); if (header) { - hash = HashUtil::xxHash64(header->value().getStringView()); + if (regex_rewrite_ != nullptr) { + hash = HashUtil::xxHash64(regex_rewrite_->replaceAll(header->value().getStringView(), + regex_rewrite_substitution_)); + } else { + hash = HashUtil::xxHash64(header->value().getStringView()); + } } return hash; } private: const LowerCaseString header_name_; + Regex::CompiledMatcherPtr regex_rewrite_{}; + std::string regex_rewrite_substitution_{}; }; class CookieHashMethod : public 
HashMethodImplBase { @@ -145,7 +161,7 @@ HashPolicyImpl::HashPolicyImpl( switch (hash_policy->policy_specifier_case()) { case envoy::config::route::v3::RouteAction::HashPolicy::PolicySpecifierCase::kHeader: hash_impls_.emplace_back( - new HeaderHashMethod(hash_policy->header().header_name(), hash_policy->terminal())); + new HeaderHashMethod(hash_policy->header(), hash_policy->terminal())); break; case envoy::config::route::v3::RouteAction::HashPolicy::PolicySpecifierCase::kCookie: { absl::optional ttl; diff --git a/source/common/http/header_list_view.cc b/source/common/http/header_list_view.cc new file mode 100644 index 0000000000000..adfb3f0657fa7 --- /dev/null +++ b/source/common/http/header_list_view.cc @@ -0,0 +1,15 @@ +#include "common/http/header_list_view.h" + +namespace Envoy { +namespace Http { + +HeaderListView::HeaderListView(const HeaderMap& header_map) { + header_map.iterate([this](const Http::HeaderEntry& header) -> HeaderMap::Iterate { + keys_.emplace_back(std::reference_wrapper(header.key())); + values_.emplace_back(std::reference_wrapper(header.value())); + return HeaderMap::Iterate::Continue; + }); +} + +} // namespace Http +} // namespace Envoy \ No newline at end of file diff --git a/source/common/http/header_list_view.h b/source/common/http/header_list_view.h new file mode 100644 index 0000000000000..552af6f89d5c2 --- /dev/null +++ b/source/common/http/header_list_view.h @@ -0,0 +1,24 @@ +#pragma once + +#include + +#include "envoy/http/header_map.h" + +namespace Envoy { +namespace Http { + +class HeaderListView { +public: + using HeaderStringRefs = std::vector>; + + HeaderListView(const HeaderMap& header_map); + const HeaderStringRefs& keys() const { return keys_; } + const HeaderStringRefs& values() const { return values_; } + +private: + HeaderStringRefs keys_; + HeaderStringRefs values_; +}; + +} // namespace Http +} // namespace Envoy diff --git a/source/common/http/header_map_impl.cc b/source/common/http/header_map_impl.cc index 
2f407c435a482..ce63493486b7d 100644 --- a/source/common/http/header_map_impl.cc +++ b/source/common/http/header_map_impl.cc @@ -5,6 +5,8 @@ #include #include +#include "envoy/http/header_map.h" + #include "common/common/assert.h" #include "common/common/dump_state_utils.h" #include "common/common/empty_string.h" @@ -26,22 +28,22 @@ void validateCapacity(uint64_t new_capacity) { "Trying to allocate overly large headers."); } -absl::string_view get_str_view(const VariantHeader& buffer) { +absl::string_view getStrView(const VariantHeader& buffer) { return absl::get(buffer); } -InlineHeaderVector& get_in_vec(VariantHeader& buffer) { +InlineHeaderVector& getInVec(VariantHeader& buffer) { return absl::get(buffer); } -const InlineHeaderVector& get_in_vec(const VariantHeader& buffer) { +const InlineHeaderVector& getInVec(const VariantHeader& buffer) { return absl::get(buffer); } } // namespace // Initialize as a Type::Inline HeaderString::HeaderString() : buffer_(InlineHeaderVector()) { - ASSERT((get_in_vec(buffer_).capacity()) >= MaxIntegerLength); + ASSERT((getInVec(buffer_).capacity()) >= MaxIntegerLength); ASSERT(valid()); } @@ -72,32 +74,41 @@ void HeaderString::append(const char* data, uint32_t data_size) { case Type::Reference: { // Rather than be too clever and optimize this uncommon case, we switch to // Inline mode and copy. 
- const absl::string_view prev = get_str_view(buffer_); + const absl::string_view prev = getStrView(buffer_); buffer_ = InlineHeaderVector(); // Assigning new_capacity to avoid resizing when appending the new data - get_in_vec(buffer_).reserve(new_capacity); - get_in_vec(buffer_).assign(prev.begin(), prev.end()); + getInVec(buffer_).reserve(new_capacity); + getInVec(buffer_).assign(prev.begin(), prev.end()); break; } case Type::Inline: { - get_in_vec(buffer_).reserve(new_capacity); + getInVec(buffer_).reserve(new_capacity); break; } } - get_in_vec(buffer_).insert(get_in_vec(buffer_).end(), data, data + data_size); + getInVec(buffer_).insert(getInVec(buffer_).end(), data, data + data_size); +} + +void HeaderString::rtrim() { + ASSERT(type() == Type::Inline); + absl::string_view original = getStringView(); + absl::string_view rtrimmed = StringUtil::rtrim(original); + if (original.size() != rtrimmed.size()) { + getInVec(buffer_).resize(rtrimmed.size()); + } } absl::string_view HeaderString::getStringView() const { if (type() == Type::Reference) { - return get_str_view(buffer_); + return getStrView(buffer_); } ASSERT(type() == Type::Inline); - return {get_in_vec(buffer_).data(), get_in_vec(buffer_).size()}; + return {getInVec(buffer_).data(), getInVec(buffer_).size()}; } void HeaderString::clear() { if (type() == Type::Inline) { - get_in_vec(buffer_).clear(); + getInVec(buffer_).clear(); } } @@ -109,8 +120,8 @@ void HeaderString::setCopy(const char* data, uint32_t size) { buffer_ = InlineHeaderVector(); } - get_in_vec(buffer_).reserve(size); - get_in_vec(buffer_).assign(data, data + size); + getInVec(buffer_).reserve(size); + getInVec(buffer_).assign(data, data + size); ASSERT(valid()); } @@ -132,8 +143,8 @@ void HeaderString::setInteger(uint64_t value) { // Switching from Type::Reference to Type::Inline buffer_ = InlineHeaderVector(); } - ASSERT((get_in_vec(buffer_).capacity()) > MaxIntegerLength); - get_in_vec(buffer_).assign(inner_buffer, inner_buffer + int_length); 
+ ASSERT((getInVec(buffer_).capacity()) > MaxIntegerLength); + getInVec(buffer_).assign(inner_buffer, inner_buffer + int_length); } void HeaderString::setReference(absl::string_view ref_value) { @@ -143,10 +154,10 @@ void HeaderString::setReference(absl::string_view ref_value) { uint32_t HeaderString::size() const { if (type() == Type::Reference) { - return get_str_view(buffer_).size(); + return getStrView(buffer_).size(); } ASSERT(type() == Type::Inline); - return get_in_vec(buffer_).size(); + return getInVec(buffer_).size(); } HeaderString::Type HeaderString::type() const { @@ -183,30 +194,47 @@ void HeaderMapImpl::HeaderEntryImpl::value(const HeaderEntry& header) { value(header.value().getStringView()); } -#define INLINE_HEADER_STATIC_MAP_ENTRY(name) \ - add(Headers::get().name.get().c_str(), [](HeaderMapType& h) -> StaticLookupResponse { \ - return {&h.inline_headers_.name##_, &Headers::get().name}; \ - }); +template <> HeaderMapImpl::StaticLookupTable::StaticLookupTable() { +#define REGISTER_DEFAULT_REQUEST_HEADER(name) \ + CustomInlineHeaderRegistry::registerInlineHeader( \ + Headers::get().name); + INLINE_REQ_HEADERS(REGISTER_DEFAULT_REQUEST_HEADER) + INLINE_REQ_RESP_HEADERS(REGISTER_DEFAULT_REQUEST_HEADER) -template <> HeaderMapImpl::StaticLookupTable::StaticLookupTable() { - INLINE_REQ_HEADERS(INLINE_HEADER_STATIC_MAP_ENTRY) - INLINE_REQ_RESP_HEADERS(INLINE_HEADER_STATIC_MAP_ENTRY) + finalizeTable(); // Special case where we map a legacy host header to :authority. 
- add(Headers::get().HostLegacy.get().c_str(), [](HeaderMapType& h) -> StaticLookupResponse { - return {&h.inline_headers_.Host_, &Headers::get().Host}; + const auto handle = + CustomInlineHeaderRegistry::getInlineHeader( + Headers::get().Host); + add(Headers::get().HostLegacy.get().c_str(), [handle](HeaderMapImpl& h) -> StaticLookupResponse { + return {&h.inlineHeaders()[handle.value().it_->second], &handle.value().it_->first}; }); } -template <> HeaderMapImpl::StaticLookupTable::StaticLookupTable() { - INLINE_RESP_HEADERS(INLINE_HEADER_STATIC_MAP_ENTRY) - INLINE_REQ_RESP_HEADERS(INLINE_HEADER_STATIC_MAP_ENTRY) - INLINE_RESP_HEADERS_TRAILERS(INLINE_HEADER_STATIC_MAP_ENTRY) +template <> HeaderMapImpl::StaticLookupTable::StaticLookupTable() { + finalizeTable(); } -template <> -HeaderMapImpl::StaticLookupTable::StaticLookupTable(){ - INLINE_RESP_HEADERS_TRAILERS(INLINE_HEADER_STATIC_MAP_ENTRY)} +template <> HeaderMapImpl::StaticLookupTable::StaticLookupTable() { +#define REGISTER_RESPONSE_HEADER(name) \ + CustomInlineHeaderRegistry::registerInlineHeader( \ + Headers::get().name); + INLINE_RESP_HEADERS(REGISTER_RESPONSE_HEADER) + INLINE_REQ_RESP_HEADERS(REGISTER_RESPONSE_HEADER) + INLINE_RESP_HEADERS_TRAILERS(REGISTER_RESPONSE_HEADER) + + finalizeTable(); +} + +template <> HeaderMapImpl::StaticLookupTable::StaticLookupTable() { +#define REGISTER_RESPONSE_TRAILER(name) \ + CustomInlineHeaderRegistry::registerInlineHeader( \ + Headers::get().name); + INLINE_RESP_HEADERS_TRAILERS(REGISTER_RESPONSE_TRAILER) + + finalizeTable(); +} uint64_t HeaderMapImpl::appendToHeader(HeaderString& header, absl::string_view data, absl::string_view delimiter) { @@ -222,18 +250,6 @@ uint64_t HeaderMapImpl::appendToHeader(HeaderString& header, absl::string_view d return data.size() + byte_size; } -void HeaderMapImpl::initFromInitList( - HeaderMap& new_header_map, - const std::initializer_list>& values) { - for (auto& value : values) { - HeaderString key_string; - 
key_string.setCopy(value.first.get().c_str(), value.first.get().size()); - HeaderString value_string; - value_string.setCopy(value.second.c_str(), value.second.size()); - new_header_map.addViaMove(std::move(key_string), std::move(value_string)); - } -} - void HeaderMapImpl::updateSize(uint64_t from_size, uint64_t to_size) { ASSERT(cached_byte_size_ >= from_size); cached_byte_size_ -= from_size; @@ -248,28 +264,27 @@ void HeaderMapImpl::subtractSize(uint64_t size) { } void HeaderMapImpl::copyFrom(HeaderMap& lhs, const HeaderMap& header_map) { - header_map.iterate( - [](const HeaderEntry& header, void* context) -> HeaderMap::Iterate { - // TODO(mattklein123) PERF: Avoid copying here if not necessary. - HeaderString key_string; - key_string.setCopy(header.key().getStringView()); - HeaderString value_string; - value_string.setCopy(header.value().getStringView()); + header_map.iterate([&lhs](const HeaderEntry& header) -> HeaderMap::Iterate { + // TODO(mattklein123) PERF: Avoid copying here if not necessary. + HeaderString key_string; + key_string.setCopy(header.key().getStringView()); + HeaderString value_string; + value_string.setCopy(header.value().getStringView()); - static_cast(context)->addViaMove(std::move(key_string), - std::move(value_string)); - return HeaderMap::Iterate::Continue; - }, - &lhs); + lhs.addViaMove(std::move(key_string), std::move(value_string)); + return HeaderMap::Iterate::Continue; + }); } namespace { // This is currently only used in tests and is not optimized for performance. 
-HeaderMap::Iterate collectAllHeaders(const HeaderEntry& header, void* headers) { - static_cast>*>(headers)->push_back( - std::make_pair(header.key().getStringView(), header.value().getStringView())); - return HeaderMap::Iterate::Continue; +HeaderMap::ConstIterateCb +collectAllHeaders(std::vector>* dest) { + return [dest](const HeaderEntry& header) -> HeaderMap::Iterate { + dest->push_back(std::make_pair(header.key().getStringView(), header.value().getStringView())); + return HeaderMap::Iterate::Continue; + }; }; } // namespace @@ -282,7 +297,7 @@ bool HeaderMapImpl::operator==(const HeaderMap& rhs) const { std::vector> rhs_headers; rhs_headers.reserve(rhs.size()); - rhs.iterate(collectAllHeaders, &rhs_headers); + rhs.iterate(collectAllHeaders(&rhs_headers)); auto i = headers_.begin(); auto j = rhs_headers.begin(); @@ -317,23 +332,13 @@ void HeaderMapImpl::insertByKey(HeaderString&& key, HeaderString&& value) { } void HeaderMapImpl::addViaMove(HeaderString&& key, HeaderString&& value) { - // If this is an inline header, we can't addViaMove, because we'll overwrite - // the existing value. 
- auto* entry = getExistingInline(key.getStringView()); - if (entry != nullptr) { - const uint64_t added_size = appendToHeader(entry->value(), value.getStringView()); - addSize(added_size); - key.clear(); - value.clear(); - } else { - insertByKey(std::move(key), std::move(value)); - } + insertByKey(std::move(key), std::move(value)); } void HeaderMapImpl::addReference(const LowerCaseString& key, absl::string_view value) { HeaderString ref_key(key); HeaderString ref_value(value); - addViaMove(std::move(ref_key), std::move(ref_value)); + insertByKey(std::move(ref_key), std::move(ref_value)); } void HeaderMapImpl::addReferenceKey(const LowerCaseString& key, uint64_t value) { @@ -353,14 +358,8 @@ void HeaderMapImpl::addReferenceKey(const LowerCaseString& key, absl::string_vie } void HeaderMapImpl::addCopy(const LowerCaseString& key, uint64_t value) { - auto* entry = getExistingInline(key.get()); - if (entry != nullptr) { - char buf[32]; - StringUtil::itoa(buf, sizeof(buf), value); - const uint64_t added_size = appendToHeader(entry->value(), buf); - addSize(added_size); - return; - } + // In the case that the header is appended, we will perform a needless copy of the key and value. + // This is done on purpose to keep the code simple and should be rare. HeaderString new_key; new_key.setCopy(key.get()); HeaderString new_value; @@ -371,12 +370,8 @@ void HeaderMapImpl::addCopy(const LowerCaseString& key, uint64_t value) { } void HeaderMapImpl::addCopy(const LowerCaseString& key, absl::string_view value) { - auto* entry = getExistingInline(key.get()); - if (entry != nullptr) { - const uint64_t added_size = appendToHeader(entry->value(), value); - addSize(added_size); - return; - } + // In the case that the header is appended, we will perform a needless copy of the key and value. + // This is done on purpose to keep the code simple and should be rare. 
HeaderString new_key; new_key.setCopy(key.get()); HeaderString new_value; @@ -438,16 +433,25 @@ void HeaderMapImpl::verifyByteSizeInternalForTest() const { } const HeaderEntry* HeaderMapImpl::get(const LowerCaseString& key) const { - for (const HeaderEntryImpl& header : headers_) { - if (header.key() == key.get().c_str()) { - return &header; - } - } - - return nullptr; + return const_cast(this)->getExisting(key); } HeaderEntry* HeaderMapImpl::getExisting(const LowerCaseString& key) { + // Attempt a trie lookup first to see if the user is requesting an O(1) header. This may be + // relatively common in certain header matching / routing patterns. + // TODO(mattklein123): Add inline handle support directly to the header matcher code to support + // this use case more directly. + auto lookup = staticLookup(key.get()); + if (lookup.has_value()) { + return *lookup.value().entry_; + } + + // If the requested header is not an O(1) header we do a full scan. Doing the trie lookup is + // wasteful in the miss case, but is present for code consistency with other functions that do + // similar things. + // TODO(mattklein123): The full scan here and in remove() are the biggest issues with this + // implementation for certain use cases. We can either replace this with a totally different + // implementation or potentially create a lazy map if the size of the map is above a threshold. 
for (HeaderEntryImpl& header : headers_) { if (header.key() == key.get().c_str()) { return &header; @@ -457,72 +461,32 @@ HeaderEntry* HeaderMapImpl::getExisting(const LowerCaseString& key) { return nullptr; } -void HeaderMapImpl::iterate(ConstIterateCb cb, void* context) const { +void HeaderMapImpl::iterate(HeaderMap::ConstIterateCb cb) const { for (const HeaderEntryImpl& header : headers_) { - if (cb(header, context) == HeaderMap::Iterate::Break) { + if (cb(header) == HeaderMap::Iterate::Break) { break; } } } -void HeaderMapImpl::iterateReverse(ConstIterateCb cb, void* context) const { +void HeaderMapImpl::iterateReverse(HeaderMap::ConstIterateCb cb) const { for (auto it = headers_.rbegin(); it != headers_.rend(); it++) { - if (cb(*it, context) == HeaderMap::Iterate::Break) { + if (cb(*it) == HeaderMap::Iterate::Break) { break; } } } -HeaderMap::Lookup HeaderMapImpl::lookup(const LowerCaseString& key, - const HeaderEntry** entry) const { - // The accessor callbacks for predefined inline headers take a HeaderMapImpl& as an argument; - // even though we don't make any modifications, we need to const_cast in order to use the - // accessor. - // - // Making this work without const_cast would require managing an additional const accessor - // callback for each predefined inline header and add to the complexity of the code. 
- auto lookup = const_cast(this)->staticLookup(key.get()); - if (lookup.has_value()) { - *entry = *lookup.value().entry_; - if (*entry) { - return Lookup::Found; - } else { - return Lookup::NotFound; - } - } else { - *entry = nullptr; - return Lookup::NotSupported; - } -} - void HeaderMapImpl::clear() { clearInline(); headers_.clear(); cached_byte_size_ = 0; } -size_t HeaderMapImpl::remove(const LowerCaseString& key) { - const size_t old_size = headers_.size(); - auto lookup = staticLookup(key.get()); - if (lookup.has_value()) { - removeInline(lookup.value().entry_); - } else { - for (auto i = headers_.begin(); i != headers_.end();) { - if (i->key() == key.get().c_str()) { - subtractSize(i->key().size() + i->value().size()); - i = headers_.erase(i); - } else { - ++i; - } - } - } - return old_size - headers_.size(); -} - -size_t HeaderMapImpl::removePrefix(const LowerCaseString& prefix) { +size_t HeaderMapImpl::removeIf(const HeaderMap::HeaderMatchPredicate& predicate) { const size_t old_size = headers_.size(); - headers_.remove_if([&prefix, this](const HeaderEntryImpl& entry) { - bool to_remove = absl::StartsWith(entry.key().getStringView(), prefix.get()); + headers_.remove_if([&predicate, this](const HeaderEntryImpl& entry) { + const bool to_remove = predicate(entry); if (to_remove) { // If this header should be removed, make sure any references in the // static lookup table are cleared as well. @@ -543,18 +507,33 @@ size_t HeaderMapImpl::removePrefix(const LowerCaseString& prefix) { return old_size - headers_.size(); } +size_t HeaderMapImpl::remove(const LowerCaseString& key) { + auto lookup = staticLookup(key.get()); + if (lookup.has_value()) { + const size_t old_size = headers_.size(); + removeInline(lookup.value().entry_); + return old_size - headers_.size(); + } else { + // TODO(mattklein123): When the lazy map is implemented we can stop using removeIf() here. 
+ return HeaderMapImpl::removeIf([&key](const HeaderEntry& entry) -> bool { + return key.get() == entry.key().getStringView(); + }); + } +} + +size_t HeaderMapImpl::removePrefix(const LowerCaseString& prefix) { + return HeaderMapImpl::removeIf([&prefix](const HeaderEntry& entry) -> bool { + return absl::StartsWith(entry.key().getStringView(), prefix.get()); + }); +} + void HeaderMapImpl::dumpState(std::ostream& os, int indent_level) const { - using IterateData = std::pair; - const char* spaces = spacesForLevel(indent_level); - IterateData iterate_data = std::make_pair(&os, spaces); - iterate( - [](const HeaderEntry& header, void* context) -> HeaderMap::Iterate { - auto* data = static_cast(context); - *data->first << data->second << "'" << header.key().getStringView() << "', '" - << header.value().getStringView() << "'\n"; - return HeaderMap::Iterate::Continue; - }, - &iterate_data); + iterate([&os, + spaces = spacesForLevel(indent_level)](const HeaderEntry& header) -> HeaderMap::Iterate { + os << spaces << "'" << header.key().getStringView() << "', '" << header.value().getStringView() + << "'\n"; + return HeaderMap::Iterate::Continue; + }); } HeaderMapImpl::HeaderEntryImpl& HeaderMapImpl::maybeCreateInline(HeaderEntryImpl** entry, @@ -585,14 +564,6 @@ HeaderMapImpl::HeaderEntryImpl& HeaderMapImpl::maybeCreateInline(HeaderEntryImpl return **entry; } -HeaderMapImpl::HeaderEntryImpl* HeaderMapImpl::getExistingInline(absl::string_view key) { - auto lookup = staticLookup(key); - if (lookup.has_value()) { - return *lookup.value().entry_; - } - return nullptr; -} - size_t HeaderMapImpl::removeInline(HeaderEntryImpl** ptr_to_entry) { if (!*ptr_to_entry) { return 0; @@ -606,5 +577,32 @@ size_t HeaderMapImpl::removeInline(HeaderEntryImpl** ptr_to_entry) { return 1; } +namespace { +template +HeaderMapImplUtility::HeaderMapImplInfo makeHeaderMapImplInfo(absl::string_view name) { + // Constructing a header map implementation will force the custom headers and sizing to be + // 
finalized, so do that first. + auto header_map = T::create(); + + HeaderMapImplUtility::HeaderMapImplInfo info; + info.name_ = std::string(name); + info.size_ = T::inlineHeadersSize() + sizeof(T); + for (const auto& header : CustomInlineHeaderRegistry::headers()) { + info.registered_headers_.push_back(header.first.get()); + } + return info; +} +} // namespace + +std::vector +HeaderMapImplUtility::getAllHeaderMapImplInfo() { + std::vector ret; + ret.push_back(makeHeaderMapImplInfo("request header map")); + ret.push_back(makeHeaderMapImplInfo("request trailer map")); + ret.push_back(makeHeaderMapImplInfo("response header map")); + ret.push_back(makeHeaderMapImplInfo("response trailer map")); + return ret; +} + } // namespace Http } // namespace Envoy diff --git a/source/common/http/header_map_impl.h b/source/common/http/header_map_impl.h index 7d456916e230c..d4cb88bdcacb4 100644 --- a/source/common/http/header_map_impl.h +++ b/source/common/http/header_map_impl.h @@ -5,6 +5,7 @@ #include #include #include +#include #include "envoy/http/header_map.h" @@ -17,80 +18,83 @@ namespace Http { /** * These are definitions of all of the inline header access functions described inside header_map.h - * TODO(asraa): Simplify code here so macros expand into single virtual calls. 
*/ #define DEFINE_INLINE_HEADER_FUNCS(name) \ public: \ - const HeaderEntry* name() const override { return inline_headers_.name##_; } \ + const HeaderEntry* name() const override { return getInline(HeaderHandles::get().name); } \ void append##name(absl::string_view data, absl::string_view delimiter) override { \ - HeaderEntry& entry = maybeCreateInline(&inline_headers_.name##_, Headers::get().name); \ - addSize(HeaderMapImpl::appendToHeader(entry.value(), data, delimiter)); \ + appendInline(HeaderHandles::get().name, data, delimiter); \ } \ void setReference##name(absl::string_view value) override { \ - HeaderEntry& entry = maybeCreateInline(&inline_headers_.name##_, Headers::get().name); \ - updateSize(entry.value().size(), value.size()); \ - entry.value().setReference(value); \ + setReferenceInline(HeaderHandles::get().name, value); \ } \ void set##name(absl::string_view value) override { \ - HeaderEntry& entry = maybeCreateInline(&inline_headers_.name##_, Headers::get().name); \ - updateSize(entry.value().size(), value.size()); \ - entry.value().setCopy(value); \ + setInline(HeaderHandles::get().name, value); \ } \ - void set##name(uint64_t value) override { \ - HeaderEntry& entry = maybeCreateInline(&inline_headers_.name##_, Headers::get().name); \ - subtractSize(inline_headers_.name##_->value().size()); \ - entry.value().setInteger(value); \ - addSize(inline_headers_.name##_->value().size()); \ - } \ - size_t remove##name() override { return removeInline(&inline_headers_.name##_); } - -#define DEFINE_INLINE_HEADER_STRUCT(name) HeaderEntryImpl* name##_; + void set##name(uint64_t value) override { setInline(HeaderHandles::get().name, value); } \ + size_t remove##name() override { return removeInline(HeaderHandles::get().name); } \ + absl::string_view get##name##Value() const override { \ + return getInlineValue(HeaderHandles::get().name); \ + } /** * Implementation of Http::HeaderMap. This is heavily optimized for performance. 
Roughly, when - * headers are added to the map, we do a hash lookup to see if it's one of the O(1) headers. - * If it is, we store a reference to it that can be accessed later directly. Most high performance - * paths use O(1) direct access. In general, we try to copy as little as possible and allocate as - * little as possible in any of the paths. - * TODO(mattklein123): The end result of the header refactor should be to make this a fully - * protected base class or a mix-in for the concrete header types below. + * headers are added to the map by string, we do a trie lookup to see if it's one of the O(1) + * headers. If it is, we store a reference to it that can be accessed later directly via direct + * method access. Most high performance paths use O(1) direct method access. In general, we try to + * copy as little as possible and allocate as little as possible in any of the paths. */ -class HeaderMapImpl : public virtual HeaderMap, NonCopyable { +class HeaderMapImpl : NonCopyable { public: + virtual ~HeaderMapImpl() = default; + // The following "constructors" call virtual functions during construction and must use the // static factory pattern. static void copyFrom(HeaderMap& lhs, const HeaderMap& rhs); - static void - initFromInitList(HeaderMap& new_header_map, - const std::initializer_list>& values); + // The value_type of iterator must be pair, and the first value of them must be LowerCaseString. + // If not, it won't be compiled successfully. 
+ template static void initFromInitList(HeaderMap& new_header_map, It begin, It end) { + for (auto it = begin; it != end; ++it) { + static_assert(std::is_samefirst), LowerCaseString>::value, + "iterator must be pair and the first value of them must be LowerCaseString"); + HeaderString key_string; + key_string.setCopy(it->first.get().c_str(), it->first.get().size()); + HeaderString value_string; + value_string.setCopy(it->second.c_str(), it->second.size()); + new_header_map.addViaMove(std::move(key_string), std::move(value_string)); + } + } // Performs a manual byte size count for test verification. void verifyByteSizeInternalForTest() const; - // Http::HeaderMap - bool operator==(const HeaderMap& rhs) const override; - bool operator!=(const HeaderMap& rhs) const override; - void addViaMove(HeaderString&& key, HeaderString&& value) override; - void addReference(const LowerCaseString& key, absl::string_view value) override; - void addReferenceKey(const LowerCaseString& key, uint64_t value) override; - void addReferenceKey(const LowerCaseString& key, absl::string_view value) override; - void addCopy(const LowerCaseString& key, uint64_t value) override; - void addCopy(const LowerCaseString& key, absl::string_view value) override; - void appendCopy(const LowerCaseString& key, absl::string_view value) override; - void setReference(const LowerCaseString& key, absl::string_view value) override; - void setReferenceKey(const LowerCaseString& key, absl::string_view value) override; - void setCopy(const LowerCaseString& key, absl::string_view value) override; - uint64_t byteSize() const override; - const HeaderEntry* get(const LowerCaseString& key) const override; - void iterate(ConstIterateCb cb, void* context) const override; - void iterateReverse(ConstIterateCb cb, void* context) const override; - Lookup lookup(const LowerCaseString& key, const HeaderEntry** entry) const override; - void clear() override; - size_t remove(const LowerCaseString& key) override; - size_t 
removePrefix(const LowerCaseString& key) override; - size_t size() const override { return headers_.size(); } - bool empty() const override { return headers_.empty(); } - void dumpState(std::ostream& os, int indent_level = 0) const override; + // Note: This class does not actually implement Http::HeaderMap to avoid virtual inheritance in + // the derived classes. Instead, it is used as a mix-in class for TypedHeaderMapImpl below. This + // both avoid virtual inheritance and allows the concrete final header maps to use a variable + // length member at the end. + bool operator==(const HeaderMap& rhs) const; + bool operator!=(const HeaderMap& rhs) const; + void addViaMove(HeaderString&& key, HeaderString&& value); + void addReference(const LowerCaseString& key, absl::string_view value); + void addReferenceKey(const LowerCaseString& key, uint64_t value); + void addReferenceKey(const LowerCaseString& key, absl::string_view value); + void addCopy(const LowerCaseString& key, uint64_t value); + void addCopy(const LowerCaseString& key, absl::string_view value); + void appendCopy(const LowerCaseString& key, absl::string_view value); + void setReference(const LowerCaseString& key, absl::string_view value); + void setReferenceKey(const LowerCaseString& key, absl::string_view value); + void setCopy(const LowerCaseString& key, absl::string_view value); + uint64_t byteSize() const; + const HeaderEntry* get(const LowerCaseString& key) const; + void iterate(HeaderMap::ConstIterateCb cb) const; + void iterateReverse(HeaderMap::ConstIterateCb cb) const; + void clear(); + size_t remove(const LowerCaseString& key); + size_t removeIf(const HeaderMap::HeaderMatchPredicate& predicate); + size_t removePrefix(const LowerCaseString& key); + size_t size() const { return headers_.size(); } + bool empty() const { return headers_.empty(); } + void dumpState(std::ostream& os, int indent_level = 0) const; protected: struct HeaderEntryImpl : public HeaderEntry, NonCopyable { @@ -123,20 +127,44 @@ 
class HeaderMapImpl : public virtual HeaderMap, NonCopyable { /** * Base class for a static lookup table that converts a string key into an O(1) header. */ - template - struct StaticLookupTable : public TrieLookupTable { - using HeaderMapType = T; - + template + struct StaticLookupTable + : public TrieLookupTable> { StaticLookupTable(); - static absl::optional lookup(T& header_map, absl::string_view key) { - auto entry = ConstSingleton::get().find(key); + void finalizeTable() { + CustomInlineHeaderRegistry::finalize(); + auto& headers = CustomInlineHeaderRegistry::headers(); + size_ = headers.size(); + for (const auto& header : headers) { + this->add(header.first.get().c_str(), [&header](HeaderMapImpl& h) -> StaticLookupResponse { + return {&h.inlineHeaders()[header.second], &header.first}; + }); + } + } + + static size_t size() { + // The size of the lookup table is finalized when the singleton lookup table is created. This + // allows for late binding of custom headers as well as envoy header prefix changes. This + // does mean that once the first header map is created of this type, no further changes are + // possible. + // TODO(mattklein123): If we decide to keep this implementation, it is conceivable that header + // maps could be created by an API factory that is owned by the listener/HCM, thus making + // O(1) header delivery over xDS possible. 
+ return ConstSingleton::get().size_; + } + + static absl::optional lookup(HeaderMapImpl& header_map, + absl::string_view key) { + const auto& entry = ConstSingleton::get().find(key); if (entry != nullptr) { return entry(header_map); } else { return absl::nullopt; } } + + size_t size_; }; /** @@ -214,18 +242,13 @@ class HeaderMapImpl : public virtual HeaderMap, NonCopyable { HeaderEntryImpl& maybeCreateInline(HeaderEntryImpl** entry, const LowerCaseString& key, HeaderString&& value); HeaderEntry* getExisting(const LowerCaseString& key); - HeaderEntryImpl* getExistingInline(absl::string_view key); size_t removeInline(HeaderEntryImpl** entry); void updateSize(uint64_t from_size, uint64_t to_size); void addSize(uint64_t size); void subtractSize(uint64_t size); - virtual absl::optional staticLookup(absl::string_view) { - // TODO(mattklein123): Make this pure once HeaderMapImpl is a base class only. - return absl::nullopt; - } - virtual void clearInline() { - // TODO(mattklein123): Make this pure once HeaderMapImpl is a base class only. - } + virtual absl::optional staticLookup(absl::string_view) PURE; + virtual void clearInline() PURE; + virtual HeaderEntryImpl** inlineHeaders() PURE; HeaderList headers_; // This holds the internal byte size of the HeaderMap. @@ -233,102 +256,257 @@ class HeaderMapImpl : public virtual HeaderMap, NonCopyable { }; /** - * Typed derived classes for all header map types. + * Typed derived classes for all header map types. This class implements the actual typed + * interface and for the majority of methods just passes through to the HeaderMapImpl mix-in. Per + * above, this avoids virtual inheritance. + */ +template class TypedHeaderMapImpl : public HeaderMapImpl, public Interface { +public: + // Implementation of Http::HeaderMap that passes through to HeaderMapImpl. 
+ bool operator==(const HeaderMap& rhs) const override { return HeaderMapImpl::operator==(rhs); } + bool operator!=(const HeaderMap& rhs) const override { return HeaderMapImpl::operator!=(rhs); } + void addViaMove(HeaderString&& key, HeaderString&& value) override { + HeaderMapImpl::addViaMove(std::move(key), std::move(value)); + } + void addReference(const LowerCaseString& key, absl::string_view value) override { + HeaderMapImpl::addReference(key, value); + } + void addReferenceKey(const LowerCaseString& key, uint64_t value) override { + HeaderMapImpl::addReferenceKey(key, value); + } + void addReferenceKey(const LowerCaseString& key, absl::string_view value) override { + HeaderMapImpl::addReferenceKey(key, value); + } + void addCopy(const LowerCaseString& key, uint64_t value) override { + HeaderMapImpl::addCopy(key, value); + } + void addCopy(const LowerCaseString& key, absl::string_view value) override { + HeaderMapImpl::addCopy(key, value); + } + void appendCopy(const LowerCaseString& key, absl::string_view value) override { + HeaderMapImpl::appendCopy(key, value); + } + void setReference(const LowerCaseString& key, absl::string_view value) override { + HeaderMapImpl::setReference(key, value); + } + void setReferenceKey(const LowerCaseString& key, absl::string_view value) override { + HeaderMapImpl::setReferenceKey(key, value); + } + void setCopy(const LowerCaseString& key, absl::string_view value) override { + HeaderMapImpl::setCopy(key, value); + } + uint64_t byteSize() const override { return HeaderMapImpl::byteSize(); } + const HeaderEntry* get(const LowerCaseString& key) const override { + return HeaderMapImpl::get(key); + } + void iterate(HeaderMap::ConstIterateCb cb) const override { HeaderMapImpl::iterate(cb); } + void iterateReverse(HeaderMap::ConstIterateCb cb) const override { + HeaderMapImpl::iterateReverse(cb); + } + void clear() override { HeaderMapImpl::clear(); } + size_t remove(const LowerCaseString& key) override { return 
HeaderMapImpl::remove(key); } + size_t removeIf(const HeaderMap::HeaderMatchPredicate& predicate) override { + return HeaderMapImpl::removeIf(predicate); + } + size_t removePrefix(const LowerCaseString& key) override { + return HeaderMapImpl::removePrefix(key); + } + size_t size() const override { return HeaderMapImpl::size(); } + bool empty() const override { return HeaderMapImpl::empty(); } + void dumpState(std::ostream& os, int indent_level = 0) const override { + HeaderMapImpl::dumpState(os, indent_level); + } + + // Generic custom header functions for each fully typed interface. To avoid accidental issues, + // the Handle type is different for each interface, which is why these functions live here vs. + // inside HeaderMapImpl. + using Handle = CustomInlineHeaderRegistry::Handle; + const HeaderEntry* getInline(Handle handle) const override { + ASSERT(handle.it_->second < inlineHeadersSize()); + return constInlineHeaders()[handle.it_->second]; + } + void appendInline(Handle handle, absl::string_view data, absl::string_view delimiter) override { + ASSERT(handle.it_->second < inlineHeadersSize()); + HeaderEntry& entry = maybeCreateInline(&inlineHeaders()[handle.it_->second], handle.it_->first); + addSize(HeaderMapImpl::appendToHeader(entry.value(), data, delimiter)); + } + void setReferenceInline(Handle handle, absl::string_view value) override { + ASSERT(handle.it_->second < inlineHeadersSize()); + HeaderEntry& entry = maybeCreateInline(&inlineHeaders()[handle.it_->second], handle.it_->first); + updateSize(entry.value().size(), value.size()); + entry.value().setReference(value); + } + void setInline(Handle handle, absl::string_view value) override { + ASSERT(handle.it_->second < inlineHeadersSize()); + HeaderEntry& entry = maybeCreateInline(&inlineHeaders()[handle.it_->second], handle.it_->first); + updateSize(entry.value().size(), value.size()); + entry.value().setCopy(value); + } + void setInline(Handle handle, uint64_t value) override { + 
ASSERT(handle.it_->second < inlineHeadersSize()); + HeaderEntry& entry = maybeCreateInline(&inlineHeaders()[handle.it_->second], handle.it_->first); + subtractSize(entry.value().size()); + entry.value().setInteger(value); + addSize(entry.value().size()); + } + size_t removeInline(Handle handle) override { + ASSERT(handle.it_->second < inlineHeadersSize()); + return HeaderMapImpl::removeInline(&inlineHeaders()[handle.it_->second]); + } + static size_t inlineHeadersSize() { + return StaticLookupTable::size() * sizeof(HeaderEntryImpl*); + } + +protected: + absl::optional staticLookup(absl::string_view key) override { + return StaticLookupTable::lookup(*this, key); + } + virtual const HeaderEntryImpl* const* constInlineHeaders() const PURE; +}; + +#define DEFINE_HEADER_HANDLE(name) \ + Handle name = \ + CustomInlineHeaderRegistry::getInlineHeader(Headers::get().name).value(); + +/** + * Concrete implementation of RequestHeaderMap which allows for variable custom registered inline + * headers. */ -class RequestHeaderMapImpl : public HeaderMapImpl, public RequestHeaderMap { +class RequestHeaderMapImpl final : public TypedHeaderMapImpl, + public InlineStorage { public: + static std::unique_ptr create() { + return std::unique_ptr(new (inlineHeadersSize()) RequestHeaderMapImpl()); + } + INLINE_REQ_HEADERS(DEFINE_INLINE_HEADER_FUNCS) INLINE_REQ_RESP_HEADERS(DEFINE_INLINE_HEADER_FUNCS) protected: - // Explicit inline headers for the request header map. - // TODO(mattklein123): This is mostly copied between all of the concrete header map types. - // In a future change we can either get rid of O(1) headers completely, or it should be possible - // to statically register all O(1) headers and move to a single dynamically sized class where we - // we reference the O(1) headers in the table by an offset. 
- struct AllInlineHeaders { - AllInlineHeaders() { clear(); } - void clear() { memset(this, 0, sizeof(*this)); } - - INLINE_REQ_HEADERS(DEFINE_INLINE_HEADER_STRUCT) - INLINE_REQ_RESP_HEADERS(DEFINE_INLINE_HEADER_STRUCT) + // NOTE: Because inline_headers_ is a variable size member, it must be the last member in the + // most derived class. This forces the definition of the following three functions to also be + // in the most derived class and thus duplicated. There may be a way to consolidate thus but it's + // not clear and can be deferred for now. + void clearInline() override { memset(inline_headers_, 0, inlineHeadersSize()); } + const HeaderEntryImpl* const* constInlineHeaders() const override { return inline_headers_; } + HeaderEntryImpl** inlineHeaders() override { return inline_headers_; } + +private: + struct HeaderHandleValues { + INLINE_REQ_HEADERS(DEFINE_HEADER_HANDLE) + INLINE_REQ_RESP_HEADERS(DEFINE_HEADER_HANDLE) }; - absl::optional staticLookup(absl::string_view key) override { - return StaticLookupTable::lookup(*this, key); - } - void clearInline() override { inline_headers_.clear(); } + using HeaderHandles = ConstSingleton; - AllInlineHeaders inline_headers_; + RequestHeaderMapImpl() { clearInline(); } - friend class HeaderMapImpl; + HeaderEntryImpl* inline_headers_[]; }; -class RequestTrailerMapImpl : public HeaderMapImpl, public RequestTrailerMap {}; +/** + * Concrete implementation of RequestTrailerMap which allows for variable custom registered inline + * headers. + */ +class RequestTrailerMapImpl final : public TypedHeaderMapImpl, + public InlineStorage { +public: + static std::unique_ptr create() { + return std::unique_ptr(new (inlineHeadersSize()) + RequestTrailerMapImpl()); + } + +protected: + // See comment in RequestHeaderMapImpl. 
+ void clearInline() override { memset(inline_headers_, 0, inlineHeadersSize()); } + const HeaderEntryImpl* const* constInlineHeaders() const override { return inline_headers_; } + HeaderEntryImpl** inlineHeaders() override { return inline_headers_; } + +private: + RequestTrailerMapImpl() { clearInline(); } -class ResponseHeaderMapImpl : public HeaderMapImpl, public ResponseHeaderMap { + HeaderEntryImpl* inline_headers_[]; +}; + +/** + * Concrete implementation of ResponseHeaderMap which allows for variable custom registered inline + * headers. + */ +class ResponseHeaderMapImpl final : public TypedHeaderMapImpl, + public InlineStorage { public: + static std::unique_ptr create() { + return std::unique_ptr(new (inlineHeadersSize()) + ResponseHeaderMapImpl()); + } + INLINE_RESP_HEADERS(DEFINE_INLINE_HEADER_FUNCS) INLINE_REQ_RESP_HEADERS(DEFINE_INLINE_HEADER_FUNCS) INLINE_RESP_HEADERS_TRAILERS(DEFINE_INLINE_HEADER_FUNCS) protected: - // Explicit inline headers for the response header map. - // TODO(mattklein123): This is mostly copied between all of the concrete header map types. - // In a future change we can either get rid of O(1) headers completely, or it should be possible - // to statically register all O(1) headers and move to a single dynamically sized class where we - // we reference the O(1) headers in the table by an offset. - struct AllInlineHeaders { - AllInlineHeaders() { clear(); } - void clear() { memset(this, 0, sizeof(*this)); } - - INLINE_RESP_HEADERS(DEFINE_INLINE_HEADER_STRUCT) - INLINE_REQ_RESP_HEADERS(DEFINE_INLINE_HEADER_STRUCT) - INLINE_RESP_HEADERS_TRAILERS(DEFINE_INLINE_HEADER_STRUCT) + // See comment in RequestHeaderMapImpl. 
+ void clearInline() override { memset(inline_headers_, 0, inlineHeadersSize()); } + const HeaderEntryImpl* const* constInlineHeaders() const override { return inline_headers_; } + HeaderEntryImpl** inlineHeaders() override { return inline_headers_; } + +private: + struct HeaderHandleValues { + INLINE_RESP_HEADERS(DEFINE_HEADER_HANDLE) + INLINE_REQ_RESP_HEADERS(DEFINE_HEADER_HANDLE) + INLINE_RESP_HEADERS_TRAILERS(DEFINE_HEADER_HANDLE) }; - absl::optional staticLookup(absl::string_view key) override { - return StaticLookupTable::lookup(*this, key); - } - void clearInline() override { inline_headers_.clear(); } + using HeaderHandles = ConstSingleton; - AllInlineHeaders inline_headers_; + ResponseHeaderMapImpl() { clearInline(); } - friend class HeaderMapImpl; + HeaderEntryImpl* inline_headers_[]; }; -class ResponseTrailerMapImpl : public HeaderMapImpl, public ResponseTrailerMap { +/** + * Concrete implementation of ResponseTrailerMap which allows for variable custom registered + * inline headers. + */ +class ResponseTrailerMapImpl final : public TypedHeaderMapImpl, + public InlineStorage { public: + static std::unique_ptr create() { + return std::unique_ptr(new (inlineHeadersSize()) + ResponseTrailerMapImpl()); + } + INLINE_RESP_HEADERS_TRAILERS(DEFINE_INLINE_HEADER_FUNCS) protected: - // Explicit inline headers for the response trailer map. - // TODO(mattklein123): This is mostly copied between all of the concrete header map types. - // In a future change we can either get rid of O(1) headers completely, or it should be possible - // to statically register all O(1) headers and move to a single dynamically sized class where we - // reference the O(1) headers in the table by an offset. - struct AllInlineHeaders { - AllInlineHeaders() { clear(); } - void clear() { memset(this, 0, sizeof(*this)); } - - INLINE_RESP_HEADERS_TRAILERS(DEFINE_INLINE_HEADER_STRUCT) + // See comment in RequestHeaderMapImpl. 
+ void clearInline() override { memset(inline_headers_, 0, inlineHeadersSize()); } + const HeaderEntryImpl* const* constInlineHeaders() const override { return inline_headers_; } + HeaderEntryImpl** inlineHeaders() override { return inline_headers_; } + +private: + struct HeaderHandleValues { + INLINE_RESP_HEADERS_TRAILERS(DEFINE_HEADER_HANDLE) }; - absl::optional staticLookup(absl::string_view key) override { - return StaticLookupTable::lookup(*this, key); - } - void clearInline() override { inline_headers_.clear(); } + using HeaderHandles = ConstSingleton; - AllInlineHeaders inline_headers_; + ResponseTrailerMapImpl() { clearInline(); } - friend class HeaderMapImpl; + HeaderEntryImpl* inline_headers_[]; }; template std::unique_ptr createHeaderMap(const std::initializer_list>& values) { - auto new_header_map = std::make_unique(); - HeaderMapImpl::initFromInitList(*new_header_map, values); + auto new_header_map = T::create(); + HeaderMapImpl::initFromInitList(*new_header_map, values.begin(), values.end()); + return new_header_map; +} + +template std::unique_ptr createHeaderMap(It begin, It end) { + auto new_header_map = T::create(); + HeaderMapImpl::initFromInitList(*new_header_map, begin, end); return new_header_map; } @@ -338,10 +516,36 @@ template std::unique_ptr createHeaderMap(const HeaderMap& rhs) { // a few places when dealing with gRPC headers/trailers conversions so it's not trivial to remove. // We should revisit this to figure how to make this a bit safer as a non-intentional conversion // may have surprising results with different O(1) headers, implementations, etc. 
- auto new_header_map = std::make_unique(); + auto new_header_map = T::create(); HeaderMapImpl::copyFrom(*new_header_map, rhs); return new_header_map; } +struct EmptyHeaders { + RequestHeaderMapPtr request_headers = RequestHeaderMapImpl::create(); + ResponseHeaderMapPtr response_headers = ResponseHeaderMapImpl::create(); + ResponseTrailerMapPtr response_trailers = ResponseTrailerMapImpl::create(); +}; + +using StaticEmptyHeaders = ConstSingleton; + +class HeaderMapImplUtility { +public: + struct HeaderMapImplInfo { + // Human readable name for the header map used in info logging. + std::string name_; + // The byte size of the header map including both fixed space as well as variable space used + // by the registered custom headers. + size_t size_; + // All registered custom headers for the header map. + std::vector registered_headers_; + }; + + /** + * Fetch detailed information about each header map implementation for use in logging. + */ + static std::vector getAllHeaderMapImplInfo(); +}; + } // namespace Http } // namespace Envoy diff --git a/source/common/http/header_utility.cc b/source/common/http/header_utility.cc index 72560b73e0768..c293f29e16cde 100644 --- a/source/common/http/header_utility.cc +++ b/source/common/http/header_utility.cc @@ -5,6 +5,7 @@ #include "common/common/regex.h" #include "common/common/utility.h" #include "common/http/header_map_impl.h" +#include "common/http/utility.h" #include "common/protobuf/utility.h" #include "common/runtime/runtime_features.h" @@ -77,18 +78,13 @@ HeaderUtility::HeaderData::HeaderData(const envoy::config::route::v3::HeaderMatc void HeaderUtility::getAllOfHeader(const HeaderMap& headers, absl::string_view key, std::vector& out) { - auto args = std::make_pair(LowerCaseString(std::string(key)), &out); - - headers.iterate( - [](const HeaderEntry& header, void* context) -> HeaderMap::Iterate { - auto key_ret = - static_cast*>*>(context); - if (header.key() == key_ret->first.get().c_str()) { - 
key_ret->second->emplace_back(header.value().getStringView()); - } - return HeaderMap::Iterate::Continue; - }, - &args); + headers.iterate([key = LowerCaseString(std::string(key)), + &out](const HeaderEntry& header) -> HeaderMap::Iterate { + if (header.key() == key.get().c_str()) { + out.emplace_back(header.value().getStringView()); + } + return HeaderMap::Iterate::Continue; + }); } bool HeaderUtility::matchHeaders(const HeaderMap& request_headers, @@ -161,17 +157,22 @@ bool HeaderUtility::isConnect(const RequestHeaderMap& headers) { return headers.Method() && headers.Method()->value() == Http::Headers::get().MethodValues.Connect; } +bool HeaderUtility::isConnectResponse(const RequestHeaderMap* request_headers, + const ResponseHeaderMap& response_headers) { + return request_headers && isConnect(*request_headers) && + static_cast(Http::Utility::getResponseStatus(response_headers)) == + Http::Code::OK; +} + void HeaderUtility::addHeaders(HeaderMap& headers, const HeaderMap& headers_to_add) { - headers_to_add.iterate( - [](const HeaderEntry& header, void* context) -> HeaderMap::Iterate { - HeaderString k; - k.setCopy(header.key().getStringView()); - HeaderString v; - v.setCopy(header.value().getStringView()); - static_cast(context)->addViaMove(std::move(k), std::move(v)); - return HeaderMap::Iterate::Continue; - }, - &headers); + headers_to_add.iterate([&headers](const HeaderEntry& header) -> HeaderMap::Iterate { + HeaderString k; + k.setCopy(header.key().getStringView()); + HeaderString v; + v.setCopy(header.value().getStringView()); + headers.addViaMove(std::move(k), std::move(v)); + return HeaderMap::Iterate::Continue; + }); } bool HeaderUtility::isEnvoyInternalRequest(const RequestHeaderMap& headers) { @@ -180,15 +181,74 @@ bool HeaderUtility::isEnvoyInternalRequest(const RequestHeaderMap& headers) { internal_request_header->value() == Headers::get().EnvoyInternalRequestValues.True; } +void HeaderUtility::stripPortFromHost(RequestHeaderMap& headers, uint32_t 
listener_port) { + + if (headers.getMethodValue() == Http::Headers::get().MethodValues.Connect) { + // According to RFC 2817 Connect method should have port part in host header. + // In this case we won't strip it even if configured to do so. + return; + } + const absl::string_view original_host = headers.getHostValue(); + const absl::string_view::size_type port_start = original_host.rfind(':'); + if (port_start == absl::string_view::npos) { + return; + } + // According to RFC3986 v6 address is always enclosed in "[]". section 3.2.2. + const auto v6_end_index = original_host.rfind("]"); + if (v6_end_index == absl::string_view::npos || v6_end_index < port_start) { + if ((port_start + 1) > original_host.size()) { + return; + } + const absl::string_view port_str = original_host.substr(port_start + 1); + uint32_t port = 0; + if (!absl::SimpleAtoi(port_str, &port)) { + return; + } + if (port != listener_port) { + // We would strip ports only if they are the same, as local port of the listener. + return; + } + const absl::string_view host = original_host.substr(0, port_start); + headers.setHost(host); + } +} + absl::optional> HeaderUtility::requestHeadersValid(const RequestHeaderMap& headers) { // Make sure the host is valid. - if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.strict_authority_validation") && - headers.Host() && !HeaderUtility::authorityIsValid(headers.Host()->value().getStringView())) { + if (headers.Host() && !HeaderUtility::authorityIsValid(headers.Host()->value().getStringView())) { return SharedResponseCodeDetails::get().InvalidAuthority; } return absl::nullopt; } +bool HeaderUtility::shouldCloseConnection(Http::Protocol protocol, + const RequestOrResponseHeaderMap& headers) { + // HTTP/1.0 defaults to single-use connections. Make sure the connection will be closed unless + // Keep-Alive is present. 
+ if (protocol == Protocol::Http10 && + (!headers.Connection() || + !Envoy::StringUtil::caseFindToken(headers.Connection()->value().getStringView(), ",", + Http::Headers::get().ConnectionValues.KeepAlive))) { + return true; + } + + if (protocol == Protocol::Http11 && headers.Connection() && + Envoy::StringUtil::caseFindToken(headers.Connection()->value().getStringView(), ",", + Http::Headers::get().ConnectionValues.Close)) { + return true; + } + + // Note: Proxy-Connection is not a standard header, but is supported here + // since it is supported by http-parser the underlying parser for http + // requests. + if (protocol < Protocol::Http2 && headers.ProxyConnection() && + Envoy::StringUtil::caseFindToken(headers.ProxyConnection()->value().getStringView(), ",", + Http::Headers::get().ConnectionValues.Close)) { + return true; + } + return false; +} + } // namespace Http } // namespace Envoy diff --git a/source/common/http/header_utility.h b/source/common/http/header_utility.h index b061a5a31d0e4..22992f1927f9e 100644 --- a/source/common/http/header_utility.h +++ b/source/common/http/header_utility.h @@ -5,6 +5,7 @@ #include "envoy/common/regex.h" #include "envoy/config/route/v3/route_components.pb.h" #include "envoy/http/header_map.h" +#include "envoy/http/protocol.h" #include "envoy/json/json_object.h" #include "envoy/type/v3/range.pb.h" @@ -116,6 +117,12 @@ class HeaderUtility { */ static bool isConnect(const RequestHeaderMap& headers); + /** + * @brief a helper function to determine if the headers represent an accepted CONNECT response. 
+ */ + static bool isConnectResponse(const RequestHeaderMap* request_headers, + const ResponseHeaderMap& response_headers); + /** * Add headers from one HeaderMap to another * @param headers target where headers will be added @@ -135,6 +142,21 @@ class HeaderUtility { */ static absl::optional> requestHeadersValid(const RequestHeaderMap& headers); + + /** + * Determines if the response should be framed by Connection: Close based on protocol + * and headers. + * @param protocol the protocol of the request + * @param headers the request or response headers + * @return if the response should be framed by Connection: Close + */ + static bool shouldCloseConnection(Http::Protocol protocol, + const RequestOrResponseHeaderMap& headers); + + /** + * @brief Remove the port part from host/authority header if it is equal to provided port + */ + static void stripPortFromHost(RequestHeaderMap& headers, uint32_t listener_port); }; } // namespace Http } // namespace Envoy diff --git a/source/common/http/headers.h b/source/common/http/headers.h index 10b87e3da092d..73f866f7b60f1 100644 --- a/source/common/http/headers.h +++ b/source/common/http/headers.h @@ -43,12 +43,13 @@ class PrefixValue { }; /** - * Constant HTTP headers and values. All lower case. + * These are headers that are used in extension custom O(1) header registration. These headers + * *must* not contain any prefix override headers, as static init order requires that HeaderValues + * be instantiated for the first time after bootstrap is loaded and before the header maps are + * finalized. 
*/ -class HeaderValues { +class CustomHeaderValues { public: - const char* prefix() { return ThreadSafeSingleton::get().prefix(); } - const LowerCaseString Accept{"accept"}; const LowerCaseString AcceptEncoding{"accept-encoding"}; const LowerCaseString AccessControlRequestMethod{"access-control-request-method"}; @@ -58,14 +59,73 @@ class HeaderValues { const LowerCaseString AccessControlExposeHeaders{"access-control-expose-headers"}; const LowerCaseString AccessControlMaxAge{"access-control-max-age"}; const LowerCaseString AccessControlAllowCredentials{"access-control-allow-credentials"}; - const LowerCaseString Age{"age"}; const LowerCaseString Authorization{"authorization"}; + const LowerCaseString CacheControl{"cache-control"}; + const LowerCaseString ContentEncoding{"content-encoding"}; + const LowerCaseString Etag{"etag"}; + const LowerCaseString GrpcAcceptEncoding{"grpc-accept-encoding"}; + const LowerCaseString IfMatch{"if-match"}; + const LowerCaseString IfNoneMatch{"if-none-match"}; + const LowerCaseString IfModifiedSince{"if-modified-since"}; + const LowerCaseString IfUnmodifiedSince{"if-unmodified-since"}; + const LowerCaseString IfRange{"if-range"}; + const LowerCaseString LastModified{"last-modified"}; + const LowerCaseString Origin{"origin"}; + const LowerCaseString OtSpanContext{"x-ot-span-context"}; + const LowerCaseString Pragma{"pragma"}; + const LowerCaseString Referer{"referer"}; + const LowerCaseString Vary{"vary"}; + + struct { + const std::string Gzip{"gzip"}; + const std::string Identity{"identity"}; + const std::string Wildcard{"*"}; + } AcceptEncodingValues; + + struct { + const std::string All{"*"}; + } AccessControlAllowOriginValue; + + struct { + const std::string NoCache{"no-cache"}; + const std::string NoCacheMaxAge0{"no-cache, max-age=0"}; + const std::string NoTransform{"no-transform"}; + const std::string Private{"private"}; + } CacheControlValues; + + struct { + const std::string Gzip{"gzip"}; + } ContentEncodingValues; + + struct 
{ + const std::string True{"true"}; + } CORSValues; + + struct { + const std::string Default{"identity,deflate,gzip"}; + } GrpcAcceptEncodingValues; + + struct { + const std::string AcceptEncoding{"Accept-Encoding"}; + const std::string Wildcard{"*"}; + } VaryValues; +}; + +using CustomHeaders = ConstSingleton; + +/** + * Constant HTTP headers and values. All lower case. This group of headers can contain prefix + * override headers. + */ +class HeaderValues { +public: + const char* prefix() { return ThreadSafeSingleton::get().prefix(); } + + const LowerCaseString Age{"age"}; const LowerCaseString ProxyAuthenticate{"proxy-authenticate"}; const LowerCaseString ProxyAuthorization{"proxy-authorization"}; - const LowerCaseString CacheControl{"cache-control"}; const LowerCaseString ClientTraceId{"x-client-trace-id"}; const LowerCaseString Connection{"connection"}; - const LowerCaseString ContentEncoding{"content-encoding"}; const LowerCaseString ContentLength{"content-length"}; const LowerCaseString ContentType{"content-type"}; const LowerCaseString Cookie{"cookie"}; @@ -86,10 +146,14 @@ class HeaderValues { absl::StrCat(prefix(), "-immediate-health-check-fail")}; const LowerCaseString EnvoyOriginalUrl{absl::StrCat(prefix(), "-original-url")}; const LowerCaseString EnvoyInternalRequest{absl::StrCat(prefix(), "-internal")}; + // TODO(mattklein123): EnvoyIpTags should be a custom header registered with the IP tagging + // filter. We need to figure out if we can remove this header from the set of headers that + // participate in prefix overrides. 
const LowerCaseString EnvoyIpTags{absl::StrCat(prefix(), "-ip-tags")}; const LowerCaseString EnvoyMaxRetries{absl::StrCat(prefix(), "-max-retries")}; const LowerCaseString EnvoyNotForwarded{absl::StrCat(prefix(), "-not-forwarded")}; const LowerCaseString EnvoyOriginalDstHost{absl::StrCat(prefix(), "-original-dst-host")}; + const LowerCaseString EnvoyOriginalMethod{absl::StrCat(prefix(), "-original-method")}; const LowerCaseString EnvoyOriginalPath{absl::StrCat(prefix(), "-original-path")}; const LowerCaseString EnvoyOverloaded{absl::StrCat(prefix(), "-overloaded")}; const LowerCaseString EnvoyRateLimited{absl::StrCat(prefix(), "-ratelimited")}; @@ -115,7 +179,6 @@ class HeaderValues { const LowerCaseString EnvoyUpstreamHealthCheckedCluster{ absl::StrCat(prefix(), "-upstream-healthchecked-cluster")}; const LowerCaseString EnvoyDecoratorOperation{absl::StrCat(prefix(), "-decorator-operation")}; - const LowerCaseString Etag{"etag"}; const LowerCaseString Expect{"expect"}; const LowerCaseString Expires{"expires"}; const LowerCaseString ForwardedClientCert{"x-forwarded-client-cert"}; @@ -125,7 +188,6 @@ class HeaderValues { const LowerCaseString GrpcMessage{"grpc-message"}; const LowerCaseString GrpcStatus{"grpc-status"}; const LowerCaseString GrpcTimeout{"grpc-timeout"}; - const LowerCaseString GrpcAcceptEncoding{"grpc-accept-encoding"}; const LowerCaseString GrpcStatusDetailsBin{"grpc-status-details-bin"}; const LowerCaseString Host{":authority"}; const LowerCaseString HostLegacy{"host"}; @@ -133,12 +195,10 @@ class HeaderValues { const LowerCaseString KeepAlive{"keep-alive"}; const LowerCaseString Location{"location"}; const LowerCaseString Method{":method"}; - const LowerCaseString Origin{"origin"}; - const LowerCaseString OtSpanContext{"x-ot-span-context"}; const LowerCaseString Path{":path"}; const LowerCaseString Protocol{":protocol"}; const LowerCaseString ProxyConnection{"proxy-connection"}; - const LowerCaseString Referer{"referer"}; + const LowerCaseString 
Range{"range"}; const LowerCaseString RequestId{"x-request-id"}; const LowerCaseString Scheme{":scheme"}; const LowerCaseString Server{"server"}; @@ -148,7 +208,6 @@ class HeaderValues { const LowerCaseString TE{"te"}; const LowerCaseString Upgrade{"upgrade"}; const LowerCaseString UserAgent{"user-agent"}; - const LowerCaseString Vary{"vary"}; const LowerCaseString Via{"via"}; const LowerCaseString WWWAuthenticate{"www-authenticate"}; const LowerCaseString XContentTypeOptions{"x-content-type-options"}; @@ -166,13 +225,6 @@ class HeaderValues { const std::string WebSocket{"websocket"}; } UpgradeValues; - struct { - const std::string NoCache{"no-cache"}; - const std::string NoCacheMaxAge0{"no-cache, max-age=0"}; - const std::string NoTransform{"no-transform"}; - const std::string Private{"private"}; - } CacheControlValues; - struct { const std::string Text{"text/plain"}; const std::string TextEventStream{"text/event-stream"}; @@ -208,6 +260,7 @@ class HeaderValues { const std::string _5xx{"5xx"}; const std::string GatewayError{"gateway-error"}; const std::string ConnectFailure{"connect-failure"}; + const std::string EnvoyRateLimited{"envoy-ratelimited"}; const std::string RefusedStream{"refused-stream"}; const std::string Retriable4xx{"retriable-4xx"}; const std::string RetriableStatusCodes{"retriable-status-codes"}; @@ -260,10 +313,6 @@ class HeaderValues { const std::string EnvoyHealthChecker{"Envoy/HC"}; } UserAgentValues; - struct { - const std::string Default{"identity,deflate,gzip"}; - } GrpcAcceptEncodingValues; - struct { const std::string Trailers{"trailers"}; } TEValues; @@ -272,35 +321,12 @@ class HeaderValues { const std::string Nosniff{"nosniff"}; } XContentTypeOptionValues; - struct { - const std::string True{"true"}; - } CORSValues; - struct { const std::string Http10String{"HTTP/1.0"}; const std::string Http11String{"HTTP/1.1"}; const std::string Http2String{"HTTP/2"}; const std::string Http3String{"HTTP/3"}; } ProtocolStrings; - - struct { - const 
std::string Gzip{"gzip"}; - const std::string Identity{"identity"}; - const std::string Wildcard{"*"}; - } AcceptEncodingValues; - - struct { - const std::string Gzip{"gzip"}; - } ContentEncodingValues; - - struct { - const std::string AcceptEncoding{"Accept-Encoding"}; - const std::string Wildcard{"*"}; - } VaryValues; - - struct { - const std::string All{"*"}; - } AccessControlAllowOriginValue; }; using Headers = ConstSingleton; diff --git a/source/common/http/http1/BUILD b/source/common/http/http1/BUILD index 2709b312976e4..2fb4325d9810f 100644 --- a/source/common/http/http1/BUILD +++ b/source/common/http/http1/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( @@ -15,61 +15,55 @@ envoy_cc_library( ) envoy_cc_library( - name = "codec_lib", - srcs = ["codec_impl.cc"], - hdrs = ["codec_impl.h"], - external_deps = ["http_parser"], + name = "codec_stats_lib", + hdrs = ["codec_stats.h"], deps = [ - "//include/envoy/buffer:buffer_interface", - "//include/envoy/http:codec_interface", - "//include/envoy/http:header_map_interface", - "//include/envoy/network:connection_interface", "//include/envoy/stats:stats_interface", "//include/envoy/stats:stats_macros", - "//source/common/buffer:buffer_lib", - "//source/common/buffer:watermark_buffer_lib", - "//source/common/common:assert_lib", - "//source/common/common:utility_lib", - "//source/common/http:codec_helper_lib", - "//source/common/http:codes_lib", - "//source/common/http:exception_lib", - "//source/common/http:header_map_lib", - "//source/common/http:header_utility_lib", - "//source/common/http:headers_lib", - "//source/common/http:utility_lib", - "//source/common/http/http1:header_formatter_lib", - "//source/common/runtime:runtime_features_lib", - "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + "//source/common/common:thread_lib", ], ) +CODEC_LIB_DEPS = [ + 
":codec_stats_lib", + ":header_formatter_lib", + "//include/envoy/buffer:buffer_interface", + "//include/envoy/http:codec_interface", + "//include/envoy/http:header_map_interface", + "//include/envoy/network:connection_interface", + "//source/common/buffer:buffer_lib", + "//source/common/buffer:watermark_buffer_lib", + "//source/common/common:assert_lib", + "//source/common/common:statusor_lib", + "//source/common/common:utility_lib", + "//source/common/grpc:common_lib", + "//source/common/http:codec_helper_lib", + "//source/common/http:codes_lib", + "//source/common/http:exception_lib", + "//source/common/http:header_map_lib", + "//source/common/http:header_utility_lib", + "//source/common/http:headers_lib", + "//source/common/http:status_lib", + "//source/common/http:url_utility_lib", + "//source/common/http:utility_lib", + "//source/common/runtime:runtime_features_lib", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", +] + envoy_cc_library( - name = "conn_pool_legacy_lib", - srcs = ["conn_pool_legacy.cc"], - hdrs = ["conn_pool_legacy.h"], - external_deps = ["abseil_optional"], - deps = [ - "//include/envoy/event:deferred_deletable", - "//include/envoy/event:dispatcher_interface", - "//include/envoy/event:timer_interface", - "//include/envoy/http:conn_pool_interface", - "//include/envoy/http:header_map_interface", - "//include/envoy/network:connection_interface", - "//include/envoy/stats:stats_interface", - "//include/envoy/stats:timespan_interface", - "//include/envoy/upstream:upstream_interface", - "//source/common/common:linked_object", - "//source/common/common:utility_lib", - "//source/common/http:codec_client_lib", - "//source/common/http:codec_wrappers_lib", - "//source/common/http:codes_lib", - "//source/common/http:conn_pool_base_legacy_lib", - "//source/common/http:headers_lib", - "//source/common/network:utility_lib", - "//source/common/runtime:runtime_features_lib", - "//source/common/stats:timespan_lib", - "//source/common/upstream:upstream_lib", - 
], + name = "codec_lib", + srcs = ["codec_impl.cc"], + hdrs = ["codec_impl.h"], + external_deps = ["http_parser"], + deps = CODEC_LIB_DEPS + ["//source/common/common:cleanup_lib"], +) + +envoy_cc_library( + name = "codec_legacy_lib", + srcs = ["codec_impl_legacy.cc"], + hdrs = ["codec_impl_legacy.h"], + external_deps = ["http_parser"], + deps = CODEC_LIB_DEPS, ) envoy_cc_library( @@ -78,7 +72,6 @@ envoy_cc_library( hdrs = ["conn_pool.h"], external_deps = ["abseil_optional"], deps = [ - ":conn_pool_legacy_lib", "//include/envoy/event:deferred_deletable", "//include/envoy/event:dispatcher_interface", "//include/envoy/event:timer_interface", diff --git a/source/common/http/http1/codec_impl.cc b/source/common/http/http1/codec_impl.cc index 9f5fb06f53d14..4e8c54ed7db53 100644 --- a/source/common/http/http1/codec_impl.cc +++ b/source/common/http/http1/codec_impl.cc @@ -9,12 +9,16 @@ #include "envoy/http/header_map.h" #include "envoy/network/connection.h" +#include "common/common/cleanup.h" #include "common/common/enum_to_int.h" +#include "common/common/statusor.h" #include "common/common/utility.h" +#include "common/grpc/common.h" #include "common/http/exception.h" #include "common/http/header_utility.h" #include "common/http/headers.h" #include "common/http/http1/header_formatter.h" +#include "common/http/url_utility.h" #include "common/http/utility.h" #include "common/runtime/runtime_features.h" @@ -34,6 +38,10 @@ struct Http1ResponseCodeDetailValues { const absl::string_view ConnectionHeaderSanitization = "http1.connection_header_rejected"; const absl::string_view InvalidUrl = "http1.invalid_url"; const absl::string_view InvalidTransferEncoding = "http1.invalid_transfer_encoding"; + const absl::string_view BodyDisallowed = "http1.body_disallowed"; + const absl::string_view TransferEncodingNotAllowed = "http1.transfer_encoding_not_allowed"; + const absl::string_view ContentLengthNotAllowed = "http1.content_length_not_allowed"; + const absl::string_view 
InvalidUnderscore = "http1.unexpected_underscore"; }; struct Http1HeaderTypesValues { @@ -67,8 +75,7 @@ const std::string StreamEncoderImpl::LAST_CHUNK = "0\r\n"; StreamEncoderImpl::StreamEncoderImpl(ConnectionImpl& connection, HeaderKeyFormatter* header_key_formatter) : connection_(connection), disable_chunk_encoding_(false), chunk_encoding_(true), - processing_100_continue_(false), is_response_to_head_request_(false), - is_response_to_connect_request_(false), is_content_length_allowed_(true), + is_response_to_head_request_(false), is_response_to_connect_request_(false), header_key_formatter_(header_key_formatter) { if (connection_.connection().aboveHighWatermark()) { runHighWatermarkCallbacks(); @@ -100,35 +107,30 @@ void StreamEncoderImpl::encodeFormattedHeader(absl::string_view key, absl::strin void ResponseEncoderImpl::encode100ContinueHeaders(const ResponseHeaderMap& headers) { ASSERT(headers.Status()->value() == "100"); - processing_100_continue_ = true; encodeHeaders(headers, false); - processing_100_continue_ = false; } void StreamEncoderImpl::encodeHeadersBase(const RequestOrResponseHeaderMap& headers, - bool end_stream) { + absl::optional status, bool end_stream) { bool saw_content_length = false; - headers.iterate( - [](const HeaderEntry& header, void* context) -> HeaderMap::Iterate { - absl::string_view key_to_use = header.key().getStringView(); - uint32_t key_size_to_use = header.key().size(); - // Translate :authority -> host so that upper layers do not need to deal with this. - if (key_size_to_use > 1 && key_to_use[0] == ':' && key_to_use[1] == 'a') { - key_to_use = absl::string_view(Headers::get().HostLegacy.get()); - key_size_to_use = Headers::get().HostLegacy.get().size(); - } + headers.iterate([this](const HeaderEntry& header) -> HeaderMap::Iterate { + absl::string_view key_to_use = header.key().getStringView(); + uint32_t key_size_to_use = header.key().size(); + // Translate :authority -> host so that upper layers do not need to deal with this. 
+ if (key_size_to_use > 1 && key_to_use[0] == ':' && key_to_use[1] == 'a') { + key_to_use = absl::string_view(Headers::get().HostLegacy.get()); + key_size_to_use = Headers::get().HostLegacy.get().size(); + } - // Skip all headers starting with ':' that make it here. - if (key_to_use[0] == ':') { - return HeaderMap::Iterate::Continue; - } + // Skip all headers starting with ':' that make it here. + if (key_to_use[0] == ':') { + return HeaderMap::Iterate::Continue; + } - static_cast(context)->encodeFormattedHeader( - key_to_use, header.value().getStringView()); + encodeFormattedHeader(key_to_use, header.value().getStringView()); - return HeaderMap::Iterate::Continue; - }, - this); + return HeaderMap::Iterate::Continue; + }); if (headers.ContentLength()) { saw_content_length = true; @@ -150,7 +152,7 @@ void StreamEncoderImpl::encodeHeadersBase(const RequestOrResponseHeaderMap& head if (saw_content_length || disable_chunk_encoding_) { chunk_encoding_ = false; } else { - if (processing_100_continue_) { + if (status && *status == 100) { // Make sure we don't serialize chunk information with 100-Continue headers. chunk_encoding_ = false; } else if (end_stream && !is_response_to_head_request_) { @@ -158,15 +160,27 @@ void StreamEncoderImpl::encodeHeadersBase(const RequestOrResponseHeaderMap& head // response to a HEAD request. // For 204s and 1xx where content length is disallowed, don't append the content length but // also don't chunk encode. 
- if (is_content_length_allowed_) { + if (!status || (*status >= 200 && *status != 204)) { encodeFormattedHeader(Headers::get().ContentLength.get(), "0"); } chunk_encoding_ = false; } else if (connection_.protocol() == Protocol::Http10) { chunk_encoding_ = false; + } else if (status && (*status < 200 || *status == 204) && + connection_.strict1xxAnd204Headers()) { + // TODO(zuercher): when the "envoy.reloadable_features.strict_1xx_and_204_response_headers" + // feature flag is removed, this block can be coalesced with the 100 Continue logic above. + + // For 1xx and 204 responses, do not send the chunked encoding header or enable chunked + // encoding: https://tools.ietf.org/html/rfc7230#section-3.3.1 + chunk_encoding_ = false; } else { - encodeFormattedHeader(Headers::get().TransferEncoding.get(), - Headers::get().TransferEncodingValues.Chunked); + // For responses to connect requests, do not send the chunked encoding header: + // https://tools.ietf.org/html/rfc7231#section-4.3.6. + if (!is_response_to_connect_request_) { + encodeFormattedHeader(Headers::get().TransferEncoding.get(), + Headers::get().TransferEncodingValues.Chunked); + } // We do not apply chunk encoding for HTTP upgrades, including CONNECT style upgrades. 
// If there is a body in a response on the upgrade path, the chunks will be // passed through via maybeDirectDispatch so we need to avoid appending @@ -220,13 +234,10 @@ void StreamEncoderImpl::encodeTrailersBase(const HeaderMap& trailers) { // Finalize the body connection_.buffer().add(LAST_CHUNK); - trailers.iterate( - [](const HeaderEntry& header, void* context) -> HeaderMap::Iterate { - static_cast(context)->encodeFormattedHeader( - header.key().getStringView(), header.value().getStringView()); - return HeaderMap::Iterate::Continue; - }, - this); + trailers.iterate([this](const HeaderEntry& header) -> HeaderMap::Iterate { + encodeFormattedHeader(header.key().getStringView(), header.value().getStringView()); + return HeaderMap::Iterate::Continue; + }); connection_.flushOutput(); connection_.buffer().add(CRLF); @@ -266,9 +277,10 @@ void ServerConnectionImpl::maybeAddSentinelBufferFragment(Buffer::WatermarkBuffe outbound_responses_++; } -void ServerConnectionImpl::doFloodProtectionChecks() const { +Status ServerConnectionImpl::doFloodProtectionChecks() const { + ASSERT(dispatching_); if (!flood_protection_) { - return; + return okStatus(); } // Before processing another request, make sure that we are below the response flood protection // threshold. 
@@ -276,8 +288,9 @@ void ServerConnectionImpl::doFloodProtectionChecks() const { ENVOY_CONN_LOG(trace, "error accepting request: too many pending responses queued", connection_); stats_.response_flood_.inc(); - throw FrameFloodException("Too many responses queued."); + return bufferFloodError("Too many responses queued."); } + return okStatus(); } void ConnectionImpl::flushOutput(bool end_encode) { @@ -304,7 +317,17 @@ void StreamEncoderImpl::resetStream(StreamResetReason reason) { connection_.onResetStreamBase(reason); } -void StreamEncoderImpl::readDisable(bool disable) { connection_.readDisable(disable); } +void StreamEncoderImpl::readDisable(bool disable) { + if (disable) { + ++read_disable_calls_; + } else { + ASSERT(read_disable_calls_ != 0); + if (read_disable_calls_ != 0) { + --read_disable_calls_; + } + } + connection_.readDisable(disable); +} uint32_t StreamEncoderImpl::bufferLimit() { return connection_.bufferLimit(); } @@ -322,7 +345,7 @@ void ResponseEncoderImpl::encodeHeaders(const ResponseHeaderMap& headers, bool e ASSERT(headers.Status() != nullptr); uint64_t numeric_status = Utility::getResponseStatus(headers); - if (connection_.protocol() == Protocol::Http10 && connection_.supports_http_10()) { + if (connection_.protocol() == Protocol::Http10 && connection_.supportsHttp10()) { connection_.copyToBuffer(HTTP_10_RESPONSE_PREFIX, sizeof(HTTP_10_RESPONSE_PREFIX) - 1); } else { connection_.copyToBuffer(RESPONSE_PREFIX, sizeof(RESPONSE_PREFIX) - 1); @@ -337,20 +360,12 @@ void ResponseEncoderImpl::encodeHeaders(const ResponseHeaderMap& headers, bool e connection_.addCharToBuffer('\r'); connection_.addCharToBuffer('\n'); - if (numeric_status == 204 || numeric_status < 200) { - // Per https://tools.ietf.org/html/rfc7230#section-3.3.2 - setIsContentLengthAllowed(false); - } else { - // Make sure that if we encodeHeaders(100) then encodeHeaders(200) that we - // set is_content_length_allowed_ back to true. 
- setIsContentLengthAllowed(true); - } if (numeric_status >= 300) { // Don't do special CONNECT logic if the CONNECT was rejected. is_response_to_connect_request_ = false; } - encodeHeadersBase(headers, end_stream); + encodeHeadersBase(headers, absl::make_optional(numeric_status), end_stream); } static const char REQUEST_POSTFIX[] = " HTTP/1.1\r\n"; @@ -361,9 +376,14 @@ void RequestEncoderImpl::encodeHeaders(const RequestHeaderMap& headers, bool end const HeaderEntry* host = headers.Host(); bool is_connect = HeaderUtility::isConnect(headers); - if (!method || (!path && !is_connect)) { - throw CodecClientException(":method and :path must be specified"); - } + // TODO(#10878): Include missing host header for CONNECT. + // The RELEASE_ASSERT below does not change the existing behavior of `encodeHeaders`. + // The `encodeHeaders` used to throw on errors. Callers of `encodeHeaders()` do not catch + // exceptions and this would cause abnormal process termination in error cases. This change + // replaces abnormal process termination from unhandled exception with the RELEASE_ASSERT. Further + // work will replace this RELEASE_ASSERT with proper error handling. + RELEASE_ASSERT(method && (path || is_connect), ":method and :path must be specified"); + if (method->value() == Headers::get().MethodValues.Head) { head_request_ = true; } else if (method->value() == Headers::get().MethodValues.Connect) { @@ -383,37 +403,60 @@ void RequestEncoderImpl::encodeHeaders(const RequestHeaderMap& headers, bool end } connection_.copyToBuffer(REQUEST_POSTFIX, sizeof(REQUEST_POSTFIX) - 1); - encodeHeadersBase(headers, end_stream); + encodeHeadersBase(headers, absl::nullopt, end_stream); +} + +int ConnectionImpl::setAndCheckCallbackStatus(Status&& status) { + ASSERT(codec_status_.ok()); + codec_status_ = std::move(status); + return codec_status_.ok() ? 
enumToInt(HttpParserCode::Success) : enumToInt(HttpParserCode::Error); +} + +int ConnectionImpl::setAndCheckCallbackStatusOr(Envoy::StatusOr&& statusor) { + ASSERT(codec_status_.ok()); + if (statusor.ok()) { + return statusor.value(); + } else { + codec_status_ = std::move(statusor.status()); + return enumToInt(HttpParserCode::Error); + } } http_parser_settings ConnectionImpl::settings_{ [](http_parser* parser) -> int { - static_cast(parser->data)->onMessageBeginBase(); - return 0; + auto* conn_impl = static_cast(parser->data); + auto status = conn_impl->onMessageBeginBase(); + return conn_impl->setAndCheckCallbackStatus(std::move(status)); }, [](http_parser* parser, const char* at, size_t length) -> int { - static_cast(parser->data)->onUrl(at, length); - return 0; + auto* conn_impl = static_cast(parser->data); + auto status = conn_impl->onUrl(at, length); + return conn_impl->setAndCheckCallbackStatus(std::move(status)); }, nullptr, // on_status [](http_parser* parser, const char* at, size_t length) -> int { - static_cast(parser->data)->onHeaderField(at, length); - return 0; + auto* conn_impl = static_cast(parser->data); + auto status = conn_impl->onHeaderField(at, length); + return conn_impl->setAndCheckCallbackStatus(std::move(status)); }, [](http_parser* parser, const char* at, size_t length) -> int { - static_cast(parser->data)->onHeaderValue(at, length); - return 0; + auto* conn_impl = static_cast(parser->data); + auto status = conn_impl->onHeaderValue(at, length); + return conn_impl->setAndCheckCallbackStatus(std::move(status)); }, [](http_parser* parser) -> int { - return static_cast(parser->data)->onHeadersCompleteBase(); + auto* conn_impl = static_cast(parser->data); + auto statusor = conn_impl->onHeadersCompleteBase(); + return conn_impl->setAndCheckCallbackStatusOr(std::move(statusor)); }, [](http_parser* parser, const char* at, size_t length) -> int { static_cast(parser->data)->bufferBody(at, length); return 0; }, [](http_parser* parser) -> int { - 
static_cast(parser->data)->onMessageCompleteBase(); - return 0; + auto* conn_impl = static_cast(parser->data); + auto status = conn_impl->onMessageCompleteBase(); + return conn_impl->setAndCheckCallbackStatus(std::move(status)); }, [](http_parser* parser) -> int { // A 0-byte chunk header is used to signal the end of the chunked body. @@ -427,36 +470,41 @@ http_parser_settings ConnectionImpl::settings_{ nullptr // on_chunk_complete }; -ConnectionImpl::ConnectionImpl(Network::Connection& connection, Stats::Scope& stats, +ConnectionImpl::ConnectionImpl(Network::Connection& connection, CodecStats& stats, http_parser_type type, uint32_t max_headers_kb, const uint32_t max_headers_count, HeaderKeyFormatterPtr&& header_key_formatter, bool enable_trailers) - : connection_(connection), stats_{ALL_HTTP1_CODEC_STATS(POOL_COUNTER_PREFIX(stats, "http1."))}, + : connection_(connection), stats_(stats), header_key_formatter_(std::move(header_key_formatter)), processing_trailers_(false), handling_upgrade_(false), reset_stream_called_(false), deferred_end_stream_headers_(false), - strict_header_validation_( - Runtime::runtimeFeatureEnabled("envoy.reloadable_features.strict_header_validation")), connection_header_sanitization_(Runtime::runtimeFeatureEnabled( "envoy.reloadable_features.connection_header_sanitization")), enable_trailers_(enable_trailers), - reject_unsupported_transfer_encodings_(Runtime::runtimeFeatureEnabled( - "envoy.reloadable_features.reject_unsupported_transfer_encodings")), + strict_1xx_and_204_headers_(Runtime::runtimeFeatureEnabled( + "envoy.reloadable_features.strict_1xx_and_204_response_headers")), + dispatching_(false), output_buffer_([&]() -> void { this->onBelowLowWatermark(); }, - [&]() -> void { this->onAboveHighWatermark(); }), + [&]() -> void { this->onAboveHighWatermark(); }, + []() -> void { /* TODO(adisuissa): Handle overflow watermark */ }), max_headers_kb_(max_headers_kb), max_headers_count_(max_headers_count) { 
output_buffer_.setWatermarks(connection.bufferLimit()); http_parser_init(&parser_, type); parser_.data = this; } -void ConnectionImpl::completeLastHeader() { +Status ConnectionImpl::completeLastHeader() { + ASSERT(dispatching_); ENVOY_CONN_LOG(trace, "completed header: key={} value={}", connection_, current_header_field_.getStringView(), current_header_value_.getStringView()); - checkHeaderNameForUnderscores(); + RETURN_IF_ERROR(checkHeaderNameForUnderscores()); auto& headers_or_trailers = headersOrTrailers(); if (!current_header_field_.empty()) { current_header_field_.inlineTransform([](char c) { return absl::ascii_tolower(c); }); + // Strip trailing whitespace of the current header value if any. Leading whitespace was trimmed + // in ConnectionImpl::onHeaderValue. http_parser does not strip leading or trailing whitespace + // as the spec requires: https://tools.ietf.org/html/rfc7230#section-3.2.4 + current_header_value_.rtrim(); headers_or_trailers.addViaMove(std::move(current_header_field_), std::move(current_header_value_)); } @@ -464,15 +512,33 @@ void ConnectionImpl::completeLastHeader() { // Check if the number of headers exceeds the limit. if (headers_or_trailers.size() > max_headers_count_) { error_code_ = Http::Code::RequestHeaderFieldsTooLarge; - sendProtocolError(Http1ResponseCodeDetails::get().TooManyHeaders); + RETURN_IF_ERROR(sendProtocolError(Http1ResponseCodeDetails::get().TooManyHeaders)); const absl::string_view header_type = processing_trailers_ ? 
Http1HeaderTypes::get().Trailers : Http1HeaderTypes::get().Headers; - throw CodecProtocolException(absl::StrCat(header_type, " size exceeds limit")); + return codecProtocolError(absl::StrCat(header_type, " size exceeds limit")); } header_parsing_state_ = HeaderParsingState::Field; ASSERT(current_header_field_.empty()); ASSERT(current_header_value_.empty()); + return okStatus(); +} + +uint32_t ConnectionImpl::getHeadersSize() { + return current_header_field_.size() + current_header_value_.size() + + headersOrTrailers().byteSize(); +} + +Status ConnectionImpl::checkMaxHeadersSize() { + const uint32_t total = getHeadersSize(); + if (total > (max_headers_kb_ * 1024)) { + const absl::string_view header_type = + processing_trailers_ ? Http1HeaderTypes::get().Trailers : Http1HeaderTypes::get().Headers; + error_code_ = Http::Code::RequestHeaderFieldsTooLarge; + RETURN_IF_ERROR(sendProtocolError(Http1ResponseCodeDetails::get().HeadersTooLarge)); + return codecProtocolError(absl::StrCat(header_type, " size exceeds limit")); + } + return okStatus(); } bool ConnectionImpl::maybeDirectDispatch(Buffer::Instance& data) { @@ -487,12 +553,26 @@ bool ConnectionImpl::maybeDirectDispatch(Buffer::Instance& data) { return true; } -void ConnectionImpl::dispatch(Buffer::Instance& data) { +Http::Status ConnectionImpl::dispatch(Buffer::Instance& data) { + // TODO(#10878): Remove this wrapper when exception removal is complete. innerDispatch may either + // throw an exception or return an error status. The utility wrapper catches exceptions and + // converts them to error statuses. + return Utility::exceptionToStatus( + [&](Buffer::Instance& data) -> Http::Status { return innerDispatch(data); }, data); +} + +Http::Status ConnectionImpl::innerDispatch(Buffer::Instance& data) { ENVOY_CONN_LOG(trace, "parsing {} bytes", connection_, data.length()); + // Make sure that dispatching_ is set to false after dispatching, even when + // http_parser exits early with an error code. 
+ Cleanup cleanup([this]() { dispatching_ = false; }); + ASSERT(!dispatching_); + ASSERT(codec_status_.ok()); ASSERT(buffered_body_.length() == 0); + dispatching_ = true; if (maybeDirectDispatch(data)) { - return; + return Http::okStatus(); } // Always unpause before dispatch. @@ -501,7 +581,11 @@ void ConnectionImpl::dispatch(Buffer::Instance& data) { ssize_t total_parsed = 0; if (data.length() > 0) { for (const Buffer::RawSlice& slice : data.getRawSlices()) { - total_parsed += dispatchSlice(static_cast(slice.mem_), slice.len_); + auto statusor_parsed = dispatchSlice(static_cast(slice.mem_), slice.len_); + if (!statusor_parsed.ok()) { + return statusor_parsed.status(); + } + total_parsed += statusor_parsed.value(); if (HTTP_PARSER_ERRNO(&parser_) != HPE_OK) { // Parse errors trigger an exception in dispatchSlice so we are guaranteed to be paused at // this point. @@ -511,7 +595,10 @@ void ConnectionImpl::dispatch(Buffer::Instance& data) { } dispatchBufferedBody(); } else { - dispatchSlice(nullptr, 0); + auto result = dispatchSlice(nullptr, 0); + if (!result.ok()) { + return result.status(); + } } ASSERT(buffered_body_.length() == 0); @@ -521,78 +608,82 @@ void ConnectionImpl::dispatch(Buffer::Instance& data) { // If an upgrade has been handled and there is body data or early upgrade // payload to send on, send it on. 
maybeDirectDispatch(data); + return Http::okStatus(); } -size_t ConnectionImpl::dispatchSlice(const char* slice, size_t len) { +Envoy::StatusOr ConnectionImpl::dispatchSlice(const char* slice, size_t len) { + ASSERT(codec_status_.ok() && dispatching_); ssize_t rc = http_parser_execute(&parser_, &settings_, slice, len); + if (!codec_status_.ok()) { + return codec_status_; + } if (HTTP_PARSER_ERRNO(&parser_) != HPE_OK && HTTP_PARSER_ERRNO(&parser_) != HPE_PAUSED) { - sendProtocolError(Http1ResponseCodeDetails::get().HttpCodecError); - throw CodecProtocolException("http/1.1 protocol error: " + - std::string(http_errno_name(HTTP_PARSER_ERRNO(&parser_)))); + RETURN_IF_ERROR(sendProtocolError(Http1ResponseCodeDetails::get().HttpCodecError)); + // Avoid overwriting the codec_status_ set in the callbacks. + ASSERT(codec_status_.ok()); + codec_status_ = codecProtocolError( + absl::StrCat("http/1.1 protocol error: ", http_errno_name(HTTP_PARSER_ERRNO(&parser_)))); + return codec_status_; } return rc; } -void ConnectionImpl::onHeaderField(const char* data, size_t length) { +Status ConnectionImpl::onHeaderField(const char* data, size_t length) { + ASSERT(dispatching_); // We previously already finished up the headers, these headers are // now trailers. if (header_parsing_state_ == HeaderParsingState::Done) { if (!enable_trailers_) { // Ignore trailers. - return; + return okStatus(); } processing_trailers_ = true; header_parsing_state_ = HeaderParsingState::Field; + allocTrailers(); } if (header_parsing_state_ == HeaderParsingState::Value) { - completeLastHeader(); + RETURN_IF_ERROR(completeLastHeader()); } current_header_field_.append(data, length); + + return checkMaxHeadersSize(); } -void ConnectionImpl::onHeaderValue(const char* data, size_t length) { +Status ConnectionImpl::onHeaderValue(const char* data, size_t length) { + ASSERT(dispatching_); if (header_parsing_state_ == HeaderParsingState::Done && !enable_trailers_) { // Ignore trailers. 
- return; + return okStatus(); } - if (processing_trailers_) { - maybeAllocTrailers(); - } - - // Work around a bug in http_parser where trailing whitespace is not trimmed - // as the spec requires: https://tools.ietf.org/html/rfc7230#section-3.2.4 - const absl::string_view header_value = StringUtil::trim(absl::string_view(data, length)); - - if (strict_header_validation_) { - if (!Http::HeaderUtility::headerValueIsValid(header_value)) { - ENVOY_CONN_LOG(debug, "invalid header value: {}", connection_, header_value); - error_code_ = Http::Code::BadRequest; - sendProtocolError(Http1ResponseCodeDetails::get().InvalidCharacters); - throw CodecProtocolException("http/1.1 protocol error: header value contains invalid chars"); - } + absl::string_view header_value{data, length}; + if (!Http::HeaderUtility::headerValueIsValid(header_value)) { + ENVOY_CONN_LOG(debug, "invalid header value: {}", connection_, header_value); + error_code_ = Http::Code::BadRequest; + RETURN_IF_ERROR(sendProtocolError(Http1ResponseCodeDetails::get().InvalidCharacters)); + return codecProtocolError("http/1.1 protocol error: header value contains invalid chars"); } header_parsing_state_ = HeaderParsingState::Value; + if (current_header_value_.empty()) { + // Strip leading whitespace if the current header value input contains the first bytes of the + // encoded header value. Trailing whitespace is stripped once the full header value is known in + // ConnectionImpl::completeLastHeader. http_parser does not strip leading or trailing whitespace + // as the spec requires: https://tools.ietf.org/html/rfc7230#section-3.2.4 . + header_value = StringUtil::ltrim(header_value); + } current_header_value_.append(header_value.data(), header_value.length()); - const uint32_t total = - current_header_field_.size() + current_header_value_.size() + headersOrTrailers().byteSize(); - if (total > (max_headers_kb_ * 1024)) { - const absl::string_view header_type = - processing_trailers_ ? 
Http1HeaderTypes::get().Trailers : Http1HeaderTypes::get().Headers; - error_code_ = Http::Code::RequestHeaderFieldsTooLarge; - sendProtocolError(Http1ResponseCodeDetails::get().HeadersTooLarge); - throw CodecProtocolException(absl::StrCat(header_type, " size exceeds limit")); - } + return checkMaxHeadersSize(); } -int ConnectionImpl::onHeadersCompleteBase() { +Envoy::StatusOr ConnectionImpl::onHeadersCompleteBase() { ASSERT(!processing_trailers_); + ASSERT(dispatching_); ENVOY_CONN_LOG(trace, "onHeadersCompleteBase", connection_); - completeLastHeader(); + RETURN_IF_ERROR(completeLastHeader()); if (!(parser_.http_major == 1 && parser_.http_minor == 1)) { // This is not necessarily true, but it's good enough since higher layers only care if this is @@ -603,16 +694,14 @@ int ConnectionImpl::onHeadersCompleteBase() { if (Utility::isUpgrade(request_or_response_headers) && upgradeAllowed()) { // Ignore h2c upgrade requests until we support them. // See https://github.com/envoyproxy/envoy/issues/7161 for details. 
- if (request_or_response_headers.Upgrade() && - absl::EqualsIgnoreCase(request_or_response_headers.Upgrade()->value().getStringView(), + if (absl::EqualsIgnoreCase(request_or_response_headers.getUpgradeValue(), Http::Headers::get().UpgradeValues.H2c)) { ENVOY_CONN_LOG(trace, "removing unsupported h2c upgrade headers.", connection_); request_or_response_headers.removeUpgrade(); if (request_or_response_headers.Connection()) { const auto& tokens_to_remove = caseUnorderdSetContainingUpgradeAndHttp2Settings(); std::string new_value = StringUtil::removeTokens( - request_or_response_headers.Connection()->value().getStringView(), ",", - tokens_to_remove, ","); + request_or_response_headers.getConnectionValue(), ",", tokens_to_remove, ","); if (new_value.empty()) { request_or_response_headers.removeConnection(); } else { @@ -626,28 +715,44 @@ int ConnectionImpl::onHeadersCompleteBase() { } } if (parser_.method == HTTP_CONNECT) { + if (request_or_response_headers.ContentLength()) { + if (request_or_response_headers.getContentLengthValue() == "0") { + request_or_response_headers.removeContentLength(); + } else { + // Per https://tools.ietf.org/html/rfc7231#section-4.3.6 a payload with a + // CONNECT request has no defined semantics, and may be rejected. + error_code_ = Http::Code::BadRequest; + RETURN_IF_ERROR(sendProtocolError(Http1ResponseCodeDetails::get().BodyDisallowed)); + return codecProtocolError("http/1.1 protocol error: unsupported content length"); + } + } ENVOY_CONN_LOG(trace, "codec entering upgrade mode for CONNECT request.", connection_); handling_upgrade_ = true; } // Per https://tools.ietf.org/html/rfc7230#section-3.3.1 Envoy should reject // transfer-codings it does not understand. + // Per https://tools.ietf.org/html/rfc7231#section-4.3.6 a payload with a + // CONNECT request has no defined semantics, and may be rejected. 
if (request_or_response_headers.TransferEncoding()) { - const absl::string_view encoding = - request_or_response_headers.TransferEncoding()->value().getStringView(); - if (reject_unsupported_transfer_encodings_ && - !absl::EqualsIgnoreCase(encoding, Headers::get().TransferEncodingValues.Chunked)) { + const absl::string_view encoding = request_or_response_headers.getTransferEncodingValue(); + if (!absl::EqualsIgnoreCase(encoding, Headers::get().TransferEncodingValues.Chunked) || + parser_.method == HTTP_CONNECT) { error_code_ = Http::Code::NotImplemented; - sendProtocolError(Http1ResponseCodeDetails::get().InvalidTransferEncoding); - throw CodecProtocolException("http/1.1 protocol error: unsupported transfer encoding"); + RETURN_IF_ERROR(sendProtocolError(Http1ResponseCodeDetails::get().InvalidTransferEncoding)); + return codecProtocolError("http/1.1 protocol error: unsupported transfer encoding"); } } - int rc = onHeadersComplete(); + auto statusor = onHeadersComplete(); + if (!statusor.ok()) { + RETURN_IF_ERROR(statusor.status()); + } + header_parsing_state_ = HeaderParsingState::Done; // Returning 2 informs http_parser to not expect a body or further data on this connection. - return handling_upgrade_ ? 2 : rc; + return handling_upgrade_ ? 
2 : statusor.value(); } void ConnectionImpl::bufferBody(const char* data, size_t length) { @@ -656,6 +761,7 @@ void ConnectionImpl::bufferBody(const char* data, size_t length) { void ConnectionImpl::dispatchBufferedBody() { ASSERT(HTTP_PARSER_ERRNO(&parser_) == HPE_OK || HTTP_PARSER_ERRNO(&parser_) == HPE_PAUSED); + ASSERT(codec_status_.ok()); if (buffered_body_.length() > 0) { onBody(buffered_body_); buffered_body_.drain(buffered_body_.length()); @@ -670,7 +776,7 @@ void ConnectionImpl::onChunkHeader(bool is_final_chunk) { } } -void ConnectionImpl::onMessageCompleteBase() { +Status ConnectionImpl::onMessageCompleteBase() { ENVOY_CONN_LOG(trace, "message complete", connection_); dispatchBufferedBody(); @@ -681,19 +787,20 @@ void ConnectionImpl::onMessageCompleteBase() { ASSERT(!deferred_end_stream_headers_); ENVOY_CONN_LOG(trace, "Pausing parser due to upgrade.", connection_); http_parser_pause(&parser_, 1); - return; + return okStatus(); } // If true, this indicates we were processing trailers and must // move the last header into current_header_map_ if (header_parsing_state_ == HeaderParsingState::Value) { - completeLastHeader(); + RETURN_IF_ERROR(completeLastHeader()); } onMessageComplete(); + return okStatus(); } -void ConnectionImpl::onMessageBeginBase() { +Status ConnectionImpl::onMessageBeginBase() { ENVOY_CONN_LOG(trace, "message begin", connection_); // Make sure that if HTTP/1.0 and HTTP/1.1 requests share a connection Envoy correctly sets // protocol for each request. 
Envoy defaults to 1.1 but sets the protocol to 1.0 where applicable @@ -702,7 +809,7 @@ void ConnectionImpl::onMessageBeginBase() { processing_trailers_ = false; header_parsing_state_ = HeaderParsingState::Field; allocHeaders(); - onMessageBegin(); + return onMessageBegin(); } void ConnectionImpl::onResetStreamBase(StreamResetReason reason) { @@ -712,7 +819,7 @@ void ConnectionImpl::onResetStreamBase(StreamResetReason reason) { } ServerConnectionImpl::ServerConnectionImpl( - Network::Connection& connection, Stats::Scope& stats, ServerConnectionCallbacks& callbacks, + Network::Connection& connection, CodecStats& stats, ServerConnectionCallbacks& callbacks, const Http1Settings& settings, uint32_t max_request_headers_kb, const uint32_t max_request_headers_count, envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction @@ -733,6 +840,14 @@ ServerConnectionImpl::ServerConnectionImpl( Runtime::runtimeFeatureEnabled("envoy.reloadable_features.http1_flood_protection")), headers_with_underscores_action_(headers_with_underscores_action) {} +uint32_t ServerConnectionImpl::getHeadersSize() { + // Add in the the size of the request URL if processing request headers. + const uint32_t url_size = (!processing_trailers_ && active_request_.has_value()) + ? active_request_.value().request_url_.size() + : 0; + return url_size + ConnectionImpl::getHeadersSize(); +} + void ServerConnectionImpl::onEncodeComplete() { if (active_request_.value().remote_complete_) { // Only do this if remote is complete. 
If we are replying before the request is complete the @@ -742,7 +857,7 @@ void ServerConnectionImpl::onEncodeComplete() { } } -void ServerConnectionImpl::handlePath(RequestHeaderMap& headers, unsigned int method) { +Status ServerConnectionImpl::handlePath(RequestHeaderMap& headers, unsigned int method) { HeaderString path(Headers::get().Path); bool is_connect = (method == HTTP_CONNECT); @@ -753,7 +868,7 @@ void ServerConnectionImpl::handlePath(RequestHeaderMap& headers, unsigned int me (active_request.request_url_.getStringView()[0] == '/' || ((method == HTTP_OPTIONS) && active_request.request_url_.getStringView()[0] == '*'))) { headers.addViaMove(std::move(path), std::move(active_request.request_url_)); - return; + return okStatus(); } // If absolute_urls and/or connect are not going be handled, copy the url and return. @@ -762,13 +877,13 @@ void ServerConnectionImpl::handlePath(RequestHeaderMap& headers, unsigned int me // Absolute URLS in CONNECT requests will be rejected below by the URL class validation. 
if (!codec_settings_.allow_absolute_url_ && !is_connect) { headers.addViaMove(std::move(path), std::move(active_request.request_url_)); - return; + return okStatus(); } Utility::Url absolute_url; if (!absolute_url.initialize(active_request.request_url_.getStringView(), is_connect)) { - sendProtocolError(Http1ResponseCodeDetails::get().InvalidUrl); - throw CodecProtocolException("http/1.1 protocol error: invalid url in request line"); + RETURN_IF_ERROR(sendProtocolError(Http1ResponseCodeDetails::get().InvalidUrl)); + return codecProtocolError("http/1.1 protocol error: invalid url in request line"); } // RFC7230#5.7 // When a proxy receives a request with an absolute-form of @@ -783,9 +898,10 @@ void ServerConnectionImpl::handlePath(RequestHeaderMap& headers, unsigned int me headers.setPath(absolute_url.pathAndQueryParams()); } active_request.request_url_.clear(); + return okStatus(); } -int ServerConnectionImpl::onHeadersComplete() { +Envoy::StatusOr ServerConnectionImpl::onHeadersComplete() { // Handle the case where response happens prior to request complete. It's up to upper layer code // to disconnect the connection but we shouldn't fire any more events since it doesn't make // sense. 
@@ -798,12 +914,13 @@ int ServerConnectionImpl::onHeadersComplete() { if (!handling_upgrade_ && connection_header_sanitization_ && headers->Connection()) { // If we fail to sanitize the request, return a 400 to the client if (!Utility::sanitizeConnectionHeader(*headers)) { - absl::string_view header_value = headers->Connection()->value().getStringView(); + absl::string_view header_value = headers->getConnectionValue(); ENVOY_CONN_LOG(debug, "Invalid nominated headers in Connection: {}", connection_, header_value); error_code_ = Http::Code::BadRequest; - sendProtocolError(Http1ResponseCodeDetails::get().ConnectionHeaderSanitization); - throw CodecProtocolException("Invalid nominated headers in Connection."); + RETURN_IF_ERROR( + sendProtocolError(Http1ResponseCodeDetails::get().ConnectionHeaderSanitization)); + return codecProtocolError("Invalid nominated headers in Connection."); } } @@ -812,7 +929,7 @@ int ServerConnectionImpl::onHeadersComplete() { active_request.response_encoder_.setIsResponseToHeadRequest(parser_.method == HTTP_HEAD); active_request.response_encoder_.setIsResponseToConnectRequest(parser_.method == HTTP_CONNECT); - handlePath(*headers, parser_.method); + RETURN_IF_ERROR(handlePath(*headers, parser_.method)); ASSERT(active_request.request_url_.empty()); headers->setMethod(method_string); @@ -820,8 +937,8 @@ int ServerConnectionImpl::onHeadersComplete() { // Make sure the host is valid. 
auto details = HeaderUtility::requestHeadersValid(*headers); if (details.has_value()) { - sendProtocolError(details.value().get()); - throw CodecProtocolException( + RETURN_IF_ERROR(sendProtocolError(details.value().get())); + return codecProtocolError( "http/1.1 protocol error: request headers failed spec compliance checks"); } @@ -848,24 +965,31 @@ int ServerConnectionImpl::onHeadersComplete() { return 0; } -void ServerConnectionImpl::onMessageBegin() { +Status ServerConnectionImpl::onMessageBegin() { if (!resetStreamCalled()) { ASSERT(!active_request_.has_value()); active_request_.emplace(*this, header_key_formatter_.get()); auto& active_request = active_request_.value(); + if (resetStreamCalled()) { + return codecClientError("cannot create new streams after calling reset"); + } active_request.request_decoder_ = &callbacks_.newStream(active_request.response_encoder_); // Check for pipelined request flood as we prepare to accept a new request. // Parse errors that happen prior to onMessageBegin result in stream termination, it is not // possible to overflow output buffers with early parse errors. 
- doFloodProtectionChecks(); + RETURN_IF_ERROR(doFloodProtectionChecks()); } + return okStatus(); } -void ServerConnectionImpl::onUrl(const char* data, size_t length) { +Status ServerConnectionImpl::onUrl(const char* data, size_t length) { if (active_request_.has_value()) { active_request_.value().request_url_.append(data, length); + + RETURN_IF_ERROR(checkMaxHeadersSize()); } + return okStatus(); } void ServerConnectionImpl::onBody(Buffer::Instance& data) { @@ -880,6 +1004,10 @@ void ServerConnectionImpl::onMessageComplete() { ASSERT(!handling_upgrade_); if (active_request_.has_value()) { auto& active_request = active_request_.value(); + + if (active_request.request_decoder_) { + active_request.response_encoder_.readDisable(true); + } active_request.remote_complete_ = true; if (deferred_end_stream_headers_) { active_request.request_decoder_->decodeHeaders( @@ -908,7 +1036,7 @@ void ServerConnectionImpl::onResetStream(StreamResetReason reason) { active_request_.reset(); } -void ServerConnectionImpl::sendProtocolError(absl::string_view details) { +void ServerConnectionImpl::sendProtocolErrorOld(absl::string_view details) { if (active_request_.has_value()) { active_request_.value().response_encoder_.setDetails(details); } @@ -926,6 +1054,34 @@ void ServerConnectionImpl::sendProtocolError(absl::string_view details) { } } +Status ServerConnectionImpl::sendProtocolError(absl::string_view details) { + if (!Runtime::runtimeFeatureEnabled("envoy.reloadable_features.early_errors_via_hcm")) { + sendProtocolErrorOld(details); + return okStatus(); + } + // We do this here because we may get a protocol error before we have a logical stream. 
+ if (!active_request_.has_value()) { + RETURN_IF_ERROR(onMessageBeginBase()); + } + ASSERT(active_request_.has_value()); + + active_request_.value().response_encoder_.setDetails(details); + if (!active_request_.value().response_encoder_.startedResponse()) { + // Note that the correctness of is_grpc_request and is_head_request is best-effort. + // If headers have not been fully parsed they may not be inferred correctly. + bool is_grpc_request = false; + if (absl::holds_alternative(headers_or_trailers_) && + absl::get(headers_or_trailers_) != nullptr) { + is_grpc_request = + Grpc::Common::isGrpcRequestHeaders(*absl::get(headers_or_trailers_)); + } + active_request_->request_decoder_->sendLocalReply(is_grpc_request, error_code_, + CodeUtility::toString(error_code_), nullptr, + absl::nullopt, details); + } + return okStatus(); +} + void ServerConnectionImpl::onAboveHighWatermark() { if (active_request_.has_value()) { active_request_.value().response_encoder_.runHighWatermarkCallbacks(); @@ -944,7 +1100,7 @@ void ServerConnectionImpl::releaseOutboundResponse( delete fragment; } -void ServerConnectionImpl::checkHeaderNameForUnderscores() { +Status ServerConnectionImpl::checkHeaderNameForUnderscores() { if (headers_with_underscores_action_ != envoy::config::core::v3::HttpProtocolOptions::ALLOW && Http::HeaderUtility::headerNameContainsUnderscore(current_header_field_.getStringView())) { if (headers_with_underscores_action_ == @@ -958,14 +1114,15 @@ void ServerConnectionImpl::checkHeaderNameForUnderscores() { ENVOY_CONN_LOG(debug, "Rejecting request due to header name with underscores: {}", connection_, current_header_field_.getStringView()); error_code_ = Http::Code::BadRequest; - sendProtocolError(Http1ResponseCodeDetails::get().InvalidCharacters); + RETURN_IF_ERROR(sendProtocolError(Http1ResponseCodeDetails::get().InvalidUnderscore)); stats_.requests_rejected_with_underscores_in_headers_.inc(); - throw CodecProtocolException("http/1.1 protocol error: header name 
contains underscores"); + return codecProtocolError("http/1.1 protocol error: header name contains underscores"); } } + return okStatus(); } -ClientConnectionImpl::ClientConnectionImpl(Network::Connection& connection, Stats::Scope& stats, +ClientConnectionImpl::ClientConnectionImpl(Network::Connection& connection, CodecStats& stats, ConnectionCallbacks&, const Http1Settings& settings, const uint32_t max_response_headers_count) : ConnectionImpl(connection, stats, HTTP_RESPONSE, MAX_RESPONSE_HEADERS_KB, @@ -984,10 +1141,6 @@ bool ClientConnectionImpl::cannotHaveBody() { } RequestEncoder& ClientConnectionImpl::newStream(ResponseDecoder& response_decoder) { - if (resetStreamCalled()) { - throw CodecClientException("cannot create new streams after calling reset"); - } - // If reads were disabled due to flow control, we expect reads to always be enabled again before // reusing this connection. This is done when the response is received. ASSERT(connection_.readEnabled()); @@ -999,12 +1152,14 @@ RequestEncoder& ClientConnectionImpl::newStream(ResponseDecoder& response_decode return pending_response_.value().encoder_; } -int ClientConnectionImpl::onHeadersComplete() { +Envoy::StatusOr ClientConnectionImpl::onHeadersComplete() { + ENVOY_CONN_LOG(trace, "status_code {}", connection_, parser_.status_code); + // Handle the case where the client is closing a kept alive connection (by sending a 408 // with a 'Connection: close' header). In this case we just let response flush out followed // by the remote close. 
if (!pending_response_.has_value() && !resetStreamCalled()) { - throw PrematureResponseException(static_cast(parser_.status_code)); + return prematureResponseError("", static_cast(parser_.status_code)); } else if (pending_response_.has_value()) { ASSERT(!pending_response_done_); auto& headers = absl::get(headers_or_trailers_); @@ -1015,32 +1170,66 @@ int ClientConnectionImpl::onHeadersComplete() { pending_response_.value().encoder_.connectRequest()) { ENVOY_CONN_LOG(trace, "codec entering upgrade mode for CONNECT response.", connection_); handling_upgrade_ = true; + + // For responses to connect requests, do not accept the chunked + // encoding header: https://tools.ietf.org/html/rfc7231#section-4.3.6 + if (headers->TransferEncoding() && + absl::EqualsIgnoreCase(headers->TransferEncoding()->value().getStringView(), + Headers::get().TransferEncodingValues.Chunked)) { + RETURN_IF_ERROR(sendProtocolError(Http1ResponseCodeDetails::get().InvalidTransferEncoding)); + return codecProtocolError("http/1.1 protocol error: unsupported transfer encoding"); + } } - if (parser_.status_code == 100) { - // http-parser treats 100 continue headers as their own complete response. - // Swallow the spurious onMessageComplete and continue processing. - ignore_message_complete_for_100_continue_ = true; - pending_response_.value().decoder_->decode100ContinueHeaders(std::move(headers)); + if (strict_1xx_and_204_headers_ && (parser_.status_code < 200 || parser_.status_code == 204)) { + if (headers->TransferEncoding()) { + RETURN_IF_ERROR( + sendProtocolError(Http1ResponseCodeDetails::get().TransferEncodingNotAllowed)); + return codecProtocolError( + "http/1.1 protocol error: transfer encoding not allowed in 1xx or 204"); + } - // Reset to ensure no information from the continue headers is used for the response headers - // in case the callee does not move the headers out. 
- headers_or_trailers_.emplace(nullptr); + if (headers->ContentLength()) { + // Report a protocol error for non-zero Content-Length, but paper over zero Content-Length. + if (headers->ContentLength()->value().getStringView() != "0") { + RETURN_IF_ERROR( + sendProtocolError(Http1ResponseCodeDetails::get().ContentLengthNotAllowed)); + return codecProtocolError( + "http/1.1 protocol error: content length not allowed in 1xx or 204"); + } + + headers->removeContentLength(); + } + } + + if (parser_.status_code == enumToInt(Http::Code::Continue)) { + pending_response_.value().decoder_->decode100ContinueHeaders(std::move(headers)); } else if (cannotHaveBody() && !handling_upgrade_) { deferred_end_stream_headers_ = true; } else { pending_response_.value().decoder_->decodeHeaders(std::move(headers), false); } + + // http-parser treats 1xx headers as their own complete response. Swallow the spurious + // onMessageComplete and continue processing for purely informational headers. + // 101-SwitchingProtocols is exempt as all data after the header is proxied through after + // upgrading. + if (CodeUtility::is1xx(parser_.status_code) && + parser_.status_code != enumToInt(Http::Code::SwitchingProtocols)) { + ignore_message_complete_for_1xx_ = true; + // Reset to ensure no information from the 1xx headers is used for the response headers. + headers_or_trailers_.emplace(nullptr); + } } - // Here we deal with cases where the response cannot have a body, but http_parser does not deal - // with it for us. + // Here we deal with cases where the response cannot have a body by returning 1, but http_parser + // does not deal with it for us. return cannotHaveBody() ? 
1 : 0; } bool ClientConnectionImpl::upgradeAllowed() const { if (pending_response_.has_value()) { - return pending_response_->encoder_.upgrade_request_; + return pending_response_->encoder_.upgradeRequest(); } return false; } @@ -1055,8 +1244,8 @@ void ClientConnectionImpl::onBody(Buffer::Instance& data) { void ClientConnectionImpl::onMessageComplete() { ENVOY_CONN_LOG(trace, "message complete", connection_); - if (ignore_message_complete_for_100_continue_) { - ignore_message_complete_for_100_continue_ = false; + if (ignore_message_complete_for_1xx_) { + ignore_message_complete_for_1xx_ = false; return; } if (pending_response_.has_value()) { @@ -1067,17 +1256,6 @@ void ClientConnectionImpl::onMessageComplete() { // be reset just yet. Preserve the state in pending_response_done_ instead. pending_response_done_ = true; - // Streams are responsible for unwinding any outstanding readDisable(true) - // calls done on the underlying connection as they are destroyed. As this is - // the only place a HTTP/1 stream is destroyed where the Network::Connection is - // reused, unwind any outstanding readDisable() calls here. Do this before we dispatch - // end_stream in case the caller immediately reuses the connection. 
- if (connection_.state() == Network::Connection::State::Open) { - while (!connection_.readEnabled()) { - connection_.readDisable(false); - } - } - if (deferred_end_stream_headers_) { response.decoder_->decodeHeaders( std::move(absl::get(headers_or_trailers_)), true); @@ -1105,11 +1283,12 @@ void ClientConnectionImpl::onResetStream(StreamResetReason reason) { } } -void ClientConnectionImpl::sendProtocolError(absl::string_view details) { +Status ClientConnectionImpl::sendProtocolError(absl::string_view details) { if (pending_response_.has_value()) { ASSERT(!pending_response_done_); pending_response_.value().encoder_.setDetails(details); } + return okStatus(); } void ClientConnectionImpl::onAboveHighWatermark() { diff --git a/source/common/http/http1/codec_impl.h b/source/common/http/http1/codec_impl.h index 12901e447e9a6..0f8b5d7de71a6 100644 --- a/source/common/http/http1/codec_impl.h +++ b/source/common/http/http1/codec_impl.h @@ -11,35 +11,21 @@ #include "envoy/config/core/v3/protocol.pb.h" #include "envoy/http/codec.h" #include "envoy/network/connection.h" -#include "envoy/stats/scope.h" #include "common/buffer/watermark_buffer.h" #include "common/common/assert.h" +#include "common/common/statusor.h" #include "common/http/codec_helper.h" #include "common/http/codes.h" #include "common/http/header_map_impl.h" +#include "common/http/http1/codec_stats.h" #include "common/http/http1/header_formatter.h" +#include "common/http/status.h" namespace Envoy { namespace Http { namespace Http1 { -/** - * All stats for the HTTP/1 codec. @see stats_macros.h - */ -#define ALL_HTTP1_CODEC_STATS(COUNTER) \ - COUNTER(dropped_headers_with_underscores) \ - COUNTER(metadata_not_supported_error) \ - COUNTER(requests_rejected_with_underscores_in_headers) \ - COUNTER(response_flood) - -/** - * Wrapper struct for the HTTP/1 codec stats. 
@see stats_macros.h - */ -struct CodecStats { - ALL_HTTP1_CODEC_STATS(GENERATE_COUNTER_STRUCT) -}; - class ConnectionImpl; /** @@ -51,6 +37,12 @@ class StreamEncoderImpl : public virtual StreamEncoder, public StreamCallbackHelper, public Http1StreamEncoderOptions { public: + ~StreamEncoderImpl() override { + // When the stream goes away, undo any read blocks to resume reading. + while (read_disable_calls_ != 0) { + StreamEncoderImpl::readDisable(false); + } + } // Http::StreamEncoder void encodeData(Buffer::Instance& data, bool end_stream) override; void encodeMetadata(const MetadataMapVector&) override; @@ -61,8 +53,8 @@ class StreamEncoderImpl : public virtual StreamEncoder, void disableChunkEncoding() override { disable_chunk_encoding_ = true; } // Http::Stream - void addCallbacks(StreamCallbacks& callbacks) override { addCallbacks_(callbacks); } - void removeCallbacks(StreamCallbacks& callbacks) override { removeCallbacks_(callbacks); } + void addCallbacks(StreamCallbacks& callbacks) override { addCallbacksHelper(callbacks); } + void removeCallbacks(StreamCallbacks& callbacks) override { removeCallbacksHelper(callbacks); } // After this is called, for the HTTP/1 codec, the connection should be closed, i.e. no further // progress may be made with the codec. void resetStream(StreamResetReason reason) override; @@ -70,27 +62,33 @@ class StreamEncoderImpl : public virtual StreamEncoder, uint32_t bufferLimit() override; absl::string_view responseDetails() override { return details_; } const Network::Address::InstanceConstSharedPtr& connectionLocalAddress() override; + void setFlushTimeout(std::chrono::milliseconds) override { + // HTTP/1 has one stream per connection, thus any data encoded is immediately written to the + // connection, invoking any watermarks as necessary. There is no internal buffering that would + // require a flush timeout not already covered by other timeouts. 
+ } void setIsResponseToHeadRequest(bool value) { is_response_to_head_request_ = value; } void setIsResponseToConnectRequest(bool value) { is_response_to_connect_request_ = value; } void setDetails(absl::string_view details) { details_ = details; } + void clearReadDisableCallsForTests() { read_disable_calls_ = 0; } + protected: StreamEncoderImpl(ConnectionImpl& connection, HeaderKeyFormatter* header_key_formatter); - void setIsContentLengthAllowed(bool value) { is_content_length_allowed_ = value; } - void encodeHeadersBase(const RequestOrResponseHeaderMap& headers, bool end_stream); + void encodeHeadersBase(const RequestOrResponseHeaderMap& headers, absl::optional status, + bool end_stream); void encodeTrailersBase(const HeaderMap& headers); static const std::string CRLF; static const std::string LAST_CHUNK; ConnectionImpl& connection_; + uint32_t read_disable_calls_{}; bool disable_chunk_encoding_ : 1; bool chunk_encoding_ : 1; - bool processing_100_continue_ : 1; bool is_response_to_head_request_ : 1; bool is_response_to_connect_request_ : 1; - bool is_content_length_allowed_ : 1; private: /** @@ -146,16 +144,16 @@ class RequestEncoderImpl : public StreamEncoderImpl, public RequestEncoder { public: RequestEncoderImpl(ConnectionImpl& connection, HeaderKeyFormatter* header_key_formatter) : StreamEncoderImpl(connection, header_key_formatter) {} - bool headRequest() { return head_request_; } - bool connectRequest() { return connect_request_; } + bool upgradeRequest() const { return upgrade_request_; } + bool headRequest() const { return head_request_; } + bool connectRequest() const { return connect_request_; } // Http::RequestEncoder void encodeHeaders(const RequestHeaderMap& headers, bool end_stream) override; void encodeTrailers(const RequestTrailerMap& trailers) override { encodeTrailersBase(trailers); } - bool upgrade_request_{}; - private: + bool upgrade_request_{}; bool head_request_{}; bool connect_request_{}; }; @@ -196,16 +194,20 @@ class ConnectionImpl : 
public virtual Connection, protected Logger::Loggable&& statusor); + + // Codec errors found in callbacks are overridden within the http_parser library. This holds those + // errors to propagate them through to dispatch() where we can handle the error. + Envoy::Http::Status codec_status_; + protected: - ConnectionImpl(Network::Connection& connection, Stats::Scope& stats, http_parser_type type, + ConnectionImpl(Network::Connection& connection, CodecStats& stats, http_parser_type type, uint32_t max_headers_kb, const uint32_t max_headers_count, HeaderKeyFormatterPtr&& header_key_formatter, bool enable_trailers); + // The following define special return values for http_parser callbacks. See: + // https://github.com/nodejs/http-parser/blob/5c5b3ac62662736de9e71640a8dc16da45b32503/http_parser.h#L72 + // These codes do not overlap with standard HTTP Status codes. They are only used for user + // callbacks. + enum class HttpParserCode { + // Callbacks other than on_headers_complete should return a non-zero int to indicate an error + // and + // halt execution. + Error = -1, + Success = 0, + // Returning '1' from on_headers_complete will tell http_parser that it should not expect a + // body. + NoBody = 1, + // Returning '2' from on_headers_complete will tell http_parser that it should not expect a body + // nor any further data on the connection. + NoBodyData = 2, + }; + bool resetStreamCalled() { return reset_stream_called_; } + Status onMessageBeginBase(); + + /** + * Get memory used to represent HTTP headers or trailers currently being parsed. + * Computed by adding the partial header field and value that is currently being parsed and the + * estimated header size for previous header lines provided by HeaderMap::byteSize(). + */ + virtual uint32_t getHeadersSize(); + + /** + * Called from onUrl, onHeaderField and onHeaderValue to verify that the headers do not exceed the + * configured max header size limit. 
+ * @return A codecProtocolError status if headers exceed the size limit. + */ + Status checkMaxHeadersSize(); Network::Connection& connection_; - CodecStats stats_; + CodecStats& stats_; http_parser parser_; Http::Code error_code_{Http::Code::BadRequest}; const HeaderKeyFormatterPtr header_key_formatter_; @@ -234,10 +278,10 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable dispatchSlice(const char* slice, size_t len); /** * Called by the http_parser when body data is received. @@ -287,38 +341,39 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable onHeadersCompleteBase(); + virtual Envoy::StatusOr onHeadersComplete() PURE; /** * Called to see if upgrade transition is allowed. @@ -339,8 +394,9 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable& activeRequest() { return active_request_; } + // ConnectionImpl + void onMessageComplete() override; + // Add the size of the request_url to the reported header size when processing request headers. + uint32_t getHeadersSize() override; +private: /** * Manipulate the request's first line, parsing the url and converting to a relative path if * necessary. Compute Host / :authority headers based on 7230#5.7 and 7230#6 * * @param is_connect true if the request has the CONNECT method * @param headers the request's headers - * @throws CodecProtocolException on an invalid url in the request line + * @return Status representing success or failure. This will fail if there is an invalid url in + * the request line. 
*/ - void handlePath(RequestHeaderMap& headers, unsigned int method); + Status handlePath(RequestHeaderMap& headers, unsigned int method); // ConnectionImpl void onEncodeComplete() override; - void onMessageBegin() override; - void onUrl(const char* data, size_t length) override; - int onHeadersComplete() override; + Status onMessageBegin() override; + Status onUrl(const char* data, size_t length) override; + Envoy::StatusOr onHeadersComplete() override; // If upgrade behavior is not allowed, the HCM will have sanitized the headers out. bool upgradeAllowed() const override { return true; } void onBody(Buffer::Instance& data) override; - void onMessageComplete() override; void onResetStream(StreamResetReason reason) override; - void sendProtocolError(absl::string_view details) override; + Status sendProtocolError(absl::string_view details) override; void onAboveHighWatermark() override; void onBelowLowWatermark() override; HeaderMap& headersOrTrailers() override { @@ -451,19 +513,22 @@ class ServerConnectionImpl : public ServerConnection, public ConnectionImpl { } void allocHeaders() override { ASSERT(nullptr == absl::get(headers_or_trailers_)); - headers_or_trailers_.emplace(std::make_unique()); + ASSERT(!processing_trailers_); + headers_or_trailers_.emplace(RequestHeaderMapImpl::create()); } - void maybeAllocTrailers() override { + void allocTrailers() override { ASSERT(processing_trailers_); if (!absl::holds_alternative(headers_or_trailers_)) { - headers_or_trailers_.emplace(std::make_unique()); + headers_or_trailers_.emplace(RequestTrailerMapImpl::create()); } } + void sendProtocolErrorOld(absl::string_view details); + void releaseOutboundResponse(const Buffer::OwnedBufferFragmentImpl* fragment); void maybeAddSentinelBufferFragment(Buffer::WatermarkBuffer& output_buffer) override; - void doFloodProtectionChecks() const; - void checkHeaderNameForUnderscores() override; + Status doFloodProtectionChecks() const; + Status checkHeaderNameForUnderscores() override; 
ServerConnectionCallbacks& callbacks_; absl::optional active_request_; @@ -491,7 +556,7 @@ class ServerConnectionImpl : public ServerConnection, public ConnectionImpl { */ class ClientConnectionImpl : public ClientConnection, public ConnectionImpl { public: - ClientConnectionImpl(Network::Connection& connection, Stats::Scope& stats, + ClientConnectionImpl(Network::Connection& connection, CodecStats& stats, ConnectionCallbacks& callbacks, const Http1Settings& settings, const uint32_t max_response_headers_count); @@ -512,14 +577,14 @@ class ClientConnectionImpl : public ClientConnection, public ConnectionImpl { // ConnectionImpl void onEncodeComplete() override {} - void onMessageBegin() override {} - void onUrl(const char*, size_t) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } - int onHeadersComplete() override; + Status onMessageBegin() override { return okStatus(); } + Status onUrl(const char*, size_t) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } + Envoy::StatusOr onHeadersComplete() override; bool upgradeAllowed() const override; void onBody(Buffer::Instance& data) override; void onMessageComplete() override; void onResetStream(StreamResetReason reason) override; - void sendProtocolError(absl::string_view details) override; + Status sendProtocolError(absl::string_view details) override; void onAboveHighWatermark() override; void onBelowLowWatermark() override; HeaderMap& headersOrTrailers() override { @@ -534,13 +599,13 @@ class ClientConnectionImpl : public ClientConnection, public ConnectionImpl { } void allocHeaders() override { ASSERT(nullptr == absl::get(headers_or_trailers_)); - headers_or_trailers_.emplace(std::make_unique()); + ASSERT(!processing_trailers_); + headers_or_trailers_.emplace(ResponseHeaderMapImpl::create()); } - void maybeAllocTrailers() override { + void allocTrailers() override { ASSERT(processing_trailers_); if (!absl::holds_alternative(headers_or_trailers_)) { - headers_or_trailers_.emplace( - std::make_unique()); + 
headers_or_trailers_.emplace(ResponseTrailerMapImpl::create()); } } @@ -551,13 +616,13 @@ class ClientConnectionImpl : public ClientConnection, public ConnectionImpl { // the response is complete. The existence of this variable is hard to reason about and it should // be combined with pending_response_ somehow in a follow up cleanup. bool pending_response_done_{true}; - // Set true between receiving 100-Continue headers and receiving the spurious onMessageComplete. - bool ignore_message_complete_for_100_continue_{}; + // Set true between receiving non-101 1xx headers and receiving the spurious onMessageComplete. + bool ignore_message_complete_for_1xx_{}; // TODO(mattklein123): This should be a member of PendingResponse but this change needs dedicated // thought as some of the reset and no header code paths make this difficult. Headers are - // populated on message begin. Trailers are populated on the first parsed trailer field (if - // trailers are enabled). The variant is reset to null headers on message complete for assertion - // purposes. + // populated on message begin. Trailers are populated when the switch to trailer processing is + // detected while parsing the first trailer field (if trailers are enabled). The variant is reset + // to null headers on message complete for assertion purposes. absl::variant headers_or_trailers_; // The default limit of 80 KiB is the vanilla http_parser behaviour. 
diff --git a/source/common/http/http1/codec_impl_legacy.cc b/source/common/http/http1/codec_impl_legacy.cc new file mode 100644 index 0000000000000..a8829d2182e29 --- /dev/null +++ b/source/common/http/http1/codec_impl_legacy.cc @@ -0,0 +1,1246 @@ +#include "common/http/http1/codec_impl_legacy.h" + +#include +#include +#include + +#include "envoy/buffer/buffer.h" +#include "envoy/http/codec.h" +#include "envoy/http/header_map.h" +#include "envoy/network/connection.h" + +#include "common/common/enum_to_int.h" +#include "common/common/utility.h" +#include "common/grpc/common.h" +#include "common/http/exception.h" +#include "common/http/header_utility.h" +#include "common/http/headers.h" +#include "common/http/http1/header_formatter.h" +#include "common/http/url_utility.h" +#include "common/http/utility.h" +#include "common/runtime/runtime_features.h" + +#include "absl/container/fixed_array.h" +#include "absl/strings/ascii.h" + +namespace Envoy { +namespace Http { +namespace Legacy { +namespace Http1 { +namespace { + +struct Http1ResponseCodeDetailValues { + const absl::string_view TooManyHeaders = "http1.too_many_headers"; + const absl::string_view HeadersTooLarge = "http1.headers_too_large"; + const absl::string_view HttpCodecError = "http1.codec_error"; + const absl::string_view InvalidCharacters = "http1.invalid_characters"; + const absl::string_view ConnectionHeaderSanitization = "http1.connection_header_rejected"; + const absl::string_view InvalidUrl = "http1.invalid_url"; + const absl::string_view InvalidTransferEncoding = "http1.invalid_transfer_encoding"; + const absl::string_view BodyDisallowed = "http1.body_disallowed"; + const absl::string_view TransferEncodingNotAllowed = "http1.transfer_encoding_not_allowed"; + const absl::string_view ContentLengthNotAllowed = "http1.content_length_not_allowed"; + const absl::string_view InvalidUnderscore = "http1.unexpected_underscore"; +}; + +struct Http1HeaderTypesValues { + const absl::string_view Headers = 
"headers"; + const absl::string_view Trailers = "trailers"; +}; + +using Http1ResponseCodeDetails = ConstSingleton; +using Http1HeaderTypes = ConstSingleton; +using Http::Http1::CodecStats; +using Http::Http1::HeaderKeyFormatter; +using Http::Http1::HeaderKeyFormatterPtr; +using Http::Http1::ProperCaseHeaderKeyFormatter; + +const StringUtil::CaseUnorderedSet& caseUnorderdSetContainingUpgradeAndHttp2Settings() { + CONSTRUCT_ON_FIRST_USE(StringUtil::CaseUnorderedSet, + Http::Headers::get().ConnectionValues.Upgrade, + Http::Headers::get().ConnectionValues.Http2Settings); +} + +HeaderKeyFormatterPtr formatter(const Http::Http1Settings& settings) { + if (settings.header_key_format_ == Http1Settings::HeaderKeyFormat::ProperCase) { + return std::make_unique(); + } + + return nullptr; +} + +} // namespace + +const std::string StreamEncoderImpl::CRLF = "\r\n"; +// Last chunk as defined here https://tools.ietf.org/html/rfc7230#section-4.1 +const std::string StreamEncoderImpl::LAST_CHUNK = "0\r\n"; + +StreamEncoderImpl::StreamEncoderImpl(ConnectionImpl& connection, + HeaderKeyFormatter* header_key_formatter) + : connection_(connection), disable_chunk_encoding_(false), chunk_encoding_(true), + is_response_to_head_request_(false), is_response_to_connect_request_(false), + header_key_formatter_(header_key_formatter) { + if (connection_.connection().aboveHighWatermark()) { + runHighWatermarkCallbacks(); + } +} + +void StreamEncoderImpl::encodeHeader(const char* key, uint32_t key_size, const char* value, + uint32_t value_size) { + + ASSERT(key_size > 0); + + connection_.copyToBuffer(key, key_size); + connection_.addCharToBuffer(':'); + connection_.addCharToBuffer(' '); + connection_.copyToBuffer(value, value_size); + connection_.addToBuffer(CRLF); +} +void StreamEncoderImpl::encodeHeader(absl::string_view key, absl::string_view value) { + this->encodeHeader(key.data(), key.size(), value.data(), value.size()); +} + +void StreamEncoderImpl::encodeFormattedHeader(absl::string_view 
key, absl::string_view value) { + if (header_key_formatter_ != nullptr) { + encodeHeader(header_key_formatter_->format(key), value); + } else { + encodeHeader(key, value); + } +} + +void ResponseEncoderImpl::encode100ContinueHeaders(const ResponseHeaderMap& headers) { + ASSERT(headers.Status()->value() == "100"); + encodeHeaders(headers, false); +} + +void StreamEncoderImpl::encodeHeadersBase(const RequestOrResponseHeaderMap& headers, + absl::optional status, bool end_stream) { + bool saw_content_length = false; + headers.iterate([this](const HeaderEntry& header) -> HeaderMap::Iterate { + absl::string_view key_to_use = header.key().getStringView(); + uint32_t key_size_to_use = header.key().size(); + // Translate :authority -> host so that upper layers do not need to deal with this. + if (key_size_to_use > 1 && key_to_use[0] == ':' && key_to_use[1] == 'a') { + key_to_use = absl::string_view(Headers::get().HostLegacy.get()); + key_size_to_use = Headers::get().HostLegacy.get().size(); + } + + // Skip all headers starting with ':' that make it here. + if (key_to_use[0] == ':') { + return HeaderMap::Iterate::Continue; + } + + encodeFormattedHeader(key_to_use, header.value().getStringView()); + + return HeaderMap::Iterate::Continue; + }); + + if (headers.ContentLength()) { + saw_content_length = true; + } + + ASSERT(!headers.TransferEncoding()); + + // Assume we are chunk encoding unless we are passed a content length or this is a header only + // response. Upper layers generally should strip transfer-encoding since it only applies to + // HTTP/1.1. The codec will infer it based on the type of response. + // for streaming (e.g. SSE stream sent to hystrix dashboard), we do not want + // chunk transfer encoding but we don't have a content-length so disable_chunk_encoding_ is + // consulted before enabling chunk encoding. + // + // Note that for HEAD requests Envoy does best-effort guessing when there is no + // content-length. 
If a client makes a HEAD request for an upstream resource + // with no bytes but the upstream response doesn't include "Content-length: 0", + // Envoy will incorrectly assume a subsequent response to GET will be chunk encoded. + if (saw_content_length || disable_chunk_encoding_) { + chunk_encoding_ = false; + } else { + if (status && *status == 100) { + // Make sure we don't serialize chunk information with 100-Continue headers. + chunk_encoding_ = false; + } else if (end_stream && !is_response_to_head_request_) { + // If this is a headers-only stream, append an explicit "Content-Length: 0" unless it's a + // response to a HEAD request. + // For 204s and 1xx where content length is disallowed, don't append the content length but + // also don't chunk encode. + if (!status || (*status >= 200 && *status != 204)) { + encodeFormattedHeader(Headers::get().ContentLength.get(), "0"); + } + chunk_encoding_ = false; + } else if (connection_.protocol() == Protocol::Http10) { + chunk_encoding_ = false; + } else if (status && (*status < 200 || *status == 204) && + connection_.strict1xxAnd204Headers()) { + // TODO(zuercher): when the "envoy.reloadable_features.strict_1xx_and_204_response_headers" + // feature flag is removed, this block can be coalesced with the 100 Continue logic above. + + // For 1xx and 204 responses, do not send the chunked encoding header or enable chunked + // encoding: https://tools.ietf.org/html/rfc7230#section-3.3.1 + chunk_encoding_ = false; + } else { + // For responses to connect requests, do not send the chunked encoding header: + // https://tools.ietf.org/html/rfc7231#section-4.3.6. + if (!is_response_to_connect_request_) { + encodeFormattedHeader(Headers::get().TransferEncoding.get(), + Headers::get().TransferEncodingValues.Chunked); + } + // We do not apply chunk encoding for HTTP upgrades, including CONNECT style upgrades. 
+ // If there is a body in a response on the upgrade path, the chunks will be + // passed through via maybeDirectDispatch so we need to avoid appending + // extra chunk boundaries. + // + // When sending a response to a HEAD request Envoy may send an informational + // "Transfer-Encoding: chunked" header, but should not send a chunk encoded body. + chunk_encoding_ = !Utility::isUpgrade(headers) && !is_response_to_head_request_ && + !is_response_to_connect_request_; + } + } + + connection_.addToBuffer(CRLF); + + if (end_stream) { + endEncode(); + } else { + connection_.flushOutput(); + } +} + +void StreamEncoderImpl::encodeData(Buffer::Instance& data, bool end_stream) { + // end_stream may be indicated with a zero length data buffer. If that is the case, so not + // actually write the zero length buffer out. + if (data.length() > 0) { + if (chunk_encoding_) { + connection_.buffer().add(absl::StrCat(absl::Hex(data.length()), CRLF)); + } + + connection_.buffer().move(data); + + if (chunk_encoding_) { + connection_.buffer().add(CRLF); + } + } + + if (end_stream) { + endEncode(); + } else { + connection_.flushOutput(); + } +} + +void StreamEncoderImpl::encodeTrailersBase(const HeaderMap& trailers) { + if (!connection_.enableTrailers()) { + return endEncode(); + } + // Trailers only matter if it is a chunk transfer encoding + // https://tools.ietf.org/html/rfc7230#section-4.4 + if (chunk_encoding_) { + // Finalize the body + connection_.buffer().add(LAST_CHUNK); + + trailers.iterate([this](const HeaderEntry& header) -> HeaderMap::Iterate { + encodeFormattedHeader(header.key().getStringView(), header.value().getStringView()); + return HeaderMap::Iterate::Continue; + }); + + connection_.flushOutput(); + connection_.buffer().add(CRLF); + } + + connection_.flushOutput(); + connection_.onEncodeComplete(); +} + +void StreamEncoderImpl::encodeMetadata(const MetadataMapVector&) { + connection_.stats().metadata_not_supported_error_.inc(); +} + +void StreamEncoderImpl::endEncode() 
{ + if (chunk_encoding_) { + connection_.buffer().add(LAST_CHUNK); + connection_.buffer().add(CRLF); + } + + connection_.flushOutput(true); + connection_.onEncodeComplete(); +} + +void ServerConnectionImpl::maybeAddSentinelBufferFragment(Buffer::WatermarkBuffer& output_buffer) { + if (!flood_protection_) { + return; + } + // It's messy and complicated to try to tag the final write of an HTTP response for response + // tracking for flood protection. Instead, write an empty buffer fragment after the response, + // to allow for tracking. + // When the response is written out, the fragment will be deleted and the counter will be updated + // by ServerConnectionImpl::releaseOutboundResponse() + auto fragment = + Buffer::OwnedBufferFragmentImpl::create(absl::string_view("", 0), response_buffer_releasor_); + output_buffer.addBufferFragment(*fragment.release()); + ASSERT(outbound_responses_ < max_outbound_responses_); + outbound_responses_++; +} + +void ServerConnectionImpl::doFloodProtectionChecks() const { + if (!flood_protection_) { + return; + } + // Before processing another request, make sure that we are below the response flood protection + // threshold. 
+ if (outbound_responses_ >= max_outbound_responses_) { + ENVOY_CONN_LOG(trace, "error accepting request: too many pending responses queued", + connection_); + stats_.response_flood_.inc(); + throw FrameFloodException("Too many responses queued."); + } +} + +void ConnectionImpl::flushOutput(bool end_encode) { + if (end_encode) { + // If this is an HTTP response in ServerConnectionImpl, track outbound responses for flood + // protection + maybeAddSentinelBufferFragment(output_buffer_); + } + connection().write(output_buffer_, false); + ASSERT(0UL == output_buffer_.length()); +} + +void ConnectionImpl::addToBuffer(absl::string_view data) { output_buffer_.add(data); } + +void ConnectionImpl::addCharToBuffer(char c) { output_buffer_.add(&c, 1); } + +void ConnectionImpl::addIntToBuffer(uint64_t i) { output_buffer_.add(absl::StrCat(i)); } + +void ConnectionImpl::copyToBuffer(const char* data, uint64_t length) { + output_buffer_.add(data, length); +} + +void StreamEncoderImpl::resetStream(StreamResetReason reason) { + connection_.onResetStreamBase(reason); +} + +void StreamEncoderImpl::readDisable(bool disable) { + if (disable) { + ++read_disable_calls_; + } else { + ASSERT(read_disable_calls_ != 0); + if (read_disable_calls_ != 0) { + --read_disable_calls_; + } + } + connection_.readDisable(disable); +} + +uint32_t StreamEncoderImpl::bufferLimit() { return connection_.bufferLimit(); } + +const Network::Address::InstanceConstSharedPtr& StreamEncoderImpl::connectionLocalAddress() { + return connection_.connection().localAddress(); +} + +static const char RESPONSE_PREFIX[] = "HTTP/1.1 "; +static const char HTTP_10_RESPONSE_PREFIX[] = "HTTP/1.0 "; + +void ResponseEncoderImpl::encodeHeaders(const ResponseHeaderMap& headers, bool end_stream) { + started_response_ = true; + + // The contract is that client codecs must ensure that :status is present. 
+ ASSERT(headers.Status() != nullptr); + uint64_t numeric_status = Utility::getResponseStatus(headers); + + if (connection_.protocol() == Protocol::Http10 && connection_.supportsHttp10()) { + connection_.copyToBuffer(HTTP_10_RESPONSE_PREFIX, sizeof(HTTP_10_RESPONSE_PREFIX) - 1); + } else { + connection_.copyToBuffer(RESPONSE_PREFIX, sizeof(RESPONSE_PREFIX) - 1); + } + connection_.addIntToBuffer(numeric_status); + connection_.addCharToBuffer(' '); + + const char* status_string = CodeUtility::toString(static_cast(numeric_status)); + uint32_t status_string_len = strlen(status_string); + connection_.copyToBuffer(status_string, status_string_len); + + connection_.addCharToBuffer('\r'); + connection_.addCharToBuffer('\n'); + + if (numeric_status >= 300) { + // Don't do special CONNECT logic if the CONNECT was rejected. + is_response_to_connect_request_ = false; + } + + encodeHeadersBase(headers, absl::make_optional(numeric_status), end_stream); +} + +static const char REQUEST_POSTFIX[] = " HTTP/1.1\r\n"; + +void RequestEncoderImpl::encodeHeaders(const RequestHeaderMap& headers, bool end_stream) { + const HeaderEntry* method = headers.Method(); + const HeaderEntry* path = headers.Path(); + const HeaderEntry* host = headers.Host(); + bool is_connect = HeaderUtility::isConnect(headers); + + if (!method || (!path && !is_connect)) { + // TODO(#10878): This exception does not occur during dispatch and would not be triggered under + // normal circumstances since inputs would fail parsing at ingress. Replace with proper error + // handling when exceptions are removed. Include missing host header for CONNECT. 
+ throw CodecClientException(":method and :path must be specified"); + } + if (method->value() == Headers::get().MethodValues.Head) { + head_request_ = true; + } else if (method->value() == Headers::get().MethodValues.Connect) { + disableChunkEncoding(); + connect_request_ = true; + } + if (Utility::isUpgrade(headers)) { + upgrade_request_ = true; + } + + connection_.copyToBuffer(method->value().getStringView().data(), method->value().size()); + connection_.addCharToBuffer(' '); + if (is_connect) { + connection_.copyToBuffer(host->value().getStringView().data(), host->value().size()); + } else { + connection_.copyToBuffer(path->value().getStringView().data(), path->value().size()); + } + connection_.copyToBuffer(REQUEST_POSTFIX, sizeof(REQUEST_POSTFIX) - 1); + + encodeHeadersBase(headers, absl::nullopt, end_stream); +} + +http_parser_settings ConnectionImpl::settings_{ + [](http_parser* parser) -> int { + static_cast(parser->data)->onMessageBeginBase(); + return 0; + }, + [](http_parser* parser, const char* at, size_t length) -> int { + static_cast(parser->data)->onUrl(at, length); + return 0; + }, + nullptr, // on_status + [](http_parser* parser, const char* at, size_t length) -> int { + static_cast(parser->data)->onHeaderField(at, length); + return 0; + }, + [](http_parser* parser, const char* at, size_t length) -> int { + static_cast(parser->data)->onHeaderValue(at, length); + return 0; + }, + [](http_parser* parser) -> int { + return static_cast(parser->data)->onHeadersCompleteBase(); + }, + [](http_parser* parser, const char* at, size_t length) -> int { + static_cast(parser->data)->bufferBody(at, length); + return 0; + }, + [](http_parser* parser) -> int { + static_cast(parser->data)->onMessageCompleteBase(); + return 0; + }, + [](http_parser* parser) -> int { + // A 0-byte chunk header is used to signal the end of the chunked body. + // When this function is called, http-parser holds the size of the chunk in + // parser->content_length. 
See + // https://github.com/nodejs/http-parser/blob/v2.9.3/http_parser.h#L336 + const bool is_final_chunk = (parser->content_length == 0); + static_cast(parser->data)->onChunkHeader(is_final_chunk); + return 0; + }, + nullptr // on_chunk_complete +}; + +ConnectionImpl::ConnectionImpl(Network::Connection& connection, CodecStats& stats, + http_parser_type type, uint32_t max_headers_kb, + const uint32_t max_headers_count, + HeaderKeyFormatterPtr&& header_key_formatter, bool enable_trailers) + : connection_(connection), stats_(stats), + header_key_formatter_(std::move(header_key_formatter)), processing_trailers_(false), + handling_upgrade_(false), reset_stream_called_(false), deferred_end_stream_headers_(false), + connection_header_sanitization_(Runtime::runtimeFeatureEnabled( + "envoy.reloadable_features.connection_header_sanitization")), + enable_trailers_(enable_trailers), + strict_1xx_and_204_headers_(Runtime::runtimeFeatureEnabled( + "envoy.reloadable_features.strict_1xx_and_204_response_headers")), + output_buffer_([&]() -> void { this->onBelowLowWatermark(); }, + [&]() -> void { this->onAboveHighWatermark(); }, + []() -> void { /* TODO(adisuissa): Handle overflow watermark */ }), + max_headers_kb_(max_headers_kb), max_headers_count_(max_headers_count) { + output_buffer_.setWatermarks(connection.bufferLimit()); + http_parser_init(&parser_, type); + parser_.data = this; +} + +void ConnectionImpl::completeLastHeader() { + ENVOY_CONN_LOG(trace, "completed header: key={} value={}", connection_, + current_header_field_.getStringView(), current_header_value_.getStringView()); + + checkHeaderNameForUnderscores(); + auto& headers_or_trailers = headersOrTrailers(); + if (!current_header_field_.empty()) { + current_header_field_.inlineTransform([](char c) { return absl::ascii_tolower(c); }); + // Strip trailing whitespace of the current header value if any. Leading whitespace was trimmed + // in ConnectionImpl::onHeaderValue. 
http_parser does not strip leading or trailing whitespace + // as the spec requires: https://tools.ietf.org/html/rfc7230#section-3.2.4 + current_header_value_.rtrim(); + headers_or_trailers.addViaMove(std::move(current_header_field_), + std::move(current_header_value_)); + } + + // Check if the number of headers exceeds the limit. + if (headers_or_trailers.size() > max_headers_count_) { + error_code_ = Http::Code::RequestHeaderFieldsTooLarge; + sendProtocolError(Http1ResponseCodeDetails::get().TooManyHeaders); + const absl::string_view header_type = + processing_trailers_ ? Http1HeaderTypes::get().Trailers : Http1HeaderTypes::get().Headers; + throw CodecProtocolException(absl::StrCat(header_type, " size exceeds limit")); + } + + header_parsing_state_ = HeaderParsingState::Field; + ASSERT(current_header_field_.empty()); + ASSERT(current_header_value_.empty()); +} + +uint32_t ConnectionImpl::getHeadersSize() { + return current_header_field_.size() + current_header_value_.size() + + headersOrTrailers().byteSize(); +} + +void ConnectionImpl::checkMaxHeadersSize() { + const uint32_t total = getHeadersSize(); + if (total > (max_headers_kb_ * 1024)) { + const absl::string_view header_type = + processing_trailers_ ? Http1HeaderTypes::get().Trailers : Http1HeaderTypes::get().Headers; + error_code_ = Http::Code::RequestHeaderFieldsTooLarge; + sendProtocolError(Http1ResponseCodeDetails::get().HeadersTooLarge); + throw CodecProtocolException(absl::StrCat(header_type, " size exceeds limit")); + } +} + +bool ConnectionImpl::maybeDirectDispatch(Buffer::Instance& data) { + if (!handling_upgrade_) { + // Only direct dispatch for Upgrade requests. + return false; + } + + ENVOY_CONN_LOG(trace, "direct-dispatched {} bytes", connection_, data.length()); + onBody(data); + data.drain(data.length()); + return true; +} + +Http::Status ConnectionImpl::dispatch(Buffer::Instance& data) { + // TODO(#10878): Remove this wrapper when exception removal is complete. 
innerDispatch may either + // throw an exception or return an error status. The utility wrapper catches exceptions and + // converts them to error statuses. + return Utility::exceptionToStatus( + [&](Buffer::Instance& data) -> Http::Status { return innerDispatch(data); }, data); +} + +Http::Status ConnectionImpl::innerDispatch(Buffer::Instance& data) { + ENVOY_CONN_LOG(trace, "parsing {} bytes", connection_, data.length()); + ASSERT(buffered_body_.length() == 0); + + if (maybeDirectDispatch(data)) { + return Http::okStatus(); + } + + // Always unpause before dispatch. + http_parser_pause(&parser_, 0); + + ssize_t total_parsed = 0; + if (data.length() > 0) { + for (const Buffer::RawSlice& slice : data.getRawSlices()) { + total_parsed += dispatchSlice(static_cast(slice.mem_), slice.len_); + if (HTTP_PARSER_ERRNO(&parser_) != HPE_OK) { + // Parse errors trigger an exception in dispatchSlice so we are guaranteed to be paused at + // this point. + ASSERT(HTTP_PARSER_ERRNO(&parser_) == HPE_PAUSED); + break; + } + } + dispatchBufferedBody(); + } else { + dispatchSlice(nullptr, 0); + } + ASSERT(buffered_body_.length() == 0); + + ENVOY_CONN_LOG(trace, "parsed {} bytes", connection_, total_parsed); + data.drain(total_parsed); + + // If an upgrade has been handled and there is body data or early upgrade + // payload to send on, send it on. 
+ maybeDirectDispatch(data); + return Http::okStatus(); +} + +size_t ConnectionImpl::dispatchSlice(const char* slice, size_t len) { + ssize_t rc = http_parser_execute(&parser_, &settings_, slice, len); + if (HTTP_PARSER_ERRNO(&parser_) != HPE_OK && HTTP_PARSER_ERRNO(&parser_) != HPE_PAUSED) { + sendProtocolError(Http1ResponseCodeDetails::get().HttpCodecError); + throw CodecProtocolException("http/1.1 protocol error: " + + std::string(http_errno_name(HTTP_PARSER_ERRNO(&parser_)))); + } + + return rc; +} + +void ConnectionImpl::onHeaderField(const char* data, size_t length) { + // We previously already finished up the headers, these headers are + // now trailers. + if (header_parsing_state_ == HeaderParsingState::Done) { + if (!enable_trailers_) { + // Ignore trailers. + return; + } + processing_trailers_ = true; + header_parsing_state_ = HeaderParsingState::Field; + allocTrailers(); + } + if (header_parsing_state_ == HeaderParsingState::Value) { + completeLastHeader(); + } + + current_header_field_.append(data, length); + + checkMaxHeadersSize(); +} + +void ConnectionImpl::onHeaderValue(const char* data, size_t length) { + if (header_parsing_state_ == HeaderParsingState::Done && !enable_trailers_) { + // Ignore trailers. + return; + } + + absl::string_view header_value{data, length}; + if (!Http::HeaderUtility::headerValueIsValid(header_value)) { + ENVOY_CONN_LOG(debug, "invalid header value: {}", connection_, header_value); + error_code_ = Http::Code::BadRequest; + sendProtocolError(Http1ResponseCodeDetails::get().InvalidCharacters); + throw CodecProtocolException("http/1.1 protocol error: header value contains invalid chars"); + } + + header_parsing_state_ = HeaderParsingState::Value; + if (current_header_value_.empty()) { + // Strip leading whitespace if the current header value input contains the first bytes of the + // encoded header value. Trailing whitespace is stripped once the full header value is known in + // ConnectionImpl::completeLastHeader. 
http_parser does not strip leading or trailing whitespace + // as the spec requires: https://tools.ietf.org/html/rfc7230#section-3.2.4 . + header_value = StringUtil::ltrim(header_value); + } + current_header_value_.append(header_value.data(), header_value.length()); + + checkMaxHeadersSize(); +} + +int ConnectionImpl::onHeadersCompleteBase() { + ASSERT(!processing_trailers_); + ENVOY_CONN_LOG(trace, "onHeadersCompleteBase", connection_); + completeLastHeader(); + + if (!(parser_.http_major == 1 && parser_.http_minor == 1)) { + // This is not necessarily true, but it's good enough since higher layers only care if this is + // HTTP/1.1 or not. + protocol_ = Protocol::Http10; + } + RequestOrResponseHeaderMap& request_or_response_headers = requestOrResponseHeaders(); + if (Utility::isUpgrade(request_or_response_headers) && upgradeAllowed()) { + // Ignore h2c upgrade requests until we support them. + // See https://github.com/envoyproxy/envoy/issues/7161 for details. + if (absl::EqualsIgnoreCase(request_or_response_headers.getUpgradeValue(), + Http::Headers::get().UpgradeValues.H2c)) { + ENVOY_CONN_LOG(trace, "removing unsupported h2c upgrade headers.", connection_); + request_or_response_headers.removeUpgrade(); + if (request_or_response_headers.Connection()) { + const auto& tokens_to_remove = caseUnorderdSetContainingUpgradeAndHttp2Settings(); + std::string new_value = StringUtil::removeTokens( + request_or_response_headers.getConnectionValue(), ",", tokens_to_remove, ","); + if (new_value.empty()) { + request_or_response_headers.removeConnection(); + } else { + request_or_response_headers.setConnection(new_value); + } + } + request_or_response_headers.remove(Headers::get().Http2Settings); + } else { + ENVOY_CONN_LOG(trace, "codec entering upgrade mode.", connection_); + handling_upgrade_ = true; + } + } + if (parser_.method == HTTP_CONNECT) { + if (request_or_response_headers.ContentLength()) { + if (request_or_response_headers.getContentLengthValue() == "0") { + 
request_or_response_headers.removeContentLength(); + } else { + // Per https://tools.ietf.org/html/rfc7231#section-4.3.6 a payload with a + // CONNECT request has no defined semantics, and may be rejected. + error_code_ = Http::Code::BadRequest; + sendProtocolError(Http1ResponseCodeDetails::get().BodyDisallowed); + throw CodecProtocolException("http/1.1 protocol error: unsupported content length"); + } + } + ENVOY_CONN_LOG(trace, "codec entering upgrade mode for CONNECT request.", connection_); + handling_upgrade_ = true; + } + + // Per https://tools.ietf.org/html/rfc7230#section-3.3.1 Envoy should reject + // transfer-codings it does not understand. + // Per https://tools.ietf.org/html/rfc7231#section-4.3.6 a payload with a + // CONNECT request has no defined semantics, and may be rejected. + if (request_or_response_headers.TransferEncoding()) { + const absl::string_view encoding = request_or_response_headers.getTransferEncodingValue(); + if (!absl::EqualsIgnoreCase(encoding, Headers::get().TransferEncodingValues.Chunked) || + parser_.method == HTTP_CONNECT) { + error_code_ = Http::Code::NotImplemented; + sendProtocolError(Http1ResponseCodeDetails::get().InvalidTransferEncoding); + throw CodecProtocolException("http/1.1 protocol error: unsupported transfer encoding"); + } + } + + int rc = onHeadersComplete(); + header_parsing_state_ = HeaderParsingState::Done; + + // Returning 2 informs http_parser to not expect a body or further data on this connection. + return handling_upgrade_ ? 
2 : rc; +} + +void ConnectionImpl::bufferBody(const char* data, size_t length) { + buffered_body_.add(data, length); +} + +void ConnectionImpl::dispatchBufferedBody() { + ASSERT(HTTP_PARSER_ERRNO(&parser_) == HPE_OK || HTTP_PARSER_ERRNO(&parser_) == HPE_PAUSED); + if (buffered_body_.length() > 0) { + onBody(buffered_body_); + buffered_body_.drain(buffered_body_.length()); + } +} + +void ConnectionImpl::onChunkHeader(bool is_final_chunk) { + if (is_final_chunk) { + // Dispatch body before parsing trailers, so body ends up dispatched even if an error is found + // while processing trailers. + dispatchBufferedBody(); + } +} + +void ConnectionImpl::onMessageCompleteBase() { + ENVOY_CONN_LOG(trace, "message complete", connection_); + + dispatchBufferedBody(); + + if (handling_upgrade_) { + // If this is an upgrade request, swallow the onMessageComplete. The + // upgrade payload will be treated as stream body. + ASSERT(!deferred_end_stream_headers_); + ENVOY_CONN_LOG(trace, "Pausing parser due to upgrade.", connection_); + http_parser_pause(&parser_, 1); + return; + } + + // If true, this indicates we were processing trailers and must + // move the last header into current_header_map_ + if (header_parsing_state_ == HeaderParsingState::Value) { + completeLastHeader(); + } + + onMessageComplete(); +} + +void ConnectionImpl::onMessageBeginBase() { + ENVOY_CONN_LOG(trace, "message begin", connection_); + // Make sure that if HTTP/1.0 and HTTP/1.1 requests share a connection Envoy correctly sets + // protocol for each request. 
Envoy defaults to 1.1 but sets the protocol to 1.0 where applicable + // in onHeadersCompleteBase + protocol_ = Protocol::Http11; + processing_trailers_ = false; + header_parsing_state_ = HeaderParsingState::Field; + allocHeaders(); + onMessageBegin(); +} + +void ConnectionImpl::onResetStreamBase(StreamResetReason reason) { + ASSERT(!reset_stream_called_); + reset_stream_called_ = true; + onResetStream(reason); +} + +ServerConnectionImpl::ServerConnectionImpl( + Network::Connection& connection, CodecStats& stats, ServerConnectionCallbacks& callbacks, + const Http1Settings& settings, uint32_t max_request_headers_kb, + const uint32_t max_request_headers_count, + envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction + headers_with_underscores_action) + : ConnectionImpl(connection, stats, HTTP_REQUEST, max_request_headers_kb, + max_request_headers_count, formatter(settings), settings.enable_trailers_), + callbacks_(callbacks), codec_settings_(settings), + response_buffer_releasor_([this](const Buffer::OwnedBufferFragmentImpl* fragment) { + releaseOutboundResponse(fragment); + }), + // Pipelining is generally not well supported on the internet and has a series of dangerous + // overflow bugs. As such we are disabling it for now, and removing this temporary override if + // no one objects. If you use this integer to restore prior behavior, contact the + // maintainer team as it will otherwise be removed entirely soon. + max_outbound_responses_( + Runtime::getInteger("envoy.do_not_use_going_away_max_http2_outbound_responses", 2)), + flood_protection_( + Runtime::runtimeFeatureEnabled("envoy.reloadable_features.http1_flood_protection")), + headers_with_underscores_action_(headers_with_underscores_action) {} + +uint32_t ServerConnectionImpl::getHeadersSize() { + // Add in the the size of the request URL if processing request headers. + const uint32_t url_size = (!processing_trailers_ && active_request_.has_value()) + ? 
active_request_.value().request_url_.size() + : 0; + return url_size + ConnectionImpl::getHeadersSize(); +} + +void ServerConnectionImpl::onEncodeComplete() { + if (active_request_.value().remote_complete_) { + // Only do this if remote is complete. If we are replying before the request is complete the + // only logical thing to do is for higher level code to reset() / close the connection so we + // leave the request around so that it can fire reset callbacks. + active_request_.reset(); + } +} + +void ServerConnectionImpl::handlePath(RequestHeaderMap& headers, unsigned int method) { + HeaderString path(Headers::get().Path); + + bool is_connect = (method == HTTP_CONNECT); + + // The url is relative or a wildcard when the method is OPTIONS. Nothing to do here. + auto& active_request = active_request_.value(); + if (!is_connect && !active_request.request_url_.getStringView().empty() && + (active_request.request_url_.getStringView()[0] == '/' || + ((method == HTTP_OPTIONS) && active_request.request_url_.getStringView()[0] == '*'))) { + headers.addViaMove(std::move(path), std::move(active_request.request_url_)); + return; + } + + // If absolute_urls and/or connect are not going be handled, copy the url and return. + // This forces the behavior to be backwards compatible with the old codec behavior. + // CONNECT "urls" are actually host:port so look like absolute URLs to the above checks. + // Absolute URLS in CONNECT requests will be rejected below by the URL class validation. 
+ if (!codec_settings_.allow_absolute_url_ && !is_connect) { + headers.addViaMove(std::move(path), std::move(active_request.request_url_)); + return; + } + + Utility::Url absolute_url; + if (!absolute_url.initialize(active_request.request_url_.getStringView(), is_connect)) { + sendProtocolError(Http1ResponseCodeDetails::get().InvalidUrl); + throw CodecProtocolException("http/1.1 protocol error: invalid url in request line"); + } + // RFC7230#5.7 + // When a proxy receives a request with an absolute-form of + // request-target, the proxy MUST ignore the received Host header field + // (if any) and instead replace it with the host information of the + // request-target. A proxy that forwards such a request MUST generate a + // new Host field-value based on the received request-target rather than + // forward the received Host field-value. + headers.setHost(absolute_url.hostAndPort()); + + if (!absolute_url.pathAndQueryParams().empty()) { + headers.setPath(absolute_url.pathAndQueryParams()); + } + active_request.request_url_.clear(); +} + +int ServerConnectionImpl::onHeadersComplete() { + // Handle the case where response happens prior to request complete. It's up to upper layer code + // to disconnect the connection but we shouldn't fire any more events since it doesn't make + // sense. 
+ if (active_request_.has_value()) { + auto& active_request = active_request_.value(); + auto& headers = absl::get(headers_or_trailers_); + ENVOY_CONN_LOG(trace, "Server: onHeadersComplete size={}", connection_, headers->size()); + const char* method_string = http_method_str(static_cast(parser_.method)); + + if (!handling_upgrade_ && connection_header_sanitization_ && headers->Connection()) { + // If we fail to sanitize the request, return a 400 to the client + if (!Utility::sanitizeConnectionHeader(*headers)) { + absl::string_view header_value = headers->getConnectionValue(); + ENVOY_CONN_LOG(debug, "Invalid nominated headers in Connection: {}", connection_, + header_value); + error_code_ = Http::Code::BadRequest; + sendProtocolError(Http1ResponseCodeDetails::get().ConnectionHeaderSanitization); + throw CodecProtocolException("Invalid nominated headers in Connection."); + } + } + + // Inform the response encoder about any HEAD method, so it can set content + // length and transfer encoding headers correctly. + active_request.response_encoder_.setIsResponseToHeadRequest(parser_.method == HTTP_HEAD); + active_request.response_encoder_.setIsResponseToConnectRequest(parser_.method == HTTP_CONNECT); + + handlePath(*headers, parser_.method); + ASSERT(active_request.request_url_.empty()); + + headers->setMethod(method_string); + + // Make sure the host is valid. + auto details = HeaderUtility::requestHeadersValid(*headers); + if (details.has_value()) { + sendProtocolError(details.value().get()); + throw CodecProtocolException( + "http/1.1 protocol error: request headers failed spec compliance checks"); + } + + // Determine here whether we have a body or not. This uses the new RFC semantics where the + // presence of content-length or chunked transfer-encoding indicates a body vs. a particular + // method. If there is no body, we defer raising decodeHeaders() until the parser is flushed + // with message complete. 
This allows upper layers to behave like HTTP/2 and prevents a proxy + // scenario where the higher layers stream through and implicitly switch to chunked transfer + // encoding because end stream with zero body length has not yet been indicated. + if (parser_.flags & F_CHUNKED || + (parser_.content_length > 0 && parser_.content_length != ULLONG_MAX) || handling_upgrade_) { + active_request.request_decoder_->decodeHeaders(std::move(headers), false); + + // If the connection has been closed (or is closing) after decoding headers, pause the parser + // so we return control to the caller. + if (connection_.state() != Network::Connection::State::Open) { + http_parser_pause(&parser_, 1); + } + } else { + deferred_end_stream_headers_ = true; + } + } + + return 0; +} + +void ServerConnectionImpl::onMessageBegin() { + if (!resetStreamCalled()) { + ASSERT(!active_request_.has_value()); + active_request_.emplace(*this, header_key_formatter_.get()); + auto& active_request = active_request_.value(); + active_request.request_decoder_ = &callbacks_.newStream(active_request.response_encoder_); + + // Check for pipelined request flood as we prepare to accept a new request. + // Parse errors that happen prior to onMessageBegin result in stream termination, it is not + // possible to overflow output buffers with early parse errors. 
+ doFloodProtectionChecks(); + } +} + +void ServerConnectionImpl::onUrl(const char* data, size_t length) { + if (active_request_.has_value()) { + active_request_.value().request_url_.append(data, length); + + checkMaxHeadersSize(); + } +} + +void ServerConnectionImpl::onBody(Buffer::Instance& data) { + ASSERT(!deferred_end_stream_headers_); + if (active_request_.has_value()) { + ENVOY_CONN_LOG(trace, "body size={}", connection_, data.length()); + active_request_.value().request_decoder_->decodeData(data, false); + } +} + +void ServerConnectionImpl::onMessageComplete() { + ASSERT(!handling_upgrade_); + if (active_request_.has_value()) { + auto& active_request = active_request_.value(); + + if (active_request.request_decoder_) { + active_request.response_encoder_.readDisable(true); + } + active_request.remote_complete_ = true; + if (deferred_end_stream_headers_) { + active_request.request_decoder_->decodeHeaders( + std::move(absl::get(headers_or_trailers_)), true); + deferred_end_stream_headers_ = false; + } else if (processing_trailers_) { + active_request.request_decoder_->decodeTrailers( + std::move(absl::get(headers_or_trailers_))); + } else { + Buffer::OwnedImpl buffer; + active_request.request_decoder_->decodeData(buffer, true); + } + + // Reset to ensure no information from one requests persists to the next. + headers_or_trailers_.emplace(nullptr); + } + + // Always pause the parser so that the calling code can process 1 request at a time and apply + // back pressure. However this means that the calling code needs to detect if there is more data + // in the buffer and dispatch it again. 
+ http_parser_pause(&parser_, 1); +} + +void ServerConnectionImpl::onResetStream(StreamResetReason reason) { + active_request_.value().response_encoder_.runResetCallbacks(reason); + active_request_.reset(); +} + +void ServerConnectionImpl::sendProtocolErrorOld(absl::string_view details) { + if (active_request_.has_value()) { + active_request_.value().response_encoder_.setDetails(details); + } + // We do this here because we may get a protocol error before we have a logical stream. Higher + // layers can only operate on streams, so there is no coherent way to allow them to send an error + // "out of band." On one hand this is kind of a hack but on the other hand it normalizes HTTP/1.1 + // to look more like HTTP/2 to higher layers. + if (!active_request_.has_value() || + !active_request_.value().response_encoder_.startedResponse()) { + Buffer::OwnedImpl bad_request_response( + absl::StrCat("HTTP/1.1 ", error_code_, " ", CodeUtility::toString(error_code_), + "\r\ncontent-length: 0\r\nconnection: close\r\n\r\n")); + + connection_.write(bad_request_response, false); + } +} + +void ServerConnectionImpl::sendProtocolError(absl::string_view details) { + if (!Runtime::runtimeFeatureEnabled("envoy.reloadable_features.early_errors_via_hcm")) { + sendProtocolErrorOld(details); + return; + } + // We do this here because we may get a protocol error before we have a logical stream. + if (!active_request_.has_value()) { + onMessageBeginBase(); + } + ASSERT(active_request_.has_value()); + + active_request_.value().response_encoder_.setDetails(details); + if (!active_request_.value().response_encoder_.startedResponse()) { + // Note that the correctness of is_grpc_request and is_head_request is best-effort. + // If headers have not been fully parsed they may not be inferred correctly. 
+ bool is_grpc_request = false; + if (absl::holds_alternative(headers_or_trailers_) && + absl::get(headers_or_trailers_) != nullptr) { + is_grpc_request = + Grpc::Common::isGrpcRequestHeaders(*absl::get(headers_or_trailers_)); + } + active_request_->request_decoder_->sendLocalReply(is_grpc_request, error_code_, + CodeUtility::toString(error_code_), nullptr, + absl::nullopt, details); + return; + } +} + +void ServerConnectionImpl::onAboveHighWatermark() { + if (active_request_.has_value()) { + active_request_.value().response_encoder_.runHighWatermarkCallbacks(); + } +} +void ServerConnectionImpl::onBelowLowWatermark() { + if (active_request_.has_value()) { + active_request_.value().response_encoder_.runLowWatermarkCallbacks(); + } +} + +void ServerConnectionImpl::releaseOutboundResponse( + const Buffer::OwnedBufferFragmentImpl* fragment) { + ASSERT(outbound_responses_ >= 1); + --outbound_responses_; + delete fragment; +} + +void ServerConnectionImpl::checkHeaderNameForUnderscores() { + if (headers_with_underscores_action_ != envoy::config::core::v3::HttpProtocolOptions::ALLOW && + Http::HeaderUtility::headerNameContainsUnderscore(current_header_field_.getStringView())) { + if (headers_with_underscores_action_ == + envoy::config::core::v3::HttpProtocolOptions::DROP_HEADER) { + ENVOY_CONN_LOG(debug, "Dropping header with invalid characters in its name: {}", connection_, + current_header_field_.getStringView()); + stats_.dropped_headers_with_underscores_.inc(); + current_header_field_.clear(); + current_header_value_.clear(); + } else { + ENVOY_CONN_LOG(debug, "Rejecting request due to header name with underscores: {}", + connection_, current_header_field_.getStringView()); + error_code_ = Http::Code::BadRequest; + sendProtocolError(Http1ResponseCodeDetails::get().InvalidUnderscore); + stats_.requests_rejected_with_underscores_in_headers_.inc(); + throw CodecProtocolException("http/1.1 protocol error: header name contains underscores"); + } + } +} + 
+ClientConnectionImpl::ClientConnectionImpl(Network::Connection& connection, CodecStats& stats, + ConnectionCallbacks&, const Http1Settings& settings, + const uint32_t max_response_headers_count) + : ConnectionImpl(connection, stats, HTTP_RESPONSE, MAX_RESPONSE_HEADERS_KB, + max_response_headers_count, formatter(settings), settings.enable_trailers_) {} + +bool ClientConnectionImpl::cannotHaveBody() { + if (pending_response_.has_value() && pending_response_.value().encoder_.headRequest()) { + ASSERT(!pending_response_done_); + return true; + } else if (parser_.status_code == 204 || parser_.status_code == 304 || + (parser_.status_code >= 200 && parser_.content_length == 0)) { + return true; + } else { + return false; + } +} + +RequestEncoder& ClientConnectionImpl::newStream(ResponseDecoder& response_decoder) { + if (resetStreamCalled()) { + throw CodecClientException("cannot create new streams after calling reset"); + } + + // If reads were disabled due to flow control, we expect reads to always be enabled again before + // reusing this connection. This is done when the response is received. + ASSERT(connection_.readEnabled()); + + ASSERT(!pending_response_.has_value()); + ASSERT(pending_response_done_); + pending_response_.emplace(*this, header_key_formatter_.get(), &response_decoder); + pending_response_done_ = false; + return pending_response_.value().encoder_; +} + +int ClientConnectionImpl::onHeadersComplete() { + ENVOY_CONN_LOG(trace, "status_code {}", connection_, parser_.status_code); + + // Handle the case where the client is closing a kept alive connection (by sending a 408 + // with a 'Connection: close' header). In this case we just let response flush out followed + // by the remote close. 
+ if (!pending_response_.has_value() && !resetStreamCalled()) { + throw PrematureResponseException(static_cast(parser_.status_code)); + } else if (pending_response_.has_value()) { + ASSERT(!pending_response_done_); + auto& headers = absl::get(headers_or_trailers_); + ENVOY_CONN_LOG(trace, "Client: onHeadersComplete size={}", connection_, headers->size()); + headers->setStatus(parser_.status_code); + + if (parser_.status_code >= 200 && parser_.status_code < 300 && + pending_response_.value().encoder_.connectRequest()) { + ENVOY_CONN_LOG(trace, "codec entering upgrade mode for CONNECT response.", connection_); + handling_upgrade_ = true; + + // For responses to connect requests, do not accept the chunked + // encoding header: https://tools.ietf.org/html/rfc7231#section-4.3.6 + if (headers->TransferEncoding() && + absl::EqualsIgnoreCase(headers->TransferEncoding()->value().getStringView(), + Headers::get().TransferEncodingValues.Chunked)) { + sendProtocolError(Http1ResponseCodeDetails::get().InvalidTransferEncoding); + throw CodecProtocolException("http/1.1 protocol error: unsupported transfer encoding"); + } + } + + if (strict_1xx_and_204_headers_ && (parser_.status_code < 200 || parser_.status_code == 204)) { + if (headers->TransferEncoding()) { + sendProtocolError(Http1ResponseCodeDetails::get().TransferEncodingNotAllowed); + throw CodecProtocolException( + "http/1.1 protocol error: transfer encoding not allowed in 1xx or 204"); + } + + if (headers->ContentLength()) { + // Report a protocol error for non-zero Content-Length, but paper over zero Content-Length. 
+ if (headers->ContentLength()->value().getStringView() != "0") { + sendProtocolError(Http1ResponseCodeDetails::get().ContentLengthNotAllowed); + throw CodecProtocolException( + "http/1.1 protocol error: content length not allowed in 1xx or 204"); + } + + headers->removeContentLength(); + } + } + + if (parser_.status_code == enumToInt(Http::Code::Continue)) { + pending_response_.value().decoder_->decode100ContinueHeaders(std::move(headers)); + } else if (cannotHaveBody() && !handling_upgrade_) { + deferred_end_stream_headers_ = true; + } else { + pending_response_.value().decoder_->decodeHeaders(std::move(headers), false); + } + + // http-parser treats 1xx headers as their own complete response. Swallow the spurious + // onMessageComplete and continue processing for purely informational headers. + // 101-SwitchingProtocols is exempt as all data after the header is proxied through after + // upgrading. + if (CodeUtility::is1xx(parser_.status_code) && + parser_.status_code != enumToInt(Http::Code::SwitchingProtocols)) { + ignore_message_complete_for_1xx_ = true; + // Reset to ensure no information from the 1xx headers is used for the response headers. + headers_or_trailers_.emplace(nullptr); + } + } + + // Here we deal with cases where the response cannot have a body, but http_parser does not deal + // with it for us. + return cannotHaveBody() ? 
1 : 0; +} + +bool ClientConnectionImpl::upgradeAllowed() const { + if (pending_response_.has_value()) { + return pending_response_->encoder_.upgradeRequest(); + } + return false; +} + +void ClientConnectionImpl::onBody(Buffer::Instance& data) { + ASSERT(!deferred_end_stream_headers_); + if (pending_response_.has_value()) { + ASSERT(!pending_response_done_); + pending_response_.value().decoder_->decodeData(data, false); + } +} + +void ClientConnectionImpl::onMessageComplete() { + ENVOY_CONN_LOG(trace, "message complete", connection_); + if (ignore_message_complete_for_1xx_) { + ignore_message_complete_for_1xx_ = false; + return; + } + if (pending_response_.has_value()) { + ASSERT(!pending_response_done_); + // After calling decodeData() with end stream set to true, we should no longer be able to reset. + PendingResponse& response = pending_response_.value(); + // Encoder is used as part of decode* calls later in this function so pending_response_ can not + // be reset just yet. Preserve the state in pending_response_done_ instead. + pending_response_done_ = true; + + if (deferred_end_stream_headers_) { + response.decoder_->decodeHeaders( + std::move(absl::get(headers_or_trailers_)), true); + deferred_end_stream_headers_ = false; + } else if (processing_trailers_) { + response.decoder_->decodeTrailers( + std::move(absl::get(headers_or_trailers_))); + } else { + Buffer::OwnedImpl buffer; + response.decoder_->decodeData(buffer, true); + } + + // Reset to ensure no information from one requests persists to the next. + pending_response_.reset(); + headers_or_trailers_.emplace(nullptr); + } +} + +void ClientConnectionImpl::onResetStream(StreamResetReason reason) { + // Only raise reset if we did not already dispatch a complete response. 
+ if (pending_response_.has_value() && !pending_response_done_) { + pending_response_.value().encoder_.runResetCallbacks(reason); + pending_response_done_ = true; + pending_response_.reset(); + } +} + +void ClientConnectionImpl::sendProtocolError(absl::string_view details) { + if (pending_response_.has_value()) { + ASSERT(!pending_response_done_); + pending_response_.value().encoder_.setDetails(details); + } +} + +void ClientConnectionImpl::onAboveHighWatermark() { + // This should never happen without an active stream/request. + pending_response_.value().encoder_.runHighWatermarkCallbacks(); +} + +void ClientConnectionImpl::onBelowLowWatermark() { + // This can get called without an active stream/request when the response completion causes us to + // close the connection, but in doing so go below low watermark. + if (pending_response_.has_value() && !pending_response_done_) { + pending_response_.value().encoder_.runLowWatermarkCallbacks(); + } +} + +} // namespace Http1 +} // namespace Legacy +} // namespace Http +} // namespace Envoy diff --git a/source/common/http/http1/codec_impl_legacy.h b/source/common/http/http1/codec_impl_legacy.h new file mode 100644 index 0000000000000..622d9441459bc --- /dev/null +++ b/source/common/http/http1/codec_impl_legacy.h @@ -0,0 +1,607 @@ +#pragma once + +#include + +#include +#include +#include +#include +#include + +#include "envoy/config/core/v3/protocol.pb.h" +#include "envoy/http/codec.h" +#include "envoy/network/connection.h" + +#include "common/buffer/watermark_buffer.h" +#include "common/common/assert.h" +#include "common/common/statusor.h" +#include "common/http/codec_helper.h" +#include "common/http/codes.h" +#include "common/http/header_map_impl.h" +#include "common/http/http1/codec_stats.h" +#include "common/http/http1/header_formatter.h" +#include "common/http/status.h" + +namespace Envoy { +namespace Http { +namespace Legacy { +namespace Http1 { + +class ConnectionImpl; + +/** + * Base class for HTTP/1.1 request 
and response encoders. + */ +class StreamEncoderImpl : public virtual StreamEncoder, + public Stream, + Logger::Loggable, + public StreamCallbackHelper, + public Http1StreamEncoderOptions { +public: + ~StreamEncoderImpl() override { + // When the stream goes away, undo any read blocks to resume reading. + while (read_disable_calls_ != 0) { + StreamEncoderImpl::readDisable(false); + } + } + // Http::StreamEncoder + void encodeData(Buffer::Instance& data, bool end_stream) override; + void encodeMetadata(const MetadataMapVector&) override; + Stream& getStream() override { return *this; } + Http1StreamEncoderOptionsOptRef http1StreamEncoderOptions() override { return *this; } + + // Http::Http1StreamEncoderOptions + void disableChunkEncoding() override { disable_chunk_encoding_ = true; } + + // Http::Stream + void addCallbacks(StreamCallbacks& callbacks) override { addCallbacksHelper(callbacks); } + void removeCallbacks(StreamCallbacks& callbacks) override { removeCallbacksHelper(callbacks); } + // After this is called, for the HTTP/1 codec, the connection should be closed, i.e. no further + // progress may be made with the codec. + void resetStream(StreamResetReason reason) override; + void readDisable(bool disable) override; + uint32_t bufferLimit() override; + absl::string_view responseDetails() override { return details_; } + const Network::Address::InstanceConstSharedPtr& connectionLocalAddress() override; + void setFlushTimeout(std::chrono::milliseconds) override { + // HTTP/1 has one stream per connection, thus any data encoded is immediately written to the + // connection, invoking any watermarks as necessary. There is no internal buffering that would + // require a flush timeout not already covered by other timeouts. 
+ } + + void setIsResponseToHeadRequest(bool value) { is_response_to_head_request_ = value; } + void setIsResponseToConnectRequest(bool value) { is_response_to_connect_request_ = value; } + void setDetails(absl::string_view details) { details_ = details; } + + void clearReadDisableCallsForTests() { read_disable_calls_ = 0; } + +protected: + StreamEncoderImpl(ConnectionImpl& connection, + Http::Http1::HeaderKeyFormatter* header_key_formatter); + void encodeHeadersBase(const RequestOrResponseHeaderMap& headers, absl::optional status, + bool end_stream); + void encodeTrailersBase(const HeaderMap& headers); + + static const std::string CRLF; + static const std::string LAST_CHUNK; + + ConnectionImpl& connection_; + uint32_t read_disable_calls_{}; + bool disable_chunk_encoding_ : 1; + bool chunk_encoding_ : 1; + bool is_response_to_head_request_ : 1; + bool is_response_to_connect_request_ : 1; + +private: + /** + * Called to encode an individual header. + * @param key supplies the header to encode. + * @param key_size supplies the byte size of the key. + * @param value supplies the value to encode. + * @param value_size supplies the byte size of the value. + */ + void encodeHeader(const char* key, uint32_t key_size, const char* value, uint32_t value_size); + + /** + * Called to encode an individual header. + * @param key supplies the header to encode as a string_view. + * @param value supplies the value to encode as a string_view. + */ + void encodeHeader(absl::string_view key, absl::string_view value); + + /** + * Called to finalize a stream encode. + */ + void endEncode(); + + void encodeFormattedHeader(absl::string_view key, absl::string_view value); + + const Http::Http1::HeaderKeyFormatter* const header_key_formatter_; + absl::string_view details_; +}; + +/** + * HTTP/1.1 response encoder. 
+ */ +class ResponseEncoderImpl : public StreamEncoderImpl, public ResponseEncoder { +public: + ResponseEncoderImpl(ConnectionImpl& connection, + Http::Http1::HeaderKeyFormatter* header_key_formatter) + : StreamEncoderImpl(connection, header_key_formatter) {} + + bool startedResponse() { return started_response_; } + + // Http::ResponseEncoder + void encode100ContinueHeaders(const ResponseHeaderMap& headers) override; + void encodeHeaders(const ResponseHeaderMap& headers, bool end_stream) override; + void encodeTrailers(const ResponseTrailerMap& trailers) override { encodeTrailersBase(trailers); } + +private: + bool started_response_{}; +}; + +/** + * HTTP/1.1 request encoder. + */ +class RequestEncoderImpl : public StreamEncoderImpl, public RequestEncoder { +public: + RequestEncoderImpl(ConnectionImpl& connection, + Http::Http1::HeaderKeyFormatter* header_key_formatter) + : StreamEncoderImpl(connection, header_key_formatter) {} + bool upgradeRequest() const { return upgrade_request_; } + bool headRequest() const { return head_request_; } + bool connectRequest() const { return connect_request_; } + + // Http::RequestEncoder + void encodeHeaders(const RequestHeaderMap& headers, bool end_stream) override; + void encodeTrailers(const RequestTrailerMap& trailers) override { encodeTrailersBase(trailers); } + +private: + bool upgrade_request_{}; + bool head_request_{}; + bool connect_request_{}; +}; + +/** + * Base class for HTTP/1.1 client and server connections. + * Handles the callbacks of http_parser with its own base routine and then + * virtual dispatches to its subclasses. + */ +class ConnectionImpl : public virtual Connection, protected Logger::Loggable { +public: + /** + * @return Network::Connection& the backing network connection. + */ + Network::Connection& connection() { return connection_; } + + /** + * Called when the active encoder has completed encoding the outbound half of the stream. 
+ */ + virtual void onEncodeComplete() PURE; + + /** + * Called when resetStream() has been called on an active stream. In HTTP/1.1 the only + * valid operation after this point is for the connection to get blown away, but we will not + * fire any more callbacks in case some stack has to unwind. + */ + void onResetStreamBase(StreamResetReason reason); + + /** + * Flush all pending output from encoding. + */ + void flushOutput(bool end_encode = false); + + void addToBuffer(absl::string_view data); + void addCharToBuffer(char c); + void addIntToBuffer(uint64_t i); + Buffer::WatermarkBuffer& buffer() { return output_buffer_; } + uint64_t bufferRemainingSize(); + void copyToBuffer(const char* data, uint64_t length); + void reserveBuffer(uint64_t size); + void readDisable(bool disable) { + if (connection_.state() == Network::Connection::State::Open) { + connection_.readDisable(disable); + } + } + uint32_t bufferLimit() { return connection_.bufferLimit(); } + virtual bool supportsHttp10() { return false; } + bool maybeDirectDispatch(Buffer::Instance& data); + virtual void maybeAddSentinelBufferFragment(Buffer::WatermarkBuffer&) {} + Http::Http1::CodecStats& stats() { return stats_; } + bool enableTrailers() const { return enable_trailers_; } + + // Http::Connection + Http::Status dispatch(Buffer::Instance& data) override; + void goAway() override {} // Called during connection manager drain flow + Protocol protocol() override { return protocol_; } + void shutdownNotice() override {} // Called during connection manager drain flow + bool wantsToWrite() override { return false; } + void onUnderlyingConnectionAboveWriteBufferHighWatermark() override { onAboveHighWatermark(); } + void onUnderlyingConnectionBelowWriteBufferLowWatermark() override { onBelowLowWatermark(); } + + bool strict1xxAnd204Headers() { return strict_1xx_and_204_headers_; } + +protected: + ConnectionImpl(Network::Connection& connection, Http::Http1::CodecStats& stats, + http_parser_type type, uint32_t 
max_headers_kb, const uint32_t max_headers_count, + Http::Http1::HeaderKeyFormatterPtr&& header_key_formatter, bool enable_trailers); + + bool resetStreamCalled() { return reset_stream_called_; } + void onMessageBeginBase(); + + /** + * Get memory used to represent HTTP headers or trailers currently being parsed. + * Computed by adding the partial header field and value that is currently being parsed and the + * estimated header size for previous header lines provided by HeaderMap::byteSize(). + */ + virtual uint32_t getHeadersSize(); + + /** + * Called from onUrl, onHeaderField and onHeaderValue to verify that the headers do not exceed the + * configured max header size limit. Throws a CodecProtocolException if headers exceed the size + * limit. + */ + void checkMaxHeadersSize(); + + Network::Connection& connection_; + Http::Http1::CodecStats& stats_; + http_parser parser_; + Http::Code error_code_{Http::Code::BadRequest}; + const Http::Http1::HeaderKeyFormatterPtr header_key_formatter_; + HeaderString current_header_field_; + HeaderString current_header_value_; + bool processing_trailers_ : 1; + bool handling_upgrade_ : 1; + bool reset_stream_called_ : 1; + // Deferred end stream headers indicate that we are not going to raise headers until the full + // HTTP/1 message has been flushed from the parser. This allows raising an HTTP/2 style headers + // block with end stream set to true with no further protocol data remaining. + bool deferred_end_stream_headers_ : 1; + const bool connection_header_sanitization_ : 1; + const bool enable_trailers_ : 1; + const bool strict_1xx_and_204_headers_ : 1; + +private: + enum class HeaderParsingState { Field, Value, Done }; + + virtual HeaderMap& headersOrTrailers() PURE; + virtual RequestOrResponseHeaderMap& requestOrResponseHeaders() PURE; + virtual void allocHeaders() PURE; + virtual void allocTrailers() PURE; + + /** + * Called in order to complete an in progress header decode. 
+ */ + void completeLastHeader(); + + /** + * Check if header name contains underscore character. + * Underscore character is allowed in header names by the RFC-7230 and this check is implemented + * as a security measure due to systems that treat '_' and '-' as interchangeable. + * The ServerConnectionImpl may drop header or reject request based on the + * `common_http_protocol_options.headers_with_underscores_action` configuration option in the + * HttpConnectionManager. + */ + virtual bool shouldDropHeaderWithUnderscoresInNames(absl::string_view /* header_name */) const { + return false; + } + + /** + * An inner dispatch call that executes the dispatching logic. While exception removal is in + * migration (#10878), this function may either throw an exception or return an error status. + * Exceptions are caught and translated to their corresponding statuses in the outer level + * dispatch. + * TODO(#10878): Remove this when exception removal is complete. + */ + Http::Status innerDispatch(Buffer::Instance& data); + + /** + * Dispatch a memory span. + * @param slice supplies the start address. + * @len supplies the length of the span. + */ + size_t dispatchSlice(const char* slice, size_t len); + + /** + * Called by the http_parser when body data is received. + * @param data supplies the start address. + * @param length supplies the length. + */ + void bufferBody(const char* data, size_t length); + + /** + * Push the accumulated body through the filter pipeline. + */ + void dispatchBufferedBody(); + + /** + * Called when a request/response is beginning. A base routine happens first then a virtual + * dispatch is invoked. + */ + virtual void onMessageBegin() PURE; + + /** + * Called when URL data is received. + * @param data supplies the start address. + * @param length supplies the length. + */ + virtual void onUrl(const char* data, size_t length) PURE; + + /** + * Called when header field data is received. + * @param data supplies the start address. 
+ * @param length supplies the length. + */ + void onHeaderField(const char* data, size_t length); + + /** + * Called when header value data is received. + * @param data supplies the start address. + * @param length supplies the length. + */ + void onHeaderValue(const char* data, size_t length); + + /** + * Called when headers are complete. A base routine happens first then a virtual dispatch is + * invoked. Note that this only applies to headers and NOT trailers. End of + * trailers are signaled via onMessageCompleteBase(). + * @return 0 if no error, 1 if there should be no body. + */ + int onHeadersCompleteBase(); + virtual int onHeadersComplete() PURE; + + /** + * Called to see if upgrade transition is allowed. + */ + virtual bool upgradeAllowed() const PURE; + + /** + * Called with body data is available for processing when either: + * - There is an accumulated partial body after the parser is done processing bytes read from the + * socket + * - The parser encounters the last byte of the body + * - The codec does a direct dispatch from the read buffer + * For performance reasons there is at most one call to onBody per call to HTTP/1 + * ConnectionImpl::dispatch call. + * @param data supplies the body data + */ + virtual void onBody(Buffer::Instance& data) PURE; + + /** + * Called when the request/response is complete. + */ + void onMessageCompleteBase(); + virtual void onMessageComplete() PURE; + + /** + * Called when accepting a chunk header. + */ + void onChunkHeader(bool is_final_chunk); + + /** + * @see onResetStreamBase(). + */ + virtual void onResetStream(StreamResetReason reason) PURE; + + /** + * Send a protocol error response to remote. + */ + virtual void sendProtocolError(absl::string_view details) PURE; + + /** + * Called when output_buffer_ or the underlying connection go from below a low watermark to over + * a high watermark. 
+ */ + virtual void onAboveHighWatermark() PURE; + + /** + * Called when output_buffer_ or the underlying connection go from above a high watermark to + * below a low watermark. + */ + virtual void onBelowLowWatermark() PURE; + + /** + * Check if header name contains underscore character. + * The ServerConnectionImpl may drop header or reject request based on configuration. + */ + virtual void checkHeaderNameForUnderscores() {} + + static http_parser_settings settings_; + + HeaderParsingState header_parsing_state_{HeaderParsingState::Field}; + // Used to accumulate the HTTP message body during the current dispatch call. The accumulated body + // is pushed through the filter pipeline either at the end of the current dispatch call, or when + // the last byte of the body is processed (whichever happens first). + Buffer::OwnedImpl buffered_body_; + Buffer::WatermarkBuffer output_buffer_; + Protocol protocol_{Protocol::Http11}; + const uint32_t max_headers_kb_; + const uint32_t max_headers_count_; +}; + +/** + * Implementation of Http::ServerConnection for HTTP/1.1. + */ +class ServerConnectionImpl : public ServerConnection, public ConnectionImpl { +public: + ServerConnectionImpl(Network::Connection& connection, Http::Http1::CodecStats& stats, + ServerConnectionCallbacks& callbacks, const Http1Settings& settings, + uint32_t max_request_headers_kb, const uint32_t max_request_headers_count, + envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction + headers_with_underscores_action); + bool supportsHttp10() override { return codec_settings_.accept_http_10_; } + +protected: + /** + * An active HTTP/1.1 request. 
+ */ + struct ActiveRequest { + ActiveRequest(ConnectionImpl& connection, Http::Http1::HeaderKeyFormatter* header_key_formatter) + : response_encoder_(connection, header_key_formatter) {} + + HeaderString request_url_; + RequestDecoder* request_decoder_{}; + ResponseEncoderImpl response_encoder_; + bool remote_complete_{}; + }; + absl::optional& activeRequest() { return active_request_; } + // ConnectionImpl + void onMessageComplete() override; + // Add the size of the request_url to the reported header size when processing request headers. + uint32_t getHeadersSize() override; + +private: + /** + * Manipulate the request's first line, parsing the url and converting to a relative path if + * necessary. Compute Host / :authority headers based on 7230#5.7 and 7230#6 + * + * @param is_connect true if the request has the CONNECT method + * @param headers the request's headers + * @throws CodecProtocolException on an invalid url in the request line + */ + void handlePath(RequestHeaderMap& headers, unsigned int method); + + // ConnectionImpl + void onEncodeComplete() override; + void onMessageBegin() override; + void onUrl(const char* data, size_t length) override; + int onHeadersComplete() override; + // If upgrade behavior is not allowed, the HCM will have sanitized the headers out. 
+ bool upgradeAllowed() const override { return true; } + void onBody(Buffer::Instance& data) override; + void onResetStream(StreamResetReason reason) override; + void sendProtocolError(absl::string_view details) override; + void onAboveHighWatermark() override; + void onBelowLowWatermark() override; + HeaderMap& headersOrTrailers() override { + if (absl::holds_alternative(headers_or_trailers_)) { + return *absl::get(headers_or_trailers_); + } else { + return *absl::get(headers_or_trailers_); + } + } + RequestOrResponseHeaderMap& requestOrResponseHeaders() override { + return *absl::get(headers_or_trailers_); + } + void allocHeaders() override { + ASSERT(nullptr == absl::get(headers_or_trailers_)); + ASSERT(!processing_trailers_); + headers_or_trailers_.emplace(RequestHeaderMapImpl::create()); + } + void allocTrailers() override { + ASSERT(processing_trailers_); + if (!absl::holds_alternative(headers_or_trailers_)) { + headers_or_trailers_.emplace(RequestTrailerMapImpl::create()); + } + } + + void sendProtocolErrorOld(absl::string_view details); + + void releaseOutboundResponse(const Buffer::OwnedBufferFragmentImpl* fragment); + void maybeAddSentinelBufferFragment(Buffer::WatermarkBuffer& output_buffer) override; + void doFloodProtectionChecks() const; + void checkHeaderNameForUnderscores() override; + + ServerConnectionCallbacks& callbacks_; + absl::optional active_request_; + Http1Settings codec_settings_; + const Buffer::OwnedBufferFragmentImpl::Releasor response_buffer_releasor_; + uint32_t outbound_responses_{}; + // This defaults to 2, which functionally disables pipelining. If any users + // of Envoy wish to enable pipelining (which is dangerous and ill supported) + // we could make this configurable. + uint32_t max_outbound_responses_{}; + bool flood_protection_{}; + // TODO(mattklein123): This should be a member of ActiveRequest but this change needs dedicated + // thought as some of the reset and no header code paths make this difficult. 
Headers are + // populated on message begin. Trailers are populated on the first parsed trailer field (if + // trailers are enabled). The variant is reset to null headers on message complete for assertion + // purposes. + absl::variant headers_or_trailers_; + // The action to take when a request header name contains underscore characters. + const envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction + headers_with_underscores_action_; +}; + +/** + * Implementation of Http::ClientConnection for HTTP/1.1. + */ +class ClientConnectionImpl : public ClientConnection, public ConnectionImpl { +public: + ClientConnectionImpl(Network::Connection& connection, Http::Http1::CodecStats& stats, + ConnectionCallbacks& callbacks, const Http1Settings& settings, + const uint32_t max_response_headers_count); + + // Http::ClientConnection + RequestEncoder& newStream(ResponseDecoder& response_decoder) override; + +private: + struct PendingResponse { + PendingResponse(ConnectionImpl& connection, + Http::Http1::HeaderKeyFormatter* header_key_formatter, ResponseDecoder* decoder) + : encoder_(connection, header_key_formatter), decoder_(decoder) {} + + RequestEncoderImpl encoder_; + ResponseDecoder* decoder_; + }; + + bool cannotHaveBody(); + + // ConnectionImpl + void onEncodeComplete() override {} + void onMessageBegin() override {} + void onUrl(const char*, size_t) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } + int onHeadersComplete() override; + bool upgradeAllowed() const override; + void onBody(Buffer::Instance& data) override; + void onMessageComplete() override; + void onResetStream(StreamResetReason reason) override; + void sendProtocolError(absl::string_view details) override; + void onAboveHighWatermark() override; + void onBelowLowWatermark() override; + HeaderMap& headersOrTrailers() override { + if (absl::holds_alternative(headers_or_trailers_)) { + return *absl::get(headers_or_trailers_); + } else { + return *absl::get(headers_or_trailers_); + } + } + 
RequestOrResponseHeaderMap& requestOrResponseHeaders() override { + return *absl::get(headers_or_trailers_); + } + void allocHeaders() override { + ASSERT(nullptr == absl::get(headers_or_trailers_)); + ASSERT(!processing_trailers_); + headers_or_trailers_.emplace(ResponseHeaderMapImpl::create()); + } + void allocTrailers() override { + ASSERT(processing_trailers_); + if (!absl::holds_alternative(headers_or_trailers_)) { + headers_or_trailers_.emplace(ResponseTrailerMapImpl::create()); + } + } + + absl::optional pending_response_; + // TODO(mattklein123): The following bool tracks whether a pending response is complete before + // dispatching callbacks. This is needed so that pending_response_ stays valid during callbacks + // in order to access the stream, but to avoid invoking callbacks that shouldn't be called once + // the response is complete. The existence of this variable is hard to reason about and it should + // be combined with pending_response_ somehow in a follow up cleanup. + bool pending_response_done_{true}; + // Set true between receiving non-101 1xx headers and receiving the spurious onMessageComplete. + bool ignore_message_complete_for_1xx_{}; + // TODO(mattklein123): This should be a member of PendingResponse but this change needs dedicated + // thought as some of the reset and no header code paths make this difficult. Headers are + // populated on message begin. Trailers are populated when the switch to trailer processing is + // detected while parsing the first trailer field (if trailers are enabled). The variant is reset + // to null headers on message complete for assertion purposes. + absl::variant headers_or_trailers_; + + // The default limit of 80 KiB is the vanilla http_parser behaviour. 
+ static constexpr uint32_t MAX_RESPONSE_HEADERS_KB = 80; +}; + +} // namespace Http1 +} // namespace Legacy +} // namespace Http +} // namespace Envoy diff --git a/source/common/http/http1/codec_stats.h b/source/common/http/http1/codec_stats.h new file mode 100644 index 0000000000000..ac296522adc21 --- /dev/null +++ b/source/common/http/http1/codec_stats.h @@ -0,0 +1,38 @@ +#pragma once + +#include "envoy/stats/scope.h" +#include "envoy/stats/stats_macros.h" + +#include "common/common/thread.h" + +namespace Envoy { +namespace Http { +namespace Http1 { + +/** + * All stats for the HTTP/1 codec. @see stats_macros.h + */ +#define ALL_HTTP1_CODEC_STATS(COUNTER) \ + COUNTER(dropped_headers_with_underscores) \ + COUNTER(metadata_not_supported_error) \ + COUNTER(requests_rejected_with_underscores_in_headers) \ + COUNTER(response_flood) + +/** + * Wrapper struct for the HTTP/1 codec stats. @see stats_macros.h + */ +struct CodecStats { + using AtomicPtr = Thread::AtomicPtr; + + static CodecStats& atomicGet(AtomicPtr& ptr, Stats::Scope& scope) { + return *ptr.get([&scope]() -> CodecStats* { + return new CodecStats{ALL_HTTP1_CODEC_STATS(POOL_COUNTER_PREFIX(scope, "http1."))}; + }); + } + + ALL_HTTP1_CODEC_STATS(GENERATE_COUNTER_STRUCT) +}; + +} // namespace Http1 +} // namespace Http +} // namespace Envoy diff --git a/source/common/http/http1/conn_pool.cc b/source/common/http/http1/conn_pool.cc index 9b82d9b4d7e43..5203399e5a8e0 100644 --- a/source/common/http/http1/conn_pool.cc +++ b/source/common/http/http1/conn_pool.cc @@ -5,6 +5,7 @@ #include #include "envoy/event/dispatcher.h" +#include "envoy/event/schedulable_cb.h" #include "envoy/event/timer.h" #include "envoy/http/codec.h" #include "envoy/http/header_map.h" @@ -12,8 +13,8 @@ #include "common/http/codec_client.h" #include "common/http/codes.h" +#include "common/http/header_utility.h" #include "common/http/headers.h" -#include "common/http/http1/conn_pool_legacy.h" #include "common/runtime/runtime_features.h" #include 
"absl/strings/match.h" @@ -26,16 +27,16 @@ ConnPoolImpl::ConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSha Upstream::ResourcePriority priority, const Network::ConnectionSocket::OptionsSharedPtr& options, const Network::TransportSocketOptionsSharedPtr& transport_socket_options) - : ConnPoolImplBase(std::move(host), std::move(priority), dispatcher, options, - transport_socket_options), - upstream_ready_timer_(dispatcher_.createTimer([this]() { + : HttpConnPoolImplBase(std::move(host), std::move(priority), dispatcher, options, + transport_socket_options, Protocol::Http11), + upstream_ready_cb_(dispatcher_.createSchedulableCallback([this]() { upstream_ready_enabled_ = false; onUpstreamReady(); })) {} ConnPoolImpl::~ConnPoolImpl() { destructAllConnections(); } -ConnPoolImplBase::ActiveClientPtr ConnPoolImpl::instantiateActiveClient() { +Envoy::ConnectionPool::ActiveClientPtr ConnPoolImpl::instantiateActiveClient() { return std::make_unique(*this); } @@ -56,9 +57,9 @@ void ConnPoolImpl::onResponseComplete(ActiveClient& client) { } else { client.stream_wrapper_.reset(); - if (!pending_requests_.empty() && !upstream_ready_enabled_) { + if (!pending_streams_.empty() && !upstream_ready_enabled_) { upstream_ready_enabled_ = true; - upstream_ready_timer_->enableTimer(std::chrono::milliseconds(0)); + upstream_ready_cb_->scheduleCallbackCurrentIteration(); } checkForDrained(); @@ -82,23 +83,27 @@ ConnPoolImpl::StreamWrapper::~StreamWrapper() { void ConnPoolImpl::StreamWrapper::onEncodeComplete() { encode_complete_ = true; } void ConnPoolImpl::StreamWrapper::decodeHeaders(ResponseHeaderMapPtr&& headers, bool end_stream) { - // If Connection: close OR - // Http/1.0 and not Connection: keep-alive OR - // Proxy-Connection: close - if ((headers->Connection() && - (absl::EqualsIgnoreCase(headers->Connection()->value().getStringView(), - Headers::get().ConnectionValues.Close))) || - (parent_.codec_client_->protocol() == Protocol::Http10 && - (!headers->Connection() || 
- !absl::EqualsIgnoreCase(headers->Connection()->value().getStringView(), - Headers::get().ConnectionValues.KeepAlive))) || - (headers->ProxyConnection() && - (absl::EqualsIgnoreCase(headers->ProxyConnection()->value().getStringView(), - Headers::get().ConnectionValues.Close)))) { - parent_.parent_.host_->cluster().stats().upstream_cx_close_notify_.inc(); - close_connection_ = true; + if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.fixed_connection_close")) { + close_connection_ = + HeaderUtility::shouldCloseConnection(parent_.codec_client_->protocol(), *headers); + if (close_connection_) { + parent_.parent_.host()->cluster().stats().upstream_cx_close_notify_.inc(); + } + } else { + // If Connection: close OR + // Http/1.0 and not Connection: keep-alive OR + // Proxy-Connection: close + if ((absl::EqualsIgnoreCase(headers->getConnectionValue(), + Headers::get().ConnectionValues.Close)) || + (parent_.codec_client_->protocol() == Protocol::Http10 && + !absl::EqualsIgnoreCase(headers->getConnectionValue(), + Headers::get().ConnectionValues.KeepAlive)) || + (absl::EqualsIgnoreCase(headers->getProxyConnectionValue(), + Headers::get().ConnectionValues.Close))) { + parent_.parent_.host()->cluster().stats().upstream_cx_close_notify_.inc(); + close_connection_ = true; + } } - ResponseDecoderWrapper::decodeHeaders(std::move(headers), end_stream); } @@ -108,15 +113,13 @@ void ConnPoolImpl::StreamWrapper::onDecodeComplete() { } ConnPoolImpl::ActiveClient::ActiveClient(ConnPoolImpl& parent) - : ConnPoolImplBase::ActiveClient( + : Envoy::Http::ActiveClient( parent, parent.host_->cluster().maxRequestsPerConnection(), 1 // HTTP1 always has a concurrent-request-limit of 1 per connection. 
) { parent.host_->cluster().stats().upstream_cx_http1_total_.inc(); } -bool ConnPoolImpl::ActiveClient::hasActiveRequests() const { return stream_wrapper_ != nullptr; } - bool ConnPoolImpl::ActiveClient::closingWithIncompleteRequest() const { return (stream_wrapper_ != nullptr) && (!stream_wrapper_->decode_complete_); } @@ -138,14 +141,8 @@ allocateConnPool(Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr hos Upstream::ResourcePriority priority, const Network::ConnectionSocket::OptionsSharedPtr& options, const Network::TransportSocketOptionsSharedPtr& transport_socket_options) { - if (Runtime::runtimeFeatureEnabled( - "envoy.reloadable_features.new_http1_connection_pool_behavior")) { - return std::make_unique(dispatcher, host, priority, options, - transport_socket_options); - } else { - return std::make_unique( - dispatcher, host, priority, options, transport_socket_options); - } + return std::make_unique(dispatcher, host, priority, options, + transport_socket_options); } } // namespace Http1 diff --git a/source/common/http/http1/conn_pool.h b/source/common/http/http1/conn_pool.h index 0d9665f184462..c7538705a6ffc 100644 --- a/source/common/http/http1/conn_pool.h +++ b/source/common/http/http1/conn_pool.h @@ -17,7 +17,7 @@ namespace Http1 { * address. Higher layer code should handle resolving DNS on error and creating a new pool * bound to a different IP address. 
*/ -class ConnPoolImpl : public ConnPoolImplBase { +class ConnPoolImpl : public Http::HttpConnPoolImplBase { public: ConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, @@ -30,10 +30,10 @@ class ConnPoolImpl : public ConnPoolImplBase { Http::Protocol protocol() const override { return Http::Protocol::Http11; } // ConnPoolImplBase - ActiveClientPtr instantiateActiveClient() override; + Envoy::ConnectionPool::ActiveClientPtr instantiateActiveClient() override; protected: - struct ActiveClient; + class ActiveClient; struct StreamWrapper : public RequestEncoderWrapper, public ResponseDecoderWrapper, @@ -64,13 +64,13 @@ class ConnPoolImpl : public ConnPoolImplBase { using StreamWrapperPtr = std::unique_ptr; - struct ActiveClient : public ConnPoolImplBase::ActiveClient { + class ActiveClient : public Envoy::Http::ActiveClient { + public: ActiveClient(ConnPoolImpl& parent); ConnPoolImpl& parent() { return static_cast(parent_); } // ConnPoolImplBase::ActiveClient - bool hasActiveRequests() const override; bool closingWithIncompleteRequest() const override; RequestEncoder& newStreamEncoder(ResponseDecoder& response_decoder) override; @@ -79,10 +79,8 @@ class ConnPoolImpl : public ConnPoolImplBase { void onDownstreamReset(ActiveClient& client); void onResponseComplete(ActiveClient& client); - ActiveClient& firstReady() const { return static_cast(*ready_clients_.front()); } - ActiveClient& firstBusy() const { return static_cast(*busy_clients_.front()); } - Event::TimerPtr upstream_ready_timer_; + Event::SchedulableCallbackPtr upstream_ready_cb_; bool upstream_ready_enabled_{false}; }; diff --git a/source/common/http/http1/conn_pool_legacy.cc b/source/common/http/http1/conn_pool_legacy.cc deleted file mode 100644 index da834c2c104a3..0000000000000 --- a/source/common/http/http1/conn_pool_legacy.cc +++ /dev/null @@ -1,356 +0,0 @@ -#include "common/http/http1/conn_pool_legacy.h" - -#include -#include -#include - 
-#include "envoy/event/dispatcher.h" -#include "envoy/event/timer.h" -#include "envoy/http/codec.h" -#include "envoy/http/header_map.h" -#include "envoy/upstream/upstream.h" - -#include "common/common/utility.h" -#include "common/http/codec_client.h" -#include "common/http/codes.h" -#include "common/http/headers.h" -#include "common/network/utility.h" -#include "common/stats/timespan_impl.h" -#include "common/upstream/upstream_impl.h" - -#include "absl/strings/match.h" - -namespace Envoy { -namespace Http { -namespace Legacy { -namespace Http1 { - -ConnPoolImpl::ConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr host, - Upstream::ResourcePriority priority, - const Network::ConnectionSocket::OptionsSharedPtr& options, - const Network::TransportSocketOptionsSharedPtr& transport_socket_options) - : ConnPoolImplBase(std::move(host), std::move(priority)), dispatcher_(dispatcher), - socket_options_(options), transport_socket_options_(transport_socket_options), - upstream_ready_timer_(dispatcher_.createTimer([this]() { onUpstreamReady(); })) {} - -ConnPoolImpl::~ConnPoolImpl() { - while (!ready_clients_.empty()) { - ready_clients_.front()->codec_client_->close(); - } - - while (!busy_clients_.empty()) { - busy_clients_.front()->codec_client_->close(); - } - - // Make sure all clients are destroyed before we are destroyed. - dispatcher_.clearDeferredDeleteList(); -} - -void ConnPoolImpl::drainConnections() { - while (!ready_clients_.empty()) { - ready_clients_.front()->codec_client_->close(); - } - - // We drain busy clients by manually setting remaining requests to 1. Thus, when the next - // response completes the client will be destroyed. 
- for (const auto& client : busy_clients_) { - client->remaining_requests_ = 1; - } -} - -void ConnPoolImpl::addDrainedCallback(DrainedCb cb) { - drained_callbacks_.push_back(cb); - checkForDrained(); -} - -bool ConnPoolImpl::hasActiveConnections() const { - return !pending_requests_.empty() || !busy_clients_.empty(); -} - -void ConnPoolImpl::attachRequestToClient(ActiveClient& client, ResponseDecoder& response_decoder, - ConnectionPool::Callbacks& callbacks) { - ASSERT(!client.stream_wrapper_); - host_->cluster().stats().upstream_rq_total_.inc(); - host_->stats().rq_total_.inc(); - client.stream_wrapper_ = std::make_unique(response_decoder, client); - callbacks.onPoolReady(*client.stream_wrapper_, client.real_host_description_, - client.codec_client_->streamInfo()); -} - -void ConnPoolImpl::checkForDrained() { - if (!drained_callbacks_.empty() && pending_requests_.empty() && busy_clients_.empty()) { - while (!ready_clients_.empty()) { - ready_clients_.front()->codec_client_->close(); - } - - for (const DrainedCb& cb : drained_callbacks_) { - cb(); - } - } -} - -void ConnPoolImpl::createNewConnection() { - ENVOY_LOG(debug, "creating a new connection"); - ActiveClientPtr client(new ActiveClient(*this)); - client->moveIntoList(std::move(client), busy_clients_); -} - -ConnectionPool::Cancellable* ConnPoolImpl::newStream(ResponseDecoder& response_decoder, - ConnectionPool::Callbacks& callbacks) { - if (!ready_clients_.empty()) { - ready_clients_.front()->moveBetweenLists(ready_clients_, busy_clients_); - ENVOY_CONN_LOG(debug, "using existing connection", *busy_clients_.front()->codec_client_); - attachRequestToClient(*busy_clients_.front(), response_decoder, callbacks); - return nullptr; - } - - if (host_->cluster().resourceManager(priority_).pendingRequests().canCreate()) { - bool can_create_connection = - host_->cluster().resourceManager(priority_).connections().canCreate(); - if (!can_create_connection) { - host_->cluster().stats().upstream_cx_overflow_.inc(); - } - 
- // If we have no connections at all, make one no matter what so we don't starve. - if ((ready_clients_.empty() && busy_clients_.empty()) || can_create_connection) { - createNewConnection(); - } - - return newPendingRequest(response_decoder, callbacks); - } else { - ENVOY_LOG(debug, "max pending requests overflow"); - callbacks.onPoolFailure(ConnectionPool::PoolFailureReason::Overflow, absl::string_view(), - nullptr); - host_->cluster().stats().upstream_rq_pending_overflow_.inc(); - return nullptr; - } -} - -void ConnPoolImpl::onConnectionEvent(ActiveClient& client, Network::ConnectionEvent event) { - if (event == Network::ConnectionEvent::RemoteClose || - event == Network::ConnectionEvent::LocalClose) { - // The client died. - ENVOY_CONN_LOG(debug, "client disconnected, failure reason: {}", *client.codec_client_, - client.codec_client_->connectionFailureReason()); - - Envoy::Upstream::reportUpstreamCxDestroy(host_, event); - ActiveClientPtr removed; - bool check_for_drained = true; - if (client.stream_wrapper_) { - if (!client.stream_wrapper_->decode_complete_) { - Envoy::Upstream::reportUpstreamCxDestroyActiveRequest(host_, event); - } - - // There is an active request attached to this client. The underlying codec client will - // already have "reset" the stream to fire the reset callback. All we do here is just - // destroy the client. - removed = client.removeFromList(busy_clients_); - } else if (client.connectionState() == - ConnPoolImplBase::ActiveClient::ConnectionState::Connected) { - removed = client.removeFromList(ready_clients_); - check_for_drained = false; - } else { - // The only time this happens is if we actually saw a connect failure. - host_->cluster().stats().upstream_cx_connect_fail_.inc(); - host_->stats().cx_connect_fail_.inc(); - - removed = client.removeFromList(busy_clients_); - - // Raw connect failures should never happen under normal circumstances. 
If we have an upstream - // that is behaving badly, requests can get stuck here in the pending state. If we see a - // connect failure, we purge all pending requests so that calling code can determine what to - // do with the request. - ENVOY_CONN_LOG(debug, "purge pending, failure reason: {}", *client.codec_client_, - client.codec_client_->connectionFailureReason()); - purgePendingRequests(client.real_host_description_, - client.codec_client_->connectionFailureReason(), - event == Network::ConnectionEvent::RemoteClose); - } - - dispatcher_.deferredDelete(std::move(removed)); - - // If we have pending requests and we just lost a connection we should make a new one. - if (pending_requests_.size() > (ready_clients_.size() + busy_clients_.size())) { - createNewConnection(); - } - - if (check_for_drained) { - checkForDrained(); - } - } - - client.disarmConnectTimeout(); - - // Note that the order in this function is important. Concretely, we must destroy the connect - // timer before we process a connected idle client, because if this results in an immediate - // drain/destruction event, we key off of the existence of the connect timer above to determine - // whether the client is in the ready list (connected) or the busy list (failed to connect). - if (event == Network::ConnectionEvent::Connected) { - client.recordConnectionSetup(); - processIdleClient(client, false); - } -} - -void ConnPoolImpl::onDownstreamReset(ActiveClient& client) { - // If we get a downstream reset to an attached client, we just blow it away. 
- client.codec_client_->close(); -} - -void ConnPoolImpl::onResponseComplete(ActiveClient& client) { - ENVOY_CONN_LOG(debug, "response complete", *client.codec_client_); - if (!client.stream_wrapper_->encode_complete_) { - ENVOY_CONN_LOG(debug, "response before request complete", *client.codec_client_); - onDownstreamReset(client); - } else if (client.stream_wrapper_->close_connection_ || client.codec_client_->remoteClosed()) { - ENVOY_CONN_LOG(debug, "saw upstream close connection", *client.codec_client_); - onDownstreamReset(client); - } else if (client.remaining_requests_ > 0 && --client.remaining_requests_ == 0) { - ENVOY_CONN_LOG(debug, "maximum requests per connection", *client.codec_client_); - host_->cluster().stats().upstream_cx_max_requests_.inc(); - onDownstreamReset(client); - } else { - // Upstream connection might be closed right after response is complete. Setting delay=true - // here to attach pending requests in next dispatcher loop to handle that case. - // https://github.com/envoyproxy/envoy/issues/2715 - processIdleClient(client, true); - } -} - -void ConnPoolImpl::onUpstreamReady() { - upstream_ready_enabled_ = false; - while (!pending_requests_.empty() && !ready_clients_.empty()) { - ActiveClient& client = *ready_clients_.front(); - ENVOY_CONN_LOG(debug, "attaching to next request", *client.codec_client_); - // There is work to do so bind a request to the client and move it to the busy list. Pending - // requests are pushed onto the front, so pull from the back. - attachRequestToClient(client, pending_requests_.back()->decoder_, - pending_requests_.back()->callbacks_); - pending_requests_.pop_back(); - client.moveBetweenLists(ready_clients_, busy_clients_); - } -} - -void ConnPoolImpl::processIdleClient(ActiveClient& client, bool delay) { - client.stream_wrapper_.reset(); - if (pending_requests_.empty() || delay) { - // There is nothing to service or delayed processing is requested, so just move the connection - // into the ready list. 
- ENVOY_CONN_LOG(debug, "moving to ready", *client.codec_client_); - client.moveBetweenLists(busy_clients_, ready_clients_); - } else { - // There is work to do immediately so bind a request to the client and move it to the busy list. - // Pending requests are pushed onto the front, so pull from the back. - ENVOY_CONN_LOG(debug, "attaching to next request", *client.codec_client_); - attachRequestToClient(client, pending_requests_.back()->decoder_, - pending_requests_.back()->callbacks_); - pending_requests_.pop_back(); - } - - if (delay && !pending_requests_.empty() && !upstream_ready_enabled_) { - upstream_ready_enabled_ = true; - upstream_ready_timer_->enableTimer(std::chrono::milliseconds(0)); - } - - checkForDrained(); -} - -ConnPoolImpl::StreamWrapper::StreamWrapper(ResponseDecoder& response_decoder, ActiveClient& parent) - : RequestEncoderWrapper(parent.codec_client_->newStream(*this)), - ResponseDecoderWrapper(response_decoder), parent_(parent) { - - RequestEncoderWrapper::inner_.getStream().addCallbacks(*this); - parent_.parent_.host_->cluster().stats().upstream_rq_active_.inc(); - parent_.parent_.host_->stats().rq_active_.inc(); - - // TODO (tonya11en): At the time of writing, there is no way to mix different versions of HTTP - // traffic in the same cluster, so incrementing the request count in the per-cluster resource - // manager will not affect circuit breaking in any unexpected ways. Ideally, outstanding requests - // counts would be tracked the same way in all HTTP versions. 
- // - // See: https://github.com/envoyproxy/envoy/issues/9215 - parent_.parent_.host_->cluster().resourceManager(parent_.parent_.priority_).requests().inc(); -} - -ConnPoolImpl::StreamWrapper::~StreamWrapper() { - parent_.parent_.host_->cluster().stats().upstream_rq_active_.dec(); - parent_.parent_.host_->stats().rq_active_.dec(); - parent_.parent_.host_->cluster().resourceManager(parent_.parent_.priority_).requests().dec(); -} - -void ConnPoolImpl::StreamWrapper::onEncodeComplete() { encode_complete_ = true; } - -void ConnPoolImpl::StreamWrapper::decodeHeaders(ResponseHeaderMapPtr&& headers, bool end_stream) { - // If Connection: close OR - // Http/1.0 and not Connection: keep-alive OR - // Proxy-Connection: close - if ((headers->Connection() && - (absl::EqualsIgnoreCase(headers->Connection()->value().getStringView(), - Headers::get().ConnectionValues.Close))) || - (parent_.codec_client_->protocol() == Protocol::Http10 && - (!headers->Connection() || - !absl::EqualsIgnoreCase(headers->Connection()->value().getStringView(), - Headers::get().ConnectionValues.KeepAlive))) || - (headers->ProxyConnection() && - (absl::EqualsIgnoreCase(headers->ProxyConnection()->value().getStringView(), - Headers::get().ConnectionValues.Close)))) { - parent_.parent_.host_->cluster().stats().upstream_cx_close_notify_.inc(); - close_connection_ = true; - } - - ResponseDecoderWrapper::decodeHeaders(std::move(headers), end_stream); -} - -void ConnPoolImpl::StreamWrapper::onDecodeComplete() { - decode_complete_ = encode_complete_; - parent_.parent_.onResponseComplete(parent_); -} - -ConnPoolImpl::ActiveClient::ActiveClient(ConnPoolImpl& parent) - : ConnPoolImplBase::ActiveClient(parent.dispatcher_, parent.host_->cluster()), parent_(parent), - remaining_requests_(parent_.host_->cluster().maxRequestsPerConnection()) { - - Upstream::Host::CreateConnectionData data = parent_.host_->createConnection( - parent_.dispatcher_, parent_.socket_options_, parent_.transport_socket_options_); - 
real_host_description_ = data.host_description_; - codec_client_ = parent_.createCodecClient(data); - codec_client_->addConnectionCallbacks(*this); - - parent_.host_->cluster().stats().upstream_cx_total_.inc(); - parent_.host_->cluster().stats().upstream_cx_active_.inc(); - parent_.host_->cluster().stats().upstream_cx_http1_total_.inc(); - parent_.host_->stats().cx_total_.inc(); - parent_.host_->stats().cx_active_.inc(); - parent_.host_->cluster().resourceManager(parent_.priority_).connections().inc(); - - codec_client_->setConnectionStats( - {parent_.host_->cluster().stats().upstream_cx_rx_bytes_total_, - parent_.host_->cluster().stats().upstream_cx_rx_bytes_buffered_, - parent_.host_->cluster().stats().upstream_cx_tx_bytes_total_, - parent_.host_->cluster().stats().upstream_cx_tx_bytes_buffered_, - &parent_.host_->cluster().stats().bind_errors_, nullptr}); -} - -ConnPoolImpl::ActiveClient::~ActiveClient() { - parent_.host_->cluster().stats().upstream_cx_active_.dec(); - parent_.host_->stats().cx_active_.dec(); - parent_.host_->cluster().resourceManager(parent_.priority_).connections().dec(); -} - -void ConnPoolImpl::ActiveClient::onConnectTimeout() { - // We just close the client at this point. This will result in both a timeout and a connect - // failure and will fold into all the normal connect failure logic. 
- ENVOY_CONN_LOG(debug, "connect timeout", *codec_client_); - parent_.host_->cluster().stats().upstream_cx_connect_timeout_.inc(); - codec_client_->close(); -} - -CodecClientPtr ProdConnPoolImpl::createCodecClient(Upstream::Host::CreateConnectionData& data) { - CodecClientPtr codec{new CodecClientProd(CodecClient::Type::HTTP1, std::move(data.connection_), - data.host_description_, dispatcher_)}; - return codec; -} - -} // namespace Http1 -} // namespace Legacy -} // namespace Http -} // namespace Envoy diff --git a/source/common/http/http1/conn_pool_legacy.h b/source/common/http/http1/conn_pool_legacy.h deleted file mode 100644 index be76eb5e77695..0000000000000 --- a/source/common/http/http1/conn_pool_legacy.h +++ /dev/null @@ -1,149 +0,0 @@ -#pragma once - -#include -#include -#include - -#include "envoy/event/deferred_deletable.h" -#include "envoy/event/timer.h" -#include "envoy/http/codec.h" -#include "envoy/http/conn_pool.h" -#include "envoy/network/connection.h" -#include "envoy/stats/timespan.h" -#include "envoy/upstream/upstream.h" - -#include "common/common/linked_object.h" -#include "common/http/codec_client.h" -#include "common/http/codec_wrappers.h" -#include "common/http/conn_pool_base_legacy.h" - -#include "absl/types/optional.h" - -namespace Envoy { -namespace Http { -namespace Legacy { -namespace Http1 { - -/** - * A connection pool implementation for HTTP/1.1 connections. - * NOTE: The connection pool does NOT do DNS resolution. It assumes it is being given a numeric IP - * address. Higher layer code should handle resolving DNS on error and creating a new pool - * bound to a different IP address. 
- */ -class ConnPoolImpl : public ConnectionPool::Instance, public Legacy::ConnPoolImplBase { -public: - ConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr host, - Upstream::ResourcePriority priority, - const Network::ConnectionSocket::OptionsSharedPtr& options, - const Network::TransportSocketOptionsSharedPtr& transport_socket_options); - - ~ConnPoolImpl() override; - - // ConnectionPool::Instance - Http::Protocol protocol() const override { return Http::Protocol::Http11; } - void addDrainedCallback(DrainedCb cb) override; - void drainConnections() override; - bool hasActiveConnections() const override; - ConnectionPool::Cancellable* newStream(ResponseDecoder& response_decoder, - ConnectionPool::Callbacks& callbacks) override; - Upstream::HostDescriptionConstSharedPtr host() const override { return host_; }; - - // ConnPoolImplBase - void checkForDrained() override; - -protected: - struct ActiveClient; - - struct StreamWrapper : public RequestEncoderWrapper, - public ResponseDecoderWrapper, - public StreamCallbacks { - StreamWrapper(ResponseDecoder& response_decoder, ActiveClient& parent); - ~StreamWrapper() override; - - // StreamEncoderWrapper - void onEncodeComplete() override; - - // StreamDecoderWrapper - void decodeHeaders(ResponseHeaderMapPtr&& headers, bool end_stream) override; - void onPreDecodeComplete() override {} - void onDecodeComplete() override; - - // Http::StreamCallbacks - void onResetStream(StreamResetReason, absl::string_view) override { - parent_.parent_.onDownstreamReset(parent_); - } - void onAboveWriteBufferHighWatermark() override {} - void onBelowWriteBufferLowWatermark() override {} - - ActiveClient& parent_; - bool encode_complete_{}; - bool close_connection_{}; - bool decode_complete_{}; - }; - - using StreamWrapperPtr = std::unique_ptr; - - struct ActiveClient : ConnPoolImplBase::ActiveClient, - LinkedObject, - public Network::ConnectionCallbacks, - public Event::DeferredDeletable { - ActiveClient(ConnPoolImpl& 
parent); - ~ActiveClient() override; - - void onConnectTimeout() override; - - // Network::ConnectionCallbacks - void onEvent(Network::ConnectionEvent event) override { - parent_.onConnectionEvent(*this, event); - } - void onAboveWriteBufferHighWatermark() override {} - void onBelowWriteBufferLowWatermark() override {} - - ConnPoolImpl& parent_; - CodecClientPtr codec_client_; - Upstream::HostDescriptionConstSharedPtr real_host_description_; - StreamWrapperPtr stream_wrapper_; - uint64_t remaining_requests_; - }; - - using ActiveClientPtr = std::unique_ptr; - - void attachRequestToClient(ActiveClient& client, ResponseDecoder& response_decoder, - ConnectionPool::Callbacks& callbacks); - virtual CodecClientPtr createCodecClient(Upstream::Host::CreateConnectionData& data) PURE; - void createNewConnection(); - void onConnectionEvent(ActiveClient& client, Network::ConnectionEvent event); - void onDownstreamReset(ActiveClient& client); - void onResponseComplete(ActiveClient& client); - void onUpstreamReady(); - void processIdleClient(ActiveClient& client, bool delay); - - Event::Dispatcher& dispatcher_; - std::list ready_clients_; - std::list busy_clients_; - std::list drained_callbacks_; - const Network::ConnectionSocket::OptionsSharedPtr socket_options_; - const Network::TransportSocketOptionsSharedPtr transport_socket_options_; - Event::TimerPtr upstream_ready_timer_; - bool upstream_ready_enabled_{false}; -}; - -/** - * Production implementation of the ConnPoolImpl. 
- */ -class ProdConnPoolImpl : public ConnPoolImpl { -public: - ProdConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr host, - Upstream::ResourcePriority priority, - const Network::ConnectionSocket::OptionsSharedPtr& options, - const Network::TransportSocketOptionsSharedPtr& transport_socket_options) - : ConnPoolImpl(dispatcher, host, priority, options, transport_socket_options) {} - - // ConnPoolImpl - CodecClientPtr createCodecClient(Upstream::Host::CreateConnectionData& data) override; -}; - -} // namespace Http1 -} // namespace Legacy -} // namespace Http -} // namespace Envoy diff --git a/source/common/http/http2/BUILD b/source/common/http/http2/BUILD index 0790e3ee9cf5a..5ccf63147d5f6 100644 --- a/source/common/http/http2/BUILD +++ b/source/common/http/http2/BUILD @@ -1,13 +1,54 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() +envoy_cc_library( + name = "codec_stats_lib", + hdrs = ["codec_stats.h"], + deps = [ + "//include/envoy/stats:stats_interface", + "//include/envoy/stats:stats_macros", + "//source/common/common:thread_lib", + ], +) + +CODEC_LIB_DEPS = [ + ":codec_stats_lib", + ":metadata_decoder_lib", + ":metadata_encoder_lib", + "//include/envoy/event:deferred_deletable", + "//include/envoy/event:dispatcher_interface", + "//include/envoy/http:codec_interface", + "//include/envoy/http:codes_interface", + "//include/envoy/http:header_map_interface", + "//include/envoy/network:connection_interface", + "//include/envoy/stats:stats_interface", + "//source/common/buffer:buffer_lib", + "//source/common/buffer:watermark_buffer_lib", + "//source/common/common:assert_lib", + "//source/common/common:enum_to_int", + "//source/common/common:linked_object", + "//source/common/common:minimal_logger_lib", + "//source/common/common:statusor_lib", + "//source/common/common:utility_lib", + "//source/common/http:codec_helper_lib", + 
"//source/common/http:codes_lib", + "//source/common/http:exception_lib", + "//source/common/http:header_map_lib", + "//source/common/http:header_utility_lib", + "//source/common/http:headers_lib", + "//source/common/http:status_lib", + "//source/common/http:utility_lib", + "//source/common/runtime:runtime_features_lib", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", +] + envoy_cc_library( name = "codec_lib", srcs = ["codec_impl.cc"], @@ -18,34 +59,23 @@ envoy_cc_library( "abseil_inlined_vector", "abseil_algorithm", ], - deps = [ - ":metadata_decoder_lib", - ":metadata_encoder_lib", - "//include/envoy/event:deferred_deletable", - "//include/envoy/event:dispatcher_interface", - "//include/envoy/http:codec_interface", - "//include/envoy/http:codes_interface", - "//include/envoy/http:header_map_interface", - "//include/envoy/network:connection_interface", - "//include/envoy/stats:stats_interface", - "//include/envoy/stats:stats_macros", - "//source/common/buffer:buffer_lib", - "//source/common/buffer:watermark_buffer_lib", - "//source/common/common:assert_lib", - "//source/common/common:enum_to_int", - "//source/common/common:linked_object", - "//source/common/common:minimal_logger_lib", - "//source/common/common:utility_lib", - "//source/common/http:codec_helper_lib", - "//source/common/http:codes_lib", - "//source/common/http:exception_lib", - "//source/common/http:header_map_lib", - "//source/common/http:header_utility_lib", - "//source/common/http:headers_lib", - "//source/common/http:utility_lib", - "//source/common/runtime:runtime_features_lib", - "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + deps = CODEC_LIB_DEPS, +) + +envoy_cc_library( + name = "codec_legacy_lib", + srcs = ["codec_impl_legacy.cc"], + hdrs = [ + "codec_impl.h", + "codec_impl_legacy.h", + ], + external_deps = [ + "nghttp2", + "abseil_optional", + "abseil_inlined_vector", + "abseil_algorithm", ], + deps = CODEC_LIB_DEPS, ) # Separate library for some nghttp2 setup stuff to avoid having 
tests take a @@ -60,31 +90,11 @@ envoy_cc_library( ], ) -envoy_cc_library( - name = "conn_pool_legacy_lib", - srcs = ["conn_pool_legacy.cc"], - hdrs = ["conn_pool_legacy.h"], - deps = [ - "//include/envoy/event:dispatcher_interface", - "//include/envoy/event:timer_interface", - "//include/envoy/http:conn_pool_interface", - "//include/envoy/network:connection_interface", - "//include/envoy/stats:timespan_interface", - "//include/envoy/upstream:upstream_interface", - "//source/common/http:codec_client_lib", - "//source/common/http:conn_pool_base_legacy_lib", - "//source/common/network:utility_lib", - "//source/common/stats:timespan_lib", - "//source/common/upstream:upstream_lib", - ], -) - envoy_cc_library( name = "conn_pool_lib", srcs = ["conn_pool.cc"], hdrs = ["conn_pool.h"], deps = [ - ":conn_pool_legacy_lib", "//include/envoy/event:dispatcher_interface", "//include/envoy/upstream:upstream_interface", "//source/common/http:codec_client_lib", diff --git a/source/common/http/http2/codec_impl.cc b/source/common/http/http2/codec_impl.cc index 721da9af8431c..894b80b628dfc 100644 --- a/source/common/http/http2/codec_impl.cc +++ b/source/common/http/http2/codec_impl.cc @@ -8,7 +8,6 @@ #include "envoy/http/codes.h" #include "envoy/http/header_map.h" #include "envoy/network/connection.h" -#include "envoy/stats/scope.h" #include "common/common/assert.h" #include "common/common/cleanup.h" @@ -19,7 +18,9 @@ #include "common/http/exception.h" #include "common/http/header_utility.h" #include "common/http/headers.h" +#include "common/http/http2/codec_stats.h" #include "common/http/utility.h" +#include "common/runtime/runtime_features.h" #include "absl/container/fixed_array.h" @@ -28,25 +29,31 @@ namespace Http { namespace Http2 { class Http2ResponseCodeDetailValues { +public: // Invalid HTTP header field was received and stream is going to be // closed. 
- const absl::string_view NgHttp2ErrHttpHeader = "http2.invalid.header.field"; - + const absl::string_view ng_http2_err_http_header_ = "http2.invalid.header.field"; // Violation in HTTP messaging rule. - const absl::string_view NgHttp2ErrHttpMessaging = "http2.violation.of.messaging.rule"; - + const absl::string_view ng_http2_err_http_messaging_ = "http2.violation.of.messaging.rule"; // none of the above - const absl::string_view NgHttp2ErrUnknown = "http2.unknown.nghttp2.error"; - -public: - const absl::string_view strerror(int error_code) const { + const absl::string_view ng_http2_err_unknown_ = "http2.unknown.nghttp2.error"; + // The number of headers (or trailers) exceeded the configured limits + const absl::string_view too_many_headers = "http2.too_many_headers"; + // Envoy detected an HTTP/2 frame flood from the server. + const absl::string_view outbound_frame_flood = "http2.outbound_frames_flood"; + // Envoy detected an inbound HTTP/2 frame flood. + const absl::string_view inbound_empty_frame_flood = "http2.inbound_empty_frames_flood"; + // Envoy was configured to drop requests with header keys beginning with underscores. + const absl::string_view invalid_underscore = "http2.unexpected_underscore"; + + const absl::string_view errorDetails(int error_code) const { switch (error_code) { case NGHTTP2_ERR_HTTP_HEADER: - return NgHttp2ErrHttpHeader; + return ng_http2_err_http_header_; case NGHTTP2_ERR_HTTP_MESSAGING: - return NgHttp2ErrHttpMessaging; + return ng_http2_err_http_messaging_; default: - return NgHttp2ErrUnknown; + return ng_http2_err_unknown_; } } }; @@ -87,20 +94,29 @@ void ProdNghttp2SessionFactory::init(nghttp2_session*, ConnectionImpl* connectio * Helper to remove const during a cast. nghttp2 takes non-const pointers for headers even though * it copies them. 
*/ -template static T* remove_const(const void* object) { +template static T* removeConst(const void* object) { return const_cast(reinterpret_cast(object)); } ConnectionImpl::StreamImpl::StreamImpl(ConnectionImpl& parent, uint32_t buffer_limit) : parent_(parent), local_end_stream_sent_(false), remote_end_stream_(false), - data_deferred_(false), waiting_for_non_informational_headers_(false), + data_deferred_(false), received_noninformational_headers_(false), pending_receive_buffer_high_watermark_called_(false), pending_send_buffer_high_watermark_called_(false), reset_due_to_messaging_error_(false) { + parent_.stats_.streams_active_.inc(); if (buffer_limit > 0) { setWriteBufferWatermarks(buffer_limit / 2, buffer_limit); } } +ConnectionImpl::StreamImpl::~StreamImpl() { ASSERT(stream_idle_timer_ == nullptr); } + +void ConnectionImpl::StreamImpl::destroy() { + disarmStreamIdleTimer(); + parent_.stats_.streams_active_.dec(); + parent_.stats_.pending_send_bytes_.sub(pending_send_data_.length()); +} + static void insertHeader(std::vector& headers, const HeaderEntry& header) { uint8_t flags = 0; if (header.key().isReference()) { @@ -111,21 +127,18 @@ static void insertHeader(std::vector& headers, const HeaderEntry& he } const absl::string_view header_key = header.key().getStringView(); const absl::string_view header_value = header.value().getStringView(); - headers.push_back({remove_const(header_key.data()), - remove_const(header_value.data()), header_key.size(), + headers.push_back({removeConst(header_key.data()), + removeConst(header_value.data()), header_key.size(), header_value.size(), flags}); } void ConnectionImpl::StreamImpl::buildHeaders(std::vector& final_headers, const HeaderMap& headers) { final_headers.reserve(headers.size()); - headers.iterate( - [](const HeaderEntry& header, void* context) -> HeaderMap::Iterate { - std::vector* final_headers = static_cast*>(context); - insertHeader(*final_headers, header); - return HeaderMap::Iterate::Continue; - }, - 
&final_headers); + headers.iterate([&final_headers](const HeaderEntry& header) -> HeaderMap::Iterate { + insertHeader(final_headers, header); + return HeaderMap::Iterate::Continue; + }); } void ConnectionImpl::ServerStreamImpl::encode100ContinueHeaders(const ResponseHeaderMap& headers) { @@ -147,7 +160,16 @@ void ConnectionImpl::StreamImpl::encodeHeadersBase(const std::vector local_end_stream_ = end_stream; submitHeaders(final_headers, end_stream ? nullptr : &provider); - parent_.sendPendingFrames(); + auto status = parent_.sendPendingFrames(); + // The RELEASE_ASSERT below does not change the existing behavior of `sendPendingFrames()`. + // The `sendPendingFrames()` used to throw on errors and the only method that was catching + // these exceptions was the `dispatch()`. The `dispatch()` method still checks and handles + // errors returned by the `sendPendingFrames()`. + // Other callers of `sendPendingFrames()` do not catch exceptions from this method and + // would cause abnormal process termination in error cases. This change replaces abnormal + // process termination from unhandled exception with the RELEASE_ASSERT. + // Further work will replace this RELEASE_ASSERT with proper error handling. 
+ RELEASE_ASSERT(status.ok(), "sendPendingFrames() failure in non dispatching context"); } void ConnectionImpl::ClientStreamImpl::encodeHeaders(const RequestHeaderMap& headers, @@ -158,12 +180,21 @@ void ConnectionImpl::ClientStreamImpl::encodeHeaders(const RequestHeaderMap& hea Http::RequestHeaderMapPtr modified_headers; if (Http::Utility::isUpgrade(headers)) { modified_headers = createHeaderMap(headers); - upgrade_type_ = std::string(headers.Upgrade()->value().getStringView()); + upgrade_type_ = std::string(headers.getUpgradeValue()); Http::Utility::transformUpgradeRequestFromH1toH2(*modified_headers); buildHeaders(final_headers, *modified_headers); } else if (headers.Method() && headers.Method()->value() == "CONNECT") { + // If this is not an upgrade style connect (above branch) it is a bytestream + // connect and should have :path and :protocol set accordingly + // As HTTP/1.1 does not require a path for CONNECT, we may have to add one + // if shifting codecs. For now, default to "/" - this can be made + // configurable if necessary. + // https://tools.ietf.org/html/draft-kinnear-httpbis-http2-transport-02 modified_headers = createHeaderMap(headers); modified_headers->setProtocol(Headers::get().ProtocolValues.Bytestream); + if (!headers.Path()) { + modified_headers->setPath("/"); + } buildHeaders(final_headers, *modified_headers); } else { buildHeaders(final_headers, headers); @@ -197,10 +228,13 @@ void ConnectionImpl::StreamImpl::encodeTrailersBase(const HeaderMap& trailers) { // In this case we want trailers to come after we release all pending body data that is // waiting on window updates. We need to save the trailers so that we can emit them later. 
ASSERT(!pending_trailers_to_encode_); - pending_trailers_to_encode_ = createHeaderMap(trailers); + pending_trailers_to_encode_ = cloneTrailers(trailers); + createPendingFlushTimer(); } else { submitTrailers(trailers); - parent_.sendPendingFrames(); + auto status = parent_.sendPendingFrames(); + // See comment in the `encodeHeadersBase()` method about this RELEASE_ASSERT. + RELEASE_ASSERT(status.ok(), "sendPendingFrames() failure in non dispatching context"); } } @@ -213,7 +247,9 @@ void ConnectionImpl::StreamImpl::encodeMetadata(const MetadataMapVector& metadat for (uint8_t flags : metadata_encoder.payloadFrameFlagBytes()) { submitMetadata(flags); } - parent_.sendPendingFrames(); + auto status = parent_.sendPendingFrames(); + // See comment in the `encodeHeadersBase()` method about this RELEASE_ASSERT. + RELEASE_ASSERT(status.ok(), "sendPendingFrames() failure in non dispatching context"); } void ConnectionImpl::StreamImpl::readDisable(bool disable) { @@ -225,10 +261,12 @@ void ConnectionImpl::StreamImpl::readDisable(bool disable) { } else { ASSERT(read_disable_count_ > 0); --read_disable_count_; - if (!buffers_overrun()) { + if (!buffersOverrun()) { nghttp2_session_consume(parent_.session_, stream_id_, unconsumed_bytes_); unconsumed_bytes_ = 0; - parent_.sendPendingFrames(); + auto status = parent_.sendPendingFrames(); + // See comment in the `encodeHeadersBase()` method about this RELEASE_ASSERT. 
+ RELEASE_ASSERT(status.ok(), "sendPendingFrames() failure in non dispatching context"); } } } @@ -247,18 +285,20 @@ void ConnectionImpl::StreamImpl::pendingRecvBufferLowWatermark() { readDisable(false); } -void ConnectionImpl::ClientStreamImpl::decodeHeaders(bool allow_waiting_for_informational_headers) { +void ConnectionImpl::ClientStreamImpl::decodeHeaders() { auto& headers = absl::get(headers_or_trailers_); - if (allow_waiting_for_informational_headers && - CodeUtility::is1xx(Http::Utility::getResponseStatus(*headers))) { - waiting_for_non_informational_headers_ = true; - } + const uint64_t status = Http::Utility::getResponseStatus(*headers); if (!upgrade_type_.empty() && headers->Status()) { Http::Utility::transformUpgradeResponseFromH2toH1(*headers, upgrade_type_); } - if (headers->Status()->value() == "100") { + // Non-informational headers are non-1xx OR 101-SwitchingProtocols, since 101 implies that further + // proxying is on an upgrade path. + received_noninformational_headers_ = + !CodeUtility::is1xx(status) || status == enumToInt(Http::Code::SwitchingProtocols); + + if (status == enumToInt(Http::Code::Continue)) { ASSERT(!remote_end_stream_); response_decoder_.decode100ContinueHeaders(std::move(headers)); } else { @@ -271,8 +311,7 @@ void ConnectionImpl::ClientStreamImpl::decodeTrailers() { std::move(absl::get(headers_or_trailers_))); } -void ConnectionImpl::ServerStreamImpl::decodeHeaders(bool allow_waiting_for_informational_headers) { - ASSERT(!allow_waiting_for_informational_headers); +void ConnectionImpl::ServerStreamImpl::decodeHeaders() { auto& headers = absl::get(headers_or_trailers_); if (Http::Utility::isH2UpgradeRequest(*headers)) { Http::Utility::transformUpgradeRequestFromH2toH1(*headers); @@ -306,10 +345,22 @@ void ConnectionImpl::StreamImpl::saveHeader(HeaderString&& name, HeaderString&& } void ConnectionImpl::StreamImpl::submitTrailers(const HeaderMap& trailers) { + ASSERT(local_end_stream_); + const bool skip_encoding_empty_trailers = + 
trailers.empty() && parent_.skip_encoding_empty_trailers_; + if (skip_encoding_empty_trailers) { + ENVOY_CONN_LOG(debug, "skipping submitting trailers", parent_.connection_); + + // Instead of submitting empty trailers, we send empty data instead. + Buffer::OwnedImpl empty_buffer; + encodeDataHelper(empty_buffer, /*end_stream=*/true, skip_encoding_empty_trailers); + return; + } + std::vector final_headers; buildHeaders(final_headers, trailers); - int rc = - nghttp2_submit_trailer(parent_.session_, stream_id_, &final_headers[0], final_headers.size()); + int rc = nghttp2_submit_trailer(parent_.session_, stream_id_, final_headers.data(), + final_headers.size()); ASSERT(rc == 0); } @@ -341,7 +392,7 @@ ssize_t ConnectionImpl::StreamImpl::onDataSourceRead(uint64_t length, uint32_t* } } -int ConnectionImpl::StreamImpl::onDataSourceSend(const uint8_t* framehd, size_t length) { +Status ConnectionImpl::StreamImpl::onDataSourceSend(const uint8_t* framehd, size_t length) { // In this callback we are writing out a raw DATA frame without copying. nghttp2 assumes that we // "just know" that the frame header is 9 bytes. 
// https://nghttp2.org/documentation/types.html#c.nghttp2_send_data_callback @@ -350,21 +401,24 @@ int ConnectionImpl::StreamImpl::onDataSourceSend(const uint8_t* framehd, size_t parent_.outbound_data_frames_++; Buffer::OwnedImpl output; - if (!parent_.addOutboundFrameFragment(output, framehd, FRAME_HEADER_SIZE)) { + auto status = parent_.addOutboundFrameFragment(output, framehd, FRAME_HEADER_SIZE); + if (!status.ok()) { ENVOY_CONN_LOG(debug, "error sending data frame: Too many frames in the outbound queue", parent_.connection_); - return NGHTTP2_ERR_FLOODED; + setDetails(Http2ResponseCodeDetails::get().outbound_frame_flood); + return status; } + parent_.stats_.pending_send_bytes_.sub(length); output.move(pending_send_data_, length); parent_.connection_.write(output, false); - return 0; + return status; } void ConnectionImpl::ClientStreamImpl::submitHeaders(const std::vector& final_headers, nghttp2_data_provider* provider) { ASSERT(stream_id_ == -1); - stream_id_ = nghttp2_submit_request(parent_.session_, nullptr, &final_headers.data()[0], + stream_id_ = nghttp2_submit_request(parent_.session_, nullptr, final_headers.data(), final_headers.size(), provider, base()); ASSERT(stream_id_ > 0); } @@ -372,14 +426,46 @@ void ConnectionImpl::ClientStreamImpl::submitHeaders(const std::vector& final_headers, nghttp2_data_provider* provider) { ASSERT(stream_id_ != -1); - int rc = nghttp2_submit_response(parent_.session_, stream_id_, &final_headers.data()[0], + int rc = nghttp2_submit_response(parent_.session_, stream_id_, final_headers.data(), final_headers.size(), provider); ASSERT(rc == 0); } +void ConnectionImpl::ServerStreamImpl::createPendingFlushTimer() { + ASSERT(stream_idle_timer_ == nullptr); + if (stream_idle_timeout_.count() > 0) { + stream_idle_timer_ = + parent_.connection_.dispatcher().createTimer([this] { onPendingFlushTimer(); }); + stream_idle_timer_->enableTimer(stream_idle_timeout_); + } +} + +void ConnectionImpl::StreamImpl::onPendingFlushTimer() { + 
ENVOY_CONN_LOG(debug, "pending stream flush timeout", parent_.connection_); + stream_idle_timer_.reset(); + parent_.stats_.tx_flush_timeout_.inc(); + ASSERT(local_end_stream_ && !local_end_stream_sent_); + // This will emit a reset frame for this stream and close the stream locally. No reset callbacks + // will be run because higher layers think the stream is already finished. + resetStreamWorker(StreamResetReason::LocalReset); + auto status = parent_.sendPendingFrames(); + // See comment in the `encodeHeadersBase()` method about this RELEASE_ASSERT. + RELEASE_ASSERT(status.ok(), "sendPendingFrames() failure in non dispatching context"); +} + void ConnectionImpl::StreamImpl::encodeData(Buffer::Instance& data, bool end_stream) { ASSERT(!local_end_stream_); + encodeDataHelper(data, end_stream, /*skip_encoding_empty_trailers=*/false); +} + +void ConnectionImpl::StreamImpl::encodeDataHelper(Buffer::Instance& data, bool end_stream, + bool skip_encoding_empty_trailers) { + if (skip_encoding_empty_trailers) { + ASSERT(data.length() == 0 && end_stream); + } + local_end_stream_ = end_stream; + parent_.stats_.pending_send_bytes_.add(data.length()); pending_send_data_.move(data); if (data_deferred_) { int rc = nghttp2_session_resume_data(parent_.session_, stream_id_); @@ -388,7 +474,12 @@ void ConnectionImpl::StreamImpl::encodeData(Buffer::Instance& data, bool end_str data_deferred_ = false; } - parent_.sendPendingFrames(); + auto status = parent_.sendPendingFrames(); + // See comment in the `encodeHeadersBase()` method about this RELEASE_ASSERT. + RELEASE_ASSERT(status.ok(), "sendPendingFrames() failure in non dispatching context"); + if (local_end_stream_ && pending_send_data_.length() > 0) { + createPendingFlushTimer(); + } } void ConnectionImpl::StreamImpl::resetStream(StreamResetReason reason) { @@ -409,7 +500,9 @@ void ConnectionImpl::StreamImpl::resetStream(StreamResetReason reason) { // We must still call sendPendingFrames() in both the deferred and not deferred path. 
This forces // the cleanup logic to run which will reset the stream in all cases if all data frames could not // be sent. - parent_.sendPendingFrames(); + auto status = parent_.sendPendingFrames(); + // See comment in the `encodeHeadersBase()` method about this RELEASE_ASSERT. + RELEASE_ASSERT(status.ok(), "sendPendingFrames() failure in non dispatching context"); } void ConnectionImpl::StreamImpl::resetStreamWorker(StreamResetReason reason) { @@ -441,44 +534,66 @@ void ConnectionImpl::StreamImpl::onMetadataDecoded(MetadataMapPtr&& metadata_map decoder().decodeMetadata(std::move(metadata_map_ptr)); } -ConnectionImpl::ConnectionImpl(Network::Connection& connection, Stats::Scope& stats, +ConnectionImpl::ConnectionImpl(Network::Connection& connection, CodecStats& stats, const envoy::config::core::v3::Http2ProtocolOptions& http2_options, const uint32_t max_headers_kb, const uint32_t max_headers_count) - : stats_{ALL_HTTP2_CODEC_STATS(POOL_COUNTER_PREFIX(stats, "http2."))}, connection_(connection), - max_headers_kb_(max_headers_kb), max_headers_count_(max_headers_count), + : stats_(stats), connection_(connection), max_headers_kb_(max_headers_kb), + max_headers_count_(max_headers_count), per_stream_buffer_limit_(http2_options.initial_stream_window_size().value()), stream_error_on_invalid_http_messaging_( - http2_options.stream_error_on_invalid_http_messaging()), - flood_detected_(false), max_outbound_frames_(http2_options.max_outbound_frames().value()), - frame_buffer_releasor_([this](const Buffer::OwnedBufferFragmentImpl* fragment) { - releaseOutboundFrame(fragment); - }), + http2_options.override_stream_error_on_invalid_http_message().value()), + max_outbound_frames_(http2_options.max_outbound_frames().value()), + frame_buffer_releasor_([this]() { releaseOutboundFrame(); }), max_outbound_control_frames_(http2_options.max_outbound_control_frames().value()), - control_frame_buffer_releasor_([this](const Buffer::OwnedBufferFragmentImpl* fragment) { - 
releaseOutboundControlFrame(fragment); - }), + control_frame_buffer_releasor_([this]() { releaseOutboundControlFrame(); }), max_consecutive_inbound_frames_with_empty_payload_( http2_options.max_consecutive_inbound_frames_with_empty_payload().value()), max_inbound_priority_frames_per_stream_( http2_options.max_inbound_priority_frames_per_stream().value()), max_inbound_window_update_frames_per_data_frame_sent_( http2_options.max_inbound_window_update_frames_per_data_frame_sent().value()), + skip_encoding_empty_trailers_(Runtime::runtimeFeatureEnabled( + "envoy.reloadable_features.http2_skip_encoding_empty_trailers")), dispatching_(false), raised_goaway_(false), pending_deferred_reset_(false) {} -ConnectionImpl::~ConnectionImpl() { nghttp2_session_del(session_); } +ConnectionImpl::~ConnectionImpl() { + for (const auto& stream : active_streams_) { + stream->destroy(); + } + nghttp2_session_del(session_); +} -void ConnectionImpl::dispatch(Buffer::Instance& data) { +Http::Status ConnectionImpl::dispatch(Buffer::Instance& data) { + // TODO(#10878): Remove this wrapper when exception removal is complete. innerDispatch may either + // throw an exception or return an error status. The utility wrapper catches exceptions and + // converts them to error statuses. + return Http::Utility::exceptionToStatus( + [&](Buffer::Instance& data) -> Http::Status { return innerDispatch(data); }, data); +} + +Http::Status ConnectionImpl::innerDispatch(Buffer::Instance& data) { ENVOY_CONN_LOG(trace, "dispatching {} bytes", connection_, data.length()); + // Make sure that dispatching_ is set to false after dispatching, even when + // ConnectionImpl::dispatch returns early or throws an exception (consider removing if there is a + // single return after exception removal (#10878)). 
+ Cleanup cleanup([this]() { dispatching_ = false; }); for (const Buffer::RawSlice& slice : data.getRawSlices()) { dispatching_ = true; ssize_t rc = nghttp2_session_mem_recv(session_, static_cast(slice.mem_), slice.len_); - if (rc == NGHTTP2_ERR_FLOODED || flood_detected_) { - throw FrameFloodException( + if (!nghttp2_callback_status_.ok()) { + return nghttp2_callback_status_; + } + // This error is returned when nghttp2 library detected a frame flood by one of its + // internal mechanisms. Most flood protection is done by Envoy's codec and this error + // should never be returned. However it is handled here in case nghttp2 has some flood + // protections that Envoy's codec does not have. + if (rc == NGHTTP2_ERR_FLOODED) { + return bufferFloodError( "Flooding was detected in this HTTP/2 session, and it must be closed"); } if (rc != static_cast(slice.len_)) { - throw CodecProtocolException(fmt::format("{}", nghttp2_strerror(rc))); + return codecProtocolError(nghttp2_strerror(rc)); } dispatching_ = false; @@ -488,7 +603,7 @@ void ConnectionImpl::dispatch(Buffer::Instance& data) { data.drain(data.length()); // Decoding incoming frames can generate outbound frames so flush pending. 
- sendPendingFrames(); + return sendPendingFrames(); } ConnectionImpl::StreamImpl* ConnectionImpl::getStream(int32_t stream_id) { @@ -502,7 +617,7 @@ int ConnectionImpl::onData(int32_t stream_id, const uint8_t* data, size_t len) { stream->pending_recv_data_.add(data, len); // Update the window to the peer unless some consumer of this stream's data has hit a flow control // limit and disabled reads on this stream - if (!stream->buffers_overrun()) { + if (!stream->buffersOverrun()) { nghttp2_session_consume(session_, stream_id, len); } else { stream->unconsumed_bytes_ += len; @@ -516,33 +631,46 @@ void ConnectionImpl::goAway() { NGHTTP2_NO_ERROR, nullptr, 0); ASSERT(rc == 0); - sendPendingFrames(); + auto status = sendPendingFrames(); + // See comment in the `encodeHeadersBase()` method about this RELEASE_ASSERT. + RELEASE_ASSERT(status.ok(), "sendPendingFrames() failure in non dispatching context"); } void ConnectionImpl::shutdownNotice() { int rc = nghttp2_submit_shutdown_notice(session_); ASSERT(rc == 0); - sendPendingFrames(); + auto status = sendPendingFrames(); + // See comment in the `encodeHeadersBase()` method about this RELEASE_ASSERT. + RELEASE_ASSERT(status.ok(), "sendPendingFrames() failure in non dispatching context"); } -int ConnectionImpl::onBeforeFrameReceived(const nghttp2_frame_hd* hd) { +Status ConnectionImpl::onBeforeFrameReceived(const nghttp2_frame_hd* hd) { ENVOY_CONN_LOG(trace, "about to recv frame type={}, flags={}", connection_, static_cast(hd->type), static_cast(hd->flags)); // Track all the frames without padding here, since this is the only callback we receive // for some of them (e.g. CONTINUATION frame, frames sent on closed streams, etc.). // HEADERS frame is tracked in onBeginHeaders(), DATA frame is tracked in onFrameReceived(). 
+ auto status = okStatus(); if (hd->type != NGHTTP2_HEADERS && hd->type != NGHTTP2_DATA) { - if (!trackInboundFrames(hd, 0)) { - return NGHTTP2_ERR_FLOODED; - } + status = trackInboundFrames(hd, 0); } - return 0; + return status; } -int ConnectionImpl::onFrameReceived(const nghttp2_frame* frame) { +ABSL_MUST_USE_RESULT +enum GoAwayErrorCode ngHttp2ErrorCodeToErrorCode(uint32_t code) noexcept { + switch (code) { + case NGHTTP2_NO_ERROR: + return GoAwayErrorCode::NoError; + default: + return GoAwayErrorCode::Other; + } +} + +Status ConnectionImpl::onFrameReceived(const nghttp2_frame* frame) { ENVOY_CONN_LOG(trace, "recv frame type={}", connection_, static_cast(frame->hd.type)); // onFrameReceived() is called with a complete HEADERS frame assembled from all the HEADERS @@ -551,18 +679,17 @@ int ConnectionImpl::onFrameReceived(const nghttp2_frame* frame) { ASSERT(frame->hd.type != NGHTTP2_CONTINUATION); if (frame->hd.type == NGHTTP2_DATA) { - if (!trackInboundFrames(&frame->hd, frame->data.padlen)) { - return NGHTTP2_ERR_FLOODED; - } + RETURN_IF_ERROR(trackInboundFrames(&frame->hd, frame->data.padlen)); } // Only raise GOAWAY once, since we don't currently expose stream information. Shutdown // notifications are the same as a normal GOAWAY. + // TODO: handle multiple GOAWAY frames. 
if (frame->hd.type == NGHTTP2_GOAWAY && !raised_goaway_) { ASSERT(frame->hd.stream_id == 0); raised_goaway_ = true; - callbacks().onGoAway(); - return 0; + callbacks().onGoAway(ngHttp2ErrorCodeToErrorCode(frame->goaway.error_code)); + return okStatus(); } if (frame->hd.type == NGHTTP2_SETTINGS && frame->hd.flags == NGHTTP2_FLAG_NONE) { @@ -571,7 +698,7 @@ int ConnectionImpl::onFrameReceived(const nghttp2_frame* frame) { StreamImpl* stream = getStream(frame->hd.stream_id); if (!stream) { - return 0; + return okStatus(); } switch (frame->hd.type) { @@ -585,7 +712,7 @@ int ConnectionImpl::onFrameReceived(const nghttp2_frame* frame) { switch (frame->headers.cat) { case NGHTTP2_HCAT_RESPONSE: case NGHTTP2_HCAT_REQUEST: { - stream->decodeHeaders(frame->headers.cat == NGHTTP2_HCAT_RESPONSE); + stream->decodeHeaders(); break; } @@ -593,30 +720,15 @@ int ConnectionImpl::onFrameReceived(const nghttp2_frame* frame) { // It's possible that we are waiting to send a deferred reset, so only raise headers/trailers // if local is not complete. if (!stream->deferred_reset_) { - if (!stream->waiting_for_non_informational_headers_) { - if (!stream->remote_end_stream_) { - // This indicates we have received more headers frames than Envoy - // supports. Even if this is valid HTTP (something like 103 early hints) fail here - // rather than trying to push unexpected headers through the Envoy pipeline as that - // will likely result in Envoy crashing. - // It would be cleaner to reset the stream rather than reset the/ entire connection but - // it's also slightly more dangerous so currently we err on the side of safety. 
- stats_.too_many_header_frames_.inc(); - throw CodecProtocolException("Unexpected 'trailers' with no end stream."); - } else { - stream->decodeTrailers(); - } + if (nghttp2_session_check_server_session(session_) || + stream->received_noninformational_headers_) { + ASSERT(stream->remote_end_stream_); + stream->decodeTrailers(); } else { - ASSERT(!nghttp2_session_check_server_session(session_)); - stream->waiting_for_non_informational_headers_ = false; - - // Even if we have :status 100 in the client case in a response, when - // we received a 1xx to start out with, nghttp2 message checking - // guarantees proper flow here. - stream->decodeHeaders(false); + // We're a client session and still waiting for non-informational headers. + stream->decodeHeaders(); } } - break; } @@ -646,7 +758,7 @@ int ConnectionImpl::onFrameReceived(const nghttp2_frame* frame) { } } - return 0; + return okStatus(); } int ConnectionImpl::onFrameSend(const nghttp2_frame* frame) { @@ -659,6 +771,15 @@ int ConnectionImpl::onFrameSend(const nghttp2_frame* frame) { case NGHTTP2_GOAWAY: { ENVOY_CONN_LOG(debug, "sent goaway code={}", connection_, frame->goaway.error_code); if (frame->goaway.error_code != NGHTTP2_NO_ERROR) { + // TODO(mattklein123): Returning this error code abandons standard nghttp2 frame accounting. + // As such, it is not reliable to call sendPendingFrames() again after this and we assume + // that the connection is going to get torn down immediately. One byproduct of this is that + // we need to cancel all pending flush stream timeouts since they can race with connection + // teardown. As part of the work to remove exceptions we should aim to clean up all of this + // error handling logic and only handle this type of case at the end of dispatch. 
+ for (auto& stream : active_streams_) { + stream->disarmStreamIdleTimer(); + } return NGHTTP2_ERR_CALLBACK_FAILURE; } break; @@ -681,6 +802,11 @@ int ConnectionImpl::onFrameSend(const nghttp2_frame* frame) { return 0; } +int ConnectionImpl::onError(absl::string_view error) { + ENVOY_CONN_LOG(debug, "invalid http2: {}", connection_, error); + return 0; +} + int ConnectionImpl::onInvalidFrame(int32_t stream_id, int error_code) { ENVOY_CONN_LOG(debug, "invalid frame: {} on stream {}", connection_, nghttp2_strerror(error_code), stream_id); @@ -688,7 +814,7 @@ int ConnectionImpl::onInvalidFrame(int32_t stream_id, int error_code) { // Set details of error_code in the stream whenever we have one. StreamImpl* stream = getStream(stream_id); if (stream != nullptr) { - stream->setDetails(Http2ResponseCodeDetails::get().strerror(error_code)); + stream->setDetails(Http2ResponseCodeDetails::get().errorDetails(error_code)); } if (error_code == NGHTTP2_ERR_HTTP_HEADER || error_code == NGHTTP2_ERR_HTTP_MESSAGING) { @@ -721,56 +847,47 @@ int ConnectionImpl::onBeforeFrameSend(const nghttp2_frame* frame) { return 0; } -void ConnectionImpl::incrementOutboundFrameCount(bool is_outbound_flood_monitored_control_frame) { +Status ConnectionImpl::incrementOutboundFrameCount(bool is_outbound_flood_monitored_control_frame) { ++outbound_frames_; if (is_outbound_flood_monitored_control_frame) { ++outbound_control_frames_; } - checkOutboundQueueLimits(); + return checkOutboundQueueLimits(); } -bool ConnectionImpl::addOutboundFrameFragment(Buffer::OwnedImpl& output, const uint8_t* data, - size_t length) { +Status ConnectionImpl::addOutboundFrameFragment(Buffer::OwnedImpl& output, const uint8_t* data, + size_t length) { // Reset the outbound frame type (set in the onBeforeFrameSend callback) since the // onBeforeFrameSend callback is not called for DATA frames. 
bool is_outbound_flood_monitored_control_frame = false; std::swap(is_outbound_flood_monitored_control_frame, is_outbound_flood_monitored_control_frame_); - try { - incrementOutboundFrameCount(is_outbound_flood_monitored_control_frame); - } catch (const FrameFloodException&) { - return false; - } - - auto fragment = Buffer::OwnedBufferFragmentImpl::create( - absl::string_view(reinterpret_cast(data), length), - is_outbound_flood_monitored_control_frame ? control_frame_buffer_releasor_ - : frame_buffer_releasor_); + RETURN_IF_ERROR(incrementOutboundFrameCount(is_outbound_flood_monitored_control_frame)); - // The Buffer::OwnedBufferFragmentImpl object will be deleted in the *frame_buffer_releasor_ - // callback. - output.addBufferFragment(*fragment.release()); - return true; + output.add(data, length); + output.addDrainTracker(is_outbound_flood_monitored_control_frame ? control_frame_buffer_releasor_ + : frame_buffer_releasor_); + return okStatus(); } -void ConnectionImpl::releaseOutboundFrame(const Buffer::OwnedBufferFragmentImpl* fragment) { +void ConnectionImpl::releaseOutboundFrame() { ASSERT(outbound_frames_ >= 1); --outbound_frames_; - delete fragment; } -void ConnectionImpl::releaseOutboundControlFrame(const Buffer::OwnedBufferFragmentImpl* fragment) { +void ConnectionImpl::releaseOutboundControlFrame() { ASSERT(outbound_control_frames_ >= 1); --outbound_control_frames_; - releaseOutboundFrame(fragment); + releaseOutboundFrame(); } -ssize_t ConnectionImpl::onSend(const uint8_t* data, size_t length) { +StatusOr ConnectionImpl::onSend(const uint8_t* data, size_t length) { ENVOY_CONN_LOG(trace, "send data: bytes={}", connection_, length); Buffer::OwnedImpl buffer; - if (!addOutboundFrameFragment(buffer, data, length)) { + auto status = addOutboundFrameFragment(buffer, data, length); + if (!status.ok()) { ENVOY_CONN_LOG(debug, "error sending frame: Too many frames in the outbound queue.", connection_); - return NGHTTP2_ERR_FLOODED; + return status; } // While the 
buffer is transient the fragment it contains will be moved into the @@ -807,6 +924,7 @@ int ConnectionImpl::onStreamClose(int32_t stream_id, uint32_t error_code) { stream->runResetCallbacks(reason); } + stream->destroy(); connection_.dispatcher().deferredDelete(stream->removeFromList(active_streams_)); // Any unconsumed data must be consumed before the stream is deleted. // nghttp2 does not appear to track this internally, and any stream deleted @@ -872,6 +990,7 @@ int ConnectionImpl::saveHeader(const nghttp2_frame* frame, HeaderString&& name, auto should_return = checkHeaderNameForUnderscores(name.getStringView()); if (should_return) { + stream->setDetails(Http2ResponseCodeDetails::get().invalid_underscore); name.clear(); value.clear(); return should_return.value(); @@ -881,32 +1000,34 @@ int ConnectionImpl::saveHeader(const nghttp2_frame* frame, HeaderString&& name, if (stream->headers().byteSize() > max_headers_kb_ * 1024 || stream->headers().size() > max_headers_count_) { - // This will cause the library to reset/close the stream. + stream->setDetails(Http2ResponseCodeDetails::get().too_many_headers); stats_.header_overflow_.inc(); + // This will cause the library to reset/close the stream. return NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE; } else { return 0; } } -void ConnectionImpl::sendPendingFrames() { +Status ConnectionImpl::sendPendingFrames() { if (dispatching_ || connection_.state() == Network::Connection::State::Closed) { - return; + return okStatus(); } const int rc = nghttp2_session_send(session_); if (rc != 0) { ASSERT(rc == NGHTTP2_ERR_CALLBACK_FAILURE); - // For errors caused by the pending outbound frame flood the FrameFloodException has - // to be thrown. However the nghttp2 library returns only the generic error code for - // all failure types. Check queue limits and throw FrameFloodException if they were - // exceeded. 
- if (outbound_frames_ > max_outbound_frames_ || - outbound_control_frames_ > max_outbound_control_frames_) { - throw FrameFloodException("Too many frames in the outbound queue."); + + if (!nghttp2_callback_status_.ok()) { + return nghttp2_callback_status_; } - throw CodecProtocolException(std::string(nghttp2_strerror(rc))); + // The frame flood error should set the nghttp2_callback_status_ error, and return at the + // statement above. + ASSERT(outbound_frames_ <= max_outbound_frames_ && + outbound_control_frames_ <= max_outbound_control_frames_); + + return codecProtocolError(nghttp2_strerror(rc)); } // See ConnectionImpl::StreamImpl::resetStream() for why we do this. This is an uncommon event, @@ -928,8 +1049,9 @@ void ConnectionImpl::sendPendingFrames() { stream->resetStreamWorker(stream->deferred_reset_.value()); } } - sendPendingFrames(); + RETURN_IF_ERROR(sendPendingFrames()); } + return okStatus(); } void ConnectionImpl::sendSettings( @@ -974,7 +1096,7 @@ void ConnectionImpl::sendSettings( {NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS, http2_options.max_concurrent_streams().value()}, {NGHTTP2_SETTINGS_INITIAL_WINDOW_SIZE, http2_options.initial_stream_window_size().value()}}); if (!settings.empty()) { - int rc = nghttp2_submit_settings(session_, NGHTTP2_FLAG_NONE, &settings[0], settings.size()); + int rc = nghttp2_submit_settings(session_, NGHTTP2_FLAG_NONE, settings.data(), settings.size()); ASSERT(rc == 0); } else { // nghttp2_submit_settings need to be called at least once @@ -995,12 +1117,25 @@ void ConnectionImpl::sendSettings( } } +int ConnectionImpl::setAndCheckNghttp2CallbackStatus(Status&& status) { + // Keep the error status that caused the original failure. Subsequent + // error statuses are silently discarded. + nghttp2_callback_status_.Update(std::move(status)); + return nghttp2_callback_status_.ok() ? 
0 : NGHTTP2_ERR_CALLBACK_FAILURE; +} + ConnectionImpl::Http2Callbacks::Http2Callbacks() { nghttp2_session_callbacks_new(&callbacks_); nghttp2_session_callbacks_set_send_callback( callbacks_, [](nghttp2_session*, const uint8_t* data, size_t length, int, void* user_data) -> ssize_t { - return static_cast(user_data)->onSend(data, length); + auto status_or_len = static_cast(user_data)->onSend(data, length); + if (status_or_len.ok()) { + return status_or_len.value(); + } + auto status = status_or_len.status(); + return static_cast(user_data)->setAndCheckNghttp2CallbackStatus( + std::move(status)); }); nghttp2_session_callbacks_set_send_data_callback( @@ -1008,12 +1143,16 @@ ConnectionImpl::Http2Callbacks::Http2Callbacks() { [](nghttp2_session*, nghttp2_frame* frame, const uint8_t* framehd, size_t length, nghttp2_data_source* source, void*) -> int { ASSERT(frame->data.padlen == 0); - return static_cast(source->ptr)->onDataSourceSend(framehd, length); + auto status = static_cast(source->ptr)->onDataSourceSend(framehd, length); + return static_cast(source->ptr) + ->parent_.setAndCheckNghttp2CallbackStatus(std::move(status)); }); nghttp2_session_callbacks_set_on_begin_headers_callback( callbacks_, [](nghttp2_session*, const nghttp2_frame* frame, void* user_data) -> int { - return static_cast(user_data)->onBeginHeaders(frame); + auto status = static_cast(user_data)->onBeginHeaders(frame); + return static_cast(user_data)->setAndCheckNghttp2CallbackStatus( + std::move(status)); }); nghttp2_session_callbacks_set_on_header_callback( @@ -1038,12 +1177,16 @@ ConnectionImpl::Http2Callbacks::Http2Callbacks() { nghttp2_session_callbacks_set_on_begin_frame_callback( callbacks_, [](nghttp2_session*, const nghttp2_frame_hd* hd, void* user_data) -> int { - return static_cast(user_data)->onBeforeFrameReceived(hd); + auto status = static_cast(user_data)->onBeforeFrameReceived(hd); + return static_cast(user_data)->setAndCheckNghttp2CallbackStatus( + std::move(status)); }); 
nghttp2_session_callbacks_set_on_frame_recv_callback( callbacks_, [](nghttp2_session*, const nghttp2_frame* frame, void* user_data) -> int { - return static_cast(user_data)->onFrameReceived(frame); + auto status = static_cast(user_data)->onFrameReceived(frame); + return static_cast(user_data)->setAndCheckNghttp2CallbackStatus( + std::move(status)); }); nghttp2_session_callbacks_set_on_stream_close_callback( @@ -1098,6 +1241,11 @@ ConnectionImpl::Http2Callbacks::Http2Callbacks() { ASSERT(frame->hd.length <= len); return static_cast(user_data)->packMetadata(frame->hd.stream_id, buf, len); }); + + nghttp2_session_callbacks_set_error_callback2( + callbacks_, [](nghttp2_session*, int, const char* msg, size_t len, void* user_data) -> int { + return static_cast(user_data)->onError(absl::string_view(msg, len)); + }); } ConnectionImpl::Http2Callbacks::~Http2Callbacks() { nghttp2_session_callbacks_del(callbacks_); } @@ -1149,7 +1297,7 @@ ConnectionImpl::ClientHttp2Options::ClientHttp2Options( } ClientConnectionImpl::ClientConnectionImpl( - Network::Connection& connection, Http::ConnectionCallbacks& callbacks, Stats::Scope& stats, + Network::Connection& connection, Http::ConnectionCallbacks& callbacks, CodecStats& stats, const envoy::config::core::v3::Http2ProtocolOptions& http2_options, const uint32_t max_response_headers_kb, const uint32_t max_response_headers_count, Nghttp2SessionFactory& http2_session_factory) @@ -1172,11 +1320,11 @@ RequestEncoder& ClientConnectionImpl::newStream(ResponseDecoder& decoder) { stream->runHighWatermarkCallbacks(); } ClientStreamImpl& stream_ref = *stream; - stream->moveIntoList(std::move(stream), active_streams_); + LinkedList::moveIntoList(std::move(stream), active_streams_); return stream_ref; } -int ClientConnectionImpl::onBeginHeaders(const nghttp2_frame* frame) { +Status ClientConnectionImpl::onBeginHeaders(const nghttp2_frame* frame) { // The client code explicitly does not currently support push promise. 
RELEASE_ASSERT(frame->hd.type == NGHTTP2_HEADERS, ""); RELEASE_ASSERT(frame->headers.cat == NGHTTP2_HCAT_RESPONSE || @@ -1187,7 +1335,7 @@ int ClientConnectionImpl::onBeginHeaders(const nghttp2_frame* frame) { stream->allocTrailers(); } - return 0; + return okStatus(); } int ClientConnectionImpl::onHeader(const nghttp2_frame* frame, HeaderString&& name, @@ -1199,12 +1347,12 @@ int ClientConnectionImpl::onHeader(const nghttp2_frame* frame, HeaderString&& na } ServerConnectionImpl::ServerConnectionImpl( - Network::Connection& connection, Http::ServerConnectionCallbacks& callbacks, - Stats::Scope& scope, const envoy::config::core::v3::Http2ProtocolOptions& http2_options, + Network::Connection& connection, Http::ServerConnectionCallbacks& callbacks, CodecStats& stats, + const envoy::config::core::v3::Http2ProtocolOptions& http2_options, const uint32_t max_request_headers_kb, const uint32_t max_request_headers_count, envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction headers_with_underscores_action) - : ConnectionImpl(connection, scope, http2_options, max_request_headers_kb, + : ConnectionImpl(connection, stats, http2_options, max_request_headers_kb, max_request_headers_count), callbacks_(callbacks), headers_with_underscores_action_(headers_with_underscores_action) { Http2Options h2_options(http2_options); @@ -1215,13 +1363,11 @@ ServerConnectionImpl::ServerConnectionImpl( allow_metadata_ = http2_options.allow_metadata(); } -int ServerConnectionImpl::onBeginHeaders(const nghttp2_frame* frame) { +Status ServerConnectionImpl::onBeginHeaders(const nghttp2_frame* frame) { // For a server connection, we should never get push promise frames. 
ASSERT(frame->hd.type == NGHTTP2_HEADERS); - if (!trackInboundFrames(&frame->hd, frame->headers.padlen)) { - return NGHTTP2_ERR_FLOODED; - } + RETURN_IF_ERROR(trackInboundFrames(&frame->hd, frame->headers.padlen)); if (frame->headers.cat != NGHTTP2_HCAT_REQUEST) { stats_.trailers_.inc(); @@ -1229,7 +1375,7 @@ int ServerConnectionImpl::onBeginHeaders(const nghttp2_frame* frame) { StreamImpl* stream = getStream(frame->hd.stream_id); stream->allocTrailers(); - return 0; + return okStatus(); } ServerStreamImplPtr stream(new ServerStreamImpl(*this, per_stream_buffer_limit_)); @@ -1238,10 +1384,10 @@ int ServerConnectionImpl::onBeginHeaders(const nghttp2_frame* frame) { } stream->request_decoder_ = &callbacks_.newStream(*stream); stream->stream_id_ = frame->hd.stream_id; - stream->moveIntoList(std::move(stream), active_streams_); + LinkedList::moveIntoList(std::move(stream), active_streams_); nghttp2_session_set_stream_user_data(session_, frame->hd.stream_id, active_streams_.front().get()); - return 0; + return okStatus(); } int ServerConnectionImpl::onHeader(const nghttp2_frame* frame, HeaderString&& name, @@ -1252,7 +1398,8 @@ int ServerConnectionImpl::onHeader(const nghttp2_frame* frame, HeaderString&& na return saveHeader(frame, std::move(name), std::move(value)); } -bool ServerConnectionImpl::trackInboundFrames(const nghttp2_frame_hd* hd, uint32_t padding_length) { +Status ServerConnectionImpl::trackInboundFrames(const nghttp2_frame_hd* hd, + uint32_t padding_length) { ENVOY_CONN_LOG(trace, "track inbound frame type={} flags={} length={} padding_length={}", connection_, static_cast(hd->type), static_cast(hd->flags), static_cast(hd->length), padding_length); @@ -1283,18 +1430,12 @@ bool ServerConnectionImpl::trackInboundFrames(const nghttp2_frame_hd* hd, uint32 break; } - if (!checkInboundFrameLimits()) { - // NGHTTP2_ERR_FLOODED is overridden within nghttp2 library and it doesn't propagate - // all the way to nghttp2_session_mem_recv() where we need it. 
- flood_detected_ = true; - return false; - } - - return true; + return checkInboundFrameLimits(hd->stream_id); } -bool ServerConnectionImpl::checkInboundFrameLimits() { +Status ServerConnectionImpl::checkInboundFrameLimits(int32_t stream_id) { ASSERT(dispatching_downstream_data_); + ConnectionImpl::StreamImpl* stream = getStream(stream_id); if (consecutive_inbound_frames_with_empty_payload_ > max_consecutive_inbound_frames_with_empty_payload_) { @@ -1302,8 +1443,11 @@ bool ServerConnectionImpl::checkInboundFrameLimits() { "error reading frame: Too many consecutive frames with an empty payload " "received in this HTTP/2 session.", connection_); + if (stream) { + stream->setDetails(Http2ResponseCodeDetails::get().inbound_empty_frame_flood); + } stats_.inbound_empty_frames_flood_.inc(); - return false; + return bufferFloodError("Too many consecutive frames with an empty payload"); } if (inbound_priority_frames_ > max_inbound_priority_frames_per_stream_ * (1 + inbound_streams_)) { @@ -1311,7 +1455,7 @@ bool ServerConnectionImpl::checkInboundFrameLimits() { "error reading frame: Too many PRIORITY frames received in this HTTP/2 session.", connection_); stats_.inbound_priority_frames_flood_.inc(); - return false; + return bufferFloodError("Too many PRIORITY frames"); } if (inbound_window_update_frames_ > @@ -1322,35 +1466,42 @@ bool ServerConnectionImpl::checkInboundFrameLimits() { "error reading frame: Too many WINDOW_UPDATE frames received in this HTTP/2 session.", connection_); stats_.inbound_window_update_frames_flood_.inc(); - return false; + return bufferFloodError("Too many WINDOW_UPDATE frames"); } - return true; + return okStatus(); } -void ServerConnectionImpl::checkOutboundQueueLimits() { +Status ServerConnectionImpl::checkOutboundQueueLimits() { if (outbound_frames_ > max_outbound_frames_ && dispatching_downstream_data_) { stats_.outbound_flood_.inc(); - throw FrameFloodException("Too many frames in the outbound queue."); + return bufferFloodError("Too many 
frames in the outbound queue."); } if (outbound_control_frames_ > max_outbound_control_frames_ && dispatching_downstream_data_) { stats_.outbound_control_flood_.inc(); - throw FrameFloodException("Too many control frames in the outbound queue."); + return bufferFloodError("Too many control frames in the outbound queue."); } + return okStatus(); +} + +Http::Status ServerConnectionImpl::dispatch(Buffer::Instance& data) { + // TODO(#10878): Remove this wrapper when exception removal is complete. innerDispatch may either + // throw an exception or return an error status. The utility wrapper catches exceptions and + // converts them to error statuses. + return Http::Utility::exceptionToStatus( + [&](Buffer::Instance& data) -> Http::Status { return innerDispatch(data); }, data); } -void ServerConnectionImpl::dispatch(Buffer::Instance& data) { +Http::Status ServerConnectionImpl::innerDispatch(Buffer::Instance& data) { ASSERT(!dispatching_downstream_data_); dispatching_downstream_data_ = true; - // Make sure the dispatching_downstream_data_ is set to false even - // when ConnectionImpl::dispatch throws an exception. + // Make sure the dispatching_downstream_data_ is set to false when innerDispatch ends. Cleanup cleanup([this]() { dispatching_downstream_data_ = false; }); // Make sure downstream outbound queue was not flooded by the upstream frames. 
- checkOutboundQueueLimits(); - - ConnectionImpl::dispatch(data); + RETURN_IF_ERROR(checkOutboundQueueLimits()); + return ConnectionImpl::innerDispatch(data); } absl::optional diff --git a/source/common/http/http2/codec_impl.h b/source/common/http/http2/codec_impl.h index ed579f6ff7fff..ce8e6f809a0de 100644 --- a/source/common/http/http2/codec_impl.h +++ b/source/common/http/http2/codec_impl.h @@ -11,16 +11,19 @@ #include "envoy/event/deferred_deletable.h" #include "envoy/http/codec.h" #include "envoy/network/connection.h" -#include "envoy/stats/scope.h" #include "common/buffer/buffer_impl.h" #include "common/buffer/watermark_buffer.h" #include "common/common/linked_object.h" #include "common/common/logger.h" +#include "common/common/statusor.h" +#include "common/common/thread.h" #include "common/http/codec_helper.h" #include "common/http/header_map_impl.h" +#include "common/http/http2/codec_stats.h" #include "common/http/http2/metadata_decoder.h" #include "common/http/http2/metadata_encoder.h" +#include "common/http/status.h" #include "common/http/utility.h" #include "absl/types/optional.h" @@ -30,38 +33,10 @@ namespace Envoy { namespace Http { namespace Http2 { -const std::string ALPN_STRING = "h2"; - // This is not the full client magic, but it's the smallest size that should be able to // differentiate between HTTP/1 and HTTP/2. const std::string CLIENT_MAGIC_PREFIX = "PRI * HTTP/2"; -/** - * All stats for the HTTP/2 codec. 
@see stats_macros.h - */ -#define ALL_HTTP2_CODEC_STATS(COUNTER) \ - COUNTER(dropped_headers_with_underscores) \ - COUNTER(header_overflow) \ - COUNTER(headers_cb_no_stream) \ - COUNTER(inbound_empty_frames_flood) \ - COUNTER(inbound_priority_frames_flood) \ - COUNTER(inbound_window_update_frames_flood) \ - COUNTER(outbound_control_flood) \ - COUNTER(outbound_flood) \ - COUNTER(requests_rejected_with_underscores_in_headers) \ - COUNTER(rx_messaging_error) \ - COUNTER(rx_reset) \ - COUNTER(too_many_header_frames) \ - COUNTER(trailers) \ - COUNTER(tx_reset) - -/** - * Wrapper struct for the HTTP/2 codec stats. @see stats_macros.h - */ -struct CodecStats { - ALL_HTTP2_CODEC_STATS(GENERATE_COUNTER_STRUCT) -}; - class Utility { public: /** @@ -80,14 +55,16 @@ class ConnectionImpl; // Abstract nghttp2_session factory. Used to enable injection of factories for testing. class Nghttp2SessionFactory { public: + using ConnectionImplType = ConnectionImpl; virtual ~Nghttp2SessionFactory() = default; // Returns a new nghttp2_session to be used with |connection|. virtual nghttp2_session* create(const nghttp2_session_callbacks* callbacks, - ConnectionImpl* connection, const nghttp2_option* options) PURE; + ConnectionImplType* connection, + const nghttp2_option* options) PURE; // Initializes the |session|. 
- virtual void init(nghttp2_session* session, ConnectionImpl* connection, + virtual void init(nghttp2_session* session, ConnectionImplType* connection, const envoy::config::core::v3::Http2ProtocolOptions& options) PURE; }; @@ -113,7 +90,7 @@ class ProdNghttp2SessionFactory : public Nghttp2SessionFactory { */ class ConnectionImpl : public virtual Connection, protected Logger::Loggable { public: - ConnectionImpl(Network::Connection& connection, Stats::Scope& stats, + ConnectionImpl(Network::Connection& connection, CodecStats& stats, const envoy::config::core::v3::Http2ProtocolOptions& http2_options, const uint32_t max_headers_kb, const uint32_t max_headers_count); @@ -121,7 +98,7 @@ class ConnectionImpl : public virtual Connection, protected Logger::LoggabledisableTimer(); + stream_idle_timer_.reset(); + } + } StreamImpl* base() { return this; } ssize_t onDataSourceRead(uint64_t length, uint32_t* data_flags); - int onDataSourceSend(const uint8_t* framehd, size_t length); + Status onDataSourceSend(const uint8_t* framehd, size_t length); void resetStreamWorker(StreamResetReason reason); static void buildHeaders(std::vector& final_headers, const HeaderMap& headers); void saveHeader(HeaderString&& name, HeaderString&& value); @@ -200,6 +199,9 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable 0; } + bool buffersOverrun() const { return read_disable_count_ > 0; } + + void encodeDataHelper(Buffer::Instance& data, bool end_stream, + bool skip_encoding_empty_trailers); ConnectionImpl& parent_; int32_t stream_id_{-1}; @@ -262,10 +276,12 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable void { this->pendingRecvBufferLowWatermark(); }, - [this]() -> void { this->pendingRecvBufferHighWatermark(); }}; + [this]() -> void { this->pendingRecvBufferHighWatermark(); }, + []() -> void { /* TODO(adisuissa): Handle overflow watermark */ }}; Buffer::WatermarkBuffer pending_send_data_{ [this]() -> void { 
this->pendingSendBufferLowWatermark(); }, - [this]() -> void { this->pendingSendBufferHighWatermark(); }}; + [this]() -> void { this->pendingSendBufferHighWatermark(); }, + []() -> void { /* TODO(adisuissa): Handle overflow watermark */ }}; HeaderMapPtr pending_trailers_to_encode_; std::unique_ptr metadata_decoder_; std::unique_ptr metadata_encoder_; @@ -274,11 +290,14 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable; @@ -290,13 +309,13 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable()) {} + headers_or_trailers_(ResponseHeaderMapImpl::create()) {} // StreamImpl void submitHeaders(const std::vector& final_headers, nghttp2_data_provider* provider) override; StreamDecoder& decoder() override { return response_decoder_; } - void decodeHeaders(bool allow_waiting_for_informational_headers) override; + void decodeHeaders() override; void decodeTrailers() override; HeaderMap& headers() override { if (absl::holds_alternative(headers_or_trailers_)) { @@ -308,14 +327,19 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable( - std::make_unique()); + if (received_noninformational_headers_) { + headers_or_trailers_.emplace(ResponseTrailerMapImpl::create()); } else { - headers_or_trailers_.emplace( - std::make_unique()); + headers_or_trailers_.emplace(ResponseHeaderMapImpl::create()); } } + HeaderMapPtr cloneTrailers(const HeaderMap& trailers) override { + return createHeaderMap(trailers); + } + void createPendingFlushTimer() override { + // Client streams do not create a flush timer because we currently assume that any failure + // to flush would be covered by a request/stream/etc. timeout. 
+ } // RequestEncoder void encodeHeaders(const RequestHeaderMap& headers, bool end_stream) override; @@ -335,14 +359,13 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable()) {} + : StreamImpl(parent, buffer_limit), headers_or_trailers_(RequestHeaderMapImpl::create()) {} // StreamImpl void submitHeaders(const std::vector& final_headers, nghttp2_data_provider* provider) override; StreamDecoder& decoder() override { return *request_decoder_; } - void decodeHeaders(bool allow_waiting_for_informational_headers) override; + void decodeHeaders() override; void decodeTrailers() override; HeaderMap& headers() override { if (absl::holds_alternative(headers_or_trailers_)) { @@ -352,8 +375,12 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable(std::make_unique()); + headers_or_trailers_.emplace(RequestTrailerMapImpl::create()); + } + HeaderMapPtr cloneTrailers(const HeaderMap& trailers) override { + return createHeaderMap(trailers); } + void createPendingFlushTimer() override; // ResponseEncoder void encode100ContinueHeaders(const ResponseHeaderMap& headers) override; @@ -374,7 +401,7 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable active_streams_; nghttp2_session* session_{}; - CodecStats stats_; + CodecStats& stats_; Network::Connection& connection_; const uint32_t max_headers_kb_; const uint32_t max_headers_count_; uint32_t per_stream_buffer_limit_; bool allow_metadata_; const bool stream_error_on_invalid_http_messaging_; - bool flood_detected_; + + // Status for any errors encountered by the nghttp2 callbacks. + // nghttp2 library uses single return code to indicate callback failure and + // `nghttp2_callback_status_` is used to save right error information returned by a callback. The + // `nghttp2_callback_status_` is valid iff nghttp call returned NGHTTP2_ERR_CALLBACK_FAILURE. 
+ Status nghttp2_callback_status_; // Set if the type of frame that is about to be sent is PING or SETTINGS with the ACK flag set, or // RST_STREAM. @@ -416,7 +454,7 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable frame_buffer_releasor_; // This counter keeps track of the number of outbound frames of types PING, SETTINGS and // RST_STREAM (these that were buffered in the underlying connection but not yet written into the // socket). If this counter exceeds the `max_outbound_control_frames_' value the connection is @@ -425,7 +463,7 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable control_frame_buffer_releasor_; // This counter keeps track of the number of consecutive inbound frames of types HEADERS, // CONTINUATION and DATA with an empty payload and no end stream flag. If this counter exceeds // the `max_consecutive_inbound_frames_with_empty_payload_` value the connection is terminated. @@ -466,34 +504,39 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable onSend(const uint8_t* data, size_t length); + + // Some browsers (e.g. WebKit-based browsers: https://bugs.webkit.org/show_bug.cgi?id=210108) have + // a problem with processing empty trailers (END_STREAM | END_HEADERS with zero length HEADERS) of + // an HTTP/2 response as reported here: https://github.com/envoyproxy/envoy/issues/10514. This is + // controlled by "envoy.reloadable_features.http2_skip_encoding_empty_trailers" runtime feature + // flag. 
+ const bool skip_encoding_empty_trailers_; private: virtual ConnectionCallbacks& callbacks() PURE; - virtual int onBeginHeaders(const nghttp2_frame* frame) PURE; + virtual Status onBeginHeaders(const nghttp2_frame* frame) PURE; int onData(int32_t stream_id, const uint8_t* data, size_t len); - int onBeforeFrameReceived(const nghttp2_frame_hd* hd); - int onFrameReceived(const nghttp2_frame* frame); + Status onBeforeFrameReceived(const nghttp2_frame_hd* hd); + Status onFrameReceived(const nghttp2_frame* frame); int onBeforeFrameSend(const nghttp2_frame* frame); int onFrameSend(const nghttp2_frame* frame); + int onError(absl::string_view error); virtual int onHeader(const nghttp2_frame* frame, HeaderString&& name, HeaderString&& value) PURE; int onInvalidFrame(int32_t stream_id, int error_code); - int onStreamClose(int32_t stream_id, uint32_t error_code); int onMetadataReceived(int32_t stream_id, const uint8_t* data, size_t len); int onMetadataFrameComplete(int32_t stream_id, bool end_metadata); ssize_t packMetadata(int32_t stream_id, uint8_t* buf, size_t len); - // Adds buffer fragment for a new outbound frame to the supplied Buffer::OwnedImpl. - // Returns true on success or false if outbound queue limits were exceeded. - bool addOutboundFrameFragment(Buffer::OwnedImpl& output, const uint8_t* data, size_t length); - virtual void checkOutboundQueueLimits() PURE; - void incrementOutboundFrameCount(bool is_outbound_flood_monitored_control_frame); - virtual bool trackInboundFrames(const nghttp2_frame_hd* hd, uint32_t padding_length) PURE; - virtual bool checkInboundFrameLimits() PURE; - - void releaseOutboundFrame(const Buffer::OwnedBufferFragmentImpl* fragment); - void releaseOutboundControlFrame(const Buffer::OwnedBufferFragmentImpl* fragment); + // Returns Ok Status on success or error if outbound queue limits were exceeded. 
+ Status addOutboundFrameFragment(Buffer::OwnedImpl& output, const uint8_t* data, size_t length); + virtual Status checkOutboundQueueLimits() PURE; + Status incrementOutboundFrameCount(bool is_outbound_flood_monitored_control_frame); + virtual Status trackInboundFrames(const nghttp2_frame_hd* hd, uint32_t padding_length) PURE; + virtual Status checkInboundFrameLimits(int32_t stream_id) PURE; + void releaseOutboundFrame(); + void releaseOutboundControlFrame(); bool dispatching_ : 1; bool raised_goaway_ : 1; @@ -505,12 +548,13 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable checkHeaderNameForUnderscores(absl::string_view header_name) override; // Http::Connection @@ -565,7 +609,8 @@ class ServerConnectionImpl : public ServerConnection, public ConnectionImpl { // ClientConnectionImpl::checkOutboundQueueLimits method). The dispatch method on the // ServerConnectionImpl objects is called only when processing data from the downstream client in // the ConnectionManagerImpl::onData method. 
- void dispatch(Buffer::Instance& data) override; + Http::Status dispatch(Buffer::Instance& data) override; + Http::Status innerDispatch(Buffer::Instance& data) override; ServerConnectionCallbacks& callbacks_; diff --git a/source/common/http/http2/codec_impl_legacy.cc b/source/common/http/http2/codec_impl_legacy.cc new file mode 100644 index 0000000000000..c0046aae307df --- /dev/null +++ b/source/common/http/http2/codec_impl_legacy.cc @@ -0,0 +1,1493 @@ +#include "common/http/http2/codec_impl_legacy.h" + +#include +#include +#include + +#include "envoy/event/dispatcher.h" +#include "envoy/http/codes.h" +#include "envoy/http/header_map.h" +#include "envoy/network/connection.h" + +#include "common/common/assert.h" +#include "common/common/cleanup.h" +#include "common/common/enum_to_int.h" +#include "common/common/fmt.h" +#include "common/common/utility.h" +#include "common/http/codes.h" +#include "common/http/exception.h" +#include "common/http/header_utility.h" +#include "common/http/headers.h" +#include "common/http/http2/codec_stats.h" +#include "common/http/utility.h" +#include "common/runtime/runtime_features.h" + +#include "absl/container/fixed_array.h" + +namespace Envoy { +namespace Http { +namespace Legacy { +namespace Http2 { + +class Http2ResponseCodeDetailValues { +public: + // Invalid HTTP header field was received and stream is going to be + // closed. + const absl::string_view ng_http2_err_http_header_ = "http2.invalid.header.field"; + // Violation in HTTP messaging rule. + const absl::string_view ng_http2_err_http_messaging_ = "http2.violation.of.messaging.rule"; + // none of the above + const absl::string_view ng_http2_err_unknown_ = "http2.unknown.nghttp2.error"; + // The number of headers (or trailers) exceeded the configured limits + const absl::string_view too_many_headers = "http2.too_many_headers"; + // Envoy detected an HTTP/2 frame flood from the server. 
+ const absl::string_view outbound_frame_flood = "http2.outbound_frames_flood"; + // Envoy detected an inbound HTTP/2 frame flood. + const absl::string_view inbound_empty_frame_flood = "http2.inbound_empty_frames_flood"; + // Envoy was configured to drop requests with header keys beginning with underscores. + const absl::string_view invalid_underscore = "http2.unexpected_underscore"; + + const absl::string_view errorDetails(int error_code) const { + switch (error_code) { + case NGHTTP2_ERR_HTTP_HEADER: + return ng_http2_err_http_header_; + case NGHTTP2_ERR_HTTP_MESSAGING: + return ng_http2_err_http_messaging_; + default: + return ng_http2_err_unknown_; + } + } +}; + +using Http2ResponseCodeDetails = ConstSingleton; +using Http::Http2::CodecStats; +using Http::Http2::MetadataDecoder; +using Http::Http2::MetadataEncoder; + +bool Utility::reconstituteCrumbledCookies(const HeaderString& key, const HeaderString& value, + HeaderString& cookies) { + if (key != Headers::get().Cookie.get().c_str()) { + return false; + } + + if (!cookies.empty()) { + cookies.append("; ", 2); + } + + const absl::string_view value_view = value.getStringView(); + cookies.append(value_view.data(), value_view.size()); + return true; +} + +ConnectionImpl::Http2Callbacks ConnectionImpl::http2_callbacks_; + +nghttp2_session* ProdNghttp2SessionFactory::create(const nghttp2_session_callbacks* callbacks, + ConnectionImpl* connection, + const nghttp2_option* options) { + nghttp2_session* session; + nghttp2_session_client_new2(&session, callbacks, connection, options); + return session; +} + +void ProdNghttp2SessionFactory::init(nghttp2_session*, ConnectionImpl* connection, + const envoy::config::core::v3::Http2ProtocolOptions& options) { + connection->sendSettings(options, true); +} + +/** + * Helper to remove const during a cast. nghttp2 takes non-const pointers for headers even though + * it copies them. 
+ */ +template static T* removeConst(const void* object) { + return const_cast(reinterpret_cast(object)); +} + +ConnectionImpl::StreamImpl::StreamImpl(ConnectionImpl& parent, uint32_t buffer_limit) + : parent_(parent), local_end_stream_sent_(false), remote_end_stream_(false), + data_deferred_(false), received_noninformational_headers_(false), + pending_receive_buffer_high_watermark_called_(false), + pending_send_buffer_high_watermark_called_(false), reset_due_to_messaging_error_(false) { + parent_.stats_.streams_active_.inc(); + if (buffer_limit > 0) { + setWriteBufferWatermarks(buffer_limit / 2, buffer_limit); + } +} + +ConnectionImpl::StreamImpl::~StreamImpl() { ASSERT(stream_idle_timer_ == nullptr); } + +void ConnectionImpl::StreamImpl::destroy() { + disarmStreamIdleTimer(); + parent_.stats_.streams_active_.dec(); + parent_.stats_.pending_send_bytes_.sub(pending_send_data_.length()); +} + +static void insertHeader(std::vector& headers, const HeaderEntry& header) { + uint8_t flags = 0; + if (header.key().isReference()) { + flags |= NGHTTP2_NV_FLAG_NO_COPY_NAME; + } + if (header.value().isReference()) { + flags |= NGHTTP2_NV_FLAG_NO_COPY_VALUE; + } + const absl::string_view header_key = header.key().getStringView(); + const absl::string_view header_value = header.value().getStringView(); + headers.push_back({removeConst(header_key.data()), + removeConst(header_value.data()), header_key.size(), + header_value.size(), flags}); +} + +void ConnectionImpl::StreamImpl::buildHeaders(std::vector& final_headers, + const HeaderMap& headers) { + final_headers.reserve(headers.size()); + headers.iterate([&final_headers](const HeaderEntry& header) -> HeaderMap::Iterate { + insertHeader(final_headers, header); + return HeaderMap::Iterate::Continue; + }); +} + +void ConnectionImpl::ServerStreamImpl::encode100ContinueHeaders(const ResponseHeaderMap& headers) { + ASSERT(headers.Status()->value() == "100"); + encodeHeaders(headers, false); +} + +void 
ConnectionImpl::StreamImpl::encodeHeadersBase(const std::vector& final_headers, + bool end_stream) { + nghttp2_data_provider provider; + if (!end_stream) { + provider.source.ptr = this; + provider.read_callback = [](nghttp2_session*, int32_t, uint8_t*, size_t length, + uint32_t* data_flags, nghttp2_data_source* source, + void*) -> ssize_t { + return static_cast(source->ptr)->onDataSourceRead(length, data_flags); + }; + } + + local_end_stream_ = end_stream; + submitHeaders(final_headers, end_stream ? nullptr : &provider); + parent_.sendPendingFrames(); +} + +void ConnectionImpl::ClientStreamImpl::encodeHeaders(const RequestHeaderMap& headers, + bool end_stream) { + // This must exist outside of the scope of isUpgrade as the underlying memory is + // needed until encodeHeadersBase has been called. + std::vector final_headers; + Http::RequestHeaderMapPtr modified_headers; + if (Http::Utility::isUpgrade(headers)) { + modified_headers = createHeaderMap(headers); + upgrade_type_ = std::string(headers.getUpgradeValue()); + Http::Utility::transformUpgradeRequestFromH1toH2(*modified_headers); + buildHeaders(final_headers, *modified_headers); + } else if (headers.Method() && headers.Method()->value() == "CONNECT") { + // If this is not an upgrade style connect (above branch) it is a bytestream + // connect and should have :path and :protocol set accordingly + // As HTTP/1.1 does not require a path for CONNECT, we may have to add one + // if shifting codecs. For now, default to "/" - this can be made + // configurable if necessary. 
+ // https://tools.ietf.org/html/draft-kinnear-httpbis-http2-transport-02 + modified_headers = createHeaderMap(headers); + modified_headers->setProtocol(Headers::get().ProtocolValues.Bytestream); + if (!headers.Path()) { + modified_headers->setPath("/"); + } + buildHeaders(final_headers, *modified_headers); + } else { + buildHeaders(final_headers, headers); + } + encodeHeadersBase(final_headers, end_stream); +} + +void ConnectionImpl::ServerStreamImpl::encodeHeaders(const ResponseHeaderMap& headers, + bool end_stream) { + // The contract is that client codecs must ensure that :status is present. + ASSERT(headers.Status() != nullptr); + + // This must exist outside of the scope of isUpgrade as the underlying memory is + // needed until encodeHeadersBase has been called. + std::vector final_headers; + Http::ResponseHeaderMapPtr modified_headers; + if (Http::Utility::isUpgrade(headers)) { + modified_headers = createHeaderMap(headers); + Http::Utility::transformUpgradeResponseFromH1toH2(*modified_headers); + buildHeaders(final_headers, *modified_headers); + } else { + buildHeaders(final_headers, headers); + } + encodeHeadersBase(final_headers, end_stream); +} + +void ConnectionImpl::StreamImpl::encodeTrailersBase(const HeaderMap& trailers) { + ASSERT(!local_end_stream_); + local_end_stream_ = true; + if (pending_send_data_.length() > 0) { + // In this case we want trailers to come after we release all pending body data that is + // waiting on window updates. We need to save the trailers so that we can emit them later. 
+ ASSERT(!pending_trailers_to_encode_); + pending_trailers_to_encode_ = cloneTrailers(trailers); + createPendingFlushTimer(); + } else { + submitTrailers(trailers); + parent_.sendPendingFrames(); + } +} + +void ConnectionImpl::StreamImpl::encodeMetadata(const MetadataMapVector& metadata_map_vector) { + ASSERT(parent_.allow_metadata_); + MetadataEncoder& metadata_encoder = getMetadataEncoder(); + if (!metadata_encoder.createPayload(metadata_map_vector)) { + return; + } + for (uint8_t flags : metadata_encoder.payloadFrameFlagBytes()) { + submitMetadata(flags); + } + parent_.sendPendingFrames(); +} + +void ConnectionImpl::StreamImpl::readDisable(bool disable) { + ENVOY_CONN_LOG(debug, "Stream {} {}, unconsumed_bytes {} read_disable_count {}", + parent_.connection_, stream_id_, (disable ? "disabled" : "enabled"), + unconsumed_bytes_, read_disable_count_); + if (disable) { + ++read_disable_count_; + } else { + ASSERT(read_disable_count_ > 0); + --read_disable_count_; + if (!buffersOverrun()) { + nghttp2_session_consume(parent_.session_, stream_id_, unconsumed_bytes_); + unconsumed_bytes_ = 0; + parent_.sendPendingFrames(); + } + } +} + +void ConnectionImpl::StreamImpl::pendingRecvBufferHighWatermark() { + ENVOY_CONN_LOG(debug, "recv buffer over limit ", parent_.connection_); + ASSERT(!pending_receive_buffer_high_watermark_called_); + pending_receive_buffer_high_watermark_called_ = true; + readDisable(true); +} + +void ConnectionImpl::StreamImpl::pendingRecvBufferLowWatermark() { + ENVOY_CONN_LOG(debug, "recv buffer under limit ", parent_.connection_); + ASSERT(pending_receive_buffer_high_watermark_called_); + pending_receive_buffer_high_watermark_called_ = false; + readDisable(false); +} + +void ConnectionImpl::ClientStreamImpl::decodeHeaders() { + auto& headers = absl::get(headers_or_trailers_); + const uint64_t status = Http::Utility::getResponseStatus(*headers); + + if (!upgrade_type_.empty() && headers->Status()) { + 
Http::Utility::transformUpgradeResponseFromH2toH1(*headers, upgrade_type_); + } + + // Non-informational headers are non-1xx OR 101-SwitchingProtocols, since 101 implies that further + // proxying is on an upgrade path. + received_noninformational_headers_ = + !CodeUtility::is1xx(status) || status == enumToInt(Http::Code::SwitchingProtocols); + + if (status == enumToInt(Http::Code::Continue)) { + ASSERT(!remote_end_stream_); + response_decoder_.decode100ContinueHeaders(std::move(headers)); + } else { + response_decoder_.decodeHeaders(std::move(headers), remote_end_stream_); + } +} + +void ConnectionImpl::ClientStreamImpl::decodeTrailers() { + response_decoder_.decodeTrailers( + std::move(absl::get(headers_or_trailers_))); +} + +void ConnectionImpl::ServerStreamImpl::decodeHeaders() { + auto& headers = absl::get(headers_or_trailers_); + if (Http::Utility::isH2UpgradeRequest(*headers)) { + Http::Utility::transformUpgradeRequestFromH2toH1(*headers); + } + request_decoder_->decodeHeaders(std::move(headers), remote_end_stream_); +} + +void ConnectionImpl::ServerStreamImpl::decodeTrailers() { + request_decoder_->decodeTrailers( + std::move(absl::get(headers_or_trailers_))); +} + +void ConnectionImpl::StreamImpl::pendingSendBufferHighWatermark() { + ENVOY_CONN_LOG(debug, "send buffer over limit ", parent_.connection_); + ASSERT(!pending_send_buffer_high_watermark_called_); + pending_send_buffer_high_watermark_called_ = true; + runHighWatermarkCallbacks(); +} + +void ConnectionImpl::StreamImpl::pendingSendBufferLowWatermark() { + ENVOY_CONN_LOG(debug, "send buffer under limit ", parent_.connection_); + ASSERT(pending_send_buffer_high_watermark_called_); + pending_send_buffer_high_watermark_called_ = false; + runLowWatermarkCallbacks(); +} + +void ConnectionImpl::StreamImpl::saveHeader(HeaderString&& name, HeaderString&& value) { + if (!Utility::reconstituteCrumbledCookies(name, value, cookies_)) { + headers().addViaMove(std::move(name), std::move(value)); + } +} + +void 
ConnectionImpl::StreamImpl::submitTrailers(const HeaderMap& trailers) { + ASSERT(local_end_stream_); + const bool skip_encoding_empty_trailers = + trailers.empty() && parent_.skip_encoding_empty_trailers_; + if (skip_encoding_empty_trailers) { + ENVOY_CONN_LOG(debug, "skipping submitting trailers", parent_.connection_); + + // Instead of submitting empty trailers, we send empty data instead. + Buffer::OwnedImpl empty_buffer; + encodeDataHelper(empty_buffer, /*end_stream=*/true, skip_encoding_empty_trailers); + return; + } + + std::vector final_headers; + buildHeaders(final_headers, trailers); + int rc = nghttp2_submit_trailer(parent_.session_, stream_id_, final_headers.data(), + final_headers.size()); + ASSERT(rc == 0); +} + +void ConnectionImpl::StreamImpl::submitMetadata(uint8_t flags) { + ASSERT(stream_id_ > 0); + const int result = + nghttp2_submit_extension(parent_.session_, METADATA_FRAME_TYPE, flags, stream_id_, nullptr); + ASSERT(result == 0); +} + +ssize_t ConnectionImpl::StreamImpl::onDataSourceRead(uint64_t length, uint32_t* data_flags) { + if (pending_send_data_.length() == 0 && !local_end_stream_) { + ASSERT(!data_deferred_); + data_deferred_ = true; + return NGHTTP2_ERR_DEFERRED; + } else { + *data_flags |= NGHTTP2_DATA_FLAG_NO_COPY; + if (local_end_stream_ && pending_send_data_.length() <= length) { + *data_flags |= NGHTTP2_DATA_FLAG_EOF; + if (pending_trailers_to_encode_) { + // We need to tell the library to not set end stream so that we can emit the trailers. + *data_flags |= NGHTTP2_DATA_FLAG_NO_END_STREAM; + submitTrailers(*pending_trailers_to_encode_); + pending_trailers_to_encode_.reset(); + } + } + + return std::min(length, pending_send_data_.length()); + } +} + +int ConnectionImpl::StreamImpl::onDataSourceSend(const uint8_t* framehd, size_t length) { + // In this callback we are writing out a raw DATA frame without copying. nghttp2 assumes that we + // "just know" that the frame header is 9 bytes. 
+ // https://nghttp2.org/documentation/types.html#c.nghttp2_send_data_callback + static const uint64_t FRAME_HEADER_SIZE = 9; + + parent_.outbound_data_frames_++; + + Buffer::OwnedImpl output; + if (!parent_.addOutboundFrameFragment(output, framehd, FRAME_HEADER_SIZE)) { + ENVOY_CONN_LOG(debug, "error sending data frame: Too many frames in the outbound queue", + parent_.connection_); + setDetails(Http2ResponseCodeDetails::get().outbound_frame_flood); + return NGHTTP2_ERR_FLOODED; + } + + parent_.stats_.pending_send_bytes_.sub(length); + output.move(pending_send_data_, length); + parent_.connection_.write(output, false); + return 0; +} + +void ConnectionImpl::ClientStreamImpl::submitHeaders(const std::vector& final_headers, + nghttp2_data_provider* provider) { + ASSERT(stream_id_ == -1); + stream_id_ = nghttp2_submit_request(parent_.session_, nullptr, final_headers.data(), + final_headers.size(), provider, base()); + ASSERT(stream_id_ > 0); +} + +void ConnectionImpl::ServerStreamImpl::submitHeaders(const std::vector& final_headers, + nghttp2_data_provider* provider) { + ASSERT(stream_id_ != -1); + int rc = nghttp2_submit_response(parent_.session_, stream_id_, final_headers.data(), + final_headers.size(), provider); + ASSERT(rc == 0); +} + +void ConnectionImpl::ServerStreamImpl::createPendingFlushTimer() { + ASSERT(stream_idle_timer_ == nullptr); + if (stream_idle_timeout_.count() > 0) { + stream_idle_timer_ = + parent_.connection_.dispatcher().createTimer([this] { onPendingFlushTimer(); }); + stream_idle_timer_->enableTimer(stream_idle_timeout_); + } +} + +void ConnectionImpl::StreamImpl::onPendingFlushTimer() { + ENVOY_CONN_LOG(debug, "pending stream flush timeout", parent_.connection_); + stream_idle_timer_.reset(); + parent_.stats_.tx_flush_timeout_.inc(); + ASSERT(local_end_stream_ && !local_end_stream_sent_); + // This will emit a reset frame for this stream and close the stream locally. 
No reset callbacks + // will be run because higher layers think the stream is already finished. + resetStreamWorker(StreamResetReason::LocalReset); + parent_.sendPendingFrames(); +} + +void ConnectionImpl::StreamImpl::encodeData(Buffer::Instance& data, bool end_stream) { + ASSERT(!local_end_stream_); + encodeDataHelper(data, end_stream, /*skip_encoding_empty_trailers=*/false); +} + +void ConnectionImpl::StreamImpl::encodeDataHelper(Buffer::Instance& data, bool end_stream, + bool skip_encoding_empty_trailers) { + if (skip_encoding_empty_trailers) { + ASSERT(data.length() == 0 && end_stream); + } + + local_end_stream_ = end_stream; + parent_.stats_.pending_send_bytes_.add(data.length()); + pending_send_data_.move(data); + if (data_deferred_) { + int rc = nghttp2_session_resume_data(parent_.session_, stream_id_); + ASSERT(rc == 0); + + data_deferred_ = false; + } + + parent_.sendPendingFrames(); + if (local_end_stream_ && pending_send_data_.length() > 0) { + createPendingFlushTimer(); + } +} + +void ConnectionImpl::StreamImpl::resetStream(StreamResetReason reason) { + // Higher layers expect calling resetStream() to immediately raise reset callbacks. + runResetCallbacks(reason); + + // If we submit a reset, nghttp2 will cancel outbound frames that have not yet been sent. + // We want these frames to go out so we defer the reset until we send all of the frames that + // end the local stream. + if (local_end_stream_ && !local_end_stream_sent_) { + parent_.pending_deferred_reset_ = true; + deferred_reset_ = reason; + ENVOY_CONN_LOG(trace, "deferred reset stream", parent_.connection_); + } else { + resetStreamWorker(reason); + } + + // We must still call sendPendingFrames() in both the deferred and not deferred path. This forces + // the cleanup logic to run which will reset the stream in all cases if all data frames could not + // be sent. 
+ parent_.sendPendingFrames(); +} + +void ConnectionImpl::StreamImpl::resetStreamWorker(StreamResetReason reason) { + int rc = nghttp2_submit_rst_stream(parent_.session_, NGHTTP2_FLAG_NONE, stream_id_, + reason == StreamResetReason::LocalRefusedStreamReset + ? NGHTTP2_REFUSED_STREAM + : NGHTTP2_NO_ERROR); + ASSERT(rc == 0); +} + +MetadataEncoder& ConnectionImpl::StreamImpl::getMetadataEncoder() { + if (metadata_encoder_ == nullptr) { + metadata_encoder_ = std::make_unique(); + } + return *metadata_encoder_; +} + +MetadataDecoder& ConnectionImpl::StreamImpl::getMetadataDecoder() { + if (metadata_decoder_ == nullptr) { + auto cb = [this](MetadataMapPtr&& metadata_map_ptr) { + this->onMetadataDecoded(std::move(metadata_map_ptr)); + }; + metadata_decoder_ = std::make_unique(cb); + } + return *metadata_decoder_; +} + +void ConnectionImpl::StreamImpl::onMetadataDecoded(MetadataMapPtr&& metadata_map_ptr) { + decoder().decodeMetadata(std::move(metadata_map_ptr)); +} + +ConnectionImpl::ConnectionImpl(Network::Connection& connection, CodecStats& stats, + const envoy::config::core::v3::Http2ProtocolOptions& http2_options, + const uint32_t max_headers_kb, const uint32_t max_headers_count) + : stats_(stats), connection_(connection), max_headers_kb_(max_headers_kb), + max_headers_count_(max_headers_count), + per_stream_buffer_limit_(http2_options.initial_stream_window_size().value()), + stream_error_on_invalid_http_messaging_( + http2_options.override_stream_error_on_invalid_http_message().value()), + flood_detected_(false), max_outbound_frames_(http2_options.max_outbound_frames().value()), + frame_buffer_releasor_([this]() { releaseOutboundFrame(); }), + max_outbound_control_frames_(http2_options.max_outbound_control_frames().value()), + control_frame_buffer_releasor_([this]() { releaseOutboundControlFrame(); }), + max_consecutive_inbound_frames_with_empty_payload_( + http2_options.max_consecutive_inbound_frames_with_empty_payload().value()), + 
max_inbound_priority_frames_per_stream_( + http2_options.max_inbound_priority_frames_per_stream().value()), + max_inbound_window_update_frames_per_data_frame_sent_( + http2_options.max_inbound_window_update_frames_per_data_frame_sent().value()), + skip_encoding_empty_trailers_(Runtime::runtimeFeatureEnabled( + "envoy.reloadable_features.http2_skip_encoding_empty_trailers")), + dispatching_(false), raised_goaway_(false), pending_deferred_reset_(false) {} + +ConnectionImpl::~ConnectionImpl() { + for (const auto& stream : active_streams_) { + stream->destroy(); + } + nghttp2_session_del(session_); +} + +Http::Status ConnectionImpl::dispatch(Buffer::Instance& data) { + // TODO(#10878): Remove this wrapper when exception removal is complete. innerDispatch may either + // throw an exception or return an error status. The utility wrapper catches exceptions and + // converts them to error statuses. + return Http::Utility::exceptionToStatus( + [&](Buffer::Instance& data) -> Http::Status { return innerDispatch(data); }, data); +} + +Http::Status ConnectionImpl::innerDispatch(Buffer::Instance& data) { + ENVOY_CONN_LOG(trace, "dispatching {} bytes", connection_, data.length()); + // Make sure that dispatching_ is set to false after dispatching, even when + // ConnectionImpl::dispatch returns early or throws an exception (consider removing if there is a + // single return after exception removal (#10878)). 
+ Cleanup cleanup([this]() { dispatching_ = false; }); + for (const Buffer::RawSlice& slice : data.getRawSlices()) { + dispatching_ = true; + ssize_t rc = + nghttp2_session_mem_recv(session_, static_cast(slice.mem_), slice.len_); + if (rc == NGHTTP2_ERR_FLOODED || flood_detected_) { + throw FrameFloodException( + "Flooding was detected in this HTTP/2 session, and it must be closed"); + } + if (rc != static_cast(slice.len_)) { + throw CodecProtocolException(fmt::format("{}", nghttp2_strerror(rc))); + } + + dispatching_ = false; + } + + ENVOY_CONN_LOG(trace, "dispatched {} bytes", connection_, data.length()); + data.drain(data.length()); + + // Decoding incoming frames can generate outbound frames so flush pending. + sendPendingFrames(); + return Http::okStatus(); +} + +ConnectionImpl::StreamImpl* ConnectionImpl::getStream(int32_t stream_id) { + return static_cast(nghttp2_session_get_stream_user_data(session_, stream_id)); +} + +int ConnectionImpl::onData(int32_t stream_id, const uint8_t* data, size_t len) { + StreamImpl* stream = getStream(stream_id); + // If this results in buffering too much data, the watermark buffer will call + // pendingRecvBufferHighWatermark, resulting in ++read_disable_count_ + stream->pending_recv_data_.add(data, len); + // Update the window to the peer unless some consumer of this stream's data has hit a flow control + // limit and disabled reads on this stream + if (!stream->buffersOverrun()) { + nghttp2_session_consume(session_, stream_id, len); + } else { + stream->unconsumed_bytes_ += len; + } + return 0; +} + +void ConnectionImpl::goAway() { + int rc = nghttp2_submit_goaway(session_, NGHTTP2_FLAG_NONE, + nghttp2_session_get_last_proc_stream_id(session_), + NGHTTP2_NO_ERROR, nullptr, 0); + ASSERT(rc == 0); + + sendPendingFrames(); +} + +void ConnectionImpl::shutdownNotice() { + int rc = nghttp2_submit_shutdown_notice(session_); + ASSERT(rc == 0); + + sendPendingFrames(); +} + +int ConnectionImpl::onBeforeFrameReceived(const 
nghttp2_frame_hd* hd) { + ENVOY_CONN_LOG(trace, "about to recv frame type={}, flags={}", connection_, + static_cast(hd->type), static_cast(hd->flags)); + + // Track all the frames without padding here, since this is the only callback we receive + // for some of them (e.g. CONTINUATION frame, frames sent on closed streams, etc.). + // HEADERS frame is tracked in onBeginHeaders(), DATA frame is tracked in onFrameReceived(). + if (hd->type != NGHTTP2_HEADERS && hd->type != NGHTTP2_DATA) { + if (!trackInboundFrames(hd, 0)) { + return NGHTTP2_ERR_FLOODED; + } + } + + return 0; +} + +ABSL_MUST_USE_RESULT +enum GoAwayErrorCode ngHttp2ErrorCodeToErrorCode(uint32_t code) noexcept { + switch (code) { + case NGHTTP2_NO_ERROR: + return GoAwayErrorCode::NoError; + default: + return GoAwayErrorCode::Other; + } +} + +int ConnectionImpl::onFrameReceived(const nghttp2_frame* frame) { + ENVOY_CONN_LOG(trace, "recv frame type={}", connection_, static_cast(frame->hd.type)); + + // onFrameReceived() is called with a complete HEADERS frame assembled from all the HEADERS + // and CONTINUATION frames, but we track them separately: HEADERS frames in onBeginHeaders() + // and CONTINUATION frames in onBeforeFrameReceived(). + ASSERT(frame->hd.type != NGHTTP2_CONTINUATION); + + if (frame->hd.type == NGHTTP2_DATA) { + if (!trackInboundFrames(&frame->hd, frame->data.padlen)) { + return NGHTTP2_ERR_FLOODED; + } + } + + // Only raise GOAWAY once, since we don't currently expose stream information. Shutdown + // notifications are the same as a normal GOAWAY. + // TODO: handle multiple GOAWAY frames. 
+ if (frame->hd.type == NGHTTP2_GOAWAY && !raised_goaway_) { + ASSERT(frame->hd.stream_id == 0); + raised_goaway_ = true; + callbacks().onGoAway(ngHttp2ErrorCodeToErrorCode(frame->goaway.error_code)); + return 0; + } + + if (frame->hd.type == NGHTTP2_SETTINGS && frame->hd.flags == NGHTTP2_FLAG_NONE) { + onSettingsForTest(frame->settings); + } + + StreamImpl* stream = getStream(frame->hd.stream_id); + if (!stream) { + return 0; + } + + switch (frame->hd.type) { + case NGHTTP2_HEADERS: { + stream->remote_end_stream_ = frame->hd.flags & NGHTTP2_FLAG_END_STREAM; + if (!stream->cookies_.empty()) { + HeaderString key(Headers::get().Cookie); + stream->headers().addViaMove(std::move(key), std::move(stream->cookies_)); + } + + switch (frame->headers.cat) { + case NGHTTP2_HCAT_RESPONSE: + case NGHTTP2_HCAT_REQUEST: { + stream->decodeHeaders(); + break; + } + + case NGHTTP2_HCAT_HEADERS: { + // It's possible that we are waiting to send a deferred reset, so only raise headers/trailers + // if local is not complete. + if (!stream->deferred_reset_) { + if (nghttp2_session_check_server_session(session_) || + stream->received_noninformational_headers_) { + ASSERT(stream->remote_end_stream_); + stream->decodeTrailers(); + } else { + // We're a client session and still waiting for non-informational headers. + stream->decodeHeaders(); + } + } + break; + } + + default: + // We do not currently support push. + NOT_IMPLEMENTED_GCOVR_EXCL_LINE; + } + + break; + } + case NGHTTP2_DATA: { + stream->remote_end_stream_ = frame->hd.flags & NGHTTP2_FLAG_END_STREAM; + + // It's possible that we are waiting to send a deferred reset, so only raise data if local + // is not complete. 
+ if (!stream->deferred_reset_) { + stream->decoder().decodeData(stream->pending_recv_data_, stream->remote_end_stream_); + } + + stream->pending_recv_data_.drain(stream->pending_recv_data_.length()); + break; + } + case NGHTTP2_RST_STREAM: { + ENVOY_CONN_LOG(trace, "remote reset: {}", connection_, frame->rst_stream.error_code); + stats_.rx_reset_.inc(); + break; + } + } + + return 0; +} + +int ConnectionImpl::onFrameSend(const nghttp2_frame* frame) { + // The nghttp2 library does not cleanly give us a way to determine whether we received invalid + // data from our peer. Sometimes it raises the invalid frame callback, and sometimes it does not. + // In all cases however it will attempt to send a GOAWAY frame with an error status. If we see + // an outgoing frame of this type, we will return an error code so that we can abort execution. + ENVOY_CONN_LOG(trace, "sent frame type={}", connection_, static_cast(frame->hd.type)); + switch (frame->hd.type) { + case NGHTTP2_GOAWAY: { + ENVOY_CONN_LOG(debug, "sent goaway code={}", connection_, frame->goaway.error_code); + if (frame->goaway.error_code != NGHTTP2_NO_ERROR) { + // TODO(mattklein123): Returning this error code abandons standard nghttp2 frame accounting. + // As such, it is not reliable to call sendPendingFrames() again after this and we assume + // that the connection is going to get torn down immediately. One byproduct of this is that + // we need to cancel all pending flush stream timeouts since they can race with connection + // teardown. As part of the work to remove exceptions we should aim to clean up all of this + // error handling logic and only handle this type of case at the end of dispatch. 
+ for (auto& stream : active_streams_) { + stream->disarmStreamIdleTimer(); + } + return NGHTTP2_ERR_CALLBACK_FAILURE; + } + break; + } + + case NGHTTP2_RST_STREAM: { + ENVOY_CONN_LOG(debug, "sent reset code={}", connection_, frame->rst_stream.error_code); + stats_.tx_reset_.inc(); + break; + } + + case NGHTTP2_HEADERS: + case NGHTTP2_DATA: { + StreamImpl* stream = getStream(frame->hd.stream_id); + stream->local_end_stream_sent_ = frame->hd.flags & NGHTTP2_FLAG_END_STREAM; + break; + } + } + + return 0; +} + +int ConnectionImpl::onError(absl::string_view error) { + ENVOY_CONN_LOG(debug, "invalid http2: {}", connection_, error); + return 0; +} + +int ConnectionImpl::onInvalidFrame(int32_t stream_id, int error_code) { + ENVOY_CONN_LOG(debug, "invalid frame: {} on stream {}", connection_, nghttp2_strerror(error_code), + stream_id); + + // Set details of error_code in the stream whenever we have one. + StreamImpl* stream = getStream(stream_id); + if (stream != nullptr) { + stream->setDetails(Http2ResponseCodeDetails::get().errorDetails(error_code)); + } + + if (error_code == NGHTTP2_ERR_HTTP_HEADER || error_code == NGHTTP2_ERR_HTTP_MESSAGING) { + stats_.rx_messaging_error_.inc(); + + if (stream_error_on_invalid_http_messaging_) { + // The stream is about to be closed due to an invalid header or messaging. Don't kill the + // entire connection if one stream has bad headers or messaging. + if (stream != nullptr) { + // See comment below in onStreamClose() for why we do this. + stream->reset_due_to_messaging_error_ = true; + } + return 0; + } + } + + // Cause dispatch to return with an error code. + return NGHTTP2_ERR_CALLBACK_FAILURE; +} + +int ConnectionImpl::onBeforeFrameSend(const nghttp2_frame* frame) { + ENVOY_CONN_LOG(trace, "about to send frame type={}, flags={}", connection_, + static_cast(frame->hd.type), static_cast(frame->hd.flags)); + ASSERT(!is_outbound_flood_monitored_control_frame_); + // Flag flood monitored outbound control frames. 
+ is_outbound_flood_monitored_control_frame_ = + ((frame->hd.type == NGHTTP2_PING || frame->hd.type == NGHTTP2_SETTINGS) && + frame->hd.flags & NGHTTP2_FLAG_ACK) || + frame->hd.type == NGHTTP2_RST_STREAM; + return 0; +} + +void ConnectionImpl::incrementOutboundFrameCount(bool is_outbound_flood_monitored_control_frame) { + ++outbound_frames_; + if (is_outbound_flood_monitored_control_frame) { + ++outbound_control_frames_; + } + checkOutboundQueueLimits(); +} + +bool ConnectionImpl::addOutboundFrameFragment(Buffer::OwnedImpl& output, const uint8_t* data, + size_t length) { + // Reset the outbound frame type (set in the onBeforeFrameSend callback) since the + // onBeforeFrameSend callback is not called for DATA frames. + bool is_outbound_flood_monitored_control_frame = false; + std::swap(is_outbound_flood_monitored_control_frame, is_outbound_flood_monitored_control_frame_); + try { + incrementOutboundFrameCount(is_outbound_flood_monitored_control_frame); + } catch (const FrameFloodException&) { + return false; + } + + output.add(data, length); + output.addDrainTracker(is_outbound_flood_monitored_control_frame ? control_frame_buffer_releasor_ + : frame_buffer_releasor_); + return true; +} + +void ConnectionImpl::releaseOutboundFrame() { + ASSERT(outbound_frames_ >= 1); + --outbound_frames_; +} + +void ConnectionImpl::releaseOutboundControlFrame() { + ASSERT(outbound_control_frames_ >= 1); + --outbound_control_frames_; + releaseOutboundFrame(); +} + +ssize_t ConnectionImpl::onSend(const uint8_t* data, size_t length) { + ENVOY_CONN_LOG(trace, "send data: bytes={}", connection_, length); + Buffer::OwnedImpl buffer; + if (!addOutboundFrameFragment(buffer, data, length)) { + ENVOY_CONN_LOG(debug, "error sending frame: Too many frames in the outbound queue.", + connection_); + return NGHTTP2_ERR_FLOODED; + } + + // While the buffer is transient the fragment it contains will be moved into the + // write_buffer_ of the underlying connection_ by the write method below. 
+ // This creates lifetime dependency between the write_buffer_ of the underlying connection + // and the codec object. Specifically the write_buffer_ MUST be either fully drained or + // deleted before the codec object is deleted. This is presently guaranteed by the + // destruction order of the Network::ConnectionImpl object where write_buffer_ is + // destroyed before the filter_manager_ which owns the codec through Http::ConnectionManagerImpl. + connection_.write(buffer, false); + return length; +} + +int ConnectionImpl::onStreamClose(int32_t stream_id, uint32_t error_code) { + StreamImpl* stream = getStream(stream_id); + if (stream) { + ENVOY_CONN_LOG(debug, "stream closed: {}", connection_, error_code); + if (!stream->remote_end_stream_ || !stream->local_end_stream_) { + StreamResetReason reason; + if (stream->reset_due_to_messaging_error_) { + // Unfortunately, the nghttp2 API makes it incredibly difficult to clearly understand + // the flow of resets. I.e., did the reset originate locally? Was it remote? Here, + // we attempt to track cases in which we sent a reset locally due to an invalid frame + // received from the remote. We only do that in two cases currently (HTTP messaging layer + // errors from https://tools.ietf.org/html/rfc7540#section-8 which nghttp2 is very strict + // about). In other cases we treat invalid frames as a protocol error and just kill + // the connection. + reason = StreamResetReason::LocalReset; + } else { + reason = error_code == NGHTTP2_REFUSED_STREAM ? StreamResetReason::RemoteRefusedStreamReset + : StreamResetReason::RemoteReset; + } + + stream->runResetCallbacks(reason); + } + + stream->destroy(); + connection_.dispatcher().deferredDelete(stream->removeFromList(active_streams_)); + // Any unconsumed data must be consumed before the stream is deleted. + // nghttp2 does not appear to track this internally, and any stream deleted + // with outstanding window will contribute to a slow connection-window leak. 
+ nghttp2_session_consume(session_, stream_id, stream->unconsumed_bytes_); + stream->unconsumed_bytes_ = 0; + nghttp2_session_set_stream_user_data(session_, stream->stream_id_, nullptr); + } + + return 0; +} + +int ConnectionImpl::onMetadataReceived(int32_t stream_id, const uint8_t* data, size_t len) { + ENVOY_CONN_LOG(trace, "recv {} bytes METADATA", connection_, len); + + StreamImpl* stream = getStream(stream_id); + if (!stream) { + return 0; + } + + bool success = stream->getMetadataDecoder().receiveMetadata(data, len); + return success ? 0 : NGHTTP2_ERR_CALLBACK_FAILURE; +} + +int ConnectionImpl::onMetadataFrameComplete(int32_t stream_id, bool end_metadata) { + ENVOY_CONN_LOG(trace, "recv METADATA frame on stream {}, end_metadata: {}", connection_, + stream_id, end_metadata); + + StreamImpl* stream = getStream(stream_id); + if (stream == nullptr) { + return 0; + } + + bool result = stream->getMetadataDecoder().onMetadataFrameComplete(end_metadata); + return result ? 0 : NGHTTP2_ERR_CALLBACK_FAILURE; +} + +ssize_t ConnectionImpl::packMetadata(int32_t stream_id, uint8_t* buf, size_t len) { + ENVOY_CONN_LOG(trace, "pack METADATA frame on stream {}", connection_, stream_id); + + StreamImpl* stream = getStream(stream_id); + if (stream == nullptr) { + return 0; + } + + MetadataEncoder& encoder = stream->getMetadataEncoder(); + return encoder.packNextFramePayload(buf, len); +} + +int ConnectionImpl::saveHeader(const nghttp2_frame* frame, HeaderString&& name, + HeaderString&& value) { + StreamImpl* stream = getStream(frame->hd.stream_id); + if (!stream) { + // We have seen 1 or 2 crashes where we get a headers callback but there is no associated + // stream data. I honestly am not sure how this can happen. However, from reading the nghttp2 + // code it looks possible that inflate_header_block() can safely inflate headers for an already + // closed stream, but will still call the headers callback. Since that seems possible, we should + // ignore this case here. 
+ // TODO(mattklein123): Figure out a test case that can hit this. + stats_.headers_cb_no_stream_.inc(); + return 0; + } + + auto should_return = checkHeaderNameForUnderscores(name.getStringView()); + if (should_return) { + stream->setDetails(Http2ResponseCodeDetails::get().invalid_underscore); + name.clear(); + value.clear(); + return should_return.value(); + } + + stream->saveHeader(std::move(name), std::move(value)); + + if (stream->headers().byteSize() > max_headers_kb_ * 1024 || + stream->headers().size() > max_headers_count_) { + stream->setDetails(Http2ResponseCodeDetails::get().too_many_headers); + stats_.header_overflow_.inc(); + // This will cause the library to reset/close the stream. + return NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE; + } else { + return 0; + } +} + +void ConnectionImpl::sendPendingFrames() { + if (dispatching_ || connection_.state() == Network::Connection::State::Closed) { + return; + } + + const int rc = nghttp2_session_send(session_); + if (rc != 0) { + ASSERT(rc == NGHTTP2_ERR_CALLBACK_FAILURE); + // For errors caused by the pending outbound frame flood the FrameFloodException has + // to be thrown. However the nghttp2 library returns only the generic error code for + // all failure types. Check queue limits and throw FrameFloodException if they were + // exceeded. + if (outbound_frames_ > max_outbound_frames_ || + outbound_control_frames_ > max_outbound_control_frames_) { + throw FrameFloodException("Too many frames in the outbound queue."); + } + + throw CodecProtocolException(std::string(nghttp2_strerror(rc))); + } + + // See ConnectionImpl::StreamImpl::resetStream() for why we do this. This is an uncommon event, + // so iterating through every stream to find the ones that have a deferred reset is not a big + // deal. Furthermore, queueing a reset frame does not actually invoke the close stream callback. + // This is only done when the reset frame is sent. Thus, it's safe to work directly with the + // stream map. 
+ // NOTE: The way we handle deferred reset is essentially best effort. If we intend to do a + // deferred reset, we try to finish the stream, including writing any pending data frames. + // If we cannot do this (potentially due to not enough window), we just reset the stream. + // In general this behavior occurs only when we are trying to send immediate error messages + // to short circuit requests. In the best effort case, we complete the stream before + // resetting. In other cases, we just do the reset now which will blow away pending data + // frames and release any memory associated with the stream. + if (pending_deferred_reset_) { + pending_deferred_reset_ = false; + for (auto& stream : active_streams_) { + if (stream->deferred_reset_) { + stream->resetStreamWorker(stream->deferred_reset_.value()); + } + } + sendPendingFrames(); + } +} + +void ConnectionImpl::sendSettings( + const envoy::config::core::v3::Http2ProtocolOptions& http2_options, bool disable_push) { + absl::InlinedVector settings; + auto insertParameter = [&settings](const nghttp2_settings_entry& entry) mutable -> bool { + const auto it = std::find_if(settings.cbegin(), settings.cend(), + [&entry](const nghttp2_settings_entry& existing) { + return entry.settings_id == existing.settings_id; + }); + if (it != settings.end()) { + return false; + } + settings.push_back(entry); + return true; + }; + + // Universally disable receiving push promise frames as we don't currently support + // them. nghttp2 will fail the connection if the other side still sends them. + // TODO(mattklein123): Remove this when we correctly proxy push promise. + // NOTE: This is a special case with respect to custom parameter overrides in that server push is + // not supported and therefore not end user configurable. + if (disable_push) { + settings.push_back( + {static_cast(NGHTTP2_SETTINGS_ENABLE_PUSH), disable_push ? 
0U : 1U}); + } + + for (const auto& it : http2_options.custom_settings_parameters()) { + ASSERT(it.identifier().value() <= std::numeric_limits::max()); + const bool result = + insertParameter({static_cast(it.identifier().value()), it.value().value()}); + ASSERT(result); + ENVOY_CONN_LOG(debug, "adding custom settings parameter with id {:#x} to {}", connection_, + it.identifier().value(), it.value().value()); + } + + // Insert named parameters. + settings.insert( + settings.end(), + {{NGHTTP2_SETTINGS_HEADER_TABLE_SIZE, http2_options.hpack_table_size().value()}, + {NGHTTP2_SETTINGS_ENABLE_CONNECT_PROTOCOL, http2_options.allow_connect()}, + {NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS, http2_options.max_concurrent_streams().value()}, + {NGHTTP2_SETTINGS_INITIAL_WINDOW_SIZE, http2_options.initial_stream_window_size().value()}}); + if (!settings.empty()) { + int rc = nghttp2_submit_settings(session_, NGHTTP2_FLAG_NONE, settings.data(), settings.size()); + ASSERT(rc == 0); + } else { + // nghttp2_submit_settings need to be called at least once + int rc = nghttp2_submit_settings(session_, NGHTTP2_FLAG_NONE, nullptr, 0); + ASSERT(rc == 0); + } + + const uint32_t initial_connection_window_size = + http2_options.initial_connection_window_size().value(); + // Increase connection window size up to our default size. 
+ if (initial_connection_window_size != NGHTTP2_INITIAL_CONNECTION_WINDOW_SIZE) { + ENVOY_CONN_LOG(debug, "updating connection-level initial window size to {}", connection_, + initial_connection_window_size); + int rc = nghttp2_submit_window_update(session_, NGHTTP2_FLAG_NONE, 0, + initial_connection_window_size - + NGHTTP2_INITIAL_CONNECTION_WINDOW_SIZE); + ASSERT(rc == 0); + } +} + +ConnectionImpl::Http2Callbacks::Http2Callbacks() { + nghttp2_session_callbacks_new(&callbacks_); + nghttp2_session_callbacks_set_send_callback( + callbacks_, + [](nghttp2_session*, const uint8_t* data, size_t length, int, void* user_data) -> ssize_t { + return static_cast(user_data)->onSend(data, length); + }); + + nghttp2_session_callbacks_set_send_data_callback( + callbacks_, + [](nghttp2_session*, nghttp2_frame* frame, const uint8_t* framehd, size_t length, + nghttp2_data_source* source, void*) -> int { + ASSERT(frame->data.padlen == 0); + return static_cast(source->ptr)->onDataSourceSend(framehd, length); + }); + + nghttp2_session_callbacks_set_on_begin_headers_callback( + callbacks_, [](nghttp2_session*, const nghttp2_frame* frame, void* user_data) -> int { + return static_cast(user_data)->onBeginHeaders(frame); + }); + + nghttp2_session_callbacks_set_on_header_callback( + callbacks_, + [](nghttp2_session*, const nghttp2_frame* frame, const uint8_t* raw_name, size_t name_length, + const uint8_t* raw_value, size_t value_length, uint8_t, void* user_data) -> int { + // TODO PERF: Can reference count here to avoid copies. 
+ HeaderString name; + name.setCopy(reinterpret_cast(raw_name), name_length); + HeaderString value; + value.setCopy(reinterpret_cast(raw_value), value_length); + return static_cast(user_data)->onHeader(frame, std::move(name), + std::move(value)); + }); + + nghttp2_session_callbacks_set_on_data_chunk_recv_callback( + callbacks_, + [](nghttp2_session*, uint8_t, int32_t stream_id, const uint8_t* data, size_t len, + void* user_data) -> int { + return static_cast(user_data)->onData(stream_id, data, len); + }); + + nghttp2_session_callbacks_set_on_begin_frame_callback( + callbacks_, [](nghttp2_session*, const nghttp2_frame_hd* hd, void* user_data) -> int { + return static_cast(user_data)->onBeforeFrameReceived(hd); + }); + + nghttp2_session_callbacks_set_on_frame_recv_callback( + callbacks_, [](nghttp2_session*, const nghttp2_frame* frame, void* user_data) -> int { + return static_cast(user_data)->onFrameReceived(frame); + }); + + nghttp2_session_callbacks_set_on_stream_close_callback( + callbacks_, + [](nghttp2_session*, int32_t stream_id, uint32_t error_code, void* user_data) -> int { + return static_cast(user_data)->onStreamClose(stream_id, error_code); + }); + + nghttp2_session_callbacks_set_on_frame_send_callback( + callbacks_, [](nghttp2_session*, const nghttp2_frame* frame, void* user_data) -> int { + return static_cast(user_data)->onFrameSend(frame); + }); + + nghttp2_session_callbacks_set_before_frame_send_callback( + callbacks_, [](nghttp2_session*, const nghttp2_frame* frame, void* user_data) -> int { + return static_cast(user_data)->onBeforeFrameSend(frame); + }); + + nghttp2_session_callbacks_set_on_frame_not_send_callback( + callbacks_, [](nghttp2_session*, const nghttp2_frame*, int, void*) -> int { + // We used to always return failure here but it looks now this can get called if the other + // side sends GOAWAY and we are trying to send a SETTINGS ACK. Just ignore this for now. 
+ return 0; + }); + + nghttp2_session_callbacks_set_on_invalid_frame_recv_callback( + callbacks_, + [](nghttp2_session*, const nghttp2_frame* frame, int error_code, void* user_data) -> int { + return static_cast(user_data)->onInvalidFrame(frame->hd.stream_id, + error_code); + }); + + nghttp2_session_callbacks_set_on_extension_chunk_recv_callback( + callbacks_, + [](nghttp2_session*, const nghttp2_frame_hd* hd, const uint8_t* data, size_t len, + void* user_data) -> int { + ASSERT(hd->length >= len); + return static_cast(user_data)->onMetadataReceived(hd->stream_id, data, + len); + }); + + nghttp2_session_callbacks_set_unpack_extension_callback( + callbacks_, [](nghttp2_session*, void**, const nghttp2_frame_hd* hd, void* user_data) -> int { + return static_cast(user_data)->onMetadataFrameComplete( + hd->stream_id, hd->flags == END_METADATA_FLAG); + }); + + nghttp2_session_callbacks_set_pack_extension_callback( + callbacks_, + [](nghttp2_session*, uint8_t* buf, size_t len, const nghttp2_frame* frame, + void* user_data) -> ssize_t { + ASSERT(frame->hd.length <= len); + return static_cast(user_data)->packMetadata(frame->hd.stream_id, buf, len); + }); + + nghttp2_session_callbacks_set_error_callback2( + callbacks_, [](nghttp2_session*, int, const char* msg, size_t len, void* user_data) -> int { + return static_cast(user_data)->onError(absl::string_view(msg, len)); + }); +} + +ConnectionImpl::Http2Callbacks::~Http2Callbacks() { nghttp2_session_callbacks_del(callbacks_); } + +ConnectionImpl::Http2Options::Http2Options( + const envoy::config::core::v3::Http2ProtocolOptions& http2_options) { + nghttp2_option_new(&options_); + // Currently we do not do anything with stream priority. Setting the following option prevents + // nghttp2 from keeping around closed streams for use during stream priority dependency graph + // calculations. This saves a tremendous amount of memory in cases where there are a large + // number of kept alive HTTP/2 connections. 
+ nghttp2_option_set_no_closed_streams(options_, 1); + nghttp2_option_set_no_auto_window_update(options_, 1); + + // The max send header block length is configured to an arbitrarily high number so as to never + // trigger the check within nghttp2, as we check request headers length in + // codec_impl::saveHeader. + nghttp2_option_set_max_send_header_block_length(options_, 0x2000000); + + if (http2_options.hpack_table_size().value() != NGHTTP2_DEFAULT_HEADER_TABLE_SIZE) { + nghttp2_option_set_max_deflate_dynamic_table_size(options_, + http2_options.hpack_table_size().value()); + } + + if (http2_options.allow_metadata()) { + nghttp2_option_set_user_recv_extension_type(options_, METADATA_FRAME_TYPE); + } + + // nghttp2 v1.39.2 lowered the internal flood protection limit from 10K to 1K of ACK frames. + // This new limit may cause the internal nghttp2 mitigation to trigger more often (as it + // requires just 9K of incoming bytes for smallest 9 byte SETTINGS frame), bypassing the same + // mitigation and its associated behavior in the envoy HTTP/2 codec. Since envoy does not rely + // on this mitigation, set back to the old 10K number to avoid any changes in the HTTP/2 codec + // behavior. + nghttp2_option_set_max_outbound_ack(options_, 10000); +} + +ConnectionImpl::Http2Options::~Http2Options() { nghttp2_option_del(options_); } + +ConnectionImpl::ClientHttp2Options::ClientHttp2Options( + const envoy::config::core::v3::Http2ProtocolOptions& http2_options) + : Http2Options(http2_options) { + // Temporarily disable initial max streams limit/protection, since we might want to create + // more than 100 streams before receiving the HTTP/2 SETTINGS frame from the server. + // + // TODO(PiotrSikora): remove this once multiple upstream connections or queuing are implemented. 
+ nghttp2_option_set_peer_max_concurrent_streams( + options_, ::Envoy::Http2::Utility::OptionsLimits::DEFAULT_MAX_CONCURRENT_STREAMS); +} + +ClientConnectionImpl::ClientConnectionImpl( + Network::Connection& connection, Http::ConnectionCallbacks& callbacks, CodecStats& stats, + const envoy::config::core::v3::Http2ProtocolOptions& http2_options, + const uint32_t max_response_headers_kb, const uint32_t max_response_headers_count, + Nghttp2SessionFactory& http2_session_factory) + : ConnectionImpl(connection, stats, http2_options, max_response_headers_kb, + max_response_headers_count), + callbacks_(callbacks) { + ClientHttp2Options client_http2_options(http2_options); + session_ = http2_session_factory.create(http2_callbacks_.callbacks(), base(), + client_http2_options.options()); + http2_session_factory.init(session_, base(), http2_options); + allow_metadata_ = http2_options.allow_metadata(); +} + +RequestEncoder& ClientConnectionImpl::newStream(ResponseDecoder& decoder) { + ClientStreamImplPtr stream(new ClientStreamImpl(*this, per_stream_buffer_limit_, decoder)); + // If the connection is currently above the high watermark, make sure to inform the new stream. + // The connection can not pass this on automatically as it has no awareness that a new stream is + // created. + if (connection_.aboveHighWatermark()) { + stream->runHighWatermarkCallbacks(); + } + ClientStreamImpl& stream_ref = *stream; + LinkedList::moveIntoList(std::move(stream), active_streams_); + return stream_ref; +} + +int ClientConnectionImpl::onBeginHeaders(const nghttp2_frame* frame) { + // The client code explicitly does not currently support push promise. 
+ RELEASE_ASSERT(frame->hd.type == NGHTTP2_HEADERS, ""); + RELEASE_ASSERT(frame->headers.cat == NGHTTP2_HCAT_RESPONSE || + frame->headers.cat == NGHTTP2_HCAT_HEADERS, + ""); + if (frame->headers.cat == NGHTTP2_HCAT_HEADERS) { + StreamImpl* stream = getStream(frame->hd.stream_id); + stream->allocTrailers(); + } + + return 0; +} + +int ClientConnectionImpl::onHeader(const nghttp2_frame* frame, HeaderString&& name, + HeaderString&& value) { + // The client code explicitly does not currently support push promise. + ASSERT(frame->hd.type == NGHTTP2_HEADERS); + ASSERT(frame->headers.cat == NGHTTP2_HCAT_RESPONSE || frame->headers.cat == NGHTTP2_HCAT_HEADERS); + return saveHeader(frame, std::move(name), std::move(value)); +} + +ServerConnectionImpl::ServerConnectionImpl( + Network::Connection& connection, Http::ServerConnectionCallbacks& callbacks, CodecStats& stats, + const envoy::config::core::v3::Http2ProtocolOptions& http2_options, + const uint32_t max_request_headers_kb, const uint32_t max_request_headers_count, + envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction + headers_with_underscores_action) + : ConnectionImpl(connection, stats, http2_options, max_request_headers_kb, + max_request_headers_count), + callbacks_(callbacks), headers_with_underscores_action_(headers_with_underscores_action) { + Http2Options h2_options(http2_options); + + nghttp2_session_server_new2(&session_, http2_callbacks_.callbacks(), base(), + h2_options.options()); + sendSettings(http2_options, false); + allow_metadata_ = http2_options.allow_metadata(); +} + +int ServerConnectionImpl::onBeginHeaders(const nghttp2_frame* frame) { + // For a server connection, we should never get push promise frames. 
+ ASSERT(frame->hd.type == NGHTTP2_HEADERS); + + if (!trackInboundFrames(&frame->hd, frame->headers.padlen)) { + return NGHTTP2_ERR_FLOODED; + } + + if (frame->headers.cat != NGHTTP2_HCAT_REQUEST) { + stats_.trailers_.inc(); + ASSERT(frame->headers.cat == NGHTTP2_HCAT_HEADERS); + + StreamImpl* stream = getStream(frame->hd.stream_id); + stream->allocTrailers(); + return 0; + } + + ServerStreamImplPtr stream(new ServerStreamImpl(*this, per_stream_buffer_limit_)); + if (connection_.aboveHighWatermark()) { + stream->runHighWatermarkCallbacks(); + } + stream->request_decoder_ = &callbacks_.newStream(*stream); + stream->stream_id_ = frame->hd.stream_id; + LinkedList::moveIntoList(std::move(stream), active_streams_); + nghttp2_session_set_stream_user_data(session_, frame->hd.stream_id, + active_streams_.front().get()); + return 0; +} + +int ServerConnectionImpl::onHeader(const nghttp2_frame* frame, HeaderString&& name, + HeaderString&& value) { + // For a server connection, we should never get push promise frames. + ASSERT(frame->hd.type == NGHTTP2_HEADERS); + ASSERT(frame->headers.cat == NGHTTP2_HCAT_REQUEST || frame->headers.cat == NGHTTP2_HCAT_HEADERS); + return saveHeader(frame, std::move(name), std::move(value)); +} + +bool ServerConnectionImpl::trackInboundFrames(const nghttp2_frame_hd* hd, uint32_t padding_length) { + ENVOY_CONN_LOG(trace, "track inbound frame type={} flags={} length={} padding_length={}", + connection_, static_cast(hd->type), static_cast(hd->flags), + static_cast(hd->length), padding_length); + switch (hd->type) { + case NGHTTP2_HEADERS: + case NGHTTP2_CONTINUATION: + // Track new streams. + if (hd->flags & NGHTTP2_FLAG_END_HEADERS) { + inbound_streams_++; + } + FALLTHRU; + case NGHTTP2_DATA: + // Track frames with an empty payload and no end stream flag. 
+ if (hd->length - padding_length == 0 && !(hd->flags & NGHTTP2_FLAG_END_STREAM)) { + ENVOY_CONN_LOG(trace, "frame with an empty payload and no end stream flag.", connection_); + consecutive_inbound_frames_with_empty_payload_++; + } else { + consecutive_inbound_frames_with_empty_payload_ = 0; + } + break; + case NGHTTP2_PRIORITY: + inbound_priority_frames_++; + break; + case NGHTTP2_WINDOW_UPDATE: + inbound_window_update_frames_++; + break; + default: + break; + } + + if (!checkInboundFrameLimits(hd->stream_id)) { + // NGHTTP2_ERR_FLOODED is overridden within nghttp2 library and it doesn't propagate + // all the way to nghttp2_session_mem_recv() where we need it. + flood_detected_ = true; + return false; + } + + return true; +} + +bool ServerConnectionImpl::checkInboundFrameLimits(int32_t stream_id) { + ASSERT(dispatching_downstream_data_); + ConnectionImpl::StreamImpl* stream = getStream(stream_id); + + if (consecutive_inbound_frames_with_empty_payload_ > + max_consecutive_inbound_frames_with_empty_payload_) { + ENVOY_CONN_LOG(trace, + "error reading frame: Too many consecutive frames with an empty payload " + "received in this HTTP/2 session.", + connection_); + if (stream) { + stream->setDetails(Http2ResponseCodeDetails::get().inbound_empty_frame_flood); + } + stats_.inbound_empty_frames_flood_.inc(); + return false; + } + + if (inbound_priority_frames_ > max_inbound_priority_frames_per_stream_ * (1 + inbound_streams_)) { + ENVOY_CONN_LOG(trace, + "error reading frame: Too many PRIORITY frames received in this HTTP/2 session.", + connection_); + stats_.inbound_priority_frames_flood_.inc(); + return false; + } + + if (inbound_window_update_frames_ > + 1 + 2 * (inbound_streams_ + + max_inbound_window_update_frames_per_data_frame_sent_ * outbound_data_frames_)) { + ENVOY_CONN_LOG( + trace, + "error reading frame: Too many WINDOW_UPDATE frames received in this HTTP/2 session.", + connection_); + stats_.inbound_window_update_frames_flood_.inc(); + return false; + } + 
+ return true; +} + +void ServerConnectionImpl::checkOutboundQueueLimits() { + if (outbound_frames_ > max_outbound_frames_ && dispatching_downstream_data_) { + stats_.outbound_flood_.inc(); + throw FrameFloodException("Too many frames in the outbound queue."); + } + if (outbound_control_frames_ > max_outbound_control_frames_ && dispatching_downstream_data_) { + stats_.outbound_control_flood_.inc(); + throw FrameFloodException("Too many control frames in the outbound queue."); + } +} + +Http::Status ServerConnectionImpl::dispatch(Buffer::Instance& data) { + // TODO(#10878): Remove this wrapper when exception removal is complete. innerDispatch may either + // throw an exception or return an error status. The utility wrapper catches exceptions and + // converts them to error statuses. + return Http::Utility::exceptionToStatus( + [&](Buffer::Instance& data) -> Http::Status { return innerDispatch(data); }, data); +} + +Http::Status ServerConnectionImpl::innerDispatch(Buffer::Instance& data) { + ASSERT(!dispatching_downstream_data_); + dispatching_downstream_data_ = true; + + // Make sure the dispatching_downstream_data_ is set to false even + // when ConnectionImpl::dispatch throws an exception. + Cleanup cleanup([this]() { dispatching_downstream_data_ = false; }); + + // Make sure downstream outbound queue was not flooded by the upstream frames. 
+ checkOutboundQueueLimits(); + + return ConnectionImpl::innerDispatch(data); +} + +absl::optional +ServerConnectionImpl::checkHeaderNameForUnderscores(absl::string_view header_name) { + if (headers_with_underscores_action_ != envoy::config::core::v3::HttpProtocolOptions::ALLOW && + Http::HeaderUtility::headerNameContainsUnderscore(header_name)) { + if (headers_with_underscores_action_ == + envoy::config::core::v3::HttpProtocolOptions::DROP_HEADER) { + ENVOY_CONN_LOG(debug, "Dropping header with invalid characters in its name: {}", connection_, + header_name); + stats_.dropped_headers_with_underscores_.inc(); + return 0; + } + ENVOY_CONN_LOG(debug, "Rejecting request due to header name with underscores: {}", connection_, + header_name); + stats_.requests_rejected_with_underscores_in_headers_.inc(); + return NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE; + } + return absl::nullopt; +} + +} // namespace Http2 +} // namespace Legacy +} // namespace Http +} // namespace Envoy diff --git a/source/common/http/http2/codec_impl_legacy.h b/source/common/http/http2/codec_impl_legacy.h new file mode 100644 index 0000000000000..47065d6438064 --- /dev/null +++ b/source/common/http/http2/codec_impl_legacy.h @@ -0,0 +1,618 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include "envoy/config/core/v3/protocol.pb.h" +#include "envoy/event/deferred_deletable.h" +#include "envoy/http/codec.h" +#include "envoy/network/connection.h" + +#include "common/buffer/buffer_impl.h" +#include "common/buffer/watermark_buffer.h" +#include "common/common/linked_object.h" +#include "common/common/logger.h" +#include "common/common/thread.h" +#include "common/http/codec_helper.h" +#include "common/http/header_map_impl.h" +#include "common/http/http2/codec_stats.h" +#include "common/http/http2/metadata_decoder.h" +#include "common/http/http2/metadata_encoder.h" +#include "common/http/status.h" +#include "common/http/utility.h" + +#include "absl/types/optional.h" +#include 
"nghttp2/nghttp2.h" + +namespace Envoy { +namespace Http { +namespace Legacy { +namespace Http2 { + +// This is not the full client magic, but it's the smallest size that should be able to +// differentiate between HTTP/1 and HTTP/2. +const std::string CLIENT_MAGIC_PREFIX = "PRI * HTTP/2"; + +class Utility { +public: + /** + * Deal with https://tools.ietf.org/html/rfc7540#section-8.1.2.5 + * @param key supplies the incoming header key. + * @param value supplies the incoming header value. + * @param cookies supplies the header string to fill if this is a cookie header that needs to be + * rebuilt. + */ + static bool reconstituteCrumbledCookies(const HeaderString& key, const HeaderString& value, + HeaderString& cookies); +}; + +class ConnectionImpl; + +// Abstract nghttp2_session factory. Used to enable injection of factories for testing. +class Nghttp2SessionFactory { +public: + using ConnectionImplType = ConnectionImpl; + virtual ~Nghttp2SessionFactory() = default; + + // Returns a new nghttp2_session to be used with |connection|. + virtual nghttp2_session* create(const nghttp2_session_callbacks* callbacks, + ConnectionImplType* connection, + const nghttp2_option* options) PURE; + + // Initializes the |session|. + virtual void init(nghttp2_session* session, ConnectionImplType* connection, + const envoy::config::core::v3::Http2ProtocolOptions& options) PURE; +}; + +class ProdNghttp2SessionFactory : public Nghttp2SessionFactory { +public: + nghttp2_session* create(const nghttp2_session_callbacks* callbacks, ConnectionImpl* connection, + const nghttp2_option* options) override; + + void init(nghttp2_session* session, ConnectionImpl* connection, + const envoy::config::core::v3::Http2ProtocolOptions& options) override; + + // Returns a global factory instance. Note that this is possible because no internal state is + // maintained; the thread safety of create() and init()'s side effects is guaranteed by Envoy's + // worker based threading model. 
+ static ProdNghttp2SessionFactory& get() { + static ProdNghttp2SessionFactory* instance = new ProdNghttp2SessionFactory(); + return *instance; + } +}; + +/** + * Base class for HTTP/2 client and server codecs. + */ +class ConnectionImpl : public virtual Connection, protected Logger::Loggable { +public: + ConnectionImpl(Network::Connection& connection, Http::Http2::CodecStats& stats, + const envoy::config::core::v3::Http2ProtocolOptions& http2_options, + const uint32_t max_headers_kb, const uint32_t max_headers_count); + + ~ConnectionImpl() override; + + // Http::Connection + // NOTE: the `dispatch` method is also overridden in the ServerConnectionImpl class + Http::Status dispatch(Buffer::Instance& data) override; + void goAway() override; + Protocol protocol() override { return Protocol::Http2; } + void shutdownNotice() override; + bool wantsToWrite() override { return nghttp2_session_want_write(session_); } + // Propagate network connection watermark events to each stream on the connection. + void onUnderlyingConnectionAboveWriteBufferHighWatermark() override { + for (auto& stream : active_streams_) { + stream->runHighWatermarkCallbacks(); + } + } + void onUnderlyingConnectionBelowWriteBufferLowWatermark() override { + for (auto& stream : active_streams_) { + stream->runLowWatermarkCallbacks(); + } + } + + /** + * An inner dispatch call that executes the dispatching logic. While exception removal is in + * migration (#10878), this function may either throw an exception or return an error status. + * Exceptions are caught and translated to their corresponding statuses in the outer level + * dispatch. + * This needs to be virtual so that ServerConnectionImpl can override. + * TODO(#10878): Remove this when exception removal is complete. + */ + virtual Http::Status innerDispatch(Buffer::Instance& data); + +protected: + friend class ProdNghttp2SessionFactory; + + /** + * Wrapper for static nghttp2 callback dispatchers. 
+ */ + class Http2Callbacks { + public: + Http2Callbacks(); + ~Http2Callbacks(); + + const nghttp2_session_callbacks* callbacks() { return callbacks_; } + + private: + nghttp2_session_callbacks* callbacks_; + }; + + /** + * Wrapper for static nghttp2 session options. + */ + class Http2Options { + public: + Http2Options(const envoy::config::core::v3::Http2ProtocolOptions& http2_options); + ~Http2Options(); + + const nghttp2_option* options() { return options_; } + + protected: + nghttp2_option* options_; + }; + + class ClientHttp2Options : public Http2Options { + public: + ClientHttp2Options(const envoy::config::core::v3::Http2ProtocolOptions& http2_options); + }; + + /** + * Base class for client and server side streams. + */ + struct StreamImpl : public virtual StreamEncoder, + public Stream, + public LinkedObject, + public Event::DeferredDeletable, + public StreamCallbackHelper { + + StreamImpl(ConnectionImpl& parent, uint32_t buffer_limit); + ~StreamImpl() override; + // TODO(mattklein123): Optimally this would be done in the destructor but there are currently + // deferred delete lifetime issues that need sorting out if the destructor of the stream is + // going to be able to refer to the parent connection. + void destroy(); + void disarmStreamIdleTimer() { + if (stream_idle_timer_ != nullptr) { + // To ease testing and the destructor assertion. 
+ stream_idle_timer_->disableTimer(); + stream_idle_timer_.reset(); + } + } + + StreamImpl* base() { return this; } + ssize_t onDataSourceRead(uint64_t length, uint32_t* data_flags); + int onDataSourceSend(const uint8_t* framehd, size_t length); + void resetStreamWorker(StreamResetReason reason); + static void buildHeaders(std::vector& final_headers, const HeaderMap& headers); + void saveHeader(HeaderString&& name, HeaderString&& value); + void encodeHeadersBase(const std::vector& final_headers, bool end_stream); + virtual void submitHeaders(const std::vector& final_headers, + nghttp2_data_provider* provider) PURE; + void encodeTrailersBase(const HeaderMap& headers); + void submitTrailers(const HeaderMap& trailers); + void submitMetadata(uint8_t flags); + virtual StreamDecoder& decoder() PURE; + virtual HeaderMap& headers() PURE; + virtual void allocTrailers() PURE; + virtual HeaderMapPtr cloneTrailers(const HeaderMap& trailers) PURE; + virtual void createPendingFlushTimer() PURE; + void onPendingFlushTimer(); + + // Http::StreamEncoder + void encodeData(Buffer::Instance& data, bool end_stream) override; + Stream& getStream() override { return *this; } + void encodeMetadata(const MetadataMapVector& metadata_map_vector) override; + Http1StreamEncoderOptionsOptRef http1StreamEncoderOptions() override { return absl::nullopt; } + + // Http::Stream + void addCallbacks(StreamCallbacks& callbacks) override { addCallbacksHelper(callbacks); } + void removeCallbacks(StreamCallbacks& callbacks) override { removeCallbacksHelper(callbacks); } + void resetStream(StreamResetReason reason) override; + void readDisable(bool disable) override; + uint32_t bufferLimit() override { return pending_recv_data_.highWatermark(); } + const Network::Address::InstanceConstSharedPtr& connectionLocalAddress() override { + return parent_.connection_.localAddress(); + } + absl::string_view responseDetails() override { return details_; } + void setFlushTimeout(std::chrono::milliseconds timeout) 
override { + stream_idle_timeout_ = timeout; + } + + // This code assumes that details is a static string, so that we + // can avoid copying it. + void setDetails(absl::string_view details) { + // TODO(asraa): In some cases nghttp2's error handling may cause processing of multiple + // invalid frames for a single stream. If a temporal stream error is returned from a callback, + // remaining frames in the buffer will still be partially processed. For example, remaining + // frames will still parse through nghttp2's push promise error handling and in + // onBeforeFrame(Send/Received) callbacks, which may return invalid frame errors and attempt + // to set details again. In these cases, we simply do not overwrite details. When internal + // error latching is implemented in the codec for exception removal, we should prevent calling + // setDetails in an error state. + if (details_.empty()) { + details_ = details; + } + } + + void setWriteBufferWatermarks(uint32_t low_watermark, uint32_t high_watermark) { + pending_recv_data_.setWatermarks(low_watermark, high_watermark); + pending_send_data_.setWatermarks(low_watermark, high_watermark); + } + + // If the receive buffer encounters watermark callbacks, enable/disable reads on this stream. + void pendingRecvBufferHighWatermark(); + void pendingRecvBufferLowWatermark(); + + // If the send buffer encounters watermark callbacks, propagate this information to the streams. + // The router and connection manager will propagate them on as appropriate. + void pendingSendBufferHighWatermark(); + void pendingSendBufferLowWatermark(); + + // Does any necessary WebSocket/Upgrade conversion, then passes the headers + // to the decoder_. + virtual void decodeHeaders() PURE; + virtual void decodeTrailers() PURE; + + // Get MetadataEncoder for this stream. + Http::Http2::MetadataEncoder& getMetadataEncoder(); + // Get MetadataDecoder for this stream. 
+ Http::Http2::MetadataDecoder& getMetadataDecoder(); + // Callback function for MetadataDecoder. + void onMetadataDecoded(MetadataMapPtr&& metadata_map_ptr); + + bool buffersOverrun() const { return read_disable_count_ > 0; } + + void encodeDataHelper(Buffer::Instance& data, bool end_stream, + bool skip_encoding_empty_trailers); + + ConnectionImpl& parent_; + int32_t stream_id_{-1}; + uint32_t unconsumed_bytes_{0}; + uint32_t read_disable_count_{0}; + Buffer::WatermarkBuffer pending_recv_data_{ + [this]() -> void { this->pendingRecvBufferLowWatermark(); }, + [this]() -> void { this->pendingRecvBufferHighWatermark(); }, + []() -> void { /* TODO(adisuissa): Handle overflow watermark */ }}; + Buffer::WatermarkBuffer pending_send_data_{ + [this]() -> void { this->pendingSendBufferLowWatermark(); }, + [this]() -> void { this->pendingSendBufferHighWatermark(); }, + []() -> void { /* TODO(adisuissa): Handle overflow watermark */ }}; + HeaderMapPtr pending_trailers_to_encode_; + std::unique_ptr metadata_decoder_; + std::unique_ptr metadata_encoder_; + absl::optional deferred_reset_; + HeaderString cookies_; + bool local_end_stream_sent_ : 1; + bool remote_end_stream_ : 1; + bool data_deferred_ : 1; + bool received_noninformational_headers_ : 1; + bool pending_receive_buffer_high_watermark_called_ : 1; + bool pending_send_buffer_high_watermark_called_ : 1; + bool reset_due_to_messaging_error_ : 1; + absl::string_view details_; + // See HttpConnectionManager.stream_idle_timeout. + std::chrono::milliseconds stream_idle_timeout_{}; + Event::TimerPtr stream_idle_timer_; + }; + + using StreamImplPtr = std::unique_ptr; + + /** + * Client side stream (request). 
+ */ + struct ClientStreamImpl : public StreamImpl, public RequestEncoder { + ClientStreamImpl(ConnectionImpl& parent, uint32_t buffer_limit, + ResponseDecoder& response_decoder) + : StreamImpl(parent, buffer_limit), response_decoder_(response_decoder), + headers_or_trailers_(ResponseHeaderMapImpl::create()) {} + + // StreamImpl + void submitHeaders(const std::vector& final_headers, + nghttp2_data_provider* provider) override; + StreamDecoder& decoder() override { return response_decoder_; } + void decodeHeaders() override; + void decodeTrailers() override; + HeaderMap& headers() override { + if (absl::holds_alternative(headers_or_trailers_)) { + return *absl::get(headers_or_trailers_); + } else { + return *absl::get(headers_or_trailers_); + } + } + void allocTrailers() override { + // If we are waiting for informational headers, make a new response header map, otherwise + // we are about to receive trailers. The codec makes sure this is the only valid sequence. + if (received_noninformational_headers_) { + headers_or_trailers_.emplace(ResponseTrailerMapImpl::create()); + } else { + headers_or_trailers_.emplace(ResponseHeaderMapImpl::create()); + } + } + HeaderMapPtr cloneTrailers(const HeaderMap& trailers) override { + return createHeaderMap(trailers); + } + void createPendingFlushTimer() override { + // Client streams do not create a flush timer because we currently assume that any failure + // to flush would be covered by a request/stream/etc. timeout. + } + + // RequestEncoder + void encodeHeaders(const RequestHeaderMap& headers, bool end_stream) override; + void encodeTrailers(const RequestTrailerMap& trailers) override { + encodeTrailersBase(trailers); + } + + ResponseDecoder& response_decoder_; + absl::variant headers_or_trailers_; + std::string upgrade_type_; + }; + + using ClientStreamImplPtr = std::unique_ptr; + + /** + * Server side stream (response). 
+ */ + struct ServerStreamImpl : public StreamImpl, public ResponseEncoder { + ServerStreamImpl(ConnectionImpl& parent, uint32_t buffer_limit) + : StreamImpl(parent, buffer_limit), headers_or_trailers_(RequestHeaderMapImpl::create()) {} + + // StreamImpl + void submitHeaders(const std::vector& final_headers, + nghttp2_data_provider* provider) override; + StreamDecoder& decoder() override { return *request_decoder_; } + void decodeHeaders() override; + void decodeTrailers() override; + HeaderMap& headers() override { + if (absl::holds_alternative(headers_or_trailers_)) { + return *absl::get(headers_or_trailers_); + } else { + return *absl::get(headers_or_trailers_); + } + } + void allocTrailers() override { + headers_or_trailers_.emplace(RequestTrailerMapImpl::create()); + } + HeaderMapPtr cloneTrailers(const HeaderMap& trailers) override { + return createHeaderMap(trailers); + } + void createPendingFlushTimer() override; + + // ResponseEncoder + void encode100ContinueHeaders(const ResponseHeaderMap& headers) override; + void encodeHeaders(const ResponseHeaderMap& headers, bool end_stream) override; + void encodeTrailers(const ResponseTrailerMap& trailers) override { + encodeTrailersBase(trailers); + } + + RequestDecoder* request_decoder_{}; + absl::variant headers_or_trailers_; + }; + + using ServerStreamImplPtr = std::unique_ptr; + + ConnectionImpl* base() { return this; } + // NOTE: Always use non debug nullptr checks against the return value of this function. There are + // edge cases (such as for METADATA frames) where nghttp2 will issue a callback for a stream_id + // that is not associated with an existing stream. + StreamImpl* getStream(int32_t stream_id); + int saveHeader(const nghttp2_frame* frame, HeaderString&& name, HeaderString&& value); + void sendPendingFrames(); + void sendSettings(const envoy::config::core::v3::Http2ProtocolOptions& http2_options, + bool disable_push); + // Callback triggered when the peer's SETTINGS frame is received. 
+ // NOTE: This is only used for tests. + virtual void onSettingsForTest(const nghttp2_settings&) {} + + /** + * Check if header name contains underscore character. + * Underscore character is allowed in header names by the RFC-7230 and this check is implemented + * as a security measure due to systems that treat '_' and '-' as interchangeable. + * The ServerConnectionImpl may drop header or reject request based on the + * `common_http_protocol_options.headers_with_underscores_action` configuration option in the + * HttpConnectionManager. + */ + virtual absl::optional checkHeaderNameForUnderscores(absl::string_view /* header_name */) { + return absl::nullopt; + } + + static Http2Callbacks http2_callbacks_; + + std::list active_streams_; + nghttp2_session* session_{}; + Http::Http2::CodecStats& stats_; + Network::Connection& connection_; + const uint32_t max_headers_kb_; + const uint32_t max_headers_count_; + uint32_t per_stream_buffer_limit_; + bool allow_metadata_; + const bool stream_error_on_invalid_http_messaging_; + bool flood_detected_; + + // Set if the type of frame that is about to be sent is PING or SETTINGS with the ACK flag set, or + // RST_STREAM. + bool is_outbound_flood_monitored_control_frame_ = 0; + // This counter keeps track of the number of outbound frames of all types (these that were + // buffered in the underlying connection but not yet written into the socket). If this counter + // exceeds the `max_outbound_frames_' value the connection is terminated. + uint32_t outbound_frames_ = 0; + // Maximum number of outbound frames. Initialized from corresponding http2_protocol_options. + // Default value is 10000. + const uint32_t max_outbound_frames_; + const std::function frame_buffer_releasor_; + // This counter keeps track of the number of outbound frames of types PING, SETTINGS and + // RST_STREAM (these that were buffered in the underlying connection but not yet written into the + // socket). 
If this counter exceeds the `max_outbound_control_frames_' value the connection is + // terminated. + uint32_t outbound_control_frames_ = 0; + // Maximum number of outbound frames of types PING, SETTINGS and RST_STREAM. Initialized from + // corresponding http2_protocol_options. Default value is 1000. + const uint32_t max_outbound_control_frames_; + const std::function control_frame_buffer_releasor_; + // This counter keeps track of the number of consecutive inbound frames of types HEADERS, + // CONTINUATION and DATA with an empty payload and no end stream flag. If this counter exceeds + // the `max_consecutive_inbound_frames_with_empty_payload_` value the connection is terminated. + uint32_t consecutive_inbound_frames_with_empty_payload_ = 0; + // Maximum number of consecutive inbound frames of types HEADERS, CONTINUATION and DATA without + // a payload. Initialized from corresponding http2_protocol_options. Default value is 1. + const uint32_t max_consecutive_inbound_frames_with_empty_payload_; + + // This counter keeps track of the number of inbound streams. + uint32_t inbound_streams_ = 0; + // This counter keeps track of the number of inbound PRIORITY frames. If this counter exceeds + // the value calculated using this formula: + // + // max_inbound_priority_frames_per_stream_ * (1 + inbound_streams_) + // + // the connection is terminated. + uint64_t inbound_priority_frames_ = 0; + // Maximum number of inbound PRIORITY frames per stream. Initialized from corresponding + // http2_protocol_options. Default value is 100. + const uint32_t max_inbound_priority_frames_per_stream_; + + // This counter keeps track of the number of inbound WINDOW_UPDATE frames. If this counter exceeds + // the value calculated using this formula: + // + // 1 + 2 * (inbound_streams_ + + // max_inbound_window_update_frames_per_data_frame_sent_ * outbound_data_frames_) + // + // the connection is terminated. 
+ uint64_t inbound_window_update_frames_ = 0; + // This counter keeps track of the number of outbound DATA frames. + uint64_t outbound_data_frames_ = 0; + // Maximum number of inbound WINDOW_UPDATE frames per outbound DATA frame sent. Initialized + // from corresponding http2_protocol_options. Default value is 10. + const uint32_t max_inbound_window_update_frames_per_data_frame_sent_; + + // For the flood mitigation to work the onSend callback must be called once for each outbound + // frame. This is what the nghttp2 library is doing, however this is not documented. The + // Http2FloodMitigationTest.* tests in test/integration/http2_integration_test.cc will break if + // this changes in the future. Also it is important that onSend does not do partial writes, as the + // nghttp2 library will keep calling this callback to write the rest of the frame. + ssize_t onSend(const uint8_t* data, size_t length); + + // Some browsers (e.g. WebKit-based browsers: https://bugs.webkit.org/show_bug.cgi?id=210108) have + // a problem with processing empty trailers (END_STREAM | END_HEADERS with zero length HEADERS) of + // an HTTP/2 response as reported here: https://github.com/envoyproxy/envoy/issues/10514. This is + // controlled by "envoy.reloadable_features.http2_skip_encoding_empty_trailers" runtime feature + // flag. 
+ const bool skip_encoding_empty_trailers_; + +private: + virtual ConnectionCallbacks& callbacks() PURE; + virtual int onBeginHeaders(const nghttp2_frame* frame) PURE; + int onData(int32_t stream_id, const uint8_t* data, size_t len); + int onBeforeFrameReceived(const nghttp2_frame_hd* hd); + int onFrameReceived(const nghttp2_frame* frame); + int onBeforeFrameSend(const nghttp2_frame* frame); + int onFrameSend(const nghttp2_frame* frame); + int onError(absl::string_view error); + virtual int onHeader(const nghttp2_frame* frame, HeaderString&& name, HeaderString&& value) PURE; + int onInvalidFrame(int32_t stream_id, int error_code); + int onStreamClose(int32_t stream_id, uint32_t error_code); + int onMetadataReceived(int32_t stream_id, const uint8_t* data, size_t len); + int onMetadataFrameComplete(int32_t stream_id, bool end_metadata); + ssize_t packMetadata(int32_t stream_id, uint8_t* buf, size_t len); + // Adds buffer fragment for a new outbound frame to the supplied Buffer::OwnedImpl. + // Returns true on success or false if outbound queue limits were exceeded. + bool addOutboundFrameFragment(Buffer::OwnedImpl& output, const uint8_t* data, size_t length); + virtual void checkOutboundQueueLimits() PURE; + void incrementOutboundFrameCount(bool is_outbound_flood_monitored_control_frame); + virtual bool trackInboundFrames(const nghttp2_frame_hd* hd, uint32_t padding_length) PURE; + virtual bool checkInboundFrameLimits(int32_t stream_id) PURE; + void releaseOutboundFrame(); + void releaseOutboundControlFrame(); + + bool dispatching_ : 1; + bool raised_goaway_ : 1; + bool pending_deferred_reset_ : 1; +}; + +/** + * HTTP/2 client connection codec. 
+ */ +class ClientConnectionImpl : public ClientConnection, public ConnectionImpl { +public: + using SessionFactory = Nghttp2SessionFactory; + ClientConnectionImpl(Network::Connection& connection, ConnectionCallbacks& callbacks, + Http::Http2::CodecStats& stats, + const envoy::config::core::v3::Http2ProtocolOptions& http2_options, + const uint32_t max_response_headers_kb, + const uint32_t max_response_headers_count, + SessionFactory& http2_session_factory); + + // Http::ClientConnection + RequestEncoder& newStream(ResponseDecoder& response_decoder) override; + +private: + // ConnectionImpl + ConnectionCallbacks& callbacks() override { return callbacks_; } + int onBeginHeaders(const nghttp2_frame* frame) override; + int onHeader(const nghttp2_frame* frame, HeaderString&& name, HeaderString&& value) override; + + // Presently client connections only perform accounting of outbound frames and do not + // terminate connections when queue limits are exceeded. The primary reason is the complexity of + // the clean-up of upstream connections. The clean-up of upstream connection causes RST_STREAM + // messages to be sent on corresponding downstream connections. This may actually trigger flood + // mitigation on the downstream connections, which causes an exception to be thrown in the middle + // of the clean-up loop, leaving resources in a half cleaned up state. + // TODO(yanavlasov): add flood mitigation for upstream connections as well. + void checkOutboundQueueLimits() override {} + bool trackInboundFrames(const nghttp2_frame_hd*, uint32_t) override { return true; } + bool checkInboundFrameLimits(int32_t) override { return true; } + + Http::ConnectionCallbacks& callbacks_; +}; + +/** + * HTTP/2 server connection codec. 
+ */ +class ServerConnectionImpl : public ServerConnection, public ConnectionImpl { +public: + ServerConnectionImpl(Network::Connection& connection, ServerConnectionCallbacks& callbacks, + Http::Http2::CodecStats& stats, + const envoy::config::core::v3::Http2ProtocolOptions& http2_options, + const uint32_t max_request_headers_kb, + const uint32_t max_request_headers_count, + envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction + headers_with_underscores_action); + +private: + // ConnectionImpl + ConnectionCallbacks& callbacks() override { return callbacks_; } + int onBeginHeaders(const nghttp2_frame* frame) override; + int onHeader(const nghttp2_frame* frame, HeaderString&& name, HeaderString&& value) override; + void checkOutboundQueueLimits() override; + bool trackInboundFrames(const nghttp2_frame_hd* hd, uint32_t padding_length) override; + bool checkInboundFrameLimits(int32_t stream_id) override; + absl::optional checkHeaderNameForUnderscores(absl::string_view header_name) override; + + // Http::Connection + // The reason for overriding the dispatch method is to do flood mitigation only when + // processing data from downstream client. Doing flood mitigation when processing upstream + // responses makes clean-up tricky, which needs to be improved (see comments for the + // ClientConnectionImpl::checkOutboundQueueLimits method). The dispatch method on the + // ServerConnectionImpl objects is called only when processing data from the downstream client in + // the ConnectionManagerImpl::onData method. + Http::Status dispatch(Buffer::Instance& data) override; + Http::Status innerDispatch(Buffer::Instance& data) override; + + ServerConnectionCallbacks& callbacks_; + + // This flag indicates that downstream data is being dispatched and turns on flood mitigation + // in the checkMaxOutbound*Framed methods. + bool dispatching_downstream_data_{false}; + + // The action to take when a request header name contains underscore characters. 
+ envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction + headers_with_underscores_action_; +}; + +} // namespace Http2 +} // namespace Legacy +} // namespace Http +} // namespace Envoy diff --git a/source/common/http/http2/codec_stats.h b/source/common/http/http2/codec_stats.h new file mode 100644 index 0000000000000..05ea11bbe7641 --- /dev/null +++ b/source/common/http/http2/codec_stats.h @@ -0,0 +1,51 @@ +#pragma once + +#include "envoy/stats/scope.h" +#include "envoy/stats/stats_macros.h" + +#include "common/common/thread.h" + +namespace Envoy { +namespace Http { +namespace Http2 { + +/** + * All stats for the HTTP/2 codec. @see stats_macros.h + */ +#define ALL_HTTP2_CODEC_STATS(COUNTER, GAUGE) \ + COUNTER(dropped_headers_with_underscores) \ + COUNTER(header_overflow) \ + COUNTER(headers_cb_no_stream) \ + COUNTER(inbound_empty_frames_flood) \ + COUNTER(inbound_priority_frames_flood) \ + COUNTER(inbound_window_update_frames_flood) \ + COUNTER(outbound_control_flood) \ + COUNTER(outbound_flood) \ + COUNTER(requests_rejected_with_underscores_in_headers) \ + COUNTER(rx_messaging_error) \ + COUNTER(rx_reset) \ + COUNTER(trailers) \ + COUNTER(tx_flush_timeout) \ + COUNTER(tx_reset) \ + GAUGE(streams_active, Accumulate) \ + GAUGE(pending_send_bytes, Accumulate) + +/** + * Wrapper struct for the HTTP/2 codec stats. 
@see stats_macros.h + */ +struct CodecStats { + using AtomicPtr = Thread::AtomicPtr; + + static CodecStats& atomicGet(AtomicPtr& ptr, Stats::Scope& scope) { + return *ptr.get([&scope]() -> CodecStats* { + return new CodecStats{ALL_HTTP2_CODEC_STATS(POOL_COUNTER_PREFIX(scope, "http2."), + POOL_GAUGE_PREFIX(scope, "http2."))}; + }); + } + + ALL_HTTP2_CODEC_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT) +}; + +} // namespace Http2 +} // namespace Http +} // namespace Envoy diff --git a/source/common/http/http2/conn_pool.cc b/source/common/http/http2/conn_pool.cc index fd73d1cbb0924..cde47b8498f2e 100644 --- a/source/common/http/http2/conn_pool.cc +++ b/source/common/http/http2/conn_pool.cc @@ -6,7 +6,6 @@ #include "envoy/upstream/upstream.h" #include "common/http/http2/codec_impl.h" -#include "common/http/http2/conn_pool_legacy.h" #include "common/runtime/runtime_features.h" namespace Envoy { @@ -17,15 +16,15 @@ ConnPoolImpl::ConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSha Upstream::ResourcePriority priority, const Network::ConnectionSocket::OptionsSharedPtr& options, const Network::TransportSocketOptionsSharedPtr& transport_socket_options) - : ConnPoolImplBase(std::move(host), std::move(priority), dispatcher, options, - transport_socket_options) {} + : HttpConnPoolImplBase(std::move(host), std::move(priority), dispatcher, options, + transport_socket_options, Protocol::Http2) {} ConnPoolImpl::~ConnPoolImpl() { destructAllConnections(); } -ConnPoolImplBase::ActiveClientPtr ConnPoolImpl::instantiateActiveClient() { +Envoy::ConnectionPool::ActiveClientPtr ConnPoolImpl::instantiateActiveClient() { return std::make_unique(*this); } -void ConnPoolImpl::onGoAway(ActiveClient& client) { +void ConnPoolImpl::onGoAway(ActiveClient& client, Http::GoAwayErrorCode) { ENVOY_CONN_LOG(debug, "remote goaway", *client.codec_client_); host_->cluster().stats().upstream_cx_close_notify_.inc(); if (client.state_ != ActiveClient::State::DRAINING) { @@ -66,7 +65,7 @@ 
uint64_t ConnPoolImpl::maxRequestsPerConnection() { } ConnPoolImpl::ActiveClient::ActiveClient(ConnPoolImpl& parent) - : ConnPoolImplBase::ActiveClient( + : Envoy::Http::ActiveClient( parent, parent.maxRequestsPerConnection(), parent.host_->cluster().http2Options().max_concurrent_streams().value()) { codec_client_->setCodecClientCallbacks(*this); @@ -75,10 +74,6 @@ ConnPoolImpl::ActiveClient::ActiveClient(ConnPoolImpl& parent) parent.host_->cluster().stats().upstream_cx_http2_total_.inc(); } -bool ConnPoolImpl::ActiveClient::hasActiveRequests() const { - return codec_client_->numActiveRequests() > 0; -} - bool ConnPoolImpl::ActiveClient::closingWithIncompleteRequest() const { return closed_with_active_rq_; } @@ -98,14 +93,8 @@ allocateConnPool(Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr hos Upstream::ResourcePriority priority, const Network::ConnectionSocket::OptionsSharedPtr& options, const Network::TransportSocketOptionsSharedPtr& transport_socket_options) { - if (Runtime::runtimeFeatureEnabled( - "envoy.reloadable_features.new_http2_connection_pool_behavior")) { - return std::make_unique(dispatcher, host, priority, options, - transport_socket_options); - } else { - return std::make_unique( - dispatcher, host, priority, options, transport_socket_options); - } + return std::make_unique(dispatcher, host, priority, options, + transport_socket_options); } } // namespace Http2 diff --git a/source/common/http/http2/conn_pool.h b/source/common/http/http2/conn_pool.h index 481f4eb24a978..9ae8a78834e80 100644 --- a/source/common/http/http2/conn_pool.h +++ b/source/common/http/http2/conn_pool.h @@ -16,7 +16,7 @@ namespace Http2 { * shifting to a new connection if we reach max streams on the primary. This is a base class * used for both the prod implementation as well as the testing one. 
*/ -class ConnPoolImpl : public ConnPoolImplBase { +class ConnPoolImpl : public Envoy::Http::HttpConnPoolImplBase { public: ConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, @@ -29,19 +29,19 @@ class ConnPoolImpl : public ConnPoolImplBase { Http::Protocol protocol() const override { return Http::Protocol::Http2; } // ConnPoolImplBase - ActiveClientPtr instantiateActiveClient() override; + Envoy::ConnectionPool::ActiveClientPtr instantiateActiveClient() override; protected: - struct ActiveClient : public CodecClientCallbacks, - public Http::ConnectionCallbacks, - public ConnPoolImplBase::ActiveClient { + class ActiveClient : public CodecClientCallbacks, + public Http::ConnectionCallbacks, + public Envoy::Http::ActiveClient { + public: ActiveClient(ConnPoolImpl& parent); ~ActiveClient() override = default; ConnPoolImpl& parent() { return static_cast(parent_); } // ConnPoolImpl::ActiveClient - bool hasActiveRequests() const override; bool closingWithIncompleteRequest() const override; RequestEncoder& newStreamEncoder(ResponseDecoder& response_decoder) override; @@ -52,14 +52,16 @@ class ConnPoolImpl : public ConnPoolImplBase { } // Http::ConnectionCallbacks - void onGoAway() override { parent().onGoAway(*this); } + void onGoAway(Http::GoAwayErrorCode error_code) override { + parent().onGoAway(*this, error_code); + } bool closed_with_active_rq_{}; }; uint64_t maxRequestsPerConnection(); void movePrimaryClientToDraining(); - void onGoAway(ActiveClient& client); + void onGoAway(ActiveClient& client, Http::GoAwayErrorCode error_code); void onStreamDestroy(ActiveClient& client); void onStreamReset(ActiveClient& client, Http::StreamResetReason reason); diff --git a/source/common/http/http2/conn_pool_legacy.cc b/source/common/http/http2/conn_pool_legacy.cc deleted file mode 100644 index d9834e1893baa..0000000000000 --- a/source/common/http/http2/conn_pool_legacy.cc +++ /dev/null @@ -1,309 +0,0 @@ -#include 
"common/http/http2/conn_pool_legacy.h" - -#include -#include - -#include "envoy/event/dispatcher.h" -#include "envoy/event/timer.h" -#include "envoy/upstream/upstream.h" - -#include "common/http/http2/codec_impl.h" -#include "common/network/utility.h" -#include "common/upstream/upstream_impl.h" - -namespace Envoy { -namespace Http { -namespace Legacy { -namespace Http2 { - -ConnPoolImpl::ConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr host, - Upstream::ResourcePriority priority, - const Network::ConnectionSocket::OptionsSharedPtr& options, - const Network::TransportSocketOptionsSharedPtr& transport_socket_options) - : ConnPoolImplBase(std::move(host), std::move(priority)), dispatcher_(dispatcher), - socket_options_(options), transport_socket_options_(transport_socket_options) {} - -ConnPoolImpl::~ConnPoolImpl() { - if (primary_client_) { - primary_client_->client_->close(); - } - - if (draining_client_) { - draining_client_->client_->close(); - } - - // Make sure all clients are destroyed before we are destroyed. 
- dispatcher_.clearDeferredDeleteList(); -} - -void ConnPoolImpl::ConnPoolImpl::drainConnections() { - if (primary_client_ != nullptr) { - movePrimaryClientToDraining(); - } -} - -void ConnPoolImpl::addDrainedCallback(DrainedCb cb) { - drained_callbacks_.push_back(cb); - checkForDrained(); -} - -bool ConnPoolImpl::hasActiveConnections() const { - if (primary_client_ && primary_client_->client_->numActiveRequests() > 0) { - return true; - } - - if (draining_client_ && draining_client_->client_->numActiveRequests() > 0) { - return true; - } - - return !pending_requests_.empty(); -} - -void ConnPoolImpl::checkForDrained() { - if (drained_callbacks_.empty()) { - return; - } - - bool drained = true; - if (primary_client_) { - if (primary_client_->client_->numActiveRequests() == 0) { - primary_client_->client_->close(); - ASSERT(!primary_client_); - } else { - drained = false; - } - } - - ASSERT(!draining_client_ || (draining_client_->client_->numActiveRequests() > 0)); - if (draining_client_ && draining_client_->client_->numActiveRequests() > 0) { - drained = false; - } - - if (drained) { - ENVOY_LOG(debug, "invoking drained callbacks"); - for (const DrainedCb& cb : drained_callbacks_) { - cb(); - } - } -} - -void ConnPoolImpl::newClientStream(ResponseDecoder& response_decoder, - ConnectionPool::Callbacks& callbacks) { - if (!host_->cluster().resourceManager(priority_).requests().canCreate()) { - ENVOY_LOG(debug, "max requests overflow"); - callbacks.onPoolFailure(ConnectionPool::PoolFailureReason::Overflow, absl::string_view(), - nullptr); - host_->cluster().stats().upstream_rq_pending_overflow_.inc(); - } else { - ENVOY_CONN_LOG(debug, "creating stream", *primary_client_->client_); - primary_client_->total_streams_++; - host_->stats().rq_total_.inc(); - host_->stats().rq_active_.inc(); - host_->cluster().stats().upstream_rq_total_.inc(); - host_->cluster().stats().upstream_rq_active_.inc(); - host_->cluster().resourceManager(priority_).requests().inc(); - 
callbacks.onPoolReady(primary_client_->client_->newStream(response_decoder), - primary_client_->real_host_description_, - primary_client_->client_->streamInfo()); - } -} - -ConnectionPool::Cancellable* ConnPoolImpl::newStream(ResponseDecoder& response_decoder, - ConnectionPool::Callbacks& callbacks) { - ASSERT(drained_callbacks_.empty()); - - // First see if we need to handle max streams rollover. - uint64_t max_streams = host_->cluster().maxRequestsPerConnection(); - if (max_streams == 0) { - max_streams = maxTotalStreams(); - } - - if (primary_client_ && primary_client_->total_streams_ >= max_streams) { - movePrimaryClientToDraining(); - } - - if (!primary_client_) { - primary_client_ = std::make_unique(*this); - } - - // If the primary client is not connected yet, queue up the request. - if (!primary_client_->upstream_ready_) { - // If we're not allowed to enqueue more requests, fail fast. - if (!host_->cluster().resourceManager(priority_).pendingRequests().canCreate()) { - ENVOY_LOG(debug, "max pending requests overflow"); - callbacks.onPoolFailure(ConnectionPool::PoolFailureReason::Overflow, absl::string_view(), - nullptr); - host_->cluster().stats().upstream_rq_pending_overflow_.inc(); - return nullptr; - } - - return newPendingRequest(response_decoder, callbacks); - } - - // We already have an active client that's connected to upstream, so attempt to establish a - // new stream. 
- newClientStream(response_decoder, callbacks); - return nullptr; -} - -void ConnPoolImpl::onConnectionEvent(ActiveClient& client, Network::ConnectionEvent event) { - if (event == Network::ConnectionEvent::RemoteClose || - event == Network::ConnectionEvent::LocalClose) { - ENVOY_CONN_LOG(debug, "client disconnected", *client.client_); - - Envoy::Upstream::reportUpstreamCxDestroy(host_, event); - if (client.closed_with_active_rq_) { - Envoy::Upstream::reportUpstreamCxDestroyActiveRequest(host_, event); - } - - if (client.connectionState() == ConnPoolImplBase::ActiveClient::ConnectionState::Connecting) { - host_->cluster().stats().upstream_cx_connect_fail_.inc(); - host_->stats().cx_connect_fail_.inc(); - - // Raw connect failures should never happen under normal circumstances. If we have an upstream - // that is behaving badly, requests can get stuck here in the pending state. If we see a - // connect failure, we purge all pending requests so that calling code can determine what to - // do with the request. - // NOTE: We move the existing pending requests to a temporary list. This is done so that - // if retry logic submits a new request to the pool, we don't fail it inline. 
- purgePendingRequests(client.real_host_description_, client.client_->connectionFailureReason(), - event == Network::ConnectionEvent::RemoteClose); - } - - if (&client == primary_client_.get()) { - ENVOY_CONN_LOG(debug, "destroying primary client", *client.client_); - dispatcher_.deferredDelete(std::move(primary_client_)); - } else { - ENVOY_CONN_LOG(debug, "destroying draining client", *client.client_); - dispatcher_.deferredDelete(std::move(draining_client_)); - } - - if (client.closed_with_active_rq_) { - checkForDrained(); - } - } - - if (event == Network::ConnectionEvent::Connected) { - client.recordConnectionSetup(); - - client.upstream_ready_ = true; - onUpstreamReady(); - } - - client.disarmConnectTimeout(); -} - -void ConnPoolImpl::movePrimaryClientToDraining() { - ENVOY_CONN_LOG(debug, "moving primary to draining", *primary_client_->client_); - if (draining_client_) { - // This should pretty much never happen, but is possible if we start draining and then get - // a goaway for example. In this case just kill the current draining connection. It's not - // worth keeping a list. - draining_client_->client_->close(); - } - - ASSERT(!draining_client_); - if (primary_client_->client_->numActiveRequests() == 0) { - // If we are making a new connection and the primary does not have any active requests just - // close it now. 
- primary_client_->client_->close(); - } else { - draining_client_ = std::move(primary_client_); - } - - ASSERT(!primary_client_); -} - -void ConnPoolImpl::onConnectTimeout(ActiveClient& client) { - ENVOY_CONN_LOG(debug, "connect timeout", *client.client_); - host_->cluster().stats().upstream_cx_connect_timeout_.inc(); - client.client_->close(); -} - -void ConnPoolImpl::onGoAway(ActiveClient& client) { - ENVOY_CONN_LOG(debug, "remote goaway", *client.client_); - host_->cluster().stats().upstream_cx_close_notify_.inc(); - if (&client == primary_client_.get()) { - movePrimaryClientToDraining(); - } -} - -void ConnPoolImpl::onStreamDestroy(ActiveClient& client) { - ENVOY_CONN_LOG(debug, "destroying stream: {} remaining", *client.client_, - client.client_->numActiveRequests()); - host_->stats().rq_active_.dec(); - host_->cluster().stats().upstream_rq_active_.dec(); - host_->cluster().resourceManager(priority_).requests().dec(); - if (&client == draining_client_.get() && client.client_->numActiveRequests() == 0) { - // Close out the draining client if we no long have active requests. - client.client_->close(); - } - - // If we are destroying this stream because of a disconnect, do not check for drain here. We will - // wait until the connection has been fully drained of streams and then check in the connection - // event callback. 
- if (!client.closed_with_active_rq_) { - checkForDrained(); - } -} - -void ConnPoolImpl::onStreamReset(ActiveClient& client, Http::StreamResetReason reason) { - if (reason == StreamResetReason::ConnectionTermination || - reason == StreamResetReason::ConnectionFailure) { - host_->cluster().stats().upstream_rq_pending_failure_eject_.inc(); - client.closed_with_active_rq_ = true; - } else if (reason == StreamResetReason::LocalReset) { - host_->cluster().stats().upstream_rq_tx_reset_.inc(); - } else if (reason == StreamResetReason::RemoteReset) { - host_->cluster().stats().upstream_rq_rx_reset_.inc(); - } -} - -void ConnPoolImpl::onUpstreamReady() { - // Establishes new codec streams for each pending request. - while (!pending_requests_.empty()) { - newClientStream(pending_requests_.back()->decoder_, pending_requests_.back()->callbacks_); - pending_requests_.pop_back(); - } -} - -ConnPoolImpl::ActiveClient::ActiveClient(ConnPoolImpl& parent) - : ConnPoolImplBase::ActiveClient(parent.dispatcher_, parent.host_->cluster()), parent_(parent) { - Upstream::Host::CreateConnectionData data = parent_.host_->createConnection( - parent_.dispatcher_, parent_.socket_options_, parent_.transport_socket_options_); - real_host_description_ = data.host_description_; - client_ = parent_.createCodecClient(data); - client_->addConnectionCallbacks(*this); - client_->setCodecClientCallbacks(*this); - client_->setCodecConnectionCallbacks(*this); - - parent_.host_->stats().cx_total_.inc(); - parent_.host_->stats().cx_active_.inc(); - parent_.host_->cluster().stats().upstream_cx_total_.inc(); - parent_.host_->cluster().stats().upstream_cx_active_.inc(); - parent_.host_->cluster().stats().upstream_cx_http2_total_.inc(); - - client_->setConnectionStats({parent_.host_->cluster().stats().upstream_cx_rx_bytes_total_, - parent_.host_->cluster().stats().upstream_cx_rx_bytes_buffered_, - parent_.host_->cluster().stats().upstream_cx_tx_bytes_total_, - 
parent_.host_->cluster().stats().upstream_cx_tx_bytes_buffered_, - &parent_.host_->cluster().stats().bind_errors_, nullptr}); -} - -ConnPoolImpl::ActiveClient::~ActiveClient() { - parent_.host_->stats().cx_active_.dec(); - parent_.host_->cluster().stats().upstream_cx_active_.dec(); -} - -CodecClientPtr ProdConnPoolImpl::createCodecClient(Upstream::Host::CreateConnectionData& data) { - CodecClientPtr codec{new CodecClientProd(CodecClient::Type::HTTP2, std::move(data.connection_), - data.host_description_, dispatcher_)}; - return codec; -} - -uint32_t ProdConnPoolImpl::maxTotalStreams() { return MAX_STREAMS; } - -} // namespace Http2 -} // namespace Legacy -} // namespace Http -} // namespace Envoy diff --git a/source/common/http/http2/conn_pool_legacy.h b/source/common/http/http2/conn_pool_legacy.h deleted file mode 100644 index 0ffb2e520a8d1..0000000000000 --- a/source/common/http/http2/conn_pool_legacy.h +++ /dev/null @@ -1,121 +0,0 @@ -#pragma once - -#include -#include -#include - -#include "envoy/event/timer.h" -#include "envoy/http/conn_pool.h" -#include "envoy/network/connection.h" -#include "envoy/stats/timespan.h" -#include "envoy/upstream/upstream.h" - -#include "common/http/codec_client.h" -#include "common/http/conn_pool_base_legacy.h" - -namespace Envoy { -namespace Http { -namespace Legacy { -namespace Http2 { - -/** - * Implementation of a "connection pool" for HTTP/2. This mainly handles stats as well as - * shifting to a new connection if we reach max streams on the primary. This is a base class - * used for both the prod implementation as well as the testing one. 
- */ -class ConnPoolImpl : public ConnectionPool::Instance, public Legacy::ConnPoolImplBase { -public: - ConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr host, - Upstream::ResourcePriority priority, - const Network::ConnectionSocket::OptionsSharedPtr& options, - const Network::TransportSocketOptionsSharedPtr& transport_socket_options); - ~ConnPoolImpl() override; - - // Http::ConnectionPool::Instance - Http::Protocol protocol() const override { return Http::Protocol::Http2; } - void addDrainedCallback(DrainedCb cb) override; - void drainConnections() override; - bool hasActiveConnections() const override; - ConnectionPool::Cancellable* newStream(ResponseDecoder& response_decoder, - ConnectionPool::Callbacks& callbacks) override; - Upstream::HostDescriptionConstSharedPtr host() const override { return host_; }; - -protected: - struct ActiveClient : ConnPoolImplBase::ActiveClient, - public Network::ConnectionCallbacks, - public CodecClientCallbacks, - public Event::DeferredDeletable, - public Http::ConnectionCallbacks { - ActiveClient(ConnPoolImpl& parent); - ~ActiveClient() override; - - void onConnectTimeout() override { parent_.onConnectTimeout(*this); } - - // Network::ConnectionCallbacks - void onEvent(Network::ConnectionEvent event) override { - parent_.onConnectionEvent(*this, event); - } - void onAboveWriteBufferHighWatermark() override {} - void onBelowWriteBufferLowWatermark() override {} - - // CodecClientCallbacks - void onStreamDestroy() override { parent_.onStreamDestroy(*this); } - void onStreamReset(Http::StreamResetReason reason) override { - parent_.onStreamReset(*this, reason); - } - - // Http::ConnectionCallbacks - void onGoAway() override { parent_.onGoAway(*this); } - - ConnPoolImpl& parent_; - CodecClientPtr client_; - Upstream::HostDescriptionConstSharedPtr real_host_description_; - uint64_t total_streams_{}; - bool upstream_ready_{}; - bool closed_with_active_rq_{}; - }; - - using ActiveClientPtr = std::unique_ptr; - - 
// Http::ConnPoolImplBase - void checkForDrained() override; - - virtual CodecClientPtr createCodecClient(Upstream::Host::CreateConnectionData& data) PURE; - virtual uint32_t maxTotalStreams() PURE; - void movePrimaryClientToDraining(); - void onConnectionEvent(ActiveClient& client, Network::ConnectionEvent event); - void onConnectTimeout(ActiveClient& client); - void onGoAway(ActiveClient& client); - void onStreamDestroy(ActiveClient& client); - void onStreamReset(ActiveClient& client, Http::StreamResetReason reason); - void newClientStream(ResponseDecoder& response_decoder, ConnectionPool::Callbacks& callbacks); - void onUpstreamReady(); - - Event::Dispatcher& dispatcher_; - ActiveClientPtr primary_client_; - ActiveClientPtr draining_client_; - std::list drained_callbacks_; - const Network::ConnectionSocket::OptionsSharedPtr socket_options_; - const Network::TransportSocketOptionsSharedPtr transport_socket_options_; -}; - -/** - * Production implementation of the HTTP/2 connection pool. - */ -class ProdConnPoolImpl : public ConnPoolImpl { -public: - using ConnPoolImpl::ConnPoolImpl; - -private: - CodecClientPtr createCodecClient(Upstream::Host::CreateConnectionData& data) override; - uint32_t maxTotalStreams() override; - - // All streams are 2^31. Client streams are half that, minus stream 0. Just to be on the safe - // side we do 2^29. - static const uint64_t MAX_STREAMS = (1 << 29); -}; - -} // namespace Http2 -} // namespace Legacy -} // namespace Http -} // namespace Envoy diff --git a/source/common/http/http2/nghttp2.cc b/source/common/http/http2/nghttp2.cc index 448ea9bb2404b..68e9c620e9c86 100644 --- a/source/common/http/http2/nghttp2.cc +++ b/source/common/http/http2/nghttp2.cc @@ -14,15 +14,19 @@ namespace Http { namespace Http2 { void initializeNghttp2Logging() { + // Event when ENVOY_NGHTTP2_TRACE is not set, we install a debug logger, to prevent nghttp2 + // logging directly to stdout at -l trace. 
nghttp2_set_debug_vprintf_callback([](const char* format, va_list args) { - char buf[2048]; - const int n = ::vsnprintf(buf, sizeof(buf), format, args); - // nghttp2 inserts new lines, but we also insert a new line in the ENVOY_LOG - // below, so avoid double \n. - if (n >= 1 && static_cast(n) < sizeof(buf) && buf[n - 1] == '\n') { - buf[n - 1] = '\0'; + if (std::getenv("ENVOY_NGHTTP2_TRACE") != nullptr) { + char buf[2048]; + const int n = ::vsnprintf(buf, sizeof(buf), format, args); + // nghttp2 inserts new lines, but we also insert a new line in the ENVOY_LOG + // below, so avoid double \n. + if (n >= 1 && static_cast(n) < sizeof(buf) && buf[n - 1] == '\n') { + buf[n - 1] = '\0'; + } + ENVOY_LOG_TO_LOGGER(Logger::Registry::getLog(Logger::Id::http2), trace, "nghttp2: {}", buf); } - ENVOY_LOG_TO_LOGGER(Logger::Registry::getLog(Logger::Id::http2), trace, "nghttp2: {}", buf); }); } diff --git a/source/common/http/http3/BUILD b/source/common/http/http3/BUILD index cadca40d0d259..43ba5729097ce 100644 --- a/source/common/http/http3/BUILD +++ b/source/common/http/http3/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/common/http/message_impl.h b/source/common/http/message_impl.h index a42a0a2afd5d6..698c51824a069 100644 --- a/source/common/http/message_impl.h +++ b/source/common/http/message_impl.h @@ -19,7 +19,7 @@ template class MessageImpl : public Message { public: - MessageImpl() : headers_(std::make_unique()) {} + MessageImpl() : headers_(HeadersImplType::create()) {} MessageImpl(std::unique_ptr&& headers) : headers_(std::move(headers)) {} // Http::Message diff --git a/source/common/http/path_utility.cc b/source/common/http/path_utility.cc index cb930929e4a0f..5194a395e79b5 100644 --- a/source/common/http/path_utility.cc +++ b/source/common/http/path_utility.cc @@ -1,12 +1,12 @@ #include 
"common/http/path_utility.h" -#include "common/chromium_url/url_canon.h" -#include "common/chromium_url/url_canon_stdstring.h" #include "common/common/logger.h" #include "absl/strings/str_join.h" #include "absl/strings/str_split.h" #include "absl/types/optional.h" +#include "url/url_canon.h" +#include "url/url_canon_stdstring.h" namespace Envoy { namespace Http { @@ -14,11 +14,10 @@ namespace Http { namespace { absl::optional canonicalizePath(absl::string_view original_path) { std::string canonical_path; - chromium_url::Component in_component(0, original_path.size()); - chromium_url::Component out_component; - chromium_url::StdStringCanonOutput output(&canonical_path); - if (!chromium_url::CanonicalizePath(original_path.data(), in_component, &output, - &out_component)) { + url::Component in_component(0, original_path.size()); + url::Component out_component; + url::StdStringCanonOutput output(&canonical_path); + if (!CanonicalizePath(original_path.data(), in_component, &output, &out_component)) { return absl::nullopt; } else { output.Complete(); @@ -29,7 +28,8 @@ absl::optional canonicalizePath(absl::string_view original_path) { /* static */ bool PathUtil::canonicalPath(RequestHeaderMap& headers) { - const auto original_path = headers.Path()->value().getStringView(); + ASSERT(headers.Path()); + const auto original_path = headers.getPathValue(); // canonicalPath is supposed to apply on path component in URL instead of :path header const auto query_pos = original_path.find('?'); auto normalized_path_opt = canonicalizePath( @@ -54,7 +54,8 @@ bool PathUtil::canonicalPath(RequestHeaderMap& headers) { } void PathUtil::mergeSlashes(RequestHeaderMap& headers) { - const auto original_path = headers.Path()->value().getStringView(); + ASSERT(headers.Path()); + const auto original_path = headers.getPathValue(); // Only operate on path component in URL. 
const absl::string_view::size_type query_start = original_path.find('?'); const absl::string_view path = original_path.substr(0, query_start); @@ -62,10 +63,11 @@ void PathUtil::mergeSlashes(RequestHeaderMap& headers) { if (path.find("//") == absl::string_view::npos) { return; } - const absl::string_view prefix = absl::StartsWith(path, "/") ? "/" : absl::string_view(); - const absl::string_view suffix = absl::EndsWith(path, "/") ? "/" : absl::string_view(); - headers.setPath(absl::StrCat( - prefix, absl::StrJoin(absl::StrSplit(path, '/', absl::SkipEmpty()), "/"), query, suffix)); + const absl::string_view path_prefix = absl::StartsWith(path, "/") ? "/" : absl::string_view(); + const absl::string_view path_suffix = absl::EndsWith(path, "/") ? "/" : absl::string_view(); + headers.setPath(absl::StrCat(path_prefix, + absl::StrJoin(absl::StrSplit(path, '/', absl::SkipEmpty()), "/"), + path_suffix, query)); } absl::string_view PathUtil::removeQueryAndFragment(const absl::string_view path) { diff --git a/source/common/http/path_utility.h b/source/common/http/path_utility.h index 8df1581bad6f8..62be43e2e03fd 100644 --- a/source/common/http/path_utility.h +++ b/source/common/http/path_utility.h @@ -14,8 +14,10 @@ class PathUtil { public: // Returns if the normalization succeeds. // If it is successful, the path header in header path will be updated with the normalized path. + // Requires the Path header be present. static bool canonicalPath(RequestHeaderMap& headers); // Merges two or more adjacent slashes in path part of URI into one. + // Requires the Path header be present. static void mergeSlashes(RequestHeaderMap& headers); // Removes the query and/or fragment string (if present) from the input path. // For example, this function returns "/data" for the input path "/data#fragment?param=value". 
diff --git a/source/common/http/request_id_extension_impl.cc b/source/common/http/request_id_extension_impl.cc index ed4022712fc09..f2917959bcd1e 100644 --- a/source/common/http/request_id_extension_impl.cc +++ b/source/common/http/request_id_extension_impl.cc @@ -40,7 +40,7 @@ RequestIDExtensionSharedPtr RequestIDExtensionFactory::fromProto( } RequestIDExtensionSharedPtr -RequestIDExtensionFactory::defaultInstance(Envoy::Runtime::RandomGenerator& random) { +RequestIDExtensionFactory::defaultInstance(Envoy::Random::RandomGenerator& random) { return std::make_shared(random); } diff --git a/source/common/http/request_id_extension_impl.h b/source/common/http/request_id_extension_impl.h index b77ad9b8142b6..822dff8bc242b 100644 --- a/source/common/http/request_id_extension_impl.h +++ b/source/common/http/request_id_extension_impl.h @@ -1,5 +1,6 @@ #pragma once +#include "envoy/common/random_generator.h" #include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" #include "envoy/http/request_id_extension.h" #include "envoy/server/request_id_extension_config.h" @@ -16,7 +17,7 @@ class RequestIDExtensionFactory { /** * Return a newly created instance of the default RequestIDExtension implementation. */ - static RequestIDExtensionSharedPtr defaultInstance(Envoy::Runtime::RandomGenerator& random); + static RequestIDExtensionSharedPtr defaultInstance(Envoy::Random::RandomGenerator& random); /** * Return a globally shared instance of the noop RequestIDExtension implementation. 
diff --git a/source/common/http/request_id_extension_uuid_impl.cc b/source/common/http/request_id_extension_uuid_impl.cc index dc95b46f81c72..7da58d05ac1dc 100644 --- a/source/common/http/request_id_extension_uuid_impl.cc +++ b/source/common/http/request_id_extension_uuid_impl.cc @@ -5,8 +5,8 @@ #include "envoy/http/header_map.h" +#include "common/common/random_generator.h" #include "common/common/utility.h" -#include "common/runtime/runtime_impl.h" #include "absl/strings/string_view.h" @@ -27,7 +27,7 @@ void UUIDRequestIDExtension::set(RequestHeaderMap& request_headers, bool force) void UUIDRequestIDExtension::setInResponse(ResponseHeaderMap& response_headers, const RequestHeaderMap& request_headers) { if (request_headers.RequestId()) { - response_headers.setRequestId(request_headers.RequestId()->value().getStringView()); + response_headers.setRequestId(request_headers.getRequestIdValue()); } } @@ -36,7 +36,7 @@ bool UUIDRequestIDExtension::modBy(const RequestHeaderMap& request_headers, uint if (request_headers.RequestId() == nullptr) { return false; } - const std::string uuid(request_headers.RequestId()->value().getStringView()); + const std::string uuid(request_headers.getRequestIdValue()); if (uuid.length() < 8) { return false; } @@ -54,8 +54,8 @@ TraceStatus UUIDRequestIDExtension::getTraceStatus(const RequestHeaderMap& reque if (request_headers.RequestId() == nullptr) { return TraceStatus::NoTrace; } - absl::string_view uuid = request_headers.RequestId()->value().getStringView(); - if (uuid.length() != Runtime::RandomGeneratorImpl::UUID_LENGTH) { + absl::string_view uuid = request_headers.getRequestIdValue(); + if (uuid.length() != Random::RandomGeneratorImpl::UUID_LENGTH) { return TraceStatus::NoTrace; } @@ -75,8 +75,8 @@ void UUIDRequestIDExtension::setTraceStatus(RequestHeaderMap& request_headers, T if (request_headers.RequestId() == nullptr) { return; } - absl::string_view uuid_view = request_headers.RequestId()->value().getStringView(); - if 
(uuid_view.length() != Runtime::RandomGeneratorImpl::UUID_LENGTH) { + absl::string_view uuid_view = request_headers.getRequestIdValue(); + if (uuid_view.length() != Random::RandomGeneratorImpl::UUID_LENGTH) { return; } std::string uuid(uuid_view); diff --git a/source/common/http/request_id_extension_uuid_impl.h b/source/common/http/request_id_extension_uuid_impl.h index c3a660e3a2bcb..ca5868dc6610f 100644 --- a/source/common/http/request_id_extension_uuid_impl.h +++ b/source/common/http/request_id_extension_uuid_impl.h @@ -1,5 +1,6 @@ #pragma once +#include "envoy/common/random_generator.h" #include "envoy/http/request_id_extension.h" #include "common/runtime/runtime_impl.h" @@ -11,7 +12,7 @@ namespace Http { // configured. class UUIDRequestIDExtension : public RequestIDExtension { public: - explicit UUIDRequestIDExtension(Envoy::Runtime::RandomGenerator& random) : random_(random) {} + explicit UUIDRequestIDExtension(Envoy::Random::RandomGenerator& random) : random_(random) {} void set(RequestHeaderMap& request_headers, bool force) override; void setInResponse(ResponseHeaderMap& response_headers, @@ -22,7 +23,7 @@ class UUIDRequestIDExtension : public RequestIDExtension { private: // Reference to the random generator used to generate new request IDs - Envoy::Runtime::RandomGenerator& random_; + Envoy::Random::RandomGenerator& random_; // Byte on this position has predefined value of 4 for UUID4. 
static const int TRACE_BYTE_POSITION = 14; diff --git a/source/common/http/rest_api_fetcher.cc b/source/common/http/rest_api_fetcher.cc index 612fff3708a33..ef4cca9bcd7b6 100644 --- a/source/common/http/rest_api_fetcher.cc +++ b/source/common/http/rest_api_fetcher.cc @@ -13,7 +13,7 @@ namespace Envoy { namespace Http { RestApiFetcher::RestApiFetcher(Upstream::ClusterManager& cm, const std::string& remote_cluster_name, - Event::Dispatcher& dispatcher, Runtime::RandomGenerator& random, + Event::Dispatcher& dispatcher, Random::RandomGenerator& random, std::chrono::milliseconds refresh_interval, std::chrono::milliseconds request_timeout) : remote_cluster_name_(remote_cluster_name), cm_(cm), random_(random), diff --git a/source/common/http/rest_api_fetcher.h b/source/common/http/rest_api_fetcher.h index f7dfa76dcde33..f4b19ab17a65d 100644 --- a/source/common/http/rest_api_fetcher.h +++ b/source/common/http/rest_api_fetcher.h @@ -3,6 +3,7 @@ #include #include +#include "envoy/common/random_generator.h" #include "envoy/config/subscription.h" #include "envoy/event/dispatcher.h" #include "envoy/runtime/runtime.h" @@ -18,7 +19,7 @@ namespace Http { class RestApiFetcher : public Http::AsyncClient::Callbacks { protected: RestApiFetcher(Upstream::ClusterManager& cm, const std::string& remote_cluster_name, - Event::Dispatcher& dispatcher, Runtime::RandomGenerator& random, + Event::Dispatcher& dispatcher, Random::RandomGenerator& random, std::chrono::milliseconds refresh_interval, std::chrono::milliseconds request_timeout); ~RestApiFetcher() override; @@ -65,8 +66,10 @@ class RestApiFetcher : public Http::AsyncClient::Callbacks { void onSuccess(const Http::AsyncClient::Request&, Http::ResponseMessagePtr&& response) override; void onFailure(const Http::AsyncClient::Request&, Http::AsyncClient::FailureReason reason) override; + void onBeforeFinalizeUpstreamSpan(Envoy::Tracing::Span&, + const Http::ResponseHeaderMap*) override {} - Runtime::RandomGenerator& random_; + 
Random::RandomGenerator& random_; const std::chrono::milliseconds refresh_interval_; const std::chrono::milliseconds request_timeout_; Event::TimerPtr refresh_timer_; diff --git a/source/common/http/status.cc b/source/common/http/status.cc index 166b154a3d2be..78ef5c562f2d8 100644 --- a/source/common/http/status.cc +++ b/source/common/http/status.cc @@ -1,5 +1,7 @@ #include "common/http/status.h" +#include "common/common/assert.h" + #include "absl/strings/str_cat.h" namespace Envoy { @@ -12,7 +14,7 @@ constexpr absl::string_view EnvoyPayloadUrl = "Envoy"; absl::string_view statusCodeToString(StatusCode code) { switch (code) { case StatusCode::Ok: - return absl::OkStatus().ToString(); + return "OK"; case StatusCode::CodecProtocolError: return "CodecProtocolError"; case StatusCode::BufferFloodError: @@ -37,17 +39,27 @@ struct PrematureResponsePayload : public EnvoyStatusPayload { }; template void storePayload(absl::Status& status, const T& payload) { - status.SetPayload( - EnvoyPayloadUrl, - absl::Cord(absl::string_view(reinterpret_cast(&payload), sizeof(payload)))); + absl::Cord cord(absl::string_view(reinterpret_cast(&payload), sizeof(payload))); + cord.Flatten(); // Flatten ahead of time for easier access later. + status.SetPayload(EnvoyPayloadUrl, std::move(cord)); } -template const T* getPayload(const absl::Status& status) { - auto payload = status.GetPayload(EnvoyPayloadUrl); - ASSERT(payload.has_value(), "Must have payload"); - auto data = payload.value().Flatten(); - ASSERT(data.length() >= sizeof(T), "Invalid payload length"); - return reinterpret_cast(data.data()); +template const T& getPayload(const absl::Status& status) { + // The only way to get a reference to the payload owned by the absl::Status is through the + // ForEachPayload method. All other methods create a copy of the payload, which is not convenient + // for peeking at the payload value. 
+ const T* payload = nullptr; + status.ForEachPayload([&payload](absl::string_view url, const absl::Cord& cord) { + if (url == EnvoyPayloadUrl) { + ASSERT(!payload); // Status API guarantees to have one payload with given URL + auto data = cord.TryFlat(); + ASSERT(data.has_value()); // EnvoyPayloadUrl cords are flattened ahead of time + ASSERT(data.value().length() >= sizeof(T), "Invalid payload length"); + payload = reinterpret_cast(data.value().data()); + } + }); + ASSERT(payload); + return *payload; } } // namespace @@ -94,7 +106,7 @@ Status codecClientError(absl::string_view message) { // Methods for checking and extracting error information StatusCode getStatusCode(const Status& status) { - return status.ok() ? StatusCode::Ok : getPayload(status)->status_code_; + return status.ok() ? StatusCode::Ok : getPayload(status).status_code_; } bool isCodecProtocolError(const Status& status) { @@ -110,10 +122,10 @@ bool isPrematureResponseError(const Status& status) { } Http::Code getPrematureResponseHttpCode(const Status& status) { - const auto* payload = getPayload(status); - ASSERT(payload->status_code_ == StatusCode::PrematureResponseError, + const auto& payload = getPayload(status); + ASSERT(payload.status_code_ == StatusCode::PrematureResponseError, "Must be PrematureResponseError"); - return payload->http_code_; + return payload.http_code_; } bool isCodecClientError(const Status& status) { diff --git a/source/common/http/status.h b/source/common/http/status.h index bc2e1370df04f..2b47a5d0a48bb 100644 --- a/source/common/http/status.h +++ b/source/common/http/status.h @@ -109,5 +109,39 @@ ABSL_MUST_USE_RESULT bool isCodecClientError(const Status& status); */ Http::Code getPrematureResponseHttpCode(const Status& status); +/** + * Macro that checks return value of expression that results in Status and returns from + * the current function if the status is not OK. 
+ * + * Example usage: + * Status foo() { + * RETURN_IF_ERROR(bar()); + * return okStatus(); + * } + */ + +#define RETURN_IF_ERROR(expr) \ + do { \ + if (::Envoy::Http::Details::StatusAdapter adapter{(expr)}) { \ + } else { \ + return std::move(adapter.status_); \ + } \ + } while (false) + +namespace Details { +// Helper class to convert `Status` to `bool` so it can be used inside `if` statements. +struct StatusAdapter { + StatusAdapter(const Status& status) : status_(status) {} + StatusAdapter(Status&& status) : status_(std::move(status)) {} + + StatusAdapter(const StatusAdapter&) = delete; + StatusAdapter& operator=(const StatusAdapter&) = delete; + + explicit operator bool() const { return status_.ok(); } + + Status status_; +}; +} // namespace Details + } // namespace Http } // namespace Envoy diff --git a/source/common/http/url_utility.cc b/source/common/http/url_utility.cc new file mode 100644 index 0000000000000..d2fd43015280d --- /dev/null +++ b/source/common/http/url_utility.cc @@ -0,0 +1,95 @@ +#include "common/http/url_utility.h" + +#include + +#include +#include + +#include "common/common/assert.h" +#include "common/common/empty_string.h" +#include "common/common/utility.h" + +#include "absl/strings/numbers.h" +#include "absl/strings/str_cat.h" + +namespace Envoy { +namespace Http { +namespace Utility { + +bool Url::initialize(absl::string_view absolute_url, bool is_connect) { + // TODO(dio): When we have access to base::StringPiece, probably we can convert absolute_url to + // that instead. + GURL parsed(std::string{absolute_url}); + if (is_connect) { + return initializeForConnect(std::move(parsed)); + } + + // TODO(dio): Check if we need to accommodate to strictly validate only http(s) AND ws(s) schemes. + // Currently, we only accept http(s). + if (!parsed.is_valid() || !parsed.SchemeIsHTTPOrHTTPS()) { + return false; + } + + scheme_ = parsed.scheme(); + + // Only non-default ports will be rendered as part of host_and_port_. 
For example, + // http://www.host.com:80 has port component (i.e. 80). However, since 80 is a default port for + // http scheme, host_and_port_ will be rendered as www.host.com (without port). The same case with + // https scheme (with port 443) as well. + host_and_port_ = + absl::StrCat(parsed.host(), parsed.has_port() ? ":" : EMPTY_STRING, parsed.port()); + + const int port = parsed.EffectiveIntPort(); + if (port <= 0 || port > std::numeric_limits::max()) { + return false; + } + port_ = static_cast(port); + + // RFC allows the absolute URI to not end in "/", but the absolute path form must start with "/". + path_and_query_params_ = parsed.PathForRequest(); + if (parsed.has_ref()) { + absl::StrAppend(&path_and_query_params_, "#", parsed.ref()); + } + + return true; +} + +bool Url::initializeForConnect(GURL&& url) { + // CONNECT requests can only contain "hostname:port" + // https://github.com/nodejs/http-parser/blob/d9275da4650fd1133ddc96480df32a9efe4b059b/http_parser.c#L2503-L2506. + if (!url.is_valid() || url.IsStandard()) { + return false; + } + + const auto& parsed = url.parsed_for_possibly_invalid_spec(); + // The parsed.scheme contains the URL's hostname (stored by GURL), while host and port have -1 + // as their length. + if (parsed.scheme.len <= 0 || parsed.host.len > 0 || parsed.port.len > 0) { + return false; + } + + host_and_port_ = url.possibly_invalid_spec(); + const auto& parts = StringUtil::splitToken(host_and_port_, ":", /*keep_empty_string=*/true, + /*trim_whitespace=*/false); + if (parts.size() != 2 || static_cast(parsed.scheme.len) != parts.at(0).size() || + !validPortForConnect(parts.at(1))) { + return false; + } + + return true; +} + +bool Url::validPortForConnect(absl::string_view port_string) { + int port; + const bool valid = absl::SimpleAtoi(port_string, &port); + // Only a port value in valid range (1-65535) is allowed. 
+ if (!valid || port <= 0 || port > std::numeric_limits::max()) { + return false; + } + port_ = static_cast(port); + return true; +} + +} // namespace Utility +} // namespace Http +} // namespace Envoy diff --git a/source/common/http/url_utility.h b/source/common/http/url_utility.h new file mode 100644 index 0000000000000..fa140c6d5f12a --- /dev/null +++ b/source/common/http/url_utility.h @@ -0,0 +1,58 @@ +#pragma once + +#include + +#include "absl/strings/string_view.h" +#include "url/gurl.h" + +namespace Envoy { +namespace Http { +namespace Utility { + +/** + * Given a fully qualified URL, splits the string_view provided into scheme, host and path with + * query parameters components. + */ +class Url { +public: + /** + * Initializes a URL object from a URL string. + * @param absolute_url URL string to be parsed. + * @param is_connect whether to parse the absolute_url as CONNECT request URL or not. + * @return bool if the initialization is successful. + */ + bool initialize(absl::string_view absolute_url, bool is_connect); + + /** + * @return absl::string_view the scheme of a URL. + */ + absl::string_view scheme() const { return scheme_; } + + /** + * @return absl::string_view the host and port part of a URL. + */ + absl::string_view hostAndPort() const { return host_and_port_; } + + /** + * @return absl::string_view the path and query params part of a URL. + */ + absl::string_view pathAndQueryParams() const { return path_and_query_params_; } + + /** + * @return uint64_t the effective port of a URL. 
+ */ + uint64_t port() const { return port_; } + +private: + bool initializeForConnect(GURL&& url); + bool validPortForConnect(absl::string_view port_string); + + std::string scheme_; + std::string host_and_port_; + std::string path_and_query_params_; + uint16_t port_{0}; +}; + +} // namespace Utility +} // namespace Http +} // namespace Envoy diff --git a/source/common/http/user_agent.cc b/source/common/http/user_agent.cc index ca2774376751f..65c243aeae24c 100644 --- a/source/common/http/user_agent.cc +++ b/source/common/http/user_agent.cc @@ -10,6 +10,7 @@ #include "common/http/headers.h" #include "common/stats/symbol_table_impl.h" +#include "common/stats/utility.h" namespace Envoy { namespace Http { @@ -30,17 +31,14 @@ void UserAgent::completeConnectionLength(Stats::Timespan& span) { UserAgentStats::UserAgentStats(Stats::StatName prefix, Stats::StatName device, Stats::Scope& scope, const UserAgentContext& context) - : downstream_cx_total_(scope.counterFromStatName(Stats::StatName( - context.symbol_table_.join({prefix, device, context.downstream_cx_total_}).get()))), - downstream_cx_destroy_remote_active_rq_(scope.counterFromStatName(Stats::StatName( - context.symbol_table_ - .join({prefix, device, context.downstream_cx_destroy_remote_active_rq_}) - .get()))), - downstream_rq_total_(scope.counterFromStatName(Stats::StatName( - context.symbol_table_.join({prefix, device, context.downstream_rq_total_}).get()))), - downstream_cx_length_ms_(scope.histogramFromStatName( - Stats::StatName( - context.symbol_table_.join({prefix, device, context.downstream_cx_length_ms_}).get()), + : downstream_cx_total_(Stats::Utility::counterFromElements( + scope, {prefix, device, context.downstream_cx_total_})), + downstream_cx_destroy_remote_active_rq_(Stats::Utility::counterFromElements( + scope, {prefix, device, context.downstream_cx_destroy_remote_active_rq_})), + downstream_rq_total_(Stats::Utility::counterFromElements( + scope, {prefix, device, context.downstream_rq_total_})), + 
downstream_cx_length_ms_(Stats::Utility::histogramFromElements( + scope, {prefix, device, context.downstream_cx_length_ms_}, Stats::Histogram::Unit::Milliseconds)) { downstream_cx_total_.inc(); downstream_rq_total_.inc(); @@ -52,11 +50,11 @@ void UserAgent::initializeFromHeaders(const RequestHeaderMap& headers, Stats::St if (stats_ == nullptr && !initialized_) { initialized_ = true; - const HeaderEntry* user_agent = headers.UserAgent(); - if (user_agent != nullptr) { - if (user_agent->value().getStringView().find("iOS") != absl::string_view::npos) { + const absl::string_view user_agent = headers.getUserAgentValue(); + if (!user_agent.empty()) { + if (user_agent.find("iOS") != absl::string_view::npos) { stats_ = std::make_unique(prefix, context_.ios_, scope, context_); - } else if (user_agent->value().getStringView().find("android") != absl::string_view::npos) { + } else if (user_agent.find("android") != absl::string_view::npos) { stats_ = std::make_unique(prefix, context_.android_, scope, context_); } } diff --git a/source/common/http/utility.cc b/source/common/http/utility.cc index 544b1295b97b6..28e8872851cd6 100644 --- a/source/common/http/utility.cc +++ b/source/common/http/utility.cc @@ -1,7 +1,5 @@ #include "common/http/utility.h" -#include - #include #include #include @@ -23,7 +21,9 @@ #include "common/http/message_impl.h" #include "common/network/utility.h" #include "common/protobuf/utility.h" +#include "common/runtime/runtime_features.h" +#include "absl/container/node_hash_set.h" #include "absl/strings/match.h" #include "absl/strings/numbers.h" #include "absl/strings/str_cat.h" @@ -32,6 +32,28 @@ #include "nghttp2/nghttp2.h" namespace Envoy { +namespace Http { +namespace Utility { +Http::Status exceptionToStatus(std::function dispatch, + Buffer::Instance& data) { + Http::Status status; + try { + status = dispatch(data); + // TODO(#10878): Remove this when exception removal is complete. 
It is currently in migration, + // so dispatch may either return an error status or throw an exception. Soon we won't need to + // catch these exceptions, as all codec errors will be migrated to using error statuses that are + // returned from dispatch. + } catch (FrameFloodException& e) { + status = bufferFloodError(e.what()); + } catch (CodecProtocolException& e) { + status = codecProtocolError(e.what()); + } catch (PrematureResponseException& e) { + status = prematureResponseError(e.what(), e.responseCode()); + } + return status; +} +} // namespace Utility +} // namespace Http namespace Http2 { namespace Utility { @@ -40,7 +62,7 @@ namespace { void validateCustomSettingsParameters( const envoy::config::core::v3::Http2ProtocolOptions& options) { std::vector parameter_collisions, custom_parameter_collisions; - std::unordered_set + absl::node_hash_set custom_parameters; // User defined and named parameters with the same SETTINGS identifier can not both be set. for (const auto& it : options.custom_settings_parameters()) { @@ -121,12 +143,31 @@ const uint32_t OptionsLimits::DEFAULT_MAX_CONSECUTIVE_INBOUND_FRAMES_WITH_EMPTY_ const uint32_t OptionsLimits::DEFAULT_MAX_INBOUND_PRIORITY_FRAMES_PER_STREAM; const uint32_t OptionsLimits::DEFAULT_MAX_INBOUND_WINDOW_UPDATE_FRAMES_PER_DATA_FRAME_SENT; +envoy::config::core::v3::Http2ProtocolOptions +initializeAndValidateOptions(const envoy::config::core::v3::Http2ProtocolOptions& options, + bool hcm_stream_error_set, + const Protobuf::BoolValue& hcm_stream_error) { + auto ret = initializeAndValidateOptions(options); + if (Runtime::runtimeFeatureEnabled( + "envoy.reloadable_features.hcm_stream_error_on_invalid_message") && + !options.has_override_stream_error_on_invalid_http_message() && hcm_stream_error_set) { + ret.mutable_override_stream_error_on_invalid_http_message()->set_value( + hcm_stream_error.value()); + } + return ret; +} + envoy::config::core::v3::Http2ProtocolOptions initializeAndValidateOptions(const 
envoy::config::core::v3::Http2ProtocolOptions& options) { envoy::config::core::v3::Http2ProtocolOptions options_clone(options); // This will throw an exception when a custom parameter and a named parameter collide. validateCustomSettingsParameters(options); + if (!options.has_override_stream_error_on_invalid_http_message()) { + options_clone.mutable_override_stream_error_on_invalid_http_message()->set_value( + options.stream_error_on_invalid_http_messaging()); + } + if (!options_clone.has_hpack_table_size()) { options_clone.mutable_hpack_table_size()->set_value(OptionsLimits::DEFAULT_HPACK_TABLE_SIZE); } @@ -183,43 +224,6 @@ initializeAndValidateOptions(const envoy::config::core::v3::Http2ProtocolOptions namespace Http { -static const char kDefaultPath[] = "/"; - -bool Utility::Url::initialize(absl::string_view absolute_url, bool is_connect) { - struct http_parser_url u; - http_parser_url_init(&u); - const int result = - http_parser_parse_url(absolute_url.data(), absolute_url.length(), is_connect, &u); - - if (result != 0) { - return false; - } - if ((u.field_set & (1 << UF_HOST)) != (1 << UF_HOST) && - (u.field_set & (1 << UF_SCHEMA)) != (1 << UF_SCHEMA)) { - return false; - } - scheme_ = absl::string_view(absolute_url.data() + u.field_data[UF_SCHEMA].off, - u.field_data[UF_SCHEMA].len); - - uint16_t authority_len = u.field_data[UF_HOST].len; - if ((u.field_set & (1 << UF_PORT)) == (1 << UF_PORT)) { - authority_len = authority_len + u.field_data[UF_PORT].len + 1; - } - host_and_port_ = - absl::string_view(absolute_url.data() + u.field_data[UF_HOST].off, authority_len); - - // RFC allows the absolute-uri to not end in /, but the absolute path form - // must start with - uint64_t path_len = absolute_url.length() - (u.field_data[UF_HOST].off + hostAndPort().length()); - if (path_len > 0) { - uint64_t path_beginning = u.field_data[UF_HOST].off + hostAndPort().length(); - path_and_query_params_ = absl::string_view(absolute_url.data() + path_beginning, path_len); - } 
else if (!is_connect) { - path_and_query_params_ = absl::string_view(kDefaultPath, 1); - } - return true; -} - void Utility::appendXff(RequestHeaderMap& headers, const Network::Address::Instance& remote_address) { if (remote_address.type() != Network::Address::Type::Ip) { @@ -239,8 +243,7 @@ void Utility::appendVia(RequestOrResponseHeaderMap& headers, const std::string& std::string Utility::createSslRedirectPath(const RequestHeaderMap& headers) { ASSERT(headers.Host()); ASSERT(headers.Path()); - return fmt::format("https://{}{}", headers.Host()->value().getStringView(), - headers.Path()->value().getStringView()); + return fmt::format("https://{}{}", headers.getHostValue(), headers.getPathValue()); } Utility::QueryParams Utility::parseQueryString(absl::string_view url) { @@ -251,14 +254,26 @@ Utility::QueryParams Utility::parseQueryString(absl::string_view url) { } start++; - return parseParameters(url, start); + return parseParameters(url, start, /*decode_params=*/false); +} + +Utility::QueryParams Utility::parseAndDecodeQueryString(absl::string_view url) { + size_t start = url.find('?'); + if (start == std::string::npos) { + QueryParams params; + return params; + } + + start++; + return parseParameters(url, start, /*decode_params=*/true); } Utility::QueryParams Utility::parseFromBody(absl::string_view body) { - return parseParameters(body, 0); + return parseParameters(body, 0, /*decode_params=*/true); } -Utility::QueryParams Utility::parseParameters(absl::string_view data, size_t start) { +Utility::QueryParams Utility::parseParameters(absl::string_view data, size_t start, + bool decode_params) { QueryParams params; while (start < data.size()) { @@ -270,8 +285,10 @@ Utility::QueryParams Utility::parseParameters(absl::string_view data, size_t sta const size_t equal = param.find('='); if (equal != std::string::npos) { - params.emplace(StringUtil::subspan(data, start, start + equal), - StringUtil::subspan(data, start + equal + 1, end)); + const auto param_name = 
StringUtil::subspan(data, start, start + equal); + const auto param_value = StringUtil::subspan(data, start + equal + 1, end); + params.emplace(decode_params ? PercentEncoding::decode(param_name) : param_name, + decode_params ? PercentEncoding::decode(param_value) : param_value); } else { params.emplace(StringUtil::subspan(data, start, end), ""); } @@ -294,50 +311,41 @@ absl::string_view Utility::findQueryStringStart(const HeaderString& path) { std::string Utility::parseCookieValue(const HeaderMap& headers, const std::string& key) { - struct State { - std::string key_; - std::string ret_; - }; - - State state; - state.key_ = key; - - headers.iterateReverse( - [](const HeaderEntry& header, void* context) -> HeaderMap::Iterate { - // Find the cookie headers in the request (typically, there's only one). - if (header.key() == Http::Headers::get().Cookie.get()) { - - // Split the cookie header into individual cookies. - for (const auto s : StringUtil::splitToken(header.value().getStringView(), ";")) { - // Find the key part of the cookie (i.e. the name of the cookie). - size_t first_non_space = s.find_first_not_of(" "); - size_t equals_index = s.find('='); - if (equals_index == absl::string_view::npos) { - // The cookie is malformed if it does not have an `=`. Continue - // checking other cookies in this header. - continue; - } - const absl::string_view k = s.substr(first_non_space, equals_index - first_non_space); - State* state = static_cast(context); - // If the key matches, parse the value from the rest of the cookie string. - if (k == state->key_) { - absl::string_view v = s.substr(equals_index + 1, s.size() - 1); - - // Cookie values may be wrapped in double quotes. 
- // https://tools.ietf.org/html/rfc6265#section-4.1.1 - if (v.size() >= 2 && v.back() == '"' && v[0] == '"') { - v = v.substr(1, v.size() - 2); - } - state->ret_ = std::string{v}; - return HeaderMap::Iterate::Break; - } + std::string ret; + + headers.iterateReverse([&key, &ret](const HeaderEntry& header) -> HeaderMap::Iterate { + // Find the cookie headers in the request (typically, there's only one). + if (header.key() == Http::Headers::get().Cookie.get()) { + + // Split the cookie header into individual cookies. + for (const auto s : StringUtil::splitToken(header.value().getStringView(), ";")) { + // Find the key part of the cookie (i.e. the name of the cookie). + size_t first_non_space = s.find_first_not_of(" "); + size_t equals_index = s.find('='); + if (equals_index == absl::string_view::npos) { + // The cookie is malformed if it does not have an `=`. Continue + // checking other cookies in this header. + continue; + } + const absl::string_view k = s.substr(first_non_space, equals_index - first_non_space); + // If the key matches, parse the value from the rest of the cookie string. + if (k == key) { + absl::string_view v = s.substr(equals_index + 1, s.size() - 1); + + // Cookie values may be wrapped in double quotes. 
+ // https://tools.ietf.org/html/rfc6265#section-4.1.1 + if (v.size() >= 2 && v.back() == '"' && v[0] == '"') { + v = v.substr(1, v.size() - 2); } + ret = std::string{v}; + return HeaderMap::Iterate::Break; } - return HeaderMap::Iterate::Continue; - }, - &state); + } + } + return HeaderMap::Iterate::Continue; + }); - return state.ret_; + return ret; } std::string Utility::makeSetCookieValue(const std::string& key, const std::string& value, @@ -363,7 +371,7 @@ std::string Utility::makeSetCookieValue(const std::string& key, const std::strin uint64_t Utility::getResponseStatus(const ResponseHeaderMap& headers) { const HeaderEntry* header = headers.Status(); uint64_t response_code; - if (!header || !absl::SimpleAtoi(headers.Status()->value().getStringView(), &response_code)) { + if (!header || !absl::SimpleAtoi(headers.getStatusValue(), &response_code)) { throw CodecClientException(":status must be specified and a valid unsigned long"); } return response_code; @@ -372,21 +380,20 @@ uint64_t Utility::getResponseStatus(const ResponseHeaderMap& headers) { bool Utility::isUpgrade(const RequestOrResponseHeaderMap& headers) { // In firefox the "Connection" request header value is "keep-alive, Upgrade", // we should check if it contains the "Upgrade" token. 
- return (headers.Connection() && headers.Upgrade() && - Envoy::StringUtil::caseFindToken(headers.Connection()->value().getStringView(), ",", + return (headers.Upgrade() && + Envoy::StringUtil::caseFindToken(headers.getConnectionValue(), ",", Http::Headers::get().ConnectionValues.Upgrade.c_str())); } bool Utility::isH2UpgradeRequest(const RequestHeaderMap& headers) { - return headers.Method() && - headers.Method()->value().getStringView() == Http::Headers::get().MethodValues.Connect && + return headers.getMethodValue() == Http::Headers::get().MethodValues.Connect && headers.Protocol() && !headers.Protocol()->value().empty() && headers.Protocol()->value() != Headers::get().ProtocolValues.Bytestream; } bool Utility::isWebSocketUpgradeRequest(const RequestHeaderMap& headers) { return (isUpgrade(headers) && - absl::EqualsIgnoreCase(headers.Upgrade()->value().getStringView(), + absl::EqualsIgnoreCase(headers.getUpgradeValue(), Http::Headers::get().UpgradeValues.WebSocket)); } @@ -407,64 +414,74 @@ Utility::parseHttp1Settings(const envoy::config::core::v3::Http1ProtocolOptions& return ret; } -void Utility::sendLocalReply(bool is_grpc, StreamDecoderFilterCallbacks& callbacks, - const bool& is_reset, Code response_code, absl::string_view body_text, - const absl::optional grpc_status, - bool is_head_request) { +void Utility::sendLocalReply(const bool& is_reset, StreamDecoderFilterCallbacks& callbacks, + const LocalReplyData& local_reply_data) { sendLocalReply( - is_grpc, - [&](ResponseHeaderMapPtr&& headers, bool end_stream) -> void { - callbacks.encodeHeaders(std::move(headers), end_stream); - }, - [&](Buffer::Instance& data, bool end_stream) -> void { - callbacks.encodeData(data, end_stream); - }, - is_reset, response_code, body_text, grpc_status, is_head_request); -} - -void Utility::sendLocalReply( - bool is_grpc, - std::function encode_headers, - std::function encode_data, const bool& is_reset, - Code response_code, absl::string_view body_text, - const absl::optional 
grpc_status, bool is_head_request) { + is_reset, + Utility::EncodeFunctions{nullptr, + [&](ResponseHeaderMapPtr&& headers, bool end_stream) -> void { + callbacks.encodeHeaders(std::move(headers), end_stream); + }, + [&](Buffer::Instance& data, bool end_stream) -> void { + callbacks.encodeData(data, end_stream); + }}, + local_reply_data); +} + +void Utility::sendLocalReply(const bool& is_reset, const EncodeFunctions& encode_functions, + const LocalReplyData& local_reply_data) { // encode_headers() may reset the stream, so the stream must not be reset before calling it. ASSERT(!is_reset); + + // rewrite_response will rewrite response code and body text. + Code response_code = local_reply_data.response_code_; + std::string body_text(local_reply_data.body_text_); + absl::string_view content_type(Headers::get().ContentTypeValues.Text); + + ResponseHeaderMapPtr response_headers{createHeaderMap( + {{Headers::get().Status, std::to_string(enumToInt(response_code))}})}; + + if (encode_functions.rewrite_) { + encode_functions.rewrite_(*response_headers, response_code, body_text, content_type); + } + // Respond with a gRPC trailers-only response if the request is gRPC - if (is_grpc) { - ResponseHeaderMapPtr response_headers{createHeaderMap( - {{Headers::get().Status, std::to_string(enumToInt(Code::OK))}, - {Headers::get().ContentType, Headers::get().ContentTypeValues.Grpc}, - {Headers::get().GrpcStatus, - std::to_string(enumToInt( - grpc_status ? grpc_status.value() - : Grpc::Utility::httpToGrpcStatus(enumToInt(response_code))))}})}; - if (!body_text.empty() && !is_head_request) { + if (local_reply_data.is_grpc_) { + response_headers->setStatus(std::to_string(enumToInt(Code::OK))); + response_headers->setReferenceContentType(Headers::get().ContentTypeValues.Grpc); + response_headers->setGrpcStatus( + std::to_string(enumToInt(local_reply_data.grpc_status_ + ? 
local_reply_data.grpc_status_.value() + : Grpc::Utility::httpToGrpcStatus(enumToInt(response_code))))); + if (!body_text.empty() && !local_reply_data.is_head_request_) { // TODO(dio): Probably it is worth to consider caching the encoded message based on gRPC // status. + // JsonFormatter adds a '\n' at the end. For header value, it should be removed. + // https://github.com/envoyproxy/envoy/blob/master/source/common/formatter/substitution_formatter.cc#L129 + if (body_text[body_text.length() - 1] == '\n') { + body_text = body_text.substr(0, body_text.length() - 1); + } response_headers->setGrpcMessage(PercentEncoding::encode(body_text)); } - encode_headers(std::move(response_headers), true); // Trailers only response + encode_functions.encode_headers_(std::move(response_headers), true); // Trailers only response return; } - ResponseHeaderMapPtr response_headers{createHeaderMap( - {{Headers::get().Status, std::to_string(enumToInt(response_code))}})}; if (!body_text.empty()) { response_headers->setContentLength(body_text.size()); - response_headers->setReferenceContentType(Headers::get().ContentTypeValues.Text); + response_headers->setReferenceContentType(content_type); } - if (is_head_request) { - encode_headers(std::move(response_headers), true); + if (local_reply_data.is_head_request_) { + encode_functions.encode_headers_(std::move(response_headers), true); return; } - encode_headers(std::move(response_headers), body_text.empty()); + encode_functions.encode_headers_(std::move(response_headers), body_text.empty()); // encode_headers()) may have changed the referenced is_reset so we need to test it if (!body_text.empty() && !is_reset) { Buffer::OwnedImpl buffer(body_text); - encode_data(buffer, true); + encode_functions.encode_data_(buffer, true); } } @@ -722,15 +739,13 @@ const std::string Utility::resetReasonToString(const Http::StreamResetReason res void Utility::transformUpgradeRequestFromH1toH2(RequestHeaderMap& headers) { ASSERT(Utility::isUpgrade(headers)); - 
const HeaderString& upgrade = headers.Upgrade()->value(); headers.setReferenceMethod(Http::Headers::get().MethodValues.Connect); - headers.setProtocol(upgrade.getStringView()); + headers.setProtocol(headers.getUpgradeValue()); headers.removeUpgrade(); headers.removeConnection(); // nghttp2 rejects upgrade requests/responses with content length, so strip // any unnecessary content length header. - if (headers.ContentLength() != nullptr && - headers.ContentLength()->value().getStringView() == "0") { + if (headers.getContentLengthValue() == "0") { headers.removeContentLength(); } } @@ -741,8 +756,7 @@ void Utility::transformUpgradeResponseFromH1toH2(ResponseHeaderMap& headers) { } headers.removeUpgrade(); headers.removeConnection(); - if (headers.ContentLength() != nullptr && - headers.ContentLength()->value().getStringView() == "0") { + if (headers.getContentLengthValue() == "0") { headers.removeContentLength(); } } @@ -750,9 +764,8 @@ void Utility::transformUpgradeResponseFromH1toH2(ResponseHeaderMap& headers) { void Utility::transformUpgradeRequestFromH2toH1(RequestHeaderMap& headers) { ASSERT(Utility::isH2UpgradeRequest(headers)); - const HeaderString& protocol = headers.Protocol()->value(); headers.setReferenceMethod(Http::Headers::get().MethodValues.Get); - headers.setUpgrade(protocol.getStringView()); + headers.setUpgrade(headers.getProtocolValue()); headers.setReferenceConnection(Http::Headers::get().ConnectionValues.Upgrade); headers.removeProtocol(); } @@ -807,7 +820,9 @@ void Utility::traversePerFilterConfigGeneric( } } -std::string Utility::PercentEncoding::encode(absl::string_view value) { +std::string Utility::PercentEncoding::encode(absl::string_view value, + absl::string_view reserved_chars) { + absl::flat_hash_set reserved_char_set{reserved_chars.begin(), reserved_chars.end()}; for (size_t i = 0; i < value.size(); ++i) { const char& ch = value[i]; // The escaping characters are defined in @@ -816,22 +831,23 @@ std::string 
Utility::PercentEncoding::encode(absl::string_view value) { // We do checking for each char in the string. If the current char is included in the defined // escaping characters, we jump to "the slow path" (append the char [encoded or not encoded] // to the returned string one by one) started from the current index. - if (ch < ' ' || ch >= '~' || ch == '%') { - return PercentEncoding::encode(value, i); + if (ch < ' ' || ch >= '~' || reserved_char_set.find(ch) != reserved_char_set.end()) { + return PercentEncoding::encode(value, i, reserved_char_set); } } return std::string(value); } -std::string Utility::PercentEncoding::encode(absl::string_view value, const size_t index) { +std::string Utility::PercentEncoding::encode(absl::string_view value, const size_t index, + const absl::flat_hash_set& reserved_char_set) { std::string encoded; if (index > 0) { - absl::StrAppend(&encoded, value.substr(0, index - 1)); + absl::StrAppend(&encoded, value.substr(0, index)); } for (size_t i = index; i < value.size(); ++i) { const char& ch = value[i]; - if (ch < ' ' || ch >= '~' || ch == '%') { + if (ch < ' ' || ch >= '~' || reserved_char_set.find(ch) != reserved_char_set.end()) { // For consistency, URI producers should use uppercase hexadecimal digits for all // percent-encodings. https://tools.ietf.org/html/rfc3986#section-2.1. 
absl::StrAppend(&encoded, fmt::format("%{:02X}", ch)); diff --git a/source/common/http/utility.h b/source/common/http/utility.h index 241962466c82f..d4625c9b8e82e 100644 --- a/source/common/http/utility.h +++ b/source/common/http/utility.h @@ -13,6 +13,8 @@ #include "envoy/http/metadata_interface.h" #include "envoy/http/query_params.h" +#include "common/http/exception.h" +#include "common/http/status.h" #include "common/json/json_loader.h" #include "absl/strings/string_view.h" @@ -20,6 +22,31 @@ #include "nghttp2/nghttp2.h" namespace Envoy { +namespace Http { +namespace Utility { + +// This is a wrapper around dispatch calls that may throw an exception or may return an error status +// while exception removal is in migration. +// TODO(#10878): Remove this. +Http::Status exceptionToStatus(std::function dispatch, + Buffer::Instance& data); + +/** + * Well-known HTTP ALPN values. + */ +class AlpnNameValues { +public: + const std::string Http10 = "http/1.0"; + const std::string Http11 = "http/1.1"; + const std::string Http2 = "h2"; + const std::string Http2c = "h2c"; +}; + +using AlpnNames = ConstSingleton; + +} // namespace Utility +} // namespace Http + namespace Http2 { namespace Utility { @@ -89,38 +116,28 @@ struct OptionsLimits { envoy::config::core::v3::Http2ProtocolOptions initializeAndValidateOptions(const envoy::config::core::v3::Http2ProtocolOptions& options); +envoy::config::core::v3::Http2ProtocolOptions +initializeAndValidateOptions(const envoy::config::core::v3::Http2ProtocolOptions& options, + bool hcm_stream_error_set, + const Protobuf::BoolValue& hcm_stream_error); } // namespace Utility } // namespace Http2 namespace Http { namespace Utility { -/** - * Given a fully qualified URL, splits the string_view provided into scheme, - * host and path with query parameters components. 
- */ -class Url { -public: - bool initialize(absl::string_view absolute_url, bool is_connect_request); - absl::string_view scheme() { return scheme_; } - absl::string_view hostAndPort() { return host_and_port_; } - absl::string_view pathAndQueryParams() { return path_and_query_params_; } - -private: - absl::string_view scheme_; - absl::string_view host_and_port_; - absl::string_view path_and_query_params_; -}; - class PercentEncoding { public: /** - * Encodes string view to its percent encoded representation. + * Encodes string view to its percent encoded representation. Non-visible ASCII is always escaped, + * in addition to a given list of reserved chars. + * * @param value supplies string to be encoded. - * @return std::string percent-encoded string based on - * https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#responses. + * @param reserved_chars list of reserved chars to escape. By default the escaped chars in + * https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#responses are used. + * @return std::string percent-encoded string. */ - static std::string encode(absl::string_view value); + static std::string encode(absl::string_view value, absl::string_view reserved_chars = "%"); /** * Decodes string view from its percent encoded representation. @@ -131,7 +148,8 @@ class PercentEncoding { private: // Encodes string view to its percent encoded representation, with start index. - static std::string encode(absl::string_view value, const size_t index); + static std::string encode(absl::string_view value, const size_t index, + const absl::flat_hash_set& reserved_char_set); }; /** @@ -162,6 +180,13 @@ std::string createSslRedirectPath(const RequestHeaderMap& headers); */ QueryParams parseQueryString(absl::string_view url); +/** + * Parse a URL into query parameters. + * @param url supplies the url to parse. + * @return QueryParams the parsed and percent-decoded parameters, if any. 
+ */ +QueryParams parseAndDecodeQueryString(absl::string_view url); + /** * Parse a a request body into query parameters. * @param body supplies the body to parse. @@ -173,9 +198,11 @@ QueryParams parseFromBody(absl::string_view body); * Parse query parameters from a URL or body. * @param data supplies the data to parse. * @param start supplies the offset within the data. + * @param decode_params supplies the flag whether to percent-decode the parsed parameters (both name + * and value). Set to false to keep the parameters encoded. * @return QueryParams the parsed parameters, if any. */ -QueryParams parseParameters(absl::string_view data, size_t start); +QueryParams parseParameters(absl::string_view data, size_t start, bool decode_params); /** * Finds the start of the query string in a path @@ -238,47 +265,56 @@ bool isWebSocketUpgradeRequest(const RequestHeaderMap& headers); /** * @return Http1Settings An Http1Settings populated from the - * envoy::api::v2::core::Http1ProtocolOptions config. + * envoy::config::core::v3::Http1ProtocolOptions config. */ Http1Settings parseHttp1Settings(const envoy::config::core::v3::Http1ProtocolOptions& config); +struct EncodeFunctions { + // Function to rewrite locally generated response. + std::function + rewrite_; + // Function to encode response headers. + std::function encode_headers_; + // Function to encode the response body. + std::function encode_data_; +}; + +struct LocalReplyData { + // Tells if this is a response to a gRPC request. + bool is_grpc_; + // Supplies the HTTP response code. + Code response_code_; + // Supplies the optional body text which is returned. + absl::string_view body_text_; + // gRPC status code to override the httpToGrpcStatus mapping with. + const absl::optional grpc_status_; + // Tells if this is a response to a HEAD request. + bool is_head_request_ = false; +}; + /** * Create a locally generated response using filter callbacks. - * @param is_grpc tells if this is a response to a gRPC request. 
- * @param callbacks supplies the filter callbacks to use. * @param is_reset boolean reference that indicates whether a stream has been reset. It is the - * responsibility of the caller to ensure that this is set to false if onDestroy() - * is invoked in the context of sendLocalReply(). - * @param response_code supplies the HTTP response code. - * @param body_text supplies the optional body text which is sent using the text/plain content - * type. - * @param grpc_status the gRPC status code to override the httpToGrpcStatus mapping with. - * @param is_head_request tells if this is a response to a HEAD request + * responsibility of the caller to ensure that this is set to false if onDestroy() + * is invoked in the context of sendLocalReply(). + * @param callbacks supplies the filter callbacks to use. + * @param local_reply_data struct which keeps data related to generate reply. */ -void sendLocalReply(bool is_grpc, StreamDecoderFilterCallbacks& callbacks, const bool& is_reset, - Code response_code, absl::string_view body_text, - const absl::optional grpc_status, - bool is_head_request); +void sendLocalReply(const bool& is_reset, StreamDecoderFilterCallbacks& callbacks, + const LocalReplyData& local_reply_data); /** * Create a locally generated response using the provided lambdas. - * @param is_grpc tells if this is a response to a gRPC request. - * @param encode_headers supplies the function to encode response headers. - * @param encode_data supplies the function to encode the response body. + * @param is_reset boolean reference that indicates whether a stream has been reset. It is the * responsibility of the caller to ensure that this is set to false if onDestroy() * is invoked in the context of sendLocalReply(). - * @param response_code supplies the HTTP response code. - * @param body_text supplies the optional body text which is sent using the text/plain content - * type. - * @param grpc_status the gRPC status code to override the httpToGrpcStatus mapping with. 
- */ -void sendLocalReply( - bool is_grpc, - std::function encode_headers, - std::function encode_data, const bool& is_reset, - Code response_code, absl::string_view body_text, - const absl::optional grpc_status, bool is_head_request = false); + * @param encode_functions supplies the functions to encode response body and headers. + * @param local_reply_data struct which keeps data related to generate reply. + */ +void sendLocalReply(const bool& is_reset, const EncodeFunctions& encode_functions, + const LocalReplyData& local_reply_data); struct GetLastAddressFromXffInfo { // Last valid address pulled from the XFF header. diff --git a/source/common/init/BUILD b/source/common/init/BUILD index 6fef3006865be..5b75ca4a82851 100644 --- a/source/common/init/BUILD +++ b/source/common/init/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/common/init/manager_impl.cc b/source/common/init/manager_impl.cc index f60ddc64a9e90..95cb37e4cc3b6 100644 --- a/source/common/init/manager_impl.cc +++ b/source/common/init/manager_impl.cc @@ -1,19 +1,23 @@ #include "common/init/manager_impl.h" +#include + #include "common/common/assert.h" +#include "common/init/watcher_impl.h" namespace Envoy { namespace Init { ManagerImpl::ManagerImpl(absl::string_view name) : name_(fmt::format("init manager {}", name)), state_(State::Uninitialized), count_(0), - watcher_(name_, [this]() { onTargetReady(); }) {} + watcher_(name_, [this](absl::string_view target_name) { onTargetReady(target_name); }) {} Manager::State ManagerImpl::state() const { return state_; } void ManagerImpl::add(const Target& target) { ++count_; TargetHandlePtr target_handle(target.createHandle(name_)); + ++target_names_count_[target.name()]; switch (state_) { case State::Uninitialized: // If the manager isn't initialized yet, save the target handle to be 
initialized later. @@ -53,15 +57,26 @@ void ManagerImpl::initialize(const Watcher& watcher) { // completed immediately. for (const auto& target_handle : target_handles_) { if (!target_handle->initialize(watcher_)) { - onTargetReady(); + onTargetReady(target_handle->name()); } } } } -void ManagerImpl::onTargetReady() { +const absl::flat_hash_map& ManagerImpl::unreadyTargets() const { + return target_names_count_; +} + +void ManagerImpl::onTargetReady(absl::string_view target_name) { // If there are no remaining targets and one mysteriously calls us back, this manager is haunted. - ASSERT(count_ != 0, fmt::format("{} called back by target after initialization complete")); + ASSERT(count_ != 0, + fmt::format("{} called back by target after initialization complete", target_name)); + + // Decrease target_name count by 1. + ASSERT(target_names_count_.find(target_name) != target_names_count_.end()); + if (--target_names_count_[target_name] == 0) { + target_names_count_.erase(target_name); + } // If there are no uninitialized targets remaining when called back by a target, that means it was // the last. Signal `ready` to the handle we saved in `initialize`. diff --git a/source/common/init/manager_impl.h b/source/common/init/manager_impl.h index b92ac102fd729..026014245ccdb 100644 --- a/source/common/init/manager_impl.h +++ b/source/common/init/manager_impl.h @@ -7,6 +7,8 @@ #include "common/common/logger.h" #include "common/init/watcher_impl.h" +#include "absl/container/flat_hash_map.h" + namespace Envoy { namespace Init { @@ -35,27 +37,39 @@ class ManagerImpl : public Manager, Logger::Loggable { void add(const Target& target) override; void initialize(const Watcher& watcher) override; + // Expose the const reference of target_names_count_ hash map to public. 
+ const absl::flat_hash_map& unreadyTargets() const; + private: - void onTargetReady(); + // Callback function with an additional target_name parameter, decrease unready targets count by + // 1, update target_names_count_ hash map. + void onTargetReady(absl::string_view target_name); + void ready(); - // Human-readable name for logging + // Human-readable name for logging. const std::string name_; - // Current state + // Current state. State state_; - // Current number of registered targets that have not yet initialized + // Current number of registered targets that have not yet initialized. uint32_t count_; - // Handle to the watcher passed in `initialize`, to be called when initialization completes + // Handle to the watcher passed in `initialize`, to be called when initialization completes. WatcherHandlePtr watcher_handle_; - // Watcher to receive ready notifications from each target + // Watcher to receive ready notifications from each target. We restrict the watcher_ inside + // ManagerImpl to be constructed with the 'TargetAwareReadyFn' fn so that the init manager will + // get target name information when the watcher_ calls 'onTargetSendName(target_name)' For any + // other purpose, a watcher can be constructed with either TargetAwareReadyFn or ReadyFn. const WatcherImpl watcher_; - // All registered targets + // All registered targets. std::list target_handles_; + + // Count of target_name of unready targets. 
+ absl::flat_hash_map target_names_count_; }; } // namespace Init diff --git a/source/common/init/target_impl.cc b/source/common/init/target_impl.cc index 4d8df4c27aac1..8ee37eabfd147 100644 --- a/source/common/init/target_impl.cc +++ b/source/common/init/target_impl.cc @@ -22,6 +22,8 @@ bool TargetHandleImpl::initialize(const Watcher& watcher) const { } } +absl::string_view TargetHandleImpl::name() const { return name_; } + TargetImpl::TargetImpl(absl::string_view name, InitializeFn fn) : name_(fmt::format("target {}", name)), fn_(std::make_shared([this, fn](WatcherHandlePtr watcher_handle) { diff --git a/source/common/init/target_impl.h b/source/common/init/target_impl.h index d6a098daaca25..da7281c69b2f0 100644 --- a/source/common/init/target_impl.h +++ b/source/common/init/target_impl.h @@ -38,6 +38,8 @@ class TargetHandleImpl : public TargetHandle, Logger::Loggable // Init::TargetHandle bool initialize(const Watcher& watcher) const override; + absl::string_view name() const override; + private: // Name of the handle (almost always the name of the ManagerImpl calling the target) const std::string handle_name_; diff --git a/source/common/init/watcher_impl.cc b/source/common/init/watcher_impl.cc index b69fe3e7cf846..50b792bdcbbe0 100644 --- a/source/common/init/watcher_impl.cc +++ b/source/common/init/watcher_impl.cc @@ -4,7 +4,7 @@ namespace Envoy { namespace Init { WatcherHandleImpl::WatcherHandleImpl(absl::string_view handle_name, absl::string_view name, - std::weak_ptr fn) + std::weak_ptr fn) : handle_name_(handle_name), name_(name), fn_(std::move(fn)) {} bool WatcherHandleImpl::ready() const { @@ -12,26 +12,30 @@ bool WatcherHandleImpl::ready() const { if (locked_fn) { // If we can "lock" a shared pointer to the watcher's callback function, call it. ENVOY_LOG(debug, "{} initialized, notifying {}", handle_name_, name_); - (*locked_fn)(); + (*locked_fn)(handle_name_); return true; } else { // If not, the watcher was already destroyed. 
- ENVOY_LOG(debug, "{} initialized, but can't notify {} (unavailable)", handle_name_, name_); + ENVOY_LOG(debug, "{} initialized, but can't notify {}", handle_name_, name_); return false; } } WatcherImpl::WatcherImpl(absl::string_view name, ReadyFn fn) - : name_(name), fn_(std::make_shared(std::move(fn))) {} + : name_(name), fn_(std::make_shared( + [callback = std::move(fn)](absl::string_view) { callback(); })) {} + +WatcherImpl::WatcherImpl(absl::string_view name, TargetAwareReadyFn fn) + : name_(name), fn_(std::make_shared(std::move(fn))) {} WatcherImpl::~WatcherImpl() { ENVOY_LOG(debug, "{} destroyed", name_); } absl::string_view WatcherImpl::name() const { return name_; } WatcherHandlePtr WatcherImpl::createHandle(absl::string_view handle_name) const { - // Note: can't use std::make_unique because WatcherHandleImpl ctor is private + // Note: can't use std::make_unique because WatcherHandleImpl ctor is private. return std::unique_ptr( - new WatcherHandleImpl(handle_name, name_, std::weak_ptr(fn_))); + new WatcherHandleImpl(handle_name, name_, std::weak_ptr(fn_))); } } // namespace Init diff --git a/source/common/init/watcher_impl.h b/source/common/init/watcher_impl.h index 816a37c860eb2..fb41d8c0400a7 100644 --- a/source/common/init/watcher_impl.h +++ b/source/common/init/watcher_impl.h @@ -14,6 +14,7 @@ namespace Init { * initialization completes. */ using ReadyFn = std::function; +using TargetAwareReadyFn = std::function; /** * A WatcherHandleImpl functions as a weak reference to a Watcher. It is how a TargetImpl safely @@ -25,22 +26,22 @@ class WatcherHandleImpl : public WatcherHandle, Logger::Loggable fn); + std::weak_ptr fn); public: - // Init::WatcherHandle + // Init::WatcherHandle. bool ready() const override; private: // Name of the handle (either the name of the target calling the manager, or the name of the - // manager calling the client) + // manager calling the client). 
const std::string handle_name_; - // Name of the watcher (either the name of the manager, or the name of the client) + // Name of the watcher (either the name of the manager, or the name of the client). const std::string name_; - // The watcher's callback function, only called if the weak pointer can be "locked" - const std::weak_ptr fn_; + // The watcher's callback function, only called if the weak pointer can be "locked". + const std::weak_ptr fn_; }; /** @@ -51,22 +52,23 @@ class WatcherHandleImpl : public WatcherHandle, Logger::Loggable { public: /** - * @param name a human-readable watcher name, for logging / debugging - * @param fn a callback function to invoke when `ready` is called on the handle + * @param name a human-readable watcher name, for logging / debugging. + * @param fn a callback function to invoke when `ready` is called on the handle. */ WatcherImpl(absl::string_view name, ReadyFn fn); + WatcherImpl(absl::string_view name, TargetAwareReadyFn fn); ~WatcherImpl() override; - // Init::Watcher + // Init::Watcher. absl::string_view name() const override; WatcherHandlePtr createHandle(absl::string_view handle_name) const override; private: - // Human-readable name for logging + // Human-readable name for logging. const std::string name_; - // The callback function, called via WatcherHandleImpl by either the target or the manager - const std::shared_ptr fn_; + // The callback function, called via WatcherHandleImpl by either the target or the manager. 
+ const std::shared_ptr fn_; }; } // namespace Init diff --git a/source/common/json/BUILD b/source/common/json/BUILD index afcfec49af766..edda394d6949d 100644 --- a/source/common/json/BUILD +++ b/source/common/json/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/common/json/json_loader.cc b/source/common/json/json_loader.cc index 0e3a084b5d7c0..083f7b64f4091 100644 --- a/source/common/json/json_loader.cc +++ b/source/common/json/json_loader.cc @@ -3,10 +3,10 @@ #include #include #include +#include #include #include #include -#include #include #include "common/common/assert.h" @@ -127,7 +127,7 @@ class Field : public Object { bool boolean_value_; double double_value_; int64_t integer_value_; - std::unordered_map object_value_; + std::map object_value_; std::string string_value_; }; diff --git a/source/common/local_info/BUILD b/source/common/local_info/BUILD index 4cfb87f03834c..ae6482c600ad0 100644 --- a/source/common/local_info/BUILD +++ b/source/common/local_info/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/common/local_reply/BUILD b/source/common/local_reply/BUILD new file mode 100644 index 0000000000000..16995a49ea862 --- /dev/null +++ b/source/common/local_reply/BUILD @@ -0,0 +1,30 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_cc_library( + name = "local_reply_lib", + srcs = ["local_reply.cc"], + hdrs = ["local_reply.h"], + deps = [ + "//include/envoy/http:codes_interface", + "//include/envoy/http:header_map_interface", + "//include/envoy/server:filter_config_interface", + 
"//include/envoy/stream_info:stream_info_interface", + "//source/common/access_log:access_log_lib", + "//source/common/common:enum_to_int", + "//source/common/config:datasource_lib", + "//source/common/formatter:substitution_format_string_lib", + "//source/common/formatter:substitution_formatter_lib", + "//source/common/http:header_map_lib", + "//source/common/router:header_parser_lib", + "//source/common/stream_info:stream_info_lib", + "@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto", + ], +) diff --git a/source/common/local_reply/local_reply.cc b/source/common/local_reply/local_reply.cc new file mode 100644 index 0000000000000..d4549dc1a1356 --- /dev/null +++ b/source/common/local_reply/local_reply.cc @@ -0,0 +1,172 @@ +#include "common/local_reply/local_reply.h" + +#include +#include + +#include "common/access_log/access_log_impl.h" +#include "common/common/enum_to_int.h" +#include "common/config/datasource.h" +#include "common/formatter/substitution_format_string.h" +#include "common/formatter/substitution_formatter.h" +#include "common/http/header_map_impl.h" +#include "common/router/header_parser.h" + +namespace Envoy { +namespace LocalReply { + +class BodyFormatter { +public: + BodyFormatter() + : formatter_(std::make_unique("%LOCAL_REPLY_BODY%")), + content_type_(Http::Headers::get().ContentTypeValues.Text) {} + + BodyFormatter(const envoy::config::core::v3::SubstitutionFormatString& config) + : formatter_(Formatter::SubstitutionFormatStringUtils::fromProtoConfig(config)), + content_type_( + config.format_case() == + envoy::config::core::v3::SubstitutionFormatString::FormatCase::kJsonFormat + ? 
Http::Headers::get().ContentTypeValues.Json + : Http::Headers::get().ContentTypeValues.Text) {} + + void format(const Http::RequestHeaderMap& request_headers, + const Http::ResponseHeaderMap& response_headers, + const Http::ResponseTrailerMap& response_trailers, + const StreamInfo::StreamInfo& stream_info, std::string& body, + absl::string_view& content_type) const { + body = + formatter_->format(request_headers, response_headers, response_trailers, stream_info, body); + content_type = content_type_; + } + +private: + const Formatter::FormatterPtr formatter_; + const absl::string_view content_type_; +}; + +using BodyFormatterPtr = std::unique_ptr; +using HeaderParserPtr = std::unique_ptr; + +class ResponseMapper { +public: + ResponseMapper( + const envoy::extensions::filters::network::http_connection_manager::v3::ResponseMapper& + config, + Server::Configuration::FactoryContext& context) + : filter_(AccessLog::FilterFactory::fromProto(config.filter(), context.runtime(), + context.random(), + context.messageValidationVisitor())) { + if (config.has_status_code()) { + status_code_ = static_cast(config.status_code().value()); + } + if (config.has_body()) { + body_ = Config::DataSource::read(config.body(), true, context.api()); + } + + if (config.has_body_format_override()) { + body_formatter_ = std::make_unique(config.body_format_override()); + } + + header_parser_ = Envoy::Router::HeaderParser::configure(config.headers_to_add()); + } + + bool matchAndRewrite(const Http::RequestHeaderMap& request_headers, + Http::ResponseHeaderMap& response_headers, + const Http::ResponseTrailerMap& response_trailers, + StreamInfo::StreamInfoImpl& stream_info, Http::Code& code, std::string& body, + BodyFormatter*& final_formatter) const { + // If not matched, just bail out. 
+ if (!filter_->evaluate(stream_info, request_headers, response_headers, response_trailers)) { + return false; + } + + if (body_.has_value()) { + body = body_.value(); + } + + header_parser_->evaluateHeaders(response_headers, stream_info); + + if (status_code_.has_value() && code != status_code_.value()) { + code = status_code_.value(); + response_headers.setStatus(std::to_string(enumToInt(code))); + stream_info.response_code_ = static_cast(code); + } + + if (body_formatter_) { + final_formatter = body_formatter_.get(); + } + return true; + } + +private: + const AccessLog::FilterPtr filter_; + absl::optional status_code_; + absl::optional body_; + HeaderParserPtr header_parser_; + BodyFormatterPtr body_formatter_; +}; + +using ResponseMapperPtr = std::unique_ptr; + +class LocalReplyImpl : public LocalReply { +public: + LocalReplyImpl() : body_formatter_(std::make_unique()) {} + + LocalReplyImpl( + const envoy::extensions::filters::network::http_connection_manager::v3::LocalReplyConfig& + config, + Server::Configuration::FactoryContext& context) + : body_formatter_(config.has_body_format() + ? std::make_unique(config.body_format()) + : std::make_unique()) { + for (const auto& mapper : config.mappers()) { + mappers_.emplace_back(std::make_unique(mapper, context)); + } + } + + void rewrite(const Http::RequestHeaderMap* request_headers, + Http::ResponseHeaderMap& response_headers, StreamInfo::StreamInfoImpl& stream_info, + Http::Code& code, std::string& body, + absl::string_view& content_type) const override { + // Set response code to stream_info and response_headers due to: + // 1) StatusCode filter is using response_code from stream_info, + // 2) %RESP(:status)% is from Status() in response_headers. 
+ response_headers.setStatus(std::to_string(enumToInt(code))); + stream_info.response_code_ = static_cast(code); + + if (request_headers == nullptr) { + request_headers = Http::StaticEmptyHeaders::get().request_headers.get(); + } + + BodyFormatter* final_formatter{}; + for (const auto& mapper : mappers_) { + if (mapper->matchAndRewrite(*request_headers, response_headers, + *Http::StaticEmptyHeaders::get().response_trailers, stream_info, + code, body, final_formatter)) { + break; + } + } + + if (!final_formatter) { + final_formatter = body_formatter_.get(); + } + return final_formatter->format(*request_headers, response_headers, + *Http::StaticEmptyHeaders::get().response_trailers, stream_info, + body, content_type); + } + +private: + std::list mappers_; + const BodyFormatterPtr body_formatter_; +}; + +LocalReplyPtr Factory::createDefault() { return std::make_unique(); } + +LocalReplyPtr Factory::create( + const envoy::extensions::filters::network::http_connection_manager::v3::LocalReplyConfig& + config, + Server::Configuration::FactoryContext& context) { + return std::make_unique(config, context); +} + +} // namespace LocalReply +} // namespace Envoy diff --git a/source/common/local_reply/local_reply.h b/source/common/local_reply/local_reply.h new file mode 100644 index 0000000000000..cafcaf33d3079 --- /dev/null +++ b/source/common/local_reply/local_reply.h @@ -0,0 +1,54 @@ +#pragma once + +#include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" +#include "envoy/http/codes.h" +#include "envoy/http/header_map.h" +#include "envoy/server/filter_config.h" + +#include "common/stream_info/stream_info_impl.h" + +namespace Envoy { +namespace LocalReply { + +class LocalReply { +public: + virtual ~LocalReply() = default; + + /** + * rewrite the response status code, body and content_type. + * @param request_headers supplies the information about request headers required by filters. 
+ * @param stream_info supplies the information about streams required by filters. + * @param code status code. + * @param body response body. + * @param content_type response content_type. + */ + virtual void rewrite(const Http::RequestHeaderMap* request_headers, + Http::ResponseHeaderMap& response_headers, + StreamInfo::StreamInfoImpl& stream_info, Http::Code& code, std::string& body, + absl::string_view& content_type) const PURE; +}; + +using LocalReplyPtr = std::unique_ptr; + +/** + * Access log filter factory that reads from proto. + */ +class Factory { +public: + /** + * Create a LocalReply object from ProtoConfig + */ + static LocalReplyPtr + create(const envoy::extensions::filters::network::http_connection_manager::v3::LocalReplyConfig& + config, + Server::Configuration::FactoryContext& context); + + /** + * Create a default LocalReply object with empty config. + * It is used at places without Server::Configuration::FactoryContext. + */ + static LocalReplyPtr createDefault(); +}; + +} // namespace LocalReply +} // namespace Envoy diff --git a/source/common/memory/BUILD b/source/common/memory/BUILD index 1501b81a6b018..45cc04041baa6 100644 --- a/source/common/memory/BUILD +++ b/source/common/memory/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/common/network/BUILD b/source/common/network/BUILD index cecff719b0468..b40195b288b79 100644 --- a/source/common/network/BUILD +++ b/source/common/network/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( @@ -20,9 +20,11 @@ envoy_cc_library( ], deps = [ ":io_socket_error_lib", + ":socket_interface_lib", "//include/envoy/buffer:buffer_interface", 
"//include/envoy/network:address_interface", "//include/envoy/network:io_handle_interface", + "//include/envoy/network:socket_interface", "//source/common/api:os_sys_calls_lib", "//source/common/common:assert_lib", "//source/common/common:utility_lib", @@ -46,7 +48,6 @@ envoy_cc_library( deps = [ ":address_lib", ":utility_lib", - "//include/envoy/json:json_object_interface", "//include/envoy/network:address_interface", "//source/common/common:assert_lib", "//source/common/common:utility_lib", @@ -142,6 +143,7 @@ envoy_cc_library( hdrs = ["hash_policy.h"], deps = [ "//include/envoy/network:hash_policy_interface", + "//source/common/common:assert_lib", "//source/common/common:hash_lib", "@envoy_api//envoy/type/v3:pkg_cc_proto", ], @@ -154,13 +156,17 @@ envoy_cc_library( deps = [ "//include/envoy/api:io_error_interface", "//source/common/common:assert_lib", + "//source/common/common:utility_lib", ], ) envoy_cc_library( name = "lc_trie_lib", hdrs = ["lc_trie.h"], - external_deps = ["abseil_int128"], + external_deps = [ + "abseil_node_hash_set", + "abseil_int128", + ], deps = [ ":address_lib", ":cidr_range_lib", @@ -169,13 +175,45 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "socket_interface_lib", + hdrs = ["socket_interface.h"], + deps = [ + "//include/envoy/config:typed_config_interface", + "//include/envoy/network:socket_interface", + "//include/envoy/registry", + "//include/envoy/server:bootstrap_extension_config_interface", + ], +) + +envoy_cc_library( + name = "socket_lib", + srcs = [ + "socket_impl.cc", + "socket_interface_impl.cc", + ], + hdrs = [ + "socket_impl.h", + "socket_interface_impl.h", + ], + deps = [ + ":address_lib", + ":socket_interface_lib", + "//include/envoy/network:socket_interface", + "//source/common/common:assert_lib", + "//source/common/common:utility_lib", + "@envoy_api//envoy/extensions/network/socket_interface/v3:pkg_cc_proto", + ], +) + envoy_cc_library( name = "listen_socket_lib", srcs = ["listen_socket_impl.cc"], hdrs = 
["listen_socket_impl.h"], deps = [ - ":address_lib", + ":socket_lib", ":utility_lib", + "//include/envoy/network:exception_interface", "//include/envoy/network:listen_socket_interface", "//source/common/common:assert_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", @@ -197,9 +235,12 @@ envoy_cc_library( deps = [ ":address_lib", ":listen_socket_lib", + ":udp_default_writer_config", "//include/envoy/event:dispatcher_interface", "//include/envoy/event:file_event_interface", + "//include/envoy/network:exception_interface", "//include/envoy/network:listener_interface", + "//include/envoy/runtime:runtime_interface", "//include/envoy/stats:stats_interface", "//include/envoy/stats:stats_macros", "//source/common/buffer:buffer_lib", @@ -255,6 +296,7 @@ envoy_cc_library( "//source/common/api:os_sys_calls_lib", "//source/common/common:assert_lib", "//source/common/common:minimal_logger_lib", + "//source/common/common:utility_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], ) @@ -266,6 +308,7 @@ envoy_cc_library( external_deps = ["abseil_optional"], deps = [ ":address_lib", + ":socket_lib", ":socket_option_lib", "//include/envoy/network:listen_socket_interface", "//source/common/api:os_sys_calls_lib", @@ -296,6 +339,7 @@ envoy_cc_library( hdrs = ["utility.h"], deps = [ ":address_lib", + ":socket_lib", "//include/envoy/network:connection_interface", "//include/envoy/network:listener_interface", "//include/envoy/stats:stats_interface", @@ -315,8 +359,10 @@ envoy_cc_library( hdrs = ["transport_socket_options_impl.h"], deps = [ ":application_protocol_lib", + ":proxy_protocol_filter_state_lib", ":upstream_server_name_lib", ":upstream_subject_alt_names_lib", + "//include/envoy/network:proxy_protocol_options_lib", "//include/envoy/network:transport_socket_interface", "//include/envoy/stream_info:filter_state_interface", "//source/common/common:scalar_to_byte_vector_lib", @@ -359,3 +405,40 @@ envoy_cc_library( "@envoy_api//envoy/config/listener/v3:pkg_cc_proto", ], ) + 
+envoy_cc_library( + name = "udp_packet_writer_handler_lib", + srcs = ["udp_packet_writer_handler_impl.cc"], + hdrs = ["udp_packet_writer_handler_impl.h"], + deps = [ + ":io_socket_error_lib", + ":utility_lib", + "//include/envoy/network:socket_interface", + "//include/envoy/network:udp_packet_writer_config_interface", + "//include/envoy/network:udp_packet_writer_handler_interface", + "//source/common/buffer:buffer_lib", + ], +) + +envoy_cc_library( + name = "udp_default_writer_config", + srcs = ["udp_default_writer_config.cc"], + hdrs = ["udp_default_writer_config.h"], + deps = [ + ":udp_packet_writer_handler_lib", + "//include/envoy/network:udp_packet_writer_config_interface", + "//include/envoy/registry", + "@envoy_api//envoy/config/listener/v3:pkg_cc_proto", + ], +) + +envoy_cc_library( + name = "proxy_protocol_filter_state_lib", + srcs = ["proxy_protocol_filter_state.cc"], + hdrs = ["proxy_protocol_filter_state.h"], + deps = [ + "//include/envoy/network:proxy_protocol_options_lib", + "//include/envoy/stream_info:filter_state_interface", + "//source/common/common:macros", + ], +) diff --git a/source/common/network/addr_family_aware_socket_option_impl.cc b/source/common/network/addr_family_aware_socket_option_impl.cc index 60d33382c91a0..4a234e8fbca3e 100644 --- a/source/common/network/addr_family_aware_socket_option_impl.cc +++ b/source/common/network/addr_family_aware_socket_option_impl.cc @@ -7,41 +7,17 @@ #include "common/api/os_sys_calls_impl.h" #include "common/common/assert.h" #include "common/network/address_impl.h" +#include "common/network/socket_interface_impl.h" #include "common/network/socket_option_impl.h" namespace Envoy { namespace Network { namespace { -Address::IpVersion getVersionFromAddress(Address::InstanceConstSharedPtr addr) { - if (addr->ip() != nullptr) { - return addr->ip()->version(); - } - throw EnvoyException("Unable to set socket option on non-IP sockets"); -} - -absl::optional getVersionFromSocket(const Socket& socket) { - try { - 
// We have local address when the socket is used in a listener but have to - // infer the IP from the socket FD when initiating connections. - // TODO(htuch): Figure out a way to obtain a consistent interface for IP - // version from socket. - if (socket.localAddress()) { - return {getVersionFromAddress(socket.localAddress())}; - } else { - return {getVersionFromAddress(Address::addressFromFd(socket.ioHandle().fd()))}; - } - } catch (const EnvoyException&) { - // Ignore, we get here because we failed in getsockname(). - // TODO(htuch): We should probably clean up this logic to avoid relying on exceptions. - } - - return absl::nullopt; -} SocketOptionImplOptRef getOptionForSocket(const Socket& socket, SocketOptionImpl& ipv4_option, SocketOptionImpl& ipv6_option) { - auto version = getVersionFromSocket(socket); + auto version = socket.ipVersion(); if (!version.has_value()) { return absl::nullopt; } diff --git a/source/common/network/address_impl.cc b/source/common/network/address_impl.cc index 6b1d46c85a3be..57d1317b7e4df 100644 --- a/source/common/network/address_impl.cc +++ b/source/common/network/address_impl.cc @@ -7,11 +7,10 @@ #include "envoy/common/exception.h" #include "envoy/common/platform.h" -#include "common/api/os_sys_calls_impl.h" #include "common/common/assert.h" #include "common/common/fmt.h" #include "common/common/utility.h" -#include "common/network/io_socket_handle_impl.h" +#include "common/network/socket_interface.h" namespace Envoy { namespace Network { @@ -22,7 +21,7 @@ namespace { // Validate that IPv4 is supported on this platform, raise an exception for the // given address if not. 
void validateIpv4Supported(const std::string& address) { - static const bool supported = Network::Address::ipFamilySupported(AF_INET); + static const bool supported = SocketInterfaceSingleton::get().ipFamilySupported(AF_INET); if (!supported) { throw EnvoyException( fmt::format("IPv4 addresses are not supported on this machine: {}", address)); @@ -32,7 +31,7 @@ void validateIpv4Supported(const std::string& address) { // Validate that IPv6 is supported on this platform, raise an exception for the // given address if not. void validateIpv6Supported(const std::string& address) { - static const bool supported = Network::Address::ipFamilySupported(AF_INET6); + static const bool supported = SocketInterfaceSingleton::get().ipFamilySupported(AF_INET6); if (!supported) { throw EnvoyException( fmt::format("IPv6 addresses are not supported on this machine: {}", address)); @@ -48,17 +47,6 @@ std::string friendlyNameFromAbstractPath(absl::string_view path) { } // namespace -// Check if an IP family is supported on this machine. 
-bool ipFamilySupported(int domain) { - Api::OsSysCalls& os_sys_calls = Api::OsSysCallsSingleton::get(); - const Api::SysCallSocketResult result = os_sys_calls.socket(domain, SOCK_STREAM, 0); - if (SOCKET_VALID(result.rc_)) { - RELEASE_ASSERT(os_sys_calls.close(result.rc_).rc_ == 0, - absl::StrCat("Fail to close fd: response code ", result.rc_)); - } - return SOCKET_VALID(result.rc_); -} - Address::InstanceConstSharedPtr addressFromSockAddr(const sockaddr_storage& ss, socklen_t ss_len, bool v6only) { RELEASE_ASSERT(ss_len == 0 || static_cast(ss_len) >= sizeof(sa_family_t), ""); @@ -103,108 +91,9 @@ Address::InstanceConstSharedPtr addressFromSockAddr(const sockaddr_storage& ss, NOT_REACHED_GCOVR_EXCL_LINE; } -InstanceConstSharedPtr addressFromFd(os_fd_t fd) { - sockaddr_storage ss; - socklen_t ss_len = sizeof ss; - auto& os_sys_calls = Api::OsSysCallsSingleton::get(); - Api::SysCallIntResult result = - os_sys_calls.getsockname(fd, reinterpret_cast(&ss), &ss_len); - if (result.rc_ != 0) { - throw EnvoyException(fmt::format("getsockname failed for '{}': ({}) {}", fd, result.errno_, - strerror(result.errno_))); - } - int socket_v6only = 0; - if (ss.ss_family == AF_INET6) { - socklen_t size_int = sizeof(socket_v6only); - result = os_sys_calls.getsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &socket_v6only, &size_int); -#ifdef WIN32 - // On Windows, it is possible for this getsockopt() call to fail. - // This can happen if the address we are trying to connect to has nothing - // listening. 
So we can't use RELEASE_ASSERT and instead must throw an - // exception - if (SOCKET_FAILURE(result.rc_)) { - throw EnvoyException(fmt::format("getsockopt failed for '{}': ({}) {}", fd, result.errno_, - strerror(result.errno_))); - } -#else - RELEASE_ASSERT(result.rc_ == 0, ""); -#endif - } - return addressFromSockAddr(ss, ss_len, socket_v6only); -} - -InstanceConstSharedPtr peerAddressFromFd(os_fd_t fd) { - sockaddr_storage ss; - socklen_t ss_len = sizeof ss; - auto& os_sys_calls = Api::OsSysCallsSingleton::get(); - Api::SysCallIntResult result = - os_sys_calls.getpeername(fd, reinterpret_cast(&ss), &ss_len); - if (result.rc_ != 0) { - throw EnvoyException( - fmt::format("getpeername failed for '{}': {}", fd, strerror(result.errno_))); - } -#ifdef __APPLE__ - if (ss_len == sizeof(sockaddr) && ss.ss_family == AF_UNIX) -#else - if (ss_len == sizeof(sa_family_t) && ss.ss_family == AF_UNIX) -#endif - { - // For Unix domain sockets, can't find out the peer name, but it should match our own - // name for the socket (i.e. the path should match, barring any namespace or other - // mechanisms to hide things, of which there are many). 
- ss_len = sizeof ss; - result = os_sys_calls.getsockname(fd, reinterpret_cast(&ss), &ss_len); - if (result.rc_ != 0) { - throw EnvoyException( - fmt::format("getsockname failed for '{}': {}", fd, strerror(result.errno_))); - } - } - return addressFromSockAddr(ss, ss_len); -} - -IoHandlePtr InstanceBase::socketFromSocketType(SocketType socket_type) const { -#if defined(__APPLE__) || defined(WIN32) - int flags = 0; -#else - int flags = SOCK_NONBLOCK; -#endif - - if (socket_type == SocketType::Stream) { - flags |= SOCK_STREAM; - } else { - flags |= SOCK_DGRAM; - } - - int domain; - if (type() == Type::Ip) { - IpVersion version = ip()->version(); - if (version == IpVersion::v6) { - domain = AF_INET6; - } else { - ASSERT(version == IpVersion::v4); - domain = AF_INET; - } - } else { - ASSERT(type() == Type::Pipe); - domain = AF_UNIX; - } - - auto os_sys_calls = Api::OsSysCallsSingleton::get(); - const Api::SysCallSocketResult result = os_sys_calls.socket(domain, flags, 0); - RELEASE_ASSERT(SOCKET_VALID(result.rc_), - fmt::format("socket(2) failed, got error: {}", strerror(result.errno_))); - IoHandlePtr io_handle = std::make_unique(result.rc_); - -#if defined(__APPLE__) || defined(WIN32) - // Cannot set SOCK_NONBLOCK as a ::socket flag. 
- const int rc = os_sys_calls.setsocketblocking(io_handle->fd(), false).rc_; - RELEASE_ASSERT(!SOCKET_FAILURE(rc), ""); -#endif - - return io_handle; -} - -Ipv4Instance::Ipv4Instance(const sockaddr_in* address) : InstanceBase(Type::Ip) { +Ipv4Instance::Ipv4Instance(const sockaddr_in* address, absl::string_view sock_interface) + : InstanceBase(Type::Ip, sock_interface) { + memset(&ip_.ipv4_.address_, 0, sizeof(ip_.ipv4_.address_)); ip_.ipv4_.address_ = *address; ip_.friendly_address_ = sockaddrToString(*address); @@ -217,9 +106,12 @@ Ipv4Instance::Ipv4Instance(const sockaddr_in* address) : InstanceBase(Type::Ip) validateIpv4Supported(friendly_name_); } -Ipv4Instance::Ipv4Instance(const std::string& address) : Ipv4Instance(address, 0) {} +Ipv4Instance::Ipv4Instance(const std::string& address, absl::string_view sock_interface) + : Ipv4Instance(address, 0, sock_interface) {} -Ipv4Instance::Ipv4Instance(const std::string& address, uint32_t port) : InstanceBase(Type::Ip) { +Ipv4Instance::Ipv4Instance(const std::string& address, uint32_t port, + absl::string_view sock_interface) + : InstanceBase(Type::Ip, sock_interface) { memset(&ip_.ipv4_.address_, 0, sizeof(ip_.ipv4_.address_)); ip_.ipv4_.address_.sin_family = AF_INET; ip_.ipv4_.address_.sin_port = htons(port); @@ -233,7 +125,8 @@ Ipv4Instance::Ipv4Instance(const std::string& address, uint32_t port) : Instance ip_.friendly_address_ = address; } -Ipv4Instance::Ipv4Instance(uint32_t port) : InstanceBase(Type::Ip) { +Ipv4Instance::Ipv4Instance(uint32_t port, absl::string_view sock_interface) + : InstanceBase(Type::Ip, sock_interface) { memset(&ip_.ipv4_.address_, 0, sizeof(ip_.ipv4_.address_)); ip_.ipv4_.address_.sin_family = AF_INET; ip_.ipv4_.address_.sin_port = htons(port); @@ -249,17 +142,6 @@ bool Ipv4Instance::operator==(const Instance& rhs) const { (ip_.port() == rhs_casted->ip_.port())); } -Api::SysCallIntResult Ipv4Instance::bind(os_fd_t fd) const { - return Api::OsSysCallsSingleton::get().bind( - fd, 
reinterpret_cast(&ip_.ipv4_.address_), sizeof(ip_.ipv4_.address_)); -} - -Api::SysCallIntResult Ipv4Instance::connect(os_fd_t fd) const { - return Api::OsSysCallsSingleton::get().connect(fd, sockAddr(), sockAddrLen()); -} - -IoHandlePtr Ipv4Instance::socket(SocketType type) const { return socketFromSocketType(type); } - std::string Ipv4Instance::sockaddrToString(const sockaddr_in& addr) { static constexpr size_t BufferSize = 16; // enough space to hold an IPv4 address in string form char str[BufferSize]; @@ -296,6 +178,8 @@ absl::uint128 Ipv6Instance::Ipv6Helper::address() const { uint32_t Ipv6Instance::Ipv6Helper::port() const { return ntohs(address_.sin6_port); } +bool Ipv6Instance::Ipv6Helper::v6only() const { return v6only_; }; + std::string Ipv6Instance::Ipv6Helper::makeFriendlyAddress() const { char str[INET6_ADDRSTRLEN]; const char* ptr = inet_ntop(AF_INET6, &address_.sin6_addr, str, INET6_ADDRSTRLEN); @@ -303,17 +187,22 @@ std::string Ipv6Instance::Ipv6Helper::makeFriendlyAddress() const { return ptr; } -Ipv6Instance::Ipv6Instance(const sockaddr_in6& address, bool v6only) : InstanceBase(Type::Ip) { +Ipv6Instance::Ipv6Instance(const sockaddr_in6& address, bool v6only, + absl::string_view sock_interface) + : InstanceBase(Type::Ip, sock_interface) { ip_.ipv6_.address_ = address; ip_.friendly_address_ = ip_.ipv6_.makeFriendlyAddress(); - ip_.v6only_ = v6only; + ip_.ipv6_.v6only_ = v6only; friendly_name_ = fmt::format("[{}]:{}", ip_.friendly_address_, ip_.port()); validateIpv6Supported(friendly_name_); } -Ipv6Instance::Ipv6Instance(const std::string& address) : Ipv6Instance(address, 0) {} +Ipv6Instance::Ipv6Instance(const std::string& address, absl::string_view sock_interface) + : Ipv6Instance(address, 0, sock_interface) {} -Ipv6Instance::Ipv6Instance(const std::string& address, uint32_t port) : InstanceBase(Type::Ip) { +Ipv6Instance::Ipv6Instance(const std::string& address, uint32_t port, + absl::string_view sock_interface) + : InstanceBase(Type::Ip, 
sock_interface) { ip_.ipv6_.address_.sin6_family = AF_INET6; ip_.ipv6_.address_.sin6_port = htons(port); if (!address.empty()) { @@ -329,7 +218,8 @@ Ipv6Instance::Ipv6Instance(const std::string& address, uint32_t port) : Instance validateIpv6Supported(friendly_name_); } -Ipv6Instance::Ipv6Instance(uint32_t port) : Ipv6Instance("", port) {} +Ipv6Instance::Ipv6Instance(uint32_t port, absl::string_view sock_interface) + : Ipv6Instance("", port, sock_interface) {} bool Ipv6Instance::operator==(const Instance& rhs) const { const auto* rhs_casted = dynamic_cast(&rhs); @@ -337,59 +227,42 @@ bool Ipv6Instance::operator==(const Instance& rhs) const { (ip_.port() == rhs_casted->ip_.port())); } -Api::SysCallIntResult Ipv6Instance::bind(os_fd_t fd) const { - return Api::OsSysCallsSingleton::get().bind( - fd, reinterpret_cast(&ip_.ipv6_.address_), sizeof(ip_.ipv6_.address_)); -} - -Api::SysCallIntResult Ipv6Instance::connect(os_fd_t fd) const { - return Api::OsSysCallsSingleton::get().connect(fd, sockAddr(), sockAddrLen()); -} - -IoHandlePtr Ipv6Instance::socket(SocketType type) const { - IoHandlePtr io_handle = socketFromSocketType(type); - // Setting IPV6_V6ONLY restricts the IPv6 socket to IPv6 connections only. 
- const int v6only = ip_.v6only_; - const Api::SysCallIntResult result = Api::OsSysCallsSingleton::get().setsockopt( - io_handle->fd(), IPPROTO_IPV6, IPV6_V6ONLY, reinterpret_cast(&v6only), - sizeof(v6only)); - RELEASE_ASSERT(!SOCKET_FAILURE(result.rc_), ""); - return io_handle; -} - -PipeInstance::PipeInstance(const sockaddr_un* address, socklen_t ss_len, mode_t mode) - : InstanceBase(Type::Pipe) { +PipeInstance::PipeInstance(const sockaddr_un* address, socklen_t ss_len, mode_t mode, + absl::string_view sock_interface) + : InstanceBase(Type::Pipe, sock_interface) { if (address->sun_path[0] == '\0') { #if !defined(__linux__) throw EnvoyException("Abstract AF_UNIX sockets are only supported on linux."); #endif RELEASE_ASSERT(static_cast(ss_len) >= offsetof(struct sockaddr_un, sun_path) + 1, ""); - abstract_namespace_ = true; - address_length_ = ss_len - offsetof(struct sockaddr_un, sun_path); + pipe_.abstract_namespace_ = true; + pipe_.address_length_ = ss_len - offsetof(struct sockaddr_un, sun_path); } - address_ = *address; - if (abstract_namespace_) { + pipe_.address_ = *address; + if (pipe_.abstract_namespace_) { if (mode != 0) { throw EnvoyException("Cannot set mode for Abstract AF_UNIX sockets"); } // Replace all null characters with '@' in friendly_name_. 
- friendly_name_ = - friendlyNameFromAbstractPath(absl::string_view(address_.sun_path, address_length_)); + friendly_name_ = friendlyNameFromAbstractPath( + absl::string_view(pipe_.address_.sun_path, pipe_.address_length_)); } else { friendly_name_ = address->sun_path; } - this->mode = mode; + pipe_.mode_ = mode; } -PipeInstance::PipeInstance(const std::string& pipe_path, mode_t mode) : InstanceBase(Type::Pipe) { - if (pipe_path.size() >= sizeof(address_.sun_path)) { +PipeInstance::PipeInstance(const std::string& pipe_path, mode_t mode, + absl::string_view sock_interface) + : InstanceBase(Type::Pipe, sock_interface) { + if (pipe_path.size() >= sizeof(pipe_.address_.sun_path)) { throw EnvoyException( fmt::format("Path \"{}\" exceeds maximum UNIX domain socket path size of {}.", pipe_path, - sizeof(address_.sun_path))); + sizeof(pipe_.address_.sun_path))); } - memset(&address_, 0, sizeof(address_)); - address_.sun_family = AF_UNIX; + memset(&pipe_.address_, 0, sizeof(pipe_.address_)); + pipe_.address_.sun_family = AF_UNIX; if (pipe_path[0] == '@') { // This indicates an abstract namespace. 
// In this case, null bytes in the name have no special significance, and so we copy all @@ -402,50 +275,27 @@ PipeInstance::PipeInstance(const std::string& pipe_path, mode_t mode) : Instance if (mode != 0) { throw EnvoyException("Cannot set mode for Abstract AF_UNIX sockets"); } - abstract_namespace_ = true; - address_length_ = pipe_path.size(); - memcpy(&address_.sun_path[0], pipe_path.data(), pipe_path.size()); - address_.sun_path[0] = '\0'; - address_.sun_path[pipe_path.size()] = '\0'; - friendly_name_ = - friendlyNameFromAbstractPath(absl::string_view(address_.sun_path, address_length_)); + pipe_.abstract_namespace_ = true; + pipe_.address_length_ = pipe_path.size(); + memcpy(&pipe_.address_.sun_path[0], pipe_path.data(), pipe_path.size()); + pipe_.address_.sun_path[0] = '\0'; + pipe_.address_.sun_path[pipe_path.size()] = '\0'; + friendly_name_ = friendlyNameFromAbstractPath( + absl::string_view(pipe_.address_.sun_path, pipe_.address_length_)); } else { // Throw an error if the pipe path has an embedded null character. if (pipe_path.size() != strlen(pipe_path.c_str())) { throw EnvoyException("UNIX domain socket pathname contains embedded null characters"); } - StringUtil::strlcpy(&address_.sun_path[0], pipe_path.c_str(), sizeof(address_.sun_path)); - friendly_name_ = address_.sun_path; + StringUtil::strlcpy(&pipe_.address_.sun_path[0], pipe_path.c_str(), + sizeof(pipe_.address_.sun_path)); + friendly_name_ = pipe_.address_.sun_path; } - this->mode = mode; + pipe_.mode_ = mode; } bool PipeInstance::operator==(const Instance& rhs) const { return asString() == rhs.asString(); } -Api::SysCallIntResult PipeInstance::bind(os_fd_t fd) const { - if (!abstract_namespace_) { - // Try to unlink an existing filesystem object at the requested path. Ignore - // errors -- it's fine if the path doesn't exist, and if it exists but can't - // be unlinked then `::bind()` will generate a reasonable errno. 
- unlink(address_.sun_path); - } - auto& os_syscalls = Api::OsSysCallsSingleton::get(); - auto bind_result = os_syscalls.bind(fd, sockAddr(), sockAddrLen()); - if (mode != 0 && !abstract_namespace_ && bind_result.rc_ == 0) { - auto set_permissions = os_syscalls.chmod(address_.sun_path, mode); - if (set_permissions.rc_ != 0) { - throw EnvoyException(absl::StrCat("Failed to create socket with mode ", mode)); - } - } - return bind_result; -} - -Api::SysCallIntResult PipeInstance::connect(os_fd_t fd) const { - return Api::OsSysCallsSingleton::get().connect(fd, sockAddr(), sockAddrLen()); -} - -IoHandlePtr PipeInstance::socket(SocketType type) const { return socketFromSocketType(type); } - } // namespace Address } // namespace Network } // namespace Envoy diff --git a/source/common/network/address_impl.h b/source/common/network/address_impl.h index 8a8916ce8c69c..c7473fcd47544 100644 --- a/source/common/network/address_impl.h +++ b/source/common/network/address_impl.h @@ -8,18 +8,11 @@ #include "envoy/common/platform.h" #include "envoy/network/address.h" -#include "envoy/network/io_handle.h" namespace Envoy { namespace Network { namespace Address { -/** - * Returns true if the given family is supported on this machine. - * @param domain the IP family. - */ -bool ipFamilySupported(int domain); - /** * Convert an address in the form of the socket address struct defined by Posix, Linux, etc. into * a Network::Address::Instance and return a pointer to it. Raises an EnvoyException on failure. @@ -32,21 +25,6 @@ bool ipFamilySupported(int domain); InstanceConstSharedPtr addressFromSockAddr(const sockaddr_storage& ss, socklen_t len, bool v6only = true); -/** - * Obtain an address from a bound file descriptor. Raises an EnvoyException on failure. - * @param fd socket file descriptor - * @return InstanceConstSharedPtr for bound address. 
- */ -InstanceConstSharedPtr addressFromFd(os_fd_t fd); - -/** - * Obtain the address of the peer of the socket with the specified file descriptor. - * Raises an EnvoyException on failure. - * @param fd socket file descriptor - * @return InstanceConstSharedPtr for peer address. - */ -InstanceConstSharedPtr peerAddressFromFd(os_fd_t fd); - /** * Base class for all address types. */ @@ -59,14 +37,16 @@ class InstanceBase : public Instance { const std::string& logicalName() const override { return asString(); } Type type() const override { return type_; } - virtual const sockaddr* sockAddr() const PURE; - virtual socklen_t sockAddrLen() const PURE; + const std::string& socketInterface() const override { return socket_interface_; } protected: InstanceBase(Type type) : type_(type) {} - IoHandlePtr socketFromSocketType(SocketType type) const; + InstanceBase(Type type, absl::string_view sock_interface) : type_(type) { + socket_interface_ = std::string(sock_interface); + } std::string friendly_name_; + std::string socket_interface_; private: const Type type_; @@ -80,32 +60,28 @@ class Ipv4Instance : public InstanceBase { /** * Construct from an existing unix IPv4 socket address (IP v4 address and port). */ - explicit Ipv4Instance(const sockaddr_in* address); + explicit Ipv4Instance(const sockaddr_in* address, absl::string_view sock_interface = ""); /** * Construct from a string IPv4 address such as "1.2.3.4". Port will be unset/0. */ - explicit Ipv4Instance(const std::string& address); + explicit Ipv4Instance(const std::string& address, absl::string_view sock_interface = ""); /** * Construct from a string IPv4 address such as "1.2.3.4" as well as a port. */ - Ipv4Instance(const std::string& address, uint32_t port); + Ipv4Instance(const std::string& address, uint32_t port, absl::string_view sock_interface = ""); /** * Construct from a port. The IPv4 address will be set to "any" and is suitable for binding * a port to any available address. 
*/ - explicit Ipv4Instance(uint32_t port); + explicit Ipv4Instance(uint32_t port, absl::string_view sock_interface = ""); // Network::Address::Instance bool operator==(const Instance& rhs) const override; - Api::SysCallIntResult bind(os_fd_t fd) const override; - Api::SysCallIntResult connect(os_fd_t fd) const override; const Ip* ip() const override { return &ip_; } - IoHandlePtr socket(SocketType type) const override; - - // Network::Address::InstanceBase + const Pipe* pipe() const override { return nullptr; } const sockaddr* sockAddr() const override { return reinterpret_cast(&ip_.ipv4_.address_); } @@ -154,32 +130,29 @@ class Ipv6Instance : public InstanceBase { /** * Construct from an existing unix IPv6 socket address (IP v6 address and port). */ - Ipv6Instance(const sockaddr_in6& address, bool v6only = true); + Ipv6Instance(const sockaddr_in6& address, bool v6only = true, + absl::string_view sock_interface = ""); /** * Construct from a string IPv6 address such as "12:34::5". Port will be unset/0. */ - explicit Ipv6Instance(const std::string& address); + explicit Ipv6Instance(const std::string& address, absl::string_view sock_interface = ""); /** * Construct from a string IPv6 address such as "12:34::5" as well as a port. */ - Ipv6Instance(const std::string& address, uint32_t port); + Ipv6Instance(const std::string& address, uint32_t port, absl::string_view sock_interface = ""); /** * Construct from a port. The IPv6 address will be set to "any" and is suitable for binding * a port to any available address. 
*/ - explicit Ipv6Instance(uint32_t port); + explicit Ipv6Instance(uint32_t port, absl::string_view sock_interface = ""); // Network::Address::Instance bool operator==(const Instance& rhs) const override; - Api::SysCallIntResult bind(os_fd_t fd) const override; - Api::SysCallIntResult connect(os_fd_t fd) const override; const Ip* ip() const override { return &ip_; } - IoHandlePtr socket(SocketType type) const override; - - // Network::Address::InstanceBase + const Pipe* pipe() const override { return nullptr; } const sockaddr* sockAddr() const override { return reinterpret_cast(&ip_.ipv6_.address_); } @@ -189,11 +162,16 @@ class Ipv6Instance : public InstanceBase { struct Ipv6Helper : public Ipv6 { Ipv6Helper() { memset(&address_, 0, sizeof(address_)); } absl::uint128 address() const override; + bool v6only() const override; uint32_t port() const; std::string makeFriendlyAddress() const; sockaddr_in6 address_; + // Is IPv4 compatibility (https://tools.ietf.org/html/rfc3493#page-11) disabled? + // Default initialized to true to preserve extant Envoy behavior where we don't explicitly set + // this in the constructor. + bool v6only_{true}; }; struct IpHelper : public Ip { @@ -211,10 +189,6 @@ class Ipv6Instance : public InstanceBase { Ipv6Helper ipv6_; std::string friendly_address_; - // Is IPv4 compatibility (https://tools.ietf.org/html/rfc3493#page-11) disabled? - // Default initialized to true to preserve extant Envoy behavior where we don't explicitly set - // this in the constructor. - bool v6only_{true}; }; IpHelper ip_; @@ -228,35 +202,43 @@ class PipeInstance : public InstanceBase { /** * Construct from an existing unix address. */ - explicit PipeInstance(const sockaddr_un* address, socklen_t ss_len, mode_t mode = 0); + explicit PipeInstance(const sockaddr_un* address, socklen_t ss_len, mode_t mode = 0, + absl::string_view sock_interface = ""); /** * Construct from a string pipe path. 
*/ - explicit PipeInstance(const std::string& pipe_path, mode_t mode = 0); + explicit PipeInstance(const std::string& pipe_path, mode_t mode = 0, + absl::string_view sock_interface = ""); // Network::Address::Instance bool operator==(const Instance& rhs) const override; - Api::SysCallIntResult bind(os_fd_t fd) const override; - Api::SysCallIntResult connect(os_fd_t fd) const override; const Ip* ip() const override { return nullptr; } - IoHandlePtr socket(SocketType type) const override; - - // Network::Address::InstanceBase - const sockaddr* sockAddr() const override { return reinterpret_cast(&address_); } + const Pipe* pipe() const override { return &pipe_; } + const sockaddr* sockAddr() const override { + return reinterpret_cast(&pipe_.address_); + } socklen_t sockAddrLen() const override { - if (abstract_namespace_) { - return offsetof(struct sockaddr_un, sun_path) + address_length_; + if (pipe_.abstract_namespace_) { + return offsetof(struct sockaddr_un, sun_path) + pipe_.address_length_; } - return sizeof(address_); + return sizeof(pipe_.address_); } private: - sockaddr_un address_; - // For abstract namespaces. - bool abstract_namespace_{false}; - uint32_t address_length_{0}; - mode_t mode{0}; + struct PipeHelper : public Pipe { + + bool abstractNamespace() const override { return abstract_namespace_; } + mode_t mode() const override { return mode_; } + + sockaddr_un address_; + // For abstract namespaces. 
+ bool abstract_namespace_{false}; + uint32_t address_length_{0}; + mode_t mode_{0}; + }; + + PipeHelper pipe_; }; } // namespace Address diff --git a/source/common/network/base_listener_impl.cc b/source/common/network/base_listener_impl.cc index c096bfcfb5223..e1adf6b930ec1 100644 --- a/source/common/network/base_listener_impl.cc +++ b/source/common/network/base_listener_impl.cc @@ -8,16 +8,13 @@ #include "common/event/dispatcher_impl.h" #include "common/event/file_event_impl.h" #include "common/network/address_impl.h" +#include "common/network/socket_impl.h" #include "event2/listener.h" namespace Envoy { namespace Network { -Address::InstanceConstSharedPtr BaseListenerImpl::getLocalAddress(os_fd_t fd) { - return Address::addressFromFd(fd); -} - BaseListenerImpl::BaseListenerImpl(Event::DispatcherImpl& dispatcher, SocketSharedPtr socket) : local_address_(nullptr), dispatcher_(dispatcher), socket_(std::move(socket)) { const auto ip = socket_->localAddress()->ip(); diff --git a/source/common/network/base_listener_impl.h b/source/common/network/base_listener_impl.h index 806789ad35359..878136a4fe297 100644 --- a/source/common/network/base_listener_impl.h +++ b/source/common/network/base_listener_impl.h @@ -23,8 +23,6 @@ class BaseListenerImpl : public virtual Listener { BaseListenerImpl(Event::DispatcherImpl& dispatcher, SocketSharedPtr socket); protected: - virtual Address::InstanceConstSharedPtr getLocalAddress(os_fd_t fd); - Address::InstanceConstSharedPtr local_address_; Event::DispatcherImpl& dispatcher_; const SocketSharedPtr socket_; diff --git a/source/common/network/cidr_range.cc b/source/common/network/cidr_range.cc index 57292e0f6867a..277125e17c4a3 100644 --- a/source/common/network/cidr_range.cc +++ b/source/common/network/cidr_range.cc @@ -190,18 +190,6 @@ InstanceConstSharedPtr CidrRange::truncateIpAddressAndLength(InstanceConstShared NOT_REACHED_GCOVR_EXCL_LINE; } -IpList::IpList(const std::vector& subnets) { - for (const std::string& entry : subnets) 
{ - CidrRange list_entry = CidrRange::create(entry); - if (list_entry.isValid()) { - ip_list_.push_back(list_entry); - } else { - throw EnvoyException( - fmt::format("invalid ip/mask combo '{}' (format is /<# mask bits>)", entry)); - } - } -} - IpList::IpList(const Protobuf::RepeatedPtrField& cidrs) { for (const envoy::config::core::v3::CidrRange& entry : cidrs) { CidrRange list_entry = CidrRange::create(entry); @@ -224,10 +212,6 @@ bool IpList::contains(const Instance& address) const { return false; } -IpList::IpList(const Json::Object& config, const std::string& member_name) - : IpList(config.hasObject(member_name) ? config.getStringArray(member_name) - : std::vector()) {} - } // namespace Address } // namespace Network } // namespace Envoy diff --git a/source/common/network/cidr_range.h b/source/common/network/cidr_range.h index 37b894a3908a8..9ec72f39156b8 100644 --- a/source/common/network/cidr_range.h +++ b/source/common/network/cidr_range.h @@ -4,7 +4,6 @@ #include #include "envoy/config/core/v3/address.pb.h" -#include "envoy/json/json_object.h" #include "envoy/network/address.h" #include "common/protobuf/protobuf.h" @@ -95,7 +94,7 @@ class CidrRange { static CidrRange create(const std::string& range); /** - * Constructs a CidrRange from envoy::api::v2::core::CidrRange. + * Constructs a CidrRange from envoy::config::core::v3::CidrRange. * TODO(ccaraman): Update CidrRange::create to support only constructing valid ranges. 
*/ static CidrRange create(const envoy::config::core::v3::CidrRange& cidr); @@ -126,9 +125,7 @@ class CidrRange { */ class IpList { public: - IpList(const std::vector& subnets); - IpList(const Json::Object& config, const std::string& member_name); - IpList(const Protobuf::RepeatedPtrField& cidrs); + explicit IpList(const Protobuf::RepeatedPtrField& cidrs); IpList() = default; bool contains(const Instance& address) const; diff --git a/source/common/network/connection_balancer_impl.h b/source/common/network/connection_balancer_impl.h index 49cdb601ace85..17a09542c1d74 100644 --- a/source/common/network/connection_balancer_impl.h +++ b/source/common/network/connection_balancer_impl.h @@ -24,7 +24,7 @@ class ExactConnectionBalancerImpl : public ConnectionBalancer { private: absl::Mutex lock_; - std::vector handlers_ GUARDED_BY(lock_); + std::vector handlers_ ABSL_GUARDED_BY(lock_); }; /** diff --git a/source/common/network/connection_impl.cc b/source/common/network/connection_impl.cc index fed4bc8aa3207..2abbea352b763 100644 --- a/source/common/network/connection_impl.cc +++ b/source/common/network/connection_impl.cc @@ -10,7 +10,6 @@ #include "envoy/event/timer.h" #include "envoy/network/filter.h" -#include "common/api/os_sys_calls_impl.h" #include "common/common/assert.h" #include "common/common/empty_string.h" #include "common/common/enum_to_int.h" @@ -48,15 +47,19 @@ ConnectionImpl::ConnectionImpl(Event::Dispatcher& dispatcher, ConnectionSocketPt : ConnectionImplBase(dispatcher, next_global_id_++), transport_socket_(std::move(transport_socket)), socket_(std::move(socket)), stream_info_(stream_info), filter_manager_(*this), - write_buffer_( - dispatcher.getWatermarkFactory().create([this]() -> void { this->onLowWatermark(); }, - [this]() -> void { this->onHighWatermark(); })), - read_enabled_(true), above_high_watermark_(false), detect_early_close_(true), + read_buffer_([this]() -> void { this->onReadBufferLowWatermark(); }, + [this]() -> void { 
this->onReadBufferHighWatermark(); }, + []() -> void { /* TODO(adisuissa): Handle overflow watermark */ }), + write_buffer_(dispatcher.getWatermarkFactory().create( + [this]() -> void { this->onWriteBufferLowWatermark(); }, + [this]() -> void { this->onWriteBufferHighWatermark(); }, + []() -> void { /* TODO(adisuissa): Handle overflow watermark */ })), + write_buffer_above_high_watermark_(false), detect_early_close_(true), enable_half_close_(false), read_end_stream_raised_(false), read_end_stream_(false), write_end_stream_(false), current_write_end_stream_(false), dispatch_buffered_data_(false) { // Treat the lack of a valid fd (which in practice only happens if we run out of FDs) as an OOM // condition and just crash. - RELEASE_ASSERT(SOCKET_VALID(ioHandle().fd()), ""); + RELEASE_ASSERT(SOCKET_VALID(ConnectionImpl::ioHandle().fd()), ""); if (!connected) { connecting_ = true; @@ -71,8 +74,8 @@ ConnectionImpl::ConnectionImpl(Event::Dispatcher& dispatcher, ConnectionSocketPt // We never ask for both early close and read at the same time. If we are reading, we want to // consume all available data. 
file_event_ = dispatcher_.createFileEvent( - ioHandle().fd(), [this](uint32_t events) -> void { onFileEvent(events); }, trigger, - Event::FileReadyType::Read | Event::FileReadyType::Write); + ConnectionImpl::ioHandle().fd(), [this](uint32_t events) -> void { onFileEvent(events); }, + trigger, Event::FileReadyType::Read | Event::FileReadyType::Write); transport_socket_->setTransportSocketCallbacks(*this); } @@ -186,8 +189,13 @@ Connection::State ConnectionImpl::state() const { void ConnectionImpl::closeConnectionImmediately() { closeSocket(ConnectionEvent::LocalClose); } +bool ConnectionImpl::consumerWantsToRead() { + return read_disable_count_ == 0 || + (read_disable_count_ == 1 && read_buffer_.highWatermarkTriggered()); +} + void ConnectionImpl::closeSocket(ConnectionEvent close_type) { - if (!ioHandle().isOpen()) { + if (!ConnectionImpl::ioHandle().isOpen()) { return; } @@ -216,7 +224,8 @@ void ConnectionImpl::closeSocket(ConnectionEvent close_type) { socket_->close(); - raiseEvent(close_type); + // Call the base class directly as close() is called in the destructor. 
+ ConnectionImpl::raiseEvent(close_type); } void ConnectionImpl::noDelay(bool enable) { @@ -232,32 +241,23 @@ void ConnectionImpl::noDelay(bool enable) { } // Don't set NODELAY for unix domain sockets - sockaddr_storage addr; - socklen_t len = sizeof(addr); - - auto os_sys_calls = Api::OsSysCallsSingleton::get(); - Api::SysCallIntResult result = - os_sys_calls.getsockname(ioHandle().fd(), reinterpret_cast(&addr), &len); - - RELEASE_ASSERT(result.rc_ == 0, ""); - - if (addr.ss_family == AF_UNIX) { + if (socket_->addressType() == Address::Type::Pipe) { return; } // Set NODELAY int new_value = enable; - result = os_sys_calls.setsockopt(ioHandle().fd(), IPPROTO_TCP, TCP_NODELAY, &new_value, - sizeof(new_value)); + Api::SysCallIntResult result = + socket_->setSocketOption(IPPROTO_TCP, TCP_NODELAY, &new_value, sizeof(new_value)); #if defined(__APPLE__) - if (SOCKET_FAILURE(result.rc_) && result.errno_ == EINVAL) { + if (SOCKET_FAILURE(result.rc_) && result.errno_ == SOCKET_ERROR_INVAL) { // Sometimes occurs when the connection is not yet fully formed. Empirically, TCP_NODELAY is // enabled despite this result. return; } #elif defined(WIN32) if (SOCKET_FAILURE(result.rc_) && - (result.errno_ == WSAEWOULDBLOCK || result.errno_ == WSAEINVAL)) { + (result.errno_ == SOCKET_ERROR_AGAIN || result.errno_ == SOCKET_ERROR_INVAL)) { // Sometimes occurs when the connection is not yet fully formed. Empirically, TCP_NODELAY is // enabled despite this result. return; @@ -268,7 +268,7 @@ void ConnectionImpl::noDelay(bool enable) { } void ConnectionImpl::onRead(uint64_t read_buffer_size) { - if (!read_enabled_ || inDelayedClose()) { + if (inDelayedClose() || !consumerWantsToRead()) { return; } ASSERT(ioHandle().isOpen()); @@ -301,7 +301,7 @@ void ConnectionImpl::enableHalfClose(bool enabled) { // This code doesn't correctly ensure that EV_CLOSE isn't set if reading is disabled // when enabling half-close. 
This could be fixed, but isn't needed right now, so just // ASSERT that it doesn't happen. - ASSERT(!enabled || read_enabled_); + ASSERT(!enabled || read_disable_count_ == 0); enable_half_close_ = enabled; } @@ -311,8 +311,8 @@ void ConnectionImpl::readDisable(bool disable) { ASSERT(state() == State::Open); ASSERT(file_event_ != nullptr); - ENVOY_CONN_LOG(trace, "readDisable: enabled={} disable={} state={}", *this, read_enabled_, - disable, static_cast(state())); + ENVOY_CONN_LOG(trace, "readDisable: disable={} disable_count={} state={} buffer_length={}", *this, + disable, read_disable_count_, static_cast(state()), read_buffer_.length()); // When we disable reads, we still allow for early close notifications (the equivalent of // EPOLLRDHUP for an epoll backend). For backends that support it, this allows us to apply @@ -322,17 +322,16 @@ void ConnectionImpl::readDisable(bool disable) { // closed TCP connections in the sense that we assume that a remote FIN means the remote intends a // full close. if (disable) { - if (!read_enabled_) { - ++read_disable_count_; - return; - } - ASSERT(read_enabled_); - read_enabled_ = false; + ++read_disable_count_; if (state() != State::Open || file_event_ == nullptr) { // If readDisable is called on a closed connection, do not crash. return; } + if (read_disable_count_ > 1) { + // The socket has already been read disabled. + return; + } // If half-close semantics are enabled, we never want early close notifications; we // always want to read all available data, even if the other side has closed. @@ -342,27 +341,26 @@ void ConnectionImpl::readDisable(bool disable) { file_event_->setEnabled(Event::FileReadyType::Write); } } else { - if (read_disable_count_ > 0) { - --read_disable_count_; - return; - } - ASSERT(!read_enabled_); - read_enabled_ = true; - + ASSERT(read_disable_count_ != 0); + --read_disable_count_; if (state() != State::Open || file_event_ == nullptr) { // If readDisable is called on a closed connection, do not crash. 
return; } - // We never ask for both early close and read at the same time. If we are reading, we want to - // consume all available data. - file_event_->setEnabled(Event::FileReadyType::Read | Event::FileReadyType::Write); - // If the connection has data buffered there's no guarantee there's also data in the kernel - // which will kick off the filter chain. Instead fake an event to make sure the buffered data - // gets processed regardless and ensure that we dispatch it via onRead. - if (read_buffer_.length() > 0) { + if (read_disable_count_ == 0) { + // We never ask for both early close and read at the same time. If we are reading, we want to + // consume all available data. + file_event_->setEnabled(Event::FileReadyType::Read | Event::FileReadyType::Write); + } + + if (consumerWantsToRead() && read_buffer_.length() > 0) { + // If the connection has data buffered there's no guarantee there's also data in the kernel + // which will kick off the filter chain. Alternately if the read buffer has data the fd could + // be read disabled. To handle these cases, fake an event to make sure the buffered data gets + // processed regardless and ensure that we dispatch it via onRead. dispatch_buffered_data_ = true; - file_event_->activate(Event::FileReadyType::Read); + setReadBufferReady(); } } } @@ -383,7 +381,7 @@ bool ConnectionImpl::readEnabled() const { // Calls to readEnabled on a closed socket are considered to be an error. ASSERT(state() == State::Open); ASSERT(file_event_ != nullptr); - return read_enabled_; + return read_disable_count_ == 0; } void ConnectionImpl::addBytesSentCallback(BytesSentCb cb) { @@ -468,22 +466,37 @@ void ConnectionImpl::setBufferLimits(uint32_t limit) { // would result in respecting the exact buffer limit. 
if (limit > 0) { static_cast(write_buffer_.get())->setWatermarks(limit + 1); + read_buffer_.setWatermarks(limit + 1); } } -void ConnectionImpl::onLowWatermark() { +void ConnectionImpl::onReadBufferLowWatermark() { + ENVOY_CONN_LOG(debug, "onBelowReadBufferLowWatermark", *this); + if (state() == State::Open) { + readDisable(false); + } +} + +void ConnectionImpl::onReadBufferHighWatermark() { + ENVOY_CONN_LOG(debug, "onAboveReadBufferHighWatermark", *this); + if (state() == State::Open) { + readDisable(true); + } +} + +void ConnectionImpl::onWriteBufferLowWatermark() { ENVOY_CONN_LOG(debug, "onBelowWriteBufferLowWatermark", *this); - ASSERT(above_high_watermark_); - above_high_watermark_ = false; + ASSERT(write_buffer_above_high_watermark_); + write_buffer_above_high_watermark_ = false; for (ConnectionCallbacks* callback : callbacks_) { callback->onBelowWriteBufferLowWatermark(); } } -void ConnectionImpl::onHighWatermark() { +void ConnectionImpl::onWriteBufferHighWatermark() { ENVOY_CONN_LOG(debug, "onAboveWriteBufferHighWatermark", *this); - ASSERT(!above_high_watermark_); - above_high_watermark_ = true; + ASSERT(!write_buffer_above_high_watermark_); + write_buffer_above_high_watermark_ = true; for (ConnectionCallbacks* callback : callbacks_) { callback->onAboveWriteBufferHighWatermark(); } @@ -528,10 +541,24 @@ void ConnectionImpl::onFileEvent(uint32_t events) { } void ConnectionImpl::onReadReady() { - ENVOY_CONN_LOG(trace, "read ready", *this); + ENVOY_CONN_LOG(trace, "read ready. dispatch_buffered_data={}", *this, dispatch_buffered_data_); + const bool latched_dispatch_buffered_data = dispatch_buffered_data_; + dispatch_buffered_data_ = false; ASSERT(!connecting_); + // We get here while read disabled in two ways. + // 1) There was a call to setReadBufferReady(), for example if a raw buffer socket ceded due to + // shouldDrainReadBuffer(). In this case we defer the event until the socket is read enabled. 
+ // 2) The consumer of connection data called readDisable(true), and instead of reading from the + // socket we simply need to dispatch already read data. + if (read_disable_count_ != 0) { + if (latched_dispatch_buffered_data && consumerWantsToRead()) { + onRead(read_buffer_.length()); + } + return; + } + IoResult result = transport_socket_->doRead(read_buffer_); uint64_t new_buffer_size = read_buffer_.length(); updateReadBufferStats(result.bytes_processed_, new_buffer_size); @@ -545,13 +572,12 @@ void ConnectionImpl::onReadReady() { read_end_stream_ |= result.end_stream_read_; if (result.bytes_processed_ != 0 || result.end_stream_read_ || - (dispatch_buffered_data_ && read_buffer_.length() > 0)) { + (latched_dispatch_buffered_data && read_buffer_.length() > 0)) { // Skip onRead if no bytes were processed unless we explicitly want to force onRead for // buffered data. For instance, skip onRead if the connection was closed without producing // more data. onRead(new_buffer_size); } - dispatch_buffered_data_ = false; // The read callback may have already closed the connection. 
if (result.action_ == PostIoAction::Close || bothSidesHalfClosed()) { @@ -568,8 +594,8 @@ ConnectionImpl::unixSocketPeerCredentials() const { #else struct ucred ucred; socklen_t ucred_size = sizeof(ucred); - int rc = getsockopt(ioHandle().fd(), SOL_SOCKET, SO_PEERCRED, &ucred, &ucred_size); - if (rc == -1) { + int rc = socket_->getSocketOption(SOL_SOCKET, SO_PEERCRED, &ucred, &ucred_size).rc_; + if (SOCKET_FAILURE(rc)) { return absl::nullopt; } @@ -583,9 +609,7 @@ void ConnectionImpl::onWriteReady() { if (connecting_) { int error; socklen_t error_size = sizeof(error); - RELEASE_ASSERT(Api::OsSysCallsSingleton::get() - .getsockopt(ioHandle().fd(), SOL_SOCKET, SO_ERROR, &error, &error_size) - .rc_ == 0, + RELEASE_ASSERT(socket_->getSocketOption(SOL_SOCKET, SO_ERROR, &error, &error_size).rc_ == 0, ""); if (error == 0) { @@ -621,15 +645,18 @@ void ConnectionImpl::onWriteReady() { } else if ((inDelayedClose() && new_buffer_size == 0) || bothSidesHalfClosed()) { ENVOY_CONN_LOG(debug, "write flush complete", *this); if (delayed_close_state_ == DelayedCloseState::CloseAfterFlushAndWait) { - ASSERT(delayed_close_timer_ != nullptr); - delayed_close_timer_->enableTimer(delayed_close_timeout_); + ASSERT(delayed_close_timer_ != nullptr && delayed_close_timer_->enabled()); + if (result.bytes_processed_ > 0) { + delayed_close_timer_->enableTimer(delayed_close_timeout_); + } } else { ASSERT(bothSidesHalfClosed() || delayed_close_state_ == DelayedCloseState::CloseAfterFlush); closeConnectionImmediately(); } } else { ASSERT(result.action_ == PostIoAction::KeepOpen); - if (delayed_close_timer_ != nullptr) { + ASSERT(!delayed_close_timer_ || delayed_close_timer_->enabled()); + if (delayed_close_timer_ != nullptr && result.bytes_processed_ > 0) { delayed_close_timer_->enableTimer(delayed_close_timeout_); } if (result.bytes_processed_ > 0) { @@ -700,17 +727,19 @@ ClientConnectionImpl::ClientConnectionImpl( file_event_->activate(Event::FileReadyType::Write); return; } - const 
Network::Address::Instance* source_to_use = source_address.get(); + + const Network::Address::InstanceConstSharedPtr* source = &source_address; + if (socket_->localAddress()) { - source_to_use = socket_->localAddress().get(); + source = &socket_->localAddress(); } - if (source_to_use != nullptr) { - const Api::SysCallIntResult result = source_to_use->bind(ioHandle().fd()); + if (*source != nullptr) { + Api::SysCallIntResult result = socket_->bind(*source); if (result.rc_ < 0) { // TODO(lizan): consider add this error into transportFailureReason. - ENVOY_LOG_MISC(debug, "Bind failure. Failed to bind to {}: {}", source_to_use->asString(), - strerror(result.errno_)); + ENVOY_LOG_MISC(debug, "Bind failure. Failed to bind to {}: {}", source->get()->asString(), + errorDetails(result.errno_)); bind_error_ = true; // Set a special error state to ensure asynchronous close to give the owner of the // ConnectionImpl a chance to add callbacks and detect the "disconnect". @@ -725,13 +754,20 @@ ClientConnectionImpl::ClientConnectionImpl( void ClientConnectionImpl::connect() { ENVOY_CONN_LOG(debug, "connecting to {}", *this, socket_->remoteAddress()->asString()); - const Api::SysCallIntResult result = socket_->remoteAddress()->connect(ioHandle().fd()); + const Api::SysCallIntResult result = socket_->connect(socket_->remoteAddress()); if (result.rc_ == 0) { // write will become ready. ASSERT(connecting_); } else { - ASSERT(result.rc_ == -1); - if (result.errno_ == EINPROGRESS) { + ASSERT(SOCKET_FAILURE(result.rc_)); +#ifdef WIN32 + // winsock2 connect returns EWOULDBLOCK if the socket is non-blocking and the connection + // cannot be completed immediately. We do not check for EINPROGRESS as that error is for + // blocking operations. 
+ if (result.errno_ == SOCKET_ERROR_AGAIN) { +#else + if (result.errno_ == SOCKET_ERROR_IN_PROGRESS) { +#endif ASSERT(connecting_); ENVOY_CONN_LOG(debug, "connection in progress", *this); } else { @@ -743,12 +779,6 @@ void ClientConnectionImpl::connect() { file_event_->activate(Event::FileReadyType::Write); } } - - // The local address can only be retrieved for IP connections. Other - // types, such as UDS, don't have a notion of a local address. - if (socket_->remoteAddress()->type() == Address::Type::Ip) { - socket_->setLocalAddress(Address::addressFromFd(ioHandle().fd())); - } } } // namespace Network } // namespace Envoy diff --git a/source/common/network/connection_impl.h b/source/common/network/connection_impl.h index 26d04eae93485..17ebe609a2630 100644 --- a/source/common/network/connection_impl.h +++ b/source/common/network/connection_impl.h @@ -60,7 +60,7 @@ class ConnectionImpl : public ConnectionImplBase, public TransportSocketCallback // Network::Connection void addBytesSentCallback(BytesSentCb cb) override; void enableHalfClose(bool enabled) override; - void close(ConnectionCloseType type) override; + void close(ConnectionCloseType type) final; std::string nextProtocol() const override { return transport_socket_->protocol(); } void noDelay(bool enable) override; void readDisable(bool disable) override; @@ -82,7 +82,7 @@ class ConnectionImpl : public ConnectionImplBase, public TransportSocketCallback void setBufferLimits(uint32_t limit) override; uint32_t bufferLimit() const override { return read_buffer_limit_; } bool localAddressRestored() const override { return socket_->localAddressRestored(); } - bool aboveHighWatermark() const override { return above_high_watermark_; } + bool aboveHighWatermark() const override { return write_buffer_above_high_watermark_; } const ConnectionSocket::OptionsSharedPtr& socketOptions() const override { return socket_->options(); } @@ -102,10 +102,10 @@ class ConnectionImpl : public ConnectionImplBase, public 
TransportSocketCallback } // Network::TransportSocketCallbacks - IoHandle& ioHandle() override { return socket_->ioHandle(); } + IoHandle& ioHandle() final { return socket_->ioHandle(); } const IoHandle& ioHandle() const override { return socket_->ioHandle(); } Connection& connection() override { return *this; } - void raiseEvent(ConnectionEvent event) override; + void raiseEvent(ConnectionEvent event) final; // Should the read buffer be drained? bool shouldDrainReadBuffer() override { return read_buffer_limit_ > 0 && read_buffer_.length() >= read_buffer_limit_; @@ -122,20 +122,33 @@ class ConnectionImpl : public ConnectionImplBase, public TransportSocketCallback static uint64_t nextGlobalIdForTest() { return next_global_id_; } protected: + // A convenience function which returns true if + // 1) The read disable count is zero or + // 2) The read disable count is one due to the read buffer being overrun. + // In either case the consumer of the data would like to read from the buffer. + // If the read count is greater than one, or equal to one when the buffer is + // not overrun, then the consumer of the data has called readDisable, and does + // not want to read. + bool consumerWantsToRead(); + // Network::ConnectionImplBase - void closeConnectionImmediately() override; + void closeConnectionImmediately() final; void closeSocket(ConnectionEvent close_type); - void onLowWatermark(); - void onHighWatermark(); + void onReadBufferLowWatermark(); + void onReadBufferHighWatermark(); + void onWriteBufferLowWatermark(); + void onWriteBufferHighWatermark(); TransportSocketPtr transport_socket_; ConnectionSocketPtr socket_; StreamInfo::StreamInfo& stream_info_; FilterManagerImpl filter_manager_; - Buffer::OwnedImpl read_buffer_; + // Ensure that if the consumer of the data from this connection isn't + // consuming, that the connection eventually stops reading from the wire. 
+ Buffer::WatermarkBuffer read_buffer_; // This must be a WatermarkBuffer, but as it is created by a factory the ConnectionImpl only has // a generic pointer. // It MUST be defined after the filter_manager_ as some filters may have callbacks that @@ -174,8 +187,7 @@ class ConnectionImpl : public ConnectionImplBase, public TransportSocketCallback uint64_t last_write_buffer_size_{}; Buffer::Instance* current_write_buffer_{}; uint32_t read_disable_count_{0}; - bool read_enabled_ : 1; - bool above_high_watermark_ : 1; + bool write_buffer_above_high_watermark_ : 1; bool detect_early_close_ : 1; bool enable_half_close_ : 1; bool read_end_stream_raised_ : 1; diff --git a/source/common/network/dns_impl.cc b/source/common/network/dns_impl.cc index 17d608ce2ceb9..d44d53c70f39e 100644 --- a/source/common/network/dns_impl.cc +++ b/source/common/network/dns_impl.cc @@ -161,10 +161,10 @@ void DnsResolverImpl::PendingResolution::onAresGetAddrInfoCallback(int status, i try { callback_(resolution_status, std::move(address_list)); } catch (const EnvoyException& e) { - ENVOY_LOG(critical, "EnvoyException in c-ares callback"); + ENVOY_LOG(critical, "EnvoyException in c-ares callback: {}", e.what()); dispatcher_.post([s = std::string(e.what())] { throw EnvoyException(s); }); } catch (const std::exception& e) { - ENVOY_LOG(critical, "std::exception in c-ares callback"); + ENVOY_LOG(critical, "std::exception in c-ares callback: {}", e.what()); dispatcher_.post([s = std::string(e.what())] { throw EnvoyException(s); }); } catch (...) 
{ ENVOY_LOG(critical, "Unknown exception in c-ares callback"); diff --git a/source/common/network/dns_impl.h b/source/common/network/dns_impl.h index 44588fc4f52c3..dc62e06adb112 100644 --- a/source/common/network/dns_impl.h +++ b/source/common/network/dns_impl.h @@ -2,7 +2,6 @@ #include #include -#include #include "envoy/common/platform.h" #include "envoy/event/dispatcher.h" @@ -13,6 +12,7 @@ #include "common/common/logger.h" #include "common/common/utility.h" +#include "absl/container/node_hash_map.h" #include "ares.h" namespace Envoy { @@ -104,7 +104,7 @@ class DnsResolverImpl : public DnsResolver, protected Logger::Loggable events_; + absl::node_hash_map events_; }; } // namespace Network diff --git a/source/common/network/filter_manager_impl.cc b/source/common/network/filter_manager_impl.cc index c083a56eb4ed0..593abc0980951 100644 --- a/source/common/network/filter_manager_impl.cc +++ b/source/common/network/filter_manager_impl.cc @@ -13,7 +13,7 @@ void FilterManagerImpl::addWriteFilter(WriteFilterSharedPtr filter) { ASSERT(connection_.state() == Connection::State::Open); ActiveWriteFilterPtr new_filter(new ActiveWriteFilter{*this, filter}); filter->initializeWriteFilterCallbacks(*new_filter); - new_filter->moveIntoList(std::move(new_filter), downstream_filters_); + LinkedList::moveIntoList(std::move(new_filter), downstream_filters_); } void FilterManagerImpl::addFilter(FilterSharedPtr filter) { @@ -25,7 +25,7 @@ void FilterManagerImpl::addReadFilter(ReadFilterSharedPtr filter) { ASSERT(connection_.state() == Connection::State::Open); ActiveReadFilterPtr new_filter(new ActiveReadFilter{*this, filter}); filter->initializeReadFilterCallbacks(*new_filter); - new_filter->moveIntoListBack(std::move(new_filter), upstream_filters_); + LinkedList::moveIntoListBack(std::move(new_filter), upstream_filters_); } bool FilterManagerImpl::initializeReadFilters() { diff --git a/source/common/network/filter_matcher.cc b/source/common/network/filter_matcher.cc index 
6668850db44ee..7b2831b8a55e0 100644 --- a/source/common/network/filter_matcher.cc +++ b/source/common/network/filter_matcher.cc @@ -2,6 +2,8 @@ #include "envoy/network/filter.h" +#include "common/common/assert.h" + #include "absl/strings/str_format.h" namespace Envoy { @@ -50,4 +52,4 @@ bool ListenerFilterAndMatcher::matches(ListenerFilterCallbacks& cb) const { } } // namespace Network -} // namespace Envoy \ No newline at end of file +} // namespace Envoy diff --git a/source/common/network/io_socket_error_impl.cc b/source/common/network/io_socket_error_impl.cc index 3382ac5acf2f9..c1d3c13d78a09 100644 --- a/source/common/network/io_socket_error_impl.cc +++ b/source/common/network/io_socket_error_impl.cc @@ -1,40 +1,41 @@ #include "common/network/io_socket_error_impl.h" #include "common/common/assert.h" +#include "common/common/utility.h" namespace Envoy { namespace Network { Api::IoError::IoErrorCode IoSocketError::getErrorCode() const { switch (errno_) { - case EAGAIN: + case SOCKET_ERROR_AGAIN: ASSERT(this == IoSocketError::getIoSocketEagainInstance(), "Didn't use getIoSocketEagainInstance() to generate `Again`."); return IoErrorCode::Again; - case ENOTSUP: + case SOCKET_ERROR_NOT_SUP: return IoErrorCode::NoSupport; - case EAFNOSUPPORT: + case SOCKET_ERROR_AF_NO_SUP: return IoErrorCode::AddressFamilyNoSupport; - case EINPROGRESS: + case SOCKET_ERROR_IN_PROGRESS: return IoErrorCode::InProgress; - case EPERM: + case SOCKET_ERROR_PERM: return IoErrorCode::Permission; - case EMSGSIZE: + case SOCKET_ERROR_MSG_SIZE: return IoErrorCode::MessageTooBig; - case EINTR: + case SOCKET_ERROR_INTR: return IoErrorCode::Interrupt; - case EADDRNOTAVAIL: + case SOCKET_ERROR_ADDR_NOT_AVAIL: return IoErrorCode::AddressNotAvailable; default: - ENVOY_LOG_MISC(debug, "Unknown error code {} details {}", errno_, ::strerror(errno_)); + ENVOY_LOG_MISC(debug, "Unknown error code {} details {}", errno_, getErrorDetails()); return IoErrorCode::UnknownError; } } -std::string 
IoSocketError::getErrorDetails() const { return ::strerror(errno_); } +std::string IoSocketError::getErrorDetails() const { return errorDetails(errno_); } IoSocketError* IoSocketError::getIoSocketEagainInstance() { - static auto* instance = new IoSocketError(EAGAIN); + static auto* instance = new IoSocketError(SOCKET_ERROR_AGAIN); return instance; } diff --git a/source/common/network/io_socket_handle_impl.cc b/source/common/network/io_socket_handle_impl.cc index 306c0c425f64b..5edd1fe5d054f 100644 --- a/source/common/network/io_socket_handle_impl.cc +++ b/source/common/network/io_socket_handle_impl.cc @@ -3,6 +3,7 @@ #include "envoy/buffer/buffer.h" #include "common/api/os_sys_calls_impl.h" +#include "common/common/utility.h" #include "common/network/address_impl.h" #include "absl/container/fixed_array.h" @@ -244,11 +245,13 @@ Api::IoCallUint64Result IoSocketHandleImpl::recvmsg(Buffer::RawSlice* slices, RELEASE_ASSERT(hdr.msg_namelen > 0, fmt::format("Unable to get remote address from recvmsg() for fd: {}", fd_)); output.msg_[0].peer_address_ = getAddressFromSockAddrOrDie(peer_addr, hdr.msg_namelen, fd_); + output.msg_[0].gso_size_ = 0; if (hdr.msg_controllen > 0) { - // Get overflow, local address from control message. + // Get overflow, local address and gso_size from control message. 
for (struct cmsghdr* cmsg = CMSG_FIRSTHDR(&hdr); cmsg != nullptr; cmsg = CMSG_NXTHDR(&hdr, cmsg)) { + if (output.msg_[0].local_address_ == nullptr) { Address::InstanceConstSharedPtr addr = maybeGetDstAddressFromHeader(*cmsg, self_port, fd_); if (addr != nullptr) { @@ -261,10 +264,17 @@ Api::IoCallUint64Result IoSocketHandleImpl::recvmsg(Buffer::RawSlice* slices, absl::optional maybe_dropped = maybeGetPacketsDroppedFromHeader(*cmsg); if (maybe_dropped) { *output.dropped_packets_ = *maybe_dropped; + continue; } } +#ifdef UDP_GRO + if (cmsg->cmsg_level == SOL_UDP && cmsg->cmsg_type == UDP_GRO) { + output.msg_[0].gso_size_ = *reinterpret_cast(CMSG_DATA(cmsg)); + } +#endif } } + return sysCallResultToIoCallResult(result); } @@ -272,7 +282,7 @@ Api::IoCallUint64Result IoSocketHandleImpl::recvmmsg(RawSliceArrays& slices, uin RecvMsgOutput& output) { ASSERT(output.msg_.size() == slices.size()); if (slices.empty()) { - return sysCallResultToIoCallResult(Api::SysCallIntResult{0, EAGAIN}); + return sysCallResultToIoCallResult(Api::SysCallIntResult{0, SOCKET_ERROR_AGAIN}); } const uint32_t num_packets_per_mmsg_call = slices.size(); absl::FixedArray mmsg_hdr(num_packets_per_mmsg_call); @@ -365,5 +375,92 @@ bool IoSocketHandleImpl::supportsMmsg() const { return Api::OsSysCallsSingleton::get().supportsMmsg(); } +bool IoSocketHandleImpl::supportsUdpGro() const { + return Api::OsSysCallsSingleton::get().supportsUdpGro(); +} + +Api::SysCallIntResult IoSocketHandleImpl::bind(Address::InstanceConstSharedPtr address) { + return Api::OsSysCallsSingleton::get().bind(fd_, address->sockAddr(), address->sockAddrLen()); +} + +Api::SysCallIntResult IoSocketHandleImpl::listen(int backlog) { + return Api::OsSysCallsSingleton::get().listen(fd_, backlog); +} + +Api::SysCallIntResult IoSocketHandleImpl::connect(Address::InstanceConstSharedPtr address) { + return Api::OsSysCallsSingleton::get().connect(fd_, address->sockAddr(), address->sockAddrLen()); +} + +Api::SysCallIntResult 
IoSocketHandleImpl::setOption(int level, int optname, const void* optval, + socklen_t optlen) { + return Api::OsSysCallsSingleton::get().setsockopt(fd_, level, optname, optval, optlen); +} + +Api::SysCallIntResult IoSocketHandleImpl::getOption(int level, int optname, void* optval, + socklen_t* optlen) { + return Api::OsSysCallsSingleton::get().getsockopt(fd_, level, optname, optval, optlen); +} + +Api::SysCallIntResult IoSocketHandleImpl::setBlocking(bool blocking) { + return Api::OsSysCallsSingleton::get().setsocketblocking(fd_, blocking); +} + +absl::optional IoSocketHandleImpl::domain() { + sockaddr_storage addr; + socklen_t len = sizeof(addr); + Api::SysCallIntResult result; + + result = Api::OsSysCallsSingleton::get().getsockname( + fd_, reinterpret_cast(&addr), &len); + + if (result.rc_ == 0) { + return {addr.ss_family}; + } + + return absl::nullopt; +} + +Address::InstanceConstSharedPtr IoSocketHandleImpl::localAddress() { + sockaddr_storage ss; + socklen_t ss_len = sizeof(ss); + auto& os_sys_calls = Api::OsSysCallsSingleton::get(); + Api::SysCallIntResult result = + os_sys_calls.getsockname(fd_, reinterpret_cast(&ss), &ss_len); + if (result.rc_ != 0) { + throw EnvoyException(fmt::format("getsockname failed for '{}': ({}) {}", fd_, result.errno_, + errorDetails(result.errno_))); + } + return Address::addressFromSockAddr(ss, ss_len, socket_v6only_); +} + +Address::InstanceConstSharedPtr IoSocketHandleImpl::peerAddress() { + sockaddr_storage ss; + socklen_t ss_len = sizeof ss; + auto& os_sys_calls = Api::OsSysCallsSingleton::get(); + Api::SysCallIntResult result = + os_sys_calls.getpeername(fd_, reinterpret_cast(&ss), &ss_len); + if (result.rc_ != 0) { + throw EnvoyException( + fmt::format("getpeername failed for '{}': {}", fd_, errorDetails(result.errno_))); + } +#ifdef __APPLE__ + if (ss_len == sizeof(sockaddr) && ss.ss_family == AF_UNIX) +#else + if (ss_len == sizeof(sa_family_t) && ss.ss_family == AF_UNIX) +#endif + { + // For Unix domain sockets, can't 
find out the peer name, but it should match our own + // name for the socket (i.e. the path should match, barring any namespace or other + // mechanisms to hide things, of which there are many). + ss_len = sizeof ss; + result = os_sys_calls.getsockname(fd_, reinterpret_cast(&ss), &ss_len); + if (result.rc_ != 0) { + throw EnvoyException( + fmt::format("getsockname failed for '{}': {}", fd_, errorDetails(result.errno_))); + } + } + return Address::addressFromSockAddr(ss, ss_len); +} + } // namespace Network } // namespace Envoy diff --git a/source/common/network/io_socket_handle_impl.h b/source/common/network/io_socket_handle_impl.h index cd1a97ea3ac11..e23d0f4447264 100644 --- a/source/common/network/io_socket_handle_impl.h +++ b/source/common/network/io_socket_handle_impl.h @@ -16,7 +16,8 @@ namespace Network { */ class IoSocketHandleImpl : public IoHandle, protected Logger::Loggable { public: - explicit IoSocketHandleImpl(os_fd_t fd = INVALID_SOCKET) : fd_(fd) {} + explicit IoSocketHandleImpl(os_fd_t fd = INVALID_SOCKET, bool socket_v6only = false) + : fd_(fd), socket_v6only_(socket_v6only) {} // Close underlying socket if close() hasn't been call yet. ~IoSocketHandleImpl() override; @@ -44,6 +45,18 @@ class IoSocketHandleImpl : public IoHandle, protected Logger::Loggable domain() override; + Address::InstanceConstSharedPtr localAddress() override; + Address::InstanceConstSharedPtr peerAddress() override; private: // Converts a SysCallSizeResult to IoCallUint64Result. 
@@ -54,10 +67,10 @@ class IoSocketHandleImpl : public IoHandle, protected Logger::Loggable #include -#include #include #include "envoy/common/exception.h" @@ -14,6 +13,7 @@ #include "common/network/cidr_range.h" #include "common/network/utility.h" +#include "absl/container/node_hash_set.h" #include "absl/numeric/int128.h" #include "fmt/format.h" @@ -230,7 +230,7 @@ template class LcTrie { using Ipv4 = uint32_t; using Ipv6 = absl::uint128; - using DataSet = std::unordered_set; + using DataSet = absl::node_hash_set; using DataSetSharedPtr = std::shared_ptr; /** diff --git a/source/common/network/listen_socket_impl.cc b/source/common/network/listen_socket_impl.cc index 9c7da8ce43260..a905a254eac87 100644 --- a/source/common/network/listen_socket_impl.cc +++ b/source/common/network/listen_socket_impl.cc @@ -7,35 +7,34 @@ #include "envoy/common/exception.h" #include "envoy/common/platform.h" #include "envoy/config/core/v3/base.pb.h" +#include "envoy/network/exception.h" -#include "common/api/os_sys_calls_impl.h" #include "common/common/assert.h" #include "common/common/fmt.h" +#include "common/common/utility.h" #include "common/network/address_impl.h" #include "common/network/utility.h" namespace Envoy { namespace Network { -void ListenSocketImpl::doBind() { - const Api::SysCallIntResult result = local_address_->bind(io_handle_->fd()); +Api::SysCallIntResult ListenSocketImpl::bind(Network::Address::InstanceConstSharedPtr address) { + local_address_ = address; + + const Api::SysCallIntResult result = SocketImpl::bind(local_address_); if (SOCKET_FAILURE(result.rc_)) { close(); - throw SocketBindException( - fmt::format("cannot bind '{}': {}", local_address_->asString(), strerror(result.errno_)), - result.errno_); - } - if (local_address_->type() == Address::Type::Ip && local_address_->ip()->port() == 0) { - // If the port we bind is zero, then the OS will pick a free port for us (assuming there are - // any), and we need to find out the port number that the OS picked. 
- local_address_ = Address::addressFromFd(io_handle_->fd()); + throw SocketBindException(fmt::format("cannot bind '{}': {}", local_address_->asString(), + errorDetails(result.errno_)), + result.errno_); } + return {0, 0}; } void ListenSocketImpl::setListenSocketOptions(const Network::Socket::OptionsSharedPtr& options) { if (!Network::Socket::applyOptions(options, *this, envoy::config::core::v3::SocketOption::STATE_PREBIND)) { - throw EnvoyException("ListenSocket: Setting socket options failed"); + throw CreateListenerException("ListenSocket: Setting socket options failed"); } } @@ -44,34 +43,35 @@ void ListenSocketImpl::setupSocket(const Network::Socket::OptionsSharedPtr& opti setListenSocketOptions(options); if (bind_to_port) { - doBind(); + bind(local_address_); } } template <> -void NetworkListenSocket< - NetworkSocketTrait>::setPrebindSocketOptions() { - +void NetworkListenSocket>::setPrebindSocketOptions() { +// On Windows, SO_REUSEADDR does not restrict subsequent bind calls when there is a listener as on +// Linux and later BSD socket stacks +#ifndef WIN32 int on = 1; - auto& os_syscalls = Api::OsSysCallsSingleton::get(); - Api::SysCallIntResult status = - os_syscalls.setsockopt(io_handle_->fd(), SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on)); + auto status = setSocketOption(SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on)); RELEASE_ASSERT(status.rc_ != -1, "failed to set SO_REUSEADDR socket option"); +#endif } template <> -void NetworkListenSocket< - NetworkSocketTrait>::setPrebindSocketOptions() {} +void NetworkListenSocket>::setPrebindSocketOptions() {} UdsListenSocket::UdsListenSocket(const Address::InstanceConstSharedPtr& address) - : ListenSocketImpl(address->socket(Address::SocketType::Stream), address) { + : ListenSocketImpl(ioHandleForAddr(Socket::Type::Stream, address), address) { RELEASE_ASSERT(io_handle_->fd() != -1, ""); - doBind(); + bind(local_address_); } UdsListenSocket::UdsListenSocket(IoHandlePtr&& io_handle, const Address::InstanceConstSharedPtr& 
address) : ListenSocketImpl(std::move(io_handle), address) {} +std::atomic AcceptedSocketImpl::global_accepted_socket_count_; + } // namespace Network } // namespace Envoy diff --git a/source/common/network/listen_socket_impl.h b/source/common/network/listen_socket_impl.h index 0dba0680b1c80..8e6050599d730 100644 --- a/source/common/network/listen_socket_impl.h +++ b/source/common/network/listen_socket_impl.h @@ -7,80 +7,44 @@ #include "envoy/common/platform.h" #include "envoy/network/connection.h" #include "envoy/network/listen_socket.h" +#include "envoy/network/socket.h" #include "common/common/assert.h" +#include "common/network/socket_impl.h" +#include "common/network/socket_interface_impl.h" namespace Envoy { namespace Network { -class SocketImpl : public virtual Socket { -public: - // Network::Socket - const Address::InstanceConstSharedPtr& localAddress() const override { return local_address_; } - void setLocalAddress(const Address::InstanceConstSharedPtr& local_address) override { - local_address_ = local_address; - } - - IoHandle& ioHandle() override { return *io_handle_; } - const IoHandle& ioHandle() const override { return *io_handle_; } - void close() override { - if (io_handle_->isOpen()) { - io_handle_->close(); - } - } - bool isOpen() const override { return io_handle_->isOpen(); } - void ensureOptions() { - if (!options_) { - options_ = std::make_shared>(); - } - } - void addOption(const OptionConstSharedPtr& option) override { - ensureOptions(); - options_->emplace_back(std::move(option)); - } - void addOptions(const OptionsSharedPtr& options) override { - ensureOptions(); - Network::Socket::appendOptions(options_, options); - } - const OptionsSharedPtr& options() const override { return options_; } - -protected: - SocketImpl(IoHandlePtr&& io_handle, const Address::InstanceConstSharedPtr& local_address) - : io_handle_(std::move(io_handle)), local_address_(local_address) {} - - const IoHandlePtr io_handle_; - Address::InstanceConstSharedPtr 
local_address_; - OptionsSharedPtr options_; -}; - class ListenSocketImpl : public SocketImpl { protected: ListenSocketImpl(IoHandlePtr&& io_handle, const Address::InstanceConstSharedPtr& local_address) : SocketImpl(std::move(io_handle), local_address) {} void setupSocket(const Network::Socket::OptionsSharedPtr& options, bool bind_to_port); - void doBind(); void setListenSocketOptions(const Network::Socket::OptionsSharedPtr& options); + Api::SysCallIntResult bind(Network::Address::InstanceConstSharedPtr address) override; }; /** * Wraps a unix socket. */ -template struct NetworkSocketTrait {}; +template struct NetworkSocketTrait {}; -template <> struct NetworkSocketTrait { - static constexpr Address::SocketType type = Address::SocketType::Stream; +template <> struct NetworkSocketTrait { + static constexpr Socket::Type type = Socket::Type::Stream; }; -template <> struct NetworkSocketTrait { - static constexpr Address::SocketType type = Address::SocketType::Datagram; +template <> struct NetworkSocketTrait { + static constexpr Socket::Type type = Socket::Type::Datagram; }; template class NetworkListenSocket : public ListenSocketImpl { public: NetworkListenSocket(const Address::InstanceConstSharedPtr& address, const Network::Socket::OptionsSharedPtr& options, bool bind_to_port) - : ListenSocketImpl(address->socket(T::type), address) { + : ListenSocketImpl(Network::SocketInterfaceSingleton::get().socket(T::type, address), + address) { RELEASE_ASSERT(SOCKET_VALID(io_handle_->fd()), ""); setPrebindSocketOptions(); @@ -94,23 +58,23 @@ template class NetworkListenSocket : public ListenSocketImpl { setListenSocketOptions(options); } - Address::SocketType socketType() const override { return T::type; } + Socket::Type socketType() const override { return T::type; } protected: void setPrebindSocketOptions(); }; -using TcpListenSocket = NetworkListenSocket>; +using TcpListenSocket = NetworkListenSocket>; using TcpListenSocketPtr = std::unique_ptr; -using UdpListenSocket = 
NetworkListenSocket>; +using UdpListenSocket = NetworkListenSocket>; using UdpListenSocketPtr = std::unique_ptr; class UdsListenSocket : public ListenSocketImpl { public: UdsListenSocket(const Address::InstanceConstSharedPtr& address); UdsListenSocket(IoHandlePtr&& io_handle, const Address::InstanceConstSharedPtr& address); - Address::SocketType socketType() const override { return Address::SocketType::Stream; } + Socket::Type socketType() const override { return Socket::Type::Stream; } }; class ConnectionSocketImpl : public SocketImpl, public ConnectionSocket { @@ -121,8 +85,15 @@ class ConnectionSocketImpl : public SocketImpl, public ConnectionSocket { : SocketImpl(std::move(io_handle), local_address), remote_address_(remote_address), direct_remote_address_(remote_address) {} + ConnectionSocketImpl(Socket::Type type, const Address::InstanceConstSharedPtr& local_address, + const Address::InstanceConstSharedPtr& remote_address) + : SocketImpl(type, local_address), remote_address_(remote_address), + direct_remote_address_(remote_address) { + setLocalAddress(local_address); + } + // Network::Socket - Address::SocketType socketType() const override { return Address::SocketType::Stream; } + Socket::Type socketType() const override { return Socket::Type::Stream; } // Network::ConnectionSocket const Address::InstanceConstSharedPtr& remoteAddress() const override { return remote_address_; } @@ -172,7 +143,21 @@ class AcceptedSocketImpl : public ConnectionSocketImpl { public: AcceptedSocketImpl(IoHandlePtr&& io_handle, const Address::InstanceConstSharedPtr& local_address, const Address::InstanceConstSharedPtr& remote_address) - : ConnectionSocketImpl(std::move(io_handle), local_address, remote_address) {} + : ConnectionSocketImpl(std::move(io_handle), local_address, remote_address) { + ++global_accepted_socket_count_; + } + + ~AcceptedSocketImpl() override { + ASSERT(global_accepted_socket_count_.load() > 0); + --global_accepted_socket_count_; + } + + // TODO (tonya11en): 
Global connection count tracking is temporarily performed via a static + // variable until the logic is moved into the overload manager. + static uint64_t acceptedSocketCount() { return global_accepted_socket_count_.load(); } + +private: + static std::atomic global_accepted_socket_count_; }; // ConnectionSocket used with client connections. @@ -180,8 +165,8 @@ class ClientSocketImpl : public ConnectionSocketImpl { public: ClientSocketImpl(const Address::InstanceConstSharedPtr& remote_address, const OptionsSharedPtr& options) - : ConnectionSocketImpl(remote_address->socket(Address::SocketType::Stream), nullptr, - remote_address) { + : ConnectionSocketImpl(Network::ioHandleForAddr(Socket::Type::Stream, remote_address), + nullptr, remote_address) { if (options) { addOptions(options); } diff --git a/source/common/network/listener_impl.cc b/source/common/network/listener_impl.cc index a8e6d5809f87a..96c1eded88dd6 100644 --- a/source/common/network/listener_impl.cc +++ b/source/common/network/listener_impl.cc @@ -3,10 +3,12 @@ #include "envoy/common/exception.h" #include "envoy/common/platform.h" #include "envoy/config/core/v3/base.pb.h" +#include "envoy/network/exception.h" #include "common/common/assert.h" #include "common/common/empty_string.h" #include "common/common/fmt.h" +#include "common/common/utility.h" #include "common/event/dispatcher_impl.h" #include "common/event/file_event_impl.h" #include "common/network/address_impl.h" @@ -17,18 +19,48 @@ namespace Envoy { namespace Network { +const absl::string_view ListenerImpl::GlobalMaxCxRuntimeKey = + "overload.global_downstream_max_connections"; + +bool ListenerImpl::rejectCxOverGlobalLimit() { + // Enforce the global connection limit if necessary, immediately closing the accepted connection. + Runtime::Loader* runtime = Runtime::LoaderSingleton::getExisting(); + + if (runtime == nullptr) { + // The runtime singleton won't exist in most unit tests that do not need global downstream limit + // enforcement. 
Therefore, there is no need to enforce limits if the singleton doesn't exist. + // TODO(tonya11en): Revisit this once runtime is made globally available. + return false; + } + + // If the connection limit is not set, don't limit the connections, but still track them. + // TODO(tonya11en): In integration tests, threadsafeSnapshot is necessary since the FakeUpstreams + // use a listener and do not run in a worker thread. In practice, this code path will always be + // run on a worker thread, but to prevent failed assertions in test environments, threadsafe + // snapshots must be used. This must be revisited. + const uint64_t global_cx_limit = runtime->threadsafeSnapshot()->getInteger( + GlobalMaxCxRuntimeKey, std::numeric_limits::max()); + return AcceptedSocketImpl::acceptedSocketCount() >= global_cx_limit; +} + void ListenerImpl::listenCallback(evconnlistener*, evutil_socket_t fd, sockaddr* remote_addr, int remote_addr_len, void* arg) { ListenerImpl* listener = static_cast(arg); - // Create the IoSocketHandleImpl for the fd here. - IoHandlePtr io_handle = std::make_unique(fd); + // Wrap raw socket fd in IoHandle. + IoHandlePtr io_handle = SocketInterfaceSingleton::get().socket(fd); + + if (rejectCxOverGlobalLimit()) { + // The global connection limit has been reached. + io_handle->close(); + listener->cb_.onReject(); + return; + } // Get the local address from the new socket if the listener is listening on IP ANY // (e.g., 0.0.0.0 for IPv4) (local_address_ is nullptr in this case). const Address::InstanceConstSharedPtr& local_address = - listener->local_address_ ? listener->local_address_ - : listener->getLocalAddress(io_handle->fd()); + listener->local_address_ ? 
listener->local_address_ : io_handle->localAddress(); // The accept() call that filled in remote_addr doesn't fill in more than the sa_family field // for Unix domain sockets; apparently there isn't a mechanism in the kernel to get the @@ -39,7 +71,7 @@ void ListenerImpl::listenCallback(evconnlistener*, evutil_socket_t fd, sockaddr* // IPv4 local_address was created from an IPv6 mapped IPv4 address. const Address::InstanceConstSharedPtr& remote_address = (remote_addr->sa_family == AF_UNIX) - ? Address::peerAddressFromFd(io_handle->fd()) + ? io_handle->peerAddress() : Address::addressFromSockAddr(*reinterpret_cast(remote_addr), remote_addr_len, local_address->ip()->version() == Address::IpVersion::v6); @@ -76,7 +108,7 @@ ListenerImpl::ListenerImpl(Event::DispatcherImpl& dispatcher, SocketSharedPtr so void ListenerImpl::errorCallback(evconnlistener*, void*) { // We should never get an error callback. This can happen if we run out of FDs or memory. In those // cases just crash. - PANIC(fmt::format("listener accept failure: {}", strerror(errno))); + PANIC(fmt::format("listener accept failure: {}", errorDetails(errno))); } void ListenerImpl::enable() { diff --git a/source/common/network/listener_impl.h b/source/common/network/listener_impl.h index 953d82723b8fc..c431d77f46106 100644 --- a/source/common/network/listener_impl.h +++ b/source/common/network/listener_impl.h @@ -1,5 +1,8 @@ #pragma once +#include "envoy/runtime/runtime.h" + +#include "absl/strings/string_view.h" #include "base_listener_impl.h" namespace Envoy { @@ -17,6 +20,8 @@ class ListenerImpl : public BaseListenerImpl { void disable() override; void enable() override; + static const absl::string_view GlobalMaxCxRuntimeKey; + protected: void setupServerSocket(Event::DispatcherImpl& dispatcher, Socket& socket); @@ -27,6 +32,10 @@ class ListenerImpl : public BaseListenerImpl { int remote_addr_len, void* arg); static void errorCallback(evconnlistener* listener, void* context); + // Returns true if global 
connection limit has been reached and the accepted socket should be + // rejected/closed. If the accepted socket is to be admitted, false is returned. + static bool rejectCxOverGlobalLimit(); + Event::Libevent::ListenerPtr listener_; }; diff --git a/source/common/network/proxy_protocol_filter_state.cc b/source/common/network/proxy_protocol_filter_state.cc new file mode 100644 index 0000000000000..cae58c9612537 --- /dev/null +++ b/source/common/network/proxy_protocol_filter_state.cc @@ -0,0 +1,13 @@ +#include "common/network/proxy_protocol_filter_state.h" + +#include "common/common/macros.h" + +namespace Envoy { +namespace Network { + +const std::string& ProxyProtocolFilterState::key() { + CONSTRUCT_ON_FIRST_USE(std::string, "envoy.network.proxy_protocol_options"); +} + +} // namespace Network +} // namespace Envoy diff --git a/source/common/network/proxy_protocol_filter_state.h b/source/common/network/proxy_protocol_filter_state.h new file mode 100644 index 0000000000000..9cb35a9ee8781 --- /dev/null +++ b/source/common/network/proxy_protocol_filter_state.h @@ -0,0 +1,23 @@ +#pragma once + +#include "envoy/network/proxy_protocol.h" +#include "envoy/stream_info/filter_state.h" + +namespace Envoy { +namespace Network { + +/** + * PROXY protocol info to be used in connections. 
+ */ +class ProxyProtocolFilterState : public StreamInfo::FilterState::Object { +public: + ProxyProtocolFilterState(Network::ProxyProtocolData options) : options_(options) {} + const Network::ProxyProtocolData& value() const { return options_; } + static const std::string& key(); + +private: + const Network::ProxyProtocolData options_; +}; + +} // namespace Network +} // namespace Envoy diff --git a/source/common/network/resolver_impl.h b/source/common/network/resolver_impl.h index 958c9b22d0f6a..0241a4fe53094 100644 --- a/source/common/network/resolver_impl.h +++ b/source/common/network/resolver_impl.h @@ -11,7 +11,7 @@ namespace Envoy { namespace Network { namespace Address { /** - * Create an Instance from a envoy::api::v2::core::Address. + * Create an Instance from a envoy::config::core::v3::Address. * @param address supplies the address proto to resolve. * @return pointer to the Instance. */ @@ -19,7 +19,7 @@ Address::InstanceConstSharedPtr resolveProtoAddress(const envoy::config::core::v3::Address& address); /** - * Create an Instance from a envoy::api::v2::core::SocketAddress. + * Create an Instance from a envoy::config::core::v3::SocketAddress. * @param address supplies the socket address proto to resolve. * @return pointer to the Instance. 
*/ diff --git a/source/common/network/socket_impl.cc b/source/common/network/socket_impl.cc new file mode 100644 index 0000000000000..a24c34de7eab9 --- /dev/null +++ b/source/common/network/socket_impl.cc @@ -0,0 +1,122 @@ +#include "common/network/socket_impl.h" + +#include "envoy/common/exception.h" + +#include "common/api/os_sys_calls_impl.h" +#include "common/common/utility.h" +#include "common/network/address_impl.h" +#include "common/network/io_socket_handle_impl.h" +#include "common/network/socket_interface_impl.h" + +namespace Envoy { +namespace Network { + +SocketImpl::SocketImpl(Socket::Type sock_type, const Address::InstanceConstSharedPtr addr) + : io_handle_(ioHandleForAddr(sock_type, addr)), sock_type_(sock_type), + addr_type_(addr->type()) {} + +SocketImpl::SocketImpl(IoHandlePtr&& io_handle, + const Address::InstanceConstSharedPtr& local_address) + : io_handle_(std::move(io_handle)), local_address_(local_address) { + + if (local_address_ != nullptr) { + addr_type_ = local_address_->type(); + return; + } + + // Should not happen but some tests inject -1 fds + if (SOCKET_INVALID(io_handle_->fd())) { + return; + } + + auto domain = io_handle_->domain(); + + // This should never happen in practice but too many tests inject fake fds ... + if (!domain.has_value()) { + return; + } + + addr_type_ = *domain == AF_UNIX ? Address::Type::Pipe : Address::Type::Ip; +} + +Api::SysCallIntResult SocketImpl::bind(Network::Address::InstanceConstSharedPtr address) { + Api::SysCallIntResult bind_result; + + if (address->type() == Address::Type::Pipe) { + const Address::Pipe* pipe = address->pipe(); + const auto* pipe_sa = reinterpret_cast(address->sockAddr()); + bool abstract_namespace = address->pipe()->abstractNamespace(); + if (!abstract_namespace) { + // Try to unlink an existing filesystem object at the requested path. 
Ignore + // errors -- it's fine if the path doesn't exist, and if it exists but can't + // be unlinked then `::bind()` will generate a reasonable errno. + unlink(pipe_sa->sun_path); + } + // Not storing a reference to syscalls singleton because of unit test mocks + bind_result = io_handle_->bind(address); + if (pipe->mode() != 0 && !abstract_namespace && bind_result.rc_ == 0) { + auto set_permissions = Api::OsSysCallsSingleton::get().chmod(pipe_sa->sun_path, pipe->mode()); + if (set_permissions.rc_ != 0) { + throw EnvoyException(fmt::format("Failed to create socket with mode {}: {}", + std::to_string(pipe->mode()), + errorDetails(set_permissions.errno_))); + } + } + return bind_result; + } + + bind_result = io_handle_->bind(address); + if (bind_result.rc_ == 0 && address->ip()->port() == 0) { + local_address_ = io_handle_->localAddress(); + } + return bind_result; +} + +Api::SysCallIntResult SocketImpl::listen(int backlog) { return io_handle_->listen(backlog); } + +Api::SysCallIntResult SocketImpl::connect(const Network::Address::InstanceConstSharedPtr address) { + auto result = io_handle_->connect(address); + if (address->type() == Address::Type::Ip) { + local_address_ = io_handle_->localAddress(); + } + return result; +} + +Api::SysCallIntResult SocketImpl::setSocketOption(int level, int optname, const void* optval, + socklen_t optlen) { + return io_handle_->setOption(level, optname, optval, optlen); +} + +Api::SysCallIntResult SocketImpl::getSocketOption(int level, int optname, void* optval, + socklen_t* optlen) const { + return io_handle_->getOption(level, optname, optval, optlen); +} + +Api::SysCallIntResult SocketImpl::setBlockingForTest(bool blocking) { + return io_handle_->setBlocking(blocking); +} + +absl::optional SocketImpl::ipVersion() const { + if (addr_type_ == Address::Type::Ip) { + // Always hit after socket is initialized, i.e., accepted or connected + if (local_address_ != nullptr) { + return local_address_->ip()->version(); + } else { + auto 
domain = io_handle_->domain(); + if (!domain.has_value()) { + return absl::nullopt; + } + if (*domain == AF_INET) { + return Address::IpVersion::v4; + } else if (*domain == AF_INET6) { + return Address::IpVersion::v6; + } else { + return absl::nullopt; + } + } + } + return absl::nullopt; +} + +} // namespace Network +} // namespace Envoy diff --git a/source/common/network/socket_impl.h b/source/common/network/socket_impl.h new file mode 100644 index 0000000000000..1704b6a005f1a --- /dev/null +++ b/source/common/network/socket_impl.h @@ -0,0 +1,67 @@ +#pragma once + +#include "envoy/network/socket.h" + +#include "common/common/assert.h" + +namespace Envoy { +namespace Network { + +class SocketImpl : public virtual Socket { +public: + SocketImpl(Socket::Type socket_type, const Address::InstanceConstSharedPtr addr); + + // Network::Socket + const Address::InstanceConstSharedPtr& localAddress() const override { return local_address_; } + void setLocalAddress(const Address::InstanceConstSharedPtr& local_address) override { + local_address_ = local_address; + } + + IoHandle& ioHandle() override { return *io_handle_; } + const IoHandle& ioHandle() const override { return *io_handle_; } + void close() override { + if (io_handle_->isOpen()) { + io_handle_->close(); + } + } + bool isOpen() const override { return io_handle_->isOpen(); } + void ensureOptions() { + if (!options_) { + options_ = std::make_shared>(); + } + } + void addOption(const OptionConstSharedPtr& option) override { + ensureOptions(); + options_->emplace_back(std::move(option)); + } + void addOptions(const OptionsSharedPtr& options) override { + ensureOptions(); + Network::Socket::appendOptions(options_, options); + } + + Api::SysCallIntResult bind(Network::Address::InstanceConstSharedPtr address) override; + Api::SysCallIntResult listen(int backlog) override; + Api::SysCallIntResult connect(const Address::InstanceConstSharedPtr addr) override; + Api::SysCallIntResult setSocketOption(int level, int optname, 
const void* optval, + socklen_t optlen) override; + Api::SysCallIntResult getSocketOption(int level, int optname, void* optval, + socklen_t* optlen) const override; + Api::SysCallIntResult setBlockingForTest(bool blocking) override; + + const OptionsSharedPtr& options() const override { return options_; } + Socket::Type socketType() const override { return sock_type_; } + Address::Type addressType() const override { return addr_type_; } + absl::optional ipVersion() const override; + +protected: + SocketImpl(IoHandlePtr&& io_handle, const Address::InstanceConstSharedPtr& local_address); + + const IoHandlePtr io_handle_; + Address::InstanceConstSharedPtr local_address_; + OptionsSharedPtr options_; + Socket::Type sock_type_; + Address::Type addr_type_; +}; + +} // namespace Network +} // namespace Envoy \ No newline at end of file diff --git a/source/common/network/socket_interface.h b/source/common/network/socket_interface.h new file mode 100644 index 0000000000000..9374b65a2344d --- /dev/null +++ b/source/common/network/socket_interface.h @@ -0,0 +1,74 @@ +#pragma once + +#include "envoy/config/typed_config.h" +#include "envoy/network/socket.h" +#include "envoy/registry/registry.h" +#include "envoy/server/bootstrap_extension_config.h" + +#include "common/singleton/threadsafe_singleton.h" + +#include "absl/container/flat_hash_map.h" + +namespace Envoy { +namespace Network { + +// Wrapper for SocketInterface instances returned by createBootstrapExtension() which must be +// implemented by all factories that derive SocketInterfaceBase +class SocketInterfaceExtension : public Server::BootstrapExtension { +public: + SocketInterfaceExtension(SocketInterface& sock_interface) : sock_interface_(sock_interface) {} + SocketInterface& socketInterface() { return sock_interface_; } + +private: + SocketInterface& sock_interface_; +}; + +// Class to be derived by all SocketInterface implementations. +// +// It acts both as a SocketInterface and as a BootstrapExtensionFactory. 
The latter is used, on the +// one hand, to configure and initialize the interface and, on the other, for SocketInterface lookup +// by leveraging the FactoryRegistry. As required for all bootstrap extensions, all derived classes +// should register via the REGISTER_FACTORY() macro as BootstrapExtensionFactory. +// +// SocketInterface instances can be retrieved using the factory name, i.e., string returned by +// name() function implemented by all classes that derive SocketInterfaceBase, via +// Network::socketInterface(). When instantiating addresses, address resolvers should +// set the socket interface field to the name of the socket interface implementation that should +// be used to create sockets for said addresses. +class SocketInterfaceBase : public SocketInterface, + public Server::Configuration::BootstrapExtensionFactory {}; + +/** + * Lookup SocketInterface instance by name + * @param name Name of the socket interface to be looked up + * @return Pointer to @ref SocketInterface instance that registered using the name of nullptr + */ +static inline const SocketInterface* socketInterface(std::string name) { + auto factory = + Registry::FactoryRegistry::getFactory(name); + return dynamic_cast(factory); +} + +using SocketInterfaceSingleton = InjectableSingleton; +using SocketInterfaceLoader = ScopedInjectableLoader; + +/** + * Create IoHandle for given address + * @param type type of socket to be requested + * @param addr address that is gleaned for address type, version and socket interface name + * @return @ref Network::IoHandlePtr that wraps the underlying socket file descriptor + */ +static inline IoHandlePtr ioHandleForAddr(Socket::Type type, + const Address::InstanceConstSharedPtr addr) { + auto sock_interface_name = addr->socketInterface(); + if (!sock_interface_name.empty()) { + auto sock_interface = socketInterface(sock_interface_name); + RELEASE_ASSERT(sock_interface != nullptr, + fmt::format("missing socket interface {}", sock_interface_name)); + 
return sock_interface->socket(type, addr); + } + return SocketInterfaceSingleton::get().socket(type, addr); +} + +} // namespace Network +} // namespace Envoy \ No newline at end of file diff --git a/source/common/network/socket_interface_impl.cc b/source/common/network/socket_interface_impl.cc new file mode 100644 index 0000000000000..065556d69222f --- /dev/null +++ b/source/common/network/socket_interface_impl.cc @@ -0,0 +1,107 @@ +#include "common/network/socket_interface_impl.h" + +#include "envoy/common/exception.h" +#include "envoy/extensions/network/socket_interface/v3/default_socket_interface.pb.h" +#include "envoy/network/socket.h" + +#include "common/api/os_sys_calls_impl.h" +#include "common/common/utility.h" +#include "common/network/address_impl.h" +#include "common/network/io_socket_handle_impl.h" + +namespace Envoy { +namespace Network { + +IoHandlePtr SocketInterfaceImpl::socket(Socket::Type socket_type, Address::Type addr_type, + Address::IpVersion version, bool socket_v6only) const { +#if defined(__APPLE__) || defined(WIN32) + int flags = 0; +#else + int flags = SOCK_NONBLOCK; +#endif + + if (socket_type == Socket::Type::Stream) { + flags |= SOCK_STREAM; + } else { + flags |= SOCK_DGRAM; + } + + int domain; + if (addr_type == Address::Type::Ip) { + if (version == Address::IpVersion::v6) { + domain = AF_INET6; + } else { + ASSERT(version == Address::IpVersion::v4); + domain = AF_INET; + } + } else { + ASSERT(addr_type == Address::Type::Pipe); + domain = AF_UNIX; + } + + const Api::SysCallSocketResult result = Api::OsSysCallsSingleton::get().socket(domain, flags, 0); + RELEASE_ASSERT(SOCKET_VALID(result.rc_), + fmt::format("socket(2) failed, got error: {}", errorDetails(result.errno_))); + IoHandlePtr io_handle = std::make_unique(result.rc_, socket_v6only); + +#if defined(__APPLE__) || defined(WIN32) + // Cannot set SOCK_NONBLOCK as a ::socket flag. 
+ const int rc = Api::OsSysCallsSingleton::get().setsocketblocking(io_handle->fd(), false).rc_; + RELEASE_ASSERT(!SOCKET_FAILURE(rc), ""); +#endif + + return io_handle; +} + +IoHandlePtr SocketInterfaceImpl::socket(Socket::Type socket_type, + const Address::InstanceConstSharedPtr addr) const { + Address::IpVersion ip_version = addr->ip() ? addr->ip()->version() : Address::IpVersion::v4; + int v6only = 0; + if (addr->type() == Address::Type::Ip && ip_version == Address::IpVersion::v6) { + v6only = addr->ip()->ipv6()->v6only(); + } + + IoHandlePtr io_handle = + SocketInterfaceImpl::socket(socket_type, addr->type(), ip_version, v6only); + if (addr->type() == Address::Type::Ip && ip_version == Address::IpVersion::v6) { + // Setting IPV6_V6ONLY restricts the IPv6 socket to IPv6 connections only. + const Api::SysCallIntResult result = Api::OsSysCallsSingleton::get().setsockopt( + io_handle->fd(), IPPROTO_IPV6, IPV6_V6ONLY, reinterpret_cast(&v6only), + sizeof(v6only)); + RELEASE_ASSERT(!SOCKET_FAILURE(result.rc_), ""); + } + return io_handle; +} + +IoHandlePtr SocketInterfaceImpl::socket(os_fd_t fd) { + return std::make_unique(fd); +} + +bool SocketInterfaceImpl::ipFamilySupported(int domain) { + Api::OsSysCalls& os_sys_calls = Api::OsSysCallsSingleton::get(); + const Api::SysCallSocketResult result = os_sys_calls.socket(domain, SOCK_STREAM, 0); + if (SOCKET_VALID(result.rc_)) { + RELEASE_ASSERT(os_sys_calls.close(result.rc_).rc_ == 0, + fmt::format("Fail to close fd: response code {}", errorDetails(result.rc_))); + } + return SOCKET_VALID(result.rc_); +} + +Server::BootstrapExtensionPtr +SocketInterfaceImpl::createBootstrapExtension(const Protobuf::Message&, + Server::Configuration::ServerFactoryContext&) { + return std::make_unique(*this); +} + +ProtobufTypes::MessagePtr SocketInterfaceImpl::createEmptyConfigProto() { + return std::make_unique< + envoy::extensions::network::socket_interface::v3::DefaultSocketInterface>(); +} + +REGISTER_FACTORY(SocketInterfaceImpl, 
Server::Configuration::BootstrapExtensionFactory); + +static SocketInterfaceLoader* socket_interface_ = + new SocketInterfaceLoader(std::make_unique()); + +} // namespace Network +} // namespace Envoy diff --git a/source/common/network/socket_interface_impl.h b/source/common/network/socket_interface_impl.h new file mode 100644 index 0000000000000..42f9b6875d9da --- /dev/null +++ b/source/common/network/socket_interface_impl.h @@ -0,0 +1,34 @@ +#pragma once + +#include "envoy/network/address.h" +#include "envoy/network/socket.h" + +#include "common/network/socket_interface.h" + +namespace Envoy { +namespace Network { + +class SocketInterfaceImpl : public SocketInterfaceBase { +public: + // SocketInterface + IoHandlePtr socket(Socket::Type socket_type, Address::Type addr_type, Address::IpVersion version, + bool socket_v6only) const override; + IoHandlePtr socket(Socket::Type socket_type, + const Address::InstanceConstSharedPtr addr) const override; + IoHandlePtr socket(os_fd_t fd) override; + bool ipFamilySupported(int domain) override; + + // Server::Configuration::BootstrapExtensionFactory + Server::BootstrapExtensionPtr + createBootstrapExtension(const Protobuf::Message& config, + Server::Configuration::ServerFactoryContext& context) override; + ProtobufTypes::MessagePtr createEmptyConfigProto() override; + std::string name() const override { + return "envoy.extensions.network.socket_interface.default_socket_interface"; + }; +}; + +DECLARE_FACTORY(SocketInterfaceImpl); + +} // namespace Network +} // namespace Envoy \ No newline at end of file diff --git a/source/common/network/socket_option_factory.cc b/source/common/network/socket_option_factory.cc index 0a888525c8e8b..e6ed92c56e73f 100644 --- a/source/common/network/socket_option_factory.cc +++ b/source/common/network/socket_option_factory.cc @@ -61,6 +61,14 @@ std::unique_ptr SocketOptionFactory::buildSocketMarkOptions(uin return options; } +std::unique_ptr SocketOptionFactory::buildSocketNoSigpipeOptions() { 
+ // Provide additional handling for `SIGPIPE` at the socket layer by converting it to `EPIPE`. + std::unique_ptr options = std::make_unique(); + options->push_back(std::make_shared( + envoy::config::core::v3::SocketOption::STATE_PREBIND, ENVOY_SOCKET_SO_NOSIGPIPE, 1)); + return options; +} + std::unique_ptr SocketOptionFactory::buildLiteralOptions( const Protobuf::RepeatedPtrField& socket_options) { auto options = std::make_unique(); @@ -124,5 +132,12 @@ std::unique_ptr SocketOptionFactory::buildReusePortOptions() { return options; } +std::unique_ptr SocketOptionFactory::buildUdpGroOptions() { + std::unique_ptr options = std::make_unique(); + options->push_back(std::make_shared( + envoy::config::core::v3::SocketOption::STATE_BOUND, ENVOY_SOCKET_UDP_GRO, 1)); + return options; +} + } // namespace Network } // namespace Envoy diff --git a/source/common/network/socket_option_factory.h b/source/common/network/socket_option_factory.h index 51a40b1bd8c31..72c67bfd89964 100644 --- a/source/common/network/socket_option_factory.h +++ b/source/common/network/socket_option_factory.h @@ -2,7 +2,7 @@ #include "envoy/common/platform.h" #include "envoy/config/core/v3/base.pb.h" -#include "envoy/network/listen_socket.h" +#include "envoy/network/socket.h" #include "common/common/logger.h" #include "common/protobuf/protobuf.h" @@ -26,12 +26,14 @@ class SocketOptionFactory : Logger::Loggable { static std::unique_ptr buildIpFreebindOptions(); static std::unique_ptr buildIpTransparentOptions(); static std::unique_ptr buildSocketMarkOptions(uint32_t mark); + static std::unique_ptr buildSocketNoSigpipeOptions(); static std::unique_ptr buildTcpFastOpenOptions(uint32_t queue_length); static std::unique_ptr buildLiteralOptions( const Protobuf::RepeatedPtrField& socket_options); static std::unique_ptr buildIpPacketInfoOptions(); static std::unique_ptr buildRxQueueOverFlowOptions(); static std::unique_ptr buildReusePortOptions(); + static std::unique_ptr buildUdpGroOptions(); }; } // 
namespace Network } // namespace Envoy diff --git a/source/common/network/socket_option_impl.cc b/source/common/network/socket_option_impl.cc index 016e97613f9dd..7293fa483b2f4 100644 --- a/source/common/network/socket_option_impl.cc +++ b/source/common/network/socket_option_impl.cc @@ -5,6 +5,7 @@ #include "common/api/os_sys_calls_impl.h" #include "common/common/assert.h" +#include "common/common/utility.h" #include "common/network/address_impl.h" namespace Envoy { @@ -14,7 +15,7 @@ namespace Network { bool SocketOptionImpl::setOption(Socket& socket, envoy::config::core::v3::SocketOption::SocketState state) const { if (in_state_ == state) { - if (!optname_.has_value()) { + if (!optname_.hasValue()) { ENVOY_LOG(warn, "Failed to set unsupported option on socket"); return false; } @@ -23,7 +24,7 @@ bool SocketOptionImpl::setOption(Socket& socket, SocketOptionImpl::setSocketOption(socket, optname_, value_.data(), value_.size()); if (result.rc_ != 0) { ENVOY_LOG(warn, "Setting {} option on socket failed: {}", optname_.name(), - strerror(result.errno_)); + errorDetails(result.errno_)); return false; } } @@ -44,18 +45,16 @@ SocketOptionImpl::getOptionDetails(const Socket&, return absl::make_optional(std::move(info)); } -bool SocketOptionImpl::isSupported() const { return optname_.has_value(); } +bool SocketOptionImpl::isSupported() const { return optname_.hasValue(); } Api::SysCallIntResult SocketOptionImpl::setSocketOption(Socket& socket, const Network::SocketOptionName& optname, const void* value, size_t size) { - if (!optname.has_value()) { - return {-1, ENOTSUP}; + if (!optname.hasValue()) { + return {-1, SOCKET_ERROR_NOT_SUP}; } - auto& os_syscalls = Api::OsSysCallsSingleton::get(); - return os_syscalls.setsockopt(socket.ioHandle().fd(), optname.level(), optname.option(), value, - size); + return socket.setSocketOption(optname.level(), optname.option(), value, size); } } // namespace Network diff --git a/source/common/network/socket_option_impl.h 
b/source/common/network/socket_option_impl.h index 95338adf6f9d3..ce6ebb9265793 100644 --- a/source/common/network/socket_option_impl.h +++ b/source/common/network/socket_option_impl.h @@ -47,12 +47,24 @@ namespace Network { #define ENVOY_SOCKET_SO_MARK Network::SocketOptionName() #endif +#ifdef SO_NOSIGPIPE +#define ENVOY_SOCKET_SO_NOSIGPIPE ENVOY_MAKE_SOCKET_OPTION_NAME(SOL_SOCKET, SO_NOSIGPIPE) +#else +#define ENVOY_SOCKET_SO_NOSIGPIPE Network::SocketOptionName() +#endif + #ifdef SO_REUSEPORT #define ENVOY_SOCKET_SO_REUSEPORT ENVOY_MAKE_SOCKET_OPTION_NAME(SOL_SOCKET, SO_REUSEPORT) #else #define ENVOY_SOCKET_SO_REUSEPORT Network::SocketOptionName() #endif +#ifdef UDP_GRO +#define ENVOY_SOCKET_UDP_GRO ENVOY_MAKE_SOCKET_OPTION_NAME(SOL_UDP, UDP_GRO) +#else +#define ENVOY_SOCKET_UDP_GRO Network::SocketOptionName() +#endif + #ifdef TCP_KEEPCNT #define ENVOY_SOCKET_TCP_KEEPCNT ENVOY_MAKE_SOCKET_OPTION_NAME(IPPROTO_TCP, TCP_KEEPCNT) #else diff --git a/source/common/network/transport_socket_options_impl.cc b/source/common/network/transport_socket_options_impl.cc index 4e88d9812ea25..62358ce487102 100644 --- a/source/common/network/transport_socket_options_impl.cc +++ b/source/common/network/transport_socket_options_impl.cc @@ -1,5 +1,6 @@ #include "common/network/transport_socket_options_impl.h" +#include #include #include #include @@ -8,27 +9,45 @@ #include "common/common/scalar_to_byte_vector.h" #include "common/common/utility.h" #include "common/network/application_protocol.h" +#include "common/network/proxy_protocol_filter_state.h" #include "common/network/upstream_server_name.h" #include "common/network/upstream_subject_alt_names.h" namespace Envoy { namespace Network { -void TransportSocketOptionsImpl::hashKey(std::vector& key) const { - if (override_server_name_.has_value()) { - pushScalarToByteVector(StringUtil::CaseInsensitiveHash()(override_server_name_.value()), key); +namespace { +void commonHashKey(const TransportSocketOptions& options, std::vector& key) { 
+ const auto& server_name_overide = options.serverNameOverride(); + if (server_name_overide.has_value()) { + pushScalarToByteVector(StringUtil::CaseInsensitiveHash()(server_name_overide.value()), key); } - if (!override_verify_san_list_.empty()) { - for (const auto& san : override_verify_san_list_) { + const auto& verify_san_list = options.verifySubjectAltNameListOverride(); + if (!verify_san_list.empty()) { + for (const auto& san : verify_san_list) { pushScalarToByteVector(StringUtil::CaseInsensitiveHash()(san), key); } } - if (!override_alpn_list_.empty()) { - for (const auto& protocol : override_alpn_list_) { + const auto& alpn_list = options.applicationProtocolListOverride(); + if (!alpn_list.empty()) { + for (const auto& protocol : alpn_list) { pushScalarToByteVector(StringUtil::CaseInsensitiveHash()(protocol), key); } } + const auto& alpn_fallback = options.applicationProtocolFallback(); + if (alpn_fallback.has_value()) { + pushScalarToByteVector(StringUtil::CaseInsensitiveHash()(*alpn_fallback), key); + } +} +} // namespace + +void AlpnDecoratingTransportSocketOptions::hashKey(std::vector& key) const { + commonHashKey(*this, key); +} + +void TransportSocketOptionsImpl::hashKey(std::vector& key) const { + commonHashKey(*this, key); } TransportSocketOptionsSharedPtr @@ -36,6 +55,7 @@ TransportSocketOptionsUtility::fromFilterState(const StreamInfo::FilterState& fi absl::string_view server_name; std::vector application_protocols; std::vector subject_alt_names; + absl::optional proxy_protocol_options; bool needs_transport_socket_options = false; if (filter_state.hasData(UpstreamServerName::key())) { @@ -59,9 +79,17 @@ TransportSocketOptionsUtility::fromFilterState(const StreamInfo::FilterState& fi needs_transport_socket_options = true; } + if (filter_state.hasData(ProxyProtocolFilterState::key())) { + const auto& proxy_protocol_filter_state = + filter_state.getDataReadOnly(ProxyProtocolFilterState::key()); + 
proxy_protocol_options.emplace(proxy_protocol_filter_state.value()); + needs_transport_socket_options = true; + } + if (needs_transport_socket_options) { return std::make_shared( - server_name, std::move(subject_alt_names), std::move(application_protocols)); + server_name, std::move(subject_alt_names), std::move(application_protocols), absl::nullopt, + proxy_protocol_options); } else { return nullptr; } diff --git a/source/common/network/transport_socket_options_impl.h b/source/common/network/transport_socket_options_impl.h index 341ab567e8ed4..3611f117c8e52 100644 --- a/source/common/network/transport_socket_options_impl.h +++ b/source/common/network/transport_socket_options_impl.h @@ -1,21 +1,55 @@ #pragma once +#include "envoy/network/proxy_protocol.h" #include "envoy/network/transport_socket.h" #include "envoy/stream_info/filter_state.h" namespace Envoy { namespace Network { +// A wrapper around another TransportSocketOptions that overrides the ALPN fallback. +class AlpnDecoratingTransportSocketOptions : public TransportSocketOptions { +public: + AlpnDecoratingTransportSocketOptions(std::string&& alpn, + TransportSocketOptionsSharedPtr inner_options) + : alpn_fallback_(std::move(alpn)), inner_options_(std::move(inner_options)) {} + // Network::TransportSocketOptions + const absl::optional& serverNameOverride() const override { + return inner_options_->serverNameOverride(); + } + const std::vector& verifySubjectAltNameListOverride() const override { + return inner_options_->verifySubjectAltNameListOverride(); + } + const std::vector& applicationProtocolListOverride() const override { + return inner_options_->applicationProtocolListOverride(); + } + const absl::optional& applicationProtocolFallback() const override { + return alpn_fallback_; + } + absl::optional proxyProtocolOptions() const override { + return inner_options_->proxyProtocolOptions(); + } + void hashKey(std::vector& key) const override; + +private: + const absl::optional alpn_fallback_; + const 
TransportSocketOptionsSharedPtr inner_options_; +}; + class TransportSocketOptionsImpl : public TransportSocketOptions { public: - TransportSocketOptionsImpl(absl::string_view override_server_name = "", - std::vector&& override_verify_san_list = {}, - std::vector&& override_alpn = {}) + TransportSocketOptionsImpl( + absl::string_view override_server_name = "", + std::vector&& override_verify_san_list = {}, + std::vector&& override_alpn = {}, + absl::optional&& fallback_alpn = {}, + absl::optional proxy_proto_options = absl::nullopt) : override_server_name_(override_server_name.empty() ? absl::nullopt : absl::optional(override_server_name)), override_verify_san_list_{std::move(override_verify_san_list)}, - override_alpn_list_{std::move(override_alpn)} {} + override_alpn_list_{std::move(override_alpn)}, alpn_fallback_{std::move(fallback_alpn)}, + proxy_protocol_options_(proxy_proto_options) {} // Network::TransportSocketOptions const absl::optional& serverNameOverride() const override { @@ -27,12 +61,20 @@ class TransportSocketOptionsImpl : public TransportSocketOptions { const std::vector& applicationProtocolListOverride() const override { return override_alpn_list_; } + const absl::optional& applicationProtocolFallback() const override { + return alpn_fallback_; + } + absl::optional proxyProtocolOptions() const override { + return proxy_protocol_options_; + } void hashKey(std::vector& key) const override; private: const absl::optional override_server_name_; const std::vector override_verify_san_list_; const std::vector override_alpn_list_; + const absl::optional alpn_fallback_; + const absl::optional proxy_protocol_options_; }; class TransportSocketOptionsUtility { diff --git a/source/common/network/udp_default_writer_config.cc b/source/common/network/udp_default_writer_config.cc new file mode 100644 index 0000000000000..c07336c513a89 --- /dev/null +++ b/source/common/network/udp_default_writer_config.cc @@ -0,0 +1,32 @@ +#include 
"common/network/udp_default_writer_config.h" + +#include +#include + +#include "envoy/config/listener/v3/udp_default_writer_config.pb.h" + +#include "common/network/udp_packet_writer_handler_impl.h" + +namespace Envoy { +namespace Network { + +UdpPacketWriterPtr UdpDefaultWriterFactory::createUdpPacketWriter(Network::IoHandle& io_handle, + Stats::Scope& /*scope*/) { + return std::make_unique(io_handle); +} + +ProtobufTypes::MessagePtr UdpDefaultWriterConfigFactory::createEmptyConfigProto() { + return std::make_unique(); +} + +UdpPacketWriterFactoryPtr +UdpDefaultWriterConfigFactory::createUdpPacketWriterFactory(const Protobuf::Message& /*message*/) { + return std::make_unique(); +} + +std::string UdpDefaultWriterConfigFactory::name() const { return "udp_default_writer"; } + +REGISTER_FACTORY(UdpDefaultWriterConfigFactory, Network::UdpPacketWriterConfigFactory); + +} // namespace Network +} // namespace Envoy diff --git a/source/common/network/udp_default_writer_config.h b/source/common/network/udp_default_writer_config.h new file mode 100644 index 0000000000000..e01c465e904f7 --- /dev/null +++ b/source/common/network/udp_default_writer_config.h @@ -0,0 +1,32 @@ +#pragma once + +#include "envoy/network/udp_packet_writer_config.h" +#include "envoy/network/udp_packet_writer_handler.h" +#include "envoy/registry/registry.h" + +namespace Envoy { +namespace Network { + +class UdpDefaultWriterFactory : public Network::UdpPacketWriterFactory { +public: + Network::UdpPacketWriterPtr createUdpPacketWriter(Network::IoHandle& io_handle, + Stats::Scope& scope) override; +}; + +// UdpPacketWriterConfigFactory to create UdpDefaultWriterFactory based on given protobuf +// This is the default UdpPacketWriterConfigFactory if not specified in config. 
+class UdpDefaultWriterConfigFactory : public UdpPacketWriterConfigFactory { +public: + // UdpPacketWriterConfigFactory + ProtobufTypes::MessagePtr createEmptyConfigProto() override; + + Network::UdpPacketWriterFactoryPtr + createUdpPacketWriterFactory(const Protobuf::Message&) override; + + std::string name() const override; +}; + +DECLARE_FACTORY(UdpDefaultWriterConfigFactory); + +} // namespace Network +} // namespace Envoy diff --git a/source/common/network/udp_listener_impl.cc b/source/common/network/udp_listener_impl.cc index f05817f19d187..3eaf0272e9401 100644 --- a/source/common/network/udp_listener_impl.cc +++ b/source/common/network/udp_listener_impl.cc @@ -8,6 +8,7 @@ #include "envoy/common/exception.h" #include "envoy/common/platform.h" #include "envoy/config/core/v3/base.pb.h" +#include "envoy/network/exception.h" #include "common/api/os_sys_calls_impl.h" #include "common/common/assert.h" @@ -44,16 +45,18 @@ UdpListenerImpl::UdpListenerImpl(Event::DispatcherImpl& dispatcher, SocketShared } UdpListenerImpl::~UdpListenerImpl() { - disable(); + disableEvent(); file_event_.reset(); } -void UdpListenerImpl::disable() { file_event_->setEnabled(0); } +void UdpListenerImpl::disable() { disableEvent(); } void UdpListenerImpl::enable() { file_event_->setEnabled(Event::FileReadyType::Read | Event::FileReadyType::Write); } +void UdpListenerImpl::disableEvent() { file_event_->setEnabled(0); } + void UdpListenerImpl::onSocketEvent(short flags) { ASSERT((flags & (Event::FileReadyType::Read | Event::FileReadyType::Write))); ENVOY_UDP_LOG(trace, "socket event: {}", flags); @@ -107,8 +110,9 @@ const Address::InstanceConstSharedPtr& UdpListenerImpl::localAddress() const { Api::IoCallUint64Result UdpListenerImpl::send(const UdpSendData& send_data) { ENVOY_UDP_LOG(trace, "send"); Buffer::Instance& buffer = send_data.buffer_; - Api::IoCallUint64Result send_result = Utility::writeToSocket( - socket_->ioHandle(), buffer, send_data.local_ip_, send_data.peer_address_); + + 
Api::IoCallUint64Result send_result = + cb_.udpPacketWriter().writePacket(buffer, send_data.local_ip_, send_data.peer_address_); // The send_result normalizes the rc_ value to 0 in error conditions. // The drain call is hence 'safe' in success and failure cases. @@ -116,5 +120,10 @@ Api::IoCallUint64Result UdpListenerImpl::send(const UdpSendData& send_data) { return send_result; } +Api::IoCallUint64Result UdpListenerImpl::flush() { + ENVOY_UDP_LOG(trace, "flush"); + return cb_.udpPacketWriter().flush(); +} + } // namespace Network } // namespace Envoy diff --git a/source/common/network/udp_listener_impl.h b/source/common/network/udp_listener_impl.h index 2184b4419c105..67168fb1c7ee1 100644 --- a/source/common/network/udp_listener_impl.h +++ b/source/common/network/udp_listener_impl.h @@ -35,6 +35,7 @@ class UdpListenerImpl : public BaseListenerImpl, Event::Dispatcher& dispatcher() override; const Address::InstanceConstSharedPtr& localAddress() const override; Api::IoCallUint64Result send(const UdpSendData& data) override; + Api::IoCallUint64Result flush() override; void processPacket(Address::InstanceConstSharedPtr local_address, Address::InstanceConstSharedPtr peer_address, Buffer::InstancePtr buffer, @@ -54,6 +55,7 @@ class UdpListenerImpl : public BaseListenerImpl, private: void onSocketEvent(short flags); + void disableEvent(); TimeSource& time_source_; Event::FileEventPtr file_event_; diff --git a/source/common/network/udp_packet_writer_handler_impl.cc b/source/common/network/udp_packet_writer_handler_impl.cc new file mode 100644 index 0000000000000..27d499268e28a --- /dev/null +++ b/source/common/network/udp_packet_writer_handler_impl.cc @@ -0,0 +1,28 @@ +#include "common/network/udp_packet_writer_handler_impl.h" + +#include "common/buffer/buffer_impl.h" +#include "common/network/utility.h" + +namespace Envoy { +namespace Network { + +UdpDefaultWriter::UdpDefaultWriter(Network::IoHandle& io_handle) + : write_blocked_(false), io_handle_(io_handle) {} + 
+UdpDefaultWriter::~UdpDefaultWriter() = default; + +Api::IoCallUint64Result UdpDefaultWriter::writePacket(const Buffer::Instance& buffer, + const Address::Ip* local_ip, + const Address::Instance& peer_address) { + ASSERT(!write_blocked_, "Cannot write while IO handle is blocked."); + Api::IoCallUint64Result result = + Utility::writeToSocket(io_handle_, buffer, local_ip, peer_address); + if (result.err_ && result.err_->getErrorCode() == Api::IoError::IoErrorCode::Again) { + // Writer is blocked when error code received is EWOULDBLOCK/EAGAIN + write_blocked_ = true; + } + return result; +} + +} // namespace Network +} // namespace Envoy diff --git a/source/common/network/udp_packet_writer_handler_impl.h b/source/common/network/udp_packet_writer_handler_impl.h new file mode 100644 index 0000000000000..50c3f34b79cda --- /dev/null +++ b/source/common/network/udp_packet_writer_handler_impl.h @@ -0,0 +1,45 @@ +#pragma once + +#include "envoy/buffer/buffer.h" +#include "envoy/network/socket.h" +#include "envoy/network/udp_packet_writer_handler.h" + +#include "common/network/io_socket_error_impl.h" + +namespace Envoy { +namespace Network { + +class UdpDefaultWriter : public UdpPacketWriter { +public: + UdpDefaultWriter(Network::IoHandle& io_handle); + + ~UdpDefaultWriter() override; + + // Following writePacket utilizes Utility::writeToSocket() implementation + Api::IoCallUint64Result writePacket(const Buffer::Instance& buffer, const Address::Ip* local_ip, + const Address::Instance& peer_address) override; + + bool isWriteBlocked() const override { return write_blocked_; } + void setWritable() override { write_blocked_ = false; } + uint64_t getMaxPacketSize(const Address::Instance& /*peer_address*/) const override { + return Network::UdpMaxOutgoingPacketSize; + } + bool isBatchMode() const override { return false; } + Network::UdpPacketWriterBuffer + getNextWriteLocation(const Address::Ip* /*local_ip*/, + const Address::Instance& /*peer_address*/) override { + return 
{nullptr, 0, nullptr}; + } + Api::IoCallUint64Result flush() override { + return Api::IoCallUint64Result( + /*rc=*/0, + /*err=*/Api::IoErrorPtr(nullptr, Network::IoSocketError::deleteIoError)); + } + +private: + bool write_blocked_; + Network::IoHandle& io_handle_; +}; + +} // namespace Network +} // namespace Envoy diff --git a/source/common/network/utility.cc b/source/common/network/utility.cc index 891e6f995ef7a..15145ec7ef498 100644 --- a/source/common/network/utility.cc +++ b/source/common/network/utility.cc @@ -7,6 +7,7 @@ #include #include +#include "envoy/buffer/buffer.h" #include "envoy/common/exception.h" #include "envoy/common/platform.h" #include "envoy/config/core/v3/address.pb.h" @@ -96,6 +97,28 @@ uint32_t portFromUrl(const std::string& url, absl::string_view scheme, } } +Api::IoCallUint64Result receiveMessage(uint64_t max_packet_size, Buffer::InstancePtr& buffer, + IoHandle::RecvMsgOutput& output, IoHandle& handle, + const Address::Instance& local_address) { + + Buffer::RawSlice slice; + const uint64_t num_slices = buffer->reserve(max_packet_size, &slice, 1); + ASSERT(num_slices == 1u); + + Api::IoCallUint64Result result = + handle.recvmsg(&slice, num_slices, local_address.ip()->port(), output); + + if (!result.ok()) { + return result; + } + + // Adjust memory length and commit slice to buffer + slice.len_ = std::min(slice.len_, static_cast(result.rc_)); + buffer->commit(&slice, 1); + + return result; +} + } // namespace std::string Utility::hostFromTcpUrl(const std::string& url) { @@ -171,6 +194,7 @@ Address::InstanceConstSharedPtr Utility::parseInternetAddressAndPort(const std:: throwWithMalformedIp(ip_address); } sockaddr_in sa4; + memset(&sa4, 0, sizeof(sa4)); if (ip_str.empty() || inet_pton(AF_INET, ip_str.c_str(), &sa4.sin_addr) != 1) { throwWithMalformedIp(ip_address); } @@ -196,7 +220,7 @@ void Utility::throwWithMalformedIp(absl::string_view ip_address) { // need to be updated in the future. Discussion can be found at Github issue #939. 
Address::InstanceConstSharedPtr Utility::getLocalAddress(const Address::IpVersion version) { Address::InstanceConstSharedPtr ret; -#ifndef WIN32 +#ifdef SUPPORTS_GETIFADDRS struct ifaddrs* ifaddr; struct ifaddrs* ifa; @@ -343,47 +367,38 @@ Address::InstanceConstSharedPtr Utility::getAddressWithPort(const Address::Insta NOT_REACHED_GCOVR_EXCL_LINE; } -Address::InstanceConstSharedPtr Utility::getOriginalDst(os_fd_t fd) { +Address::InstanceConstSharedPtr Utility::getOriginalDst(Socket& sock) { #ifdef SOL_IP - sockaddr_storage orig_addr; - socklen_t addr_len = sizeof(sockaddr_storage); - int socket_domain; - socklen_t domain_len = sizeof(socket_domain); - auto& os_syscalls = Api::OsSysCallsSingleton::get(); - const Api::SysCallIntResult result = - os_syscalls.getsockopt(fd, SOL_SOCKET, SO_DOMAIN, &socket_domain, &domain_len); - int status = result.rc_; - if (status != 0) { + if (sock.addressType() != Address::Type::Ip) { return nullptr; } - if (socket_domain == AF_INET) { - status = os_syscalls.getsockopt(fd, SOL_IP, SO_ORIGINAL_DST, &orig_addr, &addr_len).rc_; - } else if (socket_domain == AF_INET6) { - status = os_syscalls.getsockopt(fd, SOL_IPV6, IP6T_SO_ORIGINAL_DST, &orig_addr, &addr_len).rc_; - } else { + auto ipVersion = sock.ipVersion(); + if (!ipVersion.has_value()) { return nullptr; } - if (status != 0) { - return nullptr; + sockaddr_storage orig_addr; + memset(&orig_addr, 0, sizeof(orig_addr)); + socklen_t addr_len = sizeof(sockaddr_storage); + int status; + + if (*ipVersion == Address::IpVersion::v4) { + status = sock.getSocketOption(SOL_IP, SO_ORIGINAL_DST, &orig_addr, &addr_len).rc_; + } else { + status = sock.getSocketOption(SOL_IPV6, IP6T_SO_ORIGINAL_DST, &orig_addr, &addr_len).rc_; } - switch (orig_addr.ss_family) { - case AF_INET: - return Address::InstanceConstSharedPtr{ - new Address::Ipv4Instance(reinterpret_cast(&orig_addr))}; - case AF_INET6: - return Address::InstanceConstSharedPtr{ - new Address::Ipv6Instance(reinterpret_cast(orig_addr))}; - 
default: + if (status != 0) { return nullptr; } + + return Address::addressFromSockAddr(orig_addr, 0, true /* default for v6 constructor */); #else // TODO(zuercher): determine if connection redirection is possible under macOS (c.f. pfctl and // divert), and whether it's possible to find the learn destination address. - UNREFERENCED_PARAMETER(fd); + UNREFERENCED_PARAMETER(sock); return nullptr; #endif } @@ -481,22 +496,22 @@ void Utility::addressToProtobufAddress(const Address::Instance& address, } } -Address::SocketType +Socket::Type Utility::protobufAddressSocketType(const envoy::config::core::v3::Address& proto_address) { switch (proto_address.address_case()) { case envoy::config::core::v3::Address::AddressCase::kSocketAddress: { const auto protocol = proto_address.socket_address().protocol(); switch (protocol) { case envoy::config::core::v3::SocketAddress::TCP: - return Address::SocketType::Stream; + return Socket::Type::Stream; case envoy::config::core::v3::SocketAddress::UDP: - return Address::SocketType::Datagram; + return Socket::Type::Datagram; default: NOT_REACHED_GCOVR_EXCL_LINE; } } case envoy::config::core::v3::Address::AddressCase::kPipe: - return Address::SocketType::Stream; + return Socket::Type::Stream; default: NOT_REACHED_GCOVR_EXCL_LINE; } @@ -506,8 +521,7 @@ Api::IoCallUint64Result Utility::writeToSocket(IoHandle& handle, const Buffer::I const Address::Ip* local_ip, const Address::Instance& peer_address) { Buffer::RawSliceVector slices = buffer.getRawSlices(); - return writeToSocket(handle, !slices.empty() ? 
&slices[0] : nullptr, slices.size(), local_ip, - peer_address); + return writeToSocket(handle, slices.data(), slices.size(), local_ip, peer_address); } Api::IoCallUint64Result Utility::writeToSocket(IoHandle& handle, Buffer::RawSlice* slices, @@ -531,14 +545,10 @@ Api::IoCallUint64Result Utility::writeToSocket(IoHandle& handle, Buffer::RawSlic return send_result; } -void passPayloadToProcessor(uint64_t bytes_read, Buffer::RawSlice& slice, - Buffer::InstancePtr buffer, Address::InstanceConstSharedPtr peer_addess, +void passPayloadToProcessor(uint64_t bytes_read, Buffer::InstancePtr buffer, + Address::InstanceConstSharedPtr peer_addess, Address::InstanceConstSharedPtr local_address, UdpPacketProcessor& udp_packet_processor, MonotonicTime receive_time) { - // Adjust used memory length. - slice.len_ = std::min(slice.len_, static_cast(bytes_read)); - buffer->commit(&slice, 1); - RELEASE_ASSERT( peer_addess != nullptr, fmt::format("Unable to get remote address on the socket bount to local address: {} ", @@ -558,6 +568,44 @@ Api::IoCallUint64Result Utility::readFromSocket(IoHandle& handle, UdpPacketProcessor& udp_packet_processor, MonotonicTime receive_time, uint32_t* packets_dropped) { + + if (handle.supportsUdpGro()) { + Buffer::InstancePtr buffer = std::make_unique(); + IoHandle::RecvMsgOutput output(1, packets_dropped); + + // TODO(yugant): Avoid allocating 24k for each read by getting memory from UdpPacketProcessor + const uint64_t max_packet_size_with_gro = 16 * udp_packet_processor.maxPacketSize(); + + Api::IoCallUint64Result result = + receiveMessage(max_packet_size_with_gro, buffer, output, handle, local_address); + + if (!result.ok()) { + return result; + } + + const uint64_t gso_size = output.msg_[0].gso_size_; + ENVOY_LOG_MISC(trace, "recvmsg bytes {} with gso_size as {}", result.rc_, gso_size); + + // Skip gso segmentation and proceed as a single payload. 
+ if (gso_size == 0u) { + passPayloadToProcessor(result.rc_, std::move(buffer), std::move(output.msg_[0].peer_address_), + std::move(output.msg_[0].local_address_), udp_packet_processor, + receive_time); + return result; + } + + // Segment the buffer read by the recvmsg syscall into gso_sized sub buffers. + while (buffer->length() > 0) { + const uint64_t bytes_to_copy = std::min(buffer->length(), gso_size); + Buffer::InstancePtr sub_buffer = std::make_unique(); + sub_buffer->move(*buffer, bytes_to_copy); + passPayloadToProcessor(bytes_to_copy, std::move(sub_buffer), output.msg_[0].peer_address_, + output.msg_[0].local_address_, udp_packet_processor, receive_time); + } + + return result; + } + if (handle.supportsMmsg()) { const uint32_t num_packets_per_mmsg_call = 16u; const uint32_t num_slices_per_packet = 1u; @@ -585,20 +633,22 @@ Api::IoCallUint64Result Utility::readFromSocket(IoHandle& handle, ASSERT(msg_len <= slice->len_); ENVOY_LOG_MISC(debug, "Receive a packet with {} bytes from {}", msg_len, output.msg_[i].peer_address_->asString()); - passPayloadToProcessor(msg_len, *slice, std::move(buffers[i]), output.msg_[i].peer_address_, + + // Adjust used memory length and commit slice to buffer + slice->len_ = std::min(slice->len_, static_cast(msg_len)); + buffers[i]->commit(slice, 1); + + passPayloadToProcessor(msg_len, std::move(buffers[i]), output.msg_[i].peer_address_, output.msg_[i].local_address_, udp_packet_processor, receive_time); } return result; } Buffer::InstancePtr buffer = std::make_unique(); - Buffer::RawSlice slice; - const uint64_t num_slices = buffer->reserve(udp_packet_processor.maxPacketSize(), &slice, 1); - ASSERT(num_slices == 1u); - IoHandle::RecvMsgOutput output(1, packets_dropped); + Api::IoCallUint64Result result = - handle.recvmsg(&slice, num_slices, local_address.ip()->port(), output); + receiveMessage(udp_packet_processor.maxPacketSize(), buffer, output, handle, local_address); if (!result.ok()) { return result; @@ -606,9 +656,9 @@ 
Api::IoCallUint64Result Utility::readFromSocket(IoHandle& handle, ENVOY_LOG_MISC(trace, "recvmsg bytes {}", result.rc_); - passPayloadToProcessor( - result.rc_, slice, std::move(buffer), std::move(output.msg_[0].peer_address_), - std::move(output.msg_[0].local_address_), udp_packet_processor, receive_time); + passPayloadToProcessor(result.rc_, std::move(buffer), std::move(output.msg_[0].peer_address_), + std::move(output.msg_[0].local_address_), udp_packet_processor, + receive_time); return result; } diff --git a/source/common/network/utility.h b/source/common/network/utility.h index 152b2ccc471d0..be64071e9ea69 100644 --- a/source/common/network/utility.h +++ b/source/common/network/utility.h @@ -229,13 +229,13 @@ class Utility { uint32_t port); /** - * Retrieve the original destination address from an accepted fd. + * Retrieve the original destination address from an accepted socket. * The address (IP and port) may be not local and the port may differ from * the listener port if the packets were redirected using iptables - * @param fd is the descriptor returned by accept() + * @param sock is accepted socket * @return the original destination or nullptr if not available. 
*/ - static Address::InstanceConstSharedPtr getOriginalDst(os_fd_t fd); + static Address::InstanceConstSharedPtr getOriginalDst(Socket& sock); /** * Parses a string containing a comma-separated list of port numbers and/or @@ -286,7 +286,7 @@ class Utility { * @param proto_address the address protobuf * @return socket type */ - static Address::SocketType + static Socket::Type protobufAddressSocketType(const envoy::config::core::v3::Address& proto_address); /** diff --git a/source/common/profiler/BUILD b/source/common/profiler/BUILD index c853cfea30c39..192f9712568ab 100644 --- a/source/common/profiler/BUILD +++ b/source/common/profiler/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/common/protobuf/BUILD b/source/common/protobuf/BUILD index 9a9aa1f306241..f505161b810f8 100644 --- a/source/common/protobuf/BUILD +++ b/source/common/protobuf/BUILD @@ -1,5 +1,4 @@ -licenses(["notice"]) # Apache 2 - +load("@rules_cc//cc:defs.bzl", "cc_proto_library") load("@rules_proto//proto:defs.bzl", "proto_library") load( "//bazel:envoy_build_system.bzl", @@ -7,6 +6,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() proto_library( @@ -33,6 +34,7 @@ envoy_cc_library( deps = [ "//include/envoy/protobuf:message_validator_interface", "//include/envoy/stats:stats_interface", + "//source/common/common:documentation_url_lib", "//source/common/common:hash_lib", "//source/common/common:logger_lib", "//source/common/common:macros", @@ -64,6 +66,7 @@ envoy_cc_library( "//include/envoy/protobuf:message_validator_interface", "//include/envoy/runtime:runtime_interface", "//source/common/common:assert_lib", + "//source/common/common:documentation_url_lib", "//source/common/common:hash_lib", "//source/common/common:utility_lib", "//source/common/config:api_type_oracle_lib", diff --git 
a/source/common/protobuf/message_validator_impl.cc b/source/common/protobuf/message_validator_impl.cc index 9b164a925da5c..c486f9d0d4ecd 100644 --- a/source/common/protobuf/message_validator_impl.cc +++ b/source/common/protobuf/message_validator_impl.cc @@ -11,10 +11,24 @@ namespace Envoy { namespace ProtobufMessage { -void WarningValidationVisitorImpl::setCounter(Stats::Counter& counter) { - ASSERT(counter_ == nullptr); - counter_ = &counter; - counter.add(prestats_count_); +namespace { +const char deprecation_error[] = " If continued use of this field is absolutely necessary, " + "see " ENVOY_DOC_URL_RUNTIME_OVERRIDE_DEPRECATED " for " + "how to apply a temporary and highly discouraged override."; + +void onDeprecatedFieldCommon(absl::string_view description, bool soft_deprecation) { + if (soft_deprecation) { + ENVOY_LOG_MISC(warn, "Deprecated field: {}", absl::StrCat(description, deprecation_error)); + } else { + throw DeprecatedProtoFieldException(absl::StrCat(description, deprecation_error)); + } +} +} // namespace + +void WarningValidationVisitorImpl::setUnknownCounter(Stats::Counter& counter) { + ASSERT(unknown_counter_ == nullptr); + unknown_counter_ = &counter; + counter.add(prestats_unknown_count_); } void WarningValidationVisitorImpl::onUnknownField(absl::string_view description) { @@ -24,20 +38,31 @@ void WarningValidationVisitorImpl::onUnknownField(absl::string_view description) if (!it.second) { return; } + // It's a new field, log and bump stat. 
ENVOY_LOG(warn, "Unknown field: {}", description); - if (counter_ == nullptr) { - ++prestats_count_; + if (unknown_counter_ == nullptr) { + ++prestats_unknown_count_; } else { - counter_->inc(); + unknown_counter_->inc(); } } +void WarningValidationVisitorImpl::onDeprecatedField(absl::string_view description, + bool soft_deprecation) { + onDeprecatedFieldCommon(description, soft_deprecation); +} + void StrictValidationVisitorImpl::onUnknownField(absl::string_view description) { throw UnknownProtoFieldException( absl::StrCat("Protobuf message (", description, ") has unknown fields")); } +void StrictValidationVisitorImpl::onDeprecatedField(absl::string_view description, + bool soft_deprecation) { + onDeprecatedFieldCommon(description, soft_deprecation); +} + ValidationVisitor& getNullValidationVisitor() { MUTABLE_CONSTRUCT_ON_FIRST_USE(NullValidationVisitorImpl); } diff --git a/source/common/protobuf/message_validator_impl.h b/source/common/protobuf/message_validator_impl.h index 32d705fd44bfc..54644f49b8430 100644 --- a/source/common/protobuf/message_validator_impl.h +++ b/source/common/protobuf/message_validator_impl.h @@ -3,6 +3,7 @@ #include "envoy/protobuf/message_validator.h" #include "envoy/stats/stats.h" +#include "common/common/documentation_url.h" #include "common/common/logger.h" #include "absl/container/flat_hash_set.h" @@ -14,6 +15,10 @@ class NullValidationVisitorImpl : public ValidationVisitor { public: // Envoy::ProtobufMessage::ValidationVisitor void onUnknownField(absl::string_view) override {} + void onDeprecatedField(absl::string_view, bool) override {} + + // Envoy::ProtobufMessage::ValidationVisitor + bool skipValidation() override { return true; } }; ValidationVisitor& getNullValidationVisitor(); @@ -21,25 +26,33 @@ ValidationVisitor& getNullValidationVisitor(); class WarningValidationVisitorImpl : public ValidationVisitor, public Logger::Loggable { public: - void setCounter(Stats::Counter& counter); + void setUnknownCounter(Stats::Counter& 
counter); // Envoy::ProtobufMessage::ValidationVisitor void onUnknownField(absl::string_view description) override; + void onDeprecatedField(absl::string_view description, bool soft_deprecation) override; + + // Envoy::ProtobufMessage::ValidationVisitor + bool skipValidation() override { return false; } private: // Track hashes of descriptions we've seen, to avoid log spam. A hash is used here to avoid // wasting memory with unused strings. absl::flat_hash_set descriptions_; - // This can be late initialized via setCounter(), enabling the server bootstrap loading which - // occurs prior to the initialization of the stats subsystem. - Stats::Counter* counter_{}; - uint64_t prestats_count_{}; + // This can be late initialized via setUnknownCounter(), enabling the server bootstrap loading + // which occurs prior to the initialization of the stats subsystem. + Stats::Counter* unknown_counter_{}; + uint64_t prestats_unknown_count_{}; }; class StrictValidationVisitorImpl : public ValidationVisitor { public: // Envoy::ProtobufMessage::ValidationVisitor void onUnknownField(absl::string_view description) override; + + // Envoy::ProtobufMessage::ValidationVisitor + bool skipValidation() override { return false; } + void onDeprecatedField(absl::string_view description, bool soft_deprecation) override; }; ValidationVisitor& getStrictValidationVisitor(); @@ -62,18 +75,21 @@ class ValidationContextImpl : public ValidationContext { class ProdValidationContextImpl : public ValidationContextImpl { public: - ProdValidationContextImpl(bool allow_unknown_static_fields, bool allow_unknown_dynamic_fields) + ProdValidationContextImpl(bool allow_unknown_static_fields, bool allow_unknown_dynamic_fields, + bool ignore_unknown_dynamic_fields) : ValidationContextImpl(allow_unknown_static_fields ? static_warning_validation_visitor_ : getStrictValidationVisitor(), allow_unknown_dynamic_fields - ? dynamic_warning_validation_visitor_ + ? (ignore_unknown_dynamic_fields + ? 
ProtobufMessage::getNullValidationVisitor() + : dynamic_warning_validation_visitor_) : ProtobufMessage::getStrictValidationVisitor()) {} - ProtobufMessage::WarningValidationVisitorImpl& static_warning_validation_visitor() { + ProtobufMessage::WarningValidationVisitorImpl& staticWarningValidationVisitor() { return static_warning_validation_visitor_; } - ProtobufMessage::WarningValidationVisitorImpl& dynamic_warning_validation_visitor() { + ProtobufMessage::WarningValidationVisitorImpl& dynamicWarningValidationVisitor() { return dynamic_warning_validation_visitor_; } diff --git a/source/common/protobuf/utility.cc b/source/common/protobuf/utility.cc index e3715a13c79d1..288c3fc9620a7 100644 --- a/source/common/protobuf/utility.cc +++ b/source/common/protobuf/utility.cc @@ -8,6 +8,7 @@ #include "envoy/type/v3/percent.pb.h" #include "common/common/assert.h" +#include "common/common/documentation_url.h" #include "common/common/fmt.h" #include "common/config/api_type_oracle.h" #include "common/config/version_converter.h" @@ -35,12 +36,12 @@ void blockFormat(YAML::Node node) { node.SetStyle(YAML::EmitterStyle::Block); if (node.Type() == YAML::NodeType::Sequence) { - for (auto it : node) { + for (const auto& it : node) { blockFormat(it); } } if (node.Type() == YAML::NodeType::Map) { - for (auto it : node) { + for (const auto& it : node) { blockFormat(it.second); } } @@ -103,7 +104,7 @@ ProtobufWkt::Value parseYamlNode(const YAML::Node& node) { void jsonConvertInternal(const Protobuf::Message& source, ProtobufMessage::ValidationVisitor& validation_visitor, - Protobuf::Message& dest) { + Protobuf::Message& dest, bool do_boosting = true) { Protobuf::util::JsonPrintOptions json_options; json_options.preserve_proto_field_names = true; std::string json; @@ -112,7 +113,7 @@ void jsonConvertInternal(const Protobuf::Message& source, throw EnvoyException(fmt::format("Unable to convert protobuf message to JSON string: {} {}", status.ToString(), source.DebugString())); } - 
MessageUtil::loadFromJson(json, dest, validation_visitor); + MessageUtil::loadFromJson(json, dest, validation_visitor, do_boosting); } enum class MessageVersion { @@ -144,6 +145,7 @@ void tryWithApiBoosting(MessageXformFn f, Protobuf::Message& message) { f(message, MessageVersion::LATEST_VERSION); return; } + Protobuf::DynamicMessageFactory dmf; auto earlier_message = ProtobufTypes::MessagePtr(dmf.GetPrototype(earlier_version_desc)->New()); ASSERT(earlier_message != nullptr); @@ -163,7 +165,8 @@ void tryWithApiBoosting(MessageXformFn f, Protobuf::Message& message) { // otherwise fatal field. Throws a warning on use of a fatal by default field. void deprecatedFieldHelper(Runtime::Loader* runtime, bool proto_annotated_as_deprecated, bool proto_annotated_as_disallowed, const std::string& feature_name, - std::string error, const Protobuf::Message& message) { + std::string error, const Protobuf::Message& message, + ProtobufMessage::ValidationVisitor& validation_visitor) { // This option is for Envoy builds with --define deprecated_features=disabled // The build options CI then verifies that as Envoy developers deprecate fields, // that they update canonical configs and unit tests to not use those deprecated @@ -194,16 +197,9 @@ void deprecatedFieldHelper(Runtime::Loader* runtime, bool proto_annotated_as_dep std::string with_overridden = fmt::format( error, (runtime_overridden ? 
"runtime overrides to continue using now fatal-by-default " : "")); - if (warn_only) { - ENVOY_LOG_MISC(warn, "{}", with_overridden); - } else { - const char fatal_error[] = - " If continued use of this field is absolutely necessary, see " - "https://www.envoyproxy.io/docs/envoy/latest/configuration/operations/runtime" - "#using-runtime-overrides-for-deprecated-features for how to apply a temporary and " - "highly discouraged override."; - throw ProtoValidationException(with_overridden + fatal_error, message); - } + + validation_visitor.onDeprecatedField("type " + message.GetTypeName() + " " + with_overridden, + warn_only); } } // namespace @@ -263,6 +259,7 @@ size_t MessageUtil::hash(const Protobuf::Message& message) { printer.SetExpandAny(true); printer.SetUseFieldNumber(true); printer.SetSingleLineMode(true); + printer.SetHideUnknownFields(true); printer.PrintToString(message, &text_format); } @@ -270,43 +267,49 @@ size_t MessageUtil::hash(const Protobuf::Message& message) { } void MessageUtil::loadFromJson(const std::string& json, Protobuf::Message& message, - ProtobufMessage::ValidationVisitor& validation_visitor) { - tryWithApiBoosting( - [&json, &validation_visitor](Protobuf::Message& message, MessageVersion message_version) { - Protobuf::util::JsonParseOptions options; - options.case_insensitive_enum_parsing = true; - // Let's first try and get a clean parse when checking for unknown fields; - // this should be the common case. - options.ignore_unknown_fields = false; - const auto strict_status = Protobuf::util::JsonStringToMessage(json, &message, options); - if (strict_status.ok()) { - // Success, no need to do any extra work. - return; - } - // If we fail, we see if we get a clean parse when allowing unknown fields. - // This is essentially a workaround - // for https://github.com/protocolbuffers/protobuf/issues/5967. - // TODO(htuch): clean this up when protobuf supports JSON/YAML unknown field - // detection directly. 
- options.ignore_unknown_fields = true; - const auto relaxed_status = Protobuf::util::JsonStringToMessage(json, &message, options); - // If we still fail with relaxed unknown field checking, the error has nothing - // to do with unknown fields. - if (!relaxed_status.ok()) { - throw EnvoyException("Unable to parse JSON as proto (" + relaxed_status.ToString() + - "): " + json); - } - // We know it's an unknown field at this point. If we're at the latest - // version, then it's definitely an unknown field, otherwise we try to - // load again at a later version. - if (message_version == MessageVersion::LATEST_VERSION) { - validation_visitor.onUnknownField("type " + message.GetTypeName() + " reason " + - strict_status.ToString()); - } else { - throw ApiBoostRetryException("Unknown field, possibly a rename, try again."); - } - }, - message); + ProtobufMessage::ValidationVisitor& validation_visitor, + bool do_boosting) { + auto load_json = [&json, &validation_visitor](Protobuf::Message& message, + MessageVersion message_version) { + Protobuf::util::JsonParseOptions options; + options.case_insensitive_enum_parsing = true; + // Let's first try and get a clean parse when checking for unknown fields; + // this should be the common case. + options.ignore_unknown_fields = false; + const auto strict_status = Protobuf::util::JsonStringToMessage(json, &message, options); + if (strict_status.ok()) { + // Success, no need to do any extra work. + return; + } + // If we fail, we see if we get a clean parse when allowing unknown fields. + // This is essentially a workaround + // for https://github.com/protocolbuffers/protobuf/issues/5967. + // TODO(htuch): clean this up when protobuf supports JSON/YAML unknown field + // detection directly. + options.ignore_unknown_fields = true; + const auto relaxed_status = Protobuf::util::JsonStringToMessage(json, &message, options); + // If we still fail with relaxed unknown field checking, the error has nothing + // to do with unknown fields. 
+ if (!relaxed_status.ok()) { + throw EnvoyException("Unable to parse JSON as proto (" + relaxed_status.ToString() + + "): " + json); + } + // We know it's an unknown field at this point. If we're at the latest + // version, then it's definitely an unknown field, otherwise we try to + // load again at a later version. + if (message_version == MessageVersion::LATEST_VERSION) { + validation_visitor.onUnknownField("type " + message.GetTypeName() + " reason " + + strict_status.ToString()); + } else { + throw ApiBoostRetryException("Unknown field, possibly a rename, try again."); + } + }; + + if (do_boosting) { + tryWithApiBoosting(load_json, message); + } else { + load_json(message, MessageVersion::LATEST_VERSION); + } } void MessageUtil::loadFromJson(const std::string& json, ProtobufWkt::Struct& message) { @@ -316,11 +319,12 @@ void MessageUtil::loadFromJson(const std::string& json, ProtobufWkt::Struct& mes } void MessageUtil::loadFromYaml(const std::string& yaml, Protobuf::Message& message, - ProtobufMessage::ValidationVisitor& validation_visitor) { + ProtobufMessage::ValidationVisitor& validation_visitor, + bool do_boosting) { ProtobufWkt::Value value = ValueUtil::loadFromYaml(yaml); if (value.kind_case() == ProtobufWkt::Value::kStructValue || value.kind_case() == ProtobufWkt::Value::kListValue) { - jsonConvertInternal(value, validation_visitor, message); + jsonConvertInternal(value, validation_visitor, message, do_boosting); return; } throw EnvoyException("Unable to convert YAML as JSON: " + yaml); @@ -334,50 +338,74 @@ void MessageUtil::loadFromYaml(const std::string& yaml, ProtobufWkt::Struct& mes void MessageUtil::loadFromFile(const std::string& path, Protobuf::Message& message, ProtobufMessage::ValidationVisitor& validation_visitor, - Api::Api& api) { + Api::Api& api, bool do_boosting) { const std::string contents = api.fileSystem().fileReadToEnd(path); // If the filename ends with .pb, attempt to parse it as a binary proto. 
if (absl::EndsWith(path, FileExtensions::get().ProtoBinary)) { // Attempt to parse the binary format. - if (message.ParseFromString(contents)) { - MessageUtil::checkForUnexpectedFields(message, validation_visitor); - return; + auto read_proto_binary = [&contents, &validation_visitor](Protobuf::Message& message, + MessageVersion message_version) { + try { + if (message.ParseFromString(contents)) { + MessageUtil::checkForUnexpectedFields(message, validation_visitor); + } + return; + } catch (EnvoyException& ex) { + if (message_version == MessageVersion::LATEST_VERSION) { + // Failed reading the latest version - pass the same error upwards + throw ex; + } + } + throw ApiBoostRetryException( + "Failed to parse at earlier version, trying again at later version."); + }; + + if (do_boosting) { + // Attempts to read as the previous version and upgrade, and if it fails + // attempts to read as latest version. + tryWithApiBoosting(read_proto_binary, message); + } else { + read_proto_binary(message, MessageVersion::LATEST_VERSION); } - throw EnvoyException("Unable to parse file \"" + path + "\" as a binary protobuf (type " + - message.GetTypeName() + ")"); + return; } + // If the filename ends with .pb_text, attempt to parse it as a text proto. 
if (absl::EndsWith(path, FileExtensions::get().ProtoText)) { - tryWithApiBoosting( - [&contents, &path](Protobuf::Message& message, MessageVersion message_version) { - if (Protobuf::TextFormat::ParseFromString(contents, &message)) { - return; - } - if (message_version == MessageVersion::LATEST_VERSION) { - throw EnvoyException("Unable to parse file \"" + path + "\" as a text protobuf (type " + - message.GetTypeName() + ")"); - } else { - throw ApiBoostRetryException( - "Failed to parse at earlier version, trying again at later version."); - } - }, - message); + auto read_proto_text = [&contents, &path](Protobuf::Message& message, + MessageVersion message_version) { + if (Protobuf::TextFormat::ParseFromString(contents, &message)) { + return; + } + if (message_version == MessageVersion::LATEST_VERSION) { + throw EnvoyException("Unable to parse file \"" + path + "\" as a text protobuf (type " + + message.GetTypeName() + ")"); + } else { + throw ApiBoostRetryException( + "Failed to parse at earlier version, trying again at later version."); + } + }; + + if (do_boosting) { + tryWithApiBoosting(read_proto_text, message); + } else { + read_proto_text(message, MessageVersion::LATEST_VERSION); + } return; } if (absl::EndsWith(path, FileExtensions::get().Yaml)) { - loadFromYaml(contents, message, validation_visitor); + loadFromYaml(contents, message, validation_visitor, do_boosting); } else { - loadFromJson(contents, message, validation_visitor); + loadFromJson(contents, message, validation_visitor, do_boosting); } } namespace { -void checkForDeprecatedNonRepeatedEnumValue(const Protobuf::Message& message, - absl::string_view filename, - const Protobuf::FieldDescriptor* field, - const Protobuf::Reflection* reflection, - Runtime::Loader* runtime) { +void checkForDeprecatedNonRepeatedEnumValue( + const Protobuf::Message& message, absl::string_view filename, + const Protobuf::FieldDescriptor* field, const Protobuf::Reflection* reflection, + Runtime::Loader* runtime, 
ProtobufMessage::ValidationVisitor& validation_visitor) { // Repeated fields will be handled by recursion in checkForUnexpectedFields. if (field->is_repeated() || field->cpp_type() != Protobuf::FieldDescriptor::CPPTYPE_ENUM) { return; @@ -395,13 +423,12 @@ void checkForDeprecatedNonRepeatedEnumValue(const Protobuf::Message& message, enum_value_descriptor->name(), " for enum '", field->full_name(), "' from file ", filename, ". This enum value will be removed from Envoy soon", (default_value ? " so a non-default value must now be explicitly set" : ""), - ". Please see https://www.envoyproxy.io/docs/envoy/latest/intro/deprecated " - "for details."); + ". Please see " ENVOY_DOC_URL_VERSION_HISTORY " for details."); deprecatedFieldHelper( runtime, true /*deprecated*/, enum_value_descriptor->options().GetExtension(envoy::annotations::disallowed_by_default_enum), absl::StrCat("envoy.deprecated_features:", enum_value_descriptor->full_name()), error, - message); + message, validation_visitor); } class UnexpectedFieldProtoVisitor : public ProtobufMessage::ConstProtoVisitor { @@ -417,7 +444,8 @@ class UnexpectedFieldProtoVisitor : public ProtobufMessage::ConstProtoVisitor { // Before we check to see if the field is in use, see if there's a // deprecated default enum value. - checkForDeprecatedNonRepeatedEnumValue(message, filename, &field, reflection, runtime_); + checkForDeprecatedNonRepeatedEnumValue(message, filename, &field, reflection, runtime_, + validation_visitor_); // If this field is not in use, continue. if ((field.is_repeated() && reflection->FieldSize(message, &field) == 0) || @@ -427,16 +455,30 @@ class UnexpectedFieldProtoVisitor : public ProtobufMessage::ConstProtoVisitor { // If this field is deprecated, warn or throw an error. if (field.options().deprecated()) { - const std::string warning = absl::StrCat( - "Using {}deprecated option '", field.full_name(), "' from file ", filename, - ". This configuration will be removed from " - "Envoy soon. 
Please see https://www.envoyproxy.io/docs/envoy/latest/intro/deprecated " - "for details."); + if (absl::StartsWith(field.name(), Config::VersionUtil::DeprecatedFieldShadowPrefix)) { + // The field was marked as hidden_envoy_deprecated and an error must be thrown, + // unless it is part of an explicit test that needs access to the deprecated field + // when we enable runtime deprecation override to allow point field overrides for tests. + if (!runtime_ || + !runtime_->snapshot().deprecatedFeatureEnabled( + absl::StrCat("envoy.deprecated_features:", field.full_name()), false)) { + const std::string fatal_error = absl::StrCat( + "Illegal use of hidden_envoy_deprecated_ V2 field '", field.full_name(), + "' from file ", filename, + " while using the latest V3 configuration. This field has been removed from the " + "current Envoy API. Please see " ENVOY_DOC_URL_VERSION_HISTORY " for details."); + throw ProtoValidationException(fatal_error, message); + } + } + const std::string warning = + absl::StrCat("Using {}deprecated option '", field.full_name(), "' from file ", filename, + ". This configuration will be removed from " + "Envoy soon. Please see " ENVOY_DOC_URL_VERSION_HISTORY " for details."); deprecatedFieldHelper(runtime_, true /*deprecated*/, field.options().GetExtension(envoy::annotations::disallowed_by_default), absl::StrCat("envoy.deprecated_features:", field.full_name()), warning, - message); + message, validation_visitor_); } return nullptr; } @@ -535,16 +577,18 @@ void MessageUtil::unpackTo(const ProtobufWkt::Any& any_message, Protobuf::Messag Config::ApiTypeOracle::getEarlierVersionDescriptor(message.GetDescriptor()->full_name()); // If the earlier version matches, unpack and upgrade. 
if (earlier_version_desc != nullptr && any_full_name == earlier_version_desc->full_name()) { - Protobuf::DynamicMessageFactory dmf; - auto earlier_message = - ProtobufTypes::MessagePtr(dmf.GetPrototype(earlier_version_desc)->New()); - ASSERT(earlier_message != nullptr); - if (!any_message.UnpackTo(earlier_message.get())) { + // Take the Any message but adjust its type URL, since earlier/later versions are wire + // compatible. + ProtobufWkt::Any any_message_with_fixup; + any_message_with_fixup.MergeFrom(any_message); + any_message_with_fixup.set_type_url("type.googleapis.com/" + + message.GetDescriptor()->full_name()); + if (!any_message_with_fixup.UnpackTo(&message)) { throw EnvoyException(fmt::format("Unable to unpack as {}: {}", - earlier_message->GetDescriptor()->full_name(), - any_message.DebugString())); + earlier_version_desc->full_name(), + any_message_with_fixup.DebugString())); } - Config::VersionConverter::upgrade(*earlier_message, message); + Config::VersionConverter::annotateWithOriginalType(*earlier_version_desc, message); return; } } diff --git a/source/common/protobuf/utility.h b/source/common/protobuf/utility.h index b2d4b828be1ac..dc2ec54d1863d 100644 --- a/source/common/protobuf/utility.h +++ b/source/common/protobuf/utility.h @@ -85,7 +85,7 @@ uint64_t fractionalPercentDenominatorToInt( } // namespace ProtobufPercentHelper } // namespace Envoy -// Convert an envoy::api::v2::core::Percent to a double or a default. +// Convert an envoy::type::v3::Percent to a double or a default. // @param message supplies the proto message containing the field. // @param field_name supplies the field name in the message. // @param default_value supplies the default if the field is not present. @@ -94,7 +94,7 @@ uint64_t fractionalPercentDenominatorToInt( ? (message).has_##field_name() ? 
(message).field_name().value() : default_value \ : throw EnvoyException(fmt::format("Value not in the range of 0..100 range."))) -// Convert an envoy::api::v2::core::Percent to a rounded integer or a default. +// Convert an envoy::type::v3::Percent to a rounded integer or a default. // @param message supplies the proto message containing the field. // @param field_name supplies the field name in the message. // @param max_value supplies the maximum allowed integral value (e.g., 100, 10000, etc.). @@ -216,13 +216,16 @@ class MessageUtil { static std::size_t hash(const Protobuf::Message& message); static void loadFromJson(const std::string& json, Protobuf::Message& message, - ProtobufMessage::ValidationVisitor& validation_visitor); + ProtobufMessage::ValidationVisitor& validation_visitor, + bool do_boosting = true); static void loadFromJson(const std::string& json, ProtobufWkt::Struct& message); static void loadFromYaml(const std::string& yaml, Protobuf::Message& message, - ProtobufMessage::ValidationVisitor& validation_visitor); + ProtobufMessage::ValidationVisitor& validation_visitor, + bool do_boosting = true); static void loadFromYaml(const std::string& yaml, ProtobufWkt::Struct& message); static void loadFromFile(const std::string& path, Protobuf::Message& message, - ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api); + ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api, + bool do_boosting = true); /** * Checks for use of deprecated fields in message and all sub-messages. @@ -247,7 +250,9 @@ class MessageUtil { static void validate(const MessageType& message, ProtobufMessage::ValidationVisitor& validation_visitor) { // Log warnings or throw errors if deprecated fields or unknown fields are in use. 
- checkForUnexpectedFields(message, validation_visitor); + if (!validation_visitor.skipValidation()) { + checkForUnexpectedFields(message, validation_visitor); + } std::string err; if (!Validate(message, &err)) { @@ -257,8 +262,9 @@ class MessageUtil { template static void loadFromYamlAndValidate(const std::string& yaml, MessageType& message, - ProtobufMessage::ValidationVisitor& validation_visitor) { - loadFromYaml(yaml, message, validation_visitor); + ProtobufMessage::ValidationVisitor& validation_visitor, + bool avoid_boosting = false) { + loadFromYaml(yaml, message, validation_visitor, !avoid_boosting); validate(message, validation_visitor); } @@ -296,10 +302,15 @@ class MessageUtil { * * @return MessageType the typed message inside the Any. */ + template + static inline void anyConvert(const ProtobufWkt::Any& message, MessageType& typed_message) { + unpackTo(message, typed_message); + }; + template static inline MessageType anyConvert(const ProtobufWkt::Any& message) { MessageType typed_message; - unpackTo(message, typed_message); + anyConvert(message, typed_message); return typed_message; }; @@ -310,15 +321,39 @@ class MessageUtil { * @return MessageType the typed message inside the Any. * @throw ProtoValidationException if the message does not satisfy its type constraints. 
*/ + template + static inline void anyConvertAndValidate(const ProtobufWkt::Any& message, + MessageType& typed_message, + ProtobufMessage::ValidationVisitor& validation_visitor) { + anyConvert(message, typed_message); + validate(typed_message, validation_visitor); + }; + template static inline MessageType anyConvertAndValidate(const ProtobufWkt::Any& message, ProtobufMessage::ValidationVisitor& validation_visitor) { - MessageType typed_message = anyConvert(message); - validate(typed_message, validation_visitor); + MessageType typed_message; + anyConvertAndValidate(message, typed_message, validation_visitor); return typed_message; }; + /** + * Obtain a string field from a protobuf message dynamically. + * + * @param message message to extract from. + * @param field_name field name. + * + * @return std::string with field value. + */ + static inline std::string getStringField(const Protobuf::Message& message, + const std::string& field_name) { + const Protobuf::Descriptor* descriptor = message.GetDescriptor(); + const Protobuf::FieldDescriptor* name_field = descriptor->FindFieldByName(field_name); + const Protobuf::Reflection* reflection = message.GetReflection(); + return reflection->GetString(message, name_field); + } + /** * Convert between two protobufs via a JSON round-trip. This is used to translate arbitrary * messages to/from google.protobuf.Struct. 
diff --git a/source/common/protobuf/well_known.h b/source/common/protobuf/well_known.h index 86905f3b63c0d..dcd2a9a82b4be 100644 --- a/source/common/protobuf/well_known.h +++ b/source/common/protobuf/well_known.h @@ -1,5 +1,7 @@ #pragma once +#include + namespace Envoy { namespace ProtobufWellKnown { diff --git a/source/common/router/BUILD b/source/common/router/BUILD index 2786beb3bccc5..5f9e2b8cbc96f 100644 --- a/source/common/router/BUILD +++ b/source/common/router/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( @@ -252,6 +252,7 @@ envoy_cc_library( "//source/common/http:header_utility_lib", "//source/common/http:headers_lib", "//source/common/http:utility_lib", + "//source/common/runtime:runtime_features_lib", "@envoy_api//envoy/config/route/v3:pkg_cc_proto", ], ) @@ -289,6 +290,7 @@ envoy_cc_library( "//source/common/access_log:access_log_lib", "//source/common/buffer:watermark_buffer_lib", "//source/common/common:assert_lib", + "//source/common/common:cleanup_lib", "//source/common/common:empty_string", "//source/common/common:enum_to_int", "//source/common/common:hash_lib", @@ -302,6 +304,7 @@ envoy_cc_library( "//source/common/http:header_map_lib", "//source/common/http:headers_lib", "//source/common/http:message_lib", + "//source/common/http:url_utility_lib", "//source/common/http:utility_lib", "//source/common/network:application_protocol_lib", "//source/common/network:transport_socket_options_lib", @@ -309,6 +312,7 @@ envoy_cc_library( "//source/common/stream_info:uint32_accessor_lib", "//source/common/tracing:http_tracer_lib", "//source/common/upstream:load_balancer_lib", + "//source/extensions/common/proxy_protocol:proxy_protocol_header_lib", "@envoy_api//envoy/extensions/filters/http/router/v3:pkg_cc_proto", ], ) @@ -323,8 +327,10 @@ envoy_cc_library( 
"//include/envoy/router:router_ratelimit_interface", "//source/common/common:assert_lib", "//source/common/common:empty_string", + "//source/common/config:metadata_lib", "//source/common/http:header_utility_lib", "//source/common/protobuf:utility_lib", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", "@envoy_api//envoy/config/route/v3:pkg_cc_proto", ], ) @@ -363,10 +369,10 @@ envoy_cc_library( "//include/envoy/router:string_accessor_interface", "//include/envoy/stream_info:filter_state_interface", "//include/envoy/stream_info:stream_info_interface", - "//source/common/access_log:access_log_formatter_lib", "//source/common/common:minimal_logger_lib", "//source/common/common:utility_lib", "//source/common/config:metadata_lib", + "//source/common/formatter:substitution_formatter_lib", "//source/common/http:header_map_lib", "//source/common/json:json_loader_lib", ], diff --git a/source/common/router/config_impl.cc b/source/common/router/config_impl.cc index 801cc0e00ad43..73a2de20e6dd0 100644 --- a/source/common/router/config_impl.cc +++ b/source/common/router/config_impl.cc @@ -34,6 +34,7 @@ #include "common/protobuf/protobuf.h" #include "common/protobuf/utility.h" #include "common/router/retry_state_impl.h" +#include "common/runtime/runtime_features.h" #include "common/tracing/http_tracer_impl.h" #include "extensions/filters/http/common/utility.h" @@ -45,18 +46,6 @@ namespace Envoy { namespace Router { namespace { -InternalRedirectAction -convertInternalRedirectAction(const envoy::config::route::v3::RouteAction& route) { - switch (route.internal_redirect_action()) { - case envoy::config::route::v3::RouteAction::PASS_THROUGH_INTERNAL_REDIRECT: - return InternalRedirectAction::PassThrough; - case envoy::config::route::v3::RouteAction::HANDLE_INTERNAL_REDIRECT: - return InternalRedirectAction::Handle; - default: - return InternalRedirectAction::PassThrough; - } -} - const std::string DEPRECATED_ROUTER_NAME = "envoy.router"; } // namespace @@ -152,6 +141,49 @@ 
Upstream::RetryPrioritySharedPtr RetryPolicyImpl::retryPriority() const { *validation_visitor_, num_retries_); } +InternalRedirectPolicyImpl::InternalRedirectPolicyImpl( + const envoy::config::route::v3::InternalRedirectPolicy& policy_config, + ProtobufMessage::ValidationVisitor& validator, absl::string_view current_route_name) + : current_route_name_(current_route_name), + redirect_response_codes_(buildRedirectResponseCodes(policy_config)), + max_internal_redirects_( + PROTOBUF_GET_WRAPPED_OR_DEFAULT(policy_config, max_internal_redirects, 1)), + enabled_(true), allow_cross_scheme_redirect_(policy_config.allow_cross_scheme_redirect()) { + for (const auto& predicate : policy_config.predicates()) { + auto& factory = + Envoy::Config::Utility::getAndCheckFactory(predicate); + auto config = factory.createEmptyConfigProto(); + Envoy::Config::Utility::translateOpaqueConfig(predicate.typed_config(), {}, validator, *config); + predicate_factories_.emplace_back(&factory, std::move(config)); + } +} + +std::vector InternalRedirectPolicyImpl::predicates() const { + std::vector predicates; + for (const auto& predicate_factory : predicate_factories_) { + predicates.emplace_back(predicate_factory.first->createInternalRedirectPredicate( + *predicate_factory.second, current_route_name_)); + } + return predicates; +} + +absl::flat_hash_set InternalRedirectPolicyImpl::buildRedirectResponseCodes( + const envoy::config::route::v3::InternalRedirectPolicy& policy_config) const { + if (policy_config.redirect_response_codes_size() == 0) { + return absl::flat_hash_set{Http::Code::Found}; + } + absl::flat_hash_set ret; + std::for_each(policy_config.redirect_response_codes().begin(), + policy_config.redirect_response_codes().end(), [&ret](uint32_t response_code) { + const absl::flat_hash_set valid_redirect_response_code = {301, 302, 303, + 307, 308}; + if (valid_redirect_response_code.contains(response_code)) { + ret.insert(static_cast(response_code)); + } + }); + return ret; +} + 
CorsPolicyImpl::CorsPolicyImpl(const envoy::config::route::v3::CorsPolicy& config, Runtime::Loader& loader) : config_(config), loader_(loader), allow_methods_(config.allow_methods()), @@ -269,11 +301,16 @@ RouteEntryImplBase::RouteEntryImplBase(const VirtualHostImpl& vhost, ? ":" + std::to_string(route.redirect().port_redirect()) : ""), path_redirect_(route.redirect().path_redirect()), + path_redirect_has_query_(path_redirect_.find('?') != absl::string_view::npos), + enable_preserve_query_in_path_redirects_(Runtime::runtimeFeatureEnabled( + "envoy.reloadable_features.preserve_query_string_in_path_redirects")), https_redirect_(route.redirect().https_redirect()), prefix_rewrite_redirect_(route.redirect().prefix_rewrite()), strip_query_(route.redirect().strip_query()), hedge_policy_(buildHedgePolicy(vhost.hedgePolicy(), route.route())), retry_policy_(buildRetryPolicy(vhost.retryPolicy(), route.route(), validator)), + internal_redirect_policy_( + buildInternalRedirectPolicy(route.route(), validator, route.name())), rate_limit_policy_(route.route().rate_limits()), priority_(ConfigUtility::parsePriority(route.route().priority())), config_headers_(Http::HeaderUtility::buildHeaderDataVector(route.match().headers())), @@ -293,10 +330,7 @@ RouteEntryImplBase::RouteEntryImplBase(const VirtualHostImpl& vhost, per_filter_configs_(route.typed_per_filter_config(), route.hidden_envoy_deprecated_per_filter_config(), factory_context, validator), - route_name_(route.name()), time_source_(factory_context.dispatcher().timeSource()), - internal_redirect_action_(convertInternalRedirectAction(route.route())), - max_internal_redirects_( - PROTOBUF_GET_WRAPPED_OR_DEFAULT(route.route(), max_internal_redirects, 1)) { + route_name_(route.name()), time_source_(factory_context.dispatcher().timeSource()) { if (route.route().has_metadata_match()) { const auto filter_it = route.route().metadata_match().filter_metadata().find( Envoy::Config::MetadataFilters::get().ENVOY_LB); @@ -382,6 +416,12 @@ 
RouteEntryImplBase::RouteEntryImplBase(const VirtualHostImpl& vhost, if (!success) { throw EnvoyException(absl::StrCat("Duplicate upgrade ", upgrade_config.upgrade_type())); } + if (upgrade_config.upgrade_type() == Http::Headers::get().MethodValues.Connect) { + connect_config_ = upgrade_config.connect_config(); + } else if (upgrade_config.has_connect_config()) { + throw EnvoyException(absl::StrCat("Non-CONNECT upgrade type ", upgrade_config.upgrade_type(), + " has ConnectConfig")); + } } if (route.route().has_regex_rewrite()) { @@ -392,6 +432,13 @@ RouteEntryImplBase::RouteEntryImplBase(const VirtualHostImpl& vhost, regex_rewrite_ = Regex::Utility::parseRegex(rewrite_spec.pattern()); regex_rewrite_substitution_ = rewrite_spec.substitution(); } + + if (enable_preserve_query_in_path_redirects_ && path_redirect_has_query_ && strip_query_) { + ENVOY_LOG(warn, + "`strip_query` is set to true, but `path_redirect` contains query string and it will " + "not be stripped: {}", + path_redirect_); + } } bool RouteEntryImplBase::evaluateRuntimeMatch(const uint64_t random_value) const { @@ -430,12 +477,6 @@ bool RouteEntryImplBase::matchRoute(const Http::RequestHeaderMap& headers, uint64_t random_value) const { bool matches = true; - // TODO(mattklein123): Currently all match types require a path header. When we support CONNECT - // we will need to figure out how to safely relax this. - if (headers.Path() == nullptr) { - return false; - } - matches &= evaluateRuntimeMatch(random_value); if (!matches) { // No need to waste further cycles calculating a route match. 
@@ -443,13 +484,13 @@ bool RouteEntryImplBase::matchRoute(const Http::RequestHeaderMap& headers, } if (match_grpc_) { - matches &= Grpc::Common::hasGrpcContentType(headers); + matches &= Grpc::Common::isGrpcRequestHeaders(headers); } matches &= Http::HeaderUtility::matchHeaders(headers, config_headers_); if (!config_query_parameters_.empty()) { Http::Utility::QueryParams query_parameters = - Http::Utility::parseQueryString(headers.Path()->value().getStringView()); + Http::Utility::parseQueryString(headers.getPathValue()); matches &= ConfigUtility::matchQueryParams(query_parameters, config_query_parameters_); } @@ -540,7 +581,8 @@ void RouteEntryImplBase::finalizePathHeader(Http::RequestHeaderMap& headers, return; } - std::string path(headers.Path()->value().getStringView()); + // TODO(perf): can we avoid the string copy for the common case? + std::string path(headers.getPathValue()); if (insert_envoy_original_path) { headers.setEnvoyOriginalPath(path); } @@ -565,7 +607,7 @@ absl::string_view RouteEntryImplBase::processRequestHost(const Http::RequestHead absl::string_view new_scheme, absl::string_view new_port) const { - absl::string_view request_host = headers.Host()->value().getStringView(); + absl::string_view request_host = headers.getHostValue(); size_t host_end; if (request_host.empty()) { return request_host; @@ -582,7 +624,7 @@ absl::string_view RouteEntryImplBase::processRequestHost(const Http::RequestHead if (host_end != absl::string_view::npos) { absl::string_view request_port = request_host.substr(host_end); - absl::string_view request_protocol = headers.ForwardedProto()->value().getStringView(); + absl::string_view request_protocol = headers.getForwardedProtoValue(); bool remove_port = !new_port.empty(); if (new_scheme != request_protocol) { @@ -614,7 +656,7 @@ std::string RouteEntryImplBase::newPath(const Http::RequestHeaderMap& headers) c final_scheme = Http::Headers::get().SchemeValues.Https; } else { ASSERT(headers.ForwardedProto()); - final_scheme 
= headers.ForwardedProto()->value().getStringView(); + final_scheme = headers.getForwardedProtoValue(); } if (!port_redirect_.empty()) { @@ -630,17 +672,46 @@ std::string RouteEntryImplBase::newPath(const Http::RequestHeaderMap& headers) c final_host = processRequestHost(headers, final_scheme, final_port); } - if (!path_redirect_.empty()) { - final_path = path_redirect_.c_str(); - } else { - ASSERT(headers.Path()); - final_path = headers.Path()->value().getStringView(); - if (strip_query_) { - size_t path_end = final_path.find("?"); + std::string final_path_value; + if (enable_preserve_query_in_path_redirects_) { + if (!path_redirect_.empty()) { + // The path_redirect query string, if any, takes precedence over the request's query string, + // and it will not be stripped regardless of `strip_query`. + if (path_redirect_has_query_) { + final_path = path_redirect_.c_str(); + } else { + const absl::string_view current_path = headers.getPathValue(); + const size_t path_end = current_path.find('?'); + const bool current_path_has_query = path_end != absl::string_view::npos; + if (current_path_has_query) { + final_path_value = path_redirect_; + final_path_value.append(current_path.data() + path_end, current_path.length() - path_end); + final_path = final_path_value; + } else { + final_path = path_redirect_.c_str(); + } + } + } else { + final_path = headers.getPathValue(); + } + if (!path_redirect_has_query_ && strip_query_) { + const size_t path_end = final_path.find('?'); if (path_end != absl::string_view::npos) { final_path = final_path.substr(0, path_end); } } + } else { + if (!path_redirect_.empty()) { + final_path = path_redirect_.c_str(); + } else { + final_path = headers.getPathValue(); + if (strip_query_) { + const size_t path_end = final_path.find("?"); + if (path_end != absl::string_view::npos) { + final_path = final_path.substr(0, path_end); + } + } + } } return fmt::format("{}://{}{}{}", final_scheme, final_host, final_port, final_path); @@ -707,6 +778,28 @@ 
RetryPolicyImpl RouteEntryImplBase::buildRetryPolicy( return RetryPolicyImpl(); } +InternalRedirectPolicyImpl RouteEntryImplBase::buildInternalRedirectPolicy( + const envoy::config::route::v3::RouteAction& route_config, + ProtobufMessage::ValidationVisitor& validator, absl::string_view current_route_name) const { + if (route_config.has_internal_redirect_policy()) { + return InternalRedirectPolicyImpl(route_config.internal_redirect_policy(), validator, + current_route_name); + } + envoy::config::route::v3::InternalRedirectPolicy policy_config; + switch (route_config.internal_redirect_action()) { + case envoy::config::route::v3::RouteAction::HANDLE_INTERNAL_REDIRECT: + break; + case envoy::config::route::v3::RouteAction::PASS_THROUGH_INTERNAL_REDIRECT: + FALLTHRU; + default: + return InternalRedirectPolicyImpl(); + } + if (route_config.has_max_internal_redirects()) { + *policy_config.mutable_max_internal_redirects() = route_config.max_internal_redirects(); + } + return InternalRedirectPolicyImpl(policy_config, validator, current_route_name); +} + DecoratorConstPtr RouteEntryImplBase::parseDecorator(const envoy::config::route::v3::Route& route) { DecoratorConstPtr ret; if (route.has_decorator()) { @@ -852,7 +945,7 @@ RouteConstSharedPtr PrefixRouteEntryImpl::matches(const Http::RequestHeaderMap& const StreamInfo::StreamInfo& stream_info, uint64_t random_value) const { if (RouteEntryImplBase::matchRoute(headers, stream_info, random_value) && - path_matcher_->match(headers.Path()->value().getStringView())) { + path_matcher_->match(headers.getPathValue())) { return clusterEntry(headers, random_value); } return nullptr; @@ -874,7 +967,7 @@ RouteConstSharedPtr PathRouteEntryImpl::matches(const Http::RequestHeaderMap& he const StreamInfo::StreamInfo& stream_info, uint64_t random_value) const { if (RouteEntryImplBase::matchRoute(headers, stream_info, random_value) && - path_matcher_->match(headers.Path()->value().getStringView())) { + 
path_matcher_->match(headers.getPathValue())) { return clusterEntry(headers, random_value); } @@ -902,8 +995,7 @@ RegexRouteEntryImpl::RegexRouteEntryImpl( void RegexRouteEntryImpl::rewritePathHeader(Http::RequestHeaderMap& headers, bool insert_envoy_original_path) const { - const absl::string_view path = - Http::PathUtil::removeQueryAndFragment(headers.Path()->value().getStringView()); + const absl::string_view path = Http::PathUtil::removeQueryAndFragment(headers.getPathValue()); // TODO(yuval-k): This ASSERT can happen if the path was changed by a filter without clearing the // route cache. We should consider if ASSERT-ing is the desired behavior in this case. ASSERT(regex_->match(path)); @@ -914,8 +1006,7 @@ RouteConstSharedPtr RegexRouteEntryImpl::matches(const Http::RequestHeaderMap& h const StreamInfo::StreamInfo& stream_info, uint64_t random_value) const { if (RouteEntryImplBase::matchRoute(headers, stream_info, random_value)) { - const absl::string_view path = - Http::PathUtil::removeQueryAndFragment(headers.Path()->value().getStringView()); + const absl::string_view path = Http::PathUtil::removeQueryAndFragment(headers.getPathValue()); if (regex_->match(path)) { return clusterEntry(headers, random_value); } @@ -923,6 +1014,27 @@ RouteConstSharedPtr RegexRouteEntryImpl::matches(const Http::RequestHeaderMap& h return nullptr; } +ConnectRouteEntryImpl::ConnectRouteEntryImpl( + const VirtualHostImpl& vhost, const envoy::config::route::v3::Route& route, + Server::Configuration::ServerFactoryContext& factory_context, + ProtobufMessage::ValidationVisitor& validator) + : RouteEntryImplBase(vhost, route, factory_context, validator) {} + +void ConnectRouteEntryImpl::rewritePathHeader(Http::RequestHeaderMap& headers, + bool insert_envoy_original_path) const { + const absl::string_view path = Http::PathUtil::removeQueryAndFragment(headers.getPathValue()); + finalizePathHeader(headers, path, insert_envoy_original_path); +} + +RouteConstSharedPtr 
ConnectRouteEntryImpl::matches(const Http::RequestHeaderMap& headers, + const StreamInfo::StreamInfo&, + uint64_t random_value) const { + if (Http::HeaderUtility::isConnect(headers)) { + return clusterEntry(headers, random_value); + } + return nullptr; +} + VirtualHostImpl::VirtualHostImpl(const envoy::config::route::v3::VirtualHost& virtual_host, const ConfigImpl& global_route_config, Server::Configuration::ServerFactoryContext& factory_context, @@ -982,6 +1094,10 @@ VirtualHostImpl::VirtualHostImpl(const envoy::config::route::v3::VirtualHost& vi routes_.emplace_back(new RegexRouteEntryImpl(*this, route, factory_context, validator)); break; } + case envoy::config::route::v3::RouteMatch::PathSpecifierCase::kConnectMatcher: { + routes_.emplace_back(new ConnectRouteEntryImpl(*this, route, factory_context, validator)); + break; + } case envoy::config::route::v3::RouteMatch::PathSpecifierCase::PATH_SPECIFIER_NOT_SET: NOT_REACHED_GCOVR_EXCL_LINE; } @@ -1081,7 +1197,8 @@ RouteMatcher::RouteMatcher(const envoy::config::route::v3::RouteConfiguration& r bool duplicate_found = false; if ("*" == domain) { if (default_virtual_host_) { - throw EnvoyException(fmt::format("Only a single wildcard domain is permitted")); + throw EnvoyException(fmt::format("Only a single wildcard domain is permitted in route {}", + route_config.name())); } default_virtual_host_ = virtual_host; } else if (!domain.empty() && '*' == domain[0]) { @@ -1096,14 +1213,16 @@ RouteMatcher::RouteMatcher(const envoy::config::route::v3::RouteConfiguration& r duplicate_found = !virtual_hosts_.emplace(domain, virtual_host).second; } if (duplicate_found) { - throw EnvoyException(fmt::format( - "Only unique values for domains are permitted. Duplicate entry of domain {}", domain)); + throw EnvoyException(fmt::format("Only unique values for domains are permitted. 
Duplicate " + "entry of domain {} in route {}", + domain, route_config.name())); } } } } -RouteConstSharedPtr VirtualHostImpl::getRouteFromEntries(const Http::RequestHeaderMap& headers, +RouteConstSharedPtr VirtualHostImpl::getRouteFromEntries(const RouteCallback& cb, + const Http::RequestHeaderMap& headers, const StreamInfo::StreamInfo& stream_info, uint64_t random_value) const { // No x-forwarded-proto header. This normally only happens when ActiveStream::decodeHeaders @@ -1123,11 +1242,32 @@ RouteConstSharedPtr VirtualHostImpl::getRouteFromEntries(const Http::RequestHead } // Check for a route that matches the request. - for (const RouteEntryImplBaseConstSharedPtr& route : routes_) { - RouteConstSharedPtr route_entry = route->matches(headers, stream_info, random_value); - if (nullptr != route_entry) { - return route_entry; + for (auto route = routes_.begin(); route != routes_.end(); ++route) { + if (!headers.Path() && !(*route)->supportsPathlessHeaders()) { + continue; + } + + RouteConstSharedPtr route_entry = (*route)->matches(headers, stream_info, random_value); + if (nullptr == route_entry) { + continue; } + + if (cb) { + RouteEvalStatus eval_status = (std::next(route) == routes_.end()) + ? RouteEvalStatus::NoMoreRoutes + : RouteEvalStatus::HasMoreRoutes; + RouteMatchStatus match_status = cb(route_entry, eval_status); + if (match_status == RouteMatchStatus::Accept) { + return route_entry; + } + if (match_status == RouteMatchStatus::Continue && + eval_status == RouteEvalStatus::NoMoreRoutes) { + return nullptr; + } + continue; + } + + return route_entry; } return nullptr; @@ -1147,8 +1287,8 @@ const VirtualHostImpl* RouteMatcher::findVirtualHost(const Http::RequestHeaderMa // TODO (@rshriram) Match Origin header in WebSocket // request with VHost, using wildcard match - const std::string host = - Http::LowerCaseString(std::string(headers.Host()->value().getStringView())).get(); + // Lower-case the value of the host header, as hostnames are case insensitive. 
+ const std::string host = absl::AsciiStrToLower(headers.getHostValue()); const auto& iter = virtual_hosts_.find(host); if (iter != virtual_hosts_.end()) { return iter->second.get(); @@ -1172,12 +1312,14 @@ const VirtualHostImpl* RouteMatcher::findVirtualHost(const Http::RequestHeaderMa return default_virtual_host_.get(); } -RouteConstSharedPtr RouteMatcher::route(const Http::RequestHeaderMap& headers, +RouteConstSharedPtr RouteMatcher::route(const RouteCallback& cb, + const Http::RequestHeaderMap& headers, const StreamInfo::StreamInfo& stream_info, uint64_t random_value) const { + const VirtualHostImpl* virtual_host = findVirtualHost(headers); if (virtual_host) { - return virtual_host->getRouteFromEntries(headers, stream_info, random_value); + return virtual_host->getRouteFromEntries(cb, headers, stream_info, random_value); } else { return nullptr; } @@ -1223,6 +1365,13 @@ ConfigImpl::ConfigImpl(const envoy::config::route::v3::RouteConfiguration& confi config.response_headers_to_remove()); } +RouteConstSharedPtr ConfigImpl::route(const RouteCallback& cb, + const Http::RequestHeaderMap& headers, + const StreamInfo::StreamInfo& stream_info, + uint64_t random_value) const { + return route_matcher_->route(cb, headers, stream_info, random_value); +} + namespace { RouteSpecificFilterConfigConstSharedPtr diff --git a/source/common/router/config_impl.h b/source/common/router/config_impl.h index 89add8a903e55..a32d19fbe7427 100644 --- a/source/common/router/config_impl.h +++ b/source/common/router/config_impl.h @@ -2,12 +2,12 @@ #include #include +#include #include #include #include #include #include -#include #include #include "envoy/config/core/v3/base.pb.h" @@ -31,6 +31,7 @@ #include "common/router/tls_context_match_criteria_impl.h" #include "common/stats/symbol_table_impl.h" +#include "absl/container/node_hash_map.h" #include "absl/types/optional.h" namespace Envoy { @@ -53,6 +54,9 @@ class Matchable { virtual RouteConstSharedPtr matches(const Http::RequestHeaderMap& 
headers, const StreamInfo::StreamInfo& stream_info, uint64_t random_value) const PURE; + + // By default, matchers do not support null Path headers. + virtual bool supportsPathlessHeaders() const { return false; } }; class PerFilterConfigs { @@ -65,7 +69,7 @@ class PerFilterConfigs { const RouteSpecificFilterConfig* get(const std::string& name) const; private: - std::unordered_map configs_; + absl::node_hash_map configs_; }; class RouteEntryImplBase; @@ -160,7 +164,8 @@ class VirtualHostImpl : public VirtualHost { Server::Configuration::ServerFactoryContext& factory_context, Stats::Scope& scope, ProtobufMessage::ValidationVisitor& validator, bool validate_clusters); - RouteConstSharedPtr getRouteFromEntries(const Http::RequestHeaderMap& headers, + RouteConstSharedPtr getRouteFromEntries(const RouteCallback& cb, + const Http::RequestHeaderMap& headers, const StreamInfo::StreamInfo& stream_info, uint64_t random_value) const; const VirtualCluster* virtualClusterFromEntries(const Http::HeaderMap& headers) const; @@ -381,6 +386,46 @@ class RouteTracingImpl : public RouteTracing { Tracing::CustomTagMap custom_tags_; }; +/** + * Implementation of InternalRedirectPolicy that reads from the proto + * InternalRedirectPolicy of the RouteAction. + */ +class InternalRedirectPolicyImpl : public InternalRedirectPolicy { +public: + // Constructor that enables internal redirect with policy_config controlling the configurable + // behaviors. + explicit InternalRedirectPolicyImpl( + const envoy::config::route::v3::InternalRedirectPolicy& policy_config, + ProtobufMessage::ValidationVisitor& validator, absl::string_view current_route_name); + // Default constructor that disables internal redirect. 
+ InternalRedirectPolicyImpl() = default; + + bool enabled() const override { return enabled_; } + + bool shouldRedirectForResponseCode(const Http::Code& response_code) const override { + return redirect_response_codes_.contains(response_code); + } + + std::vector predicates() const override; + + uint32_t maxInternalRedirects() const override { return max_internal_redirects_; } + + bool isCrossSchemeRedirectAllowed() const override { return allow_cross_scheme_redirect_; } + +private: + absl::flat_hash_set buildRedirectResponseCodes( + const envoy::config::route::v3::InternalRedirectPolicy& policy_config) const; + + const std::string current_route_name_; + const absl::flat_hash_set redirect_response_codes_; + const uint32_t max_internal_redirects_{1}; + const bool enabled_{false}; + const bool allow_cross_scheme_redirect_{false}; + + std::vector> + predicate_factories_; +}; + /** * Base implementation for all route entries. */ @@ -437,6 +482,9 @@ class RouteEntryImplBase : public RouteEntry, Upstream::ResourcePriority priority() const override { return priority_; } const RateLimitPolicy& rateLimitPolicy() const override { return rate_limit_policy_; } const RetryPolicy& retryPolicy() const override { return retry_policy_; } + const InternalRedirectPolicy& internalRedirectPolicy() const override { + return internal_redirect_policy_; + } uint32_t retryShadowBufferLimit() const override { return retry_shadow_buffer_limit_; } const std::vector& shadowPolicies() const override { return shadow_policies_; } const VirtualCluster* virtualCluster(const Http::HeaderMap& headers) const override { @@ -465,11 +513,8 @@ class RouteEntryImplBase : public RouteEntry, bool includeAttemptCountInResponse() const override { return vhost_.includeAttemptCountInResponse(); } + const absl::optional& connectConfig() const override { return connect_config_; } const UpgradeMap& upgradeMap() const override { return upgrade_map_; } - InternalRedirectAction internalRedirectAction() const override 
{ - return internal_redirect_action_; - } - uint32_t maxInternalRedirects() const override { return max_internal_redirects_; } // Router::DirectResponseEntry std::string newPath(const Http::RequestHeaderMap& headers) const override; @@ -494,6 +539,7 @@ class RouteEntryImplBase : public RouteEntry, std::string regex_rewrite_substitution_; const std::string host_rewrite_; bool include_vh_rate_limits_; + absl::optional connect_config_; RouteConstSharedPtr clusterEntry(const Http::HeaderMap& headers, uint64_t random_value) const; @@ -541,6 +587,9 @@ class RouteEntryImplBase : public RouteEntry, Upstream::ResourcePriority priority() const override { return parent_->priority(); } const RateLimitPolicy& rateLimitPolicy() const override { return parent_->rateLimitPolicy(); } const RetryPolicy& retryPolicy() const override { return parent_->retryPolicy(); } + const InternalRedirectPolicy& internalRedirectPolicy() const override { + return parent_->internalRedirectPolicy(); + } uint32_t retryShadowBufferLimit() const override { return parent_->retryShadowBufferLimit(); } const std::vector& shadowPolicies() const override { return parent_->shadowPolicies(); @@ -553,7 +602,7 @@ class RouteEntryImplBase : public RouteEntry, return parent_->maxGrpcTimeout(); } absl::optional grpcTimeoutOffset() const override { - return parent_->maxGrpcTimeout(); + return parent_->grpcTimeoutOffset(); } const MetadataMatchCriteria* metadataMatchCriteria() const override { return parent_->metadataMatchCriteria(); @@ -591,11 +640,10 @@ class RouteEntryImplBase : public RouteEntry, bool includeAttemptCountInResponse() const override { return parent_->includeAttemptCountInResponse(); } - const UpgradeMap& upgradeMap() const override { return parent_->upgradeMap(); } - InternalRedirectAction internalRedirectAction() const override { - return parent_->internalRedirectAction(); + const absl::optional& connectConfig() const override { + return parent_->connectConfig(); } - uint32_t 
maxInternalRedirects() const override { return parent_->maxInternalRedirects(); } + const UpgradeMap& upgradeMap() const override { return parent_->upgradeMap(); } // Router::Route const DirectResponseEntry* directResponseEntry() const override { return nullptr; } @@ -684,6 +732,11 @@ class RouteEntryImplBase : public RouteEntry, const envoy::config::route::v3::RouteAction& route_config, ProtobufMessage::ValidationVisitor& validation_visitor) const; + InternalRedirectPolicyImpl + buildInternalRedirectPolicy(const envoy::config::route::v3::RouteAction& route_config, + ProtobufMessage::ValidationVisitor& validator, + absl::string_view current_route_name) const; + // Default timeout is 15s if nothing is specified in the route config. static const uint64_t DEFAULT_ROUTE_TIMEOUT_MS = 15000; @@ -705,11 +758,14 @@ class RouteEntryImplBase : public RouteEntry, const std::string host_redirect_; const std::string port_redirect_; const std::string path_redirect_; + const bool path_redirect_has_query_; + const bool enable_preserve_query_in_path_redirects_; const bool https_redirect_; const std::string prefix_rewrite_redirect_; const bool strip_query_; const HedgePolicyImpl hedge_policy_; const RetryPolicyImpl retry_policy_; + const InternalRedirectPolicyImpl internal_redirect_policy_; const RateLimitPolicyImpl rate_limit_policy_; std::vector shadow_policies_; const Upstream::ResourcePriority priority_; @@ -739,8 +795,6 @@ class RouteEntryImplBase : public RouteEntry, PerFilterConfigs per_filter_configs_; const std::string route_name_; TimeSource& time_source_; - InternalRedirectAction internal_redirect_action_; - uint32_t max_internal_redirects_{1}; }; /** @@ -824,6 +878,29 @@ class RegexRouteEntryImpl : public RouteEntryImplBase { std::string regex_str_; }; +/** + * Route entry implementation for CONNECT requests. 
+ */ +class ConnectRouteEntryImpl : public RouteEntryImplBase { +public: + ConnectRouteEntryImpl(const VirtualHostImpl& vhost, const envoy::config::route::v3::Route& route, + Server::Configuration::ServerFactoryContext& factory_context, + ProtobufMessage::ValidationVisitor& validator); + + // Router::PathMatchCriterion + const std::string& matcher() const override { return EMPTY_STRING; } + PathMatchType matchType() const override { return PathMatchType::None; } + + // Router::Matchable + RouteConstSharedPtr matches(const Http::RequestHeaderMap& headers, + const StreamInfo::StreamInfo& stream_info, + uint64_t random_value) const override; + + // Router::DirectResponseEntry + void rewritePathHeader(Http::RequestHeaderMap&, bool) const override; + + bool supportsPathlessHeaders() const override { return true; } +}; /** * Wraps the route configuration which matches an incoming request headers to a backend cluster. * This is split out mainly to help with unit testing. @@ -835,21 +912,21 @@ class RouteMatcher { Server::Configuration::ServerFactoryContext& factory_context, ProtobufMessage::ValidationVisitor& validator, bool validate_clusters); - RouteConstSharedPtr route(const Http::RequestHeaderMap& headers, + RouteConstSharedPtr route(const RouteCallback& cb, const Http::RequestHeaderMap& headers, const StreamInfo::StreamInfo& stream_info, uint64_t random_value) const; const VirtualHostImpl* findVirtualHost(const Http::RequestHeaderMap& headers) const; private: using WildcardVirtualHosts = - std::map, std::greater<>>; + std::map, std::greater<>>; using SubstringFunction = std::function; const VirtualHostImpl* findWildcardVirtualHost(const std::string& host, const WildcardVirtualHosts& wildcard_virtual_hosts, SubstringFunction substring_function) const; Stats::ScopePtr vhost_scope_; - std::unordered_map virtual_hosts_; + absl::node_hash_map virtual_hosts_; // std::greater as a minor optimization to iterate from more to less specific // // A note on using an 
unordered_map versus a vector of (string, VirtualHostSharedPtr) pairs: @@ -885,9 +962,13 @@ class ConfigImpl : public Config { RouteConstSharedPtr route(const Http::RequestHeaderMap& headers, const StreamInfo::StreamInfo& stream_info, uint64_t random_value) const override { - return route_matcher_->route(headers, stream_info, random_value); + return route(nullptr, headers, stream_info, random_value); } + RouteConstSharedPtr route(const RouteCallback& cb, const Http::RequestHeaderMap& headers, + const StreamInfo::StreamInfo& stream_info, + uint64_t random_value) const override; + const std::list& internalOnlyHeaders() const override { return internal_only_headers_; } @@ -922,6 +1003,11 @@ class NullConfigImpl : public Config { return nullptr; } + RouteConstSharedPtr route(const RouteCallback&, const Http::RequestHeaderMap&, + const StreamInfo::StreamInfo&, uint64_t) const override { + return nullptr; + } + const std::list& internalOnlyHeaders() const override { return internal_only_headers_; } diff --git a/source/common/router/header_formatter.cc b/source/common/router/header_formatter.cc index 88ac5741b6485..8e40e95b23eb7 100644 --- a/source/common/router/header_formatter.cc +++ b/source/common/router/header_formatter.cc @@ -4,11 +4,11 @@ #include "envoy/router/string_accessor.h" -#include "common/access_log/access_log_formatter.h" #include "common/common/fmt.h" #include "common/common/logger.h" #include "common/common/utility.h" #include "common/config/metadata.h" +#include "common/formatter/substitution_formatter.h" #include "common/http/header_map_impl.h" #include "common/json/json_loader.h" #include "common/stream_info/utility.h" @@ -222,7 +222,7 @@ StreamInfoHeaderFormatter::StreamInfoHeaderFormatter(absl::string_view field_nam : append_(append) { if (field_name == "PROTOCOL") { field_extractor_ = [](const Envoy::StreamInfo::StreamInfo& stream_info) { - return Envoy::AccessLog::AccessLogFormatUtils::protocolToString(stream_info.protocol()); + return 
Envoy::Formatter::SubstitutionFormatUtils::protocolToString(stream_info.protocol()); }; } else if (field_name == "DOWNSTREAM_REMOTE_ADDRESS") { field_extractor_ = [](const StreamInfo::StreamInfo& stream_info) { @@ -288,6 +288,11 @@ StreamInfoHeaderFormatter::StreamInfoHeaderFormatter(absl::string_view field_nam sslConnectionInfoStringHeaderExtractor([](const Ssl::ConnectionInfo& connection_info) { return connection_info.sha256PeerCertificateDigest(); }); + } else if (field_name == "DOWNSTREAM_PEER_FINGERPRINT_1") { + field_extractor_ = + sslConnectionInfoStringHeaderExtractor([](const Ssl::ConnectionInfo& connection_info) { + return connection_info.sha1PeerCertificateDigest(); + }); } else if (field_name == "DOWNSTREAM_PEER_SERIAL") { field_extractor_ = sslConnectionInfoStringHeaderExtractor([](const Ssl::ConnectionInfo& connection_info) { @@ -319,17 +324,17 @@ StreamInfoHeaderFormatter::StreamInfoHeaderFormatter(absl::string_view field_nam const std::string pattern = fmt::format("%{}%", field_name); if (start_time_formatters_.find(pattern) == start_time_formatters_.end()) { start_time_formatters_.emplace( - std::make_pair(pattern, AccessLog::AccessLogFormatParser::parse(pattern))); + std::make_pair(pattern, Formatter::SubstitutionFormatParser::parse(pattern))); } field_extractor_ = [this, pattern](const Envoy::StreamInfo::StreamInfo& stream_info) { const auto& formatters = start_time_formatters_.at(pattern); - static const Http::RequestHeaderMapImpl empty_request_headers; - static const Http::ResponseHeaderMapImpl empty_response_headers; - static const Http::ResponseTrailerMapImpl empty_response_trailers; std::string formatted; for (const auto& formatter : formatters) { - absl::StrAppend(&formatted, formatter->format(empty_request_headers, empty_response_headers, - empty_response_trailers, stream_info)); + absl::StrAppend(&formatted, + formatter->format(*Http::StaticEmptyHeaders::get().request_headers, + *Http::StaticEmptyHeaders::get().response_headers, + 
*Http::StaticEmptyHeaders::get().response_trailers, + stream_info, absl::string_view())); } return formatted; }; @@ -342,8 +347,19 @@ StreamInfoHeaderFormatter::StreamInfoHeaderFormatter(absl::string_view field_nam } else if (absl::StartsWith(field_name, "REQ")) { field_extractor_ = parseRequestHeader(field_name.substr(STATIC_STRLEN("REQ"))); } else if (field_name == "HOSTNAME") { - std::string hostname = Envoy::AccessLog::AccessLogFormatUtils::getHostname(); + std::string hostname = Envoy::Formatter::SubstitutionFormatUtils::getHostname(); field_extractor_ = [hostname](const StreamInfo::StreamInfo&) { return hostname; }; + } else if (field_name == "RESPONSE_FLAGS") { + field_extractor_ = [](const StreamInfo::StreamInfo& stream_info) { + return StreamInfo::ResponseFlagUtils::toShortString(stream_info); + }; + } else if (field_name == "RESPONSE_CODE_DETAILS") { + field_extractor_ = [](const StreamInfo::StreamInfo& stream_info) -> std::string { + if (stream_info.responseCodeDetails().has_value()) { + return stream_info.responseCodeDetails().value(); + } + return ""; + }; } else { throw EnvoyException(fmt::format("field '{}' not supported as custom header", field_name)); } diff --git a/source/common/router/header_formatter.h b/source/common/router/header_formatter.h index eb9f8766f5483..847657dba0d7c 100644 --- a/source/common/router/header_formatter.h +++ b/source/common/router/header_formatter.h @@ -4,8 +4,9 @@ #include #include -#include "envoy/access_log/access_log.h" +#include "envoy/formatter/substitution_formatter.h" +#include "absl/container/node_hash_map.h" #include "absl/strings/string_view.h" namespace Envoy { @@ -45,7 +46,7 @@ class StreamInfoHeaderFormatter : public HeaderFormatter { private: FieldExtractor field_extractor_; const bool append_; - std::unordered_map> + absl::node_hash_map> start_time_formatters_; }; diff --git a/source/common/router/metadatamatchcriteria_impl.cc b/source/common/router/metadatamatchcriteria_impl.cc index 
8739d314cccf2..88cfa4b229fdf 100644 --- a/source/common/router/metadatamatchcriteria_impl.cc +++ b/source/common/router/metadatamatchcriteria_impl.cc @@ -9,7 +9,7 @@ MetadataMatchCriteriaImpl::extractMetadataMatchCriteria(const MetadataMatchCrite // Track locations of each name (from the parent) in v to make it // easier to replace them when the same name exists in matches. - std::unordered_map existing; + absl::node_hash_map existing; if (parent) { for (const auto& it : parent->metadata_match_criteria_) { diff --git a/source/common/router/rds_impl.cc b/source/common/router/rds_impl.cc index 11f45c683dd33..d84eb0a3d4f52 100644 --- a/source/common/router/rds_impl.cc +++ b/source/common/router/rds_impl.cc @@ -8,8 +8,6 @@ #include "envoy/admin/v3/config_dump.pb.h" #include "envoy/api/v2/route.pb.h" #include "envoy/config/core/v3/config_source.pb.h" -#include "envoy/config/route/v3/route.pb.h" -#include "envoy/config/route/v3/route.pb.validate.h" #include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" #include "envoy/service/discovery/v3/discovery.pb.h" @@ -69,9 +67,11 @@ RdsRouteConfigSubscription::RdsRouteConfigSubscription( const std::string& stat_prefix, Envoy::Router::RouteConfigProviderManagerImpl& route_config_provider_manager) : Envoy::Config::SubscriptionBase( - rds.config_source().resource_api_version()), - route_config_name_(rds.route_config_name()), factory_context_(factory_context), - validator_(factory_context.messageValidationContext().dynamicValidationVisitor()), + rds.config_source().resource_api_version(), + factory_context.messageValidationContext().dynamicValidationVisitor(), "name"), + route_config_name_(rds.route_config_name()), + scope_(factory_context.scope().createScope(stat_prefix + "rds." 
+ route_config_name_ + ".")), + factory_context_(factory_context), parent_init_target_(fmt::format("RdsRouteConfigSubscription init {}", route_config_name_), [this]() { local_init_manager_.initialize(local_init_watcher_); }), local_init_watcher_(fmt::format("RDS local-init-watcher {}", rds.route_config_name()), @@ -80,17 +80,17 @@ RdsRouteConfigSubscription::RdsRouteConfigSubscription( fmt::format("RdsRouteConfigSubscription local-init-target {}", route_config_name_), [this]() { subscription_->start({route_config_name_}); }), local_init_manager_(fmt::format("RDS local-init-manager {}", route_config_name_)), - scope_(factory_context.scope().createScope(stat_prefix + "rds." + route_config_name_ + ".")), stat_prefix_(stat_prefix), stats_({ALL_RDS_STATS(POOL_COUNTER(*scope_))}), route_config_provider_manager_(route_config_provider_manager), manager_identifier_(manager_identifier) { const auto resource_name = getResourceName(); subscription_ = factory_context.clusterManager().subscriptionFactory().subscriptionFromConfigSource( - rds.config_source(), Grpc::Common::typeUrl(resource_name), *scope_, *this); + rds.config_source(), Grpc::Common::typeUrl(resource_name), *scope_, *this, + resource_decoder_); local_init_manager_.add(local_init_target_); config_update_info_ = - std::make_unique(factory_context.timeSource(), validator_); + std::make_unique(factory_context.timeSource()); } RdsRouteConfigSubscription::~RdsRouteConfigSubscription() { @@ -105,14 +105,13 @@ RdsRouteConfigSubscription::~RdsRouteConfigSubscription() { } void RdsRouteConfigSubscription::onConfigUpdate( - const Protobuf::RepeatedPtrField& resources, + const std::vector& resources, const std::string& version_info) { if (!validateUpdateSize(resources.size())) { return; } - auto route_config = - MessageUtil::anyConvertAndValidate(resources[0], - validator_); + const auto& route_config = dynamic_cast( + resources[0].get().resource()); if (route_config.name() != route_config_name_) { throw 
EnvoyException(fmt::format("Unexpected RDS configuration (expecting {}): {}", route_config_name_, route_config.name())); @@ -178,7 +177,7 @@ void RdsRouteConfigSubscription::maybeCreateInitManager( } void RdsRouteConfigSubscription::onConfigUpdate( - const Protobuf::RepeatedPtrField& added_resources, + const std::vector& added_resources, const Protobuf::RepeatedPtrField& removed_resources, const std::string&) { if (!removed_resources.empty()) { // TODO(#2500) when on-demand resource loading is supported, an RDS removal may make sense @@ -189,9 +188,7 @@ void RdsRouteConfigSubscription::onConfigUpdate( removed_resources[0]); } if (!added_resources.empty()) { - Protobuf::RepeatedPtrField unwrapped_resource; - *unwrapped_resource.Add() = added_resources[0].resource(); - onConfigUpdate(unwrapped_resource, added_resources[0].version()); + onConfigUpdate(added_resources, added_resources[0].get().version()); } } @@ -280,10 +277,10 @@ void RdsRouteConfigProviderImpl::onConfigUpdate() { auto found = aliases.find(it->alias_); if (found != aliases.end()) { // TODO(dmitri-d) HeaderMapImpl is expensive, need to profile this - Http::RequestHeaderMapImpl host_header; - host_header.setHost(VhdsSubscription::aliasToDomainName(it->alias_)); - const bool host_exists = config->virtualHostExists(host_header); - auto current_cb = it->cb_; + auto host_header = Http::RequestHeaderMapImpl::create(); + host_header->setHost(VhdsSubscription::aliasToDomainName(it->alias_)); + const bool host_exists = config->virtualHostExists(*host_header); + std::weak_ptr current_cb(it->cb_); it->thread_local_dispatcher_.post([current_cb, host_exists] { if (auto cb = current_cb.lock()) { (*cb)(host_exists); @@ -339,7 +336,7 @@ Router::RouteConfigProviderSharedPtr RouteConfigProviderManagerImpl::createRdsRo RdsRouteConfigSubscriptionSharedPtr subscription(new RdsRouteConfigSubscription( rds, manager_identifier, factory_context, stat_prefix, *this)); init_manager.add(subscription->parent_init_target_); - 
std::shared_ptr new_provider{ + RdsRouteConfigProviderImplSharedPtr new_provider{ new RdsRouteConfigProviderImpl(std::move(subscription), factory_context)}; dynamic_route_config_providers_.insert( {manager_identifier, std::weak_ptr(new_provider)}); diff --git a/source/common/router/rds_impl.h b/source/common/router/rds_impl.h index 5481e33980644..cba4793acd5a5 100644 --- a/source/common/router/rds_impl.h +++ b/source/common/router/rds_impl.h @@ -2,14 +2,14 @@ #include #include +#include #include #include -#include -#include #include "envoy/admin/v3/config_dump.pb.h" #include "envoy/config/core/v3/config_source.pb.h" #include "envoy/config/route/v3/route.pb.h" +#include "envoy/config/route/v3/route.pb.validate.h" #include "envoy/config/subscription.h" #include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" #include "envoy/http/codes.h" @@ -34,6 +34,9 @@ #include "common/router/route_config_update_receiver_impl.h" #include "common/router/vhds.h" +#include "absl/container/node_hash_map.h" +#include "absl/container/node_hash_set.h" + namespace Envoy { namespace Router { @@ -116,7 +119,7 @@ class RdsRouteConfigSubscription public: ~RdsRouteConfigSubscription() override; - std::unordered_set& routeConfigProviders() { + absl::node_hash_set& routeConfigProviders() { ASSERT(route_config_providers_.size() == 1 || route_config_providers_.empty()); return route_config_providers_; } @@ -128,17 +131,13 @@ class RdsRouteConfigSubscription private: // Config::SubscriptionCallbacks - void onConfigUpdate(const Protobuf::RepeatedPtrField& resources, + void onConfigUpdate(const std::vector& resources, const std::string& version_info) override; - void onConfigUpdate( - const Protobuf::RepeatedPtrField& added_resources, - const Protobuf::RepeatedPtrField& removed_resources, - const std::string&) override; + void onConfigUpdate(const std::vector& added_resources, + const Protobuf::RepeatedPtrField& removed_resources, + const std::string& 
system_version_info) override; void onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason reason, const EnvoyException* e) override; - std::string resourceName(const ProtobufWkt::Any& resource) override { - return MessageUtil::anyConvert(resource).name(); - } Common::CallbackHandle* addUpdateCallback(std::function callback) { return update_callback_manager_.add(callback); @@ -152,10 +151,11 @@ class RdsRouteConfigSubscription bool validateUpdateSize(int num_resources); - std::unique_ptr subscription_; const std::string route_config_name_; + // This scope must outlive the subscription_ below as the subscription has derived stats. + Stats::ScopePtr scope_; + Envoy::Config::SubscriptionPtr subscription_; Server::Configuration::ServerFactoryContext& factory_context_; - ProtobufMessage::ValidationVisitor& validator_; // Init target used to notify the parent init manager that the subscription [and its sub resource] // is ready. @@ -165,13 +165,12 @@ class RdsRouteConfigSubscription // Target which starts the RDS subscription. Init::TargetImpl local_init_target_; Init::ManagerImpl local_init_manager_; - Stats::ScopePtr scope_; std::string stat_prefix_; RdsStats stats_; RouteConfigProviderManagerImpl& route_config_provider_manager_; const uint64_t manager_identifier_; // TODO(lambdai): Prove that a subscription has exactly one provider and remove the container. 
- std::unordered_set route_config_providers_; + absl::node_hash_set route_config_providers_; VhdsSubscriptionPtr vhds_subscription_; RouteConfigUpdatePtr config_update_info_; Common::CallbackManager<> update_callback_manager_; @@ -231,6 +230,8 @@ class RdsRouteConfigProviderImpl : public RouteConfigProvider, friend class RouteConfigProviderManagerImpl; }; +using RdsRouteConfigProviderImplSharedPtr = std::shared_ptr; + class RouteConfigProviderManagerImpl : public RouteConfigProviderManager, public Singleton::Instance { public: @@ -253,14 +254,16 @@ class RouteConfigProviderManagerImpl : public RouteConfigProviderManager, // TODO(jsedgwick) These two members are prime candidates for the owned-entry list/map // as in ConfigTracker. I.e. the ProviderImpls would have an EntryOwner for these lists // Then the lifetime management stuff is centralized and opaque. - std::unordered_map> + absl::node_hash_map> dynamic_route_config_providers_; - std::unordered_set static_route_config_providers_; + absl::node_hash_set static_route_config_providers_; Server::ConfigTracker::EntryOwnerPtr config_tracker_entry_; friend class RdsRouteConfigSubscription; friend class StaticRouteConfigProviderImpl; }; +using RouteConfigProviderManagerImplPtr = std::unique_ptr; + } // namespace Router } // namespace Envoy diff --git a/source/common/router/retry_state_impl.cc b/source/common/router/retry_state_impl.cc index 2952b7a341573..9912f041709e7 100644 --- a/source/common/router/retry_state_impl.cc +++ b/source/common/router/retry_state_impl.cc @@ -13,6 +13,7 @@ #include "common/http/codes.h" #include "common/http/headers.h" #include "common/http/utility.h" +#include "common/runtime/runtime_features.h" namespace Envoy { namespace Router { @@ -22,6 +23,7 @@ namespace Router { const uint32_t RetryPolicy::RETRY_ON_5XX; const uint32_t RetryPolicy::RETRY_ON_GATEWAY_ERROR; const uint32_t RetryPolicy::RETRY_ON_CONNECT_FAILURE; +const uint32_t RetryPolicy::RETRY_ON_ENVOY_RATE_LIMITED; const uint32_t 
RetryPolicy::RETRY_ON_RETRIABLE_4XX; const uint32_t RetryPolicy::RETRY_ON_RETRIABLE_HEADERS; const uint32_t RetryPolicy::RETRY_ON_RETRIABLE_STATUS_CODES; @@ -31,11 +33,12 @@ const uint32_t RetryPolicy::RETRY_ON_GRPC_DEADLINE_EXCEEDED; const uint32_t RetryPolicy::RETRY_ON_GRPC_RESOURCE_EXHAUSTED; const uint32_t RetryPolicy::RETRY_ON_GRPC_UNAVAILABLE; -RetryStatePtr -RetryStateImpl::create(const RetryPolicy& route_policy, Http::RequestHeaderMap& request_headers, - const Upstream::ClusterInfo& cluster, const VirtualCluster* vcluster, - Runtime::Loader& runtime, Runtime::RandomGenerator& random, - Event::Dispatcher& dispatcher, Upstream::ResourcePriority priority) { +RetryStatePtr RetryStateImpl::create(const RetryPolicy& route_policy, + Http::RequestHeaderMap& request_headers, + const Upstream::ClusterInfo& cluster, + const VirtualCluster* vcluster, Runtime::Loader& runtime, + Random::RandomGenerator& random, Event::Dispatcher& dispatcher, + Upstream::ResourcePriority priority) { RetryStatePtr ret; // We short circuit here and do not bother with an allocation if there is no chance we will retry. 
@@ -45,16 +48,24 @@ RetryStateImpl::create(const RetryPolicy& route_policy, Http::RequestHeaderMap& dispatcher, priority)); } + // Consume all retry related headers to avoid them being propagated to the upstream request_headers.removeEnvoyRetryOn(); request_headers.removeEnvoyRetryGrpcOn(); request_headers.removeEnvoyMaxRetries(); + if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.consume_all_retry_headers")) { + request_headers.removeEnvoyHedgeOnPerTryTimeout(); + request_headers.removeEnvoyRetriableHeaderNames(); + request_headers.removeEnvoyRetriableStatusCodes(); + request_headers.removeEnvoyUpstreamRequestPerTryTimeoutMs(); + } + return ret; } RetryStateImpl::RetryStateImpl(const RetryPolicy& route_policy, Http::RequestHeaderMap& request_headers, const Upstream::ClusterInfo& cluster, const VirtualCluster* vcluster, - Runtime::Loader& runtime, Runtime::RandomGenerator& random, + Runtime::Loader& runtime, Random::RandomGenerator& random, Event::Dispatcher& dispatcher, Upstream::ResourcePriority priority) : cluster_(cluster), vcluster_(vcluster), runtime_(runtime), random_(random), dispatcher_(dispatcher), retry_on_(route_policy.retryOn()), @@ -83,11 +94,10 @@ RetryStateImpl::RetryStateImpl(const RetryPolicy& route_policy, // Merge in the headers. 
if (request_headers.EnvoyRetryOn()) { - retry_on_ |= parseRetryOn(request_headers.EnvoyRetryOn()->value().getStringView()).first; + retry_on_ |= parseRetryOn(request_headers.getEnvoyRetryOnValue()).first; } if (request_headers.EnvoyRetryGrpcOn()) { - retry_on_ |= - parseRetryGrpcOn(request_headers.EnvoyRetryGrpcOn()->value().getStringView()).first; + retry_on_ |= parseRetryGrpcOn(request_headers.getEnvoyRetryGrpcOnValue()).first; } const auto& retriable_request_headers = route_policy.retriableRequestHeaders(); @@ -107,15 +117,15 @@ RetryStateImpl::RetryStateImpl(const RetryPolicy& route_policy, } if (retry_on_ != 0 && request_headers.EnvoyMaxRetries()) { uint64_t temp; - if (absl::SimpleAtoi(request_headers.EnvoyMaxRetries()->value().getStringView(), &temp)) { + if (absl::SimpleAtoi(request_headers.getEnvoyMaxRetriesValue(), &temp)) { // The max retries header takes precedence if set. retries_remaining_ = temp; } } if (request_headers.EnvoyRetriableStatusCodes()) { - for (const auto code : StringUtil::splitToken( - request_headers.EnvoyRetriableStatusCodes()->value().getStringView(), ",")) { + for (const auto code : + StringUtil::splitToken(request_headers.getEnvoyRetriableStatusCodesValue(), ",")) { unsigned int out; if (absl::SimpleAtoi(code, &out)) { retriable_status_codes_.emplace_back(out); @@ -160,6 +170,8 @@ std::pair RetryStateImpl::parseRetryOn(absl::string_view config) ret |= RetryPolicy::RETRY_ON_GATEWAY_ERROR; } else if (retry_on == Http::Headers::get().EnvoyRetryOnValues.ConnectFailure) { ret |= RetryPolicy::RETRY_ON_CONNECT_FAILURE; + } else if (retry_on == Http::Headers::get().EnvoyRetryOnValues.EnvoyRateLimited) { + ret |= RetryPolicy::RETRY_ON_ENVOY_RATE_LIMITED; } else if (retry_on == Http::Headers::get().EnvoyRetryOnValues.Retriable4xx) { ret |= RetryPolicy::RETRY_ON_RETRIABLE_4XX; } else if (retry_on == Http::Headers::get().EnvoyRetryOnValues.RefusedStream) { @@ -281,13 +293,10 @@ RetryStatus 
RetryStateImpl::shouldHedgeRetryPerTryTimeout(DoRetryCallback callba } bool RetryStateImpl::wouldRetryFromHeaders(const Http::ResponseHeaderMap& response_headers) { - if (response_headers.EnvoyOverloaded() != nullptr) { - return false; - } - - // We never retry if the request is rate limited. + // A response that contains the x-envoy-ratelimited header comes from an upstream envoy. + // We retry these only when the envoy-ratelimited policy is in effect. if (response_headers.EnvoyRateLimited() != nullptr) { - return false; + return retry_on_ & RetryPolicy::RETRY_ON_ENVOY_RATE_LIMITED; } if (retry_on_ & RetryPolicy::RETRY_ON_5XX) { diff --git a/source/common/router/retry_state_impl.h b/source/common/router/retry_state_impl.h index 79d355cd6499b..9b0a19a779114 100644 --- a/source/common/router/retry_state_impl.h +++ b/source/common/router/retry_state_impl.h @@ -3,6 +3,7 @@ #include #include +#include "envoy/common/random_generator.h" #include "envoy/event/timer.h" #include "envoy/http/codec.h" #include "envoy/http/header_map.h" @@ -27,7 +28,7 @@ class RetryStateImpl : public RetryState { static RetryStatePtr create(const RetryPolicy& route_policy, Http::RequestHeaderMap& request_headers, const Upstream::ClusterInfo& cluster, const VirtualCluster* vcluster, - Runtime::Loader& runtime, Runtime::RandomGenerator& random, + Runtime::Loader& runtime, Random::RandomGenerator& random, Event::Dispatcher& dispatcher, Upstream::ResourcePriority priority); ~RetryStateImpl() override; @@ -74,13 +75,15 @@ class RetryStateImpl : public RetryState { [&host](auto predicate) { return predicate->shouldSelectAnotherHost(host); }); } - const Upstream::HealthyAndDegradedLoad& - priorityLoadForRetry(const Upstream::PrioritySet& priority_set, - const Upstream::HealthyAndDegradedLoad& original_priority_load) override { + const Upstream::HealthyAndDegradedLoad& priorityLoadForRetry( + const Upstream::PrioritySet& priority_set, + const Upstream::HealthyAndDegradedLoad& original_priority_load, + 
const Upstream::RetryPriority::PriorityMappingFunc& priority_mapping_func) override { if (!retry_priority_) { return original_priority_load; } - return retry_priority_->determinePriorityLoad(priority_set, original_priority_load); + return retry_priority_->determinePriorityLoad(priority_set, original_priority_load, + priority_mapping_func); } uint32_t hostSelectionMaxAttempts() const override { return host_selection_max_attempts_; } @@ -88,7 +91,7 @@ class RetryStateImpl : public RetryState { private: RetryStateImpl(const RetryPolicy& route_policy, Http::RequestHeaderMap& request_headers, const Upstream::ClusterInfo& cluster, const VirtualCluster* vcluster, - Runtime::Loader& runtime, Runtime::RandomGenerator& random, + Runtime::Loader& runtime, Random::RandomGenerator& random, Event::Dispatcher& dispatcher, Upstream::ResourcePriority priority); void enableBackoffTimer(); @@ -99,7 +102,7 @@ class RetryStateImpl : public RetryState { const Upstream::ClusterInfo& cluster_; const VirtualCluster* vcluster_; Runtime::Loader& runtime_; - Runtime::RandomGenerator& random_; + Random::RandomGenerator& random_; Event::Dispatcher& dispatcher_; uint32_t retry_on_{}; uint32_t retries_remaining_{}; diff --git a/source/common/router/route_config_update_receiver_impl.cc b/source/common/router/route_config_update_receiver_impl.cc index bdd3a1e188dc8..144ee75d977bd 100644 --- a/source/common/router/route_config_update_receiver_impl.cc +++ b/source/common/router/route_config_update_receiver_impl.cc @@ -3,8 +3,6 @@ #include #include "envoy/config/route/v3/route.pb.h" -#include "envoy/config/route/v3/route_components.pb.h" -#include "envoy/config/route/v3/route_components.pb.validate.h" #include "envoy/service/discovery/v3/discovery.pb.h" #include "common/common/assert.h" @@ -40,26 +38,16 @@ void RouteConfigUpdateReceiverImpl::onUpdateCommon( } bool RouteConfigUpdateReceiverImpl::onVhdsUpdate( - const Protobuf::RepeatedPtrField& added_resources, + const VirtualHostRefVector& 
added_vhosts, const std::set& added_resource_ids, const Protobuf::RepeatedPtrField& removed_resources, const std::string& version_info) { - collectResourceIdsInUpdate(added_resources); + resource_ids_in_last_update_ = added_resource_ids; const bool removed = removeVhosts(vhds_virtual_hosts_, removed_resources); - const bool updated = updateVhosts(vhds_virtual_hosts_, added_resources); + const bool updated = updateVhosts(vhds_virtual_hosts_, added_vhosts); onUpdateCommon(route_config_proto_, version_info); return removed || updated || !resource_ids_in_last_update_.empty(); } -void RouteConfigUpdateReceiverImpl::collectResourceIdsInUpdate( - const Protobuf::RepeatedPtrField& added_resources) { - resource_ids_in_last_update_.clear(); - for (const auto& resource : added_resources) { - resource_ids_in_last_update_.emplace(resource.name()); - std::copy(resource.aliases().begin(), resource.aliases().end(), - std::inserter(resource_ids_in_last_update_, resource_ids_in_last_update_.end())); - } -} - void RouteConfigUpdateReceiverImpl::initializeRdsVhosts( const envoy::config::route::v3::RouteConfiguration& route_configuration) { rds_virtual_hosts_.clear(); @@ -84,22 +72,14 @@ bool RouteConfigUpdateReceiverImpl::removeVhosts( bool RouteConfigUpdateReceiverImpl::updateVhosts( std::map& vhosts, - const Protobuf::RepeatedPtrField& added_resources) { + const VirtualHostRefVector& added_vhosts) { bool vhosts_added = false; - for (const auto& resource : added_resources) { - // the management server returns empty resources (they contain no virtual hosts in this case) - // for aliases that it couldn't resolve. 
- if (onDemandFetchFailed(resource)) { - continue; - } - envoy::config::route::v3::VirtualHost vhost = - MessageUtil::anyConvertAndValidate( - resource.resource(), validation_visitor_); - auto found = vhosts.find(vhost.name()); + for (const auto& vhost : added_vhosts) { + auto found = vhosts.find(vhost.get().name()); if (found != vhosts.end()) { vhosts.erase(found); } - vhosts.emplace(vhost.name(), vhost); + vhosts.emplace(vhost.get().name(), vhost.get()); vhosts_added = true; } return vhosts_added; @@ -118,10 +98,5 @@ void RouteConfigUpdateReceiverImpl::rebuildRouteConfig( } } -bool RouteConfigUpdateReceiverImpl::onDemandFetchFailed( - const envoy::service::discovery::v3::Resource& resource) const { - return !resource.has_resource(); -} - } // namespace Router } // namespace Envoy diff --git a/source/common/router/route_config_update_receiver_impl.h b/source/common/router/route_config_update_receiver_impl.h index dc5cee4a422fa..a0e44f7975da7 100644 --- a/source/common/router/route_config_update_receiver_impl.h +++ b/source/common/router/route_config_update_receiver_impl.h @@ -1,7 +1,6 @@ #pragma once #include -#include #include "envoy/config/route/v3/route.pb.h" #include "envoy/config/route/v3/route_components.pb.h" @@ -17,19 +16,15 @@ namespace Router { class RouteConfigUpdateReceiverImpl : public RouteConfigUpdateReceiver { public: - RouteConfigUpdateReceiverImpl(TimeSource& time_source, - ProtobufMessage::ValidationVisitor& validation_visitor) + RouteConfigUpdateReceiverImpl(TimeSource& time_source) : time_source_(time_source), last_config_hash_(0ull), last_vhds_config_hash_(0ul), - validation_visitor_(validation_visitor), vhds_configuration_changed_(true) {} + vhds_configuration_changed_(true) {} void initializeRdsVhosts(const envoy::config::route::v3::RouteConfiguration& route_configuration); - void collectResourceIdsInUpdate( - const Protobuf::RepeatedPtrField& added_resources); bool removeVhosts(std::map& vhosts, const Protobuf::RepeatedPtrField& 
removed_vhost_names); - bool updateVhosts( - std::map& vhosts, - const Protobuf::RepeatedPtrField& added_resources); + bool updateVhosts(std::map& vhosts, + const VirtualHostRefVector& added_vhosts); void rebuildRouteConfig( const std::map& rds_vhosts, const std::map& vhds_vhosts, @@ -41,10 +36,10 @@ class RouteConfigUpdateReceiverImpl : public RouteConfigUpdateReceiver { // Router::RouteConfigUpdateReceiver bool onRdsUpdate(const envoy::config::route::v3::RouteConfiguration& rc, const std::string& version_info) override; - bool onVhdsUpdate( - const Protobuf::RepeatedPtrField& added_resources, - const Protobuf::RepeatedPtrField& removed_resources, - const std::string& version_info) override; + bool onVhdsUpdate(const VirtualHostRefVector& added_vhosts, + const std::set& added_resource_ids, + const Protobuf::RepeatedPtrField& removed_resources, + const std::string& version_info) override; const std::string& routeConfigName() const override { return route_config_proto_.name(); } const std::string& configVersion() const override { return last_config_version_; } uint64_t configHash() const override { return last_config_hash_; } @@ -70,7 +65,6 @@ class RouteConfigUpdateReceiverImpl : public RouteConfigUpdateReceiver { std::map rds_virtual_hosts_; std::map vhds_virtual_hosts_; absl::optional config_info_; - ProtobufMessage::ValidationVisitor& validation_visitor_; std::set resource_ids_in_last_update_; bool vhds_configuration_changed_; }; diff --git a/source/common/router/router.cc b/source/common/router/router.cc index de4278259e656..88db133d6e2c2 100644 --- a/source/common/router/router.cc +++ b/source/common/router/router.cc @@ -15,15 +15,18 @@ #include "envoy/upstream/upstream.h" #include "common/common/assert.h" +#include "common/common/cleanup.h" #include "common/common/empty_string.h" #include "common/common/enum_to_int.h" #include "common/common/scope_tracker.h" #include "common/common/utility.h" +#include "common/config/utility.h" #include "common/grpc/common.h" 
#include "common/http/codes.h" #include "common/http/header_map_impl.h" #include "common/http/headers.h" #include "common/http/message_impl.h" +#include "common/http/url_utility.h" #include "common/http/utility.h" #include "common/network/application_protocol.h" #include "common/network/transport_socket_options_impl.h" @@ -33,6 +36,7 @@ #include "common/router/debug_config.h" #include "common/router/retry_state_impl.h" #include "common/router/upstream_request.h" +#include "common/runtime/runtime_features.h" #include "common/runtime/runtime_impl.h" #include "common/stream_info/uint32_accessor_impl.h" #include "common/tracing/http_tracer_impl.h" @@ -48,9 +52,7 @@ uint32_t getLength(const Buffer::Instance* instance) { return instance ? instanc bool schemeIsHttp(const Http::RequestHeaderMap& downstream_headers, const Network::Connection& connection) { - if (downstream_headers.ForwardedProto() && - downstream_headers.ForwardedProto()->value().getStringView() == - Http::Headers::get().SchemeValues.Http) { + if (downstream_headers.getForwardedProtoValue() == Http::Headers::get().SchemeValues.Http) { return true; } if (!connection.ssl()) { @@ -59,57 +61,6 @@ bool schemeIsHttp(const Http::RequestHeaderMap& downstream_headers, return false; } -bool convertRequestHeadersForInternalRedirect(Http::RequestHeaderMap& downstream_headers, - StreamInfo::FilterState& filter_state, - uint32_t max_internal_redirects, - const Http::HeaderEntry& internal_redirect, - const Network::Connection& connection) { - // Make sure the redirect response contains a URL to redirect to. - if (internal_redirect.value().getStringView().length() == 0) { - return false; - } - - Http::Utility::Url absolute_url; - if (!absolute_url.initialize(internal_redirect.value().getStringView(), false)) { - return false; - } - - // Don't allow serving TLS responses over plaintext. 
- bool scheme_is_http = schemeIsHttp(downstream_headers, connection); - if (scheme_is_http && absolute_url.scheme() == Http::Headers::get().SchemeValues.Https) { - return false; - } - - // Make sure that performing the redirect won't result in exceeding the configured number of - // redirects allowed for this route. - if (!filter_state.hasData(NumInternalRedirectsFilterStateName)) { - filter_state.setData( - NumInternalRedirectsFilterStateName, std::make_shared(0), - StreamInfo::FilterState::StateType::Mutable, StreamInfo::FilterState::LifeSpan::Request); - } - StreamInfo::UInt32Accessor& num_internal_redirect = - filter_state.getDataMutable(NumInternalRedirectsFilterStateName); - - if (num_internal_redirect.value() >= max_internal_redirects) { - return false; - } - num_internal_redirect.increment(); - - // Preserve the original request URL for the second pass. - downstream_headers.setEnvoyOriginalUrl( - absl::StrCat(scheme_is_http ? Http::Headers::get().SchemeValues.Http - : Http::Headers::get().SchemeValues.Https, - "://", downstream_headers.Host()->value().getStringView(), - downstream_headers.Path()->value().getStringView())); - - // Replace the original host, scheme and path. - downstream_headers.setScheme(absolute_url.scheme()); - downstream_headers.setHost(absolute_url.hostAndPort()); - downstream_headers.setPath(absolute_url.pathAndQueryParams()); - - return true; -} - constexpr uint64_t TimeoutPrecisionFactor = 100; } // namespace @@ -215,10 +166,10 @@ FilterUtility::finalTimeout(const RouteEntry& route, Http::RequestHeaderMap& req } // See if there is a per try/retry timeout. If it's >= global we just ignore it. 
- const Http::HeaderEntry* per_try_timeout_entry = - request_headers.EnvoyUpstreamRequestPerTryTimeoutMs(); - if (per_try_timeout_entry) { - if (absl::SimpleAtoi(per_try_timeout_entry->value().getStringView(), &header_timeout)) { + const absl::string_view per_try_timeout_entry = + request_headers.getEnvoyUpstreamRequestPerTryTimeoutMsValue(); + if (!per_try_timeout_entry.empty()) { + if (absl::SimpleAtoi(per_try_timeout_entry, &header_timeout)) { timeout.per_try_timeout_ = std::chrono::milliseconds(header_timeout); } request_headers.removeEnvoyUpstreamRequestPerTryTimeoutMs(); @@ -379,7 +330,6 @@ void Filter::chargeUpstreamCode(Http::Code code, Http::FilterHeadersStatus Filter::decodeHeaders(Http::RequestHeaderMap& headers, bool end_stream) { // Do a common header check. We make sure that all outgoing requests have all HTTP/2 headers. // These get stripped by HTTP/1 codec where applicable. - ASSERT(headers.Path()); ASSERT(headers.Method()); ASSERT(headers.Host()); @@ -395,7 +345,7 @@ Http::FilterHeadersStatus Filter::decodeHeaders(Http::RequestHeaderMap& headers, : nullptr; // TODO: Maybe add a filter API for this. - grpc_request_ = Grpc::Common::hasGrpcContentType(headers); + grpc_request_ = Grpc::Common::isGrpcRequestHeaders(headers); // Only increment rq total stat if we actually decode headers here. This does not count requests // that get handled by earlier filters. 
@@ -410,8 +360,7 @@ Http::FilterHeadersStatus Filter::decodeHeaders(Http::RequestHeaderMap& headers, route_ = callbacks_->route(); if (!route_) { config_.stats_.no_route_.inc(); - ENVOY_STREAM_LOG(debug, "no cluster match for URL '{}'", *callbacks_, - headers.Path()->value().getStringView()); + ENVOY_STREAM_LOG(debug, "no cluster match for URL '{}'", *callbacks_, headers.getPathValue()); callbacks_->streamInfo().setResponseFlag(StreamInfo::ResponseFlag::NoRouteFound); callbacks_->sendLocalReply(Http::Code::NotFound, "", modify_headers, absl::nullopt, @@ -428,7 +377,10 @@ Http::FilterHeadersStatus Filter::decodeHeaders(Http::RequestHeaderMap& headers, direct_response->responseCode(), direct_response->responseBody(), [this, direct_response, &request_headers = headers](Http::ResponseHeaderMap& response_headers) -> void { - const auto new_path = direct_response->newPath(request_headers); + std::string new_path; + if (request_headers.Path()) { + new_path = direct_response->newPath(request_headers); + } // See https://tools.ietf.org/html/rfc7231#section-7.1.2. const auto add_location = direct_response->responseCode() == Http::Code::Created || @@ -473,7 +425,7 @@ Http::FilterHeadersStatus Filter::decodeHeaders(Http::RequestHeaderMap& headers, // Set up stat prefixes, etc. 
request_vcluster_ = route_entry_->virtualCluster(headers); ENVOY_STREAM_LOG(debug, "cluster '{}' match for URL '{}'", *callbacks_, - route_entry_->clusterName(), headers.Path()->value().getStringView()); + route_entry_->clusterName(), headers.getPathValue()); if (config_.strict_check_headers_ != nullptr) { for (const auto& header : *config_.strict_check_headers_) { @@ -508,7 +460,8 @@ Http::FilterHeadersStatus Filter::decodeHeaders(Http::RequestHeaderMap& headers, Http::Code::ServiceUnavailable, "maintenance mode", [modify_headers, this](Http::ResponseHeaderMap& headers) { if (!config_.suppress_envoy_headers_) { - headers.setReferenceEnvoyOverloaded(Http::Headers::get().EnvoyOverloadedValues.True); + headers.addReference(Http::Headers::get().EnvoyOverloaded, + Http::Headers::get().EnvoyOverloadedValues.True); } // Note: append_cluster_info does not respect suppress_envoy_headers. modify_headers(headers); @@ -522,8 +475,7 @@ Http::FilterHeadersStatus Filter::decodeHeaders(Http::RequestHeaderMap& headers, const auto& upstream_http_protocol_options = cluster_->upstreamHttpProtocolOptions(); if (upstream_http_protocol_options.has_value()) { - const auto parsed_authority = - Http::Utility::parseAuthority(headers.Host()->value().getStringView()); + const auto parsed_authority = Http::Utility::parseAuthority(headers.getHostValue()); if (!parsed_authority.is_ip_address_ && upstream_http_protocol_options.value().auto_sni()) { callbacks_->streamInfo().filterState()->setData( Network::UpstreamServerName::key(), @@ -540,15 +492,15 @@ Http::FilterHeadersStatus Filter::decodeHeaders(Http::RequestHeaderMap& headers, } } - Http::ConnectionPool::Instance* http_pool = getHttpConnPool(); - Upstream::HostDescriptionConstSharedPtr host; + transport_socket_options_ = Network::TransportSocketOptionsUtility::fromFilterState( + *callbacks_->streamInfo().filterState()); + std::unique_ptr generic_conn_pool = createConnPool(); - if (http_pool) { - host = http_pool->host(); - } else { + if 
(!generic_conn_pool) { sendNoHealthyUpstreamResponse(); return Http::FilterHeadersStatus::StopIteration; } + Upstream::HostDescriptionConstSharedPtr host = generic_conn_pool->host(); if (debug_config && debug_config->append_upstream_host_) { // The hostname and address will be appended to any local or upstream responses from this point, @@ -636,8 +588,8 @@ Http::FilterHeadersStatus Filter::decodeHeaders(Http::RequestHeaderMap& headers, modify_headers_ = modify_headers; UpstreamRequestPtr upstream_request = - std::make_unique(*this, std::make_unique(*http_pool)); - upstream_request->moveIntoList(std::move(upstream_request), upstream_requests_); + std::make_unique(*this, std::move(generic_conn_pool)); + LinkedList::moveIntoList(std::move(upstream_request), upstream_requests_); upstream_requests_.front()->encodeHeaders(end_stream); if (end_stream) { onRequestComplete(); @@ -646,15 +598,20 @@ Http::FilterHeadersStatus Filter::decodeHeaders(Http::RequestHeaderMap& headers, return Http::FilterHeadersStatus::StopIteration; } -Http::ConnectionPool::Instance* Filter::getHttpConnPool() { - // Choose protocol based on cluster configuration and downstream connection - // Note: Cluster may downgrade HTTP2 to HTTP1 based on runtime configuration. 
- Http::Protocol protocol = cluster_->upstreamHttpProtocol(callbacks_->streamInfo().protocol()); - transport_socket_options_ = Network::TransportSocketOptionsUtility::fromFilterState( - *callbacks_->streamInfo().filterState()); - - return config_.cm_.httpConnPoolForCluster(route_entry_->clusterName(), route_entry_->priority(), - protocol, this); +std::unique_ptr Filter::createConnPool() { + GenericConnPoolFactory* factory = nullptr; + if (cluster_->upstreamConfig().has_value()) { + factory = &Envoy::Config::Utility::getAndCheckFactory( + cluster_->upstreamConfig().value()); + } else { + factory = &Envoy::Config::Utility::getAndCheckFactoryByName( + "envoy.filters.connection_pools.http.generic"); + } + const bool should_tcp_proxy = + route_entry_->connectConfig().has_value() && + downstream_headers_->getMethodValue() == Http::Headers::get().MethodValues.Connect; + return factory->createGenericConnPool(config_.cm_, should_tcp_proxy, *route_entry_, + callbacks_->streamInfo().protocol(), this); } void Filter::sendNoHealthyUpstreamResponse() { @@ -936,6 +893,31 @@ void Filter::onPerTryTimeout(UpstreamRequest& upstream_request) { StreamInfo::ResponseCodeDetails::get().UpstreamPerTryTimeout); } +void Filter::onStreamMaxDurationReached(UpstreamRequest& upstream_request) { + upstream_request.resetStream(); + + if (maybeRetryReset(Http::StreamResetReason::LocalReset, upstream_request)) { + return; + } + + upstream_request.removeFromList(upstream_requests_); + cleanup(); + + if (downstream_response_started_ && + !Runtime::runtimeFeatureEnabled("envoy.reloadable_features.allow_500_after_100")) { + callbacks_->streamInfo().setResponseCodeDetails( + StreamInfo::ResponseCodeDetails::get().UpstreamMaxStreamDurationReached); + callbacks_->resetStream(); + } else { + callbacks_->streamInfo().setResponseFlag( + StreamInfo::ResponseFlag::UpstreamMaxStreamDurationReached); + // sendLocalReply may instead reset the stream if downstream_response_started_ is true. 
+ callbacks_->sendLocalReply( + Http::Code::RequestTimeout, "upstream max stream duration reached", modify_headers_, + absl::nullopt, StreamInfo::ResponseCodeDetails::get().UpstreamMaxStreamDurationReached); + } +} + void Filter::updateOutlierDetection(Upstream::Outlier::Result result, UpstreamRequest& upstream_request, absl::optional code) { @@ -966,12 +948,13 @@ void Filter::chargeUpstreamAbort(Http::Code code, bool dropped, UpstreamRequest& void Filter::onUpstreamTimeoutAbort(StreamInfo::ResponseFlag response_flags, absl::string_view details) { - if (cluster_->timeoutBudgetStats().has_value()) { + Upstream::ClusterTimeoutBudgetStatsOptRef tb_stats = cluster()->timeoutBudgetStats(); + if (tb_stats.has_value()) { Event::Dispatcher& dispatcher = callbacks_->dispatcher(); std::chrono::milliseconds response_time = std::chrono::duration_cast( dispatcher.timeSource().monotonicTime() - downstream_request_complete_time_); - cluster_->timeoutBudgetStats()->upstream_rq_timeout_budget_percent_used_.recordValue( + tb_stats->get().upstream_rq_timeout_budget_percent_used_.recordValue( FilterUtility::percentageOfTimeout(response_time, timeout_.global_timeout_)); } @@ -984,7 +967,8 @@ void Filter::onUpstreamAbort(Http::Code code, StreamInfo::ResponseFlag response_ absl::string_view body, bool dropped, absl::string_view details) { // If we have not yet sent anything downstream, send a response with an appropriate status code. // Otherwise just reset the ongoing response. - if (downstream_response_started_) { + if (downstream_response_started_ && + !Runtime::runtimeFeatureEnabled("envoy.reloadable_features.allow_500_after_100")) { // This will destroy any created retry timers. 
callbacks_->streamInfo().setResponseCodeDetails(details); cleanup(); @@ -995,11 +979,13 @@ void Filter::onUpstreamAbort(Http::Code code, StreamInfo::ResponseFlag response_ callbacks_->streamInfo().setResponseFlag(response_flags); + // sendLocalReply may instead reset the stream if downstream_response_started_ is true. callbacks_->sendLocalReply( code, body, [dropped, this](Http::ResponseHeaderMap& headers) { if (dropped && !config_.suppress_envoy_headers_) { - headers.setReferenceEnvoyOverloaded(Http::Headers::get().EnvoyOverloadedValues.True); + headers.addReference(Http::Headers::get().EnvoyOverloaded, + Http::Headers::get().EnvoyOverloadedValues.True); } modify_headers_(headers); }, @@ -1024,6 +1010,7 @@ bool Filter::maybeRetryReset(Http::StreamResetReason reset_reason, if (upstream_request.upstreamHost()) { upstream_request.upstreamHost()->stats().rq_error_.inc(); } + upstream_request.removeFromList(upstream_requests_); return true; } else if (retry_status == RetryStatus::NoOverflow) { @@ -1038,8 +1025,9 @@ bool Filter::maybeRetryReset(Http::StreamResetReason reset_reason, void Filter::onUpstreamReset(Http::StreamResetReason reset_reason, absl::string_view transport_failure_reason, UpstreamRequest& upstream_request) { - ENVOY_STREAM_LOG(debug, "upstream reset: reset reason {}", *callbacks_, - Http::Utility::resetReasonToString(reset_reason)); + ENVOY_STREAM_LOG(debug, "upstream reset: reset reason: {}, transport failure reason: {}", + *callbacks_, Http::Utility::resetReasonToString(reset_reason), + transport_failure_reason); // TODO: The reset may also come from upstream over the wire. In this case it should be // treated as external origin error and distinguished from local origin error. 
@@ -1063,10 +1051,16 @@ void Filter::onUpstreamReset(Http::StreamResetReason reset_reason, } const StreamInfo::ResponseFlag response_flags = streamResetReasonToResponseFlag(reset_reason); + const std::string body = absl::StrCat("upstream connect error or disconnect/reset before headers. reset reason: ", - Http::Utility::resetReasonToString(reset_reason)); - + Http::Utility::resetReasonToString(reset_reason), + Runtime::runtimeFeatureEnabled( + "envoy.reloadable_features.http_transport_failure_reason_in_body") && + !transport_failure_reason.empty() + ? ", transport failure reason: " + : "", + transport_failure_reason); callbacks_->streamInfo().setUpstreamTransportFailureReason(transport_failure_reason); const std::string& basic_details = downstream_response_started_ ? StreamInfo::ResponseCodeDetails::get().LateUpstreamReset @@ -1138,7 +1132,17 @@ void Filter::onUpstream100ContinueHeaders(Http::ResponseHeaderMapPtr&& headers, // the complexity until someone asks for it. retry_state_.reset(); - callbacks_->encode100ContinueHeaders(std::move(headers)); + // We coalesce 100-continue headers here, to prevent encoder filters and HCM from having to worry + // about this. This is done in the router filter, rather than UpstreamRequest, since we want to + // potentially coalesce across retries and multiple upstream requests in the future, even though + // we currently don't support retry after 100. + // It's plausible that this functionality might need to move to HCM in the future for internal + // redirects, but we would need to maintain the "only call encode100ContinueHeaders() once" + // invariant. + if (!downstream_100_continue_headers_encoded_) { + downstream_100_continue_headers_encoded_ = true; + callbacks_->encode100ContinueHeaders(std::move(headers)); + } } void Filter::resetAll() { @@ -1165,7 +1169,7 @@ void Filter::resetOtherUpstreams(UpstreamRequest& upstream_request) { ASSERT(final_upstream_request); // Now put the final request back on this list. 
- final_upstream_request->moveIntoList(std::move(final_upstream_request), upstream_requests_); + LinkedList::moveIntoList(std::move(final_upstream_request), upstream_requests_); } void Filter::onUpstreamHeaders(uint64_t response_code, Http::ResponseHeaderMapPtr&& headers, @@ -1215,7 +1219,7 @@ void Filter::onUpstreamHeaders(uint64_t response_code, Http::ResponseHeaderMapPt code_stats.chargeBasicResponseStat(cluster_->statsScope(), config_.retry_, static_cast(response_code)); - if (!end_stream) { + if (!end_stream || !upstream_request.encodeComplete()) { upstream_request.resetStream(); } upstream_request.removeFromList(upstream_requests_); @@ -1232,8 +1236,9 @@ void Filter::onUpstreamHeaders(uint64_t response_code, Http::ResponseHeaderMapPt } } - if (static_cast(response_code) == Http::Code::Found && - route_entry_->internalRedirectAction() == InternalRedirectAction::Handle && + if (route_entry_->internalRedirectPolicy().enabled() && + route_entry_->internalRedirectPolicy().shouldRedirectForResponseCode( + static_cast(response_code)) && setupRedirect(*headers, upstream_request)) { return; // If the redirect could not be handled, fail open and let it pass to the @@ -1353,8 +1358,9 @@ void Filter::onUpstreamComplete(UpstreamRequest& upstream_request) { std::chrono::milliseconds response_time = std::chrono::duration_cast( dispatcher.timeSource().monotonicTime() - downstream_request_complete_time_); - if (cluster_->timeoutBudgetStats().has_value()) { - cluster_->timeoutBudgetStats()->upstream_rq_timeout_budget_percent_used_.recordValue( + Upstream::ClusterTimeoutBudgetStatsOptRef tb_stats = cluster()->timeoutBudgetStats(); + if (tb_stats.has_value()) { + tb_stats->get().upstream_rq_timeout_budget_percent_used_.recordValue( FilterUtility::percentageOfTimeout(response_time, timeout_.global_timeout_)); } @@ -1416,15 +1422,11 @@ bool Filter::setupRedirect(const Http::ResponseHeaderMap& headers, attempting_internal_redirect_with_complete_stream_ = 
upstream_request.upstreamTiming().last_upstream_rx_byte_received_ && downstream_end_stream_; - const StreamInfo::FilterStateSharedPtr& filter_state = callbacks_->streamInfo().filterState(); - // Redirects are not supported for streaming requests yet. if (downstream_end_stream_ && !callbacks_->decodingBuffer() && // Redirects with body not yet supported. location != nullptr && - convertRequestHeadersForInternalRedirect(*downstream_headers_, *filter_state, - route_entry_->maxInternalRedirects(), *location, - *callbacks_->connection()) && + convertRequestHeadersForInternalRedirect(*downstream_headers_, *location) && callbacks_->recreateStream()) { cluster_->stats().upstream_internal_redirect_succeeded_total_.inc(); return true; @@ -1437,6 +1439,96 @@ bool Filter::setupRedirect(const Http::ResponseHeaderMap& headers, return false; } +bool Filter::convertRequestHeadersForInternalRedirect(Http::RequestHeaderMap& downstream_headers, + const Http::HeaderEntry& internal_redirect) { + if (!downstream_headers.Path()) { + ENVOY_STREAM_LOG(trace, "no path in downstream_headers", *callbacks_); + return false; + } + + // Make sure the redirect response contains a URL to redirect to. + if (internal_redirect.value().getStringView().empty()) { + config_.stats_.passthrough_internal_redirect_bad_location_.inc(); + return false; + } + Http::Utility::Url absolute_url; + if (!absolute_url.initialize(internal_redirect.value().getStringView(), false)) { + config_.stats_.passthrough_internal_redirect_bad_location_.inc(); + return false; + } + + const auto& policy = route_entry_->internalRedirectPolicy(); + // Don't allow serving TLS responses over plaintext unless allowed by policy. 
+ const bool scheme_is_http = schemeIsHttp(downstream_headers, *callbacks_->connection()); + const bool target_is_http = absolute_url.scheme() == Http::Headers::get().SchemeValues.Http; + if (!policy.isCrossSchemeRedirectAllowed() && scheme_is_http != target_is_http) { + config_.stats_.passthrough_internal_redirect_unsafe_scheme_.inc(); + return false; + } + + const StreamInfo::FilterStateSharedPtr& filter_state = callbacks_->streamInfo().filterState(); + // Make sure that performing the redirect won't result in exceeding the configured number of + // redirects allowed for this route. + if (!filter_state->hasData(NumInternalRedirectsFilterStateName)) { + filter_state->setData( + NumInternalRedirectsFilterStateName, std::make_shared(0), + StreamInfo::FilterState::StateType::Mutable, StreamInfo::FilterState::LifeSpan::Request); + } + StreamInfo::UInt32Accessor& num_internal_redirect = + filter_state->getDataMutable(NumInternalRedirectsFilterStateName); + + if (num_internal_redirect.value() >= policy.maxInternalRedirects()) { + config_.stats_.passthrough_internal_redirect_too_many_redirects_.inc(); + return false; + } + // Copy the old values, so they can be restored if the redirect fails. + const std::string original_host(downstream_headers.getHostValue()); + const std::string original_path(downstream_headers.getPathValue()); + const bool scheme_is_set = (downstream_headers.Scheme() != nullptr); + Cleanup restore_original_headers( + [&downstream_headers, original_host, original_path, scheme_is_set, scheme_is_http]() { + downstream_headers.setHost(original_host); + downstream_headers.setPath(original_path); + if (scheme_is_set) { + downstream_headers.setScheme(scheme_is_http ? Http::Headers::get().SchemeValues.Http + : Http::Headers::get().SchemeValues.Https); + } + }); + + // Replace the original host, scheme and path. 
+ downstream_headers.setScheme(absolute_url.scheme()); + downstream_headers.setHost(absolute_url.hostAndPort()); + downstream_headers.setPath(absolute_url.pathAndQueryParams()); + + callbacks_->clearRouteCache(); + const auto route = callbacks_->route(); + // Don't allow a redirect to a non existing route. + if (!route) { + config_.stats_.passthrough_internal_redirect_no_route_.inc(); + return false; + } + + const auto& route_name = route->routeEntry()->routeName(); + for (const auto& predicate : policy.predicates()) { + if (!predicate->acceptTargetRoute(*filter_state, route_name, !scheme_is_http, + !target_is_http)) { + config_.stats_.passthrough_internal_redirect_predicate_.inc(); + ENVOY_STREAM_LOG(trace, "rejecting redirect targeting {}, by {} predicate", *callbacks_, + route_name, predicate->name()); + return false; + } + } + + num_internal_redirect.increment(); + restore_original_headers.cancel(); + // Preserve the original request URL for the second pass. + downstream_headers.setEnvoyOriginalUrl(absl::StrCat(scheme_is_http + ? 
Http::Headers::get().SchemeValues.Http + : Http::Headers::get().SchemeValues.Https, + "://", original_host, original_path)); + return true; +} + void Filter::doRetry() { ENVOY_STREAM_LOG(debug, "performing retry", *callbacks_); @@ -1444,26 +1536,22 @@ void Filter::doRetry() { attempt_count_++; ASSERT(pending_retries_ > 0); pending_retries_--; - UpstreamRequestPtr upstream_request; - Http::ConnectionPool::Instance* conn_pool = getHttpConnPool(); - if (conn_pool) { - upstream_request = - std::make_unique(*this, std::make_unique(*conn_pool)); - } - - if (!upstream_request) { + std::unique_ptr generic_conn_pool = createConnPool(); + if (!generic_conn_pool) { sendNoHealthyUpstreamResponse(); cleanup(); return; } + UpstreamRequestPtr upstream_request = + std::make_unique(*this, std::move(generic_conn_pool)); if (include_attempt_count_in_request_) { downstream_headers_->setEnvoyAttemptCount(attempt_count_); } UpstreamRequest* upstream_request_tmp = upstream_request.get(); - upstream_request->moveIntoList(std::move(upstream_request), upstream_requests_); + LinkedList::moveIntoList(std::move(upstream_request), upstream_requests_); upstream_requests_.front()->encodeHeaders(!callbacks_->decodingBuffer() && !downstream_trailers_ && downstream_end_stream_); // It's possible we got immediately reset which means the upstream request we just @@ -1490,7 +1578,7 @@ uint32_t Filter::numRequestsAwaitingHeaders() { RetryStatePtr ProdFilter::createRetryState(const RetryPolicy& policy, Http::RequestHeaderMap& request_headers, const Upstream::ClusterInfo& cluster, const VirtualCluster* vcluster, - Runtime::Loader& runtime, Runtime::RandomGenerator& random, + Runtime::Loader& runtime, Random::RandomGenerator& random, Event::Dispatcher& dispatcher, Upstream::ResourcePriority priority) { return RetryStateImpl::create(policy, request_headers, cluster, vcluster, runtime, random, dispatcher, priority); diff --git a/source/common/router/router.h b/source/common/router/router.h index 
9f532d08fb5f6..65ef129ccf3ad 100644 --- a/source/common/router/router.h +++ b/source/common/router/router.h @@ -6,6 +6,7 @@ #include #include +#include "envoy/common/random_generator.h" #include "envoy/extensions/filters/http/router/v3/router.pb.h" #include "envoy/http/codec.h" #include "envoy/http/codes.h" @@ -41,6 +42,11 @@ namespace Router { */ // clang-format off #define ALL_ROUTER_STATS(COUNTER) \ + COUNTER(passthrough_internal_redirect_bad_location) \ + COUNTER(passthrough_internal_redirect_unsafe_scheme) \ + COUNTER(passthrough_internal_redirect_too_many_redirects) \ + COUNTER(passthrough_internal_redirect_no_route) \ + COUNTER(passthrough_internal_redirect_predicate) \ COUNTER(no_route) \ COUNTER(no_cluster) \ COUNTER(rq_redirect) \ @@ -175,7 +181,7 @@ class FilterConfig { public: FilterConfig(const std::string& stat_prefix, const LocalInfo::LocalInfo& local_info, Stats::Scope& scope, Upstream::ClusterManager& cm, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, ShadowWriterPtr&& shadow_writer, + Random::RandomGenerator& random, ShadowWriterPtr&& shadow_writer, bool emit_dynamic_stats, bool start_child_span, bool suppress_envoy_headers, bool respect_expected_rq_timeout, const Protobuf::RepeatedPtrField& strict_check_headers, @@ -220,7 +226,7 @@ class FilterConfig { const LocalInfo::LocalInfo& local_info_; Upstream::ClusterManager& cm_; Runtime::Loader& runtime_; - Runtime::RandomGenerator& random_; + Random::RandomGenerator& random_; FilterStats stats_; const bool emit_dynamic_stats_; const bool start_child_span_; @@ -265,6 +271,7 @@ class RouterFilterInterface { UpstreamRequest& upstream_request) PURE; virtual void onUpstreamHostSelected(Upstream::HostDescriptionConstSharedPtr host) PURE; virtual void onPerTryTimeout(UpstreamRequest& upstream_request) PURE; + virtual void onStreamMaxDurationReached(UpstreamRequest& upstream_request) PURE; virtual Http::StreamDecoderFilterCallbacks* callbacks() PURE; virtual Upstream::ClusterInfoConstSharedPtr 
cluster() PURE; @@ -291,7 +298,8 @@ class Filter : Logger::Loggable, public RouterFilterInterface { public: Filter(FilterConfig& config) - : config_(config), final_upstream_request_(nullptr), downstream_response_started_(false), + : config_(config), final_upstream_request_(nullptr), + downstream_100_continue_headers_encoded_(false), downstream_response_started_(false), downstream_end_stream_(false), is_retry_(false), attempting_internal_redirect_with_complete_stream_(false) {} @@ -367,15 +375,17 @@ class Filter : Logger::Loggable, return retry_state_->shouldSelectAnotherHost(host); } - const Upstream::HealthyAndDegradedLoad& - determinePriorityLoad(const Upstream::PrioritySet& priority_set, - const Upstream::HealthyAndDegradedLoad& original_priority_load) override { + const Upstream::HealthyAndDegradedLoad& determinePriorityLoad( + const Upstream::PrioritySet& priority_set, + const Upstream::HealthyAndDegradedLoad& original_priority_load, + const Upstream::RetryPriority::PriorityMappingFunc& priority_mapping_func) override { // We only modify the priority load on retries. 
if (!is_retry_) { return original_priority_load; } - return retry_state_->priorityLoadForRetry(priority_set, original_priority_load); + return retry_state_->priorityLoadForRetry(priority_set, original_priority_load, + priority_mapping_func); } uint32_t hostSelectionRetryCount() const override { @@ -432,6 +442,7 @@ class Filter : Logger::Loggable, UpstreamRequest& upstream_request) override; void onUpstreamHostSelected(Upstream::HostDescriptionConstSharedPtr host) override; void onPerTryTimeout(UpstreamRequest& upstream_request) override; + void onStreamMaxDurationReached(UpstreamRequest& upstream_request) override; Http::StreamDecoderFilterCallbacks* callbacks() override { return callbacks_; } Upstream::ClusterInfoConstSharedPtr cluster() override { return cluster_; } FilterConfig& config() override { return config_; } @@ -465,9 +476,12 @@ class Filter : Logger::Loggable, virtual RetryStatePtr createRetryState(const RetryPolicy& policy, Http::RequestHeaderMap& request_headers, const Upstream::ClusterInfo& cluster, const VirtualCluster* vcluster, - Runtime::Loader& runtime, Runtime::RandomGenerator& random, + Runtime::Loader& runtime, Random::RandomGenerator& random, Event::Dispatcher& dispatcher, Upstream::ResourcePriority priority) PURE; - Http::ConnectionPool::Instance* getHttpConnPool(); + + std::unique_ptr createConnPool(); + UpstreamRequestPtr createUpstreamRequest(); + void maybeDoShadowing(); bool maybeRetryReset(Http::StreamResetReason reset_reason, UpstreamRequest& upstream_request); uint32_t numRequestsAwaitingHeaders(); @@ -492,6 +506,8 @@ class Filter : Logger::Loggable, void resetOtherUpstreams(UpstreamRequest& upstream_request); void sendNoHealthyUpstreamResponse(); bool setupRedirect(const Http::ResponseHeaderMap& headers, UpstreamRequest& upstream_request); + bool convertRequestHeadersForInternalRedirect(Http::RequestHeaderMap& downstream_headers, + const Http::HeaderEntry& internal_redirect); void updateOutlierDetection(Upstream::Outlier::Result 
result, UpstreamRequest& upstream_request, absl::optional code); void doRetry(); @@ -529,6 +545,7 @@ class Filter : Logger::Loggable, // list of cookies to add to upstream headers std::vector downstream_set_cookies_; + bool downstream_100_continue_headers_encoded_ : 1; bool downstream_response_started_ : 1; bool downstream_end_stream_ : 1; bool is_retry_ : 1; @@ -549,7 +566,7 @@ class ProdFilter : public Filter { RetryStatePtr createRetryState(const RetryPolicy& policy, Http::RequestHeaderMap& request_headers, const Upstream::ClusterInfo& cluster, const VirtualCluster* vcluster, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, Event::Dispatcher& dispatcher, + Random::RandomGenerator& random, Event::Dispatcher& dispatcher, Upstream::ResourcePriority priority) override; }; diff --git a/source/common/router/router_ratelimit.cc b/source/common/router/router_ratelimit.cc index 259834270e776..e4840d3417ed2 100644 --- a/source/common/router/router_ratelimit.cc +++ b/source/common/router/router_ratelimit.cc @@ -5,10 +5,12 @@ #include #include +#include "envoy/config/core/v3/base.pb.h" #include "envoy/config/route/v3/route_components.pb.h" #include "common/common/assert.h" #include "common/common/empty_string.h" +#include "common/config/metadata.h" #include "common/protobuf/utility.h" namespace Envoy { @@ -16,11 +18,37 @@ namespace Router { const uint64_t RateLimitPolicyImpl::MAX_STAGE_NUMBER = 10UL; +bool DynamicMetadataRateLimitOverride::populateOverride( + RateLimit::Descriptor& descriptor, const envoy::config::core::v3::Metadata* metadata) const { + const ProtobufWkt::Value& metadata_value = + Envoy::Config::Metadata::metadataValue(metadata, metadata_key_); + if (metadata_value.kind_case() != ProtobufWkt::Value::kStructValue) { + return false; + } + + const auto& override_value = metadata_value.struct_value().fields(); + const auto& limit_it = override_value.find("requests_per_unit"); + const auto& unit_it = override_value.find("unit"); + if (limit_it != 
override_value.end() && + limit_it->second.kind_case() == ProtobufWkt::Value::kNumberValue && + unit_it != override_value.end() && + unit_it->second.kind_case() == ProtobufWkt::Value::kStringValue) { + envoy::type::v3::RateLimitUnit unit; + if (envoy::type::v3::RateLimitUnit_Parse(unit_it->second.string_value(), &unit)) { + descriptor.limit_.emplace(RateLimit::RateLimitOverride{ + static_cast(limit_it->second.number_value()), unit}); + return true; + } + } + return false; +} + bool SourceClusterAction::populateDescriptor(const Router::RouteEntry&, RateLimit::Descriptor& descriptor, const std::string& local_service_cluster, const Http::HeaderMap&, - const Network::Address::Instance&) const { + const Network::Address::Instance&, + const envoy::config::core::v3::Metadata*) const { descriptor.entries_.push_back({"source_cluster", local_service_cluster}); return true; } @@ -28,7 +56,8 @@ bool SourceClusterAction::populateDescriptor(const Router::RouteEntry&, bool DestinationClusterAction::populateDescriptor(const Router::RouteEntry& route, RateLimit::Descriptor& descriptor, const std::string&, const Http::HeaderMap&, - const Network::Address::Instance&) const { + const Network::Address::Instance&, + const envoy::config::core::v3::Metadata*) const { descriptor.entries_.push_back({"destination_cluster", route.clusterName()}); return true; } @@ -36,20 +65,26 @@ bool DestinationClusterAction::populateDescriptor(const Router::RouteEntry& rout bool RequestHeadersAction::populateDescriptor(const Router::RouteEntry&, RateLimit::Descriptor& descriptor, const std::string&, const Http::HeaderMap& headers, - const Network::Address::Instance&) const { + const Network::Address::Instance&, + const envoy::config::core::v3::Metadata*) const { const Http::HeaderEntry* header_value = headers.get(header_name_); + + // If header is not present in the request and if skip_if_absent is true skip this descriptor, + // while calling rate limiting service. 
If skip_if_absent is false, do not call rate limiting + // service. if (!header_value) { - return false; + return skip_if_absent_; } - descriptor.entries_.push_back( {descriptor_key_, std::string(header_value->value().getStringView())}); return true; } -bool RemoteAddressAction::populateDescriptor( - const Router::RouteEntry&, RateLimit::Descriptor& descriptor, const std::string&, - const Http::HeaderMap&, const Network::Address::Instance& remote_address) const { +bool RemoteAddressAction::populateDescriptor(const Router::RouteEntry&, + RateLimit::Descriptor& descriptor, const std::string&, + const Http::HeaderMap&, + const Network::Address::Instance& remote_address, + const envoy::config::core::v3::Metadata*) const { if (remote_address.type() != Network::Address::Type::Ip) { return false; } @@ -60,12 +95,35 @@ bool RemoteAddressAction::populateDescriptor( bool GenericKeyAction::populateDescriptor(const Router::RouteEntry&, RateLimit::Descriptor& descriptor, const std::string&, - const Http::HeaderMap&, - const Network::Address::Instance&) const { + const Http::HeaderMap&, const Network::Address::Instance&, + const envoy::config::core::v3::Metadata*) const { descriptor.entries_.push_back({"generic_key", descriptor_value_}); return true; } +DynamicMetaDataAction::DynamicMetaDataAction( + const envoy::config::route::v3::RateLimit::Action::DynamicMetaData& action) + : metadata_key_(action.metadata_key()), descriptor_key_(action.descriptor_key()), + default_value_(action.default_value()) {} + +bool DynamicMetaDataAction::populateDescriptor( + const Router::RouteEntry&, RateLimit::Descriptor& descriptor, const std::string&, + const Http::HeaderMap&, const Network::Address::Instance&, + const envoy::config::core::v3::Metadata* dynamic_metadata) const { + const ProtobufWkt::Value& metadata_value = + Envoy::Config::Metadata::metadataValue(dynamic_metadata, metadata_key_); + + if (!metadata_value.string_value().empty()) { + descriptor.entries_.push_back({descriptor_key_, 
metadata_value.string_value()}); + return true; + } else if (metadata_value.string_value().empty() && !default_value_.empty()) { + descriptor.entries_.push_back({descriptor_key_, default_value_}); + return true; + } + + return false; +} + HeaderValueMatchAction::HeaderValueMatchAction( const envoy::config::route::v3::RateLimit::Action::HeaderValueMatch& action) : descriptor_value_(action.descriptor_value()), @@ -75,7 +133,8 @@ HeaderValueMatchAction::HeaderValueMatchAction( bool HeaderValueMatchAction::populateDescriptor(const Router::RouteEntry&, RateLimit::Descriptor& descriptor, const std::string&, const Http::HeaderMap& headers, - const Network::Address::Instance&) const { + const Network::Address::Instance&, + const envoy::config::core::v3::Metadata*) const { if (expect_match_ == Http::HeaderUtility::matchHeaders(headers, action_headers_)) { descriptor.entries_.push_back({"header_match", descriptor_value_}); return true; @@ -105,6 +164,9 @@ RateLimitPolicyEntryImpl::RateLimitPolicyEntryImpl( case envoy::config::route::v3::RateLimit::Action::ActionSpecifierCase::kGenericKey: actions_.emplace_back(new GenericKeyAction(action.generic_key())); break; + case envoy::config::route::v3::RateLimit::Action::ActionSpecifierCase::kDynamicMetadata: + actions_.emplace_back(new DynamicMetaDataAction(action.dynamic_metadata())); + break; case envoy::config::route::v3::RateLimit::Action::ActionSpecifierCase::kHeaderValueMatch: actions_.emplace_back(new HeaderValueMatchAction(action.header_value_match())); break; @@ -112,22 +174,37 @@ RateLimitPolicyEntryImpl::RateLimitPolicyEntryImpl( NOT_REACHED_GCOVR_EXCL_LINE; } } + if (config.has_limit()) { + switch (config.limit().override_specifier_case()) { + case envoy::config::route::v3::RateLimit_Override::OverrideSpecifierCase::kDynamicMetadata: + limit_override_.emplace( + new DynamicMetadataRateLimitOverride(config.limit().dynamic_metadata())); + break; + default: + NOT_REACHED_GCOVR_EXCL_LINE; + } + } } void 
RateLimitPolicyEntryImpl::populateDescriptors( const Router::RouteEntry& route, std::vector& descriptors, const std::string& local_service_cluster, const Http::HeaderMap& headers, - const Network::Address::Instance& remote_address) const { + const Network::Address::Instance& remote_address, + const envoy::config::core::v3::Metadata* dynamic_metadata) const { RateLimit::Descriptor descriptor; bool result = true; for (const RateLimitActionPtr& action : actions_) { result = result && action->populateDescriptor(route, descriptor, local_service_cluster, headers, - remote_address); + remote_address, dynamic_metadata); if (!result) { break; } } + if (limit_override_) { + limit_override_.value()->populateOverride(descriptor, dynamic_metadata); + } + if (result) { descriptors.emplace_back(descriptor); } diff --git a/source/common/router/router_ratelimit.h b/source/common/router/router_ratelimit.h index 0d7826f67be99..02af468b9f983 100644 --- a/source/common/router/router_ratelimit.h +++ b/source/common/router/router_ratelimit.h @@ -5,15 +5,36 @@ #include #include +#include "envoy/config/core/v3/base.pb.h" #include "envoy/config/route/v3/route_components.pb.h" #include "envoy/router/router.h" #include "envoy/router/router_ratelimit.h" +#include "common/config/metadata.h" #include "common/http/header_utility.h" +#include "absl/types/optional.h" + namespace Envoy { namespace Router { +/** + * Populate rate limit override from dynamic metadata. + */ +class DynamicMetadataRateLimitOverride : public RateLimitOverrideAction { +public: + DynamicMetadataRateLimitOverride( + const envoy::config::route::v3::RateLimit::Override::DynamicMetadata& config) + : metadata_key_(config.metadata_key()) {} + + // Router::RateLimitOverrideAction + bool populateOverride(RateLimit::Descriptor& descriptor, + const envoy::config::core::v3::Metadata* metadata) const override; + +private: + const Envoy::Config::MetadataKey metadata_key_; +}; + /** * Action for source cluster rate limiting. 
*/ @@ -22,7 +43,8 @@ class SourceClusterAction : public RateLimitAction { // Router::RateLimitAction bool populateDescriptor(const Router::RouteEntry& route, RateLimit::Descriptor& descriptor, const std::string& local_service_cluster, const Http::HeaderMap& headers, - const Network::Address::Instance& remote_address) const override; + const Network::Address::Instance& remote_address, + const envoy::config::core::v3::Metadata* dynamic_metadata) const override; }; /** @@ -33,7 +55,8 @@ class DestinationClusterAction : public RateLimitAction { // Router::RateLimitAction bool populateDescriptor(const Router::RouteEntry& route, RateLimit::Descriptor& descriptor, const std::string& local_service_cluster, const Http::HeaderMap& headers, - const Network::Address::Instance& remote_address) const override; + const Network::Address::Instance& remote_address, + const envoy::config::core::v3::Metadata* dynamic_metadata) const override; }; /** @@ -42,16 +65,19 @@ class DestinationClusterAction : public RateLimitAction { class RequestHeadersAction : public RateLimitAction { public: RequestHeadersAction(const envoy::config::route::v3::RateLimit::Action::RequestHeaders& action) - : header_name_(action.header_name()), descriptor_key_(action.descriptor_key()) {} + : header_name_(action.header_name()), descriptor_key_(action.descriptor_key()), + skip_if_absent_(action.skip_if_absent()) {} // Router::RateLimitAction bool populateDescriptor(const Router::RouteEntry& route, RateLimit::Descriptor& descriptor, const std::string& local_service_cluster, const Http::HeaderMap& headers, - const Network::Address::Instance& remote_address) const override; + const Network::Address::Instance& remote_address, + const envoy::config::core::v3::Metadata* dynamic_metadata) const override; private: const Http::LowerCaseString header_name_; const std::string descriptor_key_; + const bool skip_if_absent_; }; /** @@ -62,7 +88,8 @@ class RemoteAddressAction : public RateLimitAction { // 
Router::RateLimitAction bool populateDescriptor(const Router::RouteEntry& route, RateLimit::Descriptor& descriptor, const std::string& local_service_cluster, const Http::HeaderMap& headers, - const Network::Address::Instance& remote_address) const override; + const Network::Address::Instance& remote_address, + const envoy::config::core::v3::Metadata* dynamic_metadata) const override; }; /** @@ -76,12 +103,31 @@ class GenericKeyAction : public RateLimitAction { // Router::RateLimitAction bool populateDescriptor(const Router::RouteEntry& route, RateLimit::Descriptor& descriptor, const std::string& local_service_cluster, const Http::HeaderMap& headers, - const Network::Address::Instance& remote_address) const override; + const Network::Address::Instance& remote_address, + const envoy::config::core::v3::Metadata* dynamic_metadata) const override; private: const std::string descriptor_value_; }; +/** + * Action for dynamic metadata rate limiting. + */ +class DynamicMetaDataAction : public RateLimitAction { +public: + DynamicMetaDataAction(const envoy::config::route::v3::RateLimit::Action::DynamicMetaData& action); + // Router::RateLimitAction + bool populateDescriptor(const Router::RouteEntry& route, RateLimit::Descriptor& descriptor, + const std::string& local_service_cluster, const Http::HeaderMap& headers, + const Network::Address::Instance& remote_address, + const envoy::config::core::v3::Metadata* dynamic_metadata) const override; + +private: + const Envoy::Config::MetadataKey metadata_key_; + const std::string descriptor_key_; + const std::string default_value_; +}; + /** * Action for header value match rate limiting. 
*/ @@ -93,7 +139,8 @@ class HeaderValueMatchAction : public RateLimitAction { // Router::RateLimitAction bool populateDescriptor(const Router::RouteEntry& route, RateLimit::Descriptor& descriptor, const std::string& local_service_cluster, const Http::HeaderMap& headers, - const Network::Address::Instance& remote_address) const override; + const Network::Address::Instance& remote_address, + const envoy::config::core::v3::Metadata* dynamic_metadata) const override; private: const std::string descriptor_value_; @@ -111,15 +158,18 @@ class RateLimitPolicyEntryImpl : public RateLimitPolicyEntry { // Router::RateLimitPolicyEntry uint64_t stage() const override { return stage_; } const std::string& disableKey() const override { return disable_key_; } - void populateDescriptors(const Router::RouteEntry& route, - std::vector& descriptors, - const std::string& local_service_cluster, const Http::HeaderMap&, - const Network::Address::Instance& remote_address) const override; + void + populateDescriptors(const Router::RouteEntry& route, + std::vector& descriptors, + const std::string& local_service_cluster, const Http::HeaderMap&, + const Network::Address::Instance& remote_address, + const envoy::config::core::v3::Metadata* dynamic_metadata) const override; private: const std::string disable_key_; uint64_t stage_; std::vector actions_; + absl::optional limit_override_ = absl::nullopt; }; /** diff --git a/source/common/router/scoped_config_impl.cc b/source/common/router/scoped_config_impl.cc index 6a5d6ae2934cc..8cef0a7e4a306 100644 --- a/source/common/router/scoped_config_impl.cc +++ b/source/common/router/scoped_config_impl.cc @@ -103,8 +103,7 @@ ScopeKeyBuilderImpl::ScopeKeyBuilderImpl(ScopedRoutes::ScopeKeyBuilder&& config) } } -std::unique_ptr -ScopeKeyBuilderImpl::computeScopeKey(const Http::HeaderMap& headers) const { +ScopeKeyPtr ScopeKeyBuilderImpl::computeScopeKey(const Http::HeaderMap& headers) const { ScopeKey key; for (const auto& builder : fragment_builders_) { // 
returns nullopt if a null fragment is found. @@ -117,29 +116,33 @@ ScopeKeyBuilderImpl::computeScopeKey(const Http::HeaderMap& headers) const { return std::make_unique(std::move(key)); } -void ScopedConfigImpl::addOrUpdateRoutingScope( - const ScopedRouteInfoConstSharedPtr& scoped_route_info) { - const auto iter = scoped_route_info_by_name_.find(scoped_route_info->scopeName()); - if (iter != scoped_route_info_by_name_.end()) { - ASSERT(scoped_route_info_by_key_.contains(iter->second->scopeKey().hash())); - scoped_route_info_by_key_.erase(iter->second->scopeKey().hash()); +void ScopedConfigImpl::addOrUpdateRoutingScopes( + const std::vector& scoped_route_infos) { + for (auto& scoped_route_info : scoped_route_infos) { + const auto iter = scoped_route_info_by_name_.find(scoped_route_info->scopeName()); + if (iter != scoped_route_info_by_name_.end()) { + ASSERT(scoped_route_info_by_key_.contains(iter->second->scopeKey().hash())); + scoped_route_info_by_key_.erase(iter->second->scopeKey().hash()); + } + scoped_route_info_by_name_[scoped_route_info->scopeName()] = scoped_route_info; + scoped_route_info_by_key_[scoped_route_info->scopeKey().hash()] = scoped_route_info; } - scoped_route_info_by_name_[scoped_route_info->scopeName()] = scoped_route_info; - scoped_route_info_by_key_[scoped_route_info->scopeKey().hash()] = scoped_route_info; } -void ScopedConfigImpl::removeRoutingScope(const std::string& scope_name) { - const auto iter = scoped_route_info_by_name_.find(scope_name); - if (iter != scoped_route_info_by_name_.end()) { - ASSERT(scoped_route_info_by_key_.contains(iter->second->scopeKey().hash())); - scoped_route_info_by_key_.erase(iter->second->scopeKey().hash()); - scoped_route_info_by_name_.erase(iter); +void ScopedConfigImpl::removeRoutingScopes(const std::vector& scope_names) { + for (std::string const& scope_name : scope_names) { + const auto iter = scoped_route_info_by_name_.find(scope_name); + if (iter != scoped_route_info_by_name_.end()) { + 
ASSERT(scoped_route_info_by_key_.contains(iter->second->scopeKey().hash())); + scoped_route_info_by_key_.erase(iter->second->scopeKey().hash()); + scoped_route_info_by_name_.erase(iter); + } } } Router::ConfigConstSharedPtr ScopedConfigImpl::getRouteConfig(const Http::HeaderMap& headers) const { - std::unique_ptr scope_key = scope_key_builder_.computeScopeKey(headers); + ScopeKeyPtr scope_key = scope_key_builder_.computeScopeKey(headers); if (scope_key == nullptr) { return nullptr; } diff --git a/source/common/router/scoped_config_impl.h b/source/common/router/scoped_config_impl.h index 1879fb33a87de..5a1703caf82c9 100644 --- a/source/common/router/scoped_config_impl.h +++ b/source/common/router/scoped_config_impl.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include "envoy/config/route/v3/scoped_route.pb.h" @@ -77,6 +78,8 @@ class ScopeKey { std::vector> fragments_; }; +using ScopeKeyPtr = std::unique_ptr; + // String fragment. class StringKeyFragment : public ScopeKeyFragmentBase { public: @@ -130,7 +133,7 @@ class ScopeKeyBuilderBase { virtual ~ScopeKeyBuilderBase() = default; // Computes scope key for given headers, returns nullptr if a key can't be computed. 
- virtual std::unique_ptr computeScopeKey(const Http::HeaderMap& headers) const PURE; + virtual ScopeKeyPtr computeScopeKey(const Http::HeaderMap& headers) const PURE; protected: const ScopedRoutes::ScopeKeyBuilder config_; @@ -140,7 +143,7 @@ class ScopeKeyBuilderImpl : public ScopeKeyBuilderBase { public: explicit ScopeKeyBuilderImpl(ScopedRoutes::ScopeKeyBuilder&& config); - std::unique_ptr computeScopeKey(const Http::HeaderMap& headers) const override; + ScopeKeyPtr computeScopeKey(const Http::HeaderMap& headers) const override; private: std::vector> fragment_builders_; @@ -180,8 +183,10 @@ class ScopedConfigImpl : public ScopedConfig { ScopedConfigImpl(ScopedRoutes::ScopeKeyBuilder&& scope_key_builder) : scope_key_builder_(std::move(scope_key_builder)) {} - void addOrUpdateRoutingScope(const ScopedRouteInfoConstSharedPtr& scoped_route_info); - void removeRoutingScope(const std::string& scope_name); + void + addOrUpdateRoutingScopes(const std::vector& scoped_route_infos); + + void removeRoutingScopes(const std::vector& scope_names); // Envoy::Router::ScopedConfig Router::ConfigConstSharedPtr getRouteConfig(const Http::HeaderMap& headers) const override; diff --git a/source/common/router/scoped_rds.cc b/source/common/router/scoped_rds.cc index c900d0ead013a..c7e4d3db44fff 100644 --- a/source/common/router/scoped_rds.cc +++ b/source/common/router/scoped_rds.cc @@ -6,7 +6,6 @@ #include "envoy/api/v2/scoped_route.pb.h" #include "envoy/config/core/v3/config_source.pb.h" #include "envoy/config/route/v3/scoped_route.pb.h" -#include "envoy/config/route/v3/scoped_route.pb.validate.h" #include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" #include "envoy/service/discovery/v3/discovery.pb.h" @@ -20,6 +19,7 @@ #include "common/init/manager_impl.h" #include "common/init/watcher_impl.h" #include "common/router/rds_impl.h" +#include "common/router/scoped_config_impl.h" #include "absl/strings/str_join.h" @@ -29,6 +29,7 @@ using 
Envoy::Config::ConfigProvider; using Envoy::Config::ConfigProviderInstanceType; using Envoy::Config::ConfigProviderManager; using Envoy::Config::ConfigProviderPtr; +using Envoy::Config::ScopedResume; namespace Envoy { namespace Router { @@ -100,18 +101,18 @@ ScopedRdsConfigSubscription::ScopedRdsConfigSubscription( : DeltaConfigSubscriptionInstance("SRDS", manager_identifier, config_provider_manager, factory_context), Envoy::Config::SubscriptionBase( - rds_config_source.resource_api_version()), + rds_config_source.resource_api_version(), + factory_context.messageValidationContext().dynamicValidationVisitor(), "name"), factory_context_(factory_context), name_(name), scope_key_builder_(scope_key_builder), scope_(factory_context.scope().createScope(stat_prefix + "scoped_rds." + name + ".")), stats_({ALL_SCOPED_RDS_STATS(POOL_COUNTER(*scope_))}), - rds_config_source_(std::move(rds_config_source)), - validation_visitor_(factory_context.messageValidationContext().dynamicValidationVisitor()), - stat_prefix_(stat_prefix), route_config_provider_manager_(route_config_provider_manager) { + rds_config_source_(std::move(rds_config_source)), stat_prefix_(stat_prefix), + route_config_provider_manager_(route_config_provider_manager) { const auto resource_name = getResourceName(); subscription_ = factory_context.clusterManager().subscriptionFactory().subscriptionFromConfigSource( scoped_rds.scoped_rds_config_source(), Grpc::Common::typeUrl(resource_name), *scope_, - *this); + *this, resource_decoder_); initialize([scope_key_builder]() -> Envoy::Config::ConfigProvider::ConfigConstSharedPtr { return std::make_shared( @@ -135,67 +136,52 @@ ScopedRdsConfigSubscription::RdsRouteConfigProviderHelper::RdsRouteConfigProvide })) {} bool ScopedRdsConfigSubscription::addOrUpdateScopes( - const Protobuf::RepeatedPtrField& resources, - Init::Manager& init_manager, const std::string& version_info, - std::vector& exception_msgs) { + const std::vector& resources, Init::Manager& init_manager, + 
const std::string& version_info) { bool any_applied = false; envoy::extensions::filters::network::http_connection_manager::v3::Rds rds; rds.mutable_config_source()->MergeFrom(rds_config_source_); - absl::flat_hash_set unique_resource_names; + std::vector updated_scopes; for (const auto& resource : resources) { - envoy::config::route::v3::ScopedRouteConfiguration scoped_route_config; - try { - scoped_route_config = - MessageUtil::anyConvertAndValidate( - resource.resource(), validation_visitor_); - const std::string scope_name = scoped_route_config.name(); - if (!unique_resource_names.insert(scope_name).second) { - throw EnvoyException( - fmt::format("duplicate scoped route configuration '{}' found", scope_name)); - } - // TODO(stevenzzz): Creating a new RdsRouteConfigProvider likely expensive, migrate RDS to - // config-provider-framework to make it light weight. - rds.set_route_config_name(scoped_route_config.route_configuration_name()); - auto rds_config_provider_helper = - std::make_unique(*this, scope_name, rds, init_manager); - auto scoped_route_info = std::make_shared( - std::move(scoped_route_config), rds_config_provider_helper->routeConfig()); - // Detect if there is key conflict between two scopes, in which case Envoy won't be able to - // tell which RouteConfiguration to use. Reject the second scope in the delta form API. - auto iter = scope_name_by_hash_.find(scoped_route_info->scopeKey().hash()); - if (iter != scope_name_by_hash_.end()) { - if (iter->second != scoped_route_info->scopeName()) { - throw EnvoyException( - fmt::format("scope key conflict found, first scope is '{}', second scope is '{}'", - iter->second, scoped_route_info->scopeName())); - } - } - // NOTE: delete previous route provider if any. 
- route_provider_by_scope_.insert({scope_name, std::move(rds_config_provider_helper)}); - scope_name_by_hash_[scoped_route_info->scopeKey().hash()] = scoped_route_info->scopeName(); - scoped_route_map_[scoped_route_info->scopeName()] = scoped_route_info; - applyConfigUpdate([scoped_route_info](ConfigProvider::ConfigConstSharedPtr config) - -> ConfigProvider::ConfigConstSharedPtr { - auto* thread_local_scoped_config = - const_cast(static_cast(config.get())); - thread_local_scoped_config->addOrUpdateRoutingScope(scoped_route_info); - return config; - }); - any_applied = true; - ENVOY_LOG(debug, "srds: add/update scoped_route '{}', version: {}", - scoped_route_info->scopeName(), version_info); - } catch (const EnvoyException& e) { - exception_msgs.emplace_back(absl::StrCat("", e.what())); - } + // Explicit copy so that we can std::move later. + envoy::config::route::v3::ScopedRouteConfiguration scoped_route_config = + dynamic_cast( + resource.get().resource()); + const std::string scope_name = scoped_route_config.name(); + // TODO(stevenzzz): Creating a new RdsRouteConfigProvider likely expensive, migrate RDS to + // config-provider-framework to make it light weight. 
+ rds.set_route_config_name(scoped_route_config.route_configuration_name()); + auto rds_config_provider_helper = + std::make_unique(*this, scope_name, rds, init_manager); + auto scoped_route_info = std::make_shared( + std::move(scoped_route_config), rds_config_provider_helper->routeConfig()); + route_provider_by_scope_.insert({scope_name, std::move(rds_config_provider_helper)}); + scope_name_by_hash_[scoped_route_info->scopeKey().hash()] = scoped_route_info->scopeName(); + scoped_route_map_[scoped_route_info->scopeName()] = scoped_route_info; + updated_scopes.push_back(scoped_route_info); + any_applied = true; + ENVOY_LOG(debug, "srds: queueing add/update of scoped_route '{}', version: {}", + scoped_route_info->scopeName(), version_info); + } + + if (!updated_scopes.empty()) { + applyConfigUpdate([updated_scopes](ConfigProvider::ConfigConstSharedPtr config) + -> ConfigProvider::ConfigConstSharedPtr { + auto* thread_local_scoped_config = + const_cast(static_cast(config.get())); + thread_local_scoped_config->addOrUpdateRoutingScopes(updated_scopes); + return config; + }); } return any_applied; } -std::list> +std::list ScopedRdsConfigSubscription::removeScopes( const Protobuf::RepeatedPtrField& scope_names, const std::string& version_info) { - std::list> + std::list to_be_removed_rds_providers; + std::vector removed_scope_names; for (const auto& scope_name : scope_names) { auto iter = scoped_route_map_.find(scope_name); if (iter != scoped_route_map_.end()) { @@ -207,81 +193,88 @@ ScopedRdsConfigSubscription::removeScopes( } scope_name_by_hash_.erase(iter->second->scopeKey().hash()); scoped_route_map_.erase(iter); - applyConfigUpdate([scope_name](ConfigProvider::ConfigConstSharedPtr config) - -> ConfigProvider::ConfigConstSharedPtr { - auto* thread_local_scoped_config = - const_cast(static_cast(config.get())); - thread_local_scoped_config->removeRoutingScope(scope_name); - return config; - }); - ENVOY_LOG(debug, "srds: remove scoped route '{}', version: {}", 
scope_name, version_info); + removed_scope_names.push_back(scope_name); + ENVOY_LOG(debug, "srds: queueing removal of scoped route '{}', version: {}", scope_name, + version_info); } } + if (!removed_scope_names.empty()) { + applyConfigUpdate([removed_scope_names](ConfigProvider::ConfigConstSharedPtr config) + -> ConfigProvider::ConfigConstSharedPtr { + auto* thread_local_scoped_config = + const_cast(static_cast(config.get())); + thread_local_scoped_config->removeRoutingScopes(removed_scope_names); + return config; + }); + } return to_be_removed_rds_providers; } void ScopedRdsConfigSubscription::onConfigUpdate( - const Protobuf::RepeatedPtrField& added_resources, + const std::vector& added_resources, const Protobuf::RepeatedPtrField& removed_resources, const std::string& version_info) { // NOTE: deletes are done before adds/updates. - absl::flat_hash_map to_be_removed_scopes; + // Destruction of resume_rds will lift the floodgate for new RDS subscriptions. + // Note in the case of partial acceptance, accepted RDS subscriptions should be started + // despite of any error. + ScopedResume resume_rds; // If new route config sources come after the local init manager's initialize() been // called, the init manager can't accept new targets. Instead we use a local override which will // start new subscriptions but not wait on them to be ready. - std::unique_ptr noop_init_manager; - // NOTE: This should be defined after noop_init_manager as it depends on the - // noop_init_manager. - std::unique_ptr resume_rds; + std::unique_ptr srds_init_mgr; + // NOTE: This should be defined after srds_init_mgr and resume_rds, as it depends on the + // srds_init_mgr, and we want a single RDS discovery request to be sent to management + // server. 
+ std::unique_ptr srds_initialization_continuation; + ASSERT(localInitManager().state() > Init::Manager::State::Uninitialized); + const auto type_urls = + Envoy::Config::getAllVersionTypeUrls(); + // Pause RDS to not send a burst of RDS requests until we start all the new subscriptions. + // In the case that localInitManager is uninitialized, RDS is already paused + // either by Server init or LDS init. + if (factory_context_.clusterManager().adsMux()) { + resume_rds = factory_context_.clusterManager().adsMux()->pause(type_urls); + } // if local init manager is initialized, the parent init manager may have gone away. if (localInitManager().state() == Init::Manager::State::Initialized) { - const auto type_url = Envoy::Config::getTypeUrl( - envoy::config::core::v3::ApiVersion::V2); - noop_init_manager = + srds_init_mgr = std::make_unique(fmt::format("SRDS {}:{}", name_, version_info)); - // Pause RDS to not send a burst of RDS requests until we start all the new subscriptions. - // In the case if factory_context_.init_manager() is uninitialized, RDS is already paused - // either by Server init or LDS init. - if (factory_context_.clusterManager().adsMux()) { - factory_context_.clusterManager().adsMux()->pause(type_url); - } - resume_rds = std::make_unique([this, &noop_init_manager, version_info, type_url] { - // For new RDS subscriptions created after listener warming up, we don't wait for them to - // warm up. - Init::WatcherImpl noop_watcher( - // Note: we just throw it away. - fmt::format("SRDS ConfigUpdate watcher {}:{}", name_, version_info), - []() { /*Do nothing.*/ }); - noop_init_manager->initialize(noop_watcher); - // New RDS subscriptions should have been created, now lift the floodgate. - // Note in the case of partial acceptance, accepted RDS subscriptions should be started - // despite of any error. 
- if (factory_context_.clusterManager().adsMux()) { - factory_context_.clusterManager().adsMux()->resume(type_url); - } - }); + srds_initialization_continuation = + std::make_unique([this, &srds_init_mgr, version_info] { + // For new RDS subscriptions created after listener warming up, we don't wait for them to + // warm up. + Init::WatcherImpl noop_watcher( + // Note: we just throw it away. + fmt::format("SRDS ConfigUpdate watcher {}:{}", name_, version_info), + []() { /*Do nothing.*/ }); + srds_init_mgr->initialize(noop_watcher); + }); + } + + std::string exception_msg; + Protobuf::RepeatedPtrField clean_removed_resources = + detectUpdateConflictAndCleanupRemoved(added_resources, removed_resources, exception_msg); + if (!exception_msg.empty()) { + throw EnvoyException(fmt::format("Error adding/updating scoped route(s): {}", exception_msg)); } - std::vector exception_msgs; // Do not delete RDS config providers just yet, in case the to be deleted RDS subscriptions could // be reused by some to be added scopes. - std::list> - to_be_removed_rds_providers = removeScopes(removed_resources, version_info); + std::list + to_be_removed_rds_providers = removeScopes(clean_removed_resources, version_info); + bool any_applied = addOrUpdateScopes(added_resources, - (noop_init_manager == nullptr ? localInitManager() : *noop_init_manager), - version_info, exception_msgs) || + (srds_init_mgr == nullptr ? 
localInitManager() : *srds_init_mgr), + version_info) || !to_be_removed_rds_providers.empty(); ConfigSubscriptionCommonBase::onConfigUpdate(); if (any_applied) { setLastConfigInfo(absl::optional({absl::nullopt, version_info})); } stats_.config_reload_.inc(); - if (!exception_msgs.empty()) { - throw EnvoyException(fmt::format("Error adding/updating scoped route(s): {}", - absl::StrJoin(exception_msgs, ", "))); - } } void ScopedRdsConfigSubscription::onRdsConfigUpdate(const std::string& scope_name, @@ -298,7 +291,7 @@ void ScopedRdsConfigSubscription::onRdsConfigUpdate(const std::string& scope_nam -> ConfigProvider::ConfigConstSharedPtr { auto* thread_local_scoped_config = const_cast(static_cast(config.get())); - thread_local_scoped_config->addOrUpdateRoutingScope(new_scoped_route_info); + thread_local_scoped_config->addOrUpdateRoutingScopes({new_scoped_route_info}); return config; }); } @@ -306,47 +299,68 @@ void ScopedRdsConfigSubscription::onRdsConfigUpdate(const std::string& scope_nam // TODO(stevenzzzz): see issue #7508, consider generalizing this function as it overlaps with // CdsApiImpl::onConfigUpdate. void ScopedRdsConfigSubscription::onConfigUpdate( - const Protobuf::RepeatedPtrField& resources, + const std::vector& resources, const std::string& version_info) { + Protobuf::RepeatedPtrField to_remove_repeated; + for (const auto& scoped_route : scoped_route_map_) { + *to_remove_repeated.Add() = scoped_route.first; + } + onConfigUpdate(resources, to_remove_repeated, version_info); +} + +Protobuf::RepeatedPtrField +ScopedRdsConfigSubscription::detectUpdateConflictAndCleanupRemoved( + const std::vector& resources, + const Protobuf::RepeatedPtrField& removed_resources, std::string& exception_msg) { + Protobuf::RepeatedPtrField clean_removed_resources; + // All the scope names to be removed or updated. 
+ absl::flat_hash_set updated_or_removed_scopes; + for (const std::string& removed_resource : removed_resources) { + updated_or_removed_scopes.insert(removed_resource); + } + for (const auto& resource : resources) { + const auto& scoped_route = + dynamic_cast( + resource.get().resource()); + updated_or_removed_scopes.insert(scoped_route.name()); + } + + absl::flat_hash_map scope_name_by_hash = scope_name_by_hash_; + absl::erase_if(scope_name_by_hash, [&updated_or_removed_scopes](const auto& key_name) { + auto const& [key, name] = key_name; + return updated_or_removed_scopes.contains(name); + }); absl::flat_hash_map scoped_routes; - absl::flat_hash_map scope_name_by_key_hash; - for (const auto& resource_any : resources) { + for (const auto& resource : resources) { // Throws (thus rejects all) on any error. - auto scoped_route = - MessageUtil::anyConvertAndValidate( - resource_any, validation_visitor_); - const std::string scope_name = scoped_route.name(); + const auto& scoped_route = + dynamic_cast( + resource.get().resource()); + const std::string& scope_name = scoped_route.name(); auto scope_config_inserted = scoped_routes.try_emplace(scope_name, std::move(scoped_route)); if (!scope_config_inserted.second) { - throw EnvoyException( - fmt::format("duplicate scoped route configuration '{}' found", scope_name)); + exception_msg = fmt::format("duplicate scoped route configuration '{}' found", scope_name); + return clean_removed_resources; } const envoy::config::route::v3::ScopedRouteConfiguration& scoped_route_config = scope_config_inserted.first->second; const uint64_t key_fingerprint = MessageUtil::hash(scoped_route_config.key()); - if (!scope_name_by_key_hash.try_emplace(key_fingerprint, scope_name).second) { - throw EnvoyException( + if (!scope_name_by_hash.try_emplace(key_fingerprint, scope_name).second) { + exception_msg = fmt::format("scope key conflict found, first scope is '{}', second scope is '{}'", - scope_name_by_key_hash[key_fingerprint], scope_name)); + 
scope_name_by_hash[key_fingerprint], scope_name); + return clean_removed_resources; } } - ScopedRouteMap scoped_routes_to_remove = scoped_route_map_; - Protobuf::RepeatedPtrField to_add_repeated; - Protobuf::RepeatedPtrField to_remove_repeated; - for (auto& iter : scoped_routes) { - const std::string& scope_name = iter.first; - scoped_routes_to_remove.erase(scope_name); - auto* to_add = to_add_repeated.Add(); - to_add->set_name(scope_name); - to_add->set_version(version_info); - to_add->mutable_resource()->PackFrom(iter.second); - } - for (const auto& scoped_route : scoped_routes_to_remove) { - *to_remove_repeated.Add() = scoped_route.first; + // only remove resources that is not going to be updated. + for (const std::string& removed_resource : removed_resources) { + if (!scoped_routes.contains(removed_resource)) { + *clean_removed_resources.Add() = removed_resource; + } } - onConfigUpdate(to_add_repeated, to_remove_repeated, version_info); + return clean_removed_resources; } ScopedRdsConfigProvider::ScopedRdsConfigProvider( @@ -412,7 +426,7 @@ ConfigProviderPtr ScopedRoutesConfigProviderManager::createXdsConfigProvider( typed_optarg.scope_key_builder_, factory_context, stat_prefix, typed_optarg.rds_config_source_, static_cast(config_provider_manager) - .route_config_provider_manager(), + .routeConfigProviderPanager(), static_cast(config_provider_manager)); }); diff --git a/source/common/router/scoped_rds.h b/source/common/router/scoped_rds.h index befa51a21dc22..b00ab1a4ef8ab 100644 --- a/source/common/router/scoped_rds.h +++ b/source/common/router/scoped_rds.h @@ -1,10 +1,12 @@ #pragma once +#include #include #include "envoy/common/callback.h" #include "envoy/config/core/v3/config_source.pb.h" #include "envoy/config/route/v3/scoped_route.pb.h" +#include "envoy/config/route/v3/scoped_route.pb.validate.h" #include "envoy/config/subscription.h" #include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" #include 
"envoy/router/route_config_provider_manager.h" @@ -122,50 +124,51 @@ class ScopedRdsConfigSubscription ScopedRdsConfigSubscription& parent_; std::string scope_name_; - std::shared_ptr route_provider_; + RdsRouteConfigProviderImplSharedPtr route_provider_; // This handle_ is owned by the route config provider's RDS subscription, when the helper // destructs, the handle is deleted as well. Common::CallbackHandle* rds_update_callback_handle_; }; + using RdsRouteConfigProviderHelperPtr = std::unique_ptr; + // Adds or updates scopes, create a new RDS provider for each resource, if an exception is thrown // during updating, the exception message is collected via the exception messages vector. // Returns true if any scope updated, false otherwise. - bool addOrUpdateScopes( - const Protobuf::RepeatedPtrField& resources, - Init::Manager& init_manager, const std::string& version_info, - std::vector& exception_msgs); + bool addOrUpdateScopes(const std::vector& resources, + Init::Manager& init_manager, const std::string& version_info); // Removes given scopes from the managed set of scopes. // Returns a list of to be removed helpers which is temporally held in the onConfigUpdate method, // to make sure new scopes sharing the same RDS source configs could reuse the subscriptions. - std::list> + std::list removeScopes(const Protobuf::RepeatedPtrField& scope_names, const std::string& version_info); // Envoy::Config::DeltaConfigSubscriptionInstance void start() override { subscription_->start({}); } + // Detect scope name and scope key conflict between added scopes or between added scopes and old + // scopes. Some removed scopes may be in added resources list, instead of being removed, they + // should be updated, so only return scope names that will disappear after update. If conflict + // detected, fill exception_msg with information about scope conflict and return. 
+ Protobuf::RepeatedPtrField detectUpdateConflictAndCleanupRemoved( + const std::vector& added_resources, + const Protobuf::RepeatedPtrField& removed_resources, std::string& exception_msg); + // Envoy::Config::SubscriptionCallbacks - // NOTE: state-of-the-world form onConfigUpdate(resources, version_info) will throw an - // EnvoyException on any error and essentially reject an update. While the Delta form - // onConfigUpdate(added_resources, removed_resources, version_info) by design will partially - // accept correct RouteConfiguration from management server. - void onConfigUpdate(const Protobuf::RepeatedPtrField& resources, + // NOTE: both delta form and state-of-the-world form onConfigUpdate(resources, version_info) will + // throw an EnvoyException on any error and essentially reject an update. + void onConfigUpdate(const std::vector& resources, const std::string& version_info) override; - void onConfigUpdate( - const Protobuf::RepeatedPtrField& added_resources, - const Protobuf::RepeatedPtrField& removed_resources, - const std::string& version_info) override; + void onConfigUpdate(const std::vector& added_resources, + const Protobuf::RepeatedPtrField& removed_resources, + const std::string& system_version_info) override; void onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason reason, const EnvoyException*) override { ASSERT(Envoy::Config::ConfigUpdateFailureReason::ConnectionFailure != reason); DeltaConfigSubscriptionInstance::onConfigUpdateFailed(); } - std::string resourceName(const ProtobufWkt::Any& resource) override { - return MessageUtil::anyConvert(resource) - .name(); - } // Propagate RDS updates to ScopeConfigImpl in workers. void onRdsConfigUpdate(const std::string& scope_name, RdsRouteConfigSubscription& rds_subscription); @@ -174,20 +177,18 @@ class ScopedRdsConfigSubscription ScopedRouteMap scoped_route_map_; // RdsRouteConfigProvider by scope name. 
- absl::flat_hash_map> - route_provider_by_scope_; + absl::flat_hash_map route_provider_by_scope_; // A map of (hash, scope-name), used to detect the key conflict between scopes. absl::flat_hash_map scope_name_by_hash_; // For creating RDS subscriptions. Server::Configuration::ServerFactoryContext& factory_context_; const std::string name_; - std::unique_ptr subscription_; + Envoy::Config::SubscriptionPtr subscription_; const envoy::extensions::filters::network::http_connection_manager::v3::ScopedRoutes:: ScopeKeyBuilder scope_key_builder_; Stats::ScopePtr scope_; ScopedRdsStats stats_; const envoy::config::core::v3::ConfigSource rds_config_source_; - ProtobufMessage::ValidationVisitor& validation_visitor_; const std::string stat_prefix_; RouteConfigProviderManager& route_config_provider_manager_; }; @@ -238,7 +239,7 @@ class ScopedRoutesConfigProviderManager : public Envoy::Config::ConfigProviderMa Server::Configuration::ServerFactoryContext& factory_context, const Envoy::Config::ConfigProviderManager::OptionalArg& optarg) override; - RouteConfigProviderManager& route_config_provider_manager() { + RouteConfigProviderManager& routeConfigProviderPanager() { return route_config_provider_manager_; } @@ -246,6 +247,10 @@ class ScopedRoutesConfigProviderManager : public Envoy::Config::ConfigProviderMa RouteConfigProviderManager& route_config_provider_manager_; }; +using ScopedRoutesConfigProviderManagerPtr = std::unique_ptr; +using ScopedRoutesConfigProviderManagerSharedPtr = + std::shared_ptr; + // The optional argument passed to the ConfigProviderManager::create*() functions. 
class ScopedRoutesConfigProviderManagerOptArg : public Envoy::Config::ConfigProviderManager::OptionalArg { diff --git a/source/common/router/shadow_writer_impl.cc b/source/common/router/shadow_writer_impl.cc index 41b0736f60fae..504877c4e643e 100644 --- a/source/common/router/shadow_writer_impl.cc +++ b/source/common/router/shadow_writer_impl.cc @@ -21,14 +21,13 @@ void ShadowWriterImpl::shadow(const std::string& cluster, Http::RequestMessagePt return; } - ASSERT(!request->headers().Host()->value().empty()); + ASSERT(!request->headers().getHostValue().empty()); // Switch authority to add a shadow postfix. This allows upstream logging to make more sense. - auto parts = StringUtil::splitToken(request->headers().Host()->value().getStringView(), ":"); + auto parts = StringUtil::splitToken(request->headers().getHostValue(), ":"); ASSERT(!parts.empty() && parts.size() <= 2); - request->headers().setHost( - parts.size() == 2 - ? absl::StrJoin(parts, "-shadow:") - : absl::StrCat(request->headers().Host()->value().getStringView(), "-shadow")); + request->headers().setHost(parts.size() == 2 + ? absl::StrJoin(parts, "-shadow:") + : absl::StrCat(request->headers().getHostValue(), "-shadow")); // This is basically fire and forget. We don't handle cancelling. 
cm_.httpAsyncClientForCluster(cluster).send(std::move(request), *this, options); } diff --git a/source/common/router/shadow_writer_impl.h b/source/common/router/shadow_writer_impl.h index 2224912e88560..c65748e3325e6 100644 --- a/source/common/router/shadow_writer_impl.h +++ b/source/common/router/shadow_writer_impl.h @@ -26,6 +26,8 @@ class ShadowWriterImpl : Logger::Loggable, // Http::AsyncClient::Callbacks void onSuccess(const Http::AsyncClient::Request&, Http::ResponseMessagePtr&&) override {} void onFailure(const Http::AsyncClient::Request&, Http::AsyncClient::FailureReason) override {} + void onBeforeFinalizeUpstreamSpan(Envoy::Tracing::Span&, + const Http::ResponseHeaderMap*) override {} private: Upstream::ClusterManager& cm_; diff --git a/source/common/router/upstream_request.cc b/source/common/router/upstream_request.cc index 818214ef86480..6e722145cb45d 100644 --- a/source/common/router/upstream_request.cc +++ b/source/common/router/upstream_request.cc @@ -35,6 +35,7 @@ #include "common/stream_info/uint32_accessor_impl.h" #include "common/tracing/http_tracer_impl.h" +#include "extensions/common/proxy_protocol/proxy_protocol_header.h" #include "extensions/filters/http/well_known_names.h" namespace Envoy { @@ -48,7 +49,7 @@ UpstreamRequest::UpstreamRequest(RouterFilterInterface& parent, calling_encode_headers_(false), upstream_canary_(false), decode_complete_(false), encode_complete_(false), encode_trailers_(false), retried_(false), awaiting_headers_(true), outlier_detection_timeout_recorded_(false), - create_per_try_timeout_on_request_complete_(false), + create_per_try_timeout_on_request_complete_(false), paused_for_connect_(false), record_timeout_budget_(parent_.cluster()->timeoutBudgetStats().has_value()) { if (parent_.config().start_child_span_) { span_ = parent_.callbacks()->activeSpan().spawnChild( @@ -77,6 +78,9 @@ UpstreamRequest::~UpstreamRequest() { // Allows for testing. 
per_try_timeout_->disableTimer(); } + if (max_stream_duration_timer_ != nullptr) { + max_stream_duration_timer_->disableTimer(); + } clearRequestEncoder(); // If desired, fire the per-try histogram when the UpstreamRequest @@ -86,10 +90,9 @@ UpstreamRequest::~UpstreamRequest() { const MonotonicTime end_time = dispatcher.timeSource().monotonicTime(); const std::chrono::milliseconds response_time = std::chrono::duration_cast(end_time - start_time_); - parent_.cluster() - ->timeoutBudgetStats() - ->upstream_rq_timeout_budget_per_try_percent_used_.recordValue( - FilterUtility::percentageOfTimeout(response_time, parent_.timeout().per_try_timeout_)); + Upstream::ClusterTimeoutBudgetStatsOptRef tb_stats = parent_.cluster()->timeoutBudgetStats(); + tb_stats->get().upstream_rq_timeout_budget_per_try_percent_used_.recordValue( + FilterUtility::percentageOfTimeout(response_time, parent_.timeout().per_try_timeout_)); } stream_info_.setUpstreamTiming(upstream_timing_); @@ -98,6 +101,12 @@ UpstreamRequest::~UpstreamRequest() { upstream_log->log(parent_.downstreamHeaders(), upstream_headers_.get(), upstream_trailers_.get(), stream_info_); } + + while (downstream_data_disabled_ != 0) { + parent_.callbacks()->onDecoderFilterBelowWriteBufferLowWatermark(); + parent_.cluster()->stats().upstream_flow_control_drained_total_.inc(); + --downstream_data_disabled_; + } } void UpstreamRequest::decode100ContinueHeaders(Http::ResponseHeaderMapPtr&& headers) { @@ -110,6 +119,23 @@ void UpstreamRequest::decode100ContinueHeaders(Http::ResponseHeaderMapPtr&& head void UpstreamRequest::decodeHeaders(Http::ResponseHeaderMapPtr&& headers, bool end_stream) { ScopeTrackerScopeState scope(&parent_.callbacks()->scope(), parent_.callbacks()->dispatcher()); + // We drop 1xx other than 101 on the floor; 101 upgrade headers need to be passed to the client as + // part of the final response. 100-continue headers are handled in onUpstream100ContinueHeaders. 
+ // + // We could in principle handle other headers here, but this might result in the double invocation + // of decodeHeaders() (once for informational, again for non-informational), which is likely an + // easy to miss corner case in the filter and HCM contract. + // + // This filtering is done early in upstream request, unlike 100 coalescing which is performed in + // the router filter, since the filtering only depends on the state of a single upstream, and we + // don't want to confuse accounting such as onFirstUpstreamRxByteReceived() with informational + // headers. + const uint64_t response_code = Http::Utility::getResponseStatus(*headers); + if (Http::CodeUtility::is1xx(response_code) && + response_code != enumToInt(Http::Code::SwitchingProtocols)) { + return; + } + // TODO(rodaine): This is actually measuring after the headers are parsed and not the first // byte. upstream_timing_.onFirstUpstreamRxByteReceived(parent_.callbacks()->dispatcher().timeSource()); @@ -119,8 +145,13 @@ void UpstreamRequest::decodeHeaders(Http::ResponseHeaderMapPtr&& headers, bool e if (!parent_.config().upstream_logs_.empty()) { upstream_headers_ = Http::createHeaderMap(*headers); } - const uint64_t response_code = Http::Utility::getResponseStatus(*headers); stream_info_.response_code_ = static_cast(response_code); + + if (paused_for_connect_ && response_code == 200) { + encodeBodyAndTrailers(); + paused_for_connect_ = false; + } + parent_.onUpstreamHeaders(response_code, std::move(headers), *this, end_stream); } @@ -141,6 +172,11 @@ void UpstreamRequest::decodeTrailers(Http::ResponseTrailerMapPtr&& trailers) { } parent_.onUpstreamTrailers(std::move(trailers), *this); } +const RouteEntry& UpstreamRequest::routeEntry() const { return *parent_.routeEntry(); } + +const Network::Connection& UpstreamRequest::connection() const { + return *parent_.callbacks()->connection(); +} void UpstreamRequest::decodeMetadata(Http::MetadataMapPtr&& metadata_map) { 
parent_.onUpstreamMetadata(std::move(metadata_map)); @@ -171,12 +207,13 @@ void UpstreamRequest::encodeData(Buffer::Instance& data, bool end_stream) { ASSERT(!encode_complete_); encode_complete_ = end_stream; - if (!upstream_) { + if (!upstream_ || paused_for_connect_) { ENVOY_STREAM_LOG(trace, "buffering {} bytes", *parent_.callbacks(), data.length()); if (!buffered_request_body_) { buffered_request_body_ = std::make_unique( [this]() -> void { this->enableDataFromDownstreamForFlowControl(); }, - [this]() -> void { this->disableDataFromDownstreamForFlowControl(); }); + [this]() -> void { this->disableDataFromDownstreamForFlowControl(); }, + []() -> void { /* TODO(adisuissa): Handle overflow watermark */ }); buffered_request_body_->setWatermarks(parent_.callbacks()->decoderBufferLimit()); } @@ -351,6 +388,7 @@ void UpstreamRequest::onPoolReady( parent_.callbacks()->addDownstreamWatermarkCallbacks(downstream_watermark_manager_); calling_encode_headers_ = true; + auto* headers = parent_.downstreamHeaders(); if (parent_.routeEntry()->autoHostRewrite() && !host->hostname().empty()) { parent_.downstreamHeaders()->setHost(host->hostname()); } @@ -361,13 +399,33 @@ void UpstreamRequest::onPoolReady( upstream_timing_.onFirstUpstreamTxByteSent(parent_.callbacks()->dispatcher().timeSource()); - const bool end_stream = !buffered_request_body_ && encode_complete_ && !encode_trailers_; - // If end_stream is set in headers, and there are metadata to send, delays end_stream. The case - // only happens when decoding headers filters return ContinueAndEndStream. - const bool delay_headers_end_stream = end_stream && !downstream_metadata_map_vector_.empty(); - upstream_->encodeHeaders(*parent_.downstreamHeaders(), end_stream && !delay_headers_end_stream); + // Make sure that when we are forwarding CONNECT payload we do not do so until + // the upstream has accepted the CONNECT request. 
+ if (conn_pool_->protocol().has_value() && + headers->getMethodValue() == Http::Headers::get().MethodValues.Connect) { + paused_for_connect_ = true; + } + + if (upstream_host_->cluster().commonHttpProtocolOptions().has_max_stream_duration()) { + const auto max_stream_duration = std::chrono::milliseconds(DurationUtil::durationToMilliseconds( + upstream_host_->cluster().commonHttpProtocolOptions().max_stream_duration())); + if (max_stream_duration.count()) { + max_stream_duration_timer_ = parent_.callbacks()->dispatcher().createTimer( + [this]() -> void { onStreamMaxDurationReached(); }); + max_stream_duration_timer_->enableTimer(max_stream_duration); + } + } + + upstream_->encodeHeaders(*parent_.downstreamHeaders(), shouldSendEndStream()); + calling_encode_headers_ = false; + if (!paused_for_connect_) { + encodeBodyAndTrailers(); + } +} + +void UpstreamRequest::encodeBodyAndTrailers() { // It is possible to get reset in the middle of an encodeHeaders() call. This happens for // example in the HTTP/2 codec if the frame cannot be encoded for some reason. This should never // happen but it's unclear if we have covered all cases so protect against it and test for it. @@ -382,7 +440,7 @@ void UpstreamRequest::onPoolReady( downstream_metadata_map_vector_); upstream_->encodeMetadata(downstream_metadata_map_vector_); downstream_metadata_map_vector_.clear(); - if (delay_headers_end_stream) { + if (shouldSendEndStream()) { Buffer::OwnedImpl empty_data(""); upstream_->encodeData(empty_data, true); } @@ -403,6 +461,13 @@ void UpstreamRequest::onPoolReady( } } +void UpstreamRequest::onStreamMaxDurationReached() { + upstream_host_->cluster().stats().upstream_rq_max_duration_reached_.inc(); + + // The upstream had closed then try to retry along with retry policy. + parent_.onStreamMaxDurationReached(*this); +} + void UpstreamRequest::clearRequestEncoder() { // Before clearing the encoder, unsubscribe from callbacks. 
if (upstream_) { @@ -421,7 +486,6 @@ void UpstreamRequest::DownstreamWatermarkManager::onAboveWriteBufferHighWatermar // can disable reads from upstream. ASSERT(!parent_.parent_.finalUpstreamRequest() || &parent_ == parent_.parent_.finalUpstreamRequest()); - // The downstream connection is overrun. Pause reads from upstream. // If there are multiple calls to readDisable either the codec (H2) or the underlying // Network::Connection (H1) will handle reference counting. @@ -451,6 +515,7 @@ void UpstreamRequest::disableDataFromDownstreamForFlowControl() { ASSERT(parent_.upstreamRequests().size() == 1 || parent_.downstreamEndStream()); parent_.cluster()->stats().upstream_flow_control_backed_up_total_.inc(); parent_.callbacks()->onDecoderFilterAboveWriteBufferHighWatermark(); + ++downstream_data_disabled_; } void UpstreamRequest::enableDataFromDownstreamForFlowControl() { @@ -466,45 +531,11 @@ void UpstreamRequest::enableDataFromDownstreamForFlowControl() { ASSERT(parent_.upstreamRequests().size() == 1 || parent_.downstreamEndStream()); parent_.cluster()->stats().upstream_flow_control_drained_total_.inc(); parent_.callbacks()->onDecoderFilterBelowWriteBufferLowWatermark(); -} - -void HttpConnPool::newStream(GenericConnectionPoolCallbacks* callbacks) { - callbacks_ = callbacks; - // It's possible for a reset to happen inline within the newStream() call. In this case, we - // might get deleted inline as well. Only write the returned handle out if it is not nullptr to - // deal with this case. 
- Http::ConnectionPool::Cancellable* handle = - conn_pool_.newStream(*callbacks->upstreamRequest(), *this); - if (handle) { - conn_pool_stream_handle_ = handle; + ASSERT(downstream_data_disabled_ != 0); + if (downstream_data_disabled_ > 0) { + --downstream_data_disabled_; } } -bool HttpConnPool::cancelAnyPendingRequest() { - if (conn_pool_stream_handle_) { - conn_pool_stream_handle_->cancel(); - conn_pool_stream_handle_ = nullptr; - return true; - } - return false; -} - -absl::optional HttpConnPool::protocol() const { return conn_pool_.protocol(); } - -void HttpConnPool::onPoolFailure(ConnectionPool::PoolFailureReason reason, - absl::string_view transport_failure_reason, - Upstream::HostDescriptionConstSharedPtr host) { - callbacks_->onPoolFailure(reason, transport_failure_reason, host); -} - -void HttpConnPool::onPoolReady(Http::RequestEncoder& request_encoder, - Upstream::HostDescriptionConstSharedPtr host, - const StreamInfo::StreamInfo& info) { - conn_pool_stream_handle_ = nullptr; - auto upstream = std::make_unique(*callbacks_->upstreamRequest(), &request_encoder); - callbacks_->onPoolReady(std::move(upstream), host, - request_encoder.getStream().connectionLocalAddress(), info); -} - } // namespace Router } // namespace Envoy diff --git a/source/common/router/upstream_request.h b/source/common/router/upstream_request.h index 0e85a21ddae1b..91f17f511239f 100644 --- a/source/common/router/upstream_request.h +++ b/source/common/router/upstream_request.h @@ -30,41 +30,9 @@ class GenericConnectionPoolCallbacks; class RouterFilterInterface; class UpstreamRequest; -// An API for wrapping either an HTTP or a TCP connection pool. -class GenericConnPool : public Logger::Loggable { -public: - virtual ~GenericConnPool() = default; - - // Called to create a new HTTP stream or TCP connection. The implementation - // is then responsible for calling either onPoolReady or onPoolFailure on the - // supplied GenericConnectionPoolCallbacks. 
- virtual void newStream(GenericConnectionPoolCallbacks* callbacks) PURE; - // Called to cancel a call to newStream. Returns true if a newStream request - // was canceled, false otherwise. - virtual bool cancelAnyPendingRequest() PURE; - // Optionally returns the protocol for the connection pool. - virtual absl::optional protocol() const PURE; -}; - -// An API for the UpstreamRequest to get callbacks from either an HTTP or TCP -// connection pool. -class GenericConnectionPoolCallbacks { -public: - virtual ~GenericConnectionPoolCallbacks() = default; - - virtual void onPoolFailure(ConnectionPool::PoolFailureReason reason, - absl::string_view transport_failure_reason, - Upstream::HostDescriptionConstSharedPtr host) PURE; - virtual void onPoolReady(std::unique_ptr&& upstream, - Upstream::HostDescriptionConstSharedPtr host, - const Network::Address::InstanceConstSharedPtr& upstream_local_address, - const StreamInfo::StreamInfo& info) PURE; - virtual UpstreamRequest* upstreamRequest() PURE; -}; - // The base request for Upstream. 
class UpstreamRequest : public Logger::Loggable, - public Http::ResponseDecoder, + public UpstreamToDownstream, public LinkedObject, public GenericConnectionPoolCallbacks { public: @@ -86,12 +54,18 @@ class UpstreamRequest : public Logger::Loggable, void decodeData(Buffer::Instance& data, bool end_stream) override; void decodeMetadata(Http::MetadataMapPtr&& metadata_map) override; - // Http::ResponseDecoder + // UpstreamToDownstream (Http::ResponseDecoder) void decode100ContinueHeaders(Http::ResponseHeaderMapPtr&& headers) override; void decodeHeaders(Http::ResponseHeaderMapPtr&& headers, bool end_stream) override; void decodeTrailers(Http::ResponseTrailerMapPtr&& trailers) override; - - void onResetStream(Http::StreamResetReason reason, absl::string_view transport_failure_reason); + // UpstreamToDownstream (Http::StreamCallbacks) + void onResetStream(Http::StreamResetReason reason, + absl::string_view transport_failure_reason) override; + void onAboveWriteBufferHighWatermark() override { disableDataFromDownstreamForFlowControl(); } + void onBelowWriteBufferLowWatermark() override { enableDataFromDownstreamForFlowControl(); } + // UpstreamToDownstream + const RouteEntry& routeEntry() const override; + const Network::Connection& connection() const override; void disableDataFromDownstreamForFlowControl(); void enableDataFromDownstreamForFlowControl(); @@ -104,9 +78,10 @@ class UpstreamRequest : public Logger::Loggable, Upstream::HostDescriptionConstSharedPtr host, const Network::Address::InstanceConstSharedPtr& upstream_local_address, const StreamInfo::StreamInfo& info) override; - UpstreamRequest* upstreamRequest() override { return this; } + UpstreamToDownstream& upstreamToDownstream() override { return *this; } void clearRequestEncoder(); + void onStreamMaxDurationReached(); struct DownstreamWatermarkManager : public Http::DownstreamWatermarkCallbacks { DownstreamWatermarkManager(UpstreamRequest& parent) : parent_(parent) {} @@ -119,6 +94,7 @@ class 
UpstreamRequest : public Logger::Loggable, }; void readEnable(); + void encodeBodyAndTrailers(); // Getters and setters Upstream::HostDescriptionConstSharedPtr& upstreamHost() { return upstream_host_; } @@ -138,8 +114,17 @@ class UpstreamRequest : public Logger::Loggable, bool createPerTryTimeoutOnRequestComplete() { return create_per_try_timeout_on_request_complete_; } + bool encodeComplete() const { return encode_complete_; } + RouterFilterInterface& parent() { return parent_; } private: + bool shouldSendEndStream() { + // Only encode end stream if the full request has been received, the body + // has been sent, and any trailers or metadata have also been sent. + return encode_complete_ && !buffered_request_body_ && !encode_trailers_ && + downstream_metadata_map_vector_.empty(); + } + RouterFilterInterface& parent_; std::unique_ptr conn_pool_; bool grpc_rq_success_deferred_; @@ -159,6 +144,8 @@ class UpstreamRequest : public Logger::Loggable, Http::ResponseTrailerMapPtr upstream_trailers_; Http::MetadataMapVector downstream_metadata_map_vector_; + // Tracks the number of times the flow of data from downstream has been disabled. + uint32_t downstream_data_disabled_{}; bool calling_encode_headers_ : 1; bool upstream_canary_ : 1; bool decode_complete_ : 1; @@ -170,93 +157,15 @@ class UpstreamRequest : public Logger::Loggable, // Tracks whether we deferred a per try timeout because the downstream request // had not been completed yet. bool create_per_try_timeout_on_request_complete_ : 1; + // True if the CONNECT headers have been sent but proxying payload is paused + // waiting for response headers. + bool paused_for_connect_ : 1; // Sentinel to indicate if timeout budget tracking is configured for the cluster, // and if so, if the per-try histogram should record a value. 
bool record_timeout_budget_ : 1; -}; - -class HttpConnPool : public GenericConnPool, public Http::ConnectionPool::Callbacks { -public: - HttpConnPool(Http::ConnectionPool::Instance& conn_pool) : conn_pool_(conn_pool) {} - - // GenericConnPool - void newStream(GenericConnectionPoolCallbacks* callbacks) override; - bool cancelAnyPendingRequest() override; - absl::optional protocol() const override; - - // Http::ConnectionPool::Callbacks - void onPoolFailure(ConnectionPool::PoolFailureReason reason, - absl::string_view transport_failure_reason, - Upstream::HostDescriptionConstSharedPtr host) override; - void onPoolReady(Http::RequestEncoder& callbacks_encoder, - Upstream::HostDescriptionConstSharedPtr host, - const StreamInfo::StreamInfo& info) override; - -private: - // Points to the actual connection pool to create streams from. - Http::ConnectionPool::Instance& conn_pool_; - Http::ConnectionPool::Cancellable* conn_pool_stream_handle_{}; - GenericConnectionPoolCallbacks* callbacks_{}; -}; - -// A generic API which covers common functionality between HTTP and TCP upstreams. 
-class GenericUpstream { -public: - virtual ~GenericUpstream() = default; - virtual void encodeData(Buffer::Instance& data, bool end_stream) PURE; - virtual void encodeMetadata(const Http::MetadataMapVector& metadata_map_vector) PURE; - virtual void encodeHeaders(const Http::RequestHeaderMap& headers, bool end_stream) PURE; - virtual void encodeTrailers(const Http::RequestTrailerMap& trailers) PURE; - virtual void readDisable(bool disable) PURE; - virtual void resetStream() PURE; -}; - -class HttpUpstream : public GenericUpstream, public Http::StreamCallbacks { -public: - HttpUpstream(UpstreamRequest& upstream_request, Http::RequestEncoder* encoder) - : upstream_request_(upstream_request), request_encoder_(encoder) { - request_encoder_->getStream().addCallbacks(*this); - } - - // GenericUpstream - void encodeData(Buffer::Instance& data, bool end_stream) override { - request_encoder_->encodeData(data, end_stream); - } - void encodeMetadata(const Http::MetadataMapVector& metadata_map_vector) override { - request_encoder_->encodeMetadata(metadata_map_vector); - } - void encodeHeaders(const Http::RequestHeaderMap& headers, bool end_stream) override { - request_encoder_->encodeHeaders(headers, end_stream); - } - void encodeTrailers(const Http::RequestTrailerMap& trailers) override { - request_encoder_->encodeTrailers(trailers); - } - - void readDisable(bool disable) override { request_encoder_->getStream().readDisable(disable); } - - void resetStream() override { - request_encoder_->getStream().removeCallbacks(*this); - request_encoder_->getStream().resetStream(Http::StreamResetReason::LocalReset); - } - - // Http::StreamCallbacks - void onResetStream(Http::StreamResetReason reason, - absl::string_view transport_failure_reason) override { - upstream_request_.onResetStream(reason, transport_failure_reason); - } - - void onAboveWriteBufferHighWatermark() override { - upstream_request_.disableDataFromDownstreamForFlowControl(); - } - void onBelowWriteBufferLowWatermark() 
override { - upstream_request_.enableDataFromDownstreamForFlowControl(); - } - -private: - UpstreamRequest& upstream_request_; - Http::RequestEncoder* request_encoder_{}; + Event::TimerPtr max_stream_duration_timer_; }; } // namespace Router diff --git a/source/common/router/vhds.cc b/source/common/router/vhds.cc index 75f8a95708871..31d5b9d27d251 100644 --- a/source/common/router/vhds.cc +++ b/source/common/router/vhds.cc @@ -20,12 +20,14 @@ namespace Envoy { namespace Router { // Implements callbacks to handle DeltaDiscovery protocol for VirtualHostDiscoveryService -VhdsSubscription::VhdsSubscription(RouteConfigUpdatePtr& config_update_info, - Server::Configuration::ServerFactoryContext& factory_context, - const std::string& stat_prefix, - std::unordered_set& route_config_providers, - envoy::config::core::v3::ApiVersion resource_api_version) - : Envoy::Config::SubscriptionBase(resource_api_version), +VhdsSubscription::VhdsSubscription( + RouteConfigUpdatePtr& config_update_info, + Server::Configuration::ServerFactoryContext& factory_context, const std::string& stat_prefix, + absl::node_hash_set& route_config_providers, + envoy::config::core::v3::ApiVersion resource_api_version) + : Envoy::Config::SubscriptionBase( + resource_api_version, + factory_context.messageValidationContext().dynamicValidationVisitor(), "name"), config_update_info_(config_update_info), scope_(factory_context.scope().createScope(stat_prefix + "vhds." 
+ config_update_info_->routeConfigName() + ".")), @@ -45,7 +47,7 @@ VhdsSubscription::VhdsSubscription(RouteConfigUpdatePtr& config_update_info, subscription_ = factory_context.clusterManager().subscriptionFactory().subscriptionFromConfigSource( config_update_info_->routeConfiguration().vhds().config_source(), - Grpc::Common::typeUrl(resource_name), *scope_, *this); + Grpc::Common::typeUrl(resource_name), *scope_, *this, resource_decoder_); } void VhdsSubscription::updateOnDemand(const std::string& with_route_config_name_prefix) { @@ -61,10 +63,25 @@ void VhdsSubscription::onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureRe } void VhdsSubscription::onConfigUpdate( - const Protobuf::RepeatedPtrField& added_resources, + const std::vector& added_resources, const Protobuf::RepeatedPtrField& removed_resources, const std::string& version_info) { - if (config_update_info_->onVhdsUpdate(added_resources, removed_resources, version_info)) { + RouteConfigUpdateReceiver::VirtualHostRefVector added_vhosts; + std::set added_resource_ids; + for (const auto& resource : added_resources) { + added_resource_ids.emplace(resource.get().name()); + std::copy(resource.get().aliases().begin(), resource.get().aliases().end(), + std::inserter(added_resource_ids, added_resource_ids.end())); + // the management server returns empty resources (they contain no virtual hosts in this case) + // for aliases that it couldn't resolve. 
+ if (!resource.get().hasResource()) { + continue; + } + added_vhosts.emplace_back( + dynamic_cast(resource.get().resource())); + } + if (config_update_info_->onVhdsUpdate(added_vhosts, added_resource_ids, removed_resources, + version_info)) { stats_.config_reload_.inc(); ENVOY_LOG(debug, "vhds: loading new configuration: config_name={} hash={}", config_update_info_->routeConfigName(), config_update_info_->configHash()); diff --git a/source/common/router/vhds.h b/source/common/router/vhds.h index 372f5a08989c7..ea5be40740422 100644 --- a/source/common/router/vhds.h +++ b/source/common/router/vhds.h @@ -3,11 +3,10 @@ #include #include #include -#include -#include #include "envoy/config/core/v3/config_source.pb.h" #include "envoy/config/route/v3/route_components.pb.h" +#include "envoy/config/route/v3/route_components.pb.validate.h" #include "envoy/config/subscription.h" #include "envoy/http/codes.h" #include "envoy/local_info/local_info.h" @@ -24,6 +23,8 @@ #include "common/init/target_impl.h" #include "common/protobuf/utility.h" +#include "absl/container/node_hash_set.h" + namespace Envoy { namespace Router { @@ -41,7 +42,7 @@ class VhdsSubscription : Envoy::Config::SubscriptionBase& route_config_providers, + absl::node_hash_set& route_config_providers, const envoy::config::core::v3::ApiVersion resource_api_version = envoy::config::core::v3::ApiVersion::AUTO); ~VhdsSubscription() override { init_target_.ready(); } @@ -59,24 +60,21 @@ class VhdsSubscription : Envoy::Config::SubscriptionBase&, + void onConfigUpdate(const std::vector&, const std::string&) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } - void onConfigUpdate(const Protobuf::RepeatedPtrField&, + void onConfigUpdate(const std::vector&, const Protobuf::RepeatedPtrField&, const std::string&) override; void onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason reason, const EnvoyException* e) override; - std::string resourceName(const ProtobufWkt::Any& resource) override { - return 
MessageUtil::anyConvert(resource).name(); - } RouteConfigUpdatePtr& config_update_info_; Stats::ScopePtr scope_; VhdsStats stats_; - std::unique_ptr subscription_; + Envoy::Config::SubscriptionPtr subscription_; Init::TargetImpl init_target_; - std::unordered_set& route_config_providers_; + absl::node_hash_set& route_config_providers_; }; using VhdsSubscriptionPtr = std::unique_ptr; diff --git a/source/common/runtime/BUILD b/source/common/runtime/BUILD index ddeb069e3e5ae..61b63093da25d 100644 --- a/source/common/runtime/BUILD +++ b/source/common/runtime/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( @@ -34,6 +34,7 @@ envoy_cc_library( ], deps = [ "//include/envoy/runtime:runtime_interface", + "//source/common/protobuf:utility_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", "@envoy_api//envoy/type/v3:pkg_cc_proto", ], @@ -47,7 +48,6 @@ envoy_cc_library( hdrs = [ "runtime_impl.h", ], - external_deps = ["ssl"], deps = [ ":runtime_features_lib", ":runtime_protos_lib", @@ -61,6 +61,7 @@ envoy_cc_library( "//include/envoy/upstream:cluster_manager_interface", "//source/common/common:empty_string", "//source/common/common:minimal_logger_lib", + "//source/common/common:random_generator_lib", #FIXME "//source/common/common:thread_lib", "//source/common/common:utility_lib", "//source/common/config:api_version_lib", diff --git a/source/common/runtime/runtime_features.cc b/source/common/runtime/runtime_features.cc index 1ddc80c12f3d5..ae06021386517 100644 --- a/source/common/runtime/runtime_features.cc +++ b/source/common/runtime/runtime_features.cc @@ -55,15 +55,32 @@ constexpr const char* runtime_features[] = { // Enabled "envoy.reloadable_features.http1_flood_protection", "envoy.reloadable_features.test_feature_true", - "envoy.reloadable_features.strict_header_validation", 
"envoy.reloadable_features.connection_header_sanitization", - "envoy.reloadable_features.strict_authority_validation", - "envoy.reloadable_features.reject_unsupported_transfer_encodings", - "envoy.reloadable_features.new_http1_connection_pool_behavior", - "envoy.reloadable_features.new_http2_connection_pool_behavior", + // Begin alphabetically sorted section. + "envoy.reloadable_features.activate_fds_next_event_loop", + "envoy.reloadable_features.activate_timers_next_event_loop", + "envoy.reloadable_features.allow_500_after_100", "envoy.deprecated_features.allow_deprecated_extension_names", + "envoy.reloadable_features.allow_prefetch", + "envoy.reloadable_features.allow_response_for_timeout", + "envoy.reloadable_features.consume_all_retry_headers", + "envoy.reloadable_features.disallow_unbounded_access_logs", + "envoy.reloadable_features.early_errors_via_hcm", + "envoy.reloadable_features.enable_deprecated_v2_api_warning", + "envoy.reloadable_features.enable_dns_cache_circuit_breakers", "envoy.reloadable_features.ext_authz_http_service_enable_case_sensitive_string_matcher", "envoy.reloadable_features.fix_upgrade_response", + "envoy.reloadable_features.fix_wildcard_matching", + "envoy.reloadable_features.fixed_connection_close", + "envoy.reloadable_features.http_default_alpn", + "envoy.reloadable_features.http_transport_failure_reason_in_body", + "envoy.reloadable_features.http2_skip_encoding_empty_trailers", + "envoy.reloadable_features.listener_in_place_filterchain_update", + "envoy.reloadable_features.preserve_query_string_in_path_redirects", + "envoy.reloadable_features.preserve_upstream_date", + "envoy.reloadable_features.stop_faking_paths", + "envoy.reloadable_features.hcm_stream_error_on_invalid_message", + "envoy.reloadable_features.strict_1xx_and_204_response_headers", }; // This is a section for officially sanctioned runtime features which are too @@ -75,6 +92,10 @@ constexpr const char* runtime_features[] = { // When features are added here, there should 
be a tracking bug assigned to the // code owner to flip the default after sufficient testing. constexpr const char* disabled_runtime_features[] = { + // TODO(asraa) flip this feature after codec errors are handled + "envoy.reloadable_features.new_codec_behavior", + // TODO(alyssawilk) flip true after the release. + "envoy.reloadable_features.new_tcp_connection_pool", // Sentinel and test flag. "envoy.reloadable_features.test_feature_false", }; diff --git a/source/common/runtime/runtime_impl.cc b/source/common/runtime/runtime_impl.cc index 4ca76e9172e29..86c59606a80e1 100644 --- a/source/common/runtime/runtime_impl.cc +++ b/source/common/runtime/runtime_impl.cc @@ -1,17 +1,13 @@ #include "common/runtime/runtime_impl.h" #include -#include #include -#include #include "envoy/config/bootstrap/v3/bootstrap.pb.h" #include "envoy/config/core/v3/config_source.pb.h" #include "envoy/event/dispatcher.h" #include "envoy/service/discovery/v2/rtds.pb.h" #include "envoy/service/discovery/v3/discovery.pb.h" -#include "envoy/service/runtime/v3/rtds.pb.h" -#include "envoy/service/runtime/v3/rtds.pb.validate.h" #include "envoy/thread_local/thread_local.h" #include "envoy/type/v3/percent.pb.h" #include "envoy/type/v3/percent.pb.validate.h" @@ -26,135 +22,18 @@ #include "common/protobuf/utility.h" #include "common/runtime/runtime_features.h" +#include "absl/container/node_hash_map.h" +#include "absl/container/node_hash_set.h" #include "absl/strings/match.h" #include "absl/strings/numbers.h" -#include "openssl/rand.h" namespace Envoy { namespace Runtime { -const size_t RandomGeneratorImpl::UUID_LENGTH = 36; - -uint64_t RandomGeneratorImpl::random() { - // Prefetch 256 * sizeof(uint64_t) bytes of randomness. buffered_idx is initialized to 256, - // i.e. out-of-range value, so the buffer will be filled with randomness on the first call - // to this function. 
- // - // There is a diminishing return when increasing the prefetch size, as illustrated below in - // a test that generates 1,000,000,000 uint64_t numbers (results on Intel Xeon E5-1650v3). - // - // //test/common/runtime:runtime_impl_test - Random.DISABLED_benchmarkRandom - // - // prefetch | time | improvement - // (uint64_t) | (ms) | (% vs prev) - // --------------------------------- - // 32 | 25,931 | - // 64 | 15,124 | 42% faster - // 128 | 9,653 | 36% faster - // 256 | 6,930 | 28% faster <-- used right now - // 512 | 5,571 | 20% faster - // 1024 | 4,888 | 12% faster - // 2048 | 4,594 | 6% faster - // 4096 | 4,424 | 4% faster - // 8192 | 4,386 | 1% faster - - const size_t prefetch = 256; - static thread_local uint64_t buffered[prefetch]; - static thread_local size_t buffered_idx = prefetch; - - if (buffered_idx >= prefetch) { - int rc = RAND_bytes(reinterpret_cast(buffered), sizeof(buffered)); - ASSERT(rc == 1); - buffered_idx = 0; - } - - // Consume uint64_t from the buffer. - return buffered[buffered_idx++]; -} - -std::string RandomGeneratorImpl::uuid() { - // Prefetch 2048 bytes of randomness. buffered_idx is initialized to sizeof(buffered), - // i.e. out-of-range value, so the buffer will be filled with randomness on the first - // call to this function. - // - // There is a diminishing return when increasing the prefetch size, as illustrated below - // in a test that generates 100,000,000 UUIDs (results on Intel Xeon E5-1650v3). 
- // - // //test/common/runtime:uuid_util_test - UUIDUtilsTest.DISABLED_benchmark - // - // prefetch | time | improvement - // (bytes) | (ms) | (% vs prev) - // --------------------------------- - // 128 | 16,353 | - // 256 | 11,827 | 28% faster - // 512 | 9,676 | 18% faster - // 1024 | 8,594 | 11% faster - // 2048 | 8,097 | 6% faster <-- used right now - // 4096 | 7,790 | 4% faster - // 8192 | 7,737 | 1% faster - - static thread_local uint8_t buffered[2048]; - static thread_local size_t buffered_idx = sizeof(buffered); - - if (buffered_idx + 16 > sizeof(buffered)) { - int rc = RAND_bytes(buffered, sizeof(buffered)); - ASSERT(rc == 1); - buffered_idx = 0; - } - - // Consume 16 bytes from the buffer. - ASSERT(buffered_idx + 16 <= sizeof(buffered)); - uint8_t* rand = &buffered[buffered_idx]; - buffered_idx += 16; - - // Create UUID from Truly Random or Pseudo-Random Numbers. - // See: https://tools.ietf.org/html/rfc4122#section-4.4 - rand[6] = (rand[6] & 0x0f) | 0x40; // UUID version 4 (random) - rand[8] = (rand[8] & 0x3f) | 0x80; // UUID variant 1 (RFC4122) - - // Convert UUID to a string representation, e.g. a121e9e1-feae-4136-9e0e-6fac343d56c9. 
- static const char* const hex = "0123456789abcdef"; - char uuid[UUID_LENGTH]; - - for (uint8_t i = 0; i < 4; i++) { - const uint8_t d = rand[i]; - uuid[2 * i] = hex[d >> 4]; - uuid[2 * i + 1] = hex[d & 0x0f]; - } - - uuid[8] = '-'; - - for (uint8_t i = 4; i < 6; i++) { - const uint8_t d = rand[i]; - uuid[2 * i + 1] = hex[d >> 4]; - uuid[2 * i + 2] = hex[d & 0x0f]; - } - - uuid[13] = '-'; - - for (uint8_t i = 6; i < 8; i++) { - const uint8_t d = rand[i]; - uuid[2 * i + 2] = hex[d >> 4]; - uuid[2 * i + 3] = hex[d & 0x0f]; - } - - uuid[18] = '-'; - - for (uint8_t i = 8; i < 10; i++) { - const uint8_t d = rand[i]; - uuid[2 * i + 3] = hex[d >> 4]; - uuid[2 * i + 4] = hex[d & 0x0f]; - } - - uuid[23] = '-'; - - for (uint8_t i = 10; i < 16; i++) { - const uint8_t d = rand[i]; - uuid[2 * i + 4] = hex[d >> 4]; - uuid[2 * i + 5] = hex[d & 0x0f]; - } - - return std::string(uuid, UUID_LENGTH); +void SnapshotImpl::countDeprecatedFeatureUse() const { + stats_.deprecated_feature_use_.inc(); + // Similar to the above, but a gauge that isn't imported during a hot restart. + stats_.deprecated_feature_seen_since_process_start_.inc(); } bool SnapshotImpl::deprecatedFeatureEnabled(absl::string_view key, bool default_value) const { @@ -167,7 +46,8 @@ bool SnapshotImpl::deprecatedFeatureEnabled(absl::string_view key, bool default_ // The feature is allowed. It is assumed this check is called when the feature // is about to be used, so increment the feature use stat. 
- stats_.deprecated_feature_use_.inc(); + countDeprecatedFeatureUse(); + #ifdef ENVOY_DISABLE_DEPRECATED_FEATURES return false; #endif @@ -278,7 +158,7 @@ const std::vector& SnapshotImpl::getLayers() co return layers_; } -SnapshotImpl::SnapshotImpl(RandomGenerator& generator, RuntimeStats& stats, +SnapshotImpl::SnapshotImpl(Random::RandomGenerator& generator, RuntimeStats& stats, std::vector&& layers) : layers_{std::move(layers)}, generator_{generator}, stats_{stats} { for (const auto& layer : layers_) { @@ -353,7 +233,7 @@ void SnapshotImpl::parseEntryFractionalPercentValue(Entry& entry) { entry.fractional_percent_value_ = converted_fractional_percent; } -void AdminLayer::mergeValues(const std::unordered_map& values) { +void AdminLayer::mergeValues(const absl::node_hash_map& values) { for (const auto& kv : values) { values_.erase(kv.first); if (!kv.second.empty()) { @@ -466,12 +346,12 @@ void ProtoLayer::walkProtoValue(const ProtobufWkt::Value& v, const std::string& LoaderImpl::LoaderImpl(Event::Dispatcher& dispatcher, ThreadLocal::SlotAllocator& tls, const envoy::config::bootstrap::v3::LayeredRuntime& config, const LocalInfo::LocalInfo& local_info, Stats::Store& store, - RandomGenerator& generator, + Random::RandomGenerator& generator, ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api) : generator_(generator), stats_(generateStats(store)), tls_(tls.allocateSlot()), config_(config), service_cluster_(local_info.clusterName()), api_(api), - init_watcher_("RDTS", [this]() { onRdtsReady(); }) { - std::unordered_set layer_names; + init_watcher_("RTDS", [this]() { onRtdsReady(); }), store_(store) { + absl::node_hash_set layer_names; for (const auto& layer : config_.layers()) { auto ret = layer_names.insert(layer.name()); if (!ret.second) { @@ -508,14 +388,20 @@ LoaderImpl::LoaderImpl(Event::Dispatcher& dispatcher, ThreadLocal::SlotAllocator loadNewSnapshot(); } -void LoaderImpl::initialize(Upstream::ClusterManager& cm) { cm_ = &cm; } +void 
LoaderImpl::initialize(Upstream::ClusterManager& cm) { + cm_ = &cm; + + for (const auto& s : subscriptions_) { + s->createSubscription(); + } +} void LoaderImpl::startRtdsSubscriptions(ReadyCallback on_done) { on_rtds_initialized_ = on_done; init_manager_.initialize(init_watcher_); } -void LoaderImpl::onRdtsReady() { +void LoaderImpl::onRtdsReady() { ENVOY_LOG(info, "RTDS has finished initialization"); on_rtds_initialized_(); } @@ -524,17 +410,22 @@ RtdsSubscription::RtdsSubscription( LoaderImpl& parent, const envoy::config::bootstrap::v3::RuntimeLayer::RtdsLayer& rtds_layer, Stats::Store& store, ProtobufMessage::ValidationVisitor& validation_visitor) : Envoy::Config::SubscriptionBase( - rtds_layer.rtds_config().resource_api_version()), + rtds_layer.rtds_config().resource_api_version(), validation_visitor, "name"), parent_(parent), config_source_(rtds_layer.rtds_config()), store_(store), resource_name_(rtds_layer.name()), - init_target_("RTDS " + resource_name_, [this]() { start(); }), - validation_visitor_(validation_visitor) {} + init_target_("RTDS " + resource_name_, [this]() { start(); }) {} -void RtdsSubscription::onConfigUpdate(const Protobuf::RepeatedPtrField& resources, +void RtdsSubscription::createSubscription() { + const auto resource_name = getResourceName(); + subscription_ = parent_.cm_->subscriptionFactory().subscriptionFromConfigSource( + config_source_, Grpc::Common::typeUrl(resource_name), store_, *this, resource_decoder_); +} + +void RtdsSubscription::onConfigUpdate(const std::vector& resources, const std::string&) { validateUpdateSize(resources.size()); - auto runtime = MessageUtil::anyConvertAndValidate( - resources[0], validation_visitor_); + const auto& runtime = + dynamic_cast(resources[0].get().resource()); if (runtime.name() != resource_name_) { throw EnvoyException( fmt::format("Unexpected RTDS runtime (expecting {}): {}", resource_name_, runtime.name())); @@ -546,12 +437,10 @@ void RtdsSubscription::onConfigUpdate(const 
Protobuf::RepeatedPtrField& resources, + const std::vector& added_resources, const Protobuf::RepeatedPtrField&, const std::string&) { - validateUpdateSize(resources.size()); - Protobuf::RepeatedPtrField unwrapped_resource; - *unwrapped_resource.Add() = resources[0].resource(); - onConfigUpdate(unwrapped_resource, resources[0].version()); + validateUpdateSize(added_resources.size()); + onConfigUpdate(added_resources, added_resources[0].get().version()); } void RtdsSubscription::onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason reason, @@ -562,15 +451,7 @@ void RtdsSubscription::onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureRe init_target_.ready(); } -void RtdsSubscription::start() { - // We have to delay the subscription creation until init-time, since the - // cluster manager resources are not available in the constructor when - // instantiated in the server instance. - const auto resource_name = getResourceName(); - subscription_ = parent_.cm_->subscriptionFactory().subscriptionFromConfigSource( - config_source_, Grpc::Common::typeUrl(resource_name), store_, *this); - subscription_->start({resource_name_}); -} +void RtdsSubscription::start() { subscription_->start({resource_name_}); } void RtdsSubscription::validateUpdateSize(uint32_t num_resources) { if (num_resources != 1) { @@ -597,7 +478,7 @@ const Snapshot& LoaderImpl::snapshot() { return tls_->getTyped(); } -std::shared_ptr LoaderImpl::threadsafeSnapshot() { +SnapshotConstSharedPtr LoaderImpl::threadsafeSnapshot() { if (tls_->currentThreadRegistered()) { return std::dynamic_pointer_cast(tls_->get()); } @@ -608,7 +489,7 @@ std::shared_ptr LoaderImpl::threadsafeSnapshot() { } } -void LoaderImpl::mergeValues(const std::unordered_map& values) { +void LoaderImpl::mergeValues(const absl::node_hash_map& values) { if (admin_layer_ == nullptr) { throw EnvoyException("No admin layer specified"); } @@ -616,6 +497,8 @@ void LoaderImpl::mergeValues(const std::unordered_map& loadNewSnapshot(); } 
+Stats::Scope& LoaderImpl::getRootScope() { return store_; } + RuntimeStats LoaderImpl::generateStats(Stats::Store& store) { std::string prefix = "runtime."; RuntimeStats stats{ @@ -623,7 +506,7 @@ RuntimeStats LoaderImpl::generateStats(Stats::Store& store) { return stats; } -std::unique_ptr LoaderImpl::createNewSnapshot() { +SnapshotImplPtr LoaderImpl::createNewSnapshot() { std::vector layers; uint32_t disk_layers = 0; uint32_t error_layers = 0; diff --git a/source/common/runtime/runtime_impl.h b/source/common/runtime/runtime_impl.h index e59afd9c7361c..ee4c0cb3841cb 100644 --- a/source/common/runtime/runtime_impl.h +++ b/source/common/runtime/runtime_impl.h @@ -3,10 +3,10 @@ #include #include #include -#include #include "envoy/api/api.h" #include "envoy/common/exception.h" +#include "envoy/common/random_generator.h" #include "envoy/config/bootstrap/v3/bootstrap.pb.h" #include "envoy/config/core/v3/config_source.pb.h" #include "envoy/config/subscription.h" @@ -14,6 +14,7 @@ #include "envoy/runtime/runtime.h" #include "envoy/service/discovery/v3/discovery.pb.h" #include "envoy/service/runtime/v3/rtds.pb.h" +#include "envoy/service/runtime/v3/rtds.pb.validate.h" #include "envoy/stats/stats_macros.h" #include "envoy/stats/store.h" #include "envoy/thread_local/thread_local.h" @@ -28,6 +29,7 @@ #include "common/init/target_impl.h" #include "common/singleton/threadsafe_singleton.h" +#include "absl/container/node_hash_map.h" #include "spdlog/spdlog.h" namespace Envoy { @@ -35,19 +37,6 @@ namespace Runtime { using RuntimeSingleton = ThreadSafeSingleton; -/** - * Implementation of RandomGenerator that uses per-thread RANLUX generators seeded with current - * time. - */ -class RandomGeneratorImpl : public RandomGenerator { -public: - // Runtime::RandomGenerator - uint64_t random() override; - std::string uuid() override; - - static const size_t UUID_LENGTH; -}; - /** * All runtime stats. 
@see stats_macros.h */ @@ -58,6 +47,7 @@ class RandomGeneratorImpl : public RandomGenerator { COUNTER(override_dir_exists) \ COUNTER(override_dir_not_exists) \ GAUGE(admin_overrides_active, NeverImport) \ + GAUGE(deprecated_feature_seen_since_process_start, NeverImport) \ GAUGE(num_keys, NeverImport) \ GAUGE(num_layers, NeverImport) @@ -71,14 +61,13 @@ struct RuntimeStats { /** * Implementation of Snapshot whose source is the vector of layers passed to the constructor. */ -class SnapshotImpl : public Snapshot, - public ThreadLocal::ThreadLocalObject, - Logger::Loggable { +class SnapshotImpl : public Snapshot, Logger::Loggable { public: - SnapshotImpl(RandomGenerator& generator, RuntimeStats& stats, + SnapshotImpl(Random::RandomGenerator& generator, RuntimeStats& stats, std::vector&& layers); // Runtime::Snapshot + void countDeprecatedFeatureUse() const override; bool deprecatedFeatureEnabled(absl::string_view key, bool default_value) const override; bool runtimeFeatureEnabled(absl::string_view key) const override; bool featureEnabled(absl::string_view key, uint64_t default_value, uint64_t random_value, @@ -124,10 +113,12 @@ class SnapshotImpl : public Snapshot, const std::vector layers_; EntryMap values_; - RandomGenerator& generator_; + Random::RandomGenerator& generator_; RuntimeStats& stats_; }; +using SnapshotImplPtr = std::unique_ptr; + /** * Base implementation of OverrideLayer that by itself provides an empty values map. */ @@ -162,7 +153,7 @@ class AdminLayer : public OverrideLayerImpl { * Merge the provided values into our entry map. An empty value indicates that a key should be * removed from our map. 
*/ - void mergeValues(const std::unordered_map& values); + void mergeValues(const absl::node_hash_map& values); private: RuntimeStats& stats_; @@ -205,21 +196,18 @@ struct RtdsSubscription : Envoy::Config::SubscriptionBase& resources, + void onConfigUpdate(const std::vector& resources, + const std::string& version_info) override; + void onConfigUpdate(const std::vector& added_resources, + const Protobuf::RepeatedPtrField& removed_resources, const std::string&) override; - void onConfigUpdate( - const Protobuf::RepeatedPtrField& added_resources, - const Protobuf::RepeatedPtrField& removed_resources, - const std::string&) override; void onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason reason, const EnvoyException* e) override; - std::string resourceName(const ProtobufWkt::Any& resource) override { - return MessageUtil::anyConvert(resource).name(); - } void start(); void validateUpdateSize(uint32_t num_resources); + void createSubscription(); LoaderImpl& parent_; const envoy::config::core::v3::ConfigSource config_source_; @@ -228,7 +216,6 @@ struct RtdsSubscription : Envoy::Config::SubscriptionBase; @@ -244,27 +231,28 @@ class LoaderImpl : public Loader, Logger::Loggable { LoaderImpl(Event::Dispatcher& dispatcher, ThreadLocal::SlotAllocator& tls, const envoy::config::bootstrap::v3::LayeredRuntime& config, const LocalInfo::LocalInfo& local_info, Stats::Store& store, - RandomGenerator& generator, ProtobufMessage::ValidationVisitor& validation_visitor, - Api::Api& api); + Random::RandomGenerator& generator, + ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api); // Runtime::Loader void initialize(Upstream::ClusterManager& cm) override; const Snapshot& snapshot() override; - std::shared_ptr threadsafeSnapshot() override; - void mergeValues(const std::unordered_map& values) override; + SnapshotConstSharedPtr threadsafeSnapshot() override; + void mergeValues(const absl::node_hash_map& values) override; void startRtdsSubscriptions(ReadyCallback 
on_done) override; + Stats::Scope& getRootScope() override; private: friend RtdsSubscription; // Create a new Snapshot - virtual std::unique_ptr createNewSnapshot(); + SnapshotImplPtr createNewSnapshot(); // Load a new Snapshot into TLS void loadNewSnapshot(); RuntimeStats generateStats(Stats::Store& store); - void onRdtsReady(); + void onRtdsReady(); - RandomGenerator& generator_; + Random::RandomGenerator& generator_; RuntimeStats stats_; AdminLayerPtr admin_layer_; ThreadLocal::SlotPtr tls_; @@ -277,9 +265,10 @@ class LoaderImpl : public Loader, Logger::Loggable { Init::ManagerImpl init_manager_{"RTDS"}; std::vector subscriptions_; Upstream::ClusterManager* cm_{}; + Stats::Store& store_; absl::Mutex snapshot_mutex_; - std::shared_ptr thread_safe_snapshot_ ABSL_GUARDED_BY(snapshot_mutex_); + SnapshotConstSharedPtr thread_safe_snapshot_ ABSL_GUARDED_BY(snapshot_mutex_); }; } // namespace Runtime diff --git a/source/common/runtime/runtime_protos.h b/source/common/runtime/runtime_protos.h index 06b0e5816d5a2..855b145121db1 100644 --- a/source/common/runtime/runtime_protos.h +++ b/source/common/runtime/runtime_protos.h @@ -35,6 +35,8 @@ class Double { : runtime_key_(double_proto.runtime_key()), default_value_(double_proto.default_value()), runtime_(runtime) {} + const std::string& runtimeKey() const { return runtime_key_; } + double value() const { return runtime_.snapshot().getDouble(runtime_key_, default_value_); } private: diff --git a/source/common/secret/BUILD b/source/common/secret/BUILD index 719c3e884af91..f486c2f7ce8ed 100644 --- a/source/common/secret/BUILD +++ b/source/common/secret/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/common/secret/sds_api.cc b/source/common/secret/sds_api.cc index deab859adafd3..664de75f44395 100644 --- a/source/common/secret/sds_api.cc +++ 
b/source/common/secret/sds_api.cc @@ -1,11 +1,8 @@ #include "common/secret/sds_api.h" -#include - #include "envoy/api/v2/auth/cert.pb.h" #include "envoy/config/core/v3/config_source.pb.h" #include "envoy/extensions/transport_sockets/tls/v3/cert.pb.h" -#include "envoy/extensions/transport_sockets/tls/v3/cert.pb.validate.h" #include "envoy/service/discovery/v3/discovery.pb.h" #include "common/common/assert.h" @@ -21,14 +18,18 @@ SdsApi::SdsApi(envoy::config::core::v3::ConfigSource sds_config, absl::string_vi Init::Manager& init_manager, std::function destructor_cb, Event::Dispatcher& dispatcher, Api::Api& api) : Envoy::Config::SubscriptionBase( - sds_config.resource_api_version()), + sds_config.resource_api_version(), validation_visitor, "name"), init_target_(fmt::format("SdsApi {}", sds_config_name), [this] { initialize(); }), stats_(stats), sds_config_(std::move(sds_config)), sds_config_name_(sds_config_name), - secret_hash_(0), clean_up_(std::move(destructor_cb)), validation_visitor_(validation_visitor), + secret_hash_(0), clean_up_(std::move(destructor_cb)), subscription_factory_(subscription_factory), time_source_(time_source), secret_data_{sds_config_name_, "uninitialized", time_source_.systemTime()}, dispatcher_(dispatcher), api_(api) { + const auto resource_name = getResourceName(); + // This has to happen here (rather than in initialize()) as it can throw exceptions. + subscription_ = subscription_factory_.subscriptionFromConfigSource( + sds_config_, Grpc::Common::typeUrl(resource_name), stats_, *this, resource_decoder_); // TODO(JimmyCYJ): Implement chained_init_manager, so that multiple init_manager // can be chained together to behave as one init_manager. 
In that way, we let // two listeners which share same SdsApi to register at separate init managers, and @@ -36,12 +37,11 @@ SdsApi::SdsApi(envoy::config::core::v3::ConfigSource sds_config, absl::string_vi init_manager.add(init_target_); } -void SdsApi::onConfigUpdate(const Protobuf::RepeatedPtrField& resources, +void SdsApi::onConfigUpdate(const std::vector& resources, const std::string& version_info) { validateUpdateSize(resources.size()); - auto secret = - MessageUtil::anyConvertAndValidate( - resources[0], validation_visitor_); + const auto& secret = dynamic_cast( + resources[0].get().resource()); if (secret.name() != sds_config_name_) { throw EnvoyException( @@ -84,13 +84,10 @@ void SdsApi::onConfigUpdate(const Protobuf::RepeatedPtrField& init_target_.ready(); } -void SdsApi::onConfigUpdate( - const Protobuf::RepeatedPtrField& resources, - const Protobuf::RepeatedPtrField&, const std::string&) { - validateUpdateSize(resources.size()); - Protobuf::RepeatedPtrField unwrapped_resource; - *unwrapped_resource.Add() = resources[0].resource(); - onConfigUpdate(unwrapped_resource, resources[0].version()); +void SdsApi::onConfigUpdate(const std::vector& added_resources, + const Protobuf::RepeatedPtrField&, const std::string&) { + validateUpdateSize(added_resources.size()); + onConfigUpdate(added_resources, added_resources[0].get().version()); } void SdsApi::onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason reason, @@ -111,9 +108,8 @@ void SdsApi::validateUpdateSize(int num_resources) { } void SdsApi::initialize() { - const auto resource_name = getResourceName(); - subscription_ = subscription_factory_.subscriptionFromConfigSource( - sds_config_, Grpc::Common::typeUrl(resource_name), stats_, *this); + // Don't put any code here that can throw exceptions, this has been the cause of multiple + // hard-to-diagnose regressions. 
subscription_->start({sds_config_name_}); } diff --git a/source/common/secret/sds_api.h b/source/common/secret/sds_api.h index 0ca7c93f24aaa..e06467104a9d2 100644 --- a/source/common/secret/sds_api.h +++ b/source/common/secret/sds_api.h @@ -8,6 +8,7 @@ #include "envoy/config/subscription_factory.h" #include "envoy/event/dispatcher.h" #include "envoy/extensions/transport_sockets/tls/v3/cert.pb.h" +#include "envoy/extensions/transport_sockets/tls/v3/secret.pb.validate.h" #include "envoy/init/manager.h" #include "envoy/local_info/local_info.h" #include "envoy/runtime/runtime.h" @@ -56,16 +57,13 @@ class SdsApi : public Envoy::Config::SubscriptionBase< Common::CallbackManager<> update_callback_manager_; // Config::SubscriptionCallbacks - void onConfigUpdate(const Protobuf::RepeatedPtrField& resources, + void onConfigUpdate(const std::vector& resources, const std::string& version_info) override; - void onConfigUpdate(const Protobuf::RepeatedPtrField&, - const Protobuf::RepeatedPtrField&, const std::string&) override; + void onConfigUpdate(const std::vector& added_resources, + const Protobuf::RepeatedPtrField& removed_resources, + const std::string& system_version_info) override; void onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason reason, const EnvoyException* e) override; - std::string resourceName(const ProtobufWkt::Any& resource) override { - return MessageUtil::anyConvert(resource) - .name(); - } virtual std::vector getDataSourceFilenames() PURE; private: @@ -77,13 +75,12 @@ class SdsApi : public Envoy::Config::SubscriptionBase< Stats::Store& stats_; const envoy::config::core::v3::ConfigSource sds_config_; - std::unique_ptr subscription_; + Config::SubscriptionPtr subscription_; const std::string sds_config_name_; uint64_t secret_hash_; uint64_t files_hash_; Cleanup clean_up_; - ProtobufMessage::ValidationVisitor& validation_visitor_; Config::SubscriptionFactory& subscription_factory_; TimeSource& time_source_; SecretData secret_data_; @@ -118,7 +115,7 
@@ class TlsCertificateSdsApi : public SdsApi, public TlsCertificateConfigProvider sds_config, sds_config_name, secret_provider_context.clusterManager().subscriptionFactory(), secret_provider_context.dispatcher().timeSource(), secret_provider_context.messageValidationVisitor(), secret_provider_context.stats(), - *secret_provider_context.initManager(), destructor_cb, secret_provider_context.dispatcher(), + secret_provider_context.initManager(), destructor_cb, secret_provider_context.dispatcher(), secret_provider_context.api()); } @@ -179,7 +176,7 @@ class CertificateValidationContextSdsApi : public SdsApi, sds_config, sds_config_name, secret_provider_context.clusterManager().subscriptionFactory(), secret_provider_context.dispatcher().timeSource(), secret_provider_context.messageValidationVisitor(), secret_provider_context.stats(), - *secret_provider_context.initManager(), destructor_cb, secret_provider_context.dispatcher(), + secret_provider_context.initManager(), destructor_cb, secret_provider_context.dispatcher(), secret_provider_context.api()); } CertificateValidationContextSdsApi(const envoy::config::core::v3::ConfigSource& sds_config, @@ -250,7 +247,7 @@ class TlsSessionTicketKeysSdsApi : public SdsApi, public TlsSessionTicketKeysCon sds_config, sds_config_name, secret_provider_context.clusterManager().subscriptionFactory(), secret_provider_context.dispatcher().timeSource(), secret_provider_context.messageValidationVisitor(), secret_provider_context.stats(), - *secret_provider_context.initManager(), destructor_cb, secret_provider_context.dispatcher(), + secret_provider_context.initManager(), destructor_cb, secret_provider_context.dispatcher(), secret_provider_context.api()); } @@ -321,7 +318,7 @@ class GenericSecretSdsApi : public SdsApi, public GenericSecretConfigProvider { sds_config, sds_config_name, secret_provider_context.clusterManager().subscriptionFactory(), secret_provider_context.dispatcher().timeSource(), 
secret_provider_context.messageValidationVisitor(), secret_provider_context.stats(), - *secret_provider_context.initManager(), destructor_cb, secret_provider_context.dispatcher(), + secret_provider_context.initManager(), destructor_cb, secret_provider_context.dispatcher(), secret_provider_context.api()); } diff --git a/source/common/secret/secret_manager_impl.h b/source/common/secret/secret_manager_impl.h index 002bed3decb2a..799c7415d7ce0 100644 --- a/source/common/secret/secret_manager_impl.h +++ b/source/common/secret/secret_manager_impl.h @@ -1,7 +1,5 @@ #pragma once -#include - #include "envoy/config/core/v3/config_source.pb.h" #include "envoy/extensions/transport_sockets/tls/v3/cert.pb.h" #include "envoy/secret/secret_manager.h" @@ -13,6 +11,8 @@ #include "common/common/logger.h" #include "common/secret/sds_api.h" +#include "absl/container/node_hash_map.h" + namespace Envoy { namespace Secret { @@ -88,7 +88,6 @@ class SecretManagerImpl : public SecretManager { std::function unregister_secret_provider = [map_key, this]() { removeDynamicSecretProvider(map_key); }; - ASSERT(secret_provider_context.initManager() != nullptr); secret_provider = SecretType::create(secret_provider_context, sds_config_source, config_name, unregister_secret_provider); dynamic_secret_providers_[map_key] = secret_provider; @@ -116,22 +115,22 @@ class SecretManagerImpl : public SecretManager { ASSERT(num_deleted == 1, ""); } - std::unordered_map> dynamic_secret_providers_; + absl::node_hash_map> dynamic_secret_providers_; }; // Manages pairs of secret name and TlsCertificateConfigProviderSharedPtr. - std::unordered_map + absl::node_hash_map static_tls_certificate_providers_; // Manages pairs of secret name and CertificateValidationContextConfigProviderSharedPtr. 
- std::unordered_map + absl::node_hash_map static_certificate_validation_context_providers_; - std::unordered_map + absl::node_hash_map static_session_ticket_keys_providers_; // Manages pairs of secret name and GenericSecretConfigProviderSharedPtr. - std::unordered_map + absl::node_hash_map static_generic_secret_providers_; // map hash code of SDS config source and SdsApi object. diff --git a/source/common/shared_pool/BUILD b/source/common/shared_pool/BUILD index 447ad6538b565..1d55c9ec99a03 100644 --- a/source/common/shared_pool/BUILD +++ b/source/common/shared_pool/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/common/signal/BUILD b/source/common/signal/BUILD index 17dec6c9be55d..2a18144c87dbf 100644 --- a/source/common/signal/BUILD +++ b/source/common/signal/BUILD @@ -1,16 +1,20 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( name = "fatal_error_handler_lib", + srcs = ["fatal_error_handler.cc"], hdrs = ["fatal_error_handler.h"], + deps = [ + "//source/common/common:macros", + ], ) envoy_cc_library( diff --git a/source/common/signal/fatal_error_handler.cc b/source/common/signal/fatal_error_handler.cc new file mode 100644 index 0000000000000..125093e3c589a --- /dev/null +++ b/source/common/signal/fatal_error_handler.cc @@ -0,0 +1,74 @@ +#include "common/signal/fatal_error_handler.h" + +#include + +#include "common/common/macros.h" + +#include "absl/base/attributes.h" +#include "absl/synchronization/mutex.h" + +namespace Envoy { +namespace FatalErrorHandler { + +namespace { + +ABSL_CONST_INIT static absl::Mutex failure_mutex(absl::kConstInit); +// Since we can't grab the failure mutex on fatal error (snagging locks under +// fatal crash causing 
potential deadlocks) access the handler list as an atomic +// operation, which is async-signal-safe. If the crash handler runs at the same +// time as another thread tries to modify the list, one of them will get the +// list and the other will get nullptr instead. If the crash handler loses the +// race and gets nullptr, it won't run any of the registered error handlers. +using FailureFunctionList = std::list; +ABSL_CONST_INIT std::atomic fatal_error_handlers{nullptr}; + +} // namespace + +void registerFatalErrorHandler(const FatalErrorHandlerInterface& handler) { +#ifdef ENVOY_OBJECT_TRACE_ON_DUMP + absl::MutexLock l(&failure_mutex); + FailureFunctionList* list = fatal_error_handlers.exchange(nullptr, std::memory_order_relaxed); + if (list == nullptr) { + list = new FailureFunctionList; + } + list->push_back(&handler); + fatal_error_handlers.store(list, std::memory_order_release); +#else + UNREFERENCED_PARAMETER(handler); +#endif +} + +void removeFatalErrorHandler(const FatalErrorHandlerInterface& handler) { +#ifdef ENVOY_OBJECT_TRACE_ON_DUMP + absl::MutexLock l(&failure_mutex); + FailureFunctionList* list = fatal_error_handlers.exchange(nullptr, std::memory_order_relaxed); + if (list == nullptr) { + // removeFatalErrorHandler() may see an empty list of fatal error handlers + // if it's called at the same time as callFatalErrorHandlers(). In that case + // Envoy is in the middle of crashing anyway, but don't add a segfault on + // top of the crash. 
+ return; + } + list->remove(&handler); + if (list->empty()) { + delete list; + } else { + fatal_error_handlers.store(list, std::memory_order_release); + } +#else + UNREFERENCED_PARAMETER(handler); +#endif +} + +void callFatalErrorHandlers(std::ostream& os) { + FailureFunctionList* list = fatal_error_handlers.exchange(nullptr, std::memory_order_relaxed); + if (list != nullptr) { + for (const auto* handler : *list) { + handler->onFatalError(os); + } + delete list; + } +} + +} // namespace FatalErrorHandler +} // namespace Envoy diff --git a/source/common/signal/fatal_error_handler.h b/source/common/signal/fatal_error_handler.h index 95c185911d3e8..b06997af7e815 100644 --- a/source/common/signal/fatal_error_handler.h +++ b/source/common/signal/fatal_error_handler.h @@ -1,18 +1,39 @@ #pragma once +#include + #include "envoy/common/pure.h" namespace Envoy { // A simple class which allows registering functions to be called when Envoy -// receives one of the fatal signals, documented below. -// -// This is split out of signal_action.h because it is exempted from various -// builds. +// receives one of the fatal signals, documented in signal_action.h. class FatalErrorHandlerInterface { public: virtual ~FatalErrorHandlerInterface() = default; - virtual void onFatalError() const PURE; + // Called when Envoy receives a fatal signal. Must be async-signal-safe: in + // particular, it can't allocate memory. + virtual void onFatalError(std::ostream& os) const PURE; }; +namespace FatalErrorHandler { +/** + * Add this handler to the list of functions which will be called if Envoy + * receives a fatal signal. + */ +void registerFatalErrorHandler(const FatalErrorHandlerInterface& handler); + +/** + * Removes this handler from the list of functions which will be called if Envoy + * receives a fatal signal. 
+ */ +void removeFatalErrorHandler(const FatalErrorHandlerInterface& handler); + +/** + * Calls and unregisters the fatal error handlers registered with + * registerFatalErrorHandler. This is async-signal-safe and intended to be + * called from a fatal signal handler. + */ +void callFatalErrorHandlers(std::ostream& os); +} // namespace FatalErrorHandler } // namespace Envoy diff --git a/source/common/signal/signal_action.cc b/source/common/signal/signal_action.cc index 1b9e5cf78fa6d..c3a53c19da701 100644 --- a/source/common/signal/signal_action.cc +++ b/source/common/signal/signal_action.cc @@ -5,50 +5,10 @@ #include #include "common/common/assert.h" -#include "common/common/version.h" +#include "common/version/version.h" namespace Envoy { -ABSL_CONST_INIT static absl::Mutex failure_mutex(absl::kConstInit); -// Since we can't grab the failure mutex on fatal error (snagging locks under -// fatal crash causing potential deadlocks) access the handler list as an atomic -// operation, to minimize the chance that one thread is operating on the list -// while the crash handler is attempting to access it. -// This basically makes edits to the list thread-safe - if one thread is -// modifying the list rather than crashing in the crash handler due to accessing -// the list in a non-thread-safe manner, it simply won't log crash traces. 
-using FailureFunctionList = std::list; -ABSL_CONST_INIT std::atomic fatal_error_handlers{nullptr}; - -void SignalAction::registerFatalErrorHandler(const FatalErrorHandlerInterface& handler) { -#ifdef ENVOY_OBJECT_TRACE_ON_DUMP - absl::MutexLock l(&failure_mutex); - FailureFunctionList* list = fatal_error_handlers.exchange(nullptr, std::memory_order_relaxed); - if (list == nullptr) { - list = new FailureFunctionList; - } - list->push_back(&handler); - fatal_error_handlers.store(list, std::memory_order_release); -#else - UNREFERENCED_PARAMETER(handler); -#endif -} - -void SignalAction::removeFatalErrorHandler(const FatalErrorHandlerInterface& handler) { -#ifdef ENVOY_OBJECT_TRACE_ON_DUMP - absl::MutexLock l(&failure_mutex); - FailureFunctionList* list = fatal_error_handlers.exchange(nullptr, std::memory_order_relaxed); - list->remove(&handler); - if (list->empty()) { - delete list; - } else { - fatal_error_handlers.store(list, std::memory_order_release); - } -#else - UNREFERENCED_PARAMETER(handler); -#endif -} - constexpr int SignalAction::FATAL_SIGS[]; void SignalAction::sigHandler(int sig, siginfo_t* info, void* context) { @@ -62,13 +22,8 @@ void SignalAction::sigHandler(int sig, siginfo_t* info, void* context) { } tracer.logTrace(); - FailureFunctionList* list = fatal_error_handlers.exchange(nullptr, std::memory_order_relaxed); - if (list) { - // Finally after logging the stack trace, call any registered crash handlers. - for (const auto* handler : *list) { - handler->onFatalError(); - } - } + // Finally after logging the stack trace, call any registered crash handlers. 
+ FatalErrorHandler::callFatalErrorHandlers(std::cerr); signal(sig, SIG_DFL); raise(sig); diff --git a/source/common/signal/signal_action.h b/source/common/signal/signal_action.h index 0092dc4fffaa4..ffabf9cc3cde3 100644 --- a/source/common/signal/signal_action.h +++ b/source/common/signal/signal_action.h @@ -73,18 +73,6 @@ class SignalAction : NonCopyable { */ static void sigHandler(int sig, siginfo_t* info, void* context); - /** - * Add this handler to the list of functions which will be called if Envoy - * receives a fatal signal. - */ - static void registerFatalErrorHandler(const FatalErrorHandlerInterface& handler); - - /** - * Removes this handler from the list of functions which will be called if Envoy - * receives a fatal signal. - */ - static void removeFatalErrorHandler(const FatalErrorHandlerInterface& handler); - private: /** * Allocate this many bytes on each side of the area used for alt stack. @@ -142,7 +130,6 @@ class SignalAction : NonCopyable { char* altstack_{}; std::array previous_handlers_; stack_t previous_altstack_; - std::list fatal_error_handlers_; }; } // namespace Envoy diff --git a/source/common/singleton/BUILD b/source/common/singleton/BUILD index 1b52b93501a15..06d67beae1b50 100644 --- a/source/common/singleton/BUILD +++ b/source/common/singleton/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/common/singleton/manager_impl.h b/source/common/singleton/manager_impl.h index 6f55ad3fadb25..e6eb8cb9af97a 100644 --- a/source/common/singleton/manager_impl.h +++ b/source/common/singleton/manager_impl.h @@ -1,12 +1,12 @@ #pragma once -#include - #include "envoy/singleton/manager.h" #include "envoy/thread/thread.h" #include "common/common/non_copyable.h" +#include "absl/container/node_hash_map.h" + namespace Envoy { namespace Singleton { @@ -24,7 +24,7 @@ class 
ManagerImpl : public Manager, NonCopyable { InstanceSharedPtr get(const std::string& name, SingletonFactoryCb cb) override; private: - std::unordered_map> singletons_; + absl::node_hash_map> singletons_; Thread::ThreadFactory& thread_factory_; const Thread::ThreadId run_tid_; }; diff --git a/source/common/ssl/BUILD b/source/common/ssl/BUILD index 3f46b11b24717..0be754cc48093 100644 --- a/source/common/ssl/BUILD +++ b/source/common/ssl/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/common/ssl/certificate_validation_context_config_impl.h b/source/common/ssl/certificate_validation_context_config_impl.h index f054039ee1ba0..1636c2ed0713f 100644 --- a/source/common/ssl/certificate_validation_context_config_impl.h +++ b/source/common/ssl/certificate_validation_context_config_impl.h @@ -21,7 +21,7 @@ class CertificateValidationContextConfigImpl : public CertificateValidationConte const std::string& certificateRevocationList() const override { return certificate_revocation_list_; } - const std::string& certificateRevocationListPath() const override { + const std::string& certificateRevocationListPath() const final { return certificate_revocation_list_path_; } const std::vector& verifySubjectAltNameList() const override { diff --git a/source/common/stats/BUILD b/source/common/stats/BUILD index 256074df9cbfb..bc5c41f6e9e22 100644 --- a/source/common/stats/BUILD +++ b/source/common/stats/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( @@ -35,7 +35,9 @@ envoy_cc_library( ":metric_impl_lib", "//source/common/common:assert_lib", "//source/common/common:hash_lib", + "//source/common/common:matchers_lib", "//source/common/common:utility_lib", + 
"@envoy_api//envoy/config/metrics/v3:pkg_cc_proto", ], ) @@ -221,6 +223,7 @@ envoy_cc_library( name = "tag_producer_lib", srcs = ["tag_producer_impl.cc"], hdrs = ["tag_producer_impl.h"], + external_deps = ["abseil_node_hash_set"], deps = [ ":tag_extractor_lib", "//include/envoy/stats:stats_interface", @@ -249,6 +252,7 @@ envoy_cc_library( hdrs = ["thread_local_store.h"], deps = [ ":allocator_lib", + ":histogram_lib", ":null_counter_lib", ":null_gauge_lib", ":null_text_readout_lib", diff --git a/source/common/stats/allocator_impl.cc b/source/common/stats/allocator_impl.cc index 06db3ee37f52b..63e3159a842ed 100644 --- a/source/common/stats/allocator_impl.cc +++ b/source/common/stats/allocator_impl.cc @@ -63,7 +63,7 @@ template class StatsSharedImpl : public MetricImpl } // Metric - SymbolTable& symbolTable() override { return alloc_.symbolTable(); } + SymbolTable& symbolTable() final { return alloc_.symbolTable(); } bool used() const override { return flags_ & Metric::Flags::Used; } // RefcountInterface @@ -96,7 +96,7 @@ template class StatsSharedImpl : public MetricImpl * our ref-count decrement hits zero. The counters and gauges are held in * distinct sets so we virtualize this removal helper. 
*/ - virtual void removeFromSetLockHeld() EXCLUSIVE_LOCKS_REQUIRED(alloc_.mutex_) PURE; + virtual void removeFromSetLockHeld() ABSL_EXCLUSIVE_LOCKS_REQUIRED(alloc_.mutex_) PURE; protected: AllocatorImpl& alloc_; @@ -121,7 +121,7 @@ class CounterImpl : public StatsSharedImpl { const StatNameTagVector& stat_name_tags) : StatsSharedImpl(name, alloc, tag_extracted_name, stat_name_tags) {} - void removeFromSetLockHeld() EXCLUSIVE_LOCKS_REQUIRED(alloc_.mutex_) override { + void removeFromSetLockHeld() ABSL_EXCLUSIVE_LOCKS_REQUIRED(alloc_.mutex_) override { const size_t count = alloc_.counters_.erase(statName()); ASSERT(count == 1); } @@ -165,28 +165,28 @@ class GaugeImpl : public StatsSharedImpl { } } - void removeFromSetLockHeld() override EXCLUSIVE_LOCKS_REQUIRED(alloc_.mutex_) { + void removeFromSetLockHeld() override ABSL_EXCLUSIVE_LOCKS_REQUIRED(alloc_.mutex_) { const size_t count = alloc_.gauges_.erase(statName()); ASSERT(count == 1); } // Stats::Gauge void add(uint64_t amount) override { - value_ += amount; + child_value_ += amount; flags_ |= Flags::Used; } void dec() override { sub(1); } void inc() override { add(1); } void set(uint64_t value) override { - value_ = value; + child_value_ = value; flags_ |= Flags::Used; } void sub(uint64_t amount) override { - ASSERT(value_ >= amount); + ASSERT(child_value_ >= amount); ASSERT(used() || amount == 0); - value_ -= amount; + child_value_ -= amount; } - uint64_t value() const override { return value_; } + uint64_t value() const override { return child_value_ + parent_value_; } ImportMode importMode() const override { if (flags_ & Flags::NeverImport) { @@ -217,15 +217,18 @@ class GaugeImpl : public StatsSharedImpl { // A previous revision of Envoy may have transferred a gauge that it // thought was Accumulate. But the new version thinks it's NeverImport, so // we clear the accumulated value. 
- value_ = 0; + parent_value_ = 0; flags_ &= ~Flags::Used; flags_ |= Flags::NeverImport; break; } } + void setParentValue(uint64_t value) override { parent_value_ = value; } + private: - std::atomic value_{0}; + std::atomic parent_value_{0}; + std::atomic child_value_{0}; }; class TextReadoutImpl : public StatsSharedImpl { @@ -234,15 +237,16 @@ class TextReadoutImpl : public StatsSharedImpl { const StatNameTagVector& stat_name_tags) : StatsSharedImpl(name, alloc, tag_extracted_name, stat_name_tags) {} - void removeFromSetLockHeld() EXCLUSIVE_LOCKS_REQUIRED(alloc_.mutex_) override { + void removeFromSetLockHeld() ABSL_EXCLUSIVE_LOCKS_REQUIRED(alloc_.mutex_) override { const size_t count = alloc_.text_readouts_.erase(statName()); ASSERT(count == 1); } // Stats::TextReadout - void set(std::string&& value) override { + void set(absl::string_view value) override { + std::string value_copy(value); absl::MutexLock lock(&mutex_); - value_ = std::move(value); + value_ = std::move(value_copy); } std::string value() const override { absl::MutexLock lock(&mutex_); @@ -263,7 +267,7 @@ CounterSharedPtr AllocatorImpl::makeCounter(StatName name, StatName tag_extracte if (iter != counters_.end()) { return CounterSharedPtr(*iter); } - auto counter = CounterSharedPtr(new CounterImpl(name, *this, tag_extracted_name, stat_name_tags)); + auto counter = CounterSharedPtr(makeCounterInternal(name, tag_extracted_name, stat_name_tags)); counters_.insert(counter.get()); return counter; } @@ -307,5 +311,10 @@ bool AllocatorImpl::isMutexLockedForTest() { return !locked; } +Counter* AllocatorImpl::makeCounterInternal(StatName name, StatName tag_extracted_name, + const StatNameTagVector& stat_name_tags) { + return new CounterImpl(name, *this, tag_extracted_name, stat_name_tags); +} + } // namespace Stats } // namespace Envoy diff --git a/source/common/stats/allocator_impl.h b/source/common/stats/allocator_impl.h index 02e9265293587..469484866f18a 100644 --- a/source/common/stats/allocator_impl.h 
+++ b/source/common/stats/allocator_impl.h @@ -47,38 +47,24 @@ class AllocatorImpl : public Allocator { */ bool isMutexLockedForTest(); +protected: + virtual Counter* makeCounterInternal(StatName name, StatName tag_extracted_name, + const StatNameTagVector& stat_name_tags); + private: template friend class StatsSharedImpl; friend class CounterImpl; friend class GaugeImpl; friend class TextReadoutImpl; + friend class NotifyingAllocatorImpl; + + void removeCounterFromSetLockHeld(Counter* counter) ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_); + void removeGaugeFromSetLockHeld(Gauge* gauge) ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_); + void removeTextReadoutFromSetLockHeld(Counter* counter) ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_); - struct HeapStatHash { - using is_transparent = void; // NOLINT(readability-identifier-naming) - size_t operator()(const Metric* a) const { return a->statName().hash(); } - size_t operator()(StatName a) const { return a.hash(); } - }; - - struct HeapStatCompare { - using is_transparent = void; // NOLINT(readability-identifier-naming) - bool operator()(const Metric* a, const Metric* b) const { - return a->statName() == b->statName(); - } - bool operator()(const Metric* a, StatName b) const { return a->statName() == b; } - }; - - void removeCounterFromSetLockHeld(Counter* counter) EXCLUSIVE_LOCKS_REQUIRED(mutex_); - void removeGaugeFromSetLockHeld(Gauge* gauge) EXCLUSIVE_LOCKS_REQUIRED(mutex_); - void removeTextReadoutFromSetLockHeld(Counter* counter) EXCLUSIVE_LOCKS_REQUIRED(mutex_); - - // An unordered set of HeapStatData pointers which keys off the key() - // field in each object. This necessitates a custom comparator and hasher, which key off of the - // StatNamePtr's own StatNamePtrHash and StatNamePtrCompare operators. 
- template - using StatSet = absl::flat_hash_set; - StatSet counters_ GUARDED_BY(mutex_); - StatSet gauges_ GUARDED_BY(mutex_); - StatSet text_readouts_ GUARDED_BY(mutex_); + StatSet counters_ ABSL_GUARDED_BY(mutex_); + StatSet gauges_ ABSL_GUARDED_BY(mutex_); + StatSet text_readouts_ ABSL_GUARDED_BY(mutex_); SymbolTable& symbol_table_; diff --git a/source/common/stats/fake_symbol_table_impl.h b/source/common/stats/fake_symbol_table_impl.h index b9639ef44f4a2..19bfa00daa798 100644 --- a/source/common/stats/fake_symbol_table_impl.h +++ b/source/common/stats/fake_symbol_table_impl.h @@ -5,7 +5,6 @@ #include #include #include -#include #include #include "envoy/common/exception.h" @@ -95,7 +94,7 @@ class FakeSymbolTableImpl : public SymbolTable { void incRefCount(const StatName&) override {} StoragePtr encode(absl::string_view name) override { return encodeHelper(name); } StoragePtr makeDynamicStorage(absl::string_view name) override { return encodeHelper(name); } - SymbolTable::StoragePtr join(const std::vector& names) const override { + SymbolTable::StoragePtr join(const StatNameVec& names) const override { std::vector strings; for (StatName name : names) { if (!name.empty()) { diff --git a/source/common/stats/histogram_impl.cc b/source/common/stats/histogram_impl.cc index f7ab4897596bf..a2b866dc112d3 100644 --- a/source/common/stats/histogram_impl.cc +++ b/source/common/stats/histogram_impl.cc @@ -10,32 +10,33 @@ namespace Envoy { namespace Stats { -HistogramStatisticsImpl::HistogramStatisticsImpl(const histogram_t* histogram_ptr) - : computed_quantiles_(supportedQuantiles().size(), 0.0) { - hist_approx_quantile(histogram_ptr, supportedQuantiles().data(), supportedQuantiles().size(), +namespace { +const ConstSupportedBuckets default_buckets{}; +} + +HistogramStatisticsImpl::HistogramStatisticsImpl() + : supported_buckets_(default_buckets), computed_quantiles_(supportedQuantiles().size(), 0.0) {} + +HistogramStatisticsImpl::HistogramStatisticsImpl(const histogram_t* 
histogram_ptr, + ConstSupportedBuckets& supported_buckets) + : supported_buckets_(supported_buckets), + computed_quantiles_(HistogramStatisticsImpl::supportedQuantiles().size(), 0.0) { + hist_approx_quantile(histogram_ptr, supportedQuantiles().data(), + HistogramStatisticsImpl::supportedQuantiles().size(), computed_quantiles_.data()); sample_count_ = hist_sample_count(histogram_ptr); sample_sum_ = hist_approx_sum(histogram_ptr); - const std::vector& supported_buckets = supportedBuckets(); - computed_buckets_.reserve(supported_buckets.size()); - for (const auto bucket : supported_buckets) { + computed_buckets_.reserve(supported_buckets_.size()); + for (const auto bucket : supported_buckets_) { computed_buckets_.emplace_back(hist_approx_count_below(histogram_ptr, bucket)); } } const std::vector& HistogramStatisticsImpl::supportedQuantiles() const { - static const std::vector supported_quantiles = {0, 0.25, 0.5, 0.75, 0.90, - 0.95, 0.99, 0.995, 0.999, 1}; - return supported_quantiles; -} - -const std::vector& HistogramStatisticsImpl::supportedBuckets() const { - static const std::vector supported_buckets = { - 0.5, 1, 5, 10, 25, 50, 100, 250, 500, 1000, - 2500, 5000, 10000, 30000, 60000, 300000, 600000, 1800000, 3600000}; - return supported_buckets; + CONSTRUCT_ON_FIRST_USE(std::vector, + {0, 0.25, 0.5, 0.75, 0.90, 0.95, 0.99, 0.995, 0.999, 1}); } std::string HistogramStatisticsImpl::quantileSummary() const { @@ -51,7 +52,7 @@ std::string HistogramStatisticsImpl::quantileSummary() const { std::string HistogramStatisticsImpl::bucketSummary() const { std::vector bucket_summary; - const std::vector& supported_buckets = supportedBuckets(); + ConstSupportedBuckets& supported_buckets = supportedBuckets(); bucket_summary.reserve(supported_buckets.size()); for (size_t i = 0; i < supported_buckets.size(); ++i) { bucket_summary.push_back(fmt::format("B{:g}: {}", supported_buckets[i], computed_buckets_[i])); @@ -73,12 +74,39 @@ void HistogramStatisticsImpl::refresh(const 
histogram_t* new_histogram_ptr) { ASSERT(supportedBuckets().size() == computed_buckets_.size()); computed_buckets_.clear(); - const std::vector& supported_buckets = supportedBuckets(); + ConstSupportedBuckets& supported_buckets = supportedBuckets(); computed_buckets_.reserve(supported_buckets.size()); for (const auto bucket : supported_buckets) { computed_buckets_.emplace_back(hist_approx_count_below(new_histogram_ptr, bucket)); } } +HistogramSettingsImpl::HistogramSettingsImpl(const envoy::config::metrics::v3::StatsConfig& config) + : configs_([&config]() { + std::vector configs; + for (const auto& matcher : config.histogram_bucket_settings()) { + std::vector buckets{matcher.buckets().begin(), matcher.buckets().end()}; + std::sort(buckets.begin(), buckets.end()); + configs.emplace_back(matcher.match(), std::move(buckets)); + } + + return configs; + }()) {} + +const ConstSupportedBuckets& HistogramSettingsImpl::buckets(absl::string_view stat_name) const { + for (const auto& config : configs_) { + if (config.first.match(stat_name)) { + return config.second; + } + } + return defaultBuckets(); +} + +const ConstSupportedBuckets& HistogramSettingsImpl::defaultBuckets() { + CONSTRUCT_ON_FIRST_USE(ConstSupportedBuckets, + {0.5, 1, 5, 10, 25, 50, 100, 250, 500, 1000, 2500, 5000, 10000, 30000, + 60000, 300000, 600000, 1800000, 3600000}); +} + } // namespace Stats } // namespace Envoy diff --git a/source/common/stats/histogram_impl.h b/source/common/stats/histogram_impl.h index 332fca0e2b078..67c2d7d170668 100644 --- a/source/common/stats/histogram_impl.h +++ b/source/common/stats/histogram_impl.h @@ -3,10 +3,12 @@ #include #include +#include "envoy/config/metrics/v3/stats.pb.h" #include "envoy/stats/histogram.h" #include "envoy/stats/stats.h" #include "envoy/stats/store.h" +#include "common/common/matchers.h" #include "common/common/non_copyable.h" #include "common/stats/metric_impl.h" @@ -15,32 +17,53 @@ namespace Envoy { namespace Stats { +class HistogramSettingsImpl : 
public HistogramSettings { +public: + HistogramSettingsImpl() = default; + HistogramSettingsImpl(const envoy::config::metrics::v3::StatsConfig& config); + + // HistogramSettings + const ConstSupportedBuckets& buckets(absl::string_view stat_name) const override; + + static ConstSupportedBuckets& defaultBuckets(); + +private: + using Config = std::pair; + const std::vector configs_{}; +}; + /** * Implementation of HistogramStatistics for circllhist. */ class HistogramStatisticsImpl : public HistogramStatistics, NonCopyable { public: - HistogramStatisticsImpl() : computed_quantiles_(supportedQuantiles().size(), 0.0) {} + HistogramStatisticsImpl(); + /** * HistogramStatisticsImpl object is constructed using the passed in histogram. * @param histogram_ptr pointer to the histogram for which stats will be calculated. This pointer * will not be retained. */ - HistogramStatisticsImpl(const histogram_t* histogram_ptr); + HistogramStatisticsImpl( + const histogram_t* histogram_ptr, + ConstSupportedBuckets& supported_buckets = HistogramSettingsImpl::defaultBuckets()); + + static ConstSupportedBuckets& defaultSupportedBuckets(); void refresh(const histogram_t* new_histogram_ptr); // HistogramStatistics std::string quantileSummary() const override; std::string bucketSummary() const override; - const std::vector& supportedQuantiles() const override; + const std::vector& supportedQuantiles() const final; const std::vector& computedQuantiles() const override { return computed_quantiles_; } - const std::vector& supportedBuckets() const override; + ConstSupportedBuckets& supportedBuckets() const override { return supported_buckets_; } const std::vector& computedBuckets() const override { return computed_buckets_; } uint64_t sampleCount() const override { return sample_count_; } double sampleSum() const override { return sample_sum_; } private: + ConstSupportedBuckets& supported_buckets_; std::vector computed_quantiles_; std::vector computed_buckets_; uint64_t sample_count_; @@ -85,7 
+108,7 @@ class HistogramImpl : public HistogramImplHelper { void recordValue(uint64_t value) override { parent_.deliverHistogramToSinks(*this, value); } bool used() const override { return true; } - SymbolTable& symbolTable() override { return parent_.symbolTable(); } + SymbolTable& symbolTable() final { return parent_.symbolTable(); } private: Unit unit_; diff --git a/source/common/stats/isolated_store_impl.h b/source/common/stats/isolated_store_impl.h index 2427e71a1b314..57d14ebf45fdc 100644 --- a/source/common/stats/isolated_store_impl.h +++ b/source/common/stats/isolated_store_impl.h @@ -91,6 +91,15 @@ template class IsolatedStatsCache { return vec; } + bool iterate(const IterateFn& fn) const { + for (auto& stat : stats_) { + if (!fn(stat.second)) { + return false; + } + } + return true; + } + private: friend class IsolatedStoreImpl; @@ -154,6 +163,13 @@ class IsolatedStoreImpl : public StoreImpl { return text_readouts_.find(name); } + bool iterate(const IterateFn& fn) const override { return counters_.iterate(fn); } + bool iterate(const IterateFn& fn) const override { return gauges_.iterate(fn); } + bool iterate(const IterateFn& fn) const override { return histograms_.iterate(fn); } + bool iterate(const IterateFn& fn) const override { + return text_readouts_.iterate(fn); + } + // Stats::Store std::vector counters() const override { return counters_.toVector(); } std::vector gauges() const override { diff --git a/source/common/stats/metric_impl.h b/source/common/stats/metric_impl.h index c923395b992d4..52b577230fd3b 100644 --- a/source/common/stats/metric_impl.h +++ b/source/common/stats/metric_impl.h @@ -32,10 +32,35 @@ class MetricHelper { void iterateTagStatNames(const Metric::TagStatNameIterFn& fn) const; void clear(SymbolTable& symbol_table) { stat_names_.clear(symbol_table); } + // Hasher for metrics. 
+ struct Hash { + using is_transparent = void; // NOLINT(readability-identifier-naming) + size_t operator()(const Metric* a) const { return a->statName().hash(); } + size_t operator()(StatName a) const { return a.hash(); } + }; + + // Comparator for metrics. + struct Compare { + using is_transparent = void; // NOLINT(readability-identifier-naming) + bool operator()(const Metric* a, const Metric* b) const { + return a->statName() == b->statName(); + } + bool operator()(const Metric* a, StatName b) const { return a->statName() == b; } + }; + private: StatNameList stat_names_; }; +// An unordered set of stat pointers. which keys off Metric::statName(). +// This necessitates a custom comparator and hasher, using the StatNamePtr's +// own StatNamePtrHash and StatNamePtrCompare operators. +// +// This is used by AllocatorImpl for counters, gauges, and text-readouts, and +// is also used by thread_local_store.h for histograms. +template +using StatSet = absl::flat_hash_set; + /** * Partial implementation of the Metric interface on behalf of Counters, Gauges, * and Histograms. 
It leaves symbolTable() unimplemented so that implementations diff --git a/source/common/stats/null_gauge.h b/source/common/stats/null_gauge.h index c3e7ccc468711..bbd8b2e507359 100644 --- a/source/common/stats/null_gauge.h +++ b/source/common/stats/null_gauge.h @@ -27,6 +27,7 @@ class NullGaugeImpl : public MetricImpl { void inc() override {} void dec() override {} void set(uint64_t) override {} + void setParentValue(uint64_t) override {} void sub(uint64_t) override {} uint64_t value() const override { return 0; } ImportMode importMode() const override { return ImportMode::NeverImport; } diff --git a/source/common/stats/null_text_readout.h b/source/common/stats/null_text_readout.h index d3e9cc832e6bf..da6c0976abf74 100644 --- a/source/common/stats/null_text_readout.h +++ b/source/common/stats/null_text_readout.h @@ -23,7 +23,7 @@ class NullTextReadoutImpl : public MetricImpl { MetricImpl::clear(symbol_table_); } - void set(std::string&&) override {} + void set(absl::string_view) override {} std::string value() const override { return std::string(); } // Metric diff --git a/source/common/stats/scope_prefixer.h b/source/common/stats/scope_prefixer.h index b7c8743756204..b6872bc98dffd 100644 --- a/source/common/stats/scope_prefixer.h +++ b/source/common/stats/scope_prefixer.h @@ -49,12 +49,39 @@ class ScopePrefixer : public Scope { HistogramOptConstRef findHistogram(StatName name) const override; TextReadoutOptConstRef findTextReadout(StatName name) const override; - const SymbolTable& constSymbolTable() const override { return scope_.constSymbolTable(); } - SymbolTable& symbolTable() override { return scope_.symbolTable(); } + const SymbolTable& constSymbolTable() const final { return scope_.constSymbolTable(); } + SymbolTable& symbolTable() final { return scope_.symbolTable(); } NullGaugeImpl& nullGauge(const std::string& str) override { return scope_.nullGauge(str); } + bool iterate(const IterateFn& fn) const override { return iterHelper(fn); } + bool 
iterate(const IterateFn& fn) const override { return iterHelper(fn); } + bool iterate(const IterateFn& fn) const override { return iterHelper(fn); } + bool iterate(const IterateFn& fn) const override { return iterHelper(fn); } + private: + template bool iterHelper(const IterateFn& fn) const { + // We determine here what's in the scope by looking at name + // prefixes. Strictly speaking this is not correct, as a stat name can be in + // different scopes. But there is no data in `ScopePrefixer` to resurrect + // actual membership of a stat in a scope, so we go by name matching. Note + // that `ScopePrefixer` is not used in `ThreadLocalStore`, which has + // accurate maps describing which stats are in which scopes. + // + // TODO(jmarantz): In the scope of this limited implementation, it would be + // faster to match on the StatName prefix. This would be possible if + // SymbolTable exposed a split() method. + std::string prefix_str = scope_.symbolTable().toString(prefix_.statName()); + if (!prefix_str.empty() && !absl::EndsWith(prefix_str, ".")) { + prefix_str += "."; + } + IterateFn filter_scope = [&fn, + &prefix_str](const RefcountPtr& stat) -> bool { + return !absl::StartsWith(stat->name(), prefix_str) || fn(stat); + }; + return scope_.iterate(filter_scope); + } + Scope& scope_; StatNameStorage prefix_; }; diff --git a/source/common/stats/stat_merger.cc b/source/common/stats/stat_merger.cc index b32ff6d7f332b..870866a2615e7 100644 --- a/source/common/stats/stat_merger.cc +++ b/source/common/stats/stat_merger.cc @@ -7,6 +7,18 @@ namespace Stats { StatMerger::StatMerger(Store& target_store) : temp_scope_(target_store.createScope("")) {} +StatMerger::~StatMerger() { + // By the time a parent exits, all its contributions to accumulated gauges + // should be zero. But depending on the timing of the stat-merger + // communication shutdown and other shutdown activities on the parent, the + // gauges may not all be zero yet. 
So simply erase all the parent + // contributions. + for (StatName stat_name : parent_gauges_) { + Gauge& gauge = temp_scope_->gaugeFromStatName(stat_name, Gauge::ImportMode::Uninitialized); + gauge.setParentValue(0); + } +} + StatName StatMerger::DynamicContext::makeDynamicStatName(const std::string& name, const DynamicsMap& map) { auto iter = map.find(name); @@ -20,7 +32,7 @@ StatName StatMerger::DynamicContext::makeDynamicStatName(const std::string& name // Name has embedded dynamic segments; we'll need to join together the // static/dynamic StatName segments. - std::vector segments; + StatNameVec segments; uint32_t segment_index = 0; std::vector dynamic_segments; @@ -124,18 +136,16 @@ void StatMerger::mergeGauges(const Protobuf::Map& gauges, continue; } - uint64_t& parent_value_ref = parent_gauge_values_[gauge_ref.statName()]; - uint64_t old_parent_value = parent_value_ref; - uint64_t new_parent_value = gauge.second; - parent_value_ref = new_parent_value; - - // Note that new_parent_value may be less than old_parent_value, in which - // case 2s complement does its magic (-1 == 0xffffffffffffffff) and adding - // that to the gauge's current value works the same as subtraction. - gauge_ref.add(new_parent_value - old_parent_value); + const uint64_t new_parent_value = gauge.second; + parent_gauges_.insert(gauge_ref.statName()); + gauge_ref.setParentValue(new_parent_value); } } +void StatMerger::retainParentGaugeValue(Stats::StatName gauge_name) { + parent_gauges_.erase(gauge_name); +} + void StatMerger::mergeStats(const Protobuf::Map& counter_deltas, const Protobuf::Map& gauges, const DynamicsMap& dynamics) { diff --git a/source/common/stats/stat_merger.h b/source/common/stats/stat_merger.h index 2eb1ca2ff7929..6dbf01d25aa3a 100644 --- a/source/common/stats/stat_merger.h +++ b/source/common/stats/stat_merger.h @@ -41,20 +41,43 @@ class StatMerger { }; StatMerger(Stats::Store& target_store); + ~StatMerger(); - // Merge the values of stats_proto into stats_store. 
Counters are always straightforward - // addition, while gauges default to addition but have exceptions. + /** + * Merge the values of stats_proto into stats_store. Counters are always + * straightforward addition, while gauges default to addition but have + * exceptions. + * + * @param counter_deltas map of counter changes from parent + * @param gauges map of gauge changes from parent + * @param dynamics information about which segments of the names are dynamic. + */ void mergeStats(const Protobuf::Map& counter_deltas, const Protobuf::Map& gauges, const DynamicsMap& dynamics = DynamicsMap()); + /** + * Indicates that a gauge's value from the hot-restart parent should be + * retained, combining it with the child data. By default, data is transferred + * from parent gauges only during the hot-restart process, but the parent + * contribution is subtracted from the child when the parent terminates. This + * makes sense for gauges such as active connection counts, but is not + * appropriate for server.hot_restart_generation. + * + * This function must be called immediately prior to destruction of the + * StatMerger instance. + * + * @param gauge_name The gauge to be retained. + */ + void retainParentGaugeValue(Stats::StatName gauge_name); + private: void mergeCounters(const Protobuf::Map& counter_deltas, const DynamicsMap& dynamics_map); void mergeGauges(const Protobuf::Map& gauges, const DynamicsMap& dynamics_map); - StatNameHashMap parent_gauge_values_; + StatNameHashSet parent_gauges_; // A stats Scope for our in-the-merging-process counters to live in. Scopes conceptually hold // shared_ptrs to the stats that live in them, with the question of which stats are living in a // given scope determined by which stat names have been accessed via that scope. 
E.g., if you diff --git a/source/common/stats/symbol_table_creator.cc b/source/common/stats/symbol_table_creator.cc index 8b29313130b5c..755c8fcce2e41 100644 --- a/source/common/stats/symbol_table_creator.cc +++ b/source/common/stats/symbol_table_creator.cc @@ -4,7 +4,7 @@ namespace Envoy { namespace Stats { bool SymbolTableCreator::initialized_ = false; -bool SymbolTableCreator::use_fake_symbol_tables_ = true; +bool SymbolTableCreator::use_fake_symbol_tables_ = false; SymbolTablePtr SymbolTableCreator::initAndMakeSymbolTable(bool use_fake) { ASSERT(!initialized_ || (use_fake_symbol_tables_ == use_fake)); diff --git a/source/common/stats/symbol_table_impl.cc b/source/common/stats/symbol_table_impl.cc index 78d20924c3289..5a9a6df7461de 100644 --- a/source/common/stats/symbol_table_impl.cc +++ b/source/common/stats/symbol_table_impl.cc @@ -31,7 +31,7 @@ static constexpr uint32_t Low7Bits = 0x7f; static constexpr Symbol FirstValidSymbol = 1; static constexpr uint8_t LiteralStringIndicator = 0; -uint64_t StatName::dataSize() const { +size_t StatName::dataSize() const { if (size_and_data_ == nullptr) { return 0; } @@ -46,9 +46,9 @@ void StatName::debugPrint() { if (size_and_data_ == nullptr) { std::cerr << "Null StatName" << std::endl; } else { - const uint64_t nbytes = dataSize(); + const size_t nbytes = dataSize(); std::cerr << "dataSize=" << nbytes << ":"; - for (uint64_t i = 0; i < nbytes; ++i) { + for (size_t i = 0; i < nbytes; ++i) { std::cerr << " " << static_cast(data()[i]); } const SymbolVec encoding = SymbolTableImpl::Encoding::decodeSymbols(data(), dataSize()); @@ -67,8 +67,8 @@ SymbolTableImpl::Encoding::~Encoding() { ASSERT(mem_block_.capacity() == 0); } -uint64_t SymbolTableImpl::Encoding::encodingSizeBytes(uint64_t number) { - uint64_t num_bytes = 0; +size_t SymbolTableImpl::Encoding::encodingSizeBytes(uint64_t number) { + size_t num_bytes = 0; do { ++num_bytes; number >>= 7; @@ -106,7 +106,7 @@ void SymbolTableImpl::Encoding::addSymbols(const 
std::vector& symbols) { } } -std::pair SymbolTableImpl::Encoding::decodeNumber(const uint8_t* encoding) { +std::pair SymbolTableImpl::Encoding::decodeNumber(const uint8_t* encoding) { uint64_t number = 0; uint64_t uc = SpilloverMask; const uint8_t* start = encoding; @@ -117,8 +117,7 @@ std::pair SymbolTableImpl::Encoding::decodeNumber(const uint return std::make_pair(number, encoding - start); } -SymbolVec SymbolTableImpl::Encoding::decodeSymbols(const SymbolTable::Storage array, - uint64_t size) { +SymbolVec SymbolTableImpl::Encoding::decodeSymbols(const SymbolTable::Storage array, size_t size) { SymbolVec symbol_vec; symbol_vec.reserve(size); decodeTokens( @@ -128,9 +127,9 @@ SymbolVec SymbolTableImpl::Encoding::decodeSymbols(const SymbolTable::Storage ar } void SymbolTableImpl::Encoding::decodeTokens( - const SymbolTable::Storage array, uint64_t size, - const std::function& symbolTokenFn, - const std::function& stringViewTokenFn) { + const SymbolTable::Storage array, size_t size, + const std::function& symbol_token_fn, + const std::function& string_view_token_fn) { while (size > 0) { if (*array == LiteralStringIndicator) { // To avoid scanning memory to find the literal size during decode, we @@ -138,17 +137,17 @@ void SymbolTableImpl::Encoding::decodeTokens( ASSERT(size > 1); ++array; --size; - std::pair length_consumed = decodeNumber(array); + std::pair length_consumed = decodeNumber(array); uint64_t length = length_consumed.first; array += length_consumed.second; size -= length_consumed.second; ASSERT(size >= length); - stringViewTokenFn(absl::string_view(reinterpret_cast(array), length)); + string_view_token_fn(absl::string_view(reinterpret_cast(array), length)); size -= length; array += length; } else { - std::pair symbol_consumed = decodeNumber(array); - symbolTokenFn(symbol_consumed.first); + std::pair symbol_consumed = decodeNumber(array); + symbol_token_fn(symbol_consumed.first); size -= symbol_consumed.second; array += symbol_consumed.second; } @@ 
-156,13 +155,13 @@ void SymbolTableImpl::Encoding::decodeTokens( } std::vector SymbolTableImpl::decodeStrings(const SymbolTable::Storage array, - uint64_t size) const { + size_t size) const { std::vector strings; Thread::LockGuard lock(lock_); Encoding::decodeTokens( array, size, [this, &strings](Symbol symbol) - NO_THREAD_SAFETY_ANALYSIS { strings.push_back(fromSymbol(symbol)); }, + ABSL_NO_THREAD_SAFETY_ANALYSIS { strings.push_back(fromSymbol(symbol)); }, [&strings](absl::string_view str) { strings.push_back(str); }); return strings; } @@ -184,7 +183,7 @@ void SymbolTableImpl::Encoding::appendToMemBlock(StatName stat_name, } SymbolTableImpl::SymbolTableImpl() - // Have to be explicitly initialized, if we want to use the GUARDED_BY macro. + // Have to be explicitly initialized, if we want to use the ABSL_GUARDED_BY macro. : next_symbol_(FirstValidSymbol), monotonic_counter_(FirstValidSymbol) {} SymbolTableImpl::~SymbolTableImpl() { @@ -294,7 +293,7 @@ uint64_t SymbolTableImpl::getRecentLookups(const RecentLookupsFn& iter) const { Thread::LockGuard lock(lock_); recent_lookups_.forEach( [&name_count_map](absl::string_view str, uint64_t count) - NO_THREAD_SAFETY_ANALYSIS { name_count_map[std::string(str)] += count; }); + ABSL_NO_THREAD_SAFETY_ANALYSIS { name_count_map[std::string(str)] += count; }); total += recent_lookups_.total(); } @@ -388,13 +387,13 @@ Symbol SymbolTableImpl::toSymbol(absl::string_view sv) { } absl::string_view SymbolTableImpl::fromSymbol(const Symbol symbol) const - EXCLUSIVE_LOCKS_REQUIRED(lock_) { + ABSL_EXCLUSIVE_LOCKS_REQUIRED(lock_) { auto search = decode_map_.find(symbol); RELEASE_ASSERT(search != decode_map_.end(), "no such symbol"); return search->second->toStringView(); } -void SymbolTableImpl::newSymbol() EXCLUSIVE_LOCKS_REQUIRED(lock_) { +void SymbolTableImpl::newSymbol() ABSL_EXCLUSIVE_LOCKS_REQUIRED(lock_) { if (pool_.empty()) { next_symbol_ = ++monotonic_counter_; } else { @@ -451,8 +450,8 @@ 
StatNameStorage::StatNameStorage(absl::string_view name, SymbolTable& table) : StatNameStorageBase(table.encode(name)) {} StatNameStorage::StatNameStorage(StatName src, SymbolTable& table) { - const uint64_t size = src.size(); - MemBlockBuilder storage(size); + const size_t size = src.size(); + MemBlockBuilder storage(size); // Note: MemBlockBuilder takes uint64_t. src.copyToMemBlock(storage); setBytes(storage.release()); table.incRefCount(statName()); @@ -472,11 +471,11 @@ SymbolTable::StoragePtr SymbolTableImpl::makeDynamicStorage(absl::string_view na // payload_bytes is the total number of bytes needed to represent the // characters in name, plus their encoded size, plus the literal indicator. - const uint64_t payload_bytes = SymbolTableImpl::Encoding::totalSizeBytes(name.size()) + 1; + const size_t payload_bytes = SymbolTableImpl::Encoding::totalSizeBytes(name.size()) + 1; // total_bytes includes the payload_bytes, plus the LiteralStringIndicator, and // the length of those. - const uint64_t total_bytes = SymbolTableImpl::Encoding::totalSizeBytes(payload_bytes); + const size_t total_bytes = SymbolTableImpl::Encoding::totalSizeBytes(payload_bytes); MemBlockBuilder mem_block(total_bytes); SymbolTableImpl::Encoding::appendEncoding(payload_bytes, mem_block); @@ -550,7 +549,7 @@ void StatNameStorageSet::free(SymbolTable& symbol_table) { } SymbolTable::StoragePtr SymbolTableImpl::join(const StatNameVec& stat_names) const { - uint64_t num_bytes = 0; + size_t num_bytes = 0; for (StatName stat_name : stat_names) { if (!stat_name.empty()) { num_bytes += stat_name.dataSize(); @@ -629,7 +628,7 @@ void StatNameSet::rememberBuiltin(absl::string_view str) { builtin_stat_names_[str] = stat_name; } -StatName StatNameSet::getBuiltin(absl::string_view token, StatName fallback) { +StatName StatNameSet::getBuiltin(absl::string_view token, StatName fallback) const { // If token was recorded as a built-in during initialization, we can // service this request lock-free. 
const auto iter = builtin_stat_names_.find(token); diff --git a/source/common/stats/symbol_table_impl.h b/source/common/stats/symbol_table_impl.h index 9121e56739cea..816799461803f 100644 --- a/source/common/stats/symbol_table_impl.h +++ b/source/common/stats/symbol_table_impl.h @@ -5,7 +5,6 @@ #include #include #include -#include #include #include "envoy/common/exception.h" @@ -93,7 +92,7 @@ class SymbolTableImpl : public SymbolTable { /** * Decodes a uint8_t array into a SymbolVec. */ - static SymbolVec decodeSymbols(const SymbolTable::Storage array, uint64_t size); + static SymbolVec decodeSymbols(const SymbolTable::Storage array, size_t size); /** * Decodes a uint8_t array into a sequence of symbols and literal strings. @@ -103,26 +102,21 @@ class SymbolTableImpl : public SymbolTable { * * @param array the StatName encoded as a uint8_t array. * @param size the size of the array in bytes. - * @param symbolTokenFn a function to be called whenever a symbol is encountered in the array. - * @param stringVIewTokeNFn a function to be called whenever a string literal is encountered. + * @param symbol_token_fn a function to be called whenever a symbol is encountered in the array. + * @param string_view_token_fn a function to be called whenever a string literal is encountered. */ - static void decodeTokens(const SymbolTable::Storage array, uint64_t size, - const std::function& symbolTokenFn, - const std::function& stringViewTokenFn); + static void decodeTokens(const SymbolTable::Storage array, size_t size, + const std::function& symbol_token_fn, + const std::function& string_view_token_fn); /** * Returns the number of bytes required to represent StatName as a uint8_t * array, including the encoded size. */ - uint64_t bytesRequired() const { + size_t bytesRequired() const { return data_bytes_required_ + encodingSizeBytes(data_bytes_required_); } - /** - * @return the number of uint8_t entries we collected while adding symbols. 
- */ - uint64_t dataBytesRequired() const { return data_bytes_required_; } - /** * Moves the contents of the vector into an allocated array. The array * must have been allocated with bytesRequired() bytes. @@ -135,13 +129,13 @@ class SymbolTableImpl : public SymbolTable { * @param number A number to encode in a variable length byte-array. * @return The number of bytes it would take to encode the number. */ - static uint64_t encodingSizeBytes(uint64_t number); + static size_t encodingSizeBytes(uint64_t number); /** * @param num_data_bytes The number of bytes in a data-block. * @return The total number of bytes required for the data-block and its encoded size. */ - static uint64_t totalSizeBytes(uint64_t num_data_bytes) { + static size_t totalSizeBytes(size_t num_data_bytes) { return encodingSizeBytes(num_data_bytes) + num_data_bytes; } @@ -172,12 +166,10 @@ class SymbolTableImpl : public SymbolTable { * @param The encoded byte array, written previously by appendEncoding. * @return A pair containing the decoded number, and the number of bytes consumed from encoding. */ - static std::pair decodeNumber(const uint8_t* encoding); - - StoragePtr release() { return mem_block_.release(); } + static std::pair decodeNumber(const uint8_t* encoding); private: - uint64_t data_bytes_required_{0}; + size_t data_bytes_required_{0}; MemBlockBuilder mem_block_; }; @@ -236,7 +228,7 @@ class SymbolTableImpl : public SymbolTable { * @param size the size of the array in bytes. * @return std::string the retrieved stat name. */ - std::vector decodeStrings(const Storage array, uint64_t size) const; + std::vector decodeStrings(const Storage array, size_t size) const; /** * Convenience function for encode(), symbolizing one string segment at a time. @@ -244,7 +236,7 @@ class SymbolTableImpl : public SymbolTable { * @param sv the individual string to be encoded as a symbol. * @return Symbol the encoded string. 
*/ - Symbol toSymbol(absl::string_view sv) EXCLUSIVE_LOCKS_REQUIRED(lock_); + Symbol toSymbol(absl::string_view sv) ABSL_EXCLUSIVE_LOCKS_REQUIRED(lock_); /** * Convenience function for decode(), decoding one symbol at a time. @@ -252,7 +244,7 @@ class SymbolTableImpl : public SymbolTable { * @param symbol the individual symbol to be decoded. * @return absl::string_view the decoded string. */ - absl::string_view fromSymbol(Symbol symbol) const EXCLUSIVE_LOCKS_REQUIRED(lock_); + absl::string_view fromSymbol(Symbol symbol) const ABSL_EXCLUSIVE_LOCKS_REQUIRED(lock_); /** * Stages a new symbol for use. To be called after a successful insertion. @@ -275,7 +267,7 @@ class SymbolTableImpl : public SymbolTable { // Stores the symbol to be used at next insertion. This should exist ahead of insertion time so // that if insertion succeeds, the value written is the correct one. - Symbol next_symbol_ GUARDED_BY(lock_); + Symbol next_symbol_ ABSL_GUARDED_BY(lock_); // If the free pool is exhausted, we monotonically increase this counter. Symbol monotonic_counter_; @@ -285,14 +277,14 @@ class SymbolTableImpl : public SymbolTable { // Using absl::string_view lets us only store the complete string once, in the decode map. using EncodeMap = absl::flat_hash_map; using DecodeMap = absl::flat_hash_map; - EncodeMap encode_map_ GUARDED_BY(lock_); - DecodeMap decode_map_ GUARDED_BY(lock_); + EncodeMap encode_map_ ABSL_GUARDED_BY(lock_); + DecodeMap decode_map_ ABSL_GUARDED_BY(lock_); // Free pool of symbols for re-use. // TODO(ambuc): There might be an optimization here relating to storing ranges of freed symbols // using an Envoy::IntervalSet. - std::stack pool_ GUARDED_BY(lock_); - RecentLookups recent_lookups_ GUARDED_BY(lock_); + std::stack pool_ ABSL_GUARDED_BY(lock_); + RecentLookups recent_lookups_ ABSL_GUARDED_BY(lock_); }; // Base class for holding the backing-storing for a StatName. 
The two derived @@ -393,12 +385,7 @@ class StatName { return H::combine(std::move(h), absl::string_view()); } - // Casts the raw data as a string_view. Note that this string_view will not - // be in human-readable form, but it will be compatible with a string-view - // hasher. - const char* cdata = reinterpret_cast(stat_name.data()); - absl::string_view data_as_string_view = absl::string_view(cdata, stat_name.dataSize()); - return H::combine(std::move(h), data_as_string_view); + return H::combine(std::move(h), stat_name.dataAsStringView()); } /** @@ -410,22 +397,21 @@ class StatName { uint64_t hash() const { return absl::Hash()(*this); } bool operator==(const StatName& rhs) const { - const uint64_t sz = dataSize(); - return sz == rhs.dataSize() && memcmp(data(), rhs.data(), sz * sizeof(uint8_t)) == 0; + return dataAsStringView() == rhs.dataAsStringView(); } bool operator!=(const StatName& rhs) const { return !(*this == rhs); } /** - * @return uint64_t the number of bytes in the symbol array, excluding the - * overhead for the size itself. + * @return size_t the number of bytes in the symbol array, excluding the + * overhead for the size itself. */ - uint64_t dataSize() const; + size_t dataSize() const; /** - * @return uint64_t the number of bytes in the symbol array, including the + * @return size_t the number of bytes in the symbol array, including the * overhead for the size itself. */ - uint64_t size() const { return SymbolTableImpl::Encoding::totalSizeBytes(dataSize()); } + size_t size() const { return SymbolTableImpl::Encoding::totalSizeBytes(dataSize()); } /** * Copies the entire StatName representation into a MemBlockBuilder, including @@ -459,22 +445,30 @@ class StatName { * @return A pointer to the first byte of data (skipping over size bytes). 
*/ const uint8_t* data() const { + if (size_and_data_ == nullptr) { + return nullptr; + } return size_and_data_ + SymbolTableImpl::Encoding::encodingSizeBytes(dataSize()); } const uint8_t* dataIncludingSize() const { return size_and_data_; } - /** - * @return A pointer to the buffer, including the size bytes. - */ - const uint8_t* sizeAndData() const { return size_and_data_; } - /** * @return whether this is empty. */ bool empty() const { return size_and_data_ == nullptr || dataSize() == 0; } private: + /** + * Casts the raw data as a string_view. Note that this string_view will not + * be in human-readable form, but it will be compatible with a string-view + * hasher and comparator. + */ + absl::string_view dataAsStringView() const { + return {reinterpret_cast(data()), + static_cast(dataSize())}; + } + const uint8_t* size_and_data_{nullptr}; }; @@ -503,7 +497,7 @@ class StatNameManagedStorage : public StatNameStorage { // generate symbols for it. StatNameManagedStorage(absl::string_view name, SymbolTable& table) : StatNameStorage(name, table), symbol_table_(table) {} - StatNameManagedStorage(StatNameManagedStorage&& src) + StatNameManagedStorage(StatNameManagedStorage&& src) noexcept : StatNameStorage(std::move(src)), symbol_table_(src.symbol_table_) {} ~StatNameManagedStorage() { free(symbol_table_); } @@ -607,11 +601,6 @@ class StatNameDynamicPool { public: explicit StatNameDynamicPool(SymbolTable& symbol_table) : symbol_table_(symbol_table) {} - /** - * Removes all StatNames from the pool. - */ - void clear() { storage_vector_.clear(); } - /** * @param name the name to add the container. * @return the StatName held in the container for this name. @@ -778,14 +767,22 @@ class StatNameStorageSet { HashSet hash_set_; }; -// Captures StatNames for lookup by string, keeping two maps: a map of -// 'built-ins' that is expected to be populated during initialization, and a map -// of dynamically discovered names. 
The latter map is protected by a mutex, and -// can be mutated at runtime. +// Captures StatNames for lookup by string, keeping a map of 'built-ins' that is +// expected to be populated during initialization. // // Ideally, builtins should be added during process initialization, in the // outermost relevant context. And as the builtins map is not mutex protected, -// builtins must *not* be added in the request-path. +// builtins must *not* be added to an existing StatNameSet in the request-path. +// +// It is fine to populate a new StatNameSet when (for example) an xDS +// message reveals a new set of names to be used as stats. The population must +// be completed prior to exposing the new StatNameSet to worker threads. +// +// To create stats using names discovered in the request path, dynamic stat +// names must be used (see StatNameDynamicStorage). Consider using helper +// methods such as Stats::Utility::counterFromElements in common/stats/utility.h +// to simplify the process of allocating and combining stat names and creating +// counters, gauges, and histograms from them. class StatNameSet { public: // This object must be instantiated via SymbolTable::makeSet(), thus constructor is private. @@ -817,10 +814,26 @@ class StatNameSet { * * @return the StatName or fallback. */ - StatName getBuiltin(absl::string_view token, StatName fallback); + StatName getBuiltin(absl::string_view token, StatName fallback) const; /** * Adds a StatName using the pool, but without remembering it in any maps. + * + * For convenience, StatNameSet offers pass-through thread-safe access to + * its mutex-protected pool. This is useful in constructor initializers, when + * StatNames are needed both from compile-time constants, as well as from + * other constructor args, e.g. 
+ * MyClass(const std::vector& strings, Stats::SymbolTable& symbol_table) + * : stat_name_set_(symbol_table), + * known_const_(stat_name_set_.add("known_const")) { // unmapped constants from pool + * stat_name_set_.rememberBuiltins(strings); // mapped builtins. + * } + * This avoids the need to make two different pools; one backing the + * StatNameSet mapped entries, and the other backing the set passed in via the + * constructor. + * + * @param str The string to add as a StatName + * @return The StatName for str. */ StatName add(absl::string_view str) { absl::MutexLock lock(&mutex_); @@ -835,7 +848,7 @@ class StatNameSet { const std::string name_; Stats::SymbolTable& symbol_table_; - Stats::StatNamePool pool_ GUARDED_BY(mutex_); + Stats::StatNamePool pool_ ABSL_GUARDED_BY(mutex_); mutable absl::Mutex mutex_; using StringStatNameMap = absl::flat_hash_map; StringStatNameMap builtin_stat_names_; diff --git a/source/common/stats/tag_producer_impl.cc b/source/common/stats/tag_producer_impl.cc index 84b2ab158142c..255dfcaeed39b 100644 --- a/source/common/stats/tag_producer_impl.cc +++ b/source/common/stats/tag_producer_impl.cc @@ -14,7 +14,7 @@ namespace Stats { TagProducerImpl::TagProducerImpl(const envoy::config::metrics::v3::StatsConfig& config) { // To check name conflict. 
reserveResources(config); - std::unordered_set names = addDefaultExtractors(config); + absl::node_hash_set names = addDefaultExtractors(config); for (const auto& tag_specifier : config.stats_tags()) { const std::string& name = tag_specifier.tag_name(); @@ -97,9 +97,9 @@ void TagProducerImpl::reserveResources(const envoy::config::metrics::v3::StatsCo default_tags_.reserve(config.stats_tags().size()); } -std::unordered_set +absl::node_hash_set TagProducerImpl::addDefaultExtractors(const envoy::config::metrics::v3::StatsConfig& config) { - std::unordered_set names; + absl::node_hash_set names; if (!config.has_use_all_default_tags() || config.use_all_default_tags().value()) { for (const auto& desc : Config::TagNames::get().descriptorVec()) { names.emplace(desc.name_); diff --git a/source/common/stats/tag_producer_impl.h b/source/common/stats/tag_producer_impl.h index e8b27307b2b8a..093d4021389b8 100644 --- a/source/common/stats/tag_producer_impl.h +++ b/source/common/stats/tag_producer_impl.h @@ -4,8 +4,6 @@ #include #include #include -#include -#include #include #include "envoy/config/metrics/v3/stats.pb.h" @@ -18,6 +16,7 @@ #include "common/protobuf/protobuf.h" #include "absl/container/flat_hash_map.h" +#include "absl/container/node_hash_set.h" #include "absl/strings/string_view.h" namespace Envoy { @@ -71,9 +70,9 @@ class TagProducerImpl : public TagProducer { * into a string-set for dup-detection against new stat names * specified in the configuration. * @param config const envoy::config::metrics::v2::StatsConfig& the config. 
- * @return names std::unordered_set the set of names to populate + * @return names absl::node_hash_set the set of names to populate */ - std::unordered_set + absl::node_hash_set addDefaultExtractors(const envoy::config::metrics::v3::StatsConfig& config); /** diff --git a/source/common/stats/tag_utility.cc b/source/common/stats/tag_utility.cc index 6875f046d72c1..7710277fd5ba2 100644 --- a/source/common/stats/tag_utility.cc +++ b/source/common/stats/tag_utility.cc @@ -37,7 +37,7 @@ TagStatNameJoiner::TagStatNameJoiner(StatName stat_name, SymbolTable::StoragePtr TagStatNameJoiner::joinNameAndTags(StatName name, const StatNameTagVector& tags, SymbolTable& symbol_table) { - std::vector stat_names; + StatNameVec stat_names; stat_names.reserve(1 + 2 * tags.size()); stat_names.emplace_back(name); @@ -50,4 +50,4 @@ SymbolTable::StoragePtr TagStatNameJoiner::joinNameAndTags(StatName name, } } // namespace TagUtility } // namespace Stats -} // namespace Envoy \ No newline at end of file +} // namespace Envoy diff --git a/source/common/stats/thread_local_store.cc b/source/common/stats/thread_local_store.cc index 5a9a47f912fa1..54d0c78eba9bd 100644 --- a/source/common/stats/thread_local_store.cc +++ b/source/common/stats/thread_local_store.cc @@ -12,6 +12,7 @@ #include "envoy/stats/stats.h" #include "common/common/lock_guard.h" +#include "common/stats/histogram_impl.h" #include "common/stats/stats_matcher_impl.h" #include "common/stats/tag_producer_impl.h" #include "common/stats/tag_utility.h" @@ -24,11 +25,18 @@ namespace Stats { const char ThreadLocalStoreImpl::MainDispatcherCleanupSync[] = "main-dispatcher-cleanup"; ThreadLocalStoreImpl::ThreadLocalStoreImpl(Allocator& alloc) - : alloc_(alloc), default_scope_(createScope("")), + : alloc_(alloc), default_scope_(ThreadLocalStoreImpl::createScope("")), tag_producer_(std::make_unique()), - stats_matcher_(std::make_unique()), heap_allocator_(alloc.symbolTable()), - null_counter_(alloc.symbolTable()), 
null_gauge_(alloc.symbolTable()), - null_histogram_(alloc.symbolTable()), null_text_readout_(alloc.symbolTable()) {} + stats_matcher_(std::make_unique()), + histogram_settings_(std::make_unique()), + heap_allocator_(alloc.symbolTable()), null_counter_(alloc.symbolTable()), + null_gauge_(alloc.symbolTable()), null_histogram_(alloc.symbolTable()), + null_text_readout_(alloc.symbolTable()), + well_known_tags_(alloc.symbolTable().makeSet("well_known_tags")) { + for (const auto& desc : Config::TagNames::get().descriptorVec()) { + well_known_tags_->rememberBuiltin(desc.name_); + } +} ThreadLocalStoreImpl::~ThreadLocalStoreImpl() { ASSERT(shutting_down_ || !threading_ever_initialized_); @@ -36,6 +44,14 @@ ThreadLocalStoreImpl::~ThreadLocalStoreImpl() { ASSERT(scopes_.empty()); } +void ThreadLocalStoreImpl::setHistogramSettings(HistogramSettingsConstPtr&& histogram_settings) { + Thread::LockGuard lock(lock_); + for (ScopeImpl* scope : scopes_) { + ASSERT(scope->central_cache_->histograms_.empty()); + } + histogram_settings_ = std::move(histogram_settings); +} + void ThreadLocalStoreImpl::setStatsMatcher(StatsMatcherPtr&& stats_matcher) { stats_matcher_ = std::move(stats_matcher); if (stats_matcher_->acceptsAll()) { @@ -47,17 +63,27 @@ void ThreadLocalStoreImpl::setStatsMatcher(StatsMatcherPtr&& stats_matcher) { // in the default_scope. There should be no requests, so there will // be no copies in TLS caches. Thread::LockGuard lock(lock_); + const uint32_t first_histogram_index = deleted_histograms_.size(); for (ScopeImpl* scope : scopes_) { removeRejectedStats(scope->central_cache_->counters_, deleted_counters_); removeRejectedStats(scope->central_cache_->gauges_, deleted_gauges_); removeRejectedStats(scope->central_cache_->histograms_, deleted_histograms_); removeRejectedStats(scope->central_cache_->text_readouts_, deleted_text_readouts_); } + + // Remove any newly rejected histograms from histogram_set_. 
+ { + Thread::LockGuard hist_lock(hist_mutex_); + for (uint32_t i = first_histogram_index; i < deleted_histograms_.size(); ++i) { + uint32_t erased = histogram_set_.erase(deleted_histograms_[i].get()); + ASSERT(erased == 1); + } + } } template void ThreadLocalStoreImpl::removeRejectedStats(StatMapClass& map, StatListClass& list) { - std::vector remove_list; + StatNameVec remove_list; for (auto& stat : map) { if (rejects(stat.first)) { remove_list.push_back(stat.first); @@ -72,11 +98,7 @@ void ThreadLocalStoreImpl::removeRejectedStats(StatMapClass& map, StatListClass& } bool ThreadLocalStoreImpl::rejects(StatName stat_name) const { - // Don't both elaborating the StatName there are no pattern-based - // exclusions;/inclusions. - if (stats_matcher_->acceptsAll()) { - return false; - } + ASSERT(!stats_matcher_->acceptsAll()); // TODO(ambuc): If stats_matcher_ depends on regexes, this operation (on the // hot path) could become prohibitively expensive. Revisit this usage in the @@ -148,16 +170,11 @@ std::vector ThreadLocalStoreImpl::textReadouts() const { std::vector ThreadLocalStoreImpl::histograms() const { std::vector ret; - Thread::LockGuard lock(lock_); - // TODO(ramaraochavali): As histograms don't share storage, there is a chance of duplicate names - // here. We need to create global storage for histograms similar to how we have a central storage - // in shared memory for counters/gauges. In the interim, no de-dup is done here. This may result - // in histograms with duplicate names, but until shared storage is implemented it's ultimately - // less confusing for users who have such configs. 
- for (ScopeImpl* scope : scopes_) { - for (const auto& name_histogram_pair : scope->central_cache_->histograms_) { - const ParentHistogramSharedPtr& parent_hist = name_histogram_pair.second; - ret.push_back(parent_hist); + Thread::LockGuard lock(hist_mutex_); + { + ret.reserve(histogram_set_.size()); + for (const auto& histogram_ptr : histogram_set_) { + ret.emplace_back(histogram_ptr); } } @@ -177,6 +194,11 @@ void ThreadLocalStoreImpl::initializeThreading(Event::Dispatcher& main_thread_di void ThreadLocalStoreImpl::shutdownThreading() { // This will block both future cache fills as well as cache flushes. shutting_down_ = true; + Thread::LockGuard lock(hist_mutex_); + for (ParentHistogramImpl* histogram : histogram_set_) { + histogram->setShuttingDown(true); + } + histogram_set_.clear(); } void ThreadLocalStoreImpl::mergeHistograms(PostMergeCb merge_complete_cb) { @@ -185,12 +207,9 @@ void ThreadLocalStoreImpl::mergeHistograms(PostMergeCb merge_complete_cb) { merge_in_progress_ = true; tls_->runOnAllThreads( [this]() -> void { - for (const auto& scope : tls_->getTyped().scope_cache_) { - const TlsCacheEntry& tls_cache_entry = scope.second; - for (const auto& name_histogram_pair : tls_cache_entry.histograms_) { - const TlsHistogramSharedPtr& tls_hist = name_histogram_pair.second; - tls_hist->beginMerge(); - } + for (const auto& id_hist : tls_->getTyped().tls_histogram_cache_) { + const TlsHistogramSharedPtr& tls_hist = id_hist.second; + tls_hist->beginMerge(); } }, [this, merge_complete_cb]() -> void { mergeInternal(merge_complete_cb); }); @@ -245,6 +264,10 @@ void ThreadLocalStoreImpl::releaseScopeCrossThread(ScopeImpl* scope) { if (!shutting_down_ && main_thread_dispatcher_) { const uint64_t scope_id = scope->scope_id_; lock.release(); + + // TODO(jmarantz): consider batching all the scope IDs that should be + // cleared from TLS caches to reduce bursts of runOnAllThreads on a large + // config update. See the pattern below used for histograms. 
main_thread_dispatcher_->post([this, central_cache, scope_id]() { sync_.syncPoint(MainDispatcherCleanupSync); clearScopeFromCaches(scope_id, central_cache); @@ -252,12 +275,27 @@ void ThreadLocalStoreImpl::releaseScopeCrossThread(ScopeImpl* scope) { } } +void ThreadLocalStoreImpl::releaseHistogramCrossThread(uint64_t histogram_id) { + // This can happen from any thread. We post() back to the main thread which will initiate the + // cache flush operation. + if (!shutting_down_ && main_thread_dispatcher_) { + main_thread_dispatcher_->post( + [this, histogram_id]() { clearHistogramFromCaches(histogram_id); }); + } +} + ThreadLocalStoreImpl::TlsCacheEntry& ThreadLocalStoreImpl::TlsCache::insertScope(uint64_t scope_id) { return scope_cache_[scope_id]; } void ThreadLocalStoreImpl::TlsCache::eraseScope(uint64_t scope_id) { scope_cache_.erase(scope_id); } +void ThreadLocalStoreImpl::TlsCache::eraseHistogram(uint64_t histogram_id) { + // This is called for every histogram in every thread, even though the + // histogram may not have been cached in each thread yet. So we don't + // want to check whether the erase() call erased anything. + tls_histogram_cache_.erase(histogram_id); +} void ThreadLocalStoreImpl::clearScopeFromCaches(uint64_t scope_id, CentralCacheEntrySharedPtr central_cache) { @@ -271,10 +309,26 @@ void ThreadLocalStoreImpl::clearScopeFromCaches(uint64_t scope_id, } } +void ThreadLocalStoreImpl::clearHistogramFromCaches(uint64_t histogram_id) { + // If we are shutting down we no longer perform cache flushes as workers may be shutting down + // at the same time. + if (!shutting_down_) { + // Perform a cache flush on all threads. + // + // TODO(jmarantz): If this cross-thread posting proves to be a performance + // bottleneck, + // https://gist.github.com/jmarantz/838cb6de7e74c0970ea6b63eded0139a + // contains a patch that will implement batching together to clear multiple + // histograms. 
+ tls_->runOnAllThreads( + [this, histogram_id]() { tls_->getTyped().eraseHistogram(histogram_id); }); + } +} + ThreadLocalStoreImpl::ScopeImpl::ScopeImpl(ThreadLocalStoreImpl& parent, const std::string& prefix) : scope_id_(parent.next_scope_id_++), parent_(parent), - prefix_(Utility::sanitizeStatsName(prefix), parent.symbolTable()), - central_cache_(new CentralCacheEntry(parent.symbolTable())) {} + prefix_(Utility::sanitizeStatsName(prefix), parent.alloc_.symbolTable()), + central_cache_(new CentralCacheEntry(parent.alloc_.symbolTable())) {} ThreadLocalStoreImpl::ScopeImpl::~ScopeImpl() { parent_.releaseScopeCrossThread(this); @@ -299,8 +353,13 @@ class StatNameTagHelper { tls.symbolTable().callWithStringView(name, [&tags, &tls, this](absl::string_view name_str) { tag_extracted_name_ = pool_.add(tls.tagProducer().produceTags(name_str, tags)); }); + StatName empty; for (const auto& tag : tags) { - stat_name_tags_.emplace_back(pool_.add(tag.name_), pool_.add(tag.value_)); + StatName tag_name = tls.wellKnownTags().getBuiltin(tag.name_, empty); + if (tag_name.empty()) { + tag_name = pool_.add(tag.name_); + } + stat_name_tags_.emplace_back(tag_name, pool_.add(tag.value_)); } } else { tag_extracted_name_ = name; @@ -395,8 +454,10 @@ StatType& ThreadLocalStoreImpl::ScopeImpl::safeMakeStat( } template -absl::optional> -ThreadLocalStoreImpl::ScopeImpl::findStatLockHeld( +using StatTypeOptConstRef = absl::optional>; + +template +StatTypeOptConstRef ThreadLocalStoreImpl::ScopeImpl::findStatLockHeld( StatName name, StatNameHashMap>& central_cache_map) const { auto iter = central_cache_map.find(name); if (iter == central_cache_map.end()) { @@ -542,9 +603,28 @@ Histogram& ThreadLocalStoreImpl::ScopeImpl::histogramFromStatNameWithTags( } else { StatNameTagHelper tag_helper(parent_, joiner.tagExtractedName(), stat_name_tags); - RefcountPtr stat( - new ParentHistogramImpl(final_stat_name, unit, parent_, *this, - tag_helper.tagExtractedName(), tag_helper.statNameTags())); + 
ConstSupportedBuckets* buckets = nullptr; + symbolTable().callWithStringView(final_stat_name, + [&buckets, this](absl::string_view stat_name) { + buckets = &parent_.histogram_settings_->buckets(stat_name); + }); + + RefcountPtr stat; + { + Thread::LockGuard lock(parent_.hist_mutex_); + auto iter = parent_.histogram_set_.find(final_stat_name); + if (iter != parent_.histogram_set_.end()) { + stat = RefcountPtr(*iter); + } else { + stat = new ParentHistogramImpl(final_stat_name, unit, parent_, + tag_helper.tagExtractedName(), tag_helper.statNameTags(), + *buckets, parent_.next_histogram_id_++); + if (!parent_.shutting_down_) { + parent_.histogram_set_.insert(stat.get()); + } + } + } + central_ref = ¢ral_cache_->histograms_[stat->statName()]; *central_ref = stat; } @@ -615,34 +695,34 @@ TextReadoutOptConstRef ThreadLocalStoreImpl::ScopeImpl::findTextReadout(StatName return findStatLockHeld(name, central_cache_->text_readouts_); } -Histogram& ThreadLocalStoreImpl::ScopeImpl::tlsHistogram(StatName name, - ParentHistogramImpl& parent) { +Histogram& ThreadLocalStoreImpl::tlsHistogram(ParentHistogramImpl& parent, uint64_t id) { // tlsHistogram() is generally not called for a histogram that is rejected by // the matcher, so no further rejection-checking is needed at this level. // TlsHistogram inherits its reject/accept status from ParentHistogram. // See comments in counterFromStatName() which explains the logic here. 
- StatNameHashMap* tls_cache = nullptr; - if (!parent_.shutting_down_ && parent_.tls_) { - tls_cache = &parent_.tls_->getTyped().scope_cache_[this->scope_id_].histograms_; - auto iter = tls_cache->find(name); - if (iter != tls_cache->end()) { - return *iter->second; + TlsHistogramSharedPtr* tls_histogram = nullptr; + if (!shutting_down_ && tls_ != nullptr) { + TlsCache& tls_cache = tls_->getTyped(); + tls_histogram = &tls_cache.tls_histogram_cache_[id]; + if (*tls_histogram != nullptr) { + return **tls_histogram; } } - StatNameTagHelper tag_helper(parent_, name, absl::nullopt); + StatNameTagHelper tag_helper(*this, parent.statName(), absl::nullopt); TlsHistogramSharedPtr hist_tls_ptr( - new ThreadLocalHistogramImpl(name, parent.unit(), tag_helper.tagExtractedName(), + new ThreadLocalHistogramImpl(parent.statName(), parent.unit(), tag_helper.tagExtractedName(), tag_helper.statNameTags(), symbolTable())); parent.addTlsHistogram(hist_tls_ptr); - if (tls_cache) { - tls_cache->insert(std::make_pair(hist_tls_ptr->statName(), hist_tls_ptr)); + if (tls_histogram != nullptr) { + *tls_histogram = hist_tls_ptr; } + return *hist_tls_ptr; } @@ -658,7 +738,7 @@ ThreadLocalHistogramImpl::ThreadLocalHistogramImpl(StatName name, Histogram::Uni } ThreadLocalHistogramImpl::~ThreadLocalHistogramImpl() { - MetricImpl::clear(symbolTable()); + MetricImpl::clear(symbol_table_); hist_free(histograms_[0]); hist_free(histograms_[1]); } @@ -675,26 +755,78 @@ void ThreadLocalHistogramImpl::merge(histogram_t* target) { hist_clear(*other_histogram); } -ParentHistogramImpl::ParentHistogramImpl(StatName name, Histogram::Unit unit, Store& parent, - TlsScope& tls_scope, StatName tag_extracted_name, - const StatNameTagVector& stat_name_tags) - : MetricImpl(name, tag_extracted_name, stat_name_tags, parent.symbolTable()), unit_(unit), - parent_(parent), tls_scope_(tls_scope), interval_histogram_(hist_alloc()), - cumulative_histogram_(hist_alloc()), interval_statistics_(interval_histogram_), - 
cumulative_statistics_(cumulative_histogram_), merged_(false) {} +ParentHistogramImpl::ParentHistogramImpl(StatName name, Histogram::Unit unit, + ThreadLocalStoreImpl& thread_local_store, + StatName tag_extracted_name, + const StatNameTagVector& stat_name_tags, + ConstSupportedBuckets& supported_buckets, uint64_t id) + : MetricImpl(name, tag_extracted_name, stat_name_tags, thread_local_store.symbolTable()), + unit_(unit), thread_local_store_(thread_local_store), interval_histogram_(hist_alloc()), + cumulative_histogram_(hist_alloc()), + interval_statistics_(interval_histogram_, supported_buckets), + cumulative_statistics_(cumulative_histogram_, supported_buckets), merged_(false), id_(id) {} ParentHistogramImpl::~ParentHistogramImpl() { - MetricImpl::clear(symbolTable()); + thread_local_store_.releaseHistogramCrossThread(id_); + ASSERT(ref_count_ == 0); + MetricImpl::clear(thread_local_store_.symbolTable()); hist_free(interval_histogram_); hist_free(cumulative_histogram_); } +void ParentHistogramImpl::incRefCount() { ++ref_count_; } + +bool ParentHistogramImpl::decRefCount() { + bool ret; + if (shutting_down_) { + // When shutting down, we cannot reference thread_local_store_, as + // histograms can outlive the store. So we decrement the ref-count without + // the stores' lock. We will not be removing the object from the store's + // histogram map in this scenario, as the set was cleared during shutdown, + // and will not be repopulated in histogramFromStatNameWithTags after + // initiating shutdown. + ret = --ref_count_ == 0; + } else { + // We delegate to the Store object to decrement the ref-count so it can hold + // the lock to the map. If we don't hold a lock, another thread may + // simultaneously try to allocate the same name'd histogram after we + // decrement it, and we'll wind up with a dtor/update race. To avoid this we + // must hold the lock until the histogram is removed from the map. 
+ // + // See also StatsSharedImpl::decRefCount() in allocator_impl.cc, which has + // the same issue. + ret = thread_local_store_.decHistogramRefCount(*this, ref_count_); + } + return ret; +} + +bool ThreadLocalStoreImpl::decHistogramRefCount(ParentHistogramImpl& hist, + std::atomic& ref_count) { + // We must hold the store's histogram lock when decrementing the + // refcount. Otherwise another thread may simultaneously try to allocate the + // same name'd stat after we decrement it, and we'll wind up with a + // dtor/update race. To avoid this we must hold the lock until the stat is + // removed from the map. + Thread::LockGuard lock(hist_mutex_); + ASSERT(ref_count >= 1); + if (--ref_count == 0) { + if (!shutting_down_) { + const size_t count = histogram_set_.erase(hist.statName()); + ASSERT(shutting_down_ || count == 1); + } + return true; + } + return false; +} + +SymbolTable& ParentHistogramImpl::symbolTable() { return thread_local_store_.symbolTable(); } + Histogram::Unit ParentHistogramImpl::unit() const { return unit_; } void ParentHistogramImpl::recordValue(uint64_t value) { - Histogram& tls_histogram = tls_scope_.tlsHistogram(statName(), *this); + Histogram& tls_histogram = thread_local_store_.tlsHistogram(*this, id_); tls_histogram.recordValue(value); - parent_.deliverHistogramToSinks(*this, value); + thread_local_store_.deliverHistogramToSinks(*this, value); } bool ParentHistogramImpl::used() const { @@ -741,7 +873,7 @@ const std::string ParentHistogramImpl::quantileSummary() const { const std::string ParentHistogramImpl::bucketSummary() const { if (used()) { std::vector bucket_summary; - const std::vector& supported_buckets = interval_statistics_.supportedBuckets(); + ConstSupportedBuckets& supported_buckets = interval_statistics_.supportedBuckets(); bucket_summary.reserve(supported_buckets.size()); for (size_t i = 0; i < supported_buckets.size(); ++i) { bucket_summary.push_back(fmt::format("B{:g}({},{})", supported_buckets[i], diff --git 
a/source/common/stats/thread_local_store.h b/source/common/stats/thread_local_store.h index 135abeb257e40..22d72bfaa9e01 100644 --- a/source/common/stats/thread_local_store.h +++ b/source/common/stats/thread_local_store.h @@ -4,6 +4,7 @@ #include #include #include +#include #include #include "envoy/stats/tag.h" @@ -58,7 +59,7 @@ class ThreadLocalHistogramImpl : public HistogramImplHelper { void recordValue(uint64_t value) override; // Stats::Metric - SymbolTable& symbolTable() override { return symbol_table_; } + SymbolTable& symbolTable() final { return symbol_table_; } bool used() const override { return used_; } private: @@ -73,15 +74,16 @@ class ThreadLocalHistogramImpl : public HistogramImplHelper { using TlsHistogramSharedPtr = RefcountPtr; -class TlsScope; +class ThreadLocalStoreImpl; /** * Log Linear Histogram implementation that is stored in the main thread. */ class ParentHistogramImpl : public MetricImpl { public: - ParentHistogramImpl(StatName name, Histogram::Unit unit, Store& parent, TlsScope& tls_scope, - StatName tag_extracted_name, const StatNameTagVector& stat_name_tags); + ParentHistogramImpl(StatName name, Histogram::Unit unit, ThreadLocalStoreImpl& parent, + StatName tag_extracted_name, const StatNameTagVector& stat_name_tags, + ConstSupportedBuckets& supported_buckets, uint64_t id); ~ParentHistogramImpl() override; void addTlsHistogram(const TlsHistogramSharedPtr& hist_ptr); @@ -106,48 +108,37 @@ class ParentHistogramImpl : public MetricImpl { const std::string bucketSummary() const override; // Stats::Metric - SymbolTable& symbolTable() override { return parent_.symbolTable(); } + SymbolTable& symbolTable() override; bool used() const override; // RefcountInterface - void incRefCount() override { refcount_helper_.incRefCount(); } - bool decRefCount() override { return refcount_helper_.decRefCount(); } - uint32_t use_count() const override { return refcount_helper_.use_count(); } + void incRefCount() override; + bool decRefCount() override; + 
uint32_t use_count() const override { return ref_count_; } + + // Indicates that the ThreadLocalStore is shutting down, so no need to clear its histogram_set_. + void setShuttingDown(bool shutting_down) { shutting_down_ = shutting_down; } + bool shuttingDown() const { return shutting_down_; } private: - bool usedLockHeld() const EXCLUSIVE_LOCKS_REQUIRED(merge_lock_); + bool usedLockHeld() const ABSL_EXCLUSIVE_LOCKS_REQUIRED(merge_lock_); Histogram::Unit unit_; - Store& parent_; - TlsScope& tls_scope_; + ThreadLocalStoreImpl& thread_local_store_; histogram_t* interval_histogram_; histogram_t* cumulative_histogram_; HistogramStatisticsImpl interval_statistics_; HistogramStatisticsImpl cumulative_statistics_; mutable Thread::MutexBasicLockable merge_lock_; - std::list tls_histograms_ GUARDED_BY(merge_lock_); + std::list tls_histograms_ ABSL_GUARDED_BY(merge_lock_); bool merged_; - RefcountHelper refcount_helper_; + std::atomic shutting_down_{false}; + std::atomic ref_count_{0}; + const uint64_t id_; // Index into TlsCache::histogram_cache_. }; using ParentHistogramImplSharedPtr = RefcountPtr; -/** - * Class used to create ThreadLocalHistogram in the scope. - */ -class TlsScope : public Scope { -public: - ~TlsScope() override = default; - - // TODO(ramaraochavali): Allow direct TLS access for the advanced consumers. - /** - * @return a ThreadLocalHistogram within the scope's namespace. - * @param name name of the histogram with scope prefix attached. - * @param parent the parent histogram. - */ - virtual Histogram& tlsHistogram(StatName name, ParentHistogramImpl& parent) PURE; -}; - /** * Store implementation with thread local caching. 
For design details see * https://github.com/envoyproxy/envoy/blob/master/source/docs/stats.md @@ -241,6 +232,11 @@ class ThreadLocalStoreImpl : Logger::Loggable, public StoreRo return absl::nullopt; } + bool iterate(const IterateFn& fn) const override { return iterHelper(fn); } + bool iterate(const IterateFn& fn) const override { return iterHelper(fn); } + bool iterate(const IterateFn& fn) const override { return iterHelper(fn); } + bool iterate(const IterateFn& fn) const override { return iterHelper(fn); } + // Stats::Store std::vector counters() const override; std::vector gauges() const override; @@ -253,17 +249,30 @@ class ThreadLocalStoreImpl : Logger::Loggable, public StoreRo tag_producer_ = std::move(tag_producer); } void setStatsMatcher(StatsMatcherPtr&& stats_matcher) override; + void setHistogramSettings(HistogramSettingsConstPtr&& histogram_settings) override; void initializeThreading(Event::Dispatcher& main_thread_dispatcher, ThreadLocal::Instance& tls) override; void shutdownThreading() override; void mergeHistograms(PostMergeCb merge_cb) override; + Histogram& tlsHistogram(ParentHistogramImpl& parent, uint64_t id); + /** * @return a thread synchronizer object used for controlling thread behavior in tests. */ Thread::ThreadSynchronizer& sync() { return sync_; } + /** + * @return a set of well known tag names; used to reduce symbol table churn. + */ + const StatNameSet& wellKnownTags() const { return *well_known_tags_; } + + bool decHistogramRefCount(ParentHistogramImpl& histogram, std::atomic& ref_count); + void releaseHistogramCrossThread(uint64_t histogram_id); + private: + friend class ThreadLocalStoreTestingPeer; + template using StatRefMap = StatNameHashMap>; struct TlsCacheEntry { @@ -275,9 +284,18 @@ class ThreadLocalStoreImpl : Logger::Loggable, public StoreRo StatRefMap gauges_; StatRefMap text_readouts_; - // The histogram objects are not shared with the central cache, and don't - // require taking a lock when decrementing their ref-count. 
- StatNameHashMap histograms_; + // Histograms also require holding a mutex while decrementing reference + // counts. The only difference from other stats is that the histogram_set_ + // lives in the ThreadLocalStore object, rather than in + // AllocatorImpl. Histograms are removed from that set when all scopes + // referencing the histogram are dropped. Each ParentHistogram has a unique + // index, which is not re-used during the process lifetime. + // + // There is also a tls_histogram_cache_ in the TlsCache object, which is + // not tied to a scope. It maps from parent histogram's unique index to + // a TlsHistogram. This enables continuity between same-named histograms + // in same-named scopes. That scenario is common when re-creating scopes in + // response to xDS. StatNameHashMap parent_histograms_; // We keep a TLS cache of rejected stat names. This costs memory, but @@ -302,7 +320,7 @@ class ThreadLocalStoreImpl : Logger::Loggable, public StoreRo }; using CentralCacheEntrySharedPtr = RefcountPtr; - struct ScopeImpl : public TlsScope { + struct ScopeImpl : public Scope { ScopeImpl(ThreadLocalStoreImpl& parent, const std::string& prefix); ~ScopeImpl() override; @@ -315,19 +333,19 @@ class ThreadLocalStoreImpl : Logger::Loggable, public StoreRo Histogram& histogramFromStatNameWithTags(const StatName& name, StatNameTagVectorOptConstRef tags, Histogram::Unit unit) override; - Histogram& tlsHistogram(StatName name, ParentHistogramImpl& parent) override; TextReadout& textReadoutFromStatNameWithTags(const StatName& name, StatNameTagVectorOptConstRef tags) override; ScopePtr createScope(const std::string& name) override { return parent_.createScope(symbolTable().toString(prefix_.statName()) + "." 
+ name); } - const SymbolTable& constSymbolTable() const override { return parent_.constSymbolTable(); } - SymbolTable& symbolTable() override { return parent_.symbolTable(); } + const SymbolTable& constSymbolTable() const final { return parent_.constSymbolTable(); } + SymbolTable& symbolTable() final { return parent_.symbolTable(); } Counter& counterFromString(const std::string& name) override { StatNameManagedStorage storage(name, symbolTable()); return counterFromStatName(storage.statName()); } + Gauge& gaugeFromString(const std::string& name, Gauge::ImportMode import_mode) override { StatNameManagedStorage storage(name, symbolTable()); return gaugeFromStatName(storage.statName(), import_mode); @@ -343,6 +361,28 @@ class ThreadLocalStoreImpl : Logger::Loggable, public StoreRo NullGaugeImpl& nullGauge(const std::string&) override { return parent_.null_gauge_; } + template bool iterHelper(StatFn fn, const StatMap& map) const { + for (auto& iter : map) { + if (!fn(iter.second)) { + return false; + } + } + return true; + } + + bool iterate(const IterateFn& fn) const override { + return iterHelper(fn, central_cache_->counters_); + } + bool iterate(const IterateFn& fn) const override { + return iterHelper(fn, central_cache_->gauges_); + } + bool iterate(const IterateFn& fn) const override { + return iterHelper(fn, central_cache_->histograms_); + } + bool iterate(const IterateFn& fn) const override { + return iterHelper(fn, central_cache_->text_readouts_); + } + // NOTE: The find methods assume that `name` is fully-qualified. // Implementations will not add the scope prefix. CounterOptConstRef findCounter(StatName name) const override; @@ -375,6 +415,9 @@ class ThreadLocalStoreImpl : Logger::Loggable, public StoreRo MakeStatFn make_stat, StatRefMap* tls_cache, StatNameHashSet* tls_rejected_stats, StatType& null_stat); + template + using StatTypeOptConstRef = absl::optional>; + /** * Looks up an existing stat, populating the local cache if necessary. 
Does * not check the TLS or rejects, and does not create a stat if it does not @@ -385,7 +428,7 @@ class ThreadLocalStoreImpl : Logger::Loggable, public StoreRo * @return a reference to the stat, if it exists. */ template - absl::optional> + StatTypeOptConstRef findStatLockHeld(StatName name, StatNameHashMap>& central_cache_map) const; @@ -398,6 +441,7 @@ class ThreadLocalStoreImpl : Logger::Loggable, public StoreRo struct TlsCache : public ThreadLocal::ThreadLocalObject { TlsCacheEntry& insertScope(uint64_t scope_id); void eraseScope(uint64_t scope_id); + void eraseHistogram(uint64_t histogram); // The TLS scope cache is keyed by scope ID. This is used to avoid complex circular references // during scope destruction. An ID is required vs. using the address of the scope pointer @@ -407,10 +451,24 @@ class ThreadLocalStoreImpl : Logger::Loggable, public StoreRo // store. See the overview for more information. This complexity is required for lockless // operation in the fast path. absl::flat_hash_map scope_cache_; + + // Maps from histogram ID (monotonically increasing) to a TLS histogram. 
+ absl::flat_hash_map tls_histogram_cache_; }; + template bool iterHelper(StatFn fn) const { + Thread::LockGuard lock(lock_); + for (ScopeImpl* scope : scopes_) { + if (!scope->iterate(fn)) { + return false; + } + } + return true; + } + std::string getTagsForName(const std::string& name, TagVector& tags) const; void clearScopeFromCaches(uint64_t scope_id, CentralCacheEntrySharedPtr central_cache); + void clearHistogramFromCaches(uint64_t histogram_id); void releaseScopeCrossThread(ScopeImpl* scope); void mergeInternal(PostMergeCb merge_cb); bool rejects(StatName name) const; @@ -424,11 +482,12 @@ class ThreadLocalStoreImpl : Logger::Loggable, public StoreRo Event::Dispatcher* main_thread_dispatcher_{}; ThreadLocal::SlotPtr tls_; mutable Thread::MutexBasicLockable lock_; - absl::flat_hash_set scopes_ GUARDED_BY(lock_); + absl::flat_hash_set scopes_ ABSL_GUARDED_BY(lock_); ScopePtr default_scope_; std::list> timer_sinks_; TagProducerPtr tag_producer_; StatsMatcherPtr stats_matcher_; + HistogramSettingsConstPtr histogram_settings_; std::atomic threading_ever_initialized_{}; std::atomic shutting_down_{}; std::atomic merge_in_progress_{}; @@ -447,14 +506,22 @@ class ThreadLocalStoreImpl : Logger::Loggable, public StoreRo // It seems like it would be better to have each client that expects a stat // to exist to hold it as (e.g.) a CounterSharedPtr rather than a Counter& // but that would be fairly complex to change. 
- std::vector deleted_counters_; - std::vector deleted_gauges_; - std::vector deleted_histograms_; - std::vector deleted_text_readouts_; + std::vector deleted_counters_ ABSL_GUARDED_BY(lock_); + std::vector deleted_gauges_ ABSL_GUARDED_BY(lock_); + std::vector deleted_histograms_ ABSL_GUARDED_BY(lock_); + std::vector deleted_text_readouts_ ABSL_GUARDED_BY(lock_); Thread::ThreadSynchronizer sync_; std::atomic next_scope_id_{}; + uint64_t next_histogram_id_ ABSL_GUARDED_BY(hist_mutex_) = 0; + + StatNameSetPtr well_known_tags_; + + mutable Thread::MutexBasicLockable hist_mutex_; + StatSet histogram_set_ ABSL_GUARDED_BY(hist_mutex_); }; +using ThreadLocalStoreImplPtr = std::unique_ptr; + } // namespace Stats } // namespace Envoy diff --git a/source/common/stats/utility.cc b/source/common/stats/utility.cc index 18441355fd3d7..ee3944172c0d9 100644 --- a/source/common/stats/utility.cc +++ b/source/common/stats/utility.cc @@ -4,6 +4,7 @@ #include #include "absl/strings/match.h" +#include "absl/types/optional.h" namespace Envoy { namespace Stats { @@ -34,5 +35,87 @@ absl::optional Utility::findTag(const Metric& metric, StatName find_ta return value; } +namespace { + +// Helper class for the three Utility::*FromElements implementations to build up +// a joined StatName from a mix of StatName and string_view. +struct ElementVisitor { + ElementVisitor(SymbolTable& symbol_table, const ElementVec& elements) + : symbol_table_(symbol_table), pool_(symbol_table) { + stat_names_.resize(elements.size()); + for (const Element& element : elements) { + absl::visit(*this, element); + } + joined_ = symbol_table_.join(stat_names_); + } + + // Overloads provides for absl::visit to call. + void operator()(StatName stat_name) { stat_names_.push_back(stat_name); } + void operator()(absl::string_view name) { stat_names_.push_back(pool_.add(name)); } + + /** + * @return the StatName constructed by joining the elements. 
+ */ + StatName statName() { return StatName(joined_.get()); } + + SymbolTable& symbol_table_; + StatNameVec stat_names_; + StatNameDynamicPool pool_; + SymbolTable::StoragePtr joined_; +}; + +} // namespace + +Counter& Utility::counterFromElements(Scope& scope, const ElementVec& elements, + StatNameTagVectorOptConstRef tags) { + ElementVisitor visitor(scope.symbolTable(), elements); + return scope.counterFromStatNameWithTags(visitor.statName(), tags); +} + +Counter& Utility::counterFromStatNames(Scope& scope, const StatNameVec& elements, + StatNameTagVectorOptConstRef tags) { + SymbolTable::StoragePtr joined = scope.symbolTable().join(elements); + return scope.counterFromStatNameWithTags(StatName(joined.get()), tags); +} + +Gauge& Utility::gaugeFromElements(Scope& scope, const ElementVec& elements, + Gauge::ImportMode import_mode, + StatNameTagVectorOptConstRef tags) { + ElementVisitor visitor(scope.symbolTable(), elements); + return scope.gaugeFromStatNameWithTags(visitor.statName(), tags, import_mode); +} + +Gauge& Utility::gaugeFromStatNames(Scope& scope, const StatNameVec& elements, + Gauge::ImportMode import_mode, + StatNameTagVectorOptConstRef tags) { + SymbolTable::StoragePtr joined = scope.symbolTable().join(elements); + return scope.gaugeFromStatNameWithTags(StatName(joined.get()), tags, import_mode); +} + +Histogram& Utility::histogramFromElements(Scope& scope, const ElementVec& elements, + Histogram::Unit unit, StatNameTagVectorOptConstRef tags) { + ElementVisitor visitor(scope.symbolTable(), elements); + return scope.histogramFromStatNameWithTags(visitor.statName(), tags, unit); +} + +Histogram& Utility::histogramFromStatNames(Scope& scope, const StatNameVec& elements, + Histogram::Unit unit, + StatNameTagVectorOptConstRef tags) { + SymbolTable::StoragePtr joined = scope.symbolTable().join(elements); + return scope.histogramFromStatNameWithTags(StatName(joined.get()), tags, unit); +} + +TextReadout& Utility::textReadoutFromElements(Scope& scope, const 
ElementVec& elements, + StatNameTagVectorOptConstRef tags) { + ElementVisitor visitor(scope.symbolTable(), elements); + return scope.textReadoutFromStatNameWithTags(visitor.statName(), tags); +} + +TextReadout& Utility::textReadoutFromStatNames(Scope& scope, const StatNameVec& elements, + StatNameTagVectorOptConstRef tags) { + SymbolTable::StoragePtr joined = scope.symbolTable().join(elements); + return scope.textReadoutFromStatNameWithTags(StatName(joined.get()), tags); +} + } // namespace Stats } // namespace Envoy diff --git a/source/common/stats/utility.h b/source/common/stats/utility.h index 0d0ed4b21bc0a..4328c2ef5875d 100644 --- a/source/common/stats/utility.h +++ b/source/common/stats/utility.h @@ -2,16 +2,45 @@ #include +#include "envoy/stats/scope.h" #include "envoy/stats/stats.h" +#include "common/common/thread.h" #include "common/stats/symbol_table_impl.h" +#include "absl/container/inlined_vector.h" #include "absl/strings/string_view.h" #include "absl/types/optional.h" namespace Envoy { namespace Stats { +/** + * Represents a dynamically created stat name token based on absl::string_view. + * This class wrapper is used in the 'Element' variant so that call-sites + * can express explicit intent to create dynamic stat names, which are more + * expensive than symbolic stat names. We use dynamic stat names only for + * building stats based on names discovered in the line of a request. + */ +class DynamicName : public absl::string_view { +public: + // This is intentionally left as an implicit conversion from string_view to + // make call-sites easier to read, e.g. + // Utility::counterFromElements(*scope, {DynamicName("a"), DynamicName("b")}); + DynamicName(absl::string_view str) : absl::string_view(str) {} +}; + +/** + * Holds either a symbolic StatName or a dynamic string, for the purpose of + * composing a vector to pass to Utility::counterFromElements, etc. This is + * a programming convenience to create joined stat names. 
It is easier to + * call the above helpers than to use SymbolTable::join(), because the helpers + * hide the memory management of the joined storage, and they allow easier + * co-mingling of symbolic and dynamic stat-name components. + */ +using Element = absl::variant; +using ElementVec = absl::InlinedVector; + /** * Common stats utility routines. */ @@ -34,6 +63,198 @@ class Utility { * @return The value of the tag, if found. */ static absl::optional findTag(const Metric& metric, StatName find_tag_name); + + /** + * Creates a counter from a vector of tokens which are used to create the + * name. The tokens can be specified as DynamicName or StatName. For + * tokens specified as DynamicName, a dynamic StatName will be created. See + * https://github.com/envoyproxy/envoy/blob/master/source/docs/stats.md#dynamic-stat-tokens + * for more detail on why symbolic StatNames are preferred when possible. + * + * See also counterFromStatNames, which is slightly faster but does not allow + * passing DynamicName(string)s as names. + * + * @param scope The scope in which to create the counter. + * @param elements The vector of mixed DynamicName and StatName + * @param tags optionally specified tags. + * @return A counter named using the joined elements. + */ + static Counter& counterFromElements(Scope& scope, const ElementVec& elements, + StatNameTagVectorOptConstRef tags = absl::nullopt); + + /** + * Creates a counter from a vector of tokens which are used to create the + * name. The tokens must be of type StatName. + * + * See also counterFromElements, which is slightly slower, but allows + * passing DynamicName(string)s as elements. + * + * @param scope The scope in which to create the counter. + * @param names The vector of StatNames + * @param tags optionally specified tags. + * @return A counter named using the joined elements. 
+ */ + static Counter& counterFromStatNames(Scope& scope, const StatNameVec& names, + StatNameTagVectorOptConstRef tags = absl::nullopt); + + /** + * Creates a gauge from a vector of tokens which are used to create the + * name. The tokens can be specified as DynamicName or StatName. For + * tokens specified as DynamicName, a dynamic StatName will be created. See + * https://github.com/envoyproxy/envoy/blob/master/source/docs/stats.md#dynamic-stat-tokens + * for more detail on why symbolic StatNames are preferred when possible. + * + * See also gaugeFromStatNames, which is slightly faster but does not allow + * passing DynamicName(string)s as names. + * + * @param scope The scope in which to create the counter. + * @param elements The vector of mixed DynamicName and StatName + * @param import_mode Whether hot-restart should accumulate this value. + * @param tags optionally specified tags. + * @return A gauge named using the joined elements. + */ + static Gauge& gaugeFromElements(Scope& scope, const ElementVec& elements, + Gauge::ImportMode import_mode, + StatNameTagVectorOptConstRef tags = absl::nullopt); + + /** + * Creates a gauge from a vector of tokens which are used to create the + * name. The tokens must be of type StatName. + * + * See also gaugeFromElements, which is slightly slower, but allows + * passing DynamicName(string)s as elements. + * + * @param scope The scope in which to create the counter. + * @param names The vector of StatNames + * @param import_mode Whether hot-restart should accumulate this value. + * @param tags optionally specified tags. + * @return A gauge named using the joined elements. + */ + static Gauge& gaugeFromStatNames(Scope& scope, const StatNameVec& elements, + Gauge::ImportMode import_mode, + StatNameTagVectorOptConstRef tags = absl::nullopt); + + /** + * Creates a histogram from a vector of tokens which are used to create the + * name. The tokens can be specified as DynamicName or StatName. 
For + * tokens specified as DynamicName, a dynamic StatName will be created. See + * https://github.com/envoyproxy/envoy/blob/master/source/docs/stats.md#dynamic-stat-tokens + * for more detail on why symbolic StatNames are preferred when possible. + * + * See also histogramFromStatNames, which is slightly faster but does not allow + * passing DynamicName(string)s as names. + * + * @param scope The scope in which to create the counter. + * @param elements The vector of mixed DynamicName and StatName + * @param unit The unit of measurement. + * @param tags optionally specified tags. + * @return A histogram named using the joined elements. + */ + static Histogram& histogramFromElements(Scope& scope, const ElementVec& elements, + Histogram::Unit unit, + StatNameTagVectorOptConstRef tags = absl::nullopt); + + /** + * Creates a histogram from a vector of tokens which are used to create the + * name. The tokens must be of type StatName. + * + * See also histogramFromElements, which is slightly slower, but allows + * passing DynamicName(string)s as elements. + * + * @param scope The scope in which to create the counter. + * @param elements The vector of mixed DynamicName and StatName + * @param unit The unit of measurement. + * @param tags optionally specified tags. + * @return A histogram named using the joined elements. + */ + static Histogram& histogramFromStatNames(Scope& scope, const StatNameVec& elements, + Histogram::Unit unit, + StatNameTagVectorOptConstRef tags = absl::nullopt); + + /** + * Creates a TextReadout from a vector of tokens which are used to create the + * name. The tokens can be specified as DynamicName or StatName. For + * tokens specified as DynamicName, a dynamic StatName will be created. See + * https://github.com/envoyproxy/envoy/blob/master/source/docs/stats.md#dynamic-stat-tokens + * for more detail on why symbolic StatNames are preferred when possible. 
+ * + * See also TextReadoutFromStatNames, which is slightly faster but does not allow + * passing DynamicName(string)s as names. + * + * @param scope The scope in which to create the counter. + * @param elements The vector of mixed DynamicName and StatName + * @param unit The unit of measurement. + * @param tags optionally specified tags. + * @return A TextReadout named using the joined elements. + */ + static TextReadout& textReadoutFromElements(Scope& scope, const ElementVec& elements, + StatNameTagVectorOptConstRef tags = absl::nullopt); + + /** + * Creates a TextReadout from a vector of tokens which are used to create the + * name. The tokens must be of type StatName. + * + * See also TextReadoutFromElements, which is slightly slower, but allows + * passing DynamicName(string)s as elements. + * + * @param scope The scope in which to create the counter. + * @param elements The vector of mixed DynamicName and StatName + * @param unit The unit of measurement. + * @param tags optionally specified tags. + * @return A TextReadout named using the joined elements. + */ + static TextReadout& textReadoutFromStatNames(Scope& scope, const StatNameVec& elements, + StatNameTagVectorOptConstRef tags = absl::nullopt); +}; + +/** + * Holds a reference to a stat by name. Note that the stat may not be created + * yet at the time CachedReference is created. Calling get() then does a lazy + * lookup, potentially returning absl::nullopt if the stat doesn't exist yet. + * StatReference works whether the name was constructed symbolically, or with + * StatNameDynamicStorage. + * + * Lookups are very slow, taking time proportional to the size of the scope, + * holding mutexes during the lookup. However once the lookup succeeds, the + * result is cached atomically, and further calls to get() are thus fast and + * mutex-free. The implementation may be faster for stats that are named + * symbolically. + * + * CachedReference is valid for the lifetime of the Scope. 
When the Scope + * becomes invalid, CachedReferences must also be dropped as they will hold + * pointers into the scope. + */ +template class CachedReference { +public: + CachedReference(Scope& scope, absl::string_view name) : scope_(scope), name_(std::string(name)) {} + + /** + * Finds the named stat, if it exists, returning it as an optional. + */ + absl::optional> get() { + StatType* stat = stat_.get([this]() -> StatType* { + StatType* stat = nullptr; + IterateFn check_stat = [this, + &stat](const RefcountPtr& shared_stat) -> bool { + if (shared_stat->name() == name_) { + stat = shared_stat.get(); + return false; // Stop iteration. + } + return true; + }; + scope_.iterate(check_stat); + return stat; + }); + if (stat == nullptr) { + return absl::nullopt; + } + return *stat; + } + +private: + Scope& scope_; + const std::string name_; + Thread::AtomicPtr stat_; }; } // namespace Stats diff --git a/source/common/stream_info/BUILD b/source/common/stream_info/BUILD index 9abb095ed9780..d2962e67ef153 100644 --- a/source/common/stream_info/BUILD +++ b/source/common/stream_info/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/common/stream_info/filter_state_impl.cc b/source/common/stream_info/filter_state_impl.cc index 6097f02e04286..d873587abfcf8 100644 --- a/source/common/stream_info/filter_state_impl.cc +++ b/source/common/stream_info/filter_state_impl.cc @@ -97,8 +97,8 @@ void FilterStateImpl::maybeCreateParent(ParentAccessMode parent_access_mode) { if (life_span_ >= FilterState::LifeSpan::TopSpan) { return; } - if (absl::holds_alternative>(ancestor_)) { - std::shared_ptr ancestor = absl::get>(ancestor_); + if (absl::holds_alternative(ancestor_)) { + FilterStateSharedPtr ancestor = absl::get(ancestor_); if (ancestor == nullptr || ancestor->lifeSpan() != life_span_ + 1) { parent_ = 
std::make_shared(ancestor, FilterState::LifeSpan(life_span_ + 1)); } else { diff --git a/source/common/stream_info/filter_state_impl.h b/source/common/stream_info/filter_state_impl.h index 6bf8fb9ad517b..319026e959acc 100644 --- a/source/common/stream_info/filter_state_impl.h +++ b/source/common/stream_info/filter_state_impl.h @@ -22,12 +22,12 @@ class FilterStateImpl : public FilterState { * @param ancestor a std::shared_ptr storing an already created ancestor. * @param life_span the life span this is handling. */ - FilterStateImpl(std::shared_ptr ancestor, FilterState::LifeSpan life_span) + FilterStateImpl(FilterStateSharedPtr ancestor, FilterState::LifeSpan life_span) : ancestor_(ancestor), life_span_(life_span) { maybeCreateParent(ParentAccessMode::ReadOnly); } - using LazyCreateAncestor = std::pair&, FilterState::LifeSpan>; + using LazyCreateAncestor = std::pair; /** * @param ancestor a std::pair storing an ancestor, that can be passed in as a way to lazy * initialize a FilterState that's owned by an object with bigger scope than this. This is to @@ -49,7 +49,7 @@ class FilterStateImpl : public FilterState { bool hasDataAtOrAboveLifeSpan(FilterState::LifeSpan life_span) const override; FilterState::LifeSpan lifeSpan() const override { return life_span_; } - std::shared_ptr parent() const override { return parent_; } + FilterStateSharedPtr parent() const override { return parent_; } private: // This only checks the local data_storage_ for data_name existence. 
@@ -62,8 +62,8 @@ class FilterStateImpl : public FilterState { FilterState::StateType state_type_; }; - absl::variant, LazyCreateAncestor> ancestor_; - std::shared_ptr parent_; + absl::variant ancestor_; + FilterStateSharedPtr parent_; const FilterState::LifeSpan life_span_; absl::flat_hash_map> data_storage_; }; diff --git a/source/common/stream_info/stream_info_impl.h b/source/common/stream_info/stream_info_impl.h index 3c440f91afa12..a384cd401cf3a 100644 --- a/source/common/stream_info/stream_info_impl.h +++ b/source/common/stream_info/stream_info_impl.h @@ -18,21 +18,21 @@ namespace Envoy { namespace StreamInfo { struct StreamInfoImpl : public StreamInfo { - StreamInfoImpl(TimeSource& time_source) - : StreamInfoImpl(absl::nullopt, time_source, - std::make_shared(FilterState::LifeSpan::FilterChain)) {} + StreamInfoImpl(TimeSource& time_source, + FilterState::LifeSpan life_span = FilterState::LifeSpan::FilterChain) + : StreamInfoImpl(absl::nullopt, time_source, std::make_shared(life_span)) {} StreamInfoImpl(Http::Protocol protocol, TimeSource& time_source) : StreamInfoImpl(protocol, time_source, std::make_shared(FilterState::LifeSpan::FilterChain)) {} StreamInfoImpl(Http::Protocol protocol, TimeSource& time_source, - std::shared_ptr& parent_filter_state) - : StreamInfoImpl(protocol, time_source, - std::make_shared( - FilterStateImpl::LazyCreateAncestor(parent_filter_state, - FilterState::LifeSpan::Connection), - FilterState::LifeSpan::FilterChain)) {} + FilterStateSharedPtr parent_filter_state, FilterState::LifeSpan life_span) + : StreamInfoImpl( + protocol, time_source, + std::make_shared( + FilterStateImpl::LazyCreateAncestor(std::move(parent_filter_state), life_span), + FilterState::LifeSpan::FilterChain)) {} SystemTime startTime() const override { return start_time_; } diff --git a/source/common/stream_info/utility.cc b/source/common/stream_info/utility.cc index ccd24cb1acf71..2f7049545bd3d 100644 --- a/source/common/stream_info/utility.cc +++ 
b/source/common/stream_info/utility.cc @@ -25,6 +25,9 @@ const std::string ResponseFlagUtils::RATELIMIT_SERVICE_ERROR = "RLSE"; const std::string ResponseFlagUtils::STREAM_IDLE_TIMEOUT = "SI"; const std::string ResponseFlagUtils::INVALID_ENVOY_REQUEST_HEADERS = "IH"; const std::string ResponseFlagUtils::DOWNSTREAM_PROTOCOL_ERROR = "DPE"; +const std::string ResponseFlagUtils::UPSTREAM_MAX_STREAM_DURATION_REACHED = "UMSDR"; +const std::string ResponseFlagUtils::RESPONSE_FROM_CACHE_FILTER = "RFCF"; +const std::string ResponseFlagUtils::NO_FILTER_CONFIG_FOUND = "NFCF"; void ResponseFlagUtils::appendString(std::string& result, const std::string& append) { if (result.empty()) { @@ -37,7 +40,7 @@ void ResponseFlagUtils::appendString(std::string& result, const std::string& app const std::string ResponseFlagUtils::toShortString(const StreamInfo& stream_info) { std::string result; - static_assert(ResponseFlag::LastFlag == 0x40000, "A flag has been added. Fix this code."); + static_assert(ResponseFlag::LastFlag == 0x200000, "A flag has been added. Fix this code."); if (stream_info.hasResponseFlag(ResponseFlag::FailedLocalHealthCheck)) { appendString(result, FAILED_LOCAL_HEALTH_CHECK); @@ -114,6 +117,18 @@ const std::string ResponseFlagUtils::toShortString(const StreamInfo& stream_info appendString(result, DOWNSTREAM_PROTOCOL_ERROR); } + if (stream_info.hasResponseFlag(ResponseFlag::UpstreamMaxStreamDurationReached)) { + appendString(result, UPSTREAM_MAX_STREAM_DURATION_REACHED); + } + + if (stream_info.hasResponseFlag(ResponseFlag::ResponseFromCacheFilter)) { + appendString(result, RESPONSE_FROM_CACHE_FILTER); + } + + if (stream_info.hasResponseFlag(ResponseFlag::NoFilterConfigFound)) { + appendString(result, NO_FILTER_CONFIG_FOUND); + } + return result.empty() ? 
NONE : result; } @@ -140,6 +155,10 @@ absl::optional ResponseFlagUtils::toResponseFlag(const std::string {ResponseFlagUtils::STREAM_IDLE_TIMEOUT, ResponseFlag::StreamIdleTimeout}, {ResponseFlagUtils::INVALID_ENVOY_REQUEST_HEADERS, ResponseFlag::InvalidEnvoyRequestHeaders}, {ResponseFlagUtils::DOWNSTREAM_PROTOCOL_ERROR, ResponseFlag::DownstreamProtocolError}, + {ResponseFlagUtils::UPSTREAM_MAX_STREAM_DURATION_REACHED, + ResponseFlag::UpstreamMaxStreamDurationReached}, + {ResponseFlagUtils::RESPONSE_FROM_CACHE_FILTER, ResponseFlag::ResponseFromCacheFilter}, + {ResponseFlagUtils::NO_FILTER_CONFIG_FOUND, ResponseFlag::NoFilterConfigFound}, }; const auto& it = map.find(flag); if (it != map.end()) { diff --git a/source/common/stream_info/utility.h b/source/common/stream_info/utility.h index fe8059b896439..9b4ac08e413c8 100644 --- a/source/common/stream_info/utility.h +++ b/source/common/stream_info/utility.h @@ -40,6 +40,9 @@ class ResponseFlagUtils { const static std::string STREAM_IDLE_TIMEOUT; const static std::string INVALID_ENVOY_REQUEST_HEADERS; const static std::string DOWNSTREAM_PROTOCOL_ERROR; + const static std::string UPSTREAM_MAX_STREAM_DURATION_REACHED; + const static std::string RESPONSE_FROM_CACHE_FILTER; + const static std::string NO_FILTER_CONFIG_FOUND; }; /** diff --git a/source/common/tcp/BUILD b/source/common/tcp/BUILD index a9e3b948a1b2f..9a4234e77bb8f 100644 --- a/source/common/tcp/BUILD +++ b/source/common/tcp/BUILD @@ -1,17 +1,23 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( name = "conn_pool_lib", - srcs = ["conn_pool.cc"], - hdrs = ["conn_pool.h"], + srcs = [ + "conn_pool.cc", + "original_conn_pool.cc", + ], + hdrs = [ + "conn_pool.h", + "original_conn_pool.h", + ], external_deps = ["abseil_optional"], deps = [ "//include/envoy/event:deferred_deletable", @@ -24,6 +30,7 @@ envoy_cc_library( 
"//include/envoy/upstream:upstream_interface", "//source/common/common:linked_object", "//source/common/common:utility_lib", + "//source/common/http:conn_pool_base_lib", "//source/common/network:filter_lib", "//source/common/network:utility_lib", "//source/common/stats:timespan_lib", diff --git a/source/common/tcp/conn_pool.cc b/source/common/tcp/conn_pool.cc index 76f6d453be788..ed3332d8afef2 100644 --- a/source/common/tcp/conn_pool.cc +++ b/source/common/tcp/conn_pool.cc @@ -12,435 +12,56 @@ namespace Envoy { namespace Tcp { -ConnPoolImpl::ConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr host, - Upstream::ResourcePriority priority, - const Network::ConnectionSocket::OptionsSharedPtr& options, - Network::TransportSocketOptionsSharedPtr transport_socket_options) - : dispatcher_(dispatcher), host_(host), priority_(priority), socket_options_(options), - transport_socket_options_(transport_socket_options), - upstream_ready_timer_(dispatcher_.createTimer([this]() { onUpstreamReady(); })) {} - -ConnPoolImpl::~ConnPoolImpl() { - while (!ready_conns_.empty()) { - ready_conns_.front()->conn_->close(Network::ConnectionCloseType::NoFlush); - } - - while (!busy_conns_.empty()) { - busy_conns_.front()->conn_->close(Network::ConnectionCloseType::NoFlush); - } - - while (!pending_conns_.empty()) { - pending_conns_.front()->conn_->close(Network::ConnectionCloseType::NoFlush); - } - - // Make sure all connections are destroyed before we are destroyed. - dispatcher_.clearDeferredDeleteList(); -} - -void ConnPoolImpl::drainConnections() { - while (!ready_conns_.empty()) { - ready_conns_.front()->conn_->close(Network::ConnectionCloseType::NoFlush); - } - - // We drain busy and pending connections by manually setting remaining requests to 1. Thus, when - // the next response completes the connection will be destroyed. 
- for (const auto& conn : busy_conns_) { - conn->remaining_requests_ = 1; - } - - for (const auto& conn : pending_conns_) { - conn->remaining_requests_ = 1; - } -} - -void ConnPoolImpl::addDrainedCallback(DrainedCb cb) { - drained_callbacks_.push_back(cb); - checkForDrained(); -} - -void ConnPoolImpl::assignConnection(ActiveConn& conn, ConnectionPool::Callbacks& callbacks) { - ASSERT(conn.wrapper_ == nullptr); - conn.wrapper_ = std::make_shared(conn); - - callbacks.onPoolReady(std::make_unique(conn.wrapper_), - conn.real_host_description_); -} - -void ConnPoolImpl::checkForDrained() { - if (!drained_callbacks_.empty() && pending_requests_.empty() && busy_conns_.empty() && - pending_conns_.empty()) { - while (!ready_conns_.empty()) { - ready_conns_.front()->conn_->close(Network::ConnectionCloseType::NoFlush); - } - - for (const DrainedCb& cb : drained_callbacks_) { - cb(); - } - } -} - -void ConnPoolImpl::createNewConnection() { - ENVOY_LOG(debug, "creating a new connection"); - ActiveConnPtr conn(new ActiveConn(*this)); - conn->moveIntoList(std::move(conn), pending_conns_); -} - -ConnectionPool::Cancellable* ConnPoolImpl::newConnection(ConnectionPool::Callbacks& callbacks) { - if (!ready_conns_.empty()) { - ready_conns_.front()->moveBetweenLists(ready_conns_, busy_conns_); - ENVOY_CONN_LOG(debug, "using existing connection", *busy_conns_.front()->conn_); - assignConnection(*busy_conns_.front(), callbacks); - return nullptr; - } - - if (host_->cluster().resourceManager(priority_).pendingRequests().canCreate()) { - bool can_create_connection = - host_->cluster().resourceManager(priority_).connections().canCreate(); - if (!can_create_connection) { - host_->cluster().stats().upstream_cx_overflow_.inc(); - } - - // If we have no connections at all, make one no matter what so we don't starve. 
- if ((ready_conns_.empty() && busy_conns_.empty() && pending_conns_.empty()) || - can_create_connection) { - createNewConnection(); - } - - ENVOY_LOG(debug, "queueing request due to no available connections"); - PendingRequestPtr pending_request(new PendingRequest(*this, callbacks)); - pending_request->moveIntoList(std::move(pending_request), pending_requests_); - return pending_requests_.front().get(); - } else { - ENVOY_LOG(debug, "max pending requests overflow"); - callbacks.onPoolFailure(ConnectionPool::PoolFailureReason::Overflow, nullptr); - host_->cluster().stats().upstream_rq_pending_overflow_.inc(); - return nullptr; - } -} - -void ConnPoolImpl::onConnectionEvent(ActiveConn& conn, Network::ConnectionEvent event) { - if (event == Network::ConnectionEvent::RemoteClose || - event == Network::ConnectionEvent::LocalClose) { - ENVOY_CONN_LOG(debug, "client disconnected", *conn.conn_); - - Envoy::Upstream::reportUpstreamCxDestroy(host_, event); - - ActiveConnPtr removed; - bool check_for_drained = true; - if (conn.wrapper_ != nullptr) { - if (!conn.wrapper_->released_) { - Envoy::Upstream::reportUpstreamCxDestroyActiveRequest(host_, event); - - conn.wrapper_->release(true); - } - - removed = conn.removeFromList(busy_conns_); - } else if (!conn.connect_timer_) { - // The connect timer is destroyed on connect. The lack of a connect timer means that this - // connection is idle and in the ready pool. - removed = conn.removeFromList(ready_conns_); - check_for_drained = false; - } else { - // The only time this happens is if we actually saw a connect failure. - host_->cluster().stats().upstream_cx_connect_fail_.inc(); - host_->stats().cx_connect_fail_.inc(); - removed = conn.removeFromList(pending_conns_); - - // Raw connect failures should never happen under normal circumstances. If we have an upstream - // that is behaving badly, requests can get stuck here in the pending state. 
If we see a - // connect failure, we purge all pending requests so that calling code can determine what to - // do with the request. - // NOTE: We move the existing pending requests to a temporary list. This is done so that - // if retry logic submits a new request to the pool, we don't fail it inline. - // TODO(lizan): If pool failure due to transport socket, propagate the reason to access log. - ConnectionPool::PoolFailureReason reason; - if (conn.timed_out_) { - reason = ConnectionPool::PoolFailureReason::Timeout; - } else if (event == Network::ConnectionEvent::RemoteClose) { - reason = ConnectionPool::PoolFailureReason::RemoteConnectionFailure; - } else { - reason = ConnectionPool::PoolFailureReason::LocalConnectionFailure; - } - - std::list pending_requests_to_purge(std::move(pending_requests_)); - while (!pending_requests_to_purge.empty()) { - PendingRequestPtr request = - pending_requests_to_purge.front()->removeFromList(pending_requests_to_purge); - host_->cluster().stats().upstream_rq_pending_failure_eject_.inc(); - request->callbacks_.onPoolFailure(reason, conn.real_host_description_); - } - } - - dispatcher_.deferredDelete(std::move(removed)); - - // If we have pending requests and we just lost a connection we should make a new one. - if (pending_requests_.size() > - (ready_conns_.size() + busy_conns_.size() + pending_conns_.size())) { - createNewConnection(); - } - - if (check_for_drained) { - checkForDrained(); - } - } - - if (conn.connect_timer_) { - conn.connect_timer_->disableTimer(); - conn.connect_timer_.reset(); - } - - // Note that the order in this function is important. Concretely, we must destroy the connect - // timer before we process an idle connection, because if this results in an immediate - // drain/destruction event, we key off of the existence of the connect timer above to determine - // whether the connection is in the ready list (connected) or the pending list (failed to - // connect). 
- if (event == Network::ConnectionEvent::Connected) { - conn.conn_->streamInfo().setDownstreamSslConnection(conn.conn_->ssl()); - conn_connect_ms_->complete(); - processIdleConnection(conn, true, false); - } -} - -void ConnPoolImpl::onPendingRequestCancel(PendingRequest& request, - ConnectionPool::CancelPolicy cancel_policy) { - ENVOY_LOG(debug, "canceling pending request"); - request.removeFromList(pending_requests_); - host_->cluster().stats().upstream_rq_cancelled_.inc(); - - // If the cancel requests closure of excess connections and there are more pending connections - // than requests, close the most recently created pending connection. - if (cancel_policy == ConnectionPool::CancelPolicy::CloseExcess && - pending_requests_.size() < pending_conns_.size()) { - ENVOY_LOG(debug, "canceling pending connection"); - pending_conns_.back()->conn_->close(Network::ConnectionCloseType::NoFlush); - } - - checkForDrained(); -} - -void ConnPoolImpl::onConnReleased(ActiveConn& conn) { - ENVOY_CONN_LOG(debug, "connection released", *conn.conn_); - - if (conn.remaining_requests_ > 0 && --conn.remaining_requests_ == 0) { - ENVOY_CONN_LOG(debug, "maximum requests per connection", *conn.conn_); - host_->cluster().stats().upstream_cx_max_requests_.inc(); - - conn.conn_->close(Network::ConnectionCloseType::NoFlush); - } else { - // Upstream connection might be closed right after response is complete. Setting delay=true - // here to assign pending requests in next dispatcher loop to handle that case. 
- // https://github.com/envoyproxy/envoy/issues/2715 - processIdleConnection(conn, false, true); - } -} - -void ConnPoolImpl::onConnDestroyed(ActiveConn& conn) { - ENVOY_CONN_LOG(debug, "connection destroyed", *conn.conn_); -} - -void ConnPoolImpl::onUpstreamReady() { - upstream_ready_enabled_ = false; - while (!pending_requests_.empty() && !ready_conns_.empty()) { - ActiveConn& conn = *ready_conns_.front(); - ENVOY_CONN_LOG(debug, "assigning connection", *conn.conn_); - // There is work to do so bind a connection to the caller and move it to the busy list. Pending - // requests are pushed onto the front, so pull from the back. - conn.moveBetweenLists(ready_conns_, busy_conns_); - assignConnection(conn, pending_requests_.back()->callbacks_); - pending_requests_.pop_back(); - } -} - -void ConnPoolImpl::processIdleConnection(ActiveConn& conn, bool new_connection, bool delay) { - if (conn.wrapper_) { - conn.wrapper_->invalidate(); - conn.wrapper_.reset(); - } - - // TODO(zuercher): As a future improvement, we may wish to close extra connections when there are - // no pending requests rather than moving them to ready_conns_. For conn pool callers that re-use - // connections it is possible that a busy connection may be re-assigned to a pending request - // while a new connection is pending. The current behavior is to move the pending connection to - // the ready list to await a future request. For some protocols, e.g. mysql which has the server - // transmit handshake data on connect, it may be desirable to close the connection if no pending - // request is available. The CloseExcess flag for cancel is related: if we close pending - // connections without requests here it becomes superfluous (instead of closing connections at - // cancel time we'd wait until they completed and close them here). Finally, we want to avoid - // requiring operators to correct configure clusters to get the necessary pending connection - // behavior (e.g. 
we want to find a way to enable the new behavior without having to configure - // it on a cluster). - - if (pending_requests_.empty() || delay) { - // There is nothing to service or delayed processing is requested, so just move the connection - // into the ready list. - ENVOY_CONN_LOG(debug, "moving to ready", *conn.conn_); - if (new_connection) { - conn.moveBetweenLists(pending_conns_, ready_conns_); - } else { - conn.moveBetweenLists(busy_conns_, ready_conns_); - } - } else { - // There is work to do immediately so bind a request to the caller and move it to the busy list. - // Pending requests are pushed onto the front, so pull from the back. - ENVOY_CONN_LOG(debug, "assigning connection", *conn.conn_); - if (new_connection) { - conn.moveBetweenLists(pending_conns_, busy_conns_); - } - assignConnection(conn, pending_requests_.back()->callbacks_); - pending_requests_.pop_back(); - } - - if (delay && !pending_requests_.empty() && !upstream_ready_enabled_) { - upstream_ready_enabled_ = true; - upstream_ready_timer_->enableTimer(std::chrono::milliseconds(0)); - } - - checkForDrained(); -} - -ConnPoolImpl::ConnectionWrapper::ConnectionWrapper(ActiveConn& parent) : parent_(parent) { - parent_.parent_.host_->cluster().stats().upstream_rq_total_.inc(); - parent_.parent_.host_->cluster().stats().upstream_rq_active_.inc(); - parent_.parent_.host_->stats().rq_total_.inc(); - parent_.parent_.host_->stats().rq_active_.inc(); -} - -Network::ClientConnection& ConnPoolImpl::ConnectionWrapper::connection() { - ASSERT(conn_valid_); - return *parent_.conn_; -} - -void ConnPoolImpl::ConnectionWrapper::addUpstreamCallbacks(ConnectionPool::UpstreamCallbacks& cb) { - ASSERT(!released_); - callbacks_ = &cb; -} - -void ConnPoolImpl::ConnectionWrapper::release(bool closed) { - // Allow multiple calls: connection close and destruction of ConnectionDataImplPtr will both - // result in this call. 
- if (!released_) { - released_ = true; - callbacks_ = nullptr; - if (!closed) { - parent_.parent_.onConnReleased(parent_); - } - - parent_.parent_.host_->cluster().stats().upstream_rq_active_.dec(); - parent_.parent_.host_->stats().rq_active_.dec(); - } -} - -ConnPoolImpl::PendingRequest::PendingRequest(ConnPoolImpl& parent, - ConnectionPool::Callbacks& callbacks) - : parent_(parent), callbacks_(callbacks) { - parent_.host_->cluster().stats().upstream_rq_pending_total_.inc(); - parent_.host_->cluster().stats().upstream_rq_pending_active_.inc(); - parent_.host_->cluster().resourceManager(parent_.priority_).pendingRequests().inc(); -} - -ConnPoolImpl::PendingRequest::~PendingRequest() { - parent_.host_->cluster().stats().upstream_rq_pending_active_.dec(); - parent_.host_->cluster().resourceManager(parent_.priority_).pendingRequests().dec(); -} - -ConnPoolImpl::ActiveConn::ActiveConn(ConnPoolImpl& parent) - : parent_(parent), - connect_timer_(parent_.dispatcher_.createTimer([this]() -> void { onConnectTimeout(); })), - remaining_requests_(parent_.host_->cluster().maxRequestsPerConnection()), timed_out_(false) { - - parent_.conn_connect_ms_ = std::make_unique( - parent_.host_->cluster().stats().upstream_cx_connect_ms_, parent_.dispatcher_.timeSource()); - - Upstream::Host::CreateConnectionData data = parent_.host_->createConnection( - parent_.dispatcher_, parent_.socket_options_, parent_.transport_socket_options_); +ActiveTcpClient::ActiveTcpClient(ConnPoolImpl& parent, const Upstream::HostConstSharedPtr& host, + uint64_t concurrent_stream_limit) + : Envoy::ConnectionPool::ActiveClient(parent, host->cluster().maxRequestsPerConnection(), + concurrent_stream_limit), + parent_(parent) { + Upstream::Host::CreateConnectionData data = host->createConnection( + parent_.dispatcher(), parent_.socketOptions(), parent_.transportSocketOptions()); real_host_description_ = data.host_description_; - - conn_ = std::move(data.connection_); - - 
conn_->detectEarlyCloseWhenReadDisabled(false); - conn_->addConnectionCallbacks(*this); - conn_->addReadFilter(Network::ReadFilterSharedPtr{new ConnReadFilter(*this)}); - - ENVOY_CONN_LOG(debug, "connecting", *conn_); - conn_->connect(); - - parent_.host_->cluster().stats().upstream_cx_total_.inc(); - parent_.host_->cluster().stats().upstream_cx_active_.inc(); - parent_.host_->stats().cx_total_.inc(); - parent_.host_->stats().cx_active_.inc(); - conn_length_ = std::make_unique( - parent_.host_->cluster().stats().upstream_cx_length_ms_, parent_.dispatcher_.timeSource()); - connect_timer_->enableTimer(parent_.host_->cluster().connectTimeout()); - parent_.host_->cluster().resourceManager(parent_.priority_).connections().inc(); - - conn_->setConnectionStats({parent_.host_->cluster().stats().upstream_cx_rx_bytes_total_, - parent_.host_->cluster().stats().upstream_cx_rx_bytes_buffered_, - parent_.host_->cluster().stats().upstream_cx_tx_bytes_total_, - parent_.host_->cluster().stats().upstream_cx_tx_bytes_buffered_, - &parent_.host_->cluster().stats().bind_errors_, nullptr}); - - // We just universally set no delay on connections. Theoretically we might at some point want - // to make this configurable. - conn_->noDelay(true); -} - -ConnPoolImpl::ActiveConn::~ActiveConn() { - if (wrapper_) { - wrapper_->invalidate(); - } - - parent_.host_->cluster().stats().upstream_cx_active_.dec(); - parent_.host_->stats().cx_active_.dec(); - conn_length_->complete(); - parent_.host_->cluster().resourceManager(parent_.priority_).connections().dec(); - - parent_.onConnDestroyed(*this); -} - -void ConnPoolImpl::ActiveConn::onConnectTimeout() { - // We just close the connection at this point. This will result in both a timeout and a connect - // failure and will fold into all the normal connect failure logic. 
- ENVOY_CONN_LOG(debug, "connect timeout", *conn_); - timed_out_ = true; - parent_.host_->cluster().stats().upstream_cx_connect_timeout_.inc(); - conn_->close(Network::ConnectionCloseType::NoFlush); -} - -void ConnPoolImpl::ActiveConn::onUpstreamData(Buffer::Instance& data, bool end_stream) { - if (wrapper_ != nullptr && wrapper_->callbacks_ != nullptr) { - // Delegate to the connection owner. - wrapper_->callbacks_->onUpstreamData(data, end_stream); - } else { - // Unexpected data from upstream, close down the connection. - ENVOY_CONN_LOG(debug, "unexpected data from upstream, closing connection", *conn_); - conn_->close(Network::ConnectionCloseType::NoFlush); - } -} - -void ConnPoolImpl::ActiveConn::onEvent(Network::ConnectionEvent event) { - ConnectionPool::UpstreamCallbacks* cb = nullptr; - if (wrapper_ != nullptr && wrapper_->callbacks_ != nullptr) { - cb = wrapper_->callbacks_; - } - - // In the event of a close event, we want to update the pool's state before triggering callbacks, - // preventing the case where we attempt to return a closed connection to the ready pool. - parent_.onConnectionEvent(*this, event); - - if (cb) { - cb->onEvent(event); - } -} - -void ConnPoolImpl::ActiveConn::onAboveWriteBufferHighWatermark() { - if (wrapper_ != nullptr && wrapper_->callbacks_ != nullptr) { - wrapper_->callbacks_->onAboveWriteBufferHighWatermark(); - } -} - -void ConnPoolImpl::ActiveConn::onBelowWriteBufferLowWatermark() { - if (wrapper_ != nullptr && wrapper_->callbacks_ != nullptr) { - wrapper_->callbacks_->onBelowWriteBufferLowWatermark(); + connection_ = std::move(data.connection_); + connection_->addConnectionCallbacks(*this); + connection_->detectEarlyCloseWhenReadDisabled(false); + connection_->addReadFilter(std::make_shared(*this)); + connection_->connect(); +} + +ActiveTcpClient::~ActiveTcpClient() { + // Handle the case where deferred delete results in the ActiveClient being destroyed before + // TcpConnectionData. 
Make sure the TcpConnectionData will not refer to this ActiveTcpClient + // and handle clean up normally done in clearCallbacks() + if (tcp_connection_data_) { + ASSERT(state_ == ActiveClient::State::CLOSED); + tcp_connection_data_->release(); + parent_.onRequestClosed(*this, true); + parent_.checkForDrained(); + } + parent_.onConnDestroyed(); +} + +void ActiveTcpClient::clearCallbacks() { + if (state_ == Envoy::ConnectionPool::ActiveClient::State::BUSY || + state_ == Envoy::ConnectionPool::ActiveClient::State::DRAINING) { + parent_.onConnReleased(*this); + } + callbacks_ = nullptr; + tcp_connection_data_ = nullptr; + parent_.onRequestClosed(*this, true); + parent_.checkForDrained(); +} + +void ActiveTcpClient::onEvent(Network::ConnectionEvent event) { + Envoy::ConnectionPool::ActiveClient::onEvent(event); + // Do not pass the Connected event to TCP proxy sessions. + // The tcp proxy filter synthesizes its own Connected event in onPoolReadyBase + // and receiving it twice causes problems. + // TODO(alyssawilk) clean this up in a follow-up. It's confusing. + if (callbacks_ && event != Network::ConnectionEvent::Connected) { + callbacks_->onEvent(event); + // After receiving a disconnect event, the owner of callbacks_ will likely self-destruct. + // Clear the pointer to avoid using it again. 
+ callbacks_ = nullptr; } } diff --git a/source/common/tcp/conn_pool.h b/source/common/tcp/conn_pool.h index a80e8e05a0446..8b6c5e6e983da 100644 --- a/source/common/tcp/conn_pool.h +++ b/source/common/tcp/conn_pool.h @@ -13,153 +13,198 @@ #include "common/common/linked_object.h" #include "common/common/logger.h" +#include "common/http/conn_pool_base.h" #include "common/network/filter_impl.h" namespace Envoy { namespace Tcp { -class ConnPoolImpl : Logger::Loggable, public ConnectionPool::Instance { -public: - ConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr host, - Upstream::ResourcePriority priority, - const Network::ConnectionSocket::OptionsSharedPtr& options, - Network::TransportSocketOptionsSharedPtr transport_socket_options); - - ~ConnPoolImpl() override; - - // ConnectionPool::Instance - void addDrainedCallback(DrainedCb cb) override; - void drainConnections() override; - ConnectionPool::Cancellable* newConnection(ConnectionPool::Callbacks& callbacks) override; - Upstream::HostDescriptionConstSharedPtr host() const override { return host_; } - -protected: - struct ActiveConn; - - struct ConnectionWrapper { - ConnectionWrapper(ActiveConn& parent); - - Network::ClientConnection& connection(); - void addUpstreamCallbacks(ConnectionPool::UpstreamCallbacks& callbacks); - void setConnectionState(ConnectionPool::ConnectionStatePtr&& state) { - parent_.setConnectionState(std::move(state)); - }; - ConnectionPool::ConnectionState* connectionState() { return parent_.connectionState(); } - - void release(bool closed); - - void invalidate() { conn_valid_ = false; } +class ConnPoolImpl; - ActiveConn& parent_; - ConnectionPool::UpstreamCallbacks* callbacks_{}; - bool released_{false}; - bool conn_valid_{true}; - }; - - using ConnectionWrapperSharedPtr = std::shared_ptr; - - struct ConnectionDataImpl : public ConnectionPool::ConnectionData { - ConnectionDataImpl(ConnectionWrapperSharedPtr wrapper) : wrapper_(std::move(wrapper)) {} - 
~ConnectionDataImpl() override { wrapper_->release(false); } +struct TcpAttachContext : public Envoy::ConnectionPool::AttachContext { + TcpAttachContext(Tcp::ConnectionPool::Callbacks* callbacks) : callbacks_(callbacks) {} + Tcp::ConnectionPool::Callbacks* callbacks_; +}; - // ConnectionPool::ConnectionData - Network::ClientConnection& connection() override { return wrapper_->connection(); } - void addUpstreamCallbacks(ConnectionPool::UpstreamCallbacks& callbacks) override { - wrapper_->addUpstreamCallbacks(callbacks); - }; - void setConnectionState(ConnectionPool::ConnectionStatePtr&& state) override { - wrapper_->setConnectionState(std::move(state)); - } - ConnectionPool::ConnectionState* connectionState() override { - return wrapper_->connectionState(); - } +class TcpPendingRequest : public Envoy::ConnectionPool::PendingRequest { +public: + TcpPendingRequest(Envoy::ConnectionPool::ConnPoolImplBase& parent, TcpAttachContext& context) + : Envoy::ConnectionPool::PendingRequest(parent), context_(context) {} + Envoy::ConnectionPool::AttachContext& context() override { return context_; } - ConnectionWrapperSharedPtr wrapper_; - }; + TcpAttachContext context_; +}; +class ActiveTcpClient : public Envoy::ConnectionPool::ActiveClient { +public: struct ConnReadFilter : public Network::ReadFilterBaseImpl { - ConnReadFilter(ActiveConn& parent) : parent_(parent) {} + ConnReadFilter(ActiveTcpClient& parent) : parent_(parent) {} // Network::ReadFilter Network::FilterStatus onData(Buffer::Instance& data, bool end_stream) override { parent_.onUpstreamData(data, end_stream); return Network::FilterStatus::StopIteration; } - - ActiveConn& parent_; + ActiveTcpClient& parent_; }; - struct ActiveConn : LinkedObject, - public Network::ConnectionCallbacks, - public Event::DeferredDeletable { - ActiveConn(ConnPoolImpl& parent); - ~ActiveConn() override; + // This acts as the bridge between the ActiveTcpClient and an individual TCP connection. 
+ class TcpConnectionData : public Envoy::Tcp::ConnectionPool::ConnectionData { + public: + TcpConnectionData(ActiveTcpClient& parent, Network::ClientConnection& connection) + : parent_(&parent), connection_(connection) { + parent_->tcp_connection_data_ = this; + } + ~TcpConnectionData() override { + // Generally it is the case that TcpConnectionData will be destroyed before the + // ActiveTcpClient. Because ordering on the deferred delete list is not guaranteed in the + // case of a disconnect, make sure parent_ is valid before doing clean-up. + if (parent_) { + parent_->clearCallbacks(); + } + } - void onConnectTimeout(); - void onUpstreamData(Buffer::Instance& data, bool end_stream); + Network::ClientConnection& connection() override { return connection_; } + void setConnectionState(ConnectionPool::ConnectionStatePtr&& state) override { + parent_->connection_state_ = std::move(state); + } - // Network::ConnectionCallbacks - void onEvent(Network::ConnectionEvent event) override; - void onAboveWriteBufferHighWatermark() override; - void onBelowWriteBufferLowWatermark() override; + void addUpstreamCallbacks(ConnectionPool::UpstreamCallbacks& callbacks) override { + parent_->callbacks_ = &callbacks; + } + void release() { parent_ = nullptr; } - void setConnectionState(ConnectionPool::ConnectionStatePtr&& state) { - conn_state_ = std::move(state); + protected: + ConnectionPool::ConnectionState* connectionState() override { + return parent_->connection_state_.get(); } - ConnectionPool::ConnectionState* connectionState() { return conn_state_.get(); } - - ConnPoolImpl& parent_; - Upstream::HostDescriptionConstSharedPtr real_host_description_; - ConnectionWrapperSharedPtr wrapper_; - Network::ClientConnectionPtr conn_; - ConnectionPool::ConnectionStatePtr conn_state_; - Event::TimerPtr connect_timer_; - Stats::TimespanPtr conn_length_; - uint64_t remaining_requests_; - bool timed_out_; - }; - using ActiveConnPtr = std::unique_ptr; + private: + ActiveTcpClient* parent_; + 
Network::ClientConnection& connection_; + }; - struct PendingRequest : LinkedObject, public ConnectionPool::Cancellable { - PendingRequest(ConnPoolImpl& parent, ConnectionPool::Callbacks& callbacks); - ~PendingRequest() override; + ActiveTcpClient(ConnPoolImpl& parent, const Upstream::HostConstSharedPtr& host, + uint64_t concurrent_stream_limit); + ~ActiveTcpClient() override; + + // Override the default's of Envoy::ConnectionPool::ActiveClient for class-specific functions. + // Network::ConnectionCallbacks + void onEvent(Network::ConnectionEvent event) override; + void onAboveWriteBufferHighWatermark() override { callbacks_->onAboveWriteBufferHighWatermark(); } + void onBelowWriteBufferLowWatermark() override { callbacks_->onBelowWriteBufferLowWatermark(); } + + void close() override { connection_->close(Network::ConnectionCloseType::NoFlush); } + size_t numActiveRequests() const override { return callbacks_ ? 1 : 0; } + bool closingWithIncompleteRequest() const override { return false; } + uint64_t id() const override { return connection_->id(); } + + void onUpstreamData(Buffer::Instance& data, bool end_stream) { + if (callbacks_) { + callbacks_->onUpstreamData(data, end_stream); + } else { + close(); + } + } + void clearCallbacks(); + + ConnPoolImpl& parent_; + ConnectionPool::UpstreamCallbacks* callbacks_{}; + Network::ClientConnectionPtr connection_; + ConnectionPool::ConnectionStatePtr connection_state_; + TcpConnectionData* tcp_connection_data_{}; +}; - // ConnectionPool::Cancellable - void cancel(ConnectionPool::CancelPolicy cancel_policy) override { - parent_.onPendingRequestCancel(*this, cancel_policy); +class ConnPoolImpl : public Envoy::ConnectionPool::ConnPoolImplBase, + public Tcp::ConnectionPool::Instance { +public: + ConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr host, + Upstream::ResourcePriority priority, + const Network::ConnectionSocket::OptionsSharedPtr& options, + Network::TransportSocketOptionsSharedPtr 
transport_socket_options) + : Envoy::ConnectionPool::ConnPoolImplBase(host, priority, dispatcher, options, + transport_socket_options), + upstream_ready_cb_(dispatcher.createSchedulableCallback([this]() { + upstream_ready_enabled_ = false; + onUpstreamReady(); + })) {} + ~ConnPoolImpl() override { destructAllConnections(); } + + void addDrainedCallback(DrainedCb cb) override { addDrainedCallbackImpl(cb); } + void drainConnections() override { + drainConnectionsImpl(); + // Legacy behavior for the TCP connection pool marks all connecting clients + // as draining. + for (auto& connecting_client : connecting_clients_) { + if (connecting_client->remaining_streams_ > 1) { + uint64_t old_limit = connecting_client->effectiveConcurrentRequestLimit(); + connecting_client->remaining_streams_ = 1; + if (connecting_client->effectiveConcurrentRequestLimit() < old_limit) { + connecting_stream_capacity_ -= + (old_limit - connecting_client->effectiveConcurrentRequestLimit()); + } + } } + } - ConnPoolImpl& parent_; - ConnectionPool::Callbacks& callbacks_; - }; + void closeConnections() override { + for (auto* list : {&ready_clients_, &busy_clients_, &connecting_clients_}) { + while (!list->empty()) { + list->front()->close(); + } + } + } + ConnectionPool::Cancellable* newConnection(Tcp::ConnectionPool::Callbacks& callbacks) override { + TcpAttachContext context(&callbacks); + return Envoy::ConnectionPool::ConnPoolImplBase::newStream(context); + } + + ConnectionPool::Cancellable* + newPendingRequest(Envoy::ConnectionPool::AttachContext& context) override { + Envoy::ConnectionPool::PendingRequestPtr pending_stream = + std::make_unique(*this, typedContext(context)); + LinkedList::moveIntoList(std::move(pending_stream), pending_streams_); + return pending_streams_.front().get(); + } + + Upstream::HostDescriptionConstSharedPtr host() const override { + return Envoy::ConnectionPool::ConnPoolImplBase::host(); + } + + Envoy::ConnectionPool::ActiveClientPtr instantiateActiveClient() 
override { + return std::make_unique(*this, Envoy::ConnectionPool::ConnPoolImplBase::host(), + 1); + } + + void onPoolReady(Envoy::ConnectionPool::ActiveClient& client, + Envoy::ConnectionPool::AttachContext& context) override { + ActiveTcpClient* tcp_client = static_cast(&client); + auto* callbacks = typedContext(context).callbacks_; + std::unique_ptr connection_data = + std::make_unique(*tcp_client, *tcp_client->connection_); + callbacks->onPoolReady(std::move(connection_data), tcp_client->real_host_description_); + } + + void onPoolFailure(const Upstream::HostDescriptionConstSharedPtr& host_description, + absl::string_view, ConnectionPool::PoolFailureReason reason, + Envoy::ConnectionPool::AttachContext& context) override { + auto* callbacks = typedContext(context).callbacks_; + callbacks->onPoolFailure(reason, host_description); + } + + // These two functions exist for testing parity between old and new Tcp Connection Pools. + virtual void onConnReleased(Envoy::ConnectionPool::ActiveClient& client) { + if (client.state_ == Envoy::ConnectionPool::ActiveClient::State::BUSY) { + if (!pending_streams_.empty() && !upstream_ready_enabled_) { + upstream_ready_cb_->scheduleCallbackCurrentIteration(); + } + } + } + virtual void onConnDestroyed() {} - using PendingRequestPtr = std::unique_ptr; - - void assignConnection(ActiveConn& conn, ConnectionPool::Callbacks& callbacks); - void createNewConnection(); - void onConnectionEvent(ActiveConn& conn, Network::ConnectionEvent event); - void onPendingRequestCancel(PendingRequest& request, ConnectionPool::CancelPolicy cancel_policy); - virtual void onConnReleased(ActiveConn& conn); - virtual void onConnDestroyed(ActiveConn& conn); - void onUpstreamReady(); - void processIdleConnection(ActiveConn& conn, bool new_connection, bool delay); - void checkForDrained(); - - Event::Dispatcher& dispatcher_; - Upstream::HostConstSharedPtr host_; - Upstream::ResourcePriority priority_; - const Network::ConnectionSocket::OptionsSharedPtr 
socket_options_; - Network::TransportSocketOptionsSharedPtr transport_socket_options_; - - std::list pending_conns_; // conns awaiting connected event - std::list ready_conns_; // conns ready for assignment - std::list busy_conns_; // conns assigned - std::list pending_requests_; - std::list drained_callbacks_; - Stats::TimespanPtr conn_connect_ms_; - Event::TimerPtr upstream_ready_timer_; - bool upstream_ready_enabled_{false}; +protected: + Event::SchedulableCallbackPtr upstream_ready_cb_; + bool upstream_ready_enabled_{}; }; } // namespace Tcp diff --git a/source/common/tcp/original_conn_pool.cc b/source/common/tcp/original_conn_pool.cc new file mode 100644 index 0000000000000..b34c31280f89a --- /dev/null +++ b/source/common/tcp/original_conn_pool.cc @@ -0,0 +1,467 @@ +#include "common/tcp/original_conn_pool.h" + +#include + +#include "envoy/event/dispatcher.h" +#include "envoy/event/timer.h" +#include "envoy/upstream/upstream.h" + +#include "common/stats/timespan_impl.h" +#include "common/upstream/upstream_impl.h" + +namespace Envoy { +namespace Tcp { + +OriginalConnPoolImpl::OriginalConnPoolImpl( + Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr host, + Upstream::ResourcePriority priority, const Network::ConnectionSocket::OptionsSharedPtr& options, + Network::TransportSocketOptionsSharedPtr transport_socket_options) + : dispatcher_(dispatcher), host_(host), priority_(priority), socket_options_(options), + transport_socket_options_(transport_socket_options), + upstream_ready_cb_(dispatcher_.createSchedulableCallback([this]() { onUpstreamReady(); })) {} + +OriginalConnPoolImpl::~OriginalConnPoolImpl() { + while (!ready_conns_.empty()) { + ready_conns_.front()->conn_->close(Network::ConnectionCloseType::NoFlush); + } + + while (!busy_conns_.empty()) { + busy_conns_.front()->conn_->close(Network::ConnectionCloseType::NoFlush); + } + + while (!pending_conns_.empty()) { + pending_conns_.front()->conn_->close(Network::ConnectionCloseType::NoFlush); + } + + 
// Make sure all connections are destroyed before we are destroyed. + dispatcher_.clearDeferredDeleteList(); +} + +void OriginalConnPoolImpl::drainConnections() { + while (!ready_conns_.empty()) { + ready_conns_.front()->conn_->close(Network::ConnectionCloseType::NoFlush); + } + + // We drain busy and pending connections by manually setting remaining requests to 1. Thus, when + // the next response completes the connection will be destroyed. + for (const auto& conn : busy_conns_) { + conn->remaining_requests_ = 1; + } + + for (const auto& conn : pending_conns_) { + conn->remaining_requests_ = 1; + } +} + +void OriginalConnPoolImpl::closeConnections() { + while (!ready_conns_.empty()) { + ready_conns_.front()->conn_->close(Network::ConnectionCloseType::NoFlush); + } + + while (!busy_conns_.empty()) { + busy_conns_.front()->conn_->close(Network::ConnectionCloseType::NoFlush); + } + + while (!pending_conns_.empty()) { + pending_conns_.front()->conn_->close(Network::ConnectionCloseType::NoFlush); + } +} + +void OriginalConnPoolImpl::addDrainedCallback(DrainedCb cb) { + drained_callbacks_.push_back(cb); + checkForDrained(); +} + +void OriginalConnPoolImpl::assignConnection(ActiveConn& conn, + ConnectionPool::Callbacks& callbacks) { + ASSERT(conn.wrapper_ == nullptr); + conn.wrapper_ = std::make_shared(conn); + + callbacks.onPoolReady(std::make_unique(conn.wrapper_), + conn.real_host_description_); +} + +void OriginalConnPoolImpl::checkForDrained() { + if (!drained_callbacks_.empty() && pending_requests_.empty() && busy_conns_.empty() && + pending_conns_.empty()) { + while (!ready_conns_.empty()) { + ready_conns_.front()->conn_->close(Network::ConnectionCloseType::NoFlush); + } + + for (const DrainedCb& cb : drained_callbacks_) { + cb(); + } + } +} + +void OriginalConnPoolImpl::createNewConnection() { + ENVOY_LOG(debug, "creating a new connection"); + ActiveConnPtr conn(new ActiveConn(*this)); + LinkedList::moveIntoList(std::move(conn), pending_conns_); +} + 
+ConnectionPool::Cancellable* +OriginalConnPoolImpl::newConnection(ConnectionPool::Callbacks& callbacks) { + if (!ready_conns_.empty()) { + ready_conns_.front()->moveBetweenLists(ready_conns_, busy_conns_); + ENVOY_CONN_LOG(debug, "using existing connection", *busy_conns_.front()->conn_); + assignConnection(*busy_conns_.front(), callbacks); + return nullptr; + } + + if (host_->cluster().resourceManager(priority_).pendingRequests().canCreate()) { + bool can_create_connection = + host_->cluster().resourceManager(priority_).connections().canCreate(); + if (!can_create_connection) { + host_->cluster().stats().upstream_cx_overflow_.inc(); + } + + // If we have no connections at all, make one no matter what so we don't starve. + if ((ready_conns_.empty() && busy_conns_.empty() && pending_conns_.empty()) || + can_create_connection) { + createNewConnection(); + } + + ENVOY_LOG(debug, "queueing request due to no available connections"); + PendingRequestPtr pending_request(new PendingRequest(*this, callbacks)); + LinkedList::moveIntoList(std::move(pending_request), pending_requests_); + return pending_requests_.front().get(); + } else { + ENVOY_LOG(debug, "max pending requests overflow"); + callbacks.onPoolFailure(ConnectionPool::PoolFailureReason::Overflow, nullptr); + host_->cluster().stats().upstream_rq_pending_overflow_.inc(); + return nullptr; + } +} + +void OriginalConnPoolImpl::onConnectionEvent(ActiveConn& conn, Network::ConnectionEvent event) { + if (event == Network::ConnectionEvent::RemoteClose || + event == Network::ConnectionEvent::LocalClose) { + ENVOY_CONN_LOG(debug, "client disconnected", *conn.conn_); + + Envoy::Upstream::reportUpstreamCxDestroy(host_, event); + + ActiveConnPtr removed; + bool check_for_drained = true; + if (conn.wrapper_ != nullptr) { + if (!conn.wrapper_->released_) { + Envoy::Upstream::reportUpstreamCxDestroyActiveRequest(host_, event); + + conn.wrapper_->release(true); + } + + removed = conn.removeFromList(busy_conns_); + } else if 
(!conn.connect_timer_) { + // The connect timer is destroyed on connect. The lack of a connect timer means that this + // connection is idle and in the ready pool. + removed = conn.removeFromList(ready_conns_); + check_for_drained = false; + } else { + // The only time this happens is if we actually saw a connect failure. + host_->cluster().stats().upstream_cx_connect_fail_.inc(); + host_->stats().cx_connect_fail_.inc(); + removed = conn.removeFromList(pending_conns_); + + // Raw connect failures should never happen under normal circumstances. If we have an upstream + // that is behaving badly, requests can get stuck here in the pending state. If we see a + // connect failure, we purge all pending requests so that calling code can determine what to + // do with the request. + // NOTE: We move the existing pending requests to a temporary list. This is done so that + // if retry logic submits a new request to the pool, we don't fail it inline. + // TODO(lizan): If pool failure due to transport socket, propagate the reason to access log. + ConnectionPool::PoolFailureReason reason; + if (conn.timed_out_) { + reason = ConnectionPool::PoolFailureReason::Timeout; + } else if (event == Network::ConnectionEvent::RemoteClose) { + reason = ConnectionPool::PoolFailureReason::RemoteConnectionFailure; + } else { + reason = ConnectionPool::PoolFailureReason::LocalConnectionFailure; + } + + std::list pending_requests_to_purge; + pending_requests_to_purge.swap(pending_requests_); + while (!pending_requests_to_purge.empty()) { + PendingRequestPtr request = + pending_requests_to_purge.front()->removeFromList(pending_requests_to_purge); + host_->cluster().stats().upstream_rq_pending_failure_eject_.inc(); + request->callbacks_.onPoolFailure(reason, conn.real_host_description_); + } + } + + dispatcher_.deferredDelete(std::move(removed)); + + // If we have pending requests and we just lost a connection we should make a new one. 
+ if (pending_requests_.size() > + (ready_conns_.size() + busy_conns_.size() + pending_conns_.size())) { + createNewConnection(); + } + + if (check_for_drained) { + checkForDrained(); + } + } + + if (conn.connect_timer_) { + conn.connect_timer_->disableTimer(); + conn.connect_timer_.reset(); + } + + // Note that the order in this function is important. Concretely, we must destroy the connect + // timer before we process an idle connection, because if this results in an immediate + // drain/destruction event, we key off of the existence of the connect timer above to determine + // whether the connection is in the ready list (connected) or the pending list (failed to + // connect). + if (event == Network::ConnectionEvent::Connected) { + conn.conn_->streamInfo().setDownstreamSslConnection(conn.conn_->ssl()); + conn_connect_ms_->complete(); + processIdleConnection(conn, true, false); + } +} + +void OriginalConnPoolImpl::onPendingRequestCancel(PendingRequest& request, + ConnectionPool::CancelPolicy cancel_policy) { + ENVOY_LOG(debug, "canceling pending request"); + request.removeFromList(pending_requests_); + host_->cluster().stats().upstream_rq_cancelled_.inc(); + + // If the cancel requests closure of excess connections and there are more pending connections + // than requests, close the most recently created pending connection. 
+ if (cancel_policy == ConnectionPool::CancelPolicy::CloseExcess && + pending_requests_.size() < pending_conns_.size()) { + ENVOY_LOG(debug, "canceling pending connection"); + pending_conns_.back()->conn_->close(Network::ConnectionCloseType::NoFlush); + } + + checkForDrained(); +} + +void OriginalConnPoolImpl::onConnReleased(ActiveConn& conn) { + ENVOY_CONN_LOG(debug, "connection released", *conn.conn_); + + if (conn.remaining_requests_ > 0 && --conn.remaining_requests_ == 0) { + ENVOY_CONN_LOG(debug, "maximum requests per connection", *conn.conn_); + host_->cluster().stats().upstream_cx_max_requests_.inc(); + + conn.conn_->close(Network::ConnectionCloseType::NoFlush); + } else { + // Upstream connection might be closed right after response is complete. Setting delay=true + // here to assign pending requests in next dispatcher loop to handle that case. + // https://github.com/envoyproxy/envoy/issues/2715 + processIdleConnection(conn, false, true); + } +} + +void OriginalConnPoolImpl::onConnDestroyed(ActiveConn& conn) { + ENVOY_CONN_LOG(debug, "connection destroyed", *conn.conn_); +} + +void OriginalConnPoolImpl::onUpstreamReady() { + upstream_ready_enabled_ = false; + while (!pending_requests_.empty() && !ready_conns_.empty()) { + ActiveConn& conn = *ready_conns_.front(); + ENVOY_CONN_LOG(debug, "assigning connection", *conn.conn_); + // There is work to do so bind a connection to the caller and move it to the busy list. Pending + // requests are pushed onto the front, so pull from the back. 
+ conn.moveBetweenLists(ready_conns_, busy_conns_); + assignConnection(conn, pending_requests_.back()->callbacks_); + pending_requests_.pop_back(); + } +} + +void OriginalConnPoolImpl::processIdleConnection(ActiveConn& conn, bool new_connection, + bool delay) { + if (conn.wrapper_) { + conn.wrapper_->invalidate(); + conn.wrapper_.reset(); + } + + // TODO(zuercher): As a future improvement, we may wish to close extra connections when there are + // no pending requests rather than moving them to ready_conns_. For conn pool callers that re-use + // connections it is possible that a busy connection may be re-assigned to a pending request + // while a new connection is pending. The current behavior is to move the pending connection to + // the ready list to await a future request. For some protocols, e.g. mysql which has the server + // transmit handshake data on connect, it may be desirable to close the connection if no pending + // request is available. The CloseExcess flag for cancel is related: if we close pending + // connections without requests here it becomes superfluous (instead of closing connections at + // cancel time we'd wait until they completed and close them here). Finally, we want to avoid + // requiring operators to correct configure clusters to get the necessary pending connection + // behavior (e.g. we want to find a way to enable the new behavior without having to configure + // it on a cluster). + + if (pending_requests_.empty() || delay) { + // There is nothing to service or delayed processing is requested, so just move the connection + // into the ready list. + ENVOY_CONN_LOG(debug, "moving to ready", *conn.conn_); + if (new_connection) { + conn.moveBetweenLists(pending_conns_, ready_conns_); + } else { + conn.moveBetweenLists(busy_conns_, ready_conns_); + } + } else { + // There is work to do immediately so bind a request to the caller and move it to the busy list. + // Pending requests are pushed onto the front, so pull from the back. 
+ ENVOY_CONN_LOG(debug, "assigning connection", *conn.conn_); + if (new_connection) { + conn.moveBetweenLists(pending_conns_, busy_conns_); + } + assignConnection(conn, pending_requests_.back()->callbacks_); + pending_requests_.pop_back(); + } + + if (delay && !pending_requests_.empty() && !upstream_ready_enabled_) { + upstream_ready_enabled_ = true; + upstream_ready_cb_->scheduleCallbackCurrentIteration(); + } + + checkForDrained(); +} + +OriginalConnPoolImpl::ConnectionWrapper::ConnectionWrapper(ActiveConn& parent) : parent_(parent) { + parent_.parent_.host_->cluster().stats().upstream_rq_total_.inc(); + parent_.parent_.host_->cluster().stats().upstream_rq_active_.inc(); + parent_.parent_.host_->stats().rq_total_.inc(); + parent_.parent_.host_->stats().rq_active_.inc(); +} + +Network::ClientConnection& OriginalConnPoolImpl::ConnectionWrapper::connection() { + ASSERT(conn_valid_); + return *parent_.conn_; +} + +void OriginalConnPoolImpl::ConnectionWrapper::addUpstreamCallbacks( + ConnectionPool::UpstreamCallbacks& cb) { + ASSERT(!released_); + callbacks_ = &cb; +} + +void OriginalConnPoolImpl::ConnectionWrapper::release(bool closed) { + // Allow multiple calls: connection close and destruction of ConnectionDataImplPtr will both + // result in this call. 
+ if (!released_) { + released_ = true; + callbacks_ = nullptr; + if (!closed) { + parent_.parent_.onConnReleased(parent_); + } + + parent_.parent_.host_->cluster().stats().upstream_rq_active_.dec(); + parent_.parent_.host_->stats().rq_active_.dec(); + } +} + +OriginalConnPoolImpl::PendingRequest::PendingRequest(OriginalConnPoolImpl& parent, + ConnectionPool::Callbacks& callbacks) + : parent_(parent), callbacks_(callbacks) { + parent_.host_->cluster().stats().upstream_rq_pending_total_.inc(); + parent_.host_->cluster().stats().upstream_rq_pending_active_.inc(); + parent_.host_->cluster().resourceManager(parent_.priority_).pendingRequests().inc(); +} + +OriginalConnPoolImpl::PendingRequest::~PendingRequest() { + parent_.host_->cluster().stats().upstream_rq_pending_active_.dec(); + parent_.host_->cluster().resourceManager(parent_.priority_).pendingRequests().dec(); +} + +OriginalConnPoolImpl::ActiveConn::ActiveConn(OriginalConnPoolImpl& parent) + : parent_(parent), + connect_timer_(parent_.dispatcher_.createTimer([this]() -> void { onConnectTimeout(); })), + remaining_requests_(parent_.host_->cluster().maxRequestsPerConnection()), timed_out_(false) { + + parent_.conn_connect_ms_ = std::make_unique( + parent_.host_->cluster().stats().upstream_cx_connect_ms_, parent_.dispatcher_.timeSource()); + + Upstream::Host::CreateConnectionData data = parent_.host_->createConnection( + parent_.dispatcher_, parent_.socket_options_, parent_.transport_socket_options_); + real_host_description_ = data.host_description_; + + conn_ = std::move(data.connection_); + + conn_->detectEarlyCloseWhenReadDisabled(false); + conn_->addConnectionCallbacks(*this); + conn_->addReadFilter(Network::ReadFilterSharedPtr{new ConnReadFilter(*this)}); + + ENVOY_CONN_LOG(debug, "connecting", *conn_); + conn_->connect(); + + parent_.host_->cluster().stats().upstream_cx_total_.inc(); + parent_.host_->cluster().stats().upstream_cx_active_.inc(); + parent_.host_->stats().cx_total_.inc(); + 
parent_.host_->stats().cx_active_.inc(); + conn_length_ = std::make_unique( + parent_.host_->cluster().stats().upstream_cx_length_ms_, parent_.dispatcher_.timeSource()); + connect_timer_->enableTimer(parent_.host_->cluster().connectTimeout()); + parent_.host_->cluster().resourceManager(parent_.priority_).connections().inc(); + + conn_->setConnectionStats({parent_.host_->cluster().stats().upstream_cx_rx_bytes_total_, + parent_.host_->cluster().stats().upstream_cx_rx_bytes_buffered_, + parent_.host_->cluster().stats().upstream_cx_tx_bytes_total_, + parent_.host_->cluster().stats().upstream_cx_tx_bytes_buffered_, + &parent_.host_->cluster().stats().bind_errors_, nullptr}); + + // We just universally set no delay on connections. Theoretically we might at some point want + // to make this configurable. + conn_->noDelay(true); +} + +OriginalConnPoolImpl::ActiveConn::~ActiveConn() { + if (wrapper_) { + wrapper_->invalidate(); + } + + parent_.host_->cluster().stats().upstream_cx_active_.dec(); + parent_.host_->stats().cx_active_.dec(); + conn_length_->complete(); + parent_.host_->cluster().resourceManager(parent_.priority_).connections().dec(); + + parent_.onConnDestroyed(*this); +} + +void OriginalConnPoolImpl::ActiveConn::onConnectTimeout() { + // We just close the connection at this point. This will result in both a timeout and a connect + // failure and will fold into all the normal connect failure logic. + ENVOY_CONN_LOG(debug, "connect timeout", *conn_); + timed_out_ = true; + parent_.host_->cluster().stats().upstream_cx_connect_timeout_.inc(); + conn_->close(Network::ConnectionCloseType::NoFlush); +} + +void OriginalConnPoolImpl::ActiveConn::onUpstreamData(Buffer::Instance& data, bool end_stream) { + if (wrapper_ != nullptr && wrapper_->callbacks_ != nullptr) { + // Delegate to the connection owner. + wrapper_->callbacks_->onUpstreamData(data, end_stream); + } else { + // Unexpected data from upstream, close down the connection. 
+ ENVOY_CONN_LOG(debug, "unexpected data from upstream, closing connection", *conn_); + conn_->close(Network::ConnectionCloseType::NoFlush); + } +} + +void OriginalConnPoolImpl::ActiveConn::onEvent(Network::ConnectionEvent event) { + ConnectionPool::UpstreamCallbacks* cb = nullptr; + if (wrapper_ != nullptr && wrapper_->callbacks_ != nullptr) { + cb = wrapper_->callbacks_; + } + + // In the event of a close event, we want to update the pool's state before triggering callbacks, + // preventing the case where we attempt to return a closed connection to the ready pool. + parent_.onConnectionEvent(*this, event); + + if (cb) { + cb->onEvent(event); + } +} + +void OriginalConnPoolImpl::ActiveConn::onAboveWriteBufferHighWatermark() { + if (wrapper_ != nullptr && wrapper_->callbacks_ != nullptr) { + wrapper_->callbacks_->onAboveWriteBufferHighWatermark(); + } +} + +void OriginalConnPoolImpl::ActiveConn::onBelowWriteBufferLowWatermark() { + if (wrapper_ != nullptr && wrapper_->callbacks_ != nullptr) { + wrapper_->callbacks_->onBelowWriteBufferLowWatermark(); + } +} + +} // namespace Tcp +} // namespace Envoy diff --git a/source/common/tcp/original_conn_pool.h b/source/common/tcp/original_conn_pool.h new file mode 100644 index 0000000000000..2c0af2d50680b --- /dev/null +++ b/source/common/tcp/original_conn_pool.h @@ -0,0 +1,168 @@ +#pragma once + +#include +#include + +#include "envoy/event/deferred_deletable.h" +#include "envoy/event/schedulable_cb.h" +#include "envoy/event/timer.h" +#include "envoy/network/connection.h" +#include "envoy/network/filter.h" +#include "envoy/stats/timespan.h" +#include "envoy/tcp/conn_pool.h" +#include "envoy/upstream/upstream.h" + +#include "common/common/linked_object.h" +#include "common/common/logger.h" +#include "common/network/filter_impl.h" + +namespace Envoy { +namespace Tcp { + +class OriginalConnPoolImpl : Logger::Loggable, public ConnectionPool::Instance { +public: + OriginalConnPoolImpl(Event::Dispatcher& dispatcher, 
Upstream::HostConstSharedPtr host, + Upstream::ResourcePriority priority, + const Network::ConnectionSocket::OptionsSharedPtr& options, + Network::TransportSocketOptionsSharedPtr transport_socket_options); + + ~OriginalConnPoolImpl() override; + + // ConnectionPool::Instance + void addDrainedCallback(DrainedCb cb) override; + void drainConnections() override; + void closeConnections() override; + ConnectionPool::Cancellable* newConnection(ConnectionPool::Callbacks& callbacks) override; + Upstream::HostDescriptionConstSharedPtr host() const override { return host_; } + +protected: + struct ActiveConn; + + struct ConnectionWrapper { + ConnectionWrapper(ActiveConn& parent); + + Network::ClientConnection& connection(); + void addUpstreamCallbacks(ConnectionPool::UpstreamCallbacks& callbacks); + void setConnectionState(ConnectionPool::ConnectionStatePtr&& state) { + parent_.setConnectionState(std::move(state)); + }; + ConnectionPool::ConnectionState* connectionState() { return parent_.connectionState(); } + + void release(bool closed); + + void invalidate() { conn_valid_ = false; } + + ActiveConn& parent_; + ConnectionPool::UpstreamCallbacks* callbacks_{}; + bool released_{false}; + bool conn_valid_{true}; + }; + + using ConnectionWrapperSharedPtr = std::shared_ptr; + + struct ConnectionDataImpl : public ConnectionPool::ConnectionData { + ConnectionDataImpl(ConnectionWrapperSharedPtr wrapper) : wrapper_(std::move(wrapper)) {} + ~ConnectionDataImpl() override { wrapper_->release(false); } + + // ConnectionPool::ConnectionData + Network::ClientConnection& connection() override { return wrapper_->connection(); } + void addUpstreamCallbacks(ConnectionPool::UpstreamCallbacks& callbacks) override { + wrapper_->addUpstreamCallbacks(callbacks); + }; + void setConnectionState(ConnectionPool::ConnectionStatePtr&& state) override { + wrapper_->setConnectionState(std::move(state)); + } + ConnectionPool::ConnectionState* connectionState() override { + return 
wrapper_->connectionState(); + } + + ConnectionWrapperSharedPtr wrapper_; + }; + + struct ConnReadFilter : public Network::ReadFilterBaseImpl { + ConnReadFilter(ActiveConn& parent) : parent_(parent) {} + + // Network::ReadFilter + Network::FilterStatus onData(Buffer::Instance& data, bool end_stream) override { + parent_.onUpstreamData(data, end_stream); + return Network::FilterStatus::StopIteration; + } + + ActiveConn& parent_; + }; + + struct ActiveConn : LinkedObject, + public Network::ConnectionCallbacks, + public Event::DeferredDeletable { + ActiveConn(OriginalConnPoolImpl& parent); + ~ActiveConn() override; + + void onConnectTimeout(); + void onUpstreamData(Buffer::Instance& data, bool end_stream); + + // Network::ConnectionCallbacks + void onEvent(Network::ConnectionEvent event) override; + void onAboveWriteBufferHighWatermark() override; + void onBelowWriteBufferLowWatermark() override; + + void setConnectionState(ConnectionPool::ConnectionStatePtr&& state) { + conn_state_ = std::move(state); + } + ConnectionPool::ConnectionState* connectionState() { return conn_state_.get(); } + + OriginalConnPoolImpl& parent_; + Upstream::HostDescriptionConstSharedPtr real_host_description_; + ConnectionWrapperSharedPtr wrapper_; + Network::ClientConnectionPtr conn_; + ConnectionPool::ConnectionStatePtr conn_state_; + Event::TimerPtr connect_timer_; + Stats::TimespanPtr conn_length_; + uint64_t remaining_requests_; + bool timed_out_; + }; + + using ActiveConnPtr = std::unique_ptr; + + struct PendingRequest : LinkedObject, public ConnectionPool::Cancellable { + PendingRequest(OriginalConnPoolImpl& parent, ConnectionPool::Callbacks& callbacks); + ~PendingRequest() override; + + // ConnectionPool::Cancellable + void cancel(ConnectionPool::CancelPolicy cancel_policy) override { + parent_.onPendingRequestCancel(*this, cancel_policy); + } + + OriginalConnPoolImpl& parent_; + ConnectionPool::Callbacks& callbacks_; + }; + + using PendingRequestPtr = std::unique_ptr; + + void 
assignConnection(ActiveConn& conn, ConnectionPool::Callbacks& callbacks); + void createNewConnection(); + void onConnectionEvent(ActiveConn& conn, Network::ConnectionEvent event); + void onPendingRequestCancel(PendingRequest& request, ConnectionPool::CancelPolicy cancel_policy); + virtual void onConnReleased(ActiveConn& conn); + virtual void onConnDestroyed(ActiveConn& conn); + void onUpstreamReady(); + void processIdleConnection(ActiveConn& conn, bool new_connection, bool delay); + void checkForDrained(); + + Event::Dispatcher& dispatcher_; + Upstream::HostConstSharedPtr host_; + Upstream::ResourcePriority priority_; + const Network::ConnectionSocket::OptionsSharedPtr socket_options_; + Network::TransportSocketOptionsSharedPtr transport_socket_options_; + + std::list pending_conns_; // conns awaiting connected event + std::list ready_conns_; // conns ready for assignment + std::list busy_conns_; // conns assigned + std::list pending_requests_; + std::list drained_callbacks_; + Stats::TimespanPtr conn_connect_ms_; + Event::SchedulableCallbackPtr upstream_ready_cb_; + bool upstream_ready_enabled_{false}; +}; + +} // namespace Tcp +} // namespace Envoy diff --git a/source/common/tcp_proxy/BUILD b/source/common/tcp_proxy/BUILD index fc81b6fdb6250..328aca0a23e96 100644 --- a/source/common/tcp_proxy/BUILD +++ b/source/common/tcp_proxy/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/common/tcp_proxy/tcp_proxy.cc b/source/common/tcp_proxy/tcp_proxy.cc index 539ea9dde24e9..92dd68e4be4ab 100644 --- a/source/common/tcp_proxy/tcp_proxy.cc +++ b/source/common/tcp_proxy/tcp_proxy.cc @@ -410,6 +410,16 @@ Network::FilterStatus Filter::initializeUpstreamConnection() { downstreamConnection()->streamInfo().filterState()); } + if (!maybeTunnel(cluster_name)) { + // Either cluster is unknown or there are no 
healthy hosts. tcpConnPoolForCluster() increments + // cluster->stats().upstream_cx_none_healthy in the latter case. + getStreamInfo().setResponseFlag(StreamInfo::ResponseFlag::NoHealthyUpstream); + onInitFailure(UpstreamFailureReason::NoHealthyUpstream); + } + return Network::FilterStatus::StopIteration; +} + +bool Filter::maybeTunnel(const std::string& cluster_name) { if (!config_->tunnelingConfig()) { Tcp::ConnectionPool::Instance* conn_pool = cluster_manager_.tcpConnPoolForCluster( cluster_name, Upstream::ResourcePriority::Default, this); @@ -428,27 +438,39 @@ Network::FilterStatus Filter::initializeUpstreamConnection() { } // Because we never return open connections to the pool, this either has a handle waiting on // connection completion, or onPoolFailure has been invoked. Either way, stop iteration. - return Network::FilterStatus::StopIteration; + return true; } } else { + auto* cluster = cluster_manager_.get(cluster_name); + if (!cluster) { + return false; + } + // TODO(snowp): Ideally we should prevent this from being configured, but that's tricky to get + // right since whether a cluster is invalid depends on both the tcp_proxy config + cluster + // config. + if ((cluster->info()->features() & Upstream::ClusterInfo::Features::HTTP2) == 0) { + ENVOY_LOG(error, "Attempted to tunnel over HTTP/1.1, this is not supported. 
Set " + "http2_protocol_options on the cluster."); + return false; + } Http::ConnectionPool::Instance* conn_pool = cluster_manager_.httpConnPoolForCluster( - cluster_name, Upstream::ResourcePriority::Default, Http::Protocol::Http2, this); + cluster_name, Upstream::ResourcePriority::Default, absl::nullopt, this); if (conn_pool) { upstream_ = std::make_unique(*upstream_callbacks_, config_->tunnelingConfig()->hostname()); HttpUpstream* http_upstream = static_cast(upstream_.get()); - upstream_handle_ = std::make_shared( - conn_pool->newStream(http_upstream->responseDecoder(), *this)); - return Network::FilterStatus::StopIteration; + Http::ConnectionPool::Cancellable* cancellable = + conn_pool->newStream(http_upstream->responseDecoder(), *this); + if (cancellable) { + ASSERT(upstream_handle_.get() == nullptr); + upstream_handle_ = std::make_shared(cancellable); + } + return true; } } - // Either cluster is unknown or there are no healthy hosts. tcpConnPoolForCluster() increments - // cluster->stats().upstream_cx_none_healthy in the latter case. 
- getStreamInfo().setResponseFlag(StreamInfo::ResponseFlag::NoHealthyUpstream); - onInitFailure(UpstreamFailureReason::NoHealthyUpstream); - return Network::FilterStatus::StopIteration; -} + return false; +} void Filter::onPoolFailure(ConnectionPool::PoolFailureReason reason, Upstream::HostDescriptionConstSharedPtr host) { upstream_handle_.reset(); diff --git a/source/common/tcp_proxy/tcp_proxy.h b/source/common/tcp_proxy/tcp_proxy.h index 950f8f654dbe0..8a402e8a4cd20 100644 --- a/source/common/tcp_proxy/tcp_proxy.h +++ b/source/common/tcp_proxy/tcp_proxy.h @@ -3,10 +3,10 @@ #include #include #include -#include #include #include "envoy/access_log/access_log.h" +#include "envoy/common/random_generator.h" #include "envoy/event/timer.h" #include "envoy/extensions/filters/network/tcp_proxy/v3/tcp_proxy.pb.h" #include "envoy/network/connection.h" @@ -29,6 +29,8 @@ #include "common/tcp_proxy/upstream.h" #include "common/upstream/load_balancer_impl.h" +#include "absl/container/node_hash_map.h" + namespace Envoy { namespace TcpProxy { @@ -206,7 +208,7 @@ class Config { ThreadLocal::SlotPtr upstream_drain_manager_slot_; SharedConfigSharedPtr shared_config_; std::unique_ptr cluster_metadata_match_criteria_; - Runtime::RandomGenerator& random_generator_; + Random::RandomGenerator& random_generator_; std::unique_ptr hash_policy_; }; @@ -320,7 +322,7 @@ class Filter : public Network::ReadFilter, bool on_high_watermark_called_{false}; }; - virtual StreamInfo::StreamInfo& getStreamInfo(); + StreamInfo::StreamInfo& getStreamInfo(); protected: struct DownstreamCallbacks : public Network::ConnectionCallbacks { @@ -353,6 +355,7 @@ class Filter : public Network::ReadFilter, void initialize(Network::ReadFilterCallbacks& callbacks, bool set_connection_stats); Network::FilterStatus initializeUpstreamConnection(); + bool maybeTunnel(const std::string& cluster_name); void onConnectTimeout(); void onDownstreamEvent(Network::ConnectionEvent event); void onUpstreamData(Buffer::Instance& data, 
bool end_stream); @@ -419,7 +422,7 @@ class UpstreamDrainManager : public ThreadLocal::ThreadLocalObject { // This must be a map instead of set because there is no way to move elements // out of a set, and these elements get passed to deferredDelete() instead of // being deleted in-place. The key and value will always be equal. - std::unordered_map drainers_; + absl::node_hash_map drainers_; }; } // namespace TcpProxy diff --git a/source/common/tcp_proxy/upstream.h b/source/common/tcp_proxy/upstream.h index db193b0f4bba0..8d2a301d71377 100644 --- a/source/common/tcp_proxy/upstream.h +++ b/source/common/tcp_proxy/upstream.h @@ -33,7 +33,9 @@ class TcpConnectionHandle : public ConnectionHandle { class HttpConnectionHandle : public ConnectionHandle { public: HttpConnectionHandle(Http::ConnectionPool::Cancellable* handle) : upstream_http_handle_(handle) {} - void cancel() override { upstream_http_handle_->cancel(); } + void cancel() override { + upstream_http_handle_->cancel(Tcp::ConnectionPool::CancelPolicy::Default); + } private: Http::ConnectionPool::Cancellable* upstream_http_handle_{}; diff --git a/source/common/thread_local/BUILD b/source/common/thread_local/BUILD index 82b0d913f0e44..3892298e00d64 100644 --- a/source/common/thread_local/BUILD +++ b/source/common/thread_local/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/common/thread_local/thread_local_impl.cc b/source/common/thread_local/thread_local_impl.cc index 8bfb093befae7..d4d02f8b2f5f0 100644 --- a/source/common/thread_local/thread_local_impl.cc +++ b/source/common/thread_local/thread_local_impl.cc @@ -26,7 +26,7 @@ SlotPtr InstanceImpl::allocateSlot() { ASSERT(!shutdown_); if (free_slot_indexes_.empty()) { - std::unique_ptr slot(new SlotImpl(*this, slots_.size())); + SlotImplPtr slot(new SlotImpl(*this, slots_.size())); auto 
wrapper = std::make_unique(*this, std::move(slot)); slots_.push_back(wrapper->slot_.get()); return wrapper; @@ -34,7 +34,7 @@ SlotPtr InstanceImpl::allocateSlot() { const uint32_t idx = free_slot_indexes_.front(); free_slot_indexes_.pop_front(); ASSERT(idx < slots_.size()); - std::unique_ptr slot(new SlotImpl(*this, idx)); + SlotImplPtr slot(new SlotImpl(*this, idx)); slots_[idx] = slot.get(); return std::make_unique(*this, std::move(slot)); } @@ -56,7 +56,7 @@ ThreadLocalObjectSharedPtr InstanceImpl::SlotImpl::get() { return thread_local_data_.data_[index_]; } -InstanceImpl::Bookkeeper::Bookkeeper(InstanceImpl& parent, std::unique_ptr&& slot) +InstanceImpl::Bookkeeper::Bookkeeper(InstanceImpl& parent, SlotImplPtr&& slot) : parent_(parent), slot_(std::move(slot)), ref_count_(/*not used.*/ nullptr, [slot = slot_.get(), &parent = this->parent_](uint32_t* /* not used */) { @@ -117,7 +117,7 @@ void InstanceImpl::registerThread(Event::Dispatcher& dispatcher, bool main_threa // Puts the slot into a deferred delete container, the slot will be destructed when its out-going // callback reference count goes to 0. -void InstanceImpl::recycle(std::unique_ptr&& slot) { +void InstanceImpl::recycle(SlotImplPtr&& slot) { ASSERT(std::this_thread::get_id() == main_thread_id_); ASSERT(slot != nullptr); auto* slot_addr = slot.get(); @@ -194,11 +194,11 @@ void InstanceImpl::runOnAllThreads(Event::PostCb cb, Event::PostCb all_threads_c // for programming simplicity here. 
cb(); - std::shared_ptr cb_guard(new Event::PostCb(cb), - [this, all_threads_complete_cb](Event::PostCb* cb) { - main_thread_dispatcher_->post(all_threads_complete_cb); - delete cb; - }); + Event::PostCbSharedPtr cb_guard(new Event::PostCb(cb), + [this, all_threads_complete_cb](Event::PostCb* cb) { + main_thread_dispatcher_->post(all_threads_complete_cb); + delete cb; + }); for (Event::Dispatcher& dispatcher : registered_threads_) { dispatcher.post([cb_guard]() -> void { (*cb_guard)(); }); diff --git a/source/common/thread_local/thread_local_impl.h b/source/common/thread_local/thread_local_impl.h index b451c4eb236a1..71153107fb3dd 100644 --- a/source/common/thread_local/thread_local_impl.h +++ b/source/common/thread_local/thread_local_impl.h @@ -3,6 +3,7 @@ #include #include #include +#include #include #include "envoy/thread_local/thread_local.h" @@ -50,10 +51,12 @@ class InstanceImpl : Logger::Loggable, public NonCopyable, pub const uint64_t index_; }; + using SlotImplPtr = std::unique_ptr; + // A Wrapper of SlotImpl which on destruction returns the SlotImpl to the deferred delete queue // (detaches it). struct Bookkeeper : public Slot { - Bookkeeper(InstanceImpl& parent, std::unique_ptr&& slot); + Bookkeeper(InstanceImpl& parent, SlotImplPtr&& slot); ~Bookkeeper() override { parent_.recycle(std::move(slot_)); } // ThreadLocal::Slot @@ -66,7 +69,7 @@ class InstanceImpl : Logger::Loggable, public NonCopyable, pub void set(InitializeCb cb) override; InstanceImpl& parent_; - std::unique_ptr slot_; + SlotImplPtr slot_; std::shared_ptr ref_count_; }; @@ -75,7 +78,7 @@ class InstanceImpl : Logger::Loggable, public NonCopyable, pub std::vector data_; }; - void recycle(std::unique_ptr&& slot); + void recycle(SlotImplPtr&& slot); // Cleanup the deferred deletes queue. 
void scheduleCleanup(SlotImpl* slot); @@ -89,7 +92,7 @@ class InstanceImpl : Logger::Loggable, public NonCopyable, pub // A indexed container for Slots that has to be deferred to delete due to out-going callbacks // pointing to the Slot. To let the ref_count_ deleter find the SlotImpl by address, the container // is defined as a map of SlotImpl address to the unique_ptr. - absl::flat_hash_map> deferred_deletes_; + absl::flat_hash_map deferred_deletes_; std::vector slots_; // A list of index of freed slots. @@ -104,5 +107,7 @@ class InstanceImpl : Logger::Loggable, public NonCopyable, pub friend class ThreadLocalInstanceImplTest; }; +using InstanceImplPtr = std::unique_ptr; + } // namespace ThreadLocal } // namespace Envoy diff --git a/source/common/tracing/BUILD b/source/common/tracing/BUILD index d516876313b50..ef703c964a904 100644 --- a/source/common/tracing/BUILD +++ b/source/common/tracing/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( @@ -23,12 +23,12 @@ envoy_cc_library( "//include/envoy/thread_local:thread_local_interface", "//include/envoy/tracing:http_tracer_interface", "//include/envoy/upstream:cluster_manager_interface", - "//source/common/access_log:access_log_formatter_lib", "//source/common/buffer:zero_copy_input_stream_lib", "//source/common/common:base64_lib", "//source/common/common:macros", "//source/common/common:utility_lib", "//source/common/config:metadata_lib", + "//source/common/formatter:substitution_formatter_lib", "//source/common/grpc:common_lib", "//source/common/http:codes_lib", "//source/common/http:header_map_lib", diff --git a/source/common/tracing/http_tracer_impl.cc b/source/common/tracing/http_tracer_impl.cc index f3f568666f609..d6010aa083be9 100644 --- a/source/common/tracing/http_tracer_impl.cc +++ b/source/common/tracing/http_tracer_impl.cc @@ -7,11 +7,11 @@ #include 
"envoy/type/metadata/v3/metadata.pb.h" #include "envoy/type/tracing/v3/custom_tag.pb.h" -#include "common/access_log/access_log_formatter.h" #include "common/common/assert.h" #include "common/common/fmt.h" #include "common/common/macros.h" #include "common/common/utility.h" +#include "common/formatter/substitution_formatter.h" #include "common/grpc/common.h" #include "common/http/codes.h" #include "common/http/header_map_impl.h" @@ -25,27 +25,31 @@ namespace Envoy { namespace Tracing { -// TODO(mattklein123) PERF: Avoid string creations/copies in this entire file. +// TODO(perf): Avoid string creations/copies in this entire file. static std::string buildResponseCode(const StreamInfo::StreamInfo& info) { return info.responseCode() ? std::to_string(info.responseCode().value()) : "0"; } -static std::string valueOrDefault(const Http::HeaderEntry* header, const char* default_value) { - return header ? std::string(header->value().getStringView()) : default_value; +static absl::string_view valueOrDefault(const Http::HeaderEntry* header, + const char* default_value) { + return header ? header->value().getStringView() : default_value; } static std::string buildUrl(const Http::RequestHeaderMap& request_headers, const uint32_t max_path_length) { - std::string path(request_headers.EnvoyOriginalPath() - ? request_headers.EnvoyOriginalPath()->value().getStringView() - : request_headers.Path()->value().getStringView()); + if (!request_headers.Path()) { + return ""; + } + absl::string_view path(request_headers.EnvoyOriginalPath() + ? 
request_headers.getEnvoyOriginalPathValue() + : request_headers.getPathValue()); if (path.length() > max_path_length) { path = path.substr(0, max_path_length); } - return absl::StrCat(valueOrDefault(request_headers.ForwardedProto(), ""), "://", - valueOrDefault(request_headers.Host(), ""), path); + return absl::StrCat(request_headers.getForwardedProtoValue(), "://", + request_headers.getHostValue(), path); } const std::string HttpTracerUtility::IngressOperation = "ingress"; @@ -157,18 +161,16 @@ void HttpTracerUtility::finalizeDownstreamSpan(Span& span, // Pre response data. if (request_headers) { if (request_headers->RequestId()) { - span.setTag(Tracing::Tags::get().GuidXRequestId, - std::string(request_headers->RequestId()->value().getStringView())); + span.setTag(Tracing::Tags::get().GuidXRequestId, request_headers->getRequestIdValue()); } span.setTag(Tracing::Tags::get().HttpUrl, buildUrl(*request_headers, tracing_config.maxPathTagLength())); - span.setTag(Tracing::Tags::get().HttpMethod, - std::string(request_headers->Method()->value().getStringView())); + span.setTag(Tracing::Tags::get().HttpMethod, request_headers->getMethodValue()); span.setTag(Tracing::Tags::get().DownstreamCluster, valueOrDefault(request_headers->EnvoyDownstreamServiceCluster(), "-")); span.setTag(Tracing::Tags::get().UserAgent, valueOrDefault(request_headers->UserAgent(), "-")); span.setTag(Tracing::Tags::get().HttpProtocol, - AccessLog::AccessLogFormatUtils::protocolToString(stream_info.protocol())); + Formatter::SubstitutionFormatUtils::protocolToString(stream_info.protocol())); const auto& remote_address = stream_info.downstreamDirectRemoteAddress(); @@ -181,10 +183,10 @@ void HttpTracerUtility::finalizeDownstreamSpan(Span& span, if (request_headers->ClientTraceId()) { span.setTag(Tracing::Tags::get().GuidXClientTraceId, - std::string(request_headers->ClientTraceId()->value().getStringView())); + request_headers->getClientTraceIdValue()); } - if 
(Grpc::Common::hasGrpcContentType(*request_headers)) { + if (Grpc::Common::isGrpcRequestHeaders(*request_headers)) { addGrpcRequestTags(span, *request_headers); } } @@ -210,7 +212,7 @@ void HttpTracerUtility::finalizeUpstreamSpan(Span& span, const StreamInfo::StreamInfo& stream_info, const Config& tracing_config) { span.setTag(Tracing::Tags::get().HttpProtocol, - AccessLog::AccessLogFormatUtils::protocolToString(stream_info.protocol())); + Formatter::SubstitutionFormatUtils::protocolToString(stream_info.protocol())); if (stream_info.upstreamHost()) { span.setTag(Tracing::Tags::get().UpstreamAddress, @@ -280,7 +282,7 @@ SpanPtr HttpTracerImpl::startSpan(const Config& config, Http::RequestHeaderMap& if (config.operationName() == OperationName::Egress) { span_name.append(" "); - span_name.append(std::string(request_headers.Host()->value().getStringView())); + span_name.append(std::string(request_headers.getHostValue())); } SpanPtr active_span = driver_->startSpan(config, request_headers, span_name, diff --git a/source/common/tracing/http_tracer_impl.h b/source/common/tracing/http_tracer_impl.h index 14cb47cbeb6a5..760b4ed2bf3e7 100644 --- a/source/common/tracing/http_tracer_impl.h +++ b/source/common/tracing/http_tracer_impl.h @@ -169,6 +169,8 @@ class NullSpan : public Span { void log(SystemTime, const std::string&) override {} void finishSpan() override {} void injectContext(Http::RequestHeaderMap&) override {} + void setBaggage(absl::string_view, absl::string_view) override {} + std::string getBaggage(absl::string_view) override { return std::string(); } SpanPtr spawnChild(const Config&, const std::string&, SystemTime) override { return SpanPtr{new NullSpan()}; } diff --git a/source/common/upstream/BUILD b/source/common/upstream/BUILD index 608c24bbfb3f0..2d0fb940cf003 100644 --- a/source/common/upstream/BUILD +++ b/source/common/upstream/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", 
"envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( @@ -175,12 +175,14 @@ envoy_cc_library( hdrs = ["load_balancer_impl.h"], deps = [ ":edf_scheduler_lib", + "//include/envoy/common:random_generator_interface", "//include/envoy/runtime:runtime_interface", "//include/envoy/stats:stats_interface", "//include/envoy/upstream:load_balancer_interface", "//include/envoy/upstream:upstream_interface", "//source/common/common:assert_lib", "//source/common/protobuf:utility_lib", + "//source/common/runtime:runtime_protos_lib", "@envoy_api//envoy/config/cluster/v3:pkg_cc_proto", ], ) @@ -221,6 +223,8 @@ envoy_cc_library( "//source/common/config:version_converter_lib", "//source/common/grpc:async_client_lib", "//source/common/network:resolver_lib", + "//source/common/protobuf:message_validator_lib", + "//source/common/protobuf:utility_lib", "//source/extensions/transport_sockets:well_known_names", "//source/server:transport_socket_config_lib", "@envoy_api//envoy/config/cluster/v3:pkg_cc_proto", @@ -310,6 +314,7 @@ envoy_cc_library( "//include/envoy/upstream:resource_manager_interface", "//include/envoy/upstream:upstream_interface", "//source/common/common:assert_lib", + "//source/common/common:basic_resource_lib", ], ) @@ -364,6 +369,7 @@ envoy_cc_library( "//include/envoy/upstream:cluster_factory_interface", "//include/envoy/upstream:locality_lib", "//source/common/config:api_version_lib", + "//source/common/config:decoded_resource_lib", "//source/common/config:metadata_lib", "//source/common/config:subscription_base_interface", "//source/common/config:subscription_factory_lib", @@ -426,11 +432,15 @@ envoy_cc_library( "//include/envoy/ssl:context_interface", "//include/envoy/upstream:health_checker_interface", "//source/common/common:enum_to_int", + "//source/common/common:thread_lib", "//source/common/common:utility_lib", + "//source/common/http/http1:codec_stats_lib", + "//source/common/http/http2:codec_stats_lib", 
"//source/common/http:utility_lib", "//source/common/network:address_lib", "//source/common/network:resolver_lib", "//source/common/network:socket_option_factory_lib", + "//source/common/network:socket_option_lib", "//source/common/network:utility_lib", "//source/common/protobuf", "//source/common/protobuf:utility_lib", @@ -509,6 +519,8 @@ envoy_cc_library( "//source/common/common:minimal_logger_lib", "//source/common/config:metadata_lib", "//source/common/config:well_known_names", + "//source/common/http/http1:codec_stats_lib", + "//source/common/http/http2:codec_stats_lib", "//source/common/init:manager_lib", "//source/common/shared_pool:shared_pool_lib", "//source/common/stats:isolated_store_lib", diff --git a/source/common/upstream/cds_api_impl.cc b/source/common/upstream/cds_api_impl.cc index 389f4f5f265d5..b05ae5802cd3c 100644 --- a/source/common/upstream/cds_api_impl.cc +++ b/source/common/upstream/cds_api_impl.cc @@ -3,8 +3,6 @@ #include #include "envoy/api/v2/cluster.pb.h" -#include "envoy/config/cluster/v3/cluster.pb.h" -#include "envoy/config/cluster/v3/cluster.pb.validate.h" #include "envoy/config/core/v3/config_source.pb.h" #include "envoy/service/discovery/v3/discovery.pb.h" #include "envoy/stats/scope.h" @@ -16,6 +14,7 @@ #include "common/config/utility.h" #include "common/protobuf/utility.h" +#include "absl/container/node_hash_set.h" #include "absl/strings/str_join.h" namespace Envoy { @@ -30,66 +29,52 @@ CdsApiPtr CdsApiImpl::create(const envoy::config::core::v3::ConfigSource& cds_co CdsApiImpl::CdsApiImpl(const envoy::config::core::v3::ConfigSource& cds_config, ClusterManager& cm, Stats::Scope& scope, ProtobufMessage::ValidationVisitor& validation_visitor) : Envoy::Config::SubscriptionBase( - cds_config.resource_api_version()), - cm_(cm), scope_(scope.createScope("cluster_manager.cds.")), - validation_visitor_(validation_visitor) { + cds_config.resource_api_version(), validation_visitor, "name"), + cm_(cm), 
scope_(scope.createScope("cluster_manager.cds.")) { const auto resource_name = getResourceName(); subscription_ = cm_.subscriptionFactory().subscriptionFromConfigSource( - cds_config, Grpc::Common::typeUrl(resource_name), *scope_, *this); + cds_config, Grpc::Common::typeUrl(resource_name), *scope_, *this, resource_decoder_); } -void CdsApiImpl::onConfigUpdate(const Protobuf::RepeatedPtrField& resources, +void CdsApiImpl::onConfigUpdate(const std::vector& resources, const std::string& version_info) { ClusterManager::ClusterInfoMap clusters_to_remove = cm_.clusters(); std::vector clusters; - for (const auto& cluster_blob : resources) { - // No validation needed here the overloaded call to onConfigUpdate validates. - clusters.push_back(MessageUtil::anyConvert(cluster_blob)); - clusters_to_remove.erase(clusters.back().name()); + for (const auto& resource : resources) { + clusters_to_remove.erase(resource.get().name()); } Protobuf::RepeatedPtrField to_remove_repeated; for (const auto& cluster : clusters_to_remove) { *to_remove_repeated.Add() = cluster.first; } - Protobuf::RepeatedPtrField to_add_repeated; - for (const auto& cluster : clusters) { - envoy::service::discovery::v3::Resource* to_add = to_add_repeated.Add(); - to_add->set_name(cluster.name()); - to_add->set_version(version_info); - to_add->mutable_resource()->PackFrom(cluster); - } - onConfigUpdate(to_add_repeated, to_remove_repeated, version_info); + onConfigUpdate(resources, to_remove_repeated, version_info); } -void CdsApiImpl::onConfigUpdate( - const Protobuf::RepeatedPtrField& added_resources, - const Protobuf::RepeatedPtrField& removed_resources, - const std::string& system_version_info) { - std::unique_ptr maybe_eds_resume; +void CdsApiImpl::onConfigUpdate(const std::vector& added_resources, + const Protobuf::RepeatedPtrField& removed_resources, + const std::string& system_version_info) { + Config::ScopedResume maybe_resume_eds; if (cm_.adsMux()) { - const auto type_url = Config::getTypeUrl( - 
envoy::config::core::v3::ApiVersion::V2); - cm_.adsMux()->pause(type_url); - maybe_eds_resume = - std::make_unique([this, type_url] { cm_.adsMux()->resume(type_url); }); + const auto type_urls = + Config::getAllVersionTypeUrls(); + maybe_resume_eds = cm_.adsMux()->pause(type_urls); } ENVOY_LOG(info, "cds: add {} cluster(s), remove {} cluster(s)", added_resources.size(), removed_resources.size()); std::vector exception_msgs; - std::unordered_set cluster_names; + absl::node_hash_set cluster_names; bool any_applied = false; for (const auto& resource : added_resources) { envoy::config::cluster::v3::Cluster cluster; try { - cluster = MessageUtil::anyConvertAndValidate( - resource.resource(), validation_visitor_); + cluster = dynamic_cast(resource.get().resource()); if (!cluster_names.insert(cluster.name()).second) { // NOTE: at this point, the first of these duplicates has already been successfully applied. throw EnvoyException(fmt::format("duplicate cluster {} found", cluster.name())); } - if (cm_.addOrUpdateCluster(cluster, resource.version())) { + if (cm_.addOrUpdateCluster(cluster, resource.get().version())) { any_applied = true; ENVOY_LOG(info, "cds: add/update cluster '{}'", cluster.name()); } else { @@ -132,4 +117,4 @@ void CdsApiImpl::runInitializeCallbackIfAny() { } } // namespace Upstream -} // namespace Envoy \ No newline at end of file +} // namespace Envoy diff --git a/source/common/upstream/cds_api_impl.h b/source/common/upstream/cds_api_impl.h index f2f66340e9b05..970c12a4ba166 100644 --- a/source/common/upstream/cds_api_impl.h +++ b/source/common/upstream/cds_api_impl.h @@ -4,6 +4,7 @@ #include "envoy/api/api.h" #include "envoy/config/cluster/v3/cluster.pb.h" +#include "envoy/config/cluster/v3/cluster.pb.validate.h" #include "envoy/config/core/v3/config_source.pb.h" #include "envoy/config/subscription.h" #include "envoy/event/dispatcher.h" @@ -38,27 +39,22 @@ class CdsApiImpl : public CdsApi, private: // Config::SubscriptionCallbacks - void 
onConfigUpdate(const Protobuf::RepeatedPtrField& resources, + void onConfigUpdate(const std::vector& resources, const std::string& version_info) override; - void onConfigUpdate( - const Protobuf::RepeatedPtrField& added_resources, - const Protobuf::RepeatedPtrField& removed_resources, - const std::string& system_version_info) override; + void onConfigUpdate(const std::vector& added_resources, + const Protobuf::RepeatedPtrField& removed_resources, + const std::string& system_version_info) override; void onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason reason, const EnvoyException* e) override; - std::string resourceName(const ProtobufWkt::Any& resource) override { - return MessageUtil::anyConvert(resource).name(); - } CdsApiImpl(const envoy::config::core::v3::ConfigSource& cds_config, ClusterManager& cm, Stats::Scope& scope, ProtobufMessage::ValidationVisitor& validation_visitor); void runInitializeCallbackIfAny(); ClusterManager& cm_; - std::unique_ptr subscription_; + Config::SubscriptionPtr subscription_; std::string system_version_info_; std::function initialize_callback_; Stats::ScopePtr scope_; - ProtobufMessage::ValidationVisitor& validation_visitor_; }; } // namespace Upstream diff --git a/source/common/upstream/cluster_factory_impl.cc b/source/common/upstream/cluster_factory_impl.cc index 8233ae6ac6be0..2f85ef31188f9 100644 --- a/source/common/upstream/cluster_factory_impl.cc +++ b/source/common/upstream/cluster_factory_impl.cc @@ -27,7 +27,7 @@ std::pair ClusterFactoryImplBase:: const envoy::config::cluster::v3::Cluster& cluster, ClusterManager& cluster_manager, Stats::Store& stats, ThreadLocal::Instance& tls, Network::DnsResolverSharedPtr dns_resolver, Ssl::ContextManager& ssl_context_manager, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, Event::Dispatcher& dispatcher, + Random::RandomGenerator& random, Event::Dispatcher& dispatcher, AccessLog::AccessLogManager& log_manager, const LocalInfo::LocalInfo& local_info, 
Server::Admin& admin, Singleton::Manager& singleton_manager, Outlier::EventLoggerSharedPtr outlier_event_logger, bool added_via_api, diff --git a/source/common/upstream/cluster_factory_impl.h b/source/common/upstream/cluster_factory_impl.h index 63af89ead3fe5..4e8c6d1a811d2 100644 --- a/source/common/upstream/cluster_factory_impl.h +++ b/source/common/upstream/cluster_factory_impl.h @@ -11,6 +11,7 @@ #include #include +#include "envoy/common/random_generator.h" #include "envoy/config/cluster/v3/cluster.pb.h" #include "envoy/config/typed_metadata.h" #include "envoy/event/timer.h" @@ -55,7 +56,7 @@ class ClusterFactoryContextImpl : public ClusterFactoryContext { ThreadLocal::SlotAllocator& tls, Network::DnsResolverSharedPtr dns_resolver, Ssl::ContextManager& ssl_context_manager, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, Event::Dispatcher& dispatcher, + Random::RandomGenerator& random, Event::Dispatcher& dispatcher, AccessLog::AccessLogManager& log_manager, const LocalInfo::LocalInfo& local_info, Server::Admin& admin, Singleton::Manager& singleton_manager, @@ -74,7 +75,7 @@ class ClusterFactoryContextImpl : public ClusterFactoryContext { Network::DnsResolverSharedPtr dnsResolver() override { return dns_resolver_; } Ssl::ContextManager& sslContextManager() override { return ssl_context_manager_; } Runtime::Loader& runtime() override { return runtime_; } - Runtime::RandomGenerator& random() override { return random_; } + Random::RandomGenerator& random() override { return random_; } Event::Dispatcher& dispatcher() override { return dispatcher_; } AccessLog::AccessLogManager& logManager() override { return log_manager_; } const LocalInfo::LocalInfo& localInfo() override { return local_info_; } @@ -94,7 +95,7 @@ class ClusterFactoryContextImpl : public ClusterFactoryContext { Network::DnsResolverSharedPtr dns_resolver_; Ssl::ContextManager& ssl_context_manager_; Runtime::Loader& runtime_; - Runtime::RandomGenerator& random_; + Random::RandomGenerator& 
random_; Event::Dispatcher& dispatcher_; AccessLog::AccessLogManager& log_manager_; const LocalInfo::LocalInfo& local_info_; @@ -120,7 +121,7 @@ class ClusterFactoryImplBase : public ClusterFactory { create(const envoy::config::cluster::v3::Cluster& cluster, ClusterManager& cluster_manager, Stats::Store& stats, ThreadLocal::Instance& tls, Network::DnsResolverSharedPtr dns_resolver, Ssl::ContextManager& ssl_context_manager, - Runtime::Loader& runtime, Runtime::RandomGenerator& random, Event::Dispatcher& dispatcher, + Runtime::Loader& runtime, Random::RandomGenerator& random, Event::Dispatcher& dispatcher, AccessLog::AccessLogManager& log_manager, const LocalInfo::LocalInfo& local_info, Server::Admin& admin, Singleton::Manager& singleton_manager, Outlier::EventLoggerSharedPtr outlier_event_logger, bool added_via_api, diff --git a/source/common/upstream/cluster_manager_impl.cc b/source/common/upstream/cluster_manager_impl.cc index 40f1cb07d33e9..f9c8c40977672 100644 --- a/source/common/upstream/cluster_manager_impl.cc +++ b/source/common/upstream/cluster_manager_impl.cc @@ -32,7 +32,9 @@ #include "common/network/utility.h" #include "common/protobuf/utility.h" #include "common/router/shadow_writer_impl.h" +#include "common/runtime/runtime_features.h" #include "common/tcp/conn_pool.h" +#include "common/tcp/original_conn_pool.h" #include "common/upstream/cds_api_impl.h" #include "common/upstream/load_balancer_impl.h" #include "common/upstream/maglev_lb.h" @@ -127,12 +129,20 @@ void ClusterManagerInitHelper::maybeFinishInitialize() { return; } - // If we are still waiting for primary clusters to initialize, do nothing. 
- ASSERT(state_ == State::WaitingToStartSecondaryInitialization || state_ == State::CdsInitialized); + ASSERT(state_ == State::WaitingToStartSecondaryInitialization || + state_ == State::CdsInitialized || + state_ == State::WaitingForPrimaryInitializationToComplete); ENVOY_LOG(debug, "maybe finish initialize primary init clusters empty: {}", primary_init_clusters_.empty()); + // If we are still waiting for primary clusters to initialize, do nothing. if (!primary_init_clusters_.empty()) { return; + } else if (state_ == State::WaitingForPrimaryInitializationToComplete) { + state_ = State::WaitingToStartSecondaryInitialization; + if (primary_clusters_initialized_callback_) { + primary_clusters_initialized_callback_(); + } + return; } // If we are still waiting for secondary clusters to initialize, see if we need to first call @@ -141,19 +151,17 @@ void ClusterManagerInitHelper::maybeFinishInitialize() { secondary_init_clusters_.empty()); if (!secondary_init_clusters_.empty()) { if (!started_secondary_initialize_) { - const auto type_url = Config::getTypeUrl( - envoy::config::core::v3::ApiVersion::V2); ENVOY_LOG(info, "cm init: initializing secondary clusters"); // If the first CDS response doesn't have any primary cluster, ClusterLoadAssignment // should be already paused by CdsApiImpl::onConfigUpdate(). Need to check that to // avoid double pause ClusterLoadAssignment. 
- if (cm_.adsMux() == nullptr || cm_.adsMux()->paused(type_url)) { - initializeSecondaryClusters(); - } else { - cm_.adsMux()->pause(type_url); - Cleanup eds_resume([this, type_url] { cm_.adsMux()->resume(type_url); }); - initializeSecondaryClusters(); + Config::ScopedResume maybe_resume_eds; + if (cm_.adsMux()) { + const auto type_urls = + Config::getAllVersionTypeUrls(); + maybe_resume_eds = cm_.adsMux()->pause(type_urls); } + initializeSecondaryClusters(); } return; } @@ -179,7 +187,8 @@ void ClusterManagerInitHelper::onStaticLoadComplete() { ASSERT(state_ == State::Loading); // After initialization of primary clusters has completed, transition to // waiting for signal to initialize secondary clusters and then CDS. - state_ = State::WaitingToStartSecondaryInitialization; + state_ = State::WaitingForPrimaryInitializationToComplete; + maybeFinishInitialize(); } void ClusterManagerInitHelper::startInitializingSecondaryClusters() { @@ -200,7 +209,8 @@ void ClusterManagerInitHelper::setCds(CdsApi* cds) { } } -void ClusterManagerInitHelper::setInitializedCb(std::function callback) { +void ClusterManagerInitHelper::setInitializedCb( + ClusterManager::InitializationCompleteCallback callback) { if (state_ == State::AllClustersInitialized) { callback(); } else { @@ -208,10 +218,23 @@ void ClusterManagerInitHelper::setInitializedCb(std::function callback) } } +void ClusterManagerInitHelper::setPrimaryClustersInitializedCb( + ClusterManager::PrimaryClustersReadyCallback callback) { + // The callback must be set before or at the `WaitingToStartSecondaryInitialization` state. + ASSERT(state_ == State::WaitingToStartSecondaryInitialization || + state_ == State::WaitingForPrimaryInitializationToComplete || state_ == State::Loading); + if (state_ == State::WaitingToStartSecondaryInitialization) { + // This is the case where all clusters are STATIC and without health checking. 
+ callback(); + } else { + primary_clusters_initialized_callback_ = callback; + } +} + ClusterManagerImpl::ClusterManagerImpl( const envoy::config::bootstrap::v3::Bootstrap& bootstrap, ClusterManagerFactory& factory, Stats::Store& stats, ThreadLocal::Instance& tls, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, const LocalInfo::LocalInfo& local_info, + Random::RandomGenerator& random, const LocalInfo::LocalInfo& local_info, AccessLog::AccessLogManager& log_manager, Event::Dispatcher& main_thread_dispatcher, Server::Admin& admin, ProtobufMessage::ValidationContext& validation_context, Api::Api& api, Http::Context& http_context, Grpc::Context& grpc_context) @@ -224,7 +247,7 @@ ClusterManagerImpl::ClusterManagerImpl( time_source_(main_thread_dispatcher.timeSource()), dispatcher_(main_thread_dispatcher), http_context_(http_context), subscription_factory_(local_info, main_thread_dispatcher, *this, random, - validation_context.dynamicValidationVisitor(), api) { + validation_context.dynamicValidationVisitor(), api, runtime_) { async_client_manager_ = std::make_unique( *this, tls, time_source_, api, grpc_context.statNames()); const auto& cm_config = bootstrap.cluster_manager(); @@ -251,12 +274,22 @@ ClusterManagerImpl::ClusterManagerImpl( // loading is done because in v2 configuration each EDS cluster individually sets up a // subscription. When this subscription is an API source the cluster will depend on a non-EDS // cluster, so the non-EDS clusters must be loaded first. + auto is_primary_cluster = [](const envoy::config::cluster::v3::Cluster& cluster) -> bool { + return cluster.type() != envoy::config::cluster::v3::Cluster::EDS || + (cluster.type() == envoy::config::cluster::v3::Cluster::EDS && + cluster.eds_cluster_config().eds_config().config_source_specifier_case() == + envoy::config::core::v3::ConfigSource::ConfigSourceSpecifierCase::kPath); + }; + // Build book-keeping for which clusters are primary. 
This is useful when we + // invoke loadCluster() below and it needs the complete set of primaries. for (const auto& cluster : bootstrap.static_resources().clusters()) { - // First load all the primary clusters. - if (cluster.type() != envoy::config::cluster::v3::Cluster::EDS || - (cluster.type() == envoy::config::cluster::v3::Cluster::EDS && - cluster.eds_cluster_config().eds_config().config_source_specifier_case() == - envoy::config::core::v3::ConfigSource::ConfigSourceSpecifierCase::kPath)) { + if (is_primary_cluster(cluster)) { + primary_clusters_.insert(cluster.name()); + } + } + // Load all the primary clusters. + for (const auto& cluster : bootstrap.static_resources().clusters()) { + if (is_primary_cluster(cluster)) { loadCluster(cluster, "", false, active_clusters_); } } @@ -765,13 +798,13 @@ void ClusterManagerImpl::updateClusterCounts() { // signal to ADS to proceed with RDS updates. // If we're in the middle of shutting down (ads_mux_ already gone) then this is irrelevant. if (ads_mux_) { - const auto type_url = Config::getTypeUrl( - envoy::config::core::v3::ApiVersion::V2); + const auto type_urls = Config::getAllVersionTypeUrls(); const uint64_t previous_warming = cm_stats_.warming_clusters_.value(); if (previous_warming == 0 && !warming_clusters_.empty()) { - ads_mux_->pause(type_url); + resume_cds_ = ads_mux_->pause(type_urls); } else if (previous_warming > 0 && warming_clusters_.empty()) { - ads_mux_->resume(type_url); + ASSERT(resume_cds_ != nullptr); + resume_cds_.reset(); } } cm_stats_.active_clusters_.set(active_clusters_.size()); @@ -791,7 +824,8 @@ ThreadLocalCluster* ClusterManagerImpl::get(absl::string_view cluster) { Http::ConnectionPool::Instance* ClusterManagerImpl::httpConnPoolForCluster(const std::string& cluster, ResourcePriority priority, - Http::Protocol protocol, LoadBalancerContext* context) { + absl::optional protocol, + LoadBalancerContext* context) { ThreadLocalClusterManagerImpl& cluster_manager = tls_->getTyped(); auto entry = 
cluster_manager.thread_local_clusters_.find(cluster); @@ -1130,18 +1164,31 @@ void ClusterManagerImpl::ThreadLocalClusterManagerImpl::onHostHealthFailure( } } { + // Drain or close any TCP connection pool for the host. Draining a TCP pool doesn't lead to + // connections being closed, it only prevents new connections through the pool. The + // CLOSE_CONNECTIONS_ON_HOST_HEALTH_FAILURE can be used to make the pool close any + // active connections. const auto& container = config.host_tcp_conn_pool_map_.find(host); if (container != config.host_tcp_conn_pool_map_.end()) { for (const auto& pair : container->second.pools_) { const Tcp::ConnectionPool::InstancePtr& pool = pair.second; - pool->drainConnections(); + if (host->cluster().features() & + ClusterInfo::Features::CLOSE_CONNECTIONS_ON_HOST_HEALTH_FAILURE) { + pool->closeConnections(); + } else { + pool->drainConnections(); + } } } } if (host->cluster().features() & ClusterInfo::Features::CLOSE_CONNECTIONS_ON_HOST_HEALTH_FAILURE) { - + // Close non connection pool TCP connections obtained from tcpConnForCluster() + // + // TODO(jono): The only remaining user of the non-pooled connections seems to be the statsd + // TCP client. Perhaps it could be rewritten to use a connection pool, and this code deleted. + // // Each connection will remove itself from the TcpConnectionsMap when it closes, via its // Network::ConnectionCallbacks. The last removed tcp conn will remove the TcpConnectionsMap // from host_tcp_conn_map_, so do not cache it between iterations. 
@@ -1244,7 +1291,8 @@ ClusterManagerImpl::ThreadLocalClusterManagerImpl::ClusterEntry::~ClusterEntry() Http::ConnectionPool::Instance* ClusterManagerImpl::ThreadLocalClusterManagerImpl::ClusterEntry::connPool( - ResourcePriority priority, Http::Protocol protocol, LoadBalancerContext* context) { + ResourcePriority priority, absl::optional downstream_protocol, + LoadBalancerContext* context) { HostConstSharedPtr host = lb_->chooseHost(context); if (!host) { ENVOY_LOG(debug, "no healthy host for HTTP connection pool"); @@ -1252,7 +1300,8 @@ ClusterManagerImpl::ThreadLocalClusterManagerImpl::ClusterEntry::connPool( return nullptr; } - std::vector hash_key = {uint8_t(protocol)}; + auto upstream_protocol = host->cluster().upstreamHttpProtocol(downstream_protocol); + std::vector hash_key = {uint8_t(upstream_protocol)}; Network::Socket::OptionsSharedPtr upstream_options(std::make_shared()); if (context) { @@ -1283,7 +1332,7 @@ ClusterManagerImpl::ThreadLocalClusterManagerImpl::ClusterEntry::connPool( ConnPoolsContainer::ConnPools::PoolOptRef pool = container.pools_->getPool(priority, hash_key, [&]() { return parent_.parent_.factory_.allocateConnPool( - parent_.thread_local_dispatcher_, host, priority, protocol, + parent_.thread_local_dispatcher_, host, priority, upstream_protocol, !upstream_options->empty() ? upstream_options : nullptr, have_transport_socket_options ? 
context->upstreamTransportSocketOptions() : nullptr); }); @@ -1368,8 +1417,13 @@ Tcp::ConnectionPool::InstancePtr ProdClusterManagerFactory::allocateTcpConnPool( Event::Dispatcher& dispatcher, HostConstSharedPtr host, ResourcePriority priority, const Network::ConnectionSocket::OptionsSharedPtr& options, Network::TransportSocketOptionsSharedPtr transport_socket_options) { - return Tcp::ConnectionPool::InstancePtr{ - new Tcp::ConnPoolImpl(dispatcher, host, priority, options, transport_socket_options)}; + if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.new_tcp_connection_pool")) { + return std::make_unique(dispatcher, host, priority, options, + transport_socket_options); + } else { + return Tcp::ConnectionPool::InstancePtr{new Tcp::OriginalConnPoolImpl( + dispatcher, host, priority, options, transport_socket_options)}; + } } std::pair ProdClusterManagerFactory::clusterFromProto( diff --git a/source/common/upstream/cluster_manager_impl.h b/source/common/upstream/cluster_manager_impl.h index 4f89e443acee0..c229395c1353a 100644 --- a/source/common/upstream/cluster_manager_impl.h +++ b/source/common/upstream/cluster_manager_impl.h @@ -10,6 +10,7 @@ #include #include "envoy/api/api.h" +#include "envoy/common/random_generator.h" #include "envoy/config/bootstrap/v3/bootstrap.pb.h" #include "envoy/config/cluster/v3/cluster.pb.h" #include "envoy/config/core/v3/address.pb.h" @@ -41,7 +42,7 @@ class ProdClusterManagerFactory : public ClusterManagerFactory { public: ProdClusterManagerFactory( Server::Admin& admin, Runtime::Loader& runtime, Stats::Store& stats, - ThreadLocal::Instance& tls, Runtime::RandomGenerator& random, + ThreadLocal::Instance& tls, Random::RandomGenerator& random, Network::DnsResolverSharedPtr dns_resolver, Ssl::ContextManager& ssl_context_manager, Event::Dispatcher& main_thread_dispatcher, const LocalInfo::LocalInfo& local_info, Secret::SecretManager& secret_manager, ProtobufMessage::ValidationContext& validation_context, @@ -83,7 +84,7 @@ 
class ProdClusterManagerFactory : public ClusterManagerFactory { Runtime::Loader& runtime_; Stats::Store& stats_; ThreadLocal::Instance& tls_; - Runtime::RandomGenerator& random_; + Random::RandomGenerator& random_; Network::DnsResolverSharedPtr dns_resolver_; Ssl::ContextManager& ssl_context_manager_; const LocalInfo::LocalInfo& local_info_; @@ -111,12 +112,16 @@ class ClusterManagerInitHelper : Logger::Loggable { enum class State { // Initial state. During this state all static clusters are loaded. Any primary clusters - // are immediately initialized. + // immediately begin initialization. Loading, + // In this state cluster manager waits for all primary clusters to finish initialization. + // This state may immediately transition to the next state iff all clusters are STATIC and + // without health checks enabled or health checks have failed immediately, since their + // initialization completes immediately. + WaitingForPrimaryInitializationToComplete, // During this state cluster manager waits to start initializing secondary clusters. In this - // state all - // primary clusters have completed initialization. Initialization of the secondary clusters - // is started by the `initializeSecondaryClusters` method. + // state all primary clusters have completed initialization. Initialization of the + // secondary clusters is started by the `initializeSecondaryClusters` method. WaitingToStartSecondaryInitialization, // In this state cluster manager waits for all secondary clusters (if configured) to finish // initialization. 
Then, if CDS is configured, this state tracks waiting for the first CDS @@ -133,7 +138,8 @@ class ClusterManagerInitHelper : Logger::Loggable { void onStaticLoadComplete(); void removeCluster(Cluster& cluster); void setCds(CdsApi* cds); - void setInitializedCb(std::function callback); + void setPrimaryClustersInitializedCb(ClusterManager::PrimaryClustersReadyCallback callback); + void setInitializedCb(ClusterManager::InitializationCompleteCallback callback); State state() const { return state_; } void startInitializingSecondaryClusters(); @@ -149,7 +155,8 @@ class ClusterManagerInitHelper : Logger::Loggable { ClusterManager& cm_; std::function per_cluster_init_callback_; CdsApi* cds_{}; - std::function initialized_callback_; + ClusterManager::PrimaryClustersReadyCallback primary_clusters_initialized_callback_; + ClusterManager::InitializationCompleteCallback initialized_callback_; std::list primary_init_clusters_; std::list secondary_init_clusters_; State state_{State::Loading}; @@ -186,7 +193,7 @@ class ClusterManagerImpl : public ClusterManager, Logger::Loggable callback) override { + + void setPrimaryClustersInitializedCb(PrimaryClustersReadyCallback callback) override { + init_helper_.setPrimaryClustersInitializedCb(callback); + } + + void setInitializedCb(InitializationCompleteCallback callback) override { init_helper_.setInitializedCb(callback); } @@ -210,11 +222,15 @@ class ClusterManagerImpl : public ClusterManager, Logger::Loggable downstream_protocol, + LoadBalancerContext* context) override; Tcp::ConnectionPool::Instance* tcpConnPoolForCluster(const std::string& cluster, ResourcePriority priority, LoadBalancerContext* context) override; @@ -223,6 +239,9 @@ class ClusterManagerImpl : public ClusterManager, Logger::Loggablecancel(); + } // Make sure we destroy all potential outgoing connections before this returns. 
cds_api_.reset(); ads_mux_.reset(); @@ -308,14 +327,15 @@ class ClusterManagerImpl : public ClusterManager, Logger::Loggable>; + absl::node_hash_map>; struct ClusterEntry : public ThreadLocalCluster { ClusterEntry(ThreadLocalClusterManagerImpl& parent, ClusterInfoConstSharedPtr cluster, const LoadBalancerFactorySharedPtr& lb_factory); ~ClusterEntry() override; - Http::ConnectionPool::Instance* connPool(ResourcePriority priority, Http::Protocol protocol, + Http::ConnectionPool::Instance* connPool(ResourcePriority priority, + absl::optional downstream_protocol, LoadBalancerContext* context); Tcp::ConnectionPool::Instance* tcpConnPool(ResourcePriority priority, @@ -367,9 +387,9 @@ class ClusterManagerImpl : public ClusterManager, Logger::Loggable host_http_conn_pool_map_; - std::unordered_map host_tcp_conn_pool_map_; - std::unordered_map host_tcp_conn_map_; + absl::node_hash_map host_http_conn_pool_map_; + absl::node_hash_map host_tcp_conn_pool_map_; + absl::node_hash_map host_tcp_conn_map_; std::list update_callbacks_; const PrioritySet* local_priority_set_{}; @@ -448,9 +468,9 @@ class ClusterManagerImpl : public ClusterManager, Logger::Loggable; - using PendingUpdatesByPriorityMap = std::unordered_map; + using PendingUpdatesByPriorityMap = absl::node_hash_map; using PendingUpdatesByPriorityMapPtr = std::unique_ptr; - using ClusterUpdatesMap = std::unordered_map; + using ClusterUpdatesMap = absl::node_hash_map; void applyUpdates(const Cluster& cluster, uint32_t priority, PendingUpdates& updates); bool scheduleUpdate(const Cluster& cluster, uint32_t priority, bool mergeable, @@ -468,7 +488,7 @@ class ClusterManagerImpl : public ClusterManager, Logger::Loggable local_cluster_name_; @@ -492,6 +514,7 @@ class ClusterManagerImpl : public ClusterManager, Logger::Loggable::getPool(KEY_TYPE key, const PoolFactory& facto if (pool_iter != active_pools_.end()) { return std::ref(*(pool_iter->second)); } - Resource& connPoolResource = 
host_->cluster().resourceManager(priority_).connectionPools(); + ResourceLimit& connPoolResource = host_->cluster().resourceManager(priority_).connectionPools(); // We need a new pool. Check if we have room. if (!connPoolResource.canCreate()) { // We're full. Try to free up a pool. If we can't, bail out. diff --git a/source/common/upstream/eds.cc b/source/common/upstream/eds.cc index 03a12914cfa89..d8dc0d21c242f 100644 --- a/source/common/upstream/eds.cc +++ b/source/common/upstream/eds.cc @@ -4,13 +4,12 @@ #include "envoy/common/exception.h" #include "envoy/config/cluster/v3/cluster.pb.h" #include "envoy/config/core/v3/config_source.pb.h" -#include "envoy/config/endpoint/v3/endpoint.pb.h" -#include "envoy/config/endpoint/v3/endpoint.pb.validate.h" #include "envoy/service/discovery/v3/discovery.pb.h" #include "common/common/assert.h" #include "common/common/utility.h" #include "common/config/api_version.h" +#include "common/config/decoded_resource_impl.h" #include "common/config/version_converter.h" namespace Envoy { @@ -23,12 +22,12 @@ EdsClusterImpl::EdsClusterImpl( : BaseDynamicClusterImpl(cluster, runtime, factory_context, std::move(stats_scope), added_via_api), Envoy::Config::SubscriptionBase( - cluster.eds_cluster_config().eds_config().resource_api_version()), + cluster.eds_cluster_config().eds_config().resource_api_version(), + factory_context.messageValidationVisitor(), "cluster_name"), local_info_(factory_context.localInfo()), cluster_name_(cluster.eds_cluster_config().service_name().empty() ? 
cluster.name() - : cluster.eds_cluster_config().service_name()), - validation_visitor_(factory_context.messageValidationVisitor()) { + : cluster.eds_cluster_config().service_name()) { Event::Dispatcher& dispatcher = factory_context.dispatcher(); assignment_timeout_ = dispatcher.createTimer([this]() -> void { onAssignmentTimeout(); }); const auto& eds_config = cluster.eds_cluster_config().eds_config(); @@ -41,13 +40,14 @@ EdsClusterImpl::EdsClusterImpl( const auto resource_name = getResourceName(); subscription_ = factory_context.clusterManager().subscriptionFactory().subscriptionFromConfigSource( - eds_config, Grpc::Common::typeUrl(resource_name), info_->statsScope(), *this); + eds_config, Grpc::Common::typeUrl(resource_name), info_->statsScope(), *this, + resource_decoder_); } void EdsClusterImpl::startPreInit() { subscription_->start({cluster_name_}); } void EdsClusterImpl::BatchUpdateHelper::batchUpdate(PrioritySet::HostUpdateCb& host_update_cb) { - std::unordered_map updated_hosts; + absl::node_hash_map updated_hosts; PriorityStateManager priority_state_manager(parent_, parent_.local_info_, &host_update_cb); for (const auto& locality_lb_endpoint : cluster_load_assignment_.endpoints()) { parent_.validateEndpointsForZoneAwareRouting(locality_lb_endpoint); @@ -112,14 +112,14 @@ void EdsClusterImpl::BatchUpdateHelper::batchUpdate(PrioritySet::HostUpdateCb& h parent_.onPreInitComplete(); } -void EdsClusterImpl::onConfigUpdate(const Protobuf::RepeatedPtrField& resources, +void EdsClusterImpl::onConfigUpdate(const std::vector& resources, const std::string&) { if (!validateUpdateSize(resources.size())) { return; } - auto cluster_load_assignment = - MessageUtil::anyConvertAndValidate( - resources[0], validation_visitor_); + envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment = + dynamic_cast( + resources[0].get().resource()); if (cluster_load_assignment.cluster_name() != cluster_name_) { throw EnvoyException(fmt::format("Unexpected EDS cluster 
(expecting {}): {}", cluster_name_, cluster_load_assignment.cluster_name())); @@ -145,15 +145,13 @@ void EdsClusterImpl::onConfigUpdate(const Protobuf::RepeatedPtrField& resources, - const Protobuf::RepeatedPtrField&, const std::string&) { - if (!validateUpdateSize(resources.size())) { +void EdsClusterImpl::onConfigUpdate(const std::vector& added_resources, + const Protobuf::RepeatedPtrField&, + const std::string&) { + if (!validateUpdateSize(added_resources.size())) { return; } - Protobuf::RepeatedPtrField unwrapped_resource; - *unwrapped_resource.Add() = resources[0].resource(); - onConfigUpdate(unwrapped_resource, resources[0].version()); + onConfigUpdate(added_resources, added_resources[0].get().version()); } bool EdsClusterImpl::validateUpdateSize(int num_resources) { @@ -175,11 +173,13 @@ void EdsClusterImpl::onAssignmentTimeout() { // TODO(vishalpowar) This is not going to work for incremental updates, and we // need to instead change the health status to indicate the assignments are // stale. - Protobuf::RepeatedPtrField resources; envoy::config::endpoint::v3::ClusterLoadAssignment resource; resource.set_cluster_name(cluster_name_); - resources.Add()->PackFrom(resource); - onConfigUpdate(resources, ""); + ProtobufWkt::Any any_resource; + any_resource.PackFrom(resource); + Config::DecodedResourceImpl decoded_resource(resource_decoder_, any_resource, ""); + std::vector resource_refs = {decoded_resource}; + onConfigUpdate(resource_refs, ""); // Stat to track how often we end up with stale assignments. 
info_->stats().assignment_stale_.inc(); } @@ -234,7 +234,7 @@ bool EdsClusterImpl::updateHostsPerLocality( const uint32_t priority, const uint32_t overprovisioning_factor, const HostVector& new_hosts, LocalityWeightsMap& locality_weights_map, LocalityWeightsMap& new_locality_weights_map, PriorityStateManager& priority_state_manager, - std::unordered_map& updated_hosts) { + absl::node_hash_map& updated_hosts) { const auto& host_set = priority_set_.getOrCreateHostSet(priority, overprovisioning_factor); HostVectorSharedPtr current_hosts_copy(new HostVector(host_set.hosts())); @@ -293,7 +293,7 @@ EdsClusterFactory::createClusterImpl( } /** - * Static registration for the strict dns cluster factory. @see RegisterFactory. + * Static registration for the Eds cluster factory. @see RegisterFactory. */ REGISTER_FACTORY(EdsClusterFactory, ClusterFactory); diff --git a/source/common/upstream/eds.h b/source/common/upstream/eds.h index fa3b09eb8cca3..4ab24c38788ab 100644 --- a/source/common/upstream/eds.h +++ b/source/common/upstream/eds.h @@ -1,9 +1,12 @@ #pragma once +#include + #include "envoy/config/cluster/v3/cluster.pb.h" #include "envoy/config/core/v3/base.pb.h" #include "envoy/config/core/v3/config_source.pb.h" #include "envoy/config/endpoint/v3/endpoint.pb.h" +#include "envoy/config/endpoint/v3/endpoint.pb.validate.h" #include "envoy/config/subscription.h" #include "envoy/config/subscription_factory.h" #include "envoy/local_info/local_info.h" @@ -37,23 +40,20 @@ class EdsClusterImpl private: // Config::SubscriptionCallbacks - void onConfigUpdate(const Protobuf::RepeatedPtrField& resources, + void onConfigUpdate(const std::vector& resources, const std::string& version_info) override; - void onConfigUpdate(const Protobuf::RepeatedPtrField&, - const Protobuf::RepeatedPtrField&, const std::string&) override; + void onConfigUpdate(const std::vector& added_resources, + const Protobuf::RepeatedPtrField& removed_resources, + const std::string& system_version_info) override; 
void onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason reason, const EnvoyException* e) override; - std::string resourceName(const ProtobufWkt::Any& resource) override { - return MessageUtil::anyConvert(resource) - .cluster_name(); - } - using LocalityWeightsMap = std::unordered_map; + using LocalityWeightsMap = absl::node_hash_map; bool updateHostsPerLocality(const uint32_t priority, const uint32_t overprovisioning_factor, const HostVector& new_hosts, LocalityWeightsMap& locality_weights_map, LocalityWeightsMap& new_locality_weights_map, PriorityStateManager& priority_state_manager, - std::unordered_map& updated_hosts); + absl::node_hash_map& updated_hosts); bool validateUpdateSize(int num_resources); // ClusterImplBase @@ -76,16 +76,17 @@ class EdsClusterImpl const envoy::config::endpoint::v3::ClusterLoadAssignment& cluster_load_assignment_; }; - std::unique_ptr subscription_; + Config::SubscriptionPtr subscription_; const LocalInfo::LocalInfo& local_info_; const std::string cluster_name_; std::vector locality_weights_map_; HostMap all_hosts_; Event::TimerPtr assignment_timeout_; - ProtobufMessage::ValidationVisitor& validation_visitor_; InitializePhase initialize_phase_; }; +using EdsClusterImplSharedPtr = std::shared_ptr; + class EdsClusterFactory : public ClusterFactoryImplBase { public: EdsClusterFactory() : ClusterFactoryImplBase(Extensions::Clusters::ClusterTypes::get().Eds) {} diff --git a/source/common/upstream/health_checker_base_impl.cc b/source/common/upstream/health_checker_base_impl.cc index 06e3efb3f3aa1..47a67c3f29a79 100644 --- a/source/common/upstream/health_checker_base_impl.cc +++ b/source/common/upstream/health_checker_base_impl.cc @@ -15,7 +15,7 @@ HealthCheckerImplBase::HealthCheckerImplBase(const Cluster& cluster, const envoy::config::core::v3::HealthCheck& config, Event::Dispatcher& dispatcher, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, + Random::RandomGenerator& random, HealthCheckEventLoggerPtr&& 
event_logger) : always_log_health_check_failures_(config.always_log_health_check_failures()), cluster_(cluster), dispatcher_(dispatcher), @@ -35,7 +35,8 @@ HealthCheckerImplBase::HealthCheckerImplBase(const Cluster& cluster, PROTOBUF_GET_MS_OR_DEFAULT(config, unhealthy_edge_interval, unhealthy_interval_.count())), healthy_edge_interval_( PROTOBUF_GET_MS_OR_DEFAULT(config, healthy_edge_interval, interval_.count())), - transport_socket_options_(initTransportSocketOptions(config)) { + transport_socket_options_(initTransportSocketOptions(config)), + transport_socket_match_metadata_(initTransportSocketMatchMetadata(config)) { cluster_.prioritySet().addMemberUpdateCb( [this](const HostVector& hosts_added, const HostVector& hosts_removed) -> void { onClusterMemberUpdate(hosts_added, hosts_removed); @@ -55,6 +56,20 @@ HealthCheckerImplBase::initTransportSocketOptions( return std::make_shared(); } +MetadataConstSharedPtr HealthCheckerImplBase::initTransportSocketMatchMetadata( + const envoy::config::core::v3::HealthCheck& config) { + if (config.has_transport_socket_match_criteria()) { + std::shared_ptr metadata = + std::make_shared(); + (*metadata->mutable_filter_metadata())[Envoy::Config::MetadataFilters::get() + .ENVOY_TRANSPORT_SOCKET_MATCH] = + config.transport_socket_match_criteria(); + return metadata; + } + + return nullptr; +} + HealthCheckerImplBase::~HealthCheckerImplBase() { // ASSERTs inside the session destructor check to make sure we have been previously deferred // deleted. Unify that logic here before actual destruction happens. 
@@ -63,17 +78,9 @@ HealthCheckerImplBase::~HealthCheckerImplBase() { } } -void HealthCheckerImplBase::decHealthy() { - ASSERT(local_process_healthy_ > 0); - local_process_healthy_--; - refreshHealthyStat(); -} +void HealthCheckerImplBase::decHealthy() { stats_.healthy_.sub(1); } -void HealthCheckerImplBase::decDegraded() { - ASSERT(local_process_degraded_ > 0); - local_process_degraded_--; - refreshHealthyStat(); -} +void HealthCheckerImplBase::decDegraded() { stats_.degraded_.sub(1); } HealthCheckerStats HealthCheckerImplBase::generateStats(Stats::Scope& scope) { std::string prefix("health_check."); @@ -81,15 +88,9 @@ HealthCheckerStats HealthCheckerImplBase::generateStats(Stats::Scope& scope) { POOL_GAUGE_PREFIX(scope, prefix))}; } -void HealthCheckerImplBase::incHealthy() { - local_process_healthy_++; - refreshHealthyStat(); -} +void HealthCheckerImplBase::incHealthy() { stats_.healthy_.add(1); } -void HealthCheckerImplBase::incDegraded() { - local_process_degraded_++; - refreshHealthyStat(); -} +void HealthCheckerImplBase::incDegraded() { stats_.degraded_.add(1); } std::chrono::milliseconds HealthCheckerImplBase::interval(HealthState state, HealthTransition changed_state) const { @@ -172,20 +173,7 @@ void HealthCheckerImplBase::onClusterMemberUpdate(const HostVector& hosts_added, } } -void HealthCheckerImplBase::refreshHealthyStat() { - // Each hot restarted process health checks independently. To make the stats easier to read, - // we assume that both processes will converge and the last one that writes wins for the host. - stats_.healthy_.set(local_process_healthy_); - stats_.degraded_.set(local_process_degraded_); -} - void HealthCheckerImplBase::runCallbacks(HostSharedPtr host, HealthTransition changed_state) { - // When a parent process shuts down, it will kill all of the active health checking sessions, - // which will decrement the healthy count and the healthy stat in the parent. 
If the child is - // stable and does not update, the healthy stat will be wrong. This routine is called any time - // any HC happens against a host so just refresh the healthy stat here so that it is correct. - refreshHealthyStat(); - for (const HostStatusCb& cb : callbacks_) { cb(host, changed_state); } diff --git a/source/common/upstream/health_checker_base_impl.h b/source/common/upstream/health_checker_base_impl.h index 2aed0b19c85de..ff2f62101f577 100644 --- a/source/common/upstream/health_checker_base_impl.h +++ b/source/common/upstream/health_checker_base_impl.h @@ -1,6 +1,7 @@ #pragma once #include "envoy/access_log/access_log.h" +#include "envoy/common/random_generator.h" #include "envoy/config/core/v3/health_check.pb.h" #include "envoy/data/core/v3/health_check_event.pb.h" #include "envoy/event/timer.h" @@ -49,6 +50,9 @@ class HealthCheckerImplBase : public HealthChecker, std::shared_ptr transportSocketOptions() const { return transport_socket_options_; } + MetadataConstSharedPtr transportSocketMatchMetadata() const { + return transport_socket_match_metadata_; + } protected: class ActiveHealthCheckSession : public Event::DeferredDeletable { @@ -91,7 +95,7 @@ class HealthCheckerImplBase : public HealthChecker, HealthCheckerImplBase(const Cluster& cluster, const envoy::config::core::v3::HealthCheck& config, Event::Dispatcher& dispatcher, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, HealthCheckEventLoggerPtr&& event_logger); + Random::RandomGenerator& random, HealthCheckEventLoggerPtr&& event_logger); ~HealthCheckerImplBase() override; virtual ActiveHealthCheckSessionPtr makeSession(HostSharedPtr host) PURE; @@ -105,7 +109,7 @@ class HealthCheckerImplBase : public HealthChecker, const uint32_t healthy_threshold_; HealthCheckerStats stats_; Runtime::Loader& runtime_; - Runtime::RandomGenerator& random_; + Random::RandomGenerator& random_; const bool reuse_connection_; HealthCheckEventLoggerPtr event_logger_; @@ -132,11 +136,12 @@ class 
HealthCheckerImplBase : public HealthChecker, std::chrono::milliseconds intervalWithJitter(uint64_t base_time_ms, std::chrono::milliseconds interval_jitter) const; void onClusterMemberUpdate(const HostVector& hosts_added, const HostVector& hosts_removed); - void refreshHealthyStat(); void runCallbacks(HostSharedPtr host, HealthTransition changed_state); void setUnhealthyCrossThread(const HostSharedPtr& host); static std::shared_ptr initTransportSocketOptions(const envoy::config::core::v3::HealthCheck& config); + static MetadataConstSharedPtr + initTransportSocketMatchMetadata(const envoy::config::core::v3::HealthCheck& config); static const std::chrono::milliseconds NO_TRAFFIC_INTERVAL; @@ -149,10 +154,9 @@ class HealthCheckerImplBase : public HealthChecker, const std::chrono::milliseconds unhealthy_interval_; const std::chrono::milliseconds unhealthy_edge_interval_; const std::chrono::milliseconds healthy_edge_interval_; - std::unordered_map active_sessions_; - uint64_t local_process_healthy_{}; - uint64_t local_process_degraded_{}; + absl::node_hash_map active_sessions_; const std::shared_ptr transport_socket_options_; + const MetadataConstSharedPtr transport_socket_match_metadata_; }; class HealthCheckEventLoggerImpl : public HealthCheckEventLogger { diff --git a/source/common/upstream/health_checker_impl.cc b/source/common/upstream/health_checker_impl.cc index 5ca42738d464e..bce69e033bb86 100644 --- a/source/common/upstream/health_checker_impl.cc +++ b/source/common/upstream/health_checker_impl.cc @@ -16,8 +16,10 @@ #include "common/config/well_known_names.h" #include "common/grpc/common.h" #include "common/http/header_map_impl.h" +#include "common/http/header_utility.h" #include "common/network/address_impl.h" #include "common/router/router.h" +#include "common/runtime/runtime_features.h" #include "common/runtime/runtime_impl.h" #include "common/upstream/host_utility.h" @@ -57,7 +59,7 @@ const std::string& getHostname(const HostSharedPtr& host, class 
HealthCheckerFactoryContextImpl : public Server::Configuration::HealthCheckerFactoryContext { public: HealthCheckerFactoryContextImpl(Upstream::Cluster& cluster, Envoy::Runtime::Loader& runtime, - Envoy::Runtime::RandomGenerator& random, + Envoy::Random::RandomGenerator& random, Event::Dispatcher& dispatcher, HealthCheckEventLoggerPtr&& event_logger, ProtobufMessage::ValidationVisitor& validation_visitor, @@ -67,7 +69,7 @@ class HealthCheckerFactoryContextImpl : public Server::Configuration::HealthChec } Upstream::Cluster& cluster() override { return cluster_; } Envoy::Runtime::Loader& runtime() override { return runtime_; } - Envoy::Runtime::RandomGenerator& random() override { return random_; } + Envoy::Random::RandomGenerator& random() override { return random_; } Event::Dispatcher& dispatcher() override { return dispatcher_; } HealthCheckEventLoggerPtr eventLogger() override { return std::move(event_logger_); } ProtobufMessage::ValidationVisitor& messageValidationVisitor() override { @@ -78,7 +80,7 @@ class HealthCheckerFactoryContextImpl : public Server::Configuration::HealthChec private: Upstream::Cluster& cluster_; Envoy::Runtime::Loader& runtime_; - Envoy::Runtime::RandomGenerator& random_; + Envoy::Random::RandomGenerator& random_; Event::Dispatcher& dispatcher_; HealthCheckEventLoggerPtr event_logger_; ProtobufMessage::ValidationVisitor& validation_visitor_; @@ -87,7 +89,7 @@ class HealthCheckerFactoryContextImpl : public Server::Configuration::HealthChec HealthCheckerSharedPtr HealthCheckerFactory::create( const envoy::config::core::v3::HealthCheck& health_check_config, Upstream::Cluster& cluster, - Runtime::Loader& runtime, Runtime::RandomGenerator& random, Event::Dispatcher& dispatcher, + Runtime::Loader& runtime, Random::RandomGenerator& random, Event::Dispatcher& dispatcher, AccessLog::AccessLogManager& log_manager, ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api) { HealthCheckEventLoggerPtr event_logger; @@ -128,7 +130,7 @@ 
HttpHealthCheckerImpl::HttpHealthCheckerImpl(const Cluster& cluster, const envoy::config::core::v3::HealthCheck& config, Event::Dispatcher& dispatcher, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, + Random::RandomGenerator& random, HealthCheckEventLoggerPtr&& event_logger) : HealthCheckerImplBase(cluster, config, dispatcher, runtime, random, std::move(event_logger)), path_(config.http_health_check().path()), host_value_(config.http_health_check().host()), @@ -250,7 +252,8 @@ void HttpHealthCheckerImpl::HttpActiveHealthCheckSession::onEvent(Network::Conne void HttpHealthCheckerImpl::HttpActiveHealthCheckSession::onInterval() { if (!client_) { Upstream::Host::CreateConnectionData conn = - host_->createHealthCheckConnection(parent_.dispatcher_, parent_.transportSocketOptions()); + host_->createHealthCheckConnection(parent_.dispatcher_, parent_.transportSocketOptions(), + parent_.transportSocketMatchMetadata().get()); client_.reset(parent_.createCodecClient(conn)); client_->addConnectionCallbacks(connection_callback_impl_); expect_reset_ = false; @@ -302,8 +305,7 @@ HttpHealthCheckerImpl::HttpActiveHealthCheckSession::healthCheckResult() { parent_.stats_.verify_cluster_.inc(); std::string service_cluster_healthchecked = response_headers_->EnvoyUpstreamHealthCheckedCluster() - ? std::string( - response_headers_->EnvoyUpstreamHealthCheckedCluster()->value().getStringView()) + ? std::string(response_headers_->getEnvoyUpstreamHealthCheckedClusterValue()) : EMPTY_STRING; if (parent_.service_name_matcher_->match(service_cluster_healthchecked)) { return degraded ? 
HealthCheckResult::Degraded : HealthCheckResult::Succeeded; @@ -343,6 +345,14 @@ bool HttpHealthCheckerImpl::HttpActiveHealthCheckSession::shouldClose() const { return false; } + if (!parent_.reuse_connection_) { + return true; + } + + if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.fixed_connection_close")) { + return Http::HeaderUtility::shouldCloseConnection(client_->protocol(), *response_headers_); + } + if (response_headers_->Connection()) { const bool close = absl::EqualsIgnoreCase(response_headers_->Connection()->value().getStringView(), @@ -361,10 +371,6 @@ bool HttpHealthCheckerImpl::HttpActiveHealthCheckSession::shouldClose() const { } } - if (!parent_.reuse_connection_) { - return true; - } - return false; } @@ -419,7 +425,7 @@ TcpHealthCheckMatcher::MatchSegments TcpHealthCheckMatcher::loadProtoBytes( bool TcpHealthCheckMatcher::match(const MatchSegments& expected, const Buffer::Instance& buffer) { uint64_t start_index = 0; for (const std::vector& segment : expected) { - ssize_t search_result = buffer.search(&segment[0], segment.size(), start_index); + ssize_t search_result = buffer.search(segment.data(), segment.size(), start_index); if (search_result == -1) { return false; } @@ -433,7 +439,7 @@ bool TcpHealthCheckMatcher::match(const MatchSegments& expected, const Buffer::I TcpHealthCheckerImpl::TcpHealthCheckerImpl(const Cluster& cluster, const envoy::config::core::v3::HealthCheck& config, Event::Dispatcher& dispatcher, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, + Random::RandomGenerator& random, HealthCheckEventLoggerPtr&& event_logger) : HealthCheckerImplBase(cluster, config, dispatcher, runtime, random, std::move(event_logger)), send_bytes_([&config] { @@ -505,7 +511,9 @@ void TcpHealthCheckerImpl::TcpActiveHealthCheckSession::onEvent(Network::Connect void TcpHealthCheckerImpl::TcpActiveHealthCheckSession::onInterval() { if (!client_) { client_ = - host_->createHealthCheckConnection(parent_.dispatcher_, 
parent_.transportSocketOptions()) + host_ + ->createHealthCheckConnection(parent_.dispatcher_, parent_.transportSocketOptions(), + parent_.transportSocketMatchMetadata().get()) .connection_; session_callbacks_ = std::make_shared(*this); client_->addConnectionCallbacks(*session_callbacks_); @@ -519,7 +527,7 @@ void TcpHealthCheckerImpl::TcpActiveHealthCheckSession::onInterval() { if (!parent_.send_bytes_.empty()) { Buffer::OwnedImpl data; for (const std::vector& segment : parent_.send_bytes_) { - data.add(&segment[0], segment.size()); + data.add(segment.data(), segment.size()); } client_->write(data, false); @@ -536,7 +544,7 @@ GrpcHealthCheckerImpl::GrpcHealthCheckerImpl(const Cluster& cluster, const envoy::config::core::v3::HealthCheck& config, Event::Dispatcher& dispatcher, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, + Random::RandomGenerator& random, HealthCheckEventLoggerPtr&& event_logger) : HealthCheckerImplBase(cluster, config, dispatcher, runtime, random, std::move(event_logger)), service_method_(*Protobuf::DescriptorPool::generated_pool()->FindMethodByName( @@ -583,8 +591,8 @@ void GrpcHealthCheckerImpl::GrpcActiveHealthCheckSession::decodeHeaders( end_stream); return; } - if (!Grpc::Common::hasGrpcContentType(*headers)) { - onRpcComplete(Grpc::Status::WellKnownGrpcStatus::Internal, "invalid gRPC content-type", false); + if (!Grpc::Common::isGrpcResponseHeaders(*headers, end_stream)) { + onRpcComplete(Grpc::Status::WellKnownGrpcStatus::Internal, "not a gRPC request", false); return; } if (end_stream) { @@ -659,7 +667,8 @@ void GrpcHealthCheckerImpl::GrpcActiveHealthCheckSession::onEvent(Network::Conne void GrpcHealthCheckerImpl::GrpcActiveHealthCheckSession::onInterval() { if (!client_) { Upstream::Host::CreateConnectionData conn = - host_->createHealthCheckConnection(parent_.dispatcher_, parent_.transportSocketOptions()); + host_->createHealthCheckConnection(parent_.dispatcher_, parent_.transportSocketOptions(), + 
parent_.transportSocketMatchMetadata().get()); client_ = parent_.createCodecClient(conn); client_->addConnectionCallbacks(connection_callback_impl_); client_->setCodecConnectionCallbacks(http_connection_callback_impl_); @@ -694,17 +703,25 @@ void GrpcHealthCheckerImpl::GrpcActiveHealthCheckSession::onInterval() { void GrpcHealthCheckerImpl::GrpcActiveHealthCheckSession::onResetStream(Http::StreamResetReason, absl::string_view) { const bool expected_reset = expect_reset_; + const bool goaway = received_no_error_goaway_; resetState(); if (expected_reset) { // Stream reset was initiated by us (bogus gRPC response, timeout or cluster host is going - // away). In these cases health check failure has already been reported, so just return. + // away). In these cases health check failure has already been reported and a GOAWAY (if any) + // has already been handled, so just return. return; } ENVOY_CONN_LOG(debug, "connection/stream error health_flags={}", *client_, HostUtility::healthFlagsToString(*host_)); + if (goaway || !parent_.reuse_connection_) { + // Stream reset was unexpected, so we haven't closed the connection + // yet in response to a GOAWAY or due to disabled connection reuse. 
+ client_->close(); + } + // TODO(baranov1ch): according to all HTTP standards, we should check if reason is one of // Http::StreamResetReason::RemoteRefusedStreamReset (which may mean GOAWAY), // Http::StreamResetReason::RemoteReset or Http::StreamResetReason::ConnectionTermination (both @@ -713,9 +730,19 @@ void GrpcHealthCheckerImpl::GrpcActiveHealthCheckSession::onResetStream(Http::St handleFailure(envoy::data::core::v3::NETWORK); } -void GrpcHealthCheckerImpl::GrpcActiveHealthCheckSession::onGoAway() { +void GrpcHealthCheckerImpl::GrpcActiveHealthCheckSession::onGoAway( + Http::GoAwayErrorCode error_code) { ENVOY_CONN_LOG(debug, "connection going away health_flags={}", *client_, HostUtility::healthFlagsToString(*host_)); + // If we have an active health check probe and receive a GOAWAY indicating + // graceful shutdown, allow the probe to complete before closing the connection. + // The connection will be closed when the active check completes or another + // terminal condition occurs, such as a timeout or stream reset. + if (request_encoder_ && error_code == Http::GoAwayErrorCode::NoError) { + received_no_error_goaway_ = true; + return; + } + // Even if we have active health check probe, fail it on GOAWAY and schedule new one. if (request_encoder_) { handleFailure(envoy::data::core::v3::NETWORK); @@ -748,6 +775,9 @@ void GrpcHealthCheckerImpl::GrpcActiveHealthCheckSession::onRpcComplete( handleFailure(envoy::data::core::v3::ACTIVE); } + // Read the value as we may call resetState() and clear it. + const bool goaway = received_no_error_goaway_; + // |end_stream| will be false if we decided to stop healthcheck before HTTP stream has ended - // invalid gRPC payload, unexpected message stream or wrong content-type. 
if (end_stream) { @@ -758,7 +788,7 @@ void GrpcHealthCheckerImpl::GrpcActiveHealthCheckSession::onRpcComplete( request_encoder_->getStream().resetStream(Http::StreamResetReason::LocalReset); } - if (!parent_.reuse_connection_) { + if (!parent_.reuse_connection_ || goaway) { client_->close(); } } @@ -768,13 +798,18 @@ void GrpcHealthCheckerImpl::GrpcActiveHealthCheckSession::resetState() { request_encoder_ = nullptr; decoder_ = Grpc::Decoder(); health_check_response_.reset(); + received_no_error_goaway_ = false; } void GrpcHealthCheckerImpl::GrpcActiveHealthCheckSession::onTimeout() { ENVOY_CONN_LOG(debug, "connection/stream timeout health_flags={}", *client_, HostUtility::healthFlagsToString(*host_)); expect_reset_ = true; - request_encoder_->getStream().resetStream(Http::StreamResetReason::LocalReset); + if (received_no_error_goaway_ || !parent_.reuse_connection_) { + client_->close(); + } else { + request_encoder_->getStream().resetStream(Http::StreamResetReason::LocalReset); + } } void GrpcHealthCheckerImpl::GrpcActiveHealthCheckSession::logHealthCheckStatus( diff --git a/source/common/upstream/health_checker_impl.h b/source/common/upstream/health_checker_impl.h index 07b2f8d93dc58..dc2fa6c1bbbf0 100644 --- a/source/common/upstream/health_checker_impl.h +++ b/source/common/upstream/health_checker_impl.h @@ -2,6 +2,7 @@ #include "envoy/access_log/access_log.h" #include "envoy/api/api.h" +#include "envoy/common/random_generator.h" #include "envoy/config/core/v3/health_check.pb.h" #include "envoy/data/core/v3/health_check_event.pb.h" #include "envoy/grpc/status.h" @@ -32,13 +33,14 @@ class HealthCheckerFactory : public Logger::Loggable * @param runtime supplies the runtime loader. * @param random supplies the random generator. * @param dispatcher supplies the dispatcher. - * @param event_logger supplies the event_logger. + * @param log_manager supplies the log_manager. * @param validation_visitor message validation visitor instance. 
+ * @param api reference to the Api object * @return a health checker. */ static HealthCheckerSharedPtr create(const envoy::config::core::v3::HealthCheck& health_check_config, - Upstream::Cluster& cluster, Runtime::Loader& runtime, Runtime::RandomGenerator& random, + Upstream::Cluster& cluster, Runtime::Loader& runtime, Random::RandomGenerator& random, Event::Dispatcher& dispatcher, AccessLog::AccessLogManager& log_manager, ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api); }; @@ -50,7 +52,7 @@ class HttpHealthCheckerImpl : public HealthCheckerImplBase { public: HttpHealthCheckerImpl(const Cluster& cluster, const envoy::config::core::v3::HealthCheck& config, Event::Dispatcher& dispatcher, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, HealthCheckEventLoggerPtr&& event_logger); + Random::RandomGenerator& random, HealthCheckEventLoggerPtr&& event_logger); /** * Utility class checking if given http status matches configured expectations. @@ -221,7 +223,7 @@ class TcpHealthCheckerImpl : public HealthCheckerImplBase { public: TcpHealthCheckerImpl(const Cluster& cluster, const envoy::config::core::v3::HealthCheck& config, Event::Dispatcher& dispatcher, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, HealthCheckEventLoggerPtr&& event_logger); + Random::RandomGenerator& random, HealthCheckEventLoggerPtr&& event_logger); private: struct TcpActiveHealthCheckSession; @@ -286,7 +288,7 @@ class GrpcHealthCheckerImpl : public HealthCheckerImplBase { public: GrpcHealthCheckerImpl(const Cluster& cluster, const envoy::config::core::v3::HealthCheck& config, Event::Dispatcher& dispatcher, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, HealthCheckEventLoggerPtr&& event_logger); + Random::RandomGenerator& random, HealthCheckEventLoggerPtr&& event_logger); private: struct GrpcActiveHealthCheckSession : public ActiveHealthCheckSession, @@ -323,7 +325,7 @@ class GrpcHealthCheckerImpl : public HealthCheckerImplBase { void 
onBelowWriteBufferLowWatermark() override {} void onEvent(Network::ConnectionEvent event); - void onGoAway(); + void onGoAway(Http::GoAwayErrorCode error_code); class ConnectionCallbackImpl : public Network::ConnectionCallbacks { public: @@ -341,7 +343,7 @@ class GrpcHealthCheckerImpl : public HealthCheckerImplBase { public: HttpConnectionCallbackImpl(GrpcActiveHealthCheckSession& parent) : parent_(parent) {} // Http::ConnectionCallbacks - void onGoAway() override { parent_.onGoAway(); } + void onGoAway(Http::GoAwayErrorCode error_code) override { parent_.onGoAway(error_code); } private: GrpcActiveHealthCheckSession& parent_; @@ -358,6 +360,9 @@ class GrpcHealthCheckerImpl : public HealthCheckerImplBase { // e.g. remote reset. In this case healthcheck status has already been reported, only state // cleanup is required. bool expect_reset_ = false; + // If true, we received a GOAWAY (NO_ERROR code) and are deferring closing the connection + // until the active probe completes. + bool received_no_error_goaway_ = false; }; virtual Http::CodecClientPtr createCodecClient(Upstream::Host::CreateConnectionData& data) PURE; diff --git a/source/common/upstream/health_discovery_service.cc b/source/common/upstream/health_discovery_service.cc index 1ca0375b974bd..21fa74e345889 100644 --- a/source/common/upstream/health_discovery_service.cc +++ b/source/common/upstream/health_discovery_service.cc @@ -6,10 +6,14 @@ #include "envoy/config/core/v3/health_check.pb.h" #include "envoy/config/endpoint/v3/endpoint_components.pb.h" #include "envoy/service/health/v3/hds.pb.h" +#include "envoy/service/health/v3/hds.pb.validate.h" #include "envoy/stats/scope.h" #include "common/config/version_converter.h" +#include "common/protobuf/message_validator_impl.h" #include "common/protobuf/protobuf.h" +#include "common/protobuf/utility.h" +#include "common/upstream/upstream_impl.h" namespace Envoy { namespace Upstream { @@ -28,14 +32,16 @@ HdsDelegate::HdsDelegate(Stats::Scope& scope, 
Grpc::RawAsyncClientPtr async_clie envoy::config::core::v3::ApiVersion transport_api_version, Event::Dispatcher& dispatcher, Runtime::Loader& runtime, Envoy::Stats::Store& stats, Ssl::ContextManager& ssl_context_manager, - Runtime::RandomGenerator& random, ClusterInfoFactory& info_factory, + Random::RandomGenerator& random, ClusterInfoFactory& info_factory, AccessLog::AccessLogManager& access_log_manager, ClusterManager& cm, const LocalInfo::LocalInfo& local_info, Server::Admin& admin, Singleton::Manager& singleton_manager, ThreadLocal::SlotAllocator& tls, ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api) : stats_{ALL_HDS_STATS(POOL_COUNTER_PREFIX(scope, "hds_delegate."))}, - service_method_(*Protobuf::DescriptorPool::generated_pool()->FindMethodByName( - "envoy.service.discovery.v2.HealthDiscoveryService.StreamHealthCheck")), + service_method_(Grpc::VersionedMethods( + "envoy.service.health.v3.HealthDiscoveryService.StreamHealthCheck", + "envoy.service.discovery.v2.HealthDiscoveryService.StreamHealthCheck") + .getMethodDescriptorForVersion(transport_api_version)), async_client_(std::move(async_client)), transport_api_version_(transport_api_version), dispatcher_(dispatcher), runtime_(runtime), store_stats_(stats), ssl_context_manager_(ssl_context_manager), random_(random), info_factory_(info_factory), @@ -102,7 +108,8 @@ envoy::service::health::v3::HealthCheckRequestOrEndpointHealthResponse HdsDelega auto* endpoint = response.mutable_endpoint_health_response()->add_endpoints_health(); Network::Utility::addressToProtobufAddress( *host->address(), *endpoint->mutable_endpoint()->mutable_address()); - // TODO(lilika): Add support for more granular options of envoy::api::v2::core::HealthStatus + // TODO(lilika): Add support for more granular options of + // envoy::config::core::v3::HealthStatus if (host->health() == Host::Health::Healthy) { endpoint->set_health_status(envoy::config::core::v3::HEALTHY); } else { @@ -169,10 +176,11 @@ void 
HdsDelegate::processMessage( ENVOY_LOG(debug, "New HdsCluster config {} ", cluster_config.DebugString()); // Create HdsCluster - hds_clusters_.emplace_back(new HdsCluster(admin_, runtime_, cluster_config, bind_config, - store_stats_, ssl_context_manager_, false, - info_factory_, cm_, local_info_, dispatcher_, random_, - singleton_manager_, tls_, validation_visitor_, api_)); + hds_clusters_.emplace_back( + new HdsCluster(admin_, runtime_, std::move(cluster_config), bind_config, store_stats_, + ssl_context_manager_, false, info_factory_, cm_, local_info_, dispatcher_, + random_, singleton_manager_, tls_, validation_visitor_, api_)); + hds_clusters_.back()->initialize([] {}); hds_clusters_.back()->startHealthchecks(access_log_manager_, runtime_, random_, dispatcher_, api_); @@ -186,13 +194,25 @@ void HdsDelegate::onReceiveMessage( stats_.requests_.inc(); ENVOY_LOG(debug, "New health check response message {} ", message->DebugString()); + // Validate message fields + try { + MessageUtil::validate(*message, validation_visitor_); + } catch (const ProtoValidationException& ex) { + // Increment error count + stats_.errors_.inc(); + ENVOY_LOG(warn, "Unable to validate health check specifier: {}", ex.what()); + + // Do not continue processing message + return; + } + // Reset hds_clusters_.clear(); // Set response auto server_response_ms = PROTOBUF_GET_MS_OR_DEFAULT(*message, interval, 1000); - // Process the HealthCheckSpecifier message + // Process the HealthCheckSpecifier message. 
processMessage(std::move(message)); if (server_response_ms_ != server_response_ms) { @@ -214,15 +234,15 @@ void HdsDelegate::onRemoteClose(Grpc::Status::GrpcStatus status, const std::stri } HdsCluster::HdsCluster(Server::Admin& admin, Runtime::Loader& runtime, - const envoy::config::cluster::v3::Cluster& cluster, + envoy::config::cluster::v3::Cluster cluster, const envoy::config::core::v3::BindConfig& bind_config, Stats::Store& stats, Ssl::ContextManager& ssl_context_manager, bool added_via_api, ClusterInfoFactory& info_factory, ClusterManager& cm, const LocalInfo::LocalInfo& local_info, Event::Dispatcher& dispatcher, - Runtime::RandomGenerator& random, Singleton::Manager& singleton_manager, + Random::RandomGenerator& random, Singleton::Manager& singleton_manager, ThreadLocal::SlotAllocator& tls, ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api) - : runtime_(runtime), cluster_(cluster), bind_config_(bind_config), stats_(stats), + : runtime_(runtime), cluster_(std::move(cluster)), bind_config_(bind_config), stats_(stats), ssl_context_manager_(ssl_context_manager), added_via_api_(added_via_api), initial_hosts_(new HostVector()), validation_visitor_(validation_visitor) { ENVOY_LOG(debug, "Creating an HdsCluster"); @@ -232,14 +252,13 @@ HdsCluster::HdsCluster(Server::Admin& admin, Runtime::Loader& runtime, {admin, runtime_, cluster_, bind_config_, stats_, ssl_context_manager_, added_via_api_, cm, local_info, dispatcher, random, singleton_manager, tls, validation_visitor, api}); - for (const auto& host : cluster.load_assignment().endpoints(0).lb_endpoints()) { + for (const auto& host : cluster_.load_assignment().endpoints(0).lb_endpoints()) { initial_hosts_->emplace_back( new HostImpl(info_, "", Network::Address::resolveProtoAddress(host.endpoint().address()), nullptr, 1, envoy::config::core::v3::Locality().default_instance(), envoy::config::endpoint::v3::Endpoint::HealthCheckConfig().default_instance(), 0, envoy::config::core::v3::UNKNOWN)); } - 
initialize([] {}); } ClusterSharedPtr HdsCluster::create() { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } @@ -260,13 +279,13 @@ ProdClusterInfoFactory::createClusterInfo(const CreateClusterInfoParams& params) auto socket_matcher = std::make_unique( params.cluster_.transport_socket_matches(), factory_context, socket_factory, *scope); - return std::make_unique( - params.cluster_, params.bind_config_, params.runtime_, std::move(socket_matcher), - std::move(scope), params.added_via_api_, params.validation_visitor_, factory_context); + return std::make_unique(params.cluster_, params.bind_config_, params.runtime_, + std::move(socket_matcher), std::move(scope), + params.added_via_api_, factory_context); } void HdsCluster::startHealthchecks(AccessLog::AccessLogManager& access_log_manager, - Runtime::Loader& runtime, Runtime::RandomGenerator& random, + Runtime::Loader& runtime, Random::RandomGenerator& random, Event::Dispatcher& dispatcher, Api::Api& api) { for (auto& health_check : cluster_.health_checks()) { health_checkers_.push_back( diff --git a/source/common/upstream/health_discovery_service.h b/source/common/upstream/health_discovery_service.h index 079c22448dc32..6f21bc0701d38 100644 --- a/source/common/upstream/health_discovery_service.h +++ b/source/common/upstream/health_discovery_service.h @@ -1,6 +1,7 @@ #pragma once #include "envoy/api/api.h" +#include "envoy/common/random_generator.h" #include "envoy/config/cluster/v3/cluster.pb.h" #include "envoy/config/core/v3/address.pb.h" #include "envoy/event/dispatcher.h" @@ -12,6 +13,7 @@ #include "common/common/backoff_strategy.h" #include "common/common/logger.h" +#include "common/common/macros.h" #include "common/config/utility.h" #include "common/grpc/async_client_impl.h" #include "common/network/resolver_impl.h" @@ -40,12 +42,12 @@ class HdsCluster : public Cluster, Logger::Loggable { public: static ClusterSharedPtr create(); HdsCluster(Server::Admin& admin, Runtime::Loader& runtime, - const 
envoy::config::cluster::v3::Cluster& cluster, + envoy::config::cluster::v3::Cluster cluster, const envoy::config::core::v3::BindConfig& bind_config, Stats::Store& stats, Ssl::ContextManager& ssl_context_manager, bool added_via_api, ClusterInfoFactory& info_factory, ClusterManager& cm, const LocalInfo::LocalInfo& local_info, Event::Dispatcher& dispatcher, - Runtime::RandomGenerator& random, Singleton::Manager& singleton_manager, + Random::RandomGenerator& random, Singleton::Manager& singleton_manager, ThreadLocal::SlotAllocator& tls, ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api); @@ -62,7 +64,7 @@ class HdsCluster : public Cluster, Logger::Loggable { // Creates and starts healthcheckers to its endpoints void startHealthchecks(AccessLog::AccessLogManager& access_log_manager, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, Event::Dispatcher& dispatcher, + Random::RandomGenerator& random, Event::Dispatcher& dispatcher, Api::Api& api); std::vector healthCheckers() { return health_checkers_; }; @@ -76,7 +78,7 @@ class HdsCluster : public Cluster, Logger::Loggable { std::function initialization_complete_callback_; Runtime::Loader& runtime_; - const envoy::config::cluster::v3::Cluster& cluster_; + const envoy::config::cluster::v3::Cluster cluster_; const envoy::config::core::v3::BindConfig& bind_config_; Stats::Store& stats_; Ssl::ContextManager& ssl_context_manager_; @@ -118,7 +120,7 @@ class HdsDelegate : Grpc::AsyncStreamCallbacks LoadBalancerBase::chooseHostSet(LoadBalancerContext* context) { if (context) { - const auto priority_loads = context->determinePriorityLoad(priority_set_, per_priority_load_); + const auto priority_loads = context->determinePriorityLoad( + priority_set_, per_priority_load_, Upstream::RetryPriority::defaultPriorityMapping); const auto priority_and_source = choosePriority(random_.random(), priority_loads.healthy_priority_load_, @@ -341,7 +342,7 @@ LoadBalancerBase::chooseHostSet(LoadBalancerContext* 
context) { ZoneAwareLoadBalancerBase::ZoneAwareLoadBalancerBase( const PrioritySet& priority_set, const PrioritySet* local_priority_set, ClusterStats& stats, - Runtime::Loader& runtime, Runtime::RandomGenerator& random, + Runtime::Loader& runtime, Random::RandomGenerator& random, const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config) : LoadBalancerBase(priority_set, stats, runtime, random, common_config), local_priority_set_(local_priority_set), @@ -693,7 +694,7 @@ const HostVector& ZoneAwareLoadBalancerBase::hostSourceToHosts(HostsSource hosts EdfLoadBalancerBase::EdfLoadBalancerBase( const PrioritySet& priority_set, const PrioritySet* local_priority_set, ClusterStats& stats, - Runtime::Loader& runtime, Runtime::RandomGenerator& random, + Runtime::Loader& runtime, Random::RandomGenerator& random, const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config) : ZoneAwareLoadBalancerBase(priority_set, local_priority_set, stats, runtime, random, common_config), diff --git a/source/common/upstream/load_balancer_impl.h b/source/common/upstream/load_balancer_impl.h index 4ef1fa0b3dd0a..13ab0884f2853 100644 --- a/source/common/upstream/load_balancer_impl.h +++ b/source/common/upstream/load_balancer_impl.h @@ -1,16 +1,20 @@ #pragma once +#include #include +#include #include #include #include +#include "envoy/common/random_generator.h" #include "envoy/config/cluster/v3/cluster.pb.h" #include "envoy/runtime/runtime.h" #include "envoy/upstream/load_balancer.h" #include "envoy/upstream/upstream.h" #include "common/protobuf/utility.h" +#include "common/runtime/runtime_protos.h" #include "common/upstream/edf_scheduler.h" namespace Envoy { @@ -68,7 +72,7 @@ class LoadBalancerBase : public LoadBalancer { void recalculateLoadInTotalPanic(); LoadBalancerBase(const PrioritySet& priority_set, ClusterStats& stats, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, + Random::RandomGenerator& random, const 
envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config); // Choose host set randomly, based on the healthy_per_priority_load_ and @@ -89,7 +93,7 @@ class LoadBalancerBase : public LoadBalancer { ClusterStats& stats_; Runtime::Loader& runtime_; - Runtime::RandomGenerator& random_; + Random::RandomGenerator& random_; const uint32_t default_healthy_panic_percent_; // The priority-ordered set of hosts to use for load balancing. const PrioritySet& priority_set_; @@ -147,8 +151,8 @@ class LoadBalancerContextBase : public LoadBalancerContext { const Http::RequestHeaderMap* downstreamHeaders() const override { return nullptr; } const HealthyAndDegradedLoad& - determinePriorityLoad(const PrioritySet&, - const HealthyAndDegradedLoad& original_priority_load) override { + determinePriorityLoad(const PrioritySet&, const HealthyAndDegradedLoad& original_priority_load, + const Upstream::RetryPriority::PriorityMappingFunc&) override { return original_priority_load; } @@ -171,7 +175,7 @@ class ZoneAwareLoadBalancerBase : public LoadBalancerBase { // Both priority_set and local_priority_set if non-null must have at least one host set. ZoneAwareLoadBalancerBase( const PrioritySet& priority_set, const PrioritySet* local_priority_set, ClusterStats& stats, - Runtime::Loader& runtime, Runtime::RandomGenerator& random, + Runtime::Loader& runtime, Random::RandomGenerator& random, const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config); ~ZoneAwareLoadBalancerBase() override; @@ -224,7 +228,7 @@ class ZoneAwareLoadBalancerBase : public LoadBalancerBase { struct HostsSourceHash { size_t operator()(const HostsSource& hs) const { - // This is only used for std::unordered_map keys, so we don't need a deterministic hash. + // This is only used for absl::node_hash_map keys, so we don't need a deterministic hash. 
size_t hash = std::hash()(hs.priority_); hash = 37 * hash + std::hash()(static_cast(hs.source_type_)); hash = 37 * hash + std::hash()(hs.locality_index_); @@ -351,7 +355,7 @@ class EdfLoadBalancerBase : public ZoneAwareLoadBalancerBase { public: EdfLoadBalancerBase(const PrioritySet& priority_set, const PrioritySet* local_priority_set, ClusterStats& stats, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, + Random::RandomGenerator& random, const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config); // Upstream::LoadBalancerBase @@ -367,6 +371,8 @@ class EdfLoadBalancerBase : public ZoneAwareLoadBalancerBase { void initialize(); + virtual void refresh(uint32_t priority); + // Seed to allow us to desynchronize load balancers across a fleet. If we don't // do this, multiple Envoys that receive an update at the same time (or even // multiple load balancers on the same host) will send requests to @@ -375,14 +381,13 @@ class EdfLoadBalancerBase : public ZoneAwareLoadBalancerBase { const uint64_t seed_; private: - void refresh(uint32_t priority); virtual void refreshHostSource(const HostsSource& source) PURE; virtual double hostWeight(const Host& host) PURE; virtual HostConstSharedPtr unweightedHostPick(const HostVector& hosts_to_use, const HostsSource& source) PURE; // Scheduler for each valid HostsSource. 
- std::unordered_map scheduler_; + absl::node_hash_map scheduler_; }; /** @@ -393,7 +398,7 @@ class RoundRobinLoadBalancer : public EdfLoadBalancerBase { public: RoundRobinLoadBalancer(const PrioritySet& priority_set, const PrioritySet* local_priority_set, ClusterStats& stats, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, + Random::RandomGenerator& random, const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config) : EdfLoadBalancerBase(priority_set, local_priority_set, stats, runtime, random, common_config) { @@ -417,7 +422,7 @@ class RoundRobinLoadBalancer : public EdfLoadBalancerBase { return hosts_to_use[rr_indexes_[source]++ % hosts_to_use.size()]; } - std::unordered_map rr_indexes_; + absl::node_hash_map rr_indexes_; }; /** @@ -437,11 +442,12 @@ class RoundRobinLoadBalancer : public EdfLoadBalancerBase { * The benefit of the Maglev table is at the expense of resolution, memory usage is capped. * Additionally, the Maglev table can be shared amongst all threads. */ -class LeastRequestLoadBalancer : public EdfLoadBalancerBase { +class LeastRequestLoadBalancer : public EdfLoadBalancerBase, + Logger::Loggable { public: LeastRequestLoadBalancer( const PrioritySet& priority_set, const PrioritySet* local_priority_set, ClusterStats& stats, - Runtime::Loader& runtime, Runtime::RandomGenerator& random, + Runtime::Loader& runtime, Random::RandomGenerator& random, const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config, const absl::optional least_request_config) @@ -450,26 +456,71 @@ class LeastRequestLoadBalancer : public EdfLoadBalancerBase { choice_count_( least_request_config.has_value() ? PROTOBUF_GET_WRAPPED_OR_DEFAULT(least_request_config.value(), choice_count, 2) - : 2) { + : 2), + active_request_bias_runtime_( + least_request_config.has_value() && least_request_config->has_active_request_bias() + ? 
std::make_unique(least_request_config->active_request_bias(), + runtime) + : nullptr) { initialize(); } +protected: + void refresh(uint32_t priority) override { + active_request_bias_ = + active_request_bias_runtime_ != nullptr ? active_request_bias_runtime_->value() : 1.0; + + if (active_request_bias_ < 0.0) { + ENVOY_LOG(warn, "upstream: invalid active request bias supplied (runtime key {}), using 1.0", + active_request_bias_runtime_->runtimeKey()); + active_request_bias_ = 1.0; + } + + EdfLoadBalancerBase::refresh(priority); + } + private: void refreshHostSource(const HostsSource&) override {} double hostWeight(const Host& host) override { - // Here we scale host weight by the number of active requests at the time we do the pick. We - // always add 1 to avoid division by 0. It might be possible to do better by picking two hosts - // off of the schedule, and selecting the one with fewer active requests at the time of - // selection. - // TODO(mattklein123): @htuch brings up the point that how we are scaling weight here might not - // be the only/best way of doing this. Essentially, it makes weight and active requests equally - // important. Are they equally important in practice? There is no right answer here and we might - // want to iterate on this as we gain more experience. - return static_cast(host.weight()) / (host.stats().rq_active_.value() + 1); + // This method is called to calculate the dynamic weight as following when all load balancing + // weights are not equal: + // + // `weight = load_balancing_weight / (active_requests + 1)^active_request_bias` + // + // `active_request_bias` can be configured via runtime and its value is cached in + // `active_request_bias_` to avoid having to do a runtime lookup each time a host weight is + // calculated. + // + // When `active_request_bias == 0.0` we behave like `RoundRobinLoadBalancer` and return the + // host weight without considering the number of active requests at the time we do the pick. 
+ // + // When `active_request_bias > 0.0` we scale the host weight by the number of active + // requests at the time we do the pick. We always add 1 to avoid division by 0. + // + // It might be possible to do better by picking two hosts off of the schedule, and selecting the + // one with fewer active requests at the time of selection. + if (active_request_bias_ == 0.0) { + return host.weight(); + } + + if (active_request_bias_ == 1.0) { + return static_cast(host.weight()) / (host.stats().rq_active_.value() + 1); + } + + return static_cast(host.weight()) / + std::pow(host.stats().rq_active_.value() + 1, active_request_bias_); } HostConstSharedPtr unweightedHostPick(const HostVector& hosts_to_use, const HostsSource& source) override; + const uint32_t choice_count_; + + // The exponent used to calculate host weights can be configured via runtime. We cache it for + // performance reasons and refresh it in `LeastRequestLoadBalancer::refresh(uint32_t priority)` + // whenever a `HostSet` is updated. 
+ double active_request_bias_{}; + + const std::unique_ptr active_request_bias_runtime_; }; /** @@ -478,8 +529,7 @@ class LeastRequestLoadBalancer : public EdfLoadBalancerBase { class RandomLoadBalancer : public ZoneAwareLoadBalancerBase { public: RandomLoadBalancer(const PrioritySet& priority_set, const PrioritySet* local_priority_set, - ClusterStats& stats, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, + ClusterStats& stats, Runtime::Loader& runtime, Random::RandomGenerator& random, const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config) : ZoneAwareLoadBalancerBase(priority_set, local_priority_set, stats, runtime, random, common_config) {} diff --git a/source/common/upstream/load_stats_reporter.cc b/source/common/upstream/load_stats_reporter.cc index 14c707e1c613b..fa5697e86fbd3 100644 --- a/source/common/upstream/load_stats_reporter.cc +++ b/source/common/upstream/load_stats_reporter.cc @@ -17,10 +17,13 @@ LoadStatsReporter::LoadStatsReporter(const LocalInfo::LocalInfo& local_info, : cm_(cluster_manager), stats_{ALL_LOAD_REPORTER_STATS( POOL_COUNTER_PREFIX(scope, "load_reporter."))}, async_client_(std::move(async_client)), transport_api_version_(transport_api_version), - service_method_(*Protobuf::DescriptorPool::generated_pool()->FindMethodByName( - "envoy.service.load_stats.v2.LoadReportingService.StreamLoadStats")), + service_method_( + Grpc::VersionedMethods("envoy.service.load_stats.v3.LoadReportingService.StreamLoadStats", + "envoy.service.load_stats.v2.LoadReportingService.StreamLoadStats") + .getMethodDescriptorForVersion(transport_api_version)), time_source_(dispatcher.timeSource()) { request_.mutable_node()->MergeFrom(local_info.node()); + request_.mutable_node()->add_client_features("envoy.lrs.supports_send_all_clusters"); retry_timer_ = dispatcher.createTimer([this]() -> void { establishNewStream(); }); response_timer_ = dispatcher.createTimer([this]() -> void { sendLoadStatsRequest(); }); establishNewStream(); @@ 
-44,6 +47,20 @@ void LoadStatsReporter::establishNewStream() { } void LoadStatsReporter::sendLoadStatsRequest() { + // TODO(htuch): This sends load reports for only the set of clusters in clusters_, which + // was initialized in startLoadReportPeriod() the last time we either sent a load report + // or received a new LRS response (whichever happened more recently). The code in + // startLoadReportPeriod() adds to clusters_ only those clusters that exist in the + // ClusterManager at the moment when startLoadReportPeriod() runs. This means that if + // a cluster is selected by the LRS server (either by being explicitly listed or by using + // the send_all_clusters field), if that cluster was added to the ClusterManager since the + // last time startLoadReportPeriod() was invoked, we will not report its load here. In + // practice, this means that for any newly created cluster, we will always drop the data for + // the initial load report period. This seems sub-optimal. + // + // One possible way to deal with this would be to get a notification whenever a new cluster is + // added to the cluster manager. When we get the notification, we record the current time in + // clusters_ as the start time for the load reporting window for that cluster. 
request_.mutable_cluster_stats()->Clear(); for (const auto& cluster_name_and_timestamp : clusters_) { const std::string& cluster_name = cluster_name_and_timestamp.first; @@ -56,8 +73,8 @@ void LoadStatsReporter::sendLoadStatsRequest() { auto& cluster = it->second.get(); auto* cluster_stats = request_.add_cluster_stats(); cluster_stats->set_cluster_name(cluster_name); - if (cluster.info()->eds_service_name().has_value()) { - cluster_stats->set_cluster_service_name(cluster.info()->eds_service_name().value()); + if (cluster.info()->edsServiceName().has_value()) { + cluster_stats->set_cluster_service_name(cluster.info()->edsServiceName().value()); } for (auto& host_set : cluster.prioritySet().hostSetsPerPriority()) { ENVOY_LOG(trace, "Load report locality count {}", host_set->hostsPerLocality().get().size()); @@ -123,9 +140,9 @@ void LoadStatsReporter::onReceiveInitialMetadata(Http::ResponseHeaderMapPtr&& me void LoadStatsReporter::onReceiveMessage( std::unique_ptr&& message) { ENVOY_LOG(debug, "New load report epoch: {}", message->DebugString()); - stats_.requests_.inc(); message_ = std::move(message); startLoadReportPeriod(); + stats_.requests_.inc(); } void LoadStatsReporter::startLoadReportPeriod() { @@ -135,26 +152,35 @@ void LoadStatsReporter::startLoadReportPeriod() { // problems due to referencing of temporaries in the below loop with Google's // internal string type. Consider this optimization when the string types // converge. 
- std::unordered_map existing_clusters; - for (const std::string& cluster_name : message_->clusters()) { - if (clusters_.count(cluster_name) > 0) { - existing_clusters.emplace(cluster_name, clusters_[cluster_name]); + absl::node_hash_map existing_clusters; + if (message_->send_all_clusters()) { + for (const auto& p : cm_.clusters()) { + const std::string& cluster_name = p.first; + if (clusters_.count(cluster_name) > 0) { + existing_clusters.emplace(cluster_name, clusters_[cluster_name]); + } + } + } else { + for (const std::string& cluster_name : message_->clusters()) { + if (clusters_.count(cluster_name) > 0) { + existing_clusters.emplace(cluster_name, clusters_[cluster_name]); + } } } clusters_.clear(); // Reset stats for all hosts in clusters we are tracking. - for (const std::string& cluster_name : message_->clusters()) { + auto handle_cluster_func = [this, &existing_clusters](const std::string& cluster_name) { clusters_.emplace(cluster_name, existing_clusters.count(cluster_name) > 0 ? existing_clusters[cluster_name] : time_source_.monotonicTime().time_since_epoch()); auto cluster_info_map = cm_.clusters(); auto it = cluster_info_map.find(cluster_name); if (it == cluster_info_map.end()) { - continue; + return; } // Don't reset stats for existing tracked clusters. 
if (existing_clusters.count(cluster_name) > 0) { - continue; + return; } auto& cluster = it->second.get(); for (auto& host_set : cluster.prioritySet().hostSetsPerPriority()) { @@ -165,6 +191,16 @@ void LoadStatsReporter::startLoadReportPeriod() { } } cluster.info()->loadReportStats().upstream_rq_dropped_.latch(); + }; + if (message_->send_all_clusters()) { + for (const auto& p : cm_.clusters()) { + const std::string& cluster_name = p.first; + handle_cluster_func(cluster_name); + } + } else { + for (const std::string& cluster_name : message_->clusters()) { + handle_cluster_func(cluster_name); + } } response_timer_->enableTimer(std::chrono::milliseconds( DurationUtil::durationToMilliseconds(message_->load_reporting_interval()))); diff --git a/source/common/upstream/load_stats_reporter.h b/source/common/upstream/load_stats_reporter.h index 3334abeec7b41..bd6ecfb393890 100644 --- a/source/common/upstream/load_stats_reporter.h +++ b/source/common/upstream/load_stats_reporter.h @@ -6,6 +6,7 @@ #include "common/common/logger.h" #include "common/grpc/async_client_impl.h" +#include "common/grpc/typed_async_client.h" namespace Envoy { namespace Upstream { @@ -65,7 +66,7 @@ class LoadStatsReporter envoy::service::load_stats::v3::LoadStatsRequest request_; std::unique_ptr message_; // Map from cluster name to start of measurement interval. 
- std::unordered_map clusters_; + absl::node_hash_map clusters_; TimeSource& time_source_; }; diff --git a/source/common/upstream/maglev_lb.cc b/source/common/upstream/maglev_lb.cc index c7e454a97ea79..72e97126296d2 100644 --- a/source/common/upstream/maglev_lb.cc +++ b/source/common/upstream/maglev_lb.cc @@ -101,7 +101,7 @@ uint64_t MaglevTable::permutation(const TableBuildEntry& entry) { MaglevLoadBalancer::MaglevLoadBalancer( const PrioritySet& priority_set, ClusterStats& stats, Stats::Scope& scope, - Runtime::Loader& runtime, Runtime::RandomGenerator& random, + Runtime::Loader& runtime, Random::RandomGenerator& random, const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config, uint64_t table_size) : ThreadAwareLoadBalancerBase(priority_set, stats, runtime, random, common_config), scope_(scope.createScope("maglev_lb.")), stats_(generateStats(*scope_)), diff --git a/source/common/upstream/maglev_lb.h b/source/common/upstream/maglev_lb.h index 12a71e4fcb2d7..24eac7f6e6650 100644 --- a/source/common/upstream/maglev_lb.h +++ b/source/common/upstream/maglev_lb.h @@ -1,5 +1,6 @@ #pragma once +#include "envoy/common/random_generator.h" #include "envoy/config/cluster/v3/cluster.pb.h" #include "envoy/stats/scope.h" #include "envoy/stats/stats_macros.h" @@ -70,7 +71,7 @@ class MaglevTable : public ThreadAwareLoadBalancerBase::HashingLoadBalancer, class MaglevLoadBalancer : public ThreadAwareLoadBalancerBase { public: MaglevLoadBalancer(const PrioritySet& priority_set, ClusterStats& stats, Stats::Scope& scope, - Runtime::Loader& runtime, Runtime::RandomGenerator& random, + Runtime::Loader& runtime, Random::RandomGenerator& random, const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config, uint64_t table_size = MaglevTable::DefaultTableSize); diff --git a/source/common/upstream/original_dst_cluster.h b/source/common/upstream/original_dst_cluster.h index 52d8e56a30dce..14970a46094ae 100644 --- a/source/common/upstream/original_dst_cluster.h +++ 
b/source/common/upstream/original_dst_cluster.h @@ -3,7 +3,6 @@ #include #include #include -#include #include "envoy/config/cluster/v3/cluster.pb.h" #include "envoy/secret/secret_manager.h" diff --git a/source/common/upstream/outlier_detection_impl.cc b/source/common/upstream/outlier_detection_impl.cc index e077a862739e6..e27ab5cca985c 100644 --- a/source/common/upstream/outlier_detection_impl.cc +++ b/source/common/upstream/outlier_detection_impl.cc @@ -268,8 +268,8 @@ DetectorImpl::DetectorImpl(const Cluster& cluster, DetectorImpl::~DetectorImpl() { for (const auto& host : host_monitors_) { if (host.first->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK)) { - ASSERT(stats_.ejections_active_.value() > 0); - stats_.ejections_active_.dec(); + ASSERT(ejections_active_helper_.value() > 0); + ejections_active_helper_.dec(); } } } @@ -301,8 +301,8 @@ void DetectorImpl::initialize(const Cluster& cluster) { for (const HostSharedPtr& host : hosts_removed) { ASSERT(host_monitors_.count(host) == 1); if (host->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK)) { - ASSERT(stats_.ejections_active_.value() > 0); - stats_.ejections_active_.dec(); + ASSERT(ejections_active_helper_.value() > 0); + ejections_active_helper_.dec(); } host_monitors_.erase(host); @@ -335,7 +335,7 @@ void DetectorImpl::checkHostForUneject(HostSharedPtr host, DetectorHostMonitorIm "outlier_detection.base_ejection_time_ms", config_.baseEjectionTimeMs())); ASSERT(monitor->numEjections() > 0); if ((base_eject_time * monitor->numEjections()) <= (now - monitor->lastEjectionTime().value())) { - stats_.ejections_active_.dec(); + ejections_active_helper_.dec(); host->healthFlagClear(Host::HealthFlag::FAILED_OUTLIER_CHECK); // Reset the consecutive failure counters to avoid re-ejection on very few new errors due // to the non-triggering counter being close to its trigger value. 
@@ -451,7 +451,7 @@ void DetectorImpl::ejectHost(HostSharedPtr host, uint64_t max_ejection_percent = std::min( 100, runtime_.snapshot().getInteger("outlier_detection.max_ejection_percent", config_.maxEjectionPercent())); - double ejected_percent = 100.0 * stats_.ejections_active_.value() / host_monitors_.size(); + double ejected_percent = 100.0 * ejections_active_helper_.value() / host_monitors_.size(); // Note this is not currently checked per-priority level, so it is possible // for outlier detection to eject all hosts at any given priority level. if (ejected_percent < max_ejection_percent) { @@ -461,7 +461,7 @@ void DetectorImpl::ejectHost(HostSharedPtr host, stats_.ejections_total_.inc(); } if (enforceEjection(type)) { - stats_.ejections_active_.inc(); + ejections_active_helper_.inc(); updateEnforcedEjectionStats(type); host_monitors_[host]->eject(time_source_.monotonicTime()); runCallbacks(host); diff --git a/source/common/upstream/outlier_detection_impl.h b/source/common/upstream/outlier_detection_impl.h index 39e891e44e577..dcaf3c638757d 100644 --- a/source/common/upstream/outlier_detection_impl.h +++ b/source/common/upstream/outlier_detection_impl.h @@ -6,7 +6,6 @@ #include #include #include -#include #include #include "envoy/access_log/access_log.h" @@ -18,9 +17,12 @@ #include "envoy/http/codes.h" #include "envoy/runtime/runtime.h" #include "envoy/stats/scope.h" +#include "envoy/stats/stats.h" #include "envoy/upstream/outlier_detection.h" #include "envoy/upstream/upstream.h" +#include "absl/container/node_hash_map.h" + namespace Envoy { namespace Upstream { namespace Outlier { @@ -40,7 +42,7 @@ class DetectorHostMonitorNullImpl : public DetectorHostMonitor { double successRate(SuccessRateMonitorType) const override { return -1; } private: - const absl::optional time_; + const absl::optional time_{}; }; /** @@ -387,14 +389,32 @@ class DetectorImpl : public Detector, public std::enable_shared_from_this ejections_active_value_{0}; + }; DetectorConfig config_; 
Event::Dispatcher& dispatcher_; Runtime::Loader& runtime_; TimeSource& time_source_; DetectionStats stats_; + EjectionsActiveHelper ejections_active_helper_{stats_.ejections_active_}; Event::TimerPtr interval_timer_; std::list callbacks_; - std::unordered_map host_monitors_; + absl::node_hash_map host_monitors_; EventLoggerSharedPtr event_logger_; // EjectionPair for external and local origin events. diff --git a/source/common/upstream/resource_manager_impl.h b/source/common/upstream/resource_manager_impl.h index e360c90206b71..f826d36fa1989 100644 --- a/source/common/upstream/resource_manager_impl.h +++ b/source/common/upstream/resource_manager_impl.h @@ -5,15 +5,64 @@ #include #include +#include "envoy/common/resource.h" #include "envoy/runtime/runtime.h" #include "envoy/upstream/resource_manager.h" #include "envoy/upstream/upstream.h" #include "common/common/assert.h" +#include "common/common/basic_resource_impl.h" namespace Envoy { namespace Upstream { +struct ManagedResourceImpl : public BasicResourceLimitImpl { + ManagedResourceImpl(uint64_t max, Runtime::Loader& runtime, const std::string& runtime_key, + Stats::Gauge& open_gauge, Stats::Gauge& remaining) + : BasicResourceLimitImpl(max, runtime, runtime_key), open_gauge_(open_gauge), + remaining_(remaining) { + remaining_.set(max); + } + + // Upstream::Resource + bool canCreate() override { return current_ < max(); } + void inc() override { + BasicResourceLimitImpl::inc(); + updateRemaining(); + open_gauge_.set(BasicResourceLimitImpl::canCreate() ? 0 : 1); + } + void decBy(uint64_t amount) override { + BasicResourceLimitImpl::decBy(amount); + updateRemaining(); + open_gauge_.set(BasicResourceLimitImpl::canCreate() ? 0 : 1); + } + + /** + * We set the gauge instead of incrementing and decrementing because, + * though atomics are used, it is possible for the current resource count + * to be greater than the supplied max. 
+ */ + void updateRemaining() { + /** + * We cannot use std::max here because max() and current_ are + * unsigned and subtracting them may overflow. + */ + const uint64_t current_copy = current_; + remaining_.set(max() > current_copy ? max() - current_copy : 0); + } + + /** + * A gauge to notify the live circuit breaker state. The gauge is set to 0 + * to notify that the circuit breaker is not yet triggered. + */ + Stats::Gauge& open_gauge_; + + /** + * The number of resources remaining before the circuit breaker opens. + */ + Stats::Gauge& remaining_; +}; + /** * Implementation of ResourceManager. * NOTE: This implementation makes some assumptions which favor simplicity over correctness. @@ -44,78 +93,21 @@ class ResourceManagerImpl : public ResourceManager { pending_requests_) {} // Upstream::ResourceManager - Resource& connections() override { return connections_; } - Resource& pendingRequests() override { return pending_requests_; } - Resource& requests() override { return requests_; } - Resource& retries() override { return retries_; } - Resource& connectionPools() override { return connection_pools_; } + ResourceLimit& connections() override { return connections_; } + ResourceLimit& pendingRequests() override { return pending_requests_; } + ResourceLimit& requests() override { return requests_; } + ResourceLimit& retries() override { return retries_; } + ResourceLimit& connectionPools() override { return connection_pools_; } private: - struct ResourceImpl : public Resource { - ResourceImpl(uint64_t max, Runtime::Loader& runtime, const std::string& runtime_key, - Stats::Gauge& open_gauge, Stats::Gauge& remaining) - : max_(max), runtime_(runtime), runtime_key_(runtime_key), open_gauge_(open_gauge), - remaining_(remaining) { - remaining_.set(max); - } - ~ResourceImpl() override { ASSERT(current_ == 0); } - - // Upstream::Resource - bool canCreate() override { return current_ < max(); } - void inc() override { - current_++; - updateRemaining(); - 
open_gauge_.set(canCreate() ? 0 : 1); - } - void dec() override { decBy(1); } - void decBy(uint64_t amount) override { - ASSERT(current_ >= amount); - current_ -= amount; - updateRemaining(); - open_gauge_.set(canCreate() ? 0 : 1); - } - uint64_t max() override { return runtime_.snapshot().getInteger(runtime_key_, max_); } - uint64_t count() const override { return current_.load(); } - - /** - * We set the gauge instead of incrementing and decrementing because, - * though atomics are used, it is possible for the current resource count - * to be greater than the supplied max. - */ - void updateRemaining() { - /** - * We cannot use std::max here because max() and current_ are - * unsigned and subtracting them may overflow. - */ - const uint64_t current_copy = current_; - remaining_.set(max() > current_copy ? max() - current_copy : 0); - } - - const uint64_t max_; - std::atomic current_{}; - Runtime::Loader& runtime_; - const std::string runtime_key_; - - /** - * A gauge to notify the live circuit breaker state. The gauge is set to 0 - * to notify that the circuit breaker is not yet triggered. - */ - Stats::Gauge& open_gauge_; - - /** - * The number of resources remaining before the circuit breaker opens. 
- */ - Stats::Gauge& remaining_; - }; - - class RetryBudgetImpl : public Resource { + class RetryBudgetImpl : public ResourceLimit { public: RetryBudgetImpl(absl::optional budget_percent, absl::optional min_retry_concurrency, uint64_t max_retries, Runtime::Loader& runtime, const std::string& retry_budget_runtime_key, const std::string& max_retries_runtime_key, Stats::Gauge& open_gauge, - Stats::Gauge& remaining, const Resource& requests, - const Resource& pending_requests) + Stats::Gauge& remaining, const ResourceLimit& requests, + const ResourceLimit& pending_requests) : runtime_(runtime), max_retry_resource_(max_retries, runtime, max_retries_runtime_key, open_gauge, remaining), budget_percent_(budget_percent), min_retry_concurrency_(min_retry_concurrency), @@ -123,7 +115,7 @@ class ResourceManagerImpl : public ResourceManager { min_retry_concurrency_key_(retry_budget_runtime_key + "min_retry_concurrency"), requests_(requests), pending_requests_(pending_requests), remaining_(remaining) {} - // Upstream::Resource + // Envoy::ResourceLimit bool canCreate() override { if (!useRetryBudget()) { return max_retry_resource_.canCreate(); @@ -182,20 +174,20 @@ class ResourceManagerImpl : public ResourceManager { Runtime::Loader& runtime_; // The max_retry resource is nested within the budget to maintain state if the retry budget is // toggled. 
- ResourceImpl max_retry_resource_; + ManagedResourceImpl max_retry_resource_; const absl::optional budget_percent_; const absl::optional min_retry_concurrency_; const std::string budget_percent_key_; const std::string min_retry_concurrency_key_; - const Resource& requests_; - const Resource& pending_requests_; + const ResourceLimit& requests_; + const ResourceLimit& pending_requests_; Stats::Gauge& remaining_; }; - ResourceImpl connections_; - ResourceImpl pending_requests_; - ResourceImpl requests_; - ResourceImpl connection_pools_; + ManagedResourceImpl connections_; + ManagedResourceImpl pending_requests_; + ManagedResourceImpl requests_; + ManagedResourceImpl connection_pools_; RetryBudgetImpl retries_; }; diff --git a/source/common/upstream/ring_hash_lb.cc b/source/common/upstream/ring_hash_lb.cc index 7953d55a4bae4..28b64bdefd753 100644 --- a/source/common/upstream/ring_hash_lb.cc +++ b/source/common/upstream/ring_hash_lb.cc @@ -18,7 +18,7 @@ namespace Upstream { RingHashLoadBalancer::RingHashLoadBalancer( const PrioritySet& priority_set, ClusterStats& stats, Stats::Scope& scope, - Runtime::Loader& runtime, Runtime::RandomGenerator& random, + Runtime::Loader& runtime, Random::RandomGenerator& random, const absl::optional& config, const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config) : ThreadAwareLoadBalancerBase(priority_set, stats, runtime, random, common_config), @@ -168,7 +168,7 @@ RingHashLoadBalancer::Ring::Ring(const NormalizedHostWeightVector& normalized_ho const uint64_t hash = (hash_function == HashFunction::Cluster_RingHashLbConfig_HashFunction_MURMUR_HASH_2) - ? MurmurHash::murmurHash2_64(hash_key, MurmurHash::STD_HASH_SEED) + ? 
MurmurHash::murmurHash2(hash_key, MurmurHash::STD_HASH_SEED) : HashUtil::xxHash64(hash_key); ENVOY_LOG(trace, "ring hash: hash_key={} hash={}", hash_key.data(), hash); diff --git a/source/common/upstream/ring_hash_lb.h b/source/common/upstream/ring_hash_lb.h index 9353d34715a7a..288f2cfb105ca 100644 --- a/source/common/upstream/ring_hash_lb.h +++ b/source/common/upstream/ring_hash_lb.h @@ -42,7 +42,7 @@ class RingHashLoadBalancer : public ThreadAwareLoadBalancerBase, public: RingHashLoadBalancer( const PrioritySet& priority_set, ClusterStats& stats, Stats::Scope& scope, - Runtime::Loader& runtime, Runtime::RandomGenerator& random, + Runtime::Loader& runtime, Random::RandomGenerator& random, const absl::optional& config, const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config); diff --git a/source/common/upstream/static_cluster.h b/source/common/upstream/static_cluster.h index ed7a97b57fc17..b5de7b76b8dd6 100644 --- a/source/common/upstream/static_cluster.h +++ b/source/common/upstream/static_cluster.h @@ -44,5 +44,7 @@ class StaticClusterFactory : public ClusterFactoryImplBase { Stats::ScopePtr&& stats_scope) override; }; +DECLARE_FACTORY(StaticClusterFactory); + } // namespace Upstream } // namespace Envoy diff --git a/source/common/upstream/strict_dns_cluster.cc b/source/common/upstream/strict_dns_cluster.cc index eb7477b5b5cf2..279bf47bef277 100644 --- a/source/common/upstream/strict_dns_cluster.cc +++ b/source/common/upstream/strict_dns_cluster.cc @@ -118,7 +118,7 @@ void StrictDnsClusterImpl::ResolveTarget::startResolve() { if (status == Network::DnsResolver::ResolutionStatus::Success) { parent_.info_->stats().update_success_.inc(); - std::unordered_map updated_hosts; + absl::node_hash_map updated_hosts; HostVector new_hosts; std::chrono::seconds ttl_refresh_rate = std::chrono::seconds::max(); for (const auto& resp : response) { diff --git a/source/common/upstream/subset_lb.cc b/source/common/upstream/subset_lb.cc index 
1cf9de7230a29..6fe34d4853b7d 100644 --- a/source/common/upstream/subset_lb.cc +++ b/source/common/upstream/subset_lb.cc @@ -1,7 +1,6 @@ #include "common/upstream/subset_lb.h" #include -#include #include "envoy/config/cluster/v3/cluster.pb.h" #include "envoy/config/core/v3/base.pb.h" @@ -15,13 +14,15 @@ #include "common/upstream/maglev_lb.h" #include "common/upstream/ring_hash_lb.h" +#include "absl/container/node_hash_set.h" + namespace Envoy { namespace Upstream { SubsetLoadBalancer::SubsetLoadBalancer( LoadBalancerType lb_type, PrioritySet& priority_set, const PrioritySet* local_priority_set, ClusterStats& stats, Stats::Scope& scope, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, const LoadBalancerSubsetInfo& subsets, + Random::RandomGenerator& random, const LoadBalancerSubsetInfo& subsets, const absl::optional& lb_ring_hash_config, const absl::optional& @@ -83,6 +84,8 @@ SubsetLoadBalancer::SubsetLoadBalancer( // This is a regular update with deltas. update(priority, hosts_added, hosts_removed); } + + purgeEmptySubsets(subsets_); }); } @@ -91,7 +94,7 @@ SubsetLoadBalancer::~SubsetLoadBalancer() { // Ensure gauges reflect correct values. 
forEachSubset(subsets_, [&](LbSubsetEntryPtr entry) { - if (entry->initialized() && entry->active()) { + if (entry->active()) { stats_.lb_subsets_removed_.inc(); stats_.lb_subsets_active_.dec(); } @@ -363,8 +366,8 @@ void SubsetLoadBalancer::updateFallbackSubset(uint32_t priority, const HostVecto void SubsetLoadBalancer::processSubsets( const HostVector& hosts_added, const HostVector& hosts_removed, std::function update_cb, - std::function new_cb) { - std::unordered_set subsets_modified; + std::function new_cb) { + absl::node_hash_set subsets_modified; std::pair steps[] = {{hosts_added, true}, {hosts_removed, false}}; for (const auto& step : steps) { @@ -392,7 +395,9 @@ void SubsetLoadBalancer::processSubsets( HostPredicate predicate = [this, kvs](const Host& host) -> bool { return hostMatches(kvs, host); }; - new_cb(entry, predicate, kvs, adding_hosts); + if (adding_hosts) { + new_cb(entry, predicate, kvs); + } } } } @@ -421,31 +426,18 @@ void SubsetLoadBalancer::update(uint32_t priority, const HostVector& hosts_added processSubsets( hosts_added, hosts_removed, [&](LbSubsetEntryPtr entry) { - const bool active_before = entry->active(); entry->priority_subset_->update(priority, hosts_added, hosts_removed); - - if (active_before && !entry->active()) { - stats_.lb_subsets_active_.dec(); - stats_.lb_subsets_removed_.inc(); - } else if (!active_before && entry->active()) { - stats_.lb_subsets_active_.inc(); - stats_.lb_subsets_created_.inc(); - } }, - [&](LbSubsetEntryPtr entry, HostPredicate predicate, const SubsetMetadata& kvs, - bool adding_host) { - UNREFERENCED_PARAMETER(kvs); - if (adding_host) { - ENVOY_LOG(debug, "subset lb: creating load balancer for {}", describeMetadata(kvs)); - - // Initialize new entry with hosts and update stats. (An uninitialized entry - // with only removed hosts is a degenerate case and we leave the entry - // uninitialized.) 
- entry->priority_subset_ = std::make_shared( - *this, predicate, locality_weight_aware_, scale_locality_weight_); - stats_.lb_subsets_active_.inc(); - stats_.lb_subsets_created_.inc(); - } + [&](LbSubsetEntryPtr entry, HostPredicate predicate, const SubsetMetadata& kvs) { + ENVOY_LOG(debug, "subset lb: creating load balancer for {}", describeMetadata(kvs)); + + // Initialize new entry with hosts and update stats. (An uninitialized entry + // with only removed hosts is a degenerate case and we leave the entry + // uninitialized.) + entry->priority_subset_ = std::make_shared( + *this, predicate, locality_weight_aware_, scale_locality_weight_); + stats_.lb_subsets_active_.inc(); + stats_.lb_subsets_created_.inc(); }); } @@ -593,6 +585,39 @@ void SubsetLoadBalancer::forEachSubset(LbSubsetMap& subsets, } } +void SubsetLoadBalancer::purgeEmptySubsets(LbSubsetMap& subsets) { + for (auto subset_it = subsets.begin(); subset_it != subsets.end();) { + for (auto it = subset_it->second.begin(); it != subset_it->second.end();) { + LbSubsetEntryPtr entry = it->second; + + purgeEmptySubsets(entry->children_); + + if (entry->active() || entry->hasChildren()) { + it++; + continue; + } + + // If it wasn't initialized, it wasn't accounted for. + if (entry->initialized()) { + stats_.lb_subsets_active_.dec(); + stats_.lb_subsets_removed_.inc(); + } + + auto next_it = std::next(it); + subset_it->second.erase(it); + it = next_it; + } + + if (subset_it->second.empty()) { + auto next_subset_it = std::next(subset_it); + subsets.erase(subset_it); + subset_it = next_subset_it; + } else { + subset_it++; + } + } +} + // Initialize a new HostSubsetImpl and LoadBalancer from the SubsetLoadBalancer, filtering hosts // with the given predicate. 
SubsetLoadBalancer::PrioritySubsetImpl::PrioritySubsetImpl(const SubsetLoadBalancer& subset_lb, @@ -671,8 +696,8 @@ void SubsetLoadBalancer::HostSubsetImpl::update(const HostVector& hosts_added, // that we maintain a consistent view of the metadata and saves on computation // since metadata lookups can be expensive. // - // We use an unordered_set because this can potentially be in the tens of thousands. - std::unordered_set matching_hosts; + // We use an unordered container because this can potentially be in the tens of thousands. + absl::node_hash_set matching_hosts; auto cached_predicate = [&matching_hosts](const auto& host) { return matching_hosts.count(&host) == 1; diff --git a/source/common/upstream/subset_lb.h b/source/common/upstream/subset_lb.h index 29d984210fa3f..c9fcb8d64eedb 100644 --- a/source/common/upstream/subset_lb.h +++ b/source/common/upstream/subset_lb.h @@ -4,7 +4,6 @@ #include #include #include -#include #include "envoy/config/cluster/v3/cluster.pb.h" #include "envoy/runtime/runtime.h" @@ -16,6 +15,7 @@ #include "common/protobuf/utility.h" #include "common/upstream/upstream_impl.h" +#include "absl/container/node_hash_map.h" #include "absl/types/optional.h" namespace Envoy { @@ -26,7 +26,7 @@ class SubsetLoadBalancer : public LoadBalancer, Logger::Loggable& lb_ring_hash_config, const absl::optional& @@ -63,9 +63,6 @@ class SubsetLoadBalancer : public LoadBalancer, Logger::Loggable(&getOrCreateHostSet(priority)); - } - void triggerCallbacks() { for (size_t i = 0; i < hostSetsPerPriority().size(); ++i) { runReferenceUpdateCallbacks(i, {}, {}); @@ -127,8 +120,8 @@ class SubsetLoadBalancer : public LoadBalancer, Logger::Loggable; using SubsetSelectorMapPtr = std::shared_ptr; - using ValueSubsetMap = std::unordered_map; - using LbSubsetMap = std::unordered_map; + using ValueSubsetMap = absl::node_hash_map; + using LbSubsetMap = absl::node_hash_map; using SubsetSelectorFallbackParamsRef = std::reference_wrapper; class LoadBalancerContextWrapper : 
public LoadBalancerContext { @@ -147,10 +140,11 @@ class SubsetLoadBalancer : public LoadBalancer, Logger::LoggabledownstreamHeaders(); } - const HealthyAndDegradedLoad& - determinePriorityLoad(const PrioritySet& priority_set, - const HealthyAndDegradedLoad& original_priority_load) override { - return wrapped_->determinePriorityLoad(priority_set, original_priority_load); + const HealthyAndDegradedLoad& determinePriorityLoad( + const PrioritySet& priority_set, const HealthyAndDegradedLoad& original_priority_load, + const Upstream::RetryPriority::PriorityMappingFunc& priority_mapping_func) override { + return wrapped_->determinePriorityLoad(priority_set, original_priority_load, + priority_mapping_func); } bool shouldSelectAnotherHost(const Host& host) override { return wrapped_->shouldSelectAnotherHost(host); @@ -177,7 +171,7 @@ class SubsetLoadBalancer : public LoadBalancer, Logger::Loggable subset_keys_; + absl::node_hash_map subset_keys_; SubsetSelectorFallbackParams fallback_params_; }; @@ -188,6 +182,7 @@ class SubsetLoadBalancer : public LoadBalancer, Logger::Loggableempty(); } + bool hasChildren() const { return !children_.empty(); } LbSubsetMap children_; @@ -204,10 +199,10 @@ class SubsetLoadBalancer : public LoadBalancer, Logger::Loggable update_cb, - std::function cb); + void + processSubsets(const HostVector& hosts_added, const HostVector& hosts_removed, + std::function update_cb, + std::function cb); HostConstSharedPtr tryChooseHostFromContext(LoadBalancerContext* context, bool& host_chosen); @@ -222,6 +217,7 @@ class SubsetLoadBalancer : public LoadBalancer, Logger::Loggable cb); + void purgeEmptySubsets(LbSubsetMap& subsets); std::vector extractSubsetMetadata(const std::set& subset_keys, const Host& host); @@ -235,7 +231,7 @@ class SubsetLoadBalancer : public LoadBalancer, Logger::Loggable; struct LoadBalancerImpl : public LoadBalancer { - LoadBalancerImpl(ClusterStats& stats, Runtime::RandomGenerator& random) + LoadBalancerImpl(ClusterStats& stats, 
Random::RandomGenerator& random) : stats_(stats), random_(random) {} // Upstream::LoadBalancer HostConstSharedPtr chooseHost(LoadBalancerContext* context) override; ClusterStats& stats_; - Runtime::RandomGenerator& random_; + Random::RandomGenerator& random_; std::shared_ptr> per_priority_state_; std::shared_ptr healthy_per_priority_load_; std::shared_ptr degraded_per_priority_load_; }; struct LoadBalancerFactoryImpl : public LoadBalancerFactory { - LoadBalancerFactoryImpl(ClusterStats& stats, Runtime::RandomGenerator& random) + LoadBalancerFactoryImpl(ClusterStats& stats, Random::RandomGenerator& random) : stats_(stats), random_(random) {} // Upstream::LoadBalancerFactory LoadBalancerPtr create() override; ClusterStats& stats_; - Runtime::RandomGenerator& random_; + Random::RandomGenerator& random_; absl::Mutex mutex_; std::shared_ptr> per_priority_state_ ABSL_GUARDED_BY(mutex_); // This is split out of PerPriorityState so LoadBalancerBase::ChoosePriority can be reused. diff --git a/source/common/upstream/upstream_impl.cc b/source/common/upstream/upstream_impl.cc index 49e56528834f2..d98d672ffd8fb 100644 --- a/source/common/upstream/upstream_impl.cc +++ b/source/common/upstream/upstream_impl.cc @@ -6,7 +6,6 @@ #include #include #include -#include #include #include "envoy/config/cluster/v3/circuit_breaker.pb.h" @@ -19,21 +18,26 @@ #include "envoy/event/dispatcher.h" #include "envoy/event/timer.h" #include "envoy/network/dns.h" +#include "envoy/network/transport_socket.h" #include "envoy/secret/secret_manager.h" #include "envoy/server/filter_config.h" #include "envoy/server/transport_socket_config.h" #include "envoy/ssl/context_manager.h" #include "envoy/stats/scope.h" #include "envoy/upstream/health_checker.h" +#include "envoy/upstream/upstream.h" #include "common/common/enum_to_int.h" #include "common/common/fmt.h" #include "common/common/utility.h" #include "common/config/utility.h" +#include "common/http/http1/codec_stats.h" +#include 
"common/http/http2/codec_stats.h" #include "common/http/utility.h" #include "common/network/address_impl.h" #include "common/network/resolver_impl.h" #include "common/network/socket_option_factory.h" +#include "common/network/socket_option_impl.h" #include "common/protobuf/protobuf.h" #include "common/protobuf/utility.h" #include "common/router/config_utility.h" @@ -48,6 +52,7 @@ #include "extensions/filters/network/common/utility.h" #include "extensions/transport_sockets/well_known_names.h" +#include "absl/container/node_hash_set.h" #include "absl/strings/str_cat.h" namespace Envoy { @@ -99,6 +104,12 @@ parseClusterSocketOptions(const envoy::config::cluster::v3::Cluster& config, const envoy::config::core::v3::BindConfig bind_config) { Network::ConnectionSocket::OptionsSharedPtr cluster_options = std::make_shared(); + // The process-wide `signal()` handling may fail to handle SIGPIPE if overridden + // in the process (i.e., on a mobile client). Some OSes support handling it at the socket layer: + if (ENVOY_SOCKET_SO_NOSIGPIPE.hasValue()) { + Network::Socket::appendOptions(cluster_options, + Network::SocketOptionFactory::buildSocketNoSigpipeOptions()); + } // Cluster IP_FREEBIND settings, when set, will override the cluster manager wide settings. 
if ((bind_config.freebind().value() && !config.upstream_bind_config().has_freebind()) || config.upstream_bind_config().freebind().value()) { @@ -128,7 +139,7 @@ parseClusterSocketOptions(const envoy::config::cluster::v3::Cluster& config, ProtocolOptionsConfigConstSharedPtr createProtocolOptionsConfig(const std::string& name, const ProtobufWkt::Any& typed_config, const ProtobufWkt::Struct& config, - ProtobufMessage::ValidationVisitor& validation_visitor) { + Server::Configuration::ProtocolOptionsFactoryContext& factory_context) { Server::Configuration::ProtocolOptionsFactory* factory = Registry::FactoryRegistry::getFactory( name); @@ -149,15 +160,15 @@ createProtocolOptionsConfig(const std::string& name, const ProtobufWkt::Any& typ throw EnvoyException(fmt::format("filter {} does not support protocol options", name)); } - Envoy::Config::Utility::translateOpaqueConfig(typed_config, config, validation_visitor, - *proto_config); + Envoy::Config::Utility::translateOpaqueConfig( + typed_config, config, factory_context.messageValidationVisitor(), *proto_config); - return factory->createProtocolOptionsConfig(*proto_config, validation_visitor); + return factory->createProtocolOptionsConfig(*proto_config, factory_context); } -std::map -parseExtensionProtocolOptions(const envoy::config::cluster::v3::Cluster& config, - ProtobufMessage::ValidationVisitor& validation_visitor) { +std::map parseExtensionProtocolOptions( + const envoy::config::cluster::v3::Cluster& config, + Server::Configuration::ProtocolOptionsFactoryContext& factory_context) { if (!config.typed_extension_protocol_options().empty() && !config.hidden_envoy_deprecated_extension_protocol_options().empty()) { throw EnvoyException("Only one of typed_extension_protocol_options or " @@ -173,7 +184,7 @@ parseExtensionProtocolOptions(const envoy::config::cluster::v3::Cluster& config, auto& name = Extensions::NetworkFilters::Common::FilterNameUtil::canonicalFilterName(it.first); auto object = createProtocolOptionsConfig( - 
name, it.second, ProtobufWkt::Struct::default_instance(), validation_visitor); + name, it.second, ProtobufWkt::Struct::default_instance(), factory_context); if (object != nullptr) { options[name] = std::move(object); } @@ -186,7 +197,7 @@ parseExtensionProtocolOptions(const envoy::config::cluster::v3::Cluster& config, auto& name = Extensions::NetworkFilters::Common::FilterNameUtil::canonicalFilterName(it.first); auto object = createProtocolOptionsConfig(name, ProtobufWkt::Any::default_instance(), it.second, - validation_visitor); + factory_context); if (object != nullptr) { options[name] = std::move(object); } @@ -222,8 +233,8 @@ bool updateHealthFlag(const Host& updated_host, Host& existing_host, Host::Healt // Converts a set of hosts into a HostVector, excluding certain hosts. // @param hosts hosts to convert // @param excluded_hosts hosts to exclude from the resulting vector. -HostVector filterHosts(const std::unordered_set& hosts, - const std::unordered_set& excluded_hosts) { +HostVector filterHosts(const absl::node_hash_set& hosts, + const absl::node_hash_set& excluded_hosts) { HostVector net_hosts; net_hosts.reserve(hosts.size()); @@ -268,7 +279,7 @@ HostDescriptionImpl::HostDescriptionImpl( Network::TransportSocketFactory& HostDescriptionImpl::resolveTransportSocketFactory( const Network::Address::InstanceConstSharedPtr& dest_address, - const envoy::config::core::v3::Metadata* metadata) { + const envoy::config::core::v3::Metadata* metadata) const { auto match = cluster_->transportSocketMatcher().resolve(metadata); match.stats_.total_match_count_.inc(); ENVOY_LOG(debug, "transport socket match, socket {} selected for host with address {}", @@ -305,8 +316,13 @@ void HostImpl::setEdsHealthFlag(envoy::config::core::v3::HealthStatus health_sta Host::CreateConnectionData HostImpl::createHealthCheckConnection( Event::Dispatcher& dispatcher, - Network::TransportSocketOptionsSharedPtr transport_socket_options) const { - return {createConnection(dispatcher, *cluster_, 
healthCheckAddress(), socket_factory_, nullptr, + Network::TransportSocketOptionsSharedPtr transport_socket_options, + const envoy::config::core::v3::Metadata* metadata) const { + + Network::TransportSocketFactory& factory = + (metadata != nullptr) ? resolveTransportSocketFactory(healthCheckAddress(), metadata) + : socket_factory_; + return {createConnection(dispatcher, *cluster_, healthCheckAddress(), factory, nullptr, transport_socket_options), shared_from_this()}; } @@ -424,7 +440,7 @@ void HostSetImpl::rebuildLocalityScheduler( // scheduler. // // TODO(htuch): if the underlying locality index -> - // envoy::api::v2::core::Locality hasn't changed in hosts_/healthy_hosts_/degraded_hosts_, we + // envoy::config::core::v3::Locality hasn't changed in hosts_/healthy_hosts_/degraded_hosts_, we // could just update locality_weight_ without rebuilding. Similar to how host // level WRR works, we would age out the existing entries via picks and lazily // apply the new weights. @@ -603,6 +619,11 @@ ClusterStats ClusterInfoImpl::generateStats(Stats::Scope& scope) { return {ALL_CLUSTER_STATS(POOL_COUNTER(scope), POOL_GAUGE(scope), POOL_HISTOGRAM(scope))}; } +ClusterRequestResponseSizeStats +ClusterInfoImpl::generateRequestResponseSizeStats(Stats::Scope& scope) { + return {ALL_CLUSTER_REQUEST_RESPONSE_SIZE_STATS(POOL_HISTOGRAM(scope))}; +} + ClusterLoadReportStats ClusterInfoImpl::generateLoadReportStats(Stats::Scope& scope) { return {ALL_CLUSTER_LOAD_REPORT_STATS(POOL_COUNTER(scope))}; } @@ -626,7 +647,7 @@ class FactoryContextImpl : public Server::Configuration::CommonFactoryContext { Upstream::ClusterManager& clusterManager() override { return cluster_manager_; } Event::Dispatcher& dispatcher() override { return dispatcher_; } const LocalInfo::LocalInfo& localInfo() const override { return local_info_; } - Envoy::Runtime::RandomGenerator& random() override { return random_; } + Envoy::Random::RandomGenerator& random() override { return random_; } Envoy::Runtime::Loader& 
runtime() override { return runtime_; } Stats::Scope& scope() override { return stats_scope_; } Singleton::Manager& singletonManager() override { return singleton_manager_; } @@ -645,7 +666,7 @@ class FactoryContextImpl : public Server::Configuration::CommonFactoryContext { Upstream::ClusterManager& cluster_manager_; const LocalInfo::LocalInfo& local_info_; Event::Dispatcher& dispatcher_; - Envoy::Runtime::RandomGenerator& random_; + Envoy::Random::RandomGenerator& random_; Envoy::Runtime::Loader& runtime_; Singleton::Manager& singleton_manager_; ThreadLocal::SlotAllocator& tls_; @@ -656,7 +677,6 @@ ClusterInfoImpl::ClusterInfoImpl( const envoy::config::cluster::v3::Cluster& config, const envoy::config::core::v3::BindConfig& bind_config, Runtime::Loader& runtime, TransportSocketMatcherPtr&& socket_matcher, Stats::ScopePtr&& stats_scope, bool added_via_api, - ProtobufMessage::ValidationVisitor& validation_visitor, Server::Configuration::TransportSocketFactoryContext& factory_context) : runtime_(runtime), name_(config.name()), type_(config.type()), max_requests_per_connection_( @@ -667,25 +687,32 @@ ClusterInfoImpl::ClusterInfoImpl( Http::DEFAULT_MAX_HEADERS_COUNT))), connect_timeout_( std::chrono::milliseconds(PROTOBUF_GET_MS_REQUIRED(config, connect_timeout))), + prefetch_ratio_( + PROTOBUF_GET_WRAPPED_OR_DEFAULT(config.prefetch_policy(), prefetch_ratio, 1.0)), per_connection_buffer_limit_bytes_( PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, per_connection_buffer_limit_bytes, 1024 * 1024)), socket_matcher_(std::move(socket_matcher)), stats_scope_(std::move(stats_scope)), stats_(generateStats(*stats_scope_)), load_report_stats_store_(stats_scope_->symbolTable()), load_report_stats_(generateLoadReportStats(load_report_stats_store_)), - timeout_budget_stats_(config.track_timeout_budgets() - ? absl::make_optional( - generateTimeoutBudgetStats(*stats_scope_)) - : absl::nullopt), + optional_cluster_stats_((config.has_track_cluster_stats() || config.track_timeout_budgets()) + ? 
std::make_unique(config, *stats_scope_) + : nullptr), features_(parseFeatures(config)), http1_settings_(Http::Utility::parseHttp1Settings(config.http_protocol_options())), http2_options_(Http2::Utility::initializeAndValidateOptions(config.http2_protocol_options())), - extension_protocol_options_(parseExtensionProtocolOptions(config, validation_visitor)), + common_http_protocol_options_(config.common_http_protocol_options()), + extension_protocol_options_(parseExtensionProtocolOptions(config, factory_context)), resource_managers_(config, runtime, name_, *stats_scope_), maintenance_mode_runtime_key_(absl::StrCat("upstream.maintenance_mode.", name_)), source_address_(getSourceAddress(config, bind_config)), lb_least_request_config_(config.least_request_lb_config()), lb_ring_hash_config_(config.ring_hash_lb_config()), - lb_original_dst_config_(config.original_dst_lb_config()), added_via_api_(added_via_api), + lb_original_dst_config_(config.original_dst_lb_config()), + upstream_config_(config.has_upstream_config() + ? absl::make_optional( + config.upstream_config()) + : absl::nullopt), + added_via_api_(added_via_api), lb_subset_(LoadBalancerSubsetInfoImpl(config.lb_subset_config())), metadata_(config.metadata()), typed_metadata_(config.metadata()), common_lb_config_(config.common_lb_config()), @@ -847,7 +874,8 @@ void ClusterInfoImpl::createNetworkFilterChain(Network::Connection& connection) Http::Protocol ClusterInfoImpl::upstreamHttpProtocol(absl::optional downstream_protocol) const { - if (features_ & Upstream::ClusterInfo::Features::USE_DOWNSTREAM_PROTOCOL) { + if (downstream_protocol.has_value() && + features_ & Upstream::ClusterInfo::Features::USE_DOWNSTREAM_PROTOCOL) { return downstream_protocol.value(); } else { return (features_ & Upstream::ClusterInfo::Features::HTTP2) ? 
Http::Protocol::Http2 @@ -863,17 +891,15 @@ ClusterImplBase::ClusterImplBase( init_watcher_("ClusterImplBase", [this]() { onInitDone(); }), runtime_(runtime), local_cluster_(factory_context.clusterManager().localClusterName().value_or("") == cluster.name()), - symbol_table_(stats_scope->symbolTable()), const_metadata_shared_pool_(Config::Metadata::getConstMetadataSharedPool( factory_context.singletonManager(), factory_context.dispatcher())) { factory_context.setInitManager(init_manager_); auto socket_factory = createTransportSocketFactory(cluster, factory_context); auto socket_matcher = std::make_unique( cluster.transport_socket_matches(), factory_context, socket_factory, *stats_scope); - info_ = std::make_unique( - cluster, factory_context.clusterManager().bindConfig(), runtime, std::move(socket_matcher), - std::move(stats_scope), added_via_api, factory_context.messageValidationVisitor(), - factory_context); + info_ = std::make_unique(cluster, factory_context.clusterManager().bindConfig(), + runtime, std::move(socket_matcher), + std::move(stats_scope), added_via_api, factory_context); // Create the default (empty) priority set before registering callbacks to // avoid getting an update the first time it is accessed. priority_set_.getOrCreateHostSet(0); @@ -1074,6 +1100,17 @@ void ClusterImplBase::validateEndpointsForZoneAwareRouting( } } +ClusterInfoImpl::OptionalClusterStats::OptionalClusterStats( + const envoy::config::cluster::v3::Cluster& config, Stats::Scope& stats_scope) + : timeout_budget_stats_( + (config.track_cluster_stats().timeout_budgets() || config.track_timeout_budgets()) + ? std::make_unique(generateTimeoutBudgetStats(stats_scope)) + : nullptr), + request_response_size_stats_(config.track_cluster_stats().request_response_sizes() + ? 
std::make_unique( + generateRequestResponseSizeStats(stats_scope)) + : nullptr) {} + ClusterInfoImpl::ResourceManagers::ResourceManagers( const envoy::config::cluster::v3::Cluster& config, Runtime::Loader& runtime, const std::string& cluster_name, Stats::Scope& stats_scope) { @@ -1096,6 +1133,14 @@ ClusterInfoImpl::generateCircuitBreakersStats(Stats::Scope& scope, const std::st } } +Http::Http1::CodecStats& ClusterInfoImpl::http1CodecStats() const { + return Http::Http1::CodecStats::atomicGet(http1_codec_stats_, *stats_scope_); +} + +Http::Http2::CodecStats& ClusterInfoImpl::http2CodecStats() const { + return Http::Http2::CodecStats::atomicGet(http2_codec_stats_, *stats_scope_); +} + ResourceManagerImplPtr ClusterInfoImpl::ResourceManagers::load(const envoy::config::cluster::v3::Cluster& config, Runtime::Loader& runtime, const std::string& cluster_name, @@ -1310,14 +1355,12 @@ bool BaseDynamicClusterImpl::updateDynamicHostList(const HostVector& new_hosts, bool hosts_changed = false; // Go through and see if the list we have is different from what we just got. If it is, we make a - // new host list and raise a change notification. This uses an N^2 search given that this does not - // happen very often and the list sizes should be small (see - // https://github.com/envoyproxy/envoy/issues/2874). We also check for duplicates here. It's + // new host list and raise a change notification. We also check for duplicates here. It's // possible for DNS to return the same address multiple times, and a bad EDS implementation could // do the same thing. // Keep track of hosts we see in new_hosts that we are able to match up with an existing host. 
- std::unordered_set existing_hosts_for_current_priority( + absl::node_hash_set existing_hosts_for_current_priority( current_priority_hosts.size()); HostVector final_hosts; for (const HostSharedPtr& host : new_hosts) { @@ -1415,16 +1458,20 @@ bool BaseDynamicClusterImpl::updateDynamicHostList(const HostVector& new_hosts, // Remove hosts from current_priority_hosts that were matched to an existing host in the previous // loop. - for (auto itr = current_priority_hosts.begin(); itr != current_priority_hosts.end();) { - auto existing_itr = existing_hosts_for_current_priority.find((*itr)->address()->asString()); + auto erase_from = + std::remove_if(current_priority_hosts.begin(), current_priority_hosts.end(), + [&existing_hosts_for_current_priority](const HostSharedPtr& p) { + auto existing_itr = + existing_hosts_for_current_priority.find(p->address()->asString()); - if (existing_itr != existing_hosts_for_current_priority.end()) { - existing_hosts_for_current_priority.erase(existing_itr); - itr = current_priority_hosts.erase(itr); - } else { - itr++; - } - } + if (existing_itr != existing_hosts_for_current_priority.end()) { + existing_hosts_for_current_priority.erase(existing_itr); + return true; + } + + return false; + }); + current_priority_hosts.erase(erase_from, current_priority_hosts.end()); // If we saw existing hosts during this iteration from a different priority, then we've moved // a host from another priority into this one, so we should mark the priority as having changed. 
@@ -1442,21 +1489,23 @@ bool BaseDynamicClusterImpl::updateDynamicHostList(const HostVector& new_hosts, const bool dont_remove_healthy_hosts = health_checker_ != nullptr && !info()->drainConnectionsOnHostRemoval(); if (!current_priority_hosts.empty() && dont_remove_healthy_hosts) { - for (auto i = current_priority_hosts.begin(); i != current_priority_hosts.end();) { - if (!((*i)->healthFlagGet(Host::HealthFlag::FAILED_ACTIVE_HC) || - (*i)->healthFlagGet(Host::HealthFlag::FAILED_EDS_HEALTH))) { - if ((*i)->weight() > max_host_weight) { - max_host_weight = (*i)->weight(); - } - - final_hosts.push_back(*i); - updated_hosts[(*i)->address()->asString()] = *i; - (*i)->healthFlagSet(Host::HealthFlag::PENDING_DYNAMIC_REMOVAL); - i = current_priority_hosts.erase(i); - } else { - i++; - } - } + erase_from = + std::remove_if(current_priority_hosts.begin(), current_priority_hosts.end(), + [&updated_hosts, &final_hosts, &max_host_weight](const HostSharedPtr& p) { + if (!(p->healthFlagGet(Host::HealthFlag::FAILED_ACTIVE_HC) || + p->healthFlagGet(Host::HealthFlag::FAILED_EDS_HEALTH))) { + if (p->weight() > max_host_weight) { + max_host_weight = p->weight(); + } + + final_hosts.push_back(p); + updated_hosts[p->address()->asString()] = p; + p->healthFlagSet(Host::HealthFlag::PENDING_DYNAMIC_REMOVAL); + return true; + } + return false; + }); + current_priority_hosts.erase(erase_from, current_priority_hosts.end()); } // At this point we've accounted for all the new hosts as well the hosts that previously diff --git a/source/common/upstream/upstream_impl.h b/source/common/upstream/upstream_impl.h index bdb40c9d68416..4a9e0a06468d9 100644 --- a/source/common/upstream/upstream_impl.h +++ b/source/common/upstream/upstream_impl.h @@ -38,8 +38,11 @@ #include "common/common/callback_impl.h" #include "common/common/enum_to_int.h" #include "common/common/logger.h" +#include "common/common/thread.h" #include "common/config/metadata.h" #include "common/config/well_known_names.h" +#include 
"common/http/http1/codec_stats.h" +#include "common/http/http2/codec_stats.h" #include "common/init/manager_impl.h" #include "common/network/utility.h" #include "common/shared_pool/shared_pool.h" @@ -51,6 +54,7 @@ #include "server/transport_socket_config_impl.h" +#include "absl/container/node_hash_set.h" #include "absl/synchronization/mutex.h" namespace Envoy { @@ -132,11 +136,9 @@ class HostDescriptionImpl : virtual public HostDescription, } uint32_t priority() const override { return priority_; } void priority(uint32_t priority) override { priority_ = priority; } - -private: Network::TransportSocketFactory& resolveTransportSocketFactory(const Network::Address::InstanceConstSharedPtr& dest_address, - const envoy::config::core::v3::Metadata* metadata); + const envoy::config::core::v3::Metadata* metadata) const; protected: ClusterInfoConstSharedPtr cluster_; @@ -148,7 +150,7 @@ class HostDescriptionImpl : virtual public HostDescription, mutable absl::Mutex metadata_mutex_; MetadataConstSharedPtr metadata_ ABSL_GUARDED_BY(metadata_mutex_); const envoy::config::core::v3::Locality locality_; - Stats::StatNameManagedStorage locality_zone_stat_name_; + Stats::StatNameDynamicStorage locality_zone_stat_name_; mutable HostStats stats_; Outlier::DetectorHostMonitorPtr outlier_detector_; HealthCheckHostMonitorPtr health_checker_; @@ -172,7 +174,7 @@ class HostImpl : public HostDescriptionImpl, priority), used_(true) { setEdsHealthFlag(health_status); - weight(initial_weight); + HostImpl::weight(initial_weight); } // Upstream::Host @@ -183,9 +185,10 @@ class HostImpl : public HostDescriptionImpl, CreateConnectionData createConnection( Event::Dispatcher& dispatcher, const Network::ConnectionSocket::OptionsSharedPtr& options, Network::TransportSocketOptionsSharedPtr transport_socket_options) const override; - CreateConnectionData createHealthCheckConnection( - Event::Dispatcher& dispatcher, - Network::TransportSocketOptionsSharedPtr transport_socket_options) const override; + 
CreateConnectionData + createHealthCheckConnection(Event::Dispatcher& dispatcher, + Network::TransportSocketOptionsSharedPtr transport_socket_options, + const envoy::config::core::v3::Metadata* metadata) const override; std::vector> gauges() const override { @@ -193,7 +196,7 @@ class HostImpl : public HostDescriptionImpl, } void healthFlagClear(HealthFlag flag) override { health_flags_ &= ~enumToInt(flag); } bool healthFlagGet(HealthFlag flag) const override { return health_flags_ & enumToInt(flag); } - void healthFlagSet(HealthFlag flag) override { health_flags_ |= enumToInt(flag); } + void healthFlagSet(HealthFlag flag) final { health_flags_ |= enumToInt(flag); } ActiveHealthFailureType getActiveHealthFailureType() const override { return active_health_failure_type_; @@ -496,12 +499,12 @@ class PrioritySetImpl : public PrioritySet { const HostVector& hosts_removed, absl::optional overprovisioning_factor) override; - std::unordered_set all_hosts_added_; - std::unordered_set all_hosts_removed_; + absl::node_hash_set all_hosts_added_; + absl::node_hash_set all_hosts_removed_; private: PrioritySetImpl& parent_; - std::unordered_set priorities_; + absl::node_hash_set priorities_; }; }; @@ -513,14 +516,14 @@ class ClusterInfoImpl : public ClusterInfo, protected Logger::Loggable idleTimeout() const override { return idle_timeout_; } + float prefetchRatio() const override { return prefetch_ratio_; } uint32_t perConnectionBufferLimitBytes() const override { return per_connection_buffer_limit_bytes_; } @@ -540,6 +544,9 @@ class ClusterInfoImpl : public ClusterInfo, protected Logger::Loggable& + upstreamConfig() const override { + return upstream_config_; + } bool maintenanceMode() const override; uint64_t maxRequestsPerConnection() const override { return max_requests_per_connection_; } uint32_t maxResponseHeadersCount() const override { return max_response_headers_count_; } @@ -568,10 +579,27 @@ class ClusterInfoImpl : public ClusterInfo, protected 
Logger::Loggablerequest_response_size_stats_ == nullptr) { + return absl::nullopt; + } + + return std::ref(*(optional_cluster_stats_->request_response_size_stats_)); + } + ClusterLoadReportStats& loadReportStats() const override { return load_report_stats_; } - const absl::optional& timeoutBudgetStats() const override { - return timeout_budget_stats_; + + ClusterTimeoutBudgetStatsOptRef timeoutBudgetStats() const override { + if (optional_cluster_stats_ == nullptr || + optional_cluster_stats_->timeout_budget_stats_ == nullptr) { + return absl::nullopt; + } + + return std::ref(*(optional_cluster_stats_->timeout_budget_stats_)); } + const Network::Address::InstanceConstSharedPtr& sourceAddress() const override { return source_address_; }; @@ -590,12 +618,15 @@ class ClusterInfoImpl : public ClusterInfo, protected Logger::Loggable eds_service_name() const override { return eds_service_name_; } + absl::optional edsServiceName() const override { return eds_service_name_; } void createNetworkFilterChain(Network::Connection&) const override; Http::Protocol upstreamHttpProtocol(absl::optional downstream_protocol) const override; + Http::Http1::CodecStats& http1CodecStats() const override; + Http::Http2::CodecStats& http2CodecStats() const override; + private: struct ResourceManagers { ResourceManagers(const envoy::config::cluster::v3::Cluster& config, Runtime::Loader& runtime, @@ -610,6 +641,13 @@ class ClusterInfoImpl : public ClusterInfo, protected Logger::Loggable idle_timeout_; + const float prefetch_ratio_; const uint32_t per_connection_buffer_limit_bytes_; TransportSocketMatcherPtr socket_matcher_; Stats::ScopePtr stats_scope_; mutable ClusterStats stats_; Stats::IsolatedStoreImpl load_report_stats_store_; mutable ClusterLoadReportStats load_report_stats_; - const absl::optional timeout_budget_stats_; + const std::unique_ptr optional_cluster_stats_; const uint64_t features_; const Http::Http1Settings http1_settings_; const 
envoy::config::core::v3::Http2ProtocolOptions http2_options_; + const envoy::config::core::v3::HttpProtocolOptions common_http_protocol_options_; const std::map extension_protocol_options_; mutable ResourceManagers resource_managers_; const std::string maintenance_mode_runtime_key_; @@ -636,6 +676,7 @@ class ClusterInfoImpl : public ClusterInfo, protected Logger::Loggable lb_ring_hash_config_; absl::optional lb_original_dst_config_; + absl::optional upstream_config_; const bool added_via_api_; LoadBalancerSubsetInfoImpl lb_subset_; const envoy::config::core::v3::Metadata metadata_; @@ -650,6 +691,8 @@ class ClusterInfoImpl : public ClusterInfo, protected Logger::Loggable cluster_type_; const std::unique_ptr factory_context_; std::vector filter_factories_; + mutable Http::Http1::CodecStats::AtomicPtr http1_codec_stats_; + mutable Http::Http2::CodecStats::AtomicPtr http2_codec_stats_; }; /** @@ -704,7 +747,6 @@ class ClusterImplBase : public Cluster, protected Logger::Loggable partitionHostsPerLocality(const HostsPerLocality& hosts); - Stats::SymbolTable& symbolTable() { return symbol_table_; } Config::ConstMetadataSharedPoolSharedPtr constMetadataSharedPool() { return const_metadata_shared_pool_; } @@ -772,7 +814,6 @@ class ClusterImplBase : public Cluster, protected Logger::Loggable initialization_complete_callback_; uint64_t pending_initialize_health_checks_{}; const bool local_cluster_; - Stats::SymbolTable& symbol_table_; Config::ConstMetadataSharedPoolSharedPtr const_metadata_shared_pool_; }; @@ -813,9 +854,6 @@ class PriorityStateManager : protected Logger::Loggable { const absl::optional health_checker_flag, absl::optional overprovisioning_factor = absl::nullopt); - // Returns the size of the current cluster priority state. - size_t size() const { return priority_state_.size(); } - // Returns the saved priority state. 
PriorityState& priorityState() { return priority_state_; } diff --git a/source/common/version/BUILD b/source/common/version/BUILD new file mode 100644 index 0000000000000..9d726567378a4 --- /dev/null +++ b/source/common/version/BUILD @@ -0,0 +1,83 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_basic_cc_library", + "envoy_cc_library", + "envoy_package", + "envoy_select_boringssl", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +genrule( + name = "generate_version_number", + srcs = ["//:VERSION"], + outs = ["version_number.h"], + cmd = """echo "#define BUILD_VERSION_NUMBER \\"$$(cat $<)\\"" >$@""", + visibility = ["//visibility:private"], +) + +genrule( + name = "generate_version_linkstamp", + outs = ["manual_linkstamp.cc"], + cmd = select({ + # Only iOS builds typically follow this logic, OS/X is built as a normal binary + "//bazel:apple": "$(location :generate_version_linkstamp.sh) Library >> $@", + "//conditions:default": "$(location :generate_version_linkstamp.sh) >> $@", + }), + # Undocumented attr to depend on workspace status files. + # https://github.com/bazelbuild/bazel/issues/4942 + # Used here because generate_version_linkstamp.sh depends on the workspace status files. 
+ stamp = 1, + tools = [":generate_version_linkstamp.sh"], + visibility = ["//visibility:private"], +) + +envoy_cc_library( + name = "version_includes", + hdrs = [ + "version.h", + ":generate_version_number", + ], + deps = [ + "//source/common/singleton:const_singleton", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + ], +) + +envoy_cc_library( + name = "version_lib", + srcs = ["version.cc"], + copts = envoy_select_boringssl( + ["-DENVOY_SSL_VERSION=\\\"BoringSSL-FIPS\\\""], + ["-DENVOY_SSL_VERSION=\\\"BoringSSL\\\""], + ), + deps = [ + ":version_includes", + "//source/common/common:macros", + "//source/common/protobuf:utility_lib", + ], +) + +envoy_basic_cc_library( + name = "manual_version_linkstamp", + srcs = [":generate_version_linkstamp"], + visibility = ["//visibility:private"], +) + +envoy_basic_cc_library( + name = "version_linkstamp", + linkstamp = select({ + "//bazel:manual_stamp": None, + "//conditions:default": "version_linkstamp.cc", + }), + # Linking this library makes build cache inefficient, limiting this to //source/exe package only. + # Tests are linked with //test/test_common:test_version_linkstamp. + visibility = ["//source/exe:__pkg__"], + deps = select({ + "//bazel:manual_stamp": [":manual_version_linkstamp"], + "//conditions:default": [], + }), + alwayslink = 1, +) diff --git a/source/common/common/generate_version_linkstamp.sh b/source/common/version/generate_version_linkstamp.sh similarity index 78% rename from source/common/common/generate_version_linkstamp.sh rename to source/common/version/generate_version_linkstamp.sh index 4ad2da073589d..d8873f8b5a339 100755 --- a/source/common/common/generate_version_linkstamp.sh +++ b/source/common/version/generate_version_linkstamp.sh @@ -10,9 +10,14 @@ # But following the implicit trail one can deduce that linkstamp is in effect when "stamping" (https://github.com/bazelbuild/bazel/issues/2893) is on. # envoy_cc_library -- and the underlying cc_library rule -- does not support "stamping". 
# This makes sense as stamping mainly makes sense in the context of binaries for production releases, not static libraries. -build_scm_revision=$(grep BUILD_SCM_REVISION bazel-out/volatile-status.txt | sed 's/^BUILD_SCM_REVISION //' | tr -d '\\n') +build_scm_revision=$(sed -n -E 's/^BUILD_SCM_REVISION ([0-9a-f]{40})$/\1/p' < bazel-out/volatile-status.txt) +if [ -z "$1" ]; then + build_scm_status=$(sed -n -E 's/^BUILD_SCM_STATUS ([a-zA-Z]*)$/\1/p' < bazel-out/volatile-status.txt) +else + build_scm_status=$1 +fi echo "extern const char build_scm_revision[];" echo "extern const char build_scm_status[];" echo "const char build_scm_revision[] = \"$build_scm_revision\";" -echo "const char build_scm_status[] = \"Library\";" \ No newline at end of file +echo "const char build_scm_status[] = \"$build_scm_status\";" diff --git a/source/common/common/version.cc b/source/common/version/version.cc similarity index 97% rename from source/common/common/version.cc rename to source/common/version/version.cc index 1e930e61a3ec1..d2ddbae3c8184 100644 --- a/source/common/common/version.cc +++ b/source/common/version/version.cc @@ -1,4 +1,4 @@ -#include "common/common/version.h" +#include "common/version/version.h" #include #include @@ -6,7 +6,6 @@ #include "common/common/fmt.h" #include "common/common/macros.h" -#include "common/common/version_linkstamp.h" #include "common/protobuf/utility.h" #include "absl/strings/numbers.h" diff --git a/source/common/common/version.h b/source/common/version/version.h similarity index 96% rename from source/common/common/version.h rename to source/common/version/version.h index 6189f8801af23..a5720105ef852 100644 --- a/source/common/common/version.h +++ b/source/common/version/version.h @@ -4,8 +4,8 @@ #include "envoy/config/core/v3/base.pb.h" -#include "common/common/version_number.h" #include "common/singleton/const_singleton.h" +#include "common/version/version_number.h" namespace Envoy { diff --git a/source/common/common/version_linkstamp.cc 
b/source/common/version/version_linkstamp.cc similarity index 100% rename from source/common/common/version_linkstamp.cc rename to source/common/version/version_linkstamp.cc diff --git a/source/docs/flow_control.md b/source/docs/flow_control.md index 1bdad5450da37..e32bc4b2e8508 100644 --- a/source/docs/flow_control.md +++ b/source/docs/flow_control.md @@ -135,7 +135,7 @@ time, it should return `FilterDataStatus::StopIterationAndWatermark` to pause further data processing, which will cause the `ConnectionManagerImpl` to trigger watermark callbacks on behalf of the filter. If a filter can not make forward progress without the complete body, it should return `FilterDataStatus::StopIterationAndBuffer`. -in this case if the `ConnectionManagerImpl` buffers more than the allowed data +In this case if the `ConnectionManagerImpl` buffers more than the allowed data it will return an error downstream: a 413 on the request path, 500 or `resetStream()` on the response path. @@ -165,7 +165,7 @@ And the low watermark path: `StreamDecoderFilterCallback::onDecoderFilterBelowWriteBufferLowWatermark()`. * When `Envoy::Http::ConnectionManagerImpl` receives `onDecoderFilterAboveWriteBufferHighWatermark()` it calls `readDisable(false)` on the downstream - stream to pause data. + stream to resume data. # Encoder filters @@ -192,11 +192,11 @@ The encoder high watermark path for streaming filters is as follows: `DownstreamWatermarkCallbacks::onAboveWriteBufferHighWatermark()` for all filters which registered to receive watermark events * `Envoy::Router::Filter` receives `onAboveWriteBufferHighWatermark()` and calls - `readDisable(false)` on the upstream request. + `readDisable(true)` on the upstream request. 
The encoder low watermark path for streaming filters is as follows: - * When an instance of `Envoy::Router::StreamEncoderFilter` buffers too much data it should call + * When an instance of `Envoy::Router::StreamEncoderFilter` buffer drains it should call `StreamEncoderFilterCallback::onEncodeFilterBelowWriteBufferLowWatermark()`. * When `Envoy::Http::ConnectionManagerImpl::ActiveStreamEncoderFilter` receives `onEncoderFilterBelowWriteBufferLowWatermark()` it calls @@ -205,7 +205,7 @@ The encoder low watermark path for streaming filters is as follows: `DownstreamWatermarkCallbacks::onBelowWriteBufferLowWatermark()` for all filters which registered to receive watermark events * `Envoy::Router::Filter` receives `onBelowWriteBufferLowWatermark()` and calls - `readDisable(true)` on the upstream request. + `readDisable(false)` on the upstream request. # HTTP and HTTP/2 codec upstream send buffer @@ -394,6 +394,8 @@ watermark path is as follows: From this point the `ConnectionManagerImpl` takes over and the code path is the same as for the HTTP/2 codec downstream send buffer. +The low watermark path is as follows: + * When `Http::Http1::ConnectionImpl::output_buffer_` drains it calls `onOutputBufferBelowLowWatermark()` * Http::Http1::ConnectionImpl::ServerConnectionImpl::onOutputBufferBelowLowWatermark() calls @@ -416,6 +418,8 @@ watermark path is as follows: From this point on the `Envoy::Router::Filter` picks up the event and the code path is the same as for the HTTP/2 codec upstream send buffer. +The low watermark path is as follows: + * When `Http::Http1::ConnectionImpl::output_buffer_` drains it calls `onOutputBufferBelowLowWatermark()` * Http::Http1::ConnectionImpl::ClientConnectionImpl::onOutputBufferBelowLowWatermark() calls @@ -423,3 +427,6 @@ for the HTTP/2 codec upstream send buffer. receiving an `onBelowWriteBufferLowWatermark()` callback.
From this point on the `Envoy::Router::Filter` picks up the event and the code path is the same as for the HTTP/2 codec upstream send buffer. + +### HTTP3 implementation details +HTTP3 network buffer and stream send buffer works differently from HTTP2 and HTTP. See quiche_integration.md. diff --git a/source/docs/header_map.md b/source/docs/header_map.md new file mode 100644 index 0000000000000..59ae9db78621a --- /dev/null +++ b/source/docs/header_map.md @@ -0,0 +1,30 @@ +# Header map implementation overview + +The Envoy header map implementation (`HeaderMapImpl`) has the following properties: +* Headers are stored in a linked list (`HeaderList`) in the order they are added, with pseudo + headers kept at the front of the list. +* O(1) direct access is possible for common headers needed during data plane processing. This is + provided by a table of pointers that reach directly into a linked list that is populated when + headers are added or removed from the map. When O(1) headers are accessed by direct method + (`DEFINE_INLINE_HEADER` and `CustomInlineHeaderBase`) they use direct pointer access to see + whether a header is present, add it, modify it, etc. When headers are added by name a trie is used to lookup the pointer in the table (`StaticLookupTable`). +* Custom headers can be registered statically against a specific implementation (request headers, + request trailers, response headers, and response trailers) via core code and extensions + (`CustomInlineHeaderRegistry`). Each registered header increases the size of the table by the size of a single pointer. +* Operations that search, replace, etc. for a header by name that is not one of the O(1) headers + will incur an O(N) search through the linked list. This is an implementation deficiency for + certain usage patterns that will be improved in future changes. + +## Implementation details + +* O(1) registered headers are tracked during static initialization via the `CustomInlineHeaderBase` + class. 
+* The first time a header map is constructed (in practice this is after bootstrap load and the + Envoy header prefix is finalized when `getAllHeaderMapImplInfo` is called), the + `StaticLookupTable` is finalized for each header map type. No further changes are possible after + this point. The `StaticLookupTable` defines the amount of variable pointer table space that is + require for each header map type. +* Each concrete header map type derives from `InlineStorage` with a variable length member at the + end of the definition. +* Each concrete header map type uses a factory function and a provide constructor. The required + size is determined via the `inlineHeadersSize` function. \ No newline at end of file diff --git a/source/docs/quiche_integration.md b/source/docs/quiche_integration.md index 45de216cc6b44..9232278631f72 100644 --- a/source/docs/quiche_integration.md +++ b/source/docs/quiche_integration.md @@ -31,3 +31,7 @@ When the bytes buffered in a stream's send buffer exceeds its high watermark, it QUICHE doesn't buffer data at the local connection layer. All the data is buffered in the respective streams.To prevent the case where all streams collectively buffers a lot of data, there is also a simulated watermark buffer for each QUIC connection which is updated upon each stream write. When the aggregated buffered bytes goes above high watermark, its registered network callbacks will call Network::ConnectionCallbacks::onAboveWriteBufferHighWatermark(). The HCM will notify each stream via QUIC codec Http::Connection::onUnderlyingConnectionAboveWriteBufferHighWatermark() which will call each stream's StreamCallbackHelper::runHighWatermarkCallbacks(). There might be a way to simply the call stack as Quic connection already knows about all the stream, there is no need to call to HCM and notify each stream via codec. But here we just follow the same logic as HTTP2 codec does. 
In the same way, any QuicStream::OnCanWrite() may change the aggregated buffered bytes in the connection level bookkeeping as well. If the buffered bytes goes down below the low watermark, the same calls will be triggered to propagate onBelowWriteBufferLowWatermark() to each stream. + +As to Http::StreamEncoder::encodeHeaders()/encodeTrailers(), the accounting is done differently between Google QUIC and IETF QUIC: + * In Google QUIC, encodeHeaders()/encodeTrailers() check the buffer size increase on header stream before and after writing headers/trailers. In QuicSession::OnCanWrite(), may drain header stream send buffer, so there we also check send buffer size decrease on header stream. + * In IETF QUIC, encodeHeaders()/encodeTrailers() check the buffer size increase on the corresponding data stream which is similar to encodeData(). The buffered headers/trailers are only drained via QuicStream::OnCanWrite() so there is no need to check QuicSession::OnCanWrite. diff --git a/source/docs/stats.md b/source/docs/stats.md index 43be6992146c3..f80d1b46932f4 100644 --- a/source/docs/stats.md +++ b/source/docs/stats.md @@ -16,7 +16,7 @@ values, they are passed from parent to child in an RPC protocol. They were previously held in shared memory, which imposed various restrictions. Unlike the shared memory implementation, the RPC passing *requires a mode-bit specified when constructing gauges indicating whether it should be accumulated across hot-restarts*. - + ## Performance and Thread Local Storage A key tenant of the Envoy architecture is high performance on machines with @@ -77,6 +77,18 @@ followed. accumulates in to *interval* histograms. * Finally the main *interval* histogram is merged to *cumulative* histogram. +`ParentHistogram`s are held weakly a set in ThreadLocalStore. Like other stats, +they keep an embedded reference count and are removed from the set and destroyed +when the last strong reference disappears. 
Consequently, we must hold a lock for +the set when decrementing histogram reference counts. A similar process occurs for +other types of stats, but in those cases it is taken care of in `AllocatorImpl`. +There are strong references to `ParentHistograms` in TlsCacheEntry::parent_histograms_. + +Thread-local `TlsHistogram`s are created on behalf of a `ParentHistogram` +whenever accessed from a worker thread. They are strongly referenced in the +`ParentHistogram` as well as in a cache in the `ThreadLocalStore`, to help +maintain data continuity as scopes are re-created during operation. + ## Stat naming infrastructure and memory consumption Stat names are replicated in several places in various forms. diff --git a/source/docs/subset_load_balancer.md b/source/docs/subset_load_balancer.md index c34e032f6d358..23220d79e1a39 100644 --- a/source/docs/subset_load_balancer.md +++ b/source/docs/subset_load_balancer.md @@ -120,7 +120,7 @@ e7 | dev | 1.2-pre | std | Note: Only e1 has the "xlarge" metadata key. 
-Given this CDS `envoy::api::v2::Cluster`: +Given this CDS `envoy::config::cluster::v3::Cluster`: ``` json { @@ -165,7 +165,7 @@ After loading this configuration, the SLB's `LbSubsetMap` looks like this: ![LbSubsetMap Diagram](subset_load_balancer_diagram.svg) -Given these `envoy::api::v2::route::Route` entries: +Given these `envoy::config::route::v3::Route` entries: ``` json "routes": [ diff --git a/source/exe/BUILD b/source/exe/BUILD index 867605c7663db..a88969a6ae21e 100644 --- a/source/exe/BUILD +++ b/source/exe/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_binary", @@ -9,9 +7,11 @@ load( "envoy_cc_win32_library", "envoy_package", ) -load("//source/extensions:all_extensions.bzl", "envoy_all_extensions") +load("//source/extensions:all_extensions.bzl", "envoy_all_core_extensions", "envoy_all_extensions") load("//bazel:repositories.bzl", "PPC_SKIP_TARGETS", "WINDOWS_SKIP_TARGETS") +licenses(["notice"]) # Apache 2 + envoy_package() alias( @@ -22,7 +22,7 @@ alias( envoy_cc_binary( name = "envoy-static", stamped = True, - deps = ["envoy_main_entry_lib"], + deps = [":envoy_main_entry_lib"], ) envoy_cc_library( @@ -56,7 +56,7 @@ envoy_cc_library( ) envoy_cc_library( - name = "envoy_main_common_lib", + name = "main_common_lib", srcs = ["main_common.cc"], hdrs = ["main_common.h"], deps = [ @@ -80,6 +80,52 @@ envoy_cc_library( }), ) +envoy_cc_library( + name = "envoy_main_common_lib", + deps = [ + ":main_common_lib", + "//source/common/version:version_linkstamp", + ], +) + +envoy_cc_library( + name = "envoy_common_with_core_extensions_lib", + deps = [ + "//source/common/event:libevent_lib", + "//source/common/network:utility_lib", + "//source/common/stats:stats_lib", + "//source/common/stats:thread_local_store_lib", + "//source/server:drain_manager_lib", + "//source/server:options_lib", + "//source/server:server_lib", + "//source/server:listener_hooks_lib", + ] + envoy_all_core_extensions(), +) + 
+envoy_cc_library( + name = "envoy_main_common_with_core_extensions_lib", + deps = [ + ":envoy_common_with_core_extensions_lib", + ":main_common_lib", + ":platform_impl_lib", + ":process_wide_lib", + "//source/common/api:os_sys_calls_lib", + "//source/common/common:compiler_requirements_lib", + "//source/common/common:perf_annotation_lib", + "//source/common/grpc:google_grpc_context_lib", + "//source/common/stats:symbol_table_creator_lib", + "//source/server:hot_restart_lib", + "//source/server:hot_restart_nop_lib", + "//source/server/config_validation:server_lib", + ] + select({ + "//bazel:disable_signal_trace": [], + "//conditions:default": [ + "//source/common/signal:sigaction_lib", + ":terminate_handler_lib", + ], + }), +) + envoy_cc_library( name = "process_wide_lib", srcs = ["process_wide.cc"], diff --git a/source/exe/main.cc b/source/exe/main.cc index 6afcd3fd33592..80cfc86f18b0f 100644 --- a/source/exe/main.cc +++ b/source/exe/main.cc @@ -1,7 +1,5 @@ #include "exe/main_common.h" -#include "absl/debugging/symbolize.h" - // NOLINT(namespace-envoy) /** @@ -11,30 +9,4 @@ * deployment such as initializing signal handling. It calls main_common * after setting up command line options. */ -int main(int argc, char** argv) { -#ifndef __APPLE__ - // absl::Symbolize mostly works without this, but this improves corner case - // handling, such as running in a chroot jail. - absl::InitializeSymbolizer(argv[0]); -#endif - std::unique_ptr main_common; - - // Initialize the server's main context under a try/catch loop and simply return EXIT_FAILURE - // as needed. Whatever code in the initialization path that fails is expected to log an error - // message so the user can diagnose. 
- try { - main_common = std::make_unique(argc, argv); - } catch (const Envoy::NoServingException& e) { - return EXIT_SUCCESS; - } catch (const Envoy::MalformedArgvException& e) { - std::cerr << e.what() << std::endl; - return EXIT_FAILURE; - } catch (const Envoy::EnvoyException& e) { - std::cerr << e.what() << std::endl; - return EXIT_FAILURE; - } - - // Run the server listener loop outside try/catch blocks, so that unexpected exceptions - // show up as a core-dumps for easier diagnostics. - return main_common->run() ? EXIT_SUCCESS : EXIT_FAILURE; -} +int main(int argc, char** argv) { return Envoy::MainCommon::main(argc, argv); } diff --git a/source/exe/main_common.cc b/source/exe/main_common.cc index a46523033ab54..ace645ebb1aa8 100644 --- a/source/exe/main_common.cc +++ b/source/exe/main_common.cc @@ -1,5 +1,6 @@ #include "exe/main_common.h" +#include #include #include #include @@ -7,6 +8,7 @@ #include "envoy/config/listener/v3/listener.pb.h" #include "common/common/compiler_requirements.h" +#include "common/common/logger.h" #include "common/common/perf_annotation.h" #include "common/network/utility.h" #include "common/stats/symbol_table_creator.h" @@ -19,6 +21,7 @@ #include "server/options_impl.h" #include "server/server.h" +#include "absl/debugging/symbolize.h" #include "absl/strings/str_split.h" #ifdef ENVOY_HOT_RESTART @@ -43,7 +46,7 @@ Runtime::LoaderPtr ProdComponentFactory::createRuntime(Server::Instance& server, MainCommonBase::MainCommonBase(const OptionsImpl& options, Event::TimeSystem& time_system, ListenerHooks& listener_hooks, Server::ComponentFactory& component_factory, - std::unique_ptr&& random_generator, + std::unique_ptr&& random_generator, Thread::ThreadFactory& thread_factory, Filesystem::Instance& file_system, std::unique_ptr process_context) @@ -58,14 +61,7 @@ MainCommonBase::MainCommonBase(const OptionsImpl& options, Event::TimeSystem& ti switch (options_.mode()) { case Server::Mode::InitOnly: case Server::Mode::Serve: { -#ifdef 
ENVOY_HOT_RESTART - if (!options.hotRestartDisabled()) { - restarter_ = std::make_unique(options_); - } -#endif - if (restarter_ == nullptr) { - restarter_ = std::make_unique(); - } + configureHotRestarter(*random_generator); tls_ = std::make_unique(); Thread::BasicLockable& log_lock = restarter_->logLock(); @@ -106,6 +102,60 @@ void MainCommonBase::configureComponentLogLevels() { } } +void MainCommonBase::configureHotRestarter(Random::RandomGenerator& random_generator) { +#ifdef ENVOY_HOT_RESTART + if (!options_.hotRestartDisabled()) { + uint32_t base_id = options_.baseId(); + + if (options_.useDynamicBaseId()) { + ASSERT(options_.restartEpoch() == 0, "cannot use dynamic base id during hot restart"); + + std::unique_ptr restarter; + + // Try 100 times to get an unused base ID and then give up under the assumption + // that some other problem has occurred to prevent binding the domain socket. + for (int i = 0; i < 100 && restarter == nullptr; i++) { + // HotRestartImpl is going to multiply this value by 10, so leave head room. + base_id = static_cast(random_generator.random()) & 0x0FFFFFFF; + + try { + restarter = std::make_unique(base_id, 0); + } catch (Server::HotRestartDomainSocketInUseException& ex) { + // No luck, try again. + ENVOY_LOG_MISC(debug, "dynamic base id: {}", ex.what()); + } + } + + if (restarter == nullptr) { + throw EnvoyException("unable to select a dynamic base id"); + } + + restarter_.swap(restarter); + } else { + restarter_ = std::make_unique(base_id, options_.restartEpoch()); + } + + // Write the base-id to the requested path whether we selected it + // dynamically or not. 
+ if (!options_.baseIdPath().empty()) { + std::ofstream base_id_out_file(options_.baseIdPath()); + if (!base_id_out_file) { + ENVOY_LOG_MISC(critical, "cannot open base id output file {} for writing.", + options_.baseIdPath()); + } else { + base_id_out_file << base_id; + } + } + } +#else + UNREFERENCED_PARAMETER(random_generator); +#endif + + if (restarter_ == nullptr) { + restarter_ = std::make_unique(); + } +} + bool MainCommonBase::run() { switch (options_.mode()) { case Server::Mode::Serve: @@ -128,17 +178,17 @@ void MainCommonBase::adminRequest(absl::string_view path_and_query, absl::string std::string path_and_query_buf = std::string(path_and_query); std::string method_buf = std::string(method); server_->dispatcher().post([this, path_and_query_buf, method_buf, handler]() { - Http::ResponseHeaderMapImpl response_headers; + auto response_headers = Http::ResponseHeaderMapImpl::create(); std::string body; - server_->admin().request(path_and_query_buf, method_buf, response_headers, body); - handler(response_headers, body); + server_->admin().request(path_and_query_buf, method_buf, *response_headers, body); + handler(*response_headers, body); }); } MainCommon::MainCommon(int argc, const char* const* argv) : options_(argc, argv, &MainCommon::hotRestartVersion, spdlog::level::info), base_(options_, real_time_system_, default_listener_hooks_, prod_component_factory_, - std::make_unique(), platform_impl_.threadFactory(), + std::make_unique(), platform_impl_.threadFactory(), platform_impl_.fileSystem(), nullptr) {} std::string MainCommon::hotRestartVersion(bool hot_restart_enabled) { @@ -152,4 +202,36 @@ std::string MainCommon::hotRestartVersion(bool hot_restart_enabled) { return "disabled"; } +int MainCommon::main(int argc, char** argv, PostServerHook hook) { +#ifndef __APPLE__ + // absl::Symbolize mostly works without this, but this improves corner case + // handling, such as running in a chroot jail. 
+ absl::InitializeSymbolizer(argv[0]); +#endif + std::unique_ptr main_common; + + // Initialize the server's main context under a try/catch loop and simply return EXIT_FAILURE + // as needed. Whatever code in the initialization path that fails is expected to log an error + // message so the user can diagnose. + try { + main_common = std::make_unique(argc, argv); + Envoy::Server::Instance* server = main_common->server(); + if (server != nullptr && hook != nullptr) { + hook(*server); + } + } catch (const Envoy::NoServingException& e) { + return EXIT_SUCCESS; + } catch (const Envoy::MalformedArgvException& e) { + std::cerr << e.what() << std::endl; + return EXIT_FAILURE; + } catch (const Envoy::EnvoyException& e) { + std::cerr << e.what() << std::endl; + return EXIT_FAILURE; + } + + // Run the server listener loop outside try/catch blocks, so that unexpected exceptions + // show up as a core-dumps for easier diagnostics. + return main_common->run() ? EXIT_SUCCESS : EXIT_FAILURE; +} + } // namespace Envoy diff --git a/source/exe/main_common.h b/source/exe/main_common.h index 1bd2ca6c2a87d..91ea197def3c2 100644 --- a/source/exe/main_common.h +++ b/source/exe/main_common.h @@ -38,7 +38,7 @@ class MainCommonBase { // destructed. 
MainCommonBase(const OptionsImpl& options, Event::TimeSystem& time_system, ListenerHooks& listener_hooks, Server::ComponentFactory& component_factory, - std::unique_ptr&& random_generator, + std::unique_ptr&& random_generator, Thread::ThreadFactory& thread_factory, Filesystem::Instance& file_system, std::unique_ptr process_context); @@ -78,21 +78,25 @@ class MainCommonBase { Stats::SymbolTablePtr symbol_table_; Stats::AllocatorImpl stats_allocator_; - std::unique_ptr tls_; + ThreadLocal::InstanceImplPtr tls_; std::unique_ptr restarter_; - std::unique_ptr stats_store_; + Stats::ThreadLocalStoreImplPtr stats_store_; std::unique_ptr logging_context_; std::unique_ptr init_manager_{std::make_unique("Server")}; std::unique_ptr server_; private: void configureComponentLogLevels(); + void configureHotRestarter(Random::RandomGenerator& random_generator); }; // TODO(jmarantz): consider removing this class; I think it'd be more useful to // go through MainCommonBase directly. class MainCommon { public: + // Hook to run after a server is created. + using PostServerHook = std::function; + MainCommon(int argc, const char* const* argv); bool run() { return base_.run(); } // Only tests have a legitimate need for this today. @@ -120,6 +124,20 @@ class MainCommon { */ Server::Instance* server() { return base_.server(); } + /** + * Instantiates a MainCommon using default factory implements, parses args, + * and runs an event loop depending on the mode. + * + * Note that MainCommonBase can also be directly instantiated, providing the + * opportunity to override subsystem implementations for custom + * implementations. 
+ * + * @param argc number of command-line args + * @param argv command-line argument array + * @param hook optional hook to run after a server is created + */ + static int main(int argc, char** argv, PostServerHook hook = nullptr); + private: #ifdef ENVOY_HANDLE_SIGNALS Envoy::SignalAction handle_sigs_; diff --git a/source/exe/platform_impl.h b/source/exe/platform_impl.h index 4c05dff225841..6e1ec22ce872a 100644 --- a/source/exe/platform_impl.h +++ b/source/exe/platform_impl.h @@ -13,8 +13,8 @@ class PlatformImpl { Filesystem::Instance& fileSystem() { return *file_system_; } private: - std::unique_ptr thread_factory_; - std::unique_ptr file_system_; + Thread::ThreadFactoryPtr thread_factory_; + Filesystem::InstancePtr file_system_; }; } // namespace Envoy diff --git a/source/extensions/access_loggers/BUILD b/source/extensions/access_loggers/BUILD index 6156949edef64..40a5e79b39d3b 100644 --- a/source/extensions/access_loggers/BUILD +++ b/source/extensions/access_loggers/BUILD @@ -1,16 +1,18 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +envoy_extension_package() envoy_cc_library( name = "well_known_names", hdrs = ["well_known_names.h"], + # well known names files are public as long as they exist. + visibility = ["//visibility:public"], deps = [ "//source/common/singleton:const_singleton", ], diff --git a/source/extensions/access_loggers/common/BUILD b/source/extensions/access_loggers/common/BUILD index daa8a198e578d..1afb1f270a429 100644 --- a/source/extensions/access_loggers/common/BUILD +++ b/source/extensions/access_loggers/common/BUILD @@ -1,14 +1,14 @@ -licenses(["notice"]) # Apache 2 - -# Base class for implementations of AccessLog::Instance. 
- load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +# Base class for implementations of AccessLog::Instance. + +envoy_extension_package() envoy_cc_library( name = "access_log_base", diff --git a/source/extensions/access_loggers/common/access_log_base.cc b/source/extensions/access_loggers/common/access_log_base.cc index 99c3fa9e12f51..0323013d63504 100644 --- a/source/extensions/access_loggers/common/access_log_base.cc +++ b/source/extensions/access_loggers/common/access_log_base.cc @@ -12,17 +12,14 @@ void ImplBase::log(const Http::RequestHeaderMap* request_headers, const Http::ResponseHeaderMap* response_headers, const Http::ResponseTrailerMap* response_trailers, const StreamInfo::StreamInfo& stream_info) { - ConstSingleton empty_request_headers; - ConstSingleton empty_response_headers; - ConstSingleton empty_response_trailers; if (!request_headers) { - request_headers = &empty_request_headers.get(); + request_headers = Http::StaticEmptyHeaders::get().request_headers.get(); } if (!response_headers) { - response_headers = &empty_response_headers.get(); + response_headers = Http::StaticEmptyHeaders::get().response_headers.get(); } if (!response_trailers) { - response_trailers = &empty_response_trailers.get(); + response_trailers = Http::StaticEmptyHeaders::get().response_trailers.get(); } if (filter_ && !filter_->evaluate(stream_info, *request_headers, *response_headers, *response_trailers)) { diff --git a/source/extensions/access_loggers/common/access_log_base.h b/source/extensions/access_loggers/common/access_log_base.h index 75f3237a434cb..4fc1aae87d6c2 100644 --- a/source/extensions/access_loggers/common/access_log_base.h +++ b/source/extensions/access_loggers/common/access_log_base.h @@ -2,7 +2,6 @@ #include #include -#include #include #include "envoy/access_log/access_log.h" diff --git a/source/extensions/access_loggers/file/BUILD 
b/source/extensions/access_loggers/file/BUILD index 30d0af49e2fe6..b95be9f7228cc 100644 --- a/source/extensions/access_loggers/file/BUILD +++ b/source/extensions/access_loggers/file/BUILD @@ -1,21 +1,23 @@ -licenses(["notice"]) # Apache 2 - -# Access log implementation that writes to a file. -# Public docs: docs/root/configuration/access_log.rst - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +# Access log implementation that writes to a file. +# Public docs: docs/root/configuration/access_log.rst + +envoy_extension_package() envoy_cc_library( name = "file_access_log_lib", srcs = ["file_access_log_impl.cc"], hdrs = ["file_access_log_impl.h"], + # The file based access logger is core code. + visibility = ["//visibility:public"], deps = [ "//source/extensions/access_loggers/common:access_log_base", ], @@ -26,11 +28,15 @@ envoy_cc_extension( srcs = ["config.cc"], hdrs = ["config.h"], security_posture = "robust_to_untrusted_downstream", + # TODO(#9953) determine if this is core or should be cleaned up. 
+ visibility = [ + "//:extension_config", + "//test:__subpackages__", + ], deps = [ ":file_access_log_lib", "//include/envoy/registry", - "//include/envoy/server:access_log_config_interface", - "//source/common/access_log:access_log_formatter_lib", + "//source/common/formatter:substitution_format_string_lib", "//source/common/protobuf", "//source/extensions/access_loggers:well_known_names", "@envoy_api//envoy/extensions/access_loggers/file/v3:pkg_cc_proto", diff --git a/source/extensions/access_loggers/file/config.cc b/source/extensions/access_loggers/file/config.cc index dca4268868407..a3e817c71058d 100644 --- a/source/extensions/access_loggers/file/config.cc +++ b/source/extensions/access_loggers/file/config.cc @@ -1,15 +1,15 @@ #include "extensions/access_loggers/file/config.h" #include -#include #include "envoy/extensions/access_loggers/file/v3/file.pb.h" #include "envoy/extensions/access_loggers/file/v3/file.pb.validate.h" #include "envoy/registry/registry.h" #include "envoy/server/filter_config.h" -#include "common/access_log/access_log_formatter.h" #include "common/common/logger.h" +#include "common/formatter/substitution_format_string.h" +#include "common/formatter/substitution_formatter.h" #include "common/protobuf/protobuf.h" #include "extensions/access_loggers/file/file_access_log_impl.h" @@ -27,32 +27,36 @@ FileAccessLogFactory::createAccessLogInstance(const Protobuf::Message& config, const auto& fal_config = MessageUtil::downcastAndValidate< const envoy::extensions::access_loggers::file::v3::FileAccessLog&>( config, context.messageValidationVisitor()); - AccessLog::FormatterPtr formatter; + Formatter::FormatterPtr formatter; - if (fal_config.access_log_format_case() == envoy::extensions::access_loggers::file::v3:: - FileAccessLog::AccessLogFormatCase::kFormat || - fal_config.access_log_format_case() == - envoy::extensions::access_loggers::file::v3::FileAccessLog::AccessLogFormatCase:: - ACCESS_LOG_FORMAT_NOT_SET) { + switch 
(fal_config.access_log_format_case()) { + case envoy::extensions::access_loggers::file::v3::FileAccessLog::AccessLogFormatCase::kFormat: if (fal_config.format().empty()) { - formatter = AccessLog::AccessLogFormatUtils::defaultAccessLogFormatter(); + formatter = Formatter::SubstitutionFormatUtils::defaultSubstitutionFormatter(); } else { - formatter = std::make_unique(fal_config.format()); + envoy::config::core::v3::SubstitutionFormatString sff_config; + sff_config.set_text_format(fal_config.format()); + formatter = Formatter::SubstitutionFormatStringUtils::fromProtoConfig(sff_config); } - } else if (fal_config.access_log_format_case() == - envoy::extensions::access_loggers::file::v3::FileAccessLog::AccessLogFormatCase:: - kJsonFormat) { - auto json_format_map = this->convertJsonFormatToMap(fal_config.json_format()); - formatter = std::make_unique(json_format_map, false); - } else if (fal_config.access_log_format_case() == - envoy::extensions::access_loggers::file::v3::FileAccessLog::AccessLogFormatCase:: - kTypedJsonFormat) { - auto json_format_map = this->convertJsonFormatToMap(fal_config.typed_json_format()); - formatter = std::make_unique(json_format_map, true); - } else { - throw EnvoyException( - "Invalid access_log format provided. 
Only 'format', 'json_format', or 'typed_json_format' " - "are supported."); + break; + case envoy::extensions::access_loggers::file::v3::FileAccessLog::AccessLogFormatCase::kJsonFormat: + formatter = Formatter::SubstitutionFormatStringUtils::createJsonFormatter( + fal_config.json_format(), false); + break; + case envoy::extensions::access_loggers::file::v3::FileAccessLog::AccessLogFormatCase:: + kTypedJsonFormat: { + envoy::config::core::v3::SubstitutionFormatString sff_config; + *sff_config.mutable_json_format() = fal_config.typed_json_format(); + formatter = Formatter::SubstitutionFormatStringUtils::fromProtoConfig(sff_config); + break; + } + case envoy::extensions::access_loggers::file::v3::FileAccessLog::AccessLogFormatCase::kLogFormat: + formatter = Formatter::SubstitutionFormatStringUtils::fromProtoConfig(fal_config.log_format()); + break; + case envoy::extensions::access_loggers::file::v3::FileAccessLog::AccessLogFormatCase:: + ACCESS_LOG_FORMAT_NOT_SET: + formatter = Formatter::SubstitutionFormatUtils::defaultSubstitutionFormatter(); + break; } return std::make_shared(fal_config.path(), std::move(filter), std::move(formatter), @@ -66,18 +70,6 @@ ProtobufTypes::MessagePtr FileAccessLogFactory::createEmptyConfigProto() { std::string FileAccessLogFactory::name() const { return AccessLogNames::get().File; } -std::unordered_map -FileAccessLogFactory::convertJsonFormatToMap(ProtobufWkt::Struct json_format) { - std::unordered_map output; - for (const auto& pair : json_format.fields()) { - if (pair.second.kind_case() != ProtobufWkt::Value::kStringValue) { - throw EnvoyException("Only string values are supported in the JSON access log format."); - } - output.emplace(pair.first, pair.second.string_value()); - } - return output; -} - /** * Static registration for the file access log. @see RegisterFactory. 
*/ diff --git a/source/extensions/access_loggers/file/config.h b/source/extensions/access_loggers/file/config.h index 7f3976adfc8be..d3ebf58c352f8 100644 --- a/source/extensions/access_loggers/file/config.h +++ b/source/extensions/access_loggers/file/config.h @@ -19,9 +19,6 @@ class FileAccessLogFactory : public Server::Configuration::AccessLogInstanceFact ProtobufTypes::MessagePtr createEmptyConfigProto() override; std::string name() const override; - -private: - std::unordered_map convertJsonFormatToMap(ProtobufWkt::Struct config); }; } // namespace File diff --git a/source/extensions/access_loggers/file/file_access_log_impl.cc b/source/extensions/access_loggers/file/file_access_log_impl.cc index 9ccbf36a7ee08..4d571251b859b 100644 --- a/source/extensions/access_loggers/file/file_access_log_impl.cc +++ b/source/extensions/access_loggers/file/file_access_log_impl.cc @@ -6,7 +6,7 @@ namespace AccessLoggers { namespace File { FileAccessLog::FileAccessLog(const std::string& access_log_path, AccessLog::FilterPtr&& filter, - AccessLog::FormatterPtr&& formatter, + Formatter::FormatterPtr&& formatter, AccessLog::AccessLogManager& log_manager) : ImplBase(std::move(filter)), formatter_(std::move(formatter)) { log_file_ = log_manager.createAccessLog(access_log_path); @@ -16,8 +16,8 @@ void FileAccessLog::emitLog(const Http::RequestHeaderMap& request_headers, const Http::ResponseHeaderMap& response_headers, const Http::ResponseTrailerMap& response_trailers, const StreamInfo::StreamInfo& stream_info) { - log_file_->write( - formatter_->format(request_headers, response_headers, response_trailers, stream_info)); + log_file_->write(formatter_->format(request_headers, response_headers, response_trailers, + stream_info, absl::string_view())); } } // namespace File diff --git a/source/extensions/access_loggers/file/file_access_log_impl.h b/source/extensions/access_loggers/file/file_access_log_impl.h index ce278ed776af2..3cd195c44d1aa 100644 --- 
a/source/extensions/access_loggers/file/file_access_log_impl.h +++ b/source/extensions/access_loggers/file/file_access_log_impl.h @@ -1,5 +1,7 @@ #pragma once +#include "common/formatter/substitution_formatter.h" + #include "extensions/access_loggers/common/access_log_base.h" namespace Envoy { @@ -13,7 +15,7 @@ namespace File { class FileAccessLog : public Common::ImplBase { public: FileAccessLog(const std::string& access_log_path, AccessLog::FilterPtr&& filter, - AccessLog::FormatterPtr&& formatter, AccessLog::AccessLogManager& log_manager); + Formatter::FormatterPtr&& formatter, AccessLog::AccessLogManager& log_manager); private: // Common::ImplBase @@ -23,7 +25,7 @@ class FileAccessLog : public Common::ImplBase { const StreamInfo::StreamInfo& stream_info) override; AccessLog::AccessLogFileSharedPtr log_file_; - AccessLog::FormatterPtr formatter_; + Formatter::FormatterPtr formatter_; }; } // namespace File diff --git a/source/extensions/access_loggers/grpc/BUILD b/source/extensions/access_loggers/grpc/BUILD index f487c0d3688ad..94683341a2f70 100644 --- a/source/extensions/access_loggers/grpc/BUILD +++ b/source/extensions/access_loggers/grpc/BUILD @@ -1,16 +1,16 @@ -licenses(["notice"]) # Apache 2 - -# Access log implementation that writes to a gRPC service. -# Public docs: TODO(rodaine): Docs needed. - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +# Access log implementation that writes to a gRPC service. +# Public docs: TODO(rodaine): Docs needed. 
+ +envoy_extension_package() envoy_cc_library( name = "config_utils", @@ -36,6 +36,7 @@ envoy_cc_library( "//include/envoy/upstream:upstream_interface", "//source/common/grpc:async_client_lib", "//source/common/grpc:typed_async_client_lib", + "//source/common/runtime:runtime_features_lib", "//source/extensions/access_loggers/common:access_log_base", "@envoy_api//envoy/data/accesslog/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/access_loggers/grpc/v3:pkg_cc_proto", @@ -97,6 +98,12 @@ envoy_cc_extension( srcs = ["http_config.cc"], hdrs = ["http_config.h"], security_posture = "robust_to_untrusted_downstream", + # TODO(#9953) clean up. + visibility = [ + "//:extension_config", + "//test/common/access_log:__subpackages__", + "//test/integration:__subpackages__", + ], deps = [ ":config_utils", "//include/envoy/server:access_log_config_interface", @@ -114,6 +121,12 @@ envoy_cc_extension( srcs = ["tcp_config.cc"], hdrs = ["tcp_config.h"], security_posture = "robust_to_untrusted_downstream", + # TODO(#9953) clean up. 
+ visibility = [ + "//:extension_config", + "//test/common/access_log:__subpackages__", + "//test/integration:__subpackages__", + ], deps = [ ":config_utils", "//include/envoy/server:access_log_config_interface", diff --git a/source/extensions/access_loggers/grpc/config_utils.cc b/source/extensions/access_loggers/grpc/config_utils.cc index e950aea731fbb..aa59f2f28018c 100644 --- a/source/extensions/access_loggers/grpc/config_utils.cc +++ b/source/extensions/access_loggers/grpc/config_utils.cc @@ -10,7 +10,7 @@ namespace GrpcCommon { // Singleton registration via macro defined in envoy/singleton/manager.h SINGLETON_MANAGER_REGISTRATION(grpc_access_logger_cache); -std::shared_ptr +GrpcCommon::GrpcAccessLoggerCacheSharedPtr getGrpcAccessLoggerCacheSingleton(Server::Configuration::FactoryContext& context) { return context.singletonManager().getTyped( SINGLETON_MANAGER_REGISTERED_NAME(grpc_access_logger_cache), [&context] { diff --git a/source/extensions/access_loggers/grpc/grpc_access_log_impl.cc b/source/extensions/access_loggers/grpc/grpc_access_log_impl.cc index 86a4a50bcbf8a..21c1ce123e0cf 100644 --- a/source/extensions/access_loggers/grpc/grpc_access_log_impl.cc +++ b/source/extensions/access_loggers/grpc/grpc_access_log_impl.cc @@ -5,7 +5,9 @@ #include "envoy/upstream/upstream.h" #include "common/common/assert.h" +#include "common/grpc/typed_async_client.h" #include "common/network/utility.h" +#include "common/runtime/runtime_features.h" #include "common/stream_info/utility.h" namespace Envoy { @@ -23,25 +25,53 @@ void GrpcAccessLoggerImpl::LocalStream::onRemoteClose(Grpc::Status::GrpcStatus, } } -GrpcAccessLoggerImpl::GrpcAccessLoggerImpl(Grpc::RawAsyncClientPtr&& client, std::string log_name, - std::chrono::milliseconds buffer_flush_interval_msec, - uint64_t buffer_size_bytes, - Event::Dispatcher& dispatcher, - const LocalInfo::LocalInfo& local_info) - : client_(std::move(client)), log_name_(log_name), +GrpcAccessLoggerImpl::GrpcAccessLoggerImpl( + 
Grpc::RawAsyncClientPtr&& client, std::string log_name, + std::chrono::milliseconds buffer_flush_interval_msec, uint64_t max_buffer_size_bytes, + Event::Dispatcher& dispatcher, const LocalInfo::LocalInfo& local_info, Stats::Scope& scope, + envoy::config::core::v3::ApiVersion transport_api_version) + : stats_({ALL_GRPC_ACCESS_LOGGER_STATS( + POOL_COUNTER_PREFIX(scope, "access_logs.grpc_access_log."))}), + client_(std::move(client)), log_name_(log_name), buffer_flush_interval_msec_(buffer_flush_interval_msec), flush_timer_(dispatcher.createTimer([this]() { flush(); flush_timer_->enableTimer(buffer_flush_interval_msec_); })), - buffer_size_bytes_(buffer_size_bytes), local_info_(local_info) { + max_buffer_size_bytes_(max_buffer_size_bytes), local_info_(local_info), + service_method_( + Grpc::VersionedMethods("envoy.service.accesslog.v3.AccessLogService.StreamAccessLogs", + "envoy.service.accesslog.v2.AccessLogService.StreamAccessLogs") + .getMethodDescriptorForVersion(transport_api_version)), + transport_api_version_(transport_api_version) { flush_timer_->enableTimer(buffer_flush_interval_msec_); } +bool GrpcAccessLoggerImpl::canLogMore() { + if (max_buffer_size_bytes_ == 0 || approximate_message_size_bytes_ < max_buffer_size_bytes_) { + stats_.logs_written_.inc(); + return true; + } + flush(); + if (approximate_message_size_bytes_ < max_buffer_size_bytes_) { + stats_.logs_written_.inc(); + return true; + } + if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.disallow_unbounded_access_logs")) { + stats_.logs_dropped_.inc(); + return false; + } + stats_.logs_written_.inc(); + return true; +} + void GrpcAccessLoggerImpl::log(envoy::data::accesslog::v3::HTTPAccessLogEntry&& entry) { + if (!canLogMore()) { + return; + } approximate_message_size_bytes_ += entry.ByteSizeLong(); message_.mutable_http_logs()->mutable_log_entry()->Add(std::move(entry)); - if (approximate_message_size_bytes_ >= buffer_size_bytes_) { + if (approximate_message_size_bytes_ >= 
max_buffer_size_bytes_) { flush(); } } @@ -49,7 +79,7 @@ void GrpcAccessLoggerImpl::log(envoy::data::accesslog::v3::HTTPAccessLogEntry&& void GrpcAccessLoggerImpl::log(envoy::data::accesslog::v3::TCPAccessLogEntry&& entry) { approximate_message_size_bytes_ += entry.ByteSizeLong(); message_.mutable_tcp_logs()->mutable_log_entry()->Add(std::move(entry)); - if (approximate_message_size_bytes_ >= buffer_size_bytes_) { + if (approximate_message_size_bytes_ >= max_buffer_size_bytes_) { flush(); } } @@ -66,9 +96,7 @@ void GrpcAccessLoggerImpl::flush() { if (stream_->stream_ == nullptr) { stream_->stream_ = - client_->start(*Protobuf::DescriptorPool::generated_pool()->FindMethodByName( - "envoy.service.accesslog.v2.AccessLogService.StreamAccessLogs"), - *stream_, Http::AsyncClient::StreamOptions()); + client_->start(service_method_, *stream_, Http::AsyncClient::StreamOptions()); auto* identifier = message_.mutable_identifier(); *identifier->mutable_node() = local_info_.node(); @@ -76,7 +104,10 @@ void GrpcAccessLoggerImpl::flush() { } if (stream_->stream_ != nullptr) { - stream_->stream_->sendMessage(message_, false); + if (stream_->stream_->isAboveWriteBufferHighWatermark()) { + return; + } + stream_->stream_->sendMessage(message_, transport_api_version_, false); } else { // Clear out the stream data due to stream creation failure. stream_.reset(); @@ -99,7 +130,7 @@ GrpcAccessLoggerCacheImpl::GrpcAccessLoggerCacheImpl(Grpc::AsyncClientManager& a GrpcAccessLoggerSharedPtr GrpcAccessLoggerCacheImpl::getOrCreateLogger( const envoy::extensions::access_loggers::grpc::v3::CommonGrpcAccessLogConfig& config, - GrpcAccessLoggerType logger_type) { + GrpcAccessLoggerType logger_type, Stats::Scope& scope) { // TODO(euroelessar): Consider cleaning up loggers. 
auto& cache = tls_slot_->getTyped(); const auto cache_key = std::make_pair(MessageUtil::hash(config), logger_type); @@ -113,7 +144,7 @@ GrpcAccessLoggerSharedPtr GrpcAccessLoggerCacheImpl::getOrCreateLogger( factory->create(), config.log_name(), std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT(config, buffer_flush_interval, 1000)), PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, buffer_size_bytes, 16384), cache.dispatcher_, - local_info_); + local_info_, scope, config.transport_api_version()); cache.access_loggers_.emplace(cache_key, logger); return logger; } diff --git a/source/extensions/access_loggers/grpc/grpc_access_log_impl.h b/source/extensions/access_loggers/grpc/grpc_access_log_impl.h index 4dd4b59485868..2fe0d112d6f63 100644 --- a/source/extensions/access_loggers/grpc/grpc_access_log_impl.h +++ b/source/extensions/access_loggers/grpc/grpc_access_log_impl.h @@ -1,6 +1,6 @@ #pragma once -#include +#include #include #include "envoy/data/accesslog/v3/accesslog.pb.h" @@ -21,7 +21,19 @@ namespace Extensions { namespace AccessLoggers { namespace GrpcCommon { -// TODO(mattklein123): Stats +/** + * All stats for the grpc access logger. @see stats_macros.h + */ +#define ALL_GRPC_ACCESS_LOGGER_STATS(COUNTER) \ + COUNTER(logs_written) \ + COUNTER(logs_dropped) + +/** + * Wrapper struct for the access log stats. @see stats_macros.h + */ +struct GrpcAccessLoggerStats { + ALL_GRPC_ACCESS_LOGGER_STATS(GENERATE_COUNTER_STRUCT) +}; /** * Interface for an access logger. 
The logger provides abstraction on top of gRPC stream, deals with @@ -63,7 +75,7 @@ class GrpcAccessLoggerCache { */ virtual GrpcAccessLoggerSharedPtr getOrCreateLogger( const envoy::extensions::access_loggers::grpc::v3::CommonGrpcAccessLogConfig& config, - GrpcAccessLoggerType logger_type) PURE; + GrpcAccessLoggerType logger_type, Stats::Scope& scope) PURE; }; using GrpcAccessLoggerCacheSharedPtr = std::shared_ptr; @@ -72,8 +84,9 @@ class GrpcAccessLoggerImpl : public GrpcAccessLogger { public: GrpcAccessLoggerImpl(Grpc::RawAsyncClientPtr&& client, std::string log_name, std::chrono::milliseconds buffer_flush_interval_msec, - uint64_t buffer_size_bytes, Event::Dispatcher& dispatcher, - const LocalInfo::LocalInfo& local_info); + uint64_t max_buffer_size_bytes, Event::Dispatcher& dispatcher, + const LocalInfo::LocalInfo& local_info, Stats::Scope& scope, + envoy::config::core::v3::ApiVersion transport_api_version); // Extensions::AccessLoggers::GrpcCommon::GrpcAccessLogger void log(envoy::data::accesslog::v3::HTTPAccessLogEntry&& entry) override; @@ -98,19 +111,26 @@ class GrpcAccessLoggerImpl : public GrpcAccessLogger { void flush(); + bool canLogMore(); + + GrpcAccessLoggerStats stats_; Grpc::AsyncClient client_; const std::string log_name_; const std::chrono::milliseconds buffer_flush_interval_msec_; const Event::TimerPtr flush_timer_; - const uint64_t buffer_size_bytes_; + const uint64_t max_buffer_size_bytes_; uint64_t approximate_message_size_bytes_ = 0; envoy::service::accesslog::v3::StreamAccessLogsMessage message_; absl::optional stream_; const LocalInfo::LocalInfo& local_info_; + const Protobuf::MethodDescriptor& service_method_; + const envoy::config::core::v3::ApiVersion transport_api_version_; }; +using GrpcAccessLoggerImplPtr = std::unique_ptr; + class GrpcAccessLoggerCacheImpl : public Singleton::Instance, public GrpcAccessLoggerCache { public: GrpcAccessLoggerCacheImpl(Grpc::AsyncClientManager& async_client_manager, Stats::Scope& scope, @@ -119,7 
+139,7 @@ class GrpcAccessLoggerCacheImpl : public Singleton::Instance, public GrpcAccessL GrpcAccessLoggerSharedPtr getOrCreateLogger( const envoy::extensions::access_loggers::grpc::v3::CommonGrpcAccessLogConfig& config, - GrpcAccessLoggerType logger_type) override; + GrpcAccessLoggerType logger_type, Stats::Scope& scope) override; private: /** @@ -140,6 +160,8 @@ class GrpcAccessLoggerCacheImpl : public Singleton::Instance, public GrpcAccessL const LocalInfo::LocalInfo& local_info_; }; +using GrpcAccessLoggerCacheImplPtr = std::unique_ptr; + } // namespace GrpcCommon } // namespace AccessLoggers } // namespace Extensions diff --git a/source/extensions/access_loggers/grpc/grpc_access_log_utils.cc b/source/extensions/access_loggers/grpc/grpc_access_log_utils.cc index 1d187bc299852..74b061cbad7c0 100644 --- a/source/extensions/access_loggers/grpc/grpc_access_log_utils.cc +++ b/source/extensions/access_loggers/grpc/grpc_access_log_utils.cc @@ -37,7 +37,7 @@ void Utility::responseFlagsToAccessLogResponseFlags( envoy::data::accesslog::v3::AccessLogCommon& common_access_log, const StreamInfo::StreamInfo& stream_info) { - static_assert(StreamInfo::ResponseFlag::LastFlag == 0x40000, + static_assert(StreamInfo::ResponseFlag::LastFlag == 0x200000, "A flag has been added. 
Fix this code."); if (stream_info.hasResponseFlag(StreamInfo::ResponseFlag::FailedLocalHealthCheck)) { @@ -116,6 +116,15 @@ void Utility::responseFlagsToAccessLogResponseFlags( if (stream_info.hasResponseFlag(StreamInfo::ResponseFlag::DownstreamProtocolError)) { common_access_log.mutable_response_flags()->set_downstream_protocol_error(true); } + if (stream_info.hasResponseFlag(StreamInfo::ResponseFlag::UpstreamMaxStreamDurationReached)) { + common_access_log.mutable_response_flags()->set_upstream_max_stream_duration_reached(true); + } + if (stream_info.hasResponseFlag(StreamInfo::ResponseFlag::ResponseFromCacheFilter)) { + common_access_log.mutable_response_flags()->set_response_from_cache_filter(true); + } + if (stream_info.hasResponseFlag(StreamInfo::ResponseFlag::NoFilterConfigFound)) { + common_access_log.mutable_response_flags()->set_no_filter_config_found(true); + } } void Utility::extractCommonAccessLogProperties( diff --git a/source/extensions/access_loggers/grpc/http_config.cc b/source/extensions/access_loggers/grpc/http_config.cc index 6655c9f615153..830ba54b2d0ef 100644 --- a/source/extensions/access_loggers/grpc/http_config.cc +++ b/source/extensions/access_loggers/grpc/http_config.cc @@ -30,9 +30,9 @@ HttpGrpcAccessLogFactory::createAccessLogInstance(const Protobuf::Message& confi const envoy::extensions::access_loggers::grpc::v3::HttpGrpcAccessLogConfig&>( config, context.messageValidationVisitor()); - return std::make_shared( - std::move(filter), proto_config, context.threadLocal(), - GrpcCommon::getGrpcAccessLoggerCacheSingleton(context)); + return std::make_shared(std::move(filter), proto_config, context.threadLocal(), + GrpcCommon::getGrpcAccessLoggerCacheSingleton(context), + context.scope()); } ProtobufTypes::MessagePtr HttpGrpcAccessLogFactory::createEmptyConfigProto() { diff --git a/source/extensions/access_loggers/grpc/http_grpc_access_log_impl.cc b/source/extensions/access_loggers/grpc/http_grpc_access_log_impl.cc index 
6f07a77a44ecb..92bc5e38ee734 100644 --- a/source/extensions/access_loggers/grpc/http_grpc_access_log_impl.cc +++ b/source/extensions/access_loggers/grpc/http_grpc_access_log_impl.cc @@ -5,6 +5,7 @@ #include "envoy/extensions/access_loggers/grpc/v3/als.pb.h" #include "common/common/assert.h" +#include "common/http/headers.h" #include "common/network/utility.h" #include "common/stream_info/utility.h" @@ -15,6 +16,9 @@ namespace Extensions { namespace AccessLoggers { namespace HttpGrpc { +Http::RegisterCustomInlineHeader + referer_handle(Http::CustomHeaders::get().Referer); + HttpGrpcAccessLog::ThreadLocalLogger::ThreadLocalLogger( GrpcCommon::GrpcAccessLoggerSharedPtr logger) : logger_(std::move(logger)) {} @@ -22,8 +26,9 @@ HttpGrpcAccessLog::ThreadLocalLogger::ThreadLocalLogger( HttpGrpcAccessLog::HttpGrpcAccessLog( AccessLog::FilterPtr&& filter, envoy::extensions::access_loggers::grpc::v3::HttpGrpcAccessLogConfig config, - ThreadLocal::SlotAllocator& tls, GrpcCommon::GrpcAccessLoggerCacheSharedPtr access_logger_cache) - : Common::ImplBase(std::move(filter)), config_(std::move(config)), + ThreadLocal::SlotAllocator& tls, GrpcCommon::GrpcAccessLoggerCacheSharedPtr access_logger_cache, + Stats::Scope& scope) + : Common::ImplBase(std::move(filter)), scope_(scope), config_(std::move(config)), tls_slot_(tls.allocateSlot()), access_logger_cache_(std::move(access_logger_cache)) { for (const auto& header : config_.additional_request_headers_to_log()) { request_headers_to_log_.emplace_back(header); @@ -39,7 +44,7 @@ HttpGrpcAccessLog::HttpGrpcAccessLog( tls_slot_->set([this](Event::Dispatcher&) { return std::make_shared(access_logger_cache_->getOrCreateLogger( - config_.common_config(), GrpcCommon::GrpcAccessLoggerType::HTTP)); + config_.common_config(), GrpcCommon::GrpcAccessLoggerType::HTTP, scope_)); }); } @@ -74,40 +79,36 @@ void HttpGrpcAccessLog::emitLog(const Http::RequestHeaderMap& request_headers, // TODO(mattklein123): Populate port field. 
auto* request_properties = log_entry.mutable_request(); if (request_headers.Scheme() != nullptr) { - request_properties->set_scheme(std::string(request_headers.Scheme()->value().getStringView())); + request_properties->set_scheme(std::string(request_headers.getSchemeValue())); } if (request_headers.Host() != nullptr) { - request_properties->set_authority(std::string(request_headers.Host()->value().getStringView())); + request_properties->set_authority(std::string(request_headers.getHostValue())); } if (request_headers.Path() != nullptr) { - request_properties->set_path(std::string(request_headers.Path()->value().getStringView())); + request_properties->set_path(std::string(request_headers.getPathValue())); } if (request_headers.UserAgent() != nullptr) { - request_properties->set_user_agent( - std::string(request_headers.UserAgent()->value().getStringView())); + request_properties->set_user_agent(std::string(request_headers.getUserAgentValue())); } - if (request_headers.Referer() != nullptr) { + if (request_headers.getInline(referer_handle.handle()) != nullptr) { request_properties->set_referer( - std::string(request_headers.Referer()->value().getStringView())); + std::string(request_headers.getInlineValue(referer_handle.handle()))); } if (request_headers.ForwardedFor() != nullptr) { - request_properties->set_forwarded_for( - std::string(request_headers.ForwardedFor()->value().getStringView())); + request_properties->set_forwarded_for(std::string(request_headers.getForwardedForValue())); } if (request_headers.RequestId() != nullptr) { - request_properties->set_request_id( - std::string(request_headers.RequestId()->value().getStringView())); + request_properties->set_request_id(std::string(request_headers.getRequestIdValue())); } if (request_headers.EnvoyOriginalPath() != nullptr) { - request_properties->set_original_path( - std::string(request_headers.EnvoyOriginalPath()->value().getStringView())); + 
request_properties->set_original_path(std::string(request_headers.getEnvoyOriginalPathValue())); } request_properties->set_request_headers_bytes(request_headers.byteSize()); request_properties->set_request_body_bytes(stream_info.bytesReceived()); if (request_headers.Method() != nullptr) { envoy::config::core::v3::RequestMethod method = envoy::config::core::v3::METHOD_UNSPECIFIED; - envoy::config::core::v3::RequestMethod_Parse( - std::string(request_headers.Method()->value().getStringView()), &method); + envoy::config::core::v3::RequestMethod_Parse(std::string(request_headers.getMethodValue()), + &method); request_properties->set_request_method(method); } if (!request_headers_to_log_.empty()) { diff --git a/source/extensions/access_loggers/grpc/http_grpc_access_log_impl.h b/source/extensions/access_loggers/grpc/http_grpc_access_log_impl.h index 7d15bbb7dbae1..fcae58bd5f105 100644 --- a/source/extensions/access_loggers/grpc/http_grpc_access_log_impl.h +++ b/source/extensions/access_loggers/grpc/http_grpc_access_log_impl.h @@ -1,6 +1,6 @@ #pragma once -#include +#include #include #include "envoy/extensions/access_loggers/grpc/v3/als.pb.h" @@ -30,7 +30,8 @@ class HttpGrpcAccessLog : public Common::ImplBase { HttpGrpcAccessLog(AccessLog::FilterPtr&& filter, envoy::extensions::access_loggers::grpc::v3::HttpGrpcAccessLogConfig config, ThreadLocal::SlotAllocator& tls, - GrpcCommon::GrpcAccessLoggerCacheSharedPtr access_logger_cache); + GrpcCommon::GrpcAccessLoggerCacheSharedPtr access_logger_cache, + Stats::Scope& scope); private: /** @@ -48,6 +49,7 @@ class HttpGrpcAccessLog : public Common::ImplBase { const Http::ResponseTrailerMap& response_trailers, const StreamInfo::StreamInfo& stream_info) override; + Stats::Scope& scope_; const envoy::extensions::access_loggers::grpc::v3::HttpGrpcAccessLogConfig config_; const ThreadLocal::SlotPtr tls_slot_; const GrpcCommon::GrpcAccessLoggerCacheSharedPtr access_logger_cache_; @@ -57,6 +59,8 @@ class HttpGrpcAccessLog : public 
Common::ImplBase { std::vector filter_states_to_log_; }; +using HttpGrpcAccessLogPtr = std::unique_ptr; + } // namespace HttpGrpc } // namespace AccessLoggers } // namespace Extensions diff --git a/source/extensions/access_loggers/grpc/tcp_config.cc b/source/extensions/access_loggers/grpc/tcp_config.cc index 80d985dce2d60..268a4653a35b4 100644 --- a/source/extensions/access_loggers/grpc/tcp_config.cc +++ b/source/extensions/access_loggers/grpc/tcp_config.cc @@ -31,7 +31,8 @@ TcpGrpcAccessLogFactory::createAccessLogInstance(const Protobuf::Message& config config, context.messageValidationVisitor()); return std::make_shared(std::move(filter), proto_config, context.threadLocal(), - GrpcCommon::getGrpcAccessLoggerCacheSingleton(context)); + GrpcCommon::getGrpcAccessLoggerCacheSingleton(context), + context.scope()); } ProtobufTypes::MessagePtr TcpGrpcAccessLogFactory::createEmptyConfigProto() { diff --git a/source/extensions/access_loggers/grpc/tcp_grpc_access_log_impl.cc b/source/extensions/access_loggers/grpc/tcp_grpc_access_log_impl.cc index 2ff4f524e4472..a77d182b03b48 100644 --- a/source/extensions/access_loggers/grpc/tcp_grpc_access_log_impl.cc +++ b/source/extensions/access_loggers/grpc/tcp_grpc_access_log_impl.cc @@ -20,12 +20,13 @@ TcpGrpcAccessLog::ThreadLocalLogger::ThreadLocalLogger(GrpcCommon::GrpcAccessLog TcpGrpcAccessLog::TcpGrpcAccessLog( AccessLog::FilterPtr&& filter, envoy::extensions::access_loggers::grpc::v3::TcpGrpcAccessLogConfig config, - ThreadLocal::SlotAllocator& tls, GrpcCommon::GrpcAccessLoggerCacheSharedPtr access_logger_cache) - : Common::ImplBase(std::move(filter)), config_(std::move(config)), + ThreadLocal::SlotAllocator& tls, GrpcCommon::GrpcAccessLoggerCacheSharedPtr access_logger_cache, + Stats::Scope& scope) + : Common::ImplBase(std::move(filter)), scope_(scope), config_(std::move(config)), tls_slot_(tls.allocateSlot()), access_logger_cache_(std::move(access_logger_cache)) { tls_slot_->set([this](Event::Dispatcher&) { return 
std::make_shared(access_logger_cache_->getOrCreateLogger( - config_.common_config(), GrpcCommon::GrpcAccessLoggerType::TCP)); + config_.common_config(), GrpcCommon::GrpcAccessLoggerType::TCP, scope_)); }); } diff --git a/source/extensions/access_loggers/grpc/tcp_grpc_access_log_impl.h b/source/extensions/access_loggers/grpc/tcp_grpc_access_log_impl.h index 8f9369da75863..cf424bb92a3df 100644 --- a/source/extensions/access_loggers/grpc/tcp_grpc_access_log_impl.h +++ b/source/extensions/access_loggers/grpc/tcp_grpc_access_log_impl.h @@ -1,6 +1,5 @@ #pragma once -#include #include #include "envoy/extensions/access_loggers/grpc/v3/als.pb.h" @@ -30,7 +29,8 @@ class TcpGrpcAccessLog : public Common::ImplBase { TcpGrpcAccessLog(AccessLog::FilterPtr&& filter, envoy::extensions::access_loggers::grpc::v3::TcpGrpcAccessLogConfig config, ThreadLocal::SlotAllocator& tls, - GrpcCommon::GrpcAccessLoggerCacheSharedPtr access_logger_cache); + GrpcCommon::GrpcAccessLoggerCacheSharedPtr access_logger_cache, + Stats::Scope& scope); private: /** @@ -48,6 +48,7 @@ class TcpGrpcAccessLog : public Common::ImplBase { const Http::ResponseTrailerMap& response_trailers, const StreamInfo::StreamInfo& stream_info) override; + Stats::Scope& scope_; const envoy::extensions::access_loggers::grpc::v3::TcpGrpcAccessLogConfig config_; const ThreadLocal::SlotPtr tls_slot_; const GrpcCommon::GrpcAccessLoggerCacheSharedPtr access_logger_cache_; diff --git a/source/extensions/all_extensions.bzl b/source/extensions/all_extensions.bzl index 95f018c44973c..5fde35e3c92bb 100644 --- a/source/extensions/all_extensions.bzl +++ b/source/extensions/all_extensions.bzl @@ -9,8 +9,44 @@ _required_extensions = { } # Return all extensions to be compiled into Envoy. -def envoy_all_extensions(blacklist = []): +def envoy_all_extensions(denylist = []): all_extensions = dicts.add(_required_extensions, EXTENSIONS) # These extensions can be removed on a site specific basis. 
- return [v for k, v in all_extensions.items() if not k in blacklist] + return [v for k, v in all_extensions.items() if not k in denylist] + +# Core extensions needed to run Envoy's integration tests. +_core_extensions = [ + "envoy.access_loggers.file", + "envoy.filters.http.router", + "envoy.filters.http.health_check", + "envoy.filters.network.http_connection_manager", + "envoy.stat_sinks.statsd", + "envoy.transport_sockets.raw_buffer", +] + +# Return all core extensions to be compiled into Envoy. +def envoy_all_core_extensions(): + all_extensions = dicts.add(_required_extensions, EXTENSIONS) + + # These extensions can be removed on a site specific basis. + return [v for k, v in all_extensions.items() if k in _core_extensions] + +_http_filter_prefix = "envoy.filters.http" + +def envoy_all_http_filters(): + all_extensions = dicts.add(_required_extensions, EXTENSIONS) + + return [v for k, v in all_extensions.items() if k.startswith(_http_filter_prefix)] + +# All network-layer filters are extensions with names that have the following prefix. +_network_filter_prefix = "envoy.filters.network" + +# All thrift filters are extensions with names that have the following prefix. +_thrift_filter_prefix = "envoy.filters.thrift" + +# Return all network-layer filter extensions to be compiled into network-layer filter generic fuzzer. 
+def envoy_all_network_filters(): + all_extensions = dicts.add(_required_extensions, EXTENSIONS) + + return [v for k, v in all_extensions.items() if k.startswith(_network_filter_prefix) or k.startswith(_thrift_filter_prefix)] diff --git a/source/extensions/clusters/BUILD b/source/extensions/clusters/BUILD index 7a4780afbdab2..46709ec0c238c 100644 --- a/source/extensions/clusters/BUILD +++ b/source/extensions/clusters/BUILD @@ -1,16 +1,18 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +envoy_extension_package() envoy_cc_library( name = "well_known_names", hdrs = ["well_known_names.h"], + # well known names files are public as long as they exist. + visibility = ["//visibility:public"], deps = [ "//source/common/config:well_known_names", "//source/common/singleton:const_singleton", diff --git a/source/extensions/clusters/aggregate/BUILD b/source/extensions/clusters/aggregate/BUILD index 8dab07320fdf6..d23dd525625af 100644 --- a/source/extensions/clusters/aggregate/BUILD +++ b/source/extensions/clusters/aggregate/BUILD @@ -1,12 +1,12 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +envoy_extension_package() envoy_cc_extension( name = "cluster", diff --git a/source/extensions/clusters/aggregate/cluster.cc b/source/extensions/clusters/aggregate/cluster.cc index 5088c6a246585..836ef16627cc7 100644 --- a/source/extensions/clusters/aggregate/cluster.cc +++ b/source/extensions/clusters/aggregate/cluster.cc @@ -4,6 +4,8 @@ #include "envoy/extensions/clusters/aggregate/v3/cluster.pb.h" #include "envoy/extensions/clusters/aggregate/v3/cluster.pb.validate.h" +#include "common/common/assert.h" + namespace Envoy { namespace Extensions { namespace Clusters { @@ -12,7 +14,7 @@ 
namespace Aggregate { Cluster::Cluster(const envoy::config::cluster::v3::Cluster& cluster, const envoy::extensions::clusters::aggregate::v3::ClusterConfig& config, Upstream::ClusterManager& cluster_manager, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, + Random::RandomGenerator& random, Server::Configuration::TransportSocketFactoryContextImpl& factory_context, Stats::ScopePtr&& stats_scope, ThreadLocal::SlotAllocator& tls, bool added_via_api) : Upstream::ClusterImplBase(cluster, runtime, factory_context, std::move(stats_scope), @@ -20,10 +22,9 @@ Cluster::Cluster(const envoy::config::cluster::v3::Cluster& cluster, cluster_manager_(cluster_manager), runtime_(runtime), random_(random), tls_(tls.allocateSlot()), clusters_(config.clusters().begin(), config.clusters().end()) {} -PriorityContext +PriorityContextPtr Cluster::linearizePrioritySet(const std::function& skip_predicate) { - Upstream::PrioritySetImpl priority_set; - std::vector> priority_to_cluster; + PriorityContextPtr priority_context = std::make_unique(); uint32_t next_priority_after_linearizing = 0; // Linearize the priority set. e.g. 
for clusters [C_0, C_1, C_2] referred in aggregate cluster @@ -47,16 +48,21 @@ Cluster::linearizePrioritySet(const std::function& ski uint32_t priority_in_current_cluster = 0; for (const auto& host_set : tlc->prioritySet().hostSetsPerPriority()) { if (!host_set->hosts().empty()) { - priority_set.updateHosts( - next_priority_after_linearizing++, Upstream::HostSetImpl::updateHostsParams(*host_set), + priority_context->priority_set_.updateHosts( + next_priority_after_linearizing, Upstream::HostSetImpl::updateHostsParams(*host_set), host_set->localityWeights(), host_set->hosts(), {}, host_set->overprovisioningFactor()); - priority_to_cluster.emplace_back(std::make_pair(priority_in_current_cluster, tlc)); + priority_context->priority_to_cluster_.emplace_back( + std::make_pair(priority_in_current_cluster, tlc)); + + priority_context->cluster_and_priority_to_linearized_priority_[std::make_pair( + cluster, priority_in_current_cluster)] = next_priority_after_linearizing; + next_priority_after_linearizing++; } priority_in_current_cluster++; } } - return std::make_pair(std::move(priority_set), std::move(priority_to_cluster)); + return priority_context; } void Cluster::startPreInit() { @@ -85,10 +91,11 @@ void Cluster::startPreInit() { void Cluster::refresh(const std::function& skip_predicate) { // Post the priority set to worker threads. 
tls_->runOnAllThreads([this, skip_predicate, cluster_name = this->info()->name()]() { - PriorityContext priority_set = linearizePrioritySet(skip_predicate); + PriorityContextPtr priority_context = linearizePrioritySet(skip_predicate); Upstream::ThreadLocalCluster* cluster = cluster_manager_.get(cluster_name); ASSERT(cluster != nullptr); - dynamic_cast(cluster->loadBalancer()).refresh(priority_set); + dynamic_cast(cluster->loadBalancer()) + .refresh(std::move(priority_context)); }); } @@ -113,15 +120,41 @@ void Cluster::onClusterRemoval(const std::string& cluster_name) { } } +absl::optional AggregateClusterLoadBalancer::LoadBalancerImpl::hostToLinearizedPriority( + const Upstream::HostDescription& host) const { + auto it = priority_context_.cluster_and_priority_to_linearized_priority_.find( + std::make_pair(host.cluster().name(), host.priority())); + + if (it != priority_context_.cluster_and_priority_to_linearized_priority_.end()) { + return it->second; + } else { + // The HostSet can change due to CDS/EDS updates between retries. 
+ return absl::nullopt; + } +} + Upstream::HostConstSharedPtr AggregateClusterLoadBalancer::LoadBalancerImpl::chooseHost(Upstream::LoadBalancerContext* context) { + const Upstream::HealthyAndDegradedLoad* priority_loads = nullptr; + if (context != nullptr) { + priority_loads = &context->determinePriorityLoad( + priority_set_, per_priority_load_, + [this](const auto& host) { return hostToLinearizedPriority(host); }); + } else { + priority_loads = &per_priority_load_; + } + const auto priority_pair = - choosePriority(random_.random(), per_priority_load_.healthy_priority_load_, - per_priority_load_.degraded_priority_load_); - AggregateLoadBalancerContext aggregate_context(context, priority_pair.second, - priority_to_cluster_[priority_pair.first].first); - return priority_to_cluster_[priority_pair.first].second->loadBalancer().chooseHost( - &aggregate_context); + choosePriority(random_.random(), priority_loads->healthy_priority_load_, + priority_loads->degraded_priority_load_); + + AggregateLoadBalancerContext aggregate_context( + context, priority_pair.second, + priority_context_.priority_to_cluster_[priority_pair.first].first); + + Upstream::ThreadLocalCluster* cluster = + priority_context_.priority_to_cluster_[priority_pair.first].second; + return cluster->loadBalancer().chooseHost(&aggregate_context); } Upstream::HostConstSharedPtr diff --git a/source/extensions/clusters/aggregate/cluster.h b/source/extensions/clusters/aggregate/cluster.h index 550d8919346c6..417a8e8de156b 100644 --- a/source/extensions/clusters/aggregate/cluster.h +++ b/source/extensions/clusters/aggregate/cluster.h @@ -14,15 +14,26 @@ namespace Extensions { namespace Clusters { namespace Aggregate { -using PriorityContext = std::pair>>; +using PriorityToClusterVector = std::vector>; + +// Maps pair(host_cluster_name, host_priority) to the linearized priority of the Aggregate cluster. 
+using ClusterAndPriorityToLinearizedPriorityMap = + absl::flat_hash_map, uint32_t>; + +struct PriorityContext { + Upstream::PrioritySetImpl priority_set_; + PriorityToClusterVector priority_to_cluster_; + ClusterAndPriorityToLinearizedPriorityMap cluster_and_priority_to_linearized_priority_; +}; + +using PriorityContextPtr = std::unique_ptr; class Cluster : public Upstream::ClusterImplBase, Upstream::ClusterUpdateCallbacks { public: Cluster(const envoy::config::cluster::v3::Cluster& cluster, const envoy::extensions::clusters::aggregate::v3::ClusterConfig& config, Upstream::ClusterManager& cluster_manager, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, + Random::RandomGenerator& random, Server::Configuration::TransportSocketFactoryContextImpl& factory_context, Stats::ScopePtr&& stats_scope, ThreadLocal::SlotAllocator& tls, bool added_via_api); @@ -42,7 +53,7 @@ class Cluster : public Upstream::ClusterImplBase, Upstream::ClusterUpdateCallbac Upstream::ClusterUpdateCallbacksHandlePtr handle_; Upstream::ClusterManager& cluster_manager_; Runtime::Loader& runtime_; - Runtime::RandomGenerator& random_; + Random::RandomGenerator& random_; ThreadLocal::SlotPtr tls_; const std::vector clusters_; @@ -51,7 +62,7 @@ class Cluster : public Upstream::ClusterImplBase, Upstream::ClusterUpdateCallbac void startPreInit() override; void refresh(const std::function& skip_predicate); - PriorityContext + PriorityContextPtr linearizePrioritySet(const std::function& skip_predicate); }; @@ -60,7 +71,7 @@ class Cluster : public Upstream::ClusterImplBase, Upstream::ClusterUpdateCallbac class AggregateClusterLoadBalancer : public Upstream::LoadBalancer { public: AggregateClusterLoadBalancer( - Upstream::ClusterStats& stats, Runtime::Loader& runtime, Runtime::RandomGenerator& random, + Upstream::ClusterStats& stats, Runtime::Loader& runtime, Random::RandomGenerator& random, const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config) : stats_(stats), 
runtime_(runtime), random_(random), common_config_(common_config) {} @@ -73,10 +84,11 @@ class AggregateClusterLoadBalancer : public Upstream::LoadBalancer { class LoadBalancerImpl : public Upstream::LoadBalancerBase { public: LoadBalancerImpl(const PriorityContext& priority_context, Upstream::ClusterStats& stats, - Runtime::Loader& runtime, Runtime::RandomGenerator& random, + Runtime::Loader& runtime, Random::RandomGenerator& random, const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config) - : Upstream::LoadBalancerBase(priority_context.first, stats, runtime, random, common_config), - priority_to_cluster_(priority_context.second) {} + : Upstream::LoadBalancerBase(priority_context.priority_set_, stats, runtime, random, + common_config), + priority_context_(priority_context) {} // Upstream::LoadBalancer Upstream::HostConstSharedPtr chooseHost(Upstream::LoadBalancerContext* context) override; @@ -86,8 +98,10 @@ class AggregateClusterLoadBalancer : public Upstream::LoadBalancer { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } + absl::optional hostToLinearizedPriority(const Upstream::HostDescription& host) const; + private: - std::vector> priority_to_cluster_; + const PriorityContext& priority_context_; }; using LoadBalancerImplPtr = std::unique_ptr; @@ -95,17 +109,19 @@ class AggregateClusterLoadBalancer : public Upstream::LoadBalancer { LoadBalancerImplPtr load_balancer_; Upstream::ClusterStats& stats_; Runtime::Loader& runtime_; - Runtime::RandomGenerator& random_; + Random::RandomGenerator& random_; const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config_; + PriorityContextPtr priority_context_; public: - void refresh(const PriorityContext& priority_context) { - if (!priority_context.first.hostSetsPerPriority().empty()) { - load_balancer_ = std::make_unique(priority_context, stats_, runtime_, + void refresh(PriorityContextPtr priority_context) { + if (!priority_context->priority_set_.hostSetsPerPriority().empty()) { + load_balancer_ = 
std::make_unique(*priority_context, stats_, runtime_, random_, common_config_); } else { load_balancer_ = nullptr; } + priority_context_ = std::move(priority_context); } }; diff --git a/source/extensions/clusters/aggregate/lb_context.h b/source/extensions/clusters/aggregate/lb_context.h index 83c2325863f09..08aae97f51e7e 100644 --- a/source/extensions/clusters/aggregate/lb_context.h +++ b/source/extensions/clusters/aggregate/lb_context.h @@ -37,11 +37,13 @@ class AggregateLoadBalancerContext : public Upstream::LoadBalancerContext { } const Upstream::HealthyAndDegradedLoad& determinePriorityLoad(const Upstream::PrioritySet&, - const Upstream::HealthyAndDegradedLoad& original_priority_load) override { + const Upstream::HealthyAndDegradedLoad& original_priority_load, + const Upstream::RetryPriority::PriorityMappingFunc&) override { // Re-assign load. Set all traffic to the priority and availability selected in aggregate // cluster. - // TODO(yxue): allow determinePriorityLoad to affect the load of top level cluster and verify it - // works with current retry plugin + // + // Note: context_->determinePriorityLoad() was already called and its result handled in + // AggregateClusterLoadBalancer::LoadBalancerImpl::chooseHost(). 
const size_t priorities = original_priority_load.healthy_priority_load_.get().size(); priority_load_.healthy_priority_load_.get().assign(priorities, 0); priority_load_.degraded_priority_load_.get().assign(priorities, 0); diff --git a/source/extensions/clusters/dynamic_forward_proxy/BUILD b/source/extensions/clusters/dynamic_forward_proxy/BUILD index d063252df1672..0dc4780118e1d 100644 --- a/source/extensions/clusters/dynamic_forward_proxy/BUILD +++ b/source/extensions/clusters/dynamic_forward_proxy/BUILD @@ -1,12 +1,12 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +envoy_extension_package() envoy_cc_extension( name = "cluster", diff --git a/source/extensions/clusters/dynamic_forward_proxy/cluster.cc b/source/extensions/clusters/dynamic_forward_proxy/cluster.cc index c671aeb3c795b..c5af40dfd4018 100644 --- a/source/extensions/clusters/dynamic_forward_proxy/cluster.cc +++ b/source/extensions/clusters/dynamic_forward_proxy/cluster.cc @@ -171,7 +171,7 @@ Cluster::LoadBalancer::chooseHost(Upstream::LoadBalancerContext* context) { absl::string_view host; if (context->downstreamHeaders()) { - host = context->downstreamHeaders()->Host()->value().getStringView(); + host = context->downstreamHeaders()->getHostValue(); } else if (context->downstreamConnection()) { host = context->downstreamConnection()->requestedServerName(); } @@ -198,11 +198,12 @@ ClusterFactory::createClusterWithConfig( Stats::ScopePtr&& stats_scope) { Extensions::Common::DynamicForwardProxy::DnsCacheManagerFactoryImpl cache_manager_factory( context.singletonManager(), context.dispatcher(), context.tls(), context.random(), - context.stats()); + context.runtime(), context.stats()); envoy::config::cluster::v3::Cluster cluster_config = cluster; if (cluster_config.has_upstream_http_protocol_options()) { - if 
(!cluster_config.upstream_http_protocol_options().auto_sni() || - !cluster_config.upstream_http_protocol_options().auto_san_validation()) { + if (!proto_config.allow_insecure_cluster_options() && + (!cluster_config.upstream_http_protocol_options().auto_sni() || + !cluster_config.upstream_http_protocol_options().auto_san_validation())) { throw EnvoyException( "dynamic_forward_proxy cluster must have auto_sni and auto_san_validation true when " "configured with upstream_http_protocol_options"); diff --git a/source/extensions/clusters/redis/BUILD b/source/extensions/clusters/redis/BUILD index 3519f93506295..784103719061e 100644 --- a/source/extensions/clusters/redis/BUILD +++ b/source/extensions/clusters/redis/BUILD @@ -1,13 +1,13 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +envoy_extension_package() envoy_cc_library( name = "crc16_lib", diff --git a/source/extensions/clusters/redis/redis_cluster.cc b/source/extensions/clusters/redis/redis_cluster.cc index 0d07abb071d2c..db5f04d918077 100644 --- a/source/extensions/clusters/redis/redis_cluster.cc +++ b/source/extensions/clusters/redis/redis_cluster.cc @@ -45,8 +45,10 @@ RedisCluster::RedisCluster( : Config::Utility::translateClusterHosts(cluster.hidden_envoy_deprecated_hosts())), local_info_(factory_context.localInfo()), random_(factory_context.random()), redis_discovery_session_(*this, redis_client_factory), lb_factory_(std::move(lb_factory)), + auth_username_( + NetworkFilters::RedisProxy::ProtocolOptionsConfigImpl::authUsername(info(), api)), auth_password_( - NetworkFilters::RedisProxy::ProtocolOptionsConfigImpl::auth_password(info(), api)), + NetworkFilters::RedisProxy::ProtocolOptionsConfigImpl::authPassword(info(), api)), cluster_name_(cluster.name()), refresh_manager_(Common::Redis::getClusterRefreshManager( 
factory_context.singletonManager(), factory_context.dispatcher(), @@ -94,13 +96,13 @@ void RedisCluster::onClusterSlotUpdate(ClusterSlotsPtr&& slots) { Upstream::HostVector new_hosts; for (const ClusterSlot& slot : *slots) { - new_hosts.emplace_back(new RedisHost(info(), "", slot.master(), *this, true)); + new_hosts.emplace_back(new RedisHost(info(), "", slot.primary(), *this, true)); for (auto const& replica : slot.replicas()) { new_hosts.emplace_back(new RedisHost(info(), "", replica, *this, false)); } } - std::unordered_map updated_hosts; + absl::node_hash_map updated_hosts; Upstream::HostVector hosts_added; Upstream::HostVector hosts_removed; const bool host_updated = updateDynamicHostList(new_hosts, hosts_, hosts_added, hosts_removed, @@ -149,6 +151,10 @@ RedisCluster::DnsDiscoveryResolveTarget::~DnsDiscoveryResolveTarget() { if (active_query_) { active_query_->cancel(); } + // Disable timer for mock tests. + if (resolve_timer_) { + resolve_timer_->disableTimer(); + } } void RedisCluster::DnsDiscoveryResolveTarget::startResolveDns() { @@ -226,6 +232,10 @@ RedisCluster::RedisDiscoverySession::~RedisDiscoverySession() { current_request_->cancel(); current_request_ = nullptr; } + // Disable timer for mock tests. 
+ if (resolve_timer_) { + resolve_timer_->disableTimer(); + } while (!client_map_.empty()) { client_map_.begin()->second->client_->close(); @@ -278,7 +288,8 @@ void RedisCluster::RedisDiscoverySession::startResolveRedis() { client = std::make_unique(*this); client->host_ = current_host_address_; client->client_ = client_factory_.create(host, dispatcher_, *this, redis_command_stats_, - parent_.info()->statsScope(), parent_.auth_password_); + parent_.info()->statsScope(), parent_.auth_username_, + parent_.auth_password_); client->client_->addConnectionCallbacks(*client); } @@ -291,8 +302,8 @@ void RedisCluster::RedisDiscoverySession::onResponse( const uint32_t SlotRangeStart = 0; const uint32_t SlotRangeEnd = 1; - const uint32_t SlotMaster = 2; - const uint32_t SlotSlaveStart = 3; + const uint32_t SlotPrimary = 2; + const uint32_t SlotReplicaStart = 3; // Do nothing if the cluster is empty. if (value->type() != NetworkFilters::Common::Redis::RespType::Array || value->asArray().empty()) { @@ -320,18 +331,18 @@ void RedisCluster::RedisDiscoverySession::onResponse( return; } - // Field 2: Master address for slot range - auto master_address = ProcessCluster(slot_range[SlotMaster]); - if (!master_address) { + // Field 2: Primary address for slot range + auto primary_address = ProcessCluster(slot_range[SlotPrimary]); + if (!primary_address) { onUnexpectedResponse(value); return; } slots->emplace_back(slot_range[SlotRangeStart].asInteger(), - slot_range[SlotRangeEnd].asInteger(), master_address); + slot_range[SlotRangeEnd].asInteger(), primary_address); - for (auto replica = std::next(slot_range.begin(), SlotSlaveStart); replica != slot_range.end(); - ++replica) { + for (auto replica = std::next(slot_range.begin(), SlotReplicaStart); + replica != slot_range.end(); ++replica) { auto replica_address = ProcessCluster(*replica); if (!replica_address) { onUnexpectedResponse(value); diff --git a/source/extensions/clusters/redis/redis_cluster.h 
b/source/extensions/clusters/redis/redis_cluster.h index f716960385c8a..b3d842aa19dee 100644 --- a/source/extensions/clusters/redis/redis_cluster.h +++ b/source/extensions/clusters/redis/redis_cluster.h @@ -13,6 +13,7 @@ #include #include "envoy/api/api.h" +#include "envoy/common/random_generator.h" #include "envoy/config/cluster/redis/redis_cluster.pb.h" #include "envoy/config/cluster/redis/redis_cluster.pb.validate.h" #include "envoy/config/cluster/v3/cluster.pb.h" @@ -144,7 +145,7 @@ class RedisCluster : public Upstream::BaseDynamicClusterImpl { class RedisHost : public Upstream::HostImpl { public: RedisHost(Upstream::ClusterInfoConstSharedPtr cluster, const std::string& hostname, - Network::Address::InstanceConstSharedPtr address, RedisCluster& parent, bool master) + Network::Address::InstanceConstSharedPtr address, RedisCluster& parent, bool primary) : Upstream::HostImpl( cluster, hostname, address, // TODO(zyfjeff): Created through metadata shared pool @@ -153,12 +154,12 @@ class RedisCluster : public Upstream::BaseDynamicClusterImpl { parent.localityLbEndpoint().locality(), parent.lbEndpoint().endpoint().health_check_config(), parent.localityLbEndpoint().priority(), parent.lbEndpoint().health_status()), - master_(master) {} + primary_(primary) {} - bool isMaster() const { return master_; } + bool isPrimary() const { return primary_; } private: - const bool master_; + const bool primary_; }; // Resolves the discovery endpoint. @@ -221,12 +222,12 @@ class RedisCluster : public Upstream::BaseDynamicClusterImpl { std::chrono::milliseconds bufferFlushTimeoutInMs() const override { return buffer_timeout_; } uint32_t maxUpstreamUnknownConnections() const override { return 0; } bool enableCommandStats() const override { return false; } - // For any readPolicy other than Master, the RedisClientFactory will send a READONLY command + // For any readPolicy other than Primary, the RedisClientFactory will send a READONLY command // when establishing a new connection. 
Since we're only using this for making the "cluster // slots" commands, the READONLY command is not relevant in this context. We're setting it to - // Master to avoid the additional READONLY command. + // Primary to avoid the additional READONLY command. Extensions::NetworkFilters::Common::Redis::Client::ReadPolicy readPolicy() const override { - return Extensions::NetworkFilters::Common::Redis::Client::ReadPolicy::Master; + return Extensions::NetworkFilters::Common::Redis::Client::ReadPolicy::Primary; } // Extensions::NetworkFilters::Common::Redis::Client::ClientCallbacks @@ -246,7 +247,7 @@ class RedisCluster : public Upstream::BaseDynamicClusterImpl { Event::Dispatcher& dispatcher_; std::string current_host_address_; Extensions::NetworkFilters::Common::Redis::Client::PoolRequest* current_request_{}; - std::unordered_map client_map_; + absl::node_hash_map client_map_; std::list discovery_address_list_; @@ -269,13 +270,14 @@ class RedisCluster : public Upstream::BaseDynamicClusterImpl { Network::DnsLookupFamily dns_lookup_family_; const envoy::config::endpoint::v3::ClusterLoadAssignment load_assignment_; const LocalInfo::LocalInfo& local_info_; - Runtime::RandomGenerator& random_; + Random::RandomGenerator& random_; RedisDiscoverySession redis_discovery_session_; const ClusterSlotUpdateCallBackSharedPtr lb_factory_; Upstream::HostVector hosts_; Upstream::HostMap all_hosts_; + const std::string auth_username_; const std::string auth_password_; const std::string cluster_name_; const Common::Redis::ClusterRefreshManagerSharedPtr refresh_manager_; diff --git a/source/extensions/clusters/redis/redis_cluster_lb.cc b/source/extensions/clusters/redis/redis_cluster_lb.cc index 631f061756cd1..43bb0f9b32224 100644 --- a/source/extensions/clusters/redis/redis_cluster_lb.cc +++ b/source/extensions/clusters/redis/redis_cluster_lb.cc @@ -6,7 +6,7 @@ namespace Clusters { namespace Redis { bool ClusterSlot::operator==(const Envoy::Extensions::Clusters::Redis::ClusterSlot& rhs) 
const { - return start_ == rhs.start_ && end_ == rhs.end_ && master_ == rhs.master_ && + return start_ == rhs.start_ && end_ == rhs.end_ && primary_ == rhs.primary_ && replicas_ == rhs.replicas_; } @@ -30,28 +30,28 @@ bool RedisClusterLoadBalancerFactory::onClusterSlotUpdate(ClusterSlotsPtr&& slot for (const ClusterSlot& slot : *slots) { // look in the updated map - const std::string master_address = slot.master()->asString(); + const std::string primary_address = slot.primary()->asString(); - auto result = shards.try_emplace(master_address, shard_vector->size()); + auto result = shards.try_emplace(primary_address, shard_vector->size()); if (result.second) { - auto master_host = all_hosts.find(master_address); - ASSERT(master_host != all_hosts.end(), + auto primary_host = all_hosts.find(primary_address); + ASSERT(primary_host != all_hosts.end(), "we expect all address to be found in the updated_hosts"); - Upstream::HostVectorSharedPtr master_and_replicas = std::make_shared(); + Upstream::HostVectorSharedPtr primary_and_replicas = std::make_shared(); Upstream::HostVectorSharedPtr replicas = std::make_shared(); - master_and_replicas->push_back(master_host->second); + primary_and_replicas->push_back(primary_host->second); for (auto const& replica : slot.replicas()) { auto replica_host = all_hosts.find(replica->asString()); ASSERT(replica_host != all_hosts.end(), "we expect all address to be found in the updated_hosts"); replicas->push_back(replica_host->second); - master_and_replicas->push_back(replica_host->second); + primary_and_replicas->push_back(replica_host->second); } shard_vector->emplace_back( - std::make_shared(master_host->second, replicas, master_and_replicas)); + std::make_shared(primary_host->second, replicas, primary_and_replicas)); } for (auto i = slot.start(); i <= slot.end(); ++i) { @@ -84,7 +84,7 @@ void RedisClusterLoadBalancerFactory::onHostHealthUpdate() { for (auto const& shard : *current_shard_vector) { 
shard_vector->emplace_back(std::make_shared( - shard->master(), shard->replicas().hostsPtr(), shard->allHosts().hostsPtr())); + shard->primary(), shard->replicas().hostsPtr(), shard->allHosts().hostsPtr())); } { @@ -100,7 +100,7 @@ Upstream::LoadBalancerPtr RedisClusterLoadBalancerFactory::create() { namespace { Upstream::HostConstSharedPtr chooseRandomHost(const Upstream::HostSetImpl& host_set, - Runtime::RandomGenerator& random) { + Random::RandomGenerator& random) { auto hosts = host_set.healthyHosts(); if (hosts.empty()) { hosts = host_set.degradedHosts(); @@ -138,11 +138,11 @@ Upstream::HostConstSharedPtr RedisClusterLoadBalancerFactory::RedisClusterLoadBa auto redis_context = dynamic_cast(context); if (redis_context && redis_context->isReadCommand()) { switch (redis_context->readPolicy()) { - case NetworkFilters::Common::Redis::Client::ReadPolicy::Master: - return shard->master(); - case NetworkFilters::Common::Redis::Client::ReadPolicy::PreferMaster: - if (shard->master()->health() == Upstream::Host::Health::Healthy) { - return shard->master(); + case NetworkFilters::Common::Redis::Client::ReadPolicy::Primary: + return shard->primary(); + case NetworkFilters::Common::Redis::Client::ReadPolicy::PreferPrimary: + if (shard->primary()->health() == Upstream::Host::Health::Healthy) { + return shard->primary(); } else { return chooseRandomHost(shard->allHosts(), random_); } @@ -158,7 +158,7 @@ Upstream::HostConstSharedPtr RedisClusterLoadBalancerFactory::RedisClusterLoadBa return chooseRandomHost(shard->allHosts(), random_); } } - return shard->master(); + return shard->primary(); } bool RedisLoadBalancerContextImpl::isReadRequest( @@ -185,7 +185,7 @@ RedisLoadBalancerContextImpl::RedisLoadBalancerContextImpl( const NetworkFilters::Common::Redis::RespValue& request, NetworkFilters::Common::Redis::Client::ReadPolicy read_policy) : hash_key_(is_redis_cluster ? 
Crc16::crc16(hashtag(key, true)) - : MurmurHash::murmurHash2_64(hashtag(key, enabled_hashtagging))), + : MurmurHash::murmurHash2(hashtag(key, enabled_hashtagging))), is_read_(isReadRequest(request)), read_policy_(read_policy) {} // Inspired by the redis-cluster hashtagging algorithm diff --git a/source/extensions/clusters/redis/redis_cluster_lb.h b/source/extensions/clusters/redis/redis_cluster_lb.h index d7912294570cc..0c5142a8290a3 100644 --- a/source/extensions/clusters/redis/redis_cluster_lb.h +++ b/source/extensions/clusters/redis/redis_cluster_lb.h @@ -31,12 +31,12 @@ static const uint64_t MaxSlot = 16384; class ClusterSlot { public: - ClusterSlot(int64_t start, int64_t end, Network::Address::InstanceConstSharedPtr master) - : start_(start), end_(end), master_(std::move(master)) {} + ClusterSlot(int64_t start, int64_t end, Network::Address::InstanceConstSharedPtr primary) + : start_(start), end_(end), primary_(std::move(primary)) {} int64_t start() const { return start_; } int64_t end() const { return end_; } - Network::Address::InstanceConstSharedPtr master() const { return master_; } + Network::Address::InstanceConstSharedPtr primary() const { return primary_; } const absl::flat_hash_set& replicas() const { return replicas_; } @@ -49,7 +49,7 @@ class ClusterSlot { private: int64_t start_; int64_t end_; - Network::Address::InstanceConstSharedPtr master_; + Network::Address::InstanceConstSharedPtr primary_; absl::flat_hash_set replicas_; }; @@ -82,7 +82,7 @@ class RedisLoadBalancerContextImpl : public RedisLoadBalancerContext, bool is_redis_cluster, const NetworkFilters::Common::Redis::RespValue& request, NetworkFilters::Common::Redis::Client::ReadPolicy read_policy = - NetworkFilters::Common::Redis::Client::ReadPolicy::Master); + NetworkFilters::Common::Redis::Client::ReadPolicy::Primary); // Upstream::LoadBalancerContextBase absl::optional computeHashKey() override { return hash_key_; } @@ -130,7 +130,7 @@ using ClusterSlotUpdateCallBackSharedPtr = 
std::shared_ptr #include #include "envoy/common/pure.h" diff --git a/source/extensions/common/aws/region_provider.h b/source/extensions/common/aws/region_provider.h index aa87f90c173db..33a4fa2803b2c 100644 --- a/source/extensions/common/aws/region_provider.h +++ b/source/extensions/common/aws/region_provider.h @@ -1,5 +1,7 @@ #pragma once +#include + #include "envoy/common/pure.h" #include "absl/types/optional.h" @@ -23,6 +25,7 @@ class RegionProvider { virtual absl::optional getRegion() PURE; }; +using RegionProviderPtr = std::unique_ptr; using RegionProviderSharedPtr = std::shared_ptr; } // namespace Aws diff --git a/source/extensions/common/aws/signer_impl.cc b/source/extensions/common/aws/signer_impl.cc index da5256e25ed15..86730647966bf 100644 --- a/source/extensions/common/aws/signer_impl.cc +++ b/source/extensions/common/aws/signer_impl.cc @@ -24,8 +24,7 @@ void SignerImpl::sign(Http::RequestMessage& message, bool sign_body) { } void SignerImpl::sign(Http::RequestHeaderMap& headers) { - // S3 payloads require special treatment. 
- if (service_name_ == "s3") { + if (require_content_hash_) { headers.setReference(SignatureHeaders::get().ContentSha256, SignatureConstants::get().UnsignedPayload); sign(headers, SignatureConstants::get().UnsignedPayload); @@ -75,7 +74,7 @@ void SignerImpl::sign(Http::RequestHeaderMap& headers, const std::string& conten const auto authorization_header = createAuthorizationHeader( credentials.accessKeyId().value(), credential_scope, canonical_headers, signature); ENVOY_LOG(debug, "Signing request with: {}", authorization_header); - headers.addCopy(Http::Headers::get().Authorization, authorization_header); + headers.addCopy(Http::CustomHeaders::get().Authorization, authorization_header); } std::string SignerImpl::createContentHash(Http::RequestMessage& message, bool sign_body) const { diff --git a/source/extensions/common/aws/signer_impl.h b/source/extensions/common/aws/signer_impl.h index f925b6046b9dc..78908874e042a 100644 --- a/source/extensions/common/aws/signer_impl.h +++ b/source/extensions/common/aws/signer_impl.h @@ -47,8 +47,19 @@ class SignerImpl : public Signer, public Logger::Loggable { public: SignerImpl(absl::string_view service_name, absl::string_view region, const CredentialsProviderSharedPtr& credentials_provider, TimeSource& time_source) - : service_name_(service_name), region_(region), credentials_provider_(credentials_provider), - time_source_(time_source), long_date_formatter_(SignatureConstants::get().LongDateFormat), + : service_name_(service_name), region_(region), + + // S3, Glacier, ES payloads require special treatment. + // S3: + // https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html. + // ES: + // https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-request-signing.html. + // Glacier: + // https://docs.aws.amazon.com/amazonglacier/latest/dev/amazon-glacier-signing-requests.html. 
+ require_content_hash_{service_name_ == "s3" || service_name_ == "glacier" || + service_name_ == "es"}, + credentials_provider_(credentials_provider), time_source_(time_source), + long_date_formatter_(SignatureConstants::get().LongDateFormat), short_date_formatter_(SignatureConstants::get().ShortDateFormat) {} void sign(Http::RequestMessage& message, bool sign_body = false) override; @@ -74,6 +85,8 @@ class SignerImpl : public Signer, public Logger::Loggable { const std::string service_name_; const std::string region_; + + const bool require_content_hash_; CredentialsProviderSharedPtr credentials_provider_; TimeSource& time_source_; DateFormatter long_date_formatter_; diff --git a/source/extensions/common/aws/utility.cc b/source/extensions/common/aws/utility.cc index fb9c00918f060..794051e0d67a5 100644 --- a/source/extensions/common/aws/utility.cc +++ b/source/extensions/common/aws/utility.cc @@ -14,49 +14,45 @@ namespace Aws { std::map Utility::canonicalizeHeaders(const Http::RequestHeaderMap& headers) { std::map out; - headers.iterate( - [](const Http::HeaderEntry& entry, void* context) -> Http::HeaderMap::Iterate { - auto* map = static_cast*>(context); - // Skip empty headers - if (entry.key().empty() || entry.value().empty()) { - return Http::HeaderMap::Iterate::Continue; - } - // Pseudo-headers should not be canonicalized - if (!entry.key().getStringView().empty() && entry.key().getStringView()[0] == ':') { - return Http::HeaderMap::Iterate::Continue; - } - // Skip headers that are likely to mutate, when crossing proxies - const auto key = entry.key().getStringView(); - if (key == Http::Headers::get().ForwardedFor.get() || - key == Http::Headers::get().ForwardedProto.get() || key == "x-amzn-trace-id") { - return Http::HeaderMap::Iterate::Continue; - } - - std::string value(entry.value().getStringView()); - // Remove leading, trailing, and deduplicate repeated ascii spaces - absl::RemoveExtraAsciiWhitespace(&value); - const auto iter = 
map->find(std::string(entry.key().getStringView())); - // If the entry already exists, append the new value to the end - if (iter != map->end()) { - iter->second += fmt::format(",{}", value); - } else { - map->emplace(std::string(entry.key().getStringView()), value); - } - return Http::HeaderMap::Iterate::Continue; - }, - &out); + headers.iterate([&out](const Http::HeaderEntry& entry) -> Http::HeaderMap::Iterate { + // Skip empty headers + if (entry.key().empty() || entry.value().empty()) { + return Http::HeaderMap::Iterate::Continue; + } + // Pseudo-headers should not be canonicalized + if (!entry.key().getStringView().empty() && entry.key().getStringView()[0] == ':') { + return Http::HeaderMap::Iterate::Continue; + } + // Skip headers that are likely to mutate, when crossing proxies + const auto key = entry.key().getStringView(); + if (key == Http::Headers::get().ForwardedFor.get() || + key == Http::Headers::get().ForwardedProto.get() || key == "x-amzn-trace-id") { + return Http::HeaderMap::Iterate::Continue; + } + + std::string value(entry.value().getStringView()); + // Remove leading, trailing, and deduplicate repeated ascii spaces + absl::RemoveExtraAsciiWhitespace(&value); + const auto iter = out.find(std::string(entry.key().getStringView())); + // If the entry already exists, append the new value to the end + if (iter != out.end()) { + iter->second += fmt::format(",{}", value); + } else { + out.emplace(std::string(entry.key().getStringView()), value); + } + return Http::HeaderMap::Iterate::Continue; + }); // The AWS SDK has a quirk where it removes "default ports" (80, 443) from the host headers // Additionally, we canonicalize the :authority header as "host" // TODO(lavignes): This may need to be tweaked to canonicalize :authority for HTTP/2 requests - const auto* authority_header = headers.Host(); - if (authority_header != nullptr && !authority_header->value().empty()) { - const auto& value = authority_header->value().getStringView(); - const auto parts = 
StringUtil::splitToken(value, ":"); + const absl::string_view authority_header = headers.getHostValue(); + if (!authority_header.empty()) { + const auto parts = StringUtil::splitToken(authority_header, ":"); if (parts.size() > 1 && (parts[1] == "80" || parts[1] == "443")) { // Has default port, so use only the host part out.emplace(Http::Headers::get().HostLegacy.get(), std::string(parts[0])); } else { - out.emplace(Http::Headers::get().HostLegacy.get(), std::string(value)); + out.emplace(Http::Headers::get().HostLegacy.get(), std::string(authority_header)); } } return out; diff --git a/source/extensions/common/crypto/BUILD b/source/extensions/common/crypto/BUILD index 04c698e084806..ea1802a97570c 100644 --- a/source/extensions/common/crypto/BUILD +++ b/source/extensions/common/crypto/BUILD @@ -1,12 +1,12 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +envoy_extension_package() envoy_cc_extension( name = "utility_lib", @@ -23,6 +23,12 @@ envoy_cc_extension( ], security_posture = "unknown", undocumented = True, + # Legacy test use. TODO(#9953) clean up. 
+ visibility = [ + "//:extension_config", + "//test/common/config:__subpackages__", + "//test/common/crypto:__subpackages__", + ], deps = [ "//include/envoy/buffer:buffer_interface", "//source/common/common:assert_lib", diff --git a/source/extensions/common/dynamic_forward_proxy/BUILD b/source/extensions/common/dynamic_forward_proxy/BUILD index 89f6d47aae984..19d6138696182 100644 --- a/source/extensions/common/dynamic_forward_proxy/BUILD +++ b/source/extensions/common/dynamic_forward_proxy/BUILD @@ -1,21 +1,23 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +envoy_extension_package() envoy_cc_library( name = "dns_cache_interface", hdrs = ["dns_cache.h"], deps = [ "//include/envoy/common:backoff_strategy_interface", + "//include/envoy/common:random_generator_interface", "//include/envoy/event:dispatcher_interface", "//include/envoy/singleton:manager_interface", "//include/envoy/thread_local:thread_local_interface", + "//include/envoy/upstream:resource_manager_interface", "@envoy_api//envoy/extensions/common/dynamic_forward_proxy/v3:pkg_cc_proto", ], ) @@ -37,6 +39,7 @@ envoy_cc_library( hdrs = ["dns_cache_impl.h"], deps = [ ":dns_cache_interface", + ":dns_cache_resource_manager", "//include/envoy/network:dns_interface", "//include/envoy/thread_local:thread_local_interface", "//source/common/common:cleanup_lib", @@ -46,3 +49,19 @@ envoy_cc_library( "@envoy_api//envoy/extensions/common/dynamic_forward_proxy/v3:pkg_cc_proto", ], ) + +envoy_cc_library( + name = "dns_cache_resource_manager", + srcs = ["dns_cache_resource_manager.cc"], + hdrs = ["dns_cache_resource_manager.h"], + deps = [ + ":dns_cache_interface", + "//include/envoy/common:resource_interface", + "//include/envoy/stats:stats_interface", + "//source/common/common:assert_lib", + "//source/common/common:basic_resource_lib", + 
"//source/common/runtime:runtime_lib", + "//source/common/upstream:resource_manager_lib", + "@envoy_api//envoy/extensions/common/dynamic_forward_proxy/v3:pkg_cc_proto", + ], +) diff --git a/source/extensions/common/dynamic_forward_proxy/dns_cache.h b/source/extensions/common/dynamic_forward_proxy/dns_cache.h index 52941be1083ae..8f9d5a7e4d1a3 100644 --- a/source/extensions/common/dynamic_forward_proxy/dns_cache.h +++ b/source/extensions/common/dynamic_forward_proxy/dns_cache.h @@ -1,9 +1,11 @@ #pragma once +#include "envoy/common/random_generator.h" #include "envoy/event/dispatcher.h" #include "envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.pb.h" #include "envoy/singleton/manager.h" #include "envoy/thread_local/thread_local.h" +#include "envoy/upstream/resource_manager.h" namespace Envoy { namespace Extensions { @@ -43,6 +45,32 @@ class DnsHostInfo { using DnsHostInfoSharedPtr = std::shared_ptr; +#define ALL_DNS_CACHE_CIRCUIT_BREAKERS_STATS(OPEN_GAUGE, REMAINING_GAUGE) \ + OPEN_GAUGE(rq_pending_open, Accumulate) \ + REMAINING_GAUGE(rq_pending_remaining, Accumulate) + +struct DnsCacheCircuitBreakersStats { + ALL_DNS_CACHE_CIRCUIT_BREAKERS_STATS(GENERATE_GAUGE_STRUCT, GENERATE_GAUGE_STRUCT) +}; + +/** + * A resource manager of DNS Cache. + */ +class DnsCacheResourceManager { +public: + virtual ~DnsCacheResourceManager() = default; + + /** + * Returns the resource limit of pending requests to DNS. + */ + virtual ResourceLimit& pendingRequests() PURE; + + /** + * Returns the reference of stats for dns cache circuit breakers. + */ + virtual DnsCacheCircuitBreakersStats& stats() PURE; +}; + /** * A cache of DNS hosts. Hosts will re-resolve their addresses or be automatically purged * depending on configured policy. @@ -148,6 +176,15 @@ class DnsCache { * @return all hosts currently stored in the cache. */ virtual absl::flat_hash_map hosts() PURE; + + /** + * Check if a DNS request is allowed given resource limits. 
+ * @param pending_request optional pending request resource limit. If no resource limit is + * provided the internal DNS cache limit is used. + * @return RAII handle for pending request circuit breaker if the request was allowed. + */ + virtual Upstream::ResourceAutoIncDecPtr + canCreateDnsRequest(ResourceLimitOptRef pending_request) PURE; }; using DnsCacheSharedPtr = std::shared_ptr; @@ -176,7 +213,7 @@ using DnsCacheManagerSharedPtr = std::shared_ptr; DnsCacheManagerSharedPtr getCacheManager(Singleton::Manager& manager, Event::Dispatcher& main_thread_dispatcher, ThreadLocal::SlotAllocator& tls, - Runtime::RandomGenerator& random, + Random::RandomGenerator& random, Runtime::Loader& loader, Stats::Scope& root_scope); /** diff --git a/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc b/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc index c24de298df1ff..b2e2d5defce14 100644 --- a/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc +++ b/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc @@ -16,13 +16,15 @@ namespace DynamicForwardProxy { DnsCacheImpl::DnsCacheImpl( Event::Dispatcher& main_thread_dispatcher, ThreadLocal::SlotAllocator& tls, - Runtime::RandomGenerator& random, Stats::Scope& root_scope, + Random::RandomGenerator& random, Runtime::Loader& loader, Stats::Scope& root_scope, const envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig& config) : main_thread_dispatcher_(main_thread_dispatcher), dns_lookup_family_(Upstream::getDnsLookupFamilyFromEnum(config.dns_lookup_family())), - resolver_(main_thread_dispatcher.createDnsResolver({}, false)), tls_slot_(tls.allocateSlot()), + resolver_(main_thread_dispatcher.createDnsResolver({}, config.use_tcp_for_dns_lookups())), + tls_slot_(tls.allocateSlot()), scope_(root_scope.createScope(fmt::format("dns_cache.{}.", config.name()))), - stats_{ALL_DNS_CACHE_STATS(POOL_COUNTER(*scope_), POOL_GAUGE(*scope_))}, + 
stats_(generateDnsCacheStats(*scope_)), + resource_manager_(*scope_, loader, config.name(), config.dns_cache_circuit_breaker()), refresh_interval_(PROTOBUF_GET_MS_OR_DEFAULT(config, dns_refresh_rate, 60000)), failure_backoff_strategy_( Config::Utility::prepareDnsRefreshStrategy< @@ -46,6 +48,10 @@ DnsCacheImpl::~DnsCacheImpl() { } } +DnsCacheStats DnsCacheImpl::generateDnsCacheStats(Stats::Scope& scope) { + return {ALL_DNS_CACHE_STATS(POOL_COUNTER(scope), POOL_GAUGE(scope))}; +} + DnsCacheImpl::LoadDnsCacheEntryResult DnsCacheImpl::loadDnsCacheEntry(absl::string_view host, uint16_t default_port, LoadDnsCacheEntryCallbacks& callbacks) { @@ -72,6 +78,20 @@ DnsCacheImpl::loadDnsCacheEntry(absl::string_view host, uint16_t default_port, } } +Upstream::ResourceAutoIncDecPtr +DnsCacheImpl::canCreateDnsRequest(ResourceLimitOptRef pending_requests) { + const auto has_pending_requests = pending_requests.has_value(); + auto& current_pending_requests = + has_pending_requests ? pending_requests->get() : resource_manager_.pendingRequests(); + if (!current_pending_requests.canCreate()) { + if (!has_pending_requests) { + stats_.dns_rq_pending_overflow_.inc(); + } + return nullptr; + } + return std::make_unique(current_pending_requests); +} + absl::flat_hash_map DnsCacheImpl::hosts() { absl::flat_hash_map ret; for (const auto& host : primary_hosts_) { diff --git a/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.h b/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.h index 1f649c301ff4c..a7f1426c8be3f 100644 --- a/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.h +++ b/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.h @@ -2,12 +2,14 @@ #include "envoy/common/backoff_strategy.h" #include "envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.pb.h" +#include "envoy/http/filter.h" #include "envoy/network/dns.h" #include "envoy/thread_local/thread_local.h" #include "common/common/cleanup.h" #include 
"extensions/common/dynamic_forward_proxy/dns_cache.h" +#include "extensions/common/dynamic_forward_proxy/dns_cache_resource_manager.h" #include "absl/container/flat_hash_map.h" @@ -27,6 +29,7 @@ namespace DynamicForwardProxy { COUNTER(host_address_changed) \ COUNTER(host_overflow) \ COUNTER(host_removed) \ + COUNTER(dns_rq_pending_overflow) \ GAUGE(num_hosts, NeverImport) /** @@ -39,15 +42,18 @@ struct DnsCacheStats { class DnsCacheImpl : public DnsCache, Logger::Loggable { public: DnsCacheImpl(Event::Dispatcher& main_thread_dispatcher, ThreadLocal::SlotAllocator& tls, - Runtime::RandomGenerator& random, Stats::Scope& root_scope, + Random::RandomGenerator& random, Runtime::Loader& loader, Stats::Scope& root_scope, const envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig& config); ~DnsCacheImpl() override; + static DnsCacheStats generateDnsCacheStats(Stats::Scope& scope); // DnsCache LoadDnsCacheEntryResult loadDnsCacheEntry(absl::string_view host, uint16_t default_port, LoadDnsCacheEntryCallbacks& callbacks) override; AddUpdateCallbacksHandlePtr addUpdateCallbacks(UpdateCallbacks& callbacks) override; absl::flat_hash_map hosts() override; + Upstream::ResourceAutoIncDecPtr + canCreateDnsRequest(ResourceLimitOptRef pending_requests) override; private: using TlsHostMap = absl::flat_hash_map; @@ -83,7 +89,7 @@ class DnsCacheImpl : public DnsCache, Logger::Loggable update_callbacks_; absl::flat_hash_map primary_hosts_; + DnsCacheResourceManagerImpl resource_manager_; const std::chrono::milliseconds refresh_interval_; const BackOffStrategyPtr failure_backoff_strategy_; const std::chrono::milliseconds host_ttl_; diff --git a/source/extensions/common/dynamic_forward_proxy/dns_cache_manager_impl.cc b/source/extensions/common/dynamic_forward_proxy/dns_cache_manager_impl.cc index f898749d5c83c..ff2eb6add5e87 100644 --- a/source/extensions/common/dynamic_forward_proxy/dns_cache_manager_impl.cc +++ 
b/source/extensions/common/dynamic_forward_proxy/dns_cache_manager_impl.cc @@ -27,8 +27,8 @@ DnsCacheSharedPtr DnsCacheManagerImpl::getCache( return existing_cache->second.cache_; } - DnsCacheSharedPtr new_cache = - std::make_shared(main_thread_dispatcher_, tls_, random_, root_scope_, config); + DnsCacheSharedPtr new_cache = std::make_shared( + main_thread_dispatcher_, tls_, random_, loader_, root_scope_, config); caches_.emplace(config.name(), ActiveCache{config, new_cache}); return new_cache; } @@ -36,12 +36,12 @@ DnsCacheSharedPtr DnsCacheManagerImpl::getCache( DnsCacheManagerSharedPtr getCacheManager(Singleton::Manager& singleton_manager, Event::Dispatcher& main_thread_dispatcher, ThreadLocal::SlotAllocator& tls, - Runtime::RandomGenerator& random, + Random::RandomGenerator& random, Runtime::Loader& loader, Stats::Scope& root_scope) { return singleton_manager.getTyped( SINGLETON_MANAGER_REGISTERED_NAME(dns_cache_manager), - [&main_thread_dispatcher, &tls, &random, &root_scope] { - return std::make_shared(main_thread_dispatcher, tls, random, + [&main_thread_dispatcher, &tls, &random, &loader, &root_scope] { + return std::make_shared(main_thread_dispatcher, tls, random, loader, root_scope); }); } diff --git a/source/extensions/common/dynamic_forward_proxy/dns_cache_manager_impl.h b/source/extensions/common/dynamic_forward_proxy/dns_cache_manager_impl.h index 130ef570bbc27..15db6a928a0be 100644 --- a/source/extensions/common/dynamic_forward_proxy/dns_cache_manager_impl.h +++ b/source/extensions/common/dynamic_forward_proxy/dns_cache_manager_impl.h @@ -14,9 +14,10 @@ namespace DynamicForwardProxy { class DnsCacheManagerImpl : public DnsCacheManager, public Singleton::Instance { public: DnsCacheManagerImpl(Event::Dispatcher& main_thread_dispatcher, ThreadLocal::SlotAllocator& tls, - Runtime::RandomGenerator& random, Stats::Scope& root_scope) + Random::RandomGenerator& random, Runtime::Loader& loader, + Stats::Scope& root_scope) : 
main_thread_dispatcher_(main_thread_dispatcher), tls_(tls), random_(random), - root_scope_(root_scope) {} + loader_(loader), root_scope_(root_scope) {} // DnsCacheManager DnsCacheSharedPtr getCache( @@ -34,7 +35,8 @@ class DnsCacheManagerImpl : public DnsCacheManager, public Singleton::Instance { Event::Dispatcher& main_thread_dispatcher_; ThreadLocal::SlotAllocator& tls_; - Runtime::RandomGenerator& random_; + Random::RandomGenerator& random_; + Runtime::Loader& loader_; Stats::Scope& root_scope_; absl::flat_hash_map caches_; }; @@ -42,20 +44,21 @@ class DnsCacheManagerImpl : public DnsCacheManager, public Singleton::Instance { class DnsCacheManagerFactoryImpl : public DnsCacheManagerFactory { public: DnsCacheManagerFactoryImpl(Singleton::Manager& singleton_manager, Event::Dispatcher& dispatcher, - ThreadLocal::SlotAllocator& tls, Runtime::RandomGenerator& random, - Stats::Scope& root_scope) + ThreadLocal::SlotAllocator& tls, Random::RandomGenerator& random, + Runtime::Loader& loader, Stats::Scope& root_scope) : singleton_manager_(singleton_manager), dispatcher_(dispatcher), tls_(tls), random_(random), - root_scope_(root_scope) {} + loader_(loader), root_scope_(root_scope) {} DnsCacheManagerSharedPtr get() override { - return getCacheManager(singleton_manager_, dispatcher_, tls_, random_, root_scope_); + return getCacheManager(singleton_manager_, dispatcher_, tls_, random_, loader_, root_scope_); } private: Singleton::Manager& singleton_manager_; Event::Dispatcher& dispatcher_; ThreadLocal::SlotAllocator& tls_; - Runtime::RandomGenerator& random_; + Random::RandomGenerator& random_; + Runtime::Loader& loader_; Stats::Scope& root_scope_; }; diff --git a/source/extensions/common/dynamic_forward_proxy/dns_cache_resource_manager.cc b/source/extensions/common/dynamic_forward_proxy/dns_cache_resource_manager.cc new file mode 100644 index 0000000000000..65ce809275efa --- /dev/null +++ b/source/extensions/common/dynamic_forward_proxy/dns_cache_resource_manager.cc @@ -0,0 
+1,26 @@ +#include "extensions/common/dynamic_forward_proxy/dns_cache_resource_manager.h" + +namespace Envoy { +namespace Extensions { +namespace Common { +namespace DynamicForwardProxy { + +DnsCacheResourceManagerImpl::DnsCacheResourceManagerImpl( + Stats::Scope& scope, Runtime::Loader& loader, const std::string& config_name, + const envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheCircuitBreakers& cb_config) + : cb_stats_(generateDnsCacheCircuitBreakersStats(scope)), + pending_requests_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(cb_config, max_pending_requests, 1024), + loader, fmt::format("dns_cache.{}.circuit_breakers", config_name), + cb_stats_.rq_pending_open_, cb_stats_.rq_pending_remaining_) {} + +DnsCacheCircuitBreakersStats +DnsCacheResourceManagerImpl::generateDnsCacheCircuitBreakersStats(Stats::Scope& scope) { + std::string stat_prefix = "circuit_breakers"; + return {ALL_DNS_CACHE_CIRCUIT_BREAKERS_STATS(POOL_GAUGE_PREFIX(scope, stat_prefix), + POOL_GAUGE_PREFIX(scope, stat_prefix))}; +} + +} // namespace DynamicForwardProxy +} // namespace Common +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/common/dynamic_forward_proxy/dns_cache_resource_manager.h b/source/extensions/common/dynamic_forward_proxy/dns_cache_resource_manager.h new file mode 100644 index 0000000000000..0d4762ede616b --- /dev/null +++ b/source/extensions/common/dynamic_forward_proxy/dns_cache_resource_manager.h @@ -0,0 +1,44 @@ +#pragma once + +#include +#include +#include + +#include "envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.pb.h" +#include "envoy/runtime/runtime.h" +#include "envoy/stats/scope.h" +#include "envoy/stats/stats_macros.h" +#include "envoy/upstream/resource_manager.h" + +#include "common/common/assert.h" +#include "common/common/basic_resource_impl.h" +#include "common/upstream/resource_manager_impl.h" + +#include "extensions/common/dynamic_forward_proxy/dns_cache.h" + +namespace Envoy { +namespace Extensions { +namespace 
Common { +namespace DynamicForwardProxy { + +class DnsCacheResourceManagerImpl : public DnsCacheResourceManager { +public: + DnsCacheResourceManagerImpl( + Stats::Scope& scope, Runtime::Loader& loader, const std::string& config_name, + const envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheCircuitBreakers& + cb_config); + + static DnsCacheCircuitBreakersStats generateDnsCacheCircuitBreakersStats(Stats::Scope& scope); + // Envoy::Upstream::DnsCacheResourceManager + ResourceLimit& pendingRequests() override { return pending_requests_; } + DnsCacheCircuitBreakersStats& stats() override { return cb_stats_; } + +private: + DnsCacheCircuitBreakersStats cb_stats_; + Upstream::ManagedResourceImpl pending_requests_; +}; + +} // namespace DynamicForwardProxy +} // namespace Common +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/source/extensions/common/matcher/BUILD b/source/extensions/common/matcher/BUILD new file mode 100644 index 0000000000000..2ad3f963048af --- /dev/null +++ b/source/extensions/common/matcher/BUILD @@ -0,0 +1,21 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_extension_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_extension_package() + +envoy_cc_library( + name = "matcher_lib", + srcs = ["matcher.cc"], + hdrs = ["matcher.h"], + deps = [ + "//source/common/buffer:buffer_lib", + "//source/common/common:minimal_logger_lib", + "//source/common/http:header_utility_lib", + "@envoy_api//envoy/config/common/matcher/v3:pkg_cc_proto", + ], +) diff --git a/source/extensions/common/matcher/matcher.cc b/source/extensions/common/matcher/matcher.cc new file mode 100644 index 0000000000000..8040b4650bca0 --- /dev/null +++ b/source/extensions/common/matcher/matcher.cc @@ -0,0 +1,335 @@ +#include "extensions/common/matcher/matcher.h" + +#include "common/common/assert.h" + +namespace Envoy { +namespace Extensions { +namespace Common { +namespace Matcher { + +void 
buildMatcher(const envoy::config::common::matcher::v3::MatchPredicate& match_config, + std::vector& matchers) { + // In order to store indexes and build our matcher tree inline, we must reserve a slot where + // the matcher we are about to create will go. This allows us to know its future index and still + // construct more of the tree in each called constructor (e.g., multiple OR/AND conditions). + // Once fully constructed, we move the matcher into its position below. See the matcher + // overview in matcher.h for more information. + matchers.emplace_back(nullptr); + + MatcherPtr new_matcher; + switch (match_config.rule_case()) { + case envoy::config::common::matcher::v3::MatchPredicate::RuleCase::kOrMatch: + new_matcher = std::make_unique(match_config.or_match(), matchers, + SetLogicMatcher::Type::Or); + break; + case envoy::config::common::matcher::v3::MatchPredicate::RuleCase::kAndMatch: + new_matcher = std::make_unique(match_config.and_match(), matchers, + SetLogicMatcher::Type::And); + break; + case envoy::config::common::matcher::v3::MatchPredicate::RuleCase::kNotMatch: + new_matcher = std::make_unique(match_config.not_match(), matchers); + break; + case envoy::config::common::matcher::v3::MatchPredicate::RuleCase::kAnyMatch: + new_matcher = std::make_unique(matchers); + break; + case envoy::config::common::matcher::v3::MatchPredicate::RuleCase::kHttpRequestHeadersMatch: + new_matcher = std::make_unique( + match_config.http_request_headers_match(), matchers); + break; + case envoy::config::common::matcher::v3::MatchPredicate::RuleCase::kHttpRequestTrailersMatch: + new_matcher = std::make_unique( + match_config.http_request_trailers_match(), matchers); + break; + case envoy::config::common::matcher::v3::MatchPredicate::RuleCase::kHttpResponseHeadersMatch: + new_matcher = std::make_unique( + match_config.http_response_headers_match(), matchers); + break; + case envoy::config::common::matcher::v3::MatchPredicate::RuleCase::kHttpResponseTrailersMatch: + 
new_matcher = std::make_unique( + match_config.http_response_trailers_match(), matchers); + break; + case envoy::config::common::matcher::v3::MatchPredicate::RuleCase::kHttpRequestGenericBodyMatch: + new_matcher = std::make_unique( + match_config.http_request_generic_body_match(), matchers); + break; + case envoy::config::common::matcher::v3::MatchPredicate::RuleCase::kHttpResponseGenericBodyMatch: + new_matcher = std::make_unique( + match_config.http_response_generic_body_match(), matchers); + break; + default: + NOT_REACHED_GCOVR_EXCL_LINE; + } + + // Per above, move the matcher into its position. + matchers[new_matcher->index()] = std::move(new_matcher); +} + +SetLogicMatcher::SetLogicMatcher( + const envoy::config::common::matcher::v3::MatchPredicate::MatchSet& configs, + std::vector& matchers, Type type) + : LogicMatcherBase(matchers), matchers_(matchers), type_(type) { + for (const auto& config : configs.rules()) { + indexes_.push_back(matchers_.size()); + buildMatcher(config, matchers_); + } +} + +void SetLogicMatcher::updateLocalStatus(MatchStatusVector& statuses, + const UpdateFunctor& functor) const { + if (!statuses[my_index_].might_change_status_) { + return; + } + + for (size_t index : indexes_) { + functor(*matchers_[index], statuses); + } + + auto predicate = [&statuses](size_t index) { return statuses[index].matches_; }; + if (type_ == Type::And) { + statuses[my_index_].matches_ = std::all_of(indexes_.begin(), indexes_.end(), predicate); + } else { + ASSERT(type_ == Type::Or); + statuses[my_index_].matches_ = std::any_of(indexes_.begin(), indexes_.end(), predicate); + } + + // TODO(mattklein123): We can potentially short circuit this even further if we git a single false + // in an AND set or a single true in an OR set. 
+ statuses[my_index_].might_change_status_ = + std::any_of(indexes_.begin(), indexes_.end(), + [&statuses](size_t index) { return statuses[index].might_change_status_; }); +} + +NotMatcher::NotMatcher(const envoy::config::common::matcher::v3::MatchPredicate& config, + std::vector& matchers) + : LogicMatcherBase(matchers), matchers_(matchers), not_index_(matchers.size()) { + buildMatcher(config, matchers); +} + +void NotMatcher::updateLocalStatus(MatchStatusVector& statuses, + const UpdateFunctor& functor) const { + if (!statuses[my_index_].might_change_status_) { + return; + } + + functor(*matchers_[not_index_], statuses); + statuses[my_index_].matches_ = !statuses[not_index_].matches_; + statuses[my_index_].might_change_status_ = statuses[not_index_].might_change_status_; +} + +HttpHeaderMatcherBase::HttpHeaderMatcherBase( + const envoy::config::common::matcher::v3::HttpHeadersMatch& config, + const std::vector& matchers) + : SimpleMatcher(matchers), + headers_to_match_(Http::HeaderUtility::buildHeaderDataVector(config.headers())) {} + +void HttpHeaderMatcherBase::matchHeaders(const Http::HeaderMap& headers, + MatchStatusVector& statuses) const { + ASSERT(statuses[my_index_].might_change_status_); + statuses[my_index_].matches_ = Http::HeaderUtility::matchHeaders(headers, headers_to_match_); + statuses[my_index_].might_change_status_ = false; +} + +// HttpGenericBodyMatcher +// Scans the HTTP body and looks for patterns. +// HTTP body may be passed to the matcher in chunks. The search logic buffers +// only as many bytes as is the length of the longest pattern to be found. +HttpGenericBodyMatcher::HttpGenericBodyMatcher( + const envoy::config::common::matcher::v3::HttpGenericBodyMatch& config, + const std::vector& matchers) + : HttpBodyMatcherBase(matchers) { + patterns_ = std::make_shared>(); + for (const auto& i : config.patterns()) { + switch (i.rule_case()) { + // For binary match 'i' contains sequence of bytes to locate in the body. 
+ case envoy::config::common::matcher::v3::HttpGenericBodyMatch::GenericTextMatch::kBinaryMatch: { + patterns_->push_back(i.binary_match()); + } break; + // For string match 'i' contains exact string to locate in the body. + case envoy::config::common::matcher::v3::HttpGenericBodyMatch::GenericTextMatch::kStringMatch: + patterns_->push_back(i.string_match()); + break; + default: + NOT_REACHED_GCOVR_EXCL_LINE; + } + // overlap_size_ indicates how many bytes from previous data chunk(s) are buffered. + overlap_size_ = std::max(overlap_size_, patterns_->back().length() - 1); + } + limit_ = config.bytes_limit(); +} + +void HttpGenericBodyMatcher::onBody(const Buffer::Instance& data, MatchStatusVector& statuses) { + // Get the context associated with this stream. + HttpGenericBodyMatcherCtx* ctx = + static_cast(statuses[my_index_].ctx_.get()); + + if (statuses[my_index_].might_change_status_ == false) { + // End of search limit has been already reached or all patterns have been found. + // Status is not going to change. + ASSERT(((0 != limit_) && (limit_ == ctx->processed_bytes_)) || (ctx->patterns_index_.empty())); + return; + } + + // Iterate through all patterns to be found and check if they are located across body + // chunks: part of the pattern was in previous body chunk and remaining of the pattern + // is in the current body chunk on in the current body chunk. + bool resize_required = false; + auto body_search_limit = limit_ - ctx->processed_bytes_; + auto it = ctx->patterns_index_.begin(); + while (it != ctx->patterns_index_.end()) { + const auto& pattern = patterns_->at(*it); + if ((!ctx->overlap_.empty() && (locatePatternAcrossChunks(pattern, data, ctx))) || + (-1 != data.search(static_cast(pattern.data()), pattern.length(), 0, + body_search_limit))) { + // Pattern found. Remove it from the list of patterns to be found. + // If the longest pattern has been found, resize of overlap buffer may be + // required. 
+ resize_required = resize_required || (ctx->capacity_ == (pattern.length() - 1)); + it = ctx->patterns_index_.erase(it); + } else { + it++; + } + } + + if (ctx->patterns_index_.empty()) { + // All patterns were found. + statuses[my_index_].matches_ = true; + statuses[my_index_].might_change_status_ = false; + return; + } + + // Check if next body chunks should be searched for patterns. If the search limit + // ends on the current body chunk, there is no need to check next chunks. + if (0 != limit_) { + ctx->processed_bytes_ = std::min(uint64_t(limit_), ctx->processed_bytes_ + data.length()); + if (limit_ == ctx->processed_bytes_) { + // End of search limit has been reached and not all patterns have been found. + statuses[my_index_].matches_ = false; + statuses[my_index_].might_change_status_ = false; + return; + } + } + + // If longest pattern has been located, there is possibility that overlap_ + // buffer size may be reduced. + if (resize_required) { + resizeOverlapBuffer(ctx); + } + + bufferLastBytes(data, ctx); +} + +// Here we handle a situation when a pattern is spread across multiple body buffers. +// overlap_ stores number of bytes from previous body chunks equal to longest pattern yet to be +// found minus one byte (-1). The logic below tries to find the beginning of the pattern in +// overlap_ buffer and the pattern should continue at the beginning of the next buffer. +bool HttpGenericBodyMatcher::locatePatternAcrossChunks(const std::string& pattern, + const Buffer::Instance& data, + const HttpGenericBodyMatcherCtx* ctx) { + // Take the first character from the pattern and locate it in overlap_. + auto pattern_index = 0; + // Start position in overlap_. overlap_ size was calculated based on the longest pattern to be + // found, but search for shorter patterns may start from some offset, not the beginning of the + // buffer. + size_t start_index = (ctx->overlap_.size() > (pattern.size() - 1)) + ? 
ctx->overlap_.size() - (pattern.size() - 1) + : 0; + auto match_iter = std::find(std::begin(ctx->overlap_) + start_index, std::end(ctx->overlap_), + pattern.at(pattern_index)); + + if (match_iter == std::end(ctx->overlap_)) { + return false; + } + + // Continue checking characters until end of overlap_ buffer. + while (match_iter != std::end(ctx->overlap_)) { + if (pattern[pattern_index] != *match_iter) { + return false; + } + pattern_index++; + match_iter++; + } + + // Now check if the remaining of the pattern matches the beginning of the body + // buffer.i Do it only if there is sufficient number of bytes in the data buffer. + auto pattern_remainder = pattern.substr(pattern_index); + if ((0 != limit_) && (pattern_remainder.length() > (limit_ - ctx->processed_bytes_))) { + // Even if we got match it would be outside the search limit + return false; + } + return ((pattern_remainder.length() <= data.length()) && data.startsWith(pattern_remainder)); +} + +// Method buffers last bytes from the currently processed body in overlap_. +// This is required to find patterns which spans across multiple body chunks. +void HttpGenericBodyMatcher::bufferLastBytes(const Buffer::Instance& data, + HttpGenericBodyMatcherCtx* ctx) { + // The matcher buffers the last seen X bytes where X is equal to the length of the + // longest pattern - 1. With the arrival of the new 'data' the following situations + // are possible: + // 1. The new data's length is larger or equal to X. In this case just copy last X bytes + // from the data to overlap_ buffer. + // 2. The new data length is smaller than X and there is enough room in overlap buffer to just + // copy the bytes from data. + // 3. The new data length is smaller than X and there is not enough room in overlap buffer. + if (data.length() >= ctx->capacity_) { + // Case 1: + // Just overwrite the entire overlap_ buffer with new data. 
+ ctx->overlap_.resize(ctx->capacity_); + data.copyOut(data.length() - ctx->capacity_, ctx->capacity_, ctx->overlap_.data()); + } else { + if (data.length() <= (ctx->capacity_ - ctx->overlap_.size())) { + // Case 2. Just add the new data on top of already buffered. + const auto size = ctx->overlap_.size(); + ctx->overlap_.resize(ctx->overlap_.size() + data.length()); + data.copyOut(0, data.length(), ctx->overlap_.data() + size); + } else { + // Case 3. First shift data to make room for new data and then copy + // entire new buffer. + const size_t shift = ctx->overlap_.size() - (ctx->capacity_ - data.length()); + for (size_t i = 0; i < (ctx->overlap_.size() - shift); i++) { + ctx->overlap_[i] = ctx->overlap_[i + shift]; + } + const auto size = ctx->overlap_.size(); + ctx->overlap_.resize(ctx->capacity_); + data.copyOut(0, data.length(), ctx->overlap_.data() + (size - shift)); + } + } +} + +// Method takes list of indexes of patterns not yet located in the http body and returns the +// length of the longest pattern. +// This is used by matcher to buffer as minimum bytes as possible. +size_t HttpGenericBodyMatcher::calcLongestPatternSize(const std::list& indexes) const { + ASSERT(!indexes.empty()); + size_t max_len = 0; + for (const auto& i : indexes) { + max_len = std::max(max_len, patterns_->at(i).length()); + } + return max_len; +} + +// Method checks if it is possible to reduce the size of overlap_ buffer. +void HttpGenericBodyMatcher::resizeOverlapBuffer(HttpGenericBodyMatcherCtx* ctx) { + // Check if we need to resize overlap_ buffer. Since it was initialized to size of the longest + // pattern, it will be shrunk only and memory allocations do not happen. + // Depending on how many bytes were already in the buffer, shift may be required if + // the new size is smaller than number of already buffered bytes. 
+ const size_t max_len = calcLongestPatternSize(ctx->patterns_index_); + if (ctx->capacity_ != (max_len - 1)) { + const size_t new_size = max_len - 1; + const size_t shift = (ctx->overlap_.size() > new_size) ? (ctx->overlap_.size() - new_size) : 0; + // Copy the last new_size bytes to the beginning of the buffer. + for (size_t i = 0; (i < new_size) && (shift > 0); i++) { + ctx->overlap_[i] = ctx->overlap_[i + shift]; + } + ctx->capacity_ = new_size; + if (shift > 0) { + ctx->overlap_.resize(new_size); + } + } +} + +} // namespace Matcher +} // namespace Common +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/common/tap/tap_matcher.h b/source/extensions/common/matcher/matcher.h similarity index 59% rename from source/extensions/common/tap/tap_matcher.h rename to source/extensions/common/matcher/matcher.h index 55975cb6a70aa..4eecd25d3786a 100644 --- a/source/extensions/common/tap/tap_matcher.h +++ b/source/extensions/common/matcher/matcher.h @@ -1,21 +1,34 @@ #pragma once -#include "envoy/config/tap/v3/common.pb.h" +#include "envoy/config/common/matcher/v3/matcher.pb.h" +#include "common/buffer/buffer_impl.h" #include "common/http/header_utility.h" namespace Envoy { namespace Extensions { namespace Common { -namespace Tap { +namespace Matcher { class Matcher; using MatcherPtr = std::unique_ptr; /** - * Base class for all tap matchers. + * Base class for context used by individual matchers. + * The context may be required by matchers which are called multiple times + * and need to carry state between the calls. For example body matchers may + * store information how any bytes of the body have been already processed + * or what what has been already found in the body and what has yet to be found. + */ +class MatcherCtx { +public: + virtual ~MatcherCtx() = default; +}; + +/** + * Base class for all matchers. 
* - * A high level note on the design of tap matching which is different from other matching in Envoy + * A high level note on the design of matching which is different from other matching in Envoy * due to a requirement to support streaming matching (match as new data arrives versus * calculating the match given all available data at once). * - The matching system is composed of a constant matching configuration. This is essentially @@ -40,6 +53,7 @@ class Matcher { bool matches_{false}; // Does the matcher currently match? bool might_change_status_{true}; // Is it possible for matches_ to change in subsequent updates? + std::unique_ptr ctx_{}; // Context used by matchers to save interim context. }; using MatchStatusVector = std::vector; @@ -51,7 +65,7 @@ class Matcher { Matcher(const std::vector& matchers) // NOTE: This code assumes that the index for the matcher being constructed has already been // allocated, which is why my_index_ is set to size() - 1. See buildMatcher() in - // tap_matcher.cc. + // matcher.cc. : my_index_(matchers.size() - 1) {} virtual ~Matcher() = default; @@ -103,12 +117,30 @@ class Matcher { virtual void onHttpResponseTrailers(const Http::ResponseTrailerMap& response_trailers, MatchStatusVector& statuses) const PURE; + /** + * Update match status given HTTP request body. + * @param data supplies the request body. + * @param statuses supplies the per-stream-request match status vector which must be the same + * size as the match tree vector (see above). + */ + virtual void onRequestBody(const Buffer::Instance& data, MatchStatusVector& statuses) PURE; + + /** + * Update match status given HTTP response body. + * @param data supplies the response body. + * @param statuses supplies the per-stream-request match status vector which must be the same + * size as the match tree vector (see above). 
+ */ + virtual void onResponseBody(const Buffer::Instance& data, MatchStatusVector& statuses) PURE; + /** * @return whether given currently available information, the matcher matches. * @param statuses supplies the per-stream-request match status vector which must be the same * size as the match tree vector (see above). */ - MatchStatus matchStatus(const MatchStatusVector& statuses) const { return statuses[my_index_]; } + const MatchStatus& matchStatus(const MatchStatusVector& statuses) const { + return statuses[my_index_]; + } protected: const size_t my_index_; @@ -117,9 +149,9 @@ class Matcher { /** * Factory method to build a matcher given a match config. Calling this function may end * up recursively building many matchers, which will all be added to the passed in vector - * of matchers. See the comments in tap.h for the general structure of how tap matchers work. + * of matchers. See the comments in matcher.h for the general structure of how matchers work. */ -void buildMatcher(const envoy::config::tap::v3::MatchPredicate& match_config, +void buildMatcher(const envoy::config::common::matcher::v3::MatchPredicate& match_config, std::vector& matchers); /** @@ -129,7 +161,6 @@ class LogicMatcherBase : public Matcher { public: using Matcher::Matcher; - // Extensions::Common::Tap::Matcher void onNewStream(MatchStatusVector& statuses) const override { updateLocalStatus(statuses, [](Matcher& m, MatchStatusVector& statuses) { m.onNewStream(statuses); }); @@ -158,6 +189,16 @@ class LogicMatcherBase : public Matcher { m.onHttpResponseTrailers(response_trailers, statuses); }); } + void onRequestBody(const Buffer::Instance& data, MatchStatusVector& statuses) override { + updateLocalStatus(statuses, [&data](Matcher& m, MatchStatusVector& statuses) { + m.onRequestBody(data, statuses); + }); + } + void onResponseBody(const Buffer::Instance& data, MatchStatusVector& statuses) override { + updateLocalStatus(statuses, [&data](Matcher& m, MatchStatusVector& statuses) { + 
m.onResponseBody(data, statuses); + }); + } protected: using UpdateFunctor = std::function; @@ -172,7 +213,7 @@ class SetLogicMatcher : public LogicMatcherBase { public: enum class Type { And, Or }; - SetLogicMatcher(const envoy::config::tap::v3::MatchPredicate::MatchSet& configs, + SetLogicMatcher(const envoy::config::common::matcher::v3::MatchPredicate::MatchSet& configs, std::vector& matchers, Type type); private: @@ -188,7 +229,7 @@ class SetLogicMatcher : public LogicMatcherBase { */ class NotMatcher : public LogicMatcherBase { public: - NotMatcher(const envoy::config::tap::v3::MatchPredicate& config, + NotMatcher(const envoy::config::common::matcher::v3::MatchPredicate& config, std::vector& matchers); private: @@ -206,12 +247,13 @@ class SimpleMatcher : public Matcher { public: using Matcher::Matcher; - // Extensions::Common::Tap::Matcher void onNewStream(MatchStatusVector&) const override {} void onHttpRequestHeaders(const Http::RequestHeaderMap&, MatchStatusVector&) const override {} void onHttpRequestTrailers(const Http::RequestTrailerMap&, MatchStatusVector&) const override {} void onHttpResponseHeaders(const Http::ResponseHeaderMap&, MatchStatusVector&) const override {} void onHttpResponseTrailers(const Http::ResponseTrailerMap&, MatchStatusVector&) const override {} + void onRequestBody(const Buffer::Instance&, MatchStatusVector&) override {} + void onResponseBody(const Buffer::Instance&, MatchStatusVector&) override {} }; /** @@ -221,7 +263,6 @@ class AnyMatcher : public SimpleMatcher { public: using SimpleMatcher::SimpleMatcher; - // Extensions::Common::Tap::Matcher void onNewStream(MatchStatusVector& statuses) const override { statuses[my_index_].matches_ = true; statuses[my_index_].might_change_status_ = false; @@ -233,7 +274,7 @@ class AnyMatcher : public SimpleMatcher { */ class HttpHeaderMatcherBase : public SimpleMatcher { public: - HttpHeaderMatcherBase(const envoy::config::tap::v3::HttpHeadersMatch& config, + HttpHeaderMatcherBase(const 
envoy::config::common::matcher::v3::HttpHeadersMatch& config, const std::vector& matchers); protected: @@ -249,7 +290,6 @@ class HttpRequestHeadersMatcher : public HttpHeaderMatcherBase { public: using HttpHeaderMatcherBase::HttpHeaderMatcherBase; - // Extensions::Common::Tap::Matcher void onHttpRequestHeaders(const Http::RequestHeaderMap& request_headers, MatchStatusVector& statuses) const override { matchHeaders(request_headers, statuses); @@ -263,7 +303,6 @@ class HttpRequestTrailersMatcher : public HttpHeaderMatcherBase { public: using HttpHeaderMatcherBase::HttpHeaderMatcherBase; - // Extensions::Common::Tap::Matcher void onHttpRequestTrailers(const Http::RequestTrailerMap& request_trailers, MatchStatusVector& statuses) const override { matchHeaders(request_trailers, statuses); @@ -277,7 +316,6 @@ class HttpResponseHeadersMatcher : public HttpHeaderMatcherBase { public: using HttpHeaderMatcherBase::HttpHeaderMatcherBase; - // Extensions::Common::Tap::Matcher void onHttpResponseHeaders(const Http::ResponseHeaderMap& response_headers, MatchStatusVector& statuses) const override { matchHeaders(response_headers, statuses); @@ -291,14 +329,120 @@ class HttpResponseTrailersMatcher : public HttpHeaderMatcherBase { public: using HttpHeaderMatcherBase::HttpHeaderMatcherBase; - // Extensions::Common::Tap::Matcher void onHttpResponseTrailers(const Http::ResponseTrailerMap& response_trailers, MatchStatusVector& statuses) const override { matchHeaders(response_trailers, statuses); } }; -} // namespace Tap +/** + * Base class for body matchers. + */ +class HttpBodyMatcherBase : public SimpleMatcher { +public: + HttpBodyMatcherBase(const std::vector& matchers) : SimpleMatcher(matchers) {} + +protected: + // Limit search to specified number of bytes. + // Value equal to zero means no limit. 
+ uint32_t limit_{}; +}; + +/** + * Context is used by HttpGenericBodyMatcher to: + * - track how many bytes has been processed + * - track patterns which have been found + * - store last several seen bytes of the HTTP body (when pattern starts at the end of previous body + * chunk and continues at the beginning of the next body chunk) + */ +class HttpGenericBodyMatcherCtx : public MatcherCtx { +public: + HttpGenericBodyMatcherCtx(const std::shared_ptr>& patterns, + size_t overlap_size) + : patterns_(patterns) { + // Initialize overlap_ buffer's capacity to fit the longest pattern - 1. + // The length of the longest pattern is known and passed here as overlap_size. + patterns_index_.resize(patterns_->size()); + std::iota(patterns_index_.begin(), patterns_index_.end(), 0); + overlap_.reserve(overlap_size); + capacity_ = overlap_size; + } + ~HttpGenericBodyMatcherCtx() override = default; + + // The context is initialized per each http request. The patterns_ + // shared pointer attaches to matcher's list of patterns, so patterns + // can be referenced without copying data. + const std::shared_ptr> patterns_; + // List stores indexes of patterns in patterns_ shared memory which + // still need to be located in the body. When a pattern is found + // its index is removed from the list. + // When all patterns have been found, the list is empty. + std::list patterns_index_; + // Buffer to store the last bytes from previous body chunk(s). + // It will store only as many bytes as is the length of the longest + // pattern to be found minus 1. + // It is necessary to locate patterns which are spread across 2 or more + // body chunks. + std::vector overlap_; + // capacity_ tells how many bytes should be buffered. overlap_'s initial + // capacity is set to the length of the longest pattern - 1. As patterns + // are found, there is a possibility that not as many bytes are required to be buffered. 
+ // It must be tracked outside of vector, because vector::reserve does not + // change capacity when new value is lower than current capacity. + uint32_t capacity_{}; + // processed_bytes_ tracks how many bytes of HTTP body have been processed. + uint32_t processed_bytes_{}; +}; + +class HttpGenericBodyMatcher : public HttpBodyMatcherBase { +public: + HttpGenericBodyMatcher(const envoy::config::common::matcher::v3::HttpGenericBodyMatch& config, + const std::vector& matchers); + +protected: + void onBody(const Buffer::Instance&, MatchStatusVector&); + void onNewStream(MatchStatusVector& statuses) const override { + // Allocate a new context used for the new stream. + statuses[my_index_].ctx_ = + std::make_unique(patterns_, overlap_size_); + statuses[my_index_].matches_ = false; + statuses[my_index_].might_change_status_ = true; + } + bool locatePatternAcrossChunks(const std::string& pattern, const Buffer::Instance& data, + const HttpGenericBodyMatcherCtx* ctx); + void bufferLastBytes(const Buffer::Instance& data, HttpGenericBodyMatcherCtx* ctx); + + size_t calcLongestPatternSize(const std::list& indexes) const; + void resizeOverlapBuffer(HttpGenericBodyMatcherCtx* ctx); + +private: + // The following fields are initialized based on matcher config and are used + // by all HTTP matchers. + // List of strings which body must contain to get match. + std::shared_ptr> patterns_; + // Stores the length of the longest pattern. 
+ size_t overlap_size_{}; +}; + +class HttpRequestGenericBodyMatcher : public HttpGenericBodyMatcher { +public: + using HttpGenericBodyMatcher::HttpGenericBodyMatcher; + + void onRequestBody(const Buffer::Instance& data, MatchStatusVector& statuses) override { + onBody(data, statuses); + } +}; + +class HttpResponseGenericBodyMatcher : public HttpGenericBodyMatcher { +public: + using HttpGenericBodyMatcher::HttpGenericBodyMatcher; + + void onResponseBody(const Buffer::Instance& data, MatchStatusVector& statuses) override { + onBody(data, statuses); + } +}; + +} // namespace Matcher } // namespace Common } // namespace Extensions } // namespace Envoy diff --git a/source/extensions/common/proxy_protocol/BUILD b/source/extensions/common/proxy_protocol/BUILD index 755af8cae0d0f..7a2b9bf66d034 100644 --- a/source/extensions/common/proxy_protocol/BUILD +++ b/source/extensions/common/proxy_protocol/BUILD @@ -1,20 +1,24 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +envoy_extension_package() envoy_cc_library( name = "proxy_protocol_header_lib", srcs = ["proxy_protocol_header.cc"], hdrs = ["proxy_protocol_header.h"], + # This is used by the router, so considered core code. 
+ visibility = ["//visibility:public"], deps = [ "//include/envoy/buffer:buffer_interface", "//include/envoy/network:address_interface", + "//include/envoy/network:connection_interface", "//source/common/network:address_lib", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], ) diff --git a/source/extensions/common/proxy_protocol/proxy_protocol_header.cc b/source/extensions/common/proxy_protocol/proxy_protocol_header.cc index c6f9d0a9f0604..0342f3d1aff3c 100644 --- a/source/extensions/common/proxy_protocol/proxy_protocol_header.cc +++ b/source/extensions/common/proxy_protocol/proxy_protocol_header.cc @@ -35,6 +35,12 @@ void generateV1Header(const std::string& src_addr, const std::string& dst_addr, out.add(stream.str()); } +void generateV1Header(const Network::Address::Ip& source_address, + const Network::Address::Ip& dest_address, Buffer::Instance& out) { + generateV1Header(source_address.addressAsString(), dest_address.addressAsString(), + source_address.port(), dest_address.port(), source_address.version(), out); +} + void generateV2Header(const std::string& src_addr, const std::string& dst_addr, uint32_t src_port, uint32_t dst_port, Network::Address::IpVersion ip_version, Buffer::Instance& out) { @@ -95,6 +101,23 @@ void generateV2Header(const std::string& src_addr, const std::string& dst_addr, out.add(ports, 4); } +void generateV2Header(const Network::Address::Ip& source_address, + const Network::Address::Ip& dest_address, Buffer::Instance& out) { + generateV2Header(source_address.addressAsString(), dest_address.addressAsString(), + source_address.port(), dest_address.port(), source_address.version(), out); +} + +void generateProxyProtoHeader(const envoy::config::core::v3::ProxyProtocolConfig& config, + const Network::Connection& connection, Buffer::Instance& out) { + const Network::Address::Ip& dest_address = *connection.localAddress()->ip(); + const Network::Address::Ip& source_address = *connection.remoteAddress()->ip(); + if (config.version() == 
envoy::config::core::v3::ProxyProtocolConfig::V1) { + generateV1Header(source_address, dest_address, out); + } else if (config.version() == envoy::config::core::v3::ProxyProtocolConfig::V2) { + generateV2Header(source_address, dest_address, out); + } +} + void generateV2LocalHeader(Buffer::Instance& out) { out.add(PROXY_PROTO_V2_SIGNATURE, PROXY_PROTO_V2_SIGNATURE_LEN); const uint8_t addr_fam_protocol_and_length[4]{PROXY_PROTO_V2_VERSION << 4, 0, 0, 0}; @@ -104,4 +127,4 @@ void generateV2LocalHeader(Buffer::Instance& out) { } // namespace ProxyProtocol } // namespace Common } // namespace Extensions -} // namespace Envoy \ No newline at end of file +} // namespace Envoy diff --git a/source/extensions/common/proxy_protocol/proxy_protocol_header.h b/source/extensions/common/proxy_protocol/proxy_protocol_header.h index 81d9dc1f8951b..013c842ced20a 100644 --- a/source/extensions/common/proxy_protocol/proxy_protocol_header.h +++ b/source/extensions/common/proxy_protocol/proxy_protocol_header.h @@ -1,7 +1,9 @@ #pragma once #include "envoy/buffer/buffer.h" +#include "envoy/config/core/v3/proxy_protocol.pb.h" #include "envoy/network/address.h" +#include "envoy/network/connection.h" namespace Envoy { namespace Extensions { @@ -41,15 +43,25 @@ constexpr uint32_t PROXY_PROTO_V2_ADDR_LEN_UNIX = 216; void generateV1Header(const std::string& src_addr, const std::string& dst_addr, uint32_t src_port, uint32_t dst_port, Network::Address::IpVersion ip_version, Buffer::Instance& out); +void generateV1Header(const Network::Address::Ip& source_address, + const Network::Address::Ip& dest_address, Buffer::Instance& out); + // Generates the v2 PROXY protocol header and adds it to the specified buffer // TCP is assumed as the transport protocol void generateV2Header(const std::string& src_addr, const std::string& dst_addr, uint32_t src_port, uint32_t dst_port, Network::Address::IpVersion ip_version, Buffer::Instance& out); +void generateV2Header(const Network::Address::Ip& source_address, 
+ const Network::Address::Ip& dest_address, Buffer::Instance& out); + +// Generates the appropriate proxy proto header and appends it to the supplied buffer. +void generateProxyProtoHeader(const envoy::config::core::v3::ProxyProtocolConfig& config, + const Network::Connection& connection, Buffer::Instance& out); + // Generates the v2 PROXY protocol local command header and adds it to the specified buffer void generateV2LocalHeader(Buffer::Instance& out); } // namespace ProxyProtocol } // namespace Common } // namespace Extensions -} // namespace Envoy \ No newline at end of file +} // namespace Envoy diff --git a/source/extensions/common/redis/BUILD b/source/extensions/common/redis/BUILD index 7c51e3a80f72f..f7427e61ad2e8 100644 --- a/source/extensions/common/redis/BUILD +++ b/source/extensions/common/redis/BUILD @@ -1,16 +1,16 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_extension_package", +) + licenses(["notice"]) # Apache 2 # Redis proxy L4 network filter. Implements consistent hashing and observability for large redis # clusters. 
# Public docs: docs/root/configuration/network_filters/redis_proxy_filter.rst -load( - "//bazel:envoy_build_system.bzl", - "envoy_cc_library", - "envoy_package", -) - -envoy_package() +envoy_extension_package() envoy_cc_library( name = "cluster_refresh_manager_interface", diff --git a/source/extensions/common/redis/cluster_refresh_manager_impl.h b/source/extensions/common/redis/cluster_refresh_manager_impl.h index eaef07799cf8c..a62db60327f1b 100644 --- a/source/extensions/common/redis/cluster_refresh_manager_impl.h +++ b/source/extensions/common/redis/cluster_refresh_manager_impl.h @@ -93,7 +93,7 @@ class ClusterRefreshManagerImpl : public ClusterRefreshManager, Event::Dispatcher& main_thread_dispatcher_; Upstream::ClusterManager& cm_; TimeSource& time_source_; - std::map info_map_ GUARDED_BY(map_mutex_); + std::map info_map_ ABSL_GUARDED_BY(map_mutex_); Thread::MutexBasicLockable map_mutex_; }; diff --git a/source/extensions/common/sqlutils/BUILD b/source/extensions/common/sqlutils/BUILD new file mode 100644 index 0000000000000..f477e6a422080 --- /dev/null +++ b/source/extensions/common/sqlutils/BUILD @@ -0,0 +1,19 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_extension_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_extension_package() + +envoy_cc_library( + name = "sqlutils_lib", + srcs = ["sqlutils.cc"], + hdrs = ["sqlutils.h"], + external_deps = ["sqlparser"], + deps = [ + "//source/common/protobuf:utility_lib", + ], +) diff --git a/source/extensions/common/sqlutils/sqlutils.cc b/source/extensions/common/sqlutils/sqlutils.cc new file mode 100644 index 0000000000000..64b5438111be2 --- /dev/null +++ b/source/extensions/common/sqlutils/sqlutils.cc @@ -0,0 +1,49 @@ +#include "extensions/common/sqlutils/sqlutils.h" + +namespace Envoy { +namespace Extensions { +namespace Common { +namespace SQLUtils { + +bool SQLUtils::setMetadata(const std::string& query, const DecoderAttributes& attr, + ProtobufWkt::Struct& metadata) { + 
hsql::SQLParserResult result; + + hsql::SQLParser::parse(query, &result); + + if (!result.isValid()) { + return false; + } + + std::string database; + // Check if the attributes map contains database name. + const auto it = attr.find("database"); + if (it != attr.end()) { + database = absl::StrCat(".", it->second); + } + + auto& fields = *metadata.mutable_fields(); + + for (auto i = 0u; i < result.size(); ++i) { + if (result.getStatement(i)->type() == hsql::StatementType::kStmtShow) { + continue; + } + hsql::TableAccessMap table_access_map; + // Get names of accessed tables. + result.getStatement(i)->tablesAccessed(table_access_map); + for (auto& it : table_access_map) { + auto& operations = *fields[it.first + database].mutable_list_value(); + // For each table get names of operations performed on that table. + for (const auto& ot : it.second) { + operations.add_values()->set_string_value(ot); + } + } + } + + return true; +} + +} // namespace SQLUtils +} // namespace Common +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/common/sqlutils/sqlutils.h b/source/extensions/common/sqlutils/sqlutils.h new file mode 100644 index 0000000000000..4e0c29131d535 --- /dev/null +++ b/source/extensions/common/sqlutils/sqlutils.h @@ -0,0 +1,31 @@ +#include "common/protobuf/utility.h" + +#include "include/sqlparser/SQLParser.h" + +namespace Envoy { +namespace Extensions { +namespace Common { +namespace SQLUtils { + +class SQLUtils { +public: + using DecoderAttributes = std::map; + /** + * Method parses SQL query string and writes output to metadata. + * @param query supplies SQL statement. + * @param attr supplies attributes which cannot be extracted from SQL query but are + * required to create proper metadata. For example database name may be sent + * by a client when it initially connects to the server, not along each SQL query. + * @param metadata supplies placeholder where metadata should be written. 
+ * @return True if parsing was successful and False if parsing failed. + * If True was returned the metadata contains result of parsing. The results are + * stored in metadata.mutable_fields. + **/ + static bool setMetadata(const std::string& query, const DecoderAttributes& attr, + ProtobufWkt::Struct& metadata); +}; + +} // namespace SQLUtils +} // namespace Common +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/common/tap/BUILD b/source/extensions/common/tap/BUILD index ec9acb716fa4d..e127bf3aaa192 100644 --- a/source/extensions/common/tap/BUILD +++ b/source/extensions/common/tap/BUILD @@ -1,19 +1,19 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +envoy_extension_package() envoy_cc_library( name = "tap_interface", hdrs = ["tap.h"], deps = [ - ":tap_matcher", "//include/envoy/http:header_map_interface", + "//source/extensions/common/matcher:matcher_lib", "@envoy_api//envoy/config/tap/v3:pkg_cc_proto", "@envoy_api//envoy/data/tap/v3:pkg_cc_proto", ], @@ -25,23 +25,14 @@ envoy_cc_library( hdrs = ["tap_config_base.h"], deps = [ ":tap_interface", - ":tap_matcher", "//source/common/common:assert_lib", + "//source/common/common:hex_lib", + "//source/extensions/common/matcher:matcher_lib", "@envoy_api//envoy/config/tap/v3:pkg_cc_proto", "@envoy_api//envoy/data/tap/v3:pkg_cc_proto", ], ) -envoy_cc_library( - name = "tap_matcher", - srcs = ["tap_matcher.cc"], - hdrs = ["tap_matcher.h"], - deps = [ - "//source/common/http:header_utility_lib", - "@envoy_api//envoy/config/tap/v3:pkg_cc_proto", - ], -) - envoy_cc_library( name = "admin", srcs = ["admin.cc"], diff --git a/source/extensions/common/tap/admin.cc b/source/extensions/common/tap/admin.cc index b6c4449db1774..9dc6b9b08411e 100644 --- a/source/extensions/common/tap/admin.cc +++ b/source/extensions/common/tap/admin.cc @@ -110,7 +110,7 @@ 
void AdminHandler::AdminPerTapSinkHandle::submitTrace( std::shared_ptr shared_trace{std::move(trace)}; // The handle can be destroyed before the cross thread post is complete. Thus, we capture a // reference to our parent. - parent_.main_thread_dispatcher_.post([& parent = parent_, trace = shared_trace, format]() { + parent_.main_thread_dispatcher_.post([&parent = parent_, trace = shared_trace, format]() { if (!parent.attached_request_.has_value()) { return; } diff --git a/source/extensions/common/tap/admin.h b/source/extensions/common/tap/admin.h index c876e9f7fd765..a3cbb7f6e815a 100644 --- a/source/extensions/common/tap/admin.h +++ b/source/extensions/common/tap/admin.h @@ -1,11 +1,12 @@ #pragma once -#include "envoy/config/tap/v3/common.pb.h" #include "envoy/server/admin.h" #include "envoy/singleton/manager.h" #include "extensions/common/tap/tap.h" +#include "absl/container/node_hash_set.h" + namespace Envoy { namespace Extensions { namespace Common { @@ -80,7 +81,7 @@ class AdminHandler : public Singleton::Instance, Server::Admin& admin_; Event::Dispatcher& main_thread_dispatcher_; - std::unordered_map> config_id_map_; + absl::node_hash_map> config_id_map_; absl::optional attached_request_; }; diff --git a/source/extensions/common/tap/tap.h b/source/extensions/common/tap/tap.h index d91f7739c963e..9abf88d6965b3 100644 --- a/source/extensions/common/tap/tap.h +++ b/source/extensions/common/tap/tap.h @@ -5,7 +5,7 @@ #include "envoy/data/tap/v3/wrapper.pb.h" #include "envoy/http/header_map.h" -#include "extensions/common/tap/tap_matcher.h" +#include "extensions/common/matcher/matcher.h" #include "absl/strings/string_view.h" @@ -14,6 +14,8 @@ namespace Extensions { namespace Common { namespace Tap { +using Matcher = Envoy::Extensions::Common::Matcher::Matcher; + using TraceWrapperPtr = std::unique_ptr; inline TraceWrapperPtr makeTraceWrapper() { return std::make_unique(); @@ -138,6 +140,13 @@ class TapConfig { */ virtual const Matcher& rootMatcher() const PURE; + 
/** + * Non-const version of rootMatcher method. + */ + Matcher& rootMatcher() { + return const_cast(static_cast(*this).rootMatcher()); + } + /** * Return whether the tap session should run in streaming or buffering mode. */ diff --git a/source/extensions/common/tap/tap_config_base.cc b/source/extensions/common/tap/tap_config_base.cc index b9debd1720e66..7eea4b7fe7beb 100644 --- a/source/extensions/common/tap/tap_config_base.cc +++ b/source/extensions/common/tap/tap_config_base.cc @@ -5,9 +5,11 @@ #include "envoy/data/tap/v3/wrapper.pb.h" #include "common/common/assert.h" +#include "common/common/fmt.h" +#include "common/config/version_converter.h" #include "common/protobuf/utility.h" -#include "extensions/common/tap/tap_matcher.h" +#include "extensions/common/matcher/matcher.h" #include "absl/container/fixed_array.h" @@ -16,6 +18,8 @@ namespace Extensions { namespace Common { namespace Tap { +using namespace Matcher; + bool Utility::addBufferToProtoBytes(envoy::data::tap::v3::Body& output_body, uint32_t max_buffered_bytes, const Buffer::Instance& data, uint32_t buffer_start_offset, uint32_t buffer_length_to_copy) { @@ -72,7 +76,20 @@ TapConfigBaseImpl::TapConfigBaseImpl(envoy::config::tap::v3::TapConfig&& proto_c NOT_REACHED_GCOVR_EXCL_LINE; } - buildMatcher(proto_config.match_config(), matchers_); + envoy::config::common::matcher::v3::MatchPredicate match; + if (proto_config.has_match()) { + // Use the match field whenever it is set. + match = proto_config.match(); + } else if (proto_config.has_match_config()) { + // Fallback to use the deprecated match_config field and upgrade (wire cast) it to the new + // MatchPredicate which is backward compatible with the old MatchPredicate originally + // introduced in the Tap filter. 
+ Config::VersionConverter::upgrade(proto_config.match_config(), match); + } else { + throw EnvoyException(fmt::format("Neither match nor match_config is set in TapConfig: {}", + proto_config.DebugString())); + } + buildMatcher(match, matchers_); } const Matcher& TapConfigBaseImpl::rootMatcher() const { diff --git a/source/extensions/common/tap/tap_config_base.h b/source/extensions/common/tap/tap_config_base.h index 519f875620634..8a6014bc143c9 100644 --- a/source/extensions/common/tap/tap_config_base.h +++ b/source/extensions/common/tap/tap_config_base.h @@ -7,14 +7,17 @@ #include "envoy/data/tap/v3/common.pb.h" #include "envoy/data/tap/v3/wrapper.pb.h" +#include "extensions/common/matcher/matcher.h" #include "extensions/common/tap/tap.h" -#include "extensions/common/tap/tap_matcher.h" namespace Envoy { namespace Extensions { namespace Common { namespace Tap { +using Matcher = Envoy::Extensions::Common::Matcher::Matcher; +using MatcherPtr = Envoy::Extensions::Common::Matcher::MatcherPtr; + /** * Common utilities for tapping. 
*/ @@ -53,7 +56,9 @@ class Utility { const uint32_t start_offset_trim = std::min(start_offset, slice.len_); slice.len_ -= start_offset_trim; start_offset -= start_offset_trim; - slice.mem_ = static_cast(slice.mem_) + start_offset_trim; + if (slice.mem_ != nullptr) { + slice.mem_ = static_cast(slice.mem_) + start_offset_trim; + } const uint32_t final_length = std::min(length, slice.len_); slice.len_ = final_length; diff --git a/source/extensions/common/tap/tap_matcher.cc b/source/extensions/common/tap/tap_matcher.cc deleted file mode 100644 index dc78940835248..0000000000000 --- a/source/extensions/common/tap/tap_matcher.cc +++ /dev/null @@ -1,127 +0,0 @@ -#include "extensions/common/tap/tap_matcher.h" - -#include "envoy/config/tap/v3/common.pb.h" - -#include "common/common/assert.h" - -namespace Envoy { -namespace Extensions { -namespace Common { -namespace Tap { - -void buildMatcher(const envoy::config::tap::v3::MatchPredicate& match_config, - std::vector& matchers) { - // In order to store indexes and build our matcher tree inline, we must reserve a slot where - // the matcher we are about to create will go. This allows us to know its future index and still - // construct more of the tree in each called constructor (e.g., multiple OR/AND conditions). - // Once fully constructed, we move the matcher into its position below. See the tap matcher - // overview in tap.h for more information. 
- matchers.emplace_back(nullptr); - - MatcherPtr new_matcher; - switch (match_config.rule_case()) { - case envoy::config::tap::v3::MatchPredicate::RuleCase::kOrMatch: - new_matcher = std::make_unique(match_config.or_match(), matchers, - SetLogicMatcher::Type::Or); - break; - case envoy::config::tap::v3::MatchPredicate::RuleCase::kAndMatch: - new_matcher = std::make_unique(match_config.and_match(), matchers, - SetLogicMatcher::Type::And); - break; - case envoy::config::tap::v3::MatchPredicate::RuleCase::kNotMatch: - new_matcher = std::make_unique(match_config.not_match(), matchers); - break; - case envoy::config::tap::v3::MatchPredicate::RuleCase::kAnyMatch: - new_matcher = std::make_unique(matchers); - break; - case envoy::config::tap::v3::MatchPredicate::RuleCase::kHttpRequestHeadersMatch: - new_matcher = std::make_unique( - match_config.http_request_headers_match(), matchers); - break; - case envoy::config::tap::v3::MatchPredicate::RuleCase::kHttpRequestTrailersMatch: - new_matcher = std::make_unique( - match_config.http_request_trailers_match(), matchers); - break; - case envoy::config::tap::v3::MatchPredicate::RuleCase::kHttpResponseHeadersMatch: - new_matcher = std::make_unique( - match_config.http_response_headers_match(), matchers); - break; - case envoy::config::tap::v3::MatchPredicate::RuleCase::kHttpResponseTrailersMatch: - new_matcher = std::make_unique( - match_config.http_response_trailers_match(), matchers); - break; - default: - NOT_REACHED_GCOVR_EXCL_LINE; - } - - // Per above, move the matcher into its position. 
- matchers[new_matcher->index()] = std::move(new_matcher); -} - -SetLogicMatcher::SetLogicMatcher(const envoy::config::tap::v3::MatchPredicate::MatchSet& configs, - std::vector& matchers, Type type) - : LogicMatcherBase(matchers), matchers_(matchers), type_(type) { - for (const auto& config : configs.rules()) { - indexes_.push_back(matchers_.size()); - buildMatcher(config, matchers_); - } -} - -void SetLogicMatcher::updateLocalStatus(MatchStatusVector& statuses, - const UpdateFunctor& functor) const { - if (!statuses[my_index_].might_change_status_) { - return; - } - - for (size_t index : indexes_) { - functor(*matchers_[index], statuses); - } - - auto predicate = [&statuses](size_t index) { return statuses[index].matches_; }; - if (type_ == Type::And) { - statuses[my_index_].matches_ = std::all_of(indexes_.begin(), indexes_.end(), predicate); - } else { - ASSERT(type_ == Type::Or); - statuses[my_index_].matches_ = std::any_of(indexes_.begin(), indexes_.end(), predicate); - } - - // TODO(mattklein123): We can potentially short circuit this even further if we git a single false - // in an AND set or a single true in an OR set. 
- statuses[my_index_].might_change_status_ = - std::any_of(indexes_.begin(), indexes_.end(), - [&statuses](size_t index) { return statuses[index].might_change_status_; }); -} - -NotMatcher::NotMatcher(const envoy::config::tap::v3::MatchPredicate& config, - std::vector& matchers) - : LogicMatcherBase(matchers), matchers_(matchers), not_index_(matchers.size()) { - buildMatcher(config, matchers); -} - -void NotMatcher::updateLocalStatus(MatchStatusVector& statuses, - const UpdateFunctor& functor) const { - if (!statuses[my_index_].might_change_status_) { - return; - } - - functor(*matchers_[not_index_], statuses); - statuses[my_index_].matches_ = !statuses[not_index_].matches_; - statuses[my_index_].might_change_status_ = statuses[not_index_].might_change_status_; -} - -HttpHeaderMatcherBase::HttpHeaderMatcherBase(const envoy::config::tap::v3::HttpHeadersMatch& config, - const std::vector& matchers) - : SimpleMatcher(matchers), - headers_to_match_(Http::HeaderUtility::buildHeaderDataVector(config.headers())) {} - -void HttpHeaderMatcherBase::matchHeaders(const Http::HeaderMap& headers, - MatchStatusVector& statuses) const { - ASSERT(statuses[my_index_].might_change_status_); - statuses[my_index_].matches_ = Http::HeaderUtility::matchHeaders(headers, headers_to_match_); - statuses[my_index_].might_change_status_ = false; -} - -} // namespace Tap -} // namespace Common -} // namespace Extensions -} // namespace Envoy diff --git a/source/extensions/common/utility.h b/source/extensions/common/utility.h index c50677c898d95..60336fe5e4445 100644 --- a/source/extensions/common/utility.h +++ b/source/extensions/common/utility.h @@ -1,10 +1,9 @@ #pragma once -#include - #include "envoy/common/exception.h" #include "envoy/runtime/runtime.h" +#include "common/common/documentation_url.h" #include "common/common/logger.h" namespace Envoy { @@ -103,9 +102,8 @@ class ExtensionNameUtil { return fmt::format( "Using deprecated {}{}extension name '{}' for '{}'. 
This name will be removed from Envoy " - "soon. Please see " - "https://www.envoyproxy.io/docs/envoy/latest/intro/deprecated for details.", - extension_type, spacing, deprecated_name, canonical_name); + "soon. Please see {} for details.", + extension_type, spacing, deprecated_name, canonical_name, ENVOY_DOC_URL_VERSION_HISTORY); } static std::string fatalMessage(absl::string_view extension_type, @@ -113,11 +111,9 @@ class ExtensionNameUtil { absl::string_view canonical_name) { std::string err = message(extension_type, deprecated_name, canonical_name); - const char fatal_error[] = - " If continued use of this filter name is absolutely necessary, see " - "https://www.envoyproxy.io/docs/envoy/latest/configuration/operations/runtime" - "#using-runtime-overrides-for-deprecated-features for how to apply a temporary and " - "highly discouraged override."; + const char fatal_error[] = " If continued use of this filter name is absolutely necessary, " + "see " ENVOY_DOC_URL_RUNTIME_OVERRIDE_DEPRECATED " for " + "how to apply a temporary and highly discouraged override."; return err + fatal_error; } diff --git a/source/extensions/common/wasm/BUILD b/source/extensions/common/wasm/BUILD index 9333c679421f6..e594ac846209d 100644 --- a/source/extensions/common/wasm/BUILD +++ b/source/extensions/common/wasm/BUILD @@ -1,12 +1,12 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +envoy_extension_package() envoy_cc_library( name = "well_known_names", @@ -21,6 +21,7 @@ envoy_cc_library( hdrs = ["wasm_vm.h"], deps = [ ":well_known_names", + "//include/envoy/stats:stats_interface", "//source/common/common:minimal_logger_lib", ], ) @@ -29,6 +30,7 @@ envoy_cc_library( name = "wasm_vm_base", hdrs = ["wasm_vm_base.h"], deps = [ + ":wasm_vm_interface", "//source/common/stats:stats_lib", ], ) diff --git a/source/extensions/common/wasm/null/BUILD 
b/source/extensions/common/wasm/null/BUILD index 2bae8acd9f4ff..31a33d8f4d49d 100644 --- a/source/extensions/common/wasm/null/BUILD +++ b/source/extensions/common/wasm/null/BUILD @@ -1,12 +1,12 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +envoy_extension_package() envoy_cc_library( name = "null_vm_plugin_interface", diff --git a/source/extensions/common/wasm/null/null_vm.h b/source/extensions/common/wasm/null/null_vm.h index e0cf345c51b6e..9bdaad668f8bd 100644 --- a/source/extensions/common/wasm/null/null_vm.h +++ b/source/extensions/common/wasm/null/null_vm.h @@ -55,7 +55,7 @@ struct NullVm : public WasmVmBase { #undef _REGISTER_CALLBACK std::string plugin_name_; - std::unique_ptr plugin_; + NullVmPluginPtr plugin_; }; } // namespace Null diff --git a/source/extensions/common/wasm/null/null_vm_plugin.h b/source/extensions/common/wasm/null/null_vm_plugin.h index bc89271452c6b..1176c98c07c9c 100644 --- a/source/extensions/common/wasm/null/null_vm_plugin.h +++ b/source/extensions/common/wasm/null/null_vm_plugin.h @@ -1,5 +1,7 @@ #pragma once +#include + #include "envoy/config/typed_config.h" #include "extensions/common/wasm/wasm_vm.h" @@ -24,6 +26,8 @@ class NullVmPlugin { #undef _DEFIN_GET_FUNCTIONE }; +using NullVmPluginPtr = std::unique_ptr; + /** * Pseudo-WASM plugins using the NullVM should implement this factory and register via * Registry::registerFactory or the convenience class RegisterFactory. @@ -37,7 +41,7 @@ class NullVmPluginFactory : public Config::UntypedFactory { /** * Create an instance of the plugin. 
*/ - virtual std::unique_ptr create() const PURE; + virtual NullVmPluginPtr create() const PURE; }; } // namespace Null diff --git a/source/extensions/common/wasm/v8/BUILD b/source/extensions/common/wasm/v8/BUILD index 04d0954d1b2cd..4ff62d112f2ff 100644 --- a/source/extensions/common/wasm/v8/BUILD +++ b/source/extensions/common/wasm/v8/BUILD @@ -1,12 +1,12 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +envoy_extension_package() envoy_cc_library( name = "v8_lib", diff --git a/source/extensions/common/wasm/wasm_vm_base.h b/source/extensions/common/wasm/wasm_vm_base.h index a709534cba521..a780af5c8dcc4 100644 --- a/source/extensions/common/wasm/wasm_vm_base.h +++ b/source/extensions/common/wasm/wasm_vm_base.h @@ -37,7 +37,7 @@ class WasmVmBase : public WasmVm { stats_.active_.inc(); ENVOY_LOG(debug, "WasmVm created {} now active", runtime_, stats_.active_.value()); } - virtual ~WasmVmBase() { + ~WasmVmBase() override { stats_.active_.dec(); ENVOY_LOG(debug, "~WasmVm {} {} remaining active", runtime_, stats_.active_.value()); } diff --git a/source/extensions/compression/common/compressor/BUILD b/source/extensions/compression/common/compressor/BUILD new file mode 100644 index 0000000000000..db3d5c88ae16f --- /dev/null +++ b/source/extensions/compression/common/compressor/BUILD @@ -0,0 +1,19 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_extension_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_extension_package() + +envoy_cc_library( + name = "compressor_factory_base_lib", + hdrs = ["factory_base.h"], + deps = [ + "//include/envoy/compression/compressor:compressor_config_interface", + "//include/envoy/compression/compressor:compressor_factory_interface", + "//include/envoy/server:filter_config_interface", + ], +) diff --git 
a/source/extensions/compression/common/compressor/factory_base.h b/source/extensions/compression/common/compressor/factory_base.h new file mode 100644 index 0000000000000..472d754cb9cd5 --- /dev/null +++ b/source/extensions/compression/common/compressor/factory_base.h @@ -0,0 +1,45 @@ +#pragma once + +#include "envoy/compression/compressor/config.h" +#include "envoy/compression/compressor/factory.h" +#include "envoy/server/filter_config.h" + +namespace Envoy { +namespace Extensions { +namespace Compression { +namespace Common { +namespace Compressor { + +template +class CompressorLibraryFactoryBase + : public Envoy::Compression::Compressor::NamedCompressorLibraryConfigFactory { +public: + Envoy::Compression::Compressor::CompressorFactoryPtr + createCompressorFactoryFromProto(const Protobuf::Message& proto_config, + Server::Configuration::FactoryContext& context) override { + return createCompressorFactoryFromProtoTyped( + MessageUtil::downcastAndValidate(proto_config, + context.messageValidationVisitor())); + } + + ProtobufTypes::MessagePtr createEmptyConfigProto() override { + return std::make_unique(); + } + + std::string name() const override { return name_; } + +protected: + CompressorLibraryFactoryBase(const std::string& name) : name_(name) {} + +private: + virtual Envoy::Compression::Compressor::CompressorFactoryPtr + createCompressorFactoryFromProtoTyped(const ConfigProto&) PURE; + + const std::string name_; +}; + +} // namespace Compressor +} // namespace Common +} // namespace Compression +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/compression/common/decompressor/BUILD b/source/extensions/compression/common/decompressor/BUILD new file mode 100644 index 0000000000000..0d69c90a8acdc --- /dev/null +++ b/source/extensions/compression/common/decompressor/BUILD @@ -0,0 +1,17 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_extension_package", +) + +licenses(["notice"]) # Apache 2 + 
+envoy_extension_package() + +envoy_cc_library( + name = "decompressor_factory_base_lib", + hdrs = ["factory_base.h"], + deps = [ + "//include/envoy/compression/decompressor:decompressor_config_interface", + ], +) diff --git a/source/extensions/compression/common/decompressor/factory_base.h b/source/extensions/compression/common/decompressor/factory_base.h new file mode 100644 index 0000000000000..7bf3c1571f7f6 --- /dev/null +++ b/source/extensions/compression/common/decompressor/factory_base.h @@ -0,0 +1,45 @@ +#pragma once + +#include "envoy/compression/decompressor/config.h" + +namespace Envoy { +namespace Extensions { +namespace Compression { +namespace Common { +namespace Decompressor { + +template +class DecompressorLibraryFactoryBase + : public Envoy::Compression::Decompressor::NamedDecompressorLibraryConfigFactory { +public: + Envoy::Compression::Decompressor::DecompressorFactoryPtr + createDecompressorFactoryFromProto(const Protobuf::Message& proto_config, + Server::Configuration::FactoryContext& context) override { + return createDecompressorFactoryFromProtoTyped( + MessageUtil::downcastAndValidate(proto_config, + context.messageValidationVisitor()), + context); + } + + ProtobufTypes::MessagePtr createEmptyConfigProto() override { + return std::make_unique(); + } + + std::string name() const override { return name_; } + +protected: + DecompressorLibraryFactoryBase(const std::string& name) : name_(name) {} + +private: + virtual Envoy::Compression::Decompressor::DecompressorFactoryPtr + createDecompressorFactoryFromProtoTyped(const ConfigProto& proto_config, + Server::Configuration::FactoryContext& context) PURE; + + const std::string name_; +}; + +} // namespace Decompressor +} // namespace Common +} // namespace Compression +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/compression/gzip/common/BUILD b/source/extensions/compression/gzip/common/BUILD new file mode 100644 index 0000000000000..5c301a6a9abe8 --- /dev/null +++ 
b/source/extensions/compression/gzip/common/BUILD @@ -0,0 +1,19 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_extension_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_extension_package() + +envoy_cc_library( + name = "zlib_base_lib", + srcs = ["base.cc"], + hdrs = ["base.h"], + external_deps = ["zlib"], + deps = [ + "//source/common/buffer:buffer_lib", + ], +) diff --git a/source/common/common/zlib/base.cc b/source/extensions/compression/gzip/common/base.cc similarity index 92% rename from source/common/common/zlib/base.cc rename to source/extensions/compression/gzip/common/base.cc index 5336f35f87355..b3843c1aec1ff 100644 --- a/source/common/common/zlib/base.cc +++ b/source/extensions/compression/gzip/common/base.cc @@ -1,4 +1,4 @@ -#include "common/common/zlib/base.h" +#include "extensions/compression/gzip/common/base.h" namespace Envoy { namespace Zlib { diff --git a/source/common/common/zlib/base.h b/source/extensions/compression/gzip/common/base.h similarity index 92% rename from source/common/common/zlib/base.h rename to source/extensions/compression/gzip/common/base.h index 4f427fb909858..f8b89cb253350 100644 --- a/source/common/common/zlib/base.h +++ b/source/extensions/compression/gzip/common/base.h @@ -12,6 +12,7 @@ namespace Zlib { /** * Shared code between the compressor and the decompressor. */ +// TODO(junr03): move to extensions tree once the compressor side is moved to extensions. 
class Base { public: Base(uint64_t chunk_size, std::function zstream_deleter); diff --git a/source/extensions/compression/gzip/compressor/BUILD b/source/extensions/compression/gzip/compressor/BUILD new file mode 100644 index 0000000000000..e8918d1fcbc8d --- /dev/null +++ b/source/extensions/compression/gzip/compressor/BUILD @@ -0,0 +1,37 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", + "envoy_cc_library", + "envoy_extension_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_extension_package() + +envoy_cc_library( + name = "compressor_lib", + srcs = ["zlib_compressor_impl.cc"], + hdrs = ["zlib_compressor_impl.h"], + external_deps = ["zlib"], + deps = [ + "//include/envoy/compression/compressor:compressor_interface", + "//source/common/buffer:buffer_lib", + "//source/common/common:assert_lib", + "//source/extensions/compression/gzip/common:zlib_base_lib", + ], +) + +envoy_cc_extension( + name = "config", + srcs = ["config.cc"], + hdrs = ["config.h"], + security_posture = "robust_to_untrusted_downstream", + deps = [ + ":compressor_lib", + "//source/common/http:headers_lib", + "//source/extensions/compression/common/compressor:compressor_factory_base_lib", + "//source/extensions/filters/http:well_known_names", + "@envoy_api//envoy/extensions/compression/gzip/compressor/v3:pkg_cc_proto", + ], +) diff --git a/source/extensions/compression/gzip/compressor/config.cc b/source/extensions/compression/gzip/compressor/config.cc new file mode 100644 index 0000000000000..0971a9a905864 --- /dev/null +++ b/source/extensions/compression/gzip/compressor/config.cc @@ -0,0 +1,99 @@ +#include "extensions/compression/gzip/compressor/config.h" + +namespace Envoy { +namespace Extensions { +namespace Compression { +namespace Gzip { +namespace Compressor { + +namespace { +// Default zlib memory level. +const uint64_t DefaultMemoryLevel = 5; + +// Default and maximum compression window size. 
+const uint64_t DefaultWindowBits = 12; + +// When logical OR'ed to window bits, this sets a gzip header and trailer around the compressed +// data. +const uint64_t GzipHeaderValue = 16; + +// Default zlib chunk size. +const uint32_t DefaultChunkSize = 4096; +} // namespace + +GzipCompressorFactory::GzipCompressorFactory( + const envoy::extensions::compression::gzip::compressor::v3::Gzip& gzip) + : compression_level_(compressionLevelEnum(gzip.compression_level())), + compression_strategy_(compressionStrategyEnum(gzip.compression_strategy())), + memory_level_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(gzip, memory_level, DefaultMemoryLevel)), + window_bits_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(gzip, window_bits, DefaultWindowBits) | + GzipHeaderValue), + chunk_size_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(gzip, chunk_size, DefaultChunkSize)) {} + +ZlibCompressorImpl::CompressionLevel GzipCompressorFactory::compressionLevelEnum( + envoy::extensions::compression::gzip::compressor::v3::Gzip::CompressionLevel + compression_level) { + switch (compression_level) { + case envoy::extensions::compression::gzip::compressor::v3::Gzip::BEST_SPEED: + return ZlibCompressorImpl::CompressionLevel::Speed; + case envoy::extensions::compression::gzip::compressor::v3::Gzip::COMPRESSION_LEVEL_2: + return ZlibCompressorImpl::CompressionLevel::Level2; + case envoy::extensions::compression::gzip::compressor::v3::Gzip::COMPRESSION_LEVEL_3: + return ZlibCompressorImpl::CompressionLevel::Level3; + case envoy::extensions::compression::gzip::compressor::v3::Gzip::COMPRESSION_LEVEL_4: + return ZlibCompressorImpl::CompressionLevel::Level4; + case envoy::extensions::compression::gzip::compressor::v3::Gzip::COMPRESSION_LEVEL_5: + return ZlibCompressorImpl::CompressionLevel::Level5; + case envoy::extensions::compression::gzip::compressor::v3::Gzip::COMPRESSION_LEVEL_6: + return ZlibCompressorImpl::CompressionLevel::Level6; + case envoy::extensions::compression::gzip::compressor::v3::Gzip::COMPRESSION_LEVEL_7: + return 
ZlibCompressorImpl::CompressionLevel::Level7; + case envoy::extensions::compression::gzip::compressor::v3::Gzip::COMPRESSION_LEVEL_8: + return ZlibCompressorImpl::CompressionLevel::Level8; + case envoy::extensions::compression::gzip::compressor::v3::Gzip::BEST_COMPRESSION: + return ZlibCompressorImpl::CompressionLevel::Best; + default: + return ZlibCompressorImpl::CompressionLevel::Standard; + } +} + +ZlibCompressorImpl::CompressionStrategy GzipCompressorFactory::compressionStrategyEnum( + envoy::extensions::compression::gzip::compressor::v3::Gzip::CompressionStrategy + compression_strategy) { + switch (compression_strategy) { + case envoy::extensions::compression::gzip::compressor::v3::Gzip::FILTERED: + return ZlibCompressorImpl::CompressionStrategy::Filtered; + case envoy::extensions::compression::gzip::compressor::v3::Gzip::FIXED: + return ZlibCompressorImpl::CompressionStrategy::Fixed; + case envoy::extensions::compression::gzip::compressor::v3::Gzip::HUFFMAN_ONLY: + return ZlibCompressorImpl::CompressionStrategy::Huffman; + case envoy::extensions::compression::gzip::compressor::v3::Gzip::RLE: + return ZlibCompressorImpl::CompressionStrategy::Rle; + default: + return ZlibCompressorImpl::CompressionStrategy::Standard; + } +} + +Envoy::Compression::Compressor::CompressorPtr GzipCompressorFactory::createCompressor() { + auto compressor = std::make_unique(chunk_size_); + compressor->init(compression_level_, compression_strategy_, window_bits_, memory_level_); + return compressor; +} + +Envoy::Compression::Compressor::CompressorFactoryPtr +GzipCompressorLibraryFactory::createCompressorFactoryFromProtoTyped( + const envoy::extensions::compression::gzip::compressor::v3::Gzip& proto_config) { + return std::make_unique(proto_config); +} + +/** + * Static registration for the gzip compressor library. @see NamedCompressorLibraryConfigFactory. 
+ */ +REGISTER_FACTORY(GzipCompressorLibraryFactory, + Envoy::Compression::Compressor::NamedCompressorLibraryConfigFactory); + +} // namespace Compressor +} // namespace Gzip +} // namespace Compression +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/compression/gzip/compressor/config.h b/source/extensions/compression/gzip/compressor/config.h new file mode 100644 index 0000000000000..2fcee31020b30 --- /dev/null +++ b/source/extensions/compression/gzip/compressor/config.h @@ -0,0 +1,71 @@ +#pragma once + +#include "envoy/compression/compressor/factory.h" +#include "envoy/extensions/compression/gzip/compressor/v3/gzip.pb.h" +#include "envoy/extensions/compression/gzip/compressor/v3/gzip.pb.validate.h" + +#include "common/http/headers.h" + +#include "extensions/compression/common/compressor/factory_base.h" +#include "extensions/compression/gzip/compressor/zlib_compressor_impl.h" +#include "extensions/filters/http/well_known_names.h" + +namespace Envoy { +namespace Extensions { +namespace Compression { +namespace Gzip { +namespace Compressor { + +namespace { + +const std::string& gzipStatsPrefix() { CONSTRUCT_ON_FIRST_USE(std::string, "gzip."); } +const std::string& gzipExtensionName() { + CONSTRUCT_ON_FIRST_USE(std::string, "envoy.compression.gzip.compressor"); +} + +} // namespace + +class GzipCompressorFactory : public Envoy::Compression::Compressor::CompressorFactory { +public: + GzipCompressorFactory(const envoy::extensions::compression::gzip::compressor::v3::Gzip& gzip); + + // Envoy::Compression::Compressor::CompressorFactory + Envoy::Compression::Compressor::CompressorPtr createCompressor() override; + const std::string& statsPrefix() const override { return gzipStatsPrefix(); } + const std::string& contentEncoding() const override { + return Http::CustomHeaders::get().ContentEncodingValues.Gzip; + } + +private: + static ZlibCompressorImpl::CompressionLevel + 
compressionLevelEnum(envoy::extensions::compression::gzip::compressor::v3::Gzip::CompressionLevel + compression_level); + static ZlibCompressorImpl::CompressionStrategy compressionStrategyEnum( + envoy::extensions::compression::gzip::compressor::v3::Gzip::CompressionStrategy + compression_strategy); + + ZlibCompressorImpl::CompressionLevel compression_level_; + ZlibCompressorImpl::CompressionStrategy compression_strategy_; + const int32_t memory_level_; + const int32_t window_bits_; + const uint32_t chunk_size_; +}; + +class GzipCompressorLibraryFactory + : public Compression::Common::Compressor::CompressorLibraryFactoryBase< + envoy::extensions::compression::gzip::compressor::v3::Gzip> { +public: + GzipCompressorLibraryFactory() : CompressorLibraryFactoryBase(gzipExtensionName()) {} + +private: + Envoy::Compression::Compressor::CompressorFactoryPtr createCompressorFactoryFromProtoTyped( + const envoy::extensions::compression::gzip::compressor::v3::Gzip& config) override; +}; + +DECLARE_FACTORY(GzipCompressorLibraryFactory); + +} // namespace Compressor +} // namespace Gzip +} // namespace Compression +} // namespace Extensions +} // namespace Envoy diff --git a/source/common/compressor/zlib_compressor_impl.cc b/source/extensions/compression/gzip/compressor/zlib_compressor_impl.cc similarity index 85% rename from source/common/compressor/zlib_compressor_impl.cc rename to source/extensions/compression/gzip/compressor/zlib_compressor_impl.cc index 2f44a5da1a90c..432c36e970150 100644 --- a/source/common/compressor/zlib_compressor_impl.cc +++ b/source/extensions/compression/gzip/compressor/zlib_compressor_impl.cc @@ -1,4 +1,4 @@ -#include "common/compressor/zlib_compressor_impl.h" +#include "extensions/compression/gzip/compressor/zlib_compressor_impl.h" #include @@ -9,6 +9,9 @@ #include "absl/container/fixed_array.h" namespace Envoy { +namespace Extensions { +namespace Compression { +namespace Gzip { namespace Compressor { ZlibCompressorImpl::ZlibCompressorImpl() : 
ZlibCompressorImpl(4096) {} @@ -34,7 +37,8 @@ void ZlibCompressorImpl::init(CompressionLevel comp_level, CompressionStrategy c initialized_ = true; } -void ZlibCompressorImpl::compress(Buffer::Instance& buffer, State state) { +void ZlibCompressorImpl::compress(Buffer::Instance& buffer, + Envoy::Compression::Compressor::State state) { for (const Buffer::RawSlice& input_slice : buffer.getRawSlices()) { zstream_ptr_->avail_in = input_slice.len_; zstream_ptr_->next_in = static_cast(input_slice.mem_); @@ -46,7 +50,7 @@ void ZlibCompressorImpl::compress(Buffer::Instance& buffer, State state) { buffer.drain(input_slice.len_); } - process(buffer, state == State::Finish ? Z_FINISH : Z_SYNC_FLUSH); + process(buffer, state == Envoy::Compression::Compressor::State::Finish ? Z_FINISH : Z_SYNC_FLUSH); } bool ZlibCompressorImpl::deflateNext(int64_t flush_state) { @@ -81,4 +85,7 @@ void ZlibCompressorImpl::process(Buffer::Instance& output_buffer, int64_t flush_ } } // namespace Compressor +} // namespace Gzip +} // namespace Compression +} // namespace Extensions } // namespace Envoy diff --git a/source/common/compressor/zlib_compressor_impl.h b/source/extensions/compression/gzip/compressor/zlib_compressor_impl.h similarity index 74% rename from source/common/compressor/zlib_compressor_impl.h rename to source/extensions/compression/gzip/compressor/zlib_compressor_impl.h index 396e7ff250aa7..deddf3aac37a5 100644 --- a/source/common/compressor/zlib_compressor_impl.h +++ b/source/extensions/compression/gzip/compressor/zlib_compressor_impl.h @@ -1,18 +1,21 @@ #pragma once -#include "envoy/compressor/compressor.h" +#include "envoy/compression/compressor/compressor.h" -#include "common/common/zlib/base.h" +#include "extensions/compression/gzip/common/base.h" #include "zlib.h" namespace Envoy { +namespace Extensions { +namespace Compression { +namespace Gzip { namespace Compressor { /** * Implementation of compressor's interface. 
*/ -class ZlibCompressorImpl : public Zlib::Base, public Compressor { +class ZlibCompressorImpl : public Zlib::Base, public Envoy::Compression::Compressor::Compressor { public: ZlibCompressorImpl(); @@ -30,11 +33,22 @@ class ZlibCompressorImpl : public Zlib::Base, public Compressor { * Enum values used to set compression level during initialization. * best: gives best compression. * speed: gives best performance. + * levelX: allows to adjust trad-offs more precisely - from level1 (best speed, but very + * low compression ratio) to level9 (best compression, but low speed). * standard: requests a default compromise between speed and compression. (default) @see zlib * manual. */ enum class CompressionLevel : int64_t { Best = Z_BEST_COMPRESSION, + Level1 = 1, + Level2 = 2, + Level3 = 3, + Level4 = 4, + Level5 = 5, + Level6 = 6, + Level7 = 7, + Level8 = 8, + Level9 = 9, Speed = Z_BEST_SPEED, Standard = Z_DEFAULT_COMPRESSION, }; @@ -42,12 +56,14 @@ class ZlibCompressorImpl : public Zlib::Base, public Compressor { /** * Enum values are used for setting the compression algorithm strategy. * filtered: used for data produced by a filter. (or predictor) @see Z_FILTERED (zlib manual) + * fixed: disable dynamic Huffman codes. @see Z_FIXED (zlib manual) * huffman: used to enforce Huffman encoding. @see RFC 1951 * rle: used to limit match distances to one. (Run-length encoding) * standard: used for normal data. (default) @see Z_DEFAULT_STRATEGY in zlib manual. 
*/ enum class CompressionStrategy : uint64_t { Filtered = Z_FILTERED, + Fixed = Z_FIXED, Huffman = Z_HUFFMAN_ONLY, Rle = Z_RLE, Standard = Z_DEFAULT_STRATEGY, @@ -66,8 +82,8 @@ class ZlibCompressorImpl : public Zlib::Base, public Compressor { void init(CompressionLevel level, CompressionStrategy strategy, int64_t window_bits, uint64_t memory_level); - // Compressor - void compress(Buffer::Instance& buffer, State state) override; + // Compression::Compressor::Compressor + void compress(Buffer::Instance& buffer, Envoy::Compression::Compressor::State state) override; private: bool deflateNext(int64_t flush_state); @@ -75,4 +91,7 @@ class ZlibCompressorImpl : public Zlib::Base, public Compressor { }; } // namespace Compressor +} // namespace Gzip +} // namespace Compression +} // namespace Extensions } // namespace Envoy diff --git a/source/extensions/compression/gzip/decompressor/BUILD b/source/extensions/compression/gzip/decompressor/BUILD new file mode 100644 index 0000000000000..b4c6fb375d459 --- /dev/null +++ b/source/extensions/compression/gzip/decompressor/BUILD @@ -0,0 +1,39 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", + "envoy_cc_library", + "envoy_extension_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_extension_package() + +envoy_cc_library( + name = "zlib_decompressor_impl_lib", + srcs = ["zlib_decompressor_impl.cc"], + hdrs = ["zlib_decompressor_impl.h"], + external_deps = ["zlib"], + deps = [ + "//include/envoy/compression/decompressor:decompressor_interface", + "//include/envoy/stats:stats_interface", + "//include/envoy/stats:stats_macros", + "//source/common/buffer:buffer_lib", + "//source/common/common:assert_lib", + "//source/common/common:minimal_logger_lib", + "//source/extensions/compression/gzip/common:zlib_base_lib", + ], +) + +envoy_cc_extension( + name = "config", + srcs = ["config.cc"], + hdrs = ["config.h"], + security_posture = "robust_to_untrusted_downstream", + deps = [ + ":zlib_decompressor_impl_lib", + 
"//source/common/http:headers_lib", + "//source/extensions/compression/common/decompressor:decompressor_factory_base_lib", + "@envoy_api//envoy/extensions/compression/gzip/decompressor/v3:pkg_cc_proto", + ], +) diff --git a/source/extensions/compression/gzip/decompressor/config.cc b/source/extensions/compression/gzip/decompressor/config.cc new file mode 100644 index 0000000000000..f2b845de14334 --- /dev/null +++ b/source/extensions/compression/gzip/decompressor/config.cc @@ -0,0 +1,47 @@ +#include "extensions/compression/gzip/decompressor/config.h" + +namespace Envoy { +namespace Extensions { +namespace Compression { +namespace Gzip { +namespace Decompressor { + +namespace { +const uint32_t DefaultWindowBits = 12; +const uint32_t DefaultChunkSize = 4096; +// When logical OR'ed to window bits, this tells zlib library to decompress gzip data per: +// inflateInit2 in https://www.zlib.net/manual.html +const uint32_t GzipHeaderValue = 16; +} // namespace + +GzipDecompressorFactory::GzipDecompressorFactory( + const envoy::extensions::compression::gzip::decompressor::v3::Gzip& gzip, Stats::Scope& scope) + : scope_(scope), + window_bits_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(gzip, window_bits, DefaultWindowBits) | + GzipHeaderValue), + chunk_size_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(gzip, chunk_size, DefaultChunkSize)) {} + +Envoy::Compression::Decompressor::DecompressorPtr +GzipDecompressorFactory::createDecompressor(const std::string& stats_prefix) { + auto decompressor = std::make_unique(scope_, stats_prefix, chunk_size_); + decompressor->init(window_bits_); + return decompressor; +} + +Envoy::Compression::Decompressor::DecompressorFactoryPtr +GzipDecompressorLibraryFactory::createDecompressorFactoryFromProtoTyped( + const envoy::extensions::compression::gzip::decompressor::v3::Gzip& proto_config, + Server::Configuration::FactoryContext& context) { + return std::make_unique(proto_config, context.scope()); +} + +/** + * Static registration for the gzip decompressor. 
@see NamedDecompressorLibraryConfigFactory. + */ +REGISTER_FACTORY(GzipDecompressorLibraryFactory, + Envoy::Compression::Decompressor::NamedDecompressorLibraryConfigFactory); +} // namespace Decompressor +} // namespace Gzip +} // namespace Compression +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/compression/gzip/decompressor/config.h b/source/extensions/compression/gzip/decompressor/config.h new file mode 100644 index 0000000000000..34c9ca11bf57b --- /dev/null +++ b/source/extensions/compression/gzip/decompressor/config.h @@ -0,0 +1,63 @@ +#pragma once + +#include "envoy/compression/decompressor/config.h" +#include "envoy/extensions/compression/gzip/decompressor/v3/gzip.pb.h" +#include "envoy/extensions/compression/gzip/decompressor/v3/gzip.pb.validate.h" + +#include "common/http/headers.h" + +#include "extensions/compression/common/decompressor/factory_base.h" +#include "extensions/compression/gzip/decompressor/zlib_decompressor_impl.h" + +namespace Envoy { +namespace Extensions { +namespace Compression { +namespace Gzip { +namespace Decompressor { + +namespace { +const std::string& gzipStatsPrefix() { CONSTRUCT_ON_FIRST_USE(std::string, "gzip."); } +const std::string& gzipExtensionName() { + CONSTRUCT_ON_FIRST_USE(std::string, "envoy.compression.gzip.decompressor"); +} + +} // namespace + +class GzipDecompressorFactory : public Envoy::Compression::Decompressor::DecompressorFactory { +public: + GzipDecompressorFactory(const envoy::extensions::compression::gzip::decompressor::v3::Gzip& gzip, + Stats::Scope& scope); + + // Envoy::Compression::Decompressor::DecompressorFactory + Envoy::Compression::Decompressor::DecompressorPtr + createDecompressor(const std::string& stats_prefix) override; + const std::string& statsPrefix() const override { return gzipStatsPrefix(); } + const std::string& contentEncoding() const override { + return Http::CustomHeaders::get().ContentEncodingValues.Gzip; + } + +private: + Stats::Scope& scope_; + 
const int32_t window_bits_; + const uint32_t chunk_size_; +}; + +class GzipDecompressorLibraryFactory + : public Common::Decompressor::DecompressorLibraryFactoryBase< + envoy::extensions::compression::gzip::decompressor::v3::Gzip> { +public: + GzipDecompressorLibraryFactory() : DecompressorLibraryFactoryBase(gzipExtensionName()) {} + +private: + Envoy::Compression::Decompressor::DecompressorFactoryPtr createDecompressorFactoryFromProtoTyped( + const envoy::extensions::compression::gzip::decompressor::v3::Gzip& proto_config, + Server::Configuration::FactoryContext& context) override; +}; + +DECLARE_FACTORY(GzipDecompressorLibraryFactory); + +} // namespace Decompressor +} // namespace Gzip +} // namespace Compression +} // namespace Extensions +} // namespace Envoy diff --git a/source/common/decompressor/zlib_decompressor_impl.cc b/source/extensions/compression/gzip/decompressor/zlib_decompressor_impl.cc similarity index 56% rename from source/common/decompressor/zlib_decompressor_impl.cc rename to source/extensions/compression/gzip/decompressor/zlib_decompressor_impl.cc index 55dffc6d36091..9066af8f0426f 100644 --- a/source/common/decompressor/zlib_decompressor_impl.cc +++ b/source/extensions/compression/gzip/decompressor/zlib_decompressor_impl.cc @@ -1,4 +1,6 @@ -#include "common/decompressor/zlib_decompressor_impl.h" +#include "extensions/compression/gzip/decompressor/zlib_decompressor_impl.h" + +#include #include @@ -9,15 +11,22 @@ #include "absl/container/fixed_array.h" namespace Envoy { +namespace Extensions { +namespace Compression { +namespace Gzip { namespace Decompressor { -ZlibDecompressorImpl::ZlibDecompressorImpl() : ZlibDecompressorImpl(4096) {} +ZlibDecompressorImpl::ZlibDecompressorImpl(Stats::Scope& scope, const std::string& stats_prefix) + : ZlibDecompressorImpl(scope, stats_prefix, 4096) {} -ZlibDecompressorImpl::ZlibDecompressorImpl(uint64_t chunk_size) - : Zlib::Base(chunk_size, [](z_stream* z) { - inflateEnd(z); - delete z; - }) { 
+ZlibDecompressorImpl::ZlibDecompressorImpl(Stats::Scope& scope, const std::string& stats_prefix, + uint64_t chunk_size) + : Zlib::Base(chunk_size, + [](z_stream* z) { + inflateEnd(z); + delete z; + }), + stats_(generateStats(stats_prefix, scope)) { zstream_ptr_->zalloc = Z_NULL; zstream_ptr_->zfree = Z_NULL; zstream_ptr_->opaque = Z_NULL; @@ -64,15 +73,42 @@ bool ZlibDecompressorImpl::inflateNext() { if (result < 0) { decompression_error_ = result; - ENVOY_LOG( - trace, - "zlib decompression error: {}. Error codes are defined in https://www.zlib.net/manual.html", - result); + ENVOY_LOG(trace, + "zlib decompression error: {}, msg: {}. Error codes are defined in " + "https://www.zlib.net/manual.html", + result, zstream_ptr_->msg); + chargeErrorStats(result); return false; } return true; } +void ZlibDecompressorImpl::chargeErrorStats(const int result) { + switch (result) { + case Z_ERRNO: + stats_.zlib_errno_.inc(); + break; + case Z_STREAM_ERROR: + stats_.zlib_stream_error_.inc(); + break; + case Z_DATA_ERROR: + stats_.zlib_data_error_.inc(); + break; + case Z_MEM_ERROR: + stats_.zlib_mem_error_.inc(); + break; + case Z_BUF_ERROR: + stats_.zlib_buf_error_.inc(); + break; + case Z_VERSION_ERROR: + stats_.zlib_version_error_.inc(); + break; + } +} + } // namespace Decompressor +} // namespace Gzip +} // namespace Compression +} // namespace Extensions } // namespace Envoy diff --git a/source/extensions/compression/gzip/decompressor/zlib_decompressor_impl.h b/source/extensions/compression/gzip/decompressor/zlib_decompressor_impl.h new file mode 100644 index 0000000000000..ec20b8c8dbcaa --- /dev/null +++ b/source/extensions/compression/gzip/decompressor/zlib_decompressor_impl.h @@ -0,0 +1,91 @@ +#pragma once + +#include "envoy/compression/decompressor/decompressor.h" +#include "envoy/stats/scope.h" +#include "envoy/stats/stats_macros.h" + +#include "common/common/logger.h" + +#include "extensions/compression/gzip/common/base.h" + +#include "zlib.h" + +namespace Envoy { 
+namespace Extensions { +namespace Compression { +namespace Gzip { +namespace Decompressor { + +/** + * All zlib decompressor stats. @see stats_macros.h + */ +#define ALL_ZLIB_DECOMPRESSOR_STATS(COUNTER) \ + COUNTER(zlib_errno) \ + COUNTER(zlib_stream_error) \ + COUNTER(zlib_data_error) \ + COUNTER(zlib_mem_error) \ + COUNTER(zlib_buf_error) \ + COUNTER(zlib_version_error) + +/** + * Struct definition for zlib decompressor stats. @see stats_macros.h + */ +struct ZlibDecompressorStats { + ALL_ZLIB_DECOMPRESSOR_STATS(GENERATE_COUNTER_STRUCT) +}; + +/** + * Implementation of decompressor's interface. + */ +class ZlibDecompressorImpl : public Zlib::Base, + public Envoy::Compression::Decompressor::Decompressor, + public Logger::Loggable { +public: + ZlibDecompressorImpl(Stats::Scope& scope, const std::string& stats_prefix); + + /** + * Constructor that allows setting the size of decompressor's output buffer. It + * should be called whenever a buffer size different than the 4096 bytes, normally set by the + * default constructor, is desired. If memory is available and it makes sense to output large + * chunks of compressed data, zlib documentation suggests buffers sizes on the order of 128K or + * 256K bytes. @see http://zlib.net/zlib_how.html + * @param chunk_size amount of memory reserved for the decompressor output. + */ + ZlibDecompressorImpl(Stats::Scope& scope, const std::string& stats_prefix, uint64_t chunk_size); + + /** + * Init must be called in order to initialize the decompressor. Once decompressor is initialized, + * it cannot be initialized again. Init should run before decompressing any data. + * @param window_bits sets the size of the history buffer. It must be greater than or equal to + * the window_bits value provided when data was compressed (zlib manual). 
+ */ + void init(int64_t window_bits); + + // Compression::Decompressor::Decompressor + void decompress(const Buffer::Instance& input_buffer, Buffer::Instance& output_buffer) override; + + // Flag to track whether error occurred during decompression. + // When an error occurs, the error code (a negative int) will be stored in this variable. + int decompression_error_{0}; + +private: + // TODO: clean up friend class. This is here to allow coverage of chargeErrorStats as it isn't + // completely straightforward + // to cause zlib's inflate function to return all the error codes necessary to hit all the cases + // in the switch statement. + friend class ZlibDecompressorStatsTest; + static ZlibDecompressorStats generateStats(const std::string& prefix, Stats::Scope& scope) { + return ZlibDecompressorStats{ALL_ZLIB_DECOMPRESSOR_STATS(POOL_COUNTER_PREFIX(scope, prefix))}; + } + + bool inflateNext(); + void chargeErrorStats(const int result); + + const ZlibDecompressorStats stats_; +}; + +} // namespace Decompressor +} // namespace Gzip +} // namespace Compression +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/extensions_build_config.bzl b/source/extensions/extensions_build_config.bzl index 1bb72dfb7e1dc..0ae05caa57c02 100644 --- a/source/extensions/extensions_build_config.bzl +++ b/source/extensions/extensions_build_config.bzl @@ -16,6 +16,13 @@ EXTENSIONS = { "envoy.clusters.dynamic_forward_proxy": "//source/extensions/clusters/dynamic_forward_proxy:cluster", "envoy.clusters.redis": "//source/extensions/clusters/redis:redis_cluster", + # + # Compression + # + + "envoy.compression.gzip.compressor": "//source/extensions/compression/gzip/compressor:config", + "envoy.compression.gzip.decompressor": "//source/extensions/compression/gzip/decompressor:config", + # # gRPC Credentials Plugins # @@ -34,12 +41,15 @@ EXTENSIONS = { # "envoy.filters.http.adaptive_concurrency": "//source/extensions/filters/http/adaptive_concurrency:config", + 
"envoy.filters.http.admission_control": "//source/extensions/filters/http/admission_control:config", "envoy.filters.http.aws_lambda": "//source/extensions/filters/http/aws_lambda:config", "envoy.filters.http.aws_request_signing": "//source/extensions/filters/http/aws_request_signing:config", "envoy.filters.http.buffer": "//source/extensions/filters/http/buffer:config", "envoy.filters.http.cache": "//source/extensions/filters/http/cache:config", + "envoy.filters.http.compressor": "//source/extensions/filters/http/compressor:config", "envoy.filters.http.cors": "//source/extensions/filters/http/cors:config", "envoy.filters.http.csrf": "//source/extensions/filters/http/csrf:config", + "envoy.filters.http.decompressor": "//source/extensions/filters/http/decompressor:config", "envoy.filters.http.dynamic_forward_proxy": "//source/extensions/filters/http/dynamic_forward_proxy:config", "envoy.filters.http.dynamo": "//source/extensions/filters/http/dynamo:config", "envoy.filters.http.ext_authz": "//source/extensions/filters/http/ext_authz:config", @@ -96,6 +106,7 @@ EXTENSIONS = { "envoy.filters.network.ratelimit": "//source/extensions/filters/network/ratelimit:config", "envoy.filters.network.rbac": "//source/extensions/filters/network/rbac:config", "envoy.filters.network.redis_proxy": "//source/extensions/filters/network/redis_proxy:config", + "envoy.filters.network.rocketmq_proxy": "//source/extensions/filters/network/rocketmq_proxy:config", "envoy.filters.network.tcp_proxy": "//source/extensions/filters/network/tcp_proxy:config", "envoy.filters.network.thrift_proxy": "//source/extensions/filters/network/thrift_proxy:config", "envoy.filters.network.sni_cluster": "//source/extensions/filters/network/sni_cluster:config", @@ -106,7 +117,7 @@ EXTENSIONS = { # UDP filters # - "envoy.filters.udp_listener.dns_filter": "//source/extensions/filters/udp/dns_filter:config", + "envoy.filters.udp_listener.dns_filter": "//source/extensions/filters/udp/dns_filter:config", 
"envoy.filters.udp_listener.udp_proxy": "//source/extensions/filters/udp/udp_proxy:config", # @@ -149,8 +160,10 @@ EXTENSIONS = { # "envoy.transport_sockets.alts": "//source/extensions/transport_sockets/alts:config", + "envoy.transport_sockets.upstream_proxy_protocol": "//source/extensions/transport_sockets/proxy_protocol:upstream_proxy_protocol", "envoy.transport_sockets.raw_buffer": "//source/extensions/transport_sockets/raw_buffer:config", "envoy.transport_sockets.tap": "//source/extensions/transport_sockets/tap:config", + "envoy.transport_sockets.quic": "//source/extensions/quic_listeners/quiche:quic_transport_socket_factory_lib", # # Retry host predicates @@ -171,4 +184,23 @@ EXTENSIONS = { # "envoy.filters.http.cache.simple_http_cache": "//source/extensions/filters/http/cache/simple_http_cache:simple_http_cache_lib", + + # + # Internal redirect predicates + # + "envoy.internal_redirect_predicates.allow_listed_routes": "//source/extensions/internal_redirect/allow_listed_routes:config", + "envoy.internal_redirect_predicates.previous_routes": "//source/extensions/internal_redirect/previous_routes:config", + "envoy.internal_redirect_predicates.safe_cross_scheme": "//source/extensions/internal_redirect/safe_cross_scheme:config", + + # Http Upstreams (excepting envoy.upstreams.http.generic which is hard-coded into the build so not registered here) + "envoy.upstreams.http.http": "//source/extensions/upstreams/http/http:config", + "envoy.upstreams.http.tcp": "//source/extensions/upstreams/http/tcp:config", + + } + +# This can be used to extend the visibility rules for Envoy extensions +# (//:extension_config and //:extension_library in //BUILD) +# if downstream Envoy builds need to directly reference envoy extensions. 
+ADDITIONAL_VISIBILITY = [ + ] diff --git a/source/extensions/filters/common/expr/BUILD b/source/extensions/filters/common/expr/BUILD index 316c36b05b19d..fbbcd725ba43d 100644 --- a/source/extensions/filters/common/expr/BUILD +++ b/source/extensions/filters/common/expr/BUILD @@ -1,12 +1,12 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +envoy_extension_package() envoy_cc_library( name = "evaluator_lib", diff --git a/source/extensions/filters/common/expr/context.cc b/source/extensions/filters/common/expr/context.cc index 6df67169d09ea..17a0bd88a5704 100644 --- a/source/extensions/filters/common/expr/context.cc +++ b/source/extensions/filters/common/expr/context.cc @@ -13,6 +13,9 @@ namespace Filters { namespace Common { namespace Expr { +Http::RegisterCustomInlineHeader + referer_handle(Http::CustomHeaders::get().Referer); + absl::optional convertHeaderEntry(const Http::HeaderEntry* header) { if (header == nullptr) { return {}; @@ -67,7 +70,7 @@ absl::optional RequestWrapper::operator[](CelValue key) const { // (which is not available at the time of the request headers) if (headers_.value_ != nullptr && headers_.value_->ContentLength() != nullptr) { int64_t length; - if (absl::SimpleAtoi(headers_.value_->ContentLength()->value().getStringView(), &length)) { + if (absl::SimpleAtoi(headers_.value_->getContentLengthValue(), &length)) { return CelValue::CreateInt64(length); } } else { @@ -93,7 +96,7 @@ absl::optional RequestWrapper::operator[](CelValue key) const { if (value == Path) { return convertHeaderEntry(headers_.value_->Path()); } else if (value == UrlPath) { - absl::string_view path = headers_.value_->Path()->value().getStringView(); + absl::string_view path = headers_.value_->getPathValue(); size_t query_offset = path.find('?'); if (query_offset == absl::string_view::npos) { return 
CelValue::CreateStringView(path); @@ -106,7 +109,7 @@ absl::optional RequestWrapper::operator[](CelValue key) const { } else if (value == Method) { return convertHeaderEntry(headers_.value_->Method()); } else if (value == Referer) { - return convertHeaderEntry(headers_.value_->Referer()); + return convertHeaderEntry(headers_.value_->getInline(referer_handle.handle())); } else if (value == ID) { return convertHeaderEntry(headers_.value_->RequestId()); } else if (value == UserAgent) { @@ -136,8 +139,8 @@ absl::optional ResponseWrapper::operator[](CelValue key) const { return CelValue::CreateInt64(info_.responseFlags()); } else if (value == GrpcStatus) { auto const& optional_status = Grpc::Common::getGrpcStatus( - trailers_.value_ ? *trailers_.value_ : ConstSingleton::get(), - headers_.value_ ? *headers_.value_ : ConstSingleton::get(), + trailers_.value_ ? *trailers_.value_ : *Http::StaticEmptyHeaders::get().response_trailers, + headers_.value_ ? *headers_.value_ : *Http::StaticEmptyHeaders::get().response_headers, info_); if (optional_status.has_value()) { return CelValue::CreateInt64(optional_status.value()); @@ -187,6 +190,13 @@ absl::optional UpstreamWrapper::operator[](CelValue key) const { upstream_host->address()->ip() != nullptr) { return CelValue::CreateInt64(upstream_host->address()->ip()->port()); } + } else if (value == UpstreamLocalAddress) { + auto upstream_local_address = info_.upstreamLocalAddress(); + if (upstream_local_address != nullptr) { + return CelValue::CreateStringView(upstream_local_address->asStringView()); + } + } else if (value == UpstreamTransportFailureReason) { + return CelValue::CreateStringView(info_.upstreamTransportFailureReason()); } auto ssl_info = info_.upstreamSslConnection(); diff --git a/source/extensions/filters/common/expr/context.h b/source/extensions/filters/common/expr/context.h index f3a2aed0cef58..8c06b86ce7cb9 100644 --- a/source/extensions/filters/common/expr/context.h +++ 
b/source/extensions/filters/common/expr/context.h @@ -66,6 +66,8 @@ constexpr absl::string_view Destination = "destination"; // Upstream properties constexpr absl::string_view Upstream = "upstream"; +constexpr absl::string_view UpstreamLocalAddress = "local_address"; +constexpr absl::string_view UpstreamTransportFailureReason = "transport_failure_reason"; class RequestWrapper; diff --git a/source/extensions/filters/common/expr/evaluator.cc b/source/extensions/filters/common/expr/evaluator.cc index 0bd1d3554f190..e4920fd21fdab 100644 --- a/source/extensions/filters/common/expr/evaluator.cc +++ b/source/extensions/filters/common/expr/evaluator.cc @@ -62,7 +62,7 @@ ExpressionPtr createExpression(Builder& builder, const google::api::expr::v1alph throw CelException( absl::StrCat("failed to create an expression: ", cel_expression_status.status().message())); } - return std::move(cel_expression_status.ValueOrDie()); + return std::move(cel_expression_status.value()); } absl::optional evaluate(const Expression& expr, Protobuf::Arena* arena, @@ -76,7 +76,7 @@ absl::optional evaluate(const Expression& expr, Protobuf::Arena* arena return {}; } - return eval_status.ValueOrDie(); + return eval_status.value(); } bool matches(const Expression& expr, const StreamInfo::StreamInfo& info, diff --git a/source/extensions/filters/common/ext_authz/BUILD b/source/extensions/filters/common/ext_authz/BUILD index 58ed8316353e3..977560fefb203 100644 --- a/source/extensions/filters/common/ext_authz/BUILD +++ b/source/extensions/filters/common/ext_authz/BUILD @@ -1,12 +1,12 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +envoy_extension_package() envoy_cc_library( name = "ext_authz_interface", @@ -44,6 +44,7 @@ envoy_cc_library( "//source/common/protobuf", "//source/common/tracing:http_tracer_lib", 
"@envoy_api//envoy/config/core/v3:pkg_cc_proto", + "@envoy_api//envoy/service/auth/v2alpha:pkg_cc_proto", "@envoy_api//envoy/service/auth/v3:pkg_cc_proto", ], ) diff --git a/source/extensions/filters/common/ext_authz/check_request_utils.cc b/source/extensions/filters/common/ext_authz/check_request_utils.cc index 7906ad3607f6a..55847df1a946f 100644 --- a/source/extensions/filters/common/ext_authz/check_request_utils.cc +++ b/source/extensions/filters/common/ext_authz/check_request_utils.cc @@ -115,18 +115,15 @@ void CheckRequestUtils::setHttpRequest( } // Fill in the headers. - auto mutable_headers = httpreq.mutable_headers(); - headers.iterate( - [](const Envoy::Http::HeaderEntry& e, void* ctx) { - // Skip any client EnvoyAuthPartialBody header, which could interfere with internal use. - if (e.key().getStringView() != Http::Headers::get().EnvoyAuthPartialBody.get()) { - auto* mutable_headers = static_cast*>(ctx); - (*mutable_headers)[std::string(e.key().getStringView())] = - std::string(e.value().getStringView()); - } - return Envoy::Http::HeaderMap::Iterate::Continue; - }, - mutable_headers); + auto* mutable_headers = httpreq.mutable_headers(); + headers.iterate([mutable_headers](const Envoy::Http::HeaderEntry& e) { + // Skip any client EnvoyAuthPartialBody header, which could interfere with internal use. + if (e.key().getStringView() != Http::Headers::get().EnvoyAuthPartialBody.get()) { + (*mutable_headers)[std::string(e.key().getStringView())] = + std::string(e.value().getStringView()); + } + return Envoy::Http::HeaderMap::Iterate::Continue; + }); // Set request body. 
if (max_request_bytes > 0 && decoding_buffer != nullptr) { diff --git a/source/extensions/filters/common/ext_authz/ext_authz.h b/source/extensions/filters/common/ext_authz/ext_authz.h index 725f534090f71..ba34d2e8a9fcb 100644 --- a/source/extensions/filters/common/ext_authz/ext_authz.h +++ b/source/extensions/filters/common/ext_authz/ext_authz.h @@ -49,14 +49,23 @@ enum class CheckStatus { struct Response { // Call status. CheckStatus status; - // Optional http headers used on either denied or ok responses. + // A set of HTTP headers returned by the authorization server, that will be optionally appended + // to the request to the upstream server. Http::HeaderVector headers_to_append; - // Optional http headers used on either denied or ok responses. + // A set of HTTP headers returned by the authorization server, will be optionally set + // (using "setCopy") to the request to the upstream server. + Http::HeaderVector headers_to_set; + // A set of HTTP headers returned by the authorization server, will be optionally added + // (using "addCopy") to the request to the upstream server. Http::HeaderVector headers_to_add; // Optional http body used only on denied response. std::string body; // Optional http status used only on denied response. Http::Code status_code{}; + + // A set of metadata returned by the authorization server, that will be emitted as filter's + // dynamic metadata that other filters can leverage. 
+ ProtobufWkt::Struct dynamic_metadata; }; using ResponsePtr = std::unique_ptr; diff --git a/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.cc b/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.cc index d96756eaf43b7..c82435a23edc4 100644 --- a/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.cc +++ b/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.cc @@ -1,6 +1,7 @@ #include "extensions/filters/common/ext_authz/ext_authz_grpc_impl.h" #include "envoy/config/core/v3/base.pb.h" +#include "envoy/service/auth/v2alpha/external_auth.pb.h" #include "envoy/service/auth/v3/external_auth.pb.h" #include "common/common/assert.h" @@ -16,16 +17,16 @@ namespace Filters { namespace Common { namespace ExtAuthz { -// Values used for selecting service paths. -// TODO(gsagula): keep only V2 when V2Alpha gets deprecated. -constexpr char V2[] = "envoy.service.auth.v2.Authorization.Check"; -constexpr char V2alpha[] = "envoy.service.auth.v2alpha.Authorization.Check"; - GrpcClientImpl::GrpcClientImpl(Grpc::RawAsyncClientPtr&& async_client, const absl::optional& timeout, + envoy::config::core::v3::ApiVersion transport_api_version, bool use_alpha) - : service_method_(getMethodDescriptor(use_alpha)), async_client_(std::move(async_client)), - timeout_(timeout) {} + : async_client_(std::move(async_client)), timeout_(timeout), + service_method_(Grpc::VersionedMethods("envoy.service.auth.v3.Authorization.Check", + "envoy.service.auth.v2.Authorization.Check", + "envoy.service.auth.v2alpha.Authorization.Check") + .getMethodDescriptorForVersion(transport_api_version, use_alpha)), + transport_api_version_(transport_api_version) {} GrpcClientImpl::~GrpcClientImpl() { ASSERT(!callbacks_); } @@ -41,17 +42,21 @@ void GrpcClientImpl::check(RequestCallbacks& callbacks, ASSERT(callbacks_ == nullptr); callbacks_ = &callbacks; + ENVOY_LOG(trace, "Sending CheckRequest: {}", request.DebugString()); request_ = async_client_->send(service_method_, 
request, *this, parent_span, - Http::AsyncClient::RequestOptions().setTimeout(timeout_)); + Http::AsyncClient::RequestOptions().setTimeout(timeout_), + transport_api_version_); } void GrpcClientImpl::onSuccess(std::unique_ptr&& response, Tracing::Span& span) { + ENVOY_LOG(trace, "Received CheckResponse: {}", response->DebugString()); ResponsePtr authz_response = std::make_unique(Response{}); if (response->status().code() == Grpc::Status::WellKnownGrpcStatus::Ok) { span.setTag(TracingConstants::get().TraceStatus, TracingConstants::get().TraceOk); authz_response->status = CheckStatus::OK; if (response->has_ok_response()) { + authz_response->dynamic_metadata = response->ok_response().dynamic_metadata(); toAuthzResponseHeader(authz_response, response->ok_response().headers()); } } else { @@ -73,6 +78,8 @@ void GrpcClientImpl::onSuccess(std::unique_ptrheaders_to_append.emplace_back(Http::LowerCaseString(header.header().key()), header.header().value()); } else { - response->headers_to_add.emplace_back(Http::LowerCaseString(header.header().key()), + response->headers_to_set.emplace_back(Http::LowerCaseString(header.header().key()), header.header().value()); } } } -const Protobuf::MethodDescriptor& GrpcClientImpl::getMethodDescriptor(bool use_alpha) { - const auto* descriptor = - use_alpha ? 
Protobuf::DescriptorPool::generated_pool()->FindMethodByName(V2alpha) - : Protobuf::DescriptorPool::generated_pool()->FindMethodByName(V2); - ASSERT(descriptor != nullptr); - return *descriptor; -} - } // namespace ExtAuthz } // namespace Common } // namespace Filters diff --git a/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.h b/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.h index a678505ba3229..da1ed1d2ebf1e 100644 --- a/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.h +++ b/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.h @@ -2,6 +2,7 @@ #include #include +#include #include #include @@ -38,11 +39,14 @@ using ExtAuthzAsyncCallbacks = Grpc::AsyncRequestCallbacks { public: // TODO(gsagula): remove `use_alpha` param when V2Alpha gets deprecated. GrpcClientImpl(Grpc::RawAsyncClientPtr&& async_client, - const absl::optional& timeout, bool use_alpha); + const absl::optional& timeout, + envoy::config::core::v3::ApiVersion transport_api_version, bool use_alpha); ~GrpcClientImpl() override; // ExtAuthz::Client @@ -58,18 +62,20 @@ class GrpcClientImpl : public Client, public ExtAuthzAsyncCallbacks { Tracing::Span& span) override; private: - static const Protobuf::MethodDescriptor& getMethodDescriptor(bool use_alpha); void toAuthzResponseHeader( ResponsePtr& response, const Protobuf::RepeatedPtrField& headers); - const Protobuf::MethodDescriptor& service_method_; Grpc::AsyncClient async_client_; Grpc::AsyncRequest* request_{}; absl::optional timeout_; RequestCallbacks* callbacks_{}; + const Protobuf::MethodDescriptor& service_method_; + const envoy::config::core::v3::ApiVersion transport_api_version_; }; +using GrpcClientImplPtr = std::unique_ptr; + } // namespace ExtAuthz } // namespace Common } // namespace Filters diff --git a/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc b/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc index e096d679af050..fe7207bdfbd32 100644 --- 
a/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc +++ b/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc @@ -33,30 +33,40 @@ const Http::HeaderMap& lengthZeroHeader() { const Response& errorResponse() { CONSTRUCT_ON_FIRST_USE(Response, Response{CheckStatus::Error, Http::HeaderVector{}, Http::HeaderVector{}, - EMPTY_STRING, Http::Code::Forbidden}); + Http::HeaderVector{}, EMPTY_STRING, Http::Code::Forbidden, + ProtobufWkt::Struct{}}); } // SuccessResponse used for creating either DENIED or OK authorization responses. struct SuccessResponse { SuccessResponse(const Http::HeaderMap& headers, const MatcherSharedPtr& matchers, - Response&& response) - : headers_(headers), matchers_(matchers), response_(std::make_unique(response)) { - headers_.iterate( - [](const Http::HeaderEntry& header, void* ctx) -> Http::HeaderMap::Iterate { - auto* context = static_cast(ctx); - // UpstreamHeaderMatcher - if (context->matchers_->matches(header.key().getStringView())) { - context->response_->headers_to_add.emplace_back( - Http::LowerCaseString{std::string(header.key().getStringView())}, - std::string(header.value().getStringView())); - } - return Http::HeaderMap::Iterate::Continue; - }, - this); + const MatcherSharedPtr& append_matchers, Response&& response) + : headers_(headers), matchers_(matchers), append_matchers_(append_matchers), + response_(std::make_unique(response)) { + headers_.iterate([this](const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate { + // UpstreamHeaderMatcher + if (matchers_->matches(header.key().getStringView())) { + response_->headers_to_set.emplace_back( + Http::LowerCaseString{std::string(header.key().getStringView())}, + std::string(header.value().getStringView())); + } + if (append_matchers_->matches(header.key().getStringView())) { + // If there is an existing matching key in the current headers, the new entry will be + // appended with the same key. 
For example, given {"key": "value1"} headers, if there is + // a matching "key" from the authorization response headers {"key": "value2"}, the + // request to upstream server will have two entries for "key": {"key": "value1", "key": + // "value2"}. + response_->headers_to_add.emplace_back( + Http::LowerCaseString{std::string(header.key().getStringView())}, + std::string(header.value().getStringView())); + } + return Http::HeaderMap::Iterate::Continue; + }); } const Http::HeaderMap& headers_; const MatcherSharedPtr& matchers_; + const MatcherSharedPtr& append_matchers_; ResponsePtr response_; }; @@ -128,6 +138,9 @@ ClientConfig::ClientConfig(const envoy::extensions::filters::http::ext_authz::v3 upstream_header_matchers_(toUpstreamMatchers( config.http_service().authorization_response().allowed_upstream_headers(), enable_case_sensitive_string_matcher_)), + upstream_header_to_append_matchers_(toUpstreamMatchers( + config.http_service().authorization_response().allowed_upstream_headers_to_append(), + enable_case_sensitive_string_matcher_)), cluster_name_(config.http_service().server_uri().cluster()), timeout_(timeout), path_prefix_(path_prefix), tracing_name_(fmt::format("async {} egress", config.http_service().server_uri().cluster())), @@ -138,8 +151,8 @@ MatcherSharedPtr ClientConfig::toRequestMatchers(const envoy::type::matcher::v3::ListStringMatcher& list, const bool disable_lowercase_string_matcher) { const std::vector keys{ - {Http::Headers::get().Authorization, Http::Headers::get().Method, Http::Headers::get().Path, - Http::Headers::get().Host}}; + {Http::CustomHeaders::get().Authorization, Http::Headers::get().Method, + Http::Headers::get().Path, Http::Headers::get().Host}}; std::vector matchers( createStringMatchers(list, disable_lowercase_string_matcher)); @@ -190,23 +203,15 @@ ClientConfig::toUpstreamMatchers(const envoy::type::matcher::v3::ListStringMatch createStringMatchers(list, disable_lowercase_string_matcher)); } 
-RawHttpClientImpl::RawHttpClientImpl(Upstream::ClusterManager& cm, ClientConfigSharedPtr config, - TimeSource& time_source) - : cm_(cm), config_(config), time_source_(time_source) {} +RawHttpClientImpl::RawHttpClientImpl(Upstream::ClusterManager& cm, ClientConfigSharedPtr config) + : cm_(cm), config_(config) {} -RawHttpClientImpl::~RawHttpClientImpl() { - ASSERT(callbacks_ == nullptr); - ASSERT(span_ == nullptr); -} +RawHttpClientImpl::~RawHttpClientImpl() { ASSERT(callbacks_ == nullptr); } void RawHttpClientImpl::cancel() { ASSERT(callbacks_ != nullptr); - ASSERT(span_ != nullptr); - span_->setTag(Tracing::Tags::get().Status, Tracing::Tags::get().Canceled); - span_->finishSpan(); request_->cancel(); callbacks_ = nullptr; - span_ = nullptr; } // Client @@ -215,11 +220,7 @@ void RawHttpClientImpl::check(RequestCallbacks& callbacks, Tracing::Span& parent_span, const StreamInfo::StreamInfo& stream_info) { ASSERT(callbacks_ == nullptr); - ASSERT(span_ == nullptr); callbacks_ = &callbacks; - span_ = parent_span.spawnChild(Tracing::EgressConfig::get(), config_->tracingName(), - time_source_.systemTime()); - span_->setTag(Tracing::Tags::get().UpstreamCluster, config_->cluster()); Http::RequestHeaderMapPtr headers; const uint64_t request_length = request.attributes().request().http().body().size(); @@ -260,51 +261,47 @@ void RawHttpClientImpl::check(RequestCallbacks& callbacks, // It's possible that the cluster specified in the filter configuration no longer exists due to a // CDS removal. if (cm_.get(cluster) == nullptr) { - // TODO(dio): Add stats and tracing related to this. + // TODO(dio): Add stats related to this. 
ENVOY_LOG(debug, "ext_authz cluster '{}' does not exist", cluster); callbacks_->onComplete(std::make_unique(errorResponse())); - span_->setTag(Tracing::Tags::get().Error, Tracing::Tags::get().True); - span_->finishSpan(); callbacks_ = nullptr; - span_ = nullptr; } else { - span_->injectContext(message->headers()); - request_ = cm_.httpAsyncClientForCluster(cluster).send( - std::move(message), *this, - Http::AsyncClient::RequestOptions().setTimeout(config_->timeout())); + auto options = Http::AsyncClient::RequestOptions() + .setTimeout(config_->timeout()) + .setParentSpan(parent_span) + .setChildSpanName(config_->tracingName()); + + request_ = cm_.httpAsyncClientForCluster(cluster).send(std::move(message), *this, options); } } void RawHttpClientImpl::onSuccess(const Http::AsyncClient::Request&, Http::ResponseMessagePtr&& message) { callbacks_->onComplete(toResponse(std::move(message))); - span_->finishSpan(); callbacks_ = nullptr; - span_ = nullptr; } void RawHttpClientImpl::onFailure(const Http::AsyncClient::Request&, Http::AsyncClient::FailureReason reason) { ASSERT(reason == Http::AsyncClient::FailureReason::Reset); callbacks_->onComplete(std::make_unique(errorResponse())); - span_->setTag(Tracing::Tags::get().Error, Tracing::Tags::get().True); - span_->finishSpan(); callbacks_ = nullptr; - span_ = nullptr; } -ResponsePtr RawHttpClientImpl::toResponse(Http::ResponseMessagePtr message) { - // Set an error status if parsing status code fails. A Forbidden response is sent to the client - // if the filter has not been configured with failure_mode_allow. 
- uint64_t status_code{}; - if (!absl::SimpleAtoi(message->headers().Status()->value().getStringView(), &status_code)) { - ENVOY_LOG(warn, "ext_authz HTTP client failed to parse the HTTP status code."); - span_->setTag(Tracing::Tags::get().Error, Tracing::Tags::get().True); - return std::make_unique(errorResponse()); +void RawHttpClientImpl::onBeforeFinalizeUpstreamSpan( + Tracing::Span& span, const Http::ResponseHeaderMap* response_headers) { + if (response_headers != nullptr) { + const uint64_t status_code = Http::Utility::getResponseStatus(*response_headers); + span.setTag(TracingConstants::get().HttpStatus, + Http::CodeUtility::toString(static_cast(status_code))); + span.setTag(TracingConstants::get().TraceStatus, status_code == enumToInt(Http::Code::OK) + ? TracingConstants::get().TraceOk + : TracingConstants::get().TraceUnauthz); } +} - span_->setTag(TracingConstants::get().HttpStatus, - Http::CodeUtility::toString(static_cast(status_code))); +ResponsePtr RawHttpClientImpl::toResponse(Http::ResponseMessagePtr message) { + const uint64_t status_code = Http::Utility::getResponseStatus(message->headers()); // Set an error status if the call to the authorization server returns any of the 5xx HTTP error // codes. A Forbidden response is sent to the client if the filter has not been configured with @@ -316,17 +313,19 @@ ResponsePtr RawHttpClientImpl::toResponse(Http::ResponseMessagePtr message) { // Create an Ok authorization response. if (status_code == enumToInt(Http::Code::OK)) { SuccessResponse ok{message->headers(), config_->upstreamHeaderMatchers(), + config_->upstreamHeaderToAppendMatchers(), Response{CheckStatus::OK, Http::HeaderVector{}, Http::HeaderVector{}, - EMPTY_STRING, Http::Code::OK}}; - span_->setTag(TracingConstants::get().TraceStatus, TracingConstants::get().TraceOk); + Http::HeaderVector{}, EMPTY_STRING, Http::Code::OK, + ProtobufWkt::Struct{}}}; return std::move(ok.response_); } // Create a Denied authorization response. 
SuccessResponse denied{message->headers(), config_->clientHeaderMatchers(), + config_->upstreamHeaderToAppendMatchers(), Response{CheckStatus::Denied, Http::HeaderVector{}, Http::HeaderVector{}, - message->bodyAsString(), static_cast(status_code)}}; - span_->setTag(TracingConstants::get().TraceStatus, TracingConstants::get().TraceUnauthz); + Http::HeaderVector{}, message->bodyAsString(), + static_cast(status_code), ProtobufWkt::Struct{}}}; return std::move(denied.response_); } diff --git a/source/extensions/filters/common/ext_authz/ext_authz_http_impl.h b/source/extensions/filters/common/ext_authz/ext_authz_http_impl.h index 51956c41d2dbf..8f5abd684379f 100644 --- a/source/extensions/filters/common/ext_authz/ext_authz_http_impl.h +++ b/source/extensions/filters/common/ext_authz/ext_authz_http_impl.h @@ -99,9 +99,13 @@ class ClientConfig { const MatcherSharedPtr& upstreamHeaderMatchers() const { return upstream_header_matchers_; } /** - * Returns a list of headers that will be add to the authorization request. + * Returns a list of matchers used for selecting the authorization response headers that + * should be sent to the upstream server. The same header keys will be appended, instead of + * be replaced. */ - const Http::LowerCaseStrPairVector& headersToAdd() const { return authorization_headers_to_add_; } + const MatcherSharedPtr& upstreamHeaderToAppendMatchers() const { + return upstream_header_to_append_matchers_; + } /** * Returns the name used for tracing. 
@@ -128,6 +132,7 @@ class ClientConfig { const MatcherSharedPtr request_header_matchers_; const MatcherSharedPtr client_header_matchers_; const MatcherSharedPtr upstream_header_matchers_; + const MatcherSharedPtr upstream_header_to_append_matchers_; const Http::LowerCaseStrPairVector authorization_headers_to_add_; const std::string cluster_name_; const std::chrono::milliseconds timeout_; @@ -149,8 +154,7 @@ class RawHttpClientImpl : public Client, public Http::AsyncClient::Callbacks, Logger::Loggable { public: - explicit RawHttpClientImpl(Upstream::ClusterManager& cm, ClientConfigSharedPtr config, - TimeSource& time_source); + explicit RawHttpClientImpl(Upstream::ClusterManager& cm, ClientConfigSharedPtr config); ~RawHttpClientImpl() override; // ExtAuthz::Client @@ -162,15 +166,16 @@ class RawHttpClientImpl : public Client, void onSuccess(const Http::AsyncClient::Request&, Http::ResponseMessagePtr&& message) override; void onFailure(const Http::AsyncClient::Request&, Http::AsyncClient::FailureReason reason) override; + void onBeforeFinalizeUpstreamSpan(Tracing::Span& span, + const Http::ResponseHeaderMap* response_headers) override; private: ResponsePtr toResponse(Http::ResponseMessagePtr message); + Upstream::ClusterManager& cm_; ClientConfigSharedPtr config_; Http::AsyncClient::Request* request_{}; RequestCallbacks* callbacks_{}; - TimeSource& time_source_; - Tracing::SpanPtr span_; }; } // namespace ExtAuthz diff --git a/source/extensions/filters/common/fault/BUILD b/source/extensions/filters/common/fault/BUILD index e70a66db64ebd..bf05af548e013 100644 --- a/source/extensions/filters/common/fault/BUILD +++ b/source/extensions/filters/common/fault/BUILD @@ -1,12 +1,12 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +envoy_extension_package() envoy_cc_library( name = "fault_config_lib", diff --git 
a/source/extensions/filters/common/fault/fault_config.cc b/source/extensions/filters/common/fault/fault_config.cc index 7bcdd765a3eff..ebbb86e2fd95e 100644 --- a/source/extensions/filters/common/fault/fault_config.cc +++ b/source/extensions/filters/common/fault/fault_config.cc @@ -34,7 +34,13 @@ FaultAbortConfig::FaultAbortConfig( switch (abort_config.error_type_case()) { case envoy::extensions::filters::http::fault::v3::FaultAbort::ErrorTypeCase::kHttpStatus: provider_ = - std::make_unique(abort_config.http_status(), abort_config.percentage()); + std::make_unique(static_cast(abort_config.http_status()), + absl::nullopt, abort_config.percentage()); + break; + case envoy::extensions::filters::http::fault::v3::FaultAbort::ErrorTypeCase::kGrpcStatus: + provider_ = std::make_unique( + absl::nullopt, static_cast(abort_config.grpc_status()), + abort_config.percentage()); break; case envoy::extensions::filters::http::fault::v3::FaultAbort::ErrorTypeCase::kHeaderAbort: provider_ = std::make_unique(abort_config.percentage()); @@ -44,10 +50,10 @@ FaultAbortConfig::FaultAbortConfig( } } -absl::optional FaultAbortConfig::HeaderAbortProvider::statusCode( +absl::optional FaultAbortConfig::HeaderAbortProvider::httpStatusCode( const Http::RequestHeaderMap* request_headers) const { - absl::optional ret; - const auto header = request_headers->get(HeaderNames::get().AbortRequest); + absl::optional ret = absl::nullopt; + auto header = request_headers->get(Filters::Common::Fault::HeaderNames::get().AbortRequest); if (header == nullptr) { return ret; } @@ -64,6 +70,21 @@ absl::optional FaultAbortConfig::HeaderAbortProvider::statusCode( return ret; } +absl::optional FaultAbortConfig::HeaderAbortProvider::grpcStatusCode( + const Http::RequestHeaderMap* request_headers) const { + auto header = request_headers->get(Filters::Common::Fault::HeaderNames::get().AbortGrpcRequest); + if (header == nullptr) { + return absl::nullopt; + } + + uint64_t code; + if 
(!absl::SimpleAtoi(header->value().getStringView(), &code)) { + return absl::nullopt; + } + + return static_cast(code); +} + FaultDelayConfig::FaultDelayConfig( const envoy::extensions::filters::common::fault::v3::FaultDelay& delay_config) { switch (delay_config.fault_delay_secifier_case()) { diff --git a/source/extensions/filters/common/fault/fault_config.h b/source/extensions/filters/common/fault/fault_config.h index 2bf80a1e67d27..e253814273b34 100644 --- a/source/extensions/filters/common/fault/fault_config.h +++ b/source/extensions/filters/common/fault/fault_config.h @@ -2,6 +2,7 @@ #include "envoy/extensions/filters/common/fault/v3/fault.pb.h" #include "envoy/extensions/filters/http/fault/v3/fault.pb.h" +#include "envoy/grpc/status.h" #include "envoy/http/header_map.h" #include "envoy/type/v3/percent.pb.h" @@ -22,6 +23,7 @@ class HeaderNameValues { const Http::LowerCaseString AbortRequest{absl::StrCat(prefix(), "-fault-abort-request")}; const Http::LowerCaseString AbortRequestPercentage{ absl::StrCat(prefix(), "-fault-abort-request-percentage")}; + const Http::LowerCaseString AbortGrpcRequest{absl::StrCat(prefix(), "-fault-abort-grpc-request")}; const Http::LowerCaseString DelayRequest{absl::StrCat(prefix(), "-fault-delay-request")}; const Http::LowerCaseString DelayRequestPercentage{ absl::StrCat(prefix(), "-fault-delay-request-percentage")}; @@ -53,8 +55,12 @@ class FaultAbortConfig { public: FaultAbortConfig(const envoy::extensions::filters::http::fault::v3::FaultAbort& abort_config); - absl::optional statusCode(const Http::RequestHeaderMap* request_headers) const { - return provider_->statusCode(request_headers); + absl::optional httpStatusCode(const Http::RequestHeaderMap* request_headers) const { + return provider_->httpStatusCode(request_headers); + } + absl::optional + grpcStatusCode(const Http::RequestHeaderMap* request_headers) const { + return provider_->grpcStatusCode(request_headers); } envoy::type::v3::FractionalPercent @@ -71,22 +77,35 @@ class 
FaultAbortConfig { // Return the HTTP status code to use. Optionally passed HTTP headers that may contain the // HTTP status code depending on the provider implementation. virtual absl::optional - statusCode(const Http::RequestHeaderMap* request_headers) const PURE; + httpStatusCode(const Http::RequestHeaderMap* request_headers) const PURE; + + // Return the gRPC status code to use. Optionally passed an HTTP header that may contain the + // gRPC status code depending on the provider implementation. + virtual absl::optional + grpcStatusCode(const Http::RequestHeaderMap* request_headers) const PURE; + // Return what percentage of requests abort faults should be applied to. Optionally passed // HTTP headers that may contain the percentage depending on the provider implementation. virtual envoy::type::v3::FractionalPercent percentage(const Http::RequestHeaderMap* request_headers) const PURE; }; - // Delay provider that uses a fixed abort status code. + // Abort provider that uses a fixed abort status code. 
class FixedAbortProvider : public AbortProvider { public: - FixedAbortProvider(uint64_t status_code, const envoy::type::v3::FractionalPercent& percentage) - : status_code_(status_code), percentage_(percentage) {} + FixedAbortProvider(absl::optional http_status_code, + absl::optional grpc_status_code, + const envoy::type::v3::FractionalPercent& percentage) + : http_status_code_(http_status_code), grpc_status_code_(grpc_status_code), + percentage_(percentage) {} - // AbortProvider - absl::optional statusCode(const Http::RequestHeaderMap*) const override { - return static_cast(status_code_); + absl::optional httpStatusCode(const Http::RequestHeaderMap*) const override { + return http_status_code_; + } + + absl::optional + grpcStatusCode(const Http::RequestHeaderMap*) const override { + return grpc_status_code_; } envoy::type::v3::FractionalPercent percentage(const Http::RequestHeaderMap*) const override { @@ -94,23 +113,30 @@ class FaultAbortConfig { } private: - const uint64_t status_code_; + const absl::optional http_status_code_; + const absl::optional grpc_status_code_; const envoy::type::v3::FractionalPercent percentage_; }; // Abort provider the reads a status code from an HTTP header. 
- class HeaderAbortProvider : public AbortProvider, public HeaderPercentageProvider { + class HeaderAbortProvider : public AbortProvider { public: HeaderAbortProvider(const envoy::type::v3::FractionalPercent& percentage) - : HeaderPercentageProvider(HeaderNames::get().AbortRequestPercentage, percentage) {} - // AbortProvider + : header_percentage_provider_(HeaderNames::get().AbortRequestPercentage, percentage) {} + absl::optional - statusCode(const Http::RequestHeaderMap* request_headers) const override; + httpStatusCode(const Http::RequestHeaderMap* request_headers) const override; + + absl::optional + grpcStatusCode(const Http::RequestHeaderMap* request_headers) const override; envoy::type::v3::FractionalPercent percentage(const Http::RequestHeaderMap* request_headers) const override { - return HeaderPercentageProvider::percentage(request_headers); + return header_percentage_provider_.percentage(request_headers); } + + private: + HeaderPercentageProvider header_percentage_provider_; }; using AbortProviderPtr = std::unique_ptr; @@ -176,18 +202,22 @@ class FaultDelayConfig { }; // Delay provider the reads a delay from an HTTP header. 
- class HeaderDelayProvider : public DelayProvider, public HeaderPercentageProvider { + class HeaderDelayProvider : public DelayProvider { public: HeaderDelayProvider(const envoy::type::v3::FractionalPercent& percentage) - : HeaderPercentageProvider(HeaderNames::get().DelayRequestPercentage, percentage) {} + : header_percentage_provider_(HeaderNames::get().DelayRequestPercentage, percentage) {} + // DelayProvider absl::optional duration(const Http::RequestHeaderMap* request_headers) const override; envoy::type::v3::FractionalPercent percentage(const Http::RequestHeaderMap* request_headers) const override { - return HeaderPercentageProvider::percentage(request_headers); + return header_percentage_provider_.percentage(request_headers); } + + private: + HeaderPercentageProvider header_percentage_provider_; }; using DelayProviderPtr = std::unique_ptr; @@ -252,16 +282,20 @@ class FaultRateLimitConfig { }; // Rate limit provider that reads the rate limit from an HTTP header. - class HeaderRateLimitProvider : public RateLimitProvider, public HeaderPercentageProvider { + class HeaderRateLimitProvider : public RateLimitProvider { public: HeaderRateLimitProvider(const envoy::type::v3::FractionalPercent& percentage) - : HeaderPercentageProvider(HeaderNames::get().ThroughputResponsePercentage, percentage) {} + : header_percentage_provider_(HeaderNames::get().ThroughputResponsePercentage, percentage) { + } // RateLimitProvider absl::optional rateKbps(const Http::RequestHeaderMap* request_headers) const override; envoy::type::v3::FractionalPercent percentage(const Http::RequestHeaderMap* request_headers) const override { - return HeaderPercentageProvider::percentage(request_headers); + return header_percentage_provider_.percentage(request_headers); } + + private: + HeaderPercentageProvider header_percentage_provider_; }; using RateLimitProviderPtr = std::unique_ptr; diff --git a/source/extensions/filters/common/lua/BUILD b/source/extensions/filters/common/lua/BUILD index 
d1f515945c6ea..769784c89092e 100644 --- a/source/extensions/filters/common/lua/BUILD +++ b/source/extensions/filters/common/lua/BUILD @@ -1,14 +1,14 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) load("//bazel:envoy_internal.bzl", "envoy_external_dep_path") load("@bazel_skylib//rules:common_settings.bzl", "bool_flag") -envoy_package() +licenses(["notice"]) # Apache 2 + +envoy_extension_package() bool_flag( name = "moonjit", @@ -44,6 +44,7 @@ envoy_cc_library( deps = [ ":lua_lib", "//include/envoy/buffer:buffer_interface", + "//source/common/common:hex_lib", "//source/common/protobuf", ], ) diff --git a/source/extensions/filters/common/lua/lua.cc b/source/extensions/filters/common/lua/lua.cc index 3f8b5cd7ec015..c907fef9fd6a4 100644 --- a/source/extensions/filters/common/lua/lua.cc +++ b/source/extensions/filters/common/lua/lua.cc @@ -51,7 +51,7 @@ ThreadLocalState::ThreadLocalState(const std::string& code, ThreadLocal::SlotAll // First verify that the supplied code can be parsed. 
CSmartPtr state(lua_open()); - ASSERT(state.get() != nullptr, "unable to create new lua state object"); + RELEASE_ASSERT(state.get() != nullptr, "unable to create new Lua state object"); luaL_openlibs(state.get()); if (0 != luaL_dostring(state.get(), code.c_str())) { @@ -71,8 +71,8 @@ int ThreadLocalState::getGlobalRef(uint64_t slot) { } uint64_t ThreadLocalState::registerGlobal(const std::string& global) { - tls_slot_->runOnAllThreads([this, global]() { - LuaThreadLocal& tls = tls_slot_->getTyped(); + tls_slot_->runOnAllThreads([global](ThreadLocal::ThreadLocalObjectSharedPtr previous) { + LuaThreadLocal& tls = *std::dynamic_pointer_cast(previous); lua_getglobal(tls.state_.get(), global.c_str()); if (lua_isfunction(tls.state_.get(), -1)) { tls.global_slots_.push_back(luaL_ref(tls.state_.get(), LUA_REGISTRYINDEX)); @@ -81,6 +81,7 @@ uint64_t ThreadLocalState::registerGlobal(const std::string& global) { lua_pop(tls.state_.get(), 1); tls.global_slots_.push_back(LUA_REFNIL); } + return previous; }); return current_global_slot_++; @@ -92,7 +93,7 @@ CoroutinePtr ThreadLocalState::createCoroutine() { } ThreadLocalState::LuaThreadLocal::LuaThreadLocal(const std::string& code) : state_(lua_open()) { - ASSERT(state_.get() != nullptr, "unable to create new lua state object"); + RELEASE_ASSERT(state_.get() != nullptr, "unable to create new Lua state object"); luaL_openlibs(state_.get()); int rc = luaL_dostring(state_.get(), code.c_str()); ASSERT(rc == 0); diff --git a/source/extensions/filters/common/lua/lua.h b/source/extensions/filters/common/lua/lua.h index b9bb7caa157ea..726b6c149e166 100644 --- a/source/extensions/filters/common/lua/lua.h +++ b/source/extensions/filters/common/lua/lua.h @@ -386,8 +386,11 @@ class ThreadLocalState : Logger::Loggable { * all threaded workers. 
*/ template void registerType() { - tls_slot_->runOnAllThreads( - [this]() { T::registerType(tls_slot_->getTyped().state_.get()); }); + tls_slot_->runOnAllThreads([](ThreadLocal::ThreadLocalObjectSharedPtr previous) { + LuaThreadLocal& tls = *std::dynamic_pointer_cast(previous); + T::registerType(tls.state_.get()); + return previous; + }); } /** @@ -417,6 +420,8 @@ class ThreadLocalState : Logger::Loggable { uint64_t current_global_slot_{}; }; +using ThreadLocalStatePtr = std::unique_ptr; + /** * An exception specific to Lua errors. */ diff --git a/source/extensions/filters/common/lua/wrappers.cc b/source/extensions/filters/common/lua/wrappers.cc index 2e8d5d16c76df..02e4db6ca2a88 100644 --- a/source/extensions/filters/common/lua/wrappers.cc +++ b/source/extensions/filters/common/lua/wrappers.cc @@ -1,11 +1,50 @@ #include "extensions/filters/common/lua/wrappers.h" +#include + +#include + +#include "common/common/assert.h" +#include "common/common/hex.h" + +#include "absl/time/time.h" + namespace Envoy { namespace Extensions { namespace Filters { namespace Common { namespace Lua { +namespace { + +// Builds a Lua table from a list of strings. +template +void createLuaTableFromStringList(lua_State* state, const StringList& list) { + lua_createtable(state, list.size(), 0); + for (size_t i = 0; i < list.size(); i++) { + lua_pushstring(state, list[i].c_str()); + // After the list[i].c_str() is pushed to the stack, we need to set the "current element" with + // that value. The lua_rawseti(state, t, i) helps us to set the value of table t with key i. + // Given the index of the current element/table in the stack is below the pushed value i.e. 
-2 + // and the key (refers to where the element is in the table) is i + 1 (note that in Lua index + // starts from 1), hence we have: + lua_rawseti(state, -2, i + 1); + } +} + +// By default, LUA_INTEGER is https://en.cppreference.com/w/cpp/types/ptrdiff_t +// (https://github.com/LuaJIT/LuaJIT/blob/8271c643c21d1b2f344e339f559f2de6f3663191/src/luaconf.h#L104), +// which is large enough to hold timestamp-since-epoch in seconds. Note: In Lua, we usually use +// os.time(os.date("!*t")) to get current timestamp-since-epoch in seconds. +int64_t timestampInSeconds(const absl::optional& system_time) { + return system_time.has_value() ? std::chrono::duration_cast( + system_time.value().time_since_epoch()) + .count() + : 0; +} + +} // namespace + int BufferWrapper::luaLength(lua_State* state) { lua_pushnumber(state, data_.length()); return 1; @@ -217,13 +256,109 @@ int MetadataMapWrapper::luaPairs(lua_State* state) { return 1; } +int SslConnectionWrapper::luaPeerCertificatePresented(lua_State* state) { + lua_pushboolean(state, connection_info_.peerCertificatePresented()); + return 1; +} + +int SslConnectionWrapper::luaPeerCertificateValidated(lua_State* state) { + lua_pushboolean(state, connection_info_.peerCertificateValidated()); + return 1; +} + +int SslConnectionWrapper::luaUriSanLocalCertificate(lua_State* state) { + createLuaTableFromStringList(state, connection_info_.uriSanLocalCertificate()); + return 1; +} + +int SslConnectionWrapper::luaSha256PeerCertificateDigest(lua_State* state) { + lua_pushstring(state, connection_info_.sha256PeerCertificateDigest().c_str()); + return 1; +} + +int SslConnectionWrapper::luaSerialNumberPeerCertificate(lua_State* state) { + lua_pushstring(state, connection_info_.serialNumberPeerCertificate().c_str()); + return 1; +} + +int SslConnectionWrapper::luaIssuerPeerCertificate(lua_State* state) { + lua_pushstring(state, connection_info_.issuerPeerCertificate().c_str()); + return 1; +} + +int 
SslConnectionWrapper::luaSubjectPeerCertificate(lua_State* state) { + lua_pushstring(state, connection_info_.subjectPeerCertificate().c_str()); + return 1; +} + +int SslConnectionWrapper::luaUriSanPeerCertificate(lua_State* state) { + createLuaTableFromStringList(state, connection_info_.uriSanPeerCertificate()); + return 1; +} + +int SslConnectionWrapper::luaSubjectLocalCertificate(lua_State* state) { + lua_pushstring(state, connection_info_.subjectLocalCertificate().c_str()); + return 1; +} + +int SslConnectionWrapper::luaDnsSansPeerCertificate(lua_State* state) { + createLuaTableFromStringList(state, connection_info_.dnsSansPeerCertificate()); + return 1; +} + +int SslConnectionWrapper::luaDnsSansLocalCertificate(lua_State* state) { + createLuaTableFromStringList(state, connection_info_.dnsSansLocalCertificate()); + return 1; +} + +int SslConnectionWrapper::luaValidFromPeerCertificate(lua_State* state) { + lua_pushinteger(state, timestampInSeconds(connection_info_.validFromPeerCertificate())); + return 1; +} + +int SslConnectionWrapper::luaExpirationPeerCertificate(lua_State* state) { + lua_pushinteger(state, timestampInSeconds(connection_info_.expirationPeerCertificate())); + return 1; +} + +int SslConnectionWrapper::luaSessionId(lua_State* state) { + lua_pushstring(state, connection_info_.sessionId().c_str()); + return 1; +} + +int SslConnectionWrapper::luaCiphersuiteId(lua_State* state) { + lua_pushstring(state, + absl::StrCat("0x", Hex::uint16ToHex(connection_info_.ciphersuiteId())).c_str()); + return 1; +} + +int SslConnectionWrapper::luaCiphersuiteString(lua_State* state) { + lua_pushstring(state, connection_info_.ciphersuiteString().c_str()); + return 1; +} + +int SslConnectionWrapper::luaUrlEncodedPemEncodedPeerCertificate(lua_State* state) { + lua_pushstring(state, connection_info_.urlEncodedPemEncodedPeerCertificate().c_str()); + return 1; +} + +int SslConnectionWrapper::luaUrlEncodedPemEncodedPeerCertificateChain(lua_State* state) { + 
lua_pushstring(state, connection_info_.urlEncodedPemEncodedPeerCertificateChain().c_str()); + return 1; +} + +int SslConnectionWrapper::luaTlsVersion(lua_State* state) { + lua_pushstring(state, connection_info_.tlsVersion().c_str()); + return 1; +} + int ConnectionWrapper::luaSsl(lua_State* state) { const auto& ssl = connection_->ssl(); if (ssl != nullptr) { if (ssl_connection_wrapper_.get() != nullptr) { ssl_connection_wrapper_.pushStack(); } else { - ssl_connection_wrapper_.reset(SslConnectionWrapper::create(state, ssl), true); + ssl_connection_wrapper_.reset(SslConnectionWrapper::create(state, *ssl), true); } } else { lua_pushnil(state); diff --git a/source/extensions/filters/common/lua/wrappers.h b/source/extensions/filters/common/lua/wrappers.h index 92aa697cfd73c..09ea9b44467ae 100644 --- a/source/extensions/filters/common/lua/wrappers.h +++ b/source/extensions/filters/common/lua/wrappers.h @@ -112,10 +112,145 @@ class MetadataMapWrapper : public BaseLuaObject { */ class SslConnectionWrapper : public BaseLuaObject { public: - SslConnectionWrapper(const Ssl::ConnectionInfoConstSharedPtr) {} - static ExportedFunctions exportedFunctions() { return {}; } + explicit SslConnectionWrapper(const Ssl::ConnectionInfo& info) : connection_info_{info} {} + static ExportedFunctions exportedFunctions() { + return {{"peerCertificatePresented", static_luaPeerCertificatePresented}, + {"peerCertificateValidated", static_luaPeerCertificateValidated}, + {"uriSanLocalCertificate", static_luaUriSanLocalCertificate}, + {"sha256PeerCertificateDigest", static_luaSha256PeerCertificateDigest}, + {"serialNumberPeerCertificate", static_luaSerialNumberPeerCertificate}, + {"issuerPeerCertificate", static_luaIssuerPeerCertificate}, + {"subjectPeerCertificate", static_luaSubjectPeerCertificate}, + {"uriSanPeerCertificate", static_luaUriSanPeerCertificate}, + {"subjectLocalCertificate", static_luaSubjectLocalCertificate}, + {"dnsSansPeerCertificate", static_luaDnsSansPeerCertificate}, + 
{"dnsSansLocalCertificate", static_luaDnsSansLocalCertificate}, + {"validFromPeerCertificate", static_luaValidFromPeerCertificate}, + {"expirationPeerCertificate", static_luaExpirationPeerCertificate}, + {"sessionId", static_luaSessionId}, + {"ciphersuiteId", static_luaCiphersuiteId}, + {"ciphersuiteString", static_luaCiphersuiteString}, + {"urlEncodedPemEncodedPeerCertificate", static_luaUrlEncodedPemEncodedPeerCertificate}, + {"urlEncodedPemEncodedPeerCertificateChain", + static_luaUrlEncodedPemEncodedPeerCertificateChain}, + {"tlsVersion", static_luaTlsVersion}}; + } + +private: + /** + * Returns bool whether the peer certificate is presented. + */ + DECLARE_LUA_FUNCTION(SslConnectionWrapper, luaPeerCertificatePresented); + + /** + * Returns bool whether the peer certificate is validated. + */ + DECLARE_LUA_FUNCTION(SslConnectionWrapper, luaPeerCertificateValidated); + + /** + * Returns the URIs in the SAN field of the local certificate. Returns empty table if there is no + * local certificate, or no SAN field, or no URI in SAN. + */ + DECLARE_LUA_FUNCTION(SslConnectionWrapper, luaUriSanLocalCertificate); + + /** + * Returns the subject field of the local certificate in RFC 2253 format. Returns empty string if + * there is no local certificate, or no subject. + */ + DECLARE_LUA_FUNCTION(SslConnectionWrapper, luaSubjectLocalCertificate); - // TODO(dio): Add more Lua APIs around Ssl::Connection. + /** + * Returns the SHA256 digest of the peer certificate. Returns empty string if there is no peer + * certificate which can happen in TLS (non mTLS) connections. + */ + DECLARE_LUA_FUNCTION(SslConnectionWrapper, luaSha256PeerCertificateDigest); + + /** + * Returns the serial number field of the peer certificate. Returns empty string if there is no + * peer certificate, or no serial number. + */ + DECLARE_LUA_FUNCTION(SslConnectionWrapper, luaSerialNumberPeerCertificate); + + /** + * Returns the issuer field of the peer certificate in RFC 2253 format. 
Returns empty string if + * there is no peer certificate, or no issuer. + */ + DECLARE_LUA_FUNCTION(SslConnectionWrapper, luaIssuerPeerCertificate); + + /** + * Returns the subject field of the peer certificate in RFC 2253 format. Returns empty string if + * there is no peer certificate, or no subject. + */ + DECLARE_LUA_FUNCTION(SslConnectionWrapper, luaSubjectPeerCertificate); + + /** + * Returns the URIs in the SAN field of the peer certificate. Returns empty table if there is no + * peer certificate, or no SAN field, or no URI. + */ + DECLARE_LUA_FUNCTION(SslConnectionWrapper, luaUriSanPeerCertificate); + + /** + * Return string the URL-encoded PEM-encoded representation of the peer certificate. Returns empty + * string if there is no peer certificate or encoding fails. + */ + DECLARE_LUA_FUNCTION(SslConnectionWrapper, luaUrlEncodedPemEncodedPeerCertificate); + + /** + * Returns the URL-encoded PEM-encoded representation of the full peer certificate chain including + * the leaf certificate. Returns empty string if there is no peer certificate or encoding fails. + */ + DECLARE_LUA_FUNCTION(SslConnectionWrapper, luaUrlEncodedPemEncodedPeerCertificateChain); + + /** + * Returns the DNS entries in the SAN field of the peer certificate. Returns an empty table if + * there is no peer certificate, or no SAN field, or no DNS entries in SAN. + */ + DECLARE_LUA_FUNCTION(SslConnectionWrapper, luaDnsSansPeerCertificate); + + /** + * Returns the DNS entries in the SAN field of the local certificate. Returns an empty table if + * there is no local certificate, or no SAN field, or no DNS entries in SAN. + */ + DECLARE_LUA_FUNCTION(SslConnectionWrapper, luaDnsSansLocalCertificate); + + /** + * Returns the timestamp-since-epoch (in seconds) that the peer certificate was issued and should + * be considered valid from. Returns empty string if there is no peer certificate. 
+ */ + DECLARE_LUA_FUNCTION(SslConnectionWrapper, luaValidFromPeerCertificate); + + /** + * Returns the timestamp-since-epoch (in seconds) that the peer certificate expires and should not + * be considered valid after. Returns empty string if there is no peer certificate. + */ + DECLARE_LUA_FUNCTION(SslConnectionWrapper, luaExpirationPeerCertificate); + + /** + * Returns the hex-encoded TLS session ID as defined in RFC 5246. + */ + DECLARE_LUA_FUNCTION(SslConnectionWrapper, luaSessionId); + + /** + * Returns the standard ID for the ciphers used in the established TLS connection. Returns 0xffff + * if there is no current negotiated ciphersuite. + */ + DECLARE_LUA_FUNCTION(SslConnectionWrapper, luaCiphersuiteId); + + /** + * Returns the OpenSSL name for the set of ciphers used in the established TLS connection. Returns + * empty string if there is no current negotiated ciphersuite. + */ + DECLARE_LUA_FUNCTION(SslConnectionWrapper, luaCiphersuiteString); + + /** + * Returns the TLS version (e.g. TLSv1.2, TLSv1.3) used in the established TLS connection. Returns + * string if secured and nil if not. + */ + DECLARE_LUA_FUNCTION(SslConnectionWrapper, luaTlsVersion); + + // TODO(dio): Add luaX509Extension if required, since currently it is used out of tree. + + const Ssl::ConnectionInfo& connection_info_; }; /** @@ -124,6 +259,9 @@ class SslConnectionWrapper : public BaseLuaObject { class ConnectionWrapper : public BaseLuaObject { public: ConnectionWrapper(const Network::Connection* connection) : connection_{connection} {} + + // TODO(dio): Remove this in favor of StreamInfo::downstreamSslConnection wrapper since ssl() in + // envoy/network/connection.h is subject to removal. 
static ExportedFunctions exportedFunctions() { return {{"ssl", static_luaSsl}}; } private: diff --git a/source/extensions/filters/common/original_src/BUILD b/source/extensions/filters/common/original_src/BUILD index 7cf8fd5926d2a..0c4b4832e2e35 100644 --- a/source/extensions/filters/common/original_src/BUILD +++ b/source/extensions/filters/common/original_src/BUILD @@ -1,14 +1,14 @@ -licenses(["notice"]) # Apache 2 - -# Helprs for filters for mirroring the downstream remote address on the upstream's source. - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +# Helprs for filters for mirroring the downstream remote address on the upstream's source. + +envoy_extension_package() envoy_cc_library( name = "original_src_socket_option_lib", diff --git a/source/extensions/filters/common/ratelimit/BUILD b/source/extensions/filters/common/ratelimit/BUILD index bd26ccb6b8b06..4bf0b36b1e5de 100644 --- a/source/extensions/filters/common/ratelimit/BUILD +++ b/source/extensions/filters/common/ratelimit/BUILD @@ -1,12 +1,12 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +envoy_extension_package() envoy_cc_library( name = "ratelimit_lib", @@ -39,6 +39,7 @@ envoy_cc_library( "//include/envoy/singleton:manager_interface", "//include/envoy/tracing:http_tracer_interface", "//source/common/stats:symbol_table_lib", + "@envoy_api//envoy/service/ratelimit/v3:pkg_cc_proto", ], ) diff --git a/source/extensions/filters/common/ratelimit/ratelimit.h b/source/extensions/filters/common/ratelimit/ratelimit.h index bb4317eb2a388..4ad48e7a87ab4 100644 --- a/source/extensions/filters/common/ratelimit/ratelimit.h +++ b/source/extensions/filters/common/ratelimit/ratelimit.h @@ -7,6 +7,7 @@ #include "envoy/common/pure.h" #include 
"envoy/ratelimit/ratelimit.h" +#include "envoy/service/ratelimit/v3/rls.pb.h" #include "envoy/singleton/manager.h" #include "envoy/tracing/http_tracer.h" @@ -30,6 +31,10 @@ enum class LimitStatus { OverLimit }; +using DescriptorStatusList = + std::vector; +using DescriptorStatusListPtr = std::unique_ptr; + /** * Async callbacks used during limit() calls. */ @@ -41,7 +46,8 @@ class RequestCallbacks { * Called when a limit request is complete. The resulting status, * response headers and request headers to be forwarded to the upstream are supplied. */ - virtual void complete(LimitStatus status, Http::ResponseHeaderMapPtr&& response_headers_to_add, + virtual void complete(LimitStatus status, DescriptorStatusListPtr&& descriptor_statuses, + Http::ResponseHeaderMapPtr&& response_headers_to_add, Http::RequestHeaderMapPtr&& request_headers_to_add) PURE; }; diff --git a/source/extensions/filters/common/ratelimit/ratelimit_impl.cc b/source/extensions/filters/common/ratelimit/ratelimit_impl.cc index 2df6f8445debb..5a93471af9032 100644 --- a/source/extensions/filters/common/ratelimit/ratelimit_impl.cc +++ b/source/extensions/filters/common/ratelimit/ratelimit_impl.cc @@ -7,7 +7,6 @@ #include "envoy/config/core/v3/grpc_service.pb.h" #include "envoy/extensions/common/ratelimit/v3/ratelimit.pb.h" -#include "envoy/service/ratelimit/v3/rls.pb.h" #include "envoy/stats/scope.h" #include "common/common/assert.h" @@ -21,10 +20,14 @@ namespace Common { namespace RateLimit { GrpcClientImpl::GrpcClientImpl(Grpc::RawAsyncClientPtr&& async_client, - const absl::optional& timeout) - : service_method_(*Protobuf::DescriptorPool::generated_pool()->FindMethodByName( - "envoy.service.ratelimit.v2.RateLimitService.ShouldRateLimit")), - async_client_(std::move(async_client)), timeout_(timeout) {} + const absl::optional& timeout, + envoy::config::core::v3::ApiVersion transport_api_version) + : async_client_(std::move(async_client)), timeout_(timeout), + service_method_( + 
Grpc::VersionedMethods("envoy.service.ratelimit.v3.RateLimitService.ShouldRateLimit", + "envoy.service.ratelimit.v2.RateLimitService.ShouldRateLimit") + .getMethodDescriptorForVersion(transport_api_version)), + transport_api_version_(transport_api_version) {} GrpcClientImpl::~GrpcClientImpl() { ASSERT(!callbacks_); } @@ -47,6 +50,12 @@ void GrpcClientImpl::createRequest(envoy::service::ratelimit::v3::RateLimitReque new_entry->set_key(entry.key_); new_entry->set_value(entry.value_); } + if (descriptor.limit_) { + envoy::extensions::common::ratelimit::v3::RateLimitDescriptor_RateLimitOverride* new_limit = + new_descriptor->mutable_limit(); + new_limit->set_requests_per_unit(descriptor.limit_.value().requests_per_unit_); + new_limit->set_unit(descriptor.limit_.value().unit_); + } } } @@ -60,7 +69,8 @@ void GrpcClientImpl::limit(RequestCallbacks& callbacks, const std::string& domai createRequest(request, domain, descriptors); request_ = async_client_->send(service_method_, request, *this, parent_span, - Http::AsyncClient::RequestOptions().setTimeout(timeout_)); + Http::AsyncClient::RequestOptions().setTimeout(timeout_), + transport_api_version_); } void GrpcClientImpl::onSuccess( @@ -78,19 +88,22 @@ void GrpcClientImpl::onSuccess( Http::ResponseHeaderMapPtr response_headers_to_add; Http::RequestHeaderMapPtr request_headers_to_add; if (!response->response_headers_to_add().empty()) { - response_headers_to_add = std::make_unique(); + response_headers_to_add = Http::ResponseHeaderMapImpl::create(); for (const auto& h : response->response_headers_to_add()) { response_headers_to_add->addCopy(Http::LowerCaseString(h.key()), h.value()); } } if (!response->request_headers_to_add().empty()) { - request_headers_to_add = std::make_unique(); + request_headers_to_add = Http::RequestHeaderMapImpl::create(); for (const auto& h : response->request_headers_to_add()) { request_headers_to_add->addCopy(Http::LowerCaseString(h.key()), h.value()); } } - callbacks_->complete(status, 
std::move(response_headers_to_add), + + DescriptorStatusListPtr descriptor_statuses = std::make_unique( + response->statuses().begin(), response->statuses().end()); + callbacks_->complete(status, std::move(descriptor_statuses), std::move(response_headers_to_add), std::move(request_headers_to_add)); callbacks_ = nullptr; } @@ -98,20 +111,21 @@ void GrpcClientImpl::onSuccess( void GrpcClientImpl::onFailure(Grpc::Status::GrpcStatus status, const std::string&, Tracing::Span&) { ASSERT(status != Grpc::Status::WellKnownGrpcStatus::Ok); - callbacks_->complete(LimitStatus::Error, nullptr, nullptr); + callbacks_->complete(LimitStatus::Error, nullptr, nullptr, nullptr); callbacks_ = nullptr; } ClientPtr rateLimitClient(Server::Configuration::FactoryContext& context, const envoy::config::core::v3::GrpcService& grpc_service, - const std::chrono::milliseconds timeout) { + const std::chrono::milliseconds timeout, + envoy::config::core::v3::ApiVersion transport_api_version) { // TODO(ramaraochavali): register client to singleton when GrpcClientImpl supports concurrent // requests. 
const auto async_client_factory = context.clusterManager().grpcAsyncClientManager().factoryForGrpcService( grpc_service, context.scope(), true); return std::make_unique( - async_client_factory->create(), timeout); + async_client_factory->create(), timeout, transport_api_version); } } // namespace RateLimit diff --git a/source/extensions/filters/common/ratelimit/ratelimit_impl.h b/source/extensions/filters/common/ratelimit/ratelimit_impl.h index f6daf85b14e4e..4108ec2b45c04 100644 --- a/source/extensions/filters/common/ratelimit/ratelimit_impl.h +++ b/source/extensions/filters/common/ratelimit/ratelimit_impl.h @@ -46,7 +46,8 @@ class GrpcClientImpl : public Client, public Logger::Loggable { public: GrpcClientImpl(Grpc::RawAsyncClientPtr&& async_client, - const absl::optional& timeout); + const absl::optional& timeout, + envoy::config::core::v3::ApiVersion transport_api_version); ~GrpcClientImpl() override; static void createRequest(envoy::service::ratelimit::v3::RateLimitRequest& request, @@ -67,13 +68,14 @@ class GrpcClientImpl : public Client, Tracing::Span& span) override; private: - const Protobuf::MethodDescriptor& service_method_; Grpc::AsyncClient async_client_; Grpc::AsyncRequest* request_{}; absl::optional timeout_; RequestCallbacks* callbacks_{}; + const Protobuf::MethodDescriptor& service_method_; + const envoy::config::core::v3::ApiVersion transport_api_version_; }; /** @@ -81,7 +83,8 @@ class GrpcClientImpl : public Client, */ ClientPtr rateLimitClient(Server::Configuration::FactoryContext& context, const envoy::config::core::v3::GrpcService& grpc_service, - const std::chrono::milliseconds timeout); + const std::chrono::milliseconds timeout, + envoy::config::core::v3::ApiVersion transport_api_version); } // namespace RateLimit } // namespace Common diff --git a/source/extensions/filters/common/rbac/BUILD b/source/extensions/filters/common/rbac/BUILD index 2784d91ffb112..14c649ca43406 100644 --- a/source/extensions/filters/common/rbac/BUILD +++ 
b/source/extensions/filters/common/rbac/BUILD @@ -1,12 +1,12 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +envoy_extension_package() envoy_cc_library( name = "utility_lib", diff --git a/source/extensions/filters/common/rbac/engine.h b/source/extensions/filters/common/rbac/engine.h index a833867dd02a9..7174d4edb8607 100644 --- a/source/extensions/filters/common/rbac/engine.h +++ b/source/extensions/filters/common/rbac/engine.h @@ -19,32 +19,32 @@ class RoleBasedAccessControlEngine { virtual ~RoleBasedAccessControlEngine() = default; /** - * Returns whether or not the current action is permitted. + * Handles action-specific operations and returns whether or not the request is permitted. * * @param connection the downstream connection used to identify the action/principal. * @param headers the headers of the incoming request used to identify the action/principal. An * empty map should be used if there are no headers available. * @param info the per-request or per-connection stream info with additional information - * about the action/principal. + * about the action/principal. Can be modified by the LOG Action. * @param effective_policy_id it will be filled by the matching policy's ID, * which is used to identity the source of the allow/deny. */ - virtual bool allowed(const Network::Connection& connection, - const Envoy::Http::RequestHeaderMap& headers, - const StreamInfo::StreamInfo& info, - std::string* effective_policy_id) const PURE; + virtual bool handleAction(const Network::Connection& connection, + const Envoy::Http::RequestHeaderMap& headers, + StreamInfo::StreamInfo& info, + std::string* effective_policy_id) const PURE; /** - * Returns whether or not the current action is permitted. + * Handles action-specific operations and returns whether or not the request is permitted. 
* * @param connection the downstream connection used to identify the action/principal. * @param info the per-request or per-connection stream info with additional information - * about the action/principal. + * about the action/principal. Can be modified by the LOG Action. * @param effective_policy_id it will be filled by the matching policy's ID, * which is used to identity the source of the allow/deny. */ - virtual bool allowed(const Network::Connection& connection, const StreamInfo::StreamInfo& info, - std::string* effective_policy_id) const PURE; + virtual bool handleAction(const Network::Connection& connection, StreamInfo::StreamInfo& info, + std::string* effective_policy_id) const PURE; }; } // namespace RBAC diff --git a/source/extensions/filters/common/rbac/engine_impl.cc b/source/extensions/filters/common/rbac/engine_impl.cc index bd8a0a9cd0cac..dc2a6ba79222a 100644 --- a/source/extensions/filters/common/rbac/engine_impl.cc +++ b/source/extensions/filters/common/rbac/engine_impl.cc @@ -11,8 +11,8 @@ namespace Common { namespace RBAC { RoleBasedAccessControlEngineImpl::RoleBasedAccessControlEngineImpl( - const envoy::config::rbac::v3::RBAC& rules) - : allowed_if_matched_(rules.action() == envoy::config::rbac::v3::RBAC::ALLOW) { + const envoy::config::rbac::v3::RBAC& rules, const EnforcementMode mode) + : action_(rules.action()), mode_(mode) { // guard expression builder by presence of a condition in policies for (const auto& policy : rules.policies()) { if (policy.second.has_condition()) { @@ -26,10 +26,43 @@ RoleBasedAccessControlEngineImpl::RoleBasedAccessControlEngineImpl( } } -bool RoleBasedAccessControlEngineImpl::allowed(const Network::Connection& connection, - const Envoy::Http::RequestHeaderMap& headers, - const StreamInfo::StreamInfo& info, - std::string* effective_policy_id) const { +bool RoleBasedAccessControlEngineImpl::handleAction(const Network::Connection& connection, + StreamInfo::StreamInfo& info, + std::string* effective_policy_id) const { 
+ return handleAction(connection, *Http::StaticEmptyHeaders::get().request_headers, info, + effective_policy_id); +} + +bool RoleBasedAccessControlEngineImpl::handleAction(const Network::Connection& connection, + const Envoy::Http::RequestHeaderMap& headers, + StreamInfo::StreamInfo& info, + std::string* effective_policy_id) const { + bool matched = checkPolicyMatch(connection, info, headers, effective_policy_id); + + switch (action_) { + case envoy::config::rbac::v3::RBAC::ALLOW: + return matched; + case envoy::config::rbac::v3::RBAC::DENY: + return !matched; + case envoy::config::rbac::v3::RBAC::LOG: { + // If not shadow enforcement, set shared log metadata + if (mode_ != EnforcementMode::Shadow) { + ProtobufWkt::Struct log_metadata; + auto& log_fields = *log_metadata.mutable_fields(); + log_fields[DynamicMetadataKeysSingleton::get().AccessLogKey].set_bool_value(matched); + info.setDynamicMetadata(DynamicMetadataKeysSingleton::get().CommonNamespace, log_metadata); + } + + return true; + } + default: + NOT_REACHED_GCOVR_EXCL_LINE; + } +} + +bool RoleBasedAccessControlEngineImpl::checkPolicyMatch( + const Network::Connection& connection, const StreamInfo::StreamInfo& info, + const Envoy::Http::RequestHeaderMap& headers, std::string* effective_policy_id) const { bool matched = false; for (const auto& policy : policies_) { @@ -42,17 +75,7 @@ bool RoleBasedAccessControlEngineImpl::allowed(const Network::Connection& connec } } - // only allowed if: - // - matched and ALLOW action - // - not matched and DENY action - return matched == allowed_if_matched_; -} - -bool RoleBasedAccessControlEngineImpl::allowed(const Network::Connection& connection, - const StreamInfo::StreamInfo& info, - std::string* effective_policy_id) const { - static const Http::RequestHeaderMapImpl* empty_header = new Http::RequestHeaderMapImpl(); - return allowed(connection, *empty_header, info, effective_policy_id); + return matched; } } // namespace RBAC diff --git 
a/source/extensions/filters/common/rbac/engine_impl.h b/source/extensions/filters/common/rbac/engine_impl.h index 261b45b0aa133..0aacfb41f8e15 100644 --- a/source/extensions/filters/common/rbac/engine_impl.h +++ b/source/extensions/filters/common/rbac/engine_impl.h @@ -11,18 +11,40 @@ namespace Filters { namespace Common { namespace RBAC { +class DynamicMetadataKeys { +public: + const std::string ShadowEffectivePolicyIdField{"shadow_effective_policy_id"}; + const std::string ShadowEngineResultField{"shadow_engine_result"}; + const std::string EngineResultAllowed{"allowed"}; + const std::string EngineResultDenied{"denied"}; + const std::string AccessLogKey{"access_log_hint"}; + const std::string CommonNamespace{"envoy.common"}; +}; + +using DynamicMetadataKeysSingleton = ConstSingleton; + +enum class EnforcementMode { Enforced, Shadow }; + class RoleBasedAccessControlEngineImpl : public RoleBasedAccessControlEngine, NonCopyable { public: - RoleBasedAccessControlEngineImpl(const envoy::config::rbac::v3::RBAC& rules); + RoleBasedAccessControlEngineImpl(const envoy::config::rbac::v3::RBAC& rules, + const EnforcementMode mode = EnforcementMode::Enforced); - bool allowed(const Network::Connection& connection, const Envoy::Http::RequestHeaderMap& headers, - const StreamInfo::StreamInfo& info, std::string* effective_policy_id) const override; + bool handleAction(const Network::Connection& connection, + const Envoy::Http::RequestHeaderMap& headers, StreamInfo::StreamInfo& info, + std::string* effective_policy_id) const override; - bool allowed(const Network::Connection& connection, const StreamInfo::StreamInfo& info, - std::string* effective_policy_id) const override; + bool handleAction(const Network::Connection& connection, StreamInfo::StreamInfo& info, + std::string* effective_policy_id) const override; private: - const bool allowed_if_matched_; + // Checks whether the request matches any policies + bool checkPolicyMatch(const Network::Connection& connection, const 
StreamInfo::StreamInfo& info, + const Envoy::Http::RequestHeaderMap& headers, + std::string* effective_policy_id) const; + + const envoy::config::rbac::v3::RBAC::Action action_; + const EnforcementMode mode_; std::map> policies_; diff --git a/source/extensions/filters/common/rbac/matchers.cc b/source/extensions/filters/common/rbac/matchers.cc index 123f394d65236..81b5be5885bf2 100644 --- a/source/extensions/filters/common/rbac/matchers.cc +++ b/source/extensions/filters/common/rbac/matchers.cc @@ -211,7 +211,7 @@ bool PathMatcher::matches(const Network::Connection&, const Envoy::Http::Request if (headers.Path() == nullptr) { return false; } - return path_matcher_.match(headers.Path()->value().getStringView()); + return path_matcher_.match(headers.getPathValue()); } } // namespace RBAC diff --git a/source/extensions/filters/common/rbac/matchers.h b/source/extensions/filters/common/rbac/matchers.h index fcc10f41fdb17..a73bcf3732665 100644 --- a/source/extensions/filters/common/rbac/matchers.h +++ b/source/extensions/filters/common/rbac/matchers.h @@ -38,7 +38,7 @@ class Matcher { * @param connection the downstream connection used to match against. * @param headers the request headers used to match against. An empty map should be used if * there are none headers available. - * @param metadata the additional information about the action/principal. + * @param info the additional information about the action/principal. 
*/ virtual bool matches(const Network::Connection& connection, const Envoy::Http::RequestHeaderMap& headers, diff --git a/source/extensions/filters/common/rbac/utility.h b/source/extensions/filters/common/rbac/utility.h index a48efb813234b..04635eb37411e 100644 --- a/source/extensions/filters/common/rbac/utility.h +++ b/source/extensions/filters/common/rbac/utility.h @@ -12,16 +12,6 @@ namespace Filters { namespace Common { namespace RBAC { -class DynamicMetadataKeys { -public: - const std::string ShadowEffectivePolicyIdField{"shadow_effective_policy_id"}; - const std::string ShadowEngineResultField{"shadow_engine_result"}; - const std::string EngineResultAllowed{"allowed"}; - const std::string EngineResultDenied{"denied"}; -}; - -using DynamicMetadataKeysSingleton = ConstSingleton; - /** * All stats for the RBAC filter. @see stats_macros.h */ @@ -40,19 +30,18 @@ struct RoleBasedAccessControlFilterStats { RoleBasedAccessControlFilterStats generateStats(const std::string& prefix, Stats::Scope& scope); -enum class EnforcementMode { Enforced, Shadow }; - template std::unique_ptr createEngine(const ConfigType& config) { - return config.has_rules() ? std::make_unique(config.rules()) + return config.has_rules() ? std::make_unique( + config.rules(), EnforcementMode::Enforced) : nullptr; } template std::unique_ptr createShadowEngine(const ConfigType& config) { - return config.has_shadow_rules() - ? std::make_unique(config.shadow_rules()) - : nullptr; + return config.has_shadow_rules() ? 
std::make_unique( + config.shadow_rules(), EnforcementMode::Shadow) + : nullptr; } } // namespace RBAC diff --git a/source/extensions/filters/http/BUILD b/source/extensions/filters/http/BUILD index 7a4780afbdab2..790ddc806157a 100644 --- a/source/extensions/filters/http/BUILD +++ b/source/extensions/filters/http/BUILD @@ -1,16 +1,18 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +envoy_extension_package() envoy_cc_library( name = "well_known_names", hdrs = ["well_known_names.h"], + # Well known names are public. + visibility = ["//visibility:public"], deps = [ "//source/common/config:well_known_names", "//source/common/singleton:const_singleton", diff --git a/source/extensions/filters/http/adaptive_concurrency/BUILD b/source/extensions/filters/http/adaptive_concurrency/BUILD index 1cff74436f07e..9cef1214ab36f 100644 --- a/source/extensions/filters/http/adaptive_concurrency/BUILD +++ b/source/extensions/filters/http/adaptive_concurrency/BUILD @@ -1,17 +1,17 @@ -licenses(["notice"]) # Apache 2 - -# HTTP L7 filter that dynamically adjusts the number of allowed concurrent -# requests based on sampled latencies. -# Public docs: docs/root/configuration/http_filters/adaptive_concurrency_filter.rst - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +# HTTP L7 filter that dynamically adjusts the number of allowed concurrent +# requests based on sampled latencies. 
+# Public docs: docs/root/configuration/http_filters/adaptive_concurrency_filter.rst + +envoy_extension_package() envoy_cc_library( name = "adaptive_concurrency_filter_lib", diff --git a/source/extensions/filters/http/adaptive_concurrency/adaptive_concurrency_filter.cc b/source/extensions/filters/http/adaptive_concurrency/adaptive_concurrency_filter.cc index 69e706cbf2b63..b2478d0408968 100644 --- a/source/extensions/filters/http/adaptive_concurrency/adaptive_concurrency_filter.cc +++ b/source/extensions/filters/http/adaptive_concurrency/adaptive_concurrency_filter.cc @@ -43,16 +43,11 @@ Http::FilterHeadersStatus AdaptiveConcurrencyFilter::decodeHeaders(Http::Request return Http::FilterHeadersStatus::StopIteration; } - // When the deferred_sample_task_ object is destroyed, the time difference between its destruction - // and the request start time is measured as the request latency. This value is sampled by the - // concurrency controller either when encoding is complete or during destruction of this filter - // object. + // When the deferred_sample_task_ object is destroyed, the request start time is sampled. This + // occurs either when encoding is complete or during destruction of this filter object. 
+ const auto now = config_->timeSource().monotonicTime(); deferred_sample_task_ = - std::make_unique([this, rq_start_time = config_->timeSource().monotonicTime()]() { - const auto now = config_->timeSource().monotonicTime(); - const std::chrono::nanoseconds rq_latency = now - rq_start_time; - controller_->recordLatencySample(rq_latency); - }); + std::make_unique([this, now]() { controller_->recordLatencySample(now); }); return Http::FilterHeadersStatus::Continue; } diff --git a/source/extensions/filters/http/adaptive_concurrency/config.cc b/source/extensions/filters/http/adaptive_concurrency/config.cc index fc6b9d5e0f99e..63a3d2d7f3698 100644 --- a/source/extensions/filters/http/adaptive_concurrency/config.cc +++ b/source/extensions/filters/http/adaptive_concurrency/config.cc @@ -26,7 +26,8 @@ Http::FilterFactoryCb AdaptiveConcurrencyFilterFactory::createFilterFactoryFromP Controller::GradientControllerConfig(config.gradient_controller_config(), context.runtime()); controller = std::make_shared( std::move(gradient_controller_config), context.dispatcher(), context.runtime(), - acc_stats_prefix + "gradient_controller.", context.scope(), context.random()); + acc_stats_prefix + "gradient_controller.", context.scope(), context.random(), + context.timeSource()); AdaptiveConcurrencyFilterConfigSharedPtr filter_config( new AdaptiveConcurrencyFilterConfig(config, context.runtime(), std::move(acc_stats_prefix), diff --git a/source/extensions/filters/http/adaptive_concurrency/controller/BUILD b/source/extensions/filters/http/adaptive_concurrency/controller/BUILD index b5e828f9a3b3a..b9f4475d7af72 100644 --- a/source/extensions/filters/http/adaptive_concurrency/controller/BUILD +++ b/source/extensions/filters/http/adaptive_concurrency/controller/BUILD @@ -1,16 +1,16 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_extension_package", +) + licenses(["notice"]) # Apache 2 # HTTP L7 filter that dynamically adjusts the number of allowed concurrent # 
requests based on sampled latencies. # Public docs: TODO (tonya11en) -load( - "//bazel:envoy_build_system.bzl", - "envoy_cc_library", - "envoy_package", -) - -envoy_package() +envoy_extension_package() envoy_cc_library( name = "controller_lib", @@ -23,6 +23,7 @@ envoy_cc_library( "libcircllhist", ], deps = [ + "//include/envoy/common:time_interface", "//source/common/event:dispatcher_lib", "//source/common/protobuf", "//source/common/runtime:runtime_lib", diff --git a/source/extensions/filters/http/adaptive_concurrency/controller/controller.h b/source/extensions/filters/http/adaptive_concurrency/controller/controller.h index ecb78307a9d23..a6ba79f55a425 100644 --- a/source/extensions/filters/http/adaptive_concurrency/controller/controller.h +++ b/source/extensions/filters/http/adaptive_concurrency/controller/controller.h @@ -3,6 +3,7 @@ #include #include "envoy/common/pure.h" +#include "envoy/common/time.h" namespace Envoy { namespace Extensions { @@ -41,9 +42,9 @@ class ConcurrencyController { * request latency to update the internal state of the controller for * concurrency limit calculations. * - * @param rq_latency is the clocked round-trip time for the request. 
+ * @param rq_send_time the time point which the sampled request was sent */ - virtual void recordLatencySample(std::chrono::nanoseconds rq_latency) PURE; + virtual void recordLatencySample(MonotonicTime rq_send_time) PURE; /** * Omit sampling an outstanding request and update the internal state of the controller to reflect diff --git a/source/extensions/filters/http/adaptive_concurrency/controller/gradient_controller.cc b/source/extensions/filters/http/adaptive_concurrency/controller/gradient_controller.cc index c94ddaef11c3a..d8063ec6723f8 100644 --- a/source/extensions/filters/http/adaptive_concurrency/controller/gradient_controller.cc +++ b/source/extensions/filters/http/adaptive_concurrency/controller/gradient_controller.cc @@ -3,6 +3,7 @@ #include #include +#include "envoy/common/random_generator.h" #include "envoy/event/dispatcher.h" #include "envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.pb.h" #include "envoy/runtime/runtime.h" @@ -46,10 +47,11 @@ GradientControllerConfig::GradientControllerConfig( GradientController::GradientController(GradientControllerConfig config, Event::Dispatcher& dispatcher, Runtime::Loader&, const std::string& stats_prefix, Stats::Scope& scope, - Runtime::RandomGenerator& random) + Random::RandomGenerator& random, TimeSource& time_source) : config_(std::move(config)), dispatcher_(dispatcher), scope_(scope), - stats_(generateStats(scope_, stats_prefix)), random_(random), deferred_limit_value_(0), - num_rq_outstanding_(0), concurrency_limit_(config_.minConcurrency()), + stats_(generateStats(scope_, stats_prefix)), random_(random), time_source_(time_source), + deferred_limit_value_(0), num_rq_outstanding_(0), + concurrency_limit_(config_.minConcurrency()), latency_sample_hist_(hist_fast_alloc(), hist_free) { min_rtt_calc_timer_ = dispatcher_.createTimer([this]() -> void { enterMinRTTSamplingWindow(); }); @@ -102,6 +104,8 @@ void GradientController::enterMinRTTSamplingWindow() { // Throw away any latency 
samples from before the recalculation window as it may not represent // the minRTT. hist_clear(latency_sample_hist_.get()); + + min_rtt_epoch_ = time_source_.monotonicTime(); } void GradientController::updateMinRTT() { @@ -192,16 +196,22 @@ RequestForwardingAction GradientController::forwardingDecision() { return RequestForwardingAction::Block; } -void GradientController::recordLatencySample(std::chrono::nanoseconds rq_latency) { - const uint32_t latency_usec = - std::chrono::duration_cast(rq_latency).count(); +void GradientController::recordLatencySample(MonotonicTime rq_send_time) { ASSERT(num_rq_outstanding_.load() > 0); --num_rq_outstanding_; + if (rq_send_time < min_rtt_epoch_) { + // Disregard samples from requests started in the previous minRTT window. + return; + } + + const std::chrono::microseconds rq_latency = + std::chrono::duration_cast(time_source_.monotonicTime() - + rq_send_time); uint32_t sample_count; { absl::MutexLock ml(&sample_mutation_mtx_); - hist_insert(latency_sample_hist_.get(), latency_usec, 1); + hist_insert(latency_sample_hist_.get(), rq_latency.count(), 1); sample_count = hist_sample_count(latency_sample_hist_.get()); } diff --git a/source/extensions/filters/http/adaptive_concurrency/controller/gradient_controller.h b/source/extensions/filters/http/adaptive_concurrency/controller/gradient_controller.h index 1da1c3d8b81a0..176bb52095d6f 100644 --- a/source/extensions/filters/http/adaptive_concurrency/controller/gradient_controller.h +++ b/source/extensions/filters/http/adaptive_concurrency/controller/gradient_controller.h @@ -3,6 +3,8 @@ #include #include +#include "envoy/common/random_generator.h" +#include "envoy/common/time.h" #include "envoy/event/dispatcher.h" #include "envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.pb.h" #include "envoy/runtime/runtime.h" @@ -210,11 +212,11 @@ class GradientController : public ConcurrencyController { public: GradientController(GradientControllerConfig config, 
Event::Dispatcher& dispatcher, Runtime::Loader& runtime, const std::string& stats_prefix, Stats::Scope& scope, - Runtime::RandomGenerator& random); + Random::RandomGenerator& random, TimeSource& time_source); // ConcurrencyController. RequestForwardingAction forwardingDecision() override; - void recordLatencySample(std::chrono::nanoseconds rq_latency) override; + void recordLatencySample(MonotonicTime rq_send_time) override; void cancelLatencySample() override; uint32_t concurrencyLimit() const override { return concurrency_limit_.load(); } @@ -237,7 +239,8 @@ class GradientController : public ConcurrencyController { Event::Dispatcher& dispatcher_; Stats::Scope& scope_; GradientControllerStats stats_; - Runtime::RandomGenerator& random_; + Random::RandomGenerator& random_; + TimeSource& time_source_; // Protects data related to latency sampling and RTT values. In addition to protecting the latency // sample histogram, the mutex ensures that the minRTT calculation window and the sample window @@ -274,6 +277,10 @@ class GradientController : public ConcurrencyController { // after remaining at the minimum limit for too long. uint32_t consecutive_min_concurrency_set_ ABSL_GUARDED_BY(sample_mutation_mtx_); + // We will disregard sampling any requests admitted before this timestamp to prevent sampling + // requests admitted before the start of a minRTT window and potentially skewing the minRTT. 
+ MonotonicTime min_rtt_epoch_; + Event::TimerPtr min_rtt_calc_timer_; Event::TimerPtr sample_reset_timer_; }; diff --git a/source/extensions/filters/http/admission_control/BUILD b/source/extensions/filters/http/admission_control/BUILD new file mode 100644 index 0000000000000..07acbda5fe58a --- /dev/null +++ b/source/extensions/filters/http/admission_control/BUILD @@ -0,0 +1,53 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", + "envoy_extension_package", +) + +licenses(["notice"]) # Apache 2 + +# HTTP L7 filter that probabilistically rejects requests based on upstream success-rate. +# Public docs: docs/root/configuration/http_filters/admission_control.rst + +envoy_extension_package() + +envoy_cc_extension( + name = "admission_control_filter_lib", + srcs = [ + "admission_control.cc", + "thread_local_controller.cc", + ], + hdrs = [ + "admission_control.h", + "thread_local_controller.h", + ], + security_posture = "unknown", + deps = [ + "//include/envoy/http:filter_interface", + "//include/envoy/runtime:runtime_interface", + "//source/common/common:cleanup_lib", + "//source/common/http:codes_lib", + "//source/common/runtime:runtime_lib", + "//source/extensions/filters/http:well_known_names", + "//source/extensions/filters/http/admission_control/evaluators:response_evaluator_lib", + "//source/extensions/filters/http/common:pass_through_filter_lib", + "@envoy_api//envoy/extensions/filters/http/admission_control/v3alpha:pkg_cc_proto", + ], +) + +envoy_cc_extension( + name = "config", + srcs = ["config.cc"], + hdrs = ["config.h"], + security_posture = "unknown", + status = "alpha", + deps = [ + "//include/envoy/registry", + "//source/common/common:enum_to_int", + "//source/extensions/filters/http:well_known_names", + "//source/extensions/filters/http/admission_control:admission_control_filter_lib", + "//source/extensions/filters/http/admission_control/evaluators:response_evaluator_lib", + "//source/extensions/filters/http/common:factory_base_lib", + 
"@envoy_api//envoy/extensions/filters/http/admission_control/v3alpha:pkg_cc_proto", + ], +) diff --git a/source/extensions/filters/http/admission_control/admission_control.cc b/source/extensions/filters/http/admission_control/admission_control.cc new file mode 100644 index 0000000000000..fe880fefc47b6 --- /dev/null +++ b/source/extensions/filters/http/admission_control/admission_control.cc @@ -0,0 +1,140 @@ +#include "extensions/filters/http/admission_control/admission_control.h" + +#include +#include +#include +#include + +#include "envoy/common/random_generator.h" +#include "envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.h" +#include "envoy/grpc/status.h" +#include "envoy/http/codes.h" +#include "envoy/runtime/runtime.h" +#include "envoy/server/filter_config.h" + +#include "common/common/cleanup.h" +#include "common/common/enum_to_int.h" +#include "common/grpc/common.h" +#include "common/http/codes.h" +#include "common/http/utility.h" +#include "common/protobuf/utility.h" + +#include "extensions/filters/http/admission_control/evaluators/success_criteria_evaluator.h" +#include "extensions/filters/http/well_known_names.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace AdmissionControl { + +using GrpcStatus = Grpc::Status::GrpcStatus; + +static constexpr double defaultAggression = 2.0; + +AdmissionControlFilterConfig::AdmissionControlFilterConfig( + const AdmissionControlProto& proto_config, Runtime::Loader& runtime, + Random::RandomGenerator& random, Stats::Scope& scope, ThreadLocal::SlotPtr&& tls, + std::shared_ptr response_evaluator) + : random_(random), scope_(scope), tls_(std::move(tls)), + admission_control_feature_(proto_config.enabled(), runtime), + aggression_( + proto_config.has_aggression_coefficient() + ? 
std::make_unique(proto_config.aggression_coefficient(), runtime) + : nullptr), + response_evaluator_(std::move(response_evaluator)) {} + +double AdmissionControlFilterConfig::aggression() const { + return std::max(1.0, aggression_ ? aggression_->value() : defaultAggression); +} + +AdmissionControlFilter::AdmissionControlFilter(AdmissionControlFilterConfigSharedPtr config, + const std::string& stats_prefix) + : config_(std::move(config)), stats_(generateStats(config_->scope(), stats_prefix)), + expect_grpc_status_in_trailer_(false), record_request_(true) {} + +Http::FilterHeadersStatus AdmissionControlFilter::decodeHeaders(Http::RequestHeaderMap&, bool) { + // TODO(tonya11en): Ensure we document the fact that healthchecks are ignored. + if (!config_->filterEnabled() || decoder_callbacks_->streamInfo().healthCheck()) { + // We must forego recording the success/failure of this request during encoding. + record_request_ = false; + return Http::FilterHeadersStatus::Continue; + } + + if (shouldRejectRequest()) { + decoder_callbacks_->sendLocalReply(Http::Code::ServiceUnavailable, "", nullptr, absl::nullopt, + "denied by admission control"); + stats_.rq_rejected_.inc(); + return Http::FilterHeadersStatus::StopIteration; + } + + return Http::FilterHeadersStatus::Continue; +} + +Http::FilterHeadersStatus AdmissionControlFilter::encodeHeaders(Http::ResponseHeaderMap& headers, + bool end_stream) { + // TODO(tonya11en): It's not possible for an HTTP filter to understand why a stream is reset, so + // we are not currently accounting for resets when recording requests. + + if (!record_request_) { + return Http::FilterHeadersStatus::Continue; + } + + bool successful_response = false; + if (Grpc::Common::isGrpcResponseHeaders(headers, end_stream)) { + absl::optional grpc_status = Grpc::Common::getGrpcStatus(headers); + + // If the GRPC status isn't found in the headers, it must be found in the trailers. 
+ expect_grpc_status_in_trailer_ = !grpc_status.has_value(); + if (expect_grpc_status_in_trailer_) { + return Http::FilterHeadersStatus::Continue; + } + + const uint32_t status = enumToInt(grpc_status.value()); + successful_response = config_->responseEvaluator().isGrpcSuccess(status); + } else { + // HTTP response. + const uint64_t http_status = Http::Utility::getResponseStatus(headers); + successful_response = config_->responseEvaluator().isHttpSuccess(http_status); + } + + if (successful_response) { + recordSuccess(); + } else { + recordFailure(); + } + + return Http::FilterHeadersStatus::Continue; +} + +Http::FilterTrailersStatus +AdmissionControlFilter::encodeTrailers(Http::ResponseTrailerMap& trailers) { + if (expect_grpc_status_in_trailer_) { + absl::optional grpc_status = Grpc::Common::getGrpcStatus(trailers, false); + + if (grpc_status.has_value() && + config_->responseEvaluator().isGrpcSuccess(grpc_status.value())) { + recordSuccess(); + } else { + recordFailure(); + } + } + + return Http::FilterTrailersStatus::Continue; +} + +bool AdmissionControlFilter::shouldRejectRequest() const { + const auto request_counts = config_->getController().requestCounts(); + const double total = request_counts.requests; + const double success = request_counts.successes; + const double probability = (total - config_->aggression() * success) / (total + 1); + + // Choosing an accuracy of 4 significant figures for the probability. 
+ static constexpr uint64_t accuracy = 1e4; + auto r = config_->random().random(); + return (accuracy * std::max(probability, 0.0)) > (r % accuracy); +} + +} // namespace AdmissionControl +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/http/admission_control/admission_control.h b/source/extensions/filters/http/admission_control/admission_control.h new file mode 100644 index 0000000000000..54d793236e6bd --- /dev/null +++ b/source/extensions/filters/http/admission_control/admission_control.h @@ -0,0 +1,118 @@ +#pragma once + +#include +#include +#include + +#include "envoy/common/random_generator.h" +#include "envoy/common/time.h" +#include "envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.h" +#include "envoy/http/codes.h" +#include "envoy/http/filter.h" +#include "envoy/runtime/runtime.h" +#include "envoy/server/filter_config.h" +#include "envoy/stats/scope.h" +#include "envoy/stats/stats_macros.h" + +#include "common/common/cleanup.h" +#include "common/grpc/common.h" +#include "common/grpc/status.h" +#include "common/http/codes.h" +#include "common/runtime/runtime_protos.h" + +#include "extensions/filters/http/admission_control/evaluators/response_evaluator.h" +#include "extensions/filters/http/admission_control/thread_local_controller.h" +#include "extensions/filters/http/common/pass_through_filter.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace AdmissionControl { + +/** + * All stats for the admission control filter. + */ +#define ALL_ADMISSION_CONTROL_STATS(COUNTER) COUNTER(rq_rejected) + +/** + * Wrapper struct for admission control filter stats. @see stats_macros.h + */ +struct AdmissionControlStats { + ALL_ADMISSION_CONTROL_STATS(GENERATE_COUNTER_STRUCT) +}; + +using AdmissionControlProto = + envoy::extensions::filters::http::admission_control::v3alpha::AdmissionControl; + +/** + * Configuration for the admission control filter. 
+ */ +class AdmissionControlFilterConfig { +public: + AdmissionControlFilterConfig(const AdmissionControlProto& proto_config, Runtime::Loader& runtime, + Random::RandomGenerator& random, Stats::Scope& scope, + ThreadLocal::SlotPtr&& tls, + std::shared_ptr response_evaluator); + virtual ~AdmissionControlFilterConfig() = default; + + virtual ThreadLocalController& getController() const { + return tls_->getTyped(); + } + + Random::RandomGenerator& random() const { return random_; } + bool filterEnabled() const { return admission_control_feature_.enabled(); } + Stats::Scope& scope() const { return scope_; } + double aggression() const; + ResponseEvaluator& responseEvaluator() const { return *response_evaluator_; } + +private: + Random::RandomGenerator& random_; + Stats::Scope& scope_; + const ThreadLocal::SlotPtr tls_; + Runtime::FeatureFlag admission_control_feature_; + std::unique_ptr aggression_; + std::shared_ptr response_evaluator_; +}; + +using AdmissionControlFilterConfigSharedPtr = std::shared_ptr; + +/** + * A filter that probabilistically rejects requests based on upstream success-rate. 
+ */ +class AdmissionControlFilter : public Http::PassThroughFilter, + Logger::Loggable { +public: + AdmissionControlFilter(AdmissionControlFilterConfigSharedPtr config, + const std::string& stats_prefix); + + // Http::StreamDecoderFilter + Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap&, bool) override; + + // Http::StreamEncoderFilter + Http::FilterHeadersStatus encodeHeaders(Http::ResponseHeaderMap& headers, + bool end_stream) override; + Http::FilterTrailersStatus encodeTrailers(Http::ResponseTrailerMap& trailers) override; + +private: + static AdmissionControlStats generateStats(Stats::Scope& scope, const std::string& prefix) { + return {ALL_ADMISSION_CONTROL_STATS(POOL_COUNTER_PREFIX(scope, prefix))}; + } + + bool shouldRejectRequest() const; + + void recordSuccess() { config_->getController().recordSuccess(); } + + void recordFailure() { config_->getController().recordFailure(); } + + const AdmissionControlFilterConfigSharedPtr config_; + AdmissionControlStats stats_; + bool expect_grpc_status_in_trailer_; + + // If false, the filter will forego recording a request success or failure during encoding. 
+ bool record_request_; +}; + +} // namespace AdmissionControl +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/http/admission_control/config.cc b/source/extensions/filters/http/admission_control/config.cc new file mode 100644 index 0000000000000..297fabf4f6d71 --- /dev/null +++ b/source/extensions/filters/http/admission_control/config.cc @@ -0,0 +1,64 @@ +#include "extensions/filters/http/admission_control/config.h" + +#include "envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.h" +#include "envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.validate.h" +#include "envoy/registry/registry.h" + +#include "common/common/enum_to_int.h" + +#include "extensions/filters/http/admission_control/admission_control.h" +#include "extensions/filters/http/admission_control/evaluators/response_evaluator.h" +#include "extensions/filters/http/admission_control/evaluators/success_criteria_evaluator.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace AdmissionControl { + +static constexpr std::chrono::seconds defaultSamplingWindow{120}; + +Http::FilterFactoryCb AdmissionControlFilterFactory::createFilterFactoryFromProtoTyped( + const envoy::extensions::filters::http::admission_control::v3alpha::AdmissionControl& config, + const std::string& stats_prefix, Server::Configuration::FactoryContext& context) { + + const std::string prefix = stats_prefix + "admission_control."; + + // Create the thread-local controller. 
+ auto tls = context.threadLocal().allocateSlot(); + auto sampling_window = std::chrono::seconds( + PROTOBUF_GET_MS_OR_DEFAULT(config, sampling_window, 1000 * defaultSamplingWindow.count()) / + 1000); + tls->set( + [sampling_window, &context](Event::Dispatcher&) -> ThreadLocal::ThreadLocalObjectSharedPtr { + return std::make_shared(context.timeSource(), sampling_window); + }); + + std::unique_ptr response_evaluator; + switch (config.evaluation_criteria_case()) { + case AdmissionControlProto::EvaluationCriteriaCase::kSuccessCriteria: + response_evaluator = std::make_unique(config.success_criteria()); + break; + case AdmissionControlProto::EvaluationCriteriaCase::EVALUATION_CRITERIA_NOT_SET: + NOT_REACHED_GCOVR_EXCL_LINE; + } + + AdmissionControlFilterConfigSharedPtr filter_config = + std::make_shared(config, context.runtime(), context.random(), + context.scope(), std::move(tls), + std::move(response_evaluator)); + + return [filter_config, prefix](Http::FilterChainFactoryCallbacks& callbacks) -> void { + callbacks.addStreamFilter(std::make_shared(filter_config, prefix)); + }; +} + +/** + * Static registration for the admission_control filter. @see RegisterFactory. 
+ */ +REGISTER_FACTORY(AdmissionControlFilterFactory, + Server::Configuration::NamedHttpFilterConfigFactory); + +} // namespace AdmissionControl +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/http/admission_control/config.h b/source/extensions/filters/http/admission_control/config.h new file mode 100644 index 0000000000000..8abe84eafefcb --- /dev/null +++ b/source/extensions/filters/http/admission_control/config.h @@ -0,0 +1,32 @@ +#pragma once + +#include "envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.h" +#include "envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.validate.h" + +#include "extensions/filters/http/common/factory_base.h" +#include "extensions/filters/http/well_known_names.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace AdmissionControl { + +/** + * Config registration for the adaptive concurrency limit filter. @see NamedHttpFilterConfigFactory. 
+ */ +class AdmissionControlFilterFactory + : public Common::FactoryBase< + envoy::extensions::filters::http::admission_control::v3alpha::AdmissionControl> { +public: + AdmissionControlFilterFactory() : FactoryBase(HttpFilterNames::get().AdmissionControl) {} + + Http::FilterFactoryCb createFilterFactoryFromProtoTyped( + const envoy::extensions::filters::http::admission_control::v3alpha::AdmissionControl& + proto_config, + const std::string& stats_prefix, Server::Configuration::FactoryContext& context) override; +}; + +} // namespace AdmissionControl +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/http/admission_control/evaluators/BUILD b/source/extensions/filters/http/admission_control/evaluators/BUILD new file mode 100644 index 0000000000000..c5c72ee2db5ca --- /dev/null +++ b/source/extensions/filters/http/admission_control/evaluators/BUILD @@ -0,0 +1,26 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_extension_package", +) + +licenses(["notice"]) # Apache 2 + +# HTTP L7 filter that probabilistically rejects requests based on upstream success-rate. 
+ +envoy_extension_package() + +envoy_cc_library( + name = "response_evaluator_lib", + srcs = ["success_criteria_evaluator.cc"], + hdrs = [ + "response_evaluator.h", + "success_criteria_evaluator.h", + ], + visibility = ["//visibility:public"], + deps = [ + "//include/envoy/grpc:status", + "//source/common/common:enum_to_int", + "@envoy_api//envoy/extensions/filters/http/admission_control/v3alpha:pkg_cc_proto", + ], +) diff --git a/source/extensions/filters/http/admission_control/evaluators/response_evaluator.h b/source/extensions/filters/http/admission_control/evaluators/response_evaluator.h new file mode 100644 index 0000000000000..9915014fdede2 --- /dev/null +++ b/source/extensions/filters/http/admission_control/evaluators/response_evaluator.h @@ -0,0 +1,33 @@ +#pragma once + +#include + +#include "envoy/common/pure.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace AdmissionControl { + +/** + * Determines of a request was successful based on response headers. + */ +class ResponseEvaluator { +public: + virtual ~ResponseEvaluator() = default; + + /** + * Returns true if the provided HTTP code constitutes a success. + */ + virtual bool isHttpSuccess(uint64_t code) const PURE; + + /** + * Returns true if the provided gRPC status counts constitutes a success. 
+ */ + virtual bool isGrpcSuccess(uint32_t status) const PURE; +}; + +} // namespace AdmissionControl +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/http/admission_control/evaluators/success_criteria_evaluator.cc b/source/extensions/filters/http/admission_control/evaluators/success_criteria_evaluator.cc new file mode 100644 index 0000000000000..6771bfba9a7b2 --- /dev/null +++ b/source/extensions/filters/http/admission_control/evaluators/success_criteria_evaluator.cc @@ -0,0 +1,73 @@ +#include "extensions/filters/http/admission_control/evaluators/success_criteria_evaluator.h" + +#include + +#include "envoy/common/exception.h" +#include "envoy/grpc/status.h" + +#include "common/common/enum_to_int.h" +#include "common/common/fmt.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace AdmissionControl { + +SuccessCriteriaEvaluator::SuccessCriteriaEvaluator(const SuccessCriteria& success_criteria) { + // HTTP status. + if (success_criteria.has_http_criteria()) { + for (const auto& range : success_criteria.http_criteria().http_success_status()) { + if (!validHttpRange(range.start(), range.end())) { + throw EnvoyException( + fmt::format("invalid HTTP range: [{}, {})", range.start(), range.end())); + } + + const auto start = static_cast(range.start()); + const auto end = static_cast(range.end()); + http_success_fns_.emplace_back( + [start, end](uint64_t status) { return (start <= status) && (status < end); }); + } + } else { + // We default to all non-5xx codes as successes. + http_success_fns_.emplace_back([](uint64_t status) { return status < 500; }); + } + + // GRPC status. 
+ if (success_criteria.has_grpc_criteria()) { + for (const auto& status : success_criteria.grpc_criteria().grpc_success_status()) { + if (status > 16) { + throw EnvoyException(fmt::format("invalid gRPC code {}", status)); + } + + grpc_success_codes_.emplace_back(status); + } + } else { + grpc_success_codes_ = { + enumToInt(Grpc::Status::WellKnownGrpcStatus::AlreadyExists), + enumToInt(Grpc::Status::WellKnownGrpcStatus::Canceled), + enumToInt(Grpc::Status::WellKnownGrpcStatus::FailedPrecondition), + enumToInt(Grpc::Status::WellKnownGrpcStatus::InvalidArgument), + enumToInt(Grpc::Status::WellKnownGrpcStatus::NotFound), + enumToInt(Grpc::Status::WellKnownGrpcStatus::Ok), + enumToInt(Grpc::Status::WellKnownGrpcStatus::OutOfRange), + enumToInt(Grpc::Status::WellKnownGrpcStatus::PermissionDenied), + enumToInt(Grpc::Status::WellKnownGrpcStatus::Unauthenticated), + enumToInt(Grpc::Status::WellKnownGrpcStatus::Unimplemented), + enumToInt(Grpc::Status::WellKnownGrpcStatus::Unknown), + }; + } +} + +bool SuccessCriteriaEvaluator::isGrpcSuccess(uint32_t status) const { + return std::count(grpc_success_codes_.begin(), grpc_success_codes_.end(), status) > 0; +} + +bool SuccessCriteriaEvaluator::isHttpSuccess(uint64_t code) const { + return std::any_of(http_success_fns_.begin(), http_success_fns_.end(), + [code](auto fn) { return fn(code); }); +} + +} // namespace AdmissionControl +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/http/admission_control/evaluators/success_criteria_evaluator.h b/source/extensions/filters/http/admission_control/evaluators/success_criteria_evaluator.h new file mode 100644 index 0000000000000..511d54408f42e --- /dev/null +++ b/source/extensions/filters/http/admission_control/evaluators/success_criteria_evaluator.h @@ -0,0 +1,36 @@ +#pragma once + +#include + +#include "envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.h" +#include 
"envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.validate.h" + +#include "extensions/filters/http/admission_control/evaluators/response_evaluator.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace AdmissionControl { + +class SuccessCriteriaEvaluator : public ResponseEvaluator { +public: + using SuccessCriteria = envoy::extensions::filters::http::admission_control::v3alpha:: + AdmissionControl::SuccessCriteria; + SuccessCriteriaEvaluator(const SuccessCriteria& evaluation_criteria); + // ResponseEvaluator + bool isHttpSuccess(uint64_t code) const override; + bool isGrpcSuccess(uint32_t status) const override; + +private: + bool validHttpRange(const int32_t start, const int32_t end) const { + return start <= end && start < 600 && start >= 100 && end <= 600 && end >= 100; + } + + std::vector> http_success_fns_; + std::vector grpc_success_codes_; +}; + +} // namespace AdmissionControl +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/http/admission_control/thread_local_controller.cc b/source/extensions/filters/http/admission_control/thread_local_controller.cc new file mode 100644 index 0000000000000..30f0aac40061c --- /dev/null +++ b/source/extensions/filters/http/admission_control/thread_local_controller.cc @@ -0,0 +1,49 @@ +#include "extensions/filters/http/admission_control/thread_local_controller.h" + +#include + +#include "envoy/common/pure.h" +#include "envoy/common/time.h" +#include "envoy/http/codes.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace AdmissionControl { + +static constexpr std::chrono::seconds defaultHistoryGranularity{1}; + +ThreadLocalControllerImpl::ThreadLocalControllerImpl(TimeSource& time_source, + std::chrono::seconds sampling_window) + : time_source_(time_source), sampling_window_(sampling_window) {} + +void ThreadLocalControllerImpl::maybeUpdateHistoricalData() { + // Purge stale 
samples. + while (!historical_data_.empty() && ageOfOldestSample() >= sampling_window_) { + removeOldestSample(); + } + + // It's possible we purged stale samples from the history and are left with nothing, so it's + // necessary to add an empty entry. We will also need to roll over into a new entry in the + // historical data if we've exceeded the time specified by the granularity. + if (historical_data_.empty() || ageOfNewestSample() >= defaultHistoryGranularity) { + historical_data_.emplace_back(time_source_.monotonicTime(), RequestData()); + } +} + +void ThreadLocalControllerImpl::recordRequest(bool success) { + maybeUpdateHistoricalData(); + + // The back of the deque will be the most recent samples. + ++historical_data_.back().second.requests; + ++global_data_.requests; + if (success) { + ++historical_data_.back().second.successes; + ++global_data_.successes; + } +} + +} // namespace AdmissionControl +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/http/admission_control/thread_local_controller.h b/source/extensions/filters/http/admission_control/thread_local_controller.h new file mode 100644 index 0000000000000..11f9387581779 --- /dev/null +++ b/source/extensions/filters/http/admission_control/thread_local_controller.h @@ -0,0 +1,113 @@ +#pragma once + +#include "envoy/common/pure.h" +#include "envoy/common/time.h" +#include "envoy/http/codes.h" +#include "envoy/thread_local/thread_local.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace AdmissionControl { + +/* + * Thread-local admission controller interface. 
+ */ +class ThreadLocalController { +public: + struct RequestData { + RequestData(uint32_t request_count, uint32_t success_count) + : requests(request_count), successes(success_count) {} + RequestData() = default; + + inline bool operator==(const RequestData& rhs) const { + return (requests == rhs.requests) && (successes == rhs.successes); + } + + uint32_t requests{0}; + uint32_t successes{0}; + }; + + virtual ~ThreadLocalController() = default; + + // Record success/failure of a request and update the internal state of the controller to reflect + // this. + virtual void recordSuccess() PURE; + virtual void recordFailure() PURE; + + // Returns the current number of requests and how many of them are successful. + virtual RequestData requestCounts() PURE; +}; + +/** + * Thread-local object to track request counts and successes over a rolling time window. Request + * data for the time window is kept recent via a circular buffer that phases out old request/success + * counts when recording new samples. + * + * This controller is thread-local so that we do not need to take any locks on the sample histories + * to update them, at the cost of decreasing the number of samples. + * + * The look-back window for request samples is accurate up to a hard-coded 1-second granularity. + * TODO (tonya11en): Allow the granularity to be configurable. + */ +class ThreadLocalControllerImpl : public ThreadLocalController, + public ThreadLocal::ThreadLocalObject { +public: + ThreadLocalControllerImpl(TimeSource& time_source, std::chrono::seconds sampling_window); + ~ThreadLocalControllerImpl() override = default; + void recordSuccess() override { recordRequest(true); } + void recordFailure() override { recordRequest(false); } + + RequestData requestCounts() override { + maybeUpdateHistoricalData(); + return global_data_; + } + +private: + void recordRequest(bool success); + + // Potentially remove any stale samples and record sample aggregates to the historical data. 
+ void maybeUpdateHistoricalData(); + + // Returns the age of the oldest sample in the historical data. + std::chrono::microseconds ageOfOldestSample() const { + ASSERT(!historical_data_.empty()); + using namespace std::chrono; + return duration_cast(time_source_.monotonicTime() - + historical_data_.front().first); + } + + // Returns the age of the newest sample in the historical data. + std::chrono::microseconds ageOfNewestSample() const { + ASSERT(!historical_data_.empty()); + using namespace std::chrono; + return duration_cast(time_source_.monotonicTime() - + historical_data_.back().first); + } + + // Removes the oldest sample in the historical data and reconciles the global data. + void removeOldestSample() { + ASSERT(!historical_data_.empty()); + global_data_.successes -= historical_data_.front().second.successes; + global_data_.requests -= historical_data_.front().second.requests; + historical_data_.pop_front(); + } + + TimeSource& time_source_; + + // Stores samples from oldest (front) to newest (back). Since there is no need to read/modify + // entries that are not the oldest or newest (front/back), we can get away with using a deque + // which allocates memory in chunks and keeps most elements contiguous and cache-friendly. + std::deque> historical_data_; + + // Request data aggregated for the whole look-back window. + RequestData global_data_; + + // The rolling time window size. 
+ const std::chrono::seconds sampling_window_; +}; + +} // namespace AdmissionControl +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/http/aws_lambda/BUILD b/source/extensions/filters/http/aws_lambda/BUILD index a3c73926c517c..86e2cc553f784 100644 --- a/source/extensions/filters/http/aws_lambda/BUILD +++ b/source/extensions/filters/http/aws_lambda/BUILD @@ -1,17 +1,17 @@ -licenses(["notice"]) # Apache 2 - -# L7 HTTP AWS Lambda filter -# Public docs: docs/root/configuration/http_filters/aws_lambda_filter.rst - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", "envoy_proto_library", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +# L7 HTTP AWS Lambda filter +# Public docs: docs/root/configuration/http_filters/aws_lambda_filter.rst + +envoy_extension_package() envoy_proto_library( name = "request_response", diff --git a/source/extensions/filters/http/aws_lambda/aws_lambda_filter.cc b/source/extensions/filters/http/aws_lambda/aws_lambda_filter.cc index f10ca5c0d0b02..e6b93b3f90e24 100644 --- a/source/extensions/filters/http/aws_lambda/aws_lambda_filter.cc +++ b/source/extensions/filters/http/aws_lambda/aws_lambda_filter.cc @@ -93,8 +93,7 @@ bool isContentTypeTextual(const Http::RequestOrResponseHeaderMap& headers) { return false; } - const Http::LowerCaseString content_type_value{ - std::string(headers.ContentType()->value().getStringView())}; + const Http::LowerCaseString content_type_value{std::string(headers.getContentTypeValue())}; if (content_type_value.get() == Http::Headers::get().ContentTypeValues.Json) { return true; } @@ -251,9 +250,7 @@ Http::FilterDataStatus Filter::encodeData(Buffer::Instance& data, bool end_strea } ENVOY_LOG(trace, "Tranforming JSON payload to HTTP response."); - if (!encoder_callbacks_->encodingBuffer()) { - encoder_callbacks_->addEncodedData(data, false); - } + 
encoder_callbacks_->addEncodedData(data, false); const Buffer::Instance& encoding_buffer = *encoder_callbacks_->encodingBuffer(); encoder_callbacks_->modifyEncodingBuffer([this](Buffer::Instance& enc_buf) { Buffer::OwnedImpl body; @@ -270,36 +267,35 @@ void Filter::jsonizeRequest(Http::RequestHeaderMap const& headers, const Buffer: using source::extensions::filters::http::aws_lambda::Request; Request json_req; if (headers.Path()) { - json_req.set_raw_path(std::string(headers.Path()->value().getStringView())); + json_req.set_raw_path(std::string(headers.getPathValue())); } if (headers.Method()) { - json_req.set_method(std::string(headers.Method()->value().getStringView())); + json_req.set_method(std::string(headers.getMethodValue())); } // Wrap the headers - headers.iterate( - [](const Http::HeaderEntry& entry, void* ctx) -> Http::HeaderMap::Iterate { - auto* req = static_cast(ctx); - // ignore H2 pseudo-headers - if (absl::StartsWith(entry.key().getStringView(), ":")) { - return Http::HeaderMap::Iterate::Continue; - } - std::string name = std::string(entry.key().getStringView()); - auto it = req->mutable_headers()->find(name); - if (it == req->headers().end()) { - req->mutable_headers()->insert({name, std::string(entry.value().getStringView())}); - } else { - // Coalesce headers with multiple values - it->second += fmt::format(",{}", entry.value().getStringView()); - } - return Http::HeaderMap::Iterate::Continue; - }, - &json_req); + headers.iterate([&json_req](const Http::HeaderEntry& entry) -> Http::HeaderMap::Iterate { + // ignore H2 pseudo-headers + if (absl::StartsWith(entry.key().getStringView(), ":")) { + return Http::HeaderMap::Iterate::Continue; + } + std::string name = std::string(entry.key().getStringView()); + auto it = json_req.mutable_headers()->find(name); + if (it == json_req.headers().end()) { + json_req.mutable_headers()->insert({name, std::string(entry.value().getStringView())}); + } else { + // Coalesce headers with multiple values + it->second 
+= fmt::format(",{}", entry.value().getStringView()); + } + return Http::HeaderMap::Iterate::Continue; + }); // Wrap the Query String - for (auto&& kv_pair : Http::Utility::parseQueryString(headers.Path()->value().getStringView())) { - json_req.mutable_query_string_parameters()->insert({kv_pair.first, kv_pair.second}); + if (headers.Path()) { + for (auto&& kv_pair : Http::Utility::parseQueryString(headers.getPathValue())) { + json_req.mutable_query_string_parameters()->insert({kv_pair.first, kv_pair.second}); + } } // Wrap the body diff --git a/source/extensions/filters/http/aws_lambda/aws_lambda_filter.h b/source/extensions/filters/http/aws_lambda/aws_lambda_filter.h index 6611128143d6e..82bfdaf85cf2f 100644 --- a/source/extensions/filters/http/aws_lambda/aws_lambda_filter.h +++ b/source/extensions/filters/http/aws_lambda/aws_lambda_filter.h @@ -83,7 +83,6 @@ class FilterSettings : public Router::RouteSpecificFilterConfig { : arn_(arn), invocation_mode_(mode), payload_passthrough_(payload_passthrough) {} const Arn& arn() const& { return arn_; } - Arn&& arn() && { return std::move(arn_); } bool payloadPassthrough() const { return payload_passthrough_; } InvocationMode invocationMode() const { return invocation_mode_; } diff --git a/source/extensions/filters/http/aws_lambda/config.cc b/source/extensions/filters/http/aws_lambda/config.cc index 957e8a0960d49..c784020da619d 100644 --- a/source/extensions/filters/http/aws_lambda/config.cc +++ b/source/extensions/filters/http/aws_lambda/config.cc @@ -25,10 +25,8 @@ getInvocationMode(const envoy::extensions::filters::http::aws_lambda::v3::Config switch (proto_config.invocation_mode()) { case Config_InvocationMode_ASYNCHRONOUS: return InvocationMode::Asynchronous; - break; case Config_InvocationMode_SYNCHRONOUS: return InvocationMode::Synchronous; - break; default: NOT_REACHED_GCOVR_EXCL_LINE; } diff --git a/source/extensions/filters/http/aws_request_signing/BUILD b/source/extensions/filters/http/aws_request_signing/BUILD 
index c723fb932b084..01b83ecf68656 100644 --- a/source/extensions/filters/http/aws_request_signing/BUILD +++ b/source/extensions/filters/http/aws_request_signing/BUILD @@ -1,16 +1,16 @@ -licenses(["notice"]) # Apache 2 - -# L7 HTTP AWS request signing filter -# Public docs: docs/root/configuration/http_filters/aws_request_signing_filter.rst - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +# L7 HTTP AWS request signing filter +# Public docs: docs/root/configuration/http_filters/aws_request_signing_filter.rst + +envoy_extension_package() envoy_cc_library( name = "aws_request_signing_filter_lib", diff --git a/source/extensions/filters/http/buffer/BUILD b/source/extensions/filters/http/buffer/BUILD index e7629854f46b4..c39db2ac9a857 100644 --- a/source/extensions/filters/http/buffer/BUILD +++ b/source/extensions/filters/http/buffer/BUILD @@ -1,16 +1,16 @@ -licenses(["notice"]) # Apache 2 - -# Request buffering and timeout L7 HTTP filter -# Public docs: docs/root/configuration/http_filters/buffer_filter.rst - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +# Request buffering and timeout L7 HTTP filter +# Public docs: docs/root/configuration/http_filters/buffer_filter.rst + +envoy_extension_package() envoy_cc_library( name = "buffer_filter_lib", @@ -38,6 +38,7 @@ envoy_cc_extension( srcs = ["config.cc"], hdrs = ["config.h"], security_posture = "robust_to_untrusted_downstream", + # Legacy test use. TODO(#9953) clean up. 
deps = [ "//include/envoy/registry", "//source/extensions/filters/http:well_known_names", diff --git a/source/extensions/filters/http/cache/BUILD b/source/extensions/filters/http/cache/BUILD index 03c1c4932fefb..6dd67613de956 100644 --- a/source/extensions/filters/http/cache/BUILD +++ b/source/extensions/filters/http/cache/BUILD @@ -1,32 +1,47 @@ -licenses(["notice"]) # Apache 2 - -## Pluggable HTTP cache filter - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", "envoy_proto_library", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +## Pluggable HTTP cache filter + +envoy_extension_package() envoy_cc_library( name = "cache_filter_lib", srcs = ["cache_filter.cc"], hdrs = ["cache_filter.h"], deps = [ + ":cache_headers_utils_lib", + ":cacheability_utils_lib", ":http_cache_lib", + "//source/common/common:enum_to_int", "//source/common/common:logger_lib", "//source/common/common:macros", "//source/common/http:header_map_lib", "//source/common/http:headers_lib", + "//source/common/http:utility_lib", "//source/extensions/filters/http/common:pass_through_filter_lib", "@envoy_api//envoy/extensions/filters/http/cache/v3alpha:pkg_cc_proto", ], ) +envoy_cc_library( + name = "cacheability_utils_lib", + srcs = ["cacheability_utils.cc"], + hdrs = ["cacheability_utils.h"], + deps = [ + ":cache_headers_utils_lib", + "//source/common/common:utility_lib", + "//source/common/http:headers_lib", + ], +) + envoy_proto_library( name = "key", srcs = ["key.proto"], @@ -37,7 +52,7 @@ envoy_cc_library( srcs = ["http_cache.cc"], hdrs = ["http_cache.h"], deps = [ - ":http_cache_utils_lib", + ":cache_headers_utils_lib", ":key_cc_proto", "//include/envoy/buffer:buffer_interface", "//include/envoy/common:time_interface", @@ -45,6 +60,7 @@ envoy_cc_library( "//include/envoy/http:codes_interface", "//include/envoy/http:header_map_interface", "//source/common/common:assert_lib", + 
"//source/common/http:header_utility_lib", "//source/common/http:headers_lib", "//source/common/protobuf:utility_lib", "@envoy_api//envoy/extensions/filters/http/cache/v3alpha:pkg_cc_proto", @@ -52,9 +68,10 @@ envoy_cc_library( ) envoy_cc_library( - name = "http_cache_utils_lib", - srcs = ["http_cache_utils.cc"], - hdrs = ["http_cache_utils.h"], + name = "cache_headers_utils_lib", + srcs = ["cache_headers_utils.cc"], + hdrs = ["cache_headers_utils.h"], + external_deps = ["abseil_optional"], deps = [ "//include/envoy/common:time_interface", "//include/envoy/http:header_map_interface", diff --git a/source/extensions/filters/http/cache/cache_filter.cc b/source/extensions/filters/http/cache/cache_filter.cc index ad43c577034e4..6e4b469f342ef 100644 --- a/source/extensions/filters/http/cache/cache_filter.cc +++ b/source/extensions/filters/http/cache/cache_filter.cc @@ -1,6 +1,10 @@ #include "extensions/filters/http/cache/cache_filter.h" +#include "common/common/enum_to_int.h" #include "common/http/headers.h" +#include "common/http/utility.h" + +#include "extensions/filters/http/cache/cacheability_utils.h" #include "absl/strings/string_view.h" @@ -9,28 +13,17 @@ namespace Extensions { namespace HttpFilters { namespace Cache { -bool CacheFilter::isCacheableRequest(Http::RequestHeaderMap& headers) { - const Http::HeaderEntry* method = headers.Method(); - const Http::HeaderEntry* forwarded_proto = headers.ForwardedProto(); - const Http::HeaderValues& header_values = Http::Headers::get(); - // TODO(toddmgreer): Also serve HEAD requests from cache. - // TODO(toddmgreer): Check all the other cache-related headers. 
- return method && forwarded_proto && headers.Path() && headers.Host() && - (method->value() == header_values.MethodValues.Get) && - (forwarded_proto->value() == header_values.SchemeValues.Http || - forwarded_proto->value() == header_values.SchemeValues.Https); +namespace { +inline bool isResponseNotModified(const Http::ResponseHeaderMap& response_headers) { + return Http::Utility::getResponseStatus(response_headers) == enumToInt(Http::Code::NotModified); } +} // namespace -bool CacheFilter::isCacheableResponse(Http::ResponseHeaderMap& headers) { - const Http::HeaderEntry* cache_control = headers.CacheControl(); - // TODO(toddmgreer): fully check for cacheability. See for example - // https://github.com/apache/incubator-pagespeed-mod/blob/master/pagespeed/kernel/http/caching_headers.h. - if (cache_control) { - return !StringUtil::caseFindToken(cache_control->value().getStringView(), ",", - Http::Headers::get().CacheControlValues.Private); - } - return false; -} +struct CacheResponseCodeDetailValues { + const absl::string_view ResponseFromCacheFilter = "cache.response_from_cache_filter"; +}; + +using CacheResponseCodeDetails = ConstSingleton; CacheFilter::CacheFilter(const envoy::extensions::filters::http::cache::v3alpha::CacheConfig&, const std::string&, Stats::Scope&, TimeSource& time_source, @@ -52,30 +45,64 @@ Http::FilterHeadersStatus CacheFilter::decodeHeaders(Http::RequestHeaderMap& hea *decoder_callbacks_, headers); return Http::FilterHeadersStatus::Continue; } - if (!isCacheableRequest(headers)) { + if (!CacheabilityUtils::isCacheableRequest(headers)) { ENVOY_STREAM_LOG(debug, "CacheFilter::decodeHeaders ignoring uncacheable request: {}", *decoder_callbacks_, headers); return Http::FilterHeadersStatus::Continue; } ASSERT(decoder_callbacks_); - lookup_ = cache_.makeLookupContext(LookupRequest(headers, time_source_.systemTime())); - ASSERT(lookup_); + LookupRequest lookup_request(headers, time_source_.systemTime()); + request_allows_inserts_ = 
!lookup_request.requestCacheControl().no_store_; + lookup_ = cache_.makeLookupContext(std::move(lookup_request)); + + ASSERT(lookup_); ENVOY_STREAM_LOG(debug, "CacheFilter::decodeHeaders starting lookup", *decoder_callbacks_); - lookup_->getHeaders([this](LookupResult&& result) { onHeaders(std::move(result)); }); - if (state_ == GetHeadersState::GetHeadersResultUnusable) { - // onHeaders has already been called, and no usable cache entry was found--continue iteration. + + lookup_->getHeaders( + [this, &headers](LookupResult&& result) { onHeaders(std::move(result), headers); }); + + // If the cache called onHeaders synchronously it will have advanced the filter_state_. + switch (filter_state_) { + case FilterState::Initial: + // Headers are not fetched from cache yet -- wait until cache lookup is completed. + filter_state_ = FilterState::WaitingForCacheLookup; + return Http::FilterHeadersStatus::StopAllIterationAndWatermark; + case FilterState::DecodeServingFromCache: + case FilterState::ResponseServedFromCache: + // A fresh cached response was found -- no need to continue the decoding stream. + return Http::FilterHeadersStatus::StopAllIterationAndWatermark; + default: return Http::FilterHeadersStatus::Continue; } - // onHeaders hasn't been called yet--stop iteration to wait for it, and tell it that we stopped - // iteration. - state_ = GetHeadersState::FinishedGetHeadersCall; - return Http::FilterHeadersStatus::StopAllIterationAndWatermark; } Http::FilterHeadersStatus CacheFilter::encodeHeaders(Http::ResponseHeaderMap& headers, bool end_stream) { - if (lookup_ && isCacheableResponse(headers)) { + if (filter_state_ == FilterState::DecodeServingFromCache) { + // This call was invoked by decoder_callbacks_->encodeHeaders -- ignore it. + return Http::FilterHeadersStatus::Continue; + } + + // If lookup_ is null, the request wasn't cacheable, so the response isn't either. 
+ if (!lookup_) { + return Http::FilterHeadersStatus::Continue; + } + + if (filter_state_ == FilterState::ValidatingCachedResponse && isResponseNotModified(headers)) { + processSuccessfulValidation(headers); + if (filter_state_ != FilterState::ResponseServedFromCache) { + // Response is still being fetched from cache -- wait until it is fetched & encoded. + filter_state_ = FilterState::WaitingForCacheBody; + return Http::FilterHeadersStatus::StopIteration; + } + return Http::FilterHeadersStatus::Continue; + } + + // Either a cache miss or a cache entry that is no longer valid. + // Check if the new response can be cached. + if (request_allows_inserts_ && CacheabilityUtils::isCacheableResponse(headers)) { + // TODO(#12140): Add date internal header or metadata to cached responses. ENVOY_STREAM_LOG(debug, "CacheFilter::encodeHeaders inserting headers", *encoder_callbacks_); insert_ = cache_.makeInsertContext(std::move(lookup_)); insert_->insertHeaders(headers, end_stream); @@ -84,8 +111,16 @@ Http::FilterHeadersStatus CacheFilter::encodeHeaders(Http::ResponseHeaderMap& he } Http::FilterDataStatus CacheFilter::encodeData(Buffer::Instance& data, bool end_stream) { + if (filter_state_ == FilterState::DecodeServingFromCache) { + // This call was invoked by decoder_callbacks_->encodeData -- ignore it. + return Http::FilterDataStatus::Continue; + } + if (filter_state_ == FilterState::WaitingForCacheBody) { + // Encoding stream stopped waiting for cached body (and trailers) to be encoded. + return Http::FilterDataStatus::StopIterationAndBuffer; + } if (insert_) { - ENVOY_STREAM_LOG(debug, "CacheFilter::encodeHeaders inserting body", *encoder_callbacks_); + ENVOY_STREAM_LOG(debug, "CacheFilter::encodeData inserting body", *encoder_callbacks_); // TODO(toddmgreer): Wait for the cache if necessary. 
insert_->insertBody( data, [](bool) {}, end_stream); @@ -93,48 +128,58 @@ Http::FilterDataStatus CacheFilter::encodeData(Buffer::Instance& data, bool end_ return Http::FilterDataStatus::Continue; } -void CacheFilter::onHeaders(LookupResult&& result) { +void CacheFilter::getBody() { + ASSERT(lookup_, "CacheFilter is trying to call getBody with no LookupContext"); + ASSERT(!remaining_body_.empty(), "No reason to call getBody when there's no body to get."); + lookup_->getBody(remaining_body_[0], + [this](Buffer::InstancePtr&& body) { onBody(std::move(body)); }); +} + +void CacheFilter::getTrailers() { + ASSERT(lookup_, "CacheFilter is trying to call getTrailers with no LookupContext"); + ASSERT(response_has_trailers_, "No reason to call getTrailers when there's no trailers to get."); + lookup_->getTrailers( + [this](Http::ResponseTrailerMapPtr&& trailers) { onTrailers(std::move(trailers)); }); +} + +void CacheFilter::onHeaders(LookupResult&& result, Http::RequestHeaderMap& request_headers) { + // TODO(yosrym93): Handle request only-if-cached directive. + bool should_continue_decoding = false; switch (result.cache_entry_status_) { - case CacheEntryStatus::RequiresValidation: case CacheEntryStatus::FoundNotModified: - case CacheEntryStatus::UnsatisfiableRange: - NOT_IMPLEMENTED_GCOVR_EXCL_LINE; // We don't yet return or support these codes. + case CacheEntryStatus::NotSatisfiableRange: // TODO(#10132): create 416 response. + NOT_IMPLEMENTED_GCOVR_EXCL_LINE; // We don't yet return or support these codes. + case CacheEntryStatus::RequiresValidation: + // If a cache entry requires validation, inject validation headers in the request and let it + // pass through as if no cache entry was found. + // If the cache entry was valid, the response status should be 304 (unmodified) and the cache + // entry will be injected in the response body. 
+ lookup_result_ = std::make_unique(std::move(result)); + should_continue_decoding = filter_state_ == FilterState::WaitingForCacheLookup; + filter_state_ = FilterState::ValidatingCachedResponse; + injectValidationHeaders(request_headers); + break; case CacheEntryStatus::Unusable: - if (state_ == GetHeadersState::FinishedGetHeadersCall) { - // decodeHeader returned Http::FilterHeadersStatus::StopAllIterationAndWatermark--restart it - decoder_callbacks_->continueDecoding(); - } else { - // decodeHeader hasn't yet returned--tell it to return Http::FilterHeadersStatus::Continue. - state_ = GetHeadersState::GetHeadersResultUnusable; - } - return; + should_continue_decoding = filter_state_ == FilterState::WaitingForCacheLookup; + filter_state_ = FilterState::NoCachedResponseFound; + break; + case CacheEntryStatus::SatisfiableRange: // TODO(#10132): break response content to the ranges + // requested. case CacheEntryStatus::Ok: - response_has_trailers_ = result.has_trailers_; - const bool end_stream = (result.content_length_ == 0 && !response_has_trailers_); - // TODO(toddmgreer): Calculate age per https://httpwg.org/specs/rfc7234.html#age.calculations - result.headers_->addReferenceKey(Http::Headers::get().Age, 0); - decoder_callbacks_->encodeHeaders(std::move(result.headers_), end_stream); - if (end_stream) { - return; - } - if (result.content_length_ > 0) { - remaining_body_.emplace_back(0, result.content_length_); - getBody(); - } else { - lookup_->getTrailers( - [this](Http::ResponseTrailerMapPtr&& trailers) { onTrailers(std::move(trailers)); }); - } + lookup_result_ = std::make_unique(std::move(result)); + filter_state_ = FilterState::DecodeServingFromCache; + encodeCachedResponse(); + } + if (should_continue_decoding) { + // decodeHeaders returned StopIteration waiting for this callback -- continue decoding. 
+ decoder_callbacks_->continueDecoding(); } -} - -void CacheFilter::getBody() { - ASSERT(!remaining_body_.empty(), "No reason to call getBody when there's no body to get."); - lookup_->getBody(remaining_body_[0], - [this](Buffer::InstancePtr&& body) { onBody(std::move(body)); }); } // TODO(toddmgreer): Handle downstream backpressure. void CacheFilter::onBody(Buffer::InstancePtr&& body) { + // Can be called during decoding if a valid cache hit is found, + // or during encoding if a cache entry was being validated. ASSERT(!remaining_body_.empty(), "CacheFilter doesn't call getBody unless there's more body to get, so this is a " "bogus callback."); @@ -147,23 +192,166 @@ void CacheFilter::onBody(Buffer::InstancePtr&& body) { remaining_body_.erase(remaining_body_.begin()); } else { ASSERT(false, "Received oversized body from cache."); - decoder_callbacks_->resetStream(); + filter_state_ == FilterState::DecodeServingFromCache ? decoder_callbacks_->resetStream() + : encoder_callbacks_->resetStream(); return; } const bool end_stream = remaining_body_.empty() && !response_has_trailers_; - decoder_callbacks_->encodeData(*body, end_stream); + + filter_state_ == FilterState::DecodeServingFromCache + ? decoder_callbacks_->encodeData(*body, end_stream) + : encoder_callbacks_->addEncodedData(*body, true); + if (!remaining_body_.empty()) { getBody(); } else if (response_has_trailers_) { - lookup_->getTrailers( - [this](Http::ResponseTrailerMapPtr&& trailers) { onTrailers(std::move(trailers)); }); + getTrailers(); + } else { + finalizeEncodingCachedResponse(); } } void CacheFilter::onTrailers(Http::ResponseTrailerMapPtr&& trailers) { - decoder_callbacks_->encodeTrailers(std::move(trailers)); + // Can be called during decoding if a valid cache hit is found, + // or during encoding if a cache entry was being validated. 
+ if (filter_state_ == FilterState::DecodeServingFromCache) { + decoder_callbacks_->encodeTrailers(std::move(trailers)); + } else { + Http::ResponseTrailerMap& response_trailers = encoder_callbacks_->addEncodedTrailers(); + response_trailers = std::move(*trailers); + } + finalizeEncodingCachedResponse(); +} + +void CacheFilter::processSuccessfulValidation(Http::ResponseHeaderMap& response_headers) { + ASSERT(lookup_result_, "CacheFilter trying to validate a non-existent lookup result"); + ASSERT( + filter_state_ == FilterState::ValidatingCachedResponse, + "processSuccessfulValidation must only be called when a cached response is being validated"); + ASSERT(isResponseNotModified(response_headers), + "processSuccessfulValidation must only be called with 304 responses"); + + // Check whether the cached entry should be updated before modifying the 304 response. + const bool should_update_cached_entry = shouldUpdateCachedEntry(response_headers); + + // Update the 304 response status code and content-length. + response_headers.setStatus(lookup_result_->headers_->getStatusValue()); + response_headers.setContentLength(lookup_result_->headers_->getContentLengthValue()); + + // A cache entry was successfully validated -> encode cached body and trailers. + // encodeCachedResponse also adds the age header to lookup_result_ + // so it should be called before headers are merged. + encodeCachedResponse(); + + // Add any missing headers from the cached response to the 304 response. + lookup_result_->headers_->iterate([&response_headers](const Http::HeaderEntry& cached_header) { + // TODO(yosrym93): Try to avoid copying the header key twice. 
+ Http::LowerCaseString key(std::string(cached_header.key().getStringView())); + absl::string_view value = cached_header.value().getStringView(); + if (!response_headers.get(key)) { + response_headers.setCopy(key, value); + } + return Http::HeaderMap::Iterate::Continue; + }); + + if (should_update_cached_entry) { + // TODO(yosrym93): else the cached entry should be deleted. + cache_.updateHeaders(*lookup_, response_headers); + } +} + +bool CacheFilter::shouldUpdateCachedEntry(const Http::ResponseHeaderMap& response_headers) const { + ASSERT(isResponseNotModified(response_headers), + "shouldUpdateCachedEntry must only be called with 304 responses"); + ASSERT(lookup_result_, "shouldUpdateCachedEntry precondition unsatisfied: lookup_result_ " + "does not point to a cache lookup result"); + ASSERT(filter_state_ == FilterState::ValidatingCachedResponse, + "shouldUpdateCachedEntry precondition unsatisfied: the " + "CacheFilter is not validating a cache lookup result"); + + // According to: https://httpwg.org/specs/rfc7234.html#freshening.responses, + // and assuming a single cached response per key: + // If the 304 response contains a strong validator (etag) that does not match the cached response, + // the cached response should not be updated. 
+ const Http::HeaderEntry* response_etag = response_headers.get(Http::CustomHeaders::get().Etag); + const Http::HeaderEntry* cached_etag = + lookup_result_->headers_->get(Http::CustomHeaders::get().Etag); + return !response_etag || (cached_etag && cached_etag->value().getStringView() == + response_etag->value().getStringView()); +} + +void CacheFilter::injectValidationHeaders(Http::RequestHeaderMap& request_headers) { + ASSERT(lookup_result_, "injectValidationHeaders precondition unsatisfied: lookup_result_ " + "does not point to a cache lookup result"); + ASSERT(filter_state_ == FilterState::ValidatingCachedResponse, + "injectValidationHeaders precondition unsatisfied: the " + "CacheFilter is not validating a cache lookup result"); + + const Http::HeaderEntry* etag_header = + lookup_result_->headers_->get(Http::CustomHeaders::get().Etag); + const Http::HeaderEntry* last_modified_header = + lookup_result_->headers_->get(Http::CustomHeaders::get().LastModified); + + if (etag_header) { + absl::string_view etag = etag_header->value().getStringView(); + request_headers.setReferenceKey(Http::CustomHeaders::get().IfNoneMatch, etag); + } + if (CacheHeadersUtils::httpTime(last_modified_header) != SystemTime()) { + // Valid Last-Modified header exists. + absl::string_view last_modified = last_modified_header->value().getStringView(); + request_headers.setReferenceKey(Http::CustomHeaders::get().IfModifiedSince, last_modified); + } else { + // Either Last-Modified is missing or invalid, fallback to Date. 
+ // A correct behaviour according to: + // https://httpwg.org/specs/rfc7232.html#header.if-modified-since + absl::string_view date = lookup_result_->headers_->getDateValue(); + request_headers.setReferenceKey(Http::CustomHeaders::get().IfModifiedSince, date); + } +} + +void CacheFilter::encodeCachedResponse() { + ASSERT(lookup_result_, "encodeCachedResponse precondition unsatisfied: lookup_result_ " + "does not point to a cache lookup result"); + + response_has_trailers_ = lookup_result_->has_trailers_; + const bool end_stream = (lookup_result_->content_length_ == 0 && !response_has_trailers_); + // TODO(toddmgreer): Calculate age per https://httpwg.org/specs/rfc7234.html#age.calculations + lookup_result_->headers_->addReferenceKey(Http::Headers::get().Age, 0); + + // Set appropriate response flags and codes. + Http::StreamFilterCallbacks* callbacks = + filter_state_ == FilterState::DecodeServingFromCache + ? static_cast(decoder_callbacks_) + : static_cast(encoder_callbacks_); + + callbacks->streamInfo().setResponseFlag(StreamInfo::ResponseFlag::ResponseFromCacheFilter); + callbacks->streamInfo().setResponseCodeDetails( + CacheResponseCodeDetails::get().ResponseFromCacheFilter); + + // If the filter is encoding, 304 response headers and cached headers are merged in encodeHeaders. + // If the filter is decoding, we need to serve response headers from cache directly. + if (filter_state_ == FilterState::DecodeServingFromCache) { + decoder_callbacks_->encodeHeaders(std::move(lookup_result_->headers_), end_stream); + } + + if (lookup_result_->content_length_ > 0) { + remaining_body_.emplace_back(0, lookup_result_->content_length_); + getBody(); + } else if (response_has_trailers_) { + getTrailers(); + } } + +void CacheFilter::finalizeEncodingCachedResponse() { + if (filter_state_ == FilterState::WaitingForCacheBody) { + // encodeHeaders returned StopIteration waiting for finishing encoding the cached response -- + // continue encoding. 
+ encoder_callbacks_->continueEncoding(); + } + filter_state_ = FilterState::ResponseServedFromCache; +} + } // namespace Cache } // namespace HttpFilters } // namespace Extensions diff --git a/source/extensions/filters/http/cache/cache_filter.h b/source/extensions/filters/http/cache/cache_filter.h index 212ff8728284f..f873569289e07 100644 --- a/source/extensions/filters/http/cache/cache_filter.h +++ b/source/extensions/filters/http/cache/cache_filter.h @@ -6,9 +6,11 @@ #include #include "envoy/extensions/filters/http/cache/v3alpha/cache.pb.h" +#include "envoy/http/header_map.h" #include "common/common/logger.h" +#include "extensions/filters/http/cache/cache_headers_utils.h" #include "extensions/filters/http/cache/http_cache.h" #include "extensions/filters/http/common/pass_through_filter.h" @@ -37,32 +39,86 @@ class CacheFilter : public Http::PassThroughFilter, Http::FilterDataStatus encodeData(Buffer::Instance& buffer, bool end_stream) override; private: + // Utility functions: make any necessary checks and call the corresponding lookup_ functions. void getBody(); - void onHeaders(LookupResult&& result); + void getTrailers(); + + // Callbacks for HttpCache to call when headers/body/trailers are ready. + void onHeaders(LookupResult&& result, Http::RequestHeaderMap& request_headers); void onBody(Buffer::InstancePtr&& body); void onTrailers(Http::ResponseTrailerMapPtr&& trailers); - // These don't require private access, but are members per envoy convention. - static bool isCacheableRequest(Http::RequestHeaderMap& headers); - static bool isCacheableResponse(Http::ResponseHeaderMap& headers); + // Precondition: lookup_result_ points to a cache lookup result that requires validation. + // filter_state_ is ValidatingCachedResponse. + // Serves a validated cached response after updating it with a 304 response. 
+ void processSuccessfulValidation(Http::ResponseHeaderMap& response_headers); + + // Precondition: lookup_result_ points to a cache lookup result that requires validation. + // filter_state_ is ValidatingCachedResponse. + // Checks if a cached entry should be updated with a 304 response. + bool shouldUpdateCachedEntry(const Http::ResponseHeaderMap& response_headers) const; + + // Precondition: lookup_result_ points to a cache lookup result that requires validation. + // Should only be called during onHeaders as it modifies RequestHeaderMap. + // Adds required conditional headers for cache validation to the request headers + // according to the present cache lookup result headers. + void injectValidationHeaders(Http::RequestHeaderMap& request_headers); + + // Precondition: lookup_result_ points to a fresh or validated cache look up result. + // filter_state_ is ValidatingCachedResponse. + // Adds a cache lookup result to the response encoding stream. + // Can be called during decoding if a valid cache hit is found, + // or during encoding if a cache entry was validated successfully. + void encodeCachedResponse(); + + // Precondition: finished adding a response from cache to the response encoding stream. + // Updates filter_state_ and continues the encoding stream if necessary. + void finalizeEncodingCachedResponse(); TimeSource& time_source_; HttpCache& cache_; LookupContextPtr lookup_; InsertContextPtr insert_; + LookupResultPtr lookup_result_; - // Tracks what body bytes still need to be read from the cache. This is - // currently only one Range, but will expand when full range support is added. Initialized by - // onOkHeaders. + // Tracks what body bytes still need to be read from the cache. This is currently only one Range, + // but will expand when full range support is added. Initialized by encodeCachedResponse. std::vector remaining_body_; // True if the response has trailers. // TODO(toddmgreer): cache trailers. 
- bool response_has_trailers_; + bool response_has_trailers_ = false; + + // True if a request allows cache inserts according to: + // https://httpwg.org/specs/rfc7234.html#response.cacheability + bool request_allows_inserts_ = false; + + enum class FilterState { + Initial, + + // CacheFilter::decodeHeaders called lookup->getHeaders() but onHeaders was not called yet + // (lookup result not ready) -- the decoding stream should be stopped until the cache lookup + // result is ready. + WaitingForCacheLookup, + + // CacheFilter::encodeHeaders called encodeCachedResponse() but encoding the cached response is + // not finished yet -- the encoding stream should be stopped until it is finished. + WaitingForCacheBody, + + // Cache lookup did not find a cached response for this request. + NoCachedResponseFound, + + // Cache lookup found a cached response that requires validation. + ValidatingCachedResponse, + + // Cache lookup found a fresh cached response and it is being added to the encoding stream. + DecodeServingFromCache, - // Used for coordinating between decodeHeaders and onHeaders. - enum class GetHeadersState { Initial, FinishedGetHeadersCall, GetHeadersResultUnusable }; - GetHeadersState state_ = GetHeadersState::Initial; + // The cached response was successfully added to the encoding stream (either during decoding or + // encoding). 
+ ResponseServedFromCache + }; + FilterState filter_state_ = FilterState::Initial; }; } // namespace Cache diff --git a/source/extensions/filters/http/cache/cache_headers_utils.cc b/source/extensions/filters/http/cache/cache_headers_utils.cc new file mode 100644 index 0000000000000..27d08bde0088d --- /dev/null +++ b/source/extensions/filters/http/cache/cache_headers_utils.cc @@ -0,0 +1,199 @@ +#include "extensions/filters/http/cache/cache_headers_utils.h" + +#include <chrono> +#include <ostream> + +#include "envoy/common/time.h" + +#include "absl/algorithm/container.h" +#include "absl/strings/ascii.h" +#include "absl/strings/numbers.h" +#include "absl/strings/str_split.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace Cache { + +// Utility functions used in RequestCacheControl & ResponseCacheControl. +namespace { +// A directive with an invalid duration is ignored, the RFC does not specify a behavior: +// https://httpwg.org/specs/rfc7234.html#delta-seconds +OptionalDuration parseDuration(absl::string_view s) { + OptionalDuration duration; + // Strip quotation marks if any. + if (s.size() > 1 && s.front() == '"' && s.back() == '"') { + s = s.substr(1, s.size() - 2); + } + long num; + if (absl::SimpleAtoi(s, &num) && num >= 0) { + // s is a valid string of digits representing a positive number. + duration = std::chrono::seconds(num); + } + return duration; +} + +inline std::pair<absl::string_view, absl::string_view> +separateDirectiveAndArgument(absl::string_view full_directive) { + return absl::StrSplit(absl::StripAsciiWhitespace(full_directive), absl::MaxSplits('=', 1)); +} +} // namespace + +// The grammar for this Cache-Control header value should be: +// Cache-Control = 1#cache-directive +// cache-directive = token [ "=" ( token / quoted-string ) ] +// token = 1*tchar +// tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" +// / "-" / "."
/ "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA +// quoted-string = DQUOTE *( qdtext / quoted-pair ) DQUOTE +// qdtext = HTAB / SP /%x21 / %x23-5B / %x5D-7E / obs-text +// obs-text = %x80-FF +// quoted-pair = "\" ( HTAB / SP / VCHAR / obs-text ) +// VCHAR = %x21-7E ; visible (printing) characters + +// Multiple directives are comma separated according to: +// https://httpwg.org/specs/rfc7234.html#collected.abnf + +RequestCacheControl::RequestCacheControl(absl::string_view cache_control_header) { + const std::vector<absl::string_view> directives = absl::StrSplit(cache_control_header, ','); + + for (auto full_directive : directives) { + absl::string_view directive, argument; + std::tie(directive, argument) = separateDirectiveAndArgument(full_directive); + + if (directive == "no-cache") { + must_validate_ = true; + } else if (directive == "no-store") { + no_store_ = true; + } else if (directive == "no-transform") { + no_transform_ = true; + } else if (directive == "only-if-cached") { + only_if_cached_ = true; + } else if (directive == "max-age") { + max_age_ = parseDuration(argument); + } else if (directive == "min-fresh") { + min_fresh_ = parseDuration(argument); + } else if (directive == "max-stale") { + max_stale_ = argument.empty() ? SystemTime::duration::max() : parseDuration(argument); + } + } +} + +ResponseCacheControl::ResponseCacheControl(absl::string_view cache_control_header) { + const std::vector<absl::string_view> directives = absl::StrSplit(cache_control_header, ','); + + for (auto full_directive : directives) { + absl::string_view directive, argument; + std::tie(directive, argument) = separateDirectiveAndArgument(full_directive); + + if (directive == "no-cache") { + // If no-cache directive has arguments they are ignored - not handled.
+ must_validate_ = true; + } else if (directive == "must-revalidate" || directive == "proxy-revalidate") { + no_stale_ = true; + } else if (directive == "no-store" || directive == "private") { + // If private directive has arguments they are ignored - not handled. + no_store_ = true; + } else if (directive == "no-transform") { + no_transform_ = true; + } else if (directive == "public") { + is_public_ = true; + } else if (directive == "s-maxage") { + max_age_ = parseDuration(argument); + } else if (!max_age_.has_value() && directive == "max-age") { + max_age_ = parseDuration(argument); + } + } +} + +std::ostream& operator<<(std::ostream& os, const OptionalDuration& duration) { + return duration.has_value() ? os << duration.value().count() : os << " "; +} + +std::ostream& operator<<(std::ostream& os, const RequestCacheControl& request_cache_control) { + return os << "{" + << "must_validate: " << request_cache_control.must_validate_ << ", " + << "no_store: " << request_cache_control.no_store_ << ", " + << "no_transform: " << request_cache_control.no_transform_ << ", " + << "only_if_cached: " << request_cache_control.only_if_cached_ << ", " + << "max_age: " << request_cache_control.max_age_ << ", " + << "min_fresh: " << request_cache_control.min_fresh_ << ", " + << "max_stale: " << request_cache_control.max_stale_ << "}"; +} + +std::ostream& operator<<(std::ostream& os, const ResponseCacheControl& response_cache_control) { + return os << "{" + << "must_validate: " << response_cache_control.must_validate_ << ", " + << "no_store: " << response_cache_control.no_store_ << ", " + << "no_transform: " << response_cache_control.no_transform_ << ", " + << "no_stale: " << response_cache_control.no_stale_ << ", " + << "public: " << response_cache_control.is_public_ << ", " + << "max_age: " << response_cache_control.max_age_ << "}"; +} + +bool operator==(const RequestCacheControl& lhs, const RequestCacheControl& rhs) { + return (lhs.must_validate_ == rhs.must_validate_) && 
(lhs.no_store_ == rhs.no_store_) && + (lhs.no_transform_ == rhs.no_transform_) && (lhs.only_if_cached_ == rhs.only_if_cached_) && + (lhs.max_age_ == rhs.max_age_) && (lhs.min_fresh_ == rhs.min_fresh_) && + (lhs.max_stale_ == rhs.max_stale_); +} + +bool operator==(const ResponseCacheControl& lhs, const ResponseCacheControl& rhs) { + return (lhs.must_validate_ == rhs.must_validate_) && (lhs.no_store_ == rhs.no_store_) && + (lhs.no_transform_ == rhs.no_transform_) && (lhs.no_stale_ == rhs.no_stale_) && + (lhs.is_public_ == rhs.is_public_) && (lhs.max_age_ == rhs.max_age_); +} + +SystemTime CacheHeadersUtils::httpTime(const Http::HeaderEntry* header_entry) { + if (!header_entry) { + return {}; + } + absl::Time time; + const std::string input(header_entry->value().getStringView()); + + // Acceptable Date/Time Formats per: + // https://tools.ietf.org/html/rfc7231#section-7.1.1.1 + // + // Sun, 06 Nov 1994 08:49:37 GMT ; IMF-fixdate. + // Sunday, 06-Nov-94 08:49:37 GMT ; obsolete RFC 850 format. + // Sun Nov 6 08:49:37 1994 ; ANSI C's asctime() format. + static const char* rfc7231_date_formats[] = {"%a, %d %b %Y %H:%M:%S GMT", + "%A, %d-%b-%y %H:%M:%S GMT", "%a %b %e %H:%M:%S %Y"}; + + for (const std::string& format : rfc7231_date_formats) { + if (absl::ParseTime(format, input, &time, nullptr)) { + return ToChronoTime(time); + } + } + return {}; +} + +absl::optional CacheHeadersUtils::readAndRemoveLeadingDigits(absl::string_view& str) { + uint64_t val = 0; + uint32_t bytes_consumed = 0; + + for (const char cur : str) { + if (!absl::ascii_isdigit(cur)) { + break; + } + uint64_t new_val = (val * 10) + (cur - '0'); + if (new_val / 8 < val) { + // Overflow occurred. + return absl::nullopt; + } + val = new_val; + ++bytes_consumed; + } + + if (bytes_consumed) { + // Consume some digits. 
+ str.remove_prefix(bytes_consumed); + return val; + } + return absl::nullopt; +} + +} // namespace Cache +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/http/cache/cache_headers_utils.h b/source/extensions/filters/http/cache/cache_headers_utils.h new file mode 100644 index 0000000000000..8a185d88b40db --- /dev/null +++ b/source/extensions/filters/http/cache/cache_headers_utils.h @@ -0,0 +1,107 @@ +#pragma once + +#include "envoy/common/time.h" +#include "envoy/http/header_map.h" + +#include "absl/strings/string_view.h" +#include "absl/time/time.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace Cache { + +using OptionalDuration = absl::optional<SystemTime::duration>; + +// According to: https://httpwg.org/specs/rfc7234.html#cache-request-directive +struct RequestCacheControl { + RequestCacheControl() = default; + explicit RequestCacheControl(absl::string_view cache_control_header); + + // must_validate is true if 'no-cache' directive is present + // A cached response must not be served without successful validation with the origin + bool must_validate_ = false; + + // The response to this request must not be cached (stored) + bool no_store_ = false; + + // 'no-transform' directive is not used now + // No transformations should be done to the response of this request, as defined by: + // https://httpwg.org/specs/rfc7230.html#message.transformations + bool no_transform_ = false; + + // 'only-if-cached' directive is not used now + // The request should be satisfied using a cached response, or respond with 504 (Gateway Timeout) + bool only_if_cached_ = false; + + // The client is unwilling to receive a cached response whose age exceeds the max-age + OptionalDuration max_age_; + + // The client is unwilling to receive a cached response that satisfies: + // expiration_time - now < min-fresh + OptionalDuration min_fresh_; + + // The client is willing to receive a stale response that satisfies:
+ // now - expiration_time < max-stale + // If max-stale has no value then the client is willing to receive any stale response + OptionalDuration max_stale_; +}; + +// According to: https://httpwg.org/specs/rfc7234.html#cache-response-directive +struct ResponseCacheControl { + ResponseCacheControl() = default; + explicit ResponseCacheControl(absl::string_view cache_control_header); + + // must_validate is true if 'no-cache' directive is present; arguments are ignored for now + // This response must not be used to satisfy subsequent requests without successful validation + // with the origin + bool must_validate_ = false; + + // no_store is true if any of 'no-store' or 'private' directives is present. + // 'private' arguments are ignored for now so it is equivalent to 'no-store' + // This response must not be cached (stored) + bool no_store_ = false; + + // 'no-transform' directive is not used now + // No transformations should be done to this response, as defined by: + // https://httpwg.org/specs/rfc7230.html#message.transformations + bool no_transform_ = false; + + // no_stale is true if any of 'must-revalidate' or 'proxy-revalidate' directives is present + // This response must not be served stale without successful validation with the origin + bool no_stale_ = false; + + // 'public' directive is not used now + // This response may be stored, even if the response would normally be non-cacheable or cacheable + // only within a private cache, see: + // https://httpwg.org/specs/rfc7234.html#cache-response-directive.public + bool is_public_ = false; + + // max_age is set to 's-maxage' if present; if not, it is set to 'max-age' if present.
+ // Indicates the maximum time after which this response will be considered stale + OptionalDuration max_age_; +}; + +std::ostream& operator<<(std::ostream& os, const OptionalDuration& duration); +std::ostream& operator<<(std::ostream& os, const RequestCacheControl& request_cache_control); +std::ostream& operator<<(std::ostream& os, const ResponseCacheControl& response_cache_control); +bool operator==(const RequestCacheControl& lhs, const RequestCacheControl& rhs); +bool operator==(const ResponseCacheControl& lhs, const ResponseCacheControl& rhs); + +class CacheHeadersUtils { +public: + // Parses header_entry as an HTTP time. Returns SystemTime() if + // header_entry is null or malformed. + static SystemTime httpTime(const Http::HeaderEntry* header_entry); + + /** + * Read a leading positive decimal integer value and advance "*str" past the + * digits read. If overflow occurs, or no digits exist, return + * absl::nullopt without advancing "*str". + */ + static absl::optional<uint64_t> readAndRemoveLeadingDigits(absl::string_view& str); +}; +} // namespace Cache +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/http/cache/cacheability_utils.cc b/source/extensions/filters/http/cache/cacheability_utils.cc new file mode 100644 index 0000000000000..778fd574a09dd --- /dev/null +++ b/source/extensions/filters/http/cache/cacheability_utils.cc @@ -0,0 +1,80 @@ +#include "extensions/filters/http/cache/cacheability_utils.h" + +#include "envoy/http/header_map.h" + +#include "common/common/macros.h" +#include "common/common/utility.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace Cache { + +namespace { +const absl::flat_hash_set<absl::string_view>& cacheableStatusCodes() { + // As defined by: + // https://tools.ietf.org/html/rfc7231#section-6.1, + // https://tools.ietf.org/html/rfc7538#section-3, + // https://tools.ietf.org/html/rfc7725#section-3 + // TODO(yosrym93): the list of cacheable status codes
should be configurable. + CONSTRUCT_ON_FIRST_USE(absl::flat_hash_set<absl::string_view>, "200", "203", "204", "206", "300", + "301", "308", "404", "405", "410", "414", "451", "501"); +} + +const std::vector<const Http::LowerCaseString*>& conditionalHeaders() { + // As defined by: https://httpwg.org/specs/rfc7232.html#preconditions. + CONSTRUCT_ON_FIRST_USE( + std::vector<const Http::LowerCaseString*>, &Http::CustomHeaders::get().IfMatch, + &Http::CustomHeaders::get().IfNoneMatch, &Http::CustomHeaders::get().IfModifiedSince, + &Http::CustomHeaders::get().IfUnmodifiedSince, &Http::CustomHeaders::get().IfRange); +} +} // namespace + +Http::RegisterCustomInlineHeader<Http::CustomInlineHeaderRegistry::Type::RequestHeaders> + authorization_handle(Http::CustomHeaders::get().Authorization); +Http::RegisterCustomInlineHeader<Http::CustomInlineHeaderRegistry::Type::ResponseHeaders> + cache_control_handle(Http::CustomHeaders::get().CacheControl); + +bool CacheabilityUtils::isCacheableRequest(const Http::RequestHeaderMap& headers) { + const absl::string_view method = headers.getMethodValue(); + const absl::string_view forwarded_proto = headers.getForwardedProtoValue(); + const Http::HeaderValues& header_values = Http::Headers::get(); + + // Check if the request contains any conditional headers. + // For now, requests with conditional headers bypass the CacheFilter. + // This behavior does not cause any incorrect results, but may reduce the cache effectiveness. + // If needed to be handled properly refer to: + // https://httpwg.org/specs/rfc7234.html#validation.received + for (auto conditional_header : conditionalHeaders()) { + if (headers.get(*conditional_header)) { + return false; + } + } + + // TODO(toddmgreer): Also serve HEAD requests from cache. + // Cache-related headers are checked in HttpCache::LookupRequest.
+ return headers.Path() && headers.Host() && !headers.getInline(authorization_handle.handle()) && + (method == header_values.MethodValues.Get) && + (forwarded_proto == header_values.SchemeValues.Http || + forwarded_proto == header_values.SchemeValues.Https); +} + +bool CacheabilityUtils::isCacheableResponse(const Http::ResponseHeaderMap& headers) { + absl::string_view cache_control = headers.getInlineValue(cache_control_handle.handle()); + ResponseCacheControl response_cache_control(cache_control); + + // Only cache responses with explicit validation data, either: + // max-age or s-maxage cache-control directives with date header. + // expires header. + const bool has_validation_data = + (headers.Date() && response_cache_control.max_age_.has_value()) || + headers.get(Http::Headers::get().Expires); + + return !response_cache_control.no_store_ && + cacheableStatusCodes().contains((headers.getStatusValue())) && has_validation_data; +} + +} // namespace Cache +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/http/cache/cacheability_utils.h b/source/extensions/filters/http/cache/cacheability_utils.h new file mode 100644 index 0000000000000..752e4f3f11557 --- /dev/null +++ b/source/extensions/filters/http/cache/cacheability_utils.h @@ -0,0 +1,30 @@ +#pragma once + +#include "common/common/utility.h" +#include "common/http/headers.h" + +#include "extensions/filters/http/cache/cache_headers_utils.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace Cache { +class CacheabilityUtils { +public: + // Checks if a request can be served from cache. + // This does not depend on cache-control headers as + // request cache-control headers only decide whether + // validation is required and whether the response can be cached. + static bool isCacheableRequest(const Http::RequestHeaderMap& headers); + + // Checks if a response can be stored in cache. 
+ // Note that if a request is not cacheable according to 'isCacheableRequest' + // then its response is also not cacheable. + // Therefore, isCacheableRequest, isCacheableResponse and CacheFilter::request_allows_inserts_ + // together should cover https://httpwg.org/specs/rfc7234.html#response.cacheability. + static bool isCacheableResponse(const Http::ResponseHeaderMap& headers); +}; +} // namespace Cache +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/http/cache/http_cache.cc b/source/extensions/filters/http/cache/http_cache.cc index 7ae556d4891b2..60e73ff895494 100644 --- a/source/extensions/filters/http/cache/http_cache.cc +++ b/source/extensions/filters/http/cache/http_cache.cc @@ -2,14 +2,17 @@ #include #include +#include #include "envoy/http/codes.h" +#include "envoy/http/header_map.h" +#include "common/http/header_utility.h" #include "common/http/headers.h" #include "common/protobuf/utility.h" -#include "extensions/filters/http/cache/http_cache_utils.h" - +#include "absl/strings/str_split.h" +#include "absl/strings/string_view.h" #include "absl/time/time.h" namespace Envoy { @@ -17,6 +20,13 @@ namespace Extensions { namespace HttpFilters { namespace Cache { +Http::RegisterCustomInlineHeader + request_cache_control_handle(Http::CustomHeaders::get().CacheControl); +Http::RegisterCustomInlineHeader + response_cache_control_handle(Http::CustomHeaders::get().CacheControl); +Http::RegisterCustomInlineHeader + pragma_handler(Http::CustomHeaders::get().Pragma); + std::ostream& operator<<(std::ostream& os, CacheEntryStatus status) { switch (status) { case CacheEntryStatus::Ok: @@ -27,8 +37,10 @@ std::ostream& operator<<(std::ostream& os, CacheEntryStatus status) { return os << "RequiresValidation"; case CacheEntryStatus::FoundNotModified: return os << "FoundNotModified"; - case CacheEntryStatus::UnsatisfiableRange: - return os << "UnsatisfiableRange"; + case CacheEntryStatus::NotSatisfiableRange: + 
return os << "NotSatisfiableRange"; + case CacheEntryStatus::SatisfiableRange: + return os << "SatisfiableRange"; } NOT_REACHED_GCOVR_EXCL_LINE; } @@ -38,10 +50,7 @@ std::ostream& operator<<(std::ostream& os, const AdjustedByteRange& range) { } LookupRequest::LookupRequest(const Http::RequestHeaderMap& request_headers, SystemTime timestamp) - : timestamp_(timestamp), - request_cache_control_(request_headers.CacheControl() == nullptr - ? "" - : request_headers.CacheControl()->value().getStringView()) { + : timestamp_(timestamp) { // These ASSERTs check prerequisites. A request without these headers can't be looked up in cache; // CacheFilter doesn't create LookupRequests for such requests. ASSERT(request_headers.Path(), "Can't form cache lookup key for malformed Http::RequestHeaderMap " @@ -54,14 +63,21 @@ LookupRequest::LookupRequest(const Http::RequestHeaderMap& request_headers, Syst const Http::HeaderString& forwarded_proto = request_headers.ForwardedProto()->value(); const auto& scheme_values = Http::Headers::get().SchemeValues; ASSERT(forwarded_proto == scheme_values.Http || forwarded_proto == scheme_values.Https); + + initializeRequestCacheControl(request_headers); // TODO(toddmgreer): Let config determine whether to include forwarded_proto, host, and // query params. // TODO(toddmgreer): get cluster name. - // TODO(toddmgreer): Parse Range header into request_range_spec_, and handle the resultant - // vector in CacheFilter::onOkHeaders. + // TODO(toddmgreer): handle the resultant vector in CacheFilter::onOkHeaders. + // Range Requests are only valid for GET requests + if (request_headers.getMethodValue() == Http::Headers::get().MethodValues.Get) { + // TODO(cbdm): using a constant limit of 10 ranges, could make this into a parameter. 
+ const int RangeSpecifierLimit = 10; + request_range_spec_ = RangeRequests::parseRanges(request_headers, RangeSpecifierLimit); + } key_.set_cluster_name("cluster_name_goes_here"); - key_.set_host(std::string(request_headers.Host()->value().getStringView())); - key_.set_path(std::string(request_headers.Path()->value().getStringView())); + key_.set_host(std::string(request_headers.getHostValue())); + key_.set_path(std::string(request_headers.getPathValue())); key_.set_clear_http(forwarded_proto == scheme_values.Http); } @@ -71,20 +87,72 @@ LookupRequest::LookupRequest(const Http::RequestHeaderMap& request_headers, Syst size_t stableHashKey(const Key& key) { return MessageUtil::hash(key); } size_t localHashKey(const Key& key) { return stableHashKey(key); } -// Returns true if response_headers is fresh. -bool LookupRequest::isFresh(const Http::ResponseHeaderMap& response_headers) const { - if (!response_headers.Date()) { - return false; +void LookupRequest::initializeRequestCacheControl(const Http::RequestHeaderMap& request_headers) { + const absl::string_view cache_control = + request_headers.getInlineValue(request_cache_control_handle.handle()); + const absl::string_view pragma = request_headers.getInlineValue(pragma_handler.handle()); + + if (!cache_control.empty()) { + request_cache_control_ = RequestCacheControl(cache_control); + } else { + // According to: https://httpwg.org/specs/rfc7234.html#header.pragma, + // when Cache-Control header is missing, "Pragma:no-cache" is equivalent to + // "Cache-Control:no-cache". Any other directives are ignored. 
+ request_cache_control_.must_validate_ = RequestCacheControl(pragma).must_validate_; } - const Http::HeaderEntry* cache_control_header = response_headers.CacheControl(); - if (cache_control_header) { - const SystemTime::duration effective_max_age = - Utils::effectiveMaxAge(cache_control_header->value().getStringView()); - return timestamp_ - Utils::httpTime(response_headers.Date()) < effective_max_age; +} + +bool LookupRequest::requiresValidation(const Http::ResponseHeaderMap& response_headers) const { + // TODO(yosrym93): Store parsed response cache-control in cache instead of parsing it on every + // lookup. + const absl::string_view cache_control = + response_headers.getInlineValue(response_cache_control_handle.handle()); + const ResponseCacheControl response_cache_control(cache_control); + + const SystemTime response_time = CacheHeadersUtils::httpTime(response_headers.Date()); + + if (timestamp_ < response_time) { + // Response time is in the future, validate response. + return true; + } + + const SystemTime::duration response_age = timestamp_ - response_time; + const bool request_max_age_exceeded = request_cache_control_.max_age_.has_value() && + request_cache_control_.max_age_.value() < response_age; + if (response_cache_control.must_validate_ || request_cache_control_.must_validate_ || + request_max_age_exceeded) { + // Either the request or response explicitly require validation, or a request max-age + // requirement is not satisfied. + return true; + } + + // CacheabilityUtils::isCacheableResponse(..) guarantees that any cached response satisfies this. + // When date metadata injection for responses with no date + // is implemented, this ASSERT will need to be updated. + ASSERT((response_headers.Date() && response_cache_control.max_age_.has_value()) || + response_headers.get(Http::Headers::get().Expires), + "Cache entry does not have valid expiration data."); + + const SystemTime expiration_time = + response_cache_control.max_age_.has_value() + ? 
response_time + response_cache_control.max_age_.value() + : CacheHeadersUtils::httpTime(response_headers.get(Http::Headers::get().Expires)); + + if (timestamp_ > expiration_time) { + // Response is stale, requires validation if + // the response does not allow being served stale, + // or the request max-stale directive does not allow it. + const bool allowed_by_max_stale = + request_cache_control_.max_stale_.has_value() && + request_cache_control_.max_stale_.value() > timestamp_ - expiration_time; + return response_cache_control.no_stale_ || !allowed_by_max_stale; + } else { + // Response is fresh, requires validation only if there is an unsatisfied min-fresh requirement. + const bool min_fresh_unsatisfied = + request_cache_control_.min_fresh_.has_value() && + request_cache_control_.min_fresh_.value() > expiration_time - timestamp_; + return min_fresh_unsatisfied; } - // We didn't find a cache-control header with enough info to determine - // freshness, so fall back to the expires header. - return timestamp_ <= Utils::httpTime(response_headers.get(Http::Headers::get().Expires)); } LookupResult LookupRequest::makeLookupResult(Http::ResponseHeaderMapPtr&& response_headers, @@ -92,12 +160,16 @@ LookupResult LookupRequest::makeLookupResult(Http::ResponseHeaderMapPtr&& respon // TODO(toddmgreer): Implement all HTTP caching semantics. ASSERT(response_headers); LookupResult result; - result.cache_entry_status_ = - isFresh(*response_headers) ? CacheEntryStatus::Ok : CacheEntryStatus::RequiresValidation; + result.cache_entry_status_ = requiresValidation(*response_headers) + ? 
CacheEntryStatus::RequiresValidation + : CacheEntryStatus::Ok; result.headers_ = std::move(response_headers); result.content_length_ = content_length; if (!adjustByteRangeSet(result.response_ranges_, request_range_spec_, content_length)) { result.headers_->setStatus(static_cast(Http::Code::RangeNotSatisfiable)); + result.cache_entry_status_ = CacheEntryStatus::NotSatisfiableRange; + } else if (!result.response_ranges_.empty()) { + result.cache_entry_status_ = CacheEntryStatus::SatisfiableRange; } result.has_trailers_ = false; return result; @@ -118,7 +190,7 @@ bool adjustByteRangeSet(std::vector& response_ranges, for (const RawByteRange& spec : request_range_spec) { if (spec.isSuffix()) { - // spec is a suffix-byte-range-spec + // spec is a suffix-byte-range-spec. if (spec.suffixLength() == 0) { // This range is unsatisfiable, so skip it. continue; @@ -155,6 +227,90 @@ bool adjustByteRangeSet(std::vector& response_ranges, } return true; } + +std::vector RangeRequests::parseRanges(const Http::RequestHeaderMap& request_headers, + uint64_t max_byte_range_specs) { + // Makes sure we have a GET request, as Range headers are only valid with this type of request. + const absl::string_view method = request_headers.getMethodValue(); + ASSERT(method == Http::Headers::get().MethodValues.Get); + + // Multiple instances of range headers are invalid. + // https://tools.ietf.org/html/rfc7230#section-3.2.2 + std::vector range_headers; + Http::HeaderUtility::getAllOfHeader(request_headers, Http::Headers::get().Range.get(), + range_headers); + + absl::string_view header_value; + if (range_headers.size() == 1) { + header_value = range_headers.front(); + } else { + if (range_headers.size() > 1) { + ENVOY_LOG(debug, "Multiple range headers provided in request. Ignoring all range headers."); + } + return {}; + } + + if (!absl::ConsumePrefix(&header_value, "bytes=")) { + ENVOY_LOG(debug, "Invalid range header. range-unit not correctly specified, only 'bytes' " + "supported. 
Ignoring range header."); + return {}; + } + + std::vector ranges = + absl::StrSplit(header_value, absl::MaxSplits(',', max_byte_range_specs)); + if (ranges.size() > max_byte_range_specs) { + ENVOY_LOG(debug, + "There are more ranges than allowed by the byte range parse limit ({}). Ignoring " + "range header.", + max_byte_range_specs); + return {}; + } + + std::vector parsed_ranges; + for (absl::string_view cur_range : ranges) { + absl::optional first = CacheHeadersUtils::readAndRemoveLeadingDigits(cur_range); + + if (!absl::ConsumePrefix(&cur_range, "-")) { + ENVOY_LOG(debug, + "Invalid format for range header: missing range-end. Ignoring range header."); + return {}; + } + + absl::optional last = CacheHeadersUtils::readAndRemoveLeadingDigits(cur_range); + + if (!cur_range.empty()) { + ENVOY_LOG(debug, + "Unexpected characters after byte range in range header. Ignoring range header."); + return {}; + } + + if (!first && !last) { + ENVOY_LOG(debug, "Invalid format for range header: missing first-byte-pos AND last-byte-pos; " + "at least one of them is required. Ignoring range header."); + return {}; + } + + // Handle suffix range (e.g., -123). + if (!first) { + first = std::numeric_limits::max(); + } + + // Handle optional range-end (e.g., 123-). + if (!last) { + last = std::numeric_limits::max(); + } + + if (first != std::numeric_limits::max() && first > last) { + ENVOY_LOG(debug, "Invalid format for range header: range-start and range-end out of order. 
" + "Ignoring range header."); + return {}; + } + + parsed_ranges.push_back(RawByteRange(first.value(), last.value())); + } + + return parsed_ranges; +} } // namespace Cache } // namespace HttpFilters } // namespace Extensions diff --git a/source/extensions/filters/http/cache/http_cache.h b/source/extensions/filters/http/cache/http_cache.h index 05e07a84fd7cb..907a8d02be96f 100644 --- a/source/extensions/filters/http/cache/http_cache.h +++ b/source/extensions/filters/http/cache/http_cache.h @@ -11,9 +11,12 @@ #include "envoy/http/header_map.h" #include "common/common/assert.h" +#include "common/common/logger.h" #include "source/extensions/filters/http/cache/key.pb.h" +#include "extensions/filters/http/cache/cache_headers_utils.h" + #include "absl/strings/string_view.h" namespace Envoy { @@ -32,8 +35,10 @@ enum class CacheEntryStatus { // This entry is fresh, and an appropriate basis for a 304 Not Modified // response. FoundNotModified, - // This entry is fresh, but can't satisfy the requested range(s). - UnsatisfiableRange, + // This entry is fresh, but cannot satisfy the requested range(s). + NotSatisfiableRange, + // This entry is fresh, and can satisfy the requested range(s). + SatisfiableRange, }; std::ostream& operator<<(std::ostream& os, CacheEntryStatus status); @@ -69,6 +74,16 @@ class RawByteRange { const uint64_t last_byte_pos_; }; +class RangeRequests : Logger::Loggable { +public: + // Parses the ranges from the request headers into a vector. + // max_byte_range_specs defines how many byte ranges can be parsed from the header value. + // If there is no range header, multiple range headers, the header value is malformed, or there + // are more ranges than max_byte_range_specs, returns an empty vector. 
+ static std::vector parseRanges(const Http::RequestHeaderMap& request_headers, + uint64_t max_byte_range_specs); +}; + // Byte range from an HTTP request, adjusted for a known response body size, and converted from an // HTTP-style closed interval to a C++ style half-open interval. class AdjustedByteRange { @@ -136,6 +151,7 @@ struct LookupResult { // True if the cached response has trailers. bool has_trailers_ = false; }; +using LookupResultPtr = std::unique_ptr; // Produces a hash of key that is consistent across restarts, architectures, // builds, and configurations. Caches that store persistent entries based on a @@ -162,21 +178,11 @@ class LookupRequest { // Prereq: request_headers's Path(), Scheme(), and Host() are non-null. LookupRequest(const Http::RequestHeaderMap& request_headers, SystemTime timestamp); + const RequestCacheControl& requestCacheControl() const { return request_cache_control_; } + // Caches may modify the key according to local needs, though care must be // taken to ensure that meaningfully distinct responses have distinct keys. const Key& key() const { return key_; } - Key& key() { return key_; } - - // Returns the subset of this request's headers that are listed in - // envoy::extensions::filters::http::cache::v3alpha::CacheConfig::allowed_vary_headers. If a cache - // storage implementation forwards lookup requests to a remote cache server that supports *vary* - // headers, that server may need to see these headers. For local implementations, it may be - // simpler to instead call makeLookupResult with each potential response. - HeaderVector& vary_headers() { return vary_headers_; } - const HeaderVector& vary_headers() const { return vary_headers_; } - - // Time when this LookupRequest was created (in response to an HTTP request). - SystemTime timestamp() const { return timestamp_; } // WARNING: Incomplete--do not use in production (yet). 
// Returns a LookupResult suitable for sending to the cache filter's @@ -191,13 +197,21 @@ class LookupRequest { uint64_t content_length) const; private: - bool isFresh(const Http::ResponseHeaderMap& response_headers) const; + void initializeRequestCacheControl(const Http::RequestHeaderMap& request_headers); + bool requiresValidation(const Http::ResponseHeaderMap& response_headers) const; Key key_; std::vector request_range_spec_; + // Time when this LookupRequest was created (in response to an HTTP request). SystemTime timestamp_; + // The subset of this request's headers that are listed in + // envoy::extensions::filters::http::cache::v3alpha::CacheConfig::allowed_vary_headers. If a cache + // storage implementation forwards lookup requests to a remote cache server that supports *vary* + // headers, that server may need to see these headers. For local implementations, it may be + // simpler to instead call makeLookupResult with each potential response. HeaderVector vary_headers_; - const std::string request_cache_control_; + + RequestCacheControl request_cache_control_; }; // Statically known information about a cache. @@ -220,7 +234,7 @@ class InsertContext { // The insertion is streamed into the cache in chunks whose size is determined // by the client, but with a pace determined by the cache. To avoid streaming // data into cache too fast for the cache to handle, clients should wait for - // the cache to call ready_for_next_chunk() before streaming the next chunk. + // the cache to call readyForNextChunk() before streaming the next chunk. // // The client can abort the streaming insertion by dropping the // InsertContextPtr. A cache can abort the insertion by passing 'false' into @@ -292,8 +306,8 @@ class HttpCache { // // This is called when an expired cache entry is successfully validated, to // update the cache entry. 
- virtual void updateHeaders(LookupContextPtr&& lookup_context, - Http::ResponseHeaderMapPtr&& response_headers) PURE; + virtual void updateHeaders(const LookupContext& lookup_context, + const Http::ResponseHeaderMap& response_headers) PURE; // Returns statically known information about a cache. virtual CacheInfo cacheInfo() const PURE; diff --git a/source/extensions/filters/http/cache/http_cache_utils.cc b/source/extensions/filters/http/cache/http_cache_utils.cc deleted file mode 100644 index 14d3f67d97099..0000000000000 --- a/source/extensions/filters/http/cache/http_cache_utils.cc +++ /dev/null @@ -1,188 +0,0 @@ -#include "extensions/filters/http/cache/http_cache_utils.h" - -#include -#include - -#include "absl/algorithm/container.h" -#include "absl/strings/ascii.h" -#include "absl/strings/numbers.h" -#include "absl/strings/strip.h" - -namespace Envoy { -namespace Extensions { -namespace HttpFilters { -namespace Cache { - -// True for characters defined as tchars by -// https://tools.ietf.org/html/rfc7230#section-3.2.6 -// -// tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" -// / "-" / "." / "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA -bool Utils::tchar(char c) { - switch (c) { - case '!': - case '#': - case '$': - case '%': - case '&': - case '*': - case '+': - case '-': - case '.': - case '^': - case '_': - case '`': - case '|': - case '~': - return true; - } - return absl::ascii_isalnum(c); -} - -// Removes an initial HTTP header field value token, as defined by -// https://tools.ietf.org/html/rfc7230#section-3.2.6. Returns true if an initial -// token was present. -// -// token = 1*tchar -bool Utils::eatToken(absl::string_view& s) { - const absl::string_view::iterator token_end = absl::c_find_if_not(s, &tchar); - if (token_end == s.begin()) { - return false; - } - s.remove_prefix(token_end - s.begin()); - return true; -} - -// Removes an initial token or quoted-string (if present), as defined by -// https://tools.ietf.org/html/rfc7234#section-5.2. 
If a cache-control directive -// has an argument (as indicated by '='), it should be in this form. -// -// quoted-string = DQUOTE *( qdtext / quoted-pair ) DQUOTE -// qdtext = HTAB / SP /%x21 / %x23-5B / %x5D-7E / obs-text -// obs-text = %x80-FF -// quoted-pair = "\" ( HTAB / SP / VCHAR / obs-text ) -// VCHAR = %x21-7E ; visible (printing) characters -// -// For example, the directive "my-extension=42" has an argument of "42", so an -// input of "public, my-extension=42, max-age=999" -void Utils::eatDirectiveArgument(absl::string_view& s) { - if (s.empty()) { - return; - } - if (s.front() == '"') { - // TODO(#9833): handle \-escaped quotes - const size_t closing_quote = s.find('"', 1); - s.remove_prefix(closing_quote); - } else { - eatToken(s); - } -} - -// If s is non-null and begins with a decimal number ([0-9]+), removes it from -// the input and returns a SystemTime::duration representing that many seconds. -// If s is null or doesn't begin with digits, returns -// SystemTime::duration::zero(). If parsing overflows, returns -// SystemTime::duration::max(). -SystemTime::duration Utils::eatLeadingDuration(absl::string_view& s) { - const absl::string_view::iterator digits_end = absl::c_find_if_not(s, &absl::ascii_isdigit); - const size_t digits_length = digits_end - s.begin(); - if (digits_length == 0) { - return SystemTime::duration::zero(); - } - const absl::string_view digits(s.begin(), digits_length); - s.remove_prefix(digits_length); - uint64_t num; - return absl::SimpleAtoi(digits, &num) ? std::chrono::seconds(num) : SystemTime::duration::max(); -} - -// Returns the effective max-age represented by cache-control. If the result is -// SystemTime::duration::zero(), or is less than the response's, the response -// should be validated. -// -// TODO(#9833): Write a CacheControl class to fully parse the cache-control -// header value. Consider sharing with the gzip filter. 
-SystemTime::duration Utils::effectiveMaxAge(absl::string_view cache_control) { - // The grammar for This Cache-Control header value should be: - // Cache-Control = 1#cache-directive - // cache-directive = token [ "=" ( token / quoted-string ) ] - // token = 1*tchar - // tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" - // / "-" / "." / "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA - // quoted-string = DQUOTE *( qdtext / quoted-pair ) DQUOTE - // qdtext = HTAB / SP /%x21 / %x23-5B / %x5D-7E / obs-text - // obs-text = %x80-FF - // quoted-pair = "\" ( HTAB / SP / VCHAR / obs-text ) - // VCHAR = %x21-7E ; visible (printing) characters - SystemTime::duration max_age = SystemTime::duration::zero(); - bool found_s_maxage = false; - while (!cache_control.empty()) { - // Each time through the loop, we eat one cache-directive. Each branch - // either returns or completely eats a cache-directive. - if (absl::ConsumePrefix(&cache_control, "no-cache")) { - if (eatToken(cache_control)) { - // The token wasn't no-cache; it just started that way, so we must - // finish eating this cache-directive. - if (absl::ConsumePrefix(&cache_control, "=")) { - eatDirectiveArgument(cache_control); - } - } else { - // Found a no-cache directive, so validation is required. 
- return SystemTime::duration::zero(); - } - } else if (absl::ConsumePrefix(&cache_control, "s-maxage=")) { - max_age = eatLeadingDuration(cache_control); - found_s_maxage = true; - cache_control = absl::StripLeadingAsciiWhitespace(cache_control); - if (!cache_control.empty() && cache_control[0] != ',') { - // Unexpected text at end of directive - return SystemTime::duration::zero(); - } - } else if (!found_s_maxage && absl::ConsumePrefix(&cache_control, "max-age=")) { - max_age = eatLeadingDuration(cache_control); - if (!cache_control.empty() && cache_control[0] != ',') { - // Unexpected text at end of directive - return SystemTime::duration::zero(); - } - } else if (eatToken(cache_control)) { - // Unknown directive--ignore. - if (absl::ConsumePrefix(&cache_control, "=")) { - eatDirectiveArgument(cache_control); - } - } else { - // This directive starts with illegal characters. Require validation. - return SystemTime::duration::zero(); - } - // Whichever branch we took should have consumed the entire cache-directive, - // so we just need to eat the delimiter and optional whitespace. 
- absl::ConsumePrefix(&cache_control, ","); - cache_control = absl::StripLeadingAsciiWhitespace(cache_control); - } - return max_age; -} - -SystemTime Utils::httpTime(const Http::HeaderEntry* header_entry) { - if (!header_entry) { - return {}; - } - absl::Time time; - const std::string input(header_entry->value().getStringView()); - - // Acceptable Date/Time Formats per - // https://tools.ietf.org/html/rfc7231#section-7.1.1.1 - // - // Sun, 06 Nov 1994 08:49:37 GMT ; IMF-fixdate - // Sunday, 06-Nov-94 08:49:37 GMT ; obsolete RFC 850 format - // Sun Nov 6 08:49:37 1994 ; ANSI C's asctime() format - static const auto& rfc7231_date_formats = *new std::array{ - "%a, %d %b %Y %H:%M:%S GMT", "%A, %d-%b-%y %H:%M:%S GMT", "%a %b %e %H:%M:%S %Y"}; - for (const std::string& format : rfc7231_date_formats) { - if (absl::ParseTime(format, input, &time, nullptr)) { - return ToChronoTime(time); - } - } - return {}; -} -} // namespace Cache -} // namespace HttpFilters -} // namespace Extensions -} // namespace Envoy diff --git a/source/extensions/filters/http/cache/http_cache_utils.h b/source/extensions/filters/http/cache/http_cache_utils.h deleted file mode 100644 index d62599b8f5bb1..0000000000000 --- a/source/extensions/filters/http/cache/http_cache_utils.h +++ /dev/null @@ -1,34 +0,0 @@ -#pragma once - -#include "envoy/common/time.h" -#include "envoy/http/header_map.h" - -#include "absl/strings/string_view.h" -#include "absl/time/time.h" - -namespace Envoy { -namespace Extensions { -namespace HttpFilters { -namespace Cache { -class Utils { -public: - // Parses and returns max-age or s-maxage (with s-maxage taking precedence), - // parsed into a SystemTime::Duration. Returns SystemTime::Duration::zero if - // neither is present, or there is a no-cache directive, or if max-age or - // s-maxage is malformed. - static SystemTime::duration effectiveMaxAge(absl::string_view cache_control); - - // Parses header_entry as an HTTP time. 
Returns SystemTime() if - // header_entry is null or malformed. - static SystemTime httpTime(const Http::HeaderEntry* header_entry); - -private: - static bool tchar(char c); - static bool eatToken(absl::string_view& s); - static void eatDirectiveArgument(absl::string_view& s); - static SystemTime::duration eatLeadingDuration(absl::string_view& s); -}; -} // namespace Cache -} // namespace HttpFilters -} // namespace Extensions -} // namespace Envoy diff --git a/source/extensions/filters/http/cache/simple_http_cache/BUILD b/source/extensions/filters/http/cache/simple_http_cache/BUILD index 6f569711d5dc9..f9484060aa97d 100644 --- a/source/extensions/filters/http/cache/simple_http_cache/BUILD +++ b/source/extensions/filters/http/cache/simple_http_cache/BUILD @@ -1,15 +1,15 @@ -licenses(["notice"]) # Apache 2 - -## WIP: Simple in-memory cache storage plugin. Not ready for deployment. - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", - "envoy_package", + "envoy_extension_package", "envoy_proto_library", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +## WIP: Simple in-memory cache storage plugin. Not ready for deployment. 
+ +envoy_extension_package() envoy_cc_extension( name = "simple_http_cache_lib", diff --git a/source/extensions/filters/http/cache/simple_http_cache/simple_http_cache.cc b/source/extensions/filters/http/cache/simple_http_cache/simple_http_cache.cc index 5eadaa6a36922..ab2707c450c0f 100644 --- a/source/extensions/filters/http/cache/simple_http_cache/simple_http_cache.cc +++ b/source/extensions/filters/http/cache/simple_http_cache/simple_http_cache.cc @@ -92,12 +92,10 @@ LookupContextPtr SimpleHttpCache::makeLookupContext(LookupRequest&& request) { return std::make_unique(*this, std::move(request)); } -void SimpleHttpCache::updateHeaders(LookupContextPtr&& lookup_context, - Http::ResponseHeaderMapPtr&& response_headers) { - ASSERT(lookup_context); - ASSERT(response_headers); +void SimpleHttpCache::updateHeaders(const LookupContext&, const Http::ResponseHeaderMap&) { // TODO(toddmgreer): Support updating headers. - NOT_IMPLEMENTED_GCOVR_EXCL_LINE; + // Not implemented yet, however this is called during tests + // NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } SimpleHttpCache::Entry SimpleHttpCache::lookup(const LookupRequest& request) { diff --git a/source/extensions/filters/http/cache/simple_http_cache/simple_http_cache.h b/source/extensions/filters/http/cache/simple_http_cache/simple_http_cache.h index ba3851142874c..0223d6bd34b98 100644 --- a/source/extensions/filters/http/cache/simple_http_cache/simple_http_cache.h +++ b/source/extensions/filters/http/cache/simple_http_cache/simple_http_cache.h @@ -25,15 +25,15 @@ class SimpleHttpCache : public HttpCache { // HttpCache LookupContextPtr makeLookupContext(LookupRequest&& request) override; InsertContextPtr makeInsertContext(LookupContextPtr&& lookup_context) override; - void updateHeaders(LookupContextPtr&& lookup_context, - Http::ResponseHeaderMapPtr&& response_headers) override; + void updateHeaders(const LookupContext& lookup_context, + const Http::ResponseHeaderMap& response_headers) override; CacheInfo cacheInfo() const 
override; Entry lookup(const LookupRequest& request); void insert(const Key& key, Http::ResponseHeaderMapPtr&& response_headers, std::string&& body); absl::Mutex mutex_; - absl::flat_hash_map map_ GUARDED_BY(mutex_); + absl::flat_hash_map map_ ABSL_GUARDED_BY(mutex_); }; } // namespace Cache diff --git a/source/extensions/filters/http/common/BUILD b/source/extensions/filters/http/common/BUILD index b5d2b2a030e35..bbafc6cc659a9 100644 --- a/source/extensions/filters/http/common/BUILD +++ b/source/extensions/filters/http/common/BUILD @@ -1,16 +1,18 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +envoy_extension_package() envoy_cc_library( name = "pass_through_filter_lib", hdrs = ["pass_through_filter.h"], + # A thin shim used by test and prod filters. + visibility = ["//visibility:public"], deps = [ "//include/envoy/server:filter_config_interface", ], @@ -41,6 +43,11 @@ envoy_cc_library( envoy_cc_library( name = "utility_lib", hdrs = ["utility.h"], + # Used by the router filter. TODO(#9953) clean up. + visibility = [ + "//source:__subpackages__", + "//test:__subpackages__", + ], deps = [ "//include/envoy/runtime:runtime_interface", "//source/common/common:macros", diff --git a/source/extensions/filters/http/common/compressor/BUILD b/source/extensions/filters/http/common/compressor/BUILD index 55e6a87aa2c72..a1c67b984a5eb 100644 --- a/source/extensions/filters/http/common/compressor/BUILD +++ b/source/extensions/filters/http/common/compressor/BUILD @@ -1,19 +1,20 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +envoy_extension_package() +# TODO(rojkov): move this library to source/extensions/filters/http/compressor/. 
envoy_cc_library( name = "compressor_lib", srcs = ["compressor.cc"], hdrs = ["compressor.h"], deps = [ - "//include/envoy/compressor:compressor_interface", + "//include/envoy/compression/compressor:compressor_interface", "//include/envoy/stats:stats_macros", "//include/envoy/stream_info:filter_state_interface", "//source/common/buffer:buffer_lib", diff --git a/source/extensions/filters/http/common/compressor/compressor.cc b/source/extensions/filters/http/common/compressor/compressor.cc index 7c6ac05893b36..4e0a1b48ce9be 100644 --- a/source/extensions/filters/http/common/compressor/compressor.cc +++ b/source/extensions/filters/http/common/compressor/compressor.cc @@ -11,6 +11,17 @@ namespace Compressors { namespace { +Http::RegisterCustomInlineHeader + accept_encoding_handle(Http::CustomHeaders::get().AcceptEncoding); +Http::RegisterCustomInlineHeader + cache_control_handle(Http::CustomHeaders::get().CacheControl); +Http::RegisterCustomInlineHeader + content_encoding_handle(Http::CustomHeaders::get().ContentEncoding); +Http::RegisterCustomInlineHeader + etag_handle(Http::CustomHeaders::get().Etag); +Http::RegisterCustomInlineHeader + vary_handle(Http::CustomHeaders::get().Vary); + // Default minimum length of an upstream response that allows compression. const uint64_t DefaultMinimumContentLength = 30; @@ -61,7 +72,7 @@ CompressorFilter::CompressorFilter(const CompressorFilterConfigSharedPtr config) : skip_compression_{true}, config_(std::move(config)) {} Http::FilterHeadersStatus CompressorFilter::decodeHeaders(Http::RequestHeaderMap& headers, bool) { - const Http::HeaderEntry* accept_encoding = headers.AcceptEncoding(); + const Http::HeaderEntry* accept_encoding = headers.getInline(accept_encoding_handle.handle()); if (accept_encoding != nullptr) { // Capture the value of the "Accept-Encoding" request header to use it later when making // decision on compressing the corresponding HTTP response. 
@@ -69,7 +80,7 @@ Http::FilterHeadersStatus CompressorFilter::decodeHeaders(Http::RequestHeaderMap } if (config_->enabled() && config_->removeAcceptEncodingHeader()) { - headers.removeAcceptEncoding(); + headers.removeInline(accept_encoding_handle.handle()); } return Http::FilterHeadersStatus::Continue; @@ -100,28 +111,39 @@ void CompressorFilter::setDecoderFilterCallbacks(Http::StreamDecoderFilterCallba Http::FilterHeadersStatus CompressorFilter::encodeHeaders(Http::ResponseHeaderMap& headers, bool end_stream) { - if (!end_stream && config_->enabled() && isMinimumContentLength(headers) && - isAcceptEncodingAllowed(headers) && isContentTypeAllowed(headers) && - !hasCacheControlNoTransform(headers) && isEtagAllowed(headers) && - isTransferEncodingAllowed(headers) && !headers.ContentEncoding()) { + const bool isEnabledAndContentLengthBigEnough = + config_->enabled() && isMinimumContentLength(headers); + const bool isCompressible = isEnabledAndContentLengthBigEnough && isContentTypeAllowed(headers) && + !hasCacheControlNoTransform(headers) && isEtagAllowed(headers) && + !headers.getInline(content_encoding_handle.handle()); + if (!end_stream && isEnabledAndContentLengthBigEnough && isAcceptEncodingAllowed(headers) && + isCompressible && isTransferEncodingAllowed(headers)) { skip_compression_ = false; sanitizeEtagHeader(headers); - insertVaryHeader(headers); headers.removeContentLength(); - headers.setContentEncoding(config_->contentEncoding()); + headers.setInline(content_encoding_handle.handle(), config_->contentEncoding()); config_->stats().compressed_.inc(); // Finally instantiate the compressor. compressor_ = config_->makeCompressor(); } else { config_->stats().not_compressed_.inc(); } + + // Even if we decided not to compress due to incompatible Accept-Encoding value, + // the Vary header would need to be inserted to let a caching proxy in front of Envoy + // know that the requested resource still can be served with compression applied. 
+ if (isCompressible) { + insertVaryHeader(headers); + } + return Http::FilterHeadersStatus::Continue; } Http::FilterDataStatus CompressorFilter::encodeData(Buffer::Instance& data, bool end_stream) { if (!skip_compression_) { config_->stats().total_uncompressed_bytes_.add(data.length()); - compressor_->compress(data, end_stream ? Compressor::State::Finish : Compressor::State::Flush); + compressor_->compress(data, end_stream ? Envoy::Compression::Compressor::State::Finish + : Envoy::Compression::Compressor::State::Flush); config_->stats().total_compressed_bytes_.add(data.length()); } return Http::FilterDataStatus::Continue; @@ -130,7 +152,7 @@ Http::FilterDataStatus CompressorFilter::encodeData(Buffer::Instance& data, bool Http::FilterTrailersStatus CompressorFilter::encodeTrailers(Http::ResponseTrailerMap&) { if (!skip_compression_) { Buffer::OwnedImpl empty_buffer; - compressor_->compress(empty_buffer, Compressor::State::Finish); + compressor_->compress(empty_buffer, Envoy::Compression::Compressor::State::Finish); config_->stats().total_compressed_bytes_.add(empty_buffer.length()); encoder_callbacks_->addEncodedData(empty_buffer, true); } @@ -138,10 +160,10 @@ Http::FilterTrailersStatus CompressorFilter::encodeTrailers(Http::ResponseTraile } bool CompressorFilter::hasCacheControlNoTransform(Http::ResponseHeaderMap& headers) const { - const Http::HeaderEntry* cache_control = headers.CacheControl(); + const Http::HeaderEntry* cache_control = headers.getInline(cache_control_handle.handle()); if (cache_control) { return StringUtil::caseFindToken(cache_control->value().getStringView(), ",", - Http::Headers::get().CacheControlValues.NoTransform); + Http::CustomHeaders::get().CacheControlValues.NoTransform); } return false; @@ -235,18 +257,18 @@ CompressorFilter::chooseEncoding(const Http::ResponseHeaderMap& headers) const { // If there's no intersection between accepted encodings and the ones provided by the allowed // compressors, then only the "identity" encoding is 
acceptable. return std::make_unique( - Http::Headers::get().AcceptEncodingValues.Identity, + Http::CustomHeaders::get().AcceptEncodingValues.Identity, CompressorFilter::EncodingDecision::HeaderStat::NotValid); } // Find intersection of encodings accepted by the user agent and provided // by the allowed compressors and choose the one with the highest q-value. - EncPair choice{Http::Headers::get().AcceptEncodingValues.Identity, static_cast(0)}; + EncPair choice{Http::CustomHeaders::get().AcceptEncodingValues.Identity, static_cast(0)}; for (const auto& pair : pairs) { if ((pair.second > choice.second) && (allowed_compressors.count(std::string(pair.first)) || - pair.first == Http::Headers::get().AcceptEncodingValues.Identity || - pair.first == Http::Headers::get().AcceptEncodingValues.Wildcard)) { + pair.first == Http::CustomHeaders::get().AcceptEncodingValues.Identity || + pair.first == Http::CustomHeaders::get().AcceptEncodingValues.Wildcard)) { choice = pair; } } @@ -254,19 +276,19 @@ CompressorFilter::chooseEncoding(const Http::ResponseHeaderMap& headers) const { if (!choice.second) { // The value of "Accept-Encoding" must be invalid as we ended up with zero q-value. return std::make_unique( - Http::Headers::get().AcceptEncodingValues.Identity, + Http::CustomHeaders::get().AcceptEncodingValues.Identity, CompressorFilter::EncodingDecision::HeaderStat::NotValid); } // The "identity" encoding (no compression) is always available. - if (choice.first == Http::Headers::get().AcceptEncodingValues.Identity) { + if (choice.first == Http::CustomHeaders::get().AcceptEncodingValues.Identity) { return std::make_unique( - Http::Headers::get().AcceptEncodingValues.Identity, + Http::CustomHeaders::get().AcceptEncodingValues.Identity, CompressorFilter::EncodingDecision::HeaderStat::Identity); } // If wildcard is given then use which ever compressor is registered first. 
- if (choice.first == Http::Headers::get().AcceptEncodingValues.Wildcard) { + if (choice.first == Http::CustomHeaders::get().AcceptEncodingValues.Wildcard) { auto first_registered = std::min_element( allowed_compressors.begin(), allowed_compressors.end(), [](const std::pair& a, @@ -350,7 +372,8 @@ bool CompressorFilter::isContentTypeAllowed(Http::ResponseHeaderMap& headers) co } bool CompressorFilter::isEtagAllowed(Http::ResponseHeaderMap& headers) const { - const bool is_etag_allowed = !(config_->disableOnEtagHeader() && headers.Etag()); + const bool is_etag_allowed = + !(config_->disableOnEtagHeader() && headers.getInline(etag_handle.handle())); if (!is_etag_allowed) { config_->stats().not_compressed_etag_.inc(); } @@ -370,10 +393,8 @@ bool CompressorFilter::isMinimumContentLength(Http::ResponseHeaderMap& headers) return is_minimum_content_length; } - const Http::HeaderEntry* transfer_encoding = headers.TransferEncoding(); - return (transfer_encoding && - StringUtil::caseFindToken(transfer_encoding->value().getStringView(), ",", - Http::Headers::get().TransferEncodingValues.Chunked)); + return StringUtil::caseFindToken(headers.getTransferEncodingValue(), ",", + Http::Headers::get().TransferEncodingValues.Chunked); } bool CompressorFilter::isTransferEncodingAllowed(Http::ResponseHeaderMap& headers) const { @@ -396,17 +417,18 @@ bool CompressorFilter::isTransferEncodingAllowed(Http::ResponseHeaderMap& header } void CompressorFilter::insertVaryHeader(Http::ResponseHeaderMap& headers) { - const Http::HeaderEntry* vary = headers.Vary(); + const Http::HeaderEntry* vary = headers.getInline(vary_handle.handle()); if (vary != nullptr) { if (!StringUtil::findToken(vary->value().getStringView(), ",", - Http::Headers::get().VaryValues.AcceptEncoding, true)) { + Http::CustomHeaders::get().VaryValues.AcceptEncoding, true)) { std::string new_header; absl::StrAppend(&new_header, vary->value().getStringView(), ", ", - Http::Headers::get().VaryValues.AcceptEncoding); - 
headers.setVary(new_header); + Http::CustomHeaders::get().VaryValues.AcceptEncoding); + headers.setInline(vary_handle.handle(), new_header); } } else { - headers.setReferenceVary(Http::Headers::get().VaryValues.AcceptEncoding); + headers.setReferenceInline(vary_handle.handle(), + Http::CustomHeaders::get().VaryValues.AcceptEncoding); } } @@ -416,11 +438,11 @@ void CompressorFilter::insertVaryHeader(Http::ResponseHeaderMap& headers) { // This design attempts to stay more on the safe side by preserving weak etags and removing // the strong ones when disable_on_etag_header is false. Envoy does NOT re-write entity tags. void CompressorFilter::sanitizeEtagHeader(Http::ResponseHeaderMap& headers) { - const Http::HeaderEntry* etag = headers.Etag(); + const Http::HeaderEntry* etag = headers.getInline(etag_handle.handle()); if (etag != nullptr) { absl::string_view value(etag->value().getStringView()); if (value.length() > 2 && !((value[0] == 'w' || value[0] == 'W') && value[1] == '/')) { - headers.removeEtag(); + headers.removeInline(etag_handle.handle()); } } } diff --git a/source/extensions/filters/http/common/compressor/compressor.h b/source/extensions/filters/http/common/compressor/compressor.h index fc99ab517d0d8..844719a334667 100644 --- a/source/extensions/filters/http/common/compressor/compressor.h +++ b/source/extensions/filters/http/common/compressor/compressor.h @@ -1,6 +1,6 @@ #pragma once -#include "envoy/compressor/compressor.h" +#include "envoy/compression/compressor/compressor.h" #include "envoy/extensions/filters/http/compressor/v3/compressor.pb.h" #include "envoy/stats/scope.h" #include "envoy/stats/stats_macros.h" @@ -55,12 +55,14 @@ struct CompressorStats { ALL_COMPRESSOR_STATS(GENERATE_COUNTER_STRUCT) }; +// TODO(rojkov): merge this class with Compressor::CompressorFilterConfig when the filter +// `envoy.filters.http.gzip` is fully deprecated and dropped. 
class CompressorFilterConfig { public: CompressorFilterConfig() = delete; virtual ~CompressorFilterConfig() = default; - virtual std::unique_ptr makeCompressor() PURE; + virtual Envoy::Compression::Compressor::CompressorPtr makeCompressor() PURE; bool enabled() const { return enabled_.enabled(); } const CompressorStats& stats() { return stats_; } @@ -69,7 +71,6 @@ class CompressorFilterConfig { bool removeAcceptEncodingHeader() const { return remove_accept_encoding_header_; } uint32_t minimumLength() const { return content_length_; } const std::string contentEncoding() const { return content_encoding_; }; - const std::map registeredCompressors() const; protected: CompressorFilterConfig( @@ -148,7 +149,7 @@ class CompressorFilter : public Http::PassThroughFilter { bool shouldCompress(const EncodingDecision& decision) const; bool skip_compression_; - std::unique_ptr compressor_; + Envoy::Compression::Compressor::CompressorPtr compressor_; const CompressorFilterConfigSharedPtr config_; std::unique_ptr accept_encoding_; }; diff --git a/source/extensions/filters/http/common/jwks_fetcher.cc b/source/extensions/filters/http/common/jwks_fetcher.cc index 3406879727c75..9f53fd32e21e0 100644 --- a/source/extensions/filters/http/common/jwks_fetcher.cc +++ b/source/extensions/filters/http/common/jwks_fetcher.cc @@ -22,7 +22,7 @@ class JwksFetcherImpl : public JwksFetcher, ~JwksFetcherImpl() override { cancel(); } - void cancel() override { + void cancel() final { if (request_ && !complete_) { request_->cancel(); ENVOY_LOG(debug, "fetch pubkey [uri = {}]: canceled", uri_->uri()); @@ -102,6 +102,8 @@ class JwksFetcherImpl : public JwksFetcher, reset(); } + void onBeforeFinalizeUpstreamSpan(Tracing::Span&, const Http::ResponseHeaderMap*) override {} + private: Upstream::ClusterManager& cm_; bool complete_{}; diff --git a/source/extensions/filters/http/common/utility.h b/source/extensions/filters/http/common/utility.h index 23915b30f4dfb..b119e2db12b50 100644 --- 
a/source/extensions/filters/http/common/utility.h +++ b/source/extensions/filters/http/common/utility.h @@ -1,7 +1,5 @@ #pragma once -#include - #include "common/common/macros.h" #include "extensions/common/utility.h" diff --git a/source/extensions/filters/http/compressor/BUILD b/source/extensions/filters/http/compressor/BUILD new file mode 100644 index 0000000000000..01855f8eb64a6 --- /dev/null +++ b/source/extensions/filters/http/compressor/BUILD @@ -0,0 +1,39 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", + "envoy_cc_library", + "envoy_extension_package", +) + +licenses(["notice"]) # Apache 2 + +# HTTP L7 filter that performs compression with configurable compression libraries +# Public docs: docs/root/configuration/http_filters/compressor_filter.rst + +envoy_extension_package() + +envoy_cc_library( + name = "compressor_filter_lib", + srcs = ["compressor_filter.cc"], + hdrs = ["compressor_filter.h"], + deps = [ + "//include/envoy/compression/compressor:compressor_factory_interface", + "//source/extensions/filters/http/common/compressor:compressor_lib", + "@envoy_api//envoy/extensions/filters/http/compressor/v3:pkg_cc_proto", + ], +) + +envoy_cc_extension( + name = "config", + srcs = ["config.cc"], + hdrs = ["config.h"], + security_posture = "robust_to_untrusted_downstream", + deps = [ + ":compressor_filter_lib", + "//include/envoy/compression/compressor:compressor_config_interface", + "//source/common/config:utility_lib", + "//source/extensions/filters/http:well_known_names", + "//source/extensions/filters/http/common:factory_base_lib", + "@envoy_api//envoy/extensions/filters/http/compressor/v3:pkg_cc_proto", + ], +) diff --git a/source/extensions/filters/http/compressor/compressor_filter.cc b/source/extensions/filters/http/compressor/compressor_filter.cc new file mode 100644 index 0000000000000..24e974e012e5c --- /dev/null +++ b/source/extensions/filters/http/compressor/compressor_filter.cc @@ -0,0 +1,26 @@ +#include 
"extensions/filters/http/compressor/compressor_filter.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace Compressor { + +CompressorFilterConfig::CompressorFilterConfig( + const envoy::extensions::filters::http::compressor::v3::Compressor& generic_compressor, + const std::string& stats_prefix, Stats::Scope& scope, Runtime::Loader& runtime, + Compression::Compressor::CompressorFactoryPtr compressor_factory) + : Common::Compressors::CompressorFilterConfig( + generic_compressor, + stats_prefix + "compressor." + generic_compressor.compressor_library().name() + "." + + compressor_factory->statsPrefix(), + scope, runtime, compressor_factory->contentEncoding()), + compressor_factory_(std::move(compressor_factory)) {} + +Envoy::Compression::Compressor::CompressorPtr CompressorFilterConfig::makeCompressor() { + return compressor_factory_->createCompressor(); +} + +} // namespace Compressor +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/http/compressor/compressor_filter.h b/source/extensions/filters/http/compressor/compressor_filter.h new file mode 100644 index 0000000000000..8d7347847f794 --- /dev/null +++ b/source/extensions/filters/http/compressor/compressor_filter.h @@ -0,0 +1,35 @@ +#pragma once + +#include "envoy/compression/compressor/factory.h" +#include "envoy/extensions/filters/http/compressor/v3/compressor.pb.h" + +#include "extensions/filters/http/common/compressor/compressor.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace Compressor { + +/** + * Configuration for the compressor filter. + */ +class CompressorFilterConfig : public Common::Compressors::CompressorFilterConfig { + // TODO(rojkov): move functionality of Common::Compressors::CompressorFilterConfig + // to this class when `envoy.filters.http.gzip` is fully deprecated and dropped. 
+public: + CompressorFilterConfig() = delete; + CompressorFilterConfig( + const envoy::extensions::filters::http::compressor::v3::Compressor& genereic_compressor, + const std::string& stats_prefix, Stats::Scope& scope, Runtime::Loader& runtime, + Envoy::Compression::Compressor::CompressorFactoryPtr compressor_factory); + + Envoy::Compression::Compressor::CompressorPtr makeCompressor() override; + +private: + const Envoy::Compression::Compressor::CompressorFactoryPtr compressor_factory_; +}; + +} // namespace Compressor +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/http/compressor/config.cc b/source/extensions/filters/http/compressor/config.cc new file mode 100644 index 0000000000000..aff3ca5afe4cc --- /dev/null +++ b/source/extensions/filters/http/compressor/config.cc @@ -0,0 +1,52 @@ +#include "extensions/filters/http/compressor/config.h" + +#include "envoy/compression/compressor/config.h" + +#include "common/config/utility.h" + +#include "extensions/filters/http/compressor/compressor_filter.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace Compressor { + +Http::FilterFactoryCb CompressorFilterFactory::createFilterFactoryFromProtoTyped( + const envoy::extensions::filters::http::compressor::v3::Compressor& proto_config, + const std::string& stats_prefix, Server::Configuration::FactoryContext& context) { + // TODO(rojkov): instead of throwing an exception make the Compressor.compressor_library field + // required when the Gzip HTTP-filter is fully deprecated and removed. 
+ if (!proto_config.has_compressor_library()) { + throw EnvoyException("Compressor filter doesn't have compressor_library defined"); + } + const std::string type{TypeUtil::typeUrlToDescriptorFullName( + proto_config.compressor_library().typed_config().type_url())}; + Compression::Compressor::NamedCompressorLibraryConfigFactory* const config_factory = + Registry::FactoryRegistry< + Compression::Compressor::NamedCompressorLibraryConfigFactory>::getFactoryByType(type); + if (config_factory == nullptr) { + throw EnvoyException( + fmt::format("Didn't find a registered implementation for type: '{}'", type)); + } + ProtobufTypes::MessagePtr message = Config::Utility::translateAnyToFactoryConfig( + proto_config.compressor_library().typed_config(), context.messageValidationVisitor(), + *config_factory); + Compression::Compressor::CompressorFactoryPtr compressor_factory = + config_factory->createCompressorFactoryFromProto(*message, context); + Common::Compressors::CompressorFilterConfigSharedPtr config = + std::make_shared(proto_config, stats_prefix, context.scope(), + context.runtime(), std::move(compressor_factory)); + return [config](Http::FilterChainFactoryCallbacks& callbacks) -> void { + callbacks.addStreamFilter(std::make_shared(config)); + }; +} + +/** + * Static registration for the compressor filter. @see NamedHttpFilterConfigFactory. 
+ */ +REGISTER_FACTORY(CompressorFilterFactory, Server::Configuration::NamedHttpFilterConfigFactory); + +} // namespace Compressor +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/http/compressor/config.h b/source/extensions/filters/http/compressor/config.h new file mode 100644 index 0000000000000..50127769f3b4f --- /dev/null +++ b/source/extensions/filters/http/compressor/config.h @@ -0,0 +1,33 @@ +#pragma once + +#include "envoy/extensions/filters/http/compressor/v3/compressor.pb.h" +#include "envoy/extensions/filters/http/compressor/v3/compressor.pb.validate.h" + +#include "extensions/filters/http/common/factory_base.h" +#include "extensions/filters/http/well_known_names.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace Compressor { + +/** + * Config registration for the compressor filter. @see NamedHttpFilterConfigFactory. + */ +class CompressorFilterFactory + : public Common::FactoryBase { +public: + CompressorFilterFactory() : FactoryBase(HttpFilterNames::get().Compressor) {} + +private: + Http::FilterFactoryCb createFilterFactoryFromProtoTyped( + const envoy::extensions::filters::http::compressor::v3::Compressor& config, + const std::string& stats_prefix, Server::Configuration::FactoryContext& context) override; +}; + +DECLARE_FACTORY(CompressorFilterFactory); + +} // namespace Compressor +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/http/cors/BUILD b/source/extensions/filters/http/cors/BUILD index 9004a1c984fa1..903fa5599ff01 100644 --- a/source/extensions/filters/http/cors/BUILD +++ b/source/extensions/filters/http/cors/BUILD @@ -1,16 +1,16 @@ -licenses(["notice"]) # Apache 2 - -# L7 HTTP filter which implements CORS processing (https://en.wikipedia.org/wiki/Cross-origin_resource_sharing) -# Public docs: docs/root/configuration/http_filters/cors_filter.rst - load( 
"//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +# L7 HTTP filter which implements CORS processing (https://en.wikipedia.org/wiki/Cross-origin_resource_sharing) +# Public docs: docs/root/configuration/http_filters/cors_filter.rst + +envoy_extension_package() envoy_cc_library( name = "cors_filter_lib", @@ -32,6 +32,11 @@ envoy_cc_extension( srcs = ["config.cc"], hdrs = ["config.h"], security_posture = "robust_to_untrusted_downstream", + # TODO(#9953) clean up. + visibility = [ + "//:extension_config", + "//test/integration:__subpackages__", + ], deps = [ "//include/envoy/registry", "//include/envoy/server:filter_config_interface", diff --git a/source/extensions/filters/http/cors/cors_filter.cc b/source/extensions/filters/http/cors/cors_filter.cc index 34f2576aabfd9..574a0f36bfc1e 100644 --- a/source/extensions/filters/http/cors/cors_filter.cc +++ b/source/extensions/filters/http/cors/cors_filter.cc @@ -1,6 +1,7 @@ #include "extensions/filters/http/cors/cors_filter.h" #include "envoy/http/codes.h" +#include "envoy/http/header_map.h" #include "envoy/stats/scope.h" #include "common/common/empty_string.h" @@ -13,6 +14,29 @@ namespace Extensions { namespace HttpFilters { namespace Cors { +struct HttpResponseCodeDetailValues { + const absl::string_view CorsResponse = "cors_response"; +}; +using HttpResponseCodeDetails = ConstSingleton; + +Http::RegisterCustomInlineHeader + access_control_request_method_handle(Http::CustomHeaders::get().AccessControlRequestMethod); +Http::RegisterCustomInlineHeader + origin_handle(Http::CustomHeaders::get().Origin); +Http::RegisterCustomInlineHeader + access_control_allow_origin_handle(Http::CustomHeaders::get().AccessControlAllowOrigin); +Http::RegisterCustomInlineHeader + access_control_allow_credentials_handle( + Http::CustomHeaders::get().AccessControlAllowCredentials); +Http::RegisterCustomInlineHeader + 
access_control_allow_methods_handle(Http::CustomHeaders::get().AccessControlAllowMethods); +Http::RegisterCustomInlineHeader + access_control_allow_headers_handle(Http::CustomHeaders::get().AccessControlAllowHeaders); +Http::RegisterCustomInlineHeader + access_control_max_age_handle(Http::CustomHeaders::get().AccessControlMaxAge); +Http::RegisterCustomInlineHeader + access_control_expose_headers_handle(Http::CustomHeaders::get().AccessControlExposeHeaders); + CorsFilterConfig::CorsFilterConfig(const std::string& stats_prefix, Stats::Scope& scope) : stats_(generateStats(stats_prefix + "cors.", scope)) {} @@ -36,7 +60,7 @@ Http::FilterHeadersStatus CorsFilter::decodeHeaders(Http::RequestHeaderMap& head return Http::FilterHeadersStatus::Continue; } - origin_ = headers.Origin(); + origin_ = headers.getInline(origin_handle.handle()); if (origin_ == nullptr || origin_->value().empty()) { return Http::FilterHeadersStatus::Continue; } @@ -53,39 +77,40 @@ Http::FilterHeadersStatus CorsFilter::decodeHeaders(Http::RequestHeaderMap& head is_cors_request_ = true; - const auto method = headers.Method(); - if (method == nullptr || - method->value().getStringView() != Http::Headers::get().MethodValues.Options) { + const absl::string_view method = headers.getMethodValue(); + if (method != Http::Headers::get().MethodValues.Options) { return Http::FilterHeadersStatus::Continue; } - const auto requestMethod = headers.AccessControlRequestMethod(); - if (requestMethod == nullptr || requestMethod->value().empty()) { + if (headers.getInlineValue(access_control_request_method_handle.handle()).empty()) { return Http::FilterHeadersStatus::Continue; } auto response_headers{Http::createHeaderMap( {{Http::Headers::get().Status, std::to_string(enumToInt(Http::Code::OK))}})}; - response_headers->setAccessControlAllowOrigin(origin_->value().getStringView()); + response_headers->setInline(access_control_allow_origin_handle.handle(), + origin_->value().getStringView()); if (allowCredentials()) { - 
response_headers->setReferenceAccessControlAllowCredentials( - Http::Headers::get().CORSValues.True); + response_headers->setReferenceInline(access_control_allow_credentials_handle.handle(), + Http::CustomHeaders::get().CORSValues.True); } if (!allowMethods().empty()) { - response_headers->setAccessControlAllowMethods(allowMethods()); + response_headers->setInline(access_control_allow_methods_handle.handle(), allowMethods()); } if (!allowHeaders().empty()) { - response_headers->setAccessControlAllowHeaders(allowHeaders()); + response_headers->setInline(access_control_allow_headers_handle.handle(), allowHeaders()); } if (!maxAge().empty()) { - response_headers->setAccessControlMaxAge(maxAge()); + response_headers->setInline(access_control_max_age_handle.handle(), maxAge()); } + decoder_callbacks_->streamInfo().setResponseCodeDetails( + HttpResponseCodeDetails::get().CorsResponse); decoder_callbacks_->encodeHeaders(std::move(response_headers), true); return Http::FilterHeadersStatus::StopIteration; @@ -98,13 +123,14 @@ Http::FilterHeadersStatus CorsFilter::encodeHeaders(Http::ResponseHeaderMap& hea return Http::FilterHeadersStatus::Continue; } - headers.setAccessControlAllowOrigin(origin_->value().getStringView()); + headers.setInline(access_control_allow_origin_handle.handle(), origin_->value().getStringView()); if (allowCredentials()) { - headers.setReferenceAccessControlAllowCredentials(Http::Headers::get().CORSValues.True); + headers.setReferenceInline(access_control_allow_credentials_handle.handle(), + Http::CustomHeaders::get().CORSValues.True); } if (!exposeHeaders().empty()) { - headers.setAccessControlExposeHeaders(exposeHeaders()); + headers.setInline(access_control_expose_headers_handle.handle(), exposeHeaders()); } return Http::FilterHeadersStatus::Continue; diff --git a/source/extensions/filters/http/csrf/BUILD b/source/extensions/filters/http/csrf/BUILD index cd2315773e6e0..47bea6f6bbf22 100644 --- a/source/extensions/filters/http/csrf/BUILD +++ 
b/source/extensions/filters/http/csrf/BUILD @@ -1,16 +1,16 @@ -licenses(["notice"]) # Apache 2 - -# L7 HTTP filter which implements CSRF processing (https://www.owasp.org/index.php/Cross-Site_Request_Forgery_(CSRF)) -# Public docs: docs/root/configuration/http_filters/csrf_filter.rst - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +# L7 HTTP filter which implements CSRF processing (https://www.owasp.org/index.php/Cross-Site_Request_Forgery_(CSRF)) +# Public docs: docs/root/configuration/http_filters/csrf_filter.rst + +envoy_extension_package() envoy_cc_library( name = "csrf_filter_lib", @@ -22,6 +22,7 @@ envoy_cc_library( "//source/common/common:matchers_lib", "//source/common/http:header_map_lib", "//source/common/http:headers_lib", + "//source/common/http:url_utility_lib", "//source/common/http:utility_lib", "//source/extensions/filters/http:well_known_names", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", diff --git a/source/extensions/filters/http/csrf/csrf_filter.cc b/source/extensions/filters/http/csrf/csrf_filter.cc index 396dc056c87de..bb7db21b36eb5 100644 --- a/source/extensions/filters/http/csrf/csrf_filter.cc +++ b/source/extensions/filters/http/csrf/csrf_filter.cc @@ -6,6 +6,7 @@ #include "common/common/empty_string.h" #include "common/http/header_map_impl.h" #include "common/http/headers.h" +#include "common/http/url_utility.h" #include "common/http/utility.h" #include "extensions/filters/http/well_known_names.h" @@ -15,6 +16,11 @@ namespace Extensions { namespace HttpFilters { namespace Csrf { +Http::RegisterCustomInlineHeader + origin_handle(Http::CustomHeaders::get().Origin); +Http::RegisterCustomInlineHeader + referer_handle(Http::CustomHeaders::get().Referer); + struct RcDetailsValues { const std::string OriginMismatch = "csrf_origin_mismatch"; }; @@ -22,37 +28,48 @@ using RcDetails = ConstSingleton; namespace 
{ bool isModifyMethod(const Http::RequestHeaderMap& headers) { - const Envoy::Http::HeaderEntry* method = headers.Method(); - if (method == nullptr) { + const absl::string_view method_type = headers.getMethodValue(); + if (method_type.empty()) { return false; } - const absl::string_view method_type = method->value().getStringView(); const auto& method_values = Http::Headers::get().MethodValues; return (method_type == method_values.Post || method_type == method_values.Put || method_type == method_values.Delete || method_type == method_values.Patch); } -absl::string_view hostAndPort(const Http::HeaderEntry* header) { - Http::Utility::Url absolute_url; - if (header != nullptr && !header->value().empty()) { - if (absolute_url.initialize(header->value().getStringView(), false)) { - return absolute_url.hostAndPort(); +std::string hostAndPort(const absl::string_view absolute_url) { + Http::Utility::Url url; + if (!absolute_url.empty()) { + if (url.initialize(absolute_url, /*is_connect=*/false)) { + return std::string(url.hostAndPort()); } - return header->value().getStringView(); + return std::string(absolute_url); } return EMPTY_STRING; } -absl::string_view sourceOriginValue(const Http::RequestHeaderMap& headers) { - const absl::string_view origin = hostAndPort(headers.Origin()); - if (origin != EMPTY_STRING) { +// Note: per https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin, +// the Origin header must include the scheme (and hostAndPort expects +// an absolute URL). 
+std::string sourceOriginValue(const Http::RequestHeaderMap& headers) { + const auto origin = hostAndPort(headers.getInlineValue(origin_handle.handle())); + if (!origin.empty()) { return origin; } - return hostAndPort(headers.Referer()); + return hostAndPort(headers.getInlineValue(referer_handle.handle())); } -absl::string_view targetOriginValue(const Http::RequestHeaderMap& headers) { - return hostAndPort(headers.Host()); +std::string targetOriginValue(const Http::RequestHeaderMap& headers) { + const auto host_value = headers.getHostValue(); + + // Don't even bother if there's not Host header. + if (host_value.empty()) { + return EMPTY_STRING; + } + + const auto absolute_url = fmt::format( + "{}://{}", headers.Scheme() != nullptr ? headers.getSchemeValue() : "http", host_value); + return hostAndPort(absolute_url); } static CsrfStats generateStats(const std::string& prefix, Stats::Scope& scope) { @@ -86,8 +103,8 @@ Http::FilterHeadersStatus CsrfFilter::decodeHeaders(Http::RequestHeaderMap& head } bool is_valid = true; - const absl::string_view source_origin = sourceOriginValue(headers); - if (source_origin == EMPTY_STRING) { + const auto source_origin = sourceOriginValue(headers); + if (source_origin.empty()) { is_valid = false; config_->stats().missing_source_origin_.inc(); } @@ -123,7 +140,7 @@ void CsrfFilter::determinePolicy() { } bool CsrfFilter::isValid(const absl::string_view source_origin, Http::RequestHeaderMap& headers) { - const absl::string_view target_origin = targetOriginValue(headers); + const auto target_origin = targetOriginValue(headers); if (source_origin == target_origin) { return true; } diff --git a/source/extensions/filters/http/decompressor/BUILD b/source/extensions/filters/http/decompressor/BUILD new file mode 100644 index 0000000000000..08d224b8b2849 --- /dev/null +++ b/source/extensions/filters/http/decompressor/BUILD @@ -0,0 +1,45 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", + "envoy_cc_library", + 
"envoy_extension_package", +) + +licenses(["notice"]) # Apache 2 + +# HTTP L7 filter that performs decompression with configurable decompression libraries +# Public docs: docs/root/configuration/http_filters/decompressor_filter.rst + +envoy_extension_package() + +envoy_cc_library( + name = "decompressor_filter_lib", + srcs = ["decompressor_filter.cc"], + hdrs = ["decompressor_filter.h"], + deps = [ + "//include/envoy/compression/decompressor:decompressor_config_interface", + "//include/envoy/compression/decompressor:decompressor_interface", + "//include/envoy/http:filter_interface", + "//source/common/buffer:buffer_lib", + "//source/common/common:macros", + "//source/common/http:headers_lib", + "//source/common/runtime:runtime_lib", + "//source/extensions/filters/http/common:pass_through_filter_lib", + "@envoy_api//envoy/extensions/filters/http/decompressor/v3:pkg_cc_proto", + ], +) + +envoy_cc_extension( + name = "config", + srcs = ["config.cc"], + hdrs = ["config.h"], + security_posture = "robust_to_untrusted_downstream_and_upstream", + deps = [ + ":decompressor_filter_lib", + "//include/envoy/compression/decompressor:decompressor_config_interface", + "//source/common/config:utility_lib", + "//source/extensions/filters/http:well_known_names", + "//source/extensions/filters/http/common:factory_base_lib", + "@envoy_api//envoy/extensions/filters/http/decompressor/v3:pkg_cc_proto", + ], +) diff --git a/source/extensions/filters/http/decompressor/config.cc b/source/extensions/filters/http/decompressor/config.cc new file mode 100644 index 0000000000000..fb52ae85c2169 --- /dev/null +++ b/source/extensions/filters/http/decompressor/config.cc @@ -0,0 +1,48 @@ +#include "extensions/filters/http/decompressor/config.h" + +#include "envoy/compression/decompressor/config.h" + +#include "common/config/utility.h" + +#include "extensions/filters/http/decompressor/decompressor_filter.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace Decompressor { 
+ +Http::FilterFactoryCb DecompressorFilterFactory::createFilterFactoryFromProtoTyped( + const envoy::extensions::filters::http::decompressor::v3::Decompressor& proto_config, + const std::string& stats_prefix, Server::Configuration::FactoryContext& context) { + const std::string decompressor_library_type{TypeUtil::typeUrlToDescriptorFullName( + proto_config.decompressor_library().typed_config().type_url())}; + Compression::Decompressor::NamedDecompressorLibraryConfigFactory* const + decompressor_library_factory = Registry::FactoryRegistry< + Compression::Decompressor::NamedDecompressorLibraryConfigFactory>:: + getFactoryByType(decompressor_library_type); + if (decompressor_library_factory == nullptr) { + throw EnvoyException(fmt::format("Didn't find a registered implementation for type: '{}'", + decompressor_library_type)); + } + ProtobufTypes::MessagePtr message = Config::Utility::translateAnyToFactoryConfig( + proto_config.decompressor_library().typed_config(), context.messageValidationVisitor(), + *decompressor_library_factory); + Compression::Decompressor::DecompressorFactoryPtr decompressor_factory = + decompressor_library_factory->createDecompressorFactoryFromProto(*message, context); + DecompressorFilterConfigSharedPtr filter_config = std::make_shared( + proto_config, stats_prefix, context.scope(), context.runtime(), + std::move(decompressor_factory)); + return [filter_config](Http::FilterChainFactoryCallbacks& callbacks) -> void { + callbacks.addStreamFilter(std::make_shared(filter_config)); + }; +} + +/** + * Static registration for the decompressor filter. @see NamedHttpFilterConfigFactory. 
+ */ +REGISTER_FACTORY(DecompressorFilterFactory, Server::Configuration::NamedHttpFilterConfigFactory); + +} // namespace Decompressor +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/source/extensions/filters/http/decompressor/config.h b/source/extensions/filters/http/decompressor/config.h new file mode 100644 index 0000000000000..4e04abe3c6df2 --- /dev/null +++ b/source/extensions/filters/http/decompressor/config.h @@ -0,0 +1,33 @@ +#pragma once + +#include "envoy/extensions/filters/http/decompressor/v3/decompressor.pb.h" +#include "envoy/extensions/filters/http/decompressor/v3/decompressor.pb.validate.h" + +#include "extensions/filters/http/common/factory_base.h" +#include "extensions/filters/http/well_known_names.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace Decompressor { + +/** + * Config registration for the decompressor filter. @see NamedHttpFilterConfigFactory. + */ +class DecompressorFilterFactory + : public Common::FactoryBase { +public: + DecompressorFilterFactory() : FactoryBase(HttpFilterNames::get().Decompressor) {} + +private: + Http::FilterFactoryCb createFilterFactoryFromProtoTyped( + const envoy::extensions::filters::http::decompressor::v3::Decompressor& config, + const std::string& stats_prefix, Server::Configuration::FactoryContext& context) override; +}; + +DECLARE_FACTORY(DecompressorFilterFactory); + +} // namespace Decompressor +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/source/extensions/filters/http/decompressor/decompressor_filter.cc b/source/extensions/filters/http/decompressor/decompressor_filter.cc new file mode 100644 index 0000000000000..62f7526e25980 --- /dev/null +++ b/source/extensions/filters/http/decompressor/decompressor_filter.cc @@ -0,0 +1,154 @@ +#include "extensions/filters/http/decompressor/decompressor_filter.h" + +#include 
"common/buffer/buffer_impl.h" +#include "common/common/empty_string.h" +#include "common/common/macros.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace Decompressor { + +Http::RegisterCustomInlineHeader + accept_encoding_handle(Http::CustomHeaders::get().AcceptEncoding); +Http::RegisterCustomInlineHeader + cache_control_request_handle(Http::CustomHeaders::get().CacheControl); +Http::RegisterCustomInlineHeader + content_encoding_request_handle(Http::CustomHeaders::get().ContentEncoding); +Http::RegisterCustomInlineHeader + cache_control_response_handle(Http::CustomHeaders::get().CacheControl); +Http::RegisterCustomInlineHeader + content_encoding_response_handle(Http::CustomHeaders::get().ContentEncoding); + +DecompressorFilterConfig::DecompressorFilterConfig( + const envoy::extensions::filters::http::decompressor::v3::Decompressor& proto_config, + const std::string& stats_prefix, Stats::Scope& scope, Runtime::Loader& runtime, + Compression::Decompressor::DecompressorFactoryPtr decompressor_factory) + : stats_prefix_(fmt::format("{}decompressor.{}.{}", stats_prefix, + proto_config.decompressor_library().name(), + decompressor_factory->statsPrefix())), + decompressor_stats_prefix_(stats_prefix_ + "decompressor_library"), + decompressor_factory_(std::move(decompressor_factory)), + request_direction_config_(proto_config.request_direction_config(), stats_prefix_, scope, + runtime), + response_direction_config_(proto_config.response_direction_config(), stats_prefix_, scope, + runtime) {} + +DecompressorFilterConfig::DirectionConfig::DirectionConfig( + const envoy::extensions::filters::http::decompressor::v3::Decompressor::CommonDirectionConfig& + proto_config, + const std::string& stats_prefix, Stats::Scope& scope, Runtime::Loader& runtime) + : stats_(generateStats(stats_prefix, scope)), + decompression_enabled_(proto_config.enabled(), runtime) {} + +DecompressorFilterConfig::RequestDirectionConfig::RequestDirectionConfig( + const 
envoy::extensions::filters::http::decompressor::v3::Decompressor::RequestDirectionConfig& + proto_config, + const std::string& stats_prefix, Stats::Scope& scope, Runtime::Loader& runtime) + : DirectionConfig(proto_config.common_config(), stats_prefix + "request.", scope, runtime), + advertise_accept_encoding_( + PROTOBUF_GET_WRAPPED_OR_DEFAULT(proto_config, advertise_accept_encoding, true)) {} + +DecompressorFilterConfig::ResponseDirectionConfig::ResponseDirectionConfig( + const envoy::extensions::filters::http::decompressor::v3::Decompressor::ResponseDirectionConfig& + proto_config, + const std::string& stats_prefix, Stats::Scope& scope, Runtime::Loader& runtime) + : DirectionConfig(proto_config.common_config(), stats_prefix + "response.", scope, runtime) {} + +DecompressorFilter::DecompressorFilter(DecompressorFilterConfigSharedPtr config) + : config_(std::move(config)) {} + +Http::FilterHeadersStatus DecompressorFilter::decodeHeaders(Http::RequestHeaderMap& headers, + bool end_stream) { + // Two responsibilities on the request side: + // 1. If response decompression is enabled (and advertisement is enabled), then advertise to + // the upstream that this hop is able to decompress responses via the Accept-Encoding header. + if (config_->responseDirectionConfig().decompressionEnabled() && + config_->requestDirectionConfig().advertiseAcceptEncoding()) { + headers.appendInline(accept_encoding_handle.handle(), config_->contentEncoding(), ","); + ENVOY_STREAM_LOG(debug, + "DecompressorFilter::decodeHeaders advertise Accept-Encoding with value '{}'", + *decoder_callbacks_, headers.getInlineValue(accept_encoding_handle.handle())); + } + + // Headers-only requests do not, by definition, get decompressed. + if (end_stream) { + return Http::FilterHeadersStatus::Continue; + } + ENVOY_STREAM_LOG(debug, "DecompressorFilter::decodeHeaders: {}", *decoder_callbacks_, headers); + + // 2. Setup request decompression if all checks comply. 
+ return maybeInitDecompress(config_->requestDirectionConfig(), request_decompressor_, + *decoder_callbacks_, headers); +}; + +Http::FilterDataStatus DecompressorFilter::decodeData(Buffer::Instance& data, bool) { + return maybeDecompress(config_->requestDirectionConfig(), request_decompressor_, + *decoder_callbacks_, data); +} + +Http::FilterHeadersStatus DecompressorFilter::encodeHeaders(Http::ResponseHeaderMap& headers, + bool end_stream) { + // Headers only response, continue. + if (end_stream) { + return Http::FilterHeadersStatus::Continue; + } + ENVOY_STREAM_LOG(debug, "DecompressorFilter::encodeHeaders: {}", *encoder_callbacks_, headers); + + return maybeInitDecompress(config_->responseDirectionConfig(), response_decompressor_, + *encoder_callbacks_, headers); +} + +Http::FilterDataStatus DecompressorFilter::encodeData(Buffer::Instance& data, bool) { + return maybeDecompress(config_->responseDirectionConfig(), response_decompressor_, + *encoder_callbacks_, data); +} + +Http::FilterDataStatus DecompressorFilter::maybeDecompress( + const DecompressorFilterConfig::DirectionConfig& direction_config, + const Compression::Decompressor::DecompressorPtr& decompressor, + Http::StreamFilterCallbacks& callbacks, Buffer::Instance& input_buffer) const { + if (decompressor) { + Buffer::OwnedImpl output_buffer; + decompressor->decompress(input_buffer, output_buffer); + + // Report decompression via stats and logging before modifying the input buffer. 
+ direction_config.stats().total_compressed_bytes_.add(input_buffer.length()); + direction_config.stats().total_uncompressed_bytes_.add(output_buffer.length()); + ENVOY_STREAM_LOG(debug, "{} data decompressed from {} bytes to {} bytes", callbacks, + direction_config.logString(), input_buffer.length(), output_buffer.length()); + + input_buffer.drain(input_buffer.length()); + input_buffer.add(output_buffer); + } + return Http::FilterDataStatus::Continue; +} + +template <> +Http::CustomInlineHeaderRegistry::Handle +DecompressorFilter::getCacheControlHandle() { + return cache_control_request_handle.handle(); +} + +template <> +Http::CustomInlineHeaderRegistry::Handle +DecompressorFilter::getCacheControlHandle() { + return cache_control_response_handle.handle(); +} + +template <> +Http::CustomInlineHeaderRegistry::Handle +DecompressorFilter::getContentEncodingHandle() { + return content_encoding_request_handle.handle(); +} + +template <> +Http::CustomInlineHeaderRegistry::Handle +DecompressorFilter::getContentEncodingHandle() { + return content_encoding_response_handle.handle(); +} + +} // namespace Decompressor +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/source/extensions/filters/http/decompressor/decompressor_filter.h b/source/extensions/filters/http/decompressor/decompressor_filter.h new file mode 100644 index 0000000000000..9dabae66f0aaf --- /dev/null +++ b/source/extensions/filters/http/decompressor/decompressor_filter.h @@ -0,0 +1,209 @@ +#pragma once + +#include "envoy/compression/decompressor/config.h" +#include "envoy/compression/decompressor/decompressor.h" +#include "envoy/extensions/filters/http/decompressor/v3/decompressor.pb.h" +#include "envoy/http/filter.h" + +#include "common/common/macros.h" +#include "common/http/headers.h" +#include "common/runtime/runtime_protos.h" + +#include "extensions/filters/http/common/pass_through_filter.h" + +namespace Envoy { +namespace Extensions { 
+namespace HttpFilters { +namespace Decompressor { + +/** + * All decompressor filter stats. @see stats_macros.h + */ +#define ALL_DECOMPRESSOR_STATS(COUNTER) \ + COUNTER(decompressed) \ + COUNTER(not_decompressed) \ + COUNTER(total_uncompressed_bytes) \ + COUNTER(total_compressed_bytes) + +/** + * Struct definition for decompressor stats. @see stats_macros.h + */ +struct DecompressorStats { + ALL_DECOMPRESSOR_STATS(GENERATE_COUNTER_STRUCT) +}; + +/** + * Configuration for the decompressor filter. + */ +class DecompressorFilterConfig { +public: + class DirectionConfig { + public: + DirectionConfig(const envoy::extensions::filters::http::decompressor::v3::Decompressor:: + CommonDirectionConfig& proto_config, + const std::string& stats_prefix, Stats::Scope& scope, Runtime::Loader& runtime); + + virtual ~DirectionConfig() = default; + + virtual const std::string& logString() const PURE; + const DecompressorStats& stats() const { return stats_; } + bool decompressionEnabled() const { return decompression_enabled_.enabled(); } + + private: + static DecompressorStats generateStats(const std::string& prefix, Stats::Scope& scope) { + return DecompressorStats{ALL_DECOMPRESSOR_STATS(POOL_COUNTER_PREFIX(scope, prefix))}; + } + + const DecompressorStats stats_; + const Runtime::FeatureFlag decompression_enabled_; + }; + + class RequestDirectionConfig : public DirectionConfig { + public: + RequestDirectionConfig(const envoy::extensions::filters::http::decompressor::v3::Decompressor:: + RequestDirectionConfig& proto_config, + const std::string& stats_prefix, Stats::Scope& scope, + Runtime::Loader& runtime); + + // DirectionConfig + const std::string& logString() const override { + CONSTRUCT_ON_FIRST_USE(std::string, "request"); + } + + bool advertiseAcceptEncoding() const { return advertise_accept_encoding_; } + + private: + const bool advertise_accept_encoding_; + }; + + class ResponseDirectionConfig : public DirectionConfig { + public: + ResponseDirectionConfig(const 
envoy::extensions::filters::http::decompressor::v3::Decompressor:: + ResponseDirectionConfig& proto_config, + const std::string& stats_prefix, Stats::Scope& scope, + Runtime::Loader& runtime); + + // DirectionConfig + const std::string& logString() const override { + CONSTRUCT_ON_FIRST_USE(std::string, "response"); + } + }; + + DecompressorFilterConfig( + const envoy::extensions::filters::http::decompressor::v3::Decompressor& proto_config, + const std::string& stats_prefix, Stats::Scope& scope, Runtime::Loader& runtime, + Compression::Decompressor::DecompressorFactoryPtr decompressor_factory); + + Compression::Decompressor::DecompressorPtr makeDecompressor() { + return decompressor_factory_->createDecompressor(decompressor_stats_prefix_); + } + const std::string& contentEncoding() { return decompressor_factory_->contentEncoding(); } + const RequestDirectionConfig& requestDirectionConfig() { return request_direction_config_; } + const ResponseDirectionConfig& responseDirectionConfig() { return response_direction_config_; } + +private: + const std::string stats_prefix_; + const std::string decompressor_stats_prefix_; + const Compression::Decompressor::DecompressorFactoryPtr decompressor_factory_; + const RequestDirectionConfig request_direction_config_; + const ResponseDirectionConfig response_direction_config_; +}; + +using DecompressorFilterConfigSharedPtr = std::shared_ptr; + +/** + * A filter that decompresses data bidirectionally. 
+ */ +class DecompressorFilter : public Http::PassThroughFilter, + public Logger::Loggable { +public: + DecompressorFilter(DecompressorFilterConfigSharedPtr config); + + // Http::StreamDecoderFilter + Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap&, bool) override; + Http::FilterDataStatus decodeData(Buffer::Instance&, bool) override; + + // Http::StreamEncoderFilter + Http::FilterHeadersStatus encodeHeaders(Http::ResponseHeaderMap&, bool) override; + Http::FilterDataStatus encodeData(Buffer::Instance&, bool) override; + +private: + template + Http::FilterHeadersStatus + maybeInitDecompress(const DecompressorFilterConfig::DirectionConfig& direction_config, + Compression::Decompressor::DecompressorPtr& decompressor, + Http::StreamFilterCallbacks& callbacks, HeaderType& headers) { + if (direction_config.decompressionEnabled() && !hasCacheControlNoTransform(headers) && + contentEncodingMatches(headers)) { + direction_config.stats().decompressed_.inc(); + decompressor = config_->makeDecompressor(); + + // Update headers. + headers.removeContentLength(); + modifyContentEncoding(headers); + + ENVOY_STREAM_LOG(debug, "do decompress {}: {}", callbacks, direction_config.logString(), + headers); + } else { + direction_config.stats().not_decompressed_.inc(); + ENVOY_STREAM_LOG(debug, "do not decompress {}: {}", callbacks, direction_config.logString(), + headers); + } + + return Http::FilterHeadersStatus::Continue; + } + + Http::FilterDataStatus + maybeDecompress(const DecompressorFilterConfig::DirectionConfig& direction_config, + const Compression::Decompressor::DecompressorPtr& decompressor, + Http::StreamFilterCallbacks& callbacks, Buffer::Instance& input_buffer) const; + + // TODO(junr03): These can be shared between compressor and decompressor. 
+ template + static Http::CustomInlineHeaderRegistry::Handle getCacheControlHandle(); + template static bool hasCacheControlNoTransform(HeaderType& headers) { + const auto handle = getCacheControlHandle(); + return headers.getInline(handle) + ? StringUtil::caseFindToken( + headers.getInlineValue(handle), ",", + Http::CustomHeaders::get().CacheControlValues.NoTransform) + : false; + } + + /** + * Content-Encoding matches if the configured encoding is the first value in the comma-delimited + * Content-Encoding header, regardless of spacing and casing. + */ + template + static Http::CustomInlineHeaderRegistry::Handle getContentEncodingHandle(); + template bool contentEncodingMatches(HeaderType& headers) const { + const auto handle = getContentEncodingHandle(); + if (headers.getInline(handle)) { + absl::string_view coding = + StringUtil::trim(StringUtil::cropRight(headers.getInlineValue(handle), ",")); + return StringUtil::CaseInsensitiveCompare()(config_->contentEncoding(), coding); + } + return false; + } + + template static void modifyContentEncoding(HeaderType& headers) { + const auto handle = getContentEncodingHandle(); + const auto all_codings = StringUtil::trim(headers.getInlineValue(handle)); + const auto remaining_codings = StringUtil::trim(StringUtil::cropLeft(all_codings, ",")); + + if (remaining_codings != all_codings) { + headers.setInline(handle, remaining_codings); + } else { + headers.removeInline(handle); + } + } + + DecompressorFilterConfigSharedPtr config_; + Compression::Decompressor::DecompressorPtr request_decompressor_{}; + Compression::Decompressor::DecompressorPtr response_decompressor_{}; +}; + +} // namespace Decompressor +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/source/extensions/filters/http/dynamic_forward_proxy/BUILD b/source/extensions/filters/http/dynamic_forward_proxy/BUILD index dc85075eb30dc..dc15f124ed780 100644 --- 
a/source/extensions/filters/http/dynamic_forward_proxy/BUILD +++ b/source/extensions/filters/http/dynamic_forward_proxy/BUILD @@ -1,13 +1,13 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +envoy_extension_package() envoy_cc_library( name = "proxy_filter_lib", @@ -15,9 +15,12 @@ envoy_cc_library( hdrs = ["proxy_filter.h"], deps = [ "//include/envoy/http:filter_interface", + "//source/common/runtime:runtime_features_lib", + "//source/extensions/clusters:well_known_names", "//source/extensions/common/dynamic_forward_proxy:dns_cache_interface", "//source/extensions/filters/http:well_known_names", "//source/extensions/filters/http/common:pass_through_filter_lib", + "@envoy_api//envoy/config/cluster/v3:pkg_cc_proto", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/http/dynamic_forward_proxy/v3:pkg_cc_proto", ], @@ -28,7 +31,6 @@ envoy_cc_extension( srcs = ["config.cc"], hdrs = ["config.h"], security_posture = "robust_to_untrusted_downstream", - status = "alpha", deps = [ "//include/envoy/registry", "//include/envoy/server:filter_config_interface", diff --git a/source/extensions/filters/http/dynamic_forward_proxy/config.cc b/source/extensions/filters/http/dynamic_forward_proxy/config.cc index 637711663317b..30c984da4840e 100644 --- a/source/extensions/filters/http/dynamic_forward_proxy/config.cc +++ b/source/extensions/filters/http/dynamic_forward_proxy/config.cc @@ -16,7 +16,7 @@ Http::FilterFactoryCb DynamicForwardProxyFilterFactory::createFilterFactoryFromP const std::string&, Server::Configuration::FactoryContext& context) { Extensions::Common::DynamicForwardProxy::DnsCacheManagerFactoryImpl cache_manager_factory( context.singletonManager(), context.dispatcher(), context.threadLocal(), context.random(), - context.scope()); + context.runtime(), 
context.scope()); ProxyFilterConfigSharedPtr filter_config(std::make_shared( proto_config, cache_manager_factory, context.clusterManager())); return [filter_config](Http::FilterChainFactoryCallbacks& callbacks) -> void { diff --git a/source/extensions/filters/http/dynamic_forward_proxy/proxy_filter.cc b/source/extensions/filters/http/dynamic_forward_proxy/proxy_filter.cc index 47d08e0dcab8d..b41b0cf07d910 100644 --- a/source/extensions/filters/http/dynamic_forward_proxy/proxy_filter.cc +++ b/source/extensions/filters/http/dynamic_forward_proxy/proxy_filter.cc @@ -1,8 +1,12 @@ #include "extensions/filters/http/dynamic_forward_proxy/proxy_filter.h" +#include "envoy/config/cluster/v3/cluster.pb.h" #include "envoy/config/core/v3/base.pb.h" #include "envoy/extensions/filters/http/dynamic_forward_proxy/v3/dynamic_forward_proxy.pb.h" +#include "common/runtime/runtime_features.h" + +#include "extensions/clusters/well_known_names.h" #include "extensions/common/dynamic_forward_proxy/dns_cache.h" #include "extensions/filters/http/well_known_names.h" @@ -16,6 +20,8 @@ struct ResponseStringValues { const std::string PendingRequestOverflow = "Dynamic forward proxy pending request overflow"; }; +using CustomClusterType = envoy::config::cluster::v3::Cluster::CustomClusterType; + using ResponseStrings = ConstSingleton; using LoadDnsCacheEntryStatus = Common::DynamicForwardProxy::DnsCache::LoadDnsCacheEntryStatus; @@ -53,16 +59,36 @@ Http::FilterHeadersStatus ProxyFilter::decodeHeaders(Http::RequestHeaderMap& hea } cluster_info_ = cluster->info(); - auto& resource = cluster_info_->resourceManager(route_entry->priority()).pendingRequests(); - if (!resource.canCreate()) { - ENVOY_STREAM_LOG(debug, "pending request overflow", *decoder_callbacks_); - cluster_info_->stats().upstream_rq_pending_overflow_.inc(); - decoder_callbacks_->sendLocalReply( + // We only need to do DNS lookups for hosts in dynamic forward proxy clusters, + // since the other cluster types do their own DNS 
management. + const absl::optional& cluster_type = cluster_info_->clusterType(); + if (!cluster_type) { + return Http::FilterHeadersStatus::Continue; + } + if (cluster_type->name() != + Envoy::Extensions::Clusters::ClusterTypes::get().DynamicForwardProxy) { + return Http::FilterHeadersStatus::Continue; + } + + const bool should_use_dns_cache_circuit_breakers = + Runtime::runtimeFeatureEnabled("envoy.reloadable_features.enable_dns_cache_circuit_breakers"); + + circuit_breaker_ = config_->cache().canCreateDnsRequest( + !should_use_dns_cache_circuit_breakers + ? absl::make_optional(std::reference_wrapper( + cluster_info_->resourceManager(route_entry->priority()).pendingRequests())) + : absl::nullopt); + + if (circuit_breaker_ == nullptr) { + if (!should_use_dns_cache_circuit_breakers) { + cluster_info_->stats().upstream_rq_pending_overflow_.inc(); + } + ENVOY_STREAM_LOG(debug, "pending request overflow", *this->decoder_callbacks_); + this->decoder_callbacks_->sendLocalReply( Http::Code::ServiceUnavailable, ResponseStrings::get().PendingRequestOverflow, nullptr, absl::nullopt, ResponseStrings::get().PendingRequestOverflow); return Http::FilterHeadersStatus::StopIteration; } - circuit_breaker_ = std::make_unique(resource); uint16_t default_port = 80; if (cluster_info_->transportSocketMatcher() diff --git a/source/extensions/filters/http/dynamo/BUILD b/source/extensions/filters/http/dynamo/BUILD index 9eac6935f3304..c152863819ed0 100644 --- a/source/extensions/filters/http/dynamo/BUILD +++ b/source/extensions/filters/http/dynamo/BUILD @@ -1,16 +1,16 @@ -licenses(["notice"]) # Apache 2 - -# AWS DynamoDB L7 HTTP filter (observability): https://aws.amazon.com/dynamodb/ -# Public docs: docs/root/configuration/http_filters/dynamodb_filter.rst - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +# AWS DynamoDB L7 HTTP filter (observability): 
https://aws.amazon.com/dynamodb/ +# Public docs: docs/root/configuration/http_filters/dynamodb_filter.rst + +envoy_extension_package() envoy_cc_library( name = "dynamo_filter_lib", @@ -61,5 +61,6 @@ envoy_cc_library( ":dynamo_request_parser_lib", "//include/envoy/stats:stats_interface", "//source/common/stats:symbol_table_lib", + "//source/common/stats:utility_lib", ], ) diff --git a/source/extensions/filters/http/dynamo/config.h b/source/extensions/filters/http/dynamo/config.h index 2638b3f76a415..551438e674545 100644 --- a/source/extensions/filters/http/dynamo/config.h +++ b/source/extensions/filters/http/dynamo/config.h @@ -3,6 +3,7 @@ #include #include "envoy/extensions/filters/http/dynamo/v3/dynamo.pb.h" +#include "envoy/extensions/filters/http/dynamo/v3/dynamo.pb.validate.h" #include "envoy/server/filter_config.h" #include "extensions/filters/http/common/factory_base.h" diff --git a/source/extensions/filters/http/dynamo/dynamo_stats.cc b/source/extensions/filters/http/dynamo/dynamo_stats.cc index 468c77f0a9592..06f3770a688e6 100644 --- a/source/extensions/filters/http/dynamo/dynamo_stats.cc +++ b/source/extensions/filters/http/dynamo/dynamo_stats.cc @@ -46,25 +46,21 @@ DynamoStats::DynamoStats(Stats::Scope& scope, const std::string& prefix) stat_name_set_->rememberBuiltins({"operation", "table"}); } -Stats::SymbolTable::StoragePtr DynamoStats::addPrefix(const Stats::StatNameVec& names) { - Stats::StatNameVec names_with_prefix; +Stats::ElementVec DynamoStats::addPrefix(const Stats::ElementVec& names) { + Stats::ElementVec names_with_prefix; names_with_prefix.reserve(1 + names.size()); names_with_prefix.push_back(prefix_); names_with_prefix.insert(names_with_prefix.end(), names.begin(), names.end()); - return scope_.symbolTable().join(names_with_prefix); + return names_with_prefix; } -void DynamoStats::incCounter(const Stats::StatNameVec& names) { - const Stats::SymbolTable::StoragePtr stat_name_storage = addPrefix(names); - 
scope_.counterFromStatName(Stats::StatName(stat_name_storage.get())).inc(); +void DynamoStats::incCounter(const Stats::ElementVec& names) { + Stats::Utility::counterFromElements(scope_, addPrefix(names)).inc(); } -void DynamoStats::recordHistogram(const Stats::StatNameVec& names, Stats::Histogram::Unit unit, +void DynamoStats::recordHistogram(const Stats::ElementVec& names, Stats::Histogram::Unit unit, uint64_t value) { - const Stats::SymbolTable::StoragePtr stat_name_storage = addPrefix(names); - Stats::Histogram& histogram = - scope_.histogramFromStatName(Stats::StatName(stat_name_storage.get()), unit); - histogram.recordValue(value); + Stats::Utility::histogramFromElements(scope_, addPrefix(names), unit).recordValue(value); } Stats::Counter& DynamoStats::buildPartitionStatCounter(const std::string& table_name, @@ -72,12 +68,11 @@ Stats::Counter& DynamoStats::buildPartitionStatCounter(const std::string& table_ const std::string& partition_id) { // Use the last 7 characters of the partition id. 
absl::string_view id_last_7 = absl::string_view(partition_id).substr(partition_id.size() - 7); - Stats::StatNameDynamicPool dynamic(scope_.symbolTable()); - const Stats::StatName partition = dynamic.add(absl::StrCat("__partition_id=", id_last_7)); - const Stats::SymbolTable::StoragePtr stat_name_storage = - addPrefix({table_, dynamic.add(table_name), capacity_, - getBuiltin(operation, unknown_operation_), partition}); - return scope_.counterFromStatName(Stats::StatName(stat_name_storage.get())); + std::string partition = absl::StrCat("__partition_id=", id_last_7); + return Stats::Utility::counterFromElements( + scope_, + addPrefix({table_, Stats::DynamicName(table_name), capacity_, + getBuiltin(operation, unknown_operation_), Stats::DynamicName(partition)})); } size_t DynamoStats::groupIndex(uint64_t status) { diff --git a/source/extensions/filters/http/dynamo/dynamo_stats.h b/source/extensions/filters/http/dynamo/dynamo_stats.h index 4241ec5dd711b..48399e4f4d23b 100644 --- a/source/extensions/filters/http/dynamo/dynamo_stats.h +++ b/source/extensions/filters/http/dynamo/dynamo_stats.h @@ -6,6 +6,7 @@ #include "envoy/stats/scope.h" #include "common/stats/symbol_table_impl.h" +#include "common/stats/utility.h" namespace Envoy { namespace Extensions { @@ -16,9 +17,8 @@ class DynamoStats { public: DynamoStats(Stats::Scope& scope, const std::string& prefix); - void incCounter(const Stats::StatNameVec& names); - void recordHistogram(const Stats::StatNameVec& names, Stats::Histogram::Unit unit, - uint64_t value); + void incCounter(const Stats::ElementVec& names); + void recordHistogram(const Stats::ElementVec& names, Stats::Histogram::Unit unit, uint64_t value); /** * Creates the partition id stats string. 
The stats format is @@ -42,7 +42,7 @@ class DynamoStats { Stats::SymbolTable& symbolTable() { return scope_.symbolTable(); } private: - Stats::SymbolTable::StoragePtr addPrefix(const Stats::StatNameVec& names); + Stats::ElementVec addPrefix(const Stats::ElementVec& names); Stats::Scope& scope_; Stats::StatNameSetPtr stat_name_set_; diff --git a/source/extensions/filters/http/ext_authz/BUILD b/source/extensions/filters/http/ext_authz/BUILD index 1af4bcf28687f..0d789c30c0489 100644 --- a/source/extensions/filters/http/ext_authz/BUILD +++ b/source/extensions/filters/http/ext_authz/BUILD @@ -1,16 +1,16 @@ -licenses(["notice"]) # Apache 2 - -# External authorization L7 HTTP filter -# Public docs: TODO(saumoh): Docs needed in docs/root/configuration/http_filters - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +# External authorization L7 HTTP filter +# Public docs: TODO(saumoh): Docs needed in docs/root/configuration/http_filters + +envoy_extension_package() envoy_cc_library( name = "ext_authz", diff --git a/source/extensions/filters/http/ext_authz/config.cc b/source/extensions/filters/http/ext_authz/config.cc index 1d994268cbe84..f5808ee7fdf5a 100644 --- a/source/extensions/filters/http/ext_authz/config.cc +++ b/source/extensions/filters/http/ext_authz/config.cc @@ -37,7 +37,7 @@ Http::FilterFactoryCb ExtAuthzFilterConfig::createFilterFactoryFromProtoTyped( callback = [filter_config, client_config, &context](Http::FilterChainFactoryCallbacks& callbacks) { auto client = std::make_unique( - context.clusterManager(), client_config, context.timeSource()); + context.clusterManager(), client_config); callbacks.addStreamDecoderFilter(Http::StreamDecoderFilterSharedPtr{ std::make_shared(filter_config, std::move(client))}); }; @@ -46,13 +46,15 @@ Http::FilterFactoryCb ExtAuthzFilterConfig::createFilterFactoryFromProtoTyped( const uint32_t 
timeout_ms = PROTOBUF_GET_MS_OR_DEFAULT(proto_config.grpc_service(), timeout, DefaultTimeout); callback = [grpc_service = proto_config.grpc_service(), &context, filter_config, timeout_ms, + transport_api_version = proto_config.transport_api_version(), use_alpha = proto_config.hidden_envoy_deprecated_use_alpha()]( Http::FilterChainFactoryCallbacks& callbacks) { const auto async_client_factory = context.clusterManager().grpcAsyncClientManager().factoryForGrpcService( grpc_service, context.scope(), true); auto client = std::make_unique( - async_client_factory->create(), std::chrono::milliseconds(timeout_ms), use_alpha); + async_client_factory->create(), std::chrono::milliseconds(timeout_ms), + transport_api_version, use_alpha); callbacks.addStreamDecoderFilter(Http::StreamDecoderFilterSharedPtr{ std::make_shared(filter_config, std::move(client))}); }; diff --git a/source/extensions/filters/http/ext_authz/ext_authz.cc b/source/extensions/filters/http/ext_authz/ext_authz.cc index e3f754e078413..176c168d6e65d 100644 --- a/source/extensions/filters/http/ext_authz/ext_authz.cc +++ b/source/extensions/filters/http/ext_authz/ext_authz.cc @@ -49,7 +49,7 @@ void Filter::initiateCall(const Http::RequestHeaderMap& headers, context_extensions = maybe_merged_per_route_config.value().takeContextExtensions(); } - // If metadata_context_namespaces is specified, pass matching metadata to the ext_authz service + // If metadata_context_namespaces is specified, pass matching metadata to the ext_authz service. 
envoy::config::core::v3::Metadata metadata_context; const auto& request_metadata = callbacks_->streamInfo().dynamicMetadata().filter_metadata(); for (const auto& context_key : config_->metadataContextNamespaces()) { @@ -78,6 +78,17 @@ Http::FilterHeadersStatus Filter::decodeHeaders(Http::RequestHeaderMap& headers, skip_check_ = skipCheckForRoute(route); if (!config_->filterEnabled() || skip_check_) { + if (skip_check_) { + return Http::FilterHeadersStatus::Continue; + } + if (config_->denyAtDisable()) { + ENVOY_STREAM_LOG(trace, "ext_authz filter is disabled. Deny the request.", *callbacks_); + callbacks_->streamInfo().setResponseFlag( + StreamInfo::ResponseFlag::UnauthorizedExternalService); + callbacks_->sendLocalReply(config_->statusOnError(), EMPTY_STRING, nullptr, absl::nullopt, + RcDetails::get().AuthzError); + return Http::FilterHeadersStatus::StopIteration; + } return Http::FilterHeadersStatus::Continue; } @@ -155,21 +166,42 @@ void Filter::onComplete(Filters::Common::ExtAuthz::ResponsePtr&& response) { case CheckStatus::OK: { ENVOY_STREAM_LOG(trace, "ext_authz filter added header(s) to the request:", *callbacks_); if (config_->clearRouteCache() && - (!response->headers_to_add.empty() || !response->headers_to_append.empty())) { + (!response->headers_to_set.empty() || !response->headers_to_append.empty())) { ENVOY_STREAM_LOG(debug, "ext_authz is clearing route cache", *callbacks_); callbacks_->clearRouteCache(); } - for (const auto& header : response->headers_to_add) { + for (const auto& header : response->headers_to_set) { ENVOY_STREAM_LOG(trace, "'{}':'{}'", *callbacks_, header.first.get(), header.second); request_headers_->setCopy(header.first, header.second); } + for (const auto& header : response->headers_to_add) { + ENVOY_STREAM_LOG(trace, "'{}':'{}'", *callbacks_, header.first.get(), header.second); + request_headers_->addCopy(header.first, header.second); + } for (const auto& header : response->headers_to_append) { const Http::HeaderEntry* 
header_to_modify = request_headers_->get(header.first); - if (header_to_modify) { + // TODO(dio): Add a flag to allow appending non-existent headers, without setting it first + // (via `headers_to_add`). For example, given: + // 1. Original headers {"original": "true"} + // 2. Response headers from the authorization servers {{"append": "1"}, {"append": "2"}} + // + // Currently it is not possible to add {{"append": "1"}, {"append": "2"}} (the intended + // combined headers: {{"original": "true"}, {"append": "1"}, {"append": "2"}}) to the request + // to upstream server by only sets `headers_to_append`. + if (header_to_modify != nullptr) { ENVOY_STREAM_LOG(trace, "'{}':'{}'", *callbacks_, header.first.get(), header.second); + // The current behavior of appending is by combining entries with the same key, into one + // entry. The value of that combined entry is separated by ",". + // TODO(dio): Consider to use addCopy instead. request_headers_->appendCopy(header.first, header.second); } } + + if (!response->dynamic_metadata.fields().empty()) { + callbacks_->streamInfo().setDynamicMetadata(HttpFilterNames::get().ExtAuthorization, + response->dynamic_metadata); + } + if (cluster_) { config_->incCounter(cluster_->statsScope(), config_->ext_authz_ok_); } @@ -201,17 +233,17 @@ void Filter::onComplete(Filters::Common::ExtAuthz::ResponsePtr&& response) { callbacks_->sendLocalReply( response->status_code, response->body, - [& headers = response->headers_to_add, + [&headers = response->headers_to_set, &callbacks = *callbacks_](Http::HeaderMap& response_headers) -> void { ENVOY_STREAM_LOG(trace, "ext_authz filter added header(s) to the local response:", callbacks); - // First remove all headers requested by the ext_authz filter, - // to ensure that they will override existing headers + // Firstly, remove all headers requested by the ext_authz filter, to ensure that they will + // override existing headers. 
for (const auto& header : headers) { response_headers.remove(header.first); } - // Then set all of the requested headers, allowing the - // same header to be set multiple times, e.g. `Set-Cookie` + // Then set all of the requested headers, allowing the same header to be set multiple + // times, e.g. `Set-Cookie`. for (const auto& header : headers) { ENVOY_STREAM_LOG(trace, " '{}':'{}'", callbacks, header.first.get(), header.second); response_headers.addCopy(header.first, header.second); diff --git a/source/extensions/filters/http/ext_authz/ext_authz.h b/source/extensions/filters/http/ext_authz/ext_authz.h index 56ce8b5a30743..14b52ffd776ad 100644 --- a/source/extensions/filters/http/ext_authz/ext_authz.h +++ b/source/extensions/filters/http/ext_authz/ext_authz.h @@ -58,19 +58,22 @@ struct ExtAuthzFilterStats { class FilterConfig { public: FilterConfig(const envoy::extensions::filters::http::ext_authz::v3::ExtAuthz& config, - const LocalInfo::LocalInfo& local_info, Stats::Scope& scope, - Runtime::Loader& runtime, Http::Context& http_context, - const std::string& stats_prefix) + const LocalInfo::LocalInfo&, Stats::Scope& scope, Runtime::Loader& runtime, + Http::Context& http_context, const std::string& stats_prefix) : allow_partial_message_(config.with_request_body().allow_partial_message()), failure_mode_allow_(config.failure_mode_allow()), clear_route_cache_(config.clear_route_cache()), max_request_bytes_(config.with_request_body().max_request_bytes()), - status_on_error_(toErrorCode(config.status_on_error().code())), local_info_(local_info), - scope_(scope), runtime_(runtime), http_context_(http_context), + status_on_error_(toErrorCode(config.status_on_error().code())), scope_(scope), + runtime_(runtime), http_context_(http_context), filter_enabled_(config.has_filter_enabled() ? absl::optional( Runtime::FractionalPercent(config.filter_enabled(), runtime_)) : absl::nullopt), + deny_at_disable_(config.has_deny_at_disable() + ? 
absl::optional( + Runtime::FeatureFlag(config.deny_at_disable(), runtime_)) + : absl::nullopt), pool_(scope_.symbolTable()), metadata_context_namespaces_(config.metadata_context_namespaces().begin(), config.metadata_context_namespaces().end()), @@ -90,13 +93,13 @@ class FilterConfig { uint32_t maxRequestBytes() const { return max_request_bytes_; } - const LocalInfo::LocalInfo& localInfo() const { return local_info_; } - Http::Code statusOnError() const { return status_on_error_; } bool filterEnabled() { return filter_enabled_.has_value() ? filter_enabled_->enabled() : true; } - Runtime::Loader& runtime() { return runtime_; } + bool denyAtDisable() { + return deny_at_disable_.has_value() ? deny_at_disable_->enabled() : false; + } Stats::Scope& scope() { return scope_; } @@ -133,12 +136,12 @@ class FilterConfig { const bool clear_route_cache_; const uint32_t max_request_bytes_; const Http::Code status_on_error_; - const LocalInfo::LocalInfo& local_info_; Stats::Scope& scope_; Runtime::Loader& runtime_; Http::Context& http_context_; const absl::optional filter_enabled_; + const absl::optional deny_at_disable_; // TODO(nezdolik): stop using pool as part of deprecating cluster scope stats. 
Stats::StatNamePool pool_; diff --git a/source/extensions/filters/http/fault/BUILD b/source/extensions/filters/http/fault/BUILD index 3c6e1775235db..a518d60f37e13 100644 --- a/source/extensions/filters/http/fault/BUILD +++ b/source/extensions/filters/http/fault/BUILD @@ -1,16 +1,16 @@ -licenses(["notice"]) # Apache 2 - -# HTTP L7 filter that injects faults into the request flow -# Public docs: docs/root/configuration/http_filters/fault_filter.rst - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +# HTTP L7 filter that injects faults into the request flow +# Public docs: docs/root/configuration/http_filters/fault_filter.rst + +envoy_extension_package() envoy_cc_library( name = "fault_filter_lib", @@ -33,6 +33,7 @@ envoy_cc_library( "//source/common/http:header_utility_lib", "//source/common/http:headers_lib", "//source/common/protobuf:utility_lib", + "//source/common/stats:utility_lib", "//source/extensions/filters/common/fault:fault_config_lib", "//source/extensions/filters/http:well_known_names", "@envoy_api//envoy/extensions/filters/http/fault/v3:pkg_cc_proto", diff --git a/source/extensions/filters/http/fault/fault_filter.cc b/source/extensions/filters/http/fault/fault_filter.cc index f3e277edfe6b7..245f44b98a562 100644 --- a/source/extensions/filters/http/fault/fault_filter.cc +++ b/source/extensions/filters/http/fault/fault_filter.cc @@ -19,6 +19,7 @@ #include "common/http/headers.h" #include "common/http/utility.h" #include "common/protobuf/utility.h" +#include "common/stats/utility.h" #include "extensions/filters/http/well_known_names.h" @@ -43,6 +44,8 @@ FaultSettings::FaultSettings(const envoy::extensions::filters::http::fault::v3:: RuntimeKeys::get().DelayDurationKey)), abort_http_status_runtime_(PROTOBUF_GET_STRING_OR_DEFAULT( fault, abort_http_status_runtime, RuntimeKeys::get().AbortHttpStatusKey)), + 
abort_grpc_status_runtime_(PROTOBUF_GET_STRING_OR_DEFAULT( + fault, abort_grpc_status_runtime, RuntimeKeys::get().AbortGrpcStatusKey)), max_active_faults_runtime_(PROTOBUF_GET_STRING_OR_DEFAULT( fault, max_active_faults_runtime, RuntimeKeys::get().MaxActiveFaultsKey)), response_rate_limit_percent_runtime_( @@ -85,9 +88,8 @@ FaultFilterConfig::FaultFilterConfig( stats_prefix_(stat_name_set_->add(absl::StrCat(stats_prefix, "fault"))) {} void FaultFilterConfig::incCounter(Stats::StatName downstream_cluster, Stats::StatName stat_name) { - Stats::SymbolTable::StoragePtr storage = - scope_.symbolTable().join({stats_prefix_, downstream_cluster, stat_name}); - scope_.counterFromStatName(Stats::StatName(storage.get())).inc(); + Stats::Utility::counterFromStatNames(scope_, {stats_prefix_, downstream_cluster, stat_name}) + .inc(); } FaultFilter::FaultFilter(FaultFilterConfigSharedPtr config) : config_(config) {} @@ -134,8 +136,7 @@ Http::FilterHeadersStatus FaultFilter::decodeHeaders(Http::RequestHeaderMap& hea } if (headers.EnvoyDownstreamServiceCluster()) { - downstream_cluster_ = - std::string(headers.EnvoyDownstreamServiceCluster()->value().getStringView()); + downstream_cluster_ = std::string(headers.getEnvoyDownstreamServiceClusterValue()); if (!downstream_cluster_.empty()) { downstream_cluster_storage_ = std::make_unique( downstream_cluster_, config_->scope().symbolTable()); @@ -149,6 +150,8 @@ Http::FilterHeadersStatus FaultFilter::decodeHeaders(Http::RequestHeaderMap& hea fmt::format("fault.http.{}.delay.fixed_duration_ms", downstream_cluster_); downstream_cluster_abort_http_status_key_ = fmt::format("fault.http.{}.abort.http_status", downstream_cluster_); + downstream_cluster_abort_grpc_status_key_ = + fmt::format("fault.http.{}.abort.grpc_status", downstream_cluster_); } maybeSetupResponseRateLimit(headers); @@ -164,9 +167,12 @@ Http::FilterHeadersStatus FaultFilter::decodeHeaders(Http::RequestHeaderMap& hea return Http::FilterHeadersStatus::StopIteration; } - 
const auto abort_code = abortHttpStatus(headers); - if (abort_code.has_value()) { - abortWithHTTPStatus(abort_code.value()); + absl::optional http_status; + absl::optional grpc_status; + std::tie(http_status, grpc_status) = abortStatus(headers); + + if (http_status.has_value()) { + abortWithStatus(http_status.value(), grpc_status); return Http::FilterHeadersStatus::StopIteration; } @@ -284,29 +290,64 @@ FaultFilter::delayDuration(const Http::RequestHeaderMap& request_headers) { return ret; } -absl::optional -FaultFilter::abortHttpStatus(const Http::RequestHeaderMap& request_headers) { +AbortHttpAndGrpcStatus FaultFilter::abortStatus(const Http::RequestHeaderMap& request_headers) { if (!isAbortEnabled(request_headers)) { - return absl::nullopt; + return AbortHttpAndGrpcStatus{absl::nullopt, absl::nullopt}; + } + + auto http_status = abortHttpStatus(request_headers); + // If http status code is set, then gRPC status won't be used. + if (http_status.has_value()) { + return AbortHttpAndGrpcStatus{http_status, absl::nullopt}; } + auto grpc_status = abortGrpcStatus(request_headers); + // If gRPC status code is set, then http status will be set to Http::Code::OK (200) + if (grpc_status.has_value()) { + return AbortHttpAndGrpcStatus{Http::Code::OK, grpc_status}; + } + + return AbortHttpAndGrpcStatus{absl::nullopt, absl::nullopt}; +} + +absl::optional +FaultFilter::abortHttpStatus(const Http::RequestHeaderMap& request_headers) { // See if the configured abort provider has a default status code, if not there is no abort status // code (e.g., header configuration and no/invalid header). 
- const auto config_abort = fault_settings_->requestAbort()->statusCode(&request_headers); - if (!config_abort.has_value()) { + auto http_status = fault_settings_->requestAbort()->httpStatusCode(&request_headers); + if (!http_status.has_value()) { return absl::nullopt; } - auto status_code = static_cast(config_abort.value()); - auto code = static_cast(config_->runtime().snapshot().getInteger( - fault_settings_->abortHttpStatusRuntime(), status_code)); + auto default_http_status_code = static_cast(http_status.value()); + auto runtime_http_status_code = config_->runtime().snapshot().getInteger( + fault_settings_->abortHttpStatusRuntime(), default_http_status_code); if (!downstream_cluster_abort_http_status_key_.empty()) { - code = static_cast(config_->runtime().snapshot().getInteger( - downstream_cluster_abort_http_status_key_, status_code)); + runtime_http_status_code = config_->runtime().snapshot().getInteger( + downstream_cluster_abort_http_status_key_, default_http_status_code); } - return code; + return static_cast(runtime_http_status_code); +} + +absl::optional +FaultFilter::abortGrpcStatus(const Http::RequestHeaderMap& request_headers) { + auto grpc_status = fault_settings_->requestAbort()->grpcStatusCode(&request_headers); + if (!grpc_status.has_value()) { + return absl::nullopt; + } + + auto default_grpc_status_code = static_cast(grpc_status.value()); + auto runtime_grpc_status_code = config_->runtime().snapshot().getInteger( + fault_settings_->abortGrpcStatusRuntime(), default_grpc_status_code); + + if (!downstream_cluster_abort_grpc_status_key_.empty()) { + runtime_grpc_status_code = config_->runtime().snapshot().getInteger( + downstream_cluster_abort_grpc_status_key_, default_grpc_status_code); + } + + return static_cast(runtime_grpc_status_code); } void FaultFilter::recordDelaysInjectedStats() { @@ -375,20 +416,24 @@ void FaultFilter::postDelayInjection(const Http::RequestHeaderMap& request_heade resetTimerState(); // Delays can be followed by aborts - 
const auto abort_code = abortHttpStatus(request_headers); - if (abort_code.has_value()) { - abortWithHTTPStatus(abort_code.value()); + absl::optional http_status; + absl::optional grpc_status; + std::tie(http_status, grpc_status) = abortStatus(request_headers); + + if (http_status.has_value()) { + abortWithStatus(http_status.value(), grpc_status); } else { // Continue request processing. decoder_callbacks_->continueDecoding(); } } -void FaultFilter::abortWithHTTPStatus(Http::Code abort_code) { +void FaultFilter::abortWithStatus(Http::Code http_status_code, + absl::optional grpc_status) { + recordAbortsInjectedStats(); decoder_callbacks_->streamInfo().setResponseFlag(StreamInfo::ResponseFlag::FaultInjected); - decoder_callbacks_->sendLocalReply(abort_code, "fault filter abort", nullptr, absl::nullopt, + decoder_callbacks_->sendLocalReply(http_status_code, "fault filter abort", nullptr, grpc_status, RcDetails::get().FaultAbort); - recordAbortsInjectedStats(); } bool FaultFilter::matchesTargetUpstreamCluster() { @@ -412,8 +457,7 @@ bool FaultFilter::matchesDownstreamNodes(const Http::RequestHeaderMap& headers) return false; } - const absl::string_view downstream_node = - headers.EnvoyDownstreamServiceNode()->value().getStringView(); + const absl::string_view downstream_node = headers.getEnvoyDownstreamServiceNodeValue(); return fault_settings_->downstreamNodes().find(downstream_node) != fault_settings_->downstreamNodes().end(); } @@ -456,7 +500,8 @@ StreamRateLimiter::StreamRateLimiter(uint64_t max_kbps, uint64_t max_buffered_da // ~63ms intervals. 
token_bucket_(SecondDivisor, time_source, SecondDivisor), token_timer_(dispatcher.createTimer([this] { onTokenTimer(); })), - buffer_(resume_data_cb, pause_data_cb) { + buffer_(resume_data_cb, pause_data_cb, + []() -> void { /* TODO(adisuissa): Handle overflow watermark */ }) { ASSERT(bytes_per_time_slice_ > 0); ASSERT(max_buffered_data > 0); buffer_.setWatermarks(max_buffered_data); diff --git a/source/extensions/filters/http/fault/fault_filter.h b/source/extensions/filters/http/fault/fault_filter.h index bdbcbd9752821..206a8134c72cd 100644 --- a/source/extensions/filters/http/fault/fault_filter.h +++ b/source/extensions/filters/http/fault/fault_filter.h @@ -3,7 +3,6 @@ #include #include #include -#include #include #include "envoy/extensions/filters/http/fault/v3/fault.pb.h" @@ -67,6 +66,7 @@ class FaultSettings : public Router::RouteSpecificFilterConfig { const std::string& abortPercentRuntime() const { return abort_percent_runtime_; } const std::string& delayPercentRuntime() const { return delay_percent_runtime_; } const std::string& abortHttpStatusRuntime() const { return abort_http_status_runtime_; } + const std::string& abortGrpcStatusRuntime() const { return abort_grpc_status_runtime_; } const std::string& delayDurationRuntime() const { return delay_duration_runtime_; } const std::string& maxActiveFaultsRuntime() const { return max_active_faults_runtime_; } const std::string& responseRateLimitPercentRuntime() const { @@ -80,6 +80,7 @@ class FaultSettings : public Router::RouteSpecificFilterConfig { const std::string AbortPercentKey = "fault.http.abort.abort_percent"; const std::string DelayDurationKey = "fault.http.delay.fixed_duration_ms"; const std::string AbortHttpStatusKey = "fault.http.abort.http_status"; + const std::string AbortGrpcStatusKey = "fault.http.abort.grpc_status"; const std::string MaxActiveFaultsKey = "fault.http.max_active_faults"; const std::string ResponseRateLimitPercentKey = "fault.http.rate_limit.response_percent"; }; @@ -98,6 +99,7 
@@ class FaultSettings : public Router::RouteSpecificFilterConfig { const std::string abort_percent_runtime_; const std::string delay_duration_runtime_; const std::string abort_http_status_runtime_; + const std::string abort_grpc_status_runtime_; const std::string max_active_faults_runtime_; const std::string response_rate_limit_percent_runtime_; }; @@ -203,6 +205,8 @@ class StreamRateLimiter : Logger::Loggable { Buffer::WatermarkBuffer buffer_; }; +using AbortHttpAndGrpcStatus = + std::pair, absl::optional>; /** * A filter that is capable of faulting an entire request before dispatching it upstream. */ @@ -245,7 +249,8 @@ class FaultFilter : public Http::StreamFilter, Logger::Loggable grpc_status_code); bool matchesTargetUpstreamCluster(); bool matchesDownstreamNodes(const Http::RequestHeaderMap& headers); bool isAbortEnabled(const Http::RequestHeaderMap& request_headers); @@ -253,7 +258,10 @@ class FaultFilter : public Http::StreamFilter, Logger::Loggable delayDuration(const Http::RequestHeaderMap& request_headers); + AbortHttpAndGrpcStatus abortStatus(const Http::RequestHeaderMap& request_headers); absl::optional abortHttpStatus(const Http::RequestHeaderMap& request_headers); + absl::optional + abortGrpcStatus(const Http::RequestHeaderMap& request_headers); void maybeIncActiveFaults(); void maybeSetupResponseRateLimit(const Http::RequestHeaderMap& request_headers); @@ -270,6 +278,7 @@ class FaultFilter : public Http::StreamFilter, Logger::Loggable + accept_handle(Http::CustomHeaders::get().Accept); + struct RcDetailsValues { // The gRPC HTTP/1 reverse bridge failed because the body payload was too // small to be a gRPC frame. 
@@ -43,21 +46,23 @@ std::string badContentTypeMessage(const Http::ResponseHeaderMap& headers) { if (headers.ContentType() != nullptr) { return fmt::format( "envoy reverse bridge: upstream responded with unsupported content-type {}, status code {}", - headers.ContentType()->value().getStringView(), headers.Status()->value().getStringView()); + headers.getContentTypeValue(), headers.getStatusValue()); } else { return fmt::format( "envoy reverse bridge: upstream responded with no content-type header, status code {}", - headers.Status()->value().getStringView()); + headers.getStatusValue()); } } void adjustContentLength(Http::RequestOrResponseHeaderMap& headers, const std::function& adjustment) { - auto length_header = headers.ContentLength(); - if (length_header != nullptr) { + auto length_header = headers.getContentLengthValue(); + if (!length_header.empty()) { uint64_t length; - if (absl::SimpleAtoi(length_header->value().getStringView(), &length)) { - headers.setContentLength(adjustment(length)); + if (absl::SimpleAtoi(length_header, &length)) { + if (length != 0) { + headers.setContentLength(adjustment(length)); + } } } } @@ -84,14 +89,14 @@ Http::FilterHeadersStatus Filter::decodeHeaders(Http::RequestHeaderMap& headers, // If this is a gRPC request we: // - mark this request as being gRPC // - change the content-type to application/x-protobuf - if (Envoy::Grpc::Common::hasGrpcContentType(headers)) { + if (Envoy::Grpc::Common::isGrpcRequestHeaders(headers)) { enabled_ = true; // We keep track of the original content-type to ensure that we handle // gRPC content type variations such as application/grpc+proto. 
- content_type_ = std::string(headers.ContentType()->value().getStringView()); + content_type_ = std::string(headers.getContentTypeValue()); headers.setContentType(upstream_content_type_); - headers.setAccept(upstream_content_type_); + headers.setInline(accept_handle.handle(), upstream_content_type_); if (withhold_grpc_frames_) { // Adjust the content-length header to account for us removing the gRPC frame header. @@ -126,17 +131,16 @@ Http::FilterDataStatus Filter::decodeData(Buffer::Instance& buffer, bool) { Http::FilterHeadersStatus Filter::encodeHeaders(Http::ResponseHeaderMap& headers, bool) { if (enabled_) { - auto content_type = headers.ContentType(); + absl::string_view content_type = headers.getContentTypeValue(); // If the response from upstream does not have the correct content-type, // perform an early return with a useful error message in grpc-message. - if (content_type == nullptr || - content_type->value().getStringView() != upstream_content_type_) { + if (content_type != upstream_content_type_) { headers.setGrpcMessage(badContentTypeMessage(headers)); headers.setGrpcStatus(Envoy::Grpc::Status::WellKnownGrpcStatus::Unknown); headers.setStatus(enumToInt(Http::Code::OK)); - if (content_type != nullptr) { + if (!content_type.empty()) { headers.setContentType(content_type_); } @@ -191,6 +195,10 @@ Http::FilterDataStatus Filter::encodeData(Buffer::Instance& buffer, bool end_str } Http::FilterTrailersStatus Filter::encodeTrailers(Http::ResponseTrailerMap& trailers) { + if (!enabled_) { + return Http::FilterTrailersStatus::Continue; + } + trailers.setGrpcStatus(grpc_status_); if (withhold_grpc_frames_) { diff --git a/source/extensions/filters/http/grpc_http1_reverse_bridge/filter.h b/source/extensions/filters/http/grpc_http1_reverse_bridge/filter.h index 8a518783bf5d6..12707aac9f6c4 100644 --- a/source/extensions/filters/http/grpc_http1_reverse_bridge/filter.h +++ b/source/extensions/filters/http/grpc_http1_reverse_bridge/filter.h @@ -1,5 +1,6 @@ #pragma 
once +#include #include #include "envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3/config.pb.h" @@ -48,6 +49,8 @@ class Filter : public Envoy::Http::PassThroughFilter { Buffer::OwnedImpl buffer_{}; }; +using FilterPtr = std::unique_ptr; + class FilterConfigPerRoute : public Router::RouteSpecificFilterConfig { public: FilterConfigPerRoute( diff --git a/source/extensions/filters/http/grpc_json_transcoder/BUILD b/source/extensions/filters/http/grpc_json_transcoder/BUILD index ca2ce1749d40a..88429fc0bfc71 100644 --- a/source/extensions/filters/http/grpc_json_transcoder/BUILD +++ b/source/extensions/filters/http/grpc_json_transcoder/BUILD @@ -1,16 +1,16 @@ -licenses(["notice"]) # Apache 2 - -# L7 HTTP filter that implements binary gRPC to JSON transcoding -# Public docs: docs/root/configuration/http_filters/grpc_json_transcoder_filter.rst - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +# L7 HTTP filter that implements binary gRPC to JSON transcoding +# Public docs: docs/root/configuration/http_filters/grpc_json_transcoder_filter.rst + +envoy_extension_package() envoy_cc_library( name = "json_transcoder_filter_lib", diff --git a/source/extensions/filters/http/grpc_json_transcoder/http_body_utils.cc b/source/extensions/filters/http/grpc_json_transcoder/http_body_utils.cc index e516a7f2d567e..5e55b57482c8a 100644 --- a/source/extensions/filters/http/grpc_json_transcoder/http_body_utils.cc +++ b/source/extensions/filters/http/grpc_json_transcoder/http_body_utils.cc @@ -2,23 +2,70 @@ #include "google/api/httpbody.pb.h" +using Envoy::Protobuf::io::CodedInputStream; using Envoy::Protobuf::io::CodedOutputStream; using Envoy::Protobuf::io::StringOutputStream; +using Envoy::Protobuf::io::ZeroCopyInputStream; namespace Envoy { namespace Extensions { namespace HttpFilters { namespace GrpcJsonTranscoder { +namespace { + +// Embedded 
messages are treated the same way as strings (wire type 2). +constexpr uint32_t ProtobufLengthDelimitedField = 2; + +bool parseMessageByFieldPath(CodedInputStream* input, + absl::Span field_path, + Protobuf::Message* message) { + if (field_path.empty()) { + return message->MergeFromCodedStream(input); + } + + const uint32_t expected_tag = (field_path.front()->number() << 3) | ProtobufLengthDelimitedField; + for (;;) { + const uint32_t tag = input->ReadTag(); + if (tag == expected_tag) { + uint32_t length = 0; + if (!input->ReadVarint32(&length)) { + return false; + } + auto limit = input->IncrementRecursionDepthAndPushLimit(length); + if (!parseMessageByFieldPath(input, field_path.subspan(1), message)) { + return false; + } + if (!input->DecrementRecursionDepthAndPopLimit(limit.first)) { + return false; + } + } else if (tag == 0) { + return true; + } else { + if (!Protobuf::internal::WireFormatLite::SkipField(input, tag)) { + return false; + } + } + } +} +} // namespace + +bool HttpBodyUtils::parseMessageByFieldPath(ZeroCopyInputStream* stream, + const std::vector& field_path, + Protobuf::Message* message) { + CodedInputStream input(stream); + input.SetRecursionLimit(field_path.size()); + + return GrpcJsonTranscoder::parseMessageByFieldPath(&input, absl::MakeConstSpan(field_path), + message); +} + void HttpBodyUtils::appendHttpBodyEnvelope( Buffer::Instance& output, const std::vector& request_body_field_path, std::string content_type, uint64_t content_length) { // Manually encode the protobuf envelope for the body. // See https://developers.google.com/protocol-buffers/docs/encoding#embedded for wire format. - // Embedded messages are treated the same way as strings (wire type 2). 
- constexpr uint32_t ProtobufLengthDelimitedField = 2; - std::string proto_envelope; { // For memory safety, the StringOutputStream needs to be destroyed before diff --git a/source/extensions/filters/http/grpc_json_transcoder/http_body_utils.h b/source/extensions/filters/http/grpc_json_transcoder/http_body_utils.h index dc2af9c3859b4..629af665a0694 100644 --- a/source/extensions/filters/http/grpc_json_transcoder/http_body_utils.h +++ b/source/extensions/filters/http/grpc_json_transcoder/http_body_utils.h @@ -13,6 +13,9 @@ namespace GrpcJsonTranscoder { class HttpBodyUtils { public: + static bool parseMessageByFieldPath(Protobuf::io::ZeroCopyInputStream* stream, + const std::vector& field_path, + Protobuf::Message* message); static void appendHttpBodyEnvelope(Buffer::Instance& output, const std::vector& request_body_field_path, diff --git a/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc b/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc index 1e0b67aa654f0..e2998a3f58661 100644 --- a/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc +++ b/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc @@ -31,14 +31,19 @@ using Envoy::ProtobufUtil::Status; using Envoy::ProtobufUtil::error::Code; using google::api::HttpRule; using google::grpc::transcoding::JsonRequestTranslator; +using JsonRequestTranslatorPtr = std::unique_ptr; using google::grpc::transcoding::MessageStream; using google::grpc::transcoding::PathMatcherBuilder; using google::grpc::transcoding::PathMatcherUtility; using google::grpc::transcoding::RequestInfo; using google::grpc::transcoding::RequestMessageTranslator; +using RequestMessageTranslatorPtr = std::unique_ptr; using google::grpc::transcoding::ResponseToJsonTranslator; +using ResponseToJsonTranslatorPtr = std::unique_ptr; using google::grpc::transcoding::Transcoder; +using TranscoderPtr = std::unique_ptr; using 
google::grpc::transcoding::TranscoderInputStream; +using TranscoderInputStreamPtr = std::unique_ptr; namespace Envoy { namespace Extensions { @@ -71,9 +76,9 @@ class TranscoderImpl : public Transcoder { * @param request_translator a JsonRequestTranslator that does the request translation * @param response_translator a ResponseToJsonTranslator that does the response translation */ - TranscoderImpl(std::unique_ptr request_translator, - std::unique_ptr json_request_translator, - std::unique_ptr response_translator) + TranscoderImpl(RequestMessageTranslatorPtr request_translator, + JsonRequestTranslatorPtr json_request_translator, + ResponseToJsonTranslatorPtr response_translator) : request_translator_(std::move(request_translator)), json_request_translator_(std::move(json_request_translator)), request_message_stream_(request_translator_ ? *request_translator_ @@ -92,12 +97,12 @@ class TranscoderImpl : public Transcoder { ProtobufUtil::Status ResponseStatus() override { return response_translator_->Status(); } private: - std::unique_ptr request_translator_; - std::unique_ptr json_request_translator_; + RequestMessageTranslatorPtr request_translator_; + JsonRequestTranslatorPtr json_request_translator_; MessageStream& request_message_stream_; - std::unique_ptr response_translator_; - std::unique_ptr request_stream_; - std::unique_ptr response_stream_; + ResponseToJsonTranslatorPtr response_translator_; + TranscoderInputStreamPtr request_stream_; + TranscoderInputStreamPtr response_stream_; }; } // namespace @@ -141,7 +146,11 @@ JsonTranscoderConfig::JsonTranscoderConfig( &descriptor_pool_)); PathMatcherBuilder pmb; + // clang-format off + // We cannot convert this to a absl hash set as PathMatcherUtility::RegisterByHttpRule takes a + // std::unordered_set as an argument std::unordered_set ignored_query_parameters; + // clang-format on for (const auto& query_param : proto_config.ignored_query_parameters()) { ignored_query_parameters.insert(query_param); } @@ -213,37 
+222,59 @@ void JsonTranscoderConfig::addBuiltinSymbolDescriptor(const std::string& symbol_ addFileDescriptor(file_proto); } +Status JsonTranscoderConfig::resolveField(const Protobuf::Descriptor* descriptor, + const std::string& field_path_str, + std::vector* field_path, + bool* is_http_body) { + const Protobuf::Type* message_type = + type_helper_->Info()->GetTypeByTypeUrl(Grpc::Common::typeUrl(descriptor->full_name())); + if (message_type == nullptr) { + return ProtobufUtil::Status(Code::NOT_FOUND, + "Could not resolve type: " + descriptor->full_name()); + } + + Status status = type_helper_->ResolveFieldPath( + *message_type, field_path_str == "*" ? "" : field_path_str, field_path); + if (!status.ok()) { + return status; + } + + if (field_path->empty()) { + *is_http_body = descriptor->full_name() == google::api::HttpBody::descriptor()->full_name(); + } else { + const Protobuf::Type* body_type = + type_helper_->Info()->GetTypeByTypeUrl(field_path->back()->type_url()); + *is_http_body = body_type != nullptr && + body_type->name() == google::api::HttpBody::descriptor()->full_name(); + } + return Status::OK; +} + Status JsonTranscoderConfig::createMethodInfo(const Protobuf::MethodDescriptor* descriptor, const HttpRule& http_rule, MethodInfoSharedPtr& method_info) { method_info = std::make_shared(); method_info->descriptor_ = descriptor; - method_info->response_type_is_http_body_ = - descriptor->output_type()->full_name() == google::api::HttpBody::descriptor()->full_name(); - const Protobuf::Type* request_type = type_helper_->Info()->GetTypeByTypeUrl( - Grpc::Common::typeUrl(descriptor->input_type()->full_name())); - if (request_type == nullptr) { - return ProtobufUtil::Status(Code::NOT_FOUND, - "Could not resolve type: " + descriptor->input_type()->full_name()); + Status status = + resolveField(descriptor->input_type(), http_rule.body(), + &method_info->request_body_field_path, &method_info->request_type_is_http_body_); + if (!status.ok()) { + return status; } - 
Status status = - type_helper_->ResolveFieldPath(*request_type, http_rule.body() == "*" ? "" : http_rule.body(), - &method_info->request_body_field_path); + status = resolveField(descriptor->output_type(), http_rule.response_body(), + &method_info->response_body_field_path, + &method_info->response_type_is_http_body_); if (!status.ok()) { return status; } - if (method_info->request_body_field_path.empty()) { - method_info->request_type_is_http_body_ = - descriptor->input_type()->full_name() == google::api::HttpBody::descriptor()->full_name(); - } else { - const Protobuf::Type* body_type = type_helper_->Info()->GetTypeByTypeUrl( - method_info->request_body_field_path.back()->type_url()); - method_info->request_type_is_http_body_ = - body_type != nullptr && - body_type->name() == google::api::HttpBody::descriptor()->full_name(); + if (!method_info->response_body_field_path.empty() && !method_info->response_type_is_http_body_) { + // TODO(euroelessar): Implement https://github.com/envoyproxy/envoy/issues/11136. 
+ return Status(Code::UNIMPLEMENTED, + "Setting \"response_body\" is not supported yet for non-HttpBody fields: " + + descriptor->full_name()); } return Status::OK; @@ -257,14 +288,14 @@ bool JsonTranscoderConfig::convertGrpcStatus() const { return convert_grpc_statu ProtobufUtil::Status JsonTranscoderConfig::createTranscoder( const Http::RequestHeaderMap& headers, ZeroCopyInputStream& request_input, - google::grpc::transcoding::TranscoderInputStream& response_input, - std::unique_ptr& transcoder, MethodInfoSharedPtr& method_info) { - if (Grpc::Common::hasGrpcContentType(headers)) { + google::grpc::transcoding::TranscoderInputStream& response_input, TranscoderPtr& transcoder, + MethodInfoSharedPtr& method_info) { + if (Grpc::Common::isGrpcRequestHeaders(headers)) { return ProtobufUtil::Status(Code::INVALID_ARGUMENT, "Request headers has application/grpc content-type"); } - const std::string method(headers.Method()->value().getStringView()); - std::string path(headers.Path()->value().getStringView()); + const std::string method(headers.getMethodValue()); + std::string path(headers.getPathValue()); std::string args; const size_t pos = path.find('?'); @@ -302,8 +333,8 @@ ProtobufUtil::Status JsonTranscoderConfig::createTranscoder( request_info.variable_bindings.emplace_back(std::move(resolved_binding)); } - std::unique_ptr request_translator; - std::unique_ptr json_request_translator; + RequestMessageTranslatorPtr request_translator; + JsonRequestTranslatorPtr json_request_translator; if (method_info->request_type_is_http_body_) { request_translator = std::make_unique(*type_helper_->Resolver(), false, std::move(request_info)); @@ -316,7 +347,7 @@ ProtobufUtil::Status JsonTranscoderConfig::createTranscoder( const auto response_type_url = Grpc::Common::typeUrl(method_info->descriptor_->output_type()->full_name()); - std::unique_ptr response_translator{new ResponseToJsonTranslator( + ResponseToJsonTranslatorPtr response_translator{new ResponseToJsonTranslator( 
type_helper_->Resolver(), response_type_url, method_info->descriptor_->server_streaming(), &response_input, print_options_)}; @@ -364,7 +395,7 @@ Http::FilterHeadersStatus JsonTranscoderFilter::decodeHeaders(Http::RequestHeade if (method_->request_type_is_http_body_) { if (headers.ContentType() != nullptr) { - absl::string_view content_type = headers.ContentType()->value().getStringView(); + absl::string_view content_type = headers.getContentTypeValue(); content_type_.assign(content_type.begin(), content_type.end()); } @@ -386,7 +417,8 @@ Http::FilterHeadersStatus JsonTranscoderFilter::decodeHeaders(Http::RequestHeade headers.removeContentLength(); headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.Grpc); - headers.setEnvoyOriginalPath(headers.Path()->value().getStringView()); + headers.setEnvoyOriginalPath(headers.getPathValue()); + headers.addReferenceKey(Http::Headers::get().EnvoyOriginalMethod, headers.getMethodValue()); headers.setPath("/" + method_->descriptor_->service()->full_name() + "/" + method_->descriptor_->name()); headers.setReferenceMethod(Http::Headers::get().MethodValues.Post); @@ -476,7 +508,7 @@ void JsonTranscoderFilter::setDecoderFilterCallbacks( Http::FilterHeadersStatus JsonTranscoderFilter::encodeHeaders(Http::ResponseHeaderMap& headers, bool end_stream) { - if (!Grpc::Common::isGrpcResponseHeader(headers, end_stream)) { + if (!Grpc::Common::isGrpcResponseHeaders(headers, end_stream)) { error_ = true; } @@ -519,10 +551,13 @@ Http::FilterDataStatus JsonTranscoderFilter::encodeData(Buffer::Instance& data, has_body_ = true; if (method_->response_type_is_http_body_) { - buildResponseFromHttpBodyOutput(*response_headers_, data); + bool frame_processed = buildResponseFromHttpBodyOutput(*response_headers_, data); if (!method_->descriptor_->server_streaming()) { return Http::FilterDataStatus::StopIterationAndBuffer; } + if (!http_body_response_headers_set_ && !frame_processed) { + return 
Http::FilterDataStatus::StopIterationAndBuffer; + } return Http::FilterDataStatus::Continue; } @@ -563,28 +598,24 @@ void JsonTranscoderFilter::doTrailers(Http::ResponseHeaderOrTrailerMap& headers_ return; } - if (method_->response_type_is_http_body_ && method_->descriptor_->server_streaming()) { - // Do not add empty json when HttpBody + streaming - // Also, headers already sent, just continue. - return; + if (!method_->response_type_is_http_body_) { + Buffer::OwnedImpl data; + readToBuffer(*transcoder_->ResponseOutput(), data); + if (data.length()) { + encoder_callbacks_->addEncodedData(data, true); + } } - Buffer::OwnedImpl data; - readToBuffer(*transcoder_->ResponseOutput(), data); - - if (data.length()) { - encoder_callbacks_->addEncodedData(data, true); - } + // If there was no previous headers frame, this |trailers| map is our |response_headers_|, + // so there is no need to copy headers from one to the other. + const bool is_trailers_only_response = response_headers_ == &headers_or_trailers; + const bool is_server_streaming = method_->descriptor_->server_streaming(); - if (method_->descriptor_->server_streaming()) { - // For streaming case, the headers are already sent, so just continue here. + if (is_server_streaming && !is_trailers_only_response) { + // Continue if headers were sent already. return; } - // If there was no previous headers frame, this |trailers| map is our |response_headers_|, - // so there is no need to copy headers from one to the other. - bool is_trailers_only_response = response_headers_ == &headers_or_trailers; - if (!grpc_status || grpc_status.value() == Grpc::Status::WellKnownGrpcStatus::InvalidCode) { response_headers_->setStatus(enumToInt(Http::Code::ServiceUnavailable)); } else { @@ -607,8 +638,11 @@ void JsonTranscoderFilter::doTrailers(Http::ResponseHeaderOrTrailerMap& headers_ response_headers_->remove(trailerHeader()); } - response_headers_->setContentLength( - encoder_callbacks_->encodingBuffer() ? 
encoder_callbacks_->encodingBuffer()->length() : 0); + if (!method_->descriptor_->server_streaming()) { + // Set content-length for non-streaming responses. + response_headers_->setContentLength( + encoder_callbacks_->encodingBuffer() ? encoder_callbacks_->encodingBuffer()->length() : 0); + } } void JsonTranscoderFilter::setEncoderFilterCallbacks( @@ -667,19 +701,25 @@ void JsonTranscoderFilter::maybeSendHttpBodyRequestMessage() { first_request_sent_ = true; } -void JsonTranscoderFilter::buildResponseFromHttpBodyOutput( +bool JsonTranscoderFilter::buildResponseFromHttpBodyOutput( Http::ResponseHeaderMap& response_headers, Buffer::Instance& data) { std::vector frames; decoder_.decode(data, frames); if (frames.empty()) { - return; + return false; } google::api::HttpBody http_body; for (auto& frame : frames) { if (frame.length_ > 0) { + http_body.Clear(); Buffer::ZeroCopyInputStreamImpl stream(std::move(frame.data_)); - http_body.ParseFromZeroCopyStream(&stream); + if (!HttpBodyUtils::parseMessageByFieldPath(&stream, method_->response_body_field_path, + &http_body)) { + // TODO(euroelessar): Return error to client. 
+ encoder_callbacks_->resetStream(); + return true; + } const auto& body = http_body.data(); data.add(body); @@ -688,14 +728,16 @@ void JsonTranscoderFilter::buildResponseFromHttpBodyOutput( // Non streaming case: single message with content type / length response_headers.setContentType(http_body.content_type()); response_headers.setContentLength(body.size()); + return true; } else if (!http_body_response_headers_set_) { // Streaming case: set content type only once from first HttpBody message response_headers.setContentType(http_body.content_type()); http_body_response_headers_set_ = true; } - return; } } + + return true; } bool JsonTranscoderFilter::maybeConvertGrpcStatus(Grpc::Status::GrpcStatus grpc_status, @@ -714,7 +756,10 @@ bool JsonTranscoderFilter::maybeConvertGrpcStatus(Grpc::Status::GrpcStatus grpc_ return false; } - auto status_details = Grpc::Common::getGrpcStatusDetailsBin(trailers); + // TODO(mattklein123): The dynamic cast here is needed because ResponseHeaderOrTrailerMap is not + // a header map. This can likely be cleaned up. + auto status_details = + Grpc::Common::getGrpcStatusDetailsBin(dynamic_cast(trailers)); if (!status_details) { // If no rpc.Status object was sent in the grpc-status-details-bin header, // construct it from the grpc-status and grpc-message headers. 
diff --git a/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.h b/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.h index 5c271dafde243..a0fabc85bfdde 100644 --- a/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.h +++ b/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.h @@ -44,6 +44,7 @@ struct VariableBinding { struct MethodInfo { const Protobuf::MethodDescriptor* descriptor_ = nullptr; std::vector request_body_field_path; + std::vector response_body_field_path; bool request_type_is_http_body_ = false; bool response_type_is_http_body_ = false; }; @@ -112,6 +113,10 @@ class JsonTranscoderConfig : public Logger::Loggable { private: void addFileDescriptor(const Protobuf::FileDescriptorProto& file); void addBuiltinSymbolDescriptor(const std::string& symbol_name); + ProtobufUtil::Status resolveField(const Protobuf::Descriptor* descriptor, + const std::string& field_path_str, + std::vector* field_path, + bool* is_http_body); ProtobufUtil::Status createMethodInfo(const Protobuf::MethodDescriptor* descriptor, const google::api::HttpRule& http_rule, MethodInfoSharedPtr& method_info); @@ -162,7 +167,11 @@ class JsonTranscoderFilter : public Http::StreamFilter, public Logger::Loggable< bool checkIfTranscoderFailed(const std::string& details); bool readToBuffer(Protobuf::io::ZeroCopyInputStream& stream, Buffer::Instance& data); void maybeSendHttpBodyRequestMessage(); - void buildResponseFromHttpBodyOutput(Http::ResponseHeaderMap& response_headers, + /** + * Builds response from HttpBody protobuf. + * Returns true if at least one gRPC frame has processed. 
+ */ + bool buildResponseFromHttpBodyOutput(Http::ResponseHeaderMap& response_headers, Buffer::Instance& data); bool maybeConvertGrpcStatus(Grpc::Status::GrpcStatus grpc_status, Http::ResponseHeaderOrTrailerMap& trailers); diff --git a/source/extensions/filters/http/grpc_stats/BUILD b/source/extensions/filters/http/grpc_stats/BUILD index 171e49afd3200..ac38af9751369 100644 --- a/source/extensions/filters/http/grpc_stats/BUILD +++ b/source/extensions/filters/http/grpc_stats/BUILD @@ -1,14 +1,14 @@ -licenses(["notice"]) # Apache 2 - -# L7 HTTP filter that implements gRPC telemetry - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +# L7 HTTP filter that implements gRPC telemetry + +envoy_extension_package() envoy_cc_extension( name = "config", diff --git a/source/extensions/filters/http/grpc_stats/grpc_stats_filter.cc b/source/extensions/filters/http/grpc_stats/grpc_stats_filter.cc index 7c91093d47121..8dfbc0dc0dc48 100644 --- a/source/extensions/filters/http/grpc_stats/grpc_stats_filter.cc +++ b/source/extensions/filters/http/grpc_stats/grpc_stats_filter.cc @@ -147,7 +147,7 @@ class GrpcStatsFilter : public Http::PassThroughFilter { GrpcStatsFilter(ConfigConstSharedPtr config) : config_(config) {} Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap& headers, bool) override { - grpc_request_ = Grpc::Common::hasGrpcContentType(headers); + grpc_request_ = Grpc::Common::isGrpcRequestHeaders(headers); if (grpc_request_) { cluster_ = decoder_callbacks_->clusterInfo(); if (cluster_) { @@ -203,7 +203,7 @@ class GrpcStatsFilter : public Http::PassThroughFilter { Http::FilterHeadersStatus encodeHeaders(Http::ResponseHeaderMap& headers, bool end_stream) override { - grpc_response_ = Grpc::Common::isGrpcResponseHeader(headers, end_stream); + grpc_response_ = Grpc::Common::isGrpcResponseHeaders(headers, end_stream); if (doStatTracking()) { 
config_->context_.chargeStat(*cluster_, Grpc::Context::Protocol::Grpc, request_names_, headers.GrpcStatus()); diff --git a/source/extensions/filters/http/grpc_web/BUILD b/source/extensions/filters/http/grpc_web/BUILD index e1509bde3c50c..d18eb56ed01d0 100644 --- a/source/extensions/filters/http/grpc_web/BUILD +++ b/source/extensions/filters/http/grpc_web/BUILD @@ -1,16 +1,16 @@ -licenses(["notice"]) # Apache 2 - -# L7 HTTP filter that implements the grpc-web protocol (https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-WEB.md) -# Public docs: docs/root/configuration/http_filters/grpc_web_filter.rst - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +# L7 HTTP filter that implements the grpc-web protocol (https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-WEB.md) +# Public docs: docs/root/configuration/http_filters/grpc_web_filter.rst + +envoy_extension_package() envoy_cc_library( name = "grpc_web_filter_lib", diff --git a/source/extensions/filters/http/grpc_web/grpc_web_filter.cc b/source/extensions/filters/http/grpc_web/grpc_web_filter.cc index 7ead4cd30d550..4f03c7e4950d1 100644 --- a/source/extensions/filters/http/grpc_web/grpc_web_filter.cc +++ b/source/extensions/filters/http/grpc_web/grpc_web_filter.cc @@ -17,6 +17,11 @@ namespace Extensions { namespace HttpFilters { namespace GrpcWeb { +Http::RegisterCustomInlineHeader<Http::CustomInlineHeaderRegistry::Type::RequestHeaders> + accept_handle(Http::CustomHeaders::get().Accept); +Http::RegisterCustomInlineHeader<Http::CustomInlineHeaderRegistry::Type::RequestHeaders> + grpc_accept_encoding_handle(Http::CustomHeaders::get().GrpcAcceptEncoding); + struct RcDetailsValues { // The grpc web filter couldn't decode the data as the size wasn't a multiple of 4. 
const std::string GrpcDecodeFailedDueToSize = "grpc_base_64_decode_failed_bad_size"; @@ -39,6 +44,9 @@ const absl::flat_hash_set& GrpcWebFilter::gRpcWebContentTypes() con } bool GrpcWebFilter::isGrpcWebRequest(const Http::RequestHeaderMap& headers) { + if (!headers.Path()) { + return false; + } const Http::HeaderEntry* content_type = headers.ContentType(); if (content_type != nullptr) { return gRpcWebContentTypes().count(content_type->value().getStringView()) > 0; @@ -49,7 +57,6 @@ bool GrpcWebFilter::isGrpcWebRequest(const Http::RequestHeaderMap& headers) { // Implements StreamDecoderFilter. // TODO(fengli): Implements the subtypes of gRPC-Web content-type other than proto, like +json, etc. Http::FilterHeadersStatus GrpcWebFilter::decodeHeaders(Http::RequestHeaderMap& headers, bool) { - const Http::HeaderEntry* content_type = headers.ContentType(); if (!isGrpcWebRequest(headers)) { return Http::FilterHeadersStatus::Continue; } @@ -61,20 +68,17 @@ Http::FilterHeadersStatus GrpcWebFilter::decodeHeaders(Http::RequestHeaderMap& h headers.removeContentLength(); setupStatTracking(headers); - if (content_type != nullptr && (Http::Headers::get().ContentTypeValues.GrpcWebText == - content_type->value().getStringView() || - Http::Headers::get().ContentTypeValues.GrpcWebTextProto == - content_type->value().getStringView())) { + const absl::string_view content_type = headers.getContentTypeValue(); + if (content_type == Http::Headers::get().ContentTypeValues.GrpcWebText || + content_type == Http::Headers::get().ContentTypeValues.GrpcWebTextProto) { // Checks whether gRPC-Web client is sending base64 encoded request. 
is_text_request_ = true; } headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.Grpc); - const Http::HeaderEntry* accept = headers.Accept(); - if (accept != nullptr && - (Http::Headers::get().ContentTypeValues.GrpcWebText == accept->value().getStringView() || - Http::Headers::get().ContentTypeValues.GrpcWebTextProto == - accept->value().getStringView())) { + const absl::string_view accept = headers.getInlineValue(accept_handle.handle()); + if (accept == Http::Headers::get().ContentTypeValues.GrpcWebText || + accept == Http::Headers::get().ContentTypeValues.GrpcWebTextProto) { // Checks whether gRPC-Web client is asking for base64 encoded response. is_text_response_ = true; } @@ -82,7 +86,8 @@ Http::FilterHeadersStatus GrpcWebFilter::decodeHeaders(Http::RequestHeaderMap& h // Adds te:trailers to upstream HTTP2 request. It's required for gRPC. headers.setReferenceTE(Http::Headers::get().TEValues.Trailers); // Adds grpc-accept-encoding:identity,deflate,gzip. It's required for gRPC. - headers.setReferenceGrpcAcceptEncoding(Http::Headers::get().GrpcAcceptEncodingValues.Default); + headers.setReferenceInline(grpc_accept_encoding_handle.handle(), + Http::CustomHeaders::get().GrpcAcceptEncodingValues.Default); return Http::FilterHeadersStatus::Continue; } @@ -198,18 +203,15 @@ Http::FilterTrailersStatus GrpcWebFilter::encodeTrailers(Http::ResponseTrailerMa // Trailers are expected to come all in once, and will be encoded into one single trailers frame. // Trailers in the trailers frame are separated by CRLFs. 
Buffer::OwnedImpl temp; - trailers.iterate( - [](const Http::HeaderEntry& header, void* context) -> Http::HeaderMap::Iterate { - Buffer::Instance* temp = static_cast(context); - temp->add(header.key().getStringView().data(), header.key().size()); - temp->add(":"); - temp->add(header.value().getStringView().data(), header.value().size()); - temp->add("\r\n"); - return Http::HeaderMap::Iterate::Continue; - }, - &temp); - - // Clear out the trailers so they don't get added since it is now in the body + trailers.iterate([&temp](const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate { + temp.add(header.key().getStringView().data(), header.key().size()); + temp.add(":"); + temp.add(header.value().getStringView().data(), header.value().size()); + temp.add("\r\n"); + return Http::HeaderMap::Iterate::Continue; + }); + + // Clears out the trailers so they don't get added since it is now in the body. trailers.clear(); Buffer::OwnedImpl buffer; // Adds the trailers frame head. diff --git a/source/extensions/filters/http/grpc_web/grpc_web_filter.h b/source/extensions/filters/http/grpc_web/grpc_web_filter.h index 7dfd54d51f48b..2ae3d2381fbf2 100644 --- a/source/extensions/filters/http/grpc_web/grpc_web_filter.h +++ b/source/extensions/filters/http/grpc_web/grpc_web_filter.h @@ -1,7 +1,5 @@ #pragma once -#include - #include "envoy/http/filter.h" #include "envoy/upstream/cluster_manager.h" diff --git a/source/extensions/filters/http/gzip/BUILD b/source/extensions/filters/http/gzip/BUILD index 5b1f7517b66f0..39b1459d45bef 100644 --- a/source/extensions/filters/http/gzip/BUILD +++ b/source/extensions/filters/http/gzip/BUILD @@ -1,26 +1,27 @@ -licenses(["notice"]) # Apache 2 - -# HTTP L7 filter that performs gzip compression -# Public docs: docs/root/configuration/http_filters/gzip_filter.rst - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +# 
HTTP L7 filter that performs gzip compression +# Public docs: docs/root/configuration/http_filters/gzip_filter.rst + +envoy_extension_package() envoy_cc_library( name = "gzip_filter_lib", srcs = ["gzip_filter.cc"], hdrs = ["gzip_filter.h"], deps = [ - "//source/common/compressor:compressor_lib", "//source/common/http:headers_lib", "//source/common/protobuf", + "//source/extensions/compression/gzip/compressor:compressor_lib", "//source/extensions/filters/http/common/compressor:compressor_lib", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/http/gzip/v3:pkg_cc_proto", ], ) diff --git a/source/extensions/filters/http/gzip/config.cc b/source/extensions/filters/http/gzip/config.cc index f8c577d67f344..d11d9279d5bef 100644 --- a/source/extensions/filters/http/gzip/config.cc +++ b/source/extensions/filters/http/gzip/config.cc @@ -10,6 +10,25 @@ namespace Gzip { Http::FilterFactoryCb GzipFilterFactory::createFilterFactoryFromProtoTyped( const envoy::extensions::filters::http::gzip::v3::Gzip& proto_config, const std::string& stats_prefix, Server::Configuration::FactoryContext& context) { + // This will flip to false eventually. + const bool runtime_feature_default = true; + const char runtime_key[] = "envoy.deprecated_features.allow_deprecated_gzip_http_filter"; + const std::string warn_message = + "Using deprecated extension 'envoy.extensions.filters.http.gzip'. This " + "extension will be removed from Envoy soon. 
Please use " + "'envoy.extensions.filters.http.compressor' instead."; + + if (context.runtime().snapshot().deprecatedFeatureEnabled(runtime_key, runtime_feature_default)) { + ENVOY_LOG_MISC(warn, "{}", warn_message); + } else { + throw EnvoyException( + warn_message + + " If continued use of this extension is absolutely necessary, see " + "https://www.envoyproxy.io/docs/envoy/latest/configuration/operations/runtime" + "#using-runtime-overrides-for-deprecated-features for how to apply a temporary and " + "highly discouraged override."); + } + Common::Compressors::CompressorFilterConfigSharedPtr config = std::make_shared( proto_config, stats_prefix, context.scope(), context.runtime()); return [config](Http::FilterChainFactoryCallbacks& callbacks) -> void { diff --git a/source/extensions/filters/http/gzip/gzip_filter.cc b/source/extensions/filters/http/gzip/gzip_filter.cc index 846500cd6dab2..804c3b9bd337f 100644 --- a/source/extensions/filters/http/gzip/gzip_filter.cc +++ b/source/extensions/filters/http/gzip/gzip_filter.cc @@ -1,6 +1,9 @@ #include "extensions/filters/http/gzip/gzip_filter.h" +#include "envoy/config/core/v3/base.pb.h" + #include "common/http/headers.h" +#include "common/protobuf/message_validator_impl.h" #include "common/protobuf/protobuf.h" namespace Envoy { @@ -15,7 +18,8 @@ const uint64_t DefaultMemoryLevel = 5; // Default and maximum compression window size. const uint64_t DefaultWindowBits = 12; -// When summed to window bits, this sets a gzip header and trailer around the compressed data. +// When logical OR'ed to window bits, this sets a gzip header and trailer around the compressed +// data. 
const uint64_t GzipHeaderValue = 16; } // namespace @@ -24,41 +28,45 @@ GzipFilterConfig::GzipFilterConfig(const envoy::extensions::filters::http::gzip: const std::string& stats_prefix, Stats::Scope& scope, Runtime::Loader& runtime) : CompressorFilterConfig(compressorConfig(gzip), stats_prefix + "gzip.", scope, runtime, - Http::Headers::get().ContentEncodingValues.Gzip), + Http::CustomHeaders::get().ContentEncodingValues.Gzip), compression_level_(compressionLevelEnum(gzip.compression_level())), compression_strategy_(compressionStrategyEnum(gzip.compression_strategy())), memory_level_(memoryLevelUint(gzip.memory_level().value())), - window_bits_(windowBitsUint(gzip.window_bits().value())) {} + window_bits_(windowBitsUint(gzip.window_bits().value())), + chunk_size_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(gzip, chunk_size, 4096)) {} -std::unique_ptr GzipFilterConfig::makeCompressor() { - auto compressor = std::make_unique(); +Envoy::Compression::Compressor::CompressorPtr GzipFilterConfig::makeCompressor() { + auto compressor = + std::make_unique(chunk_size_); compressor->init(compressionLevel(), compressionStrategy(), windowBits(), memoryLevel()); return compressor; } -Compressor::ZlibCompressorImpl::CompressionLevel GzipFilterConfig::compressionLevelEnum( +Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel +GzipFilterConfig::compressionLevelEnum( envoy::extensions::filters::http::gzip::v3::Gzip::CompressionLevel::Enum compression_level) { switch (compression_level) { case envoy::extensions::filters::http::gzip::v3::Gzip::CompressionLevel::BEST: - return Compressor::ZlibCompressorImpl::CompressionLevel::Best; + return Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Best; case envoy::extensions::filters::http::gzip::v3::Gzip::CompressionLevel::SPEED: - return Compressor::ZlibCompressorImpl::CompressionLevel::Speed; + return Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Speed; default: - return 
Compressor::ZlibCompressorImpl::CompressionLevel::Standard; + return Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Standard; } } -Compressor::ZlibCompressorImpl::CompressionStrategy GzipFilterConfig::compressionStrategyEnum( +Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy +GzipFilterConfig::compressionStrategyEnum( envoy::extensions::filters::http::gzip::v3::Gzip::CompressionStrategy compression_strategy) { switch (compression_strategy) { case envoy::extensions::filters::http::gzip::v3::Gzip::RLE: - return Compressor::ZlibCompressorImpl::CompressionStrategy::Rle; + return Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Rle; case envoy::extensions::filters::http::gzip::v3::Gzip::FILTERED: - return Compressor::ZlibCompressorImpl::CompressionStrategy::Filtered; + return Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Filtered; case envoy::extensions::filters::http::gzip::v3::Gzip::HUFFMAN: - return Compressor::ZlibCompressorImpl::CompressionStrategy::Huffman; + return Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Huffman; default: - return Compressor::ZlibCompressorImpl::CompressionStrategy::Standard; + return Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard; } } @@ -78,8 +86,12 @@ GzipFilterConfig::compressorConfig(const envoy::extensions::filters::http::gzip: envoy::extensions::filters::http::compressor::v3::Compressor compressor = {}; if (gzip.has_hidden_envoy_deprecated_content_length()) { compressor.set_allocated_content_length( + // According to + // https://developers.google.com/protocol-buffers/docs/reference/cpp-generated#embeddedmessage + // the message Compressor takes ownership of the allocated Protobuf::Uint32Value object. 
new Protobuf::UInt32Value(gzip.hidden_envoy_deprecated_content_length())); } + // NOLINTNEXTLINE(clang-analyzer-cplusplus.NewDeleteLeaks) for (const std::string& ctype : gzip.hidden_envoy_deprecated_content_type()) { compressor.add_content_type(ctype); } diff --git a/source/extensions/filters/http/gzip/gzip_filter.h b/source/extensions/filters/http/gzip/gzip_filter.h index a7c6406c2dc4e..be30f081a043c 100644 --- a/source/extensions/filters/http/gzip/gzip_filter.h +++ b/source/extensions/filters/http/gzip/gzip_filter.h @@ -2,8 +2,7 @@ #include "envoy/extensions/filters/http/gzip/v3/gzip.pb.h" -#include "common/compressor/zlib_compressor_impl.h" - +#include "extensions/compression/gzip/compressor/zlib_compressor_impl.h" #include "extensions/filters/http/common/compressor/compressor.h" namespace Envoy { @@ -20,22 +19,25 @@ class GzipFilterConfig : public Common::Compressors::CompressorFilterConfig { GzipFilterConfig(const envoy::extensions::filters::http::gzip::v3::Gzip& gzip, const std::string& stats_prefix, Stats::Scope& scope, Runtime::Loader& runtime); - std::unique_ptr makeCompressor() override; + Envoy::Compression::Compressor::CompressorPtr makeCompressor() override; - Compressor::ZlibCompressorImpl::CompressionLevel compressionLevel() const { + Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel compressionLevel() const { return compression_level_; } - Compressor::ZlibCompressorImpl::CompressionStrategy compressionStrategy() const { + Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy + compressionStrategy() const { return compression_strategy_; } uint64_t memoryLevel() const { return memory_level_; } uint64_t windowBits() const { return window_bits_; } + uint32_t chunkSize() const { return chunk_size_; } private: - static Compressor::ZlibCompressorImpl::CompressionLevel compressionLevelEnum( + static Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel compressionLevelEnum( 
envoy::extensions::filters::http::gzip::v3::Gzip::CompressionLevel::Enum compression_level); - static Compressor::ZlibCompressorImpl::CompressionStrategy compressionStrategyEnum( + static Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy + compressionStrategyEnum( envoy::extensions::filters::http::gzip::v3::Gzip::CompressionStrategy compression_strategy); static uint64_t memoryLevelUint(Protobuf::uint32 level); @@ -45,11 +47,12 @@ class GzipFilterConfig : public Common::Compressors::CompressorFilterConfig { static const envoy::extensions::filters::http::compressor::v3::Compressor compressorConfig(const envoy::extensions::filters::http::gzip::v3::Gzip& gzip); - Compressor::ZlibCompressorImpl::CompressionLevel compression_level_; - Compressor::ZlibCompressorImpl::CompressionStrategy compression_strategy_; + Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel compression_level_; + Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy compression_strategy_; - int32_t memory_level_; - int32_t window_bits_; + const int32_t memory_level_; + const int32_t window_bits_; + const uint32_t chunk_size_; }; } // namespace Gzip diff --git a/source/extensions/filters/http/header_to_metadata/BUILD b/source/extensions/filters/http/header_to_metadata/BUILD index 1c9cbed21a849..1bbe574312e60 100644 --- a/source/extensions/filters/http/header_to_metadata/BUILD +++ b/source/extensions/filters/http/header_to_metadata/BUILD @@ -1,16 +1,16 @@ -licenses(["notice"]) # Apache 2 - -# HTTP L7 filter that transforms request data into dynamic metadata -# Public docs: docs/root/configuration/http_filters/header_to_metadata_filter.rst - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +# HTTP L7 filter that transforms request data into dynamic metadata +# Public docs: 
docs/root/configuration/http_filters/header_to_metadata_filter.rst + +envoy_extension_package() envoy_cc_library( name = "header_to_metadata_filter_lib", @@ -19,6 +19,7 @@ envoy_cc_library( deps = [ "//include/envoy/server:filter_config_interface", "//source/common/common:base64_lib", + "//source/common/http:utility_lib", "//source/extensions/filters/http:well_known_names", "@envoy_api//envoy/extensions/filters/http/header_to_metadata/v3:pkg_cc_proto", ], diff --git a/source/extensions/filters/http/header_to_metadata/header_to_metadata_filter.cc b/source/extensions/filters/http/header_to_metadata/header_to_metadata_filter.cc index 3a5d2dc6725ff..350234f2fe367 100644 --- a/source/extensions/filters/http/header_to_metadata/header_to_metadata_filter.cc +++ b/source/extensions/filters/http/header_to_metadata/header_to_metadata_filter.cc @@ -3,7 +3,9 @@ #include "envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.pb.h" #include "common/common/base64.h" +#include "common/common/regex.h" #include "common/config/well_known_names.h" +#include "common/http/utility.h" #include "common/protobuf/protobuf.h" #include "extensions/filters/http/well_known_names.h" @@ -16,6 +18,77 @@ namespace Extensions { namespace HttpFilters { namespace HeaderToMetadataFilter { +// Extract the value of the header. +absl::optional<std::string> HeaderValueSelector::extract(Http::HeaderMap& map) const { + const Http::HeaderEntry* header_entry = map.get(header_); + if (header_entry == nullptr) { + return absl::nullopt; + } + // Catch the value in the header before removing. + absl::optional<std::string> value = std::string(header_entry->value().getStringView()); + if (remove_) { + map.remove(header_); + } + return value; +} + +// Extract the value of the key from the cookie header. 
+absl::optional<std::string> CookieValueSelector::extract(Http::HeaderMap& map) const { + std::string value = Envoy::Http::Utility::parseCookieValue(map, cookie_); + if (!value.empty()) { + return absl::optional<std::string>(std::move(value)); + } + return absl::nullopt; +} + +Rule::Rule(const ProtoRule& rule) : rule_(rule) { + // Ensure only one of header and cookie is specified. + // TODO(radha13): remove this once we are on v4 and these fields are folded into a oneof. + if (!rule.cookie().empty() && !rule.header().empty()) { + throw EnvoyException("Cannot specify both header and cookie"); + } + + // Initialize the shared pointer. + if (!rule.header().empty()) { + selector_ = + std::make_shared<HeaderValueSelector>(Http::LowerCaseString(rule.header()), rule.remove()); + } else if (!rule.cookie().empty()) { + selector_ = std::make_shared<CookieValueSelector>(rule.cookie()); + } else { + throw EnvoyException("One of Cookie or Header option needs to be specified"); + } + + // Rule must have at least one of the `on_header_*` fields set. + if (!rule.has_on_header_present() && !rule.has_on_header_missing()) { + const auto& error = fmt::format("header to metadata filter: rule for {} has neither " + "`on_header_present` nor `on_header_missing` set", + selector_->toString()); + throw EnvoyException(error); + } + + // Ensure value and regex_value_rewrite are not mixed. + // TODO(rgs1): remove this once we are on v4 and these fields are folded into a oneof. + if (!rule.on_header_present().value().empty() && + rule.on_header_present().has_regex_value_rewrite()) { + throw EnvoyException("Cannot specify both value and regex_value_rewrite"); + } + + // Remove field is un-supported for cookie. 
+ if (!rule.cookie().empty() && rule.remove()) { + throw EnvoyException("Cannot specify remove for cookie"); + } + + if (rule.has_on_header_missing() && rule.on_header_missing().value().empty()) { + throw EnvoyException("Cannot specify on_header_missing rule with an empty value"); + } + + if (rule.on_header_present().has_regex_value_rewrite()) { + const auto& rewrite_spec = rule.on_header_present().regex_value_rewrite(); + regex_rewrite_ = Regex::Utility::parseRegex(rewrite_spec.pattern()); + regex_rewrite_substitution_ = rewrite_spec.substitution(); + } +} + Config::Config(const envoy::extensions::filters::http::header_to_metadata::v3::Config config, const bool per_route) { request_set_ = Config::configToVector(config.request_rules(), request_rules_); @@ -39,17 +112,7 @@ bool Config::configToVector(const ProtobufRepeatedRule& proto_rules, } for (const auto& entry : proto_rules) { - std::pair rule = {Http::LowerCaseString(entry.header()), entry}; - - // Rule must have at least one of the `on_header_*` fields set. - if (!entry.has_on_header_present() && !entry.has_on_header_missing()) { - const auto& error = fmt::format("header to metadata filter: rule for header '{}' has neither " - "`on_header_present` nor `on_header_missing` set", - entry.header()); - throw EnvoyException(error); - } - - vector.push_back(rule); + vector.emplace_back(entry); } return true; @@ -89,15 +152,11 @@ void HeaderToMetadataFilter::setEncoderFilterCallbacks( } bool HeaderToMetadataFilter::addMetadata(StructMap& map, const std::string& meta_namespace, - const std::string& key, absl::string_view value, - ValueType type, ValueEncode encode) const { + const std::string& key, std::string value, ValueType type, + ValueEncode encode) const { ProtobufWkt::Value val; - if (value.empty()) { - // No value, skip. we could allow this though. - ENVOY_LOG(debug, "no metadata value provided"); - return false; - } + ASSERT(!value.empty()); if (value.size() >= MAX_HEADER_VALUE_LEN) { // Too long, go away. 
@@ -105,10 +164,9 @@ bool HeaderToMetadataFilter::addMetadata(StructMap& map, const std::string& meta return false; } - std::string decodedValue = std::string(value); if (encode == envoy::extensions::filters::http::header_to_metadata::v3::Config::BASE64) { - decodedValue = Base64::decodeWithoutPadding(value); - if (decodedValue.empty()) { + value = Base64::decodeWithoutPadding(value); + if (value.empty()) { ENVOY_LOG(debug, "Base64 decode failed"); return false; } @@ -117,11 +175,11 @@ bool HeaderToMetadataFilter::addMetadata(StructMap& map, const std::string& meta // Sane enough, add the key/value. switch (type) { case envoy::extensions::filters::http::header_to_metadata::v3::Config::STRING: - val.set_string_value(std::move(decodedValue)); + val.set_string_value(std::move(value)); break; case envoy::extensions::filters::http::header_to_metadata::v3::Config::NUMBER: { double dval; - if (absl::SimpleAtod(StringUtil::trim(decodedValue), &dval)) { + if (absl::SimpleAtod(StringUtil::trim(value), &dval)) { val.set_number_value(dval); } else { ENVOY_LOG(debug, "value to number conversion failed"); @@ -130,15 +188,14 @@ bool HeaderToMetadataFilter::addMetadata(StructMap& map, const std::string& meta break; } case envoy::extensions::filters::http::header_to_metadata::v3::Config::PROTOBUF_VALUE: { - if (!val.ParseFromString(decodedValue)) { + if (!val.ParseFromString(value)) { ENVOY_LOG(debug, "parse from decoded string failed"); return false; } break; } default: - ENVOY_LOG(debug, "unknown value type"); - return false; + NOT_REACHED_GCOVR_EXCL_LINE; } // Have we seen this namespace before? @@ -158,46 +215,42 @@ const std::string& HeaderToMetadataFilter::decideNamespace(const std::string& ns return nspace.empty() ? 
HttpFilterNames::get().HeaderToMetadata : nspace; } +// add metadata['key']= value depending on header present or missing case +void HeaderToMetadataFilter::applyKeyValue(std::string value, const Rule& rule, + const KeyValuePair& keyval, StructMap& np) { + if (!keyval.value().empty()) { + value = keyval.value(); + } else { + const auto& matcher = rule.regexRewrite(); + if (matcher != nullptr) { + value = matcher->replaceAll(value, rule.regexSubstitution()); + } + } + if (!value.empty()) { + const auto& nspace = decideNamespace(keyval.metadata_namespace()); + addMetadata(np, nspace, keyval.key(), value, keyval.type(), keyval.encode()); + } else { + ENVOY_LOG(debug, "value is empty, not adding metadata"); + } +} + void HeaderToMetadataFilter::writeHeaderToMetadata(Http::HeaderMap& headers, const HeaderToMetadataRules& rules, Http::StreamFilterCallbacks& callbacks) { StructMap structs_by_namespace; - for (const auto& rulePair : rules) { - const auto& header = rulePair.first; - const auto& rule = rulePair.second; - const Http::HeaderEntry* header_entry = headers.get(header); - - if (header_entry != nullptr && rule.has_on_header_present()) { - const auto& keyval = rule.on_header_present(); - absl::string_view value = keyval.value().empty() ? header_entry->value().getStringView() - : absl::string_view(keyval.value()); - - if (!value.empty()) { - const auto& nspace = decideNamespace(keyval.metadata_namespace()); - addMetadata(structs_by_namespace, nspace, keyval.key(), value, keyval.type(), - keyval.encode()); - } else { - ENVOY_LOG(debug, "value is empty, not adding metadata"); - } - - if (rule.remove()) { - headers.remove(header); - } - } else if (rule.has_on_header_missing()) { - // Add metadata for the header missing case. 
- const auto& keyval = rule.on_header_missing(); - - if (!keyval.value().empty()) { - const auto& nspace = decideNamespace(keyval.metadata_namespace()); - addMetadata(structs_by_namespace, nspace, keyval.key(), keyval.value(), keyval.type(), - keyval.encode()); - } else { - ENVOY_LOG(debug, "value is empty, not adding metadata"); - } + for (const auto& rule : rules) { + const auto& proto_rule = rule.rule(); + absl::optional value = rule.selector_->extract(headers); + + if (value && proto_rule.has_on_header_present()) { + applyKeyValue(std::move(value).value_or(""), rule, proto_rule.on_header_present(), + structs_by_namespace); + } else if (!value && proto_rule.has_on_header_missing()) { + applyKeyValue(std::move(value).value_or(""), rule, proto_rule.on_header_missing(), + structs_by_namespace); } } - // Any matching rules? if (!structs_by_namespace.empty()) { for (auto const& entry : structs_by_namespace) { @@ -206,18 +259,6 @@ void HeaderToMetadataFilter::writeHeaderToMetadata(Http::HeaderMap& headers, } } -const Config* HeaderToMetadataFilter::getRouteConfig() const { - if (!decoder_callbacks_->route() || !decoder_callbacks_->route()->routeEntry()) { - return nullptr; - } - - const auto* entry = decoder_callbacks_->route()->routeEntry(); - const auto* per_filter_config = - entry->virtualHost().perFilterConfig(HttpFilterNames::get().HeaderToMetadata); - - return dynamic_cast(per_filter_config); -} - // TODO(rgs1): this belongs in one of the filter interfaces, see issue #10164. const Config* HeaderToMetadataFilter::getConfig() const { // Cached config pointer. 
@@ -225,7 +266,8 @@ const Config* HeaderToMetadataFilter::getConfig() const { return effective_config_; } - effective_config_ = getRouteConfig(); + effective_config_ = Http::Utility::resolveMostSpecificPerFilterConfig( + HttpFilterNames::get().HeaderToMetadata, decoder_callbacks_->route()); if (effective_config_) { return effective_config_; } diff --git a/source/extensions/filters/http/header_to_metadata/header_to_metadata_filter.h b/source/extensions/filters/http/header_to_metadata/header_to_metadata_filter.h index 6c02dfb75b076..dd85f1fc4f998 100644 --- a/source/extensions/filters/http/header_to_metadata/header_to_metadata_filter.h +++ b/source/extensions/filters/http/header_to_metadata/header_to_metadata_filter.h @@ -8,6 +8,7 @@ #include "envoy/server/filter_config.h" #include "common/common/logger.h" +#include "common/common/matchers.h" #include "absl/strings/string_view.h" @@ -16,10 +17,72 @@ namespace Extensions { namespace HttpFilters { namespace HeaderToMetadataFilter { -using Rule = envoy::extensions::filters::http::header_to_metadata::v3::Config::Rule; +using ProtoRule = envoy::extensions::filters::http::header_to_metadata::v3::Config::Rule; using ValueType = envoy::extensions::filters::http::header_to_metadata::v3::Config::ValueType; using ValueEncode = envoy::extensions::filters::http::header_to_metadata::v3::Config::ValueEncode; -using HeaderToMetadataRules = std::vector>; +using KeyValuePair = envoy::extensions::filters::http::header_to_metadata::v3::Config::KeyValuePair; + +// Interface for getting values from a cookie or a header. +class ValueSelector { +public: + virtual ~ValueSelector() = default; + + /** + * Called to extract the value of a given header or cookie. + * @param http header map. + * @return absl::optional the extracted header or cookie. + */ + virtual absl::optional extract(Http::HeaderMap& map) const PURE; + + /** + * @return a string representation of either a cookie or a header passed in the request. 
+ */ + virtual std::string toString() const PURE; +}; + +// Get value from a header. +class HeaderValueSelector : public ValueSelector { +public: + // ValueSelector. + explicit HeaderValueSelector(Http::LowerCaseString header, bool remove) + : header_(std::move(header)), remove_(std::move(remove)) {} + absl::optional extract(Http::HeaderMap& map) const override; + std::string toString() const override { return fmt::format("header '{}'", header_.get()); } + ~HeaderValueSelector() override = default; + +private: + const Http::LowerCaseString header_; + const bool remove_; +}; + +// Get value from a cookie. +class CookieValueSelector : public ValueSelector { +public: + // ValueSelector. + explicit CookieValueSelector(std::string cookie) : cookie_(std::move(cookie)) {} + absl::optional extract(Http::HeaderMap& map) const override; + std::string toString() const override { return fmt::format("cookie '{}'", cookie_); } + ~CookieValueSelector() override = default; + +private: + const std::string cookie_; +}; + +class Rule { +public: + Rule(const ProtoRule& rule); + const ProtoRule& rule() const { return rule_; } + const Regex::CompiledMatcherPtr& regexRewrite() const { return regex_rewrite_; } + const std::string& regexSubstitution() const { return regex_rewrite_substitution_; } + std::shared_ptr selector_; + +private: + const ProtoRule rule_; + Regex::CompiledMatcherPtr regex_rewrite_{}; + std::string regex_rewrite_substitution_{}; +}; + +using HeaderToMetadataRules = std::vector; // TODO(yangminzhu): Make MAX_HEADER_VALUE_LEN configurable. 
const uint32_t MAX_HEADER_VALUE_LEN = 8 * 1024; @@ -34,18 +97,13 @@ class Config : public ::Envoy::Router::RouteSpecificFilterConfig, Config(const envoy::extensions::filters::http::header_to_metadata::v3::Config config, bool per_route = false); - HeaderToMetadataRules requestRules() const { return request_rules_; } - HeaderToMetadataRules responseRules() const { return response_rules_; } + const HeaderToMetadataRules& requestRules() const { return request_rules_; } + const HeaderToMetadataRules& responseRules() const { return response_rules_; } bool doResponse() const { return response_set_; } bool doRequest() const { return request_set_; } private: - using ProtobufRepeatedRule = Protobuf::RepeatedPtrField; - - HeaderToMetadataRules request_rules_; - HeaderToMetadataRules response_rules_; - bool response_set_; - bool request_set_; + using ProtobufRepeatedRule = Protobuf::RepeatedPtrField; /** * configToVector is a helper function for converting from configuration (protobuf types) into @@ -60,6 +118,11 @@ class Config : public ::Envoy::Router::RouteSpecificFilterConfig, static bool configToVector(const ProtobufRepeatedRule&, HeaderToMetadataRules&); const std::string& decideNamespace(const std::string& nspace) const; + + HeaderToMetadataRules request_rules_; + HeaderToMetadataRules response_rules_; + bool response_set_; + bool request_set_; }; using ConfigSharedPtr = std::shared_ptr; @@ -125,11 +188,11 @@ class HeaderToMetadataFilter : public Http::StreamFilter, */ void writeHeaderToMetadata(Http::HeaderMap& headers, const HeaderToMetadataRules& rules, Http::StreamFilterCallbacks& callbacks); - bool addMetadata(StructMap&, const std::string&, const std::string&, absl::string_view, ValueType, + bool addMetadata(StructMap&, const std::string&, const std::string&, std::string, ValueType, ValueEncode) const; + void applyKeyValue(std::string, const Rule&, const KeyValuePair&, StructMap&); const std::string& decideNamespace(const std::string& nspace) const; const Config* 
getConfig() const; - const Config* getRouteConfig() const; }; } // namespace HeaderToMetadataFilter diff --git a/source/extensions/filters/http/health_check/BUILD b/source/extensions/filters/http/health_check/BUILD index d26759688ee80..dd4fa02f30b30 100644 --- a/source/extensions/filters/http/health_check/BUILD +++ b/source/extensions/filters/http/health_check/BUILD @@ -1,16 +1,16 @@ -licenses(["notice"]) # Apache 2 - -# L7 HTTP filter that implements health check responses -# Public docs: docs/root/configuration/http_filters/health_check_filter.rst - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +# L7 HTTP filter that implements health check responses +# Public docs: docs/root/configuration/http_filters/health_check_filter.rst + +envoy_extension_package() envoy_cc_library( name = "health_check_lib", @@ -38,6 +38,13 @@ envoy_cc_extension( srcs = ["config.cc"], hdrs = ["config.h"], security_posture = "robust_to_untrusted_downstream", + # Legacy test use. TODO(#9953) clean up. 
+ visibility = [ + "//:extension_config", + "//test/common/filter/http:__subpackages__", + "//test/integration:__subpackages__", + "//test/server:__subpackages__", + ], deps = [ "//include/envoy/registry", "//source/common/http:header_utility_lib", diff --git a/source/extensions/filters/http/health_check/health_check.cc b/source/extensions/filters/http/health_check/health_check.cc index 6f1e3bf9c8a7f..801544b69f445 100644 --- a/source/extensions/filters/http/health_check/health_check.cc +++ b/source/extensions/filters/http/health_check/health_check.cc @@ -134,7 +134,7 @@ void HealthCheckFilter::onComplete() { for (const auto& item : *cluster_min_healthy_percentages_) { details = &RcDetails::get().HealthCheckClusterHealthy; const std::string& cluster_name = item.first; - const double min_healthy_percentage = item.second; + const uint64_t min_healthy_percentage = static_cast(item.second); auto* cluster = clusterManager.get(cluster_name); if (cluster == nullptr) { // If the cluster does not exist at all, consider the service unhealthy. @@ -148,7 +148,7 @@ void HealthCheckFilter::onComplete() { if (membership_total == 0) { // If the cluster exists but is empty, consider the service unhealthy unless // the specified minimum percent healthy for the cluster happens to be zero. - if (min_healthy_percentage == 0.0) { + if (min_healthy_percentage == 0UL) { continue; } else { final_status = Http::Code::ServiceUnavailable; @@ -158,10 +158,8 @@ void HealthCheckFilter::onComplete() { } // In the general case, consider the service unhealthy if fewer than the // specified percentage of the servers in the cluster are available (healthy + degraded). - // TODO(brian-pane) switch to purely integer-based math here, because the - // int-to-float conversions and floating point division are slow. 
- if ((stats.membership_healthy_.value() + stats.membership_degraded_.value()) < - membership_total * min_healthy_percentage / 100.0) { + if ((100UL * (stats.membership_healthy_.value() + stats.membership_degraded_.value())) < + membership_total * min_healthy_percentage) { final_status = Http::Code::ServiceUnavailable; details = &RcDetails::get().HealthCheckClusterUnhealthy; break; diff --git a/source/extensions/filters/http/ip_tagging/BUILD b/source/extensions/filters/http/ip_tagging/BUILD index a28d3cf649ede..5e27f10bb15c0 100644 --- a/source/extensions/filters/http/ip_tagging/BUILD +++ b/source/extensions/filters/http/ip_tagging/BUILD @@ -1,16 +1,16 @@ -licenses(["notice"]) # Apache 2 - -# HTTP L7 filter that writes an IP tagging header based on IP trie data -# Public docs: docs/root/configuration/http_filters/ip_tagging_filter.rst - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +# HTTP L7 filter that writes an IP tagging header based on IP trie data +# Public docs: docs/root/configuration/http_filters/ip_tagging_filter.rst + +envoy_extension_package() envoy_cc_library( name = "ip_tagging_filter_lib", @@ -34,6 +34,11 @@ envoy_cc_extension( srcs = ["config.cc"], hdrs = ["config.h"], security_posture = "robust_to_untrusted_downstream", + # TODO(#9953) clean up. 
+ visibility = [ + "//:extension_config", + "//test/integration:__subpackages__", + ], deps = [ "//include/envoy/registry", "//source/common/protobuf:utility_lib", diff --git a/source/extensions/filters/http/ip_tagging/ip_tagging_filter.h b/source/extensions/filters/http/ip_tagging/ip_tagging_filter.h index a4bc928971109..a37c5b9006a87 100644 --- a/source/extensions/filters/http/ip_tagging/ip_tagging_filter.h +++ b/source/extensions/filters/http/ip_tagging/ip_tagging_filter.h @@ -36,7 +36,6 @@ class IpTaggingFilterConfig { Runtime::Loader& runtime); Runtime::Loader& runtime() { return runtime_; } - Stats::Scope& scope() { return scope_; } FilterRequestType requestType() const { return request_type_; } const Network::LcTrie::LcTrie& trie() const { return *trie_; } diff --git a/source/extensions/filters/http/jwt_authn/BUILD b/source/extensions/filters/http/jwt_authn/BUILD index b64922a9c4422..f0249b014ea11 100644 --- a/source/extensions/filters/http/jwt_authn/BUILD +++ b/source/extensions/filters/http/jwt_authn/BUILD @@ -1,13 +1,13 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +envoy_extension_package() envoy_cc_library( name = "extractor_lib", diff --git a/source/extensions/filters/http/jwt_authn/authenticator.cc b/source/extensions/filters/http/jwt_authn/authenticator.cc index 3837b2c4c0345..27c1d3e8e267f 100644 --- a/source/extensions/filters/http/jwt_authn/authenticator.cc +++ b/source/extensions/filters/http/jwt_authn/authenticator.cc @@ -105,7 +105,7 @@ std::string AuthenticatorImpl::name() const { return provider_.value() + (is_allow_missing_ ? 
"-OPTIONAL" : ""); } if (is_allow_failed_) { - return "_IS_ALLOW_FALED_"; + return "_IS_ALLOW_FAILED_"; } if (is_allow_missing_) { return "_IS_ALLOW_MISSING_"; @@ -265,8 +265,14 @@ void AuthenticatorImpl::verifyKey() { void AuthenticatorImpl::doneWithStatus(const Status& status) { ENVOY_LOG(debug, "{}: JWT token verification completed with: {}", name(), ::google::jwt_verify::getStatusString(status)); - // if on allow missing or failed this should verify all tokens, otherwise stop on ok. - if ((Status::Ok == status && !is_allow_failed_ && !is_allow_missing_) || tokens_.empty()) { + + // If a request has multiple tokens, all of them must be valid. Otherwise it may have + // following security hole: a request has a good token and a bad one, it will pass + // verification, forwarded to the backend, and the backend may mistakenly use the bad + // token as the good one that passed the verification. + + // Unless allowing failed or missing, all tokens must be verified successfully. + if ((Status::Ok != status && !is_allow_failed_ && !is_allow_missing_) || tokens_.empty()) { tokens_.clear(); if (is_allow_failed_) { callback_(Status::Ok); diff --git a/source/extensions/filters/http/jwt_authn/extractor.cc b/source/extensions/filters/http/jwt_authn/extractor.cc index fb1ebb21a0917..b84f9fb4178fc 100644 --- a/source/extensions/filters/http/jwt_authn/extractor.cc +++ b/source/extensions/filters/http/jwt_authn/extractor.cc @@ -9,6 +9,7 @@ #include "common/http/utility.h" #include "common/singleton/const_singleton.h" +#include "absl/container/node_hash_set.h" #include "absl/strings/match.h" using envoy::extensions::filters::http::jwt_authn::v3::JwtProvider; @@ -35,7 +36,7 @@ using JwtConstValues = ConstSingleton; // A base JwtLocation object to store token and specified_issuers. 
class JwtLocationBase : public JwtLocation { public: - JwtLocationBase(const std::string& token, const std::unordered_set& issuers) + JwtLocationBase(const std::string& token, const absl::node_hash_set& issuers) : token_(token), specified_issuers_(issuers) {} // Get the token string @@ -50,13 +51,13 @@ class JwtLocationBase : public JwtLocation { // Extracted token. const std::string token_; // Stored issuers specified the location. - const std::unordered_set& specified_issuers_; + const absl::node_hash_set& specified_issuers_; }; // The JwtLocation for header extraction. class JwtHeaderLocation : public JwtLocationBase { public: - JwtHeaderLocation(const std::string& token, const std::unordered_set& issuers, + JwtHeaderLocation(const std::string& token, const absl::node_hash_set& issuers, const LowerCaseString& header) : JwtLocationBase(token, issuers), header_(header) {} @@ -70,7 +71,7 @@ class JwtHeaderLocation : public JwtLocationBase { // The JwtLocation for param extraction. class JwtParamLocation : public JwtLocationBase { public: - JwtParamLocation(const std::string& token, const std::unordered_set& issuers, + JwtParamLocation(const std::string& token, const absl::node_hash_set& issuers, const std::string&) : JwtLocationBase(token, issuers) {} @@ -118,7 +119,7 @@ class ExtractorImpl : public Logger::Loggable, public Extractor // The value prefix. e.g. for "Bearer ", the value_prefix is "Bearer ". std::string value_prefix_; // Issuers that specified this header. - std::unordered_set specified_issuers_; + absl::node_hash_set specified_issuers_; }; using HeaderLocationSpecPtr = std::unique_ptr; // The map of (header + value_prefix) to HeaderLocationSpecPtr @@ -127,7 +128,7 @@ class ExtractorImpl : public Logger::Loggable, public Extractor // ParamMap value type to store issuers that specified this header. struct ParamLocationSpec { // Issuers that specified this param. 
- std::unordered_set specified_issuers_; + absl::node_hash_set specified_issuers_; }; // The map of a parameter key to set of issuers specified the parameter std::map param_locations_; @@ -153,7 +154,7 @@ void ExtractorImpl::addProvider(const JwtProvider& provider) { } // If not specified, use default locations. if (provider.from_headers().empty() && provider.from_params().empty()) { - addHeaderConfig(provider.issuer(), Http::Headers::get().Authorization, + addHeaderConfig(provider.issuer(), Http::CustomHeaders::get().Authorization, JwtConstValues::get().BearerPrefix); addQueryParamConfig(provider.issuer(), JwtConstValues::get().AccessTokenParam); } @@ -208,7 +209,7 @@ ExtractorImpl::extract(const Http::RequestHeaderMap& headers) const { } // Check query parameter locations. - const auto& params = Http::Utility::parseQueryString(headers.Path()->value().getStringView()); + const auto& params = Http::Utility::parseAndDecodeQueryString(headers.getPathValue()); for (const auto& location_it : param_locations_) { const auto& param_key = location_it.first; const auto& location_spec = location_it.second; diff --git a/source/extensions/filters/http/jwt_authn/extractor.h b/source/extensions/filters/http/jwt_authn/extractor.h index 83255f9a29823..8be7d9b830ca6 100644 --- a/source/extensions/filters/http/jwt_authn/extractor.h +++ b/source/extensions/filters/http/jwt_authn/extractor.h @@ -2,7 +2,6 @@ #include #include -#include #include "envoy/extensions/filters/http/jwt_authn/v3/config.pb.h" #include "envoy/http/header_map.h" diff --git a/source/extensions/filters/http/jwt_authn/filter.cc b/source/extensions/filters/http/jwt_authn/filter.cc index 2035727a65d2d..65bc2b9a28961 100644 --- a/source/extensions/filters/http/jwt_authn/filter.cc +++ b/source/extensions/filters/http/jwt_authn/filter.cc @@ -16,12 +16,15 @@ namespace JwtAuthn { namespace { +Http::RegisterCustomInlineHeader + access_control_request_method_handle(Http::CustomHeaders::get().AccessControlRequestMethod); 
+Http::RegisterCustomInlineHeader + origin_handle(Http::CustomHeaders::get().Origin); + bool isCorsPreflightRequest(const Http::RequestHeaderMap& headers) { - return headers.Method() && - headers.Method()->value().getStringView() == Http::Headers::get().MethodValues.Options && - headers.Origin() && !headers.Origin()->value().empty() && - headers.AccessControlRequestMethod() && - !headers.AccessControlRequestMethod()->value().empty(); + return headers.getMethodValue() == Http::Headers::get().MethodValues.Options && + !headers.getInlineValue(origin_handle.handle()).empty() && + !headers.getInlineValue(access_control_request_method_handle.handle()).empty(); } } // namespace diff --git a/source/extensions/filters/http/jwt_authn/jwks_cache.cc b/source/extensions/filters/http/jwt_authn/jwks_cache.cc index 9c7034c08d0b1..7ec91acd98066 100644 --- a/source/extensions/filters/http/jwt_authn/jwks_cache.cc +++ b/source/extensions/filters/http/jwt_authn/jwks_cache.cc @@ -1,7 +1,6 @@ #include "extensions/filters/http/jwt_authn/jwks_cache.h" #include -#include #include "envoy/common/time.h" #include "envoy/extensions/filters/http/jwt_authn/v3/config.pb.h" @@ -10,6 +9,7 @@ #include "common/config/datasource.h" #include "common/protobuf/utility.h" +#include "absl/container/node_hash_map.h" #include "jwt_verify_lib/check_audience.h" using envoy::extensions::filters::http::jwt_authn::v3::JwtAuthentication; @@ -115,7 +115,7 @@ class JwksCacheImpl : public JwksCache { return it->second; } - JwksData* findByProvider(const std::string& provider) override { + JwksData* findByProvider(const std::string& provider) final { const auto it = jwks_data_map_.find(provider); if (it == jwks_data_map_.end()) { return nullptr; @@ -125,9 +125,9 @@ class JwksCacheImpl : public JwksCache { private: // The Jwks data map indexed by provider. - std::unordered_map jwks_data_map_; + absl::node_hash_map jwks_data_map_; // The Jwks data pointer map indexed by issuer. 
- std::unordered_map issuer_ptr_map_; + absl::node_hash_map issuer_ptr_map_; }; } // namespace diff --git a/source/extensions/filters/http/jwt_authn/matcher.cc b/source/extensions/filters/http/jwt_authn/matcher.cc index 526387c7eab0d..ff60faa0431f5 100644 --- a/source/extensions/filters/http/jwt_authn/matcher.cc +++ b/source/extensions/filters/http/jwt_authn/matcher.cc @@ -42,7 +42,7 @@ class BaseMatcherImpl : public Matcher, public Logger::Loggable matches &= Http::HeaderUtility::matchHeaders(headers, config_headers_); if (!config_query_parameters_.empty()) { Http::Utility::QueryParams query_parameters = - Http::Utility::parseQueryString(headers.Path()->value().getStringView()); + Http::Utility::parseQueryString(headers.getPathValue()); matches &= ConfigUtility::matchQueryParams(query_parameters, config_query_parameters_); } return matches; @@ -66,8 +66,7 @@ class PrefixMatcherImpl : public BaseMatcherImpl { path_matcher_(Matchers::PathMatcher::createPrefix(prefix_, !case_sensitive_)) {} bool matches(const Http::RequestHeaderMap& headers) const override { - if (BaseMatcherImpl::matchRoute(headers) && - path_matcher_->match(headers.Path()->value().getStringView())) { + if (BaseMatcherImpl::matchRoute(headers) && path_matcher_->match(headers.getPathValue())) { ENVOY_LOG(debug, "Prefix requirement '{}' matched.", prefix_); return true; } @@ -90,8 +89,7 @@ class PathMatcherImpl : public BaseMatcherImpl { path_matcher_(Matchers::PathMatcher::createExact(path_, !case_sensitive_)) {} bool matches(const Http::RequestHeaderMap& headers) const override { - if (BaseMatcherImpl::matchRoute(headers) && - path_matcher_->match(headers.Path()->value().getStringView())) { + if (BaseMatcherImpl::matchRoute(headers) && path_matcher_->match(headers.getPathValue())) { ENVOY_LOG(debug, "Path requirement '{}' matched.", path_); return true; } @@ -156,6 +154,11 @@ MatcherConstPtr Matcher::create(const RequirementRule& rule) { case 
RouteMatch::PathSpecifierCase::kHiddenEnvoyDeprecatedRegex: case RouteMatch::PathSpecifierCase::kSafeRegex: return std::make_unique(rule); + case RouteMatch::PathSpecifierCase::kConnectMatcher: + // TODO: When CONNECT match support is implemented, remove the manual clean-up of CONNECT + // matching in the filter fuzzer implementation: + // //test/extensions/filters/http/common/fuzz/uber_per_filter.cc + NOT_IMPLEMENTED_GCOVR_EXCL_LINE; // path specifier is required. case RouteMatch::PathSpecifierCase::PATH_SPECIFIER_NOT_SET: default: diff --git a/source/extensions/filters/http/jwt_authn/verifier.cc b/source/extensions/filters/http/jwt_authn/verifier.cc index 138a50e95a9d5..e8b613911e8d4 100644 --- a/source/extensions/filters/http/jwt_authn/verifier.cc +++ b/source/extensions/filters/http/jwt_authn/verifier.cc @@ -70,7 +70,7 @@ class ContextImpl : public Verifier::Context { Http::RequestHeaderMap& headers_; Tracing::Span& parent_span_; Verifier::Callbacks* callback_; - std::unordered_map completion_states_; + absl::node_hash_map completion_states_; std::vector auths_; ProtobufWkt::Struct payload_; }; diff --git a/source/extensions/filters/http/lua/BUILD b/source/extensions/filters/http/lua/BUILD index d7560f5207b7d..2e08db0ad5636 100644 --- a/source/extensions/filters/http/lua/BUILD +++ b/source/extensions/filters/http/lua/BUILD @@ -1,16 +1,16 @@ -licenses(["notice"]) # Apache 2 - -# Lua scripting L7 HTTP filter (https://www.lua.org/, http://luajit.org/) -# Public docs: docs/root/configuration/http_filters/lua_filter.rst - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +# Lua scripting L7 HTTP filter (https://www.lua.org/, http://luajit.org/) +# Public docs: docs/root/configuration/http_filters/lua_filter.rst + +envoy_extension_package() envoy_cc_library( name = "lua_filter_lib", @@ -23,12 +23,14 @@ envoy_cc_library( 
"//include/envoy/upstream:cluster_manager_interface", "//source/common/buffer:buffer_lib", "//source/common/common:enum_to_int", + "//source/common/config:datasource_lib", "//source/common/crypto:utility_lib", "//source/common/http:message_lib", "//source/extensions/common:utility_lib", "//source/extensions/filters/common/lua:lua_lib", "//source/extensions/filters/common/lua:wrappers_lib", "//source/extensions/filters/http:well_known_names", + "@envoy_api//envoy/extensions/filters/http/lua/v3:pkg_cc_proto", ], ) diff --git a/source/extensions/filters/http/lua/config.cc b/source/extensions/filters/http/lua/config.cc index 9f1990ea346d3..0166b9ed921a1 100644 --- a/source/extensions/filters/http/lua/config.cc +++ b/source/extensions/filters/http/lua/config.cc @@ -15,12 +15,19 @@ Http::FilterFactoryCb LuaFilterConfig::createFilterFactoryFromProtoTyped( const envoy::extensions::filters::http::lua::v3::Lua& proto_config, const std::string&, Server::Configuration::FactoryContext& context) { FilterConfigConstSharedPtr filter_config(new FilterConfig{ - proto_config.inline_code(), context.threadLocal(), context.clusterManager()}); + proto_config, context.threadLocal(), context.clusterManager(), context.api()}); return [filter_config](Http::FilterChainFactoryCallbacks& callbacks) -> void { callbacks.addStreamFilter(std::make_shared(filter_config)); }; } +Router::RouteSpecificFilterConfigConstSharedPtr +LuaFilterConfig::createRouteSpecificFilterConfigTyped( + const envoy::extensions::filters::http::lua::v3::LuaPerRoute& proto_config, + Server::Configuration::ServerFactoryContext& context, ProtobufMessage::ValidationVisitor&) { + return std::make_shared(proto_config, context.threadLocal(), context.api()); +} + /** * Static registration for the Lua filter. @see RegisterFactory. 
*/ diff --git a/source/extensions/filters/http/lua/config.h b/source/extensions/filters/http/lua/config.h index d13eeb1b757bb..b2057c532f2b5 100644 --- a/source/extensions/filters/http/lua/config.h +++ b/source/extensions/filters/http/lua/config.h @@ -14,7 +14,9 @@ namespace Lua { /** * Config registration for the Lua filter. @see NamedHttpFilterConfigFactory. */ -class LuaFilterConfig : public Common::FactoryBase { +class LuaFilterConfig + : public Common::FactoryBase { public: LuaFilterConfig() : FactoryBase(HttpFilterNames::get().Lua) {} @@ -22,6 +24,11 @@ class LuaFilterConfig : public Common::FactoryBase; + const std::string DEPRECATED_LUA_NAME = "envoy.lua"; std::atomic& deprecatedNameLogged() { @@ -101,6 +107,7 @@ void buildHeadersFromTable(Http::HeaderMap& headers, lua_State* state, int table } Http::AsyncClient::Request* makeHttpCall(lua_State* state, Filter& filter, + Tracing::Span& parent_span, Http::AsyncClient::Callbacks& callbacks) { const std::string cluster = luaL_checkstring(state, 2); luaL_checktype(state, 3, LUA_TTABLE); @@ -115,7 +122,7 @@ Http::AsyncClient::Request* makeHttpCall(lua_State* state, Filter& filter, luaL_error(state, "http call cluster invalid. 
Must be configured"); } - auto headers = std::make_unique(); + auto headers = Http::RequestHeaderMapImpl::create(); buildHeadersFromTable(*headers, state, 3); Http::RequestMessagePtr message(new Http::RequestMessageImpl(std::move(headers))); @@ -135,11 +142,38 @@ Http::AsyncClient::Request* makeHttpCall(lua_State* state, Filter& filter, timeout = std::chrono::milliseconds(timeout_ms); } - return filter.clusterManager().httpAsyncClientForCluster(cluster).send( - std::move(message), callbacks, Http::AsyncClient::RequestOptions().setTimeout(timeout)); + auto options = Http::AsyncClient::RequestOptions().setTimeout(timeout).setParentSpan(parent_span); + return filter.clusterManager().httpAsyncClientForCluster(cluster).send(std::move(message), + callbacks, options); } } // namespace +PerLuaCodeSetup::PerLuaCodeSetup(const std::string& lua_code, ThreadLocal::SlotAllocator& tls) + : lua_state_(lua_code, tls) { + lua_state_.registerType(); + lua_state_.registerType(); + lua_state_.registerType(); + lua_state_.registerType(); + lua_state_.registerType(); + lua_state_.registerType(); + lua_state_.registerType(); + lua_state_.registerType(); + lua_state_.registerType(); + lua_state_.registerType(); + lua_state_.registerType(); + lua_state_.registerType(); + + request_function_slot_ = lua_state_.registerGlobal("envoy_on_request"); + if (lua_state_.getGlobalRef(request_function_slot_) == LUA_REFNIL) { + ENVOY_LOG(info, "envoy_on_request() function not found. Lua filter will not hook requests."); + } + + response_function_slot_ = lua_state_.registerGlobal("envoy_on_response"); + if (lua_state_.getGlobalRef(response_function_slot_) == LUA_REFNIL) { + ENVOY_LOG(info, "envoy_on_response() function not found. 
Lua filter will not hook responses."); + } +} + StreamHandleWrapper::StreamHandleWrapper(Filters::Common::Lua::Coroutine& coroutine, Http::HeaderMap& headers, bool end_stream, Filter& filter, FilterCallbacks& callbacks) @@ -240,13 +274,11 @@ int StreamHandleWrapper::luaRespond(lua_State* state) { luaL_checktype(state, 2, LUA_TTABLE); size_t body_size; const char* raw_body = luaL_optlstring(state, 3, nullptr, &body_size); - auto headers = std::make_unique(); + auto headers = Http::ResponseHeaderMapImpl::create(); buildHeadersFromTable(*headers, state, 2); uint64_t status; - if (headers->Status() == nullptr || - !absl::SimpleAtoi(headers->Status()->value().getStringView(), &status) || status < 200 || - status >= 600) { + if (!absl::SimpleAtoi(headers->getStatusValue(), &status) || status < 200 || status >= 600) { luaL_error(state, ":status must be between 200-599"); } @@ -272,14 +304,14 @@ int StreamHandleWrapper::luaHttpCall(lua_State* state) { } if (lua_toboolean(state, async_flag_index)) { - return luaHttpCallAsynchronous(state); + return doAsynchronousHttpCall(state, callbacks_.activeSpan()); } else { - return luaHttpCallSynchronous(state); + return doSynchronousHttpCall(state, callbacks_.activeSpan()); } } -int StreamHandleWrapper::luaHttpCallSynchronous(lua_State* state) { - http_request_ = makeHttpCall(state, filter_, *this); +int StreamHandleWrapper::doSynchronousHttpCall(lua_State* state, Tracing::Span& span) { + http_request_ = makeHttpCall(state, filter_, span, *this); if (http_request_) { state_ = State::HttpCall; return lua_yield(state, 0); @@ -290,8 +322,8 @@ int StreamHandleWrapper::luaHttpCallSynchronous(lua_State* state) { } } -int StreamHandleWrapper::luaHttpCallAsynchronous(lua_State* state) { - makeHttpCall(state, filter_, noopCallbacks()); +int StreamHandleWrapper::doAsynchronousHttpCall(lua_State* state, Tracing::Span& span) { + makeHttpCall(state, filter_, span, noopCallbacks()); return 0; } @@ -303,17 +335,15 @@ void 
StreamHandleWrapper::onSuccess(const Http::AsyncClient::Request&, // We need to build a table with the headers as return param 1. The body will be return param 2. lua_newtable(coroutine_.luaState()); - response->headers().iterate( - [](const Http::HeaderEntry& header, void* context) -> Http::HeaderMap::Iterate { - lua_State* state = static_cast(context); - lua_pushlstring(state, header.key().getStringView().data(), - header.key().getStringView().length()); - lua_pushlstring(state, header.value().getStringView().data(), - header.value().getStringView().length()); - lua_settable(state, -3); - return Http::HeaderMap::Iterate::Continue; - }, - coroutine_.luaState()); + response->headers().iterate([lua_State = coroutine_.luaState()]( + const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate { + lua_pushlstring(lua_State, header.key().getStringView().data(), + header.key().getStringView().length()); + lua_pushlstring(lua_State, header.value().getStringView().data(), + header.value().getStringView().length()); + lua_settable(lua_State, -3); + return Http::HeaderMap::Iterate::Continue; + }); // TODO(mattklein123): Avoid double copy here. 
if (response->body() != nullptr) { @@ -569,33 +599,30 @@ int StreamHandleWrapper::luaImportPublicKey(lua_State* state) { return 1; } -FilterConfig::FilterConfig(const std::string& lua_code, ThreadLocal::SlotAllocator& tls, - Upstream::ClusterManager& cluster_manager) - : cluster_manager_(cluster_manager), lua_state_(lua_code, tls) { - lua_state_.registerType(); - lua_state_.registerType(); - lua_state_.registerType(); - lua_state_.registerType(); - lua_state_.registerType(); - lua_state_.registerType(); - lua_state_.registerType(); - lua_state_.registerType(); - lua_state_.registerType(); - lua_state_.registerType(); - lua_state_.registerType(); - lua_state_.registerType(); - - request_function_slot_ = lua_state_.registerGlobal("envoy_on_request"); - if (lua_state_.getGlobalRef(request_function_slot_) == LUA_REFNIL) { - ENVOY_LOG(info, "envoy_on_request() function not found. Lua filter will not hook requests."); +FilterConfig::FilterConfig(const envoy::extensions::filters::http::lua::v3::Lua& proto_config, + ThreadLocal::SlotAllocator& tls, + Upstream::ClusterManager& cluster_manager, Api::Api& api) + : cluster_manager_(cluster_manager) { + auto global_setup_ptr = std::make_unique(proto_config.inline_code(), tls); + if (global_setup_ptr) { + per_lua_code_setups_map_[GLOBAL_SCRIPT_NAME] = std::move(global_setup_ptr); } - response_function_slot_ = lua_state_.registerGlobal("envoy_on_response"); - if (lua_state_.getGlobalRef(response_function_slot_) == LUA_REFNIL) { - ENVOY_LOG(info, "envoy_on_response() function not found. 
Lua filter will not hook responses."); + for (const auto& source : proto_config.source_codes()) { + const std::string code = Config::DataSource::read(source.second, true, api); + auto per_lua_code_setup_ptr = std::make_unique(code, tls); + if (!per_lua_code_setup_ptr) { + continue; + } + per_lua_code_setups_map_[source.first] = std::move(per_lua_code_setup_ptr); } } +FilterConfigPerRoute::FilterConfigPerRoute( + const envoy::extensions::filters::http::lua::v3::LuaPerRoute& config, + ThreadLocal::SlotAllocator&, Api::Api&) + : disabled_(config.disabled()), name_(config.name()) {} + void Filter::onDestroy() { destroyed_ = true; if (request_stream_wrapper_.get()) { @@ -609,12 +636,14 @@ void Filter::onDestroy() { Http::FilterHeadersStatus Filter::doHeaders(StreamHandleRef& handle, Filters::Common::Lua::CoroutinePtr& coroutine, FilterCallbacks& callbacks, int function_ref, - Http::HeaderMap& headers, bool end_stream) { + PerLuaCodeSetup* setup, Http::HeaderMap& headers, + bool end_stream) { if (function_ref == LUA_REFNIL) { return Http::FilterHeadersStatus::Continue; } + ASSERT(setup); + coroutine = setup->createCoroutine(); - coroutine = config_->createCoroutine(); handle.reset(StreamHandleWrapper::create(coroutine->luaState(), *coroutine, headers, end_stream, *this, callbacks), true); @@ -689,11 +718,15 @@ void Filter::scriptLog(spdlog::level::level_enum level, const char* message) { return; case spdlog::level::off: NOT_IMPLEMENTED_GCOVR_EXCL_LINE; + return; + case spdlog::level::n_levels: + NOT_REACHED_GCOVR_EXCL_LINE; } } void Filter::DecoderCallbacks::respond(Http::ResponseHeaderMapPtr&& headers, Buffer::Instance* body, lua_State*) { + callbacks_->streamInfo().setResponseCodeDetails(HttpResponseCodeDetails::get().LuaResponse); callbacks_->encodeHeaders(std::move(headers), body == nullptr); if (body && !parent_.destroyed_) { callbacks_->encodeData(*body, true); diff --git a/source/extensions/filters/http/lua/lua_filter.h 
b/source/extensions/filters/http/lua/lua_filter.h index 88725c50ef406..24909a95d6499 100644 --- a/source/extensions/filters/http/lua/lua_filter.h +++ b/source/extensions/filters/http/lua/lua_filter.h @@ -1,9 +1,11 @@ #pragma once +#include "envoy/extensions/filters/http/lua/v3/lua.pb.h" #include "envoy/http/filter.h" #include "envoy/upstream/cluster_manager.h" #include "common/crypto/utility.h" +#include "common/http/utility.h" #include "extensions/common/utility.h" #include "extensions/filters/common/lua/wrappers.h" @@ -15,6 +17,31 @@ namespace Extensions { namespace HttpFilters { namespace Lua { +constexpr char GLOBAL_SCRIPT_NAME[] = "GLOBAL"; + +class PerLuaCodeSetup : Logger::Loggable { +public: + PerLuaCodeSetup(const std::string& lua_code, ThreadLocal::SlotAllocator& tls); + + Extensions::Filters::Common::Lua::CoroutinePtr createCoroutine() { + return lua_state_.createCoroutine(); + } + + int requestFunctionRef() { return lua_state_.getGlobalRef(request_function_slot_); } + int responseFunctionRef() { return lua_state_.getGlobalRef(response_function_slot_); } + + uint64_t runtimeBytesUsed() { return lua_state_.runtimeBytesUsed(); } + void runtimeGC() { return lua_state_.runtimeGC(); } + +private: + uint64_t request_function_slot_{}; + uint64_t response_function_slot_{}; + + Filters::Common::Lua::ThreadLocalState lua_state_; +}; + +using PerLuaCodeSetupPtr = std::unique_ptr; + /** * Callbacks used by a stream handler to access the filter. */ @@ -69,6 +96,11 @@ class FilterCallbacks { * @return const Network::Connection* the current network connection handle. */ virtual const Network::Connection* connection() const PURE; + + /** + * @return const Tracing::Span& the current tracing active span. 
+ */ + virtual Tracing::Span& activeSpan() PURE; }; class Filter; @@ -236,8 +268,8 @@ class StreamHandleWrapper : public Filters::Common::Lua::BaseLuaObject { public: - FilterConfig(const std::string& lua_code, ThreadLocal::SlotAllocator& tls, - Upstream::ClusterManager& cluster_manager); - Filters::Common::Lua::CoroutinePtr createCoroutine() { return lua_state_.createCoroutine(); } - int requestFunctionRef() { return lua_state_.getGlobalRef(request_function_slot_); } - int responseFunctionRef() { return lua_state_.getGlobalRef(response_function_slot_); } - uint64_t runtimeBytesUsed() { return lua_state_.runtimeBytesUsed(); } - void runtimeGC() { return lua_state_.runtimeGC(); } + FilterConfig(const envoy::extensions::filters::http::lua::v3::Lua& proto_config, + ThreadLocal::SlotAllocator& tls, Upstream::ClusterManager& cluster_manager, + Api::Api& api); + + PerLuaCodeSetup* perLuaCodeSetup(const std::string& name) const { + const auto iter = per_lua_code_setups_map_.find(name); + if (iter != per_lua_code_setups_map_.end()) { + return iter->second.get(); + } + return nullptr; + } Upstream::ClusterManager& cluster_manager_; private: - Filters::Common::Lua::ThreadLocalState lua_state_; - uint64_t request_function_slot_; - uint64_t response_function_slot_; + absl::flat_hash_map per_lua_code_setups_map_; }; using FilterConfigConstSharedPtr = std::shared_ptr; +/** + * Route configuration for the filter. 
+ */ +class FilterConfigPerRoute : public Router::RouteSpecificFilterConfig { +public: + FilterConfigPerRoute(const envoy::extensions::filters::http::lua::v3::LuaPerRoute& config, + ThreadLocal::SlotAllocator& tls, Api::Api& api); + + bool disabled() const { return disabled_; } + const std::string& name() const { return name_; } + +private: + const bool disabled_; + const std::string name_; +}; + +namespace { + +PerLuaCodeSetup* getPerLuaCodeSetup(const FilterConfig* filter_config, + Http::StreamFilterCallbacks* callbacks) { + const FilterConfigPerRoute* config_per_route = nullptr; + if (callbacks && callbacks->route()) { + config_per_route = Http::Utility::resolveMostSpecificPerFilterConfig( + HttpFilterNames::get().Lua, callbacks->route()); + } + + if (config_per_route != nullptr) { + if (config_per_route->disabled()) { + return nullptr; + } else if (!config_per_route->name().empty()) { + ASSERT(filter_config); + return filter_config->perLuaCodeSetup(config_per_route->name()); + } + return nullptr; + } + ASSERT(filter_config); + return filter_config->perLuaCodeSetup(GLOBAL_SCRIPT_NAME); +} + +} // namespace + // TODO(mattklein123): Filter stats. /** @@ -329,8 +406,10 @@ class Filter : public Http::StreamFilter, Logger::Loggable { // Http::StreamDecoderFilter Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap& headers, bool end_stream) override { - return doHeaders(request_stream_wrapper_, request_coroutine_, decoder_callbacks_, - config_->requestFunctionRef(), headers, end_stream); + PerLuaCodeSetup* setup = getPerLuaCodeSetup(config_.get(), decoder_callbacks_.callbacks_); + const int function_ref = setup ? 
setup->requestFunctionRef() : LUA_REFNIL; + return doHeaders(request_stream_wrapper_, request_coroutine_, decoder_callbacks_, function_ref, + setup, headers, end_stream); } Http::FilterDataStatus decodeData(Buffer::Instance& data, bool end_stream) override { return doData(request_stream_wrapper_, data, end_stream); @@ -348,8 +427,10 @@ class Filter : public Http::StreamFilter, Logger::Loggable { } Http::FilterHeadersStatus encodeHeaders(Http::ResponseHeaderMap& headers, bool end_stream) override { + PerLuaCodeSetup* setup = getPerLuaCodeSetup(config_.get(), decoder_callbacks_.callbacks_); + const int function_ref = setup ? setup->responseFunctionRef() : LUA_REFNIL; return doHeaders(response_stream_wrapper_, response_coroutine_, encoder_callbacks_, - config_->responseFunctionRef(), headers, end_stream); + function_ref, setup, headers, end_stream); } Http::FilterDataStatus encodeData(Buffer::Instance& data, bool end_stream) override { return doData(response_stream_wrapper_, data, end_stream); @@ -381,6 +462,7 @@ class Filter : public Http::StreamFilter, Logger::Loggable { const ProtobufWkt::Struct& metadata() const override; StreamInfo::StreamInfo& streamInfo() override { return callbacks_->streamInfo(); } const Network::Connection* connection() const override { return callbacks_->connection(); } + Tracing::Span& activeSpan() override { return callbacks_->activeSpan(); } Filter& parent_; Http::StreamDecoderFilterCallbacks* callbacks_{}; @@ -402,6 +484,7 @@ class Filter : public Http::StreamFilter, Logger::Loggable { const ProtobufWkt::Struct& metadata() const override; StreamInfo::StreamInfo& streamInfo() override { return callbacks_->streamInfo(); } const Network::Connection* connection() const override { return callbacks_->connection(); } + Tracing::Span& activeSpan() override { return callbacks_->activeSpan(); } Filter& parent_; Http::StreamEncoderFilterCallbacks* callbacks_{}; @@ -412,7 +495,8 @@ class Filter : public Http::StreamFilter, Logger::Loggable { 
Http::FilterHeadersStatus doHeaders(StreamHandleRef& handle, Filters::Common::Lua::CoroutinePtr& coroutine, FilterCallbacks& callbacks, int function_ref, - Http::HeaderMap& headers, bool end_stream); + PerLuaCodeSetup* setup, Http::HeaderMap& headers, + bool end_stream); Http::FilterDataStatus doData(StreamHandleRef& handle, Buffer::Instance& data, bool end_stream); Http::FilterTrailersStatus doTrailers(StreamHandleRef& handle, Http::HeaderMap& trailers); diff --git a/source/extensions/filters/http/lua/wrappers.cc b/source/extensions/filters/http/lua/wrappers.cc index a772f3c1edfe1..300586ef2860e 100644 --- a/source/extensions/filters/http/lua/wrappers.cc +++ b/source/extensions/filters/http/lua/wrappers.cc @@ -11,13 +11,10 @@ namespace Lua { HeaderMapIterator::HeaderMapIterator(HeaderMapWrapper& parent) : parent_(parent) { entries_.reserve(parent_.headers_.size()); - parent_.headers_.iterate( - [](const Http::HeaderEntry& header, void* context) -> Http::HeaderMap::Iterate { - HeaderMapIterator* iterator = static_cast(context); - iterator->entries_.push_back(&header); - return Http::HeaderMap::Iterate::Continue; - }, - this); + parent_.headers_.iterate([this](const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate { + entries_.push_back(&header); + return Http::HeaderMap::Iterate::Continue; + }); } int HeaderMapIterator::luaPairsIterator(lua_State* state) { @@ -116,6 +113,21 @@ int StreamInfoWrapper::luaDynamicMetadata(lua_State* state) { return 1; } +int StreamInfoWrapper::luaDownstreamSslConnection(lua_State* state) { + const auto& ssl = stream_info_.downstreamSslConnection(); + if (ssl != nullptr) { + if (downstream_ssl_connection_.get() != nullptr) { + downstream_ssl_connection_.pushStack(); + } else { + downstream_ssl_connection_.reset( + Filters::Common::Lua::SslConnectionWrapper::create(state, *ssl), true); + } + } else { + lua_pushnil(state); + } + return 1; +} + DynamicMetadataMapIterator::DynamicMetadataMapIterator(DynamicMetadataMapWrapper& parent) 
: parent_{parent}, current_{parent_.streamInfo().dynamicMetadata().filter_metadata().begin()} {} diff --git a/source/extensions/filters/http/lua/wrappers.h b/source/extensions/filters/http/lua/wrappers.h index 35f82556250d6..be616dc087ec2 100644 --- a/source/extensions/filters/http/lua/wrappers.h +++ b/source/extensions/filters/http/lua/wrappers.h @@ -7,6 +7,7 @@ #include "extensions/common/crypto/crypto_impl.h" #include "extensions/filters/common/lua/lua.h" +#include "extensions/filters/common/lua/wrappers.h" #include "openssl/evp.h" @@ -181,7 +182,9 @@ class StreamInfoWrapper : public Filters::Common::Lua::BaseLuaObject dynamic_metadata_wrapper_; + Filters::Common::Lua::LuaDeathRef + downstream_ssl_connection_; friend class DynamicMetadataMapWrapper; }; diff --git a/source/extensions/filters/http/on_demand/BUILD b/source/extensions/filters/http/on_demand/BUILD index 2332afdae292d..86b029ca21d3d 100644 --- a/source/extensions/filters/http/on_demand/BUILD +++ b/source/extensions/filters/http/on_demand/BUILD @@ -1,15 +1,15 @@ -licenses(["notice"]) # Apache 2 - -# On-demand RDS update HTTP filter - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +# On-demand RDS update HTTP filter + +envoy_extension_package() envoy_cc_library( name = "on_demand_update_lib", @@ -31,6 +31,12 @@ envoy_cc_extension( srcs = ["config.cc"], hdrs = ["config.h"], security_posture = "robust_to_untrusted_downstream", + # TODO(#9953) classify and clean up. 
+ visibility = [ + "//:extension_config", + "//test/common/access_log:__subpackages__", + "//test/integration:__subpackages__", + ], deps = [ "//include/envoy/registry", "//source/extensions/filters/http:well_known_names", diff --git a/source/extensions/filters/http/on_demand/config.h b/source/extensions/filters/http/on_demand/config.h index 1f63e9cc53f4e..88556a869470b 100644 --- a/source/extensions/filters/http/on_demand/config.h +++ b/source/extensions/filters/http/on_demand/config.h @@ -1,6 +1,7 @@ #pragma once #include "envoy/config/filter/http/on_demand/v2/on_demand.pb.h" +#include "envoy/config/filter/http/on_demand/v2/on_demand.pb.validate.h" #include "extensions/filters/http/common/factory_base.h" #include "extensions/filters/http/well_known_names.h" diff --git a/source/extensions/filters/http/on_demand/on_demand_update.cc b/source/extensions/filters/http/on_demand/on_demand_update.cc index cf69080e667d8..da5b2ec6bc107 100644 --- a/source/extensions/filters/http/on_demand/on_demand_update.cc +++ b/source/extensions/filters/http/on_demand/on_demand_update.cc @@ -37,6 +37,11 @@ void OnDemandRouteUpdate::setDecoderFilterCallbacks(Http::StreamDecoderFilterCal callbacks_ = &callbacks; } +// A weak_ptr copy of the route_config_updated_callback_ is kept by RdsRouteConfigProviderImpl +// in config_update_callbacks_. By resetting the pointer in onDestroy() callback we ensure +// that this filter/filter-chain will not be resumed if the corresponding has been closed +void OnDemandRouteUpdate::onDestroy() { route_config_updated_callback_.reset(); } + // This is the callback which is called when an update requested in requestRouteConfigUpdate() // has been propagated to workers, at which point the request processing is restarted from the // beginning. 
diff --git a/source/extensions/filters/http/on_demand/on_demand_update.h b/source/extensions/filters/http/on_demand/on_demand_update.h index a2cd51e07e207..455ef4160aa5a 100644 --- a/source/extensions/filters/http/on_demand/on_demand_update.h +++ b/source/extensions/filters/http/on_demand/on_demand_update.h @@ -27,7 +27,7 @@ class OnDemandRouteUpdate : public Http::StreamDecoderFilter { void setDecoderFilterCallbacks(Http::StreamDecoderFilterCallbacks& callbacks) override; - void onDestroy() override {} + void onDestroy() override; private: Http::StreamDecoderFilterCallbacks* callbacks_{}; diff --git a/source/extensions/filters/http/original_src/BUILD b/source/extensions/filters/http/original_src/BUILD index b95bf621118bb..b88a1d8df9ffe 100644 --- a/source/extensions/filters/http/original_src/BUILD +++ b/source/extensions/filters/http/original_src/BUILD @@ -1,15 +1,15 @@ -licenses(["notice"]) # Apache 2 - -# A filter for mirroring the downstream remote address on the upstream's source. - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +# A filter for mirroring the downstream remote address on the upstream's source. 
+ +envoy_extension_package() envoy_cc_library( name = "config_lib", diff --git a/source/extensions/filters/http/ratelimit/BUILD b/source/extensions/filters/http/ratelimit/BUILD index af3b29e079088..0b9584711194d 100644 --- a/source/extensions/filters/http/ratelimit/BUILD +++ b/source/extensions/filters/http/ratelimit/BUILD @@ -1,22 +1,23 @@ -licenses(["notice"]) # Apache 2 - -# Ratelimit L7 HTTP filter -# Public docs: docs/root/configuration/http_filters/rate_limit_filter.rst - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +# Ratelimit L7 HTTP filter +# Public docs: docs/root/configuration/http_filters/rate_limit_filter.rst + +envoy_extension_package() envoy_cc_library( name = "ratelimit_lib", srcs = ["ratelimit.cc"], hdrs = ["ratelimit.h"], deps = [ + ":ratelimit_headers_lib", "//include/envoy/http:codes_interface", "//include/envoy/ratelimit:ratelimit_interface", "//source/common/common:assert_lib", @@ -30,6 +31,16 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "ratelimit_headers_lib", + srcs = ["ratelimit_headers.cc"], + hdrs = ["ratelimit_headers.h"], + deps = [ + "//source/common/http:header_map_lib", + "//source/extensions/filters/common/ratelimit:ratelimit_client_interface", + ], +) + envoy_cc_extension( name = "config", srcs = ["config.cc"], diff --git a/source/extensions/filters/http/ratelimit/config.cc b/source/extensions/filters/http/ratelimit/config.cc index c234672301b8a..1bcf930af3900 100644 --- a/source/extensions/filters/http/ratelimit/config.cc +++ b/source/extensions/filters/http/ratelimit/config.cc @@ -31,7 +31,8 @@ Http::FilterFactoryCb RateLimitFilterConfig::createFilterFactoryFromProtoTyped( filter_config](Http::FilterChainFactoryCallbacks& callbacks) -> void { callbacks.addStreamFilter(std::make_shared( filter_config, Filters::Common::RateLimit::rateLimitClient( - context, 
proto_config.rate_limit_service().grpc_service(), timeout))); + context, proto_config.rate_limit_service().grpc_service(), timeout, + proto_config.rate_limit_service().transport_api_version()))); }; } diff --git a/source/extensions/filters/http/ratelimit/ratelimit.cc b/source/extensions/filters/http/ratelimit/ratelimit.cc index cc4b8dd5bb770..c2c2b36b9e3a9 100644 --- a/source/extensions/filters/http/ratelimit/ratelimit.cc +++ b/source/extensions/filters/http/ratelimit/ratelimit.cc @@ -12,6 +12,8 @@ #include "common/http/header_utility.h" #include "common/router/config_impl.h" +#include "extensions/filters/http/ratelimit/ratelimit_headers.h" + namespace Envoy { namespace Extensions { namespace HttpFilters { @@ -125,6 +127,7 @@ void Filter::onDestroy() { } void Filter::complete(Filters::Common::RateLimit::LimitStatus status, + Filters::Common::RateLimit::DescriptorStatusListPtr&& descriptor_statuses, Http::ResponseHeaderMapPtr&& response_headers_to_add, Http::RequestHeaderMapPtr&& request_headers_to_add) { state_ = State::Complete; @@ -154,13 +157,24 @@ void Filter::complete(Filters::Common::RateLimit::LimitStatus status, false}; httpContext().codeStats().chargeResponseStat(info); if (response_headers_to_add_ == nullptr) { - response_headers_to_add_ = std::make_unique(); + response_headers_to_add_ = Http::ResponseHeaderMapImpl::create(); } response_headers_to_add_->setReferenceEnvoyRateLimited( Http::Headers::get().EnvoyRateLimitedValues.True); break; } + if (config_->enableXRateLimitHeaders()) { + Http::ResponseHeaderMapPtr rate_limit_headers = + XRateLimitHeaderUtils::create(std::move(descriptor_statuses)); + if (response_headers_to_add_ == nullptr) { + response_headers_to_add_ = Http::ResponseHeaderMapImpl::create(); + } + Http::HeaderUtility::addHeaders(*response_headers_to_add_, *rate_limit_headers); + } else { + descriptor_statuses = nullptr; + } + if (status == Filters::Common::RateLimit::LimitStatus::OverLimit && 
config_->runtime().snapshot().featureEnabled("ratelimit.http_filter_enforcing", 100)) { state_ = State::Responded; @@ -201,7 +215,8 @@ void Filter::populateRateLimitDescriptors(const Router::RateLimitPolicy& rate_li continue; } rate_limit.populateDescriptors(*route_entry, descriptors, config_->localInfo().clusterName(), - headers, *callbacks_->streamInfo().downstreamRemoteAddress()); + headers, *callbacks_->streamInfo().downstreamRemoteAddress(), + &callbacks_->streamInfo().dynamicMetadata()); } } diff --git a/source/extensions/filters/http/ratelimit/ratelimit.h b/source/extensions/filters/http/ratelimit/ratelimit.h index c47e93cfde4f1..b7b803343cbe9 100644 --- a/source/extensions/filters/http/ratelimit/ratelimit.h +++ b/source/extensions/filters/http/ratelimit/ratelimit.h @@ -43,6 +43,9 @@ class FilterConfig { : stringToType(config.request_type())), local_info_(local_info), scope_(scope), runtime_(runtime), failure_mode_deny_(config.failure_mode_deny()), + enable_x_ratelimit_headers_( + config.enable_x_ratelimit_headers() == + envoy::extensions::filters::http::ratelimit::v3::RateLimit::DRAFT_VERSION_03), rate_limited_grpc_status_( config.rate_limited_as_resource_exhausted() ? 
absl::make_optional(Grpc::Status::WellKnownGrpcStatus::ResourceExhausted) @@ -55,6 +58,7 @@ class FilterConfig { Stats::Scope& scope() { return scope_; } FilterRequestType requestType() const { return request_type_; } bool failureModeAllow() const { return !failure_mode_deny_; } + bool enableXRateLimitHeaders() const { return enable_x_ratelimit_headers_; } const absl::optional rateLimitedGrpcStatus() const { return rate_limited_grpc_status_; } @@ -80,6 +84,7 @@ class FilterConfig { Stats::Scope& scope_; Runtime::Loader& runtime_; const bool failure_mode_deny_; + const bool enable_x_ratelimit_headers_; const absl::optional rate_limited_grpc_status_; Http::Context& http_context_; Filters::Common::RateLimit::StatNames stat_names_; @@ -117,6 +122,7 @@ class Filter : public Http::StreamFilter, public Filters::Common::RateLimit::Req // RateLimit::RequestCallbacks void complete(Filters::Common::RateLimit::LimitStatus status, + Filters::Common::RateLimit::DescriptorStatusListPtr&& descriptor_statuses, Http::ResponseHeaderMapPtr&& response_headers_to_add, Http::RequestHeaderMapPtr&& request_headers_to_add) override; diff --git a/source/extensions/filters/http/ratelimit/ratelimit_headers.cc b/source/extensions/filters/http/ratelimit/ratelimit_headers.cc new file mode 100644 index 0000000000000..097171b108f5b --- /dev/null +++ b/source/extensions/filters/http/ratelimit/ratelimit_headers.cc @@ -0,0 +1,82 @@ +#include "extensions/filters/http/ratelimit/ratelimit_headers.h" + +#include "common/http/header_map_impl.h" + +#include "absl/strings/substitute.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace RateLimitFilter { + +Http::ResponseHeaderMapPtr XRateLimitHeaderUtils::create( + Filters::Common::RateLimit::DescriptorStatusListPtr&& descriptor_statuses) { + Http::ResponseHeaderMapPtr result = Http::ResponseHeaderMapImpl::create(); + if (!descriptor_statuses || descriptor_statuses->empty()) { + descriptor_statuses = nullptr; + return result; + 
} + + absl::optional + min_remaining_limit_status; + std::string quota_policy; + for (auto&& status : *descriptor_statuses) { + if (!status.has_current_limit()) { + continue; + } + if (!min_remaining_limit_status || + status.limit_remaining() < min_remaining_limit_status.value().limit_remaining()) { + min_remaining_limit_status.emplace(status); + } + const uint32_t window = convertRateLimitUnit(status.current_limit().unit()); + // Constructing the quota-policy per RFC + // https://tools.ietf.org/id/draft-polli-ratelimit-headers-02.html#name-ratelimit-limit + // Example of the result: `, 10;w=1;name="per-ip", 1000;w=3600` + if (window) { + // For each descriptor status append `;w=` + absl::SubstituteAndAppend("a_policy, ", $0;$1=$2", + status.current_limit().requests_per_unit(), + XRateLimitHeaders::get().QuotaPolicyKeys.Window, window); + if (!status.current_limit().name().empty()) { + // If the descriptor has a name, append `;name=""` + absl::SubstituteAndAppend("a_policy, ";$0=\"$1\"", + XRateLimitHeaders::get().QuotaPolicyKeys.Name, + status.current_limit().name()); + } + } + } + + if (min_remaining_limit_status) { + const std::string rate_limit_limit = absl::StrCat( + min_remaining_limit_status.value().current_limit().requests_per_unit(), quota_policy); + result->addReferenceKey(XRateLimitHeaders::get().XRateLimitLimit, rate_limit_limit); + result->addReferenceKey(XRateLimitHeaders::get().XRateLimitRemaining, + min_remaining_limit_status.value().limit_remaining()); + result->addReferenceKey(XRateLimitHeaders::get().XRateLimitReset, + min_remaining_limit_status.value().duration_until_reset().seconds()); + } + descriptor_statuses = nullptr; + return result; +} + +uint32_t XRateLimitHeaderUtils::convertRateLimitUnit( + const envoy::service::ratelimit::v3::RateLimitResponse::RateLimit::Unit unit) { + switch (unit) { + case envoy::service::ratelimit::v3::RateLimitResponse::RateLimit::SECOND: + return 1; + case 
envoy::service::ratelimit::v3::RateLimitResponse::RateLimit::MINUTE: + return 60; + case envoy::service::ratelimit::v3::RateLimitResponse::RateLimit::HOUR: + return 60 * 60; + case envoy::service::ratelimit::v3::RateLimitResponse::RateLimit::DAY: + return 24 * 60 * 60; + case envoy::service::ratelimit::v3::RateLimitResponse::RateLimit::UNKNOWN: + default: + return 0; + } +} + +} // namespace RateLimitFilter +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/http/ratelimit/ratelimit_headers.h b/source/extensions/filters/http/ratelimit/ratelimit_headers.h new file mode 100644 index 0000000000000..047bf495defcf --- /dev/null +++ b/source/extensions/filters/http/ratelimit/ratelimit_headers.h @@ -0,0 +1,40 @@ +#pragma once + +#include "envoy/http/header_map.h" + +#include "common/singleton/const_singleton.h" + +#include "extensions/filters/common/ratelimit/ratelimit.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace RateLimitFilter { + +class XRateLimitHeaderValues { +public: + const Http::LowerCaseString XRateLimitLimit{"x-ratelimit-limit"}; + const Http::LowerCaseString XRateLimitRemaining{"x-ratelimit-remaining"}; + const Http::LowerCaseString XRateLimitReset{"x-ratelimit-reset"}; + + struct { + const std::string Window{"w"}; + const std::string Name{"name"}; + } QuotaPolicyKeys; +}; +using XRateLimitHeaders = ConstSingleton; + +class XRateLimitHeaderUtils { +public: + static Http::ResponseHeaderMapPtr + create(Filters::Common::RateLimit::DescriptorStatusListPtr&& descriptor_statuses); + +private: + static uint32_t + convertRateLimitUnit(envoy::service::ratelimit::v3::RateLimitResponse::RateLimit::Unit unit); +}; + +} // namespace RateLimitFilter +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/http/rbac/BUILD b/source/extensions/filters/http/rbac/BUILD index 77472b169e84b..1f7802394c705 100644 --- 
a/source/extensions/filters/http/rbac/BUILD +++ b/source/extensions/filters/http/rbac/BUILD @@ -1,19 +1,24 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +envoy_extension_package() envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], security_posture = "robust_to_untrusted_downstream", + # TODO(#9953) clean up. + visibility = [ + "//:extension_config", + "//test/integration:__subpackages__", + ], deps = [ "//include/envoy/registry", "//source/extensions/filters/http:well_known_names", diff --git a/source/extensions/filters/http/rbac/rbac_filter.cc b/source/extensions/filters/http/rbac/rbac_filter.cc index 6e1ff3ea33188..d396db7f52bcd 100644 --- a/source/extensions/filters/http/rbac/rbac_filter.cc +++ b/source/extensions/filters/http/rbac/rbac_filter.cc @@ -80,8 +80,8 @@ RoleBasedAccessControlFilter::decodeHeaders(Http::RequestHeaderMap& headers, boo if (shadow_engine != nullptr) { std::string shadow_resp_code = Filters::Common::RBAC::DynamicMetadataKeysSingleton::get().EngineResultAllowed; - if (shadow_engine->allowed(*callbacks_->connection(), headers, callbacks_->streamInfo(), - &effective_policy_id)) { + if (shadow_engine->handleAction(*callbacks_->connection(), headers, callbacks_->streamInfo(), + &effective_policy_id)) { ENVOY_LOG(debug, "shadow allowed"); config_->stats().shadow_allowed_.inc(); } else { @@ -109,7 +109,8 @@ RoleBasedAccessControlFilter::decodeHeaders(Http::RequestHeaderMap& headers, boo const auto engine = config_->engine(callbacks_->route(), Filters::Common::RBAC::EnforcementMode::Enforced); if (engine != nullptr) { - if (engine->allowed(*callbacks_->connection(), headers, callbacks_->streamInfo(), nullptr)) { + if (engine->handleAction(*callbacks_->connection(), headers, callbacks_->streamInfo(), + nullptr)) { ENVOY_LOG(debug, "enforced 
allowed"); config_->stats().allowed_.inc(); return Http::FilterHeadersStatus::Continue; diff --git a/source/extensions/filters/http/router/BUILD b/source/extensions/filters/http/router/BUILD index 98ce18396f5e4..6402dc14c8802 100644 --- a/source/extensions/filters/http/router/BUILD +++ b/source/extensions/filters/http/router/BUILD @@ -1,21 +1,23 @@ -licenses(["notice"]) # Apache 2 - -# HTTP L7 filter responsible for routing to upstream connection pools -# Public docs: docs/root/configuration/http_filters/router_filter.rst - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +# HTTP L7 filter responsible for routing to upstream connection pools +# Public docs: docs/root/configuration/http_filters/router_filter.rst + +envoy_extension_package() envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], security_posture = "robust_to_untrusted_downstream", + # This is core Envoy config. 
+ visibility = ["//visibility:public"], deps = [ "//include/envoy/registry", "//source/common/router:router_lib", diff --git a/source/extensions/filters/http/squash/BUILD b/source/extensions/filters/http/squash/BUILD index 0047bdf3f7def..ea2bdcd1242b1 100644 --- a/source/extensions/filters/http/squash/BUILD +++ b/source/extensions/filters/http/squash/BUILD @@ -1,16 +1,16 @@ -licenses(["notice"]) # Apache 2 - -# L7 HTTP filter that implements the Squash microservice debugger -# Public docs: docs/root/configuration/http_filters/squash_filter.rst - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +# L7 HTTP filter that implements the Squash microservice debugger +# Public docs: docs/root/configuration/http_filters/squash_filter.rst + +envoy_extension_package() envoy_cc_library( name = "squash_filter_lib", diff --git a/source/extensions/filters/http/squash/squash_filter.cc b/source/extensions/filters/http/squash/squash_filter.cc index ca0c8205f5588..0c5d42fe1f6ac 100644 --- a/source/extensions/filters/http/squash/squash_filter.cc +++ b/source/extensions/filters/http/squash/squash_filter.cc @@ -198,7 +198,7 @@ void SquashFilter::onCreateAttachmentSuccess(Http::ResponseMessagePtr&& m) { // Get the config object that was created if (Http::Utility::getResponseStatus(m->headers()) != enumToInt(Http::Code::Created)) { ENVOY_LOG(debug, "Squash: can't create attachment object. 
status {} - not squashing", - m->headers().Status()->value().getStringView()); + m->headers().getStatusValue()); doneSquashing(); } else { std::string debug_attachment_id; diff --git a/source/extensions/filters/http/squash/squash_filter.h b/source/extensions/filters/http/squash/squash_filter.h index f1b8446a132f1..d654e7f220886 100644 --- a/source/extensions/filters/http/squash/squash_filter.h +++ b/source/extensions/filters/http/squash/squash_filter.h @@ -68,6 +68,7 @@ class AsyncClientCallbackShim : public Http::AsyncClient::Callbacks { void onFailure(const Http::AsyncClient::Request&, Http::AsyncClient::FailureReason f) override { on_fail_(f); } + void onBeforeFinalizeUpstreamSpan(Tracing::Span&, const Http::ResponseHeaderMap*) override {} private: const std::function on_success_; diff --git a/source/extensions/filters/http/tap/BUILD b/source/extensions/filters/http/tap/BUILD index d388912dfe562..73d4237cd0192 100644 --- a/source/extensions/filters/http/tap/BUILD +++ b/source/extensions/filters/http/tap/BUILD @@ -1,16 +1,16 @@ -licenses(["notice"]) # Apache 2 - -# L7 HTTP Tap filter -# Public docs: docs/root/configuration/http_filters/tap_filter.rst - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +# L7 HTTP Tap filter +# Public docs: docs/root/configuration/http_filters/tap_filter.rst + +envoy_extension_package() envoy_cc_library( name = "tap_config_interface", diff --git a/source/extensions/filters/http/tap/tap_config_impl.cc b/source/extensions/filters/http/tap/tap_config_impl.cc index 2ea89dc12914d..dfe31d083ab2b 100644 --- a/source/extensions/filters/http/tap/tap_config_impl.cc +++ b/source/extensions/filters/http/tap/tap_config_impl.cc @@ -15,13 +15,14 @@ namespace TapFilter { namespace TapCommon = Extensions::Common::Tap; namespace { -Http::HeaderMap::Iterate fillHeaderList(const Http::HeaderEntry& header, void* context) 
{ - Protobuf::RepeatedPtrField& header_list = - *reinterpret_cast*>(context); - auto& new_header = *header_list.Add(); - new_header.set_key(std::string(header.key().getStringView())); - new_header.set_value(std::string(header.value().getStringView())); - return Http::HeaderMap::Iterate::Continue; +Http::HeaderMap::ConstIterateCb +fillHeaderList(Protobuf::RepeatedPtrField* output) { + return [output](const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate { + auto& new_header = *output->Add(); + new_header.set_key(std::string(header.key().getStringView())); + new_header.set_value(std::string(header.value().getStringView())); + return Http::HeaderMap::Iterate::Continue; + }; } } // namespace @@ -35,9 +36,8 @@ HttpPerRequestTapperPtr HttpTapConfigImpl::createPerRequestTapper(uint64_t strea void HttpPerRequestTapperImpl::streamRequestHeaders() { TapCommon::TraceWrapperPtr trace = makeTraceSegment(); - request_headers_->iterate( - fillHeaderList, - trace->mutable_http_streamed_trace_segment()->mutable_request_headers()->mutable_headers()); + request_headers_->iterate(fillHeaderList( + trace->mutable_http_streamed_trace_segment()->mutable_request_headers()->mutable_headers())); sink_handle_->submitTrace(std::move(trace)); } @@ -61,15 +61,15 @@ void HttpPerRequestTapperImpl::streamBufferedRequestBody() { void HttpPerRequestTapperImpl::onRequestBody(const Buffer::Instance& data) { onBody(data, buffered_streamed_request_body_, config_->maxBufferedRxBytes(), &envoy::data::tap::v3::HttpStreamedTraceSegment::mutable_request_body_chunk, - &envoy::data::tap::v3::HttpBufferedTrace::mutable_request); + &envoy::data::tap::v3::HttpBufferedTrace::mutable_request, true); } void HttpPerRequestTapperImpl::streamRequestTrailers() { if (request_trailers_ != nullptr) { TapCommon::TraceWrapperPtr trace = makeTraceSegment(); - request_trailers_->iterate(fillHeaderList, trace->mutable_http_streamed_trace_segment() - ->mutable_request_trailers() - ->mutable_headers()); + 
request_trailers_->iterate(fillHeaderList(trace->mutable_http_streamed_trace_segment() + ->mutable_request_trailers() + ->mutable_headers())); sink_handle_->submitTrace(std::move(trace)); } } @@ -91,9 +91,8 @@ void HttpPerRequestTapperImpl::onRequestTrailers(const Http::RequestTrailerMap& void HttpPerRequestTapperImpl::streamResponseHeaders() { TapCommon::TraceWrapperPtr trace = makeTraceSegment(); - response_headers_->iterate( - fillHeaderList, - trace->mutable_http_streamed_trace_segment()->mutable_response_headers()->mutable_headers()); + response_headers_->iterate(fillHeaderList( + trace->mutable_http_streamed_trace_segment()->mutable_response_headers()->mutable_headers())); sink_handle_->submitTrace(std::move(trace)); } @@ -123,7 +122,7 @@ void HttpPerRequestTapperImpl::streamBufferedResponseBody() { void HttpPerRequestTapperImpl::onResponseBody(const Buffer::Instance& data) { onBody(data, buffered_streamed_response_body_, config_->maxBufferedTxBytes(), &envoy::data::tap::v3::HttpStreamedTraceSegment::mutable_response_body_chunk, - &envoy::data::tap::v3::HttpBufferedTrace::mutable_response); + &envoy::data::tap::v3::HttpBufferedTrace::mutable_response, false); } void HttpPerRequestTapperImpl::onResponseTrailers(const Http::ResponseTrailerMap& trailers) { @@ -141,9 +140,9 @@ void HttpPerRequestTapperImpl::onResponseTrailers(const Http::ResponseTrailerMap } TapCommon::TraceWrapperPtr trace = makeTraceSegment(); - trailers.iterate(fillHeaderList, trace->mutable_http_streamed_trace_segment() - ->mutable_response_trailers() - ->mutable_headers()); + trailers.iterate(fillHeaderList(trace->mutable_http_streamed_trace_segment() + ->mutable_response_trailers() + ->mutable_headers())); sink_handle_->submitTrace(std::move(trace)); } } @@ -156,16 +155,16 @@ bool HttpPerRequestTapperImpl::onDestroyLog() { makeBufferedFullTraceIfNeeded(); auto& http_trace = *buffered_full_trace_->mutable_http_buffered_trace(); if (request_headers_ != nullptr) { - 
request_headers_->iterate(fillHeaderList, http_trace.mutable_request()->mutable_headers()); + request_headers_->iterate(fillHeaderList(http_trace.mutable_request()->mutable_headers())); } if (request_trailers_ != nullptr) { - request_trailers_->iterate(fillHeaderList, http_trace.mutable_request()->mutable_trailers()); + request_trailers_->iterate(fillHeaderList(http_trace.mutable_request()->mutable_trailers())); } if (response_headers_ != nullptr) { - response_headers_->iterate(fillHeaderList, http_trace.mutable_response()->mutable_headers()); + response_headers_->iterate(fillHeaderList(http_trace.mutable_response()->mutable_headers())); } if (response_trailers_ != nullptr) { - response_trailers_->iterate(fillHeaderList, http_trace.mutable_response()->mutable_trailers()); + response_trailers_->iterate(fillHeaderList(http_trace.mutable_response()->mutable_trailers())); } ENVOY_LOG(debug, "submitting buffered trace sink"); @@ -177,10 +176,12 @@ bool HttpPerRequestTapperImpl::onDestroyLog() { void HttpPerRequestTapperImpl::onBody( const Buffer::Instance& data, Extensions::Common::Tap::TraceWrapperPtr& buffered_streamed_body, uint32_t max_buffered_bytes, MutableBodyChunk mutable_body_chunk, - MutableMessage mutable_message) { - // TODO(mattklein123): Body matching. + MutableMessage mutable_message, bool request) { + // Invoke body matcher. + request ? config_->rootMatcher().onRequestBody(data, statuses_) + : config_->rootMatcher().onResponseBody(data, statuses_); if (config_->streaming()) { - const auto match_status = config_->rootMatcher().matchStatus(statuses_); + const auto& match_status = config_->rootMatcher().matchStatus(statuses_); // Without body matching, we must have already started tracing or have not yet matched. 
ASSERT(started_streaming_trace_ || !match_status.matches_); diff --git a/source/extensions/filters/http/tap/tap_config_impl.h b/source/extensions/filters/http/tap/tap_config_impl.h index bb8bb0b48c4f6..f61f275774c56 100644 --- a/source/extensions/filters/http/tap/tap_config_impl.h +++ b/source/extensions/filters/http/tap/tap_config_impl.h @@ -53,7 +53,7 @@ class HttpPerRequestTapperImpl : public HttpPerRequestTapper, Logger::LoggableonRequestBody(data); } return Http::FilterDataStatus::Continue; @@ -56,7 +56,7 @@ Http::FilterHeadersStatus Filter::encodeHeaders(Http::ResponseHeaderMap& headers } Http::FilterDataStatus Filter::encodeData(Buffer::Instance& data, bool) { - if (tapper_ != nullptr) { + if ((tapper_ != nullptr) && (0 != data.length())) { tapper_->onResponseBody(data); } return Http::FilterDataStatus::Continue; diff --git a/source/extensions/filters/http/well_known_names.h b/source/extensions/filters/http/well_known_names.h index 68bc5c361be45..2adb1681701fb 100644 --- a/source/extensions/filters/http/well_known_names.h +++ b/source/extensions/filters/http/well_known_names.h @@ -16,10 +16,14 @@ class HttpFilterNameValues { const std::string Buffer = "envoy.filters.http.buffer"; // Cache filter const std::string Cache = "envoy.filters.http.cache"; + // Compressor filter + const std::string Compressor = "envoy.filters.http.compressor"; // CORS filter const std::string Cors = "envoy.filters.http.cors"; // CSRF filter const std::string Csrf = "envoy.filters.http.csrf"; + // Decompressor filter + const std::string Decompressor = "envoy.filters.http.decompressor"; // Dynamo filter const std::string Dynamo = "envoy.filters.http.dynamo"; // Fault filter @@ -62,6 +66,8 @@ class HttpFilterNameValues { const std::string Tap = "envoy.filters.http.tap"; // Adaptive concurrency limit filter const std::string AdaptiveConcurrency = "envoy.filters.http.adaptive_concurrency"; + // Admission control filter + const std::string AdmissionControl = 
"envoy.filters.http.admission_control"; // Original Src Filter const std::string OriginalSrc = "envoy.filters.http.original_src"; // Dynamic forward proxy filter diff --git a/source/extensions/filters/listener/BUILD b/source/extensions/filters/listener/BUILD index 6156949edef64..9a2ee9ad75cbc 100644 --- a/source/extensions/filters/listener/BUILD +++ b/source/extensions/filters/listener/BUILD @@ -1,16 +1,18 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +envoy_extension_package() envoy_cc_library( name = "well_known_names", hdrs = ["well_known_names.h"], + # Well known names are public. + visibility = ["//visibility:public"], deps = [ "//source/common/singleton:const_singleton", ], diff --git a/source/extensions/filters/listener/http_inspector/BUILD b/source/extensions/filters/listener/http_inspector/BUILD index 8df52851b8249..0f3c7f50eb40d 100644 --- a/source/extensions/filters/listener/http_inspector/BUILD +++ b/source/extensions/filters/listener/http_inspector/BUILD @@ -1,15 +1,15 @@ -licenses(["notice"]) # Apache 2 - -# HTTP inspector filter for sniffing HTTP protocol and setting HTTP version to a FilterChain. - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +# HTTP inspector filter for sniffing HTTP protocol and setting HTTP version to a FilterChain. 
+ +envoy_extension_package() envoy_cc_library( name = "http_inspector_lib", @@ -24,6 +24,7 @@ envoy_cc_library( "//source/common/api:os_sys_calls_lib", "//source/common/common:minimal_logger_lib", "//source/common/http:headers_lib", + "//source/common/http:utility_lib", "//source/extensions/transport_sockets:well_known_names", ], ) diff --git a/source/extensions/filters/listener/http_inspector/http_inspector.cc b/source/extensions/filters/listener/http_inspector/http_inspector.cc index bb039f8cfe5d8..90234d9b31fef 100644 --- a/source/extensions/filters/listener/http_inspector/http_inspector.cc +++ b/source/extensions/filters/listener/http_inspector/http_inspector.cc @@ -8,6 +8,7 @@ #include "common/common/assert.h" #include "common/common/macros.h" #include "common/http/headers.h" +#include "common/http/utility.h" #include "extensions/transport_sockets/well_known_names.h" @@ -104,9 +105,9 @@ ParseState Filter::onRead() { const Api::SysCallSizeResult result = os_syscalls.recv(socket.ioHandle().fd(), buf_, Config::MAX_INSPECT_SIZE, MSG_PEEK); ENVOY_LOG(trace, "http inspector: recv: {}", result.rc_); - if (result.rc_ == -1 && result.errno_ == EAGAIN) { + if (SOCKET_FAILURE(result.rc_) && result.errno_ == SOCKET_ERROR_AGAIN) { return ParseState::Continue; - } else if (result.rc_ < 0) { + } else if (SOCKET_FAILURE(result.rc_)) { config_->stats().read_error_.inc(); return ParseState::Error; } @@ -181,16 +182,16 @@ void Filter::done(bool success) { absl::string_view protocol; if (protocol_ == Http::Headers::get().ProtocolStrings.Http10String) { config_->stats().http10_found_.inc(); - protocol = "http/1.0"; + protocol = Http::Utility::AlpnNames::get().Http10; } else if (protocol_ == Http::Headers::get().ProtocolStrings.Http11String) { config_->stats().http11_found_.inc(); - protocol = "http/1.1"; + protocol = Http::Utility::AlpnNames::get().Http11; } else { ASSERT(protocol_ == "HTTP/2"); config_->stats().http2_found_.inc(); // h2 HTTP/2 over TLS, h2c HTTP/2 over TCP // 
TODO(yxue): use detected protocol from http inspector and support h2c token in HCM - protocol = "h2c"; + protocol = Http::Utility::AlpnNames::get().Http2c; } cb_->socket().setRequestedApplicationProtocols({protocol}); diff --git a/source/extensions/filters/listener/original_dst/BUILD b/source/extensions/filters/listener/original_dst/BUILD index d21098d656e3d..78c09f58155c3 100644 --- a/source/extensions/filters/listener/original_dst/BUILD +++ b/source/extensions/filters/listener/original_dst/BUILD @@ -1,16 +1,16 @@ -licenses(["notice"]) # Apache 2 - -# ORIGINAL_DST iptables redirection listener filter -# Public docs: docs/root/configuration/listener_filters/original_dst_filter.rst - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +# ORIGINAL_DST iptables redirection listener filter +# Public docs: docs/root/configuration/listener_filters/original_dst_filter.rst + +envoy_extension_package() envoy_cc_library( name = "original_dst_lib", @@ -29,6 +29,11 @@ envoy_cc_extension( name = "config", srcs = ["config.cc"], security_posture = "robust_to_untrusted_downstream", + # TODO(#9953) clean up. 
+ visibility = [ + "//:extension_config", + "//test/integration:__subpackages__", + ], deps = [ ":original_dst_lib", "//include/envoy/registry", diff --git a/source/extensions/filters/listener/original_dst/original_dst.cc b/source/extensions/filters/listener/original_dst/original_dst.cc index d6e49bc3b7baf..cea15d4664bbd 100644 --- a/source/extensions/filters/listener/original_dst/original_dst.cc +++ b/source/extensions/filters/listener/original_dst/original_dst.cc @@ -10,18 +10,16 @@ namespace Extensions { namespace ListenerFilters { namespace OriginalDst { -Network::Address::InstanceConstSharedPtr OriginalDstFilter::getOriginalDst(os_fd_t fd) { - return Network::Utility::getOriginalDst(fd); +Network::Address::InstanceConstSharedPtr OriginalDstFilter::getOriginalDst(Network::Socket& sock) { + return Network::Utility::getOriginalDst(sock); } Network::FilterStatus OriginalDstFilter::onAccept(Network::ListenerFilterCallbacks& cb) { ENVOY_LOG(debug, "original_dst: New connection accepted"); Network::ConnectionSocket& socket = cb.socket(); - const Network::Address::Instance& local_address = *socket.localAddress(); - if (local_address.type() == Network::Address::Type::Ip) { - Network::Address::InstanceConstSharedPtr original_local_address = - getOriginalDst(socket.ioHandle().fd()); + if (socket.addressType() == Network::Address::Type::Ip) { + Network::Address::InstanceConstSharedPtr original_local_address = getOriginalDst(socket); // A listener that has the use_original_dst flag set to true can still receive // connections that are NOT redirected using iptables. 
If a connection was not redirected, diff --git a/source/extensions/filters/listener/original_dst/original_dst.h b/source/extensions/filters/listener/original_dst/original_dst.h index 59c5cc0ee8860..836834a4d6581 100644 --- a/source/extensions/filters/listener/original_dst/original_dst.h +++ b/source/extensions/filters/listener/original_dst/original_dst.h @@ -14,7 +14,7 @@ namespace OriginalDst { */ class OriginalDstFilter : public Network::ListenerFilter, Logger::Loggable { public: - virtual Network::Address::InstanceConstSharedPtr getOriginalDst(os_fd_t fd); + virtual Network::Address::InstanceConstSharedPtr getOriginalDst(Network::Socket& sock); // Network::ListenerFilter Network::FilterStatus onAccept(Network::ListenerFilterCallbacks& cb) override; diff --git a/source/extensions/filters/listener/original_src/BUILD b/source/extensions/filters/listener/original_src/BUILD index 96fac9a170142..4240bb61f28a2 100644 --- a/source/extensions/filters/listener/original_src/BUILD +++ b/source/extensions/filters/listener/original_src/BUILD @@ -1,15 +1,15 @@ -licenses(["notice"]) # Apache 2 - -# A filter for mirroring the downstream remote address on the upstream's source. - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +# A filter for mirroring the downstream remote address on the upstream's source. 
+ +envoy_extension_package() envoy_cc_library( name = "config_lib", diff --git a/source/extensions/filters/listener/proxy_protocol/BUILD b/source/extensions/filters/listener/proxy_protocol/BUILD index f62e9940af1c0..810c99d4021f0 100644 --- a/source/extensions/filters/listener/proxy_protocol/BUILD +++ b/source/extensions/filters/listener/proxy_protocol/BUILD @@ -1,15 +1,15 @@ -licenses(["notice"]) # Apache 2 - -# Proxy protocol listener filter: https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +# Proxy protocol listener filter: https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt + +envoy_extension_package() envoy_cc_library( name = "proxy_protocol_lib", @@ -18,6 +18,7 @@ envoy_cc_library( "proxy_protocol.h", "proxy_protocol_header.h", ], + visibility = ["//visibility:public"], deps = [ "//include/envoy/event:dispatcher_interface", "//include/envoy/network:filter_interface", @@ -29,6 +30,9 @@ envoy_cc_library( "//source/common/common:utility_lib", "//source/common/network:address_lib", "//source/common/network:utility_lib", + "//source/extensions/common/proxy_protocol:proxy_protocol_header_lib", + "//source/extensions/filters/listener:well_known_names", + "@envoy_api//envoy/extensions/filters/listener/proxy_protocol/v3:pkg_cc_proto", ], ) @@ -36,6 +40,11 @@ envoy_cc_extension( name = "config", srcs = ["config.cc"], security_posture = "robust_to_untrusted_downstream", + # TODO(#9953) clean up. 
+ visibility = [ + "//:extension_config", + "//test/integration:__subpackages__", + ], deps = [ "//include/envoy/registry", "//include/envoy/server:filter_config_interface", diff --git a/source/extensions/filters/listener/proxy_protocol/config.cc b/source/extensions/filters/listener/proxy_protocol/config.cc index 0fa044542d08a..4641fcd6c9bf6 100644 --- a/source/extensions/filters/listener/proxy_protocol/config.cc +++ b/source/extensions/filters/listener/proxy_protocol/config.cc @@ -1,3 +1,5 @@ +#include + #include "envoy/extensions/filters/listener/proxy_protocol/v3/proxy_protocol.pb.h" #include "envoy/extensions/filters/listener/proxy_protocol/v3/proxy_protocol.pb.validate.h" #include "envoy/registry/registry.h" @@ -18,10 +20,16 @@ class ProxyProtocolConfigFactory : public Server::Configuration::NamedListenerFi public: // NamedListenerFilterConfigFactory Network::ListenerFilterFactoryCb createListenerFilterFactoryFromProto( - const Protobuf::Message&, + const Protobuf::Message& message, const Network::ListenerFilterMatcherSharedPtr& listener_filter_matcher, Server::Configuration::ListenerFactoryContext& context) override { - ConfigSharedPtr config(new Config(context.scope())); + + // downcast it to the proxy protocol config + const auto& proto_config = MessageUtil::downcastAndValidate< + const envoy::extensions::filters::listener::proxy_protocol::v3::ProxyProtocol&>( + message, context.messageValidationVisitor()); + + ConfigSharedPtr config = std::make_shared(context.scope(), proto_config); return [listener_filter_matcher, config](Network::ListenerFilterManager& filter_manager) -> void { filter_manager.addAcceptFilter(listener_filter_matcher, std::make_unique(config)); diff --git a/source/extensions/filters/listener/proxy_protocol/proxy_protocol.cc b/source/extensions/filters/listener/proxy_protocol/proxy_protocol.cc index 1c90e9c924ec2..c3029c2234cfc 100644 --- a/source/extensions/filters/listener/proxy_protocol/proxy_protocol.cc +++ 
b/source/extensions/filters/listener/proxy_protocol/proxy_protocol.cc @@ -1,6 +1,7 @@ #include "extensions/filters/listener/proxy_protocol/proxy_protocol.h" #include +#include #include #include #include @@ -15,16 +16,53 @@ #include "common/api/os_sys_calls_impl.h" #include "common/common/assert.h" #include "common/common/empty_string.h" +#include "common/common/fmt.h" #include "common/common/utility.h" #include "common/network/address_impl.h" #include "common/network/utility.h" +#include "extensions/common/proxy_protocol/proxy_protocol_header.h" +#include "extensions/filters/listener/well_known_names.h" + +using Envoy::Extensions::Common::ProxyProtocol::PROXY_PROTO_V1_SIGNATURE; +using Envoy::Extensions::Common::ProxyProtocol::PROXY_PROTO_V1_SIGNATURE_LEN; +using Envoy::Extensions::Common::ProxyProtocol::PROXY_PROTO_V2_ADDR_LEN_INET; +using Envoy::Extensions::Common::ProxyProtocol::PROXY_PROTO_V2_ADDR_LEN_INET6; +using Envoy::Extensions::Common::ProxyProtocol::PROXY_PROTO_V2_AF_INET; +using Envoy::Extensions::Common::ProxyProtocol::PROXY_PROTO_V2_AF_INET6; +using Envoy::Extensions::Common::ProxyProtocol::PROXY_PROTO_V2_HEADER_LEN; +using Envoy::Extensions::Common::ProxyProtocol::PROXY_PROTO_V2_LOCAL; +using Envoy::Extensions::Common::ProxyProtocol::PROXY_PROTO_V2_ONBEHALF_OF; +using Envoy::Extensions::Common::ProxyProtocol::PROXY_PROTO_V2_SIGNATURE; +using Envoy::Extensions::Common::ProxyProtocol::PROXY_PROTO_V2_SIGNATURE_LEN; +using Envoy::Extensions::Common::ProxyProtocol::PROXY_PROTO_V2_TRANSPORT_DGRAM; +using Envoy::Extensions::Common::ProxyProtocol::PROXY_PROTO_V2_TRANSPORT_STREAM; +using Envoy::Extensions::Common::ProxyProtocol::PROXY_PROTO_V2_VERSION; + namespace Envoy { namespace Extensions { namespace ListenerFilters { namespace ProxyProtocol { -Config::Config(Stats::Scope& scope) : stats_{ALL_PROXY_PROTOCOL_STATS(POOL_COUNTER(scope))} {} +Config::Config( + Stats::Scope& scope, + const 
envoy::extensions::filters::listener::proxy_protocol::v3::ProxyProtocol& proto_config) + : stats_{ALL_PROXY_PROTOCOL_STATS(POOL_COUNTER(scope))} { + for (const auto& rule : proto_config.rules()) { + tlv_types_[0xFF & rule.tlv_type()] = rule.on_tlv_present(); + } +} + +const KeyValuePair* Config::isTlvTypeNeeded(uint8_t type) const { + auto tlv_type = tlv_types_.find(type); + if (tlv_types_.end() != tlv_type) { + return &tlv_type->second; + } + + return nullptr; +} + +size_t Config::numberOfNeededTlvTypes() const { return tlv_types_.size(); } Network::FilterStatus Filter::onAccept(Network::ListenerFilterCallbacks& cb) { ENVOY_LOG(debug, "proxy_protocol: New connection accepted"); @@ -54,7 +92,7 @@ void Filter::onReadWorker() { Network::ConnectionSocket& socket = cb_->socket(); if ((!proxy_protocol_header_.has_value() && !readProxyHeader(socket.ioHandle().fd())) || - (proxy_protocol_header_.has_value() && !parseExtensions(socket.ioHandle().fd()))) { + (proxy_protocol_header_.has_value() && !readExtensions(socket.ioHandle().fd()))) { // We return if a) we do not yet have the header, or b) we have the header but not yet all // the extension data. In both cases we'll be called again when the socket is ready to read // and pick up where we left off. @@ -231,11 +269,10 @@ void Filter::parseV1Header(char* buf, size_t len) { } } -bool Filter::parseExtensions(os_fd_t fd) { +bool Filter::parseExtensions(os_fd_t fd, uint8_t* buf, size_t buf_size, size_t* buf_off) { // If we ever implement extensions elsewhere, be sure to // continue to skip and ignore those for LOCAL. 
while (proxy_protocol_header_.value().extensions_length_) { - // buf_ is no longer in use so we re-use it to read/discard int bytes_avail; auto& os_syscalls = Api::OsSysCallsSingleton::get(); if (os_syscalls.ioctl(fd, FIONREAD, &bytes_avail).rc_ < 0) { @@ -244,14 +281,104 @@ bool Filter::parseExtensions(os_fd_t fd) { if (bytes_avail == 0) { return false; } - bytes_avail = std::min(size_t(bytes_avail), sizeof(buf_)); + bytes_avail = std::min(size_t(bytes_avail), buf_size); bytes_avail = std::min(size_t(bytes_avail), proxy_protocol_header_.value().extensions_length_); - const Api::SysCallSizeResult recv_result = os_syscalls.recv(fd, buf_, bytes_avail, 0); + buf += (nullptr != buf_off) ? *buf_off : 0; + const Api::SysCallSizeResult recv_result = os_syscalls.recv(fd, buf, bytes_avail, 0); if (recv_result.rc_ != bytes_avail) { throw EnvoyException("failed to read proxy protocol extension"); } proxy_protocol_header_.value().extensions_length_ -= recv_result.rc_; + + if (nullptr != buf_off) { + *buf_off += recv_result.rc_; + } } + + return true; +} + +/** + * @note A TLV is arranged in the following format: + * struct pp2_tlv { + * uint8_t type; + * uint8_t length_hi; + * uint8_t length_lo; + * uint8_t value[0]; + * }; + * See https://www.haproxy.org/download/2.1/doc/proxy-protocol.txt for details + */ +void Filter::parseTlvs(const std::vector& tlvs) { + size_t idx{0}; + while (idx < tlvs.size()) { + const uint8_t tlv_type = tlvs[idx]; + idx++; + + if ((idx + 1) >= tlvs.size()) { + throw EnvoyException( + fmt::format("failed to read proxy protocol extension. No bytes for TLV length. " + "Extension length is {}, current index is {}, current type is {}.", + tlvs.size(), idx, tlv_type)); + } + + const uint8_t tlv_length_upper = tlvs[idx]; + const uint8_t tlv_length_lower = tlvs[idx + 1]; + const size_t tlv_value_length = (tlv_length_upper << 8) + tlv_length_lower; + idx += 2; + + // Get the value. 
+ if ((idx + tlv_value_length - 1) >= tlvs.size()) { + throw EnvoyException( + fmt::format("failed to read proxy protocol extension. No bytes for TLV value. " + "Extension length is {}, current index is {}, current type is {}, current " + "value length is {}.", + tlvs.size(), idx, tlv_type, tlv_length_upper)); + } + + // Only save to dynamic metadata if this type of TLV is needed. + auto key_value_pair = config_->isTlvTypeNeeded(tlv_type); + if (nullptr != key_value_pair) { + ProtobufWkt::Value metadata_value; + metadata_value.set_string_value(reinterpret_cast(tlvs.data() + idx), + tlv_value_length); + + std::string metadata_key = key_value_pair->metadata_namespace().empty() + ? ListenerFilterNames::get().ProxyProtocol + : key_value_pair->metadata_namespace(); + + ProtobufWkt::Struct metadata( + (*cb_->dynamicMetadata().mutable_filter_metadata())[metadata_key]); + metadata.mutable_fields()->insert({key_value_pair->key(), metadata_value}); + cb_->setDynamicMetadata(metadata_key, metadata); + } else { + ENVOY_LOG(trace, "proxy_protocol: Skip TLV of type {} since it's not needed", tlv_type); + } + + idx += tlv_value_length; + ASSERT(idx <= tlvs.size()); + } +} + +bool Filter::readExtensions(os_fd_t fd) { + // Parse and discard the extensions if this is a local command or there's no TLV needs to be saved + // to metadata. + if (proxy_protocol_header_.value().local_command_ || 0 == config_->numberOfNeededTlvTypes()) { + // buf_ is no longer in use so we re-use it to read/discard. + return parseExtensions(fd, reinterpret_cast(buf_), sizeof(buf_), nullptr); + } + + // Initialize the buf_tlv_ only when we need to read the TLVs. + if (buf_tlv_.empty()) { + buf_tlv_.resize(proxy_protocol_header_.value().extensions_length_); + } + + // Parse until we have all the TLVs in buf_tlv. 
+ if (!parseExtensions(fd, buf_tlv_.data(), buf_tlv_.size(), &buf_tlv_off_)) { + return false; + } + + parseTlvs(buf_tlv_); + return true; } diff --git a/source/extensions/filters/listener/proxy_protocol/proxy_protocol.h b/source/extensions/filters/listener/proxy_protocol/proxy_protocol.h index 4b23d470dc400..26ee119f5d3d4 100644 --- a/source/extensions/filters/listener/proxy_protocol/proxy_protocol.h +++ b/source/extensions/filters/listener/proxy_protocol/proxy_protocol.h @@ -1,19 +1,29 @@ #pragma once #include "envoy/event/file_event.h" +#include "envoy/extensions/filters/listener/proxy_protocol/v3/proxy_protocol.pb.h" #include "envoy/network/filter.h" #include "envoy/stats/scope.h" #include "envoy/stats/stats_macros.h" #include "common/common/logger.h" +#include "extensions/common/proxy_protocol/proxy_protocol_header.h" + +#include "absl/container/flat_hash_map.h" #include "proxy_protocol_header.h" +using Envoy::Extensions::Common::ProxyProtocol::PROXY_PROTO_V2_ADDR_LEN_UNIX; +using Envoy::Extensions::Common::ProxyProtocol::PROXY_PROTO_V2_HEADER_LEN; + namespace Envoy { namespace Extensions { namespace ListenerFilters { namespace ProxyProtocol { +using KeyValuePair = + envoy::extensions::filters::listener::proxy_protocol::v3::ProxyProtocol::KeyValuePair; + /** * All stats for the proxy protocol. @see stats_macros.h */ @@ -32,11 +42,27 @@ struct ProxyProtocolStats { /** * Global configuration for Proxy Protocol listener filter. */ -class Config { +class Config : public Logger::Loggable { public: - Config(Stats::Scope& scope); + Config( + Stats::Scope& scope, + const envoy::extensions::filters::listener::proxy_protocol::v3::ProxyProtocol& proto_config); ProxyProtocolStats stats_; + + /** + * Return null if the type of TLV is not needed otherwise a pointer to the KeyValuePair for + * emitting to dynamic metadata. + */ + const KeyValuePair* isTlvTypeNeeded(uint8_t type) const; + + /** + * Number of TLV types that need to be parsed and saved to dynamic metadata. 
+ */ + size_t numberOfNeededTlvTypes() const; + +private: + absl::flat_hash_map tlv_types_; }; using ConfigSharedPtr = std::shared_ptr; @@ -79,7 +105,9 @@ class Filter : public Network::ListenerFilter, Logger::Loggable& tlvs); + bool readExtensions(os_fd_t fd); /** * Given a char * & len, parse the header as per spec @@ -102,6 +130,16 @@ class Filter : public Network::ListenerFilter, Logger::Loggable buf_tlv_; + + /** + * The index in buf_tlv_ that has been fully read. + */ + size_t buf_tlv_off_{}; + ConfigSharedPtr config_; absl::optional proxy_protocol_header_; diff --git a/source/extensions/filters/listener/proxy_protocol/proxy_protocol_header.h b/source/extensions/filters/listener/proxy_protocol/proxy_protocol_header.h index 63c3c96eadf02..c451c8f5e1c7c 100644 --- a/source/extensions/filters/listener/proxy_protocol/proxy_protocol_header.h +++ b/source/extensions/filters/listener/proxy_protocol/proxy_protocol_header.h @@ -9,22 +9,6 @@ namespace Extensions { namespace ListenerFilters { namespace ProxyProtocol { -// See https://github.com/haproxy/haproxy/blob/master/doc/proxy-protocol.txt for definitions - -// TODO(wez470): Refactor listener filter to use common proxy proto constants -constexpr char PROXY_PROTO_V1_SIGNATURE[] = "PROXY "; -constexpr uint32_t PROXY_PROTO_V1_SIGNATURE_LEN = 6; -constexpr char PROXY_PROTO_V2_SIGNATURE[] = "\x0d\x0a\x0d\x0a\x00\x0d\x0a\x51\x55\x49\x54\x0a"; -constexpr uint32_t PROXY_PROTO_V2_SIGNATURE_LEN = 12; -constexpr uint32_t PROXY_PROTO_V2_HEADER_LEN = 16; -constexpr uint32_t PROXY_PROTO_V2_VERSION = 0x2; -constexpr uint32_t PROXY_PROTO_V2_ONBEHALF_OF = 0x1; -constexpr uint32_t PROXY_PROTO_V2_LOCAL = 0x0; - -constexpr uint32_t PROXY_PROTO_V2_AF_INET = 0x1; -constexpr uint32_t PROXY_PROTO_V2_AF_INET6 = 0x2; -constexpr uint32_t PROXY_PROTO_V2_AF_UNIX = 0x3; - struct WireHeader { WireHeader(size_t extensions_length) : extensions_length_(extensions_length), protocol_version_(Network::Address::IpVersion::v4), @@ -44,14 +28,6 @@ struct 
WireHeader { const bool local_command_; }; -constexpr uint32_t PROXY_PROTO_V2_ADDR_LEN_UNSPEC = 0; -constexpr uint32_t PROXY_PROTO_V2_ADDR_LEN_INET = 12; -constexpr uint32_t PROXY_PROTO_V2_ADDR_LEN_INET6 = 36; -constexpr uint32_t PROXY_PROTO_V2_ADDR_LEN_UNIX = 216; - -constexpr uint8_t PROXY_PROTO_V2_TRANSPORT_STREAM = 0x1; -constexpr uint8_t PROXY_PROTO_V2_TRANSPORT_DGRAM = 0x2; - } // namespace ProxyProtocol } // namespace ListenerFilters } // namespace Extensions diff --git a/source/extensions/filters/listener/tls_inspector/BUILD b/source/extensions/filters/listener/tls_inspector/BUILD index d400c3534e289..35a163b26b994 100644 --- a/source/extensions/filters/listener/tls_inspector/BUILD +++ b/source/extensions/filters/listener/tls_inspector/BUILD @@ -1,22 +1,27 @@ -licenses(["notice"]) # Apache 2 - -# TLS inspector filter for examining various TLS parameters before routing to a FilterChain. -# Public docs: docs/root/configuration/listener_filters/tls_inspector.rst - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +# TLS inspector filter for examining various TLS parameters before routing to a FilterChain. +# Public docs: docs/root/configuration/listener_filters/tls_inspector.rst + +envoy_extension_package() envoy_cc_library( name = "tls_inspector_lib", srcs = ["tls_inspector.cc"], hdrs = ["tls_inspector.h"], external_deps = ["ssl"], + # TODO(#9953) clean up. + visibility = [ + "//:extension_config", + "//test/integration:__subpackages__", + ], deps = [ "//include/envoy/event:dispatcher_interface", "//include/envoy/event:timer_interface", @@ -33,6 +38,11 @@ envoy_cc_extension( name = "config", srcs = ["config.cc"], security_posture = "robust_to_untrusted_downstream", + # TODO(#9953) clean up. 
+ visibility = [ + "//:extension_config", + "//test/integration:__subpackages__", + ], deps = [ "//include/envoy/registry", "//include/envoy/server:filter_config_interface", diff --git a/source/extensions/filters/listener/tls_inspector/tls_inspector.cc b/source/extensions/filters/listener/tls_inspector/tls_inspector.cc index aa9dc7a7d6031..38ea9324b243f 100644 --- a/source/extensions/filters/listener/tls_inspector/tls_inspector.cc +++ b/source/extensions/filters/listener/tls_inspector/tls_inspector.cc @@ -170,7 +170,7 @@ ParseState Filter::onRead() { config_->maxClientHelloSize(), MSG_PEEK); ENVOY_LOG(trace, "tls inspector: recv: {}", result.rc_); - if (result.rc_ == -1 && result.errno_ == EAGAIN) { + if (SOCKET_FAILURE(result.rc_) && result.errno_ == SOCKET_ERROR_AGAIN) { return ParseState::Continue; } else if (result.rc_ < 0) { config_->stats().read_error_.inc(); diff --git a/source/extensions/filters/network/BUILD b/source/extensions/filters/network/BUILD index 7a4780afbdab2..790ddc806157a 100644 --- a/source/extensions/filters/network/BUILD +++ b/source/extensions/filters/network/BUILD @@ -1,16 +1,18 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +envoy_extension_package() envoy_cc_library( name = "well_known_names", hdrs = ["well_known_names.h"], + # Well known names are public. 
+ visibility = ["//visibility:public"], deps = [ "//source/common/config:well_known_names", "//source/common/singleton:const_singleton", diff --git a/source/extensions/filters/network/client_ssl_auth/BUILD b/source/extensions/filters/network/client_ssl_auth/BUILD index d2f50785404d5..d77c4abae5949 100644 --- a/source/extensions/filters/network/client_ssl_auth/BUILD +++ b/source/extensions/filters/network/client_ssl_auth/BUILD @@ -1,16 +1,16 @@ -licenses(["notice"]) # Apache 2 - -# Client SSL authorization L4 network filter -# Public docs: docs/root/configuration/network_filters/client_ssl_auth_filter.rst - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +# Client SSL authorization L4 network filter +# Public docs: docs/root/configuration/network_filters/client_ssl_auth_filter.rst + +envoy_extension_package() envoy_cc_library( name = "client_ssl_auth", diff --git a/source/extensions/filters/network/client_ssl_auth/client_ssl_auth.cc b/source/extensions/filters/network/client_ssl_auth/client_ssl_auth.cc index 67af3ff10bbf3..4892e8107c612 100644 --- a/source/extensions/filters/network/client_ssl_auth/client_ssl_auth.cc +++ b/source/extensions/filters/network/client_ssl_auth/client_ssl_auth.cc @@ -24,12 +24,12 @@ namespace ClientSslAuth { ClientSslAuthConfig::ClientSslAuthConfig( const envoy::extensions::filters::network::client_ssl_auth::v3::ClientSSLAuth& config, ThreadLocal::SlotAllocator& tls, Upstream::ClusterManager& cm, Event::Dispatcher& dispatcher, - Stats::Scope& scope, Runtime::RandomGenerator& random) + Stats::Scope& scope, Random::RandomGenerator& random) : RestApiFetcher( cm, config.auth_api_cluster(), dispatcher, random, std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT(config, refresh_delay, 60000)), std::chrono::milliseconds(1000)), - tls_(tls.allocateSlot()), ip_white_list_(config.ip_white_list()), + 
tls_(tls.allocateSlot()), ip_allowlist_(config.ip_white_list()), stats_(generateStats(scope, config.stat_prefix())) { if (!cm.get(remote_cluster_name_)) { @@ -45,7 +45,7 @@ ClientSslAuthConfig::ClientSslAuthConfig( ClientSslAuthConfigSharedPtr ClientSslAuthConfig::create( const envoy::extensions::filters::network::client_ssl_auth::v3::ClientSSLAuth& config, ThreadLocal::SlotAllocator& tls, Upstream::ClusterManager& cm, Event::Dispatcher& dispatcher, - Stats::Scope& scope, Runtime::RandomGenerator& random) { + Stats::Scope& scope, Random::RandomGenerator& random) { ClientSslAuthConfigSharedPtr new_config( new ClientSslAuthConfig(config, tls, cm, dispatcher, scope, random)); new_config->initialize(); @@ -111,8 +111,8 @@ void ClientSslAuthFilter::onEvent(Network::ConnectionEvent event) { } ASSERT(read_callbacks_->connection().ssl()); - if (config_->ipWhiteList().contains(*read_callbacks_->connection().remoteAddress())) { - config_->stats().auth_ip_white_list_.inc(); + if (config_->ipAllowlist().contains(*read_callbacks_->connection().remoteAddress())) { + config_->stats().auth_ip_allowlist_.inc(); read_callbacks_->continueReading(); return; } diff --git a/source/extensions/filters/network/client_ssl_auth/client_ssl_auth.h b/source/extensions/filters/network/client_ssl_auth/client_ssl_auth.h index 967a0903e9073..53422e5f48d89 100644 --- a/source/extensions/filters/network/client_ssl_auth/client_ssl_auth.h +++ b/source/extensions/filters/network/client_ssl_auth/client_ssl_auth.h @@ -3,12 +3,11 @@ #include #include #include -#include +#include "envoy/common/random_generator.h" #include "envoy/config/subscription.h" #include "envoy/extensions/filters/network/client_ssl_auth/v3/client_ssl_auth.pb.h" #include "envoy/network/filter.h" -#include "envoy/runtime/runtime.h" #include "envoy/stats/scope.h" #include "envoy/stats/stats_macros.h" #include "envoy/thread_local/thread_local.h" @@ -19,6 +18,8 @@ #include "common/network/utility.h" #include "common/protobuf/utility.h" 
+#include "absl/container/node_hash_set.h" + namespace Envoy { namespace Extensions { namespace NetworkFilters { @@ -30,7 +31,7 @@ namespace ClientSslAuth { #define ALL_CLIENT_SSL_AUTH_STATS(COUNTER, GAUGE) \ COUNTER(auth_digest_match) \ COUNTER(auth_digest_no_match) \ - COUNTER(auth_ip_white_list) \ + COUNTER(auth_ip_allowlist) \ COUNTER(auth_no_ssl) \ COUNTER(update_failure) \ COUNTER(update_success) \ @@ -59,7 +60,7 @@ class AllowedPrincipals : public ThreadLocal::ThreadLocalObject { size_t size() const { return allowed_sha256_digests_.size(); } private: - std::unordered_set allowed_sha256_digests_; + absl::node_hash_set allowed_sha256_digests_; }; using AllowedPrincipalsSharedPtr = std::shared_ptr; @@ -70,24 +71,24 @@ using ClientSslAuthConfigSharedPtr = std::shared_ptr; /** * Global configuration for client SSL authentication. The config contacts a JSON API to fetch the * list of allowed principals, caches it, then makes auth decisions on it and any associated IP - * white list. + * allowlist. 
*/ class ClientSslAuthConfig : public Http::RestApiFetcher { public: static ClientSslAuthConfigSharedPtr create(const envoy::extensions::filters::network::client_ssl_auth::v3::ClientSSLAuth& config, ThreadLocal::SlotAllocator& tls, Upstream::ClusterManager& cm, - Event::Dispatcher& dispatcher, Stats::Scope& scope, Runtime::RandomGenerator& random); + Event::Dispatcher& dispatcher, Stats::Scope& scope, Random::RandomGenerator& random); const AllowedPrincipals& allowedPrincipals(); - const Network::Address::IpList& ipWhiteList() { return ip_white_list_; } + const Network::Address::IpList& ipAllowlist() { return ip_allowlist_; } GlobalStats& stats() { return stats_; } private: ClientSslAuthConfig( const envoy::extensions::filters::network::client_ssl_auth::v3::ClientSSLAuth& config, ThreadLocal::SlotAllocator& tls, Upstream::ClusterManager& cm, Event::Dispatcher& dispatcher, - Stats::Scope& scope, Runtime::RandomGenerator& random); + Stats::Scope& scope, Random::RandomGenerator& random); static GlobalStats generateStats(Stats::Scope& scope, const std::string& prefix); @@ -98,7 +99,7 @@ class ClientSslAuthConfig : public Http::RestApiFetcher { void onFetchFailure(Config::ConfigUpdateFailureReason reason, const EnvoyException* e) override; ThreadLocal::SlotPtr tls_; - Network::Address::IpList ip_white_list_; + Network::Address::IpList ip_allowlist_; GlobalStats stats_; }; diff --git a/source/extensions/filters/network/common/BUILD b/source/extensions/filters/network/common/BUILD index dcbf6142308e6..09249e4000508 100644 --- a/source/extensions/filters/network/common/BUILD +++ b/source/extensions/filters/network/common/BUILD @@ -1,16 +1,18 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +envoy_extension_package() envoy_cc_library( name = "factory_base_lib", hdrs = ["factory_base.h"], + # Used by core. 
TODO(#9953) clean up. + visibility = ["//visibility:public"], deps = [ "//include/envoy/server:filter_config_interface", ], @@ -19,6 +21,8 @@ envoy_cc_library( envoy_cc_library( name = "utility_lib", hdrs = ["utility.h"], + # Used by core. TODO(#9953) clean up. + visibility = ["//visibility:public"], deps = [ "//include/envoy/runtime:runtime_interface", "//source/common/common:macros", diff --git a/source/extensions/filters/network/common/factory_base.h b/source/extensions/filters/network/common/factory_base.h index d11eca0ec7501..cad30901c11e6 100644 --- a/source/extensions/filters/network/common/factory_base.h +++ b/source/extensions/filters/network/common/factory_base.h @@ -1,6 +1,7 @@ #pragma once #include "envoy/server/filter_config.h" +#include "envoy/server/transport_socket_config.h" #include "envoy/upstream/upstream.h" namespace Envoy { @@ -31,11 +32,12 @@ class FactoryBase : public Server::Configuration::NamedNetworkFilterConfigFactor return std::make_unique(); } - Upstream::ProtocolOptionsConfigConstSharedPtr - createProtocolOptionsConfig(const Protobuf::Message& proto_config, - ProtobufMessage::ValidationVisitor& validation_visitor) override { + Upstream::ProtocolOptionsConfigConstSharedPtr createProtocolOptionsConfig( + const Protobuf::Message& proto_config, + Server::Configuration::ProtocolOptionsFactoryContext& factory_context) override { return createProtocolOptionsTyped(MessageUtil::downcastAndValidate( - proto_config, validation_visitor)); + proto_config, factory_context.messageValidationVisitor()), + factory_context); } std::string name() const override { return name_; } @@ -52,7 +54,8 @@ class FactoryBase : public Server::Configuration::NamedNetworkFilterConfigFactor Server::Configuration::FactoryContext& context) PURE; virtual Upstream::ProtocolOptionsConfigConstSharedPtr - createProtocolOptionsTyped(const ProtocolOptionsProto&) { + createProtocolOptionsTyped(const ProtocolOptionsProto&, + Server::Configuration::ProtocolOptionsFactoryContext&) { 
throw EnvoyException(fmt::format("filter {} does not support protocol options", name_)); } diff --git a/source/extensions/filters/network/common/redis/BUILD b/source/extensions/filters/network/common/redis/BUILD index a7adc168788fa..5c0393d36a627 100644 --- a/source/extensions/filters/network/common/redis/BUILD +++ b/source/extensions/filters/network/common/redis/BUILD @@ -1,12 +1,12 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +envoy_extension_package() envoy_cc_library( name = "codec_interface", @@ -97,5 +97,6 @@ envoy_cc_library( "//source/common/common:utility_lib", "//source/common/stats:symbol_table_lib", "//source/common/stats:timespan_lib", + "//source/common/stats:utility_lib", ], ) diff --git a/source/extensions/filters/network/common/redis/client.h b/source/extensions/filters/network/common/redis/client.h index b420438ac55f2..f0c573f92f824 100644 --- a/source/extensions/filters/network/common/redis/client.h +++ b/source/extensions/filters/network/common/redis/client.h @@ -106,7 +106,7 @@ class Client : public Event::DeferredDeletable { * Initialize the connection. Issue the auth command and readonly command as needed. * @param auth password for upstream host. */ - virtual void initialize(const std::string& auth_password) PURE; + virtual void initialize(const std::string& auth_username, const std::string& auth_password) PURE; }; using ClientPtr = std::unique_ptr; @@ -114,7 +114,7 @@ using ClientPtr = std::unique_ptr; /** * Read policy to use for Redis cluster. */ -enum class ReadPolicy { Master, PreferMaster, Replica, PreferReplica, Any }; +enum class ReadPolicy { Primary, PreferPrimary, Replica, PreferReplica, Any }; /** * Configuration for a redis connection pool. 
@@ -186,6 +186,8 @@ class Config { virtual ReadPolicy readPolicy() const PURE; }; +using ConfigSharedPtr = std::shared_ptr; + /** * A factory for individual redis client connections. */ @@ -206,7 +208,8 @@ class ClientFactory { virtual ClientPtr create(Upstream::HostConstSharedPtr host, Event::Dispatcher& dispatcher, const Config& config, const RedisCommandStatsSharedPtr& redis_command_stats, - Stats::Scope& scope, const std::string& auth_password) PURE; + Stats::Scope& scope, const std::string& auth_username, + const std::string& auth_password) PURE; }; } // namespace Client diff --git a/source/extensions/filters/network/common/redis/client_impl.cc b/source/extensions/filters/network/common/redis/client_impl.cc index 9643b725a0111..6cdc7b8ad0077 100644 --- a/source/extensions/filters/network/common/redis/client_impl.cc +++ b/source/extensions/filters/network/common/redis/client_impl.cc @@ -31,11 +31,11 @@ ConfigImpl::ConfigImpl( enable_command_stats_(config.enable_command_stats()) { switch (config.read_policy()) { case envoy::extensions::filters::network::redis_proxy::v3::RedisProxy::ConnPoolSettings::MASTER: - read_policy_ = ReadPolicy::Master; + read_policy_ = ReadPolicy::Primary; break; case envoy::extensions::filters::network::redis_proxy::v3::RedisProxy::ConnPoolSettings:: PREFER_MASTER: - read_policy_ = ReadPolicy::PreferMaster; + read_policy_ = ReadPolicy::PreferPrimary; break; case envoy::extensions::filters::network::redis_proxy::v3::RedisProxy::ConnPoolSettings::REPLICA: read_policy_ = ReadPolicy::Replica; @@ -240,7 +240,11 @@ void ClientImpl::onRespValue(RespValuePtr&& value) { } } if (!redirected) { - callbacks.onResponse(std::move(value)); + if (err[0] == RedirectionResponse::get().CLUSTER_DOWN) { + callbacks.onFailure(); + } else { + callbacks.onResponse(std::move(value)); + } } } else { callbacks.onResponse(std::move(value)); @@ -285,16 +289,20 @@ void ClientImpl::PendingRequest::cancel() { canceled_ = true; } -void ClientImpl::initialize(const 
std::string& auth_password) { - if (!auth_password.empty()) { +void ClientImpl::initialize(const std::string& auth_username, const std::string& auth_password) { + if (!auth_username.empty()) { + // Send an AUTH command to the upstream server with username and password. + Utility::AuthRequest auth_request(auth_username, auth_password); + makeRequest(auth_request, null_pool_callbacks); + } else if (!auth_password.empty()) { // Send an AUTH command to the upstream server. Utility::AuthRequest auth_request(auth_password); makeRequest(auth_request, null_pool_callbacks); } // Any connection to replica requires the READONLY command in order to perform read. - // Also the READONLY command is a no-opt for the master. + // Also the READONLY command is a no-opt for the primary. // We only need to send the READONLY command iff it's possible that the host is a replica. - if (config_.readPolicy() != Common::Redis::Client::ReadPolicy::Master) { + if (config_.readPolicy() != Common::Redis::Client::ReadPolicy::Primary) { makeRequest(Utility::ReadOnlyRequest::instance(), null_pool_callbacks); } } @@ -304,10 +312,11 @@ ClientFactoryImpl ClientFactoryImpl::instance_; ClientPtr ClientFactoryImpl::create(Upstream::HostConstSharedPtr host, Event::Dispatcher& dispatcher, const Config& config, const RedisCommandStatsSharedPtr& redis_command_stats, - Stats::Scope& scope, const std::string& auth_password) { + Stats::Scope& scope, const std::string& auth_username, + const std::string& auth_password) { ClientPtr client = ClientImpl::create(host, dispatcher, EncoderPtr{new EncoderImpl()}, decoder_factory_, config, redis_command_stats, scope); - client->initialize(auth_password); + client->initialize(auth_username, auth_password); return client; } diff --git a/source/extensions/filters/network/common/redis/client_impl.h b/source/extensions/filters/network/common/redis/client_impl.h index 5d7bcb182ea87..ca0eb5f21dbe6 100644 --- a/source/extensions/filters/network/common/redis/client_impl.h +++ 
b/source/extensions/filters/network/common/redis/client_impl.h @@ -31,6 +31,7 @@ namespace Client { struct RedirectionValues { const std::string ASK = "ASK"; const std::string MOVED = "MOVED"; + const std::string CLUSTER_DOWN = "CLUSTERDOWN"; }; using RedirectionResponse = ConstSingleton; @@ -87,7 +88,7 @@ class ClientImpl : public Client, public DecoderCallbacks, public Network::Conne PoolRequest* makeRequest(const RespValue& request, ClientCallbacks& callbacks) override; bool active() override { return !pending_requests_.empty(); } void flushBufferAndResetTimer(); - void initialize(const std::string& auth_password) override; + void initialize(const std::string& auth_username, const std::string& auth_password) override; private: friend class RedisClientImplTest; @@ -151,7 +152,8 @@ class ClientFactoryImpl : public ClientFactory { // RedisProxy::ConnPool::ClientFactoryImpl ClientPtr create(Upstream::HostConstSharedPtr host, Event::Dispatcher& dispatcher, const Config& config, const RedisCommandStatsSharedPtr& redis_command_stats, - Stats::Scope& scope, const std::string& auth_password) override; + Stats::Scope& scope, const std::string& auth_username, + const std::string& auth_password) override; static ClientFactoryImpl instance_; diff --git a/source/extensions/filters/network/common/redis/redis_command_stats.cc b/source/extensions/filters/network/common/redis/redis_command_stats.cc index 02307dc9c1c24..5a6509cf3ae6c 100644 --- a/source/extensions/filters/network/common/redis/redis_command_stats.cc +++ b/source/extensions/filters/network/common/redis/redis_command_stats.cc @@ -1,6 +1,7 @@ #include "extensions/filters/network/common/redis/redis_command_stats.h" #include "common/stats/timespan_impl.h" +#include "common/stats/utility.h" #include "extensions/filters/network/common/redis/supported_commands.h" @@ -32,33 +33,20 @@ RedisCommandStats::RedisCommandStats(Stats::SymbolTable& symbol_table, const std 
Extensions::NetworkFilters::Common::Redis::SupportedCommands::mset()); } -Stats::Counter& RedisCommandStats::counter(Stats::Scope& scope, - const Stats::StatNameVec& stat_names) { - const Stats::SymbolTable::StoragePtr storage_ptr = symbol_table_.join(stat_names); - Stats::StatName full_stat_name = Stats::StatName(storage_ptr.get()); - return scope.counterFromStatName(full_stat_name); -} - -Stats::Histogram& RedisCommandStats::histogram(Stats::Scope& scope, - const Stats::StatNameVec& stat_names, - Stats::Histogram::Unit unit) { - const Stats::SymbolTable::StoragePtr storage_ptr = symbol_table_.join(stat_names); - Stats::StatName full_stat_name = Stats::StatName(storage_ptr.get()); - return scope.histogramFromStatName(full_stat_name, unit); -} - Stats::TimespanPtr RedisCommandStats::createCommandTimer(Stats::Scope& scope, Stats::StatName command, Envoy::TimeSource& time_source) { return std::make_unique( - histogram(scope, {prefix_, command, latency_}, Stats::Histogram::Unit::Microseconds), + Stats::Utility::histogramFromStatNames(scope, {prefix_, command, latency_}, + Stats::Histogram::Unit::Microseconds), time_source); } Stats::TimespanPtr RedisCommandStats::createAggregateTimer(Stats::Scope& scope, Envoy::TimeSource& time_source) { return std::make_unique( - histogram(scope, {prefix_, upstream_rq_time_}, Stats::Histogram::Unit::Microseconds), + Stats::Utility::histogramFromStatNames(scope, {prefix_, upstream_rq_time_}, + Stats::Histogram::Unit::Microseconds), time_source); } @@ -84,16 +72,13 @@ Stats::StatName RedisCommandStats::getCommandFromRequest(const RespValue& reques } void RedisCommandStats::updateStatsTotal(Stats::Scope& scope, Stats::StatName command) { - counter(scope, {prefix_, command, total_}).inc(); + Stats::Utility::counterFromStatNames(scope, {prefix_, command, total_}).inc(); } void RedisCommandStats::updateStats(Stats::Scope& scope, Stats::StatName command, const bool success) { - if (success) { - counter(scope, {prefix_, command, 
success_}).inc(); - } else { - counter(scope, {prefix_, command, failure_}).inc(); - } + Stats::StatName status = success ? success_ : failure_; + Stats::Utility::counterFromStatNames(scope, {prefix_, command, status}).inc(); } } // namespace Redis diff --git a/source/extensions/filters/network/common/redis/redis_command_stats.h b/source/extensions/filters/network/common/redis/redis_command_stats.h index a2870ea4003e3..5dddb9f8303c9 100644 --- a/source/extensions/filters/network/common/redis/redis_command_stats.h +++ b/source/extensions/filters/network/common/redis/redis_command_stats.h @@ -28,9 +28,6 @@ class RedisCommandStats { return std::make_shared(symbol_table, "upstream_commands"); } - Stats::Counter& counter(Stats::Scope& scope, const Stats::StatNameVec& stat_names); - Stats::Histogram& histogram(Stats::Scope& scope, const Stats::StatNameVec& stat_names, - Stats::Histogram::Unit unit); Stats::TimespanPtr createCommandTimer(Stats::Scope& scope, Stats::StatName command, Envoy::TimeSource& time_source); Stats::TimespanPtr createAggregateTimer(Stats::Scope& scope, Envoy::TimeSource& time_source); diff --git a/source/extensions/filters/network/common/redis/utility.cc b/source/extensions/filters/network/common/redis/utility.cc index c652addb3e12d..773196dd70e21 100644 --- a/source/extensions/filters/network/common/redis/utility.cc +++ b/source/extensions/filters/network/common/redis/utility.cc @@ -19,6 +19,18 @@ AuthRequest::AuthRequest(const std::string& password) { asArray().swap(values); } +AuthRequest::AuthRequest(const std::string& username, const std::string& password) { + std::vector values(3); + values[0].type(RespType::BulkString); + values[0].asString() = "auth"; + values[1].type(RespType::BulkString); + values[1].asString() = username; + values[2].type(RespType::BulkString); + values[2].asString() = password; + type(RespType::Array); + asArray().swap(values); +} + RespValuePtr makeError(const std::string& error) { Common::Redis::RespValuePtr 
response(new RespValue()); response->type(Common::Redis::RespType::Error); diff --git a/source/extensions/filters/network/common/redis/utility.h b/source/extensions/filters/network/common/redis/utility.h index b2e77b8e94ab4..ca5774d2d3a6c 100644 --- a/source/extensions/filters/network/common/redis/utility.h +++ b/source/extensions/filters/network/common/redis/utility.h @@ -13,6 +13,7 @@ namespace Utility { class AuthRequest : public Redis::RespValue { public: + AuthRequest(const std::string& username, const std::string& password); AuthRequest(const std::string& password); }; diff --git a/source/extensions/filters/network/common/utility.h b/source/extensions/filters/network/common/utility.h index 8c499cf1eb498..54a458aa7b62e 100644 --- a/source/extensions/filters/network/common/utility.h +++ b/source/extensions/filters/network/common/utility.h @@ -1,7 +1,5 @@ #pragma once -#include - #include "common/common/macros.h" #include "extensions/common/utility.h" diff --git a/source/extensions/filters/network/direct_response/BUILD b/source/extensions/filters/network/direct_response/BUILD index e5037679e9003..a7ed6d274a1fa 100644 --- a/source/extensions/filters/network/direct_response/BUILD +++ b/source/extensions/filters/network/direct_response/BUILD @@ -1,16 +1,16 @@ -licenses(["notice"]) # Apache 2 - -# Direct response L4 network filter. -# Public docs: docs/root/configuration/network_filters/direct_response_filter.rst - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +# Direct response L4 network filter. 
+# Public docs: docs/root/configuration/network_filters/direct_response_filter.rst + +envoy_extension_package() envoy_cc_library( name = "filter", diff --git a/source/extensions/filters/network/dubbo_proxy/BUILD b/source/extensions/filters/network/dubbo_proxy/BUILD index cb61a5bfc9893..bf83e91ad0fd1 100644 --- a/source/extensions/filters/network/dubbo_proxy/BUILD +++ b/source/extensions/filters/network/dubbo_proxy/BUILD @@ -1,13 +1,13 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +envoy_extension_package() envoy_cc_library( name = "buffer_helper_lib", diff --git a/source/extensions/filters/network/dubbo_proxy/active_message.cc b/source/extensions/filters/network/dubbo_proxy/active_message.cc index 8af50551f6165..d4af36ae17b13 100644 --- a/source/extensions/filters/network/dubbo_proxy/active_message.cc +++ b/source/extensions/filters/network/dubbo_proxy/active_message.cc @@ -31,8 +31,8 @@ DubboFilters::UpstreamResponseStatus ActiveResponseDecoder::onData(Buffer::Insta void ActiveResponseDecoder::onStreamDecoded(MessageMetadataSharedPtr metadata, ContextSharedPtr ctx) { - ASSERT(metadata->message_type() == MessageType::Response || - metadata->message_type() == MessageType::Exception); + ASSERT(metadata->messageType() == MessageType::Response || + metadata->messageType() == MessageType::Exception); ASSERT(metadata->hasResponseStatus()); metadata_ = metadata; @@ -45,24 +45,23 @@ void ActiveResponseDecoder::onStreamDecoded(MessageMetadataSharedPtr metadata, throw DownstreamConnectionCloseException("Downstream has closed or closing"); } - response_connection_.write(ctx->message_origin_data(), false); + response_connection_.write(ctx->messageOriginData(), false); ENVOY_LOG(debug, "dubbo response: the upstream response message has been forwarded to the downstream"); stats_.response_.inc(); 
stats_.response_decoding_success_.inc(); - if (metadata->message_type() == MessageType::Exception) { + if (metadata->messageType() == MessageType::Exception) { stats_.response_business_exception_.inc(); } - switch (metadata->response_status()) { + switch (metadata->responseStatus()) { case ResponseStatus::Ok: stats_.response_success_.inc(); break; default: stats_.response_error_.inc(); - ENVOY_LOG(error, "dubbo response status: {}", - static_cast(metadata->response_status())); + ENVOY_LOG(error, "dubbo response status: {}", static_cast(metadata->responseStatus())); break; } @@ -70,7 +69,7 @@ void ActiveResponseDecoder::onStreamDecoded(MessageMetadataSharedPtr metadata, response_status_ = DubboFilters::UpstreamResponseStatus::Complete; ENVOY_LOG(debug, "dubbo response: complete processing of upstream response messages, id is {}", - metadata->request_id()); + metadata->requestId()); } FilterStatus ActiveResponseDecoder::applyMessageEncodedFilters(MessageMetadataSharedPtr metadata, @@ -129,7 +128,7 @@ ActiveMessageDecoderFilter::ActiveMessageDecoderFilter(ActiveMessage& parent, void ActiveMessageDecoderFilter::continueDecoding() { ASSERT(parent_.context()); auto state = ActiveMessage::FilterIterationStartState::AlwaysStartFromNext; - if (0 != parent_.context()->message_origin_data().length()) { + if (0 != parent_.context()->messageOriginData().length()) { state = ActiveMessage::FilterIterationStartState::CanStartFromCurrent; ENVOY_LOG(warn, "The original message data is not consumed, triggering the decoder filter from " "the current location"); @@ -138,7 +137,7 @@ void ActiveMessageDecoderFilter::continueDecoding() { if (status == FilterStatus::Continue) { ENVOY_LOG(debug, "dubbo response: start upstream"); // All filters have been executed for the current decoder state. - if (parent_.pending_stream_decoded()) { + if (parent_.pendingStreamDecoded()) { // If the filter stack was paused during messageEnd, handle end-of-request details. 
parent_.finalizeRequest(); } @@ -171,7 +170,7 @@ ActiveMessageEncoderFilter::ActiveMessageEncoderFilter(ActiveMessage& parent, void ActiveMessageEncoderFilter::continueEncoding() { ASSERT(parent_.context()); auto state = ActiveMessage::FilterIterationStartState::AlwaysStartFromNext; - if (0 != parent_.context()->message_origin_data().length()) { + if (0 != parent_.context()->messageOriginData().length()) { state = ActiveMessage::FilterIterationStartState::CanStartFromCurrent; ENVOY_LOG(warn, "The original message data is not consumed, triggering the encoder filter from " "the current location"); @@ -185,9 +184,9 @@ void ActiveMessageEncoderFilter::continueEncoding() { // class ActiveMessage ActiveMessage::ActiveMessage(ConnectionManager& parent) : parent_(parent), request_timer_(std::make_unique( - parent_.stats().request_time_ms_, parent.time_system())), - request_id_(-1), stream_id_(parent.random_generator().random()), - stream_info_(parent.time_system()), pending_stream_decoded_(false), + parent_.stats().request_time_ms_, parent.timeSystem())), + request_id_(-1), stream_id_(parent.randomGenerator().random()), + stream_info_(parent.timeSystem()), pending_stream_decoded_(false), local_response_sent_(false) { parent_.stats().request_active_.inc(); stream_info_.setDownstreamLocalAddress(parent_.connection().localAddress()); @@ -256,8 +255,7 @@ void ActiveMessage::onStreamDecoded(MessageMetadataSharedPtr metadata, ContextSh auto status = applyDecoderFilters(nullptr, FilterIterationStartState::CanStartFromCurrent); if (status == FilterStatus::StopIteration) { - ENVOY_LOG(debug, "dubbo request: stop calling decoder filter, id is {}", - metadata->request_id()); + ENVOY_LOG(debug, "dubbo request: stop calling decoder filter, id is {}", metadata->requestId()); pending_stream_decoded_ = true; return; } @@ -265,14 +263,14 @@ void ActiveMessage::onStreamDecoded(MessageMetadataSharedPtr metadata, ContextSh finalizeRequest(); ENVOY_LOG(debug, "dubbo request: complete 
processing of downstream request messages, id is {}", - metadata->request_id()); + metadata->requestId()); } void ActiveMessage::finalizeRequest() { pending_stream_decoded_ = false; parent_.stats().request_.inc(); bool is_one_way = false; - switch (metadata_->message_type()) { + switch (metadata_->messageType()) { case MessageType::Request: parent_.stats().request_twoway_.inc(); break; @@ -415,7 +413,7 @@ void ActiveMessage::resetDownstreamConnection() { void ActiveMessage::resetStream() { parent_.deferredMessage(*this); } uint64_t ActiveMessage::requestId() const { - return metadata_ != nullptr ? metadata_->request_id() : 0; + return metadata_ != nullptr ? metadata_->requestId() : 0; } uint64_t ActiveMessage::streamId() const { return stream_id_; } @@ -452,14 +450,14 @@ void ActiveMessage::addDecoderFilterWorker(DubboFilters::DecoderFilterSharedPtr ActiveMessageDecoderFilterPtr wrapper = std::make_unique(*this, filter, dual_filter); filter->setDecoderFilterCallbacks(*wrapper); - wrapper->moveIntoListBack(std::move(wrapper), decoder_filters_); + LinkedList::moveIntoListBack(std::move(wrapper), decoder_filters_); } void ActiveMessage::addEncoderFilterWorker(DubboFilters::EncoderFilterSharedPtr filter, bool dual_filter) { ActiveMessageEncoderFilterPtr wrapper = std::make_unique(*this, filter, dual_filter); filter->setEncoderFilterCallbacks(*wrapper); - wrapper->moveIntoListBack(std::move(wrapper), encoder_filters_); + LinkedList::moveIntoListBack(std::move(wrapper), encoder_filters_); } void ActiveMessage::onReset() { parent_.deferredMessage(*this); } diff --git a/source/extensions/filters/network/dubbo_proxy/active_message.h b/source/extensions/filters/network/dubbo_proxy/active_message.h index a0209fd4271a1..2870e8d501d79 100644 --- a/source/extensions/filters/network/dubbo_proxy/active_message.h +++ b/source/extensions/filters/network/dubbo_proxy/active_message.h @@ -44,7 +44,7 @@ class ActiveResponseDecoder : public ResponseDecoderCallbacks, StreamHandler& 
newStream() override { return *this; } void onHeartbeat(MessageMetadataSharedPtr) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } - uint64_t requestId() const { return metadata_ ? metadata_->request_id() : 0; } + uint64_t requestId() const { return metadata_ ? metadata_->requestId() : 0; } private: FilterStatus applyMessageEncodedFilters(MessageMetadataSharedPtr metadata, ContextSharedPtr ctx); @@ -185,7 +185,7 @@ class ActiveMessage : public LinkedObject, void onError(const std::string& what); MessageMetadataSharedPtr metadata() const { return metadata_; } ContextSharedPtr context() const { return context_; } - bool pending_stream_decoded() const { return pending_stream_decoded_; } + bool pendingStreamDecoded() const { return pending_stream_decoded_; } private: void addDecoderFilterWorker(DubboFilters::DecoderFilterSharedPtr filter, bool dual_filter); diff --git a/source/extensions/filters/network/dubbo_proxy/conn_manager.cc b/source/extensions/filters/network/dubbo_proxy/conn_manager.cc index 4546ed7463487..a21f6350bcaef 100644 --- a/source/extensions/filters/network/dubbo_proxy/conn_manager.cc +++ b/source/extensions/filters/network/dubbo_proxy/conn_manager.cc @@ -18,7 +18,7 @@ namespace DubboProxy { constexpr uint32_t BufferLimit = UINT32_MAX; -ConnectionManager::ConnectionManager(Config& config, Runtime::RandomGenerator& random_generator, +ConnectionManager::ConnectionManager(Config& config, Random::RandomGenerator& random_generator, TimeSource& time_system) : config_(config), time_system_(time_system), stats_(config_.stats()), random_generator_(random_generator), protocol_(config.createProtocol()), @@ -38,7 +38,7 @@ Network::FilterStatus ConnectionManager::onData(Buffer::Instance& data, bool end if (stopped_) { ASSERT(!active_message_list_.empty()); auto metadata = (*active_message_list_.begin())->metadata(); - if (metadata && metadata->message_type() == MessageType::Oneway) { + if (metadata && metadata->messageType() == MessageType::Oneway) { 
ENVOY_CONN_LOG(trace, "waiting for one-way completion", read_callbacks_->connection()); half_closed_ = true; return Network::FilterStatus::StopIteration; @@ -83,7 +83,7 @@ StreamHandler& ConnectionManager::newStream() { ActiveMessagePtr new_message(std::make_unique(*this)); new_message->createFilterChain(); - new_message->moveIntoList(std::move(new_message), active_message_list_); + LinkedList::moveIntoList(std::move(new_message), active_message_list_); return **active_message_list_.begin(); } diff --git a/source/extensions/filters/network/dubbo_proxy/conn_manager.h b/source/extensions/filters/network/dubbo_proxy/conn_manager.h index 246df5aebfc76..f09ebfa1ac54e 100644 --- a/source/extensions/filters/network/dubbo_proxy/conn_manager.h +++ b/source/extensions/filters/network/dubbo_proxy/conn_manager.h @@ -47,7 +47,7 @@ class ConnectionManager : public Network::ReadFilter, using ConfigSerializationType = envoy::extensions::filters::network::dubbo_proxy::v3::SerializationType; - ConnectionManager(Config& config, Runtime::RandomGenerator& random_generator, + ConnectionManager(Config& config, Random::RandomGenerator& random_generator, TimeSource& time_system); ~ConnectionManager() override = default; @@ -67,8 +67,8 @@ class ConnectionManager : public Network::ReadFilter, DubboFilterStats& stats() const { return stats_; } Network::Connection& connection() const { return read_callbacks_->connection(); } - TimeSource& time_system() const { return time_system_; } - Runtime::RandomGenerator& random_generator() const { return random_generator_; } + TimeSource& timeSystem() const { return time_system_; } + Random::RandomGenerator& randomGenerator() const { return random_generator_; } Config& config() const { return config_; } SerializationType downstreamSerializationType() const { return protocol_->serializer()->type(); } ProtocolType downstreamProtocolType() const { return protocol_->type(); } @@ -94,7 +94,7 @@ class ConnectionManager : public Network::ReadFilter, Config& 
config_; TimeSource& time_system_; DubboFilterStats& stats_; - Runtime::RandomGenerator& random_generator_; + Random::RandomGenerator& random_generator_; SerializerPtr serializer_; ProtocolPtr protocol_; diff --git a/source/extensions/filters/network/dubbo_proxy/decoder.cc b/source/extensions/filters/network/dubbo_proxy/decoder.cc index 3715acf865d5e..0f838a9a06f9f 100644 --- a/source/extensions/filters/network/dubbo_proxy/decoder.cc +++ b/source/extensions/filters/network/dubbo_proxy/decoder.cc @@ -19,22 +19,22 @@ DecoderStateMachine::onDecodeStreamHeader(Buffer::Instance& buffer) { } auto context = ret.first; - if (metadata->message_type() == MessageType::HeartbeatRequest || - metadata->message_type() == MessageType::HeartbeatResponse) { - if (buffer.length() < (context->header_size() + context->body_size())) { + if (metadata->messageType() == MessageType::HeartbeatRequest || + metadata->messageType() == MessageType::HeartbeatResponse) { + if (buffer.length() < (context->headerSize() + context->bodySize())) { ENVOY_LOG(debug, "dubbo decoder: need more data for {} protocol heartbeat", protocol_.name()); return {ProtocolState::WaitForData}; } ENVOY_LOG(debug, "dubbo decoder: this is the {} heartbeat message", protocol_.name()); - buffer.drain(context->header_size() + context->body_size()); + buffer.drain(context->headerSize() + context->bodySize()); delegate_.onHeartbeat(metadata); return {ProtocolState::Done}; } active_stream_ = delegate_.newStream(metadata, context); ASSERT(active_stream_); - context->message_origin_data().move(buffer, context->header_size()); + context->messageOriginData().move(buffer, context->headerSize()); return {ProtocolState::OnDecodeStreamData}; } @@ -49,8 +49,7 @@ DecoderStateMachine::onDecodeStreamData(Buffer::Instance& buffer) { return {ProtocolState::WaitForData}; } - active_stream_->context_->message_origin_data().move(buffer, - active_stream_->context_->body_size()); + active_stream_->context_->messageOriginData().move(buffer, 
active_stream_->context_->bodySize()); active_stream_->onStreamDecoded(); active_stream_ = nullptr; diff --git a/source/extensions/filters/network/dubbo_proxy/decoder.h b/source/extensions/filters/network/dubbo_proxy/decoder.h index 2723633c79a69..d180ba13e4e0c 100644 --- a/source/extensions/filters/network/dubbo_proxy/decoder.h +++ b/source/extensions/filters/network/dubbo_proxy/decoder.h @@ -91,11 +91,6 @@ class DecoderStateMachine : public Logger::Loggable { */ ProtocolState currentState() const { return state_; } - /** - * Set the current state. Used for testing only. - */ - void setCurrentState(ProtocolState state) { state_ = state; } - private: struct DecoderStatus { DecoderStatus() = default; diff --git a/source/extensions/filters/network/dubbo_proxy/deserializer.h b/source/extensions/filters/network/dubbo_proxy/deserializer.h deleted file mode 100644 index 95f2f8e5bc44d..0000000000000 --- a/source/extensions/filters/network/dubbo_proxy/deserializer.h +++ /dev/null @@ -1,177 +0,0 @@ -#pragma once - -#include -#include - -#include "envoy/buffer/buffer.h" - -#include "common/common/assert.h" -#include "common/config/utility.h" -#include "common/singleton/const_singleton.h" - -#include "extensions/filters/network/dubbo_proxy/message.h" -#include "extensions/filters/network/dubbo_proxy/metadata.h" - -namespace Envoy { -namespace Extensions { -namespace NetworkFilters { -namespace DubboProxy { - -/** - * Names of available deserializer implementations. 
- */ -class DeserializerNameValues { -public: - struct SerializationTypeHash { - template std::size_t operator()(T t) const { return static_cast(t); } - }; - - using DeserializerTypeNameMap = - std::unordered_map; - - const DeserializerTypeNameMap deserializerTypeNameMap = { - {SerializationType::Hessian, "hessian"}, - }; - - const std::string& fromType(SerializationType type) const { - const auto& itor = deserializerTypeNameMap.find(type); - if (itor != deserializerTypeNameMap.end()) { - return itor->second; - } - - NOT_REACHED_GCOVR_EXCL_LINE; - } -}; - -using DeserializerNames = ConstSingleton; - -/** - * RpcInvocation represent an rpc call - * See - * https://github.com/apache/incubator-dubbo/blob/master/dubbo-rpc/dubbo-rpc-api/src/main/java/org/apache/dubbo/rpc/RpcInvocation.java - */ -class RpcInvocation { -public: - virtual ~RpcInvocation() = default; - virtual const std::string& getMethodName() const PURE; - virtual const std::string& getServiceName() const PURE; - virtual const std::string& getServiceVersion() const PURE; -}; - -using RpcInvocationPtr = std::unique_ptr; - -/** - * RpcResult represent the result of an rpc call - * See - * https://github.com/apache/incubator-dubbo/blob/master/dubbo-rpc/dubbo-rpc-api/src/main/java/org/apache/dubbo/rpc/RpcResult.java - */ -class RpcResult { -public: - virtual ~RpcResult() = default; - virtual bool hasException() const PURE; -}; - -using RpcResultPtr = std::unique_ptr; - -class Deserializer { -public: - virtual ~Deserializer() = default; - /** - * Return this Deserializer's name - * - * @return std::string containing the serialization name. 
- */ - virtual const std::string& name() const PURE; - - /** - * @return SerializationType the deserializer type - */ - virtual SerializationType type() const PURE; - - /** - * deserialize an rpc call - * If successful, the RpcInvocation removed from the buffer - * - * @param buffer the currently buffered dubbo data - * @body_size the complete RpcInvocation size - * @throws EnvoyException if the data is not valid for this serialization - */ - virtual void deserializeRpcInvocation(Buffer::Instance& buffer, size_t body_size, - MessageMetadataSharedPtr metadata) PURE; - /** - * deserialize result of an rpc call - * If successful, the RpcResult removed from the buffer - * - * @param buffer the currently buffered dubbo data - * @body_size the complete RpcResult size - * @throws EnvoyException if the data is not valid for this serialization - */ - virtual RpcResultPtr deserializeRpcResult(Buffer::Instance& buffer, size_t body_size) PURE; - - /** - * serialize result of an rpc call - * If successful, the output_buffer is written to the serialized data - * - * @param output_buffer store the serialized data - * @param content the rpc response content - * @param type the rpc response type - * @return size_t the length of the serialized content - */ - virtual size_t serializeRpcResult(Buffer::Instance& output_buffer, const std::string& content, - RpcResponseType type) PURE; -}; - -using DeserializerPtr = std::unique_ptr; - -/** - * Implemented by each Dubbo deserialize and registered via Registry::registerFactory or the - * convenience class RegisterFactory. - */ -class NamedDeserializerConfigFactory { -public: - virtual ~NamedDeserializerConfigFactory() = default; - - /** - * Create a particular Dubbo deserializer. - * @return DeserializerPtr the transport - */ - virtual DeserializerPtr createDeserializer() PURE; - - /** - * @return std::string the identifying name for a particular implementation of Dubbo deserializer - * produced by the factory. 
- */ - virtual std::string name() PURE; - - /** - * Convenience method to lookup a factory by type. - * @param TransportType the transport type - * @return NamedDeserializerConfigFactory& for the TransportType - */ - static NamedDeserializerConfigFactory& getFactory(SerializationType type) { - const std::string& name = DeserializerNames::get().fromType(type); - return Envoy::Config::Utility::getAndCheckFactory(name); - } -}; - -/** - * DeserializerFactoryBase provides a template for a trivial NamedDeserializerConfigFactory. - */ -template -class DeserializerFactoryBase : public NamedDeserializerConfigFactory { - DeserializerPtr createDeserializer() override { return std::make_unique(); } - - std::string name() override { return name_; } - -protected: - DeserializerFactoryBase(SerializationType type) - : name_(DeserializerNames::get().fromType(type)) {} - -private: - const std::string name_; -}; - -} // namespace DubboProxy -} // namespace NetworkFilters -} // namespace Extensions -} // namespace Envoy diff --git a/source/extensions/filters/network/dubbo_proxy/dubbo_hessian2_serializer_impl.cc b/source/extensions/filters/network/dubbo_proxy/dubbo_hessian2_serializer_impl.cc index 19e40d284808f..473a0ce5c6f5b 100644 --- a/source/extensions/filters/network/dubbo_proxy/dubbo_hessian2_serializer_impl.cc +++ b/source/extensions/filters/network/dubbo_proxy/dubbo_hessian2_serializer_impl.cc @@ -29,9 +29,9 @@ DubboHessian2SerializerImpl::deserializeRpcInvocation(Buffer::Instance& buffer, std::string method_name = HessianUtils::peekString(buffer, &size, total_size); total_size += size; - if (static_cast(context->body_size()) < total_size) { + if (static_cast(context->bodySize()) < total_size) { throw EnvoyException(fmt::format("RpcInvocation size({}) large than body size({})", total_size, - context->body_size())); + context->bodySize())); } auto invo = std::make_shared(); @@ -45,7 +45,7 @@ DubboHessian2SerializerImpl::deserializeRpcInvocation(Buffer::Instance& buffer, 
std::pair DubboHessian2SerializerImpl::deserializeRpcResult(Buffer::Instance& buffer, ContextSharedPtr context) { - ASSERT(buffer.length() >= context->body_size()); + ASSERT(buffer.length() >= context->bodySize()); size_t total_size = 0; bool has_value = true; @@ -69,15 +69,15 @@ DubboHessian2SerializerImpl::deserializeRpcResult(Buffer::Instance& buffer, throw EnvoyException(fmt::format("not supported return type {}", static_cast(type))); } - if (context->body_size() < total_size) { + if (context->bodySize() < total_size) { throw EnvoyException(fmt::format("RpcResult size({}) large than body size({})", total_size, - context->body_size())); + context->bodySize())); } - if (!has_value && context->body_size() != total_size) { + if (!has_value && context->bodySize() != total_size) { throw EnvoyException( fmt::format("RpcResult is no value, but the rest of the body size({}) not equal 0", - (context->body_size() - total_size))); + (context->bodySize() - total_size))); } return std::pair(result, true); diff --git a/source/extensions/filters/network/dubbo_proxy/dubbo_protocol_impl.cc b/source/extensions/filters/network/dubbo_proxy/dubbo_protocol_impl.cc index b91c4e972ee28..3a05491134d98 100644 --- a/source/extensions/filters/network/dubbo_proxy/dubbo_protocol_impl.cc +++ b/source/extensions/filters/network/dubbo_proxy/dubbo_protocol_impl.cc @@ -66,7 +66,7 @@ void parseRequestInfoFromBuffer(Buffer::Instance& data, MessageMetadataSharedPtr static_cast::type>(type))); } - if (!is_two_way && metadata->message_type() != MessageType::HeartbeatRequest) { + if (!is_two_way && metadata->messageType() != MessageType::HeartbeatRequest) { metadata->setMessageType(MessageType::Oneway); } @@ -129,9 +129,9 @@ DubboProtocolImpl::decodeHeader(Buffer::Instance& buffer, MessageMetadataSharedP } auto context = std::make_shared(); - context->set_header_size(DubboProtocolImpl::MessageSize); - context->set_body_size(body_size); - context->set_heartbeat(is_event); + 
context->setHeaderSize(DubboProtocolImpl::MessageSize); + context->setBodySize(body_size); + context->setHeartbeat(is_event); return std::pair(context, true); } @@ -140,11 +140,11 @@ bool DubboProtocolImpl::decodeData(Buffer::Instance& buffer, ContextSharedPtr co MessageMetadataSharedPtr metadata) { ASSERT(serializer_); - if ((buffer.length()) < static_cast(context->body_size())) { + if ((buffer.length()) < static_cast(context->bodySize())) { return false; } - switch (metadata->message_type()) { + switch (metadata->messageType()) { case MessageType::Oneway: case MessageType::Request: { auto ret = serializer_->deserializeRpcInvocation(buffer, context); @@ -175,16 +175,16 @@ bool DubboProtocolImpl::encode(Buffer::Instance& buffer, const MessageMetadata& const std::string& content, RpcResponseType type) { ASSERT(serializer_); - switch (metadata.message_type()) { + switch (metadata.messageType()) { case MessageType::HeartbeatResponse: { ASSERT(metadata.hasResponseStatus()); ASSERT(content.empty()); buffer.writeBEInt(MagicNumber); - uint8_t flag = static_cast(metadata.serialization_type()); + uint8_t flag = static_cast(metadata.serializationType()); flag = flag ^ EventMask; buffer.writeByte(flag); - buffer.writeByte(static_cast(metadata.response_status())); - buffer.writeBEInt(metadata.request_id()); + buffer.writeByte(static_cast(metadata.responseStatus())); + buffer.writeBEInt(metadata.requestId()); buffer.writeBEInt(0); return true; } @@ -195,9 +195,9 @@ bool DubboProtocolImpl::encode(Buffer::Instance& buffer, const MessageMetadata& size_t serialized_body_size = serializer_->serializeRpcResult(body_buffer, content, type); buffer.writeBEInt(MagicNumber); - buffer.writeByte(static_cast(metadata.serialization_type())); - buffer.writeByte(static_cast(metadata.response_status())); - buffer.writeBEInt(metadata.request_id()); + buffer.writeByte(static_cast(metadata.serializationType())); + buffer.writeByte(static_cast(metadata.responseStatus())); + 
buffer.writeBEInt(metadata.requestId()); buffer.writeBEInt(serialized_body_size); buffer.move(body_buffer, serialized_body_size); diff --git a/source/extensions/filters/network/dubbo_proxy/filters/BUILD b/source/extensions/filters/network/dubbo_proxy/filters/BUILD index 19f4fd317675a..d2c9fd1ff03c1 100644 --- a/source/extensions/filters/network/dubbo_proxy/filters/BUILD +++ b/source/extensions/filters/network/dubbo_proxy/filters/BUILD @@ -1,12 +1,12 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +envoy_extension_package() envoy_cc_library( name = "filter_interface", diff --git a/source/extensions/filters/network/dubbo_proxy/heartbeat_response.cc b/source/extensions/filters/network/dubbo_proxy/heartbeat_response.cc index 3d9f7a648844f..69df071a35271 100644 --- a/source/extensions/filters/network/dubbo_proxy/heartbeat_response.cc +++ b/source/extensions/filters/network/dubbo_proxy/heartbeat_response.cc @@ -8,8 +8,8 @@ namespace DubboProxy { DubboFilters::DirectResponse::ResponseType HeartbeatResponse::encode(MessageMetadata& metadata, DubboProxy::Protocol& protocol, Buffer::Instance& buffer) const { - ASSERT(metadata.response_status() == ResponseStatus::Ok); - ASSERT(metadata.message_type() == MessageType::HeartbeatResponse); + ASSERT(metadata.responseStatus() == ResponseStatus::Ok); + ASSERT(metadata.messageType() == MessageType::HeartbeatResponse); if (!protocol.encode(buffer, metadata, "")) { throw EnvoyException("failed to encode heartbeat message"); diff --git a/source/extensions/filters/network/dubbo_proxy/message.h b/source/extensions/filters/network/dubbo_proxy/message.h index 8e74e25dcb41f..08a399fae8b75 100644 --- a/source/extensions/filters/network/dubbo_proxy/message.h +++ b/source/extensions/filters/network/dubbo_proxy/message.h @@ -7,6 +7,7 @@ #include "common/buffer/buffer_impl.h" +#include 
"absl/container/node_hash_map.h" #include "absl/types/optional.h" namespace Envoy { @@ -88,16 +89,16 @@ enum class RpcResponseType : uint8_t { class Context { public: - using AttachmentMap = std::unordered_map; + using AttachmentMap = absl::node_hash_map; bool hasAttachments() const { return !attachments_.empty(); } const AttachmentMap& attachments() const { return attachments_; } - Buffer::Instance& message_origin_data() { return message_origin_buffer_; } - size_t message_size() const { return header_size() + body_size(); } + Buffer::Instance& messageOriginData() { return message_origin_buffer_; } + size_t messageSize() const { return headerSize() + bodySize(); } - virtual size_t body_size() const PURE; - virtual size_t header_size() const PURE; + virtual size_t bodySize() const PURE; + virtual size_t headerSize() const PURE; protected: Context() = default; @@ -118,10 +119,10 @@ class RpcInvocation { public: virtual ~RpcInvocation() = default; - virtual const std::string& service_name() const PURE; - virtual const std::string& method_name() const PURE; - virtual const absl::optional& service_version() const PURE; - virtual const absl::optional& service_group() const PURE; + virtual const std::string& serviceName() const PURE; + virtual const std::string& methodName() const PURE; + virtual const absl::optional& serviceVersion() const PURE; + virtual const absl::optional& serviceGroup() const PURE; }; using RpcInvocationSharedPtr = std::shared_ptr; diff --git a/source/extensions/filters/network/dubbo_proxy/message_impl.h b/source/extensions/filters/network/dubbo_proxy/message_impl.h index 1fc20c5f7a11f..c535b522e90e7 100644 --- a/source/extensions/filters/network/dubbo_proxy/message_impl.h +++ b/source/extensions/filters/network/dubbo_proxy/message_impl.h @@ -13,11 +13,11 @@ class ContextBase : public Context { ~ContextBase() override = default; // Override from Context - size_t body_size() const override { return body_size_; } - size_t header_size() const override 
{ return header_size_; } + size_t bodySize() const override { return body_size_; } + size_t headerSize() const override { return header_size_; } - void set_body_size(size_t size) { body_size_ = size; } - void set_header_size(size_t size) { header_size_ = size; } + void setBodySize(size_t size) { body_size_ = size; } + void setHeaderSize(size_t size) { header_size_ = size; } protected: size_t body_size_{0}; @@ -29,8 +29,8 @@ class ContextImpl : public ContextBase { ContextImpl() = default; ~ContextImpl() override = default; - bool is_heartbeat() const { return is_heartbeat_; } - void set_heartbeat(bool is_heartbeat) { is_heartbeat_ = is_heartbeat; } + bool isHeartbeat() const { return is_heartbeat_; } + void setHeartbeat(bool is_heartbeat) { is_heartbeat_ = is_heartbeat; } private: bool is_heartbeat_{false}; @@ -41,16 +41,16 @@ class RpcInvocationBase : public RpcInvocation { ~RpcInvocationBase() override = default; void setServiceName(const std::string& name) { service_name_ = name; } - const std::string& service_name() const override { return service_name_; } + const std::string& serviceName() const override { return service_name_; } void setMethodName(const std::string& name) { method_name_ = name; } - const std::string& method_name() const override { return method_name_; } + const std::string& methodName() const override { return method_name_; } void setServiceVersion(const std::string& version) { service_version_ = version; } - const absl::optional& service_version() const override { return service_version_; } + const absl::optional& serviceVersion() const override { return service_version_; } void setServiceGroup(const std::string& group) { group_ = group; } - const absl::optional& service_group() const override { return group_; } + const absl::optional& serviceGroup() const override { return group_; } protected: std::string service_name_; diff --git a/source/extensions/filters/network/dubbo_proxy/metadata.h 
b/source/extensions/filters/network/dubbo_proxy/metadata.h index 41a7f3976f4d5..698b50283ec6b 100644 --- a/source/extensions/filters/network/dubbo_proxy/metadata.h +++ b/source/extensions/filters/network/dubbo_proxy/metadata.h @@ -2,7 +2,6 @@ #include #include -#include #include "common/common/assert.h" #include "common/common/empty_string.h" @@ -23,31 +22,31 @@ class MessageMetadata { invocation_info_ = invocation_info; } bool hasInvocationInfo() const { return invocation_info_ != nullptr; } - const RpcInvocation& invocation_info() const { return *invocation_info_; } + const RpcInvocation& invocationInfo() const { return *invocation_info_; } void setProtocolType(ProtocolType type) { proto_type_ = type; } - ProtocolType protocol_type() const { return proto_type_; } + ProtocolType protocolType() const { return proto_type_; } void setProtocolVersion(uint8_t version) { protocol_version_ = version; } - uint8_t protocol_version() const { return protocol_version_; } + uint8_t protocolVersion() const { return protocol_version_; } void setMessageType(MessageType type) { message_type_ = type; } - MessageType message_type() const { return message_type_; } + MessageType messageType() const { return message_type_; } void setRequestId(int64_t id) { request_id_ = id; } - int64_t request_id() const { return request_id_; } + int64_t requestId() const { return request_id_; } void setTimeout(uint32_t timeout) { timeout_ = timeout; } absl::optional timeout() const { return timeout_; } void setTwoWayFlag(bool two_way) { is_two_way_ = two_way; } - bool is_two_way() const { return is_two_way_; } + bool isTwoWay() const { return is_two_way_; } template void setSerializationType(T type) { ASSERT((std::is_same::type>::value)); serialization_type_ = static_cast(type); } - template T serialization_type() const { + template T serializationType() const { ASSERT((std::is_same::type>::value)); return static_cast(serialization_type_); } @@ -56,7 +55,7 @@ class MessageMetadata { 
ASSERT((std::is_same::type>::value)); response_status_ = static_cast(status); } - template T response_status() const { + template T responseStatus() const { ASSERT((std::is_same::type>::value)); return static_cast(response_status_.value()); } diff --git a/source/extensions/filters/network/dubbo_proxy/protocol.h b/source/extensions/filters/network/dubbo_proxy/protocol.h index b496699d42c2f..09f16d4420da5 100644 --- a/source/extensions/filters/network/dubbo_proxy/protocol.h +++ b/source/extensions/filters/network/dubbo_proxy/protocol.h @@ -1,7 +1,6 @@ #pragma once #include -#include #include "envoy/buffer/buffer.h" #include "envoy/config/typed_config.h" diff --git a/source/extensions/filters/network/dubbo_proxy/protocol_constants.h b/source/extensions/filters/network/dubbo_proxy/protocol_constants.h index 138905d22c1e7..e7b787831e37c 100644 --- a/source/extensions/filters/network/dubbo_proxy/protocol_constants.h +++ b/source/extensions/filters/network/dubbo_proxy/protocol_constants.h @@ -1,13 +1,13 @@ #pragma once -#include - #include "common/common/assert.h" #include "common/common/fmt.h" #include "common/singleton/const_singleton.h" #include "extensions/filters/network/dubbo_proxy/message.h" +#include "absl/container/node_hash_map.h" + namespace Envoy { namespace Extensions { namespace NetworkFilters { @@ -22,7 +22,7 @@ class ProtocolNameValues { template std::size_t operator()(T t) const { return static_cast(t); } }; - using ProtocolTypeNameMap = std::unordered_map; + using ProtocolTypeNameMap = absl::node_hash_map; const ProtocolTypeNameMap protocolTypeNameMap = { {ProtocolType::Dubbo, "dubbo"}, @@ -47,7 +47,7 @@ class SerializerNameValues { }; using SerializerTypeNameMap = - std::unordered_map; + absl::node_hash_map; const SerializerTypeNameMap serializerTypeNameMap = { {SerializationType::Hessian2, "hessian2"}, @@ -77,7 +77,7 @@ class ProtocolSerializerNameValues { #define GENERATE_PAIR(X, Y) generateKey(X, Y), generateValue(X, Y) - using 
ProtocolSerializerTypeNameMap = std::unordered_map; + using ProtocolSerializerTypeNameMap = absl::node_hash_map; const ProtocolSerializerTypeNameMap protocolSerializerTypeNameMap = { {GENERATE_PAIR(ProtocolType::Dubbo, SerializationType::Hessian2)}, diff --git a/source/extensions/filters/network/dubbo_proxy/router/BUILD b/source/extensions/filters/network/dubbo_proxy/router/BUILD index 13a1154347904..4227ca25fcf5a 100644 --- a/source/extensions/filters/network/dubbo_proxy/router/BUILD +++ b/source/extensions/filters/network/dubbo_proxy/router/BUILD @@ -1,12 +1,12 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +envoy_extension_package() envoy_cc_library( name = "router_interface", diff --git a/source/extensions/filters/network/dubbo_proxy/router/route.h b/source/extensions/filters/network/dubbo_proxy/router/route.h index c9814aa18f2a0..247cdf480f161 100644 --- a/source/extensions/filters/network/dubbo_proxy/router/route.h +++ b/source/extensions/filters/network/dubbo_proxy/router/route.h @@ -37,7 +37,7 @@ class RouteMatcherNameValues { }; using RouteMatcherNameMap = - std::unordered_map; + absl::node_hash_map; const RouteMatcherNameMap routeMatcherNameMap = { {RouteMatcherType::Default, "default"}, diff --git a/source/extensions/filters/network/dubbo_proxy/router/route_matcher.cc b/source/extensions/filters/network/dubbo_proxy/router/route_matcher.cc index 62f483e6933bf..1ff5c79e3fbf8 100644 --- a/source/extensions/filters/network/dubbo_proxy/router/route_matcher.cc +++ b/source/extensions/filters/network/dubbo_proxy/router/route_matcher.cc @@ -82,7 +82,7 @@ bool ParameterRouteEntryImpl::matchParameter(absl::string_view request_data, RouteConstSharedPtr ParameterRouteEntryImpl::matches(const MessageMetadata& metadata, uint64_t random_value) const { ASSERT(metadata.hasInvocationInfo()); - const auto invocation = 
dynamic_cast(&metadata.invocation_info()); + const auto invocation = dynamic_cast(&metadata.invocationInfo()); ASSERT(invocation); if (!invocation->hasParameters()) { return nullptr; @@ -141,7 +141,7 @@ MethodRouteEntryImpl::~MethodRouteEntryImpl() = default; RouteConstSharedPtr MethodRouteEntryImpl::matches(const MessageMetadata& metadata, uint64_t random_value) const { ASSERT(metadata.hasInvocationInfo()); - const auto invocation = dynamic_cast(&metadata.invocation_info()); + const auto invocation = dynamic_cast(&metadata.invocationInfo()); ASSERT(invocation); if (invocation->hasHeaders() && !RouteEntryImplBase::headersMatch(invocation->headers())) { @@ -149,14 +149,14 @@ RouteConstSharedPtr MethodRouteEntryImpl::matches(const MessageMetadata& metadat return nullptr; } - if (invocation->method_name().empty()) { + if (invocation->methodName().empty()) { ENVOY_LOG(error, "dubbo route matcher: there is no method name in the metadata"); return nullptr; } - if (!method_name_.match(invocation->method_name())) { + if (!method_name_.match(invocation->methodName())) { ENVOY_LOG(debug, "dubbo route matcher: method matching failed, input method '{}'", - invocation->method_name()); + invocation->methodName()); return nullptr; } @@ -182,13 +182,13 @@ SingleRouteMatcherImpl::SingleRouteMatcherImpl(const RouteConfig& config, RouteConstSharedPtr SingleRouteMatcherImpl::route(const MessageMetadata& metadata, uint64_t random_value) const { ASSERT(metadata.hasInvocationInfo()); - const auto& invocation = metadata.invocation_info(); + const auto& invocation = metadata.invocationInfo(); - if (service_name_ == invocation.service_name() && + if (service_name_ == invocation.serviceName() && (group_.value().empty() || - (invocation.service_group().has_value() && invocation.service_group().value() == group_)) && - (version_.value().empty() || (invocation.service_version().has_value() && - invocation.service_version().value() == version_))) { + (invocation.serviceGroup().has_value() && 
invocation.serviceGroup().value() == group_)) && + (version_.value().empty() || (invocation.serviceVersion().has_value() && + invocation.serviceVersion().value() == version_))) { for (const auto& route : routes_) { RouteConstSharedPtr route_entry = route->matches(metadata, random_value); if (nullptr != route_entry) { diff --git a/source/extensions/filters/network/dubbo_proxy/router/router_impl.cc b/source/extensions/filters/network/dubbo_proxy/router/router_impl.cc index 5bd9aab946770..ffe3ef9ce1c13 100644 --- a/source/extensions/filters/network/dubbo_proxy/router/router_impl.cc +++ b/source/extensions/filters/network/dubbo_proxy/router/router_impl.cc @@ -24,15 +24,15 @@ void Router::setDecoderFilterCallbacks(DubboFilters::DecoderFilterCallbacks& cal FilterStatus Router::onMessageDecoded(MessageMetadataSharedPtr metadata, ContextSharedPtr ctx) { ASSERT(metadata->hasInvocationInfo()); - const auto& invocation = metadata->invocation_info(); + const auto& invocation = metadata->invocationInfo(); route_ = callbacks_->route(); if (!route_) { ENVOY_STREAM_LOG(debug, "dubbo router: no cluster match for interface '{}'", *callbacks_, - invocation.service_name()); + invocation.serviceName()); callbacks_->sendLocalReply(AppException(ResponseStatus::ServiceNotFound, fmt::format("dubbo router: no route for interface '{}'", - invocation.service_name())), + invocation.serviceName())), false); return FilterStatus::StopIteration; } @@ -52,7 +52,7 @@ FilterStatus Router::onMessageDecoded(MessageMetadataSharedPtr metadata, Context cluster_ = cluster->info(); ENVOY_STREAM_LOG(debug, "dubbo router: cluster '{}' match for interface '{}'", *callbacks_, - route_entry_->clusterName(), invocation.service_name()); + route_entry_->clusterName(), invocation.serviceName()); if (cluster_->maintenanceMode()) { callbacks_->sendLocalReply( @@ -75,7 +75,7 @@ FilterStatus Router::onMessageDecoded(MessageMetadataSharedPtr metadata, Context } ENVOY_STREAM_LOG(debug, "dubbo router: decoding request", 
*callbacks_); - upstream_request_buffer_.move(ctx->message_origin_data(), ctx->message_size()); + upstream_request_buffer_.move(ctx->messageOriginData(), ctx->messageSize()); upstream_request_ = std::make_unique( *this, *conn_pool, metadata, callbacks_->serializationType(), callbacks_->protocolType()); @@ -262,7 +262,7 @@ void Router::UpstreamRequest::onUpstreamHostSelected(Upstream::HostDescriptionCo } void Router::UpstreamRequest::onResetStream(ConnectionPool::PoolFailureReason reason) { - if (metadata_->message_type() == MessageType::Oneway) { + if (metadata_->messageType() == MessageType::Oneway) { // For oneway requests, we should not attempt a response. Reset the downstream to signal // an error. ENVOY_LOG(debug, "dubbo upstream request: the request is oneway, reset downstream stream"); diff --git a/source/extensions/filters/network/dubbo_proxy/serializer.h b/source/extensions/filters/network/dubbo_proxy/serializer.h index 2d3c1125cb7fe..8b12ccd43dc4b 100644 --- a/source/extensions/filters/network/dubbo_proxy/serializer.h +++ b/source/extensions/filters/network/dubbo_proxy/serializer.h @@ -1,7 +1,6 @@ #pragma once #include -#include #include "envoy/buffer/buffer.h" #include "envoy/config/typed_config.h" diff --git a/source/extensions/filters/network/dubbo_proxy/serializer_impl.h b/source/extensions/filters/network/dubbo_proxy/serializer_impl.h index 983843c6f7fe2..1c9bcd7ccf0ba 100644 --- a/source/extensions/filters/network/dubbo_proxy/serializer_impl.h +++ b/source/extensions/filters/network/dubbo_proxy/serializer_impl.h @@ -11,11 +11,9 @@ namespace DubboProxy { class RpcInvocationImpl : public RpcInvocationBase { public: // TODO(gengleilei) Add parameter data types and implement Dubbo data type mapping. 
- using ParameterValueMap = std::unordered_map; + using ParameterValueMap = absl::node_hash_map; using ParameterValueMapPtr = std::unique_ptr; - using HeaderMapPtr = std::unique_ptr; - RpcInvocationImpl() = default; ~RpcInvocationImpl() override = default; @@ -32,7 +30,7 @@ class RpcInvocationImpl : public RpcInvocationBase { private: inline void assignHeaderIfNeed() { if (!headers_) { - headers_ = std::make_unique(); + headers_ = Http::RequestHeaderMapImpl::create(); } } @@ -43,7 +41,7 @@ class RpcInvocationImpl : public RpcInvocationBase { } ParameterValueMapPtr parameter_map_; - HeaderMapPtr headers_; // attachment + Http::HeaderMapPtr headers_; // attachment }; class RpcResultImpl : public RpcResult { diff --git a/source/extensions/filters/network/echo/BUILD b/source/extensions/filters/network/echo/BUILD index 6e26725de2fbd..6b136705258c0 100644 --- a/source/extensions/filters/network/echo/BUILD +++ b/source/extensions/filters/network/echo/BUILD @@ -1,16 +1,16 @@ -licenses(["notice"]) # Apache 2 - -# Echo L4 network filter. This is primarily a simplistic example. -# Public docs: docs/root/configuration/network_filters/echo_filter.rst - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +# Echo L4 network filter. This is primarily a simplistic example. +# Public docs: docs/root/configuration/network_filters/echo_filter.rst + +envoy_extension_package() envoy_cc_library( name = "echo", @@ -29,6 +29,11 @@ envoy_cc_extension( name = "config", srcs = ["config.cc"], security_posture = "unknown", + # TODO(#9953) move echo integration test to extensions. 
+ visibility = [ + "//:extension_config", + "//test/integration:__subpackages__", + ], deps = [ ":echo", "//include/envoy/registry", diff --git a/source/extensions/filters/network/ext_authz/BUILD b/source/extensions/filters/network/ext_authz/BUILD index 286c454dc9cd3..ebc6847e28f66 100644 --- a/source/extensions/filters/network/ext_authz/BUILD +++ b/source/extensions/filters/network/ext_authz/BUILD @@ -1,16 +1,16 @@ -licenses(["notice"]) # Apache 2 - -# External authorization L4 network filter -# Public docs: TODO(saumoh): Docs needed in docs/root/configuration/network_filters - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +# External authorization L4 network filter +# Public docs: TODO(saumoh): Docs needed in docs/root/configuration/network_filters + +envoy_extension_package() envoy_cc_library( name = "ext_authz", @@ -26,6 +26,7 @@ envoy_cc_library( "//source/common/tracing:http_tracer_lib", "//source/extensions/filters/common/ext_authz:ext_authz_grpc_lib", "//source/extensions/filters/common/ext_authz:ext_authz_interface", + "//source/extensions/filters/network:well_known_names", "@envoy_api//envoy/extensions/filters/network/ext_authz/v3:pkg_cc_proto", "@envoy_api//envoy/service/auth/v3:pkg_cc_proto", ], diff --git a/source/extensions/filters/network/ext_authz/config.cc b/source/extensions/filters/network/ext_authz/config.cc index a47f488a6f6bf..8bfdf1f81f7d2 100644 --- a/source/extensions/filters/network/ext_authz/config.cc +++ b/source/extensions/filters/network/ext_authz/config.cc @@ -23,17 +23,19 @@ namespace ExtAuthz { Network::FilterFactoryCb ExtAuthzConfigFactory::createFilterFactoryFromProtoTyped( const envoy::extensions::filters::network::ext_authz::v3::ExtAuthz& proto_config, Server::Configuration::FactoryContext& context) { - ConfigSharedPtr ext_authz_config(new Config(proto_config, context.scope())); + 
ConfigSharedPtr ext_authz_config = std::make_shared(proto_config, context.scope()); const uint32_t timeout_ms = PROTOBUF_GET_MS_OR_DEFAULT(proto_config.grpc_service(), timeout, 200); return [grpc_service = proto_config.grpc_service(), &context, ext_authz_config, + transport_api_version = proto_config.transport_api_version(), timeout_ms](Network::FilterManager& filter_manager) -> void { auto async_client_factory = context.clusterManager().grpcAsyncClientManager().factoryForGrpcService( grpc_service, context.scope(), true); auto client = std::make_unique( - async_client_factory->create(), std::chrono::milliseconds(timeout_ms), false); + async_client_factory->create(), std::chrono::milliseconds(timeout_ms), + transport_api_version, false); filter_manager.addReadFilter(Network::ReadFilterSharedPtr{ std::make_shared(ext_authz_config, std::move(client))}); }; diff --git a/source/extensions/filters/network/ext_authz/ext_authz.cc b/source/extensions/filters/network/ext_authz/ext_authz.cc index af91178846084..97feb62e0d22d 100644 --- a/source/extensions/filters/network/ext_authz/ext_authz.cc +++ b/source/extensions/filters/network/ext_authz/ext_authz.cc @@ -8,6 +8,8 @@ #include "common/common/assert.h" #include "common/tracing/http_tracer_impl.h" +#include "extensions/filters/network/well_known_names.h" + namespace Envoy { namespace Extensions { namespace NetworkFilters { @@ -90,6 +92,12 @@ void Filter::onComplete(Filters::Common::ExtAuthz::ResponsePtr&& response) { // Status is Error and yet we are configured to allow traffic. Click a counter. config_->stats().failure_mode_allowed_.inc(); } + + if (!response->dynamic_metadata.fields().empty()) { + filter_callbacks_->connection().streamInfo().setDynamicMetadata( + NetworkFilterNames::get().ExtAuthorization, response->dynamic_metadata); + } + // We can get completion inline, so only call continue if that isn't happening. 
if (!calling_check_) { filter_callbacks_->continueReading(); diff --git a/source/extensions/filters/network/http_connection_manager/BUILD b/source/extensions/filters/network/http_connection_manager/BUILD index b7ebe990a80d3..012cd2b00cce7 100644 --- a/source/extensions/filters/network/http_connection_manager/BUILD +++ b/source/extensions/filters/network/http_connection_manager/BUILD @@ -1,22 +1,24 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", + "envoy_extension_package", +) + licenses(["notice"]) # Apache 2 # L4 network filter that implements HTTP protocol handling and filtering. This filter internally # drives all of the L7 HTTP filters. # Public docs: docs/root/configuration/http_conn_man/http_conn_man.rst -load( - "//bazel:envoy_build_system.bzl", - "envoy_cc_extension", - "envoy_package", -) - -envoy_package() +envoy_extension_package() envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], security_posture = "robust_to_untrusted_downstream", + # This is core Envoy config. 
+ visibility = ["//visibility:public"], deps = [ "//include/envoy/config:config_provider_manager_interface", "//include/envoy/filesystem:filesystem_interface", @@ -31,13 +33,17 @@ envoy_cc_extension( "//source/common/access_log:access_log_lib", "//source/common/common:minimal_logger_lib", "//source/common/config:utility_lib", + "//source/common/filter/http:filter_config_discovery_lib", "//source/common/http:conn_manager_lib", "//source/common/http:default_server_string_lib", "//source/common/http:request_id_extension_lib", "//source/common/http:utility_lib", + "//source/common/http/http1:codec_legacy_lib", "//source/common/http/http1:codec_lib", + "//source/common/http/http2:codec_legacy_lib", "//source/common/http/http2:codec_lib", "//source/common/json:json_loader_lib", + "//source/common/local_reply:local_reply_lib", "//source/common/protobuf:utility_lib", "//source/common/router:rds_lib", "//source/common/router:scoped_rds_lib", @@ -45,6 +51,7 @@ envoy_cc_extension( "//source/common/tracing:http_tracer_config_lib", "//source/common/tracing:http_tracer_lib", "//source/common/tracing:http_tracer_manager_lib", + "//source/extensions/filters/http/common:pass_through_filter_lib", "//source/extensions/filters/network:well_known_names", "//source/extensions/filters/network/common:factory_base_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", diff --git a/source/extensions/filters/network/http_connection_manager/config.cc b/source/extensions/filters/network/http_connection_manager/config.cc index 50b4d45f13f5a..29078e4f59c57 100644 --- a/source/extensions/filters/network/http_connection_manager/config.cc +++ b/source/extensions/filters/network/http_connection_manager/config.cc @@ -9,6 +9,7 @@ #include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" #include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.validate.h" #include "envoy/filesystem/filesystem.h" +#include 
"envoy/registry/registry.h" #include "envoy/server/admin.h" #include "envoy/tracing/http_tracer.h" #include "envoy/type/tracing/v3/custom_tag.pb.h" @@ -17,14 +18,18 @@ #include "common/access_log/access_log_impl.h" #include "common/common/fmt.h" #include "common/config/utility.h" +#include "common/filter/http/filter_config_discovery_impl.h" #include "common/http/conn_manager_utility.h" #include "common/http/default_server_string.h" #include "common/http/http1/codec_impl.h" +#include "common/http/http1/codec_impl_legacy.h" #include "common/http/http2/codec_impl.h" +#include "common/http/http2/codec_impl_legacy.h" #include "common/http/http3/quic_codec_factory.h" #include "common/http/http3/well_known_names.h" #include "common/http/request_id_extension_impl.h" #include "common/http/utility.h" +#include "common/local_reply/local_reply.h" #include "common/protobuf/utility.h" #include "common/router/rds_impl.h" #include "common/router/scoped_rds.h" @@ -32,6 +37,8 @@ #include "common/tracing/http_tracer_config_impl.h" #include "common/tracing/http_tracer_manager_impl.h" +#include "extensions/filters/http/common/pass_through_filter.h" + namespace Envoy { namespace Extensions { namespace NetworkFilters { @@ -72,6 +79,16 @@ std::unique_ptr createInternalAddressConfig( return std::make_unique(); } +class MissingConfigFilter : public Http::PassThroughDecoderFilter { +public: + Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap&, bool) override { + decoder_callbacks_->streamInfo().setResponseFlag(StreamInfo::ResponseFlag::NoFilterConfigFound); + decoder_callbacks_->sendLocalReply(Http::Code::InternalServerError, EMPTY_STRING, nullptr, + absl::nullopt, EMPTY_STRING); + return Http::FilterHeadersStatus::StopIteration; + } +}; + } // namespace // Singleton registration via macro defined in envoy/singleton/manager.h @@ -79,6 +96,7 @@ SINGLETON_MANAGER_REGISTRATION(date_provider); SINGLETON_MANAGER_REGISTRATION(route_config_provider_manager); 
SINGLETON_MANAGER_REGISTRATION(scoped_routes_config_provider_manager); SINGLETON_MANAGER_REGISTRATION(http_tracer_manager); +SINGLETON_MANAGER_REGISTRATION(filter_config_provider_manager); Utility::Singletons Utility::createSingletons(Server::Configuration::FactoryContext& context) { std::shared_ptr date_provider = @@ -88,13 +106,13 @@ Utility::Singletons Utility::createSingletons(Server::Configuration::FactoryCont context.threadLocal()); }); - std::shared_ptr route_config_provider_manager = + Router::RouteConfigProviderManagerSharedPtr route_config_provider_manager = context.singletonManager().getTyped( SINGLETON_MANAGER_REGISTERED_NAME(route_config_provider_manager), [&context] { return std::make_shared(context.admin()); }); - std::shared_ptr scoped_routes_config_provider_manager = + Router::ScopedRoutesConfigProviderManagerSharedPtr scoped_routes_config_provider_manager = context.singletonManager().getTyped( SINGLETON_MANAGER_REGISTERED_NAME(scoped_routes_config_provider_manager), [&context, route_config_provider_manager] { @@ -109,8 +127,13 @@ Utility::Singletons Utility::createSingletons(Server::Configuration::FactoryCont context.getServerFactoryContext(), context.messageValidationVisitor())); }); + std::shared_ptr filter_config_provider_manager = + context.singletonManager().getTyped( + SINGLETON_MANAGER_REGISTERED_NAME(filter_config_provider_manager), + [] { return std::make_shared(); }); + return {date_provider, route_config_provider_manager, scoped_routes_config_provider_manager, - http_tracer_manager}; + http_tracer_manager, filter_config_provider_manager}; } std::shared_ptr Utility::createConfig( @@ -119,10 +142,11 @@ std::shared_ptr Utility::createConfig( Server::Configuration::FactoryContext& context, Http::DateProvider& date_provider, Router::RouteConfigProviderManager& route_config_provider_manager, Config::ConfigProviderManager& scoped_routes_config_provider_manager, - Tracing::HttpTracerManager& http_tracer_manager) { + Tracing::HttpTracerManager& 
http_tracer_manager, + Filter::Http::FilterConfigProviderManager& filter_config_provider_manager) { return std::make_shared( proto_config, context, date_provider, route_config_provider_manager, - scoped_routes_config_provider_manager, http_tracer_manager); + scoped_routes_config_provider_manager, http_tracer_manager, filter_config_provider_manager); } Network::FilterFactoryCb @@ -134,7 +158,8 @@ HttpConnectionManagerFilterConfigFactory::createFilterFactoryFromProtoTyped( auto filter_config = Utility::createConfig( proto_config, context, *singletons.date_provider_, *singletons.route_config_provider_manager_, - *singletons.scoped_routes_config_provider_manager_, *singletons.http_tracer_manager_); + *singletons.scoped_routes_config_provider_manager_, *singletons.http_tracer_manager_, + *singletons.filter_config_provider_manager_); // This lambda captures the shared_ptrs created above, thus preserving the // reference count. @@ -143,8 +168,8 @@ HttpConnectionManagerFilterConfigFactory::createFilterFactoryFromProtoTyped( return [singletons, filter_config, &context](Network::FilterManager& filter_manager) -> void { filter_manager.addReadFilter(Network::ReadFilterSharedPtr{new Http::ConnectionManagerImpl( *filter_config, context.drainDecision(), context.random(), context.httpContext(), - context.runtime(), context.localInfo(), context.clusterManager(), - &context.overloadManager(), context.dispatcher().timeSource())}); + context.runtime(), context.localInfo(), context.clusterManager(), context.overloadManager(), + context.dispatcher().timeSource())}); }; } @@ -166,7 +191,8 @@ HttpConnectionManagerConfig::HttpConnectionManagerConfig( Server::Configuration::FactoryContext& context, Http::DateProvider& date_provider, Router::RouteConfigProviderManager& route_config_provider_manager, Config::ConfigProviderManager& scoped_routes_config_provider_manager, - Tracing::HttpTracerManager& http_tracer_manager) + Tracing::HttpTracerManager& http_tracer_manager, + 
Filter::Http::FilterConfigProviderManager& filter_config_provider_manager) : context_(context), stats_prefix_(fmt::format("http.{}.", config.stat_prefix())), stats_(Http::ConnectionManagerImpl::generateStats(stats_prefix_, context_.scope())), tracing_stats_( @@ -177,7 +203,10 @@ HttpConnectionManagerConfig::HttpConnectionManagerConfig( skip_xff_append_(config.skip_xff_append()), via_(config.via()), route_config_provider_manager_(route_config_provider_manager), scoped_routes_config_provider_manager_(scoped_routes_config_provider_manager), - http2_options_(Http2::Utility::initializeAndValidateOptions(config.http2_protocol_options())), + filter_config_provider_manager_(filter_config_provider_manager), + http2_options_(Http2::Utility::initializeAndValidateOptions( + config.http2_protocol_options(), config.has_stream_error_on_invalid_http_message(), + config.stream_error_on_invalid_http_message())), http1_settings_(Http::Utility::parseHttp1Settings(config.http_protocol_options())), max_request_headers_kb_(PROTOBUF_GET_WRAPPED_OR_DEFAULT( config, max_request_headers_kb, Http::DEFAULT_MAX_REQUEST_HEADERS_KB)), @@ -196,10 +225,13 @@ HttpConnectionManagerConfig::HttpConnectionManagerConfig( drain_timeout_(PROTOBUF_GET_MS_OR_DEFAULT(config, drain_timeout, 5000)), generate_request_id_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, generate_request_id, true)), preserve_external_request_id_(config.preserve_external_request_id()), + always_set_request_id_in_response_(config.always_set_request_id_in_response()), date_provider_(date_provider), listener_stats_(Http::ConnectionManagerImpl::generateListenerStats(stats_prefix_, context_.listenerScope())), proxy_100_continue_(config.proxy_100_continue()), + stream_error_on_invalid_http_messaging_( + PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, stream_error_on_invalid_http_message, false)), delayed_close_timeout_(PROTOBUF_GET_MS_OR_DEFAULT(config, delayed_close_timeout, 1000)), #ifdef ENVOY_NORMALIZE_PATH_BY_DEFAULT 
normalize_path_(PROTOBUF_GET_WRAPPED_OR_DEFAULT( @@ -215,8 +247,10 @@ HttpConnectionManagerConfig::HttpConnectionManagerConfig( 0))), #endif merge_slashes_(config.merge_slashes()), + strip_matching_port_(config.strip_matching_host_port()), headers_with_underscores_action_( - config.common_http_protocol_options().headers_with_underscores_action()) { + config.common_http_protocol_options().headers_with_underscores_action()), + local_reply_(LocalReply::Factory::create(config.local_reply_config(), context)) { // If idle_timeout_ was not configured in common_http_protocol_options, use value in deprecated // idle_timeout field. // TODO(asraa): Remove when idle_timeout is removed. @@ -441,17 +475,16 @@ HttpConnectionManagerConfig::HttpConnectionManagerConfig( void HttpConnectionManagerConfig::processFilter( const envoy::extensions::filters::network::http_connection_manager::v3::HttpFilter& proto_config, - int i, absl::string_view prefix, std::list& filter_factories, + int i, absl::string_view prefix, FilterFactoriesList& filter_factories, const char* filter_chain_type, bool last_filter_in_current_config) { ENVOY_LOG(debug, " {} filter #{}", prefix, i); - ENVOY_LOG(debug, " name: {}", proto_config.name()); - ENVOY_LOG(debug, " config: {}", - MessageUtil::getJsonStringFromMessage( - proto_config.has_typed_config() - ? static_cast(proto_config.typed_config()) - : static_cast( - proto_config.hidden_envoy_deprecated_config()), - true)); + if (proto_config.config_type_case() == + envoy::extensions::filters::network::http_connection_manager::v3::HttpFilter::ConfigTypeCase:: + kConfigDiscovery) { + processDynamicFilterConfig(proto_config.name(), proto_config.config_discovery(), + filter_factories, filter_chain_type, last_filter_in_current_config); + return; + } // Now see if there is a factory that will accept the config. 
auto& factory = @@ -464,7 +497,63 @@ void HttpConnectionManagerConfig::processFilter( bool is_terminal = factory.isTerminalFilter(); Config::Utility::validateTerminalFilters(proto_config.name(), factory.name(), filter_chain_type, is_terminal, last_filter_in_current_config); - filter_factories.push_back(callback); + auto filter_config_provider = filter_config_provider_manager_.createStaticFilterConfigProvider( + callback, proto_config.name()); + ENVOY_LOG(debug, " name: {}", filter_config_provider->name()); + ENVOY_LOG(debug, " config: {}", + MessageUtil::getJsonStringFromMessage( + proto_config.has_typed_config() + ? static_cast(proto_config.typed_config()) + : static_cast( + proto_config.hidden_envoy_deprecated_config()), + true)); + filter_factories.push_back(std::move(filter_config_provider)); +} + +void HttpConnectionManagerConfig::processDynamicFilterConfig( + const std::string& name, const envoy::config::core::v3::ExtensionConfigSource& config_discovery, + FilterFactoriesList& filter_factories, const char* filter_chain_type, + bool last_filter_in_current_config) { + ENVOY_LOG(debug, " dynamic filter name: {}", name); + if (config_discovery.apply_default_config_without_warming() && + !config_discovery.has_default_config()) { + throw EnvoyException(fmt::format( + "Error: filter config {} applied without warming but has no default config.", name)); + } + std::set require_type_urls; + for (const auto& type_url : config_discovery.type_urls()) { + auto factory_type_url = TypeUtil::typeUrlToDescriptorFullName(type_url); + require_type_urls.emplace(factory_type_url); + auto* factory = Registry::FactoryRegistry< + Server::Configuration::NamedHttpFilterConfigFactory>::getFactoryByType(factory_type_url); + if (factory == nullptr) { + throw EnvoyException( + fmt::format("Error: no factory found for a required type URL {}.", factory_type_url)); + } + Config::Utility::validateTerminalFilters(name, factory->name(), filter_chain_type, + factory->isTerminalFilter(), + 
last_filter_in_current_config); + } + auto filter_config_provider = filter_config_provider_manager_.createDynamicFilterConfigProvider( + config_discovery.config_source(), name, require_type_urls, context_, stats_prefix_, + config_discovery.apply_default_config_without_warming()); + if (config_discovery.has_default_config()) { + auto* default_factory = + Config::Utility::getFactoryByType( + config_discovery.default_config()); + if (default_factory == nullptr) { + throw EnvoyException(fmt::format("Error: cannot find filter factory {} for default filter " + "configuration with type URL {}.", + name, config_discovery.default_config().type_url())); + } + filter_config_provider->validateConfig(config_discovery.default_config(), *default_factory); + ProtobufTypes::MessagePtr message = Config::Utility::translateAnyToFactoryConfig( + config_discovery.default_config(), context_.messageValidationVisitor(), *default_factory); + Http::FilterFactoryCb default_config = + default_factory->createFilterFactoryFromProto(*message, stats_prefix_, context_); + filter_config_provider->onConfigUpdate(default_config, "", nullptr); + } + filter_factories.push_back(std::move(filter_config_provider)); } Http::ServerConnectionPtr @@ -472,14 +561,34 @@ HttpConnectionManagerConfig::createCodec(Network::Connection& connection, const Buffer::Instance& data, Http::ServerConnectionCallbacks& callbacks) { switch (codec_type_) { - case CodecType::HTTP1: - return std::make_unique( - connection, context_.scope(), callbacks, http1_settings_, maxRequestHeadersKb(), - maxRequestHeadersCount(), headersWithUnderscoresAction()); - case CodecType::HTTP2: - return std::make_unique( - connection, callbacks, context_.scope(), http2_options_, maxRequestHeadersKb(), - maxRequestHeadersCount(), headersWithUnderscoresAction()); + case CodecType::HTTP1: { + if (context_.runtime().snapshot().runtimeFeatureEnabled( + "envoy.reloadable_features.new_codec_behavior")) { + return std::make_unique( + connection, 
Http::Http1::CodecStats::atomicGet(http1_codec_stats_, context_.scope()), + callbacks, http1_settings_, maxRequestHeadersKb(), maxRequestHeadersCount(), + headersWithUnderscoresAction()); + } else { + return std::make_unique( + connection, Http::Http1::CodecStats::atomicGet(http1_codec_stats_, context_.scope()), + callbacks, http1_settings_, maxRequestHeadersKb(), maxRequestHeadersCount(), + headersWithUnderscoresAction()); + } + } + case CodecType::HTTP2: { + if (context_.runtime().snapshot().runtimeFeatureEnabled( + "envoy.reloadable_features.new_codec_behavior")) { + return std::make_unique( + connection, callbacks, + Http::Http2::CodecStats::atomicGet(http2_codec_stats_, context_.scope()), http2_options_, + maxRequestHeadersKb(), maxRequestHeadersCount(), headersWithUnderscoresAction()); + } else { + return std::make_unique( + connection, callbacks, + Http::Http2::CodecStats::atomicGet(http2_codec_stats_, context_.scope()), http2_options_, + maxRequestHeadersKb(), maxRequestHeadersCount(), headersWithUnderscoresAction()); + } + } case CodecType::HTTP3: // Hard code Quiche factory name here to instantiate a QUIC codec implemented. 
// TODO(danzh) Add support to get the factory name from config, possibly @@ -491,19 +600,39 @@ HttpConnectionManagerConfig::createCodec(Network::Connection& connection, .createQuicServerConnection(connection, callbacks)); case CodecType::AUTO: return Http::ConnectionManagerUtility::autoCreateCodec( - connection, data, callbacks, context_.scope(), http1_settings_, http2_options_, - maxRequestHeadersKb(), maxRequestHeadersCount(), headersWithUnderscoresAction()); + connection, data, callbacks, context_.scope(), http1_codec_stats_, http2_codec_stats_, + http1_settings_, http2_options_, maxRequestHeadersKb(), maxRequestHeadersCount(), + headersWithUnderscoresAction()); } - NOT_REACHED_GCOVR_EXCL_LINE; } -void HttpConnectionManagerConfig::createFilterChain(Http::FilterChainFactoryCallbacks& callbacks) { - for (const Http::FilterFactoryCb& factory : filter_factories_) { - factory(callbacks); +void HttpConnectionManagerConfig::createFilterChainForFactories( + Http::FilterChainFactoryCallbacks& callbacks, const FilterFactoriesList& filter_factories) { + bool added_missing_config_filter = false; + for (const auto& filter_config_provider : filter_factories) { + auto config = filter_config_provider->config(); + if (config.has_value()) { + config.value()(callbacks); + continue; + } + + // If a filter config is missing after warming, inject a local reply with status 500. 
+ if (!added_missing_config_filter) { + ENVOY_LOG(trace, "Missing filter config for a provider {}", filter_config_provider->name()); + callbacks.addStreamDecoderFilter( + Http::StreamDecoderFilterSharedPtr{std::make_shared()}); + added_missing_config_filter = true; + } else { + ENVOY_LOG(trace, "Provider {} missing a filter config", filter_config_provider->name()); + } } } +void HttpConnectionManagerConfig::createFilterChain(Http::FilterChainFactoryCallbacks& callbacks) { + createFilterChainForFactories(callbacks, filter_factories_); +} + bool HttpConnectionManagerConfig::createUpgradeFilterChain( absl::string_view upgrade_type, const Http::FilterChainFactory::UpgradeMap* per_route_upgrade_map, @@ -532,9 +661,7 @@ bool HttpConnectionManagerConfig::createUpgradeFilterChain( filters_to_use = it->second.filter_factories.get(); } - for (const Http::FilterFactoryCb& factory : *filters_to_use) { - factory(callbacks); - } + createFilterChainForFactories(callbacks, *filters_to_use); return true; } @@ -572,7 +699,8 @@ HttpConnectionManagerFactory::createHttpConnectionManagerFactoryFromProto( auto filter_config = Utility::createConfig( proto_config, context, *singletons.date_provider_, *singletons.route_config_provider_manager_, - *singletons.scoped_routes_config_provider_manager_, *singletons.http_tracer_manager_); + *singletons.scoped_routes_config_provider_manager_, *singletons.http_tracer_manager_, + *singletons.filter_config_provider_manager_); // This lambda captures the shared_ptrs created above, thus preserving the // reference count. 
@@ -581,8 +709,8 @@ HttpConnectionManagerFactory::createHttpConnectionManagerFactoryFromProto( return [singletons, filter_config, &context, &read_callbacks]() -> Http::ApiListenerPtr { auto conn_manager = std::make_unique( *filter_config, context.drainDecision(), context.random(), context.httpContext(), - context.runtime(), context.localInfo(), context.clusterManager(), - &context.overloadManager(), context.dispatcher().timeSource()); + context.runtime(), context.localInfo(), context.clusterManager(), context.overloadManager(), + context.dispatcher().timeSource()); // This factory creates a new ConnectionManagerImpl in the absence of its usual environment as // an L4 filter, so this factory needs to take a few actions. diff --git a/source/extensions/filters/network/http_connection_manager/config.h b/source/extensions/filters/network/http_connection_manager/config.h index 59dee762513fa..47cc707bdb897 100644 --- a/source/extensions/filters/network/http_connection_manager/config.h +++ b/source/extensions/filters/network/http_connection_manager/config.h @@ -8,8 +8,10 @@ #include #include "envoy/config/config_provider_manager.h" +#include "envoy/config/core/v3/extension.pb.h" #include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" #include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.validate.h" +#include "envoy/filter/http/filter_config_provider.h" #include "envoy/http/filter.h" #include "envoy/http/request_id_extension.h" #include "envoy/router/route_config_provider_manager.h" @@ -18,7 +20,10 @@ #include "common/common/logger.h" #include "common/http/conn_manager_impl.h" #include "common/http/date_provider_impl.h" +#include "common/http/http1/codec_stats.h" +#include "common/http/http2/codec_stats.h" #include "common/json/json_loader.h" +#include "common/local_reply/local_reply.h" #include "common/router/rds_impl.h" #include "common/router/scoped_rds.h" #include 
"common/tracing/http_tracer_impl.h" @@ -85,11 +90,12 @@ class HttpConnectionManagerConfig : Logger::Loggable, Server::Configuration::FactoryContext& context, Http::DateProvider& date_provider, Router::RouteConfigProviderManager& route_config_provider_manager, Config::ConfigProviderManager& scoped_routes_config_provider_manager, - Tracing::HttpTracerManager& http_tracer_manager); + Tracing::HttpTracerManager& http_tracer_manager, + Filter::Http::FilterConfigProviderManager& filter_config_provider_manager); // Http::FilterChainFactory void createFilterChain(Http::FilterChainFactoryCallbacks& callbacks) override; - using FilterFactoriesList = std::list; + using FilterFactoriesList = std::list; struct FilterConfig { std::unique_ptr filter_factories; bool allow_upgrade; @@ -109,6 +115,7 @@ class HttpConnectionManagerConfig : Logger::Loggable, FilterChainFactory& filterFactory() override { return *this; } bool generateRequestId() const override { return generate_request_id_; } bool preserveExternalRequestId() const override { return preserve_external_request_id_; } + bool alwaysSetRequestIdInResponse() const override { return always_set_request_id_in_response_; } uint32_t maxRequestHeadersKb() const override { return max_request_headers_kb_; } uint32_t maxRequestHeadersCount() const override { return max_request_headers_count_; } absl::optional idleTimeout() const override { return idle_timeout_; } @@ -153,14 +160,19 @@ class HttpConnectionManagerConfig : Logger::Loggable, const absl::optional& userAgent() override { return user_agent_; } Http::ConnectionManagerListenerStats& listenerStats() override { return listener_stats_; } bool proxy100Continue() const override { return proxy_100_continue_; } + bool streamErrorOnInvalidHttpMessaging() const override { + return stream_error_on_invalid_http_messaging_; + } const Http::Http1Settings& http1Settings() const override { return http1_settings_; } bool shouldNormalizePath() const override { return normalize_path_; } bool 
shouldMergeSlashes() const override { return merge_slashes_; } + bool shouldStripMatchingPort() const override { return strip_matching_port_; } envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction headersWithUnderscoresAction() const override { return headers_with_underscores_action_; } std::chrono::milliseconds delayedCloseTimeout() const override { return delayed_close_timeout_; } + const LocalReply::LocalReply& localReply() const override { return *local_reply_; } private: enum class CodecType { HTTP1, HTTP2, HTTP3, AUTO }; @@ -169,6 +181,13 @@ class HttpConnectionManagerConfig : Logger::Loggable, proto_config, int i, absl::string_view prefix, FilterFactoriesList& filter_factories, const char* filter_chain_type, bool last_filter_in_current_config); + void + processDynamicFilterConfig(const std::string& name, + const envoy::config::core::v3::ExtensionConfigSource& config_discovery, + FilterFactoriesList& filter_factories, const char* filter_chain_type, + bool last_filter_in_current_config); + void createFilterChainForFactories(Http::FilterChainFactoryCallbacks& callbacks, + const FilterFactoriesList& filter_factories); /** * Determines what tracing provider to use for a given @@ -185,6 +204,8 @@ class HttpConnectionManagerConfig : Logger::Loggable, std::list access_logs_; const std::string stats_prefix_; Http::ConnectionManagerStats stats_; + mutable Http::Http1::CodecStats::AtomicPtr http1_codec_stats_; + mutable Http::Http2::CodecStats::AtomicPtr http2_codec_stats_; Http::ConnectionManagerTracingStats tracing_stats_; const bool use_remote_address_{}; const std::unique_ptr internal_address_config_; @@ -195,6 +216,7 @@ class HttpConnectionManagerConfig : Logger::Loggable, std::vector set_current_client_cert_details_; Router::RouteConfigProviderManager& route_config_provider_manager_; Config::ConfigProviderManager& scoped_routes_config_provider_manager_; + Filter::Http::FilterConfigProviderManager& filter_config_provider_manager_; CodecType 
codec_type_; envoy::config::core::v3::Http2ProtocolOptions http2_options_; const Http::Http1Settings http1_settings_; @@ -216,14 +238,18 @@ class HttpConnectionManagerConfig : Logger::Loggable, std::chrono::milliseconds drain_timeout_; bool generate_request_id_; const bool preserve_external_request_id_; + const bool always_set_request_id_in_response_; Http::DateProvider& date_provider_; Http::ConnectionManagerListenerStats listener_stats_; const bool proxy_100_continue_; + const bool stream_error_on_invalid_http_messaging_; std::chrono::milliseconds delayed_close_timeout_; const bool normalize_path_; const bool merge_slashes_; + const bool strip_matching_port_; const envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction headers_with_underscores_action_; + const LocalReply::LocalReplyPtr local_reply_; // Default idle timeout is 5 minutes if nothing is specified in the HCM config. static const uint64_t StreamIdleTimeoutMs = 5 * 60 * 1000; @@ -249,10 +275,10 @@ class Utility { public: struct Singletons { std::shared_ptr date_provider_; - std::shared_ptr route_config_provider_manager_; - std::shared_ptr - scoped_routes_config_provider_manager_; + Router::RouteConfigProviderManagerSharedPtr route_config_provider_manager_; + Router::ScopedRoutesConfigProviderManagerSharedPtr scoped_routes_config_provider_manager_; Tracing::HttpTracerManagerSharedPtr http_tracer_manager_; + std::shared_ptr filter_config_provider_manager_; }; /** @@ -279,7 +305,8 @@ class Utility { Server::Configuration::FactoryContext& context, Http::DateProvider& date_provider, Router::RouteConfigProviderManager& route_config_provider_manager, Config::ConfigProviderManager& scoped_routes_config_provider_manager, - Tracing::HttpTracerManager& http_tracer_manager); + Tracing::HttpTracerManager& http_tracer_manager, + Filter::Http::FilterConfigProviderManager& filter_config_provider_manager); }; } // namespace HttpConnectionManager diff --git 
a/source/extensions/filters/network/kafka/BUILD b/source/extensions/filters/network/kafka/BUILD index f4588076cfaeb..cc625b61fc142 100644 --- a/source/extensions/filters/network/kafka/BUILD +++ b/source/extensions/filters/network/kafka/BUILD @@ -1,16 +1,17 @@ -licenses(["notice"]) # Apache 2 - -# Kafka network filter. -# Broker filter public docs: docs/root/configuration/network_filters/kafka_broker_filter.rst - +load("@rules_python//python:defs.bzl", "py_binary", "py_library") load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +# Kafka network filter. +# Broker filter public docs: docs/root/configuration/network_filters/kafka_broker_filter.rst + +envoy_extension_package() envoy_cc_extension( name = "kafka_broker_config_lib", @@ -96,6 +97,7 @@ envoy_cc_library( ], deps = [ ":serialization_lib", + ":tagged_fields_lib", ], ) @@ -159,6 +161,7 @@ envoy_cc_library( ], deps = [ ":serialization_lib", + ":tagged_fields_lib", ], ) diff --git a/source/extensions/filters/network/kafka/serialization.h b/source/extensions/filters/network/kafka/serialization.h index 8d157172891ac..8e833e67720d7 100644 --- a/source/extensions/filters/network/kafka/serialization.h +++ b/source/extensions/filters/network/kafka/serialization.h @@ -66,8 +66,6 @@ template class Deserializer { */ template class IntDeserializer : public Deserializer { public: - IntDeserializer() : written_{0} {}; - uint32_t feed(absl::string_view& data) override { const uint32_t available = std::min(sizeof(buf_) - written_, data.size()); memcpy(buf_ + written_, data.data(), available); @@ -86,7 +84,7 @@ template class IntDeserializer : public Deserializer { protected: char buf_[sizeof(T) / sizeof(char)]; - uint32_t written_; + uint32_t written_{0}; bool ready_{false}; }; diff --git a/source/extensions/filters/network/local_ratelimit/BUILD 
b/source/extensions/filters/network/local_ratelimit/BUILD index c13b64a3b73d5..13389742fa56a 100644 --- a/source/extensions/filters/network/local_ratelimit/BUILD +++ b/source/extensions/filters/network/local_ratelimit/BUILD @@ -1,16 +1,16 @@ -licenses(["notice"]) # Apache 2 - -# Local ratelimit L4 network filter -# Public docs: docs/root/configuration/network_filters/local_rate_limit_filter.rst - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +# Local ratelimit L4 network filter +# Public docs: docs/root/configuration/network_filters/local_rate_limit_filter.rst + +envoy_extension_package() envoy_cc_library( name = "local_ratelimit_lib", diff --git a/source/extensions/filters/network/mongo_proxy/BUILD b/source/extensions/filters/network/mongo_proxy/BUILD index c4c08d4a6bc29..2e281e1f67896 100644 --- a/source/extensions/filters/network/mongo_proxy/BUILD +++ b/source/extensions/filters/network/mongo_proxy/BUILD @@ -1,16 +1,16 @@ -licenses(["notice"]) # Apache 2 - -# Mongo proxy L4 network filter (observability and fault injection). -# Public docs: docs/root/configuration/network_filters/mongo_proxy_filter.rst - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +# Mongo proxy L4 network filter (observability and fault injection). 
+# Public docs: docs/root/configuration/network_filters/mongo_proxy_filter.rst + +envoy_extension_package() envoy_cc_library( name = "bson_interface", @@ -89,6 +89,7 @@ envoy_cc_library( deps = [ "//include/envoy/stats:stats_interface", "//source/common/stats:symbol_table_lib", + "//source/common/stats:utility_lib", ], ) diff --git a/source/extensions/filters/network/mongo_proxy/mongo_stats.cc b/source/extensions/filters/network/mongo_proxy/mongo_stats.cc index bf9e90ce105c9..6059b461f94c4 100644 --- a/source/extensions/filters/network/mongo_proxy/mongo_stats.cc +++ b/source/extensions/filters/network/mongo_proxy/mongo_stats.cc @@ -31,23 +31,21 @@ MongoStats::MongoStats(Stats::Scope& scope, absl::string_view prefix) stat_name_set_->rememberBuiltins({"insert", "query", "update", "delete"}); } -Stats::SymbolTable::StoragePtr MongoStats::addPrefix(const std::vector& names) { - std::vector names_with_prefix; +Stats::ElementVec MongoStats::addPrefix(const Stats::ElementVec& names) { + Stats::ElementVec names_with_prefix; names_with_prefix.reserve(1 + names.size()); names_with_prefix.push_back(prefix_); names_with_prefix.insert(names_with_prefix.end(), names.begin(), names.end()); - return scope_.symbolTable().join(names_with_prefix); + return names_with_prefix; } -void MongoStats::incCounter(const std::vector& names) { - const Stats::SymbolTable::StoragePtr stat_name_storage = addPrefix(names); - scope_.counterFromStatName(Stats::StatName(stat_name_storage.get())).inc(); +void MongoStats::incCounter(const Stats::ElementVec& names) { + Stats::Utility::counterFromElements(scope_, addPrefix(names)).inc(); } -void MongoStats::recordHistogram(const std::vector& names, - Stats::Histogram::Unit unit, uint64_t sample) { - const Stats::SymbolTable::StoragePtr stat_name_storage = addPrefix(names); - scope_.histogramFromStatName(Stats::StatName(stat_name_storage.get()), unit).recordValue(sample); +void MongoStats::recordHistogram(const Stats::ElementVec& names, 
Stats::Histogram::Unit unit, + uint64_t sample) { + Stats::Utility::histogramFromElements(scope_, addPrefix(names), unit).recordValue(sample); } } // namespace MongoProxy diff --git a/source/extensions/filters/network/mongo_proxy/mongo_stats.h b/source/extensions/filters/network/mongo_proxy/mongo_stats.h index f49d4d34e7bfa..b19561df6788f 100644 --- a/source/extensions/filters/network/mongo_proxy/mongo_stats.h +++ b/source/extensions/filters/network/mongo_proxy/mongo_stats.h @@ -7,6 +7,7 @@ #include "envoy/stats/scope.h" #include "common/stats/symbol_table_impl.h" +#include "common/stats/utility.h" namespace Envoy { namespace Extensions { @@ -17,8 +18,8 @@ class MongoStats { public: MongoStats(Stats::Scope& scope, absl::string_view prefix); - void incCounter(const std::vector& names); - void recordHistogram(const std::vector& names, Stats::Histogram::Unit unit, + void incCounter(const Stats::ElementVec& names); + void recordHistogram(const Stats::ElementVec& names, Stats::Histogram::Unit unit, uint64_t sample); /** @@ -31,10 +32,8 @@ class MongoStats { return stat_name_set_->getBuiltin(str, fallback); } - Stats::SymbolTable& symbolTable() { return scope_.symbolTable(); } - private: - Stats::SymbolTable::StoragePtr addPrefix(const std::vector& names); + Stats::ElementVec addPrefix(const Stats::ElementVec& names); Stats::Scope& scope_; Stats::StatNameSetPtr stat_name_set_; diff --git a/source/extensions/filters/network/mongo_proxy/proxy.cc b/source/extensions/filters/network/mongo_proxy/proxy.cc index fcbd3f6c52bb0..fa70d4ae9801f 100644 --- a/source/extensions/filters/network/mongo_proxy/proxy.cc +++ b/source/extensions/filters/network/mongo_proxy/proxy.cc @@ -152,22 +152,21 @@ void ProxyFilter::decodeQuery(QueryMessagePtr&& message) { } else { // Normal query, get stats on a per collection basis first. 
QueryMessageInfo::QueryType query_type = active_query->query_info_.type(); - Stats::StatNameVec names; + Stats::ElementVec names; names.reserve(6); // 2 entries are added by chargeQueryStats(). names.push_back(mongo_stats_->collection_); - Stats::StatNameDynamicPool dynamic(mongo_stats_->symbolTable()); - names.push_back(dynamic.add(active_query->query_info_.collection())); + names.push_back(Stats::DynamicName(active_query->query_info_.collection())); chargeQueryStats(names, query_type); // Callsite stats if we have it. if (!active_query->query_info_.callsite().empty()) { names.push_back(mongo_stats_->callsite_); - names.push_back(dynamic.add(active_query->query_info_.callsite())); + names.push_back(Stats::DynamicName(active_query->query_info_.callsite())); chargeQueryStats(names, query_type); } // Global stats. - if (active_query->query_info_.max_time() < 1) { + if (active_query->query_info_.maxTime() < 1) { stats_.op_query_no_max_time_.inc(); } if (query_type == QueryMessageInfo::QueryType::ScatterGet) { @@ -180,7 +179,7 @@ void ProxyFilter::decodeQuery(QueryMessagePtr&& message) { active_query_list_.emplace_back(std::move(active_query)); } -void ProxyFilter::chargeQueryStats(Stats::StatNameVec& names, +void ProxyFilter::chargeQueryStats(Stats::ElementVec& names, QueryMessageInfo::QueryType query_type) { // names come in containing {"collection", collection}. Report stats for 1 or // 2 variations on this array, and then return with the array in the same @@ -224,16 +223,15 @@ void ProxyFilter::decodeReply(ReplyMessagePtr&& message) { } if (!active_query.query_info_.command().empty()) { - Stats::StatNameVec names{mongo_stats_->cmd_, - mongo_stats_->getBuiltin(active_query.query_info_.command(), - mongo_stats_->unknown_command_)}; + Stats::ElementVec names{mongo_stats_->cmd_, + mongo_stats_->getBuiltin(active_query.query_info_.command(), + mongo_stats_->unknown_command_)}; chargeReplyStats(active_query, names, *message); } else { // Collection stats first. 
- Stats::StatNameDynamicPool dynamic(mongo_stats_->symbolTable()); - Stats::StatNameVec names{mongo_stats_->collection_, - dynamic.add(active_query.query_info_.collection()), - mongo_stats_->query_}; + Stats::ElementVec names{mongo_stats_->collection_, + Stats::DynamicName(active_query.query_info_.collection()), + mongo_stats_->query_}; chargeReplyStats(active_query, names, *message); // Callsite stats if we have it. @@ -242,7 +240,7 @@ void ProxyFilter::decodeReply(ReplyMessagePtr&& message) { // to mutate the array to {"collection", collection, "callsite", callsite, "query"}. ASSERT(names.size() == 3); names.back() = mongo_stats_->callsite_; // Replaces "query". - names.push_back(dynamic.add(active_query.query_info_.callsite())); + names.push_back(Stats::DynamicName(active_query.query_info_.callsite())); names.push_back(mongo_stats_->query_); chargeReplyStats(active_query, names, *message); } @@ -292,7 +290,7 @@ void ProxyFilter::onDrainClose() { read_callbacks_->connection().close(Network::ConnectionCloseType::FlushWrite); } -void ProxyFilter::chargeReplyStats(ActiveQuery& active_query, Stats::StatNameVec& names, +void ProxyFilter::chargeReplyStats(ActiveQuery& active_query, Stats::ElementVec& names, const ReplyMessage& message) { uint64_t reply_documents_byte_size = 0; for (const Bson::DocumentSharedPtr& document : message.documents()) { diff --git a/source/extensions/filters/network/mongo_proxy/proxy.h b/source/extensions/filters/network/mongo_proxy/proxy.h index 0da6146f418e8..c54308f1ae386 100644 --- a/source/extensions/filters/network/mongo_proxy/proxy.h +++ b/source/extensions/filters/network/mongo_proxy/proxy.h @@ -167,12 +167,12 @@ class ProxyFilter : public Network::Filter, // Increment counters related to queries. 'names' is passed by non-const // reference so the implementation can mutate it without copying, though it // always restores it to its prior state prior to return. 
- void chargeQueryStats(Stats::StatNameVec& names, QueryMessageInfo::QueryType query_type); + void chargeQueryStats(Stats::ElementVec& names, QueryMessageInfo::QueryType query_type); // Add samples to histograms related to replies. 'names' is passed by // non-const reference so the implementation can mutate it without copying, // though it always restores it to its prior state prior to return. - void chargeReplyStats(ActiveQuery& active_query, Stats::StatNameVec& names, + void chargeReplyStats(ActiveQuery& active_query, Stats::ElementVec& names, const ReplyMessage& message); void doDecode(Buffer::Instance& buffer); diff --git a/source/extensions/filters/network/mongo_proxy/utility.h b/source/extensions/filters/network/mongo_proxy/utility.h index e7fda7f26b1cf..3b8a6773601cf 100644 --- a/source/extensions/filters/network/mongo_proxy/utility.h +++ b/source/extensions/filters/network/mongo_proxy/utility.h @@ -40,7 +40,7 @@ class QueryMessageInfo { /** * @return the value of maxTimeMS or 0 if not given. */ - int32_t max_time() { return max_time_; } + int32_t maxTime() { return max_time_; } /** * @return the type of a query message. diff --git a/source/extensions/filters/network/mysql_proxy/BUILD b/source/extensions/filters/network/mysql_proxy/BUILD index bd27d007f8b2d..fee8571ea619c 100644 --- a/source/extensions/filters/network/mysql_proxy/BUILD +++ b/source/extensions/filters/network/mysql_proxy/BUILD @@ -1,16 +1,16 @@ -licenses(["notice"]) # Apache 2 - -# MySQL proxy L7 network filter. -# Public docs: docs/root/configuration/network_filters/mysql_proxy_filter.rst - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +# MySQL proxy L7 network filter. 
+# Public docs: docs/root/configuration/network_filters/mysql_proxy_filter.rst + +envoy_extension_package() envoy_cc_library( name = "proxy_lib", @@ -36,7 +36,6 @@ envoy_cc_library( "mysql_session.h", "mysql_utils.h", ], - external_deps = ["sqlparser"], deps = [ "//include/envoy/network:filter_interface", "//include/envoy/server:filter_config_interface", @@ -44,6 +43,7 @@ envoy_cc_library( "//include/envoy/stats:stats_macros", "//source/common/buffer:buffer_lib", "//source/common/network:filter_lib", + "//source/extensions/common/sqlutils:sqlutils_lib", "//source/extensions/filters/network:well_known_names", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], diff --git a/source/extensions/filters/network/mysql_proxy/mysql_codec_command.h b/source/extensions/filters/network/mysql_proxy/mysql_codec_command.h index 776218be0c51f..6f4ae5239e1f1 100644 --- a/source/extensions/filters/network/mysql_proxy/mysql_codec_command.h +++ b/source/extensions/filters/network/mysql_proxy/mysql_codec_command.h @@ -60,8 +60,6 @@ class CommandResponse : public MySQLCodec { int parseMessage(Buffer::Instance&, uint32_t) override { return MYSQL_SUCCESS; } std::string encode() override { return ""; } - uint16_t getServerStatus() const { return server_status_; } - uint16_t getWarnings() const { return warnings_; } void setServerStatus(uint16_t status); void setWarnings(uint16_t warnings); diff --git a/source/extensions/filters/network/mysql_proxy/mysql_codec_switch_resp.h b/source/extensions/filters/network/mysql_proxy/mysql_codec_switch_resp.h index 5ed3d70c9c354..7b26ad7bcf27d 100644 --- a/source/extensions/filters/network/mysql_proxy/mysql_codec_switch_resp.h +++ b/source/extensions/filters/network/mysql_proxy/mysql_codec_switch_resp.h @@ -14,7 +14,6 @@ class ClientSwitchResponse : public MySQLCodec { int parseMessage(Buffer::Instance& buffer, uint32_t len) override; std::string encode() override; - const std::string& getAuthPluginResp() const { return auth_plugin_resp_; } void 
setAuthPluginResp(std::string& auth_swith_resp); private: diff --git a/source/extensions/filters/network/mysql_proxy/mysql_decoder.h b/source/extensions/filters/network/mysql_proxy/mysql_decoder.h index ff11a613f87b7..e5890d1a05ef6 100644 --- a/source/extensions/filters/network/mysql_proxy/mysql_decoder.h +++ b/source/extensions/filters/network/mysql_proxy/mysql_decoder.h @@ -6,6 +6,7 @@ #include "common/buffer/buffer_impl.h" #include "common/common/logger.h" +#include "extensions/common/sqlutils/sqlutils.h" #include "extensions/filters/network/mysql_proxy/mysql_codec_clogin.h" #include "extensions/filters/network/mysql_proxy/mysql_codec_clogin_resp.h" #include "extensions/filters/network/mysql_proxy/mysql_codec_command.h" @@ -45,6 +46,14 @@ class Decoder { virtual void onData(Buffer::Instance& data) PURE; virtual MySQLSession& getSession() PURE; + + const Extensions::Common::SQLUtils::SQLUtils::DecoderAttributes& getAttributes() const { + return attributes_; + } + +protected: + // Decoder attributes. 
+ Extensions::Common::SQLUtils::SQLUtils::DecoderAttributes attributes_; }; using DecoderPtr = std::unique_ptr; diff --git a/source/extensions/filters/network/mysql_proxy/mysql_filter.cc b/source/extensions/filters/network/mysql_proxy/mysql_filter.cc index 0d8be2394d150..e66701ee87841 100644 --- a/source/extensions/filters/network/mysql_proxy/mysql_filter.cc +++ b/source/extensions/filters/network/mysql_proxy/mysql_filter.cc @@ -8,8 +8,6 @@ #include "extensions/filters/network/well_known_names.h" -#include "include/sqlparser/SQLParser.h" - namespace Envoy { namespace Extensions { namespace NetworkFilters { @@ -105,39 +103,23 @@ void MySQLFilter::onCommand(Command& command) { } // Parse a given query - hsql::SQLParserResult result; - hsql::SQLParser::parse(command.getData(), &result); + envoy::config::core::v3::Metadata& dynamic_metadata = + read_callbacks_->connection().streamInfo().dynamicMetadata(); + ProtobufWkt::Struct metadata( + (*dynamic_metadata.mutable_filter_metadata())[NetworkFilterNames::get().MySQLProxy]); + + auto result = Common::SQLUtils::SQLUtils::setMetadata(command.getData(), + decoder_->getAttributes(), metadata); ENVOY_CONN_LOG(trace, "mysql_proxy: query processed {}", read_callbacks_->connection(), command.getData()); - if (!result.isValid()) { + if (!result) { config_->stats_.queries_parse_error_.inc(); return; } config_->stats_.queries_parsed_.inc(); - // Set dynamic metadata - envoy::config::core::v3::Metadata& dynamic_metadata = - read_callbacks_->connection().streamInfo().dynamicMetadata(); - ProtobufWkt::Struct metadata( - (*dynamic_metadata.mutable_filter_metadata())[NetworkFilterNames::get().MySQLProxy]); - auto& fields = *metadata.mutable_fields(); - - for (auto i = 0u; i < result.size(); ++i) { - if (result.getStatement(i)->type() == hsql::StatementType::kStmtShow) { - continue; - } - hsql::TableAccessMap table_access_map; - result.getStatement(i)->tablesAccessed(table_access_map); - for (auto& it : table_access_map) { - auto& 
operations = *fields[it.first].mutable_list_value(); - for (const auto& ot : it.second) { - operations.add_values()->set_string_value(ot); - } - } - } - read_callbacks_->connection().streamInfo().setDynamicMetadata( NetworkFilterNames::get().MySQLProxy, metadata); } diff --git a/source/extensions/filters/network/postgres_proxy/BUILD b/source/extensions/filters/network/postgres_proxy/BUILD index 05b99ade974fe..aa397da9b55fd 100644 --- a/source/extensions/filters/network/postgres_proxy/BUILD +++ b/source/extensions/filters/network/postgres_proxy/BUILD @@ -1,3 +1,10 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", + "envoy_cc_library", + "envoy_extension_package", +) + licenses(["notice"]) # Apache 2 #package(default_visibility = ["//visibility:public"]) @@ -5,14 +12,7 @@ licenses(["notice"]) # Apache 2 # PostgresSQL proxy L7 network filter. # Public docs: docs/root/configuration/network_filters/postgres_proxy_filter.rst -load( - "//bazel:envoy_build_system.bzl", - "envoy_cc_extension", - "envoy_cc_library", - "envoy_package", -) - -envoy_package() +envoy_extension_package() envoy_cc_library( name = "filter", @@ -33,6 +33,8 @@ envoy_cc_library( "//include/envoy/stats:stats_macros", "//source/common/buffer:buffer_lib", "//source/common/network:filter_lib", + "//source/extensions/common/sqlutils:sqlutils_lib", + "//source/extensions/filters/network:well_known_names", ], ) diff --git a/source/extensions/filters/network/postgres_proxy/config.cc b/source/extensions/filters/network/postgres_proxy/config.cc index 948ccd9f58a07..14180bc201b1f 100644 --- a/source/extensions/filters/network/postgres_proxy/config.cc +++ b/source/extensions/filters/network/postgres_proxy/config.cc @@ -15,9 +15,10 @@ NetworkFilters::PostgresProxy::PostgresConfigFactory::createFilterFactoryFromPro ASSERT(!proto_config.stat_prefix().empty()); const std::string stat_prefix = fmt::format("postgres.{}", proto_config.stat_prefix()); + const bool enable_sql = 
PROTOBUF_GET_WRAPPED_OR_DEFAULT(proto_config, enable_sql_parsing, true); PostgresFilterConfigSharedPtr filter_config( - std::make_shared(stat_prefix, context.scope())); + std::make_shared(stat_prefix, enable_sql, context.scope())); return [filter_config](Network::FilterManager& filter_manager) -> void { filter_manager.addFilter(std::make_shared(filter_config)); }; diff --git a/source/extensions/filters/network/postgres_proxy/config.h b/source/extensions/filters/network/postgres_proxy/config.h index 4c5f1e4a8a504..eada27fed6184 100644 --- a/source/extensions/filters/network/postgres_proxy/config.h +++ b/source/extensions/filters/network/postgres_proxy/config.h @@ -19,7 +19,7 @@ class PostgresConfigFactory : public Common::FactoryBase< envoy::extensions::filters::network::postgres_proxy::v3alpha::PostgresProxy> { public: - PostgresConfigFactory() : FactoryBase{NetworkFilterNames::get().Postgres} {} + PostgresConfigFactory() : FactoryBase{NetworkFilterNames::get().PostgresProxy} {} private: Network::FilterFactoryCb createFilterFactoryFromProtoTyped( diff --git a/source/extensions/filters/network/postgres_proxy/postgres_decoder.cc b/source/extensions/filters/network/postgres_proxy/postgres_decoder.cc index d4d3702c33a7f..0aae15ce995f8 100644 --- a/source/extensions/filters/network/postgres_proxy/postgres_decoder.cc +++ b/source/extensions/filters/network/postgres_proxy/postgres_decoder.cc @@ -2,6 +2,8 @@ #include +#include "absl/strings/str_split.h" + namespace Envoy { namespace Extensions { namespace NetworkFilters { @@ -9,7 +11,7 @@ namespace PostgresProxy { void DecoderImpl::initialize() { // Special handler for first message of the transaction. - first_ = MsgProcessor{"Startup", {}}; + first_ = MsgProcessor{"Startup", {&DecoderImpl::onStartup}}; // Frontend messages. FE_messages_.direction_ = "Frontend"; @@ -17,7 +19,7 @@ void DecoderImpl::initialize() { // Setup handlers for known messages. 
absl::flat_hash_map& FE_known_msgs = FE_messages_.messages_; - // Handler for know messages. + // Handler for known Frontend messages. FE_known_msgs['B'] = MsgProcessor{"Bind", {}}; FE_known_msgs['C'] = MsgProcessor{"Close", {}}; FE_known_msgs['d'] = MsgProcessor{"CopyData", {}}; @@ -29,12 +31,12 @@ void DecoderImpl::initialize() { FE_known_msgs['F'] = MsgProcessor{"FunctionCall", {}}; FE_known_msgs['p'] = MsgProcessor{"PasswordMessage/GSSResponse/SASLInitialResponse/SASLResponse", {}}; - FE_known_msgs['P'] = MsgProcessor{"Parse", {}}; - FE_known_msgs['Q'] = MsgProcessor{"Query", {}}; + FE_known_msgs['P'] = MsgProcessor{"Parse", {&DecoderImpl::onParse}}; + FE_known_msgs['Q'] = MsgProcessor{"Query", {&DecoderImpl::onQuery}}; FE_known_msgs['S'] = MsgProcessor{"Sync", {}}; FE_known_msgs['X'] = MsgProcessor{"Terminate", {&DecoderImpl::decodeFrontendTerminate}}; - // Handler for unknown messages. + // Handler for unknown Frontend messages. FE_messages_.unknown_ = MsgProcessor{"Other", {&DecoderImpl::incMessagesUnknown}}; // Backend messages. @@ -43,7 +45,7 @@ void DecoderImpl::initialize() { // Setup handlers for known messages. absl::flat_hash_map& BE_known_msgs = BE_messages_.messages_; - // Handler for know messages. + // Handler for known Backend messages. BE_known_msgs['R'] = MsgProcessor{"Authentication", {&DecoderImpl::decodeAuthentication}}; BE_known_msgs['K'] = MsgProcessor{"BackendKeyData", {}}; BE_known_msgs['2'] = MsgProcessor{"BindComplete", {}}; @@ -68,7 +70,7 @@ void DecoderImpl::initialize() { BE_known_msgs['Z'] = MsgProcessor{"ReadyForQuery", {}}; BE_known_msgs['T'] = MsgProcessor{"RowDescription", {}}; - // Handler for unknown messages. + // Handler for unknown Backend messages. BE_messages_.unknown_ = MsgProcessor{"Other", {&DecoderImpl::incMessagesUnknown}}; // Setup hash map for handling backend statements. 
@@ -169,6 +171,7 @@ bool DecoderImpl::parseMessage(Buffer::Instance& data) { // The 1 byte message type and message length should be in the buffer // Check if the entire message has been read. std::string message; + uint32_t length = data.peekBEInt(startup_ ? 0 : 1); if (data.length() < (length + (startup_ ? 0 : 1))) { ENVOY_LOG(trace, "postgres_proxy: cannot parse message. Need {} bytes in buffer", @@ -190,6 +193,7 @@ bool DecoderImpl::parseMessage(Buffer::Instance& data) { return false; } else { ENVOY_LOG(debug, "Detected version {}.{} of Postgres", code >> 16, code & 0x0000FFFF); + // 4 bytes of length and 4 bytes of version code. } } @@ -324,6 +328,44 @@ void DecoderImpl::decodeBackendErrorResponse() { decodeErrorNotice(BE_errors_); // indicating its meaning. It can be warning, notice, info, debug or log. void DecoderImpl::decodeBackendNoticeResponse() { decodeErrorNotice(BE_notices_); } +// Method parses Parse message of the following format: +// String: The name of the destination prepared statement (an empty string selects the unnamed +// prepared statement). +// +// String: The query string to be parsed. +// +// Int16: The number of parameter data +// types specified (can be zero). Note that this is not an indication of the number of parameters +// that might appear in the query string, only the number that the frontend wants to pre-specify +// types for. Then, for each parameter, there is the following: +// +// Int32: Specifies the object ID of +// the parameter data type. Placing a zero here is equivalent to leaving the type unspecified. +void DecoderImpl::onParse() { + // The first two strings are separated by \0. + // The first string is optional. If no \0 is found it means + // that the message contains query string only. 
+ std::vector query_parts = absl::StrSplit(message_, absl::ByChar('\0')); + callbacks_->processQuery(query_parts[1]); +} + +void DecoderImpl::onQuery() { callbacks_->processQuery(message_); } + +// Method is invoked on clear-text Startup message. +// The message format is continuous string of the following format: +// user<username>\0database<database>\0application_name<application>\0encoding<encoding>\0 +void DecoderImpl::onStartup() { + // First 4 bytes of startup message contains version code. + // It is skipped. After that message contains attributes. + attributes_ = absl::StrSplit(message_.substr(4), absl::ByChar('\0'), absl::SkipEmpty()); + + // If "database" attribute is not found, default it to "user" attribute. + if ((attributes_.find("database") == attributes_.end()) && + (attributes_.find("user") != attributes_.end())) { + attributes_["database"] = attributes_["user"]; + } +} + } // namespace PostgresProxy } // namespace NetworkFilters } // namespace Extensions diff --git a/source/extensions/filters/network/postgres_proxy/postgres_decoder.h b/source/extensions/filters/network/postgres_proxy/postgres_decoder.h index bd779a2c24ac4..24465b55731f9 100644 --- a/source/extensions/filters/network/postgres_proxy/postgres_decoder.h +++ b/source/extensions/filters/network/postgres_proxy/postgres_decoder.h @@ -6,6 +6,7 @@ #include "common/buffer/buffer_impl.h" #include "common/common/logger.h" +#include "extensions/common/sqlutils/sqlutils.h" #include "extensions/filters/network/postgres_proxy/postgres_session.h" #include "absl/container/flat_hash_map.h" @@ -39,6 +40,8 @@ class DecoderCallbacks { enum class ErrorType { Error, Fatal, Panic, Unknown }; virtual void incErrors(ErrorType) PURE; + + virtual void processQuery(const std::string&) PURE; }; // Postgres message decoder.
@@ -48,6 +51,15 @@ class Decoder { virtual bool onData(Buffer::Instance& data, bool frontend) PURE; virtual PostgresSession& getSession() PURE; + + const Extensions::Common::SQLUtils::SQLUtils::DecoderAttributes& getAttributes() const { + return attributes_; + } + +protected: + // Decoder attributes extracted from Startup message. + // It can be username, database name, client app type, etc. + Extensions::Common::SQLUtils::SQLUtils::DecoderAttributes attributes_; }; using DecoderPtr = std::unique_ptr; @@ -113,6 +125,9 @@ class DecoderImpl : public Decoder, Logger::Loggable { void decodeBackendNoticeResponse(); void decodeFrontendTerminate(); void decodeErrorNotice(MsgParserDict& types); + void onQuery(); + void onParse(); + void onStartup(); void incMessagesUnknown() { callbacks_->incMessagesUnknown(); } void incSessionsEncrypted() { callbacks_->incSessionsEncrypted(); } diff --git a/source/extensions/filters/network/postgres_proxy/postgres_filter.cc b/source/extensions/filters/network/postgres_proxy/postgres_filter.cc index c339de5dd47c7..f66754c051018 100644 --- a/source/extensions/filters/network/postgres_proxy/postgres_filter.cc +++ b/source/extensions/filters/network/postgres_proxy/postgres_filter.cc @@ -4,14 +4,18 @@ #include "envoy/network/connection.h" #include "extensions/filters/network/postgres_proxy/postgres_decoder.h" +#include "extensions/filters/network/well_known_names.h" namespace Envoy { namespace Extensions { namespace NetworkFilters { namespace PostgresProxy { -PostgresFilterConfig::PostgresFilterConfig(const std::string& stat_prefix, Stats::Scope& scope) - : stat_prefix_{stat_prefix}, scope_{scope}, stats_{generateStats(stat_prefix, scope)} {} +PostgresFilterConfig::PostgresFilterConfig(const std::string& stat_prefix, bool enable_sql_parsing, + Stats::Scope& scope) + : stat_prefix_{stat_prefix}, + enable_sql_parsing_(enable_sql_parsing), scope_{scope}, stats_{generateStats(stat_prefix, + scope)} {} 
PostgresFilter::PostgresFilter(PostgresFilterConfigSharedPtr config) : config_{config} { if (!decoder_) { @@ -21,7 +25,8 @@ PostgresFilter::PostgresFilter(PostgresFilterConfigSharedPtr config) : config_{c // Network::ReadFilter Network::FilterStatus PostgresFilter::onData(Buffer::Instance& data, bool) { - ENVOY_CONN_LOG(trace, "echo: got {} bytes", read_callbacks_->connection(), data.length()); + ENVOY_CONN_LOG(trace, "postgres_proxy: got {} bytes", read_callbacks_->connection(), + data.length()); // Frontend Buffer frontend_buffer_.add(data); @@ -159,6 +164,29 @@ void PostgresFilter::incStatements(StatementType type) { } } +void PostgresFilter::processQuery(const std::string& sql) { + if (config_->enable_sql_parsing_) { + ProtobufWkt::Struct metadata; + + auto result = Common::SQLUtils::SQLUtils::setMetadata(sql, decoder_->getAttributes(), metadata); + + if (!result) { + config_->stats_.statements_parse_error_.inc(); + ENVOY_CONN_LOG(trace, "postgres_proxy: cannot parse SQL: {}", read_callbacks_->connection(), + sql.c_str()); + return; + } + + config_->stats_.statements_parsed_.inc(); + ENVOY_CONN_LOG(trace, "postgres_proxy: query processed {}", read_callbacks_->connection(), + sql.c_str()); + + // Set dynamic metadata + read_callbacks_->connection().streamInfo().setDynamicMetadata( + NetworkFilterNames::get().PostgresProxy, metadata); + } +} + void PostgresFilter::doDecode(Buffer::Instance& data, bool frontend) { // Keep processing data until buffer is empty or decoder says // that it cannot process data in the buffer. 
diff --git a/source/extensions/filters/network/postgres_proxy/postgres_filter.h b/source/extensions/filters/network/postgres_proxy/postgres_filter.h index 0355bea4b1f3b..5571a0587c400 100644 --- a/source/extensions/filters/network/postgres_proxy/postgres_filter.h +++ b/source/extensions/filters/network/postgres_proxy/postgres_filter.h @@ -40,6 +40,8 @@ namespace PostgresProxy { COUNTER(transactions) \ COUNTER(transactions_commit) \ COUNTER(transactions_rollback) \ + COUNTER(statements_parsed) \ + COUNTER(statements_parse_error) \ COUNTER(notices) \ COUNTER(notices_notice) \ COUNTER(notices_warning) \ @@ -60,9 +62,11 @@ struct PostgresProxyStats { */ class PostgresFilterConfig { public: - PostgresFilterConfig(const std::string& stat_prefix, Stats::Scope& scope); + PostgresFilterConfig(const std::string& stat_prefix, bool enable_sql_parsing, + Stats::Scope& scope); const std::string stat_prefix_; + bool enable_sql_parsing_{true}; Stats::Scope& scope_; PostgresProxyStats stats_; @@ -101,6 +105,7 @@ class PostgresFilter : public Network::Filter, void incTransactions() override; void incTransactionsCommit() override; void incTransactionsRollback() override; + void processQuery(const std::string&) override; void doDecode(Buffer::Instance& data, bool); DecoderPtr createDecoder(DecoderCallbacks* callbacks); @@ -111,6 +116,8 @@ class PostgresFilter : public Network::Filter, uint32_t getFrontendBufLength() const { return frontend_buffer_.length(); } uint32_t getBackendBufLength() const { return backend_buffer_.length(); } const PostgresProxyStats& getStats() const { return config_->stats_; } + Network::Connection& connection() const { return read_callbacks_->connection(); } + const PostgresFilterConfigSharedPtr& getConfig() const { return config_; } private: Network::ReadFilterCallbacks* read_callbacks_{}; diff --git a/source/extensions/filters/network/ratelimit/BUILD b/source/extensions/filters/network/ratelimit/BUILD index 6ac6dc7f87f1c..f653adf348fb8 100644 --- 
a/source/extensions/filters/network/ratelimit/BUILD +++ b/source/extensions/filters/network/ratelimit/BUILD @@ -1,21 +1,27 @@ -licenses(["notice"]) # Apache 2 - -# Ratelimit L4 network filter -# Public docs: docs/root/configuration/network_filters/rate_limit_filter.rst - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +# Ratelimit L4 network filter +# Public docs: docs/root/configuration/network_filters/rate_limit_filter.rst + +envoy_extension_package() envoy_cc_library( name = "ratelimit_lib", srcs = ["ratelimit.cc"], hdrs = ["ratelimit.h"], + # Legacy test use. TODO(#9953) clean up. + visibility = [ + "//source/extensions:__subpackages__", + "//test/common/network:__pkg__", + "//test/extensions:__subpackages__", + ], deps = [ "//include/envoy/network:connection_interface", "//include/envoy/network:filter_interface", diff --git a/source/extensions/filters/network/ratelimit/config.cc b/source/extensions/filters/network/ratelimit/config.cc index 4e45468603500..82037f5b424f8 100644 --- a/source/extensions/filters/network/ratelimit/config.cc +++ b/source/extensions/filters/network/ratelimit/config.cc @@ -35,7 +35,8 @@ Network::FilterFactoryCb RateLimitConfigFactory::createFilterFactoryFromProtoTyp filter_config, Filters::Common::RateLimit::rateLimitClient( - context, proto_config.rate_limit_service().grpc_service(), timeout))); + context, proto_config.rate_limit_service().grpc_service(), timeout, + proto_config.rate_limit_service().transport_api_version()))); }; } diff --git a/source/extensions/filters/network/ratelimit/ratelimit.cc b/source/extensions/filters/network/ratelimit/ratelimit.cc index 7ef447a8af2a3..430508ce3b611 100644 --- a/source/extensions/filters/network/ratelimit/ratelimit.cc +++ b/source/extensions/filters/network/ratelimit/ratelimit.cc @@ -69,8 +69,9 @@ void Filter::onEvent(Network::ConnectionEvent event) { } } 
-void Filter::complete(Filters::Common::RateLimit::LimitStatus status, Http::ResponseHeaderMapPtr&&, - Http::RequestHeaderMapPtr&&) { +void Filter::complete(Filters::Common::RateLimit::LimitStatus status, + Filters::Common::RateLimit::DescriptorStatusListPtr&&, + Http::ResponseHeaderMapPtr&&, Http::RequestHeaderMapPtr&&) { status_ = Status::Complete; config_->stats().active_.dec(); diff --git a/source/extensions/filters/network/ratelimit/ratelimit.h b/source/extensions/filters/network/ratelimit/ratelimit.h index 2babfd85dcd25..eba34f4348671 100644 --- a/source/extensions/filters/network/ratelimit/ratelimit.h +++ b/source/extensions/filters/network/ratelimit/ratelimit.h @@ -92,6 +92,7 @@ class Filter : public Network::ReadFilter, // RateLimit::RequestCallbacks void complete(Filters::Common::RateLimit::LimitStatus status, + Filters::Common::RateLimit::DescriptorStatusListPtr&& descriptor_statuses, Http::ResponseHeaderMapPtr&& response_headers_to_add, Http::RequestHeaderMapPtr&& request_headers_to_add) override; diff --git a/source/extensions/filters/network/rbac/BUILD b/source/extensions/filters/network/rbac/BUILD index f5c63db53d0a6..75e98406cf268 100644 --- a/source/extensions/filters/network/rbac/BUILD +++ b/source/extensions/filters/network/rbac/BUILD @@ -1,13 +1,13 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +envoy_extension_package() envoy_cc_extension( name = "config", diff --git a/source/extensions/filters/network/rbac/rbac_filter.cc b/source/extensions/filters/network/rbac/rbac_filter.cc index 1bc12017b3b6d..3b328ed2815f9 100644 --- a/source/extensions/filters/network/rbac/rbac_filter.cc +++ b/source/extensions/filters/network/rbac/rbac_filter.cc @@ -85,8 +85,10 @@ RoleBasedAccessControlFilter::checkEngine(Filters::Common::RBAC::EnforcementMode const auto engine = 
config_->engine(mode); if (engine != nullptr) { std::string effective_policy_id; - if (engine->allowed(callbacks_->connection(), callbacks_->connection().streamInfo(), - &effective_policy_id)) { + + // Check authorization decision and do Action operations + if (engine->handleAction(callbacks_->connection(), callbacks_->connection().streamInfo(), + &effective_policy_id)) { if (mode == Filters::Common::RBAC::EnforcementMode::Shadow) { ENVOY_LOG(debug, "shadow allowed"); config_->stats().shadow_allowed_.inc(); diff --git a/source/extensions/filters/network/redis_proxy/BUILD b/source/extensions/filters/network/redis_proxy/BUILD index 38611a99f1d5c..c0b742efa02e5 100644 --- a/source/extensions/filters/network/redis_proxy/BUILD +++ b/source/extensions/filters/network/redis_proxy/BUILD @@ -1,17 +1,17 @@ -licenses(["notice"]) # Apache 2 - -# Redis proxy L4 network filter. Implements consistent hashing and observability for large redis -# clusters. -# Public docs: docs/root/configuration/network_filters/redis_proxy_filter.rst - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +# Redis proxy L4 network filter. Implements consistent hashing and observability for large redis +# clusters. +# Public docs: docs/root/configuration/network_filters/redis_proxy_filter.rst + +envoy_extension_package() envoy_cc_library( name = "command_splitter_interface", @@ -119,6 +119,11 @@ envoy_cc_extension( srcs = ["config.cc"], hdrs = ["config.h"], security_posture = "requires_trusted_downstream_and_upstream", + # TODO(#9953) clean up. 
+ visibility = [ + "//:extension_config", + "//test/integration:__subpackages__", + ], deps = [ "//include/envoy/upstream:upstream_interface", "//source/extensions/common/redis:cluster_refresh_manager_lib", diff --git a/source/extensions/filters/network/redis_proxy/command_splitter.h b/source/extensions/filters/network/redis_proxy/command_splitter.h index 5e1248d3b5001..e03d0a92e1377 100644 --- a/source/extensions/filters/network/redis_proxy/command_splitter.h +++ b/source/extensions/filters/network/redis_proxy/command_splitter.h @@ -41,11 +41,18 @@ class SplitCallbacks { virtual bool connectionAllowed() PURE; /** - * Called when an authentication command has been received. + * Called when an authentication command has been received with a password. * @param password supplies the AUTH password provided by the downstream client. */ virtual void onAuth(const std::string& password) PURE; + /** + * Called when an authentication command has been received with a username and password. + * @param username supplies the AUTH username provided by the downstream client. + * @param password supplies the AUTH password provided by the downstream client. + */ + virtual void onAuth(const std::string& username, const std::string& password) PURE; + /** * Called when the response is ready. * @param value supplies the response which is now owned by the callee. 
diff --git a/source/extensions/filters/network/redis_proxy/command_splitter_impl.cc b/source/extensions/filters/network/redis_proxy/command_splitter_impl.cc index adfbf7ff9fbe1..a5bd89588f51a 100644 --- a/source/extensions/filters/network/redis_proxy/command_splitter_impl.cc +++ b/source/extensions/filters/network/redis_proxy/command_splitter_impl.cc @@ -439,7 +439,12 @@ SplitRequestPtr InstanceImpl::makeRequest(Common::Redis::RespValuePtr&& request, onInvalidRequest(callbacks); return nullptr; } - callbacks.onAuth(request->asArray()[1].asString()); + if (request->asArray().size() == 3) { + callbacks.onAuth(request->asArray()[1].asString(), request->asArray()[2].asString()); + } else { + callbacks.onAuth(request->asArray()[1].asString()); + } + return nullptr; } diff --git a/source/extensions/filters/network/redis_proxy/command_splitter_impl.h b/source/extensions/filters/network/redis_proxy/command_splitter_impl.h index 8057f9a91b2ca..b67b4498f0cfa 100644 --- a/source/extensions/filters/network/redis_proxy/command_splitter_impl.h +++ b/source/extensions/filters/network/redis_proxy/command_splitter_impl.h @@ -2,7 +2,6 @@ #include #include -#include #include #include "envoy/stats/scope.h" diff --git a/source/extensions/filters/network/redis_proxy/config.cc b/source/extensions/filters/network/redis_proxy/config.cc index e52a04ffe0655..2d62f511b393e 100644 --- a/source/extensions/filters/network/redis_proxy/config.cc +++ b/source/extensions/filters/network/redis_proxy/config.cc @@ -75,12 +75,12 @@ Network::FilterFactoryCb RedisProxyFilterConfigFactory::createFilterFactoryFromP for (auto& cluster : unique_clusters) { Stats::ScopePtr stats_scope = context.scope().createScope(fmt::format("cluster.{}.redis_cluster", cluster)); - - upstreams.emplace(cluster, std::make_shared( - cluster, context.clusterManager(), - Common::Redis::Client::ClientFactoryImpl::instance_, - context.threadLocal(), proto_config.settings(), context.api(), - std::move(stats_scope), 
redis_command_stats, refresh_manager)); + auto conn_pool_ptr = std::make_shared( + cluster, context.clusterManager(), Common::Redis::Client::ClientFactoryImpl::instance_, + context.threadLocal(), proto_config.settings(), context.api(), std::move(stats_scope), + redis_command_stats, refresh_manager); + conn_pool_ptr->init(); + upstreams.emplace(cluster, conn_pool_ptr); } auto router = diff --git a/source/extensions/filters/network/redis_proxy/config.h b/source/extensions/filters/network/redis_proxy/config.h index 521ae76a9d9f7..c3237934fcea1 100644 --- a/source/extensions/filters/network/redis_proxy/config.h +++ b/source/extensions/filters/network/redis_proxy/config.h @@ -24,27 +24,39 @@ class ProtocolOptionsConfigImpl : public Upstream::ProtocolOptionsConfig { ProtocolOptionsConfigImpl( const envoy::extensions::filters::network::redis_proxy::v3::RedisProtocolOptions& proto_config) - : auth_password_(proto_config.auth_password()) {} + : auth_username_(proto_config.auth_username()), auth_password_(proto_config.auth_password()) { + } + + std::string authUsername(Api::Api& api) const { + return Config::DataSource::read(auth_username_, true, api); + } - std::string auth_password(Api::Api& api) const { + std::string authPassword(Api::Api& api) const { return Config::DataSource::read(auth_password_, true, api); } - const envoy::config::core::v3::DataSource& auth_password_datasource() const { - return auth_password_; + static const std::string authUsername(const Upstream::ClusterInfoConstSharedPtr info, + Api::Api& api) { + auto options = info->extensionProtocolOptionsTyped( + NetworkFilterNames::get().RedisProxy); + if (options) { + return options->authUsername(api); + } + return EMPTY_STRING; } - static const std::string auth_password(const Upstream::ClusterInfoConstSharedPtr info, - Api::Api& api) { + static const std::string authPassword(const Upstream::ClusterInfoConstSharedPtr info, + Api::Api& api) { auto options = info->extensionProtocolOptionsTyped( 
NetworkFilterNames::get().RedisProxy); if (options) { - return options->auth_password(api); + return options->authPassword(api); } return EMPTY_STRING; } private: + envoy::config::core::v3::DataSource auth_username_; envoy::config::core::v3::DataSource auth_password_; }; @@ -65,7 +77,8 @@ class RedisProxyFilterConfigFactory Upstream::ProtocolOptionsConfigConstSharedPtr createProtocolOptionsTyped( const envoy::extensions::filters::network::redis_proxy::v3::RedisProtocolOptions& - proto_config) override { + proto_config, + Server::Configuration::ProtocolOptionsFactoryContext&) override { return std::make_shared(proto_config); } }; diff --git a/source/extensions/filters/network/redis_proxy/conn_pool.h b/source/extensions/filters/network/redis_proxy/conn_pool.h index 0fa1e68bec965..385fac2eb5278 100644 --- a/source/extensions/filters/network/redis_proxy/conn_pool.h +++ b/source/extensions/filters/network/redis_proxy/conn_pool.h @@ -62,14 +62,6 @@ class Instance { */ virtual Common::Redis::Client::PoolRequest* makeRequest(const std::string& hash_key, RespVariant&& request, PoolCallbacks& callbacks) PURE; - - /** - * Notify the redirection manager singleton that a redirection error has been received from an - * upstream server associated with the pool's associated cluster. - * @return bool true if a cluster's registered callback with the redirection manager is scheduled - * to be called from the main thread dispatcher, false otherwise. 
- */ - virtual bool onRedirection() PURE; }; using InstanceSharedPtr = std::shared_ptr; diff --git a/source/extensions/filters/network/redis_proxy/conn_pool_impl.cc b/source/extensions/filters/network/redis_proxy/conn_pool_impl.cc index fdc70b71ec99f..8e9ac6f186a63 100644 --- a/source/extensions/filters/network/redis_proxy/conn_pool_impl.cc +++ b/source/extensions/filters/network/redis_proxy/conn_pool_impl.cc @@ -44,21 +44,36 @@ InstanceImpl::InstanceImpl( const Common::Redis::RedisCommandStatsSharedPtr& redis_command_stats, Extensions::Common::Redis::ClusterRefreshManagerSharedPtr refresh_manager) : cluster_name_(cluster_name), cm_(cm), client_factory_(client_factory), - tls_(tls.allocateSlot()), config_(config), api_(api), stats_scope_(std::move(stats_scope)), + tls_(tls.allocateSlot()), config_(new Common::Redis::Client::ConfigImpl(config)), api_(api), + stats_scope_(std::move(stats_scope)), redis_command_stats_(redis_command_stats), redis_cluster_stats_{REDIS_CLUSTER_STATS( POOL_COUNTER(*stats_scope_))}, - refresh_manager_(std::move(refresh_manager)) { - tls_->set([this, cluster_name]( - Event::Dispatcher& dispatcher) -> ThreadLocal::ThreadLocalObjectSharedPtr { - return std::make_shared(*this, dispatcher, cluster_name); - }); + refresh_manager_(std::move(refresh_manager)) {} + +void InstanceImpl::init() { + // Note: `this` and `cluster_name` have a lifetime of the filter. + // That may be shorter than the tls callback if the listener is torn down shortly after it is + // created. We use a weak pointer to make sure this object outlives the tls callbacks.
+ std::weak_ptr this_weak_ptr = this->shared_from_this(); + tls_->set( + [this_weak_ptr](Event::Dispatcher& dispatcher) -> ThreadLocal::ThreadLocalObjectSharedPtr { + if (auto this_shared_ptr = this_weak_ptr.lock()) { + return std::make_shared(this_shared_ptr, dispatcher, + this_shared_ptr->cluster_name_); + } + return nullptr; + }); } +// This method is always called from an InstanceSharedPtr, so we don't have to worry about tls_->getTyped +// failing due to InstanceImpl going away. Common::Redis::Client::PoolRequest* InstanceImpl::makeRequest(const std::string& key, RespVariant&& request, PoolCallbacks& callbacks) { return tls_->getTyped().makeRequest(key, std::move(request), callbacks); } +// This method is always called from an InstanceSharedPtr, so we don't have to worry about tls_->getTyped +// failing due to InstanceImpl going away. Common::Redis::Client::PoolRequest* InstanceImpl::makeRequestToHost(const std::string& host_address, const Common::Redis::RespValue& request, @@ -66,15 +81,20 @@ InstanceImpl::makeRequestToHost(const std::string& host_address, return tls_->getTyped().makeRequestToHost(host_address, request, callbacks); } -InstanceImpl::ThreadLocalPool::ThreadLocalPool(InstanceImpl& parent, Event::Dispatcher& dispatcher, +InstanceImpl::ThreadLocalPool::ThreadLocalPool(std::shared_ptr parent, + Event::Dispatcher& dispatcher, std::string cluster_name) : parent_(parent), dispatcher_(dispatcher), cluster_name_(std::move(cluster_name)), drain_timer_(dispatcher.createTimer([this]() -> void { drainClients(); })), - is_redis_cluster_(false) { - cluster_update_handle_ = parent_.cm_.addThreadLocalClusterUpdateCallbacks(*this); - Upstream::ThreadLocalCluster* cluster = parent_.cm_.get(cluster_name_); + is_redis_cluster_(false), client_factory_(parent->client_factory_), config_(parent->config_), + stats_scope_(parent->stats_scope_), redis_command_stats_(parent->redis_command_stats_), + redis_cluster_stats_(parent->redis_cluster_stats_), +
refresh_manager_(parent->refresh_manager_) { + cluster_update_handle_ = parent->cm_.addThreadLocalClusterUpdateCallbacks(*this); + Upstream::ThreadLocalCluster* cluster = parent->cm_.get(cluster_name_); if (cluster != nullptr) { - auth_password_ = ProtocolOptionsConfigImpl::auth_password(cluster->info(), parent_.api_); + auth_username_ = ProtocolOptionsConfigImpl::authUsername(cluster->info(), parent->api_); + auth_password_ = ProtocolOptionsConfigImpl::authPassword(cluster->info(), parent->api_); onClusterAddOrUpdateNonVirtual(*cluster); } } @@ -99,10 +119,15 @@ void InstanceImpl::ThreadLocalPool::onClusterAddOrUpdateNonVirtual( if (cluster.info()->name() != cluster_name_) { return; } + // Ensure the filter is not deleted in the main thread during this method. + auto shared_parent = parent_.lock(); + if (!shared_parent) { + return; + } if (cluster_ != nullptr) { // Treat an update as a removal followed by an add. - onClusterRemoval(cluster_name_); + ThreadLocalPool::onClusterRemoval(cluster_name_); } ASSERT(cluster_ == nullptr); @@ -214,9 +239,9 @@ InstanceImpl::ThreadLocalPool::threadLocalActiveClient(Upstream::HostConstShared if (!client) { client = std::make_unique(*this); client->host_ = host; - client->redis_client_ = parent_.client_factory_.create(host, dispatcher_, parent_.config_, - parent_.redis_command_stats_, - *parent_.stats_scope_, auth_password_); + client->redis_client_ = + client_factory_.create(host, dispatcher_, *config_, redis_command_stats_, *(stats_scope_), + auth_username_, auth_password_); client->redis_client_->addConnectionCallbacks(*client); } return client; @@ -231,9 +256,9 @@ InstanceImpl::ThreadLocalPool::makeRequest(const std::string& key, RespVariant&& return nullptr; } - Clusters::Redis::RedisLoadBalancerContextImpl lb_context(key, parent_.config_.enableHashtagging(), + Clusters::Redis::RedisLoadBalancerContextImpl lb_context(key, config_->enableHashtagging(), is_redis_cluster_, getRequest(request), - parent_.config_.readPolicy()); 
+ config_->readPolicy()); Upstream::HostConstSharedPtr host = cluster_->loadBalancer().chooseHost(&lb_context); if (!host) { return nullptr; @@ -289,9 +314,9 @@ Common::Redis::Client::PoolRequest* InstanceImpl::ThreadLocalPool::makeRequestTo auto it = host_address_map_.find(host_address_map_key); if (it == host_address_map_.end()) { // This host is not known to the cluster manager. Create a new host and insert it into the map. - if (created_via_redirect_hosts_.size() == parent_.config_.maxUpstreamUnknownConnections()) { + if (created_via_redirect_hosts_.size() == config_->maxUpstreamUnknownConnections()) { // Too many upstream connections to unknown hosts have been created. - parent_.redis_cluster_stats_.max_upstream_unknown_connections_reached_.inc(); + redis_cluster_stats_.max_upstream_unknown_connections_reached_.inc(); return nullptr; } if (!ipv6) { @@ -343,7 +368,7 @@ void InstanceImpl::ThreadLocalActiveClient::onEvent(Network::ConnectionEvent eve it++) { if ((*it).get() == this) { if (!redis_client_->active()) { - parent_.parent_.redis_cluster_stats_.upstream_cx_drained_.inc(); + parent_.redis_cluster_stats_.upstream_cx_drained_.inc(); } parent_.dispatcher_.deferredDelete(std::move(redis_client_)); parent_.clients_to_drain_.erase(it); @@ -379,7 +404,7 @@ void InstanceImpl::PendingRequest::onResponse(Common::Redis::RespValuePtr&& resp void InstanceImpl::PendingRequest::onFailure() { request_handler_ = nullptr; pool_callbacks_.onFailure(); - parent_.parent_.onFailure(); + parent_.refresh_manager_->onFailure(parent_.cluster_name_); parent_.onRequestCompleted(); } @@ -402,7 +427,7 @@ bool InstanceImpl::PendingRequest::onRedirection(Common::Redis::RespValuePtr&& v onResponse(std::move(value)); return false; } else { - parent_.parent_.onRedirection(); + parent_.refresh_manager_->onRedirection(parent_.cluster_name_); return true; } } diff --git a/source/extensions/filters/network/redis_proxy/conn_pool_impl.h 
b/source/extensions/filters/network/redis_proxy/conn_pool_impl.h index 6dcb695efac80..2a6c643cfeedd 100644 --- a/source/extensions/filters/network/redis_proxy/conn_pool_impl.h +++ b/source/extensions/filters/network/redis_proxy/conn_pool_impl.h @@ -5,7 +5,6 @@ #include #include #include -#include #include #include "envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.pb.h" @@ -24,11 +23,14 @@ #include "source/extensions/clusters/redis/redis_cluster_lb.h" #include "extensions/common/redis/cluster_refresh_manager.h" +#include "extensions/filters/network/common/redis/client.h" #include "extensions/filters/network/common/redis/client_impl.h" #include "extensions/filters/network/common/redis/codec_impl.h" #include "extensions/filters/network/common/redis/utility.h" #include "extensions/filters/network/redis_proxy/conn_pool.h" +#include "absl/container/node_hash_map.h" + namespace Envoy { namespace Extensions { namespace NetworkFilters { @@ -52,7 +54,7 @@ class DoNothingPoolCallbacks : public PoolCallbacks { void onFailure() override{}; }; -class InstanceImpl : public Instance { +class InstanceImpl : public Instance, public std::enable_shared_from_this { public: InstanceImpl( const std::string& cluster_name, Upstream::ClusterManager& cm, @@ -79,9 +81,7 @@ class InstanceImpl : public Instance { makeRequestToHost(const std::string& host_address, const Common::Redis::RespValue& request, Common::Redis::Client::ClientCallbacks& callbacks); - bool onRedirection() override { return refresh_manager_->onRedirection(cluster_name_); } - bool onFailure() { return refresh_manager_->onFailure(cluster_name_); } - bool onHostDegraded() { return refresh_manager_->onHostDegraded(cluster_name_); } + void init(); // Allow the unit test to have access to private members. 
friend class RedisConnPoolImplTest; @@ -127,7 +127,8 @@ class InstanceImpl : public Instance { struct ThreadLocalPool : public ThreadLocal::ThreadLocalObject, public Upstream::ClusterUpdateCallbacks { - ThreadLocalPool(InstanceImpl& parent, Event::Dispatcher& dispatcher, std::string cluster_name); + ThreadLocalPool(std::shared_ptr parent, Event::Dispatcher& dispatcher, + std::string cluster_name); ~ThreadLocalPool() override; ThreadLocalActiveClientPtr& threadLocalActiveClient(Upstream::HostConstSharedPtr host); Common::Redis::Client::PoolRequest* makeRequest(const std::string& key, RespVariant&& request, @@ -149,14 +150,15 @@ class InstanceImpl : public Instance { void onRequestCompleted(); - InstanceImpl& parent_; + std::weak_ptr parent_; Event::Dispatcher& dispatcher_; const std::string cluster_name_; Upstream::ClusterUpdateCallbacksHandlePtr cluster_update_handle_; Upstream::ThreadLocalCluster* cluster_{}; - std::unordered_map client_map_; + absl::node_hash_map client_map_; Envoy::Common::CallbackHandle* host_set_member_update_cb_handle_{}; - std::unordered_map host_address_map_; + absl::node_hash_map host_address_map_; + std::string auth_username_; std::string auth_password_; std::list created_via_redirect_hosts_; std::list clients_to_drain_; @@ -170,15 +172,21 @@ class InstanceImpl : public Instance { */ Event::TimerPtr drain_timer_; bool is_redis_cluster_; + Common::Redis::Client::ClientFactory& client_factory_; + Common::Redis::Client::ConfigSharedPtr config_; + Stats::ScopeSharedPtr stats_scope_; + Common::Redis::RedisCommandStatsSharedPtr redis_command_stats_; + RedisClusterStats redis_cluster_stats_; + const Extensions::Common::Redis::ClusterRefreshManagerSharedPtr refresh_manager_; }; const std::string cluster_name_; Upstream::ClusterManager& cm_; Common::Redis::Client::ClientFactory& client_factory_; ThreadLocal::SlotPtr tls_; - Common::Redis::Client::ConfigImpl config_; + Common::Redis::Client::ConfigSharedPtr config_; Api::Api& api_; - 
Stats::ScopePtr stats_scope_; + Stats::ScopeSharedPtr stats_scope_; Common::Redis::RedisCommandStatsSharedPtr redis_command_stats_; RedisClusterStats redis_cluster_stats_; const Extensions::Common::Redis::ClusterRefreshManagerSharedPtr refresh_manager_; diff --git a/source/extensions/filters/network/redis_proxy/proxy_filter.cc b/source/extensions/filters/network/redis_proxy/proxy_filter.cc index 7782485d5ec9a..aa2f558cc51af 100644 --- a/source/extensions/filters/network/redis_proxy/proxy_filter.cc +++ b/source/extensions/filters/network/redis_proxy/proxy_filter.cc @@ -23,6 +23,8 @@ ProxyFilterConfig::ProxyFilterConfig( : drain_decision_(drain_decision), runtime_(runtime), stat_prefix_(fmt::format("redis.{}.", config.stat_prefix())), stats_(generateStats(stat_prefix_, scope)), + downstream_auth_username_( + Config::DataSource::read(config.downstream_auth_username(), true, api)), downstream_auth_password_( Config::DataSource::read(config.downstream_auth_password(), true, api)) {} @@ -38,7 +40,8 @@ ProxyFilter::ProxyFilter(Common::Redis::DecoderFactory& factory, config_(config) { config_->stats_.downstream_cx_total_.inc(); config_->stats_.downstream_cx_active_.inc(); - connection_allowed_ = config_->downstream_auth_password_.empty(); + connection_allowed_ = + config_->downstream_auth_username_.empty() && config_->downstream_auth_password_.empty(); } ProxyFilter::~ProxyFilter() { @@ -96,6 +99,31 @@ void ProxyFilter::onAuth(PendingRequest& request, const std::string& password) { request.onResponse(std::move(response)); } +void ProxyFilter::onAuth(PendingRequest& request, const std::string& username, + const std::string& password) { + Common::Redis::RespValuePtr response{new Common::Redis::RespValue()}; + if (config_->downstream_auth_username_.empty() && config_->downstream_auth_password_.empty()) { + response->type(Common::Redis::RespType::Error); + response->asString() = "ERR Client sent AUTH, but no username-password pair is set"; + } else if 
(config_->downstream_auth_username_.empty() && username == "default" && + password == config_->downstream_auth_password_) { + // empty username and "default" are synonymous in Redis 6 ACLs + response->type(Common::Redis::RespType::SimpleString); + response->asString() = "OK"; + connection_allowed_ = true; + } else if (username == config_->downstream_auth_username_ && + password == config_->downstream_auth_password_) { + response->type(Common::Redis::RespType::SimpleString); + response->asString() = "OK"; + connection_allowed_ = true; + } else { + response->type(Common::Redis::RespType::Error); + response->asString() = "WRONGPASS invalid username-password pair"; + connection_allowed_ = false; + } + request.onResponse(std::move(response)); +} + void ProxyFilter::onResponse(PendingRequest& request, Common::Redis::RespValuePtr&& value) { ASSERT(!pending_requests_.empty()); request.pending_response_ = std::move(value); diff --git a/source/extensions/filters/network/redis_proxy/proxy_filter.h b/source/extensions/filters/network/redis_proxy/proxy_filter.h index 23ebd3e0f039b..1694a2a0640e9 100644 --- a/source/extensions/filters/network/redis_proxy/proxy_filter.h +++ b/source/extensions/filters/network/redis_proxy/proxy_filter.h @@ -57,6 +57,7 @@ class ProxyFilterConfig { const std::string stat_prefix_; const std::string redis_drain_close_runtime_key_{"redis.drain_close_enabled"}; ProxyStats stats_; + const std::string downstream_auth_username_; const std::string downstream_auth_password_; private: @@ -100,6 +101,9 @@ class ProxyFilter : public Network::ReadFilter, // RedisProxy::CommandSplitter::SplitCallbacks bool connectionAllowed() override { return parent_.connectionAllowed(); } void onAuth(const std::string& password) override { parent_.onAuth(*this, password); } + void onAuth(const std::string& username, const std::string& password) override { + parent_.onAuth(*this, username, password); + } void onResponse(Common::Redis::RespValuePtr&& value) override { 
parent_.onResponse(*this, std::move(value)); } @@ -110,6 +114,7 @@ class ProxyFilter : public Network::ReadFilter, }; void onAuth(PendingRequest& request, const std::string& password); + void onAuth(PendingRequest& request, const std::string& username, const std::string& password); void onResponse(PendingRequest& request, Common::Redis::RespValuePtr&& value); Common::Redis::DecoderPtr decoder_; diff --git a/source/extensions/filters/network/rocketmq_proxy/BUILD b/source/extensions/filters/network/rocketmq_proxy/BUILD new file mode 100644 index 0000000000000..f837b9bf83f8d --- /dev/null +++ b/source/extensions/filters/network/rocketmq_proxy/BUILD @@ -0,0 +1,148 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", + "envoy_cc_library", + "envoy_extension_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_extension_package() + +envoy_cc_library( + name = "well_known_names", + hdrs = ["well_known_names.h"], + deps = ["//source/common/singleton:const_singleton"], +) + +envoy_cc_library( + name = "stats_interface", + hdrs = ["stats.h"], + deps = [ + "//include/envoy/stats:stats_interface", + "//include/envoy/stats:stats_macros", + ], +) + +envoy_cc_library( + name = "rocketmq_interface", + hdrs = [ + "topic_route.h", + ], + deps = [ + "//source/common/protobuf:utility_lib", + ], +) + +envoy_cc_library( + name = "rocketmq_lib", + srcs = [ + "topic_route.cc", + ], + deps = [ + ":rocketmq_interface", + ], +) + +envoy_cc_library( + name = "protocol_interface", + hdrs = ["protocol.h"], + deps = [ + ":metadata_lib", + "//source/common/buffer:buffer_lib", + "//source/common/protobuf:utility_lib", + ], +) + +envoy_cc_library( + name = "protocol_lib", + srcs = ["protocol.cc"], + deps = [ + ":protocol_interface", + ":well_known_names", + "//source/common/common:enum_to_int", + ], +) + +envoy_cc_library( + name = "codec_lib", + srcs = [ + "codec.cc", + ], + hdrs = [ + "codec.h", + ], + deps = [ + ":protocol_lib", + "//include/envoy/network:filter_interface", 
+ "//source/common/protobuf:utility_lib", + ], +) + +envoy_cc_library( + name = "conn_manager_lib", + srcs = [ + "active_message.cc", + "conn_manager.cc", + ], + hdrs = [ + "active_message.h", + "conn_manager.h", + ], + deps = [ + ":codec_lib", + ":protocol_lib", + ":rocketmq_lib", + ":stats_interface", + ":well_known_names", + "//include/envoy/buffer:buffer_interface", + "//include/envoy/event:dispatcher_interface", + "//include/envoy/network:connection_interface", + "//include/envoy/tcp:conn_pool_interface", + "//include/envoy/upstream:cluster_manager_interface", + "//source/common/buffer:buffer_lib", + "//source/common/common:assert_lib", + "//source/common/common:empty_string", + "//source/common/common:enum_to_int", + "//source/common/common:linked_object", + "//source/common/protobuf:utility_lib", + "//source/common/stats:timespan_lib", + "//source/common/upstream:load_balancer_lib", + "//source/extensions/filters/network:well_known_names", + "//source/extensions/filters/network/rocketmq_proxy/router:router_interface", + "@envoy_api//envoy/extensions/filters/network/rocketmq_proxy/v3:pkg_cc_proto", + ], +) + +envoy_cc_extension( + name = "config", + srcs = [ + "config.cc", + ], + hdrs = [ + "config.h", + ], + security_posture = "requires_trusted_downstream_and_upstream", + status = "alpha", + deps = [ + ":conn_manager_lib", + "//include/envoy/registry", + "//include/envoy/server:filter_config_interface", + "//source/common/common:logger_lib", + "//source/common/common:minimal_logger_lib", + "//source/common/config:utility_lib", + "//source/extensions/filters/network/common:factory_base_lib", + "//source/extensions/filters/network/rocketmq_proxy/router:route_matcher", + "//source/extensions/filters/network/rocketmq_proxy/router:router_lib", + "@envoy_api//envoy/extensions/filters/network/rocketmq_proxy/v3:pkg_cc_proto", + ], +) + +envoy_cc_library( + name = "metadata_lib", + hdrs = ["metadata.h"], + external_deps = ["abseil_optional"], + deps = [ + 
"//source/common/http:header_map_lib", + ], +) diff --git a/source/extensions/filters/network/rocketmq_proxy/active_message.cc b/source/extensions/filters/network/rocketmq_proxy/active_message.cc new file mode 100644 index 0000000000000..3f38565da6844 --- /dev/null +++ b/source/extensions/filters/network/rocketmq_proxy/active_message.cc @@ -0,0 +1,333 @@ +#include "extensions/filters/network/rocketmq_proxy/active_message.h" + +#include "envoy/upstream/cluster_manager.h" + +#include "common/common/empty_string.h" +#include "common/common/enum_to_int.h" +#include "common/protobuf/utility.h" + +#include "extensions/filters/network/rocketmq_proxy/conn_manager.h" +#include "extensions/filters/network/rocketmq_proxy/topic_route.h" +#include "extensions/filters/network/rocketmq_proxy/well_known_names.h" +#include "extensions/filters/network/well_known_names.h" + +#include "absl/strings/match.h" + +using Envoy::Tcp::ConnectionPool::ConnectionDataPtr; + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RocketmqProxy { + +ActiveMessage::ActiveMessage(ConnectionManager& conn_manager, RemotingCommandPtr&& request) + : connection_manager_(conn_manager), request_(std::move(request)) { + metadata_ = std::make_shared(); + MetadataHelper::parseRequest(request_, metadata_); + updateActiveRequestStats(); +} + +ActiveMessage::~ActiveMessage() { updateActiveRequestStats(false); } + +void ActiveMessage::createFilterChain() { router_ = connection_manager_.config().createRouter(); } + +void ActiveMessage::sendRequestToUpstream() { + if (!router_) { + createFilterChain(); + } + router_->sendRequestToUpstream(*this); +} + +Router::RouteConstSharedPtr ActiveMessage::route() { + if (cached_route_) { + return cached_route_.value(); + } + const std::string& topic_name = metadata_->topicName(); + ENVOY_LOG(trace, "fetch route for topic: {}", topic_name); + Router::RouteConstSharedPtr route = connection_manager_.config().routerConfig().route(*metadata_); + 
cached_route_ = route; + return cached_route_.value(); +} + +void ActiveMessage::onError(absl::string_view error_message) { + connection_manager_.onError(request_, error_message); +} + +const RemotingCommandPtr& ActiveMessage::downstreamRequest() const { return request_; } + +void ActiveMessage::fillAckMessageDirective(Buffer::Instance& buffer, const std::string& group, + const std::string& topic, + const AckMessageDirective& directive) { + int32_t cursor = 0; + const int32_t buffer_length = buffer.length(); + while (cursor < buffer_length) { + auto frame_length = buffer.peekBEInt(cursor); + std::string decoded_topic = Decoder::decodeTopic(buffer, cursor); + ENVOY_LOG(trace, "Process a message: consumer group: {}, topic: {}, messageId: {}", + decoded_topic, group, Decoder::decodeMsgId(buffer, cursor)); + if (!absl::StartsWith(decoded_topic, RetryTopicPrefix) && decoded_topic != topic) { + ENVOY_LOG(warn, + "Decoded topic from pop-response does not equal to request. Decoded topic: " + "{}, request topic: {}, message ID: {}", + decoded_topic, topic, Decoder::decodeMsgId(buffer, cursor)); + } + + /* + * Sometimes, client SDK may used -1 for queue-id in the pop request so that broker servers + * are allowed to lookup all queues it serves. So we need to use the actual queue Id from + * response body. + */ + int32_t queue_id = Decoder::decodeQueueId(buffer, cursor); + int64_t queue_offset = Decoder::decodeQueueOffset(buffer, cursor); + + std::string key = fmt::format("{}-{}-{}-{}", group, decoded_topic, queue_id, queue_offset); + connection_manager_.insertAckDirective(key, directive); + ENVOY_LOG( + debug, + "Insert an ack directive. 
Consumer group: {}, topic: {}, queue Id: {}, queue offset: {}", + group, topic, queue_id, queue_offset); + cursor += frame_length; + } +} + +void ActiveMessage::sendResponseToDownstream() { + if (request_->code() == enumToSignedInt(RequestCode::PopMessage)) { + // Fill ack message directive + auto pop_header = request_->typedCustomHeader(); + AckMessageDirective directive(pop_header->targetBrokerName(), pop_header->targetBrokerId(), + connection_manager_.timeSource().monotonicTime()); + ENVOY_LOG(trace, "Receive pop response from broker name: {}, broker ID: {}", + pop_header->targetBrokerName(), pop_header->targetBrokerId()); + fillAckMessageDirective(response_->body(), pop_header->consumerGroup(), pop_header->topic(), + directive); + } + + // If acknowledgment of the message is successful, we need to erase the ack directive from + // manager. + if (request_->code() == enumToSignedInt(RequestCode::AckMessage) && + response_->code() == enumToSignedInt(ResponseCode::Success)) { + auto ack_header = request_->typedCustomHeader(); + connection_manager_.eraseAckDirective(ack_header->directiveKey()); + } + + if (response_) { + response_->opaque(request_->opaque()); + connection_manager_.sendResponseToDownstream(response_); + } +} + +void ActiveMessage::fillBrokerData(std::vector& list, const std::string& cluster, + const std::string& broker_name, int64_t broker_id, + const std::string& address) { + bool found = false; + for (auto& entry : list) { + if (entry.cluster() == cluster && entry.brokerName() == broker_name) { + found = true; + if (entry.brokerAddresses().find(broker_id) != entry.brokerAddresses().end()) { + ENVOY_LOG(warn, "Duplicate broker_id found. 
Broker ID: {}, address: {}", broker_id, + address); + continue; + } else { + entry.brokerAddresses()[broker_id] = address; + } + } + } + + if (!found) { + absl::node_hash_map addresses; + addresses.emplace(broker_id, address); + + list.emplace_back(BrokerData(cluster, broker_name, std::move(addresses))); + } +} + +void ActiveMessage::onQueryTopicRoute() { + std::string cluster_name; + ASSERT(metadata_->hasTopicName()); + const std::string& topic_name = metadata_->topicName(); + Upstream::ThreadLocalCluster* cluster = nullptr; + Router::RouteConstSharedPtr route = this->route(); + if (route) { + cluster_name = route->routeEntry()->clusterName(); + Upstream::ClusterManager& cluster_manager = connection_manager_.config().clusterManager(); + cluster = cluster_manager.get(cluster_name); + } + if (cluster) { + ENVOY_LOG(trace, "Envoy has an operating cluster {} for topic {}", cluster_name, topic_name); + std::vector queue_data_list; + std::vector broker_data_list; + for (auto& host_set : cluster->prioritySet().hostSetsPerPriority()) { + if (host_set->hosts().empty()) { + continue; + } + for (const auto& host : host_set->hosts()) { + std::string broker_address = host->address()->asString(); + auto& filter_metadata = host->metadata()->filter_metadata(); + const auto filter_it = filter_metadata.find(NetworkFilterNames::get().RocketmqProxy); + ASSERT(filter_it != filter_metadata.end()); + const auto& metadata_fields = filter_it->second.fields(); + ASSERT(metadata_fields.contains(RocketmqConstants::get().BrokerName)); + std::string broker_name = + metadata_fields.at(RocketmqConstants::get().BrokerName).string_value(); + ASSERT(metadata_fields.contains(RocketmqConstants::get().ClusterName)); + std::string broker_cluster_name = + metadata_fields.at(RocketmqConstants::get().ClusterName).string_value(); + // Proto3 will ignore the field if the value is zero. 
+ int32_t read_queue_num = 0; + if (metadata_fields.contains(RocketmqConstants::get().ReadQueueNum)) { + read_queue_num = static_cast( + metadata_fields.at(RocketmqConstants::get().ReadQueueNum).number_value()); + } + int32_t write_queue_num = 0; + if (metadata_fields.contains(RocketmqConstants::get().WriteQueueNum)) { + write_queue_num = static_cast( + metadata_fields.at(RocketmqConstants::get().WriteQueueNum).number_value()); + } + int32_t perm = 0; + if (metadata_fields.contains(RocketmqConstants::get().Perm)) { + perm = static_cast( + metadata_fields.at(RocketmqConstants::get().Perm).number_value()); + } + int32_t broker_id = 0; + if (metadata_fields.contains(RocketmqConstants::get().BrokerId)) { + broker_id = static_cast( + metadata_fields.at(RocketmqConstants::get().BrokerId).number_value()); + } + queue_data_list.emplace_back(QueueData(broker_name, read_queue_num, write_queue_num, perm)); + if (connection_manager_.config().developMode()) { + ENVOY_LOG(trace, "Develop mode, return proxy address to replace all broker addresses so " + "that L4 network rewrite is not required"); + fillBrokerData(broker_data_list, broker_cluster_name, broker_name, broker_id, + connection_manager_.config().proxyAddress()); + } else { + fillBrokerData(broker_data_list, broker_cluster_name, broker_name, broker_id, + broker_address); + } + } + } + ENVOY_LOG(trace, "Prepare TopicRouteData for {} OK", topic_name); + TopicRouteData topic_route_data(std::move(queue_data_list), std::move(broker_data_list)); + ProtobufWkt::Struct data_struct; + topic_route_data.encode(data_struct); + std::string json = MessageUtil::getJsonStringFromMessage(data_struct); + ENVOY_LOG(trace, "Serialize TopicRouteData for {} OK:\n{}", cluster_name, json); + RemotingCommandPtr response = std::make_unique( + static_cast(ResponseCode::Success), downstreamRequest()->version(), + downstreamRequest()->opaque()); + response->markAsResponse(); + response->body().add(json); + 
connection_manager_.sendResponseToDownstream(response); + } else { + onError("Cluster is not available"); + ENVOY_LOG(warn, "Cluster for topic {} is not available", topic_name); + } + onReset(); +} + +void ActiveMessage::onReset() { connection_manager_.deferredDelete(*this); } + +bool ActiveMessage::onUpstreamData(Envoy::Buffer::Instance& data, bool end_stream, + ConnectionDataPtr& conn_data) { + bool underflow = false; + bool has_error = false; + response_ = Decoder::decode(data, underflow, has_error, downstreamRequest()->code()); + if (underflow && !end_stream) { + ENVOY_LOG(trace, "Wait for more data from upstream"); + return false; + } + + if (enumToSignedInt(RequestCode::PopMessage) == request_->code() && router_ != nullptr) { + recordPopRouteInfo(router_->upstreamHost()); + } + + connection_manager_.stats().response_.inc(); + if (!has_error) { + connection_manager_.stats().response_decoding_success_.inc(); + // Relay response to downstream + sendResponseToDownstream(); + } else { + ENVOY_LOG(error, "Failed to decode response for opaque: {}, close immediately.", + downstreamRequest()->opaque()); + onError("Failed to decode response from upstream"); + connection_manager_.stats().response_decoding_error_.inc(); + conn_data->connection().close(Network::ConnectionCloseType::NoFlush); + } + + if (end_stream) { + conn_data->connection().close(Network::ConnectionCloseType::NoFlush); + } + return true; +} + +void ActiveMessage::recordPopRouteInfo(Upstream::HostDescriptionConstSharedPtr host_description) { + if (host_description) { + auto host_metadata = host_description->metadata(); + auto filter_metadata = host_metadata->filter_metadata(); + const auto filter_it = filter_metadata.find(NetworkFilterNames::get().RocketmqProxy); + ASSERT(filter_it != filter_metadata.end()); + const auto& metadata_fields = filter_it->second.fields(); + ASSERT(metadata_fields.contains(RocketmqConstants::get().BrokerName)); + std::string broker_name = + 
metadata_fields.at(RocketmqConstants::get().BrokerName).string_value(); + // Proto3 will ignore the field if the value is zero. + int32_t broker_id = 0; + if (metadata_fields.contains(RocketmqConstants::get().BrokerId)) { + broker_id = static_cast( + metadata_fields.at(RocketmqConstants::get().BrokerId).number_value()); + } + // Tag the request with upstream host metadata: broker-name, broker-id + auto custom_header = request_->typedCustomHeader(); + custom_header->targetBrokerName(broker_name); + custom_header->targetBrokerId(broker_id); + } +} + +void ActiveMessage::updateActiveRequestStats(bool is_inc) { + if (is_inc) { + connection_manager_.stats().request_active_.inc(); + } else { + connection_manager_.stats().request_active_.dec(); + } + auto code = static_cast(request_->code()); + switch (code) { + case RequestCode::PopMessage: { + if (is_inc) { + connection_manager_.stats().pop_message_active_.inc(); + } else { + connection_manager_.stats().pop_message_active_.dec(); + } + break; + } + case RequestCode::SendMessage: { + if (is_inc) { + connection_manager_.stats().send_message_v1_active_.inc(); + } else { + connection_manager_.stats().send_message_v1_active_.dec(); + } + break; + } + case RequestCode::SendMessageV2: { + if (is_inc) { + connection_manager_.stats().send_message_v2_active_.inc(); + } else { + connection_manager_.stats().send_message_v2_active_.dec(); + } + break; + } + case RequestCode::GetRouteInfoByTopic: { + if (is_inc) { + connection_manager_.stats().get_topic_route_active_.inc(); + } else { + connection_manager_.stats().get_topic_route_active_.dec(); + } + break; + } + default: + break; + } +} + +} // namespace RocketmqProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/rocketmq_proxy/active_message.h b/source/extensions/filters/network/rocketmq_proxy/active_message.h new file mode 100644 index 0000000000000..566907d40dd6b --- /dev/null +++ 
b/source/extensions/filters/network/rocketmq_proxy/active_message.h @@ -0,0 +1,105 @@ +#pragma once + +#include "envoy/event/deferred_deletable.h" +#include "envoy/network/connection.h" +#include "envoy/network/filter.h" +#include "envoy/stats/timespan.h" + +#include "common/buffer/buffer_impl.h" +#include "common/common/linked_object.h" +#include "common/common/logger.h" + +#include "extensions/filters/network/rocketmq_proxy/codec.h" +#include "extensions/filters/network/rocketmq_proxy/protocol.h" +#include "extensions/filters/network/rocketmq_proxy/router/router.h" +#include "extensions/filters/network/rocketmq_proxy/topic_route.h" + +#include "absl/types/optional.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RocketmqProxy { + +class ConnectionManager; + +/** + * ActiveMessage represents an in-flight request from downstream that has not yet received response + * from upstream. + */ +class ActiveMessage : public LinkedObject, + public Event::DeferredDeletable, + Logger::Loggable { +public: + ActiveMessage(ConnectionManager& conn_manager, RemotingCommandPtr&& request); + + ~ActiveMessage() override; + + /** + * Set up filter-chain according to configuration from bootstrap config file and dynamic + * configuration items from Pilot. + */ + void createFilterChain(); + + /** + * Relay requests from downstream to upstream cluster. If the target cluster is absent at the + * moment, it triggers cluster discovery service request and mark awaitCluster as true. + * ClusterUpdateCallback will process requests marked await-cluster once the target cluster is + * in place. + */ + void sendRequestToUpstream(); + + const RemotingCommandPtr& downstreamRequest() const; + + /** + * Parse pop response and insert ack route directive such that ack requests will be forwarded to + * the same broker host from which messages are popped. + * @param buffer Pop response body. + * @param group Consumer group name. 
+ * @param topic Topic from which messages are popped + * @param directive ack route directive + */ + virtual void fillAckMessageDirective(Buffer::Instance& buffer, const std::string& group, + const std::string& topic, + const AckMessageDirective& directive); + + virtual void sendResponseToDownstream(); + + void onQueryTopicRoute(); + + virtual void onError(absl::string_view error_message); + + ConnectionManager& connectionManager() { return connection_manager_; } + + virtual void onReset(); + + bool onUpstreamData(Buffer::Instance& data, bool end_stream, + Tcp::ConnectionPool::ConnectionDataPtr& conn_data); + + virtual MessageMetadataSharedPtr metadata() const { return metadata_; } + + virtual Router::RouteConstSharedPtr route(); + + void recordPopRouteInfo(Upstream::HostDescriptionConstSharedPtr host_description); + + static void fillBrokerData(std::vector& list, const std::string& cluster, + const std::string& broker_name, int64_t broker_id, + const std::string& address); + +private: + ConnectionManager& connection_manager_; + RemotingCommandPtr request_; + RemotingCommandPtr response_; + MessageMetadataSharedPtr metadata_; + Router::RouterPtr router_; + absl::optional cached_route_; + + void updateActiveRequestStats(bool is_inc = true); +}; + +using ActiveMessagePtr = std::unique_ptr; + +} // namespace RocketmqProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/source/extensions/filters/network/rocketmq_proxy/codec.cc b/source/extensions/filters/network/rocketmq_proxy/codec.cc new file mode 100644 index 0000000000000..dd00abdfa330a --- /dev/null +++ b/source/extensions/filters/network/rocketmq_proxy/codec.cc @@ -0,0 +1,414 @@ +#include "extensions/filters/network/rocketmq_proxy/codec.h" + +#include + +#include "common/common/assert.h" +#include "common/common/empty_string.h" +#include "common/common/enum_to_int.h" +#include "common/common/logger.h" + +#include 
"extensions/filters/network/rocketmq_proxy/protocol.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RocketmqProxy { + +RemotingCommandPtr Decoder::decode(Buffer::Instance& buffer, bool& underflow, bool& has_error, + int request_code) { + // Verify there is at least some bits, which stores frame length and header length + if (buffer.length() <= MIN_FRAME_SIZE) { + underflow = true; + return nullptr; + } + + auto frame_length = buffer.peekBEInt(); + + if (frame_length > MAX_FRAME_SIZE) { + has_error = true; + return nullptr; + } + + if (buffer.length() < frame_length) { + underflow = true; + return nullptr; + } + buffer.drain(FRAME_LENGTH_FIELD_SIZE); + + auto mark = buffer.peekBEInt(); + uint32_t header_length = adjustHeaderLength(mark); + if (frame_length < header_length + FRAME_HEADER_LENGTH_FIELD_SIZE) { + // There is an error in frame_length. + // Make sure body_length is non-negative. + has_error = true; + return nullptr; + } + buffer.drain(FRAME_HEADER_LENGTH_FIELD_SIZE); + + uint32_t body_length = frame_length - FRAME_HEADER_LENGTH_FIELD_SIZE - header_length; + + ENVOY_LOG(debug, + "Request/Response Frame Meta: Frame Length = {}, Header Length = {}, Body Length = {}", + frame_length, header_length, body_length); + + Buffer::OwnedImpl header_buffer; + header_buffer.move(buffer, header_length); + std::string header_json = header_buffer.toString(); + ENVOY_LOG(trace, "Request/Response Header JSON: {}", header_json); + + int32_t code, version, opaque; + uint32_t flag; + if (isJsonHeader(mark)) { + ProtobufWkt::Struct header_struct; + + // Parse header JSON text + try { + MessageUtil::loadFromJson(header_json, header_struct); + } catch (std::exception& e) { + has_error = true; + ENVOY_LOG(error, "Failed to parse header JSON: {}. 
Error message: {}", header_json, e.what()); + return nullptr; + } + + const auto& filed_value_pair = header_struct.fields(); + if (!filed_value_pair.contains("code")) { + ENVOY_LOG(error, "Malformed frame: 'code' field is missing. Header JSON: {}", header_json); + has_error = true; + return nullptr; + } + code = filed_value_pair.at("code").number_value(); + if (!filed_value_pair.contains("version")) { + ENVOY_LOG(error, "Malformed frame: 'version' field is missing. Header JSON: {}", header_json); + has_error = true; + return nullptr; + } + version = filed_value_pair.at("version").number_value(); + if (!filed_value_pair.contains("opaque")) { + ENVOY_LOG(error, "Malformed frame: 'opaque' field is missing. Header JSON: {}", header_json); + has_error = true; + return nullptr; + } + opaque = filed_value_pair.at("opaque").number_value(); + if (!filed_value_pair.contains("flag")) { + ENVOY_LOG(error, "Malformed frame: 'flag' field is missing. Header JSON: {}", header_json); + has_error = true; + return nullptr; + } + flag = filed_value_pair.at("flag").number_value(); + RemotingCommandPtr cmd = std::make_unique(code, version, opaque); + cmd->flag(flag); + if (filed_value_pair.contains("language")) { + cmd->language(filed_value_pair.at("language").string_value()); + } + + if (filed_value_pair.contains("serializeTypeCurrentRPC")) { + cmd->serializeTypeCurrentRPC(filed_value_pair.at("serializeTypeCurrentRPC").string_value()); + } + + cmd->body_.move(buffer, body_length); + + if (RemotingCommand::isResponse(flag)) { + if (filed_value_pair.contains("remark")) { + cmd->remark(filed_value_pair.at("remark").string_value()); + } + cmd->custom_header_ = decodeResponseExtHeader(static_cast(code), header_struct, + static_cast(request_code)); + } else { + cmd->custom_header_ = decodeExtHeader(static_cast(code), header_struct); + } + return cmd; + } else { + ENVOY_LOG(warn, "Unsupported header serialization type"); + has_error = true; + return nullptr; + } +} + +bool 
Decoder::isComplete(Buffer::Instance& buffer, int32_t cursor) { + if (buffer.length() - cursor < 4) { + // buffer is definitely incomplete. + return false; + } + + auto total_size = buffer.peekBEInt(cursor); + return buffer.length() - cursor >= static_cast(total_size); +} + +std::string Decoder::decodeTopic(Buffer::Instance& buffer, int32_t cursor) { + if (!isComplete(buffer, cursor)) { + return EMPTY_STRING; + } + + auto magic_code = buffer.peekBEInt(cursor + 4); + + MessageVersion message_version = V1; + if (enumToSignedInt(MessageVersion::V1) == magic_code) { + message_version = V1; + } else if (enumToSignedInt(MessageVersion::V2) == magic_code) { + message_version = V2; + } + + int32_t offset = 4 /* total size */ + + 4 /* magic code */ + + 4 /* body CRC */ + + 4 /* queue Id */ + + 4 /* flag */ + + 8 /* queue offset */ + + 8 /* physical offset */ + + 4 /* sys flag */ + + 8 /* born timestamp */ + + 4 /* born host */ + + 4 /* born host port */ + + 8 /* store timestamp */ + + 4 /* store host */ + + 4 /* store host port */ + + 4 /* re-consume times */ + + 8 /* transaction offset */ + ; + auto body_size = buffer.peekBEInt(cursor + offset); + offset += 4 /* body size */ + + body_size /* body */; + int32_t topic_length; + std::string topic; + switch (message_version) { + case V1: { + topic_length = buffer.peekBEInt(cursor + offset); + topic.reserve(topic_length); + topic.resize(topic_length); + buffer.copyOut(cursor + offset + sizeof(int8_t), topic_length, &topic[0]); + break; + } + case V2: { + topic_length = buffer.peekBEInt(cursor + offset); + topic.reserve(topic_length); + topic.resize(topic_length); + buffer.copyOut(cursor + offset + sizeof(int16_t), topic_length, &topic[0]); + break; + } + } + return topic; +} + +int32_t Decoder::decodeQueueId(Buffer::Instance& buffer, int32_t cursor) { + if (!isComplete(buffer, cursor)) { + return -1; + } + + int32_t offset = 4 /* total size */ + + 4 /* magic code */ + + 4 /* body CRC */; + + return buffer.peekBEInt(cursor + 
offset); +} + +int64_t Decoder::decodeQueueOffset(Buffer::Instance& buffer, int32_t cursor) { + if (!isComplete(buffer, cursor)) { + return -1; + } + + int32_t offset = 4 /* total size */ + + 4 /* magic code */ + + 4 /* body CRC */ + + 4 /* queue Id */ + + 4 /* flag */; + return buffer.peekBEInt(cursor + offset); +} + +std::string Decoder::decodeMsgId(Buffer::Instance& buffer, int32_t cursor) { + if (!isComplete(buffer, cursor)) { + return EMPTY_STRING; + } + + int32_t offset = 4 /* total size */ + + 4 /* magic code */ + + 4 /* body CRC */ + + 4 /* queue Id */ + + 4 /* flag */ + + 8 /* queue offset */; + auto physical_offset = buffer.peekBEInt(cursor + offset); + offset += 8 /* physical offset */ + + 4 /* sys flag */ + + 8 /* born timestamp */ + + 4 /* born host */ + + 4 /* born host port */ + + 8 /* store timestamp */ + ; + + Buffer::OwnedImpl msg_id_buffer; + msg_id_buffer.writeBEInt(buffer.peekBEInt(cursor + offset)); + msg_id_buffer.writeBEInt(physical_offset); + std::string msg_id; + msg_id.reserve(32); + for (uint64_t i = 0; i < msg_id_buffer.length(); i++) { + auto c = msg_id_buffer.peekBEInt(); + msg_id.append(1, static_cast(c >> 4U)); + msg_id.append(1, static_cast(c & 0xFU)); + } + return msg_id; +} + +CommandCustomHeaderPtr Decoder::decodeExtHeader(RequestCode code, + ProtobufWkt::Struct& header_struct) { + const auto& filed_value_pair = header_struct.fields(); + switch (code) { + case RequestCode::SendMessage: { + ASSERT(filed_value_pair.contains("extFields")); + const auto& ext_fields = filed_value_pair.at("extFields"); + auto send_msg_ext_header = new SendMessageRequestHeader(); + send_msg_ext_header->version_ = SendMessageRequestVersion::V1; + send_msg_ext_header->decode(ext_fields); + return send_msg_ext_header; + } + case RequestCode::SendMessageV2: { + ASSERT(filed_value_pair.contains("extFields")); + const auto& ext_fields = filed_value_pair.at("extFields"); + auto send_msg_ext_header = new SendMessageRequestHeader(); + 
send_msg_ext_header->version_ = SendMessageRequestVersion::V2; + send_msg_ext_header->decode(ext_fields); + return send_msg_ext_header; + } + + case RequestCode::GetRouteInfoByTopic: { + ASSERT(filed_value_pair.contains("extFields")); + const auto& ext_fields = filed_value_pair.at("extFields"); + auto get_route_info_request_header = new GetRouteInfoRequestHeader(); + get_route_info_request_header->decode(ext_fields); + return get_route_info_request_header; + } + + case RequestCode::UnregisterClient: { + ASSERT(filed_value_pair.contains("extFields")); + const auto& ext_fields = filed_value_pair.at("extFields"); + auto unregister_client_request_header = new UnregisterClientRequestHeader(); + unregister_client_request_header->decode(ext_fields); + return unregister_client_request_header; + } + + case RequestCode::GetConsumerListByGroup: { + ASSERT(filed_value_pair.contains("extFields")); + const auto& ext_fields = filed_value_pair.at("extFields"); + auto get_consumer_list_by_group_request_header = new GetConsumerListByGroupRequestHeader(); + get_consumer_list_by_group_request_header->decode(ext_fields); + return get_consumer_list_by_group_request_header; + } + + case RequestCode::PopMessage: { + ASSERT(filed_value_pair.contains("extFields")); + const auto& ext_fields = filed_value_pair.at("extFields"); + auto pop_message_request_header = new PopMessageRequestHeader(); + pop_message_request_header->decode(ext_fields); + return pop_message_request_header; + } + + case RequestCode::AckMessage: { + ASSERT(filed_value_pair.contains("extFields")); + const auto& ext_fields = filed_value_pair.at("extFields"); + auto ack_message_request_header = new AckMessageRequestHeader(); + ack_message_request_header->decode(ext_fields); + return ack_message_request_header; + } + + case RequestCode::HeartBeat: { + // Heartbeat does not have an extended header. 
+ return nullptr; + } + + default: + ENVOY_LOG(warn, "Unsupported request code: {}", static_cast(code)); + return nullptr; + } +} + +CommandCustomHeaderPtr Decoder::decodeResponseExtHeader(ResponseCode response_code, + ProtobufWkt::Struct& header_struct, + RequestCode request_code) { + // No need to decode a failed response. + if (response_code != ResponseCode::Success && + response_code != ResponseCode::ReplicaNotAvailable) { + return nullptr; + } + const auto& filed_value_pair = header_struct.fields(); + switch (request_code) { + case RequestCode::SendMessage: + case RequestCode::SendMessageV2: { + auto send_message_response_header = new SendMessageResponseHeader(); + ASSERT(filed_value_pair.contains("extFields")); + auto& ext_fields = filed_value_pair.at("extFields"); + send_message_response_header->decode(ext_fields); + return send_message_response_header; + } + + case RequestCode::PopMessage: { + auto pop_message_response_header = new PopMessageResponseHeader(); + ASSERT(filed_value_pair.contains("extFields")); + const auto& ext_fields = filed_value_pair.at("extFields"); + pop_message_response_header->decode(ext_fields); + return pop_message_response_header; + } + default: + return nullptr; + } +} + +void Encoder::encode(const RemotingCommandPtr& command, Buffer::Instance& data) { + + ProtobufWkt::Struct command_struct; + auto* fields = command_struct.mutable_fields(); + + ProtobufWkt::Value code_v; + code_v.set_number_value(command->code_); + (*fields)["code"] = code_v; + + ProtobufWkt::Value language_v; + language_v.set_string_value(command->language()); + (*fields)["language"] = language_v; + + ProtobufWkt::Value version_v; + version_v.set_number_value(command->version_); + (*fields)["version"] = version_v; + + ProtobufWkt::Value opaque_v; + opaque_v.set_number_value(command->opaque_); + (*fields)["opaque"] = opaque_v; + + ProtobufWkt::Value flag_v; + flag_v.set_number_value(command->flag_); + (*fields)["flag"] = flag_v; + + if (!command->remark_.empty()) { 
+ ProtobufWkt::Value remark_v; + remark_v.set_string_value(command->remark_); + (*fields)["remark"] = remark_v; + } + + ProtobufWkt::Value serialization_type_v; + serialization_type_v.set_string_value(command->serializeTypeCurrentRPC()); + (*fields)["serializeTypeCurrentRPC"] = serialization_type_v; + + if (command->custom_header_) { + ProtobufWkt::Value ext_fields_v; + command->custom_header_->encode(ext_fields_v); + (*fields)["extFields"] = ext_fields_v; + } + + std::string json = MessageUtil::getJsonStringFromMessage(command_struct); + + int32_t frame_length = 4; + int32_t header_length = json.size(); + frame_length += header_length; + frame_length += command->bodyLength(); + + data.writeBEInt(frame_length); + data.writeBEInt(header_length); + data.add(json); + + // add body + if (command->bodyLength() > 0) { + data.add(command->body()); + } +} + +} // namespace RocketmqProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/source/extensions/filters/network/rocketmq_proxy/codec.h b/source/extensions/filters/network/rocketmq_proxy/codec.h new file mode 100644 index 0000000000000..e22502f48b34d --- /dev/null +++ b/source/extensions/filters/network/rocketmq_proxy/codec.h @@ -0,0 +1,81 @@ +#pragma once + +#include +#include +#include + +#include "envoy/common/platform.h" +#include "envoy/network/filter.h" + +#include "common/buffer/buffer_impl.h" +#include "common/common/logger.h" +#include "common/protobuf/utility.h" + +#include "extensions/filters/network/rocketmq_proxy/protocol.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RocketmqProxy { + +enum MessageVersion : uint32_t { + V1 = (0xAABBCCDDU ^ 1880681586U) + 8U, + V2 = (0xAABBCCDDU ^ 1880681586U) + 4U +}; + +class Decoder : Logger::Loggable { +public: + Decoder() = default; + + ~Decoder() = default; + + /** + * @param buffer Data buffer to decode. 
+ * @param underflow Indicate if buffer contains enough data in terms of protocol frame. + * @param has_error Indicate if the decoding is successful or not. + * @param request_code Corresponding request code if applies. + * @return Decoded remote command. + */ + static RemotingCommandPtr decode(Buffer::Instance& buffer, bool& underflow, bool& has_error, + int request_code = 0); + + static std::string decodeTopic(Buffer::Instance& buffer, int32_t cursor); + + static int32_t decodeQueueId(Buffer::Instance& buffer, int32_t cursor); + + static int64_t decodeQueueOffset(Buffer::Instance& buffer, int32_t cursor); + + static std::string decodeMsgId(Buffer::Instance& buffer, int32_t cursor); + + static constexpr uint32_t MIN_FRAME_SIZE = 8; + + static constexpr uint32_t MAX_FRAME_SIZE = 4 * 1024 * 1024; + + static constexpr uint32_t FRAME_LENGTH_FIELD_SIZE = 4; + + static constexpr uint32_t FRAME_HEADER_LENGTH_FIELD_SIZE = 4; + +private: + static uint32_t adjustHeaderLength(uint32_t len) { return len & 0xFFFFFFu; } + + static bool isJsonHeader(uint32_t len) { return (len >> 24u) == 0; } + + static CommandCustomHeaderPtr decodeExtHeader(RequestCode code, + ProtobufWkt::Struct& header_struct); + + static CommandCustomHeaderPtr decodeResponseExtHeader(ResponseCode response_code, + ProtobufWkt::Struct& header_struct, + RequestCode request_code); + + static bool isComplete(Buffer::Instance& buffer, int32_t cursor); +}; + +class Encoder { +public: + static void encode(const RemotingCommandPtr& command, Buffer::Instance& buffer); +}; + +} // namespace RocketmqProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/source/extensions/filters/network/rocketmq_proxy/config.cc b/source/extensions/filters/network/rocketmq_proxy/config.cc new file mode 100644 index 0000000000000..02f8da69c41f5 --- /dev/null +++ b/source/extensions/filters/network/rocketmq_proxy/config.cc @@ -0,0 +1,65 @@ +#include 
"extensions/filters/network/rocketmq_proxy/config.h" + +#include + +#include "envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.pb.h" +#include "envoy/registry/registry.h" +#include "envoy/server/filter_config.h" + +#include "extensions/filters/network/rocketmq_proxy/conn_manager.h" +#include "extensions/filters/network/rocketmq_proxy/stats.h" +#include "extensions/filters/network/well_known_names.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RocketmqProxy { + +namespace rocketmq_config = envoy::extensions::filters::network::rocketmq_proxy::v3; + +Network::FilterFactoryCb RocketmqProxyFilterConfigFactory::createFilterFactoryFromProtoTyped( + const rocketmq_config::RocketmqProxy& proto_config, + Server::Configuration::FactoryContext& context) { + std::shared_ptr filter_config = std::make_shared(proto_config, context); + return [filter_config, &context](Network::FilterManager& filter_manager) -> void { + filter_manager.addReadFilter( + std::make_shared(*filter_config, context.dispatcher().timeSource())); + }; +} + +REGISTER_FACTORY(RocketmqProxyFilterConfigFactory, + Server::Configuration::NamedNetworkFilterConfigFactory); + +ConfigImpl::ConfigImpl(const RocketmqProxyConfig& config, + Server::Configuration::FactoryContext& context) + : context_(context), stats_prefix_(fmt::format("rocketmq.{}.", config.stat_prefix())), + stats_(RocketmqFilterStats::generateStats(stats_prefix_, context_.scope())), + route_matcher_(new Router::RouteMatcher(config.route_config())), + develop_mode_(config.develop_mode()), + transient_object_life_span_(PROTOBUF_GET_MS_OR_DEFAULT(config, transient_object_life_span, + TransientObjectLifeSpan)) {} + +std::string ConfigImpl::proxyAddress() { + const LocalInfo::LocalInfo& localInfo = context_.getServerFactoryContext().localInfo(); + Network::Address::InstanceConstSharedPtr address = localInfo.address(); + if (address->type() == Network::Address::Type::Ip) { + const std::string& ip = 
address->ip()->addressAsString(); + std::string proxyAddr{ip}; + if (address->ip()->port()) { + return proxyAddr.append(":").append(std::to_string(address->ip()->port())); + } else { + ENVOY_LOG(trace, "Local info does not have port specified, defaulting to 10000"); + return proxyAddr.append(":10000"); + } + } + return address->asString(); +} + +Router::RouteConstSharedPtr ConfigImpl::route(const MessageMetadata& metadata) const { + return route_matcher_->route(metadata); +} + +} // namespace RocketmqProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/source/extensions/filters/network/rocketmq_proxy/config.h b/source/extensions/filters/network/rocketmq_proxy/config.h new file mode 100644 index 0000000000000..df5cbe7c97113 --- /dev/null +++ b/source/extensions/filters/network/rocketmq_proxy/config.h @@ -0,0 +1,72 @@ +#pragma once + +#include +#include + +#include "envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.pb.h" +#include "envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.pb.validate.h" + +#include "extensions/filters/network/common/factory_base.h" +#include "extensions/filters/network/rocketmq_proxy/conn_manager.h" +#include "extensions/filters/network/rocketmq_proxy/router/route_matcher.h" +#include "extensions/filters/network/rocketmq_proxy/router/router_impl.h" +#include "extensions/filters/network/well_known_names.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RocketmqProxy { + +class RocketmqProxyFilterConfigFactory + : public Common::FactoryBase< + envoy::extensions::filters::network::rocketmq_proxy::v3::RocketmqProxy> { +public: + RocketmqProxyFilterConfigFactory() : FactoryBase(NetworkFilterNames::get().RocketmqProxy, true) {} + +private: + Network::FilterFactoryCb createFilterFactoryFromProtoTyped( + const envoy::extensions::filters::network::rocketmq_proxy::v3::RocketmqProxy& proto_config, + 
Server::Configuration::FactoryContext& context) override; +}; + +class ConfigImpl : public Config, public Router::Config, Logger::Loggable { +public: + using RocketmqProxyConfig = + envoy::extensions::filters::network::rocketmq_proxy::v3::RocketmqProxy; + + ConfigImpl(const RocketmqProxyConfig& config, Server::Configuration::FactoryContext& context); + ~ConfigImpl() override = default; + + // Config + RocketmqFilterStats& stats() override { return stats_; } + Upstream::ClusterManager& clusterManager() override { return context_.clusterManager(); } + Router::RouterPtr createRouter() override { + return std::make_unique(context_.clusterManager()); + } + bool developMode() const override { return develop_mode_; } + + std::chrono::milliseconds transientObjectLifeSpan() const override { + return transient_object_life_span_; + } + + std::string proxyAddress() override; + Router::Config& routerConfig() override { return *this; } + + // Router::Config + Router::RouteConstSharedPtr route(const MessageMetadata& metadata) const override; + +private: + Server::Configuration::FactoryContext& context_; + const std::string stats_prefix_; + RocketmqFilterStats stats_; + Router::RouteMatcherPtr route_matcher_; + const bool develop_mode_; + std::chrono::milliseconds transient_object_life_span_; + + static constexpr uint64_t TransientObjectLifeSpan = 30 * 1000; +}; + +} // namespace RocketmqProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/source/extensions/filters/network/rocketmq_proxy/conn_manager.cc b/source/extensions/filters/network/rocketmq_proxy/conn_manager.cc new file mode 100644 index 0000000000000..0748f80476ff9 --- /dev/null +++ b/source/extensions/filters/network/rocketmq_proxy/conn_manager.cc @@ -0,0 +1,376 @@ +#include "extensions/filters/network/rocketmq_proxy/conn_manager.h" + +#include "envoy/buffer/buffer.h" +#include "envoy/network/connection.h" + +#include "common/common/enum_to_int.h" 
+#include "common/protobuf/utility.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RocketmqProxy { + +ConsumerGroupMember::ConsumerGroupMember(absl::string_view client_id, + ConnectionManager& conn_manager) + : client_id_(client_id.data(), client_id.size()), connection_manager_(&conn_manager), + last_(connection_manager_->time_source_.monotonicTime()) {} + +void ConsumerGroupMember::refresh() { last_ = connection_manager_->time_source_.monotonicTime(); } + +bool ConsumerGroupMember::expired() const { + auto duration = connection_manager_->time_source_.monotonicTime() - last_; + return std::chrono::duration_cast(duration).count() > + connection_manager_->config().transientObjectLifeSpan().count(); +} + +ConnectionManager::ConnectionManager(Config& config, TimeSource& time_source) + : config_(config), time_source_(time_source), stats_(config.stats()) {} + +Envoy::Network::FilterStatus ConnectionManager::onData(Envoy::Buffer::Instance& data, + bool end_stream) { + ENVOY_CONN_LOG(trace, "rocketmq_proxy: received {} bytes.", read_callbacks_->connection(), + data.length()); + request_buffer_.move(data); + dispatch(); + if (end_stream) { + resetAllActiveMessages("Connection to downstream is closed"); + read_callbacks_->connection().close(Envoy::Network::ConnectionCloseType::FlushWrite); + } + return Network::FilterStatus::StopIteration; +} + +void ConnectionManager::dispatch() { + if (request_buffer_.length() < Decoder::MIN_FRAME_SIZE) { + ENVOY_CONN_LOG(warn, "rocketmq_proxy: request buffer length is less than min frame size: {}", + read_callbacks_->connection(), request_buffer_.length()); + return; + } + + bool underflow = false; + bool has_decode_error = false; + while (!underflow) { + RemotingCommandPtr request = Decoder::decode(request_buffer_, underflow, has_decode_error); + if (underflow) { + // Wait for more data + break; + } + stats_.request_.inc(); + + // Decode error, we need to close connection immediately. 
+ if (has_decode_error) { + ENVOY_CONN_LOG(error, "Failed to decode request, close connection immediately", + read_callbacks_->connection()); + stats_.request_decoding_error_.inc(); + resetAllActiveMessages("Failed to decode data from downstream. Close connection immediately"); + read_callbacks_->connection().close(Envoy::Network::ConnectionCloseType::FlushWrite); + return; + } else { + stats_.request_decoding_success_.inc(); + } + + switch (static_cast(request->code())) { + case RequestCode::GetRouteInfoByTopic: { + ENVOY_CONN_LOG(trace, "GetTopicRoute request, code: {}, opaque: {}", + read_callbacks_->connection(), request->code(), request->opaque()); + onGetTopicRoute(std::move(request)); + } break; + + case RequestCode::UnregisterClient: { + ENVOY_CONN_LOG(trace, "process unregister client request, code: {}, opaque: {}", + read_callbacks_->connection(), request->code(), request->opaque()); + onUnregisterClient(std::move(request)); + } break; + + case RequestCode::SendMessage: { + ENVOY_CONN_LOG(trace, "SendMessage request, code: {}, opaque: {}", + read_callbacks_->connection(), request->code(), request->opaque()); + onSendMessage(std::move(request)); + stats_.send_message_v1_.inc(); + } break; + + case RequestCode::SendMessageV2: { + ENVOY_CONN_LOG(trace, "SendMessage request, code: {}, opaque: {}", + read_callbacks_->connection(), request->code(), request->opaque()); + onSendMessage(std::move(request)); + stats_.send_message_v2_.inc(); + } break; + + case RequestCode::GetConsumerListByGroup: { + ENVOY_CONN_LOG(trace, "GetConsumerListByGroup request, code: {}, opaque: {}", + read_callbacks_->connection(), request->code(), request->opaque()); + onGetConsumerListByGroup(std::move(request)); + } break; + + case RequestCode::PopMessage: { + ENVOY_CONN_LOG(trace, "PopMessage request, code: {}, opaque: {}", + read_callbacks_->connection(), request->code(), request->opaque()); + onPopMessage(std::move(request)); + stats_.pop_message_.inc(); + } break; + + case 
RequestCode::AckMessage: { + ENVOY_CONN_LOG(trace, "AckMessage request, code: {}, opaque: {}", + read_callbacks_->connection(), request->code(), request->opaque()); + onAckMessage(std::move(request)); + stats_.ack_message_.inc(); + } break; + + case RequestCode::HeartBeat: { + ENVOY_CONN_LOG(trace, "Heartbeat request, opaque: {}", read_callbacks_->connection(), + request->opaque()); + onHeartbeat(std::move(request)); + } break; + + default: { + ENVOY_CONN_LOG(warn, "Request code {} not supported yet", read_callbacks_->connection(), + request->code()); + std::string error_msg("Request not supported"); + onError(request, error_msg); + } break; + } + } +} + +void ConnectionManager::purgeDirectiveTable() { + auto current = time_source_.monotonicTime(); + for (auto it = ack_directive_table_.begin(); it != ack_directive_table_.end();) { + auto duration = current - it->second.creation_time_; + if (std::chrono::duration_cast(duration).count() > + config_.transientObjectLifeSpan().count()) { + ack_directive_table_.erase(it++); + } else { + it++; + } + } +} + +void ConnectionManager::sendResponseToDownstream(RemotingCommandPtr& response) { + Buffer::OwnedImpl buffer; + Encoder::encode(response, buffer); + if (read_callbacks_->connection().state() == Network::Connection::State::Open) { + ENVOY_CONN_LOG(trace, "Write response to downstream. 
Opaque: {}", read_callbacks_->connection(), + response->opaque()); + read_callbacks_->connection().write(buffer, false); + } else { + ENVOY_CONN_LOG(error, "Send response to downstream failed as connection is no longer open", + read_callbacks_->connection()); + } +} + +void ConnectionManager::onGetTopicRoute(RemotingCommandPtr request) { + createActiveMessage(request).onQueryTopicRoute(); + stats_.get_topic_route_.inc(); +} + +void ConnectionManager::onHeartbeat(RemotingCommandPtr request) { + const std::string& body = request->body().toString(); + + purgeDirectiveTable(); + + ProtobufWkt::Struct body_struct; + try { + MessageUtil::loadFromJson(body, body_struct); + } catch (std::exception& e) { + ENVOY_LOG(warn, "Failed to decode heartbeat body. Error message: {}", e.what()); + return; + } + + HeartbeatData heartbeatData; + if (!heartbeatData.decode(body_struct)) { + ENVOY_LOG(warn, "Failed to decode heartbeat data"); + return; + } + + for (const auto& group : heartbeatData.consumerGroups()) { + addOrUpdateGroupMember(group, heartbeatData.clientId()); + } + + RemotingCommandPtr response = std::make_unique(); + response->code(enumToSignedInt(ResponseCode::Success)); + response->opaque(request->opaque()); + response->remark("Heartbeat OK"); + response->markAsResponse(); + sendResponseToDownstream(response); + stats_.heartbeat_.inc(); +} + +void ConnectionManager::addOrUpdateGroupMember(absl::string_view group, + absl::string_view client_id) { + ENVOY_LOG(trace, "#addOrUpdateGroupMember. 
Group: {}, client ID: {}", group, client_id); + auto search = group_members_.find(std::string(group.data(), group.length())); + if (search == group_members_.end()) { + std::vector members; + members.emplace_back(ConsumerGroupMember(client_id, *this)); + group_members_.emplace(std::string(group.data(), group.size()), members); + } else { + std::vector& members = search->second; + for (auto it = members.begin(); it != members.end();) { + if (it->clientId() == client_id) { + it->refresh(); + ++it; + } else if (it->expired()) { + it = members.erase(it); + } else { + ++it; + } + } + if (members.empty()) { + group_members_.erase(search); + } + } +} + +void ConnectionManager::onUnregisterClient(RemotingCommandPtr request) { + auto header = request->typedCustomHeader(); + ASSERT(header != nullptr); + ASSERT(!header->clientId().empty()); + ENVOY_LOG(trace, "Unregister client ID: {}, producer group: {}, consumer group: {}", + header->clientId(), header->producerGroup(), header->consumerGroup()); + + if (!header->consumerGroup().empty()) { + auto search = group_members_.find(header->consumerGroup()); + if (search != group_members_.end()) { + std::vector& members = search->second; + for (auto it = members.begin(); it != members.end();) { + if (it->clientId() == header->clientId()) { + it = members.erase(it); + } else if (it->expired()) { + it = members.erase(it); + } else { + ++it; + } + } + if (members.empty()) { + group_members_.erase(search); + } + } + } + + RemotingCommandPtr response = std::make_unique( + enumToSignedInt(ResponseCode::Success), request->version(), request->opaque()); + response->markAsResponse(); + response->remark("Envoy unregister client OK."); + sendResponseToDownstream(response); + stats_.unregister_.inc(); +} + +void ConnectionManager::onError(RemotingCommandPtr& request, absl::string_view error_msg) { + Buffer::OwnedImpl buffer; + RemotingCommandPtr response = std::make_unique(); + response->markAsResponse(); + response->opaque(request->opaque()); + 
response->code(enumToSignedInt(ResponseCode::SystemError)); + response->remark(error_msg); + sendResponseToDownstream(response); +} + +void ConnectionManager::onSendMessage(RemotingCommandPtr request) { + ENVOY_CONN_LOG(trace, "#onSendMessage, opaque: {}", read_callbacks_->connection(), + request->opaque()); + auto header = request->typedCustomHeader(); + header->queueId(-1); + createActiveMessage(request).sendRequestToUpstream(); +} + +void ConnectionManager::onGetConsumerListByGroup(RemotingCommandPtr request) { + auto requestExtHeader = request->typedCustomHeader(); + + ASSERT(requestExtHeader != nullptr); + ASSERT(!requestExtHeader->consumerGroup().empty()); + + ENVOY_LOG(trace, "#onGetConsumerListByGroup, consumer group: {}", + requestExtHeader->consumerGroup()); + + auto search = group_members_.find(requestExtHeader->consumerGroup()); + GetConsumerListByGroupResponseBody getConsumerListByGroupResponseBody; + if (search != group_members_.end()) { + std::vector& members = search->second; + std::sort(members.begin(), members.end()); + for (const auto& member : members) { + getConsumerListByGroupResponseBody.add(member.clientId()); + } + } else { + ENVOY_LOG(warn, "There is no consumer belongs to consumer_group: {}", + requestExtHeader->consumerGroup()); + } + ProtobufWkt::Struct body_struct; + + getConsumerListByGroupResponseBody.encode(body_struct); + + RemotingCommandPtr response = std::make_unique( + enumToSignedInt(ResponseCode::Success), request->version(), request->opaque()); + response->markAsResponse(); + std::string json = MessageUtil::getJsonStringFromMessage(body_struct); + response->body().add(json); + ENVOY_LOG(trace, "GetConsumerListByGroup respond with body: {}", json); + + sendResponseToDownstream(response); + stats_.get_consumer_list_.inc(); +} + +void ConnectionManager::onPopMessage(RemotingCommandPtr request) { + auto header = request->typedCustomHeader(); + ASSERT(header != nullptr); + ENVOY_LOG(trace, "#onPopMessage. 
Consumer group: {}, topic: {}", header->consumerGroup(), + header->topic()); + createActiveMessage(request).sendRequestToUpstream(); +} + +void ConnectionManager::onAckMessage(RemotingCommandPtr request) { + auto header = request->typedCustomHeader(); + ASSERT(header != nullptr); + ENVOY_LOG( + trace, + "#onAckMessage. Consumer group: {}, topic: {}, queue Id: {}, offset: {}, extra-info: {}", + header->consumerGroup(), header->topic(), header->queueId(), header->offset(), + header->extraInfo()); + + // Fill the target broker_name and broker_id routing directive + auto it = ack_directive_table_.find(header->directiveKey()); + if (it == ack_directive_table_.end()) { + ENVOY_LOG(warn, "There was no previous ack directive available, which is unexpected"); + onError(request, "No ack directive is found"); + return; + } + header->targetBrokerName(it->second.broker_name_); + header->targetBrokerId(it->second.broker_id_); + + createActiveMessage(request).sendRequestToUpstream(); +} + +ActiveMessage& ConnectionManager::createActiveMessage(RemotingCommandPtr& request) { + ENVOY_CONN_LOG(trace, "ConnectionManager#createActiveMessage. 
Code: {}, opaque: {}", + read_callbacks_->connection(), request->code(), request->opaque()); + ActiveMessagePtr active_message = std::make_unique(*this, std::move(request)); + LinkedList::moveIntoList(std::move(active_message), active_message_list_); + return **active_message_list_.begin(); +} + +void ConnectionManager::deferredDelete(ActiveMessage& active_message) { + read_callbacks_->connection().dispatcher().deferredDelete( + active_message.removeFromList(active_message_list_)); +} + +void ConnectionManager::resetAllActiveMessages(absl::string_view error_msg) { + while (!active_message_list_.empty()) { + ENVOY_CONN_LOG(warn, "Reset pending request {} due to error: {}", read_callbacks_->connection(), + active_message_list_.front()->downstreamRequest()->opaque(), error_msg); + active_message_list_.front()->onReset(); + stats_.response_error_.inc(); + } +} + +Envoy::Network::FilterStatus ConnectionManager::onNewConnection() { + return Network::FilterStatus::Continue; +} + +void ConnectionManager::initializeReadFilterCallbacks( + Envoy::Network::ReadFilterCallbacks& callbacks) { + read_callbacks_ = &callbacks; +} + +} // namespace RocketmqProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/rocketmq_proxy/conn_manager.h b/source/extensions/filters/network/rocketmq_proxy/conn_manager.h new file mode 100644 index 0000000000000..29f3faf48ad57 --- /dev/null +++ b/source/extensions/filters/network/rocketmq_proxy/conn_manager.h @@ -0,0 +1,215 @@ +#pragma once + +#include + +#include "envoy/common/time.h" +#include "envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.pb.h" +#include "envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.pb.validate.h" +#include "envoy/network/connection.h" +#include "envoy/network/filter.h" +#include "envoy/stats/scope.h" +#include "envoy/stats/stats.h" +#include "envoy/stats/stats_macros.h" +#include "envoy/stats/timespan.h" +#include 
"envoy/upstream/thread_local_cluster.h" + +#include "common/buffer/buffer_impl.h" +#include "common/common/logger.h" + +#include "extensions/filters/network/rocketmq_proxy/active_message.h" +#include "extensions/filters/network/rocketmq_proxy/codec.h" +#include "extensions/filters/network/rocketmq_proxy/stats.h" + +#include "absl/container/flat_hash_map.h" +#include "absl/strings/string_view.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RocketmqProxy { + +class Config { +public: + virtual ~Config() = default; + + virtual RocketmqFilterStats& stats() PURE; + + virtual Upstream::ClusterManager& clusterManager() PURE; + + virtual Router::RouterPtr createRouter() PURE; + + /** + * Indicate whether this proxy is running in development mode. If true, this proxy plugin may + * work without dedicated traffic intercepting facility without considering backward + * compatibility. + * @return true when in development mode; false otherwise. + */ + virtual bool developMode() const PURE; + + virtual std::string proxyAddress() PURE; + + virtual Router::Config& routerConfig() PURE; + + virtual std::chrono::milliseconds transientObjectLifeSpan() const PURE; +}; + +class ConnectionManager; + +/** + * This class is to ensure legacy RocketMQ SDK works. Heartbeat between client SDK and envoy is not + * necessary any more and should be removed once the lite SDK is in-place. 
+ */ +class ConsumerGroupMember { +public: + ConsumerGroupMember(absl::string_view client_id, ConnectionManager& conn_manager); + + bool operator==(const ConsumerGroupMember& other) const { return client_id_ == other.client_id_; } + + bool operator<(const ConsumerGroupMember& other) const { return client_id_ < other.client_id_; } + + void refresh(); + + bool expired() const; + + absl::string_view clientId() const { return client_id_; } + + void setLastForTest(MonotonicTime tp) { last_ = tp; } + +private: + std::string client_id_; + ConnectionManager* connection_manager_; + MonotonicTime last_; +}; + +class ConnectionManager : public Network::ReadFilter, Logger::Loggable { +public: + ConnectionManager(Config& config, TimeSource& time_source); + + ~ConnectionManager() override = default; + + /** + * Called when data is read on the connection. + * @param data supplies the read data which may be modified. + * @param end_stream supplies whether this is the last byte on the connection. This will only + * be set if the connection has half-close semantics enabled. + * @return status used by the filter manager to manage further filter iteration. + */ + Network::FilterStatus onData(Buffer::Instance& data, bool end_stream) override; + + /** + * Called when a connection is first established. Filters should do one time long term processing + * that needs to be done when a connection is established. Filter chain iteration can be stopped + * if needed. + * @return status used by the filter manager to manage further filter iteration. + */ + Network::FilterStatus onNewConnection() override; + + /** + * Initializes the read filter callbacks used to interact with the filter manager. It will be + * called by the filter manager a single time when the filter is first registered. Thus, any + * construction that requires the backing connection should take place in the context of this + * function. 
+ * + * IMPORTANT: No outbound networking or complex processing should be done in this function. + * That should be done in the context of onNewConnection() if needed. + * + * @param callbacks supplies the callbacks. + */ + void initializeReadFilterCallbacks(Network::ReadFilterCallbacks&) override; + + /** + * Send response to downstream either when envoy proxy has received result from upstream hosts or + * the proxy itself may serve the request. + * @param response Response to write to downstream with identical opaque number. + */ + void sendResponseToDownstream(RemotingCommandPtr& response); + + void onGetTopicRoute(RemotingCommandPtr request); + + /** + * Called when downstream sends heartbeat requests. + * @param request heartbeat request from downstream + */ + void onHeartbeat(RemotingCommandPtr request); + + void addOrUpdateGroupMember(absl::string_view group, absl::string_view client_id); + + void onUnregisterClient(RemotingCommandPtr request); + + void onError(RemotingCommandPtr& request, absl::string_view error_msg); + + void onSendMessage(RemotingCommandPtr request); + + void onGetConsumerListByGroup(RemotingCommandPtr request); + + void onPopMessage(RemotingCommandPtr request); + + void onAckMessage(RemotingCommandPtr request); + + ActiveMessage& createActiveMessage(RemotingCommandPtr& request); + + void deferredDelete(ActiveMessage& active_message); + + void resetAllActiveMessages(absl::string_view error_msg); + + Config& config() { return config_; } + + RocketmqFilterStats& stats() { return stats_; } + + absl::flat_hash_map>& groupMembersForTest() { + return group_members_; + } + + std::list& activeMessageList() { return active_message_list_; } + + void insertAckDirective(const std::string& key, const AckMessageDirective& directive) { + ack_directive_table_.insert(std::make_pair(key, directive)); + } + + void eraseAckDirective(const std::string& key) { + auto it = ack_directive_table_.find(key); + if (it != ack_directive_table_.end()) { + 
ack_directive_table_.erase(it); + } + } + + TimeSource& timeSource() const { return time_source_; } + + const absl::flat_hash_map& getAckDirectiveTableForTest() const { + return ack_directive_table_; + } + + friend class ConsumerGroupMember; + +private: + /** + * Dispatch incoming requests from downstream to run through filter chains. + */ + void dispatch(); + + /** + * Invoked by heartbeat to purge deprecated ack_directive entries. + */ + void purgeDirectiveTable(); + + Network::ReadFilterCallbacks* read_callbacks_{}; + Buffer::OwnedImpl request_buffer_; + + Config& config_; + TimeSource& time_source_; + RocketmqFilterStats& stats_; + + std::list active_message_list_; + + absl::flat_hash_map> group_members_; + + /** + * Message unique key to message acknowledge directive mapping. + * Acknowledge requests first consult this table to determine which host in the cluster to go. + */ + absl::flat_hash_map ack_directive_table_; +}; +} // namespace RocketmqProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/rocketmq_proxy/metadata.h b/source/extensions/filters/network/rocketmq_proxy/metadata.h new file mode 100644 index 0000000000000..ed913a1f92e0c --- /dev/null +++ b/source/extensions/filters/network/rocketmq_proxy/metadata.h @@ -0,0 +1,43 @@ +#pragma once + +#include + +#include "common/http/header_map_impl.h" + +#include "absl/types/optional.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RocketmqProxy { + +class MessageMetadata { +public: + MessageMetadata() = default; + + void setOneWay(bool oneway) { is_oneway_ = oneway; } + bool isOneWay() const { return is_oneway_; } + + bool hasTopicName() const { return topic_name_.has_value(); } + const std::string& topicName() const { return topic_name_.value(); } + void setTopicName(const std::string& topic_name) { topic_name_ = topic_name; } + + /** + * @return HeaderMap of current headers + */ + const 
Http::HeaderMap& headers() const { return *headers_; } + Http::HeaderMap& headers() { return *headers_; } + +private: + bool is_oneway_{false}; + absl::optional topic_name_{}; + + Http::HeaderMapPtr headers_{Http::RequestHeaderMapImpl::create()}; +}; + +using MessageMetadataSharedPtr = std::shared_ptr; + +} // namespace RocketmqProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/source/extensions/filters/network/rocketmq_proxy/protocol.cc b/source/extensions/filters/network/rocketmq_proxy/protocol.cc new file mode 100644 index 0000000000000..e16481cc453fa --- /dev/null +++ b/source/extensions/filters/network/rocketmq_proxy/protocol.cc @@ -0,0 +1,749 @@ +#include "extensions/filters/network/rocketmq_proxy/protocol.h" + +#include "common/common/assert.h" +#include "common/common/enum_to_int.h" + +#include "extensions/filters/network/rocketmq_proxy/well_known_names.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RocketmqProxy { + +void SendMessageRequestHeader::encode(ProtobufWkt::Value& root) { + auto& members = *(root.mutable_struct_value()->mutable_fields()); + + switch (version_) { + case SendMessageRequestVersion::V1: { + ProtobufWkt::Value producer_group_v; + producer_group_v.set_string_value(producer_group_); + members["producerGroup"] = producer_group_v; + + ProtobufWkt::Value topic_v; + topic_v.set_string_value(topic_.c_str(), topic_.length()); + members["topic"] = topic_v; + + ProtobufWkt::Value default_topic_v; + default_topic_v.set_string_value(default_topic_); + members["defaultTopic"] = default_topic_v; + + ProtobufWkt::Value default_topic_queue_number_v; + default_topic_queue_number_v.set_number_value(default_topic_queue_number_); + members["defaultTopicQueueNums"] = default_topic_queue_number_v; + + ProtobufWkt::Value queue_id_v; + queue_id_v.set_number_value(queue_id_); + members["queueId"] = queue_id_v; + + ProtobufWkt::Value sys_flag_v; 
+ sys_flag_v.set_number_value(sys_flag_); + members["sysFlag"] = sys_flag_v; + + ProtobufWkt::Value born_timestamp_v; + born_timestamp_v.set_number_value(born_timestamp_); + members["bornTimestamp"] = born_timestamp_v; + + ProtobufWkt::Value flag_v; + flag_v.set_number_value(flag_); + members["flag"] = flag_v; + + if (!properties_.empty()) { + ProtobufWkt::Value properties_v; + properties_v.set_string_value(properties_.c_str(), properties_.length()); + members["properties"] = properties_v; + } + + if (reconsume_time_ > 0) { + ProtobufWkt::Value reconsume_times_v; + reconsume_times_v.set_number_value(reconsume_time_); + members["reconsumeTimes"] = reconsume_times_v; + } + + if (unit_mode_) { + ProtobufWkt::Value unit_mode_v; + unit_mode_v.set_bool_value(unit_mode_); + members["unitMode"] = unit_mode_v; + } + + if (batch_) { + ProtobufWkt::Value batch_v; + batch_v.set_bool_value(batch_); + members["batch"] = batch_v; + } + + if (max_reconsume_time_ > 0) { + ProtobufWkt::Value max_reconsume_time_v; + max_reconsume_time_v.set_number_value(max_reconsume_time_); + members["maxReconsumeTimes"] = max_reconsume_time_v; + } + break; + } + case SendMessageRequestVersion::V2: { + ProtobufWkt::Value producer_group_v; + producer_group_v.set_string_value(producer_group_.c_str(), producer_group_.length()); + members["a"] = producer_group_v; + + ProtobufWkt::Value topic_v; + topic_v.set_string_value(topic_.c_str(), topic_.length()); + members["b"] = topic_v; + + ProtobufWkt::Value default_topic_v; + default_topic_v.set_string_value(default_topic_.c_str(), default_topic_.length()); + members["c"] = default_topic_v; + + ProtobufWkt::Value default_topic_queue_number_v; + default_topic_queue_number_v.set_number_value(default_topic_queue_number_); + members["d"] = default_topic_queue_number_v; + + ProtobufWkt::Value queue_id_v; + queue_id_v.set_number_value(queue_id_); + members["e"] = queue_id_v; + + ProtobufWkt::Value sys_flag_v; + sys_flag_v.set_number_value(sys_flag_); + 
members["f"] = sys_flag_v; + + ProtobufWkt::Value born_timestamp_v; + born_timestamp_v.set_number_value(born_timestamp_); + members["g"] = born_timestamp_v; + + ProtobufWkt::Value flag_v; + flag_v.set_number_value(flag_); + members["h"] = flag_v; + + if (!properties_.empty()) { + ProtobufWkt::Value properties_v; + properties_v.set_string_value(properties_.c_str(), properties_.length()); + members["i"] = properties_v; + } + + if (reconsume_time_ > 0) { + ProtobufWkt::Value reconsume_times_v; + reconsume_times_v.set_number_value(reconsume_time_); + members["j"] = reconsume_times_v; + } + + if (unit_mode_) { + ProtobufWkt::Value unit_mode_v; + unit_mode_v.set_bool_value(unit_mode_); + members["k"] = unit_mode_v; + } + + if (batch_) { + ProtobufWkt::Value batch_v; + batch_v.set_bool_value(batch_); + members["m"] = batch_v; + } + + if (max_reconsume_time_ > 0) { + ProtobufWkt::Value max_reconsume_time_v; + max_reconsume_time_v.set_number_value(max_reconsume_time_); + members["l"] = max_reconsume_time_v; + } + break; + } + default: + break; + } +} + +void SendMessageRequestHeader::decode(const ProtobufWkt::Value& ext_fields) { + const auto& members = ext_fields.struct_value().fields(); + switch (version_) { + case SendMessageRequestVersion::V1: { + ASSERT(members.contains("producerGroup")); + ASSERT(members.contains("topic")); + ASSERT(members.contains("defaultTopic")); + ASSERT(members.contains("defaultTopicQueueNums")); + ASSERT(members.contains("queueId")); + ASSERT(members.contains("sysFlag")); + ASSERT(members.contains("bornTimestamp")); + ASSERT(members.contains("flag")); + + producer_group_ = members.at("producerGroup").string_value(); + topic_ = members.at("topic").string_value(); + default_topic_ = members.at("defaultTopic").string_value(); + + if (members.at("defaultTopicQueueNums").kind_case() == ProtobufWkt::Value::kNumberValue) { + default_topic_queue_number_ = members.at("defaultTopicQueueNums").number_value(); + } else { + default_topic_queue_number_ = 
std::stoi(members.at("defaultTopicQueueNums").string_value()); + } + + if (members.at("queueId").kind_case() == ProtobufWkt::Value::kNumberValue) { + queue_id_ = members.at("queueId").number_value(); + } else { + queue_id_ = std::stoi(members.at("queueId").string_value()); + } + + if (members.at("sysFlag").kind_case() == ProtobufWkt::Value::kNumberValue) { + sys_flag_ = static_cast(members.at("sysFlag").number_value()); + } else { + sys_flag_ = std::stoi(members.at("sysFlag").string_value()); + } + + if (members.at("bornTimestamp").kind_case() == ProtobufWkt::Value::kNumberValue) { + born_timestamp_ = static_cast(members.at("bornTimestamp").number_value()); + } else { + born_timestamp_ = std::stoll(members.at("bornTimestamp").string_value()); + } + + if (members.at("flag").kind_case() == ProtobufWkt::Value::kNumberValue) { + flag_ = static_cast(members.at("flag").number_value()); + } else { + flag_ = std::stoi(members.at("flag").string_value()); + } + + if (members.contains("properties")) { + properties_ = members.at("properties").string_value(); + } + + if (members.contains("reconsumeTimes")) { + if (members.at("reconsumeTimes").kind_case() == ProtobufWkt::Value::kNumberValue) { + reconsume_time_ = members.at("reconsumeTimes").number_value(); + } else { + reconsume_time_ = std::stoi(members.at("reconsumeTimes").string_value()); + } + } + + if (members.contains("unitMode")) { + if (members.at("unitMode").kind_case() == ProtobufWkt::Value::kBoolValue) { + unit_mode_ = members.at("unitMode").bool_value(); + } else { + unit_mode_ = (members.at("unitMode").string_value() == std::string("true")); + } + } + + if (members.contains("batch")) { + if (members.at("batch").kind_case() == ProtobufWkt::Value::kBoolValue) { + batch_ = members.at("batch").bool_value(); + } else { + batch_ = (members.at("batch").string_value() == std::string("true")); + } + } + + if (members.contains("maxReconsumeTimes")) { + if (members.at("maxReconsumeTimes").kind_case() == 
ProtobufWkt::Value::kNumberValue) { + max_reconsume_time_ = static_cast(members.at("maxReconsumeTimes").number_value()); + } else { + max_reconsume_time_ = std::stoi(members.at("maxReconsumeTimes").string_value()); + } + } + break; + } + + case SendMessageRequestVersion::V2: { + ASSERT(members.contains("a")); + ASSERT(members.contains("b")); + ASSERT(members.contains("c")); + ASSERT(members.contains("d")); + ASSERT(members.contains("e")); + ASSERT(members.contains("f")); + ASSERT(members.contains("g")); + ASSERT(members.contains("h")); + + producer_group_ = members.at("a").string_value(); + topic_ = members.at("b").string_value(); + default_topic_ = members.at("c").string_value(); + + if (members.at("d").kind_case() == ProtobufWkt::Value::kNumberValue) { + default_topic_queue_number_ = members.at("d").number_value(); + } else { + default_topic_queue_number_ = std::stoi(members.at("d").string_value()); + } + + if (members.at("e").kind_case() == ProtobufWkt::Value::kNumberValue) { + queue_id_ = members.at("e").number_value(); + } else { + queue_id_ = std::stoi(members.at("e").string_value()); + } + + if (members.at("f").kind_case() == ProtobufWkt::Value::kNumberValue) { + sys_flag_ = static_cast(members.at("f").number_value()); + } else { + sys_flag_ = std::stoi(members.at("f").string_value()); + } + + if (members.at("g").kind_case() == ProtobufWkt::Value::kNumberValue) { + born_timestamp_ = static_cast(members.at("g").number_value()); + } else { + born_timestamp_ = std::stoll(members.at("g").string_value()); + } + + if (members.at("h").kind_case() == ProtobufWkt::Value::kNumberValue) { + flag_ = static_cast(members.at("h").number_value()); + } else { + flag_ = std::stoi(members.at("h").string_value()); + } + + if (members.contains("i")) { + properties_ = members.at("i").string_value(); + } + + if (members.contains("j")) { + if (members.at("j").kind_case() == ProtobufWkt::Value::kNumberValue) { + reconsume_time_ = members.at("j").number_value(); + } else { + 
reconsume_time_ = std::stoi(members.at("j").string_value()); + } + } + + if (members.contains("k")) { + if (members.at("k").kind_case() == ProtobufWkt::Value::kBoolValue) { + unit_mode_ = members.at("k").bool_value(); + } else { + unit_mode_ = (members.at("k").string_value() == std::string("true")); + } + } + + if (members.contains("m")) { + if (members.at("m").kind_case() == ProtobufWkt::Value::kBoolValue) { + batch_ = members.at("m").bool_value(); + } else { + batch_ = (members.at("m").string_value() == std::string("true")); + } + } + + if (members.contains("l")) { + if (members.at("l").kind_case() == ProtobufWkt::Value::kNumberValue) { + max_reconsume_time_ = members.at("l").number_value(); + } else { + max_reconsume_time_ = std::stoi(members.at("l").string_value()); + } + } + break; + } + default: + ENVOY_LOG(error, "Unknown SendMessageRequestVersion: {}", static_cast(version_)); + break; + } +} + +void SendMessageResponseHeader::encode(ProtobufWkt::Value& root) { + auto& members = *(root.mutable_struct_value()->mutable_fields()); + + ASSERT(!msg_id_.empty()); + ProtobufWkt::Value msg_id_v; + msg_id_v.set_string_value(msg_id_.c_str(), msg_id_.length()); + members["msgId"] = msg_id_v; + + ASSERT(queue_id_ >= 0); + ProtobufWkt::Value queue_id_v; + queue_id_v.set_number_value(queue_id_); + members["queueId"] = queue_id_v; + + ASSERT(queue_offset_ >= 0); + ProtobufWkt::Value queue_offset_v; + queue_offset_v.set_number_value(queue_offset_); + members["queueOffset"] = queue_offset_v; + + if (!transaction_id_.empty()) { + ProtobufWkt::Value transaction_id_v; + transaction_id_v.set_string_value(transaction_id_.c_str(), transaction_id_.length()); + members["transactionId"] = transaction_id_v; + } +} + +void SendMessageResponseHeader::decode(const ProtobufWkt::Value& ext_fields) { + const auto& members = ext_fields.struct_value().fields(); + ASSERT(members.contains("msgId")); + ASSERT(members.contains("queueId")); + ASSERT(members.contains("queueOffset")); + + msg_id_ = 
members.at("msgId").string_value(); + + if (members.at("queueId").kind_case() == ProtobufWkt::Value::kNumberValue) { + queue_id_ = members.at("queueId").number_value(); + } else { + queue_id_ = std::stoi(members.at("queueId").string_value()); + } + + if (members.at("queueOffset").kind_case() == ProtobufWkt::Value::kNumberValue) { + queue_offset_ = members.at("queueOffset").number_value(); + } else { + queue_offset_ = std::stoll(members.at("queueOffset").string_value()); + } + + if (members.contains("transactionId")) { + transaction_id_ = members.at("transactionId").string_value(); + } +} + +void GetRouteInfoRequestHeader::encode(ProtobufWkt::Value& root) { + auto& members = *(root.mutable_struct_value()->mutable_fields()); + + ProtobufWkt::Value topic_v; + topic_v.set_string_value(topic_.c_str(), topic_.length()); + members["topic"] = topic_v; +} + +void GetRouteInfoRequestHeader::decode(const ProtobufWkt::Value& ext_fields) { + const auto& members = ext_fields.struct_value().fields(); + ASSERT(members.contains("topic")); + topic_ = members.at("topic").string_value(); +} + +void PopMessageRequestHeader::encode(ProtobufWkt::Value& root) { + auto& members = *(root.mutable_struct_value()->mutable_fields()); + + ASSERT(!consumer_group_.empty()); + ProtobufWkt::Value consumer_group_v; + consumer_group_v.set_string_value(consumer_group_.c_str(), consumer_group_.size()); + members["consumerGroup"] = consumer_group_v; + + ASSERT(!topic_.empty()); + ProtobufWkt::Value topicNode; + topicNode.set_string_value(topic_.c_str(), topic_.length()); + members["topic"] = topicNode; + + ProtobufWkt::Value queue_id_v; + queue_id_v.set_number_value(queue_id_); + members["queueId"] = queue_id_v; + + ProtobufWkt::Value max_msg_nums_v; + max_msg_nums_v.set_number_value(max_msg_nums_); + members["maxMsgNums"] = max_msg_nums_v; + + ProtobufWkt::Value invisible_time_v; + invisible_time_v.set_number_value(invisible_time_); + members["invisibleTime"] = invisible_time_v; + + ProtobufWkt::Value 
poll_time_v; + poll_time_v.set_number_value(poll_time_); + members["pollTime"] = poll_time_v; + + ProtobufWkt::Value born_time_v; + born_time_v.set_number_value(born_time_); + members["bornTime"] = born_time_v; + + ProtobufWkt::Value init_mode_v; + init_mode_v.set_number_value(init_mode_); + members["initMode"] = init_mode_v; + + if (!exp_type_.empty()) { + ProtobufWkt::Value exp_type_v; + exp_type_v.set_string_value(exp_type_.c_str(), exp_type_.size()); + members["expType"] = exp_type_v; + } + + if (!exp_.empty()) { + ProtobufWkt::Value exp_v; + exp_v.set_string_value(exp_.c_str(), exp_.size()); + members["exp"] = exp_v; + } +} + +void PopMessageRequestHeader::decode(const ProtobufWkt::Value& ext_fields) { + const auto& members = ext_fields.struct_value().fields(); + ASSERT(members.contains("consumerGroup")); + ASSERT(members.contains("topic")); + ASSERT(members.contains("queueId")); + ASSERT(members.contains("maxMsgNums")); + ASSERT(members.contains("invisibleTime")); + ASSERT(members.contains("pollTime")); + ASSERT(members.contains("bornTime")); + ASSERT(members.contains("initMode")); + + consumer_group_ = members.at("consumerGroup").string_value(); + topic_ = members.at("topic").string_value(); + + if (members.at("queueId").kind_case() == ProtobufWkt::Value::kNumberValue) { + queue_id_ = members.at("queueId").number_value(); + } else { + queue_id_ = std::stoi(members.at("queueId").string_value()); + } + + if (members.at("maxMsgNums").kind_case() == ProtobufWkt::Value::kNumberValue) { + max_msg_nums_ = members.at("maxMsgNums").number_value(); + } else { + max_msg_nums_ = std::stoi(members.at("maxMsgNums").string_value()); + } + + if (members.at("invisibleTime").kind_case() == ProtobufWkt::Value::kNumberValue) { + invisible_time_ = members.at("invisibleTime").number_value(); + } else { + invisible_time_ = std::stoll(members.at("invisibleTime").string_value()); + } + + if (members.at("pollTime").kind_case() == ProtobufWkt::Value::kNumberValue) { + poll_time_ = 
members.at("pollTime").number_value(); + } else { + poll_time_ = std::stoll(members.at("pollTime").string_value()); + } + + if (members.at("bornTime").kind_case() == ProtobufWkt::Value::kNumberValue) { + born_time_ = members.at("bornTime").number_value(); + } else { + born_time_ = std::stoll(members.at("bornTime").string_value()); + } + + if (members.at("initMode").kind_case() == ProtobufWkt::Value::kNumberValue) { + init_mode_ = members.at("initMode").number_value(); + } else { + init_mode_ = std::stol(members.at("initMode").string_value()); + } + + if (members.contains("expType")) { + exp_type_ = members.at("expType").string_value(); + } + + if (members.contains("exp")) { + exp_ = members.at("exp").string_value(); + } +} + +void PopMessageResponseHeader::encode(ProtobufWkt::Value& root) { + auto& members = *(root.mutable_struct_value()->mutable_fields()); + + ProtobufWkt::Value pop_time_v; + pop_time_v.set_number_value(pop_time_); + members["popTime"] = pop_time_v; + + ProtobufWkt::Value invisible_time_v; + invisible_time_v.set_number_value(invisible_time_); + members["invisibleTime"] = invisible_time_v; + + ProtobufWkt::Value revive_qid_v; + revive_qid_v.set_number_value(revive_qid_); + members["reviveQid"] = revive_qid_v; + + ProtobufWkt::Value rest_num_v; + rest_num_v.set_number_value(rest_num_); + members["restNum"] = rest_num_v; + + if (!start_offset_info_.empty()) { + ProtobufWkt::Value start_offset_info_v; + start_offset_info_v.set_string_value(start_offset_info_.c_str(), start_offset_info_.size()); + members["startOffsetInfo"] = start_offset_info_v; + } + + if (!msg_off_set_info_.empty()) { + ProtobufWkt::Value msg_offset_info_v; + msg_offset_info_v.set_string_value(msg_off_set_info_.c_str(), msg_off_set_info_.size()); + members["msgOffsetInfo"] = msg_offset_info_v; + } + + if (!order_count_info_.empty()) { + ProtobufWkt::Value order_count_info_v; + order_count_info_v.set_string_value(order_count_info_.c_str(), order_count_info_.size()); + 
members["orderCountInfo"] = order_count_info_v; + } +} + +void PopMessageResponseHeader::decode(const ProtobufWkt::Value& ext_fields) { + const auto& members = ext_fields.struct_value().fields(); + ASSERT(members.contains("popTime")); + ASSERT(members.contains("invisibleTime")); + ASSERT(members.contains("reviveQid")); + ASSERT(members.contains("restNum")); + + if (members.at("popTime").kind_case() == ProtobufWkt::Value::kNumberValue) { + pop_time_ = members.at("popTime").number_value(); + } else { + pop_time_ = std::stoull(members.at("popTime").string_value()); + } + + if (members.at("invisibleTime").kind_case() == ProtobufWkt::Value::kNumberValue) { + invisible_time_ = members.at("invisibleTime").number_value(); + } else { + invisible_time_ = std::stoull(members.at("invisibleTime").string_value()); + } + + if (members.at("reviveQid").kind_case() == ProtobufWkt::Value::kNumberValue) { + revive_qid_ = members.at("reviveQid").number_value(); + } else { + revive_qid_ = std::stoul(members.at("reviveQid").string_value()); + } + + if (members.at("restNum").kind_case() == ProtobufWkt::Value::kNumberValue) { + rest_num_ = members.at("restNum").number_value(); + } else { + rest_num_ = std::stoull(members.at("restNum").string_value()); + } + + if (members.contains("startOffsetInfo")) { + start_offset_info_ = members.at("startOffsetInfo").string_value(); + } + + if (members.contains("msgOffsetInfo")) { + msg_off_set_info_ = members.at("msgOffsetInfo").string_value(); + } + + if (members.contains("orderCountInfo")) { + order_count_info_ = members.at("orderCountInfo").string_value(); + } +} + +void AckMessageRequestHeader::encode(ProtobufWkt::Value& root) { + auto& members = *(root.mutable_struct_value()->mutable_fields()); + + ASSERT(!consumer_group_.empty()); + ProtobufWkt::Value consumer_group_v; + consumer_group_v.set_string_value(consumer_group_.c_str(), consumer_group_.size()); + members["consumerGroup"] = consumer_group_v; + + ASSERT(!topic_.empty()); + 
ProtobufWkt::Value topic_v; + topic_v.set_string_value(topic_.c_str(), topic_.size()); + members["topic"] = topic_v; + + ASSERT(queue_id_ >= 0); + ProtobufWkt::Value queue_id_v; + queue_id_v.set_number_value(queue_id_); + members["queueId"] = queue_id_v; + + ASSERT(!extra_info_.empty()); + ProtobufWkt::Value extra_info_v; + extra_info_v.set_string_value(extra_info_.c_str(), extra_info_.size()); + members["extraInfo"] = extra_info_v; + + ASSERT(offset_ >= 0); + ProtobufWkt::Value offset_v; + offset_v.set_number_value(offset_); + members["offset"] = offset_v; +} + +void AckMessageRequestHeader::decode(const ProtobufWkt::Value& ext_fields) { + const auto& members = ext_fields.struct_value().fields(); + ASSERT(members.contains("consumerGroup")); + ASSERT(members.contains("topic")); + ASSERT(members.contains("queueId")); + ASSERT(members.contains("extraInfo")); + ASSERT(members.contains("offset")); + + consumer_group_ = members.at("consumerGroup").string_value(); + + topic_ = members.at("topic").string_value(); + + if (members.at("queueId").kind_case() == ProtobufWkt::Value::kNumberValue) { + queue_id_ = members.at("queueId").number_value(); + } else { + queue_id_ = std::stoi(members.at("queueId").string_value()); + } + + extra_info_ = members.at("extraInfo").string_value(); + + if (members.at("offset").kind_case() == ProtobufWkt::Value::kNumberValue) { + offset_ = members.at("offset").number_value(); + } else { + offset_ = std::stoll(members.at("offset").string_value()); + } +} + +void UnregisterClientRequestHeader::encode(ProtobufWkt::Value& root) { + auto& members = *(root.mutable_struct_value()->mutable_fields()); + + ASSERT(!client_id_.empty()); + ProtobufWkt::Value client_id_v; + client_id_v.set_string_value(client_id_.c_str(), client_id_.size()); + members["clientID"] = client_id_v; + + ASSERT(!producer_group_.empty() || !consumer_group_.empty()); + if (!producer_group_.empty()) { + ProtobufWkt::Value producer_group_v; + 
producer_group_v.set_string_value(producer_group_.c_str(), producer_group_.size()); + members["producerGroup"] = producer_group_v; + } + + if (!consumer_group_.empty()) { + ProtobufWkt::Value consumer_group_v; + consumer_group_v.set_string_value(consumer_group_.c_str(), consumer_group_.size()); + members["consumerGroup"] = consumer_group_v; + } +} + +void UnregisterClientRequestHeader::decode(const ProtobufWkt::Value& ext_fields) { + const auto& members = ext_fields.struct_value().fields(); + ASSERT(members.contains("clientID")); + ASSERT(members.contains("producerGroup") || members.contains("consumerGroup")); + + client_id_ = members.at("clientID").string_value(); + + if (members.contains("consumerGroup")) { + consumer_group_ = members.at("consumerGroup").string_value(); + } + + if (members.contains("producerGroup")) { + producer_group_ = members.at("producerGroup").string_value(); + } +} + +void GetConsumerListByGroupResponseBody::encode(ProtobufWkt::Struct& root) { + auto& members = *(root.mutable_fields()); + + ProtobufWkt::Value consumer_id_list_v; + auto member_list = consumer_id_list_v.mutable_list_value(); + for (const auto& consumerId : consumer_id_list_) { + auto consumer_id_v = new ProtobufWkt::Value; + consumer_id_v->set_string_value(consumerId.c_str(), consumerId.size()); + member_list->mutable_values()->AddAllocated(consumer_id_v); + } + members["consumerIdList"] = consumer_id_list_v; +} + +bool HeartbeatData::decode(ProtobufWkt::Struct& doc) { + const auto& members = doc.fields(); + if (!members.contains("clientID")) { + return false; + } + + client_id_ = members.at("clientID").string_value(); + + if (members.contains("consumerDataSet")) { + auto& consumer_data_list = members.at("consumerDataSet").list_value().values(); + for (const auto& it : consumer_data_list) { + if (it.struct_value().fields().contains("groupName")) { + consumer_groups_.push_back(it.struct_value().fields().at("groupName").string_value()); + } + } + } + return true; +} + +void 
HeartbeatData::encode(ProtobufWkt::Struct& root) { + auto& members = *(root.mutable_fields()); + + ProtobufWkt::Value client_id_v; + client_id_v.set_string_value(client_id_.c_str(), client_id_.size()); + members["clientID"] = client_id_v; +} + +void GetConsumerListByGroupRequestHeader::encode(ProtobufWkt::Value& root) { + auto& members = *(root.mutable_struct_value()->mutable_fields()); + + ProtobufWkt::Value consumer_group_v; + consumer_group_v.set_string_value(consumer_group_.c_str(), consumer_group_.size()); + members["consumerGroup"] = consumer_group_v; +} + +void GetConsumerListByGroupRequestHeader::decode(const ProtobufWkt::Value& ext_fields) { + const auto& members = ext_fields.struct_value().fields(); + ASSERT(members.contains("consumerGroup")); + + consumer_group_ = members.at("consumerGroup").string_value(); +} + +void MetadataHelper::parseRequest(RemotingCommandPtr& request, MessageMetadataSharedPtr metadata) { + metadata->setOneWay(request->isOneWay()); + CommandCustomHeader* custom_header = request->customHeader(); + + auto route_command_custom_header = request->typedCustomHeader(); + if (route_command_custom_header != nullptr) { + metadata->setTopicName(route_command_custom_header->topic()); + } + + const uint64_t code = request->code(); + metadata->headers().addCopy(Http::LowerCaseString("code"), code); + + if (enumToInt(RequestCode::AckMessage) == code) { + metadata->headers().addCopy(Http::LowerCaseString(RocketmqConstants::get().BrokerName), + custom_header->targetBrokerName()); + metadata->headers().addCopy(Http::LowerCaseString(RocketmqConstants::get().BrokerId), + custom_header->targetBrokerId()); + } +} + +} // namespace RocketmqProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/source/extensions/filters/network/rocketmq_proxy/protocol.h b/source/extensions/filters/network/rocketmq_proxy/protocol.h new file mode 100644 index 0000000000000..fee961767e0e7 --- /dev/null 
+++ b/source/extensions/filters/network/rocketmq_proxy/protocol.h @@ -0,0 +1,672 @@ +#pragma once + +#include +#include + +#include "envoy/common/pure.h" +#include "envoy/common/time.h" + +#include "common/buffer/buffer_impl.h" +#include "common/common/logger.h" +#include "common/protobuf/protobuf.h" + +#include "extensions/filters/network/rocketmq_proxy/metadata.h" + +#include "absl/strings/string_view.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RocketmqProxy { + +/** + * Retry topic prefix + */ +constexpr absl::string_view RetryTopicPrefix = "%RETRY%"; + +/** + * RocketMQ supports two versions of sending message protocol. These two versions are identical in + * terms of functionality. But they do differ in encoding scheme. See SendMessageRequestHeader + * encode/decode functions for specific differences. + */ +enum class SendMessageRequestVersion : uint32_t { + V1 = 0, + V2 = 1, + // Only for test purpose + V3 = 2, +}; + +/** + * Command custom header are used in combination with RemotingCommand::code, to provide further + * instructions and data for the operation defined by the protocol. + * In addition to the shared encode/decode functions, this class also defines target-broker-name and + * target-broker-id fields, which are helpful if the associated remoting command should be delivered + * to specific host according to the semantics of the previous command. 
+ */ +class CommandCustomHeader { +public: + CommandCustomHeader() = default; + + virtual ~CommandCustomHeader() = default; + + virtual void encode(ProtobufWkt::Value& root) PURE; + + virtual void decode(const ProtobufWkt::Value& ext_fields) PURE; + + const std::string& targetBrokerName() const { return target_broker_name_; } + + void targetBrokerName(absl::string_view broker_name) { + target_broker_name_ = std::string(broker_name.data(), broker_name.length()); + } + + int32_t targetBrokerId() const { return target_broker_id_; } + + void targetBrokerId(int32_t broker_id) { target_broker_id_ = broker_id; } + +protected: + /** + * If this field is not empty, RDS will employ this field and target-broker-id to direct the + * associated request to a subset of the chosen cluster. + */ + std::string target_broker_name_; + + /** + * Used along with target-broker-name field. + */ + int32_t target_broker_id_; +}; + +using CommandCustomHeaderPtr = CommandCustomHeader*; + +/** + * This class extends from CommandCustomHeader, adding a commonly used field by various custom + * command headers which participate the process of request routing. + */ +class RoutingCommandCustomHeader : public CommandCustomHeader { +public: + virtual const std::string& topic() const { return topic_; } + + virtual void topic(absl::string_view t) { topic_ = std::string(t.data(), t.size()); } + +protected: + std::string topic_; +}; + +/** + * This class defines basic request/response forms used by RocketMQ among all its components. 
+ */ +class RemotingCommand { +public: + RemotingCommand() : RemotingCommand(0, 0, 0) {} + + RemotingCommand(int code, int version, int opaque) + : code_(code), version_(version), opaque_(opaque), flag_(0) {} + + ~RemotingCommand() { delete custom_header_; } + + int32_t code() const { return code_; } + + void code(int code) { code_ = code; } + + const std::string& language() const { return language_; } + + void language(absl::string_view lang) { language_ = std::string(lang.data(), lang.size()); } + + int32_t version() const { return version_; } + + void opaque(int opaque) { opaque_ = opaque; } + + int32_t opaque() const { return opaque_; } + + uint32_t flag() const { return flag_; } + + void flag(uint32_t f) { flag_ = f; } + + void customHeader(CommandCustomHeaderPtr custom_header) { custom_header_ = custom_header; } + + CommandCustomHeaderPtr customHeader() const { return custom_header_; } + + template T* typedCustomHeader() { + if (!custom_header_) { + return nullptr; + } + + return dynamic_cast(custom_header_); + } + + uint32_t bodyLength() const { return body_.length(); } + + Buffer::Instance& body() { return body_; } + + const std::string& remark() const { return remark_; } + + void remark(absl::string_view remark) { remark_ = std::string(remark.data(), remark.length()); } + + const std::string& serializeTypeCurrentRPC() const { return serialize_type_current_rpc_; } + + void serializeTypeCurrentRPC(absl::string_view serialization_type) { + serialize_type_current_rpc_ = std::string(serialization_type.data(), serialization_type.size()); + } + + bool isOneWay() const { + uint32_t marker = 1u << SHIFT_ONEWAY; + return (flag_ & marker) == marker; + } + + void markAsResponse() { flag_ |= (1u << SHIFT_RPC); } + + void markAsOneway() { flag_ |= (1u << SHIFT_ONEWAY); } + + static bool isResponse(uint32_t flag) { return (flag & (1u << SHIFT_RPC)) == (1u << SHIFT_RPC); } + +private: + /** + * Action code of this command. 
Possible values are defined in RequestCode enumeration. + */ + int32_t code_; + + /** + * Language used by the client. + */ + std::string language_{"CPP"}; + + /** + * Version of the client SDK. + */ + int32_t version_; + + /** + * Request ID. If the RPC is request-response form, this field is used to establish the + * association. + */ + int32_t opaque_; + + /** + * Bit-wise flag indicating RPC type, including whether it is one-way or request-response; + * a request or response command. + */ + uint32_t flag_; + + /** + * Remark is used to deliver text message in addition to code. Urgent scenarios may use this field + * to transfer diagnostic message to the counterparts when a full-fledged response is impossible. + */ + std::string remark_; + + /** + * Indicate how the custom command header is serialized. + */ + std::string serialize_type_current_rpc_{"JSON"}; + + /** + * The custom command header works with command code to provide additional protocol + * implementation. + * Generally speaking, each code has pair of request/response custom command header. + */ + CommandCustomHeaderPtr custom_header_{nullptr}; + + /** + * The command body, in form of binary. + */ + Buffer::OwnedImpl body_; + + static constexpr uint32_t SHIFT_RPC = 0; + + static constexpr uint32_t SHIFT_ONEWAY = 1; + + friend class Encoder; + friend class Decoder; +}; + +using RemotingCommandPtr = std::unique_ptr; + +/** + * Command codes used when sending requests. Meaning of each field is self-explanatory. + */ +enum class RequestCode : uint32_t { + SendMessage = 10, + HeartBeat = 34, + UnregisterClient = 35, + GetConsumerListByGroup = 38, + PopMessage = 50, + AckMessage = 51, + GetRouteInfoByTopic = 105, + SendMessageV2 = 310, + // Only for test purpose + Unsupported = 999, +}; + +/** + * Command code used when sending responses. Meaning of each enum is self-explanatory. 
+ */ +enum class ResponseCode : uint32_t { + Success = 0, + SystemError = 1, + SystemBusy = 2, + RequestCodeNotSupported = 3, + ReplicaNotAvailable = 11, +}; + +/** + * Custom command header for sending messages. + */ +class SendMessageRequestHeader : public RoutingCommandCustomHeader, + Logger::Loggable { +public: + ~SendMessageRequestHeader() override = default; + + int32_t queueId() const { return queue_id_; } + + /** + * TODO(lizhanhui): Remove this write API after adding queue-id-aware route logic + * @param queue_id target queue Id. + */ + void queueId(int32_t queue_id) { queue_id_ = queue_id; } + + void producerGroup(std::string producer_group) { producer_group_ = std::move(producer_group); } + + void encode(ProtobufWkt::Value& root) override; + + void decode(const ProtobufWkt::Value& ext_fields) override; + + const std::string& producerGroup() const { return producer_group_; } + + const std::string& defaultTopic() const { return default_topic_; } + + int32_t defaultTopicQueueNumber() const { return default_topic_queue_number_; } + + int32_t sysFlag() const { return sys_flag_; } + + int32_t flag() const { return flag_; } + + int64_t bornTimestamp() const { return born_timestamp_; } + + const std::string& properties() const { return properties_; } + + int32_t reconsumeTimes() const { return reconsume_time_; } + + bool unitMode() const { return unit_mode_; } + + bool batch() const { return batch_; } + + int32_t maxReconsumeTimes() const { return max_reconsume_time_; } + + void properties(absl::string_view props) { + properties_ = std::string(props.data(), props.size()); + } + + void reconsumeTimes(int32_t reconsume_times) { reconsume_time_ = reconsume_times; } + + void unitMode(bool unit_mode) { unit_mode_ = unit_mode; } + + void batch(bool batch) { batch_ = batch; } + + void maxReconsumeTimes(int32_t max_reconsume_times) { max_reconsume_time_ = max_reconsume_times; } + + void version(SendMessageRequestVersion version) { version_ = version; } + + 
SendMessageRequestVersion version() const { return version_; } + +private: + std::string producer_group_; + std::string default_topic_; + int32_t default_topic_queue_number_{0}; + int32_t queue_id_{-1}; + int32_t sys_flag_{0}; + int64_t born_timestamp_{0}; + int32_t flag_{0}; + std::string properties_; + int32_t reconsume_time_{0}; + bool unit_mode_{false}; + bool batch_{false}; + int32_t max_reconsume_time_{0}; + SendMessageRequestVersion version_{SendMessageRequestVersion::V1}; + + friend class Decoder; +}; + +/** + * Custom command header to respond to a send-message-request. + */ +class SendMessageResponseHeader : public CommandCustomHeader { +public: + SendMessageResponseHeader() = default; + + SendMessageResponseHeader(std::string msg_id, int32_t queue_id, int64_t queue_offset, + std::string transaction_id) + : msg_id_(std::move(msg_id)), queue_id_(queue_id), queue_offset_(queue_offset), + transaction_id_(std::move(transaction_id)) {} + + void encode(ProtobufWkt::Value& root) override; + + void decode(const ProtobufWkt::Value& ext_fields) override; + + const std::string& msgId() const { return msg_id_; } + + int32_t queueId() const { return queue_id_; } + + int64_t queueOffset() const { return queue_offset_; } + + const std::string& transactionId() const { return transaction_id_; } + + // This function is for testing only. + void msgIdForTest(absl::string_view msg_id) { + msg_id_ = std::string(msg_id.data(), msg_id.size()); + } + + void queueId(int32_t queue_id) { queue_id_ = queue_id; } + + void queueOffset(int64_t queue_offset) { queue_offset_ = queue_offset; } + + void transactionId(absl::string_view transaction_id) { + transaction_id_ = std::string(transaction_id.data(), transaction_id.size()); + } + +private: + std::string msg_id_; + int32_t queue_id_{0}; + int64_t queue_offset_{0}; + std::string transaction_id_; +}; + +/** + * Classic RocketMQ needs to known addresses of each broker to work with. 
To resolve the addresses, + * client SDK uses this command header to query name servers. + * + * This header is kept for compatible purpose only. + */ +class GetRouteInfoRequestHeader : public RoutingCommandCustomHeader { +public: + void encode(ProtobufWkt::Value& root) override; + + void decode(const ProtobufWkt::Value& ext_fields) override; +}; + +/** + * When a client wishes to consume messages stored in brokers, it sends a pop command to brokers. + * Brokers would send a batch of messages to the client. At the same time, the broker keeps the + * batch invisible for a configured period of time, waiting for acknowledgments from the client. + * + * If the client manages to consume the messages within promised time interval and sends ack command + * back to the broker, the broker will mark the acknowledged ones as consumed. Otherwise, the + * previously sent messages are visible again and would be consumable for other client instances. + * + * Through this approach, we achieves stateless message-pulling, comparing to classic offset-based + * consuming progress management. This models brings about some extra workload to broker side, but + * it fits Envoy well. 
+ */ +class PopMessageRequestHeader : public RoutingCommandCustomHeader { +public: + friend class Decoder; + + void encode(ProtobufWkt::Value& root) override; + + void decode(const ProtobufWkt::Value& ext_fields) override; + + const std::string& consumerGroup() const { return consumer_group_; } + + void consumerGroup(absl::string_view consumer_group) { + consumer_group_ = std::string(consumer_group.data(), consumer_group.size()); + } + + int32_t queueId() const { return queue_id_; } + + void queueId(int32_t queue_id) { queue_id_ = queue_id; } + + int32_t maxMsgNum() const { return max_msg_nums_; } + + void maxMsgNum(int32_t max_msg_num) { max_msg_nums_ = max_msg_num; } + + int64_t invisibleTime() const { return invisible_time_; } + + void invisibleTime(int64_t invisible_time) { invisible_time_ = invisible_time; } + + int64_t pollTime() const { return poll_time_; } + + void pollTime(int64_t poll_time) { poll_time_ = poll_time; } + + int64_t bornTime() const { return born_time_; } + + void bornTime(int64_t born_time) { born_time_ = born_time; } + + int32_t initMode() const { return init_mode_; } + + void initMode(int32_t init_mode) { init_mode_ = init_mode; } + + const std::string& expType() const { return exp_type_; } + + void expType(absl::string_view exp_type) { + exp_type_ = std::string(exp_type.data(), exp_type.size()); + } + + const std::string& exp() const { return exp_; } + + void exp(absl::string_view exp) { exp_ = std::string(exp.data(), exp.size()); } + +private: + std::string consumer_group_; + int32_t queue_id_{-1}; + int32_t max_msg_nums_{32}; + int64_t invisible_time_{0}; + int64_t poll_time_{0}; + int64_t born_time_{0}; + int32_t init_mode_{0}; + std::string exp_type_; + std::string exp_; + bool order_{false}; +}; + +/** + * The pop response command header. See pop request header for how-things-work explanation. 
+ */ +class PopMessageResponseHeader : public CommandCustomHeader { +public: + void decode(const ProtobufWkt::Value& ext_fields) override; + + void encode(ProtobufWkt::Value& root) override; + + // This function is for testing only. + int64_t popTimeForTest() const { return pop_time_; } + + void popTime(int64_t pop_time) { pop_time_ = pop_time; } + + int64_t invisibleTime() const { return invisible_time_; } + + void invisibleTime(int64_t invisible_time) { invisible_time_ = invisible_time; } + + int32_t reviveQid() const { return revive_qid_; } + + void reviveQid(int32_t revive_qid) { revive_qid_ = revive_qid; } + + int64_t restNum() const { return rest_num_; } + + void restNum(int64_t rest_num) { rest_num_ = rest_num; } + + const std::string& startOffsetInfo() const { return start_offset_info_; } + + void startOffsetInfo(absl::string_view start_offset_info) { + start_offset_info_ = std::string(start_offset_info.data(), start_offset_info.size()); + } + + const std::string& msgOffsetInfo() const { return msg_off_set_info_; } + + void msgOffsetInfo(absl::string_view msg_offset_info) { + msg_off_set_info_ = std::string(msg_offset_info.data(), msg_offset_info.size()); + } + + const std::string& orderCountInfo() const { return order_count_info_; } + + void orderCountInfo(absl::string_view order_count_info) { + order_count_info_ = std::string(order_count_info.data(), order_count_info.size()); + } + +private: + int64_t pop_time_{0}; + int64_t invisible_time_{0}; + int32_t revive_qid_{0}; + int64_t rest_num_{0}; + std::string start_offset_info_; + std::string msg_off_set_info_; + std::string order_count_info_; +}; + +/** + * This command is used by the client to acknowledge message(s) that has been successfully consumed. + * Once the broker received this request, the associated message will formally marked as consumed. + * + * Note: the ack request has to be sent the exactly same broker where messages are popped from. 
+ */ +class AckMessageRequestHeader : public RoutingCommandCustomHeader { +public: + void decode(const ProtobufWkt::Value& ext_fields) override; + + void encode(ProtobufWkt::Value& root) override; + + absl::string_view consumerGroup() const { return consumer_group_; } + + int64_t offset() const { return offset_; } + + void consumerGroup(absl::string_view consumer_group) { + consumer_group_ = std::string(consumer_group.data(), consumer_group.size()); + } + + int32_t queueId() const { return queue_id_; } + void queueId(int32_t queue_id) { queue_id_ = queue_id; } + + absl::string_view extraInfo() const { return extra_info_; } + void extraInfo(absl::string_view extra_info) { + extra_info_ = std::string(extra_info.data(), extra_info.size()); + } + + void offset(int64_t offset) { offset_ = offset; } + + const std::string& directiveKey() { + if (key_.empty()) { + key_ = fmt::format("{}-{}-{}-{}", consumer_group_, topic_, queue_id_, offset_); + } + return key_; + } + +private: + std::string consumer_group_; + int32_t queue_id_{0}; + std::string extra_info_; + int64_t offset_{0}; + std::string key_; +}; + +/** + * When a client shuts down gracefully, it notifies broker(now envoy) this event. 
+ */ +class UnregisterClientRequestHeader : public CommandCustomHeader { +public: + void encode(ProtobufWkt::Value& root) override; + + void decode(const ProtobufWkt::Value& ext_fields) override; + + void clientId(absl::string_view client_id) { + client_id_ = std::string(client_id.data(), client_id.length()); + } + + const std::string& clientId() const { return client_id_; } + + void producerGroup(absl::string_view producer_group) { + producer_group_ = std::string(producer_group.data(), producer_group.length()); + } + + const std::string& producerGroup() const { return producer_group_; } + + void consumerGroup(absl::string_view consumer_group) { + consumer_group_ = std::string(consumer_group.data(), consumer_group.length()); + } + + const std::string& consumerGroup() const { return consumer_group_; } + +private: + std::string client_id_; + std::string producer_group_; + std::string consumer_group_; +}; + +/** + * Classic SDK clients use client-side load balancing. This header is kept for compatibility. + */ +class GetConsumerListByGroupRequestHeader : public CommandCustomHeader { +public: + void encode(ProtobufWkt::Value& root) override; + + void decode(const ProtobufWkt::Value& ext_fields) override; + + void consumerGroup(absl::string_view consumer_group) { + consumer_group_ = std::string(consumer_group.data(), consumer_group.length()); + } + + const std::string& consumerGroup() const { return consumer_group_; } + +private: + std::string consumer_group_; +}; + +/** + * The response body. + */ +class GetConsumerListByGroupResponseBody { +public: + void encode(ProtobufWkt::Struct& root); + + void add(absl::string_view consumer_id) { + consumer_id_list_.emplace_back(std::string(consumer_id.data(), consumer_id.length())); + } + +private: + std::vector consumer_id_list_; +}; + +/** + * Client periodically sends heartbeat to servers to maintain alive status. 
+ */ +class HeartbeatData : public Logger::Loggable { +public: + bool decode(ProtobufWkt::Struct& doc); + + const std::string& clientId() const { return client_id_; } + + const std::vector& consumerGroups() const { return consumer_groups_; } + + void encode(ProtobufWkt::Struct& root); + + void clientId(absl::string_view client_id) { + client_id_ = std::string(client_id.data(), client_id.size()); + } + +private: + std::string client_id_; + std::vector consumer_groups_; +}; + +class MetadataHelper { +public: + MetadataHelper() = delete; + + static void parseRequest(RemotingCommandPtr& request, MessageMetadataSharedPtr metadata); +}; + +/** + * Directive to ensure entailing ack requests are routed to the same broker host where pop + * requests are made. + */ +struct AckMessageDirective { + + AckMessageDirective(absl::string_view broker_name, int32_t broker_id, MonotonicTime create_time) + : broker_name_(broker_name.data(), broker_name.length()), broker_id_(broker_id), + creation_time_(create_time) {} + + std::string broker_name_; + int32_t broker_id_; + MonotonicTime creation_time_; +}; + +} // namespace RocketmqProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/source/extensions/filters/network/rocketmq_proxy/router/BUILD b/source/extensions/filters/network/rocketmq_proxy/router/BUILD new file mode 100644 index 0000000000000..8f303861daae2 --- /dev/null +++ b/source/extensions/filters/network/rocketmq_proxy/router/BUILD @@ -0,0 +1,50 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_extension_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_extension_package() + +envoy_cc_library( + name = "router_interface", + hdrs = ["router.h"], + deps = [ + "//include/envoy/tcp:conn_pool_interface", + "//include/envoy/upstream:load_balancer_interface", + "//source/common/upstream:load_balancer_lib", + ], +) + +envoy_cc_library( + name = "router_lib", + srcs = 
["router_impl.cc"], + hdrs = ["router_impl.h"], + deps = [ + ":router_interface", + "//include/envoy/upstream:cluster_manager_interface", + "//include/envoy/upstream:thread_local_cluster_interface", + "//source/extensions/filters/network:well_known_names", + "//source/extensions/filters/network/rocketmq_proxy:conn_manager_lib", + ], +) + +envoy_cc_library( + name = "route_matcher", + srcs = ["route_matcher.cc"], + hdrs = ["route_matcher.h"], + deps = [ + ":router_interface", + "//include/envoy/config:typed_config_interface", + "//include/envoy/server:filter_config_interface", + "//source/common/common:logger_lib", + "//source/common/common:matchers_lib", + "//source/common/http:header_utility_lib", + "//source/common/router:metadatamatchcriteria_lib", + "//source/extensions/filters/network:well_known_names", + "//source/extensions/filters/network/rocketmq_proxy:metadata_lib", + "@envoy_api//envoy/extensions/filters/network/rocketmq_proxy/v3:pkg_cc_proto", + ], +) diff --git a/source/extensions/filters/network/rocketmq_proxy/router/route_matcher.cc b/source/extensions/filters/network/rocketmq_proxy/router/route_matcher.cc new file mode 100644 index 0000000000000..e99d6c249ebb7 --- /dev/null +++ b/source/extensions/filters/network/rocketmq_proxy/router/route_matcher.cc @@ -0,0 +1,73 @@ +#include "extensions/filters/network/rocketmq_proxy/router/route_matcher.h" + +#include "common/router/metadatamatchcriteria_impl.h" + +#include "extensions/filters/network/rocketmq_proxy/metadata.h" +#include "extensions/filters/network/well_known_names.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RocketmqProxy { +namespace Router { + +RouteEntryImpl::RouteEntryImpl( + const envoy::extensions::filters::network::rocketmq_proxy::v3::Route& route) + : topic_name_(route.match().topic()), cluster_name_(route.route().cluster()), + config_headers_(Http::HeaderUtility::buildHeaderDataVector(route.match().headers())) { + + if 
(route.route().has_metadata_match()) { + const auto filter_it = route.route().metadata_match().filter_metadata().find( + Envoy::Config::MetadataFilters::get().ENVOY_LB); + if (filter_it != route.route().metadata_match().filter_metadata().end()) { + metadata_match_criteria_ = + std::make_unique<Envoy::Router::MetadataMatchCriteriaImpl>(filter_it->second); + } + } +} + +const std::string& RouteEntryImpl::clusterName() const { return cluster_name_; } + +const RouteEntry* RouteEntryImpl::routeEntry() const { return this; } + +RouteConstSharedPtr RouteEntryImpl::matches(const MessageMetadata& metadata) const { + if (headersMatch(metadata.headers())) { + const std::string& topic_name = metadata.topicName(); + if (topic_name_.match(topic_name)) { + return shared_from_this(); + } + } + return nullptr; +} + +bool RouteEntryImpl::headersMatch(const Http::HeaderMap& headers) const { + ENVOY_LOG(debug, "rocketmq route matcher: headers size {}, metadata headers size {}", + config_headers_.size(), headers.size()); + return Http::HeaderUtility::matchHeaders(headers, config_headers_); +} + +RouteMatcher::RouteMatcher(const RouteConfig& config) { + for (const auto& route : config.routes()) { + routes_.emplace_back(std::make_shared<RouteEntryImpl>(route)); + } + ENVOY_LOG(debug, "rocketmq route matcher: routes list size {}", routes_.size()); +} + +RouteConstSharedPtr RouteMatcher::route(const MessageMetadata& metadata) const { + const std::string& topic_name = metadata.topicName(); + for (const auto& route : routes_) { + RouteConstSharedPtr route_entry = route->matches(metadata); + if (nullptr != route_entry) { + ENVOY_LOG(debug, "rocketmq route matcher: find cluster success for topic: {}", topic_name); + return route_entry; + } + } + ENVOY_LOG(debug, "rocketmq route matcher: find cluster failed for topic: {}", topic_name); + return nullptr; +} + +} // namespace Router +} // namespace RocketmqProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git 
a/source/extensions/filters/network/rocketmq_proxy/router/route_matcher.h b/source/extensions/filters/network/rocketmq_proxy/router/route_matcher.h new file mode 100644 index 0000000000000..8cd4c533a5413 --- /dev/null +++ b/source/extensions/filters/network/rocketmq_proxy/router/route_matcher.h @@ -0,0 +1,71 @@ +#pragma once + +#include + +#include "envoy/config/typed_config.h" +#include "envoy/extensions/filters/network/rocketmq_proxy/v3/route.pb.h" +#include "envoy/server/filter_config.h" + +#include "common/common/logger.h" +#include "common/common/matchers.h" +#include "common/http/header_utility.h" + +#include "extensions/filters/network/rocketmq_proxy/router/router.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RocketmqProxy { + +class MessageMetadata; + +namespace Router { + +class RouteEntryImpl : public RouteEntry, + public Route, + public std::enable_shared_from_this, + public Logger::Loggable { +public: + RouteEntryImpl(const envoy::extensions::filters::network::rocketmq_proxy::v3::Route& route); + ~RouteEntryImpl() override = default; + + // Router::RouteEntry + const std::string& clusterName() const override; + const Envoy::Router::MetadataMatchCriteria* metadataMatchCriteria() const override { + return metadata_match_criteria_.get(); + } + + // Router::Route + const RouteEntry* routeEntry() const override; + + RouteConstSharedPtr matches(const MessageMetadata& metadata) const; + +private: + bool headersMatch(const Http::HeaderMap& headers) const; + + const Matchers::StringMatcherImpl topic_name_; + const std::string cluster_name_; + const std::vector config_headers_; + Envoy::Router::MetadataMatchCriteriaConstPtr metadata_match_criteria_; +}; + +using RouteEntryImplConstSharedPtr = std::shared_ptr; + +class RouteMatcher : public Logger::Loggable { +public: + using RouteConfig = envoy::extensions::filters::network::rocketmq_proxy::v3::RouteConfiguration; + RouteMatcher(const RouteConfig& config); + + 
RouteConstSharedPtr route(const MessageMetadata& metadata) const; + +private: + std::vector<RouteEntryImplConstSharedPtr> routes_; +}; + +using RouteMatcherPtr = std::unique_ptr<RouteMatcher>; + +} // namespace Router +} // namespace RocketmqProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/source/extensions/filters/network/rocketmq_proxy/router/router.h b/source/extensions/filters/network/rocketmq_proxy/router/router.h new file mode 100644 index 0000000000000..6067a7295fc64 --- /dev/null +++ b/source/extensions/filters/network/rocketmq_proxy/router/router.h @@ -0,0 +1,85 @@ +#pragma once + +#include "envoy/tcp/conn_pool.h" + +#include "common/upstream/load_balancer_impl.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RocketmqProxy { + +class ActiveMessage; +class MessageMetadata; + +namespace Router { + +/** + * RouteEntry is an individual resolved route entry. + */ +class RouteEntry { +public: + virtual ~RouteEntry() = default; + + /** + * @return const std::string& the upstream cluster that owns the route. + */ + virtual const std::string& clusterName() const PURE; + + /** + * @return MetadataMatchCriteria* the metadata that a subset load balancer should match when + * selecting an upstream host + */ + virtual const Envoy::Router::MetadataMatchCriteria* metadataMatchCriteria() const PURE; +}; + +/** + * Route holds the RouteEntry for a request. + */ +class Route { +public: + virtual ~Route() = default; + + /** + * @return the route entry or nullptr if there is no matching route for the request. + */ + virtual const RouteEntry* routeEntry() const PURE; +}; + +using RouteConstSharedPtr = std::shared_ptr<const Route>; +using RouteSharedPtr = std::shared_ptr<Route>; + +/** + * The router configuration. 
+ */ +class Config { +public: + virtual ~Config() = default; + + virtual RouteConstSharedPtr route(const MessageMetadata& metadata) const PURE; +}; + +class Router : public Tcp::ConnectionPool::UpstreamCallbacks, + public Upstream::LoadBalancerContextBase { + +public: + virtual void sendRequestToUpstream(ActiveMessage& active_message) PURE; + + /** + * Release resources associated with this router. + */ + virtual void reset() PURE; + + /** + * Return host description that is eventually connected. + * @return upstream host if a connection has been established; nullptr otherwise. + */ + virtual Upstream::HostDescriptionConstSharedPtr upstreamHost() PURE; +}; + +using RouterPtr = std::unique_ptr; +} // namespace Router +} // namespace RocketmqProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/source/extensions/filters/network/rocketmq_proxy/router/router_impl.cc b/source/extensions/filters/network/rocketmq_proxy/router/router_impl.cc new file mode 100644 index 0000000000000..425eeec687c23 --- /dev/null +++ b/source/extensions/filters/network/rocketmq_proxy/router/router_impl.cc @@ -0,0 +1,218 @@ +#include "extensions/filters/network/rocketmq_proxy/router/router_impl.h" + +#include "common/common/enum_to_int.h" + +#include "extensions/filters/network/rocketmq_proxy/active_message.h" +#include "extensions/filters/network/rocketmq_proxy/codec.h" +#include "extensions/filters/network/rocketmq_proxy/conn_manager.h" +#include "extensions/filters/network/rocketmq_proxy/protocol.h" +#include "extensions/filters/network/rocketmq_proxy/well_known_names.h" +#include "extensions/filters/network/well_known_names.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RocketmqProxy { +namespace Router { + +RouterImpl::RouterImpl(Envoy::Upstream::ClusterManager& cluster_manager) + : cluster_manager_(cluster_manager), handle_(nullptr), active_message_(nullptr) {} + 
+RouterImpl::~RouterImpl() { + if (handle_) { + handle_->cancel(Tcp::ConnectionPool::CancelPolicy::Default); + } +} + +Upstream::HostDescriptionConstSharedPtr RouterImpl::upstreamHost() { return upstream_host_; } + +void RouterImpl::onAboveWriteBufferHighWatermark() { + ENVOY_LOG(trace, "Above write buffer high watermark"); +} + +void RouterImpl::onBelowWriteBufferLowWatermark() { + ENVOY_LOG(trace, "Below write buffer low watermark"); +} + +void RouterImpl::onEvent(Network::ConnectionEvent event) { + switch (event) { + case Network::ConnectionEvent::RemoteClose: { + ENVOY_LOG(error, "Connection to upstream: {} is closed by remote peer", + upstream_host_->address()->asString()); + // Send local reply to downstream + active_message_->onError("Connection to upstream is closed by remote peer"); + break; + } + case Network::ConnectionEvent::LocalClose: { + ENVOY_LOG(error, "Connection to upstream: {} has been closed", + upstream_host_->address()->asString()); + // Send local reply to downstream + active_message_->onError("Connection to upstream has been closed"); + break; + } + default: + // Ignore other events for now + ENVOY_LOG(trace, "Ignore event type"); + return; + } + active_message_->onReset(); +} + +const Envoy::Router::MetadataMatchCriteria* RouterImpl::metadataMatchCriteria() { + if (route_entry_) { + return route_entry_->metadataMatchCriteria(); + } + return nullptr; +} + +void RouterImpl::onUpstreamData(Buffer::Instance& data, bool end_stream) { + ENVOY_LOG(trace, "Received some data from upstream: {} bytes, end_stream: {}", data.length(), + end_stream); + if (active_message_->onUpstreamData(data, end_stream, connection_data_)) { + reset(); + } +} + +void RouterImpl::sendRequestToUpstream(ActiveMessage& active_message) { + active_message_ = &active_message; + int opaque = active_message_->downstreamRequest()->opaque(); + ASSERT(active_message_->metadata()->hasTopicName()); + std::string topic_name = active_message_->metadata()->topicName(); + + 
RouteConstSharedPtr route = active_message.route(); + if (!route) { + active_message.onError("No route for current request."); + ENVOY_LOG(warn, "Can not find route for topic {}", topic_name); + reset(); + return; + } + + route_entry_ = route->routeEntry(); + const std::string cluster_name = route_entry_->clusterName(); + Upstream::ThreadLocalCluster* cluster = cluster_manager_.get(cluster_name); + if (!cluster) { + active_message.onError("Cluster does not exist."); + ENVOY_LOG(warn, "Cluster for {} is not available", cluster_name); + reset(); + return; + } + + cluster_info_ = cluster->info(); + if (cluster_info_->maintenanceMode()) { + ENVOY_LOG(warn, "Cluster {} is under maintenance. Opaque: {}", cluster_name, opaque); + active_message.onError("Cluster under maintenance."); + active_message.connectionManager().stats().maintenance_failure_.inc(); + reset(); + return; + } + + Tcp::ConnectionPool::Instance* conn_pool = cluster_manager_.tcpConnPoolForCluster( + cluster_name, Upstream::ResourcePriority::Default, this); + if (!conn_pool) { + ENVOY_LOG(warn, "No host available for cluster {}. Opaque: {}", cluster_name, opaque); + active_message.onError("No host available"); + reset(); + return; + } + + upstream_request_ = std::make_unique<UpstreamRequest>(*this); + Tcp::ConnectionPool::Cancellable* cancellable = conn_pool->newConnection(*upstream_request_); + if (cancellable) { + handle_ = cancellable; + ENVOY_LOG(trace, "No connection is available for now. Create a cancellable handle. Opaque: {}", + opaque); + } else { + /* + * UpstreamRequest#onPoolReady or #onPoolFailure should have been invoked. + */ + ENVOY_LOG(trace, + "One connection is picked up from connection pool, callback should have been " + "executed. 
Opaque: {}", + opaque); + } +} + +RouterImpl::UpstreamRequest::UpstreamRequest(RouterImpl& router) : router_(router) {} + +void RouterImpl::UpstreamRequest::onPoolReady(Tcp::ConnectionPool::ConnectionDataPtr&& conn, + Upstream::HostDescriptionConstSharedPtr host) { + router_.connection_data_ = std::move(conn); + router_.upstream_host_ = host; + router_.connection_data_->addUpstreamCallbacks(router_); + if (router_.handle_) { + ENVOY_LOG(trace, "#onPoolReady, reset cancellable handle to nullptr"); + router_.handle_ = nullptr; + } + ENVOY_LOG(debug, "Current chosen host address: {}", host->address()->asString()); + // TODO(lizhanhui): we may optimize out encoding in case we there is no protocol translation. + Buffer::OwnedImpl buffer; + Encoder::encode(router_.active_message_->downstreamRequest(), buffer); + router_.connection_data_->connection().write(buffer, false); + ENVOY_LOG(trace, "Write data to upstream OK. Opaque: {}", + router_.active_message_->downstreamRequest()->opaque()); + + if (router_.active_message_->metadata()->isOneWay()) { + ENVOY_LOG(trace, + "Reset ActiveMessage since data is written and the downstream request is one-way. " + "Opaque: {}", + router_.active_message_->downstreamRequest()->opaque()); + + // For one-way ack-message requests, we need erase previously stored ack-directive. 
+ if (enumToSignedInt(RequestCode::AckMessage) == + router_.active_message_->downstreamRequest()->code()) { + auto ack_header = router_.active_message_->downstreamRequest() + ->typedCustomHeader<AckMessageRequestHeader>(); + router_.active_message_->connectionManager().eraseAckDirective(ack_header->directiveKey()); + } + + router_.reset(); + } +} + +void RouterImpl::UpstreamRequest::onPoolFailure(Tcp::ConnectionPool::PoolFailureReason reason, + Upstream::HostDescriptionConstSharedPtr host) { + if (router_.handle_) { + ENVOY_LOG(trace, "#onPoolFailure, reset cancellable handle to nullptr"); + router_.handle_ = nullptr; + } + switch (reason) { + case Tcp::ConnectionPool::PoolFailureReason::Overflow: { + ENVOY_LOG(error, "Unable to acquire a connection to send request to upstream"); + router_.active_message_->onError("overflow"); + } break; + + case Tcp::ConnectionPool::PoolFailureReason::RemoteConnectionFailure: { + ENVOY_LOG(error, "Failed to make request to upstream due to remote connection error. Host {}", + host->address()->asString()); + router_.active_message_->onError("remote connection failure"); + } break; + + case Tcp::ConnectionPool::PoolFailureReason::LocalConnectionFailure: { + ENVOY_LOG(error, "Failed to make request to upstream due to local connection error. Host: {}", + host->address()->asString()); + router_.active_message_->onError("local connection failure"); + } break; + + case Tcp::ConnectionPool::PoolFailureReason::Timeout: { + ENVOY_LOG(error, "Failed to make request to upstream due to timeout. Host: {}", + host->address()->asString()); + router_.active_message_->onError("timeout"); + } break; + } + + // Release resources allocated to this request. 
+ router_.reset(); +} + +void RouterImpl::reset() { + active_message_->onReset(); + if (connection_data_) { + connection_data_.reset(nullptr); + } +} + +} // namespace Router +} // namespace RocketmqProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/source/extensions/filters/network/rocketmq_proxy/router/router_impl.h b/source/extensions/filters/network/rocketmq_proxy/router/router_impl.h new file mode 100644 index 0000000000000..b3eca29e1e673 --- /dev/null +++ b/source/extensions/filters/network/rocketmq_proxy/router/router_impl.h @@ -0,0 +1,75 @@ +#pragma once + +#include "envoy/tcp/conn_pool.h" +#include "envoy/upstream/cluster_manager.h" +#include "envoy/upstream/thread_local_cluster.h" + +#include "common/common/logger.h" +#include "common/upstream/load_balancer_impl.h" + +#include "extensions/filters/network/rocketmq_proxy/router/router.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RocketmqProxy { +namespace Router { + +class RouterImpl : public Router, public Logger::Loggable { +public: + explicit RouterImpl(Upstream::ClusterManager& cluster_manager); + + ~RouterImpl() override; + + // Tcp::ConnectionPool::UpstreamCallbacks + void onUpstreamData(Buffer::Instance& data, bool end_stream) override; + void onAboveWriteBufferHighWatermark() override; + void onBelowWriteBufferLowWatermark() override; + void onEvent(Network::ConnectionEvent event) override; + + // Upstream::LoadBalancerContextBase + const Envoy::Router::MetadataMatchCriteria* metadataMatchCriteria() override; + + void sendRequestToUpstream(ActiveMessage& active_message) override; + + void reset() override; + + Upstream::HostDescriptionConstSharedPtr upstreamHost() override; + +private: + class UpstreamRequest : public Tcp::ConnectionPool::Callbacks { + public: + UpstreamRequest(RouterImpl& router); + + void onPoolFailure(Tcp::ConnectionPool::PoolFailureReason reason, + 
Upstream::HostDescriptionConstSharedPtr host) override; + + void onPoolReady(Tcp::ConnectionPool::ConnectionDataPtr&& conn, + Upstream::HostDescriptionConstSharedPtr host) override; + + private: + RouterImpl& router_; + }; + using UpstreamRequestPtr = std::unique_ptr; + + Upstream::ClusterManager& cluster_manager_; + Tcp::ConnectionPool::ConnectionDataPtr connection_data_; + + /** + * On requesting connection from upstream connection pool, this handle may be assigned when no + * connection is readily available at the moment. We may cancel the request through this handle. + * + * If there are connections which can be returned immediately, this handle is assigned as nullptr. + */ + Tcp::ConnectionPool::Cancellable* handle_; + Upstream::HostDescriptionConstSharedPtr upstream_host_; + ActiveMessage* active_message_; + Upstream::ClusterInfoConstSharedPtr cluster_info_; + UpstreamRequestPtr upstream_request_; + const RouteEntry* route_entry_{}; +}; +} // namespace Router +} // namespace RocketmqProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/source/extensions/filters/network/rocketmq_proxy/stats.h b/source/extensions/filters/network/rocketmq_proxy/stats.h new file mode 100644 index 0000000000000..13f3122b6effc --- /dev/null +++ b/source/extensions/filters/network/rocketmq_proxy/stats.h @@ -0,0 +1,62 @@ +#pragma once + +#include + +#include "envoy/stats/scope.h" +#include "envoy/stats/stats_macros.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RocketmqProxy { + +/** + * All rocketmq filter stats. 
@see stats_macros.h + */ +#define ALL_ROCKETMQ_FILTER_STATS(COUNTER, GAUGE, HISTOGRAM) \ + COUNTER(request) \ + COUNTER(request_decoding_error) \ + COUNTER(request_decoding_success) \ + COUNTER(response) \ + COUNTER(response_decoding_error) \ + COUNTER(response_decoding_success) \ + COUNTER(response_error) \ + COUNTER(response_success) \ + COUNTER(heartbeat) \ + COUNTER(unregister) \ + COUNTER(get_topic_route) \ + COUNTER(send_message_v1) \ + COUNTER(send_message_v2) \ + COUNTER(pop_message) \ + COUNTER(ack_message) \ + COUNTER(get_consumer_list) \ + COUNTER(maintenance_failure) \ + GAUGE(request_active, Accumulate) \ + GAUGE(send_message_v1_active, Accumulate) \ + GAUGE(send_message_v2_active, Accumulate) \ + GAUGE(pop_message_active, Accumulate) \ + GAUGE(get_topic_route_active, Accumulate) \ + GAUGE(send_message_pending, Accumulate) \ + GAUGE(pop_message_pending, Accumulate) \ + GAUGE(get_topic_route_pending, Accumulate) \ + GAUGE(total_pending, Accumulate) \ + HISTOGRAM(request_time_ms, Milliseconds) + +/** + * Struct definition for all rocketmq proxy stats. 
@see stats_macros.h + */ +struct RocketmqFilterStats { + ALL_ROCKETMQ_FILTER_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT, + GENERATE_HISTOGRAM_STRUCT) + + static RocketmqFilterStats generateStats(const std::string& prefix, Stats::Scope& scope) { + return RocketmqFilterStats{ALL_ROCKETMQ_FILTER_STATS(POOL_COUNTER_PREFIX(scope, prefix), + POOL_GAUGE_PREFIX(scope, prefix), + POOL_HISTOGRAM_PREFIX(scope, prefix))}; + } +}; + +} // namespace RocketmqProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/rocketmq_proxy/topic_route.cc b/source/extensions/filters/network/rocketmq_proxy/topic_route.cc new file mode 100644 index 0000000000000..8c445ab1c6f0e --- /dev/null +++ b/source/extensions/filters/network/rocketmq_proxy/topic_route.cc @@ -0,0 +1,76 @@ +#include "extensions/filters/network/rocketmq_proxy/topic_route.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RocketmqProxy { + +void QueueData::encode(ProtobufWkt::Struct& data_struct) { + auto* fields = data_struct.mutable_fields(); + + ProtobufWkt::Value broker_name_v; + broker_name_v.set_string_value(broker_name_); + (*fields)["brokerName"] = broker_name_v; + + ProtobufWkt::Value read_queue_num_v; + read_queue_num_v.set_number_value(read_queue_nums_); + (*fields)["readQueueNums"] = read_queue_num_v; + + ProtobufWkt::Value write_queue_num_v; + write_queue_num_v.set_number_value(write_queue_nums_); + (*fields)["writeQueueNums"] = write_queue_num_v; + + ProtobufWkt::Value perm_v; + perm_v.set_number_value(perm_); + (*fields)["perm"] = perm_v; +} + +void BrokerData::encode(ProtobufWkt::Struct& data_struct) { + auto& members = *(data_struct.mutable_fields()); + + ProtobufWkt::Value cluster_v; + cluster_v.set_string_value(cluster_); + members["cluster"] = cluster_v; + + ProtobufWkt::Value broker_name_v; + broker_name_v.set_string_value(broker_name_); + members["brokerName"] = broker_name_v; + + if 
(!broker_addrs_.empty()) { + ProtobufWkt::Value brokerAddrsNode; + auto& brokerAddrsMembers = *(brokerAddrsNode.mutable_struct_value()->mutable_fields()); + for (auto& entry : broker_addrs_) { + ProtobufWkt::Value address_v; + address_v.set_string_value(entry.second); + brokerAddrsMembers[std::to_string(entry.first)] = address_v; + } + members["brokerAddrs"] = brokerAddrsNode; + } +} + +void TopicRouteData::encode(ProtobufWkt::Struct& data_struct) { + auto* fields = data_struct.mutable_fields(); + + if (!queue_data_.empty()) { + ProtobufWkt::ListValue queue_data_list_v; + for (auto& queueData : queue_data_) { + queueData.encode(data_struct); + queue_data_list_v.add_values()->mutable_struct_value()->CopyFrom(data_struct); + } + (*fields)["queueDatas"].mutable_list_value()->CopyFrom(queue_data_list_v); + } + + if (!broker_data_.empty()) { + ProtobufWkt::ListValue broker_data_list_v; + for (auto& brokerData : broker_data_) { + brokerData.encode(data_struct); + broker_data_list_v.add_values()->mutable_struct_value()->CopyFrom(data_struct); + } + (*fields)["brokerDatas"].mutable_list_value()->CopyFrom(broker_data_list_v); + } +} + +} // namespace RocketmqProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/source/extensions/filters/network/rocketmq_proxy/topic_route.h b/source/extensions/filters/network/rocketmq_proxy/topic_route.h new file mode 100644 index 0000000000000..f6c1bc9eba193 --- /dev/null +++ b/source/extensions/filters/network/rocketmq_proxy/topic_route.h @@ -0,0 +1,79 @@ +#pragma once + +#include +#include + +#include "common/protobuf/utility.h" + +#include "absl/container/node_hash_map.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RocketmqProxy { +class QueueData { +public: + QueueData(const std::string& broker_name, int32_t read_queue_num, int32_t write_queue_num, + int32_t perm) + : broker_name_(broker_name), 
read_queue_nums_(read_queue_num), + write_queue_nums_(write_queue_num), perm_(perm) {} + + void encode(ProtobufWkt::Struct& data_struct); + + const std::string& brokerName() const { return broker_name_; } + + int32_t readQueueNum() const { return read_queue_nums_; } + + int32_t writeQueueNum() const { return write_queue_nums_; } + + int32_t perm() const { return perm_; } + +private: + std::string broker_name_; + int32_t read_queue_nums_; + int32_t write_queue_nums_; + int32_t perm_; +}; + +class BrokerData { +public: + BrokerData(const std::string& cluster, const std::string& broker_name, + absl::node_hash_map&& broker_addrs) + : cluster_(cluster), broker_name_(broker_name), broker_addrs_(broker_addrs) {} + + void encode(ProtobufWkt::Struct& data_struct); + + const std::string& cluster() const { return cluster_; } + + const std::string& brokerName() const { return broker_name_; } + + absl::node_hash_map& brokerAddresses() { return broker_addrs_; } + +private: + std::string cluster_; + std::string broker_name_; + absl::node_hash_map broker_addrs_; +}; + +class TopicRouteData { +public: + void encode(ProtobufWkt::Struct& data_struct); + + TopicRouteData() = default; + + TopicRouteData(std::vector&& queue_data, std::vector&& broker_data) + : queue_data_(queue_data), broker_data_(broker_data) {} + + std::vector& queueData() { return queue_data_; } + + std::vector& brokerData() { return broker_data_; } + +private: + std::vector queue_data_; + std::vector broker_data_; +}; + +} // namespace RocketmqProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/source/extensions/filters/network/rocketmq_proxy/well_known_names.h b/source/extensions/filters/network/rocketmq_proxy/well_known_names.h new file mode 100644 index 0000000000000..659b387db28bd --- /dev/null +++ b/source/extensions/filters/network/rocketmq_proxy/well_known_names.h @@ -0,0 +1,29 @@ +#pragma once + +#include + +#include 
"common/singleton/const_singleton.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RocketmqProxy { + +struct RocketmqValues { + /** + * All the values below are the properties of single broker in filter_metadata. + */ + const std::string ReadQueueNum = "read_queue_num"; + const std::string WriteQueueNum = "write_queue_num"; + const std::string ClusterName = "cluster_name"; + const std::string BrokerName = "broker_name"; + const std::string BrokerId = "broker_id"; + const std::string Perm = "perm"; +}; + +using RocketmqConstants = ConstSingleton; + +} // namespace RocketmqProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/source/extensions/filters/network/sni_cluster/BUILD b/source/extensions/filters/network/sni_cluster/BUILD index 06dbdf39d2909..e6670b8e42601 100644 --- a/source/extensions/filters/network/sni_cluster/BUILD +++ b/source/extensions/filters/network/sni_cluster/BUILD @@ -1,13 +1,13 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +envoy_extension_package() envoy_cc_library( name = "sni_cluster", diff --git a/source/extensions/filters/network/sni_dynamic_forward_proxy/BUILD b/source/extensions/filters/network/sni_dynamic_forward_proxy/BUILD index b4a08a55260c8..372fce9155e2b 100644 --- a/source/extensions/filters/network/sni_dynamic_forward_proxy/BUILD +++ b/source/extensions/filters/network/sni_dynamic_forward_proxy/BUILD @@ -1,13 +1,13 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +envoy_extension_package() envoy_cc_library( name = "proxy_filter_lib", diff --git 
a/source/extensions/filters/network/sni_dynamic_forward_proxy/config.cc b/source/extensions/filters/network/sni_dynamic_forward_proxy/config.cc index a5f9fa9e18191..aaedee18a560e 100644 --- a/source/extensions/filters/network/sni_dynamic_forward_proxy/config.cc +++ b/source/extensions/filters/network/sni_dynamic_forward_proxy/config.cc @@ -20,7 +20,7 @@ SniDynamicForwardProxyNetworkFilterConfigFactory::createFilterFactoryFromProtoTy Extensions::Common::DynamicForwardProxy::DnsCacheManagerFactoryImpl cache_manager_factory( context.singletonManager(), context.dispatcher(), context.threadLocal(), context.random(), - context.scope()); + context.runtime(), context.scope()); ProxyFilterConfigSharedPtr filter_config(std::make_shared( proto_config, cache_manager_factory, context.clusterManager())); diff --git a/source/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter.cc b/source/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter.cc index d9eeceb120986..9b3584de72c48 100644 --- a/source/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter.cc +++ b/source/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter.cc @@ -15,11 +15,10 @@ namespace SniDynamicForwardProxy { ProxyFilterConfig::ProxyFilterConfig( const FilterConfig& proto_config, Extensions::Common::DynamicForwardProxy::DnsCacheManagerFactory& cache_manager_factory, - Upstream::ClusterManager& cluster_manager) + Upstream::ClusterManager&) : port_(static_cast(proto_config.port_value())), dns_cache_manager_(cache_manager_factory.get()), - dns_cache_(dns_cache_manager_->getCache(proto_config.dns_cache_config())), - cluster_manager_(cluster_manager) {} + dns_cache_(dns_cache_manager_->getCache(proto_config.dns_cache_config())) {} ProxyFilter::ProxyFilter(ProxyFilterConfigSharedPtr config) : config_(std::move(config)) {} @@ -34,14 +33,23 @@ Network::FilterStatus ProxyFilter::onNewConnection() { return Network::FilterStatus::Continue; } - // TODO(lizan): implement 
circuit breaker in SNI dynamic forward proxy like it is in HTTP: - // https://github.com/envoyproxy/envoy/blob/master/source/extensions/filters/http/dynamic_forward_proxy/proxy_filter.cc#L65 + circuit_breaker_ = config_->cache().canCreateDnsRequest(absl::nullopt); + + if (circuit_breaker_ == nullptr) { + ENVOY_CONN_LOG(debug, "pending request overflow", read_callbacks_->connection()); + read_callbacks_->connection().close(Network::ConnectionCloseType::NoFlush); + return Network::FilterStatus::StopIteration; + } uint32_t default_port = config_->port(); auto result = config_->cache().loadDnsCacheEntry(sni, default_port, *this); cache_load_handle_ = std::move(result.handle_); + if (cache_load_handle_ == nullptr) { + circuit_breaker_.reset(); + } + switch (result.status_) { case LoadDnsCacheEntryStatus::InCache: { ASSERT(cache_load_handle_ == nullptr); @@ -67,6 +75,8 @@ Network::FilterStatus ProxyFilter::onNewConnection() { void ProxyFilter::onLoadDnsCacheComplete() { ENVOY_CONN_LOG(debug, "load DNS cache complete, continuing", read_callbacks_->connection()); + ASSERT(circuit_breaker_ != nullptr); + circuit_breaker_.reset(); read_callbacks_->continueReading(); } diff --git a/source/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter.h b/source/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter.h index 49f66aba3fb61..65cd7235b71e7 100644 --- a/source/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter.h +++ b/source/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter.h @@ -24,14 +24,12 @@ class ProxyFilterConfig { Upstream::ClusterManager& cluster_manager); Extensions::Common::DynamicForwardProxy::DnsCache& cache() { return *dns_cache_; } - Upstream::ClusterManager& clusterManager() { return cluster_manager_; } uint32_t port() { return port_; } private: const uint32_t port_; const Extensions::Common::DynamicForwardProxy::DnsCacheManagerSharedPtr dns_cache_manager_; const 
Extensions::Common::DynamicForwardProxy::DnsCacheSharedPtr dns_cache_; - Upstream::ClusterManager& cluster_manager_; }; using ProxyFilterConfigSharedPtr = std::shared_ptr; @@ -42,6 +40,7 @@ class ProxyFilter Logger::Loggable { public: ProxyFilter(ProxyFilterConfigSharedPtr config); + // Network::ReadFilter Network::FilterStatus onData(Buffer::Instance&, bool) override { return Network::FilterStatus::Continue; @@ -56,6 +55,7 @@ class ProxyFilter private: const ProxyFilterConfigSharedPtr config_; + Upstream::ResourceAutoIncDecPtr circuit_breaker_; Extensions::Common::DynamicForwardProxy::DnsCache::LoadDnsCacheEntryHandlePtr cache_load_handle_; Network::ReadFilterCallbacks* read_callbacks_{}; }; diff --git a/source/extensions/filters/network/tcp_proxy/BUILD b/source/extensions/filters/network/tcp_proxy/BUILD index 9b6db30952a0a..d6d7495e9122e 100644 --- a/source/extensions/filters/network/tcp_proxy/BUILD +++ b/source/extensions/filters/network/tcp_proxy/BUILD @@ -1,21 +1,23 @@ -licenses(["notice"]) # Apache 2 - -# TCP proxy L4 network filter. -# Public docs: docs/root/configuration/network_filters/tcp_proxy_filter.rst - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +# TCP proxy L4 network filter. +# Public docs: docs/root/configuration/network_filters/tcp_proxy_filter.rst + +envoy_extension_package() envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], security_posture = "robust_to_untrusted_downstream", + # This is core Envoy config. 
+ visibility = ["//visibility:public"], deps = [ "//include/envoy/registry", "//source/common/tcp_proxy", diff --git a/source/extensions/filters/network/thrift_proxy/BUILD b/source/extensions/filters/network/thrift_proxy/BUILD index cad78eb1d783c..78f484da3f9e8 100644 --- a/source/extensions/filters/network/thrift_proxy/BUILD +++ b/source/extensions/filters/network/thrift_proxy/BUILD @@ -1,13 +1,13 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +envoy_extension_package() envoy_cc_library( name = "app_exception_lib", diff --git a/source/extensions/filters/network/thrift_proxy/config.h b/source/extensions/filters/network/thrift_proxy/config.h index 62a123936bac4..532298c380e3b 100644 --- a/source/extensions/filters/network/thrift_proxy/config.h +++ b/source/extensions/filters/network/thrift_proxy/config.h @@ -52,7 +52,8 @@ class ThriftProxyFilterConfigFactory Upstream::ProtocolOptionsConfigConstSharedPtr createProtocolOptionsTyped( const envoy::extensions::filters::network::thrift_proxy::v3::ThriftProtocolOptions& - proto_config) override { + proto_config, + Server::Configuration::ProtocolOptionsFactoryContext&) override { return std::make_shared(proto_config); } }; diff --git a/source/extensions/filters/network/thrift_proxy/conn_manager.cc b/source/extensions/filters/network/thrift_proxy/conn_manager.cc index 8f697c0a606df..737e707369785 100644 --- a/source/extensions/filters/network/thrift_proxy/conn_manager.cc +++ b/source/extensions/filters/network/thrift_proxy/conn_manager.cc @@ -12,7 +12,7 @@ namespace Extensions { namespace NetworkFilters { namespace ThriftProxy { -ConnectionManager::ConnectionManager(Config& config, Runtime::RandomGenerator& random_generator, +ConnectionManager::ConnectionManager(Config& config, Random::RandomGenerator& random_generator, TimeSource& time_source) : 
config_(config), stats_(config_.stats()), transport_(config.createTransport()), protocol_(config.createProtocol()), @@ -171,7 +171,7 @@ DecoderEventHandler& ConnectionManager::newDecoderEventHandler() { ActiveRpcPtr new_rpc(new ActiveRpc(*this)); new_rpc->createFilterChain(); - new_rpc->moveIntoList(std::move(new_rpc), rpcs_); + LinkedList::moveIntoList(std::move(new_rpc), rpcs_); return **rpcs_.begin(); } @@ -201,13 +201,30 @@ FilterStatus ConnectionManager::ResponseDecoder::fieldBegin(absl::string_view na // Reply messages contain a struct where field 0 is the call result and fields 1+ are // exceptions, if defined. At most one field may be set. Therefore, the very first field we // encounter in a reply is either field 0 (success) or not (IDL exception returned). - success_ = field_id == 0 && field_type != FieldType::Stop; + // If first fieldType is FieldType::Stop then it is a void success and handled in messageEnd() + // because decoder state machine does not call decoder event callback fieldBegin on + // FieldType::Stop. + success_ = (field_id == 0); first_reply_field_ = false; } return ProtocolConverter::fieldBegin(name, field_type, field_id); } +FilterStatus ConnectionManager::ResponseDecoder::messageEnd() { + if (first_reply_field_) { + // When the response is thrift void type there is never a fieldBegin call on a success + // because the response struct has no fields and so the first field type is FieldType::Stop. + // The decoder state machine handles FieldType::Stop by going immediately to structEnd, + // skipping fieldBegin callback. Therefore if we are still waiting for the first reply field + // at end of message then it is a void success. 
+ success_ = true; + first_reply_field_ = false; + } + + return ProtocolConverter::messageEnd(); +} + FilterStatus ConnectionManager::ResponseDecoder::transportEnd() { ASSERT(metadata_ != nullptr); diff --git a/source/extensions/filters/network/thrift_proxy/conn_manager.h b/source/extensions/filters/network/thrift_proxy/conn_manager.h index 7bbf35710ced4..b7408e1a3def6 100644 --- a/source/extensions/filters/network/thrift_proxy/conn_manager.h +++ b/source/extensions/filters/network/thrift_proxy/conn_manager.h @@ -1,10 +1,10 @@ #pragma once #include "envoy/common/pure.h" +#include "envoy/common/random_generator.h" #include "envoy/event/deferred_deletable.h" #include "envoy/network/connection.h" #include "envoy/network/filter.h" -#include "envoy/runtime/runtime.h" #include "envoy/stats/timespan.h" #include "common/buffer/buffer_impl.h" @@ -60,7 +60,7 @@ class ConnectionManager : public Network::ReadFilter, public DecoderCallbacks, Logger::Loggable { public: - ConnectionManager(Config& config, Runtime::RandomGenerator& random_generator, + ConnectionManager(Config& config, Random::RandomGenerator& random_generator, TimeSource& time_system); ~ConnectionManager() override; @@ -91,6 +91,7 @@ class ConnectionManager : public Network::ReadFilter, // ProtocolConverter FilterStatus messageBegin(MessageMetadataSharedPtr metadata) override; + FilterStatus messageEnd() override; FilterStatus fieldBegin(absl::string_view name, FieldType& field_type, int16_t& field_id) override; FilterStatus transportBegin(MessageMetadataSharedPtr metadata) override { @@ -221,7 +222,7 @@ class ConnectionManager : public Network::ReadFilter, void addDecoderFilter(ThriftFilters::DecoderFilterSharedPtr filter) override { ActiveRpcDecoderFilterPtr wrapper = std::make_unique(*this, filter); filter->setDecoderFilterCallbacks(*wrapper); - wrapper->moveIntoListBack(std::move(wrapper), decoder_filters_); + LinkedList::moveIntoListBack(std::move(wrapper), decoder_filters_); } FilterStatus 
applyDecoderFilters(ActiveRpcDecoderFilter* filter); @@ -267,7 +268,7 @@ class ConnectionManager : public Network::ReadFilter, DecoderPtr decoder_; std::list rpcs_; Buffer::OwnedImpl request_buffer_; - Runtime::RandomGenerator& random_generator_; + Random::RandomGenerator& random_generator_; bool stopped_{false}; bool half_closed_{false}; TimeSource& time_source_; diff --git a/source/extensions/filters/network/thrift_proxy/decoder.cc b/source/extensions/filters/network/thrift_proxy/decoder.cc index 487e026232f63..73a12ff23377b 100644 --- a/source/extensions/filters/network/thrift_proxy/decoder.cc +++ b/source/extensions/filters/network/thrift_proxy/decoder.cc @@ -1,7 +1,5 @@ #include "extensions/filters/network/thrift_proxy/decoder.h" -#include - #include "envoy/common/exception.h" #include "common/common/assert.h" @@ -111,13 +109,16 @@ DecoderStateMachine::DecoderStatus DecoderStateMachine::listBegin(Buffer::Instan // ListValue -> ListEnd DecoderStateMachine::DecoderStatus DecoderStateMachine::listValue(Buffer::Instance& buffer) { ASSERT(!stack_.empty()); - Frame& frame = stack_.back(); - if (frame.remaining_ == 0) { + const uint32_t index = stack_.size() - 1; + if (stack_[index].remaining_ == 0) { return {popReturnState(), FilterStatus::Continue}; } - frame.remaining_--; + DecoderStatus status = handleValue(buffer, stack_[index].elem_type_, ProtocolState::ListValue); + if (status.next_state_ != ProtocolState::WaitForData) { + stack_[index].remaining_--; + } - return handleValue(buffer, frame.elem_type_, ProtocolState::ListValue); + return status; } // ListEnd -> stack's return state @@ -159,11 +160,14 @@ DecoderStateMachine::DecoderStatus DecoderStateMachine::mapKey(Buffer::Instance& // MapValue -> MapKey DecoderStateMachine::DecoderStatus DecoderStateMachine::mapValue(Buffer::Instance& buffer) { ASSERT(!stack_.empty()); - Frame& frame = stack_.back(); - ASSERT(frame.remaining_ != 0); - frame.remaining_--; + const uint32_t index = stack_.size() - 1; + 
ASSERT(stack_[index].remaining_ != 0); + DecoderStatus status = handleValue(buffer, stack_[index].value_type_, ProtocolState::MapKey); + if (status.next_state_ != ProtocolState::WaitForData) { + stack_[index].remaining_--; + } - return handleValue(buffer, frame.value_type_, ProtocolState::MapKey); + return status; } // MapEnd -> stack's return state @@ -193,13 +197,16 @@ DecoderStateMachine::DecoderStatus DecoderStateMachine::setBegin(Buffer::Instanc // SetValue -> SetEnd DecoderStateMachine::DecoderStatus DecoderStateMachine::setValue(Buffer::Instance& buffer) { ASSERT(!stack_.empty()); - Frame& frame = stack_.back(); - if (frame.remaining_ == 0) { + const uint32_t index = stack_.size() - 1; + if (stack_[index].remaining_ == 0) { return {popReturnState(), FilterStatus::Continue}; } - frame.remaining_--; + DecoderStatus status = handleValue(buffer, stack_[index].elem_type_, ProtocolState::SetValue); + if (status.next_state_ != ProtocolState::WaitForData) { + stack_[index].remaining_--; + } - return handleValue(buffer, frame.elem_type_, ProtocolState::SetValue); + return status; } // SetEnd -> stack's return state diff --git a/source/extensions/filters/network/thrift_proxy/filters/BUILD b/source/extensions/filters/network/thrift_proxy/filters/BUILD index ba1054d990cf0..a1b91d2868090 100644 --- a/source/extensions/filters/network/thrift_proxy/filters/BUILD +++ b/source/extensions/filters/network/thrift_proxy/filters/BUILD @@ -1,12 +1,12 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +envoy_extension_package() envoy_cc_library( name = "filter_config_interface", diff --git a/source/extensions/filters/network/thrift_proxy/filters/ratelimit/BUILD b/source/extensions/filters/network/thrift_proxy/filters/ratelimit/BUILD index 953db0fdffc72..7252afc340a74 100644 --- 
a/source/extensions/filters/network/thrift_proxy/filters/ratelimit/BUILD +++ b/source/extensions/filters/network/thrift_proxy/filters/ratelimit/BUILD @@ -1,13 +1,13 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +envoy_extension_package() envoy_cc_library( name = "ratelimit_lib", diff --git a/source/extensions/filters/network/thrift_proxy/filters/ratelimit/config.cc b/source/extensions/filters/network/thrift_proxy/filters/ratelimit/config.cc index 43e2fd28f9916..9813ec583c97d 100644 --- a/source/extensions/filters/network/thrift_proxy/filters/ratelimit/config.cc +++ b/source/extensions/filters/network/thrift_proxy/filters/ratelimit/config.cc @@ -34,7 +34,8 @@ RateLimitFilterConfig::createFilterFactoryFromProtoTyped( config](ThriftProxy::ThriftFilters::FilterChainFactoryCallbacks& callbacks) -> void { callbacks.addDecoderFilter(std::make_shared( config, Filters::Common::RateLimit::rateLimitClient( - context, proto_config.rate_limit_service().grpc_service(), timeout))); + context, proto_config.rate_limit_service().grpc_service(), timeout, + proto_config.rate_limit_service().transport_api_version()))); }; } diff --git a/source/extensions/filters/network/thrift_proxy/filters/ratelimit/ratelimit.cc b/source/extensions/filters/network/thrift_proxy/filters/ratelimit/ratelimit.cc index 2c85cb099818b..e26a565f5856a 100644 --- a/source/extensions/filters/network/thrift_proxy/filters/ratelimit/ratelimit.cc +++ b/source/extensions/filters/network/thrift_proxy/filters/ratelimit/ratelimit.cc @@ -58,11 +58,13 @@ void Filter::onDestroy() { } void Filter::complete(Filters::Common::RateLimit::LimitStatus status, + Filters::Common::RateLimit::DescriptorStatusListPtr&& descriptor_statuses, Http::ResponseHeaderMapPtr&& response_headers_to_add, Http::RequestHeaderMapPtr&& request_headers_to_add) { // 
TODO(zuercher): Store headers to append to a response. Adding them to a local reply (over // limit or error) is a matter of modifying the callbacks to allow it. Adding them to an upstream // response requires either response (aka encoder) filters or some other mechanism. + UNREFERENCED_PARAMETER(descriptor_statuses); UNREFERENCED_PARAMETER(response_headers_to_add); UNREFERENCED_PARAMETER(request_headers_to_add); diff --git a/source/extensions/filters/network/thrift_proxy/filters/ratelimit/ratelimit.h b/source/extensions/filters/network/thrift_proxy/filters/ratelimit/ratelimit.h index 5226c0356a6db..caa5333cda651 100644 --- a/source/extensions/filters/network/thrift_proxy/filters/ratelimit/ratelimit.h +++ b/source/extensions/filters/network/thrift_proxy/filters/ratelimit/ratelimit.h @@ -38,7 +38,6 @@ class Config { const std::string& domain() const { return domain_; } const LocalInfo::LocalInfo& localInfo() const { return local_info_; } uint32_t stage() const { return stage_; } - Stats::Scope& scope() { return scope_; } Runtime::Loader& runtime() { return runtime_; } Upstream::ClusterManager& cm() { return cm_; } bool failureModeAllow() const { return !failure_mode_deny_; }; @@ -78,6 +77,7 @@ class Filter : public ThriftProxy::ThriftFilters::PassThroughDecoderFilter, // RateLimit::RequestCallbacks void complete(Filters::Common::RateLimit::LimitStatus status, + Filters::Common::RateLimit::DescriptorStatusListPtr&& descriptor_statuses, Http::ResponseHeaderMapPtr&& response_headers_to_add, Http::RequestHeaderMapPtr&& request_headers_to_add) override; diff --git a/source/extensions/filters/network/thrift_proxy/header_transport_impl.cc b/source/extensions/filters/network/thrift_proxy/header_transport_impl.cc index 26885a842de47..0a548452c8200 100644 --- a/source/extensions/filters/network/thrift_proxy/header_transport_impl.cc +++ b/source/extensions/filters/network/thrift_proxy/header_transport_impl.cc @@ -8,6 +8,8 @@ #include 
"extensions/filters/network/thrift_proxy/buffer_helper.h" +#include "absl/strings/str_replace.h" + namespace Envoy { namespace Extensions { namespace NetworkFilters { @@ -143,8 +145,11 @@ bool HeaderTransportImpl::decodeFrameStart(Buffer::Instance& buffer, MessageMeta } while (num_headers-- > 0) { - const Http::LowerCaseString key = - Http::LowerCaseString(drainVarString(buffer, header_size, "header key")); + std::string key_string = drainVarString(buffer, header_size, "header key"); + // LowerCaseString doesn't allow '\0', '\n', and '\r'. + key_string = + absl::StrReplaceAll(key_string, {{std::string(1, '\0'), ""}, {"\n", ""}, {"\r", ""}}); + const Http::LowerCaseString key = Http::LowerCaseString(key_string); const std::string value = drainVarString(buffer, header_size, "header value"); metadata.headers().addCopy(key, value); } @@ -205,14 +210,11 @@ void HeaderTransportImpl::encodeFrame(Buffer::Instance& buffer, const MessageMet // Num headers BufferHelper::writeVarIntI32(header_buffer, static_cast(headers.size())); - headers.iterate( - [](const Http::HeaderEntry& header, void* context) -> Http::HeaderMap::Iterate { - Buffer::Instance* hb = static_cast(context); - writeVarString(*hb, header.key().getStringView()); - writeVarString(*hb, header.value().getStringView()); - return Http::HeaderMap::Iterate::Continue; - }, - &header_buffer); + headers.iterate([&header_buffer](const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate { + writeVarString(header_buffer, header.key().getStringView()); + writeVarString(header_buffer, header.value().getStringView()); + return Http::HeaderMap::Iterate::Continue; + }); } uint64_t header_size = header_buffer.length(); diff --git a/source/extensions/filters/network/thrift_proxy/header_transport_impl.h b/source/extensions/filters/network/thrift_proxy/header_transport_impl.h index c2d97ead1607c..e7eb6d3e18ac9 100644 --- a/source/extensions/filters/network/thrift_proxy/header_transport_impl.h +++ 
b/source/extensions/filters/network/thrift_proxy/header_transport_impl.h @@ -45,15 +45,6 @@ class HeaderTransportImpl : public Transport { const char* desc); static void writeVarString(Buffer::Instance& buffer, const absl::string_view str); - void setException(AppExceptionType type, std::string reason) { - if (exception_.has_value()) { - return; - } - - exception_ = type; - exception_reason_ = reason; - } - absl::optional exception_; std::string exception_reason_; }; diff --git a/source/extensions/filters/network/thrift_proxy/metadata.h b/source/extensions/filters/network/thrift_proxy/metadata.h index 7ee3e68f297fd..f99be53d812b5 100644 --- a/source/extensions/filters/network/thrift_proxy/metadata.h +++ b/source/extensions/filters/network/thrift_proxy/metadata.h @@ -54,8 +54,8 @@ class MessageMetadata { /** * @return HeaderMap of current headers (never throws) */ - const Http::HeaderMap& headers() const { return headers_; } - Http::HeaderMap& headers() { return headers_; } + const Http::HeaderMap& headers() const { return *headers_; } + Http::HeaderMap& headers() { return *headers_; } /** * @return SpanList an immutable list of Spans @@ -65,7 +65,7 @@ class MessageMetadata { /** * @return SpanList& a reference to a mutable list of Spans */ - SpanList& mutable_spans() { return spans_; } + SpanList& mutableSpans() { return spans_; } bool hasAppException() const { return app_ex_type_.has_value(); } void setAppException(AppExceptionType app_ex_type, const std::string& message) { @@ -104,7 +104,7 @@ class MessageMetadata { absl::optional method_name_{}; absl::optional seq_id_{}; absl::optional msg_type_{}; - Http::HeaderMapImpl headers_; + Http::HeaderMapPtr headers_{Http::RequestHeaderMapImpl::create()}; absl::optional app_ex_type_; absl::optional app_ex_msg_; bool protocol_upgrade_message_{false}; diff --git a/source/extensions/filters/network/thrift_proxy/protocol_converter.h b/source/extensions/filters/network/thrift_proxy/protocol_converter.h index 
47ab48ac8204f..2d73f4c9498bd 100644 --- a/source/extensions/filters/network/thrift_proxy/protocol_converter.h +++ b/source/extensions/filters/network/thrift_proxy/protocol_converter.h @@ -122,9 +122,6 @@ class ProtocolConverter : public virtual DecoderEventHandler { return FilterStatus::Continue; } -protected: - ProtocolType protocolType() const { return proto_->type(); } - private: Protocol* proto_; Buffer::Instance* buffer_{}; diff --git a/source/extensions/filters/network/thrift_proxy/router/BUILD b/source/extensions/filters/network/thrift_proxy/router/BUILD index f268e6a85cbd8..00e32bbf06a25 100644 --- a/source/extensions/filters/network/thrift_proxy/router/BUILD +++ b/source/extensions/filters/network/thrift_proxy/router/BUILD @@ -1,13 +1,13 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +envoy_extension_package() envoy_cc_extension( name = "config", diff --git a/source/extensions/filters/network/thrift_proxy/router/router_impl.h b/source/extensions/filters/network/thrift_proxy/router/router_impl.h index c41793a8066cc..26a94c90c7534 100644 --- a/source/extensions/filters/network/thrift_proxy/router/router_impl.h +++ b/source/extensions/filters/network/thrift_proxy/router/router_impl.h @@ -127,8 +127,6 @@ class MethodNameRouteEntryImpl : public RouteEntryImplBase { MethodNameRouteEntryImpl( const envoy::extensions::filters::network::thrift_proxy::v3::Route& route); - const std::string& methodName() const { return method_name_; } - // RouteEntryImplBase RouteConstSharedPtr matches(const MessageMetadata& metadata, uint64_t random_value) const override; @@ -143,8 +141,6 @@ class ServiceNameRouteEntryImpl : public RouteEntryImplBase { ServiceNameRouteEntryImpl( const envoy::extensions::filters::network::thrift_proxy::v3::Route& route); - const std::string& serviceName() const { return 
service_name_; } - // RouteEntryImplBase RouteConstSharedPtr matches(const MessageMetadata& metadata, uint64_t random_value) const override; diff --git a/source/extensions/filters/network/thrift_proxy/twitter_protocol_impl.cc b/source/extensions/filters/network/thrift_proxy/twitter_protocol_impl.cc index 37a2961f891e2..7f1ca57592cdb 100644 --- a/source/extensions/filters/network/thrift_proxy/twitter_protocol_impl.cc +++ b/source/extensions/filters/network/thrift_proxy/twitter_protocol_impl.cc @@ -8,6 +8,8 @@ #include "extensions/filters/network/thrift_proxy/thrift_object_impl.h" #include "extensions/filters/network/thrift_proxy/unframed_transport_impl.h" +#include "absl/strings/str_replace.h" + namespace Envoy { namespace Extensions { namespace NetworkFilters { @@ -384,28 +386,24 @@ class RequestHeader { sampled_ = metadata.sampled().value(); } - metadata.headers().iterate( - [](const Http::HeaderEntry& header, void* cb) -> Http::HeaderMap::Iterate { - absl::string_view key = header.key().getStringView(); - if (key.empty()) { - return Http::HeaderMap::Iterate::Continue; - } - - RequestHeader& rh = *static_cast(cb); - if (key == Headers::get().ClientId.get()) { - rh.client_id_ = ClientId(std::string(header.value().getStringView())); - } else if (key == Headers::get().Dest.get()) { - rh.dest_ = std::string(header.value().getStringView()); - } else if (key.find(":d:") == 0 && key.size() > 3) { - rh.delegations_.emplace_back(std::string(key.substr(3)), - std::string(header.value().getStringView())); - } else if (key[0] != ':') { - rh.contexts_.emplace_back(std::string(key), - std::string(header.value().getStringView())); - } - return Http::HeaderMap::Iterate::Continue; - }, - this); + metadata.headers().iterate([this](const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate { + absl::string_view key = header.key().getStringView(); + if (key.empty()) { + return Http::HeaderMap::Iterate::Continue; + } + + if (key == Headers::get().ClientId.get()) { + client_id_ = 
ClientId(std::string(header.value().getStringView())); + } else if (key == Headers::get().Dest.get()) { + dest_ = std::string(header.value().getStringView()); + } else if (key.find(":d:") == 0 && key.size() > 3) { + delegations_.emplace_back(std::string(key.substr(3)), + std::string(header.value().getStringView())); + } else if (key[0] != ':') { + contexts_.emplace_back(std::string(key), std::string(header.value().getStringView())); + } + return Http::HeaderMap::Iterate::Continue; + }); } void write(Buffer::Instance& buffer) { @@ -575,16 +573,13 @@ class ResponseHeader { } } ResponseHeader(const MessageMetadata& metadata) : spans_(metadata.spans()) { - metadata.headers().iterate( - [](const Http::HeaderEntry& header, void* cb) -> Http::HeaderMap::Iterate { - absl::string_view key = header.key().getStringView(); - if (!key.empty() && key[0] != ':') { - static_cast*>(cb)->emplace_back( - std::string(key), std::string(header.value().getStringView())); - } - return Http::HeaderMap::Iterate::Continue; - }, - &contexts_); + metadata.headers().iterate([this](const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate { + absl::string_view key = header.key().getStringView(); + if (!key.empty() && key[0] != ':') { + contexts_.emplace_back(std::string(key), std::string(header.value().getStringView())); + } + return Http::HeaderMap::Iterate::Continue; + }); } void write(Buffer::Instance& buffer) { @@ -1029,7 +1024,10 @@ void TwitterProtocolImpl::updateMetadataWithRequestHeader(const ThriftObject& he metadata.setFlags(*req_header.flags()); } for (const auto& context : *req_header.contexts()) { - headers.addCopy(Http::LowerCaseString{context.key_}, context.value_); + // LowerCaseString doesn't allow '\0', '\n', and '\r'. 
+ const std::string key = + absl::StrReplaceAll(context.key_, {{std::string(1, '\0'), ""}, {"\n", ""}, {"\r", ""}}); + headers.addCopy(Http::LowerCaseString{key}, context.value_); } if (req_header.dest()) { headers.addReferenceKey(Headers::get().Dest, *req_header.dest()); @@ -1037,7 +1035,10 @@ void TwitterProtocolImpl::updateMetadataWithRequestHeader(const ThriftObject& he // TODO(zuercher): Delegations are stored as headers for now. Consider passing them as simple // objects for (const auto& delegation : *req_header.delegations()) { - std::string key = fmt::format(":d:{}", delegation.src_); + // LowerCaseString doesn't allow '\0', '\n', and '\r'. + const std::string src = + absl::StrReplaceAll(delegation.src_, {{std::string(1, '\0'), ""}, {"\n", ""}, {"\r", ""}}); + const std::string key = fmt::format(":d:{}", src); headers.addCopy(Http::LowerCaseString{key}, delegation.dst_); } if (req_header.traceIdHigh()) { @@ -1057,11 +1058,14 @@ void TwitterProtocolImpl::updateMetadataWithResponseHeader(const ThriftObject& h Http::HeaderMap& headers = metadata.headers(); for (const auto& context : resp_header.contexts()) { - headers.addCopy(Http::LowerCaseString(context.key_), context.value_); + // LowerCaseString doesn't allow '\0', '\n', and '\r'. 
+ const std::string key = + absl::StrReplaceAll(context.key_, {{std::string(1, '\0'), ""}, {"\n", ""}, {"\r", ""}}); + headers.addCopy(Http::LowerCaseString(key), context.value_); } SpanList& spans = resp_header.spans(); - std::copy(spans.begin(), spans.end(), std::back_inserter(metadata.mutable_spans())); + std::copy(spans.begin(), spans.end(), std::back_inserter(metadata.mutableSpans())); } void TwitterProtocolImpl::writeResponseHeader(Buffer::Instance& buffer, diff --git a/source/extensions/filters/network/well_known_names.h b/source/extensions/filters/network/well_known_names.h index a7577b8ffd2c3..78564a5a990fc 100644 --- a/source/extensions/filters/network/well_known_names.h +++ b/source/extensions/filters/network/well_known_names.h @@ -18,6 +18,8 @@ class NetworkFilterNameValues { const std::string Echo = "envoy.filters.network.echo"; // Direct response filter const std::string DirectResponse = "envoy.filters.network.direct_response"; + // RocketMQ proxy filter + const std::string RocketmqProxy = "envoy.filters.network.rocketmq_proxy"; // Dubbo proxy filter const std::string DubboProxy = "envoy.filters.network.dubbo_proxy"; // HTTP connection manager filter @@ -29,7 +31,7 @@ class NetworkFilterNameValues { // MySQL proxy filter const std::string MySQLProxy = "envoy.filters.network.mysql_proxy"; // Postgres proxy filter - const std::string Postgres = "envoy.filters.network.postgres_proxy"; + const std::string PostgresProxy = "envoy.filters.network.postgres_proxy"; // Rate limit filter const std::string RateLimit = "envoy.filters.network.ratelimit"; // Redis proxy filter diff --git a/source/extensions/filters/network/zookeeper_proxy/BUILD b/source/extensions/filters/network/zookeeper_proxy/BUILD index 5582ecdec9fed..8dc6e07913921 100644 --- a/source/extensions/filters/network/zookeeper_proxy/BUILD +++ b/source/extensions/filters/network/zookeeper_proxy/BUILD @@ -1,16 +1,16 @@ -licenses(["notice"]) # Apache 2 - -# ZooKeeper proxy L7 network filter. 
-# Public docs: docs/root/configuration/network_filters/zookeeper_proxy_filter.rst - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +# ZooKeeper proxy L7 network filter. +# Public docs: docs/root/configuration/network_filters/zookeeper_proxy_filter.rst + +envoy_extension_package() envoy_cc_library( name = "proxy_lib", diff --git a/source/extensions/filters/network/zookeeper_proxy/decoder.cc b/source/extensions/filters/network/zookeeper_proxy/decoder.cc index b3b40c3ab9d9b..56877b8ac82ef 100644 --- a/source/extensions/filters/network/zookeeper_proxy/decoder.cc +++ b/source/extensions/filters/network/zookeeper_proxy/decoder.cc @@ -176,15 +176,18 @@ void DecoderImpl::decodeOnWrite(Buffer::Instance& data, uint64_t& offset) { const auto xid = helper_.peekInt32(data, offset); const auto xid_code = static_cast(xid); - // Find the corresponding request for this XID. - const auto it = requests_by_xid_.find(xid); - std::chrono::milliseconds latency; OpCodes opcode; if (xid_code != XidCodes::WatchXid) { - // If this fails, it's a server-side bug. - ASSERT(it != requests_by_xid_.end()); + // Find the corresponding request for this XID. + const auto it = requests_by_xid_.find(xid); + + // If this fails, it's either a server-side bug or a malformed packet. 
+ if (it == requests_by_xid_.end()) { + throw EnvoyException("xid not found"); + } + latency = std::chrono::duration_cast(time_source_.monotonicTime() - it->second.start_time); opcode = it->second.opcode; diff --git a/source/extensions/filters/network/zookeeper_proxy/decoder.h b/source/extensions/filters/network/zookeeper_proxy/decoder.h index 6492f2179f5c6..85b99fdffbf73 100644 --- a/source/extensions/filters/network/zookeeper_proxy/decoder.h +++ b/source/extensions/filters/network/zookeeper_proxy/decoder.h @@ -10,6 +10,8 @@ #include "extensions/filters/network/zookeeper_proxy/utils.h" +#include "absl/container/node_hash_map.h" + namespace Envoy { namespace Extensions { namespace NetworkFilters { @@ -169,7 +171,7 @@ class DecoderImpl : public Decoder, Logger::Loggable { const uint32_t max_packet_bytes_; BufferHelper helper_; TimeSource& time_source_; - std::unordered_map requests_by_xid_; + absl::node_hash_map requests_by_xid_; }; } // namespace ZooKeeperProxy diff --git a/source/extensions/filters/network/zookeeper_proxy/filter.cc b/source/extensions/filters/network/zookeeper_proxy/filter.cc index 331d8a476e692..b6c38c0ec2971 100644 --- a/source/extensions/filters/network/zookeeper_proxy/filter.cc +++ b/source/extensions/filters/network/zookeeper_proxy/filter.cc @@ -154,11 +154,11 @@ void ZooKeeperFilter::onPing() { } void ZooKeeperFilter::onAuthRequest(const std::string& scheme) { - Stats::SymbolTable::StoragePtr storage = config_->scope_.symbolTable().join( - {config_->stat_prefix_, config_->auth_, - config_->stat_name_set_->getBuiltin(absl::StrCat(scheme, "_rq"), - config_->unknown_scheme_rq_)}); - config_->scope_.counterFromStatName(Stats::StatName(storage.get())).inc(); + Stats::Counter& counter = Stats::Utility::counterFromStatNames( + config_->scope_, {config_->stat_prefix_, config_->auth_, + config_->stat_name_set_->getBuiltin(absl::StrCat(scheme, "_rq"), + config_->unknown_scheme_rq_)}); + counter.inc(); setDynamicMetadata("opname", "auth"); } @@ -290,11 
+290,10 @@ void ZooKeeperFilter::onConnectResponse(const int32_t proto_version, const int32 const std::chrono::milliseconds& latency) { config_->stats_.connect_resp_.inc(); - Stats::SymbolTable::StoragePtr storage = - config_->scope_.symbolTable().join({config_->stat_prefix_, config_->connect_latency_}); - config_->scope_ - .histogramFromStatName(Stats::StatName(storage.get()), Stats::Histogram::Unit::Milliseconds) - .recordValue(latency.count()); + Stats::Histogram& histogram = Stats::Utility::histogramFromElements( + config_->scope_, {config_->stat_prefix_, config_->connect_latency_}, + Stats::Histogram::Unit::Milliseconds); + histogram.recordValue(latency.count()); setDynamicMetadata({{"opname", "connect_response"}, {"protocol_version", std::to_string(proto_version)}, @@ -313,11 +312,11 @@ void ZooKeeperFilter::onResponse(const OpCodes opcode, const int32_t xid, const opname = opcode_info.opname_; opcode_latency = opcode_info.latency_name_; } - Stats::SymbolTable::StoragePtr storage = - config_->scope_.symbolTable().join({config_->stat_prefix_, opcode_latency}); - config_->scope_ - .histogramFromStatName(Stats::StatName(storage.get()), Stats::Histogram::Unit::Milliseconds) - .recordValue(latency.count()); + + Stats::Histogram& histogram = Stats::Utility::histogramFromStatNames( + config_->scope_, {config_->stat_prefix_, opcode_latency}, + Stats::Histogram::Unit::Milliseconds); + histogram.recordValue(latency.count()); setDynamicMetadata({{"opname", opname}, {"xid", std::to_string(xid)}, diff --git a/source/extensions/filters/udp/dns_filter/BUILD b/source/extensions/filters/udp/dns_filter/BUILD index 3020f321940ec..4511fb6380da4 100644 --- a/source/extensions/filters/udp/dns_filter/BUILD +++ b/source/extensions/filters/udp/dns_filter/BUILD @@ -1,32 +1,46 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # 
Apache 2 + +envoy_extension_package() envoy_cc_library( name = "dns_filter_lib", - srcs = ["dns_filter.cc"], - hdrs = ["dns_filter.h"], + srcs = [ + "dns_filter.cc", + "dns_filter_resolver.cc", + "dns_parser.cc", + ], + hdrs = [ + "dns_filter.h", + "dns_filter_resolver.h", + "dns_parser.h", + ], + external_deps = ["ares"], deps = [ "//include/envoy/buffer:buffer_interface", - "//include/envoy/event:file_event_interface", - "//include/envoy/event:timer_interface", + "//include/envoy/event:dispatcher_interface", "//include/envoy/network:address_interface", + "//include/envoy/network:dns_interface", "//include/envoy/network:filter_interface", "//include/envoy/network:listener_interface", "//source/common/buffer:buffer_lib", "//source/common/common:empty_string", + "//source/common/common:matchers_lib", "//source/common/config:config_provider_lib", + "//source/common/config:datasource_lib", "//source/common/network:address_lib", "//source/common/network:utility_lib", - "//source/common/router:rds_lib", - "@envoy_api//envoy/config/filter/udp/dns_filter/v2alpha:pkg_cc_proto", + "//source/common/protobuf:message_validator_lib", + "//source/common/runtime:runtime_lib", + "//source/common/upstream:cluster_manager_lib", + "@envoy_api//envoy/extensions/filters/udp/dns_filter/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/type/matcher/v3:pkg_cc_proto", ], ) @@ -40,6 +54,6 @@ envoy_cc_extension( ":dns_filter_lib", "//include/envoy/registry", "//include/envoy/server:filter_config_interface", - "@envoy_api//envoy/config/filter/udp/dns_filter/v2alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/udp/dns_filter/v3alpha:pkg_cc_proto", ], ) diff --git a/source/extensions/filters/udp/dns_filter/config.cc b/source/extensions/filters/udp/dns_filter/config.cc index f5bae1c6ec0e4..242bbab75e9c3 100644 --- a/source/extensions/filters/udp/dns_filter/config.cc +++ b/source/extensions/filters/udp/dns_filter/config.cc @@ -9,7 +9,7 @@ Network::UdpListenerFilterFactoryCb 
DnsFilterConfigFactory::createFilterFactoryF const Protobuf::Message& config, Server::Configuration::ListenerFactoryContext& context) { auto shared_config = std::make_shared( context, MessageUtil::downcastAndValidate< - const envoy::config::filter::udp::dns_filter::v2alpha::DnsFilterConfig&>( + const envoy::extensions::filters::udp::dns_filter::v3alpha::DnsFilterConfig&>( config, context.messageValidationVisitor())); return [shared_config](Network::UdpListenerFilterManager& filter_manager, @@ -19,7 +19,7 @@ Network::UdpListenerFilterFactoryCb DnsFilterConfigFactory::createFilterFactoryF } ProtobufTypes::MessagePtr DnsFilterConfigFactory::createEmptyConfigProto() { - return std::make_unique(); + return std::make_unique(); } std::string DnsFilterConfigFactory::name() const { return "envoy.filters.udp.dns_filter"; } diff --git a/source/extensions/filters/udp/dns_filter/config.h b/source/extensions/filters/udp/dns_filter/config.h index 8031f450a092d..421feb7866754 100644 --- a/source/extensions/filters/udp/dns_filter/config.h +++ b/source/extensions/filters/udp/dns_filter/config.h @@ -1,7 +1,7 @@ #pragma once -#include "envoy/config/filter/udp/dns_filter/v2alpha/dns_filter.pb.h" -#include "envoy/config/filter/udp/dns_filter/v2alpha/dns_filter.pb.validate.h" +#include "envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.pb.h" +#include "envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.pb.validate.h" #include "envoy/server/filter_config.h" #include "extensions/filters/udp/dns_filter/dns_filter.h" diff --git a/source/extensions/filters/udp/dns_filter/dns_filter.cc b/source/extensions/filters/udp/dns_filter/dns_filter.cc index f2eeaaada0cb8..dc4c8bc30820e 100644 --- a/source/extensions/filters/udp/dns_filter/dns_filter.cc +++ b/source/extensions/filters/udp/dns_filter/dns_filter.cc @@ -1,53 +1,369 @@ #include "extensions/filters/udp/dns_filter/dns_filter.h" #include "envoy/network/listener.h" +#include "envoy/type/matcher/v3/string.pb.h" + +#include 
"common/config/datasource.h" +#include "common/network/address_impl.h" +#include "common/protobuf/message_validator_impl.h" namespace Envoy { namespace Extensions { namespace UdpFilters { namespace DnsFilter { +static constexpr std::chrono::milliseconds DEFAULT_RESOLVER_TIMEOUT{500}; +static constexpr std::chrono::seconds DEFAULT_RESOLVER_TTL{300}; + DnsFilterEnvoyConfig::DnsFilterEnvoyConfig( Server::Configuration::ListenerFactoryContext& context, - const envoy::config::filter::udp::dns_filter::v2alpha::DnsFilterConfig& config) - : root_scope_(context.scope()), stats_(generateStats(config.stat_prefix(), root_scope_)) { - - using envoy::config::filter::udp::dns_filter::v2alpha::DnsFilterConfig; + const envoy::extensions::filters::udp::dns_filter::v3alpha::DnsFilterConfig& config) + : root_scope_(context.scope()), cluster_manager_(context.clusterManager()), api_(context.api()), + stats_(generateStats(config.stat_prefix(), root_scope_)), random_(context.random()) { + using envoy::extensions::filters::udp::dns_filter::v3alpha::DnsFilterConfig; - // store configured data for server context const auto& server_config = config.server_config(); - if (server_config.has_inline_dns_table()) { + envoy::data::dns::v3::DnsTable dns_table; + bool result = loadServerConfig(server_config, dns_table); + ENVOY_LOG(debug, "Loading DNS table from external file: {}", result ? 
"Success" : "Failure"); - const auto& cfg = server_config.inline_dns_table(); - const size_t entries = cfg.virtual_domains().size(); + retry_count_ = dns_table.external_retry_count(); - // TODO (abaptiste): Check that the domain configured here appears - // in the known domains list - virtual_domains_.reserve(entries); - for (const auto& virtual_domain : cfg.virtual_domains()) { - DnsAddressList addresses{}; + const size_t entries = dns_table.virtual_domains().size(); + virtual_domains_.reserve(entries); + for (const auto& virtual_domain : dns_table.virtual_domains()) { + AddressConstPtrVec addrs{}; + absl::string_view cluster_name; + if (virtual_domain.endpoint().has_address_list()) { + const auto& address_list = virtual_domain.endpoint().address_list().address(); + addrs.reserve(address_list.size()); - if (virtual_domain.endpoint().has_address_list()) { - const auto& address_list = virtual_domain.endpoint().address_list().address(); - addresses.reserve(address_list.size()); - for (const auto& configured_address : address_list) { - addresses.push_back(configured_address); - } + // Shuffle the configured addresses. We store the addresses starting at a random + // list index so that we do not always return answers in the same order as the IPs + // are configured. 
+ size_t i = random_.random(); + + // Creating the IP address will throw an exception if the address string is malformed + for (auto index = 0; index < address_list.size(); index++) { + const auto address_iter = std::next(address_list.begin(), (i++ % address_list.size())); + auto ipaddr = Network::Utility::parseInternetAddress(*address_iter, 0 /* port */); + addrs.push_back(std::move(ipaddr)); } + } else { + cluster_name = virtual_domain.endpoint().cluster_name(); + } + + DnsEndpointConfig endpoint_config; + endpoint_config.address_list = absl::make_optional(std::move(addrs)); + endpoint_config.cluster_name = absl::make_optional(cluster_name); + + virtual_domains_.emplace(virtual_domain.name(), endpoint_config); + + std::chrono::seconds ttl = virtual_domain.has_answer_ttl() + ? std::chrono::seconds(virtual_domain.answer_ttl().seconds()) + : DEFAULT_RESOLVER_TTL; + domain_ttl_.emplace(virtual_domain.name(), ttl); + } + + // Add known domain suffixes + known_suffixes_.reserve(dns_table.known_suffixes().size()); + for (const auto& suffix : dns_table.known_suffixes()) { + auto matcher_ptr = std::make_unique(suffix); + known_suffixes_.push_back(std::move(matcher_ptr)); + } - virtual_domains_.emplace(virtual_domain.name(), std::move(addresses)); + forward_queries_ = config.has_client_config(); + if (forward_queries_) { + const auto& client_config = config.client_config(); + const auto& upstream_resolvers = client_config.upstream_resolvers(); + resolvers_.reserve(upstream_resolvers.size()); + for (const auto& resolver : upstream_resolvers) { + auto ipaddr = Network::Utility::protobufAddressToAddress(resolver); + resolvers_.emplace_back(std::move(ipaddr)); } + resolver_timeout_ = std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT( + client_config, resolver_timeout, DEFAULT_RESOLVER_TIMEOUT.count())); + + max_pending_lookups_ = client_config.max_pending_lookups(); } } +bool DnsFilterEnvoyConfig::loadServerConfig( + const 
envoy::extensions::filters::udp::dns_filter::v3alpha::DnsFilterConfig:: + ServerContextConfig& config, + envoy::data::dns::v3::DnsTable& table) { + using envoy::data::dns::v3::DnsTable; + + if (config.has_inline_dns_table()) { + table = config.inline_dns_table(); + return true; + } + + const auto& datasource = config.external_dns_table(); + bool data_source_loaded = false; + try { + // Data structure is deduced from the file extension. If the data is not read an exception + // is thrown. If no table can be read, the filter will refer all queries to an external + // DNS server, if configured, otherwise all queries will be responded to with Name Error. + MessageUtil::loadFromFile(datasource.filename(), table, + ProtobufMessage::getNullValidationVisitor(), api_, + false /* do_boosting */); + data_source_loaded = true; + } catch (const ProtobufMessage::UnknownProtoFieldException& e) { + ENVOY_LOG(warn, "Invalid field in DNS Filter datasource configuration: {}", e.what()); + } catch (const EnvoyException& e) { + ENVOY_LOG(warn, "Filesystem DNS Filter config update failure: {}", e.what()); + } + return data_source_loaded; +} + +DnsFilter::DnsFilter(Network::UdpReadFilterCallbacks& callbacks, + const DnsFilterEnvoyConfigSharedPtr& config) + : UdpListenerReadFilter(callbacks), config_(config), listener_(callbacks.udpListener()), + cluster_manager_(config_->clusterManager()), + message_parser_(config->forwardQueries(), listener_.dispatcher().timeSource(), + config->retryCount(), config->random(), + config_->stats().downstream_rx_query_latency_) { + // This callback is executed when the dns resolution completes. 
At that time of a response by the + // resolver, we build an answer record from each IP returned then send a response to the client + resolver_callback_ = [this](DnsQueryContextPtr context, const DnsQueryRecord* query, + AddressConstPtrVec& iplist) -> void { + if (context->resolution_status_ != Network::DnsResolver::ResolutionStatus::Success && + context->retry_ > 0) { + --context->retry_; + ENVOY_LOG(debug, "resolving name [{}] via external resolvers [retry {}]", query->name_, + context->retry_); + resolver_->resolveExternalQuery(std::move(context), query); + return; + } + + config_->stats().externally_resolved_queries_.inc(); + if (iplist.empty()) { + config_->stats().unanswered_queries_.inc(); + } + + incrementExternalQueryTypeCount(query->type_); + for (const auto& ip : iplist) { + incrementExternalQueryTypeAnswerCount(query->type_); + const std::chrono::seconds ttl = getDomainTTL(query->name_); + message_parser_.buildDnsAnswerRecord(context, *query, ttl, std::move(ip)); + } + sendDnsResponse(std::move(context)); + }; + + resolver_ = std::make_unique(resolver_callback_, config->resolvers(), + config->resolverTimeout(), listener_.dispatcher(), + config->maxPendingLookups()); +} + void DnsFilter::onData(Network::UdpRecvData& client_request) { - // Handle incoming request and respond with an answer - UNREFERENCED_PARAMETER(client_request); + config_->stats().downstream_rx_bytes_.recordValue(client_request.buffer_->length()); + config_->stats().downstream_rx_queries_.inc(); + + // Setup counters for the parser + DnsParserCounters parser_counters(config_->stats().query_buffer_underflow_, + config_->stats().record_name_overflow_, + config_->stats().query_parsing_failure_); + + // Parse the query, if it fails return an response to the client + DnsQueryContextPtr query_context = + message_parser_.createQueryContext(client_request, parser_counters); + incrementQueryTypeCount(query_context->queries_); + if (!query_context->parse_status_) { + 
config_->stats().downstream_rx_invalid_queries_.inc(); + sendDnsResponse(std::move(query_context)); + return; + } + + // Resolve the requested name + auto response = getResponseForQuery(query_context); + + // We were not able to satisfy the request locally. Return an empty response to the client + if (response == DnsLookupResponseCode::Failure) { + sendDnsResponse(std::move(query_context)); + return; + } + + // Externally resolved. We'll respond to the client when the external DNS resolution callback + // is executed + if (response == DnsLookupResponseCode::External) { + return; + } + + // We have an answer. Send it to the client + sendDnsResponse(std::move(query_context)); +} + +void DnsFilter::sendDnsResponse(DnsQueryContextPtr query_context) { + Buffer::OwnedImpl response; + + // Serializes the generated response to the parsed query from the client. If there is a + // parsing error or the incoming query is invalid, we will still generate a valid DNS response + message_parser_.buildResponseBuffer(query_context, response); + config_->stats().downstream_tx_responses_.inc(); + config_->stats().downstream_tx_bytes_.recordValue(response.length()); + Network::UdpSendData response_data{query_context->local_->ip(), *(query_context->peer_), + response}; + listener_.send(response_data); +} + +DnsLookupResponseCode DnsFilter::getResponseForQuery(DnsQueryContextPtr& context) { + /* It appears to be a rare case where we would have more than one query in a single request. + * It is allowed by the protocol but not widely supported: + * + * See: https://www.ietf.org/rfc/rfc1035.txt + * + * The question section is used to carry the "question" in most queries, + * i.e., the parameters that define what is being asked. The section + * contains QDCOUNT (usually 1) entries. + */ + for (const auto& query : context->queries_) { + // Try to resolve the query locally. 
If forwarding the query externally is disabled we will + // always attempt to resolve with the configured domains + if (isKnownDomain(query->name_) || !config_->forwardQueries()) { + // Determine whether the name is a cluster. Move on to the next query if successful + if (resolveViaClusters(context, *query)) { + continue; + } + + // Determine whether we an answer this query with the static configuration + if (resolveViaConfiguredHosts(context, *query)) { + continue; + } + } + + ENVOY_LOG(debug, "resolving name [{}] via external resolvers", query->name_); + resolver_->resolveExternalQuery(std::move(context), query.get()); + + return DnsLookupResponseCode::External; + } + + if (context->answers_.empty()) { + config_->stats().unanswered_queries_.inc(); + return DnsLookupResponseCode::Failure; + } + return DnsLookupResponseCode::Success; +} + +std::chrono::seconds DnsFilter::getDomainTTL(const absl::string_view domain) { + const auto& domain_ttl_config = config_->domainTtl(); + const auto& iter = domain_ttl_config.find(domain); + + if (iter == domain_ttl_config.end()) { + return DEFAULT_RESOLVER_TTL; + } + return iter->second; +} + +bool DnsFilter::isKnownDomain(const absl::string_view domain_name) { + const auto& known_suffixes = config_->knownSuffixes(); + + // If we don't have a list of allowlisted domain suffixes, we will resolve the name with an + // external DNS server + if (known_suffixes.empty()) { + ENVOY_LOG(debug, "Known domains list is empty"); + return false; + } + + // TODO(abaptiste): Use a trie to find a match instead of iterating through the list + for (auto& suffix : known_suffixes) { + if (suffix->match(domain_name)) { + config_->stats().known_domain_queries_.inc(); + return true; + } + } + return false; +} + +const DnsEndpointConfig* DnsFilter::getEndpointConfigForDomain(const absl::string_view domain) { + const auto& domains = config_->domains(); + const auto iter = domains.find(domain); + if (iter == domains.end()) { + ENVOY_LOG(debug, "No 
endpoint configuration exists for [{}]", domain); + return nullptr; + } + return &(iter->second); +} + +const AddressConstPtrVec* DnsFilter::getAddressListForDomain(const absl::string_view domain) { + const DnsEndpointConfig* endpoint_config = getEndpointConfigForDomain(domain); + if (endpoint_config != nullptr && endpoint_config->address_list.has_value()) { + return &(endpoint_config->address_list.value()); + } + return nullptr; +} + +const absl::string_view DnsFilter::getClusterNameForDomain(const absl::string_view domain) { + const DnsEndpointConfig* endpoint_config = getEndpointConfigForDomain(domain); + if (endpoint_config != nullptr && endpoint_config->cluster_name.has_value()) { + return endpoint_config->cluster_name.value(); + } + return {}; +} + +bool DnsFilter::resolveViaClusters(DnsQueryContextPtr& context, const DnsQueryRecord& query) { + // Determine if the domain name is being redirected to a cluster + const auto cluster_name = getClusterNameForDomain(query.name_); + absl::string_view lookup_name; + if (!cluster_name.empty()) { + lookup_name = cluster_name; + } else { + lookup_name = query.name_; + } + + Upstream::ThreadLocalCluster* cluster = cluster_manager_.get(lookup_name); + if (cluster == nullptr) { + ENVOY_LOG(debug, "Did not find a cluster for name [{}]", lookup_name); + return false; + } + + // TODO(abaptiste): consider using host weights when returning answer addresses + + // Return the address for all discovered endpoints + size_t discovered_endpoints = 0; + const std::chrono::seconds ttl = getDomainTTL(query.name_); + for (const auto& hostsets : cluster->prioritySet().hostSetsPerPriority()) { + for (const auto& host : hostsets->hosts()) { + ++discovered_endpoints; + ENVOY_LOG(debug, "using cluster host address {} for domain [{}]", + host->address()->ip()->addressAsString(), lookup_name); + incrementClusterQueryTypeAnswerCount(query.type_); + message_parser_.buildDnsAnswerRecord(context, query, ttl, host->address()); + } + } + return 
(discovered_endpoints != 0); +} + +bool DnsFilter::resolveViaConfiguredHosts(DnsQueryContextPtr& context, + const DnsQueryRecord& query) { + const auto* configured_address_list = getAddressListForDomain(query.name_); + if (configured_address_list == nullptr) { + ENVOY_LOG(debug, "Domain [{}] address list was not found", query.name_); + return false; + } + + if (configured_address_list->empty()) { + ENVOY_LOG(debug, "Domain [{}] address list is empty", query.name_); + return false; + } + + // Build an answer record from each configured IP address + uint64_t hosts_found = 0; + for (const auto& configured_address : *configured_address_list) { + ASSERT(configured_address != nullptr); + incrementLocalQueryTypeAnswerCount(query.type_); + ENVOY_LOG(debug, "using local address {} for domain [{}]", + configured_address->ip()->addressAsString(), query.name_); + ++hosts_found; + const std::chrono::seconds ttl = getDomainTTL(query.name_); + message_parser_.buildDnsAnswerRecord(context, query, ttl, configured_address); + } + return (hosts_found != 0); } void DnsFilter::onReceiveError(Api::IoError::IoErrorCode error_code) { - // Increment error stats + config_->stats().downstream_rx_errors_.inc(); UNREFERENCED_PARAMETER(error_code); } diff --git a/source/extensions/filters/udp/dns_filter/dns_filter.h b/source/extensions/filters/udp/dns_filter/dns_filter.h index f62d0c8162ba9..780f63a32c2a9 100644 --- a/source/extensions/filters/udp/dns_filter/dns_filter.h +++ b/source/extensions/filters/udp/dns_filter/dns_filter.h @@ -1,12 +1,19 @@ #pragma once -#include "envoy/config/filter/udp/dns_filter/v2alpha/dns_filter.pb.h" +#include "envoy/event/file_event.h" +#include "envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.pb.h" +#include "envoy/network/dns.h" #include "envoy/network/filter.h" #include "common/buffer/buffer_impl.h" +#include "common/common/matchers.h" #include "common/config/config_provider_impl.h" #include "common/network/utility.h" -#include 
"common/runtime/runtime_impl.h" + +#include "extensions/filters/udp/dns_filter/dns_filter_resolver.h" +#include "extensions/filters/udp/dns_filter/dns_parser.h" + +#include "absl/container/flat_hash_set.h" namespace Envoy { namespace Extensions { @@ -14,62 +21,298 @@ namespace UdpFilters { namespace DnsFilter { /** - * All Dns Filter stats. @see stats_macros.h - * Track the number of answered and un-answered queries for A and AAAA records + * All DNS Filter stats. @see stats_macros.h */ -#define ALL_DNS_FILTER_STATS(COUNTER) \ - COUNTER(queries_a_record) \ - COUNTER(noanswers_a_record) \ - COUNTER(answers_a_record) \ - COUNTER(queries_aaaa_record) \ - COUNTER(noanswers_aaaa_record) \ - COUNTER(answers_aaaa_record) +#define ALL_DNS_FILTER_STATS(COUNTER, HISTOGRAM) \ + COUNTER(a_record_queries) \ + COUNTER(aaaa_record_queries) \ + COUNTER(cluster_a_record_answers) \ + COUNTER(cluster_aaaa_record_answers) \ + COUNTER(cluster_unsupported_answers) \ + COUNTER(downstream_rx_errors) \ + COUNTER(downstream_rx_invalid_queries) \ + COUNTER(downstream_rx_queries) \ + COUNTER(external_a_record_queries) \ + COUNTER(external_a_record_answers) \ + COUNTER(external_aaaa_record_answers) \ + COUNTER(external_aaaa_record_queries) \ + COUNTER(external_unsupported_answers) \ + COUNTER(external_unsupported_queries) \ + COUNTER(externally_resolved_queries) \ + COUNTER(known_domain_queries) \ + COUNTER(local_a_record_answers) \ + COUNTER(local_aaaa_record_answers) \ + COUNTER(local_unsupported_answers) \ + COUNTER(unanswered_queries) \ + COUNTER(unsupported_queries) \ + COUNTER(downstream_tx_responses) \ + COUNTER(query_buffer_underflow) \ + COUNTER(query_parsing_failure) \ + COUNTER(record_name_overflow) \ + HISTOGRAM(downstream_rx_bytes, Bytes) \ + HISTOGRAM(downstream_rx_query_latency, Milliseconds) \ + HISTOGRAM(downstream_tx_bytes, Bytes) /** - * Struct definition for all Dns Filter stats. @see stats_macros.h + * Struct definition for all DNS Filter stats. 
@see stats_macros.h */ struct DnsFilterStats { - ALL_DNS_FILTER_STATS(GENERATE_COUNTER_STRUCT) + ALL_DNS_FILTER_STATS(GENERATE_COUNTER_STRUCT, GENERATE_HISTOGRAM_STRUCT) }; -using DnsAddressList = std::vector; -using DnsVirtualDomainConfig = absl::flat_hash_map; +struct DnsEndpointConfig { + absl::optional address_list; + absl::optional cluster_name; +}; -class DnsFilterEnvoyConfig { +using DnsVirtualDomainConfig = absl::flat_hash_map; + +/** + * DnsFilter configuration class abstracting access to data necessary for the filter's operation + */ +class DnsFilterEnvoyConfig : public Logger::Loggable { public: DnsFilterEnvoyConfig( Server::Configuration::ListenerFactoryContext& context, - const envoy::config::filter::udp::dns_filter::v2alpha::DnsFilterConfig& config); + const envoy::extensions::filters::udp::dns_filter::v3alpha::DnsFilterConfig& config); DnsFilterStats& stats() const { return stats_; } - DnsVirtualDomainConfig& domains() const { return virtual_domains_; } + const DnsVirtualDomainConfig& domains() const { return virtual_domains_; } + const std::vector& knownSuffixes() const { return known_suffixes_; } + const absl::flat_hash_map& domainTtl() const { + return domain_ttl_; + } + const AddressConstPtrVec& resolvers() const { return resolvers_; } + bool forwardQueries() const { return forward_queries_; } + const std::chrono::milliseconds resolverTimeout() const { return resolver_timeout_; } + Upstream::ClusterManager& clusterManager() const { return cluster_manager_; } + uint64_t retryCount() const { return retry_count_; } + Random::RandomGenerator& random() const { return random_; } + uint64_t maxPendingLookups() const { return max_pending_lookups_; } private: static DnsFilterStats generateStats(const std::string& stat_prefix, Stats::Scope& scope) { const auto final_prefix = absl::StrCat("dns_filter.", stat_prefix); - return {ALL_DNS_FILTER_STATS(POOL_COUNTER_PREFIX(scope, final_prefix))}; + return {ALL_DNS_FILTER_STATS(POOL_COUNTER_PREFIX(scope, 
final_prefix), + POOL_HISTOGRAM_PREFIX(scope, final_prefix))}; } + bool loadServerConfig(const envoy::extensions::filters::udp::dns_filter::v3alpha:: + DnsFilterConfig::ServerContextConfig& config, + envoy::data::dns::v3::DnsTable& table); + Stats::Scope& root_scope_; + Upstream::ClusterManager& cluster_manager_; + Network::DnsResolverSharedPtr resolver_; + Api::Api& api_; + mutable DnsFilterStats stats_; - mutable DnsVirtualDomainConfig virtual_domains_; + DnsVirtualDomainConfig virtual_domains_; + std::vector known_suffixes_; + absl::flat_hash_map domain_ttl_; + bool forward_queries_; + uint64_t retry_count_; + AddressConstPtrVec resolvers_; + std::chrono::milliseconds resolver_timeout_; + Random::RandomGenerator& random_; + uint64_t max_pending_lookups_; }; using DnsFilterEnvoyConfigSharedPtr = std::shared_ptr; +enum class DnsLookupResponseCode { Success, Failure, External }; + +/** + * This class is responsible for handling incoming DNS datagrams and responding to the queries. + * The filter will attempt to resolve the query via its configuration or direct to an external + * resolver when necessary + */ class DnsFilter : public Network::UdpListenerReadFilter, Logger::Loggable { public: - DnsFilter(Network::UdpReadFilterCallbacks& callbacks, const DnsFilterEnvoyConfigSharedPtr& config) - : UdpListenerReadFilter(callbacks), config_(config), listener_(callbacks.udpListener()) {} + DnsFilter(Network::UdpReadFilterCallbacks& callbacks, + const DnsFilterEnvoyConfigSharedPtr& config); // Network::UdpListenerReadFilter callbacks void onData(Network::UdpRecvData& client_request) override; void onReceiveError(Api::IoError::IoErrorCode error_code) override; + /** + * @return bool true if the domain_name is a known domain for which we respond to queries + */ + bool isKnownDomain(const absl::string_view domain_name); + private: + /** + * Prepare the response buffer and send it to the client + * + * @param context contains the data necessary to create a response and send it 
to a client + */ + void sendDnsResponse(DnsQueryContextPtr context); + + /** + * @brief Encapsulates all of the logic required to find an answer for a DNS query + * + * @return DnsLookupResponseCode indicating whether we were able to respond to the query or send + * the query to an external resolver + */ + DnsLookupResponseCode getResponseForQuery(DnsQueryContextPtr& context); + + /** + * @return std::chrono::seconds retrieves the configured per domain TTL to be inserted into answer + * records + */ + std::chrono::seconds getDomainTTL(const absl::string_view domain); + + /** + * @brief Resolves the supplied query from configured clusters + * + * @param context object containing the query context + * @param query query object containing the name to be resolved + * @return bool true if the requested name matched a cluster and an answer record was constructed + */ + bool resolveViaClusters(DnsQueryContextPtr& context, const DnsQueryRecord& query); + + /** + * @brief Resolves the supplied query from configured hosts + * + * @param context object containing the query context + * @param query query object containing the name to be resolved + * @return bool true if the requested name matches a configured domain and answer records can be + * constructed + */ + bool resolveViaConfiguredHosts(DnsQueryContextPtr& context, const DnsQueryRecord& query); + + /** + * @brief Increment the counter for the given query type for external queries + * + * @param query_type indicate the type of record being resolved (A, AAAA, or other). 
+ */ + void incrementExternalQueryTypeCount(const uint16_t query_type) { + switch (query_type) { + case DNS_RECORD_TYPE_A: + config_->stats().external_a_record_queries_.inc(); + break; + case DNS_RECORD_TYPE_AAAA: + config_->stats().external_aaaa_record_queries_.inc(); + break; + default: + config_->stats().external_unsupported_queries_.inc(); + break; + } + } + + /** + * @brief Increment the counter for the parsed query type + * + * @param queries a vector of all the incoming queries received from a client + */ + void incrementQueryTypeCount(const DnsQueryPtrVec& queries) { + for (const auto& query : queries) { + incrementQueryTypeCount(query->type_); + } + } + + /** + * @brief Increment the counter for the given query type. + * + * @param query_type indicate the type of record being resolved (A, AAAA, or other). + */ + void incrementQueryTypeCount(const uint16_t query_type) { + switch (query_type) { + case DNS_RECORD_TYPE_A: + config_->stats().a_record_queries_.inc(); + break; + case DNS_RECORD_TYPE_AAAA: + config_->stats().aaaa_record_queries_.inc(); + break; + default: + config_->stats().unsupported_queries_.inc(); + break; + } + } + + /** + * @brief Increment the counter for answers for the given query type resolved via cluster names + * + * @param query_type indicate the type of answer record returned to the client + */ + void incrementClusterQueryTypeAnswerCount(const uint16_t query_type) { + switch (query_type) { + case DNS_RECORD_TYPE_A: + config_->stats().cluster_a_record_answers_.inc(); + break; + case DNS_RECORD_TYPE_AAAA: + config_->stats().cluster_aaaa_record_answers_.inc(); + break; + default: + config_->stats().cluster_unsupported_answers_.inc(); + break; + } + } + + /** + * @brief Increment the counter for answers for the given query type resolved from the local + * configuration. 
+ * + * @param query_type indicate the type of answer record returned to the client + */ + void incrementLocalQueryTypeAnswerCount(const uint16_t query_type) { + switch (query_type) { + case DNS_RECORD_TYPE_A: + config_->stats().local_a_record_answers_.inc(); + break; + case DNS_RECORD_TYPE_AAAA: + config_->stats().local_aaaa_record_answers_.inc(); + break; + default: + config_->stats().local_unsupported_answers_.inc(); + break; + } + } + + /** + * @brief Increment the counter for answers for the given query type resolved via an external + * resolver + * + * @param query_type indicate the type of answer record returned to the client + */ + void incrementExternalQueryTypeAnswerCount(const uint16_t query_type) { + switch (query_type) { + case DNS_RECORD_TYPE_A: + config_->stats().external_a_record_answers_.inc(); + break; + case DNS_RECORD_TYPE_AAAA: + config_->stats().external_aaaa_record_answers_.inc(); + break; + default: + config_->stats().external_unsupported_answers_.inc(); + break; + } + } + + /** + * @brief Helper function to retrieve the Endpoint configuration for a requested domain + */ + const DnsEndpointConfig* getEndpointConfigForDomain(const absl::string_view domain); + + /** + * @brief Helper function to retrieve the Address List for a requested domain + */ + const AddressConstPtrVec* getAddressListForDomain(const absl::string_view domain); + + /** + * @brief Helper function to retrieve a cluster name that a domain may be redirected towards + */ + const absl::string_view getClusterNameForDomain(const absl::string_view domain); + const DnsFilterEnvoyConfigSharedPtr config_; Network::UdpListener& listener_; - Runtime::RandomGeneratorImpl rng_; + Upstream::ClusterManager& cluster_manager_; + DnsMessageParser message_parser_; + DnsFilterResolverPtr resolver_; + Network::Address::InstanceConstSharedPtr local_; + Network::Address::InstanceConstSharedPtr peer_; + DnsFilterResolverCallback resolver_callback_; }; } // namespace DnsFilter diff --git 
a/source/extensions/filters/udp/dns_filter/dns_filter_resolver.cc b/source/extensions/filters/udp/dns_filter/dns_filter_resolver.cc new file mode 100644 index 0000000000000..48fd7b683b0d1 --- /dev/null +++ b/source/extensions/filters/udp/dns_filter/dns_filter_resolver.cc @@ -0,0 +1,128 @@ +#include "extensions/filters/udp/dns_filter/dns_filter_resolver.h" + +#include "common/network/utility.h" + +namespace Envoy { +namespace Extensions { +namespace UdpFilters { +namespace DnsFilter { + +void DnsFilterResolver::resolveExternalQuery(DnsQueryContextPtr context, + const DnsQueryRecord* domain_query) { + // Create an external resolution context for the query. + LookupContext ctx{}; + ctx.query_rec = domain_query; + ctx.query_context = std::move(context); + ctx.expiry = std::chrono::duration_cast( + dispatcher_.timeSource().systemTime().time_since_epoch()) + .count() + + std::chrono::duration_cast(timeout_).count(); + ctx.resolver_status = DnsFilterResolverStatus::Pending; + + Network::DnsLookupFamily lookup_family; + switch (domain_query->type_) { + case DNS_RECORD_TYPE_A: + lookup_family = Network::DnsLookupFamily::V4Only; + break; + case DNS_RECORD_TYPE_AAAA: + lookup_family = Network::DnsLookupFamily::V6Only; + break; + default: + // We don't support other lookups other than A and AAAA. Set success here so that we don't + // retry for something that we are certain will fail. + ENVOY_LOG(debug, "Unknown query type [{}] for upstream lookup", domain_query->type_); + ctx.query_context->resolution_status_ = Network::DnsResolver::ResolutionStatus::Success; + ctx.resolver_status = DnsFilterResolverStatus::Complete; + invokeCallback(ctx); + return; + } + + const DnsQueryRecord* id = domain_query; + + // If we have too many pending lookups, invoke the callback to retry the query. 
+ if (lookups_.size() > max_pending_lookups_) { + ENVOY_LOG( + trace, + "Retrying query for [{}] because there are too many pending lookups: [pending {}/max {}]", + domain_query->name_, lookups_.size(), max_pending_lookups_); + ctx.resolver_status = DnsFilterResolverStatus::Complete; + invokeCallback(ctx); + return; + } + + ctx.timeout_timer = dispatcher_.createTimer([this]() -> void { onResolveTimeout(); }); + ctx.timeout_timer->enableTimer(timeout_); + + lookups_.emplace(id, std::move(ctx)); + + ENVOY_LOG(trace, "Pending queries: {}", lookups_.size()); + + // Define the callback that is executed when resolution completes + auto resolve_cb = [this, id](Network::DnsResolver::ResolutionStatus status, + std::list&& response) -> void { + auto ctx_iter = lookups_.find(id); + + // If the context is not in the map, the lookup has timed out and was removed + // when the timer executed + if (ctx_iter == lookups_.end()) { + ENVOY_LOG(debug, "Unable to find context for DNS query for ID [{}]", + reinterpret_cast(id)); + return; + } + + auto ctx = std::move(ctx_iter->second); + lookups_.erase(ctx_iter->first); + + // We are processing the response here, so we did not timeout. Cancel the timer + ctx.timeout_timer->disableTimer(); + + ENVOY_LOG(trace, "async query status returned. Entries {}", response.size()); + ASSERT(ctx.resolver_status == DnsFilterResolverStatus::Pending); + + ctx.query_context->resolution_status_ = status; + ctx.resolver_status = DnsFilterResolverStatus::Complete; + + // C-ares doesn't expose the TTL in the data available here. 
+ if (status == Network::DnsResolver::ResolutionStatus::Success) { + ctx.resolved_hosts.reserve(response.size()); + for (const auto& resp : response) { + ASSERT(resp.address_ != nullptr); + ENVOY_LOG(trace, "Resolved address: {} for {}", resp.address_->ip()->addressAsString(), + ctx.query_rec->name_); + ctx.resolved_hosts.emplace_back(std::move(resp.address_)); + } + } + // Invoke the filter callback notifying it of resolved addresses + invokeCallback(ctx); + }; + + // Resolve the address in the query and add to the resolved_hosts vector + resolver_->resolve(domain_query->name_, lookup_family, resolve_cb); +} + +void DnsFilterResolver::onResolveTimeout() { + const uint64_t now = std::chrono::duration_cast( + dispatcher_.timeSource().systemTime().time_since_epoch()) + .count(); + ENVOY_LOG(trace, "Pending queries: {}", lookups_.size()); + + // Find an outstanding pending query and purge it + for (auto& ctx_iter : lookups_) { + if (ctx_iter.second.expiry <= now && + ctx_iter.second.resolver_status == DnsFilterResolverStatus::Pending) { + auto ctx = std::move(ctx_iter.second); + + ENVOY_LOG(trace, "Purging expired query: {}", ctx_iter.first->name_); + + ctx.query_context->resolution_status_ = Network::DnsResolver::ResolutionStatus::Failure; + + lookups_.erase(ctx_iter.first); + callback_(std::move(ctx.query_context), ctx.query_rec, ctx.resolved_hosts); + return; + } + } +} +} // namespace DnsFilter +} // namespace UdpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/udp/dns_filter/dns_filter_resolver.h b/source/extensions/filters/udp/dns_filter/dns_filter_resolver.h new file mode 100644 index 0000000000000..ee499683db76a --- /dev/null +++ b/source/extensions/filters/udp/dns_filter/dns_filter_resolver.h @@ -0,0 +1,77 @@ +#pragma once + +#include "envoy/event/dispatcher.h" +#include "envoy/network/dns.h" + +#include "extensions/filters/udp/dns_filter/dns_parser.h" + +namespace Envoy { +namespace Extensions { +namespace 
UdpFilters { +namespace DnsFilter { + +enum class DnsFilterResolverStatus { Pending, Complete, TimedOut }; + +/* + * This class encapsulates the logic of handling an asynchronous DNS request for the DNS filter. + * External request timeouts are handled here. + */ +class DnsFilterResolver : Logger::Loggable { +public: + DnsFilterResolver(DnsFilterResolverCallback& callback, AddressConstPtrVec resolvers, + std::chrono::milliseconds timeout, Event::Dispatcher& dispatcher, + uint64_t max_pending_lookups) + : dispatcher_(dispatcher), + resolver_(dispatcher.createDnsResolver(resolvers, false /* use_tcp_for_dns_lookups */)), + callback_(callback), timeout_(timeout), max_pending_lookups_(max_pending_lookups) {} + /** + * @brief entry point to resolve the name in a DnsQueryRecord + * + * This function uses the query object to determine whether it is requesting an A or AAAA record + * for the given name. When the resolver callback executes, this will execute a DNS Filter + * callback in order to build the answer object returned to the client. + * + * @param domain_query the query record object containing the name for which we are resolving + */ + void resolveExternalQuery(DnsQueryContextPtr context, const DnsQueryRecord* domain_query); + +private: + struct LookupContext { + const DnsQueryRecord* query_rec; + DnsQueryContextPtr query_context; + uint64_t expiry; + AddressConstPtrVec resolved_hosts; + DnsFilterResolverStatus resolver_status; + Event::TimerPtr timeout_timer; + }; + /** + * @brief invokes the DNS Filter callback only if our state indicates we have not timed out + * waiting for a response from the external resolver + */ + void invokeCallback(LookupContext& context) { + // If we've timed out. 
Guard against sending a response + if (context.resolver_status == DnsFilterResolverStatus::Complete) { + callback_(std::move(context.query_context), context.query_rec, context.resolved_hosts); + } + } + + /** + * @brief Invoke the DNS Filter callback to send a response to a client if the query has timed out + * DNS Filter will respond to the client appropriately. + */ + void onResolveTimeout(); + + Event::Dispatcher& dispatcher_; + const Network::DnsResolverSharedPtr resolver_; + DnsFilterResolverCallback& callback_; + std::chrono::milliseconds timeout_; + absl::flat_hash_map lookups_; + uint64_t max_pending_lookups_; +}; + +using DnsFilterResolverPtr = std::unique_ptr; + +} // namespace DnsFilter +} // namespace UdpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/udp/dns_filter/dns_parser.cc b/source/extensions/filters/udp/dns_filter/dns_parser.cc new file mode 100644 index 0000000000000..b68016d191851 --- /dev/null +++ b/source/extensions/filters/udp/dns_filter/dns_parser.cc @@ -0,0 +1,630 @@ +#include "extensions/filters/udp/dns_filter/dns_parser.h" + +#include "envoy/network/address.h" + +#include "common/common/empty_string.h" +#include "common/network/address_impl.h" +#include "common/network/utility.h" + +#include "ares.h" + +namespace Envoy { +namespace Extensions { +namespace UdpFilters { +namespace DnsFilter { + +bool BaseDnsRecord::serializeName(Buffer::OwnedImpl& output) { + // Iterate over a name e.g. 
"www.domain.com" once and produce a buffer containing each name + // segment prefixed by its length + static constexpr char SEPARATOR = '.'; + static constexpr size_t MAX_LABEL_LENGTH = 63; + static constexpr size_t MAX_NAME_LENGTH = 255; + + // Names are restricted to 255 bytes per RFC + if (name_.size() > MAX_NAME_LENGTH) { + return false; + } + + size_t last = 0; + size_t count = name_.find_first_of(SEPARATOR); + auto iter = name_.begin(); + + while (count != std::string::npos) { + if ((count - last) > MAX_LABEL_LENGTH) { + return false; + } + + count -= last; + output.writeBEInt(count); + for (size_t i = 0; i < count; i++) { + output.writeByte(*iter); + ++iter; + } + + // periods are not serialized. Skip to the next character + if (*iter == SEPARATOR) { + ++iter; + } + + // Move our last marker to the first position after where we stopped. Search for the next name + // separator + last += count; + ++last; + count = name_.find_first_of(SEPARATOR, last); + } + + // Write the remaining segment prepended by its length + count = name_.size() - last; + output.writeBEInt(count); + for (size_t i = 0; i < count; i++) { + output.writeByte(*iter); + ++iter; + } + + // Terminate the name record with a null byte + output.writeByte(0x00); + return true; +} + +// Serialize a DNS Query Record +bool DnsQueryRecord::serialize(Buffer::OwnedImpl& output) { + if (serializeName(output)) { + output.writeBEInt(type_); + output.writeBEInt(class_); + return true; + } + return false; +} + +// Serialize a DNS Answer Record +bool DnsAnswerRecord::serialize(Buffer::OwnedImpl& output) { + if (serializeName(output)) { + output.writeBEInt(type_); + output.writeBEInt(class_); + output.writeBEInt(static_cast(ttl_.count())); + + ASSERT(ip_addr_ != nullptr); + const auto ip_address = ip_addr_->ip(); + + ASSERT(ip_address != nullptr); + if (ip_address->ipv6() != nullptr) { + // Store the 128bit address with 2 64 bit writes + const absl::uint128 addr6 = ip_address->ipv6()->address(); + 
output.writeBEInt(sizeof(addr6)); + output.writeLEInt(absl::Uint128Low64(addr6)); + output.writeLEInt(absl::Uint128High64(addr6)); + } else if (ip_address->ipv4() != nullptr) { + output.writeBEInt(4); + output.writeLEInt(ip_address->ipv4()->address()); + } + return true; + } + return false; +} + +DnsQueryContextPtr DnsMessageParser::createQueryContext(Network::UdpRecvData& client_request, + DnsParserCounters& counters) { + DnsQueryContextPtr query_context = std::make_unique( + client_request.addresses_.local_, client_request.addresses_.peer_, counters, retry_count_); + + query_context->parse_status_ = parseDnsObject(query_context, client_request.buffer_); + if (!query_context->parse_status_) { + query_context->response_code_ = DNS_RESPONSE_CODE_FORMAT_ERROR; + ENVOY_LOG(debug, "Unable to parse query buffer from '{}' into a DNS object", + client_request.addresses_.peer_->ip()->addressAsString()); + } + return query_context; +} + +bool DnsMessageParser::parseDnsObject(DnsQueryContextPtr& context, + const Buffer::InstancePtr& buffer) { + static constexpr uint64_t field_size = sizeof(uint16_t); + size_t available_bytes = buffer->length(); + uint64_t offset = 0; + uint16_t data; + DnsQueryParseState state{DnsQueryParseState::Init}; + + header_ = {}; + while (state != DnsQueryParseState::Finish) { + // Ensure that we have enough data remaining in the buffer to parse the query + if (available_bytes < field_size) { + context->counters_.underflow_counter.inc(); + ENVOY_LOG(debug, + "Exhausted available bytes in the buffer. Insufficient data to parse query field."); + return false; + } + + // Each aggregate DNS header field is 2 bytes wide. 
+ data = buffer->peekBEInt(offset); + offset += field_size; + available_bytes -= field_size; + + if (offset > buffer->length()) { + ENVOY_LOG(debug, "Buffer read offset [{}] is beyond buffer length [{}].", offset, + buffer->length()); + return false; + } + + switch (state) { + case DnsQueryParseState::Init: + header_.id = data; + state = DnsQueryParseState::Flags; + break; + + case DnsQueryParseState::Flags: + ::memcpy(static_cast(&header_.flags), &data, sizeof(uint16_t)); + state = DnsQueryParseState::Questions; + break; + + case DnsQueryParseState::Questions: + header_.questions = data; + state = DnsQueryParseState::Answers; + break; + + case DnsQueryParseState::Answers: + header_.answers = data; + state = DnsQueryParseState::Authority; + break; + + case DnsQueryParseState::Authority: + header_.authority_rrs = data; + state = DnsQueryParseState::Authority2; + break; + + case DnsQueryParseState::Authority2: + header_.additional_rrs = data; + state = DnsQueryParseState::Finish; + break; + + case DnsQueryParseState::Finish: + break; + } + } + + if (!header_.flags.qr && header_.answers) { + ENVOY_LOG(debug, "Answer records present in query"); + return false; + } + + if (header_.questions > 1) { + ENVOY_LOG(debug, "Multiple [{}] questions in DNS query", header_.questions); + return false; + } + + // Verify that we still have available data in the buffer to read answer and query records + if (offset > buffer->length()) { + ENVOY_LOG(debug, "Buffer read offset[{}] is larget than buffer length [{}].", offset, + buffer->length()); + return false; + } + + context->id_ = static_cast(header_.id); + if (context->id_ == 0) { + ENVOY_LOG(debug, "No ID in DNS query"); + return false; + } + + if (header_.questions == 0) { + ENVOY_LOG(debug, "No questions in DNS request"); + return false; + } + + // Almost always, we will have only one query here. 
Per the RFC, QDCOUNT is usually 1 + context->queries_.reserve(header_.questions); + for (auto index = 0; index < header_.questions; index++) { + ENVOY_LOG(trace, "Parsing [{}/{}] questions", index, header_.questions); + auto rec = parseDnsQueryRecord(buffer, &offset); + if (rec == nullptr) { + context->counters_.query_parsing_failure.inc(); + ENVOY_LOG(debug, "Couldn't parse query record from buffer"); + return false; + } + context->queries_.push_back(std::move(rec)); + } + + // Parse all answer records and store them. This is exercised primarily in tests to + // verify the responses returned from the filter. + for (auto index = 0; index < header_.answers; index++) { + ENVOY_LOG(trace, "Parsing [{}/{}] answers", index, header_.answers); + auto rec = parseDnsAnswerRecord(buffer, &offset); + if (rec == nullptr) { + ENVOY_LOG(debug, "Couldn't parse answer record from buffer"); + return false; + } + const std::string name = rec->name_; + context->answers_.emplace(name, std::move(rec)); + } + + return true; +} + +const std::string DnsMessageParser::parseDnsNameRecord(const Buffer::InstancePtr& buffer, + uint64_t* available_bytes, + uint64_t* name_offset) { + void* buf = buffer->linearize(static_cast(buffer->length())); + const unsigned char* linearized_data = static_cast(buf); + const unsigned char* record = linearized_data + *name_offset; + long encoded_len; + char* output; + + int result = ares_expand_name(record, linearized_data, buffer->length(), &output, &encoded_len); + if (result != ARES_SUCCESS) { + return EMPTY_STRING; + } + + std::string name(output); + ares_free_string(output); + *name_offset += encoded_len; + *available_bytes -= encoded_len; + + return name; +} + +DnsAnswerRecordPtr DnsMessageParser::parseDnsAnswerRecord(const Buffer::InstancePtr& buffer, + uint64_t* offset) { + uint64_t data_offset = *offset; + + if (data_offset > buffer->length()) { + ENVOY_LOG(debug, "Invalid offset for parsing answer record"); + return nullptr; + } + + uint64_t 
available_bytes = buffer->length() - data_offset; + + if (available_bytes == 0) { + ENVOY_LOG(debug, "No data left in buffer for reading answer record"); + return nullptr; + } + + const std::string record_name = parseDnsNameRecord(buffer, &available_bytes, &data_offset); + if (record_name.empty()) { + ENVOY_LOG(debug, "Unable to parse name record from buffer"); + return nullptr; + } + + if (available_bytes < (sizeof(uint32_t) + 3 * sizeof(uint16_t))) { + ENVOY_LOG(debug, + "Insufficient data in buffer to read answer record data." + "Available bytes: {}", + available_bytes); + return nullptr; + } + + // Parse the record type + uint16_t record_type; + record_type = buffer->peekBEInt(data_offset); + data_offset += sizeof(uint16_t); + available_bytes -= sizeof(uint16_t); + + // We support only A and AAAA record types + if (record_type != DNS_RECORD_TYPE_A && record_type != DNS_RECORD_TYPE_AAAA) { + ENVOY_LOG(debug, "Unsupported record type [{}] found in answer", record_type); + return nullptr; + } + + // Parse the record class + uint16_t record_class; + record_class = buffer->peekBEInt(data_offset); + data_offset += sizeof(uint16_t); + available_bytes -= sizeof(uint16_t); + + // We support only IN record classes + if (record_class != DNS_RECORD_CLASS_IN) { + ENVOY_LOG(debug, "Unsupported record class [{}] found in answer", record_class); + return nullptr; + } + + // Read the record's TTL + uint32_t ttl; + ttl = buffer->peekBEInt(data_offset); + data_offset += sizeof(uint32_t); + available_bytes -= sizeof(uint32_t); + + // Parse the Data Length and address data record + uint16_t data_length; + data_length = buffer->peekBEInt(data_offset); + data_offset += sizeof(uint16_t); + available_bytes -= sizeof(uint16_t); + + if (data_length == 0) { + ENVOY_LOG(debug, "Read zero for data length when reading address from answer record"); + return nullptr; + } + + // Build an address pointer from the string data. + // We don't support anything other than A or AAAA records. 
If we add support for other record + // types, we must account for them here + Network::Address::InstanceConstSharedPtr ip_addr = nullptr; + + switch (record_type) { + case DNS_RECORD_TYPE_A: + if (available_bytes >= sizeof(uint32_t)) { + sockaddr_in sa4; + memset(&sa4, 0, sizeof(sa4)); + sa4.sin_addr.s_addr = buffer->peekLEInt(data_offset); + ip_addr = std::make_shared(&sa4); + data_offset += data_length; + } + break; + case DNS_RECORD_TYPE_AAAA: + if (available_bytes >= sizeof(absl::uint128)) { + sockaddr_in6 sa6; + memset(&sa6, 0, sizeof(sa6)); + uint8_t* address6_bytes = reinterpret_cast(&sa6.sin6_addr.s6_addr); + static constexpr size_t count = sizeof(absl::uint128) / sizeof(uint8_t); + for (size_t index = 0; index < count; index++) { + *address6_bytes++ = buffer->peekLEInt(data_offset++); + } + ip_addr = std::make_shared(sa6, true); + } + break; + default: + ENVOY_LOG(debug, "Unsupported record type [{}] found in answer", record_type); + break; + } + + if (ip_addr == nullptr) { + ENVOY_LOG(debug, "Unable to parse IP address from data in answer record"); + return nullptr; + } + + ENVOY_LOG(trace, "Parsed address [{}] from record type [{}]: offset {}", + ip_addr->ip()->addressAsString(), record_type, data_offset); + + *offset = data_offset; + + return std::make_unique(record_name, record_type, record_class, + std::chrono::seconds(ttl), std::move(ip_addr)); +} + +DnsQueryRecordPtr DnsMessageParser::parseDnsQueryRecord(const Buffer::InstancePtr& buffer, + uint64_t* offset) { + uint64_t name_offset = *offset; + uint64_t available_bytes = buffer->length() - name_offset; + + if (available_bytes == 0) { + ENVOY_LOG(debug, "No available data in buffer to parse a query record"); + return nullptr; + } + + const std::string record_name = parseDnsNameRecord(buffer, &available_bytes, &name_offset); + if (record_name.empty()) { + ENVOY_LOG(debug, "Unable to parse name record from buffer [length {}]", buffer->length()); + return nullptr; + } + + if (available_bytes < 2 * 
sizeof(uint16_t)) { + ENVOY_LOG(debug, + "Insufficient data in buffer to read query record type and class. " + "Available bytes: {}", + available_bytes); + return nullptr; + } + + // Read the record type (A or AAAA) + uint16_t record_type; + record_type = buffer->peekBEInt(name_offset); + name_offset += sizeof(record_type); + + // Read the record class. This value is always 1 for internet address records + uint16_t record_class; + record_class = buffer->peekBEInt(name_offset); + name_offset += sizeof(record_class); + + if (record_class != DNS_RECORD_CLASS_IN) { + ENVOY_LOG(debug, "Unsupported record class '{}' in address record", record_class); + return nullptr; + } + + auto rec = std::make_unique(record_name, record_type, record_class); + rec->query_time_ms_ = std::make_unique( + query_latency_histogram_, timesource_); + + // stop reading the buffer here since we aren't parsing additional records + ENVOY_LOG(trace, "Extracted query record. Name: {} type: {} class: {}", record_name, record_type, + record_class); + + *offset = name_offset; + return std::make_unique(record_name, record_type, record_class); +} + +void DnsMessageParser::setDnsResponseFlags(DnsQueryContextPtr& query_context, + const uint16_t questions, const uint16_t answers) { + // Copy the transaction ID + response_header_.id = header_.id; + + // Signify that this is a response to a query + response_header_.flags.qr = 1; + + response_header_.flags.opcode = header_.flags.opcode; + response_header_.flags.aa = 0; + response_header_.flags.tc = 0; + + // Copy Recursion flags + response_header_.flags.rd = header_.flags.rd; + + // Set the recursion flag based on whether Envoy is configured to forward queries + response_header_.flags.ra = recursion_available_; + + // reserved flag is not set + response_header_.flags.z = 0; + + // Set the authenticated flags to zero + response_header_.flags.ad = 0; + + response_header_.flags.cd = 0; + response_header_.answers = answers; + response_header_.flags.rcode = 
query_context->response_code_; + + // Set the number of questions from the incoming query + response_header_.questions = questions; + + // We will not include any additional records + response_header_.authority_rrs = 0; + response_header_.additional_rrs = 0; +} + +void DnsMessageParser::buildDnsAnswerRecord(DnsQueryContextPtr& context, + const DnsQueryRecord& query_rec, + const std::chrono::seconds ttl, + Network::Address::InstanceConstSharedPtr ipaddr) { + // Verify that we have an address matching the query record type + switch (query_rec.type_) { + case DNS_RECORD_TYPE_AAAA: + if (ipaddr->ip()->ipv6() == nullptr) { + ENVOY_LOG(debug, "Unable to return IPV6 address for query"); + return; + } + break; + + case DNS_RECORD_TYPE_A: + if (ipaddr->ip()->ipv4() == nullptr) { + ENVOY_LOG(debug, "Unable to return IPV4 address for query"); + return; + } + break; + + // TODO(abbaptis): Support additional records (e.g. SRV) + default: + ENVOY_LOG(debug, "record type [{}] is not supported", query_rec.type_); + return; + } + + auto answer_record = std::make_unique(query_rec.name_, query_rec.type_, + query_rec.class_, ttl, std::move(ipaddr)); + context->answers_.emplace(query_rec.name_, std::move(answer_record)); +} + +void DnsMessageParser::setResponseCode(DnsQueryContextPtr& context, + const uint16_t serialized_queries, + const uint16_t serialized_answers) { + // If the question is malformed, don't change the response + if (context->response_code_ == DNS_RESPONSE_CODE_FORMAT_ERROR) { + return; + } + // Check for unsupported request types + for (const auto& query : context->queries_) { + if (query->type_ != DNS_RECORD_TYPE_A && query->type_ != DNS_RECORD_TYPE_AAAA) { + context->response_code_ = DNS_RESPONSE_CODE_NOT_IMPLEMENTED; + return; + } + } + // Output validation + if (serialized_queries == 0) { + context->response_code_ = DNS_RESPONSE_CODE_FORMAT_ERROR; + return; + } + if (serialized_answers == 0) { + context->response_code_ = DNS_RESPONSE_CODE_NAME_ERROR; + return; + } 
+ context->response_code_ = DNS_RESPONSE_CODE_NO_ERROR; +} + +void DnsMessageParser::buildResponseBuffer(DnsQueryContextPtr& query_context, + Buffer::OwnedImpl& buffer) { + // Ensure that responses stay below the 512 byte byte limit. If we are to exceed this we must add + // DNS extension fields + // + // Note: There is Network::MAX_UDP_PACKET_SIZE, which is defined as 1500 bytes. If we support + // DNS extensions, which support up to 4096 bytes, we will have to keep this 1500 byte limit in + // mind. + static constexpr uint64_t MAX_DNS_RESPONSE_SIZE = 512; + static constexpr uint64_t MAX_DNS_NAME_SIZE = 255; + + // Amazon Route53 will return up to 8 records in an answer + // https://aws.amazon.com/route53/faqs/#associate_multiple_ip_with_single_record + static constexpr size_t MAX_RETURNED_RECORDS = 8; + + // Each response must have DNS flags, which spans 4 bytes. Account for them immediately so that we + // can adjust the number of returned answers to remain under the limit + uint64_t total_buffer_size = sizeof(DnsHeaderFlags); + uint16_t serialized_answers = 0; + uint16_t serialized_queries = 0; + + Buffer::OwnedImpl query_buffer{}; + Buffer::OwnedImpl answer_buffer{}; + + ENVOY_LOG(trace, "Building response for query ID [{}]", query_context->id_); + + for (const auto& query : query_context->queries_) { + if (!query->serialize(query_buffer)) { + ENVOY_LOG(debug, "Unable to serialize query record for {}", query->name_); + continue; + } + + // Serialize and account for each query's size. That said, there should be only one query. + ++serialized_queries; + total_buffer_size += query_buffer.length(); + + const auto& answers = query_context->answers_; + if (answers.empty()) { + continue; + } + const size_t num_answers = answers.size(); + + // Randomize the starting index if we have more than 8 records + size_t index = num_answers > MAX_RETURNED_RECORDS ? 
rng_.random() % num_answers : 0; + + while (serialized_answers < num_answers) { + const auto answer = std::next(answers.begin(), (index++ % num_answers)); + // Query names are limited to 255 characters. Since we are using ares to decode the encoded + // names, we should not end up with a non-conforming name here. + // + // See Section 2.3.4 of https://tools.ietf.org/html/rfc1035 + if (query->name_.size() > MAX_DNS_NAME_SIZE) { + query_context->counters_.record_name_overflow.inc(); + ENVOY_LOG( + debug, + "Query name '{}' is longer than the maximum permitted length. Skipping serialization", + query->name_); + continue; + } + if (answer->first != query->name_) { + continue; + } + + Buffer::OwnedImpl serialized_answer; + if (!answer->second->serialize(serialized_answer)) { + ENVOY_LOG(debug, "Unable to serialize answer record for {}", query->name_); + continue; + } + const uint64_t serialized_answer_length = serialized_answer.length(); + if ((total_buffer_size + serialized_answer_length) > MAX_DNS_RESPONSE_SIZE) { + break; + } + + ++serialized_answers; + total_buffer_size += serialized_answer_length; + answer_buffer.add(serialized_answer); + + if (serialized_answers == MAX_RETURNED_RECORDS) { + break; + } + } + } + + setResponseCode(query_context, serialized_queries, serialized_answers); + setDnsResponseFlags(query_context, serialized_queries, serialized_answers); + + // Build the response buffer for transmission to the client + buffer.writeBEInt(response_header_.id); + + uint16_t flags; + ::memcpy(&flags, static_cast(&response_header_.flags), sizeof(uint16_t)); + buffer.writeBEInt(flags); + + buffer.writeBEInt(response_header_.questions); + buffer.writeBEInt(response_header_.answers); + buffer.writeBEInt(response_header_.authority_rrs); + buffer.writeBEInt(response_header_.additional_rrs); + + // write the queries and answers + buffer.move(query_buffer); + buffer.move(answer_buffer); +} + +} // namespace DnsFilter +} // namespace UdpFilters +} // namespace Extensions 
+} // namespace Envoy diff --git a/source/extensions/filters/udp/dns_filter/dns_parser.h b/source/extensions/filters/udp/dns_filter/dns_parser.h new file mode 100644 index 0000000000000..f32d34e3ede8d --- /dev/null +++ b/source/extensions/filters/udp/dns_filter/dns_parser.h @@ -0,0 +1,279 @@ +#pragma once + +#include "envoy/buffer/buffer.h" +#include "envoy/common/platform.h" +#include "envoy/common/random_generator.h" +#include "envoy/network/address.h" +#include "envoy/network/dns.h" +#include "envoy/network/listener.h" + +#include "common/buffer/buffer_impl.h" +#include "common/stats/timespan_impl.h" + +namespace Envoy { +namespace Extensions { +namespace UdpFilters { +namespace DnsFilter { + +constexpr uint16_t DNS_RECORD_CLASS_IN = 1; +constexpr uint16_t DNS_RECORD_TYPE_A = 1; +constexpr uint16_t DNS_RECORD_TYPE_AAAA = 28; + +constexpr uint16_t DNS_RESPONSE_CODE_NO_ERROR = 0; +constexpr uint16_t DNS_RESPONSE_CODE_FORMAT_ERROR = 1; +constexpr uint16_t DNS_RESPONSE_CODE_NAME_ERROR = 3; +constexpr uint16_t DNS_RESPONSE_CODE_NOT_IMPLEMENTED = 4; + +/** + * BaseDnsRecord contains the fields and functions common to both query and answer records. + */ +class BaseDnsRecord { +public: + BaseDnsRecord(const std::string& rec_name, const uint16_t rec_type, const uint16_t rec_class) + : name_(rec_name), type_(rec_type), class_(rec_class) {} + virtual ~BaseDnsRecord() = default; + bool serializeName(Buffer::OwnedImpl& output); + virtual bool serialize(Buffer::OwnedImpl& output) PURE; + + const std::string name_; + const uint16_t type_; + const uint16_t class_; +}; + +/** + * DnsQueryRecord represents a query record parsed from a DNS request from a client. Each query + * record contains the domain requested and the flags dictating the type of record that is sought. 
+ */ +class DnsQueryRecord : public BaseDnsRecord { +public: + DnsQueryRecord(const std::string& rec_name, const uint16_t rec_type, const uint16_t rec_class) + : BaseDnsRecord(rec_name, rec_type, rec_class) {} + bool serialize(Buffer::OwnedImpl& output) override; + + std::unique_ptr query_time_ms_; +}; + +using DnsQueryRecordPtr = std::unique_ptr; +using DnsQueryPtrVec = std::vector; +using AddressConstPtrVec = std::vector; + +/** + * DnsAnswerRecord represents a single answer record for a name that is to be serialized and sent to + * a client. This class differs from the BaseDnsRecord and DnsQueryRecord because it contains + * additional fields for the TTL and address. + */ +class DnsAnswerRecord : public BaseDnsRecord { +public: + DnsAnswerRecord(const std::string& query_name, const uint16_t rec_type, const uint16_t rec_class, + const std::chrono::seconds ttl, Network::Address::InstanceConstSharedPtr ipaddr) + : BaseDnsRecord(query_name, rec_type, rec_class), ttl_(ttl), ip_addr_(ipaddr) {} + bool serialize(Buffer::OwnedImpl& output) override; + + const std::chrono::seconds ttl_; + const Network::Address::InstanceConstSharedPtr ip_addr_; +}; + +using DnsAnswerRecordPtr = std::unique_ptr; +using DnsAnswerMap = std::unordered_multimap; + +/** + * @brief This struct is used to hold pointers to the counters that are relevant to the + * parser. This is done to prevent dependency loops between the parser and filter headers + */ +struct DnsParserCounters { + Stats::Counter& underflow_counter; + Stats::Counter& record_name_overflow; + Stats::Counter& query_parsing_failure; + + DnsParserCounters(Stats::Counter& underflow, Stats::Counter& record_name, + Stats::Counter& query_parsing) + : underflow_counter(underflow), record_name_overflow(record_name), + query_parsing_failure(query_parsing) {} +}; + +/** + * DnsQueryContext contains all the data necessary for responding to a query from a given client. 
+ */ +class DnsQueryContext { +public: + DnsQueryContext(Network::Address::InstanceConstSharedPtr local, + Network::Address::InstanceConstSharedPtr peer, DnsParserCounters& counters, + uint64_t retry_count) + : local_(std::move(local)), peer_(std::move(peer)), counters_(counters), parse_status_(false), + response_code_(DNS_RESPONSE_CODE_NO_ERROR), retry_(retry_count) {} + + const Network::Address::InstanceConstSharedPtr local_; + const Network::Address::InstanceConstSharedPtr peer_; + DnsParserCounters& counters_; + bool parse_status_; + uint16_t response_code_; + uint64_t retry_; + uint16_t id_; + Network::DnsResolver::ResolutionStatus resolution_status_; + DnsQueryPtrVec queries_; + DnsAnswerMap answers_; +}; + +using DnsQueryContextPtr = std::unique_ptr; +using DnsFilterResolverCallback = std::function; + +/** + * This class orchestrates parsing a DNS query and building the response to be sent to a client. + */ +class DnsMessageParser : public Logger::Loggable { +public: + enum class DnsQueryParseState { + Init, + Flags, // 2 bytes + Questions, // 2 bytes + Answers, // 2 bytes + Authority, // 2 bytes + Authority2, // 2 bytes + Finish + }; + + // The flags have been verified with dig and this structure should not be modified. The flag order + // here does not match the RFC, but takes byte ordering into account so that serialization does + // not bitwise operations. 
+ PACKED_STRUCT(struct DnsHeaderFlags { + unsigned rcode : 4; // return code + unsigned cd : 1; // checking disabled + unsigned ad : 1; // authenticated data + unsigned z : 1; // z - bit (must be zero in queries per RFC1035) + unsigned ra : 1; // recursion available + unsigned rd : 1; // recursion desired + unsigned tc : 1; // truncated response + unsigned aa : 1; // authoritative answer + unsigned opcode : 4; // operation code + unsigned qr : 1; // query or response + }); + + /** + * Structure representing the DNS header as it appears in a packet + * See https://www.ietf.org/rfc/rfc1035.txt for more details + */ + PACKED_STRUCT(struct DnsHeader { + uint16_t id; + struct DnsHeaderFlags flags; + uint16_t questions; + uint16_t answers; + uint16_t authority_rrs; + uint16_t additional_rrs; + }); + + DnsMessageParser(bool recurse, TimeSource& timesource, uint64_t retry_count, + Random::RandomGenerator& random, Stats::Histogram& latency_histogram) + : recursion_available_(recurse), timesource_(timesource), retry_count_(retry_count), + query_latency_histogram_(latency_histogram), rng_(random) {} + + /** + * @brief Builds an Answer record for the active query. The active query transaction ID is at the + * top of a queue. This ID is sufficient enough to determine the answer records associated with + * the query + */ + DnsAnswerRecordPtr getResponseForQuery(); + + /** + * @param buffer the buffer containing the constructed DNS response to be sent to a client + */ + void buildResponseBuffer(DnsQueryContextPtr& query_context, Buffer::OwnedImpl& buffer); + + /** + * @brief parse a single query record from a client request + * + * @param buffer a reference to the incoming request object received by the listener + * @param offset the buffer offset at which parsing is to begin. This parameter is updated when + * one record is parsed from the buffer and returned to the caller. 
+ * @return DnsQueryRecordPtr a pointer to a DnsQueryRecord object containing all query data parsed + * from the buffer + */ + DnsQueryRecordPtr parseDnsQueryRecord(const Buffer::InstancePtr& buffer, uint64_t* offset); + + /** + * @brief parse a single answer record from a client request + * + * @param buffer a reference to a buffer containing a DNS response + * @param offset the buffer offset at which parsing is to begin. This parameter is updated when + * one record is parsed from the buffer and returned to the caller. + * @return DnsQueryRecordPtr a pointer to a DnsAnswerRecord object containing all answer data + * parsed from the buffer + */ + DnsAnswerRecordPtr parseDnsAnswerRecord(const Buffer::InstancePtr& buffer, uint64_t* offset); + + /** + * @brief Constructs a DNS Answer record for a given IP Address and stores the object in a map + * where the response is associated with query name + * + * @param query_record to which the answer is matched. + * @param ttl the TTL specifying how long the returned answer is cached + * @param ipaddr the address that is returned in the answer record + */ + void buildDnsAnswerRecord(DnsQueryContextPtr& context, const DnsQueryRecord& query_rec, + const std::chrono::seconds ttl, + Network::Address::InstanceConstSharedPtr ipaddr); + + /** + * @return uint16_t the response code flag value from a parsed dns object + */ + uint16_t getQueryResponseCode() { return static_cast(header_.flags.rcode); } + + /** + * @brief Parse the incoming query and create a context object for the filter + * + * @param client_request a structure containing addressing information and the buffer received + * from a client + */ + DnsQueryContextPtr createQueryContext(Network::UdpRecvData& client_request, + DnsParserCounters& counters); + /** + * @param buffer a reference to the incoming request object received by the listener + * @return bool true if all DNS records and flags were successfully parsed from the buffer + */ + bool 
parseDnsObject(DnsQueryContextPtr& context, const Buffer::InstancePtr& buffer); + +private: + /** + * @brief sets the response code returned to the client + * + * @param context the query context for which we are generating a response + * @param queries specify the number of query records contained in the response + * @param answers specify the number of answer records contained in the response + */ + void setResponseCode(DnsQueryContextPtr& context, const uint16_t serialized_queries, + const uint16_t serialized_answers); + + /** + * @brief sets the flags in the DNS header of the response sent to a client + * + * @param context the query context for which we are generating a response + * @param queries specify the number of query records contained in the response + * @param answers specify the number of answer records contained in the response + */ + void setDnsResponseFlags(DnsQueryContextPtr& context, const uint16_t questions, + const uint16_t answers); + + /** + * @brief Extracts a DNS query name from a buffer + * + * @param buffer the buffer from which the name is extracted + * @param available_bytes the size of the remaining bytes in the buffer on which we can operate + * @param name_offset the offset from which parsing begins and ends. 
The updated value is returned + * to the caller + */ + const std::string parseDnsNameRecord(const Buffer::InstancePtr& buffer, uint64_t* available_bytes, + uint64_t* name_offset); + + bool recursion_available_; + TimeSource& timesource_; + uint64_t retry_count_; + Stats::Histogram& query_latency_histogram_; + DnsHeader header_; + DnsHeader response_header_; + Random::RandomGenerator& rng_; +}; + +} // namespace DnsFilter +} // namespace UdpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/udp/udp_proxy/BUILD b/source/extensions/filters/udp/udp_proxy/BUILD index 0704d744ad648..834c8ed66a0a0 100644 --- a/source/extensions/filters/udp/udp_proxy/BUILD +++ b/source/extensions/filters/udp/udp_proxy/BUILD @@ -1,13 +1,13 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +envoy_extension_package() envoy_cc_library( name = "udp_proxy_filter_lib", @@ -20,7 +20,7 @@ envoy_cc_library( "//include/envoy/network:listener_interface", "//include/envoy/upstream:cluster_manager_interface", "//source/common/network:utility_lib", - "@envoy_api//envoy/config/filter/udp/udp_proxy/v2alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/udp/udp_proxy/v3:pkg_cc_proto", ], ) @@ -29,11 +29,10 @@ envoy_cc_extension( srcs = ["config.cc"], hdrs = ["config.h"], security_posture = "robust_to_untrusted_downstream", - status = "alpha", deps = [ ":udp_proxy_filter_lib", "//include/envoy/registry", "//include/envoy/server:filter_config_interface", - "@envoy_api//envoy/config/filter/udp/udp_proxy/v2alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/udp/udp_proxy/v3:pkg_cc_proto", ], ) diff --git a/source/extensions/filters/udp/udp_proxy/config.h b/source/extensions/filters/udp/udp_proxy/config.h index a82991f5dd57f..36dc5e2b75503 100644 --- 
a/source/extensions/filters/udp/udp_proxy/config.h +++ b/source/extensions/filters/udp/udp_proxy/config.h @@ -1,7 +1,7 @@ #pragma once -#include "envoy/config/filter/udp/udp_proxy/v2alpha/udp_proxy.pb.h" -#include "envoy/config/filter/udp/udp_proxy/v2alpha/udp_proxy.pb.validate.h" +#include "envoy/extensions/filters/udp/udp_proxy/v3/udp_proxy.pb.h" +#include "envoy/extensions/filters/udp/udp_proxy/v3/udp_proxy.pb.validate.h" #include "envoy/server/filter_config.h" #include "extensions/filters/udp/udp_proxy/udp_proxy_filter.h" @@ -24,7 +24,7 @@ class UdpProxyFilterConfigFactory auto shared_config = std::make_shared( context.clusterManager(), context.timeSource(), context.scope(), MessageUtil::downcastAndValidate< - const envoy::config::filter::udp::udp_proxy::v2alpha::UdpProxyConfig&>( + const envoy::extensions::filters::udp::udp_proxy::v3::UdpProxyConfig&>( config, context.messageValidationVisitor())); return [shared_config](Network::UdpListenerFilterManager& filter_manager, Network::UdpReadFilterCallbacks& callbacks) -> void { @@ -33,7 +33,7 @@ class UdpProxyFilterConfigFactory } ProtobufTypes::MessagePtr createEmptyConfigProto() override { - return std::make_unique(); + return std::make_unique(); } std::string name() const override { return "envoy.filters.udp_listener.udp_proxy"; } diff --git a/source/extensions/filters/udp/udp_proxy/udp_proxy_filter.cc b/source/extensions/filters/udp/udp_proxy/udp_proxy_filter.cc index 8afb8035dbf5a..095bc869f7e69 100644 --- a/source/extensions/filters/udp/udp_proxy/udp_proxy_filter.cc +++ b/source/extensions/filters/udp/udp_proxy/udp_proxy_filter.cc @@ -206,6 +206,8 @@ void UdpProxyFilter::ActiveSession::onReadReady() { if (result->getErrorCode() != Api::IoError::IoErrorCode::Again) { cluster_.cluster_stats_.sess_rx_errors_.inc(); } + // Flush out buffered data at the end of IO event. 
+ cluster_.filter_.read_callbacks_->udpListener().flush(); } void UdpProxyFilter::ActiveSession::write(const Buffer::Instance& buffer) { diff --git a/source/extensions/filters/udp/udp_proxy/udp_proxy_filter.h b/source/extensions/filters/udp/udp_proxy/udp_proxy_filter.h index d96eda5299953..90c1f345ac388 100644 --- a/source/extensions/filters/udp/udp_proxy/udp_proxy_filter.h +++ b/source/extensions/filters/udp/udp_proxy/udp_proxy_filter.h @@ -1,11 +1,12 @@ #pragma once -#include "envoy/config/filter/udp/udp_proxy/v2alpha/udp_proxy.pb.h" #include "envoy/event/file_event.h" #include "envoy/event/timer.h" +#include "envoy/extensions/filters/udp/udp_proxy/v3/udp_proxy.pb.h" #include "envoy/network/filter.h" #include "envoy/upstream/cluster_manager.h" +#include "common/network/socket_interface_impl.h" #include "common/network/utility.h" #include "absl/container/flat_hash_set.h" @@ -59,7 +60,7 @@ class UdpProxyFilterConfig { public: UdpProxyFilterConfig(Upstream::ClusterManager& cluster_manager, TimeSource& time_source, Stats::Scope& root_scope, - const envoy::config::filter::udp::udp_proxy::v2alpha::UdpProxyConfig& config) + const envoy::extensions::filters::udp::udp_proxy::v3::UdpProxyConfig& config) : cluster_manager_(cluster_manager), time_source_(time_source), cluster_(config.cluster()), session_timeout_(PROTOBUF_GET_MS_OR_DEFAULT(config, idle_timeout, 60 * 1000)), stats_(generateStats(config.stat_prefix(), root_scope)) {} @@ -221,11 +222,11 @@ class UdpProxyFilter : public Network::UdpListenerReadFilter, virtual Network::IoHandlePtr createIoHandle(const Upstream::HostConstSharedPtr& host) { // Virtual so this can be overridden in unit tests. 
- return host->address()->socket(Network::Address::SocketType::Datagram); + return Network::ioHandleForAddr(Network::Socket::Type::Datagram, host->address()); } // Upstream::ClusterUpdateCallbacks - void onClusterAddOrUpdate(Upstream::ThreadLocalCluster& cluster) override; + void onClusterAddOrUpdate(Upstream::ThreadLocalCluster& cluster) final; void onClusterRemoval(const std::string& cluster_name) override; const UdpProxyFilterConfigSharedPtr config_; diff --git a/source/extensions/grpc_credentials/BUILD b/source/extensions/grpc_credentials/BUILD index 6156949edef64..40a5e79b39d3b 100644 --- a/source/extensions/grpc_credentials/BUILD +++ b/source/extensions/grpc_credentials/BUILD @@ -1,16 +1,18 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +envoy_extension_package() envoy_cc_library( name = "well_known_names", hdrs = ["well_known_names.h"], + # well known names files are public as long as they exist. 
+ visibility = ["//visibility:public"], deps = [ "//source/common/singleton:const_singleton", ], diff --git a/source/extensions/grpc_credentials/aws_iam/BUILD b/source/extensions/grpc_credentials/aws_iam/BUILD index 2b8980e7651fc..ab920487e2641 100644 --- a/source/extensions/grpc_credentials/aws_iam/BUILD +++ b/source/extensions/grpc_credentials/aws_iam/BUILD @@ -1,14 +1,14 @@ -licenses(["notice"]) # Apache 2 - -# AWS IAM gRPC Credentials - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +# AWS IAM gRPC Credentials + +envoy_extension_package() envoy_cc_extension( name = "config", diff --git a/source/extensions/grpc_credentials/aws_iam/config.cc b/source/extensions/grpc_credentials/aws_iam/config.cc index 5f60eab1464f2..345d975fedbda 100644 --- a/source/extensions/grpc_credentials/aws_iam/config.cc +++ b/source/extensions/grpc_credentials/aws_iam/config.cc @@ -74,7 +74,7 @@ std::shared_ptr AwsIamGrpcCredentialsFactory::getChann std::string AwsIamGrpcCredentialsFactory::getRegion( const envoy::config::grpc_credential::v3::AwsIamConfig& config) { - std::unique_ptr region_provider; + Common::Aws::RegionProviderPtr region_provider; if (!config.region().empty()) { region_provider = std::make_unique(config.region()); } else { @@ -129,18 +129,15 @@ AwsIamHeaderAuthenticator::buildMessageToSign(absl::string_view service_url, void AwsIamHeaderAuthenticator::signedHeadersToMetadata( const Http::HeaderMap& headers, std::multimap& metadata) { - headers.iterate( - [](const Http::HeaderEntry& entry, void* context) -> Http::HeaderMap::Iterate { - auto* md = static_cast*>(context); - const auto& key = entry.key().getStringView(); - // Skip pseudo-headers - if (key.empty() || key[0] == ':') { - return Http::HeaderMap::Iterate::Continue; - } - md->emplace(key, entry.value().getStringView()); - return Http::HeaderMap::Iterate::Continue; - }, - &metadata); + 
headers.iterate([&metadata](const Http::HeaderEntry& entry) -> Http::HeaderMap::Iterate { + const auto& key = entry.key().getStringView(); + // Skip pseudo-headers + if (key.empty() || key[0] == ':') { + return Http::HeaderMap::Iterate::Continue; + } + metadata.emplace(key, entry.value().getStringView()); + return Http::HeaderMap::Iterate::Continue; + }); } REGISTER_FACTORY(AwsIamGrpcCredentialsFactory, Grpc::GoogleGrpcCredentialsFactory); diff --git a/source/extensions/grpc_credentials/example/BUILD b/source/extensions/grpc_credentials/example/BUILD index b62762a2030da..8c43f6c275323 100644 --- a/source/extensions/grpc_credentials/example/BUILD +++ b/source/extensions/grpc_credentials/example/BUILD @@ -1,20 +1,26 @@ -licenses(["notice"]) # Apache 2 - -# Example gRPC Credentials - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +# Example gRPC Credentials + +envoy_extension_package() envoy_cc_library( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], external_deps = ["grpc"], + # Legacy test use. 
+ visibility = [ + "//source/extensions:__subpackages__", + "//test/common/grpc:__subpackages__", + "//test/extensions:__subpackages__", + ], deps = [ "//include/envoy/grpc:google_grpc_creds_interface", "//include/envoy/registry", diff --git a/source/extensions/grpc_credentials/file_based_metadata/BUILD b/source/extensions/grpc_credentials/file_based_metadata/BUILD index f1feb60d31963..d6c8b8d5e5fb6 100644 --- a/source/extensions/grpc_credentials/file_based_metadata/BUILD +++ b/source/extensions/grpc_credentials/file_based_metadata/BUILD @@ -1,14 +1,14 @@ -licenses(["notice"]) # Apache 2 - -# File Based Metadata gRPC Credentials - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +# File Based Metadata gRPC Credentials + +envoy_extension_package() envoy_cc_extension( name = "config", diff --git a/source/extensions/health_checkers/BUILD b/source/extensions/health_checkers/BUILD index 6156949edef64..40a5e79b39d3b 100644 --- a/source/extensions/health_checkers/BUILD +++ b/source/extensions/health_checkers/BUILD @@ -1,16 +1,18 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +envoy_extension_package() envoy_cc_library( name = "well_known_names", hdrs = ["well_known_names.h"], + # well known names files are public as long as they exist. + visibility = ["//visibility:public"], deps = [ "//source/common/singleton:const_singleton", ], diff --git a/source/extensions/health_checkers/redis/BUILD b/source/extensions/health_checkers/redis/BUILD index 0dbdfb73694ef..3bc89797ab32f 100644 --- a/source/extensions/health_checkers/redis/BUILD +++ b/source/extensions/health_checkers/redis/BUILD @@ -1,15 +1,15 @@ -licenses(["notice"]) # Apache 2 - -# Redis custom health checker. 
- load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +# Redis custom health checker. + +envoy_extension_package() envoy_cc_library( name = "redis", @@ -49,6 +49,7 @@ envoy_cc_library( name = "utility", hdrs = ["utility.h"], deps = [ + "//source/common/config:utility_lib", "//source/common/protobuf", "//source/common/protobuf:utility_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", diff --git a/source/extensions/health_checkers/redis/redis.cc b/source/extensions/health_checkers/redis/redis.cc index 738092a00a144..7eb66b5f8b0c8 100644 --- a/source/extensions/health_checkers/redis/redis.cc +++ b/source/extensions/health_checkers/redis/redis.cc @@ -14,12 +14,14 @@ namespace RedisHealthChecker { RedisHealthChecker::RedisHealthChecker( const Upstream::Cluster& cluster, const envoy::config::core::v3::HealthCheck& config, const envoy::config::health_checker::redis::v2::Redis& redis_config, - Event::Dispatcher& dispatcher, Runtime::Loader& runtime, Runtime::RandomGenerator& random, + Event::Dispatcher& dispatcher, Runtime::Loader& runtime, Random::RandomGenerator& random, Upstream::HealthCheckEventLoggerPtr&& event_logger, Api::Api& api, Extensions::NetworkFilters::Common::Redis::Client::ClientFactory& client_factory) : HealthCheckerImplBase(cluster, config, dispatcher, runtime, random, std::move(event_logger)), client_factory_(client_factory), key_(redis_config.key()), - auth_password_(NetworkFilters::RedisProxy::ProtocolOptionsConfigImpl::auth_password( + auth_username_( + NetworkFilters::RedisProxy::ProtocolOptionsConfigImpl::authUsername(cluster.info(), api)), + auth_password_(NetworkFilters::RedisProxy::ProtocolOptionsConfigImpl::authPassword( cluster.info(), api)) { if (!key_.empty()) { type_ = Type::Exists; @@ -65,7 +67,7 @@ void RedisHealthChecker::RedisActiveHealthCheckSession::onInterval() { if (!client_) { client_ = 
parent_.client_factory_.create( host_, parent_.dispatcher_, *this, redis_command_stats_, - parent_.cluster_.info()->statsScope(), parent_.auth_password_); + parent_.cluster_.info()->statsScope(), parent_.auth_username_, parent_.auth_password_); client_->addConnectionCallbacks(*this); } diff --git a/source/extensions/health_checkers/redis/redis.h b/source/extensions/health_checkers/redis/redis.h index 6284c475eda59..73088832f18c4 100644 --- a/source/extensions/health_checkers/redis/redis.h +++ b/source/extensions/health_checkers/redis/redis.h @@ -28,7 +28,7 @@ class RedisHealthChecker : public Upstream::HealthCheckerImplBase { RedisHealthChecker( const Upstream::Cluster& cluster, const envoy::config::core::v3::HealthCheck& config, const envoy::config::health_checker::redis::v2::Redis& redis_config, - Event::Dispatcher& dispatcher, Runtime::Loader& runtime, Runtime::RandomGenerator& random, + Event::Dispatcher& dispatcher, Runtime::Loader& runtime, Random::RandomGenerator& random, Upstream::HealthCheckEventLoggerPtr&& event_logger, Api::Api& api, Extensions::NetworkFilters::Common::Redis::Client::ClientFactory& client_factory); @@ -75,7 +75,7 @@ class RedisHealthChecker : public Upstream::HealthCheckerImplBase { return true; } // Redirection errors are treated as check successes. 
NetworkFilters::Common::Redis::Client::ReadPolicy readPolicy() const override { - return NetworkFilters::Common::Redis::Client::ReadPolicy::Master; + return NetworkFilters::Common::Redis::Client::ReadPolicy::Primary; } // Batching @@ -125,6 +125,7 @@ class RedisHealthChecker : public Upstream::HealthCheckerImplBase { Extensions::NetworkFilters::Common::Redis::Client::ClientFactory& client_factory_; Type type_; const std::string key_; + const std::string auth_username_; const std::string auth_password_; }; diff --git a/source/extensions/retry/host/BUILD b/source/extensions/internal_redirect/BUILD similarity index 62% rename from source/extensions/retry/host/BUILD rename to source/extensions/internal_redirect/BUILD index 6156949edef64..40a5e79b39d3b 100644 --- a/source/extensions/retry/host/BUILD +++ b/source/extensions/internal_redirect/BUILD @@ -1,16 +1,18 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +envoy_extension_package() envoy_cc_library( name = "well_known_names", hdrs = ["well_known_names.h"], + # well known names files are public as long as they exist. 
+ visibility = ["//visibility:public"], deps = [ "//source/common/singleton:const_singleton", ], diff --git a/source/extensions/internal_redirect/allow_listed_routes/BUILD b/source/extensions/internal_redirect/allow_listed_routes/BUILD new file mode 100644 index 0000000000000..6fe252ddf6bb6 --- /dev/null +++ b/source/extensions/internal_redirect/allow_listed_routes/BUILD @@ -0,0 +1,40 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", + "envoy_cc_library", + "envoy_extension_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_extension_package() + +envoy_cc_library( + name = "allow_listed_routes_lib", + hdrs = ["allow_listed_routes.h"], + deps = [ + "//include/envoy/router:internal_redirect_interface", + "//include/envoy/stream_info:filter_state_interface", + "//source/extensions/internal_redirect:well_known_names", + "@envoy_api//envoy/extensions/internal_redirect/allow_listed_routes/v3:pkg_cc_proto", + ], +) + +envoy_cc_extension( + name = "config", + srcs = ["config.cc"], + hdrs = ["config.h"], + security_posture = "robust_to_untrusted_downstream_and_upstream", + # TODO(#9953) clean up by moving the redirect test to extensions. 
+ visibility = [ + "//:extension_config", + "//test/integration:__subpackages__", + ], + deps = [ + ":allow_listed_routes_lib", + "//include/envoy/registry", + "//include/envoy/router:internal_redirect_interface", + "//source/extensions/internal_redirect:well_known_names", + "@envoy_api//envoy/extensions/internal_redirect/allow_listed_routes/v3:pkg_cc_proto", + ], +) diff --git a/source/extensions/internal_redirect/allow_listed_routes/allow_listed_routes.h b/source/extensions/internal_redirect/allow_listed_routes/allow_listed_routes.h new file mode 100644 index 0000000000000..72d8d605db0f6 --- /dev/null +++ b/source/extensions/internal_redirect/allow_listed_routes/allow_listed_routes.h @@ -0,0 +1,37 @@ +#pragma once + +#include "envoy/extensions/internal_redirect/allow_listed_routes/v3/allow_listed_routes_config.pb.h" +#include "envoy/router/internal_redirect.h" +#include "envoy/stream_info/filter_state.h" + +#include "extensions/internal_redirect/well_known_names.h" + +#include "absl/container/flat_hash_set.h" +#include "absl/strings/string_view.h" + +namespace Envoy { +namespace Extensions { +namespace InternalRedirect { + +class AllowListedRoutesPredicate : public Router::InternalRedirectPredicate { +public: + explicit AllowListedRoutesPredicate( + const envoy::extensions::internal_redirect::allow_listed_routes::v3::AllowListedRoutesConfig& + config) + : allowed_routes_(config.allowed_route_names().begin(), config.allowed_route_names().end()) {} + + bool acceptTargetRoute(StreamInfo::FilterState&, absl::string_view route_name, bool, + bool) override { + return allowed_routes_.contains(route_name); + } + + absl::string_view name() const override { + return InternalRedirectPredicateValues::get().AllowListedRoutesPredicate; + } + + const absl::flat_hash_set allowed_routes_; +}; + +} // namespace InternalRedirect +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/internal_redirect/allow_listed_routes/config.cc 
b/source/extensions/internal_redirect/allow_listed_routes/config.cc new file mode 100644 index 0000000000000..55c2d5af81cec --- /dev/null +++ b/source/extensions/internal_redirect/allow_listed_routes/config.cc @@ -0,0 +1,14 @@ +#include "extensions/internal_redirect/allow_listed_routes/config.h" + +#include "envoy/registry/registry.h" +#include "envoy/router/internal_redirect.h" + +namespace Envoy { +namespace Extensions { +namespace InternalRedirect { + +REGISTER_FACTORY(AllowListedRoutesPredicateFactory, Router::InternalRedirectPredicateFactory); + +} // namespace InternalRedirect +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/internal_redirect/allow_listed_routes/config.h b/source/extensions/internal_redirect/allow_listed_routes/config.h new file mode 100644 index 0000000000000..1a122f4f31b6a --- /dev/null +++ b/source/extensions/internal_redirect/allow_listed_routes/config.h @@ -0,0 +1,40 @@ +#pragma once + +#include "envoy/extensions/internal_redirect/allow_listed_routes/v3/allow_listed_routes_config.pb.h" +#include "envoy/extensions/internal_redirect/allow_listed_routes/v3/allow_listed_routes_config.pb.validate.h" +#include "envoy/router/internal_redirect.h" + +#include "common/protobuf/message_validator_impl.h" +#include "common/protobuf/utility.h" + +#include "extensions/internal_redirect/allow_listed_routes/allow_listed_routes.h" +#include "extensions/internal_redirect/well_known_names.h" + +namespace Envoy { +namespace Extensions { +namespace InternalRedirect { + +class AllowListedRoutesPredicateFactory : public Router::InternalRedirectPredicateFactory { +public: + Router::InternalRedirectPredicateSharedPtr + createInternalRedirectPredicate(const Protobuf::Message& config, absl::string_view) override { + auto allow_listed_routes_config = + MessageUtil::downcastAndValidate( + config, ProtobufMessage::getStrictValidationVisitor()); + return std::make_shared(allow_listed_routes_config); + } + + std::string name() const 
override { + return InternalRedirectPredicateValues::get().AllowListedRoutesPredicate; + } + + ProtobufTypes::MessagePtr createEmptyConfigProto() override { + return std::make_unique< + envoy::extensions::internal_redirect::allow_listed_routes::v3::AllowListedRoutesConfig>(); + } +}; + +} // namespace InternalRedirect +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/internal_redirect/previous_routes/BUILD b/source/extensions/internal_redirect/previous_routes/BUILD new file mode 100644 index 0000000000000..58a0878f09573 --- /dev/null +++ b/source/extensions/internal_redirect/previous_routes/BUILD @@ -0,0 +1,40 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", + "envoy_cc_library", + "envoy_extension_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_extension_package() + +envoy_cc_library( + name = "previous_routes_lib", + srcs = ["previous_routes.cc"], + hdrs = ["previous_routes.h"], + deps = [ + "//include/envoy/router:internal_redirect_interface", + "//include/envoy/stream_info:filter_state_interface", + "//source/extensions/internal_redirect:well_known_names", + ], +) + +envoy_cc_extension( + name = "config", + srcs = ["config.cc"], + hdrs = ["config.h"], + security_posture = "robust_to_untrusted_downstream_and_upstream", + # TODO(#9953) clean up by moving the redirect test to extensions. 
+ visibility = [ + "//:extension_config", + "//test/integration:__subpackages__", + ], + deps = [ + ":previous_routes_lib", + "//include/envoy/registry", + "//include/envoy/router:internal_redirect_interface", + "//source/extensions/internal_redirect:well_known_names", + "@envoy_api//envoy/extensions/internal_redirect/previous_routes/v3:pkg_cc_proto", + ], +) diff --git a/source/extensions/internal_redirect/previous_routes/config.cc b/source/extensions/internal_redirect/previous_routes/config.cc new file mode 100644 index 0000000000000..d5d4b67c491ec --- /dev/null +++ b/source/extensions/internal_redirect/previous_routes/config.cc @@ -0,0 +1,14 @@ +#include "extensions/internal_redirect/previous_routes/config.h" + +#include "envoy/registry/registry.h" +#include "envoy/router/internal_redirect.h" + +namespace Envoy { +namespace Extensions { +namespace InternalRedirect { + +REGISTER_FACTORY(PreviousRoutesPredicateFactory, Router::InternalRedirectPredicateFactory); + +} // namespace InternalRedirect +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/internal_redirect/previous_routes/config.h b/source/extensions/internal_redirect/previous_routes/config.h new file mode 100644 index 0000000000000..21ccb3c1646ba --- /dev/null +++ b/source/extensions/internal_redirect/previous_routes/config.h @@ -0,0 +1,34 @@ +#pragma once + +#include "envoy/extensions/internal_redirect/previous_routes/v3/previous_routes_config.pb.h" +#include "envoy/extensions/internal_redirect/previous_routes/v3/previous_routes_config.pb.validate.h" +#include "envoy/router/internal_redirect.h" + +#include "extensions/internal_redirect/previous_routes/previous_routes.h" +#include "extensions/internal_redirect/well_known_names.h" + +namespace Envoy { +namespace Extensions { +namespace InternalRedirect { + +class PreviousRoutesPredicateFactory : public Router::InternalRedirectPredicateFactory { +public: + Router::InternalRedirectPredicateSharedPtr + 
createInternalRedirectPredicate(const Protobuf::Message&, + absl::string_view current_route_name) override { + return std::make_shared(current_route_name); + } + + std::string name() const override { + return InternalRedirectPredicateValues::get().PreviousRoutesPredicate; + } + + ProtobufTypes::MessagePtr createEmptyConfigProto() override { + return std::make_unique< + envoy::extensions::internal_redirect::previous_routes::v3::PreviousRoutesConfig>(); + } +}; + +} // namespace InternalRedirect +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/internal_redirect/previous_routes/previous_routes.cc b/source/extensions/internal_redirect/previous_routes/previous_routes.cc new file mode 100644 index 0000000000000..a29187e29d43c --- /dev/null +++ b/source/extensions/internal_redirect/previous_routes/previous_routes.cc @@ -0,0 +1,52 @@ +#include "extensions/internal_redirect/previous_routes/previous_routes.h" + +#include "envoy/router/internal_redirect.h" +#include "envoy/stream_info/filter_state.h" + +#include "absl/container/flat_hash_set.h" + +namespace Envoy { +namespace Extensions { +namespace InternalRedirect { + +namespace { + +constexpr absl::string_view PreviousRoutesPredicateStateNamePrefix = + "envoy.internal_redirect.previous_routes_predicate_state"; + +class PreviousRoutesPredicateState : public StreamInfo::FilterState::Object { +public: + PreviousRoutesPredicateState() = default; + // Disallow copy so that we don't accidentally take a copy of the state + // through FilterState::getDataMutable, which will cause confusing bug that + // states are not updated in the original copy. 
+ PreviousRoutesPredicateState(const PreviousRoutesPredicateState&) = delete; + PreviousRoutesPredicateState& operator=(const PreviousRoutesPredicateState&) = delete; + + bool insertRouteIfNotPresent(absl::string_view route) { + return previous_routes_.insert(std::string(route)).second; + } + +private: + absl::flat_hash_set previous_routes_; +}; + +} // namespace + +bool PreviousRoutesPredicate::acceptTargetRoute(StreamInfo::FilterState& filter_state, + absl::string_view route_name, bool, bool) { + auto filter_state_name = + absl::StrCat(PreviousRoutesPredicateStateNamePrefix, ".", current_route_name_); + if (!filter_state.hasData(filter_state_name)) { + filter_state.setData(filter_state_name, std::make_unique(), + StreamInfo::FilterState::StateType::Mutable, + StreamInfo::FilterState::LifeSpan::Request); + } + auto& predicate_state = + filter_state.getDataMutable(filter_state_name); + return predicate_state.insertRouteIfNotPresent(route_name); +} + +} // namespace InternalRedirect +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/internal_redirect/previous_routes/previous_routes.h b/source/extensions/internal_redirect/previous_routes/previous_routes.h new file mode 100644 index 0000000000000..b79f4f8b17544 --- /dev/null +++ b/source/extensions/internal_redirect/previous_routes/previous_routes.h @@ -0,0 +1,32 @@ +#pragma once + +#include "envoy/router/internal_redirect.h" +#include "envoy/stream_info/filter_state.h" + +#include "extensions/internal_redirect/well_known_names.h" + +#include "absl/strings/string_view.h" + +namespace Envoy { +namespace Extensions { +namespace InternalRedirect { + +class PreviousRoutesPredicate : public Router::InternalRedirectPredicate { +public: + explicit PreviousRoutesPredicate(absl::string_view current_route_name) + : current_route_name_(current_route_name) {} + + bool acceptTargetRoute(StreamInfo::FilterState& filter_state, absl::string_view route_name, bool, + bool) override; + + absl::string_view 
name() const override { + return InternalRedirectPredicateValues::get().PreviousRoutesPredicate; + } + +private: + const std::string current_route_name_; +}; + +} // namespace InternalRedirect +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/internal_redirect/safe_cross_scheme/BUILD b/source/extensions/internal_redirect/safe_cross_scheme/BUILD new file mode 100644 index 0000000000000..d957fa57673f1 --- /dev/null +++ b/source/extensions/internal_redirect/safe_cross_scheme/BUILD @@ -0,0 +1,39 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", + "envoy_cc_library", + "envoy_extension_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_extension_package() + +envoy_cc_library( + name = "safe_cross_scheme_lib", + hdrs = ["safe_cross_scheme.h"], + deps = [ + "//include/envoy/router:internal_redirect_interface", + "//include/envoy/stream_info:filter_state_interface", + "//source/extensions/internal_redirect:well_known_names", + ], +) + +envoy_cc_extension( + name = "config", + srcs = ["config.cc"], + hdrs = ["config.h"], + security_posture = "robust_to_untrusted_downstream_and_upstream", + # TODO(#9953) clean up by moving the redirect test to extensions. 
+ visibility = [ + "//:extension_config", + "//test/integration:__subpackages__", + ], + deps = [ + ":safe_cross_scheme_lib", + "//include/envoy/registry", + "//include/envoy/router:internal_redirect_interface", + "//source/extensions/internal_redirect:well_known_names", + "@envoy_api//envoy/extensions/internal_redirect/safe_cross_scheme/v3:pkg_cc_proto", + ], +) diff --git a/source/extensions/internal_redirect/safe_cross_scheme/config.cc b/source/extensions/internal_redirect/safe_cross_scheme/config.cc new file mode 100644 index 0000000000000..43b7664fd7ffc --- /dev/null +++ b/source/extensions/internal_redirect/safe_cross_scheme/config.cc @@ -0,0 +1,14 @@ +#include "extensions/internal_redirect/safe_cross_scheme/config.h" + +#include "envoy/registry/registry.h" +#include "envoy/router/internal_redirect.h" + +namespace Envoy { +namespace Extensions { +namespace InternalRedirect { + +REGISTER_FACTORY(SafeCrossSchemePredicateFactory, Router::InternalRedirectPredicateFactory); + +} // namespace InternalRedirect +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/internal_redirect/safe_cross_scheme/config.h b/source/extensions/internal_redirect/safe_cross_scheme/config.h new file mode 100644 index 0000000000000..49a8fdfa8b69b --- /dev/null +++ b/source/extensions/internal_redirect/safe_cross_scheme/config.h @@ -0,0 +1,32 @@ +#pragma once + +#include "envoy/extensions/internal_redirect/safe_cross_scheme/v3/safe_cross_scheme_config.pb.h" +#include "envoy/router/internal_redirect.h" + +#include "extensions/internal_redirect/safe_cross_scheme/safe_cross_scheme.h" +#include "extensions/internal_redirect/well_known_names.h" + +namespace Envoy { +namespace Extensions { +namespace InternalRedirect { + +class SafeCrossSchemePredicateFactory : public Router::InternalRedirectPredicateFactory { +public: + Router::InternalRedirectPredicateSharedPtr + createInternalRedirectPredicate(const Protobuf::Message&, absl::string_view) override { + return 
std::make_shared(); + } + + std::string name() const override { + return InternalRedirectPredicateValues::get().SafeCrossSchemePredicate; + } + + ProtobufTypes::MessagePtr createEmptyConfigProto() override { + return std::make_unique< + envoy::extensions::internal_redirect::safe_cross_scheme::v3::SafeCrossSchemeConfig>(); + } +}; + +} // namespace InternalRedirect +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/internal_redirect/safe_cross_scheme/safe_cross_scheme.h b/source/extensions/internal_redirect/safe_cross_scheme/safe_cross_scheme.h new file mode 100644 index 0000000000000..fb33e58b6fdd3 --- /dev/null +++ b/source/extensions/internal_redirect/safe_cross_scheme/safe_cross_scheme.h @@ -0,0 +1,28 @@ +#pragma once + +#include "envoy/router/internal_redirect.h" +#include "envoy/stream_info/filter_state.h" + +#include "extensions/internal_redirect/well_known_names.h" + +#include "absl/strings/string_view.h" + +namespace Envoy { +namespace Extensions { +namespace InternalRedirect { + +class SafeCrossSchemePredicate : public Router::InternalRedirectPredicate { +public: + bool acceptTargetRoute(StreamInfo::FilterState&, absl::string_view, bool downstream_is_https, + bool target_is_https) override { + return downstream_is_https || !target_is_https; + } + + absl::string_view name() const override { + return InternalRedirectPredicateValues::get().SafeCrossSchemePredicate; + } +}; + +} // namespace InternalRedirect +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/internal_redirect/well_known_names.h b/source/extensions/internal_redirect/well_known_names.h new file mode 100644 index 0000000000000..003e270329d6a --- /dev/null +++ b/source/extensions/internal_redirect/well_known_names.h @@ -0,0 +1,27 @@ +#pragma once + +#include + +#include "common/singleton/const_singleton.h" + +namespace Envoy { +namespace Extensions { +namespace InternalRedirect { + +/** + * Well-known internal redirect predicate names. 
+ */ +class InternalRedirectPredicatesNameValues { +public: + const std::string AllowListedRoutesPredicate = + "envoy.internal_redirect_predicates.allow_listed_routes"; + const std::string PreviousRoutesPredicate = "envoy.internal_redirect_predicates.previous_routes"; + const std::string SafeCrossSchemePredicate = + "envoy.internal_redirect_predicates.safe_cross_scheme"; +}; + +using InternalRedirectPredicateValues = ConstSingleton; + +} // namespace InternalRedirect +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/quic_listeners/quiche/BUILD b/source/extensions/quic_listeners/quiche/BUILD index dde1927d81abc..31a4ff5dec98b 100644 --- a/source/extensions/quic_listeners/quiche/BUILD +++ b/source/extensions/quic_listeners/quiche/BUILD @@ -1,12 +1,13 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +envoy_extension_package() envoy_cc_library( name = "envoy_quic_alarm_lib", @@ -49,36 +50,63 @@ envoy_cc_library( ) envoy_cc_library( - name = "envoy_quic_packet_writer_lib", - srcs = ["envoy_quic_packet_writer.cc"], - hdrs = ["envoy_quic_packet_writer.h"], + name = "envoy_quic_proof_source_base_lib", + srcs = ["envoy_quic_proof_source_base.cc"], + hdrs = ["envoy_quic_proof_source_base.h"], external_deps = ["quiche_quic_platform"], tags = ["nofips"], deps = [ ":envoy_quic_utils_lib", - "@com_googlesource_quiche//:quic_core_packet_writer_interface_lib", + "@com_googlesource_quiche//:quic_core_crypto_certificate_view_lib", + "@com_googlesource_quiche//:quic_core_crypto_crypto_handshake_lib", + "@com_googlesource_quiche//:quic_core_crypto_proof_source_interface_lib", + "@com_googlesource_quiche//:quic_core_data_lib", + "@com_googlesource_quiche//:quic_core_versions_lib", ], ) envoy_cc_library( name = "envoy_quic_proof_source_lib", - hdrs = ["envoy_quic_fake_proof_source.h"], 
+ srcs = ["envoy_quic_proof_source.cc"], + hdrs = ["envoy_quic_proof_source.h"], + external_deps = ["ssl"], + tags = ["nofips"], + deps = [ + ":envoy_quic_proof_source_base_lib", + ":envoy_quic_utils_lib", + ":quic_io_handle_wrapper_lib", + ":quic_transport_socket_factory_lib", + "//include/envoy/ssl:tls_certificate_config_interface", + "//source/extensions/transport_sockets:well_known_names", + "//source/server:connection_handler_lib", + "@com_googlesource_quiche//:quic_core_crypto_certificate_view_lib", + ], +) + +envoy_cc_library( + name = "envoy_quic_proof_verifier_base_lib", + srcs = ["envoy_quic_proof_verifier_base.cc"], + hdrs = ["envoy_quic_proof_verifier_base.h"], external_deps = ["quiche_quic_platform"], tags = ["nofips"], deps = [ - "@com_googlesource_quiche//:quic_core_crypto_proof_source_interface_lib", + ":envoy_quic_utils_lib", + "@com_googlesource_quiche//:quic_core_crypto_certificate_view_lib", + "@com_googlesource_quiche//:quic_core_crypto_crypto_handshake_lib", "@com_googlesource_quiche//:quic_core_versions_lib", ], ) envoy_cc_library( name = "envoy_quic_proof_verifier_lib", - hdrs = ["envoy_quic_fake_proof_verifier.h"], + srcs = ["envoy_quic_proof_verifier.cc"], + hdrs = ["envoy_quic_proof_verifier.h"], external_deps = ["quiche_quic_platform"], tags = ["nofips"], deps = [ - "@com_googlesource_quiche//:quic_core_crypto_crypto_handshake_lib", - "@com_googlesource_quiche//:quic_core_versions_lib", + ":envoy_quic_proof_verifier_base_lib", + ":envoy_quic_utils_lib", + "//source/extensions/transport_sockets/tls:context_lib", ], ) @@ -153,6 +181,7 @@ envoy_cc_library( ], tags = ["nofips"], deps = [ + ":envoy_quic_crypto_server_stream_lib", ":envoy_quic_stream_lib", ":envoy_quic_utils_lib", ":quic_filter_manager_connection_lib", @@ -233,6 +262,7 @@ envoy_cc_library( ":envoy_quic_packet_writer_lib", "//include/envoy/event:dispatcher_interface", "//source/common/network:socket_option_factory_lib", + "//source/common/network:udp_packet_writer_handler_lib", 
"@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], ) @@ -247,6 +277,7 @@ envoy_cc_library( ":envoy_quic_server_connection_lib", ":envoy_quic_server_session_lib", "//include/envoy/network:listener_interface", + "//source/common/http:utility_lib", "//source/server:connection_handler_lib", "@com_googlesource_quiche//:quic_core_server_lib", "@com_googlesource_quiche//:quic_core_utils_lib", @@ -271,9 +302,11 @@ envoy_cc_library( ":envoy_quic_packet_writer_lib", ":envoy_quic_proof_source_lib", ":envoy_quic_utils_lib", + ":udp_gso_batch_writer_lib", "//include/envoy/network:listener_interface", "//source/common/network:listener_lib", "//source/common/protobuf:utility_lib", + "//source/common/runtime:runtime_lib", "//source/server:connection_handler_lib", "@envoy_api//envoy/config/listener/v3:pkg_cc_proto", ], @@ -284,6 +317,12 @@ envoy_cc_library( srcs = ["active_quic_listener_config.cc"], hdrs = ["active_quic_listener_config.h"], tags = ["nofips"], + # TODO(#9953) this should be cleaned up + visibility = [ + "//source/extensions:__subpackages__", + "//test/extensions:__subpackages__", + "//test/server:__subpackages__", + ], deps = [ ":active_quic_listener_lib", "//include/envoy/registry", @@ -295,7 +334,10 @@ envoy_cc_library( name = "envoy_quic_utils_lib", srcs = ["envoy_quic_utils.cc"], hdrs = ["envoy_quic_utils.h"], - external_deps = ["quiche_quic_platform"], + external_deps = [ + "quiche_quic_platform", + "ssl", + ], tags = ["nofips"], deps = [ "//include/envoy/http:codec_interface", @@ -308,10 +350,11 @@ envoy_cc_library( ], ) -envoy_cc_library( +envoy_cc_extension( name = "quic_transport_socket_factory_lib", srcs = ["quic_transport_socket_factory.cc"], hdrs = ["quic_transport_socket_factory.h"], + security_posture = "unknown", tags = ["nofips"], deps = [ "//include/envoy/network:transport_socket_interface", @@ -320,6 +363,66 @@ envoy_cc_library( "//source/common/common:assert_lib", "//source/extensions/transport_sockets:well_known_names", 
"//source/extensions/transport_sockets/tls:context_config_lib", - "@envoy_api//envoy/extensions/transport_sockets/tls/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/transport_sockets/quic/v3:pkg_cc_proto", + ], +) + +envoy_cc_library( + name = "envoy_quic_packet_writer_lib", + srcs = ["envoy_quic_packet_writer.cc"], + hdrs = ["envoy_quic_packet_writer.h"], + external_deps = ["quiche_quic_platform"], + tags = ["nofips"], + deps = [ + ":envoy_quic_utils_lib", + "@com_googlesource_quiche//:quic_core_packet_writer_interface_lib", + ], +) + +envoy_cc_library( + name = "udp_gso_batch_writer_lib", + srcs = ["udp_gso_batch_writer.cc"], + hdrs = ["udp_gso_batch_writer.h"], + external_deps = ["quiche_quic_platform"], + tags = ["nofips"], + visibility = [ + "//test/common/network:__subpackages__", + "//test/extensions:__subpackages__", + ], + deps = [ + ":envoy_quic_utils_lib", + "//include/envoy/network:udp_packet_writer_handler_interface", + "//source/common/network:io_socket_error_lib", + "//source/common/protobuf:utility_lib", + "//source/common/runtime:runtime_lib", + "@com_googlesource_quiche//:quic_core_batch_writer_gso_batch_writer_lib", + ], +) + +envoy_cc_library( + name = "udp_gso_batch_writer_config_lib", + srcs = ["udp_gso_batch_writer_config.cc"], + hdrs = ["udp_gso_batch_writer_config.h"], + tags = ["nofips"], + visibility = [ + "//test/server:__subpackages__", + ], + deps = [ + ":udp_gso_batch_writer_lib", + "//include/envoy/network:udp_packet_writer_config_interface", + "//include/envoy/registry", + "//source/common/api:os_sys_calls_lib", + "@envoy_api//envoy/config/listener/v3:pkg_cc_proto", + ], +) + +envoy_cc_library( + name = "envoy_quic_crypto_server_stream_lib", + srcs = ["envoy_quic_crypto_server_stream.cc"], + hdrs = ["envoy_quic_crypto_server_stream.h"], + tags = ["nofips"], + deps = [ + ":envoy_quic_proof_source_lib", + "@com_googlesource_quiche//:quic_core_http_spdy_session_lib", ], ) diff --git 
a/source/extensions/quic_listeners/quiche/active_quic_listener.cc b/source/extensions/quic_listeners/quiche/active_quic_listener.cc index 8ab780021d621..eda0b7210e729 100644 --- a/source/extensions/quic_listeners/quiche/active_quic_listener.cc +++ b/source/extensions/quic_listeners/quiche/active_quic_listener.cc @@ -1,5 +1,7 @@ #include "extensions/quic_listeners/quiche/active_quic_listener.h" +#include "envoy/network/exception.h" + #if defined(__linux__) #include #endif @@ -9,9 +11,10 @@ #include "extensions/quic_listeners/quiche/envoy_quic_alarm_factory.h" #include "extensions/quic_listeners/quiche/envoy_quic_connection_helper.h" #include "extensions/quic_listeners/quiche/envoy_quic_dispatcher.h" -#include "extensions/quic_listeners/quiche/envoy_quic_fake_proof_source.h" -#include "extensions/quic_listeners/quiche/envoy_quic_packet_writer.h" +#include "extensions/quic_listeners/quiche/envoy_quic_proof_source.h" #include "extensions/quic_listeners/quiche/envoy_quic_utils.h" +#include "extensions/quic_listeners/quiche/envoy_quic_packet_writer.h" +#include "extensions/quic_listeners/quiche/udp_gso_batch_writer.h" namespace Envoy { namespace Quic { @@ -20,37 +23,40 @@ ActiveQuicListener::ActiveQuicListener(Event::Dispatcher& dispatcher, Network::ConnectionHandler& parent, Network::ListenerConfig& listener_config, const quic::QuicConfig& quic_config, - Network::Socket::OptionsSharedPtr options) + Network::Socket::OptionsSharedPtr options, + const envoy::config::core::v3::RuntimeFeatureFlag& enabled) : ActiveQuicListener(dispatcher, parent, listener_config.listenSocketFactory().getListenSocket(), listener_config, - quic_config, std::move(options)) {} + quic_config, std::move(options), enabled) {} ActiveQuicListener::ActiveQuicListener(Event::Dispatcher& dispatcher, Network::ConnectionHandler& parent, Network::SocketSharedPtr listen_socket, Network::ListenerConfig& listener_config, const quic::QuicConfig& quic_config, - Network::Socket::OptionsSharedPtr options) + 
Network::Socket::OptionsSharedPtr options, + const envoy::config::core::v3::RuntimeFeatureFlag& enabled) : Server::ConnectionHandlerImpl::ActiveListenerImplBase(parent, &listener_config), dispatcher_(dispatcher), version_manager_(quic::CurrentSupportedVersions()), - listen_socket_(*listen_socket) { + listen_socket_(*listen_socket), enabled_(enabled, Runtime::LoaderSingleton::get()) { if (options != nullptr) { const bool ok = Network::Socket::applyOptions( options, listen_socket_, envoy::config::core::v3::SocketOption::STATE_BOUND); if (!ok) { ENVOY_LOG(warn, "Failed to apply socket options to socket {} on listener {} after binding", listen_socket_.ioHandle().fd(), listener_config.name()); - throw EnvoyException("Failed to apply socket options."); + throw Network::CreateListenerException("Failed to apply socket options."); } listen_socket_.addOptions(options); } - udp_listener_ = dispatcher_.createUdpListener(std::move(listen_socket), *this); quic::QuicRandom* const random = quic::QuicRandom::GetInstance(); random->RandBytes(random_seed_, sizeof(random_seed_)); crypto_config_ = std::make_unique( quiche::QuicheStringPiece(reinterpret_cast(random_seed_), sizeof(random_seed_)), - quic::QuicRandom::GetInstance(), std::make_unique(), + quic::QuicRandom::GetInstance(), + std::make_unique(listen_socket_, listener_config.filterChainManager(), + stats_), quic::KeyExchangeSource::Default()); auto connection_helper = std::make_unique(dispatcher_); crypto_config_->AddDefaultConfig(random, connection_helper->GetClock(), @@ -61,7 +67,20 @@ ActiveQuicListener::ActiveQuicListener(Event::Dispatcher& dispatcher, crypto_config_.get(), quic_config, &version_manager_, std::move(connection_helper), std::move(alarm_factory), quic::kQuicDefaultConnectionIdLength, parent, *config_, stats_, per_worker_stats_, dispatcher, listen_socket_); - quic_dispatcher_->InitializeWithWriter(new EnvoyQuicPacketWriter(listen_socket_)); + + // Create udp_packet_writer + Network::UdpPacketWriterPtr 
udp_packet_writer = + listener_config.udpPacketWriterFactory()->get().createUdpPacketWriter( + listen_socket_.ioHandle(), listener_config.listenerScope()); + udp_packet_writer_ = udp_packet_writer.get(); + if (udp_packet_writer->isBatchMode()) { + // UdpPacketWriter* can be downcasted to UdpGsoBatchWriter*, which indirectly inherits + // from the quic::QuicPacketWriter class and can be passed to InitializeWithWriter(). + quic_dispatcher_->InitializeWithWriter( + dynamic_cast(udp_packet_writer.release())); + } else { + quic_dispatcher_->InitializeWithWriter(new EnvoyQuicPacketWriter(std::move(udp_packet_writer))); + } } ActiveQuicListener::~ActiveQuicListener() { onListenerShutdown(); } @@ -74,9 +93,9 @@ void ActiveQuicListener::onListenerShutdown() { void ActiveQuicListener::onData(Network::UdpRecvData& data) { quic::QuicSocketAddress peer_address( - envoyAddressInstanceToQuicSocketAddress(data.addresses_.peer_)); + envoyIpAddressToQuicSocketAddress(data.addresses_.peer_->ip())); quic::QuicSocketAddress self_address( - envoyAddressInstanceToQuicSocketAddress(data.addresses_.local_)); + envoyIpAddressToQuicSocketAddress(data.addresses_.local_->ip())); quic::QuicTime timestamp = quic::QuicTime::Zero() + quic::QuicTime::Delta::FromMicroseconds(std::chrono::duration_cast( @@ -93,6 +112,10 @@ void ActiveQuicListener::onData(Network::UdpRecvData& data) { } void ActiveQuicListener::onReadReady() { + if (!enabled_.enabled()) { + ENVOY_LOG(trace, "Quic listener {}: runtime disabled", config_->name()); + return; + } quic_dispatcher_->ProcessBufferedChlos(kNumSessionsToCreatePerLoop); } @@ -112,12 +135,11 @@ void ActiveQuicListener::shutdownListener() { ActiveQuicListenerFactory::ActiveQuicListenerFactory( const envoy::config::listener::v3::QuicProtocolOptions& config, uint32_t concurrency) - : concurrency_(concurrency) { + : concurrency_(concurrency), enabled_(config.enabled()) { uint64_t idle_network_timeout_ms = config.has_idle_timeout() ? 
DurationUtil::durationToMilliseconds(config.idle_timeout()) : 300000; quic_config_.SetIdleNetworkTimeout( - quic::QuicTime::Delta::FromMilliseconds(idle_network_timeout_ms), quic::QuicTime::Delta::FromMilliseconds(idle_network_timeout_ms)); int32_t max_time_before_crypto_handshake_ms = config.has_crypto_handshake_timeout() @@ -191,8 +213,9 @@ ActiveQuicListenerFactory::createActiveUdpListener(Network::ConnectionHandler& p #endif } #endif + return std::make_unique(disptacher, parent, config, quic_config_, - std::move(options)); + std::move(options), enabled_); } } // namespace Quic diff --git a/source/extensions/quic_listeners/quiche/active_quic_listener.h b/source/extensions/quic_listeners/quiche/active_quic_listener.h index 6536731c199f5..08b7807dfc4f6 100644 --- a/source/extensions/quic_listeners/quiche/active_quic_listener.h +++ b/source/extensions/quic_listeners/quiche/active_quic_listener.h @@ -3,9 +3,11 @@ #include "envoy/config/listener/v3/quic_config.pb.h" #include "envoy/network/connection_handler.h" #include "envoy/network/listener.h" +#include "envoy/runtime/runtime.h" #include "common/network/socket_option_impl.h" #include "common/protobuf/utility.h" +#include "common/runtime/runtime_protos.h" #include "server/connection_handler_impl.h" @@ -25,12 +27,14 @@ class ActiveQuicListener : public Network::UdpListenerCallbacks, ActiveQuicListener(Event::Dispatcher& dispatcher, Network::ConnectionHandler& parent, Network::ListenerConfig& listener_config, const quic::QuicConfig& quic_config, - Network::Socket::OptionsSharedPtr options); + Network::Socket::OptionsSharedPtr options, + const envoy::config::core::v3::RuntimeFeatureFlag& enabled); ActiveQuicListener(Event::Dispatcher& dispatcher, Network::ConnectionHandler& parent, Network::SocketSharedPtr listen_socket, Network::ListenerConfig& listener_config, const quic::QuicConfig& quic_config, - Network::Socket::OptionsSharedPtr options); + Network::Socket::OptionsSharedPtr options, + const 
envoy::config::core::v3::RuntimeFeatureFlag& enabled); ~ActiveQuicListener() override; @@ -43,6 +47,7 @@ class ActiveQuicListener : public Network::UdpListenerCallbacks, void onReceiveError(Api::IoError::IoErrorCode /*error_code*/) override { // No-op. Quic can't do anything upon listener error. } + Network::UdpPacketWriter& udpPacketWriter() override { return *udp_packet_writer_; } // ActiveListenerImplBase Network::Listener* listener() override { return udp_listener_.get(); } @@ -60,6 +65,8 @@ class ActiveQuicListener : public Network::UdpListenerCallbacks, quic::QuicVersionManager version_manager_; std::unique_ptr quic_dispatcher_; Network::Socket& listen_socket_; + Runtime::FeatureFlag enabled_; + Network::UdpPacketWriter* udp_packet_writer_; }; using ActiveQuicListenerPtr = std::unique_ptr; @@ -83,6 +90,7 @@ class ActiveQuicListenerFactory : public Network::ActiveUdpListenerFactory, quic::QuicConfig quic_config_; const uint32_t concurrency_; absl::once_flag install_bpf_once_; + envoy::config::core::v3::RuntimeFeatureFlag enabled_; }; } // namespace Quic diff --git a/source/extensions/quic_listeners/quiche/codec_impl.h b/source/extensions/quic_listeners/quiche/codec_impl.h index 732a8aa8e5ab0..58098ecd9ce5c 100644 --- a/source/extensions/quic_listeners/quiche/codec_impl.h +++ b/source/extensions/quic_listeners/quiche/codec_impl.h @@ -22,7 +22,7 @@ class QuicHttpConnectionImplBase : public virtual Http::Connection, : quic_session_(quic_session) {} // Http::Connection - void dispatch(Buffer::Instance& /*data*/) override { + Http::Status dispatch(Buffer::Instance& /*data*/) override { // Bypassed. QUIC connection already hands all data to streams. 
NOT_REACHED_GCOVR_EXCL_LINE; } diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_alarm.cc b/source/extensions/quic_listeners/quiche/envoy_quic_alarm.cc index b490aff8b9559..349eb5f2a32b3 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_alarm.cc +++ b/source/extensions/quic_listeners/quiche/envoy_quic_alarm.cc @@ -1,5 +1,7 @@ #include "extensions/quic_listeners/quiche/envoy_quic_alarm.h" +#include + namespace Envoy { namespace Quic { @@ -11,8 +13,15 @@ EnvoyQuicAlarm::EnvoyQuicAlarm(Event::Dispatcher& dispatcher, const quic::QuicCl void EnvoyQuicAlarm::CancelImpl() { timer_->disableTimer(); } void EnvoyQuicAlarm::SetImpl() { - // TODO(#7170) switch to use microseconds if it is supported. - timer_->enableTimer(std::chrono::milliseconds(getDurationBeforeDeadline().ToMilliseconds())); + quic::QuicTime::Delta duration = getDurationBeforeDeadline(); + // Round up the duration so that any duration < 1us will not be triggered within current event + // loop. QUICHE alarm is not expected to be scheduled in current event loop. This bit is a bummer + // in QUICHE, and we are working on the fix. Once QUICHE is fixed of expecting this behavior, we + // no longer need to round up the duration. + // TODO(antoniovicente) Remove the std::max(1, ...) when decommissioning the + // envoy.reloadable_features.activate_timers_next_event_loop runtime flag. 
+ timer_->enableHRTimer( + std::chrono::microseconds(std::max(static_cast(1), duration.ToMicroseconds()))); } void EnvoyQuicAlarm::UpdateImpl() { diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_client_connection.cc b/source/extensions/quic_listeners/quiche/envoy_quic_client_connection.cc index 0ae38b38dbb21..bb3c172536dfb 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_client_connection.cc +++ b/source/extensions/quic_listeners/quiche/envoy_quic_client_connection.cc @@ -6,6 +6,7 @@ #include "common/network/listen_socket_impl.h" #include "common/network/socket_option_factory.h" +#include "common/network/udp_packet_writer_handler_impl.h" #include "extensions/quic_listeners/quiche/envoy_quic_packet_writer.h" #include "extensions/quic_listeners/quiche/envoy_quic_utils.h" @@ -30,9 +31,11 @@ EnvoyQuicClientConnection::EnvoyQuicClientConnection( const quic::QuicConnectionId& server_connection_id, quic::QuicConnectionHelperInterface& helper, quic::QuicAlarmFactory& alarm_factory, const quic::ParsedQuicVersionVector& supported_versions, Event::Dispatcher& dispatcher, Network::ConnectionSocketPtr&& connection_socket) - : EnvoyQuicClientConnection(server_connection_id, helper, alarm_factory, - new EnvoyQuicPacketWriter(*connection_socket), true, - supported_versions, dispatcher, std::move(connection_socket)) {} + : EnvoyQuicClientConnection( + server_connection_id, helper, alarm_factory, + new EnvoyQuicPacketWriter( + std::make_unique(connection_socket->ioHandle())), + true, supported_versions, dispatcher, std::move(connection_socket)) {} EnvoyQuicClientConnection::EnvoyQuicClientConnection( const quic::QuicConnectionId& server_connection_id, quic::QuicConnectionHelperInterface& helper, @@ -41,7 +44,7 @@ EnvoyQuicClientConnection::EnvoyQuicClientConnection( Network::ConnectionSocketPtr&& connection_socket) : EnvoyQuicConnection( server_connection_id, - envoyAddressInstanceToQuicSocketAddress(connection_socket->remoteAddress()), helper, + 
envoyIpAddressToQuicSocketAddress(connection_socket->remoteAddress()->ip()), helper, alarm_factory, writer, owns_writer, quic::Perspective::IS_CLIENT, supported_versions, std::move(connection_socket)), dispatcher_(dispatcher) {} @@ -64,8 +67,8 @@ void EnvoyQuicClientConnection::processPacket( timestamp, /*owns_buffer=*/false, /*ttl=*/0, /*ttl_valid=*/false, /*packet_headers=*/nullptr, /*headers_length=*/0, /*owns_header_buffer*/ false); - ProcessUdpPacket(envoyAddressInstanceToQuicSocketAddress(local_address), - envoyAddressInstanceToQuicSocketAddress(peer_address), packet); + ProcessUdpPacket(envoyIpAddressToQuicSocketAddress(local_address->ip()), + envoyIpAddressToQuicSocketAddress(peer_address->ip()), packet); } uint64_t EnvoyQuicClientConnection::maxPacketSize() const { @@ -94,7 +97,12 @@ void EnvoyQuicClientConnection::setUpConnectionSocket() { void EnvoyQuicClientConnection::switchConnectionSocket( Network::ConnectionSocketPtr&& connection_socket) { - auto writer = std::make_unique(*connection_socket); + auto writer = std::make_unique( + std::make_unique(connection_socket->ioHandle())); + // Destroy the old file_event before closing the old socket. Otherwise the socket might be picked + // up by another socket() call while file_event is still operating on it. + file_event_.reset(); + // The old socket is closed in this call. 
setConnectionSocket(std::move(connection_socket)); setUpConnectionSocket(); SetQuicPacketWriter(writer.release(), true); diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_client_session.cc b/source/extensions/quic_listeners/quiche/envoy_quic_client_session.cc index 930e470528005..3fd67c0ab4dea 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_client_session.cc +++ b/source/extensions/quic_listeners/quiche/envoy_quic_client_session.cc @@ -1,5 +1,7 @@ #include "extensions/quic_listeners/quiche/envoy_quic_client_session.h" +#include "extensions/quic_listeners/quiche/envoy_quic_utils.h" + namespace Envoy { namespace Quic { @@ -27,7 +29,9 @@ void EnvoyQuicClientSession::connect() { // Start version negotiation and crypto handshake during which the connection may fail if server // doesn't support the one and only supported version. CryptoConnect(); - SetMaxAllowedPushId(0u); + if (quic::VersionUsesHttp3(transport_version())) { + SetMaxPushId(0u); + } } void EnvoyQuicClientSession::OnConnectionClosed(const quic::QuicConnectionCloseFrame& frame, @@ -42,7 +46,12 @@ void EnvoyQuicClientSession::Initialize() { } void EnvoyQuicClientSession::OnCanWrite() { + const uint64_t headers_to_send_old = + quic::VersionUsesHttp3(transport_version()) ? 0u : headers_stream()->BufferedDataBytes(); quic::QuicSpdyClientSession::OnCanWrite(); + const uint64_t headers_to_send_new = + quic::VersionUsesHttp3(transport_version()) ? 
0u : headers_stream()->BufferedDataBytes(); + adjustBytesToSend(headers_to_send_new - headers_to_send_old); maybeApplyDelayClosePolicy(); } @@ -51,7 +60,7 @@ void EnvoyQuicClientSession::OnGoAway(const quic::QuicGoAwayFrame& frame) { quic::QuicErrorCodeToString(frame.error_code), frame.reason_phrase); quic::QuicSpdyClientSession::OnGoAway(frame); if (http_connection_callbacks_ != nullptr) { - http_connection_callbacks_->onGoAway(); + http_connection_callbacks_->onGoAway(quicErrorCodeToEnvoyErrorCode(frame.error_code)); } } @@ -81,5 +90,9 @@ EnvoyQuicClientSession::CreateIncomingStream(quic::PendingStream* /*pending*/) { bool EnvoyQuicClientSession::hasDataToWrite() { return HasDataToWrite(); } +void EnvoyQuicClientSession::OnOneRttKeysAvailable() { + raiseConnectionEvent(Network::ConnectionEvent::Connected); +} + } // namespace Quic } // namespace Envoy diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_client_session.h b/source/extensions/quic_listeners/quiche/envoy_quic_client_session.h index a3b2542dfb4a5..b79943da1f12c 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_client_session.h +++ b/source/extensions/quic_listeners/quiche/envoy_quic_client_session.h @@ -55,6 +55,7 @@ class EnvoyQuicClientSession : public QuicFilterManagerConnectionImpl, void Initialize() override; void OnCanWrite() override; void OnGoAway(const quic::QuicGoAwayFrame& frame) override; + void OnOneRttKeysAvailable() override; // quic::QuicSpdyClientSessionBase void SetDefaultEncryptionLevel(quic::EncryptionLevel level) override; diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_client_stream.cc b/source/extensions/quic_listeners/quiche/envoy_quic_client_stream.cc index aa604d34092f8..39a16309c271a 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_client_stream.cc +++ b/source/extensions/quic_listeners/quiche/envoy_quic_client_stream.cc @@ -46,8 +46,18 @@ EnvoyQuicClientStream::EnvoyQuicClientStream(quic::PendingStream* pending, void 
EnvoyQuicClientStream::encodeHeaders(const Http::RequestHeaderMap& headers, bool end_stream) { ENVOY_STREAM_LOG(debug, "encodeHeaders: (end_stream={}) {}.", *this, end_stream, headers); + quic::QuicStream* writing_stream = + quic::VersionUsesHttp3(transport_version()) + ? static_cast(this) + : (dynamic_cast(session())->headers_stream()); + const uint64_t bytes_to_send_old = writing_stream->BufferedDataBytes(); WriteHeaders(envoyHeadersToSpdyHeaderBlock(headers), end_stream, nullptr); local_end_stream_ = end_stream; + const uint64_t bytes_to_send_new = writing_stream->BufferedDataBytes(); + ASSERT(bytes_to_send_old <= bytes_to_send_new); + // IETF QUIC sends HEADER frame on current stream. After writing headers, the + // buffer may increase. + maybeCheckWatermark(bytes_to_send_old, bytes_to_send_new, *filterManagerConnection()); } void EnvoyQuicClientStream::encodeData(Buffer::Instance& data, bool end_stream) { @@ -55,7 +65,7 @@ void EnvoyQuicClientStream::encodeData(Buffer::Instance& data, bool end_stream) data.length()); local_end_stream_ = end_stream; // This is counting not serialized bytes in the send buffer. - uint64_t bytes_to_send_old = BufferedDataBytes(); + const uint64_t bytes_to_send_old = BufferedDataBytes(); // QUIC stream must take all. 
WriteBodySlices(quic::QuicMemSliceSpan(quic::QuicMemSliceSpanImpl(data)), end_stream); if (data.length() > 0) { @@ -64,7 +74,7 @@ void EnvoyQuicClientStream::encodeData(Buffer::Instance& data, bool end_stream) return; } - uint64_t bytes_to_send_new = BufferedDataBytes(); + const uint64_t bytes_to_send_new = BufferedDataBytes(); ASSERT(bytes_to_send_old <= bytes_to_send_new); maybeCheckWatermark(bytes_to_send_old, bytes_to_send_new, *filterManagerConnection()); } @@ -73,7 +83,18 @@ void EnvoyQuicClientStream::encodeTrailers(const Http::RequestTrailerMap& traile ASSERT(!local_end_stream_); local_end_stream_ = true; ENVOY_STREAM_LOG(debug, "encodeTrailers: {}.", *this, trailers); + quic::QuicStream* writing_stream = + quic::VersionUsesHttp3(transport_version()) + ? static_cast(this) + : (dynamic_cast(session())->headers_stream()); + + const uint64_t bytes_to_send_old = writing_stream->BufferedDataBytes(); WriteTrailers(envoyHeadersToSpdyHeaderBlock(trailers), nullptr); + const uint64_t bytes_to_send_new = writing_stream->BufferedDataBytes(); + ASSERT(bytes_to_send_old <= bytes_to_send_new); + // IETF QUIC sends HEADER frame on current stream. After writing trailers, the + // buffer may increase. 
+ maybeCheckWatermark(bytes_to_send_old, bytes_to_send_new, *filterManagerConnection()); } void EnvoyQuicClientStream::encodeMetadata(const Http::MetadataMapVector& /*metadata_map_vector*/) { diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_client_stream.h b/source/extensions/quic_listeners/quiche/envoy_quic_client_stream.h index 8884c63dac990..761201c16f7cf 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_client_stream.h +++ b/source/extensions/quic_listeners/quiche/envoy_quic_client_stream.h @@ -39,6 +39,7 @@ class EnvoyQuicClientStream : public quic::QuicSpdyClientStream, // Http::Stream void resetStream(Http::StreamResetReason reason) override; + void setFlushTimeout(std::chrono::milliseconds) override {} // quic::QuicSpdyStream void OnBodyAvailable() override; void OnStreamReset(const quic::QuicRstStreamFrame& frame) override; diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_crypto_server_stream.cc b/source/extensions/quic_listeners/quiche/envoy_quic_crypto_server_stream.cc new file mode 100644 index 0000000000000..fb52d075c374d --- /dev/null +++ b/source/extensions/quic_listeners/quiche/envoy_quic_crypto_server_stream.cc @@ -0,0 +1,48 @@ +#include "extensions/quic_listeners/quiche/envoy_quic_crypto_server_stream.h" + +namespace Envoy { +namespace Quic { + +void EnvoyQuicCryptoServerStream::EnvoyProcessClientHelloResultCallback::Run( + quic::QuicErrorCode error, const std::string& error_details, + std::unique_ptr message, + std::unique_ptr diversification_nonce, + std::unique_ptr proof_source_details) { + if (parent_ == nullptr) { + return; + } + + if (proof_source_details != nullptr) { + // Retain a copy of the proof source details after getting filter chain. 
+ parent_->details_ = std::make_unique( + static_cast(*proof_source_details)); + } + parent_->done_cb_wrapper_ = nullptr; + parent_ = nullptr; + done_cb_->Run(error, error_details, std::move(message), std::move(diversification_nonce), + std::move(proof_source_details)); +} + +EnvoyQuicCryptoServerStream::~EnvoyQuicCryptoServerStream() { + if (done_cb_wrapper_ != nullptr) { + done_cb_wrapper_->cancel(); + } +} + +void EnvoyQuicCryptoServerStream::ProcessClientHello( + quic::QuicReferenceCountedPointer result, + std::unique_ptr proof_source_details, + std::unique_ptr done_cb) { + auto done_cb_wrapper = + std::make_unique(this, std::move(done_cb)); + ASSERT(done_cb_wrapper_ == nullptr); + done_cb_wrapper_ = done_cb_wrapper.get(); + // Old QUICHE code might call GetProof() earlier and pass in proof source instance here. But this + // is no longer the case, so proof_source_details should always be null. + ASSERT(proof_source_details == nullptr); + quic::QuicCryptoServerStream::ProcessClientHello(result, std::move(proof_source_details), + std::move(done_cb_wrapper)); +} + +} // namespace Quic +} // namespace Envoy diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_crypto_server_stream.h b/source/extensions/quic_listeners/quiche/envoy_quic_crypto_server_stream.h new file mode 100644 index 0000000000000..faaa6254bdf89 --- /dev/null +++ b/source/extensions/quic_listeners/quiche/envoy_quic_crypto_server_stream.h @@ -0,0 +1,89 @@ +#pragma once + +#pragma GCC diagnostic push +// QUICHE allows unused parameters. +#pragma GCC diagnostic ignored "-Wunused-parameter" +// QUICHE uses offsetof(). 
+#pragma GCC diagnostic ignored "-Winvalid-offsetof" + +#include "quiche/quic/core/quic_crypto_server_stream.h" +#include "quiche/quic/core/tls_server_handshaker.h" + +#pragma GCC diagnostic pop + +#include "extensions/quic_listeners/quiche/envoy_quic_proof_source.h" + +#include + +namespace Envoy { +namespace Quic { + +class EnvoyCryptoServerStream : protected Logger::Loggable { +public: + virtual ~EnvoyCryptoServerStream() = default; + virtual const EnvoyQuicProofSourceDetails* proofSourceDetails() const = 0; +}; + +// A dedicated stream to do QUIC crypto handshake. +class EnvoyQuicCryptoServerStream : public quic::QuicCryptoServerStream, + public EnvoyCryptoServerStream { +public: + // A wrapper to retain proof source details which has filter chain. + class EnvoyProcessClientHelloResultCallback : public quic::ProcessClientHelloResultCallback { + public: + EnvoyProcessClientHelloResultCallback( + EnvoyQuicCryptoServerStream* parent, + std::unique_ptr done_cb) + : parent_(parent), done_cb_(std::move(done_cb)) {} + + // quic::ProcessClientHelloResultCallback + void Run(quic::QuicErrorCode error, const std::string& error_details, + std::unique_ptr message, + std::unique_ptr diversification_nonce, + std::unique_ptr proof_source_details) override; + + void cancel() { parent_ = nullptr; } + + private: + EnvoyQuicCryptoServerStream* parent_; + std::unique_ptr done_cb_; + }; + + EnvoyQuicCryptoServerStream(const quic::QuicCryptoServerConfig* crypto_config, + quic::QuicCompressedCertsCache* compressed_certs_cache, + quic::QuicSession* session, + quic::QuicCryptoServerStreamBase::Helper* helper) + : quic::QuicCryptoServerStream(crypto_config, compressed_certs_cache, session, helper) {} + + ~EnvoyQuicCryptoServerStream() override; + + // quic::QuicCryptoServerStream + // Override to retain ProofSource::Details. 
+ void ProcessClientHello( + quic::QuicReferenceCountedPointer result, + std::unique_ptr proof_source_details, + std::unique_ptr done_cb) override; + // EnvoyCryptoServerStream + const EnvoyQuicProofSourceDetails* proofSourceDetails() const override { return details_.get(); } + +private: + EnvoyProcessClientHelloResultCallback* done_cb_wrapper_{nullptr}; + std::unique_ptr details_; +}; + +// A dedicated stream to do TLS1.3 handshake. +class EnvoyQuicTlsServerHandshaker : public quic::TlsServerHandshaker, + public EnvoyCryptoServerStream { +public: + EnvoyQuicTlsServerHandshaker(quic::QuicSession* session, + const quic::QuicCryptoServerConfig& crypto_config) + : quic::TlsServerHandshaker(session, crypto_config) {} + + // EnvoyCryptoServerStream + const EnvoyQuicProofSourceDetails* proofSourceDetails() const override { + return dynamic_cast(proof_source_details()); + } +}; + +} // namespace Quic +} // namespace Envoy diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_dispatcher.cc b/source/extensions/quic_listeners/quiche/envoy_quic_dispatcher.cc index ab999d5b204d9..ba8f7f3a8239f 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_dispatcher.cc +++ b/source/extensions/quic_listeners/quiche/envoy_quic_dispatcher.cc @@ -48,16 +48,16 @@ void EnvoyQuicDispatcher::OnConnectionClosed(quic::QuicConnectionId connection_i } std::unique_ptr EnvoyQuicDispatcher::CreateQuicSession( - quic::QuicConnectionId server_connection_id, const quic::QuicSocketAddress& peer_address, - quiche::QuicheStringPiece /*alpn*/, const quic::ParsedQuicVersion& version) { + quic::QuicConnectionId server_connection_id, const quic::QuicSocketAddress& /*self_address*/, + const quic::QuicSocketAddress& peer_address, quiche::QuicheStringPiece /*alpn*/, + const quic::ParsedQuicVersion& version) { auto quic_connection = std::make_unique( server_connection_id, peer_address, *helper(), *alarm_factory(), writer(), - /*owns_writer=*/false, quic::ParsedQuicVersionVector{version}, 
listener_config_, - listener_stats_, listen_socket_); + /*owns_writer=*/false, quic::ParsedQuicVersionVector{version}, listen_socket_); auto quic_session = std::make_unique( config(), quic::ParsedQuicVersionVector{version}, std::move(quic_connection), this, session_helper(), crypto_config(), compressed_certs_cache(), dispatcher_, - listener_config_.perConnectionBufferLimitBytes()); + listener_config_.perConnectionBufferLimitBytes(), listener_config_); quic_session->Initialize(); // Filter chain can't be retrieved here as self address is unknown at this // point. diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_dispatcher.h b/source/extensions/quic_listeners/quiche/envoy_quic_dispatcher.h index 2ad8d56241a56..5921342b84bfe 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_dispatcher.h +++ b/source/extensions/quic_listeners/quiche/envoy_quic_dispatcher.h @@ -56,16 +56,10 @@ class EnvoyQuicDispatcher : public quic::QuicDispatcher { const std::string& error_details, quic::ConnectionCloseSource source) override; - quic::QuicConnectionId - GenerateNewServerConnectionId(quic::ParsedQuicVersion /*version*/, - quic::QuicConnectionId /*connection_id*/) const override { - // TODO(danzh): create reject connection id based on given connection_id. 
- return quic::QuicUtils::CreateRandomConnectionId(); - } - protected: std::unique_ptr CreateQuicSession(quic::QuicConnectionId server_connection_id, + const quic::QuicSocketAddress& self_address, const quic::QuicSocketAddress& peer_address, quiche::QuicheStringPiece alpn, const quic::ParsedQuicVersion& version) override; diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_fake_proof_source.h b/source/extensions/quic_listeners/quiche/envoy_quic_fake_proof_source.h deleted file mode 100644 index 01f392279c18f..0000000000000 --- a/source/extensions/quic_listeners/quiche/envoy_quic_fake_proof_source.h +++ /dev/null @@ -1,88 +0,0 @@ -#pragma once - -#include - -#include "common/common/assert.h" - -#include "absl/strings/str_cat.h" - -#pragma GCC diagnostic push - -// QUICHE allows unused parameters. -#pragma GCC diagnostic ignored "-Wunused-parameter" -#include "quiche/quic/core/crypto/proof_source.h" -#include "quiche/quic/core/quic_versions.h" - -#pragma GCC diagnostic pop - -#include "quiche/quic/platform/api/quic_reference_counted.h" -#include "quiche/quic/platform/api/quic_socket_address.h" -#include "quiche/common/platform/api/quiche_string_piece.h" - -namespace Envoy { -namespace Quic { - -// A fake implementation of quic::ProofSource which returns a fake cert and -// a fake signature for a given QUIC server config. -class EnvoyQuicFakeProofSource : public quic::ProofSource { -public: - ~EnvoyQuicFakeProofSource() override = default; - - // quic::ProofSource - // Returns a fake certs chain and its fake SCT "Fake timestamp" and fake TLS signature wrapped - // in QuicCryptoProof. 
- void GetProof(const quic::QuicSocketAddress& server_address, const std::string& hostname, - const std::string& server_config, quic::QuicTransportVersion /*transport_version*/, - quiche::QuicheStringPiece /*chlo_hash*/, - std::unique_ptr callback) override { - quic::QuicReferenceCountedPointer chain = - GetCertChain(server_address, hostname); - quic::QuicCryptoProof proof; - bool success = false; - auto signature_callback = std::make_unique(success, proof.signature); - ComputeTlsSignature(server_address, hostname, 0, server_config, std::move(signature_callback)); - ASSERT(success); - proof.leaf_cert_scts = "Fake timestamp"; - callback->Run(true, chain, proof, nullptr /* details */); - } - - // Returns a certs chain with a fake certificate "Fake cert from [host_name]". - quic::QuicReferenceCountedPointer - GetCertChain(const quic::QuicSocketAddress& /*server_address*/, - const std::string& /*hostname*/) override { - std::vector certs; - certs.push_back(absl::StrCat("Fake cert")); - return quic::QuicReferenceCountedPointer( - new quic::ProofSource::Chain(certs)); - } - - // Always call callback with a signature "Fake signature for { [server_config] }". - void - ComputeTlsSignature(const quic::QuicSocketAddress& /*server_address*/, - const std::string& /*hostname*/, uint16_t /*signature_algorithm*/, - quiche::QuicheStringPiece in, - std::unique_ptr callback) override { - callback->Run(true, absl::StrCat("Fake signature for { ", in, " }")); - } - -private: - // Used by GetProof() to get fake signature. 
- class FakeSignatureCallback : public quic::ProofSource::SignatureCallback { - public: - FakeSignatureCallback(bool& success, std::string& signature) - : success_(success), signature_(signature) {} - - // quic::ProofSource::SignatureCallback - void Run(bool ok, std::string signature) override { - success_ = ok; - signature_ = signature; - } - - private: - bool& success_; - std::string& signature_; - }; -}; - -} // namespace Quic -} // namespace Envoy diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_fake_proof_verifier.h b/source/extensions/quic_listeners/quiche/envoy_quic_fake_proof_verifier.h deleted file mode 100644 index 49abe56e91226..0000000000000 --- a/source/extensions/quic_listeners/quiche/envoy_quic_fake_proof_verifier.h +++ /dev/null @@ -1,62 +0,0 @@ -#pragma once - -#include "absl/strings/str_cat.h" - -#pragma GCC diagnostic push - -// QUICHE allows unused parameters. -#pragma GCC diagnostic ignored "-Wunused-parameter" - -#include "quiche/quic/core/crypto/proof_verifier.h" -#include "quiche/quic/core/quic_versions.h" - -#pragma GCC diagnostic pop - -namespace Envoy { -namespace Quic { - -// A fake implementation of quic::ProofVerifier which approves the certs and -// signature produced by EnvoyQuicFakeProofSource. -class EnvoyQuicFakeProofVerifier : public quic::ProofVerifier { -public: - ~EnvoyQuicFakeProofVerifier() override = default; - - // quic::ProofVerifier - // Return success if the certs chain is valid and signature is "Fake signature for { - // [server_config] }". Otherwise failure. 
- quic::QuicAsyncStatus - VerifyProof(const std::string& hostname, const uint16_t /*port*/, - const std::string& server_config, quic::QuicTransportVersion /*quic_version*/, - absl::string_view /*chlo_hash*/, const std::vector& certs, - const std::string& cert_sct, const std::string& signature, - const quic::ProofVerifyContext* context, std::string* error_details, - std::unique_ptr* details, - std::unique_ptr callback) override { - if (VerifyCertChain(hostname, certs, "", cert_sct, context, error_details, details, - std::move(callback)) == quic::QUIC_SUCCESS && - signature == absl::StrCat("Fake signature for { ", server_config, " }")) { - return quic::QUIC_SUCCESS; - } - return quic::QUIC_FAILURE; - } - - // Return success if the certs chain has only one fake certificate "Fake cert from [host_name]" - // and its SCT is "Fake timestamp". Otherwise failure. - quic::QuicAsyncStatus - VerifyCertChain(const std::string& /*hostname*/, const std::vector& certs, - const std::string& /*ocsp_response*/, const std::string& cert_sct, - const quic::ProofVerifyContext* /*context*/, std::string* /*error_details*/, - std::unique_ptr* /*details*/, - std::unique_ptr /*callback*/) override { - // Cert SCT support is not enabled for fake ProofSource. 
- if (cert_sct.empty() && certs.size() == 1 && certs[0] == "Fake cert") { - return quic::QUIC_SUCCESS; - } - return quic::QUIC_FAILURE; - } - - std::unique_ptr CreateDefaultContext() override { return nullptr; } -}; - -} // namespace Quic -} // namespace Envoy diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_packet_writer.cc b/source/extensions/quic_listeners/quiche/envoy_quic_packet_writer.cc index 88816a34d0598..a6a70623a43f2 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_packet_writer.cc +++ b/source/extensions/quic_listeners/quiche/envoy_quic_packet_writer.cc @@ -1,43 +1,75 @@ #include "extensions/quic_listeners/quiche/envoy_quic_packet_writer.h" -#include "common/buffer/buffer_impl.h" -#include "common/network/utility.h" +#include #include "extensions/quic_listeners/quiche/envoy_quic_utils.h" namespace Envoy { namespace Quic { -EnvoyQuicPacketWriter::EnvoyQuicPacketWriter(Network::Socket& socket) - : write_blocked_(false), socket_(socket) {} -quic::WriteResult EnvoyQuicPacketWriter::WritePacket(const char* buffer, size_t buf_len, +namespace { + +quic::WriteResult convertToQuicWriteResult(Api::IoCallUint64Result& result) { + if (result.ok()) { + return {quic::WRITE_STATUS_OK, static_cast(result.rc_)}; + } + quic::WriteStatus status = result.err_->getErrorCode() == Api::IoError::IoErrorCode::Again + ? 
quic::WRITE_STATUS_BLOCKED + : quic::WRITE_STATUS_ERROR; + return {status, static_cast(result.err_->getErrorCode())}; +} + +} // namespace + +EnvoyQuicPacketWriter::EnvoyQuicPacketWriter(Network::UdpPacketWriterPtr envoy_udp_packet_writer) + : envoy_udp_packet_writer_(std::move(envoy_udp_packet_writer)) {} + +quic::WriteResult EnvoyQuicPacketWriter::WritePacket(const char* buffer, size_t buffer_len, const quic::QuicIpAddress& self_ip, const quic::QuicSocketAddress& peer_address, quic::PerPacketOptions* options) { ASSERT(options == nullptr, "Per packet option is not supported yet."); - ASSERT(!write_blocked_, "Cannot write while IO handle is blocked."); - Buffer::RawSlice slice; - slice.mem_ = const_cast(buffer); - slice.len_ = buf_len; + Buffer::BufferFragmentImpl fragment(buffer, buffer_len, nullptr); + Buffer::OwnedImpl buf; + buf.addBufferFragment(fragment); + quic::QuicSocketAddress self_address(self_ip, /*port=*/0); Network::Address::InstanceConstSharedPtr local_addr = quicAddressToEnvoyAddressInstance(self_address); Network::Address::InstanceConstSharedPtr remote_addr = quicAddressToEnvoyAddressInstance(peer_address); - Api::IoCallUint64Result result = Network::Utility::writeToSocket( - socket_.ioHandle(), &slice, 1, local_addr == nullptr ? nullptr : local_addr->ip(), - *remote_addr); - if (result.ok()) { - return {quic::WRITE_STATUS_OK, static_cast(result.rc_)}; - } - quic::WriteStatus status = result.err_->getErrorCode() == Api::IoError::IoErrorCode::Again - ? quic::WRITE_STATUS_BLOCKED - : quic::WRITE_STATUS_ERROR; - if (quic::IsWriteBlockedStatus(status)) { - write_blocked_ = true; - } - return {status, static_cast(result.err_->getErrorCode())}; + + Api::IoCallUint64Result result = envoy_udp_packet_writer_->writePacket( + buf, local_addr == nullptr ? 
nullptr : local_addr->ip(), *remote_addr); + + return convertToQuicWriteResult(result); +} + +quic::QuicByteCount +EnvoyQuicPacketWriter::GetMaxPacketSize(const quic::QuicSocketAddress& peer_address) const { + Network::Address::InstanceConstSharedPtr remote_addr = + quicAddressToEnvoyAddressInstance(peer_address); + return static_cast(envoy_udp_packet_writer_->getMaxPacketSize(*remote_addr)); +} + +quic::QuicPacketBuffer +EnvoyQuicPacketWriter::GetNextWriteLocation(const quic::QuicIpAddress& self_ip, + const quic::QuicSocketAddress& peer_address) { + quic::QuicSocketAddress self_address(self_ip, /*port=*/0); + Network::Address::InstanceConstSharedPtr local_addr = + quicAddressToEnvoyAddressInstance(self_address); + Network::Address::InstanceConstSharedPtr remote_addr = + quicAddressToEnvoyAddressInstance(peer_address); + Network::UdpPacketWriterBuffer write_location = envoy_udp_packet_writer_->getNextWriteLocation( + local_addr == nullptr ? nullptr : local_addr->ip(), *remote_addr); + return quic::QuicPacketBuffer(reinterpret_cast(write_location.buffer_), + write_location.release_buffer_); +} + +quic::WriteResult EnvoyQuicPacketWriter::Flush() { + Api::IoCallUint64Result result = envoy_udp_packet_writer_->flush(); + return convertToQuicWriteResult(result); } } // namespace Quic diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_packet_writer.h b/source/extensions/quic_listeners/quiche/envoy_quic_packet_writer.h index 55a6e5146d3a1..bb4b736c84c8d 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_packet_writer.h +++ b/source/extensions/quic_listeners/quiche/envoy_quic_packet_writer.h @@ -10,14 +10,14 @@ #pragma GCC diagnostic pop -#include "envoy/network/listener.h" +#include "envoy/network/udp_packet_writer_handler.h" namespace Envoy { namespace Quic { class EnvoyQuicPacketWriter : public quic::QuicPacketWriter { public: - EnvoyQuicPacketWriter(Network::Socket& socket); + EnvoyQuicPacketWriter(Network::UdpPacketWriterPtr 
envoy_udp_packet_writer); quic::WriteResult WritePacket(const char* buffer, size_t buf_len, const quic::QuicIpAddress& self_address, @@ -25,25 +25,19 @@ class EnvoyQuicPacketWriter : public quic::QuicPacketWriter { quic::PerPacketOptions* options) override; // quic::QuicPacketWriter - bool IsWriteBlocked() const override { return write_blocked_; } - void SetWritable() override { write_blocked_ = false; } - quic::QuicByteCount - GetMaxPacketSize(const quic::QuicSocketAddress& /*peer_address*/) const override { - return quic::kMaxOutgoingPacketSize; - } - // Currently this writer doesn't support pacing offload or batch writing. + bool IsWriteBlocked() const override { return envoy_udp_packet_writer_->isWriteBlocked(); } + void SetWritable() override { envoy_udp_packet_writer_->setWritable(); } + bool IsBatchMode() const override { return envoy_udp_packet_writer_->isBatchMode(); } + // Currently this writer doesn't support pacing offload. bool SupportsReleaseTime() const override { return false; } - bool IsBatchMode() const override { return false; } - char* GetNextWriteLocation(const quic::QuicIpAddress& /*self_address*/, - const quic::QuicSocketAddress& /*peer_address*/) override { - return nullptr; - } - quic::WriteResult Flush() override { return {quic::WRITE_STATUS_OK, 0}; } + + quic::QuicByteCount GetMaxPacketSize(const quic::QuicSocketAddress& peer_address) const override; + quic::QuicPacketBuffer GetNextWriteLocation(const quic::QuicIpAddress& self_address, + const quic::QuicSocketAddress& peer_address) override; + quic::WriteResult Flush() override; private: - // Modified by WritePacket() to indicate underlying IoHandle status. 
- bool write_blocked_; - Network::Socket& socket_; + Network::UdpPacketWriterPtr envoy_udp_packet_writer_; }; } // namespace Quic diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_proof_source.cc b/source/extensions/quic_listeners/quiche/envoy_quic_proof_source.cc new file mode 100644 index 0000000000000..1f65e4e7e6a0f --- /dev/null +++ b/source/extensions/quic_listeners/quiche/envoy_quic_proof_source.cc @@ -0,0 +1,113 @@ +#include "extensions/quic_listeners/quiche/envoy_quic_proof_source.h" + +#include + +#include "envoy/ssl/tls_certificate_config.h" + +#include "extensions/quic_listeners/quiche/envoy_quic_utils.h" +#include "extensions/quic_listeners/quiche/quic_io_handle_wrapper.h" +#include "extensions/transport_sockets/well_known_names.h" + +#include "openssl/bytestring.h" +#include "quiche/quic/core/crypto/certificate_view.h" + +namespace Envoy { +namespace Quic { + +quic::QuicReferenceCountedPointer +EnvoyQuicProofSource::GetCertChain(const quic::QuicSocketAddress& server_address, + const quic::QuicSocketAddress& client_address, + const std::string& hostname) { + CertConfigWithFilterChain res = + getTlsCertConfigAndFilterChain(server_address, client_address, hostname); + absl::optional> cert_config_ref = + res.cert_config_; + if (!cert_config_ref.has_value()) { + ENVOY_LOG(warn, "No matching filter chain found for handshake."); + return nullptr; + } + auto& cert_config = cert_config_ref.value().get(); + const std::string& chain_str = cert_config.certificateChain(); + std::stringstream pem_stream(chain_str); + std::vector chain = quic::CertificateView::LoadPemFromStream(&pem_stream); + return quic::QuicReferenceCountedPointer( + new quic::ProofSource::Chain(chain)); +} + +void EnvoyQuicProofSource::signPayload( + const quic::QuicSocketAddress& server_address, const quic::QuicSocketAddress& client_address, + const std::string& hostname, uint16_t signature_algorithm, quiche::QuicheStringPiece in, + std::unique_ptr callback) { + 
CertConfigWithFilterChain res = + getTlsCertConfigAndFilterChain(server_address, client_address, hostname); + absl::optional> cert_config_ref = + res.cert_config_; + if (!cert_config_ref.has_value()) { + ENVOY_LOG(warn, "No matching filter chain found for handshake."); + callback->Run(false, "", nullptr); + return; + } + auto& cert_config = cert_config_ref.value().get(); + // Load private key. + const std::string& pkey = cert_config.privateKey(); + std::stringstream pem_str(pkey); + std::unique_ptr pem_key = + quic::CertificatePrivateKey::LoadPemFromStream(&pem_str); + if (pem_key == nullptr) { + ENVOY_LOG(warn, "Failed to load private key."); + callback->Run(false, "", nullptr); + return; + } + // Verify the signature algorithm is as expected. + std::string error_details; + int sign_alg = deduceSignatureAlgorithmFromPublicKey(pem_key->private_key(), &error_details); + if (sign_alg != signature_algorithm) { + ENVOY_LOG(warn, + fmt::format("The signature algorithm {} from the private key is not expected: {}", + sign_alg, error_details)); + callback->Run(false, "", nullptr); + return; + } + + // Sign. 
+ std::string sig = pem_key->Sign(in, signature_algorithm); + bool success = !sig.empty(); + ASSERT(res.filter_chain_.has_value()); + callback->Run(success, sig, + std::make_unique(res.filter_chain_.value().get())); +} + +EnvoyQuicProofSource::CertConfigWithFilterChain +EnvoyQuicProofSource::getTlsCertConfigAndFilterChain(const quic::QuicSocketAddress& server_address, + const quic::QuicSocketAddress& client_address, + const std::string& hostname) { + ENVOY_LOG(trace, "Getting cert chain for {}", hostname); + Network::ConnectionSocketImpl connection_socket( + std::make_unique(listen_socket_.ioHandle()), + quicAddressToEnvoyAddressInstance(server_address), + quicAddressToEnvoyAddressInstance(client_address)); + connection_socket.setDetectedTransportProtocol( + Extensions::TransportSockets::TransportProtocolNames::get().Quic); + connection_socket.setRequestedServerName(hostname); + connection_socket.setRequestedApplicationProtocols({"h2"}); + const Network::FilterChain* filter_chain = + filter_chain_manager_.findFilterChain(connection_socket); + if (filter_chain == nullptr) { + listener_stats_.no_filter_chain_match_.inc(); + return {absl::nullopt, absl::nullopt}; + } + const Network::TransportSocketFactory& transport_socket_factory = + filter_chain->transportSocketFactory(); + std::vector> tls_cert_configs = + dynamic_cast(transport_socket_factory) + .serverContextConfig() + .tlsCertificates(); + + // Only return the first TLS cert config. + // TODO(danzh) Choose based on supported cipher suites in TLS1.3 CHLO and prefer EC + // certs if supported. 
+ return {tls_cert_configs[0].get(), *filter_chain}; +} + +} // namespace Quic +} // namespace Envoy diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_proof_source.h b/source/extensions/quic_listeners/quiche/envoy_quic_proof_source.h new file mode 100644 index 0000000000000..6e1c74c9234c3 --- /dev/null +++ b/source/extensions/quic_listeners/quiche/envoy_quic_proof_source.h @@ -0,0 +1,51 @@ +#pragma once + +#include "server/connection_handler_impl.h" + +#include "extensions/quic_listeners/quiche/envoy_quic_proof_source_base.h" +#include "extensions/quic_listeners/quiche/quic_transport_socket_factory.h" + +namespace Envoy { +namespace Quic { + +// A ProofSource implementation which supplies a proof instance with certs from filter chain. +class EnvoyQuicProofSource : public EnvoyQuicProofSourceBase { +public: + EnvoyQuicProofSource(Network::Socket& listen_socket, + Network::FilterChainManager& filter_chain_manager, + Server::ListenerStats& listener_stats) + : listen_socket_(listen_socket), filter_chain_manager_(filter_chain_manager), + listener_stats_(listener_stats) {} + + ~EnvoyQuicProofSource() override = default; + + // quic::ProofSource + quic::QuicReferenceCountedPointer + GetCertChain(const quic::QuicSocketAddress& server_address, + const quic::QuicSocketAddress& client_address, const std::string& hostname) override; + +protected: + // quic::ProofSource + void signPayload(const quic::QuicSocketAddress& server_address, + const quic::QuicSocketAddress& client_address, const std::string& hostname, + uint16_t signature_algorithm, quiche::QuicheStringPiece in, + std::unique_ptr callback) override; + +private: + struct CertConfigWithFilterChain { + absl::optional> cert_config_; + absl::optional> filter_chain_; + }; + + CertConfigWithFilterChain + getTlsCertConfigAndFilterChain(const quic::QuicSocketAddress& server_address, + const quic::QuicSocketAddress& client_address, + const std::string& hostname); + + Network::Socket& listen_socket_; + 
Network::FilterChainManager& filter_chain_manager_; + Server::ListenerStats& listener_stats_; +}; + +} // namespace Quic +} // namespace Envoy diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_proof_source_base.cc b/source/extensions/quic_listeners/quiche/envoy_quic_proof_source_base.cc new file mode 100644 index 0000000000000..220dc4cb1ccfe --- /dev/null +++ b/source/extensions/quic_listeners/quiche/envoy_quic_proof_source_base.cc @@ -0,0 +1,81 @@ +#include "extensions/quic_listeners/quiche/envoy_quic_proof_source_base.h" + +#pragma GCC diagnostic push + +// QUICHE allows unused parameters. +#pragma GCC diagnostic ignored "-Wunused-parameter" +#include "quiche/quic/core/quic_data_writer.h" + +#pragma GCC diagnostic pop + +#include "extensions/quic_listeners/quiche/envoy_quic_utils.h" + +namespace Envoy { +namespace Quic { + +void EnvoyQuicProofSourceBase::GetProof(const quic::QuicSocketAddress& server_address, + const quic::QuicSocketAddress& client_address, + const std::string& hostname, + const std::string& server_config, + quic::QuicTransportVersion /*transport_version*/, + quiche::QuicheStringPiece chlo_hash, + std::unique_ptr callback) { + quic::QuicReferenceCountedPointer chain = + GetCertChain(server_address, client_address, hostname); + + if (chain == nullptr || chain->certs.empty()) { + quic::QuicCryptoProof proof; + callback->Run(/*ok=*/false, nullptr, proof, nullptr); + return; + } + size_t payload_size = sizeof(quic::kProofSignatureLabel) + sizeof(uint32_t) + chlo_hash.size() + + server_config.size(); + auto payload = std::make_unique(payload_size); + quic::QuicDataWriter payload_writer(payload_size, payload.get(), + quiche::Endianness::HOST_BYTE_ORDER); + bool success = + payload_writer.WriteBytes(quic::kProofSignatureLabel, sizeof(quic::kProofSignatureLabel)) && + payload_writer.WriteUInt32(chlo_hash.size()) && payload_writer.WriteStringPiece(chlo_hash) && + payload_writer.WriteStringPiece(server_config); + if (!success) { + 
quic::QuicCryptoProof proof; + callback->Run(/*ok=*/false, nullptr, proof, nullptr); + return; + } + + std::string error_details; + bssl::UniquePtr cert = parseDERCertificate(chain->certs[0], &error_details); + if (cert == nullptr) { + ENVOY_LOG(warn, absl::StrCat("Invalid leaf cert: ", error_details)); + quic::QuicCryptoProof proof; + callback->Run(/*ok=*/false, nullptr, proof, nullptr); + return; + } + + bssl::UniquePtr pub_key(X509_get_pubkey(cert.get())); + int sign_alg = deduceSignatureAlgorithmFromPublicKey(pub_key.get(), &error_details); + if (sign_alg == 0) { + ENVOY_LOG(warn, absl::StrCat("Failed to deduce signature algorithm from public key: ", + error_details)); + quic::QuicCryptoProof proof; + callback->Run(/*ok=*/false, nullptr, proof, nullptr); + return; + } + + auto signature_callback = std::make_unique(std::move(callback), chain); + + signPayload(server_address, client_address, hostname, sign_alg, + quiche::QuicheStringPiece(payload.get(), payload_size), + std::move(signature_callback)); +} + +void EnvoyQuicProofSourceBase::ComputeTlsSignature( + const quic::QuicSocketAddress& server_address, const quic::QuicSocketAddress& client_address, + const std::string& hostname, uint16_t signature_algorithm, quiche::QuicheStringPiece in, + std::unique_ptr callback) { + signPayload(server_address, client_address, hostname, signature_algorithm, in, + std::move(callback)); +} + +} // namespace Quic +} // namespace Envoy diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_proof_source_base.h b/source/extensions/quic_listeners/quiche/envoy_quic_proof_source_base.h new file mode 100644 index 0000000000000..149cc50c7d63a --- /dev/null +++ b/source/extensions/quic_listeners/quiche/envoy_quic_proof_source_base.h @@ -0,0 +1,103 @@ +#pragma once + +#include + +#include "common/common/assert.h" + +#include "absl/strings/str_cat.h" + +#pragma GCC diagnostic push + +// QUICHE allows unused parameters. 
+#pragma GCC diagnostic ignored "-Wunused-parameter" +#include "quiche/quic/core/crypto/proof_source.h" +#include "quiche/quic/core/quic_versions.h" +#include "quiche/quic/core/crypto/crypto_protocol.h" +#include "quiche/quic/platform/api/quic_reference_counted.h" +#include "quiche/quic/platform/api/quic_socket_address.h" +#include "quiche/common/platform/api/quiche_string_piece.h" +#pragma GCC diagnostic pop + +#include "openssl/ssl.h" +#include "envoy/network/filter.h" +#include "server/backtrace.h" +#include "common/common/logger.h" + +namespace Envoy { +namespace Quic { + +// A ProofSource::Detail implementation which retains filter chain. +class EnvoyQuicProofSourceDetails : public quic::ProofSource::Details { +public: + explicit EnvoyQuicProofSourceDetails(const Network::FilterChain& filter_chain) + : filter_chain_(filter_chain) {} + EnvoyQuicProofSourceDetails(const EnvoyQuicProofSourceDetails& other) + : filter_chain_(other.filter_chain_) {} + + const Network::FilterChain& filterChain() const { return filter_chain_; } + +private: + const Network::FilterChain& filter_chain_; +}; + +// A partial implementation of quic::ProofSource which chooses a cipher suite according to the leaf +// cert to sign in GetProof(). +class EnvoyQuicProofSourceBase : public quic::ProofSource, + protected Logger::Loggable { +public: + ~EnvoyQuicProofSourceBase() override = default; + + // quic::ProofSource + // Returns a certs chain and its fake SCT "Fake timestamp" and TLS signature wrapped + // in QuicCryptoProof. 
+ void GetProof(const quic::QuicSocketAddress& server_address, + const quic::QuicSocketAddress& client_address, const std::string& hostname, + const std::string& server_config, quic::QuicTransportVersion /*transport_version*/, + quiche::QuicheStringPiece chlo_hash, + std::unique_ptr callback) override; + + TicketCrypter* GetTicketCrypter() override { return nullptr; } + + void ComputeTlsSignature(const quic::QuicSocketAddress& server_address, + const quic::QuicSocketAddress& client_address, + const std::string& hostname, uint16_t signature_algorithm, + quiche::QuicheStringPiece in, + std::unique_ptr callback) override; + +protected: + virtual void signPayload(const quic::QuicSocketAddress& server_address, + const quic::QuicSocketAddress& client_address, + const std::string& hostname, uint16_t signature_algorithm, + quiche::QuicheStringPiece in, + std::unique_ptr callback) PURE; + +private: + // Used by GetProof() to get signature. + class SignatureCallback : public quic::ProofSource::SignatureCallback { + public: + // TODO(danzh) Pass in Details to retain the certs chain, and quic::ProofSource::Callback to be + // triggered in Run(). + SignatureCallback(std::unique_ptr callback, + quic::QuicReferenceCountedPointer chain) + : callback_(std::move(callback)), chain_(chain) {} + + // quic::ProofSource::SignatureCallback + void Run(bool ok, std::string signature, std::unique_ptr
details) override { + quic::QuicCryptoProof proof; + if (!ok) { + callback_->Run(false, chain_, proof, nullptr); + return; + } + proof.signature = signature; + proof.leaf_cert_scts = "Fake timestamp"; + callback_->Run(true, chain_, proof, std::move(details)); + } + + private: + std::unique_ptr callback_; + quic::QuicReferenceCountedPointer chain_; + }; +}; + +} // namespace Quic +} // namespace Envoy diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_proof_verifier.cc b/source/extensions/quic_listeners/quiche/envoy_quic_proof_verifier.cc new file mode 100644 index 0000000000000..b7040d1279d71 --- /dev/null +++ b/source/extensions/quic_listeners/quiche/envoy_quic_proof_verifier.cc @@ -0,0 +1,48 @@ +#include "extensions/quic_listeners/quiche/envoy_quic_proof_verifier.h" + +#include "extensions/quic_listeners/quiche/envoy_quic_utils.h" + +#include "quiche/quic/core/crypto/certificate_view.h" + +namespace Envoy { +namespace Quic { + +quic::QuicAsyncStatus EnvoyQuicProofVerifier::VerifyCertChain( + const std::string& hostname, const uint16_t /*port*/, const std::vector& certs, + const std::string& /*ocsp_response*/, const std::string& /*cert_sct*/, + const quic::ProofVerifyContext* /*context*/, std::string* error_details, + std::unique_ptr* /*details*/, + std::unique_ptr /*callback*/) { + ASSERT(!certs.empty()); + bssl::UniquePtr intermediates(sk_X509_new_null()); + bssl::UniquePtr leaf; + for (size_t i = 0; i < certs.size(); i++) { + bssl::UniquePtr cert = parseDERCertificate(certs[i], error_details); + if (!cert) { + return quic::QUIC_FAILURE; + } + if (i == 0) { + leaf = std::move(cert); + } else { + sk_X509_push(intermediates.get(), cert.release()); + } + } + bool success = context_impl_.verifyCertChain(*leaf, *intermediates, *error_details); + if (!success) { + return quic::QUIC_FAILURE; + } + + std::unique_ptr cert_view = + quic::CertificateView::ParseSingleCertificate(certs[0]); + ASSERT(cert_view != nullptr); + for (const absl::string_view 
config_san : cert_view->subject_alt_name_domains()) { + if (Extensions::TransportSockets::Tls::ContextImpl::dnsNameMatch(hostname, config_san)) { + return quic::QUIC_SUCCESS; + } + } + *error_details = absl::StrCat("Leaf certificate doesn't match hostname: ", hostname); + return quic::QUIC_FAILURE; +} + +} // namespace Quic +} // namespace Envoy diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_proof_verifier.h b/source/extensions/quic_listeners/quiche/envoy_quic_proof_verifier.h new file mode 100644 index 0000000000000..a29eb999119f2 --- /dev/null +++ b/source/extensions/quic_listeners/quiche/envoy_quic_proof_verifier.h @@ -0,0 +1,30 @@ +#pragma once + +#include "extensions/quic_listeners/quiche/envoy_quic_proof_verifier_base.h" +#include "extensions/transport_sockets/tls/context_impl.h" + +namespace Envoy { +namespace Quic { + +// A quic::ProofVerifier implementation which verifies cert chain using SSL +// client context config. +class EnvoyQuicProofVerifier : public EnvoyQuicProofVerifierBase { +public: + EnvoyQuicProofVerifier(Stats::Scope& scope, const Envoy::Ssl::ClientContextConfig& config, + TimeSource& time_source) + : context_impl_(scope, config, time_source) {} + + // EnvoyQuicProofVerifierBase + quic::QuicAsyncStatus + VerifyCertChain(const std::string& hostname, const uint16_t port, + const std::vector& certs, const std::string& ocsp_response, + const std::string& cert_sct, const quic::ProofVerifyContext* context, + std::string* error_details, std::unique_ptr* details, + std::unique_ptr callback) override; + +private: + Extensions::TransportSockets::Tls::ClientContextImpl context_impl_; +}; + +} // namespace Quic +} // namespace Envoy diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_proof_verifier_base.cc b/source/extensions/quic_listeners/quiche/envoy_quic_proof_verifier_base.cc new file mode 100644 index 0000000000000..229b3ab36628b --- /dev/null +++ 
b/source/extensions/quic_listeners/quiche/envoy_quic_proof_verifier_base.cc @@ -0,0 +1,70 @@ +#include "extensions/quic_listeners/quiche/envoy_quic_proof_verifier_base.h" + +#include "extensions/quic_listeners/quiche/envoy_quic_utils.h" + +#include "openssl/ssl.h" +#include "quiche/quic/core/crypto/certificate_view.h" +#include "quiche/quic/core/crypto/crypto_protocol.h" +#include "quiche/quic/core/quic_data_writer.h" + +namespace Envoy { +namespace Quic { + +quic::QuicAsyncStatus EnvoyQuicProofVerifierBase::VerifyProof( + const std::string& hostname, const uint16_t port, const std::string& server_config, + quic::QuicTransportVersion /*quic_version*/, absl::string_view chlo_hash, + const std::vector& certs, const std::string& cert_sct, + const std::string& signature, const quic::ProofVerifyContext* context, + std::string* error_details, std::unique_ptr* details, + std::unique_ptr callback) { + if (certs.empty()) { + *error_details = "Received empty cert chain."; + return quic::QUIC_FAILURE; + } + if (!verifySignature(server_config, chlo_hash, certs[0], signature, error_details)) { + return quic::QUIC_FAILURE; + } + + return VerifyCertChain(hostname, port, certs, "", cert_sct, context, error_details, details, + std::move(callback)); +} + +bool EnvoyQuicProofVerifierBase::verifySignature(const std::string& server_config, + absl::string_view chlo_hash, + const std::string& cert, + const std::string& signature, + std::string* error_details) { + std::unique_ptr cert_view = + quic::CertificateView::ParseSingleCertificate(cert); + if (cert_view == nullptr) { + *error_details = "Invalid leaf cert."; + return false; + } + int sign_alg = deduceSignatureAlgorithmFromPublicKey(cert_view->public_key(), error_details); + if (sign_alg == 0) { + return false; + } + + size_t payload_size = sizeof(quic::kProofSignatureLabel) + sizeof(uint32_t) + chlo_hash.size() + + server_config.size(); + auto payload = std::make_unique(payload_size); + quic::QuicDataWriter 
payload_writer(payload_size, payload.get(), + quiche::Endianness::HOST_BYTE_ORDER); + bool success = + payload_writer.WriteBytes(quic::kProofSignatureLabel, sizeof(quic::kProofSignatureLabel)) && + payload_writer.WriteUInt32(chlo_hash.size()) && payload_writer.WriteStringPiece(chlo_hash) && + payload_writer.WriteStringPiece(server_config); + if (!success) { + *error_details = "QuicPacketWriter error."; + return false; + } + bool valid = cert_view->VerifySignature(quiche::QuicheStringPiece(payload.get(), payload_size), + signature, sign_alg); + if (!valid) { + *error_details = "Signature is not valid."; + } + return valid; +} + +} // namespace Quic +} // namespace Envoy diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_proof_verifier_base.h b/source/extensions/quic_listeners/quiche/envoy_quic_proof_verifier_base.h new file mode 100644 index 0000000000000..02dac5facd42f --- /dev/null +++ b/source/extensions/quic_listeners/quiche/envoy_quic_proof_verifier_base.h @@ -0,0 +1,47 @@ +#pragma once + +#include "absl/strings/str_cat.h" + +#pragma GCC diagnostic push + +// QUICHE allows unused parameters. +#pragma GCC diagnostic ignored "-Wunused-parameter" + +#include "quiche/quic/core/crypto/proof_verifier.h" +#include "quiche/quic/core/quic_versions.h" + +#pragma GCC diagnostic pop + +#include "common/common/logger.h" + +namespace Envoy { +namespace Quic { + +// A partial implementation of quic::ProofVerifier which does signature +// verification. +class EnvoyQuicProofVerifierBase : public quic::ProofVerifier, + protected Logger::Loggable { +public: + ~EnvoyQuicProofVerifierBase() override = default; + + // quic::ProofVerifier + // Return success if the certs chain is valid and signature of { + // server_config + chlo_hash} is valid. Otherwise failure. 
+ quic::QuicAsyncStatus + VerifyProof(const std::string& hostname, const uint16_t port, const std::string& server_config, + quic::QuicTransportVersion /*quic_version*/, absl::string_view chlo_hash, + const std::vector& certs, const std::string& cert_sct, + const std::string& signature, const quic::ProofVerifyContext* context, + std::string* error_details, std::unique_ptr* details, + std::unique_ptr callback) override; + + std::unique_ptr CreateDefaultContext() override { return nullptr; } + +protected: + virtual bool verifySignature(const std::string& server_config, absl::string_view chlo_hash, + const std::string& cert, const std::string& signature, + std::string* error_details); +}; + +} // namespace Quic +} // namespace Envoy diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_server_connection.cc b/source/extensions/quic_listeners/quiche/envoy_quic_server_connection.cc index c8a18a45acfb8..b8fa94221f054 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_server_connection.cc +++ b/source/extensions/quic_listeners/quiche/envoy_quic_server_connection.cc @@ -13,17 +13,14 @@ EnvoyQuicServerConnection::EnvoyQuicServerConnection( const quic::QuicConnectionId& server_connection_id, quic::QuicSocketAddress initial_peer_address, quic::QuicConnectionHelperInterface& helper, quic::QuicAlarmFactory& alarm_factory, quic::QuicPacketWriter* writer, bool owns_writer, - const quic::ParsedQuicVersionVector& supported_versions, - Network::ListenerConfig& listener_config, Server::ListenerStats& listener_stats, - Network::Socket& listen_socket) + const quic::ParsedQuicVersionVector& supported_versions, Network::Socket& listen_socket) : EnvoyQuicConnection(server_connection_id, initial_peer_address, helper, alarm_factory, writer, owns_writer, quic::Perspective::IS_SERVER, supported_versions, std::make_unique( // Wraps the real IoHandle instance so that if the connection socket // gets closed, the real IoHandle won't be affected. 
std::make_unique(listen_socket.ioHandle()), - nullptr, quicAddressToEnvoyAddressInstance(initial_peer_address))), - listener_config_(listener_config), listener_stats_(listener_stats) {} + nullptr, quicAddressToEnvoyAddressInstance(initial_peer_address))) {} bool EnvoyQuicServerConnection::OnPacketHeader(const quic::QuicPacketHeader& header) { if (!EnvoyQuicConnection::OnPacketHeader(header)) { @@ -33,27 +30,10 @@ bool EnvoyQuicServerConnection::OnPacketHeader(const quic::QuicPacketHeader& hea return true; } ASSERT(self_address().IsInitialized()); - // Self address should be initialized by now. It's time to install filters. + // Self address should be initialized by now. connectionSocket()->setLocalAddress(quicAddressToEnvoyAddressInstance(self_address())); connectionSocket()->setDetectedTransportProtocol( Extensions::TransportSockets::TransportProtocolNames::get().Quic); - ASSERT(filter_chain_ == nullptr); - filter_chain_ = listener_config_.filterChainManager().findFilterChain(*connectionSocket()); - if (filter_chain_ == nullptr) { - listener_stats_.no_filter_chain_match_.inc(); - CloseConnection(quic::QUIC_CRYPTO_INTERNAL_ERROR, - "closing connection: no matching filter chain found for handshake", - quic::ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET); - return false; - } - const bool empty_filter_chain = !listener_config_.filterChainFactory().createNetworkFilterChain( - envoyConnection(), filter_chain_->networkFilterFactories()); - if (empty_filter_chain) { - // TODO(danzh) check empty filter chain at config load time instead of here. 
- CloseConnection(quic::QUIC_CRYPTO_INTERNAL_ERROR, "closing connection: filter chain is empty", - quic::ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET); - return false; - } return true; } diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_server_connection.h b/source/extensions/quic_listeners/quiche/envoy_quic_server_connection.h index ad46147107505..7b7fac05e9257 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_server_connection.h +++ b/source/extensions/quic_listeners/quiche/envoy_quic_server_connection.h @@ -15,19 +15,11 @@ class EnvoyQuicServerConnection : public EnvoyQuicConnection { quic::QuicAlarmFactory& alarm_factory, quic::QuicPacketWriter* writer, bool owns_writer, const quic::ParsedQuicVersionVector& supported_versions, - Network::ListenerConfig& listener_config, - Server::ListenerStats& listener_stats, Network::Socket& listen_socket); + Network::Socket& listen_socket); // EnvoyQuicConnection // Overridden to set connection_socket_ with initialized self address and retrieve filter chain. bool OnPacketHeader(const quic::QuicPacketHeader& header) override; - -private: - Network::ListenerConfig& listener_config_; - Server::ListenerStats& listener_stats_; - // Latched to the corresponding quic FilterChain after connection_socket_ is - // initialized. - const Network::FilterChain* filter_chain_{nullptr}; }; } // namespace Quic diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_server_session.cc b/source/extensions/quic_listeners/quiche/envoy_quic_server_session.cc index 05e76e9ba459f..bc708dea4866f 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_server_session.cc +++ b/source/extensions/quic_listeners/quiche/envoy_quic_server_session.cc @@ -1,15 +1,10 @@ #include "extensions/quic_listeners/quiche/envoy_quic_server_session.h" -#pragma GCC diagnostic push -// QUICHE allows unused parameters. -#pragma GCC diagnostic ignored "-Wunused-parameter" -// QUICHE uses offsetof(). 
-#pragma GCC diagnostic ignored "-Winvalid-offsetof" - -#include "quiche/quic/core/quic_crypto_server_stream.h" -#pragma GCC diagnostic pop +#include #include "common/common/assert.h" + +#include "extensions/quic_listeners/quiche/envoy_quic_crypto_server_stream.h" #include "extensions/quic_listeners/quiche/envoy_quic_server_stream.h" namespace Envoy { @@ -20,11 +15,11 @@ EnvoyQuicServerSession::EnvoyQuicServerSession( std::unique_ptr connection, quic::QuicSession::Visitor* visitor, quic::QuicCryptoServerStream::Helper* helper, const quic::QuicCryptoServerConfig* crypto_config, quic::QuicCompressedCertsCache* compressed_certs_cache, Event::Dispatcher& dispatcher, - uint32_t send_buffer_limit) + uint32_t send_buffer_limit, Network::ListenerConfig& listener_config) : quic::QuicServerSessionBase(config, supported_versions, connection.get(), visitor, helper, crypto_config, compressed_certs_cache), QuicFilterManagerConnectionImpl(*connection, dispatcher, send_buffer_limit), - quic_connection_(std::move(connection)) {} + quic_connection_(std::move(connection)), listener_config_(listener_config) {} EnvoyQuicServerSession::~EnvoyQuicServerSession() { ASSERT(!quic_connection_->connected()); @@ -39,8 +34,17 @@ std::unique_ptr EnvoyQuicServerSession::CreateQuicCryptoServerStream( const quic::QuicCryptoServerConfig* crypto_config, quic::QuicCompressedCertsCache* compressed_certs_cache) { - return quic::CreateCryptoServerStream(crypto_config, compressed_certs_cache, this, - stream_helper()); + switch (connection()->version().handshake_protocol) { + case quic::PROTOCOL_QUIC_CRYPTO: + return std::make_unique(crypto_config, compressed_certs_cache, + this, stream_helper()); + case quic::PROTOCOL_TLS1_3: + return std::make_unique(this, *crypto_config); + case quic::PROTOCOL_UNSUPPORTED: + PANIC(fmt::format("Unknown handshake protocol: {}", + static_cast(connection()->version().handshake_protocol))); + } + return nullptr; } quic::QuicSpdyStream* 
EnvoyQuicServerSession::CreateIncomingStream(quic::QuicStreamId id) { @@ -89,7 +93,13 @@ void EnvoyQuicServerSession::Initialize() { } void EnvoyQuicServerSession::OnCanWrite() { + const uint64_t headers_to_send_old = + quic::VersionUsesHttp3(transport_version()) ? 0u : headers_stream()->BufferedDataBytes(); + quic::QuicServerSessionBase::OnCanWrite(); + const uint64_t headers_to_send_new = + quic::VersionUsesHttp3(transport_version()) ? 0u : headers_stream()->BufferedDataBytes(); + adjustBytesToSend(headers_to_send_new - headers_to_send_old); // Do not update delay close state according to connection level packet egress because that is // equivalent to TCP transport layer egress. But only do so if the session gets chance to write. maybeApplyDelayClosePolicy(); @@ -97,13 +107,33 @@ void EnvoyQuicServerSession::OnCanWrite() { void EnvoyQuicServerSession::SetDefaultEncryptionLevel(quic::EncryptionLevel level) { quic::QuicServerSessionBase::SetDefaultEncryptionLevel(level); - if (level == quic::ENCRYPTION_FORWARD_SECURE) { - // This is only reached once, when handshake is done. - raiseConnectionEvent(Network::ConnectionEvent::Connected); + if (level != quic::ENCRYPTION_FORWARD_SECURE) { + return; } + maybeCreateNetworkFilters(); + // This is only reached once, when handshake is done. + raiseConnectionEvent(Network::ConnectionEvent::Connected); } bool EnvoyQuicServerSession::hasDataToWrite() { return HasDataToWrite(); } +void EnvoyQuicServerSession::OnOneRttKeysAvailable() { + quic::QuicServerSessionBase::OnOneRttKeysAvailable(); + maybeCreateNetworkFilters(); + raiseConnectionEvent(Network::ConnectionEvent::Connected); +} + +void EnvoyQuicServerSession::maybeCreateNetworkFilters() { + const EnvoyQuicProofSourceDetails* proof_source_details = + dynamic_cast(GetCryptoStream())->proofSourceDetails(); + ASSERT(proof_source_details != nullptr, + "ProofSource didn't provide ProofSource::Details. 
No filter chain will be installed."); + + const bool has_filter_initialized = + listener_config_.filterChainFactory().createNetworkFilterChain( + *this, proof_source_details->filterChain().networkFilterFactories()); + ASSERT(has_filter_initialized); +} + } // namespace Quic } // namespace Envoy diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_server_session.h b/source/extensions/quic_listeners/quiche/envoy_quic_server_session.h index cbbbfb8c0f372..a50e6fbe8f44c 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_server_session.h +++ b/source/extensions/quic_listeners/quiche/envoy_quic_server_session.h @@ -15,6 +15,7 @@ #include "extensions/quic_listeners/quiche/quic_filter_manager_connection_impl.h" #include "extensions/quic_listeners/quiche/envoy_quic_server_stream.h" +#include "extensions/quic_listeners/quiche/envoy_quic_crypto_server_stream.h" namespace Envoy { namespace Quic { @@ -33,7 +34,8 @@ class EnvoyQuicServerSession : public quic::QuicServerSessionBase, quic::QuicCryptoServerStreamBase::Helper* helper, const quic::QuicCryptoServerConfig* crypto_config, quic::QuicCompressedCertsCache* compressed_certs_cache, - Event::Dispatcher& dispatcher, uint32_t send_buffer_limit); + Event::Dispatcher& dispatcher, uint32_t send_buffer_limit, + Network::ListenerConfig& listener_config); ~EnvoyQuicServerSession() override; @@ -50,6 +52,7 @@ class EnvoyQuicServerSession : public quic::QuicServerSessionBase, quic::ConnectionCloseSource source) override; void Initialize() override; void OnCanWrite() override; + void OnOneRttKeysAvailable() override; // quic::QuicSpdySession void SetDefaultEncryptionLevel(quic::EncryptionLevel level) override; @@ -73,8 +76,10 @@ class EnvoyQuicServerSession : public quic::QuicServerSessionBase, private: void setUpRequestDecoder(EnvoyQuicServerStream& stream); + void maybeCreateNetworkFilters(); std::unique_ptr quic_connection_; + Network::ListenerConfig& listener_config_; // These callbacks are owned by network 
filters and quic session should out live // them. Http::ServerConnectionCallbacks* http_connection_callbacks_{nullptr}; diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_server_stream.cc b/source/extensions/quic_listeners/quiche/envoy_quic_server_stream.cc index 12d93227bbb20..feda7c2f2a94d 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_server_stream.cc +++ b/source/extensions/quic_listeners/quiche/envoy_quic_server_stream.cc @@ -64,8 +64,18 @@ void EnvoyQuicServerStream::encodeHeaders(const Http::ResponseHeaderMap& headers // Same vulnerability exists in crypto stream which can infinitely buffer data // if handshake implementation goes wrong. // TODO(#8826) Modify QUICHE to have an upper bound for header stream send buffer. + // This is counting not serialized bytes in the send buffer. + quic::QuicStream* writing_stream = + quic::VersionUsesHttp3(transport_version()) + ? static_cast(this) + : (dynamic_cast(session())->headers_stream()); + const uint64_t bytes_to_send_old = writing_stream->BufferedDataBytes(); + WriteHeaders(envoyHeadersToSpdyHeaderBlock(headers), end_stream, nullptr); local_end_stream_ = end_stream; + const uint64_t bytes_to_send_new = writing_stream->BufferedDataBytes(); + ASSERT(bytes_to_send_old <= bytes_to_send_new); + maybeCheckWatermark(bytes_to_send_old, bytes_to_send_new, *filterManagerConnection()); } void EnvoyQuicServerStream::encodeData(Buffer::Instance& data, bool end_stream) { @@ -73,7 +83,7 @@ void EnvoyQuicServerStream::encodeData(Buffer::Instance& data, bool end_stream) data.length()); local_end_stream_ = end_stream; // This is counting not serialized bytes in the send buffer. - uint64_t bytes_to_send_old = BufferedDataBytes(); + const uint64_t bytes_to_send_old = BufferedDataBytes(); // QUIC stream must take all. 
WriteBodySlices(quic::QuicMemSliceSpan(quic::QuicMemSliceSpanImpl(data)), end_stream); if (data.length() > 0) { @@ -82,7 +92,7 @@ void EnvoyQuicServerStream::encodeData(Buffer::Instance& data, bool end_stream) return; } - uint64_t bytes_to_send_new = BufferedDataBytes(); + const uint64_t bytes_to_send_new = BufferedDataBytes(); ASSERT(bytes_to_send_old <= bytes_to_send_new); maybeCheckWatermark(bytes_to_send_old, bytes_to_send_new, *filterManagerConnection()); } @@ -91,7 +101,15 @@ void EnvoyQuicServerStream::encodeTrailers(const Http::ResponseTrailerMap& trail ASSERT(!local_end_stream_); local_end_stream_ = true; ENVOY_STREAM_LOG(debug, "encodeTrailers: {}.", *this, trailers); + quic::QuicStream* writing_stream = + quic::VersionUsesHttp3(transport_version()) + ? static_cast(this) + : (dynamic_cast(session())->headers_stream()); + const uint64_t bytes_to_send_old = writing_stream->BufferedDataBytes(); WriteTrailers(envoyHeadersToSpdyHeaderBlock(trailers), nullptr); + const uint64_t bytes_to_send_new = writing_stream->BufferedDataBytes(); + ASSERT(bytes_to_send_old <= bytes_to_send_new); + maybeCheckWatermark(bytes_to_send_old, bytes_to_send_new, *filterManagerConnection()); } void EnvoyQuicServerStream::encodeMetadata(const Http::MetadataMapVector& /*metadata_map_vector*/) { @@ -230,9 +248,9 @@ void EnvoyQuicServerStream::OnClose() { } void EnvoyQuicServerStream::OnCanWrite() { - uint64_t buffered_data_old = BufferedDataBytes(); + const uint64_t buffered_data_old = BufferedDataBytes(); quic::QuicSpdyServerStreamBase::OnCanWrite(); - uint64_t buffered_data_new = BufferedDataBytes(); + const uint64_t buffered_data_new = BufferedDataBytes(); // As long as OnCanWriteNewData() is no-op, data to sent in buffer shouldn't // increase. 
ASSERT(buffered_data_new <= buffered_data_old); diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_server_stream.h b/source/extensions/quic_listeners/quiche/envoy_quic_server_stream.h index 59c03e79509a1..a9393a1761ff2 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_server_stream.h +++ b/source/extensions/quic_listeners/quiche/envoy_quic_server_stream.h @@ -39,6 +39,9 @@ class EnvoyQuicServerStream : public quic::QuicSpdyServerStreamBase, // Http::Stream void resetStream(Http::StreamResetReason reason) override; + void setFlushTimeout(std::chrono::milliseconds) override { + // TODO(mattklein123): Actually implement this for HTTP/3 similar to HTTP/2. + } // quic::QuicSpdyStream void OnBodyAvailable() override; void OnStreamReset(const quic::QuicRstStreamFrame& frame) override; diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_stream.h b/source/extensions/quic_listeners/quiche/envoy_quic_stream.h index 7171473e322ce..258f212917f1e 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_stream.h +++ b/source/extensions/quic_listeners/quiche/envoy_quic_stream.h @@ -72,9 +72,11 @@ class EnvoyQuicStream : public virtual Http::StreamEncoder, void addCallbacks(Http::StreamCallbacks& callbacks) override { ASSERT(!local_end_stream_); - addCallbacks_(callbacks); + addCallbacksHelper(callbacks); + } + void removeCallbacks(Http::StreamCallbacks& callbacks) override { + removeCallbacksHelper(callbacks); } - void removeCallbacks(Http::StreamCallbacks& callbacks) override { removeCallbacks_(callbacks); } uint32_t bufferLimit() override { return send_buffer_simulation_.highWatermark(); } const Network::Address::InstanceConstSharedPtr& connectionLocalAddress() override { return connection()->localAddress(); diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_utils.cc b/source/extensions/quic_listeners/quiche/envoy_quic_utils.cc index de1cf601c3bce..c7a32fbf317d6 100644 --- 
a/source/extensions/quic_listeners/quiche/envoy_quic_utils.cc +++ b/source/extensions/quic_listeners/quiche/envoy_quic_utils.cc @@ -22,39 +22,41 @@ quicAddressToEnvoyAddressInstance(const quic::QuicSocketAddress& quic_address) { : nullptr; } -quic::QuicSocketAddress envoyAddressInstanceToQuicSocketAddress( - const Network::Address::InstanceConstSharedPtr& envoy_address) { - ASSERT(envoy_address != nullptr && envoy_address->type() == Network::Address::Type::Ip); - uint32_t port = envoy_address->ip()->port(); +quic::QuicSocketAddress envoyIpAddressToQuicSocketAddress(const Network::Address::Ip* envoy_ip) { + if (envoy_ip == nullptr) { + // Return uninitialized socket addr + return quic::QuicSocketAddress(); + } + + uint32_t port = envoy_ip->port(); sockaddr_storage ss; - if (envoy_address->ip()->version() == Network::Address::IpVersion::v4) { + + if (envoy_ip->version() == Network::Address::IpVersion::v4) { + // Create and return quic ipv4 address auto ipv4_addr = reinterpret_cast(&ss); memset(ipv4_addr, 0, sizeof(sockaddr_in)); ipv4_addr->sin_family = AF_INET; ipv4_addr->sin_port = htons(port); - ipv4_addr->sin_addr.s_addr = envoy_address->ip()->ipv4()->address(); + ipv4_addr->sin_addr.s_addr = envoy_ip->ipv4()->address(); } else { + // Create and return quic ipv6 address auto ipv6_addr = reinterpret_cast(&ss); memset(ipv6_addr, 0, sizeof(sockaddr_in6)); ipv6_addr->sin6_family = AF_INET6; ipv6_addr->sin6_port = htons(port); ASSERT(sizeof(ipv6_addr->sin6_addr.s6_addr) == 16u); - *reinterpret_cast(ipv6_addr->sin6_addr.s6_addr) = - envoy_address->ip()->ipv6()->address(); + *reinterpret_cast(ipv6_addr->sin6_addr.s6_addr) = envoy_ip->ipv6()->address(); } return quic::QuicSocketAddress(ss); } spdy::SpdyHeaderBlock envoyHeadersToSpdyHeaderBlock(const Http::HeaderMap& headers) { spdy::SpdyHeaderBlock header_block; - headers.iterate( - [](const Http::HeaderEntry& header, void* context) -> Http::HeaderMap::Iterate { - auto spdy_headers = static_cast(context); - // The 
key-value pairs are copied. - spdy_headers->insert({header.key().getStringView(), header.value().getStringView()}); - return Http::HeaderMap::Iterate::Continue; - }, - &header_block); + headers.iterate([&header_block](const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate { + // The key-value pairs are copied. + header_block.insert({header.key().getStringView(), header.value().getStringView()}); + return Http::HeaderMap::Iterate::Continue; + }); return header_block; } @@ -90,13 +92,21 @@ Http::StreamResetReason quicErrorCodeToEnvoyResetReason(quic::QuicErrorCode erro } } +Http::GoAwayErrorCode quicErrorCodeToEnvoyErrorCode(quic::QuicErrorCode error) noexcept { + switch (error) { + case quic::QUIC_NO_ERROR: + return Http::GoAwayErrorCode::NoError; + default: + return Http::GoAwayErrorCode::Other; + } +} + Network::ConnectionSocketPtr createConnectionSocket(Network::Address::InstanceConstSharedPtr& peer_addr, Network::Address::InstanceConstSharedPtr& local_addr, const Network::ConnectionSocket::OptionsSharedPtr& options) { - Network::IoHandlePtr io_handle = peer_addr->socket(Network::Address::SocketType::Datagram); - auto connection_socket = - std::make_unique(std::move(io_handle), local_addr, peer_addr); + auto connection_socket = std::make_unique( + Network::Socket::Type::Datagram, local_addr, peer_addr); connection_socket->addOptions(Network::SocketOptionFactory::buildIpPacketInfoOptions()); connection_socket->addOptions(Network::SocketOptionFactory::buildRxQueueOverFlowOptions()); if (options != nullptr) { @@ -108,12 +118,9 @@ createConnectionSocket(Network::Address::InstanceConstSharedPtr& peer_addr, ENVOY_LOG_MISC(error, "Fail to apply pre-bind options"); return connection_socket; } - local_addr->bind(connection_socket->ioHandle().fd()); + connection_socket->bind(local_addr); ASSERT(local_addr->ip()); - if (local_addr->ip()->port() == 0) { - // Get ephemeral port number. 
- local_addr = Network::Address::addressFromFd(connection_socket->ioHandle().fd()); - } + local_addr = connection_socket->localAddress(); if (!Network::Socket::applyOptions(connection_socket->options(), *connection_socket, envoy::config::core::v3::SocketOption::STATE_BOUND)) { ENVOY_LOG_MISC(error, "Fail to apply post-bind options"); @@ -122,5 +129,66 @@ createConnectionSocket(Network::Address::InstanceConstSharedPtr& peer_addr, return connection_socket; } +bssl::UniquePtr parseDERCertificate(const std::string& der_bytes, + std::string* error_details) { + const uint8_t* data; + const uint8_t* orig_data; + orig_data = data = reinterpret_cast(der_bytes.data()); + bssl::UniquePtr cert(d2i_X509(nullptr, &data, der_bytes.size())); + if (!cert.get()) { + *error_details = "d2i_X509: fail to parse DER"; + return nullptr; + } + if (data < orig_data || static_cast(data - orig_data) != der_bytes.size()) { + *error_details = "There is trailing garbage in DER."; + return nullptr; + } + return cert; +} + +int deduceSignatureAlgorithmFromPublicKey(const EVP_PKEY* public_key, std::string* error_details) { + int sign_alg = 0; + const int pkey_id = EVP_PKEY_id(public_key); + switch (pkey_id) { + case EVP_PKEY_EC: { + // We only support P-256 ECDSA today. + const EC_KEY* ecdsa_public_key = EVP_PKEY_get0_EC_KEY(public_key); + // Since we checked the key type above, this should be valid. + ASSERT(ecdsa_public_key != nullptr); + const EC_GROUP* ecdsa_group = EC_KEY_get0_group(ecdsa_public_key); + if (ecdsa_group == nullptr || EC_GROUP_get_curve_name(ecdsa_group) != NID_X9_62_prime256v1) { + *error_details = "Invalid leaf cert, only P-256 ECDSA certificates are supported"; + break; + } + // QUICHE uses SHA-256 as hash function in cert signature. + sign_alg = SSL_SIGN_ECDSA_SECP256R1_SHA256; + } break; + case EVP_PKEY_RSA: { + // We require RSA certificates with 2048-bit or larger keys. 
+ const RSA* rsa_public_key = EVP_PKEY_get0_RSA(public_key); + // Since we checked the key type above, this should be valid. + ASSERT(rsa_public_key != nullptr); + const unsigned rsa_key_length = RSA_size(rsa_public_key); +#ifdef BORINGSSL_FIPS + if (rsa_key_length != 2048 / 8 && rsa_key_length != 3072 / 8) { + *error_details = "Invalid leaf cert, only RSA certificates with 2048-bit or 3072-bit keys " + "are supported in FIPS mode"; + break; + } +#else + if (rsa_key_length < 2048 / 8) { + *error_details = + "Invalid leaf cert, only RSA certificates with 2048-bit or larger keys are supported"; + break; + } +#endif + sign_alg = SSL_SIGN_RSA_PSS_RSAE_SHA256; + } break; + default: + *error_details = "Invalid leaf cert, only RSA and ECDSA certificates are supported"; + } + return sign_alg; +} + } // namespace Quic } // namespace Envoy diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_utils.h b/source/extensions/quic_listeners/quiche/envoy_quic_utils.h index 3348a1096b5fb..5c321ab749f1d 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_utils.h +++ b/source/extensions/quic_listeners/quiche/envoy_quic_utils.h @@ -24,6 +24,8 @@ #include "quiche/quic/platform/api/quic_ip_address.h" #include "quiche/quic/platform/api/quic_socket_address.h" +#include "openssl/ssl.h" + namespace Envoy { namespace Quic { @@ -32,13 +34,12 @@ namespace Quic { Network::Address::InstanceConstSharedPtr quicAddressToEnvoyAddressInstance(const quic::QuicSocketAddress& quic_address); -quic::QuicSocketAddress envoyAddressInstanceToQuicSocketAddress( - const Network::Address::InstanceConstSharedPtr& envoy_address); +quic::QuicSocketAddress envoyIpAddressToQuicSocketAddress(const Network::Address::Ip* envoy_ip); // The returned header map has all keys in lower case. 
template std::unique_ptr quicHeadersToEnvoyHeaders(const quic::QuicHeaderList& header_list) { - auto headers = std::make_unique(); + auto headers = T::create(); for (const auto& entry : header_list) { // TODO(danzh): Avoid copy by referencing entry as header_list is already validated by QUIC. headers->addCopy(Http::LowerCaseString(entry.first), entry.second); @@ -48,7 +49,7 @@ std::unique_ptr quicHeadersToEnvoyHeaders(const quic::QuicHeaderList& header_ template std::unique_ptr spdyHeaderBlockToEnvoyHeaders(const spdy::SpdyHeaderBlock& header_block) { - auto headers = std::make_unique(); + auto headers = T::create(); for (auto entry : header_block) { // TODO(danzh): Avoid temporary strings and addCopy() with std::string_view. std::string key(entry.first); @@ -69,6 +70,10 @@ Http::StreamResetReason quicRstErrorToEnvoyResetReason(quic::QuicRstStreamErrorC // Called when underlying QUIC connection is closed either locally or by peer. Http::StreamResetReason quicErrorCodeToEnvoyResetReason(quic::QuicErrorCode error); +// Called when a GOAWAY frame is received. +ABSL_MUST_USE_RESULT +Http::GoAwayErrorCode quicErrorCodeToEnvoyErrorCode(quic::QuicErrorCode error) noexcept; + // Create a connection socket instance and apply given socket options to the // socket. IP_PKTINFO and SO_RXQ_OVFL is always set if supported. Network::ConnectionSocketPtr @@ -76,5 +81,14 @@ createConnectionSocket(Network::Address::InstanceConstSharedPtr& peer_addr, Network::Address::InstanceConstSharedPtr& local_addr, const Network::ConnectionSocket::OptionsSharedPtr& options); +// Convert a cert in string form to X509 object. +// Return nullptr if the bytes passed cannot be passed. +bssl::UniquePtr parseDERCertificate(const std::string& der_bytes, std::string* error_details); + +// Deduce the suitable signature algorithm according to the public key. 
+// Return the sign algorithm id works with the public key; If the public key is +// not supported, return 0 with error_details populated correspondingly. +int deduceSignatureAlgorithmFromPublicKey(const EVP_PKEY* public_key, std::string* error_details); + } // namespace Quic } // namespace Envoy diff --git a/source/extensions/quic_listeners/quiche/platform/BUILD b/source/extensions/quic_listeners/quiche/platform/BUILD index 3f5cb5fef47f6..e7f70f86cb264 100644 --- a/source/extensions/quic_listeners/quiche/platform/BUILD +++ b/source/extensions/quic_listeners/quiche/platform/BUILD @@ -1,12 +1,12 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +envoy_extension_package() # Build targets in this package are part of the QUICHE platform implementation. # These implementations are the infrastructure building block for QUIC. They are @@ -104,6 +104,7 @@ envoy_cc_library( deps = [ "//source/common/common:assert_lib", "//source/common/common:stl_helpers", + "//source/common/common:utility_lib", ], ) @@ -187,7 +188,7 @@ envoy_cc_library( "//source/common/common:assert_lib", "//source/common/filesystem:directory_lib", "//source/common/filesystem:filesystem_lib", - "//source/common/http:utility_lib", + "//source/common/http:url_utility_lib", ], ) @@ -228,6 +229,16 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "quic_platform_udp_socket_impl_lib", + hdrs = select({ + "//bazel:linux": ["quic_udp_socket_platform_impl.h"], + "//conditions:default": [], + }), + repository = "@envoy", + tags = ["nofips"], +) + envoy_cc_library( name = "envoy_quic_clock_lib", srcs = ["envoy_quic_clock.cc"], @@ -240,17 +251,27 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "quiche_common_platform_optional_impl_lib", + hdrs = ["quiche_optional_impl.h"], + external_deps = [ + "abseil_node_hash_map", + ], + visibility = 
["//visibility:public"], +) + envoy_cc_library( name = "quiche_common_platform_impl_lib", + srcs = ["quiche_time_utils_impl.cc"], hdrs = [ "quiche_arraysize_impl.h", "quiche_logging_impl.h", "quiche_map_util_impl.h", - "quiche_optional_impl.h", "quiche_ptr_util_impl.h", "quiche_str_cat_impl.h", "quiche_string_piece_impl.h", "quiche_text_utils_impl.h", + "quiche_time_utils_impl.h", "quiche_unordered_containers_impl.h", ], external_deps = [ @@ -261,6 +282,7 @@ envoy_cc_library( deps = [ ":quic_platform_logging_impl_lib", ":string_utils_lib", + "@com_googlesource_quiche//:quiche_common_platform_optional", ], ) diff --git a/source/extensions/quic_listeners/quiche/platform/flags_impl.h b/source/extensions/quic_listeners/quiche/platform/flags_impl.h index 22aca0ef995f7..5db9399255105 100644 --- a/source/extensions/quic_listeners/quiche/platform/flags_impl.h +++ b/source/extensions/quic_listeners/quiche/platform/flags_impl.h @@ -91,7 +91,7 @@ template class TypedFlag : public Flag { private: mutable absl::Mutex mutex_; - T value_ GUARDED_BY(mutex_); + T value_ ABSL_GUARDED_BY(mutex_); T default_value_; }; diff --git a/source/extensions/quic_listeners/quiche/platform/flags_list.h b/source/extensions/quic_listeners/quiche/platform/flags_list.h index 2043554da4389..587e80054c0a3 100644 --- a/source/extensions/quic_listeners/quiche/platform/flags_list.h +++ b/source/extensions/quic_listeners/quiche/platform/flags_list.h @@ -13,41 +13,40 @@ #if defined(QUICHE_FLAG) -QUICHE_FLAG(bool, http2_reloadable_flag_http2_add_backend_ping_manager, true, - "If true, SpdyBackendDispatcher will instantiate and use a PeriodicPingManager for " - "handling PING logic.") - QUICHE_FLAG( bool, http2_reloadable_flag_http2_backend_alpn_failure_error_code, false, "If true, the GFE will return a new ResponseCodeDetails error when ALPN to the backend fails.") +QUICHE_FLAG(bool, http2_reloadable_flag_http2_ip_based_cwnd_exp, false, + "If true, enable IP address based CWND bootstrapping experiment with 
different " + "bandwidth models and priorities in HTTP2.") + QUICHE_FLAG(bool, http2_reloadable_flag_http2_security_requirement_for_client3, false, "If true, check whether client meets security requirements during SSL handshake. If " "flag is true and client does not meet security requirements, do not negotiate HTTP/2 " "with client or terminate the session with SPDY_INADEQUATE_SECURITY if HTTP/2 is " "already negotiated. The spec contains both cipher and TLS version requirements.") -QUICHE_FLAG( - bool, http2_reloadable_flag_http2_skip_querying_entry_buffer_error, true, - "If true, do not query entry_buffer_.error_detected() in HpackDecoder::error_detected().") - -QUICHE_FLAG( - bool, http2_reloadable_flag_http2_support_periodic_ping_manager_cbs, true, - "If true, PeriodicPingManager will invoke user-provided callbacks on receiving PING acks.") - -QUICHE_FLAG( - bool, http2_reloadable_flag_http2_use_settings_rtt_in_ping_manager, true, - "If true along with --gfe2_reloadable_flag_http2_add_backend_ping_manager, SpdyDispatcher will " - "bootstrap its PingManager RTT with the RTT determined from the initial SETTINGS<-->ack.") +QUICHE_FLAG(bool, http2_reloadable_flag_permissive_http2_switch, false, + "If true, the GFE allows both HTTP/1.0 and HTTP/1.1 versions in HTTP/2 upgrade " + "requests/responses.") QUICHE_FLAG(bool, quic_reloadable_flag_advertise_quic_for_https_for_debugips, false, "") QUICHE_FLAG(bool, quic_reloadable_flag_advertise_quic_for_https_for_external_users, false, "") +QUICHE_FLAG(bool, quic_reloadable_flag_gclb_quic_allow_alia, true, + "If gfe2_reloadable_flag_gclb_use_alia is also true, use Alia for GCLB QUIC " + "handshakes. 
To be used as a big red button if there's a problem with Alia/QUIC.") + QUICHE_FLAG(bool, quic_reloadable_flag_quic_ack_delay_alarm_granularity, false, "When true, ensure the ACK delay is never less than the alarm granularity when ACK " "decimation is enabled.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_add_silent_idle_timeout, false, + "If true, when server is silently closing connections due to idle timeout, serialize " + "the connection close packets which will be added to time wait list.") + QUICHE_FLAG(bool, quic_reloadable_flag_quic_allow_backend_set_stream_ttl, false, "If true, check backend response header for X-Response-Ttl. If it is provided, the " "stream TTL is set. A QUIC stream will be immediately canceled when tries to write " @@ -59,56 +58,45 @@ QUICHE_FLAG(bool, quic_reloadable_flag_quic_allow_client_enabled_bbr_v2, true, QUICHE_FLAG(bool, quic_reloadable_flag_quic_alpn_dispatch, false, "Support different QUIC sessions, as indicated by ALPN. Used for QBONE.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_arm_pto_with_earliest_sent_time, true, - "If true, arm the 1st PTO with earliest in flight sent time.") - -QUICHE_FLAG(bool, quic_reloadable_flag_quic_avoid_overestimate_bandwidth_with_aggregation, true, - "If true, fix QUIC bandwidth sampler to avoid over estimating bandwidth in the " - "presence of ack aggregation.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr2_avoid_too_low_probe_bw_cwnd, false, + "If true, QUIC BBRv2's PROBE_BW mode will not reduce cwnd below BDP+ack_height.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr2_add_ack_height_to_queueing_threshold, false, - "If true, QUIC BBRv2 to take ack height into account when calculating " - "queuing_threshold in PROBE_UP.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr2_fewer_startup_round_trips, false, + "When true, the 1RTT and 2RTT connection options decrease the number of round trips in " + "BBRv2 STARTUP without a 25% bandwidth increase to 1 or 2 round trips respectively.") 
-QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr2_avoid_unnecessary_probe_rtt, true, - "If true, QUIC BBRv2 to avoid unnecessary PROBE_RTTs after quiescence.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr2_ignore_inflight_lo, false, + "When true, QUIC's BBRv2 ignores inflight_lo in PROBE_BW.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr_donot_inject_bandwidth, true, - "If true, do not inject bandwidth in BbrSender::AdjustNetworkParameters.") - -QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr_fix_pacing_rate, true, - "If true, re-calculate pacing rate when cwnd gets bootstrapped.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr2_improve_adjust_network_parameters, false, + "If true, improve Bbr2Sender::AdjustNetworkParameters by 1) do not inject a bandwidth " + "sample to the bandwidth filter, and 2) re-calculate pacing rate after cwnd updated..") QUICHE_FLAG( - bool, quic_reloadable_flag_quic_bbr_fix_zero_bw_on_loss_only_event, false, - "If true, fix a bug in QUIC BBR where bandwidth estimate becomes 0 after a loss only event.") + bool, quic_reloadable_flag_quic_bbr2_limit_inflight_hi, false, + "When true, the B2HI connection option limits reduction of inflight_hi to (1-Beta)*CWND.") QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr_flexible_app_limited, false, "When true and the BBR9 connection option is present, BBR only considers bandwidth " "samples app-limited if they're not filling the pipe.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr_mitigate_overly_large_bandwidth_sample, true, - "If true, when cwnd gets bootstrapped and causing badly overshoot, reset cwnd and " - "pacing rate based on measured bw.") - QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr_no_bytes_acked_in_startup_recovery, false, "When in STARTUP and recovery, do not add bytes_acked to QUIC BBR's CWND in " "CalculateCongestionWindow()") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr_one_mss_conservation, false, - "When true, ensure BBR allows at least one MSS to be sent in 
response to an ACK in " - "packet conservation.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_bootstrap_cwnd_by_gfe_bandwidth, false, + "If true, bootstrap initial QUIC cwnd by GFE measured bandwidth models.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr_startup_rate_reduction, false, - "When true, enables the BBS4 and BBS5 connection options, which reduce BBR's pacing " - "rate in STARTUP as more losses occur as a fraction of CWND.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_bootstrap_cwnd_by_spdy_priority, true, + "If true, bootstrap initial QUIC cwnd by SPDY priorities.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_bundle_retransmittable_with_pto_ack, true, - "When the EACK connection option is sent by the client, an ack-eliciting frame is " - "bundled with ACKs sent after the PTO fires.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_check_encryption_level_in_fast_path, false, + "If true, when data is sending in fast path mode in the creator, making sure stream " + "data is sent in the right encryption level.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_bw_sampler_app_limited_starting_value, false, - "If true, quic::BandwidthSampler will start in application limited phase.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_coalesced_packet_of_higher_space2, false, + "If true, try to coalesce packet of higher space with retransmissions to mitigate RTT " + "inflations.") QUICHE_FLAG(bool, quic_reloadable_flag_quic_conservative_bursts, false, "If true, set burst token to 2 in cwnd bootstrapping experiment.") @@ -116,12 +104,11 @@ QUICHE_FLAG(bool, quic_reloadable_flag_quic_conservative_bursts, false, QUICHE_FLAG(bool, quic_reloadable_flag_quic_conservative_cwnd_and_pacing_gains, false, "If true, uses conservative cwnd gain and pacing gain when cwnd gets bootstrapped.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_create_incoming_stream_bug, false, - "If true, trigger QUIC_BUG in two ShouldCreateIncomingStream() overrides when called " - "with locally 
initiated stream ID.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_default_enable_5rto_blackhole_detection2, true, + "If true, default-enable 5RTO blachole detection.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_debug_wrong_qos, false, - "If true, consider getting QoS after stream has been detached as GFE bug.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_default_on_pto, false, + "If true, default on PTO which unifies TLP + RTO loss recovery.") QUICHE_FLAG(bool, quic_reloadable_flag_quic_default_to_bbr, true, "When true, defaults to BBR congestion control instead of Cubic.") @@ -130,85 +117,161 @@ QUICHE_FLAG(bool, quic_reloadable_flag_quic_default_to_bbr_v2, false, "If true, use BBRv2 as the default congestion controller. Takes precedence over " "--quic_default_to_bbr.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_determine_serialized_packet_fate_early, false, + "If true, determine a serialized packet's fate before the packet gets serialized.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_disable_server_blackhole_detection, false, + "If true, disable blackhole detection on server side.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_disable_version_draft_25, true, + "If true, disable QUIC version h3-25.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_disable_version_draft_27, false, + "If true, disable QUIC version h3-27.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_disable_version_draft_29, false, + "If true, disable QUIC version h3-29.") + QUICHE_FLAG(bool, quic_reloadable_flag_quic_disable_version_q043, false, "If true, disable QUIC version Q043.") QUICHE_FLAG(bool, quic_reloadable_flag_quic_disable_version_q046, false, "If true, disable QUIC version Q046.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_disable_version_q048, false, - "If true, disable QUIC version Q048.") - -QUICHE_FLAG(bool, quic_reloadable_flag_quic_disable_version_q049, false, - "If true, disable QUIC version Q049.") - QUICHE_FLAG(bool, 
quic_reloadable_flag_quic_disable_version_q050, false, "If true, disable QUIC version Q050.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_disable_version_t050, false, + "If true, disable QUIC version h3-T050.") + +QUICHE_FLAG( + bool, quic_reloadable_flag_quic_dispatcher_legacy_version_encapsulation, false, + "When true, QuicDispatcher supports decapsulation of Legacy Version Encapsulation packets.") + QUICHE_FLAG( bool, quic_reloadable_flag_quic_do_not_accept_stop_waiting, false, "In v44 and above, where STOP_WAITING is never sent, close the connection if it's received.") +QUICHE_FLAG( + bool, quic_reloadable_flag_quic_do_not_close_stream_again_on_connection_close, false, + "If true, do not try to close stream again if stream fails to be closed upon connection close.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_do_not_use_stream_map, false, + "If true, QUIC subclasses will no longer directly access stream_map for its content.") + QUICHE_FLAG(bool, quic_reloadable_flag_quic_donot_reset_ideal_next_packet_send_time, false, "If true, stop resetting ideal_next_packet_send_time_ in pacing sender.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_ack_decimation, true, - "Default enables QUIC ack decimation and adds a connection option to disable it.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_dont_pad_chlo, true, + "When true, do not pad the QUIC_CRYPTO CHLO message itself. 
Note that the packet " + "containing the CHLO will still be padded.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_dont_send_max_ack_delay_if_default, true, + "When true, QUIC_CRYPTO versions of QUIC will not send the max ACK delay unless it is " + "configured to a non-default value.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_pcc3, false, - "If true, enable experiment for testing PCC congestion-control.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_loss_detection_experiment_at_gfe, false, + "If ture, enable GFE-picked loss detection experiment.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_version_draft_25_v3, false, - "If true, enable QUIC version h3-25.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_loss_detection_tuner, false, + "If true, allow QUIC loss detection tuning to be enabled by connection option ELDT.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_version_draft_27, false, - "If true, enable QUIC version h3-27.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_overshooting_detection, false, + "If true, enable overshooting detection when the DTOS connection option is supplied.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_version_t050, true, - "If true, enable QUIC version T050.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_version_t051, false, + "If true, enable QUIC version h3-T051.") QUICHE_FLAG(bool, quic_reloadable_flag_quic_enabled, false, "") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_fix_bbr_cwnd_in_bandwidth_resumption, true, - "If true, adjust congestion window when doing bandwidth resumption in BBR.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_fix_extra_padding_bytes, false, + "If true, consider frame expansion when calculating extra padding bytes to meet " + "minimum plaintext packet size required for header protection.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_ignore_one_write_error_after_mtu_probe, false, - "If true, QUIC connection will ignore one packet write error 
after MTU probe.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_fix_neuter_handshake_data, false, + "If true, fix a case where data is marked lost in HANDSHAKE level but HANDSHAKE key " + "gets decrypted later.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_listener_never_fake_epollout, false, - "If true, QuicListener::OnSocketIsWritable will always return false, which means there " - "will never be a fake EPOLLOUT event in the next epoll iteration.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_fix_packet_number_length, false, + "If true, take the largest acked packet into account when computing the sent packet " + "number length.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_fix_print_draft_version, false, + "When true, ParsedQuicVersionToString will print IETF drafts with format draft29 " + "instead of ff00001d.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_get_stream_information_from_stream_map, false, + "If true, gQUIC will only consult stream_map in QuicSession::GetNumActiveStreams().") QUICHE_FLAG( - bool, quic_reloadable_flag_quic_minimum_validation_of_coalesced_packets, true, - "If true, only do minimum validation of coalesced packets (only validate connection ID).") + bool, quic_reloadable_flag_quic_http3_goaway_new_behavior, false, + "If true, server accepts GOAWAY (draft-28 behavior), client receiving GOAWAY with stream ID " + "that is not client-initiated bidirectional stream ID closes connection with H3_ID_ERROR " + "(draft-28 behavior). Also, receiving a GOAWAY with ID larger than previously received closes " + "connection with H3_ID_ERROR. 
If false, server receiving GOAWAY closes connection with " + "H3_FRAME_UNEXPECTED (draft-27 behavior), client receiving GOAWAY with stream ID that is not " + "client-initiated bidirectional stream ID closes connection with PROTOCOL_VIOLATION (draft-04 " + "behavior), larger ID than previously received does not trigger connection close.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_ip_based_cwnd_exp, false, + "If true, enable IP address based CWND bootstrapping experiment with different " + "bandwidth models and priorities. ") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_negotiate_ack_delay_time, false, - "If true, will negotiate the ACK delay time.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_listener_never_fake_epollout, false, + "If true, QuicListener::OnSocketIsWritable will always return false, which means there " + "will never be a fake EPOLLOUT event in the next epoll iteration.") QUICHE_FLAG(bool, quic_reloadable_flag_quic_no_dup_experiment_id_2, false, "If true, transport connection stats doesn't report duplicated experiments for same " "connection.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_populate_mean_rtt_deviation_in_tcs, true, - "If true, populate mean rtt deviation in transport connection stats.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_only_truncate_long_cids, true, + "In IETF QUIC, only truncate long CIDs from the client's Initial, don't modify them.") QUICHE_FLAG(bool, quic_reloadable_flag_quic_proxy_write_packed_strings, false, "If true, QuicProxyDispatcher will write packed_client_address and packed_server_vip " "in TcpProxyHeaderProto.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_record_frontend_service_vip_mapping, false, +QUICHE_FLAG(bool, quic_reloadable_flag_quic_record_frontend_service_vip_mapping, true, "If true, for L1 GFE, as requests come in, record frontend service to VIP mapping " - "which is used to announce VIP in SHLO for proxied sessions.") + "which is used to announce VIP in SHLO for proxied sessions. 
") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_record_received_min_ack_delay, false, + "If true, record the received min_ack_delay in transport parameters to QUIC config.") QUICHE_FLAG(bool, quic_reloadable_flag_quic_reject_all_traffic, false, "") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_remove_streams_waiting_for_acks, false, + "If true, QuicSession will no longer need streams_waiting_for_acks_.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_remove_unused_ack_options, false, + "Remove ACK_DECIMATION_WITH_REORDERING mode and fast_ack_after_quiescence option in " + "QUIC received packet manager.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_remove_zombie_streams, false, + "If true, QuicSession doesn't keep a separate zombie_streams. Instead, all streams are " + "stored in stream_map_.") + QUICHE_FLAG(bool, quic_reloadable_flag_quic_require_handshake_confirmation, false, "If true, require handshake confirmation for QUIC connections, functionally disabling " "0-rtt handshakes.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_send_ping_when_pto_skips_packet_number, false, - "If true, send PING when PTO skips packet number and there is no data to send.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_retransmit_handshake_data_early, false, + "If true, retransmit unacked handshake data before PTO expiry.") + +QUICHE_FLAG( + bool, quic_reloadable_flag_quic_revert_mtu_after_two_ptos, false, + "If true, QUIC connection will revert to a previously validated MTU(if exists) after two PTOs.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_save_user_agent_in_quic_session, true, + "If true, save user agent into in QuicSession.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_send_early_data_header_to_backend, false, + "If true, for 0RTT IETF QUIC requests, GFE will append a Early-Data header and send it " + "to backend.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_send_path_response, false, + "If true, send PATH_RESPONSE upon receiving PATH_CHALLENGE regardless of 
perspective.") QUICHE_FLAG(bool, quic_reloadable_flag_quic_send_timestamps, false, "When the STMP connection option is sent by the client, timestamps in the QUIC ACK " @@ -217,8 +280,11 @@ QUICHE_FLAG(bool, quic_reloadable_flag_quic_send_timestamps, false, QUICHE_FLAG(bool, quic_reloadable_flag_quic_server_push, true, "If true, enable server push feature on QUIC.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_skip_packet_threshold_loss_detection_with_runt, false, - "If true, skip packet threshold loss detection if largest acked is a runt.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_simplify_received_packet_manager_ack, false, + "Simplify the ACK code in quic_received_packet_manager.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_support_handshake_done_in_t050, true, + "If true, support HANDSHAKE_DONE frame in T050.") QUICHE_FLAG(bool, quic_reloadable_flag_quic_testonly_default_false, false, "A testonly reloadable flag that will always default to false.") @@ -226,44 +292,23 @@ QUICHE_FLAG(bool, quic_reloadable_flag_quic_testonly_default_false, false, QUICHE_FLAG(bool, quic_reloadable_flag_quic_testonly_default_true, true, "A testonly reloadable flag that will always default to true.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_tracegraf_populate_rtt_variation, true, - "If true, QUIC tracegraf populates RTT variation.") - QUICHE_FLAG(bool, quic_reloadable_flag_quic_unified_iw_options, false, "When true, set the initial congestion control window from connection options in " "QuicSentPacketManager rather than TcpCubicSenderBytes.") -QUICHE_FLAG( - bool, quic_reloadable_flag_quic_use_ack_frame_to_get_min_size, false, - "If true, use passed in ack_frame to calculate minimum size of the serialized ACK frame.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_update_packet_size, false, + "If true, update packet size when the first frame gets queued.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_use_half_rtt_as_first_pto, false, + "If true, when TLPR copt is used, 
enable half RTT as first PTO timeout.") QUICHE_FLAG(bool, quic_reloadable_flag_quic_use_header_stage_idle_list2, false, "If true, use header stage idle list for QUIC connections in GFE.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_use_ip_bandwidth_module, true, - "If true, use IpBandwidthModule for cwnd bootstrapping if it is registered.") - QUICHE_FLAG(bool, quic_reloadable_flag_quic_use_leto_key_exchange, false, "If true, QUIC will attempt to use the Leto key exchange service and only fall back to " "local key exchange if that fails.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_use_pigeon_sockets, false, - "Use USPS Direct Path for QUIC egress.") - -QUICHE_FLAG( - bool, quic_reloadable_flag_quic_use_quic_time_for_received_timestamp2, true, - "If true, use QuicClock::Now() as the source of packet receive time instead of WallNow().") - -QUICHE_FLAG(bool, quic_reloadable_flag_quic_use_standard_deviation_for_pto, true, - "If true, use standard deviation when calculating PTO timeout.") - -QUICHE_FLAG(bool, quic_reloadable_flag_quic_write_with_transmission, false, - "If true, QuicSession's various write methods will set transmission type.") - -QUICHE_FLAG( - bool, quic_reloadable_flag_quic_writevdata_at_level, true, - "If true, QuicSession::WritevData() will support writing data at a specified encryption level.") - QUICHE_FLAG(bool, quic_reloadable_flag_send_quic_fallback_server_config_on_leto_error, false, "If true and using Leto for QUIC shared-key calculations, GFE will react to a failure " "to contact Leto by sending a REJ containing a fallback ServerConfig, allowing the " @@ -273,19 +318,24 @@ QUICHE_FLAG( bool, quic_restart_flag_dont_fetch_quic_private_keys_from_leto, false, "If true, GFE will not request private keys when fetching QUIC ServerConfigs from Leto.") +QUICHE_FLAG(bool, quic_restart_flag_quic_adjust_initial_cwnd_by_gws, true, + "If true, GFE informs backend that a client request is the first one on the connection " + "via frontline header 
\"first_request=1\". Also, adjust initial cwnd based on " + "X-Google-Gws-Initial-Cwnd-Mode sent by GWS.") + QUICHE_FLAG( bool, quic_restart_flag_quic_allow_loas_multipacket_chlo, false, "If true, inspects QUIC CHLOs for kLOAS and early creates sessions to allow multi-packet CHLOs") -QUICHE_FLAG(bool, quic_restart_flag_quic_batch_writer_always_drop_packets_on_error, false, - "If true, QUIC (gso|sendmmsg) batch writers will always drop packets on write error.") +QUICHE_FLAG(bool, quic_restart_flag_quic_enable_tls_resumption_v4, false, + "If true, enables support for TLS resumption in QUIC.") -QUICHE_FLAG( - bool, quic_restart_flag_quic_no_cap_net_raw_for_usps_egress, true, - "If true, gfe2::RawSocket::CapabilityNeeded will return false if QUIC egress method is USPS.") +QUICHE_FLAG(bool, quic_restart_flag_quic_enable_zero_rtt_for_tls_v2, false, + "If true, support for IETF QUIC 0-rtt is enabled.") -QUICHE_FLAG(bool, quic_restart_flag_quic_no_fallback_for_pigeon_socket, true, - "If true, GFEs using USPS egress will not fallback to raw ip socket.") +QUICHE_FLAG(bool, quic_restart_flag_quic_google_transport_param_omit_old, true, + "When true, QUIC+TLS will not send nor parse the old-format Google-specific transport " + "parameters.") QUICHE_FLAG(bool, quic_restart_flag_quic_offload_pacing_to_usps2, false, "If true, QUIC offload pacing when using USPS as egress method.") @@ -293,13 +343,13 @@ QUICHE_FLAG(bool, quic_restart_flag_quic_offload_pacing_to_usps2, false, QUICHE_FLAG(bool, quic_restart_flag_quic_rx_ring_use_tpacket_v3, false, "If true, use TPACKET_V3 for QuicRxRing instead of TPACKET_V2.") -QUICHE_FLAG(bool, quic_restart_flag_quic_send_settings_on_write_key_available, false, - "If true, send H3 SETTINGs when 1-RTT write key is available (rather then both keys " - "are available).") - QUICHE_FLAG(bool, quic_restart_flag_quic_should_accept_new_connection, false, "If true, reject QUIC CHLO packets when dispatcher is asked to do so.") +QUICHE_FLAG(bool, 
quic_restart_flag_quic_support_release_time_for_gso, false, + "If true, QuicGsoBatchWriter will support release time if it is available and the " + "process has the permission to do so.") + QUICHE_FLAG(bool, quic_restart_flag_quic_testonly_default_false, false, "A testonly restart flag that will always default to false.") @@ -314,6 +364,9 @@ QUICHE_FLAG(bool, quic_restart_flag_quic_use_pigeon_socket_to_backend, false, "If true, create a shared pigeon socket for all quic to backend connections and switch " "to use it after successful handshake.") +QUICHE_FLAG(bool, spdy_reloadable_flag_quic_bootstrap_cwnd_by_spdy_priority, true, + "If true, bootstrap initial QUIC cwnd by SPDY priorities.") + QUICHE_FLAG( bool, spdy_reloadable_flag_spdy_discard_response_body_if_disallowed, false, "If true, SPDY will discard all response body bytes when response code indicates no response " @@ -321,10 +374,6 @@ QUICHE_FLAG( "and the rest of the response bytes would still be delivered even though the response code " "said there should not be any body associated with the response code.") -QUICHE_FLAG(bool, spdy_reloadable_flag_spdy_enable_granular_decompress_errors, false, - "If true, emit more granular errors instead of " - "SpdyFramerError::SPDY_DECOMPRESS_FAILURE in Http2DecoderAdapter.") - QUICHE_FLAG(bool, quic_allow_chlo_buffering, true, "If true, allows packets to be buffered in anticipation of a " "future CHLO, and allow CHLO packets to be buffered until next " @@ -387,6 +436,9 @@ QUICHE_FLAG(bool, quic_client_convert_http_header_name_to_lowercase, true, "If true, HTTP request header names sent from QuicSpdyClientBase(and " "descendents) will be automatically converted to lower case.") +QUICHE_FLAG(bool, quic_enable_http3_server_push, false, + "If true, server push will be allowed in QUIC versions that use HTTP/3.") + QUICHE_FLAG(int32_t, quic_bbr2_default_probe_bw_base_duration_ms, 2000, "The default minimum duration for BBRv2-native probes, in milliseconds.") @@ -419,7 +471,7 
@@ QUICHE_FLAG(double, quic_ack_aggregation_bandwidth_threshold, 1.0, "bandwidth * this flag), consider the current aggregation completed " "and starts a new one.") -QUICHE_FLAG(int32_t, quic_anti_amplification_factor, 3, +QUICHE_FLAG(int32_t, quic_anti_amplification_factor, 5, "Anti-amplification factor. Before address validation, server will " "send no more than factor times bytes received.") @@ -434,6 +486,9 @@ QUICHE_FLAG(int32_t, quic_max_aggressive_retransmittable_on_wire_ping_count, 0, QUICHE_FLAG(int32_t, quic_max_congestion_window, 2000, "The maximum congestion window in packets.") +QUICHE_FLAG(int32_t, quic_max_streams_window_divisor, 2, + "The divisor that controls how often MAX_STREAMS frame is sent.") + QUICHE_FLAG(bool, http2_reloadable_flag_http2_testonly_default_false, false, "A testonly reloadable flag that will always default to false.") diff --git a/source/extensions/quic_listeners/quiche/platform/quic_containers_impl.h b/source/extensions/quic_listeners/quiche/platform/quic_containers_impl.h index 06d79eb00112c..0e20247707e42 100644 --- a/source/extensions/quic_listeners/quiche/platform/quic_containers_impl.h +++ b/source/extensions/quic_listeners/quiche/platform/quic_containers_impl.h @@ -7,6 +7,7 @@ #include #include "absl/container/flat_hash_map.h" +#include "absl/container/flat_hash_set.h" #include "absl/container/inlined_vector.h" #include "absl/container/node_hash_map.h" #include "absl/container/node_hash_set.h" @@ -26,6 +27,11 @@ template using QuicDefaultHasherImpl = absl::Hash; template using QuicUnorderedMapImpl = absl::node_hash_map; +template +using QuicHashMapImpl = absl::flat_hash_map; + +template using QuicHashSetImpl = absl::flat_hash_set; + template using QuicUnorderedSetImpl = absl::node_hash_set; template diff --git a/source/extensions/quic_listeners/quiche/platform/quic_hostname_utils_impl.cc b/source/extensions/quic_listeners/quiche/platform/quic_hostname_utils_impl.cc index bcbafb56639ed..7b26dac94e267 100644 --- 
a/source/extensions/quic_listeners/quiche/platform/quic_hostname_utils_impl.cc +++ b/source/extensions/quic_listeners/quiche/platform/quic_hostname_utils_impl.cc @@ -1,3 +1,4 @@ + // NOLINT(namespace-envoy) // This file is part of the QUICHE platform implementation, and is not to be @@ -8,7 +9,7 @@ #include -#include "common/http/utility.h" +#include "common/http/url_utility.h" #include "absl/strings/ascii.h" #include "absl/strings/str_cat.h" diff --git a/source/extensions/quic_listeners/quiche/platform/quic_logging_impl.cc b/source/extensions/quic_listeners/quiche/platform/quic_logging_impl.cc index 60870a742fdd1..ae7caedd69d14 100644 --- a/source/extensions/quic_listeners/quiche/platform/quic_logging_impl.cc +++ b/source/extensions/quic_listeners/quiche/platform/quic_logging_impl.cc @@ -8,6 +8,8 @@ #include +#include "common/common/utility.h" + namespace quic { namespace { @@ -25,8 +27,8 @@ QuicLogEmitter::QuicLogEmitter(QuicLogLevel level) : level_(level), saved_errno_ QuicLogEmitter::~QuicLogEmitter() { if (is_perror_) { - // TODO(wub): Change to a thread-safe version of strerror. - stream_ << ": " << strerror(saved_errno_) << " [" << saved_errno_ << "]"; + // TODO(wub): Change to a thread-safe version of errorDetails. 
+ stream_ << ": " << Envoy::errorDetails(saved_errno_) << " [" << saved_errno_ << "]"; } std::string content = stream_.str(); if (!content.empty() && content.back() == '\n') { diff --git a/source/extensions/quic_listeners/quiche/platform/quic_macros_impl.h b/source/extensions/quic_listeners/quiche/platform/quic_macros_impl.h index eb8ce413fb8a7..b8b70a0426b43 100644 --- a/source/extensions/quic_listeners/quiche/platform/quic_macros_impl.h +++ b/source/extensions/quic_listeners/quiche/platform/quic_macros_impl.h @@ -10,3 +10,4 @@ #define QUIC_MUST_USE_RESULT_IMPL ABSL_MUST_USE_RESULT #define QUIC_UNUSED_IMPL ABSL_ATTRIBUTE_UNUSED +#define QUIC_CONST_INIT_IMPL ABSL_CONST_INIT diff --git a/source/extensions/quic_listeners/quiche/platform/quic_mem_slice_span_impl.h b/source/extensions/quic_listeners/quiche/platform/quic_mem_slice_span_impl.h index 60917fcd0d7c4..1824fb8d1fa54 100644 --- a/source/extensions/quic_listeners/quiche/platform/quic_mem_slice_span_impl.h +++ b/source/extensions/quic_listeners/quiche/platform/quic_mem_slice_span_impl.h @@ -20,7 +20,7 @@ namespace quic { // Wraps a Buffer::Instance and deliver its data with minimum number of copies. class QuicMemSliceSpanImpl { public: - QuicMemSliceSpanImpl() : buffer_(nullptr) {} + QuicMemSliceSpanImpl() = default; /** * @param buffer has to outlive the life time of this class. */ diff --git a/source/extensions/quic_listeners/quiche/platform/quic_mutex_impl.h b/source/extensions/quic_listeners/quiche/platform/quic_mutex_impl.h index 15297625deefc..c3759e47d560f 100644 --- a/source/extensions/quic_listeners/quiche/platform/quic_mutex_impl.h +++ b/source/extensions/quic_listeners/quiche/platform/quic_mutex_impl.h @@ -24,27 +24,32 @@ namespace quic { #define QUIC_ASSERT_SHARED_LOCK_IMPL ABSL_ASSERT_SHARED_LOCK // A class wrapping a non-reentrant mutex. 
-class LOCKABLE QUIC_EXPORT_PRIVATE QuicLockImpl { +class QUIC_LOCKABLE_IMPL QUIC_EXPORT_PRIVATE QuicLockImpl { public: QuicLockImpl() = default; QuicLockImpl(const QuicLockImpl&) = delete; QuicLockImpl& operator=(const QuicLockImpl&) = delete; // Block until mu_ is free, then acquire it exclusively. - void WriterLock() EXCLUSIVE_LOCK_FUNCTION() { mu_.WriterLock(); } + // NOLINTNEXTLINE(readability-identifier-naming) + void WriterLock() QUIC_EXCLUSIVE_LOCK_FUNCTION_IMPL() { mu_.WriterLock(); } // Release mu_. Caller must hold it exclusively. - void WriterUnlock() UNLOCK_FUNCTION() { mu_.WriterUnlock(); } + // NOLINTNEXTLINE(readability-identifier-naming) + void WriterUnlock() QUIC_UNLOCK_FUNCTION_IMPL() { mu_.WriterUnlock(); } // Block until mu_ is free or shared, then acquire a share of it. - void ReaderLock() SHARED_LOCK_FUNCTION() { mu_.ReaderLock(); } + // NOLINTNEXTLINE(readability-identifier-naming) + void ReaderLock() QUIC_SHARED_LOCK_FUNCTION_IMPL() { mu_.ReaderLock(); } // Release mu_. Caller could hold it in shared mode. - void ReaderUnlock() UNLOCK_FUNCTION() { mu_.ReaderUnlock(); } + // NOLINTNEXTLINE(readability-identifier-naming) + void ReaderUnlock() QUIC_UNLOCK_FUNCTION_IMPL() { mu_.ReaderUnlock(); } // Returns immediately if current thread holds mu_ in at least shared // mode. Otherwise, reports an error by crashing with a diagnostic. 
- void AssertReaderHeld() const ASSERT_SHARED_LOCK() { mu_.AssertReaderHeld(); } + // NOLINTNEXTLINE(readability-identifier-naming) + void AssertReaderHeld() const QUIC_ASSERT_SHARED_LOCK_IMPL() { mu_.AssertReaderHeld(); } private: absl::Mutex mu_; diff --git a/source/extensions/quic_listeners/quiche/platform/quic_udp_socket_platform_impl.h b/source/extensions/quic_listeners/quiche/platform/quic_udp_socket_platform_impl.h new file mode 100644 index 0000000000000..248cfc193e029 --- /dev/null +++ b/source/extensions/quic_listeners/quiche/platform/quic_udp_socket_platform_impl.h @@ -0,0 +1,22 @@ +#pragma once + +// NOLINT(namespace-envoy) + +// This file is part of the QUICHE platform implementation, and is not to be +// consumed or referenced directly by other Envoy code. It serves purely as a +// porting layer for QUICHE. + +#include + +namespace quic { + +const size_t kCmsgSpaceForGooglePacketHeaderImpl = 0; + +// NOLINTNEXTLINE(readability-identifier-naming) +inline bool GetGooglePacketHeadersFromControlMessageImpl(struct ::cmsghdr* /*cmsg*/, + char** /*packet_headers*/, + size_t* /*packet_headers_len*/) { + return false; +} + +} // namespace quic diff --git a/source/extensions/quic_listeners/quiche/platform/quiche_optional_impl.h b/source/extensions/quic_listeners/quiche/platform/quiche_optional_impl.h index b5c63d7ec303e..f8b2b6c0800d4 100644 --- a/source/extensions/quic_listeners/quiche/platform/quiche_optional_impl.h +++ b/source/extensions/quic_listeners/quiche/platform/quiche_optional_impl.h @@ -12,6 +12,6 @@ namespace quiche { template using QuicheOptionalImpl = absl::optional; -#define QuicheNullOptImpl absl::nullopt +#define QUICHE_NULLOPT_IMPL absl::nullopt } // namespace quiche diff --git a/source/extensions/quic_listeners/quiche/platform/quiche_text_utils_impl.h b/source/extensions/quic_listeners/quiche/platform/quiche_text_utils_impl.h index c2b117284bbff..3a6d1a393a8b3 100644 --- 
a/source/extensions/quic_listeners/quiche/platform/quiche_text_utils_impl.h +++ b/source/extensions/quic_listeners/quiche/platform/quiche_text_utils_impl.h @@ -1,5 +1,8 @@ #pragma once +#include "common/common/base64.h" + +#include "extensions/quic_listeners/quiche/platform/quiche_optional_impl.h" #include "extensions/quic_listeners/quiche/platform/quiche_string_piece_impl.h" #include "extensions/quic_listeners/quiche/platform/string_utils.h" @@ -21,58 +24,86 @@ namespace quiche { class QuicheTextUtilsImpl { public: + // NOLINTNEXTLINE(readability-identifier-naming) static bool StartsWith(QuicheStringPieceImpl data, QuicheStringPieceImpl prefix) { return absl::StartsWith(data, prefix); } + // NOLINTNEXTLINE(readability-identifier-naming) + static bool EndsWith(QuicheStringPieceImpl data, QuicheStringPieceImpl suffix) { + return absl::EndsWith(data, suffix); + } + + // NOLINTNEXTLINE(readability-identifier-naming) static bool EndsWithIgnoreCase(QuicheStringPieceImpl data, QuicheStringPieceImpl suffix) { return absl::EndsWithIgnoreCase(data, suffix); } + // NOLINTNEXTLINE(readability-identifier-naming) static std::string ToLower(QuicheStringPieceImpl data) { return absl::AsciiStrToLower(data); } + // NOLINTNEXTLINE(readability-identifier-naming) static void RemoveLeadingAndTrailingWhitespace(QuicheStringPieceImpl* data) { *data = absl::StripAsciiWhitespace(*data); } + // NOLINTNEXTLINE(readability-identifier-naming) static bool StringToUint64(QuicheStringPieceImpl in, uint64_t* out) { return absl::SimpleAtoi(in, out); } + // NOLINTNEXTLINE(readability-identifier-naming) static bool StringToInt(QuicheStringPieceImpl in, int* out) { return absl::SimpleAtoi(in, out); } + // NOLINTNEXTLINE(readability-identifier-naming) static bool StringToUint32(QuicheStringPieceImpl in, uint32_t* out) { return absl::SimpleAtoi(in, out); } + // NOLINTNEXTLINE(readability-identifier-naming) static bool StringToSizeT(QuicheStringPieceImpl in, size_t* out) { return absl::SimpleAtoi(in, 
out); } + // NOLINTNEXTLINE(readability-identifier-naming) static std::string Uint64ToString(uint64_t in) { return absl::StrCat(in); } + // NOLINTNEXTLINE(readability-identifier-naming) static std::string HexEncode(QuicheStringPieceImpl data) { return absl::BytesToHexString(data); } + // NOLINTNEXTLINE(readability-identifier-naming) static std::string Hex(uint32_t v) { return absl::StrCat(absl::Hex(v)); } + // NOLINTNEXTLINE(readability-identifier-naming) static std::string HexDecode(QuicheStringPieceImpl data) { return absl::HexStringToBytes(data); } + // NOLINTNEXTLINE(readability-identifier-naming) static void Base64Encode(const uint8_t* data, size_t data_len, std::string* output) { - return quiche::Base64Encode(data, data_len, output); + *output = + Envoy::Base64::encode(reinterpret_cast(data), data_len, /*add_padding=*/false); + } + + // NOLINTNEXTLINE(readability-identifier-naming) + static QuicheOptionalImpl Base64Decode(QuicheStringPieceImpl input) { + return Envoy::Base64::decodeWithoutPadding(input); } + // NOLINTNEXTLINE(readability-identifier-naming) static std::string HexDump(QuicheStringPieceImpl binary_data) { return quiche::HexDump(binary_data); } + // NOLINTNEXTLINE(readability-identifier-naming) static bool ContainsUpperCase(QuicheStringPieceImpl data) { return std::any_of(data.begin(), data.end(), absl::ascii_isupper); } + // NOLINTNEXTLINE(readability-identifier-naming) static bool IsAllDigits(QuicheStringPieceImpl data) { return std::all_of(data.begin(), data.end(), absl::ascii_isdigit); } + // NOLINTNEXTLINE(readability-identifier-naming) static std::vector Split(QuicheStringPieceImpl data, char delim) { return absl::StrSplit(data, delim); } diff --git a/source/extensions/quic_listeners/quiche/platform/quiche_time_utils_impl.cc b/source/extensions/quic_listeners/quiche/platform/quiche_time_utils_impl.cc new file mode 100644 index 0000000000000..3260eafee4da0 --- /dev/null +++ 
b/source/extensions/quic_listeners/quiche/platform/quiche_time_utils_impl.cc @@ -0,0 +1,42 @@ +// NOLINT(namespace-envoy) + +// This file is part of the QUICHE platform implementation, and is not to be +// consumed or referenced directly by other Envoy code. It serves purely as a +// porting layer for QUICHE. + +#include "extensions/quic_listeners/quiche/platform/quiche_time_utils_impl.h" + +namespace quiche { + +namespace { +QuicheOptional quicheUtcDateTimeToUnixSecondsInner(int year, int month, int day, int hour, + int minute, int second) { + const absl::CivilSecond civil_time(year, month, day, hour, minute, second); + if (second != 60 && (civil_time.year() != year || civil_time.month() != month || + civil_time.day() != day || civil_time.hour() != hour || + civil_time.minute() != minute || civil_time.second() != second)) { + return absl::nullopt; + } + + const absl::Time time = absl::FromCivil(civil_time, absl::UTCTimeZone()); + return absl::ToUnixSeconds(time); +} +} // namespace + +// NOLINTNEXTLINE(readability-identifier-naming) +QuicheOptional QuicheUtcDateTimeToUnixSecondsImpl(int year, int month, int day, int hour, + int minute, int second) { + // Handle leap seconds without letting any other irregularities happen. 
+ if (second == 60) { + auto previous_second = + quicheUtcDateTimeToUnixSecondsInner(year, month, day, hour, minute, second - 1); + if (!previous_second.has_value()) { + return absl::nullopt; + } + return *previous_second + 1; + } + + return quicheUtcDateTimeToUnixSecondsInner(year, month, day, hour, minute, second); +} + +} // namespace quiche diff --git a/source/extensions/quic_listeners/quiche/platform/quiche_time_utils_impl.h b/source/extensions/quic_listeners/quiche/platform/quiche_time_utils_impl.h new file mode 100644 index 0000000000000..a1b70b70a51ea --- /dev/null +++ b/source/extensions/quic_listeners/quiche/platform/quiche_time_utils_impl.h @@ -0,0 +1,21 @@ +// NOLINT(namespace-envoy) + +// This file is part of the QUICHE platform implementation, and is not to be +// consumed or referenced directly by other Envoy code. It serves purely as a +// porting layer for QUICHE. + +#pragma once + +#include + +#include "absl/time/civil_time.h" +#include "absl/time/time.h" +#include "quiche/common/platform/api/quiche_optional.h" + +namespace quiche { + +// NOLINTNEXTLINE(readability-identifier-naming) +QuicheOptional QuicheUtcDateTimeToUnixSecondsImpl(int year, int month, int day, int hour, + int minute, int second); + +} // namespace quiche diff --git a/source/extensions/quic_listeners/quiche/platform/quiche_unordered_containers_impl.h b/source/extensions/quic_listeners/quiche/platform/quiche_unordered_containers_impl.h index 508efe2ee01f5..f3e4130b01ff7 100644 --- a/source/extensions/quic_listeners/quiche/platform/quiche_unordered_containers_impl.h +++ b/source/extensions/quic_listeners/quiche/platform/quiche_unordered_containers_impl.h @@ -14,7 +14,7 @@ namespace quiche { // The default hasher used by hash tables. template using QuicheDefaultHasherImpl = absl::Hash; -// Similar to std::unordered_map, but with better performance and memory usage. +// Similar to absl::node_hash_map, but with better performance and memory usage. 
template using QuicheUnorderedMapImpl = absl::node_hash_map; diff --git a/source/extensions/quic_listeners/quiche/platform/spdy_string_utils_impl.h b/source/extensions/quic_listeners/quiche/platform/spdy_string_utils_impl.h index 08884d56b8f80..41fa3cad815fa 100644 --- a/source/extensions/quic_listeners/quiche/platform/spdy_string_utils_impl.h +++ b/source/extensions/quic_listeners/quiche/platform/spdy_string_utils_impl.h @@ -13,32 +13,40 @@ #include "absl/strings/str_cat.h" #include "absl/strings/str_format.h" #include "fmt/printf.h" +#include "quiche/common/platform/api/quiche_string_piece.h" namespace spdy { template +// NOLINTNEXTLINE(readability-identifier-naming) inline void SpdyStrAppendImpl(std::string* output, const Args&... args) { absl::StrAppend(output, std::forward(args)...); } +// NOLINTNEXTLINE(readability-identifier-naming) inline char SpdyHexDigitToIntImpl(char c) { return quiche::HexDigitToInt(c); } +// NOLINTNEXTLINE(readability-identifier-naming) inline std::string SpdyHexDecodeImpl(absl::string_view data) { return absl::HexStringToBytes(data); } +// NOLINTNEXTLINE(readability-identifier-naming) inline bool SpdyHexDecodeToUInt32Impl(absl::string_view data, uint32_t* out) { return quiche::HexDecodeToUInt32(data, out); } +// NOLINTNEXTLINE(readability-identifier-naming) inline std::string SpdyHexEncodeImpl(const void* bytes, size_t size) { return absl::BytesToHexString(absl::string_view(static_cast(bytes), size)); } +// NOLINTNEXTLINE(readability-identifier-naming) inline std::string SpdyHexEncodeUInt32AndTrimImpl(uint32_t data) { return absl::StrCat(absl::Hex(data)); } +// NOLINTNEXTLINE(readability-identifier-naming) inline std::string SpdyHexDumpImpl(absl::string_view data) { return quiche::HexDump(data); } struct SpdyStringPieceCaseHashImpl { diff --git a/source/extensions/quic_listeners/quiche/platform/string_utils.cc b/source/extensions/quic_listeners/quiche/platform/string_utils.cc index 85452204d9cac..24ef55bfe94a2 100644 --- 
a/source/extensions/quic_listeners/quiche/platform/string_utils.cc +++ b/source/extensions/quic_listeners/quiche/platform/string_utils.cc @@ -14,15 +14,10 @@ #include "absl/strings/escaping.h" #include "absl/strings/str_format.h" #include "common/common/assert.h" -#include "common/common/base64.h" namespace quiche { -void Base64Encode(const uint8_t* data, size_t data_len, std::string* output) { - *output = - Envoy::Base64::encode(reinterpret_cast(data), data_len, /*add_padding=*/false); -} - +// NOLINTNEXTLINE(readability-identifier-naming) std::string HexDump(absl::string_view data) { const int kBytesPerLine = 16; const char* buf = data.data(); @@ -56,6 +51,7 @@ std::string HexDump(absl::string_view data) { return out; } +// NOLINTNEXTLINE(readability-identifier-naming) char HexDigitToInt(char c) { ASSERT(std::isxdigit(c)); @@ -71,6 +67,7 @@ char HexDigitToInt(char c) { return 0; } +// NOLINTNEXTLINE(readability-identifier-naming) bool HexDecodeToUInt32(absl::string_view data, uint32_t* out) { if (data.empty() || data.size() > 8u) { return false; @@ -88,7 +85,7 @@ bool HexDecodeToUInt32(absl::string_view data, uint32_t* out) { std::string byte_string = absl::HexStringToBytes(data_padded); - RELEASE_ASSERT(byte_string.size() == 4u, "padded dtat is not 4 byte long."); + RELEASE_ASSERT(byte_string.size() == 4u, "padded data is not 4 byte long."); uint32_t bytes; memcpy(&bytes, byte_string.data(), byte_string.length()); *out = ntohl(bytes); diff --git a/source/extensions/quic_listeners/quiche/platform/string_utils.h b/source/extensions/quic_listeners/quiche/platform/string_utils.h index 43ebe1c066f3b..28441305f2bf3 100644 --- a/source/extensions/quic_listeners/quiche/platform/string_utils.h +++ b/source/extensions/quic_listeners/quiche/platform/string_utils.h @@ -11,15 +11,16 @@ namespace quiche { -void Base64Encode(const uint8_t* data, size_t data_len, std::string* output); - +// NOLINTNEXTLINE(readability-identifier-naming) std::string HexDump(absl::string_view 
data); // '0' => 0, '1' => 1, 'a' or 'A' => 10, etc. +// NOLINTNEXTLINE(readability-identifier-naming) char HexDigitToInt(char c); // Turns a 8-byte hex string into a uint32 in host byte order. // e.g. "12345678" => 0x12345678 +// NOLINTNEXTLINE(readability-identifier-naming) bool HexDecodeToUInt32(absl::string_view data, uint32_t* out); } // namespace quiche diff --git a/source/extensions/quic_listeners/quiche/quic_filter_manager_connection_impl.cc b/source/extensions/quic_listeners/quiche/quic_filter_manager_connection_impl.cc index 270c7eec91fff..e005a3dd7691c 100644 --- a/source/extensions/quic_listeners/quiche/quic_filter_manager_connection_impl.cc +++ b/source/extensions/quic_listeners/quiche/quic_filter_manager_connection_impl.cc @@ -130,7 +130,13 @@ void QuicFilterManagerConnectionImpl::rawWrite(Buffer::Instance& /*data*/, bool } void QuicFilterManagerConnectionImpl::adjustBytesToSend(int64_t delta) { + const size_t bytes_to_send_old = bytes_to_send_; bytes_to_send_ += delta; + if (delta < 0) { + ASSERT(bytes_to_send_old > bytes_to_send_); + } else { + ASSERT(bytes_to_send_old <= bytes_to_send_); + } write_buffer_watermark_simulation_.checkHighWatermark(bytes_to_send_); write_buffer_watermark_simulation_.checkLowWatermark(bytes_to_send_); } diff --git a/source/extensions/quic_listeners/quiche/quic_io_handle_wrapper.h b/source/extensions/quic_listeners/quiche/quic_io_handle_wrapper.h index 6a468bf867bec..84420816b917c 100644 --- a/source/extensions/quic_listeners/quiche/quic_io_handle_wrapper.h +++ b/source/extensions/quic_listeners/quiche/quic_io_handle_wrapper.h @@ -63,6 +63,32 @@ class QuicIoHandleWrapper : public Network::IoHandle { return io_handle_.recvmmsg(slices, self_port, output); } bool supportsMmsg() const override { return io_handle_.supportsMmsg(); } + bool supportsUdpGro() const override { return io_handle_.supportsUdpGro(); } + Api::SysCallIntResult bind(Network::Address::InstanceConstSharedPtr address) override { + return 
io_handle_.bind(address); + } + Api::SysCallIntResult listen(int backlog) override { return io_handle_.listen(backlog); } + Api::SysCallIntResult connect(Network::Address::InstanceConstSharedPtr address) override { + return io_handle_.connect(address); + } + Api::SysCallIntResult setOption(int level, int optname, const void* optval, + socklen_t optlen) override { + return io_handle_.setOption(level, optname, optval, optlen); + } + Api::SysCallIntResult getOption(int level, int optname, void* optval, + socklen_t* optlen) override { + return io_handle_.getOption(level, optname, optval, optlen); + } + Api::SysCallIntResult setBlocking(bool blocking) override { + return io_handle_.setBlocking(blocking); + } + absl::optional domain() override { return io_handle_.domain(); } + Network::Address::InstanceConstSharedPtr localAddress() override { + return io_handle_.localAddress(); + } + Network::Address::InstanceConstSharedPtr peerAddress() override { + return io_handle_.peerAddress(); + } private: Network::IoHandle& io_handle_; diff --git a/source/extensions/quic_listeners/quiche/quic_transport_socket_factory.cc b/source/extensions/quic_listeners/quiche/quic_transport_socket_factory.cc index 6d1bf0a156912..17f16d2a82543 100644 --- a/source/extensions/quic_listeners/quiche/quic_transport_socket_factory.cc +++ b/source/extensions/quic_listeners/quiche/quic_transport_socket_factory.cc @@ -1,8 +1,8 @@ #include "extensions/quic_listeners/quiche/quic_transport_socket_factory.h" -#include "envoy/extensions/transport_sockets/tls/v3/cert.pb.h" -#include "envoy/extensions/transport_sockets/tls/v3/cert.pb.validate.h" - +// #include "envoy/extensions/transport_sockets/tls/v3/tls.pb.validate.h" +#include "envoy/extensions/transport_sockets/quic/v3/quic_transport.pb.h" +#include "envoy/extensions/transport_sockets/quic/v3/quic_transport.pb.validate.h" #include "extensions/transport_sockets/tls/context_config_impl.h" namespace Envoy { @@ -12,32 +12,33 @@ 
Network::TransportSocketFactoryPtr QuicServerTransportSocketConfigFactory::createTransportSocketFactory( const Protobuf::Message& config, Server::Configuration::TransportSocketFactoryContext& context, const std::vector& /*server_names*/) { + auto quic_transport = MessageUtil::downcastAndValidate< + const envoy::extensions::transport_sockets::quic::v3::QuicDownstreamTransport&>( + config, context.messageValidationVisitor()); auto server_config = std::make_unique( - MessageUtil::downcastAndValidate< - const envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext&>( - config, context.messageValidationVisitor()), - context); + quic_transport.downstream_tls_context(), context); return std::make_unique(std::move(server_config)); } ProtobufTypes::MessagePtr QuicServerTransportSocketConfigFactory::createEmptyConfigProto() { - return std::make_unique(); + return std::make_unique< + envoy::extensions::transport_sockets::quic::v3::QuicDownstreamTransport>(); } Network::TransportSocketFactoryPtr QuicClientTransportSocketConfigFactory::createTransportSocketFactory( const Protobuf::Message& config, Server::Configuration::TransportSocketFactoryContext& context) { + auto quic_transport = MessageUtil::downcastAndValidate< + const envoy::extensions::transport_sockets::quic::v3::QuicUpstreamTransport&>( + config, context.messageValidationVisitor()); auto client_config = std::make_unique( - MessageUtil::downcastAndValidate< - const envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext&>( - config, context.messageValidationVisitor()), - context); + quic_transport.upstream_tls_context(), context); return std::make_unique(std::move(client_config)); } ProtobufTypes::MessagePtr QuicClientTransportSocketConfigFactory::createEmptyConfigProto() { - return std::make_unique(); + return std::make_unique(); } REGISTER_FACTORY(QuicServerTransportSocketConfigFactory, diff --git a/source/extensions/quic_listeners/quiche/quic_transport_socket_factory.h 
b/source/extensions/quic_listeners/quiche/quic_transport_socket_factory.h index 009af30083689..2ada9e2de17b4 100644 --- a/source/extensions/quic_listeners/quiche/quic_transport_socket_factory.h +++ b/source/extensions/quic_listeners/quiche/quic_transport_socket_factory.h @@ -1,3 +1,5 @@ +#pragma once + #include "envoy/network/transport_socket.h" #include "envoy/server/transport_socket_config.h" #include "envoy/ssl/context_config.h" diff --git a/source/extensions/quic_listeners/quiche/udp_gso_batch_writer.cc b/source/extensions/quic_listeners/quiche/udp_gso_batch_writer.cc new file mode 100644 index 0000000000000..5525ee285d417 --- /dev/null +++ b/source/extensions/quic_listeners/quiche/udp_gso_batch_writer.cc @@ -0,0 +1,126 @@ +#include "extensions/quic_listeners/quiche/udp_gso_batch_writer.h" + +#include "common/network/io_socket_error_impl.h" + +#include "extensions/quic_listeners/quiche/envoy_quic_utils.h" + +namespace Envoy { +namespace Quic { +namespace { +Api::IoCallUint64Result convertQuicWriteResult(quic::WriteResult quic_result, size_t payload_len) { + switch (quic_result.status) { + case quic::WRITE_STATUS_OK: { + if (quic_result.bytes_written == 0) { + ENVOY_LOG_MISC(trace, "sendmsg successful, message buffered to send"); + } else { + ENVOY_LOG_MISC(trace, "sendmsg successful, flushed bytes {}", quic_result.bytes_written); + } + // Return payload_len as rc & nullptr as error on success + return Api::IoCallUint64Result( + /*rc=*/payload_len, + /*err=*/Api::IoErrorPtr(nullptr, Network::IoSocketError::deleteIoError)); + } + case quic::WRITE_STATUS_BLOCKED_DATA_BUFFERED: { + // Data was buffered, Return payload_len as rc & nullptr as error + ENVOY_LOG_MISC(trace, "sendmsg blocked, message buffered to send"); + return Api::IoCallUint64Result( + /*rc=*/payload_len, + /*err=*/Api::IoErrorPtr(nullptr, Network::IoSocketError::deleteIoError)); + } + case quic::WRITE_STATUS_BLOCKED: { + // Writer blocked, return error + ENVOY_LOG_MISC(trace, "sendmsg blocked, 
message not buffered"); + return Api::IoCallUint64Result( + /*rc=*/0, + /*err=*/Api::IoErrorPtr(Network::IoSocketError::getIoSocketEagainInstance(), + Network::IoSocketError::deleteIoError)); + } + default: { + // Write Failed, return {0 and error_code} + ENVOY_LOG_MISC(trace, "sendmsg failed with error code {}", + static_cast(quic_result.error_code)); + return Api::IoCallUint64Result( + /*rc=*/0, + /*err=*/Api::IoErrorPtr(new Network::IoSocketError(quic_result.error_code), + Network::IoSocketError::deleteIoError)); + } + } +} + +} // namespace + +// Initialize QuicGsoBatchWriter, set io_handle_ and stats_ +UdpGsoBatchWriter::UdpGsoBatchWriter(Network::IoHandle& io_handle, Stats::Scope& scope) + : quic::QuicGsoBatchWriter(std::make_unique(), io_handle.fd()), + stats_(generateStats(scope)) {} + +// Do Nothing in the Destructor For now +UdpGsoBatchWriter::~UdpGsoBatchWriter() = default; + +Api::IoCallUint64Result +UdpGsoBatchWriter::writePacket(const Buffer::Instance& buffer, const Network::Address::Ip* local_ip, + const Network::Address::Instance& peer_address) { + // Convert received parameters to relevant forms + quic::QuicSocketAddress peer_addr = envoyIpAddressToQuicSocketAddress(peer_address.ip()); + quic::QuicSocketAddress self_addr = envoyIpAddressToQuicSocketAddress(local_ip); + size_t payload_len = static_cast(buffer.length()); + + // TODO(yugant): Currently we do not use PerPacketOptions with Quic, we may want to + // specify this parameter here at a later stage. 
+ quic::WriteResult quic_result = + WritePacket(buffer.toString().c_str(), payload_len, self_addr.host(), peer_addr, + /*quic::PerPacketOptions=*/nullptr); + updateUdpGsoBatchWriterStats(quic_result); + + return convertQuicWriteResult(quic_result, payload_len); +} + +uint64_t UdpGsoBatchWriter::getMaxPacketSize(const Network::Address::Instance& peer_address) const { + quic::QuicSocketAddress peer_addr = envoyIpAddressToQuicSocketAddress(peer_address.ip()); + return static_cast(GetMaxPacketSize(peer_addr)); +} + +Network::UdpPacketWriterBuffer +UdpGsoBatchWriter::getNextWriteLocation(const Network::Address::Ip* local_ip, + const Network::Address::Instance& peer_address) { + quic::QuicSocketAddress peer_addr = envoyIpAddressToQuicSocketAddress(peer_address.ip()); + quic::QuicSocketAddress self_addr = envoyIpAddressToQuicSocketAddress(local_ip); + quic::QuicPacketBuffer quic_buf = GetNextWriteLocation(self_addr.host(), peer_addr); + return Network::UdpPacketWriterBuffer(reinterpret_cast(quic_buf.buffer), + Network::UdpMaxOutgoingPacketSize, quic_buf.release_buffer); +} + +Api::IoCallUint64Result UdpGsoBatchWriter::flush() { + quic::WriteResult quic_result = Flush(); + updateUdpGsoBatchWriterStats(quic_result); + + return convertQuicWriteResult(quic_result, /*payload_len=*/0); +} + +void UdpGsoBatchWriter::updateUdpGsoBatchWriterStats(quic::WriteResult quic_result) { + if (quic_result.status == quic::WRITE_STATUS_OK && quic_result.bytes_written > 0) { + if (gso_size_ > 0u) { + uint64_t num_pkts_in_batch = + std::ceil(static_cast(quic_result.bytes_written) / gso_size_); + stats_.pkts_sent_per_batch_.recordValue(num_pkts_in_batch); + } + stats_.total_bytes_sent_.add(quic_result.bytes_written); + } + stats_.internal_buffer_size_.set(batch_buffer().SizeInUse()); + gso_size_ = buffered_writes().empty() ? 
0u : buffered_writes().front().buf_len; +} + +UdpGsoBatchWriterStats UdpGsoBatchWriter::generateStats(Stats::Scope& scope) { + return { + UDP_GSO_BATCH_WRITER_STATS(POOL_COUNTER(scope), POOL_GAUGE(scope), POOL_HISTOGRAM(scope))}; +} + +UdpGsoBatchWriterFactory::UdpGsoBatchWriterFactory() = default; + +Network::UdpPacketWriterPtr +UdpGsoBatchWriterFactory::createUdpPacketWriter(Network::IoHandle& io_handle, Stats::Scope& scope) { + return std::make_unique(io_handle, scope); +} + +} // namespace Quic +} // namespace Envoy diff --git a/source/extensions/quic_listeners/quiche/udp_gso_batch_writer.h b/source/extensions/quic_listeners/quiche/udp_gso_batch_writer.h new file mode 100644 index 0000000000000..477ad8bdcdc7a --- /dev/null +++ b/source/extensions/quic_listeners/quiche/udp_gso_batch_writer.h @@ -0,0 +1,124 @@ +#pragma once + +#pragma GCC diagnostic push +// QUICHE allows unused parameters. +#pragma GCC diagnostic ignored "-Wunused-parameter" +// QUICHE uses offsetof(). +#pragma GCC diagnostic ignored "-Winvalid-offsetof" +// QUICHE allows ignored qualifiers +#pragma GCC diagnostic ignored "-Wignored-qualifiers" + +// QUICHE doesn't mark override at QuicBatchWriterBase::SupportsReleaseTime() +#ifdef __clang__ +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Winconsistent-missing-override" +#elif defined(__GNUC__) && __GNUC__ >= 5 +#pragma GCC diagnostic ignored "-Wsuggest-override" +#endif + +#include "quiche/quic/core/batch_writer/quic_gso_batch_writer.h" + +#ifdef __clang__ +#pragma clang diagnostic pop +#endif + +#pragma GCC diagnostic pop + +#include "envoy/network/udp_packet_writer_handler.h" + +#include "common/protobuf/utility.h" +#include "common/runtime/runtime_protos.h" + +namespace Envoy { +namespace Quic { + +/** + * @brief The following can be used to collect statistics + * related to UdpGsoBatchWriter. 
The stats maintained are + * as follows: + * + * @total_bytes_sent: Maintains the count of total bytes + * sent via the UdpGsoBatchWriter on the current ioHandle + * via both WritePacket() and Flush() functions. + * + * @internal_buffer_size: Gauge value to keep a track of the + * total bytes buffered to writer by UdpGsoBatchWriter. + * Resets whenever the internal bytes are sent to the client. + * + * @pkts_sent_per_batch: Histogram to keep maintain stats of + * total number of packets sent in each batch by UdpGsoBatchWriter + * Provides summary count of batch-sizes within bucketed range, + * and also provides sum and count stats. + * + * TODO(danzh): Add writer stats to QUIC Documentation when it is + * created for QUIC/HTTP3 docs. Also specify in the documentation + * that user has to compile in QUICHE to use UdpGsoBatchWriter. + */ +#define UDP_GSO_BATCH_WRITER_STATS(COUNTER, GAUGE, HISTOGRAM) \ + COUNTER(total_bytes_sent) \ + GAUGE(internal_buffer_size, NeverImport) \ + HISTOGRAM(pkts_sent_per_batch, Unspecified) + +/** + * Wrapper struct for udp gso batch writer stats. @see stats_macros.h + */ +struct UdpGsoBatchWriterStats { + UDP_GSO_BATCH_WRITER_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT, + GENERATE_HISTOGRAM_STRUCT) +}; + +/** + * UdpPacketWriter implementation based on quic::QuicGsoBatchWriter to send packets + * in batches, using UDP socket's generic segmentation offload(GSO) capability. 
+ */ +class UdpGsoBatchWriter : public quic::QuicGsoBatchWriter, public Network::UdpPacketWriter { +public: + UdpGsoBatchWriter(Network::IoHandle& io_handle, Stats::Scope& scope); + + ~UdpGsoBatchWriter() override; + + // writePacket perform batched sends based on QuicGsoBatchWriter::WritePacket + Api::IoCallUint64Result writePacket(const Buffer::Instance& buffer, + const Network::Address::Ip* local_ip, + const Network::Address::Instance& peer_address) override; + + // UdpPacketWriter Implementations + bool isWriteBlocked() const override { return IsWriteBlocked(); } + void setWritable() override { return SetWritable(); } + bool isBatchMode() const override { return IsBatchMode(); } + uint64_t getMaxPacketSize(const Network::Address::Instance& peer_address) const override; + Network::UdpPacketWriterBuffer + getNextWriteLocation(const Network::Address::Ip* local_ip, + const Network::Address::Instance& peer_address) override; + Api::IoCallUint64Result flush() override; + +private: + /** + * @brief Update stats_ field for the udp packet writer + * @param quic_result is the result from Flush/WritePacket + */ + void updateUdpGsoBatchWriterStats(quic::WriteResult quic_result); + + /** + * @brief Generate UdpGsoBatchWriterStats object from scope + * @param scope for stats + * @return UdpGsoBatchWriterStats for scope + */ + UdpGsoBatchWriterStats generateStats(Stats::Scope& scope); + UdpGsoBatchWriterStats stats_; + uint64_t gso_size_; +}; + +class UdpGsoBatchWriterFactory : public Network::UdpPacketWriterFactory { +public: + UdpGsoBatchWriterFactory(); + + Network::UdpPacketWriterPtr createUdpPacketWriter(Network::IoHandle& io_handle, + Stats::Scope& scope) override; + +private: + envoy::config::core::v3::RuntimeFeatureFlag enabled_; +}; + +} // namespace Quic +} // namespace Envoy diff --git a/source/extensions/quic_listeners/quiche/udp_gso_batch_writer_config.cc b/source/extensions/quic_listeners/quiche/udp_gso_batch_writer_config.cc new file mode 100644 index 
0000000000000..e2428f32ecafa --- /dev/null +++ b/source/extensions/quic_listeners/quiche/udp_gso_batch_writer_config.cc @@ -0,0 +1,30 @@ +#include "extensions/quic_listeners/quiche/udp_gso_batch_writer_config.h" + +#include "envoy/config/listener/v3/udp_gso_batch_writer_config.pb.h" + +#include "common/api/os_sys_calls_impl.h" + +#include "extensions/quic_listeners/quiche/udp_gso_batch_writer.h" + +namespace Envoy { +namespace Quic { + +ProtobufTypes::MessagePtr UdpGsoBatchWriterConfigFactory::createEmptyConfigProto() { + return std::make_unique(); +} + +Network::UdpPacketWriterFactoryPtr +UdpGsoBatchWriterConfigFactory::createUdpPacketWriterFactory(const Protobuf::Message& /*message*/) { + if (!Api::OsSysCallsSingleton::get().supportsUdpGso()) { + throw EnvoyException("Error configuring batch writer on platform without support " + "for UDP GSO. Reset udp_writer_config to default writer"); + } + return std::make_unique(); +} + +std::string UdpGsoBatchWriterConfigFactory::name() const { return GsoBatchWriterName; } + +REGISTER_FACTORY(UdpGsoBatchWriterConfigFactory, Network::UdpPacketWriterConfigFactory); + +} // namespace Quic +} // namespace Envoy diff --git a/source/extensions/quic_listeners/quiche/udp_gso_batch_writer_config.h b/source/extensions/quic_listeners/quiche/udp_gso_batch_writer_config.h new file mode 100644 index 0000000000000..20c2868088728 --- /dev/null +++ b/source/extensions/quic_listeners/quiche/udp_gso_batch_writer_config.h @@ -0,0 +1,28 @@ +#pragma once + +#include + +#include "envoy/network/udp_packet_writer_config.h" +#include "envoy/registry/registry.h" + +namespace Envoy { +namespace Quic { + +const std::string GsoBatchWriterName{"udp_gso_batch_writer"}; + +// Network::UdpPacketWriterConfigFactory to create UdpGsoBatchWriterFactory based on given +// protobuf. 
+class UdpGsoBatchWriterConfigFactory : public Network::UdpPacketWriterConfigFactory { +public: + ProtobufTypes::MessagePtr createEmptyConfigProto() override; + + Network::UdpPacketWriterFactoryPtr + createUdpPacketWriterFactory(const Protobuf::Message&) override; + + std::string name() const override; +}; + +DECLARE_FACTORY(UdpGsoBatchWriterConfigFactory); + +} // namespace Quic +} // namespace Envoy diff --git a/source/extensions/resource_monitors/BUILD b/source/extensions/resource_monitors/BUILD index 6156949edef64..40a5e79b39d3b 100644 --- a/source/extensions/resource_monitors/BUILD +++ b/source/extensions/resource_monitors/BUILD @@ -1,16 +1,18 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +envoy_extension_package() envoy_cc_library( name = "well_known_names", hdrs = ["well_known_names.h"], + # well known names files are public as long as they exist. + visibility = ["//visibility:public"], deps = [ "//source/common/singleton:const_singleton", ], diff --git a/source/extensions/resource_monitors/common/BUILD b/source/extensions/resource_monitors/common/BUILD index ff6773aaa8d13..a17f10b5c3780 100644 --- a/source/extensions/resource_monitors/common/BUILD +++ b/source/extensions/resource_monitors/common/BUILD @@ -1,16 +1,18 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +envoy_extension_package() envoy_cc_library( name = "factory_base_lib", hdrs = ["factory_base.h"], + # This resource monitoring library is considered core code. 
+ visibility = ["//visibility:public"], deps = [ "//include/envoy/server:resource_monitor_config_interface", "//source/common/protobuf:utility_lib", diff --git a/source/extensions/resource_monitors/fixed_heap/BUILD b/source/extensions/resource_monitors/fixed_heap/BUILD index e54cfe813179e..6c2022537d3d7 100644 --- a/source/extensions/resource_monitors/fixed_heap/BUILD +++ b/source/extensions/resource_monitors/fixed_heap/BUILD @@ -1,13 +1,13 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +envoy_extension_package() envoy_cc_library( name = "fixed_heap_monitor", diff --git a/source/extensions/resource_monitors/injected_resource/BUILD b/source/extensions/resource_monitors/injected_resource/BUILD index 650d87c69b987..6f1c24318cee1 100644 --- a/source/extensions/resource_monitors/injected_resource/BUILD +++ b/source/extensions/resource_monitors/injected_resource/BUILD @@ -1,13 +1,13 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +envoy_extension_package() envoy_cc_library( name = "injected_resource_monitor", @@ -28,6 +28,11 @@ envoy_cc_extension( hdrs = ["config.h"], security_posture = "data_plane_agnostic", status = "alpha", + # TODO(#9953) clean up. 
+ visibility = [ + "//:extension_config", + "//test/integration:__subpackages__", + ], deps = [ ":injected_resource_monitor", "//include/envoy/registry", diff --git a/source/extensions/retry/host/omit_canary_hosts/BUILD b/source/extensions/retry/host/omit_canary_hosts/BUILD index 39ecd978faafd..9427fa9fc5071 100644 --- a/source/extensions/retry/host/omit_canary_hosts/BUILD +++ b/source/extensions/retry/host/omit_canary_hosts/BUILD @@ -1,13 +1,13 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +envoy_extension_package() envoy_cc_library( name = "omit_canary_hosts_predicate_lib", @@ -26,7 +26,6 @@ envoy_cc_extension( ":omit_canary_hosts_predicate_lib", "//include/envoy/registry", "//include/envoy/upstream:retry_interface", - "//source/extensions/retry/host:well_known_names", "@envoy_api//envoy/config/retry/omit_canary_hosts/v2:pkg_cc_proto", ], ) diff --git a/source/extensions/retry/host/omit_canary_hosts/config.h b/source/extensions/retry/host/omit_canary_hosts/config.h index c34398003b822..d453bc8c85066 100644 --- a/source/extensions/retry/host/omit_canary_hosts/config.h +++ b/source/extensions/retry/host/omit_canary_hosts/config.h @@ -2,7 +2,6 @@ #include "envoy/upstream/retry.h" #include "extensions/retry/host/omit_canary_hosts/omit_canary_hosts.h" -#include "extensions/retry/host/well_known_names.h" namespace Envoy { namespace Extensions { @@ -17,9 +16,7 @@ class OmitCanaryHostsRetryPredicateFactory : public Upstream::RetryHostPredicate return std::make_shared(); } - std::string name() const override { - return RetryHostPredicateValues::get().OmitCanaryHostsPredicate; - } + std::string name() const override { return "envoy.retry_host_predicates.omit_canary_hosts"; } ProtobufTypes::MessagePtr createEmptyConfigProto() override { return std::make_unique< diff --git 
a/source/extensions/retry/host/omit_host_metadata/BUILD b/source/extensions/retry/host/omit_host_metadata/BUILD index d2a0de1ceac74..5e1aaa38c5af5 100644 --- a/source/extensions/retry/host/omit_host_metadata/BUILD +++ b/source/extensions/retry/host/omit_host_metadata/BUILD @@ -1,13 +1,13 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +envoy_extension_package() envoy_cc_library( name = "omit_host_metadata_predicate_lib", @@ -29,7 +29,6 @@ envoy_cc_extension( "//include/envoy/registry", "//include/envoy/upstream:retry_interface", "//source/common/protobuf", - "//source/extensions/retry/host:well_known_names", "@envoy_api//envoy/extensions/retry/host/omit_host_metadata/v3:pkg_cc_proto", ], ) diff --git a/source/extensions/retry/host/omit_host_metadata/config.h b/source/extensions/retry/host/omit_host_metadata/config.h index a510a0076cae0..85cb0734692d3 100644 --- a/source/extensions/retry/host/omit_host_metadata/config.h +++ b/source/extensions/retry/host/omit_host_metadata/config.h @@ -5,7 +5,6 @@ #include "envoy/upstream/retry.h" #include "extensions/retry/host/omit_host_metadata/omit_host_metadata.h" -#include "extensions/retry/host/well_known_names.h" namespace Envoy { namespace Extensions { @@ -17,9 +16,7 @@ class OmitHostsRetryPredicateFactory : public Upstream::RetryHostPredicateFactor Upstream::RetryHostPredicateSharedPtr createHostPredicate(const Protobuf::Message& config, uint32_t retry_count) override; - std::string name() const override { - return RetryHostPredicateValues::get().OmitHostMetadataPredicate; - } + std::string name() const override { return "envoy.retry_host_predicates.omit_host_metadata"; } ProtobufTypes::MessagePtr createEmptyConfigProto() override { return ProtobufTypes::MessagePtr( diff --git a/source/extensions/retry/host/omit_host_metadata/omit_host_metadata.cc 
b/source/extensions/retry/host/omit_host_metadata/omit_host_metadata.cc index 1eb21f52f971b..91559571ef2e9 100644 --- a/source/extensions/retry/host/omit_host_metadata/omit_host_metadata.cc +++ b/source/extensions/retry/host/omit_host_metadata/omit_host_metadata.cc @@ -1,7 +1,6 @@ #include "extensions/retry/host/omit_host_metadata/omit_host_metadata.h" #include "common/config/metadata.h" -#include "common/config/well_known_names.h" namespace Envoy { namespace Extensions { diff --git a/source/extensions/retry/host/previous_hosts/BUILD b/source/extensions/retry/host/previous_hosts/BUILD index 17ab0e326132f..78e78b1a330e1 100644 --- a/source/extensions/retry/host/previous_hosts/BUILD +++ b/source/extensions/retry/host/previous_hosts/BUILD @@ -1,13 +1,13 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +envoy_extension_package() envoy_cc_library( name = "previous_hosts_predicate_lib", @@ -26,7 +26,6 @@ envoy_cc_extension( ":previous_hosts_predicate_lib", "//include/envoy/registry", "//include/envoy/upstream:retry_interface", - "//source/extensions/retry/host:well_known_names", "@envoy_api//envoy/config/retry/previous_hosts/v2:pkg_cc_proto", ], ) diff --git a/source/extensions/retry/host/previous_hosts/config.h b/source/extensions/retry/host/previous_hosts/config.h index d01261adb8d15..201290d5be994 100644 --- a/source/extensions/retry/host/previous_hosts/config.h +++ b/source/extensions/retry/host/previous_hosts/config.h @@ -4,7 +4,6 @@ #include "envoy/upstream/retry.h" #include "extensions/retry/host/previous_hosts/previous_hosts.h" -#include "extensions/retry/host/well_known_names.h" namespace Envoy { namespace Extensions { @@ -18,9 +17,7 @@ class PreviousHostsRetryPredicateFactory : public Upstream::RetryHostPredicateFa return std::make_shared(retry_count); } - std::string name() const override { 
- return RetryHostPredicateValues::get().PreviousHostsPredicate; - } + std::string name() const override { return "envoy.retry_host_predicates.previous_hosts"; } ProtobufTypes::MessagePtr createEmptyConfigProto() override { return std::make_unique(); diff --git a/source/extensions/retry/host/well_known_names.h b/source/extensions/retry/host/well_known_names.h deleted file mode 100644 index fc009573c43d1..0000000000000 --- a/source/extensions/retry/host/well_known_names.h +++ /dev/null @@ -1,28 +0,0 @@ -#pragma once - -#include - -#include "common/singleton/const_singleton.h" - -namespace Envoy { -namespace Extensions { -namespace Retry { -namespace Host { - -/** - * Well-known retry host predicate names. - */ -class RetryHostPredicatesNameValues { -public: - // Previous host predicate. Rejects hosts that have already been tried. - const std::string PreviousHostsPredicate = "envoy.retry_host_predicates.previous_hosts"; - const std::string OmitCanaryHostsPredicate = "envoy.retry_host_predicates.omit_canary_hosts"; - const std::string OmitHostMetadataPredicate = "envoy.retry_host_predicates.omit_host_metadata"; -}; - -using RetryHostPredicateValues = ConstSingleton; - -} // namespace Host -} // namespace Retry -} // namespace Extensions -} // namespace Envoy diff --git a/source/extensions/retry/priority/BUILD b/source/extensions/retry/priority/BUILD index 6156949edef64..22d835b407061 100644 --- a/source/extensions/retry/priority/BUILD +++ b/source/extensions/retry/priority/BUILD @@ -1,12 +1,12 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +envoy_extension_package() envoy_cc_library( name = "well_known_names", diff --git a/source/extensions/retry/priority/previous_priorities/BUILD b/source/extensions/retry/priority/previous_priorities/BUILD index 1a545c2509f05..66a592d9c7727 100644 --- 
a/source/extensions/retry/priority/previous_priorities/BUILD +++ b/source/extensions/retry/priority/previous_priorities/BUILD @@ -1,13 +1,13 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +envoy_extension_package() envoy_cc_library( name = "previous_priorities_lib", diff --git a/source/extensions/retry/priority/previous_priorities/previous_priorities.cc b/source/extensions/retry/priority/previous_priorities/previous_priorities.cc index 7a1ec35d52637..96dc7c540b25f 100644 --- a/source/extensions/retry/priority/previous_priorities/previous_priorities.cc +++ b/source/extensions/retry/priority/previous_priorities/previous_priorities.cc @@ -7,20 +7,24 @@ namespace Priority { const Upstream::HealthyAndDegradedLoad& PreviousPrioritiesRetryPriority::determinePriorityLoad( const Upstream::PrioritySet& priority_set, - const Upstream::HealthyAndDegradedLoad& original_priority_load) { + const Upstream::HealthyAndDegradedLoad& original_priority_load, + const PriorityMappingFunc& priority_mapping_func) { // If we've not seen enough retries to modify the priority load, just // return the original. // If this retry should trigger an update, recalculate the priority load by excluding attempted // priorities. 
- if (attempted_priorities_.size() < update_frequency_) { + if (attempted_hosts_.size() < update_frequency_) { return original_priority_load; - } else if (attempted_priorities_.size() % update_frequency_ == 0) { + } else if (attempted_hosts_.size() % update_frequency_ == 0) { if (excluded_priorities_.size() < priority_set.hostSetsPerPriority().size()) { excluded_priorities_.resize(priority_set.hostSetsPerPriority().size()); } - for (const auto priority : attempted_priorities_) { - excluded_priorities_[priority] = true; + for (const auto& host : attempted_hosts_) { + absl::optional mapped_host_priority = priority_mapping_func(*host); + if (mapped_host_priority.has_value()) { + excluded_priorities_[mapped_host_priority.value()] = true; + } } if (!adjustForAttemptedPriorities(priority_set)) { @@ -50,7 +54,7 @@ bool PreviousPrioritiesRetryPriority::adjustForAttemptedPriorities( for (auto&& excluded_priority : excluded_priorities_) { excluded_priority = false; } - attempted_priorities_.clear(); + attempted_hosts_.clear(); total_availability = adjustedAvailability(adjusted_per_priority_health, adjusted_per_priority_degraded); } diff --git a/source/extensions/retry/priority/previous_priorities/previous_priorities.h b/source/extensions/retry/priority/previous_priorities/previous_priorities.h index 1723fc49b7c97..05e4f3db37a24 100644 --- a/source/extensions/retry/priority/previous_priorities/previous_priorities.h +++ b/source/extensions/retry/priority/previous_priorities/previous_priorities.h @@ -13,15 +13,16 @@ class PreviousPrioritiesRetryPriority : public Upstream::RetryPriority { public: PreviousPrioritiesRetryPriority(uint32_t update_frequency, uint32_t max_retries) : update_frequency_(update_frequency) { - attempted_priorities_.reserve(max_retries); + attempted_hosts_.reserve(max_retries); } const Upstream::HealthyAndDegradedLoad& determinePriorityLoad(const Upstream::PrioritySet& priority_set, - const Upstream::HealthyAndDegradedLoad& original_priority_load) 
override; + const Upstream::HealthyAndDegradedLoad& original_priority_load, + const PriorityMappingFunc& priority_mapping_func) override; void onHostAttempted(Upstream::HostDescriptionConstSharedPtr attempted_host) override { - attempted_priorities_.emplace_back(attempted_host->priority()); + attempted_hosts_.emplace_back(attempted_host); } private: @@ -41,7 +42,7 @@ class PreviousPrioritiesRetryPriority : public Upstream::RetryPriority { bool adjustForAttemptedPriorities(const Upstream::PrioritySet& priority_set); const uint32_t update_frequency_; - std::vector attempted_priorities_; + std::vector attempted_hosts_; std::vector excluded_priorities_; Upstream::HealthyAndDegradedLoad per_priority_load_; Upstream::HealthyAvailability per_priority_health_; diff --git a/source/extensions/stat_sinks/BUILD b/source/extensions/stat_sinks/BUILD index 6156949edef64..40a5e79b39d3b 100644 --- a/source/extensions/stat_sinks/BUILD +++ b/source/extensions/stat_sinks/BUILD @@ -1,16 +1,18 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +envoy_extension_package() envoy_cc_library( name = "well_known_names", hdrs = ["well_known_names.h"], + # well known names files are public as long as they exist. 
+ visibility = ["//visibility:public"], deps = [ "//source/common/singleton:const_singleton", ], diff --git a/source/extensions/stat_sinks/common/statsd/BUILD b/source/extensions/stat_sinks/common/statsd/BUILD index 57c0b009c5068..5e3d6a771d21e 100644 --- a/source/extensions/stat_sinks/common/statsd/BUILD +++ b/source/extensions/stat_sinks/common/statsd/BUILD @@ -1,12 +1,12 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +envoy_extension_package() envoy_cc_library( name = "statsd_lib", diff --git a/source/extensions/stat_sinks/common/statsd/statsd.cc b/source/extensions/stat_sinks/common/statsd/statsd.cc index b4676d2901550..d7c1a5099178c 100644 --- a/source/extensions/stat_sinks/common/statsd/statsd.cc +++ b/source/extensions/stat_sinks/common/statsd/statsd.cc @@ -4,6 +4,7 @@ #include #include +#include "envoy/buffer/buffer.h" #include "envoy/common/exception.h" #include "envoy/common/platform.h" #include "envoy/event/dispatcher.h" @@ -11,10 +12,12 @@ #include "envoy/upstream/cluster_manager.h" #include "common/api/os_sys_calls_impl.h" +#include "common/buffer/buffer_impl.h" #include "common/common/assert.h" #include "common/common/fmt.h" #include "common/common/utility.h" #include "common/config/utility.h" +#include "common/network/socket_interface_impl.h" #include "common/network/utility.h" #include "common/stats/symbol_table_impl.h" @@ -27,8 +30,8 @@ namespace Common { namespace Statsd { UdpStatsdSink::WriterImpl::WriterImpl(UdpStatsdSink& parent) - : parent_(parent), - io_handle_(parent_.server_address_->socket(Network::Address::SocketType::Datagram)) {} + : parent_(parent), io_handle_(Network::ioHandleForAddr(Network::Socket::Type::Datagram, + parent_.server_address_)) {} void UdpStatsdSink::WriterImpl::write(const std::string& message) { // TODO(mattklein123): We can avoid this const_cast pattern by having a 
constant variant of @@ -37,11 +40,16 @@ void UdpStatsdSink::WriterImpl::write(const std::string& message) { Network::Utility::writeToSocket(*io_handle_, &slice, 1, nullptr, *parent_.server_address_); } +void UdpStatsdSink::WriterImpl::writeBuffer(Buffer::Instance& data) { + Network::Utility::writeToSocket(*io_handle_, data, nullptr, *parent_.server_address_); +} + UdpStatsdSink::UdpStatsdSink(ThreadLocal::SlotAllocator& tls, Network::Address::InstanceConstSharedPtr address, const bool use_tag, - const std::string& prefix) + const std::string& prefix, absl::optional buffer_size) : tls_(tls.allocateSlot()), server_address_(std::move(address)), use_tag_(use_tag), - prefix_(prefix.empty() ? Statsd::getDefaultPrefix() : prefix) { + prefix_(prefix.empty() ? Statsd::getDefaultPrefix() : prefix), + buffer_size_(buffer_size.value_or(0)) { tls_->set([this](Event::Dispatcher&) -> ThreadLocal::ThreadLocalObjectSharedPtr { return std::make_shared(*this); }); @@ -49,22 +57,57 @@ UdpStatsdSink::UdpStatsdSink(ThreadLocal::SlotAllocator& tls, void UdpStatsdSink::flush(Stats::MetricSnapshot& snapshot) { Writer& writer = tls_->getTyped(); + Buffer::OwnedImpl buffer; + for (const auto& counter : snapshot.counters()) { if (counter.counter_.get().used()) { - writer.write(absl::StrCat(prefix_, ".", getName(counter.counter_.get()), ":", counter.delta_, - "|c", buildTagStr(counter.counter_.get().tags()))); + const std::string counter_str = + absl::StrCat(prefix_, ".", getName(counter.counter_.get()), ":", counter.delta_, "|c", + buildTagStr(counter.counter_.get().tags())); + writeBuffer(buffer, writer, counter_str); } } for (const auto& gauge : snapshot.gauges()) { if (gauge.get().used()) { - writer.write(absl::StrCat(prefix_, ".", getName(gauge.get()), ":", gauge.get().value(), "|g", - buildTagStr(gauge.get().tags()))); + const std::string gauge_str = + absl::StrCat(prefix_, ".", getName(gauge.get()), ":", gauge.get().value(), "|g", + buildTagStr(gauge.get().tags())); + 
writeBuffer(buffer, writer, gauge_str); } } + + flushBuffer(buffer, writer); // TODO(efimki): Add support of text readouts stats. } +void UdpStatsdSink::writeBuffer(Buffer::OwnedImpl& buffer, Writer& writer, + const std::string& statsd_metric) const { + if (statsd_metric.length() >= buffer_size_) { + // Our statsd_metric is too large to fit into the buffer, skip buffering and write directly + writer.write(statsd_metric); + } else { + if ((buffer.length() + statsd_metric.length() + 1) > buffer_size_) { + // If we add the new statsd_metric, we'll overflow our buffer. Flush the buffer to make + // room for the new statsd_metric. + flushBuffer(buffer, writer); + } else if (buffer.length() > 0) { + // We have room and have metrics already in the buffer, add a newline to separate + // metric entries. + buffer.add("\n"); + } + buffer.add(statsd_metric); + } +} + +void UdpStatsdSink::flushBuffer(Buffer::OwnedImpl& buffer, Writer& writer) const { + if (buffer.length() == 0) { + return; + } + writer.writeBuffer(buffer); + buffer.drain(buffer.length()); +} + void UdpStatsdSink::onHistogramComplete(const Stats::Histogram& histogram, uint64_t value) { // For statsd histograms are all timers in milliseconds, Envoy histograms are however // not necessarily timers in milliseconds, for Envoy histograms suffixed with their corresponding diff --git a/source/extensions/stat_sinks/common/statsd/statsd.h b/source/extensions/stat_sinks/common/statsd/statsd.h index 41218ace192d0..b7eb8bfac627c 100644 --- a/source/extensions/stat_sinks/common/statsd/statsd.h +++ b/source/extensions/stat_sinks/common/statsd/statsd.h @@ -1,5 +1,6 @@ #pragma once +#include "envoy/buffer/buffer.h" #include "envoy/common/platform.h" #include "envoy/local_info/local_info.h" #include "envoy/network/connection.h" @@ -15,6 +16,8 @@ #include "common/common/macros.h" #include "common/network/io_socket_handle_impl.h" +#include "absl/types/optional.h" + namespace Envoy { namespace Extensions { namespace StatSinks { @@ 
-34,15 +37,19 @@ class UdpStatsdSink : public Stats::Sink { class Writer : public ThreadLocal::ThreadLocalObject { public: virtual void write(const std::string& message) PURE; + virtual void writeBuffer(Buffer::Instance& data) PURE; }; UdpStatsdSink(ThreadLocal::SlotAllocator& tls, Network::Address::InstanceConstSharedPtr address, - const bool use_tag, const std::string& prefix = getDefaultPrefix()); + const bool use_tag, const std::string& prefix = getDefaultPrefix(), + absl::optional buffer_size = absl::nullopt); // For testing. UdpStatsdSink(ThreadLocal::SlotAllocator& tls, const std::shared_ptr& writer, - const bool use_tag, const std::string& prefix = getDefaultPrefix()) + const bool use_tag, const std::string& prefix = getDefaultPrefix(), + absl::optional buffer_size = absl::nullopt) : tls_(tls.allocateSlot()), use_tag_(use_tag), - prefix_(prefix.empty() ? getDefaultPrefix() : prefix) { + prefix_(prefix.empty() ? getDefaultPrefix() : prefix), + buffer_size_(buffer_size.value_or(0)) { tls_->set( [writer](Event::Dispatcher&) -> ThreadLocal::ThreadLocalObjectSharedPtr { return writer; }); } @@ -52,6 +59,7 @@ class UdpStatsdSink : public Stats::Sink { void onHistogramComplete(const Stats::Histogram& histogram, uint64_t value) override; bool getUseTagForTest() { return use_tag_; } + uint64_t getBufferSizeForTest() { return buffer_size_; } const std::string& getPrefix() { return prefix_; } private: @@ -64,12 +72,16 @@ class UdpStatsdSink : public Stats::Sink { // Writer void write(const std::string& message) override; + void writeBuffer(Buffer::Instance& data) override; private: UdpStatsdSink& parent_; const Network::IoHandlePtr io_handle_; }; + void flushBuffer(Buffer::OwnedImpl& buffer, Writer& writer) const; + void writeBuffer(Buffer::OwnedImpl& buffer, Writer& writer, const std::string& data) const; + const std::string getName(const Stats::Metric& metric) const; const std::string buildTagStr(const std::vector& tags) const; @@ -78,6 +90,7 @@ class UdpStatsdSink 
: public Stats::Sink { const bool use_tag_; // Prefix for all flushed stats. const std::string prefix_; + const uint64_t buffer_size_; }; /** diff --git a/source/extensions/stat_sinks/dog_statsd/BUILD b/source/extensions/stat_sinks/dog_statsd/BUILD index 0c9e5f2995733..662a3c18c24f4 100644 --- a/source/extensions/stat_sinks/dog_statsd/BUILD +++ b/source/extensions/stat_sinks/dog_statsd/BUILD @@ -1,15 +1,15 @@ -licenses(["notice"]) # Apache 2 - -# Stats sink for the DataDog (https://www.datadoghq.com/) variant of the statsd protocol -# (https://docs.datadoghq.com/developers/dogstatsd/). - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +# Stats sink for the DataDog (https://www.datadoghq.com/) variant of the statsd protocol +# (https://docs.datadoghq.com/developers/dogstatsd/). + +envoy_extension_package() envoy_cc_extension( name = "config", diff --git a/source/extensions/stat_sinks/dog_statsd/config.cc b/source/extensions/stat_sinks/dog_statsd/config.cc index fecd087b2f7f7..985eb3e255b05 100644 --- a/source/extensions/stat_sinks/dog_statsd/config.cc +++ b/source/extensions/stat_sinks/dog_statsd/config.cc @@ -11,21 +11,28 @@ #include "extensions/stat_sinks/common/statsd/statsd.h" #include "extensions/stat_sinks/well_known_names.h" +#include "absl/types/optional.h" + namespace Envoy { namespace Extensions { namespace StatSinks { namespace DogStatsd { -Stats::SinkPtr DogStatsdSinkFactory::createStatsSink(const Protobuf::Message& config, - Server::Instance& server) { +Stats::SinkPtr +DogStatsdSinkFactory::createStatsSink(const Protobuf::Message& config, + Server::Configuration::ServerFactoryContext& server) { const auto& sink_config = MessageUtil::downcastAndValidate( config, server.messageValidationContext().staticValidationVisitor()); Network::Address::InstanceConstSharedPtr address = Network::Address::resolveProtoAddress(sink_config.address()); 
ENVOY_LOG(debug, "dog_statsd UDP ip address: {}", address->asString()); + absl::optional max_bytes; + if (sink_config.has_max_bytes_per_datagram()) { + max_bytes = sink_config.max_bytes_per_datagram().value(); + } return std::make_unique(server.threadLocal(), std::move(address), - true, sink_config.prefix()); + true, sink_config.prefix(), max_bytes); } ProtobufTypes::MessagePtr DogStatsdSinkFactory::createEmptyConfigProto() { diff --git a/source/extensions/stat_sinks/dog_statsd/config.h b/source/extensions/stat_sinks/dog_statsd/config.h index 5e9cfdef1cb28..037dd4476eff4 100644 --- a/source/extensions/stat_sinks/dog_statsd/config.h +++ b/source/extensions/stat_sinks/dog_statsd/config.h @@ -16,7 +16,7 @@ class DogStatsdSinkFactory : Logger::Loggable, public Server::Configuration::StatsSinkFactory { public: Stats::SinkPtr createStatsSink(const Protobuf::Message& config, - Server::Instance& server) override; + Server::Configuration::ServerFactoryContext& server) override; ProtobufTypes::MessagePtr createEmptyConfigProto() override; diff --git a/source/extensions/stat_sinks/hystrix/BUILD b/source/extensions/stat_sinks/hystrix/BUILD index d058088df9b2f..7b28f8218c1b9 100644 --- a/source/extensions/stat_sinks/hystrix/BUILD +++ b/source/extensions/stat_sinks/hystrix/BUILD @@ -1,15 +1,15 @@ -licenses(["notice"]) # Apache 2 - -# Stats sink for the basic version of the hystrix protocol (https://github.com/b/hystrix_spec). - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +# Stats sink for the basic version of the hystrix protocol (https://github.com/b/hystrix_spec). 
+ +envoy_extension_package() envoy_cc_extension( name = "config", diff --git a/source/extensions/stat_sinks/hystrix/config.cc b/source/extensions/stat_sinks/hystrix/config.cc index e23c4ab050d46..4997231b8be89 100644 --- a/source/extensions/stat_sinks/hystrix/config.cc +++ b/source/extensions/stat_sinks/hystrix/config.cc @@ -16,8 +16,9 @@ namespace Extensions { namespace StatSinks { namespace Hystrix { -Stats::SinkPtr HystrixSinkFactory::createStatsSink(const Protobuf::Message& config, - Server::Instance& server) { +Stats::SinkPtr +HystrixSinkFactory::createStatsSink(const Protobuf::Message& config, + Server::Configuration::ServerFactoryContext& server) { const auto& hystrix_sink = MessageUtil::downcastAndValidate( config, server.messageValidationContext().staticValidationVisitor()); diff --git a/source/extensions/stat_sinks/hystrix/config.h b/source/extensions/stat_sinks/hystrix/config.h index 396cab600254d..cff7ede28a177 100644 --- a/source/extensions/stat_sinks/hystrix/config.h +++ b/source/extensions/stat_sinks/hystrix/config.h @@ -16,7 +16,7 @@ class HystrixSinkFactory : Logger::Loggable, public: // StatsSinkFactory Stats::SinkPtr createStatsSink(const Protobuf::Message& config, - Server::Instance& server) override; + Server::Configuration::ServerFactoryContext& server) override; ProtobufTypes::MessagePtr createEmptyConfigProto() override; diff --git a/source/extensions/stat_sinks/hystrix/hystrix.cc b/source/extensions/stat_sinks/hystrix/hystrix.cc index 0596dd4cda41f..a35f67a8d3f77 100644 --- a/source/extensions/stat_sinks/hystrix/hystrix.cc +++ b/source/extensions/stat_sinks/hystrix/hystrix.cc @@ -22,6 +22,13 @@ namespace Extensions { namespace StatSinks { namespace Hystrix { +Http::RegisterCustomInlineHeader + access_control_allow_origin_handle(Http::CustomHeaders::get().AccessControlAllowOrigin); +Http::RegisterCustomInlineHeader + access_control_allow_headers_handle(Http::CustomHeaders::get().AccessControlAllowHeaders); +Http::RegisterCustomInlineHeader + 
cache_control_handle(Http::CustomHeaders::get().CacheControl); + const uint64_t HystrixSink::DEFAULT_NUM_BUCKETS; ClusterStatsCache::ClusterStatsCache(const std::string& cluster_name) : cluster_name_(cluster_name) {} @@ -264,9 +271,10 @@ const std::string HystrixSink::printRollingWindows() { return out_str.str(); } -HystrixSink::HystrixSink(Server::Instance& server, const uint64_t num_buckets) +HystrixSink::HystrixSink(Server::Configuration::ServerFactoryContext& server, + const uint64_t num_buckets) : server_(server), current_index_(num_buckets > 0 ? num_buckets : DEFAULT_NUM_BUCKETS), - window_size_(current_index_ + 1), stat_name_pool_(server.stats().symbolTable()), + window_size_(current_index_ + 1), stat_name_pool_(server.scope().symbolTable()), cluster_name_(stat_name_pool_.add(Config::TagNames::get().CLUSTER_NAME)), cluster_upstream_rq_time_(stat_name_pool_.add("cluster.upstream_rq_time")), membership_total_(stat_name_pool_.add("membership_total")), @@ -288,12 +296,13 @@ Http::Code HystrixSink::handlerHystrixEventStream(absl::string_view, Server::AdminStream& admin_stream) { response_headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.TextEventStream); - response_headers.setReferenceCacheControl(Http::Headers::get().CacheControlValues.NoCache); + response_headers.setReferenceInline(cache_control_handle.handle(), + Http::CustomHeaders::get().CacheControlValues.NoCache); response_headers.setReferenceConnection(Http::Headers::get().ConnectionValues.Close); - response_headers.setReferenceAccessControlAllowHeaders( - AccessControlAllowHeadersValue.AllowHeadersHystrix); - response_headers.setReferenceAccessControlAllowOrigin( - Http::Headers::get().AccessControlAllowOriginValue.All); + response_headers.setReferenceInline(access_control_allow_headers_handle.handle(), + AccessControlAllowHeadersValue.AllowHeadersHystrix); + response_headers.setReferenceInline(access_control_allow_origin_handle.handle(), + 
Http::CustomHeaders::get().AccessControlAllowOriginValue.All); Http::StreamDecoderFilterCallbacks& stream_decoder_filter_callbacks = admin_stream.getDecoderFilterCallbacks(); @@ -333,14 +342,14 @@ void HystrixSink::flush(Stats::MetricSnapshot& snapshot) { Upstream::ClusterManager::ClusterInfoMap clusters = server_.clusterManager().clusters(); // Save a map of the relevant histograms per cluster in a convenient format. - std::unordered_map time_histograms; + absl::node_hash_map time_histograms; for (const auto& histogram : snapshot.histograms()) { if (histogram.get().tagExtractedStatName() == cluster_upstream_rq_time_) { absl::optional value = Stats::Utility::findTag(histogram.get(), cluster_name_); // Make sure we found the cluster name tag ASSERT(value); - std::string value_str = server_.stats().symbolTable().toString(*value); + std::string value_str = server_.scope().symbolTable().toString(*value); auto it_bool_pair = time_histograms.emplace(std::make_pair(value_str, QuantileLatencyMap())); // Make sure histogram with this name was not already added ASSERT(it_bool_pair.second); @@ -401,7 +410,9 @@ void HystrixSink::flush(Stats::MetricSnapshot& snapshot) { if (clusters.size() < cluster_stats_cache_map_.size()) { for (auto it = cluster_stats_cache_map_.begin(); it != cluster_stats_cache_map_.end();) { if (clusters.find(it->first) == clusters.end()) { - it = cluster_stats_cache_map_.erase(it); + auto next_it = std::next(it); + cluster_stats_cache_map_.erase(it); + it = next_it; } else { ++it; } diff --git a/source/extensions/stat_sinks/hystrix/hystrix.h b/source/extensions/stat_sinks/hystrix/hystrix.h index 08aa4f6b0c7e6..796e72d1f97a5 100644 --- a/source/extensions/stat_sinks/hystrix/hystrix.h +++ b/source/extensions/stat_sinks/hystrix/hystrix.h @@ -19,7 +19,7 @@ namespace Hystrix { using RollingWindow = std::vector; using RollingStatsMap = std::map; -using QuantileLatencyMap = std::unordered_map; +using QuantileLatencyMap = absl::node_hash_map; static const 
std::vector hystrix_quantiles = {0, 0.25, 0.5, 0.75, 0.90, 0.95, 0.99, 0.995, 1}; @@ -47,7 +47,7 @@ using ClusterStatsCachePtr = std::unique_ptr; class HystrixSink : public Stats::Sink, public Logger::Loggable { public: - HystrixSink(Server::Instance& server, uint64_t num_buckets); + HystrixSink(Server::Configuration::ServerFactoryContext& server, uint64_t num_buckets); Http::Code handlerHystrixEventStream(absl::string_view, Http::ResponseHeaderMap& response_headers, Buffer::Instance&, Server::AdminStream& admin_stream); void flush(Stats::MetricSnapshot& snapshot) override; @@ -149,13 +149,13 @@ class HystrixSink : public Stats::Sink, public Logger::Loggable callbacks_list_; - Server::Instance& server_; + Server::Configuration::ServerFactoryContext& server_; uint64_t current_index_; const uint64_t window_size_; static const uint64_t DEFAULT_NUM_BUCKETS = 10; // Map from cluster names to a struct of all of that cluster's stat windows. - std::unordered_map cluster_stats_cache_map_; + absl::node_hash_map cluster_stats_cache_map_; // Saved StatNames for fast comparisons in loop. 
// TODO(mattklein123): Many/all of these stats should just be pulled directly from the cluster diff --git a/source/extensions/stat_sinks/metrics_service/BUILD b/source/extensions/stat_sinks/metrics_service/BUILD index c26135a750494..df78d152ba532 100644 --- a/source/extensions/stat_sinks/metrics_service/BUILD +++ b/source/extensions/stat_sinks/metrics_service/BUILD @@ -1,15 +1,15 @@ -licenses(["notice"]) # Apache 2 - -# Stats sink for the gRPC metrics service: api/envoy/service/metrics/v2/metrics_service.proto - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +# Stats sink for the gRPC metrics service: api/envoy/service/metrics/v2/metrics_service.proto + +envoy_extension_package() envoy_cc_library( name = "metrics_service_grpc_lib", diff --git a/source/extensions/stat_sinks/metrics_service/config.cc b/source/extensions/stat_sinks/metrics_service/config.cc index 4f8402e201b0e..db1998aefe5bc 100644 --- a/source/extensions/stat_sinks/metrics_service/config.cc +++ b/source/extensions/stat_sinks/metrics_service/config.cc @@ -17,23 +17,27 @@ namespace Extensions { namespace StatSinks { namespace MetricsService { -Stats::SinkPtr MetricsServiceSinkFactory::createStatsSink(const Protobuf::Message& config, - Server::Instance& server) { +Stats::SinkPtr +MetricsServiceSinkFactory::createStatsSink(const Protobuf::Message& config, + Server::Configuration::ServerFactoryContext& server) { validateProtoDescriptors(); const auto& sink_config = MessageUtil::downcastAndValidate( config, server.messageValidationContext().staticValidationVisitor()); const auto& grpc_service = sink_config.grpc_service(); + const auto& transport_api_version = sink_config.transport_api_version(); ENVOY_LOG(debug, "Metrics Service gRPC service configuration: {}", grpc_service.DebugString()); std::shared_ptr grpc_metrics_streamer = std::make_shared( 
server.clusterManager().grpcAsyncClientManager().factoryForGrpcService( - grpc_service, server.stats(), false), - server.localInfo()); + grpc_service, server.scope(), false), + server.localInfo(), transport_api_version); - return std::make_unique(grpc_metrics_streamer, server.timeSource()); + return std::make_unique( + grpc_metrics_streamer, server.timeSource(), + PROTOBUF_GET_WRAPPED_OR_DEFAULT(sink_config, report_counters_as_deltas, false)); } ProtobufTypes::MessagePtr MetricsServiceSinkFactory::createEmptyConfigProto() { diff --git a/source/extensions/stat_sinks/metrics_service/config.h b/source/extensions/stat_sinks/metrics_service/config.h index 702ea0e97821b..f67eeb2cb538e 100644 --- a/source/extensions/stat_sinks/metrics_service/config.h +++ b/source/extensions/stat_sinks/metrics_service/config.h @@ -17,7 +17,7 @@ class MetricsServiceSinkFactory : Logger::Loggable, public Server::Configuration::StatsSinkFactory { public: Stats::SinkPtr createStatsSink(const Protobuf::Message& config, - Server::Instance& server) override; + Server::Configuration::ServerFactoryContext& server) override; ProtobufTypes::MessagePtr createEmptyConfigProto() override; diff --git a/source/extensions/stat_sinks/metrics_service/grpc_metrics_service_impl.cc b/source/extensions/stat_sinks/metrics_service/grpc_metrics_service_impl.cc index cfea996f40d78..092e3fbe6fcf6 100644 --- a/source/extensions/stat_sinks/metrics_service/grpc_metrics_service_impl.cc +++ b/source/extensions/stat_sinks/metrics_service/grpc_metrics_service_impl.cc @@ -16,37 +16,48 @@ namespace Extensions { namespace StatSinks { namespace MetricsService { -GrpcMetricsStreamerImpl::GrpcMetricsStreamerImpl(Grpc::AsyncClientFactoryPtr&& factory, - const LocalInfo::LocalInfo& local_info) - : client_(factory->create()), local_info_(local_info) {} +GrpcMetricsStreamerImpl::GrpcMetricsStreamerImpl( + Grpc::AsyncClientFactoryPtr&& factory, const LocalInfo::LocalInfo& local_info, + envoy::config::core::v3::ApiVersion 
transport_api_version) + : client_(factory->create()), local_info_(local_info), + service_method_( + Grpc::VersionedMethods("envoy.service.metrics.v3.MetricsService.StreamMetrics", + "envoy.service.metrics.v2.MetricsService.StreamMetrics") + .getMethodDescriptorForVersion(transport_api_version)), + transport_api_version_(transport_api_version) {} void GrpcMetricsStreamerImpl::send(envoy::service::metrics::v3::StreamMetricsMessage& message) { if (stream_ == nullptr) { - stream_ = client_->start(*Protobuf::DescriptorPool::generated_pool()->FindMethodByName( - "envoy.service.metrics.v2.MetricsService.StreamMetrics"), - *this, Http::AsyncClient::StreamOptions()); + stream_ = client_->start(service_method_, *this, Http::AsyncClient::StreamOptions()); auto* identifier = message.mutable_identifier(); *identifier->mutable_node() = local_info_.node(); } if (stream_ != nullptr) { - stream_->sendMessage(message, false); + stream_->sendMessage(message, transport_api_version_, false); } } MetricsServiceSink::MetricsServiceSink(const GrpcMetricsStreamerSharedPtr& grpc_metrics_streamer, - TimeSource& time_source) - : grpc_metrics_streamer_(grpc_metrics_streamer), time_source_(time_source) {} + TimeSource& time_source, + const bool report_counters_as_deltas) + : grpc_metrics_streamer_(grpc_metrics_streamer), time_source_(time_source), + report_counters_as_deltas_(report_counters_as_deltas) {} -void MetricsServiceSink::flushCounter(const Stats::Counter& counter) { +void MetricsServiceSink::flushCounter( + const Stats::MetricSnapshot::CounterSnapshot& counter_snapshot) { io::prometheus::client::MetricFamily* metrics_family = message_.add_envoy_metrics(); metrics_family->set_type(io::prometheus::client::MetricType::COUNTER); - metrics_family->set_name(counter.name()); + metrics_family->set_name(counter_snapshot.counter_.get().name()); auto* metric = metrics_family->add_metric(); metric->set_timestamp_ms(std::chrono::duration_cast( time_source_.systemTime().time_since_epoch()) 
.count()); auto* counter_metric = metric->mutable_counter(); - counter_metric->set_value(counter.value()); + if (report_counters_as_deltas_) { + counter_metric->set_value(counter_snapshot.delta_); + } else { + counter_metric->set_value(counter_snapshot.counter_.get().value()); + } } void MetricsServiceSink::flushGauge(const Stats::Gauge& gauge) { @@ -110,7 +121,7 @@ void MetricsServiceSink::flush(Stats::MetricSnapshot& snapshot) { snapshot.histograms().size()); for (const auto& counter : snapshot.counters()) { if (counter.counter_.get().used()) { - flushCounter(counter.counter_.get()); + flushCounter(counter); } } diff --git a/source/extensions/stat_sinks/metrics_service/grpc_metrics_service_impl.h b/source/extensions/stat_sinks/metrics_service/grpc_metrics_service_impl.h index f8d500a058496..d65bae27f9bb8 100644 --- a/source/extensions/stat_sinks/metrics_service/grpc_metrics_service_impl.h +++ b/source/extensions/stat_sinks/metrics_service/grpc_metrics_service_impl.h @@ -1,5 +1,7 @@ #pragma once +#include + #include "envoy/grpc/async_client.h" #include "envoy/local_info/local_info.h" #include "envoy/network/connection.h" @@ -50,7 +52,8 @@ using GrpcMetricsStreamerSharedPtr = std::shared_ptr; class GrpcMetricsStreamerImpl : public Singleton::Instance, public GrpcMetricsStreamer { public: GrpcMetricsStreamerImpl(Grpc::AsyncClientFactoryPtr&& factory, - const LocalInfo::LocalInfo& local_info); + const LocalInfo::LocalInfo& local_info, + envoy::config::core::v3::ApiVersion transport_api_version); // GrpcMetricsStreamer void send(envoy::service::metrics::v3::StreamMetricsMessage& message) override; @@ -64,8 +67,12 @@ class GrpcMetricsStreamerImpl : public Singleton::Instance, public GrpcMetricsSt envoy::service::metrics::v3::StreamMetricsResponse> client_; const LocalInfo::LocalInfo& local_info_; + const Protobuf::MethodDescriptor& service_method_; + const envoy::config::core::v3::ApiVersion transport_api_version_; }; +using GrpcMetricsStreamerImplPtr = 
std::unique_ptr; + /** * Stat Sink implementation of Metrics Service. */ @@ -73,11 +80,11 @@ class MetricsServiceSink : public Stats::Sink { public: // MetricsService::Sink MetricsServiceSink(const GrpcMetricsStreamerSharedPtr& grpc_metrics_streamer, - TimeSource& time_system); + TimeSource& time_system, const bool report_counters_as_deltas); void flush(Stats::MetricSnapshot& snapshot) override; void onHistogramComplete(const Stats::Histogram&, uint64_t) override {} - void flushCounter(const Stats::Counter& counter); + void flushCounter(const Stats::MetricSnapshot::CounterSnapshot& counter_snapshot); void flushGauge(const Stats::Gauge& gauge); void flushHistogram(const Stats::ParentHistogram& envoy_histogram); @@ -85,6 +92,7 @@ class MetricsServiceSink : public Stats::Sink { GrpcMetricsStreamerSharedPtr grpc_metrics_streamer_; envoy::service::metrics::v3::StreamMetricsMessage message_; TimeSource& time_source_; + const bool report_counters_as_deltas_; }; } // namespace MetricsService diff --git a/source/extensions/stat_sinks/statsd/BUILD b/source/extensions/stat_sinks/statsd/BUILD index 5ec22566d12bc..0a8ed4648bca3 100644 --- a/source/extensions/stat_sinks/statsd/BUILD +++ b/source/extensions/stat_sinks/statsd/BUILD @@ -1,20 +1,25 @@ -licenses(["notice"]) # Apache 2 - -# Stats sink for the basic version of the statsd protocol (https://github.com/b/statsd_spec). - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +# Stats sink for the basic version of the statsd protocol (https://github.com/b/statsd_spec). + +envoy_extension_package() envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], security_posture = "data_plane_agnostic", + # Legacy test use. TODO(#9953) clean up. 
+ visibility = [ + "//:extension_config", + "//test/server:__subpackages__", + ], deps = [ "//include/envoy/registry", "//source/common/network:address_lib", diff --git a/source/extensions/stat_sinks/statsd/config.cc b/source/extensions/stat_sinks/statsd/config.cc index fa0c1e758e618..3cbea7f511a26 100644 --- a/source/extensions/stat_sinks/statsd/config.cc +++ b/source/extensions/stat_sinks/statsd/config.cc @@ -16,8 +16,9 @@ namespace Extensions { namespace StatSinks { namespace Statsd { -Stats::SinkPtr StatsdSinkFactory::createStatsSink(const Protobuf::Message& config, - Server::Instance& server) { +Stats::SinkPtr +StatsdSinkFactory::createStatsSink(const Protobuf::Message& config, + Server::Configuration::ServerFactoryContext& server) { const auto& statsd_sink = MessageUtil::downcastAndValidate( @@ -34,7 +35,7 @@ Stats::SinkPtr StatsdSinkFactory::createStatsSink(const Protobuf::Message& confi ENVOY_LOG(debug, "statsd TCP cluster: {}", statsd_sink.tcp_cluster_name()); return std::make_unique( server.localInfo(), statsd_sink.tcp_cluster_name(), server.threadLocal(), - server.clusterManager(), server.stats(), statsd_sink.prefix()); + server.clusterManager(), server.scope(), statsd_sink.prefix()); default: // Verified by schema. 
NOT_REACHED_GCOVR_EXCL_LINE; diff --git a/source/extensions/stat_sinks/statsd/config.h b/source/extensions/stat_sinks/statsd/config.h index 591308a70ef41..3a709715b2bdb 100644 --- a/source/extensions/stat_sinks/statsd/config.h +++ b/source/extensions/stat_sinks/statsd/config.h @@ -17,13 +17,15 @@ class StatsdSinkFactory : Logger::Loggable, public: // StatsSinkFactory Stats::SinkPtr createStatsSink(const Protobuf::Message& config, - Server::Instance& server) override; + Server::Configuration::ServerFactoryContext& server) override; ProtobufTypes::MessagePtr createEmptyConfigProto() override; std::string name() const override; }; +DECLARE_FACTORY(StatsdSinkFactory); + } // namespace Statsd } // namespace StatSinks } // namespace Extensions diff --git a/source/extensions/tracers/BUILD b/source/extensions/tracers/BUILD deleted file mode 100644 index 6156949edef64..0000000000000 --- a/source/extensions/tracers/BUILD +++ /dev/null @@ -1,17 +0,0 @@ -licenses(["notice"]) # Apache 2 - -load( - "//bazel:envoy_build_system.bzl", - "envoy_cc_library", - "envoy_package", -) - -envoy_package() - -envoy_cc_library( - name = "well_known_names", - hdrs = ["well_known_names.h"], - deps = [ - "//source/common/singleton:const_singleton", - ], -) diff --git a/source/extensions/tracers/common/BUILD b/source/extensions/tracers/common/BUILD index 04a67fdad5f46..450aef98b536f 100644 --- a/source/extensions/tracers/common/BUILD +++ b/source/extensions/tracers/common/BUILD @@ -1,12 +1,12 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +envoy_extension_package() envoy_cc_library( name = "factory_base_lib", diff --git a/source/extensions/tracers/common/ot/BUILD b/source/extensions/tracers/common/ot/BUILD index 29dd62e655f8a..beced5b3f219e 100644 --- a/source/extensions/tracers/common/ot/BUILD +++ b/source/extensions/tracers/common/ot/BUILD @@ 
-1,12 +1,12 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +envoy_extension_package() envoy_cc_library( name = "opentracing_driver_lib", diff --git a/source/extensions/tracers/common/ot/opentracing_driver_impl.cc b/source/extensions/tracers/common/ot/opentracing_driver_impl.cc index cf2f798d9b14e..cad01b83bb833 100644 --- a/source/extensions/tracers/common/ot/opentracing_driver_impl.cc +++ b/source/extensions/tracers/common/ot/opentracing_driver_impl.cc @@ -15,6 +15,9 @@ namespace Tracers { namespace Common { namespace Ot { +Http::RegisterCustomInlineHeader + ot_span_context_handle(Http::CustomHeaders::get().OtSpanContext); + namespace { class OpenTracingHTTPHeadersWriter : public opentracing::HTTPHeadersWriter { public: @@ -45,41 +48,36 @@ class OpenTracingHTTPHeadersReader : public opentracing::HTTPHeadersReader { // opentracing::HTTPHeadersReader opentracing::expected LookupKey(opentracing::string_view key) const override { - const Http::HeaderEntry* entry; - Http::HeaderMap::Lookup lookup_result = - request_headers_.lookup(Http::LowerCaseString{key}, &entry); - switch (lookup_result) { - case Http::HeaderMap::Lookup::Found: + const Http::HeaderEntry* entry = request_headers_.get(Http::LowerCaseString{key}); + if (entry != nullptr) { return opentracing::string_view{entry->value().getStringView().data(), entry->value().getStringView().length()}; - case Http::HeaderMap::Lookup::NotFound: + } else { return opentracing::make_unexpected(opentracing::key_not_found_error); - case Http::HeaderMap::Lookup::NotSupported: - return opentracing::make_unexpected(opentracing::lookup_key_not_supported_error); } - NOT_REACHED_GCOVR_EXCL_LINE; } opentracing::expected ForeachKey(OpenTracingCb f) const override { - request_headers_.iterate(headerMapCallback, static_cast(&f)); + request_headers_.iterate(headerMapCallback(f)); return 
{}; } private: const Http::RequestHeaderMap& request_headers_; - static Http::HeaderMap::Iterate headerMapCallback(const Http::HeaderEntry& header, - void* context) { - auto* callback = static_cast(context); - opentracing::string_view key{header.key().getStringView().data(), - header.key().getStringView().length()}; - opentracing::string_view value{header.value().getStringView().data(), - header.value().getStringView().length()}; - if ((*callback)(key, value)) { - return Http::HeaderMap::Iterate::Continue; - } else { - return Http::HeaderMap::Iterate::Break; - } + static Http::HeaderMap::ConstIterateCb headerMapCallback(OpenTracingCb callback) { + return [callback = + std::move(callback)](const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate { + opentracing::string_view key{header.key().getStringView().data(), + header.key().getStringView().length()}; + opentracing::string_view value{header.value().getStringView().data(), + header.value().getStringView().length()}; + if (callback(key, value)) { + return Http::HeaderMap::Iterate::Continue; + } else { + return Http::HeaderMap::Iterate::Break; + } + }; } }; } // namespace @@ -104,6 +102,14 @@ void OpenTracingSpan::log(SystemTime timestamp, const std::string& event) { finish_options_.log_records.emplace_back(std::move(record)); } +void OpenTracingSpan::setBaggage(absl::string_view key, absl::string_view value) { + span_->SetBaggageItem({key.data(), key.length()}, {value.data(), value.length()}); +} + +std::string OpenTracingSpan::getBaggage(absl::string_view key) { + return span_->BaggageItem({key.data(), key.length()}); +} + void OpenTracingSpan::injectContext(Http::RequestHeaderMap& request_headers) { if (driver_.propagationMode() == OpenTracingDriver::PropagationMode::SingleHeader) { // Inject the span context using Envoy's single-header format. 
@@ -116,7 +122,8 @@ void OpenTracingSpan::injectContext(Http::RequestHeaderMap& request_headers) { return; } const std::string current_span_context = oss.str(); - request_headers.setOtSpanContext( + request_headers.setInline( + ot_span_context_handle.handle(), Base64::encode(current_span_context.c_str(), current_span_context.length())); } else { // Inject the context using the tracer's standard HTTP header format. @@ -155,10 +162,11 @@ Tracing::SpanPtr OpenTracingDriver::startSpan(const Tracing::Config& config, const opentracing::Tracer& tracer = this->tracer(); std::unique_ptr active_span; std::unique_ptr parent_span_ctx; - if (propagation_mode == PropagationMode::SingleHeader && request_headers.OtSpanContext()) { + if (propagation_mode == PropagationMode::SingleHeader && + request_headers.getInline(ot_span_context_handle.handle())) { opentracing::expected> parent_span_ctx_maybe; - std::string parent_context = - Base64::decode(std::string(request_headers.OtSpanContext()->value().getStringView())); + std::string parent_context = Base64::decode( + std::string(request_headers.getInlineValue(ot_span_context_handle.handle()))); if (!parent_context.empty()) { InputConstMemoryStream istream{parent_context.data(), parent_context.size()}; diff --git a/source/extensions/tracers/common/ot/opentracing_driver_impl.h b/source/extensions/tracers/common/ot/opentracing_driver_impl.h index d99ad7444dc51..2bfbddfe1886f 100644 --- a/source/extensions/tracers/common/ot/opentracing_driver_impl.h +++ b/source/extensions/tracers/common/ot/opentracing_driver_impl.h @@ -40,6 +40,8 @@ class OpenTracingSpan : public Tracing::Span, Logger::Loggable& tracer, Driver::Driver(const envoy::config::trace::v3::DatadogConfig& datadog_config, Upstream::ClusterManager& cluster_manager, Stats::Scope& scope, - ThreadLocal::SlotAllocator& tls, Runtime::Loader& runtime) + ThreadLocal::SlotAllocator& tls, Runtime::Loader&) : OpenTracingDriver{scope}, cm_(cluster_manager), tracer_stats_{DATADOG_TRACER_STATS( 
POOL_COUNTER_PREFIX(scope, "tracing.datadog."))}, - tls_(tls.allocateSlot()), runtime_(runtime) { + tls_(tls.allocateSlot()) { - Config::Utility::checkCluster(TracerNames::get().Datadog, datadog_config.collector_cluster(), cm_, + Config::Utility::checkCluster("envoy.tracers.datadog", datadog_config.collector_cluster(), cm_, /* allow_added_via_api */ true); cluster_ = datadog_config.collector_cluster(); // Default tracer options. + tracer_options_.version = absl::StrCat("envoy ", Envoy::VersionInfo::version()); tracer_options_.operation_name_override = "envoy.proxy"; tracer_options_.service = "envoy"; tracer_options_.inject = std::set{ diff --git a/source/extensions/tracers/datadog/datadog_tracer_impl.h b/source/extensions/tracers/datadog/datadog_tracer_impl.h index 5cdb482543bcf..b3dc01d6a7cfc 100644 --- a/source/extensions/tracers/datadog/datadog_tracer_impl.h +++ b/source/extensions/tracers/datadog/datadog_tracer_impl.h @@ -52,9 +52,7 @@ class Driver : public Common::Ot::OpenTracingDriver { // Getters to return the DatadogDriver's key members. Upstream::ClusterManager& clusterManager() { return cm_; } const std::string& cluster() { return cluster_; } - Runtime::Loader& runtime() { return runtime_; } DatadogTracerStats& tracerStats() { return tracer_stats_; } - const datadog::opentracing::TracerOptions& tracerOptions() { return tracer_options_; } // Tracer::OpenTracingDriver opentracing::Tracer& tracer() override; @@ -80,7 +78,6 @@ class Driver : public Common::Ot::OpenTracingDriver { DatadogTracerStats tracer_stats_; datadog::opentracing::TracerOptions tracer_options_; ThreadLocal::SlotPtr tls_; - Runtime::Loader& runtime_; }; /** @@ -112,6 +109,7 @@ class TraceReporter : public Http::AsyncClient::Callbacks, // Http::AsyncClient::Callbacks. 
void onSuccess(const Http::AsyncClient::Request&, Http::ResponseMessagePtr&&) override; void onFailure(const Http::AsyncClient::Request&, Http::AsyncClient::FailureReason) override; + void onBeforeFinalizeUpstreamSpan(Tracing::Span&, const Http::ResponseHeaderMap*) override {} private: /** diff --git a/source/extensions/tracers/dynamic_ot/BUILD b/source/extensions/tracers/dynamic_ot/BUILD index 4302159453d8f..95b903be987d6 100644 --- a/source/extensions/tracers/dynamic_ot/BUILD +++ b/source/extensions/tracers/dynamic_ot/BUILD @@ -1,15 +1,15 @@ -licenses(["notice"]) # Apache 2 - -# Trace driver for dynamically loadable C++ OpenTracing drivers (http://opentracing.io/). - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +# Trace driver for dynamically loadable C++ OpenTracing drivers (http://opentracing.io/). + +envoy_extension_package() envoy_cc_library( name = "dynamic_opentracing_driver_lib", @@ -32,7 +32,6 @@ envoy_cc_extension( security_posture = "robust_to_untrusted_downstream", deps = [ ":dynamic_opentracing_driver_lib", - "//source/extensions/tracers:well_known_names", "//source/extensions/tracers/common:factory_base_lib", "@envoy_api//envoy/config/trace/v3:pkg_cc_proto", ], diff --git a/source/extensions/tracers/dynamic_ot/config.cc b/source/extensions/tracers/dynamic_ot/config.cc index c9667ac2a5d67..f8ddf4ceeeb5c 100644 --- a/source/extensions/tracers/dynamic_ot/config.cc +++ b/source/extensions/tracers/dynamic_ot/config.cc @@ -8,7 +8,6 @@ #include "common/tracing/http_tracer_impl.h" #include "extensions/tracers/dynamic_ot/dynamic_opentracing_driver_impl.h" -#include "extensions/tracers/well_known_names.h" namespace Envoy { namespace Extensions { @@ -16,7 +15,7 @@ namespace Tracers { namespace DynamicOt { DynamicOpenTracingTracerFactory::DynamicOpenTracingTracerFactory() - : FactoryBase(TracerNames::get().DynamicOt) {} + : 
FactoryBase("envoy.tracers.dynamic_ot") {} Tracing::HttpTracerSharedPtr DynamicOpenTracingTracerFactory::createHttpTracerTyped( const envoy::config::trace::v3::DynamicOtConfig& proto_config, diff --git a/source/extensions/tracers/lightstep/BUILD b/source/extensions/tracers/lightstep/BUILD index a72d39b37376a..6c287b4a75fe6 100644 --- a/source/extensions/tracers/lightstep/BUILD +++ b/source/extensions/tracers/lightstep/BUILD @@ -1,15 +1,15 @@ -licenses(["notice"]) # Apache 2 - -# Trace driver for LightStep (https://lightstep.com/) - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +# Trace driver for LightStep (https://lightstep.com/) + +envoy_extension_package() envoy_cc_library( name = "lightstep_tracer_lib", @@ -26,7 +26,6 @@ envoy_cc_library( "//source/common/stats:symbol_table_lib", "//source/common/tracing:http_tracer_lib", "//source/common/upstream:cluster_update_tracker_lib", - "//source/extensions/tracers:well_known_names", "//source/extensions/tracers/common/ot:opentracing_driver_lib", "@envoy_api//envoy/config/trace/v3:pkg_cc_proto", ], @@ -39,7 +38,6 @@ envoy_cc_extension( security_posture = "robust_to_untrusted_downstream", deps = [ ":lightstep_tracer_lib", - "//source/extensions/tracers:well_known_names", "//source/extensions/tracers/common:factory_base_lib", "@envoy_api//envoy/config/trace/v3:pkg_cc_proto", ], diff --git a/source/extensions/tracers/lightstep/config.cc b/source/extensions/tracers/lightstep/config.cc index 52509819dcaf7..3a636b76dd9ab 100644 --- a/source/extensions/tracers/lightstep/config.cc +++ b/source/extensions/tracers/lightstep/config.cc @@ -8,7 +8,6 @@ #include "common/tracing/http_tracer_impl.h" #include "extensions/tracers/lightstep/lightstep_tracer_impl.h" -#include "extensions/tracers/well_known_names.h" #include "lightstep/tracer.h" @@ -17,7 +16,7 @@ namespace Extensions { namespace Tracers { 
namespace Lightstep { -LightstepTracerFactory::LightstepTracerFactory() : FactoryBase(TracerNames::get().Lightstep) {} +LightstepTracerFactory::LightstepTracerFactory() : FactoryBase("envoy.tracers.lightstep") {} Tracing::HttpTracerSharedPtr LightstepTracerFactory::createHttpTracerTyped( const envoy::config::trace::v3::LightstepConfig& proto_config, diff --git a/source/extensions/tracers/lightstep/lightstep_tracer_impl.cc b/source/extensions/tracers/lightstep/lightstep_tracer_impl.cc index 9cafe7e8a9fa6..e66a3c3558458 100644 --- a/source/extensions/tracers/lightstep/lightstep_tracer_impl.cc +++ b/source/extensions/tracers/lightstep/lightstep_tracer_impl.cc @@ -16,8 +16,6 @@ #include "common/http/message_impl.h" #include "common/tracing/http_tracer_impl.h" -#include "extensions/tracers/well_known_names.h" - namespace Envoy { namespace Extensions { namespace Tracers { @@ -198,7 +196,7 @@ LightStepDriver::LightStepDriver(const envoy::config::trace::v3::LightstepConfig pool_.add(lightstep::CollectorServiceFullName()), pool_.add(lightstep::CollectorMethodName())} { - Config::Utility::checkCluster(TracerNames::get().Lightstep, lightstep_config.collector_cluster(), + Config::Utility::checkCluster("envoy.tracers.lightstep", lightstep_config.collector_cluster(), cm_, /* allow_added_via_api */ true); cluster_ = lightstep_config.collector_cluster(); diff --git a/source/extensions/tracers/lightstep/lightstep_tracer_impl.h b/source/extensions/tracers/lightstep/lightstep_tracer_impl.h index 5a67bc8575b81..e99d92b5346e9 100644 --- a/source/extensions/tracers/lightstep/lightstep_tracer_impl.h +++ b/source/extensions/tracers/lightstep/lightstep_tracer_impl.h @@ -95,6 +95,7 @@ class LightStepDriver : public Common::Ot::OpenTracingDriver { void onSuccess(const Http::AsyncClient::Request&, Http::ResponseMessagePtr&& response) override; void onFailure(const Http::AsyncClient::Request&, Http::AsyncClient::FailureReason failure_reason) override; + void 
onBeforeFinalizeUpstreamSpan(Tracing::Span&, const Http::ResponseHeaderMap*) override {} private: std::unique_ptr active_report_; diff --git a/source/extensions/tracers/opencensus/BUILD b/source/extensions/tracers/opencensus/BUILD index 0956ae5cce3af..2513be7249f6a 100644 --- a/source/extensions/tracers/opencensus/BUILD +++ b/source/extensions/tracers/opencensus/BUILD @@ -1,15 +1,16 @@ -licenses(["notice"]) # Apache 2 - -# Trace driver for OpenCensus: https://opencensus.io/ - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", + "envoy_select_google_grpc", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +# Trace driver for OpenCensus: https://opencensus.io/ + +envoy_extension_package() envoy_cc_extension( name = "config", @@ -18,7 +19,6 @@ envoy_cc_extension( security_posture = "robust_to_untrusted_downstream", deps = [ ":opencensus_tracer_impl", - "//source/extensions/tracers:well_known_names", "//source/extensions/tracers/common:factory_base_lib", "@envoy_api//envoy/config/trace/v3:pkg_cc_proto", ], @@ -42,8 +42,7 @@ envoy_cc_library( ], deps = [ "//source/common/config:utility_lib", - "//source/common/grpc:google_async_client_lib", "//source/common/tracing:http_tracer_lib", "@envoy_api//envoy/config/trace/v3:pkg_cc_proto", - ], + ] + envoy_select_google_grpc(["//source/common/grpc:google_async_client_lib"]), ) diff --git a/source/extensions/tracers/opencensus/config.cc b/source/extensions/tracers/opencensus/config.cc index af778ad04f7cb..24a439a98a650 100644 --- a/source/extensions/tracers/opencensus/config.cc +++ b/source/extensions/tracers/opencensus/config.cc @@ -7,14 +7,13 @@ #include "common/tracing/http_tracer_impl.h" #include "extensions/tracers/opencensus/opencensus_tracer_impl.h" -#include "extensions/tracers/well_known_names.h" namespace Envoy { namespace Extensions { namespace Tracers { namespace OpenCensus { -OpenCensusTracerFactory::OpenCensusTracerFactory() : 
FactoryBase(TracerNames::get().OpenCensus) {} +OpenCensusTracerFactory::OpenCensusTracerFactory() : FactoryBase("envoy.tracers.opencensus") {} Tracing::HttpTracerSharedPtr OpenCensusTracerFactory::createHttpTracerTyped( const envoy::config::trace::v3::OpenCensusConfig& proto_config, diff --git a/source/extensions/tracers/opencensus/opencensus_tracer_impl.cc b/source/extensions/tracers/opencensus/opencensus_tracer_impl.cc index c39d4ebddd56d..41bd08e03d446 100644 --- a/source/extensions/tracers/opencensus/opencensus_tracer_impl.cc +++ b/source/extensions/tracers/opencensus/opencensus_tracer_impl.cc @@ -6,7 +6,6 @@ #include "envoy/http/header_map.h" #include "common/common/base64.h" -#include "common/grpc/google_grpc_utils.h" #include "absl/strings/str_cat.h" #include "google/devtools/cloudtrace/v2/tracing.grpc.pb.h" @@ -24,12 +23,18 @@ #include "opencensus/trace/trace_config.h" #include "opencensus/trace/trace_params.h" +#ifdef ENVOY_GOOGLE_GRPC +#include "common/grpc/google_grpc_utils.h" +#endif + namespace Envoy { namespace Extensions { namespace Tracers { namespace OpenCensus { +#ifdef ENVOY_GOOGLE_GRPC constexpr char GoogleStackdriverTraceAddress[] = "cloudtrace.googleapis.com"; +#endif namespace { @@ -68,6 +73,10 @@ class Span : public Tracing::Span { SystemTime start_time) override; void setSampled(bool sampled) override; + // OpenCensus doesn't support baggage, so noop these OpenTracing functions. 
+ void setBaggage(absl::string_view, absl::string_view) override{}; + std::string getBaggage(absl::string_view) override { return std::string(); }; + private: ::opencensus::trace::Span span_; const envoy::config::trace::v3::OpenCensusConfig& oc_config_; @@ -267,6 +276,7 @@ Driver::Driver(const envoy::config::trace::v3::OpenCensusConfig& oc_config, opts.trace_service_stub = ::google::devtools::cloudtrace::v2::TraceService::NewStub(channel); } else if (oc_config.has_stackdriver_grpc_service() && oc_config.stackdriver_grpc_service().has_google_grpc()) { +#ifdef ENVOY_GOOGLE_GRPC envoy::config::core::v3::GrpcService stackdriver_service = oc_config.stackdriver_grpc_service(); if (stackdriver_service.google_grpc().target_uri().empty()) { @@ -275,7 +285,21 @@ Driver::Driver(const envoy::config::trace::v3::OpenCensusConfig& oc_config, stackdriver_service.mutable_google_grpc()->set_target_uri(GoogleStackdriverTraceAddress); } auto channel = Envoy::Grpc::GoogleGrpcUtils::createChannel(stackdriver_service, api); + // TODO(bianpengyuan): add tests for trace_service_stub and initial_metadata options with mock + // stubs. 
opts.trace_service_stub = ::google::devtools::cloudtrace::v2::TraceService::NewStub(channel); + const auto& initial_metadata = stackdriver_service.initial_metadata(); + if (!initial_metadata.empty()) { + opts.prepare_client_context = [initial_metadata](grpc::ClientContext* ctx) { + for (const auto& metadata : initial_metadata) { + ctx->AddMetadata(metadata.key(), metadata.value()); + } + }; + } +#else + throw EnvoyException("Opencensus tracer: cannot handle stackdriver google grpc service, " + "google grpc is not built in."); +#endif } ::opencensus::exporters::trace::StackdriverExporter::Register(std::move(opts)); } @@ -290,11 +314,16 @@ Driver::Driver(const envoy::config::trace::v3::OpenCensusConfig& oc_config, opts.address = oc_config.ocagent_address(); } else if (oc_config.has_ocagent_grpc_service() && oc_config.ocagent_grpc_service().has_google_grpc()) { +#ifdef ENVOY_GOOGLE_GRPC const envoy::config::core::v3::GrpcService& ocagent_service = oc_config.ocagent_grpc_service(); auto channel = Envoy::Grpc::GoogleGrpcUtils::createChannel(ocagent_service, api); opts.trace_service_stub = ::opencensus::proto::agent::trace::v1::TraceService::NewStub(channel); +#else + throw EnvoyException("Opencensus tracer: cannot handle ocagent google grpc service, google " + "grpc is not built in."); +#endif } opts.service_name = local_info_.clusterName(); ::opencensus::exporters::trace::OcAgentExporter::Register(std::move(opts)); diff --git a/source/extensions/tracers/well_known_names.h b/source/extensions/tracers/well_known_names.h deleted file mode 100644 index 8a83cdf21d02f..0000000000000 --- a/source/extensions/tracers/well_known_names.h +++ /dev/null @@ -1,34 +0,0 @@ -#pragma once -#include - -#include "common/singleton/const_singleton.h" - -namespace Envoy { -namespace Extensions { -namespace Tracers { - -/** - * Well-known tracer names. - * NOTE: New tracers should use the well known name: envoy.tracers.name. 
- */ -class TracerNameValues { -public: - // Lightstep tracer - const std::string Lightstep = "envoy.tracers.lightstep"; - // Zipkin tracer - const std::string Zipkin = "envoy.tracers.zipkin"; - // Dynamic tracer - const std::string DynamicOt = "envoy.tracers.dynamic_ot"; - // Datadog tracer - const std::string Datadog = "envoy.tracers.datadog"; - // OpenCensus tracer - const std::string OpenCensus = "envoy.tracers.opencensus"; - // AWS XRay tracer - const std::string XRay = "envoy.tracers.xray"; -}; - -using TracerNames = ConstSingleton; - -} // namespace Tracers -} // namespace Extensions -} // namespace Envoy diff --git a/source/extensions/tracers/xray/BUILD b/source/extensions/tracers/xray/BUILD index f225797780d3d..ef486aaac4eb1 100644 --- a/source/extensions/tracers/xray/BUILD +++ b/source/extensions/tracers/xray/BUILD @@ -1,16 +1,16 @@ -licenses(["notice"]) # Apache 2 - -# Trace driver for AWS X-Ray. - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", "envoy_proto_library", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +# Trace driver for AWS X-Ray. 
+ +envoy_extension_package() envoy_proto_library( name = "daemon", @@ -61,7 +61,6 @@ envoy_cc_extension( deps = [ ":xray_lib", "//source/common/config:datasource_lib", - "//source/extensions/tracers:well_known_names", "//source/extensions/tracers/common:factory_base_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", "@envoy_api//envoy/config/trace/v3:pkg_cc_proto", diff --git a/source/extensions/tracers/xray/config.cc b/source/extensions/tracers/xray/config.cc index ad4bfc0ebcfe0..b9af01f887ad4 100644 --- a/source/extensions/tracers/xray/config.cc +++ b/source/extensions/tracers/xray/config.cc @@ -11,7 +11,6 @@ #include "common/config/datasource.h" #include "common/tracing/http_tracer_impl.h" -#include "extensions/tracers/well_known_names.h" #include "extensions/tracers/xray/xray_tracer_impl.h" namespace Envoy { @@ -19,7 +18,7 @@ namespace Extensions { namespace Tracers { namespace XRay { -XRayTracerFactory::XRayTracerFactory() : FactoryBase(TracerNames::get().XRay) {} +XRayTracerFactory::XRayTracerFactory() : FactoryBase("envoy.tracers.xray") {} Tracing::HttpTracerSharedPtr XRayTracerFactory::createHttpTracerTyped(const envoy::config::trace::v3::XRayConfig& proto_config, diff --git a/source/extensions/tracers/xray/daemon.proto b/source/extensions/tracers/xray/daemon.proto index 78594a0b5985c..d19563a5ddf5c 100644 --- a/source/extensions/tracers/xray/daemon.proto +++ b/source/extensions/tracers/xray/daemon.proto @@ -5,6 +5,7 @@ syntax = "proto3"; package source.extensions.tracers.xray.daemon; import "validate/validate.proto"; +import "google/protobuf/struct.proto"; // see https://docs.aws.amazon.com/xray/latest/devguide/xray-api-segmentdocuments.html message Segment { @@ -14,12 +15,12 @@ message Segment { double start_time = 4 [(validate.rules).double = {gt: 0}]; double end_time = 5 [(validate.rules).double = {gt: 0}]; string parent_id = 6; - map annotations = 7; - http_annotations http = 8; + http_annotations http = 7; message http_annotations { - map request 
= 1; - map response = 2; + google.protobuf.Struct request = 1; + google.protobuf.Struct response = 2; } + map annotations = 8; } message Header { diff --git a/source/extensions/tracers/xray/daemon_broker.cc b/source/extensions/tracers/xray/daemon_broker.cc index d5667c423aa82..9772fbe0073df 100644 --- a/source/extensions/tracers/xray/daemon_broker.cc +++ b/source/extensions/tracers/xray/daemon_broker.cc @@ -3,7 +3,9 @@ #include "envoy/network/address.h" #include "common/buffer/buffer_impl.h" +#include "common/network/socket_interface_impl.h" #include "common/network/utility.h" +#include "common/protobuf/utility.h" #include "source/extensions/tracers/xray/daemon.pb.h" @@ -20,20 +22,15 @@ std::string createHeader(const std::string& format, uint32_t version) { source::extensions::tracers::xray::daemon::Header header; header.set_format(format); header.set_version(version); - - Protobuf::util::JsonPrintOptions json_options; - json_options.preserve_proto_field_names = true; - std::string json; - const auto status = Protobuf::util::MessageToJsonString(header, &json, json_options); - ASSERT(status.ok()); - return json; + return MessageUtil::getJsonStringFromMessage(header, false /* pretty_print */, + false /* always_print_primitive_fields */); } } // namespace DaemonBrokerImpl::DaemonBrokerImpl(const std::string& daemon_endpoint) : address_(Network::Utility::parseInternetAddressAndPort(daemon_endpoint, false /*v6only*/)), - io_handle_(address_->socket(Network::Address::SocketType::Datagram)) {} + io_handle_(Network::ioHandleForAddr(Network::Socket::Type::Datagram, address_)) {} void DaemonBrokerImpl::send(const std::string& data) const { auto& logger = Logger::Registry::getLog(Logger::Id::tracing); diff --git a/source/extensions/tracers/xray/localized_sampling.h b/source/extensions/tracers/xray/localized_sampling.h index 709ec144a32be..f622d9f8874b6 100644 --- a/source/extensions/tracers/xray/localized_sampling.h +++ b/source/extensions/tracers/xray/localized_sampling.h @@ 
-5,9 +5,9 @@ #include #include "envoy/common/time.h" -#include "envoy/runtime/runtime.h" #include "common/common/logger.h" +#include "common/common/random_generator.h" #include "extensions/tracers/xray/reservoir.h" #include "extensions/tracers/xray/sampling_strategy.h" @@ -74,13 +74,7 @@ class LocalizedSamplingRule { * Set the percentage of requests to sample _after_ sampling |fixed_target| requests per second. */ void setRate(double rate) { rate_ = rate; } - - const std::string& host() const { return host_; } - const std::string& httpMethod() const { return http_method_; } - const std::string& urlPath() const { return url_path_; } - uint32_t fixedTarget() const { return fixed_target_; } double rate() const { return rate_; } - const Reservoir& reservoir() const { return reservoir_; } Reservoir& reservoir() { return reservoir_; } private: @@ -143,7 +137,7 @@ class LocalizedSamplingManifest { class LocalizedSamplingStrategy : public SamplingStrategy { public: - LocalizedSamplingStrategy(const std::string& sampling_rules_json, Runtime::RandomGenerator& rng, + LocalizedSamplingStrategy(const std::string& sampling_rules_json, Random::RandomGenerator& rng, TimeSource& time_source) : SamplingStrategy(rng), default_manifest_(LocalizedSamplingManifest::createDefault()), custom_manifest_(sampling_rules_json), time_source_(time_source), diff --git a/source/extensions/tracers/xray/sampling_strategy.h b/source/extensions/tracers/xray/sampling_strategy.h index 2cb488a2c33ff..908c28c6414e1 100644 --- a/source/extensions/tracers/xray/sampling_strategy.h +++ b/source/extensions/tracers/xray/sampling_strategy.h @@ -3,7 +3,7 @@ #include #include "envoy/common/pure.h" -#include "envoy/runtime/runtime.h" +#include "envoy/common/random_generator.h" #include "common/common/macros.h" @@ -25,7 +25,7 @@ struct SamplingRequest { */ class SamplingStrategy { public: - explicit SamplingStrategy(Runtime::RandomGenerator& rng) : rng_(rng) {} + explicit SamplingStrategy(Random::RandomGenerator& 
rng) : rng_(rng) {} virtual ~SamplingStrategy() = default; /** @@ -38,7 +38,7 @@ class SamplingStrategy { uint64_t random() const { return rng_.random(); } private: - Runtime::RandomGenerator& rng_; + Random::RandomGenerator& rng_; }; using SamplingStrategyPtr = std::unique_ptr; diff --git a/source/extensions/tracers/xray/tracer.cc b/source/extensions/tracers/xray/tracer.cc index 4fdfb26fdfee5..3dfb0c8cf75e7 100644 --- a/source/extensions/tracers/xray/tracer.cc +++ b/source/extensions/tracers/xray/tracer.cc @@ -10,8 +10,9 @@ #include "common/common/assert.h" #include "common/common/fmt.h" #include "common/common/hex.h" +#include "common/common/random_generator.h" #include "common/protobuf/message_validator_impl.h" -#include "common/runtime/runtime_impl.h" +#include "common/protobuf/utility.h" #include "source/extensions/tracers/xray/daemon.pb.validate.h" @@ -38,7 +39,7 @@ constexpr auto XRaySerializationVersion = "1"; std::string generateTraceId(SystemTime point_in_time) { using std::chrono::seconds; using std::chrono::time_point_cast; - Runtime::RandomGeneratorImpl rng; + Random::RandomGeneratorImpl rng; const auto epoch = time_point_cast(point_in_time).time_since_epoch().count(); std::string out; out.reserve(35); @@ -70,23 +71,25 @@ void Span::finishSpan() { daemon::Segment s; s.set_name(name()); - s.set_id(Id()); + s.set_id(id()); s.set_trace_id(traceId()); s.set_start_time(time_point_cast(startTime()).time_since_epoch().count()); s.set_end_time( time_point_cast(time_source_.systemTime()).time_since_epoch().count()); s.set_parent_id(parentId()); - using KeyValue = Protobuf::Map::value_type; - for (const auto& item : custom_annotations_) { - s.mutable_annotations()->insert(KeyValue{item.first, item.second}); + + auto* request_fields = s.mutable_http()->mutable_request()->mutable_fields(); + for (const auto& field : http_request_annotations_) { + request_fields->insert({field.first, field.second}); } - for (const auto& item : http_request_annotations_) { - 
s.mutable_http()->mutable_request()->insert(KeyValue{item.first, item.second}); + auto* response_fields = s.mutable_http()->mutable_response()->mutable_fields(); + for (const auto& field : http_response_annotations_) { + response_fields->insert({field.first, field.second}); } - for (const auto& item : http_response_annotations_) { - s.mutable_http()->mutable_response()->insert(KeyValue{item.first, item.second}); + for (const auto& item : custom_annotations_) { + s.mutable_annotations()->insert({item.first, item.second}); } const std::string json = MessageUtil::getJsonStringFromMessage( @@ -97,7 +100,7 @@ void Span::finishSpan() { void Span::injectContext(Http::RequestHeaderMap& request_headers) { const std::string xray_header_value = - fmt::format("Root={};Parent={};Sampled={}", traceId(), Id(), sampled() ? "1" : "0"); + fmt::format("Root={};Parent={};Sampled={}", traceId(), id(), sampled() ? "1" : "0"); request_headers.setCopy(Http::LowerCaseString(XRayTraceHeader), xray_header_value); } @@ -109,7 +112,7 @@ Tracing::SpanPtr Span::spawnChild(const Tracing::Config&, const std::string& ope child_span->setName(name()); child_span->setOperation(operation_name); child_span->setStartTime(start_time); - child_span->setParentId(Id()); + child_span->setParentId(id()); child_span->setTraceId(traceId()); child_span->setSampled(sampled()); return child_span; @@ -179,20 +182,30 @@ void Span::setTag(absl::string_view name, absl::string_view value) { } if (name == HttpUrl) { - http_request_annotations_.emplace(SpanUrl, value); + http_request_annotations_.emplace(SpanUrl, ValueUtil::stringValue(std::string(value))); } else if (name == HttpMethod) { - http_request_annotations_.emplace(SpanMethod, value); + http_request_annotations_.emplace(SpanMethod, ValueUtil::stringValue(std::string(value))); } else if (name == HttpUserAgent) { - http_request_annotations_.emplace(SpanUserAgent, value); + http_request_annotations_.emplace(SpanUserAgent, 
ValueUtil::stringValue(std::string(value))); } else if (name == HttpStatusCode) { - http_response_annotations_.emplace(SpanStatus, value); + uint64_t status_code; + if (!absl::SimpleAtoi(value, &status_code)) { + ENVOY_LOG(debug, "{} must be a number, given: {}", HttpStatusCode, value); + return; + } + http_response_annotations_.emplace(SpanStatus, ValueUtil::numberValue(status_code)); } else if (name == HttpResponseSize) { - http_response_annotations_.emplace(SpanContentLength, value); + uint64_t response_size; + if (!absl::SimpleAtoi(value, &response_size)) { + ENVOY_LOG(debug, "{} must be a number, given: {}", HttpResponseSize, value); + return; + } + http_response_annotations_.emplace(SpanContentLength, ValueUtil::numberValue(response_size)); } else if (name == PeerAddress) { - http_request_annotations_.emplace(SpanClientIp, value); + http_request_annotations_.emplace(SpanClientIp, ValueUtil::stringValue(std::string(value))); // In this case, PeerAddress refers to the client's actual IP address, not // the address specified in the the HTTP X-Forwarded-For header. - http_request_annotations_.emplace(SpanXForwardedFor, "false"); + http_request_annotations_.emplace(SpanXForwardedFor, ValueUtil::boolValue(false)); } else { custom_annotations_.emplace(name, value); } diff --git a/source/extensions/tracers/xray/tracer.h b/source/extensions/tracers/xray/tracer.h index f9a3818cbc793..69383ea5129f8 100644 --- a/source/extensions/tracers/xray/tracer.h +++ b/source/extensions/tracers/xray/tracer.h @@ -8,6 +8,7 @@ #include "envoy/tracing/http_tracer.h" #include "common/common/hex.h" +#include "common/protobuf/utility.h" #include "extensions/tracers/xray/daemon_broker.h" #include "extensions/tracers/xray/sampling_strategy.h" @@ -23,7 +24,7 @@ namespace XRay { constexpr auto XRayTraceHeader = "x-amzn-trace-id"; -class Span : public Tracing::Span { +class Span : public Tracing::Span, Logger::Loggable { public: /** * Creates a new Span. 
@@ -64,7 +65,7 @@ class Span : public Tracing::Span { /** * Adds a key-value pair to either the Span's annotations or metadata. - * A whitelist of keys are added to the annotations, everything else is added to the metadata. + * An allowlist of keys is added to the annotations, everything else is added to the metadata. */ void setTag(absl::string_view name, absl::string_view value) override; @@ -112,7 +113,7 @@ class Span : public Tracing::Span { /** * Gets this Span's ID. */ - const std::string& Id() const { return id_; } + const std::string& id() const { return id_; } const std::string& parentId() const { return parent_segment_id_; } @@ -131,6 +132,10 @@ class Span : public Tracing::Span { */ void log(Envoy::SystemTime, const std::string&) override {} + // X-Ray doesn't support baggage, so noop these OpenTracing functions. + void setBaggage(absl::string_view, absl::string_view) override {} + std::string getBaggage(absl::string_view) override { return std::string(); } + /** * Creates a child span. * In X-Ray terms this creates a sub-segment and sets its parent ID to the current span's ID. 
@@ -147,8 +152,8 @@ class Span : public Tracing::Span { std::string trace_id_; std::string parent_segment_id_; std::string name_; - absl::flat_hash_map http_request_annotations_; - absl::flat_hash_map http_response_annotations_; + absl::flat_hash_map http_request_annotations_; + absl::flat_hash_map http_response_annotations_; absl::flat_hash_map custom_annotations_; Envoy::TimeSource& time_source_; DaemonBroker& broker_; diff --git a/source/extensions/tracers/xray/xray_configuration.h b/source/extensions/tracers/xray/xray_configuration.h index 852f705614980..114ea398444ac 100644 --- a/source/extensions/tracers/xray/xray_configuration.h +++ b/source/extensions/tracers/xray/xray_configuration.h @@ -27,7 +27,7 @@ enum class SamplingDecision { struct XRayHeader { std::string trace_id_; std::string parent_id_; - SamplingDecision sample_decision_; + SamplingDecision sample_decision_{}; }; } // namespace XRay diff --git a/source/extensions/tracers/xray/xray_tracer_impl.cc b/source/extensions/tracers/xray/xray_tracer_impl.cc index d0ecb0684e257..cc50ebfeb996f 100644 --- a/source/extensions/tracers/xray/xray_tracer_impl.cc +++ b/source/extensions/tracers/xray/xray_tracer_impl.cc @@ -95,9 +95,9 @@ Tracing::SpanPtr Driver::startSpan(const Tracing::Config& config, } if (!should_trace.has_value()) { - const SamplingRequest request{std::string{request_headers.Host()->value().getStringView()}, - std::string{request_headers.Method()->value().getStringView()}, - std::string{request_headers.Path()->value().getStringView()}}; + const SamplingRequest request{std::string{request_headers.getHostValue()}, + std::string{request_headers.getMethodValue()}, + std::string{request_headers.getPathValue()}}; should_trace = sampling_strategy_->shouldTrace(request); } diff --git a/source/extensions/tracers/zipkin/BUILD b/source/extensions/tracers/zipkin/BUILD index f2321bab87101..fc2d417c4d1cd 100644 --- a/source/extensions/tracers/zipkin/BUILD +++ b/source/extensions/tracers/zipkin/BUILD @@ -1,15 
+1,15 @@ -licenses(["notice"]) # Apache 2 - -# Trace driver for Zipkin (https://zipkin.io/). - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +# Trace driver for Zipkin (https://zipkin.io/). + +envoy_extension_package() envoy_cc_library( name = "zipkin_lib", @@ -58,7 +58,6 @@ envoy_cc_library( "//source/common/singleton:const_singleton", "//source/common/tracing:http_tracer_lib", "//source/common/upstream:cluster_update_tracker_lib", - "//source/extensions/tracers:well_known_names", "@com_github_openzipkin_zipkinapi//:zipkin_cc_proto", "@envoy_api//envoy/config/trace/v3:pkg_cc_proto", ], @@ -69,9 +68,13 @@ envoy_cc_extension( srcs = ["config.cc"], hdrs = ["config.h"], security_posture = "robust_to_untrusted_downstream", + # Legacy test use. TODO(#9953) clean up. + visibility = [ + "//:extension_config", + "//test/server:__subpackages__", + ], deps = [ ":zipkin_lib", - "//source/extensions/tracers:well_known_names", "//source/extensions/tracers/common:factory_base_lib", "@envoy_api//envoy/config/trace/v3:pkg_cc_proto", ], diff --git a/source/extensions/tracers/zipkin/config.cc b/source/extensions/tracers/zipkin/config.cc index 0fca39dd4a310..36d1f38fae8e7 100644 --- a/source/extensions/tracers/zipkin/config.cc +++ b/source/extensions/tracers/zipkin/config.cc @@ -7,7 +7,6 @@ #include "common/common/utility.h" #include "common/tracing/http_tracer_impl.h" -#include "extensions/tracers/well_known_names.h" #include "extensions/tracers/zipkin/zipkin_tracer_impl.h" namespace Envoy { @@ -15,7 +14,7 @@ namespace Extensions { namespace Tracers { namespace Zipkin { -ZipkinTracerFactory::ZipkinTracerFactory() : FactoryBase(TracerNames::get().Zipkin) {} +ZipkinTracerFactory::ZipkinTracerFactory() : FactoryBase("envoy.tracers.zipkin") {} Tracing::HttpTracerSharedPtr ZipkinTracerFactory::createHttpTracerTyped( const 
envoy::config::trace::v3::ZipkinConfig& proto_config, diff --git a/source/extensions/tracers/zipkin/span_buffer.cc b/source/extensions/tracers/zipkin/span_buffer.cc index a0803fe080bd3..d40071c961826 100644 --- a/source/extensions/tracers/zipkin/span_buffer.cc +++ b/source/extensions/tracers/zipkin/span_buffer.cc @@ -137,7 +137,8 @@ JsonV2Serializer::toListOfSpans(const Span& zipkin_span, Util::Replacements& rep // us 1.58432429547687e+15. Instead we store it as the string of 1584324295476870 (when it is // serialized: "1584324295476870"), and replace it post MessageToJsonString serialization with // integer (1584324295476870 without `"`), see: JsonV2Serializer::serialize. - (*fields)[SPAN_TIMESTAMP] = Util::uint64Value(annotation.timestamp(), replacements); + (*fields)[SPAN_TIMESTAMP] = + Util::uint64Value(annotation.timestamp(), SPAN_TIMESTAMP, replacements); (*fields)[SPAN_LOCAL_ENDPOINT] = ValueUtil::structValue(toProtoEndpoint(annotation.endpoint())); } @@ -157,7 +158,8 @@ JsonV2Serializer::toListOfSpans(const Span& zipkin_span, Util::Replacements& rep if (zipkin_span.isSetDuration()) { // Since SPAN_DURATION has the same data type with SPAN_TIMESTAMP, we use Util::uint64Value to // store it. - (*fields)[SPAN_DURATION] = Util::uint64Value(zipkin_span.duration(), replacements); + (*fields)[SPAN_DURATION] = + Util::uint64Value(zipkin_span.duration(), SPAN_DURATION, replacements); } const auto& binary_annotations = zipkin_span.binaryAnnotations(); diff --git a/source/extensions/tracers/zipkin/span_context.h b/source/extensions/tracers/zipkin/span_context.h index 6dd08c3b291b5..c06381272cbe7 100644 --- a/source/extensions/tracers/zipkin/span_context.h +++ b/source/extensions/tracers/zipkin/span_context.h @@ -52,17 +52,17 @@ class SpanContext { /** * @return the span's parent id as an integer. */ - uint64_t parent_id() const { return parent_id_; } + uint64_t parentId() const { return parent_id_; } /** * @return the high 64 bits of the trace id as an integer. 
*/ - uint64_t trace_id_high() const { return trace_id_high_; } + uint64_t traceIdHigh() const { return trace_id_high_; } /** * @return the low 64 bits of the trace id as an integer. */ - uint64_t trace_id() const { return trace_id_; } + uint64_t traceId() const { return trace_id_; } /** * @return whether using 128 bit trace id. diff --git a/source/extensions/tracers/zipkin/span_context_extractor.h b/source/extensions/tracers/zipkin/span_context_extractor.h index e48939b247084..425a0d59973cc 100644 --- a/source/extensions/tracers/zipkin/span_context_extractor.h +++ b/source/extensions/tracers/zipkin/span_context_extractor.h @@ -14,7 +14,6 @@ class SpanContext; struct ExtractorException : public EnvoyException { ExtractorException(const std::string& what) : EnvoyException(what) {} - ExtractorException(const ExtractorException& ex) : EnvoyException(ex.what()) {} }; /** diff --git a/source/extensions/tracers/zipkin/tracer.cc b/source/extensions/tracers/zipkin/tracer.cc index 866f40813382d..f334246d4c510 100644 --- a/source/extensions/tracers/zipkin/tracer.cc +++ b/source/extensions/tracers/zipkin/tracer.cc @@ -86,8 +86,8 @@ SpanPtr Tracer::startSpan(const Tracing::Config& config, const std::string& span // Initialize the shared context for the new span span_ptr->setId(previous_context.id()); - if (previous_context.parent_id()) { - span_ptr->setParentId(previous_context.parent_id()); + if (previous_context.parentId()) { + span_ptr->setParentId(previous_context.parentId()); } // Set the SR annotation value @@ -105,9 +105,9 @@ SpanPtr Tracer::startSpan(const Tracing::Config& config, const std::string& span span_ptr->addAnnotation(std::move(annotation)); // Keep the same trace id - span_ptr->setTraceId(previous_context.trace_id()); + span_ptr->setTraceId(previous_context.traceId()); if (previous_context.is128BitTraceId()) { - span_ptr->setTraceIdHigh(previous_context.trace_id_high()); + span_ptr->setTraceIdHigh(previous_context.traceIdHigh()); } // Keep the same sampled 
flag diff --git a/source/extensions/tracers/zipkin/tracer.h b/source/extensions/tracers/zipkin/tracer.h index d51e0645844ae..109982af7ff08 100644 --- a/source/extensions/tracers/zipkin/tracer.h +++ b/source/extensions/tracers/zipkin/tracer.h @@ -1,8 +1,8 @@ #pragma once #include "envoy/common/pure.h" +#include "envoy/common/random_generator.h" #include "envoy/common/time.h" -#include "envoy/runtime/runtime.h" #include "envoy/tracing/http_tracer.h" #include "extensions/tracers/zipkin/span_context.h" @@ -60,7 +60,7 @@ class Tracer : public TracerInterface { * @param shared_span_context Whether shared span id should be used. */ Tracer(const std::string& service_name, Network::Address::InstanceConstSharedPtr address, - Runtime::RandomGenerator& random_generator, const bool trace_id_128bit, + Random::RandomGenerator& random_generator, const bool trace_id_128bit, const bool shared_span_context, TimeSource& time_source) : service_name_(service_name), address_(address), reporter_(nullptr), random_generator_(random_generator), trace_id_128bit_(trace_id_128bit), @@ -95,16 +95,6 @@ class Tracer : public TracerInterface { */ void reportSpan(Span&& span) override; - /** - * @return the service-name attribute associated with the Tracer. - */ - const std::string& serviceName() const { return service_name_; } - - /** - * @return the pointer to the address object associated with the Tracer. - */ - const Network::Address::InstanceConstSharedPtr address() const { return address_; } - /** * Associates a Reporter object with this Tracer. * @@ -112,16 +102,11 @@ class Tracer : public TracerInterface { */ void setReporter(ReporterPtr reporter); - /** - * @return the random-number generator associated with the Tracer. 
- */ - Runtime::RandomGenerator& randomGenerator() { return random_generator_; } - private: const std::string service_name_; Network::Address::InstanceConstSharedPtr address_; ReporterPtr reporter_; - Runtime::RandomGenerator& random_generator_; + Random::RandomGenerator& random_generator_; const bool trace_id_128bit_; const bool shared_span_context_; TimeSource& time_source_; diff --git a/source/extensions/tracers/zipkin/util.cc b/source/extensions/tracers/zipkin/util.cc index 3d4ff6913f53b..5263eec00ecc4 100644 --- a/source/extensions/tracers/zipkin/util.cc +++ b/source/extensions/tracers/zipkin/util.cc @@ -23,9 +23,11 @@ uint64_t Util::generateRandom64(TimeSource& time_source) { return rand_64(); } -ProtobufWkt::Value Util::uint64Value(uint64_t value, Replacements& replacements) { +ProtobufWkt::Value Util::uint64Value(uint64_t value, absl::string_view name, + Replacements& replacements) { const std::string string_value = std::to_string(value); - replacements.push_back({absl::StrCat("\"", string_value, "\""), string_value}); + replacements.push_back({absl::StrCat("\"", name, "\":\"", string_value, "\""), + absl::StrCat("\"", name, "\":", string_value)}); return ValueUtil::stringValue(string_value); } diff --git a/source/extensions/tracers/zipkin/util.h b/source/extensions/tracers/zipkin/util.h index 6f1a933744842..0c9158a36423e 100644 --- a/source/extensions/tracers/zipkin/util.h +++ b/source/extensions/tracers/zipkin/util.h @@ -49,13 +49,16 @@ class Util { /** * Returns a wrapped uint64_t value as a string. In addition to that, it also pushes back a - * replacement to the given replacements vector. + * replacement to the given replacements vector. The replacement includes the supplied name + * as a key, for identification in a JSON stream. * * @param value unt64_t number that will be represented in string. + * @param name std::string that is the key for the value being replaced. 
* @param replacements a container to hold the required replacements when serializing this value. * @return ProtobufWkt::Value wrapped uint64_t as a string. */ - static ProtobufWkt::Value uint64Value(uint64_t value, Replacements& replacements); + static ProtobufWkt::Value uint64Value(uint64_t value, absl::string_view name, + Replacements& replacements); }; } // namespace Zipkin diff --git a/source/extensions/tracers/zipkin/zipkin_core_types.cc b/source/extensions/tracers/zipkin/zipkin_core_types.cc index 19db113b49975..3128a82586f8b 100644 --- a/source/extensions/tracers/zipkin/zipkin_core_types.cc +++ b/source/extensions/tracers/zipkin/zipkin_core_types.cc @@ -69,7 +69,7 @@ void Annotation::changeEndpointServiceName(const std::string& service_name) { const ProtobufWkt::Struct Annotation::toStruct(Util::Replacements& replacements) const { ProtobufWkt::Struct annotation; auto* fields = annotation.mutable_fields(); - (*fields)[ANNOTATION_TIMESTAMP] = Util::uint64Value(timestamp_, replacements); + (*fields)[ANNOTATION_TIMESTAMP] = Util::uint64Value(timestamp_, SPAN_TIMESTAMP, replacements); (*fields)[ANNOTATION_VALUE] = ValueUtil::stringValue(value_); if (endpoint_.has_value()) { (*fields)[ANNOTATION_ENDPOINT] = @@ -159,13 +159,13 @@ const ProtobufWkt::Struct Span::toStruct(Util::Replacements& replacements) const // Usually we store number to a ProtobufWkt::Struct object via ValueUtil::numberValue. // However, due to the possibility of rendering that to a number with scientific notation, we // chose to store it as a string and keeping track the corresponding replacement. - (*fields)[SPAN_TIMESTAMP] = Util::uint64Value(timestamp_.value(), replacements); + (*fields)[SPAN_TIMESTAMP] = Util::uint64Value(timestamp_.value(), SPAN_TIMESTAMP, replacements); } if (duration_.has_value()) { // Since SPAN_DURATION has the same data type with SPAN_TIMESTAMP, we use Util::uint64Value to // store it. 
- (*fields)[SPAN_DURATION] = Util::uint64Value(duration_.value(), replacements); + (*fields)[SPAN_DURATION] = Util::uint64Value(duration_.value(), SPAN_DURATION, replacements); } if (!annotations_.empty()) { diff --git a/source/extensions/tracers/zipkin/zipkin_tracer_impl.cc b/source/extensions/tracers/zipkin/zipkin_tracer_impl.cc index 2cb4338c0ad36..8cf176d1fabcd 100644 --- a/source/extensions/tracers/zipkin/zipkin_tracer_impl.cc +++ b/source/extensions/tracers/zipkin/zipkin_tracer_impl.cc @@ -11,7 +11,6 @@ #include "common/http/utility.h" #include "common/tracing/http_tracer_impl.h" -#include "extensions/tracers/well_known_names.h" #include "extensions/tracers/zipkin/span_context_extractor.h" #include "extensions/tracers/zipkin/zipkin_core_constants.h" @@ -36,6 +35,10 @@ void ZipkinSpan::log(SystemTime timestamp, const std::string& event) { span_.log(timestamp, event); } +// TODO(#11622): Implement baggage storage for zipkin spans +void ZipkinSpan::setBaggage(absl::string_view, absl::string_view) {} +std::string ZipkinSpan::getBaggage(absl::string_view) { return std::string(); } + void ZipkinSpan::injectContext(Http::RequestHeaderMap& request_headers) { // Set the trace-id and span-id headers properly, based on the newly-created span structure. 
request_headers.setReferenceKey(ZipkinCoreConstants::get().X_B3_TRACE_ID, @@ -68,13 +71,13 @@ Driver::TlsTracer::TlsTracer(TracerPtr&& tracer, Driver& driver) Driver::Driver(const envoy::config::trace::v3::ZipkinConfig& zipkin_config, Upstream::ClusterManager& cluster_manager, Stats::Scope& scope, ThreadLocal::SlotAllocator& tls, Runtime::Loader& runtime, - const LocalInfo::LocalInfo& local_info, Runtime::RandomGenerator& random_generator, + const LocalInfo::LocalInfo& local_info, Random::RandomGenerator& random_generator, TimeSource& time_source) : cm_(cluster_manager), tracer_stats_{ZIPKIN_TRACER_STATS( POOL_COUNTER_PREFIX(scope, "tracing.zipkin."))}, tls_(tls.allocateSlot()), runtime_(runtime), local_info_(local_info), time_source_(time_source) { - Config::Utility::checkCluster(TracerNames::get().Zipkin, zipkin_config.collector_cluster(), cm_, + Config::Utility::checkCluster("envoy.tracers.zipkin", zipkin_config.collector_cluster(), cm_, /* allow_added_via_api */ true); cluster_ = zipkin_config.collector_cluster(); @@ -113,13 +116,12 @@ Tracing::SpanPtr Driver::startSpan(const Tracing::Config& config, auto ret_span_context = extractor.extractSpanContext(sampled); if (!ret_span_context.second) { // Create a root Zipkin span. No context was found in the headers. 
- new_zipkin_span = tracer.startSpan( - config, std::string(request_headers.Host()->value().getStringView()), start_time); + new_zipkin_span = + tracer.startSpan(config, std::string(request_headers.getHostValue()), start_time); new_zipkin_span->setSampled(sampled); } else { - new_zipkin_span = - tracer.startSpan(config, std::string(request_headers.Host()->value().getStringView()), - start_time, ret_span_context.first); + new_zipkin_span = tracer.startSpan(config, std::string(request_headers.getHostValue()), + start_time, ret_span_context.first); } } catch (const ExtractorException& e) { diff --git a/source/extensions/tracers/zipkin/zipkin_tracer_impl.h b/source/extensions/tracers/zipkin/zipkin_tracer_impl.h index 5968a4464bbfa..9cb39ea27e925 100644 --- a/source/extensions/tracers/zipkin/zipkin_tracer_impl.h +++ b/source/extensions/tracers/zipkin/zipkin_tracer_impl.h @@ -1,5 +1,6 @@ #pragma once +#include "envoy/common/random_generator.h" #include "envoy/config/trace/v3/zipkin.pb.h" #include "envoy/local_info/local_info.h" #include "envoy/runtime/runtime.h" @@ -75,6 +76,10 @@ class ZipkinSpan : public Tracing::Span { void setSampled(bool sampled) override; + // TODO(#11622): Implement baggage storage for zipkin spans + void setBaggage(absl::string_view, absl::string_view) override; + std::string getBaggage(absl::string_view) override; + /** * @return a reference to the Zipkin::Span object. */ @@ -99,7 +104,7 @@ class Driver : public Tracing::Driver { Driver(const envoy::config::trace::v3::ZipkinConfig& zipkin_config, Upstream::ClusterManager& cluster_manager, Stats::Scope& scope, ThreadLocal::SlotAllocator& tls, Runtime::Loader& runtime, - const LocalInfo::LocalInfo& localinfo, Runtime::RandomGenerator& random_generator, + const LocalInfo::LocalInfo& localinfo, Random::RandomGenerator& random_generator, TimeSource& time_source); /** @@ -201,6 +206,7 @@ class ReporterImpl : Logger::Loggable, // The callbacks below record Zipkin-span-related stats. 
void onSuccess(const Http::AsyncClient::Request&, Http::ResponseMessagePtr&&) override; void onFailure(const Http::AsyncClient::Request&, Http::AsyncClient::FailureReason) override; + void onBeforeFinalizeUpstreamSpan(Tracing::Span&, const Http::ResponseHeaderMap*) override {} /** * Creates a heap-allocated ZipkinReporter. diff --git a/source/extensions/transport_sockets/BUILD b/source/extensions/transport_sockets/BUILD index 6156949edef64..40a5e79b39d3b 100644 --- a/source/extensions/transport_sockets/BUILD +++ b/source/extensions/transport_sockets/BUILD @@ -1,16 +1,18 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +envoy_extension_package() envoy_cc_library( name = "well_known_names", hdrs = ["well_known_names.h"], + # well known names files are public as long as they exist. + visibility = ["//visibility:public"], deps = [ "//source/common/singleton:const_singleton", ], diff --git a/source/extensions/transport_sockets/alts/BUILD b/source/extensions/transport_sockets/alts/BUILD index 4575772e5a585..631c74a1c8d3f 100644 --- a/source/extensions/transport_sockets/alts/BUILD +++ b/source/extensions/transport_sockets/alts/BUILD @@ -1,16 +1,16 @@ -licenses(["notice"]) # Apache 2 - -# ALTS transport socket. This provides Google's ALTS protocol support in GCP to Envoy. -# https://cloud.google.com/security/encryption-in-transit/application-layer-transport-security/ - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +# ALTS transport socket. This provides Google's ALTS protocol support in GCP to Envoy. 
+# https://cloud.google.com/security/encryption-in-transit/application-layer-transport-security/ + +envoy_extension_package() envoy_cc_library( name = "grpc_tsi_wrapper", @@ -34,6 +34,9 @@ envoy_cc_extension( hdrs = [ "config.h", ], + external_deps = [ + "abseil_node_hash_set", + ], security_posture = "robust_to_untrusted_downstream_and_upstream", deps = [ ":tsi_handshaker", diff --git a/source/extensions/transport_sockets/alts/config.cc b/source/extensions/transport_sockets/alts/config.cc index 1b9514162463b..1d8b60eab3869 100644 --- a/source/extensions/transport_sockets/alts/config.cc +++ b/source/extensions/transport_sockets/alts/config.cc @@ -13,6 +13,7 @@ #include "extensions/transport_sockets/alts/grpc_tsi.h" #include "extensions/transport_sockets/alts/tsi_socket.h" +#include "absl/container/node_hash_set.h" #include "absl/strings/str_join.h" namespace Envoy { @@ -26,9 +27,18 @@ using GrpcAltsCredentialsOptionsPtr = namespace { +// TODO: gRPC v1.30.0-pre1 defines the equivalent function grpc_alts_set_rpc_protocol_versions +// that should be called directly when available. +void grpcAltsSetRpcProtocolVersions(grpc_gcp_rpc_protocol_versions* rpc_versions) { + grpc_gcp_rpc_protocol_versions_set_max(rpc_versions, GRPC_PROTOCOL_VERSION_MAX_MAJOR, + GRPC_PROTOCOL_VERSION_MAX_MINOR); + grpc_gcp_rpc_protocol_versions_set_min(rpc_versions, GRPC_PROTOCOL_VERSION_MIN_MAJOR, + GRPC_PROTOCOL_VERSION_MIN_MINOR); +} + // Returns true if the peer's service account is found in peers, otherwise // returns false and fills out err with an error message. 
-bool doValidate(const tsi_peer& peer, const std::unordered_set& peers, +bool doValidate(const tsi_peer& peer, const absl::node_hash_set& peers, std::string& err) { for (size_t i = 0; i < peer.property_count; ++i) { const std::string name = std::string(peer.properties[i].name); @@ -48,8 +58,8 @@ bool doValidate(const tsi_peer& peer, const std::unordered_set& pee HandshakeValidator createHandshakeValidator(const envoy::extensions::transport_sockets::alts::v3::Alts& config) { const auto& peer_service_accounts = config.peer_service_accounts(); - const std::unordered_set peers(peer_service_accounts.cbegin(), - peer_service_accounts.cend()); + const absl::node_hash_set peers(peer_service_accounts.cbegin(), + peer_service_accounts.cend()); HandshakeValidator validator; // Skip validation if peers is empty. if (!peers.empty()) { @@ -108,6 +118,7 @@ Network::TransportSocketFactoryPtr createTransportSocketFactoryHelper( } else { options = GrpcAltsCredentialsOptionsPtr(grpc_alts_credentials_server_options_create()); } + grpcAltsSetRpcProtocolVersions(&options->rpc_versions); const char* target_name = is_upstream ? 
"" : nullptr; tsi_handshaker* handshaker = nullptr; // Specifying target name as empty since TSI won't take care of validating peer identity diff --git a/source/extensions/transport_sockets/alts/grpc_tsi.h b/source/extensions/transport_sockets/alts/grpc_tsi.h index de36141e87dda..d07cd8d57fb2b 100644 --- a/source/extensions/transport_sockets/alts/grpc_tsi.h +++ b/source/extensions/transport_sockets/alts/grpc_tsi.h @@ -11,8 +11,10 @@ #endif #include "grpc/grpc_security.h" +#include "src/core/lib/transport/transport.h" #include "src/core/tsi/alts/handshaker/alts_shared_resource.h" #include "src/core/tsi/alts/handshaker/alts_tsi_handshaker.h" +#include "src/core/tsi/alts/handshaker/transport_security_common_api.h" #include "src/core/tsi/transport_security_grpc.h" #include "src/core/tsi/transport_security_interface.h" diff --git a/source/extensions/transport_sockets/common/BUILD b/source/extensions/transport_sockets/common/BUILD new file mode 100644 index 0000000000000..eee229da12fb2 --- /dev/null +++ b/source/extensions/transport_sockets/common/BUILD @@ -0,0 +1,20 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_extension_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_extension_package() + +envoy_cc_library( + name = "passthrough_lib", + srcs = ["passthrough.cc"], + hdrs = ["passthrough.h"], + deps = [ + "//include/envoy/network:connection_interface", + "//include/envoy/network:transport_socket_interface", + "//source/common/buffer:buffer_lib", + ], +) diff --git a/source/extensions/transport_sockets/common/passthrough.cc b/source/extensions/transport_sockets/common/passthrough.cc new file mode 100644 index 0000000000000..60d632adb24a7 --- /dev/null +++ b/source/extensions/transport_sockets/common/passthrough.cc @@ -0,0 +1,47 @@ +#include "extensions/transport_sockets/common/passthrough.h" + +#include "envoy/network/connection.h" +#include "envoy/network/transport_socket.h" + +#include "common/buffer/buffer_impl.h" + +namespace 
Envoy { +namespace Extensions { +namespace TransportSockets { + +PassthroughSocket::PassthroughSocket(Network::TransportSocketPtr&& transport_socket) + : transport_socket_(std::move(transport_socket)) {} + +void PassthroughSocket::setTransportSocketCallbacks(Network::TransportSocketCallbacks& callbacks) { + transport_socket_->setTransportSocketCallbacks(callbacks); +} + +std::string PassthroughSocket::protocol() const { return transport_socket_->protocol(); } + +absl::string_view PassthroughSocket::failureReason() const { + return transport_socket_->failureReason(); +} + +bool PassthroughSocket::canFlushClose() { return transport_socket_->canFlushClose(); } + +void PassthroughSocket::closeSocket(Network::ConnectionEvent event) { + transport_socket_->closeSocket(event); +} + +Network::IoResult PassthroughSocket::doRead(Buffer::Instance& buffer) { + return transport_socket_->doRead(buffer); +} + +Network::IoResult PassthroughSocket::doWrite(Buffer::Instance& buffer, bool end_stream) { + return transport_socket_->doWrite(buffer, end_stream); +} + +void PassthroughSocket::onConnected() { transport_socket_->onConnected(); } + +Ssl::ConnectionInfoConstSharedPtr PassthroughSocket::ssl() const { + return transport_socket_->ssl(); +} + +} // namespace TransportSockets +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/source/extensions/transport_sockets/common/passthrough.h b/source/extensions/transport_sockets/common/passthrough.h new file mode 100644 index 0000000000000..bbf832c73419e --- /dev/null +++ b/source/extensions/transport_sockets/common/passthrough.h @@ -0,0 +1,32 @@ +#pragma once + +#include "envoy/network/connection.h" +#include "envoy/network/transport_socket.h" + +#include "common/buffer/buffer_impl.h" + +namespace Envoy { +namespace Extensions { +namespace TransportSockets { + +class PassthroughSocket : public Network::TransportSocket { +public: + PassthroughSocket(Network::TransportSocketPtr&& transport_socket); + + 
void setTransportSocketCallbacks(Network::TransportSocketCallbacks& callbacks) override; + std::string protocol() const override; + absl::string_view failureReason() const override; + bool canFlushClose() override; + void closeSocket(Network::ConnectionEvent event) override; + Network::IoResult doRead(Buffer::Instance& buffer) override; + Network::IoResult doWrite(Buffer::Instance& buffer, bool end_stream) override; + void onConnected() override; + Ssl::ConnectionInfoConstSharedPtr ssl() const override; + +protected: + Network::TransportSocketPtr transport_socket_; +}; + +} // namespace TransportSockets +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/source/extensions/transport_sockets/proxy_protocol/BUILD b/source/extensions/transport_sockets/proxy_protocol/BUILD new file mode 100644 index 0000000000000..251721adfbb4a --- /dev/null +++ b/source/extensions/transport_sockets/proxy_protocol/BUILD @@ -0,0 +1,26 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", + "envoy_extension_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_extension_package() + +envoy_cc_extension( + name = "upstream_proxy_protocol", + srcs = ["proxy_protocol.cc"], + hdrs = ["proxy_protocol.h"], + security_posture = "robust_to_untrusted_downstream", + undocumented = True, + deps = [ + "//include/envoy/network:connection_interface", + "//include/envoy/network:transport_socket_interface", + "//source/common/buffer:buffer_lib", + "//source/common/network:address_lib", + "//source/extensions/common/proxy_protocol:proxy_protocol_header_lib", + "//source/extensions/transport_sockets/common:passthrough_lib", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + ], +) diff --git a/source/extensions/transport_sockets/proxy_protocol/proxy_protocol.cc b/source/extensions/transport_sockets/proxy_protocol/proxy_protocol.cc new file mode 100644 index 0000000000000..d1427b7aaa9d1 --- /dev/null +++ 
b/source/extensions/transport_sockets/proxy_protocol/proxy_protocol.cc @@ -0,0 +1,106 @@ +#include "extensions/transport_sockets/proxy_protocol/proxy_protocol.h" + +#include + +#include "envoy/config/core/v3/proxy_protocol.pb.h" +#include "envoy/network/transport_socket.h" + +#include "common/buffer/buffer_impl.h" +#include "common/network/address_impl.h" + +#include "extensions/common/proxy_protocol/proxy_protocol_header.h" + +using envoy::config::core::v3::ProxyProtocolConfig_Version; + +namespace Envoy { +namespace Extensions { +namespace TransportSockets { +namespace ProxyProtocol { + +UpstreamProxyProtocolSocket::UpstreamProxyProtocolSocket( + Network::TransportSocketPtr&& transport_socket, + Network::TransportSocketOptionsSharedPtr options, ProxyProtocolConfig_Version version) + : PassthroughSocket(std::move(transport_socket)), options_(options), version_(version) {} + +void UpstreamProxyProtocolSocket::setTransportSocketCallbacks( + Network::TransportSocketCallbacks& callbacks) { + transport_socket_->setTransportSocketCallbacks(callbacks); + callbacks_ = &callbacks; + generateHeader(); +} + +Network::IoResult UpstreamProxyProtocolSocket::doWrite(Buffer::Instance& buffer, bool end_stream) { + if (header_buffer_.length() > 0) { + auto header_res = writeHeader(); + if (header_buffer_.length() == 0 && header_res.action_ == Network::PostIoAction::KeepOpen) { + auto inner_res = transport_socket_->doWrite(buffer, end_stream); + return {inner_res.action_, header_res.bytes_processed_ + inner_res.bytes_processed_, false}; + } + return header_res; + } else { + return transport_socket_->doWrite(buffer, end_stream); + } +} + +void UpstreamProxyProtocolSocket::generateHeader() { + if (version_ == ProxyProtocolConfig_Version::ProxyProtocolConfig_Version_V1) { + generateHeaderV1(); + } else { + generateHeaderV2(); + } +} + +void UpstreamProxyProtocolSocket::generateHeaderV1() { + // Default to local addresses + auto src_addr = callbacks_->connection().localAddress(); + auto 
dst_addr = callbacks_->connection().remoteAddress(); + + if (options_ && options_->proxyProtocolOptions().has_value()) { + const auto options = options_->proxyProtocolOptions().value(); + src_addr = options.src_addr_; + dst_addr = options.dst_addr_; + } + + Common::ProxyProtocol::generateV1Header(*src_addr->ip(), *dst_addr->ip(), header_buffer_); +} + +void UpstreamProxyProtocolSocket::generateHeaderV2() { + if (!options_ || !options_->proxyProtocolOptions().has_value()) { + Common::ProxyProtocol::generateV2LocalHeader(header_buffer_); + } else { + const auto options = options_->proxyProtocolOptions().value(); + Common::ProxyProtocol::generateV2Header(*options.src_addr_->ip(), *options.dst_addr_->ip(), + header_buffer_); + } +} + +Network::IoResult UpstreamProxyProtocolSocket::writeHeader() { + Network::PostIoAction action = Network::PostIoAction::KeepOpen; + uint64_t bytes_written = 0; + do { + if (header_buffer_.length() == 0) { + break; + } + + Api::IoCallUint64Result result = header_buffer_.write(callbacks_->ioHandle()); + + if (result.ok()) { + ENVOY_CONN_LOG(trace, "write returns: {}", callbacks_->connection(), result.rc_); + bytes_written += result.rc_; + } else { + ENVOY_CONN_LOG(trace, "write error: {}", callbacks_->connection(), + result.err_->getErrorDetails()); + if (result.err_->getErrorCode() != Api::IoError::IoErrorCode::Again) { + action = Network::PostIoAction::Close; + } + break; + } + } while (true); + + return {action, bytes_written, false}; +} + +} // namespace ProxyProtocol +} // namespace TransportSockets +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/source/extensions/transport_sockets/proxy_protocol/proxy_protocol.h b/source/extensions/transport_sockets/proxy_protocol/proxy_protocol.h new file mode 100644 index 0000000000000..3b0996e20882c --- /dev/null +++ b/source/extensions/transport_sockets/proxy_protocol/proxy_protocol.h @@ -0,0 +1,44 @@ +#pragma once + +#include 
"envoy/config/core/v3/proxy_protocol.pb.h" +#include "envoy/network/connection.h" +#include "envoy/network/transport_socket.h" + +#include "common/buffer/buffer_impl.h" +#include "common/common/logger.h" + +#include "extensions/transport_sockets/common/passthrough.h" + +using envoy::config::core::v3::ProxyProtocolConfig_Version; + +namespace Envoy { +namespace Extensions { +namespace TransportSockets { +namespace ProxyProtocol { + +class UpstreamProxyProtocolSocket : public TransportSockets::PassthroughSocket, + public Logger::Loggable { +public: + UpstreamProxyProtocolSocket(Network::TransportSocketPtr&& transport_socket, + Network::TransportSocketOptionsSharedPtr options, + ProxyProtocolConfig_Version version); + + void setTransportSocketCallbacks(Network::TransportSocketCallbacks& callbacks) override; + Network::IoResult doWrite(Buffer::Instance& buffer, bool end_stream) override; + +private: + void generateHeader(); + void generateHeaderV1(); + void generateHeaderV2(); + Network::IoResult writeHeader(); + + Network::TransportSocketOptionsSharedPtr options_; + Network::TransportSocketCallbacks* callbacks_{}; + Buffer::OwnedImpl header_buffer_{}; + ProxyProtocolConfig_Version version_{ProxyProtocolConfig_Version::ProxyProtocolConfig_Version_V1}; +}; + +} // namespace ProxyProtocol +} // namespace TransportSockets +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/source/extensions/transport_sockets/raw_buffer/BUILD b/source/extensions/transport_sockets/raw_buffer/BUILD index f5b11f64def99..3d4b41c96cdee 100644 --- a/source/extensions/transport_sockets/raw_buffer/BUILD +++ b/source/extensions/transport_sockets/raw_buffer/BUILD @@ -1,20 +1,22 @@ -licenses(["notice"]) # Apache 2 - -# Built-in plaintext connection transport socket. 
- load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +# Built-in plaintext connection transport socket. + +envoy_extension_package() envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], security_posture = "requires_trusted_downstream_and_upstream", + # This is core Envoy config. + visibility = ["//visibility:public"], deps = [ "//include/envoy/network:transport_socket_interface", "//include/envoy/registry", diff --git a/source/extensions/transport_sockets/raw_buffer/config.h b/source/extensions/transport_sockets/raw_buffer/config.h index a7c68d6875a77..b17f9836f5552 100644 --- a/source/extensions/transport_sockets/raw_buffer/config.h +++ b/source/extensions/transport_sockets/raw_buffer/config.h @@ -16,7 +16,6 @@ namespace RawBuffer { */ class RawBufferSocketFactory : public virtual Server::Configuration::TransportSocketConfigFactory { public: - ~RawBufferSocketFactory() override = default; std::string name() const override { return TransportSocketNames::get().RawBuffer; } ProtobufTypes::MessagePtr createEmptyConfigProto() override; }; diff --git a/source/extensions/transport_sockets/tap/BUILD b/source/extensions/transport_sockets/tap/BUILD index e9107a046cfcd..4adb0db7cb38a 100644 --- a/source/extensions/transport_sockets/tap/BUILD +++ b/source/extensions/transport_sockets/tap/BUILD @@ -1,15 +1,15 @@ -licenses(["notice"]) # Apache 2 - -# tap wrapper around a transport socket. - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +# tap wrapper around a transport socket. 
+ +envoy_extension_package() envoy_cc_library( name = "tap_config_interface", @@ -42,6 +42,7 @@ envoy_cc_library( "//include/envoy/network:transport_socket_interface", "//source/common/buffer:buffer_lib", "//source/extensions/common/tap:extension_config_base", + "//source/extensions/transport_sockets/common:passthrough_lib", "@envoy_api//envoy/extensions/transport_sockets/tap/v3:pkg_cc_proto", ], ) @@ -52,6 +53,12 @@ envoy_cc_extension( hdrs = ["config.h"], security_posture = "requires_trusted_downstream_and_upstream", status = "alpha", + # TODO(#9953) clean up. + visibility = [ + "//:extension_config", + "//test/common/access_log:__subpackages__", + "//test/integration:__subpackages__", + ], deps = [ ":tap_config_impl", ":tap_lib", diff --git a/source/extensions/transport_sockets/tap/config.h b/source/extensions/transport_sockets/tap/config.h index 8068779ada01e..ac41dd19c9fc0 100644 --- a/source/extensions/transport_sockets/tap/config.h +++ b/source/extensions/transport_sockets/tap/config.h @@ -15,7 +15,6 @@ namespace Tap { */ class TapSocketConfigFactory : public virtual Server::Configuration::TransportSocketConfigFactory { public: - ~TapSocketConfigFactory() override = default; std::string name() const override { return TransportSocketNames::get().Tap; } ProtobufTypes::MessagePtr createEmptyConfigProto() override; }; diff --git a/source/extensions/transport_sockets/tap/tap.cc b/source/extensions/transport_sockets/tap/tap.cc index 21109084247b7..7674ba6b584d4 100644 --- a/source/extensions/transport_sockets/tap/tap.cc +++ b/source/extensions/transport_sockets/tap/tap.cc @@ -11,7 +11,7 @@ namespace Tap { TapSocket::TapSocket(SocketTapConfigSharedPtr config, Network::TransportSocketPtr&& transport_socket) - : config_(config), transport_socket_(std::move(transport_socket)) {} + : PassthroughSocket(std::move(transport_socket)), config_(config) {} void TapSocket::setTransportSocketCallbacks(Network::TransportSocketCallbacks& callbacks) { ASSERT(!tapper_); @@ -19,11 
+19,6 @@ void TapSocket::setTransportSocketCallbacks(Network::TransportSocketCallbacks& c tapper_ = config_ ? config_->createPerSocketTapper(callbacks.connection()) : nullptr; } -std::string TapSocket::protocol() const { return transport_socket_->protocol(); } -absl::string_view TapSocket::failureReason() const { return transport_socket_->failureReason(); } - -bool TapSocket::canFlushClose() { return transport_socket_->canFlushClose(); } - void TapSocket::closeSocket(Network::ConnectionEvent event) { if (tapper_ != nullptr) { tapper_->closeSocket(event); @@ -51,10 +46,6 @@ Network::IoResult TapSocket::doWrite(Buffer::Instance& buffer, bool end_stream) return result; } -void TapSocket::onConnected() { transport_socket_->onConnected(); } - -Ssl::ConnectionInfoConstSharedPtr TapSocket::ssl() const { return transport_socket_->ssl(); } - TapSocketFactory::TapSocketFactory( const envoy::extensions::transport_sockets::tap::v3::Tap& proto_config, Common::Tap::TapConfigFactoryPtr&& config_factory, Server::Admin& admin, diff --git a/source/extensions/transport_sockets/tap/tap.h b/source/extensions/transport_sockets/tap/tap.h index 72d8967468d7a..33156b705153d 100644 --- a/source/extensions/transport_sockets/tap/tap.h +++ b/source/extensions/transport_sockets/tap/tap.h @@ -5,6 +5,7 @@ #include "envoy/network/transport_socket.h" #include "extensions/common/tap/extension_config_base.h" +#include "extensions/transport_sockets/common/passthrough.h" #include "extensions/transport_sockets/tap/tap_config.h" namespace Envoy { @@ -12,25 +13,19 @@ namespace Extensions { namespace TransportSockets { namespace Tap { -class TapSocket : public Network::TransportSocket { +class TapSocket : public TransportSockets::PassthroughSocket { public: TapSocket(SocketTapConfigSharedPtr config, Network::TransportSocketPtr&& transport_socket); // Network::TransportSocket void setTransportSocketCallbacks(Network::TransportSocketCallbacks& callbacks) override; - std::string protocol() const override; - 
absl::string_view failureReason() const override; - bool canFlushClose() override; void closeSocket(Network::ConnectionEvent event) override; Network::IoResult doRead(Buffer::Instance& buffer) override; Network::IoResult doWrite(Buffer::Instance& buffer, bool end_stream) override; - void onConnected() override; - Ssl::ConnectionInfoConstSharedPtr ssl() const override; private: SocketTapConfigSharedPtr config_; PerSocketTapperPtr tapper_; - Network::TransportSocketPtr transport_socket_; }; class TapSocketFactory : public Network::TransportSocketFactory, diff --git a/source/extensions/transport_sockets/tls/BUILD b/source/extensions/transport_sockets/tls/BUILD index 748c7b99559fb..1cd091050d15a 100644 --- a/source/extensions/transport_sockets/tls/BUILD +++ b/source/extensions/transport_sockets/tls/BUILD @@ -1,21 +1,23 @@ -licenses(["notice"]) # Apache 2 - -# Built-in TLS connection transport socket. - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +# Built-in TLS connection transport socket. + +envoy_extension_package() envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], security_posture = "robust_to_untrusted_downstream_and_upstream", + # TLS is core functionality. + visibility = ["//visibility:public"], deps = [ ":ssl_socket_lib", "//include/envoy/network:transport_socket_interface", @@ -37,6 +39,8 @@ envoy_cc_library( "abseil_synchronization", "ssl", ], + # TLS is core functionality. + visibility = ["//visibility:public"], deps = [ ":context_config_lib", ":context_lib", @@ -62,6 +66,8 @@ envoy_cc_library( external_deps = [ "ssl", ], + # TLS is core functionality. 
+ visibility = ["//visibility:public"], deps = [ "//include/envoy/secret:secret_callbacks_interface", "//include/envoy/secret:secret_provider_interface", @@ -91,9 +97,12 @@ envoy_cc_library( "context_manager_impl.h", ], external_deps = [ + "abseil_node_hash_set", "abseil_synchronization", "ssl", ], + # TLS is core functionality. + visibility = ["//visibility:public"], deps = [ ":utility_lib", "//include/envoy/ssl:context_config_interface", @@ -109,7 +118,9 @@ envoy_cc_library( "//source/common/common:utility_lib", "//source/common/network:address_lib", "//source/common/protobuf:utility_lib", + "//source/common/runtime:runtime_features_lib", "//source/common/stats:symbol_table_lib", + "//source/common/stats:utility_lib", "//source/extensions/transport_sockets/tls/private_key:private_key_manager_lib", "@envoy_api//envoy/admin/v3:pkg_cc_proto", "@envoy_api//envoy/type/matcher/v3:pkg_cc_proto", diff --git a/source/extensions/transport_sockets/tls/config.cc b/source/extensions/transport_sockets/tls/config.cc index 655ac5724dc83..c743f5f6def86 100644 --- a/source/extensions/transport_sockets/tls/config.cc +++ b/source/extensions/transport_sockets/tls/config.cc @@ -1,7 +1,7 @@ #include "extensions/transport_sockets/tls/config.h" #include "envoy/extensions/transport_sockets/tls/v3/cert.pb.h" -#include "envoy/extensions/transport_sockets/tls/v3/cert.pb.validate.h" +#include "envoy/extensions/transport_sockets/tls/v3/tls.pb.validate.h" #include "common/protobuf/utility.h" diff --git a/source/extensions/transport_sockets/tls/context_config_impl.cc b/source/extensions/transport_sockets/tls/context_config_impl.cc index 793adc7597deb..6f20081eed80a 100644 --- a/source/extensions/transport_sockets/tls/context_config_impl.cc +++ b/source/extensions/transport_sockets/tls/context_config_impl.cc @@ -353,12 +353,7 @@ ClientContextConfigImpl::ClientContextConfigImpl( } const unsigned ServerContextConfigImpl::DEFAULT_MIN_VERSION = TLS1_VERSION; -const unsigned 
ServerContextConfigImpl::DEFAULT_MAX_VERSION = -#ifndef BORINGSSL_FIPS - TLS1_3_VERSION; -#else // BoringSSL FIPS - TLS1_2_VERSION; -#endif +const unsigned ServerContextConfigImpl::DEFAULT_MAX_VERSION = TLS1_3_VERSION; const std::string ServerContextConfigImpl::DEFAULT_CIPHER_SUITES = #ifndef BORINGSSL_FIPS diff --git a/source/extensions/transport_sockets/tls/context_config_impl.h b/source/extensions/transport_sockets/tls/context_config_impl.h index 9cfaff0482fbf..ad2d927d82313 100644 --- a/source/extensions/transport_sockets/tls/context_config_impl.h +++ b/source/extensions/transport_sockets/tls/context_config_impl.h @@ -98,6 +98,9 @@ class ContextConfigImpl : public virtual Ssl::ContextConfig { class ClientContextConfigImpl : public ContextConfigImpl, public Envoy::Ssl::ClientContextConfig { public: + static const std::string DEFAULT_CIPHER_SUITES; + static const std::string DEFAULT_CURVES; + ClientContextConfigImpl( const envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext& config, absl::string_view sigalgs, @@ -116,8 +119,6 @@ class ClientContextConfigImpl : public ContextConfigImpl, public Envoy::Ssl::Cli private: static const unsigned DEFAULT_MIN_VERSION; static const unsigned DEFAULT_MAX_VERSION; - static const std::string DEFAULT_CIPHER_SUITES; - static const std::string DEFAULT_CURVES; const std::string server_name_indication_; const bool allow_renegotiation_; diff --git a/source/extensions/transport_sockets/tls/context_impl.cc b/source/extensions/transport_sockets/tls/context_impl.cc index 7292bba9b005f..502739958e50e 100644 --- a/source/extensions/transport_sockets/tls/context_impl.cc +++ b/source/extensions/transport_sockets/tls/context_impl.cc @@ -19,9 +19,13 @@ #include "common/common/utility.h" #include "common/network/address_impl.h" #include "common/protobuf/utility.h" +#include "common/runtime/runtime_features.h" +#include "common/stats/utility.h" #include "extensions/transport_sockets/tls/utility.h" +#include 
"absl/container/node_hash_set.h" +#include "absl/strings/match.h" #include "absl/strings/str_join.h" #include "openssl/evp.h" #include "openssl/hmac.h" @@ -87,11 +91,24 @@ ContextImpl::ContextImpl(Stats::Scope& scope, const Envoy::Ssl::ContextConfig& c RELEASE_ASSERT(rc == 1, Utility::getLastCryptoError().value_or("")); if (!SSL_CTX_set_strict_cipher_list(ctx.ssl_ctx_.get(), config.cipherSuites().c_str())) { + // Break up a set of ciphers into each individual cipher and try them each individually in + // order to attempt to log which specific one failed. Example of config.cipherSuites(): + // "-ALL:[ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA-CHACHA20-POLY1305]:ECDHE-ECDSA-AES128-SHA". + // + // "-" is both an operator when in the leading position of a token (-ALL: don't allow this + // cipher), and the common separator in names (ECDHE-ECDSA-AES128-GCM-SHA256). Don't split on + // it because it will separate pieces of the same cipher. When it is a leading character, it + // is removed below. std::vector ciphers = - StringUtil::splitToken(config.cipherSuites(), ":+-![|]", false); + StringUtil::splitToken(config.cipherSuites(), ":+![|]", false); std::vector bad_ciphers; for (const auto& cipher : ciphers) { std::string cipher_str(cipher); + + if (absl::StartsWith(cipher_str, "-")) { + cipher_str.erase(cipher_str.begin()); + } + if (!SSL_CTX_set_strict_cipher_list(ctx.ssl_ctx_.get(), cipher_str.c_str())) { bad_ciphers.push_back(cipher_str); } @@ -252,7 +269,7 @@ ContextImpl::ContextImpl(Stats::Scope& scope, const Envoy::Ssl::ContextConfig& c } } - std::unordered_set cert_pkey_ids; + absl::node_hash_set cert_pkey_ids; for (uint32_t i = 0; i < tls_certificates.size(); ++i) { auto& ctx = tls_contexts_[i]; // Load certificate chain. 
@@ -450,7 +467,7 @@ ContextImpl::ContextImpl(Stats::Scope& scope, const Envoy::Ssl::ContextConfig& c int ServerContextImpl::alpnSelectCallback(const unsigned char** out, unsigned char* outlen, const unsigned char* in, unsigned int inlen) { // Currently this uses the standard selection algorithm in priority order. - const uint8_t* alpn_data = &parsed_alpn_protocols_[0]; + const uint8_t* alpn_data = parsed_alpn_protocols_.data(); size_t alpn_data_size = parsed_alpn_protocols_.size(); if (SSL_select_next_proto(const_cast(out), outlen, alpn_data, alpn_data_size, in, @@ -510,49 +527,50 @@ int ContextImpl::verifyCallback(X509_STORE_CTX* store_ctx, void* arg) { ContextImpl* impl = reinterpret_cast(arg); SSL* ssl = reinterpret_cast( X509_STORE_CTX_get_ex_data(store_ctx, SSL_get_ex_data_X509_STORE_CTX_idx())); - Envoy::Ssl::SslExtendedSocketInfo* sslExtendedInfo = + auto cert = bssl::UniquePtr(SSL_get_peer_certificate(ssl)); + return impl->doVerifyCertChain( + store_ctx, reinterpret_cast( - SSL_get_ex_data(ssl, ContextImpl::sslExtendedSocketInfoIndex())); + SSL_get_ex_data(ssl, ContextImpl::sslExtendedSocketInfoIndex())), + *cert, static_cast(SSL_get_app_data(ssl))); +} - if (impl->verify_trusted_ca_) { +int ContextImpl::doVerifyCertChain( + X509_STORE_CTX* store_ctx, Ssl::SslExtendedSocketInfo* ssl_extended_info, X509& leaf_cert, + const Network::TransportSocketOptions* transport_socket_options) { + if (verify_trusted_ca_) { int ret = X509_verify_cert(store_ctx); - if (sslExtendedInfo) { - sslExtendedInfo->setCertificateValidationStatus( + if (ssl_extended_info) { + ssl_extended_info->setCertificateValidationStatus( ret == 1 ? Envoy::Ssl::ClientValidationStatus::Validated : Envoy::Ssl::ClientValidationStatus::Failed); } if (ret <= 0) { - impl->stats_.fail_verify_error_.inc(); - return impl->allow_untrusted_certificate_ ? 1 : ret; + stats_.fail_verify_error_.inc(); + return allow_untrusted_certificate_ ? 
1 : ret; } } - bssl::UniquePtr cert(SSL_get_peer_certificate(ssl)); - - const Network::TransportSocketOptions* transport_socket_options = - static_cast(SSL_get_app_data(ssl)); - - Envoy::Ssl::ClientValidationStatus validated = impl->verifyCertificate( - cert.get(), + Envoy::Ssl::ClientValidationStatus validated = verifyCertificate( + &leaf_cert, transport_socket_options && !transport_socket_options->verifySubjectAltNameListOverride().empty() ? transport_socket_options->verifySubjectAltNameListOverride() - : impl->verify_subject_alt_name_list_, - impl->subject_alt_name_matchers_); + : verify_subject_alt_name_list_, + subject_alt_name_matchers_); - if (sslExtendedInfo) { - if (sslExtendedInfo->certificateValidationStatus() == + if (ssl_extended_info) { + if (ssl_extended_info->certificateValidationStatus() == Envoy::Ssl::ClientValidationStatus::NotValidated) { - sslExtendedInfo->setCertificateValidationStatus(validated); + ssl_extended_info->setCertificateValidationStatus(validated); } else if (validated != Envoy::Ssl::ClientValidationStatus::NotValidated) { - sslExtendedInfo->setCertificateValidationStatus(validated); + ssl_extended_info->setCertificateValidationStatus(validated); } } - return impl->allow_untrusted_certificate_ - ? 1 - : (validated != Envoy::Ssl::ClientValidationStatus::Failed); + return allow_untrusted_certificate_ ? 
1 + : (validated != Envoy::Ssl::ClientValidationStatus::Failed); } Envoy::Ssl::ClientValidationStatus ContextImpl::verifyCertificate( @@ -594,10 +612,9 @@ Envoy::Ssl::ClientValidationStatus ContextImpl::verifyCertificate( void ContextImpl::incCounter(const Stats::StatName name, absl::string_view value, const Stats::StatName fallback) const { - Stats::SymbolTable& symbol_table = scope_.symbolTable(); - Stats::SymbolTable::StoragePtr storage = - symbol_table.join({name, stat_name_set_->getBuiltin(value, fallback)}); - scope_.counterFromStatName(Stats::StatName(storage.get())).inc(); + Stats::Counter& counter = Stats::Utility::counterFromElements( + scope_, {name, stat_name_set_->getBuiltin(value, fallback)}); + counter.inc(); #ifdef LOG_BUILTIN_STAT_NAMES std::cerr << absl::StrCat("Builtin ", symbol_table.toString(name), ": ", value, "\n") @@ -659,7 +676,7 @@ bool ContextImpl::matchSubjectAltName( if (general_name->type == GEN_DNS && config_san_matcher.matcher().match_pattern_case() == envoy::type::matcher::v3::StringMatcher::MatchPatternCase::kExact - ? dnsNameMatch(config_san_matcher.matcher().exact(), san.c_str()) + ? 
dnsNameMatch(config_san_matcher.matcher().exact(), absl::string_view(san)) : config_san_matcher.match(san)) { return true; } @@ -687,16 +704,21 @@ bool ContextImpl::verifySubjectAltName(X509* cert, return false; } -bool ContextImpl::dnsNameMatch(const std::string& dns_name, const char* pattern) { +bool ContextImpl::dnsNameMatch(const absl::string_view dns_name, const absl::string_view pattern) { if (dns_name == pattern) { return true; } - size_t pattern_len = strlen(pattern); + size_t pattern_len = pattern.length(); if (pattern_len > 1 && pattern[0] == '*' && pattern[1] == '.') { if (dns_name.length() > pattern_len - 1) { const size_t off = dns_name.length() - pattern_len + 1; - return dns_name.compare(off, pattern_len - 1, pattern + 1) == 0; + if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.fix_wildcard_matching")) { + return dns_name.substr(0, off).find('.') == std::string::npos && + dns_name.substr(off, pattern_len - 1) == pattern.substr(1, pattern_len - 1); + } else { + return dns_name.substr(off, pattern_len - 1) == pattern.substr(1, pattern_len - 1); + } } } @@ -821,7 +843,7 @@ ClientContextImpl::ClientContextImpl(Stats::Scope& scope, ASSERT(tls_contexts_.size() == 1); if (!parsed_alpn_protocols_.empty()) { for (auto& ctx : tls_contexts_) { - const int rc = SSL_CTX_set_alpn_protos(ctx.ssl_ctx_.get(), &parsed_alpn_protocols_[0], + const int rc = SSL_CTX_set_alpn_protos(ctx.ssl_ctx_.get(), parsed_alpn_protocols_.data(), parsed_alpn_protocols_.size()); RELEASE_ASSERT(rc == 0, Utility::getLastCryptoError().value_or("")); } @@ -851,6 +873,18 @@ ClientContextImpl::ClientContextImpl(Stats::Scope& scope, } } +bool ContextImpl::parseAndSetAlpn(const std::vector& alpn, SSL& ssl) { + std::vector parsed_alpn = parseAlpnProtocols(absl::StrJoin(alpn, ",")); + if (!parsed_alpn.empty()) { + const int rc = SSL_set_alpn_protos(&ssl, parsed_alpn.data(), parsed_alpn.size()); + // This should only if memory allocation fails, e.g. OOM. 
+ RELEASE_ASSERT(rc == 0, Utility::getLastCryptoError().value_or("")); + return true; + } + + return false; +} + bssl::UniquePtr ClientContextImpl::newSsl(const Network::TransportSocketOptions* options) { bssl::UniquePtr ssl_con(ContextImpl::newSsl(options)); @@ -868,14 +902,23 @@ bssl::UniquePtr ClientContextImpl::newSsl(const Network::TransportSocketOpt SSL_set_verify(ssl_con.get(), SSL_VERIFY_PEER | SSL_VERIFY_FAIL_IF_NO_PEER_CERT, nullptr); } - if (options && !options->applicationProtocolListOverride().empty()) { - std::vector parsed_override_alpn = - parseAlpnProtocols(absl::StrJoin(options->applicationProtocolListOverride(), ",")); - if (!parsed_override_alpn.empty()) { - const int rc = SSL_set_alpn_protos(ssl_con.get(), parsed_override_alpn.data(), - parsed_override_alpn.size()); - RELEASE_ASSERT(rc == 0, Utility::getLastCryptoError().value_or("")); - } + // We determine what ALPN using the following precedence: + // 1. Option-provided ALPN override. + // 2. ALPN statically configured in the upstream TLS context. + // 3. Option-provided ALPN fallback. + + // At this point in the code the ALPN has already been set (if present) to the value specified in + // the TLS context. We've stored this value in parsed_alpn_protocols_ so we can check that to see + // if it's already been set. + bool has_alpn_defined = !parsed_alpn_protocols_.empty(); + if (options) { + // ALPN override takes precedence over TLS context specified, so blindly overwrite it. + has_alpn_defined |= parseAndSetAlpn(options->applicationProtocolListOverride(), *ssl_con); + } + + if (options && !has_alpn_defined && options->applicationProtocolFallback().has_value()) { + // If ALPN hasn't already been set (either through TLS context or override), use the fallback. 
+ parseAndSetAlpn({*options->applicationProtocolFallback()}, *ssl_con); } if (allow_renegotiation_) { @@ -1352,6 +1395,28 @@ bool ServerContextImpl::TlsContext::isCipherEnabled(uint16_t cipher_id, uint16_t return false; } +bool ContextImpl::verifyCertChain(X509& leaf_cert, STACK_OF(X509) & intermediates, + std::string& error_details) { + bssl::UniquePtr ctx(X509_STORE_CTX_new()); + // It doesn't matter which SSL context is used, because they share the same + // cert validation config. + X509_STORE* store = SSL_CTX_get_cert_store(tls_contexts_[0].ssl_ctx_.get()); + if (!X509_STORE_CTX_init(ctx.get(), store, &leaf_cert, &intermediates)) { + error_details = "Failed to verify certificate chain: X509_STORE_CTX_init"; + return false; + } + + int res = doVerifyCertChain(ctx.get(), nullptr, leaf_cert, nullptr); + if (res <= 0) { + const int n = X509_STORE_CTX_get_error(ctx.get()); + const int depth = X509_STORE_CTX_get_error_depth(ctx.get()); + error_details = absl::StrCat("X509_verify_cert: certificate verification error at depth ", + depth, ": ", X509_verify_cert_error_string(n)); + return false; + } + return true; +} + } // namespace Tls } // namespace TransportSockets } // namespace Extensions diff --git a/source/extensions/transport_sockets/tls/context_impl.h b/source/extensions/transport_sockets/tls/context_impl.h index b72168337d72c..5ea35a48228e4 100644 --- a/source/extensions/transport_sockets/tls/context_impl.h +++ b/source/extensions/transport_sockets/tls/context_impl.h @@ -84,7 +84,7 @@ class ContextImpl : public virtual Envoy::Ssl::Context { * @param pattern the pattern to match against (*.example.com) * @return true if the san matches pattern */ - static bool dnsNameMatch(const std::string& dns_name, const char* pattern); + static bool dnsNameMatch(const absl::string_view dns_name, const absl::string_view pattern); SslStats& stats() { return stats_; } @@ -101,6 +101,8 @@ class ContextImpl : public virtual Envoy::Ssl::Context { std::vector 
getPrivateKeyMethodProviders(); + bool verifyCertChain(X509& leaf_cert, STACK_OF(X509) & intermediates, std::string& error_details); + protected: ContextImpl(Stats::Scope& scope, const Envoy::Ssl::ContextConfig& config, TimeSource& time_source); @@ -117,6 +119,11 @@ class ContextImpl : public virtual Envoy::Ssl::Context { // A SSL_CTX_set_cert_verify_callback for custom cert validation. static int verifyCallback(X509_STORE_CTX* store_ctx, void* arg); + // Called by verifyCallback to do the actual cert chain verification. + int doVerifyCertChain(X509_STORE_CTX* store_ctx, Ssl::SslExtendedSocketInfo* ssl_extended_info, + X509& leaf_cert, + const Network::TransportSocketOptions* transport_socket_options); + Envoy::Ssl::ClientValidationStatus verifyCertificate(X509* cert, const std::vector& verify_san_list, const std::vector& subject_alt_name_matchers); @@ -143,6 +150,7 @@ class ContextImpl : public virtual Envoy::Ssl::Context { static bool verifyCertificateSpkiList(X509* cert, const std::vector>& expected_hashes); + bool parseAndSetAlpn(const std::vector& alpn, SSL& ssl); std::vector parseAlpnProtocols(const std::string& alpn_protocols); static SslStats generateStats(Stats::Scope& scope); diff --git a/source/extensions/transport_sockets/tls/private_key/BUILD b/source/extensions/transport_sockets/tls/private_key/BUILD index 8f8a96663c146..f6163ca640126 100644 --- a/source/extensions/transport_sockets/tls/private_key/BUILD +++ b/source/extensions/transport_sockets/tls/private_key/BUILD @@ -1,12 +1,12 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) -envoy_package() +licenses(["notice"]) # Apache 2 + +envoy_extension_package() envoy_cc_library( name = "private_key_manager_lib", diff --git a/source/extensions/transport_sockets/tls/ssl_socket.cc b/source/extensions/transport_sockets/tls/ssl_socket.cc index 1e3082f806536..ab2644ccc8084 100644 --- 
a/source/extensions/transport_sockets/tls/ssl_socket.cc +++ b/source/extensions/transport_sockets/tls/ssl_socket.cc @@ -47,14 +47,13 @@ SslSocket::SslSocket(Envoy::Ssl::ContextSharedPtr ctx, InitialState state, : transport_socket_options_(transport_socket_options), ctx_(std::dynamic_pointer_cast(ctx)), state_(SocketState::PreHandshake) { bssl::UniquePtr ssl = ctx_->newSsl(transport_socket_options_.get()); - ssl_ = ssl.get(); info_ = std::make_shared(std::move(ssl), ctx_); if (state == InitialState::Client) { - SSL_set_connect_state(ssl_); + SSL_set_connect_state(rawSsl()); } else { ASSERT(state == InitialState::Server); - SSL_set_accept_state(ssl_); + SSL_set_accept_state(rawSsl()); } } @@ -65,11 +64,11 @@ void SslSocket::setTransportSocketCallbacks(Network::TransportSocketCallbacks& c // Associate this SSL connection with all the certificates (with their potentially different // private key methods). for (auto const& provider : ctx_->getPrivateKeyMethodProviders()) { - provider->registerPrivateKeyMethod(ssl_, *this, callbacks_->connection().dispatcher()); + provider->registerPrivateKeyMethod(rawSsl(), *this, callbacks_->connection().dispatcher()); } BIO* bio = BIO_new_socket(callbacks_->ioHandle().fd(), 0); - SSL_set_bio(ssl_, bio, bio); + SSL_set_bio(rawSsl(), bio, bio); } SslSocket::ReadResult SslSocket::sslReadIntoSlice(Buffer::RawSlice& slice) { @@ -77,7 +76,7 @@ SslSocket::ReadResult SslSocket::sslReadIntoSlice(Buffer::RawSlice& slice) { uint8_t* mem = static_cast(slice.mem_); size_t remaining = slice.len_; while (remaining > 0) { - int rc = SSL_read(ssl_, mem, remaining); + int rc = SSL_read(rawSsl(), mem, remaining); ENVOY_CONN_LOG(trace, "ssl read returns: {}", callbacks_->connection(), rc); if (rc > 0) { ASSERT(static_cast(rc) <= remaining); @@ -124,7 +123,7 @@ Network::IoResult SslSocket::doRead(Buffer::Instance& read_buffer) { } if (result.error_.has_value()) { keep_reading = false; - int err = SSL_get_error(ssl_, result.error_.value()); + int err = 
SSL_get_error(rawSsl(), result.error_.value()); switch (err) { case SSL_ERROR_WANT_READ: break; @@ -171,11 +170,11 @@ void SslSocket::onPrivateKeyMethodComplete() { PostIoAction SslSocket::doHandshake() { ASSERT(state_ != SocketState::HandshakeComplete && state_ != SocketState::ShutdownSent); - int rc = SSL_do_handshake(ssl_); + int rc = SSL_do_handshake(rawSsl()); if (rc == 1) { ENVOY_CONN_LOG(debug, "handshake complete", callbacks_->connection()); state_ = SocketState::HandshakeComplete; - ctx_->logHandshake(ssl_); + ctx_->logHandshake(rawSsl()); callbacks_->raiseEvent(Network::ConnectionEvent::Connected); // It's possible that we closed during the handshake callback. @@ -183,7 +182,7 @@ PostIoAction SslSocket::doHandshake() { ? PostIoAction::KeepOpen : PostIoAction::Close; } else { - int err = SSL_get_error(ssl_, rc); + int err = SSL_get_error(rawSsl(), rc); switch (err) { case SSL_ERROR_WANT_READ: case SSL_ERROR_WANT_WRITE: @@ -255,7 +254,7 @@ Network::IoResult SslSocket::doWrite(Buffer::Instance& write_buffer, bool end_st // it again with the same parameters. This is done by tracking last write size, but not write // data, since linearize() will return the same undrained data anyway. 
ASSERT(bytes_to_write <= write_buffer.length()); - int rc = SSL_write(ssl_, write_buffer.linearize(bytes_to_write), bytes_to_write); + int rc = SSL_write(rawSsl(), write_buffer.linearize(bytes_to_write), bytes_to_write); ENVOY_CONN_LOG(trace, "ssl write returns: {}", callbacks_->connection(), rc); if (rc > 0) { ASSERT(rc == static_cast(bytes_to_write)); @@ -263,7 +262,7 @@ Network::IoResult SslSocket::doWrite(Buffer::Instance& write_buffer, bool end_st write_buffer.drain(rc); bytes_to_write = std::min(write_buffer.length(), static_cast(16384)); } else { - int err = SSL_get_error(ssl_, rc); + int err = SSL_get_error(rawSsl(), rc); switch (err) { case SSL_ERROR_WANT_WRITE: bytes_to_retry_ = bytes_to_write; @@ -294,7 +293,7 @@ void SslSocket::shutdownSsl() { ASSERT(state_ != SocketState::PreHandshake); if (state_ != SocketState::ShutdownSent && callbacks_->connection().state() != Network::Connection::State::Closed) { - int rc = SSL_shutdown(ssl_); + int rc = SSL_shutdown(rawSsl()); ENVOY_CONN_LOG(debug, "SSL shutdown: rc={}", callbacks_->connection(), rc); drainErrorQueue(); state_ = SocketState::ShutdownSent; @@ -316,7 +315,7 @@ SslSocketInfo::SslSocketInfo(bssl::UniquePtr ssl, ContextImplSharedPtr ctx) } bool SslSocketInfo::peerCertificatePresented() const { - bssl::UniquePtr cert(SSL_get_peer_certificate(ssl_.get())); + bssl::UniquePtr cert(SSL_get_peer_certificate(ssl())); return cert != nullptr; } @@ -331,7 +330,7 @@ absl::Span SslSocketInfo::uriSanLocalCertificate() const { } // The cert object is not owned. 
- X509* cert = SSL_get_certificate(ssl_.get()); + X509* cert = SSL_get_certificate(ssl()); if (!cert) { ASSERT(cached_uri_san_local_certificate_.empty()); return cached_uri_san_local_certificate_; @@ -345,7 +344,7 @@ absl::Span SslSocketInfo::dnsSansLocalCertificate() const { return cached_dns_san_local_certificate_; } - X509* cert = SSL_get_certificate(ssl_.get()); + X509* cert = SSL_get_certificate(ssl()); if (!cert) { ASSERT(cached_dns_san_local_certificate_.empty()); return cached_dns_san_local_certificate_; @@ -358,7 +357,7 @@ const std::string& SslSocketInfo::sha256PeerCertificateDigest() const { if (!cached_sha_256_peer_certificate_digest_.empty()) { return cached_sha_256_peer_certificate_digest_; } - bssl::UniquePtr cert(SSL_get_peer_certificate(ssl_.get())); + bssl::UniquePtr cert(SSL_get_peer_certificate(ssl())); if (!cert) { ASSERT(cached_sha_256_peer_certificate_digest_.empty()); return cached_sha_256_peer_certificate_digest_; @@ -372,11 +371,29 @@ const std::string& SslSocketInfo::sha256PeerCertificateDigest() const { return cached_sha_256_peer_certificate_digest_; } +const std::string& SslSocketInfo::sha1PeerCertificateDigest() const { + if (!cached_sha_1_peer_certificate_digest_.empty()) { + return cached_sha_1_peer_certificate_digest_; + } + bssl::UniquePtr cert(SSL_get_peer_certificate(ssl())); + if (!cert) { + ASSERT(cached_sha_1_peer_certificate_digest_.empty()); + return cached_sha_1_peer_certificate_digest_; + } + + std::vector computed_hash(SHA_DIGEST_LENGTH); + unsigned int n; + X509_digest(cert.get(), EVP_sha1(), computed_hash.data(), &n); + RELEASE_ASSERT(n == computed_hash.size(), ""); + cached_sha_1_peer_certificate_digest_ = Hex::encode(computed_hash); + return cached_sha_1_peer_certificate_digest_; +} + const std::string& SslSocketInfo::urlEncodedPemEncodedPeerCertificate() const { if (!cached_url_encoded_pem_encoded_peer_certificate_.empty()) { return cached_url_encoded_pem_encoded_peer_certificate_; } - bssl::UniquePtr 
cert(SSL_get_peer_certificate(ssl_.get())); + bssl::UniquePtr cert(SSL_get_peer_certificate(ssl())); if (!cert) { ASSERT(cached_url_encoded_pem_encoded_peer_certificate_.empty()); return cached_url_encoded_pem_encoded_peer_certificate_; @@ -399,7 +416,7 @@ const std::string& SslSocketInfo::urlEncodedPemEncodedPeerCertificateChain() con return cached_url_encoded_pem_encoded_peer_cert_chain_; } - STACK_OF(X509)* cert_chain = SSL_get_peer_full_cert_chain(ssl_.get()); + STACK_OF(X509)* cert_chain = SSL_get_peer_full_cert_chain(ssl()); if (cert_chain == nullptr) { ASSERT(cached_url_encoded_pem_encoded_peer_cert_chain_.empty()); return cached_url_encoded_pem_encoded_peer_cert_chain_; @@ -429,7 +446,7 @@ absl::Span SslSocketInfo::uriSanPeerCertificate() const { return cached_uri_san_peer_certificate_; } - bssl::UniquePtr cert(SSL_get_peer_certificate(ssl_.get())); + bssl::UniquePtr cert(SSL_get_peer_certificate(ssl())); if (!cert) { ASSERT(cached_uri_san_peer_certificate_.empty()); return cached_uri_san_peer_certificate_; @@ -443,7 +460,7 @@ absl::Span SslSocketInfo::dnsSansPeerCertificate() const { return cached_dns_san_peer_certificate_; } - bssl::UniquePtr cert(SSL_get_peer_certificate(ssl_.get())); + bssl::UniquePtr cert(SSL_get_peer_certificate(ssl())); if (!cert) { ASSERT(cached_dns_san_peer_certificate_.empty()); return cached_dns_san_peer_certificate_; @@ -455,7 +472,7 @@ absl::Span SslSocketInfo::dnsSansPeerCertificate() const { void SslSocket::closeSocket(Network::ConnectionEvent) { // Unregister the SSL connection object from private key method providers. for (auto const& provider : ctx_->getPrivateKeyMethodProviders()) { - provider->unregisterPrivateKeyMethod(ssl_); + provider->unregisterPrivateKeyMethod(rawSsl()); } // Attempt to send a shutdown before closing the socket. 
It's possible this won't go out if @@ -469,12 +486,12 @@ void SslSocket::closeSocket(Network::ConnectionEvent) { std::string SslSocket::protocol() const { const unsigned char* proto; unsigned int proto_len; - SSL_get0_alpn_selected(ssl_, &proto, &proto_len); + SSL_get0_alpn_selected(rawSsl(), &proto, &proto_len); return std::string(reinterpret_cast(proto), proto_len); } uint16_t SslSocketInfo::ciphersuiteId() const { - const SSL_CIPHER* cipher = SSL_get_current_cipher(ssl_.get()); + const SSL_CIPHER* cipher = SSL_get_current_cipher(ssl()); if (cipher == nullptr) { return 0xffff; } @@ -486,7 +503,7 @@ uint16_t SslSocketInfo::ciphersuiteId() const { } std::string SslSocketInfo::ciphersuiteString() const { - const SSL_CIPHER* cipher = SSL_get_current_cipher(ssl_.get()); + const SSL_CIPHER* cipher = SSL_get_current_cipher(ssl()); if (cipher == nullptr) { return {}; } @@ -498,12 +515,12 @@ const std::string& SslSocketInfo::tlsVersion() const { if (!cached_tls_version_.empty()) { return cached_tls_version_; } - cached_tls_version_ = SSL_get_version(ssl_.get()); + cached_tls_version_ = SSL_get_version(ssl()); return cached_tls_version_; } absl::optional SslSocketInfo::x509Extension(absl::string_view extension_name) const { - bssl::UniquePtr cert(SSL_get_peer_certificate(ssl_.get())); + bssl::UniquePtr cert(SSL_get_peer_certificate(ssl())); if (!cert) { return absl::nullopt; } @@ -516,7 +533,7 @@ const std::string& SslSocketInfo::serialNumberPeerCertificate() const { if (!cached_serial_number_peer_certificate_.empty()) { return cached_serial_number_peer_certificate_; } - bssl::UniquePtr cert(SSL_get_peer_certificate(ssl_.get())); + bssl::UniquePtr cert(SSL_get_peer_certificate(ssl())); if (!cert) { ASSERT(cached_serial_number_peer_certificate_.empty()); return cached_serial_number_peer_certificate_; @@ -529,7 +546,7 @@ const std::string& SslSocketInfo::issuerPeerCertificate() const { if (!cached_issuer_peer_certificate_.empty()) { return cached_issuer_peer_certificate_; } 
- bssl::UniquePtr cert(SSL_get_peer_certificate(ssl_.get())); + bssl::UniquePtr cert(SSL_get_peer_certificate(ssl())); if (!cert) { ASSERT(cached_issuer_peer_certificate_.empty()); return cached_issuer_peer_certificate_; @@ -542,7 +559,7 @@ const std::string& SslSocketInfo::subjectPeerCertificate() const { if (!cached_subject_peer_certificate_.empty()) { return cached_subject_peer_certificate_; } - bssl::UniquePtr cert(SSL_get_peer_certificate(ssl_.get())); + bssl::UniquePtr cert(SSL_get_peer_certificate(ssl())); if (!cert) { ASSERT(cached_subject_peer_certificate_.empty()); return cached_subject_peer_certificate_; @@ -555,7 +572,7 @@ const std::string& SslSocketInfo::subjectLocalCertificate() const { if (!cached_subject_local_certificate_.empty()) { return cached_subject_local_certificate_; } - X509* cert = SSL_get_certificate(ssl_.get()); + X509* cert = SSL_get_certificate(ssl()); if (!cert) { ASSERT(cached_subject_local_certificate_.empty()); return cached_subject_local_certificate_; @@ -565,7 +582,7 @@ const std::string& SslSocketInfo::subjectLocalCertificate() const { } absl::optional SslSocketInfo::validFromPeerCertificate() const { - bssl::UniquePtr cert(SSL_get_peer_certificate(ssl_.get())); + bssl::UniquePtr cert(SSL_get_peer_certificate(ssl())); if (!cert) { return absl::nullopt; } @@ -573,7 +590,7 @@ absl::optional SslSocketInfo::validFromPeerCertificate() const { } absl::optional SslSocketInfo::expirationPeerCertificate() const { - bssl::UniquePtr cert(SSL_get_peer_certificate(ssl_.get())); + bssl::UniquePtr cert(SSL_get_peer_certificate(ssl())); if (!cert) { return absl::nullopt; } @@ -584,7 +601,7 @@ const std::string& SslSocketInfo::sessionId() const { if (!cached_session_id_.empty()) { return cached_session_id_; } - SSL_SESSION* session = SSL_get_session(ssl_.get()); + SSL_SESSION* session = SSL_get_session(ssl()); if (session == nullptr) { ASSERT(cached_session_id_.empty()); return cached_session_id_; diff --git 
a/source/extensions/transport_sockets/tls/ssl_socket.h b/source/extensions/transport_sockets/tls/ssl_socket.h index 43ee5efdfceb7..27416ce7f635a 100644 --- a/source/extensions/transport_sockets/tls/ssl_socket.h +++ b/source/extensions/transport_sockets/tls/ssl_socket.h @@ -60,6 +60,7 @@ class SslSocketInfo : public Envoy::Ssl::ConnectionInfo { bool peerCertificateValidated() const override; absl::Span uriSanLocalCertificate() const override; const std::string& sha256PeerCertificateDigest() const override; + const std::string& sha1PeerCertificateDigest() const override; const std::string& serialNumberPeerCertificate() const override; const std::string& issuerPeerCertificate() const override; const std::string& subjectPeerCertificate() const override; @@ -76,14 +77,14 @@ class SslSocketInfo : public Envoy::Ssl::ConnectionInfo { std::string ciphersuiteString() const override; const std::string& tlsVersion() const override; absl::optional x509Extension(absl::string_view extension_name) const override; - - SSL* rawSslForTest() const { return ssl_.get(); } + SSL* ssl() const { return ssl_.get(); } bssl::UniquePtr ssl_; private: mutable std::vector cached_uri_san_local_certificate_; mutable std::string cached_sha_256_peer_certificate_digest_; + mutable std::string cached_sha_1_peer_certificate_digest_; mutable std::string cached_serial_number_peer_certificate_; mutable std::string cached_issuer_peer_certificate_; mutable std::string cached_subject_peer_certificate_; @@ -98,6 +99,8 @@ class SslSocketInfo : public Envoy::Ssl::ConnectionInfo { mutable SslExtendedSocketInfoImpl extended_socket_info_; }; +using SslSocketInfoConstSharedPtr = std::shared_ptr; + class SslSocket : public Network::TransportSocket, public Envoy::Ssl::PrivateKeyConnectionCallbacks, protected Logger::Loggable { @@ -118,7 +121,10 @@ class SslSocket : public Network::TransportSocket, // Ssl::PrivateKeyConnectionCallbacks void onPrivateKeyMethodComplete() override; - SSL* rawSslForTest() const { return 
ssl_; } + SSL* rawSslForTest() const { return rawSsl(); } + +protected: + SSL* rawSsl() const { return info_->ssl_.get(); } private: struct ReadResult { @@ -141,8 +147,7 @@ class SslSocket : public Network::TransportSocket, std::string failure_reason_; SocketState state_; - SSL* ssl_; - Ssl::ConnectionInfoConstSharedPtr info_; + SslSocketInfoConstSharedPtr info_; }; class ClientSslSocketFactory : public Network::TransportSocketFactory, diff --git a/source/extensions/transport_sockets/well_known_names.h b/source/extensions/transport_sockets/well_known_names.h index 404357f454771..471e1e8b60cfc 100644 --- a/source/extensions/transport_sockets/well_known_names.h +++ b/source/extensions/transport_sockets/well_known_names.h @@ -15,10 +15,11 @@ namespace TransportSockets { class TransportSocketNameValues { public: const std::string Alts = "envoy.transport_sockets.alts"; - const std::string Tap = "envoy.transport_sockets.tap"; + const std::string Quic = "envoy.transport_sockets.quic"; const std::string RawBuffer = "envoy.transport_sockets.raw_buffer"; + const std::string Tap = "envoy.transport_sockets.tap"; const std::string Tls = "envoy.transport_sockets.tls"; - const std::string Quic = "envoy.transport_sockets.quic"; + const std::string UpstreamProxyProtocol = "envoy.transport_sockets.upstream_proxy_protocol"; }; using TransportSocketNames = ConstSingleton; diff --git a/source/extensions/upstreams/http/generic/BUILD b/source/extensions/upstreams/http/generic/BUILD new file mode 100644 index 0000000000000..563b4bf5a9e2b --- /dev/null +++ b/source/extensions/upstreams/http/generic/BUILD @@ -0,0 +1,26 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", + "envoy_extension_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_extension_package() + +envoy_cc_extension( + name = "config", + srcs = [ + "config.cc", + ], + hdrs = [ + "config.h", + ], + security_posture = "robust_to_untrusted_downstream", + visibility = ["//visibility:public"], + deps = [ + 
"//source/extensions/upstreams/http/http:upstream_request_lib", + "//source/extensions/upstreams/http/tcp:upstream_request_lib", + "@envoy_api//envoy/extensions/upstreams/http/generic/v3:pkg_cc_proto", + ], +) diff --git a/source/extensions/upstreams/http/generic/config.cc b/source/extensions/upstreams/http/generic/config.cc new file mode 100644 index 0000000000000..3404f49bf46ab --- /dev/null +++ b/source/extensions/upstreams/http/generic/config.cc @@ -0,0 +1,32 @@ +#include "extensions/upstreams/http/generic/config.h" + +#include "extensions/upstreams/http/http/upstream_request.h" +#include "extensions/upstreams/http/tcp/upstream_request.h" + +namespace Envoy { +namespace Extensions { +namespace Upstreams { +namespace Http { +namespace Generic { + +Router::GenericConnPoolPtr GenericGenericConnPoolFactory::createGenericConnPool( + Upstream::ClusterManager& cm, bool is_connect, const Router::RouteEntry& route_entry, + absl::optional downstream_protocol, + Upstream::LoadBalancerContext* ctx) const { + if (is_connect) { + auto ret = std::make_unique(cm, is_connect, route_entry, + downstream_protocol, ctx); + return (ret->valid() ? std::move(ret) : nullptr); + } + auto ret = std::make_unique(cm, is_connect, route_entry, + downstream_protocol, ctx); + return (ret->valid() ? 
std::move(ret) : nullptr); +} + +REGISTER_FACTORY(GenericGenericConnPoolFactory, Router::GenericConnPoolFactory); + +} // namespace Generic +} // namespace Http +} // namespace Upstreams +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/upstreams/http/generic/config.h b/source/extensions/upstreams/http/generic/config.h new file mode 100644 index 0000000000000..1c2f1a2f16d3d --- /dev/null +++ b/source/extensions/upstreams/http/generic/config.h @@ -0,0 +1,38 @@ +#pragma once + +#include "envoy/extensions/upstreams/http/generic/v3/generic_connection_pool.pb.h" +#include "envoy/registry/registry.h" +#include "envoy/router/router.h" + +namespace Envoy { +namespace Extensions { +namespace Upstreams { +namespace Http { +namespace Generic { + +/** + * Config registration for the GenericConnPool. * @see Router::GenericConnPoolFactory + */ +class GenericGenericConnPoolFactory : public Router::GenericConnPoolFactory { +public: + std::string name() const override { return "envoy.filters.connection_pools.http.generic"; } + std::string category() const override { return "envoy.upstreams"; } + Router::GenericConnPoolPtr + createGenericConnPool(Upstream::ClusterManager& cm, bool is_connect, + const Router::RouteEntry& route_entry, + absl::optional downstream_protocol, + Upstream::LoadBalancerContext* ctx) const override; + + ProtobufTypes::MessagePtr createEmptyConfigProto() override { + return std::make_unique< + envoy::extensions::upstreams::http::generic::v3::GenericConnectionPoolProto>(); + } +}; + +DECLARE_FACTORY(GenericGenericConnPoolFactory); + +} // namespace Generic +} // namespace Http +} // namespace Upstreams +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/upstreams/http/http/BUILD b/source/extensions/upstreams/http/http/BUILD new file mode 100644 index 0000000000000..4c0b5be394b94 --- /dev/null +++ b/source/extensions/upstreams/http/http/BUILD @@ -0,0 +1,54 @@ +load( + "//bazel:envoy_build_system.bzl", + 
"envoy_cc_extension", + "envoy_cc_library", + "envoy_extension_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_extension_package() + +envoy_cc_extension( + name = "config", + srcs = [ + "config.cc", + ], + hdrs = [ + "config.h", + ], + security_posture = "robust_to_untrusted_downstream", + visibility = ["//visibility:public"], + deps = [ + ":upstream_request_lib", + "@envoy_api//envoy/extensions/upstreams/http/http/v3:pkg_cc_proto", + ], +) + +envoy_cc_library( + name = "upstream_request_lib", + srcs = [ + "upstream_request.cc", + ], + hdrs = [ + "upstream_request.h", + ], + deps = [ + "//include/envoy/http:codes_interface", + "//include/envoy/http:conn_pool_interface", + "//include/envoy/http:filter_interface", + "//include/envoy/upstream:cluster_manager_interface", + "//include/envoy/upstream:upstream_interface", + "//source/common/common:assert_lib", + "//source/common/common:minimal_logger_lib", + "//source/common/http:codes_lib", + "//source/common/http:header_map_lib", + "//source/common/http:headers_lib", + "//source/common/http:message_lib", + "//source/common/network:application_protocol_lib", + "//source/common/network:transport_socket_options_lib", + "//source/common/router:router_lib", + "//source/common/upstream:load_balancer_lib", + "//source/extensions/common/proxy_protocol:proxy_protocol_header_lib", + ], +) diff --git a/source/extensions/upstreams/http/http/config.cc b/source/extensions/upstreams/http/http/config.cc new file mode 100644 index 0000000000000..e8c933f45216d --- /dev/null +++ b/source/extensions/upstreams/http/http/config.cc @@ -0,0 +1,25 @@ +#include "extensions/upstreams/http/http/config.h" + +#include "extensions/upstreams/http/http/upstream_request.h" + +namespace Envoy { +namespace Extensions { +namespace Upstreams { +namespace Http { +namespace Http { + +Router::GenericConnPoolPtr HttpGenericConnPoolFactory::createGenericConnPool( + Upstream::ClusterManager& cm, bool is_connect, const Router::RouteEntry& route_entry, + 
absl::optional downstream_protocol, + Upstream::LoadBalancerContext* ctx) const { + auto ret = std::make_unique(cm, is_connect, route_entry, downstream_protocol, ctx); + return (ret->valid() ? std::move(ret) : nullptr); +} + +REGISTER_FACTORY(HttpGenericConnPoolFactory, Router::GenericConnPoolFactory); + +} // namespace Http +} // namespace Http +} // namespace Upstreams +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/upstreams/http/http/config.h b/source/extensions/upstreams/http/http/config.h new file mode 100644 index 0000000000000..4c6036ddf3b50 --- /dev/null +++ b/source/extensions/upstreams/http/http/config.h @@ -0,0 +1,38 @@ +#pragma once + +#include "envoy/extensions/upstreams/http/http/v3/http_connection_pool.pb.h" +#include "envoy/registry/registry.h" +#include "envoy/router/router.h" + +namespace Envoy { +namespace Extensions { +namespace Upstreams { +namespace Http { +namespace Http { + +/** + * Config registration for the HttpConnPool. @see Router::GenericConnPoolFactory + */ +class HttpGenericConnPoolFactory : public Router::GenericConnPoolFactory { +public: + std::string name() const override { return "envoy.filters.connection_pools.http.http"; } + std::string category() const override { return "envoy.upstreams"; } + Router::GenericConnPoolPtr + createGenericConnPool(Upstream::ClusterManager& cm, bool is_connect, + const Router::RouteEntry& route_entry, + absl::optional downstream_protocol, + Upstream::LoadBalancerContext* ctx) const override; + + ProtobufTypes::MessagePtr createEmptyConfigProto() override { + return std::make_unique< + envoy::extensions::upstreams::http::http::v3::HttpConnectionPoolProto>(); + } +}; + +DECLARE_FACTORY(HttpGenericConnPoolFactory); + +} // namespace Http +} // namespace Http +} // namespace Upstreams +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/upstreams/http/http/upstream_request.cc b/source/extensions/upstreams/http/http/upstream_request.cc new file 
mode 100644 index 0000000000000..dce29ea2d9d8f --- /dev/null +++ b/source/extensions/upstreams/http/http/upstream_request.cc @@ -0,0 +1,75 @@ +#include "extensions/upstreams/http/http/upstream_request.h" + +#include +#include + +#include "envoy/event/dispatcher.h" +#include "envoy/event/timer.h" +#include "envoy/grpc/status.h" +#include "envoy/http/conn_pool.h" +#include "envoy/upstream/cluster_manager.h" +#include "envoy/upstream/upstream.h" + +#include "common/common/assert.h" +#include "common/common/utility.h" +#include "common/http/codes.h" +#include "common/http/header_map_impl.h" +#include "common/http/headers.h" +#include "common/http/message_impl.h" +#include "common/http/utility.h" +#include "common/router/router.h" + +using Envoy::Router::GenericConnectionPoolCallbacks; + +namespace Envoy { +namespace Extensions { +namespace Upstreams { +namespace Http { +namespace Http { + +void HttpConnPool::newStream(GenericConnectionPoolCallbacks* callbacks) { + callbacks_ = callbacks; + // It's possible for a reset to happen inline within the newStream() call. In this case, we + // might get deleted inline as well. Only write the returned handle out if it is not nullptr to + // deal with this case. 
+ Envoy::Http::ConnectionPool::Cancellable* handle = + conn_pool_->newStream(callbacks->upstreamToDownstream(), *this); + if (handle) { + conn_pool_stream_handle_ = handle; + } +} + +bool HttpConnPool::cancelAnyPendingRequest() { + if (conn_pool_stream_handle_) { + conn_pool_stream_handle_->cancel(ConnectionPool::CancelPolicy::Default); + conn_pool_stream_handle_ = nullptr; + return true; + } + return false; +} + +absl::optional HttpConnPool::protocol() const { + return conn_pool_->protocol(); +} + +void HttpConnPool::onPoolFailure(ConnectionPool::PoolFailureReason reason, + absl::string_view transport_failure_reason, + Upstream::HostDescriptionConstSharedPtr host) { + callbacks_->onPoolFailure(reason, transport_failure_reason, host); +} + +void HttpConnPool::onPoolReady(Envoy::Http::RequestEncoder& request_encoder, + Upstream::HostDescriptionConstSharedPtr host, + const StreamInfo::StreamInfo& info) { + conn_pool_stream_handle_ = nullptr; + auto upstream = + std::make_unique(callbacks_->upstreamToDownstream(), &request_encoder); + callbacks_->onPoolReady(std::move(upstream), host, + request_encoder.getStream().connectionLocalAddress(), info); +} + +} // namespace Http +} // namespace Http +} // namespace Upstreams +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/upstreams/http/http/upstream_request.h b/source/extensions/upstreams/http/http/upstream_request.h new file mode 100644 index 0000000000000..fa01ddbb5d478 --- /dev/null +++ b/source/extensions/upstreams/http/http/upstream_request.h @@ -0,0 +1,102 @@ +#pragma once + +#include +#include + +#include "envoy/http/codes.h" +#include "envoy/http/conn_pool.h" + +#include "common/common/logger.h" +#include "common/config/well_known_names.h" +#include "common/router/upstream_request.h" + +namespace Envoy { +namespace Extensions { +namespace Upstreams { +namespace Http { +namespace Http { + +class HttpConnPool : public Router::GenericConnPool, public 
Envoy::Http::ConnectionPool::Callbacks { +public: + // GenericConnPool + HttpConnPool(Upstream::ClusterManager& cm, bool is_connect, const Router::RouteEntry& route_entry, + absl::optional downstream_protocol, + Upstream::LoadBalancerContext* ctx) { + ASSERT(!is_connect); + conn_pool_ = cm.httpConnPoolForCluster(route_entry.clusterName(), route_entry.priority(), + downstream_protocol, ctx); + } + void newStream(Router::GenericConnectionPoolCallbacks* callbacks) override; + bool cancelAnyPendingRequest() override; + absl::optional protocol() const override; + + // Http::ConnectionPool::Callbacks + void onPoolFailure(ConnectionPool::PoolFailureReason reason, + absl::string_view transport_failure_reason, + Upstream::HostDescriptionConstSharedPtr host) override; + void onPoolReady(Envoy::Http::RequestEncoder& callbacks_encoder, + Upstream::HostDescriptionConstSharedPtr host, + const StreamInfo::StreamInfo& info) override; + Upstream::HostDescriptionConstSharedPtr host() const override { return conn_pool_->host(); } + + bool valid() { return conn_pool_ != nullptr; } + +private: + // Points to the actual connection pool to create streams from. 
+ Envoy::Http::ConnectionPool::Instance* conn_pool_{}; + Envoy::Http::ConnectionPool::Cancellable* conn_pool_stream_handle_{}; + Router::GenericConnectionPoolCallbacks* callbacks_{}; +}; + +class HttpUpstream : public Router::GenericUpstream, public Envoy::Http::StreamCallbacks { +public: + HttpUpstream(Router::UpstreamToDownstream& upstream_request, Envoy::Http::RequestEncoder* encoder) + : upstream_request_(upstream_request), request_encoder_(encoder) { + request_encoder_->getStream().addCallbacks(*this); + } + + // GenericUpstream + void encodeData(Buffer::Instance& data, bool end_stream) override { + request_encoder_->encodeData(data, end_stream); + } + void encodeMetadata(const Envoy::Http::MetadataMapVector& metadata_map_vector) override { + request_encoder_->encodeMetadata(metadata_map_vector); + } + void encodeHeaders(const Envoy::Http::RequestHeaderMap& headers, bool end_stream) override { + request_encoder_->encodeHeaders(headers, end_stream); + } + void encodeTrailers(const Envoy::Http::RequestTrailerMap& trailers) override { + request_encoder_->encodeTrailers(trailers); + } + + void readDisable(bool disable) override { request_encoder_->getStream().readDisable(disable); } + + void resetStream() override { + request_encoder_->getStream().removeCallbacks(*this); + request_encoder_->getStream().resetStream(Envoy::Http::StreamResetReason::LocalReset); + } + + // Http::StreamCallbacks + void onResetStream(Envoy::Http::StreamResetReason reason, + absl::string_view transport_failure_reason) override { + upstream_request_.onResetStream(reason, transport_failure_reason); + } + + void onAboveWriteBufferHighWatermark() override { + upstream_request_.onAboveWriteBufferHighWatermark(); + } + + void onBelowWriteBufferLowWatermark() override { + upstream_request_.onBelowWriteBufferLowWatermark(); + } + +private: + Router::UpstreamToDownstream& upstream_request_; + Envoy::Http::RequestEncoder* request_encoder_{}; +}; + +} // namespace Http +} // namespace Http +} // 
namespace Upstreams +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/upstreams/http/tcp/BUILD b/source/extensions/upstreams/http/tcp/BUILD new file mode 100644 index 0000000000000..6daa95ce15d7e --- /dev/null +++ b/source/extensions/upstreams/http/tcp/BUILD @@ -0,0 +1,53 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", + "envoy_cc_library", + "envoy_extension_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_extension_package() + +envoy_cc_extension( + name = "config", + srcs = [ + "config.cc", + ], + hdrs = [ + "config.h", + ], + security_posture = "robust_to_untrusted_downstream", + visibility = ["//visibility:public"], + deps = [ + ":upstream_request_lib", + "@envoy_api//envoy/extensions/upstreams/http/tcp/v3:pkg_cc_proto", + ], +) + +envoy_cc_library( + name = "upstream_request_lib", + srcs = [ + "upstream_request.cc", + ], + hdrs = [ + "upstream_request.h", + ], + deps = [ + "//include/envoy/http:codes_interface", + "//include/envoy/http:filter_interface", + "//include/envoy/upstream:upstream_interface", + "//source/common/common:assert_lib", + "//source/common/common:minimal_logger_lib", + "//source/common/common:utility_lib", + "//source/common/http:codes_lib", + "//source/common/http:header_map_lib", + "//source/common/http:headers_lib", + "//source/common/http:message_lib", + "//source/common/network:application_protocol_lib", + "//source/common/network:transport_socket_options_lib", + "//source/common/router:router_lib", + "//source/common/upstream:load_balancer_lib", + "//source/extensions/common/proxy_protocol:proxy_protocol_header_lib", + ], +) diff --git a/source/extensions/upstreams/http/tcp/config.cc b/source/extensions/upstreams/http/tcp/config.cc new file mode 100644 index 0000000000000..15c01f524af7a --- /dev/null +++ b/source/extensions/upstreams/http/tcp/config.cc @@ -0,0 +1,25 @@ +#include "extensions/upstreams/http/tcp/config.h" + +#include 
"extensions/upstreams/http/tcp/upstream_request.h" + +namespace Envoy { +namespace Extensions { +namespace Upstreams { +namespace Http { +namespace Tcp { + +Router::GenericConnPoolPtr TcpGenericConnPoolFactory::createGenericConnPool( + Upstream::ClusterManager& cm, bool is_connect, const Router::RouteEntry& route_entry, + absl::optional downstream_protocol, + Upstream::LoadBalancerContext* ctx) const { + auto ret = std::make_unique(cm, is_connect, route_entry, downstream_protocol, ctx); + return (ret->valid() ? std::move(ret) : nullptr); +} + +REGISTER_FACTORY(TcpGenericConnPoolFactory, Router::GenericConnPoolFactory); + +} // namespace Tcp +} // namespace Http +} // namespace Upstreams +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/upstreams/http/tcp/config.h b/source/extensions/upstreams/http/tcp/config.h new file mode 100644 index 0000000000000..5ff4df42f5b3d --- /dev/null +++ b/source/extensions/upstreams/http/tcp/config.h @@ -0,0 +1,36 @@ +#pragma once + +#include "envoy/extensions/upstreams/http/tcp/v3/tcp_connection_pool.pb.h" +#include "envoy/registry/registry.h" +#include "envoy/router/router.h" + +namespace Envoy { +namespace Extensions { +namespace Upstreams { +namespace Http { +namespace Tcp { + +/** + * Config registration for the TcpConnPool. 
@see Router::GenericConnPoolFactory + */ +class TcpGenericConnPoolFactory : public Router::GenericConnPoolFactory { +public: + std::string name() const override { return "envoy.filters.connection_pools.http.tcp"; } + std::string category() const override { return "envoy.upstreams"; } + Router::GenericConnPoolPtr + createGenericConnPool(Upstream::ClusterManager& cm, bool is_connect, + const Router::RouteEntry& route_entry, + absl::optional downstream_protocol, + Upstream::LoadBalancerContext* ctx) const override; + ProtobufTypes::MessagePtr createEmptyConfigProto() override { + return std::make_unique(); + } +}; + +DECLARE_FACTORY(TcpGenericConnPoolFactory); + +} // namespace Tcp +} // namespace Http +} // namespace Upstreams +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/upstreams/http/tcp/upstream_request.cc b/source/extensions/upstreams/http/tcp/upstream_request.cc new file mode 100644 index 0000000000000..4284a2e5a13d6 --- /dev/null +++ b/source/extensions/upstreams/http/tcp/upstream_request.cc @@ -0,0 +1,112 @@ +#include "extensions/upstreams/http/tcp/upstream_request.h" + +#include +#include + +#include "envoy/upstream/upstream.h" + +#include "common/common/assert.h" +#include "common/common/utility.h" +#include "common/http/codes.h" +#include "common/http/header_map_impl.h" +#include "common/http/headers.h" +#include "common/http/message_impl.h" +#include "common/network/transport_socket_options_impl.h" +#include "common/router/router.h" + +#include "extensions/common/proxy_protocol/proxy_protocol_header.h" + +namespace Envoy { +namespace Extensions { +namespace Upstreams { +namespace Http { +namespace Tcp { + +void TcpConnPool::onPoolReady(Envoy::Tcp::ConnectionPool::ConnectionDataPtr&& conn_data, + Upstream::HostDescriptionConstSharedPtr host) { + upstream_handle_ = nullptr; + Network::Connection& latched_conn = conn_data->connection(); + auto upstream = + std::make_unique(&callbacks_->upstreamToDownstream(), 
std::move(conn_data)); + callbacks_->onPoolReady(std::move(upstream), host, latched_conn.localAddress(), + latched_conn.streamInfo()); +} + +TcpUpstream::TcpUpstream(Router::UpstreamToDownstream* upstream_request, + Envoy::Tcp::ConnectionPool::ConnectionDataPtr&& upstream) + : upstream_request_(upstream_request), upstream_conn_data_(std::move(upstream)) { + upstream_conn_data_->connection().enableHalfClose(true); + upstream_conn_data_->addUpstreamCallbacks(*this); +} + +void TcpUpstream::encodeData(Buffer::Instance& data, bool end_stream) { + upstream_conn_data_->connection().write(data, end_stream); +} + +void TcpUpstream::encodeHeaders(const Envoy::Http::RequestHeaderMap&, bool end_stream) { + // Headers should only happen once, so use this opportunity to add the proxy + // proto header, if configured. + ASSERT(upstream_request_->routeEntry().connectConfig().has_value()); + Buffer::OwnedImpl data; + auto& connect_config = upstream_request_->routeEntry().connectConfig().value(); + if (connect_config.has_proxy_protocol_config()) { + Extensions::Common::ProxyProtocol::generateProxyProtoHeader( + connect_config.proxy_protocol_config(), upstream_request_->connection(), data); + } + + if (data.length() != 0 || end_stream) { + upstream_conn_data_->connection().write(data, end_stream); + } + + // TcpUpstream::encodeHeaders is called after the UpstreamRequest is fully initialized. Also use + // this time to synthesize the 200 response headers downstream to complete the CONNECT handshake. 
+ Envoy::Http::ResponseHeaderMapPtr headers{ + Envoy::Http::createHeaderMap( + {{Envoy::Http::Headers::get().Status, "200"}})}; + upstream_request_->decodeHeaders(std::move(headers), false); +} + +void TcpUpstream::encodeTrailers(const Envoy::Http::RequestTrailerMap&) { + Buffer::OwnedImpl data; + upstream_conn_data_->connection().write(data, true); +} + +void TcpUpstream::readDisable(bool disable) { + if (upstream_conn_data_->connection().state() != Network::Connection::State::Open) { + return; + } + upstream_conn_data_->connection().readDisable(disable); +} + +void TcpUpstream::resetStream() { + upstream_request_ = nullptr; + upstream_conn_data_->connection().close(Network::ConnectionCloseType::NoFlush); +} + +void TcpUpstream::onUpstreamData(Buffer::Instance& data, bool end_stream) { + upstream_request_->decodeData(data, end_stream); +} + +void TcpUpstream::onEvent(Network::ConnectionEvent event) { + if (event != Network::ConnectionEvent::Connected && upstream_request_) { + upstream_request_->onResetStream(Envoy::Http::StreamResetReason::ConnectionTermination, ""); + } +} + +void TcpUpstream::onAboveWriteBufferHighWatermark() { + if (upstream_request_) { + upstream_request_->onAboveWriteBufferHighWatermark(); + } +} + +void TcpUpstream::onBelowWriteBufferLowWatermark() { + if (upstream_request_) { + upstream_request_->onBelowWriteBufferLowWatermark(); + } +} + +} // namespace Tcp +} // namespace Http +} // namespace Upstreams +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/upstreams/http/tcp/upstream_request.h b/source/extensions/upstreams/http/tcp/upstream_request.h new file mode 100644 index 0000000000000..1c2e7a44d0338 --- /dev/null +++ b/source/extensions/upstreams/http/tcp/upstream_request.h @@ -0,0 +1,93 @@ +#pragma once + +#include +#include + +#include "envoy/http/codec.h" +#include "envoy/tcp/conn_pool.h" + +#include "common/buffer/watermark_buffer.h" +#include "common/common/cleanup.h" +#include "common/common/logger.h" 
+#include "common/config/well_known_names.h" +#include "common/router/upstream_request.h" +#include "common/stream_info/stream_info_impl.h" + +namespace Envoy { +namespace Extensions { +namespace Upstreams { +namespace Http { +namespace Tcp { + +class TcpConnPool : public Router::GenericConnPool, public Envoy::Tcp::ConnectionPool::Callbacks { +public: + TcpConnPool(Upstream::ClusterManager& cm, bool is_connect, const Router::RouteEntry& route_entry, + absl::optional, Upstream::LoadBalancerContext* ctx) { + ASSERT(is_connect); + conn_pool_ = cm.tcpConnPoolForCluster(route_entry.clusterName(), + Upstream::ResourcePriority::Default, ctx); + } + void newStream(Router::GenericConnectionPoolCallbacks* callbacks) override { + callbacks_ = callbacks; + upstream_handle_ = conn_pool_->newConnection(*this); + } + + bool cancelAnyPendingRequest() override { + if (upstream_handle_) { + upstream_handle_->cancel(Envoy::Tcp::ConnectionPool::CancelPolicy::Default); + upstream_handle_ = nullptr; + return true; + } + return false; + } + absl::optional protocol() const override { return absl::nullopt; } + Upstream::HostDescriptionConstSharedPtr host() const override { return conn_pool_->host(); } + + bool valid() { return conn_pool_ != nullptr; } + + // Tcp::ConnectionPool::Callbacks + void onPoolFailure(ConnectionPool::PoolFailureReason reason, + Upstream::HostDescriptionConstSharedPtr host) override { + upstream_handle_ = nullptr; + callbacks_->onPoolFailure(reason, "", host); + } + + void onPoolReady(Envoy::Tcp::ConnectionPool::ConnectionDataPtr&& conn_data, + Upstream::HostDescriptionConstSharedPtr host) override; + +private: + Envoy::Tcp::ConnectionPool::Instance* conn_pool_; + Envoy::Tcp::ConnectionPool::Cancellable* upstream_handle_{}; + Router::GenericConnectionPoolCallbacks* callbacks_{}; +}; + +class TcpUpstream : public Router::GenericUpstream, + public Envoy::Tcp::ConnectionPool::UpstreamCallbacks { +public: + TcpUpstream(Router::UpstreamToDownstream* upstream_request, + 
Envoy::Tcp::ConnectionPool::ConnectionDataPtr&& upstream); + + // GenericUpstream + void encodeData(Buffer::Instance& data, bool end_stream) override; + void encodeMetadata(const Envoy::Http::MetadataMapVector&) override {} + void encodeHeaders(const Envoy::Http::RequestHeaderMap&, bool end_stream) override; + void encodeTrailers(const Envoy::Http::RequestTrailerMap&) override; + void readDisable(bool disable) override; + void resetStream() override; + + // Tcp::ConnectionPool::UpstreamCallbacks + void onUpstreamData(Buffer::Instance& data, bool end_stream) override; + void onEvent(Network::ConnectionEvent event) override; + void onAboveWriteBufferHighWatermark() override; + void onBelowWriteBufferLowWatermark() override; + +private: + Router::UpstreamToDownstream* upstream_request_; + Envoy::Tcp::ConnectionPool::ConnectionDataPtr upstream_conn_data_; +}; + +} // namespace Tcp +} // namespace Http +} // namespace Upstreams +} // namespace Extensions +} // namespace Envoy diff --git a/source/server/BUILD b/source/server/BUILD index 1a4eeb771ad85..7bfcd76995768 100644 --- a/source/server/BUILD +++ b/source/server/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", @@ -8,6 +6,8 @@ load( "envoy_select_hot_restart", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( @@ -21,7 +21,7 @@ envoy_cc_library( tags = ["backtrace"], deps = [ "//source/common/common:minimal_logger_lib", - "//source/common/common:version_lib", + "//source/common/version:version_lib", ], ) @@ -46,7 +46,9 @@ envoy_cc_library( "//source/common/config:runtime_utility_lib", "//source/common/config:utility_lib", "//source/common/network:resolver_lib", + "//source/common/network:socket_interface_lib", "//source/common/network:socket_option_factory_lib", + "//source/common/network:socket_option_lib", "//source/common/network:utility_lib", "//source/common/protobuf:utility_lib", 
"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", @@ -66,6 +68,7 @@ envoy_cc_library( "//include/envoy/event:timer_interface", "//include/envoy/network:connection_handler_interface", "//include/envoy/network:connection_interface", + "//include/envoy/network:exception_interface", "//include/envoy/network:filter_interface", "//include/envoy/network:listen_socket_interface", "//include/envoy/network:listener_interface", @@ -108,6 +111,7 @@ envoy_cc_library( deps = [ ":watchdog_lib", "//include/envoy/api:api_interface", + "//include/envoy/common:time_interface", "//include/envoy/event:timer_interface", "//include/envoy/server:configuration_interface", "//include/envoy/server:guarddog_interface", @@ -139,10 +143,12 @@ envoy_cc_library( "//include/envoy/server:hot_restart_interface", "//include/envoy/server:instance_interface", "//include/envoy/server:options_interface", + "//include/envoy/stats:stats_interface", "//source/common/api:os_sys_calls_lib", "//source/common/common:assert_lib", "//source/common/common:utility_lib", "//source/common/network:utility_lib", + "//source/common/stats:utility_lib", ], ) @@ -161,12 +167,12 @@ envoy_cc_library( srcs = envoy_select_hot_restart(["hot_restarting_parent.cc"]), hdrs = envoy_select_hot_restart(["hot_restarting_parent.h"]), deps = [ - ":api_listener_lib", ":hot_restarting_base", - ":listener_lib", + ":listener_manager_lib", "//source/common/memory:stats_lib", "//source/common/stats:stat_merger_lib", "//source/common/stats:symbol_table_lib", + "//source/common/stats:utility_lib", ], ) @@ -228,9 +234,9 @@ envoy_cc_library( "//source/common/api:os_sys_calls_lib", "//source/common/common:logger_lib", "//source/common/common:macros", - "//source/common/common:version_lib", "//source/common/protobuf:utility_lib", "//source/common/stats:stats_lib", + "//source/common/version:version_lib", "@envoy_api//envoy/config/bootstrap/v2:pkg_cc_proto", ], ) @@ -275,77 +281,14 @@ envoy_cc_library( ], ) -envoy_cc_library( - name = 
"api_listener_lib", - srcs = [ - "api_listener_impl.cc", - ], - hdrs = [ - "api_listener_impl.h", - ], - deps = [ - ":drain_manager_lib", - ":filter_chain_manager_lib", - ":listener_manager_impl", - "//include/envoy/network:connection_interface", - "//include/envoy/server:api_listener_interface", - "//include/envoy/server:filter_config_interface", - "//include/envoy/server:listener_manager_interface", - "//source/common/common:empty_string", - "//source/common/http:conn_manager_lib", - "//source/common/init:manager_lib", - "//source/common/network:resolver_lib", - "//source/common/stream_info:stream_info_lib", - "//source/extensions/filters/network/http_connection_manager:config", - "@envoy_api//envoy/api/v2:pkg_cc_proto", - "@envoy_api//envoy/api/v2/listener:pkg_cc_proto", - "@envoy_api//envoy/config/core/v3:pkg_cc_proto", - "@envoy_api//envoy/config/listener/v3:pkg_cc_proto", - ], -) - -envoy_cc_library( - name = "listener_lib", - srcs = [ - "listener_impl.cc", - ], - hdrs = [ - "listener_impl.h", - ], - deps = [ - ":configuration_lib", - ":drain_manager_lib", - ":filter_chain_manager_lib", - ":listener_manager_impl", - ":transport_socket_config_lib", - ":well_known_names_lib", - "//include/envoy/access_log:access_log_interface", - "//include/envoy/server:active_udp_listener_config_interface", - "//include/envoy/server:filter_config_interface", - "//include/envoy/server:listener_manager_interface", - "//include/envoy/server:transport_socket_config_interface", - "//source/common/access_log:access_log_lib", - "//source/common/config:utility_lib", - "//source/common/init:manager_lib", - "//source/common/init:target_lib", - "//source/common/network:connection_balancer_lib", - "//source/common/network:listen_socket_lib", - "//source/common/network:socket_option_factory_lib", - "//source/common/network:utility_lib", - "//source/common/protobuf:utility_lib", - "//source/extensions/filters/listener:well_known_names", - 
"//source/extensions/transport_sockets:well_known_names", - "@envoy_api//envoy/config/core/v3:pkg_cc_proto", - "@envoy_api//envoy/config/listener/v3:pkg_cc_proto", - ], -) - # TODO(junr03): actually separate this lib from the listener and api listener lib. # this can be done if the parent_ in the listener and the api listener becomes the ListenerManager interface. # the issue right now is that the listener's reach into the listener manager's server_ instance variable. envoy_cc_library( - name = "listener_manager_impl", + name = "listener_manager_lib", srcs = [ + "api_listener_impl.cc", + "listener_impl.cc", "listener_manager_impl.cc", ], hdrs = [ @@ -360,26 +303,41 @@ envoy_cc_library( ":lds_api_lib", ":transport_socket_config_lib", ":well_known_names_lib", + "//include/envoy/access_log:access_log_interface", + "//include/envoy/network:connection_interface", + "//include/envoy/network:udp_packet_writer_config_interface", "//include/envoy/server:active_udp_listener_config_interface", + "//include/envoy/server:api_listener_interface", "//include/envoy/server:filter_config_interface", "//include/envoy/server:listener_manager_interface", "//include/envoy/server:transport_socket_config_interface", "//include/envoy/server:worker_interface", + "//source/common/access_log:access_log_lib", + "//source/common/common:basic_resource_lib", + "//source/common/common:empty_string", "//source/common/config:utility_lib", "//source/common/config:version_converter_lib", "//source/common/http:conn_manager_lib", "//source/common/init:manager_lib", + "//source/common/init:target_lib", + "//source/common/network:connection_balancer_lib", "//source/common/network:filter_matcher_lib", "//source/common/network:listen_socket_lib", + "//source/common/network:resolver_lib", "//source/common/network:socket_option_factory_lib", "//source/common/network:utility_lib", "//source/common/protobuf:utility_lib", + "//source/common/stream_info:stream_info_lib", 
"//source/extensions/filters/listener:well_known_names", "//source/extensions/filters/network/http_connection_manager:config", "//source/extensions/transport_sockets:well_known_names", + "//source/extensions/upstreams/http/generic:config", "@envoy_api//envoy/admin/v3:pkg_cc_proto", + "@envoy_api//envoy/api/v2:pkg_cc_proto", + "@envoy_api//envoy/api/v2/listener:pkg_cc_proto", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", "@envoy_api//envoy/config/listener/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/listener/proxy_protocol/v3:pkg_cc_proto", ], ) @@ -437,18 +395,18 @@ envoy_cc_library( ], deps = [ ":active_raw_udp_listener_config", - ":api_listener_lib", ":configuration_lib", ":connection_handler_lib", ":guarddog_lib", ":listener_hooks_lib", - ":listener_lib", + ":listener_manager_lib", ":ssl_context_manager_lib", ":worker_lib", "//include/envoy/event:dispatcher_interface", "//include/envoy/event:signal_interface", "//include/envoy/event:timer_interface", "//include/envoy/network:dns_interface", + "//include/envoy/server:bootstrap_extension_config_interface", "//include/envoy/server:drain_manager_interface", "//include/envoy/server:instance_interface", "//include/envoy/server:listener_manager_interface", @@ -463,7 +421,6 @@ envoy_cc_library( "//source/common/common:logger_lib", "//source/common/common:mutex_tracer_lib", "//source/common/common:utility_lib", - "//source/common/common:version_lib", "//source/common/config:utility_lib", "//source/common/grpc:async_client_manager_lib", "//source/common/grpc:context_lib", @@ -482,9 +439,11 @@ envoy_cc_library( "//source/common/stats:thread_local_store_lib", "//source/common/upstream:cluster_manager_lib", "//source/common/upstream:health_discovery_service_lib", + "//source/common/version:version_lib", "//source/server:overload_manager_lib", - "//source/server/http:admin_lib", + "//source/server/admin:admin_lib", "@envoy_api//envoy/admin/v3:pkg_cc_proto", + 
"@envoy_api//envoy/config/bootstrap/v2:pkg_cc_proto", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", ], ) @@ -526,6 +485,7 @@ envoy_cc_library( "//include/envoy/api:api_interface", "//include/envoy/event:dispatcher_interface", "//include/envoy/event:timer_interface", + "//include/envoy/network:exception_interface", "//include/envoy/server:configuration_interface", "//include/envoy/server:guarddog_interface", "//include/envoy/server:listener_manager_interface", diff --git a/source/server/active_raw_udp_listener_config.cc b/source/server/active_raw_udp_listener_config.cc index eb242510219cd..f34abe2fcb0ef 100644 --- a/source/server/active_raw_udp_listener_config.cc +++ b/source/server/active_raw_udp_listener_config.cc @@ -15,7 +15,7 @@ Network::ConnectionHandler::ActiveListenerPtr ActiveRawUdpListenerFactory::createActiveUdpListener(Network::ConnectionHandler& parent, Event::Dispatcher& dispatcher, Network::ListenerConfig& config) { - return std::make_unique(parent, dispatcher, config); + return std::make_unique(parent, dispatcher, config); } ProtobufTypes::MessagePtr ActiveRawUdpListenerConfigFactory::createEmptyConfigProto() { diff --git a/source/server/http/BUILD b/source/server/admin/BUILD similarity index 53% rename from source/server/http/BUILD rename to source/server/admin/BUILD index dbbb828c89303..370d803b0fab8 100644 --- a/source/server/http/BUILD +++ b/source/server/admin/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( @@ -15,6 +15,12 @@ envoy_cc_library( deps = [ ":admin_filter_lib", ":config_tracker_lib", + ":listeners_handler_lib", + ":logs_handler_lib", + ":profiling_handler_lib", + ":runtime_handler_lib", + ":server_cmd_handler_lib", + ":server_info_handler_lib", ":stats_handler_lib", ":utils_lib", "//include/envoy/filesystem:filesystem_interface", @@ -22,7 +28,6 @@ envoy_cc_library( 
"//include/envoy/http:request_id_extension_interface", "//include/envoy/network:filter_interface", "//include/envoy/network:listen_socket_interface", - "//include/envoy/runtime:runtime_interface", "//include/envoy/server:admin_interface", "//include/envoy/server:hot_restart_interface", "//include/envoy/server:instance_interface", @@ -31,16 +36,16 @@ envoy_cc_library( "//include/envoy/upstream:cluster_manager_interface", "//include/envoy/upstream:resource_manager_interface", "//include/envoy/upstream:upstream_interface", - "//source/common/access_log:access_log_formatter_lib", "//source/common/access_log:access_log_lib", "//source/common/buffer:buffer_lib", "//source/common/common:assert_lib", + "//source/common/common:basic_resource_lib", "//source/common/common:empty_string", "//source/common/common:macros", "//source/common/common:minimal_logger_lib", "//source/common/common:mutex_tracer_lib", "//source/common/common:utility_lib", - "//source/common/common:version_includes", + "//source/common/formatter:substitution_formatter_lib", "//source/common/html:utility_lib", "//source/common/http:codes_lib", "//source/common/http:conn_manager_lib", @@ -50,13 +55,11 @@ envoy_cc_library( "//source/common/http:headers_lib", "//source/common/http:request_id_extension_lib", "//source/common/http:utility_lib", - "//source/common/memory:stats_lib", "//source/common/memory:utils_lib", "//source/common/network:connection_balancer_lib", "//source/common/network:listen_socket_lib", "//source/common/network:raw_buffer_socket_lib", "//source/common/network:utility_lib", - "//source/common/profiler:profiler_lib", "//source/common/router:config_lib", "//source/common/router:scoped_config_lib", "//source/common/stats:isolated_store_lib", @@ -64,6 +67,7 @@ envoy_cc_library( "//source/extensions/access_loggers/file:file_access_log_lib", "@envoy_api//envoy/admin/v3:pkg_cc_proto", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + "@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto", 
"@envoy_api//envoy/config/route/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto", ], @@ -85,11 +89,21 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "handler_ctx_lib", + hdrs = ["handler_ctx.h"], + deps = [ + "//include/envoy/server:instance_interface", + ], +) + envoy_cc_library( name = "stats_handler_lib", srcs = ["stats_handler.cc"], hdrs = ["stats_handler.h"], deps = [ + ":handler_ctx_lib", + ":prometheus_stats_lib", ":utils_lib", "//include/envoy/http:codes_interface", "//include/envoy/server:admin_interface", @@ -99,6 +113,118 @@ envoy_cc_library( "//source/common/http:codes_lib", "//source/common/http:header_map_lib", "//source/common/stats:histogram_lib", + "@envoy_api//envoy/admin/v3:pkg_cc_proto", + ], +) + +envoy_cc_library( + name = "prometheus_stats_lib", + srcs = ["prometheus_stats.cc"], + hdrs = ["prometheus_stats.h"], + deps = [ + ":utils_lib", + "//source/common/buffer:buffer_lib", + "//source/common/stats:histogram_lib", + ], +) + +envoy_cc_library( + name = "listeners_handler_lib", + srcs = ["listeners_handler.cc"], + hdrs = ["listeners_handler.h"], + deps = [ + ":handler_ctx_lib", + ":utils_lib", + "//include/envoy/http:codes_interface", + "//include/envoy/server:admin_interface", + "//include/envoy/server:instance_interface", + "//source/common/buffer:buffer_lib", + "//source/common/http:codes_lib", + "//source/common/http:header_map_lib", + "@envoy_api//envoy/admin/v3:pkg_cc_proto", + ], +) + +envoy_cc_library( + name = "runtime_handler_lib", + srcs = ["runtime_handler.cc"], + hdrs = ["runtime_handler.h"], + deps = [ + ":handler_ctx_lib", + ":utils_lib", + "//include/envoy/http:codes_interface", + "//include/envoy/server:admin_interface", + "//include/envoy/server:instance_interface", + "//source/common/buffer:buffer_lib", + "//source/common/http:codes_lib", + "//source/common/http:header_map_lib", + ], +) + +envoy_cc_library( + name = "logs_handler_lib", + srcs = 
["logs_handler.cc"], + hdrs = ["logs_handler.h"], + deps = [ + ":handler_ctx_lib", + ":utils_lib", + "//include/envoy/http:codes_interface", + "//include/envoy/server:admin_interface", + "//include/envoy/server:instance_interface", + "//source/common/buffer:buffer_lib", + "//source/common/common:minimal_logger_lib", + "//source/common/http:codes_lib", + "//source/common/http:header_map_lib", + ], +) + +envoy_cc_library( + name = "profiling_handler_lib", + srcs = ["profiling_handler.cc"], + hdrs = ["profiling_handler.h"], + deps = [ + ":utils_lib", + "//include/envoy/http:codes_interface", + "//include/envoy/server:admin_interface", + "//source/common/buffer:buffer_lib", + "//source/common/http:codes_lib", + "//source/common/http:header_map_lib", + "//source/common/profiler:profiler_lib", + ], +) + +envoy_cc_library( + name = "server_cmd_handler_lib", + srcs = ["server_cmd_handler.cc"], + hdrs = ["server_cmd_handler.h"], + deps = [ + ":handler_ctx_lib", + ":utils_lib", + "//include/envoy/http:codes_interface", + "//include/envoy/server:admin_interface", + "//include/envoy/server:instance_interface", + "//source/common/buffer:buffer_lib", + "//source/common/http:codes_lib", + "//source/common/http:header_map_lib", + ], +) + +envoy_cc_library( + name = "server_info_handler_lib", + srcs = ["server_info_handler.cc"], + hdrs = ["server_info_handler.h"], + deps = [ + ":handler_ctx_lib", + ":utils_lib", + "//include/envoy/http:codes_interface", + "//include/envoy/server:admin_interface", + "//include/envoy/server:instance_interface", + "//source/common/buffer:buffer_lib", + "//source/common/http:codes_lib", + "//source/common/http:header_map_lib", + "//source/common/memory:stats_lib", + "//source/common/version:version_includes", + "@envoy_api//envoy/admin/v3:pkg_cc_proto", ], ) diff --git a/source/server/http/admin.cc b/source/server/admin/admin.cc similarity index 56% rename from source/server/http/admin.cc rename to source/server/admin/admin.cc index 
3a8f7a563ce1e..7eb12d3180d8e 100644 --- a/source/server/http/admin.cc +++ b/source/server/admin/admin.cc @@ -1,32 +1,27 @@ -#include "server/http/admin.h" +#include "server/admin/admin.h" #include #include #include #include -#include -#include #include #include #include "envoy/admin/v3/certs.pb.h" #include "envoy/admin/v3/clusters.pb.h" #include "envoy/admin/v3/config_dump.pb.h" -#include "envoy/admin/v3/listeners.pb.h" -#include "envoy/admin/v3/memory.pb.h" #include "envoy/admin/v3/metrics.pb.h" -#include "envoy/admin/v3/mutex_stats.pb.h" #include "envoy/admin/v3/server_info.pb.h" #include "envoy/config/core/v3/health_check.pb.h" +#include "envoy/config/endpoint/v3/endpoint_components.pb.h" #include "envoy/filesystem/filesystem.h" -#include "envoy/runtime/runtime.h" #include "envoy/server/hot_restart.h" #include "envoy/server/instance.h" #include "envoy/server/options.h" #include "envoy/upstream/cluster_manager.h" +#include "envoy/upstream/outlier_detection.h" #include "envoy/upstream/upstream.h" -#include "common/access_log/access_log_formatter.h" #include "common/access_log/access_log_impl.h" #include "common/buffer/buffer_impl.h" #include "common/common/assert.h" @@ -34,24 +29,21 @@ #include "common/common/fmt.h" #include "common/common/mutex_tracer_impl.h" #include "common/common/utility.h" -#include "common/common/version.h" +#include "common/formatter/substitution_formatter.h" #include "common/html/utility.h" #include "common/http/codes.h" #include "common/http/conn_manager_utility.h" #include "common/http/header_map_impl.h" #include "common/http/headers.h" -#include "common/memory/stats.h" #include "common/memory/utils.h" #include "common/network/listen_socket_impl.h" #include "common/network/utility.h" -#include "common/profiler/profiler.h" #include "common/protobuf/protobuf.h" #include "common/protobuf/utility.h" #include "common/router/config_impl.h" #include "common/upstream/host_utility.h" -#include "server/http/stats_handler.h" -#include 
"server/http/utils.h" +#include "server/admin/utils.h" #include "extensions/access_loggers/file/file_access_log_impl.h" @@ -139,6 +131,11 @@ absl::optional maskParam(const Http::Utility::QueryParams& params) return Utility::queryParam(params, "mask"); } +// Helper method to get the eds parameter. +bool shouldIncludeEdsInDump(const Http::Utility::QueryParams& params) { + return Utility::queryParam(params, "include_eds") != absl::nullopt; +} + // Helper method that ensures that we've setting flags based on all the health flag values on the // host. void setHealthFlag(Upstream::Host::HealthFlag flag, const Upstream::Host& host, @@ -263,53 +260,6 @@ void trimResourceMessage(const Protobuf::FieldMask& field_mask, Protobuf::Messag } // namespace -bool AdminImpl::changeLogLevel(const Http::Utility::QueryParams& params) { - if (params.size() != 1) { - return false; - } - - std::string name = params.begin()->first; - std::string level = params.begin()->second; - - // First see if the level is valid. - size_t level_to_use = std::numeric_limits::max(); - for (size_t i = 0; i < ARRAY_SIZE(spdlog::level::level_string_views); i++) { - if (level == spdlog::level::level_string_views[i]) { - level_to_use = i; - break; - } - } - - if (level_to_use == std::numeric_limits::max()) { - return false; - } - - // Now either change all levels or a single level. 
- if (name == "level") { - ENVOY_LOG(debug, "change all log levels: level='{}'", level); - for (Logger::Logger& logger : Logger::Registry::loggers()) { - logger.setLevel(static_cast(level_to_use)); - } - } else { - ENVOY_LOG(debug, "change log level: name='{}' level='{}'", name, level); - Logger::Logger* logger_to_change = nullptr; - for (Logger::Logger& logger : Logger::Registry::loggers()) { - if (logger.name() == name) { - logger_to_change = &logger; - break; - } - } - - if (!logger_to_change) { - return false; - } - - logger_to_change->setLevel(static_cast(level_to_use)); - } - - return true; -} - void AdminImpl::addOutlierInfo(const std::string& cluster_name, const Upstream::Outlier::Detector* outlier_detector, Buffer::Instance& response) { @@ -349,8 +299,8 @@ void AdminImpl::addCircuitSettings(const std::string& cluster_name, const std::s // TODO(efimki): Add support of text readouts stats. void AdminImpl::writeClustersAsJson(Buffer::Instance& response) { envoy::admin::v3::Clusters clusters; - for (auto& cluster_pair : server_.clusterManager().clusters()) { - const Upstream::Cluster& cluster = cluster_pair.second.get(); + for (const auto& [name, cluster_ref] : server_.clusterManager().clusters()) { + const Upstream::Cluster& cluster = cluster_ref.get(); Upstream::ClusterInfoConstSharedPtr cluster_info = cluster.info(); envoy::admin::v3::ClusterStatus& cluster_status = *clusters.add_cluster_statuses(); @@ -382,17 +332,17 @@ void AdminImpl::writeClustersAsJson(Buffer::Instance& response) { host_status.set_hostname(host->hostname()); host_status.mutable_locality()->MergeFrom(host->locality()); - for (const auto& named_counter : host->counters()) { + for (const auto& [counter_name, counter] : host->counters()) { auto& metric = *host_status.add_stats(); - metric.set_name(std::string(named_counter.first)); - metric.set_value(named_counter.second.get().value()); + metric.set_name(std::string(counter_name)); + metric.set_value(counter.get().value()); 
metric.set_type(envoy::admin::v3::SimpleMetric::COUNTER); } - for (const auto& named_gauge : host->gauges()) { + for (const auto& [gauge_name, gauge] : host->gauges()) { auto& metric = *host_status.add_stats(); - metric.set_name(std::string(named_gauge.first)); - metric.set_value(named_gauge.second.get().value()); + metric.set_name(std::string(gauge_name)); + metric.set_value(gauge.get().value()); metric.set_type(envoy::admin::v3::SimpleMetric::GAUGE); } @@ -426,61 +376,58 @@ void AdminImpl::writeClustersAsJson(Buffer::Instance& response) { // TODO(efimki): Add support of text readouts stats. void AdminImpl::writeClustersAsText(Buffer::Instance& response) { - for (auto& cluster : server_.clusterManager().clusters()) { - addOutlierInfo(cluster.second.get().info()->name(), cluster.second.get().outlierDetector(), - response); - - addCircuitSettings( - cluster.second.get().info()->name(), "default", - cluster.second.get().info()->resourceManager(Upstream::ResourcePriority::Default), - response); - addCircuitSettings( - cluster.second.get().info()->name(), "high", - cluster.second.get().info()->resourceManager(Upstream::ResourcePriority::High), response); - - response.add(fmt::format("{}::added_via_api::{}\n", cluster.second.get().info()->name(), - cluster.second.get().info()->addedViaApi())); - for (auto& host_set : cluster.second.get().prioritySet().hostSetsPerPriority()) { + for (const auto& [name, cluster_ref] : server_.clusterManager().clusters()) { + const Upstream::Cluster& cluster = cluster_ref.get(); + const std::string& cluster_name = cluster.info()->name(); + addOutlierInfo(cluster_name, cluster.outlierDetector(), response); + + addCircuitSettings(cluster_name, "default", + cluster.info()->resourceManager(Upstream::ResourcePriority::Default), + response); + addCircuitSettings(cluster_name, "high", + cluster.info()->resourceManager(Upstream::ResourcePriority::High), response); + + response.add( + fmt::format("{}::added_via_api::{}\n", cluster_name, 
cluster.info()->addedViaApi())); + for (auto& host_set : cluster.prioritySet().hostSetsPerPriority()) { for (auto& host : host_set->hosts()) { + const std::string& host_address = host->address()->asString(); std::map all_stats; - for (const auto& counter : host->counters()) { - all_stats[counter.first] = counter.second.get().value(); + for (const auto& [counter_name, counter] : host->counters()) { + all_stats[counter_name] = counter.get().value(); } - for (const auto& gauge : host->gauges()) { - all_stats[gauge.first] = gauge.second.get().value(); + for (const auto& [gauge_name, gauge] : host->gauges()) { + all_stats[gauge_name] = gauge.get().value(); } - for (const auto& stat : all_stats) { - response.add(fmt::format("{}::{}::{}::{}\n", cluster.second.get().info()->name(), - host->address()->asString(), stat.first, stat.second)); + for (const auto& [stat_name, stat] : all_stats) { + response.add( + fmt::format("{}::{}::{}::{}\n", cluster_name, host_address, stat_name, stat)); } - response.add(fmt::format("{}::{}::hostname::{}\n", cluster.second.get().info()->name(), - host->address()->asString(), host->hostname())); - response.add(fmt::format("{}::{}::health_flags::{}\n", cluster.second.get().info()->name(), - host->address()->asString(), + response.add( + fmt::format("{}::{}::hostname::{}\n", cluster_name, host_address, host->hostname())); + response.add(fmt::format("{}::{}::health_flags::{}\n", cluster_name, host_address, Upstream::HostUtility::healthFlagsToString(*host))); - response.add(fmt::format("{}::{}::weight::{}\n", cluster.second.get().info()->name(), - host->address()->asString(), host->weight())); - response.add(fmt::format("{}::{}::region::{}\n", cluster.second.get().info()->name(), - host->address()->asString(), host->locality().region())); - response.add(fmt::format("{}::{}::zone::{}\n", cluster.second.get().info()->name(), - host->address()->asString(), host->locality().zone())); - response.add(fmt::format("{}::{}::sub_zone::{}\n", 
cluster.second.get().info()->name(), - host->address()->asString(), host->locality().sub_zone())); - response.add(fmt::format("{}::{}::canary::{}\n", cluster.second.get().info()->name(), - host->address()->asString(), host->canary())); - response.add(fmt::format("{}::{}::priority::{}\n", cluster.second.get().info()->name(), - host->address()->asString(), host->priority())); + response.add( + fmt::format("{}::{}::weight::{}\n", cluster_name, host_address, host->weight())); + response.add(fmt::format("{}::{}::region::{}\n", cluster_name, host_address, + host->locality().region())); + response.add( + fmt::format("{}::{}::zone::{}\n", cluster_name, host_address, host->locality().zone())); + response.add(fmt::format("{}::{}::sub_zone::{}\n", cluster_name, host_address, + host->locality().sub_zone())); + response.add( + fmt::format("{}::{}::canary::{}\n", cluster_name, host_address, host->canary())); + response.add( + fmt::format("{}::{}::priority::{}\n", cluster_name, host_address, host->priority())); response.add(fmt::format( - "{}::{}::success_rate::{}\n", cluster.second.get().info()->name(), - host->address()->asString(), + "{}::{}::success_rate::{}\n", cluster_name, host_address, host->outlierDetector().successRate( Upstream::Outlier::DetectorHostMonitor::SuccessRateMonitorType::ExternalOrigin))); response.add(fmt::format( - "{}::{}::local_origin_success_rate::{}\n", cluster.second.get().info()->name(), - host->address()->asString(), + "{}::{}::local_origin_success_rate::{}\n", cluster_name, host_address, host->outlierDetector().successRate( Upstream::Outlier::DetectorHostMonitor::SuccessRateMonitorType::LocalOrigin))); } @@ -488,28 +435,10 @@ void AdminImpl::writeClustersAsText(Buffer::Instance& response) { } } -void AdminImpl::writeListenersAsJson(Buffer::Instance& response) { - envoy::admin::v3::Listeners listeners; - for (const auto& listener : server_.listenerManager().listeners()) { - envoy::admin::v3::ListenerStatus& listener_status = 
*listeners.add_listener_statuses(); - listener_status.set_name(listener.get().name()); - Network::Utility::addressToProtobufAddress(*listener.get().listenSocketFactory().localAddress(), - *listener_status.mutable_local_address()); - } - response.add(MessageUtil::getJsonStringFromMessage(listeners, true)); // pretty-print -} - -void AdminImpl::writeListenersAsText(Buffer::Instance& response) { - for (const auto& listener : server_.listenerManager().listeners()) { - response.add(fmt::format("{}::{}\n", listener.get().name(), - listener.get().listenSocketFactory().localAddress()->asString())); - } -} - Http::Code AdminImpl::handlerClusters(absl::string_view url, Http::ResponseHeaderMap& response_headers, Buffer::Instance& response, AdminStream&) { - Http::Utility::QueryParams query_params = Http::Utility::parseQueryString(url); + Http::Utility::QueryParams query_params = Http::Utility::parseAndDecodeQueryString(url); const auto format_value = Utility::formatParam(query_params); if (format_value.has_value() && format_value.value() == "json") { @@ -523,9 +452,17 @@ Http::Code AdminImpl::handlerClusters(absl::string_view url, } void AdminImpl::addAllConfigToDump(envoy::admin::v3::ConfigDump& dump, - const absl::optional& mask) const { - for (const auto& key_callback_pair : config_tracker_.getCallbacksMap()) { - ProtobufTypes::MessagePtr message = key_callback_pair.second(); + const absl::optional& mask, + bool include_eds) const { + Envoy::Server::ConfigTracker::CbsMap callbacks_map = config_tracker_.getCallbacksMap(); + if (include_eds) { + if (!server_.clusterManager().clusters().empty()) { + callbacks_map.emplace("endpoint", [this] { return dumpEndpointConfigs(); }); + } + } + + for (const auto& [name, callback] : callbacks_map) { + ProtobufTypes::MessagePtr message = callback(); ASSERT(message); if (mask.has_value()) { @@ -543,10 +480,17 @@ void AdminImpl::addAllConfigToDump(envoy::admin::v3::ConfigDump& dump, absl::optional> 
AdminImpl::addResourceToDump(envoy::admin::v3::ConfigDump& dump, - const absl::optional& mask, - const std::string& resource) const { - for (const auto& key_callback_pair : config_tracker_.getCallbacksMap()) { - ProtobufTypes::MessagePtr message = key_callback_pair.second(); + const absl::optional& mask, const std::string& resource, + bool include_eds) const { + Envoy::Server::ConfigTracker::CbsMap callbacks_map = config_tracker_.getCallbacksMap(); + if (include_eds) { + if (!server_.clusterManager().clusters().empty()) { + callbacks_map.emplace("endpoint", [this] { return dumpEndpointConfigs(); }); + } + } + + for (const auto& [name, callback] : callbacks_map) { + ProtobufTypes::MessagePtr message = callback(); ASSERT(message); auto field_descriptor = message->GetDescriptor()->FindFieldByName(resource); @@ -580,378 +524,120 @@ AdminImpl::addResourceToDump(envoy::admin::v3::ConfigDump& dump, std::make_pair(Http::Code::NotFound, fmt::format("{} not found in config dump", resource))}; } -Http::Code AdminImpl::handlerConfigDump(absl::string_view url, - Http::ResponseHeaderMap& response_headers, - Buffer::Instance& response, AdminStream&) const { - Http::Utility::QueryParams query_params = Http::Utility::parseQueryString(url); - const auto resource = resourceParam(query_params); - const auto mask = maskParam(query_params); - - envoy::admin::v3::ConfigDump dump; - - if (resource.has_value()) { - auto err = addResourceToDump(dump, mask, resource.value()); - if (err.has_value()) { - response.add(err.value().second); - return err.value().first; - } - } else { - addAllConfigToDump(dump, mask); - } - MessageUtil::redact(dump); - - response_headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.Json); - response.add(MessageUtil::getJsonStringFromMessage(dump, true)); // pretty-print - return Http::Code::OK; -} - -// TODO(ambuc) Export this as a server (?) stat for monitoring. 
-Http::Code AdminImpl::handlerContention(absl::string_view, - Http::ResponseHeaderMap& response_headers, - Buffer::Instance& response, AdminStream&) { - - if (server_.options().mutexTracingEnabled() && server_.mutexTracer() != nullptr) { - response_headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.Json); - - envoy::admin::v3::MutexStats mutex_stats; - mutex_stats.set_num_contentions(server_.mutexTracer()->numContentions()); - mutex_stats.set_current_wait_cycles(server_.mutexTracer()->currentWaitCycles()); - mutex_stats.set_lifetime_wait_cycles(server_.mutexTracer()->lifetimeWaitCycles()); - response.add(MessageUtil::getJsonStringFromMessage(mutex_stats, true, true)); - } else { - response.add("Mutex contention tracing is not enabled. To enable, run Envoy with flag " - "--enable-mutex-tracing."); +void AdminImpl::addLbEndpoint( + const Upstream::HostSharedPtr& host, + envoy::config::endpoint::v3::LocalityLbEndpoints& locality_lb_endpoint) const { + auto& lb_endpoint = *locality_lb_endpoint.mutable_lb_endpoints()->Add(); + if (host->metadata() != nullptr) { + lb_endpoint.mutable_metadata()->MergeFrom(*host->metadata()); } - return Http::Code::OK; -} + lb_endpoint.mutable_load_balancing_weight()->set_value(host->weight()); -Http::Code AdminImpl::handlerCpuProfiler(absl::string_view url, Http::ResponseHeaderMap&, - Buffer::Instance& response, AdminStream&) { - Http::Utility::QueryParams query_params = Http::Utility::parseQueryString(url); - if (query_params.size() != 1 || query_params.begin()->first != "enable" || - (query_params.begin()->second != "y" && query_params.begin()->second != "n")) { - response.add("?enable=\n"); - return Http::Code::BadRequest; + switch (host->health()) { + case Upstream::Host::Health::Healthy: + lb_endpoint.set_health_status(envoy::config::core::v3::HealthStatus::HEALTHY); + break; + case Upstream::Host::Health::Unhealthy: + lb_endpoint.set_health_status(envoy::config::core::v3::HealthStatus::UNHEALTHY); + break; + 
case Upstream::Host::Health::Degraded: + lb_endpoint.set_health_status(envoy::config::core::v3::HealthStatus::DEGRADED); + break; + default: + lb_endpoint.set_health_status(envoy::config::core::v3::HealthStatus::UNKNOWN); } - bool enable = query_params.begin()->second == "y"; - if (enable && !Profiler::Cpu::profilerEnabled()) { - if (!Profiler::Cpu::startProfiler(profile_path_)) { - response.add("failure to start the profiler"); - return Http::Code::InternalServerError; - } - - } else if (!enable && Profiler::Cpu::profilerEnabled()) { - Profiler::Cpu::stopProfiler(); + auto& endpoint = *lb_endpoint.mutable_endpoint(); + endpoint.set_hostname(host->hostname()); + Network::Utility::addressToProtobufAddress(*host->address(), *endpoint.mutable_address()); + auto& health_check_config = *endpoint.mutable_health_check_config(); + health_check_config.set_hostname(host->hostnameForHealthChecks()); + if (host->healthCheckAddress()->asString() != host->address()->asString()) { + health_check_config.set_port_value(host->healthCheckAddress()->ip()->port()); } - - response.add("OK\n"); - return Http::Code::OK; } -Http::Code AdminImpl::handlerHeapProfiler(absl::string_view url, Http::ResponseHeaderMap&, - Buffer::Instance& response, AdminStream&) { - if (!Profiler::Heap::profilerEnabled()) { - response.add("The current build does not support heap profiler"); - return Http::Code::NotImplemented; - } +ProtobufTypes::MessagePtr AdminImpl::dumpEndpointConfigs() const { + auto endpoint_config_dump = std::make_unique(); - Http::Utility::QueryParams query_params = Http::Utility::parseQueryString(url); - if (query_params.size() != 1 || query_params.begin()->first != "enable" || - (query_params.begin()->second != "y" && query_params.begin()->second != "n")) { - response.add("?enable=\n"); - return Http::Code::BadRequest; - } + for (const auto& [name, cluster_ref] : server_.clusterManager().clusters()) { + const Upstream::Cluster& cluster = cluster_ref.get(); + 
Upstream::ClusterInfoConstSharedPtr cluster_info = cluster.info(); + envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment; - Http::Code res = Http::Code::OK; - bool enable = query_params.begin()->second == "y"; - if (enable) { - if (Profiler::Heap::isProfilerStarted()) { - response.add("Fail to start heap profiler: already started"); - res = Http::Code::BadRequest; - } else if (!Profiler::Heap::startProfiler(profile_path_)) { - // GCOVR_EXCL_START - // TODO(silentdai) remove the GCOVR when startProfiler is better implemented - response.add("Fail to start the heap profiler"); - res = Http::Code::InternalServerError; - // GCOVR_EXCL_STOP - } else { - response.add("Starting heap profiler"); - res = Http::Code::OK; - } - } else { - // !enable - if (!Profiler::Heap::isProfilerStarted()) { - response.add("Fail to stop heap profiler: not started"); - res = Http::Code::BadRequest; + if (cluster_info->edsServiceName().has_value()) { + cluster_load_assignment.set_cluster_name(cluster_info->edsServiceName().value()); } else { - Profiler::Heap::stopProfiler(); - response.add( - fmt::format("Heap profiler stopped and data written to {}. 
See " - "http://goog-perftools.sourceforge.net/doc/heap_profiler.html for details.", - profile_path_)); - res = Http::Code::OK; + cluster_load_assignment.set_cluster_name(cluster_info->name()); } - } - return res; -} + auto& policy = *cluster_load_assignment.mutable_policy(); -Http::Code AdminImpl::handlerHealthcheckFail(absl::string_view, Http::ResponseHeaderMap&, - Buffer::Instance& response, AdminStream&) { - server_.failHealthcheck(true); - response.add("OK\n"); - return Http::Code::OK; -} - -Http::Code AdminImpl::handlerHealthcheckOk(absl::string_view, Http::ResponseHeaderMap&, - Buffer::Instance& response, AdminStream&) { - server_.failHealthcheck(false); - response.add("OK\n"); - return Http::Code::OK; -} - -Http::Code AdminImpl::handlerHotRestartVersion(absl::string_view, Http::ResponseHeaderMap&, - Buffer::Instance& response, AdminStream&) { - response.add(server_.hotRestart().version()); - return Http::Code::OK; -} - -Http::Code AdminImpl::handlerLogging(absl::string_view url, Http::ResponseHeaderMap&, - Buffer::Instance& response, AdminStream&) { - Http::Utility::QueryParams query_params = Http::Utility::parseQueryString(url); - - Http::Code rc = Http::Code::OK; - if (!query_params.empty() && !changeLogLevel(query_params)) { - response.add("usage: /logging?= (change single level)\n"); - response.add("usage: /logging?level= (change all levels)\n"); - response.add("levels: "); - for (auto level_string_view : spdlog::level::level_string_views) { - response.add(fmt::format("{} ", level_string_view)); - } - - response.add("\n"); - rc = Http::Code::NotFound; - } - - response.add("active loggers:\n"); - for (const Logger::Logger& logger : Logger::Registry::loggers()) { - response.add(fmt::format(" {}: {}\n", logger.name(), logger.levelString())); - } - - response.add("\n"); - return rc; -} - -// TODO(ambuc): Add more tcmalloc stats, export proto details based on allocator. 
-Http::Code AdminImpl::handlerMemory(absl::string_view, Http::ResponseHeaderMap& response_headers, - Buffer::Instance& response, AdminStream&) { - response_headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.Json); - envoy::admin::v3::Memory memory; - memory.set_allocated(Memory::Stats::totalCurrentlyAllocated()); - memory.set_heap_size(Memory::Stats::totalCurrentlyReserved()); - memory.set_total_thread_cache(Memory::Stats::totalThreadCacheBytes()); - memory.set_pageheap_unmapped(Memory::Stats::totalPageHeapUnmapped()); - memory.set_pageheap_free(Memory::Stats::totalPageHeapFree()); - memory.set_total_physical_bytes(Memory::Stats::totalPhysicalBytes()); - response.add(MessageUtil::getJsonStringFromMessage(memory, true, true)); // pretty-print - return Http::Code::OK; -} - -Http::Code AdminImpl::handlerDrainListeners(absl::string_view url, Http::ResponseHeaderMap&, - Buffer::Instance& response, AdminStream&) { - const Http::Utility::QueryParams params = Http::Utility::parseQueryString(url); - ListenerManager::StopListenersType stop_listeners_type = - params.find("inboundonly") != params.end() ? 
ListenerManager::StopListenersType::InboundOnly - : ListenerManager::StopListenersType::All; - server_.listenerManager().stopListeners(stop_listeners_type); - response.add("OK\n"); - return Http::Code::OK; -} - -Http::Code AdminImpl::handlerServerInfo(absl::string_view, Http::ResponseHeaderMap& headers, - Buffer::Instance& response, AdminStream&) { - const std::time_t current_time = - std::chrono::system_clock::to_time_t(server_.timeSource().systemTime()); - const std::time_t uptime_current_epoch = current_time - server_.startTimeCurrentEpoch(); - const std::time_t uptime_all_epochs = current_time - server_.startTimeFirstEpoch(); - - ASSERT(uptime_current_epoch >= 0); - ASSERT(uptime_all_epochs >= 0); - - envoy::admin::v3::ServerInfo server_info; - server_info.set_version(VersionInfo::version()); - server_info.set_hot_restart_version(server_.hotRestart().version()); - server_info.set_state( - Utility::serverState(server_.initManager().state(), server_.healthCheckFailed())); - - server_info.mutable_uptime_current_epoch()->set_seconds(uptime_current_epoch); - server_info.mutable_uptime_all_epochs()->set_seconds(uptime_all_epochs); - envoy::admin::v3::CommandLineOptions* command_line_options = - server_info.mutable_command_line_options(); - *command_line_options = *server_.options().toCommandLineOptions(); - response.add(MessageUtil::getJsonStringFromMessage(server_info, true, true)); - headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.Json); - return Http::Code::OK; -} - -Http::Code AdminImpl::handlerReady(absl::string_view, Http::ResponseHeaderMap&, - Buffer::Instance& response, AdminStream&) { - const envoy::admin::v3::ServerInfo::State state = - Utility::serverState(server_.initManager().state(), server_.healthCheckFailed()); - - response.add(envoy::admin::v3::ServerInfo::State_Name(state) + "\n"); - Http::Code code = - state == envoy::admin::v3::ServerInfo::LIVE ? 
Http::Code::OK : Http::Code::ServiceUnavailable; - return code; -} - -Http::Code AdminImpl::handlerQuitQuitQuit(absl::string_view, Http::ResponseHeaderMap&, - Buffer::Instance& response, AdminStream&) { - server_.shutdown(); - response.add("OK\n"); - return Http::Code::OK; -} - -Http::Code AdminImpl::handlerListenerInfo(absl::string_view url, - Http::ResponseHeaderMap& response_headers, - Buffer::Instance& response, AdminStream&) { - const Http::Utility::QueryParams query_params = Http::Utility::parseQueryString(url); - const auto format_value = Utility::formatParam(query_params); - - if (format_value.has_value() && format_value.value() == "json") { - writeListenersAsJson(response); - response_headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.Json); - } else { - writeListenersAsText(response); - } - return Http::Code::OK; -} - -Http::Code AdminImpl::handlerCerts(absl::string_view, Http::ResponseHeaderMap& response_headers, - Buffer::Instance& response, AdminStream&) { - // This set is used to track distinct certificates. We may have multiple listeners, upstreams, etc - // using the same cert. 
- response_headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.Json); - envoy::admin::v3::Certificates certificates; - server_.sslContextManager().iterateContexts([&](const Ssl::Context& context) -> void { - envoy::admin::v3::Certificate& certificate = *certificates.add_certificates(); - if (context.getCaCertInformation() != nullptr) { - envoy::admin::v3::CertificateDetails* ca_certificate = certificate.add_ca_cert(); - *ca_certificate = *context.getCaCertInformation(); - } - for (const auto& cert_details : context.getCertChainInformation()) { - envoy::admin::v3::CertificateDetails* cert_chain = certificate.add_cert_chain(); - *cert_chain = *cert_details; - } - }); - response.add(MessageUtil::getJsonStringFromMessage(certificates, true, true)); - return Http::Code::OK; -} - -Http::Code AdminImpl::handlerRuntime(absl::string_view url, - Http::ResponseHeaderMap& response_headers, - Buffer::Instance& response, AdminStream&) { - const Http::Utility::QueryParams params = Http::Utility::parseQueryString(url); - response_headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.Json); - - // TODO(jsedgwick): Use proto to structure this output instead of arbitrary JSON. 
- const auto& layers = server_.runtime().snapshot().getLayers(); - - std::vector layer_names; - layer_names.reserve(layers.size()); - std::map> entries; - for (const auto& layer : layers) { - layer_names.push_back(ValueUtil::stringValue(layer->name())); - for (const auto& value : layer->values()) { - const auto found = entries.find(value.first); - if (found == entries.end()) { - entries.emplace(value.first, std::vector{}); + for (auto& host_set : cluster.prioritySet().hostSetsPerPriority()) { + policy.mutable_overprovisioning_factor()->set_value(host_set->overprovisioningFactor()); + + if (!host_set->hostsPerLocality().get().empty()) { + for (int index = 0; index < static_cast(host_set->hostsPerLocality().get().size()); + index++) { + auto locality_host_set = host_set->hostsPerLocality().get()[index]; + + if (!locality_host_set.empty()) { + auto& locality_lb_endpoint = *cluster_load_assignment.mutable_endpoints()->Add(); + locality_lb_endpoint.mutable_locality()->MergeFrom(locality_host_set[0]->locality()); + locality_lb_endpoint.set_priority(locality_host_set[0]->priority()); + if (host_set->localityWeights() != nullptr && !host_set->localityWeights()->empty()) { + locality_lb_endpoint.mutable_load_balancing_weight()->set_value( + (*host_set->localityWeights())[index]); + } + + for (auto& host : locality_host_set) { + addLbEndpoint(host, locality_lb_endpoint); + } + } + } + } else { + for (auto& host : host_set->hosts()) { + auto& locality_lb_endpoint = *cluster_load_assignment.mutable_endpoints()->Add(); + locality_lb_endpoint.mutable_locality()->MergeFrom(host->locality()); + locality_lb_endpoint.set_priority(host->priority()); + addLbEndpoint(host, locality_lb_endpoint); + } } } - } - for (const auto& layer : layers) { - for (auto& entry : entries) { - const auto found = layer->values().find(entry.first); - const auto& entry_value = - found == layer->values().end() ? 
EMPTY_STRING : found->second.raw_string_value_; - entry.second.push_back(entry_value); - } - } - - ProtobufWkt::Struct layer_entries; - auto* layer_entry_fields = layer_entries.mutable_fields(); - for (const auto& entry : entries) { - std::vector layer_entry_values; - layer_entry_values.reserve(entry.second.size()); - std::string final_value; - for (const auto& value : entry.second) { - if (!value.empty()) { - final_value = value; - } - layer_entry_values.push_back(ValueUtil::stringValue(value)); + if (cluster_info->addedViaApi()) { + auto& dynamic_endpoint = *endpoint_config_dump->mutable_dynamic_endpoint_configs()->Add(); + dynamic_endpoint.mutable_endpoint_config()->PackFrom(cluster_load_assignment); + } else { + auto& static_endpoint = *endpoint_config_dump->mutable_static_endpoint_configs()->Add(); + static_endpoint.mutable_endpoint_config()->PackFrom(cluster_load_assignment); } - - ProtobufWkt::Struct layer_entry_value; - auto* layer_entry_value_fields = layer_entry_value.mutable_fields(); - - (*layer_entry_value_fields)["final_value"] = ValueUtil::stringValue(final_value); - (*layer_entry_value_fields)["layer_values"] = ValueUtil::listValue(layer_entry_values); - (*layer_entry_fields)[entry.first] = ValueUtil::structValue(layer_entry_value); } - - ProtobufWkt::Struct runtime; - auto* fields = runtime.mutable_fields(); - - (*fields)["layers"] = ValueUtil::listValue(layer_names); - (*fields)["entries"] = ValueUtil::structValue(layer_entries); - - response.add(MessageUtil::getJsonStringFromMessage(runtime, true, true)); - return Http::Code::OK; + return endpoint_config_dump; } -bool AdminImpl::isFormUrlEncoded(const Http::HeaderEntry* content_type) const { - if (content_type == nullptr) { - return false; - } - - return content_type->value().getStringView() == - Http::Headers::get().ContentTypeValues.FormUrlEncoded; -} +Http::Code AdminImpl::handlerConfigDump(absl::string_view url, + Http::ResponseHeaderMap& response_headers, + Buffer::Instance& response, 
AdminStream&) const { + Http::Utility::QueryParams query_params = Http::Utility::parseAndDecodeQueryString(url); + const auto resource = resourceParam(query_params); + const auto mask = maskParam(query_params); + const bool include_eds = shouldIncludeEdsInDump(query_params); -Http::Code AdminImpl::handlerRuntimeModify(absl::string_view url, Http::ResponseHeaderMap&, - Buffer::Instance& response, AdminStream& admin_stream) { - Http::Utility::QueryParams params = Http::Utility::parseQueryString(url); - if (params.empty()) { - // Check if the params are in the request's body. - if (admin_stream.getRequestBody() != nullptr && - isFormUrlEncoded(admin_stream.getRequestHeaders().ContentType())) { - params = Http::Utility::parseFromBody(admin_stream.getRequestBody()->toString()); - } + envoy::admin::v3::ConfigDump dump; - if (params.empty()) { - response.add("usage: /runtime_modify?key1=value1&key2=value2&keyN=valueN\n"); - response.add(" or send the parameters as form values\n"); - response.add("use an empty value to remove a previously added override"); - return Http::Code::BadRequest; + if (resource.has_value()) { + auto err = addResourceToDump(dump, mask, resource.value(), include_eds); + if (err.has_value()) { + response.add(err.value().second); + return err.value().first; } + } else { + addAllConfigToDump(dump, mask, include_eds); } - std::unordered_map overrides; - overrides.insert(params.begin(), params.end()); - try { - server_.runtime().mergeValues(overrides); - } catch (const EnvoyException& e) { - response.add(e.what()); - return Http::Code::ServiceUnavailable; - } - response.add("OK\n"); - return Http::Code::OK; -} + MessageUtil::redact(dump); -Http::Code AdminImpl::handlerReopenLogs(absl::string_view, Http::ResponseHeaderMap&, - Buffer::Instance& response, AdminStream&) { - server_.accessLogManager().reopen(); - response.add("OK\n"); + response_headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.Json); + 
response.add(MessageUtil::getJsonStringFromMessage(dump, true)); // pretty-print return Http::Code::OK; } @@ -968,8 +654,9 @@ void AdminImpl::startHttpListener(const std::string& access_log_path, // TODO(mattklein123): Allow admin to use normal access logger extension loading and avoid the // hard dependency here. access_logs_.emplace_back(new Extensions::AccessLoggers::File::FileAccessLog( - access_log_path, {}, AccessLog::AccessLogFormatUtils::defaultAccessLogFormatter(), + access_log_path, {}, Formatter::SubstitutionFormatUtils::defaultSubstitutionFormatter(), server_.accessLogManager())); + null_overload_manager_.start(); socket_ = std::make_shared(address, socket_options, true); socket_factory_ = std::make_shared(socket_); listener_ = std::make_unique(*this, std::move(listener_scope)); @@ -989,73 +676,81 @@ AdminImpl::AdminImpl(const std::string& profile_path, Server::Instance& server) request_id_extension_(Http::RequestIDExtensionFactory::defaultInstance(server_.random())), profile_path_(profile_path), stats_(Http::ConnectionManagerImpl::generateStats("http.admin.", server_.stats())), + null_overload_manager_(server_.threadLocal()), tracing_stats_( Http::ConnectionManagerImpl::generateTracingStats("http.admin.", no_op_store_)), route_config_provider_(server.timeSource()), - scoped_route_config_provider_(server.timeSource()), + scoped_route_config_provider_(server.timeSource()), stats_handler_(server), + logs_handler_(server), profiling_handler_(profile_path), runtime_handler_(server), + listeners_handler_(server), server_cmd_handler_(server), server_info_handler_(server), // TODO(jsedgwick) add /runtime_reset endpoint that removes all admin-set values handlers_{ {"/", "Admin home page", MAKE_ADMIN_HANDLER(handlerAdminHome), false, false}, - {"/certs", "print certs on machine", MAKE_ADMIN_HANDLER(handlerCerts), false, false}, + {"/certs", "print certs on machine", + MAKE_ADMIN_HANDLER(server_info_handler_.handlerCerts), false, false}, {"/clusters", "upstream 
cluster status", MAKE_ADMIN_HANDLER(handlerClusters), false, false}, {"/config_dump", "dump current Envoy configs (experimental)", MAKE_ADMIN_HANDLER(handlerConfigDump), false, false}, {"/contention", "dump current Envoy mutex contention stats (if enabled)", - MAKE_ADMIN_HANDLER(handlerContention), false, false}, + MAKE_ADMIN_HANDLER(stats_handler_.handlerContention), false, false}, {"/cpuprofiler", "enable/disable the CPU profiler", - MAKE_ADMIN_HANDLER(handlerCpuProfiler), false, true}, + MAKE_ADMIN_HANDLER(profiling_handler_.handlerCpuProfiler), false, true}, {"/heapprofiler", "enable/disable the heap profiler", - MAKE_ADMIN_HANDLER(handlerHeapProfiler), false, true}, + MAKE_ADMIN_HANDLER(profiling_handler_.handlerHeapProfiler), false, true}, {"/healthcheck/fail", "cause the server to fail health checks", - MAKE_ADMIN_HANDLER(handlerHealthcheckFail), false, true}, + MAKE_ADMIN_HANDLER(server_cmd_handler_.handlerHealthcheckFail), false, true}, {"/healthcheck/ok", "cause the server to pass health checks", - MAKE_ADMIN_HANDLER(handlerHealthcheckOk), false, true}, + MAKE_ADMIN_HANDLER(server_cmd_handler_.handlerHealthcheckOk), false, true}, {"/help", "print out list of admin commands", MAKE_ADMIN_HANDLER(handlerHelp), false, false}, {"/hot_restart_version", "print the hot restart compatibility version", - MAKE_ADMIN_HANDLER(handlerHotRestartVersion), false, false}, - {"/logging", "query/change logging levels", MAKE_ADMIN_HANDLER(handlerLogging), false, - true}, - {"/memory", "print current allocation/heap usage", MAKE_ADMIN_HANDLER(handlerMemory), - false, false}, - {"/quitquitquit", "exit the server", MAKE_ADMIN_HANDLER(handlerQuitQuitQuit), false, - true}, - {"/reset_counters", "reset all counters to zero", StatsHandler::handlerResetCounters, - false, true}, - {"/drain_listeners", "drain listeners", MAKE_ADMIN_HANDLER(handlerDrainListeners), false, - true}, + MAKE_ADMIN_HANDLER(server_info_handler_.handlerHotRestartVersion), false, false}, + {"/logging", 
"query/change logging levels", + MAKE_ADMIN_HANDLER(logs_handler_.handlerLogging), false, true}, + {"/memory", "print current allocation/heap usage", + MAKE_ADMIN_HANDLER(server_info_handler_.handlerMemory), false, false}, + {"/quitquitquit", "exit the server", + MAKE_ADMIN_HANDLER(server_cmd_handler_.handlerQuitQuitQuit), false, true}, + {"/reset_counters", "reset all counters to zero", + MAKE_ADMIN_HANDLER(stats_handler_.handlerResetCounters), false, true}, + {"/drain_listeners", "drain listeners", + MAKE_ADMIN_HANDLER(listeners_handler_.handlerDrainListeners), false, true}, {"/server_info", "print server version/status information", - MAKE_ADMIN_HANDLER(handlerServerInfo), false, false}, + MAKE_ADMIN_HANDLER(server_info_handler_.handlerServerInfo), false, false}, {"/ready", "print server state, return 200 if LIVE, otherwise return 503", - MAKE_ADMIN_HANDLER(handlerReady), false, false}, - {"/stats", "print server stats", StatsHandler::handlerStats, false, false}, + MAKE_ADMIN_HANDLER(server_info_handler_.handlerReady), false, false}, + {"/stats", "print server stats", MAKE_ADMIN_HANDLER(stats_handler_.handlerStats), false, + false}, {"/stats/prometheus", "print server stats in prometheus format", - StatsHandler::handlerPrometheusStats, false, false}, + MAKE_ADMIN_HANDLER(stats_handler_.handlerPrometheusStats), false, false}, {"/stats/recentlookups", "Show recent stat-name lookups", - StatsHandler::handlerStatsRecentLookups, false, false}, + MAKE_ADMIN_HANDLER(stats_handler_.handlerStatsRecentLookups), false, false}, {"/stats/recentlookups/clear", "clear list of stat-name lookups and counter", - StatsHandler::handlerStatsRecentLookupsClear, false, true}, + MAKE_ADMIN_HANDLER(stats_handler_.handlerStatsRecentLookupsClear), false, true}, {"/stats/recentlookups/disable", "disable recording of reset stat-name lookup names", - StatsHandler::handlerStatsRecentLookupsDisable, false, true}, + MAKE_ADMIN_HANDLER(stats_handler_.handlerStatsRecentLookupsDisable), false, 
true}, {"/stats/recentlookups/enable", "enable recording of reset stat-name lookup names", - StatsHandler::handlerStatsRecentLookupsEnable, false, true}, - {"/listeners", "print listener info", MAKE_ADMIN_HANDLER(handlerListenerInfo), false, - false}, - {"/runtime", "print runtime values", MAKE_ADMIN_HANDLER(handlerRuntime), false, false}, - {"/runtime_modify", "modify runtime values", MAKE_ADMIN_HANDLER(handlerRuntimeModify), - false, true}, - {"/reopen_logs", "reopen access logs", MAKE_ADMIN_HANDLER(handlerReopenLogs), false, - true}, + MAKE_ADMIN_HANDLER(stats_handler_.handlerStatsRecentLookupsEnable), false, true}, + {"/listeners", "print listener info", + MAKE_ADMIN_HANDLER(listeners_handler_.handlerListenerInfo), false, false}, + {"/runtime", "print runtime values", MAKE_ADMIN_HANDLER(runtime_handler_.handlerRuntime), + false, false}, + {"/runtime_modify", "modify runtime values", + MAKE_ADMIN_HANDLER(runtime_handler_.handlerRuntimeModify), false, true}, + {"/reopen_logs", "reopen access logs", + MAKE_ADMIN_HANDLER(logs_handler_.handlerReopenLogs), false, true}, }, date_provider_(server.dispatcher().timeSource()), - admin_filter_chain_(std::make_shared()) {} + admin_filter_chain_(std::make_shared()), + local_reply_(LocalReply::Factory::createDefault()) {} Http::ServerConnectionPtr AdminImpl::createCodec(Network::Connection& connection, const Buffer::Instance& data, Http::ServerConnectionCallbacks& callbacks) { return Http::ConnectionManagerUtility::autoCreateCodec( - connection, data, callbacks, server_.stats(), Http::Http1Settings(), + connection, data, callbacks, server_.stats(), http1_codec_stats_, http2_codec_stats_, + Http::Http1Settings(), ::Envoy::Http2::Utility::initializeAndValidateOptions( envoy::config::core::v3::Http2ProtocolOptions()), maxRequestHeadersKb(), maxRequestHeadersCount(), headersWithUnderscoresAction()); @@ -1063,11 +758,12 @@ Http::ServerConnectionPtr AdminImpl::createCodec(Network::Connection& connection bool 
AdminImpl::createNetworkFilterChain(Network::Connection& connection, const std::vector&) { - // Don't pass in the overload manager so that the admin interface is accessible even when - // the envoy is overloaded. + // Pass in the null overload manager so that the admin interface is accessible even when Envoy is + // overloaded. connection.addReadFilter(Network::ReadFilterSharedPtr{new Http::ConnectionManagerImpl( *this, server_.drainManager(), server_.random(), server_.httpContext(), server_.runtime(), - server_.localInfo(), server_.clusterManager(), nullptr, server_.timeSource())}); + server_.localInfo(), server_.clusterManager(), null_overload_manager_, + server_.timeSource())}); return true; } @@ -1091,8 +787,7 @@ Http::Code AdminImpl::runCallback(absl::string_view path_and_query, if (path_and_query.compare(0, query_index, handler.prefix_) == 0) { found_handler = true; if (handler.mutates_server_state_) { - const absl::string_view method = - admin_stream.getRequestHeaders().Method()->value().getStringView(); + const absl::string_view method = admin_stream.getRequestHeaders().getMethodValue(); if (method != Http::Headers::get().MethodValues.Post) { ENVOY_LOG(error, "admin path \"{}\" mutates state, method={} rather than POST", handler.prefix_, method); @@ -1101,12 +796,7 @@ Http::Code AdminImpl::runCallback(absl::string_view path_and_query, break; } } - if (handler.requires_server_) { - code = handler.handler_with_server_(path_and_query, response_headers, response, - admin_stream, server_); - } else { - code = handler.handler_(path_and_query, response_headers, response, admin_stream); - } + code = handler.handler_(path_and_query, response_headers, response, admin_stream); Memory::Utils::tryShrinkHeap(); break; } @@ -1234,9 +924,9 @@ Http::Code AdminImpl::request(absl::string_view path_and_query, absl::string_vie Http::ResponseHeaderMap& response_headers, std::string& body) { AdminFilter filter(createCallbackFunction()); - Http::RequestHeaderMapImpl 
request_headers; - request_headers.setMethod(method); - filter.decodeHeaders(request_headers, false); + auto request_headers = Http::RequestHeaderMapImpl::create(); + request_headers->setMethod(method); + filter.decodeHeaders(*request_headers, false); Buffer::OwnedImpl response; Http::Code code = runCallback(path_and_query, response_headers, response, filter); diff --git a/source/server/http/admin.h b/source/server/admin/admin.h similarity index 75% rename from source/server/http/admin.h rename to source/server/admin/admin.h index 19dada14018ff..e3c66660fdc59 100644 --- a/source/server/http/admin.h +++ b/source/server/admin/admin.h @@ -4,7 +4,6 @@ #include #include #include -#include #include #include @@ -17,19 +16,23 @@ #include "envoy/http/request_id_extension.h" #include "envoy/network/filter.h" #include "envoy/network/listen_socket.h" -#include "envoy/runtime/runtime.h" #include "envoy/server/admin.h" #include "envoy/server/instance.h" #include "envoy/server/listener_manager.h" +#include "envoy/server/overload_manager.h" #include "envoy/upstream/outlier_detection.h" #include "envoy/upstream/resource_manager.h" +#include "common/common/assert.h" +#include "common/common/basic_resource_impl.h" #include "common/common/empty_string.h" #include "common/common/logger.h" #include "common/common/macros.h" #include "common/http/conn_manager_impl.h" #include "common/http/date_provider_impl.h" #include "common/http/default_server_string.h" +#include "common/http/http1/codec_stats.h" +#include "common/http/http2/codec_stats.h" #include "common/http/request_id_extension_impl.h" #include "common/http/utility.h" #include "common/network/connection_balancer_impl.h" @@ -37,8 +40,15 @@ #include "common/router/scoped_config_impl.h" #include "common/stats/isolated_store_impl.h" -#include "server/http/admin_filter.h" -#include "server/http/config_tracker_impl.h" +#include "server/admin/admin_filter.h" +#include "server/admin/config_tracker_impl.h" +#include 
"server/admin/listeners_handler.h" +#include "server/admin/logs_handler.h" +#include "server/admin/profiling_handler.h" +#include "server/admin/runtime_handler.h" +#include "server/admin/server_cmd_handler.h" +#include "server/admin/server_info_handler.h" +#include "server/admin/stats_handler.h" #include "extensions/filters/http/common/pass_through_filter.h" @@ -67,7 +77,7 @@ class AdminImpl : public Admin, Http::ResponseHeaderMap& response_headers, Buffer::Instance& response, AdminStream& admin_stream); const Network::Socket& socket() override { return *socket_; } - Network::Socket& mutable_socket() { return *socket_; } + Network::Socket& mutableSocket() { return *socket_; } // Server::Admin // TODO(jsedgwick) These can be managed with a generic version of ConfigTracker. @@ -83,6 +93,7 @@ class AdminImpl : public Admin, Network::Address::InstanceConstSharedPtr address, const Network::Socket::OptionsSharedPtr& socket_options, Stats::ScopePtr&& listener_scope) override; + uint32_t concurrency() const override { return server_.options().concurrency(); } // Network::FilterChainManager const Network::FilterChain* findFilterChain(const Network::ConnectionSocket&) const override { @@ -115,6 +126,7 @@ class AdminImpl : public Admin, Http::FilterChainFactory& filterFactory() override { return *this; } bool generateRequestId() const override { return false; } bool preserveExternalRequestId() const override { return false; } + bool alwaysSetRequestIdInResponse() const override { return false; } absl::optional idleTimeout() const override { return idle_timeout_; } bool isRoutable() const override { return false; } absl::optional maxConnectionDuration() const override { @@ -158,13 +170,16 @@ class AdminImpl : public Admin, const Http::TracingConnectionManagerConfig* tracingConfig() override { return nullptr; } Http::ConnectionManagerListenerStats& listenerStats() override { return listener_->stats_; } bool proxy100Continue() const override { return false; } + bool 
streamErrorOnInvalidHttpMessaging() const override { return false; } const Http::Http1Settings& http1Settings() const override { return http1_settings_; } bool shouldNormalizePath() const override { return true; } bool shouldMergeSlashes() const override { return true; } + bool shouldStripMatchingPort() const override { return false; } envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction headersWithUnderscoresAction() const override { return envoy::config::core::v3::HttpProtocolOptions::ALLOW; } + const LocalReply::LocalReply& localReply() const override { return *local_reply_; } Http::Code request(absl::string_view path_and_query, absl::string_view method, Http::ResponseHeaderMap& response_headers, std::string& body) override; void closeSocket(); @@ -178,33 +193,16 @@ class AdminImpl : public Admin, }; } - using HandlerWithServerCb = std::function; - private: /** * Individual admin handler including prefix, help text, and callback. */ struct UrlHandler { - UrlHandler(std::string prefix, std::string help_text, HandlerCb handler, bool removable, - bool mutates_server_state) - : prefix_(prefix), help_text_(help_text), handler_(handler), removable_(removable), - mutates_server_state_(mutates_server_state), requires_server_(false) {} - - UrlHandler(std::string prefix, std::string help_text, HandlerWithServerCb handler_with_server, - bool removable, bool mutates_server_state) - : prefix_(prefix), help_text_(help_text), handler_with_server_(handler_with_server), - removable_(removable), mutates_server_state_(mutates_server_state), - requires_server_(true) {} - const std::string prefix_; const std::string help_text_; const HandlerCb handler_; - const HandlerWithServerCb handler_with_server_; const bool removable_; const bool mutates_server_state_; - const bool requires_server_; }; /** @@ -251,11 +249,37 @@ class AdminImpl : public Admin, }; /** - * Attempt to change the log level of a logger or all loggers - * @param params supplies the incoming 
endpoint query params. - * @return TRUE if level change succeeded, FALSE otherwise. + * Implementation of OverloadManager that is never overloaded. Using this instead of the real + * OverloadManager keeps the admin interface accessible even when the proxy is overloaded. */ - bool changeLogLevel(const Http::Utility::QueryParams& params); + struct NullOverloadManager : public OverloadManager { + struct NullThreadLocalOverloadState : public ThreadLocalOverloadState { + const OverloadActionState& getState(const std::string&) override { return inactive_; } + + const OverloadActionState inactive_ = OverloadActionState::Inactive; + }; + + NullOverloadManager(ThreadLocal::SlotAllocator& slot_allocator) + : tls_(slot_allocator.allocateSlot()) {} + + void start() override { + tls_->set([](Event::Dispatcher&) -> ThreadLocal::ThreadLocalObjectSharedPtr { + return std::make_shared(); + }); + } + + ThreadLocalOverloadState& getThreadLocalOverloadState() override { + return tls_->getTyped(); + } + + bool registerForAction(const std::string&, Event::Dispatcher&, OverloadActionCb) override { + // This method shouldn't be called by the admin listener + NOT_REACHED_GCOVR_EXCL_LINE; + return false; + } + + ThreadLocal::SlotPtr tls_; + }; /** * Helper methods for the /clusters url handler. @@ -268,17 +292,11 @@ class AdminImpl : public Admin, void writeClustersAsJson(Buffer::Instance& response); void writeClustersAsText(Buffer::Instance& response); - /** - * Helper methods for the /listeners url handler. - */ - void writeListenersAsJson(Buffer::Instance& response); - void writeListenersAsText(Buffer::Instance& response); - /** * Helper methods for the /config_dump url handler. */ void addAllConfigToDump(envoy::admin::v3::ConfigDump& dump, - const absl::optional& mask) const; + const absl::optional& mask, bool include_eds) const; /** * Add the config matching the passed resource to the passed config dump. 
* @return absl::nullopt on success, else the Http::Code and an error message that should be added @@ -286,85 +304,39 @@ class AdminImpl : public Admin, */ absl::optional> addResourceToDump(envoy::admin::v3::ConfigDump& dump, const absl::optional& mask, - const std::string& resource) const; + const std::string& resource, bool include_eds) const; std::vector sortedHandlers() const; envoy::admin::v3::ServerInfo::State serverState(); + + /** + * Helper methods for the /config_dump url handler to add endpoints config + */ + void addLbEndpoint(const Upstream::HostSharedPtr& host, + envoy::config::endpoint::v3::LocalityLbEndpoints& locality_lb_endpoint) const; + ProtobufTypes::MessagePtr dumpEndpointConfigs() const; /** * URL handlers. */ Http::Code handlerAdminHome(absl::string_view path_and_query, Http::ResponseHeaderMap& response_headers, Buffer::Instance& response, AdminStream&); - Http::Code handlerCerts(absl::string_view path_and_query, - Http::ResponseHeaderMap& response_headers, Buffer::Instance& response, - AdminStream&); Http::Code handlerClusters(absl::string_view path_and_query, Http::ResponseHeaderMap& response_headers, Buffer::Instance& response, AdminStream&); Http::Code handlerConfigDump(absl::string_view path_and_query, Http::ResponseHeaderMap& response_headers, Buffer::Instance& response, AdminStream&) const; - Http::Code handlerContention(absl::string_view path_and_query, - Http::ResponseHeaderMap& response_headers, - Buffer::Instance& response, AdminStream&); - Http::Code handlerCpuProfiler(absl::string_view path_and_query, - Http::ResponseHeaderMap& response_headers, - Buffer::Instance& response, AdminStream&); - Http::Code handlerHeapProfiler(absl::string_view path_and_query, - Http::ResponseHeaderMap& response_headers, - Buffer::Instance& response, AdminStream&); - Http::Code handlerHealthcheckFail(absl::string_view path_and_query, - Http::ResponseHeaderMap& response_headers, - Buffer::Instance& response, AdminStream&); - Http::Code 
handlerHealthcheckOk(absl::string_view path_and_query, - Http::ResponseHeaderMap& response_headers, - Buffer::Instance& response, AdminStream&); Http::Code handlerHelp(absl::string_view path_and_query, Http::ResponseHeaderMap& response_headers, Buffer::Instance& response, AdminStream&); - Http::Code handlerHotRestartVersion(absl::string_view path_and_query, - Http::ResponseHeaderMap& response_headers, - Buffer::Instance& response, AdminStream&); - Http::Code handlerListenerInfo(absl::string_view path_and_query, - Http::ResponseHeaderMap& response_headers, - Buffer::Instance& response, AdminStream&); - Http::Code handlerLogging(absl::string_view path_and_query, - Http::ResponseHeaderMap& response_headers, Buffer::Instance& response, - AdminStream&); - Http::Code handlerMemory(absl::string_view path_and_query, - Http::ResponseHeaderMap& response_headers, Buffer::Instance& response, - AdminStream&); - Http::Code handlerMain(const std::string& path, Buffer::Instance& response, AdminStream&); - Http::Code handlerQuitQuitQuit(absl::string_view path_and_query, - Http::ResponseHeaderMap& response_headers, - Buffer::Instance& response, AdminStream&); - Http::Code handlerDrainListeners(absl::string_view path_and_query, - Http::ResponseHeaderMap& response_headers, - Buffer::Instance& response, AdminStream&); - Http::Code handlerServerInfo(absl::string_view path_and_query, - Http::ResponseHeaderMap& response_headers, - Buffer::Instance& response, AdminStream&); - Http::Code handlerReady(absl::string_view path_and_query, - Http::ResponseHeaderMap& response_headers, Buffer::Instance& response, - AdminStream&); - Http::Code handlerRuntime(absl::string_view path_and_query, - Http::ResponseHeaderMap& response_headers, Buffer::Instance& response, - AdminStream&); - Http::Code handlerRuntimeModify(absl::string_view path_and_query, - Http::ResponseHeaderMap& response_headers, - Buffer::Instance& response, AdminStream&); - Http::Code handlerReopenLogs(absl::string_view path_and_query, 
- Http::ResponseHeaderMap& response_headers, - Buffer::Instance& response, AdminStream&); - bool isFormUrlEncoded(const Http::HeaderEntry* content_type) const; class AdminListenSocketFactory : public Network::ListenSocketFactory { public: AdminListenSocketFactory(Network::SocketSharedPtr socket) : socket_(socket) {} // Network::ListenSocketFactory - Network::Address::SocketType socketType() const override { return socket_->socketType(); } + Network::Socket::Type socketType() const override { return socket_->socketType(); } const Network::Address::InstanceConstSharedPtr& localAddress() const override { return socket_->localAddress(); @@ -407,10 +379,14 @@ class AdminImpl : public Admin, Network::ActiveUdpListenerFactory* udpListenerFactory() override { NOT_REACHED_GCOVR_EXCL_LINE; } + Network::UdpPacketWriterFactoryOptRef udpPacketWriterFactory() override { + NOT_REACHED_GCOVR_EXCL_LINE; + } envoy::config::core::v3::TrafficDirection direction() const override { return envoy::config::core::v3::UNSPECIFIED; } Network::ConnectionBalancer& connectionBalancer() override { return connection_balancer_; } + ResourceLimit& openConnections() override { return open_connections_; } const std::vector& accessLogs() const override { return empty_access_logs_; } @@ -420,6 +396,7 @@ class AdminImpl : public Admin, Stats::ScopePtr scope_; Http::ConnectionManagerListenerStats stats_; Network::NopConnectionBalancerImpl connection_balancer_; + BasicResourceLimitImpl open_connections_; private: const std::vector empty_access_logs_; @@ -451,12 +428,20 @@ class AdminImpl : public Admin, std::list access_logs_; const std::string profile_path_; Http::ConnectionManagerStats stats_; + NullOverloadManager null_overload_manager_; // Note: this is here to essentially blackhole the tracing stats since they aren't used in the // Admin case. 
Stats::IsolatedStoreImpl no_op_store_; Http::ConnectionManagerTracingStats tracing_stats_; NullRouteConfigProvider route_config_provider_; NullScopedRouteConfigProvider scoped_route_config_provider_; + Server::StatsHandler stats_handler_; + Server::LogsHandler logs_handler_; + Server::ProfilingHandler profiling_handler_; + Server::RuntimeHandler runtime_handler_; + Server::ListenersHandler listeners_handler_; + Server::ServerCmdHandler server_cmd_handler_; + Server::ServerInfoHandler server_info_handler_; std::list handlers_; const uint32_t max_request_headers_kb_{Http::DEFAULT_MAX_REQUEST_HEADERS_KB}; const uint32_t max_request_headers_count_{Http::DEFAULT_MAX_HEADERS_COUNT}; @@ -467,12 +452,15 @@ class AdminImpl : public Admin, Http::SlowDateProviderImpl date_provider_; std::vector set_current_client_cert_details_; Http::Http1Settings http1_settings_; + Http::Http1::CodecStats::AtomicPtr http1_codec_stats_; + Http::Http2::CodecStats::AtomicPtr http2_codec_stats_; ConfigTrackerImpl config_tracker_; const Network::FilterChainSharedPtr admin_filter_chain_; Network::SocketSharedPtr socket_; Network::ListenSocketFactorySharedPtr socket_factory_; AdminListenerPtr listener_; const AdminInternalAddressConfig internal_address_config_; + const LocalReply::LocalReplyPtr local_reply_; }; } // namespace Server diff --git a/source/server/http/admin_filter.cc b/source/server/admin/admin_filter.cc similarity index 88% rename from source/server/http/admin_filter.cc rename to source/server/admin/admin_filter.cc index 7f9cf39309747..d2b70fa36a6dd 100644 --- a/source/server/http/admin_filter.cc +++ b/source/server/admin/admin_filter.cc @@ -1,6 +1,6 @@ -#include "server/http/admin_filter.h" +#include "server/admin/admin_filter.h" -#include "server/http/utils.h" +#include "server/admin/utils.h" namespace Envoy { namespace Server { @@ -62,14 +62,16 @@ const Http::RequestHeaderMap& AdminFilter::getRequestHeaders() const { } void AdminFilter::onComplete() { - absl::string_view path = 
request_headers_->Path()->value().getStringView(); + const absl::string_view path = request_headers_->getPathValue(); ENVOY_STREAM_LOG(debug, "request complete: path: {}", *decoder_callbacks_, path); Buffer::OwnedImpl response; - Http::ResponseHeaderMapPtr header_map{new Http::ResponseHeaderMapImpl}; + auto header_map = Http::ResponseHeaderMapImpl::create(); RELEASE_ASSERT(request_headers_, ""); Http::Code code = admin_server_callback_func_(path, *header_map, response, *this); Utility::populateFallbackResponseHeaders(code, *header_map); + decoder_callbacks_->streamInfo().setResponseCodeDetails( + StreamInfo::ResponseCodeDetails::get().AdminFilterResponse); decoder_callbacks_->encodeHeaders(std::move(header_map), end_stream_on_complete_ && response.length() == 0); diff --git a/source/server/http/admin_filter.h b/source/server/admin/admin_filter.h similarity index 100% rename from source/server/http/admin_filter.h rename to source/server/admin/admin_filter.h diff --git a/source/server/http/config_tracker_impl.cc b/source/server/admin/config_tracker_impl.cc similarity index 94% rename from source/server/http/config_tracker_impl.cc rename to source/server/admin/config_tracker_impl.cc index 252bd24a0f555..da1bc875a6ee3 100644 --- a/source/server/http/config_tracker_impl.cc +++ b/source/server/admin/config_tracker_impl.cc @@ -1,4 +1,4 @@ -#include "server/http/config_tracker_impl.h" +#include "server/admin/config_tracker_impl.h" namespace Envoy { namespace Server { diff --git a/source/server/http/config_tracker_impl.h b/source/server/admin/config_tracker_impl.h similarity index 100% rename from source/server/http/config_tracker_impl.h rename to source/server/admin/config_tracker_impl.h diff --git a/source/server/admin/handler_ctx.h b/source/server/admin/handler_ctx.h new file mode 100644 index 0000000000000..6ac5098213dec --- /dev/null +++ b/source/server/admin/handler_ctx.h @@ -0,0 +1,17 @@ +#pragma once + +#include "envoy/server/instance.h" + +namespace Envoy { 
+namespace Server { + +class HandlerContextBase { +public: + HandlerContextBase(Server::Instance& server) : server_(server) {} + +protected: + Server::Instance& server_; +}; + +} // namespace Server +} // namespace Envoy diff --git a/source/server/admin/listeners_handler.cc b/source/server/admin/listeners_handler.cc new file mode 100644 index 0000000000000..93407d9eb6cc8 --- /dev/null +++ b/source/server/admin/listeners_handler.cc @@ -0,0 +1,75 @@ +#include "server/admin/listeners_handler.h" + +#include "envoy/admin/v3/listeners.pb.h" + +#include "common/http/headers.h" +#include "common/http/utility.h" +#include "common/network/utility.h" + +#include "server/admin/utils.h" + +namespace Envoy { +namespace Server { + +ListenersHandler::ListenersHandler(Server::Instance& server) : HandlerContextBase(server) {} + +Http::Code ListenersHandler::handlerDrainListeners(absl::string_view url, Http::ResponseHeaderMap&, + Buffer::Instance& response, AdminStream&) { + const Http::Utility::QueryParams params = Http::Utility::parseQueryString(url); + + ListenerManager::StopListenersType stop_listeners_type = + params.find("inboundonly") != params.end() ? ListenerManager::StopListenersType::InboundOnly + : ListenerManager::StopListenersType::All; + + const bool graceful = params.find("graceful") != params.end(); + if (graceful) { + // Ignore calls to /drain_listeners?graceful if the drain sequence has + // already started. 
+ if (!server_.drainManager().draining()) { + server_.drainManager().startDrainSequence([this, stop_listeners_type]() { + server_.listenerManager().stopListeners(stop_listeners_type); + }); + } + } else { + server_.listenerManager().stopListeners(stop_listeners_type); + } + + response.add("OK\n"); + return Http::Code::OK; +} + +Http::Code ListenersHandler::handlerListenerInfo(absl::string_view url, + Http::ResponseHeaderMap& response_headers, + Buffer::Instance& response, AdminStream&) { + const Http::Utility::QueryParams query_params = Http::Utility::parseQueryString(url); + const auto format_value = Utility::formatParam(query_params); + + if (format_value.has_value() && format_value.value() == "json") { + writeListenersAsJson(response); + response_headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.Json); + } else { + writeListenersAsText(response); + } + return Http::Code::OK; +} + +void ListenersHandler::writeListenersAsJson(Buffer::Instance& response) { + envoy::admin::v3::Listeners listeners; + for (const auto& listener : server_.listenerManager().listeners()) { + envoy::admin::v3::ListenerStatus& listener_status = *listeners.add_listener_statuses(); + listener_status.set_name(listener.get().name()); + Network::Utility::addressToProtobufAddress(*listener.get().listenSocketFactory().localAddress(), + *listener_status.mutable_local_address()); + } + response.add(MessageUtil::getJsonStringFromMessage(listeners, true)); // pretty-print +} + +void ListenersHandler::writeListenersAsText(Buffer::Instance& response) { + for (const auto& listener : server_.listenerManager().listeners()) { + response.add(fmt::format("{}::{}\n", listener.get().name(), + listener.get().listenSocketFactory().localAddress()->asString())); + } +} + +} // namespace Server +} // namespace Envoy diff --git a/source/server/admin/listeners_handler.h b/source/server/admin/listeners_handler.h new file mode 100644 index 0000000000000..bf48f86419e58 --- /dev/null +++ 
b/source/server/admin/listeners_handler.h @@ -0,0 +1,38 @@ +#pragma once + +#include "envoy/buffer/buffer.h" +#include "envoy/http/codes.h" +#include "envoy/http/header_map.h" +#include "envoy/server/admin.h" +#include "envoy/server/instance.h" + +#include "server/admin/handler_ctx.h" + +#include "absl/strings/string_view.h" + +namespace Envoy { +namespace Server { + +class ListenersHandler : public HandlerContextBase { + +public: + ListenersHandler(Server::Instance& server); + + Http::Code handlerDrainListeners(absl::string_view path_and_query, + Http::ResponseHeaderMap& response_headers, + Buffer::Instance& response, AdminStream&); + + Http::Code handlerListenerInfo(absl::string_view path_and_query, + Http::ResponseHeaderMap& response_headers, + Buffer::Instance& response, AdminStream&); + +private: + /** + * Helper methods for the /listeners url handler. + */ + void writeListenersAsJson(Buffer::Instance& response); + void writeListenersAsText(Buffer::Instance& response); +}; + +} // namespace Server +} // namespace Envoy diff --git a/source/server/admin/logs_handler.cc b/source/server/admin/logs_handler.cc new file mode 100644 index 0000000000000..57b0fbdfca2fe --- /dev/null +++ b/source/server/admin/logs_handler.cc @@ -0,0 +1,95 @@ +#include "server/admin/logs_handler.h" + +#include + +#include "common/common/logger.h" + +#include "server/admin/utils.h" + +namespace Envoy { +namespace Server { + +LogsHandler::LogsHandler(Server::Instance& server) : HandlerContextBase(server) {} + +Http::Code LogsHandler::handlerLogging(absl::string_view url, Http::ResponseHeaderMap&, + Buffer::Instance& response, AdminStream&) { + Http::Utility::QueryParams query_params = Http::Utility::parseQueryString(url); + + Http::Code rc = Http::Code::OK; + if (!query_params.empty() && !changeLogLevel(query_params)) { + response.add("usage: /logging?= (change single level)\n"); + response.add("usage: /logging?level= (change all levels)\n"); + response.add("levels: "); + for (auto 
level_string_view : spdlog::level::level_string_views) { + response.add(fmt::format("{} ", level_string_view)); + } + + response.add("\n"); + rc = Http::Code::NotFound; + } + + response.add("active loggers:\n"); + for (const Logger::Logger& logger : Logger::Registry::loggers()) { + response.add(fmt::format(" {}: {}\n", logger.name(), logger.levelString())); + } + + response.add("\n"); + return rc; +} + +Http::Code LogsHandler::handlerReopenLogs(absl::string_view, Http::ResponseHeaderMap&, + Buffer::Instance& response, AdminStream&) { + server_.accessLogManager().reopen(); + response.add("OK\n"); + return Http::Code::OK; +} + +bool LogsHandler::changeLogLevel(const Http::Utility::QueryParams& params) { + if (params.size() != 1) { + return false; + } + + std::string name = params.begin()->first; + std::string level = params.begin()->second; + + // First see if the level is valid. + size_t level_to_use = std::numeric_limits::max(); + for (size_t i = 0; i < ARRAY_SIZE(spdlog::level::level_string_views); i++) { + if (level == spdlog::level::level_string_views[i]) { + level_to_use = i; + break; + } + } + + if (level_to_use == std::numeric_limits::max()) { + return false; + } + + // Now either change all levels or a single level. 
+ if (name == "level") { + ENVOY_LOG(debug, "change all log levels: level='{}'", level); + for (Logger::Logger& logger : Logger::Registry::loggers()) { + logger.setLevel(static_cast(level_to_use)); + } + } else { + ENVOY_LOG(debug, "change log level: name='{}' level='{}'", name, level); + Logger::Logger* logger_to_change = nullptr; + for (Logger::Logger& logger : Logger::Registry::loggers()) { + if (logger.name() == name) { + logger_to_change = &logger; + break; + } + } + + if (!logger_to_change) { + return false; + } + + logger_to_change->setLevel(static_cast(level_to_use)); + } + + return true; +} + +} // namespace Server +} // namespace Envoy diff --git a/source/server/admin/logs_handler.h b/source/server/admin/logs_handler.h new file mode 100644 index 0000000000000..1eea995d88ba2 --- /dev/null +++ b/source/server/admin/logs_handler.h @@ -0,0 +1,39 @@ +#pragma once + +#include "envoy/buffer/buffer.h" +#include "envoy/http/codes.h" +#include "envoy/http/header_map.h" +#include "envoy/server/admin.h" +#include "envoy/server/instance.h" + +#include "server/admin/handler_ctx.h" + +#include "absl/strings/string_view.h" + +namespace Envoy { +namespace Server { + +class LogsHandler : public HandlerContextBase, Logger::Loggable { + +public: + LogsHandler(Server::Instance& server); + + Http::Code handlerLogging(absl::string_view path_and_query, + Http::ResponseHeaderMap& response_headers, Buffer::Instance& response, + AdminStream&); + + Http::Code handlerReopenLogs(absl::string_view path_and_query, + Http::ResponseHeaderMap& response_headers, + Buffer::Instance& response, AdminStream&); + +private: + /** + * Attempt to change the log level of a logger or all loggers + * @param params supplies the incoming endpoint query params. + * @return TRUE if level change succeeded, FALSE otherwise. 
+ */ + bool changeLogLevel(const Http::Utility::QueryParams& params); +}; + +} // namespace Server +} // namespace Envoy diff --git a/source/server/admin/profiling_handler.cc b/source/server/admin/profiling_handler.cc new file mode 100644 index 0000000000000..243b8292a0af6 --- /dev/null +++ b/source/server/admin/profiling_handler.cc @@ -0,0 +1,84 @@ +#include "server/admin/profiling_handler.h" + +#include "common/profiler/profiler.h" + +#include "server/admin/utils.h" + +namespace Envoy { +namespace Server { + +ProfilingHandler::ProfilingHandler(const std::string& profile_path) : profile_path_(profile_path) {} + +Http::Code ProfilingHandler::handlerCpuProfiler(absl::string_view url, Http::ResponseHeaderMap&, + Buffer::Instance& response, AdminStream&) { + Http::Utility::QueryParams query_params = Http::Utility::parseAndDecodeQueryString(url); + if (query_params.size() != 1 || query_params.begin()->first != "enable" || + (query_params.begin()->second != "y" && query_params.begin()->second != "n")) { + response.add("?enable=\n"); + return Http::Code::BadRequest; + } + + bool enable = query_params.begin()->second == "y"; + if (enable && !Profiler::Cpu::profilerEnabled()) { + if (!Profiler::Cpu::startProfiler(profile_path_)) { + response.add("failure to start the profiler"); + return Http::Code::InternalServerError; + } + + } else if (!enable && Profiler::Cpu::profilerEnabled()) { + Profiler::Cpu::stopProfiler(); + } + + response.add("OK\n"); + return Http::Code::OK; +} + +Http::Code ProfilingHandler::handlerHeapProfiler(absl::string_view url, Http::ResponseHeaderMap&, + Buffer::Instance& response, AdminStream&) { + if (!Profiler::Heap::profilerEnabled()) { + response.add("The current build does not support heap profiler"); + return Http::Code::NotImplemented; + } + + Http::Utility::QueryParams query_params = Http::Utility::parseAndDecodeQueryString(url); + if (query_params.size() != 1 || query_params.begin()->first != "enable" || + (query_params.begin()->second != "y" 
&& query_params.begin()->second != "n")) { + response.add("?enable=\n"); + return Http::Code::BadRequest; + } + + Http::Code res = Http::Code::OK; + bool enable = query_params.begin()->second == "y"; + if (enable) { + if (Profiler::Heap::isProfilerStarted()) { + response.add("Fail to start heap profiler: already started"); + res = Http::Code::BadRequest; + } else if (!Profiler::Heap::startProfiler(profile_path_)) { + // GCOVR_EXCL_START + // TODO(silentdai) remove the GCOVR when startProfiler is better implemented + response.add("Fail to start the heap profiler"); + res = Http::Code::InternalServerError; + // GCOVR_EXCL_STOP + } else { + response.add("Starting heap profiler"); + res = Http::Code::OK; + } + } else { + // !enable + if (!Profiler::Heap::isProfilerStarted()) { + response.add("Fail to stop heap profiler: not started"); + res = Http::Code::BadRequest; + } else { + Profiler::Heap::stopProfiler(); + response.add( + fmt::format("Heap profiler stopped and data written to {}. See " + "http://goog-perftools.sourceforge.net/doc/heap_profiler.html for details.", + profile_path_)); + res = Http::Code::OK; + } + } + return res; +} + +} // namespace Server +} // namespace Envoy diff --git a/source/server/admin/profiling_handler.h b/source/server/admin/profiling_handler.h new file mode 100644 index 0000000000000..2ec81e24cae59 --- /dev/null +++ b/source/server/admin/profiling_handler.h @@ -0,0 +1,31 @@ +#pragma once + +#include "envoy/buffer/buffer.h" +#include "envoy/http/codes.h" +#include "envoy/http/header_map.h" +#include "envoy/server/admin.h" + +#include "absl/strings/string_view.h" + +namespace Envoy { +namespace Server { + +class ProfilingHandler { + +public: + ProfilingHandler(const std::string& profile_path); + + Http::Code handlerCpuProfiler(absl::string_view path_and_query, + Http::ResponseHeaderMap& response_headers, + Buffer::Instance& response, AdminStream&); + + Http::Code handlerHeapProfiler(absl::string_view path_and_query, + 
Http::ResponseHeaderMap& response_headers, + Buffer::Instance& response, AdminStream&); + +private: + const std::string profile_path_; +}; + +} // namespace Server +} // namespace Envoy diff --git a/source/server/admin/prometheus_stats.cc b/source/server/admin/prometheus_stats.cc new file mode 100644 index 0000000000000..a82d59878d93e --- /dev/null +++ b/source/server/admin/prometheus_stats.cc @@ -0,0 +1,249 @@ +#include "server/admin/prometheus_stats.h" + +#include "common/common/empty_string.h" +#include "common/common/macros.h" +#include "common/stats/histogram_impl.h" + +#include "absl/strings/str_cat.h" + +namespace Envoy { +namespace Server { + +namespace { + +const std::regex& promRegex() { CONSTRUCT_ON_FIRST_USE(std::regex, "[^a-zA-Z0-9_]"); } +const std::regex& namespaceRegex() { + CONSTRUCT_ON_FIRST_USE(std::regex, "^[a-zA-Z_][a-zA-Z0-9]*$"); +} + +/** + * Take a string and sanitize it according to Prometheus conventions. + */ +std::string sanitizeName(const std::string& name) { + // The name must match the regex [a-zA-Z_][a-zA-Z0-9_]* as required by + // prometheus. Refer to https://prometheus.io/docs/concepts/data_model/. + // The initial [a-zA-Z_] constraint is always satisfied by the namespace prefix. + return std::regex_replace(name, promRegex(), "_"); +} + +/* + * Determine whether a metric has never been emitted and choose to + * not show it if we only wanted used metrics. + */ +template +static bool shouldShowMetric(const StatType& metric, const bool used_only, + const absl::optional& regex) { + return ((!used_only || metric.used()) && + (!regex.has_value() || std::regex_search(metric.name(), regex.value()))); +} + +/* + * Comparator for Stats::Metric that does not require a string representation + * to make the comparison, for memory efficiency. 
+ */ +struct MetricLessThan { + bool operator()(const Stats::Metric* a, const Stats::Metric* b) const { + ASSERT(&a->constSymbolTable() == &b->constSymbolTable()); + return a->constSymbolTable().lessThan(a->statName(), b->statName()); + } +}; + +/** + * Processes a stat type (counter, gauge, histogram) by generating all output lines, sorting + * them by tag-extracted metric name, and then outputting them in the correct sorted order into + * response. + * + * @param response The buffer to put the output into. + * @param used_only Whether to only output stats that are used. + * @param regex A filter on which stats to output. + * @param metrics The metrics to output stats for. This must contain all stats of the given type + * to be included in the same output. + * @param generate_output A function which returns the output text for this metric. + * @param type The name of the prometheus metric type for used in TYPE annotations. + */ +template +uint64_t outputStatType( + Buffer::Instance& response, const bool used_only, const absl::optional& regex, + const std::vector>& metrics, + const std::function& generate_output, + absl::string_view type) { + + /* + * From + * https:*github.com/prometheus/docs/blob/master/content/docs/instrumenting/exposition_formats.md#grouping-and-sorting: + * + * All lines for a given metric must be provided as one single group, with the optional HELP and + * TYPE lines first (in no particular order). Beyond that, reproducible sorting in repeated + * expositions is preferred but not required, i.e. do not sort if the computational cost is + * prohibitive. + */ + + // This is an unsorted collection of dumb-pointers (no need to increment then decrement every + // refcount; ownership is held throughout by `metrics`). 
It is unsorted for efficiency, but will + // be sorted before producing the final output to satisfy the "preferred" ordering from the + // prometheus spec: metrics will be sorted by their tags' textual representation, which will be + // consistent across calls. + using StatTypeUnsortedCollection = std::vector; + + // Return early to avoid crashing when getting the symbol table from the first metric. + if (metrics.empty()) { + return 0; + } + + // There should only be one symbol table for all of the stats in the admin + // interface. If this assumption changes, the name comparisons in this function + // will have to change to compare to convert all StatNames to strings before + // comparison. + const Stats::SymbolTable& global_symbol_table = metrics.front()->constSymbolTable(); + + // Sorted collection of metrics sorted by their tagExtractedName, to satisfy the requirements + // of the exposition format. + std::map groups( + global_symbol_table); + + for (const auto& metric : metrics) { + ASSERT(&global_symbol_table == &metric->constSymbolTable()); + + if (!shouldShowMetric(*metric, used_only, regex)) { + continue; + } + + groups[metric->tagExtractedStatName()].push_back(metric.get()); + } + + for (auto& group : groups) { + const std::string prefixed_tag_extracted_name = + PrometheusStatsFormatter::metricName(global_symbol_table.toString(group.first)); + response.add(fmt::format("# TYPE {0} {1}\n", prefixed_tag_extracted_name, type)); + + // Sort before producing the final output to satisfy the "preferred" ordering from the + // prometheus spec: metrics will be sorted by their tags' textual representation, which will + // be consistent across calls. 
+ std::sort(group.second.begin(), group.second.end(), MetricLessThan()); + + for (const auto& metric : group.second) { + response.add(generate_output(*metric, prefixed_tag_extracted_name)); + } + response.add("\n"); + } + return groups.size(); +} + +/* + * Return the prometheus output for a numeric Stat (Counter or Gauge). + */ +template +std::string generateNumericOutput(const StatType& metric, + const std::string& prefixed_tag_extracted_name) { + const std::string tags = PrometheusStatsFormatter::formattedTags(metric.tags()); + return fmt::format("{0}{{{1}}} {2}\n", prefixed_tag_extracted_name, tags, metric.value()); +} + +/* + * Returns the prometheus output for a histogram. The output is a multi-line string (with embedded + * newlines) that contains all the individual bucket counts and sum/count for a single histogram + * (metric_name plus all tags). + */ +std::string generateHistogramOutput(const Stats::ParentHistogram& histogram, + const std::string& prefixed_tag_extracted_name) { + const std::string tags = PrometheusStatsFormatter::formattedTags(histogram.tags()); + const std::string hist_tags = histogram.tags().empty() ? EMPTY_STRING : (tags + ","); + + const Stats::HistogramStatistics& stats = histogram.cumulativeStatistics(); + Stats::ConstSupportedBuckets& supported_buckets = stats.supportedBuckets(); + const std::vector& computed_buckets = stats.computedBuckets(); + std::string output; + for (size_t i = 0; i < supported_buckets.size(); ++i) { + double bucket = supported_buckets[i]; + uint64_t value = computed_buckets[i]; + // We want to print the bucket in a fixed point (non-scientific) format. 
The fmt library + // doesn't have a specific modifier to format as a fixed-point value only so we use the + // 'g' operator which prints the number in general fixed point format or scientific format + // with precision 50 to round the number up to 32 significant digits in fixed point format + // which should cover pretty much all cases + output.append(fmt::format("{0}_bucket{{{1}le=\"{2:.32g}\"}} {3}\n", prefixed_tag_extracted_name, + hist_tags, bucket, value)); + } + + output.append(fmt::format("{0}_bucket{{{1}le=\"+Inf\"}} {2}\n", prefixed_tag_extracted_name, + hist_tags, stats.sampleCount())); + output.append(fmt::format("{0}_sum{{{1}}} {2:.32g}\n", prefixed_tag_extracted_name, tags, + stats.sampleSum())); + output.append(fmt::format("{0}_count{{{1}}} {2}\n", prefixed_tag_extracted_name, tags, + stats.sampleCount())); + + return output; +}; + +absl::flat_hash_set& prometheusNamespaces() { + MUTABLE_CONSTRUCT_ON_FIRST_USE(absl::flat_hash_set); +} + +} // namespace + +std::string PrometheusStatsFormatter::formattedTags(const std::vector& tags) { + std::vector buf; + buf.reserve(tags.size()); + for (const Stats::Tag& tag : tags) { + buf.push_back(fmt::format("{}=\"{}\"", sanitizeName(tag.name_), tag.value_)); + } + return absl::StrJoin(buf, ","); +} + +std::string PrometheusStatsFormatter::metricName(const std::string& extracted_name) { + std::string sanitized_name = sanitizeName(extracted_name); + + absl::string_view prom_namespace{sanitized_name}; + prom_namespace = prom_namespace.substr(0, prom_namespace.find_first_of('_')); + + if (prometheusNamespaces().contains(prom_namespace)) { + return sanitized_name; + } + + // Add namespacing prefix to avoid conflicts, as per best practice: + // https://prometheus.io/docs/practices/naming/#metric-names + // Also, naming conventions on https://prometheus.io/docs/concepts/data_model/ + return absl::StrCat("envoy_", sanitized_name); +} + +// TODO(efimki): Add support of text readouts stats. 
+uint64_t PrometheusStatsFormatter::statsAsPrometheus( + const std::vector& counters, + const std::vector& gauges, + const std::vector& histograms, Buffer::Instance& response, + const bool used_only, const absl::optional& regex) { + + uint64_t metric_name_count = 0; + metric_name_count += outputStatType( + response, used_only, regex, counters, generateNumericOutput, "counter"); + + metric_name_count += outputStatType(response, used_only, regex, gauges, + generateNumericOutput, "gauge"); + + metric_name_count += outputStatType( + response, used_only, regex, histograms, generateHistogramOutput, "histogram"); + + return metric_name_count; +} + +bool PrometheusStatsFormatter::registerPrometheusNamespace(absl::string_view prometheus_namespace) { + if (std::regex_match(prometheus_namespace.begin(), prometheus_namespace.end(), + namespaceRegex())) { + return prometheusNamespaces().insert(std::string(prometheus_namespace)).second; + } + return false; +} + +bool PrometheusStatsFormatter::unregisterPrometheusNamespace( + absl::string_view prometheus_namespace) { + auto it = prometheusNamespaces().find(prometheus_namespace); + if (it == prometheusNamespaces().end()) { + return false; + } + prometheusNamespaces().erase(it); + return true; +} + +} // namespace Server +} // namespace Envoy diff --git a/source/server/admin/prometheus_stats.h b/source/server/admin/prometheus_stats.h new file mode 100644 index 0000000000000..6e45db166db5e --- /dev/null +++ b/source/server/admin/prometheus_stats.h @@ -0,0 +1,59 @@ +#pragma once + +#include +#include + +#include "envoy/buffer/buffer.h" +#include "envoy/stats/histogram.h" +#include "envoy/stats/stats.h" + +namespace Envoy { +namespace Server { +/** + * Formatter for metric/labels exported to Prometheus. 
+ * + * See: https://prometheus.io/docs/concepts/data_model + */ +class PrometheusStatsFormatter { +public: + /** + * Extracts counters and gauges and relevant tags, appending them to + * the response buffer after sanitizing the metric / label names. + * @return uint64_t total number of metric types inserted in response. + */ + static uint64_t statsAsPrometheus(const std::vector& counters, + const std::vector& gauges, + const std::vector& histograms, + Buffer::Instance& response, const bool used_only, + const absl::optional& regex); + /** + * Format the given tags, returning a string as a comma-separated list + * of ="" pairs. + */ + static std::string formattedTags(const std::vector& tags); + + /** + * Format the given metric name, prefixed with "envoy_". + */ + static std::string metricName(const std::string& extracted_name); + + /** + * Register a prometheus namespace, stats starting with the namespace will not be + * automatically prefixed with envoy namespace. + * This method must be called from the main thread. + * @returns bool if a new namespace is registered, false if the namespace is already + * registered or the namespace is invalid. + */ + static bool registerPrometheusNamespace(absl::string_view prometheus_namespace); + + /** + * Unregister a prometheus namespace registered by `registerPrometheusNamespace` + * This method must be called from the main thread. + * @returns bool if the Prometheus namespace is unregistered. false if the namespace + * wasn't registered. 
+ */ + static bool unregisterPrometheusNamespace(absl::string_view prometheus_namespace); +}; + +} // namespace Server +} // namespace Envoy diff --git a/source/server/admin/runtime_handler.cc b/source/server/admin/runtime_handler.cc new file mode 100644 index 0000000000000..5719f4ac730e0 --- /dev/null +++ b/source/server/admin/runtime_handler.cc @@ -0,0 +1,113 @@ +#include "server/admin/runtime_handler.h" + +#include +#include + +#include "common/common/empty_string.h" +#include "common/http/headers.h" +#include "common/http/utility.h" + +#include "server/admin/utils.h" + +#include "absl/container/node_hash_map.h" + +namespace Envoy { +namespace Server { + +RuntimeHandler::RuntimeHandler(Server::Instance& server) : HandlerContextBase(server) {} + +Http::Code RuntimeHandler::handlerRuntime(absl::string_view url, + Http::ResponseHeaderMap& response_headers, + Buffer::Instance& response, AdminStream&) { + const Http::Utility::QueryParams params = Http::Utility::parseAndDecodeQueryString(url); + response_headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.Json); + + // TODO(jsedgwick): Use proto to structure this output instead of arbitrary JSON. + const auto& layers = server_.runtime().snapshot().getLayers(); + + std::vector layer_names; + layer_names.reserve(layers.size()); + std::map> entries; + for (const auto& layer : layers) { + layer_names.push_back(ValueUtil::stringValue(layer->name())); + for (const auto& value : layer->values()) { + const auto found = entries.find(value.first); + if (found == entries.end()) { + entries.emplace(value.first, std::vector{}); + } + } + } + + for (const auto& layer : layers) { + for (auto& entry : entries) { + const auto found = layer->values().find(entry.first); + const auto& entry_value = + found == layer->values().end() ? 
EMPTY_STRING : found->second.raw_string_value_; + entry.second.push_back(entry_value); + } + } + + ProtobufWkt::Struct layer_entries; + auto* layer_entry_fields = layer_entries.mutable_fields(); + for (const auto& entry : entries) { + std::vector layer_entry_values; + layer_entry_values.reserve(entry.second.size()); + std::string final_value; + for (const auto& value : entry.second) { + if (!value.empty()) { + final_value = value; + } + layer_entry_values.push_back(ValueUtil::stringValue(value)); + } + + ProtobufWkt::Struct layer_entry_value; + auto* layer_entry_value_fields = layer_entry_value.mutable_fields(); + + (*layer_entry_value_fields)["final_value"] = ValueUtil::stringValue(final_value); + (*layer_entry_value_fields)["layer_values"] = ValueUtil::listValue(layer_entry_values); + (*layer_entry_fields)[entry.first] = ValueUtil::structValue(layer_entry_value); + } + + ProtobufWkt::Struct runtime; + auto* fields = runtime.mutable_fields(); + + (*fields)["layers"] = ValueUtil::listValue(layer_names); + (*fields)["entries"] = ValueUtil::structValue(layer_entries); + + response.add(MessageUtil::getJsonStringFromMessage(runtime, true, true)); + return Http::Code::OK; +} + +Http::Code RuntimeHandler::handlerRuntimeModify(absl::string_view url, Http::ResponseHeaderMap&, + Buffer::Instance& response, + AdminStream& admin_stream) { + Http::Utility::QueryParams params = Http::Utility::parseAndDecodeQueryString(url); + if (params.empty()) { + // Check if the params are in the request's body. 
+ if (admin_stream.getRequestBody() != nullptr && + admin_stream.getRequestHeaders().getContentTypeValue() == + Http::Headers::get().ContentTypeValues.FormUrlEncoded) { + params = Http::Utility::parseFromBody(admin_stream.getRequestBody()->toString()); + } + + if (params.empty()) { + response.add("usage: /runtime_modify?key1=value1&key2=value2&keyN=valueN\n"); + response.add(" or send the parameters as form values\n"); + response.add("use an empty value to remove a previously added override"); + return Http::Code::BadRequest; + } + } + absl::node_hash_map overrides; + overrides.insert(params.begin(), params.end()); + try { + server_.runtime().mergeValues(overrides); + } catch (const EnvoyException& e) { + response.add(e.what()); + return Http::Code::ServiceUnavailable; + } + response.add("OK\n"); + return Http::Code::OK; +} + +} // namespace Server +} // namespace Envoy diff --git a/source/server/admin/runtime_handler.h b/source/server/admin/runtime_handler.h new file mode 100644 index 0000000000000..306a356574b3d --- /dev/null +++ b/source/server/admin/runtime_handler.h @@ -0,0 +1,30 @@ +#pragma once + +#include "envoy/buffer/buffer.h" +#include "envoy/http/codes.h" +#include "envoy/http/header_map.h" +#include "envoy/server/admin.h" +#include "envoy/server/instance.h" + +#include "server/admin/handler_ctx.h" + +#include "absl/strings/string_view.h" + +namespace Envoy { +namespace Server { + +class RuntimeHandler : public HandlerContextBase { + +public: + RuntimeHandler(Server::Instance& server); + + Http::Code handlerRuntime(absl::string_view path_and_query, + Http::ResponseHeaderMap& response_headers, Buffer::Instance& response, + AdminStream&); + Http::Code handlerRuntimeModify(absl::string_view path_and_query, + Http::ResponseHeaderMap& response_headers, + Buffer::Instance& response, AdminStream&); +}; + +} // namespace Server +} // namespace Envoy diff --git a/source/server/admin/server_cmd_handler.cc b/source/server/admin/server_cmd_handler.cc new file mode 
100644 index 0000000000000..dfa66a41ee84d --- /dev/null +++ b/source/server/admin/server_cmd_handler.cc @@ -0,0 +1,30 @@ +#include "server/admin/server_cmd_handler.h" + +namespace Envoy { +namespace Server { + +ServerCmdHandler::ServerCmdHandler(Server::Instance& server) : HandlerContextBase(server) {} + +Http::Code ServerCmdHandler::handlerHealthcheckFail(absl::string_view, Http::ResponseHeaderMap&, + Buffer::Instance& response, AdminStream&) { + server_.failHealthcheck(true); + response.add("OK\n"); + return Http::Code::OK; +} + +Http::Code ServerCmdHandler::handlerHealthcheckOk(absl::string_view, Http::ResponseHeaderMap&, + Buffer::Instance& response, AdminStream&) { + server_.failHealthcheck(false); + response.add("OK\n"); + return Http::Code::OK; +} + +Http::Code ServerCmdHandler::handlerQuitQuitQuit(absl::string_view, Http::ResponseHeaderMap&, + Buffer::Instance& response, AdminStream&) { + server_.shutdown(); + response.add("OK\n"); + return Http::Code::OK; +} + +} // namespace Server +} // namespace Envoy diff --git a/source/server/admin/server_cmd_handler.h b/source/server/admin/server_cmd_handler.h new file mode 100644 index 0000000000000..cddfb94b39176 --- /dev/null +++ b/source/server/admin/server_cmd_handler.h @@ -0,0 +1,35 @@ +#pragma once + +#include "envoy/buffer/buffer.h" +#include "envoy/http/codes.h" +#include "envoy/http/header_map.h" +#include "envoy/server/admin.h" +#include "envoy/server/instance.h" + +#include "server/admin/handler_ctx.h" + +#include "absl/strings/string_view.h" + +namespace Envoy { +namespace Server { + +class ServerCmdHandler : public HandlerContextBase { + +public: + ServerCmdHandler(Server::Instance& server); + + Http::Code handlerQuitQuitQuit(absl::string_view path_and_query, + Http::ResponseHeaderMap& response_headers, + Buffer::Instance& response, AdminStream&); + + Http::Code handlerHealthcheckFail(absl::string_view path_and_query, + Http::ResponseHeaderMap& response_headers, + Buffer::Instance& response, 
AdminStream&); + + Http::Code handlerHealthcheckOk(absl::string_view path_and_query, + Http::ResponseHeaderMap& response_headers, + Buffer::Instance& response, AdminStream&); +}; + +} // namespace Server +} // namespace Envoy diff --git a/source/server/admin/server_info_handler.cc b/source/server/admin/server_info_handler.cc new file mode 100644 index 0000000000000..d668dac83992f --- /dev/null +++ b/source/server/admin/server_info_handler.cc @@ -0,0 +1,97 @@ +#include "server/admin/server_info_handler.h" + +#include "envoy/admin/v3/memory.pb.h" + +#include "common/memory/stats.h" +#include "common/version/version.h" + +#include "server/admin/utils.h" + +namespace Envoy { +namespace Server { + +ServerInfoHandler::ServerInfoHandler(Server::Instance& server) : HandlerContextBase(server) {} + +Http::Code ServerInfoHandler::handlerCerts(absl::string_view, + Http::ResponseHeaderMap& response_headers, + Buffer::Instance& response, AdminStream&) { + // This set is used to track distinct certificates. We may have multiple listeners, upstreams, etc + // using the same cert. 
+ response_headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.Json); + envoy::admin::v3::Certificates certificates; + server_.sslContextManager().iterateContexts([&](const Ssl::Context& context) -> void { + envoy::admin::v3::Certificate& certificate = *certificates.add_certificates(); + if (context.getCaCertInformation() != nullptr) { + envoy::admin::v3::CertificateDetails* ca_certificate = certificate.add_ca_cert(); + *ca_certificate = *context.getCaCertInformation(); + } + for (const auto& cert_details : context.getCertChainInformation()) { + envoy::admin::v3::CertificateDetails* cert_chain = certificate.add_cert_chain(); + *cert_chain = *cert_details; + } + }); + response.add(MessageUtil::getJsonStringFromMessage(certificates, true, true)); + return Http::Code::OK; +} + +Http::Code ServerInfoHandler::handlerHotRestartVersion(absl::string_view, Http::ResponseHeaderMap&, + Buffer::Instance& response, AdminStream&) { + response.add(server_.hotRestart().version()); + return Http::Code::OK; +} + +// TODO(ambuc): Add more tcmalloc stats, export proto details based on allocator. 
+Http::Code ServerInfoHandler::handlerMemory(absl::string_view, + Http::ResponseHeaderMap& response_headers, + Buffer::Instance& response, AdminStream&) { + response_headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.Json); + envoy::admin::v3::Memory memory; + memory.set_allocated(Memory::Stats::totalCurrentlyAllocated()); + memory.set_heap_size(Memory::Stats::totalCurrentlyReserved()); + memory.set_total_thread_cache(Memory::Stats::totalThreadCacheBytes()); + memory.set_pageheap_unmapped(Memory::Stats::totalPageHeapUnmapped()); + memory.set_pageheap_free(Memory::Stats::totalPageHeapFree()); + memory.set_total_physical_bytes(Memory::Stats::totalPhysicalBytes()); + response.add(MessageUtil::getJsonStringFromMessage(memory, true, true)); // pretty-print + return Http::Code::OK; +} + +Http::Code ServerInfoHandler::handlerReady(absl::string_view, Http::ResponseHeaderMap&, + Buffer::Instance& response, AdminStream&) { + const envoy::admin::v3::ServerInfo::State state = + Utility::serverState(server_.initManager().state(), server_.healthCheckFailed()); + + response.add(envoy::admin::v3::ServerInfo::State_Name(state) + "\n"); + Http::Code code = + state == envoy::admin::v3::ServerInfo::LIVE ? 
Http::Code::OK : Http::Code::ServiceUnavailable; + return code; +} + +Http::Code ServerInfoHandler::handlerServerInfo(absl::string_view, Http::ResponseHeaderMap& headers, + Buffer::Instance& response, AdminStream&) { + const std::time_t current_time = + std::chrono::system_clock::to_time_t(server_.timeSource().systemTime()); + const std::time_t uptime_current_epoch = current_time - server_.startTimeCurrentEpoch(); + const std::time_t uptime_all_epochs = current_time - server_.startTimeFirstEpoch(); + + ASSERT(uptime_current_epoch >= 0); + ASSERT(uptime_all_epochs >= 0); + + envoy::admin::v3::ServerInfo server_info; + server_info.set_version(VersionInfo::version()); + server_info.set_hot_restart_version(server_.hotRestart().version()); + server_info.set_state( + Utility::serverState(server_.initManager().state(), server_.healthCheckFailed())); + + server_info.mutable_uptime_current_epoch()->set_seconds(uptime_current_epoch); + server_info.mutable_uptime_all_epochs()->set_seconds(uptime_all_epochs); + envoy::admin::v3::CommandLineOptions* command_line_options = + server_info.mutable_command_line_options(); + *command_line_options = *server_.options().toCommandLineOptions(); + response.add(MessageUtil::getJsonStringFromMessage(server_info, true, true)); + headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.Json); + return Http::Code::OK; +} + +} // namespace Server +} // namespace Envoy diff --git a/source/server/admin/server_info_handler.h b/source/server/admin/server_info_handler.h new file mode 100644 index 0000000000000..6a2a29abf3acf --- /dev/null +++ b/source/server/admin/server_info_handler.h @@ -0,0 +1,43 @@ +#pragma once + +#include "envoy/buffer/buffer.h" +#include "envoy/http/codes.h" +#include "envoy/http/header_map.h" +#include "envoy/server/admin.h" +#include "envoy/server/instance.h" + +#include "server/admin/handler_ctx.h" + +#include "absl/strings/string_view.h" + +namespace Envoy { +namespace Server { + +class ServerInfoHandler : 
public HandlerContextBase { + +public: + ServerInfoHandler(Server::Instance& server); + + Http::Code handlerCerts(absl::string_view path_and_query, + Http::ResponseHeaderMap& response_headers, Buffer::Instance& response, + AdminStream&); + + Http::Code handlerServerInfo(absl::string_view path_and_query, + Http::ResponseHeaderMap& response_headers, + Buffer::Instance& response, AdminStream&); + + Http::Code handlerReady(absl::string_view path_and_query, + Http::ResponseHeaderMap& response_headers, Buffer::Instance& response, + AdminStream&); + + Http::Code handlerHotRestartVersion(absl::string_view path_and_query, + Http::ResponseHeaderMap& response_headers, + Buffer::Instance& response, AdminStream&); + + Http::Code handlerMemory(absl::string_view path_and_query, + Http::ResponseHeaderMap& response_headers, Buffer::Instance& response, + AdminStream&); +}; + +} // namespace Server +} // namespace Envoy diff --git a/source/server/http/stats_handler.cc b/source/server/admin/stats_handler.cc similarity index 55% rename from source/server/http/stats_handler.cc rename to source/server/admin/stats_handler.cc index 0d37267cf66f8..e64fd878a8fb4 100644 --- a/source/server/http/stats_handler.cc +++ b/source/server/admin/stats_handler.cc @@ -1,36 +1,35 @@ -#include "server/http/stats_handler.h" +#include "server/admin/stats_handler.h" + +#include "envoy/admin/v3/mutex_stats.pb.h" #include "common/common/empty_string.h" #include "common/html/utility.h" #include "common/http/headers.h" #include "common/http/utility.h" -#include "server/http/utils.h" +#include "server/admin/prometheus_stats.h" +#include "server/admin/utils.h" namespace Envoy { namespace Server { const uint64_t RecentLookupsCapacity = 100; -namespace { -const std::regex& promRegex() { CONSTRUCT_ON_FIRST_USE(std::regex, "[^a-zA-Z0-9_]"); } -} // namespace +StatsHandler::StatsHandler(Server::Instance& server) : HandlerContextBase(server) {} Http::Code StatsHandler::handlerResetCounters(absl::string_view, 
Http::ResponseHeaderMap&, - Buffer::Instance& response, AdminStream&, - Server::Instance& server) { - for (const Stats::CounterSharedPtr& counter : server.stats().counters()) { + Buffer::Instance& response, AdminStream&) { + for (const Stats::CounterSharedPtr& counter : server_.stats().counters()) { counter->reset(); } - server.stats().symbolTable().clearRecentLookups(); + server_.stats().symbolTable().clearRecentLookups(); response.add("OK\n"); return Http::Code::OK; } Http::Code StatsHandler::handlerStatsRecentLookups(absl::string_view, Http::ResponseHeaderMap&, - Buffer::Instance& response, AdminStream&, - Server::Instance& server) { - Stats::SymbolTable& symbol_table = server.stats().symbolTable(); + Buffer::Instance& response, AdminStream&) { + Stats::SymbolTable& symbol_table = server_.stats().symbolTable(); std::string table; const uint64_t total = symbol_table.getRecentLookups([&table](absl::string_view name, uint64_t count) { @@ -46,37 +45,34 @@ Http::Code StatsHandler::handlerStatsRecentLookups(absl::string_view, Http::Resp } Http::Code StatsHandler::handlerStatsRecentLookupsClear(absl::string_view, Http::ResponseHeaderMap&, - Buffer::Instance& response, AdminStream&, - Server::Instance& server) { - server.stats().symbolTable().clearRecentLookups(); + Buffer::Instance& response, AdminStream&) { + server_.stats().symbolTable().clearRecentLookups(); response.add("OK\n"); return Http::Code::OK; } Http::Code StatsHandler::handlerStatsRecentLookupsDisable(absl::string_view, Http::ResponseHeaderMap&, - Buffer::Instance& response, AdminStream&, - Server::Instance& server) { - server.stats().symbolTable().setRecentLookupCapacity(0); + Buffer::Instance& response, + AdminStream&) { + server_.stats().symbolTable().setRecentLookupCapacity(0); response.add("OK\n"); return Http::Code::OK; } Http::Code StatsHandler::handlerStatsRecentLookupsEnable(absl::string_view, Http::ResponseHeaderMap&, - Buffer::Instance& response, AdminStream&, - Server::Instance& server) { - 
server.stats().symbolTable().setRecentLookupCapacity(RecentLookupsCapacity); + Buffer::Instance& response, AdminStream&) { + server_.stats().symbolTable().setRecentLookupCapacity(RecentLookupsCapacity); response.add("OK\n"); return Http::Code::OK; } Http::Code StatsHandler::handlerStats(absl::string_view url, Http::ResponseHeaderMap& response_headers, - Buffer::Instance& response, AdminStream& admin_stream, - Server::Instance& server) { + Buffer::Instance& response, AdminStream& admin_stream) { Http::Code rc = Http::Code::OK; - const Http::Utility::QueryParams params = Http::Utility::parseQueryString(url); + const Http::Utility::QueryParams params = Http::Utility::parseAndDecodeQueryString(url); const bool used_only = params.find("usedonly") != params.end(); absl::optional regex; @@ -85,13 +81,13 @@ Http::Code StatsHandler::handlerStats(absl::string_view url, } std::map all_stats; - for (const Stats::CounterSharedPtr& counter : server.stats().counters()) { + for (const Stats::CounterSharedPtr& counter : server_.stats().counters()) { if (shouldShowMetric(*counter, used_only, regex)) { all_stats.emplace(counter->name(), counter->value()); } } - for (const Stats::GaugeSharedPtr& gauge : server.stats().gauges()) { + for (const Stats::GaugeSharedPtr& gauge : server_.stats().gauges()) { if (shouldShowMetric(*gauge, used_only, regex)) { ASSERT(gauge->importMode() != Stats::Gauge::ImportMode::Uninitialized); all_stats.emplace(gauge->name(), gauge->value()); @@ -99,7 +95,7 @@ Http::Code StatsHandler::handlerStats(absl::string_view url, } std::map text_readouts; - for (const auto& text_readout : server.stats().textReadouts()) { + for (const auto& text_readout : server_.stats().textReadouts()) { if (shouldShowMetric(*text_readout, used_only, regex)) { text_readouts.emplace(text_readout->name(), text_readout->value()); } @@ -109,9 +105,9 @@ Http::Code StatsHandler::handlerStats(absl::string_view url, if (format_value.value() == "json") { 
response_headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.Json); response.add( - statsAsJson(all_stats, text_readouts, server.stats().histograms(), used_only, regex)); + statsAsJson(all_stats, text_readouts, server_.stats().histograms(), used_only, regex)); } else if (format_value.value() == "prometheus") { - return handlerPrometheusStats(url, response_headers, response, admin_stream, server); + return handlerPrometheusStats(url, response_headers, response, admin_stream); } else { response.add("usage: /stats?format=json or /stats?format=prometheus \n"); response.add("\n"); @@ -125,13 +121,11 @@ Http::Code StatsHandler::handlerStats(absl::string_view url, for (const auto& stat : all_stats) { response.add(fmt::format("{}: {}\n", stat.first, stat.second)); } - // TODO(ramaraochavali): See the comment in ThreadLocalStoreImpl::histograms() for why we use a - // multimap here. This makes sure that duplicate histograms get output. When shared storage is - // implemented this can be switched back to a normal map. - std::multimap all_histograms; - for (const Stats::ParentHistogramSharedPtr& histogram : server.stats().histograms()) { + std::map all_histograms; + for (const Stats::ParentHistogramSharedPtr& histogram : server_.stats().histograms()) { if (shouldShowMetric(*histogram, used_only, regex)) { - all_histograms.emplace(histogram->name(), histogram->quantileSummary()); + auto insert = all_histograms.emplace(histogram->name(), histogram->quantileSummary()); + ASSERT(insert.second); // No duplicates expected. 
} } for (const auto& histogram : all_histograms) { @@ -143,118 +137,38 @@ Http::Code StatsHandler::handlerStats(absl::string_view url, Http::Code StatsHandler::handlerPrometheusStats(absl::string_view path_and_query, Http::ResponseHeaderMap&, - Buffer::Instance& response, AdminStream&, - Server::Instance& server) { - const Http::Utility::QueryParams params = Http::Utility::parseQueryString(path_and_query); + Buffer::Instance& response, AdminStream&) { + const Http::Utility::QueryParams params = + Http::Utility::parseAndDecodeQueryString(path_and_query); const bool used_only = params.find("usedonly") != params.end(); absl::optional regex; if (!Utility::filterParam(params, response, regex)) { return Http::Code::BadRequest; } - PrometheusStatsFormatter::statsAsPrometheus(server.stats().counters(), server.stats().gauges(), - server.stats().histograms(), response, used_only, + PrometheusStatsFormatter::statsAsPrometheus(server_.stats().counters(), server_.stats().gauges(), + server_.stats().histograms(), response, used_only, regex); return Http::Code::OK; } -std::string PrometheusStatsFormatter::sanitizeName(const std::string& name) { - // The name must match the regex [a-zA-Z_][a-zA-Z0-9_]* as required by - // prometheus. Refer to https://prometheus.io/docs/concepts/data_model/. 
- std::string stats_name = std::regex_replace(name, promRegex(), "_"); - if (stats_name[0] >= '0' && stats_name[0] <= '9') { - return absl::StrCat("_", stats_name); - } else { - return stats_name; - } -} - -std::string PrometheusStatsFormatter::formattedTags(const std::vector& tags) { - std::vector buf; - buf.reserve(tags.size()); - for (const Stats::Tag& tag : tags) { - buf.push_back(fmt::format("{}=\"{}\"", sanitizeName(tag.name_), tag.value_)); - } - return absl::StrJoin(buf, ","); -} - -std::string PrometheusStatsFormatter::metricName(const std::string& extracted_name) { - // Add namespacing prefix to avoid conflicts, as per best practice: - // https://prometheus.io/docs/practices/naming/#metric-names - // Also, naming conventions on https://prometheus.io/docs/concepts/data_model/ - return sanitizeName(fmt::format("envoy_{0}", extracted_name)); -} - -// TODO(efimki): Add support of text readouts stats. -uint64_t PrometheusStatsFormatter::statsAsPrometheus( - const std::vector& counters, - const std::vector& gauges, - const std::vector& histograms, Buffer::Instance& response, - const bool used_only, const absl::optional& regex) { - std::unordered_set metric_type_tracker; - for (const auto& counter : counters) { - if (!shouldShowMetric(*counter, used_only, regex)) { - continue; - } - - const std::string tags = formattedTags(counter->tags()); - const std::string metric_name = metricName(counter->tagExtractedName()); - if (metric_type_tracker.find(metric_name) == metric_type_tracker.end()) { - metric_type_tracker.insert(metric_name); - response.add(fmt::format("# TYPE {0} counter\n", metric_name)); - } - response.add(fmt::format("{0}{{{1}}} {2}\n", metric_name, tags, counter->value())); - } - - for (const auto& gauge : gauges) { - if (!shouldShowMetric(*gauge, used_only, regex)) { - continue; - } - - const std::string tags = formattedTags(gauge->tags()); - const std::string metric_name = metricName(gauge->tagExtractedName()); - if 
(metric_type_tracker.find(metric_name) == metric_type_tracker.end()) { - metric_type_tracker.insert(metric_name); - response.add(fmt::format("# TYPE {0} gauge\n", metric_name)); - } - response.add(fmt::format("{0}{{{1}}} {2}\n", metric_name, tags, gauge->value())); - } - - for (const auto& histogram : histograms) { - if (!shouldShowMetric(*histogram, used_only, regex)) { - continue; - } - - const std::string tags = formattedTags(histogram->tags()); - const std::string hist_tags = histogram->tags().empty() ? EMPTY_STRING : (tags + ","); - - const std::string metric_name = metricName(histogram->tagExtractedName()); - if (metric_type_tracker.find(metric_name) == metric_type_tracker.end()) { - metric_type_tracker.insert(metric_name); - response.add(fmt::format("# TYPE {0} histogram\n", metric_name)); - } +// TODO(ambuc) Export this as a server (?) stat for monitoring. +Http::Code StatsHandler::handlerContention(absl::string_view, + Http::ResponseHeaderMap& response_headers, + Buffer::Instance& response, AdminStream&) { - const Stats::HistogramStatistics& stats = histogram->cumulativeStatistics(); - const std::vector& supported_buckets = stats.supportedBuckets(); - const std::vector& computed_buckets = stats.computedBuckets(); - for (size_t i = 0; i < supported_buckets.size(); ++i) { - double bucket = supported_buckets[i]; - uint64_t value = computed_buckets[i]; - // We want to print the bucket in a fixed point (non-scientific) format. 
The fmt library - // doesn't have a specific modifier to format as a fixed-point value only so we use the - // 'g' operator which prints the number in general fixed point format or scientific format - // with precision 50 to round the number up to 32 significant digits in fixed point format - // which should cover pretty much all cases - response.add(fmt::format("{0}_bucket{{{1}le=\"{2:.32g}\"}} {3}\n", metric_name, hist_tags, - bucket, value)); - } + if (server_.options().mutexTracingEnabled() && server_.mutexTracer() != nullptr) { + response_headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.Json); - response.add(fmt::format("{0}_bucket{{{1}le=\"+Inf\"}} {2}\n", metric_name, hist_tags, - stats.sampleCount())); - response.add(fmt::format("{0}_sum{{{1}}} {2:.32g}\n", metric_name, tags, stats.sampleSum())); - response.add(fmt::format("{0}_count{{{1}}} {2}\n", metric_name, tags, stats.sampleCount())); + envoy::admin::v3::MutexStats mutex_stats; + mutex_stats.set_num_contentions(server_.mutexTracer()->numContentions()); + mutex_stats.set_current_wait_cycles(server_.mutexTracer()->currentWaitCycles()); + mutex_stats.set_lifetime_wait_cycles(server_.mutexTracer()->lifetimeWaitCycles()); + response.add(MessageUtil::getJsonStringFromMessage(mutex_stats, true, true)); + } else { + response.add("Mutex contention tracing is not enabled. 
To enable, run Envoy with flag " + "--enable-mutex-tracing."); } - - return metric_type_tracker.size(); + return Http::Code::OK; } std::string diff --git a/source/server/admin/stats_handler.h b/source/server/admin/stats_handler.h new file mode 100644 index 0000000000000..abdb656ed2c94 --- /dev/null +++ b/source/server/admin/stats_handler.h @@ -0,0 +1,70 @@ +#pragma once + +#include +#include + +#include "envoy/buffer/buffer.h" +#include "envoy/http/codes.h" +#include "envoy/http/header_map.h" +#include "envoy/server/admin.h" +#include "envoy/server/instance.h" + +#include "common/stats/histogram_impl.h" + +#include "server/admin/handler_ctx.h" + +#include "absl/strings/string_view.h" + +namespace Envoy { +namespace Server { + +class StatsHandler : public HandlerContextBase { + +public: + StatsHandler(Server::Instance& server); + + Http::Code handlerResetCounters(absl::string_view path_and_query, + Http::ResponseHeaderMap& response_headers, + Buffer::Instance& response, AdminStream&); + Http::Code handlerStatsRecentLookups(absl::string_view path_and_query, + Http::ResponseHeaderMap& response_headers, + Buffer::Instance& response, AdminStream&); + Http::Code handlerStatsRecentLookupsClear(absl::string_view path_and_query, + Http::ResponseHeaderMap& response_headers, + Buffer::Instance& response, AdminStream&); + Http::Code handlerStatsRecentLookupsDisable(absl::string_view path_and_query, + Http::ResponseHeaderMap& response_headers, + Buffer::Instance& response, AdminStream&); + Http::Code handlerStatsRecentLookupsEnable(absl::string_view path_and_query, + Http::ResponseHeaderMap& response_headers, + Buffer::Instance& response, AdminStream&); + Http::Code handlerStats(absl::string_view path_and_query, + Http::ResponseHeaderMap& response_headers, Buffer::Instance& response, + AdminStream&); + Http::Code handlerPrometheusStats(absl::string_view path_and_query, + Http::ResponseHeaderMap& response_headers, + Buffer::Instance& response, AdminStream&); + Http::Code 
handlerContention(absl::string_view path_and_query, + Http::ResponseHeaderMap& response_headers, + Buffer::Instance& response, AdminStream&); + +private: + template + static bool shouldShowMetric(const StatType& metric, const bool used_only, + const absl::optional& regex) { + return ((!used_only || metric.used()) && + (!regex.has_value() || std::regex_search(metric.name(), regex.value()))); + } + + friend class AdminStatsTest; + + static std::string statsAsJson(const std::map& all_stats, + const std::map& text_readouts, + const std::vector& all_histograms, + bool used_only, + const absl::optional regex = absl::nullopt, + bool pretty_print = false); +}; + +} // namespace Server +} // namespace Envoy diff --git a/source/server/http/utils.cc b/source/server/admin/utils.cc similarity index 82% rename from source/server/http/utils.cc rename to source/server/admin/utils.cc index cd564784a5664..eaa5a6689a02d 100644 --- a/source/server/http/utils.cc +++ b/source/server/admin/utils.cc @@ -1,4 +1,4 @@ -#include "server/http/utils.h" +#include "server/admin/utils.h" #include "common/common/enum_to_int.h" #include "common/http/headers.h" @@ -23,18 +23,19 @@ envoy::admin::v3::ServerInfo::State serverState(Init::Manager::State state, void populateFallbackResponseHeaders(Http::Code code, Http::ResponseHeaderMap& header_map) { header_map.setStatus(std::to_string(enumToInt(code))); - const auto& headers = Http::Headers::get(); if (header_map.ContentType() == nullptr) { // Default to text-plain if unset. - header_map.setReferenceContentType(headers.ContentTypeValues.TextUtf8); + header_map.setReferenceContentType(Http::Headers::get().ContentTypeValues.TextUtf8); } // Default to 'no-cache' if unset, but not 'no-store' which may break the back button. 
- if (header_map.CacheControl() == nullptr) { - header_map.setReferenceCacheControl(headers.CacheControlValues.NoCacheMaxAge0); + if (header_map.get(Http::CustomHeaders::get().CacheControl) == nullptr) { + header_map.setReference(Http::CustomHeaders::get().CacheControl, + Http::CustomHeaders::get().CacheControlValues.NoCacheMaxAge0); } // Under no circumstance should browsers sniff content-type. - header_map.addReference(headers.XContentTypeOptions, headers.XContentTypeOptionValues.Nosniff); + header_map.addReference(Http::Headers::get().XContentTypeOptions, + Http::Headers::get().XContentTypeOptionValues.Nosniff); } // Helper method to get filter parameter, or report an error for an invalid regex. diff --git a/source/server/http/utils.h b/source/server/admin/utils.h similarity index 100% rename from source/server/http/utils.h rename to source/server/admin/utils.h diff --git a/source/server/backtrace.h b/source/server/backtrace.h index 966d9017baf26..fd391a691c813 100644 --- a/source/server/backtrace.h +++ b/source/server/backtrace.h @@ -3,7 +3,7 @@ #include #include "common/common/logger.h" -#include "common/common/version.h" +#include "common/version/version.h" #include "absl/debugging/stacktrace.h" #include "absl/debugging/symbolize.h" diff --git a/source/server/config_validation/BUILD b/source/server/config_validation/BUILD index 9a11519933294..f56d6069dd570 100644 --- a/source/server/config_validation/BUILD +++ b/source/server/config_validation/BUILD @@ -1,7 +1,7 @@ -licenses(["notice"]) # Apache 2 - load("//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package") +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( @@ -11,7 +11,7 @@ envoy_cc_library( deps = [ "//include/envoy/server:admin_interface", "//source/common/common:assert_lib", - "//source/server/http:config_tracker_lib", + "//source/server/admin:config_tracker_lib", ], ) @@ -97,7 +97,6 @@ envoy_cc_library( "//source/common/access_log:access_log_manager_lib", 
"//source/common/common:assert_lib", "//source/common/common:utility_lib", - "//source/common/common:version_lib", "//source/common/config:utility_lib", "//source/common/grpc:common_lib", "//source/common/local_info:local_info_lib", @@ -106,9 +105,10 @@ envoy_cc_library( "//source/common/runtime:runtime_lib", "//source/common/stats:stats_lib", "//source/common/thread_local:thread_local_lib", + "//source/common/version:version_lib", "//source/server:configuration_lib", "//source/server:server_lib", - "//source/server/http:admin_lib", + "//source/server/admin:admin_lib", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", "@envoy_api//envoy/config/listener/v3:pkg_cc_proto", diff --git a/source/server/config_validation/admin.cc b/source/server/config_validation/admin.cc index 44368513e1376..dec3bb773c701 100644 --- a/source/server/config_validation/admin.cc +++ b/source/server/config_validation/admin.cc @@ -3,11 +3,12 @@ namespace Envoy { namespace Server { +// Pretend that handler was added successfully. 
bool ValidationAdmin::addHandler(const std::string&, const std::string&, HandlerCb, bool, bool) { - return false; + return true; } -bool ValidationAdmin::removeHandler(const std::string&) { return false; } +bool ValidationAdmin::removeHandler(const std::string&) { return true; } const Network::Socket& ValidationAdmin::socket() { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } diff --git a/source/server/config_validation/admin.h b/source/server/config_validation/admin.h index 521aaf838b94e..14cd04d93713e 100644 --- a/source/server/config_validation/admin.h +++ b/source/server/config_validation/admin.h @@ -4,7 +4,7 @@ #include "common/common/assert.h" -#include "server/http/config_tracker_impl.h" +#include "server/admin/config_tracker_impl.h" namespace Envoy { namespace Server { @@ -27,6 +27,7 @@ class ValidationAdmin : public Admin { Http::Code request(absl::string_view path_and_query, absl::string_view method, Http::ResponseHeaderMap& response_headers, std::string& body) override; void addListenerToHandler(Network::ConnectionHandler* handler) override; + uint32_t concurrency() const override { return 1; } private: ConfigTrackerImpl config_tracker_; diff --git a/source/server/config_validation/cluster_manager.cc b/source/server/config_validation/cluster_manager.cc index 4dbdc73a31ef4..d5f4489c918c5 100644 --- a/source/server/config_validation/cluster_manager.cc +++ b/source/server/config_validation/cluster_manager.cc @@ -28,7 +28,7 @@ ValidationClusterManagerFactory::createCds(const envoy::config::core::v3::Config ValidationClusterManager::ValidationClusterManager( const envoy::config::bootstrap::v3::Bootstrap& bootstrap, ClusterManagerFactory& factory, Stats::Store& stats, ThreadLocal::Instance& tls, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, const LocalInfo::LocalInfo& local_info, + Random::RandomGenerator& random, const LocalInfo::LocalInfo& local_info, AccessLog::AccessLogManager& log_manager, Event::Dispatcher& main_thread_dispatcher, Server::Admin& 
admin, ProtobufMessage::ValidationContext& validation_context, Api::Api& api, Http::Context& http_context, Grpc::Context& grpc_context, Event::TimeSystem& time_system) @@ -37,9 +37,8 @@ ValidationClusterManager::ValidationClusterManager( grpc_context), async_client_(api, time_system) {} -Http::ConnectionPool::Instance* -ValidationClusterManager::httpConnPoolForCluster(const std::string&, ResourcePriority, - Http::Protocol, LoadBalancerContext*) { +Http::ConnectionPool::Instance* ValidationClusterManager::httpConnPoolForCluster( + const std::string&, ResourcePriority, absl::optional, LoadBalancerContext*) { return nullptr; } diff --git a/source/server/config_validation/cluster_manager.h b/source/server/config_validation/cluster_manager.h index 07ea8f3f8c1c1..6ce2c46941fee 100644 --- a/source/server/config_validation/cluster_manager.h +++ b/source/server/config_validation/cluster_manager.h @@ -23,7 +23,7 @@ class ValidationClusterManagerFactory : public ProdClusterManagerFactory { explicit ValidationClusterManagerFactory( Server::Admin& admin, Runtime::Loader& runtime, Stats::Store& stats, - ThreadLocal::Instance& tls, Runtime::RandomGenerator& random, + ThreadLocal::Instance& tls, Random::RandomGenerator& random, Network::DnsResolverSharedPtr dns_resolver, Ssl::ContextManager& ssl_context_manager, Event::Dispatcher& main_thread_dispatcher, const LocalInfo::LocalInfo& local_info, Secret::SecretManager& secret_manager, ProtobufMessage::ValidationContext& validation_context, @@ -57,7 +57,7 @@ class ValidationClusterManager : public ClusterManagerImpl { ValidationClusterManager(const envoy::config::bootstrap::v3::Bootstrap& bootstrap, ClusterManagerFactory& factory, Stats::Store& stats, ThreadLocal::Instance& tls, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, const LocalInfo::LocalInfo& local_info, + Random::RandomGenerator& random, const LocalInfo::LocalInfo& local_info, AccessLog::AccessLogManager& log_manager, Event::Dispatcher& dispatcher, 
Server::Admin& admin, ProtobufMessage::ValidationContext& validation_context, Api::Api& api, @@ -65,7 +65,7 @@ class ValidationClusterManager : public ClusterManagerImpl { Event::TimeSystem& time_system); Http::ConnectionPool::Instance* httpConnPoolForCluster(const std::string&, ResourcePriority, - Http::Protocol, + absl::optional, LoadBalancerContext*) override; Host::CreateConnectionData tcpConnForCluster(const std::string&, LoadBalancerContext*) override; Http::AsyncClient& httpAsyncClientForCluster(const std::string&) override; diff --git a/source/server/config_validation/server.cc b/source/server/config_validation/server.cc index 1ae5c43dc7988..4df691f6b96e0 100644 --- a/source/server/config_validation/server.cc +++ b/source/server/config_validation/server.cc @@ -5,12 +5,12 @@ #include "envoy/config/bootstrap/v3/bootstrap.pb.h" #include "common/common/utility.h" -#include "common/common/version.h" #include "common/config/utility.h" #include "common/event/real_time_system.h" #include "common/local_info/local_info_impl.h" #include "common/protobuf/utility.h" #include "common/singleton/manager_impl.h" +#include "common/version/version.h" #include "server/ssl_context_manager.h" @@ -42,7 +42,8 @@ ValidationInstance::ValidationInstance( Thread::BasicLockable& access_log_lock, ComponentFactory& component_factory, Thread::ThreadFactory& thread_factory, Filesystem::Instance& file_system) : options_(options), validation_context_(options_.allowUnknownStaticFields(), - !options.rejectUnknownDynamicFields()), + !options.rejectUnknownDynamicFields(), + !options.ignoreUnknownDynamicFields()), stats_store_(store), api_(new Api::ValidationImpl(thread_factory, store, time_system, file_system)), dispatcher_(api_->allocateDispatcher("main_thread")), @@ -93,7 +94,8 @@ void ValidationInstance::initialize(const Options& options, messageValidationContext().staticValidationVisitor(), *api_); listener_manager_ = std::make_unique(*this, *this, *this, false); 
thread_local_.registerThread(*dispatcher_, true); - runtime_loader_ = component_factory.createRuntime(*this, initial_config); + runtime_singleton_ = std::make_unique( + component_factory.createRuntime(*this, initial_config)); secret_manager_ = std::make_unique(admin().getConfigTracker()); ssl_context_manager_ = createContextManager("ssl_context_manager", api_->timeSource()); cluster_manager_factory_ = std::make_unique( @@ -101,7 +103,7 @@ void ValidationInstance::initialize(const Options& options, dispatcher(), localInfo(), *secret_manager_, messageValidationContext(), *api_, http_context_, grpc_context_, accessLogManager(), singletonManager(), time_system_); config_.initialize(bootstrap, *this, *cluster_manager_factory_); - runtime_loader_->initialize(clusterManager()); + runtime().initialize(clusterManager()); clusterManager().setInitializedCb([this]() -> void { init_manager_.initialize(init_watcher_); }); } diff --git a/source/server/config_validation/server.h b/source/server/config_validation/server.h index 9111dea8a3a80..c9108ffaec913 100644 --- a/source/server/config_validation/server.h +++ b/source/server/config_validation/server.h @@ -13,6 +13,7 @@ #include "common/access_log/access_log_manager_impl.h" #include "common/common/assert.h" +#include "common/common/random_generator.h" #include "common/grpc/common.h" #include "common/protobuf/message_validator_impl.h" #include "common/router/rds_impl.h" @@ -20,11 +21,11 @@ #include "common/secret/secret_manager_impl.h" #include "common/thread_local/thread_local_impl.h" +#include "server/admin/admin.h" #include "server/config_validation/admin.h" #include "server/config_validation/api.h" #include "server/config_validation/cluster_manager.h" #include "server/config_validation/dns.h" -#include "server/http/admin.h" #include "server/listener_manager_impl.h" #include "server/server.h" @@ -84,8 +85,8 @@ class ValidationInstance final : Logger::Loggable, ServerLifecycleNotifier& lifecycleNotifier() override { return 
*this; } ListenerManager& listenerManager() override { return *listener_manager_; } Secret::SecretManager& secretManager() override { return *secret_manager_; } - Runtime::RandomGenerator& random() override { return random_generator_; } - Runtime::Loader& runtime() override { return *runtime_loader_; } + Random::RandomGenerator& random() override { return random_generator_; } + Runtime::Loader& runtime() override { return Runtime::LoaderSingleton::get(); } void shutdown() override; bool isShutdown() override { return false; } void shutdownAdmin() override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } @@ -141,7 +142,7 @@ class ValidationInstance final : Logger::Loggable, return ProdListenerComponentFactory::createUdpListenerFilterFactoryList_(filters, context); } Network::SocketSharedPtr createListenSocket(Network::Address::InstanceConstSharedPtr, - Network::Address::SocketType, + Network::Socket::Type, const Network::Socket::OptionsSharedPtr&, const ListenSocketCreationParams&) override { // Returned sockets are not currently used so we can return nothing here safely vs. 
a @@ -192,8 +193,8 @@ class ValidationInstance final : Logger::Loggable, Event::DispatcherPtr dispatcher_; Server::ValidationAdmin admin_; Singleton::ManagerPtr singleton_manager_; - Runtime::LoaderPtr runtime_loader_; - Runtime::RandomGeneratorImpl random_generator_; + std::unique_ptr runtime_singleton_; + Random::RandomGeneratorImpl random_generator_; std::unique_ptr ssl_context_manager_; Configuration::MainImpl config_; LocalInfo::LocalInfoPtr local_info_; diff --git a/source/server/configuration_impl.cc b/source/server/configuration_impl.cc index 533bbdfef8662..7510f068f7ee6 100644 --- a/source/server/configuration_impl.cc +++ b/source/server/configuration_impl.cc @@ -90,10 +90,24 @@ void MainImpl::initialize(const envoy::config::bootstrap::v3::Bootstrap& bootstr std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT(watchdog, miss_timeout, 200)); watchdog_megamiss_timeout_ = std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT(watchdog, megamiss_timeout, 1000)); - watchdog_kill_timeout_ = - std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT(watchdog, kill_timeout, 0)); + uint64_t kill_timeout = PROTOBUF_GET_MS_OR_DEFAULT(watchdog, kill_timeout, 0); + const uint64_t max_kill_timeout_jitter = + PROTOBUF_GET_MS_OR_DEFAULT(watchdog, max_kill_timeout_jitter, 0); + + // Adjust kill timeout if we have skew enabled. + if (kill_timeout > 0 && max_kill_timeout_jitter > 0) { + // Increments the kill timeout with a random value in (0, max_skew]. + // We shouldn't have overflow issues due to the range of Duration. + // This won't be entirely uniform, depending on how large max_skew + // is relation to uint64. 
+ kill_timeout += (server.random().random() % max_kill_timeout_jitter) + 1; + } + + watchdog_kill_timeout_ = std::chrono::milliseconds(kill_timeout); watchdog_multikill_timeout_ = std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT(watchdog, multikill_timeout, 0)); + watchdog_multikill_threshold_ = + PROTOBUF_PERCENT_TO_DOUBLE_OR_DEFAULT(watchdog, multikill_threshold, 0.0); initializeStatsSinks(bootstrap, server); } @@ -134,7 +148,7 @@ void MainImpl::initializeStatsSinks(const envoy::config::bootstrap::v3::Bootstra ProtobufTypes::MessagePtr message = Config::Utility::translateToFactoryConfig( sink_object, server.messageValidationContext().staticValidationVisitor(), factory); - stats_sinks_.emplace_back(factory.createStatsSink(*message, server)); + stats_sinks_.emplace_back(factory.createStatsSink(*message, server.serverFactoryContext())); } } diff --git a/source/server/configuration_impl.h b/source/server/configuration_impl.h index 5faf632f89594..d1c88000c1d1f 100644 --- a/source/server/configuration_impl.h +++ b/source/server/configuration_impl.h @@ -6,7 +6,6 @@ #include #include #include -#include #include #include "envoy/config/bootstrap/v3/bootstrap.pb.h" @@ -42,7 +41,8 @@ class StatsSinkFactory : public Config::TypedFactory { * @param config supplies the custom proto configuration for the Stats::Sink * @param server supplies the server instance */ - virtual Stats::SinkPtr createStatsSink(const Protobuf::Message& config, Instance& server) PURE; + virtual Stats::SinkPtr createStatsSink(const Protobuf::Message& config, + Server::Configuration::ServerFactoryContext& server) PURE; std::string category() const override { return "envoy.stats_sinks"; } }; @@ -110,6 +110,8 @@ class MainImpl : Logger::Loggable, public Main { return watchdog_multikill_timeout_; } + double wdMultiKillThreshold() const override { return watchdog_multikill_threshold_; } + private: /** * Initialize tracers and corresponding sinks. 
@@ -126,6 +128,7 @@ class MainImpl : Logger::Loggable, public Main { std::chrono::milliseconds watchdog_megamiss_timeout_; std::chrono::milliseconds watchdog_kill_timeout_; std::chrono::milliseconds watchdog_multikill_timeout_; + double watchdog_multikill_threshold_; }; /** diff --git a/source/server/connection_handler_impl.cc b/source/server/connection_handler_impl.cc index 2547ede9f35d4..323ddf4df4301 100644 --- a/source/server/connection_handler_impl.cc +++ b/source/server/connection_handler_impl.cc @@ -2,6 +2,7 @@ #include "envoy/event/dispatcher.h" #include "envoy/event/timer.h" +#include "envoy/network/exception.h" #include "envoy/network/filter.h" #include "envoy/stats/scope.h" #include "envoy/stats/timespan.h" @@ -30,7 +31,7 @@ void ConnectionHandlerImpl::decNumConnections() { void ConnectionHandlerImpl::addListener(absl::optional overridden_listener, Network::ListenerConfig& config) { ActiveListenerDetails details; - if (config.listenSocketFactory().socketType() == Network::Address::SocketType::Stream) { + if (config.listenSocketFactory().socketType() == Network::Socket::Type::Stream) { if (overridden_listener.has_value()) { for (auto& listener : listeners_) { if (listener.second.listener_->listenerTag() == overridden_listener) { @@ -67,9 +68,7 @@ void ConnectionHandlerImpl::removeListeners(uint64_t listener_tag) { void ConnectionHandlerImpl::removeFilterChains( uint64_t listener_tag, const std::list& filter_chains, std::function completion) { - // TODO(lambdai): Merge the optimistic path and the pessimistic path. for (auto& listener : listeners_) { - // Optimistic path: The listener tag provided by arg is not stale. if (listener.second.listener_->listenerTag() == listener_tag) { listener.second.tcp_listener_->get().deferredRemoveFilterChains(filter_chains); // Completion is deferred because the above removeFilterChains() may defer delete connection. 
@@ -77,17 +76,7 @@ void ConnectionHandlerImpl::removeFilterChains( return; } } - // Fallback to iterate over all listeners. The reason is that the target listener might have began - // another update and the previous tag is lost. - // TODO(lambdai): Remove this once we decide to use the same listener tag during intelligent - // update. - for (auto& listener : listeners_) { - if (listener.second.tcp_listener_.has_value()) { - listener.second.tcp_listener_->get().deferredRemoveFilterChains(filter_chains); - } - } - // Completion is deferred because the above removeFilterChains() may defer delete connection. - Event::DeferredTaskUtil::deferredRun(dispatcher_, std::move(completion)); + NOT_REACHED_GCOVR_EXCL_LINE; } void ConnectionHandlerImpl::stopListeners(uint64_t listener_tag) { @@ -298,6 +287,11 @@ void ConnectionHandlerImpl::ActiveTcpSocket::continueFilterChain(bool success) { } } +void ConnectionHandlerImpl::ActiveTcpSocket::setDynamicMetadata(const std::string& name, + const ProtobufWkt::Struct& value) { + (*metadata_.mutable_filter_metadata())[name].MergeFrom(value); +} + void ConnectionHandlerImpl::ActiveTcpSocket::newConnection() { // Check if the socket may need to be redirected to another listener. ActiveTcpListenerOptRef new_listener; @@ -329,11 +323,19 @@ void ConnectionHandlerImpl::ActiveTcpSocket::newConnection() { // Particularly the assigned events need to reset before assigning new events in the follow up. accept_filters_.clear(); // Create a new connection on this listener. 
- listener_.newConnection(std::move(socket_)); + listener_.newConnection(std::move(socket_), dynamicMetadata()); } } void ConnectionHandlerImpl::ActiveTcpListener::onAccept(Network::ConnectionSocketPtr&& socket) { + if (listenerConnectionLimitReached()) { + ENVOY_LOG(trace, "closing connection: listener connection limit reached for {}", + config_->name()); + socket->close(); + stats_.downstream_cx_overflow_.inc(); + return; + } + onAcceptWorker(std::move(socket), config_->handOffRestoredDestinationConnections(), false); } @@ -360,7 +362,7 @@ void ConnectionHandlerImpl::ActiveTcpListener::onAcceptWorker( // Otherwise we let active_socket be destructed when it goes out of scope. if (active_socket->iter_ != active_socket->accept_filters_.end()) { active_socket->startTimer(); - active_socket->moveIntoListBack(std::move(active_socket), sockets_); + LinkedList::moveIntoListBack(std::move(active_socket), sockets_); } } @@ -374,12 +376,19 @@ void emitLogs(Network::ListenerConfig& config, StreamInfo::StreamInfo& stream_in } // namespace void ConnectionHandlerImpl::ActiveTcpListener::newConnection( - Network::ConnectionSocketPtr&& socket) { - auto stream_info = std::make_unique(parent_.dispatcher_.timeSource()); + Network::ConnectionSocketPtr&& socket, + const envoy::config::core::v3::Metadata& dynamic_metadata) { + auto stream_info = std::make_unique( + parent_.dispatcher_.timeSource(), StreamInfo::FilterState::LifeSpan::Connection); stream_info->setDownstreamLocalAddress(socket->localAddress()); stream_info->setDownstreamRemoteAddress(socket->remoteAddress()); stream_info->setDownstreamDirectRemoteAddress(socket->directRemoteAddress()); + // merge from the given dynamic metadata if it's not empty + if (dynamic_metadata.filter_metadata_size() > 0) { + stream_info->dynamicMetadata().MergeFrom(dynamic_metadata); + } + // Find matching filter chain. 
const auto filter_chain = config_->filterChainManager().findFilterChain(*socket); if (filter_chain == nullptr) { @@ -399,7 +408,7 @@ void ConnectionHandlerImpl::ActiveTcpListener::newConnection( std::move(socket), std::move(transport_socket), *stream_info); ActiveTcpConnectionPtr active_connection( new ActiveTcpConnection(active_connections, std::move(server_conn_ptr), - parent_.dispatcher_.timeSource(), *config_, std::move(stream_info))); + parent_.dispatcher_.timeSource(), std::move(stream_info))); active_connection->connection_->setBufferLimits(config_->perConnectionBufferLimitBytes()); const bool empty_filter_chain = !config_->filterChainFactory().createNetworkFilterChain( @@ -413,7 +422,7 @@ void ConnectionHandlerImpl::ActiveTcpListener::newConnection( if (active_connection->connection_->state() != Network::Connection::State::Closed) { ENVOY_CONN_LOG(debug, "new connection", *active_connection->connection_); active_connection->connection_->addConnectionCallbacks(*active_connection); - active_connection->moveIntoList(std::move(active_connection), active_connections.connections_); + LinkedList::moveIntoList(std::move(active_connection), active_connections.connections_); } } @@ -444,7 +453,7 @@ void ConnectionHandlerImpl::ActiveTcpListener::deferredRemoveFilterChains( // Since is_deleting_ is on, we need to manually remove the map value and drive the iterator. // Defer delete connection container to avoid race condition in destroying connection. 
parent_.dispatcher_.deferredDelete(std::move(iter->second)); - iter = connections_by_context_.erase(iter); + connections_by_context_.erase(iter); } } is_deleting_ = was_deleting; @@ -498,55 +507,68 @@ ConnectionHandlerImpl::ActiveConnections::~ActiveConnections() { ConnectionHandlerImpl::ActiveTcpConnection::ActiveTcpConnection( ActiveConnections& active_connections, Network::ConnectionPtr&& new_connection, - TimeSource& time_source, Network::ListenerConfig& config, - std::unique_ptr&& stream_info) + TimeSource& time_source, std::unique_ptr&& stream_info) : stream_info_(std::move(stream_info)), active_connections_(active_connections), connection_(std::move(new_connection)), conn_length_(new Stats::HistogramCompletableTimespanImpl( - active_connections_.listener_.stats_.downstream_cx_length_ms_, time_source)), - config_(config) { + active_connections_.listener_.stats_.downstream_cx_length_ms_, time_source)) { // We just universally set no delay on connections. Theoretically we might at some point want // to make this configurable. connection_->noDelay(true); - - active_connections_.listener_.stats_.downstream_cx_total_.inc(); - active_connections_.listener_.stats_.downstream_cx_active_.inc(); - active_connections_.listener_.per_worker_stats_.downstream_cx_total_.inc(); - active_connections_.listener_.per_worker_stats_.downstream_cx_active_.inc(); + auto& listener = active_connections_.listener_; + listener.stats_.downstream_cx_total_.inc(); + listener.stats_.downstream_cx_active_.inc(); + listener.per_worker_stats_.downstream_cx_total_.inc(); + listener.per_worker_stats_.downstream_cx_active_.inc(); // Active connections on the handler (not listener). The per listener connections have already // been incremented at this point either via the connection balancer or in the socket accept // path if there is no configured balancer. 
- ++active_connections_.listener_.parent_.num_handler_connections_; + ++listener.parent_.num_handler_connections_; } ConnectionHandlerImpl::ActiveTcpConnection::~ActiveTcpConnection() { - emitLogs(config_, *stream_info_); - - active_connections_.listener_.stats_.downstream_cx_active_.dec(); - active_connections_.listener_.stats_.downstream_cx_destroy_.inc(); - active_connections_.listener_.per_worker_stats_.downstream_cx_active_.dec(); + emitLogs(*active_connections_.listener_.config_, *stream_info_); + auto& listener = active_connections_.listener_; + listener.stats_.downstream_cx_active_.dec(); + listener.stats_.downstream_cx_destroy_.inc(); + listener.per_worker_stats_.downstream_cx_active_.dec(); conn_length_->complete(); // Active listener connections (not handler). - active_connections_.listener_.decNumConnections(); + listener.decNumConnections(); // Active handler connections (not listener). - active_connections_.listener_.parent_.decNumConnections(); -} - -ActiveUdpListener::ActiveUdpListener(Network::ConnectionHandler& parent, - Event::Dispatcher& dispatcher, Network::ListenerConfig& config) - : ActiveUdpListener( - parent, - dispatcher.createUdpListener(config.listenSocketFactory().getListenSocket(), *this), - config) {} - -ActiveUdpListener::ActiveUdpListener(Network::ConnectionHandler& parent, - Network::UdpListenerPtr&& listener, - Network::ListenerConfig& config) + listener.parent_.decNumConnections(); +} + +ActiveRawUdpListener::ActiveRawUdpListener(Network::ConnectionHandler& parent, + Event::Dispatcher& dispatcher, + Network::ListenerConfig& config) + : ActiveRawUdpListener(parent, config.listenSocketFactory().getListenSocket(), dispatcher, + config) {} + +ActiveRawUdpListener::ActiveRawUdpListener(Network::ConnectionHandler& parent, + Network::SocketSharedPtr listen_socket_ptr, + Event::Dispatcher& dispatcher, + Network::ListenerConfig& config) + : ActiveRawUdpListener(parent, *listen_socket_ptr, listen_socket_ptr, dispatcher, config) {} + 
+ActiveRawUdpListener::ActiveRawUdpListener(Network::ConnectionHandler& parent, + Network::Socket& listen_socket, + Network::SocketSharedPtr listen_socket_ptr, + Event::Dispatcher& dispatcher, + Network::ListenerConfig& config) + : ActiveRawUdpListener(parent, listen_socket, + dispatcher.createUdpListener(std::move(listen_socket_ptr), *this), + config) {} + +ActiveRawUdpListener::ActiveRawUdpListener(Network::ConnectionHandler& parent, + Network::Socket& listen_socket, + Network::UdpListenerPtr&& listener, + Network::ListenerConfig& config) : ConnectionHandlerImpl::ActiveListenerImplBase(parent, &config), - udp_listener_(std::move(listener)), read_filter_(nullptr) { + udp_listener_(std::move(listener)), read_filter_(nullptr), listen_socket_(listen_socket) { // Create the filter chain on creating a new udp listener config_->filterChainFactory().createUdpListenerFilterChain(*this, *this); @@ -556,28 +578,35 @@ ActiveUdpListener::ActiveUdpListener(Network::ConnectionHandler& parent, fmt::format("Cannot create listener as no read filter registered for the udp listener: {} ", config_->name())); } + + // Create udp_packet_writer + udp_packet_writer_ = config.udpPacketWriterFactory()->get().createUdpPacketWriter( + listen_socket_.ioHandle(), config.listenerScope()); } -void ActiveUdpListener::onData(Network::UdpRecvData& data) { read_filter_->onData(data); } +void ActiveRawUdpListener::onData(Network::UdpRecvData& data) { read_filter_->onData(data); } -void ActiveUdpListener::onReadReady() {} +void ActiveRawUdpListener::onReadReady() {} -void ActiveUdpListener::onWriteReady(const Network::Socket&) { +void ActiveRawUdpListener::onWriteReady(const Network::Socket&) { // TODO(sumukhs): This is not used now. 
When write filters are implemented, this is a // trigger to invoke the on write ready API on the filters which is when they can write // data + + // Clear write_blocked_ status for udpPacketWriter + udp_packet_writer_->setWritable(); } -void ActiveUdpListener::onReceiveError(Api::IoError::IoErrorCode error_code) { +void ActiveRawUdpListener::onReceiveError(Api::IoError::IoErrorCode error_code) { read_filter_->onReceiveError(error_code); } -void ActiveUdpListener::addReadFilter(Network::UdpListenerReadFilterPtr&& filter) { +void ActiveRawUdpListener::addReadFilter(Network::UdpListenerReadFilterPtr&& filter) { ASSERT(read_filter_ == nullptr, "Cannot add a 2nd UDP read filter"); read_filter_ = std::move(filter); } -Network::UdpListener& ActiveUdpListener::udpListener() { return *udp_listener_; } +Network::UdpListener& ActiveRawUdpListener::udpListener() { return *udp_listener_; } } // namespace Server } // namespace Envoy diff --git a/source/server/connection_handler_impl.h b/source/server/connection_handler_impl.h index c65ddf397da37..63a8c97575f35 100644 --- a/source/server/connection_handler_impl.h +++ b/source/server/connection_handler_impl.h @@ -28,7 +28,9 @@ namespace Server { #define ALL_LISTENER_STATS(COUNTER, GAUGE, HISTOGRAM) \ COUNTER(downstream_cx_destroy) \ + COUNTER(downstream_cx_overflow) \ COUNTER(downstream_cx_total) \ + COUNTER(downstream_global_cx_overflow) \ COUNTER(downstream_pre_cx_timeout) \ COUNTER(no_filter_chain_match) \ GAUGE(downstream_cx_active, Accumulate) \ @@ -113,15 +115,22 @@ class ConnectionHandlerImpl : public Network::ConnectionHandler, ActiveTcpListener(ConnectionHandlerImpl& parent, Network::ListenerPtr&& listener, Network::ListenerConfig& config); ~ActiveTcpListener() override; + bool listenerConnectionLimitReached() const { + // TODO(tonya11en): Delegate enforcement of per-listener connection limits to overload + // manager. 
+ return !config_->openConnections().canCreate(); + } void onAcceptWorker(Network::ConnectionSocketPtr&& socket, bool hand_off_restored_destination_connections, bool rebalanced); void decNumConnections() { ASSERT(num_listener_connections_ > 0); --num_listener_connections_; + config_->openConnections().dec(); } // Network::ListenerCallbacks void onAccept(Network::ConnectionSocketPtr&& socket) override; + void onReject() override { stats_.downstream_global_cx_overflow_.inc(); } // ActiveListenerImplBase Network::Listener* listener() override { return listener_.get(); } @@ -131,7 +140,10 @@ class ConnectionHandlerImpl : public Network::ConnectionHandler, // Network::BalancedConnectionHandler uint64_t numConnections() const override { return num_listener_connections_; } - void incNumConnections() override { ++num_listener_connections_; } + void incNumConnections() override { + ++num_listener_connections_; + config_->openConnections().inc(); + } void post(Network::ConnectionSocketPtr&& socket) override; /** @@ -143,7 +155,8 @@ class ConnectionHandlerImpl : public Network::ConnectionHandler, /** * Create a new connection from a socket accepted by the listener. */ - void newConnection(Network::ConnectionSocketPtr&& socket); + void newConnection(Network::ConnectionSocketPtr&& socket, + const envoy::config::core::v3::Metadata& dynamic_metadata); /** * Return the active connections container attached with the given filter chain. @@ -168,7 +181,7 @@ class ConnectionHandlerImpl : public Network::ConnectionHandler, const std::chrono::milliseconds listener_filters_timeout_; const bool continue_on_listener_filters_timeout_; std::list sockets_; - std::unordered_map connections_by_context_; + absl::node_hash_map connections_by_context_; // The number of connections currently active on this listener. This is typically used for // connection balancing across per-handler listeners. 
@@ -199,7 +212,6 @@ class ConnectionHandlerImpl : public Network::ConnectionHandler, public Network::ConnectionCallbacks { ActiveTcpConnection(ActiveConnections& active_connections, Network::ConnectionPtr&& new_connection, TimeSource& time_system, - Network::ListenerConfig& config, std::unique_ptr&& stream_info); ~ActiveTcpConnection() override; @@ -218,7 +230,6 @@ class ConnectionHandlerImpl : public Network::ConnectionHandler, ActiveConnections& active_connections_; Network::ConnectionPtr connection_; Stats::TimespanPtr conn_length_; - Network::ListenerConfig& config_; }; /** @@ -296,6 +307,9 @@ class ConnectionHandlerImpl : public Network::ConnectionHandler, Network::ConnectionSocket& socket() override { return *socket_.get(); } Event::Dispatcher& dispatcher() override { return listener_.parent_.dispatcher_; } void continueFilterChain(bool success) override; + void setDynamicMetadata(const std::string& name, const ProtobufWkt::Struct& value) override; + envoy::config::core::v3::Metadata& dynamicMetadata() override { return metadata_; }; + const envoy::config::core::v3::Metadata& dynamicMetadata() const override { return metadata_; }; ActiveTcpListener& listener_; Network::ConnectionSocketPtr socket_; @@ -303,6 +317,7 @@ class ConnectionHandlerImpl : public Network::ConnectionHandler, std::list accept_filters_; std::list::iterator iter_; Event::TimerPtr timer_; + envoy::config::core::v3::Metadata metadata_{}; }; using ActiveTcpListenerOptRef = absl::optional>; @@ -325,29 +340,42 @@ class ConnectionHandlerImpl : public Network::ConnectionHandler, /** * Wrapper for an active udp listener owned by this handler. - * TODO(danzh): rename to ActiveRawUdpListener. 
*/ -class ActiveUdpListener : public Network::UdpListenerCallbacks, - public ConnectionHandlerImpl::ActiveListenerImplBase, - public Network::UdpListenerFilterManager, - public Network::UdpReadFilterCallbacks { +class ActiveRawUdpListener : public Network::UdpListenerCallbacks, + public ConnectionHandlerImpl::ActiveListenerImplBase, + public Network::UdpListenerFilterManager, + public Network::UdpReadFilterCallbacks { public: - ActiveUdpListener(Network::ConnectionHandler& parent, Event::Dispatcher& dispatcher, - Network::ListenerConfig& config); - ActiveUdpListener(Network::ConnectionHandler& parent, Network::UdpListenerPtr&& listener, - Network::ListenerConfig& config); + ActiveRawUdpListener(Network::ConnectionHandler& parent, Event::Dispatcher& dispatcher, + Network::ListenerConfig& config); + ActiveRawUdpListener(Network::ConnectionHandler& parent, + Network::SocketSharedPtr listen_socket_ptr, Event::Dispatcher& dispatcher, + Network::ListenerConfig& config); + ActiveRawUdpListener(Network::ConnectionHandler& parent, Network::Socket& listen_socket, + Network::SocketSharedPtr listen_socket_ptr, Event::Dispatcher& dispatcher, + Network::ListenerConfig& config); + ActiveRawUdpListener(Network::ConnectionHandler& parent, Network::Socket& listen_socket, + Network::UdpListenerPtr&& listener, Network::ListenerConfig& config); // Network::UdpListenerCallbacks void onData(Network::UdpRecvData& data) override; void onReadReady() override; void onWriteReady(const Network::Socket& socket) override; void onReceiveError(Api::IoError::IoErrorCode error_code) override; + Network::UdpPacketWriter& udpPacketWriter() override { return *udp_packet_writer_; } // ActiveListenerImplBase Network::Listener* listener() override { return udp_listener_.get(); } void pauseListening() override { udp_listener_->disable(); } void resumeListening() override { udp_listener_->enable(); } - void shutdownListener() override { udp_listener_.reset(); } + void shutdownListener() override { + // The 
read filter should be deleted before the UDP listener is deleted. + // The read filter refers to the UDP listener to send packets to downstream. + // If the UDP listener is deleted before the read filter, the read filter may try to use it + // after deletion. + read_filter_.reset(); + udp_listener_.reset(); + } // Network::UdpListenerFilterManager void addReadFilter(Network::UdpListenerReadFilterPtr&& filter) override; @@ -358,6 +386,8 @@ class ActiveUdpListener : public Network::UdpListenerCallbacks, private: Network::UdpListenerPtr udp_listener_; Network::UdpListenerReadFilterPtr read_filter_; + Network::UdpPacketWriterPtr udp_packet_writer_; + Network::Socket& listen_socket_; }; } // namespace Server diff --git a/source/server/drain_manager_impl.cc b/source/server/drain_manager_impl.cc index d9b98e5eaa84e..d9b1e1dcfa8ff 100644 --- a/source/server/drain_manager_impl.cc +++ b/source/server/drain_manager_impl.cc @@ -5,10 +5,7 @@ #include #include "envoy/config/listener/v3/listener.pb.h" -#include "envoy/event/dispatcher.h" #include "envoy/event/timer.h" -#include "envoy/runtime/runtime.h" -#include "envoy/server/instance.h" #include "common/common/assert.h" @@ -35,30 +32,35 @@ bool DrainManagerImpl::drainClose() const { return false; } - // We use the tick time as in increasing chance that we shutdown connections. 
- return static_cast(drain_time_completed_.load()) > - (server_.random().random() % server_.options().drainTime().count()); -} - -void DrainManagerImpl::drainSequenceTick() { - ENVOY_LOG(trace, "drain tick #{}", drain_time_completed_.load()); - ASSERT(drain_time_completed_.load() < server_.options().drainTime().count()); - ++drain_time_completed_; + if (server_.options().drainStrategy() == Server::DrainStrategy::Immediate) { + return true; + } + ASSERT(server_.options().drainStrategy() == Server::DrainStrategy::Gradual); - if (drain_time_completed_.load() < server_.options().drainTime().count()) { - drain_tick_timer_->enableTimer(std::chrono::milliseconds(1000)); - } else if (drain_sequence_completion_) { - drain_sequence_completion_(); + // P(return true) = elapsed time / drain timeout + // If the drain deadline is exceeded, skip the probability calculation. + const MonotonicTime current_time = server_.dispatcher().timeSource().monotonicTime(); + if (current_time >= drain_deadline_) { + return true; } + + const auto remaining_time = + std::chrono::duration_cast(drain_deadline_ - current_time); + ASSERT(server_.options().drainTime() >= remaining_time); + const auto elapsed_time = server_.options().drainTime() - remaining_time; + return static_cast(elapsed_time.count()) > + (server_.random().random() % server_.options().drainTime().count()); } -void DrainManagerImpl::startDrainSequence(std::function completion) { - drain_sequence_completion_ = completion; +void DrainManagerImpl::startDrainSequence(std::function drain_complete_cb) { + ASSERT(drain_complete_cb); ASSERT(!draining_); ASSERT(!drain_tick_timer_); draining_ = true; - drain_tick_timer_ = server_.dispatcher().createTimer([this]() -> void { drainSequenceTick(); }); - drainSequenceTick(); + drain_tick_timer_ = server_.dispatcher().createTimer(drain_complete_cb); + const std::chrono::seconds drain_delay(server_.options().drainTime()); + drain_tick_timer_->enableTimer(drain_delay); + drain_deadline_ = 
server_.dispatcher().timeSource().monotonicTime() + drain_delay; } void DrainManagerImpl::startParentShutdownSequence() { diff --git a/source/server/drain_manager_impl.h b/source/server/drain_manager_impl.h index 7b0e8d6519886..c8056f22396cd 100644 --- a/source/server/drain_manager_impl.h +++ b/source/server/drain_manager_impl.h @@ -1,9 +1,10 @@ #pragma once -#include #include +#include "envoy/common/time.h" #include "envoy/config/listener/v3/listener.pb.h" +#include "envoy/event/timer.h" #include "envoy/server/drain_manager.h" #include "envoy/server/instance.h" @@ -22,21 +23,23 @@ class DrainManagerImpl : Logger::Loggable, public DrainManager public: DrainManagerImpl(Instance& server, envoy::config::listener::v3::Listener::DrainType drain_type); - // Server::DrainManager + // Network::DrainDecision bool drainClose() const override; - void startDrainSequence(std::function completion) override; + + // Server::DrainManager + void startDrainSequence(std::function drain_complete_cb) override; + bool draining() const override { return draining_; } void startParentShutdownSequence() override; private: - void drainSequenceTick(); - Instance& server_; const envoy::config::listener::v3::Listener::DrainType drain_type_; - Event::TimerPtr drain_tick_timer_; + std::atomic draining_{false}; - std::atomic drain_time_completed_{}; + Event::TimerPtr drain_tick_timer_; + MonotonicTime drain_deadline_; + Event::TimerPtr parent_shutdown_timer_; - std::function drain_sequence_completion_; }; } // namespace Server diff --git a/source/server/filter_chain_factory_context_callback.h b/source/server/filter_chain_factory_context_callback.h index 1230bfe5c7e15..883f1477b48e8 100644 --- a/source/server/filter_chain_factory_context_callback.h +++ b/source/server/filter_chain_factory_context_callback.h @@ -21,7 +21,7 @@ class FilterChainFactoryContextCreator { * Generate the filter chain factory context from proto. Note the caller does not own the filter * chain context. 
*/ - virtual std::unique_ptr createFilterChainFactoryContext( + virtual Configuration::FilterChainFactoryContextPtr createFilterChainFactoryContext( const ::envoy::config::listener::v3::FilterChain* const filter_chain) PURE; }; diff --git a/source/server/filter_chain_manager_impl.cc b/source/server/filter_chain_manager_impl.cc index 8b8345f64cc45..4ab8fa9a68673 100644 --- a/source/server/filter_chain_manager_impl.cc +++ b/source/server/filter_chain_manager_impl.cc @@ -6,10 +6,12 @@ #include "common/common/empty_string.h" #include "common/common/fmt.h" #include "common/config/utility.h" +#include "common/network/socket_interface_impl.h" #include "common/protobuf/utility.h" #include "server/configuration_impl.h" +#include "absl/container/node_hash_map.h" #include "absl/strings/match.h" #include "absl/strings/str_cat.h" @@ -86,7 +88,7 @@ const LocalInfo::LocalInfo& PerFilterChainFactoryContextImpl::localInfo() const return parent_context_.localInfo(); } -Envoy::Runtime::RandomGenerator& PerFilterChainFactoryContextImpl::random() { +Envoy::Random::RandomGenerator& PerFilterChainFactoryContextImpl::random() { return parent_context_.random(); } @@ -148,22 +150,25 @@ void FilterChainManagerImpl::addFilterChain( FilterChainFactoryBuilder& filter_chain_factory_builder, FilterChainFactoryContextCreator& context_creator) { Cleanup cleanup([this]() { origin_ = absl::nullopt; }); - std::unordered_set + absl::node_hash_map filter_chains; uint32_t new_filter_chain_size = 0; for (const auto& filter_chain : filter_chain_span) { const auto& filter_chain_match = filter_chain->filter_chain_match(); if (!filter_chain_match.address_suffix().empty() || filter_chain_match.has_suffix_len()) { - throw EnvoyException(fmt::format("error adding listener '{}': contains filter chains with " + throw EnvoyException(fmt::format("error adding listener '{}': filter chain '{}' contains " "unimplemented fields", - address_->asString())); + address_->asString(), filter_chain->name())); } - if 
(filter_chains.find(filter_chain_match) != filter_chains.end()) { - throw EnvoyException(fmt::format("error adding listener '{}': multiple filter chains with " - "the same matching rules are defined", - address_->asString())); + const auto& matching_iter = filter_chains.find(filter_chain_match); + if (matching_iter != filter_chains.end()) { + throw EnvoyException(fmt::format("error adding listener '{}': filter chain '{}' has " + "the same matching rules defined as '{}'", + address_->asString(), filter_chain->name(), + matching_iter->second)); } - filter_chains.insert(filter_chain_match); + filter_chains.insert({filter_chain_match, filter_chain->name()}); // Validate IP addresses. std::vector destination_ips; @@ -360,11 +365,11 @@ std::pair> makeCidrListEntry(const s const T& data) { std::vector subnets; if (cidr == EMPTY_STRING) { - if (Network::Address::ipFamilySupported(AF_INET)) { + if (Network::SocketInterfaceSingleton::get().ipFamilySupported(AF_INET)) { subnets.push_back( Network::Address::CidrRange::create(Network::Utility::getIpv4CidrCatchAllAddress())); } - if (Network::Address::ipFamilySupported(AF_INET6)) { + if (Network::SocketInterfaceSingleton::get().ipFamilySupported(AF_INET6)) { subnets.push_back( Network::Address::CidrRange::create(Network::Utility::getIpv6CidrCatchAllAddress())); } @@ -553,46 +558,44 @@ const Network::FilterChain* FilterChainManagerImpl::findFilterChainForSourceIpAn } void FilterChainManagerImpl::convertIPsToTries() { - for (auto& port : destination_ports_map_) { + for (auto& [destination_port, destination_ips_pair] : destination_ports_map_) { // These variables are used as we build up the destination CIDRs used for the trie. 
- auto& destination_ips_pair = port.second; - auto& destination_ips_map = destination_ips_pair.first; + auto& [destination_ips_map, destination_ips_trie] = destination_ips_pair; std::vector>> destination_ips_list; destination_ips_list.reserve(destination_ips_map.size()); - for (const auto& entry : destination_ips_map) { - destination_ips_list.push_back(makeCidrListEntry(entry.first, entry.second)); + for (const auto& [destination_ip, server_names_map_ptr] : destination_ips_map) { + destination_ips_list.push_back(makeCidrListEntry(destination_ip, server_names_map_ptr)); // This hugely nested for loop greatly pains me, but I'm not sure how to make it better. // We need to get access to all of the source IP strings so that we can convert them into // a trie like we did for the destination IPs above. - for (auto& server_names_entry : *entry.second) { - for (auto& transport_protocols_entry : server_names_entry.second) { - for (auto& application_protocols_entry : transport_protocols_entry.second) { - for (auto& source_array_entry : application_protocols_entry.second) { - auto& source_ips_map = source_array_entry.first; + for (auto& [server_name, transport_protocols_map] : *server_names_map_ptr) { + for (auto& [transport_protocol, application_protocols_map] : transport_protocols_map) { + for (auto& [application_protocol, source_arrays] : application_protocols_map) { + for (auto& [source_ips_map, source_ips_trie] : source_arrays) { std::vector< std::pair>> source_ips_list; source_ips_list.reserve(source_ips_map.size()); - for (auto& source_ip : source_ips_map) { - source_ips_list.push_back(makeCidrListEntry(source_ip.first, source_ip.second)); + for (auto& [source_ip, source_port_map_ptr] : source_ips_map) { + source_ips_list.push_back(makeCidrListEntry(source_ip, source_port_map_ptr)); } - source_array_entry.second = std::make_unique(source_ips_list, true); + source_ips_trie = std::make_unique(source_ips_list, true); } } } } } - destination_ips_pair.second = 
std::make_unique(destination_ips_list, true); + destination_ips_trie = std::make_unique(destination_ips_list, true); } } -std::shared_ptr FilterChainManagerImpl::findExistingFilterChain( +Network::DrainableFilterChainSharedPtr FilterChainManagerImpl::findExistingFilterChain( const envoy::config::listener::v3::FilterChain& filter_chain_message) { // Origin filter chain manager could be empty if the current is the ancestor. const auto* origin = getOriginFilterChainManager(); @@ -608,8 +611,7 @@ std::shared_ptr FilterChainManagerImpl::findExist return nullptr; } -std::unique_ptr -FilterChainManagerImpl::createFilterChainFactoryContext( +Configuration::FilterChainFactoryContextPtr FilterChainManagerImpl::createFilterChainFactoryContext( const ::envoy::config::listener::v3::FilterChain* const filter_chain) { // TODO(lambdai): add stats UNREFERENCED_PARAMETER(filter_chain); @@ -633,7 +635,7 @@ bool FactoryContextImpl::healthCheckFailed() { return server_.healthCheckFailed( Http::Context& FactoryContextImpl::httpContext() { return server_.httpContext(); } Init::Manager& FactoryContextImpl::initManager() { return server_.initManager(); } const LocalInfo::LocalInfo& FactoryContextImpl::localInfo() const { return server_.localInfo(); } -Envoy::Runtime::RandomGenerator& FactoryContextImpl::random() { return server_.random(); } +Envoy::Random::RandomGenerator& FactoryContextImpl::random() { return server_.random(); } Envoy::Runtime::Loader& FactoryContextImpl::runtime() { return server_.runtime(); } Stats::Scope& FactoryContextImpl::scope() { return global_scope_; } Singleton::Manager& FactoryContextImpl::singletonManager() { return server_.singletonManager(); } diff --git a/source/server/filter_chain_manager_impl.h b/source/server/filter_chain_manager_impl.h index 681876a5cb1e4..59af0bb78ac54 100644 --- a/source/server/filter_chain_manager_impl.h +++ b/source/server/filter_chain_manager_impl.h @@ -30,7 +30,7 @@ class FilterChainFactoryBuilder { * @return Shared filter chain 
where builder is allowed to determine and reuse duplicated filter * chain. Throw exception if failed. */ - virtual std::shared_ptr + virtual Network::DrainableFilterChainSharedPtr buildFilterChain(const envoy::config::listener::v3::FilterChain& filter_chain, FilterChainFactoryContextCreator& context_creator) const PURE; }; @@ -58,7 +58,7 @@ class PerFilterChainFactoryContextImpl : public Configuration::FilterChainFactor Http::Context& httpContext() override; Init::Manager& initManager() override; const LocalInfo::LocalInfo& localInfo() const override; - Envoy::Runtime::RandomGenerator& random() override; + Envoy::Random::RandomGenerator& random() override; Envoy::Runtime::Loader& runtime() override; Stats::Scope& scope() override; Singleton::Manager& singletonManager() override; @@ -131,7 +131,7 @@ class FactoryContextImpl : public Configuration::FactoryContext { Http::Context& httpContext() override; Init::Manager& initManager() override; const LocalInfo::LocalInfo& localInfo() const override; - Envoy::Runtime::RandomGenerator& random() override; + Envoy::Random::RandomGenerator& random() override; Envoy::Runtime::Loader& runtime() override; Stats::Scope& scope() override; Singleton::Manager& singletonManager() override; @@ -168,7 +168,7 @@ class FilterChainManagerImpl : public Network::FilterChainManager, public: using FcContextMap = absl::flat_hash_map, MessageUtil, MessageUtil>; + Network::DrainableFilterChainSharedPtr, MessageUtil, MessageUtil>; FilterChainManagerImpl(const Network::Address::InstanceConstSharedPtr& address, Configuration::FactoryContext& factory_context, Init::Manager& init_manager) @@ -179,7 +179,7 @@ class FilterChainManagerImpl : public Network::FilterChainManager, Init::Manager& init_manager, const FilterChainManagerImpl& parent_manager); // FilterChainFactoryContextCreator - std::unique_ptr createFilterChainFactoryContext( + Configuration::FilterChainFactoryContextPtr createFilterChainFactoryContext( const 
::envoy::config::listener::v3::FilterChain* const filter_chain) override; // Network::FilterChainManager @@ -193,6 +193,10 @@ class FilterChainManagerImpl : public Network::FilterChainManager, FilterChainFactoryBuilder& b, FilterChainFactoryContextCreator& context_creator); static bool isWildcardServerName(const std::string& name); + // Return the current view of filter chains, keyed by filter chain message. Used by the owning + // listener to calculate the intersection of filter chains with another listener. + const FcContextMap& filterChainsByMessage() const { return fc_contexts_; } + private: void convertIPsToTries(); using SourcePortsMap = absl::flat_hash_map; @@ -284,7 +288,7 @@ class FilterChainManagerImpl : public Network::FilterChainManager, const FilterChainManagerImpl* getOriginFilterChainManager() { return origin_.value(); } // Duplicate the inherent factory context if any. - std::shared_ptr + Network::DrainableFilterChainSharedPtr findExistingFilterChain(const envoy::config::listener::v3::FilterChain& filter_chain_message); // Mapping from filter chain message to filter chain. This is used by LDS response handler to diff --git a/source/server/guarddog_impl.cc b/source/server/guarddog_impl.cc index d05e84f6ff6e8..add9ca270d51e 100644 --- a/source/server/guarddog_impl.cc +++ b/source/server/guarddog_impl.cc @@ -1,5 +1,7 @@ #include "server/guarddog_impl.h" +#include + #include #include @@ -23,6 +25,7 @@ GuardDogImpl::GuardDogImpl(Stats::Scope& stats_scope, const Server::Configuratio time_source_(api.timeSource()), miss_timeout_(config.wdMissTimeout()), megamiss_timeout_(config.wdMegaMissTimeout()), kill_timeout_(config.wdKillTimeout()), multi_kill_timeout_(config.wdMultiKillTimeout()), + multi_kill_fraction_(config.wdMultiKillThreshold() / 100.0), loop_interval_([&]() -> std::chrono::milliseconds { // The loop interval is simply the minimum of all specified intervals, // but we must account for the 0=disabled case. 
This lambda takes care @@ -60,8 +63,14 @@ void GuardDogImpl::step() { const auto now = time_source_.monotonicTime(); { - bool seen_one_multi_timeout(false); + size_t multi_kill_count = 0; Thread::LockGuard guard(wd_lock_); + + // Compute the multikill threshold + const size_t required_for_multi_kill = + std::max(static_cast(2), + static_cast(ceil(multi_kill_fraction_ * watched_dogs_.size()))); + for (auto& watched_dog : watched_dogs_) { const auto ltt = watched_dog->dog_->lastTouchTime(); const auto delta = now - ltt; @@ -90,13 +99,10 @@ void GuardDogImpl::step() { watched_dog->dog_->threadId().debugString())); } if (multikillEnabled() && delta > multi_kill_timeout_) { - if (seen_one_multi_timeout) { - - PANIC(fmt::format( - "GuardDog: multiple threads ({},...) stuck for more than watchdog_multikill_timeout", - watched_dog->dog_->threadId().debugString())); - } else { - seen_one_multi_timeout = true; + if (++multi_kill_count >= required_for_multi_kill) { + PANIC(fmt::format("GuardDog: At least {} threads ({},...) stuck for more than " + "watchdog_multikill_timeout", + multi_kill_count, watched_dog->dog_->threadId().debugString())); } } } @@ -142,8 +148,10 @@ void GuardDogImpl::stopWatching(WatchDogSharedPtr wd) { void GuardDogImpl::start(Api::Api& api) { Thread::LockGuard guard(mutex_); + // See comments in WorkerImpl::start for the naming convention. 
+ Thread::Options options{absl::StrCat("dog:", dispatcher_->name())}; thread_ = api.threadFactory().createThread( - [this]() -> void { dispatcher_->run(Event::Dispatcher::RunType::RunUntilExit); }); + [this]() -> void { dispatcher_->run(Event::Dispatcher::RunType::RunUntilExit); }, options); loop_timer_->enableTimer(std::chrono::milliseconds(0)); } diff --git a/source/server/guarddog_impl.h b/source/server/guarddog_impl.h index b570043bcbf3a..2fba7f0edcbb9 100644 --- a/source/server/guarddog_impl.h +++ b/source/server/guarddog_impl.h @@ -120,6 +120,7 @@ class GuardDogImpl : public GuardDog { const std::chrono::milliseconds megamiss_timeout_; const std::chrono::milliseconds kill_timeout_; const std::chrono::milliseconds multi_kill_timeout_; + const double multi_kill_fraction_; const std::chrono::milliseconds loop_interval_; Stats::Counter& watchdog_miss_counter_; Stats::Counter& watchdog_megamiss_counter_; diff --git a/source/server/hot_restart_impl.cc b/source/server/hot_restart_impl.cc index 2a39c9c425d20..c9e0aa7e7d023 100644 --- a/source/server/hot_restart_impl.cc +++ b/source/server/hot_restart_impl.cc @@ -12,7 +12,6 @@ #include "envoy/event/dispatcher.h" #include "envoy/event/file_event.h" #include "envoy/server/instance.h" -#include "envoy/server/options.h" #include "common/api/os_sys_calls_impl.h" #include "common/api/os_sys_calls_impl_hot_restart.h" @@ -24,13 +23,13 @@ namespace Envoy { namespace Server { -SharedMemory* attachSharedMemory(const Options& options) { +SharedMemory* attachSharedMemory(uint32_t base_id, uint32_t restart_epoch) { Api::OsSysCalls& os_sys_calls = Api::OsSysCallsSingleton::get(); Api::HotRestartOsSysCalls& hot_restart_os_sys_calls = Api::HotRestartOsSysCallsSingleton::get(); int flags = O_RDWR; - const std::string shmem_name = fmt::format("/envoy_shared_memory_{}", options.baseId()); - if (options.restartEpoch() == 0) { + const std::string shmem_name = fmt::format("/envoy_shared_memory_{}", base_id); + if (restart_epoch == 0) { 
flags |= O_CREAT | O_EXCL; // If we are meant to be first, attempt to unlink a previous shared memory instance. If this @@ -42,10 +41,10 @@ SharedMemory* attachSharedMemory(const Options& options) { hot_restart_os_sys_calls.shmOpen(shmem_name.c_str(), flags, S_IRUSR | S_IWUSR); if (result.rc_ == -1) { PANIC(fmt::format("cannot open shared memory region {} check user permissions. Error: {}", - shmem_name, strerror(result.errno_))); + shmem_name, errorDetails(result.errno_))); } - if (options.restartEpoch() == 0) { + if (restart_epoch == 0) { const Api::SysCallIntResult truncateRes = os_sys_calls.ftruncate(result.rc_, sizeof(SharedMemory)); RELEASE_ASSERT(truncateRes.rc_ != -1, ""); @@ -57,7 +56,7 @@ SharedMemory* attachSharedMemory(const Options& options) { RELEASE_ASSERT(shmem != MAP_FAILED, ""); RELEASE_ASSERT((reinterpret_cast(shmem) % alignof(decltype(shmem))) == 0, ""); - if (options.restartEpoch() == 0) { + if (restart_epoch == 0) { shmem->size_ = sizeof(SharedMemory); shmem->version_ = HOT_RESTART_VERSION; initializeMutex(shmem->log_lock_); @@ -91,10 +90,16 @@ void initializeMutex(pthread_mutex_t& mutex) { pthread_mutex_init(&mutex, &attribute); } -HotRestartImpl::HotRestartImpl(const Options& options) - : as_child_(HotRestartingChild(options.baseId(), options.restartEpoch())), - as_parent_(HotRestartingParent(options.baseId(), options.restartEpoch())), - shmem_(attachSharedMemory(options)), log_lock_(shmem_->log_lock_), +// The base id is automatically scaled by 10 to prevent overlap of domain socket names when +// multiple Envoys with different base-ids run on a single host. Note that older versions of Envoy +// performed the multiplication in OptionsImpl which produced incorrect server info output. +// TODO(zuercher): ideally, the base_id would be separated from the restart_epoch in +// the socket names to entirely prevent collisions between consecutive base ids. 
+HotRestartImpl::HotRestartImpl(uint32_t base_id, uint32_t restart_epoch) + : base_id_(base_id), scaled_base_id_(base_id * 10), + as_child_(HotRestartingChild(scaled_base_id_, restart_epoch)), + as_parent_(HotRestartingParent(scaled_base_id_, restart_epoch)), + shmem_(attachSharedMemory(scaled_base_id_, restart_epoch)), log_lock_(shmem_->log_lock_), access_log_lock_(shmem_->access_log_lock_) { // If our parent ever goes away just terminate us so that we don't have to rely on ops/launching // logic killing the entire process tree. We should never exist without our parent. @@ -137,6 +142,7 @@ HotRestartImpl::mergeParentStatsIfAny(Stats::StoreRoot& stats_store) { void HotRestartImpl::shutdown() { as_parent_.shutdown(); } +uint32_t HotRestartImpl::baseId() { return base_id_; } std::string HotRestartImpl::version() { return hotRestartVersion(); } std::string HotRestartImpl::hotRestartVersion() { diff --git a/source/server/hot_restart_impl.h b/source/server/hot_restart_impl.h index b8cb4c636e220..9b91e892d104a 100644 --- a/source/server/hot_restart_impl.h +++ b/source/server/hot_restart_impl.h @@ -40,8 +40,11 @@ static const uint64_t SHMEM_FLAGS_INITIALIZING = 0x1; /** * Initialize the shared memory segment, depending on whether we are the first running * envoy, or a host restarted envoy process. + * + * @param base_id uint32_t that is the base id flag used to start this Envoy. + * @param restart_epoch uint32_t the restart epoch flag used to start this Envoy. */ -SharedMemory* attachSharedMemory(const Options& options); +SharedMemory* attachSharedMemory(uint32_t base_id, uint32_t restart_epoch); /** * Initialize a pthread mutex for process shared locking. @@ -55,7 +58,7 @@ class ProcessSharedMutex : public Thread::BasicLockable { public: ProcessSharedMutex(pthread_mutex_t& mutex) : mutex_(mutex) {} - void lock() EXCLUSIVE_LOCK_FUNCTION() override { + void lock() ABSL_EXCLUSIVE_LOCK_FUNCTION() override { // Deal with robust handling here. 
If the other process dies without unlocking, we are going // to die shortly but try to make sure that we can handle any signals, etc. that happen without // getting into a further messed up state. @@ -66,7 +69,7 @@ class ProcessSharedMutex : public Thread::BasicLockable { } } - bool tryLock() EXCLUSIVE_TRYLOCK_FUNCTION(true) override { + bool tryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) override { int rc = pthread_mutex_trylock(&mutex_); if (rc == EBUSY) { return false; @@ -80,7 +83,7 @@ class ProcessSharedMutex : public Thread::BasicLockable { return true; } - void unlock() UNLOCK_FUNCTION() override { + void unlock() ABSL_UNLOCK_FUNCTION() override { int rc = pthread_mutex_unlock(&mutex_); ASSERT(rc == 0); } @@ -95,7 +98,7 @@ class ProcessSharedMutex : public Thread::BasicLockable { */ class HotRestartImpl : public HotRestart { public: - HotRestartImpl(const Options& options); + HotRestartImpl(uint32_t base_id, uint32_t restart_epoch); // Server::HotRestart void drainParentListeners() override; @@ -105,6 +108,7 @@ class HotRestartImpl : public HotRestart { void sendParentTerminateRequest() override; ServerStatsFromParent mergeParentStatsIfAny(Stats::StoreRoot& stats_store) override; void shutdown() override; + uint32_t baseId() override; std::string version() override; Thread::BasicLockable& logLock() override { return log_lock_; } Thread::BasicLockable& accessLogLock() override { return access_log_lock_; } @@ -116,6 +120,8 @@ class HotRestartImpl : public HotRestart { static std::string hotRestartVersion(); private: + uint32_t base_id_; + uint32_t scaled_base_id_; HotRestartingChild as_child_; HotRestartingParent as_parent_; // This pointer is shared memory, and is expected to exist until process end. 
diff --git a/source/server/hot_restart_nop_impl.h b/source/server/hot_restart_nop_impl.h index 205097649b81d..5e52501855827 100644 --- a/source/server/hot_restart_nop_impl.h +++ b/source/server/hot_restart_nop_impl.h @@ -23,6 +23,7 @@ class HotRestartNopImpl : public Server::HotRestart { void sendParentTerminateRequest() override {} ServerStatsFromParent mergeParentStatsIfAny(Stats::StoreRoot&) override { return {}; } void shutdown() override {} + uint32_t baseId() override { return 0; } std::string version() override { return "disabled"; } Thread::BasicLockable& logLock() override { return log_lock_; } Thread::BasicLockable& accessLogLock() override { return access_log_lock_; } diff --git a/source/server/hot_restarting_base.cc b/source/server/hot_restarting_base.cc index 07cfbd94bfd9a..724dd9e8b31ab 100644 --- a/source/server/hot_restarting_base.cc +++ b/source/server/hot_restarting_base.cc @@ -2,6 +2,7 @@ #include "common/api/os_sys_calls_impl.h" #include "common/common/utility.h" +#include "common/stats/utility.h" namespace Envoy { namespace Server { @@ -10,6 +11,14 @@ using HotRestartMessage = envoy::HotRestartMessage; static constexpr uint64_t MaxSendmsgSize = 4096; +HotRestartingBase::~HotRestartingBase() { + if (my_domain_socket_ != -1) { + Api::OsSysCalls& os_sys_calls = Api::OsSysCallsSingleton::get(); + Api::SysCallIntResult result = os_sys_calls.close(my_domain_socket_); + ASSERT(result.rc_ == 0); + } +} + void HotRestartingBase::initDomainSocketAddress(sockaddr_un* address) { memset(address, 0, sizeof(*address)); address->sun_family = AF_UNIX; @@ -40,8 +49,13 @@ void HotRestartingBase::bindDomainSocket(uint64_t id, const std::string& role) { Api::SysCallIntResult result = os_sys_calls.bind(my_domain_socket_, reinterpret_cast(&address), sizeof(address)); if (result.rc_ != 0) { - throw EnvoyException( - fmt::format("unable to bind domain socket with id={} (see --base-id option)", id)); + const auto msg = fmt::format( + "unable to bind domain socket with 
base_id={}, id={}, errno={} (see --base-id option)", + base_id_, id, result.errno_); + if (result.errno_ == SOCKET_ERROR_ADDR_IN_USE) { + throw HotRestartDomainSocketInUseException(msg); + } + throw EnvoyException(msg); } } @@ -173,7 +187,7 @@ std::unique_ptr HotRestartingBase::receiveHotRestartMessage(B message.msg_controllen = CMSG_SPACE(sizeof(int)); const int recvmsg_rc = recvmsg(my_domain_socket_, &message, 0); - if (block == Blocking::No && recvmsg_rc == -1 && errno == EAGAIN) { + if (block == Blocking::No && recvmsg_rc == -1 && errno == SOCKET_ERROR_AGAIN) { return nullptr; } RELEASE_ASSERT(recvmsg_rc != -1, fmt::format("recvmsg() returned -1, errno = {}", errno)); @@ -211,5 +225,21 @@ std::unique_ptr HotRestartingBase::receiveHotRestartMessage(B return ret; } +Stats::Gauge& HotRestartingBase::hotRestartGeneration(Stats::Scope& scope) { + // Track the hot-restart generation. Using gauge's accumulate semantics, + // the increments will be combined across hot-restart. This may be useful + // at some point, though the main motivation for this stat is to enable + // an integration test showing that dynamic stat-names can be coalesced + // across hot-restarts. There's no other reason this particular stat-name + // needs to be created dynamically. + // + // Note also, this stat cannot currently be represented as a counter due to + // the way stats get latched on sink update. See the comment in + // InstanceUtil::flushMetricsToSinks. 
+ return Stats::Utility::gaugeFromElements(scope, + {Stats::DynamicName("server.hot_restart_generation")}, + Stats::Gauge::ImportMode::Accumulate); +} + } // namespace Server } // namespace Envoy diff --git a/source/server/hot_restarting_base.h b/source/server/hot_restarting_base.h index 933c41adf6a73..0e2b5abc4817e 100644 --- a/source/server/hot_restarting_base.h +++ b/source/server/hot_restarting_base.h @@ -11,6 +11,7 @@ #include "envoy/common/platform.h" #include "envoy/server/hot_restart.h" #include "envoy/server/options.h" +#include "envoy/stats/scope.h" #include "common/common/assert.h" @@ -24,6 +25,7 @@ namespace Server { class HotRestartingBase { protected: HotRestartingBase(uint64_t base_id) : base_id_(base_id) {} + ~HotRestartingBase(); void initDomainSocketAddress(sockaddr_un* address); sockaddr_un createDomainSocketAddress(uint64_t id, const std::string& role); @@ -56,6 +58,10 @@ class HotRestartingBase { bool replyIsExpectedType(const envoy::HotRestartMessage* proto, envoy::HotRestartMessage::Reply::ReplyCase oneof_type) const; + // Returns a Gauge that tracks hot-restart generation, where every successive + // child increments this number. + static Stats::Gauge& hotRestartGeneration(Stats::Scope& scope); + private: void getPassedFdIfPresent(envoy::HotRestartMessage* out, msghdr* message); std::unique_ptr parseProtoAndResetState(); diff --git a/source/server/hot_restarting_child.cc b/source/server/hot_restarting_child.cc index f5eb8296c6634..25cb46bcf0fd9 100644 --- a/source/server/hot_restarting_child.cc +++ b/source/server/hot_restarting_child.cc @@ -80,12 +80,17 @@ void HotRestartingChild::sendParentTerminateRequest() { wrapped_request.mutable_request()->mutable_terminate(); sendHotRestartMessage(parent_address_, wrapped_request); parent_terminated_ = true; - // Once setting parent_terminated_ == true, we can send no more hot restart RPCs, and therefore - // receive no more responses, including stats. 
So, now safe to forget our stat transferral state. + + // Note that the 'generation' counter needs to retain the contribution from + // the parent. + stat_merger_->retainParentGaugeValue(hot_restart_generation_stat_name_); + + // Now it is safe to forget our stat transferral state. // - // This destruction is actually important far beyond memory efficiency. The scope-based temporary - // counter logic relies on the StatMerger getting destroyed once hot restart's stat merging is - // all done. (See stat_merger.h for details). + // This destruction is actually important far beyond memory efficiency. The + // scope-based temporary counter logic relies on the StatMerger getting + // destroyed once hot restart's stat merging is all done. (See stat_merger.h + // for details). stat_merger_.reset(); } @@ -93,6 +98,7 @@ void HotRestartingChild::mergeParentStats(Stats::Store& stats_store, const HotRestartMessage::Reply::Stats& stats_proto) { if (!stat_merger_) { stat_merger_ = std::make_unique(stats_store); + hot_restart_generation_stat_name_ = hotRestartGeneration(stats_store).statName(); } // Convert the protobuf for serialized dynamic spans into the structure diff --git a/source/server/hot_restarting_child.h b/source/server/hot_restarting_child.h index 08c3cc27359f1..0fe656d06d105 100644 --- a/source/server/hot_restarting_child.h +++ b/source/server/hot_restarting_child.h @@ -27,6 +27,7 @@ class HotRestartingChild : HotRestartingBase, Logger::Loggable bool parent_terminated_{}; sockaddr_un parent_address_; std::unique_ptr stat_merger_{}; + Stats::StatName hot_restart_generation_stat_name_; }; } // namespace Server diff --git a/source/server/hot_restarting_parent.cc b/source/server/hot_restarting_parent.cc index 6022b204cf890..5049c8077f911 100644 --- a/source/server/hot_restarting_parent.cc +++ b/source/server/hot_restarting_parent.cc @@ -6,6 +6,7 @@ #include "common/network/utility.h" #include "common/stats/stat_merger.h" #include "common/stats/symbol_table_impl.h" 
+#include "common/stats/utility.h" #include "server/listener_impl.h" @@ -85,16 +86,8 @@ void HotRestartingParent::onSocketEvent() { void HotRestartingParent::shutdown() { socket_event_.reset(); } HotRestartingParent::Internal::Internal(Server::Instance* server) : server_(server) { - // Track the hot-restart generation. Using gauge's accumulate semantics, - // the increments will be combined across hot-restart. This may be useful - // at some point, though the main motivation for this stat is to enable - // an integration test showing that dynamic stat-names can be coalesced - // across hot-restarts. There's no other reason this particular stat-name - // needs to be created dynamically. - Stats::StatNameDynamicPool pool(server_->stats().symbolTable()); - Stats::Gauge& gauge = server_->stats().gaugeFromStatName( - pool.add("server.hot_restart_generation"), Stats::Gauge::ImportMode::Accumulate); - gauge.inc(); + Stats::Gauge& hot_restart_generation = hotRestartGeneration(server->stats()); + hot_restart_generation.inc(); } HotRestartMessage HotRestartingParent::Internal::shutdownAdmin() { diff --git a/source/server/http/stats_handler.h b/source/server/http/stats_handler.h deleted file mode 100644 index ad4272c1d1931..0000000000000 --- a/source/server/http/stats_handler.h +++ /dev/null @@ -1,115 +0,0 @@ -#pragma once - -#include -#include - -#include "envoy/buffer/buffer.h" -#include "envoy/http/codes.h" -#include "envoy/http/header_map.h" -#include "envoy/server/admin.h" -#include "envoy/server/instance.h" - -#include "common/stats/histogram_impl.h" - -#include "absl/strings/string_view.h" - -namespace Envoy { -namespace Server { - -class StatsHandler { - -public: - static Http::Code handlerResetCounters(absl::string_view path_and_query, - Http::ResponseHeaderMap& response_headers, - Buffer::Instance& response, AdminStream&, - Server::Instance& server); - static Http::Code handlerStatsRecentLookups(absl::string_view path_and_query, - Http::ResponseHeaderMap& 
response_headers, - Buffer::Instance& response, AdminStream&, - Server::Instance& server); - static Http::Code handlerStatsRecentLookupsClear(absl::string_view path_and_query, - Http::ResponseHeaderMap& response_headers, - Buffer::Instance& response, AdminStream&, - Server::Instance& server); - static Http::Code handlerStatsRecentLookupsDisable(absl::string_view path_and_query, - Http::ResponseHeaderMap& response_headers, - Buffer::Instance& response, AdminStream&, - Server::Instance& server); - static Http::Code handlerStatsRecentLookupsEnable(absl::string_view path_and_query, - Http::ResponseHeaderMap& response_headers, - Buffer::Instance& response, AdminStream&, - Server::Instance& server); - static Http::Code handlerStats(absl::string_view path_and_query, - Http::ResponseHeaderMap& response_headers, - Buffer::Instance& response, AdminStream&, - Server::Instance& server); - static Http::Code handlerPrometheusStats(absl::string_view path_and_query, - Http::ResponseHeaderMap& response_headers, - Buffer::Instance& response, AdminStream&, - Server::Instance& server); - -private: - template - static bool shouldShowMetric(const StatType& metric, const bool used_only, - const absl::optional& regex) { - return ((!used_only || metric.used()) && - (!regex.has_value() || std::regex_search(metric.name(), regex.value()))); - } - - friend class AdminStatsTest; - - static std::string statsAsJson(const std::map& all_stats, - const std::map& text_readouts, - const std::vector& all_histograms, - bool used_only, - const absl::optional regex = absl::nullopt, - bool pretty_print = false); -}; - -/** - * Formatter for metric/labels exported to Prometheus. - * - * See: https://prometheus.io/docs/concepts/data_model - */ -class PrometheusStatsFormatter { -public: - /** - * Extracts counters and gauges and relevant tags, appending them to - * the response buffer after sanitizing the metric / label names. - * @return uint64_t total number of metric types inserted in response. 
- */ - static uint64_t statsAsPrometheus(const std::vector& counters, - const std::vector& gauges, - const std::vector& histograms, - Buffer::Instance& response, const bool used_only, - const absl::optional& regex); - /** - * Format the given tags, returning a string as a comma-separated list - * of ="" pairs. - */ - static std::string formattedTags(const std::vector& tags); - /** - * Format the given metric name, prefixed with "envoy_". - */ - static std::string metricName(const std::string& extracted_name); - -private: - /** - * Take a string and sanitize it according to Prometheus conventions. - */ - static std::string sanitizeName(const std::string& name); - - /* - * Determine whether a metric has never been emitted and choose to - * not show it if we only wanted used metrics. - */ - template - static bool shouldShowMetric(const StatType& metric, const bool used_only, - const absl::optional& regex) { - return ((!used_only || metric.used()) && - (!regex.has_value() || std::regex_search(metric.name(), regex.value()))); - } -}; - -} // namespace Server -} // namespace Envoy diff --git a/source/server/lds_api.cc b/source/server/lds_api.cc index 373956339a96e..4a1a65ed125be 100644 --- a/source/server/lds_api.cc +++ b/source/server/lds_api.cc @@ -1,12 +1,8 @@ #include "server/lds_api.h" -#include - #include "envoy/admin/v3/config_dump.pb.h" #include "envoy/api/v2/listener.pb.h" #include "envoy/config/core/v3/config_source.pb.h" -#include "envoy/config/listener/v3/listener.pb.h" -#include "envoy/config/listener/v3/listener.pb.validate.h" #include "envoy/config/route/v3/route.pb.h" #include "envoy/service/discovery/v3/discovery.pb.h" #include "envoy/stats/scope.h" @@ -17,6 +13,7 @@ #include "common/config/utility.h" #include "common/protobuf/utility.h" +#include "absl/container/node_hash_set.h" #include "absl/strings/str_join.h" namespace Envoy { @@ -27,27 +24,23 @@ LdsApiImpl::LdsApiImpl(const envoy::config::core::v3::ConfigSource& lds_config, Stats::Scope& scope, 
ListenerManager& lm, ProtobufMessage::ValidationVisitor& validation_visitor) : Envoy::Config::SubscriptionBase( - lds_config.resource_api_version()), + lds_config.resource_api_version(), validation_visitor, "name"), listener_manager_(lm), scope_(scope.createScope("listener_manager.lds.")), cm_(cm), - init_target_("LDS", [this]() { subscription_->start({}); }), - validation_visitor_(validation_visitor) { + init_target_("LDS", [this]() { subscription_->start({}); }) { const auto resource_name = getResourceName(); subscription_ = cm.subscriptionFactory().subscriptionFromConfigSource( - lds_config, Grpc::Common::typeUrl(resource_name), *scope_, *this); + lds_config, Grpc::Common::typeUrl(resource_name), *scope_, *this, resource_decoder_); init_manager.add(init_target_); } -void LdsApiImpl::onConfigUpdate( - const Protobuf::RepeatedPtrField& added_resources, - const Protobuf::RepeatedPtrField& removed_resources, - const std::string& system_version_info) { - std::unique_ptr maybe_eds_resume; +void LdsApiImpl::onConfigUpdate(const std::vector& added_resources, + const Protobuf::RepeatedPtrField& removed_resources, + const std::string& system_version_info) { + Config::ScopedResume maybe_resume_rds; if (cm_.adsMux()) { - const auto type_url = Config::getTypeUrl( - envoy::config::core::v3::ApiVersion::V2); - cm_.adsMux()->pause(type_url); - maybe_eds_resume = - std::make_unique([this, type_url] { cm_.adsMux()->resume(type_url); }); + const auto type_urls = + Config::getAllVersionTypeUrls(); + maybe_resume_rds = cm_.adsMux()->pause(type_urls); } bool any_applied = false; @@ -63,18 +56,18 @@ void LdsApiImpl::onConfigUpdate( } ListenerManager::FailureStates failure_state; - std::unordered_set listener_names; + absl::node_hash_set listener_names; std::string message; for (const auto& resource : added_resources) { envoy::config::listener::v3::Listener listener; try { - listener = MessageUtil::anyConvertAndValidate( - resource.resource(), validation_visitor_); + listener = + 
dynamic_cast(resource.get().resource()); if (!listener_names.insert(listener.name()).second) { // NOTE: at this point, the first of these duplicates has already been successfully applied. throw EnvoyException(fmt::format("duplicate listener {} found", listener.name())); } - if (listener_manager_.addOrUpdateListener(listener, resource.version(), true)) { + if (listener_manager_.addOrUpdateListener(listener, resource.get().version(), true)) { ENVOY_LOG(info, "lds: add/update listener '{}'", listener.name()); any_applied = true; } else { @@ -84,7 +77,7 @@ void LdsApiImpl::onConfigUpdate( failure_state.push_back(std::make_unique()); auto& state = failure_state.back(); state->set_details(e.what()); - state->mutable_failed_configuration()->PackFrom(resource); + state->mutable_failed_configuration()->PackFrom(resource.get().resource()); absl::StrAppend(&message, listener.name(), ": ", e.what(), "\n"); } } @@ -99,35 +92,25 @@ void LdsApiImpl::onConfigUpdate( } } -void LdsApiImpl::onConfigUpdate(const Protobuf::RepeatedPtrField& resources, +void LdsApiImpl::onConfigUpdate(const std::vector& resources, const std::string& version_info) { // We need to keep track of which listeners need to remove. // Specifically, it's [listeners we currently have] - [listeners found in the response]. - std::unordered_set listeners_to_remove; - for (const auto& listener : listener_manager_.listeners()) { + absl::node_hash_set listeners_to_remove; + for (const auto& listener : + listener_manager_.listeners(ListenerManager::WARMING | ListenerManager::ACTIVE)) { listeners_to_remove.insert(listener.get().name()); } - - Protobuf::RepeatedPtrField to_add_repeated; - for (const auto& listener_blob : resources) { - // Add this resource to our delta added/updated pile... - envoy::service::discovery::v3::Resource* to_add = to_add_repeated.Add(); - // No validation needed here the overloaded call to onConfigUpdate validates. 
- const std::string listener_name = - MessageUtil::anyConvert(listener_blob).name(); - to_add->set_name(listener_name); - to_add->set_version(version_info); - to_add->mutable_resource()->MergeFrom(listener_blob); - // ...and remove its name from our delta removed pile. - listeners_to_remove.erase(listener_name); + for (const auto& resource : resources) { + // Remove its name from our delta removed pile. + listeners_to_remove.erase(resource.get().name()); } - // Copy our delta removed pile into the desired format. Protobuf::RepeatedPtrField to_remove_repeated; for (const auto& listener : listeners_to_remove) { *to_remove_repeated.Add() = listener; } - onConfigUpdate(to_add_repeated, to_remove_repeated, version_info); + onConfigUpdate(resources, to_remove_repeated, version_info); } void LdsApiImpl::onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason reason, @@ -139,4 +122,4 @@ void LdsApiImpl::onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason r } } // namespace Server -} // namespace Envoy \ No newline at end of file +} // namespace Envoy diff --git a/source/server/lds_api.h b/source/server/lds_api.h index 00a4155636688..0ace5e7b937c2 100644 --- a/source/server/lds_api.h +++ b/source/server/lds_api.h @@ -4,6 +4,7 @@ #include "envoy/config/core/v3/config_source.pb.h" #include "envoy/config/listener/v3/listener.pb.h" +#include "envoy/config/listener/v3/listener.pb.validate.h" #include "envoy/config/subscription.h" #include "envoy/config/subscription_factory.h" #include "envoy/init/manager.h" @@ -34,25 +35,20 @@ class LdsApiImpl : public LdsApi, private: // Config::SubscriptionCallbacks - void onConfigUpdate(const Protobuf::RepeatedPtrField& resources, + void onConfigUpdate(const std::vector& resources, const std::string& version_info) override; - void onConfigUpdate( - const Protobuf::RepeatedPtrField& added_resources, - const Protobuf::RepeatedPtrField& removed_resources, - const std::string& system_version_info) override; + void 
onConfigUpdate(const std::vector& added_resources, + const Protobuf::RepeatedPtrField& removed_resources, + const std::string& system_version_info) override; void onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason reason, const EnvoyException* e) override; - std::string resourceName(const ProtobufWkt::Any& resource) override { - return MessageUtil::anyConvert(resource).name(); - } - std::unique_ptr subscription_; + Config::SubscriptionPtr subscription_; std::string system_version_info_; ListenerManager& listener_manager_; Stats::ScopePtr scope_; Upstream::ClusterManager& cm_; Init::TargetImpl init_target_; - ProtobufMessage::ValidationVisitor& validation_visitor_; }; } // namespace Server diff --git a/source/server/listener_impl.cc b/source/server/listener_impl.cc index 149d8d1fba212..f3fe37ade1873 100644 --- a/source/server/listener_impl.cc +++ b/source/server/listener_impl.cc @@ -3,19 +3,25 @@ #include "envoy/config/core/v3/base.pb.h" #include "envoy/config/listener/v3/listener.pb.h" #include "envoy/config/listener/v3/listener_components.pb.h" +#include "envoy/extensions/filters/listener/proxy_protocol/v3/proxy_protocol.pb.h" +#include "envoy/network/exception.h" +#include "envoy/network/udp_packet_writer_config.h" #include "envoy/registry/registry.h" #include "envoy/server/active_udp_listener_config.h" #include "envoy/server/transport_socket_config.h" #include "envoy/stats/scope.h" #include "common/access_log/access_log_impl.h" +#include "common/api/os_sys_calls_impl.h" #include "common/common/assert.h" #include "common/config/utility.h" #include "common/network/connection_balancer_impl.h" #include "common/network/resolver_impl.h" #include "common/network/socket_option_factory.h" +#include "common/network/socket_option_impl.h" #include "common/network/utility.h" #include "common/protobuf/utility.h" +#include "common/runtime/runtime_features.h" #include "server/configuration_impl.h" #include "server/drain_manager_impl.h" @@ -30,9 +36,29 @@ namespace 
Envoy { namespace Server { +namespace { +bool needTlsInspector(const envoy::config::listener::v3::Listener& config) { + return std::any_of(config.filter_chains().begin(), config.filter_chains().end(), + [](const auto& filter_chain) { + const auto& matcher = filter_chain.filter_chain_match(); + return matcher.transport_protocol() == "tls" || + (matcher.transport_protocol().empty() && + (!matcher.server_names().empty() || + !matcher.application_protocols().empty())); + }) && + !std::any_of( + config.listener_filters().begin(), config.listener_filters().end(), + [](const auto& filter) { + return filter.name() == + Extensions::ListenerFilters::ListenerFilterNames::get().TlsInspector || + filter.name() == "envoy.listener.tls_inspector"; + }); +} +} // namespace + ListenSocketFactoryImpl::ListenSocketFactoryImpl(ListenerComponentFactory& factory, Network::Address::InstanceConstSharedPtr address, - Network::Address::SocketType socket_type, + Network::Socket::Type socket_type, const Network::Socket::OptionsSharedPtr& options, bool bind_to_port, const std::string& listener_name, bool reuse_port) @@ -41,7 +67,7 @@ ListenSocketFactoryImpl::ListenSocketFactoryImpl(ListenerComponentFactory& facto bool create_socket = false; if (local_address_->type() == Network::Address::Type::Ip) { - if (socket_type_ == Network::Address::SocketType::Datagram) { + if (socket_type_ == Network::Socket::Type::Datagram) { ASSERT(reuse_port_ == true); } @@ -85,7 +111,7 @@ Network::SocketSharedPtr ListenSocketFactoryImpl::createListenSocketAndApplyOpti fmt::format("{}: Setting socket options {}", listener_name_, ok ? 
"succeeded" : "failed"); if (!ok) { ENVOY_LOG(warn, "{}", message); - throw EnvoyException(message); + throw Network::CreateListenerException(message); } else { ENVOY_LOG(debug, "{}", message); } @@ -145,7 +171,7 @@ Http::Context& ListenerFactoryContextBaseImpl::httpContext() { return server_.ht const LocalInfo::LocalInfo& ListenerFactoryContextBaseImpl::localInfo() const { return server_.localInfo(); } -Envoy::Runtime::RandomGenerator& ListenerFactoryContextBaseImpl::random() { +Envoy::Random::RandomGenerator& ListenerFactoryContextBaseImpl::random() { return server_.random(); } Envoy::Runtime::Loader& ListenerFactoryContextBaseImpl::runtime() { return server_.runtime(); } @@ -223,6 +249,11 @@ ListenerImpl::ListenerImpl(const envoy::config::listener::v3::Listener& config, parent.factory_.createDrainManager(config.drain_type()))), filter_chain_manager_(address_, listener_factory_context_->parentFactoryContext(), initManager()), + cx_limit_runtime_key_("envoy.resource_limits.listener." + config_.name() + + ".connection_limit"), + open_connections_(std::make_shared( + std::numeric_limits::max(), listener_factory_context_->runtime(), + cx_limit_runtime_key_)), local_init_watcher_(fmt::format("Listener-local-init-watcher {}", name), [this] { if (workers_started_) { parent_.onListenerWarmed(*this); @@ -232,30 +263,106 @@ ListenerImpl::ListenerImpl(const envoy::config::listener::v3::Listener& config, listener_init_target_.ready(); } }) { - Network::Address::SocketType socket_type = - Network::Utility::protobufAddressSocketType(config.address()); - if (PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, transparent, false)) { - addListenSocketOptions(Network::SocketOptionFactory::buildIpTransparentOptions()); + + const absl::optional runtime_val = + listener_factory_context_->runtime().snapshot().get(cx_limit_runtime_key_); + if (runtime_val && runtime_val->empty()) { + ENVOY_LOG(warn, + "Listener connection limit runtime key {} is empty. 
There are currently no " + "limitations on the number of accepted connections for listener {}.", + cx_limit_runtime_key_, config_.name()); } - if (PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, freebind, false)) { - addListenSocketOptions(Network::SocketOptionFactory::buildIpFreebindOptions()); + + buildAccessLog(); + auto socket_type = Network::Utility::protobufAddressSocketType(config.address()); + buildListenSocketOptions(socket_type); + buildUdpListenerFactory(socket_type, concurrency); + buildUdpWriterFactory(socket_type); + createListenerFilterFactories(socket_type); + validateFilterChains(socket_type); + buildFilterChains(); + if (socket_type == Network::Socket::Type::Datagram) { + return; } - if (config.reuse_port()) { - addListenSocketOptions(Network::SocketOptionFactory::buildReusePortOptions()); - } else if (socket_type == Network::Address::SocketType::Datagram && concurrency > 1) { - ENVOY_LOG(warn, "Listening on UDP without SO_REUSEPORT socket option may result to unstable " - "packet proxying. Consider configuring the reuse_port listener option."); + buildSocketOptions(); + buildOriginalDstListenerFilter(); + buildProxyProtocolListenerFilter(); + buildTlsInspectorListenerFilter(); + if (!workers_started_) { + // Initialize dynamic_init_manager_ from Server's init manager if it's not initialized. + // NOTE: listener_init_target_ should be added to parent's initManager at the end of the + // listener constructor so that this listener's children entities could register their targets + // with their parent's initManager. 
+ parent_.server_.initManager().add(listener_init_target_); } - if (!config.socket_options().empty()) { - addListenSocketOptions( - Network::SocketOptionFactory::buildLiteralOptions(config.socket_options())); +} + +ListenerImpl::ListenerImpl(ListenerImpl& origin, + const envoy::config::listener::v3::Listener& config, + const std::string& version_info, ListenerManagerImpl& parent, + const std::string& name, bool added_via_api, bool workers_started, + uint64_t hash, uint32_t concurrency) + : parent_(parent), address_(origin.address_), + bind_to_port_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(config.deprecated_v1(), bind_to_port, true)), + hand_off_restored_destination_connections_( + PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, hidden_envoy_deprecated_use_original_dst, false)), + per_connection_buffer_limit_bytes_( + PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, per_connection_buffer_limit_bytes, 1024 * 1024)), + listener_tag_(origin.listener_tag_), name_(name), added_via_api_(added_via_api), + workers_started_(workers_started), hash_(hash), + validation_visitor_( + added_via_api_ ? parent_.server_.messageValidationContext().dynamicValidationVisitor() + : parent_.server_.messageValidationContext().staticValidationVisitor()), + // listener_init_target_ is not used during in place update because we expect server started. 
+ listener_init_target_("", nullptr), + dynamic_init_manager_(std::make_unique( + fmt::format("Listener-local-init-manager {} {}", name, hash))), + config_(config), version_info_(version_info), + listener_filters_timeout_( + PROTOBUF_GET_MS_OR_DEFAULT(config, listener_filters_timeout, 15000)), + continue_on_listener_filters_timeout_(config.continue_on_listener_filters_timeout()), + listener_factory_context_(std::make_shared( + origin.listener_factory_context_->listener_factory_context_base_, this, *this)), + filter_chain_manager_(address_, origin.listener_factory_context_->parentFactoryContext(), + initManager(), origin.filter_chain_manager_), + local_init_watcher_(fmt::format("Listener-local-init-watcher {}", name), [this] { + ASSERT(workers_started_); + parent_.inPlaceFilterChainUpdate(*this); + }) { + buildAccessLog(); + auto socket_type = Network::Utility::protobufAddressSocketType(config.address()); + buildListenSocketOptions(socket_type); + buildUdpListenerFactory(socket_type, concurrency); + buildUdpWriterFactory(socket_type); + createListenerFilterFactories(socket_type); + validateFilterChains(socket_type); + buildFilterChains(); + // In place update is tcp only so it's safe to apply below tcp only initialization. + buildSocketOptions(); + buildOriginalDstListenerFilter(); + buildProxyProtocolListenerFilter(); + buildTlsInspectorListenerFilter(); + open_connections_ = origin.open_connections_; +} + +void ListenerImpl::buildAccessLog() { + for (const auto& access_log : config_.access_log()) { + AccessLog::InstanceSharedPtr current_access_log = + AccessLog::AccessLogFactory::fromProto(access_log, *listener_factory_context_); + access_logs_.push_back(current_access_log); } - if (socket_type == Network::Address::SocketType::Datagram) { - // Needed for recvmsg to return destination address in IP header. - addListenSocketOptions(Network::SocketOptionFactory::buildIpPacketInfoOptions()); - // Needed to return receive buffer overflown indicator. 
- addListenSocketOptions(Network::SocketOptionFactory::buildRxQueueOverFlowOptions()); - auto udp_config = config.udp_listener_config(); +} + +void ListenerImpl::buildUdpListenerFactory(Network::Socket::Type socket_type, + uint32_t concurrency) { + if (socket_type == Network::Socket::Type::Datagram) { + if (!config_.reuse_port() && concurrency > 1) { + throw EnvoyException("Listening on UDP when concurrency is > 1 without the SO_REUSEPORT " + "socket option results in " + "unstable packet proxying. Configure the reuse_port listener option or " + "set concurrency = 1."); + } + auto udp_config = config_.udp_listener_config(); if (udp_config.udp_listener_name().empty()) { udp_config.set_udp_listener_name(UdpListenerNames::get().RawUdp); } @@ -266,43 +373,91 @@ ListenerImpl::ListenerImpl(const envoy::config::listener::v3::Listener& config, Config::Utility::translateToFactoryConfig(udp_config, validation_visitor_, config_factory); udp_listener_factory_ = config_factory.createActiveUdpListenerFactory(*message, concurrency); } +} + +void ListenerImpl::buildUdpWriterFactory(Network::Socket::Type socket_type) { + if (socket_type == Network::Socket::Type::Datagram) { + auto udp_writer_config = config_.udp_writer_config(); + if (!Api::OsSysCallsSingleton::get().supportsUdpGso() || + udp_writer_config.typed_config().type_url().empty()) { + const std::string default_type_url = + "type.googleapis.com/envoy.config.listener.v3.UdpDefaultWriterOptions"; + udp_writer_config.mutable_typed_config()->set_type_url(default_type_url); + } + auto& config_factory = + Config::Utility::getAndCheckFactory( + udp_writer_config); + ProtobufTypes::MessagePtr message = Config::Utility::translateAnyToFactoryConfig( + udp_writer_config.typed_config(), validation_visitor_, config_factory); + udp_writer_factory_ = config_factory.createUdpPacketWriterFactory(*message); + } +} - if (!config.listener_filters().empty()) { +void ListenerImpl::buildListenSocketOptions(Network::Socket::Type socket_type) { + 
// The process-wide `signal()` handling may fail to handle SIGPIPE if overridden + // in the process (i.e., on a mobile client). Some OSes support handling it at the socket layer: + if (ENVOY_SOCKET_SO_NOSIGPIPE.hasValue()) { + addListenSocketOptions(Network::SocketOptionFactory::buildSocketNoSigpipeOptions()); + } + if (PROTOBUF_GET_WRAPPED_OR_DEFAULT(config_, transparent, false)) { + addListenSocketOptions(Network::SocketOptionFactory::buildIpTransparentOptions()); + } + if (PROTOBUF_GET_WRAPPED_OR_DEFAULT(config_, freebind, false)) { + addListenSocketOptions(Network::SocketOptionFactory::buildIpFreebindOptions()); + } + if (config_.reuse_port()) { + addListenSocketOptions(Network::SocketOptionFactory::buildReusePortOptions()); + } + if (!config_.socket_options().empty()) { + addListenSocketOptions( + Network::SocketOptionFactory::buildLiteralOptions(config_.socket_options())); + } + if (socket_type == Network::Socket::Type::Datagram) { + // Needed for recvmsg to return destination address in IP header. + addListenSocketOptions(Network::SocketOptionFactory::buildIpPacketInfoOptions()); + // Needed to return receive buffer overflown indicator. 
+ addListenSocketOptions(Network::SocketOptionFactory::buildRxQueueOverFlowOptions()); + // TODO(yugant) : Add a config option for UDP_GRO + if (Api::OsSysCallsSingleton::get().supportsUdpGro()) { + // Needed to receive gso_size option + addListenSocketOptions(Network::SocketOptionFactory::buildUdpGroOptions()); + } + } +} + +void ListenerImpl::createListenerFilterFactories(Network::Socket::Type socket_type) { + if (!config_.listener_filters().empty()) { switch (socket_type) { - case Network::Address::SocketType::Datagram: - if (config.listener_filters().size() > 1) { - // Currently supports only 1 UDP listener - throw EnvoyException( - fmt::format("error adding listener '{}': Only 1 UDP filter per listener supported", - address_->asString())); + case Network::Socket::Type::Datagram: + if (config_.listener_filters().size() > 1) { + // Currently supports only 1 UDP listener filter. + throw EnvoyException(fmt::format( + "error adding listener '{}': Only 1 UDP listener filter per listener supported", + address_->asString())); } udp_listener_filter_factories_ = parent_.factory_.createUdpListenerFilterFactoryList( - config.listener_filters(), *listener_factory_context_); + config_.listener_filters(), *listener_factory_context_); break; - case Network::Address::SocketType::Stream: + case Network::Socket::Type::Stream: listener_filter_factories_ = parent_.factory_.createListenerFilterFactoryList( - config.listener_filters(), *listener_factory_context_); + config_.listener_filters(), *listener_factory_context_); break; default: NOT_REACHED_GCOVR_EXCL_LINE; } } +} - for (const auto& access_log : config.access_log()) { - AccessLog::InstanceSharedPtr current_access_log = - AccessLog::AccessLogFactory::fromProto(access_log, *listener_factory_context_); - access_logs_.push_back(current_access_log); - } - - if (config.filter_chains().empty() && (socket_type == Network::Address::SocketType::Stream || - !udp_listener_factory_->isTransportConnectionless())) { +void 
ListenerImpl::validateFilterChains(Network::Socket::Type socket_type) { + if (config_.filter_chains().empty() && (socket_type == Network::Socket::Type::Stream || + !udp_listener_factory_->isTransportConnectionless())) { // If we got here, this is a tcp listener or connection-oriented udp listener, so ensure there // is a filter chain specified throw EnvoyException(fmt::format("error adding listener '{}': no filter chains specified", address_->asString())); } else if (udp_listener_factory_ != nullptr && !udp_listener_factory_->isTransportConnectionless()) { - for (auto& filter_chain : config.filter_chains()) { + for (auto& filter_chain : config_.filter_chains()) { // Early fail if any filter chain doesn't have transport socket configured. if (!filter_chain.has_transport_socket()) { throw EnvoyException(fmt::format("error adding listener '{}': no transport socket " @@ -311,7 +466,9 @@ ListenerImpl::ListenerImpl(const envoy::config::listener::v3::Listener& config, } } } +} +void ListenerImpl::buildFilterChains() { Server::Configuration::TransportSocketFactoryContextImpl transport_factory_context( parent_.server_.admin(), parent_.server_.sslContextManager(), listenerScope(), parent_.server_.clusterManager(), parent_.server_.localInfo(), parent_.server_.dispatcher(), @@ -322,28 +479,28 @@ ListenerImpl::ListenerImpl(const envoy::config::listener::v3::Listener& config, // network filter chain update. // TODO(lambdai): create builder from filter_chain_manager to obtain the init manager ListenerFilterChainFactoryBuilder builder(*this, transport_factory_context); - filter_chain_manager_.addFilterChain(config.filter_chains(), builder, filter_chain_manager_); - - if (socket_type == Network::Address::SocketType::Datagram) { - return; - } + filter_chain_manager_.addFilterChain(config_.filter_chains(), builder, filter_chain_manager_); +} +void ListenerImpl::buildSocketOptions() { // TCP specific setup. 
- if (config.has_connection_balance_config()) { + if (config_.has_connection_balance_config()) { // Currently exact balance is the only supported type and there are no options. - ASSERT(config.connection_balance_config().has_exact_balance()); + ASSERT(config_.connection_balance_config().has_exact_balance()); connection_balancer_ = std::make_unique(); } else { connection_balancer_ = std::make_unique(); } - if (config.has_tcp_fast_open_queue_length()) { + if (config_.has_tcp_fast_open_queue_length()) { addListenSocketOptions(Network::SocketOptionFactory::buildTcpFastOpenOptions( - config.tcp_fast_open_queue_length().value())); + config_.tcp_fast_open_queue_length().value())); } +} +void ListenerImpl::buildOriginalDstListenerFilter() { // Add original dst listener filter if 'use_original_dst' flag is set. - if (PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, hidden_envoy_deprecated_use_original_dst, false)) { + if (PROTOBUF_GET_WRAPPED_OR_DEFAULT(config_, hidden_envoy_deprecated_use_original_dst, false)) { auto& factory = Config::Utility::getAndCheckFactoryByName( Extensions::ListenerFilters::ListenerFilterNames::get().OriginalDst); @@ -352,36 +509,26 @@ ListenerImpl::ListenerImpl(const envoy::config::listener::v3::Listener& config, Envoy::ProtobufWkt::Empty(), /*listener_filter_matcher=*/nullptr, *listener_factory_context_)); } +} + +void ListenerImpl::buildProxyProtocolListenerFilter() { // Add proxy protocol listener filter if 'use_proxy_proto' flag is set. // TODO(jrajahalme): This is the last listener filter on purpose. When filter chain matching // is implemented, this needs to be run after the filter chain has been // selected. 
- if (PROTOBUF_GET_WRAPPED_OR_DEFAULT(config.filter_chains()[0], use_proxy_proto, false)) { + if (PROTOBUF_GET_WRAPPED_OR_DEFAULT(config_.filter_chains()[0], use_proxy_proto, false)) { auto& factory = Config::Utility::getAndCheckFactoryByName( Extensions::ListenerFilters::ListenerFilterNames::get().ProxyProtocol); listener_filter_factories_.push_back(factory.createListenerFilterFactoryFromProto( - Envoy::ProtobufWkt::Empty(), + envoy::extensions::filters::listener::proxy_protocol::v3::ProxyProtocol(), /*listener_filter_matcher=*/nullptr, *listener_factory_context_)); } +} +void ListenerImpl::buildTlsInspectorListenerFilter() { // TODO(zuercher) remove the deprecated TLS inspector name when the deprecated names are removed. - const bool need_tls_inspector = - std::any_of( - config.filter_chains().begin(), config.filter_chains().end(), - [](const auto& filter_chain) { - const auto& matcher = filter_chain.filter_chain_match(); - return matcher.transport_protocol() == "tls" || - (matcher.transport_protocol().empty() && - (!matcher.server_names().empty() || !matcher.application_protocols().empty())); - }) && - !std::any_of( - config.listener_filters().begin(), config.listener_filters().end(), - [](const auto& filter) { - return filter.name() == - Extensions::ListenerFilters::ListenerFilterNames::get().TlsInspector || - filter.name() == "envoy.listener.tls_inspector"; - }); + const bool need_tls_inspector = needTlsInspector(config_); // Automatically inject TLS Inspector if it wasn't configured explicitly and it's needed. if (need_tls_inspector) { const std::string message = @@ -398,14 +545,6 @@ ListenerImpl::ListenerImpl(const envoy::config::listener::v3::Listener& config, Envoy::ProtobufWkt::Empty(), /*listener_filter_matcher=*/nullptr, *listener_factory_context_)); } - - if (!workers_started_) { - // Initialize dynamic_init_manager_ from Server's init manager if it's not initialized. 
- // NOTE: listener_init_target_ should be added to parent's initManager at the end of the - // listener constructor so that this listener's children entities could register their targets - // with their parent's initManager. - parent_.server_.initManager().add(listener_init_target_); - } } AccessLog::AccessLogManager& PerListenerFactoryContextImpl::accessLogManager() { @@ -432,7 +571,7 @@ Http::Context& PerListenerFactoryContextImpl::httpContext() { const LocalInfo::LocalInfo& PerListenerFactoryContextImpl::localInfo() const { return listener_factory_context_base_->localInfo(); } -Envoy::Runtime::RandomGenerator& PerListenerFactoryContextImpl::random() { +Envoy::Random::RandomGenerator& PerListenerFactoryContextImpl::random() { return listener_factory_context_base_->random(); } Envoy::Runtime::Loader& PerListenerFactoryContextImpl::runtime() { @@ -515,8 +654,8 @@ void ListenerImpl::initialize() { // by resetting the watcher. if (workers_started_) { ENVOY_LOG_MISC(debug, "Initialize listener {} local-init-manager.", name_); - // If workers_started_ is true, dynamic_init_manager_ should be initialized by listener manager - // directly. + // If workers_started_ is true, dynamic_init_manager_ should be initialized by listener + // manager directly. dynamic_init_manager_->initialize(local_init_watcher_); } } @@ -536,5 +675,79 @@ void ListenerImpl::setSocketFactory(const Network::ListenSocketFactorySharedPtr& socket_factory_ = socket_factory; } +bool ListenerImpl::supportUpdateFilterChain(const envoy::config::listener::v3::Listener& config, + bool worker_started) { + if (!Runtime::runtimeFeatureEnabled( + "envoy.reloadable_features.listener_in_place_filterchain_update")) { + return false; + } + + // The in place update needs the active listener in worker thread. worker_started guarantees the + // existence of that active listener. + if (!worker_started) { + return false; + } + + // Currently we only support TCP filter chain update. 
+ if (Network::Utility::protobufAddressSocketType(config_.address()) != + Network::Socket::Type::Stream || + Network::Utility::protobufAddressSocketType(config.address()) != + Network::Socket::Type::Stream) { + return false; + } + + // Full listener update currently rejects tcp listener having 0 filter chain. + // In place filter chain update could survive under zero filter chain but we should keep the same + // behavior for now. This also guards the below filter chain access. + if (config.filter_chains_size() == 0) { + return false; + } + + // See buildProxyProtocolListenerFilter(). Full listener update guarantees at least 1 filter chain + // at tcp listener. + if (PROTOBUF_GET_WRAPPED_OR_DEFAULT(config_.filter_chains()[0], use_proxy_proto, false) ^ + PROTOBUF_GET_WRAPPED_OR_DEFAULT(config.filter_chains()[0], use_proxy_proto, false)) { + return false; + } + + // See buildTlsInspectorListenerFilter(). + if (needTlsInspector(config_) ^ needTlsInspector(config)) { + return false; + } + return ListenerMessageUtil::filterChainOnlyChange(config_, config); +} + +ListenerImplPtr +ListenerImpl::newListenerWithFilterChain(const envoy::config::listener::v3::Listener& config, + bool workers_started, uint64_t hash) { + // Use WrapUnique since the constructor is private. + return absl::WrapUnique( + new ListenerImpl(*this, config, version_info_, parent_, name_, added_via_api_, + /* new new workers started state */ workers_started, + /* use new hash */ hash, parent_.server_.options().concurrency())); +} + +void ListenerImpl::diffFilterChain(const ListenerImpl& another_listener, + std::function callback) { + for (const auto& message_and_filter_chain : filter_chain_manager_.filterChainsByMessage()) { + if (another_listener.filter_chain_manager_.filterChainsByMessage().find( + message_and_filter_chain.first) == + another_listener.filter_chain_manager_.filterChainsByMessage().end()) { + // The filter chain exists in `this` listener but not in the listener passed in. 
+ callback(*message_and_filter_chain.second); + } + } +} + +bool ListenerMessageUtil::filterChainOnlyChange(const envoy::config::listener::v3::Listener& lhs, + const envoy::config::listener::v3::Listener& rhs) { + Protobuf::util::MessageDifferencer differencer; + differencer.set_message_field_comparison(Protobuf::util::MessageDifferencer::EQUIVALENT); + differencer.set_repeated_field_comparison(Protobuf::util::MessageDifferencer::AS_SET); + differencer.IgnoreField( + envoy::config::listener::v3::Listener::GetDescriptor()->FindFieldByName("filter_chains")); + return differencer.Compare(lhs, rhs); +} + } // namespace Server } // namespace Envoy diff --git a/source/server/listener_impl.h b/source/server/listener_impl.h index 53e27b02024b9..920f8a24e9b3f 100644 --- a/source/server/listener_impl.h +++ b/source/server/listener_impl.h @@ -13,6 +13,7 @@ #include "envoy/server/listener_manager.h" #include "envoy/stats/scope.h" +#include "common/common/basic_resource_impl.h" #include "common/common/logger.h" #include "common/init/manager_impl.h" #include "common/init/target_impl.h" @@ -24,6 +25,15 @@ namespace Envoy { namespace Server { +class ListenerMessageUtil { +public: + /** + * @return true if listener message lhs and rhs are the same if ignoring filter_chains field. 
+ */ + static bool filterChainOnlyChange(const envoy::config::listener::v3::Listener& lhs, + const envoy::config::listener::v3::Listener& rhs); +}; + class ListenerManagerImpl; class ListenSocketFactoryImpl : public Network::ListenSocketFactory, @@ -31,12 +41,12 @@ class ListenSocketFactoryImpl : public Network::ListenSocketFactory, public: ListenSocketFactoryImpl(ListenerComponentFactory& factory, Network::Address::InstanceConstSharedPtr address, - Network::Address::SocketType socket_type, + Network::Socket::Type socket_type, const Network::Socket::OptionsSharedPtr& options, bool bind_to_port, const std::string& listener_name, bool reuse_port); // Network::ListenSocketFactory - Network::Address::SocketType socketType() const override { return socket_type_; } + Network::Socket::Type socketType() const override { return socket_type_; } const Network::Address::InstanceConstSharedPtr& localAddress() const override { return local_address_; } @@ -64,7 +74,7 @@ class ListenSocketFactoryImpl : public Network::ListenSocketFactory, // Initially, its port number might be 0. Once a socket is created, its port // will be set to the binding port. 
Network::Address::InstanceConstSharedPtr local_address_; - Network::Address::SocketType socket_type_; + Network::Socket::Type socket_type_; const Network::Socket::OptionsSharedPtr options_; bool bind_to_port_; const std::string& listener_name_; @@ -96,7 +106,7 @@ class ListenerFactoryContextBaseImpl final : public Configuration::FactoryContex Http::Context& httpContext() override; Init::Manager& initManager() override; const LocalInfo::LocalInfo& localInfo() const override; - Envoy::Runtime::RandomGenerator& random() override; + Envoy::Random::RandomGenerator& random() override; Envoy::Runtime::Loader& runtime() override; Stats::Scope& scope() override; Singleton::Manager& singletonManager() override; @@ -162,7 +172,7 @@ class PerListenerFactoryContextImpl : public Configuration::ListenerFactoryConte Http::Context& httpContext() override; Init::Manager& initManager() override; const LocalInfo::LocalInfo& localInfo() const override; - Envoy::Runtime::RandomGenerator& random() override; + Envoy::Random::RandomGenerator& random() override; Envoy::Runtime::Loader& runtime() override; Stats::Scope& scope() override; Singleton::Manager& singletonManager() override; @@ -211,13 +221,35 @@ class ListenerImpl final : public Network::ListenerConfig, * @param workers_started supplies whether the listener is being added before or after workers * have been started. This controls various behavior related to init management. * @param hash supplies the hash to use for duplicate checking. - * @param validation_visitor message validation visitor instance. + * @param concurrency is the number of listeners instances to be created. 
*/ ListenerImpl(const envoy::config::listener::v3::Listener& config, const std::string& version_info, ListenerManagerImpl& parent, const std::string& name, bool added_via_api, bool workers_started, uint64_t hash, uint32_t concurrency); ~ListenerImpl() override; + // TODO(lambdai): Explore using the same ListenerImpl object to execute in place filter chain + // update. + /** + * Execute in place filter chain update. The filter chain update is less expensive than full + * listener update because connections may not need to be drained. + */ + std::unique_ptr + newListenerWithFilterChain(const envoy::config::listener::v3::Listener& config, + bool workers_started, uint64_t hash); + /** + * Determine if in place filter chain update could be executed at this moment. + */ + bool supportUpdateFilterChain(const envoy::config::listener::v3::Listener& config, + bool worker_started); + + /** + * Run the callback on each filter chain that exists in this listener but not in the passed + * listener config. + */ + void diffFilterChain(const ListenerImpl& another_listener, + std::function callback); + /** * Helper functions to determine whether a listener is blocked for update or remove. 
*/ @@ -270,7 +302,12 @@ class ListenerImpl final : public Network::ListenerConfig, Network::ActiveUdpListenerFactory* udpListenerFactory() override { return udp_listener_factory_.get(); } + Network::UdpPacketWriterFactoryOptRef udpPacketWriterFactory() override { + return Network::UdpPacketWriterFactoryOptRef(std::ref(*udp_writer_factory_)); + } Network::ConnectionBalancer& connectionBalancer() override { return *connection_balancer_; } + + ResourceLimit& openConnections() override { return *open_connections_; } const std::vector& accessLogs() const override { return access_logs_; } @@ -296,10 +333,27 @@ class ListenerImpl final : public Network::ListenerConfig, SystemTime last_updated_; private: - void addListenSocketOption(const Network::Socket::OptionConstSharedPtr& option) { - ensureSocketOptions(); - listen_socket_options_->emplace_back(std::move(option)); - } + /** + * Create a new listener from an existing listener and the new config message if the in place + * filter chain update is decided. Should be called only by newListenerWithFilterChain(). + */ + ListenerImpl(ListenerImpl& origin, const envoy::config::listener::v3::Listener& config, + const std::string& version_info, ListenerManagerImpl& parent, + const std::string& name, bool added_via_api, bool workers_started, uint64_t hash, + uint32_t concurrency); + // Helpers for constructor. 
+ void buildAccessLog(); + void buildUdpListenerFactory(Network::Socket::Type socket_type, uint32_t concurrency); + void buildUdpWriterFactory(Network::Socket::Type socket_type); + void buildListenSocketOptions(Network::Socket::Type socket_type); + void createListenerFilterFactories(Network::Socket::Type socket_type); + void validateFilterChains(Network::Socket::Type socket_type); + void buildFilterChains(); + void buildSocketOptions(); + void buildOriginalDstListenerFilter(); + void buildProxyProtocolListenerFilter(); + void buildTlsInspectorListenerFilter(); + void addListenSocketOptions(const Network::Socket::OptionsSharedPtr& options) { ensureSocketOptions(); Network::Socket::appendOptions(listen_socket_options_, options); @@ -336,10 +390,17 @@ class ListenerImpl final : public Network::ListenerConfig, const std::chrono::milliseconds listener_filters_timeout_; const bool continue_on_listener_filters_timeout_; Network::ActiveUdpListenerFactoryPtr udp_listener_factory_; + Network::UdpPacketWriterFactoryPtr udp_writer_factory_; Network::ConnectionBalancerPtr connection_balancer_; std::shared_ptr listener_factory_context_; FilterChainManagerImpl filter_chain_manager_; + // Per-listener connection limits are only specified via runtime. + // + // TODO (tonya11en): Move this functionality into the overload manager. + const std::string cx_limit_runtime_key_; + std::shared_ptr open_connections_; + // This init watcher, if workers_started_ is false, notifies the "parent" listener manager when // listener initialization is complete. 
// Important: local_init_watcher_ must be the last field in the class to avoid unexpected watcher diff --git a/source/server/listener_manager_impl.cc b/source/server/listener_manager_impl.cc index d6c8de15d828f..a257b69b36e52 100644 --- a/source/server/listener_manager_impl.cc +++ b/source/server/listener_manager_impl.cc @@ -39,11 +39,11 @@ namespace Envoy { namespace Server { namespace { -std::string toString(Network::Address::SocketType socket_type) { +std::string toString(Network::Socket::Type socket_type) { switch (socket_type) { - case Network::Address::SocketType::Stream: + case Network::Socket::Type::Stream: return "SocketType::Stream"; - case Network::Address::SocketType::Datagram: + case Network::Socket::Type::Datagram: return "SocketType::Datagram"; } NOT_REACHED_GCOVR_EXCL_LINE; @@ -188,17 +188,17 @@ Network::ListenerFilterMatcherSharedPtr ProdListenerComponentFactory::createList } Network::SocketSharedPtr ProdListenerComponentFactory::createListenSocket( - Network::Address::InstanceConstSharedPtr address, Network::Address::SocketType socket_type, + Network::Address::InstanceConstSharedPtr address, Network::Socket::Type socket_type, const Network::Socket::OptionsSharedPtr& options, const ListenSocketCreationParams& params) { ASSERT(address->type() == Network::Address::Type::Ip || address->type() == Network::Address::Type::Pipe); - ASSERT(socket_type == Network::Address::SocketType::Stream || - socket_type == Network::Address::SocketType::Datagram); + ASSERT(socket_type == Network::Socket::Type::Stream || + socket_type == Network::Socket::Type::Datagram); // For each listener config we share a single socket among all threaded listeners. // First we try to get the socket from our parent if applicable. 
if (address->type() == Network::Address::Type::Pipe) { - if (socket_type != Network::Address::SocketType::Stream) { + if (socket_type != Network::Socket::Type::Stream) { // This could be implemented in the future, since Unix domain sockets // support SOCK_DGRAM, but there would need to be a way to specify it in // envoy.api.v2.core.Pipe. @@ -215,7 +215,7 @@ Network::SocketSharedPtr ProdListenerComponentFactory::createListenSocket( return std::make_shared(address); } - const std::string scheme = (socket_type == Network::Address::SocketType::Stream) + const std::string scheme = (socket_type == Network::Socket::Type::Stream) ? std::string(Network::Utility::TCP_SCHEME) : std::string(Network::Utility::UDP_SCHEME); const std::string addr = absl::StrCat(scheme, address->asString()); @@ -225,7 +225,7 @@ Network::SocketSharedPtr ProdListenerComponentFactory::createListenSocket( if (fd != -1) { ENVOY_LOG(debug, "obtained socket for address {} from parent", addr); Network::IoHandlePtr io_handle = std::make_unique(fd); - if (socket_type == Network::Address::SocketType::Stream) { + if (socket_type == Network::Socket::Type::Stream) { return std::make_shared(std::move(io_handle), address, options); } else { return std::make_shared(std::move(io_handle), address, options); @@ -233,7 +233,7 @@ Network::SocketSharedPtr ProdListenerComponentFactory::createListenSocket( } } - if (socket_type == Network::Address::SocketType::Stream) { + if (socket_type == Network::Socket::Type::Stream) { return std::make_shared(address, options, params.bind_to_port); } else { return std::make_shared(address, options, params.bind_to_port); @@ -312,11 +312,11 @@ ProtobufTypes::MessagePtr ListenerManagerImpl::dumpListenerConfigs() { fillState(*dump_listener, *listener); } - for (const auto& state_and_name : error_state_tracker_) { + for (const auto& [error_name, error_state] : error_state_tracker_) { DynamicListener* dynamic_listener = - getOrCreateDynamicListener(state_and_name.first, *config_dump, 
listener_map); + getOrCreateDynamicListener(error_name, *config_dump, listener_map); - const envoy::admin::v3::UpdateFailureState& state = *state_and_name.second; + const envoy::admin::v3::UpdateFailureState& state = *error_state; dynamic_listener->mutable_error_state()->CopyFrom(state); } @@ -403,22 +403,46 @@ bool ListenerManagerImpl::addOrUpdateListenerInternal( return false; } - ListenerImplPtr new_listener(new ListenerImpl(config, version_info, *this, name, added_via_api, - workers_started_, hash, - server_.options().concurrency())); + ListenerImplPtr new_listener = nullptr; + + // In place filter chain update depends on the active listener at worker. + if (existing_active_listener != active_listeners_.end() && + (*existing_active_listener)->supportUpdateFilterChain(config, workers_started_)) { + ENVOY_LOG(debug, "use in place update filter chain update path for listener name={} hash={}", + name, hash); + new_listener = + (*existing_active_listener)->newListenerWithFilterChain(config, workers_started_, hash); + stats_.listener_in_place_updated_.inc(); + } else { + ENVOY_LOG(debug, "use full listener update path for listener name={} hash={}", name, hash); + new_listener = + std::make_unique(config, version_info, *this, name, added_via_api, + workers_started_, hash, server_.options().concurrency()); + } + ListenerImpl& new_listener_ref = *new_listener; // We mandate that a listener with the same name must have the same configured address. This // avoids confusion during updates and allows us to use the same bound address. Note that in // the case of port 0 binding, the new listener will implicitly use the same bound port from // the existing listener. 
- if ((existing_warming_listener != warming_listeners_.end() && - *(*existing_warming_listener)->address() != *new_listener->address()) || - (existing_active_listener != active_listeners_.end() && - *(*existing_active_listener)->address() != *new_listener->address())) { - const std::string message = fmt::format( - "error updating listener: '{}' has a different address '{}' from existing listener", name, - new_listener->address()->asString()); + bool active_listener_exists = false; + bool warming_listener_exists = false; + if (existing_warming_listener != warming_listeners_.end() && + *(*existing_warming_listener)->address() != *new_listener->address()) { + warming_listener_exists = true; + } + if (existing_active_listener != active_listeners_.end() && + *(*existing_active_listener)->address() != *new_listener->address()) { + active_listener_exists = true; + } + if (active_listener_exists || warming_listener_exists) { + const std::string message = + fmt::format("error updating listener: '{}' has a different address '{}' from existing " + "listener address '{}'", + name, new_listener->address()->asString(), + warming_listener_exists ? (*existing_warming_listener)->address()->asString() + : (*existing_active_listener)->address()->asString()); ENVOY_LOG(warn, "{}", message); throw EnvoyException(message); } @@ -460,7 +484,7 @@ bool ListenerManagerImpl::addOrUpdateListenerInternal( // We have no warming or active listener so we need to make a new one. What we do depends on // whether workers have been started or not. Additionally, search through draining listeners // to see if there is a listener that has a socket factory for the same address we are - // configured for and doesn't not use SO_REUSEPORT. This is an edge case, but may happen if a + // configured for and doesn't use SO_REUSEPORT. This is an edge case, but may happen if a // listener is removed and then added back with a same or different name and intended to listen // on the same address. 
This should work and not fail. Network::ListenSocketFactorySharedPtr draining_listen_socket_factory; @@ -477,13 +501,13 @@ bool ListenerManagerImpl::addOrUpdateListenerInternal( draining_listen_socket_factory = existing_draining_listener->listener_->getSocketFactory(); } - Network::Address::SocketType socket_type = + Network::Socket::Type socket_type = Network::Utility::protobufAddressSocketType(config.address()); new_listener->setSocketFactory( draining_listen_socket_factory ? draining_listen_socket_factory : createListenSocketFactory(config.address(), *new_listener, - (socket_type == Network::Address::SocketType::Datagram) || + (socket_type == Network::Socket::Type::Datagram) || config.reuse_port())); if (workers_started_) { new_listener->debugLog("add warming listener"); @@ -579,14 +603,14 @@ void ListenerManagerImpl::drainListener(ListenerImplPtr&& listener) { server_.dispatcher().post([this, draining_it]() -> void { // TODO(lambdai): Resolve race condition below. // Consider the below events in global sequence order - // master thread: calling drainListener + // main thread: calling drainListener // work thread: deferred delete the active connection - // work thread: post to master that the drain is done - // master thread: erase the listener + // work thread: post to main that the drain is done + // main thread: erase the listener // worker thread: execute destroying connection when the shared listener config is // destroyed at step 4 (could be worse such as access the connection because connection is // not yet started to deleted). The race condition is introduced because 3 occurs too - // early. My solution is to defer schedule the callback posting to master thread, by + // early. My solution is to defer schedule the callback posting to main thread, by // introducing DeferTaskUtil. So that 5 should always happen before 3. 
if (--draining_it->workers_pending_removal_ == 0) { draining_it->listener_->debugLog("draining listener removal complete"); @@ -615,11 +639,30 @@ ListenerManagerImpl::getListenerByName(ListenerList& listeners, const std::strin return ret; } -std::vector> ListenerManagerImpl::listeners() { +std::vector> +ListenerManagerImpl::listeners(ListenerState state) { std::vector> ret; - ret.reserve(active_listeners_.size()); - for (const auto& listener : active_listeners_) { - ret.push_back(*listener); + + size_t size = 0; + size += state & WARMING ? warming_listeners_.size() : 0; + size += state & ACTIVE ? active_listeners_.size() : 0; + size += state & DRAINING ? draining_listeners_.size() : 0; + ret.reserve(size); + + if (state & WARMING) { + for (const auto& listener : warming_listeners_) { + ret.push_back(*listener); + } + } + if (state & ACTIVE) { + for (const auto& listener : active_listeners_) { + ret.push_back(*listener); + } + } + if (state & DRAINING) { + for (const auto& draining_listener : draining_listeners_) { + ret.push_back(*(draining_listener.listener_)); + } } return ret; } @@ -645,19 +688,22 @@ void ListenerManagerImpl::addListenerToWorker(Worker& worker, // The add listener completion runs on the worker thread. Post back to the main thread to // avoid locking. server_.dispatcher().post([this, success, &listener, completion_callback]() -> void { - // It is theoretically possible for a listener to get added on 1 worker but not the - // others. The below check with onListenerCreateFailure() is there to ensure we execute - // the removal/logging/stats at most once on failure. Note also that drain/removal can - // race with addition. It's guaranteed that workers process remove after add so this - // should be fine. + // It is possible for a listener to get added on 1 worker but not the others. The below + // check with onListenerCreateFailure() is there to ensure we execute the + // removal/logging/stats at most once on failure. 
Note also that drain/removal can race + // with addition. It's guaranteed that workers process remove after add so this should be + // fine. + // + // TODO(mattklein123): We should consider rewriting how listener sockets are added to + // workers, especially in the case of reuse port. If we were to create all needed + // listener sockets on the main thread (even in the case of reuse port) we could catch + // almost all socket errors here. This would both greatly simplify the logic and allow + // for xDS NACK in most cases. if (!success && !listener.onListenerCreateFailure()) { - // TODO(mattklein123): In addition to a critical log and a stat, we should consider - // adding a startup option here to cause the server to exit. I think we probably want - // this at Lyft but I will do it in a follow up. - ENVOY_LOG(critical, "listener '{}' failed to listen on address '{}' on worker", + ENVOY_LOG(error, "listener '{}' failed to listen on address '{}' on worker", listener.name(), listener.listenSocketFactory().localAddress()->asString()); stats_.listener_create_failure_.inc(); - removeListener(listener.name()); + removeListenerInternal(listener.name(), false); } if (success) { stats_.listener_create_success_.inc(); @@ -694,20 +740,47 @@ void ListenerManagerImpl::onListenerWarmed(ListenerImpl& listener) { updateWarmingActiveGauges(); } -void ListenerManagerImpl::drainFilterChains(ListenerImplPtr&& listener) { +void ListenerManagerImpl::inPlaceFilterChainUpdate(ListenerImpl& listener) { + auto existing_active_listener = getListenerByName(active_listeners_, listener.name()); + auto existing_warming_listener = getListenerByName(warming_listeners_, listener.name()); + ASSERT(existing_warming_listener != warming_listeners_.end()); + ASSERT(*existing_warming_listener != nullptr); + + (*existing_warming_listener)->debugLog("execute in place filter chain update"); + + // Now that in place filter chain update was decided, the replaced listener must be in active + // list. 
It requires stop/remove listener procedure cancelling the in placed update if any. + ASSERT(existing_active_listener != active_listeners_.end()); + ASSERT(*existing_active_listener != nullptr); + + for (const auto& worker : workers_) { + // Explicitly override the existing listener with a new listener config. + addListenerToWorker(*worker, listener.listenerTag(), listener, nullptr); + } + + auto previous_listener = std::move(*existing_active_listener); + *existing_active_listener = std::move(*existing_warming_listener); + // Finish active_listeners_ transformation before calling `drainFilterChains` as it depends on + // their state. + drainFilterChains(std::move(previous_listener), **existing_active_listener); + + warming_listeners_.erase(existing_warming_listener); + updateWarmingActiveGauges(); +} + +void ListenerManagerImpl::drainFilterChains(ListenerImplPtr&& draining_listener, + ListenerImpl& new_listener) { // First add the listener to the draining list. std::list::iterator draining_group = draining_filter_chains_manager_.emplace(draining_filter_chains_manager_.begin(), - std::move(listener), workers_.size()); - int filter_chain_size = draining_group->getDrainingFilterChains().size(); - - // Using set() avoids a multiple modifiers problem during the multiple processes phase of hot - // restart. Same below inside the lambda. - // TODO(lambdai): Currently the number of DrainFilterChains objects are tracked: - // len(filter_chains). 
What we really need is accumulate(filter_chains, filter_chains: - // len(filter_chains)) - stats_.total_filter_chains_draining_.set(draining_filter_chains_manager_.size()); - + std::move(draining_listener), workers_.size()); + draining_group->getDrainingListener().diffFilterChain( + new_listener, [&draining_group](Network::DrainableFilterChain& filter_chain) mutable { + filter_chain.startDraining(); + draining_group->addFilterChainToDrain(filter_chain); + }); + auto filter_chain_size = draining_group->numDrainingFilterChains(); + stats_.total_filter_chains_draining_.add(filter_chain_size); draining_group->getDrainingListener().debugLog( absl::StrCat("draining ", filter_chain_size, " filter chains in listener ", draining_group->getDrainingListener().name())); @@ -733,15 +806,14 @@ void ListenerManagerImpl::drainFilterChains(ListenerImplPtr&& listener) { draining_group->getDrainingListener().debugLog( absl::StrCat("draining filter chains from listener ", draining_group->getDrainingListener().name(), " complete")); + stats_.total_filter_chains_draining_.sub( + draining_group->numDrainingFilterChains()); draining_filter_chains_manager_.erase(draining_group); - stats_.total_filter_chains_draining_.set( - draining_filter_chains_manager_.size()); } }); }); } }); - updateWarmingActiveGauges(); } @@ -755,14 +827,19 @@ uint64_t ListenerManagerImpl::numConnections() const { } bool ListenerManagerImpl::removeListener(const std::string& name) { + return removeListenerInternal(name, true); +} + +bool ListenerManagerImpl::removeListenerInternal(const std::string& name, + bool dynamic_listeners_only) { ENVOY_LOG(debug, "begin remove listener: name={}", name); auto existing_active_listener = getListenerByName(active_listeners_, name); auto existing_warming_listener = getListenerByName(warming_listeners_, name); if ((existing_warming_listener == warming_listeners_.end() || - (*existing_warming_listener)->blockRemove()) && + (dynamic_listeners_only && 
(*existing_warming_listener)->blockRemove())) && (existing_active_listener == active_listeners_.end() || - (*existing_active_listener)->blockRemove())) { + (dynamic_listeners_only && (*existing_active_listener)->blockRemove()))) { ENVOY_LOG(debug, "unknown/locked listener '{}'. no remove", name); return false; } @@ -896,18 +973,16 @@ ListenerFilterChainFactoryBuilder::ListenerFilterChainFactoryBuilder( : validator_(validator), listener_component_factory_(listener_component_factory), factory_context_(factory_context) {} -std::shared_ptr ListenerFilterChainFactoryBuilder::buildFilterChain( +Network::DrainableFilterChainSharedPtr ListenerFilterChainFactoryBuilder::buildFilterChain( const envoy::config::listener::v3::FilterChain& filter_chain, FilterChainFactoryContextCreator& context_creator) const { return buildFilterChainInternal(filter_chain, context_creator.createFilterChainFactoryContext(&filter_chain)); } -std::shared_ptr -ListenerFilterChainFactoryBuilder::buildFilterChainInternal( +Network::DrainableFilterChainSharedPtr ListenerFilterChainFactoryBuilder::buildFilterChainInternal( const envoy::config::listener::v3::FilterChain& filter_chain, - std::unique_ptr&& filter_chain_factory_context) - const { + Configuration::FilterChainFactoryContextPtr&& filter_chain_factory_context) const { // If the cluster doesn't have transport socket configured, then use the default "raw_buffer" // transport socket or BoringSSL-based "tls" transport socket if TLS settings are configured. // We copy by value first then override if necessary. 
@@ -943,8 +1018,7 @@ ListenerFilterChainFactoryBuilder::buildFilterChainInternal( Network::ListenSocketFactorySharedPtr ListenerManagerImpl::createListenSocketFactory( const envoy::config::core::v3::Address& proto_address, ListenerImpl& listener, bool reuse_port) { - Network::Address::SocketType socket_type = - Network::Utility::protobufAddressSocketType(proto_address); + Network::Socket::Type socket_type = Network::Utility::protobufAddressSocketType(proto_address); return std::make_shared( factory_, listener.address(), socket_type, listener.listenSocketOptions(), listener.bindToPort(), listener.name(), reuse_port); diff --git a/source/server/listener_manager_impl.h b/source/server/listener_manager_impl.h index cfa2603322c74..106e1d629dc90 100644 --- a/source/server/listener_manager_impl.h +++ b/source/server/listener_manager_impl.h @@ -89,7 +89,7 @@ class ProdListenerComponentFactory : public ListenerComponentFactory, } Network::SocketSharedPtr createListenSocket(Network::Address::InstanceConstSharedPtr address, - Network::Address::SocketType socket_type, + Network::Socket::Type socket_type, const Network::Socket::OptionsSharedPtr& options, const ListenSocketCreationParams& params) override; @@ -112,6 +112,7 @@ using ListenerImplPtr = std::unique_ptr; COUNTER(listener_added) \ COUNTER(listener_create_failure) \ COUNTER(listener_create_success) \ + COUNTER(listener_in_place_updated) \ COUNTER(listener_modified) \ COUNTER(listener_removed) \ COUNTER(listener_stopped) \ @@ -152,6 +153,12 @@ class DrainingFilterChainsManager { drain_timer_->enableTimer(drain_time); } + void addFilterChainToDrain(const Network::FilterChain& filter_chain) { + draining_filter_chains_.push_back(&filter_chain); + } + + uint32_t numDrainingFilterChains() const { return draining_filter_chains_.size(); } + private: ListenerImplPtr draining_listener_; std::list draining_filter_chains_; @@ -170,6 +177,7 @@ class ListenerManagerImpl : public ListenerManager, Logger::Loggable> listeners() 
override; + std::vector> + listeners(ListenerState state = ListenerState::ACTIVE) override; uint64_t numConnections() const override; bool removeListener(const std::string& listener_name) override; void startWorkers(GuardDog& guard_dog) override; @@ -189,27 +198,6 @@ class ListenerManagerImpl : public ListenerManager, Logger::Loggable overridden_listener, - ListenerImpl& listener, - std::function completion_callback) { - addListenerToWorker(worker, overridden_listener, listener, completion_callback); - } - // Erase the the listener draining filter chain from active listeners and then start the drain - // sequence. - void drainFilterChainsForTest(ListenerImpl* listener_raw_ptr) { - auto iter = std::find_if(active_listeners_.begin(), active_listeners_.end(), - [listener_raw_ptr](const ListenerImplPtr& ptr) { - return ptr != nullptr && ptr.get() == listener_raw_ptr; - }); - ASSERT(iter != active_listeners_.end()); - - ListenerImplPtr listener_impl_ptr = std::move(*iter); - active_listeners_.erase(iter); - drainFilterChains(std::move(listener_impl_ptr)); - } - Instance& server_; ListenerComponentFactory& factory_; @@ -223,6 +211,8 @@ class ListenerManagerImpl : public ListenerManager, Logger::Loggable + Network::DrainableFilterChainSharedPtr buildFilterChain(const envoy::config::listener::v3::FilterChain& filter_chain, FilterChainFactoryContextCreator& context_creator) const override; private: - std::shared_ptr - buildFilterChainInternal(const envoy::config::listener::v3::FilterChain& filter_chain, - std::unique_ptr&& - filter_chain_factory_context) const; + Network::DrainableFilterChainSharedPtr buildFilterChainInternal( + const envoy::config::listener::v3::FilterChain& filter_chain, + Configuration::FilterChainFactoryContextPtr&& filter_chain_factory_context) const; ProtobufMessage::ValidationVisitor& validator_; ListenerComponentFactory& listener_component_factory_; diff --git a/source/server/options_impl.cc b/source/server/options_impl.cc index 
e2ca56f39e26b..fac2d8ae32c18 100644 --- a/source/server/options_impl.cc +++ b/source/server/options_impl.cc @@ -10,8 +10,8 @@ #include "common/common/fmt.h" #include "common/common/logger.h" #include "common/common/macros.h" -#include "common/common/version.h" #include "common/protobuf/utility.h" +#include "common/version/version.h" #include "server/options_impl_platform.h" @@ -59,6 +59,13 @@ OptionsImpl::OptionsImpl(std::vector args, TCLAP::ValueArg base_id( "", "base-id", "base ID so that multiple envoys can run on the same host if needed", false, 0, "uint32_t", cmd); + TCLAP::SwitchArg use_dynamic_base_id( + "", "use-dynamic-base-id", + "the server chooses a base ID dynamically. Supersedes a static base ID. May not be used " + "when the restart epoch is non-zero.", + cmd, false); + TCLAP::ValueArg base_id_path( + "", "base-id-path", "path to which the base ID is written", false, "", "string", cmd); TCLAP::ValueArg concurrency("", "concurrency", "# of worker threads to run", false, std::thread::hardware_concurrency(), "uint32_t", cmd); TCLAP::ValueArg config_path("c", "config-path", "Path to configuration file", false, @@ -66,6 +73,11 @@ OptionsImpl::OptionsImpl(std::vector args, TCLAP::ValueArg config_yaml( "", "config-yaml", "Inline YAML configuration, merges with the contents of --config-path", false, "", "string", cmd); + TCLAP::ValueArg bootstrap_version( + "", "bootstrap-version", + "API version to parse the bootstrap config as (e.g. 3). 
If " + "unset, all known versions will be attempted", + false, 0, "string", cmd); TCLAP::SwitchArg allow_unknown_fields("", "allow-unknown-fields", "allow unknown fields in static configuration (DEPRECATED)", @@ -76,6 +88,9 @@ OptionsImpl::OptionsImpl(std::vector args, TCLAP::SwitchArg reject_unknown_dynamic_fields("", "reject-unknown-dynamic-fields", "reject unknown fields in dynamic configuration", cmd, false); + TCLAP::SwitchArg ignore_unknown_dynamic_fields("", "ignore-unknown-dynamic-fields", + "ignore unknown fields in dynamic configuration", + cmd, false); TCLAP::ValueArg admin_address_path("", "admin-address-path", "Admin address path", false, "", "string", cmd); @@ -97,7 +112,7 @@ OptionsImpl::OptionsImpl(std::vector args, "", "log-format-prefix-with-location", "Prefix all occurrences of '%v' in log format with with '[%g:%#] ' ('[path/to/file.cc:99] " "').", - false, true, "bool", cmd); + false, false, "bool", cmd); TCLAP::ValueArg log_path("", "log-path", "Path to logfile", false, "", "string", cmd); TCLAP::ValueArg restart_epoch("", "restart-epoch", "hot restart epoch #", false, 0, @@ -116,6 +131,10 @@ OptionsImpl::OptionsImpl(std::vector args, TCLAP::ValueArg drain_time_s("", "drain-time-s", "Hot restart and LDS removal drain time in seconds", false, 600, "uint32_t", cmd); + TCLAP::ValueArg drain_strategy( + "", "drain-strategy", + "Hot restart drain sequence behaviour, one of 'gradual' (default) or 'immediate'.", false, + "gradual", "string", cmd); TCLAP::ValueArg parent_shutdown_time_s("", "parent-shutdown-time-s", "Hot restart parent shutdown time in seconds", false, 900, "uint32_t", cmd); @@ -123,12 +142,6 @@ OptionsImpl::OptionsImpl(std::vector args, "One of 'serve' (default; validate configs and then serve " "traffic normally) or 'validate' (validate configs and exit).", false, "serve", "string", cmd); - TCLAP::ValueArg max_stats("", "max-stats", - "Deprecated and unused; please do not specify.", false, 123, - "uint64_t", cmd); - TCLAP::ValueArg 
max_obj_name_len("", "max-obj-name-len", - "Deprecated and unused; please do not specify.", false, - 123, "uint64_t", cmd); TCLAP::SwitchArg disable_hot_restart("", "disable-hot-restart", "Disable hot restart functionality", cmd, false); TCLAP::SwitchArg enable_mutex_tracing( @@ -137,7 +150,7 @@ OptionsImpl::OptionsImpl(std::vector args, "", "cpuset-threads", "Get the default # of worker threads from cpuset size", cmd, false); TCLAP::ValueArg use_fake_symbol_table("", "use-fake-symbol-table", - "Use fake symbol table implementation", false, true, + "Use fake symbol table implementation", false, false, "bool", cmd); TCLAP::ValueArg disable_extensions("", "disable-extensions", @@ -201,9 +214,16 @@ OptionsImpl::OptionsImpl(std::vector args, fmt::format("error: unknown IP address version '{}'", local_address_ip_version.getValue()); throw MalformedArgvException(message); } + base_id_ = base_id.getValue(); + use_dynamic_base_id_ = use_dynamic_base_id.getValue(); + base_id_path_ = base_id_path.getValue(); + restart_epoch_ = restart_epoch.getValue(); - // For base ID, scale what the user inputs by 10 so that we have spread for domain sockets. 
- base_id_ = base_id.getValue() * 10; + if (use_dynamic_base_id_ && restart_epoch_ > 0) { + const std::string message = fmt::format( + "error: cannot use --restart-epoch={} with --use-dynamic-base-id", restart_epoch_); + throw MalformedArgvException(message); + } if (!concurrency.isSet() && cpuset_threads_) { // The 'concurrency' command line option wasn't set but the 'cpuset-threads' @@ -220,6 +240,9 @@ OptionsImpl::OptionsImpl(std::vector args, config_path_ = config_path.getValue(); config_yaml_ = config_yaml.getValue(); + if (bootstrap_version.getValue() != 0) { + bootstrap_version_ = bootstrap_version.getValue(); + } if (allow_unknown_fields.getValue()) { ENVOY_LOG(warn, "--allow-unknown-fields is deprecated, use --allow-unknown-static-fields instead."); @@ -227,9 +250,9 @@ OptionsImpl::OptionsImpl(std::vector args, allow_unknown_static_fields_ = allow_unknown_static_fields.getValue() || allow_unknown_fields.getValue(); reject_unknown_dynamic_fields_ = reject_unknown_dynamic_fields.getValue(); + ignore_unknown_dynamic_fields_ = ignore_unknown_dynamic_fields.getValue(); admin_address_path_ = admin_address_path.getValue(); log_path_ = log_path.getValue(); - restart_epoch_ = restart_epoch.getValue(); service_cluster_ = service_cluster.getValue(); service_node_ = service_node.getValue(); service_zone_ = service_zone.getValue(); @@ -237,6 +260,15 @@ OptionsImpl::OptionsImpl(std::vector args, drain_time_ = std::chrono::seconds(drain_time_s.getValue()); parent_shutdown_time_ = std::chrono::seconds(parent_shutdown_time_s.getValue()); + if (drain_strategy.getValue() == "immediate") { + drain_strategy_ = Server::DrainStrategy::Immediate; + } else if (drain_strategy.getValue() == "gradual") { + drain_strategy_ = Server::DrainStrategy::Gradual; + } else { + throw MalformedArgvException( + fmt::format("error: unknown drain-strategy '{}'", mode.getValue())); + } + if (hot_restart_version_option.getValue()) { std::cerr << hot_restart_version_cb(!hot_restart_disabled_); throw 
NoServingException(); @@ -308,11 +340,14 @@ Server::CommandLineOptionsPtr OptionsImpl::toCommandLineOptions() const { Server::CommandLineOptionsPtr command_line_options = std::make_unique(); command_line_options->set_base_id(baseId()); + command_line_options->set_use_dynamic_base_id(useDynamicBaseId()); + command_line_options->set_base_id_path(baseIdPath()); command_line_options->set_concurrency(concurrency()); command_line_options->set_config_path(configPath()); command_line_options->set_config_yaml(configYaml()); command_line_options->set_allow_unknown_static_fields(allow_unknown_static_fields_); command_line_options->set_reject_unknown_dynamic_fields(reject_unknown_dynamic_fields_); + command_line_options->set_ignore_unknown_dynamic_fields(ignore_unknown_dynamic_fields_); command_line_options->set_admin_address_path(adminAddressPath()); command_line_options->set_component_log_level(component_log_level_str_); command_line_options->set_log_level(spdlog::level::to_string_view(logLevel()).data(), @@ -337,10 +372,15 @@ Server::CommandLineOptionsPtr OptionsImpl::toCommandLineOptions() const { } command_line_options->mutable_file_flush_interval()->MergeFrom( Protobuf::util::TimeUtil::MillisecondsToDuration(fileFlushIntervalMsec().count())); - command_line_options->mutable_parent_shutdown_time()->MergeFrom( - Protobuf::util::TimeUtil::SecondsToDuration(parentShutdownTime().count())); + command_line_options->mutable_drain_time()->MergeFrom( Protobuf::util::TimeUtil::SecondsToDuration(drainTime().count())); + command_line_options->set_drain_strategy(drainStrategy() == Server::DrainStrategy::Immediate + ? 
envoy::admin::v3::CommandLineOptions::Immediate + : envoy::admin::v3::CommandLineOptions::Gradual); + command_line_options->mutable_parent_shutdown_time()->MergeFrom( + Protobuf::util::TimeUtil::SecondsToDuration(parentShutdownTime().count())); + command_line_options->set_disable_hot_restart(hotRestartDisabled()); command_line_options->set_enable_mutex_tracing(mutexTracingEnabled()); command_line_options->set_cpuset_threads(cpusetThreadsEnabled()); @@ -353,14 +393,15 @@ Server::CommandLineOptionsPtr OptionsImpl::toCommandLineOptions() const { OptionsImpl::OptionsImpl(const std::string& service_cluster, const std::string& service_node, const std::string& service_zone, spdlog::level::level_enum log_level) - : base_id_(0u), concurrency_(1u), config_path_(""), config_yaml_(""), + : base_id_(0u), use_dynamic_base_id_(false), base_id_path_(""), concurrency_(1u), + config_path_(""), config_yaml_(""), local_address_ip_version_(Network::Address::IpVersion::v4), log_level_(log_level), log_format_(Logger::Logger::DEFAULT_LOG_FORMAT), log_format_escaped_(false), restart_epoch_(0u), service_cluster_(service_cluster), service_node_(service_node), service_zone_(service_zone), file_flush_interval_msec_(10000), drain_time_(600), - parent_shutdown_time_(900), mode_(Server::Mode::Serve), hot_restart_disabled_(false), - signal_handling_enabled_(true), mutex_tracing_enabled_(false), cpuset_threads_(false), - fake_symbol_table_enabled_(false) {} + parent_shutdown_time_(900), drain_strategy_(Server::DrainStrategy::Gradual), + mode_(Server::Mode::Serve), hot_restart_disabled_(false), signal_handling_enabled_(true), + mutex_tracing_enabled_(false), cpuset_threads_(false), fake_symbol_table_enabled_(false) {} void OptionsImpl::disableExtensions(const std::vector& names) { for (const auto& name : names) { diff --git a/source/server/options_impl.h b/source/server/options_impl.h index b8f3e64695c47..bb8fd78eaadd7 100644 --- a/source/server/options_impl.h +++ b/source/server/options_impl.h @@ 
-20,7 +20,7 @@ namespace Envoy { class OptionsImpl : public Server::Options, protected Logger::Loggable { public: /** - * Parameters are max_stat_name_len, hot_restart_enabled + * Parameters are hot_restart_enabled */ using HotRestartVersionCb = std::function; @@ -50,12 +50,15 @@ class OptionsImpl : public Server::Options, protected Logger::Loggable& bootstrapVersion() const override { return bootstrap_version_; } const std::string& configYaml() const override { return config_yaml_; } bool allowUnknownStaticFields() const override { return allow_unknown_static_fields_; } bool rejectUnknownDynamicFields() const override { return reject_unknown_dynamic_fields_; } + bool ignoreUnknownDynamicFields() const override { return ignore_unknown_dynamic_fields_; } const std::string& adminAddressPath() const override { return admin_address_path_; } Network::Address::IpVersion localAddressIpVersion() const override { return local_address_ip_version_; } std::chrono::seconds drainTime() const override { return drain_time_; } + std::chrono::seconds parentShutdownTime() const override { return parent_shutdown_time_; } + Server::DrainStrategy drainStrategy() const override { return drain_strategy_; } + spdlog::level::level_enum logLevel() const override { return log_level_; } const std::vector>& componentLogLevels() const override { @@ -119,7 +134,6 @@ class OptionsImpl : public Server::Options, protected Logger::Loggable bootstrap_version_; std::string config_yaml_; bool allow_unknown_static_fields_{false}; bool reject_unknown_dynamic_fields_{false}; + bool ignore_unknown_dynamic_fields_{false}; std::string admin_address_path_; Network::Address::IpVersion local_address_ip_version_; spdlog::level::level_enum log_level_; @@ -174,6 +192,7 @@ class OptionsImpl : public Server::Options, protected Logger::Loggable value_; }; +/** + * Thread-local copy of the state of each configured overload action. 
+ */ +class ThreadLocalOverloadStateImpl : public ThreadLocalOverloadState { +public: + const OverloadActionState& getState(const std::string& action) override { + auto it = actions_.find(action); + if (it == actions_.end()) { + it = actions_.insert(std::make_pair(action, OverloadActionState::Inactive)).first; + } + return it->second; + } + + void setState(const std::string& action, OverloadActionState state) { actions_[action] = state; } + +private: + absl::node_hash_map actions_; +}; + Stats::Counter& makeCounter(Stats::Scope& scope, absl::string_view a, absl::string_view b) { Stats::StatNameManagedStorage stat_name(absl::StrCat("overload.", a, ".", b), scope.symbolTable()); @@ -65,7 +85,7 @@ OverloadAction::OverloadAction(const envoy::config::overload::v3::OverloadAction NOT_REACHED_GCOVR_EXCL_LINE; } - if (!triggers_.insert(std::make_pair(trigger_config.name(), std::move(trigger))).second) { + if (!triggers_.try_emplace(trigger_config.name(), std::move(trigger)).second) { throw EnvoyException( absl::StrCat("Duplicate trigger resource for overload action ", config.name())); } @@ -113,9 +133,7 @@ OverloadManagerImpl::OverloadManagerImpl(Event::Dispatcher& dispatcher, Stats::S auto config = Config::Utility::translateToFactoryConfig(resource, validation_visitor, factory); auto monitor = factory.createResourceMonitor(*config, context); - auto result = - resources_.emplace(std::piecewise_construct, std::forward_as_tuple(name), - std::forward_as_tuple(name, std::move(monitor), *this, stats_scope)); + auto result = resources_.try_emplace(name, name, std::move(monitor), *this, stats_scope); if (!result.second) { throw EnvoyException(absl::StrCat("Duplicate resource monitor ", name)); } @@ -124,8 +142,12 @@ OverloadManagerImpl::OverloadManagerImpl(Event::Dispatcher& dispatcher, Stats::S for (const auto& action : config.actions()) { const auto& name = action.name(); ENVOY_LOG(debug, "Adding overload action {}", name); - auto result = 
actions_.emplace(std::piecewise_construct, std::forward_as_tuple(name), - std::forward_as_tuple(action, stats_scope)); + // TODO: use in place construction once https://github.com/abseil/abseil-cpp/issues/388 is + // addressed + // We cannot currently use in place construction as the OverloadAction constructor may throw, + // causing an inconsistent internal state of the actions_ map, which on destruction results in + // an invalid free. + auto result = actions_.try_emplace(name, OverloadAction(action, stats_scope)); if (!result.second) { throw EnvoyException(absl::StrCat("Duplicate overload action ", name)); } @@ -148,7 +170,7 @@ void OverloadManagerImpl::start() { started_ = true; tls_->set([](Event::Dispatcher&) -> ThreadLocal::ThreadLocalObjectSharedPtr { - return std::make_shared(); + return std::make_shared(); }); if (resources_.empty()) { @@ -191,7 +213,7 @@ bool OverloadManagerImpl::registerForAction(const std::string& action, } ThreadLocalOverloadState& OverloadManagerImpl::getThreadLocalOverloadState() { - return tls_->getTyped(); + return tls_->getTyped(); } void OverloadManagerImpl::updateResourcePressure(const std::string& resource, double pressure) { @@ -208,7 +230,7 @@ void OverloadManagerImpl::updateResourcePressure(const std::string& resource, do ENVOY_LOG(info, "Overload action {} became {}", action, is_active ? 
"active" : "inactive"); tls_->runOnAllThreads([this, action, state] { - tls_->getTyped().setState(action, state); + tls_->getTyped().setState(action, state); }); auto callback_range = action_to_callbacks_.equal_range(action); std::for_each(callback_range.first, callback_range.second, diff --git a/source/server/overload_manager_impl.h b/source/server/overload_manager_impl.h index d76eedc3659f4..4405bfeaf3aae 100644 --- a/source/server/overload_manager_impl.h +++ b/source/server/overload_manager_impl.h @@ -1,8 +1,6 @@ #pragma once #include -#include -#include #include #include "envoy/api/api.h" @@ -17,6 +15,9 @@ #include "common/common/logger.h" +#include "absl/container/node_hash_map.h" +#include "absl/container/node_hash_set.h" + namespace Envoy { namespace Server { @@ -45,8 +46,8 @@ class OverloadAction { using TriggerPtr = std::unique_ptr; private: - std::unordered_map triggers_; - std::unordered_set fired_triggers_; + absl::node_hash_map triggers_; + absl::node_hash_set fired_triggers_; Stats::Gauge& active_gauge_; }; @@ -104,8 +105,8 @@ class OverloadManagerImpl : Logger::Loggable, public OverloadM ThreadLocal::SlotPtr tls_; const std::chrono::milliseconds refresh_interval_; Event::TimerPtr timer_; - std::unordered_map resources_; - std::unordered_map actions_; + absl::node_hash_map resources_; + absl::node_hash_map actions_; using ResourceToActionMap = std::unordered_multimap; ResourceToActionMap resource_to_actions_; diff --git a/source/server/server.cc b/source/server/server.cc index 6c8bc3a3f3053..912665143c984 100644 --- a/source/server/server.cc +++ b/source/server/server.cc @@ -5,9 +5,11 @@ #include #include #include -#include #include "envoy/admin/v3/config_dump.pb.h" +#include "envoy/common/exception.h" +#include "envoy/config/bootstrap/v2/bootstrap.pb.h" +#include "envoy/config/bootstrap/v2/bootstrap.pb.validate.h" #include "envoy/config/bootstrap/v3/bootstrap.pb.h" #include "envoy/config/bootstrap/v3/bootstrap.pb.validate.h" #include 
"envoy/event/dispatcher.h" @@ -15,6 +17,7 @@ #include "envoy/event/timer.h" #include "envoy/network/dns.h" #include "envoy/registry/registry.h" +#include "envoy/server/bootstrap_extension_config.h" #include "envoy/server/options.h" #include "envoy/upstream/cluster_manager.h" @@ -23,12 +26,15 @@ #include "common/common/enum_to_int.h" #include "common/common/mutex_tracer_impl.h" #include "common/common/utility.h" -#include "common/common/version.h" #include "common/config/utility.h" +#include "common/config/version_converter.h" #include "common/http/codes.h" #include "common/local_info/local_info_impl.h" #include "common/memory/stats.h" #include "common/network/address_impl.h" +#include "common/network/listener_impl.h" +#include "common/network/socket_interface.h" +#include "common/network/socket_interface_impl.h" #include "common/protobuf/utility.h" #include "common/router/rds_impl.h" #include "common/runtime/runtime_impl.h" @@ -36,11 +42,12 @@ #include "common/stats/thread_local_store.h" #include "common/stats/timespan_impl.h" #include "common/upstream/cluster_manager_impl.h" +#include "common/version/version.h" +#include "server/admin/utils.h" #include "server/configuration_impl.h" #include "server/connection_handler_impl.h" #include "server/guarddog_impl.h" -#include "server/http/utils.h" #include "server/listener_hooks.h" #include "server/ssl_context_manager.h" @@ -51,12 +58,13 @@ InstanceImpl::InstanceImpl( Init::Manager& init_manager, const Options& options, Event::TimeSystem& time_system, Network::Address::InstanceConstSharedPtr local_address, ListenerHooks& hooks, HotRestart& restarter, Stats::StoreRoot& store, Thread::BasicLockable& access_log_lock, - ComponentFactory& component_factory, Runtime::RandomGeneratorPtr&& random_generator, + ComponentFactory& component_factory, Random::RandomGeneratorPtr&& random_generator, ThreadLocal::Instance& tls, Thread::ThreadFactory& thread_factory, Filesystem::Instance& file_system, std::unique_ptr process_context) : 
init_manager_(init_manager), workers_started_(false), live_(false), shutdown_(false), options_(options), validation_context_(options_.allowUnknownStaticFields(), - !options.rejectUnknownDynamicFields()), + !options.rejectUnknownDynamicFields(), + options.ignoreUnknownDynamicFields()), time_source_(time_system), restarter_(restarter), start_time_(time(nullptr)), original_start_time_(start_time_), stats_store_(store), thread_local_(tls), api_(new Api::Impl(thread_factory, store, time_system, file_system, @@ -131,7 +139,7 @@ Upstream::ClusterManager& InstanceImpl::clusterManager() { return *config_.clust void InstanceImpl::drainListeners() { ENVOY_LOG(info, "closing and draining listeners"); listener_manager_->stopListeners(ListenerManager::StopListenersType::All); - drain_manager_->startDrainSequence(nullptr); + drain_manager_->startDrainSequence([] {}); } void InstanceImpl::failHealthcheck(bool fail) { @@ -158,6 +166,12 @@ MetricSnapshotImpl::MetricSnapshotImpl(Stats::Store& store) { for (const auto& histogram : snapped_histograms_) { histograms_.push_back(*histogram); } + + snapped_text_readouts_ = store.textReadouts(); + text_readouts_.reserve(snapped_text_readouts_.size()); + for (const auto& text_readout : snapped_text_readouts_) { + text_readouts_.push_back(*text_readout); + } } void InstanceUtil::flushMetricsToSinks(const std::list& sinks, @@ -219,6 +233,26 @@ void InstanceImpl::flushStatsInternal() { bool InstanceImpl::healthCheckFailed() { return !live_.load(); } +namespace { +// Loads a bootstrap object, potentially at a specific version (upgrading if necessary). 
+void loadBootsrap(absl::optional bootstrap_version, + envoy::config::bootstrap::v3::Bootstrap& bootstrap, + std::function load_function) { + + if (!bootstrap_version.has_value()) { + load_function(bootstrap, true); + } else if (*bootstrap_version == 3) { + load_function(bootstrap, false); + } else if (*bootstrap_version == 2) { + envoy::config::bootstrap::v2::Bootstrap bootstrap_v2; + load_function(bootstrap_v2, false); + Config::VersionConverter::upgrade(bootstrap_v2, bootstrap); + } else { + throw EnvoyException(fmt::format("Unknown bootstrap version {}.", *bootstrap_version)); + } +} +} // namespace + void InstanceUtil::loadBootstrapConfig(envoy::config::bootstrap::v3::Bootstrap& bootstrap, const Options& options, ProtobufMessage::ValidationVisitor& validation_visitor, @@ -234,11 +268,19 @@ void InstanceUtil::loadBootstrapConfig(envoy::config::bootstrap::v3::Bootstrap& } if (!config_path.empty()) { - MessageUtil::loadFromFile(config_path, bootstrap, validation_visitor, api); + loadBootsrap( + options.bootstrapVersion(), bootstrap, + [&config_path, &validation_visitor, &api](Protobuf::Message& message, bool do_boosting) { + MessageUtil::loadFromFile(config_path, message, validation_visitor, api, do_boosting); + }); } if (!config_yaml.empty()) { envoy::config::bootstrap::v3::Bootstrap bootstrap_override; - MessageUtil::loadFromYaml(config_yaml, bootstrap_override, validation_visitor); + loadBootsrap(options.bootstrapVersion(), bootstrap_override, + [&config_yaml, &validation_visitor](Protobuf::Message& message, bool do_boosting) { + MessageUtil::loadFromYaml(config_yaml, message, validation_visitor, do_boosting); + }); + // TODO(snowp): The fact that we do a merge here doesn't seem to be covered under test. 
bootstrap.MergeFrom(bootstrap_override); } if (config_proto.ByteSize() != 0) { @@ -250,8 +292,8 @@ void InstanceUtil::loadBootstrapConfig(envoy::config::bootstrap::v3::Bootstrap& void InstanceImpl::initialize(const Options& options, Network::Address::InstanceConstSharedPtr local_address, ComponentFactory& component_factory, ListenerHooks& hooks) { - ENVOY_LOG(info, "initializing epoch {} (hot restart version={})", options.restartEpoch(), - restarter_.version()); + ENVOY_LOG(info, "initializing epoch {} (base id={}, hot restart version={})", + options.restartEpoch(), restarter_.baseId(), restarter_.version()); ENVOY_LOG(info, "statically linked extensions:"); for (const auto& ext : Envoy::Registry::FactoryCategoryRegistry::registeredFactories()) { @@ -269,20 +311,28 @@ void InstanceImpl::initialize(const Options& options, // setPrefix has a release assert verifying that setPrefix() is not called after prefix() ThreadSafeSingleton::get().setPrefix(bootstrap_.header_prefix().c_str()); } + // TODO(mattklein123): Custom O(1) headers can be registered at this point for creating/finalizing + // any header maps. + ENVOY_LOG(info, "HTTP header map info:"); + for (const auto& info : Http::HeaderMapImplUtility::getAllHeaderMapImplInfo()) { + ENVOY_LOG(info, " {}: {} bytes: {}", info.name_, info.size_, + absl::StrJoin(info.registered_headers_, ",")); + } // Needs to happen as early as possible in the instantiation to preempt the objects that require // stats. 
stats_store_.setTagProducer(Config::Utility::createTagProducer(bootstrap_)); stats_store_.setStatsMatcher(Config::Utility::createStatsMatcher(bootstrap_)); + stats_store_.setHistogramSettings(Config::Utility::createHistogramSettings(bootstrap_)); const std::string server_stats_prefix = "server."; server_stats_ = std::make_unique( ServerStats{ALL_SERVER_STATS(POOL_COUNTER_PREFIX(stats_store_, server_stats_prefix), POOL_GAUGE_PREFIX(stats_store_, server_stats_prefix), POOL_HISTOGRAM_PREFIX(stats_store_, server_stats_prefix))}); - validation_context_.static_warning_validation_visitor().setCounter( + validation_context_.staticWarningValidationVisitor().setUnknownCounter( server_stats_->static_unknown_fields_); - validation_context_.dynamic_warning_validation_visitor().setCounter( + validation_context_.dynamicWarningValidationVisitor().setUnknownCounter( server_stats_->dynamic_unknown_fields_); initialization_timer_ = std::make_unique( @@ -292,6 +342,8 @@ void InstanceImpl::initialize(const Options& options, assert_action_registration_ = Assert::setDebugAssertionFailureRecordAction( [this]() { server_stats_->debug_assertion_failures_.inc(); }); + envoy_bug_action_registration_ = Assert::setEnvoyBugFailureRecordAction( + [this]() { server_stats_->envoy_bug_failures_.inc(); }); InstanceImpl::failHealthcheck(false); @@ -332,23 +384,6 @@ void InstanceImpl::initialize(const Options& options, // Learn original_start_time_ if our parent is still around to inform us of it. 
restarter_.sendParentAdminShutdownRequest(original_start_time_); admin_ = std::make_unique(initial_config.admin().profilePath(), *this); - if (initial_config.admin().address()) { - if (initial_config.admin().accessLogPath().empty()) { - throw EnvoyException("An admin access log path is required for a listening server."); - } - ENVOY_LOG(info, "admin address: {}", initial_config.admin().address()->asString()); - admin_->startHttpListener(initial_config.admin().accessLogPath(), options.adminAddressPath(), - initial_config.admin().address(), - initial_config.admin().socketOptions(), - stats_store_.createScope("listener.admin.")); - } else { - ENVOY_LOG(warn, "No admin address given, so no admin HTTP server started."); - } - config_tracker_entry_ = - admin_->getConfigTracker().add("bootstrap", [this] { return dumpBootstrapConfig(); }); - if (initial_config.admin().address()) { - admin_->addListenerToHandler(handler_.get()); - } loadServerFlags(initial_config.flagsPath()); @@ -362,6 +397,25 @@ void InstanceImpl::initialize(const Options& options, heap_shrinker_ = std::make_unique(*dispatcher_, *overload_manager_, stats_store_); + for (const auto& bootstrap_extension : bootstrap_.bootstrap_extensions()) { + auto& factory = Config::Utility::getAndCheckFactory( + bootstrap_extension); + auto config = Config::Utility::translateAnyToFactoryConfig( + bootstrap_extension.typed_config(), messageValidationContext().staticValidationVisitor(), + factory); + bootstrap_extensions_.push_back( + factory.createBootstrapExtension(*config, serverFactoryContext())); + } + + if (!bootstrap_.default_socket_interface().empty()) { + auto& sock_name = bootstrap_.default_socket_interface(); + auto sock = const_cast(Network::socketInterface(sock_name)); + if (sock != nullptr) { + Network::SocketInterfaceSingleton::clear(); + Network::SocketInterfaceSingleton::initialize(sock); + } + } + // Workers get created first so they register for thread local updates. 
listener_manager_ = std::make_unique( *this, listener_component_factory_, worker_factory_, bootstrap_.enable_dispatcher_stats()); @@ -378,6 +432,24 @@ void InstanceImpl::initialize(const Options& options, dispatcher_->initializeStats(stats_store_, "server."); } + if (initial_config.admin().address()) { + if (initial_config.admin().accessLogPath().empty()) { + throw EnvoyException("An admin access log path is required for a listening server."); + } + ENVOY_LOG(info, "admin address: {}", initial_config.admin().address()->asString()); + admin_->startHttpListener(initial_config.admin().accessLogPath(), options.adminAddressPath(), + initial_config.admin().address(), + initial_config.admin().socketOptions(), + stats_store_.createScope("listener.admin.")); + } else { + ENVOY_LOG(warn, "No admin address given, so no admin HTTP server started."); + } + config_tracker_entry_ = + admin_->getConfigTracker().add("bootstrap", [this] { return dumpBootstrapConfig(); }); + if (initial_config.admin().address()) { + admin_->addListenerToHandler(handler_.get()); + } + // The broad order of initialization from this point on is the following: // 1. Statically provisioned configuration (bootstrap) are loaded. // 2. Cluster manager is created and all primary clusters (i.e. with endpoint assignments @@ -427,8 +499,8 @@ void InstanceImpl::initialize(const Options& options, // instantiated (which in turn relies on runtime...). Runtime::LoaderSingleton::get().initialize(clusterManager()); - // If RTDS was not configured the `onRuntimeReady` callback is immediately invoked. 
- Runtime::LoaderSingleton::get().startRtdsSubscriptions([this]() { onRuntimeReady(); }); + clusterManager().setPrimaryClustersInitializedCb( + [this]() { onClusterManagerPrimaryInitializationComplete(); }); for (Stats::SinkPtr& sink : config_.statsSinks()) { stats_store_.addSink(*sink); @@ -444,6 +516,11 @@ void InstanceImpl::initialize(const Options& options, guard_dog_ = std::make_unique(stats_store_, config_, *api_); } +void InstanceImpl::onClusterManagerPrimaryInitializationComplete() { + // If RTDS was not configured the `onRuntimeReady` callback is immediately invoked. + Runtime::LoaderSingleton::get().startRtdsSubscriptions([this]() { onRuntimeReady(); }); +} + void InstanceImpl::onRuntimeReady() { // Begin initializing secondary clusters after RTDS configuration has been applied. clusterManager().initializeSecondaryClusters(bootstrap_); @@ -462,6 +539,15 @@ void InstanceImpl::onRuntimeReady() { *config_.clusterManager(), *local_info_, *admin_, *singleton_manager_, thread_local_, messageValidationContext().dynamicValidationVisitor(), *api_); } + + // If there is no global limit to the number of active connections, warn on startup. + // TODO (tonya11en): Move this functionality into the overload manager. + if (!runtime().snapshot().get(Network::ListenerImpl::GlobalMaxCxRuntimeKey)) { + ENVOY_LOG(warn, + "there is no configured limit to the number of allowed active connections. Set a " + "limit via the runtime key {}", + Network::ListenerImpl::GlobalMaxCxRuntimeKey); + } } void InstanceImpl::startWorkers() { @@ -545,13 +631,14 @@ RunHelper::RunHelper(Instance& instance, const Options& options, Event::Dispatch return; } - const auto type_url = Config::getTypeUrl( - envoy::config::core::v3::ApiVersion::V2); + const auto type_urls = + Config::getAllVersionTypeUrls(); // Pause RDS to ensure that we don't send any requests until we've // subscribed to all the RDS resources. 
The subscriptions happen in the init callbacks, // so we pause RDS until we've completed all the callbacks. + Config::ScopedResume maybe_resume_rds; if (cm.adsMux()) { - cm.adsMux()->pause(type_url); + maybe_resume_rds = cm.adsMux()->pause(type_urls); } ENVOY_LOG(info, "all clusters initialized. initializing init manager"); @@ -559,9 +646,7 @@ RunHelper::RunHelper(Instance& instance, const Options& options, Event::Dispatch // Now that we're execute all the init callbacks we can resume RDS // as we've subscribed to all the statically defined RDS resources. - if (cm.adsMux()) { - cm.adsMux()->resume(type_url); - } + // This is done by tearing down the maybe_resume_rds Cleanup object. }); } diff --git a/source/server/server.h b/source/server/server.h index 7670ff08ab7d6..22d2a8dd2c9d2 100644 --- a/source/server/server.h +++ b/source/server/server.h @@ -10,6 +10,7 @@ #include "envoy/config/bootstrap/v3/bootstrap.pb.h" #include "envoy/event/timer.h" +#include "envoy/server/bootstrap_extension_config.h" #include "envoy/server/drain_manager.h" #include "envoy/server/guarddog.h" #include "envoy/server/instance.h" @@ -35,8 +36,8 @@ #include "common/secret/secret_manager_impl.h" #include "common/upstream/health_discovery_service.h" +#include "server/admin/admin.h" #include "server/configuration_impl.h" -#include "server/http/admin.h" #include "server/listener_hooks.h" #include "server/listener_manager_impl.h" #include "server/overload_manager_impl.h" @@ -53,6 +54,7 @@ namespace Server { */ #define ALL_SERVER_STATS(COUNTER, GAUGE, HISTOGRAM) \ COUNTER(debug_assertion_failures) \ + COUNTER(envoy_bug_failures) \ COUNTER(dynamic_unknown_fields) \ COUNTER(static_unknown_fields) \ GAUGE(concurrency, NeverImport) \ @@ -116,8 +118,8 @@ class InstanceUtil : Logger::Loggable { * Load a bootstrap config and perform validation. * @param bootstrap supplies the bootstrap to fill. * @param options supplies the server options. 
- * @param api reference to the Api object * @param validation_visitor message validation visitor instance. + * @param api reference to the Api object */ static void loadBootstrapConfig(envoy::config::bootstrap::v3::Bootstrap& bootstrap, const Options& options, @@ -160,7 +162,7 @@ class ServerFactoryContextImpl : public Configuration::ServerFactoryContext, ProtobufMessage::ValidationContext& messageValidationContext() override { return server_.messageValidationContext(); } - Envoy::Runtime::RandomGenerator& random() override { return server_.random(); } + Envoy::Random::RandomGenerator& random() override { return server_.random(); } Envoy::Runtime::Loader& runtime() override { return server_.runtime(); } Stats::Scope& scope() override { return *server_scope_; } Singleton::Manager& singletonManager() override { return server_.singletonManager(); } @@ -170,19 +172,23 @@ class ServerFactoryContextImpl : public Configuration::ServerFactoryContext, Api::Api& api() override { return server_.api(); } Grpc::Context& grpcContext() override { return server_.grpcContext(); } Envoy::Server::DrainManager& drainManager() override { return server_.drainManager(); } + ServerLifecycleNotifier& lifecycleNotifier() override { return server_.lifecycleNotifier(); } + std::chrono::milliseconds statsFlushInterval() const override { + return server_.statsFlushInterval(); + } // Configuration::TransportSocketFactoryContext Ssl::ContextManager& sslContextManager() override { return server_.sslContextManager(); } Secret::SecretManager& secretManager() override { return server_.secretManager(); } Stats::Store& stats() override { return server_.stats(); } - Init::Manager* initManager() override { return &server_.initManager(); } + Init::Manager& initManager() override { return server_.initManager(); } ProtobufMessage::ValidationVisitor& messageValidationVisitor() override { // Server has two message validation visitors, one for static and // other for dynamic configuration. 
Choose the dynamic validation // visitor if server's init manager indicates that the server is // in the Initialized state, as this state is engaged right after // the static configuration (e.g., bootstrap) has been completed. - return initManager()->state() == Init::Manager::State::Initialized + return initManager().state() == Init::Manager::State::Initialized ? server_.messageValidationContext().dynamicValidationVisitor() : server_.messageValidationContext().staticValidationVisitor(); } @@ -206,7 +212,7 @@ class InstanceImpl final : Logger::Loggable, Network::Address::InstanceConstSharedPtr local_address, ListenerHooks& hooks, HotRestart& restarter, Stats::StoreRoot& store, Thread::BasicLockable& access_log_lock, ComponentFactory& component_factory, - Runtime::RandomGeneratorPtr&& random_generator, ThreadLocal::Instance& tls, + Random::RandomGeneratorPtr&& random_generator, ThreadLocal::Instance& tls, Thread::ThreadFactory& thread_factory, Filesystem::Instance& file_system, std::unique_ptr process_context); @@ -232,7 +238,7 @@ class InstanceImpl final : Logger::Loggable, Secret::SecretManager& secretManager() override { return *secret_manager_; } Envoy::MutexTracer* mutexTracer() override { return mutex_tracer_; } OverloadManager& overloadManager() override { return *overload_manager_; } - Runtime::RandomGenerator& random() override { return *random_generator_; } + Random::RandomGenerator& random() override { return *random_generator_; } Runtime::Loader& runtime() override; void shutdown() override; bool isShutdown() final { return shutdown_; } @@ -286,6 +292,7 @@ class InstanceImpl final : Logger::Loggable, void notifyCallbacksForStage( Stage stage, Event::PostCb completion_cb = [] {}); void onRuntimeReady(); + void onClusterManagerPrimaryInitializationComplete(); using LifecycleNotifierCallbacks = std::list; using LifecycleNotifierCompletionCallbacks = std::list; @@ -315,13 +322,14 @@ class InstanceImpl final : Logger::Loggable, Stats::StoreRoot& stats_store_; 
std::unique_ptr server_stats_; Assert::ActionRegistrationPtr assert_action_registration_; + Assert::ActionRegistrationPtr envoy_bug_action_registration_; ThreadLocal::Instance& thread_local_; Api::ApiPtr api_; Event::DispatcherPtr dispatcher_; std::unique_ptr admin_; Singleton::ManagerPtr singleton_manager_; Network::ConnectionHandlerPtr handler_; - Runtime::RandomGeneratorPtr random_generator_; + Random::RandomGeneratorPtr random_generator_; std::unique_ptr runtime_singleton_; std::unique_ptr ssl_context_manager_; ProdListenerComponentFactory listener_component_factory_; @@ -345,6 +353,7 @@ class InstanceImpl final : Logger::Loggable, Upstream::ProdClusterInfoFactory info_factory_; Upstream::HdsDelegatePtr hds_delegate_; std::unique_ptr overload_manager_; + std::vector bootstrap_extensions_; Envoy::MutexTracer* mutex_tracer_; Grpc::ContextImpl grpc_context_; Http::ContextImpl http_context_; @@ -384,6 +393,9 @@ class MetricSnapshotImpl : public Stats::MetricSnapshot { const std::vector>& histograms() override { return histograms_; } + const std::vector>& textReadouts() override { + return text_readouts_; + } private: std::vector snapped_counters_; @@ -392,6 +404,8 @@ class MetricSnapshotImpl : public Stats::MetricSnapshot { std::vector> gauges_; std::vector snapped_histograms_; std::vector> histograms_; + std::vector snapped_text_readouts_; + std::vector> text_readouts_; }; } // namespace Server diff --git a/source/server/transport_socket_config_impl.h b/source/server/transport_socket_config_impl.h index 9e5bb4639e929..560b9cf61aed6 100644 --- a/source/server/transport_socket_config_impl.h +++ b/source/server/transport_socket_config_impl.h @@ -15,7 +15,7 @@ class TransportSocketFactoryContextImpl : public TransportSocketFactoryContext { TransportSocketFactoryContextImpl( Server::Admin& admin, Ssl::ContextManager& context_manager, Stats::Scope& stats_scope, Upstream::ClusterManager& cm, const LocalInfo::LocalInfo& local_info, - Event::Dispatcher& dispatcher, 
Envoy::Runtime::RandomGenerator& random, Stats::Store& stats, + Event::Dispatcher& dispatcher, Envoy::Random::RandomGenerator& random, Stats::Store& stats, Singleton::Manager& singleton_manager, ThreadLocal::SlotAllocator& tls, ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api) : admin_(admin), context_manager_(context_manager), stats_scope_(stats_scope), @@ -39,9 +39,12 @@ class TransportSocketFactoryContextImpl : public TransportSocketFactoryContext { Upstream::ClusterManager& clusterManager() override { return cluster_manager_; } const LocalInfo::LocalInfo& localInfo() const override { return local_info_; } Event::Dispatcher& dispatcher() override { return dispatcher_; } - Envoy::Runtime::RandomGenerator& random() override { return random_; } + Envoy::Random::RandomGenerator& random() override { return random_; } Stats::Store& stats() override { return stats_; } - Init::Manager* initManager() override { return init_manager_; } + Init::Manager& initManager() override { + ASSERT(init_manager_ != nullptr); + return *init_manager_; + } Singleton::Manager& singletonManager() override { return singleton_manager_; } ThreadLocal::SlotAllocator& threadLocal() override { return tls_; } ProtobufMessage::ValidationVisitor& messageValidationVisitor() override { @@ -56,7 +59,7 @@ class TransportSocketFactoryContextImpl : public TransportSocketFactoryContext { Upstream::ClusterManager& cluster_manager_; const LocalInfo::LocalInfo& local_info_; Event::Dispatcher& dispatcher_; - Envoy::Runtime::RandomGenerator& random_; + Envoy::Random::RandomGenerator& random_; Stats::Store& stats_; Singleton::Manager& singleton_manager_; ThreadLocal::SlotAllocator& tls_; diff --git a/source/server/worker_impl.cc b/source/server/worker_impl.cc index 17e02486e5f80..eae51fa688376 100644 --- a/source/server/worker_impl.cc +++ b/source/server/worker_impl.cc @@ -5,6 +5,7 @@ #include "envoy/event/dispatcher.h" #include "envoy/event/timer.h" +#include "envoy/network/exception.h" 
#include "envoy/server/configuration.h" #include "envoy/thread_local/thread_local.h" @@ -45,6 +46,7 @@ void WorkerImpl::addListener(absl::optional overridden_listener, hooks_.onWorkerListenerAdded(); completion(true); } catch (const Network::CreateListenerException& e) { + ENVOY_LOG(error, "failed to add listener on worker: {}", e.what()); completion(false); } }); @@ -81,8 +83,20 @@ void WorkerImpl::removeFilterChains(uint64_t listener_tag, void WorkerImpl::start(GuardDog& guard_dog) { ASSERT(!thread_); - thread_ = - api_.threadFactory().createThread([this, &guard_dog]() -> void { threadRoutine(guard_dog); }); + + // In posix, thread names are limited to 15 characters, so contrive to make + // sure all interesting data fits there. The naming occurs in + // ListenerManagerImpl's constructor: absl::StrCat("worker_", i). Let's say we + // have 9999 threads. We'd need, so we need 7 bytes for "worker_", 4 bytes + // for the thread index, leaving us 4 bytes left to distinguish between the + // two threads used per dispatcher. We'll call this one "dsp:" and the + // one allocated in guarddog_impl.cc "dog:". + // + // TODO(jmarantz): consider refactoring how this naming works so this naming + // architecture is centralized, resulting in clearer names. 
+ Thread::Options options{absl::StrCat("wrk:", dispatcher_->name())}; + thread_ = api_.threadFactory().createThread( + [this, &guard_dog]() -> void { threadRoutine(guard_dog); }, options); } void WorkerImpl::initializeStats(Stats::Scope& scope) { dispatcher_->initializeStats(scope); } diff --git a/test/BUILD b/test/BUILD index 41cf8c14bcb36..ab4a56a7d42e0 100644 --- a/test/BUILD +++ b/test/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() # TODO(htuch): remove when we have a solution for https://github.com/bazelbuild/bazel/issues/3510 @@ -23,9 +23,6 @@ envoy_cc_test_library( "test_runner.h", ], hdrs = ["test_listener.h"], - external_deps = [ - "abseil_symbolize", - ], deps = [ "//source/common/common:logger_lib", "//source/common/common:thread_lib", @@ -37,8 +34,5 @@ envoy_cc_test_library( "//test/test_common:environment_lib", "//test/test_common:global_lib", "//test/test_common:printers_lib", - ] + select({ - "//bazel:disable_signal_trace": [], - "//conditions:default": ["//source/common/signal:sigaction_lib"], - }), + ], ) diff --git a/test/README.md b/test/README.md index 3617c73719c1d..2746efe98c8db 100644 --- a/test/README.md +++ b/test/README.md @@ -119,3 +119,20 @@ test infrastructure that wants to be agnostic to which `TimeSystem` is used in a test. When no `TimeSystem` is instantiated in a test, the `Event::GlobalTimeSystem` will lazy-initialize itself into a concrete `TimeSystem`. Currently this is `TestRealTimeSystem` but will be changed in the future to `SimulatedTimeSystem`. + + +## Benchmark tests + +Envoy uses [Google Benchmark](https://github.com/google/benchmark/) for +microbenchmarks. There are custom bazel rules, `envoy_cc_benchmark_binary` and +`envoy_benchmark_test`, to execute them locally and in CI environments +respectively. 
`envoy_benchmark_test` rules call the benchmark binary from a +[script](https://github.com/envoyproxy/envoy/blob/master/bazel/test_for_benchmark_wrapper.sh) +which runs the benchmark with a minimal number of iterations and skipping +expensive benchmarks to quickly verify that the binary is able to run to +completion. In order to collect meaningful bechmarks, `bazel run -c opt` the +benchmark binary target on a quiescent machine. + +If you would like to detect when your benchmark test is running under the +wrapper, call +[`Envoy::benchmark::skipExpensiveBechmarks()`](https://github.com/envoyproxy/envoy/blob/master/test/benchmark/main.h). diff --git a/test/benchmark/BUILD b/test/benchmark/BUILD index 7bfc766727b5f..afcb2602898de 100644 --- a/test/benchmark/BUILD +++ b/test/benchmark/BUILD @@ -1,22 +1,23 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test_library( name = "main", srcs = ["main.cc"], + hdrs = ["main.h"], external_deps = [ - "abseil_symbolize", "benchmark", + "tclap", + ], + deps = [ + "//source/common/common:minimal_logger_lib", + "//test/test_common:environment_lib", ], - deps = select({ - "//bazel:disable_signal_trace": [], - "//conditions:default": ["//source/common/signal:sigaction_lib"], - }), ) diff --git a/test/benchmark/main.cc b/test/benchmark/main.cc index ae39333d72a82..3c79ff36b2e0f 100644 --- a/test/benchmark/main.cc +++ b/test/benchmark/main.cc @@ -1,27 +1,50 @@ // NOLINT(namespace-envoy) // This is an Envoy driver for benchmarks. 
+#include "test/benchmark/main.h" + +#include "common/common/logger.h" + +#include "test/test_common/environment.h" #include "benchmark/benchmark.h" +#include "tclap/CmdLine.h" -#ifdef ENVOY_HANDLE_SIGNALS -#include "common/signal/signal_action.h" -#endif +using namespace Envoy; -#include "absl/debugging/symbolize.h" +static bool skip_expensive_benchmarks = false; -// Boilerplate main(), which discovers benchmarks and runs them. +// Boilerplate main(), which discovers benchmarks and runs them. This uses two +// different flag parsers, so the order of flags matters: flags defined here +// must be passed first, and flags defined in benchmark::Initialize second, +// separated by --. +// TODO(pgenera): convert this to abseil/flags/ when benchmark also adopts abseil. int main(int argc, char** argv) { -#ifndef __APPLE__ - absl::InitializeSymbolizer(argv[0]); -#endif -#ifdef ENVOY_HANDLE_SIGNALS - // Enabled by default. Control with "bazel --define=signal_trace=disabled" - Envoy::SignalAction handle_sigs; -#endif - - benchmark::Initialize(&argc, argv); - if (benchmark::ReportUnrecognizedArguments(argc, argv)) { - return 1; + TestEnvironment::initializeTestMain(argv[0]); + + // NOLINTNEXTLINE(clang-analyzer-optin.cplusplus.VirtualCall) + TCLAP::CmdLine cmd("envoy-benchmark-test", ' ', "0.1"); + TCLAP::SwitchArg skip_switch("s", "skip_expensive_benchmarks", + "skip or minimize expensive benchmarks", cmd, false); + + cmd.setExceptionHandling(false); + try { + cmd.parse(argc, argv); + } catch (const TCLAP::ExitException& e) { + // parse() throws an ExitException with status 0 after printing the output + // for --help and --version. 
+ return 0; } - benchmark::RunSpecifiedBenchmarks(); + + skip_expensive_benchmarks = skip_switch.getValue(); + + ::benchmark::Initialize(&argc, argv); + + if (skip_expensive_benchmarks) { + ENVOY_LOG_MISC( + critical, + "Expensive benchmarks are being skipped; see test/README.md for more information"); + } + ::benchmark::RunSpecifiedBenchmarks(); } + +bool Envoy::benchmark::skipExpensiveBenchmarks() { return skip_expensive_benchmarks; } diff --git a/test/benchmark/main.h b/test/benchmark/main.h new file mode 100644 index 0000000000000..efb6797a74ef2 --- /dev/null +++ b/test/benchmark/main.h @@ -0,0 +1,13 @@ +#pragma once + +/** + * Benchmarks can use this to skip or hurry through long-running tests in CI. + */ + +namespace Envoy { +namespace benchmark { + +bool skipExpensiveBenchmarks(); + +} +} // namespace Envoy diff --git a/test/common/access_log/BUILD b/test/common/access_log/BUILD index 836975e9ae34e..91e0a1665c412 100644 --- a/test/common/access_log/BUILD +++ b/test/common/access_log/BUILD @@ -1,56 +1,12 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", - "envoy_benchmark_test", - "envoy_cc_benchmark_binary", - "envoy_cc_fuzz_test", "envoy_cc_test", "envoy_package", - "envoy_proto_library", ) -envoy_package() - -envoy_proto_library( - name = "access_log_formatter_fuzz_proto", - srcs = ["access_log_formatter_fuzz.proto"], - deps = ["//test/fuzz:common_proto"], -) - -envoy_cc_fuzz_test( - name = "access_log_formatter_fuzz_test", - srcs = ["access_log_formatter_fuzz_test.cc"], - corpus = "access_log_formatter_corpus", - dictionaries = [ - "access_log_formatter_fuzz_test.dict", - "//test/fuzz:headers.dict", - ], - deps = [ - ":access_log_formatter_fuzz_proto_cc_proto", - "//source/common/access_log:access_log_formatter_lib", - "//test/fuzz:utility_lib", - ], -) +licenses(["notice"]) # Apache 2 -envoy_cc_test( - name = "access_log_formatter_test", - srcs = ["access_log_formatter_test.cc"], - deps = [ - 
"//source/common/access_log:access_log_formatter_lib", - "//source/common/common:utility_lib", - "//source/common/http:header_map_lib", - "//source/common/router:string_accessor_lib", - "//test/mocks/api:api_mocks", - "//test/mocks/http:http_mocks", - "//test/mocks/ssl:ssl_mocks", - "//test/mocks/stream_info:stream_info_mocks", - "//test/mocks/upstream:upstream_mocks", - "//test/test_common:threadsafe_singleton_injector_lib", - "//test/test_common:utility_lib", - "@envoy_api//envoy/config/core/v3:pkg_cc_proto", - ], -) +envoy_package() envoy_cc_test( name = "access_log_impl_test", @@ -66,7 +22,7 @@ envoy_cc_test( "//test/mocks/event:event_mocks", "//test/mocks/filesystem:filesystem_mocks", "//test/mocks/runtime:runtime_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "//test/mocks/upstream:upstream_mocks", "//test/test_common:registry_lib", "//test/test_common:utility_lib", @@ -87,25 +43,3 @@ envoy_cc_test( "//test/mocks/filesystem:filesystem_mocks", ], ) - -envoy_cc_benchmark_binary( - name = "access_log_formatter_speed_test", - srcs = ["access_log_formatter_speed_test.cc"], - external_deps = [ - "benchmark", - ], - deps = [ - "//source/common/access_log:access_log_formatter_lib", - "//source/common/http:header_map_lib", - "//source/common/network:address_lib", - "//test/common/stream_info:test_util", - "//test/mocks/http:http_mocks", - "//test/mocks/stream_info:stream_info_mocks", - "//test/test_common:printers_lib", - ], -) - -envoy_benchmark_test( - name = "access_log_formatter_speed_test_benchmark_test", - benchmark_binary = "access_log_formatter_speed_test", -) diff --git a/test/common/access_log/access_log_impl_test.cc b/test/common/access_log/access_log_impl_test.cc index c1194630dcee2..788d8885ba50a 100644 --- a/test/common/access_log/access_log_impl_test.cc +++ b/test/common/access_log/access_log_impl_test.cc @@ -19,7 +19,7 @@ #include "test/mocks/event/mocks.h" #include "test/mocks/filesystem/mocks.h" #include 
"test/mocks/runtime/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "test/mocks/upstream/mocks.h" #include "test/test_common/printers.h" #include "test/test_common/registry.h" @@ -37,9 +37,10 @@ namespace Envoy { namespace AccessLog { namespace { -envoy::config::accesslog::v3::AccessLog parseAccessLogFromV2Yaml(const std::string& yaml) { +envoy::config::accesslog::v3::AccessLog parseAccessLogFromV3Yaml(const std::string& yaml, + bool avoid_boosting = true) { envoy::config::accesslog::v3::AccessLog access_log; - TestUtility::loadFromYamlAndValidate(yaml, access_log); + TestUtility::loadFromYamlAndValidate(yaml, access_log, false, avoid_boosting); return access_log; } @@ -72,7 +73,7 @@ name: accesslog path: /dev/null )EOF"; - InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); EXPECT_CALL(*file_, write(_)); stream_info_.response_flags_ = StreamInfo::ResponseFlag::UpstreamConnectionFailure; @@ -95,7 +96,7 @@ name: accesslog path: /dev/null )EOF"; - InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); EXPECT_CALL(*file_, write(_)); @@ -118,7 +119,7 @@ name: accesslog format: "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH):256% %PROTOCOL%\" %RESPONSE_CODE% %RESPONSE_FLAGS% %ROUTE_NAME% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" \"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" \"%REQ(:AUTHORITY)%\"\n" )EOF"; - InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); EXPECT_CALL(*file_, write(_)); stream_info_.route_name_ = 
"route-test-name"; @@ -144,7 +145,7 @@ name: accesslog path: /dev/null )EOF"; - InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); EXPECT_CALL(*file_, write(_)); response_headers_.addCopy(Http::Headers::get().EnvoyUpstreamServiceTime, "999"); @@ -163,7 +164,7 @@ name: accesslog path: /dev/null )EOF"; - InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); EXPECT_CALL(*file_, write(_)); log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_); @@ -183,7 +184,7 @@ name: accesslog path: /dev/null )EOF"; - InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); EXPECT_CALL(*file_, write(_)); log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_); @@ -215,7 +216,7 @@ name: accesslog path: /dev/null )EOF"; - InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); EXPECT_CALL(*file_, write(_)).Times(0); log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_); @@ -253,7 +254,7 @@ name: accesslog path: /dev/null )EOF"; - InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); EXPECT_CALL(*file_, write(_)).Times(3); log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_); @@ -278,8 +279,8 @@ name: accesslog path: /dev/null )EOF"; - Runtime::RandomGeneratorImpl random; - InstanceSharedPtr log = 
AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + Random::RandomGeneratorImpl random; + InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); // Value is taken from random generator. EXPECT_CALL(context_.random_, random()).WillOnce(Return(42)); @@ -321,8 +322,8 @@ name: accesslog path: /dev/null )EOF"; - Runtime::RandomGeneratorImpl random; - InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + Random::RandomGeneratorImpl random; + InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); // Value is taken from random generator. EXPECT_CALL(context_.random_, random()).WillOnce(Return(42)); @@ -365,7 +366,7 @@ name: accesslog path: /dev/null )EOF"; - InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); // Value should not be taken from x-request-id. 
request_headers_.addCopy("x-request-id", "000000ff-0000-0000-0000-000000000000"); @@ -392,7 +393,7 @@ name: accesslog path: /dev/null )EOF"; - InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); EXPECT_CALL(*file_, write(_)); log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_); @@ -411,7 +412,7 @@ name: accesslog path: /dev/null )EOF"; - InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); Http::TestRequestHeaderMapImpl header_map{}; stream_info_.health_check_request_ = true; @@ -430,7 +431,7 @@ name: accesslog path: "/dev/null" )EOF"; - InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); Http::TestRequestHeaderMapImpl header_map{}; EXPECT_CALL(*file_, write(_)); @@ -439,7 +440,7 @@ name: accesslog } TEST_F(AccessLogImplTest, RequestTracing) { - Runtime::RandomGeneratorImpl random; + Random::RandomGeneratorImpl random; const std::string yaml = R"EOF( name: accesslog @@ -450,7 +451,7 @@ name: accesslog path: /dev/null )EOF"; - InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); { Http::TestRequestHeaderMapImpl forced_header{{"x-request-id", random.uuid()}}; @@ -487,7 +488,7 @@ name: accesslog path: /dev/null )EOF"; - EXPECT_THROW(AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context), + EXPECT_THROW(AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context), EnvoyException); } @@ -501,7 +502,7 @@ name: accesslog path: /dev/null )EOF"; - 
EXPECT_THROW(AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context), + EXPECT_THROW(AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context), EnvoyException); } } @@ -524,7 +525,7 @@ name: accesslog path: /dev/null )EOF"; - InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); stream_info_.response_code_ = 500; { @@ -560,7 +561,7 @@ name: accesslog path: /dev/null )EOF"; - InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); stream_info_.response_code_ = 500; { @@ -603,7 +604,7 @@ name: accesslog path: /dev/null )EOF"; - InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); stream_info_.response_code_ = 500; { @@ -703,7 +704,7 @@ name: accesslog path: /dev/null )EOF"; - InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); stream_info_.response_code_ = 499; EXPECT_CALL(runtime_.snapshot_, getInteger("hello", 499)).WillOnce(Return(499)); @@ -728,7 +729,7 @@ name: accesslog path: /dev/null )EOF"; - InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); EXPECT_CALL(*file_, write(_)).Times(0); log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_); @@ -752,7 +753,7 @@ name: accesslog path: /dev/null )EOF"; - InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + InstanceSharedPtr log = 
AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); EXPECT_CALL(*file_, write(_)).Times(0); log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_); @@ -782,7 +783,7 @@ name: accesslog path: /dev/null )EOF"; - InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); EXPECT_CALL(*file_, write(_)).Times(0); log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_); @@ -817,7 +818,7 @@ name: accesslog path: /dev/null )EOF"; - InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); EXPECT_CALL(*file_, write(_)).Times(0); log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_); @@ -857,7 +858,7 @@ name: accesslog path: /dev/null )EOF"; - InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); EXPECT_CALL(*file_, write(_)).Times(0); log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_); @@ -879,7 +880,7 @@ name: accesslog path: /dev/null )EOF"; - InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); EXPECT_CALL(*file_, write(_)).Times(0); log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_); @@ -906,7 +907,7 @@ name: accesslog path: /dev/null )EOF"; - InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); EXPECT_CALL(*file_, write(_)).Times(0); 
log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_); @@ -945,12 +946,15 @@ name: accesslog - SI - IH - DPE + - UMSDR + - RFCF + - NFCF typed_config: "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog path: /dev/null )EOF"; - static_assert(StreamInfo::ResponseFlag::LastFlag == 0x40000, + static_assert(StreamInfo::ResponseFlag::LastFlag == 0x200000, "A flag has been added. Fix this code."); const std::vector all_response_flags = { @@ -973,9 +977,11 @@ name: accesslog StreamInfo::ResponseFlag::StreamIdleTimeout, StreamInfo::ResponseFlag::InvalidEnvoyRequestHeaders, StreamInfo::ResponseFlag::DownstreamProtocolError, - }; + StreamInfo::ResponseFlag::UpstreamMaxStreamDurationReached, + StreamInfo::ResponseFlag::ResponseFromCacheFilter, + StreamInfo::ResponseFlag::NoFilterConfigFound}; - InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); for (const auto response_flag : all_response_flags) { TestStreamInfo stream_info; @@ -998,14 +1004,16 @@ name: accesslog )EOF"; EXPECT_THROW_WITH_MESSAGE( - AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_), + AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_), ProtoValidationException, "Proto constraint validation failed (AccessLogValidationError.Filter: [\"embedded message " "failed validation\"] | caused by AccessLogFilterValidationError.ResponseFlagFilter: " "[\"embedded message failed validation\"] | caused by " "ResponseFlagFilterValidationError.Flags[i]: [\"value must be in list \" [\"LH\" \"UH\" " "\"UT\" \"LR\" \"UR\" \"UF\" \"UC\" \"UO\" \"NR\" \"DI\" \"FI\" \"RL\" \"UAEX\" \"RLSE\" " - "\"DC\" \"URX\" \"SI\" \"IH\" \"DPE\"]]): name: \"accesslog\"\nfilter {\n " + "\"DC\" \"URX\" \"SI\" \"IH\" \"DPE\" \"UMSDR\" \"RFCF\" \"NFCF\"]]): name: " + "\"accesslog\"\nfilter {\n " + " " "response_flag_filter {\n 
flags: \"UnsupportedFlag\"\n }\n}\ntyped_config {\n " "[type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog] {\n path: \"/dev/null\"\n " "}\n}\n"); @@ -1024,14 +1032,16 @@ name: accesslog )EOF"; EXPECT_THROW_WITH_MESSAGE( - AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_), + AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_), ProtoValidationException, "Proto constraint validation failed (AccessLogValidationError.Filter: [\"embedded message " "failed validation\"] | caused by AccessLogFilterValidationError.ResponseFlagFilter: " "[\"embedded message failed validation\"] | caused by " "ResponseFlagFilterValidationError.Flags[i]: [\"value must be in list \" [\"LH\" \"UH\" " "\"UT\" \"LR\" \"UR\" \"UF\" \"UC\" \"UO\" \"NR\" \"DI\" \"FI\" \"RL\" \"UAEX\" \"RLSE\" " - "\"DC\" \"URX\" \"SI\" \"IH\" \"DPE\"]]): name: \"accesslog\"\nfilter {\n " + "\"DC\" \"URX\" \"SI\" \"IH\" \"DPE\" \"UMSDR\" \"RFCF\" \"NFCF\"]]): name: " + "\"accesslog\"\nfilter {\n " + " " "response_flag_filter {\n flags: \"UnsupportedFlag\"\n }\n}\ntyped_config {\n " "[type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog] {\n path: \"/dev/null\"\n " "}\n}\n"); @@ -1046,7 +1056,7 @@ name: accesslog format: "%GRPC_STATUS%\n" )EOF"; - InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); { EXPECT_CALL(*file_, write(_)); response_trailers_.addCopy(Http::Headers::get().GrpcStatus, "0"); @@ -1055,7 +1065,7 @@ name: accesslog response_trailers_.remove(Http::Headers::get().GrpcStatus); } { - InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); EXPECT_CALL(*file_, write(_)); response_headers_.addCopy(Http::Headers::get().GrpcStatus, "1"); log->log(&request_headers_, &response_headers_, 
&response_trailers_, stream_info_); @@ -1063,7 +1073,7 @@ name: accesslog response_headers_.remove(Http::Headers::get().GrpcStatus); } { - InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); EXPECT_CALL(*file_, write(_)); response_headers_.addCopy(Http::Headers::get().GrpcStatus, "-1"); log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_); @@ -1093,7 +1103,7 @@ name: accesslog for (int i = 0; i < desc->value_count(); i++) { InstanceSharedPtr log = AccessLogFactory::fromProto( - parseAccessLogFromV2Yaml(fmt::format(yaml_template, desc->value(i)->name())), context_); + parseAccessLogFromV3Yaml(fmt::format(yaml_template, desc->value(i)->name())), context_); EXPECT_CALL(*file_, write(_)); @@ -1115,7 +1125,7 @@ name: accesslog path: /dev/null )EOF"; - EXPECT_THROW_WITH_REGEX(AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_), + EXPECT_THROW_WITH_REGEX(AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_), EnvoyException, ".*\"NOT_A_VALID_CODE\" for type TYPE_ENUM.*"); } @@ -1132,7 +1142,7 @@ name: accesslog )EOF"; const InstanceSharedPtr log = - AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); response_trailers_.addCopy(Http::Headers::get().GrpcStatus, "1"); @@ -1165,7 +1175,7 @@ name: accesslog stream_info_.response_code_ = pair.second; const InstanceSharedPtr log = AccessLogFactory::fromProto( - parseAccessLogFromV2Yaml(fmt::format(yaml_template, pair.first)), context_); + parseAccessLogFromV3Yaml(fmt::format(yaml_template, pair.first)), context_); EXPECT_CALL(*file_, write(_)); log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_); @@ -1185,7 +1195,7 @@ name: accesslog )EOF"; const InstanceSharedPtr log = - 
AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); EXPECT_CALL(*file_, write(_)); log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_); @@ -1205,7 +1215,7 @@ name: accesslog )EOF"; const InstanceSharedPtr log = - AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); for (int i = 0; i <= static_cast(Grpc::Status::WellKnownGrpcStatus::MaximumKnown); i++) { EXPECT_CALL(*file_, write(_)).Times(i == 0 ? 0 : 1); @@ -1230,7 +1240,7 @@ name: accesslog )EOF"; const InstanceSharedPtr log = - AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); response_trailers_.addCopy(Http::Headers::get().GrpcStatus, "0"); @@ -1251,7 +1261,7 @@ name: accesslog )EOF"; const InstanceSharedPtr log = - AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); EXPECT_CALL(*file_, write(_)); @@ -1259,12 +1269,136 @@ name: accesslog log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_); } +TEST_F(AccessLogImplTest, MetadataFilter) { + const std::string yaml = R"EOF( +name: accesslog +filter: + metadata_filter: + matcher: + filter: "some.namespace" + path: + - key: "a" + - key: "b" + - key: "c" + value: + bool_match: true + +typed_config: + "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog + path: /dev/null + )EOF"; + + TestStreamInfo stream_info; + ProtobufWkt::Struct metadata_val; + auto& fields_a = *metadata_val.mutable_fields(); + auto& struct_b = *fields_a["a"].mutable_struct_value(); + auto& fields_b = *struct_b.mutable_fields(); + auto& struct_c = *fields_b["b"].mutable_struct_value(); + auto& fields_c = *struct_c.mutable_fields(); + 
fields_c["c"].set_bool_value(true); + + stream_info.setDynamicMetadata("some.namespace", metadata_val); + + const InstanceSharedPtr log = + AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); + + EXPECT_CALL(*file_, write(_)).Times(1); + + log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info); + fields_c["c"].set_bool_value(false); + + EXPECT_CALL(*file_, write(_)).Times(0); +} + +// This is a regression test for fuzz bug https://oss-fuzz.com/testcase-detail/4863844862918656 +// where a missing matcher would attempt to create a ValueMatcher and crash in debug mode. Instead, +// the configured metadata filter does not match. +TEST_F(AccessLogImplTest, MetadataFilterNoMatcher) { + const std::string yaml = R"EOF( +name: accesslog +filter: + metadata_filter: + match_if_key_not_found: false +typed_config: + "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog + path: /dev/null + )EOF"; + + TestStreamInfo stream_info; + ProtobufWkt::Struct metadata_val; + stream_info.setDynamicMetadata("some.namespace", metadata_val); + + const InstanceSharedPtr log = + AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); + + // If no matcher is set, then expect no logs. 
+ EXPECT_CALL(*file_, write(_)).Times(0); + log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info); +} + +TEST_F(AccessLogImplTest, MetadataFilterNoKey) { + const std::string default_true_yaml = R"EOF( +name: accesslog +filter: + metadata_filter: + matcher: + filter: "some.namespace" + path: + - key: "x" + value: + bool_match: true + +typed_config: + "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog + path: /dev/null + )EOF"; + + const std::string default_false_yaml = R"EOF( +name: accesslog +filter: + metadata_filter: + matcher: + filter: "some.namespace" + path: + - key: "y" + value: + bool_match: true + match_if_key_not_found: + value: false + +typed_config: + "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog + path: /dev/null + )EOF"; + + TestStreamInfo stream_info; + ProtobufWkt::Struct metadata_val; + auto& fields_a = *metadata_val.mutable_fields(); + auto& struct_b = *fields_a["a"].mutable_struct_value(); + auto& fields_b = *struct_b.mutable_fields(); + fields_b["b"].set_bool_value(true); + + stream_info.setDynamicMetadata("some.namespace", metadata_val); + + const InstanceSharedPtr default_false_log = + AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(default_false_yaml), context_); + EXPECT_CALL(*file_, write(_)).Times(0); + + default_false_log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info); + + const InstanceSharedPtr default_true_log = + AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(default_true_yaml), context_); + EXPECT_CALL(*file_, write(_)).Times(1); + + default_true_log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info); +} + class TestHeaderFilterFactory : public ExtensionFilterFactory { public: ~TestHeaderFilterFactory() override = default; FilterPtr createFilter(const envoy::config::accesslog::v3::ExtensionFilter& config, - Runtime::Loader&, Runtime::RandomGenerator&) override { + Runtime::Loader&, 
Random::RandomGenerator&) override { auto factory_config = Config::Utility::translateToFactoryConfig( config, Envoy::ProtobufMessage::getNullValidationVisitor(), *this); const auto& header_config = @@ -1298,7 +1432,7 @@ name: accesslog path: /dev/null )EOF"; - InstanceSharedPtr logger = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + InstanceSharedPtr logger = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); EXPECT_CALL(*file_, write(_)).Times(0); logger->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_); @@ -1317,7 +1451,7 @@ class SampleExtensionFilter : public Filter { // AccessLog::Filter bool evaluate(const StreamInfo::StreamInfo&, const Http::RequestHeaderMap&, - const Http::ResponseHeaderMap&, const Http::ResponseTrailerMap&) override { + const Http::ResponseHeaderMap&, const Http::ResponseTrailerMap&) const override { if (current_++ == 0) { return true; } @@ -1328,7 +1462,7 @@ class SampleExtensionFilter : public Filter { } private: - uint32_t current_ = 0; + mutable uint32_t current_ = 0; uint32_t sample_rate_; }; @@ -1340,7 +1474,7 @@ class SampleExtensionFilterFactory : public ExtensionFilterFactory { ~SampleExtensionFilterFactory() override = default; FilterPtr createFilter(const envoy::config::accesslog::v3::ExtensionFilter& config, - Runtime::Loader&, Runtime::RandomGenerator&) override { + Runtime::Loader&, Random::RandomGenerator&) override { auto factory_config = Config::Utility::translateToFactoryConfig( config, Envoy::ProtobufMessage::getNullValidationVisitor(), *this); @@ -1375,7 +1509,7 @@ name: accesslog path: /dev/null )EOF"; - InstanceSharedPtr logger = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + InstanceSharedPtr logger = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); // For rate=5 expect 1st request to be recorded, 2nd-5th skipped, and 6th recorded. 
EXPECT_CALL(*file_, write(_)); logger->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_); @@ -1403,7 +1537,7 @@ name: accesslog path: /dev/null )EOF"; - EXPECT_THROW(AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_), + EXPECT_THROW(AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_), EnvoyException); } @@ -1418,7 +1552,7 @@ name: accesslog path: /dev/null )EOF"; - EXPECT_THROW(AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_), + EXPECT_THROW(AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_), EnvoyException); } } diff --git a/test/common/buffer/BUILD b/test/common/buffer/BUILD index d91cc0b573549..bd01534ca6cae 100644 --- a/test/common/buffer/BUILD +++ b/test/common/buffer/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_benchmark_test", @@ -11,6 +9,8 @@ load( "envoy_proto_library", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test_library( @@ -79,6 +79,7 @@ envoy_cc_test( "//source/common/buffer:buffer_lib", "//source/common/buffer:watermark_buffer_lib", "//source/common/network:address_lib", + "//test/test_common:test_runtime_lib", ], ) diff --git a/test/common/buffer/buffer_fuzz.cc b/test/common/buffer/buffer_fuzz.cc index 86e168532c9b2..5ab1bd85c4ae4 100644 --- a/test/common/buffer/buffer_fuzz.cc +++ b/test/common/buffer/buffer_fuzz.cc @@ -68,6 +68,12 @@ void releaseFragmentAllocation(const void* p, size_t, const Buffer::BufferFragme // walk off the edge; the caller should be guaranteeing this. class StringBuffer : public Buffer::Instance { public: + void addDrainTracker(std::function drain_tracker) override { + // Not implemented well. 
+ ASSERT(false); + drain_tracker(); + } + void add(const void* data, uint64_t size) override { FUZZ_ASSERT(start_ + size_ + size <= data_.size()); ::memcpy(mutableEnd(), data, size); @@ -127,6 +133,8 @@ class StringBuffer : public Buffer::Instance { return mutableStart(); } + Buffer::SliceDataPtr extractMutableFrontSlice() override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } + void move(Buffer::Instance& rhs) override { move(rhs, rhs.length()); } void move(Buffer::Instance& rhs, uint64_t length) override { @@ -153,7 +161,8 @@ class StringBuffer : public Buffer::Instance { return 1; } - ssize_t search(const void* data, uint64_t size, size_t start) const override { + ssize_t search(const void* data, uint64_t size, size_t start, size_t length) const override { + UNREFERENCED_PARAMETER(length); return asStringView().find({static_cast(data), size}, start); } diff --git a/test/common/buffer/buffer_speed_test.cc b/test/common/buffer/buffer_speed_test.cc index d9456072b137b..49240c69f3566 100644 --- a/test/common/buffer/buffer_speed_test.cc +++ b/test/common/buffer/buffer_speed_test.cc @@ -291,7 +291,7 @@ static void bufferSearch(benchmark::State& state) { Buffer::OwnedImpl buffer(input); ssize_t result = 0; for (auto _ : state) { - result += buffer.search(Pattern.c_str(), Pattern.length(), 0); + result += buffer.search(Pattern.c_str(), Pattern.length(), 0, 0); } benchmark::DoNotOptimize(result); } @@ -314,7 +314,7 @@ static void bufferSearchPartialMatch(benchmark::State& state) { Buffer::OwnedImpl buffer(input); ssize_t result = 0; for (auto _ : state) { - result += buffer.search(Pattern.c_str(), Pattern.length(), 0); + result += buffer.search(Pattern.c_str(), Pattern.length(), 0, 0); } benchmark::DoNotOptimize(result); } diff --git a/test/common/buffer/owned_impl_test.cc b/test/common/buffer/owned_impl_test.cc index bd06b3233da8f..ce7ec99e3847d 100644 --- a/test/common/buffer/owned_impl_test.cc +++ b/test/common/buffer/owned_impl_test.cc @@ -37,12 +37,21 @@ class OwnedImplTest 
: public testing::Test { static void expectSlices(std::vector> buffer_list, OwnedImpl& buffer) { const auto& buffer_slices = buffer.describeSlicesForTest(); + ASSERT_EQ(buffer_list.size(), buffer_slices.size()); for (uint64_t i = 0; i < buffer_slices.size(); i++) { EXPECT_EQ(buffer_slices[i].data, buffer_list[i][0]); EXPECT_EQ(buffer_slices[i].reservable, buffer_list[i][1]); EXPECT_EQ(buffer_slices[i].capacity, buffer_list[i][2]); } } + + static void expectFirstSlice(std::vector slice_description, OwnedImpl& buffer) { + const auto& buffer_slices = buffer.describeSlicesForTest(); + ASSERT_LE(1, buffer_slices.size()); + EXPECT_EQ(buffer_slices[0].data, slice_description[0]); + EXPECT_EQ(buffer_slices[0].reservable, slice_description[1]); + EXPECT_EQ(buffer_slices[0].capacity, slice_description[2]); + } }; TEST_F(OwnedImplTest, AddBufferFragmentNoCleanup) { @@ -80,6 +89,7 @@ TEST_F(OwnedImplTest, AddEmptyFragment) { BufferFragmentImpl frag2("", 0, [this](const void*, size_t, const BufferFragmentImpl*) { release_callback_called_ = true; }); + BufferFragmentImpl frag3(input, 11, [](const void*, size_t, const BufferFragmentImpl*) {}); Buffer::OwnedImpl buffer; buffer.addBufferFragment(frag1); EXPECT_EQ(11, buffer.length()); @@ -87,7 +97,18 @@ TEST_F(OwnedImplTest, AddEmptyFragment) { buffer.addBufferFragment(frag2); EXPECT_EQ(11, buffer.length()); - buffer.drain(11); + buffer.addBufferFragment(frag3); + EXPECT_EQ(22, buffer.length()); + + // Cover case of copying a buffer with an empty fragment. 
+ Buffer::OwnedImpl buffer2; + buffer2.add(buffer); + + // Cover copyOut + std::unique_ptr outbuf(new char[buffer.length()]); + buffer.copyOut(0, buffer.length(), outbuf.get()); + + buffer.drain(22); EXPECT_EQ(0, buffer.length()); EXPECT_TRUE(release_callback_called_); } @@ -272,7 +293,8 @@ TEST_F(OwnedImplTest, Write) { EXPECT_EQ(0, result.rc_); EXPECT_EQ(1, buffer.length()); - EXPECT_CALL(os_sys_calls, writev(_, _, _)).WillOnce(Return(Api::SysCallSizeResult{-1, EAGAIN})); + EXPECT_CALL(os_sys_calls, writev(_, _, _)) + .WillOnce(Return(Api::SysCallSizeResult{-1, SOCKET_ERROR_AGAIN})); result = buffer.write(io_handle); EXPECT_EQ(Api::IoError::IoErrorCode::Again, result.err_->getErrorCode()); EXPECT_EQ(0, result.rc_); @@ -310,7 +332,8 @@ TEST_F(OwnedImplTest, Read) { EXPECT_EQ(0, buffer.length()); EXPECT_THAT(buffer.describeSlicesForTest(), testing::IsEmpty()); - EXPECT_CALL(os_sys_calls, readv(_, _, _)).WillOnce(Return(Api::SysCallSizeResult{-1, EAGAIN})); + EXPECT_CALL(os_sys_calls, readv(_, _, _)) + .WillOnce(Return(Api::SysCallSizeResult{-1, SOCKET_ERROR_AGAIN})); result = buffer.read(io_handle, 100); EXPECT_EQ(Api::IoError::IoErrorCode::Again, result.err_->getErrorCode()); EXPECT_EQ(0, result.rc_); @@ -324,6 +347,445 @@ TEST_F(OwnedImplTest, Read) { EXPECT_THAT(buffer.describeSlicesForTest(), testing::IsEmpty()); } +TEST_F(OwnedImplTest, ExtractOwnedSlice) { + // Create a buffer with two owned slices. + Buffer::OwnedImpl buffer; + buffer.appendSliceForTest("abcde"); + const uint64_t expected_length0 = 5; + buffer.appendSliceForTest("123"); + const uint64_t expected_length1 = 3; + EXPECT_EQ(buffer.toString(), "abcde123"); + RawSliceVector slices = buffer.getRawSlices(); + EXPECT_EQ(2, slices.size()); + + // Extract first slice. 
+ auto slice = buffer.extractMutableFrontSlice(); + ASSERT_TRUE(slice); + auto slice_data = slice->getMutableData(); + ASSERT_NE(slice_data.data(), nullptr); + EXPECT_EQ(slice_data.size(), expected_length0); + EXPECT_EQ("abcde", + absl::string_view(reinterpret_cast(slice_data.data()), slice_data.size())); + EXPECT_EQ(buffer.toString(), "123"); + + // Modify and re-add extracted first slice data to the end of the buffer. + auto slice_mutable_data = slice->getMutableData(); + ASSERT_NE(slice_mutable_data.data(), nullptr); + EXPECT_EQ(slice_mutable_data.size(), expected_length0); + *slice_mutable_data.data() = 'A'; + buffer.appendSliceForTest(slice_mutable_data.data(), slice_mutable_data.size()); + EXPECT_EQ(buffer.toString(), "123Abcde"); + + // Extract second slice, leaving only the original first slice. + slice = buffer.extractMutableFrontSlice(); + ASSERT_TRUE(slice); + slice_data = slice->getMutableData(); + ASSERT_NE(slice_data.data(), nullptr); + EXPECT_EQ(slice_data.size(), expected_length1); + EXPECT_EQ("123", + absl::string_view(reinterpret_cast(slice_data.data()), slice_data.size())); + EXPECT_EQ(buffer.toString(), "Abcde"); +} + +TEST_F(OwnedImplTest, ExtractAfterSentinelDiscard) { + // Create a buffer with a sentinel and one owned slice. + Buffer::OwnedImpl buffer; + bool sentinel_discarded = false; + const Buffer::OwnedBufferFragmentImpl::Releasor sentinel_releasor{ + [&](const Buffer::OwnedBufferFragmentImpl* sentinel) { + sentinel_discarded = true; + delete sentinel; + }}; + auto sentinel = + Buffer::OwnedBufferFragmentImpl::create(absl::string_view("", 0), sentinel_releasor); + buffer.addBufferFragment(*sentinel.release()); + + buffer.appendSliceForTest("abcde"); + const uint64_t expected_length = 5; + EXPECT_EQ(buffer.toString(), "abcde"); + RawSliceVector slices = buffer.getRawSlices(); // only returns slices with data + EXPECT_EQ(1, slices.size()); + + // Extract owned slice after discarding sentinel. 
+ EXPECT_FALSE(sentinel_discarded); + auto slice = buffer.extractMutableFrontSlice(); + ASSERT_TRUE(slice); + EXPECT_TRUE(sentinel_discarded); + auto slice_data = slice->getMutableData(); + ASSERT_NE(slice_data.data(), nullptr); + EXPECT_EQ(slice_data.size(), expected_length); + EXPECT_EQ("abcde", + absl::string_view(reinterpret_cast(slice_data.data()), slice_data.size())); + EXPECT_EQ(0, buffer.length()); +} + +TEST_F(OwnedImplTest, DrainThenExtractOwnedSlice) { + // Create a buffer with two owned slices. + Buffer::OwnedImpl buffer; + buffer.appendSliceForTest("abcde"); + const uint64_t expected_length0 = 5; + buffer.appendSliceForTest("123"); + EXPECT_EQ(buffer.toString(), "abcde123"); + RawSliceVector slices = buffer.getRawSlices(); + EXPECT_EQ(2, slices.size()); + + // Partially drain the first slice. + const uint64_t partial_drain_size = 2; + buffer.drain(partial_drain_size); + EXPECT_EQ(buffer.toString(), static_cast("abcde123") + partial_drain_size); + + // Extracted partially drained first slice, leaving the second slice. + auto slice = buffer.extractMutableFrontSlice(); + ASSERT_TRUE(slice); + auto slice_data = slice->getMutableData(); + ASSERT_NE(slice_data.data(), nullptr); + EXPECT_EQ(slice_data.size(), expected_length0 - partial_drain_size); + EXPECT_EQ(static_cast("abcde") + partial_drain_size, + absl::string_view(reinterpret_cast(slice_data.data()), slice_data.size())); + EXPECT_EQ(buffer.toString(), "123"); +} + +TEST_F(OwnedImplTest, ExtractUnownedSlice) { + // Create a buffer with an unowned slice. 
+ std::string input{"unowned test slice"}; + const size_t expected_length0 = input.size(); + auto frag = OwnedBufferFragmentImpl::create( + {input.c_str(), expected_length0}, + [this](const OwnedBufferFragmentImpl*) { release_callback_called_ = true; }); + Buffer::OwnedImpl buffer; + buffer.addBufferFragment(*frag); + + bool drain_tracker_called{false}; + buffer.addDrainTracker([&] { drain_tracker_called = true; }); + + // Add an owned slice to the end of the buffer. + EXPECT_EQ(expected_length0, buffer.length()); + std::string owned_slice_content{"another slice, but owned"}; + buffer.add(owned_slice_content); + const uint64_t expected_length1 = owned_slice_content.length(); + + // Partially drain the unowned slice. + const uint64_t partial_drain_size = 5; + buffer.drain(partial_drain_size); + EXPECT_EQ(expected_length0 - partial_drain_size + expected_length1, buffer.length()); + EXPECT_FALSE(release_callback_called_); + EXPECT_FALSE(drain_tracker_called); + + // Extract what remains of the unowned slice, leaving only the owned slice. + auto slice = buffer.extractMutableFrontSlice(); + ASSERT_TRUE(slice); + EXPECT_TRUE(drain_tracker_called); + auto slice_data = slice->getMutableData(); + ASSERT_NE(slice_data.data(), nullptr); + EXPECT_EQ(slice_data.size(), expected_length0 - partial_drain_size); + EXPECT_EQ(input.data() + partial_drain_size, + absl::string_view(reinterpret_cast(slice_data.data()), slice_data.size())); + EXPECT_EQ(expected_length1, buffer.length()); + + // The underlying immutable unowned slice was discarded during the extract + // operation and replaced with a mutable copy. The drain trackers were + // called as part of the extract, implying that the release callback was called. 
+ EXPECT_TRUE(release_callback_called_); +} + +TEST_F(OwnedImplTest, ExtractWithDrainTracker) { + testing::InSequence s; + + Buffer::OwnedImpl buffer; + buffer.add("a"); + + testing::MockFunction tracker1; + testing::MockFunction tracker2; + buffer.addDrainTracker(tracker1.AsStdFunction()); + buffer.addDrainTracker(tracker2.AsStdFunction()); + + testing::MockFunction done; + EXPECT_CALL(tracker1, Call()); + EXPECT_CALL(tracker2, Call()); + EXPECT_CALL(done, Call()); + auto slice = buffer.extractMutableFrontSlice(); + // The test now has ownership of the slice, but the drain trackers were + // called as part of the extract operation + done.Call(); + slice.reset(); +} + +TEST_F(OwnedImplTest, DrainTracking) { + testing::InSequence s; + + Buffer::OwnedImpl buffer; + buffer.add("a"); + + testing::MockFunction tracker1; + testing::MockFunction tracker2; + buffer.addDrainTracker(tracker1.AsStdFunction()); + buffer.addDrainTracker(tracker2.AsStdFunction()); + + testing::MockFunction done; + EXPECT_CALL(tracker1, Call()); + EXPECT_CALL(tracker2, Call()); + EXPECT_CALL(done, Call()); + buffer.drain(buffer.length()); + done.Call(); +} + +TEST_F(OwnedImplTest, MoveDrainTrackersWhenTransferingSlices) { + testing::InSequence s; + + Buffer::OwnedImpl buffer1; + buffer1.add("a"); + + testing::MockFunction tracker1; + buffer1.addDrainTracker(tracker1.AsStdFunction()); + + Buffer::OwnedImpl buffer2; + buffer2.add("b"); + + testing::MockFunction tracker2; + buffer2.addDrainTracker(tracker2.AsStdFunction()); + + buffer2.add(std::string(10000, 'c')); + testing::MockFunction tracker3; + buffer2.addDrainTracker(tracker3.AsStdFunction()); + EXPECT_EQ(2, buffer2.getRawSlices().size()); + + buffer1.move(buffer2); + EXPECT_EQ(10002, buffer1.length()); + EXPECT_EQ(0, buffer2.length()); + EXPECT_EQ(3, buffer1.getRawSlices().size()); + EXPECT_EQ(0, buffer2.getRawSlices().size()); + + testing::MockFunction done; + EXPECT_CALL(tracker1, Call()); + EXPECT_CALL(tracker2, Call()); + 
EXPECT_CALL(tracker3, Call()); + EXPECT_CALL(done, Call()); + buffer1.drain(buffer1.length()); + done.Call(); +} + +TEST_F(OwnedImplTest, MoveDrainTrackersWhenCopying) { + testing::InSequence s; + + Buffer::OwnedImpl buffer1; + buffer1.add("a"); + + testing::MockFunction tracker1; + buffer1.addDrainTracker(tracker1.AsStdFunction()); + + Buffer::OwnedImpl buffer2; + buffer2.add("b"); + + testing::MockFunction tracker2; + buffer2.addDrainTracker(tracker2.AsStdFunction()); + + buffer1.move(buffer2); + EXPECT_EQ(2, buffer1.length()); + EXPECT_EQ(0, buffer2.length()); + EXPECT_EQ(1, buffer1.getRawSlices().size()); + EXPECT_EQ(0, buffer2.getRawSlices().size()); + + buffer1.drain(1); + testing::MockFunction done; + EXPECT_CALL(tracker1, Call()); + EXPECT_CALL(tracker2, Call()); + EXPECT_CALL(done, Call()); + buffer1.drain(1); + done.Call(); +} + +TEST_F(OwnedImplTest, PartialMoveDrainTrackers) { + testing::InSequence s; + + Buffer::OwnedImpl buffer1; + buffer1.add("a"); + + testing::MockFunction tracker1; + buffer1.addDrainTracker(tracker1.AsStdFunction()); + + Buffer::OwnedImpl buffer2; + buffer2.add("b"); + + testing::MockFunction tracker2; + buffer2.addDrainTracker(tracker2.AsStdFunction()); + + buffer2.add(std::string(10000, 'c')); + testing::MockFunction tracker3; + buffer2.addDrainTracker(tracker3.AsStdFunction()); + EXPECT_EQ(2, buffer2.getRawSlices().size()); + + // Move the first slice and associated trackers and part of the second slice to buffer1. + buffer1.move(buffer2, 4999); + EXPECT_EQ(5000, buffer1.length()); + EXPECT_EQ(5002, buffer2.length()); + EXPECT_EQ(3, buffer1.getRawSlices().size()); + EXPECT_EQ(1, buffer2.getRawSlices().size()); + + testing::MockFunction done; + EXPECT_CALL(tracker1, Call()); + buffer1.drain(1); + + EXPECT_CALL(tracker2, Call()); + EXPECT_CALL(done, Call()); + buffer1.drain(buffer1.length()); + done.Call(); + + // tracker3 remained in buffer2. 
+ EXPECT_CALL(tracker3, Call()); + buffer2.drain(buffer2.length()); +} + +TEST_F(OwnedImplTest, DrainTrackingOnDestruction) { + testing::InSequence s; + + auto buffer = std::make_unique(); + buffer->add("a"); + + testing::MockFunction tracker; + buffer->addDrainTracker(tracker.AsStdFunction()); + + testing::MockFunction done; + EXPECT_CALL(tracker, Call()); + EXPECT_CALL(done, Call()); + buffer.reset(); + done.Call(); +} + +TEST_F(OwnedImplTest, Linearize) { + Buffer::OwnedImpl buffer; + + // Unowned slice to track when linearize kicks in. + std::string input(1000, 'a'); + BufferFragmentImpl frag( + input.c_str(), input.size(), + [this](const void*, size_t, const BufferFragmentImpl*) { release_callback_called_ = true; }); + buffer.addBufferFragment(frag); + + // Second slice with more data. + buffer.add(std::string(1000, 'b')); + + // Linearize does not change the pointer associated with the first slice if requested size is less + // than or equal to size of the first slice. + EXPECT_EQ(input.c_str(), buffer.linearize(input.size())); + EXPECT_FALSE(release_callback_called_); + + constexpr uint64_t LinearizeSize = 2000; + void* out_ptr = buffer.linearize(LinearizeSize); + EXPECT_TRUE(release_callback_called_); + EXPECT_EQ(input + std::string(1000, 'b'), + absl::string_view(reinterpret_cast(out_ptr), LinearizeSize)); +} + +TEST_F(OwnedImplTest, LinearizeEmptyBuffer) { + Buffer::OwnedImpl buffer; + EXPECT_EQ(nullptr, buffer.linearize(0)); +} + +TEST_F(OwnedImplTest, LinearizeSingleSlice) { + auto buffer = std::make_unique(); + + // Unowned slice to track when linearize kicks in. 
+ std::string input(1000, 'a'); + BufferFragmentImpl frag( + input.c_str(), input.size(), + [this](const void*, size_t, const BufferFragmentImpl*) { release_callback_called_ = true; }); + buffer->addBufferFragment(frag); + + EXPECT_EQ(input.c_str(), buffer->linearize(buffer->length())); + EXPECT_FALSE(release_callback_called_); + + buffer.reset(); + EXPECT_TRUE(release_callback_called_); +} + +TEST_F(OwnedImplTest, LinearizeDrainTracking) { + constexpr uint32_t SmallChunk = 200; + constexpr uint32_t LargeChunk = 16384 - SmallChunk; + constexpr uint32_t LinearizeSize = SmallChunk + LargeChunk; + + // Create a buffer with a eclectic combination of buffer OwnedSlice and UnownedSlices that will + // help us explore the properties of linearize. + Buffer::OwnedImpl buffer; + + // Large add below the target linearize size. + testing::MockFunction tracker1; + buffer.add(std::string(LargeChunk, 'a')); + buffer.addDrainTracker(tracker1.AsStdFunction()); + + // Unowned slice which causes some fragmentation. + testing::MockFunction tracker2; + testing::MockFunction + release_callback_tracker; + std::string frag_input(2 * SmallChunk, 'b'); + BufferFragmentImpl frag(frag_input.c_str(), frag_input.size(), + release_callback_tracker.AsStdFunction()); + buffer.addBufferFragment(frag); + buffer.addDrainTracker(tracker2.AsStdFunction()); + + // And an unowned slice with 0 size, because. + testing::MockFunction tracker3; + testing::MockFunction + release_callback_tracker2; + BufferFragmentImpl frag2(nullptr, 0, release_callback_tracker2.AsStdFunction()); + buffer.addBufferFragment(frag2); + buffer.addDrainTracker(tracker3.AsStdFunction()); + + // Add a very large chunk + testing::MockFunction tracker4; + buffer.add(std::string(LargeChunk + LinearizeSize, 'c')); + buffer.addDrainTracker(tracker4.AsStdFunction()); + + // Small adds that create no gaps. 
+ testing::MockFunction tracker5; + for (int i = 0; i < 105; ++i) { + buffer.add(std::string(SmallChunk, 'd')); + } + buffer.addDrainTracker(tracker5.AsStdFunction()); + + expectSlices({{16184, 136, 16320}, + {400, 0, 400}, + {0, 0, 0}, + {32704, 0, 32704}, + {4032, 0, 4032}, + {4032, 0, 4032}, + {4032, 0, 4032}, + {4032, 0, 4032}, + {4032, 0, 4032}, + {704, 3328, 4032}}, + buffer); + + testing::InSequence s; + testing::MockFunction drain_tracker; + testing::MockFunction done_tracker; + EXPECT_CALL(tracker1, Call()); + EXPECT_CALL(drain_tracker, Call(3 * LargeChunk + 108 * SmallChunk, 16384)); + EXPECT_CALL(release_callback_tracker, Call(_, _, _)); + EXPECT_CALL(tracker2, Call()); + EXPECT_CALL(release_callback_tracker2, Call(_, _, _)); + EXPECT_CALL(tracker3, Call()); + EXPECT_CALL(drain_tracker, Call(2 * LargeChunk + 107 * SmallChunk, 16384)); + EXPECT_CALL(drain_tracker, Call(LargeChunk + 106 * SmallChunk, 16384)); + EXPECT_CALL(tracker4, Call()); + EXPECT_CALL(drain_tracker, Call(105 * SmallChunk, 16384)); + EXPECT_CALL(tracker5, Call()); + EXPECT_CALL(drain_tracker, Call(4616, 4616)); + EXPECT_CALL(done_tracker, Call()); + for (auto& expected_first_slice : std::vector>{{16384, 4032, 20416}, + {16384, 4032, 20416}, + {16520, 0, 32704}, + {16384, 4032, 20416}, + {4616, 3512, 8128}}) { + const uint32_t write_size = std::min(LinearizeSize, buffer.length()); + buffer.linearize(write_size); + expectFirstSlice(expected_first_slice, buffer); + drain_tracker.Call(buffer.length(), write_size); + buffer.drain(write_size); + } + done_tracker.Call(); + + expectSlices({}, buffer); +} + TEST_F(OwnedImplTest, ReserveCommit) { // This fragment will later be added to the buffer. It is declared in an enclosing scope to // ensure it is not destructed until after the buffer is. @@ -375,12 +837,12 @@ TEST_F(OwnedImplTest, ReserveCommit) { // Request a reservation that too big to fit in the existing slices. This should result // in the creation of a third slice. 
- expectSlices({{1, 4055, 4056}}, buffer); + expectSlices({{1, 4031, 4032}}, buffer); buffer.reserve(4096 - sizeof(OwnedSlice), iovecs, NumIovecs); - expectSlices({{1, 4055, 4056}, {0, 4056, 4056}}, buffer); + expectSlices({{1, 4031, 4032}, {0, 4032, 4032}}, buffer); const void* slice2 = iovecs[1].mem_; num_reserved = buffer.reserve(8192, iovecs, NumIovecs); - expectSlices({{1, 4055, 4056}, {0, 4056, 4056}, {0, 4056, 4056}}, buffer); + expectSlices({{1, 4031, 4032}, {0, 4032, 4032}, {0, 4032, 4032}}, buffer); EXPECT_EQ(3, num_reserved); EXPECT_EQ(slice1, iovecs[0].mem_); EXPECT_EQ(slice2, iovecs[1].mem_); @@ -389,11 +851,11 @@ TEST_F(OwnedImplTest, ReserveCommit) { // Append a fragment to the buffer, and then request a small reservation. The buffer // should make a new slice to satisfy the reservation; it cannot safely use any of // the previously seen slices, because they are no longer at the end of the buffer. - expectSlices({{1, 4055, 4056}}, buffer); + expectSlices({{1, 4031, 4032}}, buffer); buffer.addBufferFragment(fragment); EXPECT_EQ(13, buffer.length()); num_reserved = buffer.reserve(1, iovecs, NumIovecs); - expectSlices({{1, 4055, 4056}, {12, 0, 12}, {0, 4056, 4056}}, buffer); + expectSlices({{1, 4031, 4032}, {12, 0, 12}, {0, 4032, 4032}}, buffer); EXPECT_EQ(1, num_reserved); EXPECT_NE(slice1, iovecs[0].mem_); commitReservation(iovecs, num_reserved, buffer); @@ -424,16 +886,16 @@ TEST_F(OwnedImplTest, ReserveCommitReuse) { EXPECT_EQ(2, num_reserved); const void* first_slice = iovecs[0].mem_; iovecs[0].len_ = 1; - expectSlices({{8000, 4248, 12248}, {0, 12248, 12248}}, buffer); + expectSlices({{8000, 4224, 12224}, {0, 12224, 12224}}, buffer); buffer.commit(iovecs, 1); EXPECT_EQ(8001, buffer.length()); EXPECT_EQ(first_slice, iovecs[0].mem_); // The second slice is now released because there's nothing in the second slice. - expectSlices({{8001, 4247, 12248}}, buffer); + expectSlices({{8001, 4223, 12224}}, buffer); // Reserve 16KB again. 
num_reserved = buffer.reserve(16384, iovecs, NumIovecs); - expectSlices({{8001, 4247, 12248}, {0, 12248, 12248}}, buffer); + expectSlices({{8001, 4223, 12224}, {0, 12224, 12224}}, buffer); EXPECT_EQ(2, num_reserved); EXPECT_EQ(static_cast(first_slice) + 1, static_cast(iovecs[0].mem_)); @@ -460,7 +922,7 @@ TEST_F(OwnedImplTest, ReserveReuse) { EXPECT_EQ(2, num_reserved); EXPECT_EQ(first_slice, iovecs[0].mem_); EXPECT_EQ(second_slice, iovecs[1].mem_); - expectSlices({{0, 12248, 12248}, {0, 8152, 8152}}, buffer); + expectSlices({{0, 12224, 12224}, {0, 8128, 8128}}, buffer); // Request a larger reservation, verify that the second entry is replaced with a block with a // larger size. @@ -468,51 +930,51 @@ TEST_F(OwnedImplTest, ReserveReuse) { const void* third_slice = iovecs[1].mem_; EXPECT_EQ(2, num_reserved); EXPECT_EQ(first_slice, iovecs[0].mem_); - EXPECT_EQ(12248, iovecs[0].len_); + EXPECT_EQ(12224, iovecs[0].len_); EXPECT_NE(second_slice, iovecs[1].mem_); EXPECT_EQ(30000 - iovecs[0].len_, iovecs[1].len_); - expectSlices({{0, 12248, 12248}, {0, 8152, 8152}, {0, 20440, 20440}}, buffer); + expectSlices({{0, 12224, 12224}, {0, 8128, 8128}, {0, 20416, 20416}}, buffer); // Repeating a the reservation request for a smaller block returns the previous entry. num_reserved = buffer.reserve(16384, iovecs, NumIovecs); EXPECT_EQ(2, num_reserved); EXPECT_EQ(first_slice, iovecs[0].mem_); EXPECT_EQ(second_slice, iovecs[1].mem_); - expectSlices({{0, 12248, 12248}, {0, 8152, 8152}, {0, 20440, 20440}}, buffer); + expectSlices({{0, 12224, 12224}, {0, 8128, 8128}, {0, 20416, 20416}}, buffer); // Repeat the larger reservation notice that it doesn't match the prior reservation for 30000 // bytes. 
num_reserved = buffer.reserve(30000, iovecs, NumIovecs); EXPECT_EQ(2, num_reserved); EXPECT_EQ(first_slice, iovecs[0].mem_); - EXPECT_EQ(12248, iovecs[0].len_); + EXPECT_EQ(12224, iovecs[0].len_); EXPECT_NE(second_slice, iovecs[1].mem_); EXPECT_NE(third_slice, iovecs[1].mem_); EXPECT_EQ(30000 - iovecs[0].len_, iovecs[1].len_); - expectSlices({{0, 12248, 12248}, {0, 8152, 8152}, {0, 20440, 20440}, {0, 20440, 20440}}, buffer); + expectSlices({{0, 12224, 12224}, {0, 8128, 8128}, {0, 20416, 20416}, {0, 20416, 20416}}, buffer); // Commit the most recent reservation and verify the representation. buffer.commit(iovecs, num_reserved); - expectSlices({{12248, 0, 12248}, {0, 8152, 8152}, {0, 20440, 20440}, {17752, 2688, 20440}}, + expectSlices({{12224, 0, 12224}, {0, 8128, 8128}, {0, 20416, 20416}, {17776, 2640, 20416}}, buffer); // Do another reservation. num_reserved = buffer.reserve(16384, iovecs, NumIovecs); EXPECT_EQ(2, num_reserved); - expectSlices({{12248, 0, 12248}, - {0, 8152, 8152}, - {0, 20440, 20440}, - {17752, 2688, 20440}, - {0, 16344, 16344}}, + expectSlices({{12224, 0, 12224}, + {0, 8128, 8128}, + {0, 20416, 20416}, + {17776, 2640, 20416}, + {0, 16320, 16320}}, buffer); // And commit. 
buffer.commit(iovecs, num_reserved); - expectSlices({{12248, 0, 12248}, - {0, 8152, 8152}, - {0, 20440, 20440}, - {20440, 0, 20440}, - {13696, 2648, 16344}}, + expectSlices({{12224, 0, 12224}, + {0, 8128, 8128}, + {0, 20416, 20416}, + {20416, 0, 20416}, + {13744, 2576, 16320}}, buffer); } @@ -526,21 +988,56 @@ TEST_F(OwnedImplTest, Search) { } EXPECT_STREQ("abaaaabaaaaaba", buffer.toString().c_str()); - EXPECT_EQ(-1, buffer.search("c", 1, 0)); - EXPECT_EQ(0, buffer.search("", 0, 0)); - EXPECT_EQ(buffer.length(), buffer.search("", 0, buffer.length())); - EXPECT_EQ(-1, buffer.search("", 0, buffer.length() + 1)); - EXPECT_EQ(0, buffer.search("a", 1, 0)); - EXPECT_EQ(1, buffer.search("b", 1, 1)); - EXPECT_EQ(2, buffer.search("a", 1, 1)); - EXPECT_EQ(0, buffer.search("abaa", 4, 0)); - EXPECT_EQ(2, buffer.search("aaaa", 4, 0)); - EXPECT_EQ(2, buffer.search("aaaa", 4, 1)); - EXPECT_EQ(2, buffer.search("aaaa", 4, 2)); - EXPECT_EQ(7, buffer.search("aaaaab", 6, 0)); - EXPECT_EQ(0, buffer.search("abaaaabaaaaaba", 14, 0)); - EXPECT_EQ(12, buffer.search("ba", 2, 10)); - EXPECT_EQ(-1, buffer.search("abaaaabaaaaabaa", 15, 0)); + EXPECT_EQ(-1, buffer.search("c", 1, 0, 0)); + EXPECT_EQ(0, buffer.search("", 0, 0, 0)); + EXPECT_EQ(buffer.length(), buffer.search("", 0, buffer.length(), 0)); + EXPECT_EQ(-1, buffer.search("", 0, buffer.length() + 1, 0)); + EXPECT_EQ(0, buffer.search("a", 1, 0, 0)); + EXPECT_EQ(1, buffer.search("b", 1, 1, 0)); + EXPECT_EQ(2, buffer.search("a", 1, 1, 0)); + EXPECT_EQ(0, buffer.search("abaa", 4, 0, 0)); + EXPECT_EQ(2, buffer.search("aaaa", 4, 0, 0)); + EXPECT_EQ(2, buffer.search("aaaa", 4, 1, 0)); + EXPECT_EQ(2, buffer.search("aaaa", 4, 2, 0)); + EXPECT_EQ(7, buffer.search("aaaaab", 6, 0, 0)); + EXPECT_EQ(0, buffer.search("abaaaabaaaaaba", 14, 0, 0)); + EXPECT_EQ(12, buffer.search("ba", 2, 10, 0)); + EXPECT_EQ(-1, buffer.search("abaaaabaaaaabaa", 15, 0, 0)); +} + +TEST_F(OwnedImplTest, SearchWithLengthLimit) { + // Populate a buffer with a string split 
across many small slices, to + // exercise edge cases in the search implementation. + static const char* Inputs[] = {"ab", "a", "", "aaa", "b", "a", "aaa", "ab", "a"}; + Buffer::OwnedImpl buffer; + for (const auto& input : Inputs) { + buffer.appendSliceForTest(input); + } + EXPECT_STREQ("abaaaabaaaaaba", buffer.toString().c_str()); + + // The string is there, but the search is limited to 1 byte. + EXPECT_EQ(-1, buffer.search("b", 1, 0, 1)); + // The string is there, but the search is limited to 1 byte. + EXPECT_EQ(-1, buffer.search("ab", 2, 0, 1)); + // The string is there, but spans over 2 slices. The search length is enough + // to find it. + EXPECT_EQ(1, buffer.search("ba", 2, 0, 3)); + EXPECT_EQ(1, buffer.search("ba", 2, 0, 5)); + EXPECT_EQ(1, buffer.search("ba", 2, 1, 2)); + EXPECT_EQ(1, buffer.search("ba", 2, 1, 5)); + // The string spans over 3 slices. test different variations of search length + // and starting position. + EXPECT_EQ(2, buffer.search("aaaab", 5, 2, 5)); + EXPECT_EQ(-1, buffer.search("aaaab", 5, 2, 3)); + EXPECT_EQ(2, buffer.search("aaaab", 5, 2, 6)); + EXPECT_EQ(2, buffer.search("aaaab", 5, 0, 8)); + EXPECT_EQ(-1, buffer.search("aaaab", 5, 0, 6)); + // Test searching for the string which in in the last slice. 
+ EXPECT_EQ(12, buffer.search("ba", 2, 12, 2)); + EXPECT_EQ(12, buffer.search("ba", 2, 11, 3)); + EXPECT_EQ(-1, buffer.search("ba", 2, 11, 2)); + // Test cases when length to search is larger than buffer + EXPECT_EQ(12, buffer.search("ba", 2, 11, 10e6)); } TEST_F(OwnedImplTest, StartsWith) { @@ -667,9 +1164,9 @@ TEST_F(OwnedImplTest, ReserveZeroCommit) { Api::IoCallUint64Result result = buf.read(io_handle, max_length); ASSERT_EQ(result.rc_, static_cast(rc)); ASSERT_EQ(os_sys_calls.close(pipe_fds[1]).rc_, 0); - ASSERT_EQ(previous_length, buf.search(data.data(), rc, previous_length)); + ASSERT_EQ(previous_length, buf.search(data.data(), rc, previous_length, 0)); EXPECT_EQ("bbbbb", buf.toString().substr(0, 5)); - expectSlices({{5, 0, 4056}, {1953, 2103, 4056}}, buf); + expectSlices({{5, 0, 4032}, {1953, 2079, 4032}}, buf); } TEST_F(OwnedImplTest, ReadReserveAndCommit) { @@ -696,11 +1193,10 @@ TEST_F(OwnedImplTest, ReadReserveAndCommit) { ASSERT_EQ(result.rc_, static_cast(rc)); ASSERT_EQ(os_sys_calls.close(pipe_fds[1]).rc_, 0); EXPECT_EQ("bbbbbe", buf.toString()); - expectSlices({{6, 4050, 4056}}, buf); + expectSlices({{6, 4026, 4032}}, buf); } TEST(OverflowDetectingUInt64, Arithmetic) { - Logger::StderrSinkDelegate stderr_sink(Logger::Registry::getSink()); // For coverage build. 
OverflowDetectingUInt64 length; length += 1; length -= 1; diff --git a/test/common/buffer/watermark_buffer_test.cc b/test/common/buffer/watermark_buffer_test.cc index f5c13fa7e1779..3e7cf0b57eeda 100644 --- a/test/common/buffer/watermark_buffer_test.cc +++ b/test/common/buffer/watermark_buffer_test.cc @@ -6,6 +6,7 @@ #include "common/network/io_socket_handle_impl.h" #include "test/common/buffer/utility.h" +#include "test/test_common/test_runtime.h" #include "gtest/gtest.h" @@ -20,9 +21,11 @@ class WatermarkBufferTest : public testing::Test { WatermarkBufferTest() { buffer_.setWatermarks(5, 10); } Buffer::WatermarkBuffer buffer_{[&]() -> void { ++times_low_watermark_called_; }, - [&]() -> void { ++times_high_watermark_called_; }}; + [&]() -> void { ++times_high_watermark_called_; }, + [&]() -> void { ++times_overflow_watermark_called_; }}; uint32_t times_low_watermark_called_{0}; uint32_t times_high_watermark_called_{0}; + uint32_t times_overflow_watermark_called_{0}; }; TEST_F(WatermarkBufferTest, TestWatermark) { ASSERT_EQ(10, buffer_.highWatermark()); } @@ -97,8 +100,10 @@ TEST_F(WatermarkBufferTest, PrependBuffer) { uint32_t prefix_buffer_low_watermark_hits{0}; uint32_t prefix_buffer_high_watermark_hits{0}; + uint32_t prefix_buffer_overflow_watermark_hits{0}; WatermarkBuffer prefixBuffer{[&]() -> void { ++prefix_buffer_low_watermark_hits; }, - [&]() -> void { ++prefix_buffer_high_watermark_hits; }}; + [&]() -> void { ++prefix_buffer_high_watermark_hits; }, + [&]() -> void { ++prefix_buffer_overflow_watermark_hits; }}; prefixBuffer.setWatermarks(5, 10); prefixBuffer.add(prefix); prefixBuffer.add(suffix); @@ -137,6 +142,7 @@ TEST_F(WatermarkBufferTest, Drain) { buffer_.add(TEN_BYTES, 11); buffer_.drain(5); EXPECT_EQ(6, buffer_.length()); + EXPECT_EQ(1, times_high_watermark_called_); EXPECT_EQ(0, times_low_watermark_called_); // Now drain below. 
@@ -148,6 +154,38 @@ TEST_F(WatermarkBufferTest, Drain) { EXPECT_EQ(2, times_high_watermark_called_); } +TEST_F(WatermarkBufferTest, DrainUsingExtract) { + // Similar to `Drain` test, but using extractMutableFrontSlice() instead of drain(). + buffer_.add(TEN_BYTES, 10); + ASSERT_EQ(buffer_.length(), 10); + buffer_.extractMutableFrontSlice(); + EXPECT_EQ(0, times_high_watermark_called_); + EXPECT_EQ(0, times_low_watermark_called_); + + // Go above the high watermark then drain down to just at the low watermark. + buffer_.appendSliceForTest(TEN_BYTES, 5); + buffer_.appendSliceForTest(TEN_BYTES, 1); + buffer_.appendSliceForTest(TEN_BYTES, 5); + EXPECT_EQ(1, times_high_watermark_called_); + EXPECT_EQ(0, times_low_watermark_called_); + auto slice0 = buffer_.extractMutableFrontSlice(); // essentially drain(5) + ASSERT_TRUE(slice0); + EXPECT_EQ(slice0->getMutableData().size(), 5); + EXPECT_EQ(6, buffer_.length()); + EXPECT_EQ(0, times_low_watermark_called_); + + // Now drain below. + auto slice1 = buffer_.extractMutableFrontSlice(); // essentially drain(1) + ASSERT_TRUE(slice1); + EXPECT_EQ(slice1->getMutableData().size(), 1); + EXPECT_EQ(1, times_high_watermark_called_); + EXPECT_EQ(1, times_low_watermark_called_); + + // Going back above should trigger the high again. + buffer_.add(TEN_BYTES, 10); + EXPECT_EQ(2, times_high_watermark_called_); +} + // Verify that low watermark callback is called on drain in the case where the // high watermark is non-zero and low watermark is 0. 
TEST_F(WatermarkBufferTest, DrainWithLowWatermarkOfZero) { @@ -252,22 +290,27 @@ TEST_F(WatermarkBufferTest, MoveWatermarks) { EXPECT_EQ(1, times_low_watermark_called_); buffer_.setWatermarks(9, 20); EXPECT_EQ(1, times_low_watermark_called_); + EXPECT_EQ(0, times_overflow_watermark_called_); EXPECT_EQ(1, times_high_watermark_called_); buffer_.setWatermarks(2); EXPECT_EQ(2, times_high_watermark_called_); EXPECT_EQ(1, times_low_watermark_called_); + EXPECT_EQ(0, times_overflow_watermark_called_); buffer_.setWatermarks(0); EXPECT_EQ(2, times_high_watermark_called_); EXPECT_EQ(2, times_low_watermark_called_); + EXPECT_EQ(0, times_overflow_watermark_called_); buffer_.setWatermarks(1); EXPECT_EQ(3, times_high_watermark_called_); EXPECT_EQ(2, times_low_watermark_called_); + EXPECT_EQ(0, times_overflow_watermark_called_); // Fully drain the buffer. buffer_.drain(9); EXPECT_EQ(3, times_low_watermark_called_); EXPECT_EQ(0, buffer_.length()); + EXPECT_EQ(0, times_overflow_watermark_called_); } TEST_F(WatermarkBufferTest, GetRawSlices) { @@ -285,9 +328,9 @@ TEST_F(WatermarkBufferTest, GetRawSlices) { TEST_F(WatermarkBufferTest, Search) { buffer_.add(TEN_BYTES, 10); - EXPECT_EQ(1, buffer_.search(&TEN_BYTES[1], 2, 0)); + EXPECT_EQ(1, buffer_.search(&TEN_BYTES[1], 2, 0, 0)); - EXPECT_EQ(-1, buffer_.search(&TEN_BYTES[1], 2, 5)); + EXPECT_EQ(-1, buffer_.search(&TEN_BYTES[1], 2, 5, 0)); } TEST_F(WatermarkBufferTest, StartsWith) { @@ -301,8 +344,10 @@ TEST_F(WatermarkBufferTest, StartsWith) { TEST_F(WatermarkBufferTest, MoveBackWithWatermarks) { int high_watermark_buffer1 = 0; int low_watermark_buffer1 = 0; + int overflow_watermark_buffer1 = 0; Buffer::WatermarkBuffer buffer1{[&]() -> void { ++low_watermark_buffer1; }, - [&]() -> void { ++high_watermark_buffer1; }}; + [&]() -> void { ++high_watermark_buffer1; }, + [&]() -> void { ++overflow_watermark_buffer1; }}; buffer1.setWatermarks(5, 10); // Stick 20 bytes in buffer_ and expect the high watermark is hit. 
@@ -314,16 +359,198 @@ TEST_F(WatermarkBufferTest, MoveBackWithWatermarks) { buffer1.move(buffer_, 10); EXPECT_EQ(0, times_low_watermark_called_); EXPECT_EQ(0, high_watermark_buffer1); + EXPECT_EQ(0, overflow_watermark_buffer1); // Move 10 more bytes to the new buffer. Both buffers should hit watermark callbacks. buffer1.move(buffer_, 10); EXPECT_EQ(1, times_low_watermark_called_); EXPECT_EQ(1, high_watermark_buffer1); + EXPECT_EQ(0, times_overflow_watermark_called_); + EXPECT_EQ(0, overflow_watermark_buffer1); // Now move all the data back to the original buffer. Watermarks should trigger immediately. buffer_.move(buffer1); EXPECT_EQ(2, times_high_watermark_called_); EXPECT_EQ(1, low_watermark_buffer1); + EXPECT_EQ(0, times_overflow_watermark_called_); + EXPECT_EQ(0, overflow_watermark_buffer1); +} + +TEST_F(WatermarkBufferTest, OverflowWatermark) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues({{"envoy.buffer.overflow_multiplier", "2"}}); + + int high_watermark_buffer1 = 0; + int low_watermark_buffer1 = 0; + int overflow_watermark_buffer1 = 0; + Buffer::WatermarkBuffer buffer1{[&]() -> void { ++low_watermark_buffer1; }, + [&]() -> void { ++high_watermark_buffer1; }, + [&]() -> void { ++overflow_watermark_buffer1; }}; + buffer1.setWatermarks(5, 10); + + buffer1.add(TEN_BYTES, 10); + EXPECT_EQ(0, high_watermark_buffer1); + EXPECT_EQ(0, overflow_watermark_buffer1); + buffer1.add("a", 1); + EXPECT_EQ(1, high_watermark_buffer1); + EXPECT_EQ(0, overflow_watermark_buffer1); + buffer1.add(TEN_BYTES, 9); + EXPECT_EQ(1, high_watermark_buffer1); + EXPECT_EQ(0, overflow_watermark_buffer1); + buffer1.add("a", 1); + EXPECT_EQ(1, high_watermark_buffer1); + EXPECT_EQ(1, overflow_watermark_buffer1); + EXPECT_EQ(21, buffer1.length()); + buffer1.add("a", 1); + EXPECT_EQ(1, high_watermark_buffer1); + EXPECT_EQ(1, overflow_watermark_buffer1); + EXPECT_EQ(22, buffer1.length()); + + // Overflow is only triggered once + buffer1.drain(18); + 
EXPECT_EQ(4, buffer1.length()); + EXPECT_EQ(1, high_watermark_buffer1); + EXPECT_EQ(1, low_watermark_buffer1); + EXPECT_EQ(1, overflow_watermark_buffer1); + buffer1.add(TEN_BYTES, 10); + EXPECT_EQ(2, high_watermark_buffer1); + EXPECT_EQ(1, overflow_watermark_buffer1); + EXPECT_EQ(14, buffer1.length()); + buffer1.add(TEN_BYTES, 6); + EXPECT_EQ(2, high_watermark_buffer1); + EXPECT_EQ(1, overflow_watermark_buffer1); + EXPECT_EQ(20, buffer1.length()); +} + +TEST_F(WatermarkBufferTest, OverflowWatermarkDisabled) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues({{"envoy.buffer.overflow_multiplier", "0"}}); + + int high_watermark_buffer1 = 0; + int low_watermark_buffer1 = 0; + int overflow_watermark_buffer1 = 0; + Buffer::WatermarkBuffer buffer1{[&]() -> void { ++low_watermark_buffer1; }, + [&]() -> void { ++high_watermark_buffer1; }, + [&]() -> void { ++overflow_watermark_buffer1; }}; + buffer1.setWatermarks(5, 10); + + buffer1.add(TEN_BYTES, 10); + EXPECT_EQ(0, high_watermark_buffer1); + EXPECT_EQ(0, overflow_watermark_buffer1); + buffer1.add("a", 1); + EXPECT_EQ(1, high_watermark_buffer1); + EXPECT_EQ(0, overflow_watermark_buffer1); + buffer1.add(TEN_BYTES, 10); + EXPECT_EQ(1, high_watermark_buffer1); + EXPECT_EQ(0, overflow_watermark_buffer1); + EXPECT_EQ(21, buffer1.length()); +} + +TEST_F(WatermarkBufferTest, OverflowWatermarkDisabledOnVeryHighValue) { +// Disabling execution with TSAN as it causes the test to use too much memory +// and time, making the test fail in some settings (such as CI) +#if defined(__has_feature) && __has_feature(thread_sanitizer) + ENVOY_LOG_MISC(critical, "WatermarkBufferTest::OverflowWatermarkDisabledOnVeryHighValue not " + "supported by this compiler configuration"); +#else + // Verifies that the overflow watermark is disabled when its value is higher + // than uint32_t max value + TestScopedRuntime scoped_runtime; + + int high_watermark_buffer1 = 0; + int overflow_watermark_buffer1 = 0; + 
Buffer::WatermarkBuffer buffer1{[&]() -> void {}, [&]() -> void { ++high_watermark_buffer1; }, + [&]() -> void { ++overflow_watermark_buffer1; }}; + + // Make sure the overflow threshold will be above std::numeric_limits::max() + Runtime::LoaderSingleton::getExisting()->mergeValues({{"envoy.buffer.overflow_multiplier", "3"}}); + buffer1.setWatermarks((std::numeric_limits::max() / 3) + 1); + + // Add many segments instead of full uint32_t::max to get around std::bad_alloc exception + const uint32_t segment_denominator = 128; + const uint32_t big_segment_len = std::numeric_limits::max() / segment_denominator + 1; + const std::string big_segment_str = std::string(big_segment_len, 'a'); + for (uint32_t i = 0; i < segment_denominator; ++i) { + buffer1.add(big_segment_str.data(), big_segment_len); + } + EXPECT_EQ(1, high_watermark_buffer1); + EXPECT_EQ(0, overflow_watermark_buffer1); + buffer1.add(TEN_BYTES, 10); + EXPECT_EQ(1, high_watermark_buffer1); + EXPECT_EQ(0, overflow_watermark_buffer1); + EXPECT_EQ(static_cast(segment_denominator) * big_segment_len + 10, buffer1.length()); + EXPECT_GT(buffer1.length(), std::numeric_limits::max()); +#endif +} + +TEST_F(WatermarkBufferTest, OverflowWatermarkEqualHighWatermark) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues({{"envoy.buffer.overflow_multiplier", "1"}}); + + int high_watermark_buffer1 = 0; + int low_watermark_buffer1 = 0; + int overflow_watermark_buffer1 = 0; + Buffer::WatermarkBuffer buffer1{[&]() -> void { ++low_watermark_buffer1; }, + [&]() -> void { ++high_watermark_buffer1; }, + [&]() -> void { ++overflow_watermark_buffer1; }}; + buffer1.setWatermarks(5, 10); + + buffer1.add(TEN_BYTES, 10); + EXPECT_EQ(0, high_watermark_buffer1); + EXPECT_EQ(0, overflow_watermark_buffer1); + buffer1.add("a", 1); + EXPECT_EQ(0, low_watermark_buffer1); + EXPECT_EQ(1, high_watermark_buffer1); + EXPECT_EQ(1, overflow_watermark_buffer1); + + buffer1.drain(6); + EXPECT_EQ(1, 
low_watermark_buffer1); + EXPECT_EQ(1, high_watermark_buffer1); + EXPECT_EQ(1, overflow_watermark_buffer1); + buffer1.add(TEN_BYTES, 10); + EXPECT_EQ(15, buffer1.length()); + EXPECT_EQ(2, high_watermark_buffer1); + EXPECT_EQ(1, overflow_watermark_buffer1); +} + +TEST_F(WatermarkBufferTest, MoveWatermarksOverflow) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues({{"envoy.buffer.overflow_multiplier", "2"}}); + + int high_watermark_buffer1 = 0; + int low_watermark_buffer1 = 0; + int overflow_watermark_buffer1 = 0; + Buffer::WatermarkBuffer buffer1{[&]() -> void { ++low_watermark_buffer1; }, + [&]() -> void { ++high_watermark_buffer1; }, + [&]() -> void { ++overflow_watermark_buffer1; }}; + buffer1.setWatermarks(5, 10); + buffer1.add(TEN_BYTES, 9); + EXPECT_EQ(0, high_watermark_buffer1); + EXPECT_EQ(0, overflow_watermark_buffer1); + buffer1.setWatermarks(1, 9); + EXPECT_EQ(0, high_watermark_buffer1); + EXPECT_EQ(0, overflow_watermark_buffer1); + buffer1.setWatermarks(1, 8); + EXPECT_EQ(1, high_watermark_buffer1); + EXPECT_EQ(0, overflow_watermark_buffer1); + buffer1.setWatermarks(1, 5); + EXPECT_EQ(1, high_watermark_buffer1); + EXPECT_EQ(0, overflow_watermark_buffer1); + buffer1.setWatermarks(1, 4); + EXPECT_EQ(1, high_watermark_buffer1); + EXPECT_EQ(1, overflow_watermark_buffer1); + + // Overflow is only triggered once + buffer1.setWatermarks(3, 6); + EXPECT_EQ(0, low_watermark_buffer1); + EXPECT_EQ(1, high_watermark_buffer1); + EXPECT_EQ(1, overflow_watermark_buffer1); + buffer1.drain(7); + buffer1.add(TEN_BYTES, 9); + EXPECT_EQ(11, buffer1.length()); + EXPECT_EQ(1, low_watermark_buffer1); + EXPECT_EQ(2, high_watermark_buffer1); + EXPECT_EQ(1, overflow_watermark_buffer1); } } // namespace diff --git a/test/common/buffer/zero_copy_input_stream_test.cc b/test/common/buffer/zero_copy_input_stream_test.cc index 8a35002d91bdd..9ff0ffd6683ef 100644 --- a/test/common/buffer/zero_copy_input_stream_test.cc +++ 
b/test/common/buffer/zero_copy_input_stream_test.cc @@ -90,6 +90,152 @@ TEST_F(ZeroCopyInputStreamTest, Finish) { EXPECT_FALSE(stream_.Next(&data_, &size_)); } +class ZeroCopyInputStreamSkipTest : public testing::Test { +public: + ZeroCopyInputStreamSkipTest() { + Buffer::OwnedImpl buffer; + buffer.addBufferFragment(buffer1_); + buffer.addBufferFragment(buffer2_); + buffer.addBufferFragment(buffer3_); + buffer.addBufferFragment(buffer4_); + + stream_.move(buffer); + } + + const std::string slice1_{"This is the first slice of the message."}; + const std::string slice2_{"This is the second slice of the message."}; + const std::string slice3_{"This is the third slice of the message."}; + const std::string slice4_{"This is the fourth slice of the message."}; + BufferFragmentImpl buffer1_{slice1_.data(), slice1_.size(), nullptr}; + BufferFragmentImpl buffer2_{slice2_.data(), slice2_.size(), nullptr}; + BufferFragmentImpl buffer3_{slice3_.data(), slice3_.size(), nullptr}; + BufferFragmentImpl buffer4_{slice4_.data(), slice4_.size(), nullptr}; + + const size_t total_bytes_{slice1_.size() + slice2_.size() + slice3_.size() + slice4_.size()}; + ZeroCopyInputStreamImpl stream_; + + const void* data_; + int size_; + + // Convert data_ buffer into a string + absl::string_view dataString() const { + return absl::string_view{reinterpret_cast(data_), static_cast(size_)}; + } +}; + +TEST_F(ZeroCopyInputStreamSkipTest, SkipFirstPartialSlice) { + // Only skip the 10 bytes in the first slice. 
+ constexpr int skip_count = 10; + EXPECT_TRUE(stream_.Skip(skip_count)); + + EXPECT_EQ(skip_count, stream_.ByteCount()); + + // Read the first slice + EXPECT_TRUE(stream_.Next(&data_, &size_)); + EXPECT_EQ(slice1_.size() - skip_count, size_); + EXPECT_EQ(slice1_.substr(skip_count), dataString()); + EXPECT_EQ(slice1_.size(), stream_.ByteCount()); +} + +TEST_F(ZeroCopyInputStreamSkipTest, SkipFirstFullSlice) { + // Skip the full first slice + EXPECT_TRUE(stream_.Skip(slice1_.size())); + + EXPECT_EQ(slice1_.size(), stream_.ByteCount()); + + // Read the second slice + EXPECT_TRUE(stream_.Next(&data_, &size_)); + EXPECT_EQ(slice2_.size(), size_); + EXPECT_EQ(slice2_, dataString()); + EXPECT_EQ(slice1_.size() + slice2_.size(), stream_.ByteCount()); +} + +TEST_F(ZeroCopyInputStreamSkipTest, BackUpAndSkipToEndOfSlice) { + // Read the first slice, backUp 10 byes, skip 10 bytes to the end of the first slice. + EXPECT_TRUE(stream_.Next(&data_, &size_)); + EXPECT_EQ(slice1_.size(), size_); + EXPECT_EQ(slice1_, dataString()); + + constexpr int backup_count = 10; + stream_.BackUp(backup_count); + EXPECT_TRUE(stream_.Skip(backup_count)); + + EXPECT_EQ(slice1_.size(), stream_.ByteCount()); + + // Next read is the second slice + EXPECT_TRUE(stream_.Next(&data_, &size_)); + EXPECT_EQ(slice2_.size(), size_); + EXPECT_EQ(slice2_, dataString()); + EXPECT_EQ(slice1_.size() + slice2_.size(), stream_.ByteCount()); +} + +TEST_F(ZeroCopyInputStreamSkipTest, SkipAcrossTwoSlices) { + // Read the first slice, backUp 10 byes, skip 15 bytes; 5 bytes into the second slice. + EXPECT_TRUE(stream_.Next(&data_, &size_)); + EXPECT_EQ(slice1_.size(), size_); + EXPECT_EQ(slice1_, dataString()); + + constexpr int backup_count = 10; // the backup bytes to the end of first slice. 
+ constexpr int skip_count = 5; // The skip bytes in the second slice + stream_.BackUp(backup_count); + EXPECT_TRUE(stream_.Skip(backup_count + skip_count)); + + EXPECT_EQ(slice1_.size() + skip_count, stream_.ByteCount()); + + // Read the remain second slice + EXPECT_TRUE(stream_.Next(&data_, &size_)); + EXPECT_EQ(slice2_.size() - skip_count, size_); + EXPECT_EQ(slice2_.substr(skip_count), dataString()); + EXPECT_EQ(slice1_.size() + slice2_.size(), stream_.ByteCount()); +} + +TEST_F(ZeroCopyInputStreamSkipTest, SkipAcrossThreeSlices) { + // Read the first slice, backUp 10 byes, skip 10 + slice2.size + 5; 5 bytes into the third slice. + EXPECT_TRUE(stream_.Next(&data_, &size_)); + EXPECT_EQ(slice1_.size(), size_); + EXPECT_EQ(slice1_, dataString()); + + constexpr int backup_count = 10; // the backup bytes to the end of first slice. + constexpr int skip_count = 5; // The skip bytes in the third slice + stream_.BackUp(backup_count); + EXPECT_TRUE(stream_.Skip(backup_count + slice2_.size() + skip_count)); + + EXPECT_EQ(slice1_.size() + slice2_.size() + skip_count, stream_.ByteCount()); + + // Read the remain third slice + EXPECT_TRUE(stream_.Next(&data_, &size_)); + EXPECT_EQ(slice3_.size() - skip_count, size_); + EXPECT_EQ(slice3_.substr(skip_count), dataString()); + EXPECT_EQ(slice1_.size() + slice2_.size() + slice3_.size(), stream_.ByteCount()); +} + +TEST_F(ZeroCopyInputStreamSkipTest, SkipToEndOfBuffer) { + // Failed to skip one extra byte + EXPECT_FALSE(stream_.Skip(total_bytes_ + 1)); + + EXPECT_TRUE(stream_.Skip(total_bytes_)); + EXPECT_EQ(total_bytes_, stream_.ByteCount()); + + // Failed to skip one extra byte + EXPECT_FALSE(stream_.Skip(1)); +} + +TEST_F(ZeroCopyInputStreamSkipTest, ReadFirstSkipToTheEnd) { + // Read the first slice, backUp 10 byes, skip to the end of buffer + EXPECT_TRUE(stream_.Next(&data_, &size_)); + EXPECT_EQ(slice1_.size(), size_); + EXPECT_EQ(slice1_, dataString()); + + constexpr int backup_count = 10; // the backup bytes to the end of 
first slice. + stream_.BackUp(backup_count); + + EXPECT_TRUE(stream_.Skip(total_bytes_ - slice1_.size() + backup_count)); + EXPECT_EQ(total_bytes_, stream_.ByteCount()); + + // Failed to skip one extra byte + EXPECT_FALSE(stream_.Skip(1)); +} + } // namespace } // namespace Buffer } // namespace Envoy diff --git a/test/common/common/BUILD b/test/common/common/BUILD index 288afab64682a..21ea6fe21df95 100644 --- a/test/common/common/BUILD +++ b/test/common/common/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_benchmark_test", @@ -9,6 +7,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( @@ -102,10 +102,19 @@ envoy_cc_test( deps = ["//source/common/common:hex_lib"], ) +envoy_cc_test( + name = "linked_object_test", + srcs = ["linked_object_test.cc"], + deps = [ + "//source/common/common:linked_object", + ], +) + envoy_cc_test( name = "log_macros_test", srcs = ["log_macros_test.cc"], deps = [ + "//source/common/common:fancy_logger_lib", "//source/common/common:minimal_logger_lib", "//test/mocks/http:http_mocks", "//test/mocks/network:network_mocks", @@ -114,6 +123,21 @@ envoy_cc_test( ], ) +envoy_cc_benchmark_binary( + name = "logger_speed_test", + srcs = ["logger_speed_test.cc"], + external_deps = ["benchmark"], + deps = [ + "//source/common/common:fancy_logger_lib", + "//source/common/common:minimal_logger_lib", + ], +) + +envoy_benchmark_test( + name = "logger_speed_test_benchmark_test", + benchmark_binary = "logger_speed_test", +) + envoy_cc_test( name = "logger_test", srcs = ["logger_test.cc"], @@ -154,6 +178,16 @@ envoy_cc_test( ], ) +envoy_cc_test( + name = "random_generator_test", + srcs = ["random_generator_test.cc"], + deps = [ + "//source/common/common:random_generator_lib", + "//test/mocks/runtime:runtime_mocks", + "//test/test_common:environment_lib", + ], +) + envoy_cc_test( name = "utility_test", srcs = ["utility_test.cc"], @@ -173,6 +207,8 @@ 
envoy_cc_test( srcs = ["regex_test.cc"], deps = [ "//source/common/common:regex_lib", + "//test/test_common:logging_lib", + "//test/test_common:test_runtime_lib", "//test/test_common:utility_lib", "@envoy_api//envoy/type/matcher/v3:pkg_cc_proto", ], @@ -194,6 +230,15 @@ envoy_cc_test( ], ) +envoy_cc_test( + name = "basic_resource_impl_test", + srcs = ["basic_resource_impl_test.cc"], + deps = [ + "//source/common/common:basic_resource_lib", + "//test/mocks/runtime:runtime_mocks", + ], +) + envoy_cc_test( name = "token_bucket_impl_test", srcs = ["token_bucket_impl_test.cc"], @@ -247,6 +292,16 @@ envoy_cc_test( ], ) +envoy_cc_test( + name = "thread_test", + srcs = ["thread_test.cc"], + deps = [ + "//source/common/common:thread_lib", + "//source/common/common:thread_synchronizer_lib", + "//test/test_common:thread_factory_for_test_lib", + ], +) + envoy_cc_test( name = "stl_helpers_test", srcs = ["stl_helpers_test.cc"], @@ -262,7 +317,7 @@ envoy_cc_test( "abseil_strings", ], deps = [ - "//source/common/common:version_lib", + "//source/common/version:version_lib", ], ) diff --git a/test/common/common/assert_test.cc b/test/common/common/assert_test.cc index 44d65b4957666..880aa0f4f6022 100644 --- a/test/common/common/assert_test.cc +++ b/test/common/common/assert_test.cc @@ -7,7 +7,6 @@ namespace Envoy { TEST(ReleaseAssertDeathTest, VariousLogs) { - Logger::StderrSinkDelegate stderr_sink(Logger::Registry::getSink()); // For coverage build. EXPECT_DEATH({ RELEASE_ASSERT(0, ""); }, ".*assert failure: 0.*"); EXPECT_DEATH({ RELEASE_ASSERT(0, "With some logs"); }, ".*assert failure: 0. Details: With some logs.*"); @@ -42,4 +41,32 @@ TEST(AssertDeathTest, VariousLogs) { EXPECT_EQ(expected_counted_failures, assert_fail_count); } +TEST(EnvoyBugDeathTest, VariousLogs) { + int envoy_bug_fail_count = 0; + // ENVOY_BUG actions only occur on power of two counts. 
+ auto envoy_bug_action_registration = + Assert::setEnvoyBugFailureRecordAction([&]() { envoy_bug_fail_count++; }); + +#ifndef NDEBUG + EXPECT_DEATH({ ENVOY_BUG(false, ""); }, ".*envoy bug failure: false.*"); + EXPECT_DEATH({ ENVOY_BUG(false, ""); }, ".*envoy bug failure: false.*"); + EXPECT_DEATH({ ENVOY_BUG(false, "With some logs"); }, + ".*envoy bug failure: false. Details: With some logs.*"); + EXPECT_EQ(0, envoy_bug_fail_count); +#else + // Same log lines trigger exponential back-off. + for (int i = 0; i < 4; i++) { + ENVOY_BUG(false, ""); + } + // 3 counts because 1st, 2nd, and 4th instances are powers of 2. + EXPECT_EQ(3, envoy_bug_fail_count); + + // Different log lines have separate counters for exponential back-off. + EXPECT_LOG_CONTAINS("error", "envoy bug failure: false", ENVOY_BUG(false, "")); + EXPECT_LOG_CONTAINS("error", "envoy bug failure: false. Details: With some logs", + ENVOY_BUG(false, "With some logs")); + EXPECT_EQ(5, envoy_bug_fail_count); +#endif +} + } // namespace Envoy diff --git a/test/common/common/backoff_strategy_test.cc b/test/common/common/backoff_strategy_test.cc index 5db265e66bfa5..20ffe937065c9 100644 --- a/test/common/common/backoff_strategy_test.cc +++ b/test/common/common/backoff_strategy_test.cc @@ -1,6 +1,6 @@ #include "common/common/backoff_strategy.h" -#include "test/mocks/runtime/mocks.h" +#include "test/mocks/common.h" #include "gtest/gtest.h" @@ -10,7 +10,7 @@ using testing::Return; namespace Envoy { TEST(BackOffStrategyTest, JitteredBackOffBasicFlow) { - NiceMock random; + NiceMock random; ON_CALL(random, random()).WillByDefault(Return(27)); JitteredBackOffStrategy jittered_back_off(25, 30, random); @@ -19,7 +19,7 @@ TEST(BackOffStrategyTest, JitteredBackOffBasicFlow) { } TEST(BackOffStrategyTest, JitteredBackOffBasicReset) { - NiceMock random; + NiceMock random; ON_CALL(random, random()).WillByDefault(Return(27)); JitteredBackOffStrategy jittered_back_off(25, 30, random); @@ -31,7 +31,7 @@ TEST(BackOffStrategyTest, 
JitteredBackOffBasicReset) { } TEST(BackOffStrategyTest, JitteredBackOffDoesntOverflow) { - NiceMock random; + NiceMock random; ON_CALL(random, random()).WillByDefault(Return(std::numeric_limits::max() - 1)); JitteredBackOffStrategy jittered_back_off(1, std::numeric_limits::max(), random); @@ -42,7 +42,7 @@ TEST(BackOffStrategyTest, JitteredBackOffDoesntOverflow) { } TEST(BackOffStrategyTest, JitteredBackOffWithMaxInterval) { - NiceMock random; + NiceMock random; ON_CALL(random, random()).WillByDefault(Return(9999)); JitteredBackOffStrategy jittered_back_off(5, 100, random); @@ -56,7 +56,7 @@ TEST(BackOffStrategyTest, JitteredBackOffWithMaxInterval) { } TEST(BackOffStrategyTest, JitteredBackOffWithMaxIntervalReset) { - NiceMock random; + NiceMock random; ON_CALL(random, random()).WillByDefault(Return(9999)); JitteredBackOffStrategy jittered_back_off(5, 100, random); diff --git a/test/common/common/basic_resource_impl_test.cc b/test/common/common/basic_resource_impl_test.cc new file mode 100644 index 0000000000000..60481535d06cd --- /dev/null +++ b/test/common/common/basic_resource_impl_test.cc @@ -0,0 +1,73 @@ +#include + +#include "common/common/basic_resource_impl.h" + +#include "test/mocks/runtime/mocks.h" + +#include "gtest/gtest.h" + +using testing::NiceMock; +using testing::Return; + +namespace Envoy { + +class BasicResourceLimitImplTest : public testing::Test { +protected: + NiceMock runtime_; +}; + +TEST_F(BasicResourceLimitImplTest, NoArgsConstructorVerifyMax) { + BasicResourceLimitImpl br; + + EXPECT_EQ(br.max(), std::numeric_limits::max()); +} + +TEST_F(BasicResourceLimitImplTest, VerifySetClearMax) { + BasicResourceLimitImpl br(123); + + EXPECT_EQ(br.max(), 123); + br.setMax(321); + EXPECT_EQ(br.max(), 321); + br.resetMax(); + EXPECT_EQ(br.max(), std::numeric_limits::max()); +} + +TEST_F(BasicResourceLimitImplTest, IncDecCount) { + BasicResourceLimitImpl br; + + EXPECT_EQ(br.count(), 0); + br.inc(); + EXPECT_EQ(br.count(), 1); + br.inc(); + br.inc(); + 
EXPECT_EQ(br.count(), 3); + br.dec(); + EXPECT_EQ(br.count(), 2); + br.decBy(2); + EXPECT_EQ(br.count(), 0); +} + +TEST_F(BasicResourceLimitImplTest, CanCreate) { + BasicResourceLimitImpl br(2); + + EXPECT_TRUE(br.canCreate()); + br.inc(); + EXPECT_TRUE(br.canCreate()); + br.inc(); + EXPECT_FALSE(br.canCreate()); + br.dec(); + EXPECT_TRUE(br.canCreate()); + br.dec(); +} + +TEST_F(BasicResourceLimitImplTest, RuntimeMods) { + BasicResourceLimitImpl br(1337, runtime_, "trololo"); + + EXPECT_CALL(runtime_.snapshot_, getInteger("trololo", 1337)).WillOnce(Return(555)); + EXPECT_EQ(br.max(), 555); + + EXPECT_CALL(runtime_.snapshot_, getInteger("trololo", 1337)).WillOnce(Return(1337)); + EXPECT_EQ(br.max(), 1337); +} + +} // namespace Envoy diff --git a/test/common/common/hash_fuzz_test.cc b/test/common/common/hash_fuzz_test.cc index a4b3f6c032a30..c9d84205831e4 100644 --- a/test/common/common/hash_fuzz_test.cc +++ b/test/common/common/hash_fuzz_test.cc @@ -12,7 +12,7 @@ DEFINE_FUZZER(const uint8_t* buf, size_t len) { const std::string input(reinterpret_cast(buf), len); { HashUtil::xxHash64(input); } { HashUtil::djb2CaseInsensitiveHash(input); } - { MurmurHash::murmurHash2_64(input); } + { MurmurHash::murmurHash2(input); } if (len > 0) { // Split the input string into two parts to make a key-value pair. 
const size_t split_point = *reinterpret_cast(buf) % len; diff --git a/test/common/common/hash_test.cc b/test/common/common/hash_test.cc index 4112b67d59c56..facb7be4c1f21 100644 --- a/test/common/common/hash_test.cc +++ b/test/common/common/hash_test.cc @@ -19,22 +19,21 @@ TEST(Hash, djb2CaseInsensitiveHash) { EXPECT_EQ(5381U, HashUtil::djb2CaseInsensitiveHash("")); } -TEST(Hash, murmurHash2_64) { - EXPECT_EQ(9631199822919835226U, MurmurHash::murmurHash2_64("foo")); - EXPECT_EQ(11474628671133349555U, MurmurHash::murmurHash2_64("bar")); - EXPECT_EQ(16306510975912980159U, MurmurHash::murmurHash2_64("foo\nbar")); - EXPECT_EQ(12847078931730529320U, MurmurHash::murmurHash2_64("lyft")); - EXPECT_EQ(6142509188972423790U, MurmurHash::murmurHash2_64("")); +TEST(Hash, murmurHash2) { + EXPECT_EQ(9631199822919835226U, MurmurHash::murmurHash2("foo")); + EXPECT_EQ(11474628671133349555U, MurmurHash::murmurHash2("bar")); + EXPECT_EQ(16306510975912980159U, MurmurHash::murmurHash2("foo\nbar")); + EXPECT_EQ(12847078931730529320U, MurmurHash::murmurHash2("lyft")); + EXPECT_EQ(6142509188972423790U, MurmurHash::murmurHash2("")); } #if __GLIBCXX__ >= 20130411 && __GLIBCXX__ <= 20180726 TEST(Hash, stdhash) { - EXPECT_EQ(std::hash()(std::string("foo")), MurmurHash::murmurHash2_64("foo")); - EXPECT_EQ(std::hash()(std::string("bar")), MurmurHash::murmurHash2_64("bar")); - EXPECT_EQ(std::hash()(std::string("foo\nbar")), - MurmurHash::murmurHash2_64("foo\nbar")); - EXPECT_EQ(std::hash()(std::string("lyft")), MurmurHash::murmurHash2_64("lyft")); - EXPECT_EQ(std::hash()(std::string("")), MurmurHash::murmurHash2_64("")); + EXPECT_EQ(std::hash()(std::string("foo")), MurmurHash::murmurHash2("foo")); + EXPECT_EQ(std::hash()(std::string("bar")), MurmurHash::murmurHash2("bar")); + EXPECT_EQ(std::hash()(std::string("foo\nbar")), MurmurHash::murmurHash2("foo\nbar")); + EXPECT_EQ(std::hash()(std::string("lyft")), MurmurHash::murmurHash2("lyft")); + EXPECT_EQ(std::hash()(std::string("")), 
MurmurHash::murmurHash2("")); } #endif diff --git a/test/common/common/linked_object_test.cc b/test/common/common/linked_object_test.cc new file mode 100644 index 0000000000000..351f2f340d3f8 --- /dev/null +++ b/test/common/common/linked_object_test.cc @@ -0,0 +1,44 @@ +#include "common/common/linked_object.h" + +#include "gtest/gtest.h" + +namespace Envoy { + +class TestObject : public LinkedObject { +public: + TestObject() = default; +}; + +TEST(LinkedObjectTest, MoveIntoListFront) { + std::list> list; + auto object = std::make_unique(); + TestObject* object_ptr = object.get(); + LinkedList::moveIntoList(std::move(object), list); + ASSERT_EQ(1, list.size()); + ASSERT_EQ(object_ptr, list.front().get()); + + auto object2 = std::make_unique(); + TestObject* object2_ptr = object2.get(); + LinkedList::moveIntoList(std::move(object2), list); + ASSERT_EQ(2, list.size()); + ASSERT_EQ(object2_ptr, list.front().get()); + ASSERT_EQ(object_ptr, list.back().get()); +} + +TEST(LinkedObjectTest, MoveIntoListBack) { + std::list> list; + std::unique_ptr object = std::make_unique(); + TestObject* object_ptr = object.get(); + LinkedList::moveIntoListBack(std::move(object), list); + ASSERT_EQ(1, list.size()); + ASSERT_EQ(object_ptr, list.front().get()); + + auto object2 = std::make_unique(); + TestObject* object2_ptr = object2.get(); + LinkedList::moveIntoListBack(std::move(object2), list); + ASSERT_EQ(2, list.size()); + ASSERT_EQ(object2_ptr, list.back().get()); + ASSERT_EQ(object_ptr, list.front().get()); +} + +} // namespace Envoy diff --git a/test/common/common/lock_guard_test.cc b/test/common/common/lock_guard_test.cc index 8cc8eb8b8355c..01b677c8dc48c 100644 --- a/test/common/common/lock_guard_test.cc +++ b/test/common/common/lock_guard_test.cc @@ -5,32 +5,33 @@ namespace Envoy { namespace Thread { +namespace { -class ThreadTest : public testing::Test { +class LockGuardTest : public testing::Test { protected: - ThreadTest() = default; + LockGuardTest() = default; int a_ 
ABSL_GUARDED_BY(a_mutex_){0}; MutexBasicLockable a_mutex_; int b_{0}; }; -TEST_F(ThreadTest, TestLockGuard) { +TEST_F(LockGuardTest, TestLockGuard) { LockGuard lock(a_mutex_); EXPECT_EQ(1, ++a_); } -TEST_F(ThreadTest, TestOptionalLockGuard) { +TEST_F(LockGuardTest, TestOptionalLockGuard) { OptionalLockGuard lock(nullptr); EXPECT_EQ(1, ++b_); } -TEST_F(ThreadTest, TestReleasableLockGuard) { +TEST_F(LockGuardTest, TestReleasableLockGuard) { ReleasableLockGuard lock(a_mutex_); EXPECT_EQ(1, ++a_); lock.release(); } -TEST_F(ThreadTest, TestTryLockGuard) { +TEST_F(LockGuardTest, TestTryLockGuard) { TryLockGuard lock(a_mutex_); if (lock.tryLock()) { @@ -44,5 +45,6 @@ TEST_F(ThreadTest, TestTryLockGuard) { } } +} // namespace } // namespace Thread } // namespace Envoy diff --git a/test/common/common/log_macros_test.cc b/test/common/common/log_macros_test.cc index 9de22d83b26cb..c19fbebfde01a 100644 --- a/test/common/common/log_macros_test.cc +++ b/test/common/common/log_macros_test.cc @@ -1,6 +1,7 @@ #include #include +#include "common/common/fancy_logger.h" #include "common/common/logger.h" #include "test/mocks/http/mocks.h" @@ -48,6 +49,7 @@ TEST(Logger, evaluateParams) { // Log message with higher severity and make sure that params were evaluated. GET_MISC_LOGGER().set_level(spdlog::level::info); ENVOY_LOG_MISC(warn, "test message '{}'", i++); + EXPECT_THAT(i, testing::Eq(2)); } @@ -137,4 +139,46 @@ TEST_F(FormatTest, OutputEscaped) { EXPECT_LOG_CONTAINS_ALL_OF_ESCAPED(message, logMessageEscapeSequences()); } +/** + * Test for Fancy Logger convenient macros. + */ +TEST(Fancy, Global) { + FANCY_LOG(info, "Hello world! Here's a line of fancy log!"); + FANCY_LOG(error, "Fancy Error! 
Here's the second message!"); + + NiceMock connection_; + NiceMock stream_; + FANCY_CONN_LOG(warn, "Fake info {} of connection", connection_, 1); + FANCY_STREAM_LOG(warn, "Fake warning {} of stream", stream_, 1); + + FANCY_LOG(critical, "Critical message for later flush."); + FANCY_FLUSH_LOG(); +} + +TEST(Fancy, SetLevel) { + const char* file = "P=NP_file"; + getFancyContext().setFancyLogger(file, spdlog::level::trace); + + getFancyContext().setFancyLogger(__FILE__, spdlog::level::err); + FANCY_LOG(error, "Fancy Error! Here's a test for level."); + FANCY_LOG(warn, "Warning: you shouldn't see this message!"); +} + +TEST(Fancy, Default) { + getFancyContext().setFancyLogger(__FILE__, spdlog::level::info); // revert to default + std::string fmt = "[%t][%l][%n] %v"; + getFancyContext().setDefaultFancyLevelFormat(spdlog::level::warn, fmt); + FANCY_LOG(info, "Info: you shouldn't see this message!"); + FANCY_LOG(warn, "Warning: warning at default log level!"); + EXPECT_EQ(Logger::Context::getFancyLogFormat(), "[%Y-%m-%d %T.%e][%t][%l][%n] %v"); + EXPECT_EQ(Logger::Context::getFancyDefaultLevel(), spdlog::level::info); +} + +TEST(Fancy, FastPath) { + getFancyContext().setFancyLogger(__FILE__, spdlog::level::info); + for (int i = 0; i < 10; i++) { + FANCY_LOG(warn, "Fake warning No. {}", i); + } +} + } // namespace Envoy diff --git a/test/common/common/logger_speed_test.cc b/test/common/common/logger_speed_test.cc new file mode 100644 index 0000000000000..55f4275db251c --- /dev/null +++ b/test/common/common/logger_speed_test.cc @@ -0,0 +1,139 @@ +#include +#include + +#include "common/common/fancy_logger.h" +#include "common/common/logger.h" + +#include "benchmark/benchmark.h" + +namespace Envoy { + +/** + * Benchmark for the main slow path, i.e. new logger creation here. 
+ */ +static void fancySlowPath(benchmark::State& state) { + FANCY_LOG(info, "Slow path test begins."); + std::atomic logger; + for (auto _ : state) { + UNREFERENCED_PARAMETER(_); + for (int i = 0; i < state.range(0); i++) { + std::string key = "k" + std::to_string(i + (state.thread_index << 8)); + getFancyContext().initFancyLogger(key, logger); + } + } +} + +#define FL FANCY_LOG(trace, "Default") +#define FL_8 \ + FL; \ + FL; \ + FL; \ + FL; \ + FL; \ + FL; \ + FL; \ + FL; +#define FL_64 \ + { FL_8 FL_8 FL_8 FL_8 FL_8 FL_8 FL_8 FL_8 } +#define FL_512 \ + { FL_64 FL_64 FL_64 FL_64 FL_64 FL_64 FL_64 FL_64 } +#define FL_1024 \ + { FL_512 FL_512 } + +/** + * Benchmark for medium path, i.e. new site initialization within the same file. + */ +static void fancyMediumPath(benchmark::State& state) { + FANCY_LOG(info, "Medium path test begins."); + for (auto _ : state) { + UNREFERENCED_PARAMETER(_); + // create different call sites for medium path + for (int i = 0; i < state.range(0); i++) { + FL_1024 + } + } +} + +/** + * Benchmark for fast path, i.e. integration test of common scenario. + */ +static void fancyFastPath(benchmark::State& state) { + // control log length to be the same as normal Envoy below + std::string msg(100 - strlen(__FILE__) + 4, '.'); + spdlog::level::level_enum lv = state.range(1) ? spdlog::level::trace : spdlog::level::info; + getFancyContext().setFancyLogger(FANCY_KEY, lv); + for (auto _ : state) { + UNREFERENCED_PARAMETER(_); + for (int i = 0; i < state.range(0); i++) { + FANCY_LOG(trace, "Fast path: {}", msg); + } + } +} + +/** + * Benchmark for ENVOY_LOG to compare. + */ +static void envoyNormal(benchmark::State& state) { + spdlog::level::level_enum lv = state.range(1) ? 
spdlog::level::trace : spdlog::level::info; + std::string msg(100, '.'); + GET_MISC_LOGGER().set_level(lv); + for (auto _ : state) { + UNREFERENCED_PARAMETER(_); + for (int i = 0; i < state.range(0); i++) { + ENVOY_LOG_MISC(trace, "Fast path: {}", msg); + } + } +} + +/** + * Benchmark for a large number of level setting. + */ +static void fancyLevelSetting(benchmark::State& state) { + FANCY_LOG(info, "Level setting test begins."); + for (auto _ : state) { + UNREFERENCED_PARAMETER(_); + for (int i = 0; i < state.range(0); i++) { + getFancyContext().setFancyLogger(__FILE__, spdlog::level::warn); + } + } +} + +/** + * Comparison with Envoy's level setting. + */ +static void envoyLevelSetting(benchmark::State& state) { + ENVOY_LOG_MISC(info, "Envoy's level setting begins."); + for (auto _ : state) { + UNREFERENCED_PARAMETER(_); + for (int i = 0; i < state.range(0); i++) { + GET_MISC_LOGGER().set_level(spdlog::level::warn); + } + } +} + +/** + * Benchmarks in detail starts. + */ +BENCHMARK(fancySlowPath)->Arg(1 << 10); +BENCHMARK(fancySlowPath)->Arg(1 << 10)->Threads(20)->MeasureProcessCPUTime(); +BENCHMARK(fancySlowPath)->Arg(1 << 10)->Threads(200)->MeasureProcessCPUTime(); + +BENCHMARK(fancyMediumPath)->Arg(1)->Iterations(1); +// Seems medium path's concurrency test doesn't make sense (hard to do as well) + +BENCHMARK(fancyFastPath)->Args({1024, 0})->Args({1024, 1}); // First no actual log, then log +BENCHMARK(fancyFastPath)->Args({1 << 10, 0})->Threads(20)->MeasureProcessCPUTime(); +BENCHMARK(fancyFastPath)->Args({1 << 10, 1})->Threads(20)->MeasureProcessCPUTime(); +BENCHMARK(fancyFastPath)->Args({1 << 10, 0})->Threads(200)->MeasureProcessCPUTime(); +BENCHMARK(fancyFastPath)->Args({1 << 10, 1})->Threads(200)->MeasureProcessCPUTime(); + +BENCHMARK(envoyNormal)->Args({1024, 0})->Args({1024, 1}); +BENCHMARK(envoyNormal)->Args({1 << 10, 0})->Threads(20)->MeasureProcessCPUTime(); +BENCHMARK(envoyNormal)->Args({1 << 10, 1})->Threads(20)->MeasureProcessCPUTime(); 
+BENCHMARK(envoyNormal)->Args({1 << 10, 0})->Threads(200)->MeasureProcessCPUTime(); +BENCHMARK(envoyNormal)->Args({1 << 10, 1})->Threads(200)->MeasureProcessCPUTime(); + +BENCHMARK(fancyLevelSetting)->Arg(1 << 10); +BENCHMARK(envoyLevelSetting)->Arg(1 << 10); + +} // namespace Envoy diff --git a/test/common/common/logger_test.cc b/test/common/common/logger_test.cc index 9320078e881a3..8e4d8839852dd 100644 --- a/test/common/common/logger_test.cc +++ b/test/common/common/logger_test.cc @@ -8,30 +8,32 @@ namespace Envoy { namespace Logger { -class LoggerEscapeTest : public testing::Test {}; +TEST(LoggerTest, StackingStderrSinkDelegate) { + StderrSinkDelegate stacked(Envoy::Logger::Registry::getSink()); +} -TEST_F(LoggerEscapeTest, LinuxEOL) { +TEST(LoggerEscapeTest, LinuxEOL) { EXPECT_EQ("line 1 \\n line 2\n", DelegatingLogSink::escapeLogLine("line 1 \n line 2\n")); } -TEST_F(LoggerEscapeTest, WindowEOL) { +TEST(LoggerEscapeTest, WindowEOL) { EXPECT_EQ("line 1 \\n line 2\r\n", DelegatingLogSink::escapeLogLine("line 1 \n line 2\r\n")); } -TEST_F(LoggerEscapeTest, NoTrailingWhitespace) { +TEST(LoggerEscapeTest, NoTrailingWhitespace) { EXPECT_EQ("line 1 \\n line 2", DelegatingLogSink::escapeLogLine("line 1 \n line 2")); } -TEST_F(LoggerEscapeTest, NoWhitespace) { +TEST(LoggerEscapeTest, NoWhitespace) { EXPECT_EQ("line1", DelegatingLogSink::escapeLogLine("line1")); } -TEST_F(LoggerEscapeTest, AnyTrailingWhitespace) { +TEST(LoggerEscapeTest, AnyTrailingWhitespace) { EXPECT_EQ("line 1 \\t tab 1 \\n line 2\t\n", DelegatingLogSink::escapeLogLine("line 1 \t tab 1 \n line 2\t\n")); } -TEST_F(LoggerEscapeTest, WhitespaceOnly) { +TEST(LoggerEscapeTest, WhitespaceOnly) { // 8 spaces EXPECT_EQ(" ", DelegatingLogSink::escapeLogLine(" ")); @@ -39,7 +41,7 @@ TEST_F(LoggerEscapeTest, WhitespaceOnly) { EXPECT_EQ("\r\n\t \r\n \n", DelegatingLogSink::escapeLogLine("\r\n\t \r\n \n")); } -TEST_F(LoggerEscapeTest, Empty) { EXPECT_EQ("", DelegatingLogSink::escapeLogLine("")); } 
+TEST(LoggerEscapeTest, Empty) { EXPECT_EQ("", DelegatingLogSink::escapeLogLine("")); } } // namespace Logger } // namespace Envoy diff --git a/test/common/common/random_generator_test.cc b/test/common/common/random_generator_test.cc new file mode 100644 index 0000000000000..b2098f987b62a --- /dev/null +++ b/test/common/common/random_generator_test.cc @@ -0,0 +1,72 @@ +#include + +#include "common/common/random_generator.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Random { +namespace { + +TEST(Random, DISABLED_benchmarkRandom) { + Random::RandomGeneratorImpl random; + + for (size_t i = 0; i < 1000000000; ++i) { + random.random(); + } +} + +TEST(Random, SanityCheckOfUniquenessRandom) { + Random::RandomGeneratorImpl random; + std::set results; + const size_t num_of_results = 1000000; + + for (size_t i = 0; i < num_of_results; ++i) { + results.insert(random.random()); + } + + EXPECT_EQ(num_of_results, results.size()); +} + +TEST(Random, SanityCheckOfStdLibRandom) { + Random::RandomGeneratorImpl random; + + static const auto num_of_items = 100; + std::vector v(num_of_items); + std::iota(v.begin(), v.end(), 0); + + static const auto num_of_checks = 10000; + for (size_t i = 0; i < num_of_checks; ++i) { + const auto prev = v; + std::shuffle(v.begin(), v.end(), random); + EXPECT_EQ(v.size(), prev.size()); + EXPECT_NE(v, prev); + EXPECT_FALSE(std::is_sorted(v.begin(), v.end())); + } +} + +TEST(UUID, CheckLengthOfUUID) { + Random::RandomGeneratorImpl random; + + std::string result = random.uuid(); + + size_t expected_length = 36; + EXPECT_EQ(expected_length, result.length()); +} + +TEST(UUID, SanityCheckOfUniqueness) { + std::set uuids; + const size_t num_of_uuids = 100000; + + Random::RandomGeneratorImpl random; + for (size_t i = 0; i < num_of_uuids; ++i) { + uuids.insert(random.uuid()); + } + + EXPECT_EQ(num_of_uuids, uuids.size()); +} + +} // namespace +} // namespace Random +} // namespace Envoy diff --git 
a/test/common/common/regex_test.cc b/test/common/common/regex_test.cc index b5848799acafb..5b1d9bdd4bf4e 100644 --- a/test/common/common/regex_test.cc +++ b/test/common/common/regex_test.cc @@ -3,6 +3,8 @@ #include "common/common/regex.h" +#include "test/test_common/logging.h" +#include "test/test_common/test_runtime.h" #include "test/test_common/utility.h" #include "gtest/gtest.h" @@ -58,11 +60,24 @@ TEST(Utility, ParseRegex) { EXPECT_TRUE(compiled_matcher->match(long_string)); } - // Verify max program size. + // Positive case to ensure no max program size is enforced. { + TestScopedRuntime scoped_runtime; + envoy::type::matcher::v3::RegexMatcher matcher; + matcher.set_regex("/asdf/.*"); + matcher.mutable_google_re2(); + EXPECT_NO_THROW(Utility::parseRegex(matcher)); + } + + // Verify max program size with the deprecated field codepath plus runtime. + // The deprecated field codepath precedes any runtime settings. + { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"re2.max_program_size.error_level", "3"}}); envoy::type::matcher::v3::RegexMatcher matcher; - matcher.mutable_google_re2()->mutable_max_program_size()->set_value(1); matcher.set_regex("/asdf/.*"); + matcher.mutable_google_re2()->mutable_max_program_size()->set_value(1); #ifndef GTEST_USES_SIMPLE_RE EXPECT_THROW_WITH_REGEX(Utility::parseRegex(matcher), EnvoyException, "RE2 program size of [0-9]+ > max program size of 1\\."); @@ -71,6 +86,75 @@ TEST(Utility, ParseRegex) { "RE2 program size of \\d+ > max program size of 1\\."); #endif } + + // Verify that an exception is thrown for the error level max program size. 
+ { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"re2.max_program_size.error_level", "1"}}); + envoy::type::matcher::v3::RegexMatcher matcher; + matcher.set_regex("/asdf/.*"); + matcher.mutable_google_re2(); +#ifndef GTEST_USES_SIMPLE_RE + EXPECT_THROW_WITH_REGEX( + Utility::parseRegex(matcher), EnvoyException, + "RE2 program size of [0-9]+ > max program size of 1 set for the error level threshold\\."); +#else + EXPECT_THROW_WITH_REGEX( + Utility::parseRegex(matcher), EnvoyException, + "RE2 program size of \\d+ > max program size of 1 set for the error level threshold\\."); +#endif + } + + // Verify that the error level max program size defaults to 100 if not set by runtime. + { + TestScopedRuntime scoped_runtime; + envoy::type::matcher::v3::RegexMatcher matcher; + matcher.set_regex( + "/asdf/.*/asdf/.*/asdf/.*/asdf/.*/asdf/.*/asdf/.*/asdf/.*/asdf/.*/asdf/.*/asdf/.*"); + matcher.mutable_google_re2(); +#ifndef GTEST_USES_SIMPLE_RE + EXPECT_THROW_WITH_REGEX(Utility::parseRegex(matcher), EnvoyException, + "RE2 program size of [0-9]+ > max program size of 100 set for the " + "error level threshold\\."); +#else + EXPECT_THROW_WITH_REGEX( + Utility::parseRegex(matcher), EnvoyException, + "RE2 program size of \\d+ > max program size of 100 set for the error level threshold\\."); +#endif + } + + // Verify that a warning is logged for the warn level max program size. 
+ { + TestScopedRuntime scoped_runtime; + Envoy::Stats::Counter& warn_count = + Runtime::LoaderSingleton::getExisting()->getRootScope().counterFromString( + "re2.exceeded_warn_level"); + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"re2.max_program_size.warn_level", "1"}}); + envoy::type::matcher::v3::RegexMatcher matcher; + matcher.set_regex("/asdf/.*"); + matcher.mutable_google_re2(); + EXPECT_NO_THROW(Utility::parseRegex(matcher)); + EXPECT_EQ(1, warn_count.value()); + EXPECT_LOG_CONTAINS("warn", "> max program size of 1 set for the warn level threshold", + Utility::parseRegex(matcher)); + EXPECT_EQ(2, warn_count.value()); + } + + // Verify that no check is performed if the warn level max program size is not set by runtime. + { + TestScopedRuntime scoped_runtime; + Envoy::Stats::Counter& warn_count = + Runtime::LoaderSingleton::getExisting()->getRootScope().counterFromString( + "re2.exceeded_warn_level"); + envoy::type::matcher::v3::RegexMatcher matcher; + matcher.set_regex("/asdf/.*"); + matcher.mutable_google_re2(); + EXPECT_NO_THROW(Utility::parseRegex(matcher)); + EXPECT_LOG_NOT_CONTAINS("warn", "> max program size", Utility::parseRegex(matcher)); + EXPECT_EQ(0, warn_count.value()); + } } } // namespace diff --git a/test/common/common/thread_test.cc b/test/common/common/thread_test.cc new file mode 100644 index 0000000000000..9dac043921f72 --- /dev/null +++ b/test/common/common/thread_test.cc @@ -0,0 +1,251 @@ +#include + +#include "common/common/thread.h" +#include "common/common/thread_synchronizer.h" + +#include "test/test_common/thread_factory_for_test.h" + +#include "absl/strings/str_cat.h" +#include "absl/synchronization/notification.h" +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Thread { +namespace { + +class ThreadAsyncPtrTest : public testing::Test { +protected: + ThreadFactory& thread_factory_{threadFactoryForTest()}; +}; + +// Tests that two threads racing to create an object have well-defined 
+// behavior. +TEST_F(ThreadAsyncPtrTest, DeleteOnDestruct) { + AtomicPtr str; + ThreadSynchronizer sync; + sync.enable(); + sync.waitOn("creator"); + + // On thread1, we will lazily instantiate the string as "thread1". However + // in the creation function we will block on a sync-point. + auto thread1 = thread_factory_.createThread( + [&str, &sync]() { + str.get([&sync]() -> std::string* { + sync.syncPoint("creator"); + return new std::string("thread1"); + }); + }, + Options{"thread1"}); + EXPECT_EQ("thread1", thread1->name()); + + sync.barrierOn("creator"); + + // Now spawn a separate thread that will attempt to lazy-initialize the + // string as "thread2", but that allocator will never run because + // the allocator on thread1 has already locked the AtomicPtr's mutex. + auto thread2 = thread_factory_.createThread( + [&str]() { str.get([]() -> std::string* { return new std::string("thread2"); }); }, + Options{"thread2"}); + EXPECT_EQ("thread2", thread2->name()); + + // Now let thread1's initializer finish. + sync.signal("creator"); + thread1->join(); + thread2->join(); + + // Now ensure the "thread1" value sticks past the thread lifetimes. + bool called = false; + EXPECT_EQ("thread1", *str.get([&called]() -> std::string* { + called = true; + return nullptr; + })); + EXPECT_FALSE(called); +} + +// Same test as AtomicPtrDeleteOnDestruct, except the allocator callbacks return +// pointers to locals, rather than allocating the strings on the heap. +TEST_F(ThreadAsyncPtrTest, DoNotDelete) { + const std::string thread1_str("thread1"); + const std::string thread2_str("thread2"); + AtomicPtr str; + ThreadSynchronizer sync; + sync.enable(); + sync.waitOn("creator"); + + // On thread1, we will lazily instantiate the string as "thread1". However + // in the creation function we will block on a sync-point. 
+ auto thread1 = thread_factory_.createThread( + [&str, &sync, &thread1_str]() { + str.get([&sync, &thread1_str]() -> const std::string* { + sync.syncPoint("creator"); + return &thread1_str; + }); + }, + Options{"thread1"}); + + sync.barrierOn("creator"); + + // Now spawn a separate thread that will attempt to lazy-initialize the + // string as "thread2", but that allocator will never run because + // the allocator on thread1 has already locked the AtomicPtr's mutex. + auto thread2 = thread_factory_.createThread( + [&str, &thread2_str]() { + str.get([&thread2_str]() -> const std::string* { return &thread2_str; }); + }, + Options{"thread2"}); + + // Now let thread1's initializer finish. + sync.signal("creator"); + thread1->join(); + thread2->join(); + + // Now ensure the "thread1" value sticks past the thread lifetimes. + bool called = false; + EXPECT_EQ("thread1", *str.get([&called]() -> std::string* { + called = true; + return nullptr; + })); + EXPECT_FALSE(called); +} + +TEST_F(ThreadAsyncPtrTest, ThreadSpammer) { + AtomicPtr str; + absl::Notification go; + constexpr uint32_t num_threads = 100; + AtomicPtr answer; + uint32_t calls = 0; + auto thread_fn = [&go, &answer, &calls]() { + go.WaitForNotification(); + answer.get([&calls]() { + ++calls; + return new uint32_t(42); + }); + }; + std::vector threads; + for (uint32_t i = 0; i < num_threads; ++i) { + std::string name = absl::StrCat("thread", i); + threads.emplace_back(thread_factory_.createThread(thread_fn, Options{name})); + EXPECT_EQ(name, threads.back()->name()); + } + EXPECT_EQ(0, calls); + go.Notify(); + for (auto& thread : threads) { + thread->join(); + } + EXPECT_EQ(1, calls); + EXPECT_EQ(42, *answer.get([&calls]() { + ++calls; + return nullptr; + })); + EXPECT_EQ(1, calls); +} + +// Tests that null can be allocated, but the allocator will be re-called each +// time until a non-null result is returned. 
+TEST_F(ThreadAsyncPtrTest, Null) { + AtomicPtr str; + uint32_t calls = 0; + EXPECT_EQ(nullptr, str.get([&calls]() -> std::string* { + ++calls; + return nullptr; + })); + EXPECT_EQ(nullptr, str.get([&calls]() -> std::string* { + ++calls; + return nullptr; + })); + EXPECT_EQ(2, calls); + EXPECT_EQ("x", *str.get([&calls]() -> std::string* { + ++calls; + return new std::string("x"); + })); + EXPECT_EQ(3, calls); + EXPECT_EQ("x", *str.get([&calls]() -> std::string* { + ++calls; + return nullptr; + })); + EXPECT_EQ(3, calls); // allocator was not called this last time. +} + +// Tests array semantics. Note that AtomicPtr is implemented a 1-element +// AtomicPtrArray, so there's no need to repeat the complex thread-race test +// from AtomicPtr. +TEST_F(ThreadAsyncPtrTest, Array) { + const uint32_t size = 5; + AtomicPtrArray strs; + for (uint32_t i = 0; i < size; ++i) { + std::string val = absl::StrCat("x", i); + EXPECT_EQ(val, *strs.get(i, [&val]() -> std::string* { return new std::string(val); })); + } + for (uint32_t i = 0; i < size; ++i) { + std::string val = absl::StrCat("x", i); + // Second time through the array, the allocator will not be called, but + // we'll have all the expected values returned from get. 
+ bool called = false; + EXPECT_EQ(val, *strs.get(i, [&called]() -> std::string* { + called = true; + return nullptr; + })); + EXPECT_FALSE(called); + } +} + +TEST_F(ThreadAsyncPtrTest, ManagedAlloc) { + const uint32_t size = 5; + std::vector> pool; + AtomicPtrArray strs; + for (uint32_t i = 0; i < size; ++i) { + std::string val = absl::StrCat("x", i); + EXPECT_EQ(val, *strs.get(i, [&pool, &val]() -> std::string* { + pool.emplace_back(std::make_unique(val)); + return pool.back().get(); + })); + } +} + +TEST_F(ThreadAsyncPtrTest, TruncateWait) { + absl::Notification notify; + auto thread = thread_factory_.createThread([¬ify]() { notify.WaitForNotification(); }, + Options{"this name is way too long for posix"}); + notify.Notify(); + + // To make this test work on multiple platforms, just assume the first 10 characters + // are retained. + EXPECT_THAT(thread->name(), testing::StartsWith("this name ")); + thread->join(); +} + +TEST_F(ThreadAsyncPtrTest, TruncateNoWait) { + auto thread = + thread_factory_.createThread([]() {}, Options{"this name is way too long for posix"}); + + // In general, across platforms, just assume the first 10 characters are + // retained. + EXPECT_THAT(thread->name(), testing::StartsWith("this name ")); + + // On Linux we can check for 15 exactly. +#ifdef __linux__ + EXPECT_EQ("this name is wa", thread->name()) << "truncated to 15 chars"; +#endif + + thread->join(); +} + +TEST_F(ThreadAsyncPtrTest, NameNotSpecifiedWait) { + absl::Notification notify; + auto thread = thread_factory_.createThread([¬ify]() { notify.WaitForNotification(); }); + notify.Notify(); + + // For linux builds, the thread name defaults to the name of the + // binary. However the name of the binary is different depending on whether + // this is a coverage test or not. Currently, this population does not occur + // for Mac or Windows. 
+#ifdef __linux__ + EXPECT_FALSE(thread->name().empty()); +#endif + thread->join(); +} + +} // namespace +} // namespace Thread +} // namespace Envoy diff --git a/test/common/common/utility_fuzz_test.cc b/test/common/common/utility_fuzz_test.cc index e7daf80a452e7..c4f468c917257 100644 --- a/test/common/common/utility_fuzz_test.cc +++ b/test/common/common/utility_fuzz_test.cc @@ -53,6 +53,20 @@ DEFINE_FUZZER(const uint8_t* buf, size_t len) { StringUtil::cropRight(string_buffer.substr(0, split_point), string_buffer.substr(split_point)); } + { + const std::string string_buffer(reinterpret_cast(buf), len); + + // sample random bit to use as the whitespace flag + bool trimWhitespace = split_point & 1; + const size_t split_point2 = + len > 1 ? reinterpret_cast(buf)[1] % len : split_point; + const size_t split1 = std::min(split_point, split_point2); + const size_t split2 = std::max(split_point, split_point2); + + StringUtil::findToken(string_buffer.substr(0, split1), + string_buffer.substr(split1, split2 - split2), + string_buffer.substr(split2), trimWhitespace); + } } } diff --git a/test/common/common/utility_speed_test.cc b/test/common/common/utility_speed_test.cc index c95af89c3a3cd..a0563263b0768 100644 --- a/test/common/common/utility_speed_test.cc +++ b/test/common/common/utility_speed_test.cc @@ -62,6 +62,34 @@ static void BM_DateTimeFormatterWithSubseconds(benchmark::State& state) { } BENCHMARK(BM_DateTimeFormatterWithSubseconds); +// This benchmark is basically similar with the above BM_DateTimeFormatterWithSubseconds, the +// differences are: 1. the format string input is long with duplicated subseconds. 2. The purpose +// is to test DateFormatter.parse() which is called in constructor. 
+// NOLINTNEXTLINE(readability-identifier-naming) +static void BM_DateTimeFormatterWithLongSubsecondsString(benchmark::State& state) { + int outputBytes = 0; + + Envoy::SystemTime time(std::chrono::seconds(1522796769)); + std::mt19937 prng(1); + std::uniform_int_distribution distribution(-10, 20); + std::string input; + int num_duplicates = 400; + std::string duplicate_input = "%%1f %1f, %2f, %3f, %4f, "; + for (int i = 0; i < num_duplicates; i++) { + absl::StrAppend(&input, duplicate_input, "("); + } + absl::StrAppend(&input, duplicate_input); + + for (auto _ : state) { + Envoy::DateFormatter date_formatter(input); + time += std::chrono::milliseconds(static_cast(distribution(prng))); + outputBytes += date_formatter.fromTime(time).length(); + } + benchmark::DoNotOptimize(outputBytes); +} +BENCHMARK(BM_DateTimeFormatterWithLongSubsecondsString); + +// NOLINTNEXTLINE(readability-identifier-naming) static void BM_DateTimeFormatterWithoutSubseconds(benchmark::State& state) { int outputBytes = 0; diff --git a/test/common/common/utility_test.cc b/test/common/common/utility_test.cc index bb326989b94ff..cda2a65f807ae 100644 --- a/test/common/common/utility_test.cc +++ b/test/common/common/utility_test.cc @@ -18,6 +18,10 @@ #include "gtest/gtest.h" using testing::ContainerEq; +#ifdef WIN32 +using testing::HasSubstr; +using testing::Not; +#endif namespace Envoy { @@ -813,6 +817,26 @@ TEST(DateFormatter, FromTime) { EXPECT_EQ("aaa00", DateFormatter(std::string(3, 'a') + "%H").fromTime(time2)); } +// Check the time complexity. Make sure DateFormatter can finish parsing long messy string without +// crashing/freezing. This should pass in 0-2 seconds if O(n). 
Finish in 30-120 seconds if O(n^2) +TEST(DateFormatter, ParseLongString) { + std::string input; + std::string expected_output; + int num_duplicates = 400; + std::string duplicate_input = "%%1f %1f, %2f, %3f, %4f, "; + std::string duplicate_output = "%1 1, 14, 142, 1420, "; + for (int i = 0; i < num_duplicates; i++) { + absl::StrAppend(&input, duplicate_input, "("); + absl::StrAppend(&expected_output, duplicate_output, "("); + } + absl::StrAppend(&input, duplicate_input); + absl::StrAppend(&expected_output, duplicate_output); + + const SystemTime time1(std::chrono::seconds(1522796769) + std::chrono::milliseconds(142)); + std::string output = DateFormatter(input).fromTime(time1); + EXPECT_EQ(expected_output, output); +} + // Verify that two DateFormatter patterns with the same ??? patterns but // different format strings don't false share cache entries. This is a // regression test for when they did. @@ -875,4 +899,26 @@ TEST(InlineStorageTest, InlineString) { EXPECT_EQ("Hello, world!", hello->toString()); } +#ifdef WIN32 +TEST(ErrorDetailsTest, WindowsFormatMessage) { + // winsock2 error + EXPECT_NE(errorDetails(SOCKET_ERROR_AGAIN), ""); + EXPECT_THAT(errorDetails(SOCKET_ERROR_AGAIN), Not(HasSubstr("\r\n"))); + EXPECT_NE(errorDetails(SOCKET_ERROR_AGAIN), "Unknown error"); + + // winsock2 error with a long message + EXPECT_NE(errorDetails(SOCKET_ERROR_MSG_SIZE), ""); + EXPECT_THAT(errorDetails(SOCKET_ERROR_MSG_SIZE), Not(HasSubstr("\r\n"))); + EXPECT_NE(errorDetails(SOCKET_ERROR_MSG_SIZE), "Unknown error"); + + // regular Windows error + EXPECT_NE(errorDetails(ERROR_FILE_NOT_FOUND), ""); + EXPECT_THAT(errorDetails(ERROR_FILE_NOT_FOUND), Not(HasSubstr("\r\n"))); + EXPECT_NE(errorDetails(ERROR_FILE_NOT_FOUND), "Unknown error"); + + // invalid error code + EXPECT_EQ(errorDetails(99999), "Unknown error"); +} +#endif + } // namespace Envoy diff --git a/test/common/common/version_test.cc b/test/common/common/version_test.cc index 8dee39254f26d..5177f5ac1661f 100644 --- 
a/test/common/common/version_test.cc +++ b/test/common/common/version_test.cc @@ -1,4 +1,4 @@ -#include "common/common/version.h" +#include "common/version/version.h" #include "absl/strings/str_cat.h" #include "gmock/gmock.h" diff --git a/test/common/compressor/BUILD b/test/common/compressor/BUILD deleted file mode 100644 index e58b835ab69a7..0000000000000 --- a/test/common/compressor/BUILD +++ /dev/null @@ -1,33 +0,0 @@ -licenses(["notice"]) # Apache 2 - -load( - "//bazel:envoy_build_system.bzl", - "envoy_cc_fuzz_test", - "envoy_cc_test", - "envoy_package", -) - -envoy_package() - -envoy_cc_fuzz_test( - name = "compressor_fuzz_test", - srcs = ["compressor_fuzz_test.cc"], - corpus = "compressor_corpus", - deps = [ - "//source/common/buffer:buffer_lib", - "//source/common/common:assert_lib", - "//source/common/compressor:compressor_lib", - "//source/common/decompressor:decompressor_lib", - ], -) - -envoy_cc_test( - name = "compressor_test", - srcs = ["zlib_compressor_impl_test.cc"], - deps = [ - "//source/common/common:assert_lib", - "//source/common/common:hex_lib", - "//source/common/compressor:compressor_lib", - "//test/test_common:utility_lib", - ], -) diff --git a/test/common/compressor/zlib_compressor_impl_test.cc b/test/common/compressor/zlib_compressor_impl_test.cc deleted file mode 100644 index 3e2db26f4d432..0000000000000 --- a/test/common/compressor/zlib_compressor_impl_test.cc +++ /dev/null @@ -1,207 +0,0 @@ -#include "common/buffer/buffer_impl.h" -#include "common/common/hex.h" -#include "common/compressor/zlib_compressor_impl.h" - -#include "test/test_common/utility.h" - -#include "absl/container/fixed_array.h" -#include "gtest/gtest.h" - -namespace Envoy { -namespace Compressor { -namespace { - -class ZlibCompressorImplTest : public testing::Test { -protected: - void expectValidFlushedBuffer(const Buffer::OwnedImpl& output_buffer) { - Buffer::RawSliceVector compressed_slices = output_buffer.getRawSlices(); - const uint64_t num_comp_slices = 
compressed_slices.size(); - - const std::string header_hex_str = Hex::encode( - reinterpret_cast(compressed_slices[0].mem_), compressed_slices[0].len_); - - // HEADER 0x1f = 31 (window_bits) - EXPECT_EQ("1f8b", header_hex_str.substr(0, 4)); - // CM 0x8 = deflate (compression method) - EXPECT_EQ("08", header_hex_str.substr(4, 2)); - - const std::string footer_hex_str = - Hex::encode(reinterpret_cast(compressed_slices[num_comp_slices - 1].mem_), - compressed_slices[num_comp_slices - 1].len_); - // FOOTER four-byte sequence (sync flush) - EXPECT_EQ("0000ffff", footer_hex_str.substr(footer_hex_str.size() - 8, 10)); - } - - void expectValidFinishedBuffer(const Buffer::OwnedImpl& output_buffer, - const uint32_t input_size) { - Buffer::RawSliceVector compressed_slices = output_buffer.getRawSlices(); - const uint64_t num_comp_slices = compressed_slices.size(); - - const std::string header_hex_str = Hex::encode( - reinterpret_cast(compressed_slices[0].mem_), compressed_slices[0].len_); - // HEADER 0x1f = 31 (window_bits) - EXPECT_EQ("1f8b", header_hex_str.substr(0, 4)); - // CM 0x8 = deflate (compression method) - EXPECT_EQ("08", header_hex_str.substr(4, 2)); - - const std::string footer_bytes_str = - Hex::encode(reinterpret_cast(compressed_slices[num_comp_slices - 1].mem_), - compressed_slices[num_comp_slices - 1].len_); - - // A valid finished compressed buffer should have trailer with input size in it. 
- expectEqualInputSize(footer_bytes_str, input_size); - } - - void expectEqualInputSize(const std::string& footer_bytes, const uint32_t input_size) { - const std::string size_bytes = footer_bytes.substr(footer_bytes.size() - 8, 8); - uint64_t size; - StringUtil::atoull(size_bytes.c_str(), size, 16); - EXPECT_EQ(TestUtility::flipOrder(size), input_size); - } - - void drainBuffer(Buffer::OwnedImpl& buffer) { buffer.drain(buffer.length()); } - - static constexpr int64_t gzip_window_bits{31}; - static constexpr int64_t memory_level{8}; - static constexpr uint64_t default_input_size{796}; -}; - -class ZlibCompressorImplTester : public ZlibCompressorImpl { -public: - ZlibCompressorImplTester() = default; - ZlibCompressorImplTester(uint64_t chunk_size) : ZlibCompressorImpl(chunk_size) {} - void compressThenFlush(Buffer::OwnedImpl& buffer) { compress(buffer, State::Flush); } - void finish(Buffer::OwnedImpl& buffer) { compress(buffer, State::Finish); } -}; - -class ZlibCompressorImplDeathTest : public ZlibCompressorImplTest { -protected: - static void compressorBadInitTestHelper(int64_t window_bits, int64_t mem_level) { - ZlibCompressorImpl compressor; - compressor.init(ZlibCompressorImpl::CompressionLevel::Standard, - ZlibCompressorImpl::CompressionStrategy::Standard, window_bits, mem_level); - } - - static void uninitializedCompressorTestHelper() { - Buffer::OwnedImpl buffer; - ZlibCompressorImplTester compressor; - TestUtility::feedBufferWithRandomCharacters(buffer, 100); - compressor.finish(buffer); - } - - static void uninitializedCompressorFlushTestHelper() { - Buffer::OwnedImpl buffer; - ZlibCompressorImplTester compressor; - compressor.compressThenFlush(buffer); - } - - static void uninitializedCompressorFinishTestHelper() { - Buffer::OwnedImpl buffer; - ZlibCompressorImplTester compressor; - compressor.finish(buffer); - } -}; - -// Exercises death by passing bad initialization params or by calling -// compress before init. 
-TEST_F(ZlibCompressorImplDeathTest, CompressorDeathTest) { - EXPECT_DEATH_LOG_TO_STDERR(compressorBadInitTestHelper(100, 8), "assert failure: result >= 0"); - EXPECT_DEATH_LOG_TO_STDERR(compressorBadInitTestHelper(31, 10), "assert failure: result >= 0"); - EXPECT_DEATH_LOG_TO_STDERR(uninitializedCompressorTestHelper(), "assert failure: result == Z_OK"); - EXPECT_DEATH_LOG_TO_STDERR(uninitializedCompressorFlushTestHelper(), - "assert failure: result == Z_OK"); - EXPECT_DEATH_LOG_TO_STDERR(uninitializedCompressorFinishTestHelper(), - "assert failure: result == Z_STREAM_END"); -} - -// Exercises compressor's checksum by calling it before init or compress. -TEST_F(ZlibCompressorImplTest, CallingChecksum) { - Buffer::OwnedImpl buffer; - - ZlibCompressorImplTester compressor; - EXPECT_EQ(0, compressor.checksum()); - - compressor.init(Envoy::Compressor::ZlibCompressorImpl::CompressionLevel::Standard, - Envoy::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, - gzip_window_bits, memory_level); - EXPECT_EQ(0, compressor.checksum()); - - TestUtility::feedBufferWithRandomCharacters(buffer, 4096); - compressor.compressThenFlush(buffer); - expectValidFlushedBuffer(buffer); - - drainBuffer(buffer); - EXPECT_TRUE(compressor.checksum() > 0); -} - -// Exercises compressor's checksum by calling it before init or compress. 
-TEST_F(ZlibCompressorImplTest, CallingFinishOnly) { - Buffer::OwnedImpl buffer; - - ZlibCompressorImplTester compressor; - compressor.init(Envoy::Compressor::ZlibCompressorImpl::CompressionLevel::Standard, - Envoy::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, - gzip_window_bits, memory_level); - EXPECT_EQ(0, compressor.checksum()); - - TestUtility::feedBufferWithRandomCharacters(buffer, 4096); - compressor.finish(buffer); - expectValidFinishedBuffer(buffer, 4096); -} - -TEST_F(ZlibCompressorImplTest, CompressWithSmallChunkSize) { - Buffer::OwnedImpl buffer; - Buffer::OwnedImpl accumulation_buffer; - - ZlibCompressorImplTester compressor(8); - compressor.init(ZlibCompressorImpl::CompressionLevel::Standard, - ZlibCompressorImpl::CompressionStrategy::Standard, gzip_window_bits, - memory_level); - - uint64_t input_size = 0; - for (uint64_t i = 0; i < 10; i++) { - TestUtility::feedBufferWithRandomCharacters(buffer, default_input_size * i, i); - ASSERT_EQ(default_input_size * i, buffer.length()); - input_size += buffer.length(); - compressor.compressThenFlush(buffer); - accumulation_buffer.add(buffer); - drainBuffer(buffer); - ASSERT_EQ(0, buffer.length()); - } - expectValidFlushedBuffer(accumulation_buffer); - - compressor.finish(buffer); - accumulation_buffer.add(buffer); - expectValidFinishedBuffer(accumulation_buffer, input_size); -} - -// Exercises compression with other supported zlib initialization params. 
-TEST_F(ZlibCompressorImplTest, CompressWithNotCommonParams) { - Buffer::OwnedImpl buffer; - Buffer::OwnedImpl accumulation_buffer; - - ZlibCompressorImplTester compressor; - compressor.init(ZlibCompressorImpl::CompressionLevel::Speed, - ZlibCompressorImpl::CompressionStrategy::Rle, gzip_window_bits, 1); - - uint64_t input_size = 0; - for (uint64_t i = 0; i < 10; i++) { - TestUtility::feedBufferWithRandomCharacters(buffer, default_input_size * i, i); - ASSERT_EQ(default_input_size * i, buffer.length()); - input_size += buffer.length(); - compressor.compressThenFlush(buffer); - accumulation_buffer.add(buffer); - drainBuffer(buffer); - ASSERT_EQ(0, buffer.length()); - } - - expectValidFlushedBuffer(accumulation_buffer); - - compressor.finish(buffer); - accumulation_buffer.add(buffer); - expectValidFinishedBuffer(accumulation_buffer, input_size); -} - -} // namespace -} // namespace Compressor -} // namespace Envoy diff --git a/test/common/config/BUILD b/test/common/config/BUILD index 390374dad542d..7fec979a8a8f1 100644 --- a/test/common/config/BUILD +++ b/test/common/config/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", @@ -8,6 +6,8 @@ load( "envoy_proto_library", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( @@ -26,6 +26,16 @@ envoy_cc_test( ], ) +envoy_cc_test( + name = "decoded_resource_impl_test", + srcs = ["decoded_resource_impl_test.cc"], + deps = [ + "//source/common/config:decoded_resource_lib", + "//test/mocks/config:config_mocks", + "//test/test_common:utility_lib", + ], +) + envoy_cc_test( name = "delta_subscription_impl_test", srcs = ["delta_subscription_impl_test.cc"], @@ -72,6 +82,7 @@ envoy_cc_test( envoy_cc_test( name = "filesystem_subscription_impl_test", srcs = ["filesystem_subscription_impl_test.cc"], + tags = ["fails_on_windows"], deps = [ ":filesystem_subscription_test_harness", "//test/mocks/event:event_mocks", @@ -247,6 +258,17 @@ envoy_cc_test_library( ], 
) +envoy_cc_test( + name = "opaque_resource_decoder_impl_test", + srcs = ["opaque_resource_decoder_impl_test.cc"], + deps = [ + "//source/common/config:opaque_resource_decoder_lib", + "//source/common/protobuf:message_validator_lib", + "//test/test_common:utility_lib", + "@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto", + ], +) + envoy_cc_test( name = "subscription_factory_impl_test", srcs = ["subscription_factory_impl_test.cc"], @@ -261,6 +283,7 @@ envoy_cc_test( "//test/mocks/stats:stats_mocks", "//test/mocks/upstream:upstream_mocks", "//test/test_common:environment_lib", + "//test/test_common:logging_lib", "//test/test_common:utility_lib", "@envoy_api//envoy/config/cluster/v3:pkg_cc_proto", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", @@ -271,6 +294,7 @@ envoy_cc_test( envoy_cc_test( name = "subscription_impl_test", srcs = ["subscription_impl_test.cc"], + tags = ["fails_on_windows"], deps = [ ":delta_subscription_test_harness", ":filesystem_subscription_test_harness", @@ -390,7 +414,7 @@ envoy_cc_test( ":dummy_config_proto_cc_proto", "//source/common/config:config_provider_lib", "//source/common/protobuf:utility_lib", - "//test/mocks/server:server_mocks", + "//test/mocks/server:instance_mocks", "//test/test_common:simulated_time_system_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", "@envoy_api//envoy/service/discovery/v3:pkg_cc_proto", @@ -406,13 +430,23 @@ envoy_cc_test( "//source/common/protobuf:utility_lib", "//source/extensions/common/crypto:utility_lib", "//test/mocks/event:event_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/init:init_mocks", + "//test/mocks/runtime:runtime_mocks", "//test/mocks/upstream:upstream_mocks", "//test/test_common:utility_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], ) +envoy_cc_test( + name = "udpa_resource_test", + srcs = ["udpa_resource_test.cc"], + deps = [ + "//source/common/config:udpa_resource_lib", + "//test/test_common:utility_lib", + ], +) + envoy_proto_library( name = 
"version_converter_proto", srcs = ["version_converter.proto"], diff --git a/test/common/config/config_provider_impl_test.cc b/test/common/config/config_provider_impl_test.cc index 63cdc00669d3b..c8308e42e500e 100644 --- a/test/common/config/config_provider_impl_test.cc +++ b/test/common/config/config_provider_impl_test.cc @@ -7,7 +7,7 @@ #include "common/protobuf/utility.h" #include "test/common/config/dummy_config.pb.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/instance.h" #include "test/test_common/simulated_time_system.h" #include "test/test_common/utility.h" @@ -79,16 +79,17 @@ class DummyConfigSubscription : public ConfigSubscriptionInstance, } // Envoy::Config::SubscriptionCallbacks - void onConfigUpdate(const Protobuf::RepeatedPtrField& resources, + void onConfigUpdate(const std::vector& resources, const std::string& version_info) override { - auto config = TestUtility::anyConvert(resources[0]); + const auto& config = + dynamic_cast(resources[0].get().resource()); if (checkAndApplyConfigUpdate(config, "dummy_config", version_info)) { config_proto_ = config; } ConfigSubscriptionCommonBase::onConfigUpdate(); } - void onConfigUpdate(const Protobuf::RepeatedPtrField&, + void onConfigUpdate(const std::vector&, const Protobuf::RepeatedPtrField&, const std::string&) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } @@ -97,10 +98,7 @@ class DummyConfigSubscription : public ConfigSubscriptionInstance, void onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason, const EnvoyException*) override {} - // Envoy::Config::SubscriptionCallbacks - std::string resourceName(const ProtobufWkt::Any&) override { return ""; } - - const absl::optional& config_proto() const { + const absl::optional& configProto() const { return config_proto_; } @@ -122,10 +120,10 @@ class DummyDynamicConfigProvider : public MutableConfigProviderCommonBase { // Envoy::Config::ConfigProvider const Protobuf::Message* getConfigProto() const override { - if 
(!subscription_->config_proto().has_value()) { + if (!subscription_->configProto().has_value()) { return nullptr; } - return &subscription_->config_proto().value(); + return &subscription_->configProto().value(); } std::string getConfigVersion() const override { return ""; } @@ -152,7 +150,7 @@ class DummyConfigProviderManager : public ConfigProviderManagerImplBase { auto* dynamic_config = config_dump->mutable_dynamic_dummy_configs()->Add(); dynamic_config->set_version_info(subscription->configInfo().value().last_config_version_); dynamic_config->mutable_dummy_config()->MergeFrom( - static_cast(subscription.get())->config_proto().value()); + static_cast(subscription.get())->configProto().value()); TimestampUtil::systemClockToTimestamp(subscription->lastUpdated(), *dynamic_config->mutable_last_updated()); } @@ -265,11 +263,12 @@ TEST_F(ConfigProviderImplTest, SharedOwnership) { EXPECT_FALSE(provider1->configProtoInfo().has_value()); Protobuf::RepeatedPtrField untyped_dummy_configs; - untyped_dummy_configs.Add()->PackFrom(parseDummyConfigFromYaml("a: a dummy config")); + const auto dummy_config = parseDummyConfigFromYaml("a: a dummy config"); DummyConfigSubscription& subscription = dynamic_cast(*provider1).subscription(); - subscription.onConfigUpdate(untyped_dummy_configs, "1"); + const auto decoded_resources = TestUtility::decodeResources({dummy_config}, "a"); + subscription.onConfigUpdate(decoded_resources.refvec_, "1"); // Check that a newly created provider with the same config source will share // the subscription, config proto and resulting ConfigProvider::Config. 
@@ -298,7 +297,7 @@ TEST_F(ConfigProviderImplTest, SharedOwnership) { dynamic_cast(*provider3) .subscription() - .onConfigUpdate(untyped_dummy_configs, "provider3"); + .onConfigUpdate(decoded_resources.refvec_, "provider3"); EXPECT_EQ(2UL, static_cast( provider_manager_->dumpConfigs().get()) @@ -364,15 +363,15 @@ TEST_F(ConfigProviderImplTest, DuplicateConfigProto) { auto& subscription = static_cast(typed_provider->subscription()); EXPECT_EQ(subscription.getConfig(), nullptr); // First time issuing a configUpdate(). A new ConfigProvider::Config should be created. - Protobuf::RepeatedPtrField untyped_dummy_configs; - untyped_dummy_configs.Add()->PackFrom(parseDummyConfigFromYaml("a: a dynamic dummy config")); - subscription.onConfigUpdate(untyped_dummy_configs, "1"); + const auto dummy_config = parseDummyConfigFromYaml("a: a dynamic dummy config"); + const auto decoded_resources = TestUtility::decodeResources({dummy_config}, "a"); + subscription.onConfigUpdate(decoded_resources.refvec_, "1"); EXPECT_NE(subscription.getConfig(), nullptr); auto config_ptr = subscription.getConfig(); EXPECT_EQ(typed_provider->config().get(), config_ptr.get()); // Second time issuing the configUpdate(), this time with a duplicate proto. A new // ConfigProvider::Config _should not_ be created. - subscription.onConfigUpdate(untyped_dummy_configs, "2"); + subscription.onConfigUpdate(decoded_resources.refvec_, "2"); EXPECT_EQ(config_ptr, subscription.getConfig()); EXPECT_EQ(typed_provider->config().get(), config_ptr.get()); } @@ -449,13 +448,13 @@ TEST_F(ConfigProviderImplTest, ConfigDump) { ConfigProviderManager::NullOptionalArg()); // Static + dynamic config dump. 
- Protobuf::RepeatedPtrField untyped_dummy_configs; - untyped_dummy_configs.Add()->PackFrom(parseDummyConfigFromYaml("a: a dynamic dummy config")); + const auto dummy_config = parseDummyConfigFromYaml("a: a dynamic dummy config"); timeSystem().setSystemTime(std::chrono::milliseconds(1234567891567)); DummyConfigSubscription& subscription = dynamic_cast(*dynamic_provider).subscription(); - subscription.onConfigUpdate(untyped_dummy_configs, "v1"); + const auto decoded_resources = TestUtility::decodeResources({dummy_config}, "a"); + subscription.onConfigUpdate(decoded_resources.refvec_, "v1"); message_ptr = server_factory_context_.admin_.config_tracker_.config_tracker_callbacks_["dummy"](); const auto& dummy_config_dump3 = @@ -527,7 +526,7 @@ class DeltaDummyConfigSubscription : public DeltaConfigSubscriptionInstance, void start() override {} // Envoy::Config::SubscriptionCallbacks - void onConfigUpdate(const Protobuf::RepeatedPtrField& resources, + void onConfigUpdate(const std::vector& resources, const std::string& version_info) override { if (resources.empty()) { return; @@ -537,8 +536,9 @@ class DeltaDummyConfigSubscription : public DeltaConfigSubscriptionInstance, // config proto set (i.e., this is append only). Real xDS APIs will need to track additions, // updates and removals to the config set and apply the diffs to the underlying config // implementations. - for (const auto& resource_any : resources) { - auto dummy_config = TestUtility::anyConvert(resource_any); + for (const auto& resource : resources) { + const auto& dummy_config = + dynamic_cast(resource.get().resource()); proto_map_[version_info] = dummy_config; // Propagate the new config proto to all worker threads. 
applyConfigUpdate([&dummy_config](ConfigProvider::ConfigConstSharedPtr prev_config) @@ -553,7 +553,7 @@ class DeltaDummyConfigSubscription : public DeltaConfigSubscriptionInstance, ConfigSubscriptionCommonBase::onConfigUpdate(); setLastConfigInfo(absl::optional({absl::nullopt, version_info})); } - void onConfigUpdate(const Protobuf::RepeatedPtrField&, + void onConfigUpdate(const std::vector&, const Protobuf::RepeatedPtrField&, const std::string&) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } @@ -561,10 +561,6 @@ class DeltaDummyConfigSubscription : public DeltaConfigSubscriptionInstance, const EnvoyException*) override { ConfigSubscriptionCommonBase::onConfigUpdateFailed(); } - std::string resourceName(const ProtobufWkt::Any&) override { - return "test.common.config.DummyConfig"; - } - const ProtoMap& protoMap() const { return proto_map_; } private: @@ -689,13 +685,14 @@ TEST_F(DeltaConfigProviderImplTest, MultipleDeltaSubscriptions) { // No config protos have been received via the subscription yet. 
EXPECT_FALSE(provider1->configProtoInfoVector().has_value()); - Protobuf::RepeatedPtrField untyped_dummy_configs; - untyped_dummy_configs.Add()->PackFrom(parseDummyConfigFromYaml("a: a dummy config")); - untyped_dummy_configs.Add()->PackFrom(parseDummyConfigFromYaml("a: another dummy config")); + const auto dummy_config_0 = parseDummyConfigFromYaml("a: a dummy config"); + const auto dummy_config_1 = parseDummyConfigFromYaml("a: another dummy config"); + const auto decoded_resources = + TestUtility::decodeResources({dummy_config_0, dummy_config_1}, "a"); DeltaDummyConfigSubscription& subscription = dynamic_cast(*provider1).subscription(); - subscription.onConfigUpdate(untyped_dummy_configs, "1"); + subscription.onConfigUpdate(decoded_resources.refvec_, "1"); ConfigProviderPtr provider2 = provider_manager_->createXdsConfigProvider( config_source_proto, server_factory_context_, init_manager_, "dummy_prefix", @@ -716,7 +713,7 @@ TEST_F(DeltaConfigProviderImplTest, MultipleDeltaSubscriptions) { // Issue a second config update to validate that having multiple providers bound to the // subscription causes a single update to the underlying shared config implementation. - subscription.onConfigUpdate(untyped_dummy_configs, "2"); + subscription.onConfigUpdate(decoded_resources.refvec_, "2"); // NOTE: the config implementation is append only and _does not_ track updates/removals to the // config proto set, so the expectation is to double the size of the set. 
EXPECT_EQ(provider1->config().get(), diff --git a/test/common/config/datasource_test.cc b/test/common/config/datasource_test.cc index 1897c1a867cec..8ef1710b465bc 100644 --- a/test/common/config/datasource_test.cc +++ b/test/common/config/datasource_test.cc @@ -6,19 +6,18 @@ #include "common/protobuf/protobuf.h" #include "test/mocks/event/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/init/mocks.h" #include "test/mocks/upstream/mocks.h" #include "test/test_common/utility.h" #include "gtest/gtest.h" -using testing::AtLeast; -using testing::NiceMock; -using testing::Return; - namespace Envoy { namespace Config { namespace { +using ::testing::AtLeast; +using ::testing::NiceMock; +using ::testing::Return; class AsyncDataSourceTest : public testing::Test { protected: @@ -29,7 +28,7 @@ class AsyncDataSourceTest : public testing::Test { Init::ExpectableWatcherImpl init_watcher_; Init::TargetHandlePtr init_target_handle_; Api::ApiPtr api_{Api::createApiForTest()}; - NiceMock random_; + NiceMock random_; Event::MockDispatcher dispatcher_; Event::MockTimer* retry_timer_; Event::TimerCb retry_timer_cb_; diff --git a/test/common/config/decoded_resource_impl_test.cc b/test/common/config/decoded_resource_impl_test.cc new file mode 100644 index 0000000000000..938d29611d336 --- /dev/null +++ b/test/common/config/decoded_resource_impl_test.cc @@ -0,0 +1,84 @@ +#include "common/config/decoded_resource_impl.h" + +#include "test/mocks/config/mocks.h" +#include "test/test_common/utility.h" + +#include "gtest/gtest.h" + +using ::testing::InvokeWithoutArgs; +using ::testing::Return; + +namespace Envoy { +namespace Config { +namespace { + +TEST(DecodedResourceImplTest, All) { + MockOpaqueResourceDecoder resource_decoder; + ProtobufWkt::Any some_opaque_resource; + some_opaque_resource.set_type_url("some_type_url"); + + { + EXPECT_CALL(resource_decoder, decodeResource(ProtoEq(some_opaque_resource))) + .WillOnce(InvokeWithoutArgs( + []() -> ProtobufTypes::MessagePtr { 
return std::make_unique(); })); + EXPECT_CALL(resource_decoder, resourceName(ProtoEq(ProtobufWkt::Empty()))) + .WillOnce(Return("some_name")); + DecodedResourceImpl decoded_resource(resource_decoder, some_opaque_resource, "foo"); + EXPECT_EQ("some_name", decoded_resource.name()); + EXPECT_TRUE(decoded_resource.aliases().empty()); + EXPECT_EQ("foo", decoded_resource.version()); + EXPECT_THAT(decoded_resource.resource(), ProtoEq(ProtobufWkt::Empty())); + EXPECT_TRUE(decoded_resource.hasResource()); + } + + { + envoy::service::discovery::v3::Resource resource_wrapper; + resource_wrapper.set_name("real_name"); + resource_wrapper.add_aliases("bar"); + resource_wrapper.add_aliases("baz"); + resource_wrapper.mutable_resource()->MergeFrom(some_opaque_resource); + resource_wrapper.set_version("foo"); + EXPECT_CALL(resource_decoder, decodeResource(ProtoEq(some_opaque_resource))) + .WillOnce(InvokeWithoutArgs( + []() -> ProtobufTypes::MessagePtr { return std::make_unique(); })); + EXPECT_CALL(resource_decoder, resourceName(ProtoEq(ProtobufWkt::Empty()))).Times(0); + DecodedResourceImpl decoded_resource(resource_decoder, resource_wrapper); + EXPECT_EQ("real_name", decoded_resource.name()); + EXPECT_EQ((std::vector{"bar", "baz"}), decoded_resource.aliases()); + EXPECT_EQ("foo", decoded_resource.version()); + EXPECT_THAT(decoded_resource.resource(), ProtoEq(ProtobufWkt::Empty())); + EXPECT_TRUE(decoded_resource.hasResource()); + } + + { + envoy::service::discovery::v3::Resource resource_wrapper; + resource_wrapper.set_name("real_name"); + resource_wrapper.set_version("foo"); + resource_wrapper.add_aliases("bar"); + resource_wrapper.add_aliases("baz"); + EXPECT_CALL(resource_decoder, decodeResource(ProtoEq(ProtobufWkt::Any()))) + .WillOnce(InvokeWithoutArgs( + []() -> ProtobufTypes::MessagePtr { return std::make_unique(); })); + EXPECT_CALL(resource_decoder, resourceName(_)).Times(0); + DecodedResourceImpl decoded_resource(resource_decoder, resource_wrapper); + 
EXPECT_EQ("real_name", decoded_resource.name()); + EXPECT_EQ((std::vector{"bar", "baz"}), decoded_resource.aliases()); + EXPECT_EQ("foo", decoded_resource.version()); + EXPECT_THAT(decoded_resource.resource(), ProtoEq(ProtobufWkt::Empty())); + EXPECT_FALSE(decoded_resource.hasResource()); + } + + { + auto message = std::make_unique(); + DecodedResourceImpl decoded_resource(std::move(message), "real_name", {"bar", "baz"}, "foo"); + EXPECT_EQ("real_name", decoded_resource.name()); + EXPECT_EQ((std::vector{"bar", "baz"}), decoded_resource.aliases()); + EXPECT_EQ("foo", decoded_resource.version()); + EXPECT_THAT(decoded_resource.resource(), ProtoEq(ProtobufWkt::Empty())); + EXPECT_TRUE(decoded_resource.hasResource()); + } +} + +} // namespace +} // namespace Config +} // namespace Envoy diff --git a/test/common/config/delta_subscription_impl_test.cc b/test/common/config/delta_subscription_impl_test.cc index a13cb22ae28db..9633eb08d34d6 100644 --- a/test/common/config/delta_subscription_impl_test.cc +++ b/test/common/config/delta_subscription_impl_test.cc @@ -42,7 +42,10 @@ TEST_F(DeltaSubscriptionImplTest, UpdateResourcesCausesRequest) { // can be sent, not just with pausing: rate limiting or a down gRPC stream would also do it). TEST_F(DeltaSubscriptionImplTest, PauseHoldsRequest) { startSubscription({"name1", "name2", "name3"}); - subscription_->pause(); + auto resume_sub = subscription_->pause(); + // If nested pause wasn't handled correctly, the single expectedSendMessage below would be + // insufficient. 
+ auto nested_resume_sub = subscription_->pause(); expectSendMessage({"name4"}, {"name1", "name2"}, Grpc::Status::WellKnownGrpcStatus::Ok, "", {}); // If not for the pause, these updates would make the expectSendMessage fail due to too many @@ -52,8 +55,6 @@ TEST_F(DeltaSubscriptionImplTest, PauseHoldsRequest) { subscription_->updateResourceInterest({"name3", "name4"}); subscription_->updateResourceInterest({"name1", "name2", "name3", "name4"}); subscription_->updateResourceInterest({"name3", "name4"}); - - subscription_->resume(); } TEST_F(DeltaSubscriptionImplTest, ResponseCausesAck) { @@ -65,7 +66,7 @@ TEST_F(DeltaSubscriptionImplTest, ResponseCausesAck) { // resume, *all* ACKs that arrived during the pause are sent (in order). TEST_F(DeltaSubscriptionImplTest, PauseQueuesAcks) { startSubscription({"name1", "name2", "name3"}); - subscription_->pause(); + auto resume_sub = subscription_->pause(); // The server gives us our first version of resource name1. // subscription_ now wants to ACK name1 (but can't due to pause). { @@ -78,7 +79,7 @@ TEST_F(DeltaSubscriptionImplTest, PauseQueuesAcks) { message->set_type_url(Config::TypeUrl::get().ClusterLoadAssignment); nonce_acks_required_.push(nonce); static_cast(subscription_->grpcMux().get()) - ->onDiscoveryResponse(std::move(message)); + ->onDiscoveryResponse(std::move(message), control_plane_stats_); } // The server gives us our first version of resource name2. // subscription_ now wants to ACK name1 and then name2 (but can't due to pause). @@ -92,7 +93,7 @@ TEST_F(DeltaSubscriptionImplTest, PauseQueuesAcks) { message->set_type_url(Config::TypeUrl::get().ClusterLoadAssignment); nonce_acks_required_.push(nonce); static_cast(subscription_->grpcMux().get()) - ->onDiscoveryResponse(std::move(message)); + ->onDiscoveryResponse(std::move(message), control_plane_stats_); } // The server gives us an updated version of resource name1. // subscription_ now wants to ACK name1A, then name2, then name1B (but can't due to pause). 
@@ -106,7 +107,7 @@ TEST_F(DeltaSubscriptionImplTest, PauseQueuesAcks) { message->set_type_url(Config::TypeUrl::get().ClusterLoadAssignment); nonce_acks_required_.push(nonce); static_cast(subscription_->grpcMux().get()) - ->onDiscoveryResponse(std::move(message)); + ->onDiscoveryResponse(std::move(message), control_plane_stats_); } // All ACK sendMessage()s will happen upon calling resume(). EXPECT_CALL(async_stream_, sendMessageRaw_(_, _)) @@ -118,7 +119,6 @@ TEST_F(DeltaSubscriptionImplTest, PauseQueuesAcks) { nonce_acks_sent_.push(nonce); } })); - subscription_->resume(); // DeltaSubscriptionTestHarness's dtor will check that all ACKs were sent with the correct nonces, // in the correct order. } @@ -133,22 +133,23 @@ TEST(DeltaSubscriptionImplFixturelessTest, NoGrpcStream) { EXPECT_CALL(local_info, node()).WillRepeatedly(testing::ReturnRef(node)); NiceMock dispatcher; - NiceMock random; + NiceMock random; Envoy::Config::RateLimitSettings rate_limit_settings; NiceMock callbacks; + NiceMock resource_decoder; auto* async_client = new Grpc::MockAsyncClient(); const Protobuf::MethodDescriptor* method_descriptor = Protobuf::DescriptorPool::generated_pool()->FindMethodByName( "envoy.api.v2.EndpointDiscoveryService.StreamEndpoints"); - std::shared_ptr xds_context = std::make_shared( + NewGrpcMuxImplSharedPtr xds_context = std::make_shared( std::unique_ptr(async_client), dispatcher, *method_descriptor, envoy::config::core::v3::ApiVersion::AUTO, random, stats_store, rate_limit_settings, local_info); - std::unique_ptr subscription = std::make_unique( - xds_context, callbacks, stats, Config::TypeUrl::get().ClusterLoadAssignment, dispatcher, - std::chrono::milliseconds(12345), false); + GrpcSubscriptionImplPtr subscription = std::make_unique( + xds_context, callbacks, resource_decoder, stats, Config::TypeUrl::get().ClusterLoadAssignment, + dispatcher, std::chrono::milliseconds(12345), false); EXPECT_CALL(*async_client, startRaw(_, _, _, _)).WillOnce(Return(nullptr)); diff 
--git a/test/common/config/delta_subscription_state_test.cc b/test/common/config/delta_subscription_state_test.cc index 474172e3c9a41..554ebe3884df0 100644 --- a/test/common/config/delta_subscription_state_test.cc +++ b/test/common/config/delta_subscription_state_test.cc @@ -61,7 +61,7 @@ class DeltaSubscriptionStateTest : public testing::Test { return state_.handleResponse(message); } - NiceMock callbacks_; + NiceMock callbacks_; NiceMock local_info_; NiceMock dispatcher_; // We start out interested in three resources: name1, name2, and name3. diff --git a/test/common/config/delta_subscription_test_harness.h b/test/common/config/delta_subscription_test_harness.h index c52a7fd1a2d10..04fca753ab056 100644 --- a/test/common/config/delta_subscription_test_harness.h +++ b/test/common/config/delta_subscription_test_harness.h @@ -4,6 +4,7 @@ #include "envoy/config/core/v3/base.pb.h" #include "envoy/config/endpoint/v3/endpoint.pb.h" +#include "envoy/config/endpoint/v3/endpoint.pb.validate.h" #include "envoy/service/discovery/v3/discovery.pb.h" #include "common/config/grpc_subscription_impl.h" @@ -12,11 +13,11 @@ #include "common/grpc/common.h" #include "test/common/config/subscription_test_harness.h" +#include "test/mocks/common.h" #include "test/mocks/config/mocks.h" #include "test/mocks/event/mocks.h" #include "test/mocks/grpc/mocks.h" #include "test/mocks/local_info/mocks.h" -#include "test/mocks/runtime/mocks.h" #include "test/mocks/stats/mocks.h" #include "gmock/gmock.h" @@ -45,8 +46,8 @@ class DeltaSubscriptionTestHarness : public SubscriptionTestHarness { envoy::config::core::v3::ApiVersion::AUTO, random_, stats_store_, rate_limit_settings_, local_info_); subscription_ = std::make_unique( - xds_context_, callbacks_, stats_, Config::TypeUrl::get().ClusterLoadAssignment, dispatcher_, - init_fetch_timeout, false); + xds_context_, callbacks_, resource_decoder_, stats_, + Config::TypeUrl::get().ClusterLoadAssignment, dispatcher_, init_fetch_timeout, false); 
EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); } @@ -156,7 +157,7 @@ class DeltaSubscriptionTestHarness : public SubscriptionTestHarness { expectSendMessage({}, {}, Grpc::Status::WellKnownGrpcStatus::Internal, "bad config", {}); } static_cast(subscription_->grpcMux().get()) - ->onDiscoveryResponse(std::move(response)); + ->onDiscoveryResponse(std::move(response), control_plane_stats_); Mock::VerifyAndClearExpectations(&async_stream_); } @@ -193,17 +194,19 @@ class DeltaSubscriptionTestHarness : public SubscriptionTestHarness { const Protobuf::MethodDescriptor* method_descriptor_; Grpc::MockAsyncClient* async_client_; Event::MockDispatcher dispatcher_; - NiceMock random_; + NiceMock random_; NiceMock local_info_; Grpc::MockAsyncStream async_stream_; - std::shared_ptr xds_context_; - std::unique_ptr subscription_; + NewGrpcMuxImplSharedPtr xds_context_; + GrpcSubscriptionImplPtr subscription_; std::string last_response_nonce_; std::set last_cluster_names_; Envoy::Config::RateLimitSettings rate_limit_settings_; Event::MockTimer* init_timeout_timer_; envoy::config::core::v3::Node node_; NiceMock callbacks_; + TestUtility::TestOpaqueResourceDecoderImpl + resource_decoder_{"cluster_name"}; std::queue nonce_acks_required_; std::queue nonce_acks_sent_; bool subscription_started_{}; diff --git a/test/common/config/filesystem_subscription_impl_test.cc b/test/common/config/filesystem_subscription_impl_test.cc index ddbf73b72d6cb..cee04cea212a4 100644 --- a/test/common/config/filesystem_subscription_impl_test.cc +++ b/test/common/config/filesystem_subscription_impl_test.cc @@ -21,20 +21,20 @@ class FilesystemSubscriptionImplTest : public testing::Test, // Validate that the client can recover from bad JSON responses. 
TEST_F(FilesystemSubscriptionImplTest, BadJsonRecovery) { startSubscription({"cluster0", "cluster1"}); - EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0)); + EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0, "")); EXPECT_CALL(callbacks_, onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::UpdateRejected, _)); updateFile(";!@#badjso n"); - EXPECT_TRUE(statsAre(2, 0, 0, 1, 0, 0, 0)); + EXPECT_TRUE(statsAre(2, 0, 0, 1, 0, 0, 0, "")); deliverConfigUpdate({"cluster0", "cluster1"}, "0", true); - EXPECT_TRUE(statsAre(3, 1, 0, 1, 0, TEST_TIME_MILLIS, 7148434200721666028)); + EXPECT_TRUE(statsAre(3, 1, 0, 1, 0, TEST_TIME_MILLIS, 7148434200721666028, "0")); } // Validate that a file that is initially available results in a successful update. TEST_F(FilesystemSubscriptionImplTest, InitialFile) { updateFile("{\"versionInfo\": \"0\", \"resources\": []}", false); startSubscription({"cluster0", "cluster1"}); - EXPECT_TRUE(statsAre(1, 1, 0, 0, 0, TEST_TIME_MILLIS, 7148434200721666028)); + EXPECT_TRUE(statsAre(1, 1, 0, 0, 0, TEST_TIME_MILLIS, 7148434200721666028, "0")); } // Validate that if we fail to set a watch, we get a sensible warning. @@ -48,8 +48,10 @@ TEST(MiscFilesystemSubscriptionImplTest, BadWatch) { EXPECT_CALL(dispatcher, createFilesystemWatcher_()).WillOnce(Return(watcher)); EXPECT_CALL(*watcher, addWatch(_, _, _)).WillOnce(Throw(EnvoyException("bad path"))); NiceMock callbacks; + NiceMock resource_decoder; EXPECT_THROW_WITH_MESSAGE(FilesystemSubscriptionImpl(dispatcher, "##!@/dev/null", callbacks, - stats, validation_visitor, *api), + resource_decoder, stats, validation_visitor, + *api), EnvoyException, "bad path"); } @@ -57,24 +59,24 @@ TEST(MiscFilesystemSubscriptionImplTest, BadWatch) { // rejected. 
TEST_F(FilesystemSubscriptionImplTest, UpdateTimeNotChangedOnUpdateReject) { startSubscription({"cluster0", "cluster1"}); - EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0)); + EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0, "")); EXPECT_CALL(callbacks_, onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::UpdateRejected, _)); updateFile(";!@#badjso n"); - EXPECT_TRUE(statsAre(2, 0, 0, 1, 0, 0, 0)); + EXPECT_TRUE(statsAre(2, 0, 0, 1, 0, 0, 0, "")); } // Validate that the update_time statistic is changed after a trivial configuration update // (update that resulted in no change). TEST_F(FilesystemSubscriptionImplTest, UpdateTimeChangedOnUpdateSuccess) { startSubscription({"cluster0", "cluster1"}); - EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0)); + EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0, "")); deliverConfigUpdate({"cluster0", "cluster1"}, "0", true); - EXPECT_TRUE(statsAre(2, 1, 0, 0, 0, TEST_TIME_MILLIS, 7148434200721666028)); + EXPECT_TRUE(statsAre(2, 1, 0, 0, 0, TEST_TIME_MILLIS, 7148434200721666028, "0")); // Advance the simulated time. 
simTime().setSystemTime(SystemTime(std::chrono::milliseconds(TEST_TIME_MILLIS + 1))); deliverConfigUpdate({"cluster0", "cluster1"}, "0", true); - EXPECT_TRUE(statsAre(3, 2, 0, 0, 0, TEST_TIME_MILLIS + 1, 7148434200721666028)); + EXPECT_TRUE(statsAre(3, 2, 0, 0, 0, TEST_TIME_MILLIS + 1, 7148434200721666028, "0")); } } // namespace diff --git a/test/common/config/filesystem_subscription_test_harness.h b/test/common/config/filesystem_subscription_test_harness.h index 08de45f776b19..d8d721eb060a2 100644 --- a/test/common/config/filesystem_subscription_test_harness.h +++ b/test/common/config/filesystem_subscription_test_harness.h @@ -3,6 +3,7 @@ #include #include "envoy/config/endpoint/v3/endpoint.pb.h" +#include "envoy/config/endpoint/v3/endpoint.pb.validate.h" #include "envoy/service/discovery/v3/discovery.pb.h" #include "common/config/filesystem_subscription_impl.h" @@ -31,7 +32,8 @@ class FilesystemSubscriptionTestHarness : public SubscriptionTestHarness { : path_(TestEnvironment::temporaryPath("eds.json")), api_(Api::createApiForTest(stats_store_, simTime())), dispatcher_(api_->allocateDispatcher("test_thread")), - subscription_(*dispatcher_, path_, callbacks_, stats_, validation_visitor_, *api_) {} + subscription_(*dispatcher_, path_, callbacks_, resource_decoder_, stats_, + validation_visitor_, *api_) {} ~FilesystemSubscriptionTestHarness() override { TestEnvironment::removePath(path_); } @@ -74,7 +76,10 @@ class FilesystemSubscriptionTestHarness : public SubscriptionTestHarness { file_json += "]}"; envoy::service::discovery::v3::DiscoveryResponse response_pb; TestUtility::loadFromJson(file_json, response_pb); - EXPECT_CALL(callbacks_, onConfigUpdate(RepeatedProtoEq(response_pb.resources()), version)) + const auto decoded_resources = + TestUtility::decodeResources( + response_pb, "cluster_name"); + EXPECT_CALL(callbacks_, onConfigUpdate(DecodedResourcesEq(decoded_resources.refvec_), version)) .WillOnce(ThrowOnRejectedConfig(accept)); if (accept) { version_ = 
version; @@ -85,12 +90,12 @@ class FilesystemSubscriptionTestHarness : public SubscriptionTestHarness { } AssertionResult statsAre(uint32_t attempt, uint32_t success, uint32_t rejected, uint32_t failure, - uint32_t init_fetch_timeout, uint64_t update_time, - uint64_t version) override { + uint32_t init_fetch_timeout, uint64_t update_time, uint64_t version, + absl::string_view version_text) override { // The first attempt always fail unless there was a file there to begin with. return SubscriptionTestHarness::statsAre(attempt, success, rejected, failure + (file_at_start_ ? 0 : 1), init_fetch_timeout, - update_time, version); + update_time, version, version_text); } void expectConfigUpdateFailed() override { stats_.update_failure_.inc(); } @@ -114,6 +119,8 @@ class FilesystemSubscriptionTestHarness : public SubscriptionTestHarness { Api::ApiPtr api_; Event::DispatcherPtr dispatcher_; NiceMock callbacks_; + TestUtility::TestOpaqueResourceDecoderImpl + resource_decoder_{"cluster_name"}; FilesystemSubscriptionImpl subscription_; bool file_at_start_{false}; }; diff --git a/test/common/config/grpc_mux_impl_test.cc b/test/common/config/grpc_mux_impl_test.cc index 4f6abbc893c2a..5a8bd21840dba 100644 --- a/test/common/config/grpc_mux_impl_test.cc +++ b/test/common/config/grpc_mux_impl_test.cc @@ -2,6 +2,7 @@ #include "envoy/api/v2/discovery.pb.h" #include "envoy/config/endpoint/v3/endpoint.pb.h" +#include "envoy/config/endpoint/v3/endpoint.pb.validate.h" #include "envoy/service/discovery/v3/discovery.pb.h" #include "common/common/empty_string.h" @@ -49,7 +50,11 @@ class GrpcMuxImplTestBase : public testing::Test { GrpcMuxImplTestBase() : async_client_(new Grpc::MockAsyncClient()), control_plane_connected_state_( - stats_.gauge("control_plane.connected_state", Stats::Gauge::ImportMode::NeverImport)) {} + stats_.gauge("control_plane.connected_state", Stats::Gauge::ImportMode::NeverImport)), + control_plane_pending_requests_( + stats_.gauge("control_plane.pending_requests", 
Stats::Gauge::ImportMode::NeverImport)) + + {} void setup() { grpc_mux_ = std::make_unique( @@ -94,15 +99,17 @@ class GrpcMuxImplTestBase : public testing::Test { } NiceMock dispatcher_; - NiceMock random_; + NiceMock random_; Grpc::MockAsyncClient* async_client_; Grpc::MockAsyncStream async_stream_; - std::unique_ptr grpc_mux_; + GrpcMuxImplPtr grpc_mux_; NiceMock callbacks_; + NiceMock resource_decoder_; NiceMock local_info_; Stats::TestUtil::TestStore stats_; Envoy::Config::RateLimitSettings rate_limit_settings_; Stats::Gauge& control_plane_connected_state_; + Stats::Gauge& control_plane_pending_requests_; }; class GrpcMuxImplTest : public GrpcMuxImplTestBase { @@ -115,17 +122,17 @@ class GrpcMuxImplTest : public GrpcMuxImplTestBase { TEST_F(GrpcMuxImplTest, MultipleTypeUrlStreams) { setup(); InSequence s; - auto foo_sub = grpc_mux_->addWatch("foo", {"x", "y"}, callbacks_); - auto bar_sub = grpc_mux_->addWatch("bar", {}, callbacks_); + auto foo_sub = grpc_mux_->addWatch("foo", {"x", "y"}, callbacks_, resource_decoder_); + auto bar_sub = grpc_mux_->addWatch("bar", {}, callbacks_, resource_decoder_); EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); expectSendMessage("foo", {"x", "y"}, "", true); expectSendMessage("bar", {}, ""); grpc_mux_->start(); EXPECT_EQ(1, control_plane_connected_state_.value()); expectSendMessage("bar", {"z"}, ""); - auto bar_z_sub = grpc_mux_->addWatch("bar", {"z"}, callbacks_); + auto bar_z_sub = grpc_mux_->addWatch("bar", {"z"}, callbacks_, resource_decoder_); expectSendMessage("bar", {"zz", "z"}, ""); - auto bar_zz_sub = grpc_mux_->addWatch("bar", {"zz"}, callbacks_); + auto bar_zz_sub = grpc_mux_->addWatch("bar", {"zz"}, callbacks_, resource_decoder_); expectSendMessage("bar", {"z"}, ""); expectSendMessage("bar", {}, ""); expectSendMessage("foo", {}, ""); @@ -145,9 +152,9 @@ TEST_F(GrpcMuxImplTest, ResetStream) { })); setup(); - auto foo_sub = grpc_mux_->addWatch("foo", {"x", "y"}, callbacks_); - auto 
bar_sub = grpc_mux_->addWatch("bar", {}, callbacks_); - auto baz_sub = grpc_mux_->addWatch("baz", {"z"}, callbacks_); + auto foo_sub = grpc_mux_->addWatch("foo", {"x", "y"}, callbacks_, resource_decoder_); + auto bar_sub = grpc_mux_->addWatch("bar", {}, callbacks_, resource_decoder_); + auto baz_sub = grpc_mux_->addWatch("baz", {"z"}, callbacks_, resource_decoder_); EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); expectSendMessage("foo", {"x", "y"}, "", true); expectSendMessage("bar", {}, ""); @@ -162,6 +169,7 @@ TEST_F(GrpcMuxImplTest, ResetStream) { EXPECT_CALL(*timer, enableTimer(_, _)); grpc_mux_->grpcStreamForTest().onRemoteClose(Grpc::Status::WellKnownGrpcStatus::Canceled, ""); EXPECT_EQ(0, control_plane_connected_state_.value()); + EXPECT_EQ(0, control_plane_pending_requests_.value()); EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); expectSendMessage("foo", {"x", "y"}, "", true); expectSendMessage("bar", {}, ""); @@ -176,21 +184,34 @@ TEST_F(GrpcMuxImplTest, ResetStream) { TEST_F(GrpcMuxImplTest, PauseResume) { setup(); InSequence s; - auto foo_sub = grpc_mux_->addWatch("foo", {"x", "y"}, callbacks_); - grpc_mux_->pause("foo"); - EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); - grpc_mux_->start(); - expectSendMessage("foo", {"x", "y"}, "", true); - grpc_mux_->resume("foo"); - grpc_mux_->pause("bar"); - expectSendMessage("foo", {"z", "x", "y"}, ""); - auto foo_z_sub = grpc_mux_->addWatch("foo", {"z"}, callbacks_); - grpc_mux_->resume("bar"); - grpc_mux_->pause("foo"); - auto foo_zz_sub = grpc_mux_->addWatch("foo", {"zz"}, callbacks_); - expectSendMessage("foo", {"zz", "z", "x", "y"}, ""); - grpc_mux_->resume("foo"); - grpc_mux_->pause("foo"); + GrpcMuxWatchPtr foo_sub; + GrpcMuxWatchPtr foo_z_sub; + GrpcMuxWatchPtr foo_zz_sub; + foo_sub = grpc_mux_->addWatch("foo", {"x", "y"}, callbacks_, resource_decoder_); + { + ScopedResume a = 
grpc_mux_->pause("foo"); + EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); + grpc_mux_->start(); + expectSendMessage("foo", {"x", "y"}, "", true); + } + { + ScopedResume a = grpc_mux_->pause("bar"); + expectSendMessage("foo", {"z", "x", "y"}, ""); + foo_z_sub = grpc_mux_->addWatch("foo", {"z"}, callbacks_, resource_decoder_); + } + { + ScopedResume a = grpc_mux_->pause("foo"); + foo_zz_sub = grpc_mux_->addWatch("foo", {"zz"}, callbacks_, resource_decoder_); + expectSendMessage("foo", {"zz", "z", "x", "y"}, ""); + } + // When nesting, we only have a single resumption. + { + ScopedResume a = grpc_mux_->pause("foo"); + ScopedResume b = grpc_mux_->pause("foo"); + foo_zz_sub = grpc_mux_->addWatch("foo", {"zz"}, callbacks_, resource_decoder_); + expectSendMessage("foo", {"zz", "z", "x", "y"}, ""); + } + grpc_mux_->pause("foo")->cancel(); } // Validate behavior when type URL mismatches occur. @@ -199,7 +220,7 @@ TEST_F(GrpcMuxImplTest, TypeUrlMismatch) { auto invalid_response = std::make_unique(); InSequence s; - auto foo_sub = grpc_mux_->addWatch("foo", {"x", "y"}, callbacks_); + auto foo_sub = grpc_mux_->addWatch("foo", {"x", "y"}, callbacks_, resource_decoder_); EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); expectSendMessage("foo", {"x", "y"}, "", true); @@ -234,7 +255,7 @@ TEST_F(GrpcMuxImplTest, RpcErrorMessageTruncated) { setup(); auto invalid_response = std::make_unique(); InSequence s; - auto foo_sub = grpc_mux_->addWatch("foo", {"x", "y"}, callbacks_); + auto foo_sub = grpc_mux_->addWatch("foo", {"x", "y"}, callbacks_, resource_decoder_); EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); expectSendMessage("foo", {"x", "y"}, "", true); @@ -266,7 +287,9 @@ TEST_F(GrpcMuxImplTest, WildcardWatch) { InSequence s; const std::string& type_url = Config::TypeUrl::get().ClusterLoadAssignment; - auto foo_sub = grpc_mux_->addWatch(type_url, {}, callbacks_); + 
TestUtility::TestOpaqueResourceDecoderImpl + resource_decoder("cluster_name"); + auto foo_sub = grpc_mux_->addWatch(type_url, {}, callbacks_, resource_decoder); EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); expectSendMessage(type_url, {}, "", true); grpc_mux_->start(); @@ -279,15 +302,14 @@ TEST_F(GrpcMuxImplTest, WildcardWatch) { load_assignment.set_cluster_name("x"); response->add_resources()->PackFrom(API_DOWNGRADE(load_assignment)); EXPECT_CALL(callbacks_, onConfigUpdate(_, "1")) - .WillOnce( - Invoke([&load_assignment](const Protobuf::RepeatedPtrField& resources, - const std::string&) { - EXPECT_EQ(1, resources.size()); - envoy::config::endpoint::v3::ClusterLoadAssignment expected_assignment = - MessageUtil::anyConvert( - resources[0]); - EXPECT_TRUE(TestUtility::protoEqual(expected_assignment, load_assignment)); - })); + .WillOnce(Invoke([&load_assignment](const std::vector& resources, + const std::string&) { + EXPECT_EQ(1, resources.size()); + const auto& expected_assignment = + dynamic_cast( + resources[0].get().resource()); + EXPECT_TRUE(TestUtility::protoEqual(expected_assignment, load_assignment)); + })); expectSendMessage(type_url, {}, "1"); grpc_mux_->grpcStreamForTest().onReceiveMessage(std::move(response)); } @@ -297,11 +319,13 @@ TEST_F(GrpcMuxImplTest, WildcardWatch) { TEST_F(GrpcMuxImplTest, WatchDemux) { setup(); InSequence s; + TestUtility::TestOpaqueResourceDecoderImpl + resource_decoder("cluster_name"); const std::string& type_url = Config::TypeUrl::get().ClusterLoadAssignment; NiceMock foo_callbacks; - auto foo_sub = grpc_mux_->addWatch(type_url, {"x", "y"}, foo_callbacks); + auto foo_sub = grpc_mux_->addWatch(type_url, {"x", "y"}, foo_callbacks, resource_decoder); NiceMock bar_callbacks; - auto bar_sub = grpc_mux_->addWatch(type_url, {"y", "z"}, bar_callbacks); + auto bar_sub = grpc_mux_->addWatch(type_url, {"y", "z"}, bar_callbacks, resource_decoder); EXPECT_CALL(*async_client_, startRaw(_, _, _, 
_)).WillOnce(Return(&async_stream_)); // Should dedupe the "x" resource. expectSendMessage(type_url, {"y", "z", "x"}, "", true); @@ -316,15 +340,14 @@ TEST_F(GrpcMuxImplTest, WatchDemux) { response->add_resources()->PackFrom(API_DOWNGRADE(load_assignment)); EXPECT_CALL(bar_callbacks, onConfigUpdate(_, "1")).Times(0); EXPECT_CALL(foo_callbacks, onConfigUpdate(_, "1")) - .WillOnce( - Invoke([&load_assignment](const Protobuf::RepeatedPtrField& resources, - const std::string&) { - EXPECT_EQ(1, resources.size()); - envoy::config::endpoint::v3::ClusterLoadAssignment expected_assignment = - MessageUtil::anyConvert( - resources[0]); - EXPECT_TRUE(TestUtility::protoEqual(expected_assignment, load_assignment)); - })); + .WillOnce(Invoke([&load_assignment](const std::vector& resources, + const std::string&) { + EXPECT_EQ(1, resources.size()); + const auto& expected_assignment = + dynamic_cast( + resources[0].get().resource()); + EXPECT_TRUE(TestUtility::protoEqual(expected_assignment, load_assignment)); + })); expectSendMessage(type_url, {"y", "z", "x"}, "1"); grpc_mux_->grpcStreamForTest().onReceiveMessage(std::move(response)); } @@ -343,33 +366,31 @@ TEST_F(GrpcMuxImplTest, WatchDemux) { load_assignment_z.set_cluster_name("z"); response->add_resources()->PackFrom(API_DOWNGRADE(load_assignment_z)); EXPECT_CALL(bar_callbacks, onConfigUpdate(_, "2")) - .WillOnce(Invoke( - [&load_assignment_y, &load_assignment_z]( - const Protobuf::RepeatedPtrField& resources, const std::string&) { - EXPECT_EQ(2, resources.size()); - envoy::config::endpoint::v3::ClusterLoadAssignment expected_assignment = - MessageUtil::anyConvert( - resources[0]); - EXPECT_TRUE(TestUtility::protoEqual(expected_assignment, load_assignment_y)); - expected_assignment = - MessageUtil::anyConvert( - resources[1]); - EXPECT_TRUE(TestUtility::protoEqual(expected_assignment, load_assignment_z)); - })); + .WillOnce(Invoke([&load_assignment_y, &load_assignment_z]( + const std::vector& resources, const std::string&) { + 
EXPECT_EQ(2, resources.size()); + const auto& expected_assignment = + dynamic_cast( + resources[0].get().resource()); + EXPECT_TRUE(TestUtility::protoEqual(expected_assignment, load_assignment_y)); + const auto& expected_assignment_1 = + dynamic_cast( + resources[1].get().resource()); + EXPECT_TRUE(TestUtility::protoEqual(expected_assignment_1, load_assignment_z)); + })); EXPECT_CALL(foo_callbacks, onConfigUpdate(_, "2")) - .WillOnce(Invoke( - [&load_assignment_x, &load_assignment_y]( - const Protobuf::RepeatedPtrField& resources, const std::string&) { - EXPECT_EQ(2, resources.size()); - envoy::config::endpoint::v3::ClusterLoadAssignment expected_assignment = - MessageUtil::anyConvert( - resources[0]); - EXPECT_TRUE(TestUtility::protoEqual(expected_assignment, load_assignment_x)); - expected_assignment = - MessageUtil::anyConvert( - resources[1]); - EXPECT_TRUE(TestUtility::protoEqual(expected_assignment, load_assignment_y)); - })); + .WillOnce(Invoke([&load_assignment_x, &load_assignment_y]( + const std::vector& resources, const std::string&) { + EXPECT_EQ(2, resources.size()); + const auto& expected_assignment = + dynamic_cast( + resources[0].get().resource()); + EXPECT_TRUE(TestUtility::protoEqual(expected_assignment, load_assignment_x)); + const auto& expected_assignment_1 = + dynamic_cast( + resources[1].get().resource()); + EXPECT_TRUE(TestUtility::protoEqual(expected_assignment_1, load_assignment_y)); + })); expectSendMessage(type_url, {"y", "z", "x"}, "2"); grpc_mux_->grpcStreamForTest().onReceiveMessage(std::move(response)); } @@ -384,7 +405,7 @@ TEST_F(GrpcMuxImplTest, MultipleWatcherWithEmptyUpdates) { InSequence s; const std::string& type_url = Config::TypeUrl::get().ClusterLoadAssignment; NiceMock foo_callbacks; - auto foo_sub = grpc_mux_->addWatch(type_url, {"x", "y"}, foo_callbacks); + auto foo_sub = grpc_mux_->addWatch(type_url, {"x", "y"}, foo_callbacks, resource_decoder_); EXPECT_CALL(*async_client_, startRaw(_, _, _, 
_)).WillOnce(Return(&async_stream_)); expectSendMessage(type_url, {"x", "y"}, "", true); @@ -406,7 +427,7 @@ TEST_F(GrpcMuxImplTest, SingleWatcherWithEmptyUpdates) { setup(); const std::string& type_url = Config::TypeUrl::get().Cluster; NiceMock foo_callbacks; - auto foo_sub = grpc_mux_->addWatch(type_url, {}, foo_callbacks); + auto foo_sub = grpc_mux_->addWatch(type_url, {}, foo_callbacks, resource_decoder_); EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); expectSendMessage(type_url, {}, "", true); @@ -417,8 +438,9 @@ TEST_F(GrpcMuxImplTest, SingleWatcherWithEmptyUpdates) { response->set_version_info("1"); // Validate that onConfigUpdate is called with empty resources. EXPECT_CALL(foo_callbacks, onConfigUpdate(_, "1")) - .WillOnce(Invoke([](const Protobuf::RepeatedPtrField& resources, - const std::string&) { EXPECT_TRUE(resources.empty()); })); + .WillOnce(Invoke([](const std::vector& resources, const std::string&) { + EXPECT_TRUE(resources.empty()); + })); expectSendMessage(type_url, {}, "1"); grpc_mux_->grpcStreamForTest().onReceiveMessage(std::move(response)); } @@ -460,7 +482,7 @@ TEST_F(GrpcMuxImplTestWithMockTimeSystem, TooManyRequestsWithDefaultSettings) { } }; - auto foo_sub = grpc_mux_->addWatch("foo", {"x"}, callbacks_); + auto foo_sub = grpc_mux_->addWatch("foo", {"x"}, callbacks_, resource_decoder_); expectSendMessage("foo", {"x"}, "", true); grpc_mux_->start(); @@ -473,12 +495,13 @@ TEST_F(GrpcMuxImplTestWithMockTimeSystem, TooManyRequestsWithDefaultSettings) { } // Verifies that default rate limiting is enforced with empty RateLimitSettings. -TEST_F(GrpcMuxImplTestWithMockTimeSystem, TooManyRequestsWithEmptyRateLimitSettings) { +TEST_F(GrpcMuxImplTest, TooManyRequestsWithEmptyRateLimitSettings) { // Validate that request drain timer is created. 
Event::MockTimer* timer = nullptr; Event::MockTimer* drain_request_timer = nullptr; Event::TimerCb timer_cb; + Event::TimerCb drain_timer_cb; EXPECT_CALL(dispatcher_, createTimer_(_)) .WillOnce(Invoke([&timer, &timer_cb](Event::TimerCb cb) { timer_cb = cb; @@ -486,20 +509,19 @@ TEST_F(GrpcMuxImplTestWithMockTimeSystem, TooManyRequestsWithEmptyRateLimitSetti timer = new Event::MockTimer(); return timer; })) - .WillOnce(Invoke([&drain_request_timer, &timer_cb](Event::TimerCb cb) { - timer_cb = cb; + .WillOnce(Invoke([&drain_request_timer, &drain_timer_cb](Event::TimerCb cb) { + drain_timer_cb = cb; EXPECT_EQ(nullptr, drain_request_timer); drain_request_timer = new Event::MockTimer(); return drain_request_timer; })); - EXPECT_CALL(*mock_time_system_, monotonicTime()) - .WillRepeatedly(Return(std::chrono::steady_clock::time_point{})); RateLimitSettings custom_rate_limit_settings; custom_rate_limit_settings.enabled_ = true; setup(custom_rate_limit_settings); - EXPECT_CALL(async_stream_, sendMessageRaw_(_, false)).Times(AtLeast(99)); + // Attempt to send 99 messages. One of them is rate limited (and we never drain). + EXPECT_CALL(async_stream_, sendMessageRaw_(_, false)).Times(99); EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); const auto onReceiveMessage = [&](uint64_t burst) { @@ -512,17 +534,35 @@ TEST_F(GrpcMuxImplTestWithMockTimeSystem, TooManyRequestsWithEmptyRateLimitSetti } }; - auto foo_sub = grpc_mux_->addWatch("foo", {"x"}, callbacks_); + auto foo_sub = grpc_mux_->addWatch("foo", {"x"}, callbacks_, resource_decoder_); expectSendMessage("foo", {"x"}, "", true); grpc_mux_->start(); // Validate that drain_request_timer is enabled when there are no tokens. 
EXPECT_CALL(*drain_request_timer, enableTimer(std::chrono::milliseconds(100), _)); - onReceiveMessage(99); - EXPECT_EQ(1, stats_.counter("control_plane.rate_limit_enforced").value()); - EXPECT_EQ( - 1, - stats_.gauge("control_plane.pending_requests", Stats::Gauge::ImportMode::Accumulate).value()); + // The drain timer enable is checked twice, once when we limit, again when the watch is destroyed. + EXPECT_CALL(*drain_request_timer, enabled()).Times(11); + onReceiveMessage(110); + EXPECT_EQ(11, stats_.counter("control_plane.rate_limit_enforced").value()); + EXPECT_EQ(11, control_plane_pending_requests_.value()); + + // Validate that when we reset a stream with pending requests, it reverts back to the initial + // query (i.e. the queue is discarded). + EXPECT_CALL(callbacks_, + onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::ConnectionFailure, _)); + EXPECT_CALL(random_, random()); + ASSERT_TRUE(timer != nullptr); // initialized from dispatcher mock. + EXPECT_CALL(*timer, enableTimer(_, _)); + grpc_mux_->grpcStreamForTest().onRemoteClose(Grpc::Status::WellKnownGrpcStatus::Canceled, ""); + EXPECT_EQ(11, control_plane_pending_requests_.value()); + EXPECT_EQ(0, control_plane_connected_state_.value()); + EXPECT_CALL(async_stream_, sendMessageRaw_(_, false)); + EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); + time_system_.setMonotonicTime(std::chrono::seconds(30)); + timer_cb(); + EXPECT_EQ(0, control_plane_pending_requests_.value()); + // One more message on the way out when the watch is destroyed. + EXPECT_CALL(async_stream_, sendMessageRaw_(_, false)); } // Verifies that rate limiting is enforced with custom RateLimitSettings. 
@@ -567,7 +607,7 @@ TEST_F(GrpcMuxImplTest, TooManyRequestsWithCustomRateLimitSettings) { } }; - auto foo_sub = grpc_mux_->addWatch("foo", {"x"}, callbacks_); + auto foo_sub = grpc_mux_->addWatch("foo", {"x"}, callbacks_, resource_decoder_); expectSendMessage("foo", {"x"}, "", true); grpc_mux_->start(); @@ -576,20 +616,18 @@ TEST_F(GrpcMuxImplTest, TooManyRequestsWithCustomRateLimitSettings) { EXPECT_EQ(0, stats_.counter("control_plane.rate_limit_enforced").value()); // Validate that drain_request_timer is enabled when there are no tokens. - EXPECT_CALL(*drain_request_timer, enableTimer(std::chrono::milliseconds(500), _)) - .Times(AtLeast(1)); + EXPECT_CALL(*drain_request_timer, enableTimer(std::chrono::milliseconds(500), _)); + EXPECT_CALL(*drain_request_timer, enabled()).Times(11); onReceiveMessage(160); - EXPECT_EQ(12, stats_.counter("control_plane.rate_limit_enforced").value()); - Stats::Gauge& pending_requests = - stats_.gauge("control_plane.pending_requests", Stats::Gauge::ImportMode::Accumulate); - EXPECT_EQ(12, pending_requests.value()); + EXPECT_EQ(11, stats_.counter("control_plane.rate_limit_enforced").value()); + EXPECT_EQ(11, control_plane_pending_requests_.value()); // Validate that drain requests call when there are multiple requests in queue. time_system_.setMonotonicTime(std::chrono::seconds(10)); drain_timer_cb(); // Check that the pending_requests stat is updated with the queue drain. - EXPECT_EQ(0, pending_requests.value()); + EXPECT_EQ(0, control_plane_pending_requests_.value()); } // Verifies that a message with no resources is accepted. 
@@ -604,7 +642,7 @@ TEST_F(GrpcMuxImplTest, UnwatchedTypeAcceptsEmptyResources) { { // subscribe and unsubscribe to simulate a cluster added and removed expectSendMessage(type_url, {"y"}, "", true); - auto temp_sub = grpc_mux_->addWatch(type_url, {"y"}, callbacks_); + auto temp_sub = grpc_mux_->addWatch(type_url, {"y"}, callbacks_, resource_decoder_); expectSendMessage(type_url, {}, ""); } @@ -624,7 +662,7 @@ TEST_F(GrpcMuxImplTest, UnwatchedTypeAcceptsEmptyResources) { expectSendMessage(type_url, {"x"}, "1", false, "bar"); // simulate a new cluster x is added. add CLA subscription for it. - auto sub = grpc_mux_->addWatch(type_url, {"x"}, callbacks_); + auto sub = grpc_mux_->addWatch(type_url, {"x"}, callbacks_, resource_decoder_); expectSendMessage(type_url, {}, "1", false, "bar"); } @@ -640,7 +678,7 @@ TEST_F(GrpcMuxImplTest, UnwatchedTypeRejectsResources) { // subscribe and unsubscribe (by not keeping the return watch) so that the type is known to envoy expectSendMessage(type_url, {"y"}, "", true); expectSendMessage(type_url, {}, ""); - grpc_mux_->addWatch(type_url, {"y"}, callbacks_); + grpc_mux_->addWatch(type_url, {"y"}, callbacks_, resource_decoder_); // simulate the server sending CLA message to notify envoy that the CLA was added, // even though envoy doesn't expect it. Envoy should reject this update. 
diff --git a/test/common/config/grpc_stream_test.cc b/test/common/config/grpc_stream_test.cc index 03864161b5f02..3f28cc9691e71 100644 --- a/test/common/config/grpc_stream_test.cc +++ b/test/common/config/grpc_stream_test.cc @@ -33,7 +33,7 @@ class GrpcStreamTest : public testing::Test { NiceMock dispatcher_; Grpc::MockAsyncStream async_stream_; Stats::TestUtil::TestStore stats_; - NiceMock random_; + NiceMock random_; Envoy::Config::RateLimitSettings rate_limit_settings_; NiceMock callbacks_; std::unique_ptr async_client_owner_; @@ -101,11 +101,10 @@ TEST_F(GrpcStreamTest, ReceiveMessage) { response_copy.set_type_url("faketypeURL"); auto response = std::make_unique(response_copy); envoy::service::discovery::v3::DiscoveryResponse received_message; - EXPECT_CALL(callbacks_, onDiscoveryResponse(_)) + EXPECT_CALL(callbacks_, onDiscoveryResponse(_, _)) .WillOnce([&received_message]( - std::unique_ptr&& message) { - received_message = *message; - }); + std::unique_ptr&& message, + ControlPlaneStats&) { received_message = *message; }); grpc_stream_.onReceiveMessage(std::move(response)); EXPECT_TRUE(TestUtility::protoEqual(response_copy, received_message)); } diff --git a/test/common/config/grpc_subscription_impl_test.cc b/test/common/config/grpc_subscription_impl_test.cc index 2036e69bf336e..eb51a0d051d71 100644 --- a/test/common/config/grpc_subscription_impl_test.cc +++ b/test/common/config/grpc_subscription_impl_test.cc @@ -22,7 +22,7 @@ TEST_F(GrpcSubscriptionImplTest, StreamCreationFailure) { EXPECT_CALL(random_, random()); EXPECT_CALL(*timer_, enableTimer(_, _)); subscription_->start({"cluster0", "cluster1"}); - EXPECT_TRUE(statsAre(2, 0, 0, 1, 0, 0, 0)); + EXPECT_TRUE(statsAre(2, 0, 0, 1, 0, 0, 0, "")); // Ensure this doesn't cause an issue by sending a request, since we don't // have a gRPC stream. 
subscription_->updateResourceInterest({"cluster2"}); @@ -32,14 +32,14 @@ TEST_F(GrpcSubscriptionImplTest, StreamCreationFailure) { expectSendMessage({"cluster2"}, "", true); timer_cb_(); - EXPECT_TRUE(statsAre(3, 0, 0, 1, 0, 0, 0)); + EXPECT_TRUE(statsAre(3, 0, 0, 1, 0, 0, 0, "")); verifyControlPlaneStats(1); } // Validate that the client can recover from a remote stream closure via retry. TEST_F(GrpcSubscriptionImplTest, RemoteStreamClose) { startSubscription({"cluster0", "cluster1"}); - EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0)); + EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0, "")); // onConfigUpdateFailed() should not be called for gRPC stream connection failure EXPECT_CALL(callbacks_, onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::ConnectionFailure, _)) @@ -47,14 +47,14 @@ TEST_F(GrpcSubscriptionImplTest, RemoteStreamClose) { EXPECT_CALL(*timer_, enableTimer(_, _)); EXPECT_CALL(random_, random()); mux_->grpcStreamForTest().onRemoteClose(Grpc::Status::WellKnownGrpcStatus::Canceled, ""); - EXPECT_TRUE(statsAre(2, 0, 0, 1, 0, 0, 0)); + EXPECT_TRUE(statsAre(2, 0, 0, 1, 0, 0, 0, "")); verifyControlPlaneStats(0); // Retry and succeed. EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); expectSendMessage({"cluster0", "cluster1"}, "", true); timer_cb_(); - EXPECT_TRUE(statsAre(2, 0, 0, 1, 0, 0, 0)); + EXPECT_TRUE(statsAre(2, 0, 0, 1, 0, 0, 0, "")); } // Validate that When the management server gets multiple requests for the same version, it can @@ -62,43 +62,43 @@ TEST_F(GrpcSubscriptionImplTest, RemoteStreamClose) { TEST_F(GrpcSubscriptionImplTest, RepeatedNonce) { InSequence s; startSubscription({"cluster0", "cluster1"}); - EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0)); + EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0, "")); // First with the initial, empty version update to "0". 
updateResourceInterest({"cluster2"}); - EXPECT_TRUE(statsAre(2, 0, 0, 0, 0, 0, 0)); + EXPECT_TRUE(statsAre(2, 0, 0, 0, 0, 0, 0, "")); deliverConfigUpdate({"cluster0", "cluster2"}, "0", false); - EXPECT_TRUE(statsAre(3, 0, 1, 0, 0, 0, 0)); + EXPECT_TRUE(statsAre(3, 0, 1, 0, 0, 0, 0, "")); deliverConfigUpdate({"cluster0", "cluster2"}, "0", true); - EXPECT_TRUE(statsAre(4, 1, 1, 0, 0, TEST_TIME_MILLIS, 7148434200721666028)); + EXPECT_TRUE(statsAre(4, 1, 1, 0, 0, TEST_TIME_MILLIS, 7148434200721666028, "0")); // Now with version "0" update to "1". updateResourceInterest({"cluster3"}); - EXPECT_TRUE(statsAre(5, 1, 1, 0, 0, TEST_TIME_MILLIS, 7148434200721666028)); - deliverConfigUpdate({"cluster3"}, "1", false); - EXPECT_TRUE(statsAre(6, 1, 2, 0, 0, TEST_TIME_MILLIS, 7148434200721666028)); - deliverConfigUpdate({"cluster3"}, "1", true); - EXPECT_TRUE(statsAre(7, 2, 2, 0, 0, TEST_TIME_MILLIS, 13237225503670494420U)); + EXPECT_TRUE(statsAre(5, 1, 1, 0, 0, TEST_TIME_MILLIS, 7148434200721666028, "0")); + deliverConfigUpdate({"cluster3"}, "42", false); + EXPECT_TRUE(statsAre(6, 1, 2, 0, 0, TEST_TIME_MILLIS, 7148434200721666028, "0")); + deliverConfigUpdate({"cluster3"}, "42", true); + EXPECT_TRUE(statsAre(7, 2, 2, 0, 0, TEST_TIME_MILLIS, 7919287270473417401, "42")); } TEST_F(GrpcSubscriptionImplTest, UpdateTimeNotChangedOnUpdateReject) { InSequence s; startSubscription({"cluster0", "cluster1"}); - EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0)); + EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0, "")); deliverConfigUpdate({"cluster0", "cluster2"}, "0", false); - EXPECT_TRUE(statsAre(2, 0, 1, 0, 0, 0, 0)); + EXPECT_TRUE(statsAre(2, 0, 1, 0, 0, 0, 0, "")); } TEST_F(GrpcSubscriptionImplTest, UpdateTimeChangedOnUpdateSuccess) { InSequence s; startSubscription({"cluster0", "cluster1"}); - EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0)); + EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0, "")); deliverConfigUpdate({"cluster0", "cluster2"}, "0", true); - EXPECT_TRUE(statsAre(2, 1, 0, 0, 0, TEST_TIME_MILLIS, 
7148434200721666028)); + EXPECT_TRUE(statsAre(2, 1, 0, 0, 0, TEST_TIME_MILLIS, 7148434200721666028, "0")); // Advance the simulated time and verify that a trivial update (no change) also changes the update // time. simTime().setSystemTime(SystemTime(std::chrono::milliseconds(TEST_TIME_MILLIS + 1))); deliverConfigUpdate({"cluster0", "cluster2"}, "0", true); - EXPECT_TRUE(statsAre(2, 2, 0, 0, 0, TEST_TIME_MILLIS + 1, 7148434200721666028)); + EXPECT_TRUE(statsAre(2, 2, 0, 0, 0, TEST_TIME_MILLIS + 1, 7148434200721666028, "0")); } } // namespace diff --git a/test/common/config/grpc_subscription_test_harness.h b/test/common/config/grpc_subscription_test_harness.h index 643009df1b95f..8649dcb01afea 100644 --- a/test/common/config/grpc_subscription_test_harness.h +++ b/test/common/config/grpc_subscription_test_harness.h @@ -5,6 +5,7 @@ #include "envoy/api/v2/discovery.pb.h" #include "envoy/config/core/v3/base.pb.h" #include "envoy/config/endpoint/v3/endpoint.pb.h" +#include "envoy/config/endpoint/v3/endpoint.pb.validate.h" #include "envoy/service/discovery/v3/discovery.pb.h" #include "common/common/hash.h" @@ -54,8 +55,8 @@ class GrpcSubscriptionTestHarness : public SubscriptionTestHarness { *method_descriptor_, envoy::config::core::v3::ApiVersion::AUTO, random_, stats_store_, rate_limit_settings_, true); subscription_ = std::make_unique( - mux_, callbacks_, stats_, Config::TypeUrl::get().ClusterLoadAssignment, dispatcher_, - init_fetch_timeout, false); + mux_, callbacks_, resource_decoder_, stats_, Config::TypeUrl::get().ClusterLoadAssignment, + dispatcher_, init_fetch_timeout, false); } ~GrpcSubscriptionTestHarness() override { EXPECT_CALL(async_stream_, sendMessageRaw_(_, false)); } @@ -105,6 +106,7 @@ class GrpcSubscriptionTestHarness : public SubscriptionTestHarness { last_response_nonce_ = std::to_string(HashUtil::xxHash64(version)); response->set_nonce(last_response_nonce_); response->set_type_url(Config::TypeUrl::get().ClusterLoadAssignment); + 
response->mutable_control_plane()->set_identifier("ground_control_foo123"); Protobuf::RepeatedPtrField typed_resources; for (const auto& cluster : cluster_names) { if (std::find(last_cluster_names_.begin(), last_cluster_names_.end(), cluster) != @@ -114,7 +116,10 @@ class GrpcSubscriptionTestHarness : public SubscriptionTestHarness { response->add_resources()->PackFrom(API_DOWNGRADE(*load_assignment)); } } - EXPECT_CALL(callbacks_, onConfigUpdate(RepeatedProtoEq(response->resources()), version)) + const auto decoded_resources = + TestUtility::decodeResources( + *response, "cluster_name"); + EXPECT_CALL(callbacks_, onConfigUpdate(DecodedResourcesEq(decoded_resources.refvec_), version)) .WillOnce(ThrowOnRejectedConfig(accept)); if (accept) { expectSendMessage(last_cluster_names_, version, false); @@ -125,7 +130,8 @@ class GrpcSubscriptionTestHarness : public SubscriptionTestHarness { expectSendMessage(last_cluster_names_, version_, false, Grpc::Status::WellKnownGrpcStatus::Internal, "bad config"); } - mux_->onDiscoveryResponse(std::move(response)); + mux_->onDiscoveryResponse(std::move(response), control_plane_stats_); + EXPECT_EQ(control_plane_stats_.identifier_.value(), "ground_control_foo123"); Mock::VerifyAndClearExpectations(&async_stream_); } @@ -172,14 +178,16 @@ class GrpcSubscriptionTestHarness : public SubscriptionTestHarness { Grpc::MockAsyncClient* async_client_; NiceMock cm_; Event::MockDispatcher dispatcher_; - Runtime::MockRandomGenerator random_; + Random::MockRandomGenerator random_; Event::MockTimer* timer_; Event::TimerCb timer_cb_; envoy::config::core::v3::Node node_; NiceMock callbacks_; + TestUtility::TestOpaqueResourceDecoderImpl + resource_decoder_{"cluster_name"}; NiceMock async_stream_; - std::shared_ptr mux_; - std::unique_ptr subscription_; + GrpcMuxImplSharedPtr mux_; + GrpcSubscriptionImplPtr subscription_; std::string last_response_nonce_; std::set last_cluster_names_; NiceMock local_info_; diff --git 
a/test/common/config/http_subscription_impl_test.cc b/test/common/config/http_subscription_impl_test.cc index d79884ef19158..abda847f03c41 100644 --- a/test/common/config/http_subscription_impl_test.cc +++ b/test/common/config/http_subscription_impl_test.cc @@ -19,11 +19,11 @@ TEST_F(HttpSubscriptionImplTest, OnRequestReset) { onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::ConnectionFailure, _)) .Times(0); http_callbacks_->onFailure(http_request_, Http::AsyncClient::FailureReason::Reset); - EXPECT_TRUE(statsAre(1, 0, 0, 1, 0, 0, 0)); + EXPECT_TRUE(statsAre(1, 0, 0, 1, 0, 0, 0, "")); timerTick(); - EXPECT_TRUE(statsAre(2, 0, 0, 1, 0, 0, 0)); - deliverConfigUpdate({"cluster0", "cluster1"}, "0", true); - EXPECT_TRUE(statsAre(3, 1, 0, 1, 0, TEST_TIME_MILLIS, 7148434200721666028)); + EXPECT_TRUE(statsAre(2, 0, 0, 1, 0, 0, 0, "")); + deliverConfigUpdate({"cluster0", "cluster1"}, "42", true); + EXPECT_TRUE(statsAre(3, 1, 0, 1, 0, TEST_TIME_MILLIS, 7919287270473417401, "42")); } // Validate that the client can recover from bad JSON responses. 
@@ -38,48 +38,48 @@ TEST_F(HttpSubscriptionImplTest, BadJsonRecovery) { EXPECT_CALL(callbacks_, onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::UpdateRejected, _)); http_callbacks_->onSuccess(http_request_, std::move(message)); - EXPECT_TRUE(statsAre(1, 0, 1, 0, 0, 0, 0)); + EXPECT_TRUE(statsAre(1, 0, 1, 0, 0, 0, 0, "")); request_in_progress_ = false; timerTick(); - EXPECT_TRUE(statsAre(2, 0, 1, 0, 0, 0, 0)); + EXPECT_TRUE(statsAre(2, 0, 1, 0, 0, 0, 0, "")); deliverConfigUpdate({"cluster0", "cluster1"}, "0", true); - EXPECT_TRUE(statsAre(3, 1, 1, 0, 0, TEST_TIME_MILLIS, 7148434200721666028)); + EXPECT_TRUE(statsAre(3, 1, 1, 0, 0, TEST_TIME_MILLIS, 7148434200721666028, "0")); } TEST_F(HttpSubscriptionImplTest, ConfigNotModified) { startSubscription({"cluster0", "cluster1"}); - EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0)); + EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0, "")); timerTick(); - EXPECT_TRUE(statsAre(2, 0, 0, 0, 0, 0, 0)); + EXPECT_TRUE(statsAre(2, 0, 0, 0, 0, 0, 0, "")); // accept and modify. deliverConfigUpdate({"cluster0", "cluster1"}, "0", true, true, "200"); - EXPECT_TRUE(statsAre(3, 1, 0, 0, 0, TEST_TIME_MILLIS, 7148434200721666028)); + EXPECT_TRUE(statsAre(3, 1, 0, 0, 0, TEST_TIME_MILLIS, 7148434200721666028, "0")); // accept and does not modify. 
deliverConfigUpdate({"cluster0", "cluster1"}, "0", true, false, "304"); - EXPECT_TRUE(statsAre(4, 1, 0, 0, 0, TEST_TIME_MILLIS, 7148434200721666028)); + EXPECT_TRUE(statsAre(4, 1, 0, 0, 0, TEST_TIME_MILLIS, 7148434200721666028, "0")); } TEST_F(HttpSubscriptionImplTest, UpdateTimeNotChangedOnUpdateReject) { startSubscription({"cluster0", "cluster1"}); - EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0)); + EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0, "")); deliverConfigUpdate({"cluster0", "cluster1"}, "0", false); - EXPECT_TRUE(statsAre(2, 0, 1, 0, 0, 0, 0)); + EXPECT_TRUE(statsAre(2, 0, 1, 0, 0, 0, 0, "")); } TEST_F(HttpSubscriptionImplTest, UpdateTimeChangedOnUpdateSuccess) { startSubscription({"cluster0", "cluster1"}); - EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0)); + EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0, "")); deliverConfigUpdate({"cluster0", "cluster1"}, "0", true); - EXPECT_TRUE(statsAre(2, 1, 0, 0, 0, TEST_TIME_MILLIS, 7148434200721666028)); + EXPECT_TRUE(statsAre(2, 1, 0, 0, 0, TEST_TIME_MILLIS, 7148434200721666028, "0")); // Advance the simulated time and verify that a trivial update (no change) also changes the update // time. 
simTime().setSystemTime(SystemTime(std::chrono::milliseconds(TEST_TIME_MILLIS + 1))); deliverConfigUpdate({"cluster0", "cluster1"}, "0", true); - EXPECT_TRUE(statsAre(3, 2, 0, 0, 0, TEST_TIME_MILLIS + 1, 7148434200721666028)); + EXPECT_TRUE(statsAre(3, 2, 0, 0, 0, TEST_TIME_MILLIS + 1, 7148434200721666028, "0")); } } // namespace diff --git a/test/common/config/http_subscription_test_harness.h b/test/common/config/http_subscription_test_harness.h index af798a4efac8f..d0a1dc18fd873 100644 --- a/test/common/config/http_subscription_test_harness.h +++ b/test/common/config/http_subscription_test_harness.h @@ -4,6 +4,7 @@ #include "envoy/config/core/v3/base.pb.h" #include "envoy/config/endpoint/v3/endpoint.pb.h" +#include "envoy/config/endpoint/v3/endpoint.pb.validate.h" #include "envoy/http/async_client.h" #include "envoy/service/discovery/v3/discovery.pb.h" @@ -51,7 +52,7 @@ class HttpSubscriptionTestHarness : public SubscriptionTestHarness { local_info_, cm_, "eds_cluster", dispatcher_, random_gen_, std::chrono::milliseconds(1), std::chrono::milliseconds(1000), *method_descriptor_, Config::TypeUrl::get().ClusterLoadAssignment, envoy::config::core::v3::ApiVersion::AUTO, - callbacks_, stats_, init_fetch_timeout, validation_visitor_); + callbacks_, resource_decoder_, stats_, init_fetch_timeout, validation_visitor_); } ~HttpSubscriptionTestHarness() override { @@ -70,12 +71,11 @@ class HttpSubscriptionTestHarness : public SubscriptionTestHarness { Http::AsyncClient::Callbacks& callbacks, const Http::AsyncClient::RequestOptions&) { http_callbacks_ = &callbacks; - EXPECT_EQ("POST", std::string(request->headers().Method()->value().getStringView())); + EXPECT_EQ("POST", request->headers().getMethodValue()); EXPECT_EQ(Http::Headers::get().ContentTypeValues.Json, - std::string(request->headers().ContentType()->value().getStringView())); - EXPECT_EQ("eds_cluster", std::string(request->headers().Host()->value().getStringView())); - EXPECT_EQ("/v2/discovery:endpoints", - 
std::string(request->headers().Path()->value().getStringView())); + request->headers().getContentTypeValue()); + EXPECT_EQ("eds_cluster", request->headers().getHostValue()); + EXPECT_EQ("/v2/discovery:endpoints", request->headers().getPathValue()); std::string expected_request = "{"; if (!version_.empty()) { expected_request += "\"version_info\":\"" + version + "\","; @@ -98,7 +98,7 @@ class HttpSubscriptionTestHarness : public SubscriptionTestHarness { expected_request += "}"; EXPECT_EQ(expected_request, request->bodyAsString()); EXPECT_EQ(fmt::format_int(expected_request.size()).str(), - std::string(request->headers().ContentLength()->value().getStringView())); + request->headers().getContentLengthValue()); request_in_progress_ = true; return &http_request_; })); @@ -140,9 +140,13 @@ class HttpSubscriptionTestHarness : public SubscriptionTestHarness { new Http::TestResponseHeaderMapImpl{{":status", response_code}}}; Http::ResponseMessagePtr message{new Http::ResponseMessageImpl(std::move(response_headers))}; message->body() = std::make_unique(response_json); + const auto decoded_resources = + TestUtility::decodeResources( + response_pb, "cluster_name"); if (modify) { - EXPECT_CALL(callbacks_, onConfigUpdate(RepeatedProtoEq(response_pb.resources()), version)) + EXPECT_CALL(callbacks_, + onConfigUpdate(DecodedResourcesEq(decoded_resources.refvec_), version)) .WillOnce(ThrowOnRejectedConfig(accept)); } if (!accept) { @@ -188,10 +192,12 @@ class HttpSubscriptionTestHarness : public SubscriptionTestHarness { Event::MockTimer* timer_; Event::TimerCb timer_cb_; envoy::config::core::v3::Node node_; - Runtime::MockRandomGenerator random_gen_; + Random::MockRandomGenerator random_gen_; Http::MockAsyncClientRequest http_request_; Http::AsyncClient::Callbacks* http_callbacks_; Config::MockSubscriptionCallbacks callbacks_; + TestUtility::TestOpaqueResourceDecoderImpl + resource_decoder_{"cluster_name"}; std::unique_ptr subscription_; NiceMock local_info_; Event::MockTimer* 
init_timeout_timer_; diff --git a/test/common/config/new_grpc_mux_impl_test.cc b/test/common/config/new_grpc_mux_impl_test.cc index 81ecf627bc7d9..a35a38d577256 100644 --- a/test/common/config/new_grpc_mux_impl_test.cc +++ b/test/common/config/new_grpc_mux_impl_test.cc @@ -1,6 +1,7 @@ #include #include "envoy/config/endpoint/v3/endpoint.pb.h" +#include "envoy/config/endpoint/v3/endpoint.pb.validate.h" #include "envoy/service/discovery/v3/discovery.pb.h" #include "common/common/empty_string.h" @@ -41,6 +42,7 @@ class NewGrpcMuxImplTestBase : public testing::Test { public: NewGrpcMuxImplTestBase() : async_client_(new Grpc::MockAsyncClient()), + control_plane_stats_(Utility::generateControlPlaneStats(stats_)), control_plane_connected_state_( stats_.gauge("control_plane.connected_state", Stats::Gauge::ImportMode::NeverImport)) {} @@ -54,14 +56,17 @@ class NewGrpcMuxImplTestBase : public testing::Test { } NiceMock dispatcher_; - NiceMock random_; + NiceMock random_; Grpc::MockAsyncClient* async_client_; NiceMock async_stream_; - std::unique_ptr grpc_mux_; + NewGrpcMuxImplPtr grpc_mux_; NiceMock callbacks_; + TestUtility::TestOpaqueResourceDecoderImpl + resource_decoder_{"cluster_name"}; NiceMock local_info_; Stats::TestUtil::TestStore stats_; Envoy::Config::RateLimitSettings rate_limit_settings_; + ControlPlaneStats control_plane_stats_; Stats::Gauge& control_plane_connected_state_; }; @@ -75,7 +80,7 @@ TEST_F(NewGrpcMuxImplTest, DiscoveryResponseNonexistentSub) { setup(); const std::string& type_url = Config::TypeUrl::get().ClusterLoadAssignment; - auto watch = grpc_mux_->addWatch(type_url, {}, callbacks_); + auto watch = grpc_mux_->addWatch(type_url, {}, callbacks_, resource_decoder_); EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); grpc_mux_->start(); @@ -86,7 +91,7 @@ TEST_F(NewGrpcMuxImplTest, DiscoveryResponseNonexistentSub) { unexpected_response->set_type_url(type_url); unexpected_response->set_system_version_info("0"); 
EXPECT_CALL(callbacks_, onConfigUpdate(_, _, "0")).Times(0); - grpc_mux_->onDiscoveryResponse(std::move(unexpected_response)); + grpc_mux_->onDiscoveryResponse(std::move(unexpected_response), control_plane_stats_); } { auto response = std::make_unique(); @@ -96,18 +101,14 @@ TEST_F(NewGrpcMuxImplTest, DiscoveryResponseNonexistentSub) { load_assignment.set_cluster_name("x"); response->add_resources()->mutable_resource()->PackFrom(API_DOWNGRADE(load_assignment)); EXPECT_CALL(callbacks_, onConfigUpdate(_, _, "1")) - .WillOnce( - Invoke([&load_assignment]( - const Protobuf::RepeatedPtrField& - added_resources, - const Protobuf::RepeatedPtrField&, const std::string&) { - EXPECT_EQ(1, added_resources.size()); - envoy::config::endpoint::v3::ClusterLoadAssignment expected_assignment = - MessageUtil::anyConvert( - added_resources[0].resource()); - EXPECT_TRUE(TestUtility::protoEqual(expected_assignment, load_assignment)); - })); - grpc_mux_->onDiscoveryResponse(std::move(response)); + .WillOnce(Invoke([&load_assignment](const std::vector& added_resources, + const Protobuf::RepeatedPtrField&, + const std::string&) { + EXPECT_EQ(1, added_resources.size()); + EXPECT_TRUE( + TestUtility::protoEqual(added_resources[0].get().resource(), load_assignment)); + })); + grpc_mux_->onDiscoveryResponse(std::move(response), control_plane_stats_); } } @@ -117,7 +118,7 @@ TEST_F(NewGrpcMuxImplTest, ConfigUpdateWithAliases) { setup(); const std::string& type_url = Config::TypeUrl::get().VirtualHost; - auto watch = grpc_mux_->addWatch(type_url, {"domain1.test"}, callbacks_); + auto watch = grpc_mux_->addWatch(type_url, {"domain1.test"}, callbacks_, resource_decoder_); EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); grpc_mux_->start(); @@ -136,7 +137,7 @@ TEST_F(NewGrpcMuxImplTest, ConfigUpdateWithAliases) { response->mutable_resources()->at(0).add_aliases("domain1.test"); response->mutable_resources()->at(0).add_aliases("domain2.test"); - 
grpc_mux_->onDiscoveryResponse(std::move(response)); + grpc_mux_->onDiscoveryResponse(std::move(response), control_plane_stats_); const auto& subscriptions = grpc_mux_->subscriptions(); auto sub = subscriptions.find(type_url); @@ -152,7 +153,7 @@ TEST_F(NewGrpcMuxImplTest, ConfigUpdateWithNotFoundResponse) { setup(); const std::string& type_url = Config::TypeUrl::get().VirtualHost; - auto watch = grpc_mux_->addWatch(type_url, {"domain1.test"}, callbacks_); + auto watch = grpc_mux_->addWatch(type_url, {"domain1.test"}, callbacks_, resource_decoder_); EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); grpc_mux_->start(); @@ -165,7 +166,7 @@ TEST_F(NewGrpcMuxImplTest, ConfigUpdateWithNotFoundResponse) { response->mutable_resources()->at(0).set_name("not-found"); response->mutable_resources()->at(0).add_aliases("domain1.test"); - grpc_mux_->onDiscoveryResponse(std::move(response)); + grpc_mux_->onDiscoveryResponse(std::move(response), control_plane_stats_); const auto& subscriptions = grpc_mux_->subscriptions(); auto sub = subscriptions.find(type_url); diff --git a/test/common/config/opaque_resource_decoder_impl_test.cc b/test/common/config/opaque_resource_decoder_impl_test.cc new file mode 100644 index 0000000000000..9aded46538bac --- /dev/null +++ b/test/common/config/opaque_resource_decoder_impl_test.cc @@ -0,0 +1,106 @@ +#include "envoy/config/endpoint/v3/endpoint.pb.h" +#include "envoy/config/endpoint/v3/endpoint.pb.validate.h" + +#include "common/config/opaque_resource_decoder_impl.h" +#include "common/protobuf/message_validator_impl.h" + +#include "test/test_common/utility.h" + +#include "gtest/gtest.h" + +namespace Envoy { +namespace Config { +namespace { + +class OpaqueResourceDecoderImplTest : public testing::Test { +public: + std::pair + decodeTypedResource(const envoy::config::endpoint::v3::ClusterLoadAssignment& typed_resource) { + ProtobufWkt::Any opaque_resource; + opaque_resource.PackFrom(typed_resource); + auto 
decoded_resource = resource_decoder_.decodeResource(opaque_resource); + const std::string name = resource_decoder_.resourceName(*decoded_resource); + return {std::move(decoded_resource), name}; + } + + ProtobufMessage::StrictValidationVisitorImpl validation_visitor_; + OpaqueResourceDecoderImpl resource_decoder_{ + validation_visitor_, "cluster_name"}; +}; + +// Negative test for bad type URL in Any. +TEST_F(OpaqueResourceDecoderImplTest, WrongType) { + ProtobufWkt::Any opaque_resource; + opaque_resource.set_type_url("huh"); + EXPECT_THROW_WITH_REGEX(resource_decoder_.decodeResource(opaque_resource), EnvoyException, + "Unable to unpack"); +} + +// If the Any is empty (no type set), the default instance of the opaque resource decoder type is +// created. +TEST_F(OpaqueResourceDecoderImplTest, Empty) { + ProtobufWkt::Any opaque_resource; + const auto decoded_resource = resource_decoder_.decodeResource(opaque_resource); + EXPECT_THAT(*decoded_resource, ProtoEq(envoy::config::endpoint::v3::ClusterLoadAssignment())); + EXPECT_EQ("", resource_decoder_.resourceName(*decoded_resource)); +} + +// Negative test for protoc-gen-validate constraints. +TEST_F(OpaqueResourceDecoderImplTest, ValidateFail) { + envoy::config::endpoint::v3::ClusterLoadAssignment invalid_resource; + EXPECT_THROW(decodeTypedResource(invalid_resource), ProtoValidationException); +} + +// When validation is skipped, verify that we can ignore unknown fields. 
+TEST_F(OpaqueResourceDecoderImplTest, ValidateIgnored) { + ProtobufMessage::NullValidationVisitorImpl validation_visitor; + OpaqueResourceDecoderImpl resource_decoder{ + validation_visitor, "cluster_name"}; + envoy::config::endpoint::v3::ClusterLoadAssignment strange_resource; + strange_resource.set_cluster_name("fare"); + auto* unknown = strange_resource.GetReflection()->MutableUnknownFields(&strange_resource); + // add a field that doesn't exist in the proto definition: + unknown->AddFixed32(1000, 1); + ProtobufWkt::Any opaque_resource; + opaque_resource.PackFrom(strange_resource); + const auto decoded_resource = resource_decoder.decodeResource(opaque_resource); + EXPECT_THAT(*decoded_resource, ProtoEq(strange_resource)); + EXPECT_EQ("fare", resource_decoder_.resourceName(*decoded_resource)); +} + +// Handling of smuggled deprecated fields during Any conversion. +TEST_F(OpaqueResourceDecoderImplTest, HiddenEnvoyDeprecatedFields) { + // This test is only valid in API-v3, and should be updated for API-v4, as + // the deprecated fields of API-v2 will be removed. + envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment = + TestUtility::parseYaml(R"EOF( + cluster_name: fare + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 1.2.3.4 + port_value: 80 + policy: + overprovisioning_factor: 100 + hidden_envoy_deprecated_disable_overprovisioning: true + )EOF"); + EXPECT_THROW_WITH_REGEX(decodeTypedResource(cluster_load_assignment), ProtoValidationException, + "Illegal use of hidden_envoy_deprecated_ V2 field " + "'envoy.config.endpoint.v3.ClusterLoadAssignment.Policy.hidden_envoy_" + "deprecated_disable_overprovisioning'"); +} + +// Happy path. 
+TEST_F(OpaqueResourceDecoderImplTest, Success) { + envoy::config::endpoint::v3::ClusterLoadAssignment cluster_resource; + cluster_resource.set_cluster_name("foo"); + const auto result = decodeTypedResource(cluster_resource); + EXPECT_THAT(*result.first, ProtoEq(cluster_resource)); + EXPECT_EQ("foo", result.second); +} + +} // namespace +} // namespace Config +} // namespace Envoy diff --git a/test/common/config/pausable_ack_queue_test.cc b/test/common/config/pausable_ack_queue_test.cc index b282c20929927..f817cc7ff52ae 100644 --- a/test/common/config/pausable_ack_queue_test.cc +++ b/test/common/config/pausable_ack_queue_test.cc @@ -50,6 +50,15 @@ TEST(PausableAckQueueTest, TestPauseResume) { EXPECT_EQ("nonce2", p.front().nonce_); EXPECT_EQ("type2", p.front().type_url_); + // validate the above result is invariant even if we nest pauses. + p.pause("type1"); + EXPECT_EQ(4, p.size()); + EXPECT_EQ("nonce2", p.front().nonce_); + EXPECT_EQ("type2", p.front().type_url_); + p.resume("type1"); + EXPECT_EQ("nonce2", p.front().nonce_); + EXPECT_EQ("type2", p.front().type_url_); + UpdateAck ack = p.popFront(); EXPECT_EQ("nonce2", ack.nonce_); EXPECT_EQ("type2", ack.type_url_); diff --git a/test/common/config/registry_test.cc b/test/common/config/registry_test.cc index ada81ac0bfd7d..be6ea155d8bd7 100644 --- a/test/common/config/registry_test.cc +++ b/test/common/config/registry_test.cc @@ -15,6 +15,8 @@ namespace Envoy { namespace Config { namespace { +using ::testing::Optional; + class InternalFactory : public Config::UntypedFactory { public: ~InternalFactory() override = default; @@ -103,6 +105,18 @@ TEST(RegistryTest, DEPRECATED_FEATURE_TEST(WithDeprecatedFactoryPublished)) { ->name()); } +class NoNamePublishedFactory : public PublishedFactory { +public: + std::string name() const override { return ""; } +}; + +TEST(RegistryTest, DEPRECATED_FEATURE_TEST(AssertsIfNoDeprecatedNameGiven)) { + // Expects an assert to raise if we register a factory that has an empty name + // 
and no associated deprecated names. + EXPECT_DEBUG_DEATH((Registry::RegisterFactory({})), + "Attempted to register a factory without a name or deprecated name"); +} + class TestVersionedFactory : public PublishedFactory { public: std::string name() const override { return "testing.published.versioned"; } @@ -182,6 +196,48 @@ TEST(RegistryTest, TestDoubleRegistrationByName) { "Double registration for name: 'testing.published.test'"); } +class PublishedFactoryWithNameAndCategory : public PublishedFactory { +public: + std::string category() const override { return "testing.published.additional.category"; } + std::string name() const override { + return "testing.published.versioned.instead_name_and_category"; + } +}; + +TEST(RegistryTest, DEPRECATED_FEATURE_TEST(VersionedWithDeprecatedNamesFactoryAndNewCategory)) { + PublishedFactoryWithNameAndCategory test; + + // Check the category is not registered + ASSERT_FALSE(Registry::FactoryCategoryRegistry::isRegistered(test.category())); + + auto factory = Registry::RegisterFactory( + FACTORY_VERSION(0, 0, 1, {{"build.kind", "private"}}), + {"testing.published.versioned.deprecated_name_and_category"}); + + // Check the category now registered + ASSERT_TRUE(Registry::FactoryCategoryRegistry::isRegistered(test.category())); + + const auto& factories = Envoy::Registry::FactoryCategoryRegistry::registeredFactories(); + + auto version = + factories.find("testing.published.additional.category") + ->second->getFactoryVersion("testing.published.versioned.instead_name_and_category"); + + ASSERT_TRUE(version.has_value()); + EXPECT_EQ(0, version.value().version().major_number()); + EXPECT_EQ(0, version.value().version().minor_number()); + EXPECT_EQ(1, version.value().version().patch()); + EXPECT_EQ(1, version.value().metadata().fields().size()); + EXPECT_EQ("private", version.value().metadata().fields().at("build.kind").string_value()); + + // Get the version using deprecated name and check that it matches the + // version obtained 
through the new name. + auto deprecated_version = + factories.find("testing.published.additional.category") + ->second->getFactoryVersion("testing.published.versioned.deprecated_name_and_category"); + EXPECT_THAT(deprecated_version, Optional(ProtoEq(version.value()))); +} + } // namespace } // namespace Config } // namespace Envoy diff --git a/test/common/config/subscription_factory_impl_test.cc b/test/common/config/subscription_factory_impl_test.cc index f52407bcc0c47..3c0cbc5c5d739 100644 --- a/test/common/config/subscription_factory_impl_test.cc +++ b/test/common/config/subscription_factory_impl_test.cc @@ -19,6 +19,7 @@ #include "test/mocks/stats/mocks.h" #include "test/mocks/upstream/mocks.h" #include "test/test_common/environment.h" +#include "test/test_common/logging.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" @@ -27,6 +28,7 @@ using ::testing::_; using ::testing::Invoke; using ::testing::Return; +using ::testing::ReturnRef; namespace Envoy { namespace Config { @@ -35,25 +37,29 @@ namespace { class SubscriptionFactoryTest : public testing::Test { public: SubscriptionFactoryTest() - : http_request_(&cm_.async_client_), api_(Api::createApiForTest(stats_store_)) {} + : http_request_(&cm_.async_client_), api_(Api::createApiForTest(stats_store_)), + subscription_factory_(local_info_, dispatcher_, cm_, random_, validation_visitor_, *api_, + runtime_) {} - std::unique_ptr + SubscriptionPtr subscriptionFromConfigSource(const envoy::config::core::v3::ConfigSource& config) { - return SubscriptionFactoryImpl(local_info_, dispatcher_, cm_, random_, validation_visitor_, - *api_) - .subscriptionFromConfigSource(config, Config::TypeUrl::get().ClusterLoadAssignment, - stats_store_, callbacks_); + return subscription_factory_.subscriptionFromConfigSource( + config, Config::TypeUrl::get().ClusterLoadAssignment, stats_store_, callbacks_, + resource_decoder_); } Upstream::MockClusterManager cm_; Event::MockDispatcher dispatcher_; - 
Runtime::MockRandomGenerator random_; + Random::MockRandomGenerator random_; MockSubscriptionCallbacks callbacks_; + MockOpaqueResourceDecoder resource_decoder_; Http::MockAsyncClientRequest http_request_; Stats::MockIsolatedStatsStore stats_store_; NiceMock local_info_; NiceMock validation_visitor_; Api::ApiPtr api_; + NiceMock runtime_; + SubscriptionFactoryImpl subscription_factory_; }; class SubscriptionFactoryTestApiConfigSource @@ -64,63 +70,59 @@ TEST_F(SubscriptionFactoryTest, NoConfigSpecifier) { envoy::config::core::v3::ConfigSource config; EXPECT_THROW_WITH_MESSAGE( subscriptionFromConfigSource(config), EnvoyException, - "Missing config source specifier in envoy::api::v2::core::ConfigSource"); + "Missing config source specifier in envoy::config::core::v3::ConfigSource"); } TEST_F(SubscriptionFactoryTest, RestClusterEmpty) { envoy::config::core::v3::ConfigSource config; - Upstream::ClusterManager::ClusterInfoMap cluster_map; + Upstream::ClusterManager::ClusterSet primary_clusters; config.mutable_api_config_source()->set_api_type(envoy::config::core::v3::ApiConfigSource::REST); - EXPECT_CALL(cm_, clusters()).WillOnce(Return(cluster_map)); + EXPECT_CALL(cm_, primaryClusters()).WillOnce(ReturnRef(primary_clusters)); EXPECT_THROW_WITH_REGEX(subscriptionFromConfigSource(config), EnvoyException, "API configs must have either a gRPC service or a cluster name defined:"); } TEST_F(SubscriptionFactoryTest, GrpcClusterEmpty) { envoy::config::core::v3::ConfigSource config; - Upstream::ClusterManager::ClusterInfoMap cluster_map; + Upstream::ClusterManager::ClusterSet primary_clusters; config.mutable_api_config_source()->set_api_type(envoy::config::core::v3::ApiConfigSource::GRPC); - EXPECT_CALL(cm_, clusters()).WillOnce(Return(cluster_map)); + EXPECT_CALL(cm_, primaryClusters()).WillOnce(ReturnRef(primary_clusters)); EXPECT_THROW_WITH_REGEX(subscriptionFromConfigSource(config), EnvoyException, "API configs must have either a gRPC service or a cluster name defined:"); 
} TEST_F(SubscriptionFactoryTest, RestClusterSingleton) { envoy::config::core::v3::ConfigSource config; - Upstream::ClusterManager::ClusterInfoMap cluster_map; - NiceMock cluster; + Upstream::ClusterManager::ClusterSet primary_clusters; config.mutable_api_config_source()->set_api_type(envoy::config::core::v3::ApiConfigSource::REST); config.mutable_api_config_source()->mutable_refresh_delay()->set_seconds(1); config.mutable_api_config_source()->add_cluster_names("static_cluster"); - cluster_map.emplace("static_cluster", cluster); + primary_clusters.insert("static_cluster"); EXPECT_CALL(dispatcher_, createTimer_(_)); - EXPECT_CALL(cm_, clusters()).WillOnce(Return(cluster_map)); - EXPECT_CALL(*cluster.info_, addedViaApi()).WillOnce(Return(false)); - EXPECT_CALL(*cluster.info_, type()).WillOnce(Return(envoy::config::cluster::v3::Cluster::STATIC)); + EXPECT_CALL(cm_, primaryClusters()).WillOnce(ReturnRef(primary_clusters)); subscriptionFromConfigSource(config); } TEST_F(SubscriptionFactoryTest, GrpcClusterSingleton) { envoy::config::core::v3::ConfigSource config; - Upstream::ClusterManager::ClusterInfoMap cluster_map; - NiceMock cluster; + Upstream::ClusterManager::ClusterSet primary_clusters; config.mutable_api_config_source()->set_api_type(envoy::config::core::v3::ApiConfigSource::GRPC); config.mutable_api_config_source()->mutable_refresh_delay()->set_seconds(1); config.mutable_api_config_source()->add_grpc_services()->mutable_envoy_grpc()->set_cluster_name( "static_cluster"); - cluster_map.emplace("static_cluster", cluster); + primary_clusters.insert("static_cluster"); envoy::config::core::v3::GrpcService expected_grpc_service; expected_grpc_service.mutable_envoy_grpc()->set_cluster_name("static_cluster"); - EXPECT_CALL(cm_, clusters()).WillOnce(Return(cluster_map)); + EXPECT_CALL(cm_, primaryClusters()).WillOnce(ReturnRef(primary_clusters)); EXPECT_CALL(cm_, grpcAsyncClientManager()).WillOnce(ReturnRef(cm_.async_client_manager_)); 
EXPECT_CALL(cm_.async_client_manager_, factoryForGrpcService(ProtoEq(expected_grpc_service), _, _)) @@ -131,8 +133,6 @@ TEST_F(SubscriptionFactoryTest, GrpcClusterSingleton) { })); return async_client_factory; })); - EXPECT_CALL(*cluster.info_, addedViaApi()).WillOnce(Return(false)); - EXPECT_CALL(*cluster.info_, type()).WillOnce(Return(envoy::config::cluster::v3::Cluster::STATIC)); EXPECT_CALL(dispatcher_, createTimer_(_)); subscriptionFromConfigSource(config); @@ -140,21 +140,17 @@ TEST_F(SubscriptionFactoryTest, GrpcClusterSingleton) { TEST_F(SubscriptionFactoryTest, RestClusterMultiton) { envoy::config::core::v3::ConfigSource config; - Upstream::ClusterManager::ClusterInfoMap cluster_map; - NiceMock cluster; + Upstream::ClusterManager::ClusterSet primary_clusters; config.mutable_api_config_source()->set_api_type(envoy::config::core::v3::ApiConfigSource::REST); config.mutable_api_config_source()->add_cluster_names("static_cluster_foo"); - cluster_map.emplace("static_cluster_foo", cluster); + primary_clusters.insert("static_cluster_foo"); config.mutable_api_config_source()->add_cluster_names("static_cluster_bar"); - cluster_map.emplace("static_cluster_bar", cluster); + primary_clusters.insert("static_cluster_bar"); - EXPECT_CALL(cm_, clusters()).WillRepeatedly(Return(cluster_map)); - EXPECT_CALL(*cluster.info_, addedViaApi()).WillRepeatedly(Return(false)); - EXPECT_CALL(*cluster.info_, type()) - .WillRepeatedly(Return(envoy::config::cluster::v3::Cluster::STATIC)); + EXPECT_CALL(cm_, primaryClusters()).WillOnce(ReturnRef(primary_clusters)); EXPECT_THROW_WITH_REGEX(subscriptionFromConfigSource(config), EnvoyException, fmt::format("{} must have a singleton cluster name specified:", config.mutable_api_config_source()->GetTypeName())); @@ -162,23 +158,19 @@ TEST_F(SubscriptionFactoryTest, RestClusterMultiton) { TEST_F(SubscriptionFactoryTest, GrpcClusterMultiton) { envoy::config::core::v3::ConfigSource config; - Upstream::ClusterManager::ClusterInfoMap cluster_map; - 
NiceMock cluster; + Upstream::ClusterManager::ClusterSet primary_clusters; config.mutable_api_config_source()->set_api_type(envoy::config::core::v3::ApiConfigSource::GRPC); config.mutable_api_config_source()->add_grpc_services()->mutable_envoy_grpc()->set_cluster_name( "static_cluster_foo"); - cluster_map.emplace("static_cluster_foo", cluster); + primary_clusters.insert("static_cluster_foo"); config.mutable_api_config_source()->add_grpc_services()->mutable_envoy_grpc()->set_cluster_name( "static_cluster_bar"); - cluster_map.emplace("static_cluster_bar", cluster); + primary_clusters.insert("static_cluster_bar"); - EXPECT_CALL(cm_, clusters()).WillRepeatedly(Return(cluster_map)); EXPECT_CALL(cm_, grpcAsyncClientManager()).WillRepeatedly(ReturnRef(cm_.async_client_manager_)); - EXPECT_CALL(*cluster.info_, addedViaApi()).WillRepeatedly(Return(false)); - EXPECT_CALL(*cluster.info_, type()) - .WillRepeatedly(Return(envoy::config::cluster::v3::Cluster::STATIC)); + EXPECT_CALL(cm_, primaryClusters()).WillOnce(ReturnRef(primary_clusters)); EXPECT_THROW_WITH_REGEX(subscriptionFromConfigSource(config), EnvoyException, fmt::format("{}::.DELTA_.GRPC must have a " @@ -211,12 +203,9 @@ TEST_F(SubscriptionFactoryTest, LegacySubscription) { api_config_source->set_api_type( envoy::config::core::v3::ApiConfigSource::hidden_envoy_deprecated_UNSUPPORTED_REST_LEGACY); api_config_source->add_cluster_names("static_cluster"); - Upstream::ClusterManager::ClusterInfoMap cluster_map; - Upstream::MockClusterMockPrioritySet cluster; - cluster_map.emplace("static_cluster", cluster); - EXPECT_CALL(cm_, clusters()).WillOnce(Return(cluster_map)); - EXPECT_CALL(cluster, info()).Times(2); - EXPECT_CALL(*cluster.info_, addedViaApi()); + Upstream::ClusterManager::ClusterSet primary_clusters; + primary_clusters.insert("static_cluster"); + EXPECT_CALL(cm_, primaryClusters()).WillOnce(ReturnRef(primary_clusters)); EXPECT_THROW_WITH_REGEX(subscriptionFromConfigSource(config)->start({"static_cluster"}), 
EnvoyException, "REST_LEGACY no longer a supported ApiConfigSource.*"); } @@ -228,12 +217,9 @@ TEST_F(SubscriptionFactoryTest, HttpSubscriptionCustomRequestTimeout) { api_config_source->add_cluster_names("static_cluster"); api_config_source->mutable_refresh_delay()->set_seconds(1); api_config_source->mutable_request_timeout()->set_seconds(5); - Upstream::ClusterManager::ClusterInfoMap cluster_map; - Upstream::MockClusterMockPrioritySet cluster; - cluster_map.emplace("static_cluster", cluster); - EXPECT_CALL(cm_, clusters()).WillOnce(Return(cluster_map)); - EXPECT_CALL(cluster, info()).Times(2); - EXPECT_CALL(*cluster.info_, addedViaApi()); + Upstream::ClusterManager::ClusterSet primary_clusters; + primary_clusters.insert("static_cluster"); + EXPECT_CALL(cm_, primaryClusters()).WillOnce(ReturnRef(primary_clusters)); EXPECT_CALL(dispatcher_, createTimer_(_)).Times(2); EXPECT_CALL(cm_, httpAsyncClientForCluster("static_cluster")); EXPECT_CALL( @@ -248,22 +234,17 @@ TEST_F(SubscriptionFactoryTest, HttpSubscription) { api_config_source->set_api_type(envoy::config::core::v3::ApiConfigSource::REST); api_config_source->add_cluster_names("static_cluster"); api_config_source->mutable_refresh_delay()->set_seconds(1); - Upstream::ClusterManager::ClusterInfoMap cluster_map; - Upstream::MockClusterMockPrioritySet cluster; - cluster_map.emplace("static_cluster", cluster); - EXPECT_CALL(cm_, clusters()).WillOnce(Return(cluster_map)); - EXPECT_CALL(cluster, info()).Times(2); - EXPECT_CALL(*cluster.info_, addedViaApi()); + Upstream::ClusterManager::ClusterSet primary_clusters; + primary_clusters.insert("static_cluster"); + EXPECT_CALL(cm_, primaryClusters()).WillOnce(ReturnRef(primary_clusters)); EXPECT_CALL(dispatcher_, createTimer_(_)).Times(2); EXPECT_CALL(cm_, httpAsyncClientForCluster("static_cluster")); EXPECT_CALL(cm_.async_client_, send_(_, _, _)) .WillOnce(Invoke([this](Http::RequestMessagePtr& request, Http::AsyncClient::Callbacks&, const 
Http::AsyncClient::RequestOptions&) { - EXPECT_EQ("POST", std::string(request->headers().Method()->value().getStringView())); - EXPECT_EQ("static_cluster", - std::string(request->headers().Host()->value().getStringView())); - EXPECT_EQ("/v2/discovery:endpoints", - std::string(request->headers().Path()->value().getStringView())); + EXPECT_EQ("POST", request->headers().getMethodValue()); + EXPECT_EQ("static_cluster", request->headers().getHostValue()); + EXPECT_EQ("/v2/discovery:endpoints", request->headers().getPathValue()); return &http_request_; })); EXPECT_CALL(http_request_, cancel()); @@ -276,12 +257,9 @@ TEST_F(SubscriptionFactoryTest, HttpSubscriptionNoRefreshDelay) { auto* api_config_source = config.mutable_api_config_source(); api_config_source->set_api_type(envoy::config::core::v3::ApiConfigSource::REST); api_config_source->add_cluster_names("static_cluster"); - Upstream::ClusterManager::ClusterInfoMap cluster_map; - Upstream::MockClusterMockPrioritySet cluster; - cluster_map.emplace("static_cluster", cluster); - EXPECT_CALL(cm_, clusters()).WillOnce(Return(cluster_map)); - EXPECT_CALL(cluster, info()).Times(2); - EXPECT_CALL(*cluster.info_, addedViaApi()); + Upstream::ClusterManager::ClusterSet primary_clusters; + primary_clusters.insert("static_cluster"); + EXPECT_CALL(cm_, primaryClusters()).WillOnce(ReturnRef(primary_clusters)); EXPECT_THROW_WITH_MESSAGE(subscriptionFromConfigSource(config)->start({"static_cluster"}), EnvoyException, "refresh_delay is required for REST API configuration sources"); @@ -294,10 +272,9 @@ TEST_F(SubscriptionFactoryTest, GrpcSubscription) { api_config_source->add_grpc_services()->mutable_envoy_grpc()->set_cluster_name("static_cluster"); envoy::config::core::v3::GrpcService expected_grpc_service; expected_grpc_service.mutable_envoy_grpc()->set_cluster_name("static_cluster"); - Upstream::ClusterManager::ClusterInfoMap cluster_map; - NiceMock cluster; - cluster_map.emplace("static_cluster", cluster); - EXPECT_CALL(cm_, 
clusters()).WillOnce(Return(cluster_map)); + Upstream::ClusterManager::ClusterSet primary_clusters; + primary_clusters.insert("static_cluster"); + EXPECT_CALL(cm_, primaryClusters()).WillOnce(ReturnRef(primary_clusters)); EXPECT_CALL(cm_, grpcAsyncClientManager()).WillOnce(ReturnRef(cm_.async_client_manager_)); EXPECT_CALL(cm_.async_client_manager_, factoryForGrpcService(ProtoEq(expected_grpc_service), _, _)) @@ -315,6 +292,29 @@ TEST_F(SubscriptionFactoryTest, GrpcSubscription) { subscriptionFromConfigSource(config)->start({"static_cluster"}); } +TEST_F(SubscriptionFactoryTest, LogWarningOnDeprecatedApi) { + envoy::config::core::v3::ConfigSource config; + + config.mutable_api_config_source()->set_api_type(envoy::config::core::v3::ApiConfigSource::GRPC); + config.mutable_api_config_source()->set_transport_api_version( + envoy::config::core::v3::ApiVersion::V2); + NiceMock snapshot; + EXPECT_CALL(runtime_, snapshot()).WillRepeatedly(ReturnRef(snapshot)); + EXPECT_CALL(snapshot, runtimeFeatureEnabled(_)).WillOnce(Return(true)); + EXPECT_CALL(snapshot, countDeprecatedFeatureUse()); + + Upstream::ClusterManager::ClusterSet primary_clusters; + primary_clusters.insert("static_cluster"); + EXPECT_CALL(cm_, primaryClusters()).WillOnce(ReturnRef(primary_clusters)); + + EXPECT_LOG_CONTAINS( + "warn", "xDS of version v2 has been deprecated", try { + subscription_factory_.subscriptionFromConfigSource( + config, Config::TypeUrl::get().ClusterLoadAssignment, stats_store_, callbacks_, + resource_decoder_); + } catch (EnvoyException&){/* expected, we pass an empty configuration */}); +} + INSTANTIATE_TEST_SUITE_P(SubscriptionFactoryTestApiConfigSource, SubscriptionFactoryTestApiConfigSource, ::testing::Values(envoy::config::core::v3::ApiConfigSource::REST, @@ -330,57 +330,8 @@ TEST_P(SubscriptionFactoryTestApiConfigSource, NonExistentCluster) { } else { api_config_source->add_cluster_names("static_cluster"); } - Upstream::ClusterManager::ClusterInfoMap cluster_map; - 
EXPECT_CALL(cm_, clusters()).WillOnce(Return(cluster_map)); - EXPECT_THROW_WITH_MESSAGE(subscriptionFromConfigSource(config)->start({"static_cluster"}), - EnvoyException, - fmt::format("{} must have a statically defined " - "non-EDS cluster: 'static_cluster' does not exist, was " - "added via api, or is an EDS cluster", - api_config_source->GetTypeName())); -} - -TEST_P(SubscriptionFactoryTestApiConfigSource, DynamicCluster) { - envoy::config::core::v3::ConfigSource config; - auto* api_config_source = config.mutable_api_config_source(); - api_config_source->set_api_type(GetParam()); - if (api_config_source->api_type() == envoy::config::core::v3::ApiConfigSource::GRPC) { - api_config_source->add_grpc_services()->mutable_envoy_grpc()->set_cluster_name( - "static_cluster"); - } else { - api_config_source->add_cluster_names("static_cluster"); - } - Upstream::ClusterManager::ClusterInfoMap cluster_map; - Upstream::MockClusterMockPrioritySet cluster; - cluster_map.emplace("static_cluster", cluster); - EXPECT_CALL(cm_, clusters()).WillOnce(Return(cluster_map)); - EXPECT_CALL(cluster, info()); - EXPECT_CALL(*cluster.info_, addedViaApi()).WillOnce(Return(true)); - EXPECT_THROW_WITH_MESSAGE(subscriptionFromConfigSource(config)->start({"static_cluster"}), - EnvoyException, - fmt::format("{} must have a statically defined " - "non-EDS cluster: 'static_cluster' does not exist, was " - "added via api, or is an EDS cluster", - api_config_source->GetTypeName())); -} - -TEST_P(SubscriptionFactoryTestApiConfigSource, EDSClusterBackingEDSCluster) { - envoy::config::core::v3::ConfigSource config; - auto* api_config_source = config.mutable_api_config_source(); - api_config_source->set_api_type(GetParam()); - if (api_config_source->api_type() == envoy::config::core::v3::ApiConfigSource::GRPC) { - api_config_source->add_grpc_services()->mutable_envoy_grpc()->set_cluster_name( - "static_cluster"); - } else { - api_config_source->add_cluster_names("static_cluster"); - } - 
Upstream::ClusterManager::ClusterInfoMap cluster_map; - Upstream::MockClusterMockPrioritySet cluster; - cluster_map.emplace("static_cluster", cluster); - EXPECT_CALL(cm_, clusters()).WillOnce(Return(cluster_map)); - EXPECT_CALL(cluster, info()).Times(2); - EXPECT_CALL(*cluster.info_, addedViaApi()); - EXPECT_CALL(*cluster.info_, type()).WillOnce(Return(envoy::config::cluster::v3::Cluster::EDS)); + Upstream::ClusterManager::ClusterSet primary_clusters; + EXPECT_CALL(cm_, primaryClusters()).WillOnce(ReturnRef(primary_clusters)); EXPECT_THROW_WITH_MESSAGE(subscriptionFromConfigSource(config)->start({"static_cluster"}), EnvoyException, fmt::format("{} must have a statically defined " diff --git a/test/common/config/subscription_impl_test.cc b/test/common/config/subscription_impl_test.cc index cbda19812c0ac..d8e48bcb820c0 100644 --- a/test/common/config/subscription_impl_test.cc +++ b/test/common/config/subscription_impl_test.cc @@ -55,9 +55,10 @@ class SubscriptionImplTest : public testing::TestWithParam { } AssertionResult statsAre(uint32_t attempt, uint32_t success, uint32_t rejected, uint32_t failure, - uint32_t init_fetch_timeout, uint64_t update_time, uint64_t version) { + uint32_t init_fetch_timeout, uint64_t update_time, uint64_t version, + std::string version_text) { return test_harness_->statsAre(attempt, success, rejected, failure, init_fetch_timeout, - update_time, version); + update_time, version, version_text); } void deliverConfigUpdate(const std::vector cluster_names, const std::string& version, @@ -92,57 +93,58 @@ INSTANTIATE_TEST_SUITE_P(SubscriptionImplTest, SubscriptionImplInitFetchTimeoutT // Validate basic request-response succeeds. 
TEST_P(SubscriptionImplTest, InitialRequestResponse) { startSubscription({"cluster0", "cluster1"}); - EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0)); - deliverConfigUpdate({"cluster0", "cluster1"}, "0", true); - EXPECT_TRUE(statsAre(2, 1, 0, 0, 0, TEST_TIME_MILLIS, 7148434200721666028)); + EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0, "")); + deliverConfigUpdate({"cluster0", "cluster1"}, "v25-ubuntu18-beta", true); + EXPECT_TRUE( + statsAre(2, 1, 0, 0, 0, TEST_TIME_MILLIS, 18202868392629624077U, "v25-ubuntu18-beta")); } // Validate that multiple streamed updates succeed. TEST_P(SubscriptionImplTest, ResponseStream) { startSubscription({"cluster0", "cluster1"}); - EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0)); - deliverConfigUpdate({"cluster0", "cluster1"}, "0", true); - EXPECT_TRUE(statsAre(2, 1, 0, 0, 0, TEST_TIME_MILLIS, 7148434200721666028)); - deliverConfigUpdate({"cluster0", "cluster1"}, "1", true); - EXPECT_TRUE(statsAre(3, 2, 0, 0, 0, TEST_TIME_MILLIS, 13237225503670494420U)); + EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0, "")); + deliverConfigUpdate({"cluster0", "cluster1"}, "1.2.3.4", true); + EXPECT_TRUE(statsAre(2, 1, 0, 0, 0, TEST_TIME_MILLIS, 14026795738668939420U, "1.2.3.4")); + deliverConfigUpdate({"cluster0", "cluster1"}, "5_6_7", true); + EXPECT_TRUE(statsAre(3, 2, 0, 0, 0, TEST_TIME_MILLIS, 7612520132475921171U, "5_6_7")); } // Validate that the client can reject a config. TEST_P(SubscriptionImplTest, RejectConfig) { startSubscription({"cluster0", "cluster1"}); - EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0)); + EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0, "")); deliverConfigUpdate({"cluster0", "cluster1"}, "0", false); - EXPECT_TRUE(statsAre(2, 0, 1, 0, 0, 0, 0)); + EXPECT_TRUE(statsAre(2, 0, 1, 0, 0, 0, 0, "")); } // Validate that the client can reject a config and accept the same config later. 
TEST_P(SubscriptionImplTest, RejectAcceptConfig) { startSubscription({"cluster0", "cluster1"}); - EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0)); + EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0, "")); deliverConfigUpdate({"cluster0", "cluster1"}, "0", false); - EXPECT_TRUE(statsAre(2, 0, 1, 0, 0, 0, 0)); + EXPECT_TRUE(statsAre(2, 0, 1, 0, 0, 0, 0, "")); deliverConfigUpdate({"cluster0", "cluster1"}, "0", true); - EXPECT_TRUE(statsAre(3, 1, 1, 0, 0, TEST_TIME_MILLIS, 7148434200721666028)); + EXPECT_TRUE(statsAre(3, 1, 1, 0, 0, TEST_TIME_MILLIS, 7148434200721666028, "0")); } // Validate that the client can reject a config and accept another config later. TEST_P(SubscriptionImplTest, RejectAcceptNextConfig) { startSubscription({"cluster0", "cluster1"}); - EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0)); + EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0, "")); deliverConfigUpdate({"cluster0", "cluster1"}, "0", false); - EXPECT_TRUE(statsAre(2, 0, 1, 0, 0, 0, 0)); + EXPECT_TRUE(statsAre(2, 0, 1, 0, 0, 0, 0, "")); deliverConfigUpdate({"cluster0", "cluster1"}, "1", true); - EXPECT_TRUE(statsAre(3, 1, 1, 0, 0, TEST_TIME_MILLIS, 13237225503670494420U)); + EXPECT_TRUE(statsAre(3, 1, 1, 0, 0, TEST_TIME_MILLIS, 13237225503670494420U, "1")); } // Validate that stream updates send a message with the updated resources. 
TEST_P(SubscriptionImplTest, UpdateResources) { startSubscription({"cluster0", "cluster1"}); - EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0)); - deliverConfigUpdate({"cluster0", "cluster1"}, "0", true); - EXPECT_TRUE(statsAre(2, 1, 0, 0, 0, TEST_TIME_MILLIS, 7148434200721666028)); + EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0, "")); + deliverConfigUpdate({"cluster0", "cluster1"}, "42", true); + EXPECT_TRUE(statsAre(2, 1, 0, 0, 0, TEST_TIME_MILLIS, 7919287270473417401, "42")); updateResourceInterest({"cluster2"}); - EXPECT_TRUE(statsAre(3, 1, 0, 0, 0, TEST_TIME_MILLIS, 7148434200721666028)); + EXPECT_TRUE(statsAre(3, 1, 0, 0, 0, TEST_TIME_MILLIS, 7919287270473417401, "42")); } // Validate that initial fetch timer is created and calls callback on timeout @@ -153,14 +155,14 @@ TEST_P(SubscriptionImplInitFetchTimeoutTest, InitialFetchTimeout) { InSequence s; expectEnableInitFetchTimeoutTimer(std::chrono::milliseconds(1000)); startSubscription({"cluster0", "cluster1"}); - EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0)); + EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0, "")); if (GetParam() == SubscriptionType::Http) { expectDisableInitFetchTimeoutTimer(); } expectConfigUpdateFailed(); callInitFetchTimeoutCb(); - EXPECT_TRUE(statsAre(1, 0, 0, 0, 1, 0, 0)); + EXPECT_TRUE(statsAre(1, 0, 0, 0, 1, 0, 0, "")); } // Validate that initial fetch timer is disabled on config update @@ -168,7 +170,7 @@ TEST_P(SubscriptionImplInitFetchTimeoutTest, DisableInitTimeoutOnSuccess) { InSequence s; expectEnableInitFetchTimeoutTimer(std::chrono::milliseconds(1000)); startSubscription({"cluster0", "cluster1"}); - EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0)); + EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0, "")); expectDisableInitFetchTimeoutTimer(); deliverConfigUpdate({"cluster0", "cluster1"}, "0", true); } @@ -178,7 +180,7 @@ TEST_P(SubscriptionImplInitFetchTimeoutTest, DisableInitTimeoutOnFail) { InSequence s; expectEnableInitFetchTimeoutTimer(std::chrono::milliseconds(1000)); startSubscription({"cluster0", 
"cluster1"}); - EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0)); + EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0, "")); expectDisableInitFetchTimeoutTimer(); deliverConfigUpdate({"cluster0", "cluster1"}, "0", false); } diff --git a/test/common/config/subscription_test_harness.h b/test/common/config/subscription_test_harness.h index e3d13e37caccd..57342a11af92c 100644 --- a/test/common/config/subscription_test_harness.h +++ b/test/common/config/subscription_test_harness.h @@ -20,7 +20,9 @@ const uint64_t TEST_TIME_MILLIS = 42000; */ class SubscriptionTestHarness : public Event::TestUsingSimulatedTime { public: - SubscriptionTestHarness() : stats_(Utility::generateStats(stats_store_)) { + SubscriptionTestHarness() + : stats_(Utility::generateStats(stats_store_)), + control_plane_stats_(Utility::generateControlPlaneStats(stats_store_)) { simTime().setSystemTime(SystemTime(std::chrono::milliseconds(TEST_TIME_MILLIS))); } virtual ~SubscriptionTestHarness() = default; @@ -57,7 +59,8 @@ class SubscriptionTestHarness : public Event::TestUsingSimulatedTime { virtual testing::AssertionResult statsAre(uint32_t attempt, uint32_t success, uint32_t rejected, uint32_t failure, uint32_t init_fetch_timeout, - uint64_t update_time, uint64_t version) { + uint64_t update_time, uint64_t version, + absl::string_view version_text) { // TODO(fredlas) rework update_success_ to make sense across all xDS carriers. Its value in // statsAre() calls in many tests will probably have to be changed. 
UNREFERENCED_PARAMETER(attempt); @@ -85,14 +88,15 @@ class SubscriptionTestHarness : public Event::TestUsingSimulatedTime { return testing::AssertionFailure() << "version: expected " << version << ", got " << stats_.version_.value(); } + if (version_text != stats_.version_text_.value()) { + return testing::AssertionFailure() + << "version_text: expected " << version << ", got " << stats_.version_text_.value(); + } return testing::AssertionSuccess(); } virtual void verifyControlPlaneStats(uint32_t connected_state) { - EXPECT_EQ( - connected_state, - stats_store_.gauge("control_plane.connected_state", Stats::Gauge::ImportMode::NeverImport) - .value()); + EXPECT_EQ(connected_state, control_plane_stats_.connected_state_.value()); } virtual void expectConfigUpdateFailed() PURE; @@ -107,6 +111,7 @@ class SubscriptionTestHarness : public Event::TestUsingSimulatedTime { Stats::TestUtil::TestStore stats_store_; SubscriptionStats stats_; + ControlPlaneStats control_plane_stats_; }; ACTION_P(ThrowOnRejectedConfig, accept) { diff --git a/test/common/config/type_to_endpoint_test.cc b/test/common/config/type_to_endpoint_test.cc index 353580f17244f..f163d832218cd 100644 --- a/test/common/config/type_to_endpoint_test.cc +++ b/test/common/config/type_to_endpoint_test.cc @@ -16,23 +16,87 @@ TEST(TypeToEndpoint, All) { // The dummy messages are included for link purposes only. envoy::api::v2::RdsDummy _v2_rds_dummy; envoy::service::route::v3::RdsDummy _v3_rds_dummy; + // Delta gRPC endpoints. 
EXPECT_EQ("envoy.api.v2.RouteDiscoveryService.DeltaRoutes", - deltaGrpcMethod("type.googleapis.com/envoy.api.v2.RouteConfiguration").full_name()); - EXPECT_EQ( - "envoy.service.route.v3.RouteDiscoveryService.DeltaRoutes", - deltaGrpcMethod("type.googleapis.com/envoy.config.route.v3.RouteConfiguration").full_name()); + deltaGrpcMethod("type.googleapis.com/envoy.api.v2.RouteConfiguration", + envoy::config::core::v3::ApiVersion::AUTO) + .full_name()); + EXPECT_EQ("envoy.api.v2.RouteDiscoveryService.DeltaRoutes", + deltaGrpcMethod("type.googleapis.com/envoy.api.v2.RouteConfiguration", + envoy::config::core::v3::ApiVersion::V2) + .full_name()); + EXPECT_EQ("envoy.service.route.v3.RouteDiscoveryService.DeltaRoutes", + deltaGrpcMethod("type.googleapis.com/envoy.api.v2.RouteConfiguration", + envoy::config::core::v3::ApiVersion::V3) + .full_name()); + + EXPECT_EQ("envoy.api.v2.RouteDiscoveryService.DeltaRoutes", + deltaGrpcMethod("type.googleapis.com/envoy.config.route.v3.RouteConfiguration", + envoy::config::core::v3::ApiVersion::AUTO) + .full_name()); + EXPECT_EQ("envoy.api.v2.RouteDiscoveryService.DeltaRoutes", + deltaGrpcMethod("type.googleapis.com/envoy.config.route.v3.RouteConfiguration", + envoy::config::core::v3::ApiVersion::V2) + .full_name()); + EXPECT_EQ("envoy.service.route.v3.RouteDiscoveryService.DeltaRoutes", + deltaGrpcMethod("type.googleapis.com/envoy.config.route.v3.RouteConfiguration", + envoy::config::core::v3::ApiVersion::V3) + .full_name()); + // SotW gRPC endpoints. 
EXPECT_EQ("envoy.api.v2.RouteDiscoveryService.StreamRoutes", - sotwGrpcMethod("type.googleapis.com/envoy.api.v2.RouteConfiguration").full_name()); - EXPECT_EQ( - "envoy.service.route.v3.RouteDiscoveryService.StreamRoutes", - sotwGrpcMethod("type.googleapis.com/envoy.config.route.v3.RouteConfiguration").full_name()); + sotwGrpcMethod("type.googleapis.com/envoy.api.v2.RouteConfiguration", + envoy::config::core::v3::ApiVersion::AUTO) + .full_name()); + EXPECT_EQ("envoy.api.v2.RouteDiscoveryService.StreamRoutes", + sotwGrpcMethod("type.googleapis.com/envoy.api.v2.RouteConfiguration", + envoy::config::core::v3::ApiVersion::V2) + .full_name()); + EXPECT_EQ("envoy.service.route.v3.RouteDiscoveryService.StreamRoutes", + sotwGrpcMethod("type.googleapis.com/envoy.api.v2.RouteConfiguration", + envoy::config::core::v3::ApiVersion::V3) + .full_name()); + + EXPECT_EQ("envoy.api.v2.RouteDiscoveryService.StreamRoutes", + sotwGrpcMethod("type.googleapis.com/envoy.config.route.v3.RouteConfiguration", + envoy::config::core::v3::ApiVersion::AUTO) + .full_name()); + EXPECT_EQ("envoy.api.v2.RouteDiscoveryService.StreamRoutes", + sotwGrpcMethod("type.googleapis.com/envoy.config.route.v3.RouteConfiguration", + envoy::config::core::v3::ApiVersion::V2) + .full_name()); + EXPECT_EQ("envoy.service.route.v3.RouteDiscoveryService.StreamRoutes", + sotwGrpcMethod("type.googleapis.com/envoy.config.route.v3.RouteConfiguration", + envoy::config::core::v3::ApiVersion::V3) + .full_name()); + // REST endpoints. 
EXPECT_EQ("envoy.api.v2.RouteDiscoveryService.FetchRoutes", - restMethod("type.googleapis.com/envoy.api.v2.RouteConfiguration").full_name()); + restMethod("type.googleapis.com/envoy.api.v2.RouteConfiguration", + envoy::config::core::v3::ApiVersion::AUTO) + .full_name()); + EXPECT_EQ("envoy.api.v2.RouteDiscoveryService.FetchRoutes", + restMethod("type.googleapis.com/envoy.api.v2.RouteConfiguration", + envoy::config::core::v3::ApiVersion::V2) + .full_name()); + EXPECT_EQ("envoy.service.route.v3.RouteDiscoveryService.FetchRoutes", + restMethod("type.googleapis.com/envoy.api.v2.RouteConfiguration", + envoy::config::core::v3::ApiVersion::V3) + .full_name()); + + EXPECT_EQ("envoy.api.v2.RouteDiscoveryService.FetchRoutes", + restMethod("type.googleapis.com/envoy.config.route.v3.RouteConfiguration", + envoy::config::core::v3::ApiVersion::AUTO) + .full_name()); + EXPECT_EQ("envoy.api.v2.RouteDiscoveryService.FetchRoutes", + restMethod("type.googleapis.com/envoy.config.route.v3.RouteConfiguration", + envoy::config::core::v3::ApiVersion::V2) + .full_name()); EXPECT_EQ("envoy.service.route.v3.RouteDiscoveryService.FetchRoutes", - restMethod("type.googleapis.com/envoy.config.route.v3.RouteConfiguration").full_name()); + restMethod("type.googleapis.com/envoy.config.route.v3.RouteConfiguration", + envoy::config::core::v3::ApiVersion::V3) + .full_name()); } } // namespace diff --git a/test/common/config/udpa_resource_test.cc b/test/common/config/udpa_resource_test.cc new file mode 100644 index 0000000000000..0cf6aeef45f80 --- /dev/null +++ b/test/common/config/udpa_resource_test.cc @@ -0,0 +1,188 @@ +#include "common/config/udpa_resource.h" + +#include "test/test_common/utility.h" + +#include "gtest/gtest.h" + +using ::testing::ElementsAre; +using ::testing::Pair; +using ::testing::UnorderedElementsAre; + +#define EXPECT_CONTEXT_PARAMS(context_params, ...) 
\ + { \ + std::map param_map((context_params).begin(), \ + (context_params).end()); \ + EXPECT_THAT(param_map, UnorderedElementsAre(__VA_ARGS__)); \ + } + +namespace Envoy { +namespace Config { +namespace { + +const std::string EscapedUrn = + "udpa://f123%25%2F%3F%23o/envoy.config.listener.v3.Listener/b%25%3A%2F%3F%23%5B%5Dar//" + "baz?%25%23%5B%5D%26%3Dab=cde%25%23%5B%5D%26%3Df"; +const std::string EscapedUrnWithManyQueryParams = + "udpa://f123%25%2F%3F%23o/envoy.config.listener.v3.Listener/b%25%3A%2F%3F%23%5B%5Dar//" + "baz?%25%23%5B%5D%26%3D=bar&%25%23%5B%5D%26%3Dab=cde%25%23%5B%5D%26%3Df&foo=%25%23%5B%5D%26%3D"; +const std::string EscapedUrlWithManyQueryParamsAndDirectives = + EscapedUrnWithManyQueryParams + + "#entry=some_en%25%23%5B%5D%2Ctry,alt=udpa://fo%2525%252F%253F%2523o/bar%23alt=udpa://bar/" + "baz%2Centry=h%2525%2523%255B%255D%252Cuh"; + +// for all x. encodeUri(decodeUri(x)) = x where x comes from sample of valid udpa:// URIs. +// TODO(htuch): write a fuzzer that validates this property as well. 
+TEST(UdpaResourceIdentifierTest, DecodeEncode) { + const std::vector uris = { + "udpa:///envoy.config.listener.v3.Listener", + "udpa://foo/envoy.config.listener.v3.Listener", + "udpa://foo/envoy.config.listener.v3.Listener/bar", + "udpa://foo/envoy.config.listener.v3.Listener/bar/baz", + "udpa://foo/envoy.config.listener.v3.Listener/bar////baz", + "udpa://foo/envoy.config.listener.v3.Listener?ab=cde", + "udpa://foo/envoy.config.listener.v3.Listener/bar?ab=cd", + "udpa://foo/envoy.config.listener.v3.Listener/bar/baz?ab=cde", + "udpa://foo/envoy.config.listener.v3.Listener/bar/baz?ab=", + "udpa://foo/envoy.config.listener.v3.Listener/bar/baz?=cd", + "udpa://foo/envoy.config.listener.v3.Listener/bar/baz?ab=cde&ba=edc&z=f", + EscapedUrn, + EscapedUrnWithManyQueryParams, + }; + UdpaResourceIdentifier::EncodeOptions encode_options; + encode_options.sort_context_params_ = true; + for (const std::string& uri : uris) { + EXPECT_EQ(uri, UdpaResourceIdentifier::encodeUrn(UdpaResourceIdentifier::decodeUrn(uri), + encode_options)); + EXPECT_EQ(uri, UdpaResourceIdentifier::encodeUrl(UdpaResourceIdentifier::decodeUrl(uri), + encode_options)); + } +} + +// Validate that URN decoding behaves as expected component-wise. +TEST(UdpaResourceNameTest, DecodeSuccess) { + const auto resource_name = UdpaResourceIdentifier::decodeUrn(EscapedUrnWithManyQueryParams); + EXPECT_EQ("f123%/?#o", resource_name.authority()); + EXPECT_EQ("envoy.config.listener.v3.Listener", resource_name.resource_type()); + EXPECT_THAT(resource_name.id(), ElementsAre("b%:/?#[]ar", "", "baz")); + EXPECT_CONTEXT_PARAMS(resource_name.context().params(), Pair("%#[]&=", "bar"), + Pair("%#[]&=ab", "cde%#[]&=f"), Pair("foo", "%#[]&=")); +} + +// Validate that URL decoding behaves as expected component-wise. 
+TEST(UdpaResourceLocatorTest, DecodeSuccess) { + const auto resource_locator = + UdpaResourceIdentifier::decodeUrl(EscapedUrlWithManyQueryParamsAndDirectives); + EXPECT_EQ("f123%/?#o", resource_locator.authority()); + EXPECT_EQ("envoy.config.listener.v3.Listener", resource_locator.resource_type()); + EXPECT_THAT(resource_locator.id(), ElementsAre("b%:/?#[]ar", "", "baz")); + EXPECT_CONTEXT_PARAMS(resource_locator.exact_context().params(), Pair("%#[]&=", "bar"), + Pair("%#[]&=ab", "cde%#[]&=f"), Pair("foo", "%#[]&=")); + EXPECT_EQ(2, resource_locator.directives().size()); + EXPECT_EQ("some_en%#[],try", resource_locator.directives()[0].entry()); + const auto& alt = resource_locator.directives()[1].alt(); + EXPECT_EQ("fo%/?#o", alt.authority()); + EXPECT_EQ("bar", alt.resource_type()); + EXPECT_EQ(2, alt.directives().size()); + const auto& inner_alt = alt.directives()[0].alt(); + EXPECT_EQ("bar", inner_alt.authority()); + EXPECT_EQ("baz", inner_alt.resource_type()); + EXPECT_EQ("h%#[],uh", alt.directives()[1].entry()); +} + +// Validate that the URN decoding behaves with a near-empty UDPA resource name. +TEST(UdpaResourceLocatorTest, DecodeEmpty) { + const auto resource_name = + UdpaResourceIdentifier::decodeUrn("udpa:///envoy.config.listener.v3.Listener"); + EXPECT_TRUE(resource_name.authority().empty()); + EXPECT_EQ("envoy.config.listener.v3.Listener", resource_name.resource_type()); + EXPECT_TRUE(resource_name.id().empty()); + EXPECT_TRUE(resource_name.context().params().empty()); +} + +// Validate that the URL decoding behaves with a near-empty UDPA resource locator. 
+TEST(UdpaResourceNameTest, DecodeEmpty) { + const auto resource_locator = + UdpaResourceIdentifier::decodeUrl("udpa:///envoy.config.listener.v3.Listener"); + EXPECT_TRUE(resource_locator.authority().empty()); + EXPECT_EQ("envoy.config.listener.v3.Listener", resource_locator.resource_type()); + EXPECT_TRUE(resource_locator.id().empty()); + EXPECT_TRUE(resource_locator.exact_context().params().empty()); + EXPECT_TRUE(resource_locator.directives().empty()); +} + +// Negative tests for URN decoding. +TEST(UdpaResourceNameTest, DecodeFail) { + { + EXPECT_THROW_WITH_MESSAGE(UdpaResourceIdentifier::decodeUrn("foo://"), + UdpaResourceIdentifier::DecodeException, + "foo:// does not have an udpa: scheme"); + } + { + EXPECT_THROW_WITH_MESSAGE(UdpaResourceIdentifier::decodeUrn("udpa://foo"), + UdpaResourceIdentifier::DecodeException, + "Resource type missing from /"); + } +} + +// Negative tests for URL decoding. +TEST(UdpaResourceLocatorTest, DecodeFail) { + { + EXPECT_THROW_WITH_MESSAGE(UdpaResourceIdentifier::decodeUrl("foo://"), + UdpaResourceIdentifier::DecodeException, + "foo:// does not have a udpa:, http: or file: scheme"); + } + { + EXPECT_THROW_WITH_MESSAGE(UdpaResourceIdentifier::decodeUrl("udpa://foo"), + UdpaResourceIdentifier::DecodeException, + "Resource type missing from /"); + } + { + EXPECT_THROW_WITH_MESSAGE(UdpaResourceIdentifier::decodeUrl("udpa://foo/some-type#bar=baz"), + UdpaResourceIdentifier::DecodeException, + "Unknown fragment component bar=baz"); + } +} + +// Validate parsing for udpa:, http: and file: schemes. 
+TEST(UdpaResourceLocatorTest, Schemes) { + { + const auto resource_locator = + UdpaResourceIdentifier::decodeUrl("udpa://foo/bar/baz/blah?a=b#entry=m"); + EXPECT_EQ(udpa::core::v1::ResourceLocator::UDPA, resource_locator.scheme()); + EXPECT_EQ("foo", resource_locator.authority()); + EXPECT_EQ("bar", resource_locator.resource_type()); + EXPECT_THAT(resource_locator.id(), ElementsAre("baz", "blah")); + EXPECT_CONTEXT_PARAMS(resource_locator.exact_context().params(), Pair("a", "b")); + EXPECT_EQ(1, resource_locator.directives().size()); + EXPECT_EQ("m", resource_locator.directives()[0].entry()); + EXPECT_EQ("udpa://foo/bar/baz/blah?a=b#entry=m", + UdpaResourceIdentifier::encodeUrl(resource_locator)); + } + { + const auto resource_locator = + UdpaResourceIdentifier::decodeUrl("http://foo/bar/baz/blah?a=b#entry=m"); + EXPECT_EQ(udpa::core::v1::ResourceLocator::HTTP, resource_locator.scheme()); + EXPECT_EQ("foo", resource_locator.authority()); + EXPECT_EQ("bar", resource_locator.resource_type()); + EXPECT_THAT(resource_locator.id(), ElementsAre("baz", "blah")); + EXPECT_CONTEXT_PARAMS(resource_locator.exact_context().params(), Pair("a", "b")); + EXPECT_EQ(1, resource_locator.directives().size()); + EXPECT_EQ("m", resource_locator.directives()[0].entry()); + EXPECT_EQ("http://foo/bar/baz/blah?a=b#entry=m", + UdpaResourceIdentifier::encodeUrl(resource_locator)); + } + { + const auto resource_locator = UdpaResourceIdentifier::decodeUrl("file:///bar/baz/blah#entry=m"); + EXPECT_EQ(udpa::core::v1::ResourceLocator::FILE, resource_locator.scheme()); + EXPECT_THAT(resource_locator.id(), ElementsAre("bar", "baz", "blah")); + EXPECT_EQ(1, resource_locator.directives().size()); + EXPECT_EQ("m", resource_locator.directives()[0].entry()); + EXPECT_EQ("file:///bar/baz/blah#entry=m", UdpaResourceIdentifier::encodeUrl(resource_locator)); + } +} + +// extra tests for fragment handling + +} // namespace +} // namespace Config +} // namespace Envoy diff --git 
a/test/common/config/utility_test.cc b/test/common/config/utility_test.cc index 503866e34ce8c..23ab3e0b02353 100644 --- a/test/common/config/utility_test.cc +++ b/test/common/config/utility_test.cc @@ -246,7 +246,7 @@ TEST(UtilityTest, FactoryForGrpcApiConfigSource) { } TEST(UtilityTest, PrepareDnsRefreshStrategy) { - NiceMock random; + NiceMock random; { // dns_failure_refresh_rate not set. @@ -524,58 +524,33 @@ TEST(UtilityTest, EmptyToEmptyConfig) { TEST(CheckApiConfigSourceSubscriptionBackingClusterTest, GrpcClusterTestAcrossTypes) { envoy::config::core::v3::ConfigSource config; auto* api_config_source = config.mutable_api_config_source(); - Upstream::ClusterManager::ClusterInfoMap cluster_map; + Upstream::ClusterManager::ClusterSet primary_clusters; // API of type GRPC api_config_source->set_api_type(envoy::config::core::v3::ApiConfigSource::GRPC); // GRPC cluster without GRPC services. EXPECT_THROW_WITH_REGEX( - Utility::checkApiConfigSourceSubscriptionBackingCluster(cluster_map, *api_config_source), + Utility::checkApiConfigSourceSubscriptionBackingCluster(primary_clusters, *api_config_source), EnvoyException, "API configs must have either a gRPC service or a cluster name defined:"); // Non-existent cluster. api_config_source->add_grpc_services()->mutable_envoy_grpc()->set_cluster_name("foo_cluster"); EXPECT_THROW_WITH_MESSAGE( - Utility::checkApiConfigSourceSubscriptionBackingCluster(cluster_map, *api_config_source), - EnvoyException, - fmt::format("{} must have a statically defined non-EDS cluster: " - "'foo_cluster' does not exist, was added via api, or is an EDS cluster", - api_config_source->GetTypeName())); - - // Dynamic Cluster. 
- Upstream::MockClusterMockPrioritySet cluster; - cluster_map.emplace("foo_cluster", cluster); - EXPECT_CALL(cluster, info()); - EXPECT_CALL(*cluster.info_, addedViaApi()).WillOnce(Return(true)); - EXPECT_THROW_WITH_MESSAGE( - Utility::checkApiConfigSourceSubscriptionBackingCluster(cluster_map, *api_config_source), - EnvoyException, - fmt ::format("{} must have a statically defined non-EDS cluster: " - "'foo_cluster' does not exist, was added via api, or is an EDS cluster", - api_config_source->GetTypeName())); - - // EDS Cluster backing EDS Cluster. - EXPECT_CALL(cluster, info()).Times(2); - EXPECT_CALL(*cluster.info_, addedViaApi()); - EXPECT_CALL(*cluster.info_, type()).WillOnce(Return(envoy::config::cluster::v3::Cluster::EDS)); - EXPECT_THROW_WITH_MESSAGE( - Utility::checkApiConfigSourceSubscriptionBackingCluster(cluster_map, *api_config_source), + Utility::checkApiConfigSourceSubscriptionBackingCluster(primary_clusters, *api_config_source), EnvoyException, fmt::format("{} must have a statically defined non-EDS cluster: " "'foo_cluster' does not exist, was added via api, or is an EDS cluster", api_config_source->GetTypeName())); // All ok. - EXPECT_CALL(cluster, info()).Times(2); - EXPECT_CALL(*cluster.info_, addedViaApi()); - EXPECT_CALL(*cluster.info_, type()); - Utility::checkApiConfigSourceSubscriptionBackingCluster(cluster_map, *api_config_source); + primary_clusters.insert("foo_cluster"); + Utility::checkApiConfigSourceSubscriptionBackingCluster(primary_clusters, *api_config_source); // API with cluster_names set should be rejected. 
api_config_source->add_cluster_names("foo_cluster"); EXPECT_THROW_WITH_REGEX( - Utility::checkApiConfigSourceSubscriptionBackingCluster(cluster_map, *api_config_source), + Utility::checkApiConfigSourceSubscriptionBackingCluster(primary_clusters, *api_config_source), EnvoyException, fmt::format("{}::.DELTA_.GRPC must not have a cluster name " "specified:", @@ -585,46 +560,21 @@ TEST(CheckApiConfigSourceSubscriptionBackingClusterTest, GrpcClusterTestAcrossTy TEST(CheckApiConfigSourceSubscriptionBackingClusterTest, RestClusterTestAcrossTypes) { envoy::config::core::v3::ConfigSource config; auto* api_config_source = config.mutable_api_config_source(); - Upstream::ClusterManager::ClusterInfoMap cluster_map; + Upstream::ClusterManager::ClusterSet primary_clusters; api_config_source->set_api_type(envoy::config::core::v3::ApiConfigSource::REST); // Non-existent cluster. api_config_source->add_cluster_names("foo_cluster"); EXPECT_THROW_WITH_MESSAGE( - Utility::checkApiConfigSourceSubscriptionBackingCluster(cluster_map, *api_config_source), - EnvoyException, - fmt::format("{} must have a statically defined non-EDS cluster: " - "'foo_cluster' does not exist, was added via api, or is an EDS cluster", - api_config_source->GetTypeName())); - - // Dynamic Cluster. - Upstream::MockClusterMockPrioritySet cluster; - cluster_map.emplace("foo_cluster", cluster); - EXPECT_CALL(cluster, info()); - EXPECT_CALL(*cluster.info_, addedViaApi()).WillOnce(Return(true)); - EXPECT_THROW_WITH_MESSAGE( - Utility::checkApiConfigSourceSubscriptionBackingCluster(cluster_map, *api_config_source), - EnvoyException, - fmt::format("{} must have a statically defined non-EDS cluster: " - "'foo_cluster' does not exist, was added via api, or is an EDS cluster", - api_config_source->GetTypeName())); - - // EDS Cluster backing EDS Cluster. 
- EXPECT_CALL(cluster, info()).Times(2); - EXPECT_CALL(*cluster.info_, addedViaApi()); - EXPECT_CALL(*cluster.info_, type()).WillOnce(Return(envoy::config::cluster::v3::Cluster::EDS)); - EXPECT_THROW_WITH_MESSAGE( - Utility::checkApiConfigSourceSubscriptionBackingCluster(cluster_map, *api_config_source), + Utility::checkApiConfigSourceSubscriptionBackingCluster(primary_clusters, *api_config_source), EnvoyException, fmt::format("{} must have a statically defined non-EDS cluster: " "'foo_cluster' does not exist, was added via api, or is an EDS cluster", api_config_source->GetTypeName())); // All ok. - EXPECT_CALL(cluster, info()).Times(2); - EXPECT_CALL(*cluster.info_, addedViaApi()); - EXPECT_CALL(*cluster.info_, type()); - Utility::checkApiConfigSourceSubscriptionBackingCluster(cluster_map, *api_config_source); + primary_clusters.insert("foo_cluster"); + Utility::checkApiConfigSourceSubscriptionBackingCluster(primary_clusters, *api_config_source); } // Validates CheckCluster functionality. 
diff --git a/test/common/config/watch_map_test.cc b/test/common/config/watch_map_test.cc index 57f0aaba360ff..6749cb901a8ae 100644 --- a/test/common/config/watch_map_test.cc +++ b/test/common/config/watch_map_test.cc @@ -2,6 +2,7 @@ #include "envoy/common/exception.h" #include "envoy/config/endpoint/v3/endpoint.pb.h" +#include "envoy/config/endpoint/v3/endpoint.pb.validate.h" #include "envoy/service/discovery/v3/discovery.pb.h" #include "envoy/stats/scope.h" @@ -14,70 +15,60 @@ #include "gtest/gtest.h" using ::testing::_; +using ::testing::AtMost; using ::testing::Invoke; +using ::testing::InvokeWithoutArgs; +using ::testing::NiceMock; namespace Envoy { namespace Config { namespace { -class NamedMockSubscriptionCallbacks : public MockSubscriptionCallbacks { -public: - std::string resourceName(const ProtobufWkt::Any& resource) override { - return TestUtility::anyConvert(resource) - .cluster_name(); - } -}; - // expectDeltaAndSotwUpdate() EXPECTs two birds with one function call: we want to cover both SotW // and delta, which, while mechanically different, can behave identically for our testing purposes. // Specifically, as a simplification for these tests, every still-present resource is updated in // every update. Therefore, a resource can never show up in the SotW update but not the delta // update. We can therefore use the same expected_resources for both. 
void expectDeltaAndSotwUpdate( - NamedMockSubscriptionCallbacks& callbacks, + MockSubscriptionCallbacks& callbacks, const std::vector& expected_resources, const std::vector& expected_removals, const std::string& version) { EXPECT_CALL(callbacks, onConfigUpdate(_, version)) - .WillOnce(Invoke( - [expected_resources](const Protobuf::RepeatedPtrField& gotten_resources, - const std::string&) { - EXPECT_EQ(expected_resources.size(), gotten_resources.size()); - for (size_t i = 0; i < expected_resources.size(); i++) { - envoy::config::endpoint::v3::ClusterLoadAssignment cur_gotten_resource; - gotten_resources[i].UnpackTo(&cur_gotten_resource); - EXPECT_TRUE(TestUtility::protoEqual(cur_gotten_resource, expected_resources[i])); - } - })); + .WillOnce(Invoke([expected_resources](const std::vector& gotten_resources, + const std::string&) { + EXPECT_EQ(expected_resources.size(), gotten_resources.size()); + for (size_t i = 0; i < expected_resources.size(); i++) { + EXPECT_TRUE( + TestUtility::protoEqual(gotten_resources[i].get().resource(), expected_resources[i])); + } + })); EXPECT_CALL(callbacks, onConfigUpdate(_, _, _)) - .WillOnce( - Invoke([expected_resources, expected_removals, version]( - const Protobuf::RepeatedPtrField& - gotten_resources, - const Protobuf::RepeatedPtrField& removed_resources, - const std::string&) { - EXPECT_EQ(expected_resources.size(), gotten_resources.size()); - for (size_t i = 0; i < expected_resources.size(); i++) { - EXPECT_EQ(gotten_resources[i].version(), version); - envoy::config::endpoint::v3::ClusterLoadAssignment cur_gotten_resource; - gotten_resources[i].resource().UnpackTo(&cur_gotten_resource); - EXPECT_TRUE(TestUtility::protoEqual(cur_gotten_resource, expected_resources[i])); - } - EXPECT_EQ(expected_removals.size(), removed_resources.size()); - for (size_t i = 0; i < expected_removals.size(); i++) { - EXPECT_EQ(expected_removals[i], removed_resources[i]); - } - })); + .WillOnce(Invoke([expected_resources, expected_removals, + 
version](const std::vector& gotten_resources, + const Protobuf::RepeatedPtrField& removed_resources, + const std::string&) { + EXPECT_EQ(expected_resources.size(), gotten_resources.size()); + for (size_t i = 0; i < expected_resources.size(); i++) { + EXPECT_EQ(gotten_resources[i].get().version(), version); + EXPECT_TRUE( + TestUtility::protoEqual(gotten_resources[i].get().resource(), expected_resources[i])); + } + EXPECT_EQ(expected_removals.size(), removed_resources.size()); + for (size_t i = 0; i < expected_removals.size(); i++) { + EXPECT_EQ(expected_removals[i], removed_resources[i]); + } + })); } -void expectNoUpdate(NamedMockSubscriptionCallbacks& callbacks, const std::string& version) { +void expectNoUpdate(MockSubscriptionCallbacks& callbacks, const std::string& version) { EXPECT_CALL(callbacks, onConfigUpdate(_, version)).Times(0); EXPECT_CALL(callbacks, onConfigUpdate(_, _, version)).Times(0); } -void expectEmptySotwNoDeltaUpdate(NamedMockSubscriptionCallbacks& callbacks, +void expectEmptySotwNoDeltaUpdate(MockSubscriptionCallbacks& callbacks, const std::string& version) { EXPECT_CALL(callbacks, onConfigUpdate(_, version)) - .WillOnce(Invoke([](const Protobuf::RepeatedPtrField& gotten_resources, + .WillOnce(Invoke([](const std::vector& gotten_resources, const std::string&) { EXPECT_EQ(gotten_resources.size(), 0); })); EXPECT_CALL(callbacks, onConfigUpdate(_, _, version)).Times(0); } @@ -99,7 +90,7 @@ wrapInResource(const Protobuf::RepeatedPtrField& anys, // Similar to expectDeltaAndSotwUpdate(), but making the onConfigUpdate() happen, rather than // EXPECT-ing it. -void doDeltaAndSotwUpdate(SubscriptionCallbacks& watch_map, +void doDeltaAndSotwUpdate(WatchMap& watch_map, const Protobuf::RepeatedPtrField& sotw_resources, const std::vector& removed_names, const std::string& version) { @@ -118,9 +109,11 @@ void doDeltaAndSotwUpdate(SubscriptionCallbacks& watch_map, // resources it doesn't care about. 
Checks that the watch can later decide it does care about them, // and then receive subsequent updates to them. TEST(WatchMapTest, Basic) { - NamedMockSubscriptionCallbacks callbacks; + MockSubscriptionCallbacks callbacks; + TestUtility::TestOpaqueResourceDecoderImpl + resource_decoder("cluster_name"); WatchMap watch_map; - Watch* watch = watch_map.addWatch(callbacks); + Watch* watch = watch_map.addWatch(callbacks, resource_decoder); { // The watch is interested in Alice and Bob... @@ -182,11 +175,13 @@ TEST(WatchMapTest, Basic) { // NOTE: we need the resource name "dummy" to keep either watch from ever having no names watched, // which is treated as interest in all names. TEST(WatchMapTest, Overlap) { - NamedMockSubscriptionCallbacks callbacks1; - NamedMockSubscriptionCallbacks callbacks2; + MockSubscriptionCallbacks callbacks1; + MockSubscriptionCallbacks callbacks2; + TestUtility::TestOpaqueResourceDecoderImpl + resource_decoder("cluster_name"); WatchMap watch_map; - Watch* watch1 = watch_map.addWatch(callbacks1); - Watch* watch2 = watch_map.addWatch(callbacks2); + Watch* watch1 = watch_map.addWatch(callbacks1, resource_decoder); + Watch* watch2 = watch_map.addWatch(callbacks2, resource_decoder); Protobuf::RepeatedPtrField updated_resources; envoy::config::endpoint::v3::ClusterLoadAssignment alice; @@ -241,6 +236,75 @@ TEST(WatchMapTest, Overlap) { } } +// These are regression tests for #11877, validate that when two watches point at the same +// watched resource, and an update to one of the watches removes one or both of them, that +// WatchMap defers deletes and doesn't crash. 
+class SameWatchRemoval : public testing::Test { +public: + void SetUp() override { + envoy::config::endpoint::v3::ClusterLoadAssignment alice; + alice.set_cluster_name("alice"); + updated_resources_.Add()->PackFrom(alice); + watch1_ = watch_map_.addWatch(callbacks1_, resource_decoder_); + watch2_ = watch_map_.addWatch(callbacks2_, resource_decoder_); + watch_map_.updateWatchInterest(watch1_, {"alice"}); + watch_map_.updateWatchInterest(watch2_, {"alice"}); + } + + void removeAllInterest() { + ASSERT_FALSE(watch_cb_invoked_); + watch_cb_invoked_ = true; + watch_map_.removeWatch(watch1_); + watch_map_.removeWatch(watch2_); + } + + TestUtility::TestOpaqueResourceDecoderImpl + resource_decoder_{"cluster_name"}; + WatchMap watch_map_; + NiceMock callbacks1_; + MockSubscriptionCallbacks callbacks2_; + Protobuf::RepeatedPtrField updated_resources_; + Watch* watch1_; + Watch* watch2_; + bool watch_cb_invoked_{}; +}; + +TEST_F(SameWatchRemoval, SameWatchRemovalSotw) { + EXPECT_CALL(callbacks1_, onConfigUpdate(_, _)) + .Times(AtMost(1)) + .WillRepeatedly(InvokeWithoutArgs([this] { removeAllInterest(); })); + EXPECT_CALL(callbacks2_, onConfigUpdate(_, _)) + .Times(AtMost(1)) + .WillRepeatedly(InvokeWithoutArgs([this] { removeAllInterest(); })); + watch_map_.onConfigUpdate(updated_resources_, "version1"); +} + +TEST_F(SameWatchRemoval, SameWatchRemovalDeltaAdd) { + Protobuf::RepeatedPtrField delta_resources = + wrapInResource(updated_resources_, "version1"); + Protobuf::RepeatedPtrField removed_names_proto; + + EXPECT_CALL(callbacks1_, onConfigUpdate(_, _, _)) + .Times(AtMost(1)) + .WillRepeatedly(InvokeWithoutArgs([this] { removeAllInterest(); })); + EXPECT_CALL(callbacks2_, onConfigUpdate(_, _, _)) + .Times(AtMost(1)) + .WillRepeatedly(InvokeWithoutArgs([this] { removeAllInterest(); })); + watch_map_.onConfigUpdate(delta_resources, removed_names_proto, "version1"); +} + +TEST_F(SameWatchRemoval, SameWatchRemovalDeltaRemove) { + Protobuf::RepeatedPtrField 
removed_names_proto; + *removed_names_proto.Add() = "alice"; + EXPECT_CALL(callbacks1_, onConfigUpdate(_, _, _)) + .Times(AtMost(1)) + .WillRepeatedly(InvokeWithoutArgs([this] { removeAllInterest(); })); + EXPECT_CALL(callbacks2_, onConfigUpdate(_, _, _)) + .Times(AtMost(1)) + .WillRepeatedly(InvokeWithoutArgs([this] { removeAllInterest(); })); + watch_map_.onConfigUpdate({}, removed_names_proto, "version1"); +} + // Checks the following: // First watch on a resource name ==> updateWatchInterest() returns "add it to subscription" // Watch loses interest ==> "remove it from subscription" @@ -248,11 +312,13 @@ TEST(WatchMapTest, Overlap) { // NOTE: we need the resource name "dummy" to keep either watch from ever having no names watched, // which is treated as interest in all names. TEST(WatchMapTest, AddRemoveAdd) { - NamedMockSubscriptionCallbacks callbacks1; - NamedMockSubscriptionCallbacks callbacks2; + MockSubscriptionCallbacks callbacks1; + MockSubscriptionCallbacks callbacks2; + TestUtility::TestOpaqueResourceDecoderImpl + resource_decoder("cluster_name"); WatchMap watch_map; - Watch* watch1 = watch_map.addWatch(callbacks1); - Watch* watch2 = watch_map.addWatch(callbacks2); + Watch* watch1 = watch_map.addWatch(callbacks1, resource_decoder); + Watch* watch2 = watch_map.addWatch(callbacks2, resource_decoder); Protobuf::RepeatedPtrField updated_resources; envoy::config::endpoint::v3::ClusterLoadAssignment alice; @@ -276,7 +342,8 @@ TEST(WatchMapTest, AddRemoveAdd) { { AddedRemoved added_removed = watch_map.updateWatchInterest(watch1, {"dummy"}); EXPECT_TRUE(added_removed.added_.empty()); - EXPECT_EQ(std::set({"alice"}), added_removed.removed_); // remove from subscription + EXPECT_EQ(std::set({"alice"}), + added_removed.removed_); // remove from subscription // (The xDS client should have responded to updateWatchInterest()'s return value by removing // Alice from the subscription, so onConfigUpdate() calls should be impossible right now.) 
@@ -301,9 +368,11 @@ TEST(WatchMapTest, AddRemoveAdd) { // Tests that nothing breaks if an update arrives that we entirely do not care about. TEST(WatchMapTest, UninterestingUpdate) { - NamedMockSubscriptionCallbacks callbacks; + MockSubscriptionCallbacks callbacks; + TestUtility::TestOpaqueResourceDecoderImpl + resource_decoder("cluster_name"); WatchMap watch_map; - Watch* watch = watch_map.addWatch(callbacks); + Watch* watch = watch_map.addWatch(callbacks, resource_decoder); watch_map.updateWatchInterest(watch, {"alice"}); Protobuf::RepeatedPtrField alice_update; @@ -342,11 +411,13 @@ TEST(WatchMapTest, UninterestingUpdate) { // Tests that a watch that specifies no particular resource interest is treated as interested in // everything. TEST(WatchMapTest, WatchingEverything) { - NamedMockSubscriptionCallbacks callbacks1; - NamedMockSubscriptionCallbacks callbacks2; + MockSubscriptionCallbacks callbacks1; + MockSubscriptionCallbacks callbacks2; + TestUtility::TestOpaqueResourceDecoderImpl + resource_decoder("cluster_name"); WatchMap watch_map; - /*Watch* watch1 = */ watch_map.addWatch(callbacks1); - Watch* watch2 = watch_map.addWatch(callbacks2); + /*Watch* watch1 = */ watch_map.addWatch(callbacks1, resource_decoder); + Watch* watch2 = watch_map.addWatch(callbacks2, resource_decoder); // watch1 never specifies any names, and so is treated as interested in everything. watch_map.updateWatchInterest(watch2, {"alice"}); @@ -369,19 +440,21 @@ TEST(WatchMapTest, WatchingEverything) { doDeltaAndSotwUpdate(watch_map, updated_resources, {}, "version1"); } -// Delta onConfigUpdate has some slightly subtle details with how it handles the three cases where a -// watch receives {only updates, updates+removals, only removals} to its resources. This test +// Delta onConfigUpdate has some slightly subtle details with how it handles the three cases where +// a watch receives {only updates, updates+removals, only removals} to its resources. This test // exercise those cases. 
Also, the removal-only case tests that SotW does call a watch's -// onConfigUpdate even if none of the watch's interested resources are among the updated resources. -// (Which ensures we deliver empty config updates when a resource is dropped.) +// onConfigUpdate even if none of the watch's interested resources are among the updated +// resources. (Which ensures we deliver empty config updates when a resource is dropped.) TEST(WatchMapTest, DeltaOnConfigUpdate) { - NamedMockSubscriptionCallbacks callbacks1; - NamedMockSubscriptionCallbacks callbacks2; - NamedMockSubscriptionCallbacks callbacks3; + MockSubscriptionCallbacks callbacks1; + MockSubscriptionCallbacks callbacks2; + MockSubscriptionCallbacks callbacks3; + TestUtility::TestOpaqueResourceDecoderImpl + resource_decoder("cluster_name"); WatchMap watch_map; - Watch* watch1 = watch_map.addWatch(callbacks1); - Watch* watch2 = watch_map.addWatch(callbacks2); - Watch* watch3 = watch_map.addWatch(callbacks3); + Watch* watch1 = watch_map.addWatch(callbacks1, resource_decoder); + Watch* watch2 = watch_map.addWatch(callbacks2, resource_decoder); + Watch* watch3 = watch_map.addWatch(callbacks3, resource_decoder); watch_map.updateWatchInterest(watch1, {"updated"}); watch_map.updateWatchInterest(watch2, {"updated", "removed"}); watch_map.updateWatchInterest(watch3, {"removed"}); @@ -415,10 +488,12 @@ TEST(WatchMapTest, OnConfigUpdateFailed) { // calling on empty map doesn't break watch_map.onConfigUpdateFailed(ConfigUpdateFailureReason::UpdateRejected, nullptr); - NamedMockSubscriptionCallbacks callbacks1; - NamedMockSubscriptionCallbacks callbacks2; - watch_map.addWatch(callbacks1); - watch_map.addWatch(callbacks2); + MockSubscriptionCallbacks callbacks1; + MockSubscriptionCallbacks callbacks2; + TestUtility::TestOpaqueResourceDecoderImpl + resource_decoder("cluster_name"); + watch_map.addWatch(callbacks1, resource_decoder); + watch_map.addWatch(callbacks2, resource_decoder); EXPECT_CALL(callbacks1, 
onConfigUpdateFailed(ConfigUpdateFailureReason::UpdateRejected, nullptr)); EXPECT_CALL(callbacks2, onConfigUpdateFailed(ConfigUpdateFailureReason::UpdateRejected, nullptr)); @@ -427,9 +502,11 @@ TEST(WatchMapTest, OnConfigUpdateFailed) { // verifies that a watch is updated with the resource name TEST(WatchMapTest, ConvertAliasWatchesToNameWatches) { - NamedMockSubscriptionCallbacks callbacks; + MockSubscriptionCallbacks callbacks; + TestUtility::TestOpaqueResourceDecoderImpl + resource_decoder("cluster_name"); WatchMap watch_map; - Watch* watch = watch_map.addWatch(callbacks); + Watch* watch = watch_map.addWatch(callbacks, resource_decoder); watch_map.updateWatchInterest(watch, {"alias"}); envoy::service::discovery::v3::Resource resource; @@ -448,9 +525,11 @@ TEST(WatchMapTest, ConvertAliasWatchesToNameWatches) { // verifies that if a resource contains an alias the same as its name, and the watch has been set // with that alias, the watch won't be updated TEST(WatchMapTest, ConvertAliasWatchesToNameWatchesAliasIsSameAsName) { - NamedMockSubscriptionCallbacks callbacks; + MockSubscriptionCallbacks callbacks; + TestUtility::TestOpaqueResourceDecoderImpl + resource_decoder("cluster_name"); WatchMap watch_map; - Watch* watch = watch_map.addWatch(callbacks); + Watch* watch = watch_map.addWatch(callbacks, resource_decoder); watch_map.updateWatchInterest(watch, {"name-and-alias"}); envoy::service::discovery::v3::Resource resource; diff --git a/test/common/crypto/BUILD b/test/common/crypto/BUILD index b1f7d592ace4d..a7243b2309f04 100644 --- a/test/common/crypto/BUILD +++ b/test/common/crypto/BUILD @@ -1,11 +1,13 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", + "envoy_cc_fuzz_test", "envoy_cc_test", "envoy_package", + "envoy_proto_library", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( @@ -23,3 +25,28 @@ envoy_cc_test( "//source/extensions/common/crypto:utility_lib", ], ) + +envoy_proto_library( + name = 
"verify_signature_fuzz_proto", + srcs = ["verify_signature_fuzz.proto"], +) + +envoy_cc_fuzz_test( + name = "get_sha_256_digest_fuzz_test", + srcs = ["get_sha_256_digest_fuzz_test.cc"], + corpus = "get_sha_256_digest_corpus", + deps = ["//source/extensions/common/crypto:utility_lib"], +) + +envoy_cc_fuzz_test( + name = "verify_signature_fuzz_test", + srcs = ["verify_signature_fuzz_test.cc"], + corpus = "verify_signature_corpus", + dictionaries = ["verify_signature_fuzz_test.dict"], + deps = [ + ":verify_signature_fuzz_proto_cc_proto", + "//source/common/common:hex_lib", + "//source/common/crypto:utility_lib", + "//source/extensions/common/crypto:utility_lib", + ], +) diff --git a/test/common/crypto/get_sha_256_digest_corpus/35d26780ea66d4ffb726bbafaa9302687bda7624 b/test/common/crypto/get_sha_256_digest_corpus/35d26780ea66d4ffb726bbafaa9302687bda7624 new file mode 100644 index 0000000000000..5062158477797 Binary files /dev/null and b/test/common/crypto/get_sha_256_digest_corpus/35d26780ea66d4ffb726bbafaa9302687bda7624 differ diff --git a/test/common/crypto/get_sha_256_digest_corpus/58030c65410d7553b1804eb7ed64bdff1188f145 b/test/common/crypto/get_sha_256_digest_corpus/58030c65410d7553b1804eb7ed64bdff1188f145 new file mode 100644 index 0000000000000..11a9c7dafb9f9 Binary files /dev/null and b/test/common/crypto/get_sha_256_digest_corpus/58030c65410d7553b1804eb7ed64bdff1188f145 differ diff --git a/test/common/crypto/get_sha_256_digest_corpus/9c8bd40d34a88522d71d184c462af82e3148c02d b/test/common/crypto/get_sha_256_digest_corpus/9c8bd40d34a88522d71d184c462af82e3148c02d new file mode 100644 index 0000000000000..7d28a98757a87 Binary files /dev/null and b/test/common/crypto/get_sha_256_digest_corpus/9c8bd40d34a88522d71d184c462af82e3148c02d differ diff --git a/test/common/crypto/get_sha_256_digest_corpus/e7af10a10f2540b1d1d497df2926786640285b1c b/test/common/crypto/get_sha_256_digest_corpus/e7af10a10f2540b1d1d497df2926786640285b1c new file mode 100644 index 
0000000000000..cec6755d08e70 Binary files /dev/null and b/test/common/crypto/get_sha_256_digest_corpus/e7af10a10f2540b1d1d497df2926786640285b1c differ diff --git a/test/common/crypto/get_sha_256_digest_fuzz_test.cc b/test/common/crypto/get_sha_256_digest_fuzz_test.cc new file mode 100644 index 0000000000000..085778d4e41c4 --- /dev/null +++ b/test/common/crypto/get_sha_256_digest_fuzz_test.cc @@ -0,0 +1,15 @@ +#include "common/buffer/buffer_impl.h" +#include "common/crypto/utility.h" + +#include "test/fuzz/fuzz_runner.h" + +namespace Envoy { +namespace Fuzz { + +DEFINE_FUZZER(const uint8_t* buf, size_t len) { + Buffer::OwnedImpl buffer(buf, len); + auto digest = Common::Crypto::UtilitySingleton::get().getSha256Digest(buffer); +} + +} // namespace Fuzz +} // namespace Envoy diff --git a/test/common/crypto/verify_signature_corpus/test_contains_sha1_wrong b/test/common/crypto/verify_signature_corpus/test_contains_sha1_wrong new file mode 100644 index 0000000000000..73f683034300e --- /dev/null +++ b/test/common/crypto/verify_signature_corpus/test_contains_sha1_wrong @@ -0,0 +1,4 @@ +key: "30820122300d06092a864886f70d01010105000382010f003082010a0282010100a7471266d01d160308d73409c06f2e8d35c531c458d3e480e9f3191847d062ec5ccff7bc51e949d5f2c3540c189a4eca1e8633a62cf2d0923101c27e38013e71de9ae91a704849bff7fbe2ce5bf4bd666fd9731102a53193fe5a9a5a50644ff8b1183fa897646598caad22a37f9544510836372b44c58c98586fb7144629cd8c9479592d996d32ff6d395c0b8442ec5aa1ef8051529ea0e375883cefc72c04e360b4ef8f5760650589ca814918f678eee39b884d5af8136a9630a6cc0cde157dc8e00f39540628d5f335b2c36c54c7c8bc3738a6b21acff815405afa28e5183f550dac19abcf1145a7f9ced987db680e4a229cac75dee347ec9ebce1fc3dbbbb0203010001" +hash_func: "sha1" +signature: 
"345ac3a167558f4f387a81c2d64234d901a7ceaa544db779d2f797b0ea4ef851b740905a63e2f4d5af42cee093a29c7155db9a63d3d483e0ef948f5ac51ce4e10a3a6606fd93ef68ee47b30c37491103039459122f78e1c7ea71a1a5ea24bb6519bca02c8c9915fe8be24927c91812a13db72dbcb500103a79e8f67ff8cb9e2a631974e0668ab3977bf570a91b67d1b6bcd5dce84055f21427d64f4256a042ab1dc8e925d53a769f6681a873f5859693a7728fcbe95beace1563b5ffbcd7c93b898aeba31421dafbfadeea50229c49fd6c445449314460f3d19150bd29a91333beaced557ed6295234f7c14fa46303b7e977d2c89ba8a39a46a35f33eb07a332" +data: "hello" diff --git a/test/common/crypto/verify_signature_corpus/test_contains_sha256_correct b/test/common/crypto/verify_signature_corpus/test_contains_sha256_correct new file mode 100644 index 0000000000000..1afb6ea95a072 --- /dev/null +++ b/test/common/crypto/verify_signature_corpus/test_contains_sha256_correct @@ -0,0 +1,4 @@ +key: "30820122300d06092a864886f70d01010105000382010f003082010a0282010100a7471266d01d160308d73409c06f2e8d35c531c458d3e480e9f3191847d062ec5ccff7bc51e949d5f2c3540c189a4eca1e8633a62cf2d0923101c27e38013e71de9ae91a704849bff7fbe2ce5bf4bd666fd9731102a53193fe5a9a5a50644ff8b1183fa897646598caad22a37f9544510836372b44c58c98586fb7144629cd8c9479592d996d32ff6d395c0b8442ec5aa1ef8051529ea0e375883cefc72c04e360b4ef8f5760650589ca814918f678eee39b884d5af8136a9630a6cc0cde157dc8e00f39540628d5f335b2c36c54c7c8bc3738a6b21acff815405afa28e5183f550dac19abcf1145a7f9ced987db680e4a229cac75dee347ec9ebce1fc3dbbbb0203010001" +hash_func: "sha256" +signature: "345ac3a167558f4f387a81c2d64234d901a7ceaa544db779d2f797b0ea4ef851b740905a63e2f4d5af42cee093a29c7155db9a63d3d483e0ef948f5ac51ce4e10a3a6606fd93ef68ee47b30c37491103039459122f78e1c7ea71a1a5ea24bb6519bca02c8c9915fe8be24927c91812a13db72dbcb500103a79e8f67ff8cb9e2a631974e0668ab3977bf570a91b67d1b6bcd5dce84055f21427d64f4256a042ab1dc8e925d53a769f6681a873f5859693a7728fcbe95beace1563b5ffbcd7c93b898aeba31421dafbfadeea50229c49fd6c445449314460f3d19150bd29a91333beaced557ed6295234f7c14fa46303b7e977d2c89ba8a39a46a35f33eb07a332" 
+data: "hello" diff --git a/test/common/crypto/verify_signature_fuzz.proto b/test/common/crypto/verify_signature_fuzz.proto new file mode 100644 index 0000000000000..3d22351e98347 --- /dev/null +++ b/test/common/crypto/verify_signature_fuzz.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; + +package test.common.crypto; + +message VerifySignatureFuzzTestCase { + string key = 1; + string hash_func = 2; + string signature = 3; + string data = 4; +} \ No newline at end of file diff --git a/test/common/crypto/verify_signature_fuzz_test.cc b/test/common/crypto/verify_signature_fuzz_test.cc new file mode 100644 index 0000000000000..c64fa2436ecb8 --- /dev/null +++ b/test/common/crypto/verify_signature_fuzz_test.cc @@ -0,0 +1,31 @@ +#include "common/common/hex.h" +#include "common/crypto/utility.h" + +#include "test/common/crypto/verify_signature_fuzz.pb.validate.h" +#include "test/fuzz/fuzz_runner.h" + +namespace Envoy { +namespace Common { +namespace Crypto { +namespace { + +DEFINE_PROTO_FUZZER(const test::common::crypto::VerifySignatureFuzzTestCase& input) { + const auto& key = input.key(); + const auto& hash_func = input.hash_func(); + const auto& signature = input.signature(); + const auto& data = input.data(); + + Common::Crypto::CryptoObjectPtr crypto_ptr( + Common::Crypto::UtilitySingleton::get().importPublicKey(Hex::decode(key))); + Common::Crypto::CryptoObject* crypto(crypto_ptr.get()); + + std::vector text(data.begin(), data.end()); + + const auto sig = Hex::decode(signature); + UtilitySingleton::get().verifySignature(hash_func, *crypto, sig, text); +} + +} // namespace +} // namespace Crypto +} // namespace Common +} // namespace Envoy diff --git a/test/common/crypto/verify_signature_fuzz_test.dict b/test/common/crypto/verify_signature_fuzz_test.dict new file mode 100644 index 0000000000000..b6378abfd1909 --- /dev/null +++ b/test/common/crypto/verify_signature_fuzz_test.dict @@ -0,0 +1,6 @@ +# hash_func +"sha1" +"sha224" +"sha256" +"sha384" +"sha512" diff --git 
a/test/common/decompressor/BUILD b/test/common/decompressor/BUILD deleted file mode 100644 index d1608797be719..0000000000000 --- a/test/common/decompressor/BUILD +++ /dev/null @@ -1,21 +0,0 @@ -licenses(["notice"]) # Apache 2 - -load( - "//bazel:envoy_build_system.bzl", - "envoy_cc_test", - "envoy_package", -) - -envoy_package() - -envoy_cc_test( - name = "decompressor_test", - srcs = ["zlib_decompressor_impl_test.cc"], - deps = [ - "//source/common/common:assert_lib", - "//source/common/common:hex_lib", - "//source/common/compressor:compressor_lib", - "//source/common/decompressor:decompressor_lib", - "//test/test_common:utility_lib", - ], -) diff --git a/test/common/event/BUILD b/test/common/event/BUILD index f2af3acf3dc57..b6032fe718258 100644 --- a/test/common/event/BUILD +++ b/test/common/event/BUILD @@ -1,16 +1,17 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( name = "dispatcher_impl_test", srcs = ["dispatcher_impl_test.cc"], + tags = ["fails_on_windows"], deps = [ "//source/common/api:api_lib", "//source/common/event:deferred_task", @@ -20,6 +21,7 @@ envoy_cc_test( "//test/mocks:common_lib", "//test/mocks/stats:stats_mocks", "//test/test_common:simulated_time_system_lib", + "//test/test_common:test_runtime_lib", "//test/test_common:utility_lib", ], ) @@ -27,6 +29,7 @@ envoy_cc_test( envoy_cc_test( name = "file_event_impl_test", srcs = ["file_event_impl_test.cc"], + tags = ["fails_on_windows"], deps = [ "//include/envoy/event:file_event_interface", "//source/common/event:dispatcher_includes", @@ -34,6 +37,7 @@ envoy_cc_test( "//source/common/stats:isolated_store_lib", "//test/mocks:common_lib", "//test/test_common:environment_lib", + "//test/test_common:test_runtime_lib", "//test/test_common:utility_lib", ], ) diff --git a/test/common/event/dispatcher_impl_test.cc b/test/common/event/dispatcher_impl_test.cc index 
f6db84f844bfb..44055a091c899 100644 --- a/test/common/event/dispatcher_impl_test.cc +++ b/test/common/event/dispatcher_impl_test.cc @@ -12,6 +12,7 @@ #include "test/mocks/common.h" #include "test/mocks/stats/mocks.h" #include "test/test_common/simulated_time_system.h" +#include "test/test_common/test_runtime.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" @@ -25,6 +26,153 @@ namespace Envoy { namespace Event { namespace { +static void onWatcherReady(evwatch*, const evwatch_prepare_cb_info*, void* arg) { + // `arg` contains the ReadyWatcher passed in from evwatch_prepare_new. + auto watcher = static_cast(arg); + watcher->ready(); +} + +class SchedulableCallbackImplTest : public testing::Test { +protected: + SchedulableCallbackImplTest() + : api_(Api::createApiForTest()), dispatcher_(api_->allocateDispatcher("test_thread")) {} + + void createCallback(std::function cb) { + callbacks_.emplace_back(dispatcher_->createSchedulableCallback(cb)); + } + + Api::ApiPtr api_; + DispatcherPtr dispatcher_; + std::vector callbacks_; +}; + +TEST_F(SchedulableCallbackImplTest, ScheduleCurrentAndCancel) { + ReadyWatcher watcher; + + auto cb = dispatcher_->createSchedulableCallback([&]() { watcher.ready(); }); + + // Cancel is a no-op if not scheduled. + cb->cancel(); + dispatcher_->run(Dispatcher::RunType::Block); + + // Callback is not invoked if cancelled before it executes. + cb->scheduleCallbackCurrentIteration(); + EXPECT_TRUE(cb->enabled()); + cb->cancel(); + EXPECT_FALSE(cb->enabled()); + dispatcher_->run(Dispatcher::RunType::Block); + + // Scheduled callback executes. + cb->scheduleCallbackCurrentIteration(); + EXPECT_CALL(watcher, ready()); + dispatcher_->run(Dispatcher::RunType::Block); + + // Callbacks implicitly cancelled if runner is deleted. 
+ cb->scheduleCallbackCurrentIteration(); + cb.reset(); + dispatcher_->run(Dispatcher::RunType::Block); +} + +TEST_F(SchedulableCallbackImplTest, ScheduleNextAndCancel) { + ReadyWatcher watcher; + + auto cb = dispatcher_->createSchedulableCallback([&]() { watcher.ready(); }); + + // Cancel is a no-op if not scheduled. + cb->cancel(); + dispatcher_->run(Dispatcher::RunType::Block); + + // Callback is not invoked if cancelled before it executes. + cb->scheduleCallbackNextIteration(); + EXPECT_TRUE(cb->enabled()); + cb->cancel(); + EXPECT_FALSE(cb->enabled()); + dispatcher_->run(Dispatcher::RunType::Block); + + // Scheduled callback executes. + cb->scheduleCallbackNextIteration(); + EXPECT_CALL(watcher, ready()); + dispatcher_->run(Dispatcher::RunType::Block); + + // Callbacks implicitly cancelled if runner is deleted. + cb->scheduleCallbackNextIteration(); + cb.reset(); + dispatcher_->run(Dispatcher::RunType::Block); +} + +TEST_F(SchedulableCallbackImplTest, ScheduleOrder) { + ReadyWatcher watcher0; + createCallback([&]() { watcher0.ready(); }); + ReadyWatcher watcher1; + createCallback([&]() { watcher1.ready(); }); + ReadyWatcher watcher2; + createCallback([&]() { watcher2.ready(); }); + + // Current iteration callbacks run in the order they are scheduled. Next iteration callbacks run + // after current iteration callbacks. 
+ callbacks_[0]->scheduleCallbackNextIteration(); + callbacks_[1]->scheduleCallbackCurrentIteration(); + callbacks_[2]->scheduleCallbackCurrentIteration(); + InSequence s; + EXPECT_CALL(watcher1, ready()); + EXPECT_CALL(watcher2, ready()); + EXPECT_CALL(watcher0, ready()); + dispatcher_->run(Dispatcher::RunType::Block); +} + +TEST_F(SchedulableCallbackImplTest, ScheduleChainingAndCancellation) { + DispatcherImpl* dispatcher_impl = static_cast(dispatcher_.get()); + ReadyWatcher prepare_watcher; + evwatch_prepare_new(&dispatcher_impl->base(), onWatcherReady, &prepare_watcher); + + ReadyWatcher watcher0; + createCallback([&]() { + watcher0.ready(); + callbacks_[1]->scheduleCallbackCurrentIteration(); + }); + + ReadyWatcher watcher1; + createCallback([&]() { + watcher1.ready(); + callbacks_[2]->scheduleCallbackCurrentIteration(); + callbacks_[3]->scheduleCallbackCurrentIteration(); + callbacks_[4]->scheduleCallbackCurrentIteration(); + callbacks_[5]->scheduleCallbackNextIteration(); + }); + + ReadyWatcher watcher2; + createCallback([&]() { + watcher2.ready(); + EXPECT_TRUE(callbacks_[3]->enabled()); + callbacks_[3]->cancel(); + EXPECT_TRUE(callbacks_[4]->enabled()); + callbacks_[4].reset(); + }); + + ReadyWatcher watcher3; + createCallback([&]() { watcher3.ready(); }); + + ReadyWatcher watcher4; + createCallback([&]() { watcher4.ready(); }); + + ReadyWatcher watcher5; + createCallback([&]() { watcher5.ready(); }); + + // Chained callbacks run in the same event loop iteration, as signaled by a single call to + // prepare_watcher.ready(). watcher3 and watcher4 are not invoked because cb2 cancels + // cb3 and deletes cb4 as part of its execution. cb5 runs after a second call to the + // prepare callback since it's scheduled for the next iteration. 
+ callbacks_[0]->scheduleCallbackCurrentIteration(); + InSequence s; + EXPECT_CALL(prepare_watcher, ready()); + EXPECT_CALL(watcher0, ready()); + EXPECT_CALL(watcher1, ready()); + EXPECT_CALL(watcher2, ready()); + EXPECT_CALL(prepare_watcher, ready()); + EXPECT_CALL(watcher5, ready()); + dispatcher_->run(Dispatcher::RunType::Block); +} + class TestDeferredDeletable : public DeferredDeletable { public: TestDeferredDeletable(std::function on_destroy) : on_destroy_(on_destroy) {} @@ -209,6 +357,7 @@ TEST_F(DispatcherImplTest, RunPostCallbacksLocking) { } TEST_F(DispatcherImplTest, Timer) { + timerTest([](Timer& timer) { timer.enableTimer(std::chrono::milliseconds(0)); }); timerTest([](Timer& timer) { timer.enableTimer(std::chrono::milliseconds(50)); }); timerTest([](Timer& timer) { timer.enableHRTimer(std::chrono::microseconds(50)); }); } @@ -226,7 +375,7 @@ TEST_F(DispatcherImplTest, TimerWithScope) { timer = dispatcher_->createTimer([this]() { { Thread::LockGuard lock(mu_); - static_cast(dispatcher_.get())->onFatalError(); + static_cast(dispatcher_.get())->onFatalError(std::cerr); work_finished_ = true; } cv_.notifyOne(); @@ -324,21 +473,486 @@ TEST_F(DispatcherMonotonicTimeTest, ApproximateMonotonicTime) { dispatcher_->run(Dispatcher::RunType::Block); } -TEST(TimerImplTest, TimerEnabledDisabled) { - Api::ApiPtr api = Api::createApiForTest(); - DispatcherPtr dispatcher(api->allocateDispatcher("test_thread")); - Event::TimerPtr timer = dispatcher->createTimer([] {}); +class TimerImplTest : public testing::TestWithParam { +protected: + TimerImplTest() { + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.activate_timers_next_event_loop", + activateTimersNextEventLoop() ? "true" : "false"}}); + // Watch for dispatcher prepare events. 
+ evwatch_prepare_new(&static_cast(dispatcher_.get())->base(), onWatcherReady, + &prepare_watcher_); + } + + bool activateTimersNextEventLoop() { return GetParam(); } + + void SetUp() override { + // Update time cache to provide a stable time reference for timer registration. + event_base_update_cache_time(&libevent_base_); + } + + // Advance time forward while updating the libevent's time cache and monotonic time reference. + // Pushing the monotonic time reference forward eliminates the possibility of time moving + // backwards and breaking the overly picky TimerImpl tests below. + void advanceLibeventTime(absl::Duration duration) { + timeval start_tv; + { + int ret = event_base_gettimeofday_cached(&libevent_base_, &start_tv); + RELEASE_ASSERT(ret == 0, "event_base_gettimeofday_cached failed"); + } + + timeval now_tv; + do { + absl::SleepFor(duration); + event_base_update_cache_time(&libevent_base_); + int ret = event_base_gettimeofday_cached(&libevent_base_, &now_tv); + RELEASE_ASSERT(ret == 0, "event_base_gettimeofday_cached failed"); + } while (duration > absl::DurationFromTimeval(now_tv) - absl::DurationFromTimeval(start_tv)); + } + + TestScopedRuntime scoped_runtime_; + Api::ApiPtr api_{Api::createApiForTest()}; + DispatcherPtr dispatcher_{api_->allocateDispatcher("test_thread")}; + event_base& libevent_base_{static_cast(*dispatcher_).base()}; + ReadyWatcher prepare_watcher_; +}; + +INSTANTIATE_TEST_SUITE_P(DelayActivation, TimerImplTest, testing::Bool()); + +TEST_P(TimerImplTest, TimerEnabledDisabled) { + InSequence s; + + Event::TimerPtr timer = dispatcher_->createTimer([] {}); EXPECT_FALSE(timer->enabled()); timer->enableTimer(std::chrono::milliseconds(0)); EXPECT_TRUE(timer->enabled()); - dispatcher->run(Dispatcher::RunType::NonBlock); + EXPECT_CALL(prepare_watcher_, ready()); + dispatcher_->run(Dispatcher::RunType::NonBlock); EXPECT_FALSE(timer->enabled()); timer->enableHRTimer(std::chrono::milliseconds(0)); EXPECT_TRUE(timer->enabled()); - 
dispatcher->run(Dispatcher::RunType::NonBlock); + EXPECT_CALL(prepare_watcher_, ready()); + dispatcher_->run(Dispatcher::RunType::NonBlock); EXPECT_FALSE(timer->enabled()); } +TEST_P(TimerImplTest, ChangeTimerBackwardsBeforeRun) { + ReadyWatcher watcher1; + Event::TimerPtr timer1 = dispatcher_->createTimer([&] { watcher1.ready(); }); + + ReadyWatcher watcher2; + Event::TimerPtr timer2 = dispatcher_->createTimer([&] { watcher2.ready(); }); + + ReadyWatcher watcher3; + Event::TimerPtr timer3 = dispatcher_->createTimer([&] { watcher3.ready(); }); + + timer1->enableTimer(std::chrono::milliseconds(0)); + timer2->enableTimer(std::chrono::milliseconds(1)); + timer3->enableTimer(std::chrono::milliseconds(2)); + timer2->enableTimer(std::chrono::milliseconds(3)); + timer1->enableTimer(std::chrono::milliseconds(4)); + + // Advance time by 5ms so timers above all trigger in the same loop iteration. + advanceLibeventTime(absl::Milliseconds(5)); + + // Expect watcher3 to trigger first because the deadlines for timers 1 and 2 was moved backwards. + InSequence s; + EXPECT_CALL(prepare_watcher_, ready()); + EXPECT_CALL(watcher3, ready()); + EXPECT_CALL(watcher2, ready()); + EXPECT_CALL(watcher1, ready()); + dispatcher_->run(Dispatcher::RunType::Block); +} + +TEST_P(TimerImplTest, ChangeTimerForwardsToZeroBeforeRun) { + ReadyWatcher watcher1; + Event::TimerPtr timer1 = dispatcher_->createTimer([&] { watcher1.ready(); }); + + ReadyWatcher watcher2; + Event::TimerPtr timer2 = dispatcher_->createTimer([&] { watcher2.ready(); }); + + timer1->enableTimer(std::chrono::milliseconds(2)); + timer2->enableTimer(std::chrono::milliseconds(1)); + timer1->enableTimer(std::chrono::milliseconds(0)); + + // Advance time by 5ms so timers above all trigger in the same loop iteration. + advanceLibeventTime(absl::Milliseconds(5)); + + // Expect watcher1 to trigger first because timer1's deadline was moved forward. 
+ InSequence s; + EXPECT_CALL(prepare_watcher_, ready()); + if (activateTimersNextEventLoop()) { + EXPECT_CALL(watcher1, ready()); + EXPECT_CALL(watcher2, ready()); + } else { + // Timers execute in the wrong order. + EXPECT_CALL(watcher2, ready()); + EXPECT_CALL(watcher1, ready()); + } + dispatcher_->run(Dispatcher::RunType::NonBlock); +} + +TEST_P(TimerImplTest, ChangeTimerForwardsToNonZeroBeforeRun) { + ReadyWatcher watcher1; + Event::TimerPtr timer1 = dispatcher_->createTimer([&] { watcher1.ready(); }); + + ReadyWatcher watcher2; + Event::TimerPtr timer2 = dispatcher_->createTimer([&] { watcher2.ready(); }); + + timer1->enableTimer(std::chrono::milliseconds(3)); + timer2->enableTimer(std::chrono::milliseconds(2)); + timer1->enableTimer(std::chrono::milliseconds(1)); + + // Advance time by 5ms so timers above all trigger in the same loop iteration. + advanceLibeventTime(absl::Milliseconds(5)); + + // Expect watcher1 to trigger first because timer1's deadline was moved forward. + InSequence s; + EXPECT_CALL(prepare_watcher_, ready()); + EXPECT_CALL(watcher1, ready()); + EXPECT_CALL(watcher2, ready()); + dispatcher_->run(Dispatcher::RunType::NonBlock); +} + +TEST_P(TimerImplTest, ChangeLargeTimerForwardToZeroBeforeRun) { + ReadyWatcher watcher1; + Event::TimerPtr timer1 = dispatcher_->createTimer([&] { watcher1.ready(); }); + + ReadyWatcher watcher2; + Event::TimerPtr timer2 = dispatcher_->createTimer([&] { watcher2.ready(); }); + + timer1->enableTimer(std::chrono::seconds(2000)); + timer2->enableTimer(std::chrono::seconds(1000)); + timer1->enableTimer(std::chrono::seconds(0)); + + // Expect watcher1 to trigger because timer1's deadline was moved forward. 
+ InSequence s; + EXPECT_CALL(prepare_watcher_, ready()); + EXPECT_CALL(watcher1, ready()); + EXPECT_CALL(prepare_watcher_, ready()); + dispatcher_->run(Dispatcher::RunType::NonBlock); +} + +TEST_P(TimerImplTest, ChangeLargeTimerForwardToNonZeroBeforeRun) { + ReadyWatcher watcher1; + Event::TimerPtr timer1 = dispatcher_->createTimer([&] { watcher1.ready(); }); + + ReadyWatcher watcher2; + Event::TimerPtr timer2 = dispatcher_->createTimer([&] { watcher2.ready(); }); + + timer1->enableTimer(std::chrono::seconds(2000)); + timer2->enableTimer(std::chrono::seconds(1000)); + timer1->enableTimer(std::chrono::milliseconds(1)); + + // Advance time by 5ms so timers above all trigger in the same loop iteration. + advanceLibeventTime(absl::Milliseconds(5)); + + // Expect watcher1 to trigger because timer1's deadline was moved forward. + InSequence s; + EXPECT_CALL(prepare_watcher_, ready()); + EXPECT_CALL(watcher1, ready()); + EXPECT_CALL(prepare_watcher_, ready()); + dispatcher_->run(Dispatcher::RunType::NonBlock); +} + +// Timers scheduled at different times execute in order. +TEST_P(TimerImplTest, TimerOrdering) { + ReadyWatcher watcher1; + Event::TimerPtr timer1 = dispatcher_->createTimer([&] { watcher1.ready(); }); + + ReadyWatcher watcher2; + Event::TimerPtr timer2 = dispatcher_->createTimer([&] { watcher2.ready(); }); + + ReadyWatcher watcher3; + Event::TimerPtr timer3 = dispatcher_->createTimer([&] { watcher3.ready(); }); + + timer1->enableTimer(std::chrono::milliseconds(0)); + timer2->enableTimer(std::chrono::milliseconds(1)); + timer3->enableTimer(std::chrono::milliseconds(2)); + + // Advance time by 5ms so timers above all trigger in the same loop iteration. + advanceLibeventTime(absl::Milliseconds(5)); + + EXPECT_TRUE(timer1->enabled()); + EXPECT_TRUE(timer2->enabled()); + EXPECT_TRUE(timer3->enabled()); + + // Expect watcher calls to happen in order since timers have different times. 
+ InSequence s; + EXPECT_CALL(prepare_watcher_, ready()); + EXPECT_CALL(watcher1, ready()); + EXPECT_CALL(watcher2, ready()); + EXPECT_CALL(watcher3, ready()); + dispatcher_->run(Dispatcher::RunType::Block); +} + +// Alarms that are scheduled to execute and are cancelled do not trigger. +TEST_P(TimerImplTest, TimerOrderAndDisableAlarm) { + ReadyWatcher watcher3; + Event::TimerPtr timer3 = dispatcher_->createTimer([&] { watcher3.ready(); }); + + ReadyWatcher watcher2; + Event::TimerPtr timer2 = dispatcher_->createTimer([&] { watcher2.ready(); }); + + ReadyWatcher watcher1; + Event::TimerPtr timer1 = dispatcher_->createTimer([&] { + timer2->disableTimer(); + watcher1.ready(); + }); + + timer1->enableTimer(std::chrono::milliseconds(0)); + timer2->enableTimer(std::chrono::milliseconds(1)); + timer3->enableTimer(std::chrono::milliseconds(2)); + + // Advance time by 5ms so timers above all trigger in the same loop iteration. + advanceLibeventTime(absl::Milliseconds(5)); + + EXPECT_TRUE(timer1->enabled()); + EXPECT_TRUE(timer2->enabled()); + EXPECT_TRUE(timer3->enabled()); + + // Expect watcher calls to happen in order since timers have different times. + InSequence s; + EXPECT_CALL(prepare_watcher_, ready()); + EXPECT_CALL(watcher1, ready()); + EXPECT_CALL(watcher3, ready()); + dispatcher_->run(Dispatcher::RunType::Block); +} + +// Change the registration time for a timer that is already activated by disabling and re-enabling +// the timer. Verify that execution is delayed. 
+TEST_P(TimerImplTest, TimerOrderDisableAndReschedule) { + ReadyWatcher watcher4; + Event::TimerPtr timer4 = dispatcher_->createTimer([&] { watcher4.ready(); }); + + ReadyWatcher watcher3; + Event::TimerPtr timer3 = dispatcher_->createTimer([&] { watcher3.ready(); }); + + ReadyWatcher watcher2; + Event::TimerPtr timer2 = dispatcher_->createTimer([&] { watcher2.ready(); }); + + ReadyWatcher watcher1; + Event::TimerPtr timer1 = dispatcher_->createTimer([&] { + timer2->disableTimer(); + timer2->enableTimer(std::chrono::milliseconds(0)); + timer3->disableTimer(); + timer3->enableTimer(std::chrono::milliseconds(1)); + watcher1.ready(); + }); + + timer1->enableTimer(std::chrono::milliseconds(0)); + timer2->enableTimer(std::chrono::milliseconds(1)); + timer3->enableTimer(std::chrono::milliseconds(2)); + timer4->enableTimer(std::chrono::milliseconds(3)); + + // Advance time by 5ms so timers above all trigger in the same loop iteration. + advanceLibeventTime(absl::Milliseconds(5)); + + EXPECT_TRUE(timer1->enabled()); + EXPECT_TRUE(timer2->enabled()); + EXPECT_TRUE(timer3->enabled()); + EXPECT_TRUE(timer4->enabled()); + + // timer1 is expected to run first and reschedule timers 2 and 3. timer4 should fire before + // timer2 and timer3 since timer4's registration is unaffected. + InSequence s; + EXPECT_CALL(prepare_watcher_, ready()); + EXPECT_CALL(watcher1, ready()); + if (activateTimersNextEventLoop()) { + EXPECT_CALL(watcher4, ready()); + // Sleep during prepare to ensure that enough time has elapsed before timer evaluation to ensure + // that timers 2 and 3 are picked up by the same loop iteration. Without the sleep the two + // timers could execute in different loop iterations. 
+ EXPECT_CALL(prepare_watcher_, ready()).WillOnce(testing::InvokeWithoutArgs([&]() { + advanceLibeventTime(absl::Milliseconds(5)); + })); + EXPECT_CALL(watcher2, ready()); + EXPECT_CALL(watcher3, ready()); + } else { + EXPECT_CALL(watcher4, ready()); + EXPECT_CALL(watcher2, ready()); + // Sleep in prepare cb to avoid flakiness if epoll_wait returns before the timer timeout. + EXPECT_CALL(prepare_watcher_, ready()).WillOnce(testing::InvokeWithoutArgs([&]() { + advanceLibeventTime(absl::Milliseconds(5)); + })); + EXPECT_CALL(watcher3, ready()); + } + dispatcher_->run(Dispatcher::RunType::Block); +} + +// Change the registration time for a timer that is already activated by re-enabling the timer +// without calling disableTimer first. +TEST_P(TimerImplTest, TimerOrderAndReschedule) { + ReadyWatcher watcher4; + Event::TimerPtr timer4 = dispatcher_->createTimer([&] { watcher4.ready(); }); + + ReadyWatcher watcher3; + Event::TimerPtr timer3 = dispatcher_->createTimer([&] { watcher3.ready(); }); + + ReadyWatcher watcher2; + Event::TimerPtr timer2 = dispatcher_->createTimer([&] { watcher2.ready(); }); + + ReadyWatcher watcher1; + Event::TimerPtr timer1 = dispatcher_->createTimer([&] { + timer2->enableTimer(std::chrono::milliseconds(0)); + timer3->enableTimer(std::chrono::milliseconds(1)); + watcher1.ready(); + }); + + timer1->enableTimer(std::chrono::milliseconds(0)); + timer2->enableTimer(std::chrono::milliseconds(1)); + timer3->enableTimer(std::chrono::milliseconds(2)); + timer4->enableTimer(std::chrono::milliseconds(3)); + + // Advance time by 5ms so timers above all trigger in the same loop iteration. + advanceLibeventTime(absl::Milliseconds(5)); + + EXPECT_TRUE(timer1->enabled()); + EXPECT_TRUE(timer2->enabled()); + EXPECT_TRUE(timer3->enabled()); + EXPECT_TRUE(timer4->enabled()); + + // Rescheduling timers that are already scheduled to run in the current event loop iteration has + // no effect if the time delta is 0. 
Expect timers 1, 2 and 4 to execute in the original order. + // Timer 3 is delayed since it is rescheduled with a non-zero delta. + InSequence s; + EXPECT_CALL(prepare_watcher_, ready()); + EXPECT_CALL(watcher1, ready()); + if (activateTimersNextEventLoop()) { + EXPECT_CALL(watcher4, ready()); + // Sleep during prepare to ensure that enough time has elapsed before timer evaluation to ensure + // that timers 2 and 3 are picked up by the same loop iteration. Without the sleep the two + // timers could execute in different loop iterations. + EXPECT_CALL(prepare_watcher_, ready()).WillOnce(testing::InvokeWithoutArgs([&]() { + advanceLibeventTime(absl::Milliseconds(5)); + })); + EXPECT_CALL(watcher2, ready()); + EXPECT_CALL(watcher3, ready()); + } else { + EXPECT_CALL(watcher2, ready()); + EXPECT_CALL(watcher4, ready()); + // Sleep in prepare cb to avoid flakiness if epoll_wait returns before the timer timeout. + EXPECT_CALL(prepare_watcher_, ready()).WillOnce(testing::InvokeWithoutArgs([&]() { + advanceLibeventTime(absl::Milliseconds(5)); + })); + EXPECT_CALL(watcher3, ready()); + } + dispatcher_->run(Dispatcher::RunType::Block); +} + +TEST_P(TimerImplTest, TimerChaining) { + ReadyWatcher watcher1; + Event::TimerPtr timer1 = dispatcher_->createTimer([&] { watcher1.ready(); }); + + ReadyWatcher watcher2; + Event::TimerPtr timer2 = dispatcher_->createTimer([&] { + watcher2.ready(); + timer1->enableTimer(std::chrono::milliseconds(0)); + }); + + ReadyWatcher watcher3; + Event::TimerPtr timer3 = dispatcher_->createTimer([&] { + watcher3.ready(); + timer2->enableTimer(std::chrono::milliseconds(0)); + }); + + ReadyWatcher watcher4; + Event::TimerPtr timer4 = dispatcher_->createTimer([&] { + watcher4.ready(); + timer3->enableTimer(std::chrono::milliseconds(0)); + }); + + timer4->enableTimer(std::chrono::milliseconds(0)); + + EXPECT_FALSE(timer1->enabled()); + EXPECT_FALSE(timer2->enabled()); + EXPECT_FALSE(timer3->enabled()); + EXPECT_TRUE(timer4->enabled()); + InSequence s; + 
EXPECT_CALL(prepare_watcher_, ready()); + EXPECT_CALL(watcher4, ready()); + if (activateTimersNextEventLoop()) { + EXPECT_CALL(prepare_watcher_, ready()); + } + EXPECT_CALL(watcher3, ready()); + if (activateTimersNextEventLoop()) { + EXPECT_CALL(prepare_watcher_, ready()); + } + EXPECT_CALL(watcher2, ready()); + if (activateTimersNextEventLoop()) { + EXPECT_CALL(prepare_watcher_, ready()); + } + EXPECT_CALL(watcher1, ready()); + dispatcher_->run(Dispatcher::RunType::NonBlock); + + EXPECT_FALSE(timer1->enabled()); + EXPECT_FALSE(timer2->enabled()); + EXPECT_FALSE(timer3->enabled()); + EXPECT_FALSE(timer4->enabled()); +} + +TEST_P(TimerImplTest, TimerChainDisable) { + ReadyWatcher watcher; + Event::TimerPtr timer1; + Event::TimerPtr timer2; + Event::TimerPtr timer3; + + auto timer_cb = [&] { + watcher.ready(); + timer1->disableTimer(); + timer2->disableTimer(); + timer3->disableTimer(); + }; + + timer1 = dispatcher_->createTimer(timer_cb); + timer2 = dispatcher_->createTimer(timer_cb); + timer3 = dispatcher_->createTimer(timer_cb); + + timer3->enableTimer(std::chrono::milliseconds(0)); + timer2->enableTimer(std::chrono::milliseconds(0)); + timer1->enableTimer(std::chrono::milliseconds(0)); + + EXPECT_TRUE(timer1->enabled()); + EXPECT_TRUE(timer2->enabled()); + EXPECT_TRUE(timer3->enabled()); + InSequence s; + // Only 1 call to watcher ready since the other 2 timers were disabled by the first timer. 
+ EXPECT_CALL(prepare_watcher_, ready()); + EXPECT_CALL(watcher, ready()); + dispatcher_->run(Dispatcher::RunType::NonBlock); +} + +TEST_P(TimerImplTest, TimerChainDelete) { + ReadyWatcher watcher; + Event::TimerPtr timer1; + Event::TimerPtr timer2; + Event::TimerPtr timer3; + + auto timer_cb = [&] { + watcher.ready(); + timer1.reset(); + timer2.reset(); + timer3.reset(); + }; + + timer1 = dispatcher_->createTimer(timer_cb); + timer2 = dispatcher_->createTimer(timer_cb); + timer3 = dispatcher_->createTimer(timer_cb); + + timer3->enableTimer(std::chrono::milliseconds(0)); + timer2->enableTimer(std::chrono::milliseconds(0)); + timer1->enableTimer(std::chrono::milliseconds(0)); + + EXPECT_TRUE(timer1->enabled()); + EXPECT_TRUE(timer2->enabled()); + EXPECT_TRUE(timer3->enabled()); + InSequence s; + // Only 1 call to watcher ready since the other 2 timers were deleted by the first timer. + EXPECT_CALL(prepare_watcher_, ready()); + EXPECT_CALL(watcher, ready()); + dispatcher_->run(Dispatcher::RunType::NonBlock); +} + class TimerImplTimingTest : public testing::Test { public: std::chrono::nanoseconds getTimerTiming(Event::SimulatedTimeSystem& time_system, @@ -347,6 +961,13 @@ class TimerImplTimingTest : public testing::Test { EXPECT_TRUE(timer.enabled()); while (true) { dispatcher.run(Dispatcher::RunType::NonBlock); +#ifdef WIN32 + // The event loop runs for a single iteration in NonBlock mode on Windows. A few iterations + // are required to ensure that next iteration callbacks have a chance to run before time + // advances once again. 
+ dispatcher.run(Dispatcher::RunType::NonBlock); + dispatcher.run(Dispatcher::RunType::NonBlock); +#endif if (timer.enabled()) { time_system.advanceTimeAsync(std::chrono::microseconds(1)); } else { diff --git a/test/common/event/file_event_impl_test.cc b/test/common/event/file_event_impl_test.cc index bca220731f7e7..ca34b5eaf8449 100644 --- a/test/common/event/file_event_impl_test.cc +++ b/test/common/event/file_event_impl_test.cc @@ -8,6 +8,7 @@ #include "test/mocks/common.h" #include "test/test_common/environment.h" +#include "test/test_common/test_runtime.h" #include "test/test_common/utility.h" #include "gtest/gtest.h" @@ -46,22 +47,37 @@ class FileEventImplTest : public testing::Test { Api::OsSysCalls& os_sys_calls_; }; -class FileEventImplActivateTest : public testing::TestWithParam { +class FileEventImplActivateTest + : public testing::TestWithParam> { public: - FileEventImplActivateTest() : os_sys_calls_(Api::OsSysCallsSingleton::get()) {} + FileEventImplActivateTest() : os_sys_calls_(Api::OsSysCallsSingleton::get()) { + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.activate_fds_next_event_loop", + activateFdsNextEventLoop() ? "true" : "false"}}); + } + + static void onWatcherReady(evwatch*, const evwatch_prepare_cb_info*, void* arg) { + // `arg` contains the ReadyWatcher passed in from evwatch_prepare_new. + auto watcher = static_cast(arg); + watcher->ready(); + } + + int domain() { + return std::get<0>(GetParam()) == Network::Address::IpVersion::v4 ? 
AF_INET : AF_INET6; + } + bool activateFdsNextEventLoop() { return std::get<1>(GetParam()); } protected: Api::OsSysCalls& os_sys_calls_; + TestScopedRuntime scoped_runtime_; }; -INSTANTIATE_TEST_SUITE_P(IpVersions, FileEventImplActivateTest, - testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), - TestUtility::ipTestParamsToString); +INSTANTIATE_TEST_SUITE_P( + IpVersions, FileEventImplActivateTest, + testing::Combine(testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), testing::Bool())); TEST_P(FileEventImplActivateTest, Activate) { - os_fd_t fd; - int domain = GetParam() == Network::Address::IpVersion::v4 ? AF_INET : AF_INET6; - fd = os_sys_calls_.socket(domain, SOCK_STREAM, 0).rc_; + os_fd_t fd = os_sys_calls_.socket(domain(), SOCK_STREAM, 0).rc_; ASSERT_TRUE(SOCKET_VALID(fd)); Api::ApiPtr api = Api::createApiForTest(); @@ -102,6 +118,149 @@ TEST_P(FileEventImplActivateTest, Activate) { os_sys_calls_.close(fd); } +TEST_P(FileEventImplActivateTest, ActivateChaining) { + os_fd_t fd = os_sys_calls_.socket(domain(), SOCK_STREAM, 0).rc_; + ASSERT_TRUE(SOCKET_VALID(fd)); + + Api::ApiPtr api = Api::createApiForTest(); + DispatcherPtr dispatcher(api->allocateDispatcher("test_thread")); + ReadyWatcher fd_event; + ReadyWatcher read_event; + ReadyWatcher write_event; + ReadyWatcher closed_event; + + ReadyWatcher prepare_watcher; + evwatch_prepare_new(&static_cast(dispatcher.get())->base(), onWatcherReady, + &prepare_watcher); + +#ifdef WIN32 + const FileTriggerType trigger = FileTriggerType::Level; +#else + const FileTriggerType trigger = FileTriggerType::Edge; +#endif + + Event::FileEventPtr file_event = dispatcher->createFileEvent( + fd, + [&](uint32_t events) -> void { + fd_event.ready(); + if (events & FileReadyType::Read) { + read_event.ready(); + file_event->activate(FileReadyType::Write); + file_event->activate(FileReadyType::Closed); + } + + if (events & FileReadyType::Write) { + write_event.ready(); + file_event->activate(FileReadyType::Closed); + } 
+ + if (events & FileReadyType::Closed) { + closed_event.ready(); + } + }, + trigger, FileReadyType::Read | FileReadyType::Write | FileReadyType::Closed); + + testing::InSequence s; + // First loop iteration: handle scheduled read event and the real write event produced by poll. + // Note that the real and injected events are combined and delivered in a single call to the fd + // callback. + EXPECT_CALL(prepare_watcher, ready()); + EXPECT_CALL(fd_event, ready()); + EXPECT_CALL(read_event, ready()); + EXPECT_CALL(write_event, ready()); + if (activateFdsNextEventLoop()) { + // Second loop iteration: handle write and close events scheduled while handling read. + EXPECT_CALL(prepare_watcher, ready()); + EXPECT_CALL(fd_event, ready()); + EXPECT_CALL(write_event, ready()); + EXPECT_CALL(closed_event, ready()); + // Third loop iteration: handle close event scheduled while handling write. + EXPECT_CALL(prepare_watcher, ready()); + EXPECT_CALL(fd_event, ready()); + EXPECT_CALL(closed_event, ready()); + // Fourth loop iteration: poll returned no new real events. + EXPECT_CALL(prepare_watcher, ready()); + } else { + // Same loop iteration activation: handle write and close events scheduled while handling read. + EXPECT_CALL(fd_event, ready()); + EXPECT_CALL(write_event, ready()); + EXPECT_CALL(closed_event, ready()); + // Second same loop iteration activation: handle close event scheduled while handling write. + EXPECT_CALL(fd_event, ready()); + EXPECT_CALL(closed_event, ready()); + // Second loop iteration: poll returned no new real events. 
+ EXPECT_CALL(prepare_watcher, ready()); + } + + file_event->activate(FileReadyType::Read); + dispatcher->run(Event::Dispatcher::RunType::NonBlock); + + os_sys_calls_.close(fd); +} + +TEST_P(FileEventImplActivateTest, SetEnableCancelsActivate) { + os_fd_t fd = os_sys_calls_.socket(domain(), SOCK_STREAM, 0).rc_; + ASSERT_TRUE(SOCKET_VALID(fd)); + + Api::ApiPtr api = Api::createApiForTest(); + DispatcherPtr dispatcher(api->allocateDispatcher("test_thread")); + ReadyWatcher fd_event; + ReadyWatcher read_event; + ReadyWatcher write_event; + ReadyWatcher closed_event; + + ReadyWatcher prepare_watcher; + evwatch_prepare_new(&static_cast(dispatcher.get())->base(), onWatcherReady, + &prepare_watcher); + +#ifdef WIN32 + const FileTriggerType trigger = FileTriggerType::Level; +#else + const FileTriggerType trigger = FileTriggerType::Edge; +#endif + + Event::FileEventPtr file_event = dispatcher->createFileEvent( + fd, + [&](uint32_t events) -> void { + fd_event.ready(); + if (events & FileReadyType::Read) { + read_event.ready(); + file_event->activate(FileReadyType::Closed); + file_event->setEnabled(FileReadyType::Write | FileReadyType::Closed); + } + + if (events & FileReadyType::Write) { + write_event.ready(); + } + + if (events & FileReadyType::Closed) { + closed_event.ready(); + } + }, + trigger, FileReadyType::Read | FileReadyType::Write | FileReadyType::Closed); + + testing::InSequence s; + // First loop iteration: handle scheduled read event and the real write event produced by poll. + // Note that the real and injected events are combined and delivered in a single call to the fd + // callback. + EXPECT_CALL(prepare_watcher, ready()); + EXPECT_CALL(fd_event, ready()); + EXPECT_CALL(read_event, ready()); + EXPECT_CALL(write_event, ready()); + // Second loop iteration: handle real write event after resetting event mask via setEnabled. Close + // injected event is discarded by the setEnable call. 
+ EXPECT_CALL(prepare_watcher, ready()); + EXPECT_CALL(fd_event, ready()); + EXPECT_CALL(write_event, ready()); + // Third loop iteration: poll returned no new real events. + EXPECT_CALL(prepare_watcher, ready()); + + file_event->activate(FileReadyType::Read); + dispatcher->run(Event::Dispatcher::RunType::NonBlock); + + os_sys_calls_.close(fd); +} + #ifndef WIN32 // Libevent on Windows doesn't support edge trigger. TEST_F(FileEventImplTest, EdgeTrigger) { ReadyWatcher read_event; diff --git a/test/common/filesystem/BUILD b/test/common/filesystem/BUILD index 6f385036615a6..82e28ebda60d3 100644 --- a/test/common/filesystem/BUILD +++ b/test/common/filesystem/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( diff --git a/test/common/filesystem/directory_test.cc b/test/common/filesystem/directory_test.cc index b9bfa86a6a47f..82f44f977238b 100644 --- a/test/common/filesystem/directory_test.cc +++ b/test/common/filesystem/directory_test.cc @@ -1,13 +1,13 @@ #include #include #include -#include #include "common/filesystem/directory.h" #include "test/test_common/environment.h" #include "test/test_common/utility.h" +#include "absl/container/node_hash_set.h" #include "gmock/gmock.h" #include "gtest/gtest.h" @@ -66,7 +66,7 @@ struct EntryHash { } }; -using EntrySet = std::unordered_set; +using EntrySet = absl::node_hash_set; EntrySet getDirectoryContents(const std::string& dir_path, bool recursive) { Directory directory(dir_path); diff --git a/test/common/filesystem/filesystem_impl_test.cc b/test/common/filesystem/filesystem_impl_test.cc index 0531f3b0d6c84..7870c285e19d0 100644 --- a/test/common/filesystem/filesystem_impl_test.cc +++ b/test/common/filesystem/filesystem_impl_test.cc @@ -2,6 +2,7 @@ #include #include "common/common/assert.h" +#include "common/common/utility.h" #include "common/filesystem/filesystem_impl.h" 
#include "test/test_common/environment.h" @@ -103,7 +104,7 @@ TEST_F(FileSystemImplTest, FileReadToEndDoesNotExist) { EnvoyException); } -TEST_F(FileSystemImplTest, FileReadToEndBlacklisted) { +TEST_F(FileSystemImplTest, FileReadToEndDenylisted) { EXPECT_THROW(file_system_.fileReadToEnd("/dev/urandom"), EnvoyException); EXPECT_THROW(file_system_.fileReadToEnd("/proc/cpuinfo"), EnvoyException); EXPECT_THROW(file_system_.fileReadToEnd("/sys/block/sda/dev"), EnvoyException); @@ -117,7 +118,7 @@ TEST_F(FileSystemImplTest, CanonicalPathSuccess) { EXPECT_EQ("/", canonicalPath( TEST_F(FileSystemImplTest, CanonicalPathFail) { const Api::SysCallStringResult result = canonicalPath("/_some_non_existent_file"); EXPECT_TRUE(result.rc_.empty()); - EXPECT_STREQ("No such file or directory", ::strerror(result.errno_)); + EXPECT_EQ("No such file or directory", errorDetails(result.errno_)); } #endif diff --git a/test/common/filesystem/watcher_impl_test.cc b/test/common/filesystem/watcher_impl_test.cc index 64133ab70249c..7928de7d2b4f7 100644 --- a/test/common/filesystem/watcher_impl_test.cc +++ b/test/common/filesystem/watcher_impl_test.cc @@ -75,7 +75,6 @@ TEST_F(WatcherImplTest, Create) { { std::ofstream file(TestEnvironment::temporaryPath("envoy_test/watcher_target")); } WatchCallback callback; - EXPECT_CALL(callback, called(Watcher::Events::MovedTo)); watcher->addWatch(TestEnvironment::temporaryPath("envoy_test/watcher_link"), Watcher::Events::MovedTo, [&](uint32_t events) -> void { callback.called(events); @@ -85,6 +84,7 @@ TEST_F(WatcherImplTest, Create) { { std::ofstream file(TestEnvironment::temporaryPath("envoy_test/other_file")); } dispatcher_->run(Event::Dispatcher::RunType::NonBlock); + EXPECT_CALL(callback, called(Watcher::Events::MovedTo)); TestEnvironment::createSymlink(TestEnvironment::temporaryPath("envoy_test/watcher_target"), TestEnvironment::temporaryPath("envoy_test/watcher_new_link")); 
TestEnvironment::renameFile(TestEnvironment::temporaryPath("envoy_test/watcher_new_link"), @@ -109,7 +109,7 @@ TEST_F(WatcherImplTest, Modify) { file << "text" << std::flush; file.close(); EXPECT_CALL(callback, called(Watcher::Events::Modified)); - dispatcher_->run(Event::Dispatcher::RunType::NonBlock); + dispatcher_->run(Event::Dispatcher::RunType::Block); } TEST_F(WatcherImplTest, BadPath) { @@ -152,6 +152,9 @@ TEST_F(WatcherImplTest, RootDirectoryPath) { #endif } +// Skipping this test on Windows as there is no Windows API able to atomically move a +// directory/symlink when the new name is a non-empty directory +#ifndef WIN32 TEST_F(WatcherImplTest, SymlinkAtomicRename) { Filesystem::WatcherPtr watcher = dispatcher_->createFilesystemWatcher(); @@ -181,6 +184,7 @@ TEST_F(WatcherImplTest, SymlinkAtomicRename) { dispatcher_->run(Event::Dispatcher::RunType::Block); } +#endif } // namespace Filesystem } // namespace Envoy diff --git a/test/common/filter/http/BUILD b/test/common/filter/http/BUILD new file mode 100644 index 0000000000000..c6ce0344543c4 --- /dev/null +++ b/test/common/filter/http/BUILD @@ -0,0 +1,30 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_test", + "envoy_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_cc_test( + name = "filter_config_discovery_impl_test", + srcs = ["filter_config_discovery_impl_test.cc"], + deps = [ + "//source/common/config:utility_lib", + "//source/common/filter/http:filter_config_discovery_lib", + "//source/common/json:json_loader_lib", + "//source/extensions/filters/http/health_check:config", + "//source/extensions/filters/http/router:config", + "//test/mocks/local_info:local_info_mocks", + "//test/mocks/protobuf:protobuf_mocks", + "//test/mocks/server:server_mocks", + "//test/mocks/thread_local:thread_local_mocks", + "//test/mocks/upstream:upstream_mocks", + "//test/test_common:simulated_time_system_lib", + "//test/test_common:utility_lib", + 
"@envoy_api//envoy/config/core/v3:pkg_cc_proto", + "@envoy_api//envoy/service/discovery/v3:pkg_cc_proto", + ], +) diff --git a/test/common/filter/http/filter_config_discovery_impl_test.cc b/test/common/filter/http/filter_config_discovery_impl_test.cc new file mode 100644 index 0000000000000..2d7d7d0e00e61 --- /dev/null +++ b/test/common/filter/http/filter_config_discovery_impl_test.cc @@ -0,0 +1,300 @@ +#include +#include +#include + +#include "envoy/config/core/v3/config_source.pb.h" +#include "envoy/config/core/v3/extension.pb.h" +#include "envoy/config/core/v3/extension.pb.validate.h" +#include "envoy/service/discovery/v3/discovery.pb.h" +#include "envoy/stats/scope.h" + +#include "common/config/utility.h" +#include "common/filter/http/filter_config_discovery_impl.h" +#include "common/json/json_loader.h" + +#include "test/mocks/init/mocks.h" +#include "test/mocks/local_info/mocks.h" +#include "test/mocks/protobuf/mocks.h" +#include "test/mocks/server/mocks.h" +#include "test/mocks/thread_local/mocks.h" +#include "test/mocks/upstream/mocks.h" +#include "test/test_common/printers.h" +#include "test/test_common/simulated_time_system.h" +#include "test/test_common/utility.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::_; +using testing::InSequence; +using testing::Invoke; +using testing::Return; +using testing::ReturnRef; + +namespace Envoy { +namespace Filter { +namespace Http { +namespace { + +class FilterConfigDiscoveryTestBase : public testing::Test { +public: + FilterConfigDiscoveryTestBase() { + // For server_factory_context + ON_CALL(factory_context_, scope()).WillByDefault(ReturnRef(scope_)); + ON_CALL(factory_context_, messageValidationContext()) + .WillByDefault(ReturnRef(validation_context_)); + EXPECT_CALL(validation_context_, dynamicValidationVisitor()) + .WillRepeatedly(ReturnRef(validation_visitor_)); + EXPECT_CALL(factory_context_, initManager()).WillRepeatedly(ReturnRef(init_manager_)); + ON_CALL(init_manager_, 
add(_)).WillByDefault(Invoke([this](const Init::Target& target) { + init_target_handle_ = target.createHandle("test"); + })); + ON_CALL(init_manager_, initialize(_)) + .WillByDefault(Invoke( + [this](const Init::Watcher& watcher) { init_target_handle_->initialize(watcher); })); + // Thread local storage assumes a single (main) thread with no workers. + ON_CALL(factory_context_.admin_, concurrency()).WillByDefault(Return(0)); + } + + Event::SimulatedTimeSystem& timeSystem() { return time_system_; } + + Event::SimulatedTimeSystem time_system_; + NiceMock validation_context_; + NiceMock validation_visitor_; + NiceMock init_manager_; + NiceMock factory_context_; + Init::ExpectableWatcherImpl init_watcher_; + Init::TargetHandlePtr init_target_handle_; + NiceMock scope_; +}; + +// Test base class with a single provider. +class FilterConfigDiscoveryImplTest : public FilterConfigDiscoveryTestBase { +public: + FilterConfigDiscoveryImplTest() { + filter_config_provider_manager_ = std::make_unique(); + } + ~FilterConfigDiscoveryImplTest() override { factory_context_.thread_local_.shutdownThread(); } + + FilterConfigProviderPtr createProvider(std::string name, bool warm) { + EXPECT_CALL(init_manager_, add(_)); + envoy::config::core::v3::ConfigSource config_source; + TestUtility::loadFromYaml("ads: {}", config_source); + return filter_config_provider_manager_->createDynamicFilterConfigProvider( + config_source, name, {"envoy.extensions.filters.http.router.v3.Router"}, factory_context_, + "xds.", !warm); + } + + void setup(bool warm = true) { + provider_ = createProvider("foo", warm); + callbacks_ = factory_context_.cluster_manager_.subscription_factory_.callbacks_; + EXPECT_CALL(*factory_context_.cluster_manager_.subscription_factory_.subscription_, start(_)); + if (!warm) { + EXPECT_CALL(init_watcher_, ready()); + } + init_manager_.initialize(init_watcher_); + } + + std::unique_ptr filter_config_provider_manager_; + FilterConfigProviderPtr provider_; + 
Config::SubscriptionCallbacks* callbacks_{}; +}; + +TEST_F(FilterConfigDiscoveryImplTest, DestroyReady) { + setup(); + EXPECT_CALL(init_watcher_, ready()); +} + +TEST_F(FilterConfigDiscoveryImplTest, Basic) { + InSequence s; + setup(); + EXPECT_EQ("foo", provider_->name()); + EXPECT_EQ(absl::nullopt, provider_->config()); + + // Initial request. + { + const std::string response_yaml = R"EOF( + version_info: "1" + resources: + - "@type": type.googleapis.com/envoy.config.core.v3.TypedExtensionConfig + name: foo + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + )EOF"; + const auto response = + TestUtility::parseYaml(response_yaml); + const auto decoded_resources = + TestUtility::decodeResources(response); + + EXPECT_CALL(init_watcher_, ready()); + callbacks_->onConfigUpdate(decoded_resources.refvec_, response.version_info()); + EXPECT_NE(absl::nullopt, provider_->config()); + EXPECT_EQ(1UL, scope_.counter("xds.extension_config_discovery.foo.config_reload").value()); + EXPECT_EQ(0UL, scope_.counter("xds.extension_config_discovery.foo.config_fail").value()); + } + + // 2nd request with same response. Based on hash should not reload config. 
+ { + const std::string response_yaml = R"EOF( + version_info: "2" + resources: + - "@type": type.googleapis.com/envoy.config.core.v3.TypedExtensionConfig + name: foo + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + )EOF"; + const auto response = + TestUtility::parseYaml(response_yaml); + const auto decoded_resources = + TestUtility::decodeResources(response); + callbacks_->onConfigUpdate(decoded_resources.refvec_, response.version_info()); + EXPECT_EQ(1UL, scope_.counter("xds.extension_config_discovery.foo.config_reload").value()); + EXPECT_EQ(0UL, scope_.counter("xds.extension_config_discovery.foo.config_fail").value()); + } +} + +TEST_F(FilterConfigDiscoveryImplTest, ConfigFailed) { + InSequence s; + setup(); + EXPECT_CALL(init_watcher_, ready()); + callbacks_->onConfigUpdateFailed(Config::ConfigUpdateFailureReason::FetchTimedout, {}); + EXPECT_EQ(0UL, scope_.counter("xds.extension_config_discovery.foo.config_reload").value()); + EXPECT_EQ(1UL, scope_.counter("xds.extension_config_discovery.foo.config_fail").value()); +} + +TEST_F(FilterConfigDiscoveryImplTest, TooManyResources) { + InSequence s; + setup(); + const std::string response_yaml = R"EOF( + version_info: "1" + resources: + - "@type": type.googleapis.com/envoy.config.core.v3.TypedExtensionConfig + name: foo + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + - "@type": type.googleapis.com/envoy.config.core.v3.TypedExtensionConfig + name: foo + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + )EOF"; + const auto response = + TestUtility::parseYaml(response_yaml); + const auto decoded_resources = + TestUtility::decodeResources(response); + EXPECT_CALL(init_watcher_, ready()); + EXPECT_THROW_WITH_MESSAGE( + callbacks_->onConfigUpdate(decoded_resources.refvec_, response.version_info()), + EnvoyException, "Unexpected number of resources in ExtensionConfigDS response: 2"); + 
EXPECT_EQ(0UL, scope_.counter("xds.extension_config_discovery.foo.config_reload").value()); +} + +TEST_F(FilterConfigDiscoveryImplTest, WrongName) { + InSequence s; + setup(); + const std::string response_yaml = R"EOF( + version_info: "1" + resources: + - "@type": type.googleapis.com/envoy.config.core.v3.TypedExtensionConfig + name: bar + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + )EOF"; + const auto response = + TestUtility::parseYaml(response_yaml); + const auto decoded_resources = + TestUtility::decodeResources(response); + EXPECT_CALL(init_watcher_, ready()); + EXPECT_THROW_WITH_MESSAGE( + callbacks_->onConfigUpdate(decoded_resources.refvec_, response.version_info()), + EnvoyException, "Unexpected resource name in ExtensionConfigDS response: bar"); + EXPECT_EQ(0UL, scope_.counter("xds.extension_config_discovery.foo.config_reload").value()); +} + +TEST_F(FilterConfigDiscoveryImplTest, Incremental) { + InSequence s; + setup(); + const std::string response_yaml = R"EOF( +version_info: "1" +resources: +- "@type": type.googleapis.com/envoy.config.core.v3.TypedExtensionConfig + name: foo + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router +)EOF"; + const auto response = + TestUtility::parseYaml(response_yaml); + const auto decoded_resources = + TestUtility::decodeResources(response); + Protobuf::RepeatedPtrField remove; + *remove.Add() = "bar"; + EXPECT_CALL(init_watcher_, ready()); + callbacks_->onConfigUpdate(decoded_resources.refvec_, remove, response.version_info()); + EXPECT_NE(absl::nullopt, provider_->config()); + EXPECT_EQ(1UL, scope_.counter("xds.extension_config_discovery.foo.config_reload").value()); + EXPECT_EQ(0UL, scope_.counter("xds.extension_config_discovery.foo.config_fail").value()); +} + +TEST_F(FilterConfigDiscoveryImplTest, ApplyWithoutWarming) { + InSequence s; + setup(false); + EXPECT_EQ("foo", provider_->name()); + EXPECT_EQ(absl::nullopt, 
provider_->config()); + EXPECT_EQ(0UL, scope_.counter("xds.extension_config_discovery.foo.config_reload").value()); + EXPECT_EQ(0UL, scope_.counter("xds.extension_config_discovery.foo.config_fail").value()); +} + +TEST_F(FilterConfigDiscoveryImplTest, DualProviders) { + InSequence s; + setup(); + auto provider2 = createProvider("foo", true); + EXPECT_EQ("foo", provider2->name()); + EXPECT_EQ(absl::nullopt, provider2->config()); + const std::string response_yaml = R"EOF( + version_info: "1" + resources: + - "@type": type.googleapis.com/envoy.config.core.v3.TypedExtensionConfig + name: foo + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + )EOF"; + const auto response = + TestUtility::parseYaml(response_yaml); + const auto decoded_resources = + TestUtility::decodeResources(response); + EXPECT_CALL(init_watcher_, ready()); + callbacks_->onConfigUpdate(decoded_resources.refvec_, response.version_info()); + EXPECT_NE(absl::nullopt, provider_->config()); + EXPECT_NE(absl::nullopt, provider2->config()); + EXPECT_EQ(1UL, scope_.counter("xds.extension_config_discovery.foo.config_reload").value()); +} + +TEST_F(FilterConfigDiscoveryImplTest, DualProvidersInvalid) { + InSequence s; + setup(); + auto provider2 = createProvider("foo", true); + const std::string response_yaml = R"EOF( + version_info: "1" + resources: + - "@type": type.googleapis.com/envoy.config.core.v3.TypedExtensionConfig + name: foo + typed_config: + "@type": type.googleapis.com/envoy.config.filter.http.health_check.v2.HealthCheck + pass_through_mode: false + )EOF"; + const auto response = + TestUtility::parseYaml(response_yaml); + const auto decoded_resources = + TestUtility::decodeResources(response); + EXPECT_CALL(init_watcher_, ready()); + EXPECT_THROW_WITH_MESSAGE( + callbacks_->onConfigUpdate(decoded_resources.refvec_, response.version_info()), + EnvoyException, + "Error: filter config has type URL envoy.config.filter.http.health_check.v2.HealthCheck but " + 
"expect envoy.extensions.filters.http.router.v3.Router."); + EXPECT_EQ(0UL, scope_.counter("xds.extension_config_discovery.foo.config_reload").value()); +} + +} // namespace +} // namespace Http +} // namespace Filter +} // namespace Envoy diff --git a/test/common/formatter/BUILD b/test/common/formatter/BUILD new file mode 100644 index 0000000000000..bb4ffbcacdcf4 --- /dev/null +++ b/test/common/formatter/BUILD @@ -0,0 +1,87 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_benchmark_test", + "envoy_cc_benchmark_binary", + "envoy_cc_fuzz_test", + "envoy_cc_test", + "envoy_package", + "envoy_proto_library", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_proto_library( + name = "substitution_formatter_fuzz_proto", + srcs = ["substitution_formatter_fuzz.proto"], + deps = ["//test/fuzz:common_proto"], +) + +envoy_cc_fuzz_test( + name = "substitution_formatter_fuzz_test", + srcs = ["substitution_formatter_fuzz_test.cc"], + corpus = "substitution_formatter_corpus", + dictionaries = [ + "substitution_formatter_fuzz_test.dict", + "//test/fuzz:headers.dict", + ], + deps = [ + ":substitution_formatter_fuzz_proto_cc_proto", + "//source/common/formatter:substitution_formatter_lib", + "//test/fuzz:utility_lib", + ], +) + +envoy_cc_test( + name = "substitution_formatter_test", + srcs = ["substitution_formatter_test.cc"], + deps = [ + "//source/common/common:utility_lib", + "//source/common/formatter:substitution_formatter_lib", + "//source/common/http:header_map_lib", + "//source/common/router:string_accessor_lib", + "//test/mocks/api:api_mocks", + "//test/mocks/http:http_mocks", + "//test/mocks/ssl:ssl_mocks", + "//test/mocks/stream_info:stream_info_mocks", + "//test/mocks/upstream:upstream_mocks", + "//test/test_common:threadsafe_singleton_injector_lib", + "//test/test_common:utility_lib", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + ], +) + +envoy_cc_test( + name = "substitution_format_string_test", + srcs = 
["substitution_format_string_test.cc"], + deps = [ + "//source/common/formatter:substitution_format_string_lib", + "//test/mocks/http:http_mocks", + "//test/mocks/stream_info:stream_info_mocks", + "//test/test_common:utility_lib", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + ], +) + +envoy_cc_benchmark_binary( + name = "substitution_formatter_speed_test", + srcs = ["substitution_formatter_speed_test.cc"], + external_deps = [ + "benchmark", + ], + deps = [ + "//source/common/formatter:substitution_formatter_lib", + "//source/common/http:header_map_lib", + "//source/common/network:address_lib", + "//test/common/stream_info:test_util", + "//test/mocks/http:http_mocks", + "//test/mocks/stream_info:stream_info_mocks", + "//test/test_common:printers_lib", + ], +) + +envoy_benchmark_test( + name = "substitution_formatter_speed_test_benchmark_test", + benchmark_binary = "substitution_formatter_speed_test", +) diff --git a/test/common/formatter/substitution_format_string_test.cc b/test/common/formatter/substitution_format_string_test.cc new file mode 100644 index 0000000000000..22e4a030d4308 --- /dev/null +++ b/test/common/formatter/substitution_format_string_test.cc @@ -0,0 +1,97 @@ +#include "envoy/config/core/v3/substitution_format_string.pb.validate.h" + +#include "common/formatter/substitution_format_string.h" + +#include "test/mocks/http/mocks.h" +#include "test/mocks/stream_info/mocks.h" +#include "test/test_common/utility.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::Return; + +namespace Envoy { +namespace Formatter { + +class SubstitutionFormatStringUtilsTest : public ::testing::Test { +public: + SubstitutionFormatStringUtilsTest() { + absl::optional response_code{200}; + EXPECT_CALL(stream_info_, responseCode()).WillRepeatedly(Return(response_code)); + } + + Http::TestRequestHeaderMapImpl request_headers_{{":method", "GET"}, {":path", "/bar/foo"}}; + Http::TestResponseHeaderMapImpl response_headers_; + 
Http::TestResponseTrailerMapImpl response_trailers_; + StreamInfo::MockStreamInfo stream_info_; + std::string body_; + + envoy::config::core::v3::SubstitutionFormatString config_; +}; + +TEST_F(SubstitutionFormatStringUtilsTest, TestEmptyIsInvalid) { + envoy::config::core::v3::SubstitutionFormatString empty_config; + std::string err; + EXPECT_FALSE(Validate(empty_config, &err)); +} + +TEST_F(SubstitutionFormatStringUtilsTest, TestFromProtoConfigText) { + const std::string yaml = R"EOF( + text_format: "plain text, path=%REQ(:path)%, code=%RESPONSE_CODE%" +)EOF"; + TestUtility::loadFromYaml(yaml, config_); + + auto formatter = SubstitutionFormatStringUtils::fromProtoConfig(config_); + EXPECT_EQ("plain text, path=/bar/foo, code=200", + formatter->format(request_headers_, response_headers_, response_trailers_, stream_info_, + body_)); +} + +TEST_F(SubstitutionFormatStringUtilsTest, TestFromProtoConfigJson) { + const std::string yaml = R"EOF( + json_format: + text: "plain text" + path: "%REQ(:path)%" + code: "%RESPONSE_CODE%" +)EOF"; + TestUtility::loadFromYaml(yaml, config_); + + auto formatter = SubstitutionFormatStringUtils::fromProtoConfig(config_); + const auto out_json = formatter->format(request_headers_, response_headers_, response_trailers_, + stream_info_, body_); + + const std::string expected = R"EOF({ + "text": "plain text", + "path": "/bar/foo", + "code": 200 +})EOF"; + EXPECT_TRUE(TestUtility::jsonStringEqual(out_json, expected)); +} + +TEST_F(SubstitutionFormatStringUtilsTest, TestInvalidConfigs) { + const std::vector invalid_configs = { + R"( + json_format: + field: true +)", + R"( + json_format: + field: 200 +)", + R"( + json_format: + field: + nest_field: "value" +)", + }; + for (const auto& yaml : invalid_configs) { + TestUtility::loadFromYaml(yaml, config_); + EXPECT_THROW_WITH_MESSAGE(SubstitutionFormatStringUtils::fromProtoConfig(config_), + EnvoyException, + "Only string values are supported in the JSON access log format."); + } +} + +} // 
namespace Formatter +} // namespace Envoy diff --git a/test/common/access_log/access_log_formatter_corpus/clusterfuzz-testcase-minimized-access_log_formatter_fuzz_test-4673648219652096 b/test/common/formatter/substitution_formatter_corpus/clusterfuzz-testcase-minimized-access_log_formatter_fuzz_test-4673648219652096 similarity index 100% rename from test/common/access_log/access_log_formatter_corpus/clusterfuzz-testcase-minimized-access_log_formatter_fuzz_test-4673648219652096 rename to test/common/formatter/substitution_formatter_corpus/clusterfuzz-testcase-minimized-access_log_formatter_fuzz_test-4673648219652096 diff --git a/test/common/access_log/access_log_formatter_corpus/clusterfuzz-testcase-minimized-access_log_formatter_fuzz_test-5630958620901376 b/test/common/formatter/substitution_formatter_corpus/clusterfuzz-testcase-minimized-access_log_formatter_fuzz_test-5630958620901376 similarity index 100% rename from test/common/access_log/access_log_formatter_corpus/clusterfuzz-testcase-minimized-access_log_formatter_fuzz_test-5630958620901376 rename to test/common/formatter/substitution_formatter_corpus/clusterfuzz-testcase-minimized-access_log_formatter_fuzz_test-5630958620901376 diff --git a/test/common/access_log/access_log_formatter_corpus/clusterfuzz-testcase-minimized-access_log_formatter_fuzz_test-5633770020929536 b/test/common/formatter/substitution_formatter_corpus/clusterfuzz-testcase-minimized-access_log_formatter_fuzz_test-5633770020929536 similarity index 100% rename from test/common/access_log/access_log_formatter_corpus/clusterfuzz-testcase-minimized-access_log_formatter_fuzz_test-5633770020929536 rename to test/common/formatter/substitution_formatter_corpus/clusterfuzz-testcase-minimized-access_log_formatter_fuzz_test-5633770020929536 diff --git a/test/common/access_log/access_log_formatter_corpus/clusterfuzz-testcase-minimized-access_log_formatter_fuzz_test-5645869313687552 
b/test/common/formatter/substitution_formatter_corpus/clusterfuzz-testcase-minimized-access_log_formatter_fuzz_test-5645869313687552 similarity index 100% rename from test/common/access_log/access_log_formatter_corpus/clusterfuzz-testcase-minimized-access_log_formatter_fuzz_test-5645869313687552 rename to test/common/formatter/substitution_formatter_corpus/clusterfuzz-testcase-minimized-access_log_formatter_fuzz_test-5645869313687552 diff --git a/test/common/access_log/access_log_formatter_corpus/clusterfuzz-testcase-minimized-access_log_formatter_fuzz_test-5701824317751296 b/test/common/formatter/substitution_formatter_corpus/clusterfuzz-testcase-minimized-access_log_formatter_fuzz_test-5701824317751296 similarity index 100% rename from test/common/access_log/access_log_formatter_corpus/clusterfuzz-testcase-minimized-access_log_formatter_fuzz_test-5701824317751296 rename to test/common/formatter/substitution_formatter_corpus/clusterfuzz-testcase-minimized-access_log_formatter_fuzz_test-5701824317751296 diff --git a/test/common/access_log/access_log_formatter_corpus/clusterfuzz-testcase-minimized-access_log_formatter_fuzz_test-5758486359572480 b/test/common/formatter/substitution_formatter_corpus/clusterfuzz-testcase-minimized-access_log_formatter_fuzz_test-5758486359572480 similarity index 100% rename from test/common/access_log/access_log_formatter_corpus/clusterfuzz-testcase-minimized-access_log_formatter_fuzz_test-5758486359572480 rename to test/common/formatter/substitution_formatter_corpus/clusterfuzz-testcase-minimized-access_log_formatter_fuzz_test-5758486359572480 diff --git a/test/common/access_log/access_log_formatter_corpus/clusterfuzz-testcase-minimized-header_parser_fuzz_test-5633924724424704.fuzz b/test/common/formatter/substitution_formatter_corpus/clusterfuzz-testcase-minimized-header_parser_fuzz_test-5633924724424704.fuzz similarity index 100% rename from 
test/common/access_log/access_log_formatter_corpus/clusterfuzz-testcase-minimized-header_parser_fuzz_test-5633924724424704.fuzz rename to test/common/formatter/substitution_formatter_corpus/clusterfuzz-testcase-minimized-header_parser_fuzz_test-5633924724424704.fuzz diff --git a/test/common/access_log/access_log_formatter_corpus/dynamic_metadata b/test/common/formatter/substitution_formatter_corpus/dynamic_metadata similarity index 100% rename from test/common/access_log/access_log_formatter_corpus/dynamic_metadata rename to test/common/formatter/substitution_formatter_corpus/dynamic_metadata diff --git a/test/common/access_log/access_log_formatter_corpus/empty b/test/common/formatter/substitution_formatter_corpus/empty similarity index 100% rename from test/common/access_log/access_log_formatter_corpus/empty rename to test/common/formatter/substitution_formatter_corpus/empty diff --git a/test/common/access_log/access_log_formatter_corpus/headers b/test/common/formatter/substitution_formatter_corpus/headers similarity index 100% rename from test/common/access_log/access_log_formatter_corpus/headers rename to test/common/formatter/substitution_formatter_corpus/headers diff --git a/test/common/access_log/access_log_formatter_corpus/invalid_0 b/test/common/formatter/substitution_formatter_corpus/invalid_0 similarity index 100% rename from test/common/access_log/access_log_formatter_corpus/invalid_0 rename to test/common/formatter/substitution_formatter_corpus/invalid_0 diff --git a/test/common/access_log/access_log_formatter_corpus/invalid_1 b/test/common/formatter/substitution_formatter_corpus/invalid_1 similarity index 100% rename from test/common/access_log/access_log_formatter_corpus/invalid_1 rename to test/common/formatter/substitution_formatter_corpus/invalid_1 diff --git a/test/common/access_log/access_log_formatter_corpus/invalid_10 b/test/common/formatter/substitution_formatter_corpus/invalid_10 similarity index 100% rename from 
test/common/access_log/access_log_formatter_corpus/invalid_10 rename to test/common/formatter/substitution_formatter_corpus/invalid_10 diff --git a/test/common/access_log/access_log_formatter_corpus/invalid_11 b/test/common/formatter/substitution_formatter_corpus/invalid_11 similarity index 100% rename from test/common/access_log/access_log_formatter_corpus/invalid_11 rename to test/common/formatter/substitution_formatter_corpus/invalid_11 diff --git a/test/common/access_log/access_log_formatter_corpus/invalid_12 b/test/common/formatter/substitution_formatter_corpus/invalid_12 similarity index 100% rename from test/common/access_log/access_log_formatter_corpus/invalid_12 rename to test/common/formatter/substitution_formatter_corpus/invalid_12 diff --git a/test/common/access_log/access_log_formatter_corpus/invalid_13 b/test/common/formatter/substitution_formatter_corpus/invalid_13 similarity index 100% rename from test/common/access_log/access_log_formatter_corpus/invalid_13 rename to test/common/formatter/substitution_formatter_corpus/invalid_13 diff --git a/test/common/access_log/access_log_formatter_corpus/invalid_14 b/test/common/formatter/substitution_formatter_corpus/invalid_14 similarity index 100% rename from test/common/access_log/access_log_formatter_corpus/invalid_14 rename to test/common/formatter/substitution_formatter_corpus/invalid_14 diff --git a/test/common/access_log/access_log_formatter_corpus/invalid_15 b/test/common/formatter/substitution_formatter_corpus/invalid_15 similarity index 100% rename from test/common/access_log/access_log_formatter_corpus/invalid_15 rename to test/common/formatter/substitution_formatter_corpus/invalid_15 diff --git a/test/common/access_log/access_log_formatter_corpus/invalid_16 b/test/common/formatter/substitution_formatter_corpus/invalid_16 similarity index 100% rename from test/common/access_log/access_log_formatter_corpus/invalid_16 rename to test/common/formatter/substitution_formatter_corpus/invalid_16 diff --git 
a/test/common/access_log/access_log_formatter_corpus/invalid_17 b/test/common/formatter/substitution_formatter_corpus/invalid_17 similarity index 100% rename from test/common/access_log/access_log_formatter_corpus/invalid_17 rename to test/common/formatter/substitution_formatter_corpus/invalid_17 diff --git a/test/common/access_log/access_log_formatter_corpus/invalid_18 b/test/common/formatter/substitution_formatter_corpus/invalid_18 similarity index 100% rename from test/common/access_log/access_log_formatter_corpus/invalid_18 rename to test/common/formatter/substitution_formatter_corpus/invalid_18 diff --git a/test/common/access_log/access_log_formatter_corpus/invalid_19 b/test/common/formatter/substitution_formatter_corpus/invalid_19 similarity index 100% rename from test/common/access_log/access_log_formatter_corpus/invalid_19 rename to test/common/formatter/substitution_formatter_corpus/invalid_19 diff --git a/test/common/access_log/access_log_formatter_corpus/invalid_2 b/test/common/formatter/substitution_formatter_corpus/invalid_2 similarity index 100% rename from test/common/access_log/access_log_formatter_corpus/invalid_2 rename to test/common/formatter/substitution_formatter_corpus/invalid_2 diff --git a/test/common/access_log/access_log_formatter_corpus/invalid_3 b/test/common/formatter/substitution_formatter_corpus/invalid_3 similarity index 100% rename from test/common/access_log/access_log_formatter_corpus/invalid_3 rename to test/common/formatter/substitution_formatter_corpus/invalid_3 diff --git a/test/common/access_log/access_log_formatter_corpus/invalid_4 b/test/common/formatter/substitution_formatter_corpus/invalid_4 similarity index 100% rename from test/common/access_log/access_log_formatter_corpus/invalid_4 rename to test/common/formatter/substitution_formatter_corpus/invalid_4 diff --git a/test/common/access_log/access_log_formatter_corpus/invalid_5 b/test/common/formatter/substitution_formatter_corpus/invalid_5 similarity index 100% rename 
from test/common/access_log/access_log_formatter_corpus/invalid_5 rename to test/common/formatter/substitution_formatter_corpus/invalid_5 diff --git a/test/common/access_log/access_log_formatter_corpus/invalid_6 b/test/common/formatter/substitution_formatter_corpus/invalid_6 similarity index 100% rename from test/common/access_log/access_log_formatter_corpus/invalid_6 rename to test/common/formatter/substitution_formatter_corpus/invalid_6 diff --git a/test/common/access_log/access_log_formatter_corpus/invalid_7 b/test/common/formatter/substitution_formatter_corpus/invalid_7 similarity index 100% rename from test/common/access_log/access_log_formatter_corpus/invalid_7 rename to test/common/formatter/substitution_formatter_corpus/invalid_7 diff --git a/test/common/access_log/access_log_formatter_corpus/invalid_8 b/test/common/formatter/substitution_formatter_corpus/invalid_8 similarity index 100% rename from test/common/access_log/access_log_formatter_corpus/invalid_8 rename to test/common/formatter/substitution_formatter_corpus/invalid_8 diff --git a/test/common/access_log/access_log_formatter_corpus/invalid_9 b/test/common/formatter/substitution_formatter_corpus/invalid_9 similarity index 100% rename from test/common/access_log/access_log_formatter_corpus/invalid_9 rename to test/common/formatter/substitution_formatter_corpus/invalid_9 diff --git a/test/common/access_log/access_log_formatter_corpus/plain_string b/test/common/formatter/substitution_formatter_corpus/plain_string similarity index 100% rename from test/common/access_log/access_log_formatter_corpus/plain_string rename to test/common/formatter/substitution_formatter_corpus/plain_string diff --git a/test/common/access_log/access_log_formatter_corpus/response_code b/test/common/formatter/substitution_formatter_corpus/response_code similarity index 100% rename from test/common/access_log/access_log_formatter_corpus/response_code rename to test/common/formatter/substitution_formatter_corpus/response_code 
diff --git a/test/common/access_log/access_log_formatter_corpus/start_time_0 b/test/common/formatter/substitution_formatter_corpus/start_time_0 similarity index 100% rename from test/common/access_log/access_log_formatter_corpus/start_time_0 rename to test/common/formatter/substitution_formatter_corpus/start_time_0 diff --git a/test/common/access_log/access_log_formatter_corpus/start_time_1 b/test/common/formatter/substitution_formatter_corpus/start_time_1 similarity index 100% rename from test/common/access_log/access_log_formatter_corpus/start_time_1 rename to test/common/formatter/substitution_formatter_corpus/start_time_1 diff --git a/test/common/access_log/access_log_formatter_corpus/start_time_2 b/test/common/formatter/substitution_formatter_corpus/start_time_2 similarity index 100% rename from test/common/access_log/access_log_formatter_corpus/start_time_2 rename to test/common/formatter/substitution_formatter_corpus/start_time_2 diff --git a/test/common/access_log/access_log_formatter_corpus/start_time_3 b/test/common/formatter/substitution_formatter_corpus/start_time_3 similarity index 100% rename from test/common/access_log/access_log_formatter_corpus/start_time_3 rename to test/common/formatter/substitution_formatter_corpus/start_time_3 diff --git a/test/common/access_log/access_log_formatter_corpus/upstream_local_address b/test/common/formatter/substitution_formatter_corpus/upstream_local_address similarity index 100% rename from test/common/access_log/access_log_formatter_corpus/upstream_local_address rename to test/common/formatter/substitution_formatter_corpus/upstream_local_address diff --git a/test/common/access_log/access_log_formatter_fuzz.proto b/test/common/formatter/substitution_formatter_fuzz.proto similarity index 84% rename from test/common/access_log/access_log_formatter_fuzz.proto rename to test/common/formatter/substitution_formatter_fuzz.proto index 8a58841849e40..6cd0a2f116eaa 100644 --- 
a/test/common/access_log/access_log_formatter_fuzz.proto +++ b/test/common/formatter/substitution_formatter_fuzz.proto @@ -1,12 +1,12 @@ syntax = "proto3"; -package test.common.access_log; +package test.common.substitution; import "test/fuzz/common.proto"; import "validate/validate.proto"; -// Structured input for access_log_formatter_fuzz_test. +// Structured input for substitution_formatter_fuzz_test. message TestCase { // Do not allow invalid header characters in %REQ(...)% and %RESP(...)%. diff --git a/test/common/access_log/access_log_formatter_fuzz_test.cc b/test/common/formatter/substitution_formatter_fuzz_test.cc similarity index 69% rename from test/common/access_log/access_log_formatter_fuzz_test.cc rename to test/common/formatter/substitution_formatter_fuzz_test.cc index bf1e2eeb49847..38356f182a219 100644 --- a/test/common/access_log/access_log_formatter_fuzz_test.cc +++ b/test/common/formatter/substitution_formatter_fuzz_test.cc @@ -1,6 +1,6 @@ -#include "common/access_log/access_log_formatter.h" +#include "common/formatter/substitution_formatter.h" -#include "test/common/access_log/access_log_formatter_fuzz.pb.validate.h" +#include "test/common/formatter/substitution_formatter_fuzz.pb.validate.h" #include "test/fuzz/fuzz_runner.h" #include "test/fuzz/utility.h" @@ -8,11 +8,11 @@ namespace Envoy { namespace Fuzz { namespace { -DEFINE_PROTO_FUZZER(const test::common::access_log::TestCase& input) { +DEFINE_PROTO_FUZZER(const test::common::substitution::TestCase& input) { try { TestUtility::validate(input); - std::vector formatters = - AccessLog::AccessLogFormatParser::parse(input.format()); + std::vector formatters = + Formatter::SubstitutionFormatParser::parse(input.format()); const auto& request_headers = Fuzz::fromHeaders(input.request_headers()); const auto& response_headers = @@ -21,7 +21,8 @@ DEFINE_PROTO_FUZZER(const test::common::access_log::TestCase& input) { Fuzz::fromHeaders(input.response_trailers()); const auto& stream_info = 
Fuzz::fromStreamInfo(input.stream_info()); for (const auto& it : formatters) { - it->format(request_headers, response_headers, response_trailers, stream_info); + it->format(request_headers, response_headers, response_trailers, stream_info, + absl::string_view()); } ENVOY_LOG_MISC(trace, "Success"); } catch (const EnvoyException& e) { diff --git a/test/common/access_log/access_log_formatter_fuzz_test.dict b/test/common/formatter/substitution_formatter_fuzz_test.dict similarity index 100% rename from test/common/access_log/access_log_formatter_fuzz_test.dict rename to test/common/formatter/substitution_formatter_fuzz_test.dict diff --git a/test/common/access_log/access_log_formatter_speed_test.cc b/test/common/formatter/substitution_formatter_speed_test.cc similarity index 72% rename from test/common/access_log/access_log_formatter_speed_test.cc rename to test/common/formatter/substitution_formatter_speed_test.cc index c946cfab8ed1c..fd2b6c7fe7a92 100644 --- a/test/common/access_log/access_log_formatter_speed_test.cc +++ b/test/common/formatter/substitution_formatter_speed_test.cc @@ -1,4 +1,4 @@ -#include "common/access_log/access_log_formatter.h" +#include "common/formatter/substitution_formatter.h" #include "common/network/address_impl.h" #include "test/common/stream_info/test_util.h" @@ -6,10 +6,12 @@ #include "benchmark/benchmark.h" +namespace Envoy { + namespace { -std::unique_ptr MakeJsonFormatter(bool typed) { - std::unordered_map JsonLogFormat = { +std::unique_ptr makeJsonFormatter(bool typed) { + absl::flat_hash_map JsonLogFormat = { {"remote_address", "%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT%"}, {"start_time", "%START_TIME(%Y/%m/%dT%H:%M:%S%z %s)%"}, {"method", "%REQ(:METHOD)%"}, @@ -21,7 +23,7 @@ std::unique_ptr MakeJsonFormatter(bool type {"referer", "%REQ(REFERER)%"}, {"user-agent", "%REQ(USER-AGENT)%"}}; - return std::make_unique(JsonLogFormat, typed); + return std::make_unique(JsonLogFormat, typed); } std::unique_ptr makeStreamInfo() { @@ -33,8 +35,7 
@@ std::unique_ptr makeStreamInfo() { } // namespace -namespace Envoy { - +// NOLINTNEXTLINE(readability-identifier-naming) static void BM_AccessLogFormatter(benchmark::State& state) { std::unique_ptr stream_info = makeStreamInfo(); static const char* LogFormat = @@ -43,52 +44,59 @@ static void BM_AccessLogFormatter(benchmark::State& state) { "%REQ(X-FORWARDED-PROTO)%://%REQ(:AUTHORITY)%%REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL% " "s%RESPONSE_CODE% %BYTES_SENT% %DURATION% %REQ(REFERER)% \"%REQ(USER-AGENT)%\" - - -\n"; - std::unique_ptr formatter = - std::make_unique(LogFormat); + std::unique_ptr formatter = + std::make_unique(LogFormat); size_t output_bytes = 0; Http::TestRequestHeaderMapImpl request_headers; Http::TestResponseHeaderMapImpl response_headers; Http::TestResponseTrailerMapImpl response_trailers; + std::string body; for (auto _ : state) { output_bytes += - formatter->format(request_headers, response_headers, response_trailers, *stream_info) + formatter->format(request_headers, response_headers, response_trailers, *stream_info, body) .length(); } benchmark::DoNotOptimize(output_bytes); } BENCHMARK(BM_AccessLogFormatter); +// NOLINTNEXTLINE(readability-identifier-naming) static void BM_JsonAccessLogFormatter(benchmark::State& state) { std::unique_ptr stream_info = makeStreamInfo(); - std::unique_ptr json_formatter = MakeJsonFormatter(false); + std::unique_ptr json_formatter = makeJsonFormatter(false); size_t output_bytes = 0; Http::TestRequestHeaderMapImpl request_headers; Http::TestResponseHeaderMapImpl response_headers; Http::TestResponseTrailerMapImpl response_trailers; + std::string body; for (auto _ : state) { output_bytes += - json_formatter->format(request_headers, response_headers, response_trailers, *stream_info) + json_formatter + ->format(request_headers, response_headers, response_trailers, *stream_info, body) .length(); } benchmark::DoNotOptimize(output_bytes); } BENCHMARK(BM_JsonAccessLogFormatter); +// 
NOLINTNEXTLINE(readability-identifier-naming) static void BM_TypedJsonAccessLogFormatter(benchmark::State& state) { std::unique_ptr stream_info = makeStreamInfo(); - std::unique_ptr typed_json_formatter = - MakeJsonFormatter(true); + std::unique_ptr typed_json_formatter = + makeJsonFormatter(true); size_t output_bytes = 0; Http::TestRequestHeaderMapImpl request_headers; Http::TestResponseHeaderMapImpl response_headers; Http::TestResponseTrailerMapImpl response_trailers; + std::string body; for (auto _ : state) { - output_bytes += typed_json_formatter - ->format(request_headers, response_headers, response_trailers, *stream_info) - .length(); + output_bytes += + typed_json_formatter + ->format(request_headers, response_headers, response_trailers, *stream_info, body) + .length(); } benchmark::DoNotOptimize(output_bytes); } diff --git a/test/common/access_log/access_log_formatter_test.cc b/test/common/formatter/substitution_formatter_test.cc similarity index 73% rename from test/common/access_log/access_log_formatter_test.cc rename to test/common/formatter/substitution_formatter_test.cc index 0de62f0887da7..66ed458a23440 100644 --- a/test/common/access_log/access_log_formatter_test.cc +++ b/test/common/formatter/substitution_formatter_test.cc @@ -5,8 +5,8 @@ #include "envoy/config/core/v3/base.pb.h" -#include "common/access_log/access_log_formatter.h" #include "common/common/utility.h" +#include "common/formatter/substitution_formatter.h" #include "common/http/header_map_impl.h" #include "common/protobuf/utility.h" #include "common/router/string_accessor_impl.h" @@ -30,7 +30,7 @@ using testing::Return; using testing::ReturnRef; namespace Envoy { -namespace AccessLog { +namespace Formatter { namespace { class TestSerializedUnknownFilterState : public StreamInfo::FilterState::Object { @@ -92,43 +92,45 @@ class TestSerializedStringFilterState : public StreamInfo::FilterState::Object { std::string raw_string_; }; -TEST(AccessLogFormatUtilsTest, protocolToString) { - 
EXPECT_EQ("HTTP/1.0", AccessLogFormatUtils::protocolToString(Http::Protocol::Http10)); - EXPECT_EQ("HTTP/1.1", AccessLogFormatUtils::protocolToString(Http::Protocol::Http11)); - EXPECT_EQ("HTTP/2", AccessLogFormatUtils::protocolToString(Http::Protocol::Http2)); - EXPECT_EQ("-", AccessLogFormatUtils::protocolToString({})); +TEST(SubstitutionFormatUtilsTest, protocolToString) { + EXPECT_EQ("HTTP/1.0", SubstitutionFormatUtils::protocolToString(Http::Protocol::Http10)); + EXPECT_EQ("HTTP/1.1", SubstitutionFormatUtils::protocolToString(Http::Protocol::Http11)); + EXPECT_EQ("HTTP/2", SubstitutionFormatUtils::protocolToString(Http::Protocol::Http2)); + EXPECT_EQ("-", SubstitutionFormatUtils::protocolToString({})); } -TEST(AccessLogFormatterTest, plainStringFormatter) { +TEST(SubstitutionFormatterTest, plainStringFormatter) { PlainStringFormatter formatter("plain"); Http::TestRequestHeaderMapImpl request_headers{{":method", "GET"}, {":path", "/"}}; Http::TestResponseHeaderMapImpl response_headers; Http::TestResponseTrailerMapImpl response_trailers; StreamInfo::MockStreamInfo stream_info; + std::string body; - EXPECT_EQ("plain", - formatter.format(request_headers, response_headers, response_trailers, stream_info)); - EXPECT_THAT( - formatter.formatValue(request_headers, response_headers, response_trailers, stream_info), - ProtoEq(ValueUtil::stringValue("plain"))); + EXPECT_EQ("plain", formatter.format(request_headers, response_headers, response_trailers, + stream_info, body)); + EXPECT_THAT(formatter.formatValue(request_headers, response_headers, response_trailers, + stream_info, body), + ProtoEq(ValueUtil::stringValue("plain"))); } -TEST(AccessLogFormatterTest, streamInfoFormatter) { +TEST(SubstitutionFormatterTest, streamInfoFormatter) { EXPECT_THROW(StreamInfoFormatter formatter("unknown_field"), EnvoyException); NiceMock stream_info; Http::TestRequestHeaderMapImpl request_headers{{":method", "GET"}, {":path", "/"}}; Http::TestResponseHeaderMapImpl response_headers; 
Http::TestResponseTrailerMapImpl response_trailers; + std::string body; { StreamInfoFormatter request_duration_format("REQUEST_DURATION"); absl::optional dur = std::chrono::nanoseconds(5000000); EXPECT_CALL(stream_info, lastDownstreamRxByteReceived()).WillRepeatedly(Return(dur)); EXPECT_EQ("5", request_duration_format.format(request_headers, response_headers, - response_trailers, stream_info)); + response_trailers, stream_info, body)); EXPECT_THAT(request_duration_format.formatValue(request_headers, response_headers, - response_trailers, stream_info), + response_trailers, stream_info, body), ProtoEq(ValueUtil::numberValue(5.0))); } @@ -137,9 +139,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { absl::optional dur; EXPECT_CALL(stream_info, lastDownstreamRxByteReceived()).WillRepeatedly(Return(dur)); EXPECT_EQ("-", request_duration_format.format(request_headers, response_headers, - response_trailers, stream_info)); + response_trailers, stream_info, body)); EXPECT_THAT(request_duration_format.formatValue(request_headers, response_headers, - response_trailers, stream_info), + response_trailers, stream_info, body), ProtoEq(ValueUtil::nullValue())); } @@ -148,9 +150,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { absl::optional dur = std::chrono::nanoseconds(10000000); EXPECT_CALL(stream_info, firstUpstreamRxByteReceived()).WillRepeatedly(Return(dur)); EXPECT_EQ("10", response_duration_format.format(request_headers, response_headers, - response_trailers, stream_info)); + response_trailers, stream_info, body)); EXPECT_THAT(response_duration_format.formatValue(request_headers, response_headers, - response_trailers, stream_info), + response_trailers, stream_info, body), ProtoEq(ValueUtil::numberValue(10.0))); } @@ -159,9 +161,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { absl::optional dur; EXPECT_CALL(stream_info, firstUpstreamRxByteReceived()).WillRepeatedly(Return(dur)); EXPECT_EQ("-", response_duration_format.format(request_headers, 
response_headers, - response_trailers, stream_info)); + response_trailers, stream_info, body)); EXPECT_THAT(response_duration_format.formatValue(request_headers, response_headers, - response_trailers, stream_info), + response_trailers, stream_info, body), ProtoEq(ValueUtil::nullValue())); } @@ -174,9 +176,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { EXPECT_CALL(stream_info, lastDownstreamTxByteSent()).WillRepeatedly(Return(dur_downstream)); EXPECT_EQ("15", ttlb_duration_format.format(request_headers, response_headers, - response_trailers, stream_info)); + response_trailers, stream_info, body)); EXPECT_THAT(ttlb_duration_format.formatValue(request_headers, response_headers, - response_trailers, stream_info), + response_trailers, stream_info, body), ProtoEq(ValueUtil::numberValue(15.0))); } @@ -189,9 +191,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { EXPECT_CALL(stream_info, lastDownstreamTxByteSent()).WillRepeatedly(Return(dur_downstream)); EXPECT_EQ("-", ttlb_duration_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(ttlb_duration_format.formatValue(request_headers, response_headers, - response_trailers, stream_info), + response_trailers, stream_info, body), ProtoEq(ValueUtil::nullValue())); } @@ -199,9 +201,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { StreamInfoFormatter bytes_received_format("BYTES_RECEIVED"); EXPECT_CALL(stream_info, bytesReceived()).WillRepeatedly(Return(1)); EXPECT_EQ("1", bytes_received_format.format(request_headers, response_headers, - response_trailers, stream_info)); + response_trailers, stream_info, body)); EXPECT_THAT(bytes_received_format.formatValue(request_headers, response_headers, - response_trailers, stream_info), + response_trailers, stream_info, body), ProtoEq(ValueUtil::numberValue(1.0))); } @@ -210,9 +212,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { absl::optional protocol = Http::Protocol::Http11; 
EXPECT_CALL(stream_info, protocol()).WillRepeatedly(Return(protocol)); EXPECT_EQ("HTTP/1.1", protocol_format.format(request_headers, response_headers, - response_trailers, stream_info)); + response_trailers, stream_info, body)); EXPECT_THAT(protocol_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::stringValue("HTTP/1.1"))); } @@ -221,9 +223,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { absl::optional response_code{200}; EXPECT_CALL(stream_info, responseCode()).WillRepeatedly(Return(response_code)); EXPECT_EQ("200", response_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(response_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::numberValue(200.0))); } @@ -232,9 +234,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { absl::optional response_code; EXPECT_CALL(stream_info, responseCode()).WillRepeatedly(Return(response_code)); EXPECT_EQ("0", response_code_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(response_code_format.formatValue(request_headers, response_headers, - response_trailers, stream_info), + response_trailers, stream_info, body), ProtoEq(ValueUtil::numberValue(0.0))); } @@ -243,9 +245,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { absl::optional rc_details; EXPECT_CALL(stream_info, responseCodeDetails()).WillRepeatedly(ReturnRef(rc_details)); EXPECT_EQ("-", response_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(response_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::nullValue())); } @@ -254,9 +256,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { absl::optional 
rc_details{"via_upstream"}; EXPECT_CALL(stream_info, responseCodeDetails()).WillRepeatedly(ReturnRef(rc_details)); EXPECT_EQ("via_upstream", response_code_format.format(request_headers, response_headers, - response_trailers, stream_info)); + response_trailers, stream_info, body)); EXPECT_THAT(response_code_format.formatValue(request_headers, response_headers, - response_trailers, stream_info), + response_trailers, stream_info, body), ProtoEq(ValueUtil::stringValue("via_upstream"))); } @@ -264,9 +266,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { StreamInfoFormatter bytes_sent_format("BYTES_SENT"); EXPECT_CALL(stream_info, bytesSent()).WillRepeatedly(Return(1)); EXPECT_EQ("1", bytes_sent_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(bytes_sent_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::numberValue(1.0))); } @@ -275,9 +277,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { absl::optional dur = std::chrono::nanoseconds(15000000); EXPECT_CALL(stream_info, requestComplete()).WillRepeatedly(Return(dur)); EXPECT_EQ("15", duration_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(duration_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::numberValue(15.0))); } @@ -286,18 +288,18 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { ON_CALL(stream_info, hasResponseFlag(StreamInfo::ResponseFlag::LocalReset)) .WillByDefault(Return(true)); EXPECT_EQ("LR", response_flags_format.format(request_headers, response_headers, - response_trailers, stream_info)); + response_trailers, stream_info, body)); EXPECT_THAT(response_flags_format.formatValue(request_headers, response_headers, - response_trailers, stream_info), + response_trailers, stream_info, body), 
ProtoEq(ValueUtil::stringValue("LR"))); } { StreamInfoFormatter upstream_format("UPSTREAM_HOST"); EXPECT_EQ("10.0.0.1:443", upstream_format.format(request_headers, response_headers, - response_trailers, stream_info)); + response_trailers, stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::stringValue("10.0.0.1:443"))); } @@ -307,9 +309,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { EXPECT_CALL(stream_info.host_->cluster_, name()) .WillRepeatedly(ReturnRef(upstream_cluster_name)); EXPECT_EQ("cluster_name", upstream_format.format(request_headers, response_headers, - response_trailers, stream_info)); + response_trailers, stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::stringValue("cluster_name"))); } @@ -317,9 +319,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { StreamInfoFormatter upstream_format("UPSTREAM_HOST"); EXPECT_CALL(stream_info, upstreamHost()).WillRepeatedly(Return(nullptr)); EXPECT_EQ("-", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::nullValue())); } @@ -333,9 +335,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { StreamInfoFormatter upstream_format("HOSTNAME"); EXPECT_EQ("-", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::stringValue("-"))); } @@ -350,9 +352,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { StreamInfoFormatter 
upstream_format("HOSTNAME"); EXPECT_EQ("myhostname", upstream_format.format(request_headers, response_headers, - response_trailers, stream_info)); + response_trailers, stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::stringValue("myhostname"))); } @@ -360,27 +362,27 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { StreamInfoFormatter upstream_format("UPSTREAM_CLUSTER"); EXPECT_CALL(stream_info, upstreamHost()).WillRepeatedly(Return(nullptr)); EXPECT_EQ("-", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::nullValue())); } { StreamInfoFormatter upstream_format("DOWNSTREAM_LOCAL_ADDRESS"); EXPECT_EQ("127.0.0.2:0", upstream_format.format(request_headers, response_headers, - response_trailers, stream_info)); + response_trailers, stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::stringValue("127.0.0.2:0"))); } { StreamInfoFormatter upstream_format("DOWNSTREAM_LOCAL_ADDRESS_WITHOUT_PORT"); EXPECT_EQ("127.0.0.2", upstream_format.format(request_headers, response_headers, - response_trailers, stream_info)); + response_trailers, stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::stringValue("127.0.0.2"))); } @@ -392,9 +394,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { new Network::Address::Ipv4Instance("127.1.2.3", 8443)}; EXPECT_CALL(stream_info, downstreamLocalAddress()).WillRepeatedly(ReturnRef(address)); EXPECT_EQ("8443", upstream_format.format(request_headers, response_headers, 
response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::stringValue("8443"))); // Validate for IPv6 address @@ -402,54 +404,54 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { Network::Address::InstanceConstSharedPtr{new Network::Address::Ipv6Instance("::1", 9443)}; EXPECT_CALL(stream_info, downstreamLocalAddress()).WillRepeatedly(ReturnRef(address)); EXPECT_EQ("9443", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::stringValue("9443"))); // Validate for Pipe address = Network::Address::InstanceConstSharedPtr{new Network::Address::PipeInstance("/foo")}; EXPECT_CALL(stream_info, downstreamLocalAddress()).WillRepeatedly(ReturnRef(address)); EXPECT_EQ("", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::stringValue(""))); } { StreamInfoFormatter upstream_format("DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT"); EXPECT_EQ("127.0.0.1", upstream_format.format(request_headers, response_headers, - response_trailers, stream_info)); + response_trailers, stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::stringValue("127.0.0.1"))); } { StreamInfoFormatter upstream_format("DOWNSTREAM_REMOTE_ADDRESS"); EXPECT_EQ("127.0.0.1:0", upstream_format.format(request_headers, response_headers, - response_trailers, stream_info)); + response_trailers, stream_info, body)); 
EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::stringValue("127.0.0.1:0"))); } { StreamInfoFormatter upstream_format("DOWNSTREAM_DIRECT_REMOTE_ADDRESS_WITHOUT_PORT"); EXPECT_EQ("127.0.0.1", upstream_format.format(request_headers, response_headers, - response_trailers, stream_info)); + response_trailers, stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::stringValue("127.0.0.1"))); } { StreamInfoFormatter upstream_format("DOWNSTREAM_DIRECT_REMOTE_ADDRESS"); EXPECT_EQ("127.0.0.1:0", upstream_format.format(request_headers, response_headers, - response_trailers, stream_info)); + response_trailers, stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::stringValue("127.0.0.1:0"))); } @@ -459,9 +461,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { EXPECT_CALL(stream_info, requestedServerName()) .WillRepeatedly(ReturnRef(requested_server_name)); EXPECT_EQ("stub_server", upstream_format.format(request_headers, response_headers, - response_trailers, stream_info)); + response_trailers, stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::stringValue("stub_server"))); } @@ -471,9 +473,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { EXPECT_CALL(stream_info, requestedServerName()) .WillRepeatedly(ReturnRef(requested_server_name)); EXPECT_EQ("-", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), 
ProtoEq(ValueUtil::nullValue())); } { @@ -483,9 +485,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { EXPECT_CALL(*connection_info, uriSanPeerCertificate()).WillRepeatedly(Return(sans)); EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); EXPECT_EQ("san", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::stringValue("san"))); } @@ -496,7 +498,7 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { EXPECT_CALL(*connection_info, uriSanPeerCertificate()).WillRepeatedly(Return(sans)); EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); EXPECT_EQ("san1,san2", upstream_format.format(request_headers, response_headers, - response_trailers, stream_info)); + response_trailers, stream_info, body)); } { StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_URI_SAN"); @@ -505,18 +507,18 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { .WillRepeatedly(Return(std::vector())); EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); EXPECT_EQ("-", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::nullValue())); } { EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr)); StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_URI_SAN"); EXPECT_EQ("-", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), 
ProtoEq(ValueUtil::nullValue())); } { @@ -526,9 +528,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { EXPECT_CALL(*connection_info, uriSanLocalCertificate()).WillRepeatedly(Return(sans)); EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); EXPECT_EQ("san", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::stringValue("san"))); } { @@ -538,7 +540,7 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { EXPECT_CALL(*connection_info, uriSanLocalCertificate()).WillRepeatedly(Return(sans)); EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); EXPECT_EQ("san1,san2", upstream_format.format(request_headers, response_headers, - response_trailers, stream_info)); + response_trailers, stream_info, body)); } { StreamInfoFormatter upstream_format("DOWNSTREAM_LOCAL_URI_SAN"); @@ -547,18 +549,18 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { .WillRepeatedly(Return(std::vector())); EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); EXPECT_EQ("-", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::nullValue())); } { EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr)); StreamInfoFormatter upstream_format("DOWNSTREAM_LOCAL_URI_SAN"); EXPECT_EQ("-", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, 
body), ProtoEq(ValueUtil::nullValue())); } { @@ -569,9 +571,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { .WillRepeatedly(ReturnRef(subject_local)); EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); EXPECT_EQ("subject", upstream_format.format(request_headers, response_headers, - response_trailers, stream_info)); + response_trailers, stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::stringValue("subject"))); } { @@ -581,18 +583,18 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { .WillRepeatedly(ReturnRef(EMPTY_STRING)); EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); EXPECT_EQ("-", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::nullValue())); } { EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr)); StreamInfoFormatter upstream_format("DOWNSTREAM_LOCAL_SUBJECT"); EXPECT_EQ("-", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::nullValue())); } { @@ -602,9 +604,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { EXPECT_CALL(*connection_info, subjectPeerCertificate()).WillRepeatedly(ReturnRef(subject_peer)); EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); EXPECT_EQ("subject", upstream_format.format(request_headers, response_headers, - response_trailers, stream_info)); + response_trailers, stream_info, body)); 
EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::stringValue("subject"))); } { @@ -613,18 +615,18 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { EXPECT_CALL(*connection_info, subjectPeerCertificate()).WillRepeatedly(ReturnRef(EMPTY_STRING)); EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); EXPECT_EQ("-", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::nullValue())); } { EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr)); StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_SUBJECT"); EXPECT_EQ("-", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::nullValue())); } { @@ -634,9 +636,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { EXPECT_CALL(*connection_info, sessionId()).WillRepeatedly(ReturnRef(session_id)); EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); EXPECT_EQ("deadbeef", upstream_format.format(request_headers, response_headers, - response_trailers, stream_info)); + response_trailers, stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::stringValue("deadbeef"))); } { @@ -645,18 +647,18 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { EXPECT_CALL(*connection_info, sessionId()).WillRepeatedly(ReturnRef(EMPTY_STRING)); EXPECT_CALL(stream_info, 
downstreamSslConnection()).WillRepeatedly(Return(connection_info)); EXPECT_EQ("-", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::nullValue())); } { EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr)); StreamInfoFormatter upstream_format("DOWNSTREAM_TLS_SESSION_ID"); EXPECT_EQ("-", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::nullValue())); } { @@ -665,9 +667,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { EXPECT_CALL(*connection_info, ciphersuiteString()) .WillRepeatedly(Return("TLS_DHE_RSA_WITH_AES_256_GCM_SHA384")); EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); - EXPECT_EQ( - "TLS_DHE_RSA_WITH_AES_256_GCM_SHA384", - upstream_format.format(request_headers, response_headers, response_trailers, stream_info)); + EXPECT_EQ("TLS_DHE_RSA_WITH_AES_256_GCM_SHA384", + upstream_format.format(request_headers, response_headers, response_trailers, + stream_info, body)); } { StreamInfoFormatter upstream_format("DOWNSTREAM_TLS_CIPHER"); @@ -675,18 +677,18 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { EXPECT_CALL(*connection_info, ciphersuiteString()).WillRepeatedly(Return("")); EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); EXPECT_EQ("-", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), 
ProtoEq(ValueUtil::nullValue())); } { EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr)); StreamInfoFormatter upstream_format("DOWNSTREAM_TLS_CIPHER"); EXPECT_EQ("-", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::nullValue())); } { @@ -696,9 +698,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { EXPECT_CALL(*connection_info, tlsVersion()).WillRepeatedly(ReturnRef(tlsVersion)); EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); EXPECT_EQ("TLSv1.2", upstream_format.format(request_headers, response_headers, - response_trailers, stream_info)); + response_trailers, stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::stringValue("TLSv1.2"))); } { @@ -707,18 +709,18 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { EXPECT_CALL(*connection_info, tlsVersion()).WillRepeatedly(ReturnRef(EMPTY_STRING)); EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); EXPECT_EQ("-", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::nullValue())); } { EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr)); StreamInfoFormatter upstream_format("DOWNSTREAM_TLS_VERSION"); EXPECT_EQ("-", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, 
response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::nullValue())); } { @@ -729,9 +731,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { .WillRepeatedly(ReturnRef(expected_sha)); EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); EXPECT_EQ(expected_sha, upstream_format.format(request_headers, response_headers, - response_trailers, stream_info)); + response_trailers, stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::stringValue(expected_sha))); } { @@ -742,18 +744,53 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { .WillRepeatedly(ReturnRef(expected_sha)); EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); EXPECT_EQ("-", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::nullValue())); } { EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr)); StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_FINGERPRINT_256"); EXPECT_EQ("-", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); + EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, + stream_info, body), + ProtoEq(ValueUtil::nullValue())); + } + { + StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_FINGERPRINT_1"); + auto connection_info = std::make_shared(); + std::string expected_sha = "685a2db593d5f86d346cb1a297009c3b467ad77f1944aa799039a2fb3d531f3f"; + EXPECT_CALL(*connection_info, sha1PeerCertificateDigest()) + .WillRepeatedly(ReturnRef(expected_sha)); + EXPECT_CALL(stream_info, 
downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + EXPECT_EQ(expected_sha, upstream_format.format(request_headers, response_headers, + response_trailers, stream_info, body)); + EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, + stream_info, body), + ProtoEq(ValueUtil::stringValue(expected_sha))); + } + { + StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_FINGERPRINT_1"); + auto connection_info = std::make_shared(); + std::string expected_sha; + EXPECT_CALL(*connection_info, sha1PeerCertificateDigest()) + .WillRepeatedly(ReturnRef(expected_sha)); + EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + EXPECT_EQ("-", upstream_format.format(request_headers, response_headers, response_trailers, + stream_info, body)); + EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, + stream_info, body), + ProtoEq(ValueUtil::nullValue())); + } + { + EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr)); + StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_FINGERPRINT_1"); + EXPECT_EQ("-", upstream_format.format(request_headers, response_headers, response_trailers, + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::nullValue())); } { @@ -764,9 +801,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { .WillRepeatedly(ReturnRef(serial_number)); EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); EXPECT_EQ("b8b5ecc898f2124a", upstream_format.format(request_headers, response_headers, - response_trailers, stream_info)); + response_trailers, stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::stringValue("b8b5ecc898f2124a"))); 
} { @@ -776,18 +813,18 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { .WillRepeatedly(ReturnRef(EMPTY_STRING)); EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); EXPECT_EQ("-", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::nullValue())); } { EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr)); StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_SERIAL"); EXPECT_EQ("-", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::nullValue())); } { @@ -797,9 +834,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { "CN=Test CA,OU=Lyft Engineering,O=Lyft,L=San Francisco,ST=California,C=US"; EXPECT_CALL(*connection_info, issuerPeerCertificate()).WillRepeatedly(ReturnRef(issuer_peer)); EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); - EXPECT_EQ( - "CN=Test CA,OU=Lyft Engineering,O=Lyft,L=San Francisco,ST=California,C=US", - upstream_format.format(request_headers, response_headers, response_trailers, stream_info)); + EXPECT_EQ("CN=Test CA,OU=Lyft Engineering,O=Lyft,L=San Francisco,ST=California,C=US", + upstream_format.format(request_headers, response_headers, response_trailers, + stream_info, body)); } { StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_ISSUER"); @@ -807,18 +844,18 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { EXPECT_CALL(*connection_info, issuerPeerCertificate()).WillRepeatedly(ReturnRef(EMPTY_STRING)); EXPECT_CALL(stream_info, 
downstreamSslConnection()).WillRepeatedly(Return(connection_info)); EXPECT_EQ("-", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::nullValue())); } { EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr)); StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_ISSUER"); EXPECT_EQ("-", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::nullValue())); } { @@ -828,9 +865,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { "CN=Test Server,OU=Lyft Engineering,O=Lyft,L=San Francisco,ST=California,C=US"; EXPECT_CALL(*connection_info, subjectPeerCertificate()).WillRepeatedly(ReturnRef(subject_peer)); EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); - EXPECT_EQ( - "CN=Test Server,OU=Lyft Engineering,O=Lyft,L=San Francisco,ST=California,C=US", - upstream_format.format(request_headers, response_headers, response_trailers, stream_info)); + EXPECT_EQ("CN=Test Server,OU=Lyft Engineering,O=Lyft,L=San Francisco,ST=California,C=US", + upstream_format.format(request_headers, response_headers, response_trailers, + stream_info, body)); } { StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_SUBJECT"); @@ -838,18 +875,18 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { EXPECT_CALL(*connection_info, subjectPeerCertificate()).WillRepeatedly(ReturnRef(EMPTY_STRING)); EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); EXPECT_EQ("-", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + 
stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::nullValue())); } { EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr)); StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_SUBJECT"); EXPECT_EQ("-", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::nullValue())); } { @@ -860,9 +897,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { .WillRepeatedly(ReturnRef(expected_cert)); EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); EXPECT_EQ(expected_cert, upstream_format.format(request_headers, response_headers, - response_trailers, stream_info)); + response_trailers, stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::stringValue(expected_cert))); } { @@ -873,18 +910,18 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { .WillRepeatedly(ReturnRef(expected_cert)); EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); EXPECT_EQ("-", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::nullValue())); } { EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr)); StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_CERT"); EXPECT_EQ("-", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); 
EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::nullValue())); } { @@ -895,8 +932,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { SystemTime startTime = absl::ToChronoTime(abslStartTime); EXPECT_CALL(*connection_info, validFromPeerCertificate()).WillRepeatedly(Return(startTime)); EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); - EXPECT_EQ("2018-12-18T01:50:34.000Z", upstream_format.format(request_headers, response_headers, - response_trailers, stream_info)); + EXPECT_EQ("2018-12-18T01:50:34.000Z", + upstream_format.format(request_headers, response_headers, response_trailers, + stream_info, body)); } { StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_CERT_V_START"); @@ -904,18 +942,18 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { EXPECT_CALL(*connection_info, validFromPeerCertificate()).WillRepeatedly(Return(absl::nullopt)); EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); EXPECT_EQ("-", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::nullValue())); } { EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr)); StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_CERT_V_START"); EXPECT_EQ("-", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::nullValue())); } { @@ -926,8 +964,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { SystemTime endTime = absl::ToChronoTime(abslEndTime); 
EXPECT_CALL(*connection_info, expirationPeerCertificate()).WillRepeatedly(Return(endTime)); EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); - EXPECT_EQ("2020-12-17T01:50:34.000Z", upstream_format.format(request_headers, response_headers, - response_trailers, stream_info)); + EXPECT_EQ("2020-12-17T01:50:34.000Z", + upstream_format.format(request_headers, response_headers, response_trailers, + stream_info, body)); } { StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_CERT_V_END"); @@ -936,18 +975,18 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { .WillRepeatedly(Return(absl::nullopt)); EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); EXPECT_EQ("-", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::nullValue())); } { EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr)); StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_CERT_V_END"); EXPECT_EQ("-", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::nullValue())); } { @@ -956,9 +995,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { EXPECT_CALL(stream_info, upstreamTransportFailureReason()) .WillRepeatedly(ReturnRef(upstream_transport_failure_reason)); EXPECT_EQ("SSL error", upstream_format.format(request_headers, response_headers, - response_trailers, stream_info)); + response_trailers, stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), 
ProtoEq(ValueUtil::stringValue("SSL error"))); } @@ -968,165 +1007,168 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { EXPECT_CALL(stream_info, upstreamTransportFailureReason()) .WillRepeatedly(ReturnRef(upstream_transport_failure_reason)); EXPECT_EQ("-", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::nullValue())); } } -TEST(AccessLogFormatterTest, requestHeaderFormatter) { +TEST(SubstitutionFormatterTest, requestHeaderFormatter) { StreamInfo::MockStreamInfo stream_info; Http::TestRequestHeaderMapImpl request_header{{":method", "GET"}, {":path", "/"}}; Http::TestResponseHeaderMapImpl response_header{{":method", "PUT"}}; Http::TestResponseTrailerMapImpl response_trailer{{":method", "POST"}, {"test-2", "test-2"}}; + std::string body; { RequestHeaderFormatter formatter(":Method", "", absl::optional()); - EXPECT_EQ("GET", - formatter.format(request_header, response_header, response_trailer, stream_info)); + EXPECT_EQ("GET", formatter.format(request_header, response_header, response_trailer, + stream_info, body)); EXPECT_THAT( - formatter.formatValue(request_header, response_header, response_trailer, stream_info), + formatter.formatValue(request_header, response_header, response_trailer, stream_info, body), ProtoEq(ValueUtil::stringValue("GET"))); } { RequestHeaderFormatter formatter(":path", ":method", absl::optional()); - EXPECT_EQ("/", - formatter.format(request_header, response_header, response_trailer, stream_info)); + EXPECT_EQ("/", formatter.format(request_header, response_header, response_trailer, stream_info, + body)); EXPECT_THAT( - formatter.formatValue(request_header, response_header, response_trailer, stream_info), + formatter.formatValue(request_header, response_header, response_trailer, stream_info, body), 
ProtoEq(ValueUtil::stringValue("/"))); } { RequestHeaderFormatter formatter(":TEST", ":METHOD", absl::optional()); - EXPECT_EQ("GET", - formatter.format(request_header, response_header, response_trailer, stream_info)); + EXPECT_EQ("GET", formatter.format(request_header, response_header, response_trailer, + stream_info, body)); EXPECT_THAT( - formatter.formatValue(request_header, response_header, response_trailer, stream_info), + formatter.formatValue(request_header, response_header, response_trailer, stream_info, body), ProtoEq(ValueUtil::stringValue("GET"))); } { RequestHeaderFormatter formatter("does_not_exist", "", absl::optional()); - EXPECT_EQ("-", - formatter.format(request_header, response_header, response_trailer, stream_info)); + EXPECT_EQ("-", formatter.format(request_header, response_header, response_trailer, stream_info, + body)); EXPECT_THAT( - formatter.formatValue(request_header, response_header, response_trailer, stream_info), + formatter.formatValue(request_header, response_header, response_trailer, stream_info, body), ProtoEq(ValueUtil::nullValue())); } { RequestHeaderFormatter formatter(":Method", "", absl::optional(2)); - EXPECT_EQ("GE", - formatter.format(request_header, response_header, response_trailer, stream_info)); + EXPECT_EQ("GE", formatter.format(request_header, response_header, response_trailer, stream_info, + body)); EXPECT_THAT( - formatter.formatValue(request_header, response_header, response_trailer, stream_info), + formatter.formatValue(request_header, response_header, response_trailer, stream_info, body), ProtoEq(ValueUtil::stringValue("GE"))); } } -TEST(AccessLogFormatterTest, responseHeaderFormatter) { +TEST(SubstitutionFormatterTest, responseHeaderFormatter) { StreamInfo::MockStreamInfo stream_info; Http::TestRequestHeaderMapImpl request_header{{":method", "GET"}, {":path", "/"}}; Http::TestResponseHeaderMapImpl response_header{{":method", "PUT"}, {"test", "test"}}; Http::TestResponseTrailerMapImpl response_trailer{{":method", 
"POST"}, {"test-2", "test-2"}}; + std::string body; { ResponseHeaderFormatter formatter(":method", "", absl::optional()); - EXPECT_EQ("PUT", - formatter.format(request_header, response_header, response_trailer, stream_info)); + EXPECT_EQ("PUT", formatter.format(request_header, response_header, response_trailer, + stream_info, body)); EXPECT_THAT( - formatter.formatValue(request_header, response_header, response_trailer, stream_info), + formatter.formatValue(request_header, response_header, response_trailer, stream_info, body), ProtoEq(ValueUtil::stringValue("PUT"))); } { ResponseHeaderFormatter formatter("test", ":method", absl::optional()); - EXPECT_EQ("test", - formatter.format(request_header, response_header, response_trailer, stream_info)); + EXPECT_EQ("test", formatter.format(request_header, response_header, response_trailer, + stream_info, body)); EXPECT_THAT( - formatter.formatValue(request_header, response_header, response_trailer, stream_info), + formatter.formatValue(request_header, response_header, response_trailer, stream_info, body), ProtoEq(ValueUtil::stringValue("test"))); } { ResponseHeaderFormatter formatter(":path", ":method", absl::optional()); - EXPECT_EQ("PUT", - formatter.format(request_header, response_header, response_trailer, stream_info)); + EXPECT_EQ("PUT", formatter.format(request_header, response_header, response_trailer, + stream_info, body)); EXPECT_THAT( - formatter.formatValue(request_header, response_header, response_trailer, stream_info), + formatter.formatValue(request_header, response_header, response_trailer, stream_info, body), ProtoEq(ValueUtil::stringValue("PUT"))); } { ResponseHeaderFormatter formatter("does_not_exist", "", absl::optional()); - EXPECT_EQ("-", - formatter.format(request_header, response_header, response_trailer, stream_info)); + EXPECT_EQ("-", formatter.format(request_header, response_header, response_trailer, stream_info, + body)); EXPECT_THAT( - formatter.formatValue(request_header, response_header, 
response_trailer, stream_info), + formatter.formatValue(request_header, response_header, response_trailer, stream_info, body), ProtoEq(ValueUtil::nullValue())); } { ResponseHeaderFormatter formatter(":method", "", absl::optional(2)); - EXPECT_EQ("PU", - formatter.format(request_header, response_header, response_trailer, stream_info)); + EXPECT_EQ("PU", formatter.format(request_header, response_header, response_trailer, stream_info, + body)); EXPECT_THAT( - formatter.formatValue(request_header, response_header, response_trailer, stream_info), + formatter.formatValue(request_header, response_header, response_trailer, stream_info, body), ProtoEq(ValueUtil::stringValue("PU"))); } } -TEST(AccessLogFormatterTest, responseTrailerFormatter) { +TEST(SubstitutionFormatterTest, responseTrailerFormatter) { StreamInfo::MockStreamInfo stream_info; Http::TestRequestHeaderMapImpl request_header{{":method", "GET"}, {":path", "/"}}; Http::TestResponseHeaderMapImpl response_header{{":method", "PUT"}, {"test", "test"}}; Http::TestResponseTrailerMapImpl response_trailer{{":method", "POST"}, {"test-2", "test-2"}}; + std::string body; { ResponseTrailerFormatter formatter(":method", "", absl::optional()); - EXPECT_EQ("POST", - formatter.format(request_header, response_header, response_trailer, stream_info)); + EXPECT_EQ("POST", formatter.format(request_header, response_header, response_trailer, + stream_info, body)); EXPECT_THAT( - formatter.formatValue(request_header, response_header, response_trailer, stream_info), + formatter.formatValue(request_header, response_header, response_trailer, stream_info, body), ProtoEq(ValueUtil::stringValue("POST"))); } { ResponseTrailerFormatter formatter("test-2", ":method", absl::optional()); - EXPECT_EQ("test-2", - formatter.format(request_header, response_header, response_trailer, stream_info)); + EXPECT_EQ("test-2", formatter.format(request_header, response_header, response_trailer, + stream_info, body)); EXPECT_THAT( - 
formatter.formatValue(request_header, response_header, response_trailer, stream_info), + formatter.formatValue(request_header, response_header, response_trailer, stream_info, body), ProtoEq(ValueUtil::stringValue("test-2"))); } { ResponseTrailerFormatter formatter(":path", ":method", absl::optional()); - EXPECT_EQ("POST", - formatter.format(request_header, response_header, response_trailer, stream_info)); + EXPECT_EQ("POST", formatter.format(request_header, response_header, response_trailer, + stream_info, body)); EXPECT_THAT( - formatter.formatValue(request_header, response_header, response_trailer, stream_info), + formatter.formatValue(request_header, response_header, response_trailer, stream_info, body), ProtoEq(ValueUtil::stringValue("POST"))); } { ResponseTrailerFormatter formatter("does_not_exist", "", absl::optional()); - EXPECT_EQ("-", - formatter.format(request_header, response_header, response_trailer, stream_info)); + EXPECT_EQ("-", formatter.format(request_header, response_header, response_trailer, stream_info, + body)); EXPECT_THAT( - formatter.formatValue(request_header, response_header, response_trailer, stream_info), + formatter.formatValue(request_header, response_header, response_trailer, stream_info, body), ProtoEq(ValueUtil::nullValue())); } { ResponseTrailerFormatter formatter(":method", "", absl::optional(2)); - EXPECT_EQ("PO", - formatter.format(request_header, response_header, response_trailer, stream_info)); + EXPECT_EQ("PO", formatter.format(request_header, response_header, response_trailer, stream_info, + body)); EXPECT_THAT( - formatter.formatValue(request_header, response_header, response_trailer, stream_info), + formatter.formatValue(request_header, response_header, response_trailer, stream_info, body), ProtoEq(ValueUtil::stringValue("PO"))); } } @@ -1147,7 +1189,7 @@ void populateMetadataTestData(envoy::config::core::v3::Metadata& metadata) { (*metadata.mutable_filter_metadata())["com.test"] = struct_obj; } 
-TEST(AccessLogFormatterTest, DynamicMetadataFormatter) { +TEST(SubstitutionFormatterTest, DynamicMetadataFormatter) { envoy::config::core::v3::Metadata metadata; populateMetadataTestData(metadata); NiceMock stream_info; @@ -1156,95 +1198,99 @@ TEST(AccessLogFormatterTest, DynamicMetadataFormatter) { Http::TestRequestHeaderMapImpl request_headers; Http::TestResponseHeaderMapImpl response_headers; Http::TestResponseTrailerMapImpl response_trailers; + std::string body; { DynamicMetadataFormatter formatter("com.test", {}, absl::optional()); std::string val = - formatter.format(request_headers, response_headers, response_trailers, stream_info); + formatter.format(request_headers, response_headers, response_trailers, stream_info, body); EXPECT_TRUE(val.find("\"test_key\":\"test_value\"") != std::string::npos); EXPECT_TRUE(val.find("\"test_obj\":{\"inner_key\":\"inner_value\"}") != std::string::npos); ProtobufWkt::Value expected_val; expected_val.mutable_struct_value()->CopyFrom(metadata.filter_metadata().at("com.test")); - EXPECT_THAT( - formatter.formatValue(request_headers, response_headers, response_trailers, stream_info), - ProtoEq(expected_val)); + EXPECT_THAT(formatter.formatValue(request_headers, response_headers, response_trailers, + stream_info, body), + ProtoEq(expected_val)); } { DynamicMetadataFormatter formatter("com.test", {"test_key"}, absl::optional()); - EXPECT_EQ("\"test_value\"", - formatter.format(request_headers, response_headers, response_trailers, stream_info)); - EXPECT_THAT( - formatter.formatValue(request_headers, response_headers, response_trailers, stream_info), - ProtoEq(ValueUtil::stringValue("test_value"))); + EXPECT_EQ("\"test_value\"", formatter.format(request_headers, response_headers, + response_trailers, stream_info, body)); + EXPECT_THAT(formatter.formatValue(request_headers, response_headers, response_trailers, + stream_info, body), + ProtoEq(ValueUtil::stringValue("test_value"))); } { DynamicMetadataFormatter formatter("com.test", 
{"test_obj"}, absl::optional()); - EXPECT_EQ("{\"inner_key\":\"inner_value\"}", - formatter.format(request_headers, response_headers, response_trailers, stream_info)); + EXPECT_EQ( + "{\"inner_key\":\"inner_value\"}", + formatter.format(request_headers, response_headers, response_trailers, stream_info, body)); ProtobufWkt::Value expected_val; (*expected_val.mutable_struct_value()->mutable_fields())["inner_key"] = ValueUtil::stringValue("inner_value"); - EXPECT_THAT( - formatter.formatValue(request_headers, response_headers, response_trailers, stream_info), - ProtoEq(expected_val)); + EXPECT_THAT(formatter.formatValue(request_headers, response_headers, response_trailers, + stream_info, body), + ProtoEq(expected_val)); } { DynamicMetadataFormatter formatter("com.test", {"test_obj", "inner_key"}, absl::optional()); - EXPECT_EQ("\"inner_value\"", - formatter.format(request_headers, response_headers, response_trailers, stream_info)); - EXPECT_THAT( - formatter.formatValue(request_headers, response_headers, response_trailers, stream_info), - ProtoEq(ValueUtil::stringValue("inner_value"))); + EXPECT_EQ("\"inner_value\"", formatter.format(request_headers, response_headers, + response_trailers, stream_info, body)); + EXPECT_THAT(formatter.formatValue(request_headers, response_headers, response_trailers, + stream_info, body), + ProtoEq(ValueUtil::stringValue("inner_value"))); } // not found cases { DynamicMetadataFormatter formatter("com.notfound", {}, absl::optional()); - EXPECT_EQ("-", - formatter.format(request_headers, response_headers, response_trailers, stream_info)); - EXPECT_THAT( - formatter.formatValue(request_headers, response_headers, response_trailers, stream_info), - ProtoEq(ValueUtil::nullValue())); + EXPECT_EQ("-", formatter.format(request_headers, response_headers, response_trailers, + stream_info, body)); + EXPECT_THAT(formatter.formatValue(request_headers, response_headers, response_trailers, + stream_info, body), + ProtoEq(ValueUtil::nullValue())); } { 
DynamicMetadataFormatter formatter("com.test", {"notfound"}, absl::optional()); - EXPECT_EQ("-", - formatter.format(request_headers, response_headers, response_trailers, stream_info)); - EXPECT_THAT( - formatter.formatValue(request_headers, response_headers, response_trailers, stream_info), - ProtoEq(ValueUtil::nullValue())); + EXPECT_EQ("-", formatter.format(request_headers, response_headers, response_trailers, + stream_info, body)); + EXPECT_THAT(formatter.formatValue(request_headers, response_headers, response_trailers, + stream_info, body), + ProtoEq(ValueUtil::nullValue())); } { DynamicMetadataFormatter formatter("com.test", {"test_obj", "notfound"}, absl::optional()); - EXPECT_EQ("-", - formatter.format(request_headers, response_headers, response_trailers, stream_info)); - EXPECT_THAT( - formatter.formatValue(request_headers, response_headers, response_trailers, stream_info), - ProtoEq(ValueUtil::nullValue())); + EXPECT_EQ("-", formatter.format(request_headers, response_headers, response_trailers, + stream_info, body)); + EXPECT_THAT(formatter.formatValue(request_headers, response_headers, response_trailers, + stream_info, body), + ProtoEq(ValueUtil::nullValue())); } // size limit { DynamicMetadataFormatter formatter("com.test", {"test_key"}, absl::optional(5)); - EXPECT_EQ("\"test", - formatter.format(request_headers, response_headers, response_trailers, stream_info)); + EXPECT_EQ("\"test", formatter.format(request_headers, response_headers, response_trailers, + stream_info, body)); // N.B. Does not truncate. 
- EXPECT_THAT( - formatter.formatValue(request_headers, response_headers, response_trailers, stream_info), - ProtoEq(ValueUtil::stringValue("test_value"))); + EXPECT_THAT(formatter.formatValue(request_headers, response_headers, response_trailers, + stream_info, body), + ProtoEq(ValueUtil::stringValue("test_value"))); } } -TEST(AccessLogFormatterTest, FilterStateFormatter) { +TEST(SubstitutionFormatterTest, FilterStateFormatter) { Http::TestRequestHeaderMapImpl request_headers; Http::TestResponseHeaderMapImpl response_headers; Http::TestResponseTrailerMapImpl response_trailers; StreamInfo::MockStreamInfo stream_info; + std::string body; + stream_info.filter_state_->setData("key", std::make_unique("test_value"), StreamInfo::FilterState::StateType::ReadOnly); @@ -1266,106 +1312,108 @@ TEST(AccessLogFormatterTest, FilterStateFormatter) { { FilterStateFormatter formatter("key", absl::optional(), false); - EXPECT_EQ("\"test_value\"", - formatter.format(request_headers, response_headers, response_trailers, stream_info)); - EXPECT_THAT( - formatter.formatValue(request_headers, response_headers, response_trailers, stream_info), - ProtoEq(ValueUtil::stringValue("test_value"))); + EXPECT_EQ("\"test_value\"", formatter.format(request_headers, response_headers, + response_trailers, stream_info, body)); + EXPECT_THAT(formatter.formatValue(request_headers, response_headers, response_trailers, + stream_info, body), + ProtoEq(ValueUtil::stringValue("test_value"))); } { FilterStateFormatter formatter("key-struct", absl::optional(), false); - EXPECT_EQ("{\"inner_key\":\"inner_value\"}", - formatter.format(request_headers, response_headers, response_trailers, stream_info)); + EXPECT_EQ( + "{\"inner_key\":\"inner_value\"}", + formatter.format(request_headers, response_headers, response_trailers, stream_info, body)); ProtobufWkt::Value expected; (*expected.mutable_struct_value()->mutable_fields())["inner_key"] = ValueUtil::stringValue("inner_value"); - EXPECT_THAT( - 
formatter.formatValue(request_headers, response_headers, response_trailers, stream_info), - ProtoEq(expected)); + EXPECT_THAT(formatter.formatValue(request_headers, response_headers, response_trailers, + stream_info, body), + ProtoEq(expected)); } // not found case { FilterStateFormatter formatter("key-not-found", absl::optional(), false); - EXPECT_EQ("-", - formatter.format(request_headers, response_headers, response_trailers, stream_info)); - EXPECT_THAT( - formatter.formatValue(request_headers, response_headers, response_trailers, stream_info), - ProtoEq(ValueUtil::nullValue())); + EXPECT_EQ("-", formatter.format(request_headers, response_headers, response_trailers, + stream_info, body)); + EXPECT_THAT(formatter.formatValue(request_headers, response_headers, response_trailers, + stream_info, body), + ProtoEq(ValueUtil::nullValue())); } // no serialization case { FilterStateFormatter formatter("key-no-serialization", absl::optional(), false); - EXPECT_EQ("-", - formatter.format(request_headers, response_headers, response_trailers, stream_info)); - EXPECT_THAT( - formatter.formatValue(request_headers, response_headers, response_trailers, stream_info), - ProtoEq(ValueUtil::nullValue())); + EXPECT_EQ("-", formatter.format(request_headers, response_headers, response_trailers, + stream_info, body)); + EXPECT_THAT(formatter.formatValue(request_headers, response_headers, response_trailers, + stream_info, body), + ProtoEq(ValueUtil::nullValue())); } // serialization error case { FilterStateFormatter formatter("key-serialization-error", absl::optional(), false); - EXPECT_EQ("-", - formatter.format(request_headers, response_headers, response_trailers, stream_info)); - EXPECT_THAT( - formatter.formatValue(request_headers, response_headers, response_trailers, stream_info), - ProtoEq(ValueUtil::nullValue())); + EXPECT_EQ("-", formatter.format(request_headers, response_headers, response_trailers, + stream_info, body)); + EXPECT_THAT(formatter.formatValue(request_headers, 
response_headers, response_trailers, + stream_info, body), + ProtoEq(ValueUtil::nullValue())); } // size limit { FilterStateFormatter formatter("key", absl::optional(5), false); - EXPECT_EQ("\"test", - formatter.format(request_headers, response_headers, response_trailers, stream_info)); + EXPECT_EQ("\"test", formatter.format(request_headers, response_headers, response_trailers, + stream_info, body)); // N.B. Does not truncate. - EXPECT_THAT( - formatter.formatValue(request_headers, response_headers, response_trailers, stream_info), - ProtoEq(ValueUtil::stringValue("test_value"))); + EXPECT_THAT(formatter.formatValue(request_headers, response_headers, response_trailers, + stream_info, body), + ProtoEq(ValueUtil::stringValue("test_value"))); } // serializeAsString case { FilterStateFormatter formatter("test_key", absl::optional(), true); - EXPECT_EQ("test_value By PLAIN", - formatter.format(request_headers, response_headers, response_trailers, stream_info)); + EXPECT_EQ("test_value By PLAIN", formatter.format(request_headers, response_headers, + response_trailers, stream_info, body)); } // size limit for serializeAsString { FilterStateFormatter formatter("test_key", absl::optional(10), true); - EXPECT_EQ("test_value", - formatter.format(request_headers, response_headers, response_trailers, stream_info)); + EXPECT_EQ("test_value", formatter.format(request_headers, response_headers, response_trailers, + stream_info, body)); } // no serialization case for serializeAsString { FilterStateFormatter formatter("key-no-serialization", absl::optional(), true); - EXPECT_EQ("-", - formatter.format(request_headers, response_headers, response_trailers, stream_info)); - EXPECT_THAT( - formatter.formatValue(request_headers, response_headers, response_trailers, stream_info), - ProtoEq(ValueUtil::nullValue())); + EXPECT_EQ("-", formatter.format(request_headers, response_headers, response_trailers, + stream_info, body)); + EXPECT_THAT(formatter.formatValue(request_headers, 
response_headers, response_trailers, + stream_info, body), + ProtoEq(ValueUtil::nullValue())); } } -TEST(AccessLogFormatterTest, StartTimeFormatter) { +TEST(SubstitutionFormatterTest, StartTimeFormatter) { NiceMock stream_info; Http::TestRequestHeaderMapImpl request_headers{{":method", "GET"}, {":path", "/"}}; Http::TestResponseHeaderMapImpl response_headers; Http::TestResponseTrailerMapImpl response_trailers; + std::string body; { StartTimeFormatter start_time_format("%Y/%m/%d"); @@ -1373,9 +1421,9 @@ TEST(AccessLogFormatterTest, StartTimeFormatter) { SystemTime time = std::chrono::system_clock::from_time_t(test_epoch); EXPECT_CALL(stream_info, startTime()).WillRepeatedly(Return(time)); EXPECT_EQ("2018/03/28", start_time_format.format(request_headers, response_headers, - response_trailers, stream_info)); + response_trailers, stream_info, body)); EXPECT_THAT(start_time_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::stringValue("2018/03/28"))); } @@ -1385,19 +1433,20 @@ TEST(AccessLogFormatterTest, StartTimeFormatter) { EXPECT_CALL(stream_info, startTime()).WillRepeatedly(Return(time)); EXPECT_EQ(AccessLogDateTimeFormatter::fromTime(time), start_time_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(start_time_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::stringValue(AccessLogDateTimeFormatter::fromTime(time)))); } } -TEST(AccessLogFormatterTest, GrpcStatusFormatterTest) { +TEST(SubstitutionFormatterTest, GrpcStatusFormatterTest) { GrpcStatusFormatter formatter("grpc-status", "", absl::optional()); NiceMock stream_info; Http::TestRequestHeaderMapImpl request_header; Http::TestResponseHeaderMapImpl response_header; Http::TestResponseTrailerMapImpl response_trailer; + std::string body; std::array grpc_statuses{ "OK", "Canceled", 
"Unknown", "InvalidArgument", "DeadlineExceeded", @@ -1406,46 +1455,46 @@ TEST(AccessLogFormatterTest, GrpcStatusFormatterTest) { "DataLoss", "Unauthenticated"}; for (size_t i = 0; i < grpc_statuses.size(); ++i) { response_trailer = Http::TestResponseTrailerMapImpl{{"grpc-status", std::to_string(i)}}; - EXPECT_EQ(grpc_statuses[i], - formatter.format(request_header, response_header, response_trailer, stream_info)); + EXPECT_EQ(grpc_statuses[i], formatter.format(request_header, response_header, response_trailer, + stream_info, body)); EXPECT_THAT( - formatter.formatValue(request_header, response_header, response_trailer, stream_info), + formatter.formatValue(request_header, response_header, response_trailer, stream_info, body), ProtoEq(ValueUtil::stringValue(grpc_statuses[i]))); } { response_trailer = Http::TestResponseTrailerMapImpl{{"grpc-status", "-1"}}; - EXPECT_EQ("-1", - formatter.format(request_header, response_header, response_trailer, stream_info)); + EXPECT_EQ("-1", formatter.format(request_header, response_header, response_trailer, stream_info, + body)); EXPECT_THAT( - formatter.formatValue(request_header, response_header, response_trailer, stream_info), + formatter.formatValue(request_header, response_header, response_trailer, stream_info, body), ProtoEq(ValueUtil::stringValue("-1"))); response_trailer = Http::TestResponseTrailerMapImpl{{"grpc-status", "42738"}}; - EXPECT_EQ("42738", - formatter.format(request_header, response_header, response_trailer, stream_info)); + EXPECT_EQ("42738", formatter.format(request_header, response_header, response_trailer, + stream_info, body)); EXPECT_THAT( - formatter.formatValue(request_header, response_header, response_trailer, stream_info), + formatter.formatValue(request_header, response_header, response_trailer, stream_info, body), ProtoEq(ValueUtil::stringValue("42738"))); response_trailer.clear(); } { response_header = Http::TestResponseHeaderMapImpl{{"grpc-status", "-1"}}; - EXPECT_EQ("-1", - 
formatter.format(request_header, response_header, response_trailer, stream_info)); + EXPECT_EQ("-1", formatter.format(request_header, response_header, response_trailer, stream_info, + body)); EXPECT_THAT( - formatter.formatValue(request_header, response_header, response_trailer, stream_info), + formatter.formatValue(request_header, response_header, response_trailer, stream_info, body), ProtoEq(ValueUtil::stringValue("-1"))); response_header = Http::TestResponseHeaderMapImpl{{"grpc-status", "42738"}}; - EXPECT_EQ("42738", - formatter.format(request_header, response_header, response_trailer, stream_info)); + EXPECT_EQ("42738", formatter.format(request_header, response_header, response_trailer, + stream_info, body)); EXPECT_THAT( - formatter.formatValue(request_header, response_header, response_trailer, stream_info), + formatter.formatValue(request_header, response_header, response_trailer, stream_info, body), ProtoEq(ValueUtil::stringValue("42738"))); response_header.clear(); } } void verifyJsonOutput(std::string json_string, - std::unordered_map expected_map) { + absl::node_hash_map expected_map) { const auto parsed = Json::Factory::loadFromString(json_string); // Every json log line should have only one newline character, and it should be the last character @@ -1459,61 +1508,66 @@ void verifyJsonOutput(std::string json_string, } } -TEST(AccessLogFormatterTest, JsonFormatterPlainStringTest) { +TEST(SubstitutionFormatterTest, JsonFormatterPlainStringTest) { StreamInfo::MockStreamInfo stream_info; Http::TestRequestHeaderMapImpl request_header; Http::TestResponseHeaderMapImpl response_header; Http::TestResponseTrailerMapImpl response_trailer; + std::string body; envoy::config::core::v3::Metadata metadata; populateMetadataTestData(metadata); absl::optional protocol = Http::Protocol::Http11; EXPECT_CALL(stream_info, protocol()).WillRepeatedly(Return(protocol)); - std::unordered_map expected_json_map = { + absl::node_hash_map expected_json_map = { {"plain_string", 
"plain_string_value"}}; - std::unordered_map key_mapping = { + absl::flat_hash_map key_mapping = { {"plain_string", "plain_string_value"}}; JsonFormatterImpl formatter(key_mapping, false); - verifyJsonOutput(formatter.format(request_header, response_header, response_trailer, stream_info), - expected_json_map); + verifyJsonOutput( + formatter.format(request_header, response_header, response_trailer, stream_info, body), + expected_json_map); } -TEST(AccessLogFormatterTest, JsonFormatterSingleOperatorTest) { +TEST(SubstitutionFormatterTest, JsonFormatterSingleOperatorTest) { StreamInfo::MockStreamInfo stream_info; Http::TestRequestHeaderMapImpl request_header; Http::TestResponseHeaderMapImpl response_header; Http::TestResponseTrailerMapImpl response_trailer; + std::string body; envoy::config::core::v3::Metadata metadata; populateMetadataTestData(metadata); absl::optional protocol = Http::Protocol::Http11; EXPECT_CALL(stream_info, protocol()).WillRepeatedly(Return(protocol)); - std::unordered_map expected_json_map = {{"protocol", "HTTP/1.1"}}; + absl::node_hash_map expected_json_map = {{"protocol", "HTTP/1.1"}}; - std::unordered_map key_mapping = {{"protocol", "%PROTOCOL%"}}; + absl::flat_hash_map key_mapping = {{"protocol", "%PROTOCOL%"}}; JsonFormatterImpl formatter(key_mapping, false); - verifyJsonOutput(formatter.format(request_header, response_header, response_trailer, stream_info), - expected_json_map); + verifyJsonOutput( + formatter.format(request_header, response_header, response_trailer, stream_info, body), + expected_json_map); } -TEST(AccessLogFormatterTest, JsonFormatterNonExistentHeaderTest) { +TEST(SubstitutionFormatterTest, JsonFormatterNonExistentHeaderTest) { StreamInfo::MockStreamInfo stream_info; Http::TestRequestHeaderMapImpl request_header{{"some_request_header", "SOME_REQUEST_HEADER"}}; Http::TestResponseHeaderMapImpl response_header{{"some_response_header", "SOME_RESPONSE_HEADER"}}; Http::TestResponseTrailerMapImpl response_trailer; + 
std::string body; - std::unordered_map expected_json_map = { + absl::node_hash_map expected_json_map = { {"protocol", "HTTP/1.1"}, {"some_request_header", "SOME_REQUEST_HEADER"}, {"nonexistent_response_header", "-"}, {"some_response_header", "SOME_RESPONSE_HEADER"}}; - std::unordered_map key_mapping = { + absl::flat_hash_map key_mapping = { {"protocol", "%PROTOCOL%"}, {"some_request_header", "%REQ(some_request_header)%"}, {"nonexistent_response_header", "%RESP(nonexistent_response_header)%"}, @@ -1523,25 +1577,27 @@ TEST(AccessLogFormatterTest, JsonFormatterNonExistentHeaderTest) { absl::optional protocol = Http::Protocol::Http11; EXPECT_CALL(stream_info, protocol()).WillRepeatedly(Return(protocol)); - verifyJsonOutput(formatter.format(request_header, response_header, response_trailer, stream_info), - expected_json_map); + verifyJsonOutput( + formatter.format(request_header, response_header, response_trailer, stream_info, body), + expected_json_map); } -TEST(AccessLogFormatterTest, JsonFormatterAlternateHeaderTest) { +TEST(SubstitutionFormatterTest, JsonFormatterAlternateHeaderTest) { StreamInfo::MockStreamInfo stream_info; Http::TestRequestHeaderMapImpl request_header{ {"request_present_header", "REQUEST_PRESENT_HEADER"}}; Http::TestResponseHeaderMapImpl response_header{ {"response_present_header", "RESPONSE_PRESENT_HEADER"}}; Http::TestResponseTrailerMapImpl response_trailer; + std::string body; - std::unordered_map expected_json_map = { + absl::node_hash_map expected_json_map = { {"request_present_header_or_request_absent_header", "REQUEST_PRESENT_HEADER"}, {"request_absent_header_or_request_present_header", "REQUEST_PRESENT_HEADER"}, {"response_absent_header_or_response_absent_header", "RESPONSE_PRESENT_HEADER"}, {"response_present_header_or_response_absent_header", "RESPONSE_PRESENT_HEADER"}}; - std::unordered_map key_mapping = { + absl::flat_hash_map key_mapping = { {"request_present_header_or_request_absent_header", 
"%REQ(request_present_header?request_absent_header)%"}, {"request_absent_header_or_request_present_header", @@ -1555,49 +1611,53 @@ TEST(AccessLogFormatterTest, JsonFormatterAlternateHeaderTest) { absl::optional protocol = Http::Protocol::Http11; EXPECT_CALL(stream_info, protocol()).WillRepeatedly(Return(protocol)); - verifyJsonOutput(formatter.format(request_header, response_header, response_trailer, stream_info), - expected_json_map); + verifyJsonOutput( + formatter.format(request_header, response_header, response_trailer, stream_info, body), + expected_json_map); } -TEST(AccessLogFormatterTest, JsonFormatterDynamicMetadataTest) { +TEST(SubstitutionFormatterTest, JsonFormatterDynamicMetadataTest) { StreamInfo::MockStreamInfo stream_info; Http::TestRequestHeaderMapImpl request_header{{"first", "GET"}, {":path", "/"}}; Http::TestResponseHeaderMapImpl response_header{{"second", "PUT"}, {"test", "test"}}; Http::TestResponseTrailerMapImpl response_trailer{{"third", "POST"}, {"test-2", "test-2"}}; + std::string body; envoy::config::core::v3::Metadata metadata; populateMetadataTestData(metadata); EXPECT_CALL(stream_info, dynamicMetadata()).WillRepeatedly(ReturnRef(metadata)); EXPECT_CALL(Const(stream_info), dynamicMetadata()).WillRepeatedly(ReturnRef(metadata)); - std::unordered_map expected_json_map = { + absl::node_hash_map expected_json_map = { {"test_key", "\"test_value\""}, {"test_obj", "{\"inner_key\":\"inner_value\"}"}, {"test_obj.inner_key", "\"inner_value\""}}; - std::unordered_map key_mapping = { + absl::flat_hash_map key_mapping = { {"test_key", "%DYNAMIC_METADATA(com.test:test_key)%"}, {"test_obj", "%DYNAMIC_METADATA(com.test:test_obj)%"}, {"test_obj.inner_key", "%DYNAMIC_METADATA(com.test:test_obj:inner_key)%"}}; JsonFormatterImpl formatter(key_mapping, false); - verifyJsonOutput(formatter.format(request_header, response_header, response_trailer, stream_info), - expected_json_map); + verifyJsonOutput( + formatter.format(request_header, response_header, 
response_trailer, stream_info, body), + expected_json_map); } -TEST(AccessLogFormatterTest, JsonFormatterTypedDynamicMetadataTest) { +TEST(SubstitutionFormatterTest, JsonFormatterTypedDynamicMetadataTest) { StreamInfo::MockStreamInfo stream_info; Http::TestRequestHeaderMapImpl request_header{{"first", "GET"}, {":path", "/"}}; Http::TestResponseHeaderMapImpl response_header{{"second", "PUT"}, {"test", "test"}}; Http::TestResponseTrailerMapImpl response_trailer{{"third", "POST"}, {"test-2", "test-2"}}; + std::string body; envoy::config::core::v3::Metadata metadata; populateMetadataTestData(metadata); EXPECT_CALL(stream_info, dynamicMetadata()).WillRepeatedly(ReturnRef(metadata)); EXPECT_CALL(Const(stream_info), dynamicMetadata()).WillRepeatedly(ReturnRef(metadata)); - std::unordered_map key_mapping = { + absl::flat_hash_map key_mapping = { {"test_key", "%DYNAMIC_METADATA(com.test:test_key)%"}, {"test_obj", "%DYNAMIC_METADATA(com.test:test_obj)%"}, {"test_obj.inner_key", "%DYNAMIC_METADATA(com.test:test_obj:inner_key)%"}}; @@ -1605,7 +1665,7 @@ TEST(AccessLogFormatterTest, JsonFormatterTypedDynamicMetadataTest) { JsonFormatterImpl formatter(key_mapping, true); const std::string json = - formatter.format(request_header, response_header, response_trailer, stream_info); + formatter.format(request_header, response_header, response_trailer, stream_info, body); ProtobufWkt::Struct output; MessageUtil::loadFromJson(json, output); @@ -1616,11 +1676,12 @@ TEST(AccessLogFormatterTest, JsonFormatterTypedDynamicMetadataTest) { fields.at("test_obj").struct_value().fields().at("inner_key").string_value()); } -TEST(AccessLogFormatterTest, JsonFormatterFilterStateTest) { +TEST(SubstitutionFormatterTest, JsonFormatterFilterStateTest) { Http::TestRequestHeaderMapImpl request_headers; Http::TestResponseHeaderMapImpl response_headers; Http::TestResponseTrailerMapImpl response_trailers; StreamInfo::MockStreamInfo stream_info; + std::string body; 
stream_info.filter_state_->setData("test_key", std::make_unique("test_value"), StreamInfo::FilterState::StateType::ReadOnly); @@ -1629,24 +1690,25 @@ TEST(AccessLogFormatterTest, JsonFormatterFilterStateTest) { StreamInfo::FilterState::StateType::ReadOnly); EXPECT_CALL(Const(stream_info), filterState()).Times(testing::AtLeast(1)); - std::unordered_map expected_json_map = { + absl::node_hash_map expected_json_map = { {"test_key", "\"test_value\""}, {"test_obj", "{\"inner_key\":\"inner_value\"}"}}; - std::unordered_map key_mapping = { + absl::flat_hash_map key_mapping = { {"test_key", "%FILTER_STATE(test_key)%"}, {"test_obj", "%FILTER_STATE(test_obj)%"}}; JsonFormatterImpl formatter(key_mapping, false); verifyJsonOutput( - formatter.format(request_headers, response_headers, response_trailers, stream_info), + formatter.format(request_headers, response_headers, response_trailers, stream_info, body), expected_json_map); } -TEST(AccessLogFormatterTest, JsonFormatterTypedFilterStateTest) { +TEST(SubstitutionFormatterTest, JsonFormatterTypedFilterStateTest) { Http::TestRequestHeaderMapImpl request_headers; Http::TestResponseHeaderMapImpl response_headers; Http::TestResponseTrailerMapImpl response_trailers; StreamInfo::MockStreamInfo stream_info; + std::string body; stream_info.filter_state_->setData("test_key", std::make_unique("test_value"), StreamInfo::FilterState::StateType::ReadOnly); @@ -1655,13 +1717,13 @@ TEST(AccessLogFormatterTest, JsonFormatterTypedFilterStateTest) { StreamInfo::FilterState::StateType::ReadOnly); EXPECT_CALL(Const(stream_info), filterState()).Times(testing::AtLeast(1)); - std::unordered_map key_mapping = { + absl::flat_hash_map key_mapping = { {"test_key", "%FILTER_STATE(test_key)%"}, {"test_obj", "%FILTER_STATE(test_obj)%"}}; JsonFormatterImpl formatter(key_mapping, true); std::string json = - formatter.format(request_headers, response_headers, response_trailers, stream_info); + formatter.format(request_headers, response_headers, 
response_trailers, stream_info, body); ProtobufWkt::Struct output; MessageUtil::loadFromJson(json, output); @@ -1673,52 +1735,54 @@ TEST(AccessLogFormatterTest, JsonFormatterTypedFilterStateTest) { // Test new specifier (PLAIN/TYPED) of FilterState. Ensure that after adding additional specifier, // the FilterState can call the serializeAsProto or serializeAsString methods correctly. -TEST(AccessLogFormatterTest, FilterStateSpeciferTest) { +TEST(SubstitutionFormatterTest, FilterStateSpeciferTest) { Http::TestRequestHeaderMapImpl request_headers; Http::TestResponseHeaderMapImpl response_headers; Http::TestResponseTrailerMapImpl response_trailers; StreamInfo::MockStreamInfo stream_info; + std::string body; stream_info.filter_state_->setData( "test_key", std::make_unique("test_value"), StreamInfo::FilterState::StateType::ReadOnly); EXPECT_CALL(Const(stream_info), filterState()).Times(testing::AtLeast(1)); - std::unordered_map expected_json_map = { + absl::node_hash_map expected_json_map = { {"test_key_plain", "test_value By PLAIN"}, {"test_key_typed", "\"test_value By TYPED\""}, }; - std::unordered_map key_mapping = { + absl::flat_hash_map key_mapping = { {"test_key_plain", "%FILTER_STATE(test_key:PLAIN)%"}, {"test_key_typed", "%FILTER_STATE(test_key:TYPED)%"}}; JsonFormatterImpl formatter(key_mapping, false); verifyJsonOutput( - formatter.format(request_headers, response_headers, response_trailers, stream_info), + formatter.format(request_headers, response_headers, response_trailers, stream_info, body), expected_json_map); } // Test new specifier (PLAIN/TYPED) of FilterState and convert the output log string to proto // and then verify the result. 
-TEST(AccessLogFormatterTest, TypedFilterStateSpeciferTest) { +TEST(SubstitutionFormatterTest, TypedFilterStateSpeciferTest) { Http::TestRequestHeaderMapImpl request_headers; Http::TestResponseHeaderMapImpl response_headers; Http::TestResponseTrailerMapImpl response_trailers; StreamInfo::MockStreamInfo stream_info; + std::string body; stream_info.filter_state_->setData( "test_key", std::make_unique("test_value"), StreamInfo::FilterState::StateType::ReadOnly); EXPECT_CALL(Const(stream_info), filterState()).Times(testing::AtLeast(1)); - std::unordered_map key_mapping = { + absl::flat_hash_map key_mapping = { {"test_key_plain", "%FILTER_STATE(test_key:PLAIN)%"}, {"test_key_typed", "%FILTER_STATE(test_key:TYPED)%"}}; JsonFormatterImpl formatter(key_mapping, true); std::string json = - formatter.format(request_headers, response_headers, response_trailers, stream_info); + formatter.format(request_headers, response_headers, response_trailers, stream_info, body); ProtobufWkt::Struct output; MessageUtil::loadFromJson(json, output); @@ -1729,17 +1793,18 @@ TEST(AccessLogFormatterTest, TypedFilterStateSpeciferTest) { } // Error specifier will cause an exception to be thrown. -TEST(AccessLogFormatterTest, FilterStateErrorSpeciferTest) { +TEST(SubstitutionFormatterTest, FilterStateErrorSpeciferTest) { Http::TestRequestHeaderMapImpl request_headers; Http::TestResponseHeaderMapImpl response_headers; Http::TestResponseTrailerMapImpl response_trailers; StreamInfo::MockStreamInfo stream_info; + std::string body; stream_info.filter_state_->setData( "test_key", std::make_unique("test_value"), StreamInfo::FilterState::StateType::ReadOnly); // 'ABCDE' is error specifier. 
- std::unordered_map key_mapping = { + absl::flat_hash_map key_mapping = { {"test_key_plain", "%FILTER_STATE(test_key:ABCDE)%"}, {"test_key_typed", "%FILTER_STATE(test_key:TYPED)%"}}; @@ -1747,24 +1812,25 @@ TEST(AccessLogFormatterTest, FilterStateErrorSpeciferTest) { "Invalid filter state serialize type, only support PLAIN/TYPED."); } -TEST(AccessLogFormatterTest, JsonFormatterStartTimeTest) { +TEST(SubstitutionFormatterTest, JsonFormatterStartTimeTest) { StreamInfo::MockStreamInfo stream_info; Http::TestRequestHeaderMapImpl request_header; Http::TestResponseHeaderMapImpl response_header; Http::TestResponseTrailerMapImpl response_trailer; + std::string body; time_t expected_time_in_epoch = 1522280158; SystemTime time = std::chrono::system_clock::from_time_t(expected_time_in_epoch); EXPECT_CALL(stream_info, startTime()).WillRepeatedly(Return(time)); - std::unordered_map expected_json_map = { + absl::node_hash_map expected_json_map = { {"simple_date", "2018/03/28"}, {"test_time", fmt::format("{}", expected_time_in_epoch)}, {"bad_format", "bad_format"}, {"default", "2018-03-28T23:35:58.000Z"}, {"all_zeroes", "000000000.0.00.000"}}; - std::unordered_map key_mapping = { + absl::flat_hash_map key_mapping = { {"simple_date", "%START_TIME(%Y/%m/%d)%"}, {"test_time", "%START_TIME(%s)%"}, {"bad_format", "%START_TIME(bad_format)%"}, @@ -1772,22 +1838,24 @@ TEST(AccessLogFormatterTest, JsonFormatterStartTimeTest) { {"all_zeroes", "%START_TIME(%f.%1f.%2f.%3f)%"}}; JsonFormatterImpl formatter(key_mapping, false); - verifyJsonOutput(formatter.format(request_header, response_header, response_trailer, stream_info), - expected_json_map); + verifyJsonOutput( + formatter.format(request_header, response_header, response_trailer, stream_info, body), + expected_json_map); } -TEST(AccessLogFormatterTest, JsonFormatterMultiTokenTest) { +TEST(SubstitutionFormatterTest, JsonFormatterMultiTokenTest) { { StreamInfo::MockStreamInfo stream_info; Http::TestRequestHeaderMapImpl 
request_header{{"some_request_header", "SOME_REQUEST_HEADER"}}; Http::TestResponseHeaderMapImpl response_header{ {"some_response_header", "SOME_RESPONSE_HEADER"}}; Http::TestResponseTrailerMapImpl response_trailer; + std::string body; - std::unordered_map expected_json_map = { + absl::node_hash_map expected_json_map = { {"multi_token_field", "HTTP/1.1 plainstring SOME_REQUEST_HEADER SOME_RESPONSE_HEADER"}}; - std::unordered_map key_mapping = { + absl::flat_hash_map key_mapping = { {"multi_token_field", "%PROTOCOL% plainstring %REQ(some_request_header)% %RESP(some_response_header)%"}}; @@ -1798,7 +1866,7 @@ TEST(AccessLogFormatterTest, JsonFormatterMultiTokenTest) { EXPECT_CALL(stream_info, protocol()).WillRepeatedly(Return(protocol)); const auto parsed = Json::Factory::loadFromString( - formatter.format(request_header, response_header, response_trailer, stream_info)); + formatter.format(request_header, response_header, response_trailer, stream_info, body)); for (const auto& pair : expected_json_map) { EXPECT_EQ(parsed->getString(pair.first), pair.second); } @@ -1806,11 +1874,12 @@ TEST(AccessLogFormatterTest, JsonFormatterMultiTokenTest) { } } -TEST(AccessLogFormatterTest, JsonFormatterTypedTest) { +TEST(SubstitutionFormatterTest, JsonFormatterTypedTest) { Http::TestRequestHeaderMapImpl request_headers; Http::TestResponseHeaderMapImpl response_headers; Http::TestResponseTrailerMapImpl response_trailers; StreamInfo::MockStreamInfo stream_info; + std::string body; EXPECT_CALL(Const(stream_info), lastDownstreamRxByteReceived()) .WillRepeatedly(Return(std::chrono::nanoseconds(5000000))); @@ -1827,7 +1896,7 @@ TEST(AccessLogFormatterTest, JsonFormatterTypedTest) { StreamInfo::FilterState::StateType::ReadOnly); EXPECT_CALL(Const(stream_info), filterState()).Times(testing::AtLeast(1)); - std::unordered_map key_mapping = { + absl::flat_hash_map key_mapping = { {"request_duration", "%REQUEST_DURATION%"}, {"request_duration_multi", "%REQUEST_DURATION%ms"}, {"filter_state", 
"%FILTER_STATE(test_obj)%"}, @@ -1836,7 +1905,7 @@ TEST(AccessLogFormatterTest, JsonFormatterTypedTest) { JsonFormatterImpl formatter(key_mapping, true); const auto json = - formatter.format(request_headers, response_headers, response_trailers, stream_info); + formatter.format(request_headers, response_headers, response_trailers, stream_info, body); ProtobufWkt::Struct output; MessageUtil::loadFromJson(json, output); @@ -1848,11 +1917,12 @@ TEST(AccessLogFormatterTest, JsonFormatterTypedTest) { EXPECT_THAT(output.fields().at("filter_state"), ProtoEq(expected)); } -TEST(AccessLogFormatterTest, CompositeFormatterSuccess) { +TEST(SubstitutionFormatterTest, CompositeFormatterSuccess) { StreamInfo::MockStreamInfo stream_info; Http::TestRequestHeaderMapImpl request_header{{"first", "GET"}, {":path", "/"}}; Http::TestResponseHeaderMapImpl response_header{{"second", "PUT"}, {"test", "test"}}; Http::TestResponseTrailerMapImpl response_trailer{{"third", "POST"}, {"test-2", "test-2"}}; + std::string body; { const std::string format = "{{%PROTOCOL%}} %RESP(not exist)%++%RESP(test)% " @@ -1863,16 +1933,17 @@ TEST(AccessLogFormatterTest, CompositeFormatterSuccess) { absl::optional protocol = Http::Protocol::Http11; EXPECT_CALL(stream_info, protocol()).WillRepeatedly(Return(protocol)); - EXPECT_EQ("{{HTTP/1.1}} -++test GET PUT\t@POST@\ttest-2[]", - formatter.format(request_header, response_header, response_trailer, stream_info)); + EXPECT_EQ( + "{{HTTP/1.1}} -++test GET PUT\t@POST@\ttest-2[]", + formatter.format(request_header, response_header, response_trailer, stream_info, body)); } { const std::string format = "{}*JUST PLAIN string]"; FormatterImpl formatter(format); - EXPECT_EQ(format, - formatter.format(request_header, response_header, response_trailer, stream_info)); + EXPECT_EQ(format, formatter.format(request_header, response_header, response_trailer, + stream_info, body)); } { @@ -1881,8 +1952,8 @@ TEST(AccessLogFormatterTest, CompositeFormatterSuccess) { FormatterImpl 
formatter(format); - EXPECT_EQ("GET|G|PU|GET|POS", - formatter.format(request_header, response_header, response_trailer, stream_info)); + EXPECT_EQ("GET|G|PU|GET|POS", formatter.format(request_header, response_header, + response_trailer, stream_info, body)); } { @@ -1894,8 +1965,9 @@ TEST(AccessLogFormatterTest, CompositeFormatterSuccess) { "test_obj)%|%DYNAMIC_METADATA(com.test:test_obj:inner_key)%"; FormatterImpl formatter(format); - EXPECT_EQ("\"test_value\"|{\"inner_key\":\"inner_value\"}|\"inner_value\"", - formatter.format(request_header, response_header, response_trailer, stream_info)); + EXPECT_EQ( + "\"test_value\"|{\"inner_key\":\"inner_value\"}|\"inner_value\"", + formatter.format(request_header, response_header, response_trailer, stream_info, body)); } { @@ -1912,8 +1984,9 @@ TEST(AccessLogFormatterTest, CompositeFormatterSuccess) { "%FILTER_STATE(testing):8%|%FILTER_STATE(nonexisting)%"; FormatterImpl formatter(format); - EXPECT_EQ("\"test_value\"|-|\"test_va|-", - formatter.format(request_header, response_header, response_trailer, stream_info)); + EXPECT_EQ( + "\"test_value\"|-|\"test_va|-", + formatter.format(request_header, response_header, response_trailer, stream_info, body)); } { @@ -1925,9 +1998,10 @@ TEST(AccessLogFormatterTest, CompositeFormatterSuccess) { EXPECT_CALL(stream_info, startTime()).WillRepeatedly(Return(time)); FormatterImpl formatter(format); - EXPECT_EQ(fmt::format("2018/03/28|{}|bad_format|2018-03-28T23:35:58.000Z|000000000.0.00.000", - expected_time_in_epoch), - formatter.format(request_header, response_header, response_trailer, stream_info)); + EXPECT_EQ( + fmt::format("2018/03/28|{}|bad_format|2018-03-28T23:35:58.000Z|000000000.0.00.000", + expected_time_in_epoch), + formatter.format(request_header, response_header, response_trailer, stream_info, body)); } { @@ -1940,8 +2014,9 @@ TEST(AccessLogFormatterTest, CompositeFormatterSuccess) { EXPECT_CALL(stream_info, startTime()).WillRepeatedly(Return(time)); FormatterImpl 
formatter(format); - EXPECT_EQ("1970/01/01|0|bad_format|1970-01-01T00:00:00.000Z|000000000.0.00.000", - formatter.format(request_header, response_header, response_trailer, stream_info)); + EXPECT_EQ( + "1970/01/01|0|bad_format|1970-01-01T00:00:00.000Z|000000000.0.00.000", + formatter.format(request_header, response_header, response_trailer, stream_info, body)); } { @@ -1951,8 +2026,9 @@ TEST(AccessLogFormatterTest, CompositeFormatterSuccess) { const SystemTime start_time(std::chrono::microseconds(1522796769123456)); EXPECT_CALL(stream_info, startTime()).WillRepeatedly(Return(start_time)); FormatterImpl formatter(format); - EXPECT_EQ("1522796769.123|1522796769.1234|1522796769.12345|1522796769.123456", - formatter.format(request_header, response_header, response_trailer, stream_info)); + EXPECT_EQ( + "1522796769.123|1522796769.1234|1522796769.12345|1522796769.123456", + formatter.format(request_header, response_header, response_trailer, stream_info, body)); } { @@ -1961,9 +2037,10 @@ TEST(AccessLogFormatterTest, CompositeFormatterSuccess) { const SystemTime start_time(std::chrono::microseconds(1522796769123456)); EXPECT_CALL(stream_info, startTime()).WillRepeatedly(Return(start_time)); FormatterImpl formatter(format); - EXPECT_EQ("segment1:1522796769.123|segment2:1522796769.1234|seg3:1522796769.123456|1522796769-" - "123-asdf-123456000|.1234560:segm5:2018", - formatter.format(request_header, response_header, response_trailer, stream_info)); + EXPECT_EQ( + "segment1:1522796769.123|segment2:1522796769.1234|seg3:1522796769.123456|1522796769-" + "123-asdf-123456000|.1234560:segm5:2018", + formatter.format(request_header, response_header, response_trailer, stream_info, body)); } { @@ -1973,13 +2050,14 @@ TEST(AccessLogFormatterTest, CompositeFormatterSuccess) { const SystemTime start_time(std::chrono::microseconds(1522796769123456)); EXPECT_CALL(stream_info, startTime()).WillOnce(Return(start_time)); FormatterImpl formatter(format); - 
EXPECT_EQ("%%|%%123456000|1522796769%%123|1%%1522796769", - formatter.format(request_header, response_header, response_trailer, stream_info)); + EXPECT_EQ( + "%%|%%123456000|1522796769%%123|1%%1522796769", + formatter.format(request_header, response_header, response_trailer, stream_info, body)); } } -TEST(AccessLogFormatterTest, ParserFailures) { - AccessLogFormatParser parser; +TEST(SubstitutionFormatterTest, ParserFailures) { + SubstitutionFormatParser parser; std::vector test_cases = { "{{%PROTOCOL%}} ++ %REQ(FIRST?SECOND)% %RESP(FIRST?SECOND)", @@ -1988,6 +2066,7 @@ TEST(AccessLogFormatterTest, ParserFailures) { "%REQ(valid)% %NOT_VALID%", "%REQ(FIRST?SECOND%", "%%", + "%%HOSTNAME%PROTOCOL%", "%protocol%", "%REQ(TEST):%", "%REQ(TEST):3q4%", @@ -2017,5 +2096,5 @@ TEST(AccessLogFormatterTest, ParserFailures) { } } // namespace -} // namespace AccessLog +} // namespace Formatter } // namespace Envoy diff --git a/test/common/grpc/BUILD b/test/common/grpc/BUILD index 612f7f798b5b8..edfdfbda404a6 100644 --- a/test/common/grpc/BUILD +++ b/test/common/grpc/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_fuzz_test", @@ -9,6 +7,8 @@ load( "envoy_select_google_grpc", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( @@ -145,10 +145,12 @@ envoy_cc_test_library( ":utility_lib", "//source/common/api:api_lib", "//source/common/event:dispatcher_lib", + "//source/common/grpc:context_lib", + "//source/common/http:context_lib", "//source/common/http/http2:conn_pool_lib", "//test/integration:integration_lib", "//test/mocks/local_info:local_info_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:transport_socket_factory_context_mocks", "//test/proto:helloworld_proto_cc_proto", "//test/test_common:global_lib", "//test/test_common:test_time_lib", diff --git a/test/common/grpc/async_client_impl_test.cc b/test/common/grpc/async_client_impl_test.cc index bdc77f95af50e..fd49adf1a6920 100644 --- 
a/test/common/grpc/async_client_impl_test.cc +++ b/test/common/grpc/async_client_impl_test.cc @@ -38,6 +38,64 @@ class EnvoyAsyncClientImplTest : public testing::Test { DangerousDeprecatedTestTime test_time_; }; +// Validate that the host header is the cluster name in grpc config. +TEST_F(EnvoyAsyncClientImplTest, HostIsClusterNameByDefault) { + NiceMock> grpc_callbacks; + Http::AsyncClient::StreamCallbacks* http_callbacks; + + Http::MockAsyncClientStream http_stream; + EXPECT_CALL(http_client_, start(_, _)) + .WillOnce( + Invoke([&http_callbacks, &http_stream](Http::AsyncClient::StreamCallbacks& callbacks, + const Http::AsyncClient::StreamOptions&) { + http_callbacks = &callbacks; + return &http_stream; + })); + + EXPECT_CALL(grpc_callbacks, + onCreateInitialMetadata(testing::Truly([](Http::RequestHeaderMap& headers) { + return headers.Host()->value() == "test_cluster"; + }))); + EXPECT_CALL(http_stream, sendHeaders(_, _)) + .WillOnce(Invoke([&http_callbacks](Http::HeaderMap&, bool) { http_callbacks->onReset(); })); + auto grpc_stream = + grpc_client_->start(*method_descriptor_, grpc_callbacks, Http::AsyncClient::StreamOptions()); + EXPECT_EQ(grpc_stream, nullptr); +} + +// Validate that the host header is the authority field in grpc config. 
+TEST_F(EnvoyAsyncClientImplTest, HostIsOverrideByConfig) { + envoy::config::core::v3::GrpcService config; + config.mutable_envoy_grpc()->set_cluster_name("test_cluster"); + config.mutable_envoy_grpc()->set_authority("demo.com"); + + grpc_client_ = std::make_unique(cm_, config, test_time_.timeSystem()); + EXPECT_CALL(cm_, httpAsyncClientForCluster("test_cluster")) + .WillRepeatedly(ReturnRef(http_client_)); + + NiceMock> grpc_callbacks; + Http::AsyncClient::StreamCallbacks* http_callbacks; + + Http::MockAsyncClientStream http_stream; + EXPECT_CALL(http_client_, start(_, _)) + .WillOnce( + Invoke([&http_callbacks, &http_stream](Http::AsyncClient::StreamCallbacks& callbacks, + const Http::AsyncClient::StreamOptions&) { + http_callbacks = &callbacks; + return &http_stream; + })); + + EXPECT_CALL(grpc_callbacks, + onCreateInitialMetadata(testing::Truly([](Http::RequestHeaderMap& headers) { + return headers.Host()->value() == "demo.com"; + }))); + EXPECT_CALL(http_stream, sendHeaders(_, _)) + .WillOnce(Invoke([&http_callbacks](Http::HeaderMap&, bool) { http_callbacks->onReset(); })); + auto grpc_stream = + grpc_client_->start(*method_descriptor_, grpc_callbacks, Http::AsyncClient::StreamOptions()); + EXPECT_EQ(grpc_stream, nullptr); +} + // Validate that a failure in the HTTP client returns immediately with status // UNAVAILABLE. 
TEST_F(EnvoyAsyncClientImplTest, StreamHttpStartFail) { @@ -46,7 +104,7 @@ TEST_F(EnvoyAsyncClientImplTest, StreamHttpStartFail) { EXPECT_CALL(grpc_callbacks, onRemoteClose(Status::WellKnownGrpcStatus::Unavailable, "")); auto grpc_stream = grpc_client_->start(*method_descriptor_, grpc_callbacks, Http::AsyncClient::StreamOptions()); - EXPECT_TRUE(grpc_stream == nullptr); + EXPECT_EQ(grpc_stream, nullptr); } // Validate that a failure in the HTTP client returns immediately with status @@ -98,7 +156,7 @@ TEST_F(EnvoyAsyncClientImplTest, StreamHttpSendHeadersFail) { EXPECT_CALL(grpc_callbacks, onRemoteClose(Status::WellKnownGrpcStatus::Internal, "")); auto grpc_stream = grpc_client_->start(*method_descriptor_, grpc_callbacks, Http::AsyncClient::StreamOptions()); - EXPECT_TRUE(grpc_stream == nullptr); + EXPECT_EQ(grpc_stream, nullptr); } // Validate that a failure to sendHeaders() in the HTTP client returns @@ -150,7 +208,7 @@ TEST_F(EnvoyAsyncClientImplTest, StreamHttpClientException) { onRemoteClose(Status::WellKnownGrpcStatus::Unavailable, "Cluster not available")); auto grpc_stream = grpc_client_->start(*method_descriptor_, grpc_callbacks, Http::AsyncClient::StreamOptions()); - EXPECT_TRUE(grpc_stream == nullptr); + EXPECT_EQ(grpc_stream, nullptr); } } // namespace diff --git a/test/common/grpc/common_test.cc b/test/common/grpc/common_test.cc index 02f948b9c6014..6847f159670e6 100644 --- a/test/common/grpc/common_test.cc +++ b/test/common/grpc/common_test.cc @@ -104,20 +104,20 @@ TEST(GrpcContextTest, GetGrpcTimeout) { } TEST(GrpcCommonTest, GrpcStatusDetailsBin) { - Http::TestHeaderMapImpl empty_trailers; + Http::TestResponseTrailerMapImpl empty_trailers; EXPECT_FALSE(Common::getGrpcStatusDetailsBin(empty_trailers)); - Http::TestHeaderMapImpl invalid_value{{"grpc-status-details-bin", "invalid"}}; + Http::TestResponseTrailerMapImpl invalid_value{{"grpc-status-details-bin", "invalid"}}; EXPECT_FALSE(Common::getGrpcStatusDetailsBin(invalid_value)); - 
Http::TestHeaderMapImpl unpadded_value{ + Http::TestResponseTrailerMapImpl unpadded_value{ {"grpc-status-details-bin", "CAUSElJlc291cmNlIG5vdCBmb3VuZA"}}; auto status = Common::getGrpcStatusDetailsBin(unpadded_value); ASSERT_TRUE(status); EXPECT_EQ(Status::WellKnownGrpcStatus::NotFound, status->code()); EXPECT_EQ("Resource not found", status->message()); - Http::TestHeaderMapImpl padded_value{ + Http::TestResponseTrailerMapImpl padded_value{ {"grpc-status-details-bin", "CAUSElJlc291cmNlIG5vdCBmb3VuZA=="}}; status = Common::getGrpcStatusDetailsBin(padded_value); ASSERT_TRUE(status); @@ -129,25 +129,25 @@ TEST(GrpcContextTest, ToGrpcTimeout) { Http::TestRequestHeaderMapImpl headers; Common::toGrpcTimeout(std::chrono::milliseconds(0UL), headers); - EXPECT_EQ("0m", headers.GrpcTimeout()->value().getStringView()); + EXPECT_EQ("0m", headers.getGrpcTimeoutValue()); Common::toGrpcTimeout(std::chrono::milliseconds(1UL), headers); - EXPECT_EQ("1m", headers.GrpcTimeout()->value().getStringView()); + EXPECT_EQ("1m", headers.getGrpcTimeoutValue()); Common::toGrpcTimeout(std::chrono::milliseconds(100000000UL), headers); - EXPECT_EQ("100000S", headers.GrpcTimeout()->value().getStringView()); + EXPECT_EQ("100000S", headers.getGrpcTimeoutValue()); Common::toGrpcTimeout(std::chrono::milliseconds(100000000000UL), headers); - EXPECT_EQ("1666666M", headers.GrpcTimeout()->value().getStringView()); + EXPECT_EQ("1666666M", headers.getGrpcTimeoutValue()); Common::toGrpcTimeout(std::chrono::milliseconds(9000000000000UL), headers); - EXPECT_EQ("2500000H", headers.GrpcTimeout()->value().getStringView()); + EXPECT_EQ("2500000H", headers.getGrpcTimeoutValue()); Common::toGrpcTimeout(std::chrono::milliseconds(360000000000000UL), headers); - EXPECT_EQ("99999999H", headers.GrpcTimeout()->value().getStringView()); + EXPECT_EQ("99999999H", headers.getGrpcTimeoutValue()); Common::toGrpcTimeout(std::chrono::milliseconds(UINT64_MAX), headers); - EXPECT_EQ("99999999H", 
headers.GrpcTimeout()->value().getStringView()); + EXPECT_EQ("99999999H", headers.getGrpcTimeoutValue()); } TEST(GrpcContextTest, PrepareHeaders) { @@ -155,71 +155,71 @@ TEST(GrpcContextTest, PrepareHeaders) { Http::RequestMessagePtr message = Common::prepareHeaders("cluster", "service_name", "method_name", absl::nullopt); - EXPECT_EQ("POST", message->headers().Method()->value().getStringView()); - EXPECT_EQ("/service_name/method_name", message->headers().Path()->value().getStringView()); - EXPECT_EQ("cluster", message->headers().Host()->value().getStringView()); - EXPECT_EQ("application/grpc", message->headers().ContentType()->value().getStringView()); + EXPECT_EQ("POST", message->headers().getMethodValue()); + EXPECT_EQ("/service_name/method_name", message->headers().getPathValue()); + EXPECT_EQ("cluster", message->headers().getHostValue()); + EXPECT_EQ("application/grpc", message->headers().getContentTypeValue()); } { Http::RequestMessagePtr message = Common::prepareHeaders( "cluster", "service_name", "method_name", absl::optional(1)); - EXPECT_EQ("POST", message->headers().Method()->value().getStringView()); - EXPECT_EQ("/service_name/method_name", message->headers().Path()->value().getStringView()); - EXPECT_EQ("cluster", message->headers().Host()->value().getStringView()); - EXPECT_EQ("application/grpc", message->headers().ContentType()->value().getStringView()); - EXPECT_EQ("1m", message->headers().GrpcTimeout()->value().getStringView()); + EXPECT_EQ("POST", message->headers().getMethodValue()); + EXPECT_EQ("/service_name/method_name", message->headers().getPathValue()); + EXPECT_EQ("cluster", message->headers().getHostValue()); + EXPECT_EQ("application/grpc", message->headers().getContentTypeValue()); + EXPECT_EQ("1m", message->headers().getGrpcTimeoutValue()); } { Http::RequestMessagePtr message = Common::prepareHeaders( "cluster", "service_name", "method_name", absl::optional(1)); - EXPECT_EQ("POST", message->headers().Method()->value().getStringView()); 
- EXPECT_EQ("/service_name/method_name", message->headers().Path()->value().getStringView()); - EXPECT_EQ("cluster", message->headers().Host()->value().getStringView()); - EXPECT_EQ("application/grpc", message->headers().ContentType()->value().getStringView()); - EXPECT_EQ("1000m", message->headers().GrpcTimeout()->value().getStringView()); + EXPECT_EQ("POST", message->headers().getMethodValue()); + EXPECT_EQ("/service_name/method_name", message->headers().getPathValue()); + EXPECT_EQ("cluster", message->headers().getHostValue()); + EXPECT_EQ("application/grpc", message->headers().getContentTypeValue()); + EXPECT_EQ("1000m", message->headers().getGrpcTimeoutValue()); } { Http::RequestMessagePtr message = Common::prepareHeaders( "cluster", "service_name", "method_name", absl::optional(1)); - EXPECT_EQ("POST", message->headers().Method()->value().getStringView()); - EXPECT_EQ("/service_name/method_name", message->headers().Path()->value().getStringView()); - EXPECT_EQ("cluster", message->headers().Host()->value().getStringView()); - EXPECT_EQ("application/grpc", message->headers().ContentType()->value().getStringView()); - EXPECT_EQ("60000m", message->headers().GrpcTimeout()->value().getStringView()); + EXPECT_EQ("POST", message->headers().getMethodValue()); + EXPECT_EQ("/service_name/method_name", message->headers().getPathValue()); + EXPECT_EQ("cluster", message->headers().getHostValue()); + EXPECT_EQ("application/grpc", message->headers().getContentTypeValue()); + EXPECT_EQ("60000m", message->headers().getGrpcTimeoutValue()); } { Http::RequestMessagePtr message = Common::prepareHeaders( "cluster", "service_name", "method_name", absl::optional(1)); - EXPECT_EQ("POST", message->headers().Method()->value().getStringView()); - EXPECT_EQ("/service_name/method_name", message->headers().Path()->value().getStringView()); - EXPECT_EQ("cluster", message->headers().Host()->value().getStringView()); - EXPECT_EQ("application/grpc", 
message->headers().ContentType()->value().getStringView()); - EXPECT_EQ("3600000m", message->headers().GrpcTimeout()->value().getStringView()); + EXPECT_EQ("POST", message->headers().getMethodValue()); + EXPECT_EQ("/service_name/method_name", message->headers().getPathValue()); + EXPECT_EQ("cluster", message->headers().getHostValue()); + EXPECT_EQ("application/grpc", message->headers().getContentTypeValue()); + EXPECT_EQ("3600000m", message->headers().getGrpcTimeoutValue()); } { Http::RequestMessagePtr message = Common::prepareHeaders( "cluster", "service_name", "method_name", absl::optional(100000000)); - EXPECT_EQ("POST", message->headers().Method()->value().getStringView()); - EXPECT_EQ("/service_name/method_name", message->headers().Path()->value().getStringView()); - EXPECT_EQ("cluster", message->headers().Host()->value().getStringView()); - EXPECT_EQ("application/grpc", message->headers().ContentType()->value().getStringView()); - EXPECT_EQ("99999999H", message->headers().GrpcTimeout()->value().getStringView()); + EXPECT_EQ("POST", message->headers().getMethodValue()); + EXPECT_EQ("/service_name/method_name", message->headers().getPathValue()); + EXPECT_EQ("cluster", message->headers().getHostValue()); + EXPECT_EQ("application/grpc", message->headers().getContentTypeValue()); + EXPECT_EQ("99999999H", message->headers().getGrpcTimeoutValue()); } { Http::RequestMessagePtr message = Common::prepareHeaders("cluster", "service_name", "method_name", absl::optional(100000000000)); - EXPECT_EQ("POST", message->headers().Method()->value().getStringView()); - EXPECT_EQ("/service_name/method_name", message->headers().Path()->value().getStringView()); - EXPECT_EQ("cluster", message->headers().Host()->value().getStringView()); - EXPECT_EQ("application/grpc", message->headers().ContentType()->value().getStringView()); - EXPECT_EQ("1666666M", message->headers().GrpcTimeout()->value().getStringView()); + EXPECT_EQ("POST", message->headers().getMethodValue()); + 
EXPECT_EQ("/service_name/method_name", message->headers().getPathValue()); + EXPECT_EQ("cluster", message->headers().getHostValue()); + EXPECT_EQ("application/grpc", message->headers().getContentTypeValue()); + EXPECT_EQ("1666666M", message->headers().getGrpcTimeoutValue()); } } @@ -285,20 +285,29 @@ TEST(GrpcContextTest, HasGrpcContentType) { EXPECT_FALSE(isGrpcContentType("application/grpc-web+foo")); } +TEST(GrpcContextTest, IsGrpcRequestHeader) { + Http::TestRequestHeaderMapImpl is{ + {":method", "GET"}, {":path", "/"}, {"content-type", "application/grpc"}}; + EXPECT_TRUE(Common::isGrpcRequestHeaders(is)); + Http::TestRequestHeaderMapImpl is_not{{":method", "CONNECT"}, + {"content-type", "application/grpc"}}; + EXPECT_FALSE(Common::isGrpcRequestHeaders(is_not)); +} + TEST(GrpcContextTest, IsGrpcResponseHeader) { Http::TestResponseHeaderMapImpl grpc_status_only{{":status", "500"}, {"grpc-status", "14"}}; - EXPECT_TRUE(Common::isGrpcResponseHeader(grpc_status_only, true)); - EXPECT_FALSE(Common::isGrpcResponseHeader(grpc_status_only, false)); + EXPECT_TRUE(Common::isGrpcResponseHeaders(grpc_status_only, true)); + EXPECT_FALSE(Common::isGrpcResponseHeaders(grpc_status_only, false)); Http::TestResponseHeaderMapImpl grpc_response_header{{":status", "200"}, {"content-type", "application/grpc"}}; - EXPECT_FALSE(Common::isGrpcResponseHeader(grpc_response_header, true)); - EXPECT_TRUE(Common::isGrpcResponseHeader(grpc_response_header, false)); + EXPECT_FALSE(Common::isGrpcResponseHeaders(grpc_response_header, true)); + EXPECT_TRUE(Common::isGrpcResponseHeaders(grpc_response_header, false)); Http::TestResponseHeaderMapImpl json_response_header{{":status", "200"}, {"content-type", "application/json"}}; - EXPECT_FALSE(Common::isGrpcResponseHeader(json_response_header, true)); - EXPECT_FALSE(Common::isGrpcResponseHeader(json_response_header, false)); + EXPECT_FALSE(Common::isGrpcResponseHeaders(json_response_header, true)); + 
EXPECT_FALSE(Common::isGrpcResponseHeaders(json_response_header, false)); } TEST(GrpcContextTest, ValidateResponse) { diff --git a/test/common/grpc/context_impl_test.cc b/test/common/grpc/context_impl_test.cc index c1fa773b25d3c..ec8e340770c18 100644 --- a/test/common/grpc/context_impl_test.cc +++ b/test/common/grpc/context_impl_test.cc @@ -65,7 +65,7 @@ TEST(GrpcContextTest, ChargeStats) { TEST(GrpcContextTest, ResolveServiceAndMethod) { std::string service; std::string method; - Http::RequestHeaderMapImpl headers; + Http::TestRequestHeaderMapImpl headers; headers.setPath("/service_name/method_name?a=b"); const Http::HeaderEntry* path = headers.Path(); Stats::TestSymbolTable symbol_table; @@ -73,8 +73,8 @@ TEST(GrpcContextTest, ResolveServiceAndMethod) { absl::optional request_names = context.resolveDynamicServiceAndMethod(path); EXPECT_TRUE(request_names); - EXPECT_EQ("service_name", symbol_table->toString(request_names->service_)); - EXPECT_EQ("method_name", symbol_table->toString(request_names->method_)); + EXPECT_EQ("service_name", absl::get(request_names->service_)); + EXPECT_EQ("method_name", absl::get(request_names->method_)); headers.setPath(""); EXPECT_FALSE(context.resolveDynamicServiceAndMethod(path)); headers.setPath("/"); diff --git a/test/common/grpc/google_async_client_impl_test.cc b/test/common/grpc/google_async_client_impl_test.cc index cd9749cb63c1c..1474899621f10 100644 --- a/test/common/grpc/google_async_client_impl_test.cc +++ b/test/common/grpc/google_async_client_impl_test.cc @@ -17,6 +17,7 @@ using testing::_; using testing::Eq; +using testing::NiceMock; using testing::Return; namespace Envoy { @@ -38,12 +39,12 @@ class MockGenericStub : public GoogleStub { class MockStubFactory : public GoogleStubFactory { public: - std::shared_ptr createStub(std::shared_ptr /*channel*/) override { + GoogleStubSharedPtr createStub(std::shared_ptr /*channel*/) override { return shared_stub_; } MockGenericStub* stub_ = new MockGenericStub(); - 
std::shared_ptr shared_stub_{stub_}; + GoogleStubSharedPtr shared_stub_{stub_}; }; class EnvoyGoogleAsyncClientImplTest : public testing::Test { @@ -54,21 +55,24 @@ class EnvoyGoogleAsyncClientImplTest : public testing::Test { method_descriptor_(helloworld::Greeter::descriptor()->FindMethodByName("SayHello")), stat_names_(scope_->symbolTable()) { - envoy::config::core::v3::GrpcService config; - auto* google_grpc = config.mutable_google_grpc(); + auto* google_grpc = config_.mutable_google_grpc(); google_grpc->set_target_uri("fake_address"); google_grpc->set_stat_prefix("test_cluster"); tls_ = std::make_unique(*api_); + } + + virtual void initialize() { grpc_client_ = std::make_unique(*dispatcher_, *tls_, stub_factory_, - scope_, config, *api_, stat_names_); + scope_, config_, *api_, stat_names_); } + envoy::config::core::v3::GrpcService config_; DangerousDeprecatedTestTime test_time_; Stats::IsolatedStoreImpl* stats_store_; // Ownership transferred to scope_. Api::ApiPtr api_; Event::DispatcherPtr dispatcher_; Stats::ScopeSharedPtr scope_; - std::unique_ptr tls_; + GoogleAsyncClientThreadLocalPtr tls_; MockStubFactory stub_factory_; const Protobuf::MethodDescriptor* method_descriptor_; StatNames stat_names_; @@ -78,6 +82,8 @@ class EnvoyGoogleAsyncClientImplTest : public testing::Test { // Validate that a failure in gRPC stub call creation returns immediately with // status UNAVAILABLE. TEST_F(EnvoyGoogleAsyncClientImplTest, StreamHttpStartFail) { + initialize(); + EXPECT_CALL(*stub_factory_.stub_, PrepareCall_(_, _, _)).WillOnce(Return(nullptr)); MockAsyncStreamCallbacks grpc_callbacks; EXPECT_CALL(grpc_callbacks, onCreateInitialMetadata(_)); @@ -91,6 +97,8 @@ TEST_F(EnvoyGoogleAsyncClientImplTest, StreamHttpStartFail) { // Validate that a failure in gRPC stub call creation returns immediately with // status UNAVAILABLE. 
TEST_F(EnvoyGoogleAsyncClientImplTest, RequestHttpStartFail) { + initialize(); + EXPECT_CALL(*stub_factory_.stub_, PrepareCall_(_, _, _)).WillOnce(Return(nullptr)); MockAsyncRequestCallbacks grpc_callbacks; EXPECT_CALL(grpc_callbacks, onCreateInitialMetadata(_)); @@ -114,6 +122,39 @@ TEST_F(EnvoyGoogleAsyncClientImplTest, RequestHttpStartFail) { EXPECT_TRUE(grpc_request == nullptr); } +class EnvoyGoogleLessMockedAsyncClientImplTest : public EnvoyGoogleAsyncClientImplTest { +public: + void initialize() override { + grpc_client_ = std::make_unique(*dispatcher_, *tls_, real_stub_factory_, + scope_, config_, *api_, stat_names_); + } + + GoogleGenericStubFactory real_stub_factory_; +}; + +TEST_F(EnvoyGoogleLessMockedAsyncClientImplTest, TestOverflow) { + // Set an (unreasonably) low byte limit. + auto* google_grpc = config_.mutable_google_grpc(); + google_grpc->mutable_per_stream_buffer_limit_bytes()->set_value(1); + initialize(); + + NiceMock> grpc_callbacks; + AsyncStream grpc_stream = + grpc_client_->start(*method_descriptor_, grpc_callbacks, Http::AsyncClient::RequestOptions()); + EXPECT_FALSE(grpc_stream == nullptr); + EXPECT_FALSE(grpc_stream->isAboveWriteBufferHighWatermark()); + + // With no data in the message, it won't back up. + helloworld::HelloRequest request_msg; + grpc_stream->sendMessage(request_msg, false); + EXPECT_FALSE(grpc_stream->isAboveWriteBufferHighWatermark()); + + // With actual data we pass the very small byte limit. 
+ request_msg.set_name("bob"); + grpc_stream->sendMessage(request_msg, false); + EXPECT_TRUE(grpc_stream->isAboveWriteBufferHighWatermark()); +} + } // namespace } // namespace Grpc } // namespace Envoy diff --git a/test/common/grpc/google_grpc_utils_test.cc b/test/common/grpc/google_grpc_utils_test.cc index f115d1ab30152..a44580b813e09 100644 --- a/test/common/grpc/google_grpc_utils_test.cc +++ b/test/common/grpc/google_grpc_utils_test.cc @@ -8,6 +8,10 @@ #include "gtest/gtest.h" +using testing::HasSubstr; +using testing::Pair; +using testing::UnorderedElementsAre; + namespace Envoy { namespace Grpc { namespace { @@ -83,6 +87,38 @@ TEST(GoogleGrpcUtilsTest, ByteBufferInstanceRoundTrip) { EXPECT_EQ(buffer_instance2->toString(), "test this"); } +// Validate that we build the grpc::ChannelArguments as expected. +TEST(GoogleGrpcUtilsTest, ChannelArgsFromConfig) { + const auto config = TestUtility::parseYaml(R"EOF( + google_grpc: + channel_args: + args: + grpc.http2.max_pings_without_data: { int_value: 3 } + grpc.default_authority: { string_value: foo } + grpc.http2.max_ping_strikes: { int_value: 5 } + grpc.ssl_target_name_override: { string_value: bar } + )EOF"); + const grpc::ChannelArguments channel_args = GoogleGrpcUtils::channelArgsFromConfig(config); + grpc_channel_args effective_args = channel_args.c_channel_args(); + absl::node_hash_map string_args; + absl::node_hash_map int_args; + for (uint32_t n = 0; n < effective_args.num_args; ++n) { + const grpc_arg arg = effective_args.args[n]; + ASSERT_TRUE(arg.type == GRPC_ARG_STRING || arg.type == GRPC_ARG_INTEGER); + if (arg.type == GRPC_ARG_STRING) { + string_args[arg.key] = arg.value.string; + } else if (arg.type == GRPC_ARG_INTEGER) { + int_args[arg.key] = arg.value.integer; + } + } + EXPECT_THAT(string_args, + UnorderedElementsAre(Pair("grpc.ssl_target_name_override", "bar"), + Pair("grpc.primary_user_agent", HasSubstr("grpc-c++/")), + Pair("grpc.default_authority", "foo"))); + EXPECT_THAT(int_args, 
UnorderedElementsAre(Pair("grpc.http2.max_ping_strikes", 5), + Pair("grpc.http2.max_pings_without_data", 3))); +} + } // namespace } // namespace Grpc } // namespace Envoy diff --git a/test/common/grpc/grpc_client_integration.h b/test/common/grpc/grpc_client_integration.h index 6f35c4b06e59e..1f2bb941d6221 100644 --- a/test/common/grpc/grpc_client_integration.h +++ b/test/common/grpc/grpc_client_integration.h @@ -55,6 +55,26 @@ class GrpcClientIntegrationParamTest ClientType clientType() const override { return std::get<1>(GetParam()); } }; +class VersionedGrpcClientIntegrationParamTest + : public BaseGrpcClientIntegrationParamTest, + public testing::TestWithParam> { +public: + static std::string protocolTestParamsToString( + const ::testing::TestParamInfo>& p) { + return fmt::format("{}_{}_{}", + std::get<0>(p.param) == Network::Address::IpVersion::v4 ? "IPv4" : "IPv6", + std::get<1>(p.param) == ClientType::GoogleGrpc ? "GoogleGrpc" : "EnvoyGrpc", + std::get<2>(p.param) == envoy::config::core::v3::ApiVersion::V3 + ? "V3" + : envoy::config::core::v3::ApiVersion::V2 ? 
"V2" : "AUTO"); + } + Network::Address::IpVersion ipVersion() const override { return std::get<0>(GetParam()); } + ClientType clientType() const override { return std::get<1>(GetParam()); } + envoy::config::core::v3::ApiVersion apiVersion() const { return std::get<2>(GetParam()); } +}; + class DeltaSotwIntegrationParamTest : public BaseGrpcClientIntegrationParamTest, public testing::TestWithParam< @@ -90,6 +110,12 @@ class DeltaSotwIntegrationParamTest #define GRPC_CLIENT_INTEGRATION_PARAMS \ testing::Combine(testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), \ testing::Values(Grpc::ClientType::EnvoyGrpc, Grpc::ClientType::GoogleGrpc)) +#define VERSIONED_GRPC_CLIENT_INTEGRATION_PARAMS \ + testing::Combine(testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), \ + testing::Values(Grpc::ClientType::EnvoyGrpc, Grpc::ClientType::GoogleGrpc), \ + testing::Values(envoy::config::core::v3::ApiVersion::V3, \ + envoy::config::core::v3::ApiVersion::V2, \ + envoy::config::core::v3::ApiVersion::AUTO)) #define DELTA_SOTW_GRPC_CLIENT_INTEGRATION_PARAMS \ testing::Combine(testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), \ testing::Values(Grpc::ClientType::EnvoyGrpc, Grpc::ClientType::GoogleGrpc), \ @@ -98,6 +124,12 @@ class DeltaSotwIntegrationParamTest #define GRPC_CLIENT_INTEGRATION_PARAMS \ testing::Combine(testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), \ testing::Values(Grpc::ClientType::EnvoyGrpc)) +#define VERSIONED_GRPC_CLIENT_INTEGRATION_PARAMS \ + testing::Combine(testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), \ + testing::Values(Grpc::ClientType::EnvoyGrpc), \ + testing::Values(envoy::config::core::v3::ApiVersion::V3, \ + envoy::config::core::v3::ApiVersion::V2, \ + envoy::config::core::v3::ApiVersion::AUTO)) #define DELTA_SOTW_GRPC_CLIENT_INTEGRATION_PARAMS \ testing::Combine(testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), \ testing::Values(Grpc::ClientType::EnvoyGrpc), \ diff --git 
a/test/common/grpc/grpc_client_integration_test.cc b/test/common/grpc/grpc_client_integration_test.cc index 15d8567a77a3f..e347226f0354f 100644 --- a/test/common/grpc/grpc_client_integration_test.cc +++ b/test/common/grpc/grpc_client_integration_test.cc @@ -76,7 +76,8 @@ TEST_P(GrpcClientIntegrationTest, HttpNon200Status) { initialize(); for (const auto http_response_status : {400, 401, 403, 404, 429, 431}) { auto stream = createStream(empty_metadata_); - const Http::TestHeaderMapImpl reply_headers{{":status", std::to_string(http_response_status)}}; + const Http::TestResponseHeaderMapImpl reply_headers{ + {":status", std::to_string(http_response_status)}}; stream->expectInitialMetadata(empty_metadata_); stream->expectTrailingMetadata(empty_metadata_); // Technically this should be @@ -93,7 +94,7 @@ TEST_P(GrpcClientIntegrationTest, HttpNon200Status) { TEST_P(GrpcClientIntegrationTest, GrpcStatusFallback) { initialize(); auto stream = createStream(empty_metadata_); - const Http::TestHeaderMapImpl reply_headers{ + const Http::TestResponseHeaderMapImpl reply_headers{ {":status", "404"}, {"grpc-status", std::to_string(enumToInt(Status::WellKnownGrpcStatus::PermissionDenied))}, {"grpc-message", "error message"}}; @@ -133,6 +134,18 @@ TEST_P(GrpcClientIntegrationTest, BadReplyGrpcFraming) { dispatcher_helper_.runDispatcher(); } +// Validate that custom channel args can be set on the Google gRPC client. +// +TEST_P(GrpcClientIntegrationTest, CustomChannelArgs) { + SKIP_IF_GRPC_CLIENT(ClientType::EnvoyGrpc); + channel_args_.emplace_back("grpc.primary_user_agent", "test_agent"); + initialize(); + auto request = createRequest(empty_metadata_); + request->sendReply(); + dispatcher_helper_.runDispatcher(); + EXPECT_THAT(stream_headers_->get_("user-agent"), testing::HasSubstr("test_agent")); +} + // Validate that a reply with bad protobuf is handled as an INTERNAL gRPC error. 
TEST_P(GrpcClientIntegrationTest, BadReplyProtobuf) { initialize(); @@ -177,7 +190,7 @@ TEST_P(GrpcClientIntegrationTest, OutOfRangeGrpcStatus) { EXPECT_CALL(*stream, onReceiveTrailingMetadata_(_)).WillExitIfNeeded(); dispatcher_helper_.setStreamEventPending(); stream->expectGrpcStatus(Status::WellKnownGrpcStatus::InvalidCode); - const Http::TestHeaderMapImpl reply_trailers{{"grpc-status", std::to_string(0x1337)}}; + const Http::TestResponseTrailerMapImpl reply_trailers{{"grpc-status", std::to_string(0x1337)}}; stream->fake_stream_->encodeTrailers(reply_trailers); dispatcher_helper_.runDispatcher(); } @@ -191,7 +204,7 @@ TEST_P(GrpcClientIntegrationTest, MissingGrpcStatus) { EXPECT_CALL(*stream, onReceiveTrailingMetadata_(_)).WillExitIfNeeded(); dispatcher_helper_.setStreamEventPending(); stream->expectGrpcStatus(Status::WellKnownGrpcStatus::Unknown); - const Http::TestHeaderMapImpl reply_trailers{{"some", "other header"}}; + const Http::TestResponseTrailerMapImpl reply_trailers{{"some", "other header"}}; stream->fake_stream_->encodeTrailers(reply_trailers); dispatcher_helper_.runDispatcher(); } @@ -292,7 +305,7 @@ TEST_P(GrpcClientIntegrationTest, StreamTrailersOnly) { TEST_P(GrpcClientIntegrationTest, RequestTrailersOnly) { initialize(); auto request = createRequest(empty_metadata_); - const Http::TestHeaderMapImpl reply_headers{{":status", "200"}, {"grpc-status", "0"}}; + const Http::TestResponseTrailerMapImpl reply_headers{{":status", "200"}, {"grpc-status", "0"}}; EXPECT_CALL(*request->child_span_, setTag(Eq(Tracing::Tags::get().GrpcStatusCode), Eq("0"))); EXPECT_CALL(*request->child_span_, setTag(Eq(Tracing::Tags::get().Error), Eq(Tracing::Tags::get().True))); @@ -400,14 +413,13 @@ class GrpcAccessTokenClientIntegrationTest : public GrpcSslClientIntegrationTest void expectExtraHeaders(FakeStream& fake_stream) override { AssertionResult result = fake_stream.waitForHeadersComplete(); RELEASE_ASSERT(result, result.message()); - Http::TestHeaderMapImpl 
stream_headers(fake_stream.headers()); + std::vector auth_headers; + Http::HeaderUtility::getAllOfHeader(fake_stream.headers(), "authorization", auth_headers); if (!access_token_value_.empty()) { - if (access_token_value_2_.empty()) { - EXPECT_EQ("Bearer " + access_token_value_, stream_headers.get_("authorization")); - } else { - EXPECT_EQ("Bearer " + access_token_value_ + ",Bearer " + access_token_value_2_, - stream_headers.get_("authorization")); - } + EXPECT_EQ("Bearer " + access_token_value_, auth_headers[0]); + } + if (!access_token_value_2_.empty()) { + EXPECT_EQ("Bearer " + access_token_value_2_, auth_headers[1]); } } diff --git a/test/common/grpc/grpc_client_integration_test_harness.h b/test/common/grpc/grpc_client_integration_test_harness.h index 903deb0080e0b..c60342eb968ab 100644 --- a/test/common/grpc/grpc_client_integration_test_harness.h +++ b/test/common/grpc/grpc_client_integration_test_harness.h @@ -1,5 +1,7 @@ #pragma once +#include + #include "envoy/config/core/v3/base.pb.h" #include "envoy/config/core/v3/grpc_service.pb.h" #include "envoy/extensions/transport_sockets/tls/v3/cert.pb.h" @@ -8,6 +10,8 @@ #include "common/api/api_impl.h" #include "common/event/dispatcher_impl.h" #include "common/grpc/async_client_impl.h" +#include "common/grpc/context_impl.h" +#include "common/http/context_impl.h" #ifdef ENVOY_GOOGLE_GRPC #include "common/grpc/google_async_client_impl.h" @@ -28,7 +32,7 @@ #include "test/integration/fake_upstream.h" #include "test/mocks/grpc/mocks.h" #include "test/mocks/local_info/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/transport_socket_factory_context.h" #include "test/mocks/tracing/mocks.h" #include "test/mocks/upstream/mocks.h" #include "test/proto/helloworld.pb.h" @@ -112,7 +116,7 @@ class HelloworldStream : public MockAsyncStreamCallbacks void expectInitialMetadata(const TestMetadata& metadata) { EXPECT_CALL(*this, onReceiveInitialMetadata_(_)) .WillOnce(Invoke([this, &metadata](const 
Http::HeaderMap& received_headers) { - Http::TestHeaderMapImpl stream_headers(received_headers); + Http::TestResponseHeaderMapImpl stream_headers(received_headers); for (const auto& value : metadata) { EXPECT_EQ(value.second, stream_headers.get_(value.first)); } @@ -124,7 +128,7 @@ class HelloworldStream : public MockAsyncStreamCallbacks void expectTrailingMetadata(const TestMetadata& metadata) { EXPECT_CALL(*this, onReceiveTrailingMetadata_(_)) .WillOnce(Invoke([this, &metadata](const Http::HeaderMap& received_headers) { - Http::TestHeaderMapImpl stream_headers(received_headers); + Http::TestResponseTrailerMapImpl stream_headers(received_headers); for (auto& value : metadata) { EXPECT_EQ(value.second, stream_headers.get_(value.first)); } @@ -139,7 +143,7 @@ class HelloworldStream : public MockAsyncStreamCallbacks reply_headers->addReference(value.first, value.second); } expectInitialMetadata(metadata); - fake_stream_->encodeHeaders(Http::TestHeaderMapImpl(*reply_headers), false); + fake_stream_->encodeHeaders(Http::TestResponseHeaderMapImpl(*reply_headers), false); } void sendReply() { @@ -164,7 +168,8 @@ class HelloworldStream : public MockAsyncStreamCallbacks void sendServerTrailers(Status::GrpcStatus grpc_status, const std::string& grpc_message, const TestMetadata& metadata, bool trailers_only = false) { - Http::TestHeaderMapImpl reply_trailers{{"grpc-status", std::to_string(enumToInt(grpc_status))}}; + Http::TestResponseTrailerMapImpl reply_trailers{ + {"grpc-status", std::to_string(enumToInt(grpc_status))}}; if (!grpc_message.empty()) { reply_trailers.addCopy("grpc-message", grpc_message); } @@ -198,6 +203,8 @@ class HelloworldStream : public MockAsyncStreamCallbacks const TestMetadata empty_metadata_; }; +using HelloworldStreamPtr = std::unique_ptr; + // Request related test utilities. 
class HelloworldRequest : public MockAsyncRequestCallbacks { public: @@ -221,6 +228,8 @@ class HelloworldRequest : public MockAsyncRequestCallbacks; + class GrpcClientIntegrationTest : public GrpcClientIntegrationParamTest { public: GrpcClientIntegrationTest() @@ -307,6 +316,10 @@ class GrpcClientIntegrationTest : public GrpcClientIntegrationParamTest { auto* google_grpc = config.mutable_google_grpc(); google_grpc->set_target_uri(fake_upstream_->localAddress()->asString()); google_grpc->set_stat_prefix("fake_cluster"); + for (const auto& config_arg : channel_args_) { + (*google_grpc->mutable_channel_args()->mutable_args())[config_arg.first].set_string_value( + config_arg.second); + } fillServiceWideInitialMetadata(config); return config; } @@ -326,22 +339,22 @@ class GrpcClientIntegrationTest : public GrpcClientIntegrationParamTest { void expectInitialHeaders(FakeStream& fake_stream, const TestMetadata& initial_metadata) { AssertionResult result = fake_stream.waitForHeadersComplete(); RELEASE_ASSERT(result, result.message()); - Http::TestHeaderMapImpl stream_headers(fake_stream.headers()); - EXPECT_EQ("POST", stream_headers.get_(":method")); - EXPECT_EQ("/helloworld.Greeter/SayHello", stream_headers.get_(":path")); - EXPECT_EQ("application/grpc", stream_headers.get_("content-type")); - EXPECT_EQ("trailers", stream_headers.get_("te")); + stream_headers_ = std::make_unique(fake_stream.headers()); + EXPECT_EQ("POST", stream_headers_->get_(":method")); + EXPECT_EQ("/helloworld.Greeter/SayHello", stream_headers_->get_(":path")); + EXPECT_EQ("application/grpc", stream_headers_->get_("content-type")); + EXPECT_EQ("trailers", stream_headers_->get_("te")); for (const auto& value : initial_metadata) { - EXPECT_EQ(value.second, stream_headers.get_(value.first)); + EXPECT_EQ(value.second, stream_headers_->get_(value.first)); } for (const auto& value : service_wide_initial_metadata_) { - EXPECT_EQ(value.second, stream_headers.get_(value.first)); + EXPECT_EQ(value.second, 
stream_headers_->get_(value.first)); } } virtual void expectExtraHeaders(FakeStream&) {} - std::unique_ptr createRequest(const TestMetadata& initial_metadata) { + HelloworldRequestPtr createRequest(const TestMetadata& initial_metadata) { auto request = std::make_unique(dispatcher_helper_); EXPECT_CALL(*request, onCreateInitialMetadata(_)) .WillOnce(Invoke([&initial_metadata](Http::HeaderMap& headers) { @@ -387,7 +400,7 @@ class GrpcClientIntegrationTest : public GrpcClientIntegrationParamTest { return request; } - std::unique_ptr createStream(const TestMetadata& initial_metadata) { + HelloworldStreamPtr createStream(const TestMetadata& initial_metadata) { auto stream = std::make_unique(dispatcher_helper_); EXPECT_CALL(*stream, onCreateInitialMetadata(_)) .WillOnce(Invoke([&initial_metadata](Http::HeaderMap& headers) { @@ -430,8 +443,10 @@ class GrpcClientIntegrationTest : public GrpcClientIntegrationParamTest { Stats::ScopeSharedPtr stats_scope_{stats_store_}; Grpc::StatNames google_grpc_stat_names_{stats_store_->symbolTable()}; TestMetadata service_wide_initial_metadata_; + std::unique_ptr stream_headers_; + std::vector> channel_args_; #ifdef ENVOY_GOOGLE_GRPC - std::unique_ptr google_tls_; + GoogleAsyncClientThreadLocalPtr google_tls_; #endif AsyncClient grpc_client_; Event::TimerPtr timeout_timer_; @@ -447,7 +462,7 @@ class GrpcClientIntegrationTest : public GrpcClientIntegrationParamTest { NiceMock local_info_; Runtime::MockLoader runtime_; Extensions::TransportSockets::Tls::ContextManagerImpl context_manager_{test_time_.timeSystem()}; - NiceMock random_; + NiceMock random_; Http::AsyncClientPtr http_async_client_; Http::ConnectionPool::InstancePtr http_conn_pool_; Http::ContextImpl http_context_; @@ -514,7 +529,7 @@ class GrpcSslClientIntegrationTest : public GrpcClientIntegrationTest { Network::TransportSocketFactoryPtr createUpstreamSslContext() { envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context; auto* common_tls_context = 
tls_context.mutable_common_tls_context(); - common_tls_context->add_alpn_protocols("h2"); + common_tls_context->add_alpn_protocols(Http::Utility::AlpnNames::get().Http2); auto* tls_cert = common_tls_context->add_tls_certificates(); tls_cert->mutable_certificate_chain()->set_filename( TestEnvironment::runfilesPath("test/config/integration/certs/upstreamcert.pem")); diff --git a/test/common/html/BUILD b/test/common/html/BUILD index 6dfff216dc3eb..378ddd98eddec 100644 --- a/test/common/html/BUILD +++ b/test/common/html/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( diff --git a/test/common/http/BUILD b/test/common/http/BUILD index 91fc3ec75460e..b317052c44146 100644 --- a/test/common/http/BUILD +++ b/test/common/http/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_benchmark_test", @@ -11,6 +9,8 @@ load( "envoy_proto_library", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( @@ -23,6 +23,7 @@ envoy_cc_test( "//source/common/http:context_lib", "//source/common/http:headers_lib", "//source/common/http:utility_lib", + "//source/extensions/upstreams/http/generic:config", "//test/mocks:common_lib", "//test/mocks/buffer:buffer_mocks", "//test/mocks/http:http_mocks", @@ -49,8 +50,6 @@ envoy_cc_test( envoy_cc_test( name = "codec_client_test", srcs = ["codec_client_test.cc"], - # IpVersions/CodecNetworkTest.SendData/IPv4: Test times out on Windows. 
- tags = ["fails_on_windows"], deps = [ ":common_lib", "//source/common/buffer:buffer_lib", @@ -154,6 +153,7 @@ envoy_proto_library( srcs = ["conn_manager_impl_fuzz.proto"], deps = [ "//test/fuzz:common_proto", + "@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg", ], ) @@ -180,6 +180,7 @@ envoy_cc_fuzz_test( "//test/mocks/network:network_mocks", "//test/mocks/router:router_mocks", "//test/mocks/runtime:runtime_mocks", + "//test/mocks/server:server_mocks", "//test/mocks/ssl:ssl_mocks", "//test/mocks/tracing:tracing_mocks", "//test/mocks/upstream:upstream_mocks", @@ -197,11 +198,11 @@ envoy_cc_test( "//include/envoy/event:dispatcher_interface", "//include/envoy/http:request_id_extension_interface", "//include/envoy/tracing:http_tracer_interface", - "//source/common/access_log:access_log_formatter_lib", "//source/common/access_log:access_log_lib", "//source/common/buffer:buffer_lib", "//source/common/common:macros", "//source/common/event:dispatcher_lib", + "//source/common/formatter:substitution_formatter_lib", "//source/common/http:conn_manager_lib", "//source/common/http:context_lib", "//source/common/http:date_provider_lib", @@ -223,11 +224,14 @@ envoy_cc_test( "//test/mocks/network:network_mocks", "//test/mocks/router:router_mocks", "//test/mocks/runtime:runtime_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", + "//test/mocks/server:instance_mocks", + "//test/mocks/server:overload_manager_mocks", "//test/mocks/ssl:ssl_mocks", "//test/mocks/tracing:tracing_mocks", "//test/mocks/upstream:upstream_mocks", "//test/test_common:logging_lib", + "//test/test_common:test_runtime_lib", "//test/test_common:test_time_lib", "@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto", "@envoy_api//envoy/type/tracing/v3:pkg_cc_proto", @@ -275,6 +279,7 @@ envoy_cc_test( name = "header_map_impl_test", srcs = ["header_map_impl_test.cc"], deps = [ + 
"//source/common/http:header_list_view_lib", "//source/common/http:header_map_lib", "//source/common/http:header_utility_lib", "//test/test_common:utility_lib", @@ -338,6 +343,7 @@ envoy_cc_test( envoy_proto_library( name = "utility_fuzz_proto", srcs = ["utility_fuzz.proto"], + deps = ["@envoy_api//envoy/config/core/v3:pkg"], ) envoy_cc_fuzz_test( @@ -358,10 +364,12 @@ envoy_cc_test( deps = [ "//source/common/http:exception_lib", "//source/common/http:header_map_lib", + "//source/common/http:url_utility_lib", "//source/common/http:utility_lib", "//source/common/network:address_lib", "//test/mocks/http:http_mocks", "//test/mocks/upstream:upstream_mocks", + "//test/test_common:test_runtime_lib", "//test/test_common:utility_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], diff --git a/test/common/http/async_client_impl_test.cc b/test/common/http/async_client_impl_test.cc index fc46f8a5ef43a..80cc5af92695d 100644 --- a/test/common/http/async_client_impl_test.cc +++ b/test/common/http/async_client_impl_test.cc @@ -52,7 +52,8 @@ class AsyncClientImplTest : public testing::Test { .WillByDefault(ReturnRef(envoy::config::core::v3::Locality().default_instance())); } - void expectSuccess(AsyncClient::Request* sent_request, uint64_t code) { + virtual void expectSuccess(AsyncClient::Request* sent_request, uint64_t code) { + EXPECT_CALL(callbacks_, onBeforeFinalizeUpstreamSpan(_, _)).Times(1); EXPECT_CALL(callbacks_, onSuccess_(_, _)) .WillOnce(Invoke([sent_request, code](const AsyncClient::Request& request, ResponseMessage* response) -> void { @@ -67,7 +68,7 @@ class AsyncClientImplTest : public testing::Test { bool end_stream) { EXPECT_CALL(callbacks, onHeaders_(_, end_stream)) .WillOnce(Invoke([code](ResponseHeaderMap& headers, bool) -> void { - EXPECT_EQ(std::to_string(code), headers.Status()->value().getStringView()); + EXPECT_EQ(std::to_string(code), headers.getStatusValue()); })); } @@ -81,7 +82,7 @@ class AsyncClientImplTest : public testing::Test { NiceMock* 
timer_; NiceMock dispatcher_; NiceMock runtime_; - NiceMock random_; + NiceMock random_; NiceMock local_info_; Http::ContextImpl http_context_; AsyncClientImpl client_; @@ -92,6 +93,22 @@ class AsyncClientImplTracingTest : public AsyncClientImplTest { public: Tracing::MockSpan parent_span_; const std::string child_span_name_{"Test Child Span Name"}; + + void expectSuccess(AsyncClient::Request* sent_request, uint64_t code) override { + EXPECT_CALL(callbacks_, onBeforeFinalizeUpstreamSpan(_, _)) + .WillOnce(Invoke([](Tracing::Span& span, const Http::ResponseHeaderMap* response_headers) { + span.setTag("onBeforeFinalizeUpstreamSpan", "called"); + ASSERT_NE(nullptr, response_headers); + })); + EXPECT_CALL(callbacks_, onSuccess_(_, _)) + .WillOnce(Invoke([sent_request, code](const AsyncClient::Request& request, + ResponseMessage* response) -> void { + // Verify that callback is called with the same request handle as returned by + // AsyncClient::send(). + EXPECT_EQ(sent_request, &request); + EXPECT_EQ(code, Utility::getResponseStatus(response->headers())); + })); + } }; TEST_F(AsyncClientImplTest, BasicStream) { @@ -149,7 +166,7 @@ TEST_F(AsyncClientImplTest, Basic) { return nullptr; })); - TestHeaderMapImpl copy(message_->headers()); + TestRequestHeaderMapImpl copy(message_->headers()); copy.addCopy("x-envoy-internal", "true"); copy.addCopy("x-forwarded-for", "127.0.0.1"); copy.addCopy(":scheme", "http"); @@ -187,7 +204,7 @@ TEST_F(AsyncClientImplTracingTest, Basic) { return nullptr; })); - TestHeaderMapImpl copy(message_->headers()); + TestRequestHeaderMapImpl copy(message_->headers()); copy.addCopy("x-envoy-internal", "true"); copy.addCopy("x-forwarded-for", "127.0.0.1"); copy.addCopy(":scheme", "http"); @@ -204,6 +221,7 @@ TEST_F(AsyncClientImplTracingTest, Basic) { expectSuccess(request, 200); + EXPECT_CALL(*child_span, setTag(Eq("onBeforeFinalizeUpstreamSpan"), Eq("called"))); EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().Component), 
Eq(Tracing::Tags::get().Proxy))); EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().HttpProtocol), Eq("HTTP/1.1"))); @@ -231,7 +249,7 @@ TEST_F(AsyncClientImplTracingTest, BasicNamedChildSpan) { return nullptr; })); - TestHeaderMapImpl copy(message_->headers()); + TestRequestHeaderMapImpl copy(message_->headers()); copy.addCopy("x-envoy-internal", "true"); copy.addCopy("x-forwarded-for", "127.0.0.1"); copy.addCopy(":scheme", "http"); @@ -250,6 +268,7 @@ TEST_F(AsyncClientImplTracingTest, BasicNamedChildSpan) { expectSuccess(request, 200); + EXPECT_CALL(*child_span, setTag(Eq("onBeforeFinalizeUpstreamSpan"), Eq("called"))); EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().Component), Eq(Tracing::Tags::get().Proxy))); EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().HttpProtocol), Eq("HTTP/1.1"))); @@ -277,14 +296,14 @@ TEST_F(AsyncClientImplTest, BasicHashPolicy) { })); EXPECT_CALL(cm_, httpConnPoolForCluster(_, _, _, _)) .WillOnce( - Invoke([&](const std::string&, Upstream::ResourcePriority, Http::Protocol, + Invoke([&](const std::string&, Upstream::ResourcePriority, auto, Upstream::LoadBalancerContext* context) -> Http::ConnectionPool::Instance* { // this is the hash of :path header value "/" EXPECT_EQ(16761507700594825962UL, context->computeHashKey().value()); return &cm_.conn_pool_; })); - TestHeaderMapImpl copy(message_->headers()); + TestRequestHeaderMapImpl copy(message_->headers()); copy.addCopy("x-envoy-internal", "true"); copy.addCopy("x-forwarded-for", "127.0.0.1"); copy.addCopy(":scheme", "http"); @@ -520,6 +539,7 @@ TEST_F(AsyncClientImplTest, MultipleRequests) { // Finish request 2. 
ResponseHeaderMapPtr response_headers2(new TestResponseHeaderMapImpl{{":status", "503"}}); + EXPECT_CALL(callbacks2, onBeforeFinalizeUpstreamSpan(_, _)).Times(1); EXPECT_CALL(callbacks2, onSuccess_(_, _)) .WillOnce(Invoke( [request2](const AsyncClient::Request& request, ResponseMessage* response) -> void { @@ -538,6 +558,7 @@ TEST_F(AsyncClientImplTest, MultipleRequests) { // Finish request 3. ResponseHeaderMapPtr response_headers3(new TestResponseHeaderMapImpl{{":status", "500"}}); + EXPECT_CALL(callbacks3, onBeforeFinalizeUpstreamSpan(_, _)).Times(1); EXPECT_CALL(callbacks3, onSuccess_(_, _)) .WillOnce(Invoke( [request3](const AsyncClient::Request& request, ResponseMessage* response) -> void { @@ -885,6 +906,7 @@ TEST_F(AsyncClientImplTest, ResetAfterResponseStart) { auto* request = client_.send(std::move(message_), callbacks_, AsyncClient::RequestOptions()); EXPECT_NE(request, nullptr); + EXPECT_CALL(callbacks_, onBeforeFinalizeUpstreamSpan(_, _)).Times(1); EXPECT_CALL(callbacks_, onFailure(_, _)) .WillOnce(Invoke([sent_request = request](const AsyncClient::Request& request, AsyncClient::FailureReason reason) { @@ -927,6 +949,7 @@ TEST_F(AsyncClientImplTest, CancelRequest) { EXPECT_CALL(stream_encoder_, encodeHeaders(HeaderMapEqualRef(&message_->headers()), true)); EXPECT_CALL(stream_encoder_.stream_, resetStream(_)); + EXPECT_CALL(callbacks_, onBeforeFinalizeUpstreamSpan(_, _)).Times(1); AsyncClient::Request* request = client_.send(std::move(message_), callbacks_, AsyncClient::RequestOptions()); request->cancel(); @@ -947,8 +970,15 @@ TEST_F(AsyncClientImplTracingTest, CancelRequest) { AsyncClient::RequestOptions options = AsyncClient::RequestOptions().setParentSpan(parent_span_); EXPECT_CALL(*child_span, setSampled(true)); EXPECT_CALL(*child_span, injectContext(_)); + EXPECT_CALL(callbacks_, onBeforeFinalizeUpstreamSpan(_, _)) + .WillOnce(Invoke([](Tracing::Span& span, const Http::ResponseHeaderMap* response_headers) { + 
span.setTag("onBeforeFinalizeUpstreamSpan", "called"); + // Since this is a failure, we expect no response headers. + ASSERT_EQ(nullptr, response_headers); + })); AsyncClient::Request* request = client_.send(std::move(message_), callbacks_, options); + EXPECT_CALL(*child_span, setTag(Eq("onBeforeFinalizeUpstreamSpan"), Eq("called"))); EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().Component), Eq(Tracing::Tags::get().Proxy))); EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().HttpProtocol), Eq("HTTP/1.1"))); @@ -992,6 +1022,7 @@ TEST_F(AsyncClientImplTest, DestroyWithActiveRequest) { EXPECT_NE(request, nullptr); EXPECT_CALL(stream_encoder_.stream_, resetStream(_)); + EXPECT_CALL(callbacks_, onBeforeFinalizeUpstreamSpan(_, _)).Times(1); EXPECT_CALL(callbacks_, onFailure(_, _)) .WillOnce(Invoke([sent_request = request](const AsyncClient::Request& request, AsyncClient::FailureReason reason) { @@ -1021,6 +1052,7 @@ TEST_F(AsyncClientImplTracingTest, DestroyWithActiveRequest) { auto* request = client_.send(std::move(message_), callbacks_, options); EXPECT_NE(request, nullptr); + EXPECT_CALL(callbacks_, onBeforeFinalizeUpstreamSpan(_, _)).Times(1); EXPECT_CALL(callbacks_, onFailure(_, _)) .WillOnce(Invoke([sent_request = request](const AsyncClient::Request& request, AsyncClient::FailureReason reason) { @@ -1051,6 +1083,7 @@ TEST_F(AsyncClientImplTest, PoolFailure) { return nullptr; })); + EXPECT_CALL(callbacks_, onBeforeFinalizeUpstreamSpan(_, _)).Times(1); EXPECT_CALL(callbacks_, onSuccess_(_, _)) .WillOnce(Invoke([](const AsyncClient::Request& request, ResponseMessage* response) -> void { // The callback gets called before AsyncClient::send() completes, which means that we don't @@ -1075,6 +1108,7 @@ TEST_F(AsyncClientImplTest, PoolFailureWithBody) { return nullptr; })); + EXPECT_CALL(callbacks_, onBeforeFinalizeUpstreamSpan(_, _)).Times(1); EXPECT_CALL(callbacks_, onSuccess_(_, _)) .WillOnce(Invoke([](const AsyncClient::Request& request, ResponseMessage* 
response) -> void { // The callback gets called before AsyncClient::send() completes, which means that we don't @@ -1103,7 +1137,7 @@ TEST_F(AsyncClientImplTest, StreamTimeout) { EXPECT_CALL(*timer_, enableTimer(std::chrono::milliseconds(40), _)); EXPECT_CALL(stream_encoder_.stream_, resetStream(_)); - TestHeaderMapImpl expected_timeout{ + TestRequestHeaderMapImpl expected_timeout{ {":status", "504"}, {"content-length", "24"}, {"content-type", "text/plain"}}; EXPECT_CALL(stream_callbacks_, onHeaders_(HeaderMapEqualRef(&expected_timeout), false)); EXPECT_CALL(stream_callbacks_, onData(_, true)); @@ -1138,7 +1172,7 @@ TEST_F(AsyncClientImplTest, StreamTimeoutHeadReply) { EXPECT_CALL(*timer_, enableTimer(std::chrono::milliseconds(40), _)); EXPECT_CALL(stream_encoder_.stream_, resetStream(_)); - TestHeaderMapImpl expected_timeout{ + TestRequestHeaderMapImpl expected_timeout{ {":status", "504"}, {"content-length", "24"}, {"content-type", "text/plain"}}; EXPECT_CALL(stream_callbacks_, onHeaders_(HeaderMapEqualRef(&expected_timeout), true)); EXPECT_CALL(stream_callbacks_, onComplete()); @@ -1207,6 +1241,7 @@ TEST_F(AsyncClientImplTracingTest, RequestTimeout) { expectSuccess(request, 504); + EXPECT_CALL(*child_span, setTag(Eq("onBeforeFinalizeUpstreamSpan"), Eq("called"))); EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().Component), Eq(Tracing::Tags::get().Proxy))); EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().HttpProtocol), Eq("HTTP/1.1"))); @@ -1236,6 +1271,7 @@ TEST_F(AsyncClientImplTest, DisableTimer) { AsyncClient::Request* request = client_.send(std::move(message_), callbacks_, AsyncClient::RequestOptions().setTimeout(std::chrono::milliseconds(200))); + EXPECT_CALL(callbacks_, onBeforeFinalizeUpstreamSpan(_, _)).Times(1); request->cancel(); } @@ -1316,7 +1352,13 @@ TEST_F(AsyncClientImplTest, WatermarkCallbacks) { Http::StreamDecoderFilterCallbacks* filter_callbacks = static_cast(stream); 
filter_callbacks->onDecoderFilterAboveWriteBufferHighWatermark(); + EXPECT_TRUE(stream->isAboveWriteBufferHighWatermark()); + filter_callbacks->onDecoderFilterAboveWriteBufferHighWatermark(); + EXPECT_TRUE(stream->isAboveWriteBufferHighWatermark()); + filter_callbacks->onDecoderFilterBelowWriteBufferLowWatermark(); + EXPECT_TRUE(stream->isAboveWriteBufferHighWatermark()); filter_callbacks->onDecoderFilterBelowWriteBufferLowWatermark(); + EXPECT_FALSE(stream->isAboveWriteBufferHighWatermark()); EXPECT_CALL(stream_callbacks_, onReset()); } @@ -1394,8 +1436,7 @@ TEST_F(AsyncClientImplUnitTest, RouteImplInitTest) { route_impl_.routeEntry()->typedMetadata().get("bar")); EXPECT_EQ(nullptr, route_impl_.routeEntry()->perFilterConfig("bar")); EXPECT_TRUE(route_impl_.routeEntry()->upgradeMap().empty()); - EXPECT_EQ(Router::InternalRedirectAction::PassThrough, - route_impl_.routeEntry()->internalRedirectAction()); + EXPECT_EQ(false, route_impl_.routeEntry()->internalRedirectPolicy().enabled()); EXPECT_TRUE(route_impl_.routeEntry()->shadowPolicies().empty()); EXPECT_TRUE(route_impl_.routeEntry()->virtualHost().rateLimitPolicy().empty()); EXPECT_EQ(nullptr, route_impl_.routeEntry()->virtualHost().corsPolicy()); diff --git a/test/common/http/codec_client_test.cc b/test/common/http/codec_client_test.cc index 607cdb8cd612d..15979e8350b35 100644 --- a/test/common/http/codec_client_test.cc +++ b/test/common/http/codec_client_test.cc @@ -229,7 +229,7 @@ TEST_F(CodecClientTest, IdleTimerClientLocalCloseWithActiveRequests) { } TEST_F(CodecClientTest, ProtocolError) { - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Throw(CodecProtocolException("protocol error"))); + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Return(codecProtocolError("protocol error"))); EXPECT_CALL(*connection_, close(Network::ConnectionCloseType::NoFlush)); Buffer::OwnedImpl data; @@ -239,10 +239,8 @@ TEST_F(CodecClientTest, ProtocolError) { } TEST_F(CodecClientTest, 408Response) { - EXPECT_CALL(*codec_, 
dispatch(_)).WillOnce(Invoke([](Buffer::Instance&) -> void { - throw PrematureResponseException(Code::RequestTimeout); - })); - + EXPECT_CALL(*codec_, dispatch(_)) + .WillOnce(Return(prematureResponseError("", Code::RequestTimeout))); EXPECT_CALL(*connection_, close(Network::ConnectionCloseType::NoFlush)); Buffer::OwnedImpl data; @@ -252,10 +250,7 @@ TEST_F(CodecClientTest, 408Response) { } TEST_F(CodecClientTest, PrematureResponse) { - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([](Buffer::Instance&) -> void { - throw PrematureResponseException(Code::OK); - })); - + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Return(prematureResponseError("", Code::OK))); EXPECT_CALL(*connection_, close(Network::ConnectionCloseType::NoFlush)); Buffer::OwnedImpl data; @@ -288,7 +283,7 @@ class CodecNetworkTest : public testing::TestWithParamtimeSource()) { dispatcher_ = api_->allocateDispatcher("test_thread"); auto socket = std::make_shared( - Network::Test::getAnyAddress(GetParam()), nullptr, true); + Network::Test::getCanonicalLoopbackAddress(GetParam()), nullptr, true); Network::ClientConnectionPtr client_connection = dispatcher_->createClientConnection( socket->localAddress(), source_address_, Network::Test::createRawBufferSocket(), nullptr); upstream_listener_ = dispatcher_->createListener(std::move(socket), listener_callbacks_, true); @@ -375,9 +370,10 @@ TEST_P(CodecNetworkTest, SendData) { const std::string full_data = "HTTP/1.1 200 OK\r\ncontent-length: 0\r\n"; Buffer::OwnedImpl data(full_data); upstream_connection_->write(data, false); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { EXPECT_EQ(full_data, data.toString()); dispatcher_->exit(); + return Http::okStatus(); })); dispatcher_->run(Event::Dispatcher::RunType::Block); @@ -397,9 +393,14 @@ TEST_P(CodecNetworkTest, SendHeadersAndClose) { 
upstream_connection_->close(Network::ConnectionCloseType::FlushWrite); EXPECT_CALL(*codec_, dispatch(_)) .Times(2) - .WillOnce( - Invoke([&](Buffer::Instance& data) -> void { EXPECT_EQ(full_data, data.toString()); })) - .WillOnce(Invoke([&](Buffer::Instance& data) -> void { EXPECT_EQ("", data.toString()); })); + .WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { + EXPECT_EQ(full_data, data.toString()); + return Http::okStatus(); + })) + .WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { + EXPECT_EQ("", data.toString()); + return Http::okStatus(); + })); // Because the headers are not complete, the disconnect will reset the stream. // Note even if the final \r\n were appended to the header data, enough of the // codec state is mocked out that the data would not be framed and the stream @@ -430,9 +431,14 @@ TEST_P(CodecNetworkTest, SendHeadersAndCloseUnderReadDisable) { EXPECT_CALL(*codec_, dispatch(_)) .Times(2) - .WillOnce( - Invoke([&](Buffer::Instance& data) -> void { EXPECT_EQ(full_data, data.toString()); })) - .WillOnce(Invoke([&](Buffer::Instance& data) -> void { EXPECT_EQ("", data.toString()); })); + .WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { + EXPECT_EQ(full_data, data.toString()); + return Http::okStatus(); + })) + .WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { + EXPECT_EQ("", data.toString()); + return Http::okStatus(); + })); EXPECT_CALL(inner_encoder_.stream_, resetStream(_)).WillOnce(InvokeWithoutArgs([&]() -> void { for (auto callbacks : inner_encoder_.stream_.callbacks_) { callbacks->onResetStream(StreamResetReason::RemoteReset, absl::string_view()); diff --git a/test/common/http/codec_impl_corpus/clusterfuzz-testcase-minimized-codec_impl_fuzz_test-5698895985508352 b/test/common/http/codec_impl_corpus/clusterfuzz-testcase-minimized-codec_impl_fuzz_test-5698895985508352 new file mode 100644 index 0000000000000..19f5125b23def --- /dev/null +++ 
b/test/common/http/codec_impl_corpus/clusterfuzz-testcase-minimized-codec_impl_fuzz_test-5698895985508352 @@ -0,0 +1,49 @@ +actions { + new_stream { + request_headers { + headers { + key: ":method" + value: "5" + } + headers { + key: ":path" + value: "/" + } + headers { + key: ":scheme" + value: "r" + } + headers { + key: ":authority" + value: "5" + } + } + } +} +actions { + client_drain { + } +} +actions { + stream_action { + response { + headers { + } + } + } +} +actions { + stream_action { + request { + data: 1 + } + } +} +actions { + stream_action { + request { + trailers { + } + } + } +} diff --git a/test/common/http/codec_impl_corpus/example b/test/common/http/codec_impl_corpus/example index 897c48be43d66..4efce45126444 100644 --- a/test/common/http/codec_impl_corpus/example +++ b/test/common/http/codec_impl_corpus/example @@ -89,6 +89,32 @@ actions { } } } +actions { + stream_action { + stream_id: 1 + response { + metadata { + metadata { + key: "a" + value: "a" + } + } + } + } +} +actions { + stream_action { + stream_id: 1 + response { + metadata { + metadata { + key: "a" + value: "a" + } + } + } + } +} actions { stream_action { stream_id: 1 diff --git a/test/common/http/codec_impl_corpus/metadata b/test/common/http/codec_impl_corpus/metadata new file mode 100644 index 0000000000000..cc07a24428f30 --- /dev/null +++ b/test/common/http/codec_impl_corpus/metadata @@ -0,0 +1,114 @@ +actions { + new_stream { + request_headers { + headers { + key: ":method" + value: "GET" + } + headers { + key: ":path" + value: "/" + } + headers { + key: ":scheme" + value: "http" + } + headers { + key: ":authority" + value: "foo.com" + } + headers { + key: "blah" + value: "nosniff" + } + headers { + key: "cookie" + value: "foo=bar" + } + headers { + key: "cookie" + value: "foo2=bar2" + } + } + } +} +actions { quiesce_drain {} } + +actions { + stream_action { + stream_id: 1 + request { + data: 128000 + } + } +} +actions { quiesce_drain {} } +actions { + stream_action { + stream_id: 
1 + response { + metadata { + metadata { + key: "aaaaaaaaaaaaaaaaaaaaaaaa" + value: "bbbbbbbbbbbbbbbbbbbbbbb" + } + metadata { + key: "aaaaaaaaaaaaaaaaaaaaaaaa" + value: "bbbbbbbbbbbbbbbbbbbbbbb" + } + metadata { + key: "aaaaaaaaaaaaaaaaaaaaaaaa" + value: "bbbbbbbbbbbbbbbbbbbbbbb" + } + } + } + } +} +actions { + stream_action { + stream_id: 2 + response { + metadata { + metadata { + key: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + value: "bbbbbbbbbbbbbbbbbbbbbbb" + } + } + } + } +} +actions { + new_stream { + request_headers { + headers { + key: ":method" + value: "GET" + } + headers { + key: ":path" + value: "/" + } + headers { + key: ":scheme" + value: "http" + } + headers { + key: ":authority" + value: "foo.com" + } + headers { + key: "blah" + value: "nosniff" + } + headers { + key: "cookie" + value: "foo=bar" + } + headers { + key: "cookie" + value: "foo2=bar2" + } + } + } +} +actions { quiesce_drain {} } diff --git a/test/common/http/codec_impl_corpus/metadata_corrupt b/test/common/http/codec_impl_corpus/metadata_corrupt new file mode 100644 index 0000000000000..04176a675294b --- /dev/null +++ b/test/common/http/codec_impl_corpus/metadata_corrupt @@ -0,0 +1,52 @@ +actions { + new_stream { + request_headers { + headers { + key: ":method" + value: "GET" + } + headers { + key: ":path" + value: "/" + } + headers { + key: ":scheme" + value: "http" + } + headers { + key: ":authority" + value: "foo.com" + } + } + } +} +actions { quiesce_drain {} } +actions { + stream_action { + stream_id: 1 + request { + metadata { + metadata { + key: "header_key1" + value: "header_value1" + } + metadata { + key: "header_key2" + value: "header_value2" + } + metadata { + key: "header_key3" + value: "header_value3" + } + } + } + } +} +actions { + mutate { + buffer: 0 + offset: 8 + value: 0 + } +} +actions { quiesce_drain {} } \ No newline at end of file diff --git a/test/common/http/codec_impl_corpus/metadata_dispatch b/test/common/http/codec_impl_corpus/metadata_dispatch new file mode 
100644 index 0000000000000..cbacc6a4126a7 --- /dev/null +++ b/test/common/http/codec_impl_corpus/metadata_dispatch @@ -0,0 +1,61 @@ +actions { + new_stream { + request_headers { + headers { + key: ":method" + value: "GET" + } + headers { + key: ":path" + value: "/" + } + headers { + key: ":scheme" + value: "http" + } + headers { + key: ":authority" + value: "foo.com" + } + headers { + key: "blah" + value: "nosniff" + } + headers { + key: "cookie" + value: "foo=bar" + } + headers { + key: "cookie" + value: "foo2=bar2" + } + } + } +} +actions { quiesce_drain {} } +actions { + stream_action { + stream_id: 1 + request { + data: 128000 + } + dispatching_action { + metadata { + metadata { + key: "aaaaaaaaaaaaaaaaaaaaaaaa" + value: "bbbbbbbbbbbbbbbbbbbbbbb" + } + metadata { + key: "aaaaaaaaaaaaaaaaaaaaaaaa" + value: "bbbbbbbbbbbbbbbbbbbbbbb" + } + metadata { + key: "aaaaaaaaaaaaaaaaaaaaaaaa" + value: "bbbbbbbbbbbbbbbbbbbbbbb" + } + } + } + + } +} +actions { quiesce_drain {} } \ No newline at end of file diff --git a/test/common/http/codec_impl_corpus/method_connect b/test/common/http/codec_impl_corpus/method_connect new file mode 100644 index 0000000000000..d3682266d5e72 --- /dev/null +++ b/test/common/http/codec_impl_corpus/method_connect @@ -0,0 +1,10 @@ +actions { + new_stream { + request_headers { + headers { + key: ":method" + value: "CONNECT" + } + } + } +} \ No newline at end of file diff --git a/test/common/http/codec_impl_corpus/read_disable b/test/common/http/codec_impl_corpus/read_disable new file mode 100644 index 0000000000000..d0525e6285ad3 --- /dev/null +++ b/test/common/http/codec_impl_corpus/read_disable @@ -0,0 +1,25 @@ +actions { + new_stream { + request_headers { + headers { + key: ":method" + value: "GET" + } + headers { + key: ":path" + value: "/" + } + } + } +} +actions { + client_drain { + } +} +actions { + stream_action { + response { + read_disable: true + } + } +} diff --git a/test/common/http/codec_impl_corpus/response_204_A 
b/test/common/http/codec_impl_corpus/response_204_A new file mode 100644 index 0000000000000..61d1fc05b28a6 --- /dev/null +++ b/test/common/http/codec_impl_corpus/response_204_A @@ -0,0 +1,41 @@ +actions { + new_stream { + request_headers { + headers { + key: ":path" + value: "/" + } + actions { +} +actions { + mutate { + offset: 255 + value: 255 + } +} +actions { +} +actions { +} headers { + key: ":method" + value: "GET" + } + } + } +} +actions { + client_drain { + } +} +actions { + stream_action { + response { + headers { + headers { + key: ":status" + value: "204" + } + } + } + } +} diff --git a/test/common/http/codec_impl_corpus/response_204_B b/test/common/http/codec_impl_corpus/response_204_B new file mode 100644 index 0000000000000..985ed79951a32 --- /dev/null +++ b/test/common/http/codec_impl_corpus/response_204_B @@ -0,0 +1,37 @@ +actions { + new_stream { + request_headers { + headers { + key: ":path" + value: "/" + } + headers { + key: ":method" + value: "GET" + } + } + } +} +actions { + client_drain { + } +} +actions { + stream_action { + response { + headers { + headers { + key: ":status" + value: "204" + } + } + } + } +} +actions { + stream_action { + response { + data: 64 + } + } +} diff --git a/test/common/http/codec_impl_corpus/simple_stream b/test/common/http/codec_impl_corpus/simple_stream index dc23bacfcd36e..294b8df5a3492 100644 --- a/test/common/http/codec_impl_corpus/simple_stream +++ b/test/common/http/codec_impl_corpus/simple_stream @@ -1,5 +1,15 @@ actions { new_stream { + metadata { + metadata { + key: "" + value: "" + } + metadata { + key: "new_key" + value: "new_value" + } + } request_headers { headers { key: ":method" diff --git a/test/common/http/codec_impl_fuzz.proto b/test/common/http/codec_impl_fuzz.proto index f5d39f9ded2f0..99f9f9592b054 100644 --- a/test/common/http/codec_impl_fuzz.proto +++ b/test/common/http/codec_impl_fuzz.proto @@ -10,6 +10,9 @@ import "test/fuzz/common.proto"; // Structured input for H2 codec_impl_fuzz_test. 
message NewStream { + // Optional metadata before request headers. + // Metadata sent after request headers can be send via a directional action. + test.fuzz.Metadata metadata = 3; test.fuzz.Headers request_headers = 1 [(validate.rules).message.required = true]; bool end_stream = 2; } @@ -22,6 +25,7 @@ message DirectionalAction { uint32 data = 3; string data_value = 8; test.fuzz.Headers trailers = 4; + test.fuzz.Metadata metadata = 9; uint32 reset_stream = 5; bool read_disable = 6; } @@ -36,6 +40,12 @@ message StreamAction { DirectionalAction request = 2; DirectionalAction response = 3; } + // Optionally set a dispatching action. This is a directional action that will + // be called while the stream action is sending headers, data, or trailers. + // This will only apply to request stream actions (so that the dispatching + // action occurs in the response direction). This may happen as a result of a + // filter sending a direct response. + DirectionalAction dispatching_action = 4; } message MutateAction { diff --git a/test/common/http/codec_impl_fuzz_test.cc b/test/common/http/codec_impl_fuzz_test.cc index 33856e11a6ac8..50b4cac3aacf2 100644 --- a/test/common/http/codec_impl_fuzz_test.cc +++ b/test/common/http/codec_impl_fuzz_test.cc @@ -42,6 +42,17 @@ template T fromSanitizedHeaders(const test::fuzz::Headers& headers) { return Fuzz::fromHeaders(headers, {"transfer-encoding"}); } +// Template specialization for TestRequestHeaderMapImpl to include a Host header. This guards +// against missing host headers in CONNECT requests that would have failed parsing on ingress. +// TODO(#10878): When proper error handling is introduced for non-dispatching codec calls, remove +// this and fail gracefully. +template <> +TestRequestHeaderMapImpl +fromSanitizedHeaders(const test::fuzz::Headers& headers) { + return Fuzz::fromHeaders(headers, {"transfer-encoding"}, + {":authority", ":method", ":path"}); +} + // Convert from test proto Http1ServerSettings to Http1Settings. 
Http1Settings fromHttp1Settings(const test::common::http::Http1ServerSettings& settings) { Http1Settings h1_settings; @@ -77,6 +88,7 @@ fromHttp2Settings(const test::common::http::Http2Settings& settings) { settings.initial_connection_window_size() % (1 + Http2Utility::OptionsLimits::MAX_INITIAL_CONNECTION_WINDOW_SIZE - Http2Utility::OptionsLimits::MIN_INITIAL_CONNECTION_WINDOW_SIZE)); + options.set_allow_metadata(true); return options; } @@ -169,6 +181,7 @@ class HttpStream : public LinkedObject { if (!end_stream) { request_.request_encoder_->getStream().addCallbacks(request_.stream_callbacks_); } + request_.request_encoder_->encodeHeaders(request_headers, end_stream); request_.stream_state_ = end_stream ? StreamState::Closed : StreamState::PendingDataOrTrailers; response_.stream_state_ = StreamState::PendingHeaders; @@ -260,6 +273,17 @@ class HttpStream : public LinkedObject { } break; } + case test::common::http::DirectionalAction::kMetadata: { + if (state.isLocalOpen() && state.stream_state_ != StreamState::Closed) { + if (response) { + state.response_encoder_->encodeMetadata( + Fuzz::fromMetadata(directional_action.metadata())); + } else { + state.request_encoder_->encodeMetadata(Fuzz::fromMetadata(directional_action.metadata())); + } + } + break; + } case test::common::http::DirectionalAction::kResetStream: { if (state.stream_state_ != StreamState::Closed) { StreamEncoder* encoder; @@ -307,6 +331,29 @@ class HttpStream : public LinkedObject { ENVOY_LOG_MISC(debug, "Request stream action on {} in state {} {}", stream_index_, static_cast(request_.stream_state_), static_cast(response_.stream_state_)); + if (stream_action.has_dispatching_action()) { + // Simulate some response action while dispatching request headers, data, or trailers. This + // may happen as a result of a filter sending a direct response. 
+ ENVOY_LOG_MISC(debug, "Setting dispatching action on {} in state {} {}", stream_index_, + static_cast(request_.stream_state_), + static_cast(response_.stream_state_)); + auto request_action = stream_action.request().directional_action_selector_case(); + if (request_action == test::common::http::DirectionalAction::kHeaders) { + EXPECT_CALL(request_.request_decoder_, decodeHeaders_(_, _)) + .WillOnce(InvokeWithoutArgs( + [&] { directionalAction(response_, stream_action.dispatching_action()); })); + } else if (request_action == test::common::http::DirectionalAction::kData) { + EXPECT_CALL(request_.request_decoder_, decodeData(_, _)) + .Times(testing::AtLeast(1)) + .WillRepeatedly(InvokeWithoutArgs( + [&] { directionalAction(response_, stream_action.dispatching_action()); })); + } else if (request_action == test::common::http::DirectionalAction::kTrailers) { + EXPECT_CALL(request_.request_decoder_, decodeTrailers_(_)) + .WillOnce(InvokeWithoutArgs( + [&] { directionalAction(response_, stream_action.dispatching_action()); })); + } + } + // Perform the stream action. 
directionalAction(request_, stream_action.request()); break; } @@ -345,14 +392,20 @@ class ReorderBuffer { bufs_.back().move(data); } - void drain() { + Http::Status drain() { + Status status = Http::okStatus(); while (!bufs_.empty()) { Buffer::OwnedImpl& buf = bufs_.front(); while (buf.length() > 0) { - connection_.dispatch(buf); + status = connection_.dispatch(buf); + if (!status.ok()) { + ENVOY_LOG_MISC(trace, "Error status: {}", status.message()); + return status; + } } bufs_.pop_front(); } + return status; } void mutate(uint32_t buffer, uint32_t offset, uint8_t value) { @@ -401,39 +454,44 @@ void codecFuzz(const test::common::http::CodecImplFuzzTestCase& input, HttpVersi fromHttp2Settings(input.h2_settings().client())}; const Http1Settings client_http1settings; NiceMock client_callbacks; + NiceMock server_connection; + NiceMock server_callbacks; uint32_t max_request_headers_kb = Http::DEFAULT_MAX_REQUEST_HEADERS_KB; uint32_t max_request_headers_count = Http::DEFAULT_MAX_HEADERS_COUNT; uint32_t max_response_headers_count = Http::DEFAULT_MAX_HEADERS_COUNT; const envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction headers_with_underscores_action = envoy::config::core::v3::HttpProtocolOptions::ALLOW; + + Http1::CodecStats::AtomicPtr http1_stats; + Http2::CodecStats::AtomicPtr http2_stats; ClientConnectionPtr client; ServerConnectionPtr server; const bool http2 = http_version == HttpVersion::Http2; if (http2) { - client = std::make_unique( - client_connection, client_callbacks, stats_store, client_http2_options, - max_request_headers_kb, max_response_headers_count, + client = std::make_unique( + client_connection, client_callbacks, Http2::CodecStats::atomicGet(http2_stats, stats_store), + client_http2_options, max_request_headers_kb, max_response_headers_count, Http2::ProdNghttp2SessionFactory::get()); } else { - client = std::make_unique(client_connection, stats_store, - client_callbacks, client_http1settings, - max_response_headers_count); 
+ client = std::make_unique( + client_connection, Http1::CodecStats::atomicGet(http1_stats, stats_store), client_callbacks, + client_http1settings, max_response_headers_count); } - NiceMock server_connection; - NiceMock server_callbacks; if (http2) { const envoy::config::core::v3::Http2ProtocolOptions server_http2_options{ fromHttp2Settings(input.h2_settings().server())}; - server = std::make_unique( - server_connection, server_callbacks, stats_store, server_http2_options, - max_request_headers_kb, max_request_headers_count, headers_with_underscores_action); + server = std::make_unique( + server_connection, server_callbacks, Http2::CodecStats::atomicGet(http2_stats, stats_store), + server_http2_options, max_request_headers_kb, max_request_headers_count, + headers_with_underscores_action); } else { const Http1Settings server_http1settings{fromHttp1Settings(input.h1_settings().server())}; server = std::make_unique( - server_connection, stats_store, server_callbacks, server_http1settings, - max_request_headers_kb, max_request_headers_count, headers_with_underscores_action); + server_connection, Http1::CodecStats::atomicGet(http1_stats, stats_store), server_callbacks, + server_http1settings, max_request_headers_kb, max_request_headers_count, + headers_with_underscores_action); } ReorderBuffer client_write_buf{*server}; @@ -465,18 +523,26 @@ void codecFuzz(const test::common::http::CodecImplFuzzTestCase& input, HttpVersi } auto stream_ptr = pending_streams.front()->removeFromList(pending_streams); HttpStream* const stream = stream_ptr.get(); - stream_ptr->moveIntoListBack(std::move(stream_ptr), streams); + LinkedList::moveIntoListBack(std::move(stream_ptr), streams); stream->response_.response_encoder_ = &encoder; encoder.getStream().addCallbacks(stream->response_.stream_callbacks_); stream->stream_index_ = streams.size() - 1; return stream->request_.request_decoder_; })); - const auto client_server_buf_drain = [&client_write_buf, &server_write_buf] { + auto 
client_server_buf_drain = [&client_write_buf, &server_write_buf] { + Http::Status status = Http::okStatus(); while (!client_write_buf.empty() || !server_write_buf.empty()) { - client_write_buf.drain(); - server_write_buf.drain(); + status = client_write_buf.drain(); + if (!status.ok()) { + return status; + } + status = server_write_buf.drain(); + if (!status.ok()) { + return status; + } } + return status; }; // We track whether the connection should be closed for HTTP/1, since stream resets imply @@ -484,92 +550,103 @@ void codecFuzz(const test::common::http::CodecImplFuzzTestCase& input, HttpVersi bool should_close_connection = false; constexpr auto max_actions = 1024; - try { - for (int i = 0; i < std::min(max_actions, input.actions().size()) && !should_close_connection; - ++i) { - const auto& action = input.actions(i); - ENVOY_LOG_MISC(trace, "action {} with {} streams", action.DebugString(), streams.size()); - switch (action.action_selector_case()) { - case test::common::http::Action::kNewStream: { - if (!http2) { - // HTTP/1 codec needs to have existing streams complete, so make it - // easier to achieve a successful multi-stream example by flushing. - client_server_buf_drain(); - // HTTP/1 client codec can only have a single active stream. - if (!pending_streams.empty() || (!streams.empty() && streams.back()->active())) { - ENVOY_LOG_MISC(trace, "Skipping new stream as HTTP/1 and already have existing stream"); - continue; - } - } - HttpStreamPtr stream = std::make_unique( - *client, - fromSanitizedHeaders(action.new_stream().request_headers()), - action.new_stream().end_stream(), [&should_close_connection, http2]() { - // HTTP/1 codec has stream reset implying connection close. 
- if (!http2) { - should_close_connection = true; - } - }); - stream->moveIntoListBack(std::move(stream), pending_streams); - break; - } - case test::common::http::Action::kStreamAction: { - const auto& stream_action = action.stream_action(); - if (streams.empty()) { + bool codec_error = false; + for (int i = 0; i < std::min(max_actions, input.actions().size()) && !should_close_connection && + !codec_error; + ++i) { + const auto& action = input.actions(i); + ENVOY_LOG_MISC(trace, "action {} with {} streams", action.DebugString(), streams.size()); + switch (action.action_selector_case()) { + case test::common::http::Action::kNewStream: { + if (!http2) { + // HTTP/1 codec needs to have existing streams complete, so make it + // easier to achieve a successful multi-stream example by flushing. + if (!client_server_buf_drain().ok()) { + codec_error = true; break; } - // Index into list of created streams (not HTTP/2 level stream ID). - const uint32_t stream_id = stream_action.stream_id() % streams.size(); - ENVOY_LOG_MISC(trace, "action for stream index {}", stream_id); - (*std::next(streams.begin(), stream_id))->streamAction(stream_action); - break; - } - case test::common::http::Action::kMutate: { - const auto& mutate = action.mutate(); - ReorderBuffer& write_buf = mutate.server() ? server_write_buf : client_write_buf; - write_buf.mutate(mutate.buffer(), mutate.offset(), mutate.value()); - break; + // HTTP/1 client codec can only have a single active stream. + if (!pending_streams.empty() || (!streams.empty() && streams.back()->active())) { + ENVOY_LOG_MISC(trace, "Skipping new stream as HTTP/1 and already have existing stream"); + continue; + } } - case test::common::http::Action::kSwapBuffer: { - const auto& swap_buffer = action.swap_buffer(); - ReorderBuffer& write_buf = swap_buffer.server() ? 
server_write_buf : client_write_buf; - write_buf.swap(swap_buffer.buffer()); + HttpStreamPtr stream = std::make_unique( + *client, + fromSanitizedHeaders(action.new_stream().request_headers()), + action.new_stream().end_stream(), [&should_close_connection, http2]() { + // HTTP/1 codec has stream reset implying connection close. + if (!http2) { + should_close_connection = true; + } + }); + LinkedList::moveIntoListBack(std::move(stream), pending_streams); + break; + } + case test::common::http::Action::kStreamAction: { + const auto& stream_action = action.stream_action(); + if (streams.empty()) { break; } - case test::common::http::Action::kClientDrain: { - client_write_buf.drain(); + // Index into list of created streams (not HTTP/2 level stream ID). + const uint32_t stream_id = stream_action.stream_id() % streams.size(); + ENVOY_LOG_MISC(trace, "action for stream index {}", stream_id); + (*std::next(streams.begin(), stream_id))->streamAction(stream_action); + break; + } + case test::common::http::Action::kMutate: { + const auto& mutate = action.mutate(); + ReorderBuffer& write_buf = mutate.server() ? server_write_buf : client_write_buf; + write_buf.mutate(mutate.buffer(), mutate.offset(), mutate.value()); + break; + } + case test::common::http::Action::kSwapBuffer: { + const auto& swap_buffer = action.swap_buffer(); + ReorderBuffer& write_buf = swap_buffer.server() ? 
server_write_buf : client_write_buf; + write_buf.swap(swap_buffer.buffer()); + break; + } + case test::common::http::Action::kClientDrain: { + if (!client_write_buf.drain().ok()) { + codec_error = true; break; } - case test::common::http::Action::kServerDrain: { - server_write_buf.drain(); + break; + } + case test::common::http::Action::kServerDrain: { + if (!server_write_buf.drain().ok()) { + codec_error = true; break; } - case test::common::http::Action::kQuiesceDrain: { - client_server_buf_drain(); + break; + } + case test::common::http::Action::kQuiesceDrain: { + if (!client_server_buf_drain().ok()) { + codec_error = true; break; } - default: - // Maybe nothing is set? + break; + } + default: + // Maybe nothing is set? + break; + } + if (DebugMode && !should_close_connection && !codec_error) { + if (!client_server_buf_drain().ok()) { + codec_error = true; break; } - if (DebugMode && !should_close_connection) { - client_server_buf_drain(); - } } - // Drain all remaining buffers, unless the connection is effectively closed. - if (!should_close_connection) { - client_server_buf_drain(); - } - if (http2) { - dynamic_cast(*client).goAway(); - dynamic_cast(*server).goAway(); + } + // Drain all remaining buffers, unless the connection is effectively closed. 
+ if (!should_close_connection && !codec_error) { + if (!client_server_buf_drain().ok()) { + codec_error = true; } - } catch (CodecProtocolException& e) { - ENVOY_LOG_MISC(debug, "CodecProtocolException {}", e.what()); - } catch (CodecClientException& e) { - ENVOY_LOG_MISC(debug, "CodecClientException {}", e.what()); - } catch (PrematureResponseException& e) { - ENVOY_LOG_MISC(debug, "PrematureResponseException {}", e.what()); + } + if (!codec_error && http2) { + dynamic_cast(*client).goAway(); + dynamic_cast(*server).goAway(); } } diff --git a/test/common/http/codes_test.cc b/test/common/http/codes_test.cc index 136519f60c5a4..9a071f8c122f9 100644 --- a/test/common/http/codes_test.cc +++ b/test/common/http/codes_test.cc @@ -53,6 +53,7 @@ class CodeUtilityTest : public testing::Test { }; TEST_F(CodeUtilityTest, GroupStrings) { + EXPECT_EQ("1xx", CodeUtility::groupStringForResponseCode(Code::SwitchingProtocols)); EXPECT_EQ("2xx", CodeUtility::groupStringForResponseCode(Code::OK)); EXPECT_EQ("3xx", CodeUtility::groupStringForResponseCode(Code::Found)); EXPECT_EQ("4xx", CodeUtility::groupStringForResponseCode(Code::NotFound)); diff --git a/test/common/http/common.h b/test/common/http/common.h index 7eacfa3ad03bb..2cd5a9db335b6 100644 --- a/test/common/http/common.h +++ b/test/common/http/common.h @@ -28,7 +28,7 @@ class CodecClientForTest : public Http::CodecClient { destroy_cb_(this); } } - void raiseGoAway() { onGoAway(); } + void raiseGoAway(Http::GoAwayErrorCode error_code) { onGoAway(error_code); } Event::Timer* idleTimer() { return idle_timer_.get(); } DestroyCb destroy_cb_; diff --git a/test/common/http/conn_manager_impl_corpus/clusterfuzz-testcase-continueandendstream-endstream b/test/common/http/conn_manager_impl_corpus/clusterfuzz-testcase-continueandendstream-endstream new file mode 100644 index 0000000000000..2b64d93f4355b --- /dev/null +++ b/test/common/http/conn_manager_impl_corpus/clusterfuzz-testcase-continueandendstream-endstream @@ -0,0 +1,16 @@ 
+actions { + new_stream { + request_headers { + headers { + key: ":path" + value: "/" + } + headers { + key: ":authority" + value: "foo.com" + } + } + end_stream: true + status: HEADER_CONTINUE_AND_END_STREAM + } +} diff --git a/test/common/http/conn_manager_impl_corpus/clusterfuzz-testcase-failed-dispatch b/test/common/http/conn_manager_impl_corpus/clusterfuzz-testcase-failed-dispatch new file mode 100644 index 0000000000000..485481def130b --- /dev/null +++ b/test/common/http/conn_manager_impl_corpus/clusterfuzz-testcase-failed-dispatch @@ -0,0 +1,294 @@ +actions { +} +actions { + new_stream { + request_headers { + headers { + key: ":scheme" + value: "t" + } + headers { + key: ":method" + value: "GET" + } + headers { + key: "blah" + value: "nosniff" + } + headers { + key: "blah" + value: "nosniff" + } + headers { + key: "\'" + } + headers { + key: ":path" + value: "/" + } + headers { + key: ":authority" + value: "foo.com" + } + } + } +} +actions { + new_stream { + } +} +actions { + stream_action { + stream_id: 1634017305 + request { + trailers { + headers { + headers { + key: "&" + } + } + } + } + } +} +actions { + new_stream { + } +} +actions { + stream_action { + stream_id: 1073741824 + } +} +actions { + new_stream { + } +} +actions { + new_stream { + end_stream: true + } +} +actions { + new_stream { + } +} +actions { + new_stream { + } +} +actions { + new_stream { + } +} +actions { + stream_action { + stream_id: 1073741824 + } +} +actions { + new_stream { + } +} +actions { +} +actions { + new_stream { + end_stream: true + } +} +actions { + new_stream { + request_headers { + headers { + key: ":scheme" + value: "\'" + } + headers { + key: ":method" + value: "GOT" + } + headers { + key: ":path" + value: "/" + } + headers { + key: ":authority" + value: "foo.com" + } + } + end_stream: true + } +} +actions { +} +actions { +} +actions { + new_stream { + request_headers { + headers { + key: ":scheme" + value: "t" + } + headers { + key: ":method" + value: "GET" + } + 
headers { + key: "blah" + value: "nosniff" + } + headers { + key: "blah" + value: "nosniff" + } + headers { + key: "\'" + } + headers { + key: ":path" + value: "/" + } + headers { + key: "/" + value: "foo.com" + } + } + } +} +actions { + new_stream { + end_stream: true + } +} +actions { + new_stream { + } +} +actions { + stream_action { + request { + throw_decoder_exception { + } + } + } +} +actions { + stream_action { + request { + data { + status: DATA_STOP_ITERATION_NO_BUFFER + } + } + } +} +actions { + new_stream { + request_headers { + headers { + key: "\'" + } + } + } +} +actions { + stream_action { + stream_id: 1073741824 + } +} +actions { + new_stream { + } +} +actions { +} +actions { + new_stream { + end_stream: true + } +} +actions { + new_stream { + } +} +actions { + new_stream { + request_headers { + headers { + key: "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" + } + } + } +} +actions { + stream_action { + request { + data { + decoder_filter_callback_action { + add_decoded_data { + size: 262144 + } + } + } + } + } +} +actions { + new_stream { + } +} +actions { +} +actions { + new_stream { + request_headers { + headers { + key: ":scheme" + value: "t" + } + headers { + key: ":method" + value: "GET" + } + headers { + key: "blah" + value: "nosniff" + } + headers { + key: "blah" + value: "nosniff" + } + headers { + key: "\'" + } + headers { + key: ":path" + value: "/" + } + headers { + key: ":authority" + value: "foo.com" + } + } + } +} +actions { + new_stream { + request_headers { + headers { + key: "&" + } + } + } +} +actions { + stream_action { + stream_id: 4294967295 + } +} +actions { + stream_action { + stream_id: 4 + } +} \ No newline at end of file diff --git a/test/common/http/conn_manager_impl_corpus/clusterfuzz-testcase-invalidhost b/test/common/http/conn_manager_impl_corpus/clusterfuzz-testcase-invalidhost new file mode 100644 index 0000000000000..c6ab3140f0f2d --- /dev/null +++ 
b/test/common/http/conn_manager_impl_corpus/clusterfuzz-testcase-invalidhost @@ -0,0 +1,108 @@ +actions { + stream_action { + response { + data: 2683 + } + } +} +actions { + new_stream { + request_headers { + headers { + key: ":scheme" + value: "t" + } + headers { + key: ":method" + value: "GET" + } + headers { + key: "blah" + value: "nosniff" + } + headers { + key: "blah" + value: "nosniff" + } + headers { + key: "\'" + } + headers { + key: ":path" + value: "/" + } + headers { + key: ":authority" + value: "foKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK 
KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKjKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKAKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKEKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK
KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKZKKKKKKKKKKKKKdKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK>KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK2KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK
KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK]KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK957191215689797641957=bar1" + } + } + end_stream: true + } +} +actions { +} +actions { + new_stream { + 
end_stream: true + status: HEADER_STOP_ALL_ITERATION_AND_WATERMARK + } +} +actions { + new_stream { + end_stream: true + } +} +actions { + stream_action { + stream_id: 721420288 + } +} +actions { + stream_action { + stream_id: 1024 + } +} +actions { + stream_action { + request { + trailers { + status: TRAILER_STOP_ITERATION + } + } + } +} +actions { + new_stream { + end_stream: true + } +} +actions { + new_stream { + } +} +actions { +} +actions { + new_stream { + } +} +actions { + new_stream { + } +} +actions { + new_stream { + } +} +actions { +} +actions { +} +actions { + new_stream { + } +} +actions { + new_stream { + } +} \ No newline at end of file diff --git a/test/common/http/conn_manager_impl_corpus/clusterfuzz-testcase-minimized-conn_manager_impl_fuzz_test-5669833168912384 b/test/common/http/conn_manager_impl_corpus/clusterfuzz-testcase-minimized-conn_manager_impl_fuzz_test-5669833168912384 new file mode 100644 index 0000000000000..a25a96c91dbd6 --- /dev/null +++ b/test/common/http/conn_manager_impl_corpus/clusterfuzz-testcase-minimized-conn_manager_impl_fuzz_test-5669833168912384 @@ -0,0 +1 @@ +actions { new_stream { request_headers { headers { key: ":path" value: "/" } headers { key: ":authority" } } } } actions { stream_action { request { continue_decoding { } } } } diff --git a/test/common/http/conn_manager_impl_corpus/clusterfuzz-testcase-minimized-conn_manager_impl_fuzz_test-5674283828772864 b/test/common/http/conn_manager_impl_corpus/clusterfuzz-testcase-minimized-conn_manager_impl_fuzz_test-5674283828772864 new file mode 100644 index 0000000000000..64081c27010c4 --- /dev/null +++ b/test/common/http/conn_manager_impl_corpus/clusterfuzz-testcase-minimized-conn_manager_impl_fuzz_test-5674283828772864 @@ -0,0 +1,120 @@ +actions { + stream_action { + request { + trailers { + headers { + headers { + key: "foo" + value: "bar" + } + } + decoder_filter_callback_action { + add_decoded_data { + size: 1000000 + } + } + } + } + } +} +actions { + new_stream { + 
request_headers { + headers { + key: ":method" + value: "GET" + } + headers { + key: ":path" + value: "/" + } + headers { + key: ":scheme" + value: "http" + } + headers { + key: ":authority" + value: "foo.com" + } + headers { + key: "blah" + value: "nosniff" + } + headers { + key: "cookie" + value: "foo=bar" + } + headers { + key: "cookie" + value: "foo2=bar2" + } + } + } +} +actions { + stream_action { + request { + data { + size: 3000000 + status: DATA_STOP_ITERATION_AND_BUFFER + decoder_filter_callback_action { + add_decoded_data { + size: 1000000 + } + } + } + } + } +} +actions { + stream_action { + response { + trailers { + headers { + key: "foo" + value: "bar" + } + } + } + } +} +actions { + stream_action { + stream_id: 5505024 + } +} +actions { + stream_action { + response { + continue_headers { + } + } + } +} +actions { + stream_action { + request { + continue_decoding { + } + } + } +} +actions { + stream_action { + response { + data: 5 + } + } +} +actions { + stream_action { + response { + headers { + headers { + key: ":status" + value: "200" + } + } + } + } +} diff --git a/test/common/http/conn_manager_impl_corpus/clusterfuzz-testcase-minimized-conn_manager_impl_fuzz_test-5701624673861632 b/test/common/http/conn_manager_impl_corpus/clusterfuzz-testcase-minimized-conn_manager_impl_fuzz_test-5701624673861632 new file mode 100644 index 0000000000000..8cc7ae72c499d --- /dev/null +++ b/test/common/http/conn_manager_impl_corpus/clusterfuzz-testcase-minimized-conn_manager_impl_fuzz_test-5701624673861632 @@ -0,0 +1,30 @@ +actions { + new_stream { + request_headers { + headers { + key: ":path" + value: "/" + } + headers { + key: ":authority" + value: "foo.com" + } + } + } +} +actions { + stream_action { + response { + continue_headers { + } + } + } +} +actions { + stream_action { + response { + continue_headers { + } + } + } +} diff --git a/test/common/http/conn_manager_impl_corpus/invalid_host b/test/common/http/conn_manager_impl_corpus/invalid_host new file mode 
100644 index 0000000000000..7ce2011b96682 --- /dev/null +++ b/test/common/http/conn_manager_impl_corpus/invalid_host @@ -0,0 +1,100 @@ +actions { + new_stream { + request_headers { + headers { + key: ":method" + value: "G?T" + } + headers { + key: ":path" + value: "/" + } + headers { + key: "cookie" + value: "http" + } + headers { + key: ":authority" + value: "foo.c/m" + } + headers { + key: ":path" + value: "foo-968957191215689797641957=bar1" + } + } + } +} +actions { + new_stream { + } +} +actions { + new_stream { + end_stream: true + } +} +actions { + new_stream { + } +} +actions { + new_stream { + } +} +actions { + new_stream { + } +} +actions { + new_stream { + } +} +actions { + new_stream { + end_stream: true + } +} +actions { + new_stream { + end_stream: true + } +} +actions { + new_stream { + end_stream: true + } +} +actions { + stream_action { + stream_id: 67108864 + request { + data { + size: 67108864 + } + } + } +} +actions { + new_stream { + } +} +actions { + new_stream { + } +} +actions { + new_stream { + } +} +actions { + new_stream { + } +} +actions { + new_stream { + } +} +actions { + new_stream { + } +} \ No newline at end of file diff --git a/test/common/http/conn_manager_impl_corpus/status_163 b/test/common/http/conn_manager_impl_corpus/status_163 new file mode 100644 index 0000000000000..3c8e7c99f56fc --- /dev/null +++ b/test/common/http/conn_manager_impl_corpus/status_163 @@ -0,0 +1,22 @@ +actions { + new_stream { + request_headers { + headers { + key: ":path" + value: "/" + } + } + } +} +actions { + stream_action { + response { + headers { + headers { + key: ":status" + value: "162" + } + } + } + } +} \ No newline at end of file diff --git a/test/common/http/conn_manager_impl_corpus/upgrade_test_case b/test/common/http/conn_manager_impl_corpus/upgrade_test_case new file mode 100644 index 0000000000000..8ec4c2364e32d --- /dev/null +++ b/test/common/http/conn_manager_impl_corpus/upgrade_test_case @@ -0,0 +1,53 @@ +actions { + new_stream { + 
request_headers { + headers { + key: ":method" + value: "GET" + } + headers { + key: ":path" + value: "/" + } + headers { + key: ":scheme" + value: "http" + } + headers { + key: ":authority" + value: "host" + } + headers { + key: "connection" + value: "upgrade" + } + headers { + key: "upgrade" + value: "WebSocket" + } + } + } +} + + +actions { + stream_action { + stream_id: 0 + response { + headers { + headers { + key: "connection" + value: "upgrade" + } + headers { + key: "upgrade" + value: "WebSocket" + } + headers { + key: ":status" + value: "101" + } + } + } + } +} diff --git a/test/common/http/conn_manager_impl_fuzz.proto b/test/common/http/conn_manager_impl_fuzz.proto index a6a3617d0165e..92d6e1c32652f 100644 --- a/test/common/http/conn_manager_impl_fuzz.proto +++ b/test/common/http/conn_manager_impl_fuzz.proto @@ -6,17 +6,22 @@ import "google/protobuf/empty.proto"; import "test/fuzz/common.proto"; +import "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto"; + // Structured input for conn_manager_impl_fuzz_test. message NewStream { test.fuzz.Headers request_headers = 1; bool end_stream = 2; - // TODO(htuch): Support stop/continue status with headers. + HeaderStatus status = 3; } enum HeaderStatus { HEADER_CONTINUE = 0; HEADER_STOP_ITERATION = 1; + HEADER_CONTINUE_AND_END_STREAM = 2; + HEADER_STOP_ALL_ITERATION_AND_BUFFER = 3; + HEADER_STOP_ALL_ITERATION_AND_WATERMARK = 4; } enum DataStatus { @@ -58,7 +63,9 @@ message RequestAction { DataAction data = 1; TrailerAction trailers = 2; google.protobuf.Empty continue_decoding = 3; - google.protobuf.Empty throw_decoder_exception = 4; + // Dispatch no longer throws, but rather returns an error status. + google.protobuf.Empty throw_decoder_exception = 4 [deprecated = true]; + google.protobuf.Empty return_decoder_error = 5; // TODO(htuch): Model and fuzz watermark events. 
} } @@ -94,4 +101,6 @@ message Action { message ConnManagerImplTestCase { repeated Action actions = 1; + envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + .ForwardClientCertDetails forward_client_cert = 2; } diff --git a/test/common/http/conn_manager_impl_fuzz_test.cc b/test/common/http/conn_manager_impl_fuzz_test.cc index 38d471ded4cc2..99f8b18dc779d 100644 --- a/test/common/http/conn_manager_impl_fuzz_test.cc +++ b/test/common/http/conn_manager_impl_fuzz_test.cc @@ -34,6 +34,7 @@ #include "test/mocks/network/mocks.h" #include "test/mocks/router/mocks.h" #include "test/mocks/runtime/mocks.h" +#include "test/mocks/server/mocks.h" #include "test/mocks/ssl/mocks.h" #include "test/mocks/tracing/mocks.h" #include "test/mocks/upstream/mocks.h" @@ -62,17 +63,20 @@ class FuzzConfig : public ConnectionManagerConfig { std::shared_ptr route_config_{new NiceMock()}; }; - FuzzConfig() + FuzzConfig(envoy::extensions::filters::network::http_connection_manager::v3:: + HttpConnectionManager::ForwardClientCertDetails forward_client_cert) : stats_({ALL_HTTP_CONN_MAN_STATS(POOL_COUNTER(fake_stats_), POOL_GAUGE(fake_stats_), POOL_HISTOGRAM(fake_stats_))}, "", fake_stats_), tracing_stats_{CONN_MAN_TRACING_STATS(POOL_COUNTER(fake_stats_))}, - listener_stats_{CONN_MAN_LISTENER_STATS(POOL_COUNTER(fake_stats_))} { + listener_stats_{CONN_MAN_LISTENER_STATS(POOL_COUNTER(fake_stats_))}, + local_reply_(LocalReply::Factory::createDefault()) { ON_CALL(route_config_provider_, lastUpdated()).WillByDefault(Return(time_system_.systemTime())); ON_CALL(scoped_route_config_provider_, lastUpdated()) .WillByDefault(Return(time_system_.systemTime())); access_logs_.emplace_back(std::make_shared>()); request_id_extension_ = RequestIDExtensionFactory::defaultInstance(random_); + forward_client_cert_ = fromClientCert(forward_client_cert); } void newStream() { @@ -86,8 +90,42 @@ class FuzzConfig : public ConnectionManagerConfig { 
callbacks.addStreamDecoderFilter(StreamDecoderFilterSharedPtr{decoder_filter_}); callbacks.addStreamEncoderFilter(StreamEncoderFilterSharedPtr{encoder_filter_}); })); - EXPECT_CALL(*decoder_filter_, setDecoderFilterCallbacks(_)); + EXPECT_CALL(*decoder_filter_, setDecoderFilterCallbacks(_)) + .WillOnce(Invoke([this](StreamDecoderFilterCallbacks& callbacks) -> void { + decoder_filter_->callbacks_ = &callbacks; + callbacks.streamInfo().setResponseCodeDetails(""); + })); EXPECT_CALL(*encoder_filter_, setEncoderFilterCallbacks(_)); + EXPECT_CALL(filter_factory_, createUpgradeFilterChain("WebSocket", _, _)) + .WillRepeatedly(Invoke([&](absl::string_view, const Http::FilterChainFactory::UpgradeMap*, + FilterChainFactoryCallbacks& callbacks) -> bool { + filter_factory_.createFilterChain(callbacks); + return true; + })); + } + + Http::ForwardClientCertType + fromClientCert(envoy::extensions::filters::network::http_connection_manager::v3:: + HttpConnectionManager::ForwardClientCertDetails forward_client_cert) { + switch (forward_client_cert) { + case envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager:: + SANITIZE: + return Http::ForwardClientCertType::Sanitize; + case envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager:: + FORWARD_ONLY: + return Http::ForwardClientCertType::ForwardOnly; + case envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager:: + APPEND_FORWARD: + return Http::ForwardClientCertType::AppendForward; + case envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager:: + SANITIZE_SET: + return Http::ForwardClientCertType::SanitizeSet; + case envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager:: + ALWAYS_FORWARD_ONLY: + return Http::ForwardClientCertType::AlwaysForwardOnly; + default: + return Http::ForwardClientCertType::Sanitize; + } } // Http::ConnectionManagerConfig @@ -103,6 +141,7 @@ 
class FuzzConfig : public ConnectionManagerConfig { FilterChainFactory& filterFactory() override { return filter_factory_; } bool generateRequestId() const override { return true; } bool preserveExternalRequestId() const override { return false; } + bool alwaysSetRequestIdInResponse() const override { return false; } uint32_t maxRequestHeadersKb() const override { return max_request_headers_kb_; } uint32_t maxRequestHeadersCount() const override { return max_request_headers_count_; } absl::optional idleTimeout() const override { return idle_timeout_; } @@ -152,17 +191,22 @@ class FuzzConfig : public ConnectionManagerConfig { const TracingConnectionManagerConfig* tracingConfig() override { return tracing_config_.get(); } ConnectionManagerListenerStats& listenerStats() override { return listener_stats_; } bool proxy100Continue() const override { return proxy_100_continue_; } + bool streamErrorOnInvalidHttpMessaging() const override { + return stream_error_on_invalid_http_messaging_; + } const Http::Http1Settings& http1Settings() const override { return http1_settings_; } bool shouldNormalizePath() const override { return false; } bool shouldMergeSlashes() const override { return false; } + bool shouldStripMatchingPort() const override { return false; } envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction headersWithUnderscoresAction() const override { return envoy::config::core::v3::HttpProtocolOptions::ALLOW; } + const LocalReply::LocalReply& localReply() const override { return *local_reply_; } const envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager config_; - NiceMock random_; + NiceMock random_; RequestIDExtensionSharedPtr request_id_extension_; std::list access_logs_; MockServerConnection* codec_{}; @@ -190,17 +234,19 @@ class FuzzConfig : public ConnectionManagerConfig { std::chrono::milliseconds request_timeout_{}; std::chrono::milliseconds delayed_close_timeout_{}; bool use_remote_address_{true}; - 
Http::ForwardClientCertType forward_client_cert_{Http::ForwardClientCertType::Sanitize}; + Http::ForwardClientCertType forward_client_cert_; std::vector set_current_client_cert_details_; Network::Address::Ipv4Instance local_address_{"127.0.0.1"}; absl::optional user_agent_; Tracing::HttpTracerSharedPtr http_tracer_{std::make_shared>()}; TracingConnectionManagerConfigPtr tracing_config_; bool proxy_100_continue_{true}; + bool stream_error_on_invalid_http_messaging_ = false; bool preserve_external_request_id_{false}; Http::Http1Settings http1_settings_; Http::DefaultInternalAddressConfig internal_address_config_; bool normalize_path_{true}; + LocalReply::LocalReplyPtr local_reply_; }; // Internal representation of stream state. Encapsulates the stream state, mocks @@ -213,14 +259,22 @@ class FuzzStream { // course, it's the codecs must be robust to wire-level violations. We // explore these violations via MutateAction and SwapAction at the connection // buffer level. - enum class StreamState { PendingHeaders, PendingDataOrTrailers, Closed }; + enum class StreamState { + PendingHeaders, + PendingNonInformationalHeaders, + PendingDataOrTrailers, + Closed + }; FuzzStream(ConnectionManagerImpl& conn_manager, FuzzConfig& config, - const HeaderMap& request_headers, bool end_stream) + const HeaderMap& request_headers, + test::common::http::HeaderStatus decode_header_status, bool end_stream) : conn_manager_(conn_manager), config_(config) { config_.newStream(); request_state_ = end_stream ? 
StreamState::Closed : StreamState::PendingDataOrTrailers; response_state_ = StreamState::PendingHeaders; + decoder_filter_ = config.decoder_filter_; + encoder_filter_ = config.encoder_filter_; EXPECT_CALL(*config_.codec_, dispatch(_)) .WillOnce(InvokeWithoutArgs([this, &request_headers, end_stream] { decoder_ = &conn_manager_.newStream(encoder_); @@ -229,11 +283,10 @@ class FuzzStream { headers->setReferenceKey(Headers::get().Method, "GET"); } if (headers->Host() != nullptr && - !HeaderUtility::authorityIsValid(headers->Host()->value().getStringView())) { + !HeaderUtility::authorityIsValid(headers->getHostValue())) { // Sanitize host header so we don't fail at ASSERTs that verify header sanity checks // which should have been performed by the codec. - headers->setHost( - Fuzz::replaceInvalidHostCharacters(headers->Host()->value().getStringView())); + headers->setHost(Fuzz::replaceInvalidHostCharacters(headers->getHostValue())); } // If sendLocalReply is called: ON_CALL(encoder_, encodeHeaders(_, true)) @@ -242,10 +295,20 @@ class FuzzStream { end_stream ? 
StreamState::Closed : StreamState::PendingDataOrTrailers; })); decoder_->decodeHeaders(std::move(headers), end_stream); + return Http::okStatus(); + })); + ON_CALL(*decoder_filter_, decodeHeaders(_, _)) + .WillByDefault(InvokeWithoutArgs([this, decode_header_status, + end_stream]() -> Http::FilterHeadersStatus { + header_status_ = fromHeaderStatus(decode_header_status); + // When a filter should not return ContinueAndEndStream when send with end_stream set + // (see https://github.com/envoyproxy/envoy/pull/4885#discussion_r232176826) + if (end_stream && (*header_status_ == Http::FilterHeadersStatus::ContinueAndEndStream)) { + *header_status_ = Http::FilterHeadersStatus::Continue; + } + return *header_status_; })); fakeOnData(); - decoder_filter_ = config.decoder_filter_; - encoder_filter_ = config.encoder_filter_; FUZZ_ASSERT(testing::Mock::VerifyAndClearExpectations(config_.codec_)); } @@ -260,6 +323,12 @@ class FuzzStream { return Http::FilterHeadersStatus::Continue; case test::common::http::HeaderStatus::HEADER_STOP_ITERATION: return Http::FilterHeadersStatus::StopIteration; + case test::common::http::HeaderStatus::HEADER_CONTINUE_AND_END_STREAM: + return Http::FilterHeadersStatus::ContinueAndEndStream; + case test::common::http::HeaderStatus::HEADER_STOP_ALL_ITERATION_AND_BUFFER: + return Http::FilterHeadersStatus::StopAllIterationAndBuffer; + case test::common::http::HeaderStatus::HEADER_STOP_ALL_ITERATION_AND_WATERMARK: + return Http::FilterHeadersStatus::StopAllIterationAndWatermark; default: return Http::FilterHeadersStatus::Continue; } @@ -319,11 +388,13 @@ class FuzzStream { if (data_action.has_decoder_filter_callback_action()) { decoderFilterCallbackAction(data_action.decoder_filter_callback_action()); } - return fromDataStatus(data_action.status()); + data_status_ = fromDataStatus(data_action.status()); + return *data_status_; })); EXPECT_CALL(*config_.codec_, dispatch(_)).WillOnce(InvokeWithoutArgs([this, &data_action] { Buffer::OwnedImpl 
buf(std::string(data_action.size() % (1024 * 1024), 'a')); decoder_->decodeData(buf, data_action.end_stream()); + return Http::okStatus(); })); fakeOnData(); FUZZ_ASSERT(testing::Mock::VerifyAndClearExpectations(config_.codec_)); @@ -346,6 +417,7 @@ class FuzzStream { .WillOnce(InvokeWithoutArgs([this, &trailers_action] { decoder_->decodeTrailers(std::make_unique( Fuzz::fromHeaders(trailers_action.headers()))); + return Http::okStatus(); })); fakeOnData(); FUZZ_ASSERT(testing::Mock::VerifyAndClearExpectations(config_.codec_)); @@ -354,14 +426,22 @@ class FuzzStream { break; } case test::common::http::RequestAction::kContinueDecoding: { - decoder_filter_->callbacks_->continueDecoding(); + if (header_status_ == FilterHeadersStatus::StopAllIterationAndBuffer || + header_status_ == FilterHeadersStatus::StopAllIterationAndWatermark || + (header_status_ == FilterHeadersStatus::StopIteration && + (data_status_ == FilterDataStatus::StopIterationAndBuffer || + data_status_ == FilterDataStatus::StopIterationAndWatermark || + data_status_ == FilterDataStatus::StopIterationNoBuffer))) { + decoder_filter_->callbacks_->continueDecoding(); + } break; } - case test::common::http::RequestAction::kThrowDecoderException: { + case test::common::http::RequestAction::kThrowDecoderException: + // Dispatch no longer throws, execute subsequent kReturnDecoderError case. 
+ case test::common::http::RequestAction::kReturnDecoderError: { if (state == StreamState::PendingDataOrTrailers) { - EXPECT_CALL(*config_.codec_, dispatch(_)).WillOnce(InvokeWithoutArgs([] { - throw CodecProtocolException("blah"); - })); + EXPECT_CALL(*config_.codec_, dispatch(_)) + .WillOnce(testing::Return(codecProtocolError("blah"))); fakeOnData(); FUZZ_ASSERT(testing::Mock::VerifyAndClearExpectations(config_.codec_)); state = StreamState::Closed; @@ -384,20 +464,30 @@ class FuzzStream { Fuzz::fromHeaders(response_action.continue_headers())); headers->setReferenceKey(Headers::get().Status, "100"); decoder_filter_->callbacks_->encode100ContinueHeaders(std::move(headers)); + // We don't allow multiple 100-continue headers in HCM, UpstreamRequest is responsible + // for coalescing. + state = StreamState::PendingNonInformationalHeaders; } break; } case test::common::http::ResponseAction::kHeaders: { - if (state == StreamState::PendingHeaders) { + if (state == StreamState::PendingHeaders || + state == StreamState::PendingNonInformationalHeaders) { auto headers = std::make_unique( Fuzz::fromHeaders(response_action.headers())); // The client codec will ensure we always have a valid :status. // Similarly, local replies should always contain this. + uint64_t status; try { - Utility::getResponseStatus(*headers); + status = Utility::getResponseStatus(*headers); } catch (const CodecClientException&) { headers->setReferenceKey(Headers::get().Status, "200"); } + // The only 1xx header that may be provided to encodeHeaders() is a 101 upgrade, + // guaranteed by the codec parsers. See include/envoy/http/filter.h. + if (CodeUtility::is1xx(status) && status != enumToInt(Http::Code::SwitchingProtocols)) { + headers->setReferenceKey(Headers::get().Status, "200"); + } decoder_filter_->callbacks_->encodeHeaders(std::move(headers), end_stream); state = end_stream ? 
StreamState::Closed : StreamState::PendingDataOrTrailers; } @@ -449,6 +539,8 @@ class FuzzStream { MockStreamEncoderFilter* encoder_filter_{}; StreamState request_state_; StreamState response_state_; + absl::optional header_status_; + absl::optional data_status_; }; using FuzzStreamPtr = std::unique_ptr; @@ -459,17 +551,21 @@ DEFINE_PROTO_FUZZER(const test::common::http::ConnManagerImplTestCase& input) { } catch (const ProtoValidationException& e) { ENVOY_LOG_MISC(debug, "ProtoValidationException: {}", e.what()); return; + } catch (const Envoy::ProtobufMessage::DeprecatedProtoFieldException& e) { + ENVOY_LOG_MISC(debug, "DeprecatedProtoFieldException: {}", e.what()); + return; } - FuzzConfig config; + FuzzConfig config(input.forward_client_cert()); NiceMock drain_close; - NiceMock random; + NiceMock random; Stats::SymbolTablePtr symbol_table(Stats::SymbolTableCreator::makeSymbolTable()); Http::ContextImpl http_context(*symbol_table); NiceMock runtime; NiceMock local_info; NiceMock cluster_manager; NiceMock filter_callbacks; + NiceMock overload_manager; auto ssl_connection = std::make_shared(); bool connection_alive = true; @@ -483,7 +579,7 @@ DEFINE_PROTO_FUZZER(const test::common::http::ConnManagerImplTestCase& input) { std::make_shared("0.0.0.0"); ConnectionManagerImpl conn_manager(config, drain_close, random, http_context, runtime, local_info, - cluster_manager, nullptr, config.time_system_); + cluster_manager, overload_manager, config.time_system_); conn_manager.initializeReadFilterCallbacks(filter_callbacks); std::vector streams; @@ -499,8 +595,9 @@ DEFINE_PROTO_FUZZER(const test::common::http::ConnManagerImplTestCase& input) { case test::common::http::Action::kNewStream: { streams.emplace_back(new FuzzStream( conn_manager, config, - Fuzz::fromHeaders(action.new_stream().request_headers()), - action.new_stream().end_stream())); + Fuzz::fromHeaders(action.new_stream().request_headers(), + /* ignore_headers =*/{}, {":authority"}), + action.new_stream().status(), 
action.new_stream().end_stream())); break; } case test::common::http::Action::kStreamAction: { diff --git a/test/common/http/conn_manager_impl_test.cc b/test/common/http/conn_manager_impl_test.cc index 324f2c58259f1..a13dd0133a1c8 100644 --- a/test/common/http/conn_manager_impl_test.cc +++ b/test/common/http/conn_manager_impl_test.cc @@ -13,11 +13,11 @@ #include "envoy/type/tracing/v3/custom_tag.pb.h" #include "envoy/type/v3/percent.pb.h" -#include "common/access_log/access_log_formatter.h" #include "common/access_log/access_log_impl.h" #include "common/buffer/buffer_impl.h" #include "common/common/empty_string.h" #include "common/common/macros.h" +#include "common/formatter/substitution_formatter.h" #include "common/http/conn_manager_impl.h" #include "common/http/context_impl.h" #include "common/http/date_provider_impl.h" @@ -39,13 +39,16 @@ #include "test/mocks/network/mocks.h" #include "test/mocks/router/mocks.h" #include "test/mocks/runtime/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" +#include "test/mocks/server/instance.h" +#include "test/mocks/server/overload_manager.h" #include "test/mocks/ssl/mocks.h" #include "test/mocks/tracing/mocks.h" #include "test/mocks/upstream/cluster_info.h" #include "test/mocks/upstream/mocks.h" #include "test/test_common/logging.h" #include "test/test_common/printers.h" +#include "test/test_common/test_runtime.h" #include "test/test_common/test_time.h" #include "gmock/gmock.h" @@ -61,6 +64,7 @@ using testing::InSequence; using testing::Invoke; using testing::InvokeWithoutArgs; using testing::NiceMock; +using testing::Property; using testing::Ref; using testing::Return; using testing::ReturnRef; @@ -87,15 +91,16 @@ class HttpConnectionManagerImplTest : public testing::Test, public ConnectionMan : http_context_(fake_stats_.symbolTable()), access_log_path_("dummy_path"), access_logs_{ AccessLog::InstanceSharedPtr{new Extensions::AccessLoggers::File::FileAccessLog( - 
access_log_path_, {}, AccessLog::AccessLogFormatUtils::defaultAccessLogFormatter(), - log_manager_)}}, + access_log_path_, {}, + Formatter::SubstitutionFormatUtils::defaultSubstitutionFormatter(), log_manager_)}}, codec_(new NiceMock()), stats_({ALL_HTTP_CONN_MAN_STATS(POOL_COUNTER(fake_stats_), POOL_GAUGE(fake_stats_), POOL_HISTOGRAM(fake_stats_))}, "", fake_stats_), - tracing_stats_{CONN_MAN_TRACING_STATS(POOL_COUNTER(fake_stats_))}, - listener_stats_{CONN_MAN_LISTENER_STATS(POOL_COUNTER(fake_listener_stats_))}, - request_id_extension_(RequestIDExtensionFactory::defaultInstance(random_)) { + + listener_stats_({CONN_MAN_LISTENER_STATS(POOL_COUNTER(fake_listener_stats_))}), + request_id_extension_(RequestIDExtensionFactory::defaultInstance(random_)), + local_reply_(LocalReply::Factory::createDefault()) { ON_CALL(route_config_provider_, lastUpdated()) .WillByDefault(Return(test_time_.timeSystem().systemTime())); @@ -126,12 +131,12 @@ class HttpConnectionManagerImplTest : public testing::Test, public ConnectionMan ON_CALL(filter_callbacks_.connection_, ssl()).WillByDefault(Return(ssl_connection_)); ON_CALL(Const(filter_callbacks_.connection_), ssl()).WillByDefault(Return(ssl_connection_)); filter_callbacks_.connection_.local_address_ = - std::make_shared("127.0.0.1"); + std::make_shared("127.0.0.1", 443); filter_callbacks_.connection_.remote_address_ = std::make_shared("0.0.0.0"); conn_manager_ = std::make_unique( *this, drain_close_, random_, http_context_, runtime_, local_info_, cluster_manager_, - &overload_manager_, test_time_.timeSystem()); + overload_manager_, test_time_.timeSystem()); conn_manager_->initializeReadFilterCallbacks(filter_callbacks_); if (tracing) { @@ -190,10 +195,11 @@ class HttpConnectionManagerImplTest : public testing::Test, public ConnectionMan void setUpBufferLimits() { ON_CALL(response_encoder_, getStream()).WillByDefault(ReturnRef(stream_)); + EXPECT_CALL(stream_, bufferLimit()).WillOnce(Return(initial_buffer_limit_)); 
EXPECT_CALL(stream_, addCallbacks(_)) .WillOnce(Invoke( [&](Http::StreamCallbacks& callbacks) -> void { stream_callbacks_ = &callbacks; })); - EXPECT_CALL(stream_, bufferLimit()).WillOnce(Return(initial_buffer_limit_)); + EXPECT_CALL(stream_, setFlushTimeout(_)); } // If request_with_data_and_trailers is true, includes data and trailers in the request. If @@ -202,7 +208,7 @@ class HttpConnectionManagerImplTest : public testing::Test, public ConnectionMan void setUpEncoderAndDecoder(bool request_with_data_and_trailers, bool decode_headers_stop_all) { setUpBufferLimits(); EXPECT_CALL(*codec_, dispatch(_)) - .WillOnce(Invoke([&, request_with_data_and_trailers](Buffer::Instance&) -> void { + .WillOnce(Invoke([&, request_with_data_and_trailers](Buffer::Instance&) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ {":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; @@ -217,6 +223,7 @@ class HttpConnectionManagerImplTest : public testing::Test, public ConnectionMan } else { decoder->decodeHeaders(std::move(headers), true); } + return Http::okStatus(); })); setupFilterChain(2, 2); @@ -265,6 +272,7 @@ class HttpConnectionManagerImplTest : public testing::Test, public ConnectionMan EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, false)) .WillOnce(Return(FilterHeadersStatus::Continue)); EXPECT_CALL(response_encoder_, encodeHeaders(_, false)); + decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(""); decoder_filters_[0]->callbacks_->encodeHeaders(std::move(response_headers), false); return altered_response_headers; } @@ -293,6 +301,7 @@ class HttpConnectionManagerImplTest : public testing::Test, public ConnectionMan FilterChainFactory& filterFactory() override { return filter_factory_; } bool generateRequestId() const override { return true; } bool preserveExternalRequestId() const override { return false; } + bool alwaysSetRequestIdInResponse() const 
override { return false; } uint32_t maxRequestHeadersKb() const override { return max_request_headers_kb_; } uint32_t maxRequestHeadersCount() const override { return max_request_headers_count_; } absl::optional idleTimeout() const override { return idle_timeout_; } @@ -343,14 +352,19 @@ class HttpConnectionManagerImplTest : public testing::Test, public ConnectionMan const TracingConnectionManagerConfig* tracingConfig() override { return tracing_config_.get(); } ConnectionManagerListenerStats& listenerStats() override { return listener_stats_; } bool proxy100Continue() const override { return proxy_100_continue_; } + bool streamErrorOnInvalidHttpMessaging() const override { + return stream_error_on_invalid_http_messaging_; + } const Http::Http1Settings& http1Settings() const override { return http1_settings_; } bool shouldNormalizePath() const override { return normalize_path_; } bool shouldMergeSlashes() const override { return merge_slashes_; } + bool shouldStripMatchingPort() const override { return strip_matching_port_; } RequestIDExtensionSharedPtr requestIDExtension() override { return request_id_extension_; } envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction headersWithUnderscoresAction() const override { return headers_with_underscores_action_; } + const LocalReply::LocalReply& localReply() const override { return *local_reply_; } Envoy::Event::SimulatedTimeSystem test_time_; NiceMock route_config_provider_; @@ -366,7 +380,7 @@ class HttpConnectionManagerImplTest : public testing::Test, public ConnectionMan MockServerConnection* codec_; NiceMock filter_factory_; ConnectionManagerStats stats_; - ConnectionManagerTracingStats tracing_stats_; + ConnectionManagerTracingStats tracing_stats_{CONN_MAN_TRACING_STATS(POOL_COUNTER(fake_stats_))}; NiceMock drain_close_; std::unique_ptr conn_manager_; std::string server_name_; @@ -386,7 +400,7 @@ class HttpConnectionManagerImplTest : public testing::Test, public ConnectionMan 
std::chrono::milliseconds request_timeout_{}; std::chrono::milliseconds delayed_close_timeout_{}; absl::optional max_stream_duration_{}; - NiceMock random_; + NiceMock random_; NiceMock local_info_; NiceMock factory_context_; std::shared_ptr ssl_connection_; @@ -403,15 +417,18 @@ class HttpConnectionManagerImplTest : public testing::Test, public ConnectionMan Stats::IsolatedStoreImpl fake_listener_stats_; ConnectionManagerListenerStats listener_stats_; bool proxy_100_continue_ = false; + bool stream_error_on_invalid_http_messaging_ = false; bool preserve_external_request_id_ = false; Http::Http1Settings http1_settings_; bool normalize_path_ = false; bool merge_slashes_ = false; + bool strip_matching_port_ = false; envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction headers_with_underscores_action_ = envoy::config::core::v3::HttpProtocolOptions::ALLOW; NiceMock upstream_conn_; // for websocket tests NiceMock conn_pool_; // for websocket tests RequestIDExtensionSharedPtr request_id_extension_; + const LocalReply::LocalReplyPtr local_reply_; // TODO(mattklein123): Not all tests have been converted over to better setup. Convert the rest. MockResponseEncoder response_encoder_; @@ -429,7 +446,7 @@ TEST_F(HttpConnectionManagerImplTest, HeaderOnlyRequestAndResponse) { .Times(2) .WillRepeatedly(Invoke([&](RequestHeaderMap& headers, bool) -> FilterHeadersStatus { EXPECT_NE(nullptr, headers.ForwardedFor()); - EXPECT_EQ("http", headers.ForwardedProto()->value().getStringView()); + EXPECT_EQ("http", headers.getForwardedProtoValue()); if (headers.Path()->value() == "/healthcheck") { filter->callbacks_->streamInfo().healthCheck(true); } @@ -449,12 +466,11 @@ TEST_F(HttpConnectionManagerImplTest, HeaderOnlyRequestAndResponse) { // When dispatch is called on the codec, we pretend to get a new stream and then fire a headers // only request into it. Then we respond into the filter. 
- RequestDecoder* decoder = nullptr; NiceMock encoder; EXPECT_CALL(*codec_, dispatch(_)) .Times(2) - .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> void { - decoder = &conn_manager_->newStream(encoder); + .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { + RequestDecoder* decoder = &conn_manager_->newStream(encoder); // Test not charging stats on the second call. if (data.length() == 4) { @@ -468,10 +484,12 @@ TEST_F(HttpConnectionManagerImplTest, HeaderOnlyRequestAndResponse) { } ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + filter->callbacks_->streamInfo().setResponseCodeDetails(""); filter->callbacks_->encodeHeaders(std::move(response_headers), true); // Drain 2 so that on the 2nd iteration we will hit zero. data.drain(2); + return Http::okStatus(); })); // Kick off the incoming data. Use extra data which should cause a redispatch. @@ -494,7 +512,7 @@ TEST_F(HttpConnectionManagerImplTest, 100ContinueResponse) { EXPECT_CALL(*filter, decodeHeaders(_, true)) .WillRepeatedly(Invoke([&](RequestHeaderMap& headers, bool) -> FilterHeadersStatus { EXPECT_NE(nullptr, headers.ForwardedFor()); - EXPECT_EQ("http", headers.ForwardedProto()->value().getStringView()); + EXPECT_EQ("http", headers.getForwardedProtoValue()); return FilterHeadersStatus::StopIteration; })); @@ -509,23 +527,25 @@ TEST_F(HttpConnectionManagerImplTest, 100ContinueResponse) { // When dispatch is called on the codec, we pretend to get a new stream and then fire a headers // only request into it. Then we respond into the filter. 
- RequestDecoder* decoder = nullptr; NiceMock encoder; - EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance& data) -> void { - decoder = &conn_manager_->newStream(encoder); + EXPECT_CALL(*codec_, dispatch(_)) + .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { + RequestDecoder* decoder = &conn_manager_->newStream(encoder); - // Test not charging stats on the second call. - RequestHeaderMapPtr headers{ - new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; - decoder->decodeHeaders(std::move(headers), true); + // Test not charging stats on the second call. + RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ + {":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; + decoder->decodeHeaders(std::move(headers), true); - ResponseHeaderMapPtr continue_headers{new TestResponseHeaderMapImpl{{":status", "100"}}}; - filter->callbacks_->encode100ContinueHeaders(std::move(continue_headers)); - ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; - filter->callbacks_->encodeHeaders(std::move(response_headers), true); + ResponseHeaderMapPtr continue_headers{new TestResponseHeaderMapImpl{{":status", "100"}}}; + filter->callbacks_->encode100ContinueHeaders(std::move(continue_headers)); + ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + filter->callbacks_->streamInfo().setResponseCodeDetails(""); + filter->callbacks_->encodeHeaders(std::move(response_headers), true); - data.drain(4); - })); + data.drain(4); + return Http::okStatus(); + })); // Kick off the incoming data. 
Buffer::OwnedImpl fake_input("1234"); @@ -560,6 +580,7 @@ TEST_F(HttpConnectionManagerImplTest, 100ContinueResponseWithEncoderFiltersProxy .WillOnce(Return(FilterHeadersStatus::Continue)); EXPECT_CALL(response_encoder_, encodeHeaders(_, false)); ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(""); decoder_filters_[0]->callbacks_->encodeHeaders(std::move(response_headers), false); } @@ -583,6 +604,7 @@ TEST_F(HttpConnectionManagerImplTest, 100ContinueResponseWithEncoderFilters) { .WillOnce(Return(FilterHeadersStatus::Continue)); EXPECT_CALL(response_encoder_, encodeHeaders(_, false)); ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(""); decoder_filters_[0]->callbacks_->encodeHeaders(std::move(response_headers), false); } @@ -613,9 +635,77 @@ TEST_F(HttpConnectionManagerImplTest, PauseResume100Continue) { .WillOnce(Return(FilterHeadersStatus::Continue)); EXPECT_CALL(response_encoder_, encodeHeaders(_, false)); ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + decoder_filters_[1]->callbacks_->streamInfo().setResponseCodeDetails(""); decoder_filters_[1]->callbacks_->encodeHeaders(std::move(response_headers), false); } +// Regression test for https://github.com/envoyproxy/envoy/issues/10923. +TEST_F(HttpConnectionManagerImplTest, 100ContinueResponseWithDecoderPause) { + proxy_100_continue_ = true; + setup(false, "envoy-custom-server", false); + + std::shared_ptr filter(new NiceMock()); + + // Allow headers to pass. + EXPECT_CALL(*filter, decodeHeaders(_, false)) + .WillRepeatedly( + InvokeWithoutArgs([]() -> FilterHeadersStatus { return FilterHeadersStatus::Continue; })); + // Pause and then resume the decode pipeline, this is key to triggering #10923. 
+ EXPECT_CALL(*filter, decodeData(_, false)).WillOnce(InvokeWithoutArgs([]() -> FilterDataStatus { + return FilterDataStatus::StopIterationAndBuffer; + })); + EXPECT_CALL(*filter, decodeData(_, true)) + .WillRepeatedly( + InvokeWithoutArgs([]() -> FilterDataStatus { return FilterDataStatus::Continue; })); + + EXPECT_CALL(*filter, setDecoderFilterCallbacks(_)); + + EXPECT_CALL(filter_factory_, createFilterChain(_)) + .WillRepeatedly(Invoke([&](FilterChainFactoryCallbacks& callbacks) -> void { + callbacks.addStreamDecoderFilter(filter); + })); + + EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)); + + NiceMock encoder; + EXPECT_CALL(*codec_, dispatch(_)) + .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { + RequestDecoder* decoder = &conn_manager_->newStream(encoder); + + // Test not charging stats on the second call. + RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ + {":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; + decoder->decodeHeaders(std::move(headers), false); + // Allow the decode pipeline to pause. + decoder->decodeData(data, false); + + ResponseHeaderMapPtr continue_headers{new TestResponseHeaderMapImpl{{":status", "100"}}}; + filter->callbacks_->encode100ContinueHeaders(std::move(continue_headers)); + + // Resume decode pipeline after encoding 100 continue headers, we're now + // ready to trigger #10923. + decoder->decodeData(data, true); + + ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + filter->callbacks_->streamInfo().setResponseCodeDetails(""); + filter->callbacks_->encodeHeaders(std::move(response_headers), true); + + data.drain(4); + return Http::okStatus(); + })); + + // Kick off the incoming data. 
+ Buffer::OwnedImpl fake_input("1234"); + conn_manager_->onData(fake_input, false); + + EXPECT_EQ(1U, stats_.named_.downstream_rq_1xx_.value()); + EXPECT_EQ(1U, listener_stats_.downstream_rq_1xx_.value()); + EXPECT_EQ(1U, stats_.named_.downstream_rq_2xx_.value()); + EXPECT_EQ(1U, listener_stats_.downstream_rq_2xx_.value()); + EXPECT_EQ(2U, stats_.named_.downstream_rq_completed_.value()); + EXPECT_EQ(2U, listener_stats_.downstream_rq_completed_.value()); +} + // By default, Envoy will set the server header to the server name, here "custom-value" TEST_F(HttpConnectionManagerImplTest, ServerHeaderOverwritten) { setup(false, "custom-value", false); @@ -624,7 +714,7 @@ TEST_F(HttpConnectionManagerImplTest, ServerHeaderOverwritten) { sendRequestHeadersAndData(); const ResponseHeaderMap* altered_headers = sendResponseHeaders( ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}, {"server", "foo"}}}); - EXPECT_EQ("custom-value", altered_headers->Server()->value().getStringView()); + EXPECT_EQ("custom-value", altered_headers->getServerValue()); } // When configured APPEND_IF_ABSENT if the server header is present it will be retained. @@ -636,7 +726,7 @@ TEST_F(HttpConnectionManagerImplTest, ServerHeaderAppendPresent) { sendRequestHeadersAndData(); const ResponseHeaderMap* altered_headers = sendResponseHeaders( ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}, {"server", "foo"}}}); - EXPECT_EQ("foo", altered_headers->Server()->value().getStringView()); + EXPECT_EQ("foo", altered_headers->getServerValue()); } // When configured APPEND_IF_ABSENT if the server header is absent the server name will be set. 
@@ -648,7 +738,7 @@ TEST_F(HttpConnectionManagerImplTest, ServerHeaderAppendAbsent) { sendRequestHeadersAndData(); const ResponseHeaderMap* altered_headers = sendResponseHeaders(ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}); - EXPECT_EQ("custom-value", altered_headers->Server()->value().getStringView()); + EXPECT_EQ("custom-value", altered_headers->getServerValue()); } // When configured PASS_THROUGH, the server name will pass through. @@ -660,7 +750,7 @@ TEST_F(HttpConnectionManagerImplTest, ServerHeaderPassthroughPresent) { sendRequestHeadersAndData(); const ResponseHeaderMap* altered_headers = sendResponseHeaders( ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}, {"server", "foo"}}}); - EXPECT_EQ("foo", altered_headers->Server()->value().getStringView()); + EXPECT_EQ("foo", altered_headers->getServerValue()); } // When configured PASS_THROUGH, the server header will not be added if absent. @@ -679,12 +769,13 @@ TEST_F(HttpConnectionManagerImplTest, InvalidPathWithDualFilter) { InSequence s; setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ {":authority", "host"}, {":path", "http://api.lyft.com/"}, {":method", "GET"}}}; decoder->decodeHeaders(std::move(headers), true); data.drain(4); + return Http::okStatus(); })); // This test also verifies that decoder/encoder filters have onDestroy() called only once. 
@@ -699,7 +790,7 @@ TEST_F(HttpConnectionManagerImplTest, InvalidPathWithDualFilter) { EXPECT_CALL(*filter, encodeHeaders(_, true)); EXPECT_CALL(response_encoder_, encodeHeaders(_, true)) .WillOnce(Invoke([&](const ResponseHeaderMap& headers, bool) -> void { - EXPECT_EQ("404", headers.Status()->value().getStringView()); + EXPECT_EQ("404", headers.getStatusValue()); EXPECT_EQ("absolute_path_rejected", filter->decoder_callbacks_->streamInfo().responseCodeDetails().value()); })); @@ -716,7 +807,7 @@ TEST_F(HttpConnectionManagerImplTest, PathFailedtoSanitize) { // Enable path sanitizer normalize_path_ = true; - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ {":authority", "host"}, @@ -724,6 +815,7 @@ TEST_F(HttpConnectionManagerImplTest, PathFailedtoSanitize) { {":method", "GET"}}}; decoder->decodeHeaders(std::move(headers), true); data.drain(4); + return Http::okStatus(); })); // This test also verifies that decoder/encoder filters have onDestroy() called only once. 
@@ -738,7 +830,7 @@ TEST_F(HttpConnectionManagerImplTest, PathFailedtoSanitize) { EXPECT_CALL(*filter, encodeHeaders(_, true)); EXPECT_CALL(response_encoder_, encodeHeaders(_, true)) .WillOnce(Invoke([&](const ResponseHeaderMap& headers, bool) -> void { - EXPECT_EQ("400", headers.Status()->value().getStringView()); + EXPECT_EQ("400", headers.getStatusValue()); EXPECT_EQ("path_normalization_failed", filter->decoder_callbacks_->streamInfo().responseCodeDetails().value()); })); @@ -766,22 +858,26 @@ TEST_F(HttpConnectionManagerImplTest, FilterShouldUseSantizedPath) { EXPECT_CALL(*filter, decodeHeaders(_, true)) .WillRepeatedly(Invoke([&](RequestHeaderMap& header_map, bool) -> FilterHeadersStatus { - EXPECT_EQ(normalized_path, header_map.Path()->value().getStringView()); + EXPECT_EQ(normalized_path, header_map.getPathValue()); return FilterHeadersStatus::StopIteration; })); EXPECT_CALL(*filter, setDecoderFilterCallbacks(_)); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ {":authority", "host"}, {":path", original_path}, {":method", "GET"}}}; decoder->decodeHeaders(std::move(headers), true); + return Http::okStatus(); })); // Kick off the incoming data. 
Buffer::OwnedImpl fake_input("1234"); conn_manager_->onData(fake_input, false); + + EXPECT_CALL(*filter, onDestroy()); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } // The router observes normalized paths, not the original path, when path @@ -793,11 +889,12 @@ TEST_F(HttpConnectionManagerImplTest, RouteShouldUseSantizedPath) { const std::string original_path = "/x/%2E%2e/z"; const std::string normalized_path = "/z"; - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ {":authority", "host"}, {":path", original_path}, {":method", "GET"}}}; decoder->decodeHeaders(std::move(headers), true); + return Http::okStatus(); })); const std::string fake_cluster_name = "fake_cluster"; @@ -807,18 +904,378 @@ TEST_F(HttpConnectionManagerImplTest, RouteShouldUseSantizedPath) { std::shared_ptr route = std::make_shared>(); EXPECT_CALL(route->route_entry_, clusterName()).WillRepeatedly(ReturnRef(fake_cluster_name)); - EXPECT_CALL(*route_config_provider_.route_config_, route(_, _, _)) - .WillOnce(Invoke( - [&](const Http::RequestHeaderMap& header_map, const StreamInfo::StreamInfo&, uint64_t) { - EXPECT_EQ(normalized_path, header_map.Path()->value().getStringView()); - return route; - })); + EXPECT_CALL(*route_config_provider_.route_config_, route(_, _, _, _)) + .WillOnce(Invoke([&](const Router::RouteCallback&, const Http::RequestHeaderMap& header_map, + const StreamInfo::StreamInfo&, uint64_t) { + EXPECT_EQ(normalized_path, header_map.getPathValue()); + return route; + })); + EXPECT_CALL(filter_factory_, createFilterChain(_)) + .WillOnce(Invoke([&](FilterChainFactoryCallbacks&) -> void {})); + + // Kick off the incoming data. 
+ Buffer::OwnedImpl fake_input("1234"); + conn_manager_->onData(fake_input, false); + + // Clean up. + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); +} + +TEST_F(HttpConnectionManagerImplTest, RouteOverride) { + setup(false, ""); + + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { + RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); + RequestHeaderMapPtr headers{ + new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; + decoder->decodeHeaders(std::move(headers), true); + return Http::okStatus(); + })); + + setupFilterChain(2, 0); + const std::string foo_bar_baz_cluster_name = "foo_bar_baz"; + const std::string foo_bar_cluster_name = "foo_bar"; + const std::string foo_cluster_name = "foo"; + const std::string default_cluster_name = "default"; + + std::shared_ptr foo_bar_baz_cluster = + std::make_shared>(); + + std::shared_ptr foo_bar_cluster = + std::make_shared>(); + EXPECT_CALL(cluster_manager_, get(absl::string_view{foo_bar_cluster_name})) + .WillOnce(Return(foo_bar_cluster.get())); + + std::shared_ptr foo_cluster = + std::make_shared>(); + + std::shared_ptr default_cluster = + std::make_shared>(); + EXPECT_CALL(cluster_manager_, get(absl::string_view{default_cluster_name})) + .Times(2) + .WillRepeatedly(Return(default_cluster.get())); + + std::shared_ptr foo_bar_baz_route = + std::make_shared>(); + + std::shared_ptr foo_bar_route = + std::make_shared>(); + EXPECT_CALL(foo_bar_route->route_entry_, clusterName()).WillOnce(ReturnRef(foo_bar_cluster_name)); + + std::shared_ptr foo_route = std::make_shared>(); + + std::shared_ptr default_route = + std::make_shared>(); + EXPECT_CALL(default_route->route_entry_, clusterName()) + .Times(2) + .WillRepeatedly(ReturnRef(default_cluster_name)); + + using ::testing::InSequence; + { + InSequence seq; + EXPECT_CALL(*route_config_provider_.route_config_, route(_, _, _, _)) + 
.WillOnce(Return(default_route)); + + // This filter iterates through all possible route matches and choose the last matched route + EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true)) + .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus { + EXPECT_EQ(default_route, decoder_filters_[0]->callbacks_->route()); + EXPECT_EQ(default_route->routeEntry(), + decoder_filters_[0]->callbacks_->streamInfo().routeEntry()); + EXPECT_EQ(default_cluster->info(), decoder_filters_[0]->callbacks_->clusterInfo()); + + // Not clearing cached route returns cached route and doesn't invoke cb. + Router::RouteConstSharedPtr route = decoder_filters_[0]->callbacks_->route( + [](Router::RouteConstSharedPtr, Router::RouteEvalStatus) -> Router::RouteMatchStatus { + ADD_FAILURE() << "When route cache is not cleared CB should not be invoked"; + return Router::RouteMatchStatus::Accept; + }); + EXPECT_EQ(default_route, route); + + int ctr = 0; + const Router::RouteCallback& cb = + [&](Router::RouteConstSharedPtr route, + Router::RouteEvalStatus route_eval_status) -> Router::RouteMatchStatus { + EXPECT_LE(ctr, 3); + if (ctr == 0) { + ++ctr; + EXPECT_EQ(foo_bar_baz_route, route); + EXPECT_EQ(route_eval_status, Router::RouteEvalStatus::HasMoreRoutes); + return Router::RouteMatchStatus::Continue; + } + + if (ctr == 1) { + ++ctr; + EXPECT_EQ(foo_bar_route, route); + EXPECT_EQ(route_eval_status, Router::RouteEvalStatus::HasMoreRoutes); + return Router::RouteMatchStatus::Continue; + } + + if (ctr == 2) { + ++ctr; + EXPECT_EQ(foo_route, route); + EXPECT_EQ(route_eval_status, Router::RouteEvalStatus::HasMoreRoutes); + return Router::RouteMatchStatus::Continue; + } + + if (ctr == 3) { + ++ctr; + EXPECT_EQ(default_route, route); + EXPECT_EQ(route_eval_status, Router::RouteEvalStatus::NoMoreRoutes); + return Router::RouteMatchStatus::Accept; + } + return Router::RouteMatchStatus::Accept; + }; + + decoder_filters_[0]->callbacks_->clearRouteCache(); + route = 
decoder_filters_[0]->callbacks_->route(cb); + + EXPECT_EQ(default_route, route); + EXPECT_EQ(default_route, decoder_filters_[0]->callbacks_->route()); + EXPECT_EQ(default_route->routeEntry(), + decoder_filters_[0]->callbacks_->streamInfo().routeEntry()); + EXPECT_EQ(default_cluster->info(), decoder_filters_[0]->callbacks_->clusterInfo()); + + return FilterHeadersStatus::Continue; + })); + + // This route config expected to be invoked for all matching routes + EXPECT_CALL(*route_config_provider_.route_config_, route(_, _, _, _)) + .WillOnce(Invoke([&](const Router::RouteCallback& cb, const Http::RequestHeaderMap&, + const Envoy::StreamInfo::StreamInfo&, + uint64_t) -> Router::RouteConstSharedPtr { + EXPECT_EQ(cb(foo_bar_baz_route, Router::RouteEvalStatus::HasMoreRoutes), + Router::RouteMatchStatus::Continue); + EXPECT_EQ(cb(foo_bar_route, Router::RouteEvalStatus::HasMoreRoutes), + Router::RouteMatchStatus::Continue); + EXPECT_EQ(cb(foo_route, Router::RouteEvalStatus::HasMoreRoutes), + Router::RouteMatchStatus::Continue); + EXPECT_EQ(cb(default_route, Router::RouteEvalStatus::NoMoreRoutes), + Router::RouteMatchStatus::Accept); + return default_route; + })); + + EXPECT_CALL(*decoder_filters_[0], decodeComplete()); + + // This filter chooses second route + EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, true)) + .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus { + EXPECT_EQ(default_route, decoder_filters_[1]->callbacks_->route()); + EXPECT_EQ(default_route->routeEntry(), + decoder_filters_[1]->callbacks_->streamInfo().routeEntry()); + EXPECT_EQ(default_cluster->info(), decoder_filters_[1]->callbacks_->clusterInfo()); + + int ctr = 0; + const Router::RouteCallback& cb = + [&](Router::RouteConstSharedPtr route, + Router::RouteEvalStatus route_eval_status) -> Router::RouteMatchStatus { + EXPECT_LE(ctr, 1); + if (ctr == 0) { + ++ctr; + EXPECT_EQ(foo_bar_baz_route, route); + EXPECT_EQ(route_eval_status, Router::RouteEvalStatus::HasMoreRoutes); + return 
Router::RouteMatchStatus::Continue; + } + + if (ctr == 1) { + ++ctr; + EXPECT_EQ(foo_bar_route, route); + EXPECT_EQ(route_eval_status, Router::RouteEvalStatus::HasMoreRoutes); + return Router::RouteMatchStatus::Accept; + } + return Router::RouteMatchStatus::Accept; + }; + + decoder_filters_[0]->callbacks_->clearRouteCache(); + decoder_filters_[1]->callbacks_->route(cb); + + EXPECT_EQ(foo_bar_route, decoder_filters_[1]->callbacks_->route()); + EXPECT_EQ(foo_bar_route->routeEntry(), + decoder_filters_[1]->callbacks_->streamInfo().routeEntry()); + EXPECT_EQ(foo_bar_cluster->info(), decoder_filters_[1]->callbacks_->clusterInfo()); + + return FilterHeadersStatus::Continue; + })); + + // This route config expected to be invoked for first two matching routes + EXPECT_CALL(*route_config_provider_.route_config_, route(_, _, _, _)) + .WillOnce(Invoke([&](const Router::RouteCallback& cb, const Http::RequestHeaderMap&, + const Envoy::StreamInfo::StreamInfo&, + uint64_t) -> Router::RouteConstSharedPtr { + EXPECT_EQ(cb(foo_bar_baz_route, Router::RouteEvalStatus::HasMoreRoutes), + Router::RouteMatchStatus::Continue); + EXPECT_EQ(cb(foo_bar_route, Router::RouteEvalStatus::HasMoreRoutes), + Router::RouteMatchStatus::Accept); + return foo_bar_route; + })); + + EXPECT_CALL(*decoder_filters_[1], decodeComplete()); + } + + // Kick off the incoming data. + Buffer::OwnedImpl fake_input("1234"); + conn_manager_->onData(fake_input, false); + + // Clean up. 
+ expectOnDestroy(); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); +} + +// Filters observe host header w/o port's part when port's removal is configured +TEST_F(HttpConnectionManagerImplTest, FilterShouldUseNormalizedHost) { + setup(false, ""); + // Enable port removal + strip_matching_port_ = true; + const std::string original_host = "host:443"; + const std::string normalized_host = "host"; + + auto* filter = new MockStreamFilter(); + + EXPECT_CALL(filter_factory_, createFilterChain(_)) + .WillOnce(Invoke([&](FilterChainFactoryCallbacks& callbacks) -> void { + callbacks.addStreamDecoderFilter(StreamDecoderFilterSharedPtr{filter}); + })); + + EXPECT_CALL(*filter, decodeHeaders(_, true)) + .WillRepeatedly(Invoke([&](RequestHeaderMap& header_map, bool) -> FilterHeadersStatus { + EXPECT_EQ(normalized_host, header_map.getHostValue()); + return FilterHeadersStatus::StopIteration; + })); + + EXPECT_CALL(*filter, setDecoderFilterCallbacks(_)); + + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { + RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); + RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ + {":authority", original_host}, {":path", "/"}, {":method", "GET"}}}; + decoder->decodeHeaders(std::move(headers), true); + return Http::okStatus(); + })); + + // Kick off the incoming data. + Buffer::OwnedImpl fake_input("1234"); + conn_manager_->onData(fake_input, false); + + // Clean up. 
+ EXPECT_CALL(*filter, onDestroy()); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); +} + +// The router observes host header w/o port, not the original host, when +// remove_port is configured +TEST_F(HttpConnectionManagerImplTest, RouteShouldUseNormalizedHost) { + setup(false, ""); + // Enable port removal + strip_matching_port_ = true; + const std::string original_host = "host:443"; + const std::string normalized_host = "host"; + + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { + RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); + RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ + {":authority", original_host}, {":path", "/"}, {":method", "GET"}}}; + decoder->decodeHeaders(std::move(headers), true); + return Http::okStatus(); + })); + + const std::string fake_cluster_name = "fake_cluster"; + + std::shared_ptr fake_cluster = + std::make_shared>(); + std::shared_ptr route = std::make_shared>(); + EXPECT_CALL(route->route_entry_, clusterName()).WillRepeatedly(ReturnRef(fake_cluster_name)); + + EXPECT_CALL(*route_config_provider_.route_config_, route(_, _, _, _)) + .WillOnce(Invoke([&](const Router::RouteCallback&, const Http::RequestHeaderMap& header_map, + const StreamInfo::StreamInfo&, uint64_t) { + EXPECT_EQ(normalized_host, header_map.getHostValue()); + return route; + })); EXPECT_CALL(filter_factory_, createFilterChain(_)) .WillOnce(Invoke([&](FilterChainFactoryCallbacks&) -> void {})); // Kick off the incoming data. Buffer::OwnedImpl fake_input("1234"); conn_manager_->onData(fake_input, false); + + // Clean up. 
+ filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); +} + +TEST_F(HttpConnectionManagerImplTest, PreserveUpstreamDateDisabledDateNotSet) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.preserve_upstream_date", "false"}}); + setup(false, ""); + setUpEncoderAndDecoder(false, false); + sendRequestHeadersAndData(); + const auto* modified_headers = sendResponseHeaders( + ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}, {"server", "foo"}}}); + ASSERT_TRUE(modified_headers); + EXPECT_TRUE(modified_headers->Date()); +} + +TEST_F(HttpConnectionManagerImplTest, PreserveUpstreamDateEnabledDateNotSet) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.preserve_upstream_date", "true"}}); + setup(false, ""); + setUpEncoderAndDecoder(false, false); + sendRequestHeadersAndData(); + const auto* modified_headers = sendResponseHeaders( + ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}, {"server", "foo"}}}); + ASSERT_TRUE(modified_headers); + EXPECT_TRUE(modified_headers->Date()); +} + +TEST_F(HttpConnectionManagerImplTest, PreserveUpstreamDateDisabledDateSet) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.preserve_upstream_date", "false"}}); + setup(false, ""); + setUpEncoderAndDecoder(false, false); + sendRequestHeadersAndData(); + const std::string expected_date{"Tue, 15 Nov 1994 08:12:31 GMT"}; + const auto* modified_headers = + sendResponseHeaders(ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{ + {":status", "200"}, {"server", "foo"}, {"date", expected_date.c_str()}}}); + ASSERT_TRUE(modified_headers); + ASSERT_TRUE(modified_headers->Date()); + EXPECT_NE(expected_date, modified_headers->getDateValue()); +} + +TEST_F(HttpConnectionManagerImplTest, 
PreserveUpstreamDateEnabledDateSet) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.preserve_upstream_date", "true"}}); + setup(false, ""); + setUpEncoderAndDecoder(false, false); + sendRequestHeadersAndData(); + const std::string expected_date{"Tue, 15 Nov 1994 08:12:31 GMT"}; + const auto* modified_headers = + sendResponseHeaders(ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{ + {":status", "200"}, {"server", "foo"}, {"date", expected_date.c_str()}}}); + ASSERT_TRUE(modified_headers); + ASSERT_TRUE(modified_headers->Date()); + EXPECT_EQ(expected_date, modified_headers->getDateValue()); +} + +TEST_F(HttpConnectionManagerImplTest, PreserveUpstreamDateDisabledDateFromCache) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.preserve_upstream_date", "false"}}); + setup(false, ""); + setUpEncoderAndDecoder(false, false); + sendRequestHeadersAndData(); + encoder_filters_[0]->callbacks_->streamInfo().setResponseFlag( + StreamInfo::ResponseFlag::ResponseFromCacheFilter); + const std::string expected_date{"Tue, 15 Nov 1994 08:12:31 GMT"}; + const auto* modified_headers = + sendResponseHeaders(ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{ + {":status", "200"}, {"server", "foo"}, {"date", expected_date.c_str()}}}); + ASSERT_TRUE(modified_headers); + ASSERT_TRUE(modified_headers->Date()); + EXPECT_EQ(expected_date, modified_headers->getDateValue()); } TEST_F(HttpConnectionManagerImplTest, StartAndFinishSpanNormalFlow) { @@ -947,23 +1404,25 @@ TEST_F(HttpConnectionManagerImplTest, StartAndFinishSpanNormalFlow) { use_remote_address_ = false; EXPECT_CALL(random_, uuid()).Times(0); - RequestDecoder* decoder = nullptr; NiceMock encoder; - EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance& data) -> void { - decoder = &conn_manager_->newStream(encoder); + EXPECT_CALL(*codec_, dispatch(_)) + 
.WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { + RequestDecoder* decoder = &conn_manager_->newStream(encoder); - RequestHeaderMapPtr headers{ - new TestRequestHeaderMapImpl{{":method", "GET"}, - {":authority", "host"}, - {":path", "/"}, - {"x-request-id", "125a4afb-6f55-a4ba-ad80-413f09f48a28"}}}; - decoder->decodeHeaders(std::move(headers), true); + RequestHeaderMapPtr headers{ + new TestRequestHeaderMapImpl{{":method", "GET"}, + {":authority", "host"}, + {":path", "/"}, + {"x-request-id", "125a4afb-6f55-a4ba-ad80-413f09f48a28"}}}; + decoder->decodeHeaders(std::move(headers), true); - ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; - filter->callbacks_->encodeHeaders(std::move(response_headers), true); - filter->callbacks_->activeSpan().setTag("service-cluster", "scoobydoo"); - data.drain(4); - })); + ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + filter->callbacks_->streamInfo().setResponseCodeDetails(""); + filter->callbacks_->encodeHeaders(std::move(response_headers), true); + filter->callbacks_->activeSpan().setTag("service-cluster", "scoobydoo"); + data.drain(4); + return Http::okStatus(); + })); // Should be no 'x-envoy-decorator-operation' response header. 
EXPECT_CALL(encoder, encodeHeaders(_, true)) @@ -1015,28 +1474,30 @@ TEST_F(HttpConnectionManagerImplTest, StartAndFinishSpanNormalFlowIngressDecorat use_remote_address_ = false; EXPECT_CALL(random_, uuid()).Times(0); - RequestDecoder* decoder = nullptr; NiceMock encoder; - EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance& data) -> void { - decoder = &conn_manager_->newStream(encoder); + EXPECT_CALL(*codec_, dispatch(_)) + .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { + RequestDecoder* decoder = &conn_manager_->newStream(encoder); - RequestHeaderMapPtr headers{ - new TestRequestHeaderMapImpl{{":method", "GET"}, - {":authority", "host"}, - {":path", "/"}, - {"x-request-id", "125a4afb-6f55-a4ba-ad80-413f09f48a28"}}}; - decoder->decodeHeaders(std::move(headers), true); + RequestHeaderMapPtr headers{ + new TestRequestHeaderMapImpl{{":method", "GET"}, + {":authority", "host"}, + {":path", "/"}, + {"x-request-id", "125a4afb-6f55-a4ba-ad80-413f09f48a28"}}}; + decoder->decodeHeaders(std::move(headers), true); - ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; - filter->callbacks_->encodeHeaders(std::move(response_headers), true); - filter->callbacks_->activeSpan().setTag("service-cluster", "scoobydoo"); - data.drain(4); - })); + ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + filter->callbacks_->streamInfo().setResponseCodeDetails(""); + filter->callbacks_->encodeHeaders(std::move(response_headers), true); + filter->callbacks_->activeSpan().setTag("service-cluster", "scoobydoo"); + data.drain(4); + return Http::okStatus(); + })); // Verify decorator operation response header has been defined. 
EXPECT_CALL(encoder, encodeHeaders(_, true)) .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void { - EXPECT_EQ("testOp", headers.EnvoyDecoratorOperation()->value().getStringView()); + EXPECT_EQ("testOp", headers.getEnvoyDecoratorOperationValue()); })); Buffer::OwnedImpl fake_input("1234"); @@ -1081,23 +1542,25 @@ TEST_F(HttpConnectionManagerImplTest, StartAndFinishSpanNormalFlowIngressDecorat use_remote_address_ = false; EXPECT_CALL(random_, uuid()).Times(0); - RequestDecoder* decoder = nullptr; NiceMock encoder; - EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance& data) -> void { - decoder = &conn_manager_->newStream(encoder); + EXPECT_CALL(*codec_, dispatch(_)) + .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { + RequestDecoder* decoder = &conn_manager_->newStream(encoder); - RequestHeaderMapPtr headers{ - new TestRequestHeaderMapImpl{{":method", "GET"}, - {":authority", "host"}, - {":path", "/"}, - {"x-request-id", "125a4afb-6f55-a4ba-ad80-413f09f48a28"}}}; - decoder->decodeHeaders(std::move(headers), true); + RequestHeaderMapPtr headers{ + new TestRequestHeaderMapImpl{{":method", "GET"}, + {":authority", "host"}, + {":path", "/"}, + {"x-request-id", "125a4afb-6f55-a4ba-ad80-413f09f48a28"}}}; + decoder->decodeHeaders(std::move(headers), true); - ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; - filter->callbacks_->encodeHeaders(std::move(response_headers), true); - filter->callbacks_->activeSpan().setTag("service-cluster", "scoobydoo"); - data.drain(4); - })); + ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + filter->callbacks_->streamInfo().setResponseCodeDetails(""); + filter->callbacks_->encodeHeaders(std::move(response_headers), true); + filter->callbacks_->activeSpan().setTag("service-cluster", "scoobydoo"); + data.drain(4); + return Http::okStatus(); + })); // Verify decorator operation response header has NOT 
been defined (i.e. not propagated). EXPECT_CALL(encoder, encodeHeaders(_, true)) @@ -1145,25 +1608,27 @@ TEST_F(HttpConnectionManagerImplTest, StartAndFinishSpanNormalFlowIngressDecorat use_remote_address_ = false; EXPECT_CALL(random_, uuid()).Times(0); - RequestDecoder* decoder = nullptr; NiceMock encoder; - EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance& data) -> void { - decoder = &conn_manager_->newStream(encoder); + EXPECT_CALL(*codec_, dispatch(_)) + .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { + RequestDecoder* decoder = &conn_manager_->newStream(encoder); - RequestHeaderMapPtr headers{ - new TestRequestHeaderMapImpl{{":method", "GET"}, - {":authority", "host"}, - {":path", "/"}, - {"x-request-id", "125a4afb-6f55-a4ba-ad80-413f09f48a28"}, - {"x-envoy-decorator-operation", "testOp"}}}; - decoder->decodeHeaders(std::move(headers), true); + RequestHeaderMapPtr headers{ + new TestRequestHeaderMapImpl{{":method", "GET"}, + {":authority", "host"}, + {":path", "/"}, + {"x-request-id", "125a4afb-6f55-a4ba-ad80-413f09f48a28"}, + {"x-envoy-decorator-operation", "testOp"}}}; + decoder->decodeHeaders(std::move(headers), true); - ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; - filter->callbacks_->encodeHeaders(std::move(response_headers), true); - filter->callbacks_->activeSpan().setTag("service-cluster", "scoobydoo"); + ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + filter->callbacks_->streamInfo().setResponseCodeDetails(""); + filter->callbacks_->encodeHeaders(std::move(response_headers), true); + filter->callbacks_->activeSpan().setTag("service-cluster", "scoobydoo"); - data.drain(4); - })); + data.drain(4); + return Http::okStatus(); + })); // Should be no 'x-envoy-decorator-operation' response header, as decorator // was overridden by request header. 
@@ -1226,30 +1691,32 @@ TEST_F(HttpConnectionManagerImplTest, StartAndFinishSpanNormalFlowEgressDecorato use_remote_address_ = false; EXPECT_CALL(random_, uuid()).Times(0); - RequestDecoder* decoder = nullptr; NiceMock encoder; - EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance& data) -> void { - decoder = &conn_manager_->newStream(encoder); + EXPECT_CALL(*codec_, dispatch(_)) + .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { + RequestDecoder* decoder = &conn_manager_->newStream(encoder); - RequestHeaderMapPtr headers{ - new TestRequestHeaderMapImpl{{":method", "GET"}, - {":authority", "host"}, - {":path", "/"}, - {"x-request-id", "125a4afb-6f55-a4ba-ad80-413f09f48a28"}}}; - decoder->decodeHeaders(std::move(headers), true); + RequestHeaderMapPtr headers{ + new TestRequestHeaderMapImpl{{":method", "GET"}, + {":authority", "host"}, + {":path", "/"}, + {"x-request-id", "125a4afb-6f55-a4ba-ad80-413f09f48a28"}}}; + decoder->decodeHeaders(std::move(headers), true); - ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; - filter->callbacks_->encodeHeaders(std::move(response_headers), true); - filter->callbacks_->activeSpan().setTag("service-cluster", "scoobydoo"); + ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + filter->callbacks_->streamInfo().setResponseCodeDetails(""); + filter->callbacks_->encodeHeaders(std::move(response_headers), true); + filter->callbacks_->activeSpan().setTag("service-cluster", "scoobydoo"); - data.drain(4); - })); + data.drain(4); + return Http::okStatus(); + })); EXPECT_CALL(*filter, decodeHeaders(_, true)) .WillOnce(Invoke([](RequestHeaderMap& headers, bool) -> FilterHeadersStatus { EXPECT_NE(nullptr, headers.EnvoyDecoratorOperation()); // Verify that decorator operation has been set as request header. 
- EXPECT_EQ("testOp", headers.EnvoyDecoratorOperation()->value().getStringView()); + EXPECT_EQ("testOp", headers.getEnvoyDecoratorOperationValue()); return FilterHeadersStatus::StopIteration; })); @@ -1308,24 +1775,25 @@ TEST_F(HttpConnectionManagerImplTest, StartAndFinishSpanNormalFlowEgressDecorato use_remote_address_ = false; EXPECT_CALL(random_, uuid()).Times(0); - RequestDecoder* decoder = nullptr; NiceMock encoder; - EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance& data) -> void { - decoder = &conn_manager_->newStream(encoder); + EXPECT_CALL(*codec_, dispatch(_)) + .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { + RequestDecoder* decoder = &conn_manager_->newStream(encoder); - RequestHeaderMapPtr headers{ - new TestRequestHeaderMapImpl{{":method", "GET"}, - {":authority", "host"}, - {":path", "/"}, - {"x-request-id", "125a4afb-6f55-a4ba-ad80-413f09f48a28"}}}; - decoder->decodeHeaders(std::move(headers), true); + RequestHeaderMapPtr headers{ + new TestRequestHeaderMapImpl{{":method", "GET"}, + {":authority", "host"}, + {":path", "/"}, + {"x-request-id", "125a4afb-6f55-a4ba-ad80-413f09f48a28"}}}; + decoder->decodeHeaders(std::move(headers), true); - ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; - filter->callbacks_->encodeHeaders(std::move(response_headers), true); - filter->callbacks_->activeSpan().setTag("service-cluster", "scoobydoo"); + ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + filter->callbacks_->encodeHeaders(std::move(response_headers), true); + filter->callbacks_->activeSpan().setTag("service-cluster", "scoobydoo"); - data.drain(4); - })); + data.drain(4); + return Http::okStatus(); + })); // Verify that decorator operation has NOT been set as request header (propagate is false) EXPECT_CALL(*filter, decodeHeaders(_, true)) @@ -1336,6 +1804,8 @@ TEST_F(HttpConnectionManagerImplTest, 
StartAndFinishSpanNormalFlowEgressDecorato Buffer::OwnedImpl fake_input("1234"); conn_manager_->onData(fake_input, false); + + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } TEST_F(HttpConnectionManagerImplTest, StartAndFinishSpanNormalFlowEgressDecoratorOverrideOp) { @@ -1384,29 +1854,31 @@ TEST_F(HttpConnectionManagerImplTest, StartAndFinishSpanNormalFlowEgressDecorato callbacks.addStreamDecoderFilter(filter); })); - // Treat request as internal, otherwise x-request-id header will be overwritten. - use_remote_address_ = false; - EXPECT_CALL(random_, uuid()).Times(0); - - RequestDecoder* decoder = nullptr; - NiceMock encoder; - EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance& data) -> void { - decoder = &conn_manager_->newStream(encoder); - - RequestHeaderMapPtr headers{ - new TestRequestHeaderMapImpl{{":method", "GET"}, - {":authority", "host"}, - {":path", "/"}, - {"x-request-id", "125a4afb-6f55-a4ba-ad80-413f09f48a28"}}}; - decoder->decodeHeaders(std::move(headers), true); - - ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{ - {":status", "200"}, {"x-envoy-decorator-operation", "testOp"}}}; - filter->callbacks_->encodeHeaders(std::move(response_headers), true); - filter->callbacks_->activeSpan().setTag("service-cluster", "scoobydoo"); + // Treat request as internal, otherwise x-request-id header will be overwritten. 
+ use_remote_address_ = false; + EXPECT_CALL(random_, uuid()).Times(0); - data.drain(4); - })); + NiceMock encoder; + EXPECT_CALL(*codec_, dispatch(_)) + .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { + RequestDecoder* decoder = &conn_manager_->newStream(encoder); + + RequestHeaderMapPtr headers{ + new TestRequestHeaderMapImpl{{":method", "GET"}, + {":authority", "host"}, + {":path", "/"}, + {"x-request-id", "125a4afb-6f55-a4ba-ad80-413f09f48a28"}}}; + decoder->decodeHeaders(std::move(headers), true); + + filter->callbacks_->streamInfo().setResponseCodeDetails(""); + ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{ + {":status", "200"}, {"x-envoy-decorator-operation", "testOp"}}}; + filter->callbacks_->encodeHeaders(std::move(response_headers), true); + filter->callbacks_->activeSpan().setTag("service-cluster", "scoobydoo"); + + data.drain(4); + return Http::okStatus(); + })); Buffer::OwnedImpl fake_input("1234"); conn_manager_->onData(fake_input, false); @@ -1444,24 +1916,26 @@ TEST_F(HttpConnectionManagerImplTest, use_remote_address_ = false; EXPECT_CALL(random_, uuid()).Times(0); - RequestDecoder* decoder = nullptr; NiceMock encoder; - EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance& data) -> void { - decoder = &conn_manager_->newStream(encoder); - - RequestHeaderMapPtr headers{ - new TestRequestHeaderMapImpl{{":method", "GET"}, - {":authority", "host"}, - {":path", "/"}, - {"x-request-id", "125a4afb-6f55-a4ba-ad80-413f09f48a28"}}}; - decoder->decodeHeaders(std::move(headers), true); - - ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{ - {":status", "200"}, {"x-envoy-decorator-operation", "testOp"}}}; - filter->callbacks_->encodeHeaders(std::move(response_headers), true); + EXPECT_CALL(*codec_, dispatch(_)) + .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { + RequestDecoder* decoder = &conn_manager_->newStream(encoder); + + RequestHeaderMapPtr headers{ 
+ new TestRequestHeaderMapImpl{{":method", "GET"}, + {":authority", "host"}, + {":path", "/"}, + {"x-request-id", "125a4afb-6f55-a4ba-ad80-413f09f48a28"}}}; + decoder->decodeHeaders(std::move(headers), true); + + filter->callbacks_->streamInfo().setResponseCodeDetails(""); + ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{ + {":status", "200"}, {"x-envoy-decorator-operation", "testOp"}}}; + filter->callbacks_->encodeHeaders(std::move(response_headers), true); - data.drain(4); - })); + data.drain(4); + return Http::okStatus(); + })); Buffer::OwnedImpl fake_input("1234"); conn_manager_->onData(fake_input, false); @@ -1500,24 +1974,26 @@ TEST_F(HttpConnectionManagerImplTest, TestAccessLog) { local_address); })); - RequestDecoder* decoder = nullptr; NiceMock encoder; - EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance& data) -> void { - decoder = &conn_manager_->newStream(encoder); - - RequestHeaderMapPtr headers{ - new TestRequestHeaderMapImpl{{":method", "GET"}, - {":authority", "host"}, - {":path", "/"}, - {"x-forwarded-for", xff_address}, - {"x-request-id", "125a4afb-6f55-a4ba-ad80-413f09f48a28"}}}; - decoder->decodeHeaders(std::move(headers), true); - - ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; - filter->callbacks_->encodeHeaders(std::move(response_headers), true); + EXPECT_CALL(*codec_, dispatch(_)) + .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { + RequestDecoder* decoder = &conn_manager_->newStream(encoder); + + RequestHeaderMapPtr headers{ + new TestRequestHeaderMapImpl{{":method", "GET"}, + {":authority", "host"}, + {":path", "/"}, + {"x-forwarded-for", xff_address}, + {"x-request-id", "125a4afb-6f55-a4ba-ad80-413f09f48a28"}}}; + decoder->decodeHeaders(std::move(headers), true); + + filter->callbacks_->streamInfo().setResponseCodeDetails(""); + ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + 
filter->callbacks_->encodeHeaders(std::move(response_headers), true); - data.drain(4); - })); + data.drain(4); + return Http::okStatus(); + })); Buffer::OwnedImpl fake_input("1234"); conn_manager_->onData(fake_input, false); @@ -1542,22 +2018,26 @@ TEST_F(HttpConnectionManagerImplTest, TestDownstreamDisconnectAccessLog) { EXPECT_TRUE(stream_info.hasAnyResponseFlag()); EXPECT_TRUE( stream_info.hasResponseFlag(StreamInfo::ResponseFlag::DownstreamConnectionTermination)); + EXPECT_EQ("downstream_remote_disconnect", stream_info.responseCodeDetails().value()); })); - RequestDecoder* decoder = nullptr; NiceMock encoder; - EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance& data) -> void { - decoder = &conn_manager_->newStream(encoder); + EXPECT_CALL(*codec_, dispatch(_)) + .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { + RequestDecoder* decoder = &conn_manager_->newStream(encoder); - RequestHeaderMapPtr headers{ - new TestRequestHeaderMapImpl{{":method", "GET"}, {":authority", "host"}, {":path", "/"}}}; - decoder->decodeHeaders(std::move(headers), true); + RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ + {":method", "GET"}, {":authority", "host"}, {":path", "/"}}}; + decoder->decodeHeaders(std::move(headers), true); - data.drain(4); - })); + data.drain(4); + return Http::okStatus(); + })); Buffer::OwnedImpl fake_input("1234"); conn_manager_->onData(fake_input, false); + + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } TEST_F(HttpConnectionManagerImplTest, TestAccessLogWithTrailers) { @@ -1583,26 +2063,28 @@ TEST_F(HttpConnectionManagerImplTest, TestAccessLogWithTrailers) { EXPECT_NE(nullptr, stream_info.routeEntry()); })); - RequestDecoder* decoder = nullptr; NiceMock encoder; - EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance& data) -> void { - decoder = &conn_manager_->newStream(encoder); + EXPECT_CALL(*codec_, dispatch(_)) + 
.WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { + RequestDecoder* decoder = &conn_manager_->newStream(encoder); - RequestHeaderMapPtr headers{ - new TestRequestHeaderMapImpl{{":method", "GET"}, - {":authority", "host"}, - {":path", "/"}, - {"x-request-id", "125a4afb-6f55-a4ba-ad80-413f09f48a28"}}}; - decoder->decodeHeaders(std::move(headers), true); + RequestHeaderMapPtr headers{ + new TestRequestHeaderMapImpl{{":method", "GET"}, + {":authority", "host"}, + {":path", "/"}, + {"x-request-id", "125a4afb-6f55-a4ba-ad80-413f09f48a28"}}}; + decoder->decodeHeaders(std::move(headers), true); - ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; - filter->callbacks_->encodeHeaders(std::move(response_headers), false); + filter->callbacks_->streamInfo().setResponseCodeDetails(""); + ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + filter->callbacks_->encodeHeaders(std::move(response_headers), false); - ResponseTrailerMapPtr response_trailers{new TestResponseTrailerMapImpl{{"x-trailer", "1"}}}; - filter->callbacks_->encodeTrailers(std::move(response_trailers)); + ResponseTrailerMapPtr response_trailers{new TestResponseTrailerMapImpl{{"x-trailer", "1"}}}; + filter->callbacks_->encodeTrailers(std::move(response_trailers)); - data.drain(4); - })); + data.drain(4); + return Http::okStatus(); + })); Buffer::OwnedImpl fake_input("1234"); conn_manager_->onData(fake_input, false); @@ -1632,16 +2114,18 @@ TEST_F(HttpConnectionManagerImplTest, TestAccessLogWithInvalidRequest) { EXPECT_EQ(nullptr, stream_info.routeEntry()); })); - RequestDecoder* decoder = nullptr; NiceMock encoder; - EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance& data) -> void { - decoder = &conn_manager_->newStream(encoder); - - // These request headers are missing the necessary ":host" - RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{{":method", "GET"}, {":path", "/"}}}; - 
decoder->decodeHeaders(std::move(headers), true); - data.drain(0); - })); + EXPECT_CALL(*codec_, dispatch(_)) + .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { + RequestDecoder* decoder = &conn_manager_->newStream(encoder); + + // These request headers are missing the necessary ":host" + RequestHeaderMapPtr headers{ + new TestRequestHeaderMapImpl{{":method", "GET"}, {":path", "/"}}}; + decoder->decodeHeaders(std::move(headers), true); + data.drain(0); + return Http::okStatus(); + })); Buffer::OwnedImpl fake_input; conn_manager_->onData(fake_input, false); @@ -1671,26 +2155,28 @@ TEST_F(HttpConnectionManagerImplTest, TestAccessLogSsl) { EXPECT_NE(nullptr, stream_info.routeEntry()); })); - RequestDecoder* decoder = nullptr; NiceMock encoder; - EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance& data) -> void { - decoder = &conn_manager_->newStream(encoder); + EXPECT_CALL(*codec_, dispatch(_)) + .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { + RequestDecoder* decoder = &conn_manager_->newStream(encoder); - RequestHeaderMapPtr headers{ - new TestRequestHeaderMapImpl{{":method", "GET"}, - {":authority", "host"}, - {":path", "/"}, - {"x-request-id", "125a4afb-6f55-a4ba-ad80-413f09f48a28"}}}; - decoder->decodeHeaders(std::move(headers), true); + RequestHeaderMapPtr headers{ + new TestRequestHeaderMapImpl{{":method", "GET"}, + {":authority", "host"}, + {":path", "/"}, + {"x-request-id", "125a4afb-6f55-a4ba-ad80-413f09f48a28"}}}; + decoder->decodeHeaders(std::move(headers), true); - ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; - filter->callbacks_->encodeHeaders(std::move(response_headers), false); + filter->callbacks_->streamInfo().setResponseCodeDetails(""); + ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + filter->callbacks_->encodeHeaders(std::move(response_headers), false); - ResponseTrailerMapPtr 
response_trailers{new TestResponseTrailerMapImpl{{"x-trailer", "1"}}}; - filter->callbacks_->encodeTrailers(std::move(response_trailers)); + ResponseTrailerMapPtr response_trailers{new TestResponseTrailerMapImpl{{"x-trailer", "1"}}}; + filter->callbacks_->encodeTrailers(std::move(response_trailers)); - data.drain(4); - })); + data.drain(4); + return Http::okStatus(); + })); Buffer::OwnedImpl fake_input("1234"); conn_manager_->onData(fake_input, false); @@ -1714,23 +2200,25 @@ TEST_F(HttpConnectionManagerImplTest, DoNotStartSpanIfTracingIsNotEnabled) { callbacks.addStreamDecoderFilter(filter); })); - RequestDecoder* decoder = nullptr; NiceMock encoder; - EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance& data) -> void { - decoder = &conn_manager_->newStream(encoder); + EXPECT_CALL(*codec_, dispatch(_)) + .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { + RequestDecoder* decoder = &conn_manager_->newStream(encoder); - RequestHeaderMapPtr headers{ - new TestRequestHeaderMapImpl{{":method", "GET"}, - {":authority", "host"}, - {":path", "/"}, - {"x-request-id", "125a4afb-6f55-a4ba-ad80-413f09f48a28"}}}; - decoder->decodeHeaders(std::move(headers), true); + RequestHeaderMapPtr headers{ + new TestRequestHeaderMapImpl{{":method", "GET"}, + {":authority", "host"}, + {":path", "/"}, + {"x-request-id", "125a4afb-6f55-a4ba-ad80-413f09f48a28"}}}; + decoder->decodeHeaders(std::move(headers), true); - ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; - filter->callbacks_->encodeHeaders(std::move(response_headers), true); + filter->callbacks_->streamInfo().setResponseCodeDetails(""); + ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + filter->callbacks_->encodeHeaders(std::move(response_headers), true); - data.drain(4); - })); + data.drain(4); + return Http::okStatus(); + })); Buffer::OwnedImpl fake_input("1234"); conn_manager_->onData(fake_input, 
false); @@ -1739,19 +2227,19 @@ TEST_F(HttpConnectionManagerImplTest, DoNotStartSpanIfTracingIsNotEnabled) { TEST_F(HttpConnectionManagerImplTest, NoPath) { setup(false, ""); - RequestDecoder* decoder = nullptr; NiceMock encoder; - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> void { - decoder = &conn_manager_->newStream(encoder); + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { + RequestDecoder* decoder = &conn_manager_->newStream(encoder); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":method", "NOT_CONNECT"}}}; decoder->decodeHeaders(std::move(headers), true); data.drain(4); + return Http::okStatus(); })); EXPECT_CALL(encoder, encodeHeaders(_, true)) .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void { - EXPECT_EQ("404", headers.Status()->value().getStringView()); + EXPECT_EQ("404", headers.getStatusValue()); })); Buffer::OwnedImpl fake_input("1234"); @@ -1765,20 +2253,23 @@ TEST_F(HttpConnectionManagerImplTest, PerStreamIdleTimeoutNotConfigured) { setup(false, ""); EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, createTimer_(_)).Times(0); - EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance& data) -> void { - RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); + EXPECT_CALL(*codec_, dispatch(_)) + .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { + RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); - RequestHeaderMapPtr headers{ - new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; - decoder->decodeHeaders(std::move(headers), false); + RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ + {":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; + decoder->decodeHeaders(std::move(headers), false); - data.drain(4); - })); + data.drain(4); + return Http::okStatus(); + })); 
Buffer::OwnedImpl fake_input("1234"); conn_manager_->onData(fake_input, false); EXPECT_EQ(0U, stats_.named_.downstream_rq_idle_timeout_.value()); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } // When the global timeout is configured, the timer is enabled before we receive @@ -1787,7 +2278,7 @@ TEST_F(HttpConnectionManagerImplTest, PerStreamIdleTimeoutGlobal) { stream_idle_timeout_ = std::chrono::milliseconds(10); setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance&) -> Http::Status { Event::MockTimer* idle_timer = setUpTimer(); EXPECT_CALL(*idle_timer, enableTimer(std::chrono::milliseconds(10), _)); conn_manager_->newStream(response_encoder_); @@ -1797,12 +2288,13 @@ TEST_F(HttpConnectionManagerImplTest, PerStreamIdleTimeoutGlobal) { EXPECT_CALL(*idle_timer, enableTimer(_, _)).Times(2); EXPECT_CALL(*idle_timer, disableTimer()); idle_timer->invokeCallback(); + return Http::okStatus(); })); // 408 direct response after timeout. 
EXPECT_CALL(response_encoder_, encodeHeaders(_, false)) .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void { - EXPECT_EQ("408", headers.Status()->value().getStringView()); + EXPECT_EQ("408", headers.getStatusValue()); })); std::string response_body; EXPECT_CALL(response_encoder_, encodeData(_, true)).WillOnce(AddBufferToString(&response_body)); @@ -1825,7 +2317,7 @@ TEST_F(HttpConnectionManagerImplTest, AccessEncoderRouteBeforeHeadersArriveOnIdl callbacks.addStreamEncoderFilter(filter); })); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { Event::MockTimer* idle_timer = setUpTimer(); EXPECT_CALL(*idle_timer, enableTimer(std::chrono::milliseconds(10), _)); conn_manager_->newStream(response_encoder_); @@ -1836,10 +2328,11 @@ TEST_F(HttpConnectionManagerImplTest, AccessEncoderRouteBeforeHeadersArriveOnIdl EXPECT_CALL(*idle_timer, disableTimer()); // Simulate and idle timeout so that the filter chain gets created. idle_timer->invokeCallback(); + return Http::okStatus(); })); // This should not be called as we don't have request headers. 
- EXPECT_CALL(*route_config_provider_.route_config_, route(_, _, _)).Times(0); + EXPECT_CALL(*route_config_provider_.route_config_, route(_, _, _, _)).Times(0); EXPECT_CALL(*filter, encodeHeaders(_, _)) .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus { @@ -1866,7 +2359,7 @@ TEST_F(HttpConnectionManagerImplTest, TestStreamIdleAccessLog) { setup(false, ""); NiceMock encoder; - EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance&) -> Http::Status { Event::MockTimer* idle_timer = setUpTimer(); EXPECT_CALL(*idle_timer, enableTimer(std::chrono::milliseconds(10), _)); conn_manager_->newStream(response_encoder_); @@ -1876,6 +2369,7 @@ TEST_F(HttpConnectionManagerImplTest, TestStreamIdleAccessLog) { EXPECT_CALL(*idle_timer, enableTimer(_, _)).Times(2); EXPECT_CALL(*idle_timer, disableTimer()); idle_timer->invokeCallback(); + return Http::okStatus(); })); std::shared_ptr filter(new NiceMock()); @@ -1884,7 +2378,7 @@ TEST_F(HttpConnectionManagerImplTest, TestStreamIdleAccessLog) { // 408 direct response after timeout. 
EXPECT_CALL(response_encoder_, encodeHeaders(_, false)) .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void { - EXPECT_EQ("408", headers.Status()->value().getStringView()); + EXPECT_EQ("408", headers.getStatusValue()); })); std::string response_body; @@ -1918,23 +2412,27 @@ TEST_F(HttpConnectionManagerImplTest, PerStreamIdleTimeoutRouteOverride) { ON_CALL(route_config_provider_.route_config_->route_->route_entry_, idleTimeout()) .WillByDefault(Return(std::chrono::milliseconds(30))); - EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance& data) -> void { - Event::MockTimer* idle_timer = setUpTimer(); - EXPECT_CALL(*idle_timer, enableTimer(std::chrono::milliseconds(10), _)); - RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); - - RequestHeaderMapPtr headers{ - new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; - EXPECT_CALL(*idle_timer, enableTimer(std::chrono::milliseconds(30), _)); - decoder->decodeHeaders(std::move(headers), false); - - data.drain(4); - })); + EXPECT_CALL(*codec_, dispatch(_)) + .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { + Event::MockTimer* idle_timer = setUpTimer(); + EXPECT_CALL(*idle_timer, enableTimer(std::chrono::milliseconds(10), _)); + RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); + + RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ + {":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; + EXPECT_CALL(*idle_timer, enableTimer(std::chrono::milliseconds(30), _)); + EXPECT_CALL(*idle_timer, disableTimer()); + decoder->decodeHeaders(std::move(headers), false); + + data.drain(4); + return Http::okStatus(); + })); Buffer::OwnedImpl fake_input("1234"); conn_manager_->onData(fake_input, false); EXPECT_EQ(0U, stats_.named_.downstream_rq_idle_timeout_.value()); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } // Per-route zero timeout overrides 
the global stream idle timeout. @@ -1944,23 +2442,27 @@ TEST_F(HttpConnectionManagerImplTest, PerStreamIdleTimeoutRouteZeroOverride) { ON_CALL(route_config_provider_.route_config_->route_->route_entry_, idleTimeout()) .WillByDefault(Return(std::chrono::milliseconds(0))); - EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance& data) -> void { - Event::MockTimer* idle_timer = setUpTimer(); - EXPECT_CALL(*idle_timer, enableTimer(std::chrono::milliseconds(10), _)); - RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); - - RequestHeaderMapPtr headers{ - new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; - EXPECT_CALL(*idle_timer, disableTimer()); - decoder->decodeHeaders(std::move(headers), false); - - data.drain(4); - })); + EXPECT_CALL(*codec_, dispatch(_)) + .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { + Event::MockTimer* idle_timer = setUpTimer(); + EXPECT_CALL(*idle_timer, enableTimer(std::chrono::milliseconds(10), _)); + RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); + + RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ + {":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; + EXPECT_CALL(*idle_timer, disableTimer()); + decoder->decodeHeaders(std::move(headers), false); + + data.drain(4); + return Http::okStatus(); + })); Buffer::OwnedImpl fake_input("1234"); conn_manager_->onData(fake_input, false); EXPECT_EQ(0U, stats_.named_.downstream_rq_idle_timeout_.value()); + + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } // Validate the per-stream idle timeout after having sent downstream headers. @@ -1970,7 +2472,7 @@ TEST_F(HttpConnectionManagerImplTest, PerStreamIdleTimeoutAfterDownstreamHeaders .WillByDefault(Return(std::chrono::milliseconds(10))); // Codec sends downstream request headers. 
- EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); Event::MockTimer* idle_timer = setUpTimer(); @@ -1986,12 +2488,13 @@ TEST_F(HttpConnectionManagerImplTest, PerStreamIdleTimeoutAfterDownstreamHeaders idle_timer->invokeCallback(); data.drain(4); + return Http::okStatus(); })); // 408 direct response after timeout. EXPECT_CALL(response_encoder_, encodeHeaders(_, false)) .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void { - EXPECT_EQ("408", headers.Status()->value().getStringView()); + EXPECT_EQ("408", headers.getStatusValue()); })); std::string response_body; EXPECT_CALL(response_encoder_, encodeData(_, true)).WillOnce(AddBufferToString(&response_body)); @@ -2011,7 +2514,7 @@ TEST_F(HttpConnectionManagerImplTest, PerStreamIdleTimeoutNormalTermination) { // Codec sends downstream request headers. Event::MockTimer* idle_timer = setUpTimer(); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ @@ -2020,6 +2523,7 @@ TEST_F(HttpConnectionManagerImplTest, PerStreamIdleTimeoutNormalTermination) { decoder->decodeHeaders(std::move(headers), false); data.drain(4); + return Http::okStatus(); })); Buffer::OwnedImpl fake_input("1234"); @@ -2039,7 +2543,7 @@ TEST_F(HttpConnectionManagerImplTest, PerStreamIdleTimeoutAfterDownstreamHeaders .WillByDefault(Return(std::chrono::milliseconds(10))); // Codec sends downstream request headers. 
- EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); Event::MockTimer* idle_timer = setUpTimer(); @@ -2058,12 +2562,13 @@ TEST_F(HttpConnectionManagerImplTest, PerStreamIdleTimeoutAfterDownstreamHeaders idle_timer->invokeCallback(); data.drain(4); + return Http::okStatus(); })); // 408 direct response after timeout. EXPECT_CALL(response_encoder_, encodeHeaders(_, false)) .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void { - EXPECT_EQ("408", headers.Status()->value().getStringView()); + EXPECT_EQ("408", headers.getStatusValue()); })); std::string response_body; EXPECT_CALL(response_encoder_, encodeData(_, true)).WillOnce(AddBufferToString(&response_body)); @@ -2092,7 +2597,7 @@ TEST_F(HttpConnectionManagerImplTest, PerStreamIdleTimeoutAfterUpstreamHeaders) // Codec sends downstream request headers, upstream response headers are // encoded. - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); Event::MockTimer* idle_timer = setUpTimer(); @@ -2109,12 +2614,13 @@ TEST_F(HttpConnectionManagerImplTest, PerStreamIdleTimeoutAfterUpstreamHeaders) idle_timer->invokeCallback(); data.drain(4); + return Http::okStatus(); })); // 200 upstream response. 
EXPECT_CALL(response_encoder_, encodeHeaders(_, false)) .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void { - EXPECT_EQ("200", headers.Status()->value().getStringView()); + EXPECT_EQ("200", headers.getStatusValue()); })); Buffer::OwnedImpl fake_input("1234"); @@ -2143,7 +2649,7 @@ TEST_F(HttpConnectionManagerImplTest, PerStreamIdleTimeoutAfterBidiData) { // encoded, data events happen in various directions. Event::MockTimer* idle_timer = setUpTimer(); RequestDecoder* decoder; - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { decoder = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; @@ -2157,6 +2663,7 @@ TEST_F(HttpConnectionManagerImplTest, PerStreamIdleTimeoutAfterBidiData) { ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; EXPECT_CALL(*idle_timer, enableTimer(_, _)); + filter->callbacks_->streamInfo().setResponseCodeDetails(""); filter->callbacks_->encodeHeaders(std::move(response_headers), false); EXPECT_CALL(*idle_timer, enableTimer(_, _)); @@ -2174,6 +2681,7 @@ TEST_F(HttpConnectionManagerImplTest, PerStreamIdleTimeoutAfterBidiData) { idle_timer->invokeCallback(); data.drain(4); + return Http::okStatus(); })); // 100 continue. @@ -2182,7 +2690,7 @@ TEST_F(HttpConnectionManagerImplTest, PerStreamIdleTimeoutAfterBidiData) { // 200 upstream response. 
EXPECT_CALL(response_encoder_, encodeHeaders(_, false)) .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void { - EXPECT_EQ("200", headers.Status()->value().getStringView()); + EXPECT_EQ("200", headers.getStatusValue()); })); std::string response_body; @@ -2198,41 +2706,48 @@ TEST_F(HttpConnectionManagerImplTest, PerStreamIdleTimeoutAfterBidiData) { TEST_F(HttpConnectionManagerImplTest, RequestTimeoutDisabledByDefault) { setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, createTimer_).Times(0); conn_manager_->newStream(response_encoder_); + return Http::okStatus(); })); Buffer::OwnedImpl fake_input("1234"); conn_manager_->onData(fake_input, false); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } TEST_F(HttpConnectionManagerImplTest, RequestTimeoutDisabledIfSetToZero) { request_timeout_ = std::chrono::milliseconds(0); setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, createTimer_).Times(0); conn_manager_->newStream(response_encoder_); + return Http::okStatus(); })); Buffer::OwnedImpl fake_input("1234"); conn_manager_->onData(fake_input, false); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } TEST_F(HttpConnectionManagerImplTest, RequestTimeoutValidlyConfigured) { request_timeout_ = std::chrono::milliseconds(10); setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { Event::MockTimer* request_timer = setUpTimer(); EXPECT_CALL(*request_timer, 
enableTimer(request_timeout_, _)); + EXPECT_CALL(*request_timer, disableTimer()); conn_manager_->newStream(response_encoder_); + return Http::okStatus(); })); Buffer::OwnedImpl fake_input("1234"); conn_manager_->onData(fake_input, false); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } TEST_F(HttpConnectionManagerImplTest, RequestTimeoutCallbackDisarmsAndReturns408) { @@ -2240,20 +2755,21 @@ TEST_F(HttpConnectionManagerImplTest, RequestTimeoutCallbackDisarmsAndReturns408 setup(false, ""); std::string response_body; - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { Event::MockTimer* request_timer = setUpTimer(); EXPECT_CALL(*request_timer, enableTimer(request_timeout_, _)).Times(1); EXPECT_CALL(*request_timer, disableTimer()).Times(AtLeast(1)); EXPECT_CALL(response_encoder_, encodeHeaders(_, false)) .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void { - EXPECT_EQ("408", headers.Status()->value().getStringView()); + EXPECT_EQ("408", headers.getStatusValue()); })); EXPECT_CALL(response_encoder_, encodeData(_, true)).WillOnce(AddBufferToString(&response_body)); conn_manager_->newStream(response_encoder_); EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, setTrackedObject(_)).Times(2); request_timer->invokeCallback(); + return Http::okStatus(); })); Buffer::OwnedImpl fake_input("1234"); @@ -2267,10 +2783,10 @@ TEST_F(HttpConnectionManagerImplTest, RequestTimeoutIsNotDisarmedOnIncompleteReq request_timeout_ = std::chrono::milliseconds(10); setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { Event::MockTimer* request_timer = setUpTimer(); EXPECT_CALL(*request_timer, enableTimer(request_timeout_, _)).Times(1); - EXPECT_CALL(*request_timer, 
disableTimer()).Times(0); + EXPECT_CALL(*request_timer, disableTimer()).Times(1); RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ @@ -2278,19 +2794,22 @@ TEST_F(HttpConnectionManagerImplTest, RequestTimeoutIsNotDisarmedOnIncompleteReq // the second parameter 'false' leaves the stream open decoder->decodeHeaders(std::move(headers), false); + return Http::okStatus(); })); Buffer::OwnedImpl fake_input("1234"); conn_manager_->onData(fake_input, false); // kick off request EXPECT_EQ(0U, stats_.named_.downstream_rq_timeout_.value()); + + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } TEST_F(HttpConnectionManagerImplTest, RequestTimeoutIsDisarmedOnCompleteRequestWithHeader) { request_timeout_ = std::chrono::milliseconds(10); setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { Event::MockTimer* request_timer = setUpTimer(); EXPECT_CALL(*request_timer, enableTimer(request_timeout_, _)).Times(1); @@ -2298,21 +2817,23 @@ TEST_F(HttpConnectionManagerImplTest, RequestTimeoutIsDisarmedOnCompleteRequestW RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; - EXPECT_CALL(*request_timer, disableTimer()).Times(1); + EXPECT_CALL(*request_timer, disableTimer()).Times(2); decoder->decodeHeaders(std::move(headers), true); + return Http::okStatus(); })); Buffer::OwnedImpl fake_input("1234"); conn_manager_->onData(fake_input, false); // kick off request EXPECT_EQ(0U, stats_.named_.downstream_rq_timeout_.value()); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } TEST_F(HttpConnectionManagerImplTest, RequestTimeoutIsDisarmedOnCompleteRequestWithData) { request_timeout_ = std::chrono::milliseconds(10); setup(false, ""); - EXPECT_CALL(*codec_, 
dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { Event::MockTimer* request_timer = setUpTimer(); EXPECT_CALL(*request_timer, enableTimer(request_timeout_, _)).Times(1); @@ -2321,21 +2842,23 @@ TEST_F(HttpConnectionManagerImplTest, RequestTimeoutIsDisarmedOnCompleteRequestW new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "POST"}}}; decoder->decodeHeaders(std::move(headers), false); - EXPECT_CALL(*request_timer, disableTimer()).Times(1); + EXPECT_CALL(*request_timer, disableTimer()).Times(2); decoder->decodeData(data, true); + return Http::okStatus(); })); Buffer::OwnedImpl fake_input("1234"); conn_manager_->onData(fake_input, false); EXPECT_EQ(0U, stats_.named_.downstream_rq_timeout_.value()); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } TEST_F(HttpConnectionManagerImplTest, RequestTimeoutIsDisarmedOnCompleteRequestWithTrailers) { request_timeout_ = std::chrono::milliseconds(10); setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { Event::MockTimer* request_timer = setUpTimer(); EXPECT_CALL(*request_timer, enableTimer(request_timeout_, _)).Times(1); RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); @@ -2345,15 +2868,17 @@ TEST_F(HttpConnectionManagerImplTest, RequestTimeoutIsDisarmedOnCompleteRequestW decoder->decodeHeaders(std::move(headers), false); decoder->decodeData(data, false); - EXPECT_CALL(*request_timer, disableTimer()).Times(1); + EXPECT_CALL(*request_timer, disableTimer()).Times(2); RequestTrailerMapPtr trailers{new TestRequestTrailerMapImpl{{"foo", "bar"}}}; decoder->decodeTrailers(std::move(trailers)); + return Http::okStatus(); })); Buffer::OwnedImpl fake_input("1234"); 
conn_manager_->onData(fake_input, false); EXPECT_EQ(0U, stats_.named_.downstream_rq_timeout_.value()); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } TEST_F(HttpConnectionManagerImplTest, RequestTimeoutIsDisarmedOnEncodeHeaders) { @@ -2366,7 +2891,7 @@ TEST_F(HttpConnectionManagerImplTest, RequestTimeoutIsDisarmedOnEncodeHeaders) { })); EXPECT_CALL(response_encoder_, encodeHeaders(_, _)); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { Event::MockTimer* request_timer = setUpTimer(); EXPECT_CALL(*request_timer, enableTimer(request_timeout_, _)).Times(1); @@ -2378,7 +2903,9 @@ TEST_F(HttpConnectionManagerImplTest, RequestTimeoutIsDisarmedOnEncodeHeaders) { EXPECT_CALL(*request_timer, disableTimer()).Times(1); ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + filter->callbacks_->streamInfo().setResponseCodeDetails(""); filter->callbacks_->encodeHeaders(std::move(response_headers), false); + return Http::okStatus(); })); Buffer::OwnedImpl fake_input("1234"); @@ -2392,12 +2919,13 @@ TEST_F(HttpConnectionManagerImplTest, RequestTimeoutIsDisarmedOnConnectionTermin setup(false, ""); Event::MockTimer* request_timer = setUpTimer(); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; decoder->decodeHeaders(std::move(headers), false); + return Http::okStatus(); })); Buffer::OwnedImpl fake_input("1234"); @@ -2415,28 +2943,33 @@ TEST_F(HttpConnectionManagerImplTest, MaxStreamDurationDisabledIfSetToZero) { max_stream_duration_ = std::chrono::milliseconds(0); 
setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, createTimer_).Times(0); conn_manager_->newStream(response_encoder_); + return Http::okStatus(); })); Buffer::OwnedImpl fake_input("1234"); conn_manager_->onData(fake_input, false); // kick off request + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } TEST_F(HttpConnectionManagerImplTest, MaxStreamDurationValidlyConfigured) { max_stream_duration_ = std::chrono::milliseconds(10); setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { Event::MockTimer* duration_timer = setUpTimer(); EXPECT_CALL(*duration_timer, enableTimer(max_stream_duration_.value(), _)); + EXPECT_CALL(*duration_timer, disableTimer()); conn_manager_->newStream(response_encoder_); + return Http::okStatus(); })); Buffer::OwnedImpl fake_input("1234"); conn_manager_->onData(fake_input, false); // kick off request + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } TEST_F(HttpConnectionManagerImplTest, MaxStreamDurationCallbackResetStream) { @@ -2444,29 +2977,134 @@ TEST_F(HttpConnectionManagerImplTest, MaxStreamDurationCallbackResetStream) { setup(false, ""); Event::MockTimer* duration_timer = setUpTimer(); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { EXPECT_CALL(*duration_timer, enableTimer(max_stream_duration_.value(), _)).Times(1); conn_manager_->newStream(response_encoder_); + return Http::okStatus(); })); Buffer::OwnedImpl fake_input("1234"); conn_manager_->onData(fake_input, false); // kick off request 
EXPECT_CALL(*duration_timer, disableTimer()); + EXPECT_CALL(response_encoder_, encodeHeaders(_, false)); + EXPECT_CALL(response_encoder_, encodeData(_, true)); duration_timer->invokeCallback(); EXPECT_EQ(1U, stats_.named_.downstream_rq_max_duration_reached_.value()); EXPECT_EQ(1U, stats_.named_.downstream_rq_rx_reset_.value()); } +TEST_F(HttpConnectionManagerImplTest, Http10Rejected) { + setup(false, ""); + RequestDecoder* decoder = nullptr; + NiceMock encoder; + EXPECT_CALL(*codec_, protocol()).Times(AnyNumber()).WillRepeatedly(Return(Protocol::Http10)); + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { + decoder = &conn_manager_->newStream(encoder); + RequestHeaderMapPtr headers{ + new TestRequestHeaderMapImpl{{":authority", "host"}, {":method", "GET"}, {":path", "/"}}}; + decoder->decodeHeaders(std::move(headers), true); + data.drain(4); + return Http::okStatus(); + })); + + EXPECT_CALL(encoder, encodeHeaders(_, true)) + .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void { + EXPECT_EQ("426", headers.getStatusValue()); + EXPECT_EQ("close", headers.getConnectionValue()); + })); + + Buffer::OwnedImpl fake_input("1234"); + conn_manager_->onData(fake_input, false); +} + +TEST_F(HttpConnectionManagerImplTest, Http10ConnCloseLegacy) { + http1_settings_.accept_http_10_ = true; + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.fixed_connection_close", "false"}}); + setup(false, ""); + RequestDecoder* decoder = nullptr; + NiceMock encoder; + EXPECT_CALL(*codec_, protocol()).Times(AnyNumber()).WillRepeatedly(Return(Protocol::Http10)); + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { + decoder = &conn_manager_->newStream(encoder); + RequestHeaderMapPtr headers{ + new TestRequestHeaderMapImpl{{":authority", "host:80"}, {":method", "CONNECT"}}}; + decoder->decodeHeaders(std::move(headers), 
true); + data.drain(4); + return Http::okStatus(); + })); + + EXPECT_CALL(encoder, encodeHeaders(_, true)) + .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void { + EXPECT_EQ("close", headers.getConnectionValue()); + })); + + Buffer::OwnedImpl fake_input("1234"); + conn_manager_->onData(fake_input, false); +} + +TEST_F(HttpConnectionManagerImplTest, ProxyConnectLegacyClose) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.fixed_connection_close", "false"}}); + setup(false, ""); + RequestDecoder* decoder = nullptr; + NiceMock encoder; + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { + decoder = &conn_manager_->newStream(encoder); + RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ + {":authority", "host:80"}, {":method", "CONNECT"}, {"proxy-connection", "close"}}}; + decoder->decodeHeaders(std::move(headers), true); + data.drain(4); + return Http::okStatus(); + })); + + EXPECT_CALL(encoder, encodeHeaders(_, true)) + .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void { + EXPECT_EQ("close", headers.getConnectionValue()); + })); + + Buffer::OwnedImpl fake_input("1234"); + conn_manager_->onData(fake_input, false); +} + +TEST_F(HttpConnectionManagerImplTest, ConnectLegacyClose) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.fixed_connection_close", "false"}}); + setup(false, ""); + RequestDecoder* decoder = nullptr; + NiceMock encoder; + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { + decoder = &conn_manager_->newStream(encoder); + RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ + {":authority", "host"}, {":method", "CONNECT"}, {"connection", "close"}}}; + decoder->decodeHeaders(std::move(headers), true); + data.drain(4); + return Http::okStatus(); + })); + + 
EXPECT_CALL(encoder, encodeHeaders(_, true)) + .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void { + EXPECT_EQ("close", headers.getConnectionValue()); + })); + + Buffer::OwnedImpl fake_input("1234"); + conn_manager_->onData(fake_input, false); +} + TEST_F(HttpConnectionManagerImplTest, MaxStreamDurationCallbackNotCalledIfResetStreamValidly) { max_stream_duration_ = std::chrono::milliseconds(5000); setup(false, ""); Event::MockTimer* duration_timer = setUpTimer(); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { EXPECT_CALL(*duration_timer, enableTimer(max_stream_duration_.value(), _)).Times(1); conn_manager_->newStream(response_encoder_); + return Http::okStatus(); })); Buffer::OwnedImpl fake_input("1234"); @@ -2481,22 +3119,27 @@ TEST_F(HttpConnectionManagerImplTest, MaxStreamDurationCallbackNotCalledIfResetS TEST_F(HttpConnectionManagerImplTest, RejectWebSocketOnNonWebSocketRoute) { setup(false, ""); - RequestDecoder* decoder = nullptr; NiceMock encoder; - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> void { - decoder = &conn_manager_->newStream(encoder); + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { + RequestDecoder* decoder = &conn_manager_->newStream(encoder); RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{{":authority", "host"}, {":method", "GET"}, {":path", "/"}, {"connection", "Upgrade"}, {"upgrade", "websocket"}}}; - decoder->decodeHeaders(std::move(headers), true); + decoder->decodeHeaders(std::move(headers), false); + // Try sending trailers after the headers which will be rejected, just to + // test the HCM logic that further decoding will not be passed to the + // filters once the early response path is kicked off. 
+ RequestTrailerMapPtr trailers{new TestRequestTrailerMapImpl{{"bazzz", "bar"}}}; + decoder->decodeTrailers(std::move(trailers)); data.drain(4); + return Http::okStatus(); })); EXPECT_CALL(encoder, encodeHeaders(_, true)) .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void { - EXPECT_EQ("403", headers.Status()->value().getStringView()); + EXPECT_EQ("403", headers.getStatusValue()); })); Buffer::OwnedImpl fake_input("1234"); @@ -2526,7 +3169,7 @@ TEST_F(HttpConnectionManagerImplTest, FooUpgradeDrainClose) { EXPECT_CALL(encoder, encodeHeaders(_, false)) .WillOnce(Invoke([&](const ResponseHeaderMap& headers, bool) -> void { EXPECT_NE(nullptr, headers.Connection()); - EXPECT_EQ("upgrade", headers.Connection()->value().getStringView()); + EXPECT_EQ("upgrade", headers.getConnectionValue()); })); EXPECT_CALL(*filter, setDecoderFilterCallbacks(_)); @@ -2541,48 +3184,108 @@ TEST_F(HttpConnectionManagerImplTest, FooUpgradeDrainClose) { // When dispatch is called on the codec, we pretend to get a new stream and then fire a headers // only request into it. Then we respond into the filter. 
- RequestDecoder* decoder = nullptr; - EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance& data) -> void { - decoder = &conn_manager_->newStream(encoder); + EXPECT_CALL(*codec_, dispatch(_)) + .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { + RequestDecoder* decoder = &conn_manager_->newStream(encoder); + + RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{{":authority", "host"}, + {":method", "GET"}, + {":path", "/"}, + {"connection", "Upgrade"}, + {"upgrade", "foo"}}}; + decoder->decodeHeaders(std::move(headers), false); + + filter->decoder_callbacks_->streamInfo().setResponseCodeDetails(""); + ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{ + {":status", "101"}, {"Connection", "upgrade"}, {"upgrade", "foo"}}}; + filter->decoder_callbacks_->encodeHeaders(std::move(response_headers), false); + + data.drain(4); + return Http::okStatus(); + })); - RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{{":authority", "host"}, - {":method", "GET"}, - {":path", "/"}, - {"connection", "Upgrade"}, - {"upgrade", "foo"}}}; - decoder->decodeHeaders(std::move(headers), false); + // Kick off the incoming data. Use extra data which should cause a redispatch. + Buffer::OwnedImpl fake_input("1234"); + conn_manager_->onData(fake_input, false); +} + +// Make sure CONNECT requests hit the upgrade filter path. 
+TEST_F(HttpConnectionManagerImplTest, ConnectAsUpgrade) { + setup(false, "envoy-custom-server", false); - ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{ - {":status", "101"}, {"Connection", "upgrade"}, {"upgrade", "foo"}}}; - filter->decoder_callbacks_->encodeHeaders(std::move(response_headers), false); + NiceMock encoder; - data.drain(4); - })); + EXPECT_CALL(filter_factory_, createUpgradeFilterChain("CONNECT", _, _)) + .WillRepeatedly(Return(true)); + + EXPECT_CALL(*codec_, dispatch(_)) + .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { + RequestDecoder* decoder = &conn_manager_->newStream(encoder); + RequestHeaderMapPtr headers{ + new TestRequestHeaderMapImpl{{":authority", "host"}, {":method", "CONNECT"}}}; + decoder->decodeHeaders(std::move(headers), false); + data.drain(4); + return Http::okStatus(); + })); // Kick off the incoming data. Use extra data which should cause a redispatch. Buffer::OwnedImpl fake_input("1234"); conn_manager_->onData(fake_input, false); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } -// Make sure CONNECT requests hit the upgrade filter path. 
-TEST_F(HttpConnectionManagerImplTest, ConnectAsUpgrade) { +TEST_F(HttpConnectionManagerImplTest, ConnectWithEmptyPath) { setup(false, "envoy-custom-server", false); NiceMock encoder; - RequestDecoder* decoder = nullptr; EXPECT_CALL(filter_factory_, createUpgradeFilterChain("CONNECT", _, _)) .WillRepeatedly(Return(true)); - EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance& data) -> void { - decoder = &conn_manager_->newStream(encoder); - RequestHeaderMapPtr headers{ - new TestRequestHeaderMapImpl{{":authority", "host"}, {":method", "CONNECT"}}}; - decoder->decodeHeaders(std::move(headers), false); - data.drain(4); - })); + EXPECT_CALL(*codec_, dispatch(_)) + .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { + RequestDecoder* decoder = &conn_manager_->newStream(encoder); + RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ + {":authority", "host"}, {":path", ""}, {":method", "CONNECT"}}}; + decoder->decodeHeaders(std::move(headers), false); + data.drain(4); + return Http::okStatus(); + })); + + // Kick off the incoming data. Use extra data which should cause a redispatch. 
+ Buffer::OwnedImpl fake_input("1234"); + conn_manager_->onData(fake_input, false); +} + +TEST_F(HttpConnectionManagerImplTest, ConnectLegacy) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.stop_faking_paths", "false"}}); + + setup(false, "envoy-custom-server", false); + + NiceMock encoder; + RequestDecoder* decoder = nullptr; + + EXPECT_CALL(filter_factory_, createUpgradeFilterChain("CONNECT", _, _)) + .WillRepeatedly(Return(false)); + + EXPECT_CALL(*codec_, dispatch(_)) + .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { + decoder = &conn_manager_->newStream(encoder); + RequestHeaderMapPtr headers{ + new TestRequestHeaderMapImpl{{":authority", "host"}, {":method", "CONNECT"}}}; + decoder->decodeHeaders(std::move(headers), false); + data.drain(4); + return Http::okStatus(); + })); + + EXPECT_CALL(encoder, encodeHeaders(_, _)) + .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void { + EXPECT_EQ("403", headers.getStatusValue()); + })); - // Kick off the incoming data. Use extra data which should cause a redispatch. + // Kick off the incoming data. 
Buffer::OwnedImpl fake_input("1234"); conn_manager_->onData(fake_input, false); } @@ -2592,13 +3295,13 @@ TEST_F(HttpConnectionManagerImplTest, DrainCloseRaceWithClose) { InSequence s; setup(false, ""); - RequestDecoder* decoder = nullptr; NiceMock encoder; - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { - decoder = &conn_manager_->newStream(encoder); + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { + RequestDecoder* decoder = &conn_manager_->newStream(encoder); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; decoder->decodeHeaders(std::move(headers), true); + return Http::okStatus(); })); setupFilterChain(1, 0); @@ -2616,13 +3319,12 @@ TEST_F(HttpConnectionManagerImplTest, DrainCloseRaceWithClose) { Event::MockTimer* drain_timer = setUpTimer(); EXPECT_CALL(*drain_timer, enableTimer(_, _)); expectOnDestroy(); + decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(""); decoder_filters_[0]->callbacks_->encodeHeaders(std::move(response_headers), true); // Fake a protocol error that races with the drain timeout. This will cause a local close. // Also fake the local close not closing immediately. 
- EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { - throw CodecProtocolException("protocol error"); - })); + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Return(codecProtocolError("protocol error"))); EXPECT_CALL(*drain_timer, disableTimer()); EXPECT_CALL(filter_callbacks_.connection_, close(Network::ConnectionCloseType::FlushWriteAndDelay)) @@ -2633,6 +3335,57 @@ TEST_F(HttpConnectionManagerImplTest, DrainCloseRaceWithClose) { filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::LocalClose); } +TEST_F(HttpConnectionManagerImplTest, + FilterThatWaitsForBodyCanBeCalledAfterFilterThatAddsBodyEvenIfItIsNotLast) { + InSequence s; + setup(false, ""); + + NiceMock encoder; + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { + RequestDecoder* decoder = &conn_manager_->newStream(encoder); + RequestHeaderMapPtr headers{ + new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; + decoder->decodeHeaders(std::move(headers), true); + return Http::okStatus(); + })); + + // 3 filters: + // 1st filter adds a body + // 2nd filter waits for the body + // 3rd filter simulates router filter. 
+ setupFilterChain(3, 0); + + EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true)) + .WillOnce(Invoke([&](RequestHeaderMap&, bool) -> FilterHeadersStatus { + Buffer::OwnedImpl body("body"); + decoder_filters_[0]->callbacks_->addDecodedData(body, false); + return FilterHeadersStatus::Continue; + })); + EXPECT_CALL(*decoder_filters_[0], decodeComplete()); + EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, false)) + .WillOnce(Invoke([](RequestHeaderMap&, bool) -> FilterHeadersStatus { + return FilterHeadersStatus::StopIteration; + })); + EXPECT_CALL(*decoder_filters_[1], decodeData(_, true)) + .WillOnce(Invoke( + [](Buffer::Instance&, bool) -> FilterDataStatus { return FilterDataStatus::Continue; })); + EXPECT_CALL(*decoder_filters_[1], decodeComplete()); + EXPECT_CALL(*decoder_filters_[2], decodeHeaders(_, false)) + .WillOnce(Invoke([](RequestHeaderMap&, bool) -> FilterHeadersStatus { + return FilterHeadersStatus::Continue; + })); + EXPECT_CALL(*decoder_filters_[2], decodeData(_, true)) + .WillOnce(Invoke( + [](Buffer::Instance&, bool) -> FilterDataStatus { return FilterDataStatus::Continue; })); + EXPECT_CALL(*decoder_filters_[2], decodeComplete()); + + Buffer::OwnedImpl fake_input; + conn_manager_->onData(fake_input, false); + + expectOnDestroy(); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); +} + TEST_F(HttpConnectionManagerImplTest, DrainClose) { setup(true, ""); @@ -2645,17 +3398,17 @@ TEST_F(HttpConnectionManagerImplTest, DrainClose) { EXPECT_CALL(*filter, decodeHeaders(_, true)) .WillOnce(Invoke([](RequestHeaderMap& headers, bool) -> FilterHeadersStatus { EXPECT_NE(nullptr, headers.ForwardedFor()); - EXPECT_EQ("https", headers.ForwardedProto()->value().getStringView()); + EXPECT_EQ("https", headers.getForwardedProtoValue()); return FilterHeadersStatus::StopIteration; })); - RequestDecoder* decoder = nullptr; NiceMock encoder; - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { - 
decoder = &conn_manager_->newStream(encoder); + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { + RequestDecoder* decoder = &conn_manager_->newStream(encoder); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; decoder->decodeHeaders(std::move(headers), true); + return Http::okStatus(); })); Buffer::OwnedImpl fake_input; @@ -2666,6 +3419,7 @@ TEST_F(HttpConnectionManagerImplTest, DrainClose) { EXPECT_CALL(*drain_timer, enableTimer(_, _)); EXPECT_CALL(drain_close_, drainClose()).WillOnce(Return(true)); EXPECT_CALL(*codec_, shutdownNotice()); + filter->callbacks_->streamInfo().setResponseCodeDetails(""); filter->callbacks_->encodeHeaders(std::move(response_headers), true); EXPECT_EQ(ssl_connection_.get(), filter->callbacks_->connection()->ssl().get()); @@ -2686,11 +3440,12 @@ TEST_F(HttpConnectionManagerImplTest, ResponseBeforeRequestComplete) { InSequence s; setup(false, "envoy-server-test"); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; decoder->decodeHeaders(std::move(headers), false); + return Http::okStatus(); })); setupFilterChain(1, 0); @@ -2704,13 +3459,14 @@ TEST_F(HttpConnectionManagerImplTest, ResponseBeforeRequestComplete) { EXPECT_CALL(response_encoder_, encodeHeaders(_, true)) .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void { EXPECT_NE(nullptr, headers.Server()); - EXPECT_EQ("envoy-server-test", headers.Server()->value().getStringView()); + EXPECT_EQ("envoy-server-test", headers.getServerValue()); })); EXPECT_CALL(*decoder_filters_[0], onDestroy()); EXPECT_CALL(filter_callbacks_.connection_, 
close(Network::ConnectionCloseType::FlushWriteAndDelay)); ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(""); decoder_filters_[0]->callbacks_->encodeHeaders(std::move(response_headers), true); } @@ -2718,11 +3474,12 @@ TEST_F(HttpConnectionManagerImplTest, DisconnectOnProxyConnectionDisconnect) { InSequence s; setup(false, "envoy-server-test"); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ {":authority", "host"}, {":path", "/"}, {":method", "GET"}, {"proxy-connection", "close"}}}; decoder->decodeHeaders(std::move(headers), false); + return Http::okStatus(); })); setupFilterChain(1, 0); @@ -2736,7 +3493,7 @@ TEST_F(HttpConnectionManagerImplTest, DisconnectOnProxyConnectionDisconnect) { EXPECT_CALL(response_encoder_, encodeHeaders(_, true)) .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void { EXPECT_NE(nullptr, headers.Connection()); - EXPECT_EQ("close", headers.Connection()->value().getStringView()); + EXPECT_EQ("close", headers.getConnectionValue()); EXPECT_EQ(nullptr, headers.ProxyConnection()); })); EXPECT_CALL(*decoder_filters_[0], onDestroy()); @@ -2744,6 +3501,7 @@ TEST_F(HttpConnectionManagerImplTest, DisconnectOnProxyConnectionDisconnect) { close(Network::ConnectionCloseType::FlushWriteAndDelay)); ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(""); decoder_filters_[0]->callbacks_->encodeHeaders(std::move(response_headers), true); } @@ -2762,13 +3520,14 @@ TEST_F(HttpConnectionManagerImplTest, ResponseStartBeforeRequestComplete) { 
.WillOnce(Return(FilterHeadersStatus::StopIteration)); // Start the request - RequestDecoder* decoder = nullptr; NiceMock encoder; - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + RequestDecoder* decoder = nullptr; + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { decoder = &conn_manager_->newStream(encoder); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; decoder->decodeHeaders(std::move(headers), false); + return Http::okStatus(); })); Buffer::OwnedImpl fake_input("hello"); @@ -2779,14 +3538,16 @@ TEST_F(HttpConnectionManagerImplTest, ResponseStartBeforeRequestComplete) { EXPECT_CALL(encoder, encodeHeaders(_, false)) .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void { EXPECT_NE(nullptr, headers.Server()); - EXPECT_EQ("", headers.Server()->value().getStringView()); + EXPECT_EQ("", headers.getServerValue()); })); + filter->callbacks_->streamInfo().setResponseCodeDetails(""); filter->callbacks_->encodeHeaders(std::move(response_headers), false); // Finish the request. 
EXPECT_CALL(*filter, decodeData(_, true)); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { decoder->decodeData(data, true); + return Http::okStatus(); })); conn_manager_->onData(fake_input, false); @@ -2805,9 +3566,10 @@ TEST_F(HttpConnectionManagerImplTest, DownstreamDisconnect) { setup(false, ""); NiceMock encoder; - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { conn_manager_->newStream(encoder); data.drain(2); + return Http::okStatus(); })); EXPECT_CALL(filter_factory_, createFilterChain(_)).Times(0); @@ -2824,9 +3586,9 @@ TEST_F(HttpConnectionManagerImplTest, DownstreamProtocolError) { InSequence s; setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { conn_manager_->newStream(response_encoder_); - throw CodecProtocolException("protocol error"); + return codecProtocolError("protocol error"); })); EXPECT_CALL(response_encoder_.stream_, removeCallbacks(_)); @@ -2856,11 +3618,10 @@ TEST_F(HttpConnectionManagerImplTest, TestDownstreamProtocolErrorAccessLog) { EXPECT_TRUE(stream_info.hasResponseFlag(StreamInfo::ResponseFlag::DownstreamProtocolError)); })); - RequestDecoder* decoder = nullptr; NiceMock encoder; - EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance&) -> void { - decoder = &conn_manager_->newStream(encoder); - throw CodecProtocolException("protocol error"); + EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance&) -> Http::Status { + conn_manager_->newStream(encoder); + return codecProtocolError("protocol error"); })); Buffer::OwnedImpl fake_input("1234"); @@ -2887,16 +3648,15 @@ 
TEST_F(HttpConnectionManagerImplTest, TestDownstreamProtocolErrorAfterHeadersAcc EXPECT_TRUE(stream_info.hasResponseFlag(StreamInfo::ResponseFlag::DownstreamProtocolError)); })); - RequestDecoder* decoder = nullptr; NiceMock encoder; - EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance&) -> void { - decoder = &conn_manager_->newStream(encoder); + EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance&) -> Http::Status { + RequestDecoder* decoder = &conn_manager_->newStream(encoder); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":method", "GET"}, {":authority", "host"}, {":path", "/"}}}; decoder->decodeHeaders(std::move(headers), true); - throw CodecProtocolException("protocol error"); + return codecProtocolError("protocol error"); })); Buffer::OwnedImpl fake_input("1234"); @@ -2908,9 +3668,9 @@ TEST_F(HttpConnectionManagerImplTest, FrameFloodError) { InSequence s; setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { conn_manager_->newStream(response_encoder_); - throw FrameFloodException("too many outbound frames."); + return bufferFloodError("too many outbound frames."); })); EXPECT_CALL(response_encoder_.stream_, removeCallbacks(_)); @@ -2959,15 +3719,15 @@ TEST_F(HttpConnectionManagerImplTest, IdleTimeout) { })); NiceMock encoder; - RequestDecoder* decoder = nullptr; - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { - decoder = &conn_manager_->newStream(encoder); + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { + RequestDecoder* decoder = &conn_manager_->newStream(encoder); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; decoder->decodeHeaders(std::move(headers), false); Buffer::OwnedImpl fake_data("hello"); 
decoder->decodeData(fake_data, true); + return Http::okStatus(); })); EXPECT_CALL(*idle_timer, disableTimer()); @@ -2982,6 +3742,7 @@ TEST_F(HttpConnectionManagerImplTest, IdleTimeout) { EXPECT_CALL(*idle_timer, enableTimer(_, _)); ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + filter->callbacks_->streamInfo().setResponseCodeDetails(""); filter->callbacks_->encodeHeaders(std::move(response_headers), true); Event::MockTimer* drain_timer = setUpTimer(); @@ -3028,15 +3789,15 @@ TEST_F(HttpConnectionManagerImplTest, ConnectionDuration) { })); NiceMock encoder; - RequestDecoder* decoder = nullptr; - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { - decoder = &conn_manager_->newStream(encoder); + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { + RequestDecoder* decoder = &conn_manager_->newStream(encoder); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; decoder->decodeHeaders(std::move(headers), false); Buffer::OwnedImpl fake_data("hello"); decoder->decodeData(fake_data, true); + return Http::okStatus(); })); EXPECT_CALL(*filter, decodeHeaders(_, false)) @@ -3049,6 +3810,7 @@ TEST_F(HttpConnectionManagerImplTest, ConnectionDuration) { conn_manager_->onData(fake_input, false); ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + filter->callbacks_->streamInfo().setResponseCodeDetails(""); filter->callbacks_->encodeHeaders(std::move(response_headers), true); Event::MockTimer* drain_timer = setUpTimer(); @@ -3069,7 +3831,7 @@ TEST_F(HttpConnectionManagerImplTest, IntermediateBufferingEarlyResponse) { InSequence s; setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { RequestDecoder* decoder = 
&conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; @@ -3077,6 +3839,7 @@ TEST_F(HttpConnectionManagerImplTest, IntermediateBufferingEarlyResponse) { Buffer::OwnedImpl fake_data("hello"); decoder->decodeData(fake_data, true); + return Http::okStatus(); })); setupFilterChain(2, 0); @@ -3096,6 +3859,7 @@ TEST_F(HttpConnectionManagerImplTest, IntermediateBufferingEarlyResponse) { .WillOnce(Invoke([&](RequestHeaderMap&, bool) -> FilterHeadersStatus { // Now filter 2 will send a complete response. ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + decoder_filters_[1]->callbacks_->streamInfo().setResponseCodeDetails(""); decoder_filters_[1]->callbacks_->encodeHeaders(std::move(response_headers), true); return FilterHeadersStatus::StopIteration; })); @@ -3115,12 +3879,13 @@ TEST_F(HttpConnectionManagerImplTest, DoubleBuffering) { // The data will get moved so we need to have a copy to compare against. 
Buffer::OwnedImpl fake_data("hello"); Buffer::OwnedImpl fake_data_copy("hello"); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; decoder->decodeHeaders(std::move(headers), false); decoder->decodeData(fake_data, true); + return Http::okStatus(); })); setupFilterChain(3, 0); @@ -3151,6 +3916,9 @@ TEST_F(HttpConnectionManagerImplTest, DoubleBuffering) { .WillOnce(Return(FilterDataStatus::StopIterationNoBuffer)); EXPECT_CALL(*decoder_filters_[2], decodeComplete()); decoder_filters_[1]->callbacks_->continueDecoding(); + + expectOnDestroy(); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } TEST_F(HttpConnectionManagerImplTest, ZeroByteDataFiltering) { @@ -3158,11 +3926,12 @@ TEST_F(HttpConnectionManagerImplTest, ZeroByteDataFiltering) { setup(false, ""); RequestDecoder* decoder = nullptr; - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { decoder = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; decoder->decodeHeaders(std::move(headers), false); + return Http::okStatus(); })); setupFilterChain(2, 0); @@ -3191,13 +3960,16 @@ TEST_F(HttpConnectionManagerImplTest, ZeroByteDataFiltering) { .WillOnce(Return(FilterDataStatus::StopIterationNoBuffer)); EXPECT_CALL(*decoder_filters_[1], decodeComplete()); decoder_filters_[0]->callbacks_->continueDecoding(); + + expectOnDestroy(); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } 
TEST_F(HttpConnectionManagerImplTest, FilterAddTrailersInTrailersCallback) { InSequence s; setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; @@ -3208,6 +3980,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterAddTrailersInTrailersCallback) { RequestTrailerMapPtr trailers{new TestRequestTrailerMapImpl{{"bazzz", "bar"}}}; decoder->decodeTrailers(std::move(trailers)); + return Http::okStatus(); })); setupFilterChain(2, 2); @@ -3245,6 +4018,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterAddTrailersInTrailersCallback) { EXPECT_CALL(response_encoder_, encodeHeaders(_, false)); // invoke encodeHeaders + decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(""); decoder_filters_[0]->callbacks_->encodeHeaders( ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, false); @@ -3283,7 +4057,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterAddTrailersInDataCallbackNoTrailers) InSequence s; setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; @@ -3291,6 +4065,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterAddTrailersInDataCallbackNoTrailers) Buffer::OwnedImpl fake_data("hello"); decoder->decodeData(fake_data, true); + return Http::okStatus(); })); setupFilterChain(2, 2); @@ -3335,6 +4110,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterAddTrailersInDataCallbackNoTrailers) 
EXPECT_CALL(response_encoder_, encodeHeaders(_, false)); // invoke encodeHeaders + decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(""); decoder_filters_[0]->callbacks_->encodeHeaders( ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, false); @@ -3374,7 +4150,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterAddBodyInTrailersCallback) { InSequence s; setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; @@ -3385,6 +4161,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterAddBodyInTrailersCallback) { RequestTrailerMapPtr trailers{new TestRequestTrailerMapImpl{{"foo", "bar"}}}; decoder->decodeTrailers(std::move(trailers)); + return Http::okStatus(); })); setupFilterChain(2, 2); @@ -3420,6 +4197,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterAddBodyInTrailersCallback) { .WillOnce(Return(FilterHeadersStatus::Continue)); EXPECT_CALL(response_encoder_, encodeHeaders(_, false)); + decoder_filters_[1]->callbacks_->streamInfo().setResponseCodeDetails(""); decoder_filters_[1]->callbacks_->encodeHeaders( ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, false); @@ -3455,7 +4233,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterAddBodyInTrailersCallback_NoDataFram InSequence s; setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; @@ -3463,6 +4241,7 @@ 
TEST_F(HttpConnectionManagerImplTest, FilterAddBodyInTrailersCallback_NoDataFram RequestTrailerMapPtr trailers{new TestRequestTrailerMapImpl{{"foo", "bar"}}}; decoder->decodeTrailers(std::move(trailers)); + return Http::okStatus(); })); setupFilterChain(2, 1); @@ -3491,6 +4270,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterAddBodyInTrailersCallback_NoDataFram EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, false)) .WillOnce(Return(FilterHeadersStatus::StopIteration)); + decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(""); decoder_filters_[0]->callbacks_->encodeHeaders( ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, false); @@ -3514,7 +4294,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterAddBodyInTrailersCallback_ContinueAf InSequence s; setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; @@ -3522,6 +4302,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterAddBodyInTrailersCallback_ContinueAf RequestTrailerMapPtr trailers{new TestRequestTrailerMapImpl{{"foo", "bar"}}}; decoder->decodeTrailers(std::move(trailers)); + return Http::okStatus(); })); setupFilterChain(2, 1); @@ -3553,6 +4334,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterAddBodyInTrailersCallback_ContinueAf EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, false)) .WillOnce(Return(FilterHeadersStatus::StopIteration)); + decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(""); decoder_filters_[0]->callbacks_->encodeHeaders( ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, false); @@ -3579,7 +4361,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterAddBodyDuringDecodeData) { InSequence s; 
setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; @@ -3590,6 +4372,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterAddBodyDuringDecodeData) { Buffer::OwnedImpl data2("world"); decoder->decodeData(data2, true); + return Http::okStatus(); })); setupFilterChain(2, 2); @@ -3635,6 +4418,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterAddBodyDuringDecodeData) { EXPECT_CALL(response_encoder_, encodeData(_, true)); expectOnDestroy(); + decoder_filters_[1]->callbacks_->streamInfo().setResponseCodeDetails(""); decoder_filters_[1]->callbacks_->encodeHeaders( ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, false); Buffer::OwnedImpl data1("good"); @@ -3647,11 +4431,12 @@ TEST_F(HttpConnectionManagerImplTest, FilterAddBodyInline) { InSequence s; setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; decoder->decodeHeaders(std::move(headers), true); + return Http::okStatus(); })); setupFilterChain(2, 2); @@ -3690,18 +4475,20 @@ TEST_F(HttpConnectionManagerImplTest, FilterAddBodyInline) { EXPECT_CALL(response_encoder_, encodeData(_, true)); expectOnDestroy(); + decoder_filters_[1]->callbacks_->streamInfo().setResponseCodeDetails(""); decoder_filters_[1]->callbacks_->encodeHeaders( ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, true); } 
-TEST_F(HttpConnectionManagerImplTest, FilterClearRouteCache) { +TEST_F(HttpConnectionManagerImplTest, Filter) { setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; decoder->decodeHeaders(std::move(headers), true); + return Http::okStatus(); })); setupFilterChain(3, 2); @@ -3719,7 +4506,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterClearRouteCache) { std::shared_ptr route2 = std::make_shared>(); EXPECT_CALL(route2->route_entry_, clusterName()).WillRepeatedly(ReturnRef(fake_cluster2_name)); - EXPECT_CALL(*route_config_provider_.route_config_, route(_, _, _)) + EXPECT_CALL(*route_config_provider_.route_config_, route(_, _, _, _)) .WillOnce(Return(route1)) .WillOnce(Return(route2)) .WillOnce(Return(nullptr)); @@ -3755,6 +4542,9 @@ TEST_F(HttpConnectionManagerImplTest, FilterClearRouteCache) { // Kick off the incoming data. Buffer::OwnedImpl fake_input("1234"); conn_manager_->onData(fake_input, false); + + expectOnDestroy(); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } TEST_F(HttpConnectionManagerImplTest, UpstreamWatermarkCallbacks) { @@ -3792,15 +4582,8 @@ TEST_F(HttpConnectionManagerImplTest, UpstreamWatermarkCallbacks) { EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, true)); EXPECT_CALL(*encoder_filters_[1], encodeComplete()); EXPECT_CALL(response_encoder_, encodeHeaders(_, true)); - // When the stream ends, the manager should check to see if the connection is - // read disabled, and keep calling readDisable(false) until readEnabled() - // returns true. 
- EXPECT_CALL(filter_callbacks_.connection_, readEnabled()) - .Times(2) - .WillOnce(Return(false)) - .WillRepeatedly(Return(true)); - EXPECT_CALL(filter_callbacks_.connection_, readDisable(false)); expectOnDestroy(); + decoder_filters_[1]->callbacks_->streamInfo().setResponseCodeDetails(""); decoder_filters_[1]->callbacks_->encodeHeaders( ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, true); } @@ -3823,10 +4606,11 @@ TEST_F(HttpConnectionManagerImplTest, UnderlyingConnectionWatermarksPassedOnWith RequestDecoder* decoder; { setUpBufferLimits(); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { decoder = &conn_manager_->newStream(response_encoder_); // Call the high buffer callbacks as the codecs do. stream_callbacks_->onAboveWriteBufferHighWatermark(); + return Http::okStatus(); })); // Send fake data to kick off newStream being created. 
@@ -3839,10 +4623,11 @@ TEST_F(HttpConnectionManagerImplTest, UnderlyingConnectionWatermarksPassedOnWith { setupFilterChain(2, 2); EXPECT_CALL(filter_callbacks_.connection_, aboveHighWatermark()).Times(0); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; decoder->decodeHeaders(std::move(headers), true); + return Http::okStatus(); })); EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true)) .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus { @@ -3863,6 +4648,10 @@ TEST_F(HttpConnectionManagerImplTest, UnderlyingConnectionWatermarksPassedOnWith EXPECT_CALL(callbacks2, onAboveWriteBufferHighWatermark()); decoder_filters_[0]->callbacks_->addDownstreamWatermarkCallbacks(callbacks2); } + + expectOnDestroy(); + EXPECT_CALL(stream_, removeCallbacks(_)); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } TEST_F(HttpConnectionManagerImplTest, UnderlyingConnectionWatermarksUnwoundWithLazyCreation) { @@ -3883,10 +4672,11 @@ TEST_F(HttpConnectionManagerImplTest, UnderlyingConnectionWatermarksUnwoundWithL RequestDecoder* decoder; { setUpBufferLimits(); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { decoder = &conn_manager_->newStream(response_encoder_); // Call the high buffer callbacks as the codecs do. stream_callbacks_->onAboveWriteBufferHighWatermark(); + return Http::okStatus(); })); // Send fake data to kick off newStream being created. 
@@ -3906,10 +4696,11 @@ TEST_F(HttpConnectionManagerImplTest, UnderlyingConnectionWatermarksUnwoundWithL { setupFilterChain(2, 2); EXPECT_CALL(filter_callbacks_.connection_, aboveHighWatermark()).Times(0); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; decoder->decodeHeaders(std::move(headers), true); + return Http::okStatus(); })); EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true)) .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus { @@ -3925,6 +4716,9 @@ TEST_F(HttpConnectionManagerImplTest, UnderlyingConnectionWatermarksUnwoundWithL EXPECT_CALL(callbacks, onBelowWriteBufferLowWatermark()).Times(0); decoder_filters_[0]->callbacks_->addDownstreamWatermarkCallbacks(callbacks); } + expectOnDestroy(); + EXPECT_CALL(stream_, removeCallbacks(_)); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } TEST_F(HttpConnectionManagerImplTest, AlterFilterWatermarkLimits) { @@ -3953,6 +4747,10 @@ TEST_F(HttpConnectionManagerImplTest, AlterFilterWatermarkLimits) { // Once the limits are turned off can be turned on again. 
decoder_filters_[0]->callbacks_->setDecoderBufferLimit(100); EXPECT_EQ(100, decoder_filters_[0]->callbacks_->decoderBufferLimit()); + + expectOnDestroy(); + EXPECT_CALL(stream_, removeCallbacks(_)); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } TEST_F(HttpConnectionManagerImplTest, HitFilterWatermarkLimits) { @@ -3976,6 +4774,7 @@ TEST_F(HttpConnectionManagerImplTest, HitFilterWatermarkLimits) { ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, false)) .WillOnce(Return(FilterHeadersStatus::StopIteration)); + decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(""); decoder_filters_[0]->callbacks_->encodeHeaders(std::move(response_headers), false); MockDownstreamWatermarkCallbacks callbacks; @@ -4030,7 +4829,7 @@ TEST_F(HttpConnectionManagerImplTest, HitRequestBufferLimitsIntermediateFilter) initial_buffer_limit_ = 10; setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; @@ -4041,6 +4840,7 @@ TEST_F(HttpConnectionManagerImplTest, HitRequestBufferLimitsIntermediateFilter) Buffer::OwnedImpl fake_data2("world world"); decoder->decodeData(fake_data2, true); + return Http::okStatus(); })); setUpBufferLimits(); @@ -4077,6 +4877,7 @@ TEST_F(HttpConnectionManagerImplTest, HitResponseBufferLimitsBeforeHeaders) { ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, false)) .WillOnce(Return(FilterHeadersStatus::StopIteration)); + decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(""); 
decoder_filters_[0]->callbacks_->encodeHeaders(std::move(response_headers), false); // Now overload the buffer with response data. The filter returns @@ -4092,9 +4893,11 @@ TEST_F(HttpConnectionManagerImplTest, HitResponseBufferLimitsBeforeHeaders) { EXPECT_CALL(response_encoder_, encodeHeaders(_, false)) .WillOnce(Invoke([&](const ResponseHeaderMap& headers, bool) -> FilterHeadersStatus { // Make sure this is a 500 - EXPECT_EQ("500", headers.Status()->value().getStringView()); + EXPECT_EQ("500", headers.getStatusValue()); // Make sure Envoy standard sanitization has been applied. EXPECT_TRUE(headers.Date() != nullptr); + EXPECT_EQ("response_payload_too_large", + decoder_filters_[0]->callbacks_->streamInfo().responseCodeDetails().value()); return FilterHeadersStatus::Continue; })); EXPECT_CALL(response_encoder_, encodeData(_, true)).WillOnce(AddBufferToString(&response_body)); @@ -4117,6 +4920,7 @@ TEST_F(HttpConnectionManagerImplTest, HitResponseBufferLimitsAfterHeaders) { EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, false)) .WillOnce(Return(FilterHeadersStatus::Continue)); EXPECT_CALL(response_encoder_, encodeHeaders(_, false)); + decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(""); decoder_filters_[0]->callbacks_->encodeHeaders(std::move(response_headers), false); // Now overload the buffer with response data. The filter returns @@ -4128,7 +4932,8 @@ TEST_F(HttpConnectionManagerImplTest, HitResponseBufferLimitsAfterHeaders) { .WillOnce(Return(FilterDataStatus::StopIterationAndBuffer)); EXPECT_CALL(stream_, resetStream(_)); EXPECT_LOG_CONTAINS( - "debug", "Resetting stream. Response data too large and headers have already been sent", + "debug", + "Resetting stream due to response_payload_too_large. 
Prior headers have already been sent", decoder_filters_[0]->callbacks_->encodeData(fake_response, false);); EXPECT_EQ(1U, stats_.named_.rs_too_large_.value()); @@ -4138,12 +4943,13 @@ TEST_F(HttpConnectionManagerImplTest, FilterHeadReply) { InSequence s; setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "HEAD"}}}; decoder->decodeHeaders(std::move(headers), true); data.drain(4); + return Http::okStatus(); })); setupFilterChain(1, 1); @@ -4157,7 +4963,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterHeadReply) { EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, true)) .WillOnce(Invoke([&](ResponseHeaderMap& headers, bool) -> FilterHeadersStatus { - EXPECT_EQ("11", headers.ContentLength()->value().getStringView()); + EXPECT_EQ("11", headers.getContentLengthValue()); return FilterHeadersStatus::Continue; })); EXPECT_CALL(*encoder_filters_[0], encodeComplete()); @@ -4177,13 +4983,15 @@ TEST_F(HttpConnectionManagerImplTest, ResetWithStoppedFilter) { InSequence s; setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> void { - RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); - RequestHeaderMapPtr headers{ - new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; - decoder->decodeHeaders(std::move(headers), true); - data.drain(4); - })); + EXPECT_CALL(*codec_, dispatch(_)) + .WillOnce(Invoke([&](Buffer::Instance& data) -> Envoy::Http::Status { + RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); + RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ + {":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; 
+ decoder->decodeHeaders(std::move(headers), true); + data.drain(4); + return Http::okStatus(); + })); setupFilterChain(1, 1); @@ -4196,7 +5004,7 @@ TEST_F(HttpConnectionManagerImplTest, ResetWithStoppedFilter) { EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, false)) .WillOnce(Invoke([&](ResponseHeaderMap& headers, bool) -> FilterHeadersStatus { - EXPECT_EQ("11", headers.ContentLength()->value().getStringView()); + EXPECT_EQ("11", headers.getContentLengthValue()); return FilterHeadersStatus::Continue; })); EXPECT_CALL(response_encoder_, encodeHeaders(_, false)); @@ -4221,12 +5029,13 @@ TEST_F(HttpConnectionManagerImplTest, FilterContinueAndEndStreamHeaders) { InSequence s; setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); auto headers = std::make_unique( std::initializer_list>( {{":authority", "host"}, {":path", "/"}, {":method", "GET"}})); decoder->decodeHeaders(std::move(headers), false); + return Http::okStatus(); })); setupFilterChain(2, 2); @@ -4251,6 +5060,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterContinueAndEndStreamHeaders) { expectOnDestroy(); + decoder_filters_[1]->callbacks_->streamInfo().setResponseCodeDetails(""); decoder_filters_[1]->callbacks_->encodeHeaders( makeHeaderMap({{":status", "200"}}), true); @@ -4262,7 +5072,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterContinueAndEndStreamData) { InSequence s; setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); auto headers = makeHeaderMap( {{":authority", "host"}, {":path", "/"}, {":method", "GET"}}); @@ -4270,6 +5080,7 @@ TEST_F(HttpConnectionManagerImplTest, 
FilterContinueAndEndStreamData) { Buffer::OwnedImpl fake_data("hello"); decoder->decodeData(fake_data, true); + return Http::okStatus(); })); setupFilterChain(2, 2); @@ -4293,6 +5104,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterContinueAndEndStreamData) { expectOnDestroy(); + decoder_filters_[1]->callbacks_->streamInfo().setResponseCodeDetails(""); decoder_filters_[1]->callbacks_->encodeHeaders( makeHeaderMap({{":status", "200"}}), false); @@ -4304,7 +5116,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterContinueAndEndStreamTrailers) { InSequence s; setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); auto headers = makeHeaderMap( {{":authority", "host"}, {":path", "/"}, {":method", "GET"}}); @@ -4315,6 +5127,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterContinueAndEndStreamTrailers) { auto trailers = makeHeaderMap({{"foo", "bar"}}); decoder->decodeTrailers(std::move(trailers)); + return Http::okStatus(); })); setupFilterChain(2, 2); @@ -4338,6 +5151,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterContinueAndEndStreamTrailers) { expectOnDestroy(); + decoder_filters_[1]->callbacks_->streamInfo().setResponseCodeDetails(""); decoder_filters_[1]->callbacks_->encodeHeaders( makeHeaderMap({{":status", "200"}}), false); @@ -4352,11 +5166,12 @@ TEST_F(HttpConnectionManagerImplTest, FilterAddBodyContinuation) { InSequence s; setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; decoder->decodeHeaders(std::move(headers), true); 
+ return Http::okStatus(); })); setupFilterChain(2, 2); @@ -4383,6 +5198,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterAddBodyContinuation) { .WillOnce(Return(FilterHeadersStatus::StopIteration)); EXPECT_CALL(*encoder_filters_[1], encodeComplete()); + decoder_filters_[1]->callbacks_->streamInfo().setResponseCodeDetails(""); decoder_filters_[1]->callbacks_->encodeHeaders( ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, true); @@ -4437,11 +5253,12 @@ TEST_F(HttpConnectionManagerImplTest, AddDataWithAllContinue) { InSequence s; setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; decoder->decodeHeaders(std::move(headers), true); + return Http::okStatus(); })); setupFilterChain(3, 3); @@ -4496,6 +5313,7 @@ TEST_F(HttpConnectionManagerImplTest, AddDataWithAllContinue) { EXPECT_CALL(*encoder_filters_[2], encodeData(_, true)).Times(0); EXPECT_CALL(*encoder_filters_[1], encodeData(_, true)).Times(0); + decoder_filters_[2]->callbacks_->streamInfo().setResponseCodeDetails(""); decoder_filters_[2]->callbacks_->encodeHeaders( ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, true); } @@ -4539,11 +5357,12 @@ TEST_F(HttpConnectionManagerImplTest, AddDataWithStopAndContinue) { InSequence s; setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; 
decoder->decodeHeaders(std::move(headers), true); + return Http::okStatus(); })); setupFilterChain(3, 3); @@ -4582,6 +5401,7 @@ TEST_F(HttpConnectionManagerImplTest, AddDataWithStopAndContinue) { .WillOnce(Return(FilterHeadersStatus::StopIteration)); EXPECT_CALL(*encoder_filters_[2], encodeComplete()); + decoder_filters_[2]->callbacks_->streamInfo().setResponseCodeDetails(""); decoder_filters_[2]->callbacks_->encodeHeaders( ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, true); @@ -4614,7 +5434,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterDirectDecodeEncodeDataNoTrailers) { InSequence s; setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; @@ -4622,9 +5442,10 @@ TEST_F(HttpConnectionManagerImplTest, FilterDirectDecodeEncodeDataNoTrailers) { Buffer::OwnedImpl fake_data("hello"); decoder->decodeData(fake_data, true); + return Http::okStatus(); })); - EXPECT_CALL(*route_config_provider_.route_config_, route(_, _, _)); + EXPECT_CALL(*route_config_provider_.route_config_, route(_, _, _, _)); setupFilterChain(2, 2); EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false)) @@ -4671,6 +5492,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterDirectDecodeEncodeDataNoTrailers) { })); EXPECT_CALL(*encoder_filters_[1], encodeComplete()); + decoder_filters_[1]->callbacks_->streamInfo().setResponseCodeDetails(""); decoder_filters_[1]->callbacks_->encodeHeaders( ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, false); Buffer::OwnedImpl response_body("response"); @@ -4694,7 +5516,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterDirectDecodeEncodeDataTrailers) { InSequence s; setup(false, ""); - 
EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; @@ -4705,9 +5527,10 @@ TEST_F(HttpConnectionManagerImplTest, FilterDirectDecodeEncodeDataTrailers) { RequestTrailerMapPtr trailers{new TestRequestTrailerMapImpl{{"foo", "bar"}}}; decoder->decodeTrailers(std::move(trailers)); + return Http::okStatus(); })); - EXPECT_CALL(*route_config_provider_.route_config_, route(_, _, _)); + EXPECT_CALL(*route_config_provider_.route_config_, route(_, _, _, _)); setupFilterChain(2, 2); EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false)) @@ -4760,6 +5583,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterDirectDecodeEncodeDataTrailers) { .WillOnce(Return(FilterTrailersStatus::StopIteration)); EXPECT_CALL(*encoder_filters_[1], encodeComplete()); + decoder_filters_[1]->callbacks_->streamInfo().setResponseCodeDetails(""); decoder_filters_[1]->callbacks_->encodeHeaders( ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, false); Buffer::OwnedImpl response_body("response"); @@ -4788,7 +5612,7 @@ TEST_F(HttpConnectionManagerImplTest, MultipleFilters) { InSequence s; setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; @@ -4799,9 +5623,10 @@ TEST_F(HttpConnectionManagerImplTest, MultipleFilters) { Buffer::OwnedImpl fake_data2("world"); decoder->decodeData(fake_data2, true); + return Http::okStatus(); })); - 
EXPECT_CALL(*route_config_provider_.route_config_, route(_, _, _)); + EXPECT_CALL(*route_config_provider_.route_config_, route(_, _, _, _)); setupFilterChain(3, 2); EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false)) @@ -4853,6 +5678,7 @@ TEST_F(HttpConnectionManagerImplTest, MultipleFilters) { .WillOnce(Return(FilterTrailersStatus::StopIteration)); EXPECT_CALL(*encoder_filters_[1], encodeComplete()); EXPECT_EQ(ssl_connection_.get(), encoder_filters_[1]->callbacks_->connection()->ssl().get()); + decoder_filters_[2]->callbacks_->streamInfo().setResponseCodeDetails(""); decoder_filters_[2]->callbacks_->encodeHeaders( ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, false); Buffer::OwnedImpl response_body("response"); @@ -4891,26 +5717,31 @@ TEST(HttpConnectionManagerTracingStatsTest, verifyTracingStats) { ConnectionManagerImpl::chargeTracingStats(Tracing::Reason::NotTraceableRequestId, tracing_stats); EXPECT_EQ(1UL, tracing_stats.not_traceable_.value()); + + ConnectionManagerImpl::chargeTracingStats(Tracing::Reason::Sampling, tracing_stats); + EXPECT_EQ(1UL, tracing_stats.random_sampling_.value()); } TEST_F(HttpConnectionManagerImplTest, NoNewStreamWhenOverloaded) { - setup(false, ""); + Server::OverloadActionState stop_accepting_requests = Server::OverloadActionState::Active; + ON_CALL(overload_manager_.overload_state_, + getState(Server::OverloadActionNames::get().StopAcceptingRequests)) + .WillByDefault(ReturnRef(stop_accepting_requests)); - overload_manager_.overload_state_.setState( - Server::OverloadActionNames::get().StopAcceptingRequests, - Server::OverloadActionState::Active); + setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance&) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", 
"host"}, {":path", "/"}, {":method", "GET"}}}; decoder->decodeHeaders(std::move(headers), false); + return Http::okStatus(); })); // 503 direct response when overloaded. EXPECT_CALL(response_encoder_, encodeHeaders(_, false)) .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void { - EXPECT_EQ("503", headers.Status()->value().getStringView()); + EXPECT_EQ("503", headers.getStatusValue()); })); std::string response_body; EXPECT_CALL(response_encoder_, encodeData(_, true)).WillOnce(AddBufferToString(&response_body)); @@ -4923,10 +5754,12 @@ TEST_F(HttpConnectionManagerImplTest, NoNewStreamWhenOverloaded) { } TEST_F(HttpConnectionManagerImplTest, DisableKeepAliveWhenOverloaded) { - setup(false, ""); + Server::OverloadActionState disable_http_keep_alive = Server::OverloadActionState::Active; + ON_CALL(overload_manager_.overload_state_, + getState(Server::OverloadActionNames::get().DisableHttpKeepAlive)) + .WillByDefault(ReturnRef(disable_http_keep_alive)); - overload_manager_.overload_state_.setState( - Server::OverloadActionNames::get().DisableHttpKeepAlive, Server::OverloadActionState::Active); + setup(false, ""); std::shared_ptr filter(new NiceMock()); EXPECT_CALL(filter_factory_, createFilterChain(_)) @@ -4934,21 +5767,26 @@ TEST_F(HttpConnectionManagerImplTest, DisableKeepAliveWhenOverloaded) { callbacks.addStreamDecoderFilter(StreamDecoderFilterSharedPtr{filter}); })); - EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance& data) -> void { - RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); - RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ - {":authority", "host"}, {":path", "/"}, {":method", "GET"}, {"connection", "keep-alive"}}}; - decoder->decodeHeaders(std::move(headers), true); + EXPECT_CALL(*codec_, dispatch(_)) + .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { + RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); + RequestHeaderMapPtr headers{new 
TestRequestHeaderMapImpl{{":authority", "host"}, + {":path", "/"}, + {":method", "GET"}, + {"connection", "keep-alive"}}}; + decoder->decodeHeaders(std::move(headers), true); - ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; - filter->callbacks_->encodeHeaders(std::move(response_headers), true); + ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + filter->callbacks_->streamInfo().setResponseCodeDetails(""); + filter->callbacks_->encodeHeaders(std::move(response_headers), true); - data.drain(4); - })); + data.drain(4); + return Http::okStatus(); + })); EXPECT_CALL(response_encoder_, encodeHeaders(_, true)) .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void { - EXPECT_EQ("close", headers.Connection()->value().getStringView()); + EXPECT_EQ("close", headers.getConnectionValue()); })); Buffer::OwnedImpl fake_input("1234"); @@ -4976,6 +5814,10 @@ TEST_F(HttpConnectionManagerImplTest, TestStopAllIterationAndBufferOnDecodingPat .WillOnce(Return(FilterTrailersStatus::Continue)); EXPECT_CALL(*decoder_filters_[1], decodeComplete()); decoder_filters_[0]->callbacks_->continueDecoding(); + + expectOnDestroy(); + EXPECT_CALL(stream_, removeCallbacks(_)); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } TEST_F(HttpConnectionManagerImplTest, TestStopAllIterationAndBufferOnDecodingPathSecondFilter) { @@ -4999,6 +5841,10 @@ TEST_F(HttpConnectionManagerImplTest, TestStopAllIterationAndBufferOnDecodingPat .WillOnce(Return(FilterTrailersStatus::Continue)); EXPECT_CALL(*decoder_filters_[1], decodeComplete()); decoder_filters_[1]->callbacks_->continueDecoding(); + + expectOnDestroy(); + EXPECT_CALL(stream_, removeCallbacks(_)); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } TEST_F(HttpConnectionManagerImplTest, TestStopAllIterationAndBufferOnEncodingPath) { @@ -5012,6 +5858,7 @@ TEST_F(HttpConnectionManagerImplTest, 
TestStopAllIterationAndBufferOnEncodingPat return FilterHeadersStatus::StopAllIterationAndBuffer; })); ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(""); decoder_filters_[0]->callbacks_->encodeHeaders(std::move(response_headers), false); // Invoke encodeData while all iteration is stopped and make sure the filters do not have @@ -5053,21 +5900,26 @@ TEST_F(HttpConnectionManagerImplTest, DisableKeepAliveWhenDraining) { callbacks.addStreamDecoderFilter(StreamDecoderFilterSharedPtr{filter}); })); - EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance& data) -> void { - RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); - RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ - {":authority", "host"}, {":path", "/"}, {":method", "GET"}, {"connection", "keep-alive"}}}; - decoder->decodeHeaders(std::move(headers), true); + EXPECT_CALL(*codec_, dispatch(_)) + .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { + RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); + RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{{":authority", "host"}, + {":path", "/"}, + {":method", "GET"}, + {"connection", "keep-alive"}}}; + decoder->decodeHeaders(std::move(headers), true); - ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; - filter->callbacks_->encodeHeaders(std::move(response_headers), true); + ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + filter->callbacks_->streamInfo().setResponseCodeDetails(""); + filter->callbacks_->encodeHeaders(std::move(response_headers), true); - data.drain(4); - })); + data.drain(4); + return Http::okStatus(); + })); EXPECT_CALL(response_encoder_, encodeHeaders(_, true)) .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void { - EXPECT_EQ("close", 
headers.Connection()->value().getStringView()); + EXPECT_EQ("close", headers.getConnectionValue()); })); Buffer::OwnedImpl fake_input; @@ -5078,9 +5930,11 @@ TEST_F(HttpConnectionManagerImplTest, TestSessionTrace) { setup(false, ""); // Set up the codec. - EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance& data) -> void { - data.drain(4); - })); + EXPECT_CALL(*codec_, dispatch(_)) + .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { + data.drain(4); + return Http::okStatus(); + })); Buffer::OwnedImpl fake_input("1234"); conn_manager_->onData(fake_input, false); @@ -5100,7 +5954,7 @@ TEST_F(HttpConnectionManagerImplTest, TestSessionTrace) { std::stringstream out; object->dumpState(out); std::string state = out.str(); - EXPECT_THAT(state, testing::HasSubstr("request_headers_: null")); + EXPECT_THAT(state, testing::HasSubstr("request_headers_: null")); EXPECT_THAT(state, testing::HasSubstr("protocol_: 1")); return nullptr; })) @@ -5133,6 +5987,9 @@ TEST_F(HttpConnectionManagerImplTest, TestSessionTrace) { .WillOnce(Return(FilterTrailersStatus::StopIteration)); decoder->decodeTrailers(std::move(trailers)); } + + expectOnDestroy(); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } // SRDS no scope found. 
@@ -5145,12 +6002,13 @@ TEST_F(HttpConnectionManagerImplTest, TestSrdsRouteNotFound) { getRouteConfig(_)) .Times(2) .WillRepeatedly(Return(nullptr)); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ {":authority", "host"}, {":method", "GET"}, {":path", "/foo"}}}; decoder->decodeHeaders(std::move(headers), true); data.drain(4); + return Http::okStatus(); })); EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true)) @@ -5162,6 +6020,9 @@ TEST_F(HttpConnectionManagerImplTest, TestSrdsRouteNotFound) { Buffer::OwnedImpl fake_input("1234"); conn_manager_->onData(fake_input, false); + + expectOnDestroy(); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } // SRDS updating scopes affects routing. @@ -5175,12 +6036,13 @@ TEST_F(HttpConnectionManagerImplTest, TestSrdsUpdate) { .WillOnce(Return(nullptr)) .WillOnce(Return(nullptr)) // refreshCachedRoute first time. .WillOnce(Return(route_config_)); // triggered by callbacks_->route(), SRDS now updated. 
- EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ {":authority", "host"}, {":method", "GET"}, {":path", "/foo"}}}; decoder->decodeHeaders(std::move(headers), true); data.drain(4); + return Http::okStatus(); })); const std::string fake_cluster1_name = "fake_cluster1"; std::shared_ptr route1 = std::make_shared>(); @@ -5188,7 +6050,7 @@ TEST_F(HttpConnectionManagerImplTest, TestSrdsUpdate) { std::shared_ptr fake_cluster1 = std::make_shared>(); EXPECT_CALL(cluster_manager_, get(_)).WillOnce(Return(fake_cluster1.get())); - EXPECT_CALL(*route_config_, route(_, _, _)).WillOnce(Return(route1)); + EXPECT_CALL(*route_config_, route(_, _, _, _)).WillOnce(Return(route1)); // First no-scope-found request will be handled by decoder_filters_[0]. setupFilterChain(1, 0); EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true)) @@ -5210,6 +6072,9 @@ TEST_F(HttpConnectionManagerImplTest, TestSrdsUpdate) { EXPECT_CALL(*decoder_filters_[0], decodeComplete()); // end_stream=true. Buffer::OwnedImpl fake_input("1234"); conn_manager_->onData(fake_input, false); + + expectOnDestroy(); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } // SRDS Scope header update cause cross-scope reroute. 
@@ -5222,8 +6087,8 @@ TEST_F(HttpConnectionManagerImplTest, TestSrdsCrossScopeReroute) { std::make_shared>(); std::shared_ptr route1 = std::make_shared>(); std::shared_ptr route2 = std::make_shared>(); - EXPECT_CALL(*route_config1, route(_, _, _)).WillRepeatedly(Return(route1)); - EXPECT_CALL(*route_config2, route(_, _, _)).WillRepeatedly(Return(route2)); + EXPECT_CALL(*route_config1, route(_, _, _, _)).WillRepeatedly(Return(route1)); + EXPECT_CALL(*route_config2, route(_, _, _, _)).WillRepeatedly(Return(route2)); EXPECT_CALL(*static_cast( scopedRouteConfigProvider()->config().get()), getRouteConfig(_)) @@ -5238,12 +6103,13 @@ TEST_F(HttpConnectionManagerImplTest, TestSrdsCrossScopeReroute) { } return route_config2; })); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ {":authority", "host"}, {":method", "GET"}, {"scope_key", "foo"}, {":path", "/foo"}}}; decoder->decodeHeaders(std::move(headers), false); data.drain(4); + return Http::okStatus(); })); setupFilterChain(2, 0); EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false)) @@ -5268,6 +6134,9 @@ TEST_F(HttpConnectionManagerImplTest, TestSrdsCrossScopeReroute) { Buffer::OwnedImpl fake_input("1234"); conn_manager_->onData(fake_input, false); + + expectOnDestroy(); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } // SRDS scoped RouteConfiguration found and route found. 
@@ -5288,15 +6157,15 @@ TEST_F(HttpConnectionManagerImplTest, TestSrdsRouteFound) { EXPECT_CALL( *static_cast( scopedRouteConfigProvider()->config()->route_config_.get()), - route(_, _, _)) + route(_, _, _, _)) .WillOnce(Return(route1)); - RequestDecoder* decoder = nullptr; - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> void { - decoder = &conn_manager_->newStream(response_encoder_); + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { + RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ {":authority", "host"}, {":method", "GET"}, {":path", "/foo"}}}; decoder->decodeHeaders(std::move(headers), true); data.drain(4); + return Http::okStatus(); })); EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true)) .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus { @@ -5309,6 +6178,9 @@ TEST_F(HttpConnectionManagerImplTest, TestSrdsRouteFound) { Buffer::OwnedImpl fake_input("1234"); conn_manager_->onData(fake_input, false); + + expectOnDestroy(); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } TEST_F(HttpConnectionManagerImplTest, NewConnection) { @@ -5329,6 +6201,196 @@ TEST_F(HttpConnectionManagerImplTest, NewConnection) { EXPECT_EQ(1U, stats_.named_.downstream_cx_http3_active_.value()); } +TEST_F(HttpConnectionManagerImplTest, TestUpstreamRequestHeadersSize) { + // Test with Headers only request, No Data, No response. 
+ setup(false, ""); + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { + RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); + RequestHeaderMapPtr headers{ + new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; + decoder->decodeHeaders(std::move(headers), true); + return Http::okStatus(); + })); + + setupFilterChain(1, 0); + + EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true)) + .WillOnce(Return(FilterHeadersStatus::StopIteration)); + EXPECT_CALL(*decoder_filters_[0], decodeComplete()); + + std::shared_ptr> host_{ + new NiceMock()}; + filter_callbacks_.upstreamHost(host_); + + EXPECT_CALL( + host_->cluster_.request_response_size_stats_store_, + deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_rq_headers_size"), 30)); + EXPECT_CALL(host_->cluster_.request_response_size_stats_store_, + deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_rq_body_size"), 0)); + EXPECT_CALL(host_->cluster_.request_response_size_stats_store_, + deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_rs_body_size"), 0)); + + Buffer::OwnedImpl fake_input("1234"); + conn_manager_->onData(fake_input, false); + + expectOnDestroy(); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); +} + +TEST_F(HttpConnectionManagerImplTest, TestUpstreamRequestBodySize) { + // Test Request with Headers and Data, No response. 
+ setup(false, ""); + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { + RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); + RequestHeaderMapPtr headers{ + new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; + decoder->decodeHeaders(std::move(headers), false); + + Buffer::OwnedImpl fake_data("12345"); + decoder->decodeData(fake_data, true); + return Http::okStatus(); + })); + + setupFilterChain(1, 0); + + EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false)) + .WillOnce(Return(FilterHeadersStatus::StopIteration)); + EXPECT_CALL(*decoder_filters_[0], decodeData(_, true)) + .WillOnce(Return(FilterDataStatus::StopIterationNoBuffer)); + + EXPECT_CALL(*decoder_filters_[0], decodeComplete()); + + std::shared_ptr> host_{ + new NiceMock()}; + filter_callbacks_.upstreamHost(host_); + + EXPECT_CALL( + host_->cluster_.request_response_size_stats_store_, + deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_rq_headers_size"), 30)); + EXPECT_CALL(host_->cluster_.request_response_size_stats_store_, + deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_rq_body_size"), 5)); + EXPECT_CALL(host_->cluster_.request_response_size_stats_store_, + deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_rs_body_size"), 0)); + + Buffer::OwnedImpl fake_input("1234"); + conn_manager_->onData(fake_input, false); + + expectOnDestroy(); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); +} + +TEST_F(HttpConnectionManagerImplTest, TestUpstreamResponseHeadersSize) { + // Test with Header only response. 
+ setup(false, ""); + + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { + RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); + RequestHeaderMapPtr headers{ + new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; + decoder->decodeHeaders(std::move(headers), false); + + Buffer::OwnedImpl fake_data("1234"); + decoder->decodeData(fake_data, true); + + return Http::okStatus(); + })); + + setupFilterChain(1, 0); + + EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false)) + .WillOnce(Return(FilterHeadersStatus::StopIteration)); + EXPECT_CALL(*decoder_filters_[0], decodeData(_, true)) + .WillOnce(Return(FilterDataStatus::StopIterationNoBuffer)); + + EXPECT_CALL(*decoder_filters_[0], decodeComplete()); + + std::shared_ptr> host_{ + new NiceMock()}; + filter_callbacks_.upstreamHost(host_); + + EXPECT_CALL( + host_->cluster_.request_response_size_stats_store_, + deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_rq_headers_size"), 30)); + + // Response headers are internally mutated and we record final response headers. + // for example in the below test case, response headers are modified as + // {':status', '200' 'date', 'Mon, 06 Jul 2020 06:08:55 GMT' 'server', ''} + // whose size is 49 instead of original response headers size 10({":status", "200"}). 
+ EXPECT_CALL( + host_->cluster_.request_response_size_stats_store_, + deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_rs_headers_size"), 49)); + EXPECT_CALL(host_->cluster_.request_response_size_stats_store_, + deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_rq_body_size"), 4)); + EXPECT_CALL(host_->cluster_.request_response_size_stats_store_, + deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_rs_body_size"), 0)); + + Buffer::OwnedImpl fake_input("1234"); + conn_manager_->onData(fake_input, false); + + EXPECT_CALL(response_encoder_, encodeHeaders(_, true)); + expectOnDestroy(); + + decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(""); + decoder_filters_[0]->callbacks_->encodeHeaders( + ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, true); +} + +TEST_F(HttpConnectionManagerImplTest, TestUpstreamResponseBodySize) { + // Test with response headers and body. + setup(false, ""); + + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { + RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); + RequestHeaderMapPtr headers{ + new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; + decoder->decodeHeaders(std::move(headers), false); + + Buffer::OwnedImpl fake_data("1234"); + decoder->decodeData(fake_data, true); + + return Http::okStatus(); + })); + + setupFilterChain(1, 0); + + EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false)) + .WillOnce(Return(FilterHeadersStatus::StopIteration)); + EXPECT_CALL(*decoder_filters_[0], decodeData(_, true)) + .WillOnce(Return(FilterDataStatus::StopIterationNoBuffer)); + + EXPECT_CALL(*decoder_filters_[0], decodeComplete()); + + std::shared_ptr> host_{ + new NiceMock()}; + filter_callbacks_.upstreamHost(host_); + + EXPECT_CALL( + host_->cluster_.request_response_size_stats_store_, + deliverHistogramToSinks(Property(&Stats::Metric::name, 
"upstream_rq_headers_size"), 30)); + EXPECT_CALL( + host_->cluster_.request_response_size_stats_store_, + deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_rs_headers_size"), 49)); + EXPECT_CALL(host_->cluster_.request_response_size_stats_store_, + deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_rq_body_size"), 4)); + EXPECT_CALL(host_->cluster_.request_response_size_stats_store_, + deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_rs_body_size"), 11)); + + Buffer::OwnedImpl fake_input("1234"); + conn_manager_->onData(fake_input, false); + + EXPECT_CALL(response_encoder_, encodeHeaders(_, false)); + + decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(""); + decoder_filters_[0]->callbacks_->encodeHeaders( + ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, false); + + EXPECT_CALL(response_encoder_, encodeData(_, true)); + expectOnDestroy(); + + Buffer::OwnedImpl fake_response("hello-world"); + decoder_filters_[0]->callbacks_->encodeData(fake_response, true); +} + TEST_F(HttpConnectionManagerImplTest, HeaderOnlyRequestAndResponseUsingHttp3) { setup(false, "envoy-custom-server", false); @@ -5342,7 +6404,7 @@ TEST_F(HttpConnectionManagerImplTest, HeaderOnlyRequestAndResponseUsingHttp3) { EXPECT_CALL(*filter, decodeHeaders(_, true)) .WillOnce(Invoke([&](RequestHeaderMap& headers, bool) -> FilterHeadersStatus { EXPECT_NE(nullptr, headers.ForwardedFor()); - EXPECT_EQ("http", headers.ForwardedProto()->value().getStringView()); + EXPECT_EQ("http", headers.getForwardedProtoValue()); return FilterHeadersStatus::StopIteration; })); @@ -5364,6 +6426,7 @@ TEST_F(HttpConnectionManagerImplTest, HeaderOnlyRequestAndResponseUsingHttp3) { decoder.decodeHeaders(std::move(headers), true); ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + filter->callbacks_->streamInfo().setResponseCodeDetails(""); 
filter->callbacks_->encodeHeaders(std::move(response_headers), true); EXPECT_EQ(1U, stats_.named_.downstream_rq_2xx_.value()); @@ -5390,16 +6453,23 @@ class SimpleType : public StreamInfo::FilterState::Object { } // namespace TEST_F(HttpConnectionManagerImplTest, ConnectionFilterState) { + filter_callbacks_.connection_.stream_info_.filter_state_->setData( + "connection_provided_data", std::make_shared(555), + StreamInfo::FilterState::StateType::ReadOnly); + setup(false, "envoy-custom-server", false); setupFilterChain(1, 0, /* num_requests = */ 3); - EXPECT_CALL(*codec_, dispatch(_)).Times(2).WillRepeatedly(Invoke([&](Buffer::Instance&) -> void { - RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); - RequestHeaderMapPtr headers{ - new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; - decoder->decodeHeaders(std::move(headers), true); - })); + EXPECT_CALL(*codec_, dispatch(_)) + .Times(2) + .WillRepeatedly(Invoke([&](Buffer::Instance&) -> Http::Status { + RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); + RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ + {":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; + decoder->decodeHeaders(std::move(headers), true); + return Http::okStatus(); + })); { InSequence s; EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true)) @@ -5429,6 +6499,9 @@ TEST_F(HttpConnectionManagerImplTest, ConnectionFilterState) { EXPECT_TRUE( decoder_filters_[1]->callbacks_->streamInfo().filterState()->hasData( "per_downstream_connection")); + EXPECT_TRUE( + decoder_filters_[1]->callbacks_->streamInfo().filterState()->hasData( + "connection_provided_data")); return FilterHeadersStatus::StopIteration; })); EXPECT_CALL(*decoder_filters_[2], decodeHeaders(_, true)) @@ -5442,6 +6515,9 @@ TEST_F(HttpConnectionManagerImplTest, ConnectionFilterState) { EXPECT_TRUE( decoder_filters_[2]->callbacks_->streamInfo().filterState()->hasData( 
"per_downstream_connection")); + EXPECT_TRUE( + decoder_filters_[1]->callbacks_->streamInfo().filterState()->hasData( + "connection_provided_data")); return FilterHeadersStatus::StopIteration; })); } @@ -5455,6 +6531,13 @@ TEST_F(HttpConnectionManagerImplTest, ConnectionFilterState) { conn_manager_->onData(fake_input, false); decoder_filters_[0]->callbacks_->recreateStream(); conn_manager_->onData(fake_input, false); + + // The connection life time data should have been written to the connection filter state. + EXPECT_TRUE(filter_callbacks_.connection_.stream_info_.filter_state_->hasData( + "per_downstream_connection")); + EXPECT_CALL(*decoder_filters_[1], onDestroy()); + EXPECT_CALL(*decoder_filters_[2], onDestroy()); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } class HttpConnectionManagerImplDeathTest : public HttpConnectionManagerImplTest { @@ -5475,8 +6558,9 @@ TEST_F(HttpConnectionManagerImplDeathTest, InvalidConnectionManagerConfig) { setup(false, ""); Buffer::OwnedImpl fake_input("1234"); - EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance&) -> Http::Status { conn_manager_->newStream(response_encoder_); + return Http::okStatus(); })); // Either RDS or SRDS should be set. EXPECT_DEBUG_DEATH(conn_manager_->onData(fake_input, false), @@ -5498,6 +6582,7 @@ TEST_F(HttpConnectionManagerImplDeathTest, InvalidConnectionManagerConfig) { route_config_provider2_.reset(); // Only scoped route config provider valid. 
EXPECT_NO_THROW(conn_manager_->onData(fake_input, false)); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } } // namespace Http diff --git a/test/common/http/conn_manager_utility_test.cc b/test/common/http/conn_manager_utility_test.cc index 71e7b5a40933e..4b43487108447 100644 --- a/test/common/http/conn_manager_utility_test.cc +++ b/test/common/http/conn_manager_utility_test.cc @@ -4,6 +4,7 @@ #include "envoy/http/request_id_extension.h" #include "envoy/type/v3/percent.pb.h" +#include "common/common/random_generator.h" #include "common/http/conn_manager_utility.h" #include "common/http/header_utility.h" #include "common/http/headers.h" @@ -36,7 +37,7 @@ namespace Http { class MockRequestIDExtension : public RequestIDExtension { public: - explicit MockRequestIDExtension(Runtime::RandomGenerator& random) + explicit MockRequestIDExtension(Random::RandomGenerator& random) : real_(RequestIDExtensionFactory::defaultInstance(random)) { ON_CALL(*this, set(_, _)) .WillByDefault([this](Http::RequestHeaderMap& request_headers, bool force) { @@ -81,6 +82,7 @@ class MockConnectionManagerConfig : public ConnectionManagerConfig { ON_CALL(*this, generateRequestId()).WillByDefault(Return(true)); ON_CALL(*this, isRoutable()).WillByDefault(Return(true)); ON_CALL(*this, preserveExternalRequestId()).WillByDefault(Return(false)); + ON_CALL(*this, alwaysSetRequestIdInResponse()).WillByDefault(Return(false)); } // Http::ConnectionManagerConfig @@ -98,6 +100,7 @@ class MockConnectionManagerConfig : public ConnectionManagerConfig { MOCK_METHOD(FilterChainFactory&, filterFactory, ()); MOCK_METHOD(bool, generateRequestId, (), (const)); MOCK_METHOD(bool, preserveExternalRequestId, (), (const)); + MOCK_METHOD(bool, alwaysSetRequestIdInResponse, (), (const)); MOCK_METHOD(uint32_t, maxRequestHeadersKb, (), (const)); MOCK_METHOD(uint32_t, maxRequestHeadersCount, (), (const)); MOCK_METHOD(absl::optional, idleTimeout, (), (const)); @@ -118,6 +121,7 @@ class 
MockConnectionManagerConfig : public ConnectionManagerConfig { const Http::InternalAddressConfig& internalAddressConfig() const override { return *internal_address_config_; } + MOCK_METHOD(bool, unixSocketInternal, ()); MOCK_METHOD(uint32_t, xffNumTrustedHops, (), (const)); MOCK_METHOD(bool, skipXffAppend, (), (const)); @@ -131,11 +135,14 @@ class MockConnectionManagerConfig : public ConnectionManagerConfig { MOCK_METHOD(Tracing::HttpTracerSharedPtr, tracer, ()); MOCK_METHOD(ConnectionManagerListenerStats&, listenerStats, ()); MOCK_METHOD(bool, proxy100Continue, (), (const)); + MOCK_METHOD(bool, streamErrorOnInvalidHttpMessaging, (), (const)); MOCK_METHOD(const Http::Http1Settings&, http1Settings, (), (const)); MOCK_METHOD(bool, shouldNormalizePath, (), (const)); MOCK_METHOD(bool, shouldMergeSlashes, (), (const)); + MOCK_METHOD(bool, shouldStripMatchingPort, (), (const)); MOCK_METHOD(envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction, headersWithUnderscoresAction, (), (const)); + MOCK_METHOD(const LocalReply::LocalReply&, localReply, (), (const)); std::unique_ptr internal_address_config_ = std::make_unique(); @@ -149,7 +156,8 @@ const Http::LowerCaseString& traceStatusHeader() { class ConnectionManagerUtilityTest : public testing::Test { public: ConnectionManagerUtilityTest() - : request_id_extension_(std::make_shared>(random_)) { + : request_id_extension_(std::make_shared>(random_)), + local_reply_(LocalReply::Factory::createDefault()) { ON_CALL(config_, userAgent()).WillByDefault(ReturnRef(user_agent_)); envoy::type::v3::FractionalPercent percent1; @@ -160,6 +168,7 @@ class ConnectionManagerUtilityTest : public testing::Test { tracing_config_ = { Tracing::OperationName::Ingress, {}, percent1, percent2, percent1, false, 256}; ON_CALL(config_, tracingConfig()).WillByDefault(Return(&tracing_config_)); + ON_CALL(config_, localReply()).WillByDefault(ReturnRef(*local_reply_)); ON_CALL(config_, via()).WillByDefault(ReturnRef(via_)); 
ON_CALL(config_, requestIDExtension()).WillByDefault(Return(request_id_extension_)); @@ -188,7 +197,7 @@ class ConnectionManagerUtilityTest : public testing::Test { } NiceMock connection_; - NiceMock random_; + NiceMock random_; std::shared_ptr> request_id_extension_; NiceMock config_; NiceMock route_config_; @@ -197,6 +206,7 @@ class ConnectionManagerUtilityTest : public testing::Test { NiceMock runtime_; Http::TracingConnectionManagerConfig tracing_config_; NiceMock local_info_; + LocalReply::LocalReplyPtr local_reply_; std::string canary_node_{"canary"}; std::string empty_node_; std::string via_; @@ -229,14 +239,16 @@ TEST_F(ConnectionManagerUtilityTest, DetermineNextProtocol) { Network::MockConnection connection; EXPECT_CALL(connection, nextProtocol()).WillRepeatedly(Return("")); Buffer::OwnedImpl data("PRI * HTTP/2.0\r\n"); - EXPECT_EQ("h2", ConnectionManagerUtility::determineNextProtocol(connection, data)); + EXPECT_EQ(Utility::AlpnNames::get().Http2, + ConnectionManagerUtility::determineNextProtocol(connection, data)); } { Network::MockConnection connection; EXPECT_CALL(connection, nextProtocol()).WillRepeatedly(Return("")); Buffer::OwnedImpl data("PRI * HTTP/2"); - EXPECT_EQ("h2", ConnectionManagerUtility::determineNextProtocol(connection, data)); + EXPECT_EQ(Utility::AlpnNames::get().Http2, + ConnectionManagerUtility::determineNextProtocol(connection, data)); } { @@ -290,7 +302,7 @@ TEST_F(ConnectionManagerUtilityTest, SkipXffAppendPassThruUseRemoteAddress) { EXPECT_EQ((MutateRequestRet{"12.12.12.12:0", false}), callMutateRequestHeaders(headers, Protocol::Http2)); - EXPECT_EQ("198.51.100.1", headers.ForwardedFor()->value().getStringView()); + EXPECT_EQ("198.51.100.1", headers.getForwardedForValue()); } TEST_F(ConnectionManagerUtilityTest, PreserveForwardedProtoWhenInternal) { @@ -304,7 +316,7 @@ TEST_F(ConnectionManagerUtilityTest, PreserveForwardedProtoWhenInternal) { TestRequestHeaderMapImpl headers{{"x-forwarded-proto", "https"}}; 
callMutateRequestHeaders(headers, Protocol::Http2); - EXPECT_EQ("https", headers.ForwardedProto()->value().getStringView()); + EXPECT_EQ("https", headers.getForwardedProtoValue()); } TEST_F(ConnectionManagerUtilityTest, OverwriteForwardedProtoWhenExternal) { @@ -316,7 +328,7 @@ TEST_F(ConnectionManagerUtilityTest, OverwriteForwardedProtoWhenExternal) { ON_CALL(config_, localAddress()).WillByDefault(ReturnRef(local_address)); callMutateRequestHeaders(headers, Protocol::Http2); - EXPECT_EQ("http", headers.ForwardedProto()->value().getStringView()); + EXPECT_EQ("http", headers.getForwardedProtoValue()); } // Verify internal request and XFF is set when we are using remote address and the address is @@ -384,8 +396,8 @@ TEST_F(ConnectionManagerUtilityTest, ViaEmpty) { EXPECT_FALSE(request_headers.has(Headers::get().Via)); TestResponseHeaderMapImpl response_headers; - ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, - config_.requestIDExtension(), via_); + ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, config_, + via_); EXPECT_FALSE(response_headers.has(Headers::get().Via)); } @@ -402,11 +414,10 @@ TEST_F(ConnectionManagerUtilityTest, ViaAppend) { TestResponseHeaderMapImpl response_headers; // Pretend we're doing a 100-continue transform here. - ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, - config_.requestIDExtension(), ""); + ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, config_, ""); // The actual response header processing. 
- ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, - config_.requestIDExtension(), via_); + ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, config_, + via_); EXPECT_EQ("foo", response_headers.get_(Headers::get().Via)); } @@ -599,7 +610,7 @@ TEST_F(ConnectionManagerUtilityTest, RequestIdGeneratedWhenItsNotPresent) { } { - Runtime::RandomGeneratorImpl rand; + Random::RandomGeneratorImpl rand; TestRequestHeaderMapImpl headers{{"x-client-trace-id", "trace-id"}}; const std::string uuid = rand.uuid(); EXPECT_CALL(random_, uuid()).WillOnce(Return(uuid)); @@ -753,8 +764,7 @@ TEST_F(ConnectionManagerUtilityTest, MutateResponseHeaders) { {"connection", "foo"}, {"transfer-encoding", "foo"}, {"custom_header", "custom_value"}}; TestRequestHeaderMapImpl request_headers{{"x-request-id", "request-id"}}; - ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, - config_.requestIDExtension(), ""); + ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, config_, ""); EXPECT_EQ(1UL, response_headers.size()); EXPECT_EQ("custom_value", response_headers.get_("custom_header")); @@ -771,8 +781,7 @@ TEST_F(ConnectionManagerUtilityTest, DoNotRemoveConnectionUpgradeForWebSocketRes {"upgrade", "bar"}}; EXPECT_TRUE(Utility::isUpgrade(request_headers)); EXPECT_TRUE(Utility::isUpgrade(response_headers)); - ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, - config_.requestIDExtension(), ""); + ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, config_, ""); EXPECT_EQ(3UL, response_headers.size()) << response_headers; EXPECT_EQ("upgrade", response_headers.get_("connection")); @@ -787,8 +796,7 @@ TEST_F(ConnectionManagerUtilityTest, DoNotAddConnectionLengthForWebSocket101Resp {":status", "101"}, {"connection", "upgrade"}, {"upgrade", "bar"}}; EXPECT_TRUE(Utility::isUpgrade(request_headers)); 
EXPECT_TRUE(Utility::isUpgrade(response_headers)); - ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, - config_.requestIDExtension(), ""); + ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, config_, ""); EXPECT_EQ(3UL, response_headers.size()) << response_headers; EXPECT_EQ("upgrade", response_headers.get_("connection")); @@ -804,8 +812,8 @@ TEST_F(ConnectionManagerUtilityTest, ClearUpgradeHeadersForNonUpgradeRequests) { {"connection", "foo"}, {"transfer-encoding", "bar"}, {"custom_header", "custom_value"}}; EXPECT_FALSE(Utility::isUpgrade(request_headers)); EXPECT_FALSE(Utility::isUpgrade(response_headers)); - ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, - config_.requestIDExtension(), ""); + ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, config_, + ""); EXPECT_EQ(1UL, response_headers.size()) << response_headers; EXPECT_EQ("custom_value", response_headers.get_("custom_header")); @@ -820,8 +828,8 @@ TEST_F(ConnectionManagerUtilityTest, ClearUpgradeHeadersForNonUpgradeRequests) { {"custom_header", "custom_value"}}; EXPECT_FALSE(Utility::isUpgrade(request_headers)); EXPECT_TRUE(Utility::isUpgrade(response_headers)); - ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, - config_.requestIDExtension(), ""); + ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, config_, + ""); EXPECT_EQ(1UL, response_headers.size()) << response_headers; EXPECT_EQ("custom_value", response_headers.get_("custom_header")); @@ -833,8 +841,8 @@ TEST_F(ConnectionManagerUtilityTest, ClearUpgradeHeadersForNonUpgradeRequests) { TestResponseHeaderMapImpl response_headers{{"transfer-encoding", "foo"}, {"upgrade", "bar"}}; EXPECT_TRUE(Utility::isUpgrade(request_headers)); EXPECT_FALSE(Utility::isUpgrade(response_headers)); - ConnectionManagerUtility::mutateResponseHeaders(response_headers, 
&request_headers, - config_.requestIDExtension(), ""); + ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, config_, + ""); EXPECT_EQ(0UL, response_headers.size()) << response_headers; } @@ -854,8 +862,8 @@ TEST_F(ConnectionManagerUtilityTest, ClearUpgradeHeadersForNonUpgradeRequestsLeg {"custom_header", "custom_value"}}; EXPECT_FALSE(Utility::isUpgrade(request_headers)); EXPECT_TRUE(Utility::isUpgrade(response_headers)); - ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, - config_.requestIDExtension(), ""); + ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, config_, + ""); EXPECT_EQ(2UL, response_headers.size()) << response_headers; EXPECT_EQ("custom_value", response_headers.get_("custom_header")); @@ -868,8 +876,8 @@ TEST_F(ConnectionManagerUtilityTest, ClearUpgradeHeadersForNonUpgradeRequestsLeg TestResponseHeaderMapImpl response_headers{{"transfer-encoding", "foo"}, {"upgrade", "bar"}}; EXPECT_TRUE(Utility::isUpgrade(request_headers)); EXPECT_FALSE(Utility::isUpgrade(response_headers)); - ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, - config_.requestIDExtension(), ""); + ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, config_, + ""); EXPECT_EQ(1UL, response_headers.size()) << response_headers; EXPECT_EQ("bar", response_headers.get_("upgrade")); @@ -885,8 +893,7 @@ TEST_F(ConnectionManagerUtilityTest, MutateResponseHeadersReturnXRequestId) { EXPECT_CALL(*request_id_extension_, setInResponse(testing::Ref(response_headers), testing::Ref(request_headers))) .Times(1); - ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, - config_.requestIDExtension(), ""); + ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, config_, ""); EXPECT_EQ("request-id", response_headers.get_("x-request-id")); } @@ -898,11 +905,24 @@ 
TEST_F(ConnectionManagerUtilityTest, SkipMutateResponseHeadersReturnXRequestId) EXPECT_CALL(*request_id_extension_, setInResponse(testing::Ref(response_headers), testing::Ref(request_headers))) .Times(0); - ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, - config_.requestIDExtension(), ""); + ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, config_, ""); EXPECT_EQ("", response_headers.get_("x-request-id")); } +// Test that we do return x-request-id if we were asked to always return it even if trace is not +// forced. +TEST_F(ConnectionManagerUtilityTest, AlwaysMutateResponseHeadersReturnXRequestId) { + TestResponseHeaderMapImpl response_headers; + TestRequestHeaderMapImpl request_headers{{"x-request-id", "request-id"}}; + + EXPECT_CALL(*request_id_extension_, + setInResponse(testing::Ref(response_headers), testing::Ref(request_headers))) + .Times(1); + ON_CALL(config_, alwaysSetRequestIdInResponse()).WillByDefault(Return(true)); + ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, config_, ""); + EXPECT_EQ("request-id", response_headers.get_("x-request-id")); +} + // Test full sanitization of x-forwarded-client-cert. 
TEST_F(ConnectionManagerUtilityTest, MtlsSanitizeClientCert) { auto ssl = std::make_shared>(); @@ -1400,8 +1420,7 @@ TEST_F(ConnectionManagerUtilityTest, RemovesProxyResponseHeaders) { Http::TestResponseHeaderMapImpl response_headers{{"keep-alive", "timeout=60"}, {"proxy-connection", "proxy-header"}}; EXPECT_CALL(*request_id_extension_, setTraceStatus(_, _)).Times(0); - ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, - config_.requestIDExtension(), ""); + ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, config_, ""); EXPECT_EQ(TraceStatus::NoTrace, request_id_extension_->getTraceStatus(request_headers)); @@ -1409,6 +1428,16 @@ TEST_F(ConnectionManagerUtilityTest, RemovesProxyResponseHeaders) { EXPECT_FALSE(response_headers.has("proxy-connection")); } +// maybeNormalizePath() returns true with an empty path. +TEST_F(ConnectionManagerUtilityTest, SanitizeEmptyPath) { + ON_CALL(config_, shouldNormalizePath()).WillByDefault(Return(false)); + TestRequestHeaderMapImpl original_headers; + + TestRequestHeaderMapImpl header_map(original_headers); + EXPECT_TRUE(ConnectionManagerUtility::maybeNormalizePath(header_map, config_)); + EXPECT_EQ(original_headers, header_map); +} + // maybeNormalizePath() does nothing by default. TEST_F(ConnectionManagerUtilityTest, SanitizePathDefaultOff) { ON_CALL(config_, shouldNormalizePath()).WillByDefault(Return(false)); @@ -1439,7 +1468,7 @@ TEST_F(ConnectionManagerUtilityTest, SanitizePathRelativePAth) { TestRequestHeaderMapImpl header_map(original_headers); ConnectionManagerUtility::maybeNormalizePath(header_map, config_); - EXPECT_EQ(header_map.Path()->value().getStringView(), "/abc"); + EXPECT_EQ(header_map.getPathValue(), "/abc"); } // maybeNormalizePath() does not touch adjacent slashes by default. 
@@ -1451,7 +1480,7 @@ TEST_F(ConnectionManagerUtilityTest, MergeSlashesDefaultOff) { TestRequestHeaderMapImpl header_map(original_headers); ConnectionManagerUtility::maybeNormalizePath(header_map, config_); - EXPECT_EQ(header_map.Path()->value().getStringView(), "/xyz///abc"); + EXPECT_EQ(header_map.getPathValue(), "/xyz///abc"); } // maybeNormalizePath() merges adjacent slashes. @@ -1463,7 +1492,7 @@ TEST_F(ConnectionManagerUtilityTest, MergeSlashes) { TestRequestHeaderMapImpl header_map(original_headers); ConnectionManagerUtility::maybeNormalizePath(header_map, config_); - EXPECT_EQ(header_map.Path()->value().getStringView(), "/xyz/abc"); + EXPECT_EQ(header_map.getPathValue(), "/xyz/abc"); } // maybeNormalizePath() merges adjacent slashes if normalization if off. @@ -1475,7 +1504,18 @@ TEST_F(ConnectionManagerUtilityTest, MergeSlashesWithoutNormalization) { TestRequestHeaderMapImpl header_map(original_headers); ConnectionManagerUtility::maybeNormalizePath(header_map, config_); - EXPECT_EQ(header_map.Path()->value().getStringView(), "/xyz/../abc"); + EXPECT_EQ(header_map.getPathValue(), "/xyz/../abc"); +} + +// maybeNormalizeHost() removes port part from host header. 
+TEST_F(ConnectionManagerUtilityTest, RemovePort) { + ON_CALL(config_, shouldStripMatchingPort()).WillByDefault(Return(true)); + TestRequestHeaderMapImpl original_headers; + original_headers.setHost("host:443"); + + TestRequestHeaderMapImpl header_map(original_headers); + ConnectionManagerUtility::maybeNormalizeHost(header_map, config_, 443); + EXPECT_EQ(header_map.getHostValue(), "host"); } // test preserve_external_request_id true does not reset the passed requestId if passed diff --git a/test/common/http/date_provider_impl_test.cc b/test/common/http/date_provider_impl_test.cc index 304238232ad10..42312ad6af6e2 100644 --- a/test/common/http/date_provider_impl_test.cc +++ b/test/common/http/date_provider_impl_test.cc @@ -22,7 +22,7 @@ TEST(DateProviderImplTest, All) { EXPECT_CALL(*timer, enableTimer(std::chrono::milliseconds(500), _)); TlsCachingDateProviderImpl provider(dispatcher, tls); - ResponseHeaderMapImpl headers; + TestResponseHeaderMapImpl headers; provider.setDateHeader(headers); EXPECT_NE(nullptr, headers.Date()); diff --git a/test/common/http/header_map_impl_fuzz.proto b/test/common/http/header_map_impl_fuzz.proto index bebe373b6ae5b..69e4ae244a0a6 100644 --- a/test/common/http/header_map_impl_fuzz.proto +++ b/test/common/http/header_map_impl_fuzz.proto @@ -83,7 +83,6 @@ message Action { MutateAndMove mutate_and_move = 12; Append append = 11; google.protobuf.Empty copy = 7; - string lookup = 8; string remove = 9; string remove_prefix = 10; } diff --git a/test/common/http/header_map_impl_fuzz_test.cc b/test/common/http/header_map_impl_fuzz_test.cc index bfd7507e05587..97e327ce57f10 100644 --- a/test/common/http/header_map_impl_fuzz_test.cc +++ b/test/common/http/header_map_impl_fuzz_test.cc @@ -16,7 +16,7 @@ namespace Envoy { // Fuzz the header map implementation. 
DEFINE_PROTO_FUZZER(const test::common::http::HeaderMapImplFuzzTestCase& input) { - auto header_map = std::make_unique(); + auto header_map = Http::RequestHeaderMapImpl::create(); std::vector> lower_case_strings; std::vector> strings; uint64_t set_integer; @@ -149,13 +149,7 @@ DEFINE_PROTO_FUZZER(const test::common::http::HeaderMapImplFuzzTestCase& input) break; } case test::common::http::Action::kCopy: { - header_map = Http::createHeaderMap(*header_map); - break; - } - case test::common::http::Action::kLookup: { - const Http::HeaderEntry* header_entry; - header_map->lookup(Http::LowerCaseString(replaceInvalidCharacters(action.lookup())), - &header_entry); + header_map = Http::createHeaderMap(*header_map); break; } case test::common::http::Action::kRemove: { @@ -178,20 +172,16 @@ DEFINE_PROTO_FUZZER(const test::common::http::HeaderMapImplFuzzTestCase& input) // Exercise some read-only accessors. header_map->size(); header_map->byteSize(); - header_map->iterate( - [](const Http::HeaderEntry& header, void * /*context*/) -> Http::HeaderMap::Iterate { - header.key(); - header.value(); - return Http::HeaderMap::Iterate::Continue; - }, - nullptr); - header_map->iterateReverse( - [](const Http::HeaderEntry& header, void * /*context*/) -> Http::HeaderMap::Iterate { - header.key(); - header.value(); - return Http::HeaderMap::Iterate::Continue; - }, - nullptr); + header_map->iterate([](const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate { + header.key(); + header.value(); + return Http::HeaderMap::Iterate::Continue; + }); + header_map->iterateReverse([](const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate { + header.key(); + header.value(); + return Http::HeaderMap::Iterate::Continue; + }); } } diff --git a/test/common/http/header_map_impl_speed_test.cc b/test/common/http/header_map_impl_speed_test.cc index eb0f5e980a65c..32020e841c902 100644 --- a/test/common/http/header_map_impl_speed_test.cc +++ b/test/common/http/header_map_impl_speed_test.cc @@ -16,14 
+16,16 @@ static void addDummyHeaders(HeaderMap& headers, size_t num_headers) { } } -/** Measure the construction/destruction speed of HeaderMapImpl.*/ -static void HeaderMapImplCreate(benchmark::State& state) { - for (auto _ : state) { - HeaderMapImpl headers; - benchmark::DoNotOptimize(headers.size()); +/** Measure the construction/destruction speed of RequestHeaderMapImpl.*/ +static void headerMapImplCreate(benchmark::State& state) { + // Make sure first time construction is not counted. + Http::ResponseHeaderMapImpl::create(); + for (auto _ : state) { // NOLINT + auto headers = Http::ResponseHeaderMapImpl::create(); + benchmark::DoNotOptimize(headers->size()); } } -BENCHMARK(HeaderMapImplCreate); +BENCHMARK(headerMapImplCreate); /** * Measure the speed of setting/overwriting a header value. The numeric Arg passed @@ -32,17 +34,17 @@ BENCHMARK(HeaderMapImplCreate); * identify whether the speed of setReference() is dependent on the number of other * headers in the HeaderMapImpl. */ -static void HeaderMapImplSetReference(benchmark::State& state) { +static void headerMapImplSetReference(benchmark::State& state) { const LowerCaseString key("example-key"); const std::string value("01234567890123456789"); - HeaderMapImpl headers; - addDummyHeaders(headers, state.range(0)); - for (auto _ : state) { - headers.setReference(key, value); + auto headers = Http::ResponseHeaderMapImpl::create(); + addDummyHeaders(*headers, state.range(0)); + for (auto _ : state) { // NOLINT + headers->setReference(key, value); } - benchmark::DoNotOptimize(headers.size()); + benchmark::DoNotOptimize(headers->size()); } -BENCHMARK(HeaderMapImplSetReference)->Arg(0)->Arg(1)->Arg(10)->Arg(50); +BENCHMARK(headerMapImplSetReference)->Arg(0)->Arg(1)->Arg(10)->Arg(50); /** * Measure the speed of retrieving a header value. 
The numeric Arg passed by the @@ -52,127 +54,112 @@ BENCHMARK(HeaderMapImplSetReference)->Arg(0)->Arg(1)->Arg(10)->Arg(50); * method depends (or doesn't depend) on the number of other headers in the * HeaderMapImpl. */ -static void HeaderMapImplGet(benchmark::State& state) { +static void headerMapImplGet(benchmark::State& state) { const LowerCaseString key("example-key"); const std::string value("01234567890123456789"); - HeaderMapImpl headers; - addDummyHeaders(headers, state.range(0)); - headers.setReference(key, value); + auto headers = Http::ResponseHeaderMapImpl::create(); + addDummyHeaders(*headers, state.range(0)); + headers->setReference(key, value); size_t successes = 0; - for (auto _ : state) { - successes += (headers.get(key) != nullptr); + for (auto _ : state) { // NOLINT + successes += (headers->get(key) != nullptr); } benchmark::DoNotOptimize(successes); } -BENCHMARK(HeaderMapImplGet)->Arg(0)->Arg(1)->Arg(10)->Arg(50); +BENCHMARK(headerMapImplGet)->Arg(0)->Arg(1)->Arg(10)->Arg(50); /** * Measure the retrieval speed of a header for which HeaderMapImpl is expected to * provide special optimizations. 
*/ -static void HeaderMapImplGetInline(benchmark::State& state) { +static void headerMapImplGetInline(benchmark::State& state) { const std::string value("01234567890123456789"); - RequestHeaderMapImpl headers; - addDummyHeaders(headers, state.range(0)); - headers.setReferenceConnection(value); + auto headers = Http::ResponseHeaderMapImpl::create(); + addDummyHeaders(*headers, state.range(0)); + headers->setReferenceConnection(value); size_t size = 0; - for (auto _ : state) { - size += headers.Connection()->value().size(); + for (auto _ : state) { // NOLINT + size += headers->Connection()->value().size(); } benchmark::DoNotOptimize(size); } -BENCHMARK(HeaderMapImplGetInline)->Arg(0)->Arg(1)->Arg(10)->Arg(50); +BENCHMARK(headerMapImplGetInline)->Arg(0)->Arg(1)->Arg(10)->Arg(50); /** * Measure the speed of writing to a header for which HeaderMapImpl is expected to * provide special optimizations. */ -static void HeaderMapImplSetInlineMacro(benchmark::State& state) { +static void headerMapImplSetInlineMacro(benchmark::State& state) { const std::string value("01234567890123456789"); - RequestHeaderMapImpl headers; - addDummyHeaders(headers, state.range(0)); - for (auto _ : state) { - headers.setReferenceConnection(value); + auto headers = Http::ResponseHeaderMapImpl::create(); + addDummyHeaders(*headers, state.range(0)); + for (auto _ : state) { // NOLINT + headers->setReferenceConnection(value); } - benchmark::DoNotOptimize(headers.size()); + benchmark::DoNotOptimize(headers->size()); } -BENCHMARK(HeaderMapImplSetInlineMacro)->Arg(0)->Arg(1)->Arg(10)->Arg(50); +BENCHMARK(headerMapImplSetInlineMacro)->Arg(0)->Arg(1)->Arg(10)->Arg(50); /** * Measure the speed of writing to a header for which HeaderMapImpl is expected to * provide special optimizations. 
*/ -static void HeaderMapImplSetInlineInteger(benchmark::State& state) { +static void headerMapImplSetInlineInteger(benchmark::State& state) { uint64_t value = 12345; - RequestHeaderMapImpl headers; - addDummyHeaders(headers, state.range(0)); - for (auto _ : state) { - headers.setConnection(value); + auto headers = Http::ResponseHeaderMapImpl::create(); + addDummyHeaders(*headers, state.range(0)); + for (auto _ : state) { // NOLINT + headers->setConnection(value); } - benchmark::DoNotOptimize(headers.size()); + benchmark::DoNotOptimize(headers->size()); } -BENCHMARK(HeaderMapImplSetInlineInteger)->Arg(0)->Arg(1)->Arg(10)->Arg(50); +BENCHMARK(headerMapImplSetInlineInteger)->Arg(0)->Arg(1)->Arg(10)->Arg(50); /** Measure the speed of the byteSize() estimation method. */ -static void HeaderMapImplGetByteSize(benchmark::State& state) { - HeaderMapImpl headers; - addDummyHeaders(headers, state.range(0)); +static void headerMapImplGetByteSize(benchmark::State& state) { + auto headers = Http::ResponseHeaderMapImpl::create(); + addDummyHeaders(*headers, state.range(0)); uint64_t size = 0; - for (auto _ : state) { - size += headers.byteSize(); + for (auto _ : state) { // NOLINT + size += headers->byteSize(); } benchmark::DoNotOptimize(size); } -BENCHMARK(HeaderMapImplGetByteSize)->Arg(0)->Arg(1)->Arg(10)->Arg(50); +BENCHMARK(headerMapImplGetByteSize)->Arg(0)->Arg(1)->Arg(10)->Arg(50); /** Measure the speed of iteration with a lightweight callback. 
*/ -static void HeaderMapImplIterate(benchmark::State& state) { - HeaderMapImpl headers; +static void headerMapImplIterate(benchmark::State& state) { + auto headers = Http::ResponseHeaderMapImpl::create(); size_t num_callbacks = 0; - addDummyHeaders(headers, state.range(0)); - auto counting_callback = [](const HeaderEntry&, void* context) -> HeaderMap::Iterate { - (*static_cast(context))++; + addDummyHeaders(*headers, state.range(0)); + auto counting_callback = [&num_callbacks](const HeaderEntry&) -> HeaderMap::Iterate { + num_callbacks++; return HeaderMap::Iterate::Continue; }; - for (auto _ : state) { - headers.iterate(counting_callback, &num_callbacks); + for (auto _ : state) { // NOLINT + headers->iterate(counting_callback); } benchmark::DoNotOptimize(num_callbacks); } -BENCHMARK(HeaderMapImplIterate)->Arg(0)->Arg(1)->Arg(10)->Arg(50); - -/** Measure the speed of the HeaderMapImpl lookup() method. */ -static void HeaderMapImplLookup(benchmark::State& state) { - const LowerCaseString key("connection"); - const std::string value("01234567890123456789"); - HeaderMapImpl headers; - addDummyHeaders(headers, state.range(0)); - headers.addReference(key, value); - for (auto _ : state) { - const HeaderEntry* entry = nullptr; - auto result = headers.lookup(key, &entry); - benchmark::DoNotOptimize(result); - } -} -BENCHMARK(HeaderMapImplLookup)->Arg(0)->Arg(1)->Arg(10)->Arg(50); +BENCHMARK(headerMapImplIterate)->Arg(0)->Arg(1)->Arg(10)->Arg(50); /** * Measure the speed of removing a header by key name. * @note The measured time for each iteration includes the time needed to add * one copy of the header. 
*/ -static void HeaderMapImplRemove(benchmark::State& state) { +static void headerMapImplRemove(benchmark::State& state) { const LowerCaseString key("example-key"); const std::string value("01234567890123456789"); - HeaderMapImpl headers; - addDummyHeaders(headers, state.range(0)); - for (auto _ : state) { - headers.addReference(key, value); - headers.remove(key); + auto headers = Http::ResponseHeaderMapImpl::create(); + addDummyHeaders(*headers, state.range(0)); + for (auto _ : state) { // NOLINT + headers->addReference(key, value); + headers->remove(key); } - benchmark::DoNotOptimize(headers.size()); + benchmark::DoNotOptimize(headers->size()); } -BENCHMARK(HeaderMapImplRemove)->Arg(0)->Arg(1)->Arg(10)->Arg(50); +BENCHMARK(headerMapImplRemove)->Arg(0)->Arg(1)->Arg(10)->Arg(50); /** * Measure the speed of removing a header by key name, for the special case of @@ -180,24 +167,24 @@ BENCHMARK(HeaderMapImplRemove)->Arg(0)->Arg(1)->Arg(10)->Arg(50); * @note The measured time for each iteration includes the time needed to add * one copy of the header. */ -static void HeaderMapImplRemoveInline(benchmark::State& state) { +static void headerMapImplRemoveInline(benchmark::State& state) { const LowerCaseString key("connection"); const std::string value("01234567890123456789"); - HeaderMapImpl headers; - addDummyHeaders(headers, state.range(0)); - for (auto _ : state) { - headers.addReference(key, value); - headers.remove(key); + auto headers = Http::ResponseHeaderMapImpl::create(); + addDummyHeaders(*headers, state.range(0)); + for (auto _ : state) { // NOLINT + headers->addReference(key, value); + headers->remove(key); } - benchmark::DoNotOptimize(headers.size()); + benchmark::DoNotOptimize(headers->size()); } -BENCHMARK(HeaderMapImplRemoveInline)->Arg(0)->Arg(1)->Arg(10)->Arg(50); +BENCHMARK(headerMapImplRemoveInline)->Arg(0)->Arg(1)->Arg(10)->Arg(50); /** * Measure the speed of creating a HeaderMapImpl and populating it with a realistic * set of response headers. 
*/ -static void HeaderMapImplPopulate(benchmark::State& state) { +static void headerMapImplPopulate(benchmark::State& state) { const std::pair headers_to_add[] = { {LowerCaseString("cache-control"), "max-age=0, private, must-revalidate"}, {LowerCaseString("content-encoding"), "gzip"}, @@ -210,15 +197,15 @@ static void HeaderMapImplPopulate(benchmark::State& state) { {LowerCaseString("set-cookie"), "_cookie1=12345678; path = /; secure"}, {LowerCaseString("set-cookie"), "_cookie2=12345678; path = /; secure"}, }; - for (auto _ : state) { - HeaderMapImpl headers; + for (auto _ : state) { // NOLINT + auto headers = Http::ResponseHeaderMapImpl::create(); for (const auto& key_value : headers_to_add) { - headers.addReference(key_value.first, key_value.second); + headers->addReference(key_value.first, key_value.second); } - benchmark::DoNotOptimize(headers.size()); + benchmark::DoNotOptimize(headers->size()); } } -BENCHMARK(HeaderMapImplPopulate); +BENCHMARK(headerMapImplPopulate); } // namespace Http } // namespace Envoy diff --git a/test/common/http/header_map_impl_test.cc b/test/common/http/header_map_impl_test.cc index b233e9e948213..0e5b0c3df8cda 100644 --- a/test/common/http/header_map_impl_test.cc +++ b/test/common/http/header_map_impl_test.cc @@ -1,6 +1,8 @@ +#include #include #include +#include "common/http/header_list_view.h" #include "common/http/header_map_impl.h" #include "common/http/header_utility.h" @@ -9,6 +11,7 @@ #include "gtest/gtest.h" +using ::testing::ElementsAre; using ::testing::InSequence; namespace Envoy { @@ -105,6 +108,19 @@ TEST(HeaderStringTest, All) { EXPECT_EQ("HELLO", string.getStringView()); } + // Inline rtrim removes trailing whitespace only. 
+ { + const std::string data_with_leading_lws = " \t\f\v data"; + const std::string data_with_leading_and_trailing_lws = data_with_leading_lws + " \t\f\v"; + HeaderString string; + string.append(data_with_leading_and_trailing_lws.data(), + data_with_leading_and_trailing_lws.size()); + EXPECT_EQ(data_with_leading_and_trailing_lws, string.getStringView()); + string.rtrim(); + EXPECT_NE(data_with_leading_and_trailing_lws, string.getStringView()); + EXPECT_EQ(data_with_leading_lws, string.getStringView()); + } + // Static clear() does nothing. { std::string static_string("HELLO"); @@ -338,31 +354,48 @@ TEST(HeaderStringTest, All) { } } +Http::RegisterCustomInlineHeader + custom_header_1(Http::LowerCaseString{"foo_custom_header"}); +Http::RegisterCustomInlineHeader + custom_header_1_copy(Http::LowerCaseString{"foo_custom_header"}); + +// Make sure that the same header registered twice points to the same location. +TEST(HeaderMapImplTest, CustomRegisteredHeaders) { + TestRequestHeaderMapImpl headers; + EXPECT_EQ(custom_header_1.handle(), custom_header_1_copy.handle()); + EXPECT_EQ(nullptr, headers.getInline(custom_header_1.handle())); + EXPECT_EQ(nullptr, headers.getInline(custom_header_1_copy.handle())); + headers.setInline(custom_header_1.handle(), 42); + EXPECT_EQ("42", headers.getInlineValue(custom_header_1_copy.handle())); + EXPECT_EQ("foo_custom_header", + headers.getInline(custom_header_1.handle())->key().getStringView()); +} + #define TEST_INLINE_HEADER_FUNCS(name) \ - header_map.addCopy(Headers::get().name, #name); \ - EXPECT_EQ(header_map.name()->value().getStringView(), #name); \ - header_map.remove##name(); \ - EXPECT_EQ(nullptr, header_map.name()); \ - header_map.set##name(#name); \ - EXPECT_EQ(header_map.get(Headers::get().name)->value().getStringView(), #name); + header_map->addCopy(Headers::get().name, #name); \ + EXPECT_EQ(header_map->name()->value().getStringView(), #name); \ + header_map->remove##name(); \ + EXPECT_EQ(nullptr, header_map->name()); \ + 
header_map->set##name(#name); \ + EXPECT_EQ(header_map->get(Headers::get().name)->value().getStringView(), #name); // Make sure that the O(1) headers are wired up properly. TEST(HeaderMapImplTest, AllInlineHeaders) { { - RequestHeaderMapImpl header_map; + auto header_map = RequestHeaderMapImpl::create(); INLINE_REQ_HEADERS(TEST_INLINE_HEADER_FUNCS) INLINE_REQ_RESP_HEADERS(TEST_INLINE_HEADER_FUNCS) } { // No request trailer O(1) headers. } { - ResponseHeaderMapImpl header_map; + auto header_map = ResponseHeaderMapImpl::create(); INLINE_RESP_HEADERS(TEST_INLINE_HEADER_FUNCS) INLINE_REQ_RESP_HEADERS(TEST_INLINE_HEADER_FUNCS) INLINE_RESP_HEADERS_TRAILERS(TEST_INLINE_HEADER_FUNCS) } { - ResponseTrailerMapImpl header_map; + auto header_map = ResponseTrailerMapImpl::create(); INLINE_RESP_HEADERS_TRAILERS(TEST_INLINE_HEADER_FUNCS) } } @@ -376,7 +409,7 @@ TEST(HeaderMapImplTest, InlineInsert) { EXPECT_FALSE(headers.empty()); EXPECT_EQ(1, headers.size()); EXPECT_EQ(":authority", headers.Host()->key().getStringView()); - EXPECT_EQ("hello", headers.Host()->value().getStringView()); + EXPECT_EQ("hello", headers.getHostValue()); EXPECT_EQ("hello", headers.get(Headers::get().Host)->value().getStringView()); } @@ -386,63 +419,63 @@ TEST(HeaderMapImplTest, InlineAppend) { // Create via header and append. headers.setVia(""); headers.appendVia("1.0 fred", ","); - EXPECT_EQ(headers.Via()->value().getStringView(), "1.0 fred"); + EXPECT_EQ(headers.getViaValue(), "1.0 fred"); headers.appendVia("1.1 nowhere.com", ","); - EXPECT_EQ(headers.Via()->value().getStringView(), "1.0 fred,1.1 nowhere.com"); + EXPECT_EQ(headers.getViaValue(), "1.0 fred,1.1 nowhere.com"); } { // Append to via header without explicitly creating first. 
TestRequestHeaderMapImpl headers; headers.appendVia("1.0 fred", ","); - EXPECT_EQ(headers.Via()->value().getStringView(), "1.0 fred"); + EXPECT_EQ(headers.getViaValue(), "1.0 fred"); headers.appendVia("1.1 nowhere.com", ","); - EXPECT_EQ(headers.Via()->value().getStringView(), "1.0 fred,1.1 nowhere.com"); + EXPECT_EQ(headers.getViaValue(), "1.0 fred,1.1 nowhere.com"); } { // Custom delimiter. TestRequestHeaderMapImpl headers; headers.setVia(""); headers.appendVia("1.0 fred", ", "); - EXPECT_EQ(headers.Via()->value().getStringView(), "1.0 fred"); + EXPECT_EQ(headers.getViaValue(), "1.0 fred"); headers.appendVia("1.1 nowhere.com", ", "); - EXPECT_EQ(headers.Via()->value().getStringView(), "1.0 fred, 1.1 nowhere.com"); + EXPECT_EQ(headers.getViaValue(), "1.0 fred, 1.1 nowhere.com"); } { // Append and then later set. TestRequestHeaderMapImpl headers; headers.appendVia("1.0 fred", ","); headers.appendVia("1.1 nowhere.com", ","); - EXPECT_EQ(headers.Via()->value().getStringView(), "1.0 fred,1.1 nowhere.com"); + EXPECT_EQ(headers.getViaValue(), "1.0 fred,1.1 nowhere.com"); headers.setVia("2.0 override"); - EXPECT_EQ(headers.Via()->value().getStringView(), "2.0 override"); + EXPECT_EQ(headers.getViaValue(), "2.0 override"); } { // Set and then append. This mimics how GrpcTimeout is set. 
TestRequestHeaderMapImpl headers; headers.setGrpcTimeout(42); - EXPECT_EQ(headers.GrpcTimeout()->value().getStringView(), "42"); + EXPECT_EQ(headers.getGrpcTimeoutValue(), "42"); headers.appendGrpcTimeout("s", ""); - EXPECT_EQ(headers.GrpcTimeout()->value().getStringView(), "42s"); + EXPECT_EQ(headers.getGrpcTimeoutValue(), "42s"); } } TEST(HeaderMapImplTest, MoveIntoInline) { TestRequestHeaderMapImpl headers; HeaderString key; - key.setCopy(Headers::get().CacheControl.get()); + key.setCopy(Headers::get().EnvoyRetryOn.get()); HeaderString value; value.setCopy("hello"); headers.addViaMove(std::move(key), std::move(value)); - EXPECT_EQ("cache-control", headers.CacheControl()->key().getStringView()); - EXPECT_EQ("hello", headers.CacheControl()->value().getStringView()); + EXPECT_EQ("x-envoy-retry-on", headers.EnvoyRetryOn()->key().getStringView()); + EXPECT_EQ("hello", headers.getEnvoyRetryOnValue()); HeaderString key2; - key2.setCopy(Headers::get().CacheControl.get()); + key2.setCopy(Headers::get().EnvoyRetryOn.get()); HeaderString value2; value2.setCopy("there"); headers.addViaMove(std::move(key2), std::move(value2)); - EXPECT_EQ("cache-control", headers.CacheControl()->key().getStringView()); - EXPECT_EQ("hello,there", headers.CacheControl()->value().getStringView()); + EXPECT_EQ("x-envoy-retry-on", headers.EnvoyRetryOn()->key().getStringView()); + EXPECT_EQ("hello,there", headers.getEnvoyRetryOnValue()); } TEST(HeaderMapImplTest, Remove) { @@ -464,7 +497,7 @@ TEST(HeaderMapImplTest, Remove) { // Add and remove by inline. EXPECT_EQ(0UL, headers.removeContentLength()); headers.setContentLength(5); - EXPECT_EQ("5", headers.ContentLength()->value().getStringView()); + EXPECT_EQ("5", headers.getContentLengthValue()); EXPECT_EQ(1UL, headers.size()); EXPECT_FALSE(headers.empty()); EXPECT_EQ(1UL, headers.removeContentLength()); @@ -474,7 +507,7 @@ TEST(HeaderMapImplTest, Remove) { // Add inline and remove by name. 
headers.setContentLength(5); - EXPECT_EQ("5", headers.ContentLength()->value().getStringView()); + EXPECT_EQ("5", headers.getContentLengthValue()); EXPECT_EQ(1UL, headers.size()); EXPECT_FALSE(headers.empty()); EXPECT_EQ(1UL, headers.remove(Headers::get().ContentLength)); @@ -487,7 +520,42 @@ TEST(HeaderMapImplTest, Remove) { EXPECT_EQ(0UL, headers.remove(Headers::get().ContentLength)); } -TEST(HeaderMapImplTest, RemoveRegex) { +TEST(HeaderMapImplTest, RemoveHost) { + TestRequestHeaderMapImpl headers; + headers.setHost("foo"); + EXPECT_EQ("foo", headers.get_("host")); + EXPECT_EQ("foo", headers.get_(":authority")); + // Make sure that when we remove by "host" without using the inline functions, the mapping to + // ":authority" still takes place. + // https://github.com/envoyproxy/envoy/pull/12160 + EXPECT_EQ(1UL, headers.remove("host")); + EXPECT_EQ("", headers.get_("host")); + EXPECT_EQ("", headers.get_(":authority")); + EXPECT_EQ(nullptr, headers.Host()); +} + +TEST(HeaderMapImplTest, RemoveIf) { + LowerCaseString key1 = LowerCaseString("X-postfix-foo"); + LowerCaseString key2 = LowerCaseString("X-postfix-"); + LowerCaseString key3 = LowerCaseString("x-postfix-eep"); + + TestRequestHeaderMapImpl headers; + headers.addReference(key1, "value"); + headers.addReference(key2, "value"); + headers.addReference(key3, "value"); + + EXPECT_EQ(0UL, headers.removeIf([](const HeaderEntry&) -> bool { return false; })); + + EXPECT_EQ(2UL, headers.removeIf([](const HeaderEntry& entry) -> bool { + return absl::EndsWith(entry.key().getStringView(), "foo") || + absl::EndsWith(entry.key().getStringView(), "eep"); + })); + + TestRequestHeaderMapImpl expected{{"X-postfix-", "value"}}; + EXPECT_EQ(expected, headers); +} + +TEST(HeaderMapImplTest, RemovePrefix) { // These will match. 
LowerCaseString key1 = LowerCaseString("X-prefix-foo"); LowerCaseString key3 = LowerCaseString("X-Prefix-"); @@ -519,17 +587,28 @@ TEST(HeaderMapImplTest, RemoveRegex) { EXPECT_EQ(nullptr, headers.get(key2)); EXPECT_EQ(nullptr, headers.get(key4)); - // Add inline and remove by regex + // Add inline and remove by prefix headers.setContentLength(5); - EXPECT_EQ("5", headers.ContentLength()->value().getStringView()); + EXPECT_EQ("5", headers.getContentLengthValue()); EXPECT_EQ(1UL, headers.size()); EXPECT_FALSE(headers.empty()); EXPECT_EQ(1UL, headers.removePrefix(LowerCaseString("content"))); EXPECT_EQ(nullptr, headers.ContentLength()); } +class HeaderAndValueCb + : public testing::MockFunction { +public: + HeaderMap::ConstIterateCb asIterateCb() { + return [this](const Http::HeaderEntry& header) -> HeaderMap::Iterate { + Call(std::string(header.key().getStringView()), std::string(header.value().getStringView())); + return HeaderMap::Iterate::Continue; + }; + } +}; + TEST(HeaderMapImplTest, SetRemovesAllValues) { - TestHeaderMapImpl headers; + TestRequestHeaderMapImpl headers; LowerCaseString key1("hello"); LowerCaseString key2("olleh"); @@ -544,10 +623,8 @@ TEST(HeaderMapImplTest, SetRemovesAllValues) { headers.addReference(key1, ref_value3); headers.addReference(key1, ref_value4); - using MockCb = testing::MockFunction; - { - MockCb cb; + HeaderAndValueCb cb; InSequence seq; EXPECT_CALL(cb, Call("hello", "world")); @@ -555,31 +632,19 @@ TEST(HeaderMapImplTest, SetRemovesAllValues) { EXPECT_CALL(cb, Call("hello", "globe")); EXPECT_CALL(cb, Call("hello", "earth")); - headers.iterate( - [](const Http::HeaderEntry& header, void* cb_v) -> HeaderMap::Iterate { - static_cast(cb_v)->Call(std::string(header.key().getStringView()), - std::string(header.value().getStringView())); - return HeaderMap::Iterate::Continue; - }, - &cb); + headers.iterate(cb.asIterateCb()); } headers.setReference(key1, ref_value5); // set moves key to end { - MockCb cb; + HeaderAndValueCb cb; 
InSequence seq; EXPECT_CALL(cb, Call("olleh", "planet")); EXPECT_CALL(cb, Call("hello", "blue marble")); - headers.iterate( - [](const Http::HeaderEntry& header, void* cb_v) -> HeaderMap::Iterate { - static_cast(cb_v)->Call(std::string(header.key().getStringView()), - std::string(header.value().getStringView())); - return HeaderMap::Iterate::Continue; - }, - &cb); + headers.iterate(cb.asIterateCb()); } } @@ -590,21 +655,21 @@ TEST(HeaderMapImplTest, DoubleInlineAdd) { const std::string bar("bar"); headers.addReference(Headers::get().ContentLength, foo); headers.addReference(Headers::get().ContentLength, bar); - EXPECT_EQ("foo,bar", headers.ContentLength()->value().getStringView()); + EXPECT_EQ("foo,bar", headers.getContentLengthValue()); EXPECT_EQ(1UL, headers.size()); } { TestRequestHeaderMapImpl headers; headers.addReferenceKey(Headers::get().ContentLength, "foo"); headers.addReferenceKey(Headers::get().ContentLength, "bar"); - EXPECT_EQ("foo,bar", headers.ContentLength()->value().getStringView()); + EXPECT_EQ("foo,bar", headers.getContentLengthValue()); EXPECT_EQ(1UL, headers.size()); } { TestRequestHeaderMapImpl headers; headers.addReferenceKey(Headers::get().ContentLength, 5); headers.addReferenceKey(Headers::get().ContentLength, 6); - EXPECT_EQ("5,6", headers.ContentLength()->value().getStringView()); + EXPECT_EQ("5,6", headers.getContentLengthValue()); EXPECT_EQ(1UL, headers.size()); } { @@ -612,7 +677,7 @@ TEST(HeaderMapImplTest, DoubleInlineAdd) { const std::string foo("foo"); headers.addReference(Headers::get().ContentLength, foo); headers.addReferenceKey(Headers::get().ContentLength, 6); - EXPECT_EQ("foo,6", headers.ContentLength()->value().getStringView()); + EXPECT_EQ("foo,6", headers.getContentLengthValue()); EXPECT_EQ(1UL, headers.size()); } } @@ -620,7 +685,7 @@ TEST(HeaderMapImplTest, DoubleInlineAdd) { // Per https://github.com/envoyproxy/envoy/issues/7488 make sure we don't // combine set-cookie headers TEST(HeaderMapImplTest, DoubleCookieAdd) { 
- TestHeaderMapImpl headers; + TestRequestHeaderMapImpl headers; const std::string foo("foo"); const std::string bar("bar"); const LowerCaseString& set_cookie = Http::Headers::get().SetCookie; @@ -639,12 +704,12 @@ TEST(HeaderMapImplTest, DoubleInlineSet) { TestRequestHeaderMapImpl headers; headers.setReferenceKey(Headers::get().ContentType, "blah"); headers.setReferenceKey(Headers::get().ContentType, "text/html"); - EXPECT_EQ("text/html", headers.ContentType()->value().getStringView()); + EXPECT_EQ("text/html", headers.getContentTypeValue()); EXPECT_EQ(1UL, headers.size()); } TEST(HeaderMapImplTest, AddReferenceKey) { - TestHeaderMapImpl headers; + TestRequestHeaderMapImpl headers; LowerCaseString foo("hello"); headers.addReferenceKey(foo, "world"); EXPECT_NE("world", headers.get(foo)->value().getStringView().data()); @@ -652,7 +717,7 @@ TEST(HeaderMapImplTest, AddReferenceKey) { } TEST(HeaderMapImplTest, SetReferenceKey) { - TestHeaderMapImpl headers; + TestRequestHeaderMapImpl headers; LowerCaseString foo("hello"); headers.setReferenceKey(foo, "world"); EXPECT_NE("world", headers.get(foo)->value().getStringView().data()); @@ -681,19 +746,12 @@ TEST(HeaderMapImplTest, SetCopy) { headers.setCopy(foo, "override-monde"); EXPECT_EQ(headers.size(), 2); - using MockCb = testing::MockFunction; - MockCb cb; + HeaderAndValueCb cb; InSequence seq; EXPECT_CALL(cb, Call("hello", "override-monde")); EXPECT_CALL(cb, Call("hello", "monde2")); - headers.iterate( - [](const Http::HeaderEntry& header, void* cb_v) -> HeaderMap::Iterate { - static_cast(cb_v)->Call(std::string(header.key().getStringView()), - std::string(header.value().getStringView())); - return HeaderMap::Iterate::Continue; - }, - &cb); + headers.iterate(cb.asIterateCb()); // Test setting an empty string and then overriding. 
EXPECT_EQ(2UL, headers.remove(foo)); @@ -709,10 +767,10 @@ TEST(HeaderMapImplTest, SetCopy) { EXPECT_EQ(headers.size(), 0); headers.setCopy(Headers::get().Path, "/"); EXPECT_EQ(headers.size(), 1); - EXPECT_EQ(headers.Path()->value().getStringView(), "/"); + EXPECT_EQ(headers.getPathValue(), "/"); headers.setPath("/foo"); EXPECT_EQ(headers.size(), 1); - EXPECT_EQ(headers.Path()->value().getStringView(), "/foo"); + EXPECT_EQ(headers.getPathValue(), "/foo"); } TEST(HeaderMapImplTest, AddCopy) { @@ -771,24 +829,24 @@ TEST(HeaderMapImplTest, AddCopy) { EXPECT_EQ("42", headers.get(lcKey3)->value().getStringView()); EXPECT_EQ(2UL, headers.get(lcKey3)->value().size()); - LowerCaseString cache_control("cache-control"); - headers.addCopy(cache_control, "max-age=1345"); - EXPECT_EQ("max-age=1345", headers.get(cache_control)->value().getStringView()); - EXPECT_EQ("max-age=1345", headers.CacheControl()->value().getStringView()); - headers.addCopy(cache_control, "public"); - EXPECT_EQ("max-age=1345,public", headers.get(cache_control)->value().getStringView()); - headers.addCopy(cache_control, ""); - EXPECT_EQ("max-age=1345,public", headers.get(cache_control)->value().getStringView()); - headers.addCopy(cache_control, 123); - EXPECT_EQ("max-age=1345,public,123", headers.get(cache_control)->value().getStringView()); - headers.addCopy(cache_control, std::numeric_limits::max()); + LowerCaseString envoy_retry_on("x-envoy-retry-on"); + headers.addCopy(envoy_retry_on, "max-age=1345"); + EXPECT_EQ("max-age=1345", headers.get(envoy_retry_on)->value().getStringView()); + EXPECT_EQ("max-age=1345", headers.getEnvoyRetryOnValue()); + headers.addCopy(envoy_retry_on, "public"); + EXPECT_EQ("max-age=1345,public", headers.get(envoy_retry_on)->value().getStringView()); + headers.addCopy(envoy_retry_on, ""); + EXPECT_EQ("max-age=1345,public", headers.get(envoy_retry_on)->value().getStringView()); + headers.addCopy(envoy_retry_on, 123); + EXPECT_EQ("max-age=1345,public,123", 
headers.get(envoy_retry_on)->value().getStringView()); + headers.addCopy(envoy_retry_on, std::numeric_limits::max()); EXPECT_EQ("max-age=1345,public,123,18446744073709551615", - headers.get(cache_control)->value().getStringView()); + headers.get(envoy_retry_on)->value().getStringView()); } TEST(HeaderMapImplTest, Equality) { - TestHeaderMapImpl headers1; - TestHeaderMapImpl headers2; + TestRequestHeaderMapImpl headers1; + TestRequestHeaderMapImpl headers2; EXPECT_EQ(headers1, headers2); headers1.addCopy(LowerCaseString("hello"), "world"); @@ -799,7 +857,7 @@ TEST(HeaderMapImplTest, Equality) { } TEST(HeaderMapImplTest, LargeCharInHeader) { - TestHeaderMapImpl headers; + TestRequestHeaderMapImpl headers; LowerCaseString static_key("\x90hello"); std::string ref_value("value"); headers.addReference(static_key, ref_value); @@ -807,110 +865,97 @@ TEST(HeaderMapImplTest, LargeCharInHeader) { } TEST(HeaderMapImplTest, Iterate) { - TestHeaderMapImpl headers; + TestRequestHeaderMapImpl headers; headers.addCopy(LowerCaseString("hello"), "world"); headers.addCopy(LowerCaseString("foo"), "xxx"); headers.addCopy(LowerCaseString("world"), "hello"); LowerCaseString foo_key("foo"); headers.setReferenceKey(foo_key, "bar"); // set moves key to end - using MockCb = testing::MockFunction; - MockCb cb; + HeaderAndValueCb cb; InSequence seq; EXPECT_CALL(cb, Call("hello", "world")); EXPECT_CALL(cb, Call("world", "hello")); EXPECT_CALL(cb, Call("foo", "bar")); - headers.iterate( - [](const Http::HeaderEntry& header, void* cb_v) -> HeaderMap::Iterate { - static_cast(cb_v)->Call(std::string(header.key().getStringView()), - std::string(header.value().getStringView())); - return HeaderMap::Iterate::Continue; - }, - &cb); + headers.iterate(cb.asIterateCb()); } TEST(HeaderMapImplTest, IterateReverse) { - TestHeaderMapImpl headers; + TestRequestHeaderMapImpl headers; headers.addCopy(LowerCaseString("hello"), "world"); headers.addCopy(LowerCaseString("foo"), "bar"); LowerCaseString 
world_key("world"); headers.setReferenceKey(world_key, "hello"); - using MockCb = testing::MockFunction; - MockCb cb; + HeaderAndValueCb cb; InSequence seq; EXPECT_CALL(cb, Call("world", "hello")); EXPECT_CALL(cb, Call("foo", "bar")); // no "hello" - headers.iterateReverse( - [](const Http::HeaderEntry& header, void* cb_v) -> HeaderMap::Iterate { - static_cast(cb_v)->Call(std::string(header.key().getStringView()), - std::string(header.value().getStringView())); - if (header.key().getStringView() != "foo") { - return HeaderMap::Iterate::Continue; - } else { - return HeaderMap::Iterate::Break; - } - }, - &cb); -} - -TEST(HeaderMapImplTest, Lookup) { - TestRequestHeaderMapImpl headers; - headers.addCopy(LowerCaseString("hello"), "world"); - headers.setContentLength(5); - - // Lookup is not supported for non predefined inline headers. - { - const HeaderEntry* entry; - EXPECT_EQ(HeaderMap::Lookup::NotSupported, headers.lookup(LowerCaseString{"hello"}, &entry)); - EXPECT_EQ(nullptr, entry); - } - - // Lookup returns the entry of a predefined inline header if it exists. - { - const HeaderEntry* entry; - EXPECT_EQ(HeaderMap::Lookup::Found, headers.lookup(Headers::get().ContentLength, &entry)); - EXPECT_EQ("5", entry->value().getStringView()); - } - - // Lookup returns HeaderMap::Lookup::NotFound if a predefined inline header does not exist. 
- { - const HeaderEntry* entry; - EXPECT_EQ(HeaderMap::Lookup::NotFound, headers.lookup(Headers::get().Host, &entry)); - EXPECT_EQ(nullptr, entry); - } + headers.iterateReverse([&cb](const Http::HeaderEntry& header) -> HeaderMap::Iterate { + cb.Call(std::string(header.key().getStringView()), std::string(header.value().getStringView())); + if (header.key().getStringView() != "foo") { + return HeaderMap::Iterate::Continue; + } else { + return HeaderMap::Iterate::Break; + } + }); } TEST(HeaderMapImplTest, Get) { { - auto headers = createHeaderMap( - {{Headers::get().Path, "/"}, {LowerCaseString("hello"), "world"}}); - EXPECT_EQ("/", headers->get(LowerCaseString(":path"))->value().getStringView()); - EXPECT_EQ("world", headers->get(LowerCaseString("hello"))->value().getStringView()); - EXPECT_EQ(nullptr, headers->get(LowerCaseString("foo"))); + auto headers = TestRequestHeaderMapImpl({{Headers::get().Path.get(), "/"}, {"hello", "world"}}); + EXPECT_EQ("/", headers.get(LowerCaseString(":path"))->value().getStringView()); + EXPECT_EQ("world", headers.get(LowerCaseString("hello"))->value().getStringView()); + EXPECT_EQ(nullptr, headers.get(LowerCaseString("foo"))); } { - auto headers = createHeaderMap( - {{Headers::get().Path, "/"}, {LowerCaseString("hello"), "world"}}); + auto headers = TestRequestHeaderMapImpl({{Headers::get().Path.get(), "/"}, {"hello", "world"}}); // There is not HeaderMap method to set a header and copy both the key and value. 
const LowerCaseString path(":path"); - headers->setReferenceKey(path, "/new_path"); - EXPECT_EQ("/new_path", headers->get(LowerCaseString(":path"))->value().getStringView()); + headers.setReferenceKey(path, "/new_path"); + EXPECT_EQ("/new_path", headers.get(LowerCaseString(":path"))->value().getStringView()); const LowerCaseString foo("hello"); - headers->setReferenceKey(foo, "world2"); - EXPECT_EQ("world2", headers->get(foo)->value().getStringView()); - EXPECT_EQ(nullptr, headers->get(LowerCaseString("foo"))); + headers.setReferenceKey(foo, "world2"); + EXPECT_EQ("world2", headers.get(foo)->value().getStringView()); + EXPECT_EQ(nullptr, headers.get(LowerCaseString("foo"))); } } +TEST(HeaderMapImplTest, CreateHeaderMapFromIterator) { + std::vector> iter_headers{ + {LowerCaseString(Headers::get().Path), "/"}, {LowerCaseString("hello"), "world"}}; + auto headers = createHeaderMap(iter_headers.cbegin(), iter_headers.cend()); + EXPECT_EQ("/", headers->get(LowerCaseString(":path"))->value().getStringView()); + EXPECT_EQ("world", headers->get(LowerCaseString("hello"))->value().getStringView()); + EXPECT_EQ(nullptr, headers->get(LowerCaseString("foo"))); +} + +TEST(HeaderMapImplTest, TestHeaderList) { + std::array keys{Headers::get().Path.get(), "hello"}; + std::array values{"/", "world"}; + + auto headers = TestRequestHeaderMapImpl({{keys[0], values[0]}, {keys[1], values[1]}}); + HeaderListView header_list(headers); + auto to_string_views = + [](const HeaderListView::HeaderStringRefs& strs) -> std::vector { + std::vector str_views(strs.size()); + std::transform(strs.begin(), strs.end(), str_views.begin(), + [](auto value) -> absl::string_view { return value.get().getStringView(); }); + return str_views; + }; + + EXPECT_THAT(to_string_views(header_list.keys()), ElementsAre(":path", "hello")); + EXPECT_THAT(to_string_views(header_list.values()), ElementsAre("/", "world")); +} + TEST(HeaderMapImplTest, TestAppendHeader) { // Test appending to a string with a value. 
{ - TestHeaderMapImpl headers; + TestRequestHeaderMapImpl headers; LowerCaseString foo("key1"); headers.addCopy(foo, "some;"); headers.appendCopy(foo, "test"); @@ -919,7 +964,7 @@ TEST(HeaderMapImplTest, TestAppendHeader) { // Test appending to an empty string. { - TestHeaderMapImpl headers; + TestRequestHeaderMapImpl headers; LowerCaseString key2("key2"); headers.appendCopy(key2, "my tag data"); EXPECT_EQ(headers.get(key2)->value().getStringView(), "my tag data"); @@ -927,7 +972,7 @@ TEST(HeaderMapImplTest, TestAppendHeader) { // Test empty data case. { - TestHeaderMapImpl headers; + TestRequestHeaderMapImpl headers; LowerCaseString key3("key3"); headers.addCopy(key3, "empty"); headers.appendCopy(key3, ""); @@ -942,42 +987,40 @@ TEST(HeaderMapImplTest, TestAppendHeader) { // Append with default delimiter. headers.appendPath(" ", ","); headers.setPath(0); - EXPECT_EQ("0", headers.Path()->value().getStringView()); + EXPECT_EQ("0", headers.getPathValue()); EXPECT_EQ(1U, headers.Path()->value().size()); } // Test append for inline headers using this method and append##name. 
{ TestRequestHeaderMapImpl headers; headers.addCopy(Headers::get().Via, "1.0 fred"); - EXPECT_EQ(headers.Via()->value().getStringView(), "1.0 fred"); + EXPECT_EQ(headers.getViaValue(), "1.0 fred"); headers.appendCopy(Headers::get().Via, "1.1 p.example.net"); - EXPECT_EQ(headers.Via()->value().getStringView(), "1.0 fred,1.1 p.example.net"); + EXPECT_EQ(headers.getViaValue(), "1.0 fred,1.1 p.example.net"); headers.appendVia("1.1 new.example.net", ","); - EXPECT_EQ(headers.Via()->value().getStringView(), - "1.0 fred,1.1 p.example.net,1.1 new.example.net"); + EXPECT_EQ(headers.getViaValue(), "1.0 fred,1.1 p.example.net,1.1 new.example.net"); } } TEST(TestHeaderMapImplDeathTest, TestHeaderLengthChecks) { HeaderString value; value.setCopy("some;"); - EXPECT_DEATH_LOG_TO_STDERR(value.append(nullptr, std::numeric_limits::max()), - "Trying to allocate overly large headers."); + EXPECT_DEATH(value.append(nullptr, std::numeric_limits::max()), + "Trying to allocate overly large headers."); std::string source("hello"); HeaderString reference; reference.setReference(source); - EXPECT_DEATH_LOG_TO_STDERR(reference.append(nullptr, std::numeric_limits::max()), - "Trying to allocate overly large headers."); + EXPECT_DEATH(reference.append(nullptr, std::numeric_limits::max()), + "Trying to allocate overly large headers."); } TEST(HeaderMapImplTest, PseudoHeaderOrder) { - using MockCb = testing::MockFunction; - MockCb cb; + HeaderAndValueCb cb; { LowerCaseString foo("hello"); - Http::TestHeaderMapImpl headers{}; + Http::TestRequestHeaderMapImpl headers{}; EXPECT_EQ(0UL, headers.size()); EXPECT_TRUE(headers.empty()); @@ -999,13 +1042,7 @@ TEST(HeaderMapImplTest, PseudoHeaderOrder) { EXPECT_CALL(cb, Call("hello", "world")); EXPECT_CALL(cb, Call("content-type", "text/html")); - headers.iterate( - [](const Http::HeaderEntry& header, void* cb_v) -> HeaderMap::Iterate { - static_cast(cb_v)->Call(std::string(header.key().getStringView()), - std::string(header.value().getStringView())); - 
return HeaderMap::Iterate::Continue; - }, - &cb); + headers.iterate(cb.asIterateCb()); // Removal of the header before which pseudo-headers are inserted EXPECT_EQ(1UL, headers.remove(foo)); @@ -1015,13 +1052,7 @@ TEST(HeaderMapImplTest, PseudoHeaderOrder) { EXPECT_CALL(cb, Call(":method", "PUT")); EXPECT_CALL(cb, Call("content-type", "text/html")); - headers.iterate( - [](const Http::HeaderEntry& header, void* cb_v) -> HeaderMap::Iterate { - static_cast(cb_v)->Call(std::string(header.key().getStringView()), - std::string(header.value().getStringView())); - return HeaderMap::Iterate::Continue; - }, - &cb); + headers.iterate(cb.asIterateCb()); // Next pseudo-header goes after other pseudo-headers, but before normal headers headers.setReferenceKey(Headers::get().Path, "/test"); @@ -1032,13 +1063,7 @@ TEST(HeaderMapImplTest, PseudoHeaderOrder) { EXPECT_CALL(cb, Call(":path", "/test")); EXPECT_CALL(cb, Call("content-type", "text/html")); - headers.iterate( - [](const Http::HeaderEntry& header, void* cb_v) -> HeaderMap::Iterate { - static_cast(cb_v)->Call(std::string(header.key().getStringView()), - std::string(header.value().getStringView())); - return HeaderMap::Iterate::Continue; - }, - &cb); + headers.iterate(cb.asIterateCb()); // Removing the last normal header EXPECT_EQ(1UL, headers.remove(Headers::get().ContentType)); @@ -1048,13 +1073,7 @@ TEST(HeaderMapImplTest, PseudoHeaderOrder) { EXPECT_CALL(cb, Call(":method", "PUT")); EXPECT_CALL(cb, Call(":path", "/test")); - headers.iterate( - [](const Http::HeaderEntry& header, void* cb_v) -> HeaderMap::Iterate { - static_cast(cb_v)->Call(std::string(header.key().getStringView()), - std::string(header.value().getStringView())); - return HeaderMap::Iterate::Continue; - }, - &cb); + headers.iterate(cb.asIterateCb()); // Adding a new pseudo-header after removing the last normal header headers.setReferenceKey(Headers::get().Host, "host"); @@ -1065,13 +1084,7 @@ TEST(HeaderMapImplTest, PseudoHeaderOrder) { EXPECT_CALL(cb, 
Call(":path", "/test")); EXPECT_CALL(cb, Call(":authority", "host")); - headers.iterate( - [](const Http::HeaderEntry& header, void* cb_v) -> HeaderMap::Iterate { - static_cast(cb_v)->Call(std::string(header.key().getStringView()), - std::string(header.value().getStringView())); - return HeaderMap::Iterate::Continue; - }, - &cb); + headers.iterate(cb.asIterateCb()); // Adding the first normal header headers.setReferenceKey(Headers::get().ContentType, "text/html"); @@ -1083,13 +1096,7 @@ TEST(HeaderMapImplTest, PseudoHeaderOrder) { EXPECT_CALL(cb, Call(":authority", "host")); EXPECT_CALL(cb, Call("content-type", "text/html")); - headers.iterate( - [](const Http::HeaderEntry& header, void* cb_v) -> HeaderMap::Iterate { - static_cast(cb_v)->Call(std::string(header.key().getStringView()), - std::string(header.value().getStringView())); - return HeaderMap::Iterate::Continue; - }, - &cb); + headers.iterate(cb.asIterateCb()); // Removing all pseudo-headers EXPECT_EQ(1UL, headers.remove(Headers::get().Path)); @@ -1100,13 +1107,7 @@ TEST(HeaderMapImplTest, PseudoHeaderOrder) { EXPECT_CALL(cb, Call("content-type", "text/html")); - headers.iterate( - [](const Http::HeaderEntry& header, void* cb_v) -> HeaderMap::Iterate { - static_cast(cb_v)->Call(std::string(header.key().getStringView()), - std::string(header.value().getStringView())); - return HeaderMap::Iterate::Continue; - }, - &cb); + headers.iterate(cb.asIterateCb()); // Removing all headers EXPECT_EQ(1UL, headers.remove(Headers::get().ContentType)); @@ -1120,22 +1121,16 @@ TEST(HeaderMapImplTest, PseudoHeaderOrder) { EXPECT_CALL(cb, Call(":status", "200")); - headers.iterate( - [](const Http::HeaderEntry& header, void* cb_v) -> HeaderMap::Iterate { - static_cast(cb_v)->Call(std::string(header.key().getStringView()), - std::string(header.value().getStringView())); - return HeaderMap::Iterate::Continue; - }, - &cb); + headers.iterate(cb.asIterateCb()); } // Starting with a normal header { - auto headers = 
createHeaderMap({{Headers::get().ContentType, "text/plain"}, - {Headers::get().Method, "GET"}, - {Headers::get().Path, "/"}, - {LowerCaseString("hello"), "world"}, - {Headers::get().Host, "host"}}); + auto headers = TestRequestHeaderMapImpl({{Headers::get().ContentType.get(), "text/plain"}, + {Headers::get().Method.get(), "GET"}, + {Headers::get().Path.get(), "/"}, + {"hello", "world"}, + {Headers::get().Host.get(), "host"}}); InSequence seq; EXPECT_CALL(cb, Call(":method", "GET")); @@ -1144,22 +1139,16 @@ TEST(HeaderMapImplTest, PseudoHeaderOrder) { EXPECT_CALL(cb, Call("content-type", "text/plain")); EXPECT_CALL(cb, Call("hello", "world")); - headers->iterate( - [](const Http::HeaderEntry& header, void* cb_v) -> HeaderMap::Iterate { - static_cast(cb_v)->Call(std::string(header.key().getStringView()), - std::string(header.value().getStringView())); - return HeaderMap::Iterate::Continue; - }, - &cb); + headers.iterate(cb.asIterateCb()); } // Starting with a pseudo-header { - auto headers = createHeaderMap({{Headers::get().Path, "/"}, - {Headers::get().ContentType, "text/plain"}, - {Headers::get().Method, "GET"}, - {LowerCaseString("hello"), "world"}, - {Headers::get().Host, "host"}}); + auto headers = TestRequestHeaderMapImpl({{Headers::get().Path.get(), "/"}, + {Headers::get().ContentType.get(), "text/plain"}, + {Headers::get().Method.get(), "GET"}, + {"hello", "world"}, + {Headers::get().Host.get(), "host"}}); InSequence seq; EXPECT_CALL(cb, Call(":path", "/")); @@ -1168,28 +1157,22 @@ TEST(HeaderMapImplTest, PseudoHeaderOrder) { EXPECT_CALL(cb, Call("content-type", "text/plain")); EXPECT_CALL(cb, Call("hello", "world")); - headers->iterate( - [](const Http::HeaderEntry& header, void* cb_v) -> HeaderMap::Iterate { - static_cast(cb_v)->Call(std::string(header.key().getStringView()), - std::string(header.value().getStringView())); - return HeaderMap::Iterate::Continue; - }, - &cb); + headers.iterate(cb.asIterateCb()); } } -// Validate that TestHeaderMapImpl copy 
construction and assignment works. This is a +// Validate that TestRequestHeaderMapImpl copy construction and assignment works. This is a // regression for where we were missing a valid copy constructor and had the // default (dangerous) move semantics takeover. -TEST(HeaderMapImplTest, TestHeaderMapImplyCopy) { - TestHeaderMapImpl foo; +TEST(HeaderMapImplTest, TestRequestHeaderMapImplyCopy) { + TestRequestHeaderMapImpl foo; foo.addCopy(LowerCaseString("foo"), "bar"); - auto headers = std::make_unique(foo); + auto headers = std::make_unique(foo); EXPECT_EQ("bar", headers->get(LowerCaseString("foo"))->value().getStringView()); - TestHeaderMapImpl baz{{"foo", "baz"}}; + TestRequestHeaderMapImpl baz{{"foo", "baz"}}; baz = *headers; EXPECT_EQ("bar", baz.get(LowerCaseString("foo"))->value().getStringView()); - const TestHeaderMapImpl& baz2 = baz; + const TestRequestHeaderMapImpl& baz2 = baz; baz = baz2; EXPECT_EQ("bar", baz.get(LowerCaseString("foo"))->value().getStringView()); } @@ -1239,7 +1222,7 @@ TEST(HeaderMapImplTest, ClearHeaderMap) { // Add inline and clear. 
headers.setContentLength(5); - EXPECT_EQ("5", headers.ContentLength()->value().getStringView()); + EXPECT_EQ("5", headers.getContentLengthValue()); EXPECT_EQ(1UL, headers.size()); EXPECT_FALSE(headers.empty()); headers.clear(); @@ -1316,5 +1299,11 @@ TEST(HeaderMapImplTest, InlineHeaderByteSize) { } } +TEST(HeaderMapImplTest, ValidHeaderString) { + EXPECT_TRUE(validHeaderString("abc")); + EXPECT_FALSE(validHeaderString(absl::string_view("a\000bc", 4))); + EXPECT_FALSE(validHeaderString("abc\n")); +} + } // namespace Http } // namespace Envoy diff --git a/test/common/http/header_utility_test.cc b/test/common/http/header_utility_test.cc index 55d2056aab78f..dc8c831c650d1 100644 --- a/test/common/http/header_utility_test.cc +++ b/test/common/http/header_utility_test.cc @@ -2,6 +2,7 @@ #include #include "envoy/config/route/v3/route_components.pb.h" +#include "envoy/http/protocol.h" #include "envoy/json/json_object.h" #include "common/http/header_utility.h" @@ -20,6 +21,56 @@ envoy::config::route::v3::HeaderMatcher parseHeaderMatcherFromYaml(const std::st return header_matcher; } +class HeaderUtilityTest : public testing::Test { +public: + const HeaderEntry& hostHeaderEntry(const std::string& host_value, bool set_connect = false) { + headers_.setHost(host_value); + if (set_connect) { + headers_.setMethod(Http::Headers::get().MethodValues.Connect); + } + return *headers_.Host(); + } + TestRequestHeaderMapImpl headers_; +}; + +// Port's part from host header get removed +TEST_F(HeaderUtilityTest, RemovePortsFromHost) { + const std::vector> host_headers{ + {"localhost", "localhost"}, // w/o port part + {"localhost:443", "localhost"}, // name w/ port + {"", ""}, // empty + {":443", ""}, // just port + {"192.168.1.1", "192.168.1.1"}, // ipv4 + {"192.168.1.1:443", "192.168.1.1"}, // ipv4 w/ port + {"[fc00::1]:443", "[fc00::1]"}, // ipv6 w/ port + {"[fc00::1]", "[fc00::1]"}, // ipv6 + {":", ":"}, // malformed string #1 + {"]:", "]:"}, // malformed string #2 + {":abc", ":abc"}, 
// malformed string #3 + {"localhost:80", "localhost:80"}, // port not matching w/ hostname + {"192.168.1.1:80", "192.168.1.1:80"}, // port not matching w/ ipv4 + {"[fc00::1]:80", "[fc00::1]:80"} // port not matching w/ ipv6 + }; + + for (const auto& host_pair : host_headers) { + auto& host_header = hostHeaderEntry(host_pair.first); + HeaderUtility::stripPortFromHost(headers_, 443); + EXPECT_EQ(host_header.value().getStringView(), host_pair.second); + } +} + +// Port's part from host header won't be removed if method is "connect" +TEST_F(HeaderUtilityTest, RemovePortsFromHostConnect) { + const std::vector> host_headers{ + {"localhost:443", "localhost:443"}, + }; + for (const auto& host_pair : host_headers) { + auto& host_header = hostHeaderEntry(host_pair.first, true); + HeaderUtility::stripPortFromHost(headers_, 443); + EXPECT_EQ(host_header.value().getStringView(), host_pair.second); + } +} + TEST(HeaderDataConstructorTest, NoSpecifierSet) { const std::string yaml = R"EOF( name: test-header @@ -137,7 +188,8 @@ invert_match: true } TEST(HeaderDataConstructorTest, GetAllOfHeader) { - TestHeaderMapImpl headers{{"foo", "val1"}, {"bar", "bar2"}, {"foo", "eep, bar"}, {"foo", ""}}; + TestRequestHeaderMapImpl headers{ + {"foo", "val1"}, {"bar", "bar2"}, {"foo", "eep, bar"}, {"foo", ""}}; std::vector foo_out; Http::HeaderUtility::getAllOfHeader(headers, "foo", foo_out); @@ -157,7 +209,7 @@ TEST(HeaderDataConstructorTest, GetAllOfHeader) { } TEST(MatchHeadersTest, MayMatchOneOrMoreRequestHeader) { - TestHeaderMapImpl headers{{"some-header", "a"}, {"other-header", "b"}}; + TestRequestHeaderMapImpl headers{{"some-header", "a"}, {"other-header", "b"}}; const std::string yaml = R"EOF( name: match-header @@ -176,13 +228,13 @@ regex_match: (a|b) } TEST(MatchHeadersTest, MustMatchAllHeaderData) { - TestHeaderMapImpl matching_headers_1{{"match-header-A", "1"}, {"match-header-B", "2"}}; - TestHeaderMapImpl matching_headers_2{ + TestRequestHeaderMapImpl 
matching_headers_1{{"match-header-A", "1"}, {"match-header-B", "2"}}; + TestRequestHeaderMapImpl matching_headers_2{ {"match-header-A", "3"}, {"match-header-B", "4"}, {"match-header-C", "5"}}; - TestHeaderMapImpl unmatching_headers_1{{"match-header-A", "6"}}; - TestHeaderMapImpl unmatching_headers_2{{"match-header-B", "7"}}; - TestHeaderMapImpl unmatching_headers_3{{"match-header-A", "8"}, {"match-header-C", "9"}}; - TestHeaderMapImpl unmatching_headers_4{{"match-header-C", "10"}, {"match-header-D", "11"}}; + TestRequestHeaderMapImpl unmatching_headers_1{{"match-header-A", "6"}}; + TestRequestHeaderMapImpl unmatching_headers_2{{"match-header-B", "7"}}; + TestRequestHeaderMapImpl unmatching_headers_3{{"match-header-A", "8"}, {"match-header-C", "9"}}; + TestRequestHeaderMapImpl unmatching_headers_4{{"match-header-C", "10"}, {"match-header-D", "11"}}; const std::string yamlA = R"EOF( name: match-header-A @@ -206,8 +258,8 @@ name: match-header-B } TEST(MatchHeadersTest, HeaderPresence) { - TestHeaderMapImpl matching_headers{{"match-header", "value"}}; - TestHeaderMapImpl unmatching_headers{{"other-header", "value"}}; + TestRequestHeaderMapImpl matching_headers{{"match-header", "value"}}; + TestRequestHeaderMapImpl unmatching_headers{{"other-header", "value"}}; const std::string yaml = R"EOF( name: match-header )EOF"; @@ -220,9 +272,9 @@ name: match-header } TEST(MatchHeadersTest, HeaderExactMatch) { - TestHeaderMapImpl matching_headers{{"match-header", "match-value"}}; - TestHeaderMapImpl unmatching_headers{{"match-header", "other-value"}, - {"other-header", "match-value"}}; + TestRequestHeaderMapImpl matching_headers{{"match-header", "match-value"}}; + TestRequestHeaderMapImpl unmatching_headers{{"match-header", "other-value"}, + {"other-header", "match-value"}}; const std::string yaml = R"EOF( name: match-header exact_match: match-value @@ -236,9 +288,9 @@ exact_match: match-value } TEST(MatchHeadersTest, HeaderExactMatchInverse) { - TestHeaderMapImpl 
matching_headers{{"match-header", "other-value"}, - {"other-header", "match-value"}}; - TestHeaderMapImpl unmatching_headers{{"match-header", "match-value"}}; + TestRequestHeaderMapImpl matching_headers{{"match-header", "other-value"}, + {"other-header", "match-value"}}; + TestRequestHeaderMapImpl unmatching_headers{{"match-header", "match-value"}}; const std::string yaml = R"EOF( name: match-header @@ -254,8 +306,9 @@ invert_match: true } TEST(MatchHeadersTest, HeaderRegexMatch) { - TestHeaderMapImpl matching_headers{{"match-header", "123"}}; - TestHeaderMapImpl unmatching_headers{{"match-header", "1234"}, {"match-header", "123.456"}}; + TestRequestHeaderMapImpl matching_headers{{"match-header", "123"}}; + TestRequestHeaderMapImpl unmatching_headers{{"match-header", "1234"}, + {"match-header", "123.456"}}; const std::string yaml = R"EOF( name: match-header regex_match: \d{3} @@ -269,8 +322,9 @@ regex_match: \d{3} } TEST(MatchHeadersTest, HeaderSafeRegexMatch) { - TestHeaderMapImpl matching_headers{{"match-header", "123"}}; - TestHeaderMapImpl unmatching_headers{{"match-header", "1234"}, {"match-header", "123.456"}}; + TestRequestHeaderMapImpl matching_headers{{"match-header", "123"}}; + TestRequestHeaderMapImpl unmatching_headers{{"match-header", "1234"}, + {"match-header", "123.456"}}; const std::string yaml = R"EOF( name: match-header safe_regex_match: @@ -286,8 +340,8 @@ name: match-header } TEST(MatchHeadersTest, HeaderRegexInverseMatch) { - TestHeaderMapImpl matching_headers{{"match-header", "1234"}, {"match-header", "123.456"}}; - TestHeaderMapImpl unmatching_headers{{"match-header", "123"}}; + TestRequestHeaderMapImpl matching_headers{{"match-header", "1234"}, {"match-header", "123.456"}}; + TestRequestHeaderMapImpl unmatching_headers{{"match-header", "123"}}; const std::string yaml = R"EOF( name: match-header @@ -303,11 +357,11 @@ invert_match: true } TEST(MatchHeadersTest, HeaderRangeMatch) { - TestHeaderMapImpl matching_headers{{"match-header", "-1"}}; - 
TestHeaderMapImpl unmatching_headers{{"match-header", "0"}, - {"match-header", "somestring"}, - {"match-header", "10.9"}, - {"match-header", "-1somestring"}}; + TestRequestHeaderMapImpl matching_headers{{"match-header", "-1"}}; + TestRequestHeaderMapImpl unmatching_headers{{"match-header", "0"}, + {"match-header", "somestring"}, + {"match-header", "10.9"}, + {"match-header", "-1somestring"}}; const std::string yaml = R"EOF( name: match-header range_match: @@ -323,11 +377,11 @@ name: match-header } TEST(MatchHeadersTest, HeaderRangeInverseMatch) { - TestHeaderMapImpl matching_headers{{"match-header", "0"}, - {"match-header", "somestring"}, - {"match-header", "10.9"}, - {"match-header", "-1somestring"}}; - TestHeaderMapImpl unmatching_headers{{"match-header", "-1"}}; + TestRequestHeaderMapImpl matching_headers{{"match-header", "0"}, + {"match-header", "somestring"}, + {"match-header", "10.9"}, + {"match-header", "-1somestring"}}; + TestRequestHeaderMapImpl unmatching_headers{{"match-header", "-1"}}; const std::string yaml = R"EOF( name: match-header @@ -345,9 +399,9 @@ invert_match: true } TEST(MatchHeadersTest, HeaderPresentMatch) { - TestHeaderMapImpl matching_headers{{"match-header", "123"}}; - TestHeaderMapImpl unmatching_headers{{"nonmatch-header", "1234"}, - {"other-nonmatch-header", "123.456"}}; + TestRequestHeaderMapImpl matching_headers{{"match-header", "123"}}; + TestRequestHeaderMapImpl unmatching_headers{{"nonmatch-header", "1234"}, + {"other-nonmatch-header", "123.456"}}; const std::string yaml = R"EOF( name: match-header @@ -362,9 +416,9 @@ present_match: true } TEST(MatchHeadersTest, HeaderPresentInverseMatch) { - TestHeaderMapImpl unmatching_headers{{"match-header", "123"}}; - TestHeaderMapImpl matching_headers{{"nonmatch-header", "1234"}, - {"other-nonmatch-header", "123.456"}}; + TestRequestHeaderMapImpl unmatching_headers{{"match-header", "123"}}; + TestRequestHeaderMapImpl matching_headers{{"nonmatch-header", "1234"}, + {"other-nonmatch-header", 
"123.456"}}; const std::string yaml = R"EOF( name: match-header @@ -380,8 +434,8 @@ invert_match: true } TEST(MatchHeadersTest, HeaderPrefixMatch) { - TestHeaderMapImpl matching_headers{{"match-header", "value123"}}; - TestHeaderMapImpl unmatching_headers{{"match-header", "123value"}}; + TestRequestHeaderMapImpl matching_headers{{"match-header", "value123"}}; + TestRequestHeaderMapImpl unmatching_headers{{"match-header", "123value"}}; const std::string yaml = R"EOF( name: match-header @@ -396,8 +450,8 @@ prefix_match: value } TEST(MatchHeadersTest, HeaderPrefixInverseMatch) { - TestHeaderMapImpl unmatching_headers{{"match-header", "value123"}}; - TestHeaderMapImpl matching_headers{{"match-header", "123value"}}; + TestRequestHeaderMapImpl unmatching_headers{{"match-header", "value123"}}; + TestRequestHeaderMapImpl matching_headers{{"match-header", "123value"}}; const std::string yaml = R"EOF( name: match-header @@ -413,8 +467,8 @@ invert_match: true } TEST(MatchHeadersTest, HeaderSuffixMatch) { - TestHeaderMapImpl matching_headers{{"match-header", "123value"}}; - TestHeaderMapImpl unmatching_headers{{"match-header", "value123"}}; + TestRequestHeaderMapImpl matching_headers{{"match-header", "123value"}}; + TestRequestHeaderMapImpl unmatching_headers{{"match-header", "value123"}}; const std::string yaml = R"EOF( name: match-header @@ -429,8 +483,8 @@ suffix_match: value } TEST(MatchHeadersTest, HeaderSuffixInverseMatch) { - TestHeaderMapImpl unmatching_headers{{"match-header", "123value"}}; - TestHeaderMapImpl matching_headers{{"match-header", "value123"}}; + TestRequestHeaderMapImpl unmatching_headers{{"match-header", "123value"}}; + TestRequestHeaderMapImpl matching_headers{{"match-header", "value123"}}; const std::string yaml = R"EOF( name: match-header @@ -469,20 +523,35 @@ TEST(HeaderIsValidTest, AuthorityIsValid) { EXPECT_FALSE(HeaderUtility::authorityIsValid("illegal{}")); } +TEST(HeaderIsValidTest, IsConnect) { + 
EXPECT_TRUE(HeaderUtility::isConnect(Http::TestRequestHeaderMapImpl{{":method", "CONNECT"}})); + EXPECT_FALSE(HeaderUtility::isConnect(Http::TestRequestHeaderMapImpl{{":method", "GET"}})); + EXPECT_FALSE(HeaderUtility::isConnect(Http::TestRequestHeaderMapImpl{})); +} + +TEST(HeaderIsValidTest, IsConnectResponse) { + RequestHeaderMapPtr connect_request{new TestRequestHeaderMapImpl{{":method", "CONNECT"}}}; + RequestHeaderMapPtr get_request{new TestRequestHeaderMapImpl{{":method", "GET"}}}; + TestResponseHeaderMapImpl success_response{{":status", "200"}}; + TestResponseHeaderMapImpl failure_response{{":status", "500"}}; + + EXPECT_TRUE(HeaderUtility::isConnectResponse(connect_request.get(), success_response)); + EXPECT_FALSE(HeaderUtility::isConnectResponse(connect_request.get(), failure_response)); + EXPECT_FALSE(HeaderUtility::isConnectResponse(nullptr, success_response)); + EXPECT_FALSE(HeaderUtility::isConnectResponse(get_request.get(), success_response)); +} + TEST(HeaderAddTest, HeaderAdd) { - TestHeaderMapImpl headers{{"myheader1", "123value"}}; - TestHeaderMapImpl headers_to_add{{"myheader2", "456value"}}; + TestRequestHeaderMapImpl headers{{"myheader1", "123value"}}; + TestRequestHeaderMapImpl headers_to_add{{"myheader2", "456value"}}; HeaderUtility::addHeaders(headers, headers_to_add); - headers_to_add.iterate( - [](const Http::HeaderEntry& entry, void* context) -> Http::HeaderMap::Iterate { - TestHeaderMapImpl* headers = static_cast(context); - Http::LowerCaseString lower_key{std::string(entry.key().getStringView())}; - EXPECT_EQ(entry.value().getStringView(), headers->get(lower_key)->value().getStringView()); - return Http::HeaderMap::Iterate::Continue; - }, - &headers); + headers_to_add.iterate([&headers](const Http::HeaderEntry& entry) -> Http::HeaderMap::Iterate { + Http::LowerCaseString lower_key{std::string(entry.key().getStringView())}; + EXPECT_EQ(entry.value().getStringView(), headers.get(lower_key)->value().getStringView()); + return 
Http::HeaderMap::Iterate::Continue; + }); } TEST(HeaderIsValidTest, HeaderNameContainsUnderscore) { @@ -493,5 +562,25 @@ TEST(HeaderIsValidTest, HeaderNameContainsUnderscore) { EXPECT_TRUE(HeaderUtility::headerNameContainsUnderscore("x_something")); } +TEST(PercentEncoding, ShouldCloseConnection) { + EXPECT_TRUE(HeaderUtility::shouldCloseConnection(Protocol::Http10, + TestRequestHeaderMapImpl{{"foo", "bar"}})); + EXPECT_FALSE(HeaderUtility::shouldCloseConnection( + Protocol::Http10, TestRequestHeaderMapImpl{{"connection", "keep-alive"}})); + EXPECT_FALSE(HeaderUtility::shouldCloseConnection( + Protocol::Http10, TestRequestHeaderMapImpl{{"connection", "foo, keep-alive"}})); + + EXPECT_FALSE(HeaderUtility::shouldCloseConnection(Protocol::Http11, + TestRequestHeaderMapImpl{{"foo", "bar"}})); + EXPECT_TRUE(HeaderUtility::shouldCloseConnection( + Protocol::Http11, TestRequestHeaderMapImpl{{"connection", "close"}})); + EXPECT_TRUE(HeaderUtility::shouldCloseConnection( + Protocol::Http11, TestRequestHeaderMapImpl{{"connection", "te,close"}})); + EXPECT_TRUE(HeaderUtility::shouldCloseConnection( + Protocol::Http11, TestRequestHeaderMapImpl{{"proxy-connection", "close"}})); + EXPECT_TRUE(HeaderUtility::shouldCloseConnection( + Protocol::Http11, TestRequestHeaderMapImpl{{"proxy-connection", "foo,close"}})); +} + } // namespace Http } // namespace Envoy diff --git a/test/common/http/http1/BUILD b/test/common/http/http1/BUILD index 76ef5380d85a6..dbcdcd4d4c8b1 100644 --- a/test/common/http/http1/BUILD +++ b/test/common/http/http1/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( @@ -26,6 +26,7 @@ envoy_cc_test( "//source/common/event:dispatcher_lib", "//source/common/http:exception_lib", "//source/common/http:header_map_lib", + "//source/common/http/http1:codec_legacy_lib", "//source/common/http/http1:codec_lib", 
"//test/common/stats:stat_test_utility_lib", "//test/mocks/buffer:buffer_mocks", @@ -61,30 +62,7 @@ envoy_cc_test( "//test/mocks/runtime:runtime_mocks", "//test/mocks/upstream:upstream_mocks", "//test/test_common:simulated_time_system_lib", - "//test/test_common:utility_lib", - ], -) - -envoy_cc_test( - name = "conn_pool_legacy_test", - srcs = ["conn_pool_legacy_test.cc"], - deps = [ - "//source/common/buffer:buffer_lib", - "//source/common/event:dispatcher_lib", - "//source/common/http:codec_client_lib", - "//source/common/http/http1:conn_pool_legacy_lib", - "//source/common/network:utility_lib", - "//source/common/upstream:upstream_includes", - "//source/common/upstream:upstream_lib", - "//test/common/http:common_lib", - "//test/common/upstream:utility_lib", - "//test/mocks/buffer:buffer_mocks", - "//test/mocks/event:event_mocks", - "//test/mocks/http:http_mocks", - "//test/mocks/network:network_mocks", - "//test/mocks/runtime:runtime_mocks", - "//test/mocks/upstream:upstream_mocks", - "//test/test_common:simulated_time_system_lib", + "//test/test_common:test_runtime_lib", "//test/test_common:utility_lib", ], ) diff --git a/test/common/http/http1/codec_impl_test.cc b/test/common/http/http1/codec_impl_test.cc index 5110bd6ccd937..f6da689eacd62 100644 --- a/test/common/http/http1/codec_impl_test.cc +++ b/test/common/http/http1/codec_impl_test.cc @@ -9,6 +9,7 @@ #include "common/http/exception.h" #include "common/http/header_map_impl.h" #include "common/http/http1/codec_impl.h" +#include "common/http/http1/codec_impl_legacy.h" #include "common/runtime/runtime_impl.h" #include "test/common/stats/stat_test_utility.h" @@ -33,7 +34,6 @@ using testing::StrictMock; namespace Envoy { namespace Http { -namespace Http1 { namespace { std::string createHeaderFragment(int num_headers) { // Create a header field with num_headers headers. 
@@ -44,21 +44,42 @@ std::string createHeaderFragment(int num_headers) { return headers; } -Buffer::OwnedImpl createBufferWithOneByteSlices(absl::string_view input) { +Buffer::OwnedImpl createBufferWithNByteSlices(absl::string_view input, size_t max_slice_size) { Buffer::OwnedImpl buffer; - for (const char& c : input) { - buffer.appendSliceForTest(&c, 1); + for (size_t offset = 0; offset < input.size(); offset += max_slice_size) { + buffer.appendSliceForTest(input.substr(offset, max_slice_size)); } + // Verify that the buffer contains the right number of slices. + ASSERT(buffer.getRawSlices().size() == (input.size() + max_slice_size - 1) / max_slice_size); return buffer; } } // namespace -class Http1ServerConnectionImplTest : public testing::Test { +class Http1CodecTestBase { +protected: + Http::Http1::CodecStats& http1CodecStats() { + return Http::Http1::CodecStats::atomicGet(http1_codec_stats_, store_); + } + + Stats::TestUtil::TestStore store_; + Http::Http1::CodecStats::AtomicPtr http1_codec_stats_; +}; + +class Http1ServerConnectionImplTest : public Http1CodecTestBase, + public testing::TestWithParam { public: + bool testingNewCodec() { return GetParam(); } + void initialize() { - codec_ = std::make_unique( - connection_, store_, callbacks_, codec_settings_, max_request_headers_kb_, - max_request_headers_count_, headers_with_underscores_action_); + if (testingNewCodec()) { + codec_ = std::make_unique( + connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_, + max_request_headers_count_, headers_with_underscores_action_); + } else { + codec_ = std::make_unique( + connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_, + max_request_headers_count_, headers_with_underscores_action_); + } } NiceMock connection_; @@ -67,7 +88,7 @@ class Http1ServerConnectionImplTest : public testing::Test { Http::ServerConnectionPtr codec_; void expectHeadersTest(Protocol p, bool allow_absolute_url, Buffer::OwnedImpl& 
buffer, - TestHeaderMapImpl& expected_headers); + TestRequestHeaderMapImpl& expected_headers); void expect400(Protocol p, bool allow_absolute_url, Buffer::OwnedImpl& buffer, absl::string_view details = ""); void testRequestHeadersExceedLimit(std::string header_string, absl::string_view details = ""); @@ -78,8 +99,16 @@ class Http1ServerConnectionImplTest : public testing::Test { // Send the request, and validate the received request headers. // Then send a response just to clean up. - void sendAndValidateRequestAndSendResponse(absl::string_view raw_request, - const TestHeaderMapImpl& expected_request_headers) { + void + sendAndValidateRequestAndSendResponse(absl::string_view raw_request, + const TestRequestHeaderMapImpl& expected_request_headers) { + Buffer::OwnedImpl buffer(raw_request); + sendAndValidateRequestAndSendResponse(buffer, expected_request_headers); + } + + void + sendAndValidateRequestAndSendResponse(Buffer::Instance& buffer, + const TestRequestHeaderMapImpl& expected_request_headers) { NiceMock decoder; Http::ResponseEncoder* response_encoder = nullptr; EXPECT_CALL(callbacks_, newStream(_, _)) @@ -88,8 +117,8 @@ class Http1ServerConnectionImplTest : public testing::Test { return decoder; })); EXPECT_CALL(decoder, decodeHeaders_(HeaderMapEqual(&expected_request_headers), true)); - Buffer::OwnedImpl buffer(raw_request); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); EXPECT_EQ(0U, buffer.length()); response_encoder->encodeHeaders(TestResponseHeaderMapImpl{{":status", "200"}}, true); } @@ -99,7 +128,6 @@ class Http1ServerConnectionImplTest : public testing::Test { uint32_t max_request_headers_count_{Http::DEFAULT_MAX_HEADERS_COUNT}; envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction headers_with_underscores_action_{envoy::config::core::v3::HttpProtocolOptions::ALLOW}; - Stats::TestUtil::TestStore store_; }; void Http1ServerConnectionImplTest::expect400(Protocol p, bool 
allow_absolute_url, @@ -107,14 +135,17 @@ void Http1ServerConnectionImplTest::expect400(Protocol p, bool allow_absolute_ur absl::string_view details) { InSequence sequence; - std::string output; - ON_CALL(connection_, write(_, _)).WillByDefault(AddBufferToString(&output)); - if (allow_absolute_url) { codec_settings_.allow_absolute_url_ = allow_absolute_url; - codec_ = std::make_unique( - connection_, store_, callbacks_, codec_settings_, max_request_headers_kb_, - max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW); + if (testingNewCodec()) { + codec_ = std::make_unique( + connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_, + max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW); + } else { + codec_ = std::make_unique( + connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_, + max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW); + } } MockRequestDecoder decoder; @@ -125,8 +156,9 @@ void Http1ServerConnectionImplTest::expect400(Protocol p, bool allow_absolute_ur return decoder; })); - EXPECT_THROW(codec_->dispatch(buffer), CodecProtocolException); - EXPECT_EQ("HTTP/1.1 400 Bad Request\r\ncontent-length: 0\r\nconnection: close\r\n\r\n", output); + EXPECT_CALL(decoder, sendLocalReply(_, Http::Code::BadRequest, "Bad Request", _, _, _)); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(isCodecProtocolError(status)); EXPECT_EQ(p, codec_->protocol()); if (!details.empty()) { EXPECT_EQ(details, response_encoder->getStream().responseDetails()); @@ -135,22 +167,29 @@ void Http1ServerConnectionImplTest::expect400(Protocol p, bool allow_absolute_ur void Http1ServerConnectionImplTest::expectHeadersTest(Protocol p, bool allow_absolute_url, Buffer::OwnedImpl& buffer, - TestHeaderMapImpl& expected_headers) { + TestRequestHeaderMapImpl& expected_headers) { InSequence sequence; // Make a new 'codec' with the right settings if 
(allow_absolute_url) { codec_settings_.allow_absolute_url_ = allow_absolute_url; - codec_ = std::make_unique( - connection_, store_, callbacks_, codec_settings_, max_request_headers_kb_, - max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW); + if (testingNewCodec()) { + codec_ = std::make_unique( + connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_, + max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW); + } else { + codec_ = std::make_unique( + connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_, + max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW); + } } MockRequestDecoder decoder; EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); EXPECT_CALL(decoder, decodeHeaders_(HeaderMapEqual(&expected_headers), true)); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); EXPECT_EQ(0U, buffer.length()); EXPECT_EQ(p, codec_->protocol()); } @@ -161,9 +200,15 @@ void Http1ServerConnectionImplTest::expectTrailersTest(bool enable_trailers) { // Make a new 'codec' with the right settings if (enable_trailers) { codec_settings_.enable_trailers_ = enable_trailers; - codec_ = std::make_unique( - connection_, store_, callbacks_, codec_settings_, max_request_headers_kb_, - max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW); + if (testingNewCodec()) { + codec_ = std::make_unique( + connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_, + max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW); + } else { + codec_ = std::make_unique( + connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_, + max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW); + } } InSequence sequence; @@ -182,11 +227,13 @@ void 
Http1ServerConnectionImplTest::expectTrailersTest(bool enable_trailers) { EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data), false)); EXPECT_CALL(decoder, decodeData(_, true)); } + Buffer::OwnedImpl buffer("POST / HTTP/1.1\r\ntransfer-encoding: chunked\r\n\r\n" "6\r\nHello \r\n" "5\r\nWorld\r\n" "0\r\nhello: world\r\nsecond: header\r\n\r\n"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); EXPECT_EQ(0U, buffer.length()); } @@ -195,9 +242,15 @@ void Http1ServerConnectionImplTest::testTrailersExceedLimit(std::string trailer_ initialize(); // Make a new 'codec' with the right settings codec_settings_.enable_trailers_ = enable_trailers; - codec_ = std::make_unique( - connection_, store_, callbacks_, codec_settings_, max_request_headers_kb_, - max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW); + if (testingNewCodec()) { + codec_ = std::make_unique( + connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_, + max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW); + } else { + codec_ = std::make_unique( + connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_, + max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW); + } std::string exception_reason; NiceMock decoder; EXPECT_CALL(callbacks_, newStream(_, _)) @@ -216,15 +269,19 @@ void Http1ServerConnectionImplTest::testTrailersExceedLimit(std::string trailer_ "Transfer-Encoding: chunked\r\n\r\n" "4\r\n" "body\r\n0\r\n"); - codec_->dispatch(buffer); - buffer = Buffer::OwnedImpl(trailer_string + "\r\n\r\n"); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); + buffer = Buffer::OwnedImpl(trailer_string); if (enable_trailers) { - EXPECT_THROW_WITH_MESSAGE(codec_->dispatch(buffer), EnvoyException, - "trailers size exceeds limit"); + EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _)); + status = 
codec_->dispatch(buffer); + EXPECT_TRUE(isCodecProtocolError(status)); + EXPECT_EQ(status.message(), "trailers size exceeds limit"); } else { // If trailers are not enabled, we expect Envoy to simply skip over the large // trailers as if nothing has happened! - codec_->dispatch(buffer); + status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); } } void Http1ServerConnectionImplTest::testRequestHeadersExceedLimit(std::string header_string, @@ -241,9 +298,13 @@ void Http1ServerConnectionImplTest::testRequestHeadersExceedLimit(std::string he })); Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\n"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); buffer = Buffer::OwnedImpl(header_string + "\r\n"); - EXPECT_THROW_WITH_MESSAGE(codec_->dispatch(buffer), EnvoyException, "headers size exceeds limit"); + EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _)); + status = codec_->dispatch(buffer); + EXPECT_TRUE(isCodecProtocolError(status)); + EXPECT_EQ(status.message(), "headers size exceeds limit"); if (!details.empty()) { EXPECT_EQ(details, response_encoder->getStream().responseDetails()); } @@ -261,12 +322,18 @@ void Http1ServerConnectionImplTest::testRequestHeadersAccepted(std::string heade })); Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\n"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); buffer = Buffer::OwnedImpl(header_string + "\r\n"); - codec_->dispatch(buffer); + status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); } -TEST_F(Http1ServerConnectionImplTest, EmptyHeader) { +INSTANTIATE_TEST_SUITE_P(Codecs, Http1ServerConnectionImplTest, testing::Bool(), + [](const testing::TestParamInfo& param) { + return param.param ? 
"New" : "Legacy"; + }); + +TEST_P(Http1ServerConnectionImplTest, EmptyHeader) { initialize(); InSequence sequence; @@ -274,7 +341,7 @@ TEST_F(Http1ServerConnectionImplTest, EmptyHeader) { MockRequestDecoder decoder; EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); - TestHeaderMapImpl expected_headers{ + TestRequestHeaderMapImpl expected_headers{ {"Test", ""}, {"Hello", "World"}, {":path", "/"}, @@ -283,13 +350,14 @@ TEST_F(Http1ServerConnectionImplTest, EmptyHeader) { EXPECT_CALL(decoder, decodeHeaders_(HeaderMapEqual(&expected_headers), true)); Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\nTest:\r\nHello: World\r\n\r\n"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); EXPECT_EQ(0U, buffer.length()); } // We support the identity encoding, but because it does not end in chunked encoding we reject it // per RFC 7230 Section 3.3.3 -TEST_F(Http1ServerConnectionImplTest, IdentityEncodingNoChunked) { +TEST_P(Http1ServerConnectionImplTest, IdentityEncodingNoChunked) { initialize(); InSequence sequence; @@ -298,11 +366,13 @@ TEST_F(Http1ServerConnectionImplTest, IdentityEncodingNoChunked) { EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\ntransfer-encoding: identity\r\n\r\n"); - EXPECT_THROW_WITH_MESSAGE(codec_->dispatch(buffer), CodecProtocolException, - "http/1.1 protocol error: unsupported transfer encoding"); + EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _)); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(isCodecProtocolError(status)); + EXPECT_EQ(status.message(), "http/1.1 protocol error: unsupported transfer encoding"); } -TEST_F(Http1ServerConnectionImplTest, UnsupportedEncoding) { +TEST_P(Http1ServerConnectionImplTest, UnsupportedEncoding) { initialize(); InSequence sequence; @@ -311,12 +381,14 @@ TEST_F(Http1ServerConnectionImplTest, UnsupportedEncoding) { EXPECT_CALL(callbacks_, newStream(_, 
_)).WillOnce(ReturnRef(decoder)); Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\ntransfer-encoding: gzip\r\n\r\n"); - EXPECT_THROW_WITH_MESSAGE(codec_->dispatch(buffer), CodecProtocolException, - "http/1.1 protocol error: unsupported transfer encoding"); + EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _)); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(isCodecProtocolError(status)); + EXPECT_EQ(status.message(), "http/1.1 protocol error: unsupported transfer encoding"); } // Verify that data in the two body chunks is merged before the call to decodeData. -TEST_F(Http1ServerConnectionImplTest, ChunkedBody) { +TEST_P(Http1ServerConnectionImplTest, ChunkedBody) { initialize(); InSequence sequence; @@ -324,7 +396,7 @@ TEST_F(Http1ServerConnectionImplTest, ChunkedBody) { MockRequestDecoder decoder; EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); - TestHeaderMapImpl expected_headers{ + TestRequestHeaderMapImpl expected_headers{ {":path", "/"}, {":method", "POST"}, {"transfer-encoding", "chunked"}, @@ -340,13 +412,14 @@ TEST_F(Http1ServerConnectionImplTest, ChunkedBody) { "6\r\nHello \r\n" "5\r\nWorld\r\n" "0\r\n\r\n"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); EXPECT_EQ(0U, buffer.length()); } // Verify dispatch behavior when dispatching an incomplete chunk, and resumption of the parse via a // second dispatch. 
-TEST_F(Http1ServerConnectionImplTest, ChunkedBodySplitOverTwoDispatches) { +TEST_P(Http1ServerConnectionImplTest, ChunkedBodySplitOverTwoDispatches) { initialize(); InSequence sequence; @@ -354,7 +427,7 @@ TEST_F(Http1ServerConnectionImplTest, ChunkedBodySplitOverTwoDispatches) { MockRequestDecoder decoder; EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); - TestHeaderMapImpl expected_headers{ + TestRequestHeaderMapImpl expected_headers{ {":path", "/"}, {":method", "POST"}, {"transfer-encoding", "chunked"}, @@ -366,7 +439,8 @@ TEST_F(Http1ServerConnectionImplTest, ChunkedBodySplitOverTwoDispatches) { Buffer::OwnedImpl buffer("POST / HTTP/1.1\r\ntransfer-encoding: chunked\r\n\r\n" "6\r\nHello \r\n" "5\r\nWorl"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); EXPECT_EQ(0U, buffer.length()); // Process the rest of the body and final chunk. @@ -376,13 +450,14 @@ TEST_F(Http1ServerConnectionImplTest, ChunkedBodySplitOverTwoDispatches) { Buffer::OwnedImpl buffer2("d\r\n" "0\r\n\r\n"); - codec_->dispatch(buffer2); + status = codec_->dispatch(buffer2); + EXPECT_TRUE(status.ok()); EXPECT_EQ(0U, buffer2.length()); } // Verify that headers and chunked body are processed correctly and data is merged before the // decodeData call even if delivered in a buffer that holds 1 byte per slice. 
-TEST_F(Http1ServerConnectionImplTest, ChunkedBodyFragmentedBuffer) { +TEST_P(Http1ServerConnectionImplTest, ChunkedBodyFragmentedBuffer) { initialize(); InSequence sequence; @@ -390,7 +465,7 @@ TEST_F(Http1ServerConnectionImplTest, ChunkedBodyFragmentedBuffer) { MockRequestDecoder decoder; EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); - TestHeaderMapImpl expected_headers{ + TestRequestHeaderMapImpl expected_headers{ {":path", "/"}, {":method", "POST"}, {"transfer-encoding", "chunked"}, @@ -401,15 +476,17 @@ TEST_F(Http1ServerConnectionImplTest, ChunkedBodyFragmentedBuffer) { EXPECT_CALL(decoder, decodeData(_, true)); Buffer::OwnedImpl buffer = - createBufferWithOneByteSlices("POST / HTTP/1.1\r\ntransfer-encoding: chunked\r\n\r\n" - "6\r\nHello \r\n" - "5\r\nWorld\r\n" - "0\r\n\r\n"); - codec_->dispatch(buffer); + createBufferWithNByteSlices("POST / HTTP/1.1\r\ntransfer-encoding: chunked\r\n\r\n" + "6\r\nHello \r\n" + "5\r\nWorld\r\n" + "0\r\n\r\n", + 1); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); EXPECT_EQ(0U, buffer.length()); } -TEST_F(Http1ServerConnectionImplTest, ChunkedBodyCase) { +TEST_P(Http1ServerConnectionImplTest, ChunkedBodyCase) { initialize(); InSequence sequence; @@ -417,7 +494,7 @@ TEST_F(Http1ServerConnectionImplTest, ChunkedBodyCase) { MockRequestDecoder decoder; EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); - TestHeaderMapImpl expected_headers{ + TestRequestHeaderMapImpl expected_headers{ {":path", "/"}, {":method", "POST"}, {"transfer-encoding", "Chunked"}, @@ -429,13 +506,14 @@ TEST_F(Http1ServerConnectionImplTest, ChunkedBodyCase) { Buffer::OwnedImpl buffer( "POST / HTTP/1.1\r\ntransfer-encoding: Chunked\r\n\r\nb\r\nHello World\r\n0\r\n\r\n"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); EXPECT_EQ(0U, buffer.length()); } // Verify that body dispatch does not happen after detecting a parse error processing a chunk 
// header. -TEST_F(Http1ServerConnectionImplTest, InvalidChunkHeader) { +TEST_P(Http1ServerConnectionImplTest, InvalidChunkHeader) { initialize(); InSequence sequence; @@ -443,7 +521,7 @@ TEST_F(Http1ServerConnectionImplTest, InvalidChunkHeader) { MockRequestDecoder decoder; EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); - TestHeaderMapImpl expected_headers{ + TestRequestHeaderMapImpl expected_headers{ {":path", "/"}, {":method", "POST"}, {"transfer-encoding", "chunked"}, @@ -455,11 +533,13 @@ TEST_F(Http1ServerConnectionImplTest, InvalidChunkHeader) { "6\r\nHello \r\n" "invalid\r\nWorl"); - EXPECT_THROW_WITH_MESSAGE(codec_->dispatch(buffer), CodecProtocolException, - "http/1.1 protocol error: HPE_INVALID_CHUNK_SIZE"); + EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _)); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(isCodecProtocolError(status)); + EXPECT_EQ(status.message(), "http/1.1 protocol error: HPE_INVALID_CHUNK_SIZE"); } -TEST_F(Http1ServerConnectionImplTest, IdentityAndChunkedBody) { +TEST_P(Http1ServerConnectionImplTest, IdentityAndChunkedBody) { initialize(); InSequence sequence; @@ -469,14 +549,18 @@ TEST_F(Http1ServerConnectionImplTest, IdentityAndChunkedBody) { Buffer::OwnedImpl buffer("POST / HTTP/1.1\r\ntransfer-encoding: " "identity,chunked\r\n\r\nb\r\nHello World\r\n0\r\n\r\n"); - EXPECT_THROW_WITH_MESSAGE(codec_->dispatch(buffer), CodecProtocolException, - "http/1.1 protocol error: unsupported transfer encoding"); + + EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _)); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(isCodecProtocolError(status)); + EXPECT_EQ(status.message(), "http/1.1 protocol error: unsupported transfer encoding"); } -TEST_F(Http1ServerConnectionImplTest, HostWithLWS) { +TEST_P(Http1ServerConnectionImplTest, HostWithLWS) { initialize(); - TestHeaderMapImpl expected_headers{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}; + TestRequestHeaderMapImpl expected_headers{ 
+ {":authority", "host"}, {":path", "/"}, {":method", "GET"}}; // Regression test spaces before and after the host header value. sendAndValidateRequestAndSendResponse("GET / HTTP/1.1\r\nHost: host \r\n\r\n", expected_headers); @@ -490,7 +574,43 @@ TEST_F(Http1ServerConnectionImplTest, HostWithLWS) { "GET / HTTP/1.1\r\nHost: host \r\n\r\n", expected_headers); } -TEST_F(Http1ServerConnectionImplTest, Http10) { +// Regression test for https://github.com/envoyproxy/envoy/issues/10270. Linear whitespace at the +// beginning and end of a header value should be stripped. Whitespace in the middle should be +// preserved. +TEST_P(Http1ServerConnectionImplTest, InnerLWSIsPreserved) { + initialize(); + + // Header with many spaces surrounded by non-whitespace characters to ensure that dispatching is + // split across multiple dispatch calls. The threshold used here comes from Envoy preferring 16KB + // reads, but the important part is that the header value is split such that the pieces have + // leading and trailing whitespace characters. 
+ const std::string header_value_with_inner_lws = "v" + std::string(32 * 1024, ' ') + "v"; + TestRequestHeaderMapImpl expected_headers{{":authority", "host"}, + {":path", "/"}, + {":method", "GET"}, + {"header_field", header_value_with_inner_lws}}; + + { + // Regression test spaces in the middle are preserved + Buffer::OwnedImpl header_buffer = createBufferWithNByteSlices( + "GET / HTTP/1.1\r\nHost: host\r\nheader_field: " + header_value_with_inner_lws + "\r\n\r\n", + 16 * 1024); + EXPECT_EQ(3, header_buffer.getRawSlices().size()); + sendAndValidateRequestAndSendResponse(header_buffer, expected_headers); + } + + { + // Regression test spaces before and after are removed + Buffer::OwnedImpl header_buffer = createBufferWithNByteSlices( + "GET / HTTP/1.1\r\nHost: host\r\nheader_field: " + header_value_with_inner_lws + + " \r\n\r\n", + 16 * 1024); + EXPECT_EQ(3, header_buffer.getRawSlices().size()); + sendAndValidateRequestAndSendResponse(header_buffer, expected_headers); + } +} + +TEST_P(Http1ServerConnectionImplTest, Http10) { initialize(); InSequence sequence; @@ -498,33 +618,34 @@ TEST_F(Http1ServerConnectionImplTest, Http10) { MockRequestDecoder decoder; EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); - TestHeaderMapImpl expected_headers{{":path", "/"}, {":method", "GET"}}; + TestRequestHeaderMapImpl expected_headers{{":path", "/"}, {":method", "GET"}}; EXPECT_CALL(decoder, decodeHeaders_(HeaderMapEqual(&expected_headers), true)); Buffer::OwnedImpl buffer("GET / HTTP/1.0\r\n\r\n"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); EXPECT_EQ(0U, buffer.length()); EXPECT_EQ(Protocol::Http10, codec_->protocol()); } -TEST_F(Http1ServerConnectionImplTest, Http10AbsoluteNoOp) { +TEST_P(Http1ServerConnectionImplTest, Http10AbsoluteNoOp) { initialize(); - TestHeaderMapImpl expected_headers{{":path", "/"}, {":method", "GET"}}; + TestRequestHeaderMapImpl expected_headers{{":path", "/"}, {":method", 
"GET"}}; Buffer::OwnedImpl buffer("GET / HTTP/1.0\r\n\r\n"); expectHeadersTest(Protocol::Http10, true, buffer, expected_headers); } -TEST_F(Http1ServerConnectionImplTest, Http10Absolute) { +TEST_P(Http1ServerConnectionImplTest, Http10Absolute) { initialize(); - TestHeaderMapImpl expected_headers{ + TestRequestHeaderMapImpl expected_headers{ {":authority", "www.somewhere.com"}, {":path", "/foobar"}, {":method", "GET"}}; Buffer::OwnedImpl buffer("GET http://www.somewhere.com/foobar HTTP/1.0\r\n\r\n"); expectHeadersTest(Protocol::Http10, true, buffer, expected_headers); } -TEST_F(Http1ServerConnectionImplTest, Http10MultipleResponses) { +TEST_P(Http1ServerConnectionImplTest, Http10MultipleResponses) { initialize(); MockRequestDecoder decoder; @@ -540,7 +661,8 @@ TEST_F(Http1ServerConnectionImplTest, Http10MultipleResponses) { })); EXPECT_CALL(decoder, decodeHeaders_(_, true)); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); std::string output; ON_CALL(connection_, write(_, _)).WillByDefault(AddBufferToString(&output)); @@ -552,7 +674,7 @@ TEST_F(Http1ServerConnectionImplTest, Http10MultipleResponses) { // Now send an HTTP/1.1 request and make sure the protocol is tracked correctly. 
{ - TestHeaderMapImpl expected_headers{ + TestRequestHeaderMapImpl expected_headers{ {":authority", "www.somewhere.com"}, {":path", "/foobar"}, {":method", "GET"}}; Buffer::OwnedImpl buffer("GET /foobar HTTP/1.1\r\nHost: www.somewhere.com\r\n\r\n"); @@ -563,49 +685,50 @@ TEST_F(Http1ServerConnectionImplTest, Http10MultipleResponses) { return decoder; })); EXPECT_CALL(decoder, decodeHeaders_(_, true)); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); EXPECT_EQ(Protocol::Http11, codec_->protocol()); } } -TEST_F(Http1ServerConnectionImplTest, Http11AbsolutePath1) { +TEST_P(Http1ServerConnectionImplTest, Http11AbsolutePath1) { initialize(); - TestHeaderMapImpl expected_headers{ + TestRequestHeaderMapImpl expected_headers{ {":authority", "www.somewhere.com"}, {":path", "/"}, {":method", "GET"}}; Buffer::OwnedImpl buffer("GET http://www.somewhere.com/ HTTP/1.1\r\nHost: bah\r\n\r\n"); expectHeadersTest(Protocol::Http11, true, buffer, expected_headers); } -TEST_F(Http1ServerConnectionImplTest, Http11AbsolutePath2) { +TEST_P(Http1ServerConnectionImplTest, Http11AbsolutePath2) { initialize(); - TestHeaderMapImpl expected_headers{ + TestRequestHeaderMapImpl expected_headers{ {":authority", "www.somewhere.com"}, {":path", "/foo/bar"}, {":method", "GET"}}; Buffer::OwnedImpl buffer("GET http://www.somewhere.com/foo/bar HTTP/1.1\r\nHost: bah\r\n\r\n"); expectHeadersTest(Protocol::Http11, true, buffer, expected_headers); } -TEST_F(Http1ServerConnectionImplTest, Http11AbsolutePathWithPort) { +TEST_P(Http1ServerConnectionImplTest, Http11AbsolutePathWithPort) { initialize(); - TestHeaderMapImpl expected_headers{ + TestRequestHeaderMapImpl expected_headers{ {":authority", "www.somewhere.com:4532"}, {":path", "/foo/bar"}, {":method", "GET"}}; Buffer::OwnedImpl buffer( "GET http://www.somewhere.com:4532/foo/bar HTTP/1.1\r\nHost: bah\r\n\r\n"); expectHeadersTest(Protocol::Http11, true, buffer, expected_headers); } 
-TEST_F(Http1ServerConnectionImplTest, Http11AbsoluteEnabledNoOp) { +TEST_P(Http1ServerConnectionImplTest, Http11AbsoluteEnabledNoOp) { initialize(); - TestHeaderMapImpl expected_headers{ + TestRequestHeaderMapImpl expected_headers{ {":authority", "bah"}, {":path", "/foo/bar"}, {":method", "GET"}}; Buffer::OwnedImpl buffer("GET /foo/bar HTTP/1.1\r\nHost: bah\r\n\r\n"); expectHeadersTest(Protocol::Http11, true, buffer, expected_headers); } -TEST_F(Http1ServerConnectionImplTest, Http11InvalidRequest) { +TEST_P(Http1ServerConnectionImplTest, Http11InvalidRequest) { initialize(); // Invalid because www.somewhere.com is not an absolute path nor an absolute url @@ -613,12 +736,9 @@ TEST_F(Http1ServerConnectionImplTest, Http11InvalidRequest) { expect400(Protocol::Http11, true, buffer, "http1.codec_error"); } -TEST_F(Http1ServerConnectionImplTest, Http11InvalidTrailerPost) { +TEST_P(Http1ServerConnectionImplTest, Http11InvalidTrailerPost) { initialize(); - std::string output; - ON_CALL(connection_, write(_, _)).WillByDefault(AddBufferToString(&output)); - MockRequestDecoder decoder; EXPECT_CALL(callbacks_, newStream(_, _)) .WillOnce(Invoke([&](ResponseEncoder&, bool) -> RequestDecoder& { return decoder; })); @@ -635,34 +755,36 @@ TEST_F(Http1ServerConnectionImplTest, Http11InvalidTrailerPost) { "4\r\n" "body\r\n0\r\n" "badtrailer\r\n\r\n"); - EXPECT_THROW(codec_->dispatch(buffer), CodecProtocolException); - EXPECT_EQ("HTTP/1.1 400 Bad Request\r\ncontent-length: 0\r\nconnection: close\r\n\r\n", output); + + EXPECT_CALL(decoder, sendLocalReply(_, Http::Code::BadRequest, "Bad Request", _, _, _)); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(isCodecProtocolError(status)); } -TEST_F(Http1ServerConnectionImplTest, Http11AbsolutePathNoSlash) { +TEST_P(Http1ServerConnectionImplTest, Http11AbsolutePathNoSlash) { initialize(); - TestHeaderMapImpl expected_headers{ + TestRequestHeaderMapImpl expected_headers{ {":authority", "www.somewhere.com"}, {":path", "/"}, {":method", 
"GET"}}; Buffer::OwnedImpl buffer("GET http://www.somewhere.com HTTP/1.1\r\nHost: bah\r\n\r\n"); expectHeadersTest(Protocol::Http11, true, buffer, expected_headers); } -TEST_F(Http1ServerConnectionImplTest, Http11AbsolutePathBad) { +TEST_P(Http1ServerConnectionImplTest, Http11AbsolutePathBad) { initialize(); Buffer::OwnedImpl buffer("GET * HTTP/1.1\r\nHost: bah\r\n\r\n"); expect400(Protocol::Http11, true, buffer, "http1.invalid_url"); } -TEST_F(Http1ServerConnectionImplTest, Http11AbsolutePortTooLarge) { +TEST_P(Http1ServerConnectionImplTest, Http11AbsolutePortTooLarge) { initialize(); Buffer::OwnedImpl buffer("GET http://foobar.com:1000000 HTTP/1.1\r\nHost: bah\r\n\r\n"); expect400(Protocol::Http11, true, buffer); } -TEST_F(Http1ServerConnectionImplTest, SketchyConnectionHeader) { +TEST_P(Http1ServerConnectionImplTest, SketchyConnectionHeader) { initialize(); Buffer::OwnedImpl buffer( @@ -670,25 +792,25 @@ TEST_F(Http1ServerConnectionImplTest, SketchyConnectionHeader) { expect400(Protocol::Http11, true, buffer, "http1.connection_header_rejected"); } -TEST_F(Http1ServerConnectionImplTest, Http11RelativeOnly) { +TEST_P(Http1ServerConnectionImplTest, Http11RelativeOnly) { initialize(); - TestHeaderMapImpl expected_headers{ + TestRequestHeaderMapImpl expected_headers{ {":authority", "bah"}, {":path", "http://www.somewhere.com/"}, {":method", "GET"}}; Buffer::OwnedImpl buffer("GET http://www.somewhere.com/ HTTP/1.1\r\nHost: bah\r\n\r\n"); expectHeadersTest(Protocol::Http11, false, buffer, expected_headers); } -TEST_F(Http1ServerConnectionImplTest, Http11Options) { +TEST_P(Http1ServerConnectionImplTest, Http11Options) { initialize(); - TestHeaderMapImpl expected_headers{ + TestRequestHeaderMapImpl expected_headers{ {":authority", "www.somewhere.com"}, {":path", "*"}, {":method", "OPTIONS"}}; Buffer::OwnedImpl buffer("OPTIONS * HTTP/1.1\r\nHost: www.somewhere.com\r\n\r\n"); expectHeadersTest(Protocol::Http11, true, buffer, expected_headers); } 
-TEST_F(Http1ServerConnectionImplTest, SimpleGet) { +TEST_P(Http1ServerConnectionImplTest, SimpleGet) { initialize(); InSequence sequence; @@ -696,59 +818,84 @@ TEST_F(Http1ServerConnectionImplTest, SimpleGet) { MockRequestDecoder decoder; EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); - TestHeaderMapImpl expected_headers{{":path", "/"}, {":method", "GET"}}; + TestRequestHeaderMapImpl expected_headers{{":path", "/"}, {":method", "GET"}}; EXPECT_CALL(decoder, decodeHeaders_(HeaderMapEqual(&expected_headers), true)); Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\n\r\n"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); EXPECT_EQ(0U, buffer.length()); } -TEST_F(Http1ServerConnectionImplTest, BadRequestNoStream) { +TEST_P(Http1ServerConnectionImplTest, BadRequestNoStreamLegacy) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.early_errors_via_hcm", "false"}}); initialize(); std::string output; ON_CALL(connection_, write(_, _)).WillByDefault(AddBufferToString(&output)); + MockRequestDecoder decoder; + EXPECT_CALL(callbacks_, newStream(_, _)).Times(0); + EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _)).Times(0); + + Buffer::OwnedImpl buffer("bad"); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(isCodecProtocolError(status)); +} + +// Test that if the stream is not created at the time an error is detected, it +// is created as part of sending the protocol error. +TEST_P(Http1ServerConnectionImplTest, BadRequestNoStream) { + initialize(); + + MockRequestDecoder decoder; + Http::ResponseEncoder* response_encoder = nullptr; + EXPECT_CALL(callbacks_, newStream(_, _)) + .WillOnce(Invoke([&](ResponseEncoder& encoder, bool) -> RequestDecoder& { + response_encoder = &encoder; + return decoder; + })); + // Check that before any headers are parsed, requests do not look like HEAD or gRPC requests. 
+ EXPECT_CALL(decoder, sendLocalReply(false, _, _, _, _, _)); + Buffer::OwnedImpl buffer("bad"); - EXPECT_THROW(codec_->dispatch(buffer), CodecProtocolException); - EXPECT_EQ("HTTP/1.1 400 Bad Request\r\ncontent-length: 0\r\nconnection: close\r\n\r\n", output); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(isCodecProtocolError(status)); } // This behavior was observed during CVE-2019-18801 and helped to limit the // scope of affected Envoy configurations. -TEST_F(Http1ServerConnectionImplTest, RejectInvalidMethod) { +TEST_P(Http1ServerConnectionImplTest, RejectInvalidMethod) { initialize(); MockRequestDecoder decoder; EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); - std::string output; - ON_CALL(connection_, write(_, _)).WillByDefault(AddBufferToString(&output)); - Buffer::OwnedImpl buffer("BAD / HTTP/1.1\r\nHost: foo\r\n"); - EXPECT_THROW(codec_->dispatch(buffer), CodecProtocolException); - EXPECT_EQ("HTTP/1.1 400 Bad Request\r\ncontent-length: 0\r\nconnection: close\r\n\r\n", output); + EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _)); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(isCodecProtocolError(status)); } -TEST_F(Http1ServerConnectionImplTest, BadRequestStartedStream) { +TEST_P(Http1ServerConnectionImplTest, BadRequestStartedStream) { initialize(); - std::string output; - ON_CALL(connection_, write(_, _)).WillByDefault(AddBufferToString(&output)); - MockRequestDecoder decoder; EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); Buffer::OwnedImpl buffer("G"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); Buffer::OwnedImpl buffer2("g"); - EXPECT_THROW(codec_->dispatch(buffer2), CodecProtocolException); - EXPECT_EQ("HTTP/1.1 400 Bad Request\r\ncontent-length: 0\r\nconnection: close\r\n\r\n", output); + EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _)); + status = codec_->dispatch(buffer); + EXPECT_TRUE(isCodecProtocolError(status)); } 
-TEST_F(Http1ServerConnectionImplTest, FloodProtection) { +TEST_P(Http1ServerConnectionImplTest, FloodProtection) { initialize(); NiceMock decoder; @@ -764,7 +911,8 @@ TEST_F(Http1ServerConnectionImplTest, FloodProtection) { })); Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\n\r\n"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); EXPECT_EQ(0U, buffer.length()); // In most tests the write output is serialized to a buffer here it is @@ -791,13 +939,14 @@ TEST_F(Http1ServerConnectionImplTest, FloodProtection) { })); Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\n\r\n"); - EXPECT_THROW_WITH_MESSAGE(codec_->dispatch(buffer), FrameFloodException, - "Too many responses queued."); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(isBufferFloodError(status)); + EXPECT_EQ(status.message(), "Too many responses queued."); EXPECT_EQ(1, store_.counter("http1.response_flood").value()); } } -TEST_F(Http1ServerConnectionImplTest, FloodProtectionOff) { +TEST_P(Http1ServerConnectionImplTest, FloodProtectionOff) { TestScopedRuntime scoped_runtime; Runtime::LoaderSingleton::getExisting()->mergeValues( {{"envoy.reloadable_features.http1_flood_protection", "false"}}); @@ -815,7 +964,8 @@ TEST_F(Http1ServerConnectionImplTest, FloodProtectionOff) { })); Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\n\r\n"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); EXPECT_EQ(0U, buffer.length()); // In most tests the write output is serialized to a buffer here it is @@ -832,7 +982,7 @@ TEST_F(Http1ServerConnectionImplTest, FloodProtectionOff) { } } -TEST_F(Http1ServerConnectionImplTest, HostHeaderTranslation) { +TEST_P(Http1ServerConnectionImplTest, HostHeaderTranslation) { initialize(); InSequence sequence; @@ -840,41 +990,22 @@ TEST_F(Http1ServerConnectionImplTest, HostHeaderTranslation) { MockRequestDecoder decoder; EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); - 
TestHeaderMapImpl expected_headers{{":authority", "hello"}, {":path", "/"}, {":method", "GET"}}; + TestRequestHeaderMapImpl expected_headers{ + {":authority", "hello"}, {":path", "/"}, {":method", "GET"}}; EXPECT_CALL(decoder, decodeHeaders_(HeaderMapEqual(&expected_headers), true)); Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\nHOST: hello\r\n\r\n"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); EXPECT_EQ(0U, buffer.length()); } -// Ensures that requests with invalid HTTP header values are not rejected -// when the runtime guard is not enabled for the feature. -TEST_F(Http1ServerConnectionImplTest, HeaderInvalidCharsRuntimeGuard) { - TestScopedRuntime scoped_runtime; - // When the runtime-guarded feature is NOT enabled, invalid header values - // should be accepted by the codec. - Runtime::LoaderSingleton::getExisting()->mergeValues( - {{"envoy.reloadable_features.strict_header_validation", "false"}}); - - initialize(); - - MockRequestDecoder decoder; - EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); - - Buffer::OwnedImpl buffer( - absl::StrCat("GET / HTTP/1.1\r\nHOST: h.com\r\nfoo: ", std::string(1, 3), "\r\n")); - codec_->dispatch(buffer); -} - // Ensures that requests with invalid HTTP header values are properly rejected // when the runtime guard is enabled for the feature. -TEST_F(Http1ServerConnectionImplTest, HeaderInvalidCharsRejection) { +TEST_P(Http1ServerConnectionImplTest, HeaderInvalidCharsRejection) { TestScopedRuntime scoped_runtime; // When the runtime-guarded feature is enabled, invalid header values // should result in a rejection. 
- Runtime::LoaderSingleton::getExisting()->mergeValues( - {{"envoy.reloadable_features.strict_header_validation", "true"}}); initialize(); @@ -887,21 +1018,23 @@ TEST_F(Http1ServerConnectionImplTest, HeaderInvalidCharsRejection) { })); Buffer::OwnedImpl buffer( absl::StrCat("GET / HTTP/1.1\r\nHOST: h.com\r\nfoo: ", std::string(1, 3), "\r\n")); - EXPECT_THROW_WITH_MESSAGE(codec_->dispatch(buffer), CodecProtocolException, - "http/1.1 protocol error: header value contains invalid chars"); + EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _)); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(isCodecProtocolError(status)); + EXPECT_EQ(status.message(), "http/1.1 protocol error: header value contains invalid chars"); EXPECT_EQ("http1.invalid_characters", response_encoder->getStream().responseDetails()); } // Ensures that request headers with names containing the underscore character are allowed // when the option is set to allow. -TEST_F(Http1ServerConnectionImplTest, HeaderNameWithUnderscoreAllowed) { +TEST_P(Http1ServerConnectionImplTest, HeaderNameWithUnderscoreAllowed) { headers_with_underscores_action_ = envoy::config::core::v3::HttpProtocolOptions::ALLOW; initialize(); MockRequestDecoder decoder; EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); - TestHeaderMapImpl expected_headers{ + TestRequestHeaderMapImpl expected_headers{ {":authority", "h.com"}, {":path", "/"}, {":method", "GET"}, @@ -910,21 +1043,22 @@ TEST_F(Http1ServerConnectionImplTest, HeaderNameWithUnderscoreAllowed) { EXPECT_CALL(decoder, decodeHeaders_(HeaderMapEqual(&expected_headers), true)); Buffer::OwnedImpl buffer(absl::StrCat("GET / HTTP/1.1\r\nHOST: h.com\r\nfoo_bar: bar\r\n\r\n")); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); EXPECT_EQ(0U, buffer.length()); EXPECT_EQ(0, store_.counter("http1.dropped_headers_with_underscores").value()); } // Ensures that request headers with names containing the underscore 
character are dropped // when the option is set to drop headers. -TEST_F(Http1ServerConnectionImplTest, HeaderNameWithUnderscoreAreDropped) { +TEST_P(Http1ServerConnectionImplTest, HeaderNameWithUnderscoreAreDropped) { headers_with_underscores_action_ = envoy::config::core::v3::HttpProtocolOptions::DROP_HEADER; initialize(); MockRequestDecoder decoder; EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); - TestHeaderMapImpl expected_headers{ + TestRequestHeaderMapImpl expected_headers{ {":authority", "h.com"}, {":path", "/"}, {":method", "GET"}, @@ -932,14 +1066,15 @@ TEST_F(Http1ServerConnectionImplTest, HeaderNameWithUnderscoreAreDropped) { EXPECT_CALL(decoder, decodeHeaders_(HeaderMapEqual(&expected_headers), true)); Buffer::OwnedImpl buffer(absl::StrCat("GET / HTTP/1.1\r\nHOST: h.com\r\nfoo_bar: bar\r\n\r\n")); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); EXPECT_EQ(0U, buffer.length()); EXPECT_EQ(1, store_.counter("http1.dropped_headers_with_underscores").value()); } // Ensures that request with header names containing the underscore character are rejected // when the option is set to reject request. 
-TEST_F(Http1ServerConnectionImplTest, HeaderNameWithUnderscoreCauseRequestRejected) { +TEST_P(Http1ServerConnectionImplTest, HeaderNameWithUnderscoreCauseRequestRejected) { headers_with_underscores_action_ = envoy::config::core::v3::HttpProtocolOptions::REJECT_REQUEST; initialize(); @@ -952,13 +1087,15 @@ TEST_F(Http1ServerConnectionImplTest, HeaderNameWithUnderscoreCauseRequestReject })); Buffer::OwnedImpl buffer(absl::StrCat("GET / HTTP/1.1\r\nHOST: h.com\r\nfoo_bar: bar\r\n\r\n")); - EXPECT_THROW_WITH_MESSAGE(codec_->dispatch(buffer), EnvoyException, - "http/1.1 protocol error: header name contains underscores"); - EXPECT_EQ("http1.invalid_characters", response_encoder->getStream().responseDetails()); + EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _)); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(isCodecProtocolError(status)); + EXPECT_EQ(status.message(), "http/1.1 protocol error: header name contains underscores"); + EXPECT_EQ("http1.unexpected_underscore", response_encoder->getStream().responseDetails()); EXPECT_EQ(1, store_.counter("http1.requests_rejected_with_underscores_in_headers").value()); } -TEST_F(Http1ServerConnectionImplTest, HeaderInvalidAuthority) { +TEST_P(Http1ServerConnectionImplTest, HeaderInvalidAuthority) { TestScopedRuntime scoped_runtime; initialize(); @@ -971,34 +1108,17 @@ TEST_F(Http1ServerConnectionImplTest, HeaderInvalidAuthority) { return decoder; })); Buffer::OwnedImpl buffer(absl::StrCat("GET / HTTP/1.1\r\nHOST: h.\"com\r\n\r\n")); - EXPECT_THROW_WITH_MESSAGE( - codec_->dispatch(buffer), CodecProtocolException, - "http/1.1 protocol error: request headers failed spec compliance checks"); + EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _)); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(isCodecProtocolError(status)); + EXPECT_EQ(status.message(), + "http/1.1 protocol error: request headers failed spec compliance checks"); EXPECT_EQ("http.invalid_authority", 
response_encoder->getStream().responseDetails()); } -// Regression test for http-parser allowing embedded NULs in header values, -// verify we reject them. -TEST_F(Http1ServerConnectionImplTest, HeaderEmbeddedNulRejection) { - TestScopedRuntime scoped_runtime; - Runtime::LoaderSingleton::getExisting()->mergeValues( - {{"envoy.reloadable_features.strict_header_validation", "false"}}); - initialize(); - - InSequence sequence; - - MockRequestDecoder decoder; - EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); - - Buffer::OwnedImpl buffer( - absl::StrCat("GET / HTTP/1.1\r\nHOST: h.com\r\nfoo: bar", std::string(1, '\0'), "baz\r\n")); - EXPECT_THROW_WITH_MESSAGE(codec_->dispatch(buffer), CodecProtocolException, - "http/1.1 protocol error: HPE_INVALID_HEADER_TOKEN"); -} - // Mutate an HTTP GET with embedded NULs, this should always be rejected in some // way (not necessarily with "head value contains NUL" though). -TEST_F(Http1ServerConnectionImplTest, HeaderMutateEmbeddedNul) { +TEST_P(Http1ServerConnectionImplTest, HeaderMutateEmbeddedNul) { const std::string example_input = "GET / HTTP/1.1\r\nHOST: h.com\r\nfoo: barbaz\r\n"; for (size_t n = 1; n < example_input.size(); ++n) { @@ -1011,15 +1131,18 @@ TEST_F(Http1ServerConnectionImplTest, HeaderMutateEmbeddedNul) { Buffer::OwnedImpl buffer( absl::StrCat(example_input.substr(0, n), std::string(1, '\0'), example_input.substr(n))); - EXPECT_THROW_WITH_REGEX(codec_->dispatch(buffer), CodecProtocolException, - "http/1.1 protocol error:"); + EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _)); + auto status = codec_->dispatch(buffer); + EXPECT_FALSE(status.ok()); + EXPECT_TRUE(isCodecProtocolError(status)); + EXPECT_THAT(status.message(), testing::HasSubstr("http/1.1 protocol error:")); } } -// Mutate an HTTP GET with CR or LF. These can cause an exception or maybe +// Mutate an HTTP GET with CR or LF. These can cause an error status or maybe // result in a valid decodeHeaders(). 
In any case, the validHeaderString() // ASSERTs should validate we never have any embedded CR or LF. -TEST_F(Http1ServerConnectionImplTest, HeaderMutateEmbeddedCRLF) { +TEST_P(Http1ServerConnectionImplTest, HeaderMutateEmbeddedCRLF) { const std::string example_input = "GET / HTTP/1.1\r\nHOST: h.com\r\nfoo: barbaz\r\n"; for (const char c : {'\r', '\n'}) { @@ -1033,15 +1156,13 @@ TEST_F(Http1ServerConnectionImplTest, HeaderMutateEmbeddedCRLF) { Buffer::OwnedImpl buffer( absl::StrCat(example_input.substr(0, n), std::string(1, c), example_input.substr(n))); - try { - codec_->dispatch(buffer); - } catch (CodecProtocolException&) { - } + // May or may not cause an error status, but should never trip on a debug ASSERT. + auto status = codec_->dispatch(buffer); } } } -TEST_F(Http1ServerConnectionImplTest, CloseDuringHeadersComplete) { +TEST_P(Http1ServerConnectionImplTest, CloseDuringHeadersComplete) { initialize(); InSequence sequence; @@ -1058,11 +1179,12 @@ TEST_F(Http1ServerConnectionImplTest, CloseDuringHeadersComplete) { EXPECT_CALL(decoder, decodeData(_, _)).Times(0); Buffer::OwnedImpl buffer("POST / HTTP/1.1\r\ncontent-length: 5\r\n\r\n12345"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); EXPECT_NE(0U, buffer.length()); } -TEST_F(Http1ServerConnectionImplTest, PostWithContentLength) { +TEST_P(Http1ServerConnectionImplTest, PostWithContentLength) { initialize(); InSequence sequence; @@ -1070,7 +1192,8 @@ TEST_F(Http1ServerConnectionImplTest, PostWithContentLength) { MockRequestDecoder decoder; EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); - TestHeaderMapImpl expected_headers{{"content-length", "5"}, {":path", "/"}, {":method", "POST"}}; + TestRequestHeaderMapImpl expected_headers{ + {"content-length", "5"}, {":path", "/"}, {":method", "POST"}}; EXPECT_CALL(decoder, decodeHeaders_(HeaderMapEqual(&expected_headers), false)); Buffer::OwnedImpl expected_data1("12345"); @@ -1080,13 +1203,14 @@ 
TEST_F(Http1ServerConnectionImplTest, PostWithContentLength) { EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data2), true)); Buffer::OwnedImpl buffer("POST / HTTP/1.1\r\ncontent-length: 5\r\n\r\n12345"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); EXPECT_EQ(0U, buffer.length()); } // Verify that headers and body with content length are processed correctly and data is merged // before the decodeData call even if delivered in a buffer that holds 1 byte per slice. -TEST_F(Http1ServerConnectionImplTest, PostWithContentLengthFragmentedBuffer) { +TEST_P(Http1ServerConnectionImplTest, PostWithContentLengthFragmentedBuffer) { initialize(); InSequence sequence; @@ -1094,7 +1218,8 @@ TEST_F(Http1ServerConnectionImplTest, PostWithContentLengthFragmentedBuffer) { MockRequestDecoder decoder; EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); - TestHeaderMapImpl expected_headers{{"content-length", "5"}, {":path", "/"}, {":method", "POST"}}; + TestRequestHeaderMapImpl expected_headers{ + {"content-length", "5"}, {":path", "/"}, {":method", "POST"}}; EXPECT_CALL(decoder, decodeHeaders_(HeaderMapEqual(&expected_headers), false)); Buffer::OwnedImpl expected_data1("12345"); @@ -1104,12 +1229,13 @@ TEST_F(Http1ServerConnectionImplTest, PostWithContentLengthFragmentedBuffer) { EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data2), true)); Buffer::OwnedImpl buffer = - createBufferWithOneByteSlices("POST / HTTP/1.1\r\ncontent-length: 5\r\n\r\n12345"); - codec_->dispatch(buffer); + createBufferWithNByteSlices("POST / HTTP/1.1\r\ncontent-length: 5\r\n\r\n12345", 1); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); EXPECT_EQ(0U, buffer.length()); } -TEST_F(Http1ServerConnectionImplTest, HeaderOnlyResponse) { +TEST_P(Http1ServerConnectionImplTest, HeaderOnlyResponse) { initialize(); NiceMock decoder; @@ -1121,7 +1247,8 @@ TEST_F(Http1ServerConnectionImplTest, HeaderOnlyResponse) { 
})); Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\n\r\n"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); EXPECT_EQ(0U, buffer.length()); std::string output; @@ -1134,7 +1261,7 @@ TEST_F(Http1ServerConnectionImplTest, HeaderOnlyResponse) { // As with Http1ClientConnectionImplTest.LargeHeaderRequestEncode but validate // the response encoder instead of request encoder. -TEST_F(Http1ServerConnectionImplTest, LargeHeaderResponseEncode) { +TEST_P(Http1ServerConnectionImplTest, LargeHeaderResponseEncode) { initialize(); NiceMock decoder; @@ -1146,7 +1273,8 @@ TEST_F(Http1ServerConnectionImplTest, LargeHeaderResponseEncode) { })); Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\n\r\n"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); EXPECT_EQ(0U, buffer.length()); std::string output; @@ -1159,7 +1287,7 @@ TEST_F(Http1ServerConnectionImplTest, LargeHeaderResponseEncode) { output); } -TEST_F(Http1ServerConnectionImplTest, HeaderOnlyResponseTrainProperHeaders) { +TEST_P(Http1ServerConnectionImplTest, HeaderOnlyResponseTrainProperHeaders) { codec_settings_.header_key_format_ = Http1Settings::HeaderKeyFormat::ProperCase; initialize(); @@ -1172,7 +1300,8 @@ TEST_F(Http1ServerConnectionImplTest, HeaderOnlyResponseTrainProperHeaders) { })); Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\n\r\n"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); EXPECT_EQ(0U, buffer.length()); std::string output; @@ -1185,7 +1314,7 @@ TEST_F(Http1ServerConnectionImplTest, HeaderOnlyResponseTrainProperHeaders) { output); } -TEST_F(Http1ServerConnectionImplTest, HeaderOnlyResponseWith204) { +TEST_P(Http1ServerConnectionImplTest, HeaderOnlyResponseWith204) { initialize(); NiceMock decoder; @@ -1197,7 +1326,8 @@ TEST_F(Http1ServerConnectionImplTest, HeaderOnlyResponseWith204) { })); Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\n\r\n"); - codec_->dispatch(buffer); + 
auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); EXPECT_EQ(0U, buffer.length()); std::string output; @@ -1208,7 +1338,7 @@ TEST_F(Http1ServerConnectionImplTest, HeaderOnlyResponseWith204) { EXPECT_EQ("HTTP/1.1 204 No Content\r\n\r\n", output); } -TEST_F(Http1ServerConnectionImplTest, HeaderOnlyResponseWith100Then200) { +TEST_P(Http1ServerConnectionImplTest, HeaderOnlyResponseWith100Then200) { initialize(); NiceMock decoder; @@ -1220,7 +1350,8 @@ TEST_F(Http1ServerConnectionImplTest, HeaderOnlyResponseWith100Then200) { })); Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\n\r\n"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); EXPECT_EQ(0U, buffer.length()); std::string output; @@ -1238,7 +1369,7 @@ TEST_F(Http1ServerConnectionImplTest, HeaderOnlyResponseWith100Then200) { EXPECT_EQ("HTTP/1.1 200 OK\r\ncontent-length: 0\r\n\r\n", output); } -TEST_F(Http1ServerConnectionImplTest, MetadataTest) { +TEST_P(Http1ServerConnectionImplTest, MetadataTest) { initialize(); NiceMock decoder; @@ -1249,7 +1380,8 @@ TEST_F(Http1ServerConnectionImplTest, MetadataTest) { return decoder; })); Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\n\r\n"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); EXPECT_EQ(0U, buffer.length()); MetadataMap metadata_map = {{"key", "value"}}; @@ -1260,7 +1392,7 @@ TEST_F(Http1ServerConnectionImplTest, MetadataTest) { EXPECT_EQ(1, store_.counter("http1.metadata_not_supported_error").value()); } -TEST_F(Http1ServerConnectionImplTest, ChunkedResponse) { +TEST_P(Http1ServerConnectionImplTest, ChunkedResponse) { initialize(); NiceMock decoder; @@ -1272,7 +1404,8 @@ TEST_F(Http1ServerConnectionImplTest, ChunkedResponse) { })); Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\n\r\n"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); EXPECT_EQ(0U, buffer.length()); std::string output; @@ -1295,7 +1428,7 @@ 
TEST_F(Http1ServerConnectionImplTest, ChunkedResponse) { output); } -TEST_F(Http1ServerConnectionImplTest, ChunkedResponseWithTrailers) { +TEST_P(Http1ServerConnectionImplTest, ChunkedResponseWithTrailers) { codec_settings_.enable_trailers_ = true; initialize(); NiceMock decoder; @@ -1307,7 +1440,8 @@ TEST_F(Http1ServerConnectionImplTest, ChunkedResponseWithTrailers) { })); Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\n\r\n"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); EXPECT_EQ(0U, buffer.length()); std::string output; @@ -1327,7 +1461,7 @@ TEST_F(Http1ServerConnectionImplTest, ChunkedResponseWithTrailers) { output); } -TEST_F(Http1ServerConnectionImplTest, ContentLengthResponse) { +TEST_P(Http1ServerConnectionImplTest, ContentLengthResponse) { initialize(); NiceMock decoder; @@ -1339,7 +1473,8 @@ TEST_F(Http1ServerConnectionImplTest, ContentLengthResponse) { })); Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\n\r\n"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); EXPECT_EQ(0U, buffer.length()); std::string output; @@ -1353,7 +1488,7 @@ TEST_F(Http1ServerConnectionImplTest, ContentLengthResponse) { EXPECT_EQ("HTTP/1.1 200 OK\r\ncontent-length: 11\r\n\r\nHello World", output); } -TEST_F(Http1ServerConnectionImplTest, HeadRequestResponse) { +TEST_P(Http1ServerConnectionImplTest, HeadRequestResponse) { initialize(); NiceMock decoder; @@ -1365,7 +1500,8 @@ TEST_F(Http1ServerConnectionImplTest, HeadRequestResponse) { })); Buffer::OwnedImpl buffer("HEAD / HTTP/1.1\r\n\r\n"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); EXPECT_EQ(0U, buffer.length()); std::string output; @@ -1376,7 +1512,7 @@ TEST_F(Http1ServerConnectionImplTest, HeadRequestResponse) { EXPECT_EQ("HTTP/1.1 200 OK\r\ncontent-length: 5\r\n\r\n", output); } -TEST_F(Http1ServerConnectionImplTest, HeadChunkedRequestResponse) { 
+TEST_P(Http1ServerConnectionImplTest, HeadChunkedRequestResponse) { initialize(); NiceMock decoder; @@ -1388,7 +1524,8 @@ TEST_F(Http1ServerConnectionImplTest, HeadChunkedRequestResponse) { })); Buffer::OwnedImpl buffer("HEAD / HTTP/1.1\r\n\r\n"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); EXPECT_EQ(0U, buffer.length()); std::string output; @@ -1399,7 +1536,7 @@ TEST_F(Http1ServerConnectionImplTest, HeadChunkedRequestResponse) { EXPECT_EQ("HTTP/1.1 200 OK\r\ntransfer-encoding: chunked\r\n\r\n", output); } -TEST_F(Http1ServerConnectionImplTest, DoubleRequest) { +TEST_P(Http1ServerConnectionImplTest, DoubleRequest) { initialize(); NiceMock decoder; @@ -1415,23 +1552,24 @@ TEST_F(Http1ServerConnectionImplTest, DoubleRequest) { Buffer::OwnedImpl buffer(request); buffer.add(request); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); EXPECT_EQ(request.size(), buffer.length()); response_encoder->encodeHeaders(TestResponseHeaderMapImpl{{":status", "200"}}, true); - codec_->dispatch(buffer); + status = codec_->dispatch(buffer); EXPECT_EQ(0U, buffer.length()); } -TEST_F(Http1ServerConnectionImplTest, RequestWithTrailersDropped) { expectTrailersTest(false); } +TEST_P(Http1ServerConnectionImplTest, RequestWithTrailersDropped) { expectTrailersTest(false); } -TEST_F(Http1ServerConnectionImplTest, RequestWithTrailersKept) { expectTrailersTest(true); } +TEST_P(Http1ServerConnectionImplTest, RequestWithTrailersKept) { expectTrailersTest(true); } -TEST_F(Http1ServerConnectionImplTest, IgnoreUpgradeH2c) { +TEST_P(Http1ServerConnectionImplTest, IgnoreUpgradeH2c) { initialize(); - TestHeaderMapImpl expected_headers{ + TestRequestHeaderMapImpl expected_headers{ {":authority", "www.somewhere.com"}, {":path", "/"}, {":method", "GET"}}; Buffer::OwnedImpl buffer( "GET http://www.somewhere.com/ HTTP/1.1\r\nConnection: " @@ -1439,33 +1577,33 @@ TEST_F(Http1ServerConnectionImplTest, 
IgnoreUpgradeH2c) { expectHeadersTest(Protocol::Http11, true, buffer, expected_headers); } -TEST_F(Http1ServerConnectionImplTest, IgnoreUpgradeH2cClose) { +TEST_P(Http1ServerConnectionImplTest, IgnoreUpgradeH2cClose) { initialize(); - TestHeaderMapImpl expected_headers{{":authority", "www.somewhere.com"}, - {":path", "/"}, - {":method", "GET"}, - {"connection", "Close"}}; + TestRequestHeaderMapImpl expected_headers{{":authority", "www.somewhere.com"}, + {":path", "/"}, + {":method", "GET"}, + {"connection", "Close"}}; Buffer::OwnedImpl buffer("GET http://www.somewhere.com/ HTTP/1.1\r\nConnection: " "Upgrade, Close, HTTP2-Settings\r\nUpgrade: h2c\r\nHTTP2-Settings: " "token64\r\nHost: bah\r\n\r\n"); expectHeadersTest(Protocol::Http11, true, buffer, expected_headers); } -TEST_F(Http1ServerConnectionImplTest, IgnoreUpgradeH2cCloseEtc) { +TEST_P(Http1ServerConnectionImplTest, IgnoreUpgradeH2cCloseEtc) { initialize(); - TestHeaderMapImpl expected_headers{{":authority", "www.somewhere.com"}, - {":path", "/"}, - {":method", "GET"}, - {"connection", "Close"}}; + TestRequestHeaderMapImpl expected_headers{{":authority", "www.somewhere.com"}, + {":path", "/"}, + {":method", "GET"}, + {"connection", "Close"}}; Buffer::OwnedImpl buffer("GET http://www.somewhere.com/ HTTP/1.1\r\nConnection: " "Upgrade, Close, HTTP2-Settings, Etc\r\nUpgrade: h2c\r\nHTTP2-Settings: " "token64\r\nHost: bah\r\n\r\n"); expectHeadersTest(Protocol::Http11, true, buffer, expected_headers); } -TEST_F(Http1ServerConnectionImplTest, UpgradeRequest) { +TEST_P(Http1ServerConnectionImplTest, UpgradeRequest) { initialize(); InSequence sequence; @@ -1475,20 +1613,21 @@ TEST_F(Http1ServerConnectionImplTest, UpgradeRequest) { EXPECT_CALL(decoder, decodeHeaders_(_, false)); Buffer::OwnedImpl buffer( "POST / HTTP/1.1\r\nConnection: upgrade\r\nUpgrade: foo\r\ncontent-length:5\r\n\r\n"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); Buffer::OwnedImpl expected_data1("12345"); Buffer::OwnedImpl 
body("12345"); EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data1), false)); - codec_->dispatch(body); + status = codec_->dispatch(body); Buffer::OwnedImpl expected_data2("abcd"); Buffer::OwnedImpl websocket_payload("abcd"); EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data2), false)); - codec_->dispatch(websocket_payload); + status = codec_->dispatch(websocket_payload); + EXPECT_TRUE(status.ok()); } -TEST_F(Http1ServerConnectionImplTest, UpgradeRequestWithEarlyData) { +TEST_P(Http1ServerConnectionImplTest, UpgradeRequestWithEarlyData) { initialize(); InSequence sequence; @@ -1500,10 +1639,11 @@ TEST_F(Http1ServerConnectionImplTest, UpgradeRequestWithEarlyData) { EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data), false)); Buffer::OwnedImpl buffer("POST / HTTP/1.1\r\nConnection: upgrade\r\nUpgrade: " "foo\r\ncontent-length:5\r\n\r\n12345abcd"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); } -TEST_F(Http1ServerConnectionImplTest, UpgradeRequestWithTEChunked) { +TEST_P(Http1ServerConnectionImplTest, UpgradeRequestWithTEChunked) { initialize(); InSequence sequence; @@ -1517,10 +1657,11 @@ TEST_F(Http1ServerConnectionImplTest, UpgradeRequestWithTEChunked) { EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data), false)); Buffer::OwnedImpl buffer("POST / HTTP/1.1\r\nConnection: upgrade\r\nUpgrade: " "foo\r\ntransfer-encoding: chunked\r\n\r\n12345abcd"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); } -TEST_F(Http1ServerConnectionImplTest, UpgradeRequestWithNoBody) { +TEST_P(Http1ServerConnectionImplTest, UpgradeRequestWithNoBody) { initialize(); InSequence sequence; @@ -1534,33 +1675,60 @@ TEST_F(Http1ServerConnectionImplTest, UpgradeRequestWithNoBody) { EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data), false)); Buffer::OwnedImpl buffer( "GET / HTTP/1.1\r\nConnection: upgrade\r\nUpgrade: foo\r\ncontent-length: 
0\r\n\r\nabcd"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); +} + +// Test that 101 upgrade responses do not contain content-length or transfer-encoding headers. +TEST_P(Http1ServerConnectionImplTest, UpgradeRequestResponseHeaders) { + initialize(); + + NiceMock decoder; + Http::ResponseEncoder* response_encoder = nullptr; + EXPECT_CALL(callbacks_, newStream(_, _)) + .WillOnce(Invoke([&](ResponseEncoder& encoder, bool) -> RequestDecoder& { + response_encoder = &encoder; + return decoder; + })); + + Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\nConnection: upgrade\r\nUpgrade: foo\r\n\r\n"); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); + EXPECT_EQ(0U, buffer.length()); + + std::string output; + ON_CALL(connection_, write(_, _)).WillByDefault(AddBufferToString(&output)); + + TestResponseHeaderMapImpl headers{{":status", "101"}}; + response_encoder->encodeHeaders(headers, false); + EXPECT_EQ("HTTP/1.1 101 Switching Protocols\r\n\r\n", output); } -TEST_F(Http1ServerConnectionImplTest, ConnectRequestNoContentLength) { +TEST_P(Http1ServerConnectionImplTest, ConnectRequestNoContentLength) { initialize(); InSequence sequence; NiceMock decoder; EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); - TestHeaderMapImpl expected_headers{ + TestRequestHeaderMapImpl expected_headers{ {":authority", "host:80"}, {":method", "CONNECT"}, }; EXPECT_CALL(decoder, decodeHeaders_(HeaderMapEqual(&expected_headers), false)); Buffer::OwnedImpl buffer("CONNECT host:80 HTTP/1.1\r\n\r\n"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); Buffer::OwnedImpl expected_data("abcd"); Buffer::OwnedImpl connect_payload("abcd"); EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data), false)); - codec_->dispatch(connect_payload); + status = codec_->dispatch(connect_payload); + EXPECT_TRUE(status.ok()); } // We use the absolute URL parsing code for CONNECT requests, but it does not // 
actually allow absolute URLs. -TEST_F(Http1ServerConnectionImplTest, ConnectRequestAbsoluteURLNotallowed) { +TEST_P(Http1ServerConnectionImplTest, ConnectRequestAbsoluteURLNotallowed) { initialize(); InSequence sequence; @@ -1568,10 +1736,12 @@ TEST_F(Http1ServerConnectionImplTest, ConnectRequestAbsoluteURLNotallowed) { EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); Buffer::OwnedImpl buffer("CONNECT http://host:80 HTTP/1.1\r\n\r\n"); - EXPECT_THROW(codec_->dispatch(buffer), CodecProtocolException); + EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _)); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(isCodecProtocolError(status)); } -TEST_F(Http1ServerConnectionImplTest, ConnectRequestWithEarlyData) { +TEST_P(Http1ServerConnectionImplTest, ConnectRequestWithEarlyData) { initialize(); InSequence sequence; @@ -1582,29 +1752,44 @@ TEST_F(Http1ServerConnectionImplTest, ConnectRequestWithEarlyData) { EXPECT_CALL(decoder, decodeHeaders_(_, false)); EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data), false)); Buffer::OwnedImpl buffer("CONNECT host:80 HTTP/1.1\r\n\r\nabcd"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); } -TEST_F(Http1ServerConnectionImplTest, ConnectRequestWithTEChunked) { +TEST_P(Http1ServerConnectionImplTest, ConnectRequestWithTEChunked) { initialize(); InSequence sequence; NiceMock decoder; EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); - // Connect with body is technically illegal, but Envoy does not inspect the - // body to see if there is a non-zero byte chunk. It will instead pass it - // through. - // TODO(alyssawilk) track connect payload and block if this happens. 
- Buffer::OwnedImpl expected_data("12345abcd"); - EXPECT_CALL(decoder, decodeHeaders_(_, false)); - EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data), false)); + // Per https://tools.ietf.org/html/rfc7231#section-4.3.6 CONNECT with body has no defined + // semantics: Envoy will reject chunked CONNECT requests. + EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _)); Buffer::OwnedImpl buffer( "CONNECT host:80 HTTP/1.1\r\ntransfer-encoding: chunked\r\n\r\n12345abcd"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(isCodecProtocolError(status)); + EXPECT_EQ(status.message(), "http/1.1 protocol error: unsupported transfer encoding"); } -TEST_F(Http1ServerConnectionImplTest, ConnectRequestWithContentLength) { +TEST_P(Http1ServerConnectionImplTest, ConnectRequestWithNonZeroContentLength) { + initialize(); + + InSequence sequence; + NiceMock decoder; + EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); + + // Make sure we avoid the deferred_end_stream_headers_ optimization for + // requests-with-no-body. 
+ Buffer::OwnedImpl buffer("CONNECT host:80 HTTP/1.1\r\ncontent-length: 1\r\n\r\nabcd"); + EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _)); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(isCodecProtocolError(status)); + EXPECT_EQ(status.message(), "http/1.1 protocol error: unsupported content length"); +} + +TEST_P(Http1ServerConnectionImplTest, ConnectRequestWithZeroContentLength) { initialize(); InSequence sequence; @@ -1617,10 +1802,11 @@ TEST_F(Http1ServerConnectionImplTest, ConnectRequestWithContentLength) { EXPECT_CALL(decoder, decodeHeaders_(_, false)); EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data), false)); Buffer::OwnedImpl buffer("CONNECT host:80 HTTP/1.1\r\ncontent-length: 0\r\n\r\nabcd"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); } -TEST_F(Http1ServerConnectionImplTest, WatermarkTest) { +TEST_P(Http1ServerConnectionImplTest, WatermarkTest) { EXPECT_CALL(connection_, bufferLimit()).WillOnce(Return(10)); initialize(); @@ -1633,7 +1819,7 @@ TEST_F(Http1ServerConnectionImplTest, WatermarkTest) { })); Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\n\r\n"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); Http::MockStreamCallbacks stream_callbacks; response_encoder->getStream().addCallbacks(stream_callbacks); @@ -1654,24 +1840,45 @@ TEST_F(Http1ServerConnectionImplTest, WatermarkTest) { ->onUnderlyingConnectionBelowWriteBufferLowWatermark(); } -class Http1ClientConnectionImplTest : public testing::Test { +class Http1ClientConnectionImplTest : public Http1CodecTestBase, + public testing::TestWithParam { public: + bool testingNewCodec() { return GetParam(); } + void initialize() { - codec_ = std::make_unique(connection_, store_, callbacks_, - codec_settings_, max_response_headers_count_); + if (testingNewCodec()) { + codec_ = std::make_unique( + connection_, http1CodecStats(), callbacks_, codec_settings_, max_response_headers_count_); + } else { + codec_ = 
std::make_unique( + connection_, http1CodecStats(), callbacks_, codec_settings_, max_response_headers_count_); + } + } + + void readDisableOnRequestEncoder(RequestEncoder* request_encoder, bool disable) { + if (testingNewCodec()) { + dynamic_cast(request_encoder)->readDisable(disable); + } else { + dynamic_cast(request_encoder)->readDisable(disable); + } } NiceMock connection_; NiceMock callbacks_; NiceMock codec_settings_; - std::unique_ptr codec_; + Http::ClientConnectionPtr codec_; protected: Stats::TestUtil::TestStore store_; uint32_t max_response_headers_count_{Http::DEFAULT_MAX_HEADERS_COUNT}; }; -TEST_F(Http1ClientConnectionImplTest, SimpleGet) { +INSTANTIATE_TEST_SUITE_P(Codecs, Http1ClientConnectionImplTest, testing::Bool(), + [](const testing::TestParamInfo& param) { + return param.param ? "New" : "Legacy"; + }); + +TEST_P(Http1ClientConnectionImplTest, SimpleGet) { initialize(); MockResponseDecoder response_decoder; @@ -1685,7 +1892,7 @@ TEST_F(Http1ClientConnectionImplTest, SimpleGet) { EXPECT_EQ("GET / HTTP/1.1\r\ncontent-length: 0\r\n\r\n", output); } -TEST_F(Http1ClientConnectionImplTest, SimpleGetWithHeaderCasing) { +TEST_P(Http1ClientConnectionImplTest, SimpleGetWithHeaderCasing) { codec_settings_.header_key_format_ = Http1Settings::HeaderKeyFormat::ProperCase; initialize(); @@ -1701,7 +1908,7 @@ TEST_F(Http1ClientConnectionImplTest, SimpleGetWithHeaderCasing) { EXPECT_EQ("GET / HTTP/1.1\r\nMy-Custom-Header: hey\r\nContent-Length: 0\r\n\r\n", output); } -TEST_F(Http1ClientConnectionImplTest, HostHeaderTranslate) { +TEST_P(Http1ClientConnectionImplTest, HostHeaderTranslate) { initialize(); MockResponseDecoder response_decoder; @@ -1715,7 +1922,7 @@ TEST_F(Http1ClientConnectionImplTest, HostHeaderTranslate) { EXPECT_EQ("GET / HTTP/1.1\r\nhost: host\r\ncontent-length: 0\r\n\r\n", output); } -TEST_F(Http1ClientConnectionImplTest, Reset) { +TEST_P(Http1ClientConnectionImplTest, Reset) { initialize(); MockResponseDecoder response_decoder; @@ -1729,11 
+1936,15 @@ TEST_F(Http1ClientConnectionImplTest, Reset) { // Verify that we correctly enable reads on the connection when the final response is // received. -TEST_F(Http1ClientConnectionImplTest, FlowControlReadDisabledReenable) { +TEST_P(Http1ClientConnectionImplTest, FlowControlReadDisabledReenable) { initialize(); MockResponseDecoder response_decoder; - Http::RequestEncoder* request_encoder = &codec_->newStream(response_decoder); + auto* request_encoder = &codec_->newStream(response_decoder); + // Manually read disable. + EXPECT_CALL(connection_, readDisable(true)).Times(2); + readDisableOnRequestEncoder(request_encoder, true); + readDisableOnRequestEncoder(request_encoder, true); std::string output; ON_CALL(connection_, write(_, _)).WillByDefault(AddBufferToString(&output)); @@ -1744,28 +1955,25 @@ TEST_F(Http1ClientConnectionImplTest, FlowControlReadDisabledReenable) { EXPECT_EQ("GET / HTTP/1.1\r\nhost: host\r\ncontent-length: 0\r\n\r\n", output); output.clear(); - // Simulate the underlying connection being backed up. Ensure that it is - // read-enabled when the final response completes. - EXPECT_CALL(connection_, readEnabled()) - .Times(2) - .WillOnce(Return(false)) - .WillRepeatedly(Return(true)); - EXPECT_CALL(connection_, readDisable(false)); + // When the response is sent, the read disable should be unwound. + EXPECT_CALL(connection_, readDisable(false)).Times(2); // Response. 
EXPECT_CALL(response_decoder, decodeHeaders_(_, true)); Buffer::OwnedImpl response("HTTP/1.1 503 Service Unavailable\r\nContent-Length: 0\r\n\r\n"); - codec_->dispatch(response); + auto status = codec_->dispatch(response); + EXPECT_TRUE(status.ok()); } -TEST_F(Http1ClientConnectionImplTest, PrematureResponse) { +TEST_P(Http1ClientConnectionImplTest, PrematureResponse) { initialize(); Buffer::OwnedImpl response("HTTP/1.1 408 Request Timeout\r\nConnection: Close\r\n\r\n"); - EXPECT_THROW(codec_->dispatch(response), PrematureResponseException); + auto status = codec_->dispatch(response); + EXPECT_TRUE(isPrematureResponseError(status)); } -TEST_F(Http1ClientConnectionImplTest, EmptyBodyResponse503) { +TEST_P(Http1ClientConnectionImplTest, EmptyBodyResponse503) { initialize(); NiceMock response_decoder; @@ -1775,10 +1983,11 @@ TEST_F(Http1ClientConnectionImplTest, EmptyBodyResponse503) { EXPECT_CALL(response_decoder, decodeHeaders_(_, true)); Buffer::OwnedImpl response("HTTP/1.1 503 Service Unavailable\r\nContent-Length: 0\r\n\r\n"); - codec_->dispatch(response); + auto status = codec_->dispatch(response); + EXPECT_TRUE(status.ok()); } -TEST_F(Http1ClientConnectionImplTest, EmptyBodyResponse200) { +TEST_P(Http1ClientConnectionImplTest, EmptyBodyResponse200) { initialize(); NiceMock response_decoder; @@ -1788,10 +1997,11 @@ TEST_F(Http1ClientConnectionImplTest, EmptyBodyResponse200) { EXPECT_CALL(response_decoder, decodeHeaders_(_, true)); Buffer::OwnedImpl response("HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n"); - codec_->dispatch(response); + auto status = codec_->dispatch(response); + EXPECT_TRUE(status.ok()); } -TEST_F(Http1ClientConnectionImplTest, HeadRequest) { +TEST_P(Http1ClientConnectionImplTest, HeadRequest) { initialize(); NiceMock response_decoder; @@ -1801,10 +2011,11 @@ TEST_F(Http1ClientConnectionImplTest, HeadRequest) { EXPECT_CALL(response_decoder, decodeHeaders_(_, true)); Buffer::OwnedImpl response("HTTP/1.1 200 OK\r\nContent-Length: 20\r\n\r\n"); - 
codec_->dispatch(response); + auto status = codec_->dispatch(response); + EXPECT_TRUE(status.ok()); } -TEST_F(Http1ClientConnectionImplTest, 204Response) { +TEST_P(Http1ClientConnectionImplTest, 204Response) { initialize(); NiceMock response_decoder; @@ -1813,11 +2024,118 @@ TEST_F(Http1ClientConnectionImplTest, 204Response) { request_encoder.encodeHeaders(headers, true); EXPECT_CALL(response_decoder, decodeHeaders_(_, true)); - Buffer::OwnedImpl response("HTTP/1.1 204 OK\r\nContent-Length: 20\r\n\r\n"); - codec_->dispatch(response); + Buffer::OwnedImpl response("HTTP/1.1 204 OK\r\n\r\n"); + auto status = codec_->dispatch(response); + EXPECT_TRUE(status.ok()); } -TEST_F(Http1ClientConnectionImplTest, 100Response) { +// 204 No Content with Content-Length is barred by RFC 7230, Section 3.3.2. +TEST_P(Http1ClientConnectionImplTest, 204ResponseContentLengthNotAllowed) { + // By default, content-length is barred. + { + initialize(); + + NiceMock response_decoder; + Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder); + TestRequestHeaderMapImpl headers{{":method", "GET"}, {":path", "/"}, {":authority", "host"}}; + request_encoder.encodeHeaders(headers, true); + + Buffer::OwnedImpl response("HTTP/1.1 204 OK\r\nContent-Length: 20\r\n\r\n"); + auto status = codec_->dispatch(response); + EXPECT_FALSE(status.ok()); + } + + // Test with feature disabled: content-length allowed. 
+ { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.strict_1xx_and_204_response_headers", "false"}}); + + initialize(); + + NiceMock response_decoder; + Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder); + TestRequestHeaderMapImpl headers{{":method", "GET"}, {":path", "/"}, {":authority", "host"}}; + request_encoder.encodeHeaders(headers, true); + + Buffer::OwnedImpl response("HTTP/1.1 204 OK\r\nContent-Length: 20\r\n\r\n"); + auto status = codec_->dispatch(response); + EXPECT_TRUE(status.ok()); + } +} + +// 204 No Content with Content-Length: 0 is technically barred by RFC 7230, Section 3.3.2, but we +// allow it. +TEST_P(Http1ClientConnectionImplTest, 204ResponseWithContentLength0) { + { + initialize(); + + NiceMock response_decoder; + Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder); + TestRequestHeaderMapImpl headers{{":method", "GET"}, {":path", "/"}, {":authority", "host"}}; + request_encoder.encodeHeaders(headers, true); + + EXPECT_CALL(response_decoder, decodeHeaders_(_, true)); + Buffer::OwnedImpl response("HTTP/1.1 204 OK\r\nContent-Length: 0\r\n\r\n"); + auto status = codec_->dispatch(response); + EXPECT_TRUE(status.ok()); + } + + // Test with feature disabled: content-length allowed. 
+ { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.strict_1xx_and_204_response_headers", "false"}}); + + NiceMock response_decoder; + Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder); + TestRequestHeaderMapImpl headers{{":method", "GET"}, {":path", "/"}, {":authority", "host"}}; + request_encoder.encodeHeaders(headers, true); + + EXPECT_CALL(response_decoder, decodeHeaders_(_, true)); + Buffer::OwnedImpl response("HTTP/1.1 204 OK\r\nContent-Length: 0\r\n\r\n"); + auto status = codec_->dispatch(response); + EXPECT_TRUE(status.ok()); + } +} + +// 204 No Content with Transfer-Encoding headers is barred by RFC 7230, Section 3.3.1. +TEST_P(Http1ClientConnectionImplTest, 204ResponseTransferEncodingNotAllowed) { + // By default, transfer-encoding is barred. + { + initialize(); + + NiceMock response_decoder; + Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder); + TestRequestHeaderMapImpl headers{{":method", "GET"}, {":path", "/"}, {":authority", "host"}}; + request_encoder.encodeHeaders(headers, true); + + Buffer::OwnedImpl response("HTTP/1.1 204 OK\r\nTransfer-Encoding: chunked\r\n\r\n"); + auto status = codec_->dispatch(response); + EXPECT_FALSE(status.ok()); + } + + // Test with feature disabled: transfer-encoding allowed. 
+ { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.strict_1xx_and_204_response_headers", "false"}}); + + initialize(); + + NiceMock response_decoder; + Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder); + TestRequestHeaderMapImpl headers{{":method", "GET"}, {":path", "/"}, {":authority", "host"}}; + request_encoder.encodeHeaders(headers, true); + + Buffer::OwnedImpl response("HTTP/1.1 204 OK\r\nTransfer-Encoding: chunked\r\n\r\n"); + auto status = codec_->dispatch(response); + EXPECT_TRUE(status.ok()); + } +} + +// 100 response followed by 200 results in a [decode100ContinueHeaders, decodeHeaders] sequence. +TEST_P(Http1ClientConnectionImplTest, ContinueHeaders) { initialize(); NiceMock response_decoder; @@ -1828,28 +2146,122 @@ TEST_F(Http1ClientConnectionImplTest, 100Response) { EXPECT_CALL(response_decoder, decode100ContinueHeaders_(_)); EXPECT_CALL(response_decoder, decodeData(_, _)).Times(0); Buffer::OwnedImpl initial_response("HTTP/1.1 100 Continue\r\n\r\n"); - codec_->dispatch(initial_response); + auto status = codec_->dispatch(initial_response); + EXPECT_TRUE(status.ok()); EXPECT_CALL(response_decoder, decodeHeaders_(_, false)); EXPECT_CALL(response_decoder, decodeData(_, _)).Times(0); - Buffer::OwnedImpl response("HTTP/1.1 200 OK\r\nContent-Length: 20\r\n\r\n"); - codec_->dispatch(response); + Buffer::OwnedImpl response("HTTP/1.1 200 OK\r\n\r\n"); + status = codec_->dispatch(response); + EXPECT_TRUE(status.ok()); } -TEST_F(Http1ClientConnectionImplTest, BadEncodeParams) { +// Multiple 100 responses are passed to the response encoder (who is responsible for coalescing). 
+TEST_P(Http1ClientConnectionImplTest, MultipleContinueHeaders) { initialize(); NiceMock response_decoder; + Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder); + TestRequestHeaderMapImpl headers{{":method", "GET"}, {":path", "/"}, {":authority", "host"}}; + request_encoder.encodeHeaders(headers, true); + + EXPECT_CALL(response_decoder, decode100ContinueHeaders_(_)); + EXPECT_CALL(response_decoder, decodeData(_, _)).Times(0); + Buffer::OwnedImpl initial_response("HTTP/1.1 100 Continue\r\n\r\n"); + auto status = codec_->dispatch(initial_response); + EXPECT_TRUE(status.ok()); + + EXPECT_CALL(response_decoder, decode100ContinueHeaders_(_)); + EXPECT_CALL(response_decoder, decodeData(_, _)).Times(0); + Buffer::OwnedImpl another_100_response("HTTP/1.1 100 Continue\r\n\r\n"); + status = codec_->dispatch(another_100_response); + EXPECT_TRUE(status.ok()); + + EXPECT_CALL(response_decoder, decodeHeaders_(_, false)); + EXPECT_CALL(response_decoder, decodeData(_, _)).Times(0); + Buffer::OwnedImpl response("HTTP/1.1 200 OK\r\n\r\n"); + status = codec_->dispatch(response); + EXPECT_TRUE(status.ok()); +} - // Need to set :method and :path +// 101/102 headers etc. are passed to the response encoder (who is responsibly for deciding to +// upgrade, ignore, etc.). +TEST_P(Http1ClientConnectionImplTest, 1xxNonContinueHeaders) { + initialize(); + + NiceMock response_decoder; + Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder); + TestRequestHeaderMapImpl headers{{":method", "GET"}, {":path", "/"}, {":authority", "host"}}; + request_encoder.encodeHeaders(headers, true); + + EXPECT_CALL(response_decoder, decodeHeaders_(_, false)); + Buffer::OwnedImpl response("HTTP/1.1 102 Processing\r\n\r\n"); + auto status = codec_->dispatch(response); + EXPECT_TRUE(status.ok()); +} + +// 101 Switching Protocol with Transfer-Encoding headers is barred by RFC 7230, Section 3.3.1. 
+TEST_P(Http1ClientConnectionImplTest, 101ResponseTransferEncodingNotAllowed) { + // By default, transfer-encoding is barred. + { + initialize(); + + NiceMock response_decoder; + Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder); + TestRequestHeaderMapImpl headers{{":method", "GET"}, {":path", "/"}, {":authority", "host"}}; + request_encoder.encodeHeaders(headers, true); + + Buffer::OwnedImpl response( + "HTTP/1.1 101 Switching Protocols\r\nTransfer-Encoding: chunked\r\n\r\n"); + auto status = codec_->dispatch(response); + EXPECT_FALSE(status.ok()); + } + + // Test with feature disabled: transfer-encoding allowed. + { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.strict_1xx_and_204_response_headers", "false"}}); + + initialize(); + + NiceMock response_decoder; + Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder); + TestRequestHeaderMapImpl headers{{":method", "GET"}, {":path", "/"}, {":authority", "host"}}; + request_encoder.encodeHeaders(headers, true); + + Buffer::OwnedImpl response( + "HTTP/1.1 101 Switching Protocols\r\nTransfer-Encoding: chunked\r\n\r\n"); + auto status = codec_->dispatch(response); + EXPECT_TRUE(status.ok()); + } +} + +TEST_P(Http1ClientConnectionImplTest, BadEncodeParams) { + initialize(); + + NiceMock response_decoder; + + // Need to set :method and :path. + // New and legacy codecs will behave differently on errors from processing outbound data. The + // legacy codecs will throw an exception (that presently will be uncaught in contexts like + // sendLocalReply), while the new codecs temporarily RELEASE_ASSERT until Envoy handles errors on + // outgoing data. 
Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder); - EXPECT_THROW(request_encoder.encodeHeaders(TestRequestHeaderMapImpl{{":path", "/"}}, true), - CodecClientException); - EXPECT_THROW(request_encoder.encodeHeaders(TestRequestHeaderMapImpl{{":method", "GET"}}, true), - CodecClientException); + if (testingNewCodec()) { + EXPECT_DEATH(request_encoder.encodeHeaders(TestRequestHeaderMapImpl{{":path", "/"}}, true), + ":method and :path must be specified"); + EXPECT_DEATH(request_encoder.encodeHeaders(TestRequestHeaderMapImpl{{":method", "GET"}}, true), + ":method and :path must be specified"); + } else { + EXPECT_THROW(request_encoder.encodeHeaders(TestRequestHeaderMapImpl{{":path", "/"}}, true), + CodecClientException); + EXPECT_THROW(request_encoder.encodeHeaders(TestRequestHeaderMapImpl{{":method", "GET"}}, true), + CodecClientException); + } } -TEST_F(Http1ClientConnectionImplTest, NoContentLengthResponse) { +TEST_P(Http1ClientConnectionImplTest, NoContentLengthResponse) { initialize(); NiceMock response_decoder; @@ -1864,13 +2276,14 @@ TEST_F(Http1ClientConnectionImplTest, NoContentLengthResponse) { EXPECT_CALL(response_decoder, decodeData(BufferEqual(&expected_data2), true)); Buffer::OwnedImpl response("HTTP/1.1 200 OK\r\n\r\nHello World"); - codec_->dispatch(response); + auto status = codec_->dispatch(response); Buffer::OwnedImpl empty; - codec_->dispatch(empty); + status = codec_->dispatch(empty); + EXPECT_TRUE(status.ok()); } -TEST_F(Http1ClientConnectionImplTest, ResponseWithTrailers) { +TEST_P(Http1ClientConnectionImplTest, ResponseWithTrailers) { initialize(); NiceMock response_decoder; @@ -1880,11 +2293,12 @@ TEST_F(Http1ClientConnectionImplTest, ResponseWithTrailers) { Buffer::OwnedImpl response("HTTP/1.1 200 OK\r\n\r\ntransfer-encoding: chunked\r\n\r\nb\r\nHello " "World\r\n0\r\nhello: world\r\nsecond: header\r\n\r\n"); - codec_->dispatch(response); + auto status = codec_->dispatch(response); EXPECT_EQ(0UL, response.length()); + 
EXPECT_TRUE(status.ok()); } -TEST_F(Http1ClientConnectionImplTest, GiantPath) { +TEST_P(Http1ClientConnectionImplTest, GiantPath) { initialize(); NiceMock response_decoder; @@ -1895,19 +2309,21 @@ TEST_F(Http1ClientConnectionImplTest, GiantPath) { EXPECT_CALL(response_decoder, decodeHeaders_(_, false)); Buffer::OwnedImpl response("HTTP/1.1 200 OK\r\nContent-Length: 20\r\n\r\n"); - codec_->dispatch(response); + auto status = codec_->dispatch(response); + EXPECT_TRUE(status.ok()); } -TEST_F(Http1ClientConnectionImplTest, PrematureUpgradeResponse) { +TEST_P(Http1ClientConnectionImplTest, PrematureUpgradeResponse) { initialize(); // make sure upgradeAllowed doesn't cause crashes if run with no pending response. Buffer::OwnedImpl response( "HTTP/1.1 200 OK\r\nContent-Length: 5\r\nConnection: upgrade\r\nUpgrade: websocket\r\n\r\n"); - EXPECT_THROW(codec_->dispatch(response), PrematureResponseException); + auto status = codec_->dispatch(response); + EXPECT_TRUE(isPrematureResponseError(status)); } -TEST_F(Http1ClientConnectionImplTest, UpgradeResponse) { +TEST_P(Http1ClientConnectionImplTest, UpgradeResponse) { initialize(); InSequence s; @@ -1925,24 +2341,25 @@ TEST_F(Http1ClientConnectionImplTest, UpgradeResponse) { EXPECT_CALL(response_decoder, decodeHeaders_(_, false)); Buffer::OwnedImpl response( "HTTP/1.1 200 OK\r\nContent-Length: 5\r\nConnection: upgrade\r\nUpgrade: websocket\r\n\r\n"); - codec_->dispatch(response); + auto status = codec_->dispatch(response); // Send body payload Buffer::OwnedImpl expected_data1("12345"); Buffer::OwnedImpl body("12345"); EXPECT_CALL(response_decoder, decodeData(BufferEqual(&expected_data1), false)); - codec_->dispatch(body); + status = codec_->dispatch(body); // Send websocket payload Buffer::OwnedImpl expected_data2("abcd"); Buffer::OwnedImpl websocket_payload("abcd"); EXPECT_CALL(response_decoder, decodeData(BufferEqual(&expected_data2), false)); - codec_->dispatch(websocket_payload); + status = 
codec_->dispatch(websocket_payload); + EXPECT_TRUE(status.ok()); } // Same data as above, but make sure directDispatch immediately hands off any // outstanding data. -TEST_F(Http1ClientConnectionImplTest, UpgradeResponseWithEarlyData) { +TEST_P(Http1ClientConnectionImplTest, UpgradeResponseWithEarlyData) { initialize(); InSequence s; @@ -1962,10 +2379,11 @@ TEST_F(Http1ClientConnectionImplTest, UpgradeResponseWithEarlyData) { EXPECT_CALL(response_decoder, decodeData(BufferEqual(&expected_data), false)); Buffer::OwnedImpl response("HTTP/1.1 200 OK\r\nContent-Length: 5\r\nConnection: " "upgrade\r\nUpgrade: websocket\r\n\r\n12345abcd"); - codec_->dispatch(response); + auto status = codec_->dispatch(response); + EXPECT_TRUE(status.ok()); } -TEST_F(Http1ClientConnectionImplTest, ConnectResponse) { +TEST_P(Http1ClientConnectionImplTest, ConnectResponse) { initialize(); InSequence s; @@ -1978,24 +2396,25 @@ TEST_F(Http1ClientConnectionImplTest, ConnectResponse) { // Send response headers EXPECT_CALL(response_decoder, decodeHeaders_(_, false)); Buffer::OwnedImpl response("HTTP/1.1 200 OK\r\nContent-Length: 5\r\n\r\n"); - codec_->dispatch(response); + auto status = codec_->dispatch(response); // Send body payload Buffer::OwnedImpl expected_data1("12345"); Buffer::OwnedImpl body("12345"); EXPECT_CALL(response_decoder, decodeData(BufferEqual(&expected_data1), false)); - codec_->dispatch(body); + status = codec_->dispatch(body); // Send connect payload Buffer::OwnedImpl expected_data2("abcd"); Buffer::OwnedImpl connect_payload("abcd"); EXPECT_CALL(response_decoder, decodeData(BufferEqual(&expected_data2), false)); - codec_->dispatch(connect_payload); + status = codec_->dispatch(connect_payload); + EXPECT_TRUE(status.ok()); } // Same data as above, but make sure directDispatch immediately hands off any // outstanding data. 
-TEST_F(Http1ClientConnectionImplTest, ConnectResponseWithEarlyData) { +TEST_P(Http1ClientConnectionImplTest, ConnectResponseWithEarlyData) { initialize(); InSequence s; @@ -2010,10 +2429,11 @@ TEST_F(Http1ClientConnectionImplTest, ConnectResponseWithEarlyData) { Buffer::OwnedImpl expected_data("12345abcd"); EXPECT_CALL(response_decoder, decodeData(BufferEqual(&expected_data), false)).Times(1); Buffer::OwnedImpl response("HTTP/1.1 200 OK\r\n\r\n12345abcd"); - codec_->dispatch(response); + auto status = codec_->dispatch(response); + EXPECT_TRUE(status.ok()); } -TEST_F(Http1ClientConnectionImplTest, ConnectRejected) { +TEST_P(Http1ClientConnectionImplTest, ConnectRejected) { initialize(); InSequence s; @@ -2027,10 +2447,11 @@ TEST_F(Http1ClientConnectionImplTest, ConnectRejected) { Buffer::OwnedImpl expected_data("12345abcd"); EXPECT_CALL(response_decoder, decodeData(BufferEqual(&expected_data), false)); Buffer::OwnedImpl response("HTTP/1.1 400 OK\r\n\r\n12345abcd"); - codec_->dispatch(response); + auto status = codec_->dispatch(response); + EXPECT_TRUE(status.ok()); } -TEST_F(Http1ClientConnectionImplTest, WatermarkTest) { +TEST_P(Http1ClientConnectionImplTest, WatermarkTest) { EXPECT_CALL(connection_, bufferLimit()).WillOnce(Return(10)); initialize(); @@ -2065,7 +2486,7 @@ TEST_F(Http1ClientConnectionImplTest, WatermarkTest) { // caller attempts to close the connection. This causes the network connection to attempt to write // pending data, even in the no flush scenario, which can cause us to go below low watermark // which then raises callbacks for a stream that no longer exists. 
-TEST_F(Http1ClientConnectionImplTest, HighwatermarkMultipleResponses) { +TEST_P(Http1ClientConnectionImplTest, HighwatermarkMultipleResponses) { initialize(); InSequence s; @@ -2085,10 +2506,11 @@ TEST_F(Http1ClientConnectionImplTest, HighwatermarkMultipleResponses) { EXPECT_CALL(response_decoder, decodeHeaders_(_, true)); Buffer::OwnedImpl response("HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n"); - codec_->dispatch(response); + auto status = codec_->dispatch(response); Buffer::OwnedImpl response2("HTTP/1.1 400 Bad Request\r\nContent-Length: 0\r\n\r\n"); - EXPECT_THROW(codec_->dispatch(response2), PrematureResponseException); + status = codec_->dispatch(response2); + EXPECT_TRUE(isPrematureResponseError(status)); // Fake a call for going below the low watermark. Make sure no stream callbacks get called. EXPECT_CALL(stream_callbacks, onBelowWriteBufferLowWatermark()).Times(0); @@ -2098,7 +2520,7 @@ TEST_F(Http1ClientConnectionImplTest, HighwatermarkMultipleResponses) { // Regression test for https://github.com/envoyproxy/envoy/issues/10655. Make sure we correctly // handle going below low watermark when closing the connection during a completion callback. 
-TEST_F(Http1ClientConnectionImplTest, LowWatermarkDuringClose) { +TEST_P(Http1ClientConnectionImplTest, LowWatermarkDuringClose) { initialize(); InSequence s; @@ -2124,46 +2546,81 @@ TEST_F(Http1ClientConnectionImplTest, LowWatermarkDuringClose) { ->onUnderlyingConnectionBelowWriteBufferLowWatermark(); })); Buffer::OwnedImpl response("HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n"); - codec_->dispatch(response); + auto status = codec_->dispatch(response); + EXPECT_TRUE(status.ok()); } -TEST_F(Http1ServerConnectionImplTest, LargeTrailersRejected) { +TEST_P(Http1ServerConnectionImplTest, LargeTrailersRejected) { // Default limit of 60 KiB - std::string long_string = "big: " + std::string(60 * 1024, 'q') + "\r\n"; + std::string long_string = "big: " + std::string(60 * 1024, 'q') + "\r\n\r\n\r\n"; + testTrailersExceedLimit(long_string, true); +} + +TEST_P(Http1ServerConnectionImplTest, LargeTrailerFieldRejected) { + // Construct partial headers with a long field name that exceeds the default limit of 60KiB. + std::string long_string = "bigfield" + std::string(60 * 1024, 'q'); testTrailersExceedLimit(long_string, true); } // Tests that the default limit for the number of request headers is 100. -TEST_F(Http1ServerConnectionImplTest, ManyTrailersRejected) { +TEST_P(Http1ServerConnectionImplTest, ManyTrailersRejected) { // Send a request with 101 headers. 
- testTrailersExceedLimit(createHeaderFragment(101), true); + testTrailersExceedLimit(createHeaderFragment(101) + "\r\n\r\n", true); } -TEST_F(Http1ServerConnectionImplTest, LargeTrailersRejectedIgnored) { +TEST_P(Http1ServerConnectionImplTest, LargeTrailersRejectedIgnored) { // Default limit of 60 KiB - std::string long_string = "big: " + std::string(60 * 1024, 'q') + "\r\n"; + std::string long_string = "big: " + std::string(60 * 1024, 'q') + "\r\n\r\n\r\n"; + testTrailersExceedLimit(long_string, false); +} + +TEST_P(Http1ServerConnectionImplTest, LargeTrailerFieldRejectedIgnored) { + // Default limit of 60 KiB + std::string long_string = "bigfield" + std::string(60 * 1024, 'q') + ": value\r\n\r\n\r\n"; testTrailersExceedLimit(long_string, false); } // Tests that the default limit for the number of request headers is 100. -TEST_F(Http1ServerConnectionImplTest, ManyTrailersIgnored) { +TEST_P(Http1ServerConnectionImplTest, ManyTrailersIgnored) { // Send a request with 101 headers. - testTrailersExceedLimit(createHeaderFragment(101), false); + testTrailersExceedLimit(createHeaderFragment(101) + "\r\n\r\n", false); +} + +TEST_P(Http1ServerConnectionImplTest, LargeRequestUrlRejected) { + initialize(); + + std::string exception_reason; + NiceMock decoder; + Http::ResponseEncoder* response_encoder = nullptr; + EXPECT_CALL(callbacks_, newStream(_, _)) + .WillOnce(Invoke([&](ResponseEncoder& encoder, bool) -> RequestDecoder& { + response_encoder = &encoder; + return decoder; + })); + + // Default limit of 60 KiB + std::string long_url = "/" + std::string(60 * 1024, 'q'); + Buffer::OwnedImpl buffer("GET " + long_url + " HTTP/1.1\r\n"); + + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(isCodecProtocolError(status)); + EXPECT_EQ(status.message(), "headers size exceeds limit"); + EXPECT_EQ("http1.headers_too_large", response_encoder->getStream().responseDetails()); } -TEST_F(Http1ServerConnectionImplTest, LargeRequestHeadersRejected) { 
+TEST_P(Http1ServerConnectionImplTest, LargeRequestHeadersRejected) { // Default limit of 60 KiB std::string long_string = "big: " + std::string(60 * 1024, 'q') + "\r\n"; - testRequestHeadersExceedLimit(long_string); + testRequestHeadersExceedLimit(long_string, ""); } // Tests that the default limit for the number of request headers is 100. -TEST_F(Http1ServerConnectionImplTest, ManyRequestHeadersRejected) { +TEST_P(Http1ServerConnectionImplTest, ManyRequestHeadersRejected) { // Send a request with 101 headers. testRequestHeadersExceedLimit(createHeaderFragment(101), "http1.too_many_headers"); } -TEST_F(Http1ServerConnectionImplTest, LargeRequestHeadersSplitRejected) { +TEST_P(Http1ServerConnectionImplTest, LargeRequestHeadersSplitRejected) { // Default limit of 60 KiB initialize(); @@ -2176,22 +2633,25 @@ TEST_F(Http1ServerConnectionImplTest, LargeRequestHeadersSplitRejected) { return decoder; })); Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\n"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); std::string long_string = std::string(1024, 'q'); for (int i = 0; i < 59; i++) { buffer = Buffer::OwnedImpl(fmt::format("big: {}\r\n", long_string)); - codec_->dispatch(buffer); + status = codec_->dispatch(buffer); } // the 60th 1kb header should induce overflow buffer = Buffer::OwnedImpl(fmt::format("big: {}\r\n", long_string)); - EXPECT_THROW_WITH_MESSAGE(codec_->dispatch(buffer), EnvoyException, "headers size exceeds limit"); + EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _)); + status = codec_->dispatch(buffer); + EXPECT_TRUE(isCodecProtocolError(status)); + EXPECT_EQ(status.message(), "headers size exceeds limit"); EXPECT_EQ("http1.headers_too_large", response_encoder->getStream().responseDetails()); } // Tests that the 101th request header causes overflow with the default max number of request // headers. 
-TEST_F(Http1ServerConnectionImplTest, ManyRequestHeadersSplitRejected) { +TEST_P(Http1ServerConnectionImplTest, ManyRequestHeadersSplitRejected) { // Default limit of 100. initialize(); @@ -2204,38 +2664,41 @@ TEST_F(Http1ServerConnectionImplTest, ManyRequestHeadersSplitRejected) { return decoder; })); Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\n"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); // Dispatch 100 headers. buffer = Buffer::OwnedImpl(createHeaderFragment(100)); - codec_->dispatch(buffer); + status = codec_->dispatch(buffer); // The final 101th header should induce overflow. buffer = Buffer::OwnedImpl("header101:\r\n\r\n"); - EXPECT_THROW_WITH_MESSAGE(codec_->dispatch(buffer), EnvoyException, "headers size exceeds limit"); + EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _)); + status = codec_->dispatch(buffer); + EXPECT_TRUE(isCodecProtocolError(status)); + EXPECT_EQ(status.message(), "headers size exceeds limit"); } -TEST_F(Http1ServerConnectionImplTest, LargeRequestHeadersAccepted) { +TEST_P(Http1ServerConnectionImplTest, LargeRequestHeadersAccepted) { max_request_headers_kb_ = 65; std::string long_string = "big: " + std::string(64 * 1024, 'q') + "\r\n"; testRequestHeadersAccepted(long_string); } -TEST_F(Http1ServerConnectionImplTest, LargeRequestHeadersAcceptedMaxConfigurable) { +TEST_P(Http1ServerConnectionImplTest, LargeRequestHeadersAcceptedMaxConfigurable) { max_request_headers_kb_ = 96; std::string long_string = "big: " + std::string(95 * 1024, 'q') + "\r\n"; testRequestHeadersAccepted(long_string); } // Tests that the number of request headers is configurable. -TEST_F(Http1ServerConnectionImplTest, ManyRequestHeadersAccepted) { +TEST_P(Http1ServerConnectionImplTest, ManyRequestHeadersAccepted) { max_request_headers_count_ = 150; // Create a request with 150 headers. testRequestHeadersAccepted(createHeaderFragment(150)); } -// Tests that response headers of 80 kB fails. 
-TEST_F(Http1ClientConnectionImplTest, LargeResponseHeadersRejected) { +// Tests that incomplete response headers of 80 kB header value fails. +TEST_P(Http1ClientConnectionImplTest, ResponseHeadersWithLargeValueRejected) { initialize(); NiceMock response_decoder; @@ -2244,14 +2707,37 @@ TEST_F(Http1ClientConnectionImplTest, LargeResponseHeadersRejected) { request_encoder.encodeHeaders(headers, true); Buffer::OwnedImpl buffer("HTTP/1.1 200 OK\r\nContent-Length: 0\r\n"); - codec_->dispatch(buffer); - std::string long_header = "big: " + std::string(80 * 1024, 'q') + "\r\n"; + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); + std::string long_header = "big: " + std::string(80 * 1024, 'q'); buffer = Buffer::OwnedImpl(long_header); - EXPECT_THROW_WITH_MESSAGE(codec_->dispatch(buffer), EnvoyException, "headers size exceeds limit"); + status = codec_->dispatch(buffer); + EXPECT_TRUE(isCodecProtocolError(status)); + EXPECT_EQ(status.message(), "headers size exceeds limit"); +} + +// Tests that incomplete response headers with a 80 kB header field fails. +TEST_P(Http1ClientConnectionImplTest, ResponseHeadersWithLargeFieldRejected) { + initialize(); + + NiceMock decoder; + NiceMock response_decoder; + Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder); + TestRequestHeaderMapImpl headers{{":method", "GET"}, {":path", "/"}, {":authority", "host"}}; + request_encoder.encodeHeaders(headers, true); + + Buffer::OwnedImpl buffer("HTTP/1.1 200 OK\r\nContent-Length: 0\r\n"); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); + std::string long_header = "big: " + std::string(80 * 1024, 'q'); + buffer = Buffer::OwnedImpl(long_header); + status = codec_->dispatch(buffer); + EXPECT_TRUE(isCodecProtocolError(status)); + EXPECT_EQ(status.message(), "headers size exceeds limit"); } // Tests that the size of response headers for HTTP/1 must be under 80 kB. 
-TEST_F(Http1ClientConnectionImplTest, LargeResponseHeadersAccepted) { +TEST_P(Http1ClientConnectionImplTest, LargeResponseHeadersAccepted) { initialize(); NiceMock response_decoder; @@ -2260,15 +2746,16 @@ TEST_F(Http1ClientConnectionImplTest, LargeResponseHeadersAccepted) { request_encoder.encodeHeaders(headers, true); Buffer::OwnedImpl buffer("HTTP/1.1 200 OK\r\nContent-Length: 0\r\n"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); std::string long_header = "big: " + std::string(79 * 1024, 'q') + "\r\n"; buffer = Buffer::OwnedImpl(long_header); - codec_->dispatch(buffer); + status = codec_->dispatch(buffer); } // Regression test for CVE-2019-18801. Large method headers should not trigger // ASSERTs or ASAN, which they previously did. -TEST_F(Http1ClientConnectionImplTest, LargeMethodRequestEncode) { +TEST_P(Http1ClientConnectionImplTest, LargeMethodRequestEncode) { initialize(); NiceMock response_decoder; @@ -2286,7 +2773,7 @@ TEST_F(Http1ClientConnectionImplTest, LargeMethodRequestEncode) { // in CVE-2019-18801, but the related code does explicit size calculations on // both path and method (these are the two distinguished headers). So, // belt-and-braces. -TEST_F(Http1ClientConnectionImplTest, LargePathRequestEncode) { +TEST_P(Http1ClientConnectionImplTest, LargePathRequestEncode) { initialize(); NiceMock response_decoder; @@ -2302,7 +2789,7 @@ TEST_F(Http1ClientConnectionImplTest, LargePathRequestEncode) { // As with LargeMethodEncode, but for an arbitrary header. This was not an issue // in CVE-2019-18801. -TEST_F(Http1ClientConnectionImplTest, LargeHeaderRequestEncode) { +TEST_P(Http1ClientConnectionImplTest, LargeHeaderRequestEncode) { initialize(); NiceMock response_decoder; @@ -2319,7 +2806,7 @@ TEST_F(Http1ClientConnectionImplTest, LargeHeaderRequestEncode) { } // Exception called when the number of response headers exceeds the default value of 100. 
-TEST_F(Http1ClientConnectionImplTest, ManyResponseHeadersRejected) { +TEST_P(Http1ClientConnectionImplTest, ManyResponseHeadersRejected) { initialize(); NiceMock response_decoder; @@ -2328,13 +2815,16 @@ TEST_F(Http1ClientConnectionImplTest, ManyResponseHeadersRejected) { request_encoder.encodeHeaders(headers, true); Buffer::OwnedImpl buffer("HTTP/1.1 200 OK\r\nContent-Length: 0\r\n"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); buffer = Buffer::OwnedImpl(createHeaderFragment(101) + "\r\n"); - EXPECT_THROW_WITH_MESSAGE(codec_->dispatch(buffer), EnvoyException, "headers size exceeds limit"); + + status = codec_->dispatch(buffer); + EXPECT_TRUE(isCodecProtocolError(status)); + EXPECT_EQ(status.message(), "headers size exceeds limit"); } // Tests that the number of response headers is configurable. -TEST_F(Http1ClientConnectionImplTest, ManyResponseHeadersAccepted) { +TEST_P(Http1ClientConnectionImplTest, ManyResponseHeadersAccepted) { max_response_headers_count_ = 152; initialize(); @@ -2345,12 +2835,11 @@ TEST_F(Http1ClientConnectionImplTest, ManyResponseHeadersAccepted) { request_encoder.encodeHeaders(headers, true); Buffer::OwnedImpl buffer("HTTP/1.1 200 OK\r\nContent-Length: 0\r\n"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); // Response already contains one header. 
buffer = Buffer::OwnedImpl(createHeaderFragment(150) + "\r\n"); - codec_->dispatch(buffer); + status = codec_->dispatch(buffer); } -} // namespace Http1 } // namespace Http } // namespace Envoy diff --git a/test/common/http/http1/conn_pool_legacy_test.cc b/test/common/http/http1/conn_pool_legacy_test.cc deleted file mode 100644 index c657b9529c2ee..0000000000000 --- a/test/common/http/http1/conn_pool_legacy_test.cc +++ /dev/null @@ -1,972 +0,0 @@ -#include -#include - -#include "envoy/http/codec.h" - -#include "common/buffer/buffer_impl.h" -#include "common/event/dispatcher_impl.h" -#include "common/http/codec_client.h" -#include "common/http/http1/conn_pool_legacy.h" -#include "common/network/utility.h" -#include "common/upstream/upstream_impl.h" - -#include "test/common/http/common.h" -#include "test/common/upstream/utility.h" -#include "test/mocks/buffer/mocks.h" -#include "test/mocks/event/mocks.h" -#include "test/mocks/http/mocks.h" -#include "test/mocks/network/mocks.h" -#include "test/mocks/runtime/mocks.h" -#include "test/mocks/upstream/mocks.h" -#include "test/test_common/printers.h" -#include "test/test_common/simulated_time_system.h" -#include "test/test_common/utility.h" - -#include "gmock/gmock.h" -#include "gtest/gtest.h" - -using testing::_; -using testing::DoAll; -using testing::InSequence; -using testing::Invoke; -using testing::NiceMock; -using testing::Property; -using testing::Return; -using testing::ReturnRef; -using testing::SaveArg; - -namespace Envoy { -namespace Http { -namespace Legacy { -namespace Http1 { -namespace { - -/** - * A test version of ConnPoolImpl that allows for mocking beneath the codec clients. 
- */ -class ConnPoolImplForTest : public ConnPoolImpl { -public: - ConnPoolImplForTest(Event::MockDispatcher& dispatcher, - Upstream::ClusterInfoConstSharedPtr cluster, - NiceMock* upstream_ready_timer) - : ConnPoolImpl(dispatcher, Upstream::makeTestHost(cluster, "tcp://127.0.0.1:9000"), - Upstream::ResourcePriority::Default, nullptr, nullptr), - api_(Api::createApiForTest()), mock_dispatcher_(dispatcher), - mock_upstream_ready_timer_(upstream_ready_timer) {} - - ~ConnPoolImplForTest() override { - EXPECT_EQ(0U, ready_clients_.size()); - EXPECT_EQ(0U, busy_clients_.size()); - EXPECT_EQ(0U, pending_requests_.size()); - } - - struct TestCodecClient { - Http::MockClientConnection* codec_; - Network::MockClientConnection* connection_; - CodecClient* codec_client_; - Event::MockTimer* connect_timer_; - Event::DispatcherPtr client_dispatcher_; - }; - - CodecClientPtr createCodecClient(Upstream::Host::CreateConnectionData& data) override { - // We expect to own the connection, but already have it, so just release it to prevent it from - // getting deleted. 
- data.connection_.release(); - return CodecClientPtr{createCodecClient_()}; - } - - MOCK_METHOD0(createCodecClient_, CodecClient*()); - MOCK_METHOD0(onClientDestroy, void()); - - void expectClientCreate(Protocol protocol = Protocol::Http11) { - test_clients_.emplace_back(); - TestCodecClient& test_client = test_clients_.back(); - test_client.connection_ = new NiceMock(); - test_client.codec_ = new NiceMock(); - test_client.connect_timer_ = new NiceMock(&mock_dispatcher_); - std::shared_ptr cluster{new NiceMock()}; - test_client.client_dispatcher_ = api_->allocateDispatcher("test_thread"); - Network::ClientConnectionPtr connection{test_client.connection_}; - test_client.codec_client_ = new CodecClientForTest( - CodecClient::Type::HTTP1, std::move(connection), test_client.codec_, - [this](CodecClient* codec_client) -> void { - for (auto i = test_clients_.begin(); i != test_clients_.end(); i++) { - if (i->codec_client_ == codec_client) { - onClientDestroy(); - test_clients_.erase(i); - return; - } - } - }, - Upstream::makeTestHost(cluster, "tcp://127.0.0.1:9000"), *test_client.client_dispatcher_); - EXPECT_CALL(*test_client.connect_timer_, enableTimer(_, _)); - EXPECT_CALL(mock_dispatcher_, createClientConnection_(_, _, _, _)) - .WillOnce(Return(test_client.connection_)); - EXPECT_CALL(*this, createCodecClient_()).WillOnce(Return(test_client.codec_client_)); - ON_CALL(*test_client.codec_, protocol()).WillByDefault(Return(protocol)); - } - - void expectEnableUpstreamReady() { - EXPECT_FALSE(upstream_ready_enabled_); - EXPECT_CALL(*mock_upstream_ready_timer_, enableTimer(_, _)).Times(1).RetiresOnSaturation(); - } - - void expectAndRunUpstreamReady() { - EXPECT_TRUE(upstream_ready_enabled_); - mock_upstream_ready_timer_->invokeCallback(); - EXPECT_FALSE(upstream_ready_enabled_); - } - - Api::ApiPtr api_; - Event::MockDispatcher& mock_dispatcher_; - NiceMock* mock_upstream_ready_timer_; - std::vector test_clients_; -}; - -/** - * Test fixture for all connection pool 
tests. - */ -class Http1ConnPoolImplLegacyTest : public testing::Test { -public: - Http1ConnPoolImplLegacyTest() - : upstream_ready_timer_(new NiceMock(&dispatcher_)), - conn_pool_(dispatcher_, cluster_, upstream_ready_timer_) {} - - ~Http1ConnPoolImplLegacyTest() override { - EXPECT_TRUE(TestUtility::gaugesZeroed(cluster_->stats_store_.gauges())); - } - - NiceMock dispatcher_; - std::shared_ptr cluster_{new NiceMock()}; - NiceMock* upstream_ready_timer_; - ConnPoolImplForTest conn_pool_; - NiceMock runtime_; -}; - -/** - * Helper for dealing with an active test request. - */ -struct ActiveTestRequest { - enum class Type { Pending, CreateConnection, Immediate }; - - ActiveTestRequest(Http1ConnPoolImplLegacyTest& parent, size_t client_index, Type type) - : parent_(parent), client_index_(client_index) { - uint64_t active_rq_observed = - parent_.cluster_->resourceManager(Upstream::ResourcePriority::Default).requests().count(); - uint64_t current_rq_total = parent_.cluster_->stats_.upstream_rq_total_.value(); - if (type == Type::CreateConnection) { - parent.conn_pool_.expectClientCreate(); - } - - if (type == Type::Immediate) { - expectNewStream(); - } - - handle_ = parent.conn_pool_.newStream(outer_decoder_, callbacks_); - - if (type == Type::Immediate) { - EXPECT_EQ(nullptr, handle_); - } else { - EXPECT_NE(nullptr, handle_); - } - - if (type == Type::CreateConnection) { - EXPECT_CALL(*parent_.conn_pool_.test_clients_[client_index_].connect_timer_, disableTimer()); - expectNewStream(); - parent.conn_pool_.test_clients_[client_index_].connection_->raiseEvent( - Network::ConnectionEvent::Connected); - } - if (type != Type::Pending) { - EXPECT_EQ(current_rq_total + 1, parent_.cluster_->stats_.upstream_rq_total_.value()); - EXPECT_EQ(active_rq_observed + 1, - parent_.cluster_->resourceManager(Upstream::ResourcePriority::Default) - .requests() - .count()); - } - } - - void completeResponse(bool with_body) { - // Test additional metric writes also. 
- Http::ResponseHeaderMapPtr response_headers( - new TestResponseHeaderMapImpl{{":status", "200"}, {"x-envoy-upstream-canary", "true"}}); - - inner_decoder_->decodeHeaders(std::move(response_headers), !with_body); - if (with_body) { - Buffer::OwnedImpl data; - inner_decoder_->decodeData(data, true); - } - } - - void expectNewStream() { - EXPECT_CALL(*parent_.conn_pool_.test_clients_[client_index_].codec_, newStream(_)) - .WillOnce(DoAll(SaveArgAddress(&inner_decoder_), ReturnRef(request_encoder_))); - EXPECT_CALL(callbacks_.pool_ready_, ready()); - } - - void startRequest() { - callbacks_.outer_encoder_->encodeHeaders( - TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); - } - - Http1ConnPoolImplLegacyTest& parent_; - size_t client_index_; - NiceMock outer_decoder_; - Http::ConnectionPool::Cancellable* handle_{}; - NiceMock request_encoder_; - Http::ResponseDecoder* inner_decoder_{}; - ConnPoolCallbacks callbacks_; -}; - -/** - * Verify that the pool's host is a member of the cluster the pool was constructed with. - */ -TEST_F(Http1ConnPoolImplLegacyTest, Host) { - EXPECT_EQ(cluster_.get(), &conn_pool_.host()->cluster()); -} - -/** - * Verify that connections are drained when requested. - */ -TEST_F(Http1ConnPoolImplLegacyTest, DrainConnections) { - cluster_->resetResourceManager(2, 1024, 1024, 1, 1); - InSequence s; - - ActiveTestRequest r1(*this, 0, ActiveTestRequest::Type::CreateConnection); - r1.startRequest(); - - ActiveTestRequest r2(*this, 1, ActiveTestRequest::Type::CreateConnection); - r2.startRequest(); - - r1.completeResponse(false); - - // This will destroy the ready client and set requests remaining to 1 on the busy client. - conn_pool_.drainConnections(); - EXPECT_CALL(conn_pool_, onClientDestroy()); - dispatcher_.clearDeferredDeleteList(); - - // This will destroy the busy client when the response finishes. 
- r2.completeResponse(false); - EXPECT_CALL(conn_pool_, onClientDestroy()); - dispatcher_.clearDeferredDeleteList(); -} - -/** - * Test all timing stats are set. - */ -TEST_F(Http1ConnPoolImplLegacyTest, VerifyTimingStats) { - EXPECT_CALL(cluster_->stats_store_, - deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_cx_connect_ms"), _)); - EXPECT_CALL(cluster_->stats_store_, - deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_cx_length_ms"), _)); - - ActiveTestRequest r1(*this, 0, ActiveTestRequest::Type::CreateConnection); - r1.startRequest(); - r1.completeResponse(false); - - EXPECT_CALL(conn_pool_, onClientDestroy()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - dispatcher_.clearDeferredDeleteList(); -} - -/** - * Test that buffer limits are set. - */ -TEST_F(Http1ConnPoolImplLegacyTest, VerifyBufferLimits) { - NiceMock outer_decoder; - ConnPoolCallbacks callbacks; - conn_pool_.expectClientCreate(); - EXPECT_CALL(*cluster_, perConnectionBufferLimitBytes()).WillOnce(Return(8192)); - EXPECT_CALL(*conn_pool_.test_clients_.back().connection_, setBufferLimits(8192)); - Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); - EXPECT_NE(nullptr, handle); - - EXPECT_CALL(conn_pool_, onClientDestroy()); - EXPECT_CALL(callbacks.pool_failure_, ready()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - dispatcher_.clearDeferredDeleteList(); -} - -/** - * Verify that canceling pending connections within the callback works. - */ -TEST_F(Http1ConnPoolImplLegacyTest, VerifyCancelInCallback) { - Http::ConnectionPool::Cancellable* handle1{}; - // In this scenario, all connections must succeed, so when - // one fails, the others are canceled. - // Note: We rely on the fact that the implementation cancels the second request first, - // to simplify the test. 
- ConnPoolCallbacks callbacks1; - EXPECT_CALL(callbacks1.pool_failure_, ready()).Times(0); - ConnPoolCallbacks callbacks2; - EXPECT_CALL(callbacks2.pool_failure_, ready()).WillOnce(Invoke([&]() -> void { - handle1->cancel(); - })); - - NiceMock outer_decoder; - // Create the first client. - conn_pool_.expectClientCreate(); - handle1 = conn_pool_.newStream(outer_decoder, callbacks1); - ASSERT_NE(nullptr, handle1); - - // Create the second client. - Http::ConnectionPool::Cancellable* handle2 = conn_pool_.newStream(outer_decoder, callbacks2); - ASSERT_NE(nullptr, handle2); - - // Simulate connection failure. - EXPECT_CALL(conn_pool_, onClientDestroy()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - dispatcher_.clearDeferredDeleteList(); -} - -/** - * Tests a request that generates a new connection, completes, and then a second request that uses - * the same connection. - */ -TEST_F(Http1ConnPoolImplLegacyTest, MultipleRequestAndResponse) { - InSequence s; - - // Request 1 should kick off a new connection. - ActiveTestRequest r1(*this, 0, ActiveTestRequest::Type::CreateConnection); - r1.startRequest(); - r1.completeResponse(false); - - // Request 2 should not. - ActiveTestRequest r2(*this, 0, ActiveTestRequest::Type::Immediate); - r2.startRequest(); - r2.completeResponse(true); - - // Cause the connection to go away. - EXPECT_CALL(conn_pool_, onClientDestroy()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - dispatcher_.clearDeferredDeleteList(); -} - -/** - * Test when we overflow max pending requests. 
- */ -TEST_F(Http1ConnPoolImplLegacyTest, MaxPendingRequests) { - cluster_->resetResourceManager(1, 1, 1024, 1, 1); - - EXPECT_EQ(0U, cluster_->circuit_breakers_stats_.rq_pending_open_.value()); - - NiceMock outer_decoder; - ConnPoolCallbacks callbacks; - conn_pool_.expectClientCreate(); - Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); - EXPECT_NE(nullptr, handle); - - NiceMock outer_decoder2; - ConnPoolCallbacks callbacks2; - EXPECT_CALL(callbacks2.pool_failure_, ready()); - Http::ConnectionPool::Cancellable* handle2 = conn_pool_.newStream(outer_decoder2, callbacks2); - EXPECT_EQ(nullptr, handle2); - - EXPECT_EQ(1U, cluster_->circuit_breakers_stats_.rq_pending_open_.value()); - - handle->cancel(); - - EXPECT_CALL(conn_pool_, onClientDestroy()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - dispatcher_.clearDeferredDeleteList(); - - EXPECT_EQ(1U, cluster_->stats_.upstream_rq_pending_overflow_.value()); -} - -/** - * Tests a connection failure before a request is bound which should result in the pending request - * getting purged. - */ -TEST_F(Http1ConnPoolImplLegacyTest, ConnectFailure) { - InSequence s; - - // Request 1 should kick off a new connection. 
- NiceMock outer_decoder; - ConnPoolCallbacks callbacks; - conn_pool_.expectClientCreate(); - Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); - EXPECT_NE(nullptr, handle); - - EXPECT_CALL(callbacks.pool_failure_, ready()); - EXPECT_CALL(*conn_pool_.test_clients_[0].connect_timer_, disableTimer()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - EXPECT_CALL(conn_pool_, onClientDestroy()); - dispatcher_.clearDeferredDeleteList(); - - EXPECT_EQ(1U, cluster_->stats_.upstream_cx_connect_fail_.value()); - EXPECT_EQ(1U, cluster_->stats_.upstream_rq_pending_failure_eject_.value()); -} - -/** - * Tests that connection creation time is recorded correctly even in cases where - * there are multiple pending connection creation attempts to the same upstream. - */ -TEST_F(Http1ConnPoolImplLegacyTest, MeasureConnectTime) { - constexpr uint64_t sleep1_ms = 20; - constexpr uint64_t sleep2_ms = 10; - constexpr uint64_t sleep3_ms = 5; - Event::SimulatedTimeSystem simulated_time; - - // Allow concurrent creation of 2 upstream connections. - cluster_->resetResourceManager(2, 1024, 1024, 1, 1); - - InSequence s; - - // Start the first connect attempt. - conn_pool_.expectClientCreate(); - ActiveTestRequest r1(*this, 0, ActiveTestRequest::Type::Pending); - - // Move time forward and start the second connect attempt. - simulated_time.advanceTimeWait(std::chrono::milliseconds(sleep1_ms)); - conn_pool_.expectClientCreate(); - ActiveTestRequest r2(*this, 1, ActiveTestRequest::Type::Pending); - - // Move time forward, signal that the first connect completed and verify the time to connect. 
- uint64_t upstream_cx_connect_ms1 = 0; - simulated_time.advanceTimeWait(std::chrono::milliseconds(sleep2_ms)); - EXPECT_CALL(*conn_pool_.test_clients_[0].connect_timer_, disableTimer()); - EXPECT_CALL(cluster_->stats_store_, - deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_cx_connect_ms"), _)) - .WillOnce(SaveArg<1>(&upstream_cx_connect_ms1)); - r1.expectNewStream(); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); - EXPECT_EQ(sleep1_ms + sleep2_ms, upstream_cx_connect_ms1); - - // Move time forward, signal that the second connect completed and verify the time to connect. - uint64_t upstream_cx_connect_ms2 = 0; - simulated_time.advanceTimeWait(std::chrono::milliseconds(sleep3_ms)); - EXPECT_CALL(*conn_pool_.test_clients_[1].connect_timer_, disableTimer()); - EXPECT_CALL(cluster_->stats_store_, - deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_cx_connect_ms"), _)) - .WillOnce(SaveArg<1>(&upstream_cx_connect_ms2)); - r2.expectNewStream(); - conn_pool_.test_clients_[1].connection_->raiseEvent(Network::ConnectionEvent::Connected); - EXPECT_EQ(sleep2_ms + sleep3_ms, upstream_cx_connect_ms2); - - // Cleanup, cause the connections to go away. - for (auto& test_client : conn_pool_.test_clients_) { - EXPECT_CALL(conn_pool_, onClientDestroy()); - EXPECT_CALL( - cluster_->stats_store_, - deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_cx_length_ms"), _)); - test_client.connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - } - dispatcher_.clearDeferredDeleteList(); -} - -/** - * Tests a connect timeout. Also test that we can add a new request during ejection processing. - */ -TEST_F(Http1ConnPoolImplLegacyTest, ConnectTimeout) { - InSequence s; - - // Request 1 should kick off a new connection. 
- NiceMock outer_decoder1; - ConnPoolCallbacks callbacks1; - conn_pool_.expectClientCreate(); - EXPECT_NE(nullptr, conn_pool_.newStream(outer_decoder1, callbacks1)); - - NiceMock outer_decoder2; - ConnPoolCallbacks callbacks2; - EXPECT_CALL(callbacks1.pool_failure_, ready()).WillOnce(Invoke([&]() -> void { - conn_pool_.expectClientCreate(); - EXPECT_NE(nullptr, conn_pool_.newStream(outer_decoder2, callbacks2)); - })); - - conn_pool_.test_clients_[0].connect_timer_->invokeCallback(); - - EXPECT_CALL(callbacks2.pool_failure_, ready()); - conn_pool_.test_clients_[1].connect_timer_->invokeCallback(); - - EXPECT_CALL(conn_pool_, onClientDestroy()).Times(2); - dispatcher_.clearDeferredDeleteList(); - - EXPECT_EQ(0U, cluster_->stats_.upstream_rq_total_.value()); - EXPECT_EQ(2U, cluster_->stats_.upstream_cx_connect_fail_.value()); - EXPECT_EQ(2U, cluster_->stats_.upstream_cx_connect_timeout_.value()); -} - -/** - * Test cancelling before the request is bound to a connection. - */ -TEST_F(Http1ConnPoolImplLegacyTest, CancelBeforeBound) { - InSequence s; - - // Request 1 should kick off a new connection. - NiceMock outer_decoder; - ConnPoolCallbacks callbacks; - conn_pool_.expectClientCreate(); - Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); - EXPECT_NE(nullptr, handle); - - handle->cancel(); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); - - // Cause the connection to go away. - EXPECT_CALL(conn_pool_, onClientDestroy()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - dispatcher_.clearDeferredDeleteList(); -} - -/** - * Test an upstream disconnection while there is a bound request. - */ -TEST_F(Http1ConnPoolImplLegacyTest, DisconnectWhileBound) { - InSequence s; - - // Request 1 should kick off a new connection. 
- NiceMock outer_decoder; - ConnPoolCallbacks callbacks; - conn_pool_.expectClientCreate(); - Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); - EXPECT_NE(nullptr, handle); - - NiceMock request_encoder; - ResponseDecoder* inner_decoder; - EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, newStream(_)) - .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder))); - EXPECT_CALL(callbacks.pool_ready_, ready()); - - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); - - // We should get a reset callback when the connection disconnects. - Http::MockStreamCallbacks stream_callbacks; - EXPECT_CALL(stream_callbacks, onResetStream(StreamResetReason::ConnectionTermination, _)); - request_encoder.getStream().addCallbacks(stream_callbacks); - - // Kill the connection while it has an active request. - EXPECT_CALL(conn_pool_, onClientDestroy()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - dispatcher_.clearDeferredDeleteList(); -} - -/** - * Test that we correctly handle reaching max connections. - */ -TEST_F(Http1ConnPoolImplLegacyTest, MaxConnections) { - InSequence s; - - EXPECT_EQ(0U, cluster_->circuit_breakers_stats_.cx_open_.value()); - - // Request 1 should kick off a new connection. - NiceMock outer_decoder1; - ConnPoolCallbacks callbacks; - conn_pool_.expectClientCreate(); - Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder1, callbacks); - - EXPECT_NE(nullptr, handle); - - // Request 2 should not kick off a new connection. - NiceMock outer_decoder2; - ConnPoolCallbacks callbacks2; - handle = conn_pool_.newStream(outer_decoder2, callbacks2); - EXPECT_EQ(1U, cluster_->stats_.upstream_cx_overflow_.value()); - EXPECT_EQ(1U, cluster_->circuit_breakers_stats_.cx_open_.value()); - - EXPECT_NE(nullptr, handle); - - // Connect event will bind to request 1. 
- NiceMock request_encoder; - ResponseDecoder* inner_decoder; - EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, newStream(_)) - .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder))); - EXPECT_CALL(callbacks.pool_ready_, ready()); - - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); - - // Finishing request 1 will immediately bind to request 2. - conn_pool_.expectEnableUpstreamReady(); - EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, newStream(_)) - .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder))); - EXPECT_CALL(callbacks2.pool_ready_, ready()); - - callbacks.outer_encoder_->encodeHeaders( - TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); - Http::ResponseHeaderMapPtr response_headers(new TestResponseHeaderMapImpl{{":status", "200"}}); - inner_decoder->decodeHeaders(std::move(response_headers), true); - - conn_pool_.expectAndRunUpstreamReady(); - callbacks2.outer_encoder_->encodeHeaders( - TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); - // N.B. clang_tidy insists that we use std::make_unique which can not infer std::initialize_list. - response_headers = std::make_unique( - std::initializer_list>{{":status", "200"}}); - inner_decoder->decodeHeaders(std::move(response_headers), true); - - // Cause the connection to go away. - EXPECT_CALL(conn_pool_, onClientDestroy()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - dispatcher_.clearDeferredDeleteList(); -} - -/** - * Test when upstream closes connection without 'connection: close' like - * https://github.com/envoyproxy/envoy/pull/2715 - */ -TEST_F(Http1ConnPoolImplLegacyTest, ConnectionCloseWithoutHeader) { - InSequence s; - - // Request 1 should kick off a new connection. 
- NiceMock outer_decoder1; - ConnPoolCallbacks callbacks; - conn_pool_.expectClientCreate(); - Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder1, callbacks); - - EXPECT_NE(nullptr, handle); - - // Request 2 should not kick off a new connection. - NiceMock outer_decoder2; - ConnPoolCallbacks callbacks2; - handle = conn_pool_.newStream(outer_decoder2, callbacks2); - EXPECT_EQ(1U, cluster_->stats_.upstream_cx_overflow_.value()); - - EXPECT_NE(nullptr, handle); - - // Connect event will bind to request 1. - NiceMock request_encoder; - ResponseDecoder* inner_decoder; - EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, newStream(_)) - .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder))); - EXPECT_CALL(callbacks.pool_ready_, ready()); - - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); - - // Finishing request 1 will schedule binding the connection to request 2. - conn_pool_.expectEnableUpstreamReady(); - - callbacks.outer_encoder_->encodeHeaders( - TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); - Http::ResponseHeaderMapPtr response_headers(new TestResponseHeaderMapImpl{{":status", "200"}}); - inner_decoder->decodeHeaders(std::move(response_headers), true); - - // Cause the connection to go away. - conn_pool_.expectClientCreate(); - EXPECT_CALL(conn_pool_, onClientDestroy()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - dispatcher_.clearDeferredDeleteList(); - - conn_pool_.expectAndRunUpstreamReady(); - - EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, newStream(_)) - .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder))); - EXPECT_CALL(callbacks2.pool_ready_, ready()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); - - callbacks2.outer_encoder_->encodeHeaders( - TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); - // N.B. 
clang_tidy insists that we use std::make_unique which can not infer std::initialize_list. - response_headers = std::make_unique( - std::initializer_list>{{":status", "200"}}); - inner_decoder->decodeHeaders(std::move(response_headers), true); - - EXPECT_CALL(conn_pool_, onClientDestroy()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - dispatcher_.clearDeferredDeleteList(); -} - -/** - * Test when upstream sends us 'connection: close' - */ -TEST_F(Http1ConnPoolImplLegacyTest, ConnectionCloseHeader) { - InSequence s; - - // Request 1 should kick off a new connection. - NiceMock outer_decoder; - ConnPoolCallbacks callbacks; - conn_pool_.expectClientCreate(); - Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); - - EXPECT_NE(nullptr, handle); - - NiceMock request_encoder; - ResponseDecoder* inner_decoder; - EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, newStream(_)) - .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder))); - EXPECT_CALL(callbacks.pool_ready_, ready()); - - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); - callbacks.outer_encoder_->encodeHeaders( - TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); - - // Response with 'connection: close' which should cause the connection to go away. - EXPECT_CALL(conn_pool_, onClientDestroy()); - Http::ResponseHeaderMapPtr response_headers( - new TestResponseHeaderMapImpl{{":status", "200"}, {"Connection", "Close"}}); - inner_decoder->decodeHeaders(std::move(response_headers), true); - dispatcher_.clearDeferredDeleteList(); - - EXPECT_EQ(0U, cluster_->stats_.upstream_cx_destroy_with_active_rq_.value()); -} - -/** - * Test when upstream sends us 'proxy-connection: close' - */ -TEST_F(Http1ConnPoolImplLegacyTest, ProxyConnectionCloseHeader) { - InSequence s; - - // Request 1 should kick off a new connection. 
- NiceMock outer_decoder; - ConnPoolCallbacks callbacks; - conn_pool_.expectClientCreate(); - Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); - - EXPECT_NE(nullptr, handle); - - NiceMock request_encoder; - ResponseDecoder* inner_decoder; - EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, newStream(_)) - .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder))); - EXPECT_CALL(callbacks.pool_ready_, ready()); - - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); - callbacks.outer_encoder_->encodeHeaders( - TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); - - // Response with 'proxy-connection: close' which should cause the connection to go away. - EXPECT_CALL(conn_pool_, onClientDestroy()); - Http::ResponseHeaderMapPtr response_headers( - new TestResponseHeaderMapImpl{{":status", "200"}, {"Proxy-Connection", "Close"}}); - inner_decoder->decodeHeaders(std::move(response_headers), true); - dispatcher_.clearDeferredDeleteList(); - - EXPECT_EQ(0U, cluster_->stats_.upstream_cx_destroy_with_active_rq_.value()); -} - -/** - * Test when upstream is HTTP/1.0 and does not send 'connection: keep-alive' - */ -TEST_F(Http1ConnPoolImplLegacyTest, Http10NoConnectionKeepAlive) { - InSequence s; - - // Request 1 should kick off a new connection. 
- NiceMock outer_decoder; - ConnPoolCallbacks callbacks; - conn_pool_.expectClientCreate(Protocol::Http10); - Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); - - EXPECT_NE(nullptr, handle); - - NiceMock request_encoder; - ResponseDecoder* inner_decoder; - EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, newStream(_)) - .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder))); - EXPECT_CALL(callbacks.pool_ready_, ready()); - - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); - callbacks.outer_encoder_->encodeHeaders( - TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); - - // Response without 'connection: keep-alive' which should cause the connection to go away. - EXPECT_CALL(conn_pool_, onClientDestroy()); - Http::ResponseHeaderMapPtr response_headers( - new TestResponseHeaderMapImpl{{":protocol", "HTTP/1.0"}, {":status", "200"}}); - inner_decoder->decodeHeaders(std::move(response_headers), true); - dispatcher_.clearDeferredDeleteList(); - - EXPECT_EQ(0U, cluster_->stats_.upstream_cx_destroy_with_active_rq_.value()); -} - -/** - * Test when we reach max requests per connection. - */ -TEST_F(Http1ConnPoolImplLegacyTest, MaxRequestsPerConnection) { - InSequence s; - - cluster_->max_requests_per_connection_ = 1; - - // Request 1 should kick off a new connection. 
- NiceMock outer_decoder; - ConnPoolCallbacks callbacks; - conn_pool_.expectClientCreate(); - Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); - - EXPECT_NE(nullptr, handle); - - NiceMock request_encoder; - ResponseDecoder* inner_decoder; - EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, newStream(_)) - .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder))); - EXPECT_CALL(callbacks.pool_ready_, ready()); - - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); - callbacks.outer_encoder_->encodeHeaders( - TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); - - // Response with 'connection: close' which should cause the connection to go away. - EXPECT_CALL(conn_pool_, onClientDestroy()); - Http::ResponseHeaderMapPtr response_headers(new TestResponseHeaderMapImpl{{":status", "200"}}); - inner_decoder->decodeHeaders(std::move(response_headers), true); - dispatcher_.clearDeferredDeleteList(); - - EXPECT_EQ(0U, cluster_->stats_.upstream_cx_destroy_with_active_rq_.value()); - EXPECT_EQ(1U, cluster_->stats_.upstream_cx_max_requests_.value()); -} - -TEST_F(Http1ConnPoolImplLegacyTest, ConcurrentConnections) { - cluster_->resetResourceManager(2, 1024, 1024, 1, 1); - InSequence s; - - ActiveTestRequest r1(*this, 0, ActiveTestRequest::Type::CreateConnection); - r1.startRequest(); - - ActiveTestRequest r2(*this, 1, ActiveTestRequest::Type::CreateConnection); - r2.startRequest(); - - ActiveTestRequest r3(*this, 0, ActiveTestRequest::Type::Pending); - - // Finish r1, which gets r3 going. - conn_pool_.expectEnableUpstreamReady(); - r3.expectNewStream(); - - r1.completeResponse(false); - conn_pool_.expectAndRunUpstreamReady(); - r3.startRequest(); - EXPECT_EQ(3U, cluster_->stats_.upstream_rq_total_.value()); - - r2.completeResponse(false); - r3.completeResponse(false); - - // Disconnect both clients. 
- EXPECT_CALL(conn_pool_, onClientDestroy()).Times(2); - conn_pool_.test_clients_[1].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - dispatcher_.clearDeferredDeleteList(); - - EXPECT_EQ(2U, cluster_->stats_.upstream_cx_destroy_.value()); - EXPECT_EQ(2U, cluster_->stats_.upstream_cx_destroy_remote_.value()); -} - -TEST_F(Http1ConnPoolImplLegacyTest, DrainCallback) { - InSequence s; - ReadyWatcher drained; - - EXPECT_CALL(drained, ready()); - conn_pool_.addDrainedCallback([&]() -> void { drained.ready(); }); - - ActiveTestRequest r1(*this, 0, ActiveTestRequest::Type::CreateConnection); - ActiveTestRequest r2(*this, 0, ActiveTestRequest::Type::Pending); - r2.handle_->cancel(); - EXPECT_EQ(1U, cluster_->stats_.upstream_rq_total_.value()); - - EXPECT_CALL(drained, ready()); - r1.startRequest(); - r1.completeResponse(false); - - EXPECT_CALL(conn_pool_, onClientDestroy()); - dispatcher_.clearDeferredDeleteList(); -} - -// Test draining a connection pool that has a pending connection. 
-TEST_F(Http1ConnPoolImplLegacyTest, DrainWhileConnecting) { - InSequence s; - ReadyWatcher drained; - - NiceMock outer_decoder; - ConnPoolCallbacks callbacks; - conn_pool_.expectClientCreate(); - Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); - EXPECT_NE(nullptr, handle); - - conn_pool_.addDrainedCallback([&]() -> void { drained.ready(); }); - handle->cancel(); - EXPECT_CALL(*conn_pool_.test_clients_[0].connection_, - close(Network::ConnectionCloseType::NoFlush)); - EXPECT_CALL(drained, ready()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); - - EXPECT_CALL(conn_pool_, onClientDestroy()); - dispatcher_.clearDeferredDeleteList(); -} - -TEST_F(Http1ConnPoolImplLegacyTest, RemoteCloseToCompleteResponse) { - InSequence s; - - NiceMock outer_decoder; - ConnPoolCallbacks callbacks; - conn_pool_.expectClientCreate(); - Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); - EXPECT_NE(nullptr, handle); - - NiceMock request_encoder; - ResponseDecoder* inner_decoder; - EXPECT_CALL(*conn_pool_.test_clients_[0].connect_timer_, disableTimer()); - EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, newStream(_)) - .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder))); - EXPECT_CALL(callbacks.pool_ready_, ready()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); - - callbacks.outer_encoder_->encodeHeaders( - TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); - - inner_decoder->decodeHeaders( - ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, false); - Buffer::OwnedImpl dummy_data("12345"); - inner_decoder->decodeData(dummy_data, false); - - Buffer::OwnedImpl empty_data; - EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, dispatch(BufferEqual(&empty_data))) - .WillOnce(Invoke([&](Buffer::Instance& data) -> void { - // Simulate the onResponseComplete 
call to decodeData since dispatch is mocked out. - inner_decoder->decodeData(data, true); - })); - - EXPECT_CALL(*conn_pool_.test_clients_[0].connection_, - close(Network::ConnectionCloseType::NoFlush)); - EXPECT_CALL(conn_pool_, onClientDestroy()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - dispatcher_.clearDeferredDeleteList(); - - EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_.value()); - EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); -} - -TEST_F(Http1ConnPoolImplLegacyTest, NoActiveConnectionsByDefault) { - EXPECT_FALSE(conn_pool_.hasActiveConnections()); -} - -TEST_F(Http1ConnPoolImplLegacyTest, ActiveRequestHasActiveConnectionsTrue) { - ActiveTestRequest r1(*this, 0, ActiveTestRequest::Type::CreateConnection); - r1.startRequest(); - - EXPECT_TRUE(conn_pool_.hasActiveConnections()); - - // cleanup - r1.completeResponse(false); - conn_pool_.drainConnections(); - EXPECT_CALL(conn_pool_, onClientDestroy()); - dispatcher_.clearDeferredDeleteList(); -} - -TEST_F(Http1ConnPoolImplLegacyTest, ResponseCompletedConnectionReadyNoActiveConnections) { - ActiveTestRequest r1(*this, 0, ActiveTestRequest::Type::CreateConnection); - r1.startRequest(); - r1.completeResponse(false); - - EXPECT_FALSE(conn_pool_.hasActiveConnections()); - - conn_pool_.drainConnections(); - EXPECT_CALL(conn_pool_, onClientDestroy()); - dispatcher_.clearDeferredDeleteList(); -} - -TEST_F(Http1ConnPoolImplLegacyTest, PendingRequestIsConsideredActive) { - conn_pool_.expectClientCreate(); - ActiveTestRequest r1(*this, 0, ActiveTestRequest::Type::Pending); - - EXPECT_TRUE(conn_pool_.hasActiveConnections()); - - EXPECT_CALL(conn_pool_, onClientDestroy()); - r1.handle_->cancel(); - EXPECT_EQ(0U, cluster_->stats_.upstream_rq_total_.value()); - conn_pool_.drainConnections(); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - dispatcher_.clearDeferredDeleteList(); - - EXPECT_EQ(1U, 
cluster_->stats_.upstream_cx_destroy_.value()); - EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); -} - -} // namespace -} // namespace Http1 -} // namespace Legacy -} // namespace Http -} // namespace Envoy diff --git a/test/common/http/http1/conn_pool_test.cc b/test/common/http/http1/conn_pool_test.cc index beb7344b53335..64b459c7ef226 100644 --- a/test/common/http/http1/conn_pool_test.cc +++ b/test/common/http/http1/conn_pool_test.cc @@ -2,11 +2,14 @@ #include #include "envoy/http/codec.h" +#include "envoy/network/transport_socket.h" #include "common/buffer/buffer_impl.h" #include "common/event/dispatcher_impl.h" #include "common/http/codec_client.h" #include "common/http/http1/conn_pool.h" +#include "common/http/utility.h" +#include "common/network/raw_buffer_socket.h" #include "common/network/utility.h" #include "common/upstream/upstream_impl.h" @@ -18,8 +21,10 @@ #include "test/mocks/network/mocks.h" #include "test/mocks/runtime/mocks.h" #include "test/mocks/upstream/mocks.h" +#include "test/mocks/upstream/transport_socket_match.h" #include "test/test_common/printers.h" #include "test/test_common/simulated_time_system.h" +#include "test/test_common/test_runtime.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" @@ -47,16 +52,16 @@ class ConnPoolImplForTest : public ConnPoolImpl { public: ConnPoolImplForTest(Event::MockDispatcher& dispatcher, Upstream::ClusterInfoConstSharedPtr cluster, - NiceMock* upstream_ready_timer) + NiceMock* upstream_ready_cb) : ConnPoolImpl(dispatcher, Upstream::makeTestHost(cluster, "tcp://127.0.0.1:9000"), Upstream::ResourcePriority::Default, nullptr, nullptr), api_(Api::createApiForTest()), mock_dispatcher_(dispatcher), - mock_upstream_ready_timer_(upstream_ready_timer) {} + mock_upstream_ready_cb_(upstream_ready_cb) {} ~ConnPoolImplForTest() override { EXPECT_EQ(0U, ready_clients_.size()); EXPECT_EQ(0U, busy_clients_.size()); - EXPECT_EQ(0U, pending_requests_.size()); + EXPECT_EQ(0U, 
pending_streams_.size()); } struct TestCodecClient { @@ -98,27 +103,29 @@ class ConnPoolImplForTest : public ConnPoolImpl { } }, Upstream::makeTestHost(cluster, "tcp://127.0.0.1:9000"), *test_client.client_dispatcher_); + EXPECT_CALL(*test_client.connect_timer_, enableTimer(_, _)); EXPECT_CALL(mock_dispatcher_, createClientConnection_(_, _, _, _)) .WillOnce(Return(test_client.connection_)); EXPECT_CALL(*this, createCodecClient_()).WillOnce(Return(test_client.codec_client_)); - EXPECT_CALL(*test_client.connect_timer_, enableTimer(_, _)); ON_CALL(*test_client.codec_, protocol()).WillByDefault(Return(protocol)); } void expectEnableUpstreamReady() { EXPECT_FALSE(upstream_ready_enabled_); - EXPECT_CALL(*mock_upstream_ready_timer_, enableTimer(_, _)).Times(1).RetiresOnSaturation(); + EXPECT_CALL(*mock_upstream_ready_cb_, scheduleCallbackCurrentIteration()) + .Times(1) + .RetiresOnSaturation(); } void expectAndRunUpstreamReady() { EXPECT_TRUE(upstream_ready_enabled_); - mock_upstream_ready_timer_->invokeCallback(); + mock_upstream_ready_cb_->invokeCallback(); EXPECT_FALSE(upstream_ready_enabled_); } Api::ApiPtr api_; Event::MockDispatcher& mock_dispatcher_; - NiceMock* mock_upstream_ready_timer_; + NiceMock* mock_upstream_ready_cb_; std::vector test_clients_; }; @@ -128,8 +135,9 @@ class ConnPoolImplForTest : public ConnPoolImpl { class Http1ConnPoolImplTest : public testing::Test { public: Http1ConnPoolImplTest() - : upstream_ready_timer_(new NiceMock(&dispatcher_)), - conn_pool_(dispatcher_, cluster_, upstream_ready_timer_) {} + : upstream_ready_cb_(new NiceMock(&dispatcher_)), + conn_pool_( + std::make_unique(dispatcher_, cluster_, upstream_ready_cb_)) {} ~Http1ConnPoolImplTest() override { EXPECT_EQ("", TestUtility::nonZeroedGauges(cluster_->stats_store_.gauges())); @@ -137,8 +145,8 @@ class Http1ConnPoolImplTest : public testing::Test { NiceMock dispatcher_; std::shared_ptr cluster_{new NiceMock()}; - NiceMock* upstream_ready_timer_; - ConnPoolImplForTest conn_pool_; 
+ NiceMock* upstream_ready_cb_; + std::unique_ptr conn_pool_; NiceMock runtime_; }; @@ -154,14 +162,14 @@ struct ActiveTestRequest { parent_.cluster_->resourceManager(Upstream::ResourcePriority::Default).requests().count(); uint64_t current_rq_total = parent_.cluster_->stats_.upstream_rq_total_.value(); if (type == Type::CreateConnection) { - parent.conn_pool_.expectClientCreate(); + parent.conn_pool_->expectClientCreate(); } if (type == Type::Immediate) { expectNewStream(); } - handle_ = parent.conn_pool_.newStream(outer_decoder_, callbacks_); + handle_ = parent.conn_pool_->newStream(outer_decoder_, callbacks_); if (type == Type::Immediate) { EXPECT_EQ(nullptr, handle_); @@ -171,8 +179,8 @@ struct ActiveTestRequest { if (type == Type::CreateConnection) { expectNewStream(); - EXPECT_CALL(*parent_.conn_pool_.test_clients_[client_index_].connect_timer_, disableTimer()); - parent.conn_pool_.test_clients_[client_index_].connection_->raiseEvent( + EXPECT_CALL(*parent_.conn_pool_->test_clients_[client_index_].connect_timer_, disableTimer()); + parent.conn_pool_->test_clients_[client_index_].connection_->raiseEvent( Network::ConnectionEvent::Connected); } if (type != Type::Pending) { @@ -197,7 +205,7 @@ struct ActiveTestRequest { } void expectNewStream() { - EXPECT_CALL(*parent_.conn_pool_.test_clients_[client_index_].codec_, newStream(_)) + EXPECT_CALL(*parent_.conn_pool_->test_clients_[client_index_].codec_, newStream(_)) .WillOnce(DoAll(SaveArgAddress(&inner_decoder_), ReturnRef(request_encoder_))); EXPECT_CALL(callbacks_.pool_ready_, ready()); } @@ -219,7 +227,7 @@ struct ActiveTestRequest { /** * Verify that the pool's host is a member of the cluster the pool was constructed with. */ -TEST_F(Http1ConnPoolImplTest, Host) { EXPECT_EQ(cluster_.get(), &conn_pool_.host()->cluster()); } +TEST_F(Http1ConnPoolImplTest, Host) { EXPECT_EQ(cluster_.get(), &conn_pool_->host()->cluster()); } /** * Verify that connections are drained when requested. 
@@ -237,13 +245,13 @@ TEST_F(Http1ConnPoolImplTest, DrainConnections) { r1.completeResponse(false); // This will destroy the ready client and set requests remaining to 1 on the busy client. - conn_pool_.drainConnections(); - EXPECT_CALL(conn_pool_, onClientDestroy()); + conn_pool_->drainConnections(); + EXPECT_CALL(*conn_pool_, onClientDestroy()); dispatcher_.clearDeferredDeleteList(); // This will destroy the busy client when the response finishes. r2.completeResponse(false); - EXPECT_CALL(conn_pool_, onClientDestroy()); + EXPECT_CALL(*conn_pool_, onClientDestroy()); dispatcher_.clearDeferredDeleteList(); } @@ -260,8 +268,42 @@ TEST_F(Http1ConnPoolImplTest, VerifyTimingStats) { r1.startRequest(); r1.completeResponse(false); - EXPECT_CALL(conn_pool_, onClientDestroy()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_CALL(*conn_pool_, onClientDestroy()); + conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + dispatcher_.clearDeferredDeleteList(); +} + +/** + * Verify that we set the ALPN fallback. + */ +TEST_F(Http1ConnPoolImplTest, VerifyAlpnFallback) { + // Override the TransportSocketFactory with a mock version we can add expectations to. + auto factory = std::make_unique(); + EXPECT_CALL(*factory, createTransportSocket(_)) + .WillOnce(Invoke( + [](Network::TransportSocketOptionsSharedPtr options) -> Network::TransportSocketPtr { + EXPECT_TRUE(options != nullptr); + EXPECT_EQ(options->applicationProtocolFallback(), + Http::Utility::AlpnNames::get().Http11); + return std::make_unique(); + })); + cluster_->transport_socket_matcher_ = + std::make_unique>(std::move(factory)); + + new NiceMock(&dispatcher_); + + // Recreate the conn pool so that the host re-evaluates the transport socket match, arriving at + // our test transport socket factory. 
+ conn_pool_ = std::make_unique(dispatcher_, cluster_, upstream_ready_cb_); + NiceMock outer_decoder; + ConnPoolCallbacks callbacks; + conn_pool_->expectClientCreate(Protocol::Http11); + Http::ConnectionPool::Cancellable* handle = conn_pool_->newStream(outer_decoder, callbacks); + EXPECT_NE(nullptr, handle); + + EXPECT_CALL(*conn_pool_, onClientDestroy()); + EXPECT_CALL(callbacks.pool_failure_, ready()); + conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); dispatcher_.clearDeferredDeleteList(); } @@ -271,15 +313,15 @@ TEST_F(Http1ConnPoolImplTest, VerifyTimingStats) { TEST_F(Http1ConnPoolImplTest, VerifyBufferLimits) { NiceMock outer_decoder; ConnPoolCallbacks callbacks; - conn_pool_.expectClientCreate(); + conn_pool_->expectClientCreate(); EXPECT_CALL(*cluster_, perConnectionBufferLimitBytes()).WillOnce(Return(8192)); - EXPECT_CALL(*conn_pool_.test_clients_.back().connection_, setBufferLimits(8192)); - Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); + EXPECT_CALL(*conn_pool_->test_clients_.back().connection_, setBufferLimits(8192)); + Http::ConnectionPool::Cancellable* handle = conn_pool_->newStream(outer_decoder, callbacks); EXPECT_NE(nullptr, handle); - EXPECT_CALL(conn_pool_, onClientDestroy()); + EXPECT_CALL(*conn_pool_, onClientDestroy()); EXPECT_CALL(callbacks.pool_failure_, ready()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); dispatcher_.clearDeferredDeleteList(); } @@ -296,22 +338,22 @@ TEST_F(Http1ConnPoolImplTest, VerifyCancelInCallback) { EXPECT_CALL(callbacks1.pool_failure_, ready()).Times(0); ConnPoolCallbacks callbacks2; EXPECT_CALL(callbacks2.pool_failure_, ready()).WillOnce(Invoke([&]() -> void { - handle1->cancel(); + handle1->cancel(Envoy::ConnectionPool::CancelPolicy::Default); })); NiceMock outer_decoder; // Create 
the first client. - conn_pool_.expectClientCreate(); - handle1 = conn_pool_.newStream(outer_decoder, callbacks1); + conn_pool_->expectClientCreate(); + handle1 = conn_pool_->newStream(outer_decoder, callbacks1); ASSERT_NE(nullptr, handle1); // Create the second client. - Http::ConnectionPool::Cancellable* handle2 = conn_pool_.newStream(outer_decoder, callbacks2); + Http::ConnectionPool::Cancellable* handle2 = conn_pool_->newStream(outer_decoder, callbacks2); ASSERT_NE(nullptr, handle2); // Simulate connection failure. - EXPECT_CALL(conn_pool_, onClientDestroy()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_CALL(*conn_pool_, onClientDestroy()); + conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); dispatcher_.clearDeferredDeleteList(); } @@ -333,8 +375,8 @@ TEST_F(Http1ConnPoolImplTest, MultipleRequestAndResponse) { r2.completeResponse(true); // Cause the connection to go away. - EXPECT_CALL(conn_pool_, onClientDestroy()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_CALL(*conn_pool_, onClientDestroy()); + conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); dispatcher_.clearDeferredDeleteList(); } @@ -348,23 +390,23 @@ TEST_F(Http1ConnPoolImplTest, MaxPendingRequests) { NiceMock outer_decoder; ConnPoolCallbacks callbacks; - conn_pool_.expectClientCreate(); - Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); + conn_pool_->expectClientCreate(); + Http::ConnectionPool::Cancellable* handle = conn_pool_->newStream(outer_decoder, callbacks); EXPECT_NE(nullptr, handle); NiceMock outer_decoder2; ConnPoolCallbacks callbacks2; EXPECT_CALL(callbacks2.pool_failure_, ready()); - Http::ConnectionPool::Cancellable* handle2 = conn_pool_.newStream(outer_decoder2, callbacks2); + Http::ConnectionPool::Cancellable* handle2 = 
conn_pool_->newStream(outer_decoder2, callbacks2); EXPECT_EQ(nullptr, handle2); EXPECT_EQ(callbacks2.reason_, ConnectionPool::PoolFailureReason::Overflow); EXPECT_EQ(1U, cluster_->circuit_breakers_stats_.rq_pending_open_.value()); - handle->cancel(); + handle->cancel(Envoy::ConnectionPool::CancelPolicy::Default); - EXPECT_CALL(conn_pool_, onClientDestroy()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_CALL(*conn_pool_, onClientDestroy()); + conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); dispatcher_.clearDeferredDeleteList(); EXPECT_EQ(1U, cluster_->stats_.upstream_rq_pending_overflow_.value()); @@ -380,14 +422,14 @@ TEST_F(Http1ConnPoolImplTest, ConnectFailure) { // Request 1 should kick off a new connection. NiceMock outer_decoder; ConnPoolCallbacks callbacks; - conn_pool_.expectClientCreate(); - Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); + conn_pool_->expectClientCreate(); + Http::ConnectionPool::Cancellable* handle = conn_pool_->newStream(outer_decoder, callbacks); EXPECT_NE(nullptr, handle); EXPECT_CALL(callbacks.pool_failure_, ready()); - EXPECT_CALL(*conn_pool_.test_clients_[0].connect_timer_, disableTimer()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - EXPECT_CALL(conn_pool_, onClientDestroy()); + EXPECT_CALL(*conn_pool_->test_clients_[0].connect_timer_, disableTimer()); + conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_CALL(*conn_pool_, onClientDestroy()); dispatcher_.clearDeferredDeleteList(); EXPECT_EQ(1U, cluster_->stats_.upstream_cx_connect_fail_.value()); @@ -410,12 +452,12 @@ TEST_F(Http1ConnPoolImplTest, MeasureConnectTime) { InSequence s; // Start the first connect attempt. 
- conn_pool_.expectClientCreate(); + conn_pool_->expectClientCreate(); ActiveTestRequest r1(*this, 0, ActiveTestRequest::Type::Pending); // Move time forward and start the second connect attempt. simulated_time.advanceTimeWait(std::chrono::milliseconds(sleep1_ms)); - conn_pool_.expectClientCreate(); + conn_pool_->expectClientCreate(); ActiveTestRequest r2(*this, 1, ActiveTestRequest::Type::Pending); // Move time forward, signal that the first connect completed and verify the time to connect. @@ -425,8 +467,8 @@ TEST_F(Http1ConnPoolImplTest, MeasureConnectTime) { deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_cx_connect_ms"), _)) .WillOnce(SaveArg<1>(&upstream_cx_connect_ms1)); r1.expectNewStream(); - EXPECT_CALL(*conn_pool_.test_clients_[0].connect_timer_, disableTimer()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); + EXPECT_CALL(*conn_pool_->test_clients_[0].connect_timer_, disableTimer()); + conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); EXPECT_EQ(sleep1_ms + sleep2_ms, upstream_cx_connect_ms1); // Move time forward, signal that the second connect completed and verify the time to connect. @@ -436,17 +478,18 @@ TEST_F(Http1ConnPoolImplTest, MeasureConnectTime) { deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_cx_connect_ms"), _)) .WillOnce(SaveArg<1>(&upstream_cx_connect_ms2)); r2.expectNewStream(); - EXPECT_CALL(*conn_pool_.test_clients_[1].connect_timer_, disableTimer()); - conn_pool_.test_clients_[1].connection_->raiseEvent(Network::ConnectionEvent::Connected); + EXPECT_CALL(*conn_pool_->test_clients_[1].connect_timer_, disableTimer()); + conn_pool_->test_clients_[1].connection_->raiseEvent(Network::ConnectionEvent::Connected); EXPECT_EQ(sleep2_ms + sleep3_ms, upstream_cx_connect_ms2); // Cleanup, cause the connections to go away. 
- while (!conn_pool_.test_clients_.empty()) { + while (!conn_pool_->test_clients_.empty()) { EXPECT_CALL( cluster_->stats_store_, deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_cx_length_ms"), _)); - EXPECT_CALL(conn_pool_, onClientDestroy()); - conn_pool_.test_clients_.front().connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_CALL(*conn_pool_, onClientDestroy()); + conn_pool_->test_clients_.front().connection_->raiseEvent( + Network::ConnectionEvent::RemoteClose); dispatcher_.clearDeferredDeleteList(); } } @@ -460,22 +503,22 @@ TEST_F(Http1ConnPoolImplTest, ConnectTimeout) { // Request 1 should kick off a new connection. NiceMock outer_decoder1; ConnPoolCallbacks callbacks1; - conn_pool_.expectClientCreate(); - EXPECT_NE(nullptr, conn_pool_.newStream(outer_decoder1, callbacks1)); + conn_pool_->expectClientCreate(); + EXPECT_NE(nullptr, conn_pool_->newStream(outer_decoder1, callbacks1)); NiceMock outer_decoder2; ConnPoolCallbacks callbacks2; EXPECT_CALL(callbacks1.pool_failure_, ready()).WillOnce(Invoke([&]() -> void { - conn_pool_.expectClientCreate(); - EXPECT_NE(nullptr, conn_pool_.newStream(outer_decoder2, callbacks2)); + conn_pool_->expectClientCreate(); + EXPECT_NE(nullptr, conn_pool_->newStream(outer_decoder2, callbacks2)); })); - conn_pool_.test_clients_[0].connect_timer_->invokeCallback(); + conn_pool_->test_clients_[0].connect_timer_->invokeCallback(); EXPECT_CALL(callbacks2.pool_failure_, ready()); - conn_pool_.test_clients_[1].connect_timer_->invokeCallback(); + conn_pool_->test_clients_[1].connect_timer_->invokeCallback(); - EXPECT_CALL(conn_pool_, onClientDestroy()).Times(2); + EXPECT_CALL(*conn_pool_, onClientDestroy()).Times(2); dispatcher_.clearDeferredDeleteList(); EXPECT_EQ(0U, cluster_->stats_.upstream_rq_total_.value()); @@ -492,16 +535,35 @@ TEST_F(Http1ConnPoolImplTest, CancelBeforeBound) { // Request 1 should kick off a new connection. 
NiceMock outer_decoder; ConnPoolCallbacks callbacks; - conn_pool_.expectClientCreate(); - Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); + conn_pool_->expectClientCreate(); + Http::ConnectionPool::Cancellable* handle = conn_pool_->newStream(outer_decoder, callbacks); EXPECT_NE(nullptr, handle); - handle->cancel(); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); + handle->cancel(Envoy::ConnectionPool::CancelPolicy::Default); + conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); // Cause the connection to go away. - EXPECT_CALL(conn_pool_, onClientDestroy()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_CALL(*conn_pool_, onClientDestroy()); + conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + dispatcher_.clearDeferredDeleteList(); +} + +/** + * Test cancelling with CloseExcess + */ +TEST_F(Http1ConnPoolImplTest, CancelExcessBeforeBound) { + InSequence s; + + // Request 1 should kick off a new connection. + NiceMock outer_decoder; + ConnPoolCallbacks callbacks; + conn_pool_->expectClientCreate(); + Http::ConnectionPool::Cancellable* handle = conn_pool_->newStream(outer_decoder, callbacks); + EXPECT_NE(nullptr, handle); + + handle->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess); + // Unlike CancelBeforeBound there is no need to raise a close event to destroy the connection. + EXPECT_CALL(*conn_pool_, onClientDestroy()); dispatcher_.clearDeferredDeleteList(); } @@ -514,17 +576,17 @@ TEST_F(Http1ConnPoolImplTest, DisconnectWhileBound) { // Request 1 should kick off a new connection. 
NiceMock outer_decoder; ConnPoolCallbacks callbacks; - conn_pool_.expectClientCreate(); - Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); + conn_pool_->expectClientCreate(); + Http::ConnectionPool::Cancellable* handle = conn_pool_->newStream(outer_decoder, callbacks); EXPECT_NE(nullptr, handle); NiceMock request_encoder; ResponseDecoder* inner_decoder; - EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, newStream(_)) + EXPECT_CALL(*conn_pool_->test_clients_[0].codec_, newStream(_)) .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder))); EXPECT_CALL(callbacks.pool_ready_, ready()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); + conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); // We should get a reset callback when the connection disconnects. Http::MockStreamCallbacks stream_callbacks; @@ -532,8 +594,8 @@ TEST_F(Http1ConnPoolImplTest, DisconnectWhileBound) { request_encoder.getStream().addCallbacks(stream_callbacks); // Kill the connection while it has an active request. - EXPECT_CALL(conn_pool_, onClientDestroy()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_CALL(*conn_pool_, onClientDestroy()); + conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); dispatcher_.clearDeferredDeleteList(); } @@ -548,15 +610,15 @@ TEST_F(Http1ConnPoolImplTest, MaxConnections) { // Request 1 should kick off a new connection. NiceMock outer_decoder1; ConnPoolCallbacks callbacks; - conn_pool_.expectClientCreate(); - Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder1, callbacks); + conn_pool_->expectClientCreate(); + Http::ConnectionPool::Cancellable* handle = conn_pool_->newStream(outer_decoder1, callbacks); EXPECT_NE(nullptr, handle); // Request 2 should not kick off a new connection. 
NiceMock outer_decoder2; ConnPoolCallbacks callbacks2; - handle = conn_pool_.newStream(outer_decoder2, callbacks2); + handle = conn_pool_->newStream(outer_decoder2, callbacks2); EXPECT_EQ(1U, cluster_->stats_.upstream_cx_overflow_.value()); EXPECT_EQ(1U, cluster_->circuit_breakers_stats_.cx_open_.value()); @@ -565,15 +627,15 @@ TEST_F(Http1ConnPoolImplTest, MaxConnections) { // Connect event will bind to request 1. NiceMock request_encoder; ResponseDecoder* inner_decoder; - EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, newStream(_)) + EXPECT_CALL(*conn_pool_->test_clients_[0].codec_, newStream(_)) .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder))); EXPECT_CALL(callbacks.pool_ready_, ready()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); + conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); // Finishing request 1 will immediately bind to request 2. - conn_pool_.expectEnableUpstreamReady(); - EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, newStream(_)) + conn_pool_->expectEnableUpstreamReady(); + EXPECT_CALL(*conn_pool_->test_clients_[0].codec_, newStream(_)) .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder))); EXPECT_CALL(callbacks2.pool_ready_, ready()); @@ -582,7 +644,7 @@ TEST_F(Http1ConnPoolImplTest, MaxConnections) { Http::ResponseHeaderMapPtr response_headers(new TestResponseHeaderMapImpl{{":status", "200"}}); inner_decoder->decodeHeaders(std::move(response_headers), true); - conn_pool_.expectAndRunUpstreamReady(); + conn_pool_->expectAndRunUpstreamReady(); callbacks2.outer_encoder_->encodeHeaders( TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); // N.B. clang_tidy insists that we use std::make_unique which can not infer std::initialize_list. 
@@ -591,8 +653,8 @@ TEST_F(Http1ConnPoolImplTest, MaxConnections) { inner_decoder->decodeHeaders(std::move(response_headers), true); // Cause the connection to go away. - EXPECT_CALL(conn_pool_, onClientDestroy()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_CALL(*conn_pool_, onClientDestroy()); + conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); dispatcher_.clearDeferredDeleteList(); } @@ -606,15 +668,15 @@ TEST_F(Http1ConnPoolImplTest, ConnectionCloseWithoutHeader) { // Request 1 should kick off a new connection. NiceMock outer_decoder1; ConnPoolCallbacks callbacks; - conn_pool_.expectClientCreate(); - Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder1, callbacks); + conn_pool_->expectClientCreate(); + Http::ConnectionPool::Cancellable* handle = conn_pool_->newStream(outer_decoder1, callbacks); EXPECT_NE(nullptr, handle); // Request 2 should not kick off a new connection. NiceMock outer_decoder2; ConnPoolCallbacks callbacks2; - handle = conn_pool_.newStream(outer_decoder2, callbacks2); + handle = conn_pool_->newStream(outer_decoder2, callbacks2); EXPECT_EQ(1U, cluster_->stats_.upstream_cx_overflow_.value()); EXPECT_NE(nullptr, handle); @@ -622,14 +684,14 @@ TEST_F(Http1ConnPoolImplTest, ConnectionCloseWithoutHeader) { // Connect event will bind to request 1. NiceMock request_encoder; ResponseDecoder* inner_decoder; - EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, newStream(_)) + EXPECT_CALL(*conn_pool_->test_clients_[0].codec_, newStream(_)) .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder))); EXPECT_CALL(callbacks.pool_ready_, ready()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); + conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); // Finishing request 1 will schedule binding the connection to request 2. 
- conn_pool_.expectEnableUpstreamReady(); + conn_pool_->expectEnableUpstreamReady(); callbacks.outer_encoder_->encodeHeaders( TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); @@ -637,17 +699,17 @@ TEST_F(Http1ConnPoolImplTest, ConnectionCloseWithoutHeader) { inner_decoder->decodeHeaders(std::move(response_headers), true); // Cause the connection to go away. - conn_pool_.expectClientCreate(); - EXPECT_CALL(conn_pool_, onClientDestroy()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + conn_pool_->expectClientCreate(); + EXPECT_CALL(*conn_pool_, onClientDestroy()); + conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); dispatcher_.clearDeferredDeleteList(); - conn_pool_.expectAndRunUpstreamReady(); + conn_pool_->expectAndRunUpstreamReady(); - EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, newStream(_)) + EXPECT_CALL(*conn_pool_->test_clients_[0].codec_, newStream(_)) .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder))); EXPECT_CALL(callbacks2.pool_ready_, ready()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); + conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); callbacks2.outer_encoder_->encodeHeaders( TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); @@ -656,8 +718,8 @@ TEST_F(Http1ConnPoolImplTest, ConnectionCloseWithoutHeader) { std::initializer_list>{{":status", "200"}}); inner_decoder->decodeHeaders(std::move(response_headers), true); - EXPECT_CALL(conn_pool_, onClientDestroy()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_CALL(*conn_pool_, onClientDestroy()); + conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); dispatcher_.clearDeferredDeleteList(); } @@ -670,23 +732,23 @@ TEST_F(Http1ConnPoolImplTest, ConnectionCloseHeader) { 
// Request 1 should kick off a new connection. NiceMock outer_decoder; ConnPoolCallbacks callbacks; - conn_pool_.expectClientCreate(); - Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); + conn_pool_->expectClientCreate(); + Http::ConnectionPool::Cancellable* handle = conn_pool_->newStream(outer_decoder, callbacks); EXPECT_NE(nullptr, handle); NiceMock request_encoder; ResponseDecoder* inner_decoder; - EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, newStream(_)) + EXPECT_CALL(*conn_pool_->test_clients_[0].codec_, newStream(_)) .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder))); EXPECT_CALL(callbacks.pool_ready_, ready()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); + conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); callbacks.outer_encoder_->encodeHeaders( TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); // Response with 'connection: close' which should cause the connection to go away. - EXPECT_CALL(conn_pool_, onClientDestroy()); + EXPECT_CALL(*conn_pool_, onClientDestroy()); ResponseHeaderMapPtr response_headers( new TestResponseHeaderMapImpl{{":status", "200"}, {"Connection", "Close"}}); inner_decoder->decodeHeaders(std::move(response_headers), true); @@ -704,23 +766,62 @@ TEST_F(Http1ConnPoolImplTest, ProxyConnectionCloseHeader) { // Request 1 should kick off a new connection. 
NiceMock outer_decoder; ConnPoolCallbacks callbacks; - conn_pool_.expectClientCreate(); - Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); + conn_pool_->expectClientCreate(); + Http::ConnectionPool::Cancellable* handle = conn_pool_->newStream(outer_decoder, callbacks); + + EXPECT_NE(nullptr, handle); + + NiceMock request_encoder; + ResponseDecoder* inner_decoder; + EXPECT_CALL(*conn_pool_->test_clients_[0].codec_, newStream(_)) + .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder))); + EXPECT_CALL(callbacks.pool_ready_, ready()); + + conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); + callbacks.outer_encoder_->encodeHeaders( + TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); + + EXPECT_CALL(*conn_pool_, onClientDestroy()); + // Response with 'proxy-connection: close' which should cause the connection to go away, even if + // there are other tokens in that header. + ResponseHeaderMapPtr response_headers( + new TestResponseHeaderMapImpl{{":status", "200"}, {"Proxy-Connection", "Close, foo"}}); + inner_decoder->decodeHeaders(std::move(response_headers), true); + dispatcher_.clearDeferredDeleteList(); + + EXPECT_EQ(0U, cluster_->stats_.upstream_cx_destroy_with_active_rq_.value()); +} + +/** + * Test legacy behavior when upstream sends us 'proxy-connection: close' + */ +TEST_F(Http1ConnPoolImplTest, ProxyConnectionCloseHeaderLegacy) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.fixed_connection_close", "false"}}); + InSequence s; + + // Request 1 should kick off a new connection. 
+ NiceMock outer_decoder; + ConnPoolCallbacks callbacks; + conn_pool_->expectClientCreate(); + Http::ConnectionPool::Cancellable* handle = conn_pool_->newStream(outer_decoder, callbacks); EXPECT_NE(nullptr, handle); NiceMock request_encoder; ResponseDecoder* inner_decoder; - EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, newStream(_)) + EXPECT_CALL(*conn_pool_->test_clients_[0].codec_, newStream(_)) .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder))); EXPECT_CALL(callbacks.pool_ready_, ready()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); + conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); callbacks.outer_encoder_->encodeHeaders( TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); - // Response with 'proxy-connection: close' which should cause the connection to go away. - EXPECT_CALL(conn_pool_, onClientDestroy()); + // Response with 'proxy-connection: close' which should cause the connection to go away, even if + // there are other tokens in that header. + EXPECT_CALL(*conn_pool_, onClientDestroy()); ResponseHeaderMapPtr response_headers( new TestResponseHeaderMapImpl{{":status", "200"}, {"Proxy-Connection", "Close"}}); inner_decoder->decodeHeaders(std::move(response_headers), true); @@ -738,23 +839,60 @@ TEST_F(Http1ConnPoolImplTest, Http10NoConnectionKeepAlive) { // Request 1 should kick off a new connection. 
NiceMock outer_decoder; ConnPoolCallbacks callbacks; - conn_pool_.expectClientCreate(Protocol::Http10); - Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); + conn_pool_->expectClientCreate(Protocol::Http10); + Http::ConnectionPool::Cancellable* handle = conn_pool_->newStream(outer_decoder, callbacks); + + EXPECT_NE(nullptr, handle); + + NiceMock request_encoder; + ResponseDecoder* inner_decoder; + EXPECT_CALL(*conn_pool_->test_clients_[0].codec_, newStream(_)) + .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder))); + EXPECT_CALL(callbacks.pool_ready_, ready()); + + conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); + callbacks.outer_encoder_->encodeHeaders( + TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); + + // Response without 'connection: keep-alive' which should cause the connection to go away. + EXPECT_CALL(*conn_pool_, onClientDestroy()); + ResponseHeaderMapPtr response_headers( + new TestResponseHeaderMapImpl{{":protocol", "HTTP/1.0"}, {":status", "200"}}); + inner_decoder->decodeHeaders(std::move(response_headers), true); + dispatcher_.clearDeferredDeleteList(); + + EXPECT_EQ(0U, cluster_->stats_.upstream_cx_destroy_with_active_rq_.value()); +} + +/** + * Test legacy behavior when upstream is HTTP/1.0 and does not send 'connection: keep-alive' + */ +TEST_F(Http1ConnPoolImplTest, Http10NoConnectionKeepAliveLegacy) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.fixed_connection_close", "false"}}); + InSequence s; + + // Request 1 should kick off a new connection. 
+ NiceMock outer_decoder; + ConnPoolCallbacks callbacks; + conn_pool_->expectClientCreate(Protocol::Http10); + Http::ConnectionPool::Cancellable* handle = conn_pool_->newStream(outer_decoder, callbacks); EXPECT_NE(nullptr, handle); NiceMock request_encoder; ResponseDecoder* inner_decoder; - EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, newStream(_)) + EXPECT_CALL(*conn_pool_->test_clients_[0].codec_, newStream(_)) .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder))); EXPECT_CALL(callbacks.pool_ready_, ready()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); + conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); callbacks.outer_encoder_->encodeHeaders( TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); // Response without 'connection: keep-alive' which should cause the connection to go away. - EXPECT_CALL(conn_pool_, onClientDestroy()); + EXPECT_CALL(*conn_pool_, onClientDestroy()); ResponseHeaderMapPtr response_headers( new TestResponseHeaderMapImpl{{":protocol", "HTTP/1.0"}, {":status", "200"}}); inner_decoder->decodeHeaders(std::move(response_headers), true); @@ -774,23 +912,23 @@ TEST_F(Http1ConnPoolImplTest, MaxRequestsPerConnection) { // Request 1 should kick off a new connection. 
NiceMock outer_decoder; ConnPoolCallbacks callbacks; - conn_pool_.expectClientCreate(); - Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); + conn_pool_->expectClientCreate(); + Http::ConnectionPool::Cancellable* handle = conn_pool_->newStream(outer_decoder, callbacks); EXPECT_NE(nullptr, handle); NiceMock request_encoder; ResponseDecoder* inner_decoder; - EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, newStream(_)) + EXPECT_CALL(*conn_pool_->test_clients_[0].codec_, newStream(_)) .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder))); EXPECT_CALL(callbacks.pool_ready_, ready()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); + conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); callbacks.outer_encoder_->encodeHeaders( TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); // Response with 'connection: close' which should cause the connection to go away. - EXPECT_CALL(conn_pool_, onClientDestroy()); + EXPECT_CALL(*conn_pool_, onClientDestroy()); Http::ResponseHeaderMapPtr response_headers(new TestResponseHeaderMapImpl{{":status", "200"}}); inner_decoder->decodeHeaders(std::move(response_headers), true); dispatcher_.clearDeferredDeleteList(); @@ -812,11 +950,11 @@ TEST_F(Http1ConnPoolImplTest, ConcurrentConnections) { ActiveTestRequest r3(*this, 0, ActiveTestRequest::Type::Pending); // Finish r1, which gets r3 going. - conn_pool_.expectEnableUpstreamReady(); + conn_pool_->expectEnableUpstreamReady(); r3.expectNewStream(); r1.completeResponse(false); - conn_pool_.expectAndRunUpstreamReady(); + conn_pool_->expectAndRunUpstreamReady(); r3.startRequest(); EXPECT_EQ(3U, cluster_->stats_.upstream_rq_total_.value()); @@ -824,9 +962,9 @@ TEST_F(Http1ConnPoolImplTest, ConcurrentConnections) { r3.completeResponse(false); // Disconnect both clients. 
- EXPECT_CALL(conn_pool_, onClientDestroy()).Times(2); - conn_pool_.test_clients_[1].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_CALL(*conn_pool_, onClientDestroy()).Times(2); + conn_pool_->test_clients_[1].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); dispatcher_.clearDeferredDeleteList(); EXPECT_EQ(2U, cluster_->stats_.upstream_cx_destroy_.value()); @@ -838,18 +976,18 @@ TEST_F(Http1ConnPoolImplTest, DrainCallback) { ReadyWatcher drained; EXPECT_CALL(drained, ready()); - conn_pool_.addDrainedCallback([&]() -> void { drained.ready(); }); + conn_pool_->addDrainedCallback([&]() -> void { drained.ready(); }); ActiveTestRequest r1(*this, 0, ActiveTestRequest::Type::CreateConnection); ActiveTestRequest r2(*this, 0, ActiveTestRequest::Type::Pending); - r2.handle_->cancel(); + r2.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::Default); EXPECT_EQ(1U, cluster_->stats_.upstream_rq_total_.value()); EXPECT_CALL(drained, ready()); r1.startRequest(); r1.completeResponse(false); - EXPECT_CALL(conn_pool_, onClientDestroy()); + EXPECT_CALL(*conn_pool_, onClientDestroy()); dispatcher_.clearDeferredDeleteList(); } @@ -860,17 +998,17 @@ TEST_F(Http1ConnPoolImplTest, DrainWhileConnecting) { NiceMock outer_decoder; ConnPoolCallbacks callbacks; - conn_pool_.expectClientCreate(); - Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); + conn_pool_->expectClientCreate(); + Http::ConnectionPool::Cancellable* handle = conn_pool_->newStream(outer_decoder, callbacks); EXPECT_NE(nullptr, handle); - conn_pool_.addDrainedCallback([&]() -> void { drained.ready(); }); - EXPECT_CALL(*conn_pool_.test_clients_[0].connection_, + conn_pool_->addDrainedCallback([&]() -> void { drained.ready(); }); + 
EXPECT_CALL(*conn_pool_->test_clients_[0].connection_, close(Network::ConnectionCloseType::NoFlush)); EXPECT_CALL(drained, ready()); - handle->cancel(); + handle->cancel(Envoy::ConnectionPool::CancelPolicy::Default); - EXPECT_CALL(conn_pool_, onClientDestroy()); + EXPECT_CALL(*conn_pool_, onClientDestroy()); dispatcher_.clearDeferredDeleteList(); } @@ -879,17 +1017,17 @@ TEST_F(Http1ConnPoolImplTest, RemoteCloseToCompleteResponse) { NiceMock outer_decoder; ConnPoolCallbacks callbacks; - conn_pool_.expectClientCreate(); - Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); + conn_pool_->expectClientCreate(); + Http::ConnectionPool::Cancellable* handle = conn_pool_->newStream(outer_decoder, callbacks); EXPECT_NE(nullptr, handle); NiceMock request_encoder; ResponseDecoder* inner_decoder; - EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, newStream(_)) + EXPECT_CALL(*conn_pool_->test_clients_[0].codec_, newStream(_)) .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder))); EXPECT_CALL(callbacks.pool_ready_, ready()); - EXPECT_CALL(*conn_pool_.test_clients_[0].connect_timer_, disableTimer()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); + EXPECT_CALL(*conn_pool_->test_clients_[0].connect_timer_, disableTimer()); + conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); callbacks.outer_encoder_->encodeHeaders( TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); @@ -900,16 +1038,17 @@ TEST_F(Http1ConnPoolImplTest, RemoteCloseToCompleteResponse) { inner_decoder->decodeData(dummy_data, false); Buffer::OwnedImpl empty_data; - EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, dispatch(BufferEqual(&empty_data))) - .WillOnce(Invoke([&](Buffer::Instance& data) -> void { + EXPECT_CALL(*conn_pool_->test_clients_[0].codec_, dispatch(BufferEqual(&empty_data))) + .WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { // 
Simulate the onResponseComplete call to decodeData since dispatch is mocked out. inner_decoder->decodeData(data, true); + return Http::okStatus(); })); - EXPECT_CALL(*conn_pool_.test_clients_[0].connection_, + EXPECT_CALL(*conn_pool_->test_clients_[0].connection_, close(Network::ConnectionCloseType::NoFlush)); - EXPECT_CALL(conn_pool_, onClientDestroy()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_CALL(*conn_pool_, onClientDestroy()); + conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); dispatcher_.clearDeferredDeleteList(); EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_.value()); @@ -917,19 +1056,19 @@ TEST_F(Http1ConnPoolImplTest, RemoteCloseToCompleteResponse) { } TEST_F(Http1ConnPoolImplTest, NoActiveConnectionsByDefault) { - EXPECT_FALSE(conn_pool_.hasActiveConnections()); + EXPECT_FALSE(conn_pool_->hasActiveConnections()); } TEST_F(Http1ConnPoolImplTest, ActiveRequestHasActiveConnectionsTrue) { ActiveTestRequest r1(*this, 0, ActiveTestRequest::Type::CreateConnection); r1.startRequest(); - EXPECT_TRUE(conn_pool_.hasActiveConnections()); + EXPECT_TRUE(conn_pool_->hasActiveConnections()); // cleanup r1.completeResponse(false); - conn_pool_.drainConnections(); - EXPECT_CALL(conn_pool_, onClientDestroy()); + conn_pool_->drainConnections(); + EXPECT_CALL(*conn_pool_, onClientDestroy()); dispatcher_.clearDeferredDeleteList(); } @@ -938,24 +1077,24 @@ TEST_F(Http1ConnPoolImplTest, ResponseCompletedConnectionReadyNoActiveConnection r1.startRequest(); r1.completeResponse(false); - EXPECT_FALSE(conn_pool_.hasActiveConnections()); + EXPECT_FALSE(conn_pool_->hasActiveConnections()); - conn_pool_.drainConnections(); - EXPECT_CALL(conn_pool_, onClientDestroy()); + conn_pool_->drainConnections(); + EXPECT_CALL(*conn_pool_, onClientDestroy()); dispatcher_.clearDeferredDeleteList(); } TEST_F(Http1ConnPoolImplTest, PendingRequestIsConsideredActive) { - 
conn_pool_.expectClientCreate(); + conn_pool_->expectClientCreate(); ActiveTestRequest r1(*this, 0, ActiveTestRequest::Type::Pending); - EXPECT_TRUE(conn_pool_.hasActiveConnections()); + EXPECT_TRUE(conn_pool_->hasActiveConnections()); - EXPECT_CALL(conn_pool_, onClientDestroy()); - r1.handle_->cancel(); + EXPECT_CALL(*conn_pool_, onClientDestroy()); + r1.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::Default); EXPECT_EQ(0U, cluster_->stats_.upstream_rq_total_.value()); - conn_pool_.drainConnections(); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + conn_pool_->drainConnections(); + conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); dispatcher_.clearDeferredDeleteList(); EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_.value()); diff --git a/test/common/http/http2/BUILD b/test/common/http/http2/BUILD index 870d27aced8c8..6c9e4f8c7f2ee 100644 --- a/test/common/http/http2/BUILD +++ b/test/common/http/http2/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_fuzz_test", @@ -8,34 +6,57 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() +CODEC_TEST_DEPS = [ + ":codec_impl_test_util", + "//source/common/event:dispatcher_lib", + "//source/common/http:exception_lib", + "//source/common/http:header_map_lib", + "//source/common/http:header_utility_lib", + "//source/common/http/http2:codec_legacy_lib", + "//source/common/http/http2:codec_lib", + "//source/common/runtime:runtime_lib", + "//source/common/stats:stats_lib", + "//test/common/http:common_lib", + "//test/common/http/http2:http2_frame", + "//test/common/stats:stat_test_utility_lib", + "//test/mocks/http:http_mocks", + "//test/mocks/init:init_mocks", + "//test/mocks/local_info:local_info_mocks", + "//test/mocks/network:network_mocks", + "//test/mocks/protobuf:protobuf_mocks", + "//test/mocks/thread_local:thread_local_mocks", + 
"//test/mocks/upstream:transport_socket_match_mocks", + "//test/mocks/upstream:upstream_mocks", + "//test/test_common:logging_lib", + "//test/test_common:registry_lib", + "//test/test_common:test_runtime_lib", + "//test/test_common:utility_lib", +] + envoy_cc_test( name = "codec_impl_test", srcs = ["codec_impl_test.cc"], + # The default codec is the legacy codec. Override runtime flag for testing new codec. + args = [ + "--runtime-feature-override-for-tests=envoy.reloadable_features.new_codec_behavior", + ], shard_count = 5, - deps = [ - ":codec_impl_test_util", - "//source/common/event:dispatcher_lib", - "//source/common/http:exception_lib", - "//source/common/http:header_map_lib", - "//source/common/http:header_utility_lib", - "//source/common/http/http2:codec_lib", - "//source/common/stats:stats_lib", - "//test/common/http:common_lib", - "//test/common/http/http2:http2_frame", - "//test/common/stats:stat_test_utility_lib", - "//test/mocks/http:http_mocks", - "//test/mocks/init:init_mocks", - "//test/mocks/local_info:local_info_mocks", - "//test/mocks/network:network_mocks", - "//test/mocks/protobuf:protobuf_mocks", - "//test/mocks/thread_local:thread_local_mocks", - "//test/mocks/upstream:upstream_mocks", - "//test/test_common:registry_lib", - "//test/test_common:test_runtime_lib", - "//test/test_common:utility_lib", + deps = CODEC_TEST_DEPS, +) + +envoy_cc_test( + name = "codec_impl_legacy_test", + srcs = ["codec_impl_test.cc"], + # The default codec is the legacy codec. Verify the runtime flag for the new codec is disabled. 
+ args = [ + "--runtime-feature-disable-for-tests=envoy.reloadable_features.new_codec_behavior", ], + shard_count = 5, + deps = CODEC_TEST_DEPS, ) envoy_cc_test_library( @@ -43,6 +64,7 @@ envoy_cc_test_library( hdrs = ["codec_impl_test_util.h"], external_deps = ["abseil_optional"], deps = [ + "//source/common/http/http2:codec_legacy_lib", "//source/common/http/http2:codec_lib", ], ) @@ -63,25 +85,7 @@ envoy_cc_test( "//test/mocks/network:network_mocks", "//test/mocks/runtime:runtime_mocks", "//test/mocks/upstream:upstream_mocks", - ], -) - -envoy_cc_test( - name = "conn_pool_legacy_test", - srcs = ["conn_pool_legacy_test.cc"], - deps = [ - "//source/common/event:dispatcher_lib", - "//source/common/http/http2:conn_pool_legacy_lib", - "//source/common/network:utility_lib", - "//source/common/upstream:upstream_includes", - "//source/common/upstream:upstream_lib", - "//test/common/http:common_lib", - "//test/common/upstream:utility_lib", - "//test/mocks/event:event_mocks", - "//test/mocks/http:http_mocks", - "//test/mocks/network:network_mocks", - "//test/mocks/runtime:runtime_mocks", - "//test/mocks/upstream:upstream_mocks", + "//test/test_common:test_runtime_lib", ], ) @@ -90,6 +94,8 @@ envoy_cc_test_library( srcs = ["http2_frame.cc"], hdrs = ["http2_frame.h"], deps = [ + "//source/common/common:assert_lib", + "//source/common/common:hex_lib", "//source/common/common:macros", ], ) @@ -104,7 +110,6 @@ envoy_cc_test_library( "//source/common/http:utility_lib", "//source/common/http/http2:codec_lib", "//test/common/http:common_lib", - "//test/common/http/http2:codec_impl_test_util", "//test/mocks/http:http_mocks", "//test/mocks/network:network_mocks", "//test/test_common:environment_lib", @@ -121,7 +126,10 @@ envoy_cc_test( "response_header_corpus/simple_example_huffman", "response_header_corpus/simple_example_plain", ], - deps = [":frame_replay_lib"], + deps = [ + ":frame_replay_lib", + "//test/common/http/http2:codec_impl_test_util", + ], ) envoy_cc_test( @@ -144,12 
+152,18 @@ envoy_cc_fuzz_test( name = "response_header_fuzz_test", srcs = ["response_header_fuzz_test.cc"], corpus = "response_header_corpus", - deps = [":frame_replay_lib"], + deps = [ + ":frame_replay_lib", + "//test/common/http/http2:codec_impl_test_util", + ], ) envoy_cc_fuzz_test( name = "request_header_fuzz_test", srcs = ["request_header_fuzz_test.cc"], corpus = "request_header_corpus", - deps = [":frame_replay_lib"], + deps = [ + ":frame_replay_lib", + "//test/common/http/http2:codec_impl_test_util", + ], ) diff --git a/test/common/http/http2/codec_impl_test.cc b/test/common/http/http2/codec_impl_test.cc index 6ee56bf7234f6..0c5c6d68f8c60 100644 --- a/test/common/http/http2/codec_impl_test.cc +++ b/test/common/http/http2/codec_impl_test.cc @@ -7,6 +7,7 @@ #include "common/http/exception.h" #include "common/http/header_map_impl.h" #include "common/http/http2/codec_impl.h" +#include "common/runtime/runtime_features.h" #include "test/common/http/common.h" #include "test/common/http/http2/http2_frame.h" @@ -17,6 +18,7 @@ #include "test/mocks/network/mocks.h" #include "test/mocks/protobuf/mocks.h" #include "test/mocks/thread_local/mocks.h" +#include "test/test_common/logging.h" #include "test/test_common/printers.h" #include "test/test_common/registry.h" #include "test/test_common/test_runtime.h" @@ -45,16 +47,48 @@ namespace CommonUtility = ::Envoy::Http2::Utility; class Http2CodecImplTestFixture { public: + // The Http::Connection::dispatch method does not throw (any more). However unit tests in this + // file use codecs for sending test data through mock network connections to the codec under test. + // It is infeasible to plumb error codes returned by the dispatch() method of the codecs under + // test, through mock connections and sending codec. As a result error returned by the dispatch + // method of the codec under test invoked by the ConnectionWrapper is thrown as an exception. 
Note + // that exception goes only through the mock network connection and sending codec, i.e. it is + // thrown only through the test harness code. Specific exception types are to distinguish error + // codes returned when processing requests or responses. + // TODO(yanavlasov): modify the code to verify test expectations at the point of calling codec + // under test through the ON_CALL expectations in the + // setupDefaultConnectionMocks() method. This will make the exceptions below + // unnecessary. + struct ClientCodecError : public std::runtime_error { + ClientCodecError(Http::Status&& status) + : std::runtime_error(std::string(status.message())), status_(std::move(status)) {} + const char* what() const noexcept override { return status_.message().data(); } + const Http::Status status_; + }; + + struct ServerCodecError : public std::runtime_error { + ServerCodecError(Http::Status&& status) + : std::runtime_error(std::string(status.message())), status_(std::move(status)) {} + const char* what() const noexcept override { return status_.message().data(); } + const Http::Status status_; + }; + struct ConnectionWrapper { - void dispatch(const Buffer::Instance& data, ConnectionImpl& connection) { + Http::Status dispatch(const Buffer::Instance& data, Connection& connection) { + Http::Status status = Http::okStatus(); buffer_.add(data); if (!dispatching_) { while (buffer_.length() > 0) { dispatching_ = true; - connection.dispatch(buffer_); + status = connection.dispatch(buffer_); + if (!status.ok()) { + // Exit early if we hit an error status. 
+ return status; + } dispatching_ = false; } } + return status; } bool dispatching_{}; @@ -70,19 +104,48 @@ class Http2CodecImplTestFixture { Http2CodecImplTestFixture() = default; Http2CodecImplTestFixture(Http2SettingsTuple client_settings, Http2SettingsTuple server_settings) - : client_settings_(client_settings), server_settings_(server_settings) {} - virtual ~Http2CodecImplTestFixture() = default; + : client_settings_(client_settings), server_settings_(server_settings) { + // Make sure we explicitly test for stream flush timer creation. + EXPECT_CALL(client_connection_.dispatcher_, createTimer_(_)).Times(0); + EXPECT_CALL(server_connection_.dispatcher_, createTimer_(_)).Times(0); + } + virtual ~Http2CodecImplTestFixture() { + client_connection_.dispatcher_.clearDeferredDeleteList(); + if (client_ != nullptr) { + client_.reset(); + EXPECT_EQ(0, TestUtility::findGauge(client_stats_store_, "http2.streams_active")->value()); + EXPECT_EQ(0, + TestUtility::findGauge(client_stats_store_, "http2.pending_send_bytes")->value()); + } + server_connection_.dispatcher_.clearDeferredDeleteList(); + if (server_ != nullptr) { + server_.reset(); + EXPECT_EQ(0, TestUtility::findGauge(server_stats_store_, "http2.streams_active")->value()); + EXPECT_EQ(0, + TestUtility::findGauge(server_stats_store_, "http2.pending_send_bytes")->value()); + } + } virtual void initialize() { http2OptionsFromTuple(client_http2_options_, client_settings_); http2OptionsFromTuple(server_http2_options_, server_settings_); - client_ = std::make_unique( - client_connection_, client_callbacks_, stats_store_, client_http2_options_, - max_request_headers_kb_, max_response_headers_count_, ProdNghttp2SessionFactory::get()); - server_ = std::make_unique( - server_connection_, server_callbacks_, stats_store_, server_http2_options_, - max_request_headers_kb_, max_request_headers_count_, headers_with_underscores_action_); - + if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.new_codec_behavior")) { + 
client_ = std::make_unique( + client_connection_, client_callbacks_, client_stats_store_, client_http2_options_, + max_request_headers_kb_, max_response_headers_count_, + ProdNghttp2SessionFactoryNew::get()); + server_ = std::make_unique( + server_connection_, server_callbacks_, server_stats_store_, server_http2_options_, + max_request_headers_kb_, max_request_headers_count_, headers_with_underscores_action_); + } else { + client_ = std::make_unique( + client_connection_, client_callbacks_, client_stats_store_, client_http2_options_, + max_request_headers_kb_, max_response_headers_count_, + ProdNghttp2SessionFactoryLegacy::get()); + server_ = std::make_unique( + server_connection_, server_callbacks_, server_stats_store_, server_http2_options_, + max_request_headers_kb_, max_request_headers_count_, headers_with_underscores_action_); + } request_encoder_ = &client_->newStream(response_decoder_); setupDefaultConnectionMocks(); @@ -90,6 +153,7 @@ class Http2CodecImplTestFixture { .WillRepeatedly(Invoke([&](ResponseEncoder& encoder, bool) -> RequestDecoder& { response_encoder_ = &encoder; encoder.getStream().addCallbacks(server_stream_callbacks_); + encoder.getStream().setFlushTimeout(std::chrono::milliseconds(30000)); return request_decoder_; })); } @@ -100,11 +164,17 @@ class Http2CodecImplTestFixture { if (corrupt_metadata_frame_) { corruptMetadataFramePayload(data); } - server_wrapper_.dispatch(data, *server_); + auto status = server_wrapper_.dispatch(data, *server_); + if (!status.ok()) { + throw ServerCodecError(std::move(status)); + } })); ON_CALL(server_connection_, write(_, _)) .WillByDefault(Invoke([&](Buffer::Instance& data, bool) -> void { - client_wrapper_.dispatch(data, *client_); + auto status = client_wrapper_.dispatch(data, *client_); + if (!status.ok()) { + throw ClientCodecError(std::move(status)); + } })); } @@ -123,7 +193,8 @@ class Http2CodecImplTestFixture { (tp.has_value()) ? 
::testing::get(*tp) : CommonUtility::OptionsLimits::DEFAULT_INITIAL_CONNECTION_WINDOW_SIZE); options.set_allow_metadata(allow_metadata_); - options.set_stream_error_on_invalid_http_messaging(stream_error_on_invalid_http_messaging_); + options.mutable_override_stream_error_on_invalid_http_message()->set_value( + stream_error_on_invalid_http_messaging_); options.mutable_max_outbound_frames()->set_value(max_outbound_frames_); options.mutable_max_outbound_control_frames()->set_value(max_outbound_control_frames_); options.mutable_max_consecutive_inbound_frames_with_empty_payload()->set_value( @@ -165,16 +236,17 @@ class Http2CodecImplTestFixture { absl::optional server_settings_; bool allow_metadata_ = false; bool stream_error_on_invalid_http_messaging_ = false; - Stats::TestUtil::TestStore stats_store_; + Stats::TestUtil::TestStore client_stats_store_; envoy::config::core::v3::Http2ProtocolOptions client_http2_options_; NiceMock client_connection_; MockConnectionCallbacks client_callbacks_; - std::unique_ptr client_; + std::unique_ptr client_; ConnectionWrapper client_wrapper_; + Stats::TestUtil::TestStore server_stats_store_; envoy::config::core::v3::Http2ProtocolOptions server_http2_options_; NiceMock server_connection_; MockServerConnectionCallbacks server_callbacks_; - std::unique_ptr server_; + std::unique_ptr server_; ConnectionWrapper server_wrapper_; MockResponseDecoder response_decoder_; RequestEncoder* request_encoder_; @@ -279,7 +351,7 @@ TEST_P(Http2CodecImplTest, ShutdownNotice) { EXPECT_CALL(request_decoder_, decodeHeaders_(_, true)); request_encoder_->encodeHeaders(request_headers, true); - EXPECT_CALL(client_callbacks_, onGoAway()); + EXPECT_CALL(client_callbacks_, onGoAway(_)); server_->shutdownNotice(); server_->goAway(); @@ -288,6 +360,7 @@ TEST_P(Http2CodecImplTest, ShutdownNotice) { response_encoder_->encodeHeaders(response_headers, true); } +// 100 response followed by 200 results in a [decode100ContinueHeaders, decodeHeaders] sequence. 
TEST_P(Http2CodecImplTest, ContinueHeaders) { initialize(); @@ -305,6 +378,78 @@ TEST_P(Http2CodecImplTest, ContinueHeaders) { response_encoder_->encodeHeaders(response_headers, true); }; +// nghttp2 rejects trailers with :status. +TEST_P(Http2CodecImplTest, TrailerStatus) { + initialize(); + + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(_, true)); + request_encoder_->encodeHeaders(request_headers, true); + + TestResponseHeaderMapImpl continue_headers{{":status", "100"}}; + EXPECT_CALL(response_decoder_, decode100ContinueHeaders_(_)); + response_encoder_->encode100ContinueHeaders(continue_headers); + + TestResponseHeaderMapImpl response_headers{{":status", "200"}}; + EXPECT_CALL(response_decoder_, decodeHeaders_(_, false)); + response_encoder_->encodeHeaders(response_headers, false); + + // nghttp2 doesn't allow :status in trailers + EXPECT_THROW(response_encoder_->encode100ContinueHeaders(continue_headers), ClientCodecError); + EXPECT_EQ(1, client_stats_store_.counter("http2.rx_messaging_error").value()); +}; + +// Multiple 100 responses are passed to the response encoder (who is responsible for coalescing). 
+TEST_P(Http2CodecImplTest, MultipleContinueHeaders) { + initialize(); + + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(_, true)); + request_encoder_->encodeHeaders(request_headers, true); + + TestResponseHeaderMapImpl continue_headers{{":status", "100"}}; + EXPECT_CALL(response_decoder_, decode100ContinueHeaders_(_)); + response_encoder_->encode100ContinueHeaders(continue_headers); + EXPECT_CALL(response_decoder_, decode100ContinueHeaders_(_)); + response_encoder_->encode100ContinueHeaders(continue_headers); + + TestResponseHeaderMapImpl response_headers{{":status", "200"}}; + EXPECT_CALL(response_decoder_, decodeHeaders_(_, true)); + response_encoder_->encodeHeaders(response_headers, true); +}; + +// 101/102 headers etc. are passed to the response encoder (who is responsibly for deciding to +// upgrade, ignore, etc.). +TEST_P(Http2CodecImplTest, 1xxNonContinueHeaders) { + initialize(); + + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(_, true)); + request_encoder_->encodeHeaders(request_headers, true); + + TestResponseHeaderMapImpl other_headers{{":status", "102"}}; + EXPECT_CALL(response_decoder_, decodeHeaders_(_, false)); + response_encoder_->encodeHeaders(other_headers, false); +}; + +// nghttp2 treats 101 inside an HTTP/2 stream as an invalid HTTP header field. 
+TEST_P(Http2CodecImplTest, Invalid101SwitchingProtocols) { + initialize(); + + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(_, true)); + request_encoder_->encodeHeaders(request_headers, true); + + TestResponseHeaderMapImpl upgrade_headers{{":status", "101"}}; + EXPECT_CALL(response_decoder_, decodeHeaders_(_, _)).Times(0); + EXPECT_THROW(response_encoder_->encodeHeaders(upgrade_headers, false), ClientCodecError); + EXPECT_EQ(1, client_stats_store_.counter("http2.rx_messaging_error").value()); +} + TEST_P(Http2CodecImplTest, InvalidContinueWithFin) { initialize(); @@ -314,8 +459,8 @@ TEST_P(Http2CodecImplTest, InvalidContinueWithFin) { request_encoder_->encodeHeaders(request_headers, true); TestResponseHeaderMapImpl continue_headers{{":status", "100"}}; - EXPECT_THROW(response_encoder_->encodeHeaders(continue_headers, true), CodecProtocolException); - EXPECT_EQ(1, stats_store_.counter("http2.rx_messaging_error").value()); + EXPECT_THROW(response_encoder_->encodeHeaders(continue_headers, true), ClientCodecError); + EXPECT_EQ(1, client_stats_store_.counter("http2.rx_messaging_error").value()); } TEST_P(Http2CodecImplTest, InvalidContinueWithFinAllowed) { @@ -341,9 +486,10 @@ TEST_P(Http2CodecImplTest, InvalidContinueWithFinAllowed) { // Flush pending data. 
EXPECT_CALL(request_callbacks, onResetStream(StreamResetReason::LocalReset, _)); setupDefaultConnectionMocks(); - client_wrapper_.dispatch(Buffer::OwnedImpl(), *client_); + auto status = client_wrapper_.dispatch(Buffer::OwnedImpl(), *client_); + EXPECT_TRUE(status.ok()); - EXPECT_EQ(1, stats_store_.counter("http2.rx_messaging_error").value()); + EXPECT_EQ(1, client_stats_store_.counter("http2.rx_messaging_error").value()); expectDetailsRequest("http2.violation.of.messaging.rule"); } @@ -359,8 +505,8 @@ TEST_P(Http2CodecImplTest, InvalidRepeatContinue) { EXPECT_CALL(response_decoder_, decode100ContinueHeaders_(_)); response_encoder_->encode100ContinueHeaders(continue_headers); - EXPECT_THROW(response_encoder_->encodeHeaders(continue_headers, true), CodecProtocolException); - EXPECT_EQ(1, stats_store_.counter("http2.rx_messaging_error").value()); + EXPECT_THROW(response_encoder_->encodeHeaders(continue_headers, true), ClientCodecError); + EXPECT_EQ(1, client_stats_store_.counter("http2.rx_messaging_error").value()); }; TEST_P(Http2CodecImplTest, InvalidRepeatContinueAllowed) { @@ -389,33 +535,13 @@ TEST_P(Http2CodecImplTest, InvalidRepeatContinueAllowed) { // Flush pending data. 
EXPECT_CALL(request_callbacks, onResetStream(StreamResetReason::LocalReset, _)); setupDefaultConnectionMocks(); - client_wrapper_.dispatch(Buffer::OwnedImpl(), *client_); + auto status = client_wrapper_.dispatch(Buffer::OwnedImpl(), *client_); + EXPECT_TRUE(status.ok()); - EXPECT_EQ(1, stats_store_.counter("http2.rx_messaging_error").value()); + EXPECT_EQ(1, client_stats_store_.counter("http2.rx_messaging_error").value()); expectDetailsRequest("http2.violation.of.messaging.rule"); }; -TEST_P(Http2CodecImplTest, Invalid103) { - initialize(); - - TestRequestHeaderMapImpl request_headers; - HttpTestUtility::addDefaultHeaders(request_headers); - EXPECT_CALL(request_decoder_, decodeHeaders_(_, true)); - request_encoder_->encodeHeaders(request_headers, true); - - TestResponseHeaderMapImpl continue_headers{{":status", "100"}}; - EXPECT_CALL(response_decoder_, decode100ContinueHeaders_(_)); - response_encoder_->encode100ContinueHeaders(continue_headers); - - TestResponseHeaderMapImpl early_hint_headers{{":status", "103"}}; - EXPECT_CALL(response_decoder_, decodeHeaders_(_, false)); - response_encoder_->encodeHeaders(early_hint_headers, false); - - EXPECT_THROW_WITH_MESSAGE(response_encoder_->encodeHeaders(early_hint_headers, false), - CodecProtocolException, "Unexpected 'trailers' with no end stream."); - EXPECT_EQ(1, stats_store_.counter("http2.too_many_header_frames").value()); -} - TEST_P(Http2CodecImplTest, Invalid204WithContentLength) { initialize(); @@ -433,8 +559,12 @@ TEST_P(Http2CodecImplTest, Invalid204WithContentLength) { response_headers.addCopy(std::to_string(i), std::to_string(i)); } - EXPECT_THROW(response_encoder_->encodeHeaders(response_headers, false), CodecProtocolException); - EXPECT_EQ(1, stats_store_.counter("http2.rx_messaging_error").value()); + EXPECT_LOG_CONTAINS( + "debug", + "Invalid HTTP header field was received: frame type: 1, stream: 1, name: [content-length], " + "value: [3]", + 
EXPECT_THROW(response_encoder_->encodeHeaders(response_headers, false), ClientCodecError)); + EXPECT_EQ(1, client_stats_store_.counter("http2.rx_messaging_error").value()); }; TEST_P(Http2CodecImplTest, Invalid204WithContentLengthAllowed) { @@ -469,9 +599,10 @@ TEST_P(Http2CodecImplTest, Invalid204WithContentLengthAllowed) { EXPECT_CALL(request_callbacks, onResetStream(StreamResetReason::LocalReset, _)); EXPECT_CALL(server_stream_callbacks_, onResetStream(StreamResetReason::RemoteReset, _)); setupDefaultConnectionMocks(); - client_wrapper_.dispatch(Buffer::OwnedImpl(), *client_); + auto status = client_wrapper_.dispatch(Buffer::OwnedImpl(), *client_); + EXPECT_TRUE(status.ok()); - EXPECT_EQ(1, stats_store_.counter("http2.rx_messaging_error").value()); + EXPECT_EQ(1, client_stats_store_.counter("http2.rx_messaging_error").value()); expectDetailsRequest("http2.invalid.header.field"); }; @@ -494,9 +625,8 @@ TEST_P(Http2CodecImplTest, RefusedStreamReset) { TEST_P(Http2CodecImplTest, InvalidHeadersFrame) { initialize(); - EXPECT_THROW(request_encoder_->encodeHeaders(TestRequestHeaderMapImpl{}, true), - CodecProtocolException); - EXPECT_EQ(1, stats_store_.counter("http2.rx_messaging_error").value()); + EXPECT_THROW(request_encoder_->encodeHeaders(TestRequestHeaderMapImpl{}, true), ServerCodecError); + EXPECT_EQ(1, server_stats_store_.counter("http2.rx_messaging_error").value()); } TEST_P(Http2CodecImplTest, InvalidHeadersFrameAllowed) { @@ -513,7 +643,8 @@ TEST_P(Http2CodecImplTest, InvalidHeadersFrameAllowed) { request_encoder_->encodeHeaders(TestRequestHeaderMapImpl{}, true); EXPECT_CALL(server_stream_callbacks_, onResetStream(StreamResetReason::LocalReset, _)); EXPECT_CALL(request_callbacks, onResetStream(StreamResetReason::RemoteReset, _)); - server_wrapper_.dispatch(Buffer::OwnedImpl(), *server_); + auto status = server_wrapper_.dispatch(Buffer::OwnedImpl(), *server_); + EXPECT_TRUE(status.ok()); expectDetailsResponse("http2.violation.of.messaging.rule"); } @@ 
-540,7 +671,66 @@ TEST_P(Http2CodecImplTest, TrailingHeaders) { response_encoder_->encodeTrailers(TestResponseTrailerMapImpl{{"trailing", "header"}}); } -TEST_P(Http2CodecImplTest, TrailingHeadersLargeBody) { +// When having empty trailers, codec submits empty buffer and end_stream instead. +TEST_P(Http2CodecImplTest, IgnoreTrailingEmptyHeaders) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.http2_skip_encoding_empty_trailers", "true"}}); + + initialize(); + + Buffer::OwnedImpl empty_buffer; + + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(_, false)); + request_encoder_->encodeHeaders(request_headers, false); + EXPECT_CALL(request_decoder_, decodeData(_, false)); + Buffer::OwnedImpl hello("hello"); + request_encoder_->encodeData(hello, false); + EXPECT_CALL(request_decoder_, decodeData(BufferEqual(&empty_buffer), true)); + request_encoder_->encodeTrailers(TestRequestTrailerMapImpl{}); + + TestResponseHeaderMapImpl response_headers{{":status", "200"}}; + EXPECT_CALL(response_decoder_, decodeHeaders_(_, false)); + response_encoder_->encodeHeaders(response_headers, false); + EXPECT_CALL(response_decoder_, decodeData(_, false)); + Buffer::OwnedImpl world("world"); + response_encoder_->encodeData(world, false); + EXPECT_CALL(response_decoder_, decodeData(BufferEqual(&empty_buffer), true)); + response_encoder_->encodeTrailers(TestResponseTrailerMapImpl{}); +} + +// When having empty trailers and "envoy.reloadable_features.http2_skip_encoding_empty_trailers" is +// turned off, codec submits empty trailers. 
+TEST_P(Http2CodecImplTest, SubmitTrailingEmptyHeaders) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.http2_skip_encoding_empty_trailers", "false"}}); + + initialize(); + + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(_, false)); + request_encoder_->encodeHeaders(request_headers, false); + EXPECT_CALL(request_decoder_, decodeData(_, false)); + Buffer::OwnedImpl hello("hello"); + request_encoder_->encodeData(hello, false); + EXPECT_CALL(request_decoder_, decodeTrailers_(_)); + request_encoder_->encodeTrailers(TestRequestTrailerMapImpl{}); + + TestResponseHeaderMapImpl response_headers{{":status", "200"}}; + EXPECT_CALL(response_decoder_, decodeHeaders_(_, false)); + response_encoder_->encodeHeaders(response_headers, false); + EXPECT_CALL(response_decoder_, decodeData(_, false)); + Buffer::OwnedImpl world("world"); + response_encoder_->encodeData(world, false); + EXPECT_CALL(response_decoder_, decodeTrailers_(_)); + response_encoder_->encodeTrailers(TestResponseTrailerMapImpl{}); +} + +TEST_P(Http2CodecImplTest, TrailingHeadersLargeClientBody) { initialize(); // Buffer server data so we can make sure we don't get any window updates. @@ -555,12 +745,13 @@ TEST_P(Http2CodecImplTest, TrailingHeadersLargeBody) { EXPECT_CALL(request_decoder_, decodeData(_, false)).Times(AtLeast(1)); Buffer::OwnedImpl body(std::string(1024 * 1024, 'a')); request_encoder_->encodeData(body, false); - EXPECT_CALL(request_decoder_, decodeTrailers_(_)); request_encoder_->encodeTrailers(TestRequestTrailerMapImpl{{"trailing", "header"}}); // Flush pending data. 
setupDefaultConnectionMocks(); - server_wrapper_.dispatch(Buffer::OwnedImpl(), *server_); + EXPECT_CALL(request_decoder_, decodeTrailers_(_)); + auto status = server_wrapper_.dispatch(Buffer::OwnedImpl(), *server_); + EXPECT_TRUE(status.ok()); TestResponseHeaderMapImpl response_headers{{":status", "200"}}; EXPECT_CALL(response_decoder_, decodeHeaders_(_, false)); @@ -650,7 +841,7 @@ TEST_P(Http2CodecImplTest, BadMetadataVecReceivedTest) { metadata_map_vector.push_back(std::move(metadata_map_ptr)); corrupt_metadata_frame_ = true; - EXPECT_THROW_WITH_MESSAGE(request_encoder_->encodeMetadata(metadata_map_vector), EnvoyException, + EXPECT_THROW_WITH_MESSAGE(request_encoder_->encodeMetadata(metadata_map_vector), ServerCodecError, "The user callback function failed"); } @@ -720,7 +911,8 @@ TEST_P(Http2CodecImplDeferredResetTest, DeferredResetClient) { EXPECT_CALL(server_stream_callbacks_, onResetStream(StreamResetReason::RemoteReset, _)); setupDefaultConnectionMocks(); - server_wrapper_.dispatch(Buffer::OwnedImpl(), *server_); + auto status = server_wrapper_.dispatch(Buffer::OwnedImpl(), *server_); + EXPECT_TRUE(status.ok()); } TEST_P(Http2CodecImplDeferredResetTest, DeferredResetServer) { @@ -741,8 +933,11 @@ TEST_P(Http2CodecImplDeferredResetTest, DeferredResetServer) { response_encoder_->encodeHeaders(response_headers, false); Buffer::OwnedImpl body(std::string(1024 * 1024, 'a')); EXPECT_CALL(server_stream_callbacks_, onAboveWriteBufferHighWatermark()).Times(AnyNumber()); + auto flush_timer = new Event::MockTimer(&server_connection_.dispatcher_); + EXPECT_CALL(*flush_timer, enableTimer(std::chrono::milliseconds(30000), _)); response_encoder_->encodeData(body, true); EXPECT_CALL(server_stream_callbacks_, onResetStream(StreamResetReason::LocalReset, _)); + EXPECT_CALL(*flush_timer, disableTimer()); response_encoder_->getStream().resetStream(StreamResetReason::LocalReset); MockStreamCallbacks client_stream_callbacks; @@ -751,7 +946,8 @@ 
TEST_P(Http2CodecImplDeferredResetTest, DeferredResetServer) { EXPECT_CALL(response_decoder_, decodeData(_, false)).Times(AtLeast(1)); EXPECT_CALL(client_stream_callbacks, onResetStream(StreamResetReason::RemoteReset, _)); setupDefaultConnectionMocks(); - client_wrapper_.dispatch(Buffer::OwnedImpl(), *client_); + auto status = client_wrapper_.dispatch(Buffer::OwnedImpl(), *client_); + EXPECT_TRUE(status.ok()); } class Http2CodecImplFlowControlTest : public Http2CodecImplTest {}; @@ -776,6 +972,8 @@ TEST_P(Http2CodecImplFlowControlTest, TestFlowControlInPendingSendData) { // Force the server stream to be read disabled. This will cause it to stop sending window // updates to the client. server_->getStream(1)->readDisable(true); + EXPECT_EQ(1, TestUtility::findGauge(client_stats_store_, "http2.streams_active")->value()); + EXPECT_EQ(1, TestUtility::findGauge(server_stats_store_, "http2.streams_active")->value()); uint32_t initial_stream_window = nghttp2_session_get_stream_effective_local_window_size(client_->session(), 1); @@ -795,22 +993,26 @@ TEST_P(Http2CodecImplFlowControlTest, TestFlowControlInPendingSendData) { // stream. EXPECT_EQ(0, nghttp2_session_get_stream_local_window_size(server_->session(), 1)); EXPECT_EQ(0, nghttp2_session_get_stream_remote_window_size(client_->session(), 1)); - EXPECT_EQ(initial_stream_window, server_->getStream(1)->unconsumed_bytes_); + EXPECT_EQ(initial_stream_window, server_->getStreamUnconsumedBytes(1)); // Now that the flow control window is full, further data causes the send buffer to back up. 
Buffer::OwnedImpl more_long_data(std::string(initial_stream_window, 'a')); request_encoder_->encodeData(more_long_data, false); - EXPECT_EQ(initial_stream_window, client_->getStream(1)->pending_send_data_.length()); - EXPECT_EQ(initial_stream_window, server_->getStream(1)->unconsumed_bytes_); + EXPECT_EQ(initial_stream_window, client_->getStreamPendingSendDataLength(1)); + EXPECT_EQ(initial_stream_window, + TestUtility::findGauge(client_stats_store_, "http2.pending_send_bytes")->value()); + EXPECT_EQ(initial_stream_window, server_->getStreamUnconsumedBytes(1)); // If we go over the limit, the stream callbacks should fire. EXPECT_CALL(callbacks, onAboveWriteBufferHighWatermark()); Buffer::OwnedImpl last_byte("!"); request_encoder_->encodeData(last_byte, false); - EXPECT_EQ(initial_stream_window + 1, client_->getStream(1)->pending_send_data_.length()); + EXPECT_EQ(initial_stream_window + 1, client_->getStreamPendingSendDataLength(1)); + EXPECT_EQ(initial_stream_window + 1, + TestUtility::findGauge(client_stats_store_, "http2.pending_send_bytes")->value()); // Now create a second stream on the connection. 
- MockStreamDecoder response_decoder2; + MockResponseDecoder response_decoder2; RequestEncoder* request_encoder2 = &client_->newStream(response_decoder_); StreamEncoder* response_encoder2; MockStreamCallbacks server_stream_callbacks2; @@ -850,7 +1052,8 @@ TEST_P(Http2CodecImplFlowControlTest, TestFlowControlInPendingSendData) { EXPECT_CALL(callbacks2, onBelowWriteBufferLowWatermark()).Times(0); EXPECT_CALL(callbacks3, onBelowWriteBufferLowWatermark()); server_->getStream(1)->readDisable(false); - EXPECT_EQ(0, client_->getStream(1)->pending_send_data_.length()); + EXPECT_EQ(0, client_->getStreamPendingSendDataLength(1)); + EXPECT_EQ(0, TestUtility::findGauge(client_stats_store_, "http2.pending_send_bytes")->value()); // The extra 1 byte sent won't trigger another window update, so the final window should be the // initial window minus the last 1 byte flush from the client to server. EXPECT_EQ(initial_stream_window - 1, @@ -894,7 +1097,7 @@ TEST_P(Http2CodecImplFlowControlTest, EarlyResetRestoresWindow) { // stream. EXPECT_EQ(0, nghttp2_session_get_stream_local_window_size(server_->session(), 1)); EXPECT_EQ(0, nghttp2_session_get_stream_remote_window_size(client_->session(), 1)); - EXPECT_EQ(initial_stream_window, server_->getStream(1)->unconsumed_bytes_); + EXPECT_EQ(initial_stream_window, server_->getStreamUnconsumedBytes(1)); EXPECT_GT(initial_connection_window, nghttp2_session_get_remote_window_size(client_->session())); EXPECT_CALL(server_stream_callbacks_, @@ -936,13 +1139,151 @@ TEST_P(Http2CodecImplFlowControlTest, FlowControlPendingRecvData) { // the recv buffer can be overrun by a client which negotiates a larger // SETTINGS_MAX_FRAME_SIZE but there's no current easy way to tweak that in // envoy (without sending raw HTTP/2 frames) so we lower the buffer limit instead. 
- server_->getStream(1)->setWriteBufferWatermarks(10, 20); + server_->setStreamWriteBufferWatermarks(1, 10, 20); EXPECT_CALL(request_decoder_, decodeData(_, false)); Buffer::OwnedImpl data(std::string(40, 'a')); request_encoder_->encodeData(data, false); } +// Verify that we create and disable the stream flush timer when trailers follow a stream that +// does not have enough window. +TEST_P(Http2CodecImplFlowControlTest, TrailingHeadersLargeServerBody) { + initialize(); + + InSequence s; + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(_, true)); + request_encoder_->encodeHeaders(request_headers, true); + + ON_CALL(client_connection_, write(_, _)) + .WillByDefault( + Invoke([&](Buffer::Instance& data, bool) -> void { server_wrapper_.buffer_.add(data); })); + TestResponseHeaderMapImpl response_headers{{":status", "200"}}; + EXPECT_CALL(response_decoder_, decodeHeaders_(_, false)); + response_encoder_->encodeHeaders(response_headers, false); + EXPECT_CALL(server_stream_callbacks_, onAboveWriteBufferHighWatermark()); + EXPECT_CALL(response_decoder_, decodeData(_, false)).Times(AtLeast(1)); + auto flush_timer = new Event::MockTimer(&server_connection_.dispatcher_); + EXPECT_CALL(*flush_timer, enableTimer(std::chrono::milliseconds(30000), _)); + Buffer::OwnedImpl body(std::string(1024 * 1024, 'a')); + response_encoder_->encodeData(body, false); + response_encoder_->encodeTrailers(TestResponseTrailerMapImpl{{"trailing", "header"}}); + + // Send window updates from the client. 
+ setupDefaultConnectionMocks(); + EXPECT_CALL(response_decoder_, decodeData(_, false)).Times(AtLeast(1)); + EXPECT_CALL(response_decoder_, decodeTrailers_(_)); + EXPECT_CALL(*flush_timer, disableTimer()); + auto status = server_wrapper_.dispatch(Buffer::OwnedImpl(), *server_); + EXPECT_TRUE(status.ok()); + EXPECT_EQ(0, server_stats_store_.counter("http2.tx_flush_timeout").value()); +} + +// Verify that we create and handle the stream flush timeout when trailers follow a stream that +// does not have enough window. +TEST_P(Http2CodecImplFlowControlTest, TrailingHeadersLargeServerBodyFlushTimeout) { + initialize(); + + InSequence s; + MockStreamCallbacks client_stream_callbacks; + request_encoder_->getStream().addCallbacks(client_stream_callbacks); + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(_, true)); + request_encoder_->encodeHeaders(request_headers, true); + + ON_CALL(client_connection_, write(_, _)) + .WillByDefault( + Invoke([&](Buffer::Instance& data, bool) -> void { server_wrapper_.buffer_.add(data); })); + TestResponseHeaderMapImpl response_headers{{":status", "200"}}; + EXPECT_CALL(response_decoder_, decodeHeaders_(_, false)); + response_encoder_->encodeHeaders(response_headers, false); + EXPECT_CALL(server_stream_callbacks_, onAboveWriteBufferHighWatermark()); + EXPECT_CALL(response_decoder_, decodeData(_, false)).Times(AtLeast(1)); + auto flush_timer = new Event::MockTimer(&server_connection_.dispatcher_); + EXPECT_CALL(*flush_timer, enableTimer(std::chrono::milliseconds(30000), _)); + Buffer::OwnedImpl body(std::string(1024 * 1024, 'a')); + response_encoder_->encodeData(body, false); + response_encoder_->encodeTrailers(TestResponseTrailerMapImpl{{"trailing", "header"}}); + + // Invoke a stream flush timeout. Make sure we don't get a reset locally for higher layers but + // we do get a reset on the client. 
+ EXPECT_CALL(server_stream_callbacks_, onResetStream(_, _)).Times(0); + EXPECT_CALL(client_stream_callbacks, onResetStream(StreamResetReason::RemoteReset, _)); + flush_timer->invokeCallback(); + EXPECT_EQ(1, server_stats_store_.counter("http2.tx_flush_timeout").value()); +} + +// Verify that we create and handle the stream flush timeout when there is a large body that +// does not have enough window. +TEST_P(Http2CodecImplFlowControlTest, LargeServerBodyFlushTimeout) { + initialize(); + + InSequence s; + MockStreamCallbacks client_stream_callbacks; + request_encoder_->getStream().addCallbacks(client_stream_callbacks); + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(_, true)); + request_encoder_->encodeHeaders(request_headers, true); + + ON_CALL(client_connection_, write(_, _)) + .WillByDefault( + Invoke([&](Buffer::Instance& data, bool) -> void { server_wrapper_.buffer_.add(data); })); + TestResponseHeaderMapImpl response_headers{{":status", "200"}}; + EXPECT_CALL(response_decoder_, decodeHeaders_(_, false)); + response_encoder_->encodeHeaders(response_headers, false); + EXPECT_CALL(response_decoder_, decodeData(_, false)).Times(AtLeast(1)); + auto flush_timer = new Event::MockTimer(&server_connection_.dispatcher_); + EXPECT_CALL(*flush_timer, enableTimer(std::chrono::milliseconds(30000), _)); + Buffer::OwnedImpl body(std::string(1024 * 1024, 'a')); + response_encoder_->encodeData(body, true); + + // Invoke a stream flush timeout. Make sure we don't get a reset locally for higher layers but + // we do get a reset on the client. 
+ EXPECT_CALL(server_stream_callbacks_, onResetStream(_, _)).Times(0); + EXPECT_CALL(client_stream_callbacks, onResetStream(StreamResetReason::RemoteReset, _)); + flush_timer->invokeCallback(); + EXPECT_EQ(1, server_stats_store_.counter("http2.tx_flush_timeout").value()); +} + +// Verify that when an incoming protocol error races with a stream flush timeout we correctly +// disable the flush timeout and do not attempt to reset the stream. +TEST_P(Http2CodecImplFlowControlTest, LargeServerBodyFlushTimeoutAfterGoaway) { + initialize(); + + InSequence s; + MockStreamCallbacks client_stream_callbacks; + request_encoder_->getStream().addCallbacks(client_stream_callbacks); + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(_, true)); + request_encoder_->encodeHeaders(request_headers, true); + + ON_CALL(client_connection_, write(_, _)) + .WillByDefault( + Invoke([&](Buffer::Instance& data, bool) -> void { server_wrapper_.buffer_.add(data); })); + TestResponseHeaderMapImpl response_headers{{":status", "200"}}; + EXPECT_CALL(response_decoder_, decodeHeaders_(_, false)); + response_encoder_->encodeHeaders(response_headers, false); + EXPECT_CALL(response_decoder_, decodeData(_, false)).Times(AtLeast(1)); + auto flush_timer = new Event::MockTimer(&server_connection_.dispatcher_); + EXPECT_CALL(*flush_timer, enableTimer(std::chrono::milliseconds(30000), _)); + Buffer::OwnedImpl body(std::string(1024 * 1024, 'a')); + response_encoder_->encodeData(body, true); + + // Force a protocol error. 
+ Buffer::OwnedImpl garbage_data("this should cause a protocol error"); + EXPECT_CALL(client_callbacks_, onGoAway(_)); + EXPECT_CALL(*flush_timer, disableTimer()); + EXPECT_CALL(server_stream_callbacks_, onResetStream(_, _)).Times(0); + auto status = server_wrapper_.dispatch(garbage_data, *server_); + EXPECT_FALSE(status.ok()); + EXPECT_EQ(0, server_stats_store_.counter("http2.tx_flush_timeout").value()); +} + TEST_P(Http2CodecImplTest, WatermarkUnderEndStream) { initialize(); MockStreamCallbacks callbacks; @@ -996,13 +1337,23 @@ class Http2CodecImplStreamLimitTest : public Http2CodecImplTest {}; TEST_P(Http2CodecImplStreamLimitTest, MaxClientStreams) { http2OptionsFromTuple(client_http2_options_, ::testing::get<0>(GetParam())); http2OptionsFromTuple(server_http2_options_, ::testing::get<1>(GetParam())); - client_ = std::make_unique( - client_connection_, client_callbacks_, stats_store_, client_http2_options_, - max_request_headers_kb_, max_response_headers_count_, ProdNghttp2SessionFactory::get()); - server_ = std::make_unique( - server_connection_, server_callbacks_, stats_store_, server_http2_options_, - max_request_headers_kb_, max_request_headers_count_, headers_with_underscores_action_); + if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.new_codec_behavior")) { + client_ = std::make_unique( + client_connection_, client_callbacks_, client_stats_store_, client_http2_options_, + max_request_headers_kb_, max_response_headers_count_, ProdNghttp2SessionFactoryNew::get()); + server_ = std::make_unique( + server_connection_, server_callbacks_, server_stats_store_, server_http2_options_, + max_request_headers_kb_, max_request_headers_count_, headers_with_underscores_action_); + } else { + client_ = std::make_unique( + client_connection_, client_callbacks_, client_stats_store_, client_http2_options_, + max_request_headers_kb_, max_response_headers_count_, + ProdNghttp2SessionFactoryLegacy::get()); + server_ = std::make_unique( + server_connection_, 
server_callbacks_, server_stats_store_, server_http2_options_, + max_request_headers_kb_, max_request_headers_count_, headers_with_underscores_action_); + } for (int i = 0; i < 101; ++i) { request_encoder_ = &client_->newStream(response_decoder_); setupDefaultConnectionMocks(); @@ -1263,7 +1614,7 @@ TEST_P(Http2CodecImplTest, HeaderNameWithUnderscoreAreDropped) { request_headers.addCopy("bad_header", "something"); EXPECT_CALL(request_decoder_, decodeHeaders_(HeaderMapEqual(&expected_headers), _)); request_encoder_->encodeHeaders(request_headers, false); - EXPECT_EQ(1, stats_store_.counter("http2.dropped_headers_with_underscores").value()); + EXPECT_EQ(1, server_stats_store_.counter("http2.dropped_headers_with_underscores").value()); } // Tests that request with header names containing underscore are rejected when the option is set to @@ -1277,7 +1628,9 @@ TEST_P(Http2CodecImplTest, HeaderNameWithUnderscoreAreRejectedByDefault) { request_headers.addCopy("bad_header", "something"); EXPECT_CALL(server_stream_callbacks_, onResetStream(_, _)).Times(1); request_encoder_->encodeHeaders(request_headers, false); - EXPECT_EQ(1, stats_store_.counter("http2.requests_rejected_with_underscores_in_headers").value()); + EXPECT_EQ( + 1, + server_stats_store_.counter("http2.requests_rejected_with_underscores_in_headers").value()); } // Tests request headers with name containing underscore are allowed when the option is set to @@ -1293,7 +1646,7 @@ TEST_P(Http2CodecImplTest, HeaderNameWithUnderscoreAllowed) { EXPECT_CALL(request_decoder_, decodeHeaders_(HeaderMapEqual(&expected_headers), _)); EXPECT_CALL(server_stream_callbacks_, onResetStream(_, _)).Times(0); request_encoder_->encodeHeaders(request_headers, false); - EXPECT_EQ(0, stats_store_.counter("http2.dropped_headers_with_underscores").value()); + EXPECT_EQ(0, server_stats_store_.counter("http2.dropped_headers_with_underscores").value()); } // This is the HTTP/2 variant of the HTTP/1 regression test for CVE-2019-18801. 
@@ -1412,7 +1765,7 @@ TEST_P(Http2CodecImplTest, LargeRequestHeadersExceedPerHeaderLimit) { request_headers.addCopy("big", long_string); EXPECT_CALL(request_decoder_, decodeHeaders_(_, _)).Times(0); - EXPECT_CALL(client_callbacks_, onGoAway()); + EXPECT_CALL(client_callbacks_, onGoAway(_)); server_->shutdownNotice(); server_->goAway(); request_encoder_->encodeHeaders(request_headers, true); @@ -1507,9 +1860,14 @@ TEST_P(Http2CodecImplTest, PingFlood) { buffer.move(frame); })); - EXPECT_THROW(client_->sendPendingFrames(), FrameFloodException); + // Legacy codec does not propagate error details and uses generic error message + EXPECT_THROW_WITH_MESSAGE( + client_->sendPendingFrames().IgnoreError(), ServerCodecError, + Runtime::runtimeFeatureEnabled("envoy.reloadable_features.new_codec_behavior") + ? "Too many control frames in the outbound queue." + : "Too many frames in the outbound queue."); EXPECT_EQ(ack_count, CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_CONTROL_FRAMES); - EXPECT_EQ(1, stats_store_.counter("http2.outbound_control_flood").value()); + EXPECT_EQ(1, server_stats_store_.counter("http2.outbound_control_flood").value()); } // Verify that codec allows PING flood when mitigation is disabled @@ -1530,12 +1888,15 @@ TEST_P(Http2CodecImplTest, PingFloodMitigationDisabled) { EXPECT_CALL(server_connection_, write(_, _)) .Times(CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_CONTROL_FRAMES + 1); - EXPECT_NO_THROW(client_->sendPendingFrames()); + EXPECT_NO_THROW(client_->sendPendingFrames().IgnoreError()); } // Verify that outbound control frame counter decreases when send buffer is drained TEST_P(Http2CodecImplTest, PingFloodCounterReset) { - static const int kMaxOutboundControlFrames = 100; + // Ping frames are 17 bytes each so 237 full frames and a partial frame fit in the current min + // size for buffer slices. Setting the limit to 2x+1 the number that fits in a single slice allows + // the logic below that verifies drain and overflow thresholds. 
+ static const int kMaxOutboundControlFrames = 475; max_outbound_control_frames_ = kMaxOutboundControlFrames; initialize(); @@ -1557,23 +1918,29 @@ TEST_P(Http2CodecImplTest, PingFloodCounterReset) { })); // We should be 1 frame under the control frame flood mitigation threshold. - EXPECT_NO_THROW(client_->sendPendingFrames()); + EXPECT_NO_THROW(client_->sendPendingFrames().IgnoreError()); EXPECT_EQ(ack_count, kMaxOutboundControlFrames); - // Drain kMaxOutboundFrames / 2 slices from the send buffer + // Drain floor(kMaxOutboundFrames / 2) slices from the send buffer buffer.drain(buffer.length() / 2); - // Send kMaxOutboundFrames / 2 more pings. + // Send floor(kMaxOutboundFrames / 2) more pings. for (int i = 0; i < kMaxOutboundControlFrames / 2; ++i) { EXPECT_EQ(0, nghttp2_submit_ping(client_->session(), NGHTTP2_FLAG_NONE, nullptr)); } // The number of outbound frames should be half of max so the connection should not be // terminated. - EXPECT_NO_THROW(client_->sendPendingFrames()); + EXPECT_NO_THROW(client_->sendPendingFrames().IgnoreError()); + EXPECT_EQ(ack_count, kMaxOutboundControlFrames + kMaxOutboundControlFrames / 2); // 1 more ping frame should overflow the outbound frame limit. EXPECT_EQ(0, nghttp2_submit_ping(client_->session(), NGHTTP2_FLAG_NONE, nullptr)); - EXPECT_THROW(client_->sendPendingFrames(), FrameFloodException); + // Legacy codec does not propagate error details and uses generic error message + EXPECT_THROW_WITH_MESSAGE( + client_->sendPendingFrames().IgnoreError(), ServerCodecError, + Runtime::runtimeFeatureEnabled("envoy.reloadable_features.new_codec_behavior") + ? "Too many control frames in the outbound queue." 
+ : "Too many frames in the outbound queue."); } // Verify that codec detects flood of outbound HEADER frames @@ -1600,10 +1967,11 @@ TEST_P(Http2CodecImplTest, ResponseHeadersFlood) { // Presently flood mitigation is done only when processing downstream data // So we need to send stream from downstream client to trigger mitigation EXPECT_EQ(0, nghttp2_submit_ping(client_->session(), NGHTTP2_FLAG_NONE, nullptr)); - EXPECT_THROW(client_->sendPendingFrames(), FrameFloodException); + EXPECT_THROW_WITH_MESSAGE(client_->sendPendingFrames().IgnoreError(), ServerCodecError, + "Too many frames in the outbound queue."); EXPECT_EQ(frame_count, CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_FRAMES + 1); - EXPECT_EQ(1, stats_store_.counter("http2.outbound_flood").value()); + EXPECT_EQ(1, server_stats_store_.counter("http2.outbound_flood").value()); } // Verify that codec detects flood of outbound DATA frames @@ -1633,10 +2001,11 @@ TEST_P(Http2CodecImplTest, ResponseDataFlood) { // Presently flood mitigation is done only when processing downstream data // So we need to send stream from downstream client to trigger mitigation EXPECT_EQ(0, nghttp2_submit_ping(client_->session(), NGHTTP2_FLAG_NONE, nullptr)); - EXPECT_THROW(client_->sendPendingFrames(), FrameFloodException); + EXPECT_THROW_WITH_MESSAGE(client_->sendPendingFrames().IgnoreError(), ServerCodecError, + "Too many frames in the outbound queue."); EXPECT_EQ(frame_count, CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_FRAMES + 1); - EXPECT_EQ(1, stats_store_.counter("http2.outbound_flood").value()); + EXPECT_EQ(1, server_stats_store_.counter("http2.outbound_flood").value()); } // Verify that codec allows outbound DATA flood when mitigation is disabled @@ -1665,7 +2034,7 @@ TEST_P(Http2CodecImplTest, ResponseDataFloodMitigationDisabled) { // Presently flood mitigation is done only when processing downstream data // So we need to send stream from downstream client to trigger mitigation EXPECT_EQ(0, 
nghttp2_submit_ping(client_->session(), NGHTTP2_FLAG_NONE, nullptr)); - EXPECT_NO_THROW(client_->sendPendingFrames()); + EXPECT_NO_THROW(client_->sendPendingFrames().IgnoreError()); } // Verify that outbound frame counter decreases when send buffer is drained @@ -1707,7 +2076,8 @@ TEST_P(Http2CodecImplTest, ResponseDataFloodCounterReset) { // Presently flood mitigation is done only when processing downstream data // So we need to send a frame from downstream client to trigger mitigation EXPECT_EQ(0, nghttp2_submit_ping(client_->session(), NGHTTP2_FLAG_NONE, nullptr)); - EXPECT_THROW(client_->sendPendingFrames(), FrameFloodException); + EXPECT_THROW_WITH_MESSAGE(client_->sendPendingFrames().IgnoreError(), ServerCodecError, + "Too many frames in the outbound queue."); } // Verify that control frames are added to the counter of outbound frames of all types. @@ -1736,40 +2106,58 @@ TEST_P(Http2CodecImplTest, PingStacksWithDataFlood) { } // Send one PING frame above the outbound queue size limit EXPECT_EQ(0, nghttp2_submit_ping(client_->session(), NGHTTP2_FLAG_NONE, nullptr)); - EXPECT_THROW(client_->sendPendingFrames(), FrameFloodException); + EXPECT_THROW_WITH_MESSAGE(client_->sendPendingFrames().IgnoreError(), ServerCodecError, + "Too many frames in the outbound queue."); EXPECT_EQ(frame_count, CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_FRAMES); - EXPECT_EQ(1, stats_store_.counter("http2.outbound_flood").value()); + EXPECT_EQ(1, server_stats_store_.counter("http2.outbound_flood").value()); } TEST_P(Http2CodecImplTest, PriorityFlood) { priorityFlood(); - EXPECT_THROW(client_->sendPendingFrames(), FrameFloodException); + // Legacy codec does not propagate error details and uses generic error message + EXPECT_THROW_WITH_MESSAGE( + client_->sendPendingFrames().IgnoreError(), ServerCodecError, + Runtime::runtimeFeatureEnabled("envoy.reloadable_features.new_codec_behavior") + ? 
"Too many PRIORITY frames" + : "Flooding was detected in this HTTP/2 session, and it must be closed"); } TEST_P(Http2CodecImplTest, PriorityFloodOverride) { max_inbound_priority_frames_per_stream_ = 2147483647; priorityFlood(); - EXPECT_NO_THROW(client_->sendPendingFrames()); + EXPECT_NO_THROW(client_->sendPendingFrames().IgnoreError()); } TEST_P(Http2CodecImplTest, WindowUpdateFlood) { windowUpdateFlood(); - EXPECT_THROW(client_->sendPendingFrames(), FrameFloodException); + // Legacy codec does not propagate error details and uses generic error message + EXPECT_THROW_WITH_MESSAGE( + client_->sendPendingFrames().IgnoreError(), ServerCodecError, + Runtime::runtimeFeatureEnabled("envoy.reloadable_features.new_codec_behavior") + ? "Too many WINDOW_UPDATE frames" + : "Flooding was detected in this HTTP/2 session, and it must be closed"); } TEST_P(Http2CodecImplTest, WindowUpdateFloodOverride) { max_inbound_window_update_frames_per_data_frame_sent_ = 2147483647; windowUpdateFlood(); - EXPECT_NO_THROW(client_->sendPendingFrames()); + EXPECT_NO_THROW(client_->sendPendingFrames().IgnoreError()); } TEST_P(Http2CodecImplTest, EmptyDataFlood) { Buffer::OwnedImpl data; emptyDataFlood(data); EXPECT_CALL(request_decoder_, decodeData(_, false)); - EXPECT_THROW(server_wrapper_.dispatch(data, *server_), FrameFloodException); + auto status = server_wrapper_.dispatch(data, *server_); + EXPECT_FALSE(status.ok()); + EXPECT_TRUE(isBufferFloodError(status)); + // Legacy codec does not propagate error details and uses generic error message + EXPECT_EQ(Runtime::runtimeFeatureEnabled("envoy.reloadable_features.new_codec_behavior") + ? 
"Too many consecutive frames with an empty payload" + : "Flooding was detected in this HTTP/2 session, and it must be closed", + status.message()); } TEST_P(Http2CodecImplTest, EmptyDataFloodOverride) { @@ -1780,7 +2168,8 @@ TEST_P(Http2CodecImplTest, EmptyDataFloodOverride) { .Times( CommonUtility::OptionsLimits::DEFAULT_MAX_CONSECUTIVE_INBOUND_FRAMES_WITH_EMPTY_PAYLOAD + 1); - EXPECT_NO_THROW(server_wrapper_.dispatch(data, *server_)); + auto status = server_wrapper_.dispatch(data, *server_); + EXPECT_TRUE(status.ok()); } // CONNECT without upgrade type gets tagged with "bytestream" @@ -1803,78 +2192,67 @@ TEST_P(Http2CodecImplTest, ConnectTest) { request_encoder_->encodeHeaders(request_headers, false); } -class TestNghttp2SessionFactory; +template class TestNghttp2SessionFactory; // Test client for H/2 METADATA frame edge cases. -class MetadataTestClientConnectionImpl : public TestClientConnectionImpl { +template +class MetadataTestClientConnectionImpl : public TestClientConnectionImplType { public: MetadataTestClientConnectionImpl( Network::Connection& connection, Http::ConnectionCallbacks& callbacks, Stats::Scope& scope, const envoy::config::core::v3::Http2ProtocolOptions& http2_options, uint32_t max_request_headers_kb, uint32_t max_request_headers_count, - Nghttp2SessionFactory& http2_session_factory) - : TestClientConnectionImpl(connection, callbacks, scope, http2_options, - max_request_headers_kb, max_request_headers_count, - http2_session_factory) {} + typename TestClientConnectionImplType::SessionFactory& http2_session_factory) + : TestClientConnectionImplType(connection, callbacks, scope, http2_options, + max_request_headers_kb, max_request_headers_count, + http2_session_factory) {} // Overrides TestClientConnectionImpl::submitMetadata(). bool submitMetadata(const MetadataMapVector& metadata_map_vector, int32_t stream_id) override { // Creates metadata payload. 
encoder_.createPayload(metadata_map_vector); for (uint8_t flags : encoder_.payloadFrameFlagBytes()) { - int result = nghttp2_submit_extension(session(), ::Envoy::Http::METADATA_FRAME_TYPE, flags, - stream_id, nullptr); + int result = + nghttp2_submit_extension(TestClientConnectionImplType::session(), + ::Envoy::Http::METADATA_FRAME_TYPE, flags, stream_id, nullptr); if (result != 0) { return false; } } // Triggers nghttp2 to populate the payloads of the METADATA frames. - int result = nghttp2_session_send(session()); + int result = nghttp2_session_send(TestClientConnectionImplType::session()); return result == 0; } protected: - friend class TestNghttp2SessionFactory; + template friend class TestNghttp2SessionFactory; MetadataEncoder encoder_; }; -class TestNghttp2SessionFactory : public Nghttp2SessionFactory { +using MetadataTestClientConnectionImplNew = + MetadataTestClientConnectionImpl; +using MetadataTestClientConnectionImplLegacy = + MetadataTestClientConnectionImpl; + +struct Nghttp2SessionFactoryDeleter { + virtual ~Nghttp2SessionFactoryDeleter() = default; +}; + +template +class TestNghttp2SessionFactory : public Nghttp2SessionFactoryType, + public Nghttp2SessionFactoryDeleter { public: ~TestNghttp2SessionFactory() override { nghttp2_session_callbacks_del(callbacks_); nghttp2_option_del(options_); } - nghttp2_session* create(const nghttp2_session_callbacks*, ConnectionImpl* connection, - const nghttp2_option*) override { - // Only need to provide callbacks required to send METADATA frames. - nghttp2_session_callbacks_new(&callbacks_); - nghttp2_session_callbacks_set_pack_extension_callback( - callbacks_, - [](nghttp2_session*, uint8_t* data, size_t length, const nghttp2_frame*, - void* user_data) -> ssize_t { - // Double cast required due to multiple inheritance. 
- return static_cast( - static_cast(user_data)) - ->encoder_.packNextFramePayload(data, length); - }); - nghttp2_session_callbacks_set_send_callback( - callbacks_, - [](nghttp2_session*, const uint8_t* data, size_t length, int, void* user_data) -> ssize_t { - // Cast down to MetadataTestClientConnectionImpl to leverage friendship. - return static_cast( - static_cast(user_data)) - ->onSend(data, length); - }); - nghttp2_option_new(&options_); - nghttp2_option_set_user_recv_extension_type(options_, METADATA_FRAME_TYPE); - nghttp2_session* session; - nghttp2_session_client_new2(&session, callbacks_, connection, options_); - return session; - } + nghttp2_session* create(const nghttp2_session_callbacks*, + typename Nghttp2SessionFactoryType::ConnectionImplType* connection, + const nghttp2_option*) override; - void init(nghttp2_session*, ConnectionImpl*, + void init(nghttp2_session*, typename Nghttp2SessionFactoryType::ConnectionImplType*, const envoy::config::core::v3::Http2ProtocolOptions&) override {} private: @@ -1882,6 +2260,83 @@ class TestNghttp2SessionFactory : public Nghttp2SessionFactory { nghttp2_option* options_; }; +template +nghttp2_session* +TestNghttp2SessionFactory::create( + const nghttp2_session_callbacks*, + typename Nghttp2SessionFactoryType::ConnectionImplType* connection, const nghttp2_option*) { + // Only need to provide callbacks required to send METADATA frames. + nghttp2_session_callbacks_new(&callbacks_); + nghttp2_session_callbacks_set_pack_extension_callback( + callbacks_, + [](nghttp2_session*, uint8_t* data, size_t length, const nghttp2_frame*, + void* user_data) -> ssize_t { + // Double cast required due to multiple inheritance. 
+ return static_cast*>( + static_cast(user_data)) + ->encoder_.packNextFramePayload(data, length); + }); + nghttp2_session_callbacks_set_send_callback( + callbacks_, + [](nghttp2_session*, const uint8_t* data, size_t length, int, void* user_data) -> ssize_t { + // Cast down to MetadataTestClientConnectionImpl to leverage friendship. + auto status_or_len = + static_cast*>( + static_cast(user_data)) + ->onSend(data, length); + if (status_or_len.ok()) { + return status_or_len.value(); + } + return NGHTTP2_ERR_CALLBACK_FAILURE; + }); + nghttp2_option_new(&options_); + nghttp2_option_set_user_recv_extension_type(options_, METADATA_FRAME_TYPE); + nghttp2_session* session; + nghttp2_session_client_new2(&session, callbacks_, connection, options_); + return session; +} + +template <> +nghttp2_session* TestNghttp2SessionFactory:: + create(const nghttp2_session_callbacks*, + Envoy::Http::Legacy::Http2::ProdNghttp2SessionFactory::ConnectionImplType* connection, + const nghttp2_option*) { + // Only need to provide callbacks required to send METADATA frames. + nghttp2_session_callbacks_new(&callbacks_); + nghttp2_session_callbacks_set_pack_extension_callback( + callbacks_, + [](nghttp2_session*, uint8_t* data, size_t length, const nghttp2_frame*, + void* user_data) -> ssize_t { + // Double cast required due to multiple inheritance. + return static_cast*>( + static_cast< + Envoy::Http::Legacy::Http2::ProdNghttp2SessionFactory::ConnectionImplType*>( + user_data)) + ->encoder_.packNextFramePayload(data, length); + }); + nghttp2_session_callbacks_set_send_callback( + callbacks_, + [](nghttp2_session*, const uint8_t* data, size_t length, int, void* user_data) -> ssize_t { + // Cast down to MetadataTestClientConnectionImpl to leverage friendship. 
+ return static_cast*>( + static_cast(user_data)) + ->onSend(data, length); + }); + nghttp2_option_new(&options_); + nghttp2_option_set_user_recv_extension_type(options_, METADATA_FRAME_TYPE); + nghttp2_session* session; + nghttp2_session_client_new2(&session, callbacks_, connection, options_); + return session; +} + +using TestNghttp2SessionFactoryNew = + TestNghttp2SessionFactory; +using TestNghttp2SessionFactoryLegacy = + TestNghttp2SessionFactory; + class Http2CodecMetadataTest : public Http2CodecImplTestFixture, public ::testing::Test { public: Http2CodecMetadataTest() = default; @@ -1891,24 +2346,39 @@ class Http2CodecMetadataTest : public Http2CodecImplTestFixture, public ::testin allow_metadata_ = true; http2OptionsFromTuple(client_http2_options_, client_settings_); http2OptionsFromTuple(server_http2_options_, server_settings_); - client_ = std::make_unique( - client_connection_, client_callbacks_, stats_store_, client_http2_options_, - max_request_headers_kb_, max_response_headers_count_, http2_session_factory_); - server_ = std::make_unique( - server_connection_, server_callbacks_, stats_store_, server_http2_options_, - max_request_headers_kb_, max_request_headers_count_, headers_with_underscores_action_); + if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.new_codec_behavior")) { + std::unique_ptr session_factory = + std::make_unique(); + client_ = std::make_unique( + client_connection_, client_callbacks_, client_stats_store_, client_http2_options_, + max_request_headers_kb_, max_response_headers_count_, *session_factory); + server_ = std::make_unique( + server_connection_, server_callbacks_, server_stats_store_, server_http2_options_, + max_request_headers_kb_, max_request_headers_count_, headers_with_underscores_action_); + http2_session_factory_ = std::move(session_factory); + } else { + std::unique_ptr session_factory = + std::make_unique(); + client_ = std::make_unique( + client_connection_, client_callbacks_, client_stats_store_, 
client_http2_options_, + max_request_headers_kb_, max_response_headers_count_, *session_factory); + server_ = std::make_unique( + server_connection_, server_callbacks_, server_stats_store_, server_http2_options_, + max_request_headers_kb_, max_request_headers_count_, headers_with_underscores_action_); + http2_session_factory_ = std::move(session_factory); + } ON_CALL(client_connection_, write(_, _)) .WillByDefault(Invoke([&](Buffer::Instance& data, bool) -> void { - server_wrapper_.dispatch(data, *server_); + ASSERT_TRUE(server_wrapper_.dispatch(data, *server_).ok()); })); ON_CALL(server_connection_, write(_, _)) .WillByDefault(Invoke([&](Buffer::Instance& data, bool) -> void { - client_wrapper_.dispatch(data, *client_); + ASSERT_TRUE(client_wrapper_.dispatch(data, *client_).ok()); })); } private: - TestNghttp2SessionFactory http2_session_factory_; + std::unique_ptr http2_session_factory_; }; // Validates noop handling of METADATA frames without a known stream ID. diff --git a/test/common/http/http2/codec_impl_test_util.h b/test/common/http/http2/codec_impl_test_util.h index 4eb42ac1c2826..6049876ef8446 100644 --- a/test/common/http/http2/codec_impl_test_util.h +++ b/test/common/http/http2/codec_impl_test_util.h @@ -3,12 +3,25 @@ #include "envoy/http/codec.h" #include "common/http/http2/codec_impl.h" +#include "common/http/http2/codec_impl_legacy.h" #include "common/http/utility.h" namespace Envoy { namespace Http { namespace Http2 { +class TestCodecStatsProvider { +public: + TestCodecStatsProvider(Stats::Scope& scope) : scope_(scope) {} + + Http::Http2::CodecStats& http2CodecStats() { + return Http::Http2::CodecStats::atomicGet(http2_codec_stats_, scope_); + } + + Stats::Scope& scope_; + Http::Http2::CodecStats::AtomicPtr http2_codec_stats_; +}; + class TestCodecSettingsProvider { public: // Returns the value of the SETTINGS parameter keyed by |identifier| sent by the remote endpoint. 
@@ -20,7 +33,7 @@ class TestCodecSettingsProvider { return it->second; } -protected: + // protected: // Stores SETTINGS parameters contained in |settings_frame| to make them available via // getRemoteSettingsParameterValue(). void onSettingsFrame(const nghttp2_settings& settings_frame) { @@ -42,10 +55,26 @@ class TestCodecSettingsProvider { } private: - std::unordered_map settings_; + absl::node_hash_map settings_; +}; + +struct ServerCodecFacade : public virtual Connection { + virtual nghttp2_session* session() PURE; + virtual Http::Stream* getStream(int32_t stream_id) PURE; + virtual uint32_t getStreamUnconsumedBytes(int32_t stream_id) PURE; + virtual void setStreamWriteBufferWatermarks(int32_t stream_id, uint32_t low_watermark, + uint32_t high_watermark) PURE; +}; + +class TestServerConnection : public TestCodecStatsProvider, + public TestCodecSettingsProvider, + public ServerCodecFacade { +public: + TestServerConnection(Stats::Scope& scope) : TestCodecStatsProvider(scope) {} }; -class TestServerConnectionImpl : public ServerConnectionImpl, public TestCodecSettingsProvider { +template +class TestServerConnectionImpl : public TestServerConnection, public CodecImplType { public: TestServerConnectionImpl( Network::Connection& connection, ServerConnectionCallbacks& callbacks, Stats::Scope& scope, @@ -53,44 +82,106 @@ class TestServerConnectionImpl : public ServerConnectionImpl, public TestCodecSe uint32_t max_request_headers_kb, uint32_t max_request_headers_count, envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction headers_with_underscores_action) - : ServerConnectionImpl(connection, callbacks, scope, http2_options, max_request_headers_kb, - max_request_headers_count, headers_with_underscores_action) {} - nghttp2_session* session() { return session_; } - using ServerConnectionImpl::getStream; + : TestServerConnection(scope), + CodecImplType(connection, callbacks, http2CodecStats(), http2_options, + max_request_headers_kb, 
max_request_headers_count, + headers_with_underscores_action) {} + + // ServerCodecFacade + nghttp2_session* session() override { return CodecImplType::session_; } + Http::Stream* getStream(int32_t stream_id) override { + return CodecImplType::getStream(stream_id); + } + uint32_t getStreamUnconsumedBytes(int32_t stream_id) override { + return CodecImplType::getStream(stream_id)->unconsumed_bytes_; + } + void setStreamWriteBufferWatermarks(int32_t stream_id, uint32_t low_watermark, + uint32_t high_watermark) override { + CodecImplType::getStream(stream_id)->setWriteBufferWatermarks(low_watermark, high_watermark); + } protected: // Overrides ServerConnectionImpl::onSettingsForTest(). void onSettingsForTest(const nghttp2_settings& settings) override { onSettingsFrame(settings); } }; -class TestClientConnectionImpl : public ClientConnectionImpl, public TestCodecSettingsProvider { +using TestServerConnectionImplLegacy = + TestServerConnectionImpl; +using TestServerConnectionImplNew = + TestServerConnectionImpl; + +struct ClientCodecFacade : public ClientConnection { + virtual nghttp2_session* session() PURE; + virtual Http::Stream* getStream(int32_t stream_id) PURE; + virtual uint64_t getStreamPendingSendDataLength(int32_t stream_id) PURE; + virtual Status sendPendingFrames() PURE; + virtual bool submitMetadata(const MetadataMapVector& mm_vector, int32_t stream_id) PURE; +}; + +class TestClientConnection : public TestCodecStatsProvider, + public TestCodecSettingsProvider, + public ClientCodecFacade { +public: + TestClientConnection(Stats::Scope& scope) : TestCodecStatsProvider(scope) {} +}; + +template +class TestClientConnectionImpl : public TestClientConnection, public CodecImplType { public: TestClientConnectionImpl(Network::Connection& connection, Http::ConnectionCallbacks& callbacks, Stats::Scope& scope, const envoy::config::core::v3::Http2ProtocolOptions& http2_options, uint32_t max_request_headers_kb, uint32_t max_request_headers_count, - Nghttp2SessionFactory& 
http2_session_factory) - : ClientConnectionImpl(connection, callbacks, scope, http2_options, max_request_headers_kb, - max_request_headers_count, http2_session_factory) {} - - nghttp2_session* session() { return session_; } + typename CodecImplType::SessionFactory& http2_session_factory) + : TestClientConnection(scope), + CodecImplType(connection, callbacks, http2CodecStats(), http2_options, + max_request_headers_kb, max_request_headers_count, http2_session_factory) {} + // ClientCodecFacade + RequestEncoder& newStream(ResponseDecoder& response_decoder) override { + return CodecImplType::newStream(response_decoder); + } + nghttp2_session* session() override { return CodecImplType::session_; } + Http::Stream* getStream(int32_t stream_id) override { + return CodecImplType::getStream(stream_id); + } + uint64_t getStreamPendingSendDataLength(int32_t stream_id) override { + return CodecImplType::getStream(stream_id)->pending_send_data_.length(); + } + Status sendPendingFrames() override; // Submits an H/2 METADATA frame to the peer. // Returns true on success, false otherwise. - virtual bool submitMetadata(const MetadataMapVector& mm_vector, int32_t stream_id) { + bool submitMetadata(const MetadataMapVector& mm_vector, int32_t stream_id) override { UNREFERENCED_PARAMETER(mm_vector); UNREFERENCED_PARAMETER(stream_id); return false; } - using ClientConnectionImpl::getStream; - using ConnectionImpl::sendPendingFrames; - protected: // Overrides ClientConnectionImpl::onSettingsForTest(). 
void onSettingsForTest(const nghttp2_settings& settings) override { onSettingsFrame(settings); } }; +template +Status TestClientConnectionImpl::sendPendingFrames() { + return CodecImplType::sendPendingFrames(); +} + +template <> +Status +TestClientConnectionImpl::sendPendingFrames() { + Envoy::Http::Legacy::Http2::ClientConnectionImpl::sendPendingFrames(); + return okStatus(); +} + +using TestClientConnectionImplLegacy = + TestClientConnectionImpl; +using TestClientConnectionImplNew = + TestClientConnectionImpl; + +using ProdNghttp2SessionFactoryLegacy = Envoy::Http::Legacy::Http2::ProdNghttp2SessionFactory; +using ProdNghttp2SessionFactoryNew = Envoy::Http::Http2::ProdNghttp2SessionFactory; + } // namespace Http2 } // namespace Http } // namespace Envoy diff --git a/test/common/http/http2/conn_pool_legacy_test.cc b/test/common/http/http2/conn_pool_legacy_test.cc deleted file mode 100644 index ece922fd09ba6..0000000000000 --- a/test/common/http/http2/conn_pool_legacy_test.cc +++ /dev/null @@ -1,810 +0,0 @@ -#include -#include -#include - -#include "common/event/dispatcher_impl.h" -#include "common/http/http2/conn_pool_legacy.h" -#include "common/network/utility.h" -#include "common/upstream/upstream_impl.h" - -#include "test/common/http/common.h" -#include "test/common/upstream/utility.h" -#include "test/mocks/event/mocks.h" -#include "test/mocks/http/mocks.h" -#include "test/mocks/network/mocks.h" -#include "test/mocks/runtime/mocks.h" -#include "test/mocks/upstream/mocks.h" -#include "test/test_common/printers.h" - -#include "gmock/gmock.h" -#include "gtest/gtest.h" - -using testing::_; -using testing::DoAll; -using testing::InSequence; -using testing::Invoke; -using testing::NiceMock; -using testing::Property; -using testing::Return; -using testing::ReturnRef; - -namespace Envoy { -namespace Http { -namespace Legacy { -namespace Http2 { - -class TestConnPoolImpl : public ConnPoolImpl { -public: - using ConnPoolImpl::ConnPoolImpl; - - CodecClientPtr 
createCodecClient(Upstream::Host::CreateConnectionData& data) override { - // We expect to own the connection, but already have it, so just release it to prevent it from - // getting deleted. - data.connection_.release(); - return CodecClientPtr{createCodecClient_(data)}; - } - - MOCK_METHOD1(createCodecClient_, CodecClient*(Upstream::Host::CreateConnectionData& data)); - - uint32_t maxTotalStreams() override { return max_streams_; } - - uint32_t max_streams_{std::numeric_limits::max()}; -}; - -class ActiveTestRequest; - -class Http2ConnPoolImplLegacyTest : public testing::Test { -public: - struct TestCodecClient { - Http::MockClientConnection* codec_; - Network::MockClientConnection* connection_; - CodecClientForTest* codec_client_; - Event::MockTimer* connect_timer_; - Event::DispatcherPtr client_dispatcher_; - }; - - Http2ConnPoolImplLegacyTest() - : api_(Api::createApiForTest(stats_store_)), - pool_(dispatcher_, host_, Upstream::ResourcePriority::Default, nullptr, nullptr) {} - - ~Http2ConnPoolImplLegacyTest() override { - EXPECT_TRUE(TestUtility::gaugesZeroed(cluster_->stats_store_.gauges())); - } - - // Creates a new test client, expecting a new connection to be created and associated - // with the new client. 
- void expectClientCreate(absl::optional buffer_limits = {}) { - test_clients_.emplace_back(); - TestCodecClient& test_client = test_clients_.back(); - test_client.connection_ = new NiceMock(); - test_client.codec_ = new NiceMock(); - test_client.connect_timer_ = new NiceMock(&dispatcher_); - test_client.client_dispatcher_ = api_->allocateDispatcher("test_thread"); - EXPECT_CALL(*test_client.connect_timer_, enableTimer(_, _)); - EXPECT_CALL(dispatcher_, createClientConnection_(_, _, _, _)) - .WillOnce(Return(test_client.connection_)); - auto cluster = std::make_shared>(); - Network::ClientConnectionPtr connection{test_client.connection_}; - test_client.codec_client_ = new CodecClientForTest( - CodecClient::Type::HTTP1, std::move(connection), test_client.codec_, - [this](CodecClient*) -> void { onClientDestroy(); }, - Upstream::makeTestHost(cluster, "tcp://127.0.0.1:9000"), *test_client.client_dispatcher_); - if (buffer_limits) { - EXPECT_CALL(*cluster_, perConnectionBufferLimitBytes()).WillOnce(Return(*buffer_limits)); - EXPECT_CALL(*test_clients_.back().connection_, setBufferLimits(*buffer_limits)); - } - EXPECT_CALL(pool_, createCodecClient_(_)) - .WillOnce(Invoke([this](Upstream::Host::CreateConnectionData&) -> CodecClient* { - return test_clients_.back().codec_client_; - })); - } - - // Connects a pending connection for client with the given index, asserting - // that the provided request receives onPoolReady. - void expectClientConnect(size_t index, ActiveTestRequest& r); - // Asserts that onPoolReady is called on the request. - void expectStreamConnect(size_t index, ActiveTestRequest& r); - - // Resets the connection belonging to the provided index, asserting that the - // provided request receives onPoolFailure. - void expectClientReset(size_t index, ActiveTestRequest& r); - // Asserts that the provided requests receives onPoolFailure. - void expectStreamReset(ActiveTestRequest& r); - - /** - * Closes a test client. 
- */ - void closeClient(size_t index); - - /** - * Completes an active request. Useful when this flow is not part of the main test assertions. - */ - void completeRequest(ActiveTestRequest& r); - - /** - * Completes an active request and closes the upstream connection. Useful when this flow is - * not part of the main test assertions. - */ - void completeRequestCloseUpstream(size_t index, ActiveTestRequest& r); - - MOCK_METHOD0(onClientDestroy, void()); - - Stats::IsolatedStoreImpl stats_store_; - Api::ApiPtr api_; - NiceMock dispatcher_; - std::shared_ptr cluster_{new NiceMock()}; - Upstream::HostSharedPtr host_{Upstream::makeTestHost(cluster_, "tcp://127.0.0.1:80")}; - TestConnPoolImpl pool_; - std::vector test_clients_; - NiceMock runtime_; -}; - -class ActiveTestRequest { -public: - ActiveTestRequest(Http2ConnPoolImplLegacyTest& test, size_t client_index, bool expect_connected) { - if (expect_connected) { - EXPECT_CALL(*test.test_clients_[client_index].codec_, newStream(_)) - .WillOnce(DoAll(SaveArgAddress(&inner_decoder_), ReturnRef(inner_encoder_))); - EXPECT_CALL(callbacks_.pool_ready_, ready()); - EXPECT_EQ(nullptr, test.pool_.newStream(decoder_, callbacks_)); - } else { - EXPECT_NE(nullptr, test.pool_.newStream(decoder_, callbacks_)); - } - } - - MockResponseDecoder decoder_; - ConnPoolCallbacks callbacks_; - ResponseDecoder* inner_decoder_{}; - NiceMock inner_encoder_; -}; - -void Http2ConnPoolImplLegacyTest::expectClientConnect(size_t index, ActiveTestRequest& r) { - expectStreamConnect(index, r); - EXPECT_CALL(*test_clients_[index].connect_timer_, disableTimer()); - test_clients_[index].connection_->raiseEvent(Network::ConnectionEvent::Connected); -} - -void Http2ConnPoolImplLegacyTest::expectStreamConnect(size_t index, ActiveTestRequest& r) { - EXPECT_CALL(*test_clients_[index].codec_, newStream(_)) - .WillOnce(DoAll(SaveArgAddress(&r.inner_decoder_), ReturnRef(r.inner_encoder_))); - EXPECT_CALL(r.callbacks_.pool_ready_, ready()); -} - -void 
Http2ConnPoolImplLegacyTest::expectClientReset(size_t index, ActiveTestRequest& r) { - expectStreamReset(r); - EXPECT_CALL(*test_clients_[0].connect_timer_, disableTimer()); - test_clients_[index].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); -} - -void Http2ConnPoolImplLegacyTest::expectStreamReset(ActiveTestRequest& r) { - EXPECT_CALL(r.callbacks_.pool_failure_, ready()); -} - -void Http2ConnPoolImplLegacyTest::closeClient(size_t index) { - test_clients_[index].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - EXPECT_CALL(*this, onClientDestroy()); - dispatcher_.clearDeferredDeleteList(); -} - -void Http2ConnPoolImplLegacyTest::completeRequest(ActiveTestRequest& r) { - EXPECT_CALL(r.inner_encoder_, encodeHeaders(_, true)); - r.callbacks_.outer_encoder_->encodeHeaders( - TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); - EXPECT_CALL(r.decoder_, decodeHeaders_(_, true)); - r.inner_decoder_->decodeHeaders( - ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, true); -} - -void Http2ConnPoolImplLegacyTest::completeRequestCloseUpstream(size_t index, ActiveTestRequest& r) { - completeRequest(r); - closeClient(index); -} - -/** - * Verify that the pool retains and returns the host it was constructed with. - */ -TEST_F(Http2ConnPoolImplLegacyTest, Host) { EXPECT_EQ(host_, pool_.host()); } - -/** - * Verify that connections are drained when requested. - */ -TEST_F(Http2ConnPoolImplLegacyTest, DrainConnections) { - InSequence s; - pool_.max_streams_ = 1; - - // Test drain connections call prior to any connections being created. 
- pool_.drainConnections(); - - expectClientCreate(); - ActiveTestRequest r1(*this, 0, false); - expectClientConnect(0, r1); - EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, true)); - r1.callbacks_.outer_encoder_->encodeHeaders( - TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); - - expectClientCreate(); - ActiveTestRequest r2(*this, 1, false); - expectClientConnect(1, r2); - EXPECT_CALL(r2.inner_encoder_, encodeHeaders(_, true)); - r2.callbacks_.outer_encoder_->encodeHeaders( - TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); - - // This will move primary to draining and destroy draining. - pool_.drainConnections(); - EXPECT_CALL(*this, onClientDestroy()); - dispatcher_.clearDeferredDeleteList(); - - // This will destroy draining. - test_clients_[1].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - EXPECT_CALL(*this, onClientDestroy()); - dispatcher_.clearDeferredDeleteList(); - - EXPECT_EQ(2U, cluster_->stats_.upstream_cx_destroy_.value()); - EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); -} - -// Verifies that requests are queued up in the conn pool until the connection becomes ready. -TEST_F(Http2ConnPoolImplLegacyTest, PendingRequests) { - InSequence s; - - // Create three requests. These should be queued up. - expectClientCreate(); - ActiveTestRequest r1(*this, 0, false); - ActiveTestRequest r2(*this, 0, false); - ActiveTestRequest r3(*this, 0, false); - - // The connection now becomes ready. This should cause all the queued requests to be sent. - expectStreamConnect(0, r1); - expectStreamConnect(0, r2); - expectClientConnect(0, r3); - - // Send a request through each stream. 
- EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, true)); - r1.callbacks_.outer_encoder_->encodeHeaders( - TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); - - EXPECT_CALL(r2.inner_encoder_, encodeHeaders(_, true)); - r2.callbacks_.outer_encoder_->encodeHeaders( - TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); - - EXPECT_CALL(r3.inner_encoder_, encodeHeaders(_, true)); - r3.callbacks_.outer_encoder_->encodeHeaders( - TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); - - // Since we now have an active connection, subsequent requests should connect immediately. - ActiveTestRequest r4(*this, 0, true); - - // Clean up everything. - test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - EXPECT_CALL(*this, onClientDestroy()); - dispatcher_.clearDeferredDeleteList(); - - EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_.value()); - EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); -} - -// Verifies that requests are queued up in the conn pool and fail when the connection -// fails to be established. -TEST_F(Http2ConnPoolImplLegacyTest, PendingRequestsFailure) { - InSequence s; - pool_.max_streams_ = 10; - - // Create three requests. These should be queued up. - expectClientCreate(); - ActiveTestRequest r1(*this, 0, false); - ActiveTestRequest r2(*this, 0, false); - ActiveTestRequest r3(*this, 0, false); - - // The connection now becomes ready. This should cause all the queued requests to be sent. - // Note that these occur in reverse order due to the order we purge pending requests in. - expectStreamReset(r3); - expectStreamReset(r2); - expectClientReset(0, r1); - - expectClientCreate(); - // Since we have no active connection, subsequence requests will queue until - // the new connection is established. - ActiveTestRequest r4(*this, 1, false); - expectClientConnect(1, r4); - - // Clean up everything. 
- test_clients_[1].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - EXPECT_CALL(*this, onClientDestroy()).Times(2); - dispatcher_.clearDeferredDeleteList(); - - EXPECT_EQ(2U, cluster_->stats_.upstream_cx_destroy_.value()); - EXPECT_EQ(2U, cluster_->stats_.upstream_cx_destroy_remote_.value()); -} - -// Verifies that requests are queued up in the conn pool and respect max request circuit breaking -// when the connection is established. -TEST_F(Http2ConnPoolImplLegacyTest, PendingRequestsRequestOverflow) { - InSequence s; - - // Inflate the resource count to just under the limit. - auto& requests = host_->cluster().resourceManager(Upstream::ResourcePriority::Default).requests(); - for (uint64_t i = 0; i < requests.max() - 1; ++i) { - requests.inc(); - } - - // Create three requests. These should be queued up. - expectClientCreate(); - ActiveTestRequest r1(*this, 0, false); - ActiveTestRequest r2(*this, 0, false); - ActiveTestRequest r3(*this, 0, false); - - // We queued up three requests, but we can only afford one before hitting the circuit - // breaker. Thus, we expect to see 2 resets and one successful connect. - expectStreamConnect(0, r1); - expectStreamReset(r2); - expectStreamReset(r3); - EXPECT_CALL(*test_clients_[0].connect_timer_, disableTimer()); - test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); - - // Clean up everything. - for (uint64_t i = 0; i < requests.max() - 1; ++i) { - requests.dec(); - } - test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - EXPECT_CALL(*this, onClientDestroy()); - dispatcher_.clearDeferredDeleteList(); - - EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_.value()); - EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); -} - -// Verifies that we honor the max pending requests circuit breaker. 
-TEST_F(Http2ConnPoolImplLegacyTest, PendingRequestsMaxPendingCircuitBreaker) { - InSequence s; - - // Inflate the resource count to just under the limit. - auto& pending_reqs = - host_->cluster().resourceManager(Upstream::ResourcePriority::Default).pendingRequests(); - for (uint64_t i = 0; i < pending_reqs.max() - 1; ++i) { - pending_reqs.inc(); - } - - // Create two requests. The first one should be enqueued, while the second one - // should fail fast due to us being above the max pending requests limit. - expectClientCreate(); - ActiveTestRequest r1(*this, 0, false); - - MockResponseDecoder decoder; - ConnPoolCallbacks callbacks; - EXPECT_CALL(callbacks.pool_failure_, ready()); - EXPECT_EQ(nullptr, pool_.newStream(decoder, callbacks)); - - expectStreamConnect(0, r1); - EXPECT_CALL(*test_clients_[0].connect_timer_, disableTimer()); - test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); - - // Clean up everything. - for (uint64_t i = 0; i < pending_reqs.max() - 1; ++i) { - pending_reqs.dec(); - } - test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - EXPECT_CALL(*this, onClientDestroy()); - dispatcher_.clearDeferredDeleteList(); - - EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_.value()); - EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); -} - -TEST_F(Http2ConnPoolImplLegacyTest, VerifyConnectionTimingStats) { - InSequence s; - expectClientCreate(); - ActiveTestRequest r1(*this, 0, false); - EXPECT_CALL(cluster_->stats_store_, - deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_cx_connect_ms"), _)); - expectClientConnect(0, r1); - EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, true)); - r1.callbacks_.outer_encoder_->encodeHeaders( - TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); - EXPECT_CALL(r1.decoder_, decodeHeaders_(_, true)); - r1.inner_decoder_->decodeHeaders( - ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, true); 
- - test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - EXPECT_CALL(*this, onClientDestroy()); - EXPECT_CALL(cluster_->stats_store_, - deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_cx_length_ms"), _)); - dispatcher_.clearDeferredDeleteList(); - - EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_.value()); - EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); -} - -/** - * Test that buffer limits are set. - */ -TEST_F(Http2ConnPoolImplLegacyTest, VerifyBufferLimits) { - InSequence s; - expectClientCreate(8192); - ActiveTestRequest r1(*this, 0, false); - - expectClientConnect(0, r1); - EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, true)); - r1.callbacks_.outer_encoder_->encodeHeaders( - TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); - EXPECT_CALL(r1.decoder_, decodeHeaders_(_, true)); - r1.inner_decoder_->decodeHeaders( - ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, true); - - test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - EXPECT_CALL(*this, onClientDestroy()); - dispatcher_.clearDeferredDeleteList(); - - EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_.value()); - EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); -} - -TEST_F(Http2ConnPoolImplLegacyTest, RequestAndResponse) { - InSequence s; - - expectClientCreate(); - ActiveTestRequest r1(*this, 0, false); - expectClientConnect(0, r1); - EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, true)); - r1.callbacks_.outer_encoder_->encodeHeaders( - TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); - EXPECT_CALL(r1.decoder_, decodeHeaders_(_, true)); - r1.inner_decoder_->decodeHeaders( - ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, true); - - ActiveTestRequest r2(*this, 0, true); - EXPECT_CALL(r2.inner_encoder_, encodeHeaders(_, true)); - r2.callbacks_.outer_encoder_->encodeHeaders( - 
TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); - EXPECT_CALL(r2.decoder_, decodeHeaders_(_, true)); - r2.inner_decoder_->decodeHeaders( - ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, true); - - test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - EXPECT_CALL(*this, onClientDestroy()); - dispatcher_.clearDeferredDeleteList(); - - EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_.value()); - EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); -} - -TEST_F(Http2ConnPoolImplLegacyTest, LocalReset) { - InSequence s; - - expectClientCreate(); - ActiveTestRequest r1(*this, 0, false); - expectClientConnect(0, r1); - EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, false)); - r1.callbacks_.outer_encoder_->encodeHeaders( - TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, false); - r1.callbacks_.outer_encoder_->getStream().resetStream(Http::StreamResetReason::LocalReset); - - test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - EXPECT_CALL(*this, onClientDestroy()); - dispatcher_.clearDeferredDeleteList(); - EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_.value()); - EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); - EXPECT_EQ(1U, cluster_->stats_.upstream_rq_tx_reset_.value()); - EXPECT_EQ(0U, cluster_->circuit_breakers_stats_.rq_open_.value()); -} - -TEST_F(Http2ConnPoolImplLegacyTest, RemoteReset) { - InSequence s; - - expectClientCreate(); - ActiveTestRequest r1(*this, 0, false); - expectClientConnect(0, r1); - EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, false)); - r1.callbacks_.outer_encoder_->encodeHeaders( - TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, false); - r1.inner_encoder_.stream_.resetStream(Http::StreamResetReason::RemoteReset); - - test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - EXPECT_CALL(*this, onClientDestroy()); - 
dispatcher_.clearDeferredDeleteList(); - EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_.value()); - EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); - EXPECT_EQ(1U, cluster_->stats_.upstream_rq_rx_reset_.value()); - EXPECT_EQ(0U, cluster_->circuit_breakers_stats_.rq_open_.value()); -} - -TEST_F(Http2ConnPoolImplLegacyTest, DrainDisconnectWithActiveRequest) { - InSequence s; - pool_.max_streams_ = 1; - - expectClientCreate(); - ActiveTestRequest r1(*this, 0, false); - expectClientConnect(0, r1); - EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, true)); - r1.callbacks_.outer_encoder_->encodeHeaders( - TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); - - ReadyWatcher drained; - pool_.addDrainedCallback([&]() -> void { drained.ready(); }); - - EXPECT_CALL(dispatcher_, deferredDelete_(_)); - EXPECT_CALL(drained, ready()); - test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - EXPECT_CALL(*this, onClientDestroy()); - dispatcher_.clearDeferredDeleteList(); - - EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_.value()); - EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); -} - -TEST_F(Http2ConnPoolImplLegacyTest, DrainDisconnectDrainingWithActiveRequest) { - InSequence s; - pool_.max_streams_ = 1; - - expectClientCreate(); - ActiveTestRequest r1(*this, 0, false); - expectClientConnect(0, r1); - EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, true)); - r1.callbacks_.outer_encoder_->encodeHeaders( - TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); - - expectClientCreate(); - ActiveTestRequest r2(*this, 1, false); - expectClientConnect(1, r2); - EXPECT_CALL(r2.inner_encoder_, encodeHeaders(_, true)); - r2.callbacks_.outer_encoder_->encodeHeaders( - TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); - - ReadyWatcher drained; - pool_.addDrainedCallback([&]() -> void { drained.ready(); }); - - EXPECT_CALL(dispatcher_, deferredDelete_(_)); - 
EXPECT_CALL(r2.decoder_, decodeHeaders_(_, true)); - r2.inner_decoder_->decodeHeaders( - ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, true); - EXPECT_CALL(*this, onClientDestroy()); - dispatcher_.clearDeferredDeleteList(); - - EXPECT_CALL(dispatcher_, deferredDelete_(_)); - EXPECT_CALL(drained, ready()); - test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - EXPECT_CALL(*this, onClientDestroy()); - dispatcher_.clearDeferredDeleteList(); - - EXPECT_EQ(2U, cluster_->stats_.upstream_cx_destroy_.value()); - EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); -} - -TEST_F(Http2ConnPoolImplLegacyTest, DrainPrimary) { - InSequence s; - pool_.max_streams_ = 1; - - expectClientCreate(); - ActiveTestRequest r1(*this, 0, false); - expectClientConnect(0, r1); - EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, true)); - r1.callbacks_.outer_encoder_->encodeHeaders( - TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); - - expectClientCreate(); - ActiveTestRequest r2(*this, 1, false); - expectClientConnect(1, r2); - EXPECT_CALL(r2.inner_encoder_, encodeHeaders(_, true)); - r2.callbacks_.outer_encoder_->encodeHeaders( - TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); - - ReadyWatcher drained; - pool_.addDrainedCallback([&]() -> void { drained.ready(); }); - - EXPECT_CALL(dispatcher_, deferredDelete_(_)); - EXPECT_CALL(r2.decoder_, decodeHeaders_(_, true)); - r2.inner_decoder_->decodeHeaders( - ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, true); - EXPECT_CALL(*this, onClientDestroy()); - dispatcher_.clearDeferredDeleteList(); - - EXPECT_CALL(dispatcher_, deferredDelete_(_)); - EXPECT_CALL(drained, ready()); - EXPECT_CALL(r1.decoder_, decodeHeaders_(_, true)); - r1.inner_decoder_->decodeHeaders( - ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, true); - - EXPECT_CALL(*this, onClientDestroy()); - 
dispatcher_.clearDeferredDeleteList(); -} - -TEST_F(Http2ConnPoolImplLegacyTest, DrainPrimaryNoActiveRequest) { - InSequence s; - pool_.max_streams_ = 1; - - expectClientCreate(); - ActiveTestRequest r1(*this, 0, false); - expectClientConnect(0, r1); - EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, true)); - r1.callbacks_.outer_encoder_->encodeHeaders( - TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); - EXPECT_CALL(r1.decoder_, decodeHeaders_(_, true)); - r1.inner_decoder_->decodeHeaders( - ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, true); - - EXPECT_CALL(dispatcher_, deferredDelete_(_)); - expectClientCreate(); - ActiveTestRequest r2(*this, 1, false); - expectClientConnect(1, r2); - EXPECT_CALL(*this, onClientDestroy()); - dispatcher_.clearDeferredDeleteList(); - EXPECT_CALL(r2.inner_encoder_, encodeHeaders(_, true)); - r2.callbacks_.outer_encoder_->encodeHeaders( - TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); - EXPECT_CALL(r2.decoder_, decodeHeaders_(_, true)); - r2.inner_decoder_->decodeHeaders( - ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, true); - - ReadyWatcher drained; - EXPECT_CALL(dispatcher_, deferredDelete_(_)); - EXPECT_CALL(drained, ready()); - pool_.addDrainedCallback([&]() -> void { drained.ready(); }); - - EXPECT_CALL(*this, onClientDestroy()); - dispatcher_.clearDeferredDeleteList(); -} - -TEST_F(Http2ConnPoolImplLegacyTest, ConnectTimeout) { - InSequence s; - - EXPECT_EQ(0U, cluster_->circuit_breakers_stats_.rq_open_.value()); - - expectClientCreate(); - ActiveTestRequest r1(*this, 0, false); - EXPECT_CALL(r1.callbacks_.pool_failure_, ready()); - test_clients_[0].connect_timer_->invokeCallback(); - - EXPECT_CALL(*this, onClientDestroy()); - dispatcher_.clearDeferredDeleteList(); - - EXPECT_EQ(0U, cluster_->circuit_breakers_stats_.rq_open_.value()); - - expectClientCreate(); - ActiveTestRequest r2(*this, 1, false); - expectClientConnect(1, r2); 
- EXPECT_CALL(r2.inner_encoder_, encodeHeaders(_, true)); - r2.callbacks_.outer_encoder_->encodeHeaders( - TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); - EXPECT_CALL(r2.decoder_, decodeHeaders_(_, true)); - r2.inner_decoder_->decodeHeaders( - ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, true); - - test_clients_[1].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - EXPECT_CALL(*this, onClientDestroy()); - dispatcher_.clearDeferredDeleteList(); - - EXPECT_EQ(1U, cluster_->stats_.upstream_rq_total_.value()); - EXPECT_EQ(1U, cluster_->stats_.upstream_cx_connect_fail_.value()); - EXPECT_EQ(1U, cluster_->stats_.upstream_cx_connect_timeout_.value()); - EXPECT_EQ(1U, cluster_->stats_.upstream_rq_pending_failure_eject_.value()); - EXPECT_EQ(2U, cluster_->stats_.upstream_cx_destroy_.value()); - EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_local_.value()); - EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); -} - -TEST_F(Http2ConnPoolImplLegacyTest, MaxGlobalRequests) { - cluster_->resetResourceManager(1024, 1024, 1, 1, 1); - InSequence s; - - expectClientCreate(); - ActiveTestRequest r1(*this, 0, false); - expectClientConnect(0, r1); - EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, true)); - r1.callbacks_.outer_encoder_->encodeHeaders( - TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); - - ConnPoolCallbacks callbacks; - MockResponseDecoder decoder; - EXPECT_CALL(callbacks.pool_failure_, ready()); - EXPECT_EQ(nullptr, pool_.newStream(decoder, callbacks)); - - test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - EXPECT_CALL(*this, onClientDestroy()); - dispatcher_.clearDeferredDeleteList(); - - EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_.value()); - EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); -} - -TEST_F(Http2ConnPoolImplLegacyTest, GoAway) { - InSequence s; - - expectClientCreate(); - ActiveTestRequest 
r1(*this, 0, false); - expectClientConnect(0, r1); - EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, true)); - r1.callbacks_.outer_encoder_->encodeHeaders( - TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); - EXPECT_CALL(r1.decoder_, decodeHeaders_(_, true)); - r1.inner_decoder_->decodeHeaders( - ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, true); - - test_clients_[0].codec_client_->raiseGoAway(); - - expectClientCreate(); - ActiveTestRequest r2(*this, 1, false); - expectClientConnect(1, r2); - EXPECT_CALL(r2.inner_encoder_, encodeHeaders(_, true)); - r2.callbacks_.outer_encoder_->encodeHeaders( - TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); - EXPECT_CALL(r2.decoder_, decodeHeaders_(_, true)); - r2.inner_decoder_->decodeHeaders( - ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, true); - - test_clients_[1].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - EXPECT_CALL(*this, onClientDestroy()).Times(2); - dispatcher_.clearDeferredDeleteList(); - - EXPECT_EQ(1U, cluster_->stats_.upstream_cx_close_notify_.value()); -} - -TEST_F(Http2ConnPoolImplLegacyTest, NoActiveConnectionsByDefault) { - EXPECT_FALSE(pool_.hasActiveConnections()); -} - -// Show that an active request on the primary connection is considered active. -TEST_F(Http2ConnPoolImplLegacyTest, ActiveConnectionsHasActiveRequestsTrue) { - expectClientCreate(); - ActiveTestRequest r1(*this, 0, false); - expectClientConnect(0, r1); - - EXPECT_TRUE(pool_.hasActiveConnections()); - - completeRequestCloseUpstream(0, r1); -} - -// Show that pending requests are considered active. 
-TEST_F(Http2ConnPoolImplLegacyTest, PendingRequestsConsideredActive) { - expectClientCreate(); - ActiveTestRequest r1(*this, 0, false); - - EXPECT_TRUE(pool_.hasActiveConnections()); - - expectClientConnect(0, r1); - completeRequestCloseUpstream(0, r1); -} - -// Show that even if there is a primary client still, if all of its requests have completed, then it -// does not have any active connections. -TEST_F(Http2ConnPoolImplLegacyTest, ResponseCompletedConnectionReadyNoActiveConnections) { - expectClientCreate(); - ActiveTestRequest r1(*this, 0, false); - expectClientConnect(0, r1); - completeRequest(r1); - - EXPECT_FALSE(pool_.hasActiveConnections()); - - closeClient(0); -} - -// Show that if connections are draining, they're still considered active. -TEST_F(Http2ConnPoolImplLegacyTest, DrainingConnectionsConsideredActive) { - pool_.max_streams_ = 1; - expectClientCreate(); - ActiveTestRequest r1(*this, 0, false); - expectClientConnect(0, r1); - pool_.drainConnections(); - - EXPECT_TRUE(pool_.hasActiveConnections()); - - completeRequest(r1); - closeClient(0); -} - -// Show that once we've drained all connections, there are no longer any active. 
-TEST_F(Http2ConnPoolImplLegacyTest, DrainedConnectionsNotActive) { - pool_.max_streams_ = 1; - expectClientCreate(); - ActiveTestRequest r1(*this, 0, false); - expectClientConnect(0, r1); - pool_.drainConnections(); - completeRequest(r1); - - EXPECT_FALSE(pool_.hasActiveConnections()); - - closeClient(0); -} -} // namespace Http2 -} // namespace Legacy -} // namespace Http -} // namespace Envoy diff --git a/test/common/http/http2/conn_pool_test.cc b/test/common/http/http2/conn_pool_test.cc index c8ced6e33f5e9..d0f0ed1c50617 100644 --- a/test/common/http/http2/conn_pool_test.cc +++ b/test/common/http/http2/conn_pool_test.cc @@ -4,6 +4,7 @@ #include "common/event/dispatcher_impl.h" #include "common/http/http2/conn_pool.h" +#include "common/network/raw_buffer_socket.h" #include "common/network/utility.h" #include "common/upstream/upstream_impl.h" @@ -15,6 +16,7 @@ #include "test/mocks/runtime/mocks.h" #include "test/mocks/upstream/mocks.h" #include "test/test_common/printers.h" +#include "test/test_common/test_runtime.h" #include "gmock/gmock.h" #include "gtest/gtest.h" @@ -23,6 +25,7 @@ using testing::_; using testing::DoAll; using testing::InSequence; using testing::Invoke; +using testing::InvokeWithoutArgs; using testing::NiceMock; using testing::Property; using testing::Return; @@ -60,7 +63,8 @@ class Http2ConnPoolImplTest : public testing::Test { Http2ConnPoolImplTest() : api_(Api::createApiForTest(stats_store_)), - pool_(dispatcher_, host_, Upstream::ResourcePriority::Default, nullptr, nullptr) { + pool_(std::make_unique( + dispatcher_, host_, Upstream::ResourcePriority::Default, nullptr, nullptr)) { // Default connections to 1024 because the tests shouldn't be relying on the // connection resource limit for most tests. 
cluster_->resetResourceManager(1024, 1024, 1024, 1, 1); @@ -70,34 +74,82 @@ class Http2ConnPoolImplTest : public testing::Test { EXPECT_EQ("", TestUtility::nonZeroedGauges(cluster_->stats_store_.gauges())); } - // Creates a new test client, expecting a new connection to be created and associated - // with the new client. - void expectClientCreate(absl::optional buffer_limits = {}) { - test_clients_.emplace_back(); - TestCodecClient& test_client = test_clients_.back(); - test_client.connection_ = new NiceMock(); - test_client.codec_ = new NiceMock(); - test_client.connect_timer_ = new NiceMock(&dispatcher_); - test_client.client_dispatcher_ = api_->allocateDispatcher("test_thread"); + void createTestClients(int num_clients) { + // Create N clients. + for (int i = 0; i < num_clients; ++i) { + test_clients_.emplace_back(); + TestCodecClient& test_client = test_clients_.back(); + test_client.connection_ = new NiceMock(); + test_client.codec_ = new NiceMock(); + test_client.connect_timer_ = new NiceMock(); + test_client.client_dispatcher_ = api_->allocateDispatcher("test_thread"); + } + + // Outside the for loop, set the createTimer expectations. + EXPECT_CALL(dispatcher_, createTimer_(_)) + .Times(num_clients) + .WillRepeatedly(Invoke([this](Event::TimerCb cb) { + test_clients_[timer_index_].connect_timer_->callback_ = cb; + return test_clients_[timer_index_++].connect_timer_; + })); + // Loop again through the last num_clients entries to set enableTimer expectations. + // Ideally this could be done in the loop above but it breaks InSequence + // assertions. + for (size_t i = test_clients_.size() - num_clients; i < test_clients_.size(); ++i) { + TestCodecClient& test_client = test_clients_[i]; + EXPECT_CALL(*test_client.connect_timer_, enableTimer(_, _)); + } + } + + void expectConnectionSetupForClient(int num_clients, + absl::optional buffer_limits = {}) { + // Set the createClientConnection mocks. 
The createCodecClient_ invoke + // below takes care of making sure connection_index_ is updated. EXPECT_CALL(dispatcher_, createClientConnection_(_, _, _, _)) - .WillOnce(Return(test_client.connection_)); - auto cluster = std::make_shared>(); - Network::ClientConnectionPtr connection{test_client.connection_}; - test_client.codec_client_ = new CodecClientForTest( - CodecClient::Type::HTTP1, std::move(connection), test_client.codec_, - [this](CodecClient*) -> void { onClientDestroy(); }, - Upstream::makeTestHost(cluster, "tcp://127.0.0.1:9000"), *test_client.client_dispatcher_); - if (buffer_limits) { - EXPECT_CALL(*cluster_, perConnectionBufferLimitBytes()).WillOnce(Return(*buffer_limits)); - EXPECT_CALL(*test_clients_.back().connection_, setBufferLimits(*buffer_limits)); + .Times(num_clients) + .WillRepeatedly(InvokeWithoutArgs([this]() -> Network::ClientConnection* { + return test_clients_[connection_index_].connection_; + })); + + // Loop through the last num_clients clients, setting up codec clients and + // per-client mocks. 
+ for (size_t i = test_clients_.size() - num_clients; i < test_clients_.size(); ++i) { + TestCodecClient& test_client = test_clients_[i]; + auto cluster = std::make_shared>(); + Network::ClientConnectionPtr connection{test_client.connection_}; + test_client.codec_client_ = new CodecClientForTest( + CodecClient::Type::HTTP1, std::move(connection), test_client.codec_, + [this](CodecClient*) -> void { onClientDestroy(); }, + Upstream::makeTestHost(cluster, "tcp://127.0.0.1:9000"), *test_client.client_dispatcher_); + if (buffer_limits) { + EXPECT_CALL(*cluster_, perConnectionBufferLimitBytes()) + .Times(num_clients) + .WillRepeatedly(Return(*buffer_limits)); + EXPECT_CALL(*test_client.connection_, setBufferLimits(*buffer_limits)).Times(1); + } } - EXPECT_CALL(pool_, createCodecClient_(_)) - .WillOnce(Invoke([this](Upstream::Host::CreateConnectionData&) -> CodecClient* { - return test_clients_.back().codec_client_; + // Finally (for InSequence tests) set up createCodecClient and make sure the + // index is incremented to avoid returning the same client more than once. + EXPECT_CALL(*pool_, createCodecClient_(_)) + .Times(num_clients) + .WillRepeatedly(Invoke([this](Upstream::Host::CreateConnectionData&) -> CodecClient* { + return test_clients_[connection_index_++].codec_client_; })); - EXPECT_CALL(*test_client.connect_timer_, enableTimer(_, _)); } + // Creates a new test client, expecting a new connection to be created and associated + // with the new client. + void expectClientCreate(absl::optional buffer_limits = {}) { + createTestClients(1); + expectConnectionSetupForClient(1, buffer_limits); + } + void expectClientsCreate(int num_clients) { + createTestClients(num_clients); + expectConnectionSetupForClient(num_clients, absl::nullopt); + } + + // Connects a pending connection for client with the given index. 
+ void expectClientConnect(size_t index); // Connects a pending connection for client with the given index, asserting // that the provided request receives onPoolReady. void expectClientConnect(size_t index, ActiveTestRequest& r); @@ -115,6 +167,11 @@ class Http2ConnPoolImplTest : public testing::Test { */ void closeClient(size_t index); + /** + * Closes all test clients. + */ + void closeAllClients(); + /** * Completes an active request. Useful when this flow is not part of the main test assertions. */ @@ -128,12 +185,14 @@ class Http2ConnPoolImplTest : public testing::Test { MOCK_METHOD(void, onClientDestroy, ()); + int timer_index_{}; + int connection_index_{}; Stats::IsolatedStoreImpl stats_store_; Api::ApiPtr api_; NiceMock dispatcher_; std::shared_ptr cluster_{new NiceMock()}; Upstream::HostSharedPtr host_{Upstream::makeTestHost(cluster_, "tcp://127.0.0.1:80")}; - TestConnPoolImpl pool_; + std::unique_ptr pool_; std::vector test_clients_; NiceMock runtime_; }; @@ -145,9 +204,9 @@ class ActiveTestRequest { EXPECT_CALL(*test.test_clients_[client_index].codec_, newStream(_)) .WillOnce(DoAll(SaveArgAddress(&inner_decoder_), ReturnRef(inner_encoder_))); EXPECT_CALL(callbacks_.pool_ready_, ready()); - EXPECT_EQ(nullptr, test.pool_.newStream(decoder_, callbacks_)); + EXPECT_EQ(nullptr, test.pool_->newStream(decoder_, callbacks_)); } else { - handle_ = test.pool_.newStream(decoder_, callbacks_); + handle_ = test.pool_->newStream(decoder_, callbacks_); EXPECT_NE(nullptr, handle_); } } @@ -159,12 +218,16 @@ class ActiveTestRequest { ConnectionPool::Cancellable* handle_{}; }; -void Http2ConnPoolImplTest::expectClientConnect(size_t index, ActiveTestRequest& r) { - expectStreamConnect(index, r); +void Http2ConnPoolImplTest::expectClientConnect(size_t index) { EXPECT_CALL(*test_clients_[index].connect_timer_, disableTimer()); test_clients_[index].connection_->raiseEvent(Network::ConnectionEvent::Connected); } +void Http2ConnPoolImplTest::expectClientConnect(size_t index, 
ActiveTestRequest& r) { + expectStreamConnect(index, r); + expectClientConnect(index); +} + void Http2ConnPoolImplTest::expectStreamConnect(size_t index, ActiveTestRequest& r) { EXPECT_CALL(*test_clients_[index].codec_, newStream(_)) .WillOnce(DoAll(SaveArgAddress(&r.inner_decoder_), ReturnRef(r.inner_encoder_))); @@ -194,6 +257,14 @@ void Http2ConnPoolImplTest::closeClient(size_t index) { dispatcher_.clearDeferredDeleteList(); } +void Http2ConnPoolImplTest::closeAllClients() { + for (auto& test_client : test_clients_) { + test_client.connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + } + EXPECT_CALL(*this, onClientDestroy()).Times(test_clients_.size()); + dispatcher_.clearDeferredDeleteList(); +} + void Http2ConnPoolImplTest::completeRequest(ActiveTestRequest& r) { EXPECT_CALL(r.inner_encoder_, encodeHeaders(_, true)); r.callbacks_.outer_encoder_->encodeHeaders( @@ -211,7 +282,7 @@ void Http2ConnPoolImplTest::completeRequestCloseUpstream(size_t index, ActiveTes /** * Verify that the pool retains and returns the host it was constructed with. */ -TEST_F(Http2ConnPoolImplTest, Host) { EXPECT_EQ(host_, pool_.host()); } +TEST_F(Http2ConnPoolImplTest, Host) { EXPECT_EQ(host_, pool_->host()); } /** * Verify that idle connections are closed immediately when draining. @@ -225,7 +296,54 @@ TEST_F(Http2ConnPoolImplTest, DrainConnectionIdle) { completeRequest(r); EXPECT_CALL(*this, onClientDestroy()); - pool_.drainConnections(); + pool_->drainConnections(); +} + +/** + * Verify that we set the ALPN fallback. + */ +TEST_F(Http2ConnPoolImplTest, VerifyAlpnFallback) { + InSequence s; + + // Override the TransportSocketFactory with a mock version we can add expectations to. + auto factory = std::make_unique(); + auto factory_ptr = factory.get(); + cluster_->transport_socket_matcher_ = + std::make_unique>(std::move(factory)); + + // Recreate the conn pool so that the host re-evaluates the transport socket match, arriving at + // our test transport socket factory. 
+ host_ = Upstream::makeTestHost(cluster_, "tcp://127.0.0.1:80"); + pool_ = std::make_unique(dispatcher_, host_, + Upstream::ResourcePriority::Default, nullptr, nullptr); + + // This requires some careful set up of expectations ordering: the call to createTransportSocket + // happens before all the connection set up but after the test client is created (due to some) + // of the mocks that are constructed as part of the test client. + createTestClients(1); + EXPECT_CALL(*factory_ptr, createTransportSocket(_)) + .WillOnce(Invoke( + [](Network::TransportSocketOptionsSharedPtr options) -> Network::TransportSocketPtr { + EXPECT_TRUE(options != nullptr); + EXPECT_EQ(options->applicationProtocolFallback(), + Http::Utility::AlpnNames::get().Http2); + return std::make_unique(); + })); + expectConnectionSetupForClient(1); + ActiveTestRequest r(*this, 0, false); + expectClientConnect(0, r); + EXPECT_CALL(r.inner_encoder_, encodeHeaders(_, true)); + r.callbacks_.outer_encoder_->encodeHeaders( + TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); + + EXPECT_CALL(r.decoder_, decodeHeaders_(_, true)); + EXPECT_CALL(*this, onClientDestroy()); + r.inner_decoder_->decodeHeaders( + ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, true); + + // Close connections. 
+ test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + dispatcher_.clearDeferredDeleteList(); } /** @@ -242,7 +360,7 @@ TEST_F(Http2ConnPoolImplTest, DrainConnectionReadyWithRequest) { r.callbacks_.outer_encoder_->encodeHeaders( TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); - pool_.drainConnections(); + pool_->drainConnections(); EXPECT_CALL(r.decoder_, decodeHeaders_(_, true)); EXPECT_CALL(*this, onClientDestroy()); @@ -265,7 +383,7 @@ TEST_F(Http2ConnPoolImplTest, DrainConnectionBusy) { r.callbacks_.outer_encoder_->encodeHeaders( TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); - pool_.drainConnections(); + pool_->drainConnections(); EXPECT_CALL(r.decoder_, decodeHeaders_(_, true)); EXPECT_CALL(*this, onClientDestroy()); @@ -285,12 +403,153 @@ TEST_F(Http2ConnPoolImplTest, DrainConnectionConnecting) { ActiveTestRequest r(*this, 0, false); // Pending request prevents the connection from being drained - pool_.drainConnections(); + pool_->drainConnections(); // Cancel the pending request, and then the connection can be closed. - r.handle_->cancel(); + r.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::Default); EXPECT_CALL(*this, onClientDestroy()); - pool_.drainConnections(); + pool_->drainConnections(); +} + +/** + * Verify that on CloseExcess, the connection is destroyed immediately. + */ +TEST_F(Http2ConnPoolImplTest, CloseExcess) { + InSequence s; + + expectClientCreate(); + ActiveTestRequest r(*this, 0, false); + + // Pending request prevents the connection from being drained + pool_->drainConnections(); + + EXPECT_CALL(*this, onClientDestroy()); + r.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess); +} + +/** + * Verify that on CloseExcess connections are destroyed when they can be. 
+ */ +TEST_F(Http2ConnPoolImplTest, CloseExcessTwo) { + cluster_->http2_options_.mutable_max_concurrent_streams()->set_value(1); + InSequence s; + + expectClientCreate(); + ActiveTestRequest r1(*this, 0, false); + + expectClientCreate(); + ActiveTestRequest r2(*this, 0, false); + { + EXPECT_CALL(*this, onClientDestroy()); + r1.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess); + } + + { + EXPECT_CALL(*this, onClientDestroy()); + r2.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess); + } +} + +/** + * Verify that on CloseExcess, the connections are destroyed iff they are actually excess. + */ +TEST_F(Http2ConnPoolImplTest, CloseExcessMultipleRequests) { + cluster_->http2_options_.mutable_max_concurrent_streams()->set_value(3); + InSequence s; + + // With 3 requests per connection, the first request will result in a client + // connection, and the next two will be queued for that connection. + expectClientCreate(); + ActiveTestRequest r1(*this, 0, false); + ActiveTestRequest r2(*this, 0, false); + ActiveTestRequest r3(*this, 0, false); + + // The fourth request will kick off a second connection, and the fifth will plan to share it. + expectClientCreate(); + ActiveTestRequest r4(*this, 0, false); + ActiveTestRequest r5(*this, 0, false); + + // The section below cancels the active requests in fairly random order, to + // ensure there's no association between the requests and the clients created + // for them. + + // The first cancel will not destroy any clients, as there are still four pending + // requests and they can not all share the first connection. + { + EXPECT_CALL(*this, onClientDestroy()).Times(0); + r5.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess); + } + // The second cancel will destroy one client, as there will be three pending requests + // remaining, and they only need one connection. 
+ { + EXPECT_CALL(*this, onClientDestroy()); + r1.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess); + } + + // The next two calls will not destroy the final client, as there are two other + // pending requests waiting on it. + { + EXPECT_CALL(*this, onClientDestroy()).Times(0); + r2.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess); + r4.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess); + } + // Finally with the last request gone, the final client is destroyed. + { + EXPECT_CALL(*this, onClientDestroy()); + r3.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess); + } +} + +TEST_F(Http2ConnPoolImplTest, CloseExcessMixedMultiplexing) { + InSequence s; + + // Create clients with in-order capacity: + // 3 2 6 + // Connection capacity is min(max requests per connection, max concurrent streams). + // Use maxRequestsPerConnection here since max requests is tested above. + EXPECT_CALL(*cluster_, maxRequestsPerConnection).WillOnce(Return(3)); + expectClientCreate(); + ActiveTestRequest r1(*this, 0, false); + ActiveTestRequest r2(*this, 0, false); + ActiveTestRequest r3(*this, 0, false); + + EXPECT_CALL(*cluster_, maxRequestsPerConnection).WillOnce(Return(2)); + expectClientCreate(); + ActiveTestRequest r4(*this, 0, false); + ActiveTestRequest r5(*this, 0, false); + + EXPECT_CALL(*cluster_, maxRequestsPerConnection).WillOnce(Return(6)); + expectClientCreate(); + ActiveTestRequest r6(*this, 0, false); + + // 6 requests, capacity [3, 2, 6] - the first cancel should tear down the client with [3] + // since we destroy oldest first and [3, 2] can handle the remaining 5 requests. 
+ { + EXPECT_CALL(*this, onClientDestroy()); + r1.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess); + } + + // 5 requests, capacity [3, 2] - no teardown + { + EXPECT_CALL(*this, onClientDestroy()).Times(0); + r2.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess); + } + // 4 requests, capacity [3, 2] - canceling one destroys the client with [2] + { + EXPECT_CALL(*this, onClientDestroy()); + r3.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess); + } + + // 3 requests, capacity [3]. Tear down the last channel when all 3 are canceled. + { + EXPECT_CALL(*this, onClientDestroy()).Times(0); + r4.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess); + r5.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess); + } + { + EXPECT_CALL(*this, onClientDestroy()); + r6.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess); + } } /** @@ -303,7 +562,7 @@ TEST_F(Http2ConnPoolImplTest, DrainConnections) { cluster_->max_requests_per_connection_ = 1; // Test drain connections call prior to any connections being created. - pool_.drainConnections(); + pool_->drainConnections(); expectClientCreate(); ActiveTestRequest r1(*this, 0, false); @@ -322,7 +581,7 @@ TEST_F(Http2ConnPoolImplTest, DrainConnections) { TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); // This will move the second connection to draining. - pool_.drainConnections(); + pool_->drainConnections(); // This will destroy the 2 draining connections. test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); @@ -580,8 +839,7 @@ TEST_F(Http2ConnPoolImplTest, PendingRequestsRequestOverflow) { expectStreamConnect(0, r1); expectStreamReset(r2); expectStreamReset(r3); - EXPECT_CALL(*test_clients_[0].connect_timer_, disableTimer()); - test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); + expectClientConnect(0); // Clean up everything. 
for (uint64_t i = 0; i < requests.max() - 1; ++i) { @@ -614,11 +872,10 @@ TEST_F(Http2ConnPoolImplTest, PendingRequestsMaxPendingCircuitBreaker) { MockResponseDecoder decoder; ConnPoolCallbacks callbacks; EXPECT_CALL(callbacks.pool_failure_, ready()); - EXPECT_EQ(nullptr, pool_.newStream(decoder, callbacks)); + EXPECT_EQ(nullptr, pool_->newStream(decoder, callbacks)); expectStreamConnect(0, r1); - EXPECT_CALL(*test_clients_[0].connect_timer_, disableTimer()); - test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); + expectClientConnect(0); // Clean up everything. for (uint64_t i = 0; i < pending_reqs.max() - 1; ++i) { @@ -765,7 +1022,7 @@ TEST_F(Http2ConnPoolImplTest, DrainDisconnectWithActiveRequest) { TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); ReadyWatcher drained; - pool_.addDrainedCallback([&]() -> void { drained.ready(); }); + pool_->addDrainedCallback([&]() -> void { drained.ready(); }); EXPECT_CALL(dispatcher_, deferredDelete_(_)); EXPECT_CALL(drained, ready()); @@ -798,7 +1055,7 @@ TEST_F(Http2ConnPoolImplTest, DrainDisconnectDrainingWithActiveRequest) { TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); ReadyWatcher drained; - pool_.addDrainedCallback([&]() -> void { drained.ready(); }); + pool_->addDrainedCallback([&]() -> void { drained.ready(); }); EXPECT_CALL(dispatcher_, deferredDelete_(_)); EXPECT_CALL(r2.decoder_, decodeHeaders_(_, true)); @@ -838,7 +1095,7 @@ TEST_F(Http2ConnPoolImplTest, DrainPrimary) { TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); ReadyWatcher drained; - pool_.addDrainedCallback([&]() -> void { drained.ready(); }); + pool_->addDrainedCallback([&]() -> void { drained.ready(); }); EXPECT_CALL(dispatcher_, deferredDelete_(_)); EXPECT_CALL(r2.decoder_, decodeHeaders_(_, true)); @@ -889,7 +1146,7 @@ TEST_F(Http2ConnPoolImplTest, DrainPrimaryNoActiveRequest) { ReadyWatcher drained; EXPECT_CALL(drained, ready()); - pool_.addDrainedCallback([&]() 
-> void { drained.ready(); }); + pool_->addDrainedCallback([&]() -> void { drained.ready(); }); EXPECT_CALL(*this, onClientDestroy()); dispatcher_.clearDeferredDeleteList(); @@ -948,7 +1205,7 @@ TEST_F(Http2ConnPoolImplTest, MaxGlobalRequests) { ConnPoolCallbacks callbacks; MockResponseDecoder decoder; EXPECT_CALL(callbacks.pool_failure_, ready()); - EXPECT_EQ(nullptr, pool_.newStream(decoder, callbacks)); + EXPECT_EQ(nullptr, pool_->newStream(decoder, callbacks)); test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); EXPECT_CALL(*this, onClientDestroy()); @@ -971,7 +1228,7 @@ TEST_F(Http2ConnPoolImplTest, GoAway) { r1.inner_decoder_->decodeHeaders( ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, true); - test_clients_[0].codec_client_->raiseGoAway(); + test_clients_[0].codec_client_->raiseGoAway(Http::GoAwayErrorCode::NoError); expectClientCreate(); ActiveTestRequest r2(*this, 1, false); @@ -992,7 +1249,7 @@ TEST_F(Http2ConnPoolImplTest, GoAway) { } TEST_F(Http2ConnPoolImplTest, NoActiveConnectionsByDefault) { - EXPECT_FALSE(pool_.hasActiveConnections()); + EXPECT_FALSE(pool_->hasActiveConnections()); } // Show that an active request on the primary connection is considered active. 
@@ -1001,7 +1258,7 @@ TEST_F(Http2ConnPoolImplTest, ActiveConnectionsHasActiveRequestsTrue) { ActiveTestRequest r1(*this, 0, false); expectClientConnect(0, r1); - EXPECT_TRUE(pool_.hasActiveConnections()); + EXPECT_TRUE(pool_->hasActiveConnections()); completeRequestCloseUpstream(0, r1); } @@ -1011,7 +1268,7 @@ TEST_F(Http2ConnPoolImplTest, PendingRequestsConsideredActive) { expectClientCreate(); ActiveTestRequest r1(*this, 0, false); - EXPECT_TRUE(pool_.hasActiveConnections()); + EXPECT_TRUE(pool_->hasActiveConnections()); expectClientConnect(0, r1); completeRequestCloseUpstream(0, r1); @@ -1025,7 +1282,7 @@ TEST_F(Http2ConnPoolImplTest, ResponseCompletedConnectionReadyNoActiveConnection expectClientConnect(0, r1); completeRequest(r1); - EXPECT_FALSE(pool_.hasActiveConnections()); + EXPECT_FALSE(pool_->hasActiveConnections()); closeClient(0); } @@ -1036,9 +1293,9 @@ TEST_F(Http2ConnPoolImplTest, DrainingConnectionsConsideredActive) { expectClientCreate(); ActiveTestRequest r1(*this, 0, false); expectClientConnect(0, r1); - pool_.drainConnections(); + pool_->drainConnections(); - EXPECT_TRUE(pool_.hasActiveConnections()); + EXPECT_TRUE(pool_->hasActiveConnections()); completeRequest(r1); closeClient(0); @@ -1050,13 +1307,152 @@ TEST_F(Http2ConnPoolImplTest, DrainedConnectionsNotActive) { expectClientCreate(); ActiveTestRequest r1(*this, 0, false); expectClientConnect(0, r1); - pool_.drainConnections(); + pool_->drainConnections(); completeRequest(r1); - EXPECT_FALSE(pool_.hasActiveConnections()); + EXPECT_FALSE(pool_->hasActiveConnections()); closeClient(0); } + +TEST_F(Http2ConnPoolImplTest, PrefetchWithoutMultiplexing) { + cluster_->http2_options_.mutable_max_concurrent_streams()->set_value(1); + ON_CALL(*cluster_, prefetchRatio).WillByDefault(Return(1.5)); + + // With one request per connection, and prefetch 1.5, the first request will + // kick off 2 connections. 
+ expectClientsCreate(2); + ActiveTestRequest r1(*this, 0, false); + + // With another incoming request, we'll have 2 in flight and want 1.5*2 so + // create one connection. + expectClientsCreate(1); + ActiveTestRequest r2(*this, 0, false); + + // With a third request we'll have 3 in flight and want 1.5*3 -> 5 so kick off + // two again. + expectClientsCreate(2); + ActiveTestRequest r3(*this, 0, false); + + r1.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess); + r2.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess); + r3.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess); + pool_->drainConnections(); + + closeAllClients(); +} + +TEST_F(Http2ConnPoolImplTest, PrefetchOff) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.allow_prefetch", "false"}}); + cluster_->http2_options_.mutable_max_concurrent_streams()->set_value(1); + ON_CALL(*cluster_, prefetchRatio).WillByDefault(Return(1.5)); + + // Despite the prefetch ratio, no prefetch will happen due to the runtime + // disable. + expectClientsCreate(1); + ActiveTestRequest r1(*this, 0, false); + + // Clean up. + r1.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess); + pool_->drainConnections(); + closeAllClients(); +} + +TEST_F(Http2ConnPoolImplTest, PrefetchWithMultiplexing) { + cluster_->http2_options_.mutable_max_concurrent_streams()->set_value(2); + ON_CALL(*cluster_, prefetchRatio).WillByDefault(Return(1.5)); + + // With two requests per connection, and prefetch 1.5, the first request will + // only kick off 1 connection. + expectClientsCreate(1); + ActiveTestRequest r1(*this, 0, false); + + // With another incoming request, we'll have capacity(2) in flight and want 1.5*2 so + // create an additional connection. + expectClientsCreate(1); + ActiveTestRequest r2(*this, 0, false); + + // Clean up. 
+ r1.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess); + r2.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess); + pool_->drainConnections(); + closeAllClients(); +} + +TEST_F(Http2ConnPoolImplTest, PrefetchEvenWhenReady) { + cluster_->http2_options_.mutable_max_concurrent_streams()->set_value(1); + ON_CALL(*cluster_, prefetchRatio).WillByDefault(Return(1.5)); + + // With one request per connection, and prefetch 1.5, the first request will + // kick off 2 connections. + expectClientsCreate(2); + ActiveTestRequest r1(*this, 0, false); + + // When the first client connects, r1 will be assigned. + expectClientConnect(0, r1); + // When the second connects, there is no waiting stream request to assign. + expectClientConnect(1); + + // The next incoming request will immediately be assigned a stream, and also + // kick off a prefetch. + expectClientsCreate(1); + ActiveTestRequest r2(*this, 1, true); + + // Clean up. + completeRequest(r1); + completeRequest(r2); + pool_->drainConnections(); + closeAllClients(); +} + +TEST_F(Http2ConnPoolImplTest, PrefetchAfterTimeout) { + cluster_->http2_options_.mutable_max_concurrent_streams()->set_value(1); + ON_CALL(*cluster_, prefetchRatio).WillByDefault(Return(1.5)); + + expectClientsCreate(2); + ActiveTestRequest r1(*this, 0, false); + + // When the first client connects, r1 will be assigned. + expectClientConnect(0, r1); + + // Now cause the prefetched connection to fail. We should try to create + // another in its place. + expectClientsCreate(1); + test_clients_[1].connect_timer_->invokeCallback(); + + // Clean up. + completeRequest(r1); + pool_->drainConnections(); + closeAllClients(); +} + +TEST_F(Http2ConnPoolImplTest, CloseExcessWithPrefetch) { + cluster_->http2_options_.mutable_max_concurrent_streams()->set_value(1); + ON_CALL(*cluster_, prefetchRatio).WillByDefault(Return(1.00)); + + // First request prefetches an additional connection. 
+ expectClientsCreate(1); + ActiveTestRequest r1(*this, 0, false); + + // Second request does not prefetch. + expectClientsCreate(1); + ActiveTestRequest r2(*this, 0, false); + + // Change the prefetch ratio to force the connection to no longer be excess. + ON_CALL(*cluster_, prefetchRatio).WillByDefault(Return(2)); + // Closing off the second request should bring us back to 1 request in queue, + // desired capacity 2, so will not close the connection. + EXPECT_CALL(*this, onClientDestroy()).Times(0); + r2.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess); + + // Clean up. + r1.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess); + pool_->drainConnections(); + closeAllClients(); +} + } // namespace Http2 } // namespace Http } // namespace Envoy diff --git a/test/common/http/http2/frame_replay.cc b/test/common/http/http2/frame_replay.cc index 7f63fd514160d..b0aa61e14ff07 100644 --- a/test/common/http/http2/frame_replay.cc +++ b/test/common/http/http2/frame_replay.cc @@ -89,13 +89,16 @@ ServerCodecFrameInjector::ServerCodecFrameInjector() : CodecFrameInjector("clien })); } -void CodecFrameInjector::write(const Frame& frame, Http::Connection& connection) { +Http::Status CodecFrameInjector::write(const Frame& frame, Http::Connection& connection) { Buffer::OwnedImpl buffer; buffer.add(frame.data(), frame.size()); ENVOY_LOG_MISC(trace, "{} write: {}", injector_name_, Hex::encode(frame.data(), frame.size())); - while (buffer.length() > 0) { - connection.dispatch(buffer); + auto status = Http::okStatus(); + while (buffer.length() > 0 && status.ok()) { + status = connection.dispatch(buffer); } + ENVOY_LOG_MISC(trace, "Status: {}", status.message()); + return status; } } // namespace Http2 diff --git a/test/common/http/http2/frame_replay.h b/test/common/http/http2/frame_replay.h index 7024c292cb4e9..2922d6a191105 100644 --- a/test/common/http/http2/frame_replay.h +++ b/test/common/http/http2/frame_replay.h @@ -4,7 +4,6 @@ #include 
"common/stats/isolated_store_impl.h" -#include "test/common/http/http2/codec_impl_test_util.h" #include "test/mocks/http/mocks.h" #include "test/mocks/network/mocks.h" #include "test/test_common/utility.h" @@ -53,7 +52,7 @@ class CodecFrameInjector { CodecFrameInjector(const std::string& injector_name); // Writes the data using the Http::Connection's nghttp2 session. - void write(const Frame& frame, Http::Connection& connection); + Http::Status write(const Frame& frame, Http::Connection& connection); envoy::config::core::v3::Http2ProtocolOptions options_; Stats::IsolatedStoreImpl stats_store_; diff --git a/test/common/http/http2/frame_replay_test.cc b/test/common/http/http2/frame_replay_test.cc index c6afd9a8be5ec..b1931d350bb85 100644 --- a/test/common/http/http2/frame_replay_test.cc +++ b/test/common/http/http2/frame_replay_test.cc @@ -1,6 +1,7 @@ #include "common/http/exception.h" #include "test/common/http/common.h" +#include "test/common/http/http2/codec_impl_test_util.h" #include "test/common/http/http2/frame_replay.h" #include "gtest/gtest.h" @@ -26,7 +27,7 @@ class RequestFrameCommentTest : public ::testing::Test {}; class ResponseFrameCommentTest : public ::testing::Test {}; // Creates and sets up a stream to reply to. -void setupStream(ClientCodecFrameInjector& codec, TestClientConnectionImpl& connection) { +void setupStream(ClientCodecFrameInjector& codec, TestClientConnectionImplNew& connection) { codec.request_encoder_ = &connection.newStream(codec.response_decoder_); codec.request_encoder_->getStream().addCallbacks(codec.client_stream_callbacks_); // Setup a single stream to inject frames as a reply to. @@ -56,18 +57,18 @@ TEST_F(RequestFrameCommentTest, SimpleExampleHuffman) { // Validate HEADERS decode. 
ServerCodecFrameInjector codec; - TestServerConnectionImpl connection( + TestServerConnectionImplNew connection( codec.server_connection_, codec.server_callbacks_, codec.stats_store_, codec.options_, Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT, envoy::config::core::v3::HttpProtocolOptions::ALLOW); - codec.write(WellKnownFrames::clientConnectionPrefaceFrame(), connection); - codec.write(WellKnownFrames::defaultSettingsFrame(), connection); - codec.write(WellKnownFrames::initialWindowUpdateFrame(), connection); + EXPECT_TRUE(codec.write(WellKnownFrames::clientConnectionPrefaceFrame(), connection).ok()); + EXPECT_TRUE(codec.write(WellKnownFrames::defaultSettingsFrame(), connection).ok()); + EXPECT_TRUE(codec.write(WellKnownFrames::initialWindowUpdateFrame(), connection).ok()); TestRequestHeaderMapImpl expected_headers; HttpTestUtility::addDefaultHeaders(expected_headers); expected_headers.addCopy("foo", "barbaz"); EXPECT_CALL(codec.request_decoder_, decodeHeaders_(HeaderMapEqual(&expected_headers), true)); - codec.write(header.frame(), connection); + EXPECT_TRUE(codec.write(header.frame(), connection).ok()); } // Validate that a simple Huffman encoded response HEADERS frame can be decoded. @@ -89,19 +90,19 @@ TEST_F(ResponseFrameCommentTest, SimpleExampleHuffman) { // Validate HEADERS decode. 
ClientCodecFrameInjector codec; - TestClientConnectionImpl connection( + TestClientConnectionImplNew connection( codec.client_connection_, codec.client_callbacks_, codec.stats_store_, codec.options_, Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT, ProdNghttp2SessionFactory::get()); setupStream(codec, connection); - codec.write(WellKnownFrames::defaultSettingsFrame(), connection); - codec.write(WellKnownFrames::initialWindowUpdateFrame(), connection); - TestHeaderMapImpl expected_headers; + EXPECT_TRUE(codec.write(WellKnownFrames::defaultSettingsFrame(), connection).ok()); + EXPECT_TRUE(codec.write(WellKnownFrames::initialWindowUpdateFrame(), connection).ok()); + TestResponseHeaderMapImpl expected_headers; expected_headers.addCopy(":status", "200"); expected_headers.addCopy("compression", "test"); EXPECT_CALL(codec.response_decoder_, decodeHeaders_(HeaderMapEqual(&expected_headers), true)); - codec.write(header.frame(), connection); + EXPECT_TRUE(codec.write(header.frame(), connection).ok()); } // Validate that a simple non-Huffman request HEADERS frame with no static table user either can be @@ -134,18 +135,18 @@ TEST_F(RequestFrameCommentTest, SimpleExamplePlain) { // Validate HEADERS decode. 
ServerCodecFrameInjector codec; - TestServerConnectionImpl connection( + TestServerConnectionImplNew connection( codec.server_connection_, codec.server_callbacks_, codec.stats_store_, codec.options_, Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT, envoy::config::core::v3::HttpProtocolOptions::ALLOW); - codec.write(WellKnownFrames::clientConnectionPrefaceFrame(), connection); - codec.write(WellKnownFrames::defaultSettingsFrame(), connection); - codec.write(WellKnownFrames::initialWindowUpdateFrame(), connection); + EXPECT_TRUE(codec.write(WellKnownFrames::clientConnectionPrefaceFrame(), connection).ok()); + EXPECT_TRUE(codec.write(WellKnownFrames::defaultSettingsFrame(), connection).ok()); + EXPECT_TRUE(codec.write(WellKnownFrames::initialWindowUpdateFrame(), connection).ok()); TestRequestHeaderMapImpl expected_headers; HttpTestUtility::addDefaultHeaders(expected_headers); expected_headers.addCopy("foo", "barbaz"); EXPECT_CALL(codec.request_decoder_, decodeHeaders_(HeaderMapEqual(&expected_headers), true)); - codec.write(header.frame(), connection); + EXPECT_TRUE(codec.write(header.frame(), connection).ok()); } // Validate that a simple non-Huffman response HEADERS frame with no static table user either can be @@ -169,19 +170,19 @@ TEST_F(ResponseFrameCommentTest, SimpleExamplePlain) { // Validate HEADERS decode. 
ClientCodecFrameInjector codec; - TestClientConnectionImpl connection( + TestClientConnectionImplNew connection( codec.client_connection_, codec.client_callbacks_, codec.stats_store_, codec.options_, Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT, ProdNghttp2SessionFactory::get()); setupStream(codec, connection); - codec.write(WellKnownFrames::defaultSettingsFrame(), connection); - codec.write(WellKnownFrames::initialWindowUpdateFrame(), connection); - TestHeaderMapImpl expected_headers; + EXPECT_TRUE(codec.write(WellKnownFrames::defaultSettingsFrame(), connection).ok()); + EXPECT_TRUE(codec.write(WellKnownFrames::initialWindowUpdateFrame(), connection).ok()); + TestResponseHeaderMapImpl expected_headers; expected_headers.addCopy(":status", "200"); expected_headers.addCopy("compression", "test"); EXPECT_CALL(codec.response_decoder_, decodeHeaders_(HeaderMapEqual(&expected_headers), true)); - codec.write(header.frame(), connection); + EXPECT_TRUE(codec.write(header.frame(), connection).ok()); } // Validate that corrupting any single byte with {NUL, CR, LF} in a HEADERS frame doesn't crash or @@ -199,19 +200,18 @@ TEST_F(RequestFrameCommentTest, SingleByteNulCrLfInHeaderFrame) { header.frame()[offset] = c; // Play the frames back. 
ServerCodecFrameInjector codec; - TestServerConnectionImpl connection( + TestServerConnectionImplNew connection( codec.server_connection_, codec.server_callbacks_, codec.stats_store_, codec.options_, Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT, envoy::config::core::v3::HttpProtocolOptions::ALLOW); - codec.write(WellKnownFrames::clientConnectionPrefaceFrame(), connection); - codec.write(WellKnownFrames::defaultSettingsFrame(), connection); - codec.write(WellKnownFrames::initialWindowUpdateFrame(), connection); - try { - EXPECT_CALL(codec.request_decoder_, decodeHeaders_(_, _)).Times(AnyNumber()); - EXPECT_CALL(codec.server_stream_callbacks_, onResetStream(_, _)).Times(AnyNumber()); - codec.write(header.frame(), connection); - } catch (const CodecProtocolException& e) { - ENVOY_LOG_MISC(trace, "CodecProtocolException: {}", e.what()); + EXPECT_TRUE(codec.write(WellKnownFrames::clientConnectionPrefaceFrame(), connection).ok()); + EXPECT_TRUE(codec.write(WellKnownFrames::defaultSettingsFrame(), connection).ok()); + EXPECT_TRUE(codec.write(WellKnownFrames::initialWindowUpdateFrame(), connection).ok()); + EXPECT_CALL(codec.request_decoder_, decodeHeaders_(_, _)).Times(AnyNumber()); + EXPECT_CALL(codec.server_stream_callbacks_, onResetStream(_, _)).Times(AnyNumber()); + auto status = codec.write(header.frame(), connection); + if (isCodecProtocolError(status)) { + ENVOY_LOG_MISC(trace, "CodecProtocolError: {}", status.message()); } header.frame()[offset] = original; } @@ -233,20 +233,19 @@ TEST_F(ResponseFrameCommentTest, SingleByteNulCrLfInHeaderFrame) { header.frame()[offset] = c; // Play the frames back. 
ClientCodecFrameInjector codec; - TestClientConnectionImpl connection( + TestClientConnectionImplNew connection( codec.client_connection_, codec.client_callbacks_, codec.stats_store_, codec.options_, Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT, ProdNghttp2SessionFactory::get()); setupStream(codec, connection); - codec.write(WellKnownFrames::defaultSettingsFrame(), connection); - codec.write(WellKnownFrames::initialWindowUpdateFrame(), connection); - try { - EXPECT_CALL(codec.response_decoder_, decodeHeaders_(_, _)).Times(AnyNumber()); - EXPECT_CALL(codec.client_stream_callbacks_, onResetStream(_, _)).Times(AnyNumber()); - codec.write(header.frame(), connection); - } catch (const CodecProtocolException& e) { - ENVOY_LOG_MISC(trace, "CodecProtocolException: {}", e.what()); + EXPECT_TRUE(codec.write(WellKnownFrames::defaultSettingsFrame(), connection).ok()); + EXPECT_TRUE(codec.write(WellKnownFrames::initialWindowUpdateFrame(), connection).ok()); + EXPECT_CALL(codec.response_decoder_, decodeHeaders_(_, _)).Times(AnyNumber()); + EXPECT_CALL(codec.client_stream_callbacks_, onResetStream(_, _)).Times(AnyNumber()); + auto status = codec.write(header.frame(), connection); + if (isCodecProtocolError(status)) { + ENVOY_LOG_MISC(trace, "CodecProtocolError: {}", status.message()); } header.frame()[offset] = original; } @@ -269,21 +268,20 @@ TEST_F(RequestFrameCommentTest, SingleByteNulCrLfInHeaderField) { header.frame()[offset] = c; // Play the frames back. 
ServerCodecFrameInjector codec; - TestServerConnectionImpl connection( + TestServerConnectionImplNew connection( codec.server_connection_, codec.server_callbacks_, codec.stats_store_, codec.options_, Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT, envoy::config::core::v3::HttpProtocolOptions::ALLOW); - codec.write(WellKnownFrames::clientConnectionPrefaceFrame(), connection); - codec.write(WellKnownFrames::defaultSettingsFrame(), connection); - codec.write(WellKnownFrames::initialWindowUpdateFrame(), connection); + EXPECT_TRUE(codec.write(WellKnownFrames::clientConnectionPrefaceFrame(), connection).ok()); + EXPECT_TRUE(codec.write(WellKnownFrames::defaultSettingsFrame(), connection).ok()); + EXPECT_TRUE(codec.write(WellKnownFrames::initialWindowUpdateFrame(), connection).ok()); bool stream_reset = false; EXPECT_CALL(codec.request_decoder_, decodeHeaders_(_, _)).Times(0); EXPECT_CALL(codec.server_stream_callbacks_, onResetStream(_, _)) .WillRepeatedly(InvokeWithoutArgs([&stream_reset] { stream_reset = true; })); bool codec_exception = false; - try { - codec.write(header.frame(), connection); - } catch (const CodecProtocolException& e) { + auto status = codec.write(header.frame(), connection); + if (isCodecProtocolError(status)) { codec_exception = true; } EXPECT_TRUE(stream_reset || codec_exception); @@ -308,22 +306,21 @@ TEST_F(ResponseFrameCommentTest, SingleByteNulCrLfInHeaderField) { header.frame()[offset] = c; // Play the frames back. 
ClientCodecFrameInjector codec; - TestClientConnectionImpl connection( + TestClientConnectionImplNew connection( codec.client_connection_, codec.client_callbacks_, codec.stats_store_, codec.options_, Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT, ProdNghttp2SessionFactory::get()); setupStream(codec, connection); - codec.write(WellKnownFrames::defaultSettingsFrame(), connection); - codec.write(WellKnownFrames::initialWindowUpdateFrame(), connection); + EXPECT_TRUE(codec.write(WellKnownFrames::defaultSettingsFrame(), connection).ok()); + EXPECT_TRUE(codec.write(WellKnownFrames::initialWindowUpdateFrame(), connection).ok()); bool stream_reset = false; EXPECT_CALL(codec.response_decoder_, decodeHeaders_(_, _)).Times(0); EXPECT_CALL(codec.client_stream_callbacks_, onResetStream(_, _)) .WillRepeatedly(InvokeWithoutArgs([&stream_reset] { stream_reset = true; })); bool codec_exception = false; - try { - codec.write(header.frame(), connection); - } catch (const CodecProtocolException& e) { + auto status = codec.write(header.frame(), connection); + if (isCodecProtocolError(status)) { codec_exception = true; } EXPECT_TRUE(stream_reset || codec_exception); diff --git a/test/common/http/http2/http2_frame.cc b/test/common/http/http2/http2_frame.cc index a0b5f0a912681..d4b3af94a6de3 100644 --- a/test/common/http/http2/http2_frame.cc +++ b/test/common/http/http2/http2_frame.cc @@ -4,6 +4,8 @@ #include "envoy/common/platform.h" +#include "common/common/hex.h" + namespace { // Make request stream ID in the network byte order @@ -33,12 +35,15 @@ const char Http2Frame::Preamble[25] = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"; void Http2Frame::setHeader(absl::string_view header) { ASSERT(header.size() >= HeaderSize); data_.assign(HeaderSize, 0); - memcpy(&data_[0], header.data(), HeaderSize); + // TODO(adisuissa): memcpy is discouraged as it may be unsafe. 
This should be + // use a safer memcpy alternative (example: https://abseil.io/tips/93) + memcpy(data_.data(), header.data(), HeaderSize); data_.resize(HeaderSize + payloadSize()); } void Http2Frame::setPayload(absl::string_view payload) { ASSERT(payload.size() >= payloadSize()); + ASSERT(data_.capacity() >= HeaderSize + payloadSize()); memcpy(&data_[HeaderSize], payload.data(), payloadSize()); } @@ -116,6 +121,7 @@ Http2Frame Http2Frame::makePingFrame(absl::string_view data) { static constexpr size_t kPingPayloadSize = 8; Http2Frame frame; frame.buildHeader(Type::Ping, kPingPayloadSize); + ASSERT(frame.data_.capacity() >= HeaderSize + std::min(kPingPayloadSize, data.size())); if (!data.empty()) { memcpy(&frame.data_[HeaderSize], data.data(), std::min(kPingPayloadSize, data.size())); } @@ -152,8 +158,46 @@ Http2Frame Http2Frame::makePriorityFrame(uint32_t stream_index, uint32_t depende static constexpr size_t kPriorityPayloadSize = 5; Http2Frame frame; frame.buildHeader(Type::Priority, kPriorityPayloadSize, 0, makeRequestStreamId(stream_index)); - uint32_t dependent_net = makeRequestStreamId(dependent_index); - memcpy(&frame.data_[HeaderSize], reinterpret_cast(&dependent_net), sizeof(uint32_t)); + const uint32_t dependent_net = makeRequestStreamId(dependent_index); + ASSERT(frame.data_.capacity() >= HeaderSize + sizeof(uint32_t)); + memcpy(&frame.data_[HeaderSize], reinterpret_cast(&dependent_net), sizeof(uint32_t)); + return frame; +} + +Http2Frame Http2Frame::makeEmptyPushPromiseFrame(uint32_t stream_index, + uint32_t promised_stream_index, + HeadersFlags flags) { + static constexpr size_t kEmptyPushPromisePayloadSize = 4; + Http2Frame frame; + frame.buildHeader(Type::PushPromise, kEmptyPushPromisePayloadSize, static_cast(flags), + makeRequestStreamId(stream_index)); + const uint32_t promised_stream_id = makeRequestStreamId(promised_stream_index); + ASSERT(frame.data_.capacity() >= HeaderSize + sizeof(uint32_t)); + memcpy(&frame.data_[HeaderSize], 
reinterpret_cast(&promised_stream_id), + sizeof(uint32_t)); + return frame; +} + +Http2Frame Http2Frame::makeResetStreamFrame(uint32_t stream_index, ErrorCode error_code) { + static constexpr size_t kResetStreamPayloadSize = 4; + Http2Frame frame; + frame.buildHeader(Type::RstStream, kResetStreamPayloadSize, 0, makeRequestStreamId(stream_index)); + const uint32_t error = static_cast(error_code); + ASSERT(frame.data_.capacity() >= HeaderSize + sizeof(uint32_t)); + memcpy(&frame.data_[HeaderSize], reinterpret_cast(&error), sizeof(uint32_t)); + return frame; +} + +Http2Frame Http2Frame::makeEmptyGoAwayFrame(uint32_t last_stream_index, ErrorCode error_code) { + static constexpr size_t kEmptyGoAwayPayloadSize = 8; + Http2Frame frame; + frame.buildHeader(Type::GoAway, kEmptyGoAwayPayloadSize, 0, makeRequestStreamId(0)); + const uint32_t last_stream_id = makeRequestStreamId(last_stream_index); + ASSERT(frame.data_.capacity() >= HeaderSize + 4 + sizeof(uint32_t)); + memcpy(&frame.data_[HeaderSize], reinterpret_cast(&last_stream_id), + sizeof(uint32_t)); + const uint32_t error = static_cast(error_code); + memcpy(&frame.data_[HeaderSize + 4], reinterpret_cast(&error), sizeof(uint32_t)); return frame; } @@ -162,8 +206,9 @@ Http2Frame Http2Frame::makeWindowUpdateFrame(uint32_t stream_index, uint32_t inc Http2Frame frame; frame.buildHeader(Type::WindowUpdate, kWindowUpdatePayloadSize, 0, makeRequestStreamId(stream_index)); - uint32_t increment_net = htonl(increment); - memcpy(&frame.data_[HeaderSize], reinterpret_cast(&increment_net), sizeof(uint32_t)); + const uint32_t increment_net = htonl(increment); + ASSERT(frame.data_.capacity() >= HeaderSize + sizeof(uint32_t)); + memcpy(&frame.data_[HeaderSize], reinterpret_cast(&increment_net), sizeof(uint32_t)); return frame; } @@ -218,6 +263,18 @@ Http2Frame Http2Frame::makePostRequest(uint32_t stream_index, absl::string_view return frame; } +Http2Frame Http2Frame::makeGenericFrame(absl::string_view contents) { + Http2Frame frame; + 
frame.appendData(contents); + return frame; +} + +Http2Frame Http2Frame::makeGenericFrameFromHexDump(absl::string_view contents) { + Http2Frame frame; + frame.appendData(Hex::decode(std::string(contents))); + return frame; +} + } // namespace Http2 } // namespace Http } // namespace Envoy diff --git a/test/common/http/http2/http2_frame.h b/test/common/http/http2/http2_frame.h index 9b04779a0a44b..f805f30281a1b 100644 --- a/test/common/http/http2/http2_frame.h +++ b/test/common/http/http2/http2_frame.h @@ -66,6 +66,23 @@ class Http2Frame { Host = 38, }; + enum class ErrorCode : uint8_t { + NoError = 0, + ProtocolError, + InternalError, + FlowControlError, + SettingsTimeout, + StreamClosed, + FrameSizeError, + RefusedStream, + Cancel, + CompressionError, + ConnectError, + EnhanceYourCalm, + InadequateSecurity, + Http11Required + }; + enum class ResponseStatus { Unknown, Ok, NotFound }; // Methods for creating HTTP2 frames @@ -77,6 +94,12 @@ class Http2Frame { HeadersFlags flags = HeadersFlags::None); static Http2Frame makeEmptyDataFrame(uint32_t stream_index, DataFlags flags = DataFlags::None); static Http2Frame makePriorityFrame(uint32_t stream_index, uint32_t dependent_index); + + static Http2Frame makeEmptyPushPromiseFrame(uint32_t stream_index, uint32_t promised_stream_index, + HeadersFlags flags = HeadersFlags::None); + static Http2Frame makeResetStreamFrame(uint32_t stream_index, ErrorCode error_code); + static Http2Frame makeEmptyGoAwayFrame(uint32_t last_stream_index, ErrorCode error_code); + static Http2Frame makeWindowUpdateFrame(uint32_t stream_index, uint32_t increment); static Http2Frame makeMalformedRequest(uint32_t stream_index); static Http2Frame makeMalformedRequestWithZerolenHeader(uint32_t stream_index, @@ -86,6 +109,14 @@ class Http2Frame { absl::string_view path); static Http2Frame makePostRequest(uint32_t stream_index, absl::string_view host, absl::string_view path); + /** + * Creates a frame with the given contents. 
This frame can be + * malformed/invalid depending on the given contents. + * @param contents the contents of the newly created frame. + * @return an Http2Frame that is comprised of the given contents. + */ + static Http2Frame makeGenericFrame(absl::string_view contents); + static Http2Frame makeGenericFrameFromHexDump(absl::string_view contents); Type type() const { return static_cast(data_[3]); } ResponseStatus responseStatus() const; @@ -125,6 +156,9 @@ class Http2Frame { // header. void appendHpackInt(uint64_t value, unsigned char prefix_mask); void appendData(absl::string_view data) { data_.insert(data_.end(), data.begin(), data.end()); } + void appendData(std::vector data) { + data_.insert(data_.end(), data.begin(), data.end()); + } // Headers are directly encoded void appendStaticHeader(StaticHeaderIndex index); diff --git a/test/common/http/http2/metadata_encoder_decoder_test.cc b/test/common/http/http2/metadata_encoder_decoder_test.cc index 5cce5b40893fb..ef225502dcaa6 100644 --- a/test/common/http/http2/metadata_encoder_decoder_test.cc +++ b/test/common/http/http2/metadata_encoder_decoder_test.cc @@ -1,8 +1,8 @@ #include "common/buffer/buffer_impl.h" #include "common/common/logger.h" +#include "common/common/random_generator.h" #include "common/http/http2/metadata_decoder.h" #include "common/http/http2/metadata_encoder.h" -#include "common/runtime/runtime_impl.h" #include "test/test_common/logging.h" @@ -152,7 +152,7 @@ class MetadataEncoderDecoderTest : public testing::Test { // Application data passed to nghttp2. 
UserData user_data_; - Runtime::RandomGeneratorImpl random_generator_; + Random::RandomGeneratorImpl random_generator_; }; TEST_F(MetadataEncoderDecoderTest, TestMetadataSizeLimit) { @@ -306,7 +306,7 @@ TEST_F(MetadataEncoderDecoderTest, EncodeMetadataMapVectorLarge) { TEST_F(MetadataEncoderDecoderTest, EncodeFuzzedMetadata) { MetadataMapVector metadata_map_vector; for (int i = 0; i < 10; i++) { - Runtime::RandomGeneratorImpl random; + Random::RandomGeneratorImpl random; int value_size_1 = random.random() % (2 * Http::METADATA_MAX_PAYLOAD_SIZE) + 1; int value_size_2 = random.random() % (2 * Http::METADATA_MAX_PAYLOAD_SIZE) + 1; MetadataMap metadata_map = { @@ -333,7 +333,6 @@ using MetadataEncoderDecoderDeathTest = MetadataEncoderDecoderTest; // Crash if a caller tries to pack more frames than the encoder has data for. TEST_F(MetadataEncoderDecoderDeathTest, PackTooManyFrames) { - Logger::StderrSinkDelegate stderr_sink(Logger::Registry::getSink()); // For coverage build. MetadataMap metadata_map = { {"header_key1", std::string(5, 'a')}, {"header_key2", std::string(5, 'b')}, diff --git a/test/common/http/http2/request_header_fuzz_test.cc b/test/common/http/http2/request_header_fuzz_test.cc index 9ac05cbfbe944..3af7f5c594cea 100644 --- a/test/common/http/http2/request_header_fuzz_test.cc +++ b/test/common/http/http2/request_header_fuzz_test.cc @@ -4,6 +4,7 @@ #include "common/http/exception.h" +#include "test/common/http/http2/codec_impl_test_util.h" #include "test/common/http/http2/frame_replay.h" #include "test/fuzz/fuzz_runner.h" @@ -14,17 +15,15 @@ namespace { void Replay(const Frame& frame, ServerCodecFrameInjector& codec) { // Create the server connection containing the nghttp2 session. 
- TestServerConnectionImpl connection( + TestServerConnectionImplNew connection( codec.server_connection_, codec.server_callbacks_, codec.stats_store_, codec.options_, Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT, envoy::config::core::v3::HttpProtocolOptions::ALLOW); - codec.write(WellKnownFrames::clientConnectionPrefaceFrame(), connection); - codec.write(WellKnownFrames::defaultSettingsFrame(), connection); - codec.write(WellKnownFrames::initialWindowUpdateFrame(), connection); - try { - codec.write(frame, connection); - } catch (const CodecProtocolException& e) { - } + Http::Status status = Http::okStatus(); + status = codec.write(WellKnownFrames::clientConnectionPrefaceFrame(), connection); + status = codec.write(WellKnownFrames::defaultSettingsFrame(), connection); + status = codec.write(WellKnownFrames::initialWindowUpdateFrame(), connection); + status = codec.write(frame, connection); } DEFINE_FUZZER(const uint8_t* buf, size_t len) { diff --git a/test/common/http/http2/response_header_corpus/set_details_twice b/test/common/http/http2/response_header_corpus/set_details_twice new file mode 100644 index 0000000000000..52aba4d72c31b Binary files /dev/null and b/test/common/http/http2/response_header_corpus/set_details_twice differ diff --git a/test/common/http/http2/response_header_fuzz_test.cc b/test/common/http/http2/response_header_fuzz_test.cc index 756af3860c3f5..4559aa06419b8 100644 --- a/test/common/http/http2/response_header_fuzz_test.cc +++ b/test/common/http/http2/response_header_fuzz_test.cc @@ -5,6 +5,7 @@ #include "common/http/exception.h" #include "test/common/http/common.h" +#include "test/common/http/http2/codec_impl_test_util.h" #include "test/common/http/http2/frame_replay.h" #include "test/fuzz/fuzz_runner.h" @@ -15,11 +16,12 @@ namespace { void Replay(const Frame& frame, ClientCodecFrameInjector& codec) { // Create the client connection containing the nghttp2 session. 
- TestClientConnectionImpl connection( + TestClientConnectionImplNew connection( codec.client_connection_, codec.client_callbacks_, codec.stats_store_, codec.options_, Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT, ProdNghttp2SessionFactory::get()); // Create a new stream. + Http::Status status = Http::okStatus(); codec.request_encoder_ = &connection.newStream(codec.response_decoder_); codec.request_encoder_->getStream().addCallbacks(codec.client_stream_callbacks_); // Setup a single stream to inject frames as a reply to. @@ -28,12 +30,9 @@ void Replay(const Frame& frame, ClientCodecFrameInjector& codec) { codec.request_encoder_->encodeHeaders(request_headers, true); // Send frames. - codec.write(WellKnownFrames::defaultSettingsFrame(), connection); - codec.write(WellKnownFrames::initialWindowUpdateFrame(), connection); - try { - codec.write(frame, connection); - } catch (const CodecProtocolException& e) { - } + status = codec.write(WellKnownFrames::defaultSettingsFrame(), connection); + status = codec.write(WellKnownFrames::initialWindowUpdateFrame(), connection); + status = codec.write(frame, connection); } DEFINE_FUZZER(const uint8_t* buf, size_t len) { diff --git a/test/common/http/path_utility_test.cc b/test/common/http/path_utility_test.cc index 0cd17e324c6d0..d7c6399341362 100644 --- a/test/common/http/path_utility_test.cc +++ b/test/common/http/path_utility_test.cc @@ -1,9 +1,10 @@ #include #include -#include "common/http/header_map_impl.h" #include "common/http/path_utility.h" +#include "test/test_common/utility.h" + #include "gtest/gtest.h" namespace Envoy { @@ -18,7 +19,11 @@ class PathUtilityTest : public testing::Test { headers_.setPath(path_value); return *headers_.Path(); } - RequestHeaderMapImpl headers_; + const HeaderEntry& hostHeaderEntry(const std::string& host_value) { + headers_.setHost(host_value); + return *headers_.Host(); + } + TestRequestHeaderMapImpl headers_; }; // Already normalized path don't change. 
@@ -105,6 +110,7 @@ TEST_F(PathUtilityTest, MergeSlashes) { EXPECT_EQ("/a/b/c", mergeSlashes("/a////b/c")); // quadruple / in the middle EXPECT_EQ("/a/b?a=///c", mergeSlashes("/a//b?a=///c")); // slashes in the query are ignored EXPECT_EQ("/a/b?", mergeSlashes("/a//b?")); // empty query + EXPECT_EQ("/a/?b", mergeSlashes("//a/?b")); // ends with slash + query } TEST_F(PathUtilityTest, RemoveQueryAndFragment) { diff --git a/test/common/http/request_id_extension_uuid_impl_test.cc b/test/common/http/request_id_extension_uuid_impl_test.cc index fb3da43f97862..0b471c3a88cf6 100644 --- a/test/common/http/request_id_extension_uuid_impl_test.cc +++ b/test/common/http/request_id_extension_uuid_impl_test.cc @@ -1,9 +1,9 @@ #include +#include "common/common/random_generator.h" #include "common/http/request_id_extension_uuid_impl.h" -#include "common/runtime/runtime_impl.h" -#include "test/mocks/runtime/mocks.h" +#include "test/mocks/common.h" #include "test/test_common/utility.h" #include "gtest/gtest.h" @@ -14,7 +14,7 @@ namespace Envoy { namespace Http { TEST(UUIDRequestIDExtensionTest, SetRequestID) { - testing::StrictMock random; + testing::StrictMock random; UUIDRequestIDExtension uuid_utils(random); TestRequestHeaderMapImpl request_headers; @@ -28,7 +28,7 @@ TEST(UUIDRequestIDExtensionTest, SetRequestID) { } TEST(UUIDRequestIDExtensionTest, EnsureRequestID) { - testing::StrictMock random; + testing::StrictMock random; UUIDRequestIDExtension uuid_utils(random); TestRequestHeaderMapImpl request_headers; @@ -42,7 +42,7 @@ TEST(UUIDRequestIDExtensionTest, EnsureRequestID) { } TEST(UUIDRequestIDExtensionTest, PreserveRequestIDInResponse) { - testing::StrictMock random; + testing::StrictMock random; UUIDRequestIDExtension uuid_utils(random); TestRequestHeaderMapImpl request_headers; TestResponseHeaderMapImpl response_headers; @@ -65,7 +65,7 @@ TEST(UUIDRequestIDExtensionTest, PreserveRequestIDInResponse) { } TEST(UUIDRequestIDExtensionTest, ModRequestIDBy) { - 
Runtime::RandomGeneratorImpl random; + Random::RandomGeneratorImpl random; UUIDRequestIDExtension uuid_utils(random); TestRequestHeaderMapImpl request_headers; @@ -115,7 +115,7 @@ TEST(UUIDRequestIDExtensionTest, ModRequestIDBy) { } TEST(UUIDRequestIDExtensionTest, RequestIDModDistribution) { - Runtime::RandomGeneratorImpl random; + Random::RandomGeneratorImpl random; UUIDRequestIDExtension uuid_utils(random); TestRequestHeaderMapImpl request_headers; @@ -145,7 +145,7 @@ TEST(UUIDRequestIDExtensionTest, RequestIDModDistribution) { } TEST(UUIDRequestIDExtensionTest, DISABLED_benchmark) { - Runtime::RandomGeneratorImpl random; + Random::RandomGeneratorImpl random; for (int i = 0; i < 100000000; ++i) { random.uuid(); @@ -153,7 +153,7 @@ TEST(UUIDRequestIDExtensionTest, DISABLED_benchmark) { } TEST(UUIDRequestIDExtensionTest, SetTraceStatus) { - Runtime::RandomGeneratorImpl random; + Random::RandomGeneratorImpl random; UUIDRequestIDExtension uuid_utils(random); TestRequestHeaderMapImpl request_headers; request_headers.setRequestId(random.uuid()); @@ -175,7 +175,7 @@ TEST(UUIDRequestIDExtensionTest, SetTraceStatus) { // Invalid request ID. 
request_headers.setRequestId(""); uuid_utils.setTraceStatus(request_headers, TraceStatus::Forced); - EXPECT_EQ(request_headers.RequestId()->value().getStringView(), ""); + EXPECT_EQ(request_headers.getRequestIdValue(), ""); } } // namespace Http diff --git a/test/common/http/status_test.cc b/test/common/http/status_test.cc index 4783b64dd0909..327bba34a5a53 100644 --- a/test/common/http/status_test.cc +++ b/test/common/http/status_test.cc @@ -67,5 +67,37 @@ TEST(Status, CodecClientError) { EXPECT_TRUE(isCodecClientError(status)); } +TEST(Status, ReturnIfError) { + + auto outer = [](Status (*inner)()) { + RETURN_IF_ERROR(inner()); + return bufferFloodError("boom"); + }; + + auto result = outer([]() { return okStatus(); }); + EXPECT_FALSE(result.ok()); + EXPECT_EQ("boom", result.message()); + EXPECT_TRUE(isBufferFloodError(result)); + result = outer([]() { return codecClientError("foobar"); }); + EXPECT_FALSE(result.ok()); + EXPECT_TRUE(isCodecClientError(result)); + EXPECT_EQ("foobar", result.message()); + + // Check that passing a `Status` object directly into the RETURN_IF_ERROR works. 
+ auto direct_status = [](const Status& status) { + RETURN_IF_ERROR(status); + return bufferFloodError("baz"); + }; + result = direct_status(codecClientError("foobar")); + EXPECT_FALSE(result.ok()); + EXPECT_TRUE(isCodecClientError(result)); + EXPECT_EQ("foobar", result.message()); + + result = direct_status(okStatus()); + EXPECT_FALSE(result.ok()); + EXPECT_EQ("baz", result.message()); + EXPECT_TRUE(isBufferFloodError(result)); +} + } // namespace Http } // namespace Envoy diff --git a/test/common/http/utility_corpus/clusterfuzz-testcase-minimized-utility_fuzz_test-5091558495092736 b/test/common/http/utility_corpus/clusterfuzz-testcase-minimized-utility_fuzz_test-5091558495092736 new file mode 100644 index 0000000000000..aff9fb7726040 --- /dev/null +++ b/test/common/http/utility_corpus/clusterfuzz-testcase-minimized-utility_fuzz_test-5091558495092736 @@ -0,0 +1,17 @@ +initialize_and_validate { + custom_settings_parameters { + identifier { + value: 11008 + } + value { + value: 65536 + } + } + custom_settings_parameters { + identifier { + value: 11008 + } + value { + } + } +} diff --git a/test/common/http/utility_corpus/parse_authority_string_0 b/test/common/http/utility_corpus/parse_authority_string_0 new file mode 100644 index 0000000000000..d4cbd3049147d --- /dev/null +++ b/test/common/http/utility_corpus/parse_authority_string_0 @@ -0,0 +1 @@ +parse_authority_string: "1.2.3.4" diff --git a/test/common/http/utility_corpus/parse_authority_string_1 b/test/common/http/utility_corpus/parse_authority_string_1 new file mode 100644 index 0000000000000..21904cf3c1e4b --- /dev/null +++ b/test/common/http/utility_corpus/parse_authority_string_1 @@ -0,0 +1 @@ +parse_authority_string: "[a:b:c:d::]:0" diff --git a/test/common/http/utility_corpus/parse_authority_string_2 b/test/common/http/utility_corpus/parse_authority_string_2 new file mode 100644 index 0000000000000..6e472e09b0b9c --- /dev/null +++ b/test/common/http/utility_corpus/parse_authority_string_2 @@ -0,0 +1 @@ 
+parse_authority_string: "example.com" diff --git a/test/common/http/utility_corpus/parse_authority_string_3 b/test/common/http/utility_corpus/parse_authority_string_3 new file mode 100644 index 0000000000000..369543b8883e4 --- /dev/null +++ b/test/common/http/utility_corpus/parse_authority_string_3 @@ -0,0 +1 @@ +parse_authority_string: "localhost:10000" diff --git a/test/common/http/utility_corpus/parse_authority_string_4 b/test/common/http/utility_corpus/parse_authority_string_4 new file mode 100644 index 0000000000000..b4257b2544d57 --- /dev/null +++ b/test/common/http/utility_corpus/parse_authority_string_4 @@ -0,0 +1 @@ +parse_authority_string: "0.0.0.0:4000" diff --git a/test/common/http/utility_corpus/pare_cookie_value_5 b/test/common/http/utility_corpus/parse_cookie_value_5 similarity index 100% rename from test/common/http/utility_corpus/pare_cookie_value_5 rename to test/common/http/utility_corpus/parse_cookie_value_5 diff --git a/test/common/http/utility_corpus/percent_decoding_string_0 b/test/common/http/utility_corpus/percent_decoding_string_0 index af1fb389f2aa3..0229e0d87f6d1 100644 --- a/test/common/http/utility_corpus/percent_decoding_string_0 +++ b/test/common/http/utility_corpus/percent_decoding_string_0 @@ -1 +1 @@ -"too%20lar%20" +percent_decoding_string: "too%20lar%20" diff --git a/test/common/http/utility_corpus/percent_decoding_string_1 b/test/common/http/utility_corpus/percent_decoding_string_1 index 49b96d308acd8..e1f4fe3f99b4b 100644 --- a/test/common/http/utility_corpus/percent_decoding_string_1 +++ b/test/common/http/utility_corpus/percent_decoding_string_1 @@ -1 +1 @@ -"too%20larg%e" +percent_decoding_string: "too%20larg%e" diff --git a/test/common/http/utility_corpus/percent_decoding_string_2 b/test/common/http/utility_corpus/percent_decoding_string_2 index 77f241c09555d..34e7e53257ee5 100644 --- a/test/common/http/utility_corpus/percent_decoding_string_2 +++ b/test/common/http/utility_corpus/percent_decoding_string_2 @@ -1 +1 @@ 
-"too%20large%" +percent_decoding_string: "too%20large%" diff --git a/test/common/http/utility_corpus/valid b/test/common/http/utility_corpus/valid index f47f99e15a995..1ea7275bf45e8 100644 --- a/test/common/http/utility_corpus/valid +++ b/test/common/http/utility_corpus/valid @@ -1,2 +1 @@ -find_query_string: "\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\7\177\177\17 -U²@/177\177N¿77\177" \ No newline at end of file +find_query_string: "\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\7\177\177\17U²@/177\177N¿77\177" diff --git a/test/common/http/utility_fuzz.proto b/test/common/http/utility_fuzz.proto index 940be6f4e0f33..50bb1a3c911bf 100644 --- a/test/common/http/utility_fuzz.proto +++ b/test/common/http/utility_fuzz.proto @@ -3,6 +3,7 @@ syntax = "proto3"; package test.common.http; import "validate/validate.proto"; +import "envoy/config/core/v3/protocol.proto"; // Structured input for utility_fuzz_test. 
@@ -43,5 +44,7 @@ message UtilityTestCase { string find_query_string = 9 [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE, strict: false}]; CookieValue make_set_cookie_value = 10; + string parse_authority_string = 11; + envoy.config.core.v3.Http2ProtocolOptions initialize_and_validate = 12; } } diff --git a/test/common/http/utility_fuzz_test.cc b/test/common/http/utility_fuzz_test.cc index 54d5ce8bfa1bd..2b665893f50f3 100644 --- a/test/common/http/utility_fuzz_test.cc +++ b/test/common/http/utility_fuzz_test.cc @@ -10,29 +10,36 @@ namespace Fuzz { namespace { DEFINE_PROTO_FUZZER(const test::common::http::UtilityTestCase& input) { + try { + TestUtility::validate(input); + } catch (ProtoValidationException& e) { + ENVOY_LOG_MISC(debug, "ProtoValidationException: {}", e.what()); + return; + } switch (input.utility_selector_case()) { case test::common::http::UtilityTestCase::kParseQueryString: { + // TODO(dio): Add the case when using parseAndDecodeQueryString(). Http::Utility::parseQueryString(input.parse_query_string()); break; } case test::common::http::UtilityTestCase::kParseCookieValue: { const auto& parse_cookie_value = input.parse_cookie_value(); - // Use the production HeaderMapImpl to avoid timeouts from TestHeaderMapImpl asserts. - Http::HeaderMapImpl headers; + // Use the production RequestHeaderMapImpl to avoid timeouts from TestHeaderMapImpl asserts. 
+ auto headers = Http::RequestHeaderMapImpl::create(); for (const std::string& cookie : parse_cookie_value.cookies()) { - headers.addCopy(Http::LowerCaseString("cookie"), replaceInvalidCharacters(cookie)); + headers->addCopy(Http::LowerCaseString("cookie"), replaceInvalidCharacters(cookie)); } - Http::Utility::parseCookieValue(headers, parse_cookie_value.key()); + Http::Utility::parseCookieValue(*headers, parse_cookie_value.key()); break; } case test::common::http::UtilityTestCase::kGetLastAddressFromXff: { const auto& get_last_address_from_xff = input.get_last_address_from_xff(); - // Use the production HeaderMapImpl to avoid timeouts from TestHeaderMapImpl asserts. - Http::RequestHeaderMapImpl headers; - headers.addCopy(Http::LowerCaseString("x-forwarded-for"), - replaceInvalidCharacters(get_last_address_from_xff.xff())); + // Use the production RequestHeaderMapImpl to avoid timeouts from TestHeaderMapImpl asserts. + auto headers = Http::RequestHeaderMapImpl::create(); + headers->addCopy(Http::LowerCaseString("x-forwarded-for"), + replaceInvalidCharacters(get_last_address_from_xff.xff())); // Take num_to_skip modulo 32 to avoid wasting time in lala land. - Http::Utility::getLastAddressFromXFF(headers, get_last_address_from_xff.num_to_skip() % 32); + Http::Utility::getLastAddressFromXFF(*headers, get_last_address_from_xff.num_to_skip() % 32); break; } case test::common::http::UtilityTestCase::kExtractHostPathFromUri: { @@ -51,7 +58,9 @@ DEFINE_PROTO_FUZZER(const test::common::http::UtilityTestCase& input) { } case test::common::http::UtilityTestCase::kParseParameters: { const auto& parse_parameters = input.parse_parameters(); - Http::Utility::parseParameters(parse_parameters.data(), parse_parameters.start()); + // TODO(dio): Add a case when doing parse_parameters with decode_params flag true. 
+ Http::Utility::parseParameters(parse_parameters.data(), parse_parameters.start(), + /*decode_params*/ false); break; } case test::common::http::UtilityTestCase::kFindQueryString: { @@ -66,6 +75,37 @@ DEFINE_PROTO_FUZZER(const test::common::http::UtilityTestCase& input) { max_age, cookie_value.httponly()); break; } + case test::common::http::UtilityTestCase::kParseAuthorityString: { + const auto& authority_string = input.parse_authority_string(); + Http::Utility::parseAuthority(authority_string); + break; + } + case test::common::http::UtilityTestCase::kInitializeAndValidate: { + const auto& options = input.initialize_and_validate(); + try { + Http2::Utility::initializeAndValidateOptions(options); + } catch (EnvoyException& e) { + absl::string_view msg = e.what(); + // initializeAndValidateOptions throws exceptions for 4 different reasons due to malformed + // settings, so check for them and allow any other exceptions through + if (absl::StartsWith( + msg, "server push is not supported by Envoy and can not be enabled via a SETTINGS " + "parameter.") || + absl::StartsWith( + msg, "the \"allow_connect\" SETTINGS parameter must only be configured through the " + "named field") || + absl::StartsWith( + msg, "inconsistent HTTP/2 custom SETTINGS parameter(s) detected; identifiers =") || + absl::EndsWith( + msg, "HTTP/2 SETTINGS parameter(s) can not be configured through both named and " + "custom parameters")) { + ENVOY_LOG_MISC(trace, "Caught exception {} in initializeAndValidateOptions test", e.what()); + } else { + throw EnvoyException(e.what()); + } + } + break; + } default: // Nothing to do. 
diff --git a/test/common/http/utility_test.cc b/test/common/http/utility_test.cc index 20d7e97305a50..687c5255ee366 100644 --- a/test/common/http/utility_test.cc +++ b/test/common/http/utility_test.cc @@ -9,11 +9,13 @@ #include "common/common/fmt.h" #include "common/http/exception.h" #include "common/http/header_map_impl.h" +#include "common/http/url_utility.h" #include "common/http/utility.h" #include "common/network/address_impl.h" #include "test/mocks/http/mocks.h" #include "test/test_common/printers.h" +#include "test/test_common/test_runtime.h" #include "test/test_common/utility.h" #include "gtest/gtest.h" @@ -28,16 +30,65 @@ namespace Http { TEST(HttpUtility, parseQueryString) { EXPECT_EQ(Utility::QueryParams(), Utility::parseQueryString("/hello")); + EXPECT_EQ(Utility::QueryParams(), Utility::parseAndDecodeQueryString("/hello")); + EXPECT_EQ(Utility::QueryParams(), Utility::parseQueryString("/hello?")); + EXPECT_EQ(Utility::QueryParams(), Utility::parseAndDecodeQueryString("/hello?")); + EXPECT_EQ(Utility::QueryParams({{"hello", ""}}), Utility::parseQueryString("/hello?hello")); + EXPECT_EQ(Utility::QueryParams({{"hello", ""}}), + Utility::parseAndDecodeQueryString("/hello?hello")); + EXPECT_EQ(Utility::QueryParams({{"hello", "world"}}), Utility::parseQueryString("/hello?hello=world")); + EXPECT_EQ(Utility::QueryParams({{"hello", "world"}}), + Utility::parseAndDecodeQueryString("/hello?hello=world")); + EXPECT_EQ(Utility::QueryParams({{"hello", ""}}), Utility::parseQueryString("/hello?hello=")); + EXPECT_EQ(Utility::QueryParams({{"hello", ""}}), + Utility::parseAndDecodeQueryString("/hello?hello=")); + EXPECT_EQ(Utility::QueryParams({{"hello", ""}}), Utility::parseQueryString("/hello?hello=&")); + EXPECT_EQ(Utility::QueryParams({{"hello", ""}}), + Utility::parseAndDecodeQueryString("/hello?hello=&")); + EXPECT_EQ(Utility::QueryParams({{"hello", ""}, {"hello2", "world2"}}), Utility::parseQueryString("/hello?hello=&hello2=world2")); + 
EXPECT_EQ(Utility::QueryParams({{"hello", ""}, {"hello2", "world2"}}), + Utility::parseAndDecodeQueryString("/hello?hello=&hello2=world2")); + EXPECT_EQ(Utility::QueryParams({{"name", "admin"}, {"level", "trace"}}), Utility::parseQueryString("/logging?name=admin&level=trace")); + EXPECT_EQ(Utility::QueryParams({{"name", "admin"}, {"level", "trace"}}), + Utility::parseAndDecodeQueryString("/logging?name=admin&level=trace")); + + EXPECT_EQ(Utility::QueryParams({{"param_value_has_encoded_ampersand", "a%26b"}}), + Utility::parseQueryString("/hello?param_value_has_encoded_ampersand=a%26b")); + EXPECT_EQ(Utility::QueryParams({{"param_value_has_encoded_ampersand", "a&b"}}), + Utility::parseAndDecodeQueryString("/hello?param_value_has_encoded_ampersand=a%26b")); + + EXPECT_EQ(Utility::QueryParams({{"params_has_encoded_%26", "a%26b"}, {"ok", "1"}}), + Utility::parseQueryString("/hello?params_has_encoded_%26=a%26b&ok=1")); + EXPECT_EQ(Utility::QueryParams({{"params_has_encoded_&", "a&b"}, {"ok", "1"}}), + Utility::parseAndDecodeQueryString("/hello?params_has_encoded_%26=a%26b&ok=1")); + + // A sample of request path with query strings by Prometheus: + // https://github.com/envoyproxy/envoy/issues/10926#issuecomment-651085261. 
+ EXPECT_EQ( + Utility::QueryParams( + {{"filter", + "%28cluster.upstream_%28rq_total%7Crq_time_sum%7Crq_time_count%7Crq_time_" + "bucket%7Crq_xx%7Crq_complete%7Crq_active%7Ccx_active%29%29%7C%28server.version%29"}}), + Utility::parseQueryString( + "/stats?filter=%28cluster.upstream_%28rq_total%7Crq_time_sum%7Crq_time_count%7Crq_time_" + "bucket%7Crq_xx%7Crq_complete%7Crq_active%7Ccx_active%29%29%7C%28server.version%29")); + EXPECT_EQ( + Utility::QueryParams( + {{"filter", "(cluster.upstream_(rq_total|rq_time_sum|rq_time_count|rq_time_bucket|rq_xx|" + "rq_complete|rq_active|cx_active))|(server.version)"}}), + Utility::parseAndDecodeQueryString( + "/stats?filter=%28cluster.upstream_%28rq_total%7Crq_time_sum%7Crq_time_count%7Crq_time_" + "bucket%7Crq_xx%7Crq_complete%7Crq_active%7Ccx_active%29%29%7C%28server.version%29")); } TEST(HttpUtility, getResponseStatus) { @@ -259,9 +310,10 @@ TEST(HttpUtility, createSslRedirectPath) { namespace { -envoy::config::core::v3::Http2ProtocolOptions parseHttp2OptionsFromV2Yaml(const std::string& yaml) { +envoy::config::core::v3::Http2ProtocolOptions +parseHttp2OptionsFromV3Yaml(const std::string& yaml, bool avoid_boosting = true) { envoy::config::core::v3::Http2ProtocolOptions http2_options; - TestUtility::loadFromYamlAndValidate(yaml, http2_options); + TestUtility::loadFromYamlAndValidate(yaml, http2_options, false, avoid_boosting); return ::Envoy::Http2::Utility::initializeAndValidateOptions(http2_options); } @@ -270,7 +322,7 @@ envoy::config::core::v3::Http2ProtocolOptions parseHttp2OptionsFromV2Yaml(const TEST(HttpUtility, parseHttp2Settings) { { using ::Envoy::Http2::Utility::OptionsLimits; - auto http2_options = parseHttp2OptionsFromV2Yaml("{}"); + auto http2_options = parseHttp2OptionsFromV3Yaml("{}"); EXPECT_EQ(OptionsLimits::DEFAULT_HPACK_TABLE_SIZE, http2_options.hpack_table_size().value()); EXPECT_EQ(OptionsLimits::DEFAULT_MAX_CONCURRENT_STREAMS, http2_options.max_concurrent_streams().value()); @@ -297,7 +349,7 @@ 
max_concurrent_streams: 2 initial_stream_window_size: 65535 initial_connection_window_size: 65535 )EOF"; - auto http2_options = parseHttp2OptionsFromV2Yaml(yaml); + auto http2_options = parseHttp2OptionsFromV3Yaml(yaml); EXPECT_EQ(1U, http2_options.hpack_table_size().value()); EXPECT_EQ(2U, http2_options.max_concurrent_streams().value()); EXPECT_EQ(65535U, http2_options.initial_stream_window_size().value()); @@ -305,6 +357,70 @@ initial_connection_window_size: 65535 } } +TEST(HttpUtility, ValidateStreamErrors) { + // Both false, the result should be false. + envoy::config::core::v3::Http2ProtocolOptions http2_options; + EXPECT_FALSE(Envoy::Http2::Utility::initializeAndValidateOptions(http2_options) + .override_stream_error_on_invalid_http_message() + .value()); + + // If the new value is not present, the legacy value is respected. + http2_options.set_stream_error_on_invalid_http_messaging(true); + EXPECT_TRUE(Envoy::Http2::Utility::initializeAndValidateOptions(http2_options) + .override_stream_error_on_invalid_http_message() + .value()); + + // If the new value is present, it is used. + http2_options.mutable_override_stream_error_on_invalid_http_message()->set_value(true); + http2_options.set_stream_error_on_invalid_http_messaging(false); + EXPECT_TRUE(Envoy::Http2::Utility::initializeAndValidateOptions(http2_options) + .override_stream_error_on_invalid_http_message() + .value()); + + // Invert values - the new value should still be used. 
+ http2_options.mutable_override_stream_error_on_invalid_http_message()->set_value(false); + http2_options.set_stream_error_on_invalid_http_messaging(true); + EXPECT_FALSE(Envoy::Http2::Utility::initializeAndValidateOptions(http2_options) + .override_stream_error_on_invalid_http_message() + .value()); +} + +TEST(HttpUtility, ValidateStreamErrorsWithHcm) { + envoy::config::core::v3::Http2ProtocolOptions http2_options; + http2_options.set_stream_error_on_invalid_http_messaging(true); + EXPECT_TRUE(Envoy::Http2::Utility::initializeAndValidateOptions(http2_options) + .override_stream_error_on_invalid_http_message() + .value()); + + // If the HCM value is present it will take precedence over the old value. + Protobuf::BoolValue hcm_value; + hcm_value.set_value(false); + EXPECT_FALSE(Envoy::Http2::Utility::initializeAndValidateOptions(http2_options, true, hcm_value) + .override_stream_error_on_invalid_http_message() + .value()); + // The HCM value will be ignored if initializeAndValidateOptions is told it is not present. + EXPECT_TRUE(Envoy::Http2::Utility::initializeAndValidateOptions(http2_options, false, hcm_value) + .override_stream_error_on_invalid_http_message() + .value()); + + // The override_stream_error_on_invalid_http_message takes precedence over the + // global one. + http2_options.mutable_override_stream_error_on_invalid_http_message()->set_value(true); + EXPECT_TRUE(Envoy::Http2::Utility::initializeAndValidateOptions(http2_options, true, hcm_value) + .override_stream_error_on_invalid_http_message() + .value()); + + { + // With runtime flipped, override is ignored. 
+ TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.hcm_stream_error_on_invalid_message", "false"}}); + EXPECT_TRUE(Envoy::Http2::Utility::initializeAndValidateOptions(http2_options, true, hcm_value) + .override_stream_error_on_invalid_http_message() + .value()); + } +} + TEST(HttpUtility, getLastAddressFromXFF) { { const std::string first_address = "192.0.2.10"; @@ -398,7 +514,7 @@ TEST(HttpUtility, getLastAddressFromXFF) { } TEST(HttpUtility, TestParseCookie) { - TestHeaderMapImpl headers{ + TestRequestHeaderMapImpl headers{ {"someheader", "10.0.0.1"}, {"cookie", "somekey=somevalue; someotherkey=someothervalue"}, {"cookie", "abc=def; token=abc123; Expires=Wed, 09 Jun 2021 10:18:14 GMT"}, @@ -410,10 +526,10 @@ TEST(HttpUtility, TestParseCookie) { } TEST(HttpUtility, TestParseCookieBadValues) { - TestHeaderMapImpl headers{{"cookie", "token1=abc123; = "}, - {"cookie", "token2=abc123; "}, - {"cookie", "; token3=abc123;"}, - {"cookie", "=; token4=\"abc123\""}}; + TestRequestHeaderMapImpl headers{{"cookie", "token1=abc123; = "}, + {"cookie", "token2=abc123; "}, + {"cookie", "; token3=abc123;"}, + {"cookie", "=; token4=\"abc123\""}}; EXPECT_EQ(Utility::parseCookieValue(headers, "token1"), "abc123"); EXPECT_EQ(Utility::parseCookieValue(headers, "token2"), "abc123"); @@ -422,7 +538,7 @@ TEST(HttpUtility, TestParseCookieBadValues) { } TEST(HttpUtility, TestParseCookieWithQuotes) { - TestHeaderMapImpl headers{ + TestRequestHeaderMapImpl headers{ {"someheader", "10.0.0.1"}, {"cookie", "dquote=\"; quoteddquote=\"\"\""}, {"cookie", "leadingdquote=\"foobar;"}, @@ -460,8 +576,9 @@ TEST(HttpUtility, SendLocalReply) { EXPECT_CALL(callbacks, encodeHeaders_(_, false)); EXPECT_CALL(callbacks, encodeData(_, true)); - Utility::sendLocalReply(false, callbacks, is_reset, Http::Code::PayloadTooLarge, "large", - absl::nullopt, false); + Utility::sendLocalReply( + is_reset, callbacks, + Utility::LocalReplyData{false, 
Http::Code::PayloadTooLarge, "large", absl::nullopt, false}); } TEST(HttpUtility, SendLocalGrpcReply) { @@ -470,15 +587,16 @@ TEST(HttpUtility, SendLocalGrpcReply) { EXPECT_CALL(callbacks, encodeHeaders_(_, true)) .WillOnce(Invoke([&](const ResponseHeaderMap& headers, bool) -> void { - EXPECT_EQ(headers.Status()->value().getStringView(), "200"); + EXPECT_EQ(headers.getStatusValue(), "200"); EXPECT_NE(headers.GrpcStatus(), nullptr); - EXPECT_EQ(headers.GrpcStatus()->value().getStringView(), + EXPECT_EQ(headers.getGrpcStatusValue(), std::to_string(enumToInt(Grpc::Status::WellKnownGrpcStatus::Unknown))); EXPECT_NE(headers.GrpcMessage(), nullptr); - EXPECT_EQ(headers.GrpcMessage()->value().getStringView(), "large"); + EXPECT_EQ(headers.getGrpcMessageValue(), "large"); })); - Utility::sendLocalReply(true, callbacks, is_reset, Http::Code::PayloadTooLarge, "large", - absl::nullopt, false); + Utility::sendLocalReply( + is_reset, callbacks, + Utility::LocalReplyData{true, Http::Code::PayloadTooLarge, "large", absl::nullopt, false}); } TEST(HttpUtility, SendLocalGrpcReplyWithUpstreamJsonPayload) { @@ -496,16 +614,17 @@ TEST(HttpUtility, SendLocalGrpcReplyWithUpstreamJsonPayload) { EXPECT_CALL(callbacks, encodeHeaders_(_, true)) .WillOnce(Invoke([&](const ResponseHeaderMap& headers, bool) -> void { - EXPECT_EQ(headers.Status()->value().getStringView(), "200"); + EXPECT_EQ(headers.getStatusValue(), "200"); EXPECT_NE(headers.GrpcStatus(), nullptr); - EXPECT_EQ(headers.GrpcStatus()->value().getStringView(), + EXPECT_EQ(headers.getGrpcStatusValue(), std::to_string(enumToInt(Grpc::Status::WellKnownGrpcStatus::Unauthenticated))); EXPECT_NE(headers.GrpcMessage(), nullptr); const auto& encoded = Utility::PercentEncoding::encode(json); - EXPECT_EQ(headers.GrpcMessage()->value().getStringView(), encoded); + EXPECT_EQ(headers.getGrpcMessageValue(), encoded); })); - Utility::sendLocalReply(true, callbacks, is_reset, Http::Code::Unauthorized, json, absl::nullopt, - false); + 
Utility::sendLocalReply( + is_reset, callbacks, + Utility::LocalReplyData{true, Http::Code::Unauthorized, json, absl::nullopt, false}); } TEST(HttpUtility, RateLimitedGrpcStatus) { @@ -514,22 +633,25 @@ TEST(HttpUtility, RateLimitedGrpcStatus) { EXPECT_CALL(callbacks, encodeHeaders_(_, true)) .WillOnce(Invoke([&](const ResponseHeaderMap& headers, bool) -> void { EXPECT_NE(headers.GrpcStatus(), nullptr); - EXPECT_EQ(headers.GrpcStatus()->value().getStringView(), + EXPECT_EQ(headers.getGrpcStatusValue(), std::to_string(enumToInt(Grpc::Status::WellKnownGrpcStatus::Unavailable))); })); - Utility::sendLocalReply(true, callbacks, false, Http::Code::TooManyRequests, "", absl::nullopt, - false); + Utility::sendLocalReply( + false, callbacks, + Utility::LocalReplyData{true, Http::Code::TooManyRequests, "", absl::nullopt, false}); EXPECT_CALL(callbacks, encodeHeaders_(_, true)) .WillOnce(Invoke([&](const ResponseHeaderMap& headers, bool) -> void { EXPECT_NE(headers.GrpcStatus(), nullptr); - EXPECT_EQ(headers.GrpcStatus()->value().getStringView(), + EXPECT_EQ(headers.getGrpcStatusValue(), std::to_string(enumToInt(Grpc::Status::WellKnownGrpcStatus::ResourceExhausted))); })); - Utility::sendLocalReply(true, callbacks, false, Http::Code::TooManyRequests, "", - absl::make_optional( - Grpc::Status::WellKnownGrpcStatus::ResourceExhausted), - false); + Utility::sendLocalReply( + false, callbacks, + Utility::LocalReplyData{true, Http::Code::TooManyRequests, "", + absl::make_optional( + Grpc::Status::WellKnownGrpcStatus::ResourceExhausted), + false}); } TEST(HttpUtility, SendLocalReplyDestroyedEarly) { @@ -540,8 +662,9 @@ TEST(HttpUtility, SendLocalReplyDestroyedEarly) { is_reset = true; })); EXPECT_CALL(callbacks, encodeData(_, true)).Times(0); - Utility::sendLocalReply(false, callbacks, is_reset, Http::Code::PayloadTooLarge, "large", - absl::nullopt, false); + Utility::sendLocalReply( + is_reset, callbacks, + Utility::LocalReplyData{false, Http::Code::PayloadTooLarge, "large", 
absl::nullopt, false}); } TEST(HttpUtility, SendLocalReplyHeadRequest) { @@ -549,11 +672,11 @@ TEST(HttpUtility, SendLocalReplyHeadRequest) { bool is_reset = false; EXPECT_CALL(callbacks, encodeHeaders_(_, true)) .WillOnce(Invoke([&](const ResponseHeaderMap& headers, bool) -> void { - EXPECT_EQ(headers.ContentLength()->value().getStringView(), - fmt::format("{}", strlen("large"))); + EXPECT_EQ(headers.getContentLengthValue(), fmt::format("{}", strlen("large"))); })); - Utility::sendLocalReply(false, callbacks, is_reset, Http::Code::PayloadTooLarge, "large", - absl::nullopt, true); + Utility::sendLocalReply( + is_reset, callbacks, + Utility::LocalReplyData{false, Http::Code::PayloadTooLarge, "large", absl::nullopt, true}); } TEST(HttpUtility, TestExtractHostPathFromUri) { @@ -599,8 +722,8 @@ TEST(HttpUtility, TestPrepareHeaders) { Http::RequestMessagePtr message = Utility::prepareHeaders(http_uri); - EXPECT_EQ("/x/y/z", message->headers().Path()->value().getStringView()); - EXPECT_EQ("dns.name", message->headers().Host()->value().getStringView()); + EXPECT_EQ("/x/y/z", message->headers().getPathValue()); + EXPECT_EQ("dns.name", message->headers().getHostValue()); } TEST(HttpUtility, QueryParamsToString) { @@ -658,10 +781,9 @@ TEST(HttpUtility, ResolveMostSpecificPerFilterConfigGeneric) { const std::string filter_name = "envoy.filter"; NiceMock filter_callbacks; - const Router::RouteSpecificFilterConfig* nullconfig = nullptr; - const Router::RouteSpecificFilterConfig* one = nullconfig + 1; - const Router::RouteSpecificFilterConfig* two = nullconfig + 2; - const Router::RouteSpecificFilterConfig* three = nullconfig + 3; + const Router::RouteSpecificFilterConfig one; + const Router::RouteSpecificFilterConfig two; + const Router::RouteSpecificFilterConfig three; // Test when there's nothing on the route EXPECT_EQ(nullptr, Utility::resolveMostSpecificPerFilterConfigGeneric(filter_name, @@ -669,23 +791,23 @@ TEST(HttpUtility, ResolveMostSpecificPerFilterConfigGeneric) { 
// Testing in reverse order, so that the method always returns the last object. ON_CALL(filter_callbacks.route_->route_entry_.virtual_host_, perFilterConfig(filter_name)) - .WillByDefault(Return(one)); - EXPECT_EQ(one, Utility::resolveMostSpecificPerFilterConfigGeneric(filter_name, - filter_callbacks.route())); + .WillByDefault(Return(&one)); + EXPECT_EQ(&one, Utility::resolveMostSpecificPerFilterConfigGeneric(filter_name, + filter_callbacks.route())); - ON_CALL(*filter_callbacks.route_, perFilterConfig(filter_name)).WillByDefault(Return(two)); - EXPECT_EQ(two, Utility::resolveMostSpecificPerFilterConfigGeneric(filter_name, - filter_callbacks.route())); + ON_CALL(*filter_callbacks.route_, perFilterConfig(filter_name)).WillByDefault(Return(&two)); + EXPECT_EQ(&two, Utility::resolveMostSpecificPerFilterConfigGeneric(filter_name, + filter_callbacks.route())); ON_CALL(filter_callbacks.route_->route_entry_, perFilterConfig(filter_name)) - .WillByDefault(Return(three)); - EXPECT_EQ(three, Utility::resolveMostSpecificPerFilterConfigGeneric(filter_name, - filter_callbacks.route())); + .WillByDefault(Return(&three)); + EXPECT_EQ(&three, Utility::resolveMostSpecificPerFilterConfigGeneric(filter_name, + filter_callbacks.route())); // Cover the case of no route entry ON_CALL(*filter_callbacks.route_, routeEntry()).WillByDefault(Return(nullptr)); - EXPECT_EQ(two, Utility::resolveMostSpecificPerFilterConfigGeneric(filter_name, - filter_callbacks.route())); + EXPECT_EQ(&two, Utility::resolveMostSpecificPerFilterConfigGeneric(filter_name, + filter_callbacks.route())); } // Verify that traversePerFilterConfigGeneric traverses in the order of specificity. @@ -695,16 +817,16 @@ TEST(HttpUtility, TraversePerFilterConfigIteratesInOrder) { // Create configs to test; to ease of testing instead of using real objects // we will use pointers that are actually indexes. 
- const Router::RouteSpecificFilterConfig* nullconfig = nullptr; + const std::vector nullconfigs(5); size_t num_configs = 1; ON_CALL(filter_callbacks.route_->route_entry_.virtual_host_, perFilterConfig(filter_name)) - .WillByDefault(Return(nullconfig + num_configs)); + .WillByDefault(Return(&nullconfigs[num_configs])); num_configs++; ON_CALL(*filter_callbacks.route_, perFilterConfig(filter_name)) - .WillByDefault(Return(nullconfig + num_configs)); + .WillByDefault(Return(&nullconfigs[num_configs])); num_configs++; ON_CALL(filter_callbacks.route_->route_entry_, perFilterConfig(filter_name)) - .WillByDefault(Return(nullconfig + num_configs)); + .WillByDefault(Return(&nullconfigs[num_configs])); // a vector to save which configs are visited by the traversePerFilterConfigGeneric std::vector visited_configs(num_configs, 0); @@ -713,7 +835,7 @@ TEST(HttpUtility, TraversePerFilterConfigIteratesInOrder) { size_t index = 0; Utility::traversePerFilterConfigGeneric(filter_name, filter_callbacks.route(), [&](const Router::RouteSpecificFilterConfig& cfg) { - int cfg_index = &cfg - nullconfig; + int cfg_index = &cfg - nullconfigs.data(); visited_configs[index] = cfg_index - 1; index++; }); @@ -820,7 +942,7 @@ TEST(HttpUtility, TestTeHeaderGzipTrailersSanitized) { // Expect that the set of headers is valid and can be sanitized EXPECT_TRUE(Utility::sanitizeConnectionHeader(request_headers)); - Http::TestHeaderMapImpl sanitized_headers = { + Http::TestRequestHeaderMapImpl sanitized_headers = { {":method", "GET"}, {":path", "/"}, {":scheme", "http"}, @@ -848,7 +970,7 @@ TEST(HttpUtility, TestNominatedConnectionHeader) { }; EXPECT_TRUE(Utility::sanitizeConnectionHeader(request_headers)); - TestHeaderMapImpl sanitized_headers = { + TestRequestHeaderMapImpl sanitized_headers = { {":method", "GET"}, {":path", "/"}, {":scheme", "http"}, @@ -876,7 +998,7 @@ TEST(HttpUtility, TestNominatedConnectionHeader2) { }; EXPECT_TRUE(Utility::sanitizeConnectionHeader(request_headers)); - 
Http::TestHeaderMapImpl sanitized_headers = { + Http::TestRequestHeaderMapImpl sanitized_headers = { {":method", "GET"}, {":path", "/"}, {":scheme", "http"}, @@ -903,7 +1025,7 @@ TEST(HttpUtility, TestNominatedPseudoHeader) { }; // Headers remain unchanged since there are nominated pseudo headers - Http::TestHeaderMapImpl sanitized_headers(request_headers); + Http::TestRequestHeaderMapImpl sanitized_headers(request_headers); EXPECT_FALSE(Utility::sanitizeConnectionHeader(request_headers)); EXPECT_EQ(sanitized_headers, request_headers); @@ -925,7 +1047,7 @@ TEST(HttpUtility, TestSanitizeEmptyTokensFromHeaders) { }; EXPECT_TRUE(Utility::sanitizeConnectionHeader(request_headers)); - Http::TestHeaderMapImpl sanitized_headers = { + Http::TestRequestHeaderMapImpl sanitized_headers = { {":method", "GET"}, {":path", "/"}, {":scheme", "http"}, @@ -952,7 +1074,7 @@ TEST(HttpUtility, TestTooManyNominatedHeaders) { }; // Headers remain unchanged because there are too many nominated headers - Http::TestHeaderMapImpl sanitized_headers(request_headers); + Http::TestRequestHeaderMapImpl sanitized_headers(request_headers); EXPECT_FALSE(Utility::sanitizeConnectionHeader(request_headers)); EXPECT_EQ(sanitized_headers, request_headers); @@ -970,7 +1092,7 @@ TEST(HttpUtility, TestRejectNominatedXForwardedFor) { }; // Headers remain unchanged due to nominated X-Forwarded* header - Http::TestHeaderMapImpl sanitized_headers(request_headers); + Http::TestRequestHeaderMapImpl sanitized_headers(request_headers); EXPECT_FALSE(Utility::sanitizeConnectionHeader(request_headers)); EXPECT_EQ(sanitized_headers, request_headers); @@ -988,7 +1110,7 @@ TEST(HttpUtility, TestRejectNominatedXForwardedHost) { }; // Headers remain unchanged due to nominated X-Forwarded* header - Http::TestHeaderMapImpl sanitized_headers(request_headers); + Http::TestRequestHeaderMapImpl sanitized_headers(request_headers); EXPECT_FALSE(Utility::sanitizeConnectionHeader(request_headers)); EXPECT_EQ(sanitized_headers, 
request_headers); @@ -1008,7 +1130,7 @@ TEST(HttpUtility, TestRejectNominatedXForwardedProto) { // Headers are not sanitized due to nominated X-Forwarded* header EXPECT_FALSE(Utility::sanitizeConnectionHeader(request_headers)); - Http::TestHeaderMapImpl sanitized_headers = { + Http::TestRequestHeaderMapImpl sanitized_headers = { {":method", "GET"}, {":path", "/"}, {":scheme", "http"}, @@ -1032,7 +1154,7 @@ TEST(HttpUtility, TestRejectTrailersSubString) { }; EXPECT_TRUE(Utility::sanitizeConnectionHeader(request_headers)); - Http::TestHeaderMapImpl sanitized_headers = { + Http::TestRequestHeaderMapImpl sanitized_headers = { {":method", "GET"}, {":path", "/"}, {":scheme", "http"}, @@ -1070,7 +1192,7 @@ TEST(HttpUtility, TestRejectTeHeaderTooLong) { }; // Headers remain unchanged because the TE value is too long - Http::TestHeaderMapImpl sanitized_headers(request_headers); + Http::TestRequestHeaderMapImpl sanitized_headers(request_headers); EXPECT_FALSE(Utility::sanitizeConnectionHeader(request_headers)); EXPECT_EQ(sanitized_headers, request_headers); @@ -1078,87 +1200,110 @@ TEST(HttpUtility, TestRejectTeHeaderTooLong) { TEST(Url, ParsingFails) { Utility::Url url; - EXPECT_FALSE(url.initialize("", false)); - EXPECT_FALSE(url.initialize("foo", false)); - EXPECT_FALSE(url.initialize("http://", false)); - EXPECT_FALSE(url.initialize("random_scheme://host.com/path", false)); - EXPECT_FALSE(url.initialize("http://www.foo.com", true)); - EXPECT_FALSE(url.initialize("foo.com", true)); + const bool is_connect = true; + EXPECT_FALSE(url.initialize("", !is_connect)); + EXPECT_FALSE(url.initialize("foo", !is_connect)); + EXPECT_FALSE(url.initialize("http://", !is_connect)); + EXPECT_FALSE(url.initialize("random_scheme://host.com/path", !is_connect)); + // Only port value in valid range (1-65535) is allowed. 
+ EXPECT_FALSE(url.initialize("http://host.com:65536/path", !is_connect)); + EXPECT_FALSE(url.initialize("http://host.com:0/path", !is_connect)); + EXPECT_FALSE(url.initialize("http://host.com:-1/path", !is_connect)); + EXPECT_FALSE(url.initialize("http://host.com:port/path", !is_connect)); + + // Test parsing fails for CONNECT request URLs. + EXPECT_FALSE(url.initialize("http://www.foo.com", is_connect)); + EXPECT_FALSE(url.initialize("foo.com", is_connect)); + // Only port value in valid range (1-65535) is allowed. + EXPECT_FALSE(url.initialize("foo.com:65536", is_connect)); + EXPECT_FALSE(url.initialize("foo.com:0", is_connect)); + EXPECT_FALSE(url.initialize("foo.com:-1", is_connect)); + EXPECT_FALSE(url.initialize("foo.com:port", is_connect)); } void validateUrl(absl::string_view raw_url, absl::string_view expected_scheme, - absl::string_view expected_host_port, absl::string_view expected_path) { + absl::string_view expected_host_port, absl::string_view expected_path, + uint16_t expected_port) { Utility::Url url; - ASSERT_TRUE(url.initialize(raw_url, false)) << "Failed to initialize " << raw_url; + ASSERT_TRUE(url.initialize(raw_url, /*is_connect=*/false)) << "Failed to initialize " << raw_url; EXPECT_EQ(url.scheme(), expected_scheme); EXPECT_EQ(url.hostAndPort(), expected_host_port); EXPECT_EQ(url.pathAndQueryParams(), expected_path); -} - -void validateConnectUrl(absl::string_view raw_url, absl::string_view expected_host_port) { - Utility::Url url; - ASSERT_TRUE(url.initialize(raw_url, true)) << "Failed to initialize " << raw_url; - EXPECT_TRUE(url.scheme().empty()); - EXPECT_TRUE(url.pathAndQueryParams().empty()); - EXPECT_EQ(url.hostAndPort(), expected_host_port); + EXPECT_EQ(url.port(), expected_port); } TEST(Url, ParsingTest) { - // Test url with no explicit path (with and without port) - validateUrl("http://www.host.com", "http", "www.host.com", "/"); - validateUrl("http://www.host.com:80", "http", "www.host.com:80", "/"); + // Test url with no explicit 
path (with and without port). + validateUrl("http://www.host.com", "http", "www.host.com", "/", 80); + validateUrl("http://www.host.com:80", "http", "www.host.com", "/", 80); // Test url with "/" path. - validateUrl("http://www.host.com:80/", "http", "www.host.com:80", "/"); - validateUrl("http://www.host.com/", "http", "www.host.com", "/"); + validateUrl("http://www.host.com:80/", "http", "www.host.com", "/", 80); + validateUrl("http://www.host.com/", "http", "www.host.com", "/", 80); // Test url with "?". - validateUrl("http://www.host.com:80/?", "http", "www.host.com:80", "/?"); - validateUrl("http://www.host.com/?", "http", "www.host.com", "/?"); + validateUrl("http://www.host.com:80/?", "http", "www.host.com", "/?", 80); + validateUrl("http://www.host.com/?", "http", "www.host.com", "/?", 80); // Test url with "?" but without slash. - validateUrl("http://www.host.com:80?", "http", "www.host.com:80", "?"); - validateUrl("http://www.host.com?", "http", "www.host.com", "?"); + validateUrl("http://www.host.com:80?", "http", "www.host.com", "/?", 80); + validateUrl("http://www.host.com?", "http", "www.host.com", "/?", 80); - // Test url with multi-character path - validateUrl("http://www.host.com:80/path", "http", "www.host.com:80", "/path"); - validateUrl("http://www.host.com/path", "http", "www.host.com", "/path"); + // Test url with multi-character path. + validateUrl("http://www.host.com:80/path", "http", "www.host.com", "/path", 80); + validateUrl("http://www.host.com/path", "http", "www.host.com", "/path", 80); - // Test url with multi-character path and ? at the end - validateUrl("http://www.host.com:80/path?", "http", "www.host.com:80", "/path?"); - validateUrl("http://www.host.com/path?", "http", "www.host.com", "/path?"); + // Test url with multi-character path and ? at the end. 
+ validateUrl("http://www.host.com:80/path?", "http", "www.host.com", "/path?", 80); + validateUrl("http://www.host.com/path?", "http", "www.host.com", "/path?", 80); - // Test https scheme - validateUrl("https://www.host.com", "https", "www.host.com", "/"); + // Test https scheme. + validateUrl("https://www.host.com", "https", "www.host.com", "/", 443); - // Test url with query parameter - validateUrl("http://www.host.com:80/?query=param", "http", "www.host.com:80", "/?query=param"); - validateUrl("http://www.host.com/?query=param", "http", "www.host.com", "/?query=param"); + // Test url with query parameter. + validateUrl("http://www.host.com:80/?query=param", "http", "www.host.com", "/?query=param", 80); + validateUrl("http://www.host.com/?query=param", "http", "www.host.com", "/?query=param", 80); - // Test url with query parameter but without slash - validateUrl("http://www.host.com:80?query=param", "http", "www.host.com:80", "?query=param"); - validateUrl("http://www.host.com?query=param", "http", "www.host.com", "?query=param"); + // Test url with query parameter but without slash. It will be normalized. + validateUrl("http://www.host.com:80?query=param", "http", "www.host.com", "/?query=param", 80); + validateUrl("http://www.host.com?query=param", "http", "www.host.com", "/?query=param", 80); - // Test url with multi-character path and query parameter - validateUrl("http://www.host.com:80/path?query=param", "http", "www.host.com:80", - "/path?query=param"); - validateUrl("http://www.host.com/path?query=param", "http", "www.host.com", "/path?query=param"); + // Test url with multi-character path and query parameter. 
+ validateUrl("http://www.host.com:80/path?query=param", "http", "www.host.com", + "/path?query=param", 80); + validateUrl("http://www.host.com/path?query=param", "http", "www.host.com", "/path?query=param", + 80); - // Test url with multi-character path and more than one query parameter - validateUrl("http://www.host.com:80/path?query=param&query2=param2", "http", "www.host.com:80", - "/path?query=param&query2=param2"); + // Test url with multi-character path and more than one query parameter. + validateUrl("http://www.host.com:80/path?query=param&query2=param2", "http", "www.host.com", + "/path?query=param&query2=param2", 80); validateUrl("http://www.host.com/path?query=param&query2=param2", "http", "www.host.com", - "/path?query=param&query2=param2"); + "/path?query=param&query2=param2", 80); + // Test url with multi-character path, more than one query parameter and fragment validateUrl("http://www.host.com:80/path?query=param&query2=param2#fragment", "http", - "www.host.com:80", "/path?query=param&query2=param2#fragment"); + "www.host.com", "/path?query=param&query2=param2#fragment", 80); validateUrl("http://www.host.com/path?query=param&query2=param2#fragment", "http", "www.host.com", - "/path?query=param&query2=param2#fragment"); + "/path?query=param&query2=param2#fragment", 80); + + // Test url with non-default ports. 
+ validateUrl("https://www.host.com:8443", "https", "www.host.com:8443", "/", 8443); + validateUrl("http://www.host.com:8080", "http", "www.host.com:8080", "/", 8080); +} + +void validateConnectUrl(absl::string_view raw_url, absl::string_view expected_host_port, + uint16_t expected_port) { + Utility::Url url; + ASSERT_TRUE(url.initialize(raw_url, /*is_connect=*/true)) << "Failed to initialize " << raw_url; + EXPECT_TRUE(url.scheme().empty()); + EXPECT_TRUE(url.pathAndQueryParams().empty()); + EXPECT_EQ(url.hostAndPort(), expected_host_port); + EXPECT_EQ(url.port(), expected_port); } TEST(Url, ParsingForConnectTest) { - validateConnectUrl("host.com:443", "host.com:443"); - validateConnectUrl("host.com:80", "host.com:80"); + validateConnectUrl("host.com:443", "host.com:443", 443); + validateConnectUrl("host.com:80", "host.com:80", 80); } void validatePercentEncodingEncodeDecode(absl::string_view source, @@ -1182,11 +1327,29 @@ TEST(PercentEncoding, EncodeDecode) { validatePercentEncodingEncodeDecode("_-ok-_", "_-ok-_"); } -TEST(PercentEncoding, Trailing) { +TEST(PercentEncoding, Decoding) { + EXPECT_EQ(Utility::PercentEncoding::decode("a%26b"), "a&b"); + EXPECT_EQ(Utility::PercentEncoding::decode("hello%20world"), "hello world"); + EXPECT_EQ(Utility::PercentEncoding::decode("upstream%7Cdownstream"), "upstream|downstream"); + EXPECT_EQ( + Utility::PercentEncoding::decode( + "filter=%28cluster.upstream_%28rq_total%7Crq_time_sum%7Crq_time_count%7Crq_time_bucket%" + "7Crq_xx%7Crq_complete%7Crq_active%7Ccx_active%29%29%7C%28server.version%29"), + "filter=(cluster.upstream_(rq_total|rq_time_sum|rq_time_count|rq_time_bucket|rq_xx|rq_" + "complete|rq_active|cx_active))|(server.version)"); +} + +TEST(PercentEncoding, DecodingWithTrailingInput) { EXPECT_EQ(Utility::PercentEncoding::decode("too%20lar%20"), "too lar "); EXPECT_EQ(Utility::PercentEncoding::decode("too%20larg%e"), "too larg%e"); EXPECT_EQ(Utility::PercentEncoding::decode("too%20large%"), "too large%"); } 
+TEST(PercentEncoding, Encoding) { + EXPECT_EQ(Utility::PercentEncoding::encode("too%large"), "too%25large"); + EXPECT_EQ(Utility::PercentEncoding::encode("too%!large/"), "too%25!large/"); + EXPECT_EQ(Utility::PercentEncoding::encode("too%!large/", "%!/"), "too%25%21large%2F"); +} + } // namespace Http } // namespace Envoy diff --git a/test/common/init/BUILD b/test/common/init/BUILD index 894e7493aa722..e2d1645763260 100644 --- a/test/common/init/BUILD +++ b/test/common/init/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( diff --git a/test/common/json/BUILD b/test/common/json/BUILD index a3b170033a51b..803f2abca6af4 100644 --- a/test/common/json/BUILD +++ b/test/common/json/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_fuzz_test", @@ -7,6 +5,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_fuzz_test( diff --git a/test/common/json/config_schemas_test_data/BUILD b/test/common/json/config_schemas_test_data/BUILD index 603cfa6adcca7..185f28e77ce9f 100644 --- a/test/common/json/config_schemas_test_data/BUILD +++ b/test/common/json/config_schemas_test_data/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", "envoy_py_test_binary", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_py_test_binary( diff --git a/test/common/json/json_loader_test.cc b/test/common/json/json_loader_test.cc index 47c4094f7969c..884caf5b0d0c1 100644 --- a/test/common/json/json_loader_test.cc +++ b/test/common/json/json_loader_test.cc @@ -198,7 +198,7 @@ TEST_F(JsonLoaderTest, Basic) { { ObjectSharedPtr json = Factory::loadFromString("{}"); - EXPECT_THROW(json->getObjectArray("hello").empty(), Exception); + EXPECT_THROW((void)json->getObjectArray("hello").empty(), Exception); 
} { @@ -246,8 +246,14 @@ TEST_F(JsonLoaderTest, Hash) { ObjectSharedPtr json1 = Factory::loadFromString("{\"value1\": 10.5, \"value2\": -12.3}"); ObjectSharedPtr json2 = Factory::loadFromString("{\"value2\": -12.3, \"value1\": 10.5}"); ObjectSharedPtr json3 = Factory::loadFromString(" { \"value2\": -12.3, \"value1\": 10.5} "); - EXPECT_NE(json1->hash(), json2->hash()); + ObjectSharedPtr json4 = Factory::loadFromString("{\"value1\": 10.5}"); + + // Objects with keys in different orders should be the same + EXPECT_EQ(json1->hash(), json2->hash()); + // Whitespace is ignored EXPECT_EQ(json2->hash(), json3->hash()); + // Ensure different hash is computed for different objects + EXPECT_NE(json1->hash(), json4->hash()); } TEST_F(JsonLoaderTest, Schema) { diff --git a/test/common/local_reply/BUILD b/test/common/local_reply/BUILD new file mode 100644 index 0000000000000..9b5fc8f50ec11 --- /dev/null +++ b/test/common/local_reply/BUILD @@ -0,0 +1,22 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_test", + "envoy_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_cc_test( + name = "local_reply_test", + srcs = ["local_reply_test.cc"], + deps = [ + "//source/common/local_reply:local_reply_lib", + "//test/mocks/http:http_mocks", + "//test/mocks/server:factory_context_mocks", + "//test/mocks/stream_info:stream_info_mocks", + "//test/test_common:utility_lib", + "@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto", + ], +) diff --git a/test/common/local_reply/local_reply_test.cc b/test/common/local_reply/local_reply_test.cc new file mode 100644 index 0000000000000..a43519128fc51 --- /dev/null +++ b/test/common/local_reply/local_reply_test.cc @@ -0,0 +1,339 @@ +#include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.validate.h" +#include "envoy/http/codes.h" + +#include "common/local_reply/local_reply.h" + +#include "test/mocks/http/mocks.h" +#include 
"test/mocks/server/factory_context.h" +#include "test/test_common/simulated_time_system.h" +#include "test/test_common/utility.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace LocalReply { +namespace { + +const Http::Code TestInitCode = Http::Code::OK; +const std::string TestInitBody = "Init body text"; +const absl::string_view TestInitContentType = "content-type"; +} // namespace + +class LocalReplyTest : public testing::Test { +public: + LocalReplyTest() : stream_info_(time_system_.timeSystem()) { resetData(TestInitCode); } + + void resetData(Http::Code code) { + code_ = code; + body_ = TestInitBody; + content_type_ = TestInitContentType; + } + void resetData(uint32_t code) { resetData(static_cast(code)); } + + Http::Code code_; + std::string body_; + absl::string_view content_type_; + + Http::TestRequestHeaderMapImpl request_headers_{{":method", "GET"}, {":path", "/bar/foo"}}; + Http::TestResponseHeaderMapImpl response_headers_; + Event::SimulatedTimeSystem time_system_; + StreamInfo::StreamInfoImpl stream_info_; + + envoy::extensions::filters::network::http_connection_manager::v3::LocalReplyConfig config_; + NiceMock context_; +}; + +TEST_F(LocalReplyTest, TestEmptyConfig) { + // Empty LocalReply config. + auto local = Factory::create(config_, context_); + + local->rewrite(nullptr, response_headers_, stream_info_, code_, body_, content_type_); + EXPECT_EQ(code_, TestInitCode); + EXPECT_EQ(stream_info_.response_code_, static_cast(TestInitCode)); + EXPECT_EQ(response_headers_.Status()->value().getStringView(), + std::to_string(enumToInt(TestInitCode))); + EXPECT_EQ(body_, TestInitBody); + EXPECT_EQ(content_type_, "text/plain"); +} + +TEST_F(LocalReplyTest, TestDefaultLocalReply) { + // Default LocalReply should be the same as empty config. 
+ auto local = Factory::createDefault(); + + local->rewrite(nullptr, response_headers_, stream_info_, code_, body_, content_type_); + EXPECT_EQ(code_, TestInitCode); + EXPECT_EQ(stream_info_.response_code_, static_cast(TestInitCode)); + EXPECT_EQ(response_headers_.Status()->value().getStringView(), + std::to_string(enumToInt(TestInitCode))); + EXPECT_EQ(body_, TestInitBody); + EXPECT_EQ(content_type_, "text/plain"); +} + +TEST_F(LocalReplyTest, TestInvalidConfigEmptyFilter) { + // Invalid config: a mapper should have a valid filter + const std::string yaml = R"( + mappers: + - status_code: 401 +)"; + TestUtility::loadFromYaml(yaml, config_); + + std::string err; + EXPECT_FALSE(Validate(config_, &err)); +} + +TEST_F(LocalReplyTest, TestInvalidConfigStatusCode) { + // Invalid config: status_code should be at range [200, 600) + const std::string yaml = R"( + mappers: + - filter: + status_code_filter: + comparison: + op: EQ + value: + default_value: 400 + runtime_key: key_b + status_code: 100 +)"; + TestUtility::loadFromYaml(yaml, config_); + + std::string err; + EXPECT_FALSE(Validate(config_, &err)); +} + +TEST_F(LocalReplyTest, TestDefaultTextFormatter) { + // Default text formatter without any mappers + const std::string yaml = R"( + body_format: + text_format: "%LOCAL_REPLY_BODY% %RESPONSE_CODE%" +)"; + TestUtility::loadFromYaml(yaml, config_); + auto local = Factory::create(config_, context_); + + local->rewrite(nullptr, response_headers_, stream_info_, code_, body_, content_type_); + EXPECT_EQ(code_, TestInitCode); + EXPECT_EQ(stream_info_.response_code_, static_cast(TestInitCode)); + EXPECT_EQ(response_headers_.Status()->value().getStringView(), + std::to_string(enumToInt(TestInitCode))); + EXPECT_EQ(body_, "Init body text 200"); + EXPECT_EQ(content_type_, "text/plain"); +} + +TEST_F(LocalReplyTest, TestDefaultJsonFormatter) { + // Default json formatter without any mappers + const std::string yaml = R"( + body_format: + json_format: + text: "plain text" + path: 
"%REQ(:path)%" + code: "%RESPONSE_CODE%" + body: "%LOCAL_REPLY_BODY%" +)"; + TestUtility::loadFromYaml(yaml, config_); + auto local = Factory::create(config_, context_); + + local->rewrite(&request_headers_, response_headers_, stream_info_, code_, body_, content_type_); + EXPECT_EQ(code_, TestInitCode); + EXPECT_EQ(stream_info_.response_code_, static_cast(TestInitCode)); + EXPECT_EQ(response_headers_.Status()->value().getStringView(), + std::to_string(enumToInt(TestInitCode))); + EXPECT_EQ(content_type_, "application/json"); + + const std::string expected = R"({ + "text": "plain text", + "path": "/bar/foo", + "code": 200, + "body": "Init body text" +})"; + EXPECT_TRUE(TestUtility::jsonStringEqual(body_, expected)); +} + +TEST_F(LocalReplyTest, TestMapperRewrite) { + // Match with response_code, and rewrite the code and body. + const std::string yaml = R"( + mappers: + - filter: + status_code_filter: + comparison: + op: EQ + value: + default_value: 400 + runtime_key: key_b + status_code: 401 + body: + inline_string: "400 body text" + - filter: + status_code_filter: + comparison: + op: EQ + value: + default_value: 410 + runtime_key: key_b + body: + inline_string: "410 body text" + - filter: + status_code_filter: + comparison: + op: EQ + value: + default_value: 420 + runtime_key: key_b + status_code: 421 + - filter: + status_code_filter: + comparison: + op: EQ + value: + default_value: 430 + runtime_key: key_b +)"; + TestUtility::loadFromYaml(yaml, config_); + auto local = Factory::create(config_, context_); + + // code=400 matches the first filter; rewrite code and body + resetData(400); + local->rewrite(&request_headers_, response_headers_, stream_info_, code_, body_, content_type_); + EXPECT_EQ(code_, static_cast(401)); + EXPECT_EQ(stream_info_.response_code_, 401U); + EXPECT_EQ(response_headers_.Status()->value().getStringView(), "401"); + EXPECT_EQ(body_, "400 body text"); + EXPECT_EQ(content_type_, "text/plain"); + + // code=410 matches the second filter; 
rewrite body only + resetData(410); + local->rewrite(&request_headers_, response_headers_, stream_info_, code_, body_, content_type_); + EXPECT_EQ(code_, static_cast(410)); + EXPECT_EQ(stream_info_.response_code_, 410U); + EXPECT_EQ(response_headers_.Status()->value().getStringView(), "410"); + EXPECT_EQ(body_, "410 body text"); + EXPECT_EQ(content_type_, "text/plain"); + + // code=420 matches the third filter; rewrite code only + resetData(420); + local->rewrite(&request_headers_, response_headers_, stream_info_, code_, body_, content_type_); + EXPECT_EQ(code_, static_cast(421)); + EXPECT_EQ(stream_info_.response_code_, 421U); + EXPECT_EQ(response_headers_.Status()->value().getStringView(), "421"); + EXPECT_EQ(body_, TestInitBody); + EXPECT_EQ(content_type_, "text/plain"); + + // code=430 matches the fourth filter; rewrite nothing + resetData(430); + local->rewrite(&request_headers_, response_headers_, stream_info_, code_, body_, content_type_); + EXPECT_EQ(code_, static_cast(430)); + EXPECT_EQ(stream_info_.response_code_, 430U); + EXPECT_EQ(response_headers_.Status()->value().getStringView(), "430"); + EXPECT_EQ(body_, TestInitBody); + EXPECT_EQ(content_type_, "text/plain"); +} + +TEST_F(LocalReplyTest, TestMapperFormat) { + // Match with response_code, and rewrite the code and body. 
+ const std::string yaml = R"( + mappers: + - filter: + status_code_filter: + comparison: + op: EQ + value: + default_value: 400 + runtime_key: key_b + status_code: 401 + body: + inline_string: "401 body text" + body_format_override: + json_format: + text: "401 filter formatter" + path: "%REQ(:path)%" + code: "%RESPONSE_CODE%" + body: "%LOCAL_REPLY_BODY%" + - filter: + status_code_filter: + comparison: + op: EQ + value: + default_value: 410 + runtime_key: key_b + status_code: 411 + body: + inline_string: "411 body text" + body_format: + text_format: "%LOCAL_REPLY_BODY% %RESPONSE_CODE% default formatter" +)"; + TestUtility::loadFromYaml(yaml, config_); + auto local = Factory::create(config_, context_); + + // code=400 matches the first filter; rewrite code and body + // has its own formatter + resetData(400); + local->rewrite(&request_headers_, response_headers_, stream_info_, code_, body_, content_type_); + EXPECT_EQ(code_, static_cast(401)); + EXPECT_EQ(stream_info_.response_code_, 401U); + EXPECT_EQ(response_headers_.Status()->value().getStringView(), "401"); + EXPECT_EQ(content_type_, "application/json"); + + const std::string expected = R"({ + "text": "401 filter formatter", + "path": "/bar/foo", + "code": 401, + "body": "401 body text" +})"; + EXPECT_TRUE(TestUtility::jsonStringEqual(body_, expected)); + + // code=410 matches the second filter; rewrite code and body + // but using default formatter + resetData(410); + local->rewrite(&request_headers_, response_headers_, stream_info_, code_, body_, content_type_); + EXPECT_EQ(code_, static_cast(411)); + EXPECT_EQ(stream_info_.response_code_, 411U); + EXPECT_EQ(response_headers_.Status()->value().getStringView(), "411"); + EXPECT_EQ(body_, "411 body text 411 default formatter"); + EXPECT_EQ(content_type_, "text/plain"); +} + +TEST_F(LocalReplyTest, TestHeaderAddition) { + // Default text formatter without any mappers + const std::string yaml = R"( + mappers: + - filter: + status_code_filter: + comparison: + op: 
GE + value: + default_value: 0 + runtime_key: key_b + headers_to_add: + - header: + key: foo-1 + value: bar1 + append: true + - header: + key: foo-2 + value: override-bar2 + append: false + - header: + key: foo-3 + value: append-bar3 + append: true +)"; + TestUtility::loadFromYaml(yaml, config_); + auto local = Factory::create(config_, context_); + + response_headers_.addCopy("foo-2", "bar2"); + response_headers_.addCopy("foo-3", "bar3"); + local->rewrite(nullptr, response_headers_, stream_info_, code_, body_, content_type_); + EXPECT_EQ(code_, TestInitCode); + EXPECT_EQ(stream_info_.response_code_, static_cast(TestInitCode)); + EXPECT_EQ(content_type_, "text/plain"); + + EXPECT_EQ(response_headers_.get_("foo-1"), "bar1"); + EXPECT_EQ(response_headers_.get_("foo-2"), "override-bar2"); + std::vector out; + Http::HeaderUtility::getAllOfHeader(response_headers_, "foo-3", out); + ASSERT_EQ(out.size(), 2); + ASSERT_EQ(out[0], "bar3"); + ASSERT_EQ(out[1], "append-bar3"); +} + +} // namespace LocalReply +} // namespace Envoy diff --git a/test/common/memory/BUILD b/test/common/memory/BUILD index 55aa793bcee1b..3123d827b9491 100644 --- a/test/common/memory/BUILD +++ b/test/common/memory/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( @@ -23,7 +23,7 @@ envoy_cc_test( "//source/common/memory:stats_lib", "//test/common/stats:stat_test_utility_lib", "//test/mocks/event:event_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:overload_manager_mocks", "//test/test_common:simulated_time_system_lib", "//test/test_common:utility_lib", ], diff --git a/test/common/memory/heap_shrinker_test.cc b/test/common/memory/heap_shrinker_test.cc index 68071e1e4f1ff..5889424f54355 100644 --- a/test/common/memory/heap_shrinker_test.cc +++ b/test/common/memory/heap_shrinker_test.cc @@ -4,7 +4,7 @@ #include 
"test/common/stats/stat_test_utility.h" #include "test/mocks/event/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/overload_manager.h" #include "test/test_common/simulated_time_system.h" #include "gmock/gmock.h" diff --git a/test/common/network/BUILD b/test/common/network/BUILD index 3f9b79c5e7a62..3e7b3941d1e3b 100644 --- a/test/common/network/BUILD +++ b/test/common/network/BUILD @@ -1,15 +1,15 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_benchmark_test", "envoy_cc_benchmark_binary", + "envoy_cc_fuzz_test", "envoy_cc_test", - "envoy_cc_test_binary", "envoy_cc_test_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test_library( @@ -21,11 +21,13 @@ envoy_cc_test_library( "//source/common/network:listener_lib", "//source/common/network:utility_lib", "//source/common/stats:stats_lib", + "//source/common/stream_info:stream_info_lib", "//test/mocks/network:network_mocks", "//test/mocks/server:server_mocks", "//test/test_common:environment_lib", "//test/test_common:network_utility_lib", "//test/test_common:simulated_time_system_lib", + "//test/test_common:test_runtime_lib", "//test/test_common:utility_lib", ], ) @@ -64,7 +66,6 @@ envoy_cc_test( name = "cidr_range_test", srcs = ["cidr_range_test.cc"], deps = [ - "//source/common/json:json_loader_lib", "//source/common/network:address_lib", "//source/common/network:cidr_range_lib", ], @@ -73,8 +74,6 @@ envoy_cc_test( envoy_cc_test( name = "connection_impl_test", srcs = ["connection_impl_test.cc"], - # Times out on Windows - tags = ["fails_on_windows"], deps = [ "//source/common/buffer:buffer_lib", "//source/common/common:empty_string", @@ -87,7 +86,6 @@ envoy_cc_test( "//test/mocks/buffer:buffer_mocks", "//test/mocks/event:event_mocks", "//test/mocks/network:network_mocks", - "//test/mocks/server:server_mocks", "//test/mocks/stats:stats_mocks", "//test/test_common:environment_lib", 
"//test/test_common:network_utility_lib", @@ -138,11 +136,12 @@ envoy_cc_test( "//test/mocks/buffer:buffer_mocks", "//test/mocks/network:network_mocks", "//test/mocks/ratelimit:ratelimit_mocks", - "//test/mocks/runtime:runtime_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", + "//test/mocks/server:instance_mocks", "//test/mocks/tracing:tracing_mocks", "//test/mocks/upstream:host_mocks", "//test/mocks/upstream:upstream_mocks", + "//test/test_common:test_runtime_lib", "//test/test_common:utility_lib", "@envoy_api//envoy/extensions/filters/network/ratelimit/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/network/tcp_proxy/v3:pkg_cc_proto", @@ -179,19 +178,38 @@ envoy_cc_test( envoy_cc_test( name = "listener_impl_test", srcs = ["listener_impl_test.cc"], - # Times out on Windows - tags = ["fails_on_windows"], deps = [ "//source/common/event:dispatcher_lib", "//source/common/network:address_lib", "//source/common/network:listener_lib", "//source/common/network:utility_lib", "//source/common/stats:stats_lib", + "//source/common/stream_info:stream_info_lib", "//test/common/network:listener_impl_test_base_lib", "//test/mocks/network:network_mocks", + "//test/mocks/runtime:runtime_mocks", + "//test/test_common:environment_lib", + "//test/test_common:network_utility_lib", + "//test/test_common:utility_lib", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + ], +) + +envoy_cc_test_library( + name = "udp_listener_impl_test_base_lib", + hdrs = ["udp_listener_impl_test_base.h"], + deps = [ + "//source/common/event:dispatcher_lib", + "//source/common/network:address_lib", + "//source/common/network:listener_lib", + "//source/common/network:utility_lib", + "//source/common/stats:stats_lib", + "//test/mocks/network:network_mocks", "//test/mocks/server:server_mocks", "//test/test_common:environment_lib", "//test/test_common:network_utility_lib", + "//test/test_common:simulated_time_system_lib", + 
"//test/test_common:test_runtime_lib", "//test/test_common:utility_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], @@ -200,11 +218,14 @@ envoy_cc_test( envoy_cc_test( name = "udp_listener_impl_test", srcs = ["udp_listener_impl_test.cc"], + tags = ["fails_on_windows"], deps = [ + ":udp_listener_impl_test_base_lib", "//source/common/event:dispatcher_lib", "//source/common/network:address_lib", "//source/common/network:listener_lib", "//source/common/network:socket_option_lib", + "//source/common/network:udp_packet_writer_handler_lib", "//source/common/network:utility_lib", "//source/common/stats:stats_lib", "//test/common/network:listener_impl_test_base_lib", @@ -218,6 +239,34 @@ envoy_cc_test( ], ) +envoy_cc_test( + name = "udp_listener_impl_batch_writer_test", + srcs = ["udp_listener_impl_batch_writer_test.cc"], + tags = [ + # Skipping as quiche quic_gso_batch_writer.h does not exist on Windows + "skip_on_windows", + ], + deps = [ + ":udp_listener_impl_test_base_lib", + "//source/common/event:dispatcher_lib", + "//source/common/network:address_lib", + "//source/common/network:listener_lib", + "//source/common/network:socket_option_lib", + "//source/common/network:udp_packet_writer_handler_lib", + "//source/common/network:utility_lib", + "//source/common/stats:stats_lib", + "//source/extensions/quic_listeners/quiche:udp_gso_batch_writer_lib", + "//test/common/network:listener_impl_test_base_lib", + "//test/mocks/network:network_mocks", + "//test/test_common:environment_lib", + "//test/test_common:network_utility_lib", + "//test/test_common:threadsafe_singleton_injector_lib", + "//test/test_common:utility_lib", + "@com_googlesource_quiche//:quic_test_tools_mock_syscall_wrapper_lib", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + ], +) + envoy_cc_test( name = "resolver_test", srcs = ["resolver_impl_test.cc"], @@ -296,7 +345,18 @@ envoy_cc_test( ], ) -envoy_cc_test_binary( +envoy_cc_fuzz_test( + name = "utility_fuzz_test", + srcs = 
["utility_fuzz_test.cc"], + corpus = "utility_corpus", + deps = [ + "//source/common/network:address_lib", + "//source/common/network:utility_lib", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + ], +) + +envoy_cc_benchmark_binary( name = "lc_trie_speed_test", srcs = ["lc_trie_speed_test.cc"], external_deps = [ @@ -308,10 +368,16 @@ envoy_cc_test_binary( ], ) +envoy_benchmark_test( + name = "lc_trie_speed_test_benchmark_test", + benchmark_binary = "lc_trie_speed_test", +) + envoy_cc_test( name = "io_socket_handle_impl_test", srcs = ["io_socket_handle_impl_test.cc"], deps = [ + "//source/common/common:utility_lib", "//source/common/network:address_lib", ], ) @@ -320,6 +386,7 @@ envoy_cc_test( name = "transport_socket_options_impl_test", srcs = ["transport_socket_options_impl_test.cc"], deps = [ + "//source/common/network:address_lib", "//source/common/network:transport_socket_options_lib", "//source/common/stream_info:filter_state_lib", ], diff --git a/test/common/network/addr_family_aware_socket_option_impl_test.cc b/test/common/network/addr_family_aware_socket_option_impl_test.cc index ce315917b80b8..ff0cabd40acc4 100644 --- a/test/common/network/addr_family_aware_socket_option_impl_test.cc +++ b/test/common/network/addr_family_aware_socket_option_impl_test.cc @@ -3,6 +3,7 @@ #include "common/network/addr_family_aware_socket_option_impl.h" #include "common/network/io_socket_handle_impl.h" +#include "common/network/socket_interface_impl.h" #include "common/network/utility.h" #include "test/common/network/socket_option_test.h" @@ -24,6 +25,7 @@ class AddrFamilyAwareSocketOptionImplTest : public SocketOptionTest { // We fail to set the option when the underlying setsockopt syscall fails. 
TEST_F(AddrFamilyAwareSocketOptionImplTest, SetOptionFailure) { + EXPECT_CALL(socket_, ipVersion).WillRepeatedly(testing::Return(absl::nullopt)); AddrFamilyAwareSocketOptionImpl socket_option{ envoy::config::core::v3::SocketOption::STATE_PREBIND, ENVOY_MAKE_SOCKET_OPTION_NAME(5, 10), @@ -32,23 +34,11 @@ TEST_F(AddrFamilyAwareSocketOptionImplTest, SetOptionFailure) { EXPECT_LOG_CONTAINS("warning", "Failed to set IP socket option on non-IP socket", EXPECT_FALSE(socket_option.setOption( socket_, envoy::config::core::v3::SocketOption::STATE_PREBIND))); - - Address::InstanceConstSharedPtr pipe_address = - std::make_shared("/foo"); - { - EXPECT_CALL(socket_, localAddress).WillRepeatedly(testing::ReturnRef(pipe_address)); - EXPECT_LOG_CONTAINS("warning", "Failed to set IP socket option on non-IP socket", - EXPECT_FALSE(socket_option.setOption( - socket_, envoy::config::core::v3::SocketOption::STATE_PREBIND))); - } } // If a platform supports IPv4 socket option variant for an IPv4 address, it works TEST_F(AddrFamilyAwareSocketOptionImplTest, SetOptionSuccess) { - Address::Ipv4Instance address("1.2.3.4", 5678); - IoHandlePtr io_handle = address.socket(Address::SocketType::Stream); - EXPECT_CALL(testing::Const(socket_), ioHandle()).WillRepeatedly(testing::ReturnRef(*io_handle)); - + EXPECT_CALL(socket_, ipVersion()).WillRepeatedly(testing::Return(Address::IpVersion::v4)); AddrFamilyAwareSocketOptionImpl socket_option{ envoy::config::core::v3::SocketOption::STATE_PREBIND, ENVOY_MAKE_SOCKET_OPTION_NAME(5, 10), @@ -60,12 +50,9 @@ TEST_F(AddrFamilyAwareSocketOptionImplTest, SetOptionSuccess) { // If a platform doesn't support IPv4 socket option variant for an IPv4 address we fail TEST_F(AddrFamilyAwareSocketOptionImplTest, V4EmptyOptionNames) { - Address::Ipv4Instance address("1.2.3.4", 5678); - IoHandlePtr io_handle = address.socket(Address::SocketType::Stream); - EXPECT_CALL(testing::Const(socket_), ioHandle()).WillRepeatedly(testing::ReturnRef(*io_handle)); + 
EXPECT_CALL(socket_, ipVersion()).WillRepeatedly(testing::Return(Address::IpVersion::v4)); AddrFamilyAwareSocketOptionImpl socket_option{ envoy::config::core::v3::SocketOption::STATE_PREBIND, {}, {}, 1}; - EXPECT_LOG_CONTAINS("warning", "Failed to set unsupported option on socket", EXPECT_FALSE(socket_option.setOption( socket_, envoy::config::core::v3::SocketOption::STATE_PREBIND))); @@ -73,12 +60,8 @@ TEST_F(AddrFamilyAwareSocketOptionImplTest, V4EmptyOptionNames) { // If a platform doesn't support IPv4 and IPv6 socket option variants for an IPv4 address, we fail TEST_F(AddrFamilyAwareSocketOptionImplTest, V6EmptyOptionNames) { - Address::Ipv6Instance address("::1:2:3:4", 5678); - IoHandlePtr io_handle = address.socket(Address::SocketType::Stream); - EXPECT_CALL(testing::Const(socket_), ioHandle()).WillRepeatedly(testing::ReturnRef(*io_handle)); AddrFamilyAwareSocketOptionImpl socket_option{ envoy::config::core::v3::SocketOption::STATE_PREBIND, {}, {}, 1}; - EXPECT_LOG_CONTAINS("warning", "Failed to set unsupported option on socket", EXPECT_FALSE(socket_option.setOption( socket_, envoy::config::core::v3::SocketOption::STATE_PREBIND))); @@ -87,10 +70,7 @@ TEST_F(AddrFamilyAwareSocketOptionImplTest, V6EmptyOptionNames) { // If a platform supports IPv4 and IPv6 socket option variants for an IPv4 address, we apply the // IPv4 variant TEST_F(AddrFamilyAwareSocketOptionImplTest, V4IgnoreV6) { - Address::Ipv4Instance address("1.2.3.4", 5678); - IoHandlePtr io_handle = address.socket(Address::SocketType::Stream); - EXPECT_CALL(testing::Const(socket_), ioHandle()).WillRepeatedly(testing::ReturnRef(*io_handle)); - + EXPECT_CALL(socket_, ipVersion()).WillRepeatedly(testing::Return(Address::IpVersion::v4)); AddrFamilyAwareSocketOptionImpl socket_option{ envoy::config::core::v3::SocketOption::STATE_PREBIND, ENVOY_MAKE_SOCKET_OPTION_NAME(5, 10), ENVOY_MAKE_SOCKET_OPTION_NAME(6, 11), 1}; @@ -100,10 +80,7 @@ TEST_F(AddrFamilyAwareSocketOptionImplTest, V4IgnoreV6) { // If a 
platform supports IPv6 socket option variant for an IPv6 address it works TEST_F(AddrFamilyAwareSocketOptionImplTest, V6Only) { - Address::Ipv6Instance address("::1:2:3:4", 5678); - IoHandlePtr io_handle = address.socket(Address::SocketType::Stream); - EXPECT_CALL(testing::Const(socket_), ioHandle()).WillRepeatedly(testing::ReturnRef(*io_handle)); - + EXPECT_CALL(socket_, ipVersion()).WillRepeatedly(testing::Return(Address::IpVersion::v6)); AddrFamilyAwareSocketOptionImpl socket_option{ envoy::config::core::v3::SocketOption::STATE_PREBIND, {}, @@ -116,10 +93,7 @@ TEST_F(AddrFamilyAwareSocketOptionImplTest, V6Only) { // If a platform supports only the IPv4 variant for an IPv6 address, // we apply the IPv4 variant. TEST_F(AddrFamilyAwareSocketOptionImplTest, V6OnlyV4Fallback) { - Address::Ipv6Instance address("::1:2:3:4", 5678); - IoHandlePtr io_handle = address.socket(Address::SocketType::Stream); - EXPECT_CALL(testing::Const(socket_), ioHandle()).WillRepeatedly(testing::ReturnRef(*io_handle)); - + EXPECT_CALL(socket_, ipVersion()).WillRepeatedly(testing::Return(Address::IpVersion::v6)); AddrFamilyAwareSocketOptionImpl socket_option{ envoy::config::core::v3::SocketOption::STATE_PREBIND, ENVOY_MAKE_SOCKET_OPTION_NAME(5, 10), @@ -132,10 +106,7 @@ TEST_F(AddrFamilyAwareSocketOptionImplTest, V6OnlyV4Fallback) { // If a platform supports IPv4 and IPv6 socket option variants for an IPv6 address, // AddrFamilyAwareSocketOptionImpl::setIpSocketOption() works with the IPv6 variant. 
TEST_F(AddrFamilyAwareSocketOptionImplTest, V6Precedence) { - Address::Ipv6Instance address("::1:2:3:4", 5678); - IoHandlePtr io_handle = address.socket(Address::SocketType::Stream); - EXPECT_CALL(testing::Const(socket_), ioHandle()).WillRepeatedly(testing::ReturnRef(*io_handle)); - + EXPECT_CALL(socket_, ipVersion()).WillRepeatedly(testing::Return(Address::IpVersion::v6)); AddrFamilyAwareSocketOptionImpl socket_option{ envoy::config::core::v3::SocketOption::STATE_PREBIND, ENVOY_MAKE_SOCKET_OPTION_NAME(5, 10), ENVOY_MAKE_SOCKET_OPTION_NAME(6, 11), 1}; @@ -145,10 +116,7 @@ TEST_F(AddrFamilyAwareSocketOptionImplTest, V6Precedence) { // GetSocketOptionName returns the v4 information for a v4 address TEST_F(AddrFamilyAwareSocketOptionImplTest, V4GetSocketOptionName) { - Address::Ipv4Instance address("1.2.3.4", 5678); - IoHandlePtr io_handle = address.socket(Address::SocketType::Stream); - EXPECT_CALL(testing::Const(socket_), ioHandle()).WillRepeatedly(testing::ReturnRef(*io_handle)); - + EXPECT_CALL(socket_, ipVersion()).WillRepeatedly(testing::Return(Address::IpVersion::v4)); AddrFamilyAwareSocketOptionImpl socket_option{ envoy::config::core::v3::SocketOption::STATE_PREBIND, ENVOY_MAKE_SOCKET_OPTION_NAME(5, 10), ENVOY_MAKE_SOCKET_OPTION_NAME(6, 11), 1}; @@ -160,10 +128,7 @@ TEST_F(AddrFamilyAwareSocketOptionImplTest, V4GetSocketOptionName) { // GetSocketOptionName returns the v4 information for a v6 address TEST_F(AddrFamilyAwareSocketOptionImplTest, V6GetSocketOptionName) { - Address::Ipv6Instance address("2::1", 5678); - IoHandlePtr io_handle = address.socket(Address::SocketType::Stream); - EXPECT_CALL(testing::Const(socket_), ioHandle()).WillRepeatedly(testing::ReturnRef(*io_handle)); - + EXPECT_CALL(socket_, ipVersion()).WillRepeatedly(testing::Return(Address::IpVersion::v6)); AddrFamilyAwareSocketOptionImpl socket_option{ envoy::config::core::v3::SocketOption::STATE_PREBIND, ENVOY_MAKE_SOCKET_OPTION_NAME(5, 10), ENVOY_MAKE_SOCKET_OPTION_NAME(6, 11), 5}; @@ -175,8 
+140,7 @@ TEST_F(AddrFamilyAwareSocketOptionImplTest, V6GetSocketOptionName) { // GetSocketOptionName returns nullopt if the state is wrong TEST_F(AddrFamilyAwareSocketOptionImplTest, GetSocketOptionWrongState) { - socket_.local_address_ = Utility::parseInternetAddress("2::1", 5678); - + EXPECT_CALL(socket_, ipVersion()).WillRepeatedly(testing::Return(Address::IpVersion::v6)); AddrFamilyAwareSocketOptionImpl socket_option{ envoy::config::core::v3::SocketOption::STATE_PREBIND, ENVOY_MAKE_SOCKET_OPTION_NAME(5, 10), ENVOY_MAKE_SOCKET_OPTION_NAME(6, 11), 5}; @@ -192,7 +156,7 @@ TEST_F(AddrFamilyAwareSocketOptionImplTest, GetSocketOptionCannotDetermineVersio ENVOY_MAKE_SOCKET_OPTION_NAME(6, 11), 5}; IoHandlePtr io_handle = std::make_unique(); - EXPECT_CALL(testing::Const(socket_), ioHandle()).WillOnce(testing::ReturnRef(*io_handle)); + EXPECT_CALL(socket_, ipVersion).WillOnce(testing::Return(absl::nullopt)); auto result = socket_option.getOptionDetails(socket_, envoy::config::core::v3::SocketOption::STATE_PREBIND); EXPECT_FALSE(result.has_value()); diff --git a/test/common/network/address_impl_test.cc b/test/common/network/address_impl_test.cc index 33997fc4bb0f7..127632fe3737c 100644 --- a/test/common/network/address_impl_test.cc +++ b/test/common/network/address_impl_test.cc @@ -9,6 +9,7 @@ #include "common/common/fmt.h" #include "common/common/utility.h" #include "common/network/address_impl.h" +#include "common/network/listen_socket_impl.h" #include "common/network/utility.h" #include "test/mocks/api/mocks.h" @@ -42,50 +43,47 @@ void testSocketBindAndConnect(Network::Address::IpVersion ip_version, bool v6onl ASSERT_NE(addr_port, nullptr); if (addr_port->ip()->port() == 0) { - addr_port = Network::Test::findOrCheckFreePort(addr_port, SocketType::Stream); + addr_port = Network::Test::findOrCheckFreePort(addr_port, Socket::Type::Stream); } ASSERT_NE(addr_port, nullptr); ASSERT_NE(addr_port->ip(), nullptr); // Create a socket on which we'll listen for connections from 
clients. - IoHandlePtr io_handle = addr_port->socket(SocketType::Stream); - ASSERT_GE(io_handle->fd(), 0) << addr_port->asString(); - auto& os_sys_calls = Api::OsSysCallsSingleton::get(); + SocketImpl sock(Socket::Type::Stream, addr_port); + ASSERT_GE(sock.ioHandle().fd(), 0) << addr_port->asString(); // Check that IPv6 sockets accept IPv6 connections only. if (addr_port->ip()->version() == IpVersion::v6) { int socket_v6only = 0; socklen_t size_int = sizeof(socket_v6only); - ASSERT_GE(os_sys_calls - .getsockopt(io_handle->fd(), IPPROTO_IPV6, IPV6_V6ONLY, &socket_v6only, &size_int) - .rc_, - 0); + ASSERT_GE(sock.getSocketOption(IPPROTO_IPV6, IPV6_V6ONLY, &socket_v6only, &size_int).rc_, 0); EXPECT_EQ(v6only, socket_v6only != 0); } // Bind the socket to the desired address and port. - const Api::SysCallIntResult result = addr_port->bind(io_handle->fd()); - ASSERT_EQ(result.rc_, 0) << addr_port->asString() << "\nerror: " << strerror(result.errno_) + const Api::SysCallIntResult result = sock.bind(addr_port); + ASSERT_EQ(result.rc_, 0) << addr_port->asString() << "\nerror: " << errorDetails(result.errno_) << "\nerrno: " << result.errno_; // Do a bare listen syscall. Not bothering to accept connections as that would // require another thread. - ASSERT_EQ(os_sys_calls.listen(io_handle->fd(), 128).rc_, 0); + ASSERT_EQ(sock.listen(128).rc_, 0); - auto client_connect = [&os_sys_calls](Address::InstanceConstSharedPtr addr_port) { + auto client_connect = [](Address::InstanceConstSharedPtr addr_port) { // Create a client socket and connect to the server. 
- IoHandlePtr client_handle = addr_port->socket(SocketType::Stream); - ASSERT_GE(client_handle->fd(), 0) << addr_port->asString(); + SocketImpl client_sock(Socket::Type::Stream, addr_port); + + ASSERT_GE(client_sock.ioHandle().fd(), 0) << addr_port->asString(); // Instance::socket creates a non-blocking socket, which that extends all the way to the // operation of ::connect(), so connect returns with errno==EWOULDBLOCK before the tcp // handshake can complete. For testing convenience, re-enable blocking on the socket // so that connect will wait for the handshake to complete. - ASSERT_EQ(os_sys_calls.setsocketblocking(client_handle->fd(), true).rc_, 0); + ASSERT_EQ(client_sock.setBlockingForTest(true).rc_, 0); // Connect to the server. - const Api::SysCallIntResult result = addr_port->connect(client_handle->fd()); - ASSERT_EQ(result.rc_, 0) << addr_port->asString() << "\nerror: " << strerror(result.errno_) + const Api::SysCallIntResult result = client_sock.connect(addr_port); + ASSERT_EQ(result.rc_, 0) << addr_port->asString() << "\nerror: " << errorDetails(result.errno_) << "\nerrno: " << result.errno_; }; @@ -321,17 +319,19 @@ TEST(PipeInstanceTest, Basic) { EXPECT_EQ(nullptr, address.ip()); } +#ifndef WIN32 TEST(PipeInstanceTest, BasicPermission) { std::string path = TestEnvironment::unixDomainSocketPath("foo.sock"); const mode_t mode = 0777; - PipeInstance address(path, mode); + PipeInstance pipe(path, mode); + InstanceConstSharedPtr address = std::make_shared(pipe); + SocketImpl sock(Socket::Type::Stream, address); - IoHandlePtr io_handle = address.socket(SocketType::Stream); - ASSERT_GE(io_handle->fd(), 0) << address.asString(); + ASSERT_GE(sock.ioHandle().fd(), 0) << pipe.asString(); - Api::SysCallIntResult result = address.bind(io_handle->fd()); - ASSERT_EQ(result.rc_, 0) << address.asString() << "\nerror: " << strerror(result.errno_) + Api::SysCallIntResult result = sock.bind(address); + ASSERT_EQ(result.rc_, 0) << pipe.asString() << "\nerror: " << 
errorDetails(result.errno_) << "\terrno: " << result.errno_; Api::OsSysCalls& os_sys_calls = Api::OsSysCallsSingleton::get(); @@ -341,8 +341,9 @@ TEST(PipeInstanceTest, BasicPermission) { // Get file permissions bits ASSERT_EQ(stat_buf.st_mode & 07777, mode) << path << std::oct << "\t" << (stat_buf.st_mode & 07777) << std::dec << "\t" - << (stat_buf.st_mode) << strerror(result.errno_); + << (stat_buf.st_mode) << errorDetails(result.errno_); } +#endif TEST(PipeInstanceTest, PermissionFail) { NiceMock os_sys_calls; @@ -350,14 +351,15 @@ TEST(PipeInstanceTest, PermissionFail) { std::string path = TestEnvironment::unixDomainSocketPath("foo.sock"); const mode_t mode = 0777; - PipeInstance address(path, mode); + PipeInstance pipe(path, mode); + InstanceConstSharedPtr address = std::make_shared(pipe); + SocketImpl sock(Socket::Type::Stream, address); + + ASSERT_GE(sock.ioHandle().fd(), 0) << pipe.asString(); - IoHandlePtr io_handle = address.socket(SocketType::Stream); - ASSERT_GE(io_handle->fd(), 0) << address.asString(); EXPECT_CALL(os_sys_calls, bind(_, _, _)).WillOnce(Return(Api::SysCallIntResult{0, 0})); EXPECT_CALL(os_sys_calls, chmod(_, _)).WillOnce(Return(Api::SysCallIntResult{-1, 0})); - EXPECT_THROW_WITH_REGEX(address.bind(io_handle->fd()), EnvoyException, - "Failed to create socket with mode"); + EXPECT_THROW_WITH_REGEX(sock.bind(address), EnvoyException, "Failed to create socket with mode"); } TEST(PipeInstanceTest, AbstractNamespacePermission) { @@ -421,12 +423,15 @@ TEST(PipeInstanceTest, EmbeddedNullPathError) { TEST(PipeInstanceTest, UnlinksExistingFile) { const auto bind_uds_socket = [](const std::string& path) { - PipeInstance address(path); - IoHandlePtr io_handle = address.socket(SocketType::Stream); - ASSERT_GE(io_handle->fd(), 0) << address.asString(); + PipeInstance pipe(path); + InstanceConstSharedPtr address = std::make_shared(pipe); + SocketImpl sock(Socket::Type::Stream, address); + + ASSERT_GE(sock.ioHandle().fd(), 0) << pipe.asString(); + + 
const Api::SysCallIntResult result = sock.bind(address); - const Api::SysCallIntResult result = address.bind(io_handle->fd()); - ASSERT_EQ(result.rc_, 0) << address.asString() << "\nerror: " << strerror(result.errno_) + ASSERT_EQ(result.rc_, 0) << pipe.asString() << "\nerror: " << errorDetails(result.errno_) << "\nerrno: " << result.errno_; }; @@ -443,9 +448,9 @@ TEST(AddressFromSockAddrDeathTest, IPv4) { EXPECT_EQ(1, inet_pton(AF_INET, "1.2.3.4", &sin.sin_addr)); sin.sin_port = htons(6502); - EXPECT_DEATH_LOG_TO_STDERR(addressFromSockAddr(ss, 1), "ss_len"); - EXPECT_DEATH_LOG_TO_STDERR(addressFromSockAddr(ss, sizeof(sockaddr_in) - 1), "ss_len"); - EXPECT_DEATH_LOG_TO_STDERR(addressFromSockAddr(ss, sizeof(sockaddr_in) + 1), "ss_len"); + EXPECT_DEATH(addressFromSockAddr(ss, 1), "ss_len"); + EXPECT_DEATH(addressFromSockAddr(ss, sizeof(sockaddr_in) - 1), "ss_len"); + EXPECT_DEATH(addressFromSockAddr(ss, sizeof(sockaddr_in) + 1), "ss_len"); EXPECT_EQ("1.2.3.4:6502", addressFromSockAddr(ss, sizeof(sockaddr_in))->asString()); @@ -462,9 +467,9 @@ TEST(AddressFromSockAddrDeathTest, IPv6) { EXPECT_EQ(1, inet_pton(AF_INET6, "01:023::00Ef", &sin6.sin6_addr)); sin6.sin6_port = htons(32000); - EXPECT_DEATH_LOG_TO_STDERR(addressFromSockAddr(ss, 1), "ss_len"); - EXPECT_DEATH_LOG_TO_STDERR(addressFromSockAddr(ss, sizeof(sockaddr_in6) - 1), "ss_len"); - EXPECT_DEATH_LOG_TO_STDERR(addressFromSockAddr(ss, sizeof(sockaddr_in6) + 1), "ss_len"); + EXPECT_DEATH(addressFromSockAddr(ss, 1), "ss_len"); + EXPECT_DEATH(addressFromSockAddr(ss, sizeof(sockaddr_in6) - 1), "ss_len"); + EXPECT_DEATH(addressFromSockAddr(ss, sizeof(sockaddr_in6) + 1), "ss_len"); EXPECT_EQ("[1:23::ef]:32000", addressFromSockAddr(ss, sizeof(sockaddr_in6))->asString()); @@ -485,9 +490,8 @@ TEST(AddressFromSockAddrDeathTest, Pipe) { StringUtil::strlcpy(sun.sun_path, "/some/path", sizeof sun.sun_path); - EXPECT_DEATH_LOG_TO_STDERR(addressFromSockAddr(ss, 1), "ss_len"); - EXPECT_DEATH_LOG_TO_STDERR(addressFromSockAddr(ss, 
offsetof(struct sockaddr_un, sun_path)), - "ss_len"); + EXPECT_DEATH(addressFromSockAddr(ss, 1), "ss_len"); + EXPECT_DEATH(addressFromSockAddr(ss, offsetof(struct sockaddr_un, sun_path)), "ss_len"); socklen_t ss_len = offsetof(struct sockaddr_un, sun_path) + 1 + strlen(sun.sun_path); EXPECT_EQ("/some/path", addressFromSockAddr(ss, ss_len)->asString()); diff --git a/test/common/network/cidr_range_test.cc b/test/common/network/cidr_range_test.cc index 51d9efd754353..5a30bf6cd18ed 100644 --- a/test/common/network/cidr_range_test.cc +++ b/test/common/network/cidr_range_test.cc @@ -378,197 +378,109 @@ TEST(Ipv6CidrRange, BigRange) { EXPECT_FALSE(rng.isInRange(Ipv6Instance("2001:0db8:85a4::"))); } -TEST(IpListTest, Errors) { - { - std::string json = R"EOF( - { - "ip_white_list": ["foo"] - } - )EOF"; - - Json::ObjectSharedPtr loader = Json::Factory::loadFromString(json); - EXPECT_THROW({ IpList wl(*loader, "ip_white_list"); }, EnvoyException); - } - - { - std::string json = R"EOF( - { - "ip_white_list": ["foo/bar"] - } - )EOF"; - - Json::ObjectSharedPtr loader = Json::Factory::loadFromString(json); - EXPECT_THROW({ IpList wl(*loader, "ip_white_list"); }, EnvoyException); - } - - { - std::string json = R"EOF( - { - "ip_white_list": ["192.168.1.1/33"] - } - )EOF"; - - Json::ObjectSharedPtr loader = Json::Factory::loadFromString(json); - EXPECT_THROW({ IpList wl(*loader, "ip_white_list"); }, EnvoyException); - } - - { - std::string json = R"EOF( - { - "ip_white_list": ["192.168.1.1"] - } - )EOF"; - - Json::ObjectSharedPtr loader = Json::Factory::loadFromString(json); - EXPECT_THROW({ IpList wl(*loader, "ip_white_list"); }, EnvoyException); +Protobuf::RepeatedPtrField +makeCidrRangeList(const std::vector>& ranges) { + Protobuf::RepeatedPtrField ret; + for (auto& range : ranges) { + auto new_element = ret.Add(); + new_element->set_address_prefix(range.first); + new_element->mutable_prefix_len()->set_value(range.second); } + return ret; +} +TEST(IpListTest, Errors) { { - 
std::string json = R"EOF( - { - "ip_white_list": ["::/129"] - } - )EOF"; - - Json::ObjectSharedPtr loader = Json::Factory::loadFromString(json); - EXPECT_THROW({ IpList wl(*loader, "ip_white_list"); }, EnvoyException); + EXPECT_THROW({ IpList list(makeCidrRangeList({{"foo", 0}})); }, EnvoyException); } } TEST(IpListTest, SpecificAddressAllowed) { - std::string json = R"EOF( - { - "ip_white_list": ["192.168.1.1/24"] - } - )EOF"; - - Json::ObjectSharedPtr loader = Json::Factory::loadFromString(json); - IpList wl(*loader, "ip_white_list"); + IpList list(makeCidrRangeList({{"192.168.1.1", 24}})); - EXPECT_TRUE(wl.contains(Address::Ipv4Instance("192.168.1.0"))); - EXPECT_TRUE(wl.contains(Address::Ipv4Instance("192.168.1.3"))); - EXPECT_TRUE(wl.contains(Address::Ipv4Instance("192.168.1.255"))); - EXPECT_FALSE(wl.contains(Address::Ipv4Instance("192.168.3.0"))); - EXPECT_FALSE(wl.contains(Address::Ipv4Instance("192.168.0.0"))); + EXPECT_TRUE(list.contains(Address::Ipv4Instance("192.168.1.0"))); + EXPECT_TRUE(list.contains(Address::Ipv4Instance("192.168.1.3"))); + EXPECT_TRUE(list.contains(Address::Ipv4Instance("192.168.1.255"))); + EXPECT_FALSE(list.contains(Address::Ipv4Instance("192.168.3.0"))); + EXPECT_FALSE(list.contains(Address::Ipv4Instance("192.168.0.0"))); } TEST(IpListTest, Normal) { - std::string json = R"EOF( - { - "ip_white_list": [ - "192.168.3.0/24", - "50.1.2.3/32", - "10.15.0.0/16" - ] - } - )EOF"; - - Json::ObjectSharedPtr loader = Json::Factory::loadFromString(json); - IpList wl(*loader, "ip_white_list"); - - EXPECT_TRUE(wl.contains(Address::Ipv4Instance("192.168.3.0"))); - EXPECT_TRUE(wl.contains(Address::Ipv4Instance("192.168.3.3"))); - EXPECT_TRUE(wl.contains(Address::Ipv4Instance("192.168.3.255"))); - EXPECT_FALSE(wl.contains(Address::Ipv4Instance("192.168.2.255"))); - EXPECT_FALSE(wl.contains(Address::Ipv4Instance("192.168.4.0"))); - - EXPECT_TRUE(wl.contains(Address::Ipv4Instance("50.1.2.3"))); - 
EXPECT_FALSE(wl.contains(Address::Ipv4Instance("50.1.2.2"))); - EXPECT_FALSE(wl.contains(Address::Ipv4Instance("50.1.2.4"))); - - EXPECT_TRUE(wl.contains(Address::Ipv4Instance("10.15.0.0"))); - EXPECT_TRUE(wl.contains(Address::Ipv4Instance("10.15.90.90"))); - EXPECT_TRUE(wl.contains(Address::Ipv4Instance("10.15.255.255"))); - EXPECT_FALSE(wl.contains(Address::Ipv4Instance("10.14.255.255"))); - EXPECT_FALSE(wl.contains(Address::Ipv4Instance("10.16.0.0"))); - - EXPECT_FALSE(wl.contains(Address::Ipv6Instance("::1"))); - EXPECT_FALSE(wl.contains(Address::PipeInstance("foo"))); + IpList list(makeCidrRangeList({{"192.168.3.0", 24}, {"50.1.2.3", 32}, {"10.15.0.0", 16}})); + + EXPECT_TRUE(list.contains(Address::Ipv4Instance("192.168.3.0"))); + EXPECT_TRUE(list.contains(Address::Ipv4Instance("192.168.3.3"))); + EXPECT_TRUE(list.contains(Address::Ipv4Instance("192.168.3.255"))); + EXPECT_FALSE(list.contains(Address::Ipv4Instance("192.168.2.255"))); + EXPECT_FALSE(list.contains(Address::Ipv4Instance("192.168.4.0"))); + + EXPECT_TRUE(list.contains(Address::Ipv4Instance("50.1.2.3"))); + EXPECT_FALSE(list.contains(Address::Ipv4Instance("50.1.2.2"))); + EXPECT_FALSE(list.contains(Address::Ipv4Instance("50.1.2.4"))); + + EXPECT_TRUE(list.contains(Address::Ipv4Instance("10.15.0.0"))); + EXPECT_TRUE(list.contains(Address::Ipv4Instance("10.15.90.90"))); + EXPECT_TRUE(list.contains(Address::Ipv4Instance("10.15.255.255"))); + EXPECT_FALSE(list.contains(Address::Ipv4Instance("10.14.255.255"))); + EXPECT_FALSE(list.contains(Address::Ipv4Instance("10.16.0.0"))); + + EXPECT_FALSE(list.contains(Address::Ipv6Instance("::1"))); + EXPECT_FALSE(list.contains(Address::PipeInstance("foo"))); } TEST(IpListTest, AddressVersionMix) { - std::string json = R"EOF( - { - "ip_white_list": [ - "192.168.3.0/24", - "2001:db8:85a3::/64", - "::1/128" - ] - } - )EOF"; - - Json::ObjectSharedPtr loader = Json::Factory::loadFromString(json); - IpList wl(*loader, "ip_white_list"); - - 
EXPECT_TRUE(wl.contains(Address::Ipv4Instance("192.168.3.0"))); - EXPECT_TRUE(wl.contains(Address::Ipv4Instance("192.168.3.3"))); - EXPECT_TRUE(wl.contains(Address::Ipv4Instance("192.168.3.255"))); - EXPECT_FALSE(wl.contains(Address::Ipv4Instance("192.168.2.255"))); - EXPECT_FALSE(wl.contains(Address::Ipv4Instance("192.168.4.0"))); - - EXPECT_TRUE(wl.contains(Address::Ipv6Instance("2001:db8:85a3::"))); - EXPECT_TRUE(wl.contains(Address::Ipv6Instance("2001:db8:85a3:0:1::"))); - EXPECT_TRUE(wl.contains(Address::Ipv6Instance("2001:db8:85a3::ffff:ffff:ffff:ffff"))); - EXPECT_TRUE(wl.contains(Address::Ipv6Instance("2001:db8:85a3::ffff"))); - EXPECT_TRUE(wl.contains(Address::Ipv6Instance("2001:db8:85a3::1"))); - EXPECT_FALSE(wl.contains(Address::Ipv6Instance("2001:db8:85a3:1::"))); - EXPECT_FALSE(wl.contains(Address::Ipv6Instance("2002:db8:85a3::"))); - - EXPECT_TRUE(wl.contains(Address::Ipv6Instance("::1"))); - EXPECT_FALSE(wl.contains(Address::Ipv6Instance("::"))); - - EXPECT_FALSE(wl.contains(Address::PipeInstance("foo"))); + IpList list(makeCidrRangeList({{"192.168.3.0", 24}, {"2001:db8:85a3::", 64}, {"::1", 128}})); + + EXPECT_TRUE(list.contains(Address::Ipv4Instance("192.168.3.0"))); + EXPECT_TRUE(list.contains(Address::Ipv4Instance("192.168.3.3"))); + EXPECT_TRUE(list.contains(Address::Ipv4Instance("192.168.3.255"))); + EXPECT_FALSE(list.contains(Address::Ipv4Instance("192.168.2.255"))); + EXPECT_FALSE(list.contains(Address::Ipv4Instance("192.168.4.0"))); + + EXPECT_TRUE(list.contains(Address::Ipv6Instance("2001:db8:85a3::"))); + EXPECT_TRUE(list.contains(Address::Ipv6Instance("2001:db8:85a3:0:1::"))); + EXPECT_TRUE(list.contains(Address::Ipv6Instance("2001:db8:85a3::ffff:ffff:ffff:ffff"))); + EXPECT_TRUE(list.contains(Address::Ipv6Instance("2001:db8:85a3::ffff"))); + EXPECT_TRUE(list.contains(Address::Ipv6Instance("2001:db8:85a3::1"))); + EXPECT_FALSE(list.contains(Address::Ipv6Instance("2001:db8:85a3:1::"))); + 
EXPECT_FALSE(list.contains(Address::Ipv6Instance("2002:db8:85a3::"))); + + EXPECT_TRUE(list.contains(Address::Ipv6Instance("::1"))); + EXPECT_FALSE(list.contains(Address::Ipv6Instance("::"))); + + EXPECT_FALSE(list.contains(Address::PipeInstance("foo"))); } TEST(IpListTest, MatchAny) { - std::string json = R"EOF( - { - "ip_white_list": [ - "0.0.0.0/0" - ] - } - )EOF"; - - Json::ObjectSharedPtr loader = Json::Factory::loadFromString(json); - IpList wl(*loader, "ip_white_list"); + IpList list(makeCidrRangeList({{"0.0.0.0", 0}})); - EXPECT_TRUE(wl.contains(Address::Ipv4Instance("192.168.3.3"))); - EXPECT_TRUE(wl.contains(Address::Ipv4Instance("192.168.3.0"))); - EXPECT_TRUE(wl.contains(Address::Ipv4Instance("192.168.3.255"))); - EXPECT_TRUE(wl.contains(Address::Ipv4Instance("192.168.0.0"))); - EXPECT_TRUE(wl.contains(Address::Ipv4Instance("192.0.0.0"))); - EXPECT_TRUE(wl.contains(Address::Ipv4Instance("1.1.1.1"))); + EXPECT_TRUE(list.contains(Address::Ipv4Instance("192.168.3.3"))); + EXPECT_TRUE(list.contains(Address::Ipv4Instance("192.168.3.0"))); + EXPECT_TRUE(list.contains(Address::Ipv4Instance("192.168.3.255"))); + EXPECT_TRUE(list.contains(Address::Ipv4Instance("192.168.0.0"))); + EXPECT_TRUE(list.contains(Address::Ipv4Instance("192.0.0.0"))); + EXPECT_TRUE(list.contains(Address::Ipv4Instance("1.1.1.1"))); - EXPECT_FALSE(wl.contains(Address::Ipv6Instance("::1"))); - EXPECT_FALSE(wl.contains(Address::PipeInstance("foo"))); + EXPECT_FALSE(list.contains(Address::Ipv6Instance("::1"))); + EXPECT_FALSE(list.contains(Address::PipeInstance("foo"))); } TEST(IpListTest, MatchAnyAll) { - std::string json = R"EOF( - { - "ip_white_list": [ - "0.0.0.0/0", - "::/0" - ] - } - )EOF"; - - Json::ObjectSharedPtr loader = Json::Factory::loadFromString(json); - IpList wl(*loader, "ip_white_list"); + IpList list(makeCidrRangeList({{"0.0.0.0", 0}, {"::", 0}})); - EXPECT_TRUE(wl.contains(Address::Ipv4Instance("192.168.3.3"))); - 
EXPECT_TRUE(wl.contains(Address::Ipv4Instance("192.168.3.0"))); - EXPECT_TRUE(wl.contains(Address::Ipv4Instance("192.168.3.255"))); - EXPECT_TRUE(wl.contains(Address::Ipv4Instance("192.168.0.0"))); - EXPECT_TRUE(wl.contains(Address::Ipv4Instance("192.0.0.0"))); - EXPECT_TRUE(wl.contains(Address::Ipv4Instance("1.1.1.1"))); + EXPECT_TRUE(list.contains(Address::Ipv4Instance("192.168.3.3"))); + EXPECT_TRUE(list.contains(Address::Ipv4Instance("192.168.3.0"))); + EXPECT_TRUE(list.contains(Address::Ipv4Instance("192.168.3.255"))); + EXPECT_TRUE(list.contains(Address::Ipv4Instance("192.168.0.0"))); + EXPECT_TRUE(list.contains(Address::Ipv4Instance("192.0.0.0"))); + EXPECT_TRUE(list.contains(Address::Ipv4Instance("1.1.1.1"))); - EXPECT_TRUE(wl.contains(Address::Ipv6Instance("::1"))); - EXPECT_TRUE(wl.contains(Address::Ipv6Instance("::"))); - EXPECT_TRUE(wl.contains(Address::Ipv6Instance("2001:db8:85a3::"))); - EXPECT_TRUE(wl.contains(Address::Ipv6Instance("ffee::"))); + EXPECT_TRUE(list.contains(Address::Ipv6Instance("::1"))); + EXPECT_TRUE(list.contains(Address::Ipv6Instance("::"))); + EXPECT_TRUE(list.contains(Address::Ipv6Instance("2001:db8:85a3::"))); + EXPECT_TRUE(list.contains(Address::Ipv6Instance("ffee::"))); - EXPECT_FALSE(wl.contains(Address::PipeInstance("foo"))); + EXPECT_FALSE(list.contains(Address::PipeInstance("foo"))); } } // namespace diff --git a/test/common/network/connection_impl_test.cc b/test/common/network/connection_impl_test.cc index ba7c7092c7ded..c4ea2f60c4fdd 100644 --- a/test/common/network/connection_impl_test.cc +++ b/test/common/network/connection_impl_test.cc @@ -19,7 +19,6 @@ #include "test/mocks/buffer/mocks.h" #include "test/mocks/event/mocks.h" #include "test/mocks/network/mocks.h" -#include "test/mocks/server/mocks.h" #include "test/mocks/stats/mocks.h" #include "test/test_common/environment.h" #include "test/test_common/network_utility.h" @@ -86,13 +85,19 @@ TEST_P(ConnectionImplDeathTest, BadFd) { Event::DispatcherPtr 
dispatcher(api->allocateDispatcher("test_thread")); IoHandlePtr io_handle = std::make_unique(); StreamInfo::StreamInfoImpl stream_info(dispatcher->timeSource()); - EXPECT_DEATH_LOG_TO_STDERR( + EXPECT_DEATH( ConnectionImpl(*dispatcher, std::make_unique(std::move(io_handle), nullptr, nullptr), Network::Test::createRawBufferSocket(), stream_info, false), - ".*assert failure: SOCKET_VALID\\(ioHandle\\(\\)\\.fd\\(\\)\\).*"); + ".*assert failure: SOCKET_VALID\\(ConnectionImpl::ioHandle\\(\\)\\.fd\\(\\)\\).*"); } +class TestClientConnectionImpl : public Network::ClientConnectionImpl { +public: + using ClientConnectionImpl::ClientConnectionImpl; + Buffer::WatermarkBuffer& readBuffer() { return read_buffer_; } +}; + class ConnectionImplTest : public testing::TestWithParam { protected: ConnectionImplTest() : api_(Api::createApiForTest(time_system_)), stream_info_(time_system_) {} @@ -101,12 +106,12 @@ class ConnectionImplTest : public testing::TestWithParam { if (dispatcher_.get() == nullptr) { dispatcher_ = api_->allocateDispatcher("test_thread"); } - socket_ = std::make_shared(Network::Test::getAnyAddress(GetParam()), - nullptr, true); + socket_ = std::make_shared( + Network::Test::getCanonicalLoopbackAddress(GetParam()), nullptr, true); listener_ = dispatcher_->createListener(socket_, listener_callbacks_, true); - client_connection_ = dispatcher_->createClientConnection( - socket_->localAddress(), source_address_, Network::Test::createRawBufferSocket(), - socket_options_); + client_connection_ = std::make_unique( + *dispatcher_, socket_->localAddress(), source_address_, + Network::Test::createRawBufferSocket(), socket_options_); client_connection_->addConnectionCallbacks(client_callbacks_); EXPECT_EQ(nullptr, client_connection_->ssl()); const Network::ClientConnection& const_connection = *client_connection_; @@ -166,16 +171,16 @@ class ConnectionImplTest : public testing::TestWithParam { dispatcher_ = api_->allocateDispatcher("test_thread", 
Buffer::WatermarkFactoryPtr{factory}); // The first call to create a client session will get a MockBuffer. // Other calls for server sessions will by default get a normal OwnedImpl. - EXPECT_CALL(*factory, create_(_, _)) + EXPECT_CALL(*factory, create_(_, _, _)) .Times(AnyNumber()) - .WillOnce(Invoke([&](std::function below_low, - std::function above_high) -> Buffer::Instance* { - client_write_buffer_ = new MockWatermarkBuffer(below_low, above_high); + .WillOnce(Invoke([&](std::function below_low, std::function above_high, + std::function above_overflow) -> Buffer::Instance* { + client_write_buffer_ = new MockWatermarkBuffer(below_low, above_high, above_overflow); return client_write_buffer_; })) - .WillRepeatedly(Invoke([](std::function below_low, - std::function above_high) -> Buffer::Instance* { - return new Buffer::WatermarkBuffer(below_low, above_high); + .WillRepeatedly(Invoke([](std::function below_low, std::function above_high, + std::function above_overflow) -> Buffer::Instance* { + return new Buffer::WatermarkBuffer(below_low, above_high, above_overflow); })); } @@ -188,19 +193,22 @@ class ConnectionImplTest : public testing::TestWithParam { Event::FileReadyCb* file_ready_cb_; }; - ConnectionMocks createConnectionMocks() { + ConnectionMocks createConnectionMocks(bool create_timer = true) { auto dispatcher = std::make_unique>(); - EXPECT_CALL(dispatcher->buffer_factory_, create_(_, _)) - .WillRepeatedly(Invoke([](std::function below_low, - std::function above_high) -> Buffer::Instance* { + EXPECT_CALL(dispatcher->buffer_factory_, create_(_, _, _)) + .WillRepeatedly(Invoke([](std::function below_low, std::function above_high, + std::function above_overflow) -> Buffer::Instance* { // ConnectionImpl calls Envoy::MockBufferFactory::create(), which calls create_() and // wraps the returned raw pointer below with a unique_ptr. 
- return new Buffer::WatermarkBuffer(below_low, above_high); + return new Buffer::WatermarkBuffer(below_low, above_high, above_overflow); })); - // This timer will be returned (transferring ownership) to the ConnectionImpl when createTimer() - // is called to allocate the delayed close timer. - Event::MockTimer* timer = new Event::MockTimer(dispatcher.get()); + Event::MockTimer* timer = nullptr; + if (create_timer) { + // This timer will be returned (transferring ownership) to the ConnectionImpl when + // createTimer() is called to allocate the delayed close timer. + timer = new Event::MockTimer(dispatcher.get()); + } NiceMock* file_event = new NiceMock; EXPECT_CALL(*dispatcher, createFileEvent_(0, _, _, _)) @@ -212,6 +220,9 @@ class ConnectionImplTest : public testing::TestWithParam { return ConnectionMocks{std::move(dispatcher), timer, std::move(transport_socket), file_event, &file_ready_cb_}; } + Network::TestClientConnectionImpl* testClientConnection() { + return dynamic_cast(client_connection_.get()); + } Event::FileReadyCb file_ready_cb_; Event::SimulatedTimeSystem time_system_; @@ -280,8 +291,8 @@ TEST_P(ConnectionImplTest, ImmediateConnectError) { // Using a broadcast/multicast address as the connection destinations address causes an // immediate error return from connect(). Address::InstanceConstSharedPtr broadcast_address; - socket_ = std::make_shared(Network::Test::getAnyAddress(GetParam()), - nullptr, true); + socket_ = std::make_shared( + Network::Test::getCanonicalLoopbackAddress(GetParam()), nullptr, true); if (socket_->localAddress()->ip()->version() == Address::IpVersion::v4) { broadcast_address = std::make_shared("224.0.0.1", 0); } else { @@ -482,24 +493,41 @@ TEST_P(ConnectionImplTest, ConnectionStats) { // Ensure the new counter logic in ReadDisable avoids tripping asserts in ReadDisable guarding // against actual enabling twice in a row. 
TEST_P(ConnectionImplTest, ReadDisable) { - setUpBasicConnection(); - - client_connection_->readDisable(true); - client_connection_->readDisable(false); + ConnectionMocks mocks = createConnectionMocks(false); + IoHandlePtr io_handle = std::make_unique(0); + auto connection = std::make_unique( + *mocks.dispatcher_, + std::make_unique(std::move(io_handle), nullptr, nullptr), + std::move(mocks.transport_socket_), stream_info_, true); - client_connection_->readDisable(true); - client_connection_->readDisable(true); - client_connection_->readDisable(false); - client_connection_->readDisable(false); + EXPECT_CALL(*mocks.file_event_, setEnabled(_)); + connection->readDisable(true); + EXPECT_CALL(*mocks.file_event_, setEnabled(_)); + connection->readDisable(false); + + EXPECT_CALL(*mocks.file_event_, setEnabled(_)); + connection->readDisable(true); + EXPECT_CALL(*mocks.file_event_, setEnabled(_)).Times(0); + connection->readDisable(true); + EXPECT_CALL(*mocks.file_event_, setEnabled(_)).Times(0); + connection->readDisable(false); + EXPECT_CALL(*mocks.file_event_, setEnabled(_)); + connection->readDisable(false); + + EXPECT_CALL(*mocks.file_event_, setEnabled(_)); + connection->readDisable(true); + EXPECT_CALL(*mocks.file_event_, setEnabled(_)).Times(0); + connection->readDisable(true); + EXPECT_CALL(*mocks.file_event_, setEnabled(_)).Times(0); + connection->readDisable(false); + EXPECT_CALL(*mocks.file_event_, setEnabled(_)).Times(0); + connection->readDisable(true); + EXPECT_CALL(*mocks.file_event_, setEnabled(_)).Times(0); + connection->readDisable(false); + EXPECT_CALL(*mocks.file_event_, setEnabled(_)); + connection->readDisable(false); - client_connection_->readDisable(true); - client_connection_->readDisable(true); - client_connection_->readDisable(false); - client_connection_->readDisable(true); - client_connection_->readDisable(false); - client_connection_->readDisable(false); - - disconnect(false); + connection->close(ConnectionCloseType::NoFlush); } // The HTTP/1 
codec handles pipelined connections by relying on readDisable(false) resulting in the @@ -722,7 +750,7 @@ TEST_P(ConnectionImplTest, HalfCloseNoEarlyCloseDetection) { } // Test that as watermark levels are changed, the appropriate callbacks are triggered. -TEST_P(ConnectionImplTest, Watermarks) { +TEST_P(ConnectionImplTest, WriteWatermarks) { useMockBuffer(); setUpBasicConnection(); @@ -771,6 +799,120 @@ TEST_P(ConnectionImplTest, Watermarks) { disconnect(false); } +// Test that as watermark levels are changed, the appropriate callbacks are triggered. +TEST_P(ConnectionImplTest, ReadWatermarks) { + + setUpBasicConnection(); + client_connection_->setBufferLimits(2); + std::shared_ptr client_read_filter(new NiceMock()); + client_connection_->addReadFilter(client_read_filter); + connect(); + + auto on_filter_data_exit = [&](Buffer::Instance&, bool) -> FilterStatus { + dispatcher_->exit(); + return FilterStatus::StopIteration; + }; + + EXPECT_FALSE(testClientConnection()->readBuffer().highWatermarkTriggered()); + EXPECT_TRUE(client_connection_->readEnabled()); + // Add 4 bytes to the buffer and verify the connection becomes read disabled. + { + Buffer::OwnedImpl buffer("data"); + server_connection_->write(buffer, false); + EXPECT_CALL(*client_read_filter, onData(_, false)).WillOnce(Invoke(on_filter_data_exit)); + dispatcher_->run(Event::Dispatcher::RunType::Block); + + EXPECT_TRUE(testClientConnection()->readBuffer().highWatermarkTriggered()); + EXPECT_FALSE(client_connection_->readEnabled()); + } + + // Drain 3 bytes from the buffer. This bring sit below the low watermark, and + // read enables, as well as triggering a kick for the remaining byte. 
+ { + testClientConnection()->readBuffer().drain(3); + EXPECT_FALSE(testClientConnection()->readBuffer().highWatermarkTriggered()); + EXPECT_TRUE(client_connection_->readEnabled()); + + EXPECT_CALL(*client_read_filter, onData(_, false)); + dispatcher_->run(Event::Dispatcher::RunType::NonBlock); + } + + // Add 3 bytes to the buffer and verify the connection becomes read disabled + // again. + { + Buffer::OwnedImpl buffer("bye"); + server_connection_->write(buffer, false); + EXPECT_CALL(*client_read_filter, onData(_, false)).WillOnce(Invoke(on_filter_data_exit)); + dispatcher_->run(Event::Dispatcher::RunType::Block); + + EXPECT_TRUE(testClientConnection()->readBuffer().highWatermarkTriggered()); + EXPECT_FALSE(client_connection_->readEnabled()); + } + + // Now have the consumer read disable. + // This time when the buffer is drained, there will be no kick as the consumer + // does not want to read. + { + client_connection_->readDisable(true); + testClientConnection()->readBuffer().drain(3); + EXPECT_FALSE(testClientConnection()->readBuffer().highWatermarkTriggered()); + EXPECT_FALSE(client_connection_->readEnabled()); + + EXPECT_CALL(*client_read_filter, onData(_, false)).Times(0); + dispatcher_->run(Event::Dispatcher::RunType::NonBlock); + } + + // Now read enable again. + // Inside the onData call, readDisable and readEnable. This should trigger + // another kick on the next dispatcher loop, so onData gets called twice. + { + client_connection_->readDisable(false); + EXPECT_CALL(*client_read_filter, onData(_, false)) + .Times(2) + .WillOnce(Invoke([&](Buffer::Instance&, bool) -> FilterStatus { + client_connection_->readDisable(true); + client_connection_->readDisable(false); + return FilterStatus::StopIteration; + })) + .WillRepeatedly(Invoke(on_filter_data_exit)); + dispatcher_->run(Event::Dispatcher::RunType::Block); + } + + // Test the same logic for dispatched_buffered_data from the + // onReadReady() (read_disable_count_ != 0) path. 
+ { + // Fill the buffer and verify the socket is read disabled. + Buffer::OwnedImpl buffer("bye"); + server_connection_->write(buffer, false); + EXPECT_CALL(*client_read_filter, onData(_, false)) + .WillOnce(Invoke([&](Buffer::Instance&, bool) -> FilterStatus { + dispatcher_->exit(); + return FilterStatus::StopIteration; + })); + dispatcher_->run(Event::Dispatcher::RunType::Block); + EXPECT_TRUE(testClientConnection()->readBuffer().highWatermarkTriggered()); + EXPECT_FALSE(client_connection_->readEnabled()); + + // Read disable and read enable, to set dispatch_buffered_data_ true. + client_connection_->readDisable(true); + client_connection_->readDisable(false); + // Now event loop. This hits the early on-Read path. As above, read + // disable and read enable from inside the stack of onData, to ensure that + // dispatch_buffered_data_ works correctly. + EXPECT_CALL(*client_read_filter, onData(_, false)) + .Times(2) + .WillOnce(Invoke([&](Buffer::Instance&, bool) -> FilterStatus { + client_connection_->readDisable(true); + client_connection_->readDisable(false); + return FilterStatus::StopIteration; + })) + .WillRepeatedly(Invoke(on_filter_data_exit)); + dispatcher_->run(Event::Dispatcher::RunType::Block); + } + + disconnect(true); +} + // Write some data to the connection. It will automatically attempt to flush // it to the upstream file descriptor via a write() call to buffer_, which is // configured to succeed and accept all bytes read. 
@@ -988,8 +1130,8 @@ TEST_P(ConnectionImplTest, BindFailureTest) { new Network::Address::Ipv6Instance(address_string, 0)}; } dispatcher_ = api_->allocateDispatcher("test_thread"); - socket_ = std::make_shared(Network::Test::getAnyAddress(GetParam()), - nullptr, true); + socket_ = std::make_shared( + Network::Test::getCanonicalLoopbackAddress(GetParam()), nullptr, true); listener_ = dispatcher_->createListener(socket_, listener_callbacks_, true); client_connection_ = dispatcher_->createClientConnection( @@ -1267,10 +1409,10 @@ TEST_P(ConnectionImplTest, FlushWriteAndDelayConfigDisabledTest) { NiceMock callbacks; NiceMock dispatcher; - EXPECT_CALL(dispatcher.buffer_factory_, create_(_, _)) - .WillRepeatedly(Invoke([](std::function below_low, - std::function above_high) -> Buffer::Instance* { - return new Buffer::WatermarkBuffer(below_low, above_high); + EXPECT_CALL(dispatcher.buffer_factory_, create_(_, _, _)) + .WillRepeatedly(Invoke([](std::function below_low, std::function above_high, + std::function above_overflow) -> Buffer::Instance* { + return new Buffer::WatermarkBuffer(below_low, above_high, above_overflow); })); IoHandlePtr io_handle = std::make_unique(0); std::unique_ptr server_connection(new Network::ConnectionImpl( @@ -1309,6 +1451,11 @@ TEST_P(ConnectionImplTest, DelayedCloseTimerResetWithPendingWriteBufferFlushes) std::make_unique(std::move(io_handle), nullptr, nullptr), std::move(mocks.transport_socket_), stream_info_, true); +#ifndef NDEBUG + // Ignore timer enabled() calls used to check timer state in ASSERTs. + EXPECT_CALL(*mocks.timer_, enabled()).Times(AnyNumber()); +#endif + InSequence s1; // The actual timeout is insignificant, we just need to enable delayed close processing by // setting it to > 0. 
@@ -1331,18 +1478,114 @@ TEST_P(ConnectionImplTest, DelayedCloseTimerResetWithPendingWriteBufferFlushes) // The write ready event cb (ConnectionImpl::onWriteReady()) will reset the timer to its // original timeout value to avoid triggering while the write buffer is being actively flushed. EXPECT_CALL(*transport_socket, doWrite(BufferStringEqual("data"), _)) - .WillOnce(Invoke([&](Buffer::Instance&, bool) -> IoResult { + .WillOnce(Invoke([&](Buffer::Instance& buffer, bool) -> IoResult { // Partial flush. - return IoResult{PostIoAction::KeepOpen, 1, false}; + uint64_t bytes_drained = 1; + buffer.drain(bytes_drained); + return IoResult{PostIoAction::KeepOpen, bytes_drained, false}; })); EXPECT_CALL(*mocks.timer_, enableTimer(timeout, _)).Times(1); (*mocks.file_ready_cb_)(Event::FileReadyType::Write); + EXPECT_CALL(*transport_socket, doWrite(BufferStringEqual("ata"), _)) + .WillOnce(Invoke([&](Buffer::Instance& buffer, bool) -> IoResult { + // Flush the entire buffer. + uint64_t bytes_drained = buffer.length(); + buffer.drain(buffer.length()); + return IoResult{PostIoAction::KeepOpen, bytes_drained, false}; + })); + EXPECT_CALL(*mocks.timer_, enableTimer(timeout, _)).Times(1); + (*mocks.file_ready_cb_)(Event::FileReadyType::Write); + + // Force the delayed close timeout to trigger so the connection is cleaned up. + mocks.timer_->invokeCallback(); +} + +// Test that the delayed close timer is not reset by spurious fd Write events that either consume 0 +// bytes from the output buffer or are delivered after close(FlushWriteAndDelay). 
+TEST_P(ConnectionImplTest, IgnoreSpuriousFdWriteEventsDuringFlushWriteAndDelay) { + ConnectionMocks mocks = createConnectionMocks(); + MockTransportSocket* transport_socket = mocks.transport_socket_.get(); + IoHandlePtr io_handle = std::make_unique(0); + auto server_connection = std::make_unique( + *mocks.dispatcher_, + std::make_unique(std::move(io_handle), nullptr, nullptr), + std::move(mocks.transport_socket_), stream_info_, true); + +#ifndef NDEBUG + // Ignore timer enabled() calls used to check timer state in ASSERTs. + EXPECT_CALL(*mocks.timer_, enabled()).Times(AnyNumber()); +#endif + + InSequence s1; + // The actual timeout is insignificant, we just need to enable delayed close processing by + // setting it to > 0. + auto timeout = std::chrono::milliseconds(100); + server_connection->setDelayedCloseTimeout(timeout); + + EXPECT_CALL(*mocks.file_event_, activate(Event::FileReadyType::Write)) + .WillOnce(Invoke(*mocks.file_ready_cb_)); + EXPECT_CALL(*transport_socket, doWrite(BufferStringEqual("data"), _)) + .WillOnce(Invoke([&](Buffer::Instance&, bool) -> IoResult { + // Do not drain the buffer and return 0 bytes processed to simulate backpressure. + return IoResult{PostIoAction::KeepOpen, 0, false}; + })); + Buffer::OwnedImpl data("data"); + server_connection->write(data, false); + + EXPECT_CALL(*mocks.timer_, enableTimer(timeout, _)).Times(1); + server_connection->close(ConnectionCloseType::FlushWriteAndDelay); + + // The write ready event cb (ConnectionImpl::onWriteReady()) will reset the timer to its + // original timeout value to avoid triggering while the write buffer is being actively flushed. EXPECT_CALL(*transport_socket, doWrite(BufferStringEqual("data"), _)) + .WillOnce(Invoke([&](Buffer::Instance& buffer, bool) -> IoResult { + // Partial flush. 
+ uint64_t bytes_drained = 1; + buffer.drain(bytes_drained); + return IoResult{PostIoAction::KeepOpen, bytes_drained, false}; + })); + EXPECT_CALL(*mocks.timer_, enableTimer(timeout, _)).Times(1); + (*mocks.file_ready_cb_)(Event::FileReadyType::Write); + + // Handle a write event and drain 0 bytes from the buffer. Verify that the timer is not reset. + EXPECT_CALL(*transport_socket, doWrite(BufferStringEqual("ata"), _)) + .WillOnce(Invoke([&](Buffer::Instance&, bool) -> IoResult { + // Don't consume any bytes. + return IoResult{PostIoAction::KeepOpen, 0, false}; + })); + EXPECT_CALL(*mocks.timer_, enableTimer(timeout, _)).Times(0); + (*mocks.file_ready_cb_)(Event::FileReadyType::Write); + + // Handle a write event and drain the remainder of the buffer. Verify that the timer is reset. + EXPECT_CALL(*transport_socket, doWrite(BufferStringEqual("ata"), _)) .WillOnce(Invoke([&](Buffer::Instance& buffer, bool) -> IoResult { // Flush the entire buffer. + ASSERT(buffer.length() > 0); + uint64_t bytes_drained = buffer.length(); buffer.drain(buffer.length()); - return IoResult{PostIoAction::KeepOpen, buffer.length(), false}; + EXPECT_EQ(server_connection->state(), Connection::State::Closing); + return IoResult{PostIoAction::KeepOpen, bytes_drained, false}; + })); + EXPECT_CALL(*mocks.timer_, enableTimer(timeout, _)).Times(1); + (*mocks.file_ready_cb_)(Event::FileReadyType::Write); + + // Handle a write event after entering the half-closed state. Verify that the timer is not reset + // because write consumed 0 bytes from the empty buffer. 
+ EXPECT_CALL(*transport_socket, doWrite(BufferStringEqual(""), _)) + .WillOnce(Invoke([&](Buffer::Instance&, bool) -> IoResult { + EXPECT_EQ(server_connection->state(), Connection::State::Closing); + return IoResult{PostIoAction::KeepOpen, 0, false}; + })); + EXPECT_CALL(*mocks.timer_, enableTimer(timeout, _)).Times(0); + (*mocks.file_ready_cb_)(Event::FileReadyType::Write); + + // Handle a write event that somehow drains bytes from an empty output buffer. Since + // some bytes were consumed, the timer is reset. + EXPECT_CALL(*transport_socket, doWrite(BufferStringEqual(""), _)) + .WillOnce(Invoke([&](Buffer::Instance&, bool) -> IoResult { + EXPECT_EQ(server_connection->state(), Connection::State::Closing); + return IoResult{PostIoAction::KeepOpen, 1, false}; })); EXPECT_CALL(*mocks.timer_, enableTimer(timeout, _)).Times(1); (*mocks.file_ready_cb_)(Event::FileReadyType::Write); @@ -1455,10 +1698,10 @@ class FakeReadFilter : public Network::ReadFilter { class MockTransportConnectionImplTest : public testing::Test { public: MockTransportConnectionImplTest() : stream_info_(dispatcher_.timeSource()) { - EXPECT_CALL(dispatcher_.buffer_factory_, create_(_, _)) - .WillRepeatedly(Invoke([](std::function below_low, - std::function above_high) -> Buffer::Instance* { - return new Buffer::WatermarkBuffer(below_low, above_high); + EXPECT_CALL(dispatcher_.buffer_factory_, create_(_, _, _)) + .WillRepeatedly(Invoke([](std::function below_low, std::function above_high, + std::function above_overflow) -> Buffer::Instance* { + return new Buffer::WatermarkBuffer(below_low, above_high, above_overflow); })); file_event_ = new Event::MockFileEvent; @@ -1993,8 +2236,8 @@ class ReadBufferLimitTest : public ConnectionImplTest { void readBufferLimitTest(uint32_t read_buffer_limit, uint32_t expected_chunk_size) { const uint32_t buffer_size = 256 * 1024; dispatcher_ = api_->allocateDispatcher("test_thread"); - socket_ = std::make_shared(Network::Test::getAnyAddress(GetParam()), - nullptr, 
true); + socket_ = std::make_shared( + Network::Test::getCanonicalLoopbackAddress(GetParam()), nullptr, true); listener_ = dispatcher_->createListener(socket_, listener_callbacks_, true); client_connection_ = dispatcher_->createClientConnection( diff --git a/test/common/network/dns_impl_test.cc b/test/common/network/dns_impl_test.cc index 38abf99b904a7..df6aed9816bbc 100644 --- a/test/common/network/dns_impl_test.cc +++ b/test/common/network/dns_impl_test.cc @@ -1,7 +1,6 @@ #include #include #include -#include #include #include "envoy/common/platform.h" @@ -27,6 +26,7 @@ #include "test/test_common/utility.h" #include "absl/container/fixed_array.h" +#include "absl/container/node_hash_map.h" #include "ares.h" #include "ares_dns.h" #include "gtest/gtest.h" @@ -53,9 +53,9 @@ namespace { // List of IP address (in human readable format). using IpList = std::list; // Map from hostname to IpList. -using HostMap = std::unordered_map; +using HostMap = absl::node_hash_map; // Map from hostname to CNAME -using CNameMap = std::unordered_map; +using CNameMap = absl::node_hash_map; // Represents a single TestDnsServer query state and lifecycle. This implements // just enough of RFC 1035 to handle queries we generate in the tests below. 
enum class RecordType { A, AAAA }; @@ -281,6 +281,8 @@ class TestDnsServer : public ListenerCallbacks { queries_.emplace_back(query); } + void onReject() override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } + void addHosts(const std::string& hostname, const IpList& ip, const RecordType& type) { if (type == RecordType::A) { hosts_a_[hostname] = ip; @@ -318,7 +320,7 @@ class DnsResolverImplPeer { ares_channel channel() const { return resolver_->channel_; } bool isChannelDirty() const { return resolver_->dirty_channel_; } - const std::unordered_map& events() { return resolver_->events_; } + const absl::node_hash_map& events() { return resolver_->events_; } // Reset the channel state for a DnsResolverImpl such that it will only use // TCP and optionally has a zero timeout (for validating timeout behavior). void resetChannelTcpOnly(bool zero_timeout) { @@ -386,15 +388,17 @@ class CustomInstance : public Address::Instance { const std::string& asString() const override { return antagonistic_name_; } absl::string_view asStringView() const override { return antagonistic_name_; } const std::string& logicalName() const override { return antagonistic_name_; } - Api::SysCallIntResult bind(os_fd_t fd) const override { return instance_.bind(fd); } - Api::SysCallIntResult connect(os_fd_t fd) const override { return instance_.connect(fd); } const Address::Ip* ip() const override { return instance_.ip(); } - IoHandlePtr socket(Address::SocketType type) const override { return instance_.socket(type); } + const Address::Pipe* pipe() const override { return instance_.pipe(); } + const sockaddr* sockAddr() const override { return instance_.sockAddr(); } + socklen_t sockAddrLen() const override { return instance_.sockAddrLen(); } Address::Type type() const override { return instance_.type(); } + const std::string& socketInterface() const override { return socket_interface_; } private: std::string antagonistic_name_; Address::Ipv4Instance instance_; + std::string socket_interface_{""}; }; 
TEST_F(DnsImplConstructor, SupportCustomAddressInstances) { diff --git a/test/common/network/filter_manager_impl_test.cc b/test/common/network/filter_manager_impl_test.cc index f8dea3442a22f..76e066e57d1dc 100644 --- a/test/common/network/filter_manager_impl_test.cc +++ b/test/common/network/filter_manager_impl_test.cc @@ -17,7 +17,8 @@ #include "test/mocks/network/mocks.h" #include "test/mocks/ratelimit/mocks.h" #include "test/mocks/runtime/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" +#include "test/mocks/server/instance.h" #include "test/mocks/tracing/mocks.h" #include "test/mocks/upstream/host.h" #include "test/mocks/upstream/mocks.h" @@ -415,7 +416,7 @@ stat_prefix: name .WillOnce(Return(&conn_pool)); request_callbacks->complete(Extensions::Filters::Common::RateLimit::LimitStatus::OK, nullptr, - nullptr); + nullptr, nullptr); conn_pool.poolReady(upstream_connection); diff --git a/test/common/network/filter_matcher_test.cc b/test/common/network/filter_matcher_test.cc index 2668400adbc11..cd00d5cc71744 100644 --- a/test/common/network/filter_matcher_test.cc +++ b/test/common/network/filter_matcher_test.cc @@ -19,13 +19,13 @@ struct CallbackHandle { } // namespace class ListenerFilterMatcherTest : public testing::Test { public: - CallbackHandle createCallbackOnPort(int port) { - CallbackHandle handle; - handle.address_ = std::make_shared("127.0.0.1", port); - handle.socket_ = std::make_unique(); - handle.callback_ = std::make_unique(); - EXPECT_CALL(*handle.socket_, localAddress()).WillRepeatedly(ReturnRef(handle.address_)); - EXPECT_CALL(*handle.callback_, socket()).WillRepeatedly(ReturnRef(*handle.socket_)); + std::unique_ptr createCallbackOnPort(int port) { + auto handle = std::make_unique(); + handle->address_ = std::make_shared("127.0.0.1", port); + handle->socket_ = std::make_unique(); + handle->callback_ = std::make_unique(); + EXPECT_CALL(*(handle->socket_), 
localAddress()).WillRepeatedly(ReturnRef(handle->address_)); + EXPECT_CALL(*(handle->callback_), socket()).WillRepeatedly(ReturnRef(*(handle->socket_))); return handle; } envoy::config::listener::v3::ListenerFilterChainMatchPredicate createPortPredicate(int port_start, @@ -44,9 +44,9 @@ TEST_F(ListenerFilterMatcherTest, DstPortMatcher) { auto handle79 = createCallbackOnPort(79); auto handle80 = createCallbackOnPort(80); auto handle81 = createCallbackOnPort(81); - EXPECT_FALSE(matcher->matches(*handle79.callback_)); - EXPECT_TRUE(matcher->matches(*handle80.callback_)); - EXPECT_FALSE(matcher->matches(*handle81.callback_)); + EXPECT_FALSE(matcher->matches(*(handle79->callback_))); + EXPECT_TRUE(matcher->matches(*(handle80->callback_))); + EXPECT_FALSE(matcher->matches(*(handle81->callback_))); } TEST_F(ListenerFilterMatcherTest, AnyMatdcher) { @@ -56,9 +56,9 @@ TEST_F(ListenerFilterMatcherTest, AnyMatdcher) { auto handle79 = createCallbackOnPort(79); auto handle80 = createCallbackOnPort(80); auto handle81 = createCallbackOnPort(81); - EXPECT_TRUE(matcher->matches(*handle79.callback_)); - EXPECT_TRUE(matcher->matches(*handle80.callback_)); - EXPECT_TRUE(matcher->matches(*handle81.callback_)); + EXPECT_TRUE(matcher->matches(*(handle79->callback_))); + EXPECT_TRUE(matcher->matches(*(handle80->callback_))); + EXPECT_TRUE(matcher->matches(*(handle81->callback_))); } TEST_F(ListenerFilterMatcherTest, NotMatcher) { @@ -69,9 +69,9 @@ TEST_F(ListenerFilterMatcherTest, NotMatcher) { auto handle79 = createCallbackOnPort(79); auto handle80 = createCallbackOnPort(80); auto handle81 = createCallbackOnPort(81); - EXPECT_TRUE(matcher->matches(*handle79.callback_)); - EXPECT_FALSE(matcher->matches(*handle80.callback_)); - EXPECT_TRUE(matcher->matches(*handle81.callback_)); + EXPECT_TRUE(matcher->matches(*(handle79->callback_))); + EXPECT_FALSE(matcher->matches(*(handle80->callback_))); + EXPECT_TRUE(matcher->matches(*(handle81->callback_))); } TEST_F(ListenerFilterMatcherTest, 
OrMatcher) { @@ -87,9 +87,9 @@ TEST_F(ListenerFilterMatcherTest, OrMatcher) { auto handle443 = createCallbackOnPort(443); auto handle3306 = createCallbackOnPort(3306); - EXPECT_FALSE(matcher->matches(*handle3306.callback_)); - EXPECT_TRUE(matcher->matches(*handle80.callback_)); - EXPECT_TRUE(matcher->matches(*handle443.callback_)); + EXPECT_FALSE(matcher->matches(*(handle3306->callback_))); + EXPECT_TRUE(matcher->matches(*(handle80->callback_))); + EXPECT_TRUE(matcher->matches(*(handle443->callback_))); } TEST_F(ListenerFilterMatcherTest, AndMatcher) { @@ -105,9 +105,9 @@ TEST_F(ListenerFilterMatcherTest, AndMatcher) { auto handle443 = createCallbackOnPort(443); auto handle3306 = createCallbackOnPort(3306); - EXPECT_FALSE(matcher->matches(*handle3306.callback_)); - EXPECT_FALSE(matcher->matches(*handle80.callback_)); - EXPECT_TRUE(matcher->matches(*handle443.callback_)); + EXPECT_FALSE(matcher->matches(*(handle3306->callback_))); + EXPECT_FALSE(matcher->matches(*(handle80->callback_))); + EXPECT_TRUE(matcher->matches(*(handle443->callback_))); } } // namespace Network -} // namespace Envoy \ No newline at end of file +} // namespace Envoy diff --git a/test/common/network/io_socket_handle_impl_test.cc b/test/common/network/io_socket_handle_impl_test.cc index 4aae5b5296d19..a1a1a506b1585 100644 --- a/test/common/network/io_socket_handle_impl_test.cc +++ b/test/common/network/io_socket_handle_impl_test.cc @@ -1,6 +1,8 @@ +#include "common/common/utility.h" #include "common/network/io_socket_error_impl.h" #include "common/network/io_socket_handle_impl.h" +#include "gmock/gmock.h" #include "gtest/gtest.h" namespace Envoy { @@ -8,32 +10,44 @@ namespace Network { namespace { TEST(IoSocketHandleImplTest, TestIoSocketError) { - IoSocketError error1(EAGAIN); + IoSocketError error1(SOCKET_ERROR_AGAIN); EXPECT_DEBUG_DEATH(error1.getErrorCode(), ".*assert failure: .* Details: Didn't use getIoSocketEagainInstance.*"); + EXPECT_EQ(errorDetails(SOCKET_ERROR_AGAIN), + 
IoSocketError::getIoSocketEagainInstance()->getErrorDetails()); - EXPECT_EQ(::strerror(EAGAIN), IoSocketError::getIoSocketEagainInstance()->getErrorDetails()); + IoSocketError error2(SOCKET_ERROR_NOT_SUP); + EXPECT_EQ(IoSocketError::IoErrorCode::NoSupport, error2.getErrorCode()); + EXPECT_EQ(errorDetails(SOCKET_ERROR_NOT_SUP), error2.getErrorDetails()); - IoSocketError error3(ENOTSUP); - EXPECT_EQ(IoSocketError::IoErrorCode::NoSupport, error3.getErrorCode()); - EXPECT_EQ(::strerror(ENOTSUP), error3.getErrorDetails()); + IoSocketError error3(SOCKET_ERROR_AF_NO_SUP); + EXPECT_EQ(IoSocketError::IoErrorCode::AddressFamilyNoSupport, error3.getErrorCode()); + EXPECT_EQ(errorDetails(SOCKET_ERROR_AF_NO_SUP), error3.getErrorDetails()); - IoSocketError error4(EAFNOSUPPORT); - EXPECT_EQ(IoSocketError::IoErrorCode::AddressFamilyNoSupport, error4.getErrorCode()); - EXPECT_EQ(::strerror(EAFNOSUPPORT), error4.getErrorDetails()); + IoSocketError error4(SOCKET_ERROR_IN_PROGRESS); + EXPECT_EQ(IoSocketError::IoErrorCode::InProgress, error4.getErrorCode()); + EXPECT_EQ(errorDetails(SOCKET_ERROR_IN_PROGRESS), error4.getErrorDetails()); - IoSocketError error5(EINPROGRESS); - EXPECT_EQ(IoSocketError::IoErrorCode::InProgress, error5.getErrorCode()); - EXPECT_EQ(::strerror(EINPROGRESS), error5.getErrorDetails()); + IoSocketError error5(SOCKET_ERROR_PERM); + EXPECT_EQ(IoSocketError::IoErrorCode::Permission, error5.getErrorCode()); + EXPECT_EQ(errorDetails(SOCKET_ERROR_PERM), error5.getErrorDetails()); - IoSocketError error6(EPERM); - EXPECT_EQ(IoSocketError::IoErrorCode::Permission, error6.getErrorCode()); - EXPECT_EQ(::strerror(EPERM), error6.getErrorDetails()); + IoSocketError error6(SOCKET_ERROR_MSG_SIZE); + EXPECT_EQ(IoSocketError::IoErrorCode::MessageTooBig, error6.getErrorCode()); + EXPECT_EQ(errorDetails(SOCKET_ERROR_MSG_SIZE), error6.getErrorDetails()); - // Random unknown error. 
- IoSocketError error7(123); - EXPECT_EQ(IoSocketError::IoErrorCode::UnknownError, error7.getErrorCode()); - EXPECT_EQ(::strerror(123), error7.getErrorDetails()); + IoSocketError error7(SOCKET_ERROR_INTR); + EXPECT_EQ(IoSocketError::IoErrorCode::Interrupt, error7.getErrorCode()); + EXPECT_EQ(errorDetails(SOCKET_ERROR_INTR), error7.getErrorDetails()); + + IoSocketError error8(SOCKET_ERROR_ADDR_NOT_AVAIL); + EXPECT_EQ(IoSocketError::IoErrorCode::AddressNotAvailable, error8.getErrorCode()); + EXPECT_EQ(errorDetails(SOCKET_ERROR_ADDR_NOT_AVAIL), error8.getErrorDetails()); + + // Random unknown error + IoSocketError error9(123); + EXPECT_EQ(IoSocketError::IoErrorCode::UnknownError, error9.getErrorCode()); + EXPECT_EQ(errorDetails(123), error9.getErrorDetails()); } } // namespace diff --git a/test/common/network/lc_trie_speed_test.cc b/test/common/network/lc_trie_speed_test.cc index 632754af8cdee..24d52fe0fcaab 100644 --- a/test/common/network/lc_trie_speed_test.cc +++ b/test/common/network/lc_trie_speed_test.cc @@ -5,136 +5,145 @@ namespace { -std::vector addresses; - -std::vector>> tag_data; - -std::vector>> - tag_data_nested_prefixes; - -std::vector>> - tag_data_minimal; - -std::unique_ptr> lc_trie; +struct AddressInputs { + AddressInputs() { + // Random test addresses from RFC 5737 netblocks + static const std::string test_addresses[] = { + "192.0.2.225", "198.51.100.55", "198.51.100.105", "192.0.2.150", "203.0.113.162", + "203.0.113.110", "203.0.113.99", "198.51.100.23", "198.51.100.24", "203.0.113.12"}; + for (const auto& address : test_addresses) { + addresses_.push_back(Envoy::Network::Utility::parseInternetAddress(address)); + } + } -std::unique_ptr> lc_trie_nested_prefixes; + std::vector addresses_; +}; + +struct CidrInputs { + CidrInputs() { + // Construct three sets of prefixes: one consisting of 1,024 addresses in an + // RFC 5737 netblock, another consisting of those same addresses plus + // 0.0.0.0/0 (to exercise the LC Trie's support for nested prefixes), 
+ // and finally a set containing only 0.0.0.0/0. + for (int i = 0; i < 32; i++) { + for (int j = 0; j < 32; j++) { + tag_data_.emplace_back( + std::pair>( + {"tag_1", + {Envoy::Network::Address::CidrRange::create( + fmt::format("192.0.{}.{}/32", i, j))}})); + } + } + tag_data_nested_prefixes_ = tag_data_; + tag_data_nested_prefixes_.emplace_back( + std::pair>( + {"tag_0", {Envoy::Network::Address::CidrRange::create("0.0.0.0/0")}})); + tag_data_minimal_.emplace_back( + std::pair>( + {"tag_1", {Envoy::Network::Address::CidrRange::create("0.0.0.0/0")}})); + } -std::unique_ptr> lc_trie_minimal; + std::vector>> tag_data_; + std::vector>> + tag_data_nested_prefixes_; + std::vector>> + tag_data_minimal_; +}; } // namespace namespace Envoy { -static void BM_LcTrieConstruct(benchmark::State& state) { +static void lcTrieConstruct(benchmark::State& state) { + CidrInputs inputs; + std::unique_ptr> trie; for (auto _ : state) { - trie = std::make_unique>(tag_data); + trie = std::make_unique>(inputs.tag_data_); } benchmark::DoNotOptimize(trie); } -BENCHMARK(BM_LcTrieConstruct); +BENCHMARK(lcTrieConstruct); + +static void lcTrieConstructNested(benchmark::State& state) { + CidrInputs inputs; -static void BM_LcTrieConstructNested(benchmark::State& state) { std::unique_ptr> trie; for (auto _ : state) { - trie = std::make_unique>(tag_data_nested_prefixes); + trie = std::make_unique>( + inputs.tag_data_nested_prefixes_); } benchmark::DoNotOptimize(trie); } -BENCHMARK(BM_LcTrieConstructNested); +BENCHMARK(lcTrieConstructNested); -static void BM_LcTrieConstructMinimal(benchmark::State& state) { +static void lcTrieConstructMinimal(benchmark::State& state) { + CidrInputs inputs; std::unique_ptr> trie; for (auto _ : state) { - trie = std::make_unique>(tag_data_minimal); + trie = std::make_unique>(inputs.tag_data_minimal_); } benchmark::DoNotOptimize(trie); } -BENCHMARK(BM_LcTrieConstructMinimal); +BENCHMARK(lcTrieConstructMinimal); + +static void lcTrieLookup(benchmark::State& state) { + 
CidrInputs cidr_inputs; + AddressInputs address_inputs; + std::unique_ptr> lc_trie = + std::make_unique>(cidr_inputs.tag_data_); -static void BM_LcTrieLookup(benchmark::State& state) { static size_t i = 0; size_t output_tags = 0; for (auto _ : state) { i++; - i %= addresses.size(); - output_tags += lc_trie->getData(addresses[i]).size(); + i %= address_inputs.addresses_.size(); + output_tags += lc_trie->getData(address_inputs.addresses_[i]).size(); } benchmark::DoNotOptimize(output_tags); } -BENCHMARK(BM_LcTrieLookup); +BENCHMARK(lcTrieLookup); + +static void lcTrieLookupWithNestedPrefixes(benchmark::State& state) { + CidrInputs cidr_inputs; + AddressInputs address_inputs; + std::unique_ptr> lc_trie_nested_prefixes = + std::make_unique>( + cidr_inputs.tag_data_nested_prefixes_); -static void BM_LcTrieLookupWithNestedPrefixes(benchmark::State& state) { static size_t i = 0; size_t output_tags = 0; for (auto _ : state) { i++; - i %= addresses.size(); - output_tags += lc_trie_nested_prefixes->getData(addresses[i]).size(); + i %= address_inputs.addresses_.size(); + output_tags += lc_trie_nested_prefixes->getData(address_inputs.addresses_[i]).size(); } benchmark::DoNotOptimize(output_tags); } -BENCHMARK(BM_LcTrieLookupWithNestedPrefixes); +BENCHMARK(lcTrieLookupWithNestedPrefixes); + +static void lcTrieLookupMinimal(benchmark::State& state) { + CidrInputs cidr_inputs; + AddressInputs address_inputs; + std::unique_ptr> lc_trie_minimal = + std::make_unique>(cidr_inputs.tag_data_minimal_); -static void BM_LcTrieLookupMinimal(benchmark::State& state) { static size_t i = 0; size_t output_tags = 0; for (auto _ : state) { i++; - i %= addresses.size(); - output_tags += lc_trie_minimal->getData(addresses[i]).size(); + i %= address_inputs.addresses_.size(); + output_tags += lc_trie_minimal->getData(address_inputs.addresses_[i]).size(); } benchmark::DoNotOptimize(output_tags); } -BENCHMARK(BM_LcTrieLookupMinimal); +BENCHMARK(lcTrieLookupMinimal); } // namespace Envoy - -// 
Boilerplate main(), which discovers benchmarks in the same file and runs them. -int main(int argc, char** argv) { - - // Random test addresses from RFC 5737 netblocks - static const std::string test_addresses[] = { - "192.0.2.225", "198.51.100.55", "198.51.100.105", "192.0.2.150", "203.0.113.162", - "203.0.113.110", "203.0.113.99", "198.51.100.23", "198.51.100.24", "203.0.113.12"}; - for (const auto& address : test_addresses) { - addresses.push_back(Envoy::Network::Utility::parseInternetAddress(address)); - } - - // Construct three sets of prefixes: one consisting of 1,024 addresses in an - // RFC 5737 netblock, another consisting of those same addresses plus - // 0.0.0.0/0 (to exercise the LC Trie's support for nested prefixes), - // and finally a set containing only 0.0.0.0/0. - for (int i = 0; i < 32; i++) { - for (int j = 0; j < 32; j++) { - tag_data.emplace_back(std::pair>( - {"tag_1", - {Envoy::Network::Address::CidrRange::create(fmt::format("192.0.{}.{}/32", i, j))}})); - } - } - tag_data_nested_prefixes = tag_data; - tag_data_nested_prefixes.emplace_back( - std::pair>( - {"tag_0", {Envoy::Network::Address::CidrRange::create("0.0.0.0/0")}})); - tag_data_minimal.emplace_back( - std::pair>( - {"tag_1", {Envoy::Network::Address::CidrRange::create("0.0.0.0/0")}})); - - lc_trie = std::make_unique>(tag_data); - lc_trie_nested_prefixes = - std::make_unique>(tag_data_nested_prefixes); - lc_trie_minimal = std::make_unique>(tag_data_minimal); - - benchmark::Initialize(&argc, argv); - if (benchmark::ReportUnrecognizedArguments(argc, argv)) { - return 1; - } - benchmark::RunSpecifiedBenchmarks(); -} diff --git a/test/common/network/listen_socket_impl_test.cc b/test/common/network/listen_socket_impl_test.cc index f705f9163f910..39b790d14163d 100644 --- a/test/common/network/listen_socket_impl_test.cc +++ b/test/common/network/listen_socket_impl_test.cc @@ -1,5 +1,6 @@ #include "envoy/common/platform.h" #include "envoy/config/core/v3/base.pb.h" +#include 
"envoy/network/exception.h" #include "common/api/os_sys_calls_impl.h" #include "common/network/io_socket_handle_impl.h" @@ -20,7 +21,7 @@ namespace Envoy { namespace Network { namespace { -template +template class ListenSocketImplTest : public testing::TestWithParam { protected: ListenSocketImplTest() : version_(GetParam()) {} @@ -44,10 +45,10 @@ class ListenSocketImplTest : public testing::TestWithParam { while (true) { ++loop_number; - auto addr_fd = Network::Test::bindFreeLoopbackPort(version_, Address::SocketType::Stream); + auto addr_fd = Network::Test::bindFreeLoopbackPort(version_, Socket::Type::Stream); auto addr = addr_fd.first; - Network::IoHandlePtr& io_handle = addr_fd.second; - EXPECT_TRUE(SOCKET_VALID(io_handle->fd())); + SocketPtr& sock = addr_fd.second; + EXPECT_TRUE(SOCKET_VALID(sock->ioHandle().fd())); // Confirm that we got a reasonable address and port. ASSERT_EQ(Address::Type::Ip, addr->type()); @@ -55,7 +56,8 @@ class ListenSocketImplTest : public testing::TestWithParam { ASSERT_LT(0U, addr->ip()->port()); // Release the socket and re-bind it. - EXPECT_EQ(nullptr, io_handle->close().err_); + EXPECT_TRUE(sock->isOpen()); + sock->close(); auto option = std::make_unique(); auto options = std::make_shared>(); @@ -81,8 +83,8 @@ class ListenSocketImplTest : public testing::TestWithParam { // TODO (conqerAtapple): This is unfortunate. We should be able to templatize this // instead of if block. auto os_sys_calls = Api::OsSysCallsSingleton::get(); - if (NetworkSocketTrait::type == Address::SocketType::Stream) { - EXPECT_EQ(0, os_sys_calls.listen(socket1->ioHandle().fd(), 0).rc_); + if (NetworkSocketTrait::type == Socket::Type::Stream) { + EXPECT_EQ(0, socket1->listen(0).rc_); } EXPECT_EQ(addr->ip()->port(), socket1->localAddress()->ip()->port()); @@ -104,7 +106,7 @@ class ListenSocketImplTest : public testing::TestWithParam { int domain = version_ == Address::IpVersion::v4 ? 
AF_INET : AF_INET6; auto socket_result = os_sys_calls.socket(domain, SOCK_STREAM, 0); EXPECT_TRUE(SOCKET_VALID(socket_result.rc_)); - io_handle = std::make_unique(socket_result.rc_); + Network::IoHandlePtr io_handle = std::make_unique(socket_result.rc_); auto socket3 = createListenSocketPtr(std::move(io_handle), addr, nullptr); EXPECT_EQ(socket3->localAddress()->asString(), addr->asString()); @@ -124,8 +126,8 @@ class ListenSocketImplTest : public testing::TestWithParam { } }; -using ListenSocketImplTestTcp = ListenSocketImplTest; -using ListenSocketImplTestUdp = ListenSocketImplTest; +using ListenSocketImplTestTcp = ListenSocketImplTest; +using ListenSocketImplTestUdp = ListenSocketImplTest; INSTANTIATE_TEST_SUITE_P(IpVersions, ListenSocketImplTestTcp, testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), @@ -145,7 +147,7 @@ class TestListenSocket : public ListenSocketImpl { public: TestListenSocket(Address::InstanceConstSharedPtr address) : ListenSocketImpl(std::make_unique(), address) {} - Address::SocketType socketType() const override { return Address::SocketType::Stream; } + Socket::Type socketType() const override { return Socket::Type::Stream; } }; TEST_P(ListenSocketImplTestTcp, SetLocalAddress) { diff --git a/test/common/network/listener_impl_test.cc b/test/common/network/listener_impl_test.cc index a19fa577163ea..5aaef758ce4a8 100644 --- a/test/common/network/listener_impl_test.cc +++ b/test/common/network/listener_impl_test.cc @@ -1,14 +1,16 @@ #include "envoy/config/core/v3/base.pb.h" +#include "envoy/network/exception.h" #include "common/network/address_impl.h" #include "common/network/listener_impl.h" #include "common/network/utility.h" +#include "common/stream_info/stream_info_impl.h" #include "test/common/network/listener_impl_test_base.h" #include "test/mocks/network/mocks.h" -#include "test/mocks/server/mocks.h" #include "test/test_common/environment.h" #include "test/test_common/network_utility.h" +#include 
"test/test_common/test_runtime.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" @@ -57,7 +59,7 @@ INSTANTIATE_TEST_SUITE_P(IpVersions, ListenerImplDeathTest, testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), TestUtility::ipTestParamsToString); TEST_P(ListenerImplDeathTest, ErrorCallback) { - EXPECT_DEATH_LOG_TO_STDERR(errorCallbackTest(GetParam()), ".*listener accept failure.*"); + EXPECT_DEATH(errorCallbackTest(GetParam()), ".*listener accept failure.*"); } class TestListenerImpl : public ListenerImpl { @@ -138,9 +140,75 @@ TEST_P(ListenerImplTest, UseActualDst) { dispatcher_->run(Event::Dispatcher::RunType::Block); } +TEST_P(ListenerImplTest, GlobalConnectionLimitEnforcement) { + // Required to manipulate runtime values when there is no test server. + TestScopedRuntime scoped_runtime; + + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"overload.global_downstream_max_connections", "2"}}); + auto socket = std::make_shared( + Network::Test::getCanonicalLoopbackAddress(version_), nullptr, true); + Network::MockListenerCallbacks listener_callbacks; + Network::MockConnectionHandler connection_handler; + Network::ListenerPtr listener = dispatcher_->createListener(socket, listener_callbacks, true); + + std::vector client_connections; + std::vector server_connections; + StreamInfo::StreamInfoImpl stream_info(dispatcher_->timeSource()); + EXPECT_CALL(listener_callbacks, onAccept_(_)) + .WillRepeatedly(Invoke([&](Network::ConnectionSocketPtr& accepted_socket) -> void { + server_connections.emplace_back(dispatcher_->createServerConnection( + std::move(accepted_socket), Network::Test::createRawBufferSocket(), stream_info)); + dispatcher_->exit(); + })); + + auto initiate_connections = [&](const int count) { + for (int i = 0; i < count; ++i) { + client_connections.emplace_back(dispatcher_->createClientConnection( + socket->localAddress(), Network::Address::InstanceConstSharedPtr(), + Network::Test::createRawBufferSocket(), nullptr)); + 
client_connections.back()->connect(); + } + }; + + initiate_connections(5); + EXPECT_CALL(listener_callbacks, onReject()).Times(3); + dispatcher_->run(Event::Dispatcher::RunType::Block); + + // We expect any server-side connections that get created to populate 'server_connections'. + EXPECT_EQ(2, server_connections.size()); + + // Let's increase the allowed connections and try sending more connections. + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"overload.global_downstream_max_connections", "3"}}); + initiate_connections(5); + EXPECT_CALL(listener_callbacks, onReject()).Times(4); + dispatcher_->run(Event::Dispatcher::RunType::Block); + + EXPECT_EQ(3, server_connections.size()); + + // Clear the limit and verify there's no longer a limit. + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"overload.global_downstream_max_connections", ""}}); + initiate_connections(10); + dispatcher_->run(Event::Dispatcher::RunType::Block); + + EXPECT_EQ(13, server_connections.size()); + + for (const auto& conn : client_connections) { + conn->close(ConnectionCloseType::NoFlush); + } + for (const auto& conn : server_connections) { + conn->close(ConnectionCloseType::NoFlush); + } + + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"overload.global_downstream_max_connections", ""}}); +} + TEST_P(ListenerImplTest, WildcardListenerUseActualDst) { - auto socket = - std::make_shared(Network::Test::getAnyAddress(version_), nullptr, true); + auto socket = std::make_shared( + Network::Test::getCanonicalLoopbackAddress(version_), nullptr, true); Network::MockListenerCallbacks listener_callbacks; Network::MockConnectionHandler connection_handler; // Do not redirect since use_original_dst is false. 
@@ -153,8 +221,6 @@ TEST_P(ListenerImplTest, WildcardListenerUseActualDst) { Network::Test::createRawBufferSocket(), nullptr); client_connection->connect(); - EXPECT_CALL(listener, getLocalAddress(_)).WillOnce(Return(local_dst_address)); - StreamInfo::StreamInfoImpl stream_info(dispatcher_->timeSource()); EXPECT_CALL(listener_callbacks, onAccept_(_)) .WillOnce(Invoke([&](Network::ConnectionSocketPtr& socket) -> void { @@ -199,11 +265,6 @@ TEST_P(ListenerImplTest, WildcardListenerIpv4Compat) { Network::Test::createRawBufferSocket(), nullptr); client_connection->connect(); - EXPECT_CALL(listener, getLocalAddress(_)) - .WillOnce(Invoke([](os_fd_t fd) -> Address::InstanceConstSharedPtr { - return Address::addressFromFd(fd); - })); - StreamInfo::StreamInfoImpl stream_info(dispatcher_->timeSource()); EXPECT_CALL(listener_callbacks, onAccept_(_)) .WillOnce(Invoke([&](Network::ConnectionSocketPtr& socket) -> void { @@ -223,8 +284,8 @@ TEST_P(ListenerImplTest, WildcardListenerIpv4Compat) { TEST_P(ListenerImplTest, DisableAndEnableListener) { testing::InSequence s1; - auto socket = - std::make_shared(Network::Test::getAnyAddress(version_), nullptr, true); + auto socket = std::make_shared( + Network::Test::getCanonicalLoopbackAddress(version_), nullptr, true); MockListenerCallbacks listener_callbacks; MockConnectionCallbacks connection_callbacks; TestListenerImpl listener(dispatcherImpl(), socket, listener_callbacks, true); @@ -249,10 +310,6 @@ TEST_P(ListenerImplTest, DisableAndEnableListener) { // When the listener is re-enabled, the pending connection should be accepted. 
listener.enable(); - EXPECT_CALL(listener, getLocalAddress(_)) - .WillOnce(Invoke([](os_fd_t fd) -> Address::InstanceConstSharedPtr { - return Address::addressFromFd(fd); - })); EXPECT_CALL(listener_callbacks, onAccept_(_)).WillOnce(Invoke([&](ConnectionSocketPtr&) -> void { client_connection->close(ConnectionCloseType::NoFlush); })); diff --git a/test/common/network/listener_impl_test_base.h b/test/common/network/listener_impl_test_base.h index 19884ba469826..8f3ee21b87278 100644 --- a/test/common/network/listener_impl_test_base.h +++ b/test/common/network/listener_impl_test_base.h @@ -20,7 +20,7 @@ class ListenerImplTestBase : public testing::TestWithParam { ListenerImplTestBase() : version_(GetParam()), alt_address_(Network::Test::findOrCheckFreePort( - Network::Test::getCanonicalLoopbackAddress(version_), Address::SocketType::Stream)), + Network::Test::getCanonicalLoopbackAddress(version_), Socket::Type::Stream)), api_(Api::createApiForTest()), dispatcher_(api_->allocateDispatcher("test_thread")) {} Event::DispatcherImpl& dispatcherImpl() { diff --git a/test/common/network/socket_option_factory_test.cc b/test/common/network/socket_option_factory_test.cc index 4ec848f57c9c0..7ca08149bd939 100644 --- a/test/common/network/socket_option_factory_test.cc +++ b/test/common/network/socket_option_factory_test.cc @@ -43,7 +43,7 @@ class SocketOptionFactoryTest : public testing::Test { }; #define CHECK_OPTION_SUPPORTED(option) \ - if (!option.has_value()) { \ + if (!option.hasValue()) { \ return; \ } @@ -59,13 +59,13 @@ TEST_F(SocketOptionFactoryTest, TestBuildSocketMarkOptions) { const int type = expected_option.level(); const int option = expected_option.option(); - EXPECT_CALL(os_sys_calls_mock_, setsockopt_(_, _, _, _, sizeof(int))) - .WillOnce(Invoke([type, option](os_fd_t, int input_type, int input_option, const void* optval, - socklen_t) -> int { + EXPECT_CALL(socket_mock_, setSocketOption(_, _, _, sizeof(int))) + .WillOnce(Invoke([type, option](int input_type, 
int input_option, const void* optval, + socklen_t) -> Api::SysCallIntResult { EXPECT_EQ(100, *static_cast(optval)); EXPECT_EQ(type, input_type); EXPECT_EQ(option, input_option); - return 0; + return {0, 0}; })); EXPECT_TRUE(Network::Socket::applyOptions(options, socket_mock_, @@ -83,16 +83,16 @@ TEST_F(SocketOptionFactoryTest, TestBuildIpv4TransparentOptions) { const int type = expected_option.level(); const int option = expected_option.option(); - EXPECT_CALL(os_sys_calls_mock_, setsockopt_(_, _, _, _, sizeof(int))) + EXPECT_CALL(socket_mock_, setSocketOption(_, _, _, sizeof(int))) .Times(2) - .WillRepeatedly(Invoke([type, option](os_fd_t, int input_type, int input_option, - const void* optval, socklen_t) -> int { + .WillRepeatedly(Invoke([type, option](int input_type, int input_option, const void* optval, + socklen_t) -> Api::SysCallIntResult { EXPECT_EQ(type, input_type); EXPECT_EQ(option, input_option); EXPECT_EQ(1, *static_cast(optval)); - return 0; + return {0, 0}; })); - + EXPECT_CALL(socket_mock_, ipVersion()).WillRepeatedly(testing::Return(Address::IpVersion::v4)); EXPECT_TRUE(Network::Socket::applyOptions(options, socket_mock_, envoy::config::core::v3::SocketOption::STATE_PREBIND)); EXPECT_TRUE(Network::Socket::applyOptions(options, socket_mock_, @@ -110,16 +110,17 @@ TEST_F(SocketOptionFactoryTest, TestBuildIpv6TransparentOptions) { const int type = expected_option.level(); const int option = expected_option.option(); - EXPECT_CALL(os_sys_calls_mock_, setsockopt_(_, _, _, _, sizeof(int))) + EXPECT_CALL(socket_mock_, setSocketOption(_, _, _, sizeof(int))) .Times(2) - .WillRepeatedly(Invoke([type, option](os_fd_t, int input_type, int input_option, - const void* optval, socklen_t) -> int { + .WillRepeatedly(Invoke([type, option](int input_type, int input_option, const void* optval, + socklen_t) -> Api::SysCallIntResult { EXPECT_EQ(type, input_type); EXPECT_EQ(option, input_option); EXPECT_EQ(1, *static_cast(optval)); - return 0; + return {0, 0}; })); + 
EXPECT_CALL(socket_mock_, ipVersion()).WillRepeatedly(testing::Return(Address::IpVersion::v6)); EXPECT_TRUE(Network::Socket::applyOptions(options, socket_mock_, envoy::config::core::v3::SocketOption::STATE_PREBIND)); EXPECT_TRUE(Network::Socket::applyOptions(options, socket_mock_, @@ -130,13 +131,20 @@ TEST_F(SocketOptionFactoryTest, TestBuildLiteralOptions) { Protobuf::RepeatedPtrField socket_options_proto; Envoy::Protobuf::TextFormat::Parser parser; envoy::config::core::v3::SocketOption socket_option_proto; + struct linger expected_linger; + expected_linger.l_onoff = 1; + expected_linger.l_linger = 3456; + absl::string_view linger_bstr{reinterpret_cast(&expected_linger), + sizeof(struct linger)}; + std::string linger_bstr_formatted = testing::PrintToString(linger_bstr); static const char linger_option_format[] = R"proto( state: STATE_PREBIND level: %d name: %d - buf_value: "\x01\x00\x00\x00\x80\x0d\x00\x00" + buf_value: %s )proto"; - auto linger_option = absl::StrFormat(linger_option_format, SOL_SOCKET, SO_LINGER); + auto linger_option = + absl::StrFormat(linger_option_format, SOL_SOCKET, SO_LINGER, linger_bstr_formatted); ASSERT_TRUE(parser.ParseFromString(linger_option, &socket_option_proto)); *socket_options_proto.Add() = socket_option_proto; static const char keepalive_option_format[] = R"proto( @@ -156,11 +164,6 @@ TEST_F(SocketOptionFactoryTest, TestBuildLiteralOptions) { EXPECT_TRUE(option_details.has_value()); EXPECT_EQ(SOL_SOCKET, option_details->name_.level()); EXPECT_EQ(SO_LINGER, option_details->name_.option()); - struct linger expected_linger; - expected_linger.l_onoff = 1; - expected_linger.l_linger = 3456; - absl::string_view linger_bstr{reinterpret_cast(&expected_linger), - sizeof(struct linger)}; EXPECT_EQ(linger_bstr, option_details->value_); option_details = socket_options->at(1)->getOptionDetails( diff --git a/test/common/network/socket_option_impl_test.cc b/test/common/network/socket_option_impl_test.cc index e0a6be83f703b..2bbf4eba6400e 
100644 --- a/test/common/network/socket_option_impl_test.cc +++ b/test/common/network/socket_option_impl_test.cc @@ -13,29 +13,29 @@ TEST_F(SocketOptionImplTest, BadFd) { Api::SysCallIntResult result = SocketOptionImpl::setSocketOption(socket_, {}, zero.data(), zero.size()); EXPECT_EQ(-1, result.rc_); - EXPECT_EQ(ENOTSUP, result.errno_); + EXPECT_EQ(SOCKET_ERROR_NOT_SUP, result.errno_); } TEST_F(SocketOptionImplTest, HasName) { auto optname = ENVOY_MAKE_SOCKET_OPTION_NAME(SOL_SOCKET, SO_SNDBUF); // Verify that the constructor macro sets all the fields correctly. - EXPECT_TRUE(optname.has_value()); + EXPECT_TRUE(optname.hasValue()); EXPECT_EQ(SOL_SOCKET, optname.level()); EXPECT_EQ(SO_SNDBUF, optname.option()); EXPECT_EQ("SOL_SOCKET/SO_SNDBUF", optname.name()); // The default constructor should not have a value, i.e. should // be unsupported. - EXPECT_FALSE(SocketOptionName().has_value()); + EXPECT_FALSE(SocketOptionName().hasValue()); // If we fail to set an option, verify that the log message // contains the option name so the operator can debug. 
SocketOptionImpl socket_option{envoy::config::core::v3::SocketOption::STATE_PREBIND, optname, 1}; - EXPECT_CALL(os_sys_calls_, setsockopt_(_, _, _, _, _)) - .WillOnce(Invoke([](os_fd_t, int, int, const void* optval, socklen_t) -> int { + EXPECT_CALL(socket_, setSocketOption(_, _, _, _)) + .WillOnce(Invoke([](int, int, const void* optval, socklen_t) -> Api::SysCallIntResult { EXPECT_EQ(1, *static_cast(optval)); - return -1; + return {-1, 0}; })); EXPECT_LOG_CONTAINS( @@ -46,10 +46,10 @@ TEST_F(SocketOptionImplTest, HasName) { TEST_F(SocketOptionImplTest, SetOptionSuccessTrue) { SocketOptionImpl socket_option{envoy::config::core::v3::SocketOption::STATE_PREBIND, ENVOY_MAKE_SOCKET_OPTION_NAME(5, 10), 1}; - EXPECT_CALL(os_sys_calls_, setsockopt_(_, 5, 10, _, sizeof(int))) - .WillOnce(Invoke([](os_fd_t, int, int, const void* optval, socklen_t) -> int { + EXPECT_CALL(socket_, setSocketOption(5, 10, _, sizeof(int))) + .WillOnce(Invoke([](int, int, const void* optval, socklen_t) -> Api::SysCallIntResult { EXPECT_EQ(1, *static_cast(optval)); - return 0; + return {0, 0}; })); EXPECT_TRUE( socket_option.setOption(socket_, envoy::config::core::v3::SocketOption::STATE_PREBIND)); diff --git a/test/common/network/socket_option_test.h b/test/common/network/socket_option_test.h index aab277f97858c..af50690af6cb7 100644 --- a/test/common/network/socket_option_test.h +++ b/test/common/network/socket_option_test.h @@ -74,13 +74,14 @@ class SocketOptionTest : public testing::Test { Socket::Option& socket_option, Network::SocketOptionName option_name, int option_val, const std::set& when) { for (auto state : when) { - if (option_name.has_value()) { - EXPECT_CALL(os_sys_calls_, - setsockopt_(_, option_name.level(), option_name.option(), _, sizeof(int))) - .WillOnce(Invoke([option_val](os_fd_t, int, int, const void* optval, socklen_t) -> int { - EXPECT_EQ(option_val, *static_cast(optval)); - return 0; - })); + if (option_name.hasValue()) { + EXPECT_CALL(socket_, + 
setSocketOption(option_name.level(), option_name.option(), _, sizeof(int))) + .WillOnce(Invoke( + [option_val](int, int, const void* optval, socklen_t) -> Api::SysCallIntResult { + EXPECT_EQ(option_val, *static_cast(optval)); + return {0, 0}; + })); EXPECT_TRUE(socket_option.setOption(socket_, state)); } else { EXPECT_FALSE(socket_option.setOption(socket_, state)); diff --git a/test/common/network/transport_socket_options_impl_test.cc b/test/common/network/transport_socket_options_impl_test.cc index d330a8edf8d3c..a96fbc53bdd3c 100644 --- a/test/common/network/transport_socket_options_impl_test.cc +++ b/test/common/network/transport_socket_options_impl_test.cc @@ -1,4 +1,7 @@ +#include "common/http/utility.h" +#include "common/network/address_impl.h" #include "common/network/application_protocol.h" +#include "common/network/proxy_protocol_filter_state.h" #include "common/network/transport_socket_options_impl.h" #include "common/network/upstream_server_name.h" #include "common/stream_info/filter_state_impl.h" @@ -30,6 +33,14 @@ TEST_F(TransportSocketOptionsImplTest, UpstreamServer) { filter_state_.setData( UpstreamServerName::key(), std::make_unique("www.example.com"), StreamInfo::FilterState::StateType::ReadOnly, StreamInfo::FilterState::LifeSpan::FilterChain); + filter_state_.setData(ProxyProtocolFilterState::key(), + std::make_unique(Network::ProxyProtocolData{ + Network::Address::InstanceConstSharedPtr( + new Network::Address::Ipv4Instance("202.168.0.13", 52000)), + Network::Address::InstanceConstSharedPtr( + new Network::Address::Ipv4Instance("174.2.2.222", 80))}), + StreamInfo::FilterState::StateType::ReadOnly, + StreamInfo::FilterState::LifeSpan::FilterChain); auto transport_socket_options = TransportSocketOptionsUtility::fromFilterState(filter_state_); EXPECT_EQ(absl::make_optional("www.example.com"), transport_socket_options->serverNameOverride()); @@ -37,7 +48,8 @@ TEST_F(TransportSocketOptionsImplTest, UpstreamServer) { } 
TEST_F(TransportSocketOptionsImplTest, ApplicationProtocols) { - std::vector http_alpns{"h2", "http/1.1"}; + std::vector http_alpns{Http::Utility::AlpnNames::get().Http2, + Http::Utility::AlpnNames::get().Http11}; filter_state_.setData( ApplicationProtocols::key(), std::make_unique(http_alpns), StreamInfo::FilterState::StateType::ReadOnly, StreamInfo::FilterState::LifeSpan::FilterChain); @@ -47,7 +59,8 @@ TEST_F(TransportSocketOptionsImplTest, ApplicationProtocols) { } TEST_F(TransportSocketOptionsImplTest, Both) { - std::vector http_alpns{"h2", "http/1.1"}; + std::vector http_alpns{Http::Utility::AlpnNames::get().Http2, + Http::Utility::AlpnNames::get().Http11}; filter_state_.setData( UpstreamServerName::key(), std::make_unique("www.example.com"), StreamInfo::FilterState::StateType::ReadOnly, StreamInfo::FilterState::LifeSpan::FilterChain); diff --git a/test/common/network/udp_listener_impl_batch_writer_test.cc b/test/common/network/udp_listener_impl_batch_writer_test.cc new file mode 100644 index 0000000000000..959fd52515f1c --- /dev/null +++ b/test/common/network/udp_listener_impl_batch_writer_test.cc @@ -0,0 +1,279 @@ +#include +#include +#include +#include +#include + +#ifdef __GNUC__ +#pragma GCC diagnostic push +// QUICHE allows unused parameters. +#pragma GCC diagnostic ignored "-Wunused-parameter" +// QUICHE uses offsetof(). 
+#pragma GCC diagnostic ignored "-Winvalid-offsetof" +#pragma GCC diagnostic ignored "-Wtype-limits" + +#include "quiche/quic/test_tools/quic_mock_syscall_wrapper.h" + +#pragma GCC diagnostic pop +#else +#include "quiche/quic/test_tools/quic_mock_syscall_wrapper.h" +#endif + +#include "envoy/config/core/v3/base.pb.h" + +#include "common/network/address_impl.h" +#include "common/network/socket_option_factory.h" +#include "common/network/socket_option_impl.h" +#include "common/network/udp_listener_impl.h" +#include "common/network/utility.h" + +#include "extensions/quic_listeners/quiche/udp_gso_batch_writer.h" + +#include "test/common/network/udp_listener_impl_test_base.h" +#include "test/mocks/api/mocks.h" +#include "test/mocks/network/mocks.h" +#include "test/mocks/server/mocks.h" +#include "test/test_common/environment.h" +#include "test/test_common/network_utility.h" +#include "test/test_common/threadsafe_singleton_injector.h" +#include "test/test_common/utility.h" + +#include "absl/time/time.h" +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::_; +using testing::Invoke; +using testing::ReturnRef; + +namespace Envoy { +namespace Network { +namespace { + +size_t getPacketLength(const msghdr* msg) { + size_t length = 0; + for (size_t i = 0; i < msg->msg_iovlen; ++i) { + length += msg->msg_iov[i].iov_len; + } + return length; +} + +class UdpListenerImplBatchWriterTest : public UdpListenerImplTestBase { +public: + void SetUp() override { + // Set listening socket options and set UdpGsoBatchWriter + server_socket_->addOptions(SocketOptionFactory::buildIpPacketInfoOptions()); + server_socket_->addOptions(SocketOptionFactory::buildRxQueueOverFlowOptions()); + listener_ = std::make_unique( + dispatcherImpl(), server_socket_, listener_callbacks_, dispatcherImpl().timeSource()); + udp_packet_writer_ = std::make_unique( + server_socket_->ioHandle(), listener_config_.listenerScope()); + ON_CALL(listener_callbacks_, 
udpPacketWriter()).WillByDefault(ReturnRef(*udp_packet_writer_)); + } +}; + +INSTANTIATE_TEST_SUITE_P(IpVersions, UdpListenerImplBatchWriterTest, + testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + TestUtility::ipTestParamsToString); + +/** + * Tests UDP Packet Writer To Send packets in Batches to a client + * 1. Setup a udp listener and client socket + * 2. Send different sized payloads to client. + * - Verify that the packets are buffered as long as payload + * length matches gso_size. + * - When payload size > gso_size verify that the new payload is + * buffered and already buffered packets are sent to client + * - When payload size < gso_size verify that the new payload is + * sent along with the already buffered payloads. + * 3. Call UdpPacketWriter's External Flush + * - Verify that the internal buffer is emptied and the + * total_bytes_sent counter is updated accordingly. + */ +TEST_P(UdpListenerImplBatchWriterTest, SendData) { + EXPECT_TRUE(udp_packet_writer_->isBatchMode()); + Address::InstanceConstSharedPtr send_from_addr = getNonDefaultSourceAddress(); + + absl::FixedArray payloads{"length7", "length7", "len<7", + "length7", "length7", "length>7"}; + std::string internal_buffer(""); + std::string last_buffered(""); + std::list pkts_to_send; + bool send_buffered_pkts = false; + + // Get initial value of total_bytes_sent + uint64_t total_bytes_sent = + listener_config_.listenerScope().counterFromString("total_bytes_sent").value(); + + for (const auto& payload : payloads) { + Buffer::InstancePtr buffer(new Buffer::OwnedImpl()); + buffer->add(payload); + UdpSendData send_data{send_from_addr->ip(), *client_.localAddress(), *buffer}; + + auto send_result = listener_->send(send_data); + EXPECT_TRUE(send_result.ok()) << "send() failed : " << send_result.err_->getErrorDetails(); + EXPECT_EQ(send_result.rc_, payload.length()); + + // Verify udp_packet_writer stats for batch writing + if (internal_buffer.length() == 0 || /* internal buffer is empty*/ + 
payload.compare(last_buffered) == 0) { /*len(payload) == gso_size*/ + pkts_to_send.emplace_back(payload); + internal_buffer.append(payload); + last_buffered = payload; + } else if (payload.compare(last_buffered) < 0) { /*len(payload) < gso_size*/ + pkts_to_send.emplace_back(payload); + internal_buffer.clear(); + last_buffered.clear(); + send_buffered_pkts = true; + } else { /*len(payload) > gso_size*/ + internal_buffer = payload; + last_buffered = payload; + send_buffered_pkts = true; + } + + EXPECT_EQ(listener_config_.listenerScope() + .gaugeFromString("internal_buffer_size", Stats::Gauge::ImportMode::NeverImport) + .value(), + internal_buffer.length()); + + // Verify that the total_bytes_sent is only updated when the packets + // are actually sent to the client, and not on being buffered. + if (send_buffered_pkts) { + for (const auto& pkt : pkts_to_send) { + total_bytes_sent += pkt.length(); + } + pkts_to_send.clear(); + if (last_buffered.length() != 0) { + pkts_to_send.emplace_back(last_buffered); + } + send_buffered_pkts = false; + } + EXPECT_EQ(listener_config_.listenerScope().counterFromString("total_bytes_sent").value(), + total_bytes_sent); + } + + // Test External Flush + auto flush_result = udp_packet_writer_->flush(); + EXPECT_TRUE(flush_result.ok()); + EXPECT_EQ(listener_config_.listenerScope() + .gaugeFromString("internal_buffer_size", Stats::Gauge::ImportMode::NeverImport) + .value(), + 0); + total_bytes_sent += payloads.back().length(); + + EXPECT_EQ(listener_config_.listenerScope().counterFromString("total_bytes_sent").value(), + total_bytes_sent); +} + +/** + * Tests UDP Packet writer behavior when socket is write-blocked. + * 1. Setup the udp_listener and have a payload buffered in the internal buffer. + * 2. Then set the socket to return EWOULDBLOCK error on sendmsg and write a + * different sized buffer to the packet writer. + * - Ensure that a buffer shorter than the initial buffer is added to the + * Internal Buffer. 
+ * - A buffer longer than the initial buffer should not get appended to the + * Internal Buffer. + */ +TEST_P(UdpListenerImplBatchWriterTest, WriteBlocked) { + // Quic Mock Objects + quic::test::MockQuicSyscallWrapper os_sys_calls; + quic::ScopedGlobalSyscallWrapperOverride os_calls(&os_sys_calls); + + // The initial payload to be buffered + std::string initial_payload("length7"); + + // Get initial value of total_bytes_sent + uint64_t total_bytes_sent = + listener_config_.listenerScope().counterFromString("total_bytes_sent").value(); + + // Possible following payloads to be sent after the initial payload + absl::FixedArray following_payloads{"length<7", "len<7"}; + + for (const auto& following_payload : following_payloads) { + std::string internal_buffer(""); + + // First have initial payload added to the udp_packet_writer's internal buffer. + Buffer::InstancePtr initial_buffer(new Buffer::OwnedImpl()); + initial_buffer->add(initial_payload); + UdpSendData initial_send_data{send_to_addr_->ip(), *server_socket_->localAddress(), + *initial_buffer}; + auto send_result = listener_->send(initial_send_data); + internal_buffer.append(initial_payload); + EXPECT_TRUE(send_result.ok()); + EXPECT_EQ(send_result.rc_, initial_payload.length()); + EXPECT_FALSE(udp_packet_writer_->isWriteBlocked()); + EXPECT_EQ(listener_config_.listenerScope() + .gaugeFromString("internal_buffer_size", Stats::Gauge::ImportMode::NeverImport) + .value(), + initial_payload.length()); + EXPECT_EQ(listener_config_.listenerScope().counterFromString("total_bytes_sent").value(), + total_bytes_sent); + + // Mock the socket to be write blocked on sendmsg syscall + EXPECT_CALL(os_sys_calls, Sendmsg(_, _, _)) + .WillOnce(Invoke([](int /*sockfd*/, const msghdr* /*msg*/, int /*flags*/) { + errno = EWOULDBLOCK; + return -1; + })); + + // Now send the following payload + Buffer::InstancePtr following_buffer(new Buffer::OwnedImpl()); + following_buffer->add(following_payload); + UdpSendData 
following_send_data{send_to_addr_->ip(), *server_socket_->localAddress(), + *following_buffer}; + send_result = listener_->send(following_send_data); + + if (following_payload.length() < initial_payload.length()) { + // The following payload should get buffered if it is + // shorter than initial payload + EXPECT_TRUE(send_result.ok()); + EXPECT_EQ(send_result.rc_, following_payload.length()); + EXPECT_FALSE(udp_packet_writer_->isWriteBlocked()); + internal_buffer.append(following_payload); + // Send another packet and verify that writer gets blocked later + EXPECT_CALL(os_sys_calls, Sendmsg(_, _, _)) + .WillOnce(Invoke([](int /*sockfd*/, const msghdr* /*msg*/, int /*flags*/) { + errno = EWOULDBLOCK; + return -1; + })); + following_buffer->add(following_payload); + UdpSendData final_send_data{send_to_addr_->ip(), *server_socket_->localAddress(), + *following_buffer}; + send_result = listener_->send(final_send_data); + } + + EXPECT_FALSE(send_result.ok()); + EXPECT_EQ(send_result.rc_, 0); + EXPECT_TRUE(udp_packet_writer_->isWriteBlocked()); + EXPECT_EQ(listener_config_.listenerScope().counterFromString("total_bytes_sent").value(), + total_bytes_sent); + EXPECT_EQ(listener_config_.listenerScope() + .gaugeFromString("internal_buffer_size", Stats::Gauge::ImportMode::NeverImport) + .value(), + internal_buffer.length()); + + // Reset write blocked status and verify correct buffer is flushed + udp_packet_writer_->setWritable(); + EXPECT_CALL(os_sys_calls, Sendmsg(_, _, _)) + .WillOnce(Invoke([&](int /*sockfd*/, const msghdr* msg, int /*flags*/) { + EXPECT_EQ(internal_buffer.length(), getPacketLength(msg)); + return internal_buffer.length(); + })); + auto flush_result = udp_packet_writer_->flush(); + EXPECT_TRUE(flush_result.ok()); + EXPECT_EQ(flush_result.rc_, 0); + EXPECT_FALSE(udp_packet_writer_->isWriteBlocked()); + EXPECT_EQ(listener_config_.listenerScope() + .gaugeFromString("internal_buffer_size", Stats::Gauge::ImportMode::NeverImport) + .value(), + 0); + 
total_bytes_sent += internal_buffer.length(); + EXPECT_EQ(listener_config_.listenerScope().counterFromString("total_bytes_sent").value(), + total_bytes_sent); + } +} + +} // namespace +} // namespace Network +} // namespace Envoy diff --git a/test/common/network/udp_listener_impl_test.cc b/test/common/network/udp_listener_impl_test.cc index c42bbafa30fa6..c2d1a4216bb60 100644 --- a/test/common/network/udp_listener_impl_test.cc +++ b/test/common/network/udp_listener_impl_test.cc @@ -3,18 +3,20 @@ #include #include +#include "envoy/api/os_sys_calls.h" #include "envoy/config/core/v3/base.pb.h" +#include "common/api/os_sys_calls_impl.h" #include "common/network/address_impl.h" #include "common/network/socket_option_factory.h" #include "common/network/socket_option_impl.h" #include "common/network/udp_listener_impl.h" +#include "common/network/udp_packet_writer_handler_impl.h" #include "common/network/utility.h" -#include "test/common/network/listener_impl_test_base.h" +#include "test/common/network/udp_listener_impl_test_base.h" #include "test/mocks/api/mocks.h" #include "test/mocks/network/mocks.h" -#include "test/mocks/server/mocks.h" #include "test/test_common/environment.h" #include "test/test_common/network_utility.h" #include "test/test_common/threadsafe_singleton_injector.h" @@ -27,81 +29,43 @@ using testing::_; using testing::Invoke; using testing::Return; +using testing::ReturnRef; namespace Envoy { namespace Network { namespace { -class UdpListenerImplTest : public ListenerImplTestBase { +// UdpGro is only supported on Linux versions >= 5.0. Also, the +// underlying platform only performs the payload concatenation when +// packets are sent from a network namespace different to that of +// the client. Currently, the testing framework does not support +// this behavior. +// This helper allows to intercept the supportsUdpGro syscall and +// toggle the gro behavior as per individual test requirements. 
+class MockSupportsUdpGro : public Api::OsSysCallsImpl { public: - UdpListenerImplTest() - : server_socket_(createServerSocket(true)), send_to_addr_(getServerLoopbackAddress()) { - time_system_.advanceTimeWait(std::chrono::milliseconds(100)); - } + MOCK_METHOD(bool, supportsUdpGro, (), (const)); +}; +class UdpListenerImplTest : public UdpListenerImplTestBase { +public: void SetUp() override { + ON_CALL(udp_gro_syscall_, supportsUdpGro()).WillByDefault(Return(false)); + // Set listening socket options. server_socket_->addOptions(SocketOptionFactory::buildIpPacketInfoOptions()); server_socket_->addOptions(SocketOptionFactory::buildRxQueueOverFlowOptions()); - + if (Api::OsSysCallsSingleton::get().supportsUdpGro()) { + server_socket_->addOptions(SocketOptionFactory::buildUdpGroOptions()); + } listener_ = std::make_unique( dispatcherImpl(), server_socket_, listener_callbacks_, dispatcherImpl().timeSource()); + udp_packet_writer_ = std::make_unique(server_socket_->ioHandle()); + ON_CALL(listener_callbacks_, udpPacketWriter()).WillByDefault(ReturnRef(*udp_packet_writer_)); } -protected: - Address::Instance* getServerLoopbackAddress() { - if (version_ == Address::IpVersion::v4) { - return new Address::Ipv4Instance(Network::Test::getLoopbackAddressString(version_), - server_socket_->localAddress()->ip()->port()); - } - return new Address::Ipv6Instance(Network::Test::getLoopbackAddressString(version_), - server_socket_->localAddress()->ip()->port()); - } - - SocketSharedPtr createServerSocket(bool bind) { - // Set IP_FREEBIND to allow sendmsg to send with non-local IPv6 source address. - return std::make_shared(Network::Test::getAnyAddress(version_), -#ifdef IP_FREEBIND - SocketOptionFactory::buildIpFreebindOptions(), -#else - nullptr, -#endif - bind); - } - - // Validates receive data, source/destination address and received time. 
- void validateRecvCallbackParams(const UdpRecvData& data) { - ASSERT_NE(data.addresses_.local_, nullptr); - - ASSERT_NE(data.addresses_.peer_, nullptr); - ASSERT_NE(data.addresses_.peer_->ip(), nullptr); - - EXPECT_EQ(data.addresses_.local_->asString(), send_to_addr_->asString()); - - EXPECT_EQ(data.addresses_.peer_->ip()->addressAsString(), - client_.localAddress()->ip()->addressAsString()); - - EXPECT_EQ(*data.addresses_.local_, *send_to_addr_); - - size_t num_packet_per_recv = 1u; - if (Api::OsSysCallsSingleton::get().supportsMmsg()) { - num_packet_per_recv = 16u; - } - EXPECT_EQ(time_system_.monotonicTime(), - data.receive_time_ + - std::chrono::milliseconds( - (num_packets_received_by_listener_ % num_packet_per_recv) * 100)); - // Advance time so that next onData() should have different received time. - time_system_.advanceTimeWait(std::chrono::milliseconds(100)); - ++num_packets_received_by_listener_; - } - - SocketSharedPtr server_socket_; - Network::Test::UdpSyncPeer client_{GetParam()}; - Address::InstanceConstSharedPtr send_to_addr_; - MockUdpListenerCallbacks listener_callbacks_; - std::unique_ptr listener_; - size_t num_packets_received_by_listener_{0}; + NiceMock udp_gro_syscall_; + TestThreadsafeSingletonInjector os_calls{&udp_gro_syscall_}; }; INSTANTIATE_TEST_SUITE_P(IpVersions, UdpListenerImplTest, @@ -123,10 +87,9 @@ TEST_P(UdpListenerImplTest, UdpSetListeningSocketOptionsSuccess) { #ifdef SO_RXQ_OVFL // Verify that overflow detection is enabled. 
int get_overflow = 0; - auto& os_syscalls = Api::OsSysCallsSingleton::get(); socklen_t int_size = static_cast(sizeof(get_overflow)); - const Api::SysCallIntResult result = os_syscalls.getsockopt( - server_socket_->ioHandle().fd(), SOL_SOCKET, SO_RXQ_OVFL, &get_overflow, &int_size); + const Api::SysCallIntResult result = + server_socket_->getSocketOption(SOL_SOCKET, SO_RXQ_OVFL, &get_overflow, &int_size); EXPECT_EQ(0, result.rc_); EXPECT_EQ(1, get_overflow); #endif @@ -145,13 +108,11 @@ TEST_P(UdpListenerImplTest, UseActualDstUdp) { EXPECT_CALL(listener_callbacks_, onReadReady()); EXPECT_CALL(listener_callbacks_, onData(_)) .WillOnce(Invoke([&](const UdpRecvData& data) -> void { - validateRecvCallbackParams(data); - + validateRecvCallbackParams(data, Api::OsSysCallsSingleton::get().supportsMmsg() ? 16u : 1u); EXPECT_EQ(data.buffer_->toString(), first); })) .WillOnce(Invoke([&](const UdpRecvData& data) -> void { - validateRecvCallbackParams(data); - + validateRecvCallbackParams(data, Api::OsSysCallsSingleton::get().supportsMmsg() ? 16u : 1u); EXPECT_EQ(data.buffer_->toString(), second); dispatcher_->exit(); @@ -186,7 +147,7 @@ TEST_P(UdpListenerImplTest, UdpEcho) { EXPECT_CALL(listener_callbacks_, onReadReady()); EXPECT_CALL(listener_callbacks_, onData(_)) .WillOnce(Invoke([&](const UdpRecvData& data) -> void { - validateRecvCallbackParams(data); + validateRecvCallbackParams(data, Api::OsSysCallsSingleton::get().supportsMmsg() ? 16u : 1u); test_peer_address = data.addresses_.peer_; @@ -196,7 +157,7 @@ TEST_P(UdpListenerImplTest, UdpEcho) { server_received_data.push_back(data_str); })) .WillRepeatedly(Invoke([&](const UdpRecvData& data) -> void { - validateRecvCallbackParams(data); + validateRecvCallbackParams(data, Api::OsSysCallsSingleton::get().supportsMmsg() ? 
16u : 1u); const std::string data_str = data.buffer_->toString(); EXPECT_EQ(data_str, client_data[num_packets_received_by_listener_ - 1]); @@ -272,7 +233,7 @@ TEST_P(UdpListenerImplTest, UdpListenerEnableDisable) { .Times(2) .WillOnce(Return()) .WillOnce(Invoke([&](const UdpRecvData& data) -> void { - validateRecvCallbackParams(data); + validateRecvCallbackParams(data, Api::OsSysCallsSingleton::get().supportsMmsg() ? 16u : 1u); EXPECT_EQ(data.buffer_->toString(), second); @@ -315,8 +276,10 @@ TEST_P(UdpListenerImplTest, UdpListenerRecvMsgError) { // Inject mocked OsSysCalls implementation to mock a read failure. Api::MockOsSysCalls os_sys_calls; TestThreadsafeSingletonInjector os_calls(&os_sys_calls); + EXPECT_CALL(os_sys_calls, supportsUdpGro()); EXPECT_CALL(os_sys_calls, supportsMmsg()); - EXPECT_CALL(os_sys_calls, recvmsg(_, _, _)).WillOnce(Return(Api::SysCallSizeResult{-1, ENOTSUP})); + EXPECT_CALL(os_sys_calls, recvmsg(_, _, _)) + .WillOnce(Return(Api::SysCallSizeResult{-1, SOCKET_ERROR_NOT_SUP})); dispatcher_->run(Event::Dispatcher::RunType::Block); } @@ -328,35 +291,12 @@ TEST_P(UdpListenerImplTest, UdpListenerRecvMsgError) { * address. */ TEST_P(UdpListenerImplTest, SendData) { + EXPECT_FALSE(udp_packet_writer_->isBatchMode()); const std::string payload("hello world"); Buffer::InstancePtr buffer(new Buffer::OwnedImpl()); buffer->add(payload); - // Use a self address that is unlikely to be picked by source address discovery - // algorithm if not specified in recvmsg/recvmmsg. Port is not taken into - // consideration. - Address::InstanceConstSharedPtr send_from_addr; - if (version_ == Address::IpVersion::v4) { - // Linux kernel regards any 127.x.x.x as local address. But Mac OS doesn't. - send_from_addr = std::make_shared( -#ifndef __APPLE__ - "127.1.2.3", -#else - "127.0.0.1", -#endif - server_socket_->localAddress()->ip()->port()); - } else { - // Only use non-local v6 address if IP_FREEBIND is supported. Otherwise use - // ::1 to avoid EINVAL error. 
Unfortunately this can't verify that sendmsg with - // customized source address is doing the work because kernel also picks ::1 - // if it's not specified in cmsghdr. - send_from_addr = std::make_shared( -#ifdef IP_FREEBIND - "::9", -#else - "::1", -#endif - server_socket_->localAddress()->ip()->port()); - } + + Address::InstanceConstSharedPtr send_from_addr = getNonDefaultSourceAddress(); UdpSendData send_data{send_from_addr->ip(), *client_.localAddress(), *buffer}; @@ -370,13 +310,17 @@ TEST_P(UdpListenerImplTest, SendData) { EXPECT_EQ(bytes_to_read, data.buffer_->length()); EXPECT_EQ(send_from_addr->asString(), data.addresses_.peer_->asString()); EXPECT_EQ(data.buffer_->toString(), payload); + + // Verify External Flush is a No-op + auto flush_result = udp_packet_writer_->flush(); + EXPECT_TRUE(flush_result.ok()); + EXPECT_EQ(0, flush_result.rc_); } /** * The send fails because the server_socket is created with bind=false. */ TEST_P(UdpListenerImplTest, SendDataError) { - Logger::StderrSinkDelegate stderr_sink(Logger::Registry::getSink()); // For coverage build. const std::string payload("hello world"); Buffer::InstancePtr buffer(new Buffer::OwnedImpl()); buffer->add(payload); @@ -386,18 +330,160 @@ TEST_P(UdpListenerImplTest, SendDataError) { // Inject mocked OsSysCalls implementation to mock a write failure. Api::MockOsSysCalls os_sys_calls; TestThreadsafeSingletonInjector os_calls(&os_sys_calls); - EXPECT_CALL(os_sys_calls, sendmsg(_, _, _)).WillOnce(Return(Api::SysCallSizeResult{-1, ENOTSUP})); + + EXPECT_CALL(os_sys_calls, sendmsg(_, _, _)) + .WillOnce(Return(Api::SysCallSizeResult{-1, SOCKET_ERROR_AGAIN})); auto send_result = listener_->send(send_data); EXPECT_FALSE(send_result.ok()); + EXPECT_EQ(send_result.err_->getErrorCode(), Api::IoError::IoErrorCode::Again); + // Failed write shouldn't drain the data. 
+ EXPECT_EQ(payload.length(), buffer->length()); + // Verify the writer is set to blocked + EXPECT_TRUE(udp_packet_writer_->isWriteBlocked()); + + // Reset write_blocked status + udp_packet_writer_->setWritable(); + EXPECT_FALSE(udp_packet_writer_->isWriteBlocked()); + + EXPECT_CALL(os_sys_calls, sendmsg(_, _, _)) + .WillOnce(Return(Api::SysCallSizeResult{-1, SOCKET_ERROR_NOT_SUP})); + send_result = listener_->send(send_data); + EXPECT_FALSE(send_result.ok()); EXPECT_EQ(send_result.err_->getErrorCode(), Api::IoError::IoErrorCode::NoSupport); // Failed write shouldn't drain the data. EXPECT_EQ(payload.length(), buffer->length()); - ON_CALL(os_sys_calls, sendmsg(_, _, _)).WillByDefault(Return(Api::SysCallSizeResult{-1, EINVAL})); + ON_CALL(os_sys_calls, sendmsg(_, _, _)) + .WillByDefault(Return(Api::SysCallSizeResult{-1, SOCKET_ERROR_INVAL})); // EINVAL should cause RELEASE_ASSERT. EXPECT_DEATH(listener_->send(send_data), "Invalid argument passed in"); } +/** + * Test that multiple stacked packets of the same size are properly segmented + * when UDP GRO is enabled on the platform. + */ +#ifdef UDP_GRO +TEST_P(UdpListenerImplTest, UdpGroBasic) { + // We send 4 packets (3 of equal length and 1 as a trail), which are concatenated together by + // kernel supporting udp gro. Verify the concatenated packet is transformed back into individual + // packets + absl::FixedArray client_data({"Equal!!!", "Length!!", "Messages", "trail"}); + + for (const auto& i : client_data) { + client_.write(i, *send_to_addr_); + } + + // The concatenated payload received from kernel supporting udp_gro + std::string stacked_message = absl::StrJoin(client_data, ""); + + // Mock OsSysCalls to mimic kernel behavior for packet concatenation + // based on udp_gro. supportsUdpGro should return true and recvmsg should + // return the concatenated payload with the gso_size set appropriately. 
+ Api::MockOsSysCalls os_sys_calls; + TestThreadsafeSingletonInjector os_calls(&os_sys_calls); + EXPECT_CALL(os_sys_calls, supportsUdpGro).WillRepeatedly(Return(true)); + EXPECT_CALL(os_sys_calls, supportsMmsg).Times(0); + + EXPECT_CALL(os_sys_calls, recvmsg(_, _, _)) + .WillOnce(Invoke([&](os_fd_t, msghdr* msg, int) { + // Set msg_name and msg_namelen + if (client_.localAddress()->ip()->version() == Address::IpVersion::v4) { + sockaddr_storage ss; + auto ipv4_addr = reinterpret_cast(&ss); + memset(ipv4_addr, 0, sizeof(sockaddr_in)); + ipv4_addr->sin_family = AF_INET; + ipv4_addr->sin_addr.s_addr = htonl(INADDR_LOOPBACK); + ipv4_addr->sin_port = client_.localAddress()->ip()->port(); + msg->msg_namelen = sizeof(sockaddr_in); + *reinterpret_cast(msg->msg_name) = *ipv4_addr; + } else if (client_.localAddress()->ip()->version() == Address::IpVersion::v6) { + sockaddr_storage ss; + auto ipv6_addr = reinterpret_cast(&ss); + memset(ipv6_addr, 0, sizeof(sockaddr_in6)); + ipv6_addr->sin6_family = AF_INET6; + ipv6_addr->sin6_addr = in6addr_loopback; + ipv6_addr->sin6_port = client_.localAddress()->ip()->port(); + *reinterpret_cast(msg->msg_name) = *ipv6_addr; + msg->msg_namelen = sizeof(sockaddr_in6); + } + + // Set msg_iovec + EXPECT_EQ(msg->msg_iovlen, 1); + memcpy(msg->msg_iov[0].iov_base, stacked_message.data(), stacked_message.length()); + msg->msg_iov[0].iov_len = stacked_message.length(); + + // Set control headers + memset(msg->msg_control, 0, msg->msg_controllen); + cmsghdr* cmsg = CMSG_FIRSTHDR(msg); + if (send_to_addr_->ip()->version() == Address::IpVersion::v4) { + cmsg->cmsg_level = IPPROTO_IP; +#ifndef IP_RECVDSTADDR + cmsg->cmsg_type = IP_PKTINFO; + cmsg->cmsg_len = CMSG_LEN(sizeof(in_pktinfo)); + reinterpret_cast(CMSG_DATA(cmsg))->ipi_addr.s_addr = + send_to_addr_->ip()->ipv4()->address(); +#else + cmsg.cmsg_type = IP_RECVDSTADDR; + cmsg->cmsg_len = CMSG_LEN(sizeof(in_addr)); + *reinterpret_cast(CMSG_DATA(cmsg)) = send_to_addr_->ip()->ipv4()->address(); 
+#endif + } else if (send_to_addr_->ip()->version() == Address::IpVersion::v6) { + cmsg->cmsg_len = CMSG_LEN(sizeof(in6_pktinfo)); + cmsg->cmsg_level = IPPROTO_IPV6; + cmsg->cmsg_type = IPV6_PKTINFO; + auto pktinfo = reinterpret_cast(CMSG_DATA(cmsg)); + pktinfo->ipi6_ifindex = 0; + *(reinterpret_cast(pktinfo->ipi6_addr.s6_addr)) = + send_to_addr_->ip()->ipv6()->address(); + } + + // Set gso_size + cmsg = CMSG_NXTHDR(msg, cmsg); + cmsg->cmsg_level = SOL_UDP; + cmsg->cmsg_type = UDP_GRO; + cmsg->cmsg_len = CMSG_LEN(sizeof(uint16_t)); + const uint16_t gso_size = 8; + *reinterpret_cast(CMSG_DATA(cmsg)) = gso_size; + +#ifdef SO_RXQ_OVFL + // Set SO_RXQ_OVFL + cmsg = CMSG_NXTHDR(msg, cmsg); + EXPECT_NE(cmsg, nullptr); + cmsg->cmsg_level = SOL_SOCKET; + cmsg->cmsg_type = SO_RXQ_OVFL; + cmsg->cmsg_len = CMSG_LEN(sizeof(uint32_t)); + const uint32_t overflow = 0; + *reinterpret_cast(CMSG_DATA(cmsg)) = overflow; +#endif + return Api::SysCallSizeResult{static_cast(stacked_message.length()), 0}; + })) + .WillRepeatedly(Return(Api::SysCallSizeResult{-1, EAGAIN})); + + EXPECT_CALL(listener_callbacks_, onReadReady()); + EXPECT_CALL(listener_callbacks_, onData(_)) + .WillOnce(Invoke([&](const UdpRecvData& data) -> void { + validateRecvCallbackParams(data, client_data.size()); + + const std::string data_str = data.buffer_->toString(); + EXPECT_EQ(data_str, client_data[num_packets_received_by_listener_ - 1]); + })) + .WillRepeatedly(Invoke([&](const UdpRecvData& data) -> void { + validateRecvCallbackParams(data, client_data.size()); + + const std::string data_str = data.buffer_->toString(); + EXPECT_EQ(data_str, client_data[num_packets_received_by_listener_ - 1]); + })); + + EXPECT_CALL(listener_callbacks_, onWriteReady(_)).WillOnce(Invoke([&](const Socket& socket) { + EXPECT_EQ(socket.ioHandle().fd(), server_socket_->ioHandle().fd()); + dispatcher_->exit(); + })); + + dispatcher_->run(Event::Dispatcher::RunType::Block); +} +#endif + } // namespace } // namespace Network } // 
namespace Envoy diff --git a/test/common/network/udp_listener_impl_test_base.h b/test/common/network/udp_listener_impl_test_base.h new file mode 100644 index 0000000000000..2547986a316a5 --- /dev/null +++ b/test/common/network/udp_listener_impl_test_base.h @@ -0,0 +1,123 @@ +#include +#include +#include +#include + +#include "envoy/config/core/v3/base.pb.h" + +#include "common/network/address_impl.h" +#include "common/network/socket_option_factory.h" +#include "common/network/socket_option_impl.h" +#include "common/network/udp_listener_impl.h" +#include "common/network/udp_packet_writer_handler_impl.h" +#include "common/network/utility.h" + +#include "test/common/network/listener_impl_test_base.h" +#include "test/mocks/api/mocks.h" +#include "test/mocks/network/mocks.h" +#include "test/mocks/server/mocks.h" +#include "test/test_common/environment.h" +#include "test/test_common/network_utility.h" +#include "test/test_common/threadsafe_singleton_injector.h" +#include "test/test_common/utility.h" + +#include "absl/time/time.h" +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Network { + +class UdpListenerImplTestBase : public ListenerImplTestBase { +public: + UdpListenerImplTestBase() + : server_socket_(createServerSocket(true)), send_to_addr_(getServerLoopbackAddress()) { + time_system_.advanceTimeWait(std::chrono::milliseconds(100)); + } + +protected: + Address::Instance* getServerLoopbackAddress() { + if (version_ == Address::IpVersion::v4) { + return new Address::Ipv4Instance(Network::Test::getLoopbackAddressString(version_), + server_socket_->localAddress()->ip()->port()); + } + return new Address::Ipv6Instance(Network::Test::getLoopbackAddressString(version_), + server_socket_->localAddress()->ip()->port()); + } + + SocketSharedPtr createServerSocket(bool bind) { + // Set IP_FREEBIND to allow sendmsg to send with non-local IPv6 source address. 
+ return std::make_shared(Network::Test::getAnyAddress(version_), +#ifdef IP_FREEBIND + SocketOptionFactory::buildIpFreebindOptions(), +#else + nullptr, +#endif + bind); + } + + Address::InstanceConstSharedPtr getNonDefaultSourceAddress() { + // Use a self address that is unlikely to be picked by source address discovery + // algorithm if not specified in recvmsg/recvmmsg. Port is not taken into + // consideration. + Address::InstanceConstSharedPtr send_from_addr; + if (version_ == Address::IpVersion::v4) { + // Linux kernel regards any 127.x.x.x as local address. But Mac OS doesn't. + send_from_addr = std::make_shared( +#ifndef __APPLE__ + "127.1.2.3", +#else + "127.0.0.1", +#endif + server_socket_->localAddress()->ip()->port()); + } else { + // Only use non-local v6 address if IP_FREEBIND is supported. Otherwise use + // ::1 to avoid EINVAL error. Unfortunately this can't verify that sendmsg with + // customized source address is doing the work because kernel also picks ::1 + // if it's not specified in cmsghdr. + send_from_addr = std::make_shared( +#ifdef IP_FREEBIND + "::9", +#else + "::1", +#endif + server_socket_->localAddress()->ip()->port()); + } + return send_from_addr; + } + + // Validates receive data, source/destination address and received time. 
+ void validateRecvCallbackParams(const UdpRecvData& data, size_t num_packet_per_recv) { + ASSERT_NE(data.addresses_.local_, nullptr); + + ASSERT_NE(data.addresses_.peer_, nullptr); + ASSERT_NE(data.addresses_.peer_->ip(), nullptr); + + EXPECT_EQ(data.addresses_.local_->asString(), send_to_addr_->asString()); + + EXPECT_EQ(data.addresses_.peer_->ip()->addressAsString(), + client_.localAddress()->ip()->addressAsString()); + + EXPECT_EQ(*data.addresses_.local_, *send_to_addr_); + + EXPECT_EQ(time_system_.monotonicTime(), + data.receive_time_ + + std::chrono::milliseconds( + (num_packets_received_by_listener_ % num_packet_per_recv) * 100)); + // Advance time so that next onData() should have different received time. + time_system_.advanceTimeWait(std::chrono::milliseconds(100)); + ++num_packets_received_by_listener_; + } + + SocketSharedPtr server_socket_; + Network::Test::UdpSyncPeer client_{GetParam()}; + Address::InstanceConstSharedPtr send_to_addr_; + NiceMock listener_callbacks_; + NiceMock listener_config_; + std::unique_ptr listener_; + size_t num_packets_received_by_listener_{0}; + Network::UdpPacketWriterPtr udp_packet_writer_; +}; + +} // namespace Network +} // namespace Envoy diff --git a/test/common/network/utility_corpus/test b/test/common/network/utility_corpus/test new file mode 100644 index 0000000000000..f7ea73bed6ef8 --- /dev/null +++ b/test/common/network/utility_corpus/test @@ -0,0 +1 @@ +127.0.0.1:0 \ No newline at end of file diff --git a/test/common/network/utility_fuzz_test.cc b/test/common/network/utility_fuzz_test.cc new file mode 100644 index 0000000000000..8d49667a24e0e --- /dev/null +++ b/test/common/network/utility_fuzz_test.cc @@ -0,0 +1,71 @@ +#include "envoy/config/core/v3/address.pb.h" + +#include "common/network/address_impl.h" +#include "common/network/utility.h" + +#include "test/fuzz/fuzz_runner.h" + +namespace Envoy { +namespace Fuzz { + +DEFINE_FUZZER(const uint8_t* buf, size_t len) { + const std::string 
string_buffer(reinterpret_cast(buf), len); + + try { + Network::Utility::parseInternetAddress(string_buffer); + } catch (const EnvoyException& e) { + ENVOY_LOG_MISC(debug, "EnvoyException: {}", e.what()); + } + + try { + Network::Utility::parseInternetAddressAndPort(string_buffer); + } catch (const EnvoyException& e) { + ENVOY_LOG_MISC(debug, "EnvoyException: {}", e.what()); + } + + try { + std::list port_range_list; + Network::Utility::parsePortRangeList(string_buffer, port_range_list); + } catch (const EnvoyException& e) { + ENVOY_LOG_MISC(debug, "EnvoyException: {}", e.what()); + } + + try { + envoy::config::core::v3::Address proto_address; + proto_address.mutable_pipe()->set_path(string_buffer); + Network::Utility::protobufAddressToAddress(proto_address); + } catch (const EnvoyException& e) { + ENVOY_LOG_MISC(debug, "EnvoyException: {}", e.what()); + } + + try { + FuzzedDataProvider provider(buf, len); + envoy::config::core::v3::Address proto_address; + const auto port_value = provider.ConsumeIntegral(); + const std::string address_value = provider.ConsumeRemainingBytesAsString(); + proto_address.mutable_socket_address()->set_address(address_value); + proto_address.mutable_socket_address()->set_port_value(port_value); + Network::Utility::protobufAddressToAddress(proto_address); + } catch (const EnvoyException& e) { + ENVOY_LOG_MISC(debug, "EnvoyException: {}", e.what()); + } + + try { + envoy::config::core::v3::Address proto_address; + Network::Address::Ipv4Instance address(string_buffer); + Network::Utility::addressToProtobufAddress(address, proto_address); + } catch (const EnvoyException& e) { + ENVOY_LOG_MISC(debug, "EnvoyException: {}", e.what()); + } + + try { + envoy::config::core::v3::Address proto_address; + Network::Address::PipeInstance address(string_buffer); + Network::Utility::addressToProtobufAddress(address, proto_address); + } catch (const EnvoyException& e) { + ENVOY_LOG_MISC(debug, "EnvoyException: {}", e.what()); + } +} + +} // namespace Fuzz 
+} // namespace Envoy diff --git a/test/common/network/utility_test.cc b/test/common/network/utility_test.cc index 120f13615c82b..96f42f40dc978 100644 --- a/test/common/network/utility_test.cc +++ b/test/common/network/utility_test.cc @@ -169,7 +169,13 @@ TEST_P(NetworkUtilityGetLocalAddress, GetLocalAddress) { EXPECT_NE(nullptr, Utility::getLocalAddress(GetParam())); } -TEST(NetworkUtility, GetOriginalDst) { EXPECT_EQ(nullptr, Utility::getOriginalDst(-1)); } +TEST(NetworkUtility, GetOriginalDst) { + testing::NiceMock socket; +#ifdef SOL_IP + EXPECT_CALL(socket, ipVersion()).WillOnce(testing::Return(absl::nullopt)); +#endif + EXPECT_EQ(nullptr, Utility::getOriginalDst(socket)); +} TEST(NetworkUtility, LocalConnection) { Network::Address::InstanceConstSharedPtr local_addr; @@ -347,24 +353,24 @@ TEST(NetworkUtility, ProtobufAddressSocketType) { { envoy::config::core::v3::Address proto_address; proto_address.mutable_socket_address(); - EXPECT_EQ(Address::SocketType::Stream, Utility::protobufAddressSocketType(proto_address)); + EXPECT_EQ(Socket::Type::Stream, Utility::protobufAddressSocketType(proto_address)); } { envoy::config::core::v3::Address proto_address; proto_address.mutable_socket_address()->set_protocol( envoy::config::core::v3::SocketAddress::TCP); - EXPECT_EQ(Address::SocketType::Stream, Utility::protobufAddressSocketType(proto_address)); + EXPECT_EQ(Socket::Type::Stream, Utility::protobufAddressSocketType(proto_address)); } { envoy::config::core::v3::Address proto_address; proto_address.mutable_socket_address()->set_protocol( envoy::config::core::v3::SocketAddress::UDP); - EXPECT_EQ(Address::SocketType::Datagram, Utility::protobufAddressSocketType(proto_address)); + EXPECT_EQ(Socket::Type::Datagram, Utility::protobufAddressSocketType(proto_address)); } { envoy::config::core::v3::Address proto_address; proto_address.mutable_pipe(); - EXPECT_EQ(Address::SocketType::Stream, Utility::protobufAddressSocketType(proto_address)); + EXPECT_EQ(Socket::Type::Stream, 
Utility::protobufAddressSocketType(proto_address)); } } diff --git a/test/common/protobuf/BUILD b/test/common/protobuf/BUILD index c6709f80e46f0..bb018981c2904 100644 --- a/test/common/protobuf/BUILD +++ b/test/common/protobuf/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_fuzz_test", @@ -7,6 +5,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( @@ -30,16 +30,18 @@ envoy_cc_test( "//test/mocks/init:init_mocks", "//test/mocks/local_info:local_info_mocks", "//test/mocks/protobuf:protobuf_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:instance_mocks", "//test/proto:deprecated_proto_cc_proto", "//test/proto:sensitive_proto_cc_proto", "//test/test_common:environment_lib", "//test/test_common:logging_lib", "//test/test_common:utility_lib", "@envoy_api//envoy/api/v2:pkg_cc_proto", + "@envoy_api//envoy/api/v2/core:pkg_cc_proto", "@envoy_api//envoy/config/bootstrap/v2:pkg_cc_proto", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", "@envoy_api//envoy/config/cluster/v3:pkg_cc_proto", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", "@envoy_api//envoy/type/v3:pkg_cc_proto", ], ) diff --git a/test/common/protobuf/message_validator_impl_test.cc b/test/common/protobuf/message_validator_impl_test.cc index fd54337045846..d558799cc4406 100644 --- a/test/common/protobuf/message_validator_impl_test.cc +++ b/test/common/protobuf/message_validator_impl_test.cc @@ -16,14 +16,17 @@ namespace { // The null validation visitor doesn't do anything on unknown fields. 
TEST(NullValidationVisitorImpl, UnknownField) { NullValidationVisitorImpl null_validation_visitor; + EXPECT_TRUE(null_validation_visitor.skipValidation()); EXPECT_NO_THROW(null_validation_visitor.onUnknownField("foo")); } // The warning validation visitor logs and bumps stats on unknown fields TEST(WarningValidationVisitorImpl, UnknownField) { Stats::TestUtil::TestStore stats; - Stats::Counter& counter = stats.counter("counter"); + Stats::Counter& unknown_counter = stats.counter("counter"); WarningValidationVisitorImpl warning_validation_visitor; + // we want to be executed. + EXPECT_FALSE(warning_validation_visitor.skipValidation()); // First time around we should log. EXPECT_LOG_CONTAINS("warn", "Unknown field: foo", warning_validation_visitor.onUnknownField("foo")); @@ -34,18 +37,19 @@ TEST(WarningValidationVisitorImpl, UnknownField) { EXPECT_LOG_CONTAINS("warn", "Unknown field: bar", warning_validation_visitor.onUnknownField("bar")); // When we set the stats counter, the above increments are transferred. - EXPECT_EQ(0, counter.value()); - warning_validation_visitor.setCounter(counter); - EXPECT_EQ(2, counter.value()); + EXPECT_EQ(0, unknown_counter.value()); + warning_validation_visitor.setUnknownCounter(unknown_counter); + EXPECT_EQ(2, unknown_counter.value()); // A third unknown field is tracked in stats post-initialization. EXPECT_LOG_CONTAINS("warn", "Unknown field: baz", warning_validation_visitor.onUnknownField("baz")); - EXPECT_EQ(3, counter.value()); + EXPECT_EQ(3, unknown_counter.value()); } // The strict validation visitor throws on unknown fields. 
TEST(StrictValidationVisitorImpl, UnknownField) { StrictValidationVisitorImpl strict_validation_visitor; + EXPECT_FALSE(strict_validation_visitor.skipValidation()); EXPECT_THROW_WITH_MESSAGE(strict_validation_visitor.onUnknownField("foo"), UnknownProtoFieldException, "Protobuf message (foo) has unknown fields"); diff --git a/test/common/protobuf/utility_test.cc b/test/common/protobuf/utility_test.cc index 6ba14af4d27d6..2132fd25e2d2a 100644 --- a/test/common/protobuf/utility_test.cc +++ b/test/common/protobuf/utility_test.cc @@ -1,6 +1,5 @@ -#include - #include "envoy/api/v2/cluster.pb.h" +#include "envoy/api/v2/core/base.pb.h" #include "envoy/config/bootstrap/v2/bootstrap.pb.h" #include "envoy/config/bootstrap/v3/bootstrap.pb.h" #include "envoy/config/bootstrap/v3/bootstrap.pb.validate.h" @@ -8,6 +7,7 @@ #include "envoy/config/cluster/v3/cluster.pb.validate.h" #include "envoy/config/cluster/v3/filter.pb.h" #include "envoy/config/cluster/v3/filter.pb.validate.h" +#include "envoy/config/core/v3/base.pb.h" #include "envoy/type/v3/percent.pb.h" #include "common/common/base64.h" @@ -21,13 +21,14 @@ #include "test/mocks/init/mocks.h" #include "test/mocks/local_info/mocks.h" #include "test/mocks/protobuf/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/instance.h" #include "test/proto/deprecated.pb.h" #include "test/proto/sensitive.pb.h" #include "test/test_common/environment.h" #include "test/test_common/logging.h" #include "test/test_common/utility.h" +#include "absl/container/node_hash_set.h" #include "gtest/gtest.h" #include "udpa/type/v1/typed_struct.pb.h" @@ -154,6 +155,34 @@ TEST_F(ProtobufUtilityTest, MessageUtilHash) { EXPECT_NE(MessageUtil::hash(s), MessageUtil::hash(a1)); } +TEST_F(ProtobufUtilityTest, MessageUtilHashAndEqualToIgnoreOriginalTypeField) { + ProtobufWkt::Struct s; + (*s.mutable_fields())["ab"].set_string_value("fgh"); + EXPECT_EQ(1, s.fields_size()); + envoy::api::v2::core::Metadata mv2; + 
mv2.mutable_filter_metadata()->insert({"xyz", s}); + EXPECT_EQ(1, mv2.filter_metadata_size()); + + // Add the OriginalTypeFieldNumber as unknown field. + envoy::config::core::v3::Metadata mv3; + Config::VersionConverter::upgrade(mv2, mv3); + + // Add another unknown field. + { + const Protobuf::Reflection* reflection = mv3.GetReflection(); + auto* unknown_field_set = reflection->MutableUnknownFields(&mv3); + auto set_size = unknown_field_set->field_count(); + // 183412668 is the magic number OriginalTypeFieldNumber. The successor number should not be + // occupied. + unknown_field_set->AddFixed32(183412668 + 1, 1); + EXPECT_EQ(set_size + 1, unknown_field_set->field_count()) << "Fail to add an unknown field"; + } + + envoy::config::core::v3::Metadata mv3dup = mv3; + ASSERT_EQ(MessageUtil::hash(mv3), MessageUtil::hash(mv3dup)); + ASSERT(MessageUtil()(mv3, mv3dup)); +} + TEST_F(ProtobufUtilityTest, RepeatedPtrUtilDebugString) { Protobuf::RepeatedPtrField repeated; EXPECT_EQ("[]", RepeatedPtrUtil::debugString(repeated)); @@ -219,8 +248,15 @@ TEST_F(ProtobufUtilityTest, LoadBinaryProtoUnknownFieldFromFile) { source_duration.set_seconds(42); const std::string filename = TestEnvironment::writeStringToFileForTest("proto.pb", source_duration.SerializeAsString()); + // Verify without boosting envoy::config::bootstrap::v3::Bootstrap proto_from_file; - EXPECT_THROW_WITH_MESSAGE(TestUtility::loadFromFile(filename, proto_from_file, *api_), + EXPECT_THROW_WITH_MESSAGE(TestUtility::loadFromFile(filename, proto_from_file, *api_, false), + EnvoyException, + "Protobuf message (type envoy.config.bootstrap.v3.Bootstrap with " + "unknown field set {1}) has unknown fields"); + + // Verify with boosting + EXPECT_THROW_WITH_MESSAGE(TestUtility::loadFromFile(filename, proto_from_file, *api_, true), EnvoyException, "Protobuf message (type envoy.config.bootstrap.v3.Bootstrap with " "unknown field set {1}) has unknown fields"); @@ -257,6 +293,23 @@ TEST_F(ProtobufUtilityTest, 
LoadTextProtoFromFile) { EXPECT_TRUE(TestUtility::protoEqual(bootstrap, proto_from_file)); } +TEST_F(ProtobufUtilityTest, LoadJsonFromFileNoBoosting) { + envoy::config::bootstrap::v3::Bootstrap bootstrap; + bootstrap.mutable_cluster_manager() + ->mutable_upstream_bind_config() + ->mutable_source_address() + ->set_address("1.1.1.1"); + + std::string bootstrap_text; + ASSERT_TRUE(Protobuf::TextFormat::PrintToString(bootstrap, &bootstrap_text)); + const std::string filename = + TestEnvironment::writeStringToFileForTest("proto.pb_text", bootstrap_text); + + envoy::config::bootstrap::v3::Bootstrap proto_from_file; + TestUtility::loadFromFile(filename, proto_from_file, *api_); + EXPECT_TRUE(TestUtility::protoEqual(bootstrap, proto_from_file)); +} + TEST_F(ProtobufUtilityTest, DEPRECATED_FEATURE_TEST(LoadV2TextProtoFromFile)) { API_NO_BOOST(envoy::config::bootstrap::v2::Bootstrap) bootstrap; bootstrap.mutable_node()->set_build_version("foo"); @@ -1088,7 +1141,7 @@ TEST_F(ProtobufUtilityTest, HashedValueStdHash) { HashedValue hv1(v1), hv2(v2), hv3(v3); - std::unordered_set set; + absl::node_hash_set set; set.emplace(hv1); set.emplace(hv2); set.emplace(hv3); @@ -1200,6 +1253,15 @@ TEST_F(ProtobufUtilityTest, LoadFromJsonSameVersion) { } } +// MessageUtility::loadFromJson() avoids boosting when version specified. +TEST_F(ProtobufUtilityTest, LoadFromJsonNoBoosting) { + envoy::config::cluster::v3::Cluster dst; + EXPECT_THROW_WITH_REGEX( + MessageUtil::loadFromJson("{drain_connections_on_host_removal: true}", dst, + ProtobufMessage::getStrictValidationVisitor(), false), + EnvoyException, "INVALID_ARGUMENT:drain_connections_on_host_removal: Cannot find field."); +} + // MessageUtility::loadFromJson() with API message works across version. 
TEST_F(ProtobufUtilityTest, LoadFromJsonNextVersion) { { @@ -1370,7 +1432,10 @@ class DeprecatedFieldsTest : public testing::TestWithParam { protected: DeprecatedFieldsTest() : with_upgrade_(GetParam()), api_(Api::createApiForTest(store_)), - runtime_deprecated_feature_use_(store_.counter("runtime.deprecated_feature_use")) { + runtime_deprecated_feature_use_(store_.counter("runtime.deprecated_feature_use")), + deprecated_feature_seen_since_process_start_( + store_.gauge("runtime.deprecated_feature_seen_since_process_start", + Stats::Gauge::ImportMode::NeverImport)) { envoy::config::bootstrap::v3::LayeredRuntime config; config.add_layers()->mutable_admin_layer(); loader_ = std::make_unique( @@ -1393,11 +1458,12 @@ class DeprecatedFieldsTest : public testing::TestWithParam { Event::MockDispatcher dispatcher_; NiceMock tls_; Stats::TestUtil::TestStore store_; - Runtime::MockRandomGenerator generator_; + Random::MockRandomGenerator generator_; Api::ApiPtr api_; - Runtime::MockRandomGenerator rand_; + Random::MockRandomGenerator rand_; std::unique_ptr loader_; Stats::Counter& runtime_deprecated_feature_use_; + Stats::Gauge& deprecated_feature_seen_since_process_start_; NiceMock local_info_; NiceMock validation_visitor_; }; @@ -1419,6 +1485,7 @@ TEST_P(DeprecatedFieldsTest, NoErrorWhenDeprecatedFieldsUnused) { // Fatal checks for a non-deprecated field should cause no problem. 
checkForDeprecation(base); EXPECT_EQ(0, runtime_deprecated_feature_use_.value()); + EXPECT_EQ(0, deprecated_feature_seen_since_process_start_.value()); } TEST_P(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(IndividualFieldDeprecated)) { @@ -1429,6 +1496,7 @@ TEST_P(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(IndividualFieldDeprecated)) "Using deprecated option 'envoy.test.deprecation_test.Base.is_deprecated'", checkForDeprecation(base)); EXPECT_EQ(1, runtime_deprecated_feature_use_.value()); + EXPECT_EQ(1, deprecated_feature_seen_since_process_start_.value()); } // Use of a deprecated and disallowed field should result in an exception. @@ -1436,7 +1504,7 @@ TEST_P(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(IndividualFieldDisallowed)) envoy::test::deprecation_test::Base base; base.set_is_deprecated_fatal("foo"); EXPECT_THROW_WITH_REGEX( - checkForDeprecation(base), ProtoValidationException, + checkForDeprecation(base), Envoy::ProtobufMessage::DeprecatedProtoFieldException, "Using deprecated option 'envoy.test.deprecation_test.Base.is_deprecated_fatal'"); } @@ -1447,7 +1515,7 @@ TEST_P(DeprecatedFieldsTest, // Make sure this is set up right. EXPECT_THROW_WITH_REGEX( - checkForDeprecation(base), ProtoValidationException, + checkForDeprecation(base), Envoy::ProtobufMessage::DeprecatedProtoFieldException, "Using deprecated option 'envoy.test.deprecation_test.Base.is_deprecated_fatal'"); // The config will be rejected, so the feature will not be used. 
EXPECT_EQ(0, runtime_deprecated_feature_use_.value()); @@ -1480,7 +1548,7 @@ TEST_P(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(DisallowViaRuntime)) { {{"envoy.deprecated_features:envoy.test.deprecation_test.Base.is_deprecated", " false"}}); EXPECT_THROW_WITH_REGEX( - checkForDeprecation(base), ProtoValidationException, + checkForDeprecation(base), Envoy::ProtobufMessage::DeprecatedProtoFieldException, "Using deprecated option 'envoy.test.deprecation_test.Base.is_deprecated'"); EXPECT_EQ(1, runtime_deprecated_feature_use_.value()); } @@ -1495,7 +1563,7 @@ TEST_P(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(MixOfFatalAndWarnings)) { EXPECT_LOG_CONTAINS( "warning", "Using deprecated option 'envoy.test.deprecation_test.Base.is_deprecated'", { EXPECT_THROW_WITH_REGEX( - checkForDeprecation(base), ProtoValidationException, + checkForDeprecation(base), Envoy::ProtobufMessage::DeprecatedProtoFieldException, "Using deprecated option 'envoy.test.deprecation_test.Base.is_deprecated_fatal'"); }); } @@ -1588,7 +1656,8 @@ TEST_P(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(RuntimeOverrideEnumDefault) {{"envoy.deprecated_features:envoy.test.deprecation_test.Base.DEPRECATED_DEFAULT", "false"}}); // Make sure this is set up right. 
- EXPECT_THROW_WITH_REGEX(checkForDeprecation(base), ProtoValidationException, + EXPECT_THROW_WITH_REGEX(checkForDeprecation(base), + Envoy::ProtobufMessage::DeprecatedProtoFieldException, "Using the default now-deprecated value DEPRECATED_DEFAULT"); } @@ -1597,7 +1666,8 @@ TEST_P(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(FatalEnum)) { envoy::test::deprecation_test::Base base; base.mutable_enum_container()->set_deprecated_enum( envoy::test::deprecation_test::Base::DEPRECATED_FATAL); - EXPECT_THROW_WITH_REGEX(checkForDeprecation(base), ProtoValidationException, + EXPECT_THROW_WITH_REGEX(checkForDeprecation(base), + Envoy::ProtobufMessage::DeprecatedProtoFieldException, "Using deprecated value DEPRECATED_FATAL"); Runtime::LoaderSingleton::getExisting()->mergeValues( @@ -1612,6 +1682,51 @@ TEST_P(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(FatalEnum)) { checkForDeprecation(base)); } +// Verify that direct use of a hidden_envoy_deprecated field fails, but upgrade +// succeeds +TEST_P(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(ManualDeprecatedFieldAddition)) { + // Create a base message and insert a deprecated field. When upgrading the + // deprecated field should be set as deprecated, and a warning should be logged + envoy::test::deprecation_test::Base base_should_warn = + TestUtility::parseYaml(R"EOF( + not_deprecated: field1 + is_deprecated: hidden_field1 + not_deprecated_message: + inner_not_deprecated: subfield1 + repeated_message: + - inner_not_deprecated: subfield2 + )EOF"); + + // Non-fatal checks for a deprecated field should log rather than throw an exception. + EXPECT_LOG_CONTAINS("warning", + "Using deprecated option 'envoy.test.deprecation_test.Base.is_deprecated'", + checkForDeprecation(base_should_warn)); + EXPECT_EQ(1, runtime_deprecated_feature_use_.value()); + EXPECT_EQ(1, deprecated_feature_seen_since_process_start_.value()); + + // Create an upgraded message and insert a deprecated field. 
This is a bypass + // of the upgrading procedure validation, and should fail + envoy::test::deprecation_test::UpgradedBase base_should_fail = + TestUtility::parseYaml(R"EOF( + not_deprecated: field1 + hidden_envoy_deprecated_is_deprecated: hidden_field1 + not_deprecated_message: + inner_not_deprecated: subfield1 + repeated_message: + - inner_not_deprecated: subfield2 + )EOF"); + + EXPECT_THROW_WITH_REGEX( + MessageUtil::checkForUnexpectedFields(base_should_fail, + ProtobufMessage::getStrictValidationVisitor()), + ProtoValidationException, + "Illegal use of hidden_envoy_deprecated_ V2 field " + "'envoy.test.deprecation_test.UpgradedBase.hidden_envoy_deprecated_is_deprecated'"); + // The config will be rejected, so the feature will not be used. + EXPECT_EQ(1, runtime_deprecated_feature_use_.value()); + EXPECT_EQ(1, deprecated_feature_seen_since_process_start_.value()); +} + class TimestampUtilTest : public testing::Test, public ::testing::WithParamInterface {}; TEST_P(TimestampUtilTest, SystemClockToTimestampTest) { diff --git a/test/common/router/BUILD b/test/common/router/BUILD index c3cab9845d2c0..a377e5672fdd3 100644 --- a/test/common/router/BUILD +++ b/test/common/router/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_fuzz_test", @@ -11,6 +9,8 @@ load( "envoy_proto_library", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( @@ -30,7 +30,7 @@ envoy_cc_test_library( "//source/common/stream_info:filter_state_lib", "//test/extensions/filters/http/common:empty_http_filter_config_lib", "//test/fuzz:utility_lib", - "//test/mocks/server:server_mocks", + "//test/mocks/server:instance_mocks", "//test/test_common:environment_lib", "//test/test_common:registry_lib", "//test/test_common:test_runtime_lib", @@ -68,10 +68,10 @@ envoy_cc_test( "//source/common/config:utility_lib", "//source/common/json:json_loader_lib", "//source/common/router:rds_lib", - "//source/server/http:admin_lib", + 
"//source/server/admin:admin_lib", "//test/mocks/local_info:local_info_mocks", "//test/mocks/protobuf:protobuf_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:instance_mocks", "//test/mocks/thread_local:thread_local_mocks", "//test/mocks/upstream:upstream_mocks", "//test/test_common:simulated_time_system_lib", @@ -112,12 +112,12 @@ envoy_cc_test( "//source/common/http:message_lib", "//source/common/json:json_loader_lib", "//source/common/router:scoped_rds_lib", - "//source/server/http:admin_lib", + "//source/server/admin:admin_lib", "//test/mocks/config:config_mocks", "//test/mocks/init:init_mocks", "//test/mocks/protobuf:protobuf_mocks", "//test/mocks/router:router_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:instance_mocks", "//test/test_common:simulated_time_system_lib", "//test/test_common:utility_lib", "@envoy_api//envoy/admin/v3:pkg_cc_proto", @@ -137,10 +137,10 @@ envoy_cc_test( "//source/common/protobuf", "//source/common/router:rds_lib", "//source/common/router:vhds_lib", - "//source/server/http:admin_lib", + "//source/server/admin:admin_lib", "//test/mocks/config:config_mocks", "//test/mocks/local_info:local_info_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:instance_mocks", "//test/mocks/thread_local:thread_local_mocks", "//test/mocks/upstream:upstream_mocks", "//test/test_common:simulated_time_system_lib", @@ -160,6 +160,7 @@ envoy_cc_test( "//test/mocks/router:router_mocks", "//test/mocks/runtime:runtime_mocks", "//test/mocks/upstream:upstream_mocks", + "//test/test_common:test_runtime_lib", "//test/test_common:utility_lib", "@envoy_api//envoy/config/route/v3:pkg_cc_proto", ], @@ -227,7 +228,7 @@ envoy_cc_fuzz_test( ":route_fuzz_proto_cc_proto", "//source/common/router:config_lib", "//test/fuzz:utility_lib", - "//test/mocks/server:server_mocks", + "//test/mocks/server:instance_mocks", "@envoy_api//envoy/config/route/v3:pkg_cc_proto", ], ) @@ -243,7 +244,7 @@ envoy_cc_test( 
"//test/mocks/http:http_mocks", "//test/mocks/ratelimit:ratelimit_mocks", "//test/mocks/router:router_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:instance_mocks", "//test/test_common:utility_lib", "@envoy_api//envoy/config/route/v3:pkg_cc_proto", ], @@ -261,6 +262,9 @@ envoy_cc_test( "//source/common/stream_info:uint32_accessor_lib", "//source/common/upstream:upstream_includes", "//source/common/upstream:upstream_lib", + "//source/extensions/upstreams/http/generic:config", + "//source/extensions/upstreams/http/http:config", + "//source/extensions/upstreams/http/tcp:config", "//test/common/http:common_lib", "//test/mocks/http:http_mocks", "//test/mocks/local_info:local_info_mocks", @@ -275,6 +279,8 @@ envoy_cc_test( "//test/test_common:utility_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/transport_sockets/tls/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/upstreams/http/http/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/upstreams/http/tcp/v3:pkg_cc_proto", "@envoy_api//envoy/type/v3:pkg_cc_proto", ], ) @@ -290,6 +296,7 @@ envoy_cc_test( "//source/common/upstream:upstream_includes", "//source/common/upstream:upstream_lib", "//source/extensions/access_loggers/file:config", + "//source/extensions/upstreams/http/generic:config", "//test/common/http:common_lib", "//test/mocks/access_log:access_log_mocks", "//test/mocks/filesystem:filesystem_mocks", @@ -298,7 +305,7 @@ envoy_cc_test( "//test/mocks/network:network_mocks", "//test/mocks/router:router_mocks", "//test/mocks/runtime:runtime_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "//test/mocks/ssl:ssl_mocks", "//test/mocks/upstream:upstream_mocks", "//test/test_common:utility_lib", @@ -348,3 +355,12 @@ envoy_cc_test( "//source/common/router:string_accessor_lib", ], ) + +envoy_cc_test( + name = "upstream_request_test", + srcs = ["upstream_request_test.cc"], + deps = [ + 
"//source/common/router:router_lib", + "//test/mocks/router:router_filter_interface", + ], +) diff --git a/test/common/router/config_impl_test.cc b/test/common/router/config_impl_test.cc index fd61d51ff961a..5d4ce7b56bc77 100644 --- a/test/common/router/config_impl_test.cc +++ b/test/common/router/config_impl_test.cc @@ -4,6 +4,7 @@ #include #include #include +#include #include "envoy/config/route/v3/route.pb.h" #include "envoy/config/route/v3/route.pb.validate.h" @@ -22,7 +23,7 @@ #include "test/common/router/route_fuzz.pb.h" #include "test/extensions/filters/http/common/empty_http_filter_config.h" #include "test/fuzz/utility.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/instance.h" #include "test/test_common/environment.h" #include "test/test_common/printers.h" #include "test/test_common/registry.h" @@ -58,9 +59,7 @@ class TestConfigImpl : public ConfigImpl { validate_clusters_default), config_(config) {} - RouteConstSharedPtr route(const Http::RequestHeaderMap& headers, - const Envoy::StreamInfo::StreamInfo& stream_info, - uint64_t random_value) const override { + void setupRouteConfig(const Http::RequestHeaderMap& headers, uint64_t random_value) const { absl::optional corpus_path = TestEnvironment::getOptionalEnvVar("GENRULE_OUTPUT_DIR"); if (corpus_path) { @@ -77,9 +76,28 @@ class TestConfigImpl : public ConfigImpl { corpus_file << corpus; } } + } + + RouteConstSharedPtr route(const Http::RequestHeaderMap& headers, + const Envoy::StreamInfo::StreamInfo& stream_info, + uint64_t random_value) const override { + + setupRouteConfig(headers, random_value); return ConfigImpl::route(headers, stream_info, random_value); } + RouteConstSharedPtr route(const RouteCallback& cb, const Http::RequestHeaderMap& headers, + const StreamInfo::StreamInfo& stream_info, + uint64_t random_value) const override { + + setupRouteConfig(headers, random_value); + return ConfigImpl::route(cb, headers, stream_info, random_value); + } + + RouteConstSharedPtr 
route(const RouteCallback& cb, const Http::RequestHeaderMap& headers) const { + return route(cb, headers, NiceMock(), 0); + } + RouteConstSharedPtr route(const Http::RequestHeaderMap& headers, uint64_t random_value) const { return route(headers, NiceMock(), random_value); } @@ -87,18 +105,42 @@ class TestConfigImpl : public ConfigImpl { const envoy::config::route::v3::RouteConfiguration config_; }; +Http::TestRequestHeaderMapImpl genPathlessHeaders(const std::string& host, + const std::string& method) { + return Http::TestRequestHeaderMapImpl{{":authority", host}, {":method", method}, + {"x-safe", "safe"}, {"x-global-nope", "global"}, + {"x-vhost-nope", "vhost"}, {"x-route-nope", "route"}, + {"x-forwarded-proto", "http"}}; +} + +Http::TestRequestHeaderMapImpl genHeaders(const std::string& host, const std::string& path, + const std::string& method, + const std::string& forwarded_proto) { + auto hdrs = Http::TestRequestHeaderMapImpl{ + {":authority", host}, {":path", path}, + {":method", method}, {"x-safe", "safe"}, + {"x-global-nope", "global"}, {"x-vhost-nope", "vhost"}, + {"x-route-nope", "route"}, {"x-forwarded-proto", forwarded_proto}}; + + if (forwarded_proto.empty()) { + hdrs.remove("x-forwarded-proto"); + } + + return hdrs; +} + Http::TestRequestHeaderMapImpl genHeaders(const std::string& host, const std::string& path, const std::string& method) { - return Http::TestRequestHeaderMapImpl{{":authority", host}, {":path", path}, - {":method", method}, {"x-safe", "safe"}, - {"x-global-nope", "global"}, {"x-vhost-nope", "vhost"}, - {"x-route-nope", "route"}, {"x-forwarded-proto", "http"}}; + return genHeaders(host, path, method, "http"); } +// Loads a V3 RouteConfiguration yaml envoy::config::route::v3::RouteConfiguration -parseRouteConfigurationFromV2Yaml(const std::string& yaml) { +parseRouteConfigurationFromYaml(const std::string& yaml) { envoy::config::route::v3::RouteConfiguration route_config; - TestUtility::loadFromYaml(yaml, route_config); + // Load the 
file and keep the annotations (in case of an upgrade) to make sure + // validate() observes the upgrade + TestUtility::loadFromYaml(yaml, route_config, true); TestUtility::validate(route_config); return route_config; } @@ -121,7 +163,6 @@ class ConfigImplTestBase { std::string responseHeadersConfig(const bool most_specific_wins, const bool append) const { const std::string yaml = R"EOF( -name: foo virtual_hosts: - name: www2 domains: ["www.lyft.com"] @@ -207,7 +248,6 @@ most_specific_header_mutations_wins: {0} std::string requestHeadersConfig(const bool most_specific_wins) { const std::string yaml = R"EOF( -name: foo virtual_hosts: - name: www2 domains: ["www.lyft.com"] @@ -340,7 +380,7 @@ TEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(TestLegacyRoutes)) { )EOF"; NiceMock stream_info; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); // Regular Expression matching EXPECT_EQ("clock", @@ -368,6 +408,7 @@ TEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(TestLegacyRoutes)) { config.route(genHeaders("bat2.com", "/foo", "GET"), 0)->routeEntry()->clusterName()); EXPECT_EQ("regex_default", config.route(genHeaders("bat2.com", " ", "GET"), 0)->routeEntry()->clusterName()); + EXPECT_TRUE(config.route(genPathlessHeaders("bat2.com", "GET"), 0) == nullptr); // Regular Expression matching with query string params EXPECT_EQ( @@ -432,6 +473,88 @@ TEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(TestLegacyRoutes)) { } } +TEST_F(RouteMatcherTest, TestConnectRoutes) { + const std::string yaml = R"EOF( +virtual_hosts: +- name: connect + domains: + - bat3.com + routes: + - match: + safe_regex: + google_re2: {} + regex: "foobar" + route: + cluster: connect_break + - match: + connect_matcher: + {} + route: + cluster: connect_match + prefix_rewrite: "/rewrote" + - match: + safe_regex: + google_re2: {} + regex: ".*" + route: + cluster: connect_fallthrough +- 
name: connect2 + domains: + - bat4.com + routes: + - match: + connect_matcher: + {} + redirect: { path_redirect: /new_path } +- name: default + domains: + - "*" + routes: + - match: + prefix: "/" + route: + cluster: instant-server + timeout: 30s + virtual_clusters: + - headers: + - name: ":path" + safe_regex_match: + google_re2: {} + regex: "^/users/\\d+/location$" + - name: ":method" + exact_match: POST + name: ulu + )EOF"; + NiceMock stream_info; + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); + + // Connect matching + EXPECT_EQ("connect_match", + config.route(genHeaders("bat3.com", " ", "CONNECT"), 0)->routeEntry()->clusterName()); + EXPECT_EQ( + "connect_match", + config.route(genPathlessHeaders("bat3.com", "CONNECT"), 0)->routeEntry()->clusterName()); + EXPECT_EQ("connect_fallthrough", + config.route(genHeaders("bat3.com", " ", "GET"), 0)->routeEntry()->clusterName()); + + // Prefix rewrite for CONNECT with path (for HTTP/2) + { + Http::TestRequestHeaderMapImpl headers = + genHeaders("bat3.com", "/api/locations?works=true", "CONNECT"); + const RouteEntry* route = config.route(headers, 0)->routeEntry(); + route->finalizeRequestHeaders(headers, stream_info, true); + EXPECT_EQ("/rewrote?works=true", headers.get_(Http::Headers::get().Path)); + } + // Prefix rewrite for CONNECT without path (for non-crashing) + { + Http::TestRequestHeaderMapImpl headers = genPathlessHeaders("bat4.com", "CONNECT"); + const DirectResponseEntry* redirect = config.route(headers, 0)->directResponseEntry(); + ASSERT(redirect != nullptr); + redirect->rewritePathHeader(headers, true); + EXPECT_EQ("http://bat4.com/new_path", redirect->newPath(headers)); + } +} + TEST_F(RouteMatcherTest, TestRoutes) { const std::string yaml = R"EOF( virtual_hosts: @@ -687,9 +810,8 @@ TEST_F(RouteMatcherTest, TestRoutes) { exact_match: POST name: ulu )EOF"; - NiceMock stream_info; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); 
+ TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); // No host header, no x-forwarded-proto and no path header testing. EXPECT_EQ(nullptr, @@ -1057,7 +1179,7 @@ TEST_F(RouteMatcherTest, TestRoutesWithWildcardAndDefaultOnly) { route: { cluster: "default" } )EOF"; - const auto proto_config = parseRouteConfigurationFromV2Yaml(yaml); + const auto proto_config = parseRouteConfigurationFromYaml(yaml); TestConfigImpl config(proto_config, factory_context_, true); EXPECT_EQ("wildcard", @@ -1092,10 +1214,10 @@ TEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(TestRoutesWithInvalidRegexLegac NiceMock stream_info; EXPECT_THROW_WITH_REGEX( - TestConfigImpl(parseRouteConfigurationFromV2Yaml(invalid_route), factory_context_, true), + TestConfigImpl(parseRouteConfigurationFromYaml(invalid_route), factory_context_, true), EnvoyException, "Invalid regex '/\\(\\+invalid\\)':"); - EXPECT_THROW_WITH_REGEX(TestConfigImpl(parseRouteConfigurationFromV2Yaml(invalid_virtual_cluster), + EXPECT_THROW_WITH_REGEX(TestConfigImpl(parseRouteConfigurationFromYaml(invalid_virtual_cluster), factory_context_, true), EnvoyException, "Invalid regex '\\^/\\(\\+invalid\\)':"); } @@ -1132,10 +1254,10 @@ TEST_F(RouteMatcherTest, TestRoutesWithInvalidRegex) { NiceMock stream_info; EXPECT_THROW_WITH_REGEX( - TestConfigImpl(parseRouteConfigurationFromV2Yaml(invalid_route), factory_context_, true), + TestConfigImpl(parseRouteConfigurationFromYaml(invalid_route), factory_context_, true), EnvoyException, "no argument for repetition operator:"); - EXPECT_THROW_WITH_REGEX(TestConfigImpl(parseRouteConfigurationFromV2Yaml(invalid_virtual_cluster), + EXPECT_THROW_WITH_REGEX(TestConfigImpl(parseRouteConfigurationFromYaml(invalid_virtual_cluster), factory_context_, true), EnvoyException, "no argument for repetition operator"); } @@ -1143,7 +1265,7 @@ TEST_F(RouteMatcherTest, TestRoutesWithInvalidRegex) { // Virtual cluster that contains neither pattern nor regex. 
This must be checked while pattern is // deprecated. TEST_F(RouteMatcherTest, TestRoutesWithInvalidVirtualCluster) { - const std::string invalid_virtual_cluster = R"EOF( + const std::string yaml = R"EOF( virtual_hosts: - name: regex domains: ["*"] @@ -1154,10 +1276,9 @@ TEST_F(RouteMatcherTest, TestRoutesWithInvalidVirtualCluster) { - name: "invalid" )EOF"; - EXPECT_THROW_WITH_REGEX(TestConfigImpl(parseRouteConfigurationFromV2Yaml(invalid_virtual_cluster), - factory_context_, true), - EnvoyException, - "virtual clusters must define either 'pattern' or 'headers'"); + EXPECT_THROW_WITH_REGEX( + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, + "virtual clusters must define either 'pattern' or 'headers'"); } // Validates behavior of request_headers_to_add at router, vhost, and route levels. @@ -1249,7 +1370,7 @@ TEST_F(RouteMatcherTest, TestAddRemoveRequestHeaders) { NiceMock stream_info; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); // Request header manipulation testing. { @@ -1299,8 +1420,7 @@ TEST_F(RouteMatcherTest, TestRequestHeadersToAddWithAppendFalse) { const std::string yaml = requestHeadersConfig(false); NiceMock stream_info; - envoy::config::route::v3::RouteConfiguration route_config = - parseRouteConfigurationFromV2Yaml(yaml); + envoy::config::route::v3::RouteConfiguration route_config = parseRouteConfigurationFromYaml(yaml); TestConfigImpl config(route_config, factory_context_, true); @@ -1357,7 +1477,7 @@ TEST_F(RouteMatcherTest, TestRequestHeadersToAddWithAppendFalseMostSpecificWins) const std::string yaml = requestHeadersConfig(true); NiceMock stream_info; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); // Route overrides vhost and global. 
{ @@ -1396,7 +1516,7 @@ TEST_F(RouteMatcherTest, TestAddRemoveResponseHeaders) { const std::string yaml = responseHeadersConfig(false, true); NiceMock stream_info; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); // Response header manipulation testing. { @@ -1452,7 +1572,7 @@ TEST_F(RouteMatcherTest, TestAddRemoveResponseHeadersAppendFalse) { const std::string yaml = responseHeadersConfig(false, false); NiceMock stream_info; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); Http::TestRequestHeaderMapImpl req_headers = genHeaders("www.lyft.com", "/new_endpoint/foo", "GET"); @@ -1468,7 +1588,7 @@ TEST_F(RouteMatcherTest, TestAddRemoveResponseHeadersAppendMostSpecificWins) { const std::string yaml = responseHeadersConfig(true, false); NiceMock stream_info; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); Http::TestRequestHeaderMapImpl req_headers = genHeaders("www.lyft.com", "/new_endpoint/foo", "GET"); @@ -1482,7 +1602,6 @@ TEST_F(RouteMatcherTest, TestAddRemoveResponseHeadersAppendMostSpecificWins) { TEST_F(RouteMatcherTest, TestAddGlobalResponseHeaderRemoveFromRoute) { const std::string yaml = R"EOF( -name: foo virtual_hosts: - name: www2 domains: ["www.lyft.com"] @@ -1504,7 +1623,7 @@ most_specific_header_mutations_wins: true )EOF"; NiceMock stream_info; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); { Http::TestRequestHeaderMapImpl req_headers = genHeaders("www.lyft.com", "/cacheable", "GET"); @@ -1528,7 +1647,6 @@ TEST_F(RouteMatcherTest, 
TestRequestHeadersToAddNoPseudoHeader) { for (const std::string& header : {":path", ":authority", ":method", ":scheme", ":status", ":protocol"}) { const std::string yaml = fmt::format(R"EOF( -name: foo virtual_hosts: - name: www2 domains: ["*"] @@ -1543,7 +1661,7 @@ name: foo NiceMock stream_info; envoy::config::route::v3::RouteConfiguration route_config = - parseRouteConfigurationFromV2Yaml(yaml); + parseRouteConfigurationFromYaml(yaml); EXPECT_THROW_WITH_MESSAGE(TestConfigImpl config(route_config, factory_context_, true), EnvoyException, ":-prefixed headers may not be modified"); @@ -1555,7 +1673,6 @@ TEST_F(RouteMatcherTest, TestRequestHeadersToRemoveNoPseudoHeader) { for (const std::string& header : {":path", ":authority", ":method", ":scheme", ":status", ":protocol", "host"}) { const std::string yaml = fmt::format(R"EOF( -name: foo virtual_hosts: - name: www2 domains: ["*"] @@ -1567,7 +1684,7 @@ name: foo NiceMock stream_info; envoy::config::route::v3::RouteConfiguration route_config = - parseRouteConfigurationFromV2Yaml(yaml); + parseRouteConfigurationFromYaml(yaml); EXPECT_THROW_WITH_MESSAGE(TestConfigImpl config(route_config, factory_context_, true), EnvoyException, ":-prefixed or host headers may not be removed"); @@ -1592,7 +1709,7 @@ TEST_F(RouteMatcherTest, Priority) { cluster: local_service_grpc )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); EXPECT_EQ(Upstream::ResourcePriority::High, config.route(genHeaders("www.lyft.com", "/foo", "GET"), 0)->routeEntry()->priority()); @@ -1615,7 +1732,7 @@ TEST_F(RouteMatcherTest, NoHostRewriteAndAutoRewrite) { auto_host_rewrite: true )EOF"; - EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), + EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException); } @@ -1634,7 +1751,7 @@ 
TEST_F(RouteMatcherTest, NoHostRewriteAndAutoRewriteHeader) { auto_host_rewrite_header: "dummy-header" )EOF"; - EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), + EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException); } @@ -1653,7 +1770,7 @@ TEST_F(RouteMatcherTest, NoAutoRewriteAndAutoRewriteHeader) { auto_host_rewrite_header: "dummy-header" )EOF"; - EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), + EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException); } @@ -1718,7 +1835,7 @@ TEST_F(RouteMatcherTest, HeaderMatchedRouting) { cluster: local_service_without_headers )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); { EXPECT_EQ("local_service_without_headers", @@ -1811,11 +1928,11 @@ TEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(InvalidHeaderMatchedRoutingConf route: { cluster: "local_service" } )EOF"; - EXPECT_NO_THROW(TestConfigImpl(parseRouteConfigurationFromV2Yaml(value_with_regex_chars), + EXPECT_NO_THROW(TestConfigImpl(parseRouteConfigurationFromYaml(value_with_regex_chars), factory_context_, true)); EXPECT_THROW_WITH_REGEX( - TestConfigImpl(parseRouteConfigurationFromV2Yaml(invalid_regex), factory_context_, true), + TestConfigImpl(parseRouteConfigurationFromYaml(invalid_regex), factory_context_, true), EnvoyException, "Invalid regex"); } @@ -1849,11 +1966,11 @@ TEST_F(RouteMatcherTest, InvalidHeaderMatchedRoutingConfig) { route: { cluster: "local_service" } )EOF"; - EXPECT_NO_THROW(TestConfigImpl(parseRouteConfigurationFromV2Yaml(value_with_regex_chars), + EXPECT_NO_THROW(TestConfigImpl(parseRouteConfigurationFromYaml(value_with_regex_chars), factory_context_, true)); EXPECT_THROW_WITH_REGEX( - 
TestConfigImpl(parseRouteConfigurationFromV2Yaml(invalid_regex), factory_context_, true), + TestConfigImpl(parseRouteConfigurationFromYaml(invalid_regex), factory_context_, true), EnvoyException, "no argument for repetition operator"); } @@ -1909,7 +2026,7 @@ TEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(QueryParamMatchedRouting)) { )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); { Http::TestRequestHeaderMapImpl headers = genHeaders("example.com", "/", "GET"); @@ -2003,11 +2120,11 @@ TEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(InvalidQueryParamMatchedRouting route: { cluster: "local_service" } )EOF"; - EXPECT_NO_THROW(TestConfigImpl(parseRouteConfigurationFromV2Yaml(value_with_regex_chars), + EXPECT_NO_THROW(TestConfigImpl(parseRouteConfigurationFromYaml(value_with_regex_chars), factory_context_, true)); EXPECT_THROW_WITH_REGEX( - TestConfigImpl(parseRouteConfigurationFromV2Yaml(invalid_regex), factory_context_, true), + TestConfigImpl(parseRouteConfigurationFromYaml(invalid_regex), factory_context_, true), EnvoyException, "Invalid regex"); } @@ -2031,7 +2148,7 @@ class RouterMatcherHashPolicyTest : public testing::Test, public ConfigImplTestB route: cluster: bar )EOF"; - route_config_ = parseRouteConfigurationFromV2Yaml(yaml); + route_config_ = parseRouteConfigurationFromYaml(yaml); } envoy::config::route::v3::RouteAction::HashPolicy* firstRouteHashPolicy() { @@ -2082,6 +2199,27 @@ TEST_F(RouterMatcherHashPolicyTest, HashHeaders) { } } +TEST_F(RouterMatcherHashPolicyTest, HashHeadersRegexSubstitution) { + // Apply a regex substitution before hashing. 
+ auto* header = firstRouteHashPolicy()->mutable_header(); + header->set_header_name(":path"); + auto* regex_spec = header->mutable_regex_rewrite(); + regex_spec->set_substitution("\\1"); + auto* pattern = regex_spec->mutable_pattern(); + pattern->mutable_google_re2(); + pattern->set_regex("^/(\\w+)$"); + { + Http::TestRequestHeaderMapImpl headers = genHeaders("www.lyft.com", "/foo", "GET"); + Router::RouteConstSharedPtr route = config().route(headers, 0); + const auto foo_hash_value = 3728699739546630719; + EXPECT_EQ(route->routeEntry() + ->hashPolicy() + ->generateHash(nullptr, headers, add_cookie_nop_, nullptr) + .value(), + foo_hash_value); + } +} + class RouterMatcherCookieHashPolicyTest : public RouterMatcherHashPolicyTest { public: RouterMatcherCookieHashPolicyTest() { @@ -2587,7 +2725,7 @@ TEST_F(RouteMatcherTest, ClusterHeader) { )EOF"; NiceMock stream_info; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); EXPECT_EQ( "some_cluster", @@ -2618,8 +2756,7 @@ TEST_F(RouteMatcherTest, ClusterHeader) { route->routeEntry()->maxGrpcTimeout(); route->routeEntry()->grpcTimeoutOffset(); route->routeEntry()->upgradeMap(); - route->routeEntry()->internalRedirectAction(); - route->routeEntry()->maxInternalRedirects(); + route->routeEntry()->internalRedirectPolicy(); } } @@ -2643,7 +2780,7 @@ TEST_F(RouteMatcherTest, ContentType) { cluster: local_service )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); { EXPECT_EQ("local_service", @@ -2681,7 +2818,7 @@ TEST_F(RouteMatcherTest, GrpcTimeoutOffset) { cluster: local_service_grpc )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); { 
EXPECT_EQ( @@ -2693,6 +2830,50 @@ TEST_F(RouteMatcherTest, GrpcTimeoutOffset) { ->grpcTimeoutOffset()); } +TEST_F(RouteMatcherTest, GrpcTimeoutOffsetOfDynamicRoute) { + // A DynamicRouteEntry will be created when 'cluster_header' is set. + const std::string yaml = R"EOF( +virtual_hosts: +- name: local_service + domains: + - "*" + routes: + - match: + prefix: "/foo" + route: + cluster: local_service_grpc + max_grpc_timeout: 0.1s + grpc_timeout_offset: 0.01s + - match: + prefix: "/" + route: + max_grpc_timeout: 0.2s + grpc_timeout_offset: 0.02s + cluster_header: request_to + )EOF"; + + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); + + { + Http::TestRequestHeaderMapImpl reqeust_headers = genHeaders("www.lyft.com", "/", "GET"); + reqeust_headers.addCopy(Http::LowerCaseString("reqeust_to"), "dynamic_grpc_service"); + EXPECT_EQ(absl::make_optional(std::chrono::milliseconds(20)), + config.route(reqeust_headers, 0)->routeEntry()->grpcTimeoutOffset()); + EXPECT_EQ(absl::make_optional(std::chrono::milliseconds(200)), + config.route(reqeust_headers, 0)->routeEntry()->maxGrpcTimeout()); + } + { + + EXPECT_EQ(absl::make_optional(std::chrono::milliseconds(10)), + config.route(genHeaders("www.lyft.com", "/foo", "GET"), 0) + ->routeEntry() + ->grpcTimeoutOffset()); + EXPECT_EQ( + absl::make_optional(std::chrono::milliseconds(100)), + config.route(genHeaders("www.lyft.com", "/foo", "GET"), 0)->routeEntry()->maxGrpcTimeout()); + } +} + TEST_F(RouteMatcherTest, FractionalRuntime) { const std::string yaml = R"EOF( virtual_hosts: @@ -2717,7 +2898,7 @@ TEST_F(RouteMatcherTest, FractionalRuntime) { Runtime::MockSnapshot snapshot; ON_CALL(factory_context_.runtime_loader_, snapshot()).WillByDefault(ReturnRef(snapshot)); - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, false); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, false); EXPECT_CALL(snapshot, featureEnabled("bogus_key", 
Matcher(_), 41)) @@ -2754,7 +2935,7 @@ TEST_F(RouteMatcherTest, ShadowClusterNotFound) { EXPECT_CALL(factory_context_.cluster_manager_, get(Eq("some_cluster"))) .WillRepeatedly(Return(nullptr)); - EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), + EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException); } @@ -2773,7 +2954,7 @@ TEST_F(RouteMatcherTest, ClusterNotFound) { EXPECT_CALL(factory_context_.cluster_manager_, get(Eq("www2"))).WillRepeatedly(Return(nullptr)); - EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), + EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException); } @@ -2792,7 +2973,7 @@ TEST_F(RouteMatcherTest, ClusterNotFoundNotChecking) { EXPECT_CALL(factory_context_.cluster_manager_, get(Eq("www2"))).WillRepeatedly(Return(nullptr)); - TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, false); + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, false); } TEST_F(RouteMatcherTest, ClusterNotFoundNotCheckingViaConfig) { @@ -2811,7 +2992,7 @@ validate_clusters: false EXPECT_CALL(factory_context_.cluster_manager_, get(Eq("www2"))).WillRepeatedly(Return(nullptr)); - TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true); } TEST_F(RouteMatcherTest, AttemptCountHeader) { @@ -2827,7 +3008,7 @@ TEST_F(RouteMatcherTest, AttemptCountHeader) { cluster: "whatever" )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); EXPECT_TRUE(config.route(genHeaders("www.lyft.com", "/foo", "GET"), 0) ->routeEntry() @@ -2849,7 +3030,7 @@ TEST_F(RouteMatcherTest, ClusterNotFoundResponseCode) { cluster: "not_found" 
)EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, false); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, false); Http::TestRequestHeaderMapImpl headers = genHeaders("www.lyft.com", "/", "GET"); @@ -2870,7 +3051,7 @@ TEST_F(RouteMatcherTest, ClusterNotFoundResponseCodeConfig503) { cluster_not_found_response_code: SERVICE_UNAVAILABLE )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, false); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, false); Http::TestRequestHeaderMapImpl headers = genHeaders("www.lyft.com", "/", "GET"); @@ -2891,7 +3072,7 @@ TEST_F(RouteMatcherTest, ClusterNotFoundResponseCodeConfig404) { cluster_not_found_response_code: NOT_FOUND )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, false); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, false); Http::TestRequestHeaderMapImpl headers = genHeaders("www.lyft.com", "/", "GET"); @@ -2943,7 +3124,7 @@ TEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(Shadow)) { cluster: www2 )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); const auto& foo_shadow_policies = config.route(genHeaders("www.lyft.com", "/foo", "GET"), 0)->routeEntry()->shadowPolicies(); @@ -2989,8 +3170,8 @@ TEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(ShadowPolicyAndPolicies)) { )EOF"; EXPECT_THROW_WITH_MESSAGE( - TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), - EnvoyException, "Cannot specify both request_mirror_policy and request_mirror_policies"); + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, + "Cannot specify both request_mirror_policy and request_mirror_policies"); } class RouteConfigurationV2 : public 
testing::Test, public ConfigImplTestBase {}; @@ -2998,7 +3179,6 @@ class RouteConfigurationV2 : public testing::Test, public ConfigImplTestBase {}; // When removing runtime_key: this test can be removed. TEST_F(RouteConfigurationV2, DEPRECATED_FEATURE_TEST(RequestMirrorPolicy)) { const std::string yaml = R"EOF( -name: foo virtual_hosts: - name: mirror domains: [mirror.lyft.com] @@ -3017,7 +3197,7 @@ name: foo )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); EXPECT_EQ("foo_mirror", config.route(genHeaders("mirror.lyft.com", "/foo", "GET"), 0) ->routeEntry() @@ -3065,7 +3245,7 @@ TEST_F(RouteMatcherTest, Retry) { retry_on: 5xx,gateway-error,connect-failure,reset )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); EXPECT_EQ(std::chrono::milliseconds(0), config.route(genHeaders("www.lyft.com", "/foo", "GET"), 0) @@ -3115,7 +3295,6 @@ TEST_F(RouteMatcherTest, Retry) { TEST_F(RouteMatcherTest, RetryVirtualHostLevel) { const std::string yaml = R"EOF( -name: RetryVirtualHostLevel virtual_hosts: - domains: [www.lyft.com] per_request_buffer_limit_bytes: 8 @@ -3133,7 +3312,7 @@ name: RetryVirtualHostLevel route: {cluster: www} )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); // Route level retry policy takes precedence. 
EXPECT_EQ(std::chrono::milliseconds(0), @@ -3217,7 +3396,7 @@ TEST_F(RouteMatcherTest, GrpcRetry) { retry_on: 5xx,deadline-exceeded,resource-exhausted )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); EXPECT_EQ(std::chrono::milliseconds(0), config.route(genHeaders("www.lyft.com", "/foo", "GET"), 0) @@ -3304,7 +3483,7 @@ TEST_F(RouteMatcherTest, RetryBackOffIntervals) { retry_on: connect-failure )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); EXPECT_EQ(absl::optional(50), config.route(genHeaders("www.lyft.com", "/foo", "GET"), 0) @@ -3355,7 +3534,7 @@ TEST_F(RouteMatcherTest, RetryBackOffIntervals) { // Test invalid route-specific retry back-off configs. TEST_F(RouteMatcherTest, InvalidRetryBackOff) { - const std::string invalid_max = R"EOF( + const std::string yaml = R"EOF( virtual_hosts: - name: backoff domains: ["*"] @@ -3370,13 +3549,12 @@ TEST_F(RouteMatcherTest, InvalidRetryBackOff) { )EOF"; EXPECT_THROW_WITH_MESSAGE( - TestConfigImpl(parseRouteConfigurationFromV2Yaml(invalid_max), factory_context_, true), - EnvoyException, "retry_policy.max_interval must greater than or equal to the base_interval"); + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, + "retry_policy.max_interval must greater than or equal to the base_interval"); } TEST_F(RouteMatcherTest, HedgeRouteLevel) { const std::string yaml = R"EOF( -name: HedgeRouteLevel virtual_hosts: - domains: [www.lyft.com] name: www @@ -3402,7 +3580,7 @@ name: HedgeRouteLevel denominator: HUNDRED )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); EXPECT_EQ(3, 
config.route(genHeaders("www.lyft.com", "/foo", "GET"), 0) ->routeEntry() @@ -3452,7 +3630,6 @@ name: HedgeRouteLevel TEST_F(RouteMatcherTest, HedgeVirtualHostLevel) { const std::string yaml = R"EOF( -name: HedgeVirtualHostLevel virtual_hosts: - domains: [www.lyft.com] name: www @@ -3470,7 +3647,7 @@ name: HedgeVirtualHostLevel route: {cluster: www} )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); // Route level hedge policy takes precedence. EXPECT_EQ(1, config.route(genHeaders("www.lyft.com", "/foo", "GET"), 0) @@ -3542,9 +3719,8 @@ TEST_F(RouteMatcherTest, TestBadDefaultConfig) { - x-lyft-user-id )EOF"; - EXPECT_THROW( - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), - EnvoyException); + EXPECT_THROW(TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true), + EnvoyException); } TEST_F(RouteMatcherTest, TestDuplicateDomainConfig) { @@ -3568,14 +3744,14 @@ TEST_F(RouteMatcherTest, TestDuplicateDomainConfig) { cluster: www2_staging )EOF"; - EXPECT_THROW( - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), - EnvoyException); + EXPECT_THROW(TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true), + EnvoyException); } // Test to detect if hostname matches are case-insensitive TEST_F(RouteMatcherTest, TestCaseSensitiveDomainConfig) { - std::string config_with_case_sensitive_domains = R"EOF( + std::string yaml = R"EOF( +name: foo virtual_hosts: - name: www2 domains: [www.lyft.com] @@ -3590,14 +3766,14 @@ TEST_F(RouteMatcherTest, TestCaseSensitiveDomainConfig) { )EOF"; EXPECT_THROW_WITH_MESSAGE( - TestConfigImpl(parseRouteConfigurationFromV2Yaml(config_with_case_sensitive_domains), - factory_context_, true), - EnvoyException, - "Only unique values for domains are permitted. 
Duplicate entry of domain www.lyft.com"); + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, + "Only unique values for domains are permitted. Duplicate entry of domain www.lyft.com in " + "route foo"); } TEST_F(RouteMatcherTest, TestDuplicateWildcardDomainConfig) { const std::string yaml = R"EOF( +name: foo virtual_hosts: - name: www2 domains: ["*"] @@ -3612,12 +3788,13 @@ TEST_F(RouteMatcherTest, TestDuplicateWildcardDomainConfig) { )EOF"; EXPECT_THROW_WITH_MESSAGE( - TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), - EnvoyException, "Only a single wildcard domain is permitted"); + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, + "Only a single wildcard domain is permitted in route foo"); } TEST_F(RouteMatcherTest, TestDuplicateSuffixWildcardDomainConfig) { const std::string yaml = R"EOF( +name: foo virtual_hosts: - name: www2 domains: ["*.lyft.com"] @@ -3632,13 +3809,14 @@ TEST_F(RouteMatcherTest, TestDuplicateSuffixWildcardDomainConfig) { )EOF"; EXPECT_THROW_WITH_MESSAGE( - TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), - EnvoyException, - "Only unique values for domains are permitted. Duplicate entry of domain *.lyft.com"); + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, + "Only unique values for domains are permitted. Duplicate entry of domain *.lyft.com in route " + "foo"); } TEST_F(RouteMatcherTest, TestDuplicatePrefixWildcardDomainConfig) { const std::string yaml = R"EOF( +name: foo virtual_hosts: - name: www2 domains: ["bar.*"] @@ -3653,9 +3831,8 @@ TEST_F(RouteMatcherTest, TestDuplicatePrefixWildcardDomainConfig) { )EOF"; EXPECT_THROW_WITH_MESSAGE( - TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), - EnvoyException, - "Only unique values for domains are permitted. 
Duplicate entry of domain bar.*"); + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, + "Only unique values for domains are permitted. Duplicate entry of domain bar.* in route foo"); } TEST_F(RouteMatcherTest, TestInvalidCharactersInPrefixRewrites) { @@ -3671,8 +3848,7 @@ TEST_F(RouteMatcherTest, TestInvalidCharactersInPrefixRewrites) { )EOF"; EXPECT_THROW_WITH_REGEX( - TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), - EnvoyException, + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, "RouteActionValidationError.PrefixRewrite:.*value does not match regex pattern"); } @@ -3689,8 +3865,7 @@ TEST_F(RouteMatcherTest, TestInvalidCharactersInHostRewrites) { )EOF"; EXPECT_THROW_WITH_REGEX( - TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), - EnvoyException, + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, "RouteActionValidationError.HostRewriteLiteral:.*value does not match regex pattern"); } @@ -3707,8 +3882,7 @@ TEST_F(RouteMatcherTest, TestInvalidCharactersInAutoHostRewrites) { )EOF"; EXPECT_THROW_WITH_REGEX( - TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), - EnvoyException, + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, "RouteActionValidationError.HostRewriteHeader:.*value does not match regex pattern"); } @@ -3723,8 +3897,7 @@ TEST_F(RouteMatcherTest, TestInvalidCharactersInHostRedirect) { )EOF"; EXPECT_THROW_WITH_REGEX( - TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), - EnvoyException, + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, "RedirectActionValidationError.HostRedirect:.*value does not match regex pattern"); } @@ -3739,8 +3912,7 @@ TEST_F(RouteMatcherTest, TestInvalidCharactersInPathRedirect) { 
)EOF"; EXPECT_THROW_WITH_REGEX( - TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), - EnvoyException, + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, "RedirectActionValidationError.PathRedirect:.*value does not match regex pattern"); } @@ -3755,8 +3927,7 @@ TEST_F(RouteMatcherTest, TestInvalidCharactersInPrefixRewriteRedirect) { )EOF"; EXPECT_THROW_WITH_REGEX( - TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), - EnvoyException, + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, "RedirectActionValidationError.PrefixRewrite:.*value does not match regex pattern"); } @@ -3778,8 +3949,8 @@ TEST_F(RouteMatcherTest, TestPrefixAndRegexRewrites) { )EOF"; EXPECT_THROW_WITH_MESSAGE( - TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), - EnvoyException, "Cannot specify both prefix_rewrite and regex_rewrite"); + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, + "Cannot specify both prefix_rewrite and regex_rewrite"); } TEST_F(RouteMatcherTest, TestDomainMatchOrderConfig) { @@ -3807,7 +3978,7 @@ TEST_F(RouteMatcherTest, TestDomainMatchOrderConfig) { route: { cluster: default } )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); EXPECT_EQ( "exact", @@ -3841,7 +4012,7 @@ TEST_F(RouteMatcherTest, NoProtocolInHeadersWhenTlsIsRequired) { cluster: www )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); // route may be called early in some edge cases and "x-forwarded-proto" will not be set. 
Http::TestRequestHeaderMapImpl headers{{":authority", "www.lyft.com"}, {":path", "/"}}; @@ -3852,7 +4023,7 @@ TEST_F(RouteMatcherTest, NoProtocolInHeadersWhenTlsIsRequired) { * @brief Generate headers for testing * @param ssl set true to insert "x-forwarded-proto: https", else "x-forwarded-proto: http" * @param internal nullopt for no such "x-envoy-internal" header, or explicit "true/false" - * @return Http::TestHeaderMapImpl + * @return Http::TestRequestHeaderMapImpl */ static Http::TestRequestHeaderMapImpl genRedirectHeaders(const std::string& host, const std::string& path, bool ssl, @@ -3884,7 +4055,7 @@ TEST_F(RouteMatcherTest, RouteName) { redirect: { host_redirect: new.lyft.com } )EOF"; NiceMock factory_context; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context, false); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context, false); { Http::TestRequestHeaderMapImpl headers = genHeaders("www.lyft.com", "/", "GET"); EXPECT_EQ("route-test", config.route(headers, 0)->routeEntry()->routeName()); @@ -3903,7 +4074,6 @@ TEST_F(RouteMatcherTest, DirectResponse) { TestEnvironment::writeStringToFileForTest("direct_response_body", "Example text 3"); static const std::string yaml = R"EOF( -name: foo virtual_hosts: - name: www2 domains: [www.lyft.com] @@ -4050,7 +4220,7 @@ name: foo route: { cluster: www2 } )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); EXPECT_EQ(nullptr, config.route(genRedirectHeaders("www.foo.com", "/foo", true, true), 0)); { Http::TestRequestHeaderMapImpl headers = genRedirectHeaders("www.lyft.com", "/foo", true, true); @@ -4353,7 +4523,7 @@ TEST_F(RouteMatcherTest, ExclusiveRouteEntryOrDirectResponseEntry) { host_redirect: new.lyft.com )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl 
config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); { Http::TestRequestHeaderMapImpl headers = genRedirectHeaders("www.lyft.com", "/foo", true, true); @@ -4397,7 +4567,7 @@ TEST_F(RouteMatcherTest, ExclusiveWeightedClustersEntryOrDirectResponseEntry) { host_redirect: "[fe80::1]" )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); { Http::TestRequestHeaderMapImpl headers = genRedirectHeaders("www.lyft.com", "/foo", true, true); @@ -4499,7 +4669,7 @@ TEST_F(RouteMatcherTest, WeightedClusters) { BazFactory baz_factory; Registry::InjectFactory registered_factory(baz_factory); auto& runtime = factory_context_.runtime_loader_; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); { Http::TestRequestHeaderMapImpl headers = @@ -4536,7 +4706,7 @@ TEST_F(RouteMatcherTest, WeightedClusters) { Http::TestResponseHeaderMapImpl response_headers; StreamInfo::MockStreamInfo stream_info; route_entry->finalizeResponseHeaders(response_headers, stream_info); - EXPECT_EQ(response_headers, Http::TestHeaderMapImpl{}); + EXPECT_EQ(response_headers, Http::TestResponseHeaderMapImpl{}); } // Weighted Cluster with no runtime, total weight = 10000 @@ -4636,7 +4806,7 @@ TEST_F(RouteMatcherTest, ExclusiveWeightedClustersOrClusterConfig) { cluster: www2 )EOF"; - EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), + EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException); } @@ -4654,7 +4824,7 @@ TEST_F(RouteMatcherTest, WeightedClustersMissingClusterList) { runtime_key_prefix: www2 )EOF"; - EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), + 
EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException); } @@ -4673,7 +4843,7 @@ TEST_F(RouteMatcherTest, WeightedClustersEmptyClustersList) { clusters: [] )EOF"; - EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), + EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException); } @@ -4696,8 +4866,8 @@ TEST_F(RouteMatcherTest, WeightedClustersSumOFWeightsNotEqualToMax) { )EOF"; EXPECT_THROW_WITH_MESSAGE( - TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), - EnvoyException, "Sum of weights in the weighted_cluster should add up to 100"); + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, + "Sum of weights in the weighted_cluster should add up to 100"); yaml = R"EOF( virtual_hosts: @@ -4718,8 +4888,8 @@ TEST_F(RouteMatcherTest, WeightedClustersSumOFWeightsNotEqualToMax) { )EOF"; EXPECT_THROW_WITH_MESSAGE( - TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), - EnvoyException, "Sum of weights in the weighted_cluster should add up to 99"); + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, + "Sum of weights in the weighted_cluster should add up to 99"); } TEST_F(RouteMatcherTest, TestWeightedClusterWithMissingWeights) { @@ -4741,7 +4911,7 @@ TEST_F(RouteMatcherTest, TestWeightedClusterWithMissingWeights) { - name: cluster3 )EOF"; - EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), + EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException); } @@ -4772,7 +4942,7 @@ TEST_F(RouteMatcherTest, TestWeightedClusterInvalidClusterName) { EXPECT_CALL(factory_context_.cluster_manager_, get(Eq("cluster3-invalid"))) .WillRepeatedly(Return(nullptr)); - 
EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), + EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException); } @@ -4810,7 +4980,7 @@ TEST_F(RouteMatcherTest, TestWeightedClusterHeaderManipulation) { response_headers_to_remove: [ "x-remove-cluster2" ] )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); NiceMock stream_info; { @@ -4868,7 +5038,7 @@ TEST_F(BadHttpRouteConfigurationsTest, BadRouteConfig) { fake_entry: fake_type )EOF"; - EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), + EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException); } @@ -4887,7 +5057,7 @@ TEST_F(BadHttpRouteConfigurationsTest, BadVirtualHostConfig) { cluster: www2 )EOF"; - EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), + EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException); } @@ -4905,7 +5075,7 @@ TEST_F(BadHttpRouteConfigurationsTest, BadRouteEntryConfig) { timeout: 1234s )EOF"; - EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), + EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException); } @@ -4923,11 +5093,22 @@ TEST_F(BadHttpRouteConfigurationsTest, BadRouteEntryConfigPrefixAndPath) { cluster: www2 )EOF"; +#ifndef GTEST_USES_SIMPLE_RE EXPECT_THROW_WITH_REGEX( - TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), - EnvoyException, + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, "invalid value oneof field 'path_specifier' is already set. 
Cannot set '(prefix|path)' for " "type oneof"); +#else + EXPECT_THAT_THROWS_MESSAGE( + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, + ::testing::AnyOf( + ::testing::ContainsRegex( + "invalid value oneof field 'path_specifier' is already set. Cannot set 'prefix' for " + "type oneof"), + ::testing::ContainsRegex( + "invalid value oneof field 'path_specifier' is already set. Cannot set 'path' for " + "type oneof"))); +#endif } TEST_F(BadHttpRouteConfigurationsTest, BadRouteEntryConfigMissingPathSpecifier) { @@ -4942,8 +5123,8 @@ TEST_F(BadHttpRouteConfigurationsTest, BadRouteEntryConfigMissingPathSpecifier) )EOF"; EXPECT_THROW_WITH_REGEX( - TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), - EnvoyException, "RouteValidationError.Match: \\[\"value is required\"\\]"); + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, + "RouteValidationError.Match: \\[\"value is required\"\\]"); } TEST_F(BadHttpRouteConfigurationsTest, BadRouteEntryConfigPrefixAndRegex) { @@ -4960,11 +5141,22 @@ TEST_F(BadHttpRouteConfigurationsTest, BadRouteEntryConfigPrefixAndRegex) { cluster: www2 )EOF"; +#ifndef GTEST_USES_SIMPLE_RE EXPECT_THROW_WITH_REGEX( - TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), - EnvoyException, + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, "invalid value oneof field 'path_specifier' is already set. Cannot set '(prefix|regex)' for " "type oneof"); +#else + EXPECT_THAT_THROWS_MESSAGE( + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, + ::testing::AnyOf( + ::testing::ContainsRegex( + "invalid value oneof field 'path_specifier' is already set. Cannot set 'prefix' for " + "type oneof"), + ::testing::ContainsRegex( + "invalid value oneof field 'path_specifier' is already set. 
Cannot set 'regex' for " + "type oneof"))); +#endif } TEST_F(BadHttpRouteConfigurationsTest, BadRouteEntryConfigNoAction) { @@ -4979,8 +5171,8 @@ TEST_F(BadHttpRouteConfigurationsTest, BadRouteEntryConfigNoAction) { )EOF"; EXPECT_THROW_WITH_REGEX( - TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), - EnvoyException, "caused by field: \"action\", reason: is required"); + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, + "caused by field: \"action\", reason: is required"); } TEST_F(BadHttpRouteConfigurationsTest, BadRouteEntryConfigPathAndRegex) { @@ -4997,11 +5189,22 @@ TEST_F(BadHttpRouteConfigurationsTest, BadRouteEntryConfigPathAndRegex) { cluster: www2 )EOF"; +#ifndef GTEST_USES_SIMPLE_RE EXPECT_THROW_WITH_REGEX( - TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), - EnvoyException, + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, "invalid value oneof field 'path_specifier' is already set. Cannot set '(path|regex)' for " "type oneof"); +#else + EXPECT_THAT_THROWS_MESSAGE( + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, + ::testing::AnyOf( + ::testing::ContainsRegex( + "invalid value oneof field 'path_specifier' is already set. Cannot set 'path' for " + "type oneof"), + ::testing::ContainsRegex( + "invalid value oneof field 'path_specifier' is already set. 
Cannot set 'regex' for " + "type oneof"))); +#endif } TEST_F(BadHttpRouteConfigurationsTest, BadRouteEntryConfigPrefixAndPathAndRegex) { @@ -5020,8 +5223,8 @@ TEST_F(BadHttpRouteConfigurationsTest, BadRouteEntryConfigPrefixAndPathAndRegex) )EOF"; EXPECT_THROW_WITH_REGEX( - TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), - EnvoyException, "invalid value oneof field 'path_specifier' is already set."); + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, + "invalid value oneof field 'path_specifier' is already set."); } TEST_F(RouteMatcherTest, TestOpaqueConfig) { @@ -5042,7 +5245,7 @@ TEST_F(RouteMatcherTest, TestOpaqueConfig) { name2: value2 )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); const std::multimap& opaque_config = config.route(genHeaders("api.lyft.com", "/api", "GET"), 0)->routeEntry()->opaqueConfig(); @@ -5070,7 +5273,7 @@ TEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(TestOpaqueConfigUsingDeprecated name2: value2 )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); const std::multimap& opaque_config = config.route(genHeaders("api.lyft.com", "/api", "GET"), 0)->routeEntry()->opaqueConfig(); @@ -5097,7 +5300,7 @@ TEST_F(RoutePropertyTest, ExcludeVHRateLimits) { Http::TestRequestHeaderMapImpl headers = genHeaders("www.lyft.com", "/foo", "GET"); std::unique_ptr config_ptr; - config_ptr = std::make_unique(parseRouteConfigurationFromV2Yaml(yaml), + config_ptr = std::make_unique(parseRouteConfigurationFromYaml(yaml), factory_context_, true); EXPECT_TRUE(config_ptr->route(headers, 0)->routeEntry()->includeVirtualHostRateLimits()); @@ -5116,7 +5319,7 @@ TEST_F(RoutePropertyTest, ExcludeVHRateLimits) { - remote_address: 
{} )EOF"; - config_ptr = std::make_unique(parseRouteConfigurationFromV2Yaml(yaml), + config_ptr = std::make_unique(parseRouteConfigurationFromYaml(yaml), factory_context_, true); EXPECT_FALSE(config_ptr->route(headers, 0)->routeEntry()->includeVirtualHostRateLimits()); @@ -5136,7 +5339,7 @@ TEST_F(RoutePropertyTest, ExcludeVHRateLimits) { - remote_address: {} )EOF"; - config_ptr = std::make_unique(parseRouteConfigurationFromV2Yaml(yaml), + config_ptr = std::make_unique(parseRouteConfigurationFromYaml(yaml), factory_context_, true); EXPECT_TRUE(config_ptr->route(headers, 0)->routeEntry()->includeVirtualHostRateLimits()); } @@ -5187,7 +5390,7 @@ TEST_F(RoutePropertyTest, DEPRECATED_FEATURE_TEST(TestVHostCorsConfig)) { .WillOnce(Return(true)); EXPECT_CALL(factory_context_.runtime_loader_, snapshot()).WillRepeatedly(ReturnRef(snapshot)); - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, false); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, false); const Router::CorsPolicy* cors_policy = config.route(genHeaders("api.lyft.com", "/api", "GET"), 0) @@ -5244,7 +5447,7 @@ TEST_F(RoutePropertyTest, TestRouteCorsConfig) { .WillOnce(Return(true)); EXPECT_CALL(factory_context_.runtime_loader_, snapshot()).WillRepeatedly(ReturnRef(snapshot)); - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, false); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, false); const Router::CorsPolicy* cors_policy = config.route(genHeaders("api.lyft.com", "/api", "GET"), 0)->routeEntry()->corsPolicy(); @@ -5281,7 +5484,7 @@ TEST_F(RoutePropertyTest, DEPRECATED_FEATURE_TEST(TTestVHostCorsLegacyConfig)) { cluster: ats )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); const Router::CorsPolicy* cors_policy = 
config.route(genHeaders("api.lyft.com", "/api", "GET"), 0) @@ -5321,7 +5524,7 @@ TEST_F(RoutePropertyTest, DEPRECATED_FEATURE_TEST(TestRouteCorsLegacyConfig)) { allow_credentials: true )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); const Router::CorsPolicy* cors_policy = config.route(genHeaders("api.lyft.com", "/api", "GET"), 0)->routeEntry()->corsPolicy(); @@ -5352,8 +5555,8 @@ TEST_F(RoutePropertyTest, TestBadCorsConfig) { )EOF"; EXPECT_THROW_WITH_REGEX( - TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), - EnvoyException, "Unable to parse JSON as proto .*: invalid value 0 for type TYPE_BOOL"); + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, + "Unable to parse JSON as proto .*: invalid value 0 for type TYPE_BOOL"); } TEST_F(RouteMatcherTest, Decorator) { @@ -5376,7 +5579,7 @@ TEST_F(RouteMatcherTest, Decorator) { cluster: bar )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); { Http::TestRequestHeaderMapImpl headers = genHeaders("www.lyft.com", "/foo", "GET"); @@ -5425,7 +5628,7 @@ TEST_F(CustomRequestHeadersTest, AddNewHeader) { value: "%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT%" )EOF"; NiceMock stream_info; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); Http::TestRequestHeaderMapImpl headers = genHeaders("www.lyft.com", "/new_endpoint/foo", "GET"); const RouteEntry* route = config.route(headers, 0)->routeEntry(); route->finalizeRequestHeaders(headers, stream_info, true); @@ -5463,7 +5666,7 @@ TEST_F(CustomRequestHeadersTest, CustomHeaderWrongFormat) { )EOF"; NiceMock stream_info; 
EXPECT_THROW_WITH_MESSAGE( - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, "Invalid header configuration. Un-terminated variable expression " "'DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT'"); @@ -5731,7 +5934,6 @@ TEST_F(ConfigUtilityTest, ParseDirectResponseBody) { TEST_F(RouteConfigurationV2, RedirectCode) { const std::string yaml = R"EOF( -name: foo virtual_hosts: - name: redirect domains: [redirect.lyft.com] @@ -5741,7 +5943,7 @@ name: foo )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); EXPECT_EQ(nullptr, config.route(genRedirectHeaders("www.foo.com", "/foo", true, true), 0)); @@ -5758,7 +5960,6 @@ name: foo // Test the parsing of direct response configurations within routes. TEST_F(RouteConfigurationV2, DirectResponse) { const std::string yaml = R"EOF( -name: foo virtual_hosts: - name: direct domains: [example.com] @@ -5767,7 +5968,7 @@ name: foo direct_response: { status: 200, body: { inline_string: "content" } } )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); const auto* direct_response = config.route(genHeaders("example.com", "/", "GET"), 0)->directResponseEntry(); @@ -5780,7 +5981,6 @@ name: foo TEST_F(RouteConfigurationV2, DirectResponseTooLarge) { std::string response_body(4097, 'A'); const std::string yaml = R"EOF( -name: foo virtual_hosts: - name: direct domains: [example.com] @@ -5792,9 +5992,9 @@ name: foo inline_string: )EOF" + response_body + "\n"; - EXPECT_THROW_WITH_MESSAGE(TestConfigImpl invalid_config(parseRouteConfigurationFromV2Yaml(yaml), - factory_context_, true), - EnvoyException, "response body size is 4097 bytes; maximum is 
4096"); + EXPECT_THROW_WITH_MESSAGE( + TestConfigImpl invalid_config(parseRouteConfigurationFromYaml(yaml), factory_context_, true), + EnvoyException, "response body size is 4097 bytes; maximum is 4096"); } void checkPathMatchCriterion(const Route* route, const std::string& expected_matcher, @@ -5810,7 +6010,6 @@ void checkPathMatchCriterion(const Route* route, const std::string& expected_mat // Test loading broken config throws EnvoyException. TEST_F(RouteConfigurationV2, BrokenTypedMetadata) { const std::string yaml = R"EOF( -name: foo virtual_hosts: - name: bar domains: ["*"] @@ -5823,7 +6022,7 @@ name: foo BazFactory baz_factory; Registry::InjectFactory registered_factory(baz_factory); EXPECT_THROW_WITH_MESSAGE( - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true), Envoy::EnvoyException, "Cannot create a Baz when metadata is empty."); } @@ -5847,7 +6046,7 @@ name: foo )EOF"; BazFactory baz_factory; Registry::InjectFactory registered_factory(baz_factory); - const TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + const TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); checkPathMatchCriterion(config.route(genHeaders("www.foo.com", "/regex", "GET"), 0).get(), "/rege[xy]", PathMatchType::Regex); @@ -5871,7 +6070,6 @@ name: foo TEST_F(RouteConfigurationV2, RouteTracingConfig) { const std::string yaml = R"EOF( -name: foo virtual_hosts: - name: bar domains: ["*"] @@ -5921,7 +6119,7 @@ name: foo )EOF"; BazFactory baz_factory; Registry::InjectFactory registered_factory(baz_factory); - const TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + const TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); const auto route1 = config.route(genHeaders("www.foo.com", "/first", "GET"), 0); const auto route2 = 
config.route(genHeaders("www.foo.com", "/second", "GET"), 0); @@ -5953,8 +6151,7 @@ name: foo // Test to check Prefix Rewrite for redirects TEST_F(RouteConfigurationV2, RedirectPrefixRewrite) { - std::string RedirectPrefixRewrite = R"EOF( -name: AllRedirects + std::string yaml = R"EOF( virtual_hosts: - name: redirect domains: [redirect.lyft.com] @@ -5982,8 +6179,7 @@ name: AllRedirects redirect: { prefix_rewrite: "/" } )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(RedirectPrefixRewrite), factory_context_, - true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); EXPECT_EQ(nullptr, config.route(genRedirectHeaders("www.foo.com", "/foo", true, true), 0)); @@ -6081,10 +6277,70 @@ name: AllRedirects } } +TEST_F(RouteConfigurationV2, PathRedirectQueryNotPreserved) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.preserve_query_string_in_path_redirects", "false"}}); + + std::string yaml = R"EOF( +virtual_hosts: + - name: redirect + domains: [redirect.lyft.com] + routes: + - match: { path: "/path/redirect/"} + redirect: { path_redirect: "/new/path-redirect/" } + - match: { path: "/path/redirect/strip-query/true"} + redirect: { path_redirect: "/new/path-redirect/", strip_query: "true" } + - match: { path: "/path/redirect/query"} + redirect: { path_redirect: "/new/path-redirect?foo=1" } + - match: { path: "/path/redirect/query-with-strip"} + redirect: { path_redirect: "/new/path-redirect?foo=2", strip_query: "true" } + )EOF"; + + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); + EXPECT_EQ(nullptr, config.route(genRedirectHeaders("www.foo.com", "/foo", true, true), 0)); + + { + Http::TestRequestHeaderMapImpl headers = + genRedirectHeaders("redirect.lyft.com", "/path/redirect/?lang=eng&con=US", true, false); + EXPECT_EQ("https://redirect.lyft.com/new/path-redirect/", + config.route(headers, 
0)->directResponseEntry()->newPath(headers)); + } + { + Http::TestRequestHeaderMapImpl headers = genRedirectHeaders( + "redirect.lyft.com", "/path/redirect/strip-query/true?lang=eng&con=US", true, false); + EXPECT_EQ("https://redirect.lyft.com/new/path-redirect/", + config.route(headers, 0)->directResponseEntry()->newPath(headers)); + } + { + Http::TestRequestHeaderMapImpl headers = + genRedirectHeaders("redirect.lyft.com", "/path/redirect/query", true, false); + EXPECT_EQ("https://redirect.lyft.com/new/path-redirect?foo=1", + config.route(headers, 0)->directResponseEntry()->newPath(headers)); + } + { + Http::TestRequestHeaderMapImpl headers = + genRedirectHeaders("redirect.lyft.com", "/path/redirect/query?bar=1", true, false); + EXPECT_EQ("https://redirect.lyft.com/new/path-redirect?foo=1", + config.route(headers, 0)->directResponseEntry()->newPath(headers)); + } + { + Http::TestRequestHeaderMapImpl headers = + genRedirectHeaders("redirect.lyft.com", "/path/redirect/query-with-strip", true, false); + EXPECT_EQ("https://redirect.lyft.com/new/path-redirect?foo=2", + config.route(headers, 0)->directResponseEntry()->newPath(headers)); + } + { + Http::TestRequestHeaderMapImpl headers = genRedirectHeaders( + "redirect.lyft.com", "/path/redirect/query-with-strip?bar=1", true, false); + EXPECT_EQ("https://redirect.lyft.com/new/path-redirect?foo=2", + config.route(headers, 0)->directResponseEntry()->newPath(headers)); + } +} + // Test to check Strip Query for redirect messages TEST_F(RouteConfigurationV2, RedirectStripQuery) { - std::string RouteDynPathRedirect = R"EOF( -name: AllRedirects + std::string yaml = R"EOF( virtual_hosts: - name: redirect domains: [redirect.lyft.com] @@ -6097,12 +6353,17 @@ name: AllRedirects redirect: { host_redirect: new.lyft.com } - match: { path: "/path/redirect/"} redirect: { path_redirect: "/new/path-redirect/" } + - match: { path: "/path/redirect/strip-query/true"} + redirect: { path_redirect: "/new/path-redirect/", strip_query: "true" } + 
- match: { path: "/path/redirect/query"} + redirect: { path_redirect: "/new/path-redirect?foo=1" } + - match: { path: "/path/redirect/query-with-strip"} + redirect: { path_redirect: "/new/path-redirect?foo=2", strip_query: "true" } - match: { prefix: "/all/combinations"} redirect: { host_redirect: "new.lyft.com", prefix_rewrite: "/new/prefix" , https_redirect: "true", strip_query: "true" } )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(RouteDynPathRedirect), factory_context_, - true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); EXPECT_EQ(nullptr, config.route(genRedirectHeaders("www.foo.com", "/foo", true, true), 0)); @@ -6129,10 +6390,40 @@ name: AllRedirects } { Http::TestRequestHeaderMapImpl headers = - genRedirectHeaders("redirect.lyft.com", "/path/redirect/", true, false); + genRedirectHeaders("redirect.lyft.com", "/path/redirect/?lang=eng&con=US", true, false); + EXPECT_EQ("https://redirect.lyft.com/new/path-redirect/?lang=eng&con=US", + config.route(headers, 0)->directResponseEntry()->newPath(headers)); + } + { + Http::TestRequestHeaderMapImpl headers = genRedirectHeaders( + "redirect.lyft.com", "/path/redirect/strip-query/true?lang=eng&con=US", true, false); EXPECT_EQ("https://redirect.lyft.com/new/path-redirect/", config.route(headers, 0)->directResponseEntry()->newPath(headers)); } + { + Http::TestRequestHeaderMapImpl headers = + genRedirectHeaders("redirect.lyft.com", "/path/redirect/query", true, false); + EXPECT_EQ("https://redirect.lyft.com/new/path-redirect?foo=1", + config.route(headers, 0)->directResponseEntry()->newPath(headers)); + } + { + Http::TestRequestHeaderMapImpl headers = + genRedirectHeaders("redirect.lyft.com", "/path/redirect/query?bar=1", true, false); + EXPECT_EQ("https://redirect.lyft.com/new/path-redirect?foo=1", + config.route(headers, 0)->directResponseEntry()->newPath(headers)); + } + { + Http::TestRequestHeaderMapImpl headers = + 
genRedirectHeaders("redirect.lyft.com", "/path/redirect/query-with-strip", true, false); + EXPECT_EQ("https://redirect.lyft.com/new/path-redirect?foo=2", + config.route(headers, 0)->directResponseEntry()->newPath(headers)); + } + { + Http::TestRequestHeaderMapImpl headers = genRedirectHeaders( + "redirect.lyft.com", "/path/redirect/query-with-strip?bar=1", true, false); + EXPECT_EQ("https://redirect.lyft.com/new/path-redirect?foo=2", + config.route(headers, 0)->directResponseEntry()->newPath(headers)); + } { Http::TestRequestHeaderMapImpl headers = genRedirectHeaders( "redirect.lyft.com", "/all/combinations/here/we/go?key=value", false, false); @@ -6144,7 +6435,6 @@ name: AllRedirects TEST_F(RouteMatcherTest, HeaderMatchedRoutingV2) { const std::string yaml = R"EOF( -name: foo virtual_hosts: - name: local_service domains: ["*"] @@ -6238,7 +6528,7 @@ name: foo cluster: local_service_without_headers )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); { EXPECT_EQ("local_service_without_headers", @@ -6323,7 +6613,6 @@ name: foo // Validate configured and default settings are routed to the correct cluster. TEST_F(RouteMatcherTest, TlsContextMatching) { const std::string yaml = R"EOF( -name: foo virtual_hosts: - name: local_service domains: ["*"] @@ -6362,7 +6651,7 @@ name: foo cluster: local_service_without_headers )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); { NiceMock stream_info; @@ -6482,8 +6771,7 @@ TEST_F(RouteConfigurationV2, RegexPrefixWithNoRewriteWorksWhenPathChanged) { // Setup regex route entry. the regex is trivial, that's ok as we only want to test that // path change works. 
- std::string RegexRewrite = R"EOF( -name: RegexNoMatch + std::string yaml = R"EOF( virtual_hosts: - name: regex domains: [regex.lyft.com] @@ -6495,7 +6783,7 @@ name: RegexNoMatch route: { cluster: some-cluster } )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(RegexRewrite), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); { // Get our regex route entry @@ -6514,8 +6802,7 @@ name: RegexNoMatch } TEST_F(RouteConfigurationV2, NoIdleTimeout) { - const std::string NoIdleTimeout = R"EOF( -name: NoIdleTimeout + const std::string yaml = R"EOF( virtual_hosts: - name: regex domains: [idle.lyft.com] @@ -6528,7 +6815,7 @@ name: NoIdleTimeout cluster: some-cluster )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(NoIdleTimeout), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); Http::TestRequestHeaderMapImpl headers = genRedirectHeaders("idle.lyft.com", "/regex", true, false); const RouteEntry* route_entry = config.route(headers, 0)->routeEntry(); @@ -6536,8 +6823,7 @@ name: NoIdleTimeout } TEST_F(RouteConfigurationV2, ZeroIdleTimeout) { - const std::string ZeroIdleTimeout = R"EOF( -name: ZeroIdleTimeout + const std::string yaml = R"EOF( virtual_hosts: - name: regex domains: [idle.lyft.com] @@ -6551,7 +6837,7 @@ name: ZeroIdleTimeout idle_timeout: 0s )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(ZeroIdleTimeout), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); Http::TestRequestHeaderMapImpl headers = genRedirectHeaders("idle.lyft.com", "/regex", true, false); const RouteEntry* route_entry = config.route(headers, 0)->routeEntry(); @@ -6559,8 +6845,7 @@ name: ZeroIdleTimeout } TEST_F(RouteConfigurationV2, ExplicitIdleTimeout) { - const std::string ExplicitIdleTimeout = R"EOF( -name: ExplicitIdleTimeout + const std::string yaml = R"EOF( 
virtual_hosts: - name: regex domains: [idle.lyft.com] @@ -6574,8 +6859,7 @@ name: ExplicitIdleTimeout idle_timeout: 7s )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(ExplicitIdleTimeout), factory_context_, - true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); Http::TestRequestHeaderMapImpl headers = genRedirectHeaders("idle.lyft.com", "/regex", true, false); const RouteEntry* route_entry = config.route(headers, 0)->routeEntry(); @@ -6583,8 +6867,7 @@ name: ExplicitIdleTimeout } TEST_F(RouteConfigurationV2, RetriableStatusCodes) { - const std::string ExplicitIdleTimeout = R"EOF( -name: RetriableStatusCodes + const std::string yaml = R"EOF( virtual_hosts: - name: regex domains: [idle.lyft.com] @@ -6599,8 +6882,7 @@ name: RetriableStatusCodes retriable_status_codes: [100, 200] )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(ExplicitIdleTimeout), factory_context_, - true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); Http::TestRequestHeaderMapImpl headers = genRedirectHeaders("idle.lyft.com", "/regex", true, false); const auto& retry_policy = config.route(headers, 0)->routeEntry()->retryPolicy(); @@ -6609,8 +6891,7 @@ name: RetriableStatusCodes } TEST_F(RouteConfigurationV2, RetriableHeaders) { - const std::string RetriableHeaders = R"EOF( -name: RetriableHeaders + const std::string yaml = R"EOF( virtual_hosts: - name: regex domains: [idle.lyft.com] @@ -6628,17 +6909,16 @@ name: RetriableHeaders - name: X-Upstream-Pushback )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(RetriableHeaders), factory_context_, - true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); Http::TestRequestHeaderMapImpl headers = genRedirectHeaders("idle.lyft.com", "/regex", true, false); const auto& retry_policy = config.route(headers, 0)->routeEntry()->retryPolicy(); ASSERT_EQ(2, retry_policy.retriableHeaders().size()); 
- Http::TestHeaderMapImpl expected_0{{":status", "500"}}; - Http::TestHeaderMapImpl unexpected_0{{":status", "200"}}; - Http::TestHeaderMapImpl expected_1{{"x-upstream-pushback", "bar"}}; - Http::TestHeaderMapImpl unexpected_1{{"x-test", "foo"}}; + Http::TestResponseHeaderMapImpl expected_0{{":status", "500"}}; + Http::TestResponseHeaderMapImpl unexpected_0{{":status", "200"}}; + Http::TestResponseHeaderMapImpl expected_1{{"x-upstream-pushback", "bar"}}; + Http::TestResponseHeaderMapImpl unexpected_1{{"x-test", "foo"}}; EXPECT_TRUE(retry_policy.retriableHeaders()[0]->matchesHeaders(expected_0)); EXPECT_FALSE(retry_policy.retriableHeaders()[0]->matchesHeaders(unexpected_0)); @@ -6647,8 +6927,7 @@ name: RetriableHeaders } TEST_F(RouteConfigurationV2, UpgradeConfigs) { - const std::string UpgradeYaml = R"EOF( -name: RetriableStatusCodes + const std::string yaml = R"EOF( virtual_hosts: - name: regex domains: [idle.lyft.com] @@ -6665,7 +6944,7 @@ name: RetriableStatusCodes enabled: false )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(UpgradeYaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); Http::TestRequestHeaderMapImpl headers = genRedirectHeaders("idle.lyft.com", "/regex", true, false); const RouteEntry::UpgradeMap& upgrade_map = config.route(headers, 0)->routeEntry()->upgradeMap(); @@ -6676,7 +6955,6 @@ name: RetriableStatusCodes TEST_F(RouteConfigurationV2, DuplicateUpgradeConfigs) { const std::string yaml = R"EOF( -name: RetriableStatusCodes virtual_hosts: - name: regex domains: [idle.lyft.com] @@ -6694,15 +6972,37 @@ name: RetriableStatusCodes )EOF"; EXPECT_THROW_WITH_MESSAGE( - TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), - EnvoyException, "Duplicate upgrade WebSocket"); + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, + "Duplicate upgrade WebSocket"); +} + +TEST_F(RouteConfigurationV2, 
BadConnectConfig) { + const std::string yaml = R"EOF( +virtual_hosts: + - name: regex + domains: [idle.lyft.com] + routes: + - match: + safe_regex: + google_re2: {} + regex: "/regex" + route: + cluster: some-cluster + upgrade_configs: + - upgrade_type: Websocket + connect_config: {} + enabled: false + )EOF"; + + EXPECT_THROW_WITH_MESSAGE( + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, + "Non-CONNECT upgrade type Websocket has ConnectConfig"); } // Verifies that we're creating a new instance of the retry plugins on each call instead of always // returning the same one. TEST_F(RouteConfigurationV2, RetryPluginsAreNotReused) { - const std::string ExplicitIdleTimeout = R"EOF( -name: RetriableStatusCodes + const std::string yaml = R"EOF( virtual_hosts: - name: regex domains: [idle.lyft.com] @@ -6728,8 +7028,7 @@ name: RetriableStatusCodes Registry::InjectFactory inject_predicate_factory( host_predicate_factory); - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(ExplicitIdleTimeout), factory_context_, - true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); Http::TestRequestHeaderMapImpl headers = genRedirectHeaders("idle.lyft.com", "/regex", true, false); const auto& retry_policy = config.route(headers, 0)->routeEntry()->retryPolicy(); @@ -6741,6 +7040,122 @@ name: RetriableStatusCodes EXPECT_NE(predicates1, predicates2); } +TEST_F(RouteConfigurationV2, InternalRedirectIsDisabledWhenNotSpecifiedInRouteAction) { + const std::string yaml = R"EOF( +virtual_hosts: + - name: regex + domains: [idle.lyft.com] + routes: + - match: + safe_regex: + google_re2: {} + regex: "/regex" + route: + cluster: some-cluster + )EOF"; + + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); + Http::TestRequestHeaderMapImpl headers = + genRedirectHeaders("idle.lyft.com", "/regex", true, false); + const auto& internal_redirect_policy = + config.route(headers, 
0)->routeEntry()->internalRedirectPolicy(); + EXPECT_FALSE(internal_redirect_policy.enabled()); +} + +TEST_F(RouteConfigurationV2, DefaultInternalRedirectPolicyIsSensible) { + const std::string yaml = R"EOF( +virtual_hosts: + - name: regex + domains: [idle.lyft.com] + routes: + - match: + safe_regex: + google_re2: {} + regex: "/regex" + route: + cluster: some-cluster + internal_redirect_policy: {} + )EOF"; + + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); + Http::TestRequestHeaderMapImpl headers = + genRedirectHeaders("idle.lyft.com", "/regex", true, false); + const auto& internal_redirect_policy = + config.route(headers, 0)->routeEntry()->internalRedirectPolicy(); + EXPECT_TRUE(internal_redirect_policy.enabled()); + EXPECT_TRUE(internal_redirect_policy.shouldRedirectForResponseCode(static_cast(302))); + EXPECT_FALSE( + internal_redirect_policy.shouldRedirectForResponseCode(static_cast(200))); + EXPECT_EQ(1, internal_redirect_policy.maxInternalRedirects()); + EXPECT_TRUE(internal_redirect_policy.predicates().empty()); + EXPECT_FALSE(internal_redirect_policy.isCrossSchemeRedirectAllowed()); +} + +TEST_F(RouteConfigurationV2, InternalRedirectPolicyDropsInvalidRedirectCode) { + const std::string yaml = R"EOF( +virtual_hosts: + - name: regex + domains: [idle.lyft.com] + routes: + - match: + safe_regex: + google_re2: {} + regex: "/regex" + route: + cluster: some-cluster + internal_redirect_policy: + redirect_response_codes: [301, 302, 303, 304] + )EOF"; + + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); + Http::TestRequestHeaderMapImpl headers = + genRedirectHeaders("idle.lyft.com", "/regex", true, false); + const auto& internal_redirect_policy = + config.route(headers, 0)->routeEntry()->internalRedirectPolicy(); + EXPECT_TRUE(internal_redirect_policy.enabled()); + EXPECT_TRUE(internal_redirect_policy.shouldRedirectForResponseCode(static_cast(301))); + 
EXPECT_TRUE(internal_redirect_policy.shouldRedirectForResponseCode(static_cast(302))); + EXPECT_TRUE(internal_redirect_policy.shouldRedirectForResponseCode(static_cast(303))); + EXPECT_FALSE( + internal_redirect_policy.shouldRedirectForResponseCode(static_cast(304))); + EXPECT_FALSE( + internal_redirect_policy.shouldRedirectForResponseCode(static_cast(305))); + EXPECT_FALSE( + internal_redirect_policy.shouldRedirectForResponseCode(static_cast(306))); + EXPECT_FALSE( + internal_redirect_policy.shouldRedirectForResponseCode(static_cast(307))); +} + +TEST_F(RouteConfigurationV2, InternalRedirectPolicyDropsInvalidRedirectCodeCauseEmptySet) { + const std::string yaml = R"EOF( +virtual_hosts: + - name: regex + domains: [idle.lyft.com] + routes: + - match: + safe_regex: + google_re2: {} + regex: "/regex" + route: + cluster: some-cluster + internal_redirect_policy: + redirect_response_codes: [200, 304] + )EOF"; + + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); + Http::TestRequestHeaderMapImpl headers = + genRedirectHeaders("idle.lyft.com", "/regex", true, false); + const auto& internal_redirect_policy = + config.route(headers, 0)->routeEntry()->internalRedirectPolicy(); + EXPECT_TRUE(internal_redirect_policy.enabled()); + EXPECT_FALSE( + internal_redirect_policy.shouldRedirectForResponseCode(static_cast(302))); + EXPECT_FALSE( + internal_redirect_policy.shouldRedirectForResponseCode(static_cast(304))); + EXPECT_FALSE( + internal_redirect_policy.shouldRedirectForResponseCode(static_cast(200))); +} + class PerFilterConfigsTest : public testing::Test, public ConfigImplTestBase { public: PerFilterConfigsTest() @@ -6788,7 +7203,7 @@ class PerFilterConfigsTest : public testing::Test, public ConfigImplTestBase { void checkEach(const std::string& yaml, uint32_t expected_entry, uint32_t expected_route, uint32_t expected_vhost) { - const TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + const 
TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); const auto route = config.route(genHeaders("www.foo.com", "/", "GET"), 0); const auto* route_entry = route->routeEntry(); @@ -6809,7 +7224,7 @@ class PerFilterConfigsTest : public testing::Test, public ConfigImplTestBase { } void checkNoPerFilterConfig(const std::string& yaml) { - const TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + const TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); const auto route = config.route(genHeaders("www.foo.com", "/", "GET"), 0); const auto* route_entry = route->routeEntry(); @@ -6831,7 +7246,6 @@ class PerFilterConfigsTest : public testing::Test, public ConfigImplTestBase { TEST_F(PerFilterConfigsTest, DEPRECATED_FEATURE_TEST(TypedConfigFilterError)) { { const std::string yaml = R"EOF( -name: foo virtual_hosts: - name: bar domains: ["*"] @@ -6845,13 +7259,12 @@ name: foo )EOF"; EXPECT_THROW_WITH_MESSAGE( - TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, "Only one of typed_configs or configs can be specified"); } { const std::string yaml = R"EOF( -name: foo virtual_hosts: - name: bar domains: ["*"] @@ -6865,14 +7278,13 @@ name: foo )EOF"; EXPECT_THROW_WITH_MESSAGE( - TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, "Only one of typed_configs or configs can be specified"); } } TEST_F(PerFilterConfigsTest, DEPRECATED_FEATURE_TEST(UnknownFilterStruct)) { const std::string yaml = R"EOF( -name: foo virtual_hosts: - name: bar domains: ["*"] @@ -6883,13 +7295,12 @@ name: foo )EOF"; EXPECT_THROW_WITH_MESSAGE( - TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), - EnvoyException, "Didn't find a 
registered implementation for name: 'unknown.filter'"); + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, + "Didn't find a registered implementation for name: 'unknown.filter'"); } TEST_F(PerFilterConfigsTest, UnknownFilterAny) { const std::string yaml = R"EOF( -name: foo virtual_hosts: - name: bar domains: ["*"] @@ -6902,15 +7313,14 @@ name: foo )EOF"; EXPECT_THROW_WITH_MESSAGE( - TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), - EnvoyException, "Didn't find a registered implementation for name: 'unknown.filter'"); + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, + "Didn't find a registered implementation for name: 'unknown.filter'"); } // Test that a trivially specified NamedHttpFilterConfigFactory ignores per_filter_config without // error. TEST_F(PerFilterConfigsTest, DEPRECATED_FEATURE_TEST(DefaultFilterImplementationStruct)) { const std::string yaml = R"EOF( -name: foo virtual_hosts: - name: bar domains: ["*"] @@ -6925,7 +7335,6 @@ name: foo TEST_F(PerFilterConfigsTest, DefaultFilterImplementationAny) { const std::string yaml = R"EOF( -name: foo virtual_hosts: - name: bar domains: ["*"] @@ -6944,7 +7353,6 @@ name: foo TEST_F(PerFilterConfigsTest, DEPRECATED_FEATURE_TEST(RouteLocalConfig)) { const std::string yaml = R"EOF( -name: foo virtual_hosts: - name: bar domains: ["*"] @@ -6960,7 +7368,6 @@ name: foo TEST_F(PerFilterConfigsTest, RouteLocalTypedConfig) { const std::string yaml = R"EOF( -name: foo virtual_hosts: - name: bar domains: ["*"] @@ -6984,7 +7391,6 @@ name: foo TEST_F(PerFilterConfigsTest, DEPRECATED_FEATURE_TEST(WeightedClusterConfig)) { const std::string yaml = R"EOF( -name: foo virtual_hosts: - name: bar domains: ["*"] @@ -7004,7 +7410,6 @@ name: foo TEST_F(PerFilterConfigsTest, WeightedClusterTypedConfig) { const std::string yaml = R"EOF( -name: foo virtual_hosts: - name: bar domains: ["*"] @@ -7032,7 +7437,6 @@ name: 
foo TEST_F(PerFilterConfigsTest, DEPRECATED_FEATURE_TEST(WeightedClusterFallthroughConfig)) { const std::string yaml = R"EOF( -name: foo virtual_hosts: - name: bar domains: ["*"] @@ -7052,7 +7456,6 @@ name: foo TEST_F(PerFilterConfigsTest, WeightedClusterFallthroughTypedConfig) { const std::string yaml = R"EOF( -name: foo virtual_hosts: - name: bar domains: ["*"] @@ -7078,6 +7481,272 @@ name: foo checkEach(yaml, 1213, 1213, 1415); } +class RouteMatchOverrideTest : public testing::Test, public ConfigImplTestBase {}; + +TEST_F(RouteMatchOverrideTest, VerifyAllMatchableRoutes) { + const std::string yaml = R"EOF( +virtual_hosts: + - name: bar + domains: ["*"] + routes: + - match: { prefix: "/foo/bar/baz" } + route: + cluster: foo_bar_baz + - match: { prefix: "/foo/bar" } + route: + cluster: foo_bar + - match: { prefix: "/foo" } + route: + cluster: foo + - match: { prefix: "/" } + route: + cluster: default +)EOF"; + + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); + std::vector clusters{"default", "foo", "foo_bar", "foo_bar_baz"}; + + RouteConstSharedPtr accepted_route = config.route( + [&clusters](RouteConstSharedPtr route, + RouteEvalStatus route_eval_status) -> RouteMatchStatus { + EXPECT_FALSE(clusters.empty()); + EXPECT_EQ(clusters[clusters.size() - 1], route->routeEntry()->clusterName()); + clusters.pop_back(); + if (clusters.empty()) { + EXPECT_EQ(route_eval_status, RouteEvalStatus::NoMoreRoutes); + return RouteMatchStatus::Accept; + } + EXPECT_EQ(route_eval_status, RouteEvalStatus::HasMoreRoutes); + return RouteMatchStatus::Continue; + }, + genHeaders("bat.com", "/foo/bar/baz", "GET")); + EXPECT_EQ(accepted_route->routeEntry()->clusterName(), "default"); +} + +TEST_F(RouteMatchOverrideTest, VerifyRouteOverrideStops) { + const std::string yaml = R"EOF( +virtual_hosts: + - name: bar + domains: ["*"] + routes: + - match: { prefix: "/foo/bar/baz" } + route: + cluster: foo_bar_baz + - match: { prefix: "/foo/bar" } + route: + 
cluster: foo_bar + - match: { prefix: "/foo" } + route: + cluster: foo + - match: { prefix: "/" } + route: + cluster: default +)EOF"; + + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); + std::vector clusters{"foo", "foo_bar"}; + + RouteConstSharedPtr accepted_route = config.route( + [&clusters](RouteConstSharedPtr route, + RouteEvalStatus route_eval_status) -> RouteMatchStatus { + EXPECT_FALSE(clusters.empty()); + EXPECT_EQ(clusters[clusters.size() - 1], route->routeEntry()->clusterName()); + clusters.pop_back(); + EXPECT_EQ(route_eval_status, RouteEvalStatus::HasMoreRoutes); + + if (clusters.empty()) { + return RouteMatchStatus::Accept; // Do not match default route + } + return RouteMatchStatus::Continue; + }, + genHeaders("bat.com", "/foo/bar", "GET")); + EXPECT_EQ(accepted_route->routeEntry()->clusterName(), "foo"); +} + +TEST_F(RouteMatchOverrideTest, StopWhenNoMoreRoutes) { + const std::string yaml = R"EOF( +virtual_hosts: + - name: bar + domains: ["*"] + routes: + - match: { prefix: "/foo/bar/baz" } + route: + cluster: foo_bar_baz + - match: { prefix: "/foo/bar" } + route: + cluster: foo_bar + - match: { prefix: "/foo" } + route: + cluster: foo + - match: { prefix: "/" } + route: + cluster: default +)EOF"; + + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); + std::vector clusters{"default", "foo", "foo_bar", "foo_bar_baz"}; + + RouteConstSharedPtr accepted_route = config.route( + [&clusters](RouteConstSharedPtr route, + RouteEvalStatus route_eval_status) -> RouteMatchStatus { + EXPECT_FALSE(clusters.empty()); + EXPECT_EQ(clusters[clusters.size() - 1], route->routeEntry()->clusterName()); + clusters.pop_back(); + + if (clusters.empty()) { + EXPECT_EQ(route_eval_status, RouteEvalStatus::NoMoreRoutes); + } else { + EXPECT_EQ(route_eval_status, RouteEvalStatus::HasMoreRoutes); + } + // Returning continue when no more routes are available will be ignored by ConfigImpl::route + return 
RouteMatchStatus::Continue; + }, + genHeaders("bat.com", "/foo/bar/baz", "GET")); + EXPECT_EQ(accepted_route, nullptr); +} + +TEST_F(RouteMatchOverrideTest, NullRouteOnNoRouteMatch) { + const std::string yaml = R"EOF( +virtual_hosts: + - name: bar + domains: ["*"] + routes: + - match: { prefix: "/foo/bar/baz" } + route: + cluster: foo_bar_baz + - match: { prefix: "/foo/bar" } + route: + cluster: foo_bar + - match: { prefix: "/foo" } + route: + cluster: foo +)EOF"; + + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); + RouteConstSharedPtr accepted_route = config.route( + [](RouteConstSharedPtr, RouteEvalStatus) -> RouteMatchStatus { + ADD_FAILURE() + << "RouteCallback should not be invoked since there are no matching route to override"; + return RouteMatchStatus::Continue; + }, + genHeaders("bat.com", "/", "GET")); + EXPECT_EQ(accepted_route, nullptr); +} + +TEST_F(RouteMatchOverrideTest, NullRouteOnNoHostMatch) { + const std::string yaml = R"EOF( +virtual_hosts: + - name: bar + domains: ["www.acme.com"] + routes: + - match: { prefix: "/foo/bar/baz" } + route: + cluster: foo_bar_baz + - match: { prefix: "/foo/bar" } + route: + cluster: foo_bar + - match: { prefix: "/" } + route: + cluster: default +)EOF"; + + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); + RouteConstSharedPtr accepted_route = config.route( + [](RouteConstSharedPtr, RouteEvalStatus) -> RouteMatchStatus { + ADD_FAILURE() + << "RouteCallback should not be invoked since there are no matching route to override"; + return RouteMatchStatus::Continue; + }, + genHeaders("bat.com", "/", "GET")); + EXPECT_EQ(accepted_route, nullptr); +} + +TEST_F(RouteMatchOverrideTest, NullRouteOnNullXForwardedProto) { + const std::string yaml = R"EOF( +virtual_hosts: + - name: bar + domains: ["*"] + routes: + - match: { prefix: "/foo/bar/baz" } + route: + cluster: foo_bar_baz + - match: { prefix: "/foo/bar" } + route: + cluster: foo_bar + - match: { 
prefix: "/" } + route: + cluster: default +)EOF"; + + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); + RouteConstSharedPtr accepted_route = config.route( + [](RouteConstSharedPtr, RouteEvalStatus) -> RouteMatchStatus { + ADD_FAILURE() + << "RouteCallback should not be invoked since there are no matching route to override"; + return RouteMatchStatus::Continue; + }, + genHeaders("bat.com", "/", "GET", "")); + EXPECT_EQ(accepted_route, nullptr); +} + +TEST_F(RouteMatchOverrideTest, NullRouteOnRequireTlsAll) { + const std::string yaml = R"EOF( +virtual_hosts: + - name: bar + domains: ["*"] + routes: + - match: { prefix: "/foo/bar/baz" } + route: + cluster: foo_bar_baz + - match: { prefix: "/foo/bar" } + route: + cluster: foo_bar + - match: { prefix: "/" } + route: + cluster: default + require_tls: ALL +)EOF"; + + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); + RouteConstSharedPtr accepted_route = config.route( + [](RouteConstSharedPtr, RouteEvalStatus) -> RouteMatchStatus { + ADD_FAILURE() + << "RouteCallback should not be invoked since there are no matching route to override"; + return RouteMatchStatus::Continue; + }, + genHeaders("bat.com", "/", "GET")); + EXPECT_NE(nullptr, dynamic_cast(accepted_route.get())); +} + +TEST_F(RouteMatchOverrideTest, NullRouteOnRequireTlsInternal) { + const std::string yaml = R"EOF( +virtual_hosts: + - name: bar + domains: ["*"] + routes: + - match: { prefix: "/foo/bar/baz" } + route: + cluster: foo_bar_baz + - match: { prefix: "/foo/bar" } + route: + cluster: foo_bar + - match: { prefix: "/" } + route: + cluster: default + require_tls: EXTERNAL_ONLY +)EOF"; + + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); + RouteConstSharedPtr accepted_route = config.route( + [](RouteConstSharedPtr, RouteEvalStatus) -> RouteMatchStatus { + ADD_FAILURE() + << "RouteCallback should not be invoked since there are no matching route to 
override"; + return RouteMatchStatus::Continue; + }, + genHeaders("bat.com", "/", "GET")); + EXPECT_NE(nullptr, dynamic_cast(accepted_route.get())); +} + } // namespace } // namespace Router } // namespace Envoy diff --git a/test/common/router/header_formatter_test.cc b/test/common/router/header_formatter_test.cc index 81044fddb558b..84140d099d7b1 100644 --- a/test/common/router/header_formatter_test.cc +++ b/test/common/router/header_formatter_test.cc @@ -34,9 +34,10 @@ namespace Envoy { namespace Router { namespace { -static envoy::config::route::v3::Route parseRouteFromV2Yaml(const std::string& yaml) { +static envoy::config::route::v3::Route parseRouteFromV3Yaml(const std::string& yaml, + bool avoid_boosting = true) { envoy::config::route::v3::Route route; - TestUtility::loadFromYaml(yaml, route); + TestUtility::loadFromYaml(yaml, route, false, avoid_boosting); return route; } @@ -297,7 +298,7 @@ TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamTlsVersionNoTls) { testFormatting(stream_info, "DOWNSTREAM_TLS_VERSION", EMPTY_STRING); } -TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerFingerprint) { +TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerSha256Fingerprint) { NiceMock stream_info; auto connection_info = std::make_shared>(); std::string expected_sha = "685a2db593d5f86d346cb1a297009c3b467ad77f1944aa799039a2fb3d531f3f"; @@ -307,7 +308,7 @@ TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerFingerprint) { "685a2db593d5f86d346cb1a297009c3b467ad77f1944aa799039a2fb3d531f3f"); } -TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerFingerprintEmpty) { +TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerSha256FingerprintEmpty) { NiceMock stream_info; auto connection_info = std::make_shared>(); std::string expected_sha; @@ -316,12 +317,37 @@ TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerFingerprintEmp testFormatting(stream_info, 
"DOWNSTREAM_PEER_FINGERPRINT_256", EMPTY_STRING); } -TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerFingerprintNoTls) { +TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerSha256FingerprintNoTls) { NiceMock stream_info; EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr)); testFormatting(stream_info, "DOWNSTREAM_PEER_FINGERPRINT_256", EMPTY_STRING); } +TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerSha1Fingerprint) { + NiceMock stream_info; + auto connection_info = std::make_shared>(); + std::string expected_sha = "685a2db593d5f86d346cb1a297009c3b467ad77f1944aa799039a2fb3d531f3f"; + ON_CALL(*connection_info, sha1PeerCertificateDigest()).WillByDefault(ReturnRef(expected_sha)); + EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + testFormatting(stream_info, "DOWNSTREAM_PEER_FINGERPRINT_1", + "685a2db593d5f86d346cb1a297009c3b467ad77f1944aa799039a2fb3d531f3f"); +} + +TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerSha1FingerprintEmpty) { + NiceMock stream_info; + auto connection_info = std::make_shared>(); + std::string expected_sha; + ON_CALL(*connection_info, sha1PeerCertificateDigest()).WillByDefault(ReturnRef(expected_sha)); + EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + testFormatting(stream_info, "DOWNSTREAM_PEER_FINGERPRINT_1", EMPTY_STRING); +} + +TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerSha1FingerprintNoTls) { + NiceMock stream_info; + EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr)); + testFormatting(stream_info, "DOWNSTREAM_PEER_FINGERPRINT_1", EMPTY_STRING); +} + TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerSerial) { NiceMock stream_info; auto connection_info = std::make_shared>(); @@ -750,6 +776,8 @@ TEST(HeaderParserTest, TestParseInternal) { {"%PER_REQUEST_STATE(testing)%", 
{"test_value"}, {}}, {"%REQ(x-request-id)%", {"123"}, {}}, {"%START_TIME%", {"2018-04-03T23:06:09.123Z"}, {}}, + {"%RESPONSE_FLAGS%", {"LR"}, {}}, + {"%RESPONSE_CODE_DETAILS%", {"via_upstream"}, {}}, // Unescaped % {"%", {}, {"Invalid header configuration. Un-escaped % at position 0"}}, @@ -846,7 +874,7 @@ TEST(HeaderParserTest, TestParseInternal) { new NiceMock()); ON_CALL(stream_info, upstreamHost()).WillByDefault(Return(host)); - Http::RequestHeaderMapImpl request_headers; + Http::TestRequestHeaderMapImpl request_headers; request_headers.addCopy(Http::LowerCaseString(std::string("x-request-id")), 123); ON_CALL(stream_info, getRequestHeaders()).WillByDefault(Return(&request_headers)); @@ -875,6 +903,12 @@ TEST(HeaderParserTest, TestParseInternal) { ON_CALL(stream_info, filterState()).WillByDefault(ReturnRef(filter_state)); ON_CALL(Const(stream_info), filterState()).WillByDefault(ReturnRef(*filter_state)); + ON_CALL(stream_info, hasResponseFlag(StreamInfo::ResponseFlag::LocalReset)) + .WillByDefault(Return(true)); + + absl::optional rc_details{"via_upstream"}; + ON_CALL(stream_info, responseCodeDetails()).WillByDefault(ReturnRef(rc_details)); + for (const auto& test_case : test_cases) { Protobuf::RepeatedPtrField to_add; envoy::config::core::v3::HeaderValueOption* header = to_add.Add(); @@ -890,7 +924,7 @@ TEST(HeaderParserTest, TestParseInternal) { HeaderParserPtr req_header_parser = HeaderParser::configure(to_add); - Http::TestHeaderMapImpl header_map{{":method", "POST"}}; + Http::TestRequestHeaderMapImpl header_map{{":method", "POST"}}; req_header_parser->evaluateHeaders(header_map, stream_info); std::string descriptor = fmt::format("for test case input: {}", test_case.input_); @@ -923,8 +957,8 @@ match: { prefix: "/new_endpoint" } )EOF"; HeaderParserPtr req_header_parser = - HeaderParser::configure(parseRouteFromV2Yaml(ymal).request_headers_to_add()); - Http::TestHeaderMapImpl header_map{{":method", "POST"}}; + 
HeaderParser::configure(parseRouteFromV3Yaml(ymal).request_headers_to_add()); + Http::TestRequestHeaderMapImpl header_map{{":method", "POST"}}; NiceMock stream_info; req_header_parser->evaluateHeaders(header_map, stream_info); EXPECT_TRUE(header_map.has("x-client-ip")); @@ -945,8 +979,8 @@ match: { prefix: "/new_endpoint" } )EOF"; HeaderParserPtr req_header_parser = - HeaderParser::configure(parseRouteFromV2Yaml(ymal).request_headers_to_add()); - Http::TestHeaderMapImpl header_map{{":method", "POST"}}; + HeaderParser::configure(parseRouteFromV3Yaml(ymal).request_headers_to_add()); + Http::TestRequestHeaderMapImpl header_map{{":method", "POST"}}; std::shared_ptr> host( new NiceMock()); NiceMock stream_info; @@ -971,8 +1005,8 @@ match: { prefix: "/new_endpoint" } )EOF"; HeaderParserPtr req_header_parser = - HeaderParser::configure(parseRouteFromV2Yaml(ymal).request_headers_to_add()); - Http::TestHeaderMapImpl header_map{{":method", "POST"}}; + HeaderParser::configure(parseRouteFromV3Yaml(ymal).request_headers_to_add()); + Http::TestRequestHeaderMapImpl header_map{{":method", "POST"}}; NiceMock stream_info; req_header_parser->evaluateHeaders(header_map, stream_info); EXPECT_TRUE(header_map.has("static-header")); @@ -1015,10 +1049,11 @@ match: { prefix: "/new_endpoint" } request_headers_to_remove: ["x-nope"] )EOF"; - const auto route = parseRouteFromV2Yaml(yaml); + const auto route = parseRouteFromV3Yaml(yaml); HeaderParserPtr req_header_parser = HeaderParser::configure(route.request_headers_to_add(), route.request_headers_to_remove()); - Http::TestHeaderMapImpl header_map{{":method", "POST"}, {"x-safe", "safe"}, {"x-nope", "nope"}}; + Http::TestRequestHeaderMapImpl header_map{ + {":method", "POST"}, {"x-safe", "safe"}, {"x-nope", "nope"}}; NiceMock stream_info; absl::optional protocol = Envoy::Http::Protocol::Http11; ON_CALL(stream_info, protocol()).WillByDefault(ReturnPointee(&protocol)); @@ -1109,14 +1144,14 @@ match: { prefix: "/new_endpoint" } )EOF"; // Disable 
append mode. - envoy::config::route::v3::Route route = parseRouteFromV2Yaml(ymal); + envoy::config::route::v3::Route route = parseRouteFromV3Yaml(ymal); route.mutable_request_headers_to_add(0)->mutable_append()->set_value(false); route.mutable_request_headers_to_add(1)->mutable_append()->set_value(false); route.mutable_request_headers_to_add(2)->mutable_append()->set_value(false); HeaderParserPtr req_header_parser = Router::HeaderParser::configure(route.request_headers_to_add()); - Http::TestHeaderMapImpl header_map{ + Http::TestRequestHeaderMapImpl header_map{ {":method", "POST"}, {"static-header", "old-value"}, {"x-client-ip", "0.0.0.0"}}; NiceMock stream_info; @@ -1138,19 +1173,16 @@ match: { prefix: "/new_endpoint" } using CountMap = absl::flat_hash_map; CountMap counts; - header_map.iterate( - [](const Http::HeaderEntry& header, void* cb_v) -> Http::HeaderMap::Iterate { - CountMap* m = static_cast(cb_v); - absl::string_view key = header.key().getStringView(); - CountMap::iterator i = m->find(key); - if (i == m->end()) { - m->insert({std::string(key), 1}); - } else { - i->second++; - } - return Http::HeaderMap::Iterate::Continue; - }, - &counts); + header_map.iterate([&counts](const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate { + absl::string_view key = header.key().getStringView(); + CountMap::iterator i = counts.find(key); + if (i == counts.end()) { + counts.insert({std::string(key), 1}); + } else { + i->second++; + } + return Http::HeaderMap::Iterate::Continue; + }); EXPECT_EQ(1, counts["static-header"]); EXPECT_EQ(1, counts["x-client-ip"]); @@ -1202,10 +1234,11 @@ match: { prefix: "/new_endpoint" } response_headers_to_remove: ["x-nope"] )EOF"; - const auto route = parseRouteFromV2Yaml(yaml); + const auto route = parseRouteFromV3Yaml(yaml); HeaderParserPtr resp_header_parser = HeaderParser::configure(route.response_headers_to_add(), route.response_headers_to_remove()); - Http::TestHeaderMapImpl header_map{{":method", "POST"}, {"x-safe", "safe"}, 
{"x-nope", "nope"}}; + Http::TestRequestHeaderMapImpl header_map{ + {":method", "POST"}, {"x-safe", "safe"}, {"x-nope", "nope"}}; NiceMock stream_info; // Initialize start_time as 2018-04-03T23:06:09.123Z in microseconds. @@ -1252,10 +1285,10 @@ match: { prefix: "/new_endpoint" } request_headers_to_remove: ["x-foo-header"] )EOF"; - const auto route = parseRouteFromV2Yaml(yaml); + const auto route = parseRouteFromV3Yaml(yaml); HeaderParserPtr req_header_parser = HeaderParser::configure(route.request_headers_to_add(), route.request_headers_to_remove()); - Http::TestHeaderMapImpl header_map{{"x-foo-header", "foo"}}; + Http::TestRequestHeaderMapImpl header_map{{"x-foo-header", "foo"}}; NiceMock stream_info; req_header_parser->evaluateHeaders(header_map, stream_info); @@ -1274,10 +1307,10 @@ match: { prefix: "/new_endpoint" } response_headers_to_remove: ["x-foo-header"] )EOF"; - const auto route = parseRouteFromV2Yaml(yaml); + const auto route = parseRouteFromV3Yaml(yaml); HeaderParserPtr resp_header_parser = HeaderParser::configure(route.response_headers_to_add(), route.response_headers_to_remove()); - Http::TestHeaderMapImpl header_map{{"x-foo-header", "foo"}}; + Http::TestResponseHeaderMapImpl header_map{{"x-foo-header", "foo"}}; NiceMock stream_info; resp_header_parser->evaluateHeaders(header_map, stream_info); diff --git a/test/common/router/header_parser_corpus/timeout_test_case b/test/common/router/header_parser_corpus/timeout_test_case new file mode 100644 index 0000000000000..a4df354180628 --- /dev/null +++ b/test/common/router/header_parser_corpus/timeout_test_case @@ -0,0 +1,6 @@ +headers_to_add { + header { + key: " " + value: "%START_TIME(`Qf;BBBBB)%%START_TIME(%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, 
%2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %2f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 
%3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 
%1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, 
%2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 
%3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1%3f,f, (%%3 %1f, %2f,\016~\177 
%3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 
%1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, 
%2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 , ,,2, fff%f%13%(%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, % %3f,f, %1f, %2f, %3f, %4f, %5f, %6f,, %6f,(%7f, %8f, % %4f, %5)%%START_TIME(%%3 %1f, %2f, %3f,f, %1f, %2f, %3f, %4f, %5f, %6f,, %6f,(%7f, %8f, % %4f, %5)%%START_TIME(%%3 %1f, %2f, %3f,f, %1f, %2f, %3f, %4f, %5f, %6f,, %6f,(%7f, %8f, % %4f, %5)%%START_TIME(%%3 %1f, %2f, %3f,f, %1f, %2f, %3f, %4f, %5f, %6f,, %6f,(%7f, %8f, % %4f, %5)%%START_TIME(%%3 %1f, %2f, %3f,f, %1f, %2f, %3f, %4f, %5f, %6f,, %6f,(%7f, %8f, % %4f, %5)%%START_TIME(%%3 %1f, %2f, %3f,f, %1f, %2f, %3f, %4f, %5f, %6f,, %6f,(%7f, %8f, % %4f, 
%5)%%START_TIME(%5)%%START_TIME(%%3 %1f, %2f, %3f,f, %1f, %2f, %3f, %4f, %5f, %6f, % %4f, %5)%%START_TIME(%%3 %1f, %2f, %3f,f, %1f, %25f, %6f, %4294967295f, %8f, 9f)%" + } +} diff --git a/test/common/router/header_parser_fuzz_test.cc b/test/common/router/header_parser_fuzz_test.cc index 74dede78b3319..8acd737fa1902 100644 --- a/test/common/router/header_parser_fuzz_test.cc +++ b/test/common/router/header_parser_fuzz_test.cc @@ -14,7 +14,7 @@ DEFINE_PROTO_FUZZER(const test::common::router::TestCase& input) { TestUtility::validate(input); Router::HeaderParserPtr parser = Router::HeaderParser::configure(input.headers_to_add(), input.headers_to_remove()); - Http::HeaderMapImpl header_map; + Http::TestRequestHeaderMapImpl header_map; TestStreamInfo test_stream_info = fromStreamInfo(input.stream_info()); parser->evaluateHeaders(header_map, test_stream_info); ENVOY_LOG_MISC(trace, "Success"); diff --git a/test/common/router/rds_impl_test.cc b/test/common/router/rds_impl_test.cc index ad211cedc787b..3c70a9c93f545 100644 --- a/test/common/router/rds_impl_test.cc +++ b/test/common/router/rds_impl_test.cc @@ -13,12 +13,12 @@ #include "common/json/json_loader.h" #include "common/router/rds_impl.h" -#include "server/http/admin.h" +#include "server/admin/admin.h" #include "test/mocks/init/mocks.h" #include "test/mocks/local_info/mocks.h" #include "test/mocks/protobuf/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/instance.h" #include "test/mocks/thread_local/mocks.h" #include "test/mocks/upstream/mocks.h" #include "test/test_common/printers.h" @@ -120,7 +120,7 @@ stat_prefix: foo } NiceMock server_; - std::unique_ptr route_config_provider_manager_; + RouteConfigProviderManagerImplPtr route_config_provider_manager_; RouteConfigProviderSharedPtr rds_; }; @@ -158,7 +158,7 @@ TEST_F(RdsImplTest, Basic) { setup(); // Make sure the initial empty route table works. 
- EXPECT_EQ(nullptr, route(Http::TestHeaderMapImpl{{":authority", "foo"}})); + EXPECT_EQ(nullptr, route(Http::TestRequestHeaderMapImpl{{":authority", "foo"}})); // Initial request. const std::string response1_json = R"EOF( @@ -175,14 +175,16 @@ TEST_F(RdsImplTest, Basic) { )EOF"; auto response1 = TestUtility::parseYaml(response1_json); + const auto decoded_resources = + TestUtility::decodeResources(response1); EXPECT_CALL(init_watcher_, ready()); - rds_callbacks_->onConfigUpdate(response1.resources(), response1.version_info()); - EXPECT_EQ(nullptr, route(Http::TestHeaderMapImpl{{":authority", "foo"}})); + rds_callbacks_->onConfigUpdate(decoded_resources.refvec_, response1.version_info()); + EXPECT_EQ(nullptr, route(Http::TestRequestHeaderMapImpl{{":authority", "foo"}})); // 2nd request with same response. Based on hash should not reload config. - rds_callbacks_->onConfigUpdate(response1.resources(), response1.version_info()); - EXPECT_EQ(nullptr, route(Http::TestHeaderMapImpl{{":authority", "foo"}})); + rds_callbacks_->onConfigUpdate(decoded_resources.refvec_, response1.version_info()); + EXPECT_EQ(nullptr, route(Http::TestRequestHeaderMapImpl{{":authority", "foo"}})); // Load the config and verified shared count. ConfigConstSharedPtr config = rds_->config(); @@ -220,11 +222,13 @@ TEST_F(RdsImplTest, Basic) { )EOF"; auto response2 = TestUtility::parseYaml(response2_json); + const auto decoded_resources_2 = + TestUtility::decodeResources(response2); // Make sure we don't lookup/verify clusters. 
EXPECT_CALL(server_factory_context_.cluster_manager_, get(Eq("bar"))).Times(0); - rds_callbacks_->onConfigUpdate(response2.resources(), response2.version_info()); - EXPECT_EQ("foo", route(Http::TestHeaderMapImpl{{":authority", "foo"}, {":path", "/foo"}}) + rds_callbacks_->onConfigUpdate(decoded_resources_2.refvec_, response2.version_info()); + EXPECT_EQ("foo", route(Http::TestRequestHeaderMapImpl{{":authority", "foo"}, {":path", "/foo"}}) ->routeEntry() ->clusterName()); @@ -253,10 +257,12 @@ TEST_F(RdsImplTest, FailureInvalidConfig) { )EOF"; auto response1 = TestUtility::parseYaml(response1_json); + const auto decoded_resources = + TestUtility::decodeResources(response1); EXPECT_CALL(init_watcher_, ready()); EXPECT_THROW_WITH_MESSAGE( - rds_callbacks_->onConfigUpdate(response1.resources(), response1.version_info()), + rds_callbacks_->onConfigUpdate(decoded_resources.refvec_, response1.version_info()), EnvoyException, "Unexpected RDS configuration (expecting foo_route_config): INVALID_NAME_FOR_route_config"); } @@ -284,7 +290,7 @@ class RdsRouteConfigSubscriptionTest : public RdsTestBase { server_factory_context_.thread_local_.shutdownThread(); } - std::unique_ptr route_config_provider_manager_; + RouteConfigProviderManagerImplPtr route_config_provider_manager_; }; // Verifies that maybeCreateInitManager() creates a noop init manager if the main init manager is in @@ -347,14 +353,14 @@ class RouteConfigProviderManagerImplTest : public RdsTestBase { } envoy::extensions::filters::network::http_connection_manager::v3::Rds rds_; - std::unique_ptr route_config_provider_manager_; + RouteConfigProviderManagerImplPtr route_config_provider_manager_; RouteConfigProviderSharedPtr provider_; }; envoy::config::route::v3::RouteConfiguration -parseRouteConfigurationFromV2Yaml(const std::string& yaml) { +parseRouteConfigurationFromV3Yaml(const std::string& yaml, bool avoid_boosting = true) { envoy::config::route::v3::RouteConfiguration route_config; - 
TestUtility::loadFromYaml(yaml, route_config, true); + TestUtility::loadFromYaml(yaml, route_config, true, avoid_boosting); return route_config; } @@ -388,7 +394,7 @@ name: foo // Only static route. RouteConfigProviderPtr static_config = route_config_provider_manager_->createStaticRouteConfigProvider( - parseRouteConfigurationFromV2Yaml(config_yaml), server_factory_context_, + parseRouteConfigurationFromV3Yaml(config_yaml), server_factory_context_, validation_visitor_); message_ptr = server_factory_context_.admin_.config_tracker_.config_tracker_callbacks_["routes"](); @@ -397,7 +403,7 @@ name: foo TestUtility::loadFromYaml(R"EOF( static_route_configs: - route_config: - "@type": type.googleapis.com/envoy.api.v2.RouteConfiguration + "@type": type.googleapis.com/envoy.config.route.v3.RouteConfiguration name: foo virtual_hosts: - name: bar @@ -424,7 +430,7 @@ name: foo "version_info": "1", "resources": [ { - "@type": "type.googleapis.com/envoy.api.v2.RouteConfiguration", + "@type": "type.googleapis.com/envoy.config.route.v3.RouteConfiguration", "name": "foo_route_config", "virtual_hosts": null } @@ -433,9 +439,11 @@ name: foo )EOF"; auto response1 = TestUtility::parseYaml(response1_json); + const auto decoded_resources = + TestUtility::decodeResources(response1); EXPECT_CALL(init_watcher_, ready()); - rds_callbacks_->onConfigUpdate(response1.resources(), response1.version_info()); + rds_callbacks_->onConfigUpdate(decoded_resources.refvec_, response1.version_info()); message_ptr = server_factory_context_.admin_.config_tracker_.config_tracker_callbacks_["routes"](); const auto& route_config_dump3 = @@ -443,7 +451,7 @@ name: foo TestUtility::loadFromYaml(R"EOF( static_route_configs: - route_config: - "@type": type.googleapis.com/envoy.api.v2.RouteConfiguration + "@type": type.googleapis.com/envoy.config.route.v3.RouteConfiguration name: foo virtual_hosts: - name: bar @@ -457,7 +465,7 @@ name: foo dynamic_route_configs: - version_info: "1" route_config: - "@type": 
type.googleapis.com/envoy.api.v2.RouteConfiguration + "@type": type.googleapis.com/envoy.config.route.v3.RouteConfiguration name: foo_route_config virtual_hosts: last_updated: @@ -476,8 +484,7 @@ TEST_F(RouteConfigProviderManagerImplTest, Basic) { EXPECT_FALSE(provider_->configInfo().has_value()); - Protobuf::RepeatedPtrField route_configs; - route_configs.Add()->PackFrom(parseRouteConfigurationFromV2Yaml(R"EOF( + const auto route_config = parseRouteConfigurationFromV3Yaml(R"EOF( name: foo_route_config virtual_hosts: - name: bar @@ -485,10 +492,11 @@ name: foo_route_config routes: - match: { prefix: "/" } route: { cluster: baz } -)EOF")); +)EOF"); + const auto decoded_resources = TestUtility::decodeResources({route_config}); server_factory_context_.cluster_manager_.subscription_factory_.callbacks_->onConfigUpdate( - route_configs, "1"); + decoded_resources.refvec_, "1"); RouteConfigProviderSharedPtr provider2 = route_config_provider_manager_->createRdsRouteConfigProvider( @@ -512,7 +520,7 @@ name: foo_route_config rds2, server_factory_context_, "foo_prefix", outer_init_manager_); EXPECT_NE(provider3, provider_); server_factory_context_.cluster_manager_.subscription_factory_.callbacks_->onConfigUpdate( - route_configs, "provider3"); + decoded_resources.refvec_, "provider3"); EXPECT_EQ(2UL, route_config_provider_manager_->dumpRouteConfigs()->dynamic_route_configs().size()); @@ -557,8 +565,7 @@ TEST_F(RouteConfigProviderManagerImplTest, SameProviderOnTwoInitManager) { EXPECT_EQ(Init::Manager::State::Initializing, real_init_manager.state()); { - Protobuf::RepeatedPtrField route_configs; - route_configs.Add()->PackFrom(parseRouteConfigurationFromV2Yaml(R"EOF( + const auto route_config = parseRouteConfigurationFromV3Yaml(R"EOF( name: foo_route_config virtual_hosts: - name: bar @@ -566,10 +573,11 @@ name: foo_route_config routes: - match: { prefix: "/" } route: { cluster: baz } -)EOF")); +)EOF"); + const auto decoded_resources = 
TestUtility::decodeResources({route_config}); server_factory_context_.cluster_manager_.subscription_factory_.callbacks_->onConfigUpdate( - route_configs, "1"); + decoded_resources.refvec_, "1"); EXPECT_TRUE(provider_->configInfo().has_value()); EXPECT_TRUE(provider2->configInfo().has_value()); @@ -577,20 +585,6 @@ name: foo_route_config } } -// Negative test for protoc-gen-validate constraints. -TEST_F(RouteConfigProviderManagerImplTest, ValidateFail) { - setup(); - Protobuf::RepeatedPtrField route_configs; - envoy::config::route::v3::RouteConfiguration route_config; - route_config.set_name("foo_route_config"); - route_config.mutable_virtual_hosts()->Add(); - route_configs.Add()->PackFrom(route_config); - EXPECT_THROW( - server_factory_context_.cluster_manager_.subscription_factory_.callbacks_->onConfigUpdate( - route_configs, ""), - ProtoValidationException); -} - TEST_F(RouteConfigProviderManagerImplTest, OnConfigUpdateEmpty) { setup(); EXPECT_CALL(*server_factory_context_.cluster_manager_.subscription_factory_.subscription_, @@ -605,13 +599,12 @@ TEST_F(RouteConfigProviderManagerImplTest, OnConfigUpdateWrongSize) { EXPECT_CALL(*server_factory_context_.cluster_manager_.subscription_factory_.subscription_, start(_)); outer_init_manager_.initialize(init_watcher_); - Protobuf::RepeatedPtrField route_configs; - route_configs.Add(); - route_configs.Add(); + envoy::config::route::v3::RouteConfiguration route_config; + const auto decoded_resources = TestUtility::decodeResources({route_config, route_config}); EXPECT_CALL(init_watcher_, ready()); EXPECT_THROW_WITH_MESSAGE( server_factory_context_.cluster_manager_.subscription_factory_.callbacks_->onConfigUpdate( - route_configs, ""), + decoded_resources.refvec_, ""), EnvoyException, "Unexpected RDS resource length: 2"); } @@ -664,12 +657,14 @@ version_info: '1' )EOF"; auto response1 = TestUtility::parseYaml(response1_yaml); + const auto decoded_resources = + TestUtility::decodeResources(response1); 
EXPECT_CALL(init_watcher_, ready()); EXPECT_THROW_WITH_MESSAGE( - rds_callbacks_->onConfigUpdate(response1.resources(), response1.version_info()), - EnvoyException, "Only a single wildcard domain is permitted"); + rds_callbacks_->onConfigUpdate(decoded_resources.refvec_, response1.version_info()), + EnvoyException, "Only a single wildcard domain is permitted in route foo_route_config"); message_ptr = server_factory_context_.admin_.config_tracker_.config_tracker_callbacks_["routes"](); diff --git a/test/common/router/retry_state_impl_test.cc b/test/common/router/retry_state_impl_test.cc index 6b55caf212881..6f3d6441baaf9 100644 --- a/test/common/router/retry_state_impl_test.cc +++ b/test/common/router/retry_state_impl_test.cc @@ -12,6 +12,7 @@ #include "test/mocks/stats/mocks.h" #include "test/mocks/upstream/mocks.h" #include "test/test_common/printers.h" +#include "test/test_common/test_runtime.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" @@ -126,7 +127,7 @@ class RouterRetryStateImplTest : public testing::Test { NiceMock cluster_; TestVirtualCluster virtual_cluster_; NiceMock runtime_; - NiceMock random_; + NiceMock random_; Event::MockDispatcher dispatcher_; Event::MockTimer* retry_timer_{}; RetryStatePtr state_; @@ -203,7 +204,8 @@ TEST_F(RouterRetryStateImplTest, Policy5xxRemote503Overloaded) { Http::TestResponseHeaderMapImpl response_headers{{":status", "503"}, {"x-envoy-overloaded", "true"}}; - EXPECT_EQ(RetryStatus::No, state_->shouldRetryHeaders(response_headers, callback_)); + expectTimerCreateAndEnable(); + EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryHeaders(response_headers, callback_)); } TEST_F(RouterRetryStateImplTest, PolicyResourceExhaustedRemoteRateLimited) { @@ -216,6 +218,22 @@ TEST_F(RouterRetryStateImplTest, PolicyResourceExhaustedRemoteRateLimited) { EXPECT_EQ(RetryStatus::No, state_->shouldRetryHeaders(response_headers, callback_)); } +TEST_F(RouterRetryStateImplTest, PolicyEnvoyRateLimitedRemoteRateLimited) { + 
Http::TestRequestHeaderMapImpl request_headers{{"x-envoy-retry-on", "envoy-ratelimited"}}; + setup(request_headers); + EXPECT_TRUE(state_->enabled()); + + expectTimerCreateAndEnable(); + Http::TestResponseHeaderMapImpl response_headers{{":status", "429"}, + {"x-envoy-ratelimited", "true"}}; + EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryHeaders(response_headers, callback_)); + EXPECT_CALL(callback_ready_, ready()); + retry_timer_->invokeCallback(); + + EXPECT_EQ(RetryStatus::NoRetryLimitExceeded, + state_->shouldRetryHeaders(response_headers, callback_)); +} + TEST_F(RouterRetryStateImplTest, PolicyGatewayErrorRemote502) { verifyPolicyWithRemoteResponse("gateway-error" /* retry_on */, "502" /* response_status */, false /* is_grpc */); @@ -759,9 +777,6 @@ TEST_F(RouterRetryStateImplTest, MaxRetriesHeader) { {"x-envoy-retry-grpc-on", "cancelled"}, {"x-envoy-max-retries", "3"}}; setup(request_headers); - EXPECT_FALSE(request_headers.has("x-envoy-retry-on")); - EXPECT_FALSE(request_headers.has("x-envoy-retry-grpc-on")); - EXPECT_FALSE(request_headers.has("x-envoy-max-retries")); EXPECT_TRUE(state_->enabled()); expectTimerCreateAndEnable(); @@ -935,9 +950,6 @@ TEST_F(RouterRetryStateImplTest, ZeroMaxRetriesHeader) { {"x-envoy-retry-grpc-on", "cancelled"}, {"x-envoy-max-retries", "0"}}; setup(request_headers); - EXPECT_FALSE(request_headers.has("x-envoy-retry-on")); - EXPECT_FALSE(request_headers.has("x-envoy-retry-grpc-on")); - EXPECT_FALSE(request_headers.has("x-envoy-max-retries")); EXPECT_TRUE(state_->enabled()); EXPECT_EQ(RetryStatus::NoRetryLimitExceeded, @@ -1126,6 +1138,104 @@ TEST_F(RouterRetryStateImplTest, ParseRetryGrpcOn) { EXPECT_FALSE(result.second); } +TEST_F(RouterRetryStateImplTest, RemoveAllRetryHeaders) { + // Make sure retry related headers are removed when the policy is enabled. 
+ { + Http::TestRequestHeaderMapImpl request_headers{ + {"x-envoy-retry-on", "5xx,retriable-header-names,retriable-status-codes"}, + {"x-envoy-retry-grpc-on", "resource-exhausted"}, + {"x-envoy-retriable-header-names", "X-Upstream-Pushback"}, + {"x-envoy-retriable-status-codes", "418,420"}, + {"x-envoy-max-retries", "7"}, + {"x-envoy-hedge-on-per-try-timeout", "true"}, + {"x-envoy-upstream-rq-per-try-timeout-ms", "2"}, + }; + setup(request_headers); + EXPECT_TRUE(state_->enabled()); + + EXPECT_FALSE(request_headers.has("x-envoy-retry-on")); + EXPECT_FALSE(request_headers.has("x-envoy-retry-grpc-on")); + EXPECT_FALSE(request_headers.has("x-envoy-max-retries")); + EXPECT_FALSE(request_headers.has("x-envoy-retriable-header-names")); + EXPECT_FALSE(request_headers.has("x-envoy-retriable-status-codes")); + EXPECT_FALSE(request_headers.has("x-envoy-hedge-on-per-try-timeout")); + EXPECT_FALSE(request_headers.has("x-envoy-upstream-rq-per-try-timeout-ms")); + } + + // Make sure retry related headers are removed even if the policy is disabled. + { + Http::TestRequestHeaderMapImpl request_headers{ + {"x-envoy-retriable-header-names", "X-Upstream-Pushback"}, + {"x-envoy-retriable-status-codes", "418,420"}, + {"x-envoy-max-retries", "7"}, + {"x-envoy-hedge-on-per-try-timeout", "true"}, + {"x-envoy-upstream-rq-per-try-timeout-ms", "2"}, + }; + setup(request_headers); + EXPECT_EQ(nullptr, state_); + + EXPECT_FALSE(request_headers.has("x-envoy-retry-on")); + EXPECT_FALSE(request_headers.has("x-envoy-retry-grpc-on")); + EXPECT_FALSE(request_headers.has("x-envoy-max-retries")); + EXPECT_FALSE(request_headers.has("x-envoy-retriable-header-names")); + EXPECT_FALSE(request_headers.has("x-envoy-retriable-status-codes")); + EXPECT_FALSE(request_headers.has("x-envoy-hedge-on-per-try-timeout")); + EXPECT_FALSE(request_headers.has("x-envoy-upstream-rq-per-try-timeout-ms")); + } + + // Repeat policy is enabled case with runtime flag disabled. 
+ { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.consume_all_retry_headers", "false"}}); + + Http::TestRequestHeaderMapImpl request_headers{ + {"x-envoy-retry-on", "5xx,retriable-header-names,retriable-status-codes"}, + {"x-envoy-retry-grpc-on", "resource-exhausted"}, + {"x-envoy-retriable-header-names", "X-Upstream-Pushback"}, + {"x-envoy-retriable-status-codes", "418,420"}, + {"x-envoy-max-retries", "7"}, + {"x-envoy-hedge-on-per-try-timeout", "true"}, + {"x-envoy-upstream-rq-per-try-timeout-ms", "2"}, + }; + setup(request_headers); + EXPECT_TRUE(state_->enabled()); + + EXPECT_FALSE(request_headers.has("x-envoy-retry-on")); + EXPECT_FALSE(request_headers.has("x-envoy-retry-grpc-on")); + EXPECT_FALSE(request_headers.has("x-envoy-max-retries")); + EXPECT_TRUE(request_headers.has("x-envoy-retriable-header-names")); + EXPECT_TRUE(request_headers.has("x-envoy-retriable-status-codes")); + EXPECT_TRUE(request_headers.has("x-envoy-hedge-on-per-try-timeout")); + EXPECT_TRUE(request_headers.has("x-envoy-upstream-rq-per-try-timeout-ms")); + } + + // Repeat policy is disabled case with runtime flag disabled. 
+ { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.consume_all_retry_headers", "false"}}); + + Http::TestRequestHeaderMapImpl request_headers{ + {"x-envoy-retriable-header-names", "X-Upstream-Pushback"}, + {"x-envoy-retriable-status-codes", "418,420"}, + {"x-envoy-max-retries", "7"}, + {"x-envoy-hedge-on-per-try-timeout", "true"}, + {"x-envoy-upstream-rq-per-try-timeout-ms", "2"}, + }; + setup(request_headers); + EXPECT_EQ(nullptr, state_); + + EXPECT_FALSE(request_headers.has("x-envoy-retry-on")); + EXPECT_FALSE(request_headers.has("x-envoy-retry-grpc-on")); + EXPECT_FALSE(request_headers.has("x-envoy-max-retries")); + EXPECT_TRUE(request_headers.has("x-envoy-retriable-header-names")); + EXPECT_TRUE(request_headers.has("x-envoy-retriable-status-codes")); + EXPECT_TRUE(request_headers.has("x-envoy-hedge-on-per-try-timeout")); + EXPECT_TRUE(request_headers.has("x-envoy-upstream-rq-per-try-timeout-ms")); + } +} + } // namespace } // namespace Router } // namespace Envoy diff --git a/test/common/router/route_corpus/clusterfuzz-testcase-minimized-route_fuzz_test-4701452596674560.fuzz b/test/common/router/route_corpus/clusterfuzz-testcase-minimized-route_fuzz_test-4701452596674560.fuzz new file mode 100644 index 0000000000000..a147ab2392517 --- /dev/null +++ b/test/common/router/route_corpus/clusterfuzz-testcase-minimized-route_fuzz_test-4701452596674560.fuzz @@ -0,0 +1,10 @@ +config { + virtual_hosts { + name: "&\006\000\000\000" + domains: "-" + require_tls: ALL + response_headers_to_remove: "\0Ï3\022\362\211\245\247V\036" + request_headers_to_remove: "\003\022\360\234\254\265V\036" + } +} +random_value: 67070975 diff --git a/test/common/router/route_corpus/clusterfuzz-testcase-minimized-route_fuzz_test-4803620674732032.fuzz b/test/common/router/route_corpus/clusterfuzz-testcase-minimized-route_fuzz_test-4803620674732032.fuzz new file mode 100644 index 0000000000000..f47ad4226d392 --- 
/dev/null +++ b/test/common/router/route_corpus/clusterfuzz-testcase-minimized-route_fuzz_test-4803620674732032.fuzz @@ -0,0 +1,485 @@ +config { + virtual_hosts { + name: "/" + domains: "" + domains: "" + domains: "*" + routes { + match { + prefix: "" + grpc { + } + } + route { + cluster_header: "J" + upgrade_configs { + enabled { + } + } + } + response_headers_to_remove: "\022" + name: "J" + } + routes { + match { + prefix: "" + grpc { + } + } + route { + cluster_header: "J" + upgrade_configs { + enabled { + } + } + } + response_headers_to_remove: "\022" + } + routes { + match { + prefix: "" + grpc { + } + } + route { + cluster_header: "J" + upgrade_configs { + enabled { + } + } + } + response_headers_to_remove: "\022" + } + routes { + match { + prefix: "" + grpc { + } + } + route { + cluster: "J" + upgrade_configs { + enabled { + } + } + } + response_headers_to_remove: "\022" + } + routes { + match { + prefix: "" + grpc { + } + } + route { + cluster_header: "J" + upgrade_configs { + enabled { + value: true + } + } + } + response_headers_to_remove: "\021" + } + routes { + match { + prefix: "" + grpc { + } + } + route { + cluster_header: "J" + upgrade_configs { + enabled { + } + } + } + response_headers_to_remove: "\022" + } + routes { + match { + prefix: "" + grpc { + } + } + route { + cluster_header: "J" + upgrade_configs { + enabled { + } + } + } + response_headers_to_remove: "\25537" + } + routes { + match { + prefix: "" + grpc { + } + } + route { + cluster_header: "J" + upgrade_configs { + enabled { + } + } + } + response_headers_to_remove: "\022" + } + routes { + match { + prefix: "" + grpc { + } + } + route { + cluster_header: "J" + upgrade_configs { + enabled { + } + } + } + response_headers_to_remove: "\022" + } + routes { + match { + prefix: "" + grpc { + } + } + route { + cluster_header: "J" + upgrade_configs { + enabled { + } + } + } + response_headers_to_remove: "W" + } + routes { + match { + prefix: "" + grpc { + } + } + route { + cluster_header: "J" 
+ upgrade_configs { + enabled { + } + } + } + response_headers_to_remove: "\022" + } + routes { + match { + prefix: "" + grpc { + } + } + route { + cluster_header: "J" + upgrade_configs { + } + } + response_headers_to_remove: "\022" + } + routes { + match { + prefix: "" + grpc { + } + } + route { + cluster_header: "J" + upgrade_configs { + enabled { + } + } + } + response_headers_to_remove: "\022" + } + routes { + match { + prefix: "" + grpc { + } + } + route { + cluster_header: "J" + upgrade_configs { + enabled { + } + } + } + response_headers_to_remove: "\020" + } + routes { + match { + prefix: "" + grpc { + } + } + route { + cluster_header: "J" + upgrade_configs { + enabled { + value: true + } + } + } + response_headers_to_remove: "\021" + } + routes { + match { + prefix: "" + grpc { + } + } + route { + cluster_header: "J" + upgrade_configs { + enabled { + } + } + } + response_headers_to_remove: "\022" + } + routes { + match { + prefix: "" + grpc { + } + } + response_headers_to_remove: "\022" + filter_action { + } + } + routes { + match { + prefix: "" + grpc { + } + } + route { + cluster_header: "J" + upgrade_configs { + enabled { + } + } + } + response_headers_to_remove: "\022" + } + routes { + match { + prefix: "" + grpc { + } + } + route { + cluster_header: "J" + upgrade_configs { + enabled { + value: true + } + } + } + response_headers_to_remove: "" + } + routes { + match { + prefix: "" + grpc { + } + } + route { + cluster: "J" + upgrade_configs { + enabled { + } + } + } + response_headers_to_remove: "\022" + } + routes { + match { + prefix: "" + grpc { + } + } + route { + cluster_header: "J" + upgrade_configs { + enabled { + } + } + } + response_headers_to_remove: "\022" + } + routes { + match { + prefix: "" + grpc { + } + } + route { + cluster_header: "J" + upgrade_configs { + enabled { + } + } + } + response_headers_to_remove: "\022" + } + routes { + match { + prefix: "" + grpc { + } + } + route { + cluster_header: "J" + upgrade_configs { + enabled { + 
value: true + } + } + } + response_headers_to_remove: "\022" + } + routes { + match { + prefix: "" + grpc { + } + } + route { + cluster_header: "J" + upgrade_configs { + enabled { + } + } + } + response_headers_to_remove: "\022" + } + routes { + match { + prefix: "" + grpc { + } + } + route { + cluster_header: "J" + upgrade_configs { + } + } + response_headers_to_remove: "\021" + } + routes { + match { + prefix: "" + grpc { + } + } + route { + cluster_header: "J" + upgrade_configs { + enabled { + } + } + } + response_headers_to_remove: "\001" + } + routes { + match { + prefix: "" + grpc { + } + } + route { + cluster_header: "J" + upgrade_configs { + enabled { + } + } + } + response_headers_to_remove: "\0s#" + } + routes { + match { + prefix: "" + grpc { + } + } + route { + cluster_header: "J" + upgrade_configs { + enabled { + } + } + } + response_headers_to_remove: "\022" + } + routes { + match { + prefix: "" + grpc { + } + } + route { + cluster_header: "J" + upgrade_configs { + enabled { + } + } + } + response_headers_to_remove: "\022" + request_headers_to_remove: "J" + } + routes { + match { + prefix: "" + grpc { + } + } + route { + cluster_header: "J" + upgrade_configs { + enabled { + } + } + } + response_headers_to_remove: "\022" + } + routes { + match { + prefix: "" + grpc { + } + } + route { + cluster_header: "J" + upgrade_configs { + enabled { + } + } + } + response_headers_to_remove: "\022" + } + routes { + match { + prefix: "" + grpc { + } + } + route { + cluster_header: "J" + upgrade_configs { + enabled { + } + } + } + response_headers_to_remove: "\022" + } + } +} diff --git a/test/common/router/route_corpus/clusterfuzz-testcase-minimized-route_fuzz_test-5748492233605120 b/test/common/router/route_corpus/clusterfuzz-testcase-minimized-route_fuzz_test-5748492233605120 new file mode 100644 index 0000000000000..10c1b0a3d452e --- /dev/null +++ b/test/common/router/route_corpus/clusterfuzz-testcase-minimized-route_fuzz_test-5748492233605120 @@ -0,0 +1,30 @@ 
+config { + virtual_hosts { + name: "[" + domains: "bat.com" + routes { + match { + safe_regex { + google_re2 { + } + regex: "." + } + } + filter_action { + } + } + } +} +headers { + headers { + key: ":authority" + value: "bat.com" + } + headers { + key: ":path" + value: "b" + } + headers { + key: "x-forwarded-proto" + } +} diff --git a/test/common/router/route_corpus/internal_redirect_nullderef b/test/common/router/route_corpus/internal_redirect_nullderef new file mode 100644 index 0000000000000..962e3eb264e5a --- /dev/null +++ b/test/common/router/route_corpus/internal_redirect_nullderef @@ -0,0 +1,23 @@ +config { + virtual_hosts { + name: "q" + domains: "" + routes { + match { + path: "" + } + route { + cluster: "." + internal_redirect_policy { + predicates { + name: ":" + typed_config { + value: "-" + } + } + } + } + } + } +} +random_value: 1 \ No newline at end of file diff --git a/test/common/router/route_fuzz_test.cc b/test/common/router/route_fuzz_test.cc index 089424a2744da..17e6b532e36b6 100644 --- a/test/common/router/route_fuzz_test.cc +++ b/test/common/router/route_fuzz_test.cc @@ -7,7 +7,7 @@ #include "test/common/router/route_fuzz.pb.validate.h" #include "test/fuzz/fuzz_runner.h" #include "test/fuzz/utility.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/instance.h" namespace Envoy { namespace Router { @@ -22,12 +22,7 @@ cleanRouteConfig(envoy::config::route::v3::RouteConfiguration route_config) { [](envoy::config::route::v3::VirtualHost& virtual_host) { auto routes = virtual_host.mutable_routes(); for (int i = 0; i < routes->size();) { - // Erase routes that use a regex matcher. This is deprecated and may cause - // crashes when wildcards are matched against very long headers. See - // https://github.com/envoyproxy/envoy/issues/7728. 
- if (routes->Get(i).match().path_specifier_case() == - envoy::config::route::v3::RouteMatch::PathSpecifierCase:: - kHiddenEnvoyDeprecatedRegex) { + if (routes->Get(i).has_filter_action()) { routes->erase(routes->begin() + i); } else { ++i; diff --git a/test/common/router/router_ratelimit_test.cc b/test/common/router/router_ratelimit_test.cc index 1205fe0707f5c..496c33633e8bb 100644 --- a/test/common/router/router_ratelimit_test.cc +++ b/test/common/router/router_ratelimit_test.cc @@ -15,7 +15,7 @@ #include "test/mocks/http/mocks.h" #include "test/mocks/ratelimit/mocks.h" #include "test/mocks/router/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/instance.h" #include "test/test_common/printers.h" #include "test/test_common/utility.h" @@ -28,15 +28,16 @@ namespace Envoy { namespace Router { namespace { -envoy::config::route::v3::RateLimit parseRateLimitFromV2Yaml(const std::string& yaml_string) { +envoy::config::route::v3::RateLimit parseRateLimitFromV3Yaml(const std::string& yaml_string, + bool avoid_boosting = true) { envoy::config::route::v3::RateLimit rate_limit; - TestUtility::loadFromYaml(yaml_string, rate_limit); + TestUtility::loadFromYaml(yaml_string, rate_limit, false, avoid_boosting); TestUtility::validate(rate_limit); return rate_limit; } TEST(BadRateLimitConfiguration, MissingActions) { - EXPECT_THROW_WITH_REGEX(parseRateLimitFromV2Yaml("{}"), EnvoyException, + EXPECT_THROW_WITH_REGEX(parseRateLimitFromV3Yaml("{}"), EnvoyException, "value must contain at least"); } @@ -46,7 +47,7 @@ TEST(BadRateLimitConfiguration, ActionsMissingRequiredFields) { - request_headers: {} )EOF"; - EXPECT_THROW_WITH_REGEX(parseRateLimitFromV2Yaml(yaml_one), EnvoyException, + EXPECT_THROW_WITH_REGEX(parseRateLimitFromV3Yaml(yaml_one), EnvoyException, "value length must be at least"); const std::string yaml_two = R"EOF( @@ -55,7 +56,7 @@ TEST(BadRateLimitConfiguration, ActionsMissingRequiredFields) { header_name: test )EOF"; - 
EXPECT_THROW_WITH_REGEX(parseRateLimitFromV2Yaml(yaml_two), EnvoyException, + EXPECT_THROW_WITH_REGEX(parseRateLimitFromV3Yaml(yaml_two), EnvoyException, "value length must be at least"); const std::string yaml_three = R"EOF( @@ -64,7 +65,7 @@ TEST(BadRateLimitConfiguration, ActionsMissingRequiredFields) { descriptor_key: test )EOF"; - EXPECT_THROW_WITH_REGEX(parseRateLimitFromV2Yaml(yaml_three), EnvoyException, + EXPECT_THROW_WITH_REGEX(parseRateLimitFromV3Yaml(yaml_three), EnvoyException, "value length must be at least"); } @@ -86,9 +87,10 @@ class RateLimitConfiguration : public testing::Test { NiceMock factory_context_; ProtobufMessage::NullValidationVisitorImpl any_validation_visitor_; std::unique_ptr config_; - Http::TestHeaderMapImpl header_; + Http::TestRequestHeaderMapImpl header_; const RouteEntry* route_; Network::Address::Ipv4Instance default_remote_address_{"10.0.0.1"}; + const envoy::config::core::v3::Metadata* dynamic_metadata_; }; TEST_F(RateLimitConfiguration, NoApplicableRateLimit) { @@ -169,7 +171,8 @@ TEST_F(RateLimitConfiguration, TestGetApplicationRateLimit) { std::vector descriptors; for (const RateLimitPolicyEntry& rate_limit : rate_limits) { - rate_limit.populateDescriptors(*route_, descriptors, "", header_, default_remote_address_); + rate_limit.populateDescriptors(*route_, descriptors, "", header_, default_remote_address_, + dynamic_metadata_); } EXPECT_THAT(std::vector({{{{"remote_address", "10.0.0.1"}}}}), testing::ContainerEq(descriptors)); @@ -202,7 +205,7 @@ TEST_F(RateLimitConfiguration, TestVirtualHost) { std::vector descriptors; for (const RateLimitPolicyEntry& rate_limit : rate_limits) { rate_limit.populateDescriptors(*route_, descriptors, "service_cluster", header_, - default_remote_address_); + default_remote_address_, dynamic_metadata_); } EXPECT_THAT(std::vector({{{{"destination_cluster", "www2test"}}}}), testing::ContainerEq(descriptors)); @@ -241,7 +244,7 @@ TEST_F(RateLimitConfiguration, Stages) { std::vector descriptors; 
for (const RateLimitPolicyEntry& rate_limit : rate_limits) { rate_limit.populateDescriptors(*route_, descriptors, "service_cluster", header_, - default_remote_address_); + default_remote_address_, dynamic_metadata_); } EXPECT_THAT(std::vector( {{{{"destination_cluster", "www2test"}}}, @@ -254,7 +257,7 @@ TEST_F(RateLimitConfiguration, Stages) { for (const RateLimitPolicyEntry& rate_limit : rate_limits) { rate_limit.populateDescriptors(*route_, descriptors, "service_cluster", header_, - default_remote_address_); + default_remote_address_, dynamic_metadata_); } EXPECT_THAT(std::vector({{{{"remote_address", "10.0.0.1"}}}}), testing::ContainerEq(descriptors)); @@ -266,15 +269,16 @@ TEST_F(RateLimitConfiguration, Stages) { class RateLimitPolicyEntryTest : public testing::Test { public: void setupTest(const std::string& yaml) { - rate_limit_entry_ = std::make_unique(parseRateLimitFromV2Yaml(yaml)); + rate_limit_entry_ = std::make_unique(parseRateLimitFromV3Yaml(yaml)); descriptors_.clear(); } std::unique_ptr rate_limit_entry_; - Http::TestHeaderMapImpl header_; + Http::TestRequestHeaderMapImpl header_; NiceMock route_; std::vector descriptors_; Network::Address::Ipv4Instance default_remote_address_{"10.0.0.1"}; + const envoy::config::core::v3::Metadata* dynamic_metadata_; }; TEST_F(RateLimitPolicyEntryTest, RateLimitPolicyEntryMembers) { @@ -299,8 +303,8 @@ TEST_F(RateLimitPolicyEntryTest, RemoteAddress) { setupTest(yaml); - rate_limit_entry_->populateDescriptors(route_, descriptors_, "", header_, - default_remote_address_); + rate_limit_entry_->populateDescriptors(route_, descriptors_, "", header_, default_remote_address_, + dynamic_metadata_); EXPECT_THAT(std::vector({{{{"remote_address", "10.0.0.1"}}}}), testing::ContainerEq(descriptors_)); } @@ -315,7 +319,8 @@ TEST_F(RateLimitPolicyEntryTest, PipeAddress) { setupTest(yaml); Network::Address::PipeInstance pipe_address("/hello"); - rate_limit_entry_->populateDescriptors(route_, descriptors_, "", header_, 
pipe_address); + rate_limit_entry_->populateDescriptors(route_, descriptors_, "", header_, pipe_address, + dynamic_metadata_); EXPECT_TRUE(descriptors_.empty()); } @@ -328,7 +333,7 @@ TEST_F(RateLimitPolicyEntryTest, SourceService) { setupTest(yaml); rate_limit_entry_->populateDescriptors(route_, descriptors_, "service_cluster", header_, - default_remote_address_); + default_remote_address_, dynamic_metadata_); EXPECT_THAT( std::vector({{{{"source_cluster", "service_cluster"}}}}), testing::ContainerEq(descriptors_)); @@ -343,7 +348,7 @@ TEST_F(RateLimitPolicyEntryTest, DestinationService) { setupTest(yaml); rate_limit_entry_->populateDescriptors(route_, descriptors_, "service_cluster", header_, - default_remote_address_); + default_remote_address_, dynamic_metadata_); EXPECT_THAT( std::vector({{{{"destination_cluster", "fake_cluster"}}}}), testing::ContainerEq(descriptors_)); @@ -358,14 +363,61 @@ TEST_F(RateLimitPolicyEntryTest, RequestHeaders) { )EOF"; setupTest(yaml); - Http::TestHeaderMapImpl header{{"x-header-name", "test_value"}}; + Http::TestRequestHeaderMapImpl header{{"x-header-name", "test_value"}}; rate_limit_entry_->populateDescriptors(route_, descriptors_, "service_cluster", header, - default_remote_address_); + default_remote_address_, dynamic_metadata_); EXPECT_THAT(std::vector({{{{"my_header_name", "test_value"}}}}), testing::ContainerEq(descriptors_)); } +// Validate that a descriptor is added if the missing request header +// has skip_if_absent set to true +TEST_F(RateLimitPolicyEntryTest, RequestHeadersWithSkipIfAbsent) { + const std::string yaml = R"EOF( +actions: +- request_headers: + header_name: x-header-name + descriptor_key: my_header_name + skip_if_absent: false +- request_headers: + header_name: x-header + descriptor_key: my_header + skip_if_absent: true + )EOF"; + + setupTest(yaml); + Http::TestRequestHeaderMapImpl header{{"x-header-name", "test_value"}}; + + rate_limit_entry_->populateDescriptors(route_, descriptors_, "service_cluster", 
header, + default_remote_address_, dynamic_metadata_); + EXPECT_THAT(std::vector({{{{"my_header_name", "test_value"}}}}), + testing::ContainerEq(descriptors_)); +} + +// Tests if the descriptors are added if one of the headers is missing +// and skip_if_absent is set to default value which is false +TEST_F(RateLimitPolicyEntryTest, RequestHeadersWithDefaultSkipIfAbsent) { + const std::string yaml = R"EOF( +actions: +- request_headers: + header_name: x-header-name + descriptor_key: my_header_name + skip_if_absent: false +- request_headers: + header_name: x-header + descriptor_key: my_header + skip_if_absent: false + )EOF"; + + setupTest(yaml); + Http::TestRequestHeaderMapImpl header{{"x-header-test", "test_value"}}; + + rate_limit_entry_->populateDescriptors(route_, descriptors_, "service_cluster", header, + default_remote_address_, dynamic_metadata_); + EXPECT_TRUE(descriptors_.empty()); +} + TEST_F(RateLimitPolicyEntryTest, RequestHeadersNoMatch) { const std::string yaml = R"EOF( actions: @@ -375,10 +427,10 @@ TEST_F(RateLimitPolicyEntryTest, RequestHeadersNoMatch) { )EOF"; setupTest(yaml); - Http::TestHeaderMapImpl header{{"x-header-name", "test_value"}}; + Http::TestRequestHeaderMapImpl header{{"x-header-name", "test_value"}}; rate_limit_entry_->populateDescriptors(route_, descriptors_, "service_cluster", header, - default_remote_address_); + default_remote_address_, dynamic_metadata_); EXPECT_TRUE(descriptors_.empty()); } @@ -391,12 +443,199 @@ TEST_F(RateLimitPolicyEntryTest, RateLimitKey) { setupTest(yaml); - rate_limit_entry_->populateDescriptors(route_, descriptors_, "", header_, - default_remote_address_); + rate_limit_entry_->populateDescriptors(route_, descriptors_, "", header_, default_remote_address_, + dynamic_metadata_); EXPECT_THAT(std::vector({{{{"generic_key", "fake_key"}}}}), testing::ContainerEq(descriptors_)); } +TEST_F(RateLimitPolicyEntryTest, DynamicMetaDataMatch) { + const std::string yaml = R"EOF( +actions: +- dynamic_metadata: + 
descriptor_key: fake_key + default_value: fake_value + metadata_key: + key: 'envoy.xxx' + path: + - key: test + - key: prop + )EOF"; + + setupTest(yaml); + + std::string metadata_yaml = R"EOF( +filter_metadata: + envoy.xxx: + test: + prop: foo + )EOF"; + + envoy::config::core::v3::Metadata metadata; + TestUtility::loadFromYaml(metadata_yaml, metadata); + + rate_limit_entry_->populateDescriptors(route_, descriptors_, "", header_, default_remote_address_, + &metadata); + + EXPECT_THAT(std::vector({{{{"fake_key", "foo"}}}}), + testing::ContainerEq(descriptors_)); +} + +// Tests that the default_value is used in the descriptor when the metadata_key is empty. +TEST_F(RateLimitPolicyEntryTest, DynamicMetaDataNoMatchWithDefaultValue) { + const std::string yaml = R"EOF( +actions: +- dynamic_metadata: + descriptor_key: fake_key + default_value: fake_value + metadata_key: + key: 'envoy.xxx' + path: + - key: test + - key: prop + )EOF"; + + setupTest(yaml); + + std::string metadata_yaml = R"EOF( +filter_metadata: + envoy.xxx: + another_key: + prop: foo + )EOF"; + + envoy::config::core::v3::Metadata metadata; + TestUtility::loadFromYaml(metadata_yaml, metadata); + + rate_limit_entry_->populateDescriptors(route_, descriptors_, "", header_, default_remote_address_, + &metadata); + + EXPECT_THAT(std::vector({{{{"fake_key", "fake_value"}}}}), + testing::ContainerEq(descriptors_)); +} + +TEST_F(RateLimitPolicyEntryTest, DynamicMetaDataNoMatch) { + const std::string yaml = R"EOF( +actions: +- dynamic_metadata: + descriptor_key: fake_key + metadata_key: + key: 'envoy.xxx' + path: + - key: test + - key: prop + )EOF"; + + setupTest(yaml); + + std::string metadata_yaml = R"EOF( +filter_metadata: + envoy.xxx: + another_key: + prop: foo + )EOF"; + + envoy::config::core::v3::Metadata metadata; + TestUtility::loadFromYaml(metadata_yaml, metadata); + + rate_limit_entry_->populateDescriptors(route_, descriptors_, "", header_, default_remote_address_, + &metadata); + + 
EXPECT_TRUE(descriptors_.empty()); +} + +TEST_F(RateLimitPolicyEntryTest, DynamicMetaDataEmptyValue) { + const std::string yaml = R"EOF( +actions: +- dynamic_metadata: + descriptor_key: fake_key + metadata_key: + key: 'envoy.xxx' + path: + - key: test + - key: prop + )EOF"; + + setupTest(yaml); + + std::string metadata_yaml = R"EOF( +filter_metadata: + envoy.xxx: + test: + prop: "" + )EOF"; + + envoy::config::core::v3::Metadata metadata; + TestUtility::loadFromYaml(metadata_yaml, metadata); + + rate_limit_entry_->populateDescriptors(route_, descriptors_, "", header_, default_remote_address_, + &metadata); + + EXPECT_TRUE(descriptors_.empty()); +} +// Tests that no descriptor is generated when both the metadata_key and default_value are empty. +TEST_F(RateLimitPolicyEntryTest, DynamicMetaDataAndDefaultValueEmpty) { + const std::string yaml = R"EOF( +actions: +- dynamic_metadata: + descriptor_key: fake_key + default_value: "" + metadata_key: + key: 'envoy.xxx' + path: + - key: test + - key: prop + )EOF"; + + setupTest(yaml); + + std::string metadata_yaml = R"EOF( +filter_metadata: + envoy.xxx: + another_key: + prop: "" + )EOF"; + + envoy::config::core::v3::Metadata metadata; + TestUtility::loadFromYaml(metadata_yaml, metadata); + + rate_limit_entry_->populateDescriptors(route_, descriptors_, "", header_, default_remote_address_, + &metadata); + + EXPECT_TRUE(descriptors_.empty()); +} + +TEST_F(RateLimitPolicyEntryTest, DynamicMetaDataNonStringMatch) { + const std::string yaml = R"EOF( +actions: +- dynamic_metadata: + descriptor_key: fake_key + metadata_key: + key: 'envoy.xxx' + path: + - key: test + - key: prop + )EOF"; + + setupTest(yaml); + + std::string metadata_yaml = R"EOF( +filter_metadata: + envoy.xxx: + test: + prop: + foo: bar + )EOF"; + + envoy::config::core::v3::Metadata metadata; + TestUtility::loadFromYaml(metadata_yaml, metadata); + + rate_limit_entry_->populateDescriptors(route_, descriptors_, "", header_, default_remote_address_, + &metadata); + + 
EXPECT_TRUE(descriptors_.empty()); +} + TEST_F(RateLimitPolicyEntryTest, HeaderValueMatch) { const std::string yaml = R"EOF( actions: @@ -408,9 +647,10 @@ TEST_F(RateLimitPolicyEntryTest, HeaderValueMatch) { )EOF"; setupTest(yaml); - Http::TestHeaderMapImpl header{{"x-header-name", "test_value"}}; + Http::TestRequestHeaderMapImpl header{{"x-header-name", "test_value"}}; - rate_limit_entry_->populateDescriptors(route_, descriptors_, "", header, default_remote_address_); + rate_limit_entry_->populateDescriptors(route_, descriptors_, "", header, default_remote_address_, + dynamic_metadata_); EXPECT_THAT(std::vector({{{{"header_match", "fake_value"}}}}), testing::ContainerEq(descriptors_)); } @@ -426,9 +666,10 @@ TEST_F(RateLimitPolicyEntryTest, HeaderValueMatchNoMatch) { )EOF"; setupTest(yaml); - Http::TestHeaderMapImpl header{{"x-header-name", "not_same_value"}}; + Http::TestRequestHeaderMapImpl header{{"x-header-name", "not_same_value"}}; - rate_limit_entry_->populateDescriptors(route_, descriptors_, "", header, default_remote_address_); + rate_limit_entry_->populateDescriptors(route_, descriptors_, "", header, default_remote_address_, + dynamic_metadata_); EXPECT_TRUE(descriptors_.empty()); } @@ -444,9 +685,10 @@ TEST_F(RateLimitPolicyEntryTest, HeaderValueMatchHeadersNotPresent) { )EOF"; setupTest(yaml); - Http::TestHeaderMapImpl header{{"x-header-name", "not_same_value"}}; + Http::TestRequestHeaderMapImpl header{{"x-header-name", "not_same_value"}}; - rate_limit_entry_->populateDescriptors(route_, descriptors_, "", header, default_remote_address_); + rate_limit_entry_->populateDescriptors(route_, descriptors_, "", header, default_remote_address_, + dynamic_metadata_); EXPECT_THAT(std::vector({{{{"header_match", "fake_value"}}}}), testing::ContainerEq(descriptors_)); } @@ -463,9 +705,10 @@ TEST_F(RateLimitPolicyEntryTest, HeaderValueMatchHeadersPresent) { )EOF"; setupTest(yaml); - Http::TestHeaderMapImpl header{{"x-header-name", "test_value"}}; + 
Http::TestRequestHeaderMapImpl header{{"x-header-name", "test_value"}}; - rate_limit_entry_->populateDescriptors(route_, descriptors_, "", header, default_remote_address_); + rate_limit_entry_->populateDescriptors(route_, descriptors_, "", header, default_remote_address_, + dynamic_metadata_); EXPECT_TRUE(descriptors_.empty()); } @@ -479,7 +722,7 @@ TEST_F(RateLimitPolicyEntryTest, CompoundActions) { setupTest(yaml); rate_limit_entry_->populateDescriptors(route_, descriptors_, "service_cluster", header_, - default_remote_address_); + default_remote_address_, dynamic_metadata_); EXPECT_THAT( std::vector( {{{{"destination_cluster", "fake_cluster"}, {"source_cluster", "service_cluster"}}}}), @@ -500,10 +743,134 @@ TEST_F(RateLimitPolicyEntryTest, CompoundActionsNoDescriptor) { setupTest(yaml); rate_limit_entry_->populateDescriptors(route_, descriptors_, "service_cluster", header_, - default_remote_address_); + default_remote_address_, dynamic_metadata_); EXPECT_TRUE(descriptors_.empty()); } +TEST_F(RateLimitPolicyEntryTest, DynamicMetadataRateLimitOverride) { + const std::string yaml = R"EOF( +actions: +- generic_key: + descriptor_value: limited_fake_key +limit: + dynamic_metadata: + metadata_key: + key: test.filter.key + path: + - key: test + )EOF"; + + setupTest(yaml); + + std::string metadata_yaml = R"EOF( +filter_metadata: + test.filter.key: + test: + requests_per_unit: 42 + unit: HOUR + )EOF"; + + envoy::config::core::v3::Metadata metadata; + TestUtility::loadFromYaml(metadata_yaml, metadata); + rate_limit_entry_->populateDescriptors(route_, descriptors_, "", header_, default_remote_address_, + &metadata); + EXPECT_THAT( + std::vector( + {{{{"generic_key", "limited_fake_key"}}, {{42, envoy::type::v3::RateLimitUnit::HOUR}}}}), + testing::ContainerEq(descriptors_)); +} + +TEST_F(RateLimitPolicyEntryTest, DynamicMetadataRateLimitOverrideNotFound) { + const std::string yaml = R"EOF( +actions: +- generic_key: + descriptor_value: limited_fake_key +limit: + 
dynamic_metadata: + metadata_key: + key: unknown.key + path: + - key: test + )EOF"; + + setupTest(yaml); + + std::string metadata_yaml = R"EOF( +filter_metadata: + test.filter.key: + test: + requests_per_unit: 42 + unit: HOUR + )EOF"; + + envoy::config::core::v3::Metadata metadata; + TestUtility::loadFromYaml(metadata_yaml, metadata); + rate_limit_entry_->populateDescriptors(route_, descriptors_, "", header_, default_remote_address_, + &metadata); + EXPECT_THAT(std::vector({{{{"generic_key", "limited_fake_key"}}}}), + testing::ContainerEq(descriptors_)); +} + +TEST_F(RateLimitPolicyEntryTest, DynamicMetadataRateLimitOverrideWrongType) { + const std::string yaml = R"EOF( +actions: +- generic_key: + descriptor_value: limited_fake_key +limit: + dynamic_metadata: + metadata_key: + key: test.filter.key + path: + - key: test + )EOF"; + + setupTest(yaml); + + std::string metadata_yaml = R"EOF( +filter_metadata: + test.filter.key: + test: some_string + )EOF"; + + envoy::config::core::v3::Metadata metadata; + TestUtility::loadFromYaml(metadata_yaml, metadata); + rate_limit_entry_->populateDescriptors(route_, descriptors_, "", header_, default_remote_address_, + &metadata); + EXPECT_THAT(std::vector({{{{"generic_key", "limited_fake_key"}}}}), + testing::ContainerEq(descriptors_)); +} + +TEST_F(RateLimitPolicyEntryTest, DynamicMetadataRateLimitOverrideWrongUnit) { + const std::string yaml = R"EOF( +actions: +- generic_key: + descriptor_value: limited_fake_key +limit: + dynamic_metadata: + metadata_key: + key: test.filter.key + path: + - key: test + )EOF"; + + setupTest(yaml); + + std::string metadata_yaml = R"EOF( +filter_metadata: + test.filter.key: + test: + requests_per_unit: 42 + unit: NOT_A_UNIT + )EOF"; + + envoy::config::core::v3::Metadata metadata; + TestUtility::loadFromYaml(metadata_yaml, metadata); + rate_limit_entry_->populateDescriptors(route_, descriptors_, "", header_, default_remote_address_, + &metadata); + EXPECT_THAT(std::vector({{{{"generic_key", 
"limited_fake_key"}}}}), + testing::ContainerEq(descriptors_)); +} + } // namespace } // namespace Router } // namespace Envoy diff --git a/test/common/router/router_test.cc b/test/common/router/router_test.cc index d5395ebd3d25c..0e829d87914ef 100644 --- a/test/common/router/router_test.cc +++ b/test/common/router/router_test.cc @@ -5,6 +5,8 @@ #include "envoy/config/core/v3/base.pb.h" #include "envoy/extensions/transport_sockets/tls/v3/cert.pb.h" +#include "envoy/extensions/upstreams/http/http/v3/http_connection_pool.pb.h" +#include "envoy/extensions/upstreams/http/tcp/v3/tcp_connection_pool.pb.h" #include "envoy/type/v3/percent.pb.h" #include "common/buffer/buffer_impl.h" @@ -68,7 +70,7 @@ class RouterTestFilter : public Filter { // Filter RetryStatePtr createRetryState(const RetryPolicy&, Http::RequestHeaderMap&, const Upstream::ClusterInfo&, const VirtualCluster*, - Runtime::Loader&, Runtime::RandomGenerator&, Event::Dispatcher&, + Runtime::Loader&, Random::RandomGenerator&, Event::Dispatcher&, Upstream::ResourcePriority) override { EXPECT_EQ(nullptr, retry_state_); retry_state_ = new NiceMock(); @@ -125,6 +127,12 @@ class RouterTestBase : public testing::Test { EXPECT_CALL(*per_try_timeout_, disableTimer()); } + void expectMaxStreamDurationTimerCreate() { + max_stream_duration_timer_ = new Event::MockTimer(&callbacks_.dispatcher_); + EXPECT_CALL(*max_stream_duration_timer_, enableTimer(_, _)); + EXPECT_CALL(*max_stream_duration_timer_, disableTimer()); + } + AssertionResult verifyHostUpstreamStats(uint64_t success, uint64_t error) { if (success != cm_.conn_pool_.host_->stats_.rq_success_.value()) { return AssertionFailure() << fmt::format("rq_success {} does not match expected {}", @@ -168,9 +176,9 @@ class RouterTestBase : public testing::Test { } EXPECT_CALL(cm_, httpConnPoolForCluster(_, _, _, _)) - .WillOnce( - Invoke([&](const std::string&, Upstream::ResourcePriority, Http::Protocol, - Upstream::LoadBalancerContext* context) -> 
Http::ConnectionPool::Instance* { + .WillOnce(Invoke( + [&](const std::string&, Upstream::ResourcePriority, absl::optional, + Upstream::LoadBalancerContext* context) -> Http::ConnectionPool::Instance* { auto match = context->metadataMatchCriteria()->metadataMatchCriteria(); EXPECT_EQ(match.size(), 2); auto it = match.begin(); @@ -201,7 +209,7 @@ class RouterTestBase : public testing::Test { router_.decodeHeaders(headers, true); // When the router filter gets reset we should cancel the pool request. - EXPECT_CALL(cancellable_, cancel()); + EXPECT_CALL(cancellable_, cancel(_)); router_.onDestroy(); } @@ -219,11 +227,10 @@ class RouterTestBase : public testing::Test { } router_.decodeHeaders(headers, true); - EXPECT_EQ(expected_count, - atoi(std::string(headers.EnvoyAttemptCount()->value().getStringView()).c_str())); + EXPECT_EQ(expected_count, atoi(std::string(headers.getEnvoyAttemptCountValue()).c_str())); // When the router filter gets reset we should cancel the pool request. - EXPECT_CALL(cancellable_, cancel()); + EXPECT_CALL(cancellable_, cancel(_)); router_.onDestroy(); EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); EXPECT_EQ(0U, @@ -261,9 +268,7 @@ class RouterTestBase : public testing::Test { EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(200)); EXPECT_CALL(callbacks_, encodeHeaders_(_, true)) .WillOnce(Invoke([expected_count](Http::ResponseHeaderMap& headers, bool) { - EXPECT_EQ( - expected_count, - atoi(std::string(headers.EnvoyAttemptCount()->value().getStringView()).c_str())); + EXPECT_EQ(expected_count, atoi(std::string(headers.getEnvoyAttemptCountValue()).c_str())); })); response_decoder->decodeHeaders(std::move(response_headers), true); EXPECT_TRUE(verifyHostUpstreamStats(1, 0)); @@ -288,16 +293,18 @@ class RouterTestBase : public testing::Test { router_.decodeHeaders(default_request_headers_, end_stream); } - void enableRedirects() { - ON_CALL(callbacks_.route_->route_entry_, internalRedirectAction()) - 
.WillByDefault(Return(InternalRedirectAction::Handle)); - ON_CALL(callbacks_, connection()).WillByDefault(Return(&connection_)); - setMaxInternalRedirects(1); - } - - void setMaxInternalRedirects(uint32_t max_internal_redirects) { - ON_CALL(callbacks_.route_->route_entry_, maxInternalRedirects()) + void enableRedirects(uint32_t max_internal_redirects = 1) { + ON_CALL(callbacks_.route_->route_entry_.internal_redirect_policy_, enabled()) + .WillByDefault(Return(true)); + ON_CALL(callbacks_.route_->route_entry_.internal_redirect_policy_, + shouldRedirectForResponseCode(_)) + .WillByDefault(Return(true)); + ON_CALL(callbacks_.route_->route_entry_.internal_redirect_policy_, maxInternalRedirects()) .WillByDefault(Return(max_internal_redirects)); + ON_CALL(callbacks_.route_->route_entry_.internal_redirect_policy_, + isCrossSchemeRedirectAllowed()) + .WillByDefault(Return(false)); + ON_CALL(callbacks_, connection()).WillByDefault(Return(&connection_)); } void setNumPreviousRedirect(uint32_t num_previous_redirects) { @@ -317,6 +324,13 @@ class RouterTestBase : public testing::Test { .WillByDefault(Return(include)); } + void setUpstreamMaxStreamDuration(uint32_t seconds) { + common_http_protocol_options_.mutable_max_stream_duration()->MergeFrom( + ProtobufUtil::TimeUtil::MillisecondsToDuration(seconds)); + ON_CALL(cm_.conn_pool_.host_->cluster_, commonHttpProtocolOptions()) + .WillByDefault(ReturnRef(common_http_protocol_options_)); + } + void enableHedgeOnPerTryTimeout() { callbacks_.route_->route_entry_.hedge_policy_.hedge_on_per_try_timeout_ = true; callbacks_.route_->route_entry_.hedge_policy_.additional_request_chance_ = @@ -334,11 +348,12 @@ class RouterTestBase : public testing::Test { Event::SimulatedTimeSystem test_time_; std::string upstream_zone_{"to_az"}; envoy::config::core::v3::Locality upstream_locality_; + envoy::config::core::v3::HttpProtocolOptions common_http_protocol_options_; NiceMock stats_store_; NiceMock cm_; NiceMock runtime_; - NiceMock random_; - 
Http::ConnectionPool::MockCancellable cancellable_; + NiceMock random_; + Envoy::ConnectionPool::MockCancellable cancellable_; Http::ContextImpl http_context_; NiceMock callbacks_; MockShadowWriter* shadow_writer_; @@ -347,6 +362,7 @@ class RouterTestBase : public testing::Test { RouterTestFilter router_; Event::MockTimer* response_timeout_{}; Event::MockTimer* per_try_timeout_{}; + Event::MockTimer* max_stream_duration_timer_{}; Network::Address::InstanceConstSharedPtr host_address_{ Network::Utility::resolveUrl("tcp://10.0.0.5:9211")}; NiceMock original_encoder_; @@ -395,7 +411,7 @@ TEST_F(RouterTest, UpdateServerNameFilterState) { stream_info.filterState() ->getDataReadOnly(Network::UpstreamServerName::key()) .value()); - EXPECT_CALL(cancellable_, cancel()); + EXPECT_CALL(cancellable_, cancel(_)); router_.onDestroy(); EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); EXPECT_EQ(0U, @@ -423,7 +439,7 @@ TEST_F(RouterTest, UpdateSubjectAltNamesFilterState) { ->getDataReadOnly( Network::UpstreamSubjectAltNames::key()) .value()[0]); - EXPECT_CALL(cancellable_, cancel()); + EXPECT_CALL(cancellable_, cancel(_)); router_.onDestroy(); EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); EXPECT_EQ(0U, @@ -467,12 +483,12 @@ TEST_F(RouterTest, PoolFailureWithPriority) { .WillOnce(Invoke([&](Http::StreamDecoder&, Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* { callbacks.onPoolFailure(ConnectionPool::PoolFailureReason::RemoteConnectionFailure, - absl::string_view(), cm_.conn_pool_.host_); + "tls version mismatch", cm_.conn_pool_.host_); return nullptr; })); Http::TestResponseHeaderMapImpl response_headers{ - {":status", "503"}, {"content-length", "91"}, {"content-type", "text/plain"}}; + {":status", "503"}, {"content-length", "139"}, {"content-type", "text/plain"}}; EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), false)); EXPECT_CALL(callbacks_, encodeData(_, true)); EXPECT_CALL(callbacks_.stream_info_, @@ -489,14 +505,12 @@ 
TEST_F(RouterTest, PoolFailureWithPriority) { // Pool failure, so upstream request was not initiated. EXPECT_EQ(0U, callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); - EXPECT_EQ(callbacks_.details_, "upstream_reset_before_response_started{connection failure}"); + EXPECT_EQ(callbacks_.details_, + "upstream_reset_before_response_started{connection failure,tls version mismatch}"); } TEST_F(RouterTest, Http1Upstream) { - EXPECT_CALL(*cm_.thread_local_cluster_.cluster_.info_, upstreamHttpProtocol(_)) - .WillOnce(Return(Http::Protocol::Http11)); - - EXPECT_CALL(cm_, httpConnPoolForCluster(_, _, Http::Protocol::Http11, _)); + EXPECT_CALL(cm_, httpConnPoolForCluster(_, _, absl::optional(), _)); EXPECT_CALL(cm_.conn_pool_, newStream(_, _)).WillOnce(Return(&cancellable_)); expectResponseTimerCreate(); @@ -508,7 +522,7 @@ TEST_F(RouterTest, Http1Upstream) { EXPECT_EQ("10", headers.get_("x-envoy-expected-rq-timeout-ms")); // When the router filter gets reset we should cancel the pool request. - EXPECT_CALL(cancellable_, cancel()); + EXPECT_CALL(cancellable_, cancel(_)); router_.onDestroy(); EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); EXPECT_EQ(0U, @@ -519,10 +533,7 @@ TEST_F(RouterTest, Http1Upstream) { // x-envoy-original-path in the basic upstream test when Envoy header // suppression is configured. TEST_F(RouterTestSuppressEnvoyHeaders, Http1Upstream) { - EXPECT_CALL(*cm_.thread_local_cluster_.cluster_.info_, upstreamHttpProtocol(_)) - .WillOnce(Return(Http::Protocol::Http11)); - - EXPECT_CALL(cm_, httpConnPoolForCluster(_, _, Http::Protocol::Http11, _)); + EXPECT_CALL(cm_, httpConnPoolForCluster(_, _, absl::optional(), _)); EXPECT_CALL(cm_.conn_pool_, newStream(_, _)).WillOnce(Return(&cancellable_)); expectResponseTimerCreate(); @@ -533,7 +544,7 @@ TEST_F(RouterTestSuppressEnvoyHeaders, Http1Upstream) { EXPECT_FALSE(headers.has("x-envoy-expected-rq-timeout-ms")); // When the router filter gets reset we should cancel the pool request. 
- EXPECT_CALL(cancellable_, cancel()); + EXPECT_CALL(cancellable_, cancel(_)); router_.onDestroy(); EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); EXPECT_EQ(0U, @@ -541,10 +552,7 @@ TEST_F(RouterTestSuppressEnvoyHeaders, Http1Upstream) { } TEST_F(RouterTest, Http2Upstream) { - EXPECT_CALL(*cm_.thread_local_cluster_.cluster_.info_, upstreamHttpProtocol(_)) - .WillOnce(Return(Http::Protocol::Http2)); - - EXPECT_CALL(cm_, httpConnPoolForCluster(_, _, Http::Protocol::Http2, _)); + EXPECT_CALL(cm_, httpConnPoolForCluster(_, _, absl::optional(), _)); EXPECT_CALL(cm_.conn_pool_, newStream(_, _)).WillOnce(Return(&cancellable_)); expectResponseTimerCreate(); @@ -554,7 +562,7 @@ TEST_F(RouterTest, Http2Upstream) { router_.decodeHeaders(headers, true); // When the router filter gets reset we should cancel the pool request. - EXPECT_CALL(cancellable_, cancel()); + EXPECT_CALL(cancellable_, cancel(_)); router_.onDestroy(); EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); EXPECT_EQ(0U, @@ -568,7 +576,7 @@ TEST_F(RouterTest, HashPolicy) { .WillOnce(Return(absl::optional(10))); EXPECT_CALL(cm_, httpConnPoolForCluster(_, _, _, _)) .WillOnce( - Invoke([&](const std::string&, Upstream::ResourcePriority, Http::Protocol, + Invoke([&](const std::string&, Upstream::ResourcePriority, absl::optional, Upstream::LoadBalancerContext* context) -> Http::ConnectionPool::Instance* { EXPECT_EQ(10UL, context->computeHashKey().value()); return &cm_.conn_pool_; @@ -581,7 +589,7 @@ TEST_F(RouterTest, HashPolicy) { router_.decodeHeaders(headers, true); // When the router filter gets reset we should cancel the pool request. 
- EXPECT_CALL(cancellable_, cancel()); + EXPECT_CALL(cancellable_, cancel(_)); router_.onDestroy(); EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); EXPECT_EQ(0U, @@ -595,7 +603,7 @@ TEST_F(RouterTest, HashPolicyNoHash) { .WillOnce(Return(absl::optional())); EXPECT_CALL(cm_, httpConnPoolForCluster(_, _, _, &router_)) .WillOnce( - Invoke([&](const std::string&, Upstream::ResourcePriority, Http::Protocol, + Invoke([&](const std::string&, Upstream::ResourcePriority, absl::optional, Upstream::LoadBalancerContext* context) -> Http::ConnectionPool::Instance* { EXPECT_FALSE(context->computeHashKey()); return &cm_.conn_pool_; @@ -608,7 +616,7 @@ TEST_F(RouterTest, HashPolicyNoHash) { router_.decodeHeaders(headers, true); // When the router filter gets reset we should cancel the pool request. - EXPECT_CALL(cancellable_, cancel()); + EXPECT_CALL(cancellable_, cancel(_)); router_.onDestroy(); EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); EXPECT_EQ(0U, @@ -637,7 +645,7 @@ TEST_F(RouterTest, AddCookie) { EXPECT_CALL(cm_, httpConnPoolForCluster(_, _, _, _)) .WillOnce( - Invoke([&](const std::string&, Upstream::ResourcePriority, Http::Protocol, + Invoke([&](const std::string&, Upstream::ResourcePriority, absl::optional, Upstream::LoadBalancerContext* context) -> Http::ConnectionPool::Instance* { EXPECT_EQ(10UL, context->computeHashKey().value()); return &cm_.conn_pool_; @@ -689,7 +697,7 @@ TEST_F(RouterTest, AddCookieNoDuplicate) { EXPECT_CALL(cm_, httpConnPoolForCluster(_, _, _, _)) .WillOnce( - Invoke([&](const std::string&, Upstream::ResourcePriority, Http::Protocol, + Invoke([&](const std::string&, Upstream::ResourcePriority, absl::optional, Upstream::LoadBalancerContext* context) -> Http::ConnectionPool::Instance* { EXPECT_EQ(10UL, context->computeHashKey().value()); return &cm_.conn_pool_; @@ -706,7 +714,8 @@ TEST_F(RouterTest, AddCookieNoDuplicate) { EXPECT_CALL(callbacks_, encodeHeaders_(_, _)) .WillOnce(Invoke([&](const Http::HeaderMap& headers, const bool) -> void { - 
EXPECT_EQ(headers.get(Http::Headers::get().SetCookie)->value().getStringView(), "foo=baz"); + EXPECT_EQ(std::string{headers.get(Http::Headers::get().SetCookie)->value().getStringView()}, + "foo=baz"); })); expectResponseTimerCreate(); @@ -738,7 +747,7 @@ TEST_F(RouterTest, AddMultipleCookies) { EXPECT_CALL(cm_, httpConnPoolForCluster(_, _, _, _)) .WillOnce( - Invoke([&](const std::string&, Upstream::ResourcePriority, Http::Protocol, + Invoke([&](const std::string&, Upstream::ResourcePriority, absl::optional, Upstream::LoadBalancerContext* context) -> Http::ConnectionPool::Instance* { EXPECT_EQ(10UL, context->computeHashKey().value()); return &cm_.conn_pool_; @@ -760,15 +769,12 @@ TEST_F(RouterTest, AddMultipleCookies) { EXPECT_CALL(cb, Call("foo=\"" + foo_c + "\"; Max-Age=1337; Path=/path; HttpOnly")); EXPECT_CALL(cb, Call("choco=\"" + choco_c + "\"; Max-Age=15; HttpOnly")); - headers.iterate( - [](const Http::HeaderEntry& header, void* context) -> Http::HeaderMap::Iterate { - if (header.key() == Http::Headers::get().SetCookie.get()) { - static_cast*>(context)->Call( - std::string(header.value().getStringView())); - } - return Http::HeaderMap::Iterate::Continue; - }, - &cb); + headers.iterate([&cb](const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate { + if (header.key() == Http::Headers::get().SetCookie.get()) { + cb.Call(std::string(header.value().getStringView())); + } + return Http::HeaderMap::Iterate::Continue; + }); })); expectResponseTimerCreate(); @@ -789,7 +795,7 @@ TEST_F(RouterTest, MetadataMatchCriteria) { .WillByDefault(Return(&callbacks_.route_->route_entry_.metadata_matches_criteria_)); EXPECT_CALL(cm_, httpConnPoolForCluster(_, _, _, _)) .WillOnce( - Invoke([&](const std::string&, Upstream::ResourcePriority, Http::Protocol, + Invoke([&](const std::string&, Upstream::ResourcePriority, absl::optional, Upstream::LoadBalancerContext* context) -> Http::ConnectionPool::Instance* { EXPECT_EQ(context->metadataMatchCriteria(), 
&callbacks_.route_->route_entry_.metadata_matches_criteria_); @@ -803,7 +809,7 @@ TEST_F(RouterTest, MetadataMatchCriteria) { router_.decodeHeaders(headers, true); // When the router filter gets reset we should cancel the pool request. - EXPECT_CALL(cancellable_, cancel()); + EXPECT_CALL(cancellable_, cancel(_)); router_.onDestroy(); } @@ -819,7 +825,7 @@ TEST_F(RouterTest, NoMetadataMatchCriteria) { ON_CALL(callbacks_.route_->route_entry_, metadataMatchCriteria()).WillByDefault(Return(nullptr)); EXPECT_CALL(cm_, httpConnPoolForCluster(_, _, _, _)) .WillOnce( - Invoke([&](const std::string&, Upstream::ResourcePriority, Http::Protocol, + Invoke([&](const std::string&, Upstream::ResourcePriority, absl::optional, Upstream::LoadBalancerContext* context) -> Http::ConnectionPool::Instance* { EXPECT_EQ(context->metadataMatchCriteria(), nullptr); return &cm_.conn_pool_; @@ -832,7 +838,7 @@ TEST_F(RouterTest, NoMetadataMatchCriteria) { router_.decodeHeaders(headers, true); // When the router filter gets reset we should cancel the pool request. - EXPECT_CALL(cancellable_, cancel()); + EXPECT_CALL(cancellable_, cancel(_)); router_.onDestroy(); } @@ -845,7 +851,7 @@ TEST_F(RouterTest, CancelBeforeBoundToPool) { router_.decodeHeaders(headers, true); // When the router filter gets reset we should cancel the pool request. - EXPECT_CALL(cancellable_, cancel()); + EXPECT_CALL(cancellable_, cancel(_)); router_.onDestroy(); EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); EXPECT_EQ(0U, @@ -1020,7 +1026,7 @@ TEST_F(RouterTest, EnvoyAttemptCountInRequestUpdatedInRetries) { callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); // Initial request has 1 attempt. - EXPECT_EQ(1, atoi(std::string(headers.EnvoyAttemptCount()->value().getStringView()).c_str())); + EXPECT_EQ(1, atoi(std::string(headers.getEnvoyAttemptCountValue()).c_str())); // 5xx response. 
router_.retry_state_->expectHeadersRetry(); @@ -1046,7 +1052,7 @@ TEST_F(RouterTest, EnvoyAttemptCountInRequestUpdatedInRetries) { callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); // The retry should cause the header to increase to 2. - EXPECT_EQ(2, atoi(std::string(headers.EnvoyAttemptCount()->value().getStringView()).c_str())); + EXPECT_EQ(2, atoi(std::string(headers.getEnvoyAttemptCountValue()).c_str())); // Normal response. EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _)).WillOnce(Return(RetryStatus::No)); @@ -1192,8 +1198,7 @@ TEST_F(RouterTest, EnvoyAttemptCountInResponseWithRetries) { EXPECT_CALL(callbacks_, encodeHeaders_(_, true)) .WillOnce(Invoke([](Http::ResponseHeaderMap& headers, bool) { // Because a retry happened the number of attempts in the response headers should be 2. - EXPECT_EQ(2, - atoi(std::string(headers.EnvoyAttemptCount()->value().getStringView()).c_str())); + EXPECT_EQ(2, atoi(std::string(headers.getEnvoyAttemptCountValue()).c_str())); })); response_decoder->decodeHeaders(std::move(response_headers2), true); EXPECT_TRUE(verifyHostUpstreamStats(1, 1)); @@ -1423,8 +1428,8 @@ TEST_F(RouterTestSuppressEnvoyHeaders, EnvoyUpstreamServiceTime) { Http::ResponseHeaderMapPtr response_headers( new Http::TestResponseHeaderMapImpl{{":status", "200"}}); EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(200)); - Http::TestHeaderMapImpl downstream_response_headers{{":status", "200"}, - {"x-envoy-upstream-service-time", "0"}}; + Http::TestResponseHeaderMapImpl downstream_response_headers{ + {":status", "200"}, {"x-envoy-upstream-service-time", "0"}}; EXPECT_CALL(callbacks_, encodeHeaders_(_, true)) .WillOnce(Invoke([](Http::HeaderMap& headers, bool) { EXPECT_EQ(nullptr, headers.get(Http::Headers::get().EnvoyUpstreamServiceTime)); @@ -3747,10 +3752,13 @@ TEST_F(RouterTest, RetryUpstreamResetResponseStarted) { new Http::TestResponseHeaderMapImpl{{":status", "200"}}); 
EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(200)); response_decoder->decodeHeaders(std::move(response_headers), false); - absl::string_view rc_details2 = "upstream_reset_after_response_started{remote reset}"; - EXPECT_CALL(callbacks_.stream_info_, setResponseCodeDetails(rc_details2)); EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putResult(Upstream::Outlier::Result::LocalOriginConnectFailed, _)); + // Normally, sendLocalReply will actually send the reply, but in this case the + // HCM will detect the headers have already been sent and not route through + // the encoder again. + EXPECT_CALL(callbacks_, sendLocalReply(_, _, _, _, _)).WillOnce(testing::InvokeWithoutArgs([] { + })); encoder1.stream_.resetStream(Http::StreamResetReason::RemoteReset); // For normal HTTP, once we have a 200 we consider this a success, even if a // later reset occurs. @@ -3759,6 +3767,58 @@ TEST_F(RouterTest, RetryUpstreamResetResponseStarted) { callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); } +// The router filter is responsible for not propagating 100-continue headers after the initial 100. +TEST_F(RouterTest, Coalesce100ContinueHeaders) { + // Setup. + NiceMock encoder1; + Http::ResponseDecoder* response_decoder = nullptr; + EXPECT_CALL(cm_.conn_pool_, newStream(_, _)) + .WillOnce(Invoke( + [&](Http::ResponseDecoder& decoder, + Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* { + response_decoder = &decoder; + callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_, upstream_stream_info_); + return nullptr; + })); + expectResponseTimerCreate(); + + Http::TestRequestHeaderMapImpl headers; + HttpTestUtility::addDefaultHeaders(headers); + router_.decodeHeaders(headers, true); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); + + // Initial 100-continue, this is processed normally. 
+ EXPECT_CALL(callbacks_, encode100ContinueHeaders_(_)); + { + Http::ResponseHeaderMapPtr continue_headers( + new Http::TestResponseHeaderMapImpl{{":status", "100"}}); + response_decoder->decode100ContinueHeaders(std::move(continue_headers)); + } + EXPECT_EQ( + 1U, + cm_.thread_local_cluster_.cluster_.info_->stats_store_.counter("upstream_rq_100").value()); + + // No encode100ContinueHeaders() invocation for the second 100-continue (but we continue to track + // stats from upstream). + EXPECT_CALL(callbacks_, encode100ContinueHeaders_(_)).Times(0); + { + Http::ResponseHeaderMapPtr continue_headers( + new Http::TestResponseHeaderMapImpl{{":status", "100"}}); + response_decoder->decode100ContinueHeaders(std::move(continue_headers)); + } + EXPECT_EQ( + 2U, + cm_.thread_local_cluster_.cluster_.info_->stats_store_.counter("upstream_rq_100").value()); + + // Reset stream and cleanup. + EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, + putResult(Upstream::Outlier::Result::LocalOriginConnectFailed, _)); + encoder1.stream_.resetStream(Http::StreamResetReason::RemoteReset); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); +} + TEST_F(RouterTest, RetryUpstreamReset100ContinueResponseStarted) { NiceMock encoder1; Http::ResponseDecoder* response_decoder = nullptr; @@ -3888,6 +3948,145 @@ TEST_F(RouterTest, RetryTimeoutDuringRetryDelay) { EXPECT_TRUE(verifyHostUpstreamStats(0, 1)); } +TEST_F(RouterTest, MaxStreamDurationValidlyConfiguredWithoutRetryPolicy) { + NiceMock encoder1; + Http::ResponseDecoder* response_decoder = nullptr; + setUpstreamMaxStreamDuration(500); + EXPECT_CALL(cm_.conn_pool_, newStream(_, _)) + .WillOnce(Invoke( + [&](Http::ResponseDecoder& decoder, + Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* { + response_decoder = &decoder; + callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_, upstream_stream_info_); + return nullptr; + })); + 
expectMaxStreamDurationTimerCreate(); + + Http::TestRequestHeaderMapImpl headers; + HttpTestUtility::addDefaultHeaders(headers); + router_.decodeHeaders(headers, false); + max_stream_duration_timer_->invokeCallback(); + + router_.onDestroy(); + EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); +} + +TEST_F(RouterTest, MaxStreamDurationDisabledIfSetToZero) { + NiceMock encoder1; + Http::ResponseDecoder* response_decoder = nullptr; + setUpstreamMaxStreamDuration(0); + EXPECT_CALL(cm_.conn_pool_, newStream(_, _)) + .WillOnce(Invoke( + [&](Http::ResponseDecoder& decoder, + Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* { + response_decoder = &decoder; + callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_, upstream_stream_info_); + return nullptr; + })); + + // not to be called timer creation. + EXPECT_CALL(callbacks_.dispatcher_, createTimer_).Times(0); + + Http::TestRequestHeaderMapImpl headers; + HttpTestUtility::addDefaultHeaders(headers); + router_.decodeHeaders(headers, false); + + router_.onDestroy(); + EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); +} + +TEST_F(RouterTest, MaxStreamDurationCallbackNotCalled) { + NiceMock encoder1; + Http::ResponseDecoder* response_decoder = nullptr; + setUpstreamMaxStreamDuration(5000); + EXPECT_CALL(cm_.conn_pool_, newStream(_, _)) + .WillOnce(Invoke( + [&](Http::ResponseDecoder& decoder, + Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* { + response_decoder = &decoder; + callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_, upstream_stream_info_); + return nullptr; + })); + expectMaxStreamDurationTimerCreate(); + + Http::TestRequestHeaderMapImpl headers; + HttpTestUtility::addDefaultHeaders(headers); + router_.decodeHeaders(headers, false); + + router_.onDestroy(); + EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); +} + +TEST_F(RouterTest, MaxStreamDurationWhenDownstreamAlreadyStartedWithoutRetryPolicy) { + NiceMock encoder1; + Http::ResponseDecoder* response_decoder = 
nullptr; + setUpstreamMaxStreamDuration(500); + EXPECT_CALL(cm_.conn_pool_, newStream(_, _)) + .WillOnce(Invoke( + [&](Http::ResponseDecoder& decoder, + Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* { + response_decoder = &decoder; + callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_, upstream_stream_info_); + return nullptr; + })); + expectMaxStreamDurationTimerCreate(); + + Http::TestRequestHeaderMapImpl headers; + HttpTestUtility::addDefaultHeaders(headers); + router_.decodeHeaders(headers, false); + Http::ResponseHeaderMapPtr response_headers( + new Http::TestResponseHeaderMapImpl{{":status", "200"}}); + response_decoder->decodeHeaders(std::move(response_headers), false); + max_stream_duration_timer_->invokeCallback(); + + router_.onDestroy(); + EXPECT_TRUE(verifyHostUpstreamStats(1, 0)); +} + +TEST_F(RouterTest, MaxStreamDurationWithRetryPolicy) { + // First upstream request + NiceMock encoder1; + Http::ResponseDecoder* response_decoder = nullptr; + setUpstreamMaxStreamDuration(500); + EXPECT_CALL(cm_.conn_pool_, newStream(_, _)) + .WillOnce(Invoke( + [&](Http::ResponseDecoder& decoder, + Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* { + response_decoder = &decoder; + callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_, upstream_stream_info_); + return nullptr; + })); + expectMaxStreamDurationTimerCreate(); + + Http::TestRequestHeaderMapImpl headers{{"x-envoy-retry-on", "reset"}, + {"x-envoy-internal", "true"}}; + HttpTestUtility::addDefaultHeaders(headers); + router_.decodeHeaders(headers, false); + + router_.retry_state_->expectResetRetry(); + max_stream_duration_timer_->invokeCallback(); + + // Second upstream request + NiceMock encoder2; + setUpstreamMaxStreamDuration(500); + EXPECT_CALL(cm_.conn_pool_, newStream(_, _)) + .WillOnce(Invoke( + [&](Http::ResponseDecoder& decoder, + Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* { + response_decoder = 
&decoder; + callbacks.onPoolReady(encoder2, cm_.conn_pool_.host_, upstream_stream_info_); + return nullptr; + })); + expectMaxStreamDurationTimerCreate(); + router_.retry_state_->callback_(); + + EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _)).WillOnce(Return(RetryStatus::No)); + Http::ResponseHeaderMapPtr response_headers( + new Http::TestResponseHeaderMapImpl{{":status", "200"}}); + response_decoder->decodeHeaders(std::move(response_headers), true); + EXPECT_TRUE(verifyHostUpstreamStats(1, 1)); +} + TEST_F(RouterTest, RetryTimeoutDuringRetryDelayWithUpstreamRequestNoHost) { NiceMock encoder1; Http::ResponseDecoder* response_decoder = nullptr; @@ -3915,7 +4114,7 @@ TEST_F(RouterTest, RetryTimeoutDuringRetryDelayWithUpstreamRequestNoHost) { response_decoder->decodeHeaders(std::move(response_headers1), true); EXPECT_TRUE(verifyHostUpstreamStats(0, 1)); - Http::ConnectionPool::MockCancellable cancellable; + Envoy::ConnectionPool::MockCancellable cancellable; EXPECT_CALL(cm_.conn_pool_, newStream(_, _)) .WillOnce(Invoke([&](Http::ResponseDecoder& decoder, Http::ConnectionPool::Callbacks&) -> Http::ConnectionPool::Cancellable* { @@ -3925,7 +4124,7 @@ TEST_F(RouterTest, RetryTimeoutDuringRetryDelayWithUpstreamRequestNoHost) { router_.retry_state_->callback_(); // Fire timeout. 
- EXPECT_CALL(cancellable, cancel()); + EXPECT_CALL(cancellable, cancel(_)); EXPECT_CALL(callbacks_.stream_info_, setResponseFlag(StreamInfo::ResponseFlag::UpstreamRequestTimeout)); @@ -3971,7 +4170,7 @@ TEST_F(RouterTest, RetryTimeoutDuringRetryDelayWithUpstreamRequestNoHostAltRespo response_decoder->decodeHeaders(std::move(response_headers1), true); EXPECT_TRUE(verifyHostUpstreamStats(0, 1)); - Http::ConnectionPool::MockCancellable cancellable; + Envoy::ConnectionPool::MockCancellable cancellable; EXPECT_CALL(cm_.conn_pool_, newStream(_, _)) .WillOnce(Invoke([&](Http::ResponseDecoder& decoder, Http::ConnectionPool::Callbacks&) -> Http::ConnectionPool::Cancellable* { @@ -3981,7 +4180,7 @@ TEST_F(RouterTest, RetryTimeoutDuringRetryDelayWithUpstreamRequestNoHostAltRespo router_.retry_state_->callback_(); // Fire timeout. - EXPECT_CALL(cancellable, cancel()); + EXPECT_CALL(cancellable, cancel(_)); EXPECT_CALL(callbacks_.stream_info_, setResponseFlag(StreamInfo::ResponseFlag::UpstreamRequestTimeout)); @@ -4279,11 +4478,12 @@ TEST_F(RouterTest, RetryRespectsRetryHostPredicate) { } TEST_F(RouterTest, InternalRedirectRejectedWhenReachingMaxInternalRedirect) { - enableRedirects(); - setMaxInternalRedirects(3); + enableRedirects(3); setNumPreviousRedirect(3); sendRequest(); + EXPECT_CALL(callbacks_, recreateStream()).Times(0); + response_decoder_->decodeHeaders(std::move(redirect_headers_), false); Buffer::OwnedImpl data("1234567890"); @@ -4291,6 +4491,8 @@ TEST_F(RouterTest, InternalRedirectRejectedWhenReachingMaxInternalRedirect) { EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_ .counter("upstream_internal_redirect_failed_total") .value()); + EXPECT_EQ(1UL, + stats_store_.counter("test.passthrough_internal_redirect_too_many_redirects").value()); } TEST_F(RouterTest, InternalRedirectRejectedWithEmptyLocation) { @@ -4298,6 +4500,9 @@ TEST_F(RouterTest, InternalRedirectRejectedWithEmptyLocation) { sendRequest(); redirect_headers_->setLocation(""); + + 
EXPECT_CALL(callbacks_, recreateStream()).Times(0); + response_decoder_->decodeHeaders(std::move(redirect_headers_), false); Buffer::OwnedImpl data("1234567890"); @@ -4305,6 +4510,7 @@ TEST_F(RouterTest, InternalRedirectRejectedWithEmptyLocation) { EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_ .counter("upstream_internal_redirect_failed_total") .value()); + EXPECT_EQ(1UL, stats_store_.counter("test.passthrough_internal_redirect_bad_location").value()); } TEST_F(RouterTest, InternalRedirectRejectedWithInvalidLocation) { @@ -4312,6 +4518,9 @@ TEST_F(RouterTest, InternalRedirectRejectedWithInvalidLocation) { sendRequest(); redirect_headers_->setLocation("h"); + + EXPECT_CALL(callbacks_, recreateStream()).Times(0); + response_decoder_->decodeHeaders(std::move(redirect_headers_), false); Buffer::OwnedImpl data("1234567890"); @@ -4319,6 +4528,7 @@ TEST_F(RouterTest, InternalRedirectRejectedWithInvalidLocation) { EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_ .counter("upstream_internal_redirect_failed_total") .value()); + EXPECT_EQ(1UL, stats_store_.counter("test.passthrough_internal_redirect_bad_location").value()); } TEST_F(RouterTest, InternalRedirectRejectedWithoutCompleteRequest) { @@ -4326,6 +4536,8 @@ TEST_F(RouterTest, InternalRedirectRejectedWithoutCompleteRequest) { sendRequest(false); + EXPECT_CALL(callbacks_, recreateStream()).Times(0); + response_decoder_->decodeHeaders(std::move(redirect_headers_), false); Buffer::OwnedImpl data("1234567890"); @@ -4341,6 +4553,9 @@ TEST_F(RouterTest, InternalRedirectRejectedWithoutLocation) { sendRequest(); redirect_headers_->removeLocation(); + + EXPECT_CALL(callbacks_, recreateStream()).Times(0); + response_decoder_->decodeHeaders(std::move(redirect_headers_), false); Buffer::OwnedImpl data("1234567890"); response_decoder_->decodeData(data, true); @@ -4354,7 +4569,10 @@ TEST_F(RouterTest, InternalRedirectRejectedWithBody) { sendRequest(); - EXPECT_CALL(callbacks_, 
decodingBuffer()).Times(1); + Buffer::InstancePtr body_data(new Buffer::OwnedImpl("random_fake_data")); + EXPECT_CALL(callbacks_, decodingBuffer()).WillOnce(Return(body_data.get())); + EXPECT_CALL(callbacks_, recreateStream()).Times(0); + response_decoder_->decodeHeaders(std::move(redirect_headers_), false); Buffer::OwnedImpl data("1234567890"); response_decoder_->decodeData(data, true); @@ -4363,26 +4581,61 @@ TEST_F(RouterTest, InternalRedirectRejectedWithBody) { .value()); } -TEST_F(RouterTest, InternalRedirectRejectedWithCrossSchemeRedirect) { +TEST_F(RouterTest, CrossSchemeRedirectRejectedByPolicy) { enableRedirects(); sendRequest(); redirect_headers_->setLocation("https://www.foo.com"); + + EXPECT_CALL(callbacks_, decodingBuffer()).Times(1); + EXPECT_CALL(callbacks_, recreateStream()).Times(0); + response_decoder_->decodeHeaders(std::move(redirect_headers_), true); EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_ .counter("upstream_internal_redirect_failed_total") .value()); + EXPECT_EQ(1UL, stats_store_.counter("test.passthrough_internal_redirect_unsafe_scheme").value()); } -TEST_F(RouterTest, HttpInternalRedirectSucceeded) { +TEST_F(RouterTest, InternalRedirectRejectedByPredicate) { enableRedirects(); - setMaxInternalRedirects(3); + + sendRequest(); + + redirect_headers_->setLocation("http://www.foo.com/some/path"); + + auto mock_predicate = std::make_shared>(); + + EXPECT_CALL(callbacks_, decodingBuffer()).Times(1); + EXPECT_CALL(callbacks_, clearRouteCache()).Times(1); + EXPECT_CALL(callbacks_.route_->route_entry_.internal_redirect_policy_, predicates()) + .WillOnce(Return(std::vector({mock_predicate}))); + EXPECT_CALL(*mock_predicate, acceptTargetRoute(_, _, _, _)).WillOnce(Return(false)); + ON_CALL(*mock_predicate, name()).WillByDefault(Return("mock_predicate")); + EXPECT_CALL(callbacks_, recreateStream()).Times(0); + + response_decoder_->decodeHeaders(std::move(redirect_headers_), true); + EXPECT_EQ(1U, 
cm_.thread_local_cluster_.cluster_.info_->stats_store_ + .counter("upstream_internal_redirect_failed_total") + .value()); + EXPECT_EQ(1UL, stats_store_.counter("test.passthrough_internal_redirect_predicate").value()); + + // Make sure the original host/path is preserved. + EXPECT_EQ("host", default_request_headers_.getHostValue()); + EXPECT_EQ("/", default_request_headers_.getPathValue()); + // Make sure x-envoy-original-url is not set for unsuccessful redirect. + EXPECT_EQ(nullptr, default_request_headers_.EnvoyOriginalUrl()); +} + +TEST_F(RouterTest, HttpInternalRedirectSucceeded) { + enableRedirects(3); setNumPreviousRedirect(2); default_request_headers_.setForwardedProto("http"); sendRequest(); EXPECT_CALL(callbacks_, decodingBuffer()).Times(1); + EXPECT_CALL(callbacks_, clearRouteCache()).Times(1); EXPECT_CALL(callbacks_, recreateStream()).Times(1).WillOnce(Return(true)); response_decoder_->decodeHeaders(std::move(redirect_headers_), false); EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_ @@ -4399,8 +4652,7 @@ TEST_F(RouterTest, HttpInternalRedirectSucceeded) { TEST_F(RouterTest, HttpsInternalRedirectSucceeded) { auto ssl_connection = std::make_shared(); - enableRedirects(); - setMaxInternalRedirects(3); + enableRedirects(3); setNumPreviousRedirect(1); sendRequest(); @@ -4408,6 +4660,30 @@ TEST_F(RouterTest, HttpsInternalRedirectSucceeded) { redirect_headers_->setLocation("https://www.foo.com"); EXPECT_CALL(connection_, ssl()).Times(1).WillOnce(Return(ssl_connection)); EXPECT_CALL(callbacks_, decodingBuffer()).Times(1); + EXPECT_CALL(callbacks_, clearRouteCache()).Times(1); + EXPECT_CALL(callbacks_, recreateStream()).Times(1).WillOnce(Return(true)); + response_decoder_->decodeHeaders(std::move(redirect_headers_), false); + EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_ + .counter("upstream_internal_redirect_succeeded_total") + .value()); + + // In production, the HCM recreateStream would have called this. 
+ router_.onDestroy(); +} + +TEST_F(RouterTest, CrossSchemeRedirectAllowedByPolicy) { + auto ssl_connection = std::make_shared(); + enableRedirects(); + + sendRequest(); + + redirect_headers_->setLocation("http://www.foo.com"); + EXPECT_CALL(connection_, ssl()).Times(1).WillOnce(Return(ssl_connection)); + EXPECT_CALL(callbacks_, decodingBuffer()).Times(1); + EXPECT_CALL(callbacks_.route_->route_entry_.internal_redirect_policy_, + isCrossSchemeRedirectAllowed()) + .WillOnce(Return(true)); + EXPECT_CALL(callbacks_, clearRouteCache()).Times(1); EXPECT_CALL(callbacks_, recreateStream()).Times(1).WillOnce(Return(true)); response_decoder_->decodeHeaders(std::move(redirect_headers_), false); EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_ @@ -5639,7 +5915,7 @@ TEST_F(RouterTest, ApplicationProtocols) { EXPECT_CALL(cm_, httpConnPoolForCluster(_, _, _, _)) .WillOnce( - Invoke([&](const std::string&, Upstream::ResourcePriority, Http::Protocol, + Invoke([&](const std::string&, Upstream::ResourcePriority, absl::optional, Upstream::LoadBalancerContext* context) -> Http::ConnectionPool::Instance* { Network::TransportSocketOptionsSharedPtr transport_socket_options = context->upstreamTransportSocketOptions(); @@ -5660,13 +5936,107 @@ TEST_F(RouterTest, ApplicationProtocols) { router_.decodeHeaders(headers, true); // When the router filter gets reset we should cancel the pool request. - EXPECT_CALL(cancellable_, cancel()); + EXPECT_CALL(cancellable_, cancel(_)); router_.onDestroy(); EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); EXPECT_EQ(0U, callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); } +// Verify that CONNECT payload is not sent upstream until :200 response headers +// are received. 
+TEST_F(RouterTest, ConnectPauseAndResume) { + NiceMock encoder; + Http::ResponseDecoder* response_decoder = nullptr; + EXPECT_CALL(cm_.conn_pool_, newStream(_, _)) + .WillOnce(Invoke( + [&](Http::ResponseDecoder& decoder, + Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* { + response_decoder = &decoder; + callbacks.onPoolReady(encoder, cm_.conn_pool_.host_, upstream_stream_info_); + return nullptr; + })); + expectResponseTimerCreate(); + + EXPECT_CALL(encoder, encodeHeaders(_, false)); + Http::TestRequestHeaderMapImpl headers; + HttpTestUtility::addDefaultHeaders(headers); + headers.setMethod("CONNECT"); + router_.decodeHeaders(headers, false); + + // Make sure any early data does not go upstream. + EXPECT_CALL(encoder, encodeData(_, _)).Times(0); + Buffer::OwnedImpl data; + router_.decodeData(data, true); + + // Now send the response headers, and ensure the deferred payload is proxied. + EXPECT_CALL(encoder, encodeData(_, _)); + Http::ResponseHeaderMapPtr response_headers( + new Http::TestResponseHeaderMapImpl{{":status", "200"}}); + response_decoder->decodeHeaders(std::move(response_headers), true); +} + +// Verify that CONNECT payload is not sent upstream if non-200 response headers are received. +TEST_F(RouterTest, ConnectPauseNoResume) { + // Explicitly configure an HTTP upstream, to test factory creation. 
+ cm_.thread_local_cluster_.cluster_.info_->upstream_config_ = + absl::make_optional(); + envoy::extensions::upstreams::http::http::v3::HttpConnectionPoolProto http_config; + cm_.thread_local_cluster_.cluster_.info_->upstream_config_.value() + .mutable_typed_config() + ->PackFrom(http_config); + + NiceMock encoder; + Http::ResponseDecoder* response_decoder = nullptr; + EXPECT_CALL(cm_.conn_pool_, newStream(_, _)) + .WillOnce(Invoke( + [&](Http::ResponseDecoder& decoder, + Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* { + response_decoder = &decoder; + callbacks.onPoolReady(encoder, cm_.conn_pool_.host_, upstream_stream_info_); + return nullptr; + })); + expectResponseTimerCreate(); + + EXPECT_CALL(encoder, encodeHeaders(_, false)); + Http::TestRequestHeaderMapImpl headers; + HttpTestUtility::addDefaultHeaders(headers); + headers.setMethod("CONNECT"); + router_.decodeHeaders(headers, false); + + // Make sure any early data does not go upstream. + EXPECT_CALL(encoder, encodeData(_, _)).Times(0); + Buffer::OwnedImpl data; + router_.decodeData(data, true); + + // Now send the response headers, and ensure the deferred payload is not proxied. + EXPECT_CALL(encoder, encodeData(_, _)).Times(0); + Http::ResponseHeaderMapPtr response_headers( + new Http::TestResponseHeaderMapImpl{{":status", "400"}}); + response_decoder->decodeHeaders(std::move(response_headers), true); +} + +TEST_F(RouterTest, ConnectExplicitTcpUpstream) { + // Explicitly configure an TCP upstream, to test factory creation. 
+ cm_.thread_local_cluster_.cluster_.info_->upstream_config_ = + absl::make_optional(); + envoy::extensions::upstreams::http::tcp::v3::TcpConnectionPoolProto tcp_config; + cm_.thread_local_cluster_.cluster_.info_->upstream_config_.value() + .mutable_typed_config() + ->PackFrom(tcp_config); + callbacks_.route_->route_entry_.connect_config_ = + absl::make_optional(); + + // Make sure newConnection is called on the TCP pool, not newStream on the HTTP pool. + EXPECT_CALL(cm_.tcp_conn_pool_, newConnection(_)); + Http::TestRequestHeaderMapImpl headers; + HttpTestUtility::addDefaultHeaders(headers); + headers.setMethod("CONNECT"); + router_.decodeHeaders(headers, false); + + router_.onDestroy(); +} + class WatermarkTest : public RouterTest { public: void sendRequest(bool header_only_request = true, bool pool_ready = true) { @@ -5674,9 +6044,10 @@ class WatermarkTest : public RouterTest { .WillOnce(Return(std::chrono::milliseconds(0))); EXPECT_CALL(callbacks_.dispatcher_, createTimer_(_)).Times(0); - EXPECT_CALL(stream_, addCallbacks(_)).WillOnce(Invoke([&](Http::StreamCallbacks& callbacks) { - stream_callbacks_ = &callbacks; - })); + EXPECT_CALL(stream_, addCallbacks(_)) + .Times(num_add_callbacks_) + .WillOnce( + Invoke([&](Http::StreamCallbacks& callbacks) { stream_callbacks_ = &callbacks; })); EXPECT_CALL(encoder_, getStream()).WillRepeatedly(ReturnRef(stream_)); EXPECT_CALL(cm_.conn_pool_, newStream(_, _)) .WillOnce(Invoke( @@ -5707,6 +6078,7 @@ class WatermarkTest : public RouterTest { Http::ResponseDecoder* response_decoder_ = nullptr; Http::TestRequestHeaderMapImpl headers_; Http::ConnectionPool::Callbacks* pool_callbacks_{nullptr}; + int num_add_callbacks_{1}; }; TEST_F(WatermarkTest, DownstreamWatermarks) { @@ -5786,7 +6158,29 @@ TEST_F(WatermarkTest, FilterWatermarks) { .value()); sendResponse(); -} // namespace Router +} + +TEST_F(WatermarkTest, FilterWatermarksUnwound) { + num_add_callbacks_ = 0; + EXPECT_CALL(callbacks_, 
decoderBufferLimit()).Times(3).WillRepeatedly(Return(10)); + router_.setDecoderFilterCallbacks(callbacks_); + // Send the headers sans-fin, and don't flag the pool as ready. + sendRequest(false, false); + + // Send 11 bytes of body to fill the 10 byte buffer. + Buffer::OwnedImpl data("1234567890!"); + router_.decodeData(data, false); + EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_ + .counter("upstream_flow_control_backed_up_total") + .value()); + + // Set up a pool failure, and make sure the flow control blockage is undone. + pool_callbacks_->onPoolFailure(Http::ConnectionPool::PoolFailureReason::RemoteConnectionFailure, + absl::string_view(), nullptr); + EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_ + .counter("upstream_flow_control_drained_total") + .value()); +} // Same as RetryRequestNotComplete but with decodeData larger than the buffer // limit, no retry will occur. diff --git a/test/common/router/router_upstream_log_test.cc b/test/common/router/router_upstream_log_test.cc index d62043effadb3..8662e760364a2 100644 --- a/test/common/router/router_upstream_log_test.cc +++ b/test/common/router/router_upstream_log_test.cc @@ -18,7 +18,7 @@ #include "test/mocks/network/mocks.h" #include "test/mocks/router/mocks.h" #include "test/mocks/runtime/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "test/mocks/upstream/mocks.h" #include "test/test_common/utility.h" @@ -63,7 +63,7 @@ class TestFilter : public Filter { // Filter RetryStatePtr createRetryState(const RetryPolicy&, Http::RequestHeaderMap&, const Upstream::ClusterInfo&, const VirtualCluster*, - Runtime::Loader&, Runtime::RandomGenerator&, Event::Dispatcher&, + Runtime::Loader&, Random::RandomGenerator&, Event::Dispatcher&, Upstream::ResourcePriority) override { EXPECT_EQ(nullptr, retry_state_); retry_state_ = new NiceMock(); diff --git a/test/common/router/scoped_config_impl_test.cc 
b/test/common/router/scoped_config_impl_test.cc index 5d29bfd22309e..cc7adfd1adcb1 100644 --- a/test/common/router/scoped_config_impl_test.cc +++ b/test/common/router/scoped_config_impl_test.cc @@ -15,7 +15,7 @@ namespace Envoy { namespace Router { namespace { -using ::Envoy::Http::TestHeaderMapImpl; +using ::Envoy::Http::TestRequestHeaderMapImpl; using ::testing::NiceMock; class FooFragment : public ScopeKeyFragmentBase { @@ -116,30 +116,30 @@ TEST(HeaderValueExtractorImplTest, HeaderExtractionByIndex) { TestUtility::loadFromYaml(yaml_plain, config); HeaderValueExtractorImpl extractor(std::move(config)); - std::unique_ptr fragment = - extractor.computeFragment(TestHeaderMapImpl{{"foo_header", "part-0,part-1:value_bluh"}}); + std::unique_ptr fragment = extractor.computeFragment( + TestRequestHeaderMapImpl{{"foo_header", "part-0,part-1:value_bluh"}}); EXPECT_NE(fragment, nullptr); EXPECT_EQ(*fragment, StringKeyFragment{"part-1:value_bluh"}); // No such header. - fragment = extractor.computeFragment(TestHeaderMapImpl{{"bar_header", "part-0"}}); + fragment = extractor.computeFragment(TestRequestHeaderMapImpl{{"bar_header", "part-0"}}); EXPECT_EQ(fragment, nullptr); // Empty header value. - fragment = extractor.computeFragment(TestHeaderMapImpl{ + fragment = extractor.computeFragment(TestRequestHeaderMapImpl{ {"foo_header", ""}, }); EXPECT_EQ(fragment, nullptr); // Index out of bound. - fragment = extractor.computeFragment(TestHeaderMapImpl{ + fragment = extractor.computeFragment(TestRequestHeaderMapImpl{ {"foo_header", "part-0"}, }); EXPECT_EQ(fragment, nullptr); // Element is empty. 
- fragment = extractor.computeFragment(TestHeaderMapImpl{ + fragment = extractor.computeFragment(TestRequestHeaderMapImpl{ {"foo_header", "part-0,,,bluh"}, }); EXPECT_NE(fragment, nullptr); @@ -159,47 +159,48 @@ TEST(HeaderValueExtractorImplTest, HeaderExtractionByKey) { TestUtility::loadFromYaml(yaml_plain, config); HeaderValueExtractorImpl extractor(std::move(config)); - std::unique_ptr fragment = extractor.computeFragment(TestHeaderMapImpl{ - {"foo_header", "part-0;bar=>bluh;foo=>foo_value"}, - }); + std::unique_ptr fragment = + extractor.computeFragment(TestRequestHeaderMapImpl{ + {"foo_header", "part-0;bar=>bluh;foo=>foo_value"}, + }); EXPECT_NE(fragment, nullptr); EXPECT_EQ(*fragment, StringKeyFragment{"bluh"}); // No such header. - fragment = extractor.computeFragment(TestHeaderMapImpl{ + fragment = extractor.computeFragment(TestRequestHeaderMapImpl{ {"bluh", "part-0;"}, }); EXPECT_EQ(fragment, nullptr); // Empty header value. - fragment = extractor.computeFragment(TestHeaderMapImpl{ + fragment = extractor.computeFragment(TestRequestHeaderMapImpl{ {"foo_header", ""}, }); EXPECT_EQ(fragment, nullptr); // No such key. - fragment = extractor.computeFragment(TestHeaderMapImpl{ + fragment = extractor.computeFragment(TestRequestHeaderMapImpl{ {"foo_header", "part-0"}, }); EXPECT_EQ(fragment, nullptr); // Empty value. - fragment = extractor.computeFragment(TestHeaderMapImpl{ + fragment = extractor.computeFragment(TestRequestHeaderMapImpl{ {"foo_header", "bluh;;bar=>;foo=>last_value"}, }); EXPECT_NE(fragment, nullptr); EXPECT_EQ(*fragment, StringKeyFragment{""}); // Duplicate values, the first value returned. - fragment = extractor.computeFragment(TestHeaderMapImpl{ + fragment = extractor.computeFragment(TestRequestHeaderMapImpl{ {"foo_header", "bluh;;bar=>value1;bar=>value2;bluh;;bar=>last_value"}, }); EXPECT_NE(fragment, nullptr); EXPECT_EQ(*fragment, StringKeyFragment{"value1"}); // No separator in the element, value is set to empty string. 
- fragment = extractor.computeFragment(TestHeaderMapImpl{ + fragment = extractor.computeFragment(TestRequestHeaderMapImpl{ {"foo_header", "bluh;;bar;bar=>value2;bluh;;bar=>last_value"}, }); EXPECT_NE(fragment, nullptr); @@ -219,13 +220,14 @@ TEST(HeaderValueExtractorImplTest, ElementSeparatorEmpty) { TestUtility::loadFromYaml(yaml_plain, config); HeaderValueExtractorImpl extractor(std::move(config)); - std::unique_ptr fragment = extractor.computeFragment(TestHeaderMapImpl{ - {"foo_header", "bar=b;c=d;e=f"}, - }); + std::unique_ptr fragment = + extractor.computeFragment(TestRequestHeaderMapImpl{ + {"foo_header", "bar=b;c=d;e=f"}, + }); EXPECT_NE(fragment, nullptr); EXPECT_EQ(*fragment, StringKeyFragment{"b;c=d;e=f"}); - fragment = extractor.computeFragment(TestHeaderMapImpl{ + fragment = extractor.computeFragment(TestRequestHeaderMapImpl{ {"foo_header", "a=b;bar=d;e=f"}, }); EXPECT_EQ(fragment, nullptr); @@ -297,7 +299,7 @@ TEST(ScopeKeyBuilderImplTest, Parse) { TestUtility::loadFromYaml(yaml_plain, config); ScopeKeyBuilderImpl key_builder(std::move(config)); - std::unique_ptr key = key_builder.computeScopeKey(TestHeaderMapImpl{ + ScopeKeyPtr key = key_builder.computeScopeKey(TestRequestHeaderMapImpl{ {"foo_header", "a=b,bar=bar_value,e=f"}, {"bar_header", "a=b;bar=bar_value;index2"}, }); @@ -305,7 +307,7 @@ TEST(ScopeKeyBuilderImplTest, Parse) { EXPECT_EQ(*key, makeKey({"bar_value", "index2"})); // Empty string fragment is fine. - key = key_builder.computeScopeKey(TestHeaderMapImpl{ + key = key_builder.computeScopeKey(TestRequestHeaderMapImpl{ {"foo_header", "a=b,bar,e=f"}, {"bar_header", "a=b;bar=bar_value;"}, }); @@ -313,35 +315,35 @@ TEST(ScopeKeyBuilderImplTest, Parse) { EXPECT_EQ(*key, makeKey({"", ""})); // Key not found. - key = key_builder.computeScopeKey(TestHeaderMapImpl{ + key = key_builder.computeScopeKey(TestRequestHeaderMapImpl{ {"foo_header", "a=b,meh,e=f"}, {"bar_header", "a=b;bar=bar_value;"}, }); EXPECT_EQ(key, nullptr); // Index out of bound. 
- key = key_builder.computeScopeKey(TestHeaderMapImpl{ + key = key_builder.computeScopeKey(TestRequestHeaderMapImpl{ {"foo_header", "a=b,bar=bar_value,e=f"}, {"bar_header", "a=b;bar=bar_value"}, }); EXPECT_EQ(key, nullptr); // Header missing. - key = key_builder.computeScopeKey(TestHeaderMapImpl{ + key = key_builder.computeScopeKey(TestRequestHeaderMapImpl{ {"foo_header", "a=b,bar=bar_value,e=f"}, {"foobar_header", "a=b;bar=bar_value;index2"}, }); EXPECT_EQ(key, nullptr); // Header value empty. - key = key_builder.computeScopeKey(TestHeaderMapImpl{ + key = key_builder.computeScopeKey(TestRequestHeaderMapImpl{ {"foo_header", ""}, {"bar_header", "a=b;bar=bar_value;index2"}, }); EXPECT_EQ(key, nullptr); // Case sensitive. - key = key_builder.computeScopeKey(TestHeaderMapImpl{ + key = key_builder.computeScopeKey(TestRequestHeaderMapImpl{ {"foo_header", "a=b,Bar=bar_value,e=f"}, {"bar_header", "a=b;bar=bar_value;index2"}, }); @@ -443,25 +445,25 @@ class ScopedConfigImplTest : public testing::Test { // Test a ScopedConfigImpl returns the correct route Config. TEST_F(ScopedConfigImplTest, PickRoute) { scoped_config_impl_ = std::make_unique(std::move(key_builder_config_)); - scoped_config_impl_->addOrUpdateRoutingScope(scope_info_a_); - scoped_config_impl_->addOrUpdateRoutingScope(scope_info_b_); + scoped_config_impl_->addOrUpdateRoutingScopes({scope_info_a_}); + scoped_config_impl_->addOrUpdateRoutingScopes({scope_info_b_}); // Key (foo, bar) maps to scope_info_a_. - ConfigConstSharedPtr route_config = scoped_config_impl_->getRouteConfig(TestHeaderMapImpl{ + ConfigConstSharedPtr route_config = scoped_config_impl_->getRouteConfig(TestRequestHeaderMapImpl{ {"foo_header", ",,key=value,bar=foo,"}, {"bar_header", ";val1;bar;val3"}, }); EXPECT_EQ(route_config, scope_info_a_->routeConfig()); // Key (bar, baz) maps to scope_info_b_. 
- route_config = scoped_config_impl_->getRouteConfig(TestHeaderMapImpl{ + route_config = scoped_config_impl_->getRouteConfig(TestRequestHeaderMapImpl{ {"foo_header", ",,key=value,bar=bar,"}, {"bar_header", ";val1;baz;val3"}, }); EXPECT_EQ(route_config, scope_info_b_->routeConfig()); // No such key (bar, NOT_BAZ). - route_config = scoped_config_impl_->getRouteConfig(TestHeaderMapImpl{ + route_config = scoped_config_impl_->getRouteConfig(TestRequestHeaderMapImpl{ {"foo_header", ",key=value,bar=bar,"}, {"bar_header", ";val1;NOT_BAZ;val3"}, }); @@ -472,7 +474,7 @@ TEST_F(ScopedConfigImplTest, PickRoute) { TEST_F(ScopedConfigImplTest, Update) { scoped_config_impl_ = std::make_unique(std::move(key_builder_config_)); - TestHeaderMapImpl headers{ + TestRequestHeaderMapImpl headers{ {"foo_header", ",,key=value,bar=foo,"}, {"bar_header", ";val1;bar;val3"}, }; @@ -480,36 +482,36 @@ TEST_F(ScopedConfigImplTest, Update) { EXPECT_EQ(scoped_config_impl_->getRouteConfig(headers), nullptr); // Add scope_key (bar, baz). - scoped_config_impl_->addOrUpdateRoutingScope(scope_info_b_); + scoped_config_impl_->addOrUpdateRoutingScopes({scope_info_b_}); + // scope_info_a_ not found EXPECT_EQ(scoped_config_impl_->getRouteConfig(headers), nullptr); - EXPECT_EQ(scoped_config_impl_->getRouteConfig( - TestHeaderMapImpl{{"foo_header", ",,key=v,bar=bar,"}, {"bar_header", ";val1;baz"}}), + // scope_info_b_ found + EXPECT_EQ(scoped_config_impl_->getRouteConfig(TestRequestHeaderMapImpl{ + {"foo_header", ",,key=v,bar=bar,"}, {"bar_header", ";val1;baz"}}), scope_info_b_->routeConfig()); // Add scope_key (foo, bar). - scoped_config_impl_->addOrUpdateRoutingScope(scope_info_a_); + scoped_config_impl_->addOrUpdateRoutingScopes({scope_info_a_}); // Found scope_info_a_. EXPECT_EQ(scoped_config_impl_->getRouteConfig(headers), scope_info_a_->routeConfig()); // Update scope foo_scope. 
- scoped_config_impl_->addOrUpdateRoutingScope(scope_info_a_v2_); + scoped_config_impl_->addOrUpdateRoutingScopes({scope_info_a_v2_}); EXPECT_EQ(scoped_config_impl_->getRouteConfig(headers), nullptr); // foo_scope now is keyed by (xyz, xyz). - EXPECT_EQ(scoped_config_impl_->getRouteConfig( - TestHeaderMapImpl{{"foo_header", ",bar=xyz,foo=bar"}, {"bar_header", ";;xyz"}}), + EXPECT_EQ(scoped_config_impl_->getRouteConfig(TestRequestHeaderMapImpl{ + {"foo_header", ",bar=xyz,foo=bar"}, {"bar_header", ";;xyz"}}), scope_info_a_v2_->routeConfig()); // Remove scope "foo_scope". - scoped_config_impl_->removeRoutingScope("foo_scope"); + scoped_config_impl_->removeRoutingScopes({"foo_scope"}); // scope_info_a_ is gone. EXPECT_EQ(scoped_config_impl_->getRouteConfig(headers), nullptr); // Now delete some non-existent scopes. - EXPECT_NO_THROW(scoped_config_impl_->removeRoutingScope("foo_scope1")); - EXPECT_NO_THROW(scoped_config_impl_->removeRoutingScope("base_scope")); - EXPECT_NO_THROW(scoped_config_impl_->removeRoutingScope("bluh_scope")); - EXPECT_NO_THROW(scoped_config_impl_->removeRoutingScope("xyz_scope")); + EXPECT_NO_THROW(scoped_config_impl_->removeRoutingScopes( + {"foo_scope1", "base_scope", "bluh_scope", "xyz_scope"})); } } // namespace diff --git a/test/common/router/scoped_rds_test.cc b/test/common/router/scoped_rds_test.cc index 6940ed7ff02bf..b00466a702a49 100644 --- a/test/common/router/scoped_rds_test.cc +++ b/test/common/router/scoped_rds_test.cc @@ -6,6 +6,7 @@ #include "envoy/config/core/v3/config_source.pb.h" #include "envoy/config/route/v3/route.pb.h" #include "envoy/config/route/v3/scoped_route.pb.h" +#include "envoy/config/route/v3/scoped_route.pb.validate.h" #include "envoy/config/subscription.h" #include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" #include "envoy/init/manager.h" @@ -20,7 +21,7 @@ #include "test/mocks/config/mocks.h" #include "test/mocks/protobuf/mocks.h" #include 
"test/mocks/router/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/instance.h" #include "test/test_common/simulated_time_system.h" #include "test/test_common/utility.h" @@ -42,7 +43,7 @@ namespace Envoy { namespace Router { namespace { -using ::Envoy::Http::TestHeaderMapImpl; +using ::Envoy::Http::TestRequestHeaderMapImpl; envoy::config::route::v3::ScopedRouteConfiguration parseScopedRouteConfigurationFromYaml(const std::string& yaml) { @@ -51,11 +52,6 @@ parseScopedRouteConfigurationFromYaml(const std::string& yaml) { return scoped_route_config; } -void parseScopedRouteConfigurationFromYaml(ProtobufWkt::Any& scoped_route_config, - const std::string& yaml) { - scoped_route_config.PackFrom(parseScopedRouteConfigurationFromYaml(yaml)); -} - envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager parseHttpConnectionManagerFromYaml(const std::string& config_yaml) { envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager @@ -105,8 +101,8 @@ class ScopedRoutesTestBase : public testing::Test { NiceMock validation_context_; // server_factory_context_ is used by rds NiceMock server_factory_context_; - std::unique_ptr route_config_provider_manager_; - std::unique_ptr config_provider_manager_; + RouteConfigProviderManagerPtr route_config_provider_manager_; + ScopedRoutesConfigProviderManagerPtr config_provider_manager_; Event::SimulatedTimeSystem time_system_; }; @@ -125,7 +121,7 @@ class ScopedRdsTest : public ScopedRoutesTestBase { // srds subscription EXPECT_CALL(server_factory_context_.cluster_manager_.subscription_factory_, - subscriptionFromConfigSource(_, _, _, _)) + subscriptionFromConfigSource(_, _, _, _, _)) .Times(AnyNumber()); // rds subscription EXPECT_CALL( @@ -134,11 +130,12 @@ class ScopedRdsTest : public ScopedRoutesTestBase { _, Eq(Grpc::Common::typeUrl( API_NO_BOOST(envoy::api::v2::RouteConfiguration)().GetDescriptor()->full_name())), - _, _)) + _, _, _)) 
.Times(AnyNumber()) .WillRepeatedly(Invoke([this](const envoy::config::core::v3::ConfigSource&, absl::string_view, Stats::Scope&, - Envoy::Config::SubscriptionCallbacks& callbacks) { + Envoy::Config::SubscriptionCallbacks& callbacks, + Envoy::Config::OpaqueResourceDecoder&) { auto ret = std::make_unique>(); rds_subscription_by_config_subscription_[ret.get()] = &callbacks; EXPECT_CALL(*ret, start(_)) @@ -197,11 +194,11 @@ name: foo_scoped_routes route: {{ cluster: bluh }} )EOF"; for (const std::string& name : route_config_names) { - Protobuf::RepeatedPtrField resources; - resources.Add()->PackFrom( + const auto route_config = TestUtility::parseYaml( - fmt::format(route_config_tmpl, name))); - rds_subscription_by_name_[name]->onConfigUpdate(resources, version); + fmt::format(route_config_tmpl, name)); + const auto decoded_resources = TestUtility::decodeResources({route_config}); + rds_subscription_by_name_[name]->onConfigUpdate(decoded_resources.refvec_, version); } } @@ -224,54 +221,6 @@ name: foo_scoped_routes absl::flat_hash_map rds_subscription_by_name_; }; -TEST_F(ScopedRdsTest, ValidateFail) { - setup(); - - // 'name' validation: value must be > 1 byte. - const std::string config_yaml = R"EOF( -name: -route_configuration_name: foo_routes -key: - fragments: - - string_key: x-foo-key -)EOF"; - Protobuf::RepeatedPtrField resources; - parseScopedRouteConfigurationFromYaml(*resources.Add(), config_yaml); - EXPECT_THROW(srds_subscription_->onConfigUpdate(resources, "1"), ProtoValidationException); - EXPECT_THROW_WITH_REGEX( - srds_subscription_->onConfigUpdate(anyToResource(resources, "1"), {}, "1"), EnvoyException, - "Error adding/updating scoped route\\(s\\): Proto constraint validation failed.*"); - - // 'route_configuration_name' validation: value must be > 1 byte. 
- const std::string config_yaml2 = R"EOF( -name: foo_scope -route_configuration_name: -key: - fragments: - - string_key: x-foo-key -)EOF"; - Protobuf::RepeatedPtrField resources2; - parseScopedRouteConfigurationFromYaml(*resources2.Add(), config_yaml2); - EXPECT_THROW(srds_subscription_->onConfigUpdate(resources2, "1"), ProtoValidationException); - EXPECT_THROW_WITH_REGEX( - srds_subscription_->onConfigUpdate(anyToResource(resources2, "1"), {}, "1"), EnvoyException, - "Error adding/updating scoped route\\(s\\): Proto constraint validation failed.*"); - - // 'key' validation: must define at least 1 fragment. - const std::string config_yaml3 = R"EOF( -name: foo_scope -route_configuration_name: foo_routes -key: -)EOF"; - Protobuf::RepeatedPtrField resources3; - parseScopedRouteConfigurationFromYaml(*resources3.Add(), config_yaml3); - EXPECT_THROW(srds_subscription_->onConfigUpdate(resources3, "1"), ProtoValidationException); - EXPECT_THROW_WITH_REGEX( - srds_subscription_->onConfigUpdate(anyToResource(resources3, "1"), {}, "1"), EnvoyException, - "Error adding/updating scoped route\\(s\\): Proto constraint validation failed .*value is " - "required.*"); -} - // Tests that multiple uniquely named non-conflict resources are allowed in config updates. TEST_F(ScopedRdsTest, MultipleResourcesSotw) { setup(); @@ -283,8 +232,7 @@ route_configuration_name: foo_routes fragments: - string_key: x-foo-key )EOF"; - Protobuf::RepeatedPtrField resources; - parseScopedRouteConfigurationFromYaml(*resources.Add(), config_yaml); + const auto resource = parseScopedRouteConfigurationFromYaml(config_yaml); const std::string config_yaml2 = R"EOF( name: foo_scope2 route_configuration_name: foo_routes @@ -292,44 +240,45 @@ route_configuration_name: foo_routes fragments: - string_key: x-bar-key )EOF"; - parseScopedRouteConfigurationFromYaml(*resources.Add(), config_yaml2); - init_watcher_.expectReady().Times(1); // Only the SRDS parent_init_target_. 
+ const auto resource_2 = parseScopedRouteConfigurationFromYaml(config_yaml2); + init_watcher_.expectReady(); // Only the SRDS parent_init_target_. context_init_manager_.initialize(init_watcher_); - EXPECT_NO_THROW(srds_subscription_->onConfigUpdate(resources, "1")); + const auto decoded_resources = TestUtility::decodeResources({resource, resource_2}); + EXPECT_NO_THROW(srds_subscription_->onConfigUpdate(decoded_resources.refvec_, "1")); EXPECT_EQ(1UL, server_factory_context_.scope_.counter("foo.scoped_rds.foo_scoped_routes.config_reload") .value()); // Verify the config is a ScopedConfigImpl instance, both scopes point to "" as RDS hasn't kicked // in yet(NullConfigImpl returned). - EXPECT_NE(getScopedRdsProvider(), nullptr); - EXPECT_NE(getScopedRdsProvider()->config(), nullptr); + ASSERT_THAT(getScopedRdsProvider(), Not(IsNull())); + ASSERT_THAT(getScopedRdsProvider()->config(), Not(IsNull())); EXPECT_EQ(getScopedRdsProvider() ->config() - ->getRouteConfig(TestHeaderMapImpl{{"Addr", "x-foo-key;x-foo-key"}}) + ->getRouteConfig(TestRequestHeaderMapImpl{{"Addr", "x-foo-key;x-foo-key"}}) ->name(), ""); EXPECT_EQ(getScopedRdsProvider() ->config() - ->getRouteConfig(TestHeaderMapImpl{{"Addr", "x-foo-key;x-bar-key"}}) + ->getRouteConfig(TestRequestHeaderMapImpl{{"Addr", "x-foo-key;x-bar-key"}}) ->name(), ""); // RDS updates foo_routes. pushRdsConfig({"foo_routes"}, "111"); EXPECT_EQ(getScopedRdsProvider() ->config() - ->getRouteConfig(TestHeaderMapImpl{{"Addr", "x-foo-key;x-foo-key"}}) + ->getRouteConfig(TestRequestHeaderMapImpl{{"Addr", "x-foo-key;x-foo-key"}}) ->name(), "foo_routes"); EXPECT_EQ(getScopedRdsProvider() ->config() - ->getRouteConfig(TestHeaderMapImpl{{"Addr", "x-foo-key;x-bar-key"}}) + ->getRouteConfig(TestRequestHeaderMapImpl{{"Addr", "x-foo-key;x-bar-key"}}) ->name(), "foo_routes"); // Delete foo_scope2. 
- resources.RemoveLast(); - EXPECT_NO_THROW(srds_subscription_->onConfigUpdate(resources, "3")); + const auto decoded_resources_2 = TestUtility::decodeResources({resource}); + EXPECT_NO_THROW(srds_subscription_->onConfigUpdate(decoded_resources_2.refvec_, "3")); EXPECT_EQ(getScopedRouteMap().size(), 1); EXPECT_EQ(getScopedRouteMap().count("foo_scope"), 1); EXPECT_EQ(2UL, @@ -337,11 +286,11 @@ route_configuration_name: foo_routes .value()); // now scope key "x-bar-key" points to nowhere. EXPECT_THAT(getScopedRdsProvider()->config()->getRouteConfig( - TestHeaderMapImpl{{"Addr", "x-foo-key;x-bar-key"}}), + TestRequestHeaderMapImpl{{"Addr", "x-foo-key;x-bar-key"}}), IsNull()); EXPECT_EQ(getScopedRdsProvider() ->config() - ->getRouteConfig(TestHeaderMapImpl{{"Addr", "x-foo-key;x-foo-key"}}) + ->getRouteConfig(TestRequestHeaderMapImpl{{"Addr", "x-foo-key;x-foo-key"}}) ->name(), "foo_routes"); } @@ -349,7 +298,7 @@ route_configuration_name: foo_routes // Tests that multiple uniquely named non-conflict resources are allowed in config updates. TEST_F(ScopedRdsTest, MultipleResourcesDelta) { setup(); - init_watcher_.expectReady().Times(1); + init_watcher_.expectReady(); const std::string config_yaml = R"EOF( name: foo_scope route_configuration_name: foo_routes @@ -357,8 +306,7 @@ route_configuration_name: foo_routes fragments: - string_key: x-foo-key )EOF"; - Protobuf::RepeatedPtrField resources; - parseScopedRouteConfigurationFromYaml(*resources.Add(), config_yaml); + const auto resource = parseScopedRouteConfigurationFromYaml(config_yaml); const std::string config_yaml2 = R"EOF( name: foo_scope2 route_configuration_name: foo_routes @@ -366,11 +314,12 @@ route_configuration_name: foo_routes fragments: - string_key: x-bar-key )EOF"; - parseScopedRouteConfigurationFromYaml(*resources.Add(), config_yaml2); + const auto resource_2 = parseScopedRouteConfigurationFromYaml(config_yaml2); // Delta API. 
- EXPECT_NO_THROW(srds_subscription_->onConfigUpdate(anyToResource(resources, "2"), {}, "1")); + const auto decoded_resources = TestUtility::decodeResources({resource, resource_2}); context_init_manager_.initialize(init_watcher_); + EXPECT_NO_THROW(srds_subscription_->onConfigUpdate(decoded_resources.refvec_, {}, "1")); EXPECT_EQ(1UL, server_factory_context_.scope_.counter("foo.scoped_rds.foo_scoped_routes.config_reload") .value()); @@ -378,36 +327,36 @@ route_configuration_name: foo_routes // Verify the config is a ScopedConfigImpl instance, both scopes point to "" as RDS hasn't kicked // in yet(NullConfigImpl returned). - EXPECT_NE(getScopedRdsProvider(), nullptr); - EXPECT_NE(getScopedRdsProvider()->config(), nullptr); + ASSERT_THAT(getScopedRdsProvider(), Not(IsNull())); + ASSERT_THAT(getScopedRdsProvider()->config(), Not(IsNull())); EXPECT_EQ(getScopedRdsProvider() ->config() - ->getRouteConfig(TestHeaderMapImpl{{"Addr", "x-foo-key;x-foo-key"}}) + ->getRouteConfig(TestRequestHeaderMapImpl{{"Addr", "x-foo-key;x-foo-key"}}) ->name(), ""); EXPECT_EQ(getScopedRdsProvider() ->config() - ->getRouteConfig(TestHeaderMapImpl{{"Addr", "x-foo-key;x-bar-key"}}) + ->getRouteConfig(TestRequestHeaderMapImpl{{"Addr", "x-foo-key;x-bar-key"}}) ->name(), ""); // RDS updates foo_routes. pushRdsConfig({"foo_routes"}, "111"); EXPECT_EQ(getScopedRdsProvider() ->config() - ->getRouteConfig(TestHeaderMapImpl{{"Addr", "x-foo-key;x-foo-key"}}) + ->getRouteConfig(TestRequestHeaderMapImpl{{"Addr", "x-foo-key;x-foo-key"}}) ->name(), "foo_routes"); EXPECT_EQ(getScopedRdsProvider() ->config() - ->getRouteConfig(TestHeaderMapImpl{{"Addr", "x-foo-key;x-bar-key"}}) + ->getRouteConfig(TestRequestHeaderMapImpl{{"Addr", "x-foo-key;x-bar-key"}}) ->name(), "foo_routes"); // Delete foo_scope2. 
- resources.RemoveLast(); Protobuf::RepeatedPtrField deletes; *deletes.Add() = "foo_scope2"; - EXPECT_NO_THROW(srds_subscription_->onConfigUpdate(anyToResource(resources, "4"), deletes, "2")); + const auto decoded_resources_2 = TestUtility::decodeResources({resource}); + EXPECT_NO_THROW(srds_subscription_->onConfigUpdate(decoded_resources_2.refvec_, deletes, "2")); EXPECT_EQ(getScopedRouteMap().size(), 1); EXPECT_EQ(getScopedRouteMap().count("foo_scope"), 1); EXPECT_EQ(2UL, @@ -415,11 +364,11 @@ route_configuration_name: foo_routes .value()); // now scope key "x-bar-key" points to nowhere. EXPECT_THAT(getScopedRdsProvider()->config()->getRouteConfig( - TestHeaderMapImpl{{"Addr", "x-foo-key;x-bar-key"}}), + TestRequestHeaderMapImpl{{"Addr", "x-foo-key;x-bar-key"}}), IsNull()); EXPECT_EQ(getScopedRdsProvider() ->config() - ->getRouteConfig(TestHeaderMapImpl{{"Addr", "x-foo-key;x-foo-key"}}) + ->getRouteConfig(TestRequestHeaderMapImpl{{"Addr", "x-foo-key;x-foo-key"}}) ->name(), "foo_routes"); } @@ -435,8 +384,7 @@ route_configuration_name: foo_routes fragments: - string_key: x-foo-key )EOF"; - Protobuf::RepeatedPtrField resources; - parseScopedRouteConfigurationFromYaml(*resources.Add(), config_yaml); + const auto resource = parseScopedRouteConfigurationFromYaml(config_yaml); const std::string config_yaml2 = R"EOF( name: foo_scope2 route_configuration_name: foo_routes @@ -444,21 +392,22 @@ route_configuration_name: foo_routes fragments: - string_key: x-foo-key )EOF"; - parseScopedRouteConfigurationFromYaml(*resources.Add(), config_yaml2); + const auto resource_2 = parseScopedRouteConfigurationFromYaml(config_yaml2); init_watcher_.expectReady().Times(0); // The onConfigUpdate will simply throw an exception. 
context_init_manager_.initialize(init_watcher_); + const auto decoded_resources = TestUtility::decodeResources({resource, resource_2}); EXPECT_THROW_WITH_REGEX( - srds_subscription_->onConfigUpdate(resources, "1"), EnvoyException, + srds_subscription_->onConfigUpdate(decoded_resources.refvec_, "1"), EnvoyException, ".*scope key conflict found, first scope is 'foo_scope', second scope is 'foo_scope2'"); EXPECT_EQ( // Fully rejected. 0UL, server_factory_context_.scope_.counter("foo.scoped_rds.foo_scoped_routes.config_reload") .value()); // Scope key "x-foo-key" points to nowhere. - EXPECT_NE(getScopedRdsProvider(), nullptr); - EXPECT_NE(getScopedRdsProvider()->config(), nullptr); + ASSERT_THAT(getScopedRdsProvider(), Not(IsNull())); + ASSERT_THAT(getScopedRdsProvider()->config(), Not(IsNull())); EXPECT_THAT(getScopedRdsProvider()->config()->getRouteConfig( - TestHeaderMapImpl{{"Addr", "x-foo-key;x-foo-key"}}), + TestRequestHeaderMapImpl{{"Addr", "x-foo-key;x-foo-key"}}), IsNull()); EXPECT_EQ(server_factory_context_.scope_.counter("foo.rds.foo_routes.config_reload").value(), 0UL); @@ -475,8 +424,7 @@ route_configuration_name: foo_routes fragments: - string_key: x-foo-key )EOF"; - Protobuf::RepeatedPtrField resources; - parseScopedRouteConfigurationFromYaml(*resources.Add(), config_yaml); + const auto resource = parseScopedRouteConfigurationFromYaml(config_yaml); const std::string config_yaml2 = R"EOF( name: foo_scope2 route_configuration_name: foo_routes @@ -484,29 +432,26 @@ route_configuration_name: foo_routes fragments: - string_key: x-foo-key )EOF"; - parseScopedRouteConfigurationFromYaml(*resources.Add(), config_yaml2); - init_watcher_.expectReady().Times(1); // Partial success gets the subscription ready. + const auto resource_2 = parseScopedRouteConfigurationFromYaml(config_yaml2); + init_watcher_.expectReady().Times(0); // The onConfigUpdate will simply throw an exception. 
context_init_manager_.initialize(init_watcher_); + const auto decoded_resources = TestUtility::decodeResources({resource, resource_2}); EXPECT_THROW_WITH_REGEX( - srds_subscription_->onConfigUpdate(anyToResource(resources, "2"), {}, "2"), EnvoyException, + srds_subscription_->onConfigUpdate(decoded_resources.refvec_, "1"), EnvoyException, ".*scope key conflict found, first scope is 'foo_scope', second scope is 'foo_scope2'"); EXPECT_EQ( - // Partially reject. - 1UL, server_factory_context_.scope_.counter("foo.scoped_rds.foo_scoped_routes.config_reload") + // Fully rejected. + 0UL, server_factory_context_.scope_.counter("foo.scoped_rds.foo_scoped_routes.config_reload") .value()); - // foo_scope update is applied. - EXPECT_EQ(getScopedRouteMap().size(), 1UL); - EXPECT_EQ(getScopedRouteMap().count("foo_scope"), 1); - // Scope key "x-foo-key" points to foo_routes due to partial rejection. - pushRdsConfig({"foo_routes"}, "111"); // Push some real route configuration. - EXPECT_EQ(1UL, - server_factory_context_.scope_.counter("foo.rds.foo_routes.config_reload").value()); - EXPECT_EQ(getScopedRdsProvider() - ->config() - ->getRouteConfig(TestHeaderMapImpl{{"Addr", "x-foo-key;x-foo-key"}}) - ->name(), - "foo_routes"); + // Scope key "x-foo-key" points to nowhere. + ASSERT_THAT(getScopedRdsProvider(), Not(IsNull())); + ASSERT_THAT(getScopedRdsProvider()->config(), Not(IsNull())); + EXPECT_THAT(getScopedRdsProvider()->config()->getRouteConfig( + TestRequestHeaderMapImpl{{"Addr", "x-foo-key;x-foo-key"}}), + IsNull()); + EXPECT_EQ(server_factory_context_.scope_.counter("foo.rds.foo_routes.config_reload").value(), + 0UL); } // Tests that scope-key conflict resources in different config updates are handled correctly. 
@@ -527,24 +472,24 @@ route_configuration_name: bar_routes fragments: - string_key: x-bar-key )EOF"; - Protobuf::RepeatedPtrField resources; - parseScopedRouteConfigurationFromYaml(*resources.Add(), config_yaml1); - parseScopedRouteConfigurationFromYaml(*resources.Add(), config_yaml2); - EXPECT_NO_THROW(srds_subscription_->onConfigUpdate(resources, "1")); + const auto resource = parseScopedRouteConfigurationFromYaml(config_yaml1); + const auto resource_2 = parseScopedRouteConfigurationFromYaml(config_yaml2); + const auto decoded_resources = TestUtility::decodeResources({resource, resource_2}); + init_watcher_.expectReady(); + context_init_manager_.initialize(init_watcher_); + EXPECT_NO_THROW(srds_subscription_->onConfigUpdate(decoded_resources.refvec_, "1")); EXPECT_EQ(1UL, server_factory_context_.scope_.counter("foo.scoped_rds.foo_scoped_routes.config_reload") .value()); // Scope key "x-foo-key" points to nowhere. - EXPECT_NE(getScopedRdsProvider(), nullptr); - EXPECT_NE(getScopedRdsProvider()->config(), nullptr); + ASSERT_THAT(getScopedRdsProvider(), Not(IsNull())); + ASSERT_THAT(getScopedRdsProvider()->config(), Not(IsNull())); // No RDS "foo_routes" config push happened yet, Router::NullConfig is returned. 
EXPECT_THAT(getScopedRdsProvider() ->config() - ->getRouteConfig(TestHeaderMapImpl{{"Addr", "x-foo-key;x-foo-key"}}) + ->getRouteConfig(TestRequestHeaderMapImpl{{"Addr", "x-foo-key;x-foo-key"}}) ->name(), ""); - init_watcher_.expectReady().Times(1); - context_init_manager_.initialize(init_watcher_); pushRdsConfig({"foo_routes", "bar_routes"}, "111"); EXPECT_EQ(server_factory_context_.scope_.counter("foo.rds.foo_routes.config_reload").value(), 1UL); @@ -552,7 +497,7 @@ route_configuration_name: bar_routes 1UL); EXPECT_EQ(getScopedRdsProvider() ->config() - ->getRouteConfig(TestHeaderMapImpl{{"Addr", "x-foo-key;x-foo-key"}}) + ->getRouteConfig(TestRequestHeaderMapImpl{{"Addr", "x-foo-key;x-foo-key"}}) ->name(), "foo_routes"); @@ -563,12 +508,11 @@ route_configuration_name: foo_routes fragments: - string_key: x-foo-key )EOF"; - resources.Clear(); // Remove foo_scope1 and add a new scope3 reuses the same scope_key. - parseScopedRouteConfigurationFromYaml(*resources.Add(), config_yaml2); - parseScopedRouteConfigurationFromYaml(*resources.Add(), config_yaml3); - EXPECT_NO_THROW(srds_subscription_->onConfigUpdate(resources, "2")); + const auto resource_3 = parseScopedRouteConfigurationFromYaml(config_yaml3); + const auto decoded_resources_2 = TestUtility::decodeResources({resource_2, resource_3}); + EXPECT_NO_THROW(srds_subscription_->onConfigUpdate(decoded_resources_2.refvec_, "2")); EXPECT_EQ(2UL, server_factory_context_.scope_.counter("foo.scoped_rds.foo_scoped_routes.config_reload") .value()); @@ -580,7 +524,7 @@ route_configuration_name: foo_routes // The same scope-key now points to the same route table. 
EXPECT_EQ(getScopedRdsProvider() ->config() - ->getRouteConfig(TestHeaderMapImpl{{"Addr", "x-foo-key;x-foo-key"}}) + ->getRouteConfig(TestRequestHeaderMapImpl{{"Addr", "x-foo-key;x-foo-key"}}) ->name(), "foo_routes"); @@ -593,12 +537,11 @@ route_configuration_name: foo_routes fragments: - string_key: x-bar-key )EOF"; - resources.Clear(); - parseScopedRouteConfigurationFromYaml(*resources.Add(), config_yaml2); - parseScopedRouteConfigurationFromYaml(*resources.Add(), config_yaml3); - parseScopedRouteConfigurationFromYaml(*resources.Add(), config_yaml4); + const auto resource_4 = parseScopedRouteConfigurationFromYaml(config_yaml4); + const auto decoded_resources_3 = + TestUtility::decodeResources({resource_2, resource_3, resource_4}); EXPECT_THROW_WITH_REGEX( - srds_subscription_->onConfigUpdate(resources, "3"), EnvoyException, + srds_subscription_->onConfigUpdate(decoded_resources_3.refvec_, "3"), EnvoyException, "scope key conflict found, first scope is 'foo_scope2', second scope is 'foo_scope4'"); EXPECT_EQ(getScopedRouteMap().size(), 2UL); EXPECT_EQ(getScopedRouteMap().count("foo_scope1"), 0); @@ -606,15 +549,13 @@ route_configuration_name: foo_routes EXPECT_EQ(getScopedRouteMap().count("foo_scope3"), 1); EXPECT_EQ(getScopedRdsProvider() ->config() - ->getRouteConfig(TestHeaderMapImpl{{"Addr", "x-foo-key;x-bar-key"}}) + ->getRouteConfig(TestRequestHeaderMapImpl{{"Addr", "x-foo-key;x-bar-key"}}) ->name(), "bar_routes"); // Delete foo_scope2, and push a new foo_scope4 with the same scope key but different route-table. 
- resources.Clear(); - parseScopedRouteConfigurationFromYaml(*resources.Add(), config_yaml3); - parseScopedRouteConfigurationFromYaml(*resources.Add(), config_yaml4); - EXPECT_NO_THROW(srds_subscription_->onConfigUpdate(resources, "4")); + const auto decoded_resources_4 = TestUtility::decodeResources({resource_3, resource_4}); + EXPECT_NO_THROW(srds_subscription_->onConfigUpdate(decoded_resources_4.refvec_, "4")); EXPECT_EQ(server_factory_context_.scope_.counter("foo.scoped_rds.foo_scoped_routes.config_reload") .value(), 3UL); @@ -623,12 +564,12 @@ route_configuration_name: foo_routes EXPECT_EQ(getScopedRouteMap().count("foo_scope4"), 1); EXPECT_EQ(getScopedRdsProvider() ->config() - ->getRouteConfig(TestHeaderMapImpl{{"Addr", "x-foo-key;x-bar-key"}}) + ->getRouteConfig(TestRequestHeaderMapImpl{{"Addr", "x-foo-key;x-bar-key"}}) ->name(), "foo_routes"); EXPECT_EQ(getScopedRdsProvider() ->config() - ->getRouteConfig(TestHeaderMapImpl{{"Addr", "x-foo-key;x-foo-key"}}) + ->getRouteConfig(TestRequestHeaderMapImpl{{"Addr", "x-foo-key;x-foo-key"}}) ->name(), "foo_routes"); } @@ -647,14 +588,15 @@ route_configuration_name: foo_routes fragments: - string_key: x-foo-key )EOF"; - Protobuf::RepeatedPtrField resources; - parseScopedRouteConfigurationFromYaml(*resources.Add(), config_yaml); - parseScopedRouteConfigurationFromYaml(*resources.Add(), config_yaml); - EXPECT_THROW_WITH_MESSAGE(srds_subscription_->onConfigUpdate(resources, "1"), EnvoyException, - "duplicate scoped route configuration 'foo_scope' found"); + const auto resource = parseScopedRouteConfigurationFromYaml(config_yaml); + const auto decoded_resources = TestUtility::decodeResources({resource, resource}); + EXPECT_THROW_WITH_MESSAGE(srds_subscription_->onConfigUpdate(decoded_resources.refvec_, "1"), + EnvoyException, + "Error adding/updating scoped route(s): duplicate scoped route " + "configuration 'foo_scope' found"); } -// Tests that only one resource is provided during a config update. 
+// Tests duplicate resources in the same update, should be fully rejected. TEST_F(ScopedRdsTest, InvalidDuplicateResourceDelta) { setup(); init_watcher_.expectReady().Times(0); @@ -667,20 +609,24 @@ route_configuration_name: foo_routes fragments: - string_key: x-foo-key )EOF"; - Protobuf::RepeatedPtrField resources; - parseScopedRouteConfigurationFromYaml(*resources.Add(), config_yaml); - parseScopedRouteConfigurationFromYaml(*resources.Add(), config_yaml); + const auto resource = parseScopedRouteConfigurationFromYaml(config_yaml); + const auto decoded_resources = TestUtility::decodeResources({resource, resource}); EXPECT_THROW_WITH_MESSAGE( - srds_subscription_->onConfigUpdate(anyToResource(resources, "1"), {}, "1"), EnvoyException, + srds_subscription_->onConfigUpdate(decoded_resources.refvec_, {}, "1"), EnvoyException, "Error adding/updating scoped route(s): duplicate scoped route configuration 'foo_scope' " "found"); EXPECT_EQ( - // Partially reject. - 1UL, server_factory_context_.scope_.counter("foo.scoped_rds.foo_scoped_routes.config_reload") + // Fully rejected. + 0UL, server_factory_context_.scope_.counter("foo.scoped_rds.foo_scoped_routes.config_reload") .value()); - // foo_scope update is applied. - EXPECT_EQ(getScopedRouteMap().size(), 1UL); - EXPECT_EQ(getScopedRouteMap().count("foo_scope"), 1); + // Scope key "x-foo-key" points to nowhere. + ASSERT_THAT(getScopedRdsProvider(), Not(IsNull())); + ASSERT_THAT(getScopedRdsProvider()->config(), Not(IsNull())); + EXPECT_THAT(getScopedRdsProvider()->config()->getRouteConfig( + TestRequestHeaderMapImpl{{"Addr", "x-foo-key;x-foo-key"}}), + IsNull()); + EXPECT_EQ(server_factory_context_.scope_.counter("foo.rds.foo_routes.config_reload").value(), + 0UL); } // Tests a config update failure. @@ -702,7 +648,7 @@ TEST_F(ScopedRdsTest, ConfigUpdateFailure) { // config. 
TEST_F(ScopedRdsTest, ConfigDump) { setup(); - init_watcher_.expectReady().Times(1); + init_watcher_.expectReady(); context_init_manager_.initialize(init_watcher_); auto message_ptr = server_factory_context_.admin_.config_tracker_.config_tracker_callbacks_["route_scopes"](); @@ -782,15 +728,16 @@ stat_prefix: foo // Now SRDS kicks off. Protobuf::RepeatedPtrField resources; - resources.Add()->PackFrom(parseScopedRouteConfigurationFromYaml(R"EOF( + const auto resource = parseScopedRouteConfigurationFromYaml(R"EOF( name: dynamic-foo route_configuration_name: dynamic-foo-route-config key: fragments: { string_key: "172.30.30.10" } -)EOF")); +)EOF"); timeSystem().setSystemTime(std::chrono::milliseconds(1234567891567)); - srds_subscription_->onConfigUpdate(resources, "1"); + const auto decoded_resources = TestUtility::decodeResources({resource}); + srds_subscription_->onConfigUpdate(decoded_resources.refvec_, "1"); TestUtility::loadFromYaml(R"EOF( inline_scoped_route_configs: @@ -830,8 +777,7 @@ route_configuration_name: dynamic-foo-route-config *message_ptr); EXPECT_THAT(expected_config_dump, ProtoEq(scoped_routes_config_dump3)); - resources.Clear(); - srds_subscription_->onConfigUpdate(resources, "2"); + srds_subscription_->onConfigUpdate({}, "2"); TestUtility::loadFromYaml(R"EOF( inline_scoped_route_configs: - name: foo-scoped-routes @@ -881,6 +827,112 @@ route_configuration_name: static-foo-route-config ".*"); } +// Tests whether scope key conflict with updated scopes is ignored. 
+TEST_F(ScopedRdsTest, IgnoreConflictWithUpdatedScopeDelta) { + setup(); + const std::string config_yaml = R"EOF( +name: foo_scope +route_configuration_name: foo_routes +key: + fragments: + - string_key: x-foo-key +)EOF"; + const auto resource = parseScopedRouteConfigurationFromYaml(config_yaml); + const std::string config_yaml2 = R"EOF( +name: bar_scope +route_configuration_name: foo_routes +key: + fragments: + - string_key: x-bar-key +)EOF"; + const auto resource_2 = parseScopedRouteConfigurationFromYaml(config_yaml2); + + // Delta API. + const auto decoded_resources = TestUtility::decodeResources({resource, resource_2}); + context_init_manager_.initialize(init_watcher_); + EXPECT_NO_THROW(srds_subscription_->onConfigUpdate(decoded_resources.refvec_, {}, "1")); + EXPECT_EQ(1UL, + server_factory_context_.scope_.counter("foo.scoped_rds.foo_scoped_routes.config_reload") + .value()); + EXPECT_EQ(getScopedRouteMap().size(), 2); + + const std::string config_yaml3 = R"EOF( +name: bar_scope +route_configuration_name: foo_routes +key: + fragments: + - string_key: x-foo-key +)EOF"; + const auto resource_3 = parseScopedRouteConfigurationFromYaml(config_yaml); + const std::string config_yaml4 = R"EOF( +name: foo_scope +route_configuration_name: foo_routes +key: + fragments: + - string_key: x-bar-key +)EOF"; + const auto resource_4 = parseScopedRouteConfigurationFromYaml(config_yaml2); + const auto decoded_resources_2 = TestUtility::decodeResources({resource_3, resource_4}); + EXPECT_NO_THROW(srds_subscription_->onConfigUpdate(decoded_resources_2.refvec_, {}, "2")); + EXPECT_EQ(2UL, + server_factory_context_.scope_.counter("foo.scoped_rds.foo_scoped_routes.config_reload") + .value()); + EXPECT_EQ(getScopedRouteMap().size(), 2); +} + +// Tests whether scope key conflict with updated scopes is ignored. 
+TEST_F(ScopedRdsTest, IgnoreConflictWithUpdatedScopeSotW) { + setup(); + const std::string config_yaml = R"EOF( +name: foo_scope +route_configuration_name: foo_routes +key: + fragments: + - string_key: x-foo-key +)EOF"; + const auto resource = parseScopedRouteConfigurationFromYaml(config_yaml); + const std::string config_yaml2 = R"EOF( +name: bar_scope +route_configuration_name: foo_routes +key: + fragments: + - string_key: x-bar-key +)EOF"; + const auto resource_2 = parseScopedRouteConfigurationFromYaml(config_yaml2); + + // Delta API. + const auto decoded_resources = TestUtility::decodeResources({resource, resource_2}); + context_init_manager_.initialize(init_watcher_); + EXPECT_NO_THROW(srds_subscription_->onConfigUpdate(decoded_resources.refvec_, "1")); + EXPECT_EQ(1UL, + server_factory_context_.scope_.counter("foo.scoped_rds.foo_scoped_routes.config_reload") + .value()); + EXPECT_EQ(getScopedRouteMap().size(), 2); + + const std::string config_yaml3 = R"EOF( +name: bar_scope +route_configuration_name: foo_routes +key: + fragments: + - string_key: x-foo-key +)EOF"; + const auto resource_3 = parseScopedRouteConfigurationFromYaml(config_yaml); + const std::string config_yaml4 = R"EOF( +name: foo_scope +route_configuration_name: foo_routes +key: + fragments: + - string_key: x-bar-key +)EOF"; + const auto resource_4 = parseScopedRouteConfigurationFromYaml(config_yaml2); + const auto decoded_resources_2 = TestUtility::decodeResources({resource_3, resource_4}); + EXPECT_NO_THROW(srds_subscription_->onConfigUpdate(decoded_resources_2.refvec_, "2")); + EXPECT_EQ(2UL, + server_factory_context_.scope_.counter("foo.scoped_rds.foo_scoped_routes.config_reload") + .value()); + EXPECT_EQ(getScopedRouteMap().size(), 2); +} + } // namespace } // namespace Router } // namespace Envoy diff --git a/test/common/router/shadow_writer_impl_test.cc b/test/common/router/shadow_writer_impl_test.cc index d95ae08565b42..7121a9abe0276 100644 --- 
a/test/common/router/shadow_writer_impl_test.cc +++ b/test/common/router/shadow_writer_impl_test.cc @@ -33,7 +33,7 @@ class ShadowWriterImplTest : public testing::Test { [&](Http::RequestMessagePtr& inner_message, Http::AsyncClient::Callbacks& callbacks, const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { EXPECT_EQ(message, inner_message); - EXPECT_EQ(shadowed_host, message->headers().Host()->value().getStringView()); + EXPECT_EQ(shadowed_host, message->headers().getHostValue()); callback_ = &callbacks; return &request_; })); diff --git a/test/common/router/upstream_request_test.cc b/test/common/router/upstream_request_test.cc new file mode 100644 index 0000000000000..72c48179290f2 --- /dev/null +++ b/test/common/router/upstream_request_test.cc @@ -0,0 +1,48 @@ +#include "common/router/upstream_request.h" + +#include "test/mocks/router/router_filter_interface.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::_; +using testing::NiceMock; + +namespace Envoy { +namespace Router { +namespace { + +class UpstreamRequestTest : public testing::Test { +public: + NiceMock router_filter_interface_; + UpstreamRequest upstream_request_{router_filter_interface_, + std::make_unique>()}; +}; + +// UpstreamRequest is responsible processing for passing 101 upgrade headers to onUpstreamHeaders. +TEST_F(UpstreamRequestTest, Decode101UpgradeHeaders) { + auto upgrade_headers = std::make_unique( + Http::TestResponseHeaderMapImpl({{":status", "101"}})); + EXPECT_CALL(router_filter_interface_, onUpstreamHeaders(_, _, _, _)); + upstream_request_.decodeHeaders(std::move(upgrade_headers), false); +} + +// UpstreamRequest is responsible for ignoring non-{100,101} 1xx headers. 
+TEST_F(UpstreamRequestTest, IgnoreOther1xxHeaders) { + auto other_headers = std::make_unique( + Http::TestResponseHeaderMapImpl({{":status", "102"}})); + EXPECT_CALL(router_filter_interface_, onUpstreamHeaders(_, _, _, _)).Times(0); + upstream_request_.decodeHeaders(std::move(other_headers), false); +} + +// UpstreamRequest is responsible processing for passing 200 upgrade headers to onUpstreamHeaders. +TEST_F(UpstreamRequestTest, Decode200UpgradeHeaders) { + auto response_headers = std::make_unique( + Http::TestResponseHeaderMapImpl({{":status", "200"}})); + EXPECT_CALL(router_filter_interface_, onUpstreamHeaders(_, _, _, _)); + upstream_request_.decodeHeaders(std::move(response_headers), false); +} + +} // namespace +} // namespace Router +} // namespace Envoy diff --git a/test/common/router/vhds_test.cc b/test/common/router/vhds_test.cc index 97cf20b50bf97..f1abea4ac04ff 100644 --- a/test/common/router/vhds_test.cc +++ b/test/common/router/vhds_test.cc @@ -11,11 +11,11 @@ #include "common/protobuf/protobuf.h" #include "common/router/rds_impl.h" -#include "server/http/admin.h" +#include "server/admin/admin.h" #include "test/mocks/config/mocks.h" #include "test/mocks/init/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/instance.h" #include "test/mocks/thread_local/mocks.h" #include "test/mocks/upstream/mocks.h" #include "test/test_common/printers.h" @@ -76,8 +76,8 @@ name: my_route } RouteConfigUpdatePtr makeRouteConfigUpdate(const envoy::config::route::v3::RouteConfiguration& rc) { - RouteConfigUpdatePtr config_update_info = std::make_unique( - factory_context_.timeSource(), factory_context_.messageValidationVisitor()); + RouteConfigUpdatePtr config_update_info = + std::make_unique(factory_context_.timeSource()); config_update_info->onRdsUpdate(rc, "1"); return config_update_info; } @@ -86,7 +86,7 @@ name: my_route Init::ExpectableWatcherImpl init_watcher_; Init::TargetHandlePtr init_target_handle_; const std::string context_ = 
"vhds_test"; - std::unordered_set providers_; + absl::node_hash_set providers_; Protobuf::util::MessageDifferencer messageDifferencer_; std::string default_vhds_config_; NiceMock subscription_factory_; @@ -131,9 +131,11 @@ TEST_F(VhdsTest, VhdsAddsVirtualHosts) { auto vhost = buildVirtualHost("vhost1", "vhost.first"); const auto& added_resources = buildAddedResources({vhost}); + const auto decoded_resources = + TestUtility::decodeResources(added_resources); const Protobuf::RepeatedPtrField removed_resources; factory_context_.cluster_manager_.subscription_factory_.callbacks_->onConfigUpdate( - added_resources, removed_resources, "1"); + decoded_resources.refvec_, removed_resources, "1"); EXPECT_EQ(1UL, config_update_info->routeConfiguration().virtual_hosts_size()); EXPECT_TRUE( @@ -189,9 +191,11 @@ name: my_route auto vhost = buildVirtualHost("vhost_vhds1", "vhost.first"); const auto& added_resources = buildAddedResources({vhost}); + const auto decoded_resources = + TestUtility::decodeResources(added_resources); const Protobuf::RepeatedPtrField removed_resources; factory_context_.cluster_manager_.subscription_factory_.callbacks_->onConfigUpdate( - added_resources, removed_resources, "1"); + decoded_resources.refvec_, removed_resources, "1"); EXPECT_EQ(2UL, config_update_info->routeConfiguration().virtual_hosts_size()); config_update_info->onRdsUpdate(updated_route_config, "2"); @@ -208,30 +212,6 @@ name: my_route "vhost_vhds1" == actual_vhost_2.name()); } -// verify vhds validates VirtualHosts in added_resources -TEST_F(VhdsTest, VhdsValidatesAddedVirtualHosts) { - const auto route_config = - TestUtility::parseYaml(default_vhds_config_); - RouteConfigUpdatePtr config_update_info = makeRouteConfigUpdate(route_config); - - VhdsSubscription subscription(config_update_info, factory_context_, context_, providers_); - - auto vhost = TestUtility::parseYaml(R"EOF( - name: invalid_vhost - domains: [] - routes: - - match: { prefix: "/" } - route: { cluster: "my_service" } 
-)EOF"); - - const auto& added_resources = buildAddedResources({vhost}); - const Protobuf::RepeatedPtrField removed_resources; - - EXPECT_THROW(factory_context_.cluster_manager_.subscription_factory_.callbacks_->onConfigUpdate( - added_resources, removed_resources, "1"), - ProtoValidationException); -} - } // namespace } // namespace Router } // namespace Envoy diff --git a/test/common/runtime/BUILD b/test/common/runtime/BUILD index 1df1476d41495..8cb4a424daa75 100644 --- a/test/common/runtime/BUILD +++ b/test/common/runtime/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", @@ -7,6 +5,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() exports_files(["filesystem_setup.sh"]) @@ -29,8 +29,6 @@ envoy_cc_test_library( envoy_cc_test( name = "runtime_protos_test", srcs = ["runtime_protos_test.cc"], - # Pass for the time being, test times out on windows - tags = ["fails_on_windows"], deps = [ "//source/common/runtime:runtime_lib", "//test/mocks/runtime:runtime_mocks", @@ -44,9 +42,6 @@ envoy_cc_test( name = "runtime_impl_test", srcs = ["runtime_impl_test.cc"], data = glob(["test_data/**"]) + ["filesystem_setup.sh"], - # Inexplicable failure promoting arguments to mock, see - # https://envoyproxy.slack.com/archives/CNAK09BSB/p1571946165007300 - tags = ["fails_on_windows"], deps = [ "//source/common/config:runtime_utility_lib", "//source/common/runtime:runtime_lib", @@ -58,6 +53,7 @@ envoy_cc_test( "//test/mocks/local_info:local_info_mocks", "//test/mocks/protobuf:protobuf_mocks", "//test/mocks/runtime:runtime_mocks", + "//test/mocks/server:server_mocks", "//test/mocks/thread_local:thread_local_mocks", "//test/mocks/upstream:upstream_mocks", "//test/test_common:environment_lib", @@ -74,6 +70,20 @@ envoy_cc_test( srcs = ["runtime_flag_override_test.cc"], args = [ "--runtime-feature-override-for-tests=envoy.reloadable_features.test_feature_false", + 
"--runtime-feature-disable-for-tests=envoy.reloadable_features.test_feature_true", + ], + coverage = False, + deps = [ + "//source/common/runtime:runtime_lib", + ], +) + +envoy_cc_test( + name = "runtime_flag_override_noop_test", + srcs = ["runtime_flag_override_noop_test.cc"], + args = [ + "--runtime-feature-override-for-tests=envoy.reloadable_features.test_feature_true", + "--runtime-feature-disable-for-tests=envoy.reloadable_features.test_feature_false", ], coverage = False, deps = [ diff --git a/test/common/runtime/filesystem_setup.sh b/test/common/runtime/filesystem_setup.sh index 39684619067a0..ef27243da8547 100755 --- a/test/common/runtime/filesystem_setup.sh +++ b/test/common/runtime/filesystem_setup.sh @@ -10,8 +10,9 @@ rm -rf "${TEST_TMPDIR}/${TEST_DATA}" mkdir -p "${TEST_TMPDIR}/${TEST_DATA}" cp -RfL "${TEST_DATA}"/* "${TEST_TMPDIR}/${TEST_DATA}" chmod -R u+rwX "${TEST_TMPDIR}/${TEST_DATA}" -ln -sf "${TEST_TMPDIR}/${TEST_DATA}/root" "${TEST_TMPDIR}/${TEST_DATA}/current" -ln -sf "${TEST_TMPDIR}/${TEST_DATA}/root/envoy/subdir" "${TEST_TMPDIR}/${TEST_DATA}/root/envoy/badlink" +# Verify text value is treated as a binary blob regardless of source line-ending settings +printf "hello\nworld" > "${TEST_TMPDIR}/${TEST_DATA}/root/envoy/file_lf" +printf "hello\r\nworld" > "${TEST_TMPDIR}/${TEST_DATA}/root/envoy/file_crlf" # Deliberate symlink of doom. 
LOOP_PATH="${TEST_TMPDIR}/${TEST_DATA}/loop" @@ -20,8 +21,13 @@ mkdir -p "${LOOP_PATH}" # the ln in MSYS2 doesn't handle recursive symlinks correctly, # so use the cmd built in mklink instead on Windows if [[ -z "${WINDIR}" ]]; then + ln -sf "${TEST_TMPDIR}/${TEST_DATA}/root" "${TEST_TMPDIR}/${TEST_DATA}/current" + ln -sf "${TEST_TMPDIR}/${TEST_DATA}/root/envoy/subdir" "${TEST_TMPDIR}/${TEST_DATA}/root/envoy/badlink" ln -sf "${LOOP_PATH}" "${LOOP_PATH}"/loop else + win_test_root="$(echo $TEST_TMPDIR/$TEST_DATA | tr '/' '\\')" + cmd.exe /C "mklink /D ${win_test_root}\\current ${win_test_root}\\root" + cmd.exe /C "mklink /D ${win_test_root}\\root\\envoy\\badlink ${win_test_root}\\root\\envoy\\subdir" win_loop_path="$(echo $LOOP_PATH | tr '/' '\\')" cmd.exe /C "mklink /D ${win_loop_path}\\loop ${win_loop_path}" fi diff --git a/test/common/runtime/runtime_flag_override_noop_test.cc b/test/common/runtime/runtime_flag_override_noop_test.cc new file mode 100644 index 0000000000000..ab19fac7293bf --- /dev/null +++ b/test/common/runtime/runtime_flag_override_noop_test.cc @@ -0,0 +1,24 @@ +#include "common/runtime/runtime_features.h" + +#include "gmock/gmock.h" + +namespace Envoy { +namespace Runtime { + +// Features not in runtime_features.cc are false by default (and this particular one is verified to +// be false in runtime_impl_test.cc). However in in the envoy_cc_test declaration, the flag is set +// "--runtime-feature-override-for-tests=envoy.reloadable_features.test_feature_false" +// to override the return value of runtimeFeatureEnabled to true. +TEST(RuntimeFlagOverrideNoopTest, OverridesNoop) { + EXPECT_FALSE(Runtime::runtimeFeatureEnabled("envoy.reloadable_features.test_feature_false")); +} + +// For features in runtime_features.cc that are true by default, this flag +// "--runtime-feature-override-for-tests=envoy.reloadable_features.test_feature_false" is set in the +// envoy_cc_test declaration to override the return value of runtimeFeatureEnabled to false. 
+TEST(RuntimeFlagOverrideNoopTest, OverrideDisableFeatureNoop) { + EXPECT_TRUE(Runtime::runtimeFeatureEnabled("envoy.reloadable_features.test_feature_true")); +} + +} // namespace Runtime +} // namespace Envoy diff --git a/test/common/runtime/runtime_flag_override_test.cc b/test/common/runtime/runtime_flag_override_test.cc index 37a1ab6444731..6d90407e38573 100644 --- a/test/common/runtime/runtime_flag_override_test.cc +++ b/test/common/runtime/runtime_flag_override_test.cc @@ -13,5 +13,12 @@ TEST(RuntimeFlagOverrideTest, OverridesWork) { EXPECT_TRUE(Runtime::runtimeFeatureEnabled("envoy.reloadable_features.test_feature_false")); } +// For features in runtime_features.cc that are true by default, this flag +// "--runtime-feature-override-for-tests=envoy.reloadable_features.test_feature_false" is set in the +// envoy_cc_test declaration to override the return value of runtimeFeatureEnabled to false. +TEST(RuntimeFlagOverrideTest, OverrideDisableFeatureWork) { + EXPECT_FALSE(Runtime::runtimeFeatureEnabled("envoy.reloadable_features.test_feature_true")); +} + } // namespace Runtime } // namespace Envoy diff --git a/test/common/runtime/runtime_impl_test.cc b/test/common/runtime/runtime_impl_test.cc index 4701f54128345..dad18c5d2bf80 100644 --- a/test/common/runtime/runtime_impl_test.cc +++ b/test/common/runtime/runtime_impl_test.cc @@ -12,12 +12,14 @@ #include "common/runtime/runtime_impl.h" #include "test/common/stats/stat_test_utility.h" +#include "test/mocks/common.h" #include "test/mocks/event/mocks.h" #include "test/mocks/filesystem/mocks.h" #include "test/mocks/init/mocks.h" #include "test/mocks/local_info/mocks.h" #include "test/mocks/protobuf/mocks.h" #include "test/mocks/runtime/mocks.h" +#include "test/mocks/server/mocks.h" #include "test/mocks/thread_local/mocks.h" #include "test/mocks/upstream/mocks.h" #include "test/test_common/environment.h" @@ -36,64 +38,6 @@ namespace Envoy { namespace Runtime { namespace { -TEST(Random, DISABLED_benchmarkRandom) { - 
Runtime::RandomGeneratorImpl random; - - for (size_t i = 0; i < 1000000000; ++i) { - random.random(); - } -} - -TEST(Random, SanityCheckOfUniquenessRandom) { - Runtime::RandomGeneratorImpl random; - std::set results; - const size_t num_of_results = 1000000; - - for (size_t i = 0; i < num_of_results; ++i) { - results.insert(random.random()); - } - - EXPECT_EQ(num_of_results, results.size()); -} - -TEST(Random, SanityCheckOfStdLibRandom) { - Runtime::RandomGeneratorImpl random; - - static const auto num_of_items = 100; - std::vector v(num_of_items); - std::iota(v.begin(), v.end(), 0); - - static const auto num_of_checks = 10000; - for (size_t i = 0; i < num_of_checks; ++i) { - const auto prev = v; - std::shuffle(v.begin(), v.end(), random); - EXPECT_EQ(v.size(), prev.size()); - EXPECT_NE(v, prev); - EXPECT_FALSE(std::is_sorted(v.begin(), v.end())); - } -} - -TEST(UUID, CheckLengthOfUUID) { - RandomGeneratorImpl random; - - std::string result = random.uuid(); - - size_t expected_length = 36; - EXPECT_EQ(expected_length, result.length()); -} - -TEST(UUID, SanityCheckOfUniqueness) { - std::set uuids; - const size_t num_of_uuids = 100000; - - RandomGeneratorImpl random; - for (size_t i = 0; i < num_of_uuids; ++i) { - uuids.insert(random.uuid()); - } - - EXPECT_EQ(num_of_uuids, uuids.size()); -} - class LoaderImplTest : public testing::Test { protected: LoaderImplTest() : api_(Api::createApiForTest(store_)) { local_info_.node_.set_cluster(""); } @@ -114,7 +58,7 @@ class LoaderImplTest : public testing::Test { Event::MockDispatcher dispatcher_; NiceMock tls_; Stats::TestUtil::TestStore store_; - MockRandomGenerator generator_; + Random::MockRandomGenerator generator_; std::unique_ptr loader_; Api::ApiPtr api_; Upstream::MockClusterManager cm_; @@ -196,12 +140,14 @@ TEST_F(DiskLoaderImplTest, All) { // Basic string getting. 
EXPECT_EQ("world", loader_->snapshot().get("file2").value().get()); - EXPECT_EQ("hello\nworld", loader_->snapshot().get("subdir.file3").value().get()); + EXPECT_EQ("hello", loader_->snapshot().get("subdir.file").value().get()); + EXPECT_EQ("hello\nworld", loader_->snapshot().get("file_lf").value().get()); + EXPECT_EQ("hello\r\nworld", loader_->snapshot().get("file_crlf").value().get()); EXPECT_FALSE(loader_->snapshot().get("invalid").has_value()); // Existence checking. EXPECT_EQ(true, loader_->snapshot().get("file2").has_value()); - EXPECT_EQ(true, loader_->snapshot().get("subdir.file3").has_value()); + EXPECT_EQ(true, loader_->snapshot().get("subdir.file").has_value()); EXPECT_EQ(false, loader_->snapshot().get("invalid").has_value()); // Integer getting. @@ -311,7 +257,7 @@ TEST_F(DiskLoaderImplTest, All) { EXPECT_EQ(0, store_.counter("runtime.load_error").value()); EXPECT_EQ(1, store_.counter("runtime.load_success").value()); - EXPECT_EQ(23, store_.gauge("runtime.num_keys", Stats::Gauge::ImportMode::NeverImport).value()); + EXPECT_EQ(25, store_.gauge("runtime.num_keys", Stats::Gauge::ImportMode::NeverImport).value()); EXPECT_EQ(4, store_.gauge("runtime.num_layers", Stats::Gauge::ImportMode::NeverImport).value()); } @@ -612,7 +558,7 @@ TEST_F(StaticLoaderImplTest, ProtoParsing) { file12: FaLSe file13: false subdir: - file3: "hello\nworld" + file: "hello" numerator_only: numerator: 52 denominator_only: @@ -623,6 +569,8 @@ TEST_F(StaticLoaderImplTest, ProtoParsing) { empty: {} file_with_words: "some words" file_with_double: 23.2 + file_lf: "hello\nworld" + file_crlf: "hello\r\nworld" bool_as_int0: 0 bool_as_int1: 1 )EOF"); @@ -630,7 +578,9 @@ TEST_F(StaticLoaderImplTest, ProtoParsing) { // Basic string getting. 
EXPECT_EQ("world", loader_->snapshot().get("file2").value().get()); - EXPECT_EQ("hello\nworld", loader_->snapshot().get("subdir.file3").value().get()); + EXPECT_EQ("hello", loader_->snapshot().get("subdir.file").value().get()); + EXPECT_EQ("hello\nworld", loader_->snapshot().get("file_lf").value().get()); + EXPECT_EQ("hello\r\nworld", loader_->snapshot().get("file_crlf").value().get()); EXPECT_FALSE(loader_->snapshot().get("invalid").has_value()); // Integer getting. @@ -730,7 +680,7 @@ TEST_F(StaticLoaderImplTest, ProtoParsing) { EXPECT_EQ(0, store_.counter("runtime.load_error").value()); EXPECT_EQ(1, store_.counter("runtime.load_success").value()); - EXPECT_EQ(19, store_.gauge("runtime.num_keys", Stats::Gauge::ImportMode::NeverImport).value()); + EXPECT_EQ(21, store_.gauge("runtime.num_keys", Stats::Gauge::ImportMode::NeverImport).value()); EXPECT_EQ(2, store_.gauge("runtime.num_layers", Stats::Gauge::ImportMode::NeverImport).value()); } @@ -809,10 +759,11 @@ class DiskLayerTest : public testing::Test { TEST_F(DiskLayerTest, IllegalPath) { #ifdef WIN32 - // no illegal paths on Windows at the moment - return; -#endif + EXPECT_THROW_WITH_MESSAGE(DiskLayer("test", R"EOF(\\.\)EOF", *api_), EnvoyException, + R"EOF(Invalid path: \\.\)EOF"); +#else EXPECT_THROW_WITH_MESSAGE(DiskLayer("test", "/dev", *api_), EnvoyException, "Invalid path: /dev"); +#endif } // Validate that we catch recursion that goes too deep in the runtime filesystem @@ -863,10 +814,11 @@ class RtdsLoaderImplTest : public LoaderImplTest { rtds_layer->mutable_rtds_config(); } EXPECT_CALL(cm_, subscriptionFactory()).Times(layers_.size()); - ON_CALL(cm_.subscription_factory_, subscriptionFromConfigSource(_, _, _, _)) - .WillByDefault(testing::Invoke( - [this](const envoy::config::core::v3::ConfigSource&, absl::string_view, Stats::Scope&, - Config::SubscriptionCallbacks& callbacks) -> Config::SubscriptionPtr { + ON_CALL(cm_.subscription_factory_, subscriptionFromConfigSource(_, _, _, _, _)) + 
.WillByDefault( + testing::Invoke([this](const envoy::config::core::v3::ConfigSource&, absl::string_view, + Stats::Scope&, Config::SubscriptionCallbacks& callbacks, + Config::OpaqueResourceDecoder&) -> Config::SubscriptionPtr { auto ret = std::make_unique>(); rtds_subscriptions_.push_back(ret.get()); rtds_callbacks_.push_back(&callbacks); @@ -899,17 +851,14 @@ class RtdsLoaderImplTest : public LoaderImplTest { void doOnConfigUpdateVerifyNoThrow(const envoy::service::runtime::v3::Runtime& runtime, uint32_t callback_index = 0) { - Protobuf::RepeatedPtrField resources; - resources.Add()->PackFrom(runtime); - VERBOSE_EXPECT_NO_THROW(rtds_callbacks_[callback_index]->onConfigUpdate(resources, "")); + const auto decoded_resources = TestUtility::decodeResources({runtime}); + VERBOSE_EXPECT_NO_THROW( + rtds_callbacks_[callback_index]->onConfigUpdate(decoded_resources.refvec_, "")); } void doDeltaOnConfigUpdateVerifyNoThrow(const envoy::service::runtime::v3::Runtime& runtime) { - Protobuf::RepeatedPtrField resources; - auto* resource = resources.Add(); - resource->mutable_resource()->PackFrom(runtime); - resource->set_version(""); - VERBOSE_EXPECT_NO_THROW(rtds_callbacks_[0]->onConfigUpdate(resources, {}, "")); + const auto decoded_resources = TestUtility::decodeResources({runtime}); + VERBOSE_EXPECT_NO_THROW(rtds_callbacks_[0]->onConfigUpdate(decoded_resources.refvec_, {}, "")); } std::vector layers_{"some_resource"}; @@ -922,10 +871,8 @@ class RtdsLoaderImplTest : public LoaderImplTest { TEST_F(RtdsLoaderImplTest, UnexpectedSizeEmpty) { setup(); - Protobuf::RepeatedPtrField runtimes; - EXPECT_CALL(rtds_init_callback_, Call()); - EXPECT_THROW_WITH_MESSAGE(rtds_callbacks_[0]->onConfigUpdate(runtimes, ""), EnvoyException, + EXPECT_THROW_WITH_MESSAGE(rtds_callbacks_[0]->onConfigUpdate({}, ""), EnvoyException, "Unexpected RTDS resource length: 0"); EXPECT_EQ(0, store_.counter("runtime.load_error").value()); @@ -938,13 +885,12 @@ TEST_F(RtdsLoaderImplTest, UnexpectedSizeEmpty) { 
TEST_F(RtdsLoaderImplTest, UnexpectedSizeTooMany) { setup(); - Protobuf::RepeatedPtrField runtimes; - runtimes.Add(); - runtimes.Add(); + const envoy::service::runtime::v3::Runtime runtime; + const auto decoded_resources = TestUtility::decodeResources({runtime, runtime}); EXPECT_CALL(rtds_init_callback_, Call()); - EXPECT_THROW_WITH_MESSAGE(rtds_callbacks_[0]->onConfigUpdate(runtimes, ""), EnvoyException, - "Unexpected RTDS resource length: 2"); + EXPECT_THROW_WITH_MESSAGE(rtds_callbacks_[0]->onConfigUpdate(decoded_resources.refvec_, ""), + EnvoyException, "Unexpected RTDS resource length: 2"); EXPECT_EQ(0, store_.counter("runtime.load_error").value()); EXPECT_EQ(1, store_.counter("runtime.load_success").value()); @@ -977,9 +923,9 @@ TEST_F(RtdsLoaderImplTest, WrongResourceName) { foo: bar baz: meh )EOF"); - Protobuf::RepeatedPtrField resources; - resources.Add()->PackFrom(runtime); - EXPECT_THROW_WITH_MESSAGE(rtds_callbacks_[0]->onConfigUpdate(resources, ""), EnvoyException, + const auto decoded_resources = TestUtility::decodeResources({runtime}); + EXPECT_THROW_WITH_MESSAGE(rtds_callbacks_[0]->onConfigUpdate(decoded_resources.refvec_, ""), + EnvoyException, "Unexpected RTDS runtime (expecting some_resource): other_resource"); EXPECT_EQ("whatevs", loader_->snapshot().get("foo").value().get()); @@ -1116,6 +1062,28 @@ TEST_F(RtdsLoaderImplTest, MultipleRtdsLayers) { EXPECT_EQ(3, store_.gauge("runtime.num_layers", Stats::Gauge::ImportMode::NeverImport).value()); } +TEST_F(RtdsLoaderImplTest, BadConfigSource) { + Upstream::MockClusterManager cm_; + EXPECT_CALL(cm_.subscription_factory_, subscriptionFromConfigSource(_, _, _, _, _)) + .WillOnce(InvokeWithoutArgs([]() -> Config::SubscriptionPtr { + throw EnvoyException("bad config"); + return nullptr; + })); + + envoy::config::bootstrap::v3::LayeredRuntime config; + auto* layer = config.add_layers(); + layer->set_name("some_other_resource"); + auto* rtds_layer = layer->mutable_rtds_layer(); + 
rtds_layer->set_name("some_resource"); + rtds_layer->mutable_rtds_config(); + + EXPECT_CALL(cm_, subscriptionFactory()).Times(1); + LoaderImpl loader(dispatcher_, tls_, config, local_info_, store_, generator_, validation_visitor_, + *api_); + + EXPECT_THROW_WITH_MESSAGE(loader.initialize(cm_), EnvoyException, "bad config"); +} + } // namespace } // namespace Runtime } // namespace Envoy diff --git a/test/common/runtime/test_data/root/envoy/subdir/file3 b/test/common/runtime/test_data/root/envoy/subdir/file similarity index 50% rename from test/common/runtime/test_data/root/envoy/subdir/file3 rename to test/common/runtime/test_data/root/envoy/subdir/file index 94954abda49de..ce013625030ba 100644 --- a/test/common/runtime/test_data/root/envoy/subdir/file3 +++ b/test/common/runtime/test_data/root/envoy/subdir/file @@ -1,2 +1 @@ hello -world diff --git a/test/common/runtime/utility.h b/test/common/runtime/utility.h index e442e8940e009..ba1bb9f1b3981 100644 --- a/test/common/runtime/utility.h +++ b/test/common/runtime/utility.h @@ -8,14 +8,21 @@ namespace Runtime { class RuntimeFeaturesPeer { public: - static bool addFeature(const std::string& feature) { + static bool enableFeature(const std::string& feature) { + // Remove from disabled features and add to enabled features. + const_cast(&Runtime::RuntimeFeaturesDefaults::get()) + ->disabled_features_.erase(feature); return const_cast(&Runtime::RuntimeFeaturesDefaults::get()) ->enabled_features_.insert(feature) .second; } - static void removeFeature(const std::string& feature) { + static bool disableFeature(const std::string& feature) { + // Remove from enabled features and add to disabled features. 
const_cast(&Runtime::RuntimeFeaturesDefaults::get()) ->enabled_features_.erase(feature); + return const_cast(&Runtime::RuntimeFeaturesDefaults::get()) + ->disabled_features_.insert(feature) + .second; } }; diff --git a/test/common/secret/BUILD b/test/common/secret/BUILD index d0d72d1b22ff1..48572641a39b2 100644 --- a/test/common/secret/BUILD +++ b/test/common/secret/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( @@ -19,7 +19,9 @@ envoy_cc_test( "//source/common/secret:secret_manager_impl_lib", "//source/common/ssl:certificate_validation_context_config_impl_lib", "//source/common/ssl:tls_certificate_config_impl_lib", - "//test/mocks/server:server_mocks", + "//test/mocks/server:config_tracker_mocks", + "//test/mocks/server:instance_mocks", + "//test/mocks/server:transport_socket_factory_context_mocks", "//test/test_common:environment_lib", "//test/test_common:registry_lib", "//test/test_common:simulated_time_system_lib", @@ -45,7 +47,7 @@ envoy_cc_test( "//test/mocks/init:init_mocks", "//test/mocks/protobuf:protobuf_mocks", "//test/mocks/secret:secret_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:instance_mocks", "//test/test_common:environment_lib", "//test/test_common:registry_lib", "//test/test_common:utility_lib", diff --git a/test/common/secret/sds_api_test.cc b/test/common/secret/sds_api_test.cc index ec1c6ee5acc8b..bec6c41a0a479 100644 --- a/test/common/secret/sds_api_test.cc +++ b/test/common/secret/sds_api_test.cc @@ -15,7 +15,7 @@ #include "test/mocks/init/mocks.h" #include "test/mocks/protobuf/mocks.h" #include "test/mocks/secret/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/instance.h" #include "test/test_common/environment.h" #include "test/test_common/utility.h" @@ -24,6 +24,7 @@ using ::testing::_; using ::testing::Invoke; +using 
::testing::InvokeWithoutArgs; namespace Envoy { namespace Secret { @@ -32,14 +33,15 @@ namespace { class SdsApiTest : public testing::Test { protected: SdsApiTest() - : api_(Api::createApiForTest()), dispatcher_(api_->allocateDispatcher("test_thread")) { + : api_(Api::createApiForTest()), dispatcher_(api_->allocateDispatcher("test_thread")) {} + + void initialize() { init_target_handle_->initialize(init_watcher_); } + void setupMocks() { EXPECT_CALL(init_manager_, add(_)).WillOnce(Invoke([this](const Init::Target& target) { init_target_handle_ = target.createHandle("test"); })); } - void initialize() { init_target_handle_->initialize(init_watcher_); } - Api::ApiPtr api_; NiceMock validation_visitor_; NiceMock subscription_factory_; @@ -57,17 +59,37 @@ TEST_F(SdsApiTest, BasicTest) { NiceMock server; envoy::config::core::v3::ConfigSource config_source; + setupMocks(); TlsCertificateSdsApi sds_api( config_source, "abc.com", subscription_factory_, time_system_, validation_visitor_, server.stats(), init_manager_, []() {}, *dispatcher_, *api_); initialize(); } +// Validate that bad ConfigSources are caught at construction time. This is a regression test for +// https://github.com/envoyproxy/envoy/issues/10976. +TEST_F(SdsApiTest, BadConfigSource) { + ::testing::InSequence s; + NiceMock server; + envoy::config::core::v3::ConfigSource config_source; + EXPECT_CALL(subscription_factory_, subscriptionFromConfigSource(_, _, _, _, _)) + .WillOnce(InvokeWithoutArgs([]() -> Config::SubscriptionPtr { + throw EnvoyException("bad config"); + return nullptr; + })); + EXPECT_THROW_WITH_MESSAGE(TlsCertificateSdsApi( + config_source, "abc.com", subscription_factory_, time_system_, + validation_visitor_, server.stats(), init_manager_, []() {}, + *dispatcher_, *api_), + EnvoyException, "bad config"); +} + // Validate that TlsCertificateSdsApi updates secrets successfully if a good secret // is passed to onConfigUpdate(). 
TEST_F(SdsApiTest, DynamicTlsCertificateUpdateSuccess) { NiceMock server; envoy::config::core::v3::ConfigSource config_source; + setupMocks(); TlsCertificateSdsApi sds_api( config_source, "abc.com", subscription_factory_, time_system_, validation_visitor_, server.stats(), init_manager_, []() {}, *dispatcher_, *api_); @@ -87,11 +109,10 @@ TEST_F(SdsApiTest, DynamicTlsCertificateUpdateSuccess) { )EOF"; envoy::extensions::transport_sockets::tls::v3::Secret typed_secret; TestUtility::loadFromYaml(TestEnvironment::substitute(yaml), typed_secret); - Protobuf::RepeatedPtrField secret_resources; - secret_resources.Add()->PackFrom(typed_secret); + const auto decoded_resources = TestUtility::decodeResources({typed_secret}); EXPECT_CALL(secret_callback, onAddOrUpdateSecret()); - subscription_factory_.callbacks_->onConfigUpdate(secret_resources, ""); + subscription_factory_.callbacks_->onConfigUpdate(decoded_resources.refvec_, ""); Ssl::TlsCertificateConfigImpl tls_config(*sds_api.secret(), nullptr, *api_); const std::string cert_pem = @@ -118,11 +139,10 @@ class PartialMockSds : public SdsApi { server.stats(), init_manager, []() {}, dispatcher, api) {} MOCK_METHOD(void, onConfigUpdate, - (const Protobuf::RepeatedPtrField&, const std::string&)); - void - onConfigUpdate(const Protobuf::RepeatedPtrField& added, - const Protobuf::RepeatedPtrField& removed, - const std::string& version) override { + (const std::vector&, const std::string&)); + void onConfigUpdate(const std::vector& added, + const Protobuf::RepeatedPtrField& removed, + const std::string& version) override { SdsApi::onConfigUpdate(added, removed, version); } void setSecret(const envoy::extensions::transport_sockets::tls::v3::Secret&) override {} @@ -135,39 +155,38 @@ class PartialMockSds : public SdsApi { // Basic test of delta's passthrough call to the state-of-the-world variant, to // increase coverage. 
TEST_F(SdsApiTest, Delta) { - Protobuf::RepeatedPtrField resources; - envoy::extensions::transport_sockets::tls::v3::Secret secret; - secret.set_name("secret_1"); - auto* resource = resources.Add(); - resource->mutable_resource()->PackFrom(secret); - resource->set_name("secret_1"); - resource->set_version("version1"); - - Protobuf::RepeatedPtrField for_matching; - for_matching.Add()->PackFrom(secret); + auto secret = std::make_unique(); + secret->set_name("secret_1"); + Config::DecodedResourceImpl resource(std::move(secret), "name", {}, "version1"); + std::vector resources{resource}; NiceMock server; envoy::config::core::v3::ConfigSource config_source; Event::GlobalTimeSystem time_system; + setupMocks(); PartialMockSds sds(server, init_manager_, config_source, subscription_factory_, time_system, *dispatcher_, *api_); initialize(); - EXPECT_CALL(sds, onConfigUpdate(RepeatedProtoEq(for_matching), "version1")); + EXPECT_CALL(sds, onConfigUpdate(DecodedResourcesEq(resources), "version1")); subscription_factory_.callbacks_->onConfigUpdate(resources, {}, "ignored"); // An attempt to remove a resource logs an error, but otherwise just carries on (ignoring the // removal attempt). - resource->set_version("version2"); - EXPECT_CALL(sds, onConfigUpdate(RepeatedProtoEq(for_matching), "version2")); + auto secret_again = std::make_unique(); + secret_again->set_name("secret_1"); + Config::DecodedResourceImpl resource_v2(std::move(secret_again), "name", {}, "version2"); + std::vector resources_v2{resource_v2}; + EXPECT_CALL(sds, onConfigUpdate(DecodedResourcesEq(resources_v2), "version2")); Protobuf::RepeatedPtrField removals; *removals.Add() = "route_0"; - subscription_factory_.callbacks_->onConfigUpdate(resources, removals, "ignored"); + subscription_factory_.callbacks_->onConfigUpdate(resources_v2, removals, "ignored"); } // Tests SDS's use of the delta variant of onConfigUpdate(). 
TEST_F(SdsApiTest, DeltaUpdateSuccess) { NiceMock server; envoy::config::core::v3::ConfigSource config_source; + setupMocks(); TlsCertificateSdsApi sds_api( config_source, "abc.com", subscription_factory_, time_system_, validation_visitor_, server.stats(), init_manager_, []() {}, *dispatcher_, *api_); @@ -187,12 +206,11 @@ TEST_F(SdsApiTest, DeltaUpdateSuccess) { )EOF"; envoy::extensions::transport_sockets::tls::v3::Secret typed_secret; TestUtility::loadFromYaml(TestEnvironment::substitute(yaml), typed_secret); - Protobuf::RepeatedPtrField secret_resources; - secret_resources.Add()->mutable_resource()->PackFrom(typed_secret); + const auto decoded_resources = TestUtility::decodeResources({typed_secret}); EXPECT_CALL(secret_callback, onAddOrUpdateSecret()); initialize(); - subscription_factory_.callbacks_->onConfigUpdate(secret_resources, {}, ""); + subscription_factory_.callbacks_->onConfigUpdate(decoded_resources.refvec_, {}, ""); Ssl::TlsCertificateConfigImpl tls_config(*sds_api.secret(), nullptr, *api_); const std::string cert_pem = @@ -213,6 +231,7 @@ TEST_F(SdsApiTest, DeltaUpdateSuccess) { TEST_F(SdsApiTest, DynamicCertificateValidationContextUpdateSuccess) { NiceMock server; envoy::config::core::v3::ConfigSource config_source; + setupMocks(); CertificateValidationContextSdsApi sds_api( config_source, "abc.com", subscription_factory_, time_system_, validation_visitor_, server.stats(), init_manager_, []() {}, *dispatcher_, *api_); @@ -231,11 +250,10 @@ TEST_F(SdsApiTest, DynamicCertificateValidationContextUpdateSuccess) { envoy::extensions::transport_sockets::tls::v3::Secret typed_secret; TestUtility::loadFromYaml(TestEnvironment::substitute(yaml), typed_secret); - Protobuf::RepeatedPtrField secret_resources; - secret_resources.Add()->PackFrom(typed_secret); + const auto decoded_resources = TestUtility::decodeResources({typed_secret}); EXPECT_CALL(secret_callback, onAddOrUpdateSecret()); initialize(); - 
subscription_factory_.callbacks_->onConfigUpdate(secret_resources, ""); + subscription_factory_.callbacks_->onConfigUpdate(decoded_resources.refvec_, ""); Ssl::CertificateValidationContextConfigImpl cvc_config(*sds_api.secret(), *api_); const std::string ca_cert = @@ -267,6 +285,7 @@ class MockCvcValidationCallback : public CvcValidationCallback { TEST_F(SdsApiTest, DefaultCertificateValidationContextTest) { NiceMock server; envoy::config::core::v3::ConfigSource config_source; + setupMocks(); CertificateValidationContextSdsApi sds_api( config_source, "abc.com", subscription_factory_, time_system_, validation_visitor_, server.stats(), init_manager_, []() {}, *dispatcher_, *api_); @@ -294,10 +313,9 @@ TEST_F(SdsApiTest, DefaultCertificateValidationContextTest) { EXPECT_CALL(secret_callback, onAddOrUpdateSecret()); EXPECT_CALL(validation_callback, validateCvc(_)); - Protobuf::RepeatedPtrField secret_resources; - secret_resources.Add()->PackFrom(typed_secret); + const auto decoded_resources = TestUtility::decodeResources({typed_secret}); initialize(); - subscription_factory_.callbacks_->onConfigUpdate(secret_resources, ""); + subscription_factory_.callbacks_->onConfigUpdate(decoded_resources.refvec_, ""); const std::string default_verify_certificate_hash = "0000000000000000000000000000000000000000000000000000000000000000"; @@ -355,6 +373,7 @@ class MockGenericSecretValidationCallback : public GenericSecretValidationCallba TEST_F(SdsApiTest, GenericSecretSdsApiTest) { NiceMock server; envoy::config::core::v3::ConfigSource config_source; + setupMocks(); GenericSecretSdsApi sds_api( config_source, "encryption_key", subscription_factory_, time_system_, validation_visitor_, server.stats(), init_manager_, []() {}, *dispatcher_, *api_); @@ -378,12 +397,11 @@ name: "encryption_key" )EOF"; envoy::extensions::transport_sockets::tls::v3::Secret typed_secret; TestUtility::loadFromYaml(TestEnvironment::substitute(yaml), typed_secret); - Protobuf::RepeatedPtrField secret_resources; 
- secret_resources.Add()->PackFrom(typed_secret); + const auto decoded_resources = TestUtility::decodeResources({typed_secret}); EXPECT_CALL(secret_callback, onAddOrUpdateSecret()); EXPECT_CALL(validation_callback, validateGenericSecret(_)); initialize(); - subscription_factory_.callbacks_->onConfigUpdate(secret_resources, ""); + subscription_factory_.callbacks_->onConfigUpdate(decoded_resources.refvec_, ""); const envoy::extensions::transport_sockets::tls::v3::GenericSecret generic_secret( *sds_api.secret()); @@ -400,14 +418,13 @@ name: "encryption_key" TEST_F(SdsApiTest, EmptyResource) { NiceMock server; envoy::config::core::v3::ConfigSource config_source; + setupMocks(); TlsCertificateSdsApi sds_api( config_source, "abc.com", subscription_factory_, time_system_, validation_visitor_, server.stats(), init_manager_, []() {}, *dispatcher_, *api_); - Protobuf::RepeatedPtrField secret_resources; - initialize(); - EXPECT_THROW_WITH_MESSAGE(subscription_factory_.callbacks_->onConfigUpdate(secret_resources, ""), + EXPECT_THROW_WITH_MESSAGE(subscription_factory_.callbacks_->onConfigUpdate({}, ""), EnvoyException, "Missing SDS resources for abc.com in onConfigUpdate()"); } @@ -416,6 +433,7 @@ TEST_F(SdsApiTest, EmptyResource) { TEST_F(SdsApiTest, SecretUpdateWrongSize) { NiceMock server; envoy::config::core::v3::ConfigSource config_source; + setupMocks(); TlsCertificateSdsApi sds_api( config_source, "abc.com", subscription_factory_, time_system_, validation_visitor_, server.stats(), init_manager_, []() {}, *dispatcher_, *api_); @@ -432,13 +450,12 @@ TEST_F(SdsApiTest, SecretUpdateWrongSize) { envoy::extensions::transport_sockets::tls::v3::Secret typed_secret; TestUtility::loadFromYaml(TestEnvironment::substitute(yaml), typed_secret); - Protobuf::RepeatedPtrField secret_resources; - secret_resources.Add()->PackFrom(typed_secret); - secret_resources.Add()->PackFrom(typed_secret); + const auto decoded_resources = TestUtility::decodeResources({typed_secret, typed_secret}); 
initialize(); - EXPECT_THROW_WITH_MESSAGE(subscription_factory_.callbacks_->onConfigUpdate(secret_resources, ""), - EnvoyException, "Unexpected SDS secrets length: 2"); + EXPECT_THROW_WITH_MESSAGE( + subscription_factory_.callbacks_->onConfigUpdate(decoded_resources.refvec_, ""), + EnvoyException, "Unexpected SDS secrets length: 2"); } // Validate that SdsApi throws exception if secret name passed to onConfigUpdate() @@ -446,6 +463,7 @@ TEST_F(SdsApiTest, SecretUpdateWrongSize) { TEST_F(SdsApiTest, SecretUpdateWrongSecretName) { NiceMock server; envoy::config::core::v3::ConfigSource config_source; + setupMocks(); TlsCertificateSdsApi sds_api( config_source, "abc.com", subscription_factory_, time_system_, validation_visitor_, server.stats(), init_manager_, []() {}, *dispatcher_, *api_); @@ -462,13 +480,12 @@ TEST_F(SdsApiTest, SecretUpdateWrongSecretName) { envoy::extensions::transport_sockets::tls::v3::Secret typed_secret; TestUtility::loadFromYaml(TestEnvironment::substitute(yaml), typed_secret); - Protobuf::RepeatedPtrField secret_resources; - secret_resources.Add()->PackFrom(typed_secret); + const auto decoded_resources = TestUtility::decodeResources({typed_secret}); initialize(); - EXPECT_THROW_WITH_MESSAGE(subscription_factory_.callbacks_->onConfigUpdate(secret_resources, ""), - EnvoyException, - "Unexpected SDS secret (expecting abc.com): wrong.name.com"); + EXPECT_THROW_WITH_MESSAGE( + subscription_factory_.callbacks_->onConfigUpdate(decoded_resources.refvec_, ""), + EnvoyException, "Unexpected SDS secret (expecting abc.com): wrong.name.com"); } } // namespace diff --git a/test/common/secret/secret_manager_impl_test.cc b/test/common/secret/secret_manager_impl_test.cc index 4ee5fe1088062..58304e1a1106e 100644 --- a/test/common/secret/secret_manager_impl_test.cc +++ b/test/common/secret/secret_manager_impl_test.cc @@ -15,7 +15,9 @@ #include "common/ssl/tls_certificate_config_impl.h" #include "test/mocks/event/mocks.h" -#include "test/mocks/server/mocks.h" 
+#include "test/mocks/server/config_tracker.h" +#include "test/mocks/server/instance.h" +#include "test/mocks/server/transport_socket_factory_context.h" #include "test/test_common/environment.h" #include "test/test_common/simulated_time_system.h" #include "test/test_common/utility.h" @@ -41,7 +43,8 @@ class SecretManagerImplTest : public testing::Test, public Logger::Loggable(*message_ptr); envoy::admin::v3::SecretsConfigDump expected_secrets_config_dump; TestUtility::loadFromYaml(expected_dump_yaml, expected_secrets_config_dump); - EXPECT_EQ(expected_secrets_config_dump.DebugString(), secrets_config_dump.DebugString()); + EXPECT_THAT(secrets_config_dump, + ProtoEqIgnoreRepeatedFieldOrdering(expected_secrets_config_dump)); } void setupSecretProviderContext() {} @@ -257,7 +260,7 @@ TEST_F(SecretManagerImplTest, DeduplicateDynamicTlsCertificateSecretProvider) { NiceMock local_info; NiceMock dispatcher; - NiceMock random; + NiceMock random; Stats::IsolatedStoreImpl stats; NiceMock init_manager; NiceMock init_watcher; @@ -267,7 +270,7 @@ TEST_F(SecretManagerImplTest, DeduplicateDynamicTlsCertificateSecretProvider) { init_target_handle = target.createHandle("test"); })); EXPECT_CALL(secret_context, stats()).WillRepeatedly(ReturnRef(stats)); - EXPECT_CALL(secret_context, initManager()).WillRepeatedly(Return(&init_manager)); + EXPECT_CALL(secret_context, initManager()).WillRepeatedly(ReturnRef(init_manager)); EXPECT_CALL(secret_context, dispatcher()).WillRepeatedly(ReturnRef(dispatcher)); EXPECT_CALL(secret_context, localInfo()).WillRepeatedly(ReturnRef(local_info)); @@ -340,7 +343,7 @@ TEST_F(SecretManagerImplTest, SdsDynamicSecretUpdateSuccess) { envoy::config::core::v3::ConfigSource config_source; NiceMock local_info; - NiceMock random; + NiceMock random; Stats::IsolatedStoreImpl stats; NiceMock init_manager; NiceMock init_watcher; @@ -350,7 +353,7 @@ TEST_F(SecretManagerImplTest, SdsDynamicSecretUpdateSuccess) { init_target_handle = target.createHandle("test"); })); 
EXPECT_CALL(secret_context, stats()).WillOnce(ReturnRef(stats)); - EXPECT_CALL(secret_context, initManager()).WillRepeatedly(Return(&init_manager)); + EXPECT_CALL(secret_context, initManager()).WillRepeatedly(ReturnRef(init_manager)); EXPECT_CALL(secret_context, dispatcher()).WillRepeatedly(ReturnRef(*dispatcher_)); EXPECT_CALL(secret_context, localInfo()).WillOnce(ReturnRef(local_info)); EXPECT_CALL(secret_context, api()).WillRepeatedly(ReturnRef(*api_)); @@ -368,11 +371,10 @@ name: "abc.com" )EOF"; envoy::extensions::transport_sockets::tls::v3::Secret typed_secret; TestUtility::loadFromYaml(TestEnvironment::substitute(yaml), typed_secret); - Protobuf::RepeatedPtrField secret_resources; - secret_resources.Add()->PackFrom(typed_secret); + const auto decoded_resources = TestUtility::decodeResources({typed_secret}); init_target_handle->initialize(init_watcher); - secret_context.cluster_manager_.subscription_factory_.callbacks_->onConfigUpdate(secret_resources, - ""); + secret_context.cluster_manager_.subscription_factory_.callbacks_->onConfigUpdate( + decoded_resources.refvec_, ""); Ssl::TlsCertificateConfigImpl tls_config(*secret_provider->secret(), nullptr, *api_); const std::string cert_pem = "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_cert.pem"; @@ -400,7 +402,7 @@ TEST_F(SecretManagerImplTest, SdsDynamicGenericSecret) { EXPECT_CALL(secret_context, dispatcher()).WillRepeatedly(ReturnRef(*dispatcher_)); EXPECT_CALL(secret_context, messageValidationVisitor()).WillOnce(ReturnRef(validation_visitor)); EXPECT_CALL(secret_context, stats()).WillOnce(ReturnRef(stats)); - EXPECT_CALL(secret_context, initManager()).WillRepeatedly(Return(&init_manager)); + EXPECT_CALL(secret_context, initManager()).WillRepeatedly(ReturnRef(init_manager)); EXPECT_CALL(secret_context, localInfo()).WillOnce(ReturnRef(local_info)); EXPECT_CALL(secret_context, api()).WillRepeatedly(ReturnRef(*api_)); EXPECT_CALL(init_manager, add(_)) @@ -419,11 +421,10 @@ name: 
"encryption_key" )EOF"; envoy::extensions::transport_sockets::tls::v3::Secret typed_secret; TestUtility::loadFromYaml(TestEnvironment::substitute(yaml), typed_secret); - Protobuf::RepeatedPtrField secret_resources; - secret_resources.Add()->PackFrom(typed_secret); + const auto decoded_resources = TestUtility::decodeResources({typed_secret}); init_target_handle->initialize(init_watcher); - secret_context.cluster_manager_.subscription_factory_.callbacks_->onConfigUpdate(secret_resources, - ""); + secret_context.cluster_manager_.subscription_factory_.callbacks_->onConfigUpdate( + decoded_resources.refvec_, ""); const envoy::extensions::transport_sockets::tls::v3::GenericSecret generic_secret( *secret_provider->secret()); @@ -440,7 +441,7 @@ TEST_F(SecretManagerImplTest, ConfigDumpHandler) { envoy::config::core::v3::ConfigSource config_source; NiceMock local_info; NiceMock dispatcher; - NiceMock random; + NiceMock random; Stats::IsolatedStoreImpl stats; NiceMock init_manager; NiceMock init_watcher; @@ -450,7 +451,7 @@ TEST_F(SecretManagerImplTest, ConfigDumpHandler) { init_target_handle = target.createHandle("test"); })); EXPECT_CALL(secret_context, stats()).WillRepeatedly(ReturnRef(stats)); - EXPECT_CALL(secret_context, initManager()).WillRepeatedly(Return(&init_manager)); + EXPECT_CALL(secret_context, initManager()).WillRepeatedly(ReturnRef(init_manager)); EXPECT_CALL(secret_context, dispatcher()).WillRepeatedly(ReturnRef(dispatcher)); EXPECT_CALL(secret_context, localInfo()).WillRepeatedly(ReturnRef(local_info)); @@ -469,11 +470,10 @@ name: "abc.com" )EOF"; envoy::extensions::transport_sockets::tls::v3::Secret typed_secret; TestUtility::loadFromYaml(TestEnvironment::substitute(yaml), typed_secret); - Protobuf::RepeatedPtrField secret_resources; - secret_resources.Add()->PackFrom(typed_secret); + const auto decoded_resources = TestUtility::decodeResources({typed_secret}); init_target_handle->initialize(init_watcher); - 
secret_context.cluster_manager_.subscription_factory_.callbacks_->onConfigUpdate(secret_resources, - "keycert-v1"); + secret_context.cluster_manager_.subscription_factory_.callbacks_->onConfigUpdate( + decoded_resources.refvec_, "keycert-v1"); Ssl::TlsCertificateConfigImpl tls_config(*secret_provider->secret(), nullptr, *api_); EXPECT_EQ("DUMMY_INLINE_BYTES_FOR_CERT_CHAIN", tls_config.certificateChain()); EXPECT_EQ("DUMMY_INLINE_BYTES_FOR_PRIVATE_KEY", tls_config.privateKey()); @@ -511,12 +511,11 @@ name: "abc.com.validation" inline_string: "DUMMY_INLINE_STRING_TRUSTED_CA" )EOF"; TestUtility::loadFromYaml(TestEnvironment::substitute(validation_yaml), typed_secret); - secret_resources.Clear(); - secret_resources.Add()->PackFrom(typed_secret); + const auto decoded_resources_2 = TestUtility::decodeResources({typed_secret}); init_target_handle->initialize(init_watcher); secret_context.cluster_manager_.subscription_factory_.callbacks_->onConfigUpdate( - secret_resources, "validation-context-v1"); + decoded_resources_2.refvec_, "validation-context-v1"); Ssl::CertificateValidationContextConfigImpl cert_validation_context( *context_secret_provider->secret(), *api_); EXPECT_EQ("DUMMY_INLINE_STRING_TRUSTED_CA", cert_validation_context.caCert()); @@ -563,12 +562,11 @@ name: "abc.com.stek" - inline_bytes: "RFVNTVlfSU5MSU5FX0JZVEVT" )EOF"; TestUtility::loadFromYaml(TestEnvironment::substitute(stek_yaml), typed_secret); - secret_resources.Clear(); - secret_resources.Add()->PackFrom(typed_secret); + const auto decoded_resources_3 = TestUtility::decodeResources({typed_secret}); init_target_handle->initialize(init_watcher); secret_context.cluster_manager_.subscription_factory_.callbacks_->onConfigUpdate( - secret_resources, "stek-context-v1"); + decoded_resources_3.refvec_, "stek-context-v1"); EXPECT_EQ(stek_secret_provider->secret()->keys()[1].inline_string(), "DUMMY_INLINE_STRING"); const std::string updated_once_more_config_dump = R"EOF( @@ -625,11 +623,10 @@ name: "signing_key" 
inline_string: "DUMMY_ECDSA_KEY" )EOF"; TestUtility::loadFromYaml(TestEnvironment::substitute(generic_secret_yaml), typed_secret); - secret_resources.Clear(); - secret_resources.Add()->PackFrom(typed_secret); + const auto decoded_resources_4 = TestUtility::decodeResources({typed_secret}); init_target_handle->initialize(init_watcher); secret_context.cluster_manager_.subscription_factory_.callbacks_->onConfigUpdate( - secret_resources, "signing-key-v1"); + decoded_resources_4.refvec_, "signing-key-v1"); const envoy::extensions::transport_sockets::tls::v3::GenericSecret generic_secret( *generic_secret_provider->secret()); @@ -698,7 +695,7 @@ TEST_F(SecretManagerImplTest, ConfigDumpHandlerWarmingSecrets) { envoy::config::core::v3::ConfigSource config_source; NiceMock local_info; NiceMock dispatcher; - NiceMock random; + NiceMock random; Stats::IsolatedStoreImpl stats; NiceMock init_manager; NiceMock init_watcher; @@ -708,7 +705,7 @@ TEST_F(SecretManagerImplTest, ConfigDumpHandlerWarmingSecrets) { init_target_handle = target.createHandle("test"); })); EXPECT_CALL(secret_context, stats()).WillRepeatedly(ReturnRef(stats)); - EXPECT_CALL(secret_context, initManager()).WillRepeatedly(Return(&init_manager)); + EXPECT_CALL(secret_context, initManager()).WillRepeatedly(ReturnRef(init_manager)); EXPECT_CALL(secret_context, dispatcher()).WillRepeatedly(ReturnRef(dispatcher)); EXPECT_CALL(secret_context, localInfo()).WillRepeatedly(ReturnRef(local_info)); @@ -831,7 +828,7 @@ TEST_F(SecretManagerImplTest, ConfigDumpHandlerStaticSecrets) { envoy::config::core::v3::ConfigSource config_source; NiceMock local_info; NiceMock dispatcher; - NiceMock random; + NiceMock random; Stats::IsolatedStoreImpl stats; NiceMock init_manager; NiceMock init_watcher; @@ -841,7 +838,7 @@ TEST_F(SecretManagerImplTest, ConfigDumpHandlerStaticSecrets) { init_target_handle = target.createHandle("test"); })); EXPECT_CALL(secret_context, stats()).WillRepeatedly(ReturnRef(stats)); - EXPECT_CALL(secret_context, 
initManager()).WillRepeatedly(Return(&init_manager)); + EXPECT_CALL(secret_context, initManager()).WillRepeatedly(ReturnRef(init_manager)); EXPECT_CALL(secret_context, dispatcher()).WillRepeatedly(ReturnRef(dispatcher)); EXPECT_CALL(secret_context, localInfo()).WillRepeatedly(ReturnRef(local_info)); @@ -903,7 +900,7 @@ TEST_F(SecretManagerImplTest, ConfigDumpHandlerStaticValidationContext) { envoy::config::core::v3::ConfigSource config_source; NiceMock local_info; NiceMock dispatcher; - NiceMock random; + NiceMock random; Stats::IsolatedStoreImpl stats; NiceMock init_manager; NiceMock init_watcher; @@ -913,7 +910,7 @@ TEST_F(SecretManagerImplTest, ConfigDumpHandlerStaticValidationContext) { init_target_handle = target.createHandle("test"); })); EXPECT_CALL(secret_context, stats()).WillRepeatedly(ReturnRef(stats)); - EXPECT_CALL(secret_context, initManager()).WillRepeatedly(Return(&init_manager)); + EXPECT_CALL(secret_context, initManager()).WillRepeatedly(ReturnRef(init_manager)); EXPECT_CALL(secret_context, dispatcher()).WillRepeatedly(ReturnRef(dispatcher)); EXPECT_CALL(secret_context, localInfo()).WillRepeatedly(ReturnRef(local_info)); @@ -948,7 +945,7 @@ TEST_F(SecretManagerImplTest, ConfigDumpHandlerStaticSessionTicketsContext) { envoy::config::core::v3::ConfigSource config_source; NiceMock local_info; NiceMock dispatcher; - NiceMock random; + NiceMock random; Stats::IsolatedStoreImpl stats; NiceMock init_manager; NiceMock init_watcher; @@ -958,7 +955,7 @@ TEST_F(SecretManagerImplTest, ConfigDumpHandlerStaticSessionTicketsContext) { init_target_handle = target.createHandle("test"); })); EXPECT_CALL(secret_context, stats()).WillRepeatedly(ReturnRef(stats)); - EXPECT_CALL(secret_context, initManager()).WillRepeatedly(Return(&init_manager)); + EXPECT_CALL(secret_context, initManager()).WillRepeatedly(ReturnRef(init_manager)); EXPECT_CALL(secret_context, dispatcher()).WillRepeatedly(ReturnRef(dispatcher)); EXPECT_CALL(secret_context, 
localInfo()).WillRepeatedly(ReturnRef(local_info)); diff --git a/test/common/shared_pool/BUILD b/test/common/shared_pool/BUILD index 9a9a641f380a9..f4eaecda80fa2 100644 --- a/test/common/shared_pool/BUILD +++ b/test/common/shared_pool/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( diff --git a/test/common/signal/BUILD b/test/common/signal/BUILD index 746babe0fedfb..c3f9cf5df8434 100644 --- a/test/common/signal/BUILD +++ b/test/common/signal/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( @@ -17,7 +17,9 @@ envoy_cc_test( "skip_on_windows", ], deps = [ + "//source/common/signal:fatal_error_handler_lib", "//source/common/signal:sigaction_lib", + "//test/common/stats:stat_test_utility_lib", "//test/test_common:utility_lib", ], ) diff --git a/test/common/signal/signals_test.cc b/test/common/signal/signals_test.cc index 98753f047d0fa..cc66d32d81d0e 100644 --- a/test/common/signal/signals_test.cc +++ b/test/common/signal/signals_test.cc @@ -2,8 +2,10 @@ #include +#include "common/signal/fatal_error_handler.h" #include "common/signal/signal_action.h" +#include "test/common/stats/stat_test_utility.h" #include "test/test_common/utility.h" #include "gtest/gtest.h" @@ -19,6 +21,12 @@ namespace Envoy { #define ASANITIZED /* Sanitized by GCC */ #endif +// Use this test handler instead of a mock, because fatal error handlers must be +// signal-safe and a mock might allocate memory. +class TestFatalErrorHandler : public FatalErrorHandlerInterface { + void onFatalError(std::ostream& os) const override { os << "HERE!"; } +}; + // Death tests that expect a particular output are disabled under address sanitizer. 
// The sanitizer does its own special signal handling and prints messages that are // not ours instead of what this test expects. As of latest Clang this appears @@ -26,37 +34,33 @@ namespace Envoy { #ifndef ASANITIZED TEST(SignalsDeathTest, InvalidAddressDeathTest) { SignalAction actions; - EXPECT_DEATH_LOG_TO_STDERR( + EXPECT_DEATH( []() -> void { // Oops! volatile int* nasty_ptr = reinterpret_cast(0x0); - *(nasty_ptr) = 0; + *(nasty_ptr) = 0; // NOLINT(clang-analyzer-core.NullDereference) }(), "backtrace.*Segmentation fault"); } -class TestFatalErrorHandler : public FatalErrorHandlerInterface { - void onFatalError() const override { std::cerr << "HERE!"; } -}; - TEST(SignalsDeathTest, RegisteredHandlerTest) { TestFatalErrorHandler handler; - SignalAction::registerFatalErrorHandler(handler); + FatalErrorHandler::registerFatalErrorHandler(handler); SignalAction actions; // Make sure the fatal error log "HERE" registered above is logged on fatal error. - EXPECT_DEATH_LOG_TO_STDERR( + EXPECT_DEATH( []() -> void { // Oops! volatile int* nasty_ptr = reinterpret_cast(0x0); - *(nasty_ptr) = 0; + *(nasty_ptr) = 0; // NOLINT(clang-analyzer-core.NullDereference) }(), "HERE"); - SignalAction::removeFatalErrorHandler(handler); + FatalErrorHandler::removeFatalErrorHandler(handler); } TEST(SignalsDeathTest, BusDeathTest) { SignalAction actions; - EXPECT_DEATH_LOG_TO_STDERR( + EXPECT_DEATH( []() -> void { // Bus error is tricky. There's one way that can work on POSIX systems // described below but it depends on mmaping a file. Just make it easy and @@ -72,7 +76,7 @@ TEST(SignalsDeathTest, BusDeathTest) { TEST(SignalsDeathTest, BadMathDeathTest) { SignalAction actions; - EXPECT_DEATH_LOG_TO_STDERR( + EXPECT_DEATH( []() -> void { // It turns out to be really hard to not have the optimizer get rid of a // division by zero. Just raise the signal for this test. 
@@ -85,7 +89,7 @@ TEST(SignalsDeathTest, BadMathDeathTest) { // Unfortunately we don't have a reliable way to do this on other platforms TEST(SignalsDeathTest, IllegalInstructionDeathTest) { SignalAction actions; - EXPECT_DEATH_LOG_TO_STDERR( + EXPECT_DEATH( []() -> void { // Intel defines the "ud2" opcode to be an invalid instruction: __asm__("ud2"); @@ -96,7 +100,7 @@ TEST(SignalsDeathTest, IllegalInstructionDeathTest) { TEST(SignalsDeathTest, AbortDeathTest) { SignalAction actions; - EXPECT_DEATH_LOG_TO_STDERR([]() -> void { abort(); }(), "backtrace.*Abort(ed)?"); + EXPECT_DEATH([]() -> void { abort(); }(), "backtrace.*Abort(ed)?"); } TEST(SignalsDeathTest, RestoredPreviousHandlerDeathTest) { @@ -108,7 +112,7 @@ TEST(SignalsDeathTest, RestoredPreviousHandlerDeathTest) { // goes out of scope, NOT the default. } // Outer SignalAction should be active again: - EXPECT_DEATH_LOG_TO_STDERR([]() -> void { abort(); }(), "backtrace.*Abort(ed)?"); + EXPECT_DEATH([]() -> void { abort(); }(), "backtrace.*Abort(ed)?"); } #endif @@ -145,4 +149,60 @@ TEST(Signals, HandlerTest) { SignalAction::sigHandler(SIGURG, &fake_si, nullptr); } +TEST(FatalErrorHandler, CallHandler) { + // Reserve space in advance so that the handler doesn't allocate memory. + std::string s; + s.reserve(1024); + std::ostringstream os(std::move(s)); + + TestFatalErrorHandler handler; + FatalErrorHandler::registerFatalErrorHandler(handler); + + FatalErrorHandler::callFatalErrorHandlers(os); + EXPECT_EQ(os.str(), "HERE!"); + + // callFatalErrorHandlers() will unregister the handler, so this isn't + // necessary for cleanup. Call it anyway, to simulate the case when one thread + // tries to remove the handler while another thread crashes. + FatalErrorHandler::removeFatalErrorHandler(handler); +} + +// Use this specialized test handler instead of a mock, because fatal error +// handlers must be signal-safe and a mock might allocate memory. 
+class MemoryCheckingFatalErrorHandler : public FatalErrorHandlerInterface { +public: + MemoryCheckingFatalErrorHandler(const Stats::TestUtil::MemoryTest& memory_test, + uint64_t& allocated_after_call) + : memory_test_(memory_test), allocated_after_call_(allocated_after_call) {} + void onFatalError(std::ostream& os) const override { + UNREFERENCED_PARAMETER(os); + allocated_after_call_ = memory_test_.consumedBytes(); + } + +private: + const Stats::TestUtil::MemoryTest& memory_test_; + uint64_t& allocated_after_call_; +}; + +// FatalErrorHandler::callFatalErrorHandlers shouldn't allocate any heap memory, +// so that it's safe to call from a signal handler. Test by comparing the +// allocated memory before a call with the allocated memory during a handler. +TEST(FatalErrorHandler, DontAllocateMemory) { + // Reserve space in advance so that the handler doesn't allocate memory. + std::string s; + s.reserve(1024); + std::ostringstream os(std::move(s)); + + Stats::TestUtil::MemoryTest memory_test; + + uint64_t allocated_after_call; + MemoryCheckingFatalErrorHandler handler(memory_test, allocated_after_call); + FatalErrorHandler::registerFatalErrorHandler(handler); + + uint64_t allocated_before_call = memory_test.consumedBytes(); + FatalErrorHandler::callFatalErrorHandlers(os); + + EXPECT_MEMORY_EQ(allocated_after_call, allocated_before_call); +} + } // namespace Envoy diff --git a/test/common/singleton/BUILD b/test/common/singleton/BUILD index 8076c8bafbe52..cd5e582a5ed82 100644 --- a/test/common/singleton/BUILD +++ b/test/common/singleton/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( diff --git a/test/common/singleton/manager_impl_test.cc b/test/common/singleton/manager_impl_test.cc index 1a1f4c671a1cc..aa5796ae79c1b 100644 --- a/test/common/singleton/manager_impl_test.cc +++ 
b/test/common/singleton/manager_impl_test.cc @@ -18,8 +18,7 @@ static void deathTestWorker() { } TEST(SingletonManagerImplDeathTest, NotRegistered) { - EXPECT_DEATH_LOG_TO_STDERR(deathTestWorker(), - "invalid singleton name 'foo'. Make sure it is registered."); + EXPECT_DEATH(deathTestWorker(), "invalid singleton name 'foo'. Make sure it is registered."); } SINGLETON_MANAGER_REGISTRATION(test); diff --git a/test/common/stats/BUILD b/test/common/stats/BUILD index ad8e7885cd1cb..a01eb1a1ae260 100644 --- a/test/common/stats/BUILD +++ b/test/common/stats/BUILD @@ -1,7 +1,7 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", + "envoy_benchmark_test", + "envoy_cc_benchmark_binary", "envoy_cc_fuzz_test", "envoy_cc_test", "envoy_cc_test_binary", @@ -9,6 +9,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( @@ -31,6 +33,15 @@ envoy_cc_test( ], ) +envoy_cc_test( + name = "histogram_impl_test", + srcs = ["histogram_impl_test.cc"], + deps = [ + "//source/common/stats:histogram_lib", + "@envoy_api//envoy/config/metrics/v3:pkg_cc_proto", + ], +) + envoy_cc_test( name = "metric_impl_test", srcs = ["metric_impl_test.cc"], @@ -157,15 +168,27 @@ envoy_cc_fuzz_test( ":stat_test_utility_lib", "//source/common/buffer:buffer_lib", "//source/common/common:assert_lib", - "//source/common/decompressor:decompressor_lib", "//source/common/stats:symbol_table_lib", "//test/fuzz:utility_lib", ], ) +envoy_cc_fuzz_test( + name = "utility_fuzz_test", + srcs = ["utility_fuzz_test.cc"], + corpus = "utility_corpus", + deps = [ + "//source/common/stats:utility_lib", + ], +) + envoy_cc_test_binary( name = "symbol_table_speed_test", - srcs = ["symbol_table_speed_test.cc"], + srcs = [ + "make_elements_helper.cc", + "make_elements_helper.h", + "symbol_table_speed_test.cc", + ], external_deps = [ "abseil_strings", "benchmark", @@ -173,7 +196,9 @@ envoy_cc_test_binary( deps = [ ":stat_test_utility_lib", 
"//source/common/memory:stats_lib", + "//source/common/stats:isolated_store_lib", "//source/common/stats:symbol_table_lib", + "//source/common/stats:utility_lib", "//test/mocks/stats:stats_mocks", "//test/test_common:logging_lib", "//test/test_common:utility_lib", @@ -212,7 +237,7 @@ envoy_cc_test( "//source/common/stats:thread_local_store_lib", "//source/common/thread_local:thread_local_lib", "//test/mocks/event:event_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:instance_mocks", "//test/mocks/stats:stats_mocks", "//test/mocks/thread_local:thread_local_mocks", "//test/test_common:logging_lib", @@ -222,7 +247,7 @@ envoy_cc_test( ], ) -envoy_cc_test_binary( +envoy_cc_benchmark_binary( name = "thread_local_store_speed_test", srcs = ["thread_local_store_speed_test.cc"], external_deps = [ @@ -241,3 +266,18 @@ envoy_cc_test_binary( "@envoy_api//envoy/config/metrics/v3:pkg_cc_proto", ], ) + +envoy_benchmark_test( + name = "thread_local_store_speed_test_benchmark_test", + benchmark_binary = "thread_local_store_speed_test", +) + +envoy_cc_test( + name = "utility_test", + srcs = ["utility_test.cc"], + deps = [ + "//source/common/stats:isolated_store_lib", + "//source/common/stats:thread_local_store_lib", + "//source/common/stats:utility_lib", + ], +) diff --git a/test/common/stats/histogram_impl_test.cc b/test/common/stats/histogram_impl_test.cc new file mode 100644 index 0000000000000..085e3d9a5a1ad --- /dev/null +++ b/test/common/stats/histogram_impl_test.cc @@ -0,0 +1,99 @@ +#include "envoy/config/metrics/v3/stats.pb.h" + +#include "common/stats/histogram_impl.h" + +#include "test/test_common/utility.h" + +#include "gtest/gtest.h" + +namespace Envoy { +namespace Stats { + +class HistogramSettingsImplTest : public testing::Test { +public: + void initialize() { + envoy::config::metrics::v3::StatsConfig config; + auto& bucket_settings = *config.mutable_histogram_bucket_settings(); + for (auto& item : buckets_configs_) { + 
bucket_settings.Add(std::move(item)); + } + settings_ = std::make_unique(config); + } + + std::vector buckets_configs_; + std::unique_ptr settings_; +}; + +// Test that a matching stat returns the configured buckets, and a non-matching +// stat returns the defaults. +TEST_F(HistogramSettingsImplTest, Basic) { + envoy::config::metrics::v3::HistogramBucketSettings setting; + setting.mutable_match()->set_prefix("a"); + setting.mutable_buckets()->Add(0.1); + setting.mutable_buckets()->Add(2); + buckets_configs_.push_back(setting); + + initialize(); + EXPECT_EQ(settings_->buckets("test"), settings_->defaultBuckets()); + EXPECT_EQ(settings_->buckets("abcd"), ConstSupportedBuckets({0.1, 2})); +} + +// Test that buckets are correctly sorted. +TEST_F(HistogramSettingsImplTest, Sorted) { + envoy::config::metrics::v3::HistogramBucketSettings setting; + setting.mutable_match()->set_exact("a"); + setting.mutable_buckets()->Add(0.1); + setting.mutable_buckets()->Add(2); + setting.mutable_buckets()->Add(1); // Out-of-order + buckets_configs_.push_back(setting); + + initialize(); + EXPECT_EQ(settings_->buckets("a"), ConstSupportedBuckets({0.1, 1, 2})); +} + +// Test that only matching configurations are applied. +TEST_F(HistogramSettingsImplTest, Matching) { + { + envoy::config::metrics::v3::HistogramBucketSettings setting; + setting.mutable_match()->set_prefix("a"); + setting.mutable_buckets()->Add(1); + setting.mutable_buckets()->Add(2); + buckets_configs_.push_back(setting); + } + + { + envoy::config::metrics::v3::HistogramBucketSettings setting; + setting.mutable_match()->set_prefix("b"); + setting.mutable_buckets()->Add(3); + setting.mutable_buckets()->Add(4); + buckets_configs_.push_back(setting); + } + + initialize(); + EXPECT_EQ(settings_->buckets("abcd"), ConstSupportedBuckets({1, 2})); + EXPECT_EQ(settings_->buckets("bcde"), ConstSupportedBuckets({3, 4})); +} + +// Test that earlier configs take precedence over later configs when both match. 
+TEST_F(HistogramSettingsImplTest, Priority) { + { + envoy::config::metrics::v3::HistogramBucketSettings setting; + setting.mutable_match()->set_prefix("a"); + setting.mutable_buckets()->Add(1); + setting.mutable_buckets()->Add(2); + buckets_configs_.push_back(setting); + } + + { + envoy::config::metrics::v3::HistogramBucketSettings setting; + setting.mutable_match()->set_prefix("ab"); + setting.mutable_buckets()->Add(3); + setting.mutable_buckets()->Add(4); + } + + initialize(); + EXPECT_EQ(settings_->buckets("abcd"), ConstSupportedBuckets({1, 2})); +} + +} // namespace Stats +} // namespace Envoy diff --git a/test/common/stats/make_elements_helper.cc b/test/common/stats/make_elements_helper.cc new file mode 100644 index 0000000000000..d7d4538248040 --- /dev/null +++ b/test/common/stats/make_elements_helper.cc @@ -0,0 +1,15 @@ +#include "common/stats/utility.h" + +namespace Envoy { +namespace Stats { + +ElementVec makeElements(Element a, Element b, Element c, Element d, Element e) { + return ElementVec{a, b, c, d, e}; +} + +StatNameVec makeStatNames(StatName a, StatName b, StatName c, StatName d, StatName e) { + return StatNameVec{a, b, c, d, e}; +} + +} // namespace Stats +} // namespace Envoy diff --git a/test/common/stats/make_elements_helper.h b/test/common/stats/make_elements_helper.h new file mode 100644 index 0000000000000..218fa6f7aec6d --- /dev/null +++ b/test/common/stats/make_elements_helper.h @@ -0,0 +1,15 @@ +#pragma once + +#include "common/stats/utility.h" + +namespace Envoy { +namespace Stats { + +// These two trivial functions are broken out into a separate compilation unit +// to make sure the optimizer cannot hoist vector-creation out of the loop. They +// simply create vectors based on their 5 inputs. 
+ElementVec makeElements(Element a, Element b, Element c, Element d, Element e); +StatNameVec makeStatNames(StatName a, StatName b, StatName c, StatName d, StatName e); + +} // namespace Stats +} // namespace Envoy diff --git a/test/common/stats/recent_lookups_speed_test.cc b/test/common/stats/recent_lookups_speed_test.cc index e3ac80b2ad396..9af07ad4ef936 100644 --- a/test/common/stats/recent_lookups_speed_test.cc +++ b/test/common/stats/recent_lookups_speed_test.cc @@ -20,6 +20,7 @@ // BM_LookupsNoEvictions 45662 ns 45662 ns 15329 // BM_LookupsAllEvictions 83015 ns 83015 ns 8435 +#include "common/common/random_generator.h" #include "common/runtime/runtime_impl.h" #include "common/stats/recent_lookups.h" @@ -30,7 +31,7 @@ class RecentLookupsSpeedTest { public: RecentLookupsSpeedTest(uint64_t lookup_variants, uint64_t capacity) { recent_lookups_.setCapacity(capacity); - Envoy::Runtime::RandomGeneratorImpl random; + Envoy::Random::RandomGeneratorImpl random; lookups_.reserve(lookup_variants); for (size_t i = 0; i < lookup_variants; ++i) { lookups_.push_back(absl::StrCat("lookup #", random.random())); @@ -39,7 +40,7 @@ class RecentLookupsSpeedTest { void test(benchmark::State& state) { for (auto _ : state) { - Envoy::Runtime::RandomGeneratorImpl random; + Envoy::Random::RandomGeneratorImpl random; for (uint64_t i = 0; i < lookups_.size(); ++i) { recent_lookups_.lookup(lookups_[random.random() % lookups_.size()]); } diff --git a/test/common/stats/stat_merger_corpus/clusterfuzz-testcase-minimized-stat_merger_fuzz_test-4800677542100992.fuzz b/test/common/stats/stat_merger_corpus/clusterfuzz-testcase-minimized-stat_merger_fuzz_test-4800677542100992.fuzz new file mode 100644 index 0000000000000..77b5c6fdaf93b --- /dev/null +++ b/test/common/stats/stat_merger_corpus/clusterfuzz-testcase-minimized-stat_merger_fuzz_test-4800677542100992.fuzz @@ -0,0 +1 @@ +aVa.b \ No newline at end of file diff --git a/test/common/stats/stat_merger_fuzz_test.cc 
b/test/common/stats/stat_merger_fuzz_test.cc index 44077aa82e240..70579f3786767 100644 --- a/test/common/stats/stat_merger_fuzz_test.cc +++ b/test/common/stats/stat_merger_fuzz_test.cc @@ -15,7 +15,7 @@ namespace Fuzz { void testDynamicEncoding(absl::string_view data, SymbolTable& symbol_table) { StatNameDynamicPool dynamic_pool(symbol_table); StatNamePool symbolic_pool(symbol_table); - std::vector stat_names; + StatNameVec stat_names; // This local string is write-only; it's used to help when debugging // a crash. If a crash is found, you can print the unit_test_encoding @@ -31,7 +31,7 @@ void testDynamicEncoding(absl::string_view data, SymbolTable& symbol_table) { // TODO(#10008): We should remove the "1 +" below, so we can get empty // segments, which trigger some inconsistent handling as described in that // bug. - uint32_t num_bytes = 1 + data[index] & 0x7; + uint32_t num_bytes = (1 + data[index]) & 0x7; num_bytes = std::min(static_cast(data.size() - 1), num_bytes); // restrict number up to the size of data @@ -58,8 +58,10 @@ void testDynamicEncoding(absl::string_view data, SymbolTable& symbol_table) { StatMerger::DynamicContext dynamic_context(symbol_table); std::string name = symbol_table.toString(stat_name); StatMerger::DynamicsMap dynamic_map; - dynamic_map[name] = symbol_table.getDynamicSpans(stat_name); - + DynamicSpans spans = symbol_table.getDynamicSpans(stat_name); + if (!spans.empty()) { + dynamic_map[name] = spans; + } StatName decoded = dynamic_context.makeDynamicStatName(name, dynamic_map); FUZZ_ASSERT(name == symbol_table.toString(decoded)); FUZZ_ASSERT(stat_name == decoded); diff --git a/test/common/stats/stat_merger_test.cc b/test/common/stats/stat_merger_test.cc index bb47651bcdc93..ee8b5bf65ae83 100644 --- a/test/common/stats/stat_merger_test.cc +++ b/test/common/stats/stat_merger_test.cc @@ -34,7 +34,7 @@ class StatMergerTest : public testing::Test { // Encode the input name into a joined StatName, using "D:" to indicate // a dynamic 
component. - std::vector components; + StatNameVec components; StatNamePool symbolic_pool(symbol_table); StatNameDynamicPool dynamic_pool(symbol_table); @@ -233,7 +233,7 @@ class StatMergerDynamicTest : public testing::Test { uint32_t dynamicEncodeDecodeTest(absl::string_view input_descriptor) { // Encode the input name into a joined StatName, using "D:" to indicate // a dynamic component. - std::vector components; + StatNameVec components; StatNamePool symbolic_pool(*symbol_table_); StatNameDynamicPool dynamic_pool(*symbol_table_); @@ -328,6 +328,7 @@ TEST_F(StatMergerDynamicTest, DynamicsWithFakeSymbolTable) { EXPECT_EQ(0, dynamicEncodeDecodeTest("hello..D:world")); EXPECT_EQ(0, dynamicEncodeDecodeTest("D:hello..D:world")); EXPECT_EQ(0, dynamicEncodeDecodeTest("D:hello.D:.D:world")); + EXPECT_EQ(0, dynamicEncodeDecodeTest("aV.D:,b")); // TODO(#10008): these tests fail because fake/real symbol tables // deal with empty components differently. @@ -399,9 +400,10 @@ TEST_F(StatMergerThreadLocalTest, RetainImportModeAfterMerge) { Protobuf::Map gauges; gauges["mygauge"] = 789; stat_merger.mergeStats(counter_deltas, gauges); + EXPECT_EQ(789 + 42, gauge.value()); } + EXPECT_EQ(42, gauge.value()); EXPECT_EQ(Gauge::ImportMode::Accumulate, gauge.importMode()); - EXPECT_EQ(789 + 42, gauge.value()); } // Verify that if we create a never import stat in the child process which then gets merged diff --git a/test/common/stats/stat_test_utility.cc b/test/common/stats/stat_test_utility.cc index a195614e56821..7cdbc08ab4dc5 100644 --- a/test/common/stats/stat_test_utility.cc +++ b/test/common/stats/stat_test_utility.cc @@ -124,14 +124,14 @@ MemoryTest::Mode MemoryTest::mode() { "$ENVOY_MEMORY_TEST_EXACT is set for canonical memory measurements, " "but memory measurement looks broken"); return Mode::Canonical; - } else { - // Different versions of STL and other compiler/architecture differences may - // also impact memory usage, so when not compiling with MEMORY_TEST_EXACT, - // 
memory comparisons must be given some slack. There have recently emerged - // some memory-allocation differences between development and Envoy CI and - // Bazel CI (which compiles Envoy as a test of Bazel). - return can_measure_memory ? Mode::Approximate : Mode::Disabled; } + + // Different versions of STL and other compiler/architecture differences may + // also impact memory usage, so when not compiling with MEMORY_TEST_EXACT, + // memory comparisons must be given some slack. There have recently emerged + // some memory-allocation differences between development and Envoy CI and + // Bazel CI (which compiles Envoy as a test of Bazel). + return can_measure_memory ? Mode::Approximate : Mode::Disabled; #endif } @@ -152,7 +152,8 @@ Counter& TestStore::counterFromStatNameWithTags(const StatName& stat_name, } else { // Ensures StatNames with the same string representation are specified // consistently using symbolic/dynamic components on every access. - ASSERT(counter_ref->statName() == stat_name); + ASSERT(counter_ref->statName() == stat_name, "Inconsistent dynamic vs symbolic " + "stat name specification"); } return *counter_ref; } @@ -173,7 +174,8 @@ Gauge& TestStore::gaugeFromStatNameWithTags(const StatName& stat_name, if (gauge_ref == nullptr) { gauge_ref = &IsolatedStoreImpl::gaugeFromStatNameWithTags(stat_name, tags, mode); } else { - ASSERT(gauge_ref->statName() == stat_name); + ASSERT(gauge_ref->statName() == stat_name, "Inconsistent dynamic vs symbolic " + "stat name specification"); } return *gauge_ref; } @@ -194,15 +196,19 @@ Histogram& TestStore::histogramFromStatNameWithTags(const StatName& stat_name, if (histogram_ref == nullptr) { histogram_ref = &IsolatedStoreImpl::histogramFromStatNameWithTags(stat_name, tags, unit); } else { - ASSERT(histogram_ref->statName() == stat_name); + ASSERT(histogram_ref->statName() == stat_name, "Inconsistent dynamic vs symbolic " + "stat name specification"); } return *histogram_ref; } template -static absl::optional> 
+using StatTypeOptConstRef = absl::optional>; + +template +static StatTypeOptConstRef findByString(const std::string& name, const absl::flat_hash_map& map) { - absl::optional> ret; + StatTypeOptConstRef ret; auto iter = map.find(name); if (iter != map.end()) { ret = *iter->second; diff --git a/test/common/stats/stat_test_utility.h b/test/common/stats/stat_test_utility.h index b0ccd31763b5e..6b46a0f05aeae 100644 --- a/test/common/stats/stat_test_utility.h +++ b/test/common/stats/stat_test_utility.h @@ -102,6 +102,7 @@ class TestStore : public IsolatedStoreImpl { Histogram& histogram(const std::string& name, Histogram::Unit unit) { return histogramFromString(name, unit); } + TextReadout& textReadout(const std::string& name) { return textReadoutFromString(name); } // Override the Stats::Store methods for name-based lookup of stats, to use // and update the string-maps in this class. Note that IsolatedStoreImpl diff --git a/test/common/stats/symbol_table_impl_test.cc b/test/common/stats/symbol_table_impl_test.cc index b7def5f190390..5913b47b4be6b 100644 --- a/test/common/stats/symbol_table_impl_test.cc +++ b/test/common/stats/symbol_table_impl_test.cc @@ -142,21 +142,35 @@ TEST_P(StatNameTest, TestEmpty) { } TEST_P(StatNameTest, TestDynamic100k) { - // Tests 100k different sizes of dynamic stat, covering all kinds of - // corner cases of spilling over into multi-byte lengths. + // Tests a variety different sizes of dynamic stat ranging to 500k, covering + // potential corner cases of spilling over into multi-byte lengths. + std::string stat_str("dyn.x"); + char ch = '\001'; + StatName ab = makeStat("a.b"); + StatName cd = makeStat("c.d"); + auto test_at_size = [this, &stat_str, &ch, ab, cd](uint32_t size) { + if (size > stat_str.size()) { + // Add rotating characters to stat_str until we hit size. + for (uint32_t i = stat_str.size(); i < size; ++i, ++ch) { + stat_str += (ch == '.') ? 
'x' : ch; + } + StatNameDynamicStorage storage(stat_str, *table_); + StatName dynamic = storage.statName(); + EXPECT_EQ(stat_str, table_->toString(dynamic)); + SymbolTable::StoragePtr joined = table_->join({ab, dynamic, cd}); + EXPECT_EQ(absl::StrCat("a.b.", stat_str, ".c.d"), table_->toString(StatName(joined.get()))); + } + }; - std::string stat_str("dynamic_stat.x"); - for (int i = 0; i < 100 * 1000; ++i) { - char ch = i % 256; - if (ch == '.') { - ch = 'x'; + // The outer-loop hits powers of 2 from 8 to 512k. + for (uint32_t i = 3; i < 20; ++i) { + int32_t pow_2 = 1 << i; + + // The inner-loop covers every offset from the power of 2, between offsets of + // -10 and +10. + for (int32_t j = std::max(0, pow_2 - 10); j < pow_2 + 10; ++j) { + test_at_size(j); } - stat_str += ch; - StatNameDynamicStorage storage(stat_str, *table_); - StatName dynamic = storage.statName(); - EXPECT_EQ(stat_str, table_->toString(dynamic)); - SymbolTable::StoragePtr joined = table_->join({makeStat("a.b"), dynamic, makeStat("c.d")}); - EXPECT_EQ(absl::StrCat("a.b.", stat_str, ".c.d"), table_->toString(StatName(joined.get()))); } } diff --git a/test/common/stats/symbol_table_speed_test.cc b/test/common/stats/symbol_table_speed_test.cc index 1ef4d3f636849..a32f0d8c05b13 100644 --- a/test/common/stats/symbol_table_speed_test.cc +++ b/test/common/stats/symbol_table_speed_test.cc @@ -5,13 +5,17 @@ #include "common/common/logger.h" #include "common/common/thread.h" +#include "common/stats/isolated_store_impl.h" #include "common/stats/symbol_table_impl.h" +#include "common/stats/utility.h" +#include "test/common/stats/make_elements_helper.h" #include "test/test_common/utility.h" #include "absl/synchronization/blocking_counter.h" #include "benchmark/benchmark.h" +// NOLINTNEXTLINE(readability-identifier-naming) static void BM_CreateRace(benchmark::State& state) { Envoy::Thread::ThreadFactory& thread_factory = Envoy::Thread::threadFactoryForTest(); @@ -54,6 +58,38 @@ static void 
BM_CreateRace(benchmark::State& state) { } BENCHMARK(BM_CreateRace); +// NOLINTNEXTLINE(readability-identifier-naming) +static void BM_JoinStatNames(benchmark::State& state) { + Envoy::Stats::SymbolTableImpl symbol_table; + Envoy::Stats::IsolatedStoreImpl store(symbol_table); + Envoy::Stats::StatNamePool pool(symbol_table); + Envoy::Stats::StatName a = pool.add("a"); + Envoy::Stats::StatName b = pool.add("b"); + Envoy::Stats::StatName c = pool.add("c"); + Envoy::Stats::StatName d = pool.add("d"); + Envoy::Stats::StatName e = pool.add("e"); + for (auto _ : state) { + Envoy::Stats::Utility::counterFromStatNames(store, Envoy::Stats::makeStatNames(a, b, c, d, e)); + } +} +BENCHMARK(BM_JoinStatNames); + +// NOLINTNEXTLINE(readability-identifier-naming) +static void BM_JoinElements(benchmark::State& state) { + Envoy::Stats::SymbolTableImpl symbol_table; + Envoy::Stats::IsolatedStoreImpl store(symbol_table); + Envoy::Stats::StatNamePool pool(symbol_table); + Envoy::Stats::StatName a = pool.add("a"); + Envoy::Stats::StatName b = pool.add("b"); + Envoy::Stats::StatName c = pool.add("c"); + Envoy::Stats::StatName e = pool.add("e"); + for (auto _ : state) { + Envoy::Stats::Utility::counterFromElements( + store, Envoy::Stats::makeElements(a, b, c, Envoy::Stats::DynamicName("d"), e)); + } +} +BENCHMARK(BM_JoinElements); + int main(int argc, char** argv) { Envoy::Thread::MutexBasicLockable lock; Envoy::Logger::Context logger_context(spdlog::level::warn, diff --git a/test/common/stats/tag_extractor_impl_test.cc b/test/common/stats/tag_extractor_impl_test.cc index c80fbf7047df6..5ca2bb933b735 100644 --- a/test/common/stats/tag_extractor_impl_test.cc +++ b/test/common/stats/tag_extractor_impl_test.cc @@ -306,8 +306,8 @@ TEST(TagExtractorTest, DefaultTagExtractors) { client_ssl.name_ = tag_names.CLIENTSSL_PREFIX; client_ssl.value_ = "clientssl_prefix"; - regex_tester.testRegex("auth.clientssl.clientssl_prefix.auth_ip_white_list", - "auth.clientssl.auth_ip_white_list", {client_ssl}); 
+ regex_tester.testRegex("auth.clientssl.clientssl_prefix.auth_ip_allowlist", + "auth.clientssl.auth_ip_allowlist", {client_ssl}); // TCP Prefix Tag tcp_prefix; diff --git a/test/common/stats/thread_local_store_speed_test.cc b/test/common/stats/thread_local_store_speed_test.cc index 8ad68fd7ba0b7..6e2c62ace9ef2 100644 --- a/test/common/stats/thread_local_store_speed_test.cc +++ b/test/common/stats/thread_local_store_speed_test.cc @@ -40,6 +40,10 @@ class ThreadLocalStorePerf { store_.shutdownThreading(); if (tls_) { tls_->shutdownGlobalThreading(); + tls_->shutdownThread(); + } + if (dispatcher_) { + dispatcher_->run(Event::Dispatcher::RunType::NonBlock); } } @@ -50,8 +54,12 @@ class ThreadLocalStorePerf { } void initThreading() { + if (!Envoy::Event::Libevent::Global::initialized()) { + Envoy::Event::Libevent::Global::initialize(); + } dispatcher_ = api_->allocateDispatcher("test_thread"); tls_ = std::make_unique(); + tls_->registerThread(*dispatcher_, true); store_.initializeThreading(*dispatcher_, *tls_); } @@ -59,10 +67,10 @@ class ThreadLocalStorePerf { Stats::SymbolTablePtr symbol_table_; Event::SimulatedTimeSystem time_system_; Stats::AllocatorImpl heap_alloc_; + Event::DispatcherPtr dispatcher_; + ThreadLocal::InstanceImplPtr tls_; Stats::ThreadLocalStoreImpl store_; Api::ApiPtr api_; - Event::DispatcherPtr dispatcher_; - std::unique_ptr tls_; envoy::config::metrics::v3::StatsConfig stats_config_; std::vector> stat_names_; }; @@ -95,14 +103,3 @@ BENCHMARK(BM_StatsWithTls); // TODO(jmarantz): add multi-threaded variant of this test, that aggressively // looks up stats in multiple threads to try to trigger contention issues. - -// Boilerplate main(), which discovers benchmarks in the same file and runs them. 
-int main(int argc, char** argv) { - benchmark::Initialize(&argc, argv); - - Envoy::Event::Libevent::Global::initialize(); - if (benchmark::ReportUnrecognizedArguments(argc, argv)) { - return 1; - } - benchmark::RunSpecifiedBenchmarks(); -} diff --git a/test/common/stats/thread_local_store_test.cc b/test/common/stats/thread_local_store_test.cc index 3dc88b03c9b32..135c6b424097e 100644 --- a/test/common/stats/thread_local_store_test.cc +++ b/test/common/stats/thread_local_store_test.cc @@ -1,7 +1,6 @@ #include #include #include -#include #include "envoy/config/metrics/v3/stats.pb.h" #include "envoy/stats/histogram.h" @@ -17,7 +16,7 @@ #include "test/common/stats/stat_test_utility.h" #include "test/mocks/event/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/instance.h" #include "test/mocks/stats/mocks.h" #include "test/mocks/thread_local/mocks.h" #include "test/test_common/logging.h" @@ -30,6 +29,7 @@ #include "gtest/gtest.h" using testing::_; +using testing::HasSubstr; using testing::InSequence; using testing::NiceMock; using testing::Ref; @@ -40,6 +40,30 @@ namespace Stats { const uint64_t MaxStatNameLength = 127; +class ThreadLocalStoreTestingPeer { +public: + // Calculates the number of TLS histograms across all threads. This requires + // dispatching to all threads and blocking on their completion, and is exposed + // as a testing peer to enable tests that ensure that TLS histograms don't + // leak. + // + // Note that this must be called from the "main thread", which has different + // implications for unit tests that use real threads vs mocks. The easiest way + // to capture this in a general purpose helper is to use a callback to convey + // the resultant sum. 
+ static void numTlsHistograms(ThreadLocalStoreImpl& thread_local_store_impl, + const std::function& num_tls_hist_cb) { + auto num_tls_histograms = std::make_shared>(0); + thread_local_store_impl.tls_->runOnAllThreads( + [&thread_local_store_impl, num_tls_histograms]() { + auto& tls_cache = + thread_local_store_impl.tls_->getTyped(); + *num_tls_histograms += tls_cache.tls_histogram_cache_.size(); + }, + [num_tls_hist_cb, num_tls_histograms]() { num_tls_hist_cb(*num_tls_histograms); }); + } +}; + class StatsThreadLocalStoreTest : public testing::Test { public: StatsThreadLocalStoreTest() @@ -53,12 +77,27 @@ class StatsThreadLocalStoreTest : public testing::Test { store_->addSink(sink_); } + uint32_t numTlsHistograms() { + uint32_t num_tls_histograms; + absl::Mutex mutex; + bool done = false; + ThreadLocalStoreTestingPeer::numTlsHistograms( + *store_, [&mutex, &done, &num_tls_histograms](uint32_t num) { + absl::MutexLock lock(&mutex); + num_tls_histograms = num; + done = true; + }); + absl::MutexLock lock(&mutex); + mutex.Await(absl::Condition(&done)); + return num_tls_histograms; + } + SymbolTablePtr symbol_table_; NiceMock main_thread_dispatcher_; NiceMock tls_; AllocatorImpl alloc_; MockSink sink_; - std::unique_ptr store_; + ThreadLocalStoreImplPtr store_; }; class HistogramWrapper { @@ -176,7 +215,7 @@ class HistogramTest : public testing::Test { NiceMock tls_; AllocatorImpl alloc_; MockSink sink_; - std::unique_ptr store_; + ThreadLocalStoreImplPtr store_; InSequence s; std::vector h1_cumulative_values_, h2_cumulative_values_, h1_interval_values_, h2_interval_values_; @@ -382,6 +421,52 @@ TEST_F(StatsThreadLocalStoreTest, BasicScope) { store_->shutdownThreading(); scope1->deliverHistogramToSinks(h1, 100); scope1->deliverHistogramToSinks(h2, 200); + scope1.reset(); + tls_.shutdownThread(); +} + +TEST_F(StatsThreadLocalStoreTest, HistogramScopeOverlap) { + InSequence s; + store_->initializeThreading(main_thread_dispatcher_, tls_); + + // Creating two scopes with 
the same name gets you two distinct scope objects. + ScopePtr scope1 = store_->createScope("scope."); + ScopePtr scope2 = store_->createScope("scope."); + EXPECT_NE(scope1, scope2); + + EXPECT_EQ(0, store_->histograms().size()); + EXPECT_EQ(0, numTlsHistograms()); + + // However, stats created in the two same-named scopes will be the same objects. + Counter& counter = scope1->counterFromString("counter"); + EXPECT_EQ(&counter, &scope2->counterFromString("counter")); + Gauge& gauge = scope1->gaugeFromString("gauge", Gauge::ImportMode::Accumulate); + EXPECT_EQ(&gauge, &scope2->gaugeFromString("gauge", Gauge::ImportMode::Accumulate)); + TextReadout& text_readout = scope1->textReadoutFromString("tr"); + EXPECT_EQ(&text_readout, &scope2->textReadoutFromString("tr")); + Histogram& histogram = scope1->histogramFromString("histogram", Histogram::Unit::Unspecified); + EXPECT_EQ(&histogram, &scope2->histogramFromString("histogram", Histogram::Unit::Unspecified)); + + // The histogram was created in scope1, which can now be destroyed. But the + // histogram is kept alive by scope2. 
+ EXPECT_CALL(sink_, onHistogramComplete(Ref(histogram), 100)); + histogram.recordValue(100); + EXPECT_EQ(1, store_->histograms().size()); + EXPECT_EQ(1, numTlsHistograms()); + scope1.reset(); + EXPECT_EQ(1, store_->histograms().size()); + EXPECT_EQ(1, numTlsHistograms()); + EXPECT_CALL(sink_, onHistogramComplete(Ref(histogram), 200)); + histogram.recordValue(200); + EXPECT_EQ(&histogram, &scope2->histogramFromString("histogram", Histogram::Unit::Unspecified)); + scope2.reset(); + EXPECT_EQ(0, store_->histograms().size()); + EXPECT_EQ(0, numTlsHistograms()); + + store_->shutdownThreading(); + + store_->histogramFromString("histogram_after_shutdown", Histogram::Unit::Unspecified); + tls_.shutdownThread(); } @@ -587,7 +672,7 @@ class ThreadLocalStoreNoMocksTestBase : public testing::Test { SymbolTablePtr symbol_table_; AllocatorImpl alloc_; - std::unique_ptr store_; + ThreadLocalStoreImplPtr store_; StatNamePool pool_; }; @@ -643,6 +728,7 @@ TEST_F(LookupWithStatNameTest, NotFound) { EXPECT_FALSE(store_->findCounter(not_found)); EXPECT_FALSE(store_->findGauge(not_found)); EXPECT_FALSE(store_->findHistogram(not_found)); + EXPECT_FALSE(store_->findTextReadout(not_found)); } class StatsMatcherTLSTest : public StatsThreadLocalStoreTest { @@ -716,6 +802,7 @@ TEST_F(StatsMatcherTLSTest, TestNoOpStatImpls) { store_->histogramFromString("noop_histogram", Stats::Histogram::Unit::Unspecified); EXPECT_EQ(noop_histogram.name(), ""); EXPECT_FALSE(noop_histogram.used()); + EXPECT_EQ(Stats::Histogram::Unit::Null, noop_histogram.unit()); Histogram& noop_histogram_2 = store_->histogramFromString("noop_histogram_2", Stats::Histogram::Unit::Unspecified); EXPECT_EQ(&noop_histogram, &noop_histogram_2); @@ -938,6 +1025,12 @@ class RememberStatsMatcherTest : public testing::TestWithParam { }; } + LookupStatFn lookupTextReadoutFn() { + return [this](const std::string& stat_name) -> std::string { + return scope_->textReadoutFromString(stat_name).name(); + }; + } + Stats::SymbolTablePtr 
symbol_table_; NiceMock main_thread_dispatcher_; NiceMock tls_; @@ -979,6 +1072,14 @@ TEST_P(RememberStatsMatcherTest, HistogramRejectsAll) { testRejectsAll(lookupHis TEST_P(RememberStatsMatcherTest, HistogramAcceptsAll) { testAcceptsAll(lookupHistogramFn()); } +TEST_P(RememberStatsMatcherTest, TextReadoutRejectOne) { + testRememberMatcher(lookupTextReadoutFn()); +} + +TEST_P(RememberStatsMatcherTest, TextReadoutRejectsAll) { testRejectsAll(lookupTextReadoutFn()); } + +TEST_P(RememberStatsMatcherTest, TextReadoutAcceptsAll) { testAcceptsAll(lookupTextReadoutFn()); } + TEST_F(StatsThreadLocalStoreTest, RemoveRejectedStats) { store_->initializeThreading(main_thread_dispatcher_, tls_); Counter& counter = store_->counterFromString("c1"); @@ -1064,7 +1165,7 @@ class StatsThreadLocalStoreTestNoFixture : public testing::Test { MockSink sink_; SymbolTablePtr symbol_table_; std::unique_ptr alloc_; - std::unique_ptr store_; + ThreadLocalStoreImplPtr store_; NiceMock main_thread_dispatcher_; NiceMock tls_; TestUtil::SymbolTableCreatorTestPeer symbol_table_creator_test_peer_; @@ -1087,7 +1188,7 @@ TEST_F(StatsThreadLocalStoreTestNoFixture, MemoryWithTlsFakeSymbolTable) { TestUtil::MemoryTest memory_test; TestUtil::forEachSampleStat( 100, [this](absl::string_view name) { store_->counterFromString(std::string(name)); }); - EXPECT_MEMORY_EQ(memory_test.consumedBytes(), 1498160); // Apr 8, 2020 + EXPECT_MEMORY_EQ(memory_test.consumedBytes(), 1498128); // July 30, 2020 EXPECT_MEMORY_LE(memory_test.consumedBytes(), 1.6 * million_); } @@ -1097,7 +1198,7 @@ TEST_F(StatsThreadLocalStoreTestNoFixture, MemoryWithoutTlsRealSymbolTable) { TestUtil::MemoryTest memory_test; TestUtil::forEachSampleStat( 100, [this](absl::string_view name) { store_->counterFromString(std::string(name)); }); - EXPECT_MEMORY_EQ(memory_test.consumedBytes(), 689648); // Jan 23, 2020 + EXPECT_MEMORY_EQ(memory_test.consumedBytes(), 688080); // July 2, 2020 EXPECT_MEMORY_LE(memory_test.consumedBytes(), 0.75 * 
million_); } @@ -1107,7 +1208,7 @@ TEST_F(StatsThreadLocalStoreTestNoFixture, MemoryWithTlsRealSymbolTable) { TestUtil::MemoryTest memory_test; TestUtil::forEachSampleStat( 100, [this](absl::string_view name) { store_->counterFromString(std::string(name)); }); - EXPECT_MEMORY_EQ(memory_test.consumedBytes(), 829232); // Apr 08, 2020 + EXPECT_MEMORY_EQ(memory_test.consumedBytes(), 827632); // July 20, 2020 EXPECT_MEMORY_LE(memory_test.consumedBytes(), 0.9 * million_); } @@ -1363,9 +1464,8 @@ TEST_F(HistogramTest, ParentHistogramBucketSummary) { parent_histogram->bucketSummary()); } -class ClusterShutdownCleanupStarvationTest : public ThreadLocalStoreNoMocksTestBase { -public: - static constexpr uint32_t NumThreads = 2; +class ThreadLocalRealThreadsTestBase : public ThreadLocalStoreNoMocksTestBase { +protected: static constexpr uint32_t NumScopes = 1000; static constexpr uint32_t NumIters = 35; @@ -1401,18 +1501,17 @@ class ClusterShutdownCleanupStarvationTest : public ThreadLocalStoreNoMocksTestB absl::BlockingCounter blocking_counter_; }; - ClusterShutdownCleanupStarvationTest() - : start_time_(time_system_.monotonicTime()), api_(Api::createApiForTest()), - thread_factory_(api_->threadFactory()), pool_(store_->symbolTable()), - my_counter_name_(pool_.add("my_counter")), - my_counter_scoped_name_(pool_.add("scope.my_counter")) { + ThreadLocalRealThreadsTestBase(uint32_t num_threads) + : num_threads_(num_threads), start_time_(time_system_.monotonicTime()), + api_(Api::createApiForTest()), thread_factory_(api_->threadFactory()), + pool_(store_->symbolTable()) { // This is the same order as InstanceImpl::initialize in source/server/server.cc. 
- thread_dispatchers_.resize(NumThreads); + thread_dispatchers_.resize(num_threads_); { - BlockingBarrier blocking_barrier(NumThreads + 1); + BlockingBarrier blocking_barrier(num_threads_ + 1); main_thread_ = thread_factory_.createThread( [this, &blocking_barrier]() { mainThreadFn(blocking_barrier); }); - for (uint32_t i = 0; i < NumThreads; ++i) { + for (uint32_t i = 0; i < num_threads_; ++i) { threads_.emplace_back(thread_factory_.createThread( [this, i, &blocking_barrier]() { workerThreadFn(i, blocking_barrier); })); } @@ -1432,7 +1531,7 @@ class ClusterShutdownCleanupStarvationTest : public ThreadLocalStoreNoMocksTestB } } - ~ClusterShutdownCleanupStarvationTest() override { + ~ThreadLocalRealThreadsTestBase() override { { BlockingBarrier blocking_barrier(1); main_dispatcher_->post(blocking_barrier.run([this]() { @@ -1458,14 +1557,6 @@ class ClusterShutdownCleanupStarvationTest : public ThreadLocalStoreNoMocksTestB main_thread_->join(); } - void createScopesIncCountersAndCleanup() { - for (uint32_t i = 0; i < NumScopes; ++i) { - ScopePtr scope = store_->createScope("scope."); - Counter& counter = scope->counterFromStatName(my_counter_name_); - counter.inc(); - } - } - void workerThreadFn(uint32_t thread_index, BlockingBarrier& blocking_barrier) { thread_dispatchers_[thread_index] = api_->allocateDispatcher(absl::StrCat("test_worker_", thread_index)); @@ -1479,29 +1570,62 @@ class ClusterShutdownCleanupStarvationTest : public ThreadLocalStoreNoMocksTestB main_dispatcher_->run(Event::Dispatcher::RunType::RunUntilExit); } - void createScopesIncCountersAndCleanupAllThreads() { - BlockingBarrier blocking_barrier(NumThreads); - for (Event::DispatcherPtr& thread_dispatcher : thread_dispatchers_) { - thread_dispatcher->post( - blocking_barrier.run([this]() { createScopesIncCountersAndCleanup(); })); - } + void mainDispatchBlock() { + // To ensure all stats are freed we have to wait for a few posts() to clear. 
+ // First, wait for the main-dispatcher to initiate the cross-thread TLS cleanup. + BlockingBarrier blocking_barrier(1); + main_dispatcher_->post(blocking_barrier.run([]() {})); } - std::chrono::seconds elapsedTime() { - return std::chrono::duration_cast(time_system_.monotonicTime() - - start_time_); + void tlsBlock() { + BlockingBarrier blocking_barrier(num_threads_); + for (Event::DispatcherPtr& thread_dispatcher : thread_dispatchers_) { + thread_dispatcher->post(blocking_barrier.run([]() {})); + } } + const uint32_t num_threads_; Event::TestRealTimeSystem time_system_; MonotonicTime start_time_; Api::ApiPtr api_; Event::DispatcherPtr main_dispatcher_; std::vector thread_dispatchers_; Thread::ThreadFactory& thread_factory_; - std::unique_ptr tls_; + ThreadLocal::InstanceImplPtr tls_; Thread::ThreadPtr main_thread_; std::vector threads_; StatNamePool pool_; +}; + +class ClusterShutdownCleanupStarvationTest : public ThreadLocalRealThreadsTestBase { +protected: + static constexpr uint32_t NumThreads = 2; + + ClusterShutdownCleanupStarvationTest() + : ThreadLocalRealThreadsTestBase(NumThreads), my_counter_name_(pool_.add("my_counter")), + my_counter_scoped_name_(pool_.add("scope.my_counter")) {} + + void createScopesIncCountersAndCleanup() { + for (uint32_t i = 0; i < NumScopes; ++i) { + ScopePtr scope = store_->createScope("scope."); + Counter& counter = scope->counterFromStatName(my_counter_name_); + counter.inc(); + } + } + + void createScopesIncCountersAndCleanupAllThreads() { + BlockingBarrier blocking_barrier(NumThreads); + for (Event::DispatcherPtr& thread_dispatcher : thread_dispatchers_) { + thread_dispatcher->post( + blocking_barrier.run([this]() { createScopesIncCountersAndCleanup(); })); + } + } + + std::chrono::seconds elapsedTime() { + return std::chrono::duration_cast(time_system_.monotonicTime() - + start_time_); + } + StatName my_counter_name_; StatName my_counter_scoped_name_; }; @@ -1514,24 +1638,14 @@ TEST_F(ClusterShutdownCleanupStarvationTest, 
TwelveThreadsWithBlockade) { for (uint32_t i = 0; i < NumIters && elapsedTime() < std::chrono::seconds(5); ++i) { createScopesIncCountersAndCleanupAllThreads(); - // To ensure all stats are freed we have to wait for a few posts() to clear. // First, wait for the main-dispatcher to initiate the cross-thread TLS cleanup. - auto main_dispatch_block = [this]() { - BlockingBarrier blocking_barrier(1); - main_dispatcher_->post(blocking_barrier.run([]() {})); - }; - main_dispatch_block(); + mainDispatchBlock(); // Next, wait for all the worker threads to complete their TLS cleanup. - { - BlockingBarrier blocking_barrier(NumThreads); - for (Event::DispatcherPtr& thread_dispatcher : thread_dispatchers_) { - thread_dispatcher->post(blocking_barrier.run([]() {})); - } - } + tlsBlock(); // Finally, wait for the final central-cache cleanup, which occurs on the main thread. - main_dispatch_block(); + mainDispatchBlock(); // Here we show that the counter cleanups have finished, because the use-count is 1. 
CounterSharedPtr counter = @@ -1568,5 +1682,124 @@ TEST_F(ClusterShutdownCleanupStarvationTest, TwelveThreadsWithoutBlockade) { store_->sync().signal(ThreadLocalStoreImpl::MainDispatcherCleanupSync); } +class HistogramThreadTest : public ThreadLocalRealThreadsTestBase { +protected: + static constexpr uint32_t NumThreads = 10; + + HistogramThreadTest() : ThreadLocalRealThreadsTestBase(NumThreads) {} + + void mergeHistograms() { + BlockingBarrier blocking_barrier(1); + main_dispatcher_->post([this, &blocking_barrier]() { + store_->mergeHistograms(blocking_barrier.decrementCountFn()); + }); + } + + uint32_t numTlsHistograms() { + uint32_t num; + { + BlockingBarrier blocking_barrier(1); + main_dispatcher_->post([this, &num, &blocking_barrier]() { + ThreadLocalStoreTestingPeer::numTlsHistograms(*store_, + [&num, &blocking_barrier](uint32_t num_hist) { + num = num_hist; + blocking_barrier.decrementCount(); + }); + }); + } + return num; + } + + // Executes a function on every worker thread dispatcher. + void foreachThread(const std::function& fn) { + BlockingBarrier blocking_barrier(NumThreads); + for (Event::DispatcherPtr& thread_dispatcher : thread_dispatchers_) { + thread_dispatcher->post(blocking_barrier.run(fn)); + } + } +}; + +TEST_F(HistogramThreadTest, MakeHistogramsAndRecordValues) { + foreachThread([this]() { + Histogram& histogram = + store_->histogramFromString("my_hist", Stats::Histogram::Unit::Unspecified); + histogram.recordValue(42); + }); + + mergeHistograms(); + + auto histograms = store_->histograms(); + ASSERT_EQ(1, histograms.size()); + ParentHistogramSharedPtr hist = histograms[0]; + EXPECT_THAT(hist->bucketSummary(), + HasSubstr(absl::StrCat(" B25(0,0) B50(", NumThreads, ",", NumThreads, ") "))); +} + +TEST_F(HistogramThreadTest, ScopeOverlap) { + // Creating two scopes with the same name gets you two distinct scope objects. 
+ ScopePtr scope1 = store_->createScope("scope."); + ScopePtr scope2 = store_->createScope("scope."); + EXPECT_NE(scope1, scope2); + + EXPECT_EQ(0, store_->histograms().size()); + EXPECT_EQ(0, numTlsHistograms()); + + // Histograms created in the two same-named scopes will be the same objects. + foreachThread([&scope1, &scope2]() { + Histogram& histogram = scope1->histogramFromString("histogram", Histogram::Unit::Unspecified); + EXPECT_EQ(&histogram, &scope2->histogramFromString("histogram", Histogram::Unit::Unspecified)); + histogram.recordValue(100); + }); + + mergeHistograms(); + + // Verify that we have the expected number of TLS histograms since we accessed + // the histogram on every thread. + std::vector histograms = store_->histograms(); + ASSERT_EQ(1, histograms.size()); + EXPECT_EQ(NumThreads, numTlsHistograms()); + + // There's no convenient API to pull data out of the histogram, except as + // a string. This expectation captures the bucket transition to indicate + // 0 samples at less than 100, and 10 between 100 and 249 inclusive. + EXPECT_THAT(histograms[0]->bucketSummary(), + HasSubstr(absl::StrCat(" B100(0,0) B250(", NumThreads, ",", NumThreads, ") "))); + + // The histogram was created in scope1, which can now be destroyed. But the + // histogram is kept alive by scope2. + scope1.reset(); + histograms = store_->histograms(); + EXPECT_EQ(1, histograms.size()); + EXPECT_EQ(NumThreads, numTlsHistograms()); + + // We can continue to accumulate samples at the scope2's view of the same + // histogram, and they will combine with the existing data, despite the + // fact that scope1 has been deleted. + foreachThread([&scope2]() { + Histogram& histogram = scope2->histogramFromString("histogram", Histogram::Unit::Unspecified); + histogram.recordValue(300); + }); + + mergeHistograms(); + + // Shows the bucket summary with 10 samples at >=100, and 20 at >=250. 
+ EXPECT_THAT(histograms[0]->bucketSummary(), + HasSubstr(absl::StrCat(" B100(0,0) B250(0,", NumThreads, ") B500(", NumThreads, ",", + 2 * NumThreads, ") "))); + + // Now clear everything, and synchronize the system by calling mergeHistograms(). + // THere should be no more ParentHistograms or TlsHistograms. + scope2.reset(); + histograms.clear(); + mergeHistograms(); + + EXPECT_EQ(0, store_->histograms().size()); + EXPECT_EQ(0, numTlsHistograms()); + + store_->shutdownThreading(); + + store_->histogramFromString("histogram_after_shutdown", Histogram::Unit::Unspecified); +} + } // namespace Stats } // namespace Envoy diff --git a/test/common/stats/utility_corpus/test b/test/common/stats/utility_corpus/test new file mode 100644 index 0000000000000..95d09f2b10159 --- /dev/null +++ b/test/common/stats/utility_corpus/test @@ -0,0 +1 @@ +hello world \ No newline at end of file diff --git a/test/common/stats/utility_fuzz_test.cc b/test/common/stats/utility_fuzz_test.cc new file mode 100644 index 0000000000000..e61257e2d191d --- /dev/null +++ b/test/common/stats/utility_fuzz_test.cc @@ -0,0 +1,86 @@ +#include +#include + +#include "common/stats/isolated_store_impl.h" +#include "common/stats/symbol_table_creator.h" +#include "common/stats/utility.h" + +#include "test/fuzz/fuzz_runner.h" + +#include "absl/strings/string_view.h" + +namespace Envoy { +namespace Fuzz { + +namespace { + +// The maximum number of iterations the fuzz test can run until stopped. This is +// to avoid lengthy tests and timeouts. 
+constexpr size_t MaxIterations = 1000; + +} // namespace + +DEFINE_FUZZER(const uint8_t* buf, size_t len) { + + Stats::Utility::sanitizeStatsName(absl::string_view(reinterpret_cast(buf), len)); + + if (len < 4) { + return; + } + + // Create a greater scope vector to store the string to prevent the string memory from being free + std::list string_list; + auto make_string = [&string_list](absl::string_view str) -> absl::string_view { + string_list.push_back(std::string(str)); + return string_list.back(); + }; + + // generate a random number as the maximum length of the stat name + const size_t max_len = *reinterpret_cast(buf) % (len - 3); + FuzzedDataProvider provider(buf, len); + + // model common/stats/utility_test.cc, initialize those objects to create random elements as + // input + Stats::SymbolTablePtr symbol_table; + if (provider.ConsumeBool()) { + symbol_table = std::make_unique(); + } else { + symbol_table = std::make_unique(); + } + std::unique_ptr store = + std::make_unique(*symbol_table); + Stats::StatNamePool pool(*symbol_table); + Stats::ScopePtr scope = store->createScope(provider.ConsumeRandomLengthString(max_len)); + Stats::ElementVec ele_vec; + Stats::StatNameVec sn_vec; + Stats::StatNameTagVector tags; + Stats::StatName key, val; + + if (provider.remaining_bytes() == 0) { + Stats::Utility::counterFromStatNames(*scope, {}); + Stats::Utility::counterFromElements(*scope, {}); + } else { + // Run until either running out of strings to process or a maximal number of + // iterations is reached. 
+ for (size_t iter = 0; iter < MaxIterations && provider.remaining_bytes() > 3; iter++) { + // add random length string in each loop + if (provider.ConsumeBool()) { + absl::string_view str = make_string( + provider.ConsumeRandomLengthString(std::min(max_len, provider.remaining_bytes()))); + ele_vec.push_back(Stats::DynamicName(str)); + sn_vec.push_back(pool.add(str)); + } else { + key = pool.add( + provider.ConsumeRandomLengthString(std::min(max_len, provider.remaining_bytes() / 2))); + val = pool.add( + provider.ConsumeRandomLengthString(std::min(max_len, provider.remaining_bytes()))); + tags.push_back({key, val}); + } + Stats::Utility::counterFromStatNames(*scope, sn_vec, tags); + Stats::Utility::counterFromElements(*scope, ele_vec, tags); + } + } +} + +} // namespace Fuzz +} // namespace Envoy diff --git a/test/common/stats/utility_test.cc b/test/common/stats/utility_test.cc new file mode 100644 index 0000000000000..bf1643ff4ed5d --- /dev/null +++ b/test/common/stats/utility_test.cc @@ -0,0 +1,277 @@ +#include + +#include "envoy/stats/stats_macros.h" + +#include "common/stats/isolated_store_impl.h" +#include "common/stats/null_counter.h" +#include "common/stats/null_gauge.h" +#include "common/stats/symbol_table_creator.h" +#include "common/stats/thread_local_store.h" + +#include "absl/strings/str_cat.h" +#include "absl/strings/string_view.h" +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::UnorderedElementsAre; + +namespace Envoy { +namespace Stats { +namespace { + +// All the tests should be run for both IsolatedStore and ThreadLocalStore. 
+enum class StoreType { + ThreadLocal, + Isolated, +}; + +class StatsUtilityTest : public testing::TestWithParam { +protected: + template + using IterateFn = std::function& stat)>; + using MakeStatFn = std::function; + + StatsUtilityTest() + : symbol_table_(SymbolTableCreator::makeSymbolTable()), pool_(*symbol_table_), + tags_( + {{pool_.add("tag1"), pool_.add("value1")}, {pool_.add("tag2"), pool_.add("value2")}}) { + switch (GetParam()) { + case StoreType::ThreadLocal: + alloc_ = std::make_unique(*symbol_table_), + store_ = std::make_unique(*alloc_); + break; + case StoreType::Isolated: + store_ = std::make_unique(*symbol_table_); + break; + } + scope_ = store_->createScope("scope"); + } + + ~StatsUtilityTest() override { + scope_.reset(); + pool_.clear(); + store_.reset(); + EXPECT_EQ(0, symbol_table_->numSymbols()); + } + + void init(MakeStatFn make_stat) { + make_stat(*store_, {pool_.add("symbolic1")}); + make_stat(*store_, {Stats::DynamicName("dynamic1")}); + make_stat(*scope_, {pool_.add("symbolic2")}); + make_stat(*scope_, {Stats::DynamicName("dynamic2")}); + } + + template IterateFn iterOnce() { + return [this](const RefcountPtr& stat) -> bool { + results_.insert(stat->name()); + return false; + }; + } + + template IterateFn iterAll() { + return [this](const RefcountPtr& stat) -> bool { + results_.insert(stat->name()); + return true; + }; + } + + static MakeStatFn makeCounter() { + return [](Scope& scope, const ElementVec& elements) { + Utility::counterFromElements(scope, elements).inc(); + }; + } + + static bool checkValue(const Counter& counter) { return counter.value() == 1; } + + static MakeStatFn makeGauge() { + return [](Scope& scope, const ElementVec& elements) { + Utility::gaugeFromElements(scope, elements, Gauge::ImportMode::Accumulate).inc(); + }; + } + + static bool checkValue(const Gauge& gauge) { return gauge.value() == 1; } + + static MakeStatFn makeHistogram() { + return [](Scope& scope, const ElementVec& elements) { + 
Utility::histogramFromElements(scope, elements, Histogram::Unit::Milliseconds); + }; + } + + static bool checkValue(const Histogram& histogram) { + return histogram.unit() == Histogram::Unit::Milliseconds; + } + + static MakeStatFn makeTextReadout() { + return [](Scope& scope, const ElementVec& elements) { + Utility::textReadoutFromElements(scope, elements).set("my-value"); + }; + } + + static bool checkValue(const TextReadout& text_readout) { + return text_readout.value() == "my-value"; + } + + template void storeOnce(const MakeStatFn make_stat) { + CachedReference symbolic1_ref(*store_, "symbolic1"); + CachedReference dynamic1_ref(*store_, "dynamic1"); + EXPECT_FALSE(symbolic1_ref.get()); + EXPECT_FALSE(dynamic1_ref.get()); + + init(make_stat); + + ASSERT_TRUE(symbolic1_ref.get()); + ASSERT_TRUE(dynamic1_ref.get()); + EXPECT_FALSE(store_->iterate(iterOnce())); + EXPECT_EQ(1, results_.size()); + EXPECT_TRUE(checkValue(*symbolic1_ref.get())); + EXPECT_TRUE(checkValue(*dynamic1_ref.get())); + } + + template void storeAll(const MakeStatFn make_stat) { + init(make_stat); + EXPECT_TRUE(store_->iterate(iterAll())); + EXPECT_THAT(results_, + UnorderedElementsAre("symbolic1", "dynamic1", "scope.symbolic2", "scope.dynamic2")); + } + + template void scopeOnce(const MakeStatFn make_stat) { + CachedReference symbolic2_ref(*store_, "scope.symbolic2"); + CachedReference dynamic2_ref(*store_, "scope.dynamic2"); + EXPECT_FALSE(symbolic2_ref.get()); + EXPECT_FALSE(dynamic2_ref.get()); + + init(make_stat); + + ASSERT_TRUE(symbolic2_ref.get()); + ASSERT_TRUE(dynamic2_ref.get()); + EXPECT_FALSE(scope_->iterate(iterOnce())); + EXPECT_EQ(1, results_.size()); + EXPECT_TRUE(checkValue(*symbolic2_ref.get())); + EXPECT_TRUE(checkValue(*dynamic2_ref.get())); + } + + template void scopeAll(const MakeStatFn make_stat) { + init(make_stat); + EXPECT_TRUE(scope_->iterate(iterAll())); + EXPECT_THAT(results_, UnorderedElementsAre("scope.symbolic2", "scope.dynamic2")); + } + + SymbolTablePtr 
symbol_table_; + StatNamePool pool_; + std::unique_ptr alloc_; + std::unique_ptr store_; + ScopePtr scope_; + absl::flat_hash_set results_; + StatNameTagVector tags_; +}; + +INSTANTIATE_TEST_SUITE_P(StatsUtilityTest, StatsUtilityTest, + testing::ValuesIn({StoreType::ThreadLocal, StoreType::Isolated})); + +TEST_P(StatsUtilityTest, Counters) { + ScopePtr scope = store_->createScope("scope."); + Counter& c1 = Utility::counterFromElements(*scope, {DynamicName("a"), DynamicName("b")}); + EXPECT_EQ("scope.a.b", c1.name()); + StatName token = pool_.add("token"); + Counter& c2 = Utility::counterFromElements(*scope, {DynamicName("a"), token, DynamicName("b")}); + EXPECT_EQ("scope.a.token.b", c2.name()); + StatName suffix = pool_.add("suffix"); + Counter& c3 = Utility::counterFromElements(*scope, {token, suffix}); + EXPECT_EQ("scope.token.suffix", c3.name()); + Counter& c4 = Utility::counterFromStatNames(*scope, {token, suffix}); + EXPECT_EQ("scope.token.suffix", c4.name()); + EXPECT_EQ(&c3, &c4); + + Counter& ctags = + Utility::counterFromElements(*scope, {DynamicName("x"), token, DynamicName("y")}, tags_); + EXPECT_EQ("scope.x.token.y.tag1.value1.tag2.value2", ctags.name()); +} + +TEST_P(StatsUtilityTest, Gauges) { + ScopePtr scope = store_->createScope("scope."); + Gauge& g1 = Utility::gaugeFromElements(*scope, {DynamicName("a"), DynamicName("b")}, + Gauge::ImportMode::NeverImport); + EXPECT_EQ("scope.a.b", g1.name()); + EXPECT_EQ(Gauge::ImportMode::NeverImport, g1.importMode()); + StatName token = pool_.add("token"); + Gauge& g2 = Utility::gaugeFromElements(*scope, {DynamicName("a"), token, DynamicName("b")}, + Gauge::ImportMode::Accumulate); + EXPECT_EQ("scope.a.token.b", g2.name()); + EXPECT_EQ(Gauge::ImportMode::Accumulate, g2.importMode()); + StatName suffix = pool_.add("suffix"); + Gauge& g3 = Utility::gaugeFromElements(*scope, {token, suffix}, Gauge::ImportMode::NeverImport); + EXPECT_EQ("scope.token.suffix", g3.name()); + Gauge& g4 = 
Utility::gaugeFromStatNames(*scope, {token, suffix}, Gauge::ImportMode::NeverImport); + EXPECT_EQ("scope.token.suffix", g4.name()); + EXPECT_EQ(&g3, &g4); +} + +TEST_P(StatsUtilityTest, Histograms) { + ScopePtr scope = store_->createScope("scope."); + Histogram& h1 = Utility::histogramFromElements(*scope, {DynamicName("a"), DynamicName("b")}, + Histogram::Unit::Milliseconds); + EXPECT_EQ("scope.a.b", h1.name()); + EXPECT_EQ(Histogram::Unit::Milliseconds, h1.unit()); + StatName token = pool_.add("token"); + Histogram& h2 = Utility::histogramFromElements( + *scope, {DynamicName("a"), token, DynamicName("b")}, Histogram::Unit::Microseconds); + EXPECT_EQ("scope.a.token.b", h2.name()); + EXPECT_EQ(Histogram::Unit::Microseconds, h2.unit()); + StatName suffix = pool_.add("suffix"); + Histogram& h3 = Utility::histogramFromElements(*scope, {token, suffix}, Histogram::Unit::Bytes); + EXPECT_EQ("scope.token.suffix", h3.name()); + EXPECT_EQ(Histogram::Unit::Bytes, h3.unit()); + Histogram& h4 = Utility::histogramFromStatNames(*scope, {token, suffix}, Histogram::Unit::Bytes); + EXPECT_EQ(&h3, &h4); +} + +TEST_P(StatsUtilityTest, TextReadouts) { + ScopePtr scope = store_->createScope("scope."); + TextReadout& t1 = Utility::textReadoutFromElements(*scope, {DynamicName("a"), DynamicName("b")}); + EXPECT_EQ("scope.a.b", t1.name()); + StatName token = pool_.add("token"); + TextReadout& t2 = + Utility::textReadoutFromElements(*scope, {DynamicName("a"), token, DynamicName("b")}); + EXPECT_EQ("scope.a.token.b", t2.name()); + StatName suffix = pool_.add("suffix"); + TextReadout& t3 = Utility::textReadoutFromElements(*scope, {token, suffix}); + EXPECT_EQ("scope.token.suffix", t3.name()); + TextReadout& t4 = Utility::textReadoutFromStatNames(*scope, {token, suffix}); + EXPECT_EQ(&t3, &t4); +} + +TEST_P(StatsUtilityTest, StoreCounterOnce) { storeOnce(makeCounter()); } + +TEST_P(StatsUtilityTest, StoreCounterAll) { storeAll(makeCounter()); } + +TEST_P(StatsUtilityTest, ScopeCounterOnce) { 
scopeOnce(makeCounter()); } + +TEST_P(StatsUtilityTest, ScopeCounterAll) { scopeAll(makeCounter()); } + +TEST_P(StatsUtilityTest, StoreGaugeOnce) { storeOnce(makeGauge()); } + +TEST_P(StatsUtilityTest, StoreGaugeAll) { storeAll(makeGauge()); } + +TEST_P(StatsUtilityTest, ScopeGaugeOnce) { scopeOnce(makeGauge()); } + +TEST_P(StatsUtilityTest, ScopeGaugeAll) { scopeAll(makeGauge()); } + +TEST_P(StatsUtilityTest, StoreHistogramOnce) { storeOnce(makeHistogram()); } + +TEST_P(StatsUtilityTest, StoreHistogramAll) { storeAll(makeHistogram()); } + +TEST_P(StatsUtilityTest, ScopeHistogramOnce) { scopeOnce(makeHistogram()); } + +TEST_P(StatsUtilityTest, ScopeHistogramAll) { scopeAll(makeHistogram()); } + +TEST_P(StatsUtilityTest, StoreTextReadoutOnce) { storeOnce(makeTextReadout()); } + +TEST_P(StatsUtilityTest, StoreTextReadoutAll) { storeAll(makeTextReadout()); } + +TEST_P(StatsUtilityTest, ScopeTextReadoutOnce) { scopeOnce(makeTextReadout()); } + +TEST_P(StatsUtilityTest, ScopeTextReadoutAll) { scopeAll(makeTextReadout()); } + +} // namespace +} // namespace Stats +} // namespace Envoy diff --git a/test/common/stream_info/BUILD b/test/common/stream_info/BUILD index de97bc18ddeaf..9b88e001fc35c 100644 --- a/test/common/stream_info/BUILD +++ b/test/common/stream_info/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", @@ -7,6 +5,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( diff --git a/test/common/stream_info/stream_info_impl_test.cc b/test/common/stream_info/stream_info_impl_test.cc index 19d86a4d98c4a..5c31924b59873 100644 --- a/test/common/stream_info/stream_info_impl_test.cc +++ b/test/common/stream_info/stream_info_impl_test.cc @@ -232,7 +232,7 @@ TEST_F(StreamInfoImplTest, RequestHeadersTest) { StreamInfoImpl stream_info(Http::Protocol::Http2, test_time_.timeSystem()); EXPECT_FALSE(stream_info.getRequestHeaders()); - Http::RequestHeaderMapImpl headers; + 
Http::TestRequestHeaderMapImpl headers; stream_info.setRequestHeaders(headers); EXPECT_EQ(&headers, stream_info.getRequestHeaders()); } @@ -243,8 +243,8 @@ TEST_F(StreamInfoImplTest, DefaultRequestIDExtensionTest) { auto rid_extension = stream_info.getRequestIDExtension(); - Http::RequestHeaderMapImpl request_headers; - Http::ResponseHeaderMapImpl response_headers; + Http::TestRequestHeaderMapImpl request_headers; + Http::TestResponseHeaderMapImpl response_headers; rid_extension->set(request_headers, false); rid_extension->set(request_headers, true); rid_extension->setInResponse(response_headers, request_headers); diff --git a/test/common/stream_info/test_util.h b/test/common/stream_info/test_util.h index b141abeb0c2eb..5767592c74060 100644 --- a/test/common/stream_info/test_util.h +++ b/test/common/stream_info/test_util.h @@ -4,8 +4,8 @@ #include "envoy/stream_info/stream_info.h" #include "common/common/assert.h" +#include "common/common/random_generator.h" #include "common/http/request_id_extension_impl.h" -#include "common/runtime/runtime_impl.h" #include "common/stream_info/filter_state_impl.h" #include "test/test_common/simulated_time_system.h" @@ -227,7 +227,7 @@ class TestStreamInfo : public StreamInfo::StreamInfo { return upstream_cluster_info_; } - Runtime::RandomGeneratorImpl random_; + Random::RandomGeneratorImpl random_; SystemTime start_time_; MonotonicTime start_time_monotonic_; diff --git a/test/common/stream_info/utility_test.cc b/test/common/stream_info/utility_test.cc index b3a02d18f1170..f74faa9022201 100644 --- a/test/common/stream_info/utility_test.cc +++ b/test/common/stream_info/utility_test.cc @@ -15,7 +15,7 @@ namespace StreamInfo { namespace { TEST(ResponseFlagUtilsTest, toShortStringConversion) { - static_assert(ResponseFlag::LastFlag == 0x40000, "A flag has been added. Fix this code."); + static_assert(ResponseFlag::LastFlag == 0x200000, "A flag has been added. 
Fix this code."); std::vector> expected = { std::make_pair(ResponseFlag::FailedLocalHealthCheck, "LH"), @@ -37,7 +37,9 @@ TEST(ResponseFlagUtilsTest, toShortStringConversion) { std::make_pair(ResponseFlag::StreamIdleTimeout, "SI"), std::make_pair(ResponseFlag::InvalidEnvoyRequestHeaders, "IH"), std::make_pair(ResponseFlag::DownstreamProtocolError, "DPE"), - }; + std::make_pair(ResponseFlag::UpstreamMaxStreamDurationReached, "UMSDR"), + std::make_pair(ResponseFlag::ResponseFromCacheFilter, "RFCF"), + std::make_pair(ResponseFlag::NoFilterConfigFound, "NFCF")}; for (const auto& test_case : expected) { NiceMock stream_info; @@ -65,7 +67,7 @@ TEST(ResponseFlagUtilsTest, toShortStringConversion) { } TEST(ResponseFlagsUtilsTest, toResponseFlagConversion) { - static_assert(ResponseFlag::LastFlag == 0x40000, "A flag has been added. Fix this code."); + static_assert(ResponseFlag::LastFlag == 0x200000, "A flag has been added. Fix this code."); std::vector> expected = { std::make_pair("LH", ResponseFlag::FailedLocalHealthCheck), @@ -87,7 +89,9 @@ TEST(ResponseFlagsUtilsTest, toResponseFlagConversion) { std::make_pair("SI", ResponseFlag::StreamIdleTimeout), std::make_pair("IH", ResponseFlag::InvalidEnvoyRequestHeaders), std::make_pair("DPE", ResponseFlag::DownstreamProtocolError), - }; + std::make_pair("UMSDR", ResponseFlag::UpstreamMaxStreamDurationReached), + std::make_pair("RFCF", ResponseFlag::ResponseFromCacheFilter), + std::make_pair("NFCF", ResponseFlag::NoFilterConfigFound)}; EXPECT_FALSE(ResponseFlagUtils::toResponseFlag("NonExistentFlag").has_value()); diff --git a/test/common/tcp/BUILD b/test/common/tcp/BUILD index 097d23b9ecb6e..f455f407e7073 100644 --- a/test/common/tcp/BUILD +++ b/test/common/tcp/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( diff --git a/test/common/tcp/conn_pool_test.cc 
b/test/common/tcp/conn_pool_test.cc index 1470abc69c37c..70fb4a6bc3fa9 100644 --- a/test/common/tcp/conn_pool_test.cc +++ b/test/common/tcp/conn_pool_test.cc @@ -4,6 +4,7 @@ #include "common/event/dispatcher_impl.h" #include "common/network/utility.h" #include "common/tcp/conn_pool.h" +#include "common/tcp/original_conn_pool.h" #include "common/upstream/upstream_impl.h" #include "test/common/upstream/utility.h" @@ -18,8 +19,8 @@ #include "gtest/gtest.h" using testing::_; -using testing::InSequence; using testing::Invoke; +using testing::InvokeWithoutArgs; using testing::NiceMock; using testing::Property; using testing::Return; @@ -65,20 +66,22 @@ struct ConnPoolCallbacks : public Tcp::ConnectionPool::Callbacks { }; /** - * A test version of ConnPoolImpl that allows for mocking. + * A wrapper around a ConnectionPoolImpl which tracks when the bridge between + * the pool and the consumer of the connection is released and destroyed. */ -class ConnPoolImplForTest : public ConnPoolImpl { +class ConnPoolBase : public Tcp::ConnectionPool::Instance { public: - ConnPoolImplForTest(Event::MockDispatcher& dispatcher, Upstream::HostSharedPtr host, - NiceMock* upstream_ready_timer) - : ConnPoolImpl(dispatcher, host, Upstream::ResourcePriority::Default, nullptr, nullptr), - mock_dispatcher_(dispatcher), mock_upstream_ready_timer_(upstream_ready_timer) {} - - ~ConnPoolImplForTest() override { - EXPECT_EQ(0U, ready_conns_.size()); - EXPECT_EQ(0U, busy_conns_.size()); - EXPECT_EQ(0U, pending_requests_.size()); + ConnPoolBase(Event::MockDispatcher& dispatcher, Upstream::HostSharedPtr host, + NiceMock* upstream_ready_cb, + bool test_new_connection_pool); + + void addDrainedCallback(DrainedCb cb) override { conn_pool_->addDrainedCallback(cb); } + void drainConnections() override { conn_pool_->drainConnections(); } + void closeConnections() override { conn_pool_->closeConnections(); } + ConnectionPool::Cancellable* newConnection(Tcp::ConnectionPool::Callbacks& callbacks) override { + 
return conn_pool_->newConnection(callbacks); } + Upstream::HostDescriptionConstSharedPtr host() const override { return conn_pool_->host(); } MOCK_METHOD(void, onConnReleasedForTest, ()); MOCK_METHOD(void, onConnDestroyedForTest, ()); @@ -102,81 +105,146 @@ class ConnPoolImplForTest : public ConnPoolImpl { [&](Network::ReadFilterSharedPtr filter) -> void { test_conn.filter_ = filter; })); EXPECT_CALL(*test_conn.connection_, connect()); EXPECT_CALL(*test_conn.connect_timer_, enableTimer(_, _)); - } - void expectEnableUpstreamReady() { - EXPECT_FALSE(upstream_ready_enabled_); - EXPECT_CALL(*mock_upstream_ready_timer_, enableTimer(_, _)).Times(1).RetiresOnSaturation(); + ON_CALL(*test_conn.connection_, close(Network::ConnectionCloseType::NoFlush)) + .WillByDefault(InvokeWithoutArgs([test_conn]() -> void { + test_conn.connection_->raiseEvent(Network::ConnectionEvent::LocalClose); + })); } - void expectAndRunUpstreamReady() { - EXPECT_TRUE(upstream_ready_enabled_); - mock_upstream_ready_timer_->invokeCallback(); - EXPECT_FALSE(upstream_ready_enabled_); - } + void expectEnableUpstreamReady(bool run); + std::unique_ptr conn_pool_; Event::MockDispatcher& mock_dispatcher_; - NiceMock* mock_upstream_ready_timer_; + NiceMock* mock_upstream_ready_cb_; std::vector test_conns_; + Network::ConnectionCallbacks* callbacks_ = nullptr; + bool test_new_connection_pool_; protected: - void onConnReleased(ConnPoolImpl::ActiveConn& conn) override { - for (auto& test_conn : test_conns_) { - if (conn.conn_.get() == test_conn.connection_) { - onConnReleasedForTest(); - break; - } + class ConnPoolImplForTest : public ConnPoolImpl { + public: + ConnPoolImplForTest(Event::MockDispatcher& dispatcher, Upstream::HostSharedPtr host, + ConnPoolBase& parent) + : ConnPoolImpl(dispatcher, host, Upstream::ResourcePriority::Default, nullptr, nullptr), + parent_(parent) {} + + void onConnReleased(Envoy::ConnectionPool::ActiveClient& client) override { + ConnPoolImpl::onConnReleased(client); + 
parent_.onConnReleasedForTest(); } - ConnPoolImpl::onConnReleased(conn); - } + void onConnDestroyed() override { parent_.onConnDestroyedForTest(); } + ConnPoolBase& parent_; + }; + + class OriginalConnPoolImplForTest : public OriginalConnPoolImpl { + public: + OriginalConnPoolImplForTest(Event::MockDispatcher& dispatcher, Upstream::HostSharedPtr host, + ConnPoolBase& parent) + : OriginalConnPoolImpl(dispatcher, host, Upstream::ResourcePriority::Default, nullptr, + nullptr), + parent_(parent) {} + + ~OriginalConnPoolImplForTest() override { + EXPECT_EQ(0U, ready_conns_.size()); + EXPECT_EQ(0U, busy_conns_.size()); + EXPECT_EQ(0U, pending_requests_.size()); + } + + void onConnReleased(OriginalConnPoolImpl::ActiveConn& conn) override { + parent_.onConnReleasedForTest(); + OriginalConnPoolImpl::onConnReleased(conn); + } - void onConnDestroyed(ConnPoolImpl::ActiveConn& conn) override { - for (auto i = test_conns_.begin(); i != test_conns_.end(); i++) { - if (conn.conn_.get() == i->connection_) { - onConnDestroyedForTest(); - test_conns_.erase(i); - break; + void onConnDestroyed(OriginalConnPoolImpl::ActiveConn& conn) override { + parent_.onConnDestroyedForTest(); + OriginalConnPoolImpl::onConnDestroyed(conn); + } + void expectEnableUpstreamReady(bool run) { + if (!run) { + EXPECT_FALSE(upstream_ready_enabled_); + EXPECT_CALL(*parent_.mock_upstream_ready_cb_, scheduleCallbackCurrentIteration()) + .Times(1) + .RetiresOnSaturation(); + } else { + EXPECT_TRUE(upstream_ready_enabled_); + parent_.mock_upstream_ready_cb_->invokeCallback(); + EXPECT_FALSE(upstream_ready_enabled_); } } + ConnPoolBase& parent_; + }; +}; - ConnPoolImpl::onConnDestroyed(conn); +ConnPoolBase::ConnPoolBase(Event::MockDispatcher& dispatcher, Upstream::HostSharedPtr host, + NiceMock* upstream_ready_cb, + bool test_new_connection_pool) + : mock_dispatcher_(dispatcher), mock_upstream_ready_cb_(upstream_ready_cb), + test_new_connection_pool_(test_new_connection_pool) { + if (test_new_connection_pool_) { + 
conn_pool_ = std::make_unique(dispatcher, host, *this); + } else { + conn_pool_ = std::make_unique(dispatcher, host, *this); } -}; +} + +void ConnPoolBase::expectEnableUpstreamReady(bool run) { + if (!test_new_connection_pool_) { + dynamic_cast(conn_pool_.get())->expectEnableUpstreamReady(run); + } else { + if (!run) { + EXPECT_CALL(*mock_upstream_ready_cb_, scheduleCallbackCurrentIteration()) + .Times(1) + .RetiresOnSaturation(); + } else { + mock_upstream_ready_cb_->invokeCallback(); + } + } +} /** * Test fixture for connection pool tests. */ -class TcpConnPoolImplTest : public testing::Test { +class TcpConnPoolImplTest : public testing::TestWithParam { public: TcpConnPoolImplTest() - : upstream_ready_timer_(new NiceMock(&dispatcher_)), + : test_new_connection_pool_(GetParam()), + upstream_ready_cb_(new NiceMock(&dispatcher_)), host_(Upstream::makeTestHost(cluster_, "tcp://127.0.0.1:9000")), - conn_pool_(dispatcher_, host_, upstream_ready_timer_) {} + conn_pool_(dispatcher_, host_, upstream_ready_cb_, test_new_connection_pool_) {} ~TcpConnPoolImplTest() override { - EXPECT_TRUE(TestUtility::gaugesZeroed(cluster_->stats_store_.gauges())); + EXPECT_TRUE(TestUtility::gaugesZeroed(cluster_->stats_store_.gauges())) + << TestUtility::nonZeroedGauges(cluster_->stats_store_.gauges()); } + bool test_new_connection_pool_; NiceMock dispatcher_; std::shared_ptr cluster_{new NiceMock()}; - NiceMock* upstream_ready_timer_; + NiceMock* upstream_ready_cb_; Upstream::HostSharedPtr host_; - ConnPoolImplForTest conn_pool_; + ConnPoolBase conn_pool_; NiceMock runtime_; }; /** * Test fixture for connection pool destructor tests. 
*/ -class TcpConnPoolImplDestructorTest : public testing::Test { +class TcpConnPoolImplDestructorTest : public testing::TestWithParam { public: TcpConnPoolImplDestructorTest() - : upstream_ready_timer_(new NiceMock(&dispatcher_)), - conn_pool_{new ConnPoolImpl(dispatcher_, - Upstream::makeTestHost(cluster_, "tcp://127.0.0.1:9000"), - Upstream::ResourcePriority::Default, nullptr, nullptr)} {} - + : test_new_connection_pool_(GetParam()), + upstream_ready_cb_(new NiceMock(&dispatcher_)) { + host_ = Upstream::makeTestHost(cluster_, "tcp://127.0.0.1:9000"); + if (test_new_connection_pool_) { + conn_pool_ = std::make_unique( + dispatcher_, host_, Upstream::ResourcePriority::Default, nullptr, nullptr); + } else { + conn_pool_ = std::make_unique( + dispatcher_, host_, Upstream::ResourcePriority::Default, nullptr, nullptr); + } + } ~TcpConnPoolImplDestructorTest() override = default; void prepareConn() { @@ -194,12 +262,14 @@ class TcpConnPoolImplDestructorTest : public testing::Test { connection_->raiseEvent(Network::ConnectionEvent::Connected); } + bool test_new_connection_pool_; + Upstream::HostConstSharedPtr host_; NiceMock dispatcher_; std::shared_ptr cluster_{new NiceMock()}; - NiceMock* upstream_ready_timer_; + NiceMock* upstream_ready_cb_; NiceMock* connect_timer_; NiceMock* connection_; - std::unique_ptr conn_pool_; + std::unique_ptr conn_pool_; std::unique_ptr callbacks_; }; @@ -264,14 +334,13 @@ struct ActiveTestConn { bool completed_{}; }; -TEST_F(TcpConnPoolImplTest, HostAccessor) { EXPECT_EQ(conn_pool_.host(), host_); } +TEST_P(TcpConnPoolImplTest, HostAccessor) { EXPECT_EQ(conn_pool_.host(), host_); } /** * Verify that connections are drained when requested. 
*/ -TEST_F(TcpConnPoolImplTest, DrainConnections) { +TEST_P(TcpConnPoolImplTest, DrainConnections) { cluster_->resetResourceManager(3, 1024, 1024, 1, 1); - InSequence s; ActiveTestConn c1(*this, 0, ActiveTestConn::Type::CreateConnection); ActiveTestConn c2(*this, 1, ActiveTestConn::Type::CreateConnection); @@ -280,32 +349,35 @@ TEST_F(TcpConnPoolImplTest, DrainConnections) { EXPECT_CALL(conn_pool_, onConnReleasedForTest()); c1.releaseConn(); - // This will destroy the ready connection and set requests remaining to 1 on the busy and pending - // connections. - EXPECT_CALL(conn_pool_, onConnDestroyedForTest()); - conn_pool_.drainConnections(); - dispatcher_.clearDeferredDeleteList(); - - // This will destroy the busy connection when the response finishes. - EXPECT_CALL(conn_pool_, onConnReleasedForTest()); - EXPECT_CALL(conn_pool_, onConnDestroyedForTest()); - c2.releaseConn(); - dispatcher_.clearDeferredDeleteList(); - - // This will destroy the pending connection when the response finishes. - c3.conn_index_ = 0; // c1/c2 have been deleted from test_conns_. - c3.completeConnection(); - - EXPECT_CALL(conn_pool_, onConnReleasedForTest()); - EXPECT_CALL(conn_pool_, onConnDestroyedForTest()); - c3.releaseConn(); - dispatcher_.clearDeferredDeleteList(); + { + // This will destroy the ready connection and set requests remaining to 1 on the busy and + // pending connections. + EXPECT_CALL(conn_pool_, onConnDestroyedForTest()); + conn_pool_.drainConnections(); + dispatcher_.clearDeferredDeleteList(); + } + { + // This will destroy the busy connection when the response finishes. + EXPECT_CALL(conn_pool_, onConnReleasedForTest()); + EXPECT_CALL(conn_pool_, onConnDestroyedForTest()); + c2.releaseConn(); + dispatcher_.clearDeferredDeleteList(); + } + { + // This will destroy the pending connection when the response finishes. 
+ c3.completeConnection(); + + EXPECT_CALL(conn_pool_, onConnReleasedForTest()); + EXPECT_CALL(conn_pool_, onConnDestroyedForTest()); + c3.releaseConn(); + dispatcher_.clearDeferredDeleteList(); + } } /** * Test all timing stats are set. */ -TEST_F(TcpConnPoolImplTest, VerifyTimingStats) { +TEST_P(TcpConnPoolImplTest, VerifyTimingStats) { EXPECT_CALL(cluster_->stats_store_, deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_cx_connect_ms"), _)); EXPECT_CALL(cluster_->stats_store_, @@ -325,7 +397,7 @@ TEST_F(TcpConnPoolImplTest, VerifyTimingStats) { /** * Test that buffer limits are set. */ -TEST_F(TcpConnPoolImplTest, VerifyBufferLimits) { +TEST_P(TcpConnPoolImplTest, VerifyBufferLimits) { ConnPoolCallbacks callbacks; conn_pool_.expectConnCreate(); EXPECT_CALL(*cluster_, perConnectionBufferLimitBytes()).WillOnce(Return(8192)); @@ -343,10 +415,9 @@ TEST_F(TcpConnPoolImplTest, VerifyBufferLimits) { /** * Test that upstream callback fire for assigned connections. */ -TEST_F(TcpConnPoolImplTest, UpstreamCallbacks) { +TEST_P(TcpConnPoolImplTest, UpstreamCallbacks) { Buffer::OwnedImpl buffer; - InSequence s; ConnectionPool::MockUpstreamCallbacks callbacks; // Create connection, set UpstreamCallbacks @@ -380,10 +451,9 @@ TEST_F(TcpConnPoolImplTest, UpstreamCallbacks) { /** * Test that upstream callback close event fires for assigned connections. */ -TEST_F(TcpConnPoolImplTest, UpstreamCallbacksCloseEvent) { +TEST_P(TcpConnPoolImplTest, UpstreamCallbacksCloseEvent) { Buffer::OwnedImpl buffer; - InSequence s; ConnectionPool::MockUpstreamCallbacks callbacks; // Create connection, set UpstreamCallbacks @@ -400,11 +470,9 @@ TEST_F(TcpConnPoolImplTest, UpstreamCallbacksCloseEvent) { /** * Test that a connection pool functions without upstream callbacks. */ -TEST_F(TcpConnPoolImplTest, NoUpstreamCallbacks) { +TEST_P(TcpConnPoolImplTest, NoUpstreamCallbacks) { Buffer::OwnedImpl buffer; - InSequence s; - // Create connection. 
ActiveTestConn c1(*this, 0, ActiveTestConn::Type::CreateConnection); @@ -419,8 +487,7 @@ TEST_F(TcpConnPoolImplTest, NoUpstreamCallbacks) { * Tests a request that generates a new connection, completes, and then a second request that uses * the same connection. */ -TEST_F(TcpConnPoolImplTest, MultipleRequestAndResponse) { - InSequence s; +TEST_P(TcpConnPoolImplTest, MultipleRequestAndResponse) { // Request 1 should kick off a new connection. ActiveTestConn c1(*this, 0, ActiveTestConn::Type::CreateConnection); @@ -443,8 +510,7 @@ TEST_F(TcpConnPoolImplTest, MultipleRequestAndResponse) { /** * Tests ConnectionState assignment, lookup and destruction. */ -TEST_F(TcpConnPoolImplTest, ConnectionStateLifecycle) { - InSequence s; +TEST_P(TcpConnPoolImplTest, ConnectionStateLifecycle) { bool state_destroyed = false; @@ -482,7 +548,7 @@ TEST_F(TcpConnPoolImplTest, ConnectionStateLifecycle) { /** * Test when we overflow max pending requests. */ -TEST_F(TcpConnPoolImplTest, MaxPendingRequests) { +TEST_P(TcpConnPoolImplTest, MaxPendingRequests) { cluster_->resetResourceManager(1, 1, 1024, 1, 1); ConnPoolCallbacks callbacks; @@ -510,8 +576,7 @@ TEST_F(TcpConnPoolImplTest, MaxPendingRequests) { * Tests a connection failure before a request is bound which should result in the pending request * getting purged. */ -TEST_F(TcpConnPoolImplTest, RemoteConnectFailure) { - InSequence s; +TEST_P(TcpConnPoolImplTest, RemoteConnectFailure) { // Request 1 should kick off a new connection. ConnPoolCallbacks callbacks; @@ -536,8 +601,7 @@ TEST_F(TcpConnPoolImplTest, RemoteConnectFailure) { * Tests a connection failure before a request is bound which should result in the pending request * getting purged. */ -TEST_F(TcpConnPoolImplTest, LocalConnectFailure) { - InSequence s; +TEST_P(TcpConnPoolImplTest, LocalConnectFailure) { // Request 1 should kick off a new connection. 
ConnPoolCallbacks callbacks; @@ -561,8 +625,7 @@ TEST_F(TcpConnPoolImplTest, LocalConnectFailure) { /** * Tests a connect timeout. Also test that we can add a new request during ejection processing. */ -TEST_F(TcpConnPoolImplTest, ConnectTimeout) { - InSequence s; +TEST_P(TcpConnPoolImplTest, ConnectTimeout) { // Request 1 should kick off a new connection. ConnPoolCallbacks callbacks1; @@ -593,8 +656,7 @@ TEST_F(TcpConnPoolImplTest, ConnectTimeout) { /** * Test cancelling before the request is bound to a connection. */ -TEST_F(TcpConnPoolImplTest, CancelBeforeBound) { - InSequence s; +TEST_P(TcpConnPoolImplTest, CancelBeforeBound) { // Request 1 should kick off a new connection. ConnPoolCallbacks callbacks; @@ -614,8 +676,7 @@ TEST_F(TcpConnPoolImplTest, CancelBeforeBound) { /** * Test cancelling before the request is bound to a connection, with connection close. */ -TEST_F(TcpConnPoolImplTest, CancelAndCloseBeforeBound) { - InSequence s; +TEST_P(TcpConnPoolImplTest, CancelAndCloseBeforeBound) { // Request 1 should kick off a new connection. ConnPoolCallbacks callbacks; @@ -633,8 +694,7 @@ TEST_F(TcpConnPoolImplTest, CancelAndCloseBeforeBound) { /** * Test an upstream disconnection while there is a bound request. */ -TEST_F(TcpConnPoolImplTest, DisconnectWhileBound) { - InSequence s; +TEST_P(TcpConnPoolImplTest, DisconnectWhileBound) { // Request 1 should kick off a new connection. ConnPoolCallbacks callbacks; @@ -655,9 +715,8 @@ TEST_F(TcpConnPoolImplTest, DisconnectWhileBound) { /** * Test upstream disconnection of one request while another is pending. */ -TEST_F(TcpConnPoolImplTest, DisconnectWhilePending) { +TEST_P(TcpConnPoolImplTest, DisconnectWhilePending) { cluster_->resetResourceManager(1, 1024, 1024, 1, 1); - InSequence s; // First request connected. 
ConnPoolCallbacks callbacks; @@ -681,26 +740,24 @@ TEST_F(TcpConnPoolImplTest, DisconnectWhilePending) { EXPECT_CALL(conn_pool_, onConnDestroyedForTest()); dispatcher_.clearDeferredDeleteList(); - // test_conns_[0] was replaced with a new connection - EXPECT_CALL(*conn_pool_.test_conns_[0].connect_timer_, disableTimer()); + // test_conns_[1] is the new connection + EXPECT_CALL(*conn_pool_.test_conns_[1].connect_timer_, disableTimer()); EXPECT_CALL(callbacks2.pool_ready_, ready()); - conn_pool_.test_conns_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); + conn_pool_.test_conns_[1].connection_->raiseEvent(Network::ConnectionEvent::Connected); EXPECT_CALL(conn_pool_, onConnReleasedForTest()); callbacks2.conn_data_.reset(); // Disconnect EXPECT_CALL(conn_pool_, onConnDestroyedForTest()); - conn_pool_.test_conns_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + conn_pool_.test_conns_[1].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); dispatcher_.clearDeferredDeleteList(); } /** * Test that we correctly handle reaching max connections. */ -TEST_F(TcpConnPoolImplTest, MaxConnections) { - InSequence s; - +TEST_P(TcpConnPoolImplTest, MaxConnections) { // Request 1 should kick off a new connection. ConnPoolCallbacks callbacks; conn_pool_.expectConnCreate(); @@ -721,11 +778,11 @@ TEST_F(TcpConnPoolImplTest, MaxConnections) { // Finishing request 1 will immediately bind to request 2. EXPECT_CALL(conn_pool_, onConnReleasedForTest()); - conn_pool_.expectEnableUpstreamReady(); + conn_pool_.expectEnableUpstreamReady(false); EXPECT_CALL(callbacks2.pool_ready_, ready()); callbacks.conn_data_.reset(); - conn_pool_.expectAndRunUpstreamReady(); + conn_pool_.expectEnableUpstreamReady(true); EXPECT_CALL(conn_pool_, onConnReleasedForTest()); callbacks2.conn_data_.reset(); @@ -738,8 +795,7 @@ TEST_F(TcpConnPoolImplTest, MaxConnections) { /** * Test when we reach max requests per connection. 
*/ -TEST_F(TcpConnPoolImplTest, MaxRequestsPerConnection) { - InSequence s; +TEST_P(TcpConnPoolImplTest, MaxRequestsPerConnection) { cluster_->max_requests_per_connection_ = 1; @@ -765,9 +821,8 @@ TEST_F(TcpConnPoolImplTest, MaxRequestsPerConnection) { /* * Test that multiple connections can be assigned at once. */ -TEST_F(TcpConnPoolImplTest, ConcurrentConnections) { +TEST_P(TcpConnPoolImplTest, ConcurrentConnections) { cluster_->resetResourceManager(2, 1024, 1024, 1, 1); - InSequence s; ActiveTestConn c1(*this, 0, ActiveTestConn::Type::CreateConnection); ActiveTestConn c2(*this, 1, ActiveTestConn::Type::CreateConnection); @@ -775,11 +830,11 @@ TEST_F(TcpConnPoolImplTest, ConcurrentConnections) { // Finish c1, which gets c3 going. EXPECT_CALL(conn_pool_, onConnReleasedForTest()); - conn_pool_.expectEnableUpstreamReady(); + conn_pool_.expectEnableUpstreamReady(false); c3.expectNewConn(); c1.releaseConn(); - conn_pool_.expectAndRunUpstreamReady(); + conn_pool_.expectEnableUpstreamReady(true); EXPECT_CALL(conn_pool_, onConnReleasedForTest()).Times(2); c2.releaseConn(); c3.releaseConn(); @@ -794,8 +849,7 @@ TEST_F(TcpConnPoolImplTest, ConcurrentConnections) { /** * Tests ConnectionState lifecycle with multiple concurrent connections. */ -TEST_F(TcpConnPoolImplTest, ConnectionStateWithConcurrentConnections) { - InSequence s; +TEST_P(TcpConnPoolImplTest, ConnectionStateWithConcurrentConnections) { int state_destroyed = 0; auto* s1 = new TestConnectionState(1, [&]() -> void { state_destroyed |= 1; }); @@ -813,11 +867,11 @@ TEST_F(TcpConnPoolImplTest, ConnectionStateWithConcurrentConnections) { // Finish c1, which gets c3 going. EXPECT_CALL(conn_pool_, onConnReleasedForTest()); - conn_pool_.expectEnableUpstreamReady(); + conn_pool_.expectEnableUpstreamReady(false); c3.expectNewConn(); c1.releaseConn(); - conn_pool_.expectAndRunUpstreamReady(); + conn_pool_.expectEnableUpstreamReady(true); // c3 now has the state set by c1. 
EXPECT_EQ(s1, c3.callbacks_.conn_data_->connectionStateTyped()); @@ -845,8 +899,7 @@ TEST_F(TcpConnPoolImplTest, ConnectionStateWithConcurrentConnections) { /** * Tests that the DrainCallback is invoked when the number of connections goes to zero. */ -TEST_F(TcpConnPoolImplTest, DrainCallback) { - InSequence s; +TEST_P(TcpConnPoolImplTest, DrainCallback) { ReadyWatcher drained; EXPECT_CALL(drained, ready()); @@ -868,8 +921,7 @@ TEST_F(TcpConnPoolImplTest, DrainCallback) { /** * Test draining a connection pool that has a pending connection. */ -TEST_F(TcpConnPoolImplTest, DrainWhileConnecting) { - InSequence s; +TEST_P(TcpConnPoolImplTest, DrainWhileConnecting) { ReadyWatcher drained; ConnPoolCallbacks callbacks; @@ -878,11 +930,18 @@ TEST_F(TcpConnPoolImplTest, DrainWhileConnecting) { EXPECT_NE(nullptr, handle); conn_pool_.addDrainedCallback([&]() -> void { drained.ready(); }); - handle->cancel(ConnectionPool::CancelPolicy::Default); - EXPECT_CALL(*conn_pool_.test_conns_[0].connection_, close(Network::ConnectionCloseType::NoFlush)); - EXPECT_CALL(drained, ready()); - conn_pool_.test_conns_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); - + if (test_new_connection_pool_) { + // The shared connection pool removes and closes connecting clients if there are no + // pending requests. + EXPECT_CALL(drained, ready()); + handle->cancel(ConnectionPool::CancelPolicy::Default); + } else { + handle->cancel(ConnectionPool::CancelPolicy::Default); + EXPECT_CALL(*conn_pool_.test_conns_[0].connection_, + close(Network::ConnectionCloseType::NoFlush)); + EXPECT_CALL(drained, ready()); + conn_pool_.test_conns_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); + } EXPECT_CALL(conn_pool_, onConnDestroyedForTest()); dispatcher_.clearDeferredDeleteList(); } @@ -890,12 +949,11 @@ TEST_F(TcpConnPoolImplTest, DrainWhileConnecting) { /** * Test that the DrainCallback is invoked when a connection is closed. 
*/ -TEST_F(TcpConnPoolImplTest, DrainOnClose) { +TEST_P(TcpConnPoolImplTest, DrainOnClose) { ReadyWatcher drained; EXPECT_CALL(drained, ready()); conn_pool_.addDrainedCallback([&]() -> void { drained.ready(); }); - InSequence s; ActiveTestConn c1(*this, 0, ActiveTestConn::Type::CreateConnection); ConnectionPool::MockUpstreamCallbacks callbacks; @@ -913,10 +971,78 @@ TEST_F(TcpConnPoolImplTest, DrainOnClose) { dispatcher_.clearDeferredDeleteList(); } +/** + * Test connecting_request_capacity logic. + */ +TEST_P(TcpConnPoolImplTest, RequestCapacity) { + if (!test_new_connection_pool_) { + return; + } + cluster_->resetResourceManager(5, 1024, 1024, 1, 1); + cluster_->max_requests_per_connection_ = 100; + + ConnPoolCallbacks callbacks1; + ConnPoolCallbacks callbacks2; + Tcp::ConnectionPool::Cancellable* handle1; + Tcp::ConnectionPool::Cancellable* handle2; + { + // Request 1 should kick off a new connection. + conn_pool_.expectConnCreate(); + handle1 = conn_pool_.newConnection(callbacks1); + EXPECT_NE(nullptr, handle1); + } + { + // Request 2 should kick off a new connection. + conn_pool_.expectConnCreate(); + handle2 = conn_pool_.newConnection(callbacks2); + EXPECT_NE(nullptr, handle2); + } + + // This should set the number of requests remaining to 1 on the active + // connections, and the connecting_request_capacity to 2 as well. + conn_pool_.drainConnections(); + + // Cancel the connections. Because neither used CloseExcess, the two connections should persist. + handle1->cancel(ConnectionPool::CancelPolicy::Default); + handle2->cancel(ConnectionPool::CancelPolicy::Default); + + Tcp::ConnectionPool::Cancellable* handle3; + Tcp::ConnectionPool::Cancellable* handle4; + Tcp::ConnectionPool::Cancellable* handle5; + ConnPoolCallbacks callbacks3; + ConnPoolCallbacks callbacks4; + ConnPoolCallbacks callbacks5; + + { + // The next two requests will use the connections in progress, bringing + // connecting_request_capacity to zero. 
+ handle3 = conn_pool_.newConnection(callbacks3); + EXPECT_NE(nullptr, handle3); + + handle4 = conn_pool_.newConnection(callbacks4); + EXPECT_NE(nullptr, handle4); + } + { + // With connecting_request_capacity zero, a request for a new connection + // will kick off connection #3. + conn_pool_.expectConnCreate(); + handle5 = conn_pool_.newConnection(callbacks5); + EXPECT_NE(nullptr, handle5); + } + + // Clean up remaining connections. + handle3->cancel(ConnectionPool::CancelPolicy::Default); + handle4->cancel(ConnectionPool::CancelPolicy::Default); + handle5->cancel(ConnectionPool::CancelPolicy::Default); + conn_pool_.test_conns_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + conn_pool_.test_conns_[1].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + conn_pool_.test_conns_[2].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); +} + /** * Test that pending connections are closed when the connection pool is destroyed. */ -TEST_F(TcpConnPoolImplDestructorTest, TestPendingConnectionsAreClosed) { +TEST_P(TcpConnPoolImplDestructorTest, TestPendingConnectionsAreClosed) { connection_ = new NiceMock(); connect_timer_ = new NiceMock(&dispatcher_); EXPECT_CALL(dispatcher_, createClientConnection_(_, _, _, _)).WillOnce(Return(connection_)); @@ -935,7 +1061,7 @@ TEST_F(TcpConnPoolImplDestructorTest, TestPendingConnectionsAreClosed) { /** * Test that busy connections are closed when the connection pool is destroyed. */ -TEST_F(TcpConnPoolImplDestructorTest, TestBusyConnectionsAreClosed) { +TEST_P(TcpConnPoolImplDestructorTest, TestBusyConnectionsAreClosed) { prepareConn(); EXPECT_CALL(*connection_, close(Network::ConnectionCloseType::NoFlush)); @@ -946,7 +1072,7 @@ TEST_F(TcpConnPoolImplDestructorTest, TestBusyConnectionsAreClosed) { /** * Test that ready connections are closed when the connection pool is destroyed. 
*/ -TEST_F(TcpConnPoolImplDestructorTest, TestReadyConnectionsAreClosed) { +TEST_P(TcpConnPoolImplDestructorTest, TestReadyConnectionsAreClosed) { prepareConn(); // Transition connection to ready list @@ -956,6 +1082,8 @@ TEST_F(TcpConnPoolImplDestructorTest, TestReadyConnectionsAreClosed) { EXPECT_CALL(dispatcher_, clearDeferredDeleteList()); conn_pool_.reset(); } +INSTANTIATE_TEST_SUITE_P(ConnectionPools, TcpConnPoolImplTest, testing::Bool()); +INSTANTIATE_TEST_SUITE_P(ConnectionPools, TcpConnPoolImplDestructorTest, testing::Bool()); } // namespace Tcp } // namespace Envoy diff --git a/test/common/tcp_proxy/BUILD b/test/common/tcp_proxy/BUILD index 493f742d2db14..e6149dc13e73d 100644 --- a/test/common/tcp_proxy/BUILD +++ b/test/common/tcp_proxy/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( @@ -28,7 +28,8 @@ envoy_cc_test( "//test/mocks/buffer:buffer_mocks", "//test/mocks/network:network_mocks", "//test/mocks/runtime:runtime_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", + "//test/mocks/server:instance_mocks", "//test/mocks/ssl:ssl_mocks", "//test/mocks/stream_info:stream_info_mocks", "//test/mocks/upstream:host_mocks", diff --git a/test/common/tcp_proxy/tcp_proxy_test.cc b/test/common/tcp_proxy/tcp_proxy_test.cc index ee399514b7522..3658360dcad07 100644 --- a/test/common/tcp_proxy/tcp_proxy_test.cc +++ b/test/common/tcp_proxy/tcp_proxy_test.cc @@ -25,7 +25,8 @@ #include "test/mocks/buffer/mocks.h" #include "test/mocks/network/mocks.h" #include "test/mocks/runtime/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" +#include "test/mocks/server/instance.h" #include "test/mocks/ssl/mocks.h" #include "test/mocks/stream_info/mocks.h" #include "test/mocks/tcp/mocks.h" @@ -36,33 +37,35 @@ #include "gmock/gmock.h" #include 
"gtest/gtest.h" -using testing::_; -using testing::Invoke; -using testing::InvokeWithoutArgs; -using testing::NiceMock; -using testing::Return; -using testing::ReturnPointee; -using testing::ReturnRef; -using testing::SaveArg; - namespace Envoy { namespace TcpProxy { namespace { using ::Envoy::Network::UpstreamServerName; +using ::testing::_; +using ::testing::DoAll; +using ::testing::Invoke; +using ::testing::InvokeWithoutArgs; +using ::testing::NiceMock; +using ::testing::Return; +using ::testing::ReturnPointee; +using ::testing::ReturnRef; +using ::testing::SaveArg; namespace { Config constructConfigFromYaml(const std::string& yaml, - Server::Configuration::FactoryContext& context) { + Server::Configuration::FactoryContext& context, + bool avoid_boosting = true) { envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy tcp_proxy; - TestUtility::loadFromYamlAndValidate(yaml, tcp_proxy); + TestUtility::loadFromYamlAndValidate(yaml, tcp_proxy, false, avoid_boosting); return Config(tcp_proxy, context); } -Config constructConfigFromV2Yaml(const std::string& yaml, - Server::Configuration::FactoryContext& context) { +Config constructConfigFromV3Yaml(const std::string& yaml, + Server::Configuration::FactoryContext& context, + bool avoid_boosting = true) { envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy tcp_proxy; - TestUtility::loadFromYamlAndValidate(yaml, tcp_proxy); + TestUtility::loadFromYamlAndValidate(yaml, tcp_proxy, false, avoid_boosting); return Config(tcp_proxy, context); } @@ -75,7 +78,7 @@ cluster: foo )EOF"; NiceMock factory_context; - Config config_obj(constructConfigFromV2Yaml(yaml, factory_context)); + Config config_obj(constructConfigFromV3Yaml(yaml, factory_context)); EXPECT_EQ(std::chrono::hours(1), config_obj.sharedConfig()->idleTimeout().value()); } @@ -87,7 +90,7 @@ idle_timeout: 0s )EOF"; NiceMock factory_context; - Config config_obj(constructConfigFromV2Yaml(yaml, factory_context)); + Config 
config_obj(constructConfigFromV3Yaml(yaml, factory_context)); EXPECT_FALSE(config_obj.sharedConfig()->idleTimeout().has_value()); } @@ -99,7 +102,7 @@ idle_timeout: 1s )EOF"; NiceMock factory_context; - Config config_obj(constructConfigFromV2Yaml(yaml, factory_context)); + Config config_obj(constructConfigFromV3Yaml(yaml, factory_context)); EXPECT_EQ(std::chrono::seconds(1), config_obj.sharedConfig()->idleTimeout().value()); } @@ -122,7 +125,7 @@ TEST(ConfigTest, DEPRECATED_FEATURE_TEST(BadConfig)) { )EOF"; NiceMock factory_context; - EXPECT_THROW(constructConfigFromYaml(yaml_string, factory_context), EnvoyException); + EXPECT_THROW(constructConfigFromYaml(yaml_string, factory_context, false), EnvoyException); } TEST(ConfigTest, DEPRECATED_FEATURE_TEST(EmptyRouteConfig)) { @@ -134,7 +137,7 @@ TEST(ConfigTest, DEPRECATED_FEATURE_TEST(EmptyRouteConfig)) { )EOF"; NiceMock factory_context_; - EXPECT_THROW(constructConfigFromYaml(yaml, factory_context_), EnvoyException); + EXPECT_THROW(constructConfigFromYaml(yaml, factory_context_, false), EnvoyException); } TEST(ConfigTest, DEPRECATED_FEATURE_TEST(Routes)) { @@ -185,7 +188,7 @@ TEST(ConfigTest, DEPRECATED_FEATURE_TEST(Routes)) { )EOF"; NiceMock factory_context_; - Config config_obj(constructConfigFromYaml(yaml, factory_context_)); + Config config_obj(constructConfigFromYaml(yaml, factory_context_, false)); { // hit route with destination_ip (10.10.10.10/32) @@ -364,7 +367,7 @@ TEST(ConfigTest, DEPRECATED_FEATURE_TEST(RouteWithTopLevelMetadataMatchConfig)) )EOF"; NiceMock factory_context_; - Config config_obj(constructConfigFromYaml(yaml, factory_context_)); + Config config_obj(constructConfigFromYaml(yaml, factory_context_, false)); ProtobufWkt::Value v1, v2; v1.set_string_value("v1"); @@ -402,7 +405,7 @@ TEST(ConfigTest, WeightedClusterWithZeroWeightConfig) { )EOF"; NiceMock factory_context; - EXPECT_THROW(constructConfigFromV2Yaml(yaml, factory_context), EnvoyException); + 
EXPECT_THROW(constructConfigFromV3Yaml(yaml, factory_context), EnvoyException); } // Tests that it is possible to define a list of weighted clusters. @@ -418,7 +421,7 @@ TEST(ConfigTest, WeightedClustersConfig) { )EOF"; NiceMock factory_context; - Config config_obj(constructConfigFromV2Yaml(yaml, factory_context)); + Config config_obj(constructConfigFromV3Yaml(yaml, factory_context)); NiceMock connection; EXPECT_CALL(factory_context.random_, random()).WillOnce(Return(0)); @@ -452,7 +455,7 @@ TEST(ConfigTest, WeightedClustersWithMetadataMatchConfig) { )EOF"; NiceMock factory_context; - Config config_obj(constructConfigFromV2Yaml(yaml, factory_context)); + Config config_obj(constructConfigFromV3Yaml(yaml, factory_context)); { ProtobufWkt::Value v1, v2; @@ -539,7 +542,7 @@ TEST(ConfigTest, WeightedClustersWithMetadataMatchAndTopLevelMetadataMatchConfig )EOF"; NiceMock factory_context; - Config config_obj(constructConfigFromV2Yaml(yaml, factory_context)); + Config config_obj(constructConfigFromV3Yaml(yaml, factory_context)); ProtobufWkt::Value v00, v01, v04; v00.set_string_value("v00"); @@ -630,7 +633,7 @@ TEST(ConfigTest, WeightedClustersWithTopLevelMetadataMatchConfig) { )EOF"; NiceMock factory_context; - Config config_obj(constructConfigFromV2Yaml(yaml, factory_context)); + Config config_obj(constructConfigFromV3Yaml(yaml, factory_context)); ProtobufWkt::Value v1, v2; v1.set_string_value("v1"); @@ -669,7 +672,7 @@ TEST(ConfigTest, TopLevelMetadataMatchConfig) { )EOF"; NiceMock factory_context; - Config config_obj(constructConfigFromV2Yaml(yaml, factory_context)); + Config config_obj(constructConfigFromV3Yaml(yaml, factory_context)); ProtobufWkt::Value v1, v2; v1.set_string_value("v1"); @@ -702,7 +705,7 @@ TEST(ConfigTest, ClusterWithTopLevelMetadataMatchConfig) { )EOF"; NiceMock factory_context; - Config config_obj(constructConfigFromV2Yaml(yaml, factory_context)); + Config config_obj(constructConfigFromV3Yaml(yaml, factory_context)); ProtobufWkt::Value v1, v2; 
v1.set_string_value("v1"); @@ -741,7 +744,7 @@ TEST(ConfigTest, PerConnectionClusterWithTopLevelMetadataMatchConfig) { )EOF"; NiceMock factory_context; - Config config_obj(constructConfigFromV2Yaml(yaml, factory_context)); + Config config_obj(constructConfigFromV3Yaml(yaml, factory_context)); ProtobufWkt::Value v1, v2; v1.set_string_value("v1"); @@ -780,7 +783,7 @@ TEST(ConfigTest, HashWithSourceIpConfig) { )EOF"; NiceMock factory_context; - Config config_obj(constructConfigFromV2Yaml(yaml, factory_context)); + Config config_obj(constructConfigFromV3Yaml(yaml, factory_context)); EXPECT_NE(nullptr, config_obj.hashPolicy()); } @@ -791,7 +794,7 @@ TEST(ConfigTest, HashWithSourceIpDefaultConfig) { )EOF"; NiceMock factory_context; - Config config_obj(constructConfigFromV2Yaml(yaml, factory_context)); + Config config_obj(constructConfigFromV3Yaml(yaml, factory_context)); EXPECT_EQ(nullptr, config_obj.hashPolicy()); } @@ -802,7 +805,7 @@ TEST(ConfigTest, AccessLogConfig) { { envoy::extensions::access_loggers::file::v3::FileAccessLog file_access_log; file_access_log.set_path("some_path"); - file_access_log.set_format("the format specifier"); + file_access_log.mutable_log_format()->set_text_format("the format specifier"); log->mutable_typed_config()->PackFrom(file_access_log); } @@ -859,7 +862,7 @@ class TcpProxyTest : public testing::Test { access_log->set_name(Extensions::AccessLoggers::AccessLogNames::get().File); envoy::extensions::access_loggers::file::v3::FileAccessLog file_access_log; file_access_log.set_path("unused"); - file_access_log.set_format(access_log_format); + file_access_log.mutable_log_format()->set_text_format(access_log_format); access_log->mutable_typed_config()->PackFrom(file_access_log); return config; } @@ -877,7 +880,7 @@ class TcpProxyTest : public testing::Test { .WillByDefault(ReturnRef(*upstream_connections_.back())); upstream_hosts_.push_back(std::make_shared>()); conn_pool_handles_.push_back( - std::make_unique>()); + std::make_unique>()); 
ON_CALL(*upstream_hosts_.at(i), cluster()) .WillByDefault(ReturnPointee( @@ -969,7 +972,7 @@ class TcpProxyTest : public testing::Test { std::vector>> upstream_connection_data_{}; std::vector conn_pool_callbacks_; - std::vector>> conn_pool_handles_; + std::vector>> conn_pool_handles_; NiceMock conn_pool_; Tcp::ConnectionPool::UpstreamCallbacks* upstream_callbacks_; StringViewSaver access_log_data_; @@ -1742,7 +1745,7 @@ class TcpProxyRoutingTest : public testing::Test { public: TcpProxyRoutingTest() = default; - void setup() { + void setup(bool avoid_boosting = true) { const std::string yaml = R"EOF( stat_prefix: name cluster: fallback_cluster @@ -1752,7 +1755,8 @@ class TcpProxyRoutingTest : public testing::Test { cluster: fake_cluster )EOF"; - config_ = std::make_shared(constructConfigFromYaml(yaml, factory_context_)); + config_ = + std::make_shared(constructConfigFromYaml(yaml, factory_context_, avoid_boosting)); } void initializeFilter() { @@ -1772,7 +1776,7 @@ class TcpProxyRoutingTest : public testing::Test { }; TEST_F(TcpProxyRoutingTest, DEPRECATED_FEATURE_TEST(NonRoutableConnection)) { - setup(); + setup(false); const uint32_t total_cx = config_->stats().downstream_cx_total_.value(); const uint32_t non_routable_cx = config_->stats().downstream_cx_no_route_.value(); @@ -1793,7 +1797,7 @@ TEST_F(TcpProxyRoutingTest, DEPRECATED_FEATURE_TEST(NonRoutableConnection)) { } TEST_F(TcpProxyRoutingTest, DEPRECATED_FEATURE_TEST(RoutableConnection)) { - setup(); + setup(false); const uint32_t total_cx = config_->stats().downstream_cx_total_.value(); const uint32_t non_routable_cx = config_->stats().downstream_cx_no_route_.value(); @@ -1815,7 +1819,7 @@ TEST_F(TcpProxyRoutingTest, DEPRECATED_FEATURE_TEST(RoutableConnection)) { // Test that the tcp proxy uses the cluster from FilterState if set TEST_F(TcpProxyRoutingTest, DEPRECATED_FEATURE_TEST(UseClusterFromPerConnectionCluster)) { - setup(); + setup(false); initializeFilter(); 
connection_.streamInfo().filterState()->setData( @@ -1832,7 +1836,7 @@ TEST_F(TcpProxyRoutingTest, DEPRECATED_FEATURE_TEST(UseClusterFromPerConnectionC // Test that the tcp proxy forwards the requested server name from FilterState if set TEST_F(TcpProxyRoutingTest, DEPRECATED_FEATURE_TEST(UpstreamServerName)) { - setup(); + setup(false); initializeFilter(); connection_.streamInfo().filterState()->setData( @@ -1862,7 +1866,7 @@ TEST_F(TcpProxyRoutingTest, DEPRECATED_FEATURE_TEST(UpstreamServerName)) { // Test that the tcp proxy override ALPN from FilterState if set TEST_F(TcpProxyRoutingTest, DEPRECATED_FEATURE_TEST(ApplicationProtocols)) { - setup(); + setup(false); initializeFilter(); connection_.streamInfo().filterState()->setData( diff --git a/test/common/thread_local/BUILD b/test/common/thread_local/BUILD index c1d95410e5ab9..fdd28bc076fb3 100644 --- a/test/common/thread_local/BUILD +++ b/test/common/thread_local/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( diff --git a/test/common/tracing/BUILD b/test/common/tracing/BUILD index 73152809a8f4f..57640551c3460 100644 --- a/test/common/tracing/BUILD +++ b/test/common/tracing/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( @@ -46,7 +46,8 @@ envoy_cc_test( "//source/common/tracing:http_tracer_config_lib", "//source/common/tracing:http_tracer_lib", "//source/common/tracing:http_tracer_manager_lib", - "//test/mocks/server:server_mocks", + "//test/mocks/server:instance_mocks", + "//test/mocks/server:tracer_factory_mocks", "//test/mocks/tracing:tracing_mocks", "//test/test_common:registry_lib", ], diff --git a/test/common/tracing/http_tracer_impl_test.cc b/test/common/tracing/http_tracer_impl_test.cc index 
3d05232cb56f7..ef1686bcc688c 100644 --- a/test/common/tracing/http_tracer_impl_test.cc +++ b/test/common/tracing/http_tracer_impl_test.cc @@ -7,12 +7,12 @@ #include "envoy/type/tracing/v3/custom_tag.pb.h" #include "common/common/base64.h" +#include "common/common/random_generator.h" #include "common/http/header_map_impl.h" #include "common/http/headers.h" #include "common/http/message_impl.h" #include "common/http/request_id_extension_impl.h" #include "common/network/utility.h" -#include "common/runtime/runtime_impl.h" #include "common/tracing/http_tracer_impl.h" #include "test/mocks/http/mocks.h" @@ -44,7 +44,7 @@ namespace { TEST(HttpTracerUtilityTest, IsTracing) { NiceMock stream_info; NiceMock stats; - Runtime::RandomGeneratorImpl random; + Random::RandomGeneratorImpl random; std::string not_traceable_guid = random.uuid(); auto rid_extension = Http::RequestIDExtensionFactory::defaultInstance(random); @@ -158,6 +158,7 @@ TEST_F(HttpConnManFinalizerImplTest, OriginalAndLongPath) { Http::TestRequestHeaderMapImpl request_headers{{"x-request-id", "id"}, {"x-envoy-original-path", path}, {":method", "GET"}, + {":path", ""}, {"x-forwarded-proto", "http"}}; Http::TestResponseHeaderMapImpl response_headers; Http::TestResponseTrailerMapImpl response_trailers; @@ -189,8 +190,10 @@ TEST_F(HttpConnManFinalizerImplTest, NoGeneratedId) { const auto remote_address = Network::Address::InstanceConstSharedPtr{new Network::Address::Ipv4Instance(expected_ip, 0)}; - Http::TestRequestHeaderMapImpl request_headers{ - {"x-envoy-original-path", path}, {":method", "GET"}, {"x-forwarded-proto", "http"}}; + Http::TestRequestHeaderMapImpl request_headers{{":path", ""}, + {"x-envoy-original-path", path}, + {":method", "GET"}, + {"x-forwarded-proto", "http"}}; Http::TestResponseHeaderMapImpl response_headers; Http::TestResponseTrailerMapImpl response_trailers; @@ -213,6 +216,38 @@ TEST_F(HttpConnManFinalizerImplTest, NoGeneratedId) { &response_trailers, stream_info, config); } 
+TEST_F(HttpConnManFinalizerImplTest, Connect) { + const std::string path(300, 'a'); + const std::string path_prefix = "http://"; + const std::string expected_path(256, 'a'); + const std::string expected_ip = "10.0.0.100"; + const auto remote_address = + Network::Address::InstanceConstSharedPtr{new Network::Address::Ipv4Instance(expected_ip, 0)}; + + Http::TestRequestHeaderMapImpl request_headers{{":method", "CONNECT"}, + {"x-forwarded-proto", "http"}}; + Http::TestResponseHeaderMapImpl response_headers; + Http::TestResponseTrailerMapImpl response_trailers; + + absl::optional protocol = Http::Protocol::Http2; + EXPECT_CALL(stream_info, bytesReceived()).WillOnce(Return(10)); + EXPECT_CALL(stream_info, bytesSent()).WillOnce(Return(11)); + EXPECT_CALL(stream_info, protocol()).WillRepeatedly(ReturnPointee(&protocol)); + absl::optional response_code; + EXPECT_CALL(stream_info, responseCode()).WillRepeatedly(ReturnPointee(&response_code)); + EXPECT_CALL(stream_info, downstreamDirectRemoteAddress()) + .WillRepeatedly(ReturnPointee(&remote_address)); + + EXPECT_CALL(span, setTag(_, _)).Times(testing::AnyNumber()); + EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().HttpUrl), Eq(""))); + EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().HttpMethod), Eq("CONNECT"))); + EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().HttpProtocol), Eq("HTTP/2"))); + EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().PeerAddress), Eq(expected_ip))); + + HttpTracerUtility::finalizeDownstreamSpan(span, &request_headers, &response_headers, + &response_trailers, stream_info, config); +} + TEST_F(HttpConnManFinalizerImplTest, NullRequestHeadersAndNullRouteEntry) { EXPECT_CALL(stream_info, bytesReceived()).WillOnce(Return(10)); EXPECT_CALL(stream_info, bytesSent()).WillOnce(Return(11)); @@ -709,6 +744,8 @@ TEST(HttpNullTracerTest, BasicFunctionality) { span_ptr->setOperation("foo"); span_ptr->setTag("foo", "bar"); + span_ptr->setBaggage("key", "value"); + ASSERT_EQ("", 
span_ptr->getBaggage("baggage_key")); span_ptr->injectContext(request_headers); EXPECT_NE(nullptr, span_ptr->spawnChild(config, "foo", SystemTime())); diff --git a/test/common/tracing/http_tracer_manager_impl_test.cc b/test/common/tracing/http_tracer_manager_impl_test.cc index f6e719fa434ef..67f802af9edc6 100644 --- a/test/common/tracing/http_tracer_manager_impl_test.cc +++ b/test/common/tracing/http_tracer_manager_impl_test.cc @@ -2,7 +2,8 @@ #include "common/tracing/http_tracer_impl.h" #include "common/tracing/http_tracer_manager_impl.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/instance.h" +#include "test/mocks/server/tracer_factory.h" #include "test/mocks/tracing/mocks.h" #include "test/test_common/registry.h" diff --git a/test/common/upstream/BUILD b/test/common/upstream/BUILD index 789cb54019253..cdfb7d42b72bc 100644 --- a/test/common/upstream/BUILD +++ b/test/common/upstream/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_benchmark_test", @@ -9,6 +7,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( @@ -85,7 +85,8 @@ envoy_cc_test( "//test/mocks/local_info:local_info_mocks", "//test/mocks/protobuf:protobuf_mocks", "//test/mocks/runtime:runtime_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:admin_mocks", + "//test/mocks/server:instance_mocks", "//test/mocks/ssl:ssl_mocks", "//test/mocks/upstream:upstream_mocks", "//test/test_common:utility_lib", @@ -96,6 +97,41 @@ envoy_cc_test( ], ) +envoy_cc_benchmark_binary( + name = "eds_speed_test", + srcs = ["eds_speed_test.cc"], + external_deps = [ + "benchmark", + ], + deps = [ + ":utility_lib", + "//source/common/config:grpc_mux_lib", + "//source/common/config:grpc_subscription_lib", + "//source/common/config:protobuf_link_hacks", + "//source/common/config:utility_lib", + "//source/common/upstream:eds_lib", + "//source/extensions/transport_sockets/raw_buffer:config", + 
"//source/server:transport_socket_config_lib", + "//test/mocks/local_info:local_info_mocks", + "//test/mocks/protobuf:protobuf_mocks", + "//test/mocks/runtime:runtime_mocks", + "//test/mocks/server:admin_mocks", + "//test/mocks/server:instance_mocks", + "//test/mocks/ssl:ssl_mocks", + "//test/mocks/upstream:upstream_mocks", + "//test/test_common:utility_lib", + "@envoy_api//envoy/config/cluster/v3:pkg_cc_proto", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + "@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto", + "@envoy_api//envoy/service/discovery/v3:pkg_cc_proto", + ], +) + +envoy_benchmark_test( + name = "eds_speed_test_benchmark_test", + benchmark_binary = "eds_speed_test", +) + envoy_cc_test( name = "health_checker_impl_test", srcs = ["health_checker_impl_test.cc"], @@ -117,6 +153,7 @@ envoy_cc_test( "//test/mocks/runtime:runtime_mocks", "//test/mocks/upstream:upstream_mocks", "//test/test_common:simulated_time_system_lib", + "//test/test_common:test_runtime_lib", "//test/test_common:utility_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", "@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto", @@ -156,6 +193,8 @@ envoy_cc_test( "//source/common/upstream:upstream_lib", "//test/mocks/runtime:runtime_mocks", "//test/mocks/upstream:upstream_mocks", + "//test/test_common:logging_lib", + "//test/test_common:test_runtime_lib", "@envoy_api//envoy/config/cluster/v3:pkg_cc_proto", ], ) @@ -207,7 +246,8 @@ envoy_cc_test( "//test/mocks/local_info:local_info_mocks", "//test/mocks/network:network_mocks", "//test/mocks/protobuf:protobuf_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:admin_mocks", + "//test/mocks/server:instance_mocks", "//test/mocks/upstream:upstream_mocks", "//test/test_common:simulated_time_system_lib", "//test/test_common:utility_lib", @@ -233,7 +273,8 @@ envoy_cc_test( "//test/mocks/network:network_mocks", "//test/mocks/protobuf:protobuf_mocks", "//test/mocks/runtime:runtime_mocks", - "//test/mocks/server:server_mocks", + 
"//test/mocks/server:admin_mocks", + "//test/mocks/server:instance_mocks", "//test/mocks/ssl:ssl_mocks", "//test/mocks/thread_local:thread_local_mocks", "//test/mocks/upstream:upstream_mocks", @@ -258,7 +299,8 @@ envoy_cc_test( "//test/mocks/network:network_mocks", "//test/mocks/protobuf:protobuf_mocks", "//test/mocks/runtime:runtime_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:admin_mocks", + "//test/mocks/server:instance_mocks", "//test/mocks/ssl:ssl_mocks", "//test/mocks/upstream:upstream_mocks", "//test/test_common:utility_lib", @@ -395,7 +437,7 @@ envoy_cc_test( "//source/server:transport_socket_config_lib", "//test/mocks:common_lib", "//test/mocks/network:network_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:transport_socket_factory_context_mocks", "//test/test_common:registry_lib", "//test/test_common:utility_lib", "@envoy_api//envoy/config/cluster/v3:pkg_cc_proto", @@ -484,7 +526,8 @@ envoy_cc_test_library( "//test/mocks/protobuf:protobuf_mocks", "//test/mocks/runtime:runtime_mocks", "//test/mocks/secret:secret_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:admin_mocks", + "//test/mocks/server:instance_mocks", "//test/mocks/tcp:tcp_mocks", "//test/mocks/thread_local:thread_local_mocks", "//test/mocks/upstream:upstream_mocks", @@ -522,7 +565,8 @@ envoy_cc_test( "//test/mocks/network:network_mocks", "//test/mocks/protobuf:protobuf_mocks", "//test/mocks/runtime:runtime_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:admin_mocks", + "//test/mocks/server:instance_mocks", "//test/mocks/ssl:ssl_mocks", "//test/mocks/upstream:upstream_mocks", "//test/test_common:registry_lib", diff --git a/test/common/upstream/cds_api_impl_test.cc b/test/common/upstream/cds_api_impl_test.cc index fd35bb3ebb2b3..388fd9ed5ca82 100644 --- a/test/common/upstream/cds_api_impl_test.cc +++ b/test/common/upstream/cds_api_impl_test.cc @@ -71,21 +71,6 @@ class CdsApiImplTest : public testing::Test { 
NiceMock validation_visitor_; }; -// Negative test for protoc-gen-validate constraints. -TEST_F(CdsApiImplTest, ValidateFail) { - InSequence s; - - setup(); - - Protobuf::RepeatedPtrField clusters; - envoy::config::cluster::v3::Cluster cluster; - clusters.Add()->PackFrom(cluster); - - EXPECT_CALL(cm_, clusters()).WillRepeatedly(Return(cluster_map_)); - EXPECT_CALL(initialized_, ready()); - EXPECT_THROW(cds_callbacks_->onConfigUpdate(clusters, ""), EnvoyException); -} - // Regression test against only updating versionInfo() if at least one cluster // is are added/updated even if one or more are removed. TEST_F(CdsApiImplTest, UpdateVersionOnClusterRemove) { @@ -96,7 +81,7 @@ TEST_F(CdsApiImplTest, UpdateVersionOnClusterRemove) { const std::string response1_yaml = R"EOF( version_info: '0' resources: -- "@type": type.googleapis.com/envoy.api.v2.Cluster +- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster name: cluster1 type: EDS eds_cluster_config: @@ -111,7 +96,9 @@ version_info: '0' EXPECT_CALL(initialized_, ready()); EXPECT_EQ("", cds_->versionInfo()); - cds_callbacks_->onConfigUpdate(response1.resources(), response1.version_info()); + const auto decoded_resources = + TestUtility::decodeResources(response1); + cds_callbacks_->onConfigUpdate(decoded_resources.refvec_, response1.version_info()); EXPECT_EQ("0", cds_->versionInfo()); const std::string response2_yaml = R"EOF( @@ -122,7 +109,9 @@ version_info: '1' TestUtility::parseYaml(response2_yaml); EXPECT_CALL(cm_, clusters()).WillOnce(Return(makeClusterMap({"cluster1"}))); EXPECT_CALL(cm_, removeCluster("cluster1")).WillOnce(Return(true)); - cds_callbacks_->onConfigUpdate(response2.resources(), response2.version_info()); + const auto decoded_resources_2 = + TestUtility::decodeResources(response2); + cds_callbacks_->onConfigUpdate(decoded_resources_2.refvec_, response2.version_info()); EXPECT_EQ("1", cds_->versionInfo()); } @@ -132,15 +121,14 @@ TEST_F(CdsApiImplTest, ValidateDuplicateClusters) { setup(); 
- Protobuf::RepeatedPtrField clusters; envoy::config::cluster::v3::Cluster cluster_1; cluster_1.set_name("duplicate_cluster"); - clusters.Add()->PackFrom(cluster_1); - clusters.Add()->PackFrom(cluster_1); + const auto decoded_resources = TestUtility::decodeResources({cluster_1, cluster_1}); EXPECT_CALL(cm_, clusters()).WillRepeatedly(Return(cluster_map_)); EXPECT_CALL(initialized_, ready()); - EXPECT_THROW_WITH_MESSAGE(cds_callbacks_->onConfigUpdate(clusters, ""), EnvoyException, + EXPECT_THROW_WITH_MESSAGE(cds_callbacks_->onConfigUpdate(decoded_resources.refvec_, ""), + EnvoyException, "Error adding/updating cluster(s) duplicate_cluster: duplicate cluster " "duplicate_cluster found"); } @@ -153,8 +141,7 @@ TEST_F(CdsApiImplTest, EmptyConfigUpdate) { EXPECT_CALL(cm_, clusters()).WillOnce(Return(ClusterManager::ClusterInfoMap{})); EXPECT_CALL(initialized_, ready()); - Protobuf::RepeatedPtrField clusters; - cds_callbacks_->onConfigUpdate(clusters, ""); + cds_callbacks_->onConfigUpdate({}, ""); } TEST_F(CdsApiImplTest, ConfigUpdateWith2ValidClusters) { @@ -166,19 +153,16 @@ TEST_F(CdsApiImplTest, ConfigUpdateWith2ValidClusters) { EXPECT_CALL(cm_, clusters()).WillOnce(Return(ClusterManager::ClusterInfoMap{})); EXPECT_CALL(initialized_, ready()); - Protobuf::RepeatedPtrField clusters; - envoy::config::cluster::v3::Cluster cluster_1; cluster_1.set_name("cluster_1"); - clusters.Add()->PackFrom(cluster_1); expectAdd("cluster_1"); envoy::config::cluster::v3::Cluster cluster_2; cluster_2.set_name("cluster_2"); - clusters.Add()->PackFrom(cluster_2); expectAdd("cluster_2"); - cds_callbacks_->onConfigUpdate(clusters, ""); + const auto decoded_resources = TestUtility::decodeResources({cluster_1, cluster_2}); + cds_callbacks_->onConfigUpdate(decoded_resources.refvec_, ""); } TEST_F(CdsApiImplTest, DeltaConfigUpdate) { @@ -208,7 +192,9 @@ TEST_F(CdsApiImplTest, DeltaConfigUpdate) { resource->set_name("cluster_2"); resource->set_version("v1"); } - 
cds_callbacks_->onConfigUpdate(resources, {}, "v1"); + const auto decoded_resources = + TestUtility::decodeResources(resources); + cds_callbacks_->onConfigUpdate(decoded_resources.refvec_, {}, "v1"); } { @@ -225,7 +211,9 @@ TEST_F(CdsApiImplTest, DeltaConfigUpdate) { Protobuf::RepeatedPtrField removed; *removed.Add() = "cluster_1"; EXPECT_CALL(cm_, removeCluster(StrEq("cluster_1"))).WillOnce(Return(true)); - cds_callbacks_->onConfigUpdate(resources, removed, "v2"); + const auto decoded_resources = + TestUtility::decodeResources(resources); + cds_callbacks_->onConfigUpdate(decoded_resources.refvec_, removed, "v2"); } } @@ -238,25 +226,21 @@ TEST_F(CdsApiImplTest, ConfigUpdateAddsSecondClusterEvenIfFirstThrows) { EXPECT_CALL(cm_, clusters()).WillOnce(Return(ClusterManager::ClusterInfoMap{})); EXPECT_CALL(initialized_, ready()); - Protobuf::RepeatedPtrField clusters; - envoy::config::cluster::v3::Cluster cluster_1; cluster_1.set_name("cluster_1"); - clusters.Add()->PackFrom(cluster_1); expectAddToThrow("cluster_1", "An exception"); envoy::config::cluster::v3::Cluster cluster_2; cluster_2.set_name("cluster_2"); - clusters.Add()->PackFrom(cluster_2); expectAdd("cluster_2"); envoy::config::cluster::v3::Cluster cluster_3; cluster_3.set_name("cluster_3"); - clusters.Add()->PackFrom(cluster_3); expectAddToThrow("cluster_3", "Another exception"); + const auto decoded_resources = TestUtility::decodeResources({cluster_1, cluster_2, cluster_3}); EXPECT_THROW_WITH_MESSAGE( - cds_callbacks_->onConfigUpdate(clusters, ""), EnvoyException, + cds_callbacks_->onConfigUpdate(decoded_resources.refvec_, ""), EnvoyException, "Error adding/updating cluster(s) cluster_1: An exception, cluster_3: Another exception"); } @@ -268,13 +252,13 @@ TEST_F(CdsApiImplTest, Basic) { const std::string response1_yaml = R"EOF( version_info: '0' resources: -- "@type": type.googleapis.com/envoy.api.v2.Cluster +- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster name: cluster1 type: EDS 
eds_cluster_config: eds_config: path: eds path -- "@type": type.googleapis.com/envoy.api.v2.Cluster +- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster name: cluster2 type: EDS eds_cluster_config: @@ -289,19 +273,21 @@ version_info: '0' expectAdd("cluster2", "0"); EXPECT_CALL(initialized_, ready()); EXPECT_EQ("", cds_->versionInfo()); - cds_callbacks_->onConfigUpdate(response1.resources(), response1.version_info()); + const auto decoded_resources = + TestUtility::decodeResources(response1); + cds_callbacks_->onConfigUpdate(decoded_resources.refvec_, response1.version_info()); EXPECT_EQ("0", cds_->versionInfo()); const std::string response2_yaml = R"EOF( version_info: '1' resources: -- "@type": type.googleapis.com/envoy.api.v2.Cluster +- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster name: cluster1 type: EDS eds_cluster_config: eds_config: path: eds path -- "@type": type.googleapis.com/envoy.api.v2.Cluster +- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster name: cluster3 type: EDS eds_cluster_config: @@ -315,7 +301,9 @@ version_info: '1' expectAdd("cluster1", "1"); expectAdd("cluster3", "1"); EXPECT_CALL(cm_, removeCluster("cluster2")); - cds_callbacks_->onConfigUpdate(response2.resources(), response2.version_info()); + const auto decoded_resources_2 = + TestUtility::decodeResources(response2); + cds_callbacks_->onConfigUpdate(decoded_resources_2.refvec_, response2.version_info()); EXPECT_EQ("1", cds_->versionInfo()); } @@ -329,13 +317,13 @@ TEST_F(CdsApiImplTest, FailureInvalidConfig) { const std::string response1_yaml = R"EOF( version_info: '0' resources: -- "@type": type.googleapis.com/envoy.api.v2.Cluster +- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster name: cluster1 type: EDS eds_cluster_config: eds_config: path: eds path -- "@type": type.googleapis.com/envoy.api.v2.Cluster +- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster name: cluster1 type: EDS eds_cluster_config: @@ -347,7 +335,9 @@ 
version_info: '0' EXPECT_CALL(cm_, clusters()).WillRepeatedly(Return(cluster_map_)); EXPECT_CALL(initialized_, ready()); - EXPECT_THROW(cds_callbacks_->onConfigUpdate(response1.resources(), response1.version_info()), + const auto decoded_resources = + TestUtility::decodeResources(response1); + EXPECT_THROW(cds_callbacks_->onConfigUpdate(decoded_resources.refvec_, response1.version_info()), EnvoyException); EXPECT_EQ("", cds_->versionInfo()); } diff --git a/test/common/upstream/cluster_factory_impl_test.cc b/test/common/upstream/cluster_factory_impl_test.cc index dbeac5c6c75ba..bf6032ca4f6f8 100644 --- a/test/common/upstream/cluster_factory_impl_test.cc +++ b/test/common/upstream/cluster_factory_impl_test.cc @@ -23,7 +23,8 @@ #include "test/mocks/local_info/mocks.h" #include "test/mocks/network/mocks.h" #include "test/mocks/protobuf/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/admin.h" +#include "test/mocks/server/instance.h" #include "test/mocks/ssl/mocks.h" using testing::NiceMock; @@ -61,7 +62,7 @@ class ClusterFactoryTestBase { const NiceMock local_info_; NiceMock dispatcher_; NiceMock runtime_; - NiceMock random_; + NiceMock random_; Stats::IsolatedStoreImpl stats_; Singleton::ManagerImpl singleton_manager_{Thread::threadFactoryForTest()}; NiceMock tls_; @@ -79,10 +80,14 @@ TEST_F(TestStaticClusterImplTest, CreateWithoutConfig) { name: staticcluster connect_timeout: 0.25s lb_policy: ROUND_ROBIN - hosts: - - socket_address: - address: 10.0.0.1 - port_value: 443 + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 10.0.0.1 + port_value: 443 cluster_type: name: envoy.clusters.test_static )EOF"; @@ -90,7 +95,7 @@ TEST_F(TestStaticClusterImplTest, CreateWithoutConfig) { TestStaticClusterFactory factory; Registry::InjectFactory registered_factory(factory); - const envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + const 
envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); auto create_result = ClusterFactoryImplBase::create( cluster_config, cm_, stats_, tls_, dns_resolver_, ssl_context_manager_, runtime_, random_, dispatcher_, log_manager_, local_info_, admin_, singleton_manager_, @@ -117,10 +122,14 @@ TEST_F(TestStaticClusterImplTest, CreateWithStructConfig) { name: staticcluster connect_timeout: 0.25s lb_policy: ROUND_ROBIN - hosts: - - socket_address: - address: 10.0.0.1 - port_value: 443 + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 10.0.0.1 + port_value: 443 cluster_type: name: envoy.clusters.custom_static typed_config: @@ -131,7 +140,7 @@ TEST_F(TestStaticClusterImplTest, CreateWithStructConfig) { port_value: 80 )EOF"; - const envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + const envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); auto create_result = ClusterFactoryImplBase::create( cluster_config, cm_, stats_, tls_, dns_resolver_, ssl_context_manager_, runtime_, random_, dispatcher_, log_manager_, local_info_, admin_, singleton_manager_, @@ -157,10 +166,14 @@ TEST_F(TestStaticClusterImplTest, CreateWithTypedConfig) { name: staticcluster connect_timeout: 0.25s lb_policy: ROUND_ROBIN - hosts: - - socket_address: - address: 10.0.0.1 - port_value: 443 + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 10.0.0.1 + port_value: 443 cluster_type: name: envoy.clusters.custom_static typed_config: @@ -170,7 +183,7 @@ TEST_F(TestStaticClusterImplTest, CreateWithTypedConfig) { port_value: 80 )EOF"; - const envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + const envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); auto create_result = ClusterFactoryImplBase::create( cluster_config, cm_, stats_, tls_, dns_resolver_, 
ssl_context_manager_, runtime_, random_, dispatcher_, log_manager_, local_info_, admin_, singleton_manager_, @@ -196,10 +209,14 @@ TEST_F(TestStaticClusterImplTest, UnsupportedClusterType) { name: staticcluster connect_timeout: 0.25s lb_policy: ROUND_ROBIN - hosts: - - socket_address: - address: 10.0.0.1 - port_value: 443 + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 10.0.0.1 + port_value: 443 cluster_type: name: envoy.clusters.bad_cluster_name typed_config: @@ -209,7 +226,7 @@ TEST_F(TestStaticClusterImplTest, UnsupportedClusterType) { // the factory is not registered, expect to throw EXPECT_THROW_WITH_MESSAGE( { - const envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + const envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); ClusterFactoryImplBase::create( cluster_config, cm_, stats_, tls_, dns_resolver_, ssl_context_manager_, runtime_, random_, dispatcher_, log_manager_, local_info_, admin_, singleton_manager_, @@ -228,17 +245,21 @@ TEST_F(TestStaticClusterImplTest, HostnameWithoutDNS) { common_lb_config: consistent_hashing_lb_config: use_hostname_for_hashing: true - hosts: - - socket_address: - address: 10.0.0.1 - port_value: 443 + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 10.0.0.1 + port_value: 443 cluster_type: name: envoy.clusters.test_static )EOF"; EXPECT_THROW_WITH_MESSAGE( { - const envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + const envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); ClusterFactoryImplBase::create( cluster_config, cm_, stats_, tls_, dns_resolver_, ssl_context_manager_, runtime_, random_, dispatcher_, log_manager_, local_info_, admin_, singleton_manager_, diff --git a/test/common/upstream/cluster_manager_impl_test.cc b/test/common/upstream/cluster_manager_impl_test.cc index 
95d0766b78504..affa3fed62b36 100644 --- a/test/common/upstream/cluster_manager_impl_test.cc +++ b/test/common/upstream/cluster_manager_impl_test.cc @@ -6,23 +6,25 @@ #include "test/common/upstream/test_cluster_manager.h" -using testing::_; -using testing::Eq; -using testing::InSequence; -using testing::Invoke; -using testing::Mock; -using testing::NiceMock; -using testing::Return; -using testing::ReturnNew; -using testing::SaveArg; - namespace Envoy { namespace Upstream { namespace { -envoy::config::bootstrap::v3::Bootstrap parseBootstrapFromV2Yaml(const std::string& yaml) { +using ::testing::_; +using ::testing::DoAll; +using ::testing::Eq; +using ::testing::InSequence; +using ::testing::Invoke; +using ::testing::Mock; +using ::testing::NiceMock; +using ::testing::Return; +using ::testing::ReturnNew; +using ::testing::SaveArg; + +envoy::config::bootstrap::v3::Bootstrap parseBootstrapFromV3Yaml(const std::string& yaml, + bool avoid_boosting = true) { envoy::config::bootstrap::v3::Bootstrap bootstrap; - TestUtility::loadFromYaml(yaml, bootstrap, true); + TestUtility::loadFromYaml(yaml, bootstrap, true, avoid_boosting); return bootstrap; } @@ -41,7 +43,8 @@ class ClusterManagerImplTest : public testing::Test { bootstrap, factory_, factory_.stats_, factory_.tls_, factory_.runtime_, factory_.random_, factory_.local_info_, log_manager_, factory_.dispatcher_, admin_, validation_context_, *api_, http_context_, grpc_context_); - cluster_manager_->initializeSecondaryClusters(bootstrap); + cluster_manager_->setPrimaryClustersInitializedCb( + [this, bootstrap]() { cluster_manager_->initializeSecondaryClusters(bootstrap); }); } void createWithLocalClusterUpdate(const bool enable_merge_window = true) { @@ -52,13 +55,20 @@ class ClusterManagerImplTest : public testing::Test { connect_timeout: 0.250s type: STATIC lb_policy: ROUND_ROBIN - hosts: - - socket_address: - address: "127.0.0.1" - port_value: 11001 - - socket_address: - address: "127.0.0.1" - port_value: 11002 + 
load_assignment: + cluster_name: cluster_1 + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 11001 + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 11002 )EOF"; const std::string merge_window_enabled = R"EOF( common_lb_config: @@ -71,7 +81,7 @@ class ClusterManagerImplTest : public testing::Test { yaml += enable_merge_window ? merge_window_enabled : merge_window_disabled; - const auto& bootstrap = parseBootstrapFromV2Yaml(yaml); + const auto& bootstrap = parseBootstrapFromV3Yaml(yaml); cluster_manager_ = std::make_unique( bootstrap, factory_, factory_.stats_, factory_.tls_, factory_.runtime_, factory_.random_, @@ -135,7 +145,7 @@ envoy::config::bootstrap::v3::Bootstrap defaultConfig() { clusters: [] )EOF"; - return parseBootstrapFromV2Yaml(yaml); + return parseBootstrapFromV3Yaml(yaml); } TEST_F(ClusterManagerImplTest, MultipleProtocolClusterFail) { @@ -149,7 +159,7 @@ TEST_F(ClusterManagerImplTest, MultipleProtocolClusterFail) { http_protocol_options: {} )EOF"; EXPECT_THROW_WITH_MESSAGE( - create(parseBootstrapFromV2Yaml(yaml)), EnvoyException, + create(parseBootstrapFromV3Yaml(yaml)), EnvoyException, "cluster: Both HTTP1 and HTTP2 options may only be configured with non-default " "'protocol_selection' values"); } @@ -171,7 +181,7 @@ TEST_F(ClusterManagerImplTest, MultipleHealthCheckFail) { path: "/" )EOF"; - EXPECT_THROW_WITH_MESSAGE(create(parseBootstrapFromV2Yaml(yaml)), EnvoyException, + EXPECT_THROW_WITH_MESSAGE(create(parseBootstrapFromV3Yaml(yaml)), EnvoyException, "Multiple health checks not supported"); } @@ -188,11 +198,11 @@ TEST_F(ClusterManagerImplTest, MultipleProtocolCluster) { http_protocol_options: {} protocol_selection: USE_DOWNSTREAM_PROTOCOL )EOF"; - create(parseBootstrapFromV2Yaml(yaml)); + create(parseBootstrapFromV3Yaml(yaml)); checkConfigDump(R"EOF( static_clusters: - cluster: - "@type": type.googleapis.com/envoy.api.v2.Cluster + "@type": 
type.googleapis.com/envoy.config.cluster.v3.Cluster name: http12_cluster connect_timeout: 0.250s lb_policy: ROUND_ROBIN @@ -222,7 +232,7 @@ TEST_F(ClusterManagerImplTest, OutlierEventLog) { )EOF"; EXPECT_CALL(log_manager_, createAccessLog("foo")); - create(parseBootstrapFromV2Json(json)); + create(parseBootstrapFromV3Json(json)); } TEST_F(ClusterManagerImplTest, NoSdsConfig) { @@ -234,7 +244,7 @@ TEST_F(ClusterManagerImplTest, NoSdsConfig) { type: eds lb_policy: round_robin )EOF"; - EXPECT_THROW_WITH_MESSAGE(create(parseBootstrapFromV2Yaml(yaml)), EnvoyException, + EXPECT_THROW_WITH_MESSAGE(create(parseBootstrapFromV3Yaml(yaml)), EnvoyException, "cannot create an EDS cluster without an EDS config"); } @@ -253,7 +263,7 @@ TEST_F(ClusterManagerImplTest, UnknownClusterType) { } )EOF"; - EXPECT_THROW_WITH_REGEX(create(parseBootstrapFromV2Json(json)), EnvoyException, + EXPECT_THROW_WITH_REGEX(create(parseBootstrapFromV3Json(json)), EnvoyException, "invalid value \"foo\" for type TYPE_ENUM"); } @@ -271,7 +281,7 @@ TEST_F(ClusterManagerImplTest, LocalClusterNotDefined) { )EOF", clustersJson({defaultStaticClusterJson("cluster_1"), defaultStaticClusterJson("cluster_2")})); - EXPECT_THROW(create(parseBootstrapFromV2Json(json)), EnvoyException); + EXPECT_THROW(create(parseBootstrapFromV3Json(json)), EnvoyException); } TEST_F(ClusterManagerImplTest, BadClusterManagerConfig) { @@ -289,7 +299,7 @@ TEST_F(ClusterManagerImplTest, BadClusterManagerConfig) { } )EOF"; - EXPECT_THROW_WITH_REGEX(create(parseBootstrapFromV2Json(json)), EnvoyException, + EXPECT_THROW_WITH_REGEX(create(parseBootstrapFromV3Json(json)), EnvoyException, "fake_property: Cannot find field"); } @@ -308,7 +318,7 @@ TEST_F(ClusterManagerImplTest, LocalClusterDefined) { clustersJson({defaultStaticClusterJson("cluster_1"), defaultStaticClusterJson("cluster_2"), defaultStaticClusterJson("new_cluster")})); - create(parseBootstrapFromV2Json(json)); + create(parseBootstrapFromV3Json(json)); checkStats(3 /*added*/, 0 
/*modified*/, 0 /*removed*/, 3 /*active*/, 0 /*warming*/); factory_.tls_.shutdownThread(); @@ -318,7 +328,7 @@ TEST_F(ClusterManagerImplTest, DuplicateCluster) { const std::string json = fmt::sprintf( "{\"static_resources\":{%s}}", clustersJson({defaultStaticClusterJson("cluster_1"), defaultStaticClusterJson("cluster_1")})); - const auto config = parseBootstrapFromV2Json(json); + const auto config = parseBootstrapFromV3Json(json); EXPECT_THROW(create(config), EnvoyException); } @@ -331,16 +341,17 @@ TEST_F(ClusterManagerImplTest, ValidClusterName) { type: static lb_policy: round_robin load_assignment: + cluster_name: foo endpoints: - - lb_endpoints: - - endpoint: - address: - socket_address: - address: 127.0.0.1 - port_value: 11001 + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 11001 )EOF"; - create(parseBootstrapFromV2Yaml(yaml)); + create(parseBootstrapFromV3Yaml(yaml)); cluster_manager_->clusters() .find("cluster:name") ->second.get() @@ -351,6 +362,54 @@ TEST_F(ClusterManagerImplTest, ValidClusterName) { EXPECT_EQ(1UL, factory_.stats_.counter("cluster.cluster_name.foo").value()); } +// Validate that the primary clusters are derived from the bootstrap and don't +// include EDS. 
+TEST_F(ClusterManagerImplTest, PrimaryClusters) { + const std::string yaml = R"EOF( +static_resources: + clusters: + - name: static_cluster + connect_timeout: 0.250s + type: static + - name: logical_dns_cluster + connect_timeout: 0.250s + type: logical_dns + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: foo.com + port_value: 11001 + - name: strict_dns_cluster + connect_timeout: 0.250s + type: strict_dns + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: foo.com + port_value: 11001 + - name: rest_eds_cluster + connect_timeout: 0.250s + type: eds + eds_cluster_config: + eds_config: + api_config_source: + api_type: GRPC + grpc_services: + envoy_grpc: + cluster_name: static_cluster + )EOF"; + create(parseBootstrapFromV3Yaml(yaml)); + const auto& primary_clusters = cluster_manager_->primaryClusters(); + EXPECT_THAT(primary_clusters, testing::UnorderedElementsAre( + "static_cluster", "strict_dns_cluster", "logical_dns_cluster")); +} + TEST_F(ClusterManagerImplTest, OriginalDstLbRestriction) { const std::string yaml = R"EOF( static_resources: @@ -362,7 +421,7 @@ TEST_F(ClusterManagerImplTest, OriginalDstLbRestriction) { )EOF"; EXPECT_THROW_WITH_MESSAGE( - create(parseBootstrapFromV2Yaml(yaml)), EnvoyException, + create(parseBootstrapFromV3Yaml(yaml)), EnvoyException, "cluster: LB policy ROUND_ROBIN is not valid for Cluster type ORIGINAL_DST. 
Only " "'CLUSTER_PROVIDED' or 'ORIGINAL_DST_LB' is allowed with cluster type 'ORIGINAL_DST'"); } @@ -377,15 +436,15 @@ TEST_F(ClusterManagerImplTest, OriginalDstLbRestriction2) { lb_policy: original_dst_lb load_assignment: endpoints: - - lb_endpoints: - - endpoint: - address: - socket_address: - address: 127.0.0.1 - port_value: 11001 + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 11001 )EOF"; - EXPECT_THROW_WITH_MESSAGE(create(parseBootstrapFromV2Yaml(yaml)), EnvoyException, + EXPECT_THROW_WITH_MESSAGE(create(parseBootstrapFromV3Yaml(yaml, false)), EnvoyException, "cluster: LB policy hidden_envoy_deprecated_ORIGINAL_DST_LB is not " "valid for Cluster type STATIC. " "'ORIGINAL_DST_LB' is allowed only with cluster type 'ORIGINAL_DST'"); @@ -434,18 +493,19 @@ TEST_P(ClusterManagerSubsetInitializationTest, SubsetLoadBalancerInitialization) subset_selectors: - keys: [ "x" ] load_assignment: + cluster_name: cluster_1 endpoints: - - lb_endpoints: - - endpoint: - address: - socket_address: - address: 127.0.0.1 - port_value: 8000 - - endpoint: - address: - socket_address: - address: 127.0.0.1 - port_value: 8001 + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 8000 + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 8001 )EOF"; const std::string& policy_name = envoy::config::cluster::v3::Cluster::LbPolicy_Name(GetParam()); @@ -462,12 +522,12 @@ TEST_P(ClusterManagerSubsetInitializationTest, SubsetLoadBalancerInitialization) if (GetParam() == envoy::config::cluster::v3::Cluster::hidden_envoy_deprecated_ORIGINAL_DST_LB || GetParam() == envoy::config::cluster::v3::Cluster::CLUSTER_PROVIDED) { EXPECT_THROW_WITH_MESSAGE( - create(parseBootstrapFromV2Yaml(yaml)), EnvoyException, + create(parseBootstrapFromV3Yaml(yaml)), EnvoyException, fmt::format("cluster: LB policy {} cannot be combined with lb_subset_config", 
envoy::config::cluster::v3::Cluster::LbPolicy_Name(GetParam()))); } else { - create(parseBootstrapFromV2Yaml(yaml)); + create(parseBootstrapFromV3Yaml(yaml)); checkStats(1 /*added*/, 0 /*modified*/, 0 /*removed*/, 1 /*active*/, 0 /*warming*/); Upstream::ThreadLocalCluster* tlc = cluster_manager_->get("cluster_1"); @@ -501,7 +561,7 @@ TEST_F(ClusterManagerImplTest, SubsetLoadBalancerOriginalDstRestriction) { - keys: [ "x" ] )EOF"; - EXPECT_THROW_WITH_MESSAGE(create(parseBootstrapFromV2Yaml(yaml)), EnvoyException, + EXPECT_THROW_WITH_MESSAGE(create(parseBootstrapFromV3Yaml(yaml, false)), EnvoyException, "cluster: LB policy hidden_envoy_deprecated_ORIGINAL_DST_LB cannot be " "combined with lb_subset_config"); } @@ -521,7 +581,7 @@ TEST_F(ClusterManagerImplTest, SubsetLoadBalancerClusterProvidedLbRestriction) { )EOF"; EXPECT_THROW_WITH_MESSAGE( - create(parseBootstrapFromV2Yaml(yaml)), EnvoyException, + create(parseBootstrapFromV3Yaml(yaml)), EnvoyException, "cluster: LB policy CLUSTER_PROVIDED cannot be combined with lb_subset_config"); } @@ -539,6 +599,7 @@ TEST_F(ClusterManagerImplTest, SubsetLoadBalancerLocalityAware) { - keys: [ "x" ] locality_weight_aware: true load_assignment: + cluster_name: cluster_1 endpoints: - lb_endpoints: - endpoint: @@ -553,7 +614,7 @@ TEST_F(ClusterManagerImplTest, SubsetLoadBalancerLocalityAware) { port_value: 8001 )EOF"; - EXPECT_THROW_WITH_MESSAGE(create(parseBootstrapFromV2Yaml(yaml)), EnvoyException, + EXPECT_THROW_WITH_MESSAGE(create(parseBootstrapFromV3Yaml(yaml)), EnvoyException, "Locality weight aware subset LB requires that a " "locality_weighted_lb_config be set in cluster_1"); } @@ -569,20 +630,21 @@ TEST_F(ClusterManagerImplTest, RingHashLoadBalancerInitialization) { connect_timeout: 0.250s type: STATIC load_assignment: + cluster_name: redis_cluster endpoints: - - lb_endpoints: - - endpoint: - address: - socket_address: - address: 127.0.0.1 - port_value: 8000 - - endpoint: - address: - socket_address: - address: 127.0.0.1 - 
port_value: 8001 + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 8000 + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 8001 )EOF"; - create(parseBootstrapFromV2Yaml(yaml)); + create(parseBootstrapFromV3Yaml(yaml)); } TEST_F(ClusterManagerImplTest, RingHashLoadBalancerV2Initialization) { @@ -593,23 +655,24 @@ TEST_F(ClusterManagerImplTest, RingHashLoadBalancerV2Initialization) { connect_timeout: 0.250s lb_policy: RING_HASH load_assignment: + cluster_name: redis_cluster endpoints: - - lb_endpoints: - - endpoint: - address: - socket_address: - address: 127.0.0.1 - port_value: 8000 - - endpoint: - address: - socket_address: - address: 127.0.0.1 - port_value: 8001 + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 8000 + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 8001 dns_lookup_family: V4_ONLY ring_hash_lb_config: minimum_ring_size: 125 )EOF"; - create(parseBootstrapFromV2Yaml(yaml)); + create(parseBootstrapFromV3Yaml(yaml)); } // Verify EDS clusters have EDS config. @@ -621,7 +684,7 @@ TEST_F(ClusterManagerImplTest, EdsClustersRequireEdsConfig) { type: EDS )EOF"; - EXPECT_THROW_WITH_MESSAGE(create(parseBootstrapFromV2Yaml(yaml)), EnvoyException, + EXPECT_THROW_WITH_MESSAGE(create(parseBootstrapFromV3Yaml(yaml)), EnvoyException, "cannot create an EDS cluster without an EDS config"); } @@ -635,7 +698,7 @@ TEST_F(ClusterManagerImplTest, ClusterProvidedLbNoLb) { cluster1->info_->lb_type_ = LoadBalancerType::ClusterProvided; EXPECT_CALL(factory_, clusterFromProto_(_, _, _, _)) .WillOnce(Return(std::make_pair(cluster1, nullptr))); - EXPECT_THROW_WITH_MESSAGE(create(parseBootstrapFromV2Json(json)), EnvoyException, + EXPECT_THROW_WITH_MESSAGE(create(parseBootstrapFromV3Json(json)), EnvoyException, "cluster manager: cluster provided LB specified but cluster " "'cluster_0' did not provide one. 
Check cluster documentation."); } @@ -649,7 +712,7 @@ TEST_F(ClusterManagerImplTest, ClusterProvidedLbNotConfigured) { cluster1->info_->name_ = "cluster_0"; EXPECT_CALL(factory_, clusterFromProto_(_, _, _, _)) .WillOnce(Return(std::make_pair(cluster1, new MockThreadAwareLoadBalancer()))); - EXPECT_THROW_WITH_MESSAGE(create(parseBootstrapFromV2Json(json)), EnvoyException, + EXPECT_THROW_WITH_MESSAGE(create(parseBootstrapFromV3Json(json)), EnvoyException, "cluster manager: cluster provided LB not specified but cluster " "'cluster_0' provided one. Check cluster documentation."); } @@ -669,7 +732,7 @@ class ClusterManagerImplThreadAwareLbTest : public ClusterManagerImplTest { EXPECT_CALL(factory_, clusterFromProto_(_, _, _, _)) .WillOnce(Return(std::make_pair(cluster1, nullptr))); ON_CALL(*cluster1, initializePhase()).WillByDefault(Return(Cluster::InitializePhase::Primary)); - create(parseBootstrapFromV2Json(json)); + create(parseBootstrapFromV3Json(json)); EXPECT_EQ(nullptr, cluster_manager_->get("cluster_0")->loadBalancer().chooseHost(nullptr)); @@ -704,13 +767,14 @@ TEST_F(ClusterManagerImplTest, TcpHealthChecker) { type: STATIC lb_policy: ROUND_ROBIN load_assignment: + cluster_name: cluster_1 endpoints: - - lb_endpoints: - - endpoint: - address: - socket_address: - address: 127.0.0.1 - port_value: 11001 + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 11001 health_checks: - timeout: 1s interval: 1s @@ -728,7 +792,7 @@ TEST_F(ClusterManagerImplTest, TcpHealthChecker) { createClientConnection_( PointeesEq(Network::Utility::resolveUrl("tcp://127.0.0.1:11001")), _, _, _)) .WillOnce(Return(connection)); - create(parseBootstrapFromV2Yaml(yaml)); + create(parseBootstrapFromV3Yaml(yaml)); factory_.tls_.shutdownThread(); } @@ -741,13 +805,14 @@ TEST_F(ClusterManagerImplTest, HttpHealthChecker) { type: STATIC lb_policy: ROUND_ROBIN load_assignment: + cluster_name: cluster_1 endpoints: - - lb_endpoints: - - endpoint: - address: - 
socket_address: - address: 127.0.0.1 - port_value: 11001 + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 11001 health_checks: - timeout: 1s interval: 1s @@ -762,7 +827,7 @@ TEST_F(ClusterManagerImplTest, HttpHealthChecker) { createClientConnection_( PointeesEq(Network::Utility::resolveUrl("tcp://127.0.0.1:11001")), _, _, _)) .WillOnce(Return(connection)); - create(parseBootstrapFromV2Yaml(yaml)); + create(parseBootstrapFromV3Yaml(yaml)); factory_.tls_.shutdownThread(); } @@ -770,7 +835,7 @@ TEST_F(ClusterManagerImplTest, UnknownCluster) { const std::string json = fmt::sprintf("{\"static_resources\":{%s}}", clustersJson({defaultStaticClusterJson("cluster_1")})); - create(parseBootstrapFromV2Json(json)); + create(parseBootstrapFromV3Json(json)); EXPECT_EQ(nullptr, cluster_manager_->get("hello")); EXPECT_EQ(nullptr, cluster_manager_->httpConnPoolForCluster("hello", ResourcePriority::Default, Http::Protocol::Http2, nullptr)); @@ -802,16 +867,17 @@ TEST_F(ClusterManagerImplTest, VerifyBufferLimits) { lb_policy: round_robin per_connection_buffer_limit_bytes: 8192 load_assignment: + cluster_name: cluster_1 endpoints: - - lb_endpoints: - - endpoint: - address: - socket_address: - address: 127.0.0.1 - port_value: 11001 + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 11001 )EOF"; - create(parseBootstrapFromV2Yaml(yaml)); + create(parseBootstrapFromV3Yaml(yaml)); Network::MockClientConnection* connection = new NiceMock(); EXPECT_CALL(*connection, setBufferLimits(8192)); EXPECT_CALL(factory_.tls_.dispatcher_, createClientConnection_(_, _, _, _)) @@ -825,7 +891,7 @@ TEST_F(ClusterManagerImplTest, ShutdownOrder) { const std::string json = fmt::sprintf("{\"static_resources\":{%s}}", clustersJson({defaultStaticClusterJson("cluster_1")})); - create(parseBootstrapFromV2Json(json)); + create(parseBootstrapFromV3Json(json)); Cluster& cluster = 
cluster_manager_->activeClusters().begin()->second; EXPECT_EQ("cluster_1", cluster.info()->name()); EXPECT_EQ(cluster.info(), cluster_manager_->get("cluster_1")->info()); @@ -853,7 +919,7 @@ TEST_F(ClusterManagerImplTest, InitializeOrder) { "dynamic_resources": { "cds_config": { "api_config_source": { - "api_type": "UNSUPPORTED_REST_LEGACY", + "api_type": "0", "refresh_delay": "30s", "cluster_names": ["cds_cluster"] } @@ -893,7 +959,7 @@ TEST_F(ClusterManagerImplTest, InitializeOrder) { EXPECT_CALL(*cds_cluster, initialize(_)); EXPECT_CALL(*cluster1, initialize(_)); - create(parseBootstrapFromV2Json(json)); + create(parseBootstrapFromV3Json(json)); ReadyWatcher initialized; cluster_manager_->setInitializedCb([&]() -> void { initialized.ready(); }); @@ -935,78 +1001,102 @@ TEST_F(ClusterManagerImplTest, InitializeOrder) { version_info: version3 static_clusters: - cluster: - "@type": type.googleapis.com/envoy.api.v2.Cluster + "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster name: "cds_cluster" type: "STATIC" connect_timeout: 0.25s - hosts: - - socket_address: - address: "127.0.0.1" - port_value: 11001 + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 11001 last_updated: seconds: 1234567891 nanos: 234000000 - cluster: - "@type": type.googleapis.com/envoy.api.v2.Cluster + "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster name: "fake_cluster" type: "STATIC" connect_timeout: 0.25s - hosts: - - socket_address: - address: "127.0.0.1" - port_value: 11001 + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 11001 last_updated: seconds: 1234567891 nanos: 234000000 - cluster: - "@type": type.googleapis.com/envoy.api.v2.Cluster + "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster name: "fake_cluster2" type: "STATIC" connect_timeout: 0.25s - hosts: - - socket_address: - address: "127.0.0.1" - 
port_value: 11001 + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 11001 last_updated: seconds: 1234567891 nanos: 234000000 dynamic_active_clusters: - version_info: "version1" cluster: - "@type": type.googleapis.com/envoy.api.v2.Cluster + "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster name: "cluster3" type: "STATIC" connect_timeout: 0.25s - hosts: - - socket_address: - address: "127.0.0.1" - port_value: 11001 + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 11001 last_updated: seconds: 1234567891 nanos: 234000000 - version_info: "version2" cluster: - "@type": type.googleapis.com/envoy.api.v2.Cluster + "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster name: "cluster4" type: "STATIC" connect_timeout: 0.25s - hosts: - - socket_address: - address: "127.0.0.1" - port_value: 11001 + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 11001 last_updated: seconds: 1234567891 nanos: 234000000 - version_info: "version3" cluster: - "@type": type.googleapis.com/envoy.api.v2.Cluster + "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster name: "cluster5" type: "STATIC" connect_timeout: 0.25s - hosts: - - socket_address: - address: "127.0.0.1" - port_value: 11001 + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 11001 last_updated: seconds: 1234567891 nanos: 234000000 @@ -1055,7 +1145,7 @@ TEST_F(ClusterManagerImplTest, DynamicRemoveWithLocalCluster) { ON_CALL(*foo, initializePhase()).WillByDefault(Return(Cluster::InitializePhase::Primary)); EXPECT_CALL(*foo, initialize(_)); - create(parseBootstrapFromV2Json(json)); + create(parseBootstrapFromV3Json(json)); foo->initialize_callback_(); // Now add a dynamic cluster. 
This cluster will have a member update callback from the local @@ -1106,21 +1196,25 @@ TEST_F(ClusterManagerImplTest, RemoveWarmingCluster) { EXPECT_CALL(*cluster1, initializePhase()).Times(0); EXPECT_CALL(*cluster1, initialize(_)); EXPECT_TRUE( - cluster_manager_->addOrUpdateCluster(defaultStaticCluster("fake_cluster"), "version1")); + cluster_manager_->addOrUpdateCluster(defaultStaticCluster("fake_cluster"), "version3")); checkStats(1 /*added*/, 0 /*modified*/, 0 /*removed*/, 0 /*active*/, 1 /*warming*/); EXPECT_EQ(nullptr, cluster_manager_->get("fake_cluster")); checkConfigDump(R"EOF( dynamic_warming_clusters: - - version_info: "version1" + - version_info: "version3" cluster: - "@type": type.googleapis.com/envoy.api.v2.Cluster + "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster name: "fake_cluster" type: STATIC connect_timeout: 0.25s - hosts: - - socket_address: - address: "127.0.0.1" - port_value: 11001 + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 11001 last_updated: seconds: 1234567891 nanos: 234000000 @@ -1149,21 +1243,25 @@ TEST_F(ClusterManagerImplTest, ModifyWarmingCluster) { EXPECT_CALL(*cluster1, initializePhase()).Times(0); EXPECT_CALL(*cluster1, initialize(_)); EXPECT_TRUE( - cluster_manager_->addOrUpdateCluster(defaultStaticCluster("fake_cluster"), "version1")); + cluster_manager_->addOrUpdateCluster(defaultStaticCluster("fake_cluster"), "version3")); checkStats(1 /*added*/, 0 /*modified*/, 0 /*removed*/, 0 /*active*/, 1 /*warming*/); EXPECT_EQ(nullptr, cluster_manager_->get("fake_cluster")); checkConfigDump(R"EOF( dynamic_warming_clusters: - - version_info: "version1" + - version_info: "version3" cluster: - "@type": type.googleapis.com/envoy.api.v2.Cluster + "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster name: "fake_cluster" type: STATIC connect_timeout: 0.25s - hosts: - - socket_address: - address: "127.0.0.1" - port_value: 11001 + 
load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 11001 last_updated: seconds: 1234567891 nanos: 234000000 @@ -1177,26 +1275,30 @@ TEST_F(ClusterManagerImplTest, ModifyWarmingCluster) { EXPECT_CALL(*cluster2, initializePhase()).Times(0); EXPECT_CALL(*cluster2, initialize(_)); EXPECT_TRUE(cluster_manager_->addOrUpdateCluster( - parseClusterFromV2Json(fmt::sprintf(kDefaultStaticClusterTmpl, "fake_cluster", + parseClusterFromV3Json(fmt::sprintf(kDefaultStaticClusterTmpl, "fake_cluster", R"EOF( "socket_address": { "address": "127.0.0.1", "port_value": 11002 })EOF")), - "version2")); + "version3")); checkStats(1 /*added*/, 1 /*modified*/, 0 /*removed*/, 0 /*active*/, 1 /*warming*/); checkConfigDump(R"EOF( dynamic_warming_clusters: - - version_info: "version2" + - version_info: "version3" cluster: - "@type": type.googleapis.com/envoy.api.v2.Cluster + "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster name: "fake_cluster" type: STATIC connect_timeout: 0.25s - hosts: - - socket_address: - address: "127.0.0.1" - port_value: 11002 + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 11002 last_updated: seconds: 1234567891 nanos: 234000000 @@ -1339,7 +1441,7 @@ TEST_F(ClusterManagerImplTest, AddOrUpdateClusterStaticExists) { ON_CALL(*cluster1, initializePhase()).WillByDefault(Return(Cluster::InitializePhase::Primary)); EXPECT_CALL(*cluster1, initialize(_)); - create(parseBootstrapFromV2Json(json)); + create(parseBootstrapFromV3Json(json)); ReadyWatcher initialized; cluster_manager_->setInitializedCb([&]() -> void { initialized.ready(); }); @@ -1368,7 +1470,7 @@ TEST_F(ClusterManagerImplTest, HostsPostedToTlsCluster) { ON_CALL(*cluster1, initializePhase()).WillByDefault(Return(Cluster::InitializePhase::Primary)); EXPECT_CALL(*cluster1, initialize(_)); - create(parseBootstrapFromV2Json(json)); + 
create(parseBootstrapFromV3Json(json)); ReadyWatcher initialized; cluster_manager_->setInitializedCb([&]() -> void { initialized.ready(); }); @@ -1436,7 +1538,7 @@ TEST_F(ClusterManagerImplTest, CloseHttpConnectionsOnHealthFailure) { // Test inline init. initialize_callback(); })); - create(parseBootstrapFromV2Json(json)); + create(parseBootstrapFromV3Json(json)); EXPECT_CALL(factory_, allocateConnPool_(_, _, _)).WillOnce(Return(cp1)); cluster_manager_->httpConnPoolForCluster("some_cluster", ResourcePriority::Default, @@ -1499,7 +1601,7 @@ TEST_F(ClusterManagerImplTest, CloseTcpConnectionPoolsOnHealthFailure) { // Test inline init. initialize_callback(); })); - create(parseBootstrapFromV2Json(json)); + create(parseBootstrapFromV3Json(json)); EXPECT_CALL(factory_, allocateTcpConnPool_(_)).WillOnce(Return(cp1)); cluster_manager_->tcpConnPoolForCluster("some_cluster", ResourcePriority::Default, nullptr); @@ -1570,7 +1672,7 @@ TEST_F(ClusterManagerImplTest, CloseTcpConnectionsOnHealthFailure) { // Test inline init. initialize_callback(); })); - create(parseBootstrapFromV2Yaml(yaml)); + create(parseBootstrapFromV3Yaml(yaml)); EXPECT_CALL(factory_.tls_.dispatcher_, createClientConnection_(_, _, _, _)) .WillOnce(Return(connection1)); @@ -1643,7 +1745,7 @@ TEST_F(ClusterManagerImplTest, DoNotCloseTcpConnectionsOnHealthFailure) { // Test inline init. 
initialize_callback(); })); - create(parseBootstrapFromV2Yaml(yaml)); + create(parseBootstrapFromV3Yaml(yaml)); EXPECT_CALL(factory_.tls_.dispatcher_, createClientConnection_(_, _, _, _)) .WillOnce(Return(connection1)); @@ -1672,6 +1774,7 @@ TEST_F(ClusterManagerImplTest, DynamicHostRemove) { address: 1.2.3.4 port_value: 80 load_assignment: + cluster_name: cluster_1 endpoints: - lb_endpoints: - endpoint: @@ -1689,7 +1792,7 @@ TEST_F(ClusterManagerImplTest, DynamicHostRemove) { Network::MockActiveDnsQuery active_dns_query; EXPECT_CALL(*dns_resolver, resolve(_, _, _)) .WillRepeatedly(DoAll(SaveArg<2>(&dns_callback), Return(&active_dns_query))); - create(parseBootstrapFromV2Yaml(yaml)); + create(parseBootstrapFromV3Yaml(yaml)); EXPECT_FALSE(cluster_manager_->get("cluster_1")->info()->addedViaApi()); // Test for no hosts returning the correct values before we have hosts. @@ -1815,6 +1918,7 @@ TEST_F(ClusterManagerImplTest, DynamicHostRemoveWithTls) { port_value: 80 lb_policy: ROUND_ROBIN load_assignment: + cluster_name: cluster_1 endpoints: - lb_endpoints: - endpoint: @@ -1832,7 +1936,7 @@ TEST_F(ClusterManagerImplTest, DynamicHostRemoveWithTls) { Network::MockActiveDnsQuery active_dns_query; EXPECT_CALL(*dns_resolver, resolve(_, _, _)) .WillRepeatedly(DoAll(SaveArg<2>(&dns_callback), Return(&active_dns_query))); - create(parseBootstrapFromV2Yaml(yaml)); + create(parseBootstrapFromV3Yaml(yaml)); EXPECT_FALSE(cluster_manager_->get("cluster_1")->info()->addedViaApi()); NiceMock example_com_context; @@ -2067,7 +2171,7 @@ TEST_F(ClusterManagerImplTest, UseTcpInDefaultDnsResolver) { Network::MockActiveDnsQuery active_dns_query; EXPECT_CALL(*dns_resolver, resolve(_, _, _)) .WillRepeatedly(DoAll(SaveArg<2>(&dns_callback), Return(&active_dns_query))); - create(parseBootstrapFromV2Yaml(yaml)); + create(parseBootstrapFromV3Yaml(yaml)); factory_.tls_.shutdownThread(); } @@ -2094,7 +2198,7 @@ TEST_F(ClusterManagerImplTest, UseUdpWithCustomDnsResolver) { Network::MockActiveDnsQuery 
active_dns_query; EXPECT_CALL(*dns_resolver, resolve(_, _, _)) .WillRepeatedly(DoAll(SaveArg<2>(&dns_callback), Return(&active_dns_query))); - create(parseBootstrapFromV2Yaml(yaml)); + create(parseBootstrapFromV3Yaml(yaml)); factory_.tls_.shutdownThread(); } @@ -2122,7 +2226,7 @@ TEST_F(ClusterManagerImplTest, UseTcpWithCustomDnsResolver) { Network::MockActiveDnsQuery active_dns_query; EXPECT_CALL(*dns_resolver, resolve(_, _, _)) .WillRepeatedly(DoAll(SaveArg<2>(&dns_callback), Return(&active_dns_query))); - create(parseBootstrapFromV2Yaml(yaml)); + create(parseBootstrapFromV3Yaml(yaml)); factory_.tls_.shutdownThread(); } @@ -2143,6 +2247,7 @@ TEST_F(ClusterManagerImplTest, DynamicHostRemoveDefaultPriority) { port_value: 80 lb_policy: ROUND_ROBIN load_assignment: + cluster_name: cluster_1 endpoints: - lb_endpoints: - endpoint: @@ -2160,7 +2265,7 @@ TEST_F(ClusterManagerImplTest, DynamicHostRemoveDefaultPriority) { Network::MockActiveDnsQuery active_dns_query; EXPECT_CALL(*dns_resolver, resolve(_, _, _)) .WillRepeatedly(DoAll(SaveArg<2>(&dns_callback), Return(&active_dns_query))); - create(parseBootstrapFromV2Yaml(yaml)); + create(parseBootstrapFromV3Yaml(yaml)); EXPECT_FALSE(cluster_manager_->get("cluster_1")->info()->addedViaApi()); dns_callback(Network::DnsResolver::ResolutionStatus::Success, @@ -2223,6 +2328,7 @@ TEST_F(ClusterManagerImplTest, ConnPoolDestroyWithDraining) { port_value: 80 lb_policy: ROUND_ROBIN load_assignment: + cluster_name: cluster_1 endpoints: - lb_endpoints: - endpoint: @@ -2240,7 +2346,7 @@ TEST_F(ClusterManagerImplTest, ConnPoolDestroyWithDraining) { Network::MockActiveDnsQuery active_dns_query; EXPECT_CALL(*dns_resolver, resolve(_, _, _)) .WillRepeatedly(DoAll(SaveArg<2>(&dns_callback), Return(&active_dns_query))); - create(parseBootstrapFromV2Yaml(yaml)); + create(parseBootstrapFromV3Yaml(yaml)); EXPECT_FALSE(cluster_manager_->get("cluster_1")->info()->addedViaApi()); dns_callback(Network::DnsResolver::ResolutionStatus::Success, @@ 
-2282,7 +2388,7 @@ TEST_F(ClusterManagerImplTest, OriginalDstInitialization) { "name": "cluster_1", "connect_timeout": "0.250s", "type": "original_dst", - "lb_policy": "original_dst_lb" + "lb_policy": "cluster_provided" } ] } @@ -2292,7 +2398,7 @@ TEST_F(ClusterManagerImplTest, OriginalDstInitialization) { ReadyWatcher initialized; EXPECT_CALL(initialized, ready()); - create(parseBootstrapFromV2Yaml(yaml)); + create(parseBootstrapFromV3Yaml(yaml)); // Set up for an initialize callback. cluster_manager_->setInitializedCb([&]() -> void { initialized.ready(); }); @@ -2585,6 +2691,7 @@ TEST_F(ClusterManagerImplTest, MergedUpdatesDestroyedOnUpdate) { type: STATIC lb_policy: ROUND_ROBIN load_assignment: + cluster_name: new_cluster endpoints: - lb_endpoints: - endpoint: @@ -2595,7 +2702,7 @@ TEST_F(ClusterManagerImplTest, MergedUpdatesDestroyedOnUpdate) { common_lb_config: update_merge_window: 3s )EOF"; - EXPECT_TRUE(cluster_manager_->addOrUpdateCluster(parseClusterFromV2Yaml(yaml), "version1")); + EXPECT_TRUE(cluster_manager_->addOrUpdateCluster(parseClusterFromV3Yaml(yaml), "version1")); Cluster& cluster = cluster_manager_->activeClusters().find("new_cluster")->second; HostVectorSharedPtr hosts( @@ -2643,6 +2750,7 @@ TEST_F(ClusterManagerImplTest, MergedUpdatesDestroyedOnUpdate) { type: STATIC lb_policy: ROUND_ROBIN load_assignment: + cluster_name: new_cluster endpoints: - lb_endpoints: - endpoint: @@ -2663,7 +2771,7 @@ TEST_F(ClusterManagerImplTest, MergedUpdatesDestroyedOnUpdate) { .gauge("cluster_manager.warming_clusters", Stats::Gauge::ImportMode::NeverImport) .value()); EXPECT_TRUE( - cluster_manager_->addOrUpdateCluster(parseClusterFromV2Yaml(yaml_updated), "version2")); + cluster_manager_->addOrUpdateCluster(parseClusterFromV3Yaml(yaml_updated), "version2")); EXPECT_EQ(2, factory_.stats_ .gauge("cluster_manager.active_clusters", Stats::Gauge::ImportMode::NeverImport) .value()); @@ -2791,15 +2899,20 @@ TEST_F(ClusterManagerImplTest, AddUpstreamFilters) { 
connect_timeout: 0.250s lb_policy: ROUND_ROBIN type: STATIC - hosts: - - socket_address: - address: "127.0.0.1" - port_value: 11001 + load_assignment: + cluster_name: cluster_1 + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 11001 filters: - name: envoy.test.filter )EOF"; - create(parseBootstrapFromV2Yaml(yaml)); + create(parseBootstrapFromV3Yaml(yaml)); Network::MockClientConnection* connection = new NiceMock(); EXPECT_CALL(*connection, addReadFilter(_)).Times(0); EXPECT_CALL(*connection, addWriteFilter(_)).Times(1); @@ -2867,6 +2980,9 @@ TEST_F(ClusterManagerInitHelperTest, StaticSdsInitialize) { TEST_F(ClusterManagerInitHelperTest, UpdateAlreadyInitialized) { InSequence s; + ReadyWatcher primary_clusters_initialized; + init_helper_.setPrimaryClustersInitializedCb( + [&]() -> void { primary_clusters_initialized.ready(); }); ReadyWatcher cm_initialized; init_helper_.setInitializedCb([&]() -> void { cm_initialized.ready(); }); @@ -2887,8 +3003,11 @@ TEST_F(ClusterManagerInitHelperTest, UpdateAlreadyInitialized) { init_helper_.removeCluster(cluster1); EXPECT_CALL(*this, onClusterInit(Ref(cluster2))); - EXPECT_CALL(cm_initialized, ready()); + EXPECT_CALL(primary_clusters_initialized, ready()); cluster2.initialize_callback_(); + + EXPECT_CALL(cm_initialized, ready()); + init_helper_.startInitializingSecondaryClusters(); } // If secondary clusters initialization triggered outside of CdsApiImpl::onConfigUpdate()'s @@ -2898,6 +3017,9 @@ TEST_F(ClusterManagerInitHelperTest, UpdateAlreadyInitialized) { TEST_F(ClusterManagerInitHelperTest, InitSecondaryWithoutEdsPaused) { InSequence s; + ReadyWatcher primary_clusters_initialized; + init_helper_.setPrimaryClustersInitializedCb( + [&]() -> void { primary_clusters_initialized.ready(); }); ReadyWatcher cm_initialized; init_helper_.setInitializedCb([&]() -> void { cm_initialized.ready(); }); @@ -2905,6 +3027,7 @@ TEST_F(ClusterManagerInitHelperTest, 
InitSecondaryWithoutEdsPaused) { ON_CALL(cluster1, initializePhase()).WillByDefault(Return(Cluster::InitializePhase::Secondary)); init_helper_.addCluster(cluster1); + EXPECT_CALL(primary_clusters_initialized, ready()); init_helper_.onStaticLoadComplete(); EXPECT_CALL(cluster1, initialize(_)); init_helper_.startInitializingSecondaryClusters(); @@ -2921,6 +3044,9 @@ TEST_F(ClusterManagerInitHelperTest, InitSecondaryWithoutEdsPaused) { TEST_F(ClusterManagerInitHelperTest, InitSecondaryWithEdsPaused) { InSequence s; + ReadyWatcher primary_clusters_initialized; + init_helper_.setPrimaryClustersInitializedCb( + [&]() -> void { primary_clusters_initialized.ready(); }); ReadyWatcher cm_initialized; init_helper_.setInitializedCb([&]() -> void { cm_initialized.ready(); }); @@ -2928,6 +3054,7 @@ TEST_F(ClusterManagerInitHelperTest, InitSecondaryWithEdsPaused) { ON_CALL(cluster1, initializePhase()).WillByDefault(Return(Cluster::InitializePhase::Secondary)); init_helper_.addCluster(cluster1); + EXPECT_CALL(primary_clusters_initialized, ready()); init_helper_.onStaticLoadComplete(); EXPECT_CALL(cluster1, initialize(_)); @@ -2941,6 +3068,9 @@ TEST_F(ClusterManagerInitHelperTest, InitSecondaryWithEdsPaused) { TEST_F(ClusterManagerInitHelperTest, AddSecondaryAfterSecondaryInit) { InSequence s; + ReadyWatcher primary_clusters_initialized; + init_helper_.setPrimaryClustersInitializedCb( + [&]() -> void { primary_clusters_initialized.ready(); }); ReadyWatcher cm_initialized; init_helper_.setInitializedCb([&]() -> void { cm_initialized.ready(); }); @@ -2956,8 +3086,10 @@ TEST_F(ClusterManagerInitHelperTest, AddSecondaryAfterSecondaryInit) { init_helper_.onStaticLoadComplete(); EXPECT_CALL(*this, onClusterInit(Ref(cluster1))); + EXPECT_CALL(primary_clusters_initialized, ready()); EXPECT_CALL(cluster2, initialize(_)); cluster1.initialize_callback_(); + init_helper_.startInitializingSecondaryClusters(); NiceMock cluster3; ON_CALL(cluster3, 
initializePhase()).WillByDefault(Return(Cluster::InitializePhase::Secondary)); @@ -2994,6 +3126,8 @@ TEST_F(ClusterManagerInitHelperTest, RemoveClusterWithinInitLoop) { init_helper_.startInitializingSecondaryClusters(); } +using NameVals = std::vector>; + // Validate that when options are set in the ClusterManager and/or Cluster, we see the socket option // propagated to setsockopt(). This is as close to an end-to-end test as we have for this feature, // due to the complexity of creating an integration test involving the network stack. We only test @@ -3001,30 +3135,33 @@ TEST_F(ClusterManagerInitHelperTest, RemoveClusterWithinInitLoop) { // socket_option_impl_test.cc. class SockoptsTest : public ClusterManagerImplTest { public: - void initialize(const std::string& yaml) { create(parseBootstrapFromV2Yaml(yaml)); } + void initialize(const std::string& yaml) { create(parseBootstrapFromV3Yaml(yaml)); } void TearDown() override { factory_.tls_.shutdownThread(); } // TODO(tschroed): Extend this to support socket state as well. 
- void expectSetsockopts(const std::vector>& names_vals) { - + void expectSetsockopts(const NameVals& names_vals) { NiceMock os_sys_calls; TestThreadsafeSingletonInjector os_calls(&os_sys_calls); + NiceMock socket; bool expect_success = true; for (const auto& name_val : names_vals) { - if (!name_val.first.has_value()) { + if (!name_val.first.hasValue()) { expect_success = false; continue; } - EXPECT_CALL(os_sys_calls, - setsockopt_(_, name_val.first.level(), name_val.first.option(), _, sizeof(int))) - .WillOnce(Invoke([&name_val](os_fd_t, int, int, const void* optval, socklen_t) -> int { - EXPECT_EQ(name_val.second, *static_cast(optval)); - return 0; - })); + EXPECT_CALL(socket, + setSocketOption(name_val.first.level(), name_val.first.option(), _, sizeof(int))) + .WillOnce( + Invoke([&name_val](int, int, const void* optval, socklen_t) -> Api::SysCallIntResult { + EXPECT_EQ(name_val.second, *static_cast(optval)); + return {0, 0}; + })); } + EXPECT_CALL(socket, ipVersion()) + .WillRepeatedly(testing::Return(Network::Address::IpVersion::v4)); EXPECT_CALL(factory_.tls_.dispatcher_, createClientConnection_(_, _, _, _)) - .WillOnce(Invoke([this, &names_vals, expect_success]( + .WillOnce(Invoke([this, &names_vals, expect_success, &socket]( Network::Address::InstanceConstSharedPtr, Network::Address::InstanceConstSharedPtr, Network::TransportSocketPtr&, const Network::ConnectionSocket::OptionsSharedPtr& options) @@ -3033,7 +3170,6 @@ class SockoptsTest : public ClusterManagerImplTest { if (options.get() != nullptr) { // Don't crash the entire test. 
EXPECT_EQ(names_vals.size(), options->size()); } - NiceMock socket; if (expect_success) { EXPECT_TRUE((Network::Socket::applyOptions( options, socket, envoy::config::core::v3::SocketOption::STATE_PREBIND))); @@ -3047,8 +3183,15 @@ class SockoptsTest : public ClusterManagerImplTest { } void expectSetsockoptFreebind() { - std::vector> names_vals{ - {ENVOY_SOCKET_IP_FREEBIND, 1}}; + NameVals names_vals{{ENVOY_SOCKET_IP_FREEBIND, 1}}; + if (ENVOY_SOCKET_SO_NOSIGPIPE.hasValue()) { + names_vals.emplace_back(std::make_pair(ENVOY_SOCKET_SO_NOSIGPIPE, 1)); + } + expectSetsockopts(names_vals); + } + + void expectOnlyNoSigpipeOptions() { + NameVals names_vals{{std::make_pair(ENVOY_SOCKET_SO_NOSIGPIPE, 1)}}; expectSetsockopts(names_vals); } @@ -3078,6 +3221,7 @@ TEST_F(SockoptsTest, SockoptsUnset) { lb_policy: ROUND_ROBIN type: STATIC load_assignment: + cluster_name: SockoptsCluster endpoints: - lb_endpoints: - endpoint: @@ -3087,7 +3231,11 @@ TEST_F(SockoptsTest, SockoptsUnset) { port_value: 11001 )EOF"; initialize(yaml); - expectNoSocketOptions(); + if (ENVOY_SOCKET_SO_NOSIGPIPE.hasValue()) { + expectOnlyNoSigpipeOptions(); + } else { + expectNoSocketOptions(); + } } TEST_F(SockoptsTest, FreebindClusterOnly) { @@ -3099,6 +3247,7 @@ TEST_F(SockoptsTest, FreebindClusterOnly) { lb_policy: ROUND_ROBIN type: STATIC load_assignment: + cluster_name: SockoptsCluster endpoints: - lb_endpoints: - endpoint: @@ -3122,6 +3271,7 @@ TEST_F(SockoptsTest, FreebindClusterManagerOnly) { lb_policy: ROUND_ROBIN type: STATIC load_assignment: + cluster_name: SockoptsCluster endpoints: - lb_endpoints: - endpoint: @@ -3146,6 +3296,7 @@ TEST_F(SockoptsTest, FreebindClusterOverride) { lb_policy: ROUND_ROBIN type: STATIC load_assignment: + cluster_name: SockoptsCluster endpoints: - lb_endpoints: - endpoint: @@ -3172,6 +3323,7 @@ TEST_F(SockoptsTest, SockoptsClusterOnly) { lb_policy: ROUND_ROBIN type: STATIC load_assignment: + cluster_name: SockoptsCluster endpoints: - lb_endpoints: - endpoint: @@ 
-3186,8 +3338,11 @@ TEST_F(SockoptsTest, SockoptsClusterOnly) { )EOF"; initialize(yaml); - std::vector> names_vals{ - {ENVOY_MAKE_SOCKET_OPTION_NAME(1, 2), 3}, {ENVOY_MAKE_SOCKET_OPTION_NAME(4, 5), 6}}; + NameVals names_vals{{ENVOY_MAKE_SOCKET_OPTION_NAME(1, 2), 3}, + {ENVOY_MAKE_SOCKET_OPTION_NAME(4, 5), 6}}; + if (ENVOY_SOCKET_SO_NOSIGPIPE.hasValue()) { + names_vals.emplace_back(std::make_pair(ENVOY_SOCKET_SO_NOSIGPIPE, 1)); + } expectSetsockopts(names_vals); } @@ -3200,6 +3355,7 @@ TEST_F(SockoptsTest, SockoptsClusterManagerOnly) { lb_policy: ROUND_ROBIN type: STATIC load_assignment: + cluster_name: SockoptsCluster endpoints: - lb_endpoints: - endpoint: @@ -3214,8 +3370,11 @@ TEST_F(SockoptsTest, SockoptsClusterManagerOnly) { { level: 4, name: 5, int_value: 6, state: STATE_PREBIND }] )EOF"; initialize(yaml); - std::vector> names_vals{ - {ENVOY_MAKE_SOCKET_OPTION_NAME(1, 2), 3}, {ENVOY_MAKE_SOCKET_OPTION_NAME(4, 5), 6}}; + NameVals names_vals{{ENVOY_MAKE_SOCKET_OPTION_NAME(1, 2), 3}, + {ENVOY_MAKE_SOCKET_OPTION_NAME(4, 5), 6}}; + if (ENVOY_SOCKET_SO_NOSIGPIPE.hasValue()) { + names_vals.emplace_back(std::make_pair(ENVOY_SOCKET_SO_NOSIGPIPE, 1)); + } expectSetsockopts(names_vals); } @@ -3228,6 +3387,7 @@ TEST_F(SockoptsTest, SockoptsClusterOverride) { lb_policy: ROUND_ROBIN type: STATIC load_assignment: + cluster_name: SockoptsCluster endpoints: - lb_endpoints: - endpoint: @@ -3244,8 +3404,11 @@ TEST_F(SockoptsTest, SockoptsClusterOverride) { socket_options: [{ level: 7, name: 8, int_value: 9, state: STATE_PREBIND }] )EOF"; initialize(yaml); - std::vector> names_vals{ - {ENVOY_MAKE_SOCKET_OPTION_NAME(1, 2), 3}, {ENVOY_MAKE_SOCKET_OPTION_NAME(4, 5), 6}}; + NameVals names_vals{{ENVOY_MAKE_SOCKET_OPTION_NAME(1, 2), 3}, + {ENVOY_MAKE_SOCKET_OPTION_NAME(4, 5), 6}}; + if (ENVOY_SOCKET_SO_NOSIGPIPE.hasValue()) { + names_vals.emplace_back(std::make_pair(ENVOY_SOCKET_SO_NOSIGPIPE, 1)); + } expectSetsockopts(names_vals); } @@ -3256,14 +3419,14 @@ TEST_F(SockoptsTest, 
SockoptsClusterOverride) { // tcp_keepalive_option_impl_test.cc. class TcpKeepaliveTest : public ClusterManagerImplTest { public: - void initialize(const std::string& yaml) { create(parseBootstrapFromV2Yaml(yaml)); } + void initialize(const std::string& yaml) { create(parseBootstrapFromV3Yaml(yaml)); } void TearDown() override { factory_.tls_.shutdownThread(); } void expectSetsockoptSoKeepalive(absl::optional keepalive_probes, absl::optional keepalive_time, absl::optional keepalive_interval) { - if (!ENVOY_SOCKET_SO_KEEPALIVE.has_value()) { + if (!ENVOY_SOCKET_SO_KEEPALIVE.hasValue()) { EXPECT_CALL(factory_.tls_.dispatcher_, createClientConnection_(_, _, _, _)) .WillOnce( Invoke([this](Network::Address::InstanceConstSharedPtr, @@ -3282,55 +3445,86 @@ class TcpKeepaliveTest : public ClusterManagerImplTest { } NiceMock os_sys_calls; TestThreadsafeSingletonInjector os_calls(&os_sys_calls); + NiceMock socket; EXPECT_CALL(factory_.tls_.dispatcher_, createClientConnection_(_, _, _, _)) - .WillOnce( - Invoke([this](Network::Address::InstanceConstSharedPtr, - Network::Address::InstanceConstSharedPtr, Network::TransportSocketPtr&, - const Network::ConnectionSocket::OptionsSharedPtr& options) - -> Network::ClientConnection* { - EXPECT_NE(nullptr, options.get()); - NiceMock socket; - EXPECT_TRUE((Network::Socket::applyOptions( - options, socket, envoy::config::core::v3::SocketOption::STATE_PREBIND))); - return connection_; - })); - EXPECT_CALL(os_sys_calls, setsockopt_(_, ENVOY_SOCKET_SO_KEEPALIVE.level(), - ENVOY_SOCKET_SO_KEEPALIVE.option(), _, sizeof(int))) - .WillOnce(Invoke([](os_fd_t, int, int, const void* optval, socklen_t) -> int { + .WillOnce(Invoke([this, &socket](Network::Address::InstanceConstSharedPtr, + Network::Address::InstanceConstSharedPtr, + Network::TransportSocketPtr&, + const Network::ConnectionSocket::OptionsSharedPtr& options) + -> Network::ClientConnection* { + EXPECT_NE(nullptr, options.get()); + EXPECT_TRUE((Network::Socket::applyOptions( + options, 
socket, envoy::config::core::v3::SocketOption::STATE_PREBIND))); + return connection_; + })); + if (ENVOY_SOCKET_SO_NOSIGPIPE.hasValue()) { + EXPECT_CALL(socket, setSocketOption(ENVOY_SOCKET_SO_NOSIGPIPE.level(), + ENVOY_SOCKET_SO_NOSIGPIPE.option(), _, sizeof(int))) + .WillOnce(Invoke([](int, int, const void* optval, socklen_t) -> Api::SysCallIntResult { + EXPECT_EQ(1, *static_cast(optval)); + return {0, 0}; + })); + } + EXPECT_CALL(socket, setSocketOption(ENVOY_SOCKET_SO_KEEPALIVE.level(), + ENVOY_SOCKET_SO_KEEPALIVE.option(), _, sizeof(int))) + .WillOnce(Invoke([](int, int, const void* optval, socklen_t) -> Api::SysCallIntResult { EXPECT_EQ(1, *static_cast(optval)); - return 0; + return {0, 0}; })); if (keepalive_probes.has_value()) { - EXPECT_CALL(os_sys_calls, setsockopt_(_, ENVOY_SOCKET_TCP_KEEPCNT.level(), - ENVOY_SOCKET_TCP_KEEPCNT.option(), _, sizeof(int))) - .WillOnce( - Invoke([&keepalive_probes](os_fd_t, int, int, const void* optval, socklen_t) -> int { - EXPECT_EQ(keepalive_probes.value(), *static_cast(optval)); - return 0; - })); + EXPECT_CALL(socket, setSocketOption(ENVOY_SOCKET_TCP_KEEPCNT.level(), + ENVOY_SOCKET_TCP_KEEPCNT.option(), _, sizeof(int))) + .WillOnce(Invoke([&keepalive_probes](int, int, const void* optval, + socklen_t) -> Api::SysCallIntResult { + EXPECT_EQ(keepalive_probes.value(), *static_cast(optval)); + return {0, 0}; + })); } if (keepalive_time.has_value()) { - EXPECT_CALL(os_sys_calls, setsockopt_(_, ENVOY_SOCKET_TCP_KEEPIDLE.level(), - ENVOY_SOCKET_TCP_KEEPIDLE.option(), _, sizeof(int))) - .WillOnce( - Invoke([&keepalive_time](os_fd_t, int, int, const void* optval, socklen_t) -> int { + EXPECT_CALL(socket, setSocketOption(ENVOY_SOCKET_TCP_KEEPIDLE.level(), + ENVOY_SOCKET_TCP_KEEPIDLE.option(), _, sizeof(int))) + .WillOnce(Invoke( + [&keepalive_time](int, int, const void* optval, socklen_t) -> Api::SysCallIntResult { EXPECT_EQ(keepalive_time.value(), *static_cast(optval)); - return 0; + return {0, 0}; })); } if 
(keepalive_interval.has_value()) { - EXPECT_CALL(os_sys_calls, setsockopt_(_, ENVOY_SOCKET_TCP_KEEPINTVL.level(), - ENVOY_SOCKET_TCP_KEEPINTVL.option(), _, sizeof(int))) - .WillOnce(Invoke( - [&keepalive_interval](os_fd_t, int, int, const void* optval, socklen_t) -> int { - EXPECT_EQ(keepalive_interval.value(), *static_cast(optval)); - return 0; - })); + EXPECT_CALL(socket, setSocketOption(ENVOY_SOCKET_TCP_KEEPINTVL.level(), + ENVOY_SOCKET_TCP_KEEPINTVL.option(), _, sizeof(int))) + .WillOnce(Invoke([&keepalive_interval](int, int, const void* optval, + socklen_t) -> Api::SysCallIntResult { + EXPECT_EQ(keepalive_interval.value(), *static_cast(optval)); + return {0, 0}; + })); } auto conn_data = cluster_manager_->tcpConnForCluster("TcpKeepaliveCluster", nullptr); EXPECT_EQ(connection_, conn_data.connection_.get()); } + void expectOnlyNoSigpipeOptions() { + NiceMock socket; + EXPECT_CALL(factory_.tls_.dispatcher_, createClientConnection_(_, _, _, _)) + .WillOnce(Invoke([this, &socket](Network::Address::InstanceConstSharedPtr, + Network::Address::InstanceConstSharedPtr, + Network::TransportSocketPtr&, + const Network::ConnectionSocket::OptionsSharedPtr& options) + -> Network::ClientConnection* { + EXPECT_NE(nullptr, options.get()); + EXPECT_TRUE((Network::Socket::applyOptions( + options, socket, envoy::config::core::v3::SocketOption::STATE_PREBIND))); + return connection_; + })); + EXPECT_CALL(socket, setSocketOption(ENVOY_SOCKET_SO_NOSIGPIPE.level(), + ENVOY_SOCKET_SO_NOSIGPIPE.option(), _, sizeof(int))) + .WillOnce(Invoke([](int, int, const void* optval, socklen_t) -> Api::SysCallIntResult { + EXPECT_EQ(1, *static_cast(optval)); + return {0, 0}; + })); + auto conn_data = cluster_manager_->tcpConnForCluster("TcpKeepaliveCluster", nullptr); + EXPECT_EQ(connection_, conn_data.connection_.get()); + } + void expectNoSocketOptions() { EXPECT_CALL(factory_.tls_.dispatcher_, createClientConnection_(_, _, _, _)) .WillOnce( @@ -3357,6 +3551,7 @@ TEST_F(TcpKeepaliveTest, 
TcpKeepaliveUnset) { lb_policy: ROUND_ROBIN type: STATIC load_assignment: + cluster_name: TcpKeepaliveCluster endpoints: - lb_endpoints: - endpoint: @@ -3366,7 +3561,11 @@ TEST_F(TcpKeepaliveTest, TcpKeepaliveUnset) { port_value: 11001 )EOF"; initialize(yaml); - expectNoSocketOptions(); + if (ENVOY_SOCKET_SO_NOSIGPIPE.hasValue()) { + expectOnlyNoSigpipeOptions(); + } else { + expectNoSocketOptions(); + } } TEST_F(TcpKeepaliveTest, TcpKeepaliveCluster) { @@ -3378,6 +3577,7 @@ TEST_F(TcpKeepaliveTest, TcpKeepaliveCluster) { lb_policy: ROUND_ROBIN type: STATIC load_assignment: + cluster_name: TcpKeepaliveCluster endpoints: - lb_endpoints: - endpoint: @@ -3401,6 +3601,7 @@ TEST_F(TcpKeepaliveTest, TcpKeepaliveClusterProbes) { lb_policy: ROUND_ROBIN type: STATIC load_assignment: + cluster_name: TcpKeepaliveCluster endpoints: - lb_endpoints: - endpoint: @@ -3425,6 +3626,7 @@ TEST_F(TcpKeepaliveTest, TcpKeepaliveWithAllOptions) { lb_policy: ROUND_ROBIN type: STATIC load_assignment: + cluster_name: TcpKeepaliveCluster endpoints: - lb_endpoints: - endpoint: @@ -3457,7 +3659,7 @@ TEST_F(ClusterManagerImplTest, ConnPoolsDrainedOnHostSetChange) { ReadyWatcher initialized; EXPECT_CALL(initialized, ready()); - create(parseBootstrapFromV2Yaml(yaml)); + create(parseBootstrapFromV3Yaml(yaml)); // Set up for an initialize callback. cluster_manager_->setInitializedCb([&]() -> void { initialized.ready(); }); @@ -3575,7 +3777,7 @@ TEST_F(ClusterManagerImplTest, ConnPoolsNotDrainedOnHostSetChange) { ReadyWatcher initialized; EXPECT_CALL(initialized, ready()); - create(parseBootstrapFromV2Yaml(yaml)); + create(parseBootstrapFromV3Yaml(yaml)); // Set up for an initialize callback. 
cluster_manager_->setInitializedCb([&]() -> void { initialized.ready(); }); @@ -3648,7 +3850,7 @@ TEST_F(ClusterManagerImplTest, InvalidPriorityLocalClusterNameStatic) { local_cluster_name: new_cluster )EOF"; - EXPECT_THROW_WITH_MESSAGE(create(parseBootstrapFromV2Yaml(yaml)), EnvoyException, + EXPECT_THROW_WITH_MESSAGE(create(parseBootstrapFromV3Yaml(yaml)), EnvoyException, "Unexpected non-zero priority for local cluster 'new_cluster'."); } @@ -3673,7 +3875,7 @@ TEST_F(ClusterManagerImplTest, InvalidPriorityLocalClusterNameStrictDns) { local_cluster_name: new_cluster )EOF"; - EXPECT_THROW_WITH_MESSAGE(create(parseBootstrapFromV2Yaml(yaml)), EnvoyException, + EXPECT_THROW_WITH_MESSAGE(create(parseBootstrapFromV3Yaml(yaml)), EnvoyException, "Unexpected non-zero priority for local cluster 'new_cluster'."); } @@ -3700,7 +3902,7 @@ TEST_F(ClusterManagerImplTest, InvalidPriorityLocalClusterNameLogicalDns) { // The priority for LOGICAL_DNS endpoints are written, so we just verify that there is only a // single priority even if the endpoint was configured to be priority 10. 
- create(parseBootstrapFromV2Yaml(yaml)); + create(parseBootstrapFromV3Yaml(yaml)); const auto cluster = cluster_manager_->get("new_cluster"); EXPECT_EQ(1, cluster->prioritySet().hostSetsPerPriority().size()); } diff --git a/test/common/upstream/conn_pool_map_impl_test.cc b/test/common/upstream/conn_pool_map_impl_test.cc index 6c4605cd96d24..64c518b2fc687 100644 --- a/test/common/upstream/conn_pool_map_impl_test.cc +++ b/test/common/upstream/conn_pool_map_impl_test.cc @@ -401,8 +401,8 @@ TEST_F(ConnPoolMapImplDeathTest, ReentryClearTripsAssert) { ON_CALL(*mock_pools_[0], addDrainedCallback(_)) .WillByDefault(Invoke([](Http::ConnectionPool::Instance::DrainedCb cb) { cb(); })); - EXPECT_DEATH_LOG_TO_STDERR(test_map->addDrainedCallback([&test_map] { test_map->clear(); }), - ".*Details: A resource should only be entered once"); + EXPECT_DEATH(test_map->addDrainedCallback([&test_map] { test_map->clear(); }), + ".*Details: A resource should only be entered once"); } TEST_F(ConnPoolMapImplDeathTest, ReentryGetPoolTripsAssert) { @@ -412,7 +412,7 @@ TEST_F(ConnPoolMapImplDeathTest, ReentryGetPoolTripsAssert) { ON_CALL(*mock_pools_[0], addDrainedCallback(_)) .WillByDefault(Invoke([](Http::ConnectionPool::Instance::DrainedCb cb) { cb(); })); - EXPECT_DEATH_LOG_TO_STDERR( + EXPECT_DEATH( test_map->addDrainedCallback([&test_map, this] { test_map->getPool(2, getBasicFactory()); }), ".*Details: A resource should only be entered once"); } @@ -424,9 +424,8 @@ TEST_F(ConnPoolMapImplDeathTest, ReentryDrainConnectionsTripsAssert) { ON_CALL(*mock_pools_[0], addDrainedCallback(_)) .WillByDefault(Invoke([](Http::ConnectionPool::Instance::DrainedCb cb) { cb(); })); - EXPECT_DEATH_LOG_TO_STDERR( - test_map->addDrainedCallback([&test_map] { test_map->drainConnections(); }), - ".*Details: A resource should only be entered once"); + EXPECT_DEATH(test_map->addDrainedCallback([&test_map] { test_map->drainConnections(); }), + ".*Details: A resource should only be entered once"); } 
TEST_F(ConnPoolMapImplDeathTest, ReentryAddDrainedCallbackTripsAssert) { @@ -436,9 +435,8 @@ TEST_F(ConnPoolMapImplDeathTest, ReentryAddDrainedCallbackTripsAssert) { ON_CALL(*mock_pools_[0], addDrainedCallback(_)) .WillByDefault(Invoke([](Http::ConnectionPool::Instance::DrainedCb cb) { cb(); })); - EXPECT_DEATH_LOG_TO_STDERR( - test_map->addDrainedCallback([&test_map] { test_map->addDrainedCallback([]() {}); }), - ".*Details: A resource should only be entered once"); + EXPECT_DEATH(test_map->addDrainedCallback([&test_map] { test_map->addDrainedCallback([]() {}); }), + ".*Details: A resource should only be entered once"); } #endif // !defined(NDEBUG) diff --git a/test/common/upstream/eds_speed_test.cc b/test/common/upstream/eds_speed_test.cc new file mode 100644 index 0000000000000..3aab5a54f9910 --- /dev/null +++ b/test/common/upstream/eds_speed_test.cc @@ -0,0 +1,218 @@ +// Note: this should be run with --compilation_mode=opt, and would benefit from a +// quiescent system with disabled cstate power management. 
+ +#include "envoy/config/cluster/v3/cluster.pb.h" +#include "envoy/config/core/v3/health_check.pb.h" +#include "envoy/config/endpoint/v3/endpoint.pb.h" +#include "envoy/config/endpoint/v3/endpoint_components.pb.h" +#include "envoy/service/discovery/v3/discovery.pb.h" +#include "envoy/stats/scope.h" + +#include "common/config/grpc_mux_impl.h" +#include "common/config/grpc_subscription_impl.h" +#include "common/config/utility.h" +#include "common/singleton/manager_impl.h" +#include "common/upstream/eds.h" + +#include "server/transport_socket_config_impl.h" + +#include "test/benchmark/main.h" +#include "test/common/upstream/utility.h" +#include "test/mocks/local_info/mocks.h" +#include "test/mocks/protobuf/mocks.h" +#include "test/mocks/runtime/mocks.h" +#include "test/mocks/server/admin.h" +#include "test/mocks/server/instance.h" +#include "test/mocks/ssl/mocks.h" +#include "test/mocks/upstream/mocks.h" +#include "test/test_common/utility.h" + +#include "benchmark/benchmark.h" + +using ::benchmark::State; +using Envoy::benchmark::skipExpensiveBenchmarks; + +namespace Envoy { +namespace Upstream { + +class EdsSpeedTest { +public: + EdsSpeedTest(State& state, bool v2_config) + : state_(state), v2_config_(v2_config), + type_url_(v2_config_ + ? 
"type.googleapis.com/envoy.api.v2.ClusterLoadAssignment" + : "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment"), + subscription_stats_(Config::Utility::generateStats(stats_)), + api_(Api::createApiForTest(stats_)), async_client_(new Grpc::MockAsyncClient()), + grpc_mux_(new Config::GrpcMuxImpl( + local_info_, std::unique_ptr(async_client_), dispatcher_, + *Protobuf::DescriptorPool::generated_pool()->FindMethodByName( + "envoy.service.endpoint.v3.EndpointDiscoveryService.StreamEndpoints"), + envoy::config::core::v3::ApiVersion::AUTO, random_, stats_, {}, true)) { + resetCluster(R"EOF( + name: name + connect_timeout: 0.25s + type: EDS + eds_cluster_config: + service_name: fare + eds_config: + api_config_source: + cluster_names: + - eds + refresh_delay: 1s + )EOF", + Envoy::Upstream::Cluster::InitializePhase::Secondary); + + EXPECT_CALL(*cm_.subscription_factory_.subscription_, start(_)); + cluster_->initialize([this] { initialized_ = true; }); + EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(testing::Return(&async_stream_)); + subscription_->start({"fare"}); + } + + void resetCluster(const std::string& yaml_config, Cluster::InitializePhase initialize_phase) { + local_info_.node_.mutable_locality()->set_zone("us-east-1a"); + eds_cluster_ = parseClusterFromV3Yaml(yaml_config); + Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format( + "cluster.{}.", + eds_cluster_.alt_stat_name().empty() ? 
eds_cluster_.name() : eds_cluster_.alt_stat_name())); + Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context( + admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, random_, stats_, + singleton_manager_, tls_, validation_visitor_, *api_); + cluster_ = std::make_shared(eds_cluster_, runtime_, factory_context, + std::move(scope), false); + EXPECT_EQ(initialize_phase, cluster_->initializePhase()); + eds_callbacks_ = cm_.subscription_factory_.callbacks_; + subscription_ = std::make_unique( + grpc_mux_, *eds_callbacks_, resource_decoder_, subscription_stats_, type_url_, dispatcher_, + std::chrono::milliseconds(), false); + } + + // Set up an EDS config with multiple priorities, localities, weights and make sure + // they are loaded as expected. + void priorityAndLocalityWeightedHelper(bool ignore_unknown_dynamic_fields, size_t num_hosts, + bool healthy) { + state_.PauseTiming(); + + envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment; + cluster_load_assignment.set_cluster_name("fare"); + + // Add a whole bunch of hosts in a single place: + auto* endpoints = cluster_load_assignment.add_endpoints(); + endpoints->set_priority(1); + auto* locality = endpoints->mutable_locality(); + locality->set_region("region"); + locality->set_zone("zone"); + locality->set_sub_zone("sub_zone"); + endpoints->mutable_load_balancing_weight()->set_value(1); + + uint32_t port = 1000; + for (size_t i = 0; i < num_hosts; ++i) { + auto* lb_endpoint = endpoints->add_lb_endpoints(); + if (healthy) { + lb_endpoint->set_health_status(envoy::config::core::v3::HEALTHY); + } else { + lb_endpoint->set_health_status(envoy::config::core::v3::UNHEALTHY); + } + auto* socket_address = + lb_endpoint->mutable_endpoint()->mutable_address()->mutable_socket_address(); + socket_address->set_address("10.0.1." 
+ std::to_string(i / 60000)); + socket_address->set_port_value((port + i) % 60000); + } + + // this is what we're actually testing: + validation_visitor_.setSkipValidation(ignore_unknown_dynamic_fields); + + auto response = std::make_unique(); + response->set_type_url(type_url_); + auto* resource = response->mutable_resources()->Add(); + resource->PackFrom(cluster_load_assignment); + if (v2_config_) { + RELEASE_ASSERT(resource->type_url() == + "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", + ""); + resource->set_type_url("type.googleapis.com/envoy.api.v2.ClusterLoadAssignment"); + } + state_.ResumeTiming(); + grpc_mux_->grpcStreamForTest().onReceiveMessage(std::move(response)); + ASSERT(cluster_->prioritySet().hostSetsPerPriority()[1]->hostsPerLocality().get()[0].size() == + num_hosts); + } + + State& state_; + const bool v2_config_; + const std::string type_url_; + bool initialized_{}; + Stats::IsolatedStoreImpl stats_; + Config::SubscriptionStats subscription_stats_; + Ssl::MockContextManager ssl_context_manager_; + envoy::config::cluster::v3::Cluster eds_cluster_; + NiceMock cm_; + NiceMock dispatcher_; + EdsClusterImplSharedPtr cluster_; + Config::SubscriptionCallbacks* eds_callbacks_{}; + Config::OpaqueResourceDecoderImpl + resource_decoder_{validation_visitor_, "cluster_name"}; + NiceMock random_; + NiceMock runtime_; + NiceMock local_info_; + NiceMock admin_; + Singleton::ManagerImpl singleton_manager_{Thread::threadFactoryForTest()}; + NiceMock tls_; + ProtobufMessage::MockValidationVisitor validation_visitor_; + Api::ApiPtr api_; + Grpc::MockAsyncClient* async_client_; + NiceMock async_stream_; + Config::GrpcMuxImplSharedPtr grpc_mux_; + Config::GrpcSubscriptionImplPtr subscription_; +}; + +} // namespace Upstream +} // namespace Envoy + +static void priorityAndLocalityWeighted(State& state) { + Envoy::Thread::MutexBasicLockable lock; + Envoy::Logger::Context logging_state(spdlog::level::warn, + 
Envoy::Logger::Logger::DEFAULT_LOG_FORMAT, lock, false); + for (auto _ : state) { + Envoy::Upstream::EdsSpeedTest speed_test(state, state.range(0)); + // if we've been instructed to skip tests, only run once no matter the argument: + uint32_t endpoints = skipExpensiveBenchmarks() ? 1 : state.range(2); + + speed_test.priorityAndLocalityWeightedHelper(state.range(1), endpoints, true); + } +} + +BENCHMARK(priorityAndLocalityWeighted) + ->Ranges({{false, true}, {false, true}, {1, 100000}}) + ->Unit(benchmark::kMillisecond); + +static void duplicateUpdate(State& state) { + Envoy::Thread::MutexBasicLockable lock; + Envoy::Logger::Context logging_state(spdlog::level::warn, + Envoy::Logger::Logger::DEFAULT_LOG_FORMAT, lock, false); + + for (auto _ : state) { + Envoy::Upstream::EdsSpeedTest speed_test(state, false); + uint32_t endpoints = skipExpensiveBenchmarks() ? 1 : state.range(0); + + speed_test.priorityAndLocalityWeightedHelper(true, endpoints, true); + speed_test.priorityAndLocalityWeightedHelper(true, endpoints, true); + } +} + +BENCHMARK(duplicateUpdate)->Range(1, 100000)->Unit(benchmark::kMillisecond); + +static void healthOnlyUpdate(State& state) { + Envoy::Thread::MutexBasicLockable lock; + Envoy::Logger::Context logging_state(spdlog::level::warn, + Envoy::Logger::Logger::DEFAULT_LOG_FORMAT, lock, false); + for (auto _ : state) { + Envoy::Upstream::EdsSpeedTest speed_test(state, false); + uint32_t endpoints = skipExpensiveBenchmarks() ? 
1 : state.range(0); + + speed_test.priorityAndLocalityWeightedHelper(true, endpoints, true); + speed_test.priorityAndLocalityWeightedHelper(true, endpoints, false); + } +} + +BENCHMARK(healthOnlyUpdate)->Range(1, 100000)->Unit(benchmark::kMillisecond); diff --git a/test/common/upstream/eds_test.cc b/test/common/upstream/eds_test.cc index 23600bbfe0e5f..144d29ad78a0e 100644 --- a/test/common/upstream/eds_test.cc +++ b/test/common/upstream/eds_test.cc @@ -18,7 +18,8 @@ #include "test/mocks/local_info/mocks.h" #include "test/mocks/protobuf/mocks.h" #include "test/mocks/runtime/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/admin.h" +#include "test/mocks/server/instance.h" #include "test/mocks/ssl/mocks.h" #include "test/mocks/upstream/mocks.h" #include "test/test_common/utility.h" @@ -60,7 +61,7 @@ class EdsTest : public testing::Test { connect_timeout: 0.25s type: EDS lb_policy: ROUND_ROBIN - drain_connections_on_host_removal: true + ignore_health_on_host_removal: true eds_cluster_config: service_name: fare eds_config: @@ -88,7 +89,7 @@ class EdsTest : public testing::Test { void resetCluster(const std::string& yaml_config, Cluster::InitializePhase initialize_phase) { local_info_.node_.mutable_locality()->set_zone("us-east-1a"); - eds_cluster_ = parseClusterFromV2Yaml(yaml_config); + eds_cluster_ = parseClusterFromV3Yaml(yaml_config); Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format( "cluster.{}.", eds_cluster_.alt_stat_name().empty() ? 
eds_cluster_.name() : eds_cluster_.alt_stat_name())); @@ -108,9 +109,9 @@ class EdsTest : public testing::Test { void doOnConfigUpdateVerifyNoThrow( const envoy::config::endpoint::v3::ClusterLoadAssignment& cluster_load_assignment) { - Protobuf::RepeatedPtrField resources; - resources.Add()->PackFrom(cluster_load_assignment); - VERBOSE_EXPECT_NO_THROW(eds_callbacks_->onConfigUpdate(resources, "")); + const auto decoded_resources = + TestUtility::decodeResources({cluster_load_assignment}, "cluster_name"); + VERBOSE_EXPECT_NO_THROW(eds_callbacks_->onConfigUpdate(decoded_resources.refvec_, "")); } bool initialized_{}; @@ -119,9 +120,9 @@ class EdsTest : public testing::Test { envoy::config::cluster::v3::Cluster eds_cluster_; NiceMock cm_; NiceMock dispatcher_; - std::shared_ptr cluster_; + EdsClusterImplSharedPtr cluster_; Config::SubscriptionCallbacks* eds_callbacks_{}; - NiceMock random_; + NiceMock random_; NiceMock runtime_; NiceMock local_info_; NiceMock admin_; @@ -137,8 +138,8 @@ class EdsWithHealthCheckUpdateTest : public EdsTest { // Build the initial cluster with some endpoints. 
void initializeCluster(const std::vector endpoint_ports, - const bool drain_connections_on_host_removal) { - resetCluster(drain_connections_on_host_removal); + const bool ignore_health_on_host_removal) { + resetCluster(ignore_health_on_host_removal); auto health_checker = std::make_shared(); EXPECT_CALL(*health_checker, start()); @@ -172,13 +173,13 @@ class EdsWithHealthCheckUpdateTest : public EdsTest { } } - void resetCluster(const bool drain_connections_on_host_removal) { + void resetCluster(const bool ignore_health_on_host_removal) { const std::string config = R"EOF( name: name connect_timeout: 0.25s type: EDS lb_policy: ROUND_ROBIN - drain_connections_on_host_removal: {} + ignore_health_on_host_removal: {} eds_cluster_config: service_name: fare eds_config: @@ -188,7 +189,7 @@ class EdsWithHealthCheckUpdateTest : public EdsTest { - eds refresh_delay: 1s )EOF"; - EdsTest::resetCluster(fmt::format(config, drain_connections_on_host_removal), + EdsTest::resetCluster(fmt::format(config, ignore_health_on_host_removal), Cluster::InitializePhase::Secondary); } @@ -218,25 +219,15 @@ class EdsWithHealthCheckUpdateTest : public EdsTest { envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment_; }; -// Negative test for protoc-gen-validate constraints. -TEST_F(EdsTest, ValidateFail) { - initialize(); - envoy::config::endpoint::v3::ClusterLoadAssignment resource; - Protobuf::RepeatedPtrField resources; - resources.Add()->PackFrom(resource); - EXPECT_THROW(eds_callbacks_->onConfigUpdate(resources, ""), ProtoValidationException); - EXPECT_FALSE(initialized_); -} - // Validate that onConfigUpdate() with unexpected cluster names rejects config. 
TEST_F(EdsTest, OnConfigUpdateWrongName) { envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment; cluster_load_assignment.set_cluster_name("wrong name"); - Protobuf::RepeatedPtrField resources; - resources.Add()->PackFrom(cluster_load_assignment); + const auto decoded_resources = + TestUtility::decodeResources({cluster_load_assignment}, "cluster_name"); initialize(); try { - eds_callbacks_->onConfigUpdate(resources, ""); + eds_callbacks_->onConfigUpdate(decoded_resources.refvec_, ""); } catch (const EnvoyException& e) { eds_callbacks_->onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::UpdateRejected, &e); @@ -248,9 +239,8 @@ TEST_F(EdsTest, OnConfigUpdateWrongName) { TEST_F(EdsTest, OnConfigUpdateEmpty) { initialize(); eds_callbacks_->onConfigUpdate({}, ""); - Protobuf::RepeatedPtrField resources; Protobuf::RepeatedPtrField removed_resources; - eds_callbacks_->onConfigUpdate(resources, removed_resources, ""); + eds_callbacks_->onConfigUpdate({}, removed_resources, ""); EXPECT_EQ(2UL, stats_.counter("cluster.name.update_empty").value()); EXPECT_TRUE(initialized_); } @@ -260,11 +250,10 @@ TEST_F(EdsTest, OnConfigUpdateWrongSize) { initialize(); envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment; cluster_load_assignment.set_cluster_name("fare"); - Protobuf::RepeatedPtrField resources; - resources.Add()->PackFrom(cluster_load_assignment); - resources.Add()->PackFrom(cluster_load_assignment); + const auto decoded_resources = TestUtility::decodeResources( + {cluster_load_assignment, cluster_load_assignment}, "cluster_name"); try { - eds_callbacks_->onConfigUpdate(resources, ""); + eds_callbacks_->onConfigUpdate(decoded_resources.refvec_, ""); } catch (const EnvoyException& e) { eds_callbacks_->onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::UpdateRejected, &e); @@ -292,7 +281,10 @@ TEST_F(EdsTest, DeltaOnConfigUpdateSuccess) { auto* resource = resources.Add(); 
resource->mutable_resource()->PackFrom(cluster_load_assignment); resource->set_version("v1"); - VERBOSE_EXPECT_NO_THROW(eds_callbacks_->onConfigUpdate(resources, {}, "v1")); + const auto decoded_resources = + TestUtility::decodeResources( + resources, "cluster_name"); + VERBOSE_EXPECT_NO_THROW(eds_callbacks_->onConfigUpdate(decoded_resources.refvec_, {}, "v1")); EXPECT_TRUE(initialized_); EXPECT_EQ(1UL, stats_.counter("cluster.name.update_no_rebuild").value()); @@ -1457,9 +1449,10 @@ TEST_F(EdsTest, NoPriorityForLocalCluster) { add_hosts_to_priority(0, 2); add_hosts_to_priority(1, 1); initialize(); - Protobuf::RepeatedPtrField resources; - resources.Add()->PackFrom(cluster_load_assignment); - EXPECT_THROW_WITH_MESSAGE(eds_callbacks_->onConfigUpdate(resources, ""), EnvoyException, + const auto decoded_resources = + TestUtility::decodeResources({cluster_load_assignment}, "cluster_name"); + EXPECT_THROW_WITH_MESSAGE(eds_callbacks_->onConfigUpdate(decoded_resources.refvec_, ""), + EnvoyException, "Unexpected non-zero priority for local cluster 'name'."); // Try an update which only has endpoints with P=0. This should go through. @@ -1746,9 +1739,10 @@ TEST_F(EdsTest, MalformedIP) { endpoint->mutable_endpoint()->mutable_address()->mutable_socket_address()->set_port_value(80); initialize(); - Protobuf::RepeatedPtrField resources; - resources.Add()->PackFrom(cluster_load_assignment); - EXPECT_THROW_WITH_MESSAGE(eds_callbacks_->onConfigUpdate(resources, ""), EnvoyException, + const auto decoded_resources = + TestUtility::decodeResources({cluster_load_assignment}, "cluster_name"); + EXPECT_THROW_WITH_MESSAGE(eds_callbacks_->onConfigUpdate(decoded_resources.refvec_, ""), + EnvoyException, "malformed IP address: foo.bar.com. 
Consider setting resolver_name or " "setting cluster type to 'STRICT_DNS' or 'LOGICAL_DNS'"); } diff --git a/test/common/upstream/hds_test.cc b/test/common/upstream/hds_test.cc index b7a15887d05b5..3f1ffeff03556 100644 --- a/test/common/upstream/hds_test.cc +++ b/test/common/upstream/hds_test.cc @@ -16,7 +16,8 @@ #include "test/mocks/local_info/mocks.h" #include "test/mocks/network/mocks.h" #include "test/mocks/protobuf/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/admin.h" +#include "test/mocks/server/instance.h" #include "test/mocks/upstream/mocks.h" #include "test/test_common/simulated_time_system.h" #include "test/test_common/utility.h" @@ -44,6 +45,7 @@ class HdsDelegateFriend { std::unique_ptr&& message) { hd.processMessage(std::move(message)); }; + HdsDelegateStats getStats(HdsDelegate& hd) { return hd.stats_; }; }; class HdsTest : public testing::Test { @@ -126,7 +128,7 @@ class HdsTest : public testing::Test { NiceMock validation_visitor_; Api::ApiPtr api_; Extensions::TransportSockets::Tls::ContextManagerImpl ssl_context_manager_; - NiceMock random_; + NiceMock random_; NiceMock log_manager_; NiceMock cm_; NiceMock local_info_; @@ -226,6 +228,91 @@ TEST_F(HdsTest, TestProcessMessageHealthChecks) { EXPECT_EQ(hds_delegate_->hdsClusters()[1]->healthCheckers().size(), 3); } +// Test if processMessage exits gracefully upon receiving a malformed message +TEST_F(HdsTest, TestProcessMessageMissingFields) { + EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); + EXPECT_CALL(async_stream_, sendMessageRaw_(_, _)); + createHdsDelegate(); + + // Create Message + message.reset(createSimpleMessage()); + // remove healthy threshold field to create an error + message->mutable_cluster_health_checks(0)->mutable_health_checks(0)->clear_healthy_threshold(); + + // call onReceiveMessage function for testing. 
Should increment stat_ errors upon + // getting a bad message + hds_delegate_->onReceiveMessage(std::move(message)); + + // Ensure that we never enabled the response timer that would start health checks, + // since this config was invalid. + EXPECT_FALSE(server_response_timer_->enabled_); + + // ensure that no partial information was stored in hds_clusters_ + EXPECT_TRUE(hds_delegate_->hdsClusters().empty()); + + // Check Correctness by verifying one request and one error has been generated in stat_ + EXPECT_EQ(hds_delegate_friend_.getStats(*hds_delegate_).errors_.value(), 1); + EXPECT_EQ(hds_delegate_friend_.getStats(*hds_delegate_).requests_.value(), 1); +} + +// Test if processMessage exits gracefully upon receiving a malformed message +// There was a previous valid config, so we go back to that. +TEST_F(HdsTest, TestProcessMessageMissingFieldsWithFallback) { + EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); + EXPECT_CALL(async_stream_, sendMessageRaw_(_, _)); + createHdsDelegate(); + + // Create Message + message.reset(createSimpleMessage()); + + Network::MockClientConnection* connection_ = new NiceMock(); + EXPECT_CALL(dispatcher_, createClientConnection_(_, _, _, _)).WillRepeatedly(Return(connection_)); + EXPECT_CALL(*server_response_timer_, enableTimer(_, _)).Times(2); + EXPECT_CALL(async_stream_, sendMessageRaw_(_, false)); + EXPECT_CALL(test_factory_, createClusterInfo(_)).WillOnce(Return(cluster_info_)); + EXPECT_CALL(*connection_, setBufferLimits(_)); + EXPECT_CALL(dispatcher_, deferredDelete_(_)); + // Process message + hds_delegate_->onReceiveMessage(std::move(message)); + connection_->raiseEvent(Network::ConnectionEvent::Connected); + + // Create a invalid message + message.reset(createSimpleMessage()); + + // set this address to be distinguishable from the previous message in sendResponse() + message->mutable_cluster_health_checks(0) + ->mutable_locality_endpoints(0) + ->mutable_endpoints(0) + ->mutable_address() + 
->mutable_socket_address() + ->set_address("9.9.9.9"); + + // remove healthy threshold field to create an error + message->mutable_cluster_health_checks(0)->mutable_health_checks(0)->clear_healthy_threshold(); + + // Pass invalid message through. Should increment stat_ errors upon + // getting a bad message. + hds_delegate_->onReceiveMessage(std::move(message)); + + // Ensure that the timer is enabled since there was a previous valid specifier. + EXPECT_TRUE(server_response_timer_->enabled_); + + // read the response and check that it is pinging the old + // address 127.0.0.0 instead of the new 9.9.9.9 + auto response = hds_delegate_->sendResponse(); + EXPECT_EQ(response.endpoint_health_response() + .endpoints_health(0) + .endpoint() + .address() + .socket_address() + .address(), + "127.0.0.0"); + + // Check Correctness by verifying one request and one error has been generated in stat_ + EXPECT_EQ(hds_delegate_friend_.getStats(*hds_delegate_).errors_.value(), 1); + EXPECT_EQ(hds_delegate_friend_.getStats(*hds_delegate_).requests_.value(), 2); +} + // Tests OnReceiveMessage given a minimal HealthCheckSpecifier message TEST_F(HdsTest, TestMinimalOnReceiveMessage) { EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); diff --git a/test/common/upstream/health_checker_impl_test.cc b/test/common/upstream/health_checker_impl_test.cc index 3ea1592bb9f99..b20b8b56be235 100644 --- a/test/common/upstream/health_checker_impl_test.cc +++ b/test/common/upstream/health_checker_impl_test.cc @@ -29,6 +29,7 @@ #include "test/mocks/upstream/mocks.h" #include "test/test_common/printers.h" #include "test/test_common/simulated_time_system.h" +#include "test/test_common/test_runtime.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" @@ -63,7 +64,7 @@ TEST(HealthCheckerFactoryTest, GrpcHealthCheckHTTP2NotConfiguredException) { EXPECT_CALL(*cluster.info_, features()).WillRepeatedly(Return(0)); Runtime::MockLoader runtime; - 
Runtime::MockRandomGenerator random; + Random::MockRandomGenerator random; Event::MockDispatcher dispatcher; AccessLog::MockAccessLogManager log_manager; NiceMock validation_visitor; @@ -82,7 +83,7 @@ TEST(HealthCheckerFactoryTest, CreateGrpc) { .WillRepeatedly(Return(Upstream::ClusterInfo::Features::HTTP2)); Runtime::MockLoader runtime; - Runtime::MockRandomGenerator random; + Random::MockRandomGenerator random; Event::MockDispatcher dispatcher; AccessLog::MockAccessLogManager log_manager; NiceMock validation_visitor; @@ -95,6 +96,18 @@ TEST(HealthCheckerFactoryTest, CreateGrpc) { .get())); } +class HealthCheckerTestBase { +public: + std::shared_ptr cluster_{ + std::make_shared>()}; + NiceMock dispatcher_; + std::unique_ptr event_logger_storage_{ + std::make_unique()}; + MockHealthCheckEventLogger& event_logger_{*event_logger_storage_}; + NiceMock random_; + NiceMock runtime_; +}; + class TestHttpHealthCheckerImpl : public HttpHealthCheckerImpl { public: using HttpHealthCheckerImpl::HttpHealthCheckerImpl; @@ -109,7 +122,7 @@ class TestHttpHealthCheckerImpl : public HttpHealthCheckerImpl { Http::CodecClient::Type codecClientType() { return codec_client_type_; } }; -class HttpHealthCheckerImplTest : public testing::Test { +class HttpHealthCheckerImplTest : public testing::Test, public HealthCheckerTestBase { public: struct TestSession { Event::MockTimer* interval_timer_{}; @@ -123,12 +136,21 @@ class HttpHealthCheckerImplTest : public testing::Test { using TestSessionPtr = std::unique_ptr; using HostWithHealthCheckMap = - std::unordered_map; + absl::node_hash_map; + + void allocHealthChecker(const std::string& yaml, bool avoid_boosting = true) { + health_checker_ = std::make_shared( + *cluster_, parseHealthCheckFromV3Yaml(yaml, avoid_boosting), dispatcher_, runtime_, random_, + HealthCheckEventLoggerPtr(event_logger_storage_.release())); + } - HttpHealthCheckerImplTest() - : cluster_(new NiceMock()), - event_logger_(new MockHealthCheckEventLogger()) {} + void 
addCompletionCallback() { + health_checker_->addHostCheckCompleteCb( + [this](HostSharedPtr host, HealthTransition changed_state) -> void { + onHostStatus(host, changed_state); + }); + } void setupNoServiceValidationHCWithHttp2() { const std::string yaml = R"EOF( @@ -145,13 +167,8 @@ class HttpHealthCheckerImplTest : public testing::Test { codec_client_type: Http2 )EOF"; - health_checker_ = std::make_shared( - *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_)); - health_checker_->addHostCheckCompleteCb( - [this](HostSharedPtr host, HealthTransition changed_state) -> void { - onHostStatus(host, changed_state); - }); + allocHealthChecker(yaml); + addCompletionCallback(); } void setupInitialJitter() { @@ -169,13 +186,8 @@ class HttpHealthCheckerImplTest : public testing::Test { path: /healthcheck )EOF"; - health_checker_ = std::make_shared( - *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_)); - health_checker_->addHostCheckCompleteCb( - [this](HostSharedPtr host, HealthTransition changed_state) -> void { - onHostStatus(host, changed_state); - }); + allocHealthChecker(yaml); + addCompletionCallback(); } void setupIntervalJitterPercent() { @@ -192,13 +204,8 @@ class HttpHealthCheckerImplTest : public testing::Test { path: /healthcheck )EOF"; - health_checker_ = std::make_shared( - *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_)); - health_checker_->addHostCheckCompleteCb( - [this](HostSharedPtr host, HealthTransition changed_state) -> void { - onHostStatus(host, changed_state); - }); + allocHealthChecker(yaml); + addCompletionCallback(); } void setupNoServiceValidationHC() { @@ -215,13 +222,8 @@ class HttpHealthCheckerImplTest : public testing::Test { path: /healthcheck )EOF"; - health_checker_ = std::make_shared( - *cluster_, parseHealthCheckFromV2Yaml(yaml), 
dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_)); - health_checker_->addHostCheckCompleteCb( - [this](HostSharedPtr host, HealthTransition changed_state) -> void { - onHostStatus(host, changed_state); - }); + allocHealthChecker(yaml); + addCompletionCallback(); } void setupNoServiceValidationHCOneUnhealthy() { @@ -238,13 +240,8 @@ class HttpHealthCheckerImplTest : public testing::Test { path: /healthcheck )EOF"; - health_checker_ = std::make_shared( - *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_)); - health_checker_->addHostCheckCompleteCb( - [this](HostSharedPtr host, HealthTransition changed_state) -> void { - onHostStatus(host, changed_state); - }); + allocHealthChecker(yaml); + addCompletionCallback(); } void setupNoServiceValidationHCAlwaysLogFailure() { @@ -262,13 +259,8 @@ class HttpHealthCheckerImplTest : public testing::Test { always_log_health_check_failures: true )EOF"; - health_checker_ = std::make_shared( - *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_)); - health_checker_->addHostCheckCompleteCb( - [this](HostSharedPtr host, HealthTransition changed_state) -> void { - onHostStatus(host, changed_state); - }); + allocHealthChecker(yaml); + addCompletionCallback(); } void setupNoServiceValidationNoReuseConnectionHC() { @@ -283,13 +275,8 @@ class HttpHealthCheckerImplTest : public testing::Test { path: /healthcheck )EOF"; - health_checker_ = std::make_shared( - *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_)); - health_checker_->addHostCheckCompleteCb( - [this](HostSharedPtr host, HealthTransition changed_state) -> void { - onHostStatus(host, changed_state); - }); + allocHealthChecker(yaml); + addCompletionCallback(); } void setupHealthCheckIntervalOverridesHC() { @@ -309,13 +296,8 @@ class 
HttpHealthCheckerImplTest : public testing::Test { path: /healthcheck )EOF"; - health_checker_ = std::make_shared( - *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_)); - health_checker_->addHostCheckCompleteCb( - [this](HostSharedPtr host, HealthTransition changed_state) -> void { - onHostStatus(host, changed_state); - }); + allocHealthChecker(yaml); + addCompletionCallback(); } void setupServiceValidationHC() { @@ -331,13 +313,8 @@ class HttpHealthCheckerImplTest : public testing::Test { path: /healthcheck )EOF"; - health_checker_ = std::make_shared( - *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_)); - health_checker_->addHostCheckCompleteCb( - [this](HostSharedPtr host, HealthTransition changed_state) -> void { - onHostStatus(host, changed_state); - }); + allocHealthChecker(yaml); + addCompletionCallback(); } void setupDeprecatedServiceNameValidationHC(const std::string& prefix) { @@ -354,13 +331,8 @@ class HttpHealthCheckerImplTest : public testing::Test { )EOF", prefix); - health_checker_ = std::make_shared( - *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_)); - health_checker_->addHostCheckCompleteCb( - [this](HostSharedPtr host, HealthTransition changed_state) -> void { - onHostStatus(host, changed_state); - }); + allocHealthChecker(yaml); + addCompletionCallback(); } void setupServicePrefixPatternValidationHC() { @@ -376,13 +348,8 @@ class HttpHealthCheckerImplTest : public testing::Test { path: /healthcheck )EOF"; - health_checker_ = std::make_shared( - *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_)); - health_checker_->addHostCheckCompleteCb( - [this](HostSharedPtr host, HealthTransition changed_state) -> void { - onHostStatus(host, changed_state); - }); + 
allocHealthChecker(yaml); + addCompletionCallback(); } void setupServiceExactPatternValidationHC() { @@ -398,13 +365,8 @@ class HttpHealthCheckerImplTest : public testing::Test { path: /healthcheck )EOF"; - health_checker_ = std::make_shared( - *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_)); - health_checker_->addHostCheckCompleteCb( - [this](HostSharedPtr host, HealthTransition changed_state) -> void { - onHostStatus(host, changed_state); - }); + allocHealthChecker(yaml); + addCompletionCallback(); } void setupServiceRegexPatternValidationHC() { @@ -422,13 +384,8 @@ class HttpHealthCheckerImplTest : public testing::Test { path: /healthcheck )EOF"; - health_checker_ = std::make_shared( - *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_)); - health_checker_->addHostCheckCompleteCb( - [this](HostSharedPtr host, HealthTransition changed_state) -> void { - onHostStatus(host, changed_state); - }); + allocHealthChecker(yaml); + addCompletionCallback(); } void setupServiceValidationWithCustomHostValueHC(const std::string& host) { @@ -446,13 +403,8 @@ class HttpHealthCheckerImplTest : public testing::Test { )EOF", host); - health_checker_ = std::make_shared( - *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_)); - health_checker_->addHostCheckCompleteCb( - [this](HostSharedPtr host, HealthTransition changed_state) -> void { - onHostStatus(host, changed_state); - }); + allocHealthChecker(yaml); + addCompletionCallback(); } const envoy::config::endpoint::v3::Endpoint::HealthCheckConfig @@ -521,13 +473,8 @@ class HttpHealthCheckerImplTest : public testing::Test { value: "%START_TIME(%s.%9f)%" )EOF"; - health_checker_ = std::make_shared( - *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_)); - 
health_checker_->addHostCheckCompleteCb( - [this](HostSharedPtr host, HealthTransition changed_state) -> void { - onHostStatus(host, changed_state); - }); + allocHealthChecker(yaml); + addCompletionCallback(); } void setupServiceValidationWithoutUserAgent() { @@ -546,13 +493,8 @@ class HttpHealthCheckerImplTest : public testing::Test { request_headers_to_remove: ["user-agent"] )EOF"; - health_checker_ = std::make_shared( - *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_)); - health_checker_->addHostCheckCompleteCb( - [this](HostSharedPtr host, HealthTransition changed_state) -> void { - onHostStatus(host, changed_state); - }); + allocHealthChecker(yaml); + addCompletionCallback(); } void expectSessionCreate(const HostWithHealthCheckMap& health_check_map) { @@ -568,6 +510,7 @@ class HttpHealthCheckerImplTest : public testing::Test { void expectClientCreate(size_t index, const HostWithHealthCheckMap& health_check_map) { TestSession& test_session = *test_sessions_[index]; test_session.codec_ = new NiceMock(); + ON_CALL(*test_session.codec_, protocol()).WillByDefault(Return(Http::Protocol::Http11)); test_session.client_connection_ = new NiceMock(); connection_index_.push_back(index); codec_index_.push_back(index); @@ -662,7 +605,7 @@ class HttpHealthCheckerImplTest : public testing::Test { EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)); EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _)); EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer()); - EXPECT_CALL(*event_logger_, logUnhealthy(_, _, _, true)); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); respond(0, "503", false, false, false, false, health_checked_cluster); EXPECT_TRUE(cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagGet( Host::HealthFlag::FAILED_ACTIVE_HC)); @@ -687,7 +630,7 @@ class HttpHealthCheckerImplTest : public testing::Test { 
test_sessions_[0]->interval_timer_->invokeCallback(); EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)); - EXPECT_CALL(*event_logger_, logAddHealthy(_, _, false)); + EXPECT_CALL(event_logger_, logAddHealthy(_, _, false)); EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _)); EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer()); respond(0, "200", false, false, false, false, health_checked_cluster); @@ -697,13 +640,8 @@ class HttpHealthCheckerImplTest : public testing::Test { MOCK_METHOD(void, onHostStatus, (HostSharedPtr host, HealthTransition changed_state)); - std::shared_ptr cluster_; - NiceMock dispatcher_; std::vector test_sessions_; std::shared_ptr health_checker_; - NiceMock runtime_; - NiceMock random_; - MockHealthCheckEventLogger* event_logger_{}; std::list connection_index_{}; std::list codec_index_{}; const HostWithHealthCheckMap health_checker_map_{}; @@ -750,7 +688,7 @@ TEST_F(HttpHealthCheckerImplTest, Degraded) { // We start off as healthy, and should go degraded after receiving the degraded health response. 
EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _)); EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer()); - EXPECT_CALL(*event_logger_, logDegraded(_, _)); + EXPECT_CALL(event_logger_, logDegraded(_, _)); respond(0, "200", false, false, true, false, {}, true); EXPECT_EQ(Host::Health::Degraded, cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health()); @@ -761,7 +699,7 @@ TEST_F(HttpHealthCheckerImplTest, Degraded) { EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer()); test_sessions_[0]->interval_timer_->invokeCallback(); EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _)); - EXPECT_CALL(*event_logger_, logNoLongerDegraded(_, _)); + EXPECT_CALL(event_logger_, logNoLongerDegraded(_, _)); respond(0, "200", false, false, true, false, {}, false); EXPECT_EQ(Host::Health::Healthy, cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health()); } @@ -1026,13 +964,8 @@ TEST_F(HttpHealthCheckerImplTest, ZeroRetryInterval) { path: /healthcheck )EOF"; - health_checker_ = std::make_shared( - *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_)); - health_checker_->addHostCheckCompleteCb( - [this](HostSharedPtr host, HealthTransition changed_state) -> void { - onHostStatus(host, changed_state); - }); + allocHealthChecker(yaml); + addCompletionCallback(); EXPECT_CALL(runtime_.snapshot_, featureEnabled("health_check.verify_cluster", 100)) .WillOnce(Return(true)); @@ -1047,10 +980,9 @@ TEST_F(HttpHealthCheckerImplTest, ZeroRetryInterval) { EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _)); EXPECT_CALL(test_sessions_[0]->request_encoder_, encodeHeaders(_, true)) .WillOnce(Invoke([&](const Http::RequestHeaderMap& headers, bool) { - EXPECT_EQ(headers.Host()->value().getStringView(), host); - EXPECT_EQ(headers.Path()->value().getStringView(), path); - EXPECT_EQ(headers.Scheme()->value().getStringView(), - 
Http::Headers::get().SchemeValues.Http); + EXPECT_EQ(headers.getHostValue(), host); + EXPECT_EQ(headers.getPathValue(), path); + EXPECT_EQ(headers.getSchemeValue(), Http::Headers::get().SchemeValues.Http); })); health_checker_->start(); @@ -1096,10 +1028,7 @@ TEST_F(HttpHealthCheckerImplTest, TlsOptions) { EXPECT_CALL(*socket_factory, createTransportSocket(ApplicationProtocolListEq("http1"))); - health_checker_ = std::make_shared( - *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_)); - + allocHealthChecker(yaml); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; cluster_->info_->stats().upstream_cx_total_.inc(); @@ -1126,10 +1055,9 @@ TEST_F(HttpHealthCheckerImplTest, SuccessServiceCheck) { EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _)); EXPECT_CALL(test_sessions_[0]->request_encoder_, encodeHeaders(_, true)) .WillOnce(Invoke([&](const Http::RequestHeaderMap& headers, bool) { - EXPECT_EQ(headers.Host()->value().getStringView(), host); - EXPECT_EQ(headers.Path()->value().getStringView(), path); - EXPECT_EQ(headers.Scheme()->value().getStringView(), - Http::Headers::get().SchemeValues.Http); + EXPECT_EQ(headers.getHostValue(), host); + EXPECT_EQ(headers.getPathValue(), path); + EXPECT_EQ(headers.getSchemeValue(), Http::Headers::get().SchemeValues.Http); })); health_checker_->start(); @@ -1161,10 +1089,9 @@ TEST_F(HttpHealthCheckerImplTest, SuccessServicePrefixPatternCheck) { EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _)); EXPECT_CALL(test_sessions_[0]->request_encoder_, encodeHeaders(_, true)) .WillOnce(Invoke([&](const Http::RequestHeaderMap& headers, bool) { - EXPECT_EQ(headers.Host()->value().getStringView(), host); - EXPECT_EQ(headers.Path()->value().getStringView(), path); - EXPECT_EQ(headers.Scheme()->value().getStringView(), - Http::Headers::get().SchemeValues.Http); + 
EXPECT_EQ(headers.getHostValue(), host); + EXPECT_EQ(headers.getPathValue(), path); + EXPECT_EQ(headers.getSchemeValue(), Http::Headers::get().SchemeValues.Http); })); health_checker_->start(); @@ -1196,10 +1123,9 @@ TEST_F(HttpHealthCheckerImplTest, SuccessServiceExactPatternCheck) { EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _)); EXPECT_CALL(test_sessions_[0]->request_encoder_, encodeHeaders(_, true)) .WillOnce(Invoke([&](const Http::RequestHeaderMap& headers, bool) { - EXPECT_EQ(headers.Host()->value().getStringView(), host); - EXPECT_EQ(headers.Path()->value().getStringView(), path); - EXPECT_EQ(headers.Scheme()->value().getStringView(), - Http::Headers::get().SchemeValues.Http); + EXPECT_EQ(headers.getHostValue(), host); + EXPECT_EQ(headers.getPathValue(), path); + EXPECT_EQ(headers.getSchemeValue(), Http::Headers::get().SchemeValues.Http); })); health_checker_->start(); @@ -1231,10 +1157,9 @@ TEST_F(HttpHealthCheckerImplTest, SuccessServiceRegexPatternCheck) { EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _)); EXPECT_CALL(test_sessions_[0]->request_encoder_, encodeHeaders(_, true)) .WillOnce(Invoke([&](const Http::RequestHeaderMap& headers, bool) { - EXPECT_EQ(headers.Host()->value().getStringView(), host); - EXPECT_EQ(headers.Path()->value().getStringView(), path); - EXPECT_EQ(headers.Scheme()->value().getStringView(), - Http::Headers::get().SchemeValues.Http); + EXPECT_EQ(headers.getHostValue(), host); + EXPECT_EQ(headers.getPathValue(), path); + EXPECT_EQ(headers.getSchemeValue(), Http::Headers::get().SchemeValues.Http); })); health_checker_->start(); @@ -1274,8 +1199,8 @@ TEST_F(HttpHealthCheckerImplTest, SuccessServiceCheckWithCustomHostValueOnTheHos EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _)); EXPECT_CALL(test_sessions_[0]->request_encoder_, encodeHeaders(_, true)) .WillOnce(Invoke([&](const Http::RequestHeaderMap& headers, bool) { - EXPECT_EQ(headers.Host()->value().getStringView(), host); - 
EXPECT_EQ(headers.Path()->value().getStringView(), path); + EXPECT_EQ(headers.getHostValue(), host); + EXPECT_EQ(headers.getPathValue(), path); })); health_checker_->start(); @@ -1318,8 +1243,8 @@ TEST_F(HttpHealthCheckerImplTest, EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _)); EXPECT_CALL(test_sessions_[0]->request_encoder_, encodeHeaders(_, true)) .WillOnce(Invoke([&](const Http::RequestHeaderMap& headers, bool) { - EXPECT_EQ(headers.Host()->value().getStringView(), host); - EXPECT_EQ(headers.Path()->value().getStringView(), path); + EXPECT_EQ(headers.getHostValue(), host); + EXPECT_EQ(headers.getPathValue(), path); })); health_checker_->start(); @@ -1352,8 +1277,8 @@ TEST_F(HttpHealthCheckerImplTest, SuccessServiceCheckWithCustomHostValue) { EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _)); EXPECT_CALL(test_sessions_[0]->request_encoder_, encodeHeaders(_, true)) .WillOnce(Invoke([&](const Http::RequestHeaderMap& headers, bool) { - EXPECT_EQ(headers.Host()->value().getStringView(), host); - EXPECT_EQ(headers.Path()->value().getStringView(), path); + EXPECT_EQ(headers.getHostValue(), host); + EXPECT_EQ(headers.getPathValue(), path); })); health_checker_->start(); @@ -1419,7 +1344,7 @@ TEST_F(HttpHealthCheckerImplTest, SuccessServiceCheckWithAdditionalHeaders) { EXPECT_EQ(headers.get(header_cool)->value().getStringView(), value_cool); EXPECT_EQ(headers.get(header_awesome)->value().getStringView(), value_awesome); - EXPECT_EQ(headers.UserAgent()->value().getStringView(), value_user_agent); + EXPECT_EQ(headers.getUserAgentValue(), value_user_agent); EXPECT_EQ(headers.get(upstream_metadata)->value().getStringView(), value_upstream_metadata); EXPECT_EQ(headers.get(protocol)->value().getStringView(), value_protocol); @@ -1498,12 +1423,12 @@ TEST_F(HttpHealthCheckerImplTest, SuccessServiceCheckWithoutUserAgent) { TEST_F(HttpHealthCheckerImplTest, ServiceDoesNotMatchFail) { setupServiceValidationHC(); - EXPECT_CALL(*event_logger_, 
logUnhealthy(_, _, _, true)); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); EXPECT_CALL(runtime_.snapshot_, featureEnabled("health_check.verify_cluster", 100)) .WillOnce(Return(true)); EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)).Times(1); - EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _)); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; @@ -1529,12 +1454,12 @@ TEST_F(HttpHealthCheckerImplTest, ServiceDoesNotMatchFail) { TEST_F(HttpHealthCheckerImplTest, ServicePatternDoesNotMatchFail) { setupServiceRegexPatternValidationHC(); - EXPECT_CALL(*event_logger_, logUnhealthy(_, _, _, true)); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); EXPECT_CALL(runtime_.snapshot_, featureEnabled("health_check.verify_cluster", 100)) .WillOnce(Return(true)); EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)).Times(1); - EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _)); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; @@ -1560,12 +1485,12 @@ TEST_F(HttpHealthCheckerImplTest, ServicePatternDoesNotMatchFail) { TEST_F(HttpHealthCheckerImplTest, ServiceNotPresentInResponseFail) { setupServiceValidationHC(); - EXPECT_CALL(*event_logger_, logUnhealthy(_, _, _, true)); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); EXPECT_CALL(runtime_.snapshot_, featureEnabled("health_check.verify_cluster", 100)) .WillOnce(Return(true)); EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)).Times(1); - EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _)); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; @@ -1678,7 +1603,7 @@ TEST_F(HttpHealthCheckerImplTest, 
SuccessStartFailedSuccessFirst) { // Test fast success immediately moves us to healthy. EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)).Times(1); - EXPECT_CALL(*event_logger_, logAddHealthy(_, _, true)); + EXPECT_CALL(event_logger_, logAddHealthy(_, _, true)); EXPECT_CALL(runtime_.snapshot_, getInteger("health_check.max_interval", _)).WillOnce(Return(500)); EXPECT_CALL(runtime_.snapshot_, getInteger("health_check.min_interval", _)); EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(std::chrono::milliseconds(500), _)); @@ -1712,10 +1637,10 @@ TEST_F(HttpHealthCheckerImplTest, HttpFailRemoveHostInCallbackNoClose) { cluster_->prioritySet().getMockHostSet(0)->hosts_ = {}; cluster_->prioritySet().runUpdateCallbacks(0, {}, {host}); })); - EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _)); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _)).Times(0); EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer()).Times(0); - EXPECT_CALL(*event_logger_, logUnhealthy(_, _, _, true)); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); respond(0, "503", false); } @@ -1734,10 +1659,10 @@ TEST_F(HttpHealthCheckerImplTest, HttpFailRemoveHostInCallbackClose) { cluster_->prioritySet().getMockHostSet(0)->hosts_ = {}; cluster_->prioritySet().runUpdateCallbacks(0, {}, {host}); })); - EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _)); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _)).Times(0); EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer()).Times(0); - EXPECT_CALL(*event_logger_, logUnhealthy(_, _, _, true)); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); respond(0, "503", true); } @@ -1751,10 +1676,10 @@ TEST_F(HttpHealthCheckerImplTest, HttpFail) { health_checker_->start(); EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)); - 
EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _)); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _)); EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer()); - EXPECT_CALL(*event_logger_, logUnhealthy(_, _, _, true)); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); respond(0, "503", false); EXPECT_TRUE(cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagGet( Host::HealthFlag::FAILED_ACTIVE_HC)); @@ -1781,7 +1706,7 @@ TEST_F(HttpHealthCheckerImplTest, HttpFail) { test_sessions_[0]->interval_timer_->invokeCallback(); EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)); - EXPECT_CALL(*event_logger_, logAddHealthy(_, _, false)); + EXPECT_CALL(event_logger_, logAddHealthy(_, _, false)); EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _)); EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer()); respond(0, "200", false); @@ -1798,10 +1723,10 @@ TEST_F(HttpHealthCheckerImplTest, HttpFailLogError) { health_checker_->start(); EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)); - EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _)); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _)); EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer()); - EXPECT_CALL(*event_logger_, logUnhealthy(_, _, _, true)); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); respond(0, "503", false); EXPECT_TRUE(cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagGet( Host::HealthFlag::FAILED_ACTIVE_HC)); @@ -1818,7 +1743,7 @@ TEST_F(HttpHealthCheckerImplTest, HttpFailLogError) { EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)); EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _)); EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer()); - EXPECT_CALL(*event_logger_, logUnhealthy(_, _, _, false)); + 
EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, false)); respond(0, "503", false); EXPECT_TRUE(cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagGet( Host::HealthFlag::FAILED_ACTIVE_HC)); @@ -1845,7 +1770,7 @@ TEST_F(HttpHealthCheckerImplTest, HttpFailLogError) { test_sessions_[0]->interval_timer_->invokeCallback(); EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)); - EXPECT_CALL(*event_logger_, logAddHealthy(_, _, false)); + EXPECT_CALL(event_logger_, logAddHealthy(_, _, false)); EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _)); EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer()); respond(0, "200", false); @@ -1854,7 +1779,7 @@ TEST_F(HttpHealthCheckerImplTest, HttpFailLogError) { TEST_F(HttpHealthCheckerImplTest, Disconnect) { setupNoServiceValidationHC(); - EXPECT_CALL(*event_logger_, logUnhealthy(_, _, _, true)); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); EXPECT_CALL(*this, onHostStatus(_, HealthTransition::ChangePending)).Times(1); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { @@ -1876,7 +1801,7 @@ TEST_F(HttpHealthCheckerImplTest, Disconnect) { EXPECT_CALL(*this, onHostStatus(cluster_->prioritySet().getMockHostSet(0)->hosts_[0], HealthTransition::Changed)); - EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _)); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _)); EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer()); test_sessions_[0]->client_connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); @@ -1899,8 +1824,8 @@ TEST_F(HttpHealthCheckerImplTest, Timeout) { EXPECT_CALL(*test_sessions_[0]->client_connection_, close(_)); EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _)); EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer()); - EXPECT_CALL(*event_logger_, logUnhealthy(_, _, _, true)); - EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _)); + 
EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); test_sessions_[0]->timeout_timer_->invokeCallback(); EXPECT_EQ(Host::Health::Unhealthy, cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health()); @@ -1926,7 +1851,7 @@ TEST_F(HttpHealthCheckerImplTest, TimeoutThenSuccess) { test_sessions_[0]->stream_response_callbacks_->decodeHeaders(std::move(response_headers), false); EXPECT_CALL(*this, onHostStatus(_, HealthTransition::ChangePending)); - EXPECT_CALL(*event_logger_, logUnhealthy(_, _, _, true)); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); EXPECT_CALL(*test_sessions_[0]->client_connection_, close(_)); EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _)); EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer()); @@ -1947,7 +1872,7 @@ TEST_F(HttpHealthCheckerImplTest, TimeoutThenSuccess) { TEST_F(HttpHealthCheckerImplTest, TimeoutThenRemoteClose) { setupNoServiceValidationHC(); - EXPECT_CALL(*event_logger_, logUnhealthy(_, _, _, true)); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; expectSessionCreate(); @@ -1968,7 +1893,7 @@ TEST_F(HttpHealthCheckerImplTest, TimeoutThenRemoteClose) { test_sessions_[0]->interval_timer_->invokeCallback(); EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)); - EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _)); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _)); EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer()); test_sessions_[0]->client_connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); @@ -1987,7 +1912,7 @@ TEST_F(HttpHealthCheckerImplTest, TimeoutAfterDisconnect) { makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; expectSessionCreate(); expectStreamCreate(0); - 
EXPECT_CALL(*event_logger_, logUnhealthy(_, _, _, true)); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _)).Times(2); health_checker_->start(); @@ -1999,7 +1924,7 @@ TEST_F(HttpHealthCheckerImplTest, TimeoutAfterDisconnect) { } EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)).Times(1); - EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _)); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer()); test_sessions_[0]->timeout_timer_->enableTimer(std::chrono::seconds(10), nullptr); @@ -2070,6 +1995,56 @@ TEST_F(HttpHealthCheckerImplTest, ProxyConnectionClose) { test_sessions_[0]->interval_timer_->invokeCallback(); } +TEST_F(HttpHealthCheckerImplTest, ConnectionCloseLegacy) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.fixed_connection_close", "false"}}); + setupNoServiceValidationHC(); + EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)); + + cluster_->prioritySet().getMockHostSet(0)->hosts_ = { + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + expectSessionCreate(); + expectStreamCreate(0); + EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _)); + health_checker_->start(); + + EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _)); + EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer()); + respond(0, "200", true); + EXPECT_EQ(Host::Health::Healthy, cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health()); + + expectClientCreate(0); + expectStreamCreate(0); + EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _)); + test_sessions_[0]->interval_timer_->invokeCallback(); +} + +TEST_F(HttpHealthCheckerImplTest, ProxyConnectionCloseLegacy) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + 
{{"envoy.reloadable_features.fixed_connection_close", "false"}}); + setupNoServiceValidationHC(); + EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)); + + cluster_->prioritySet().getMockHostSet(0)->hosts_ = { + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + expectSessionCreate(); + expectStreamCreate(0); + EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _)); + health_checker_->start(); + + EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _)); + EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer()); + respond(0, "200", false, true); + EXPECT_EQ(Host::Health::Healthy, cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health()); + + expectClientCreate(0); + expectStreamCreate(0); + EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _)); + test_sessions_[0]->interval_timer_->invokeCallback(); +} + TEST_F(HttpHealthCheckerImplTest, HealthCheckIntervals) { setupHealthCheckIntervalOverridesHC(); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { @@ -2117,7 +2092,7 @@ TEST_F(HttpHealthCheckerImplTest, HealthCheckIntervals) { // ignored and health state changes immediately. Since the threshold is ignored, next health // check respects "unhealthy_interval". EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)); - EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _)); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(std::chrono::milliseconds(2000), _)); EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer()); respond(0, "503", false); @@ -2174,7 +2149,7 @@ TEST_F(HttpHealthCheckerImplTest, HealthCheckIntervals) { // After the healthy threshold is reached, health state should change while checks should respect // the default interval. 
EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)); - EXPECT_CALL(*event_logger_, logAddHealthy(_, _, false)); + EXPECT_CALL(event_logger_, logAddHealthy(_, _, false)); EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(std::chrono::milliseconds(1000), _)); EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer()); respond(0, "200", false); @@ -2225,7 +2200,7 @@ TEST_F(HttpHealthCheckerImplTest, HealthCheckIntervals) { // Subsequent failing checks should respect unhealthy_interval. As the unhealthy threshold is // reached, health state should also change. EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)); - EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _)); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(std::chrono::milliseconds(2000), _)); EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer()); test_sessions_[0]->timeout_timer_->invokeCallback(); @@ -2274,7 +2249,7 @@ TEST_F(HttpHealthCheckerImplTest, HealthCheckIntervals) { // After the healthy threshold is reached, health state should change while checks should respect // the default interval. 
EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)); - EXPECT_CALL(*event_logger_, logAddHealthy(_, _, false)); + EXPECT_CALL(event_logger_, logAddHealthy(_, _, false)); EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(std::chrono::milliseconds(1000), _)); EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer()); respond(0, "200", false); @@ -2411,8 +2386,8 @@ TEST_F(HttpHealthCheckerImplTest, SuccessServiceCheckWithAltPort) { EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _)); EXPECT_CALL(test_sessions_[0]->request_encoder_, encodeHeaders(_, true)) .WillOnce(Invoke([&](const Http::RequestHeaderMap& headers, bool) { - EXPECT_EQ(headers.Host()->value().getStringView(), host); - EXPECT_EQ(headers.Path()->value().getStringView(), path); + EXPECT_EQ(headers.getHostValue(), host); + EXPECT_EQ(headers.getPathValue(), path); })); health_checker_->start(); @@ -2467,6 +2442,118 @@ TEST_F(HttpHealthCheckerImplTest, Http2ClusterUseHttp2CodecClient) { EXPECT_EQ(Http::CodecClient::Type::HTTP2, health_checker_->codecClientType()); } +MATCHER_P(MetadataEq, expected, "") { + const envoy::config::core::v3::Metadata* metadata = arg; + if (!metadata) { + return false; + } + EXPECT_TRUE(Envoy::Protobuf::util::MessageDifferencer::Equals(*metadata, expected)); + return true; +} + +TEST_F(HttpHealthCheckerImplTest, TransportSocketMatchCriteria) { + const std::string host = "fake_cluster"; + const std::string path = "/healthcheck"; + const std::string yaml = R"EOF( + timeout: 1s + interval: 1s + no_traffic_interval: 1s + interval_jitter_percent: 40 + unhealthy_threshold: 2 + healthy_threshold: 2 + http_health_check: + service_name_matcher: + prefix: locations + path: /healthcheck + transport_socket_match_criteria: + key: value + )EOF"; + + auto default_socket_factory = std::make_unique(); + // We expect that this default_socket_factory will NOT be used to create a transport socket for + // the health check connection. 
+ EXPECT_CALL(*default_socket_factory, createTransportSocket(_)).Times(0); + EXPECT_CALL(*default_socket_factory, implementsSecureTransport()); + auto transport_socket_match = + std::make_unique(std::move(default_socket_factory)); + + auto metadata = TestUtility::parseYaml( + R"EOF( + filter_metadata: + envoy.transport_socket_match: + key: value + )EOF"); + + Stats::IsolatedStoreImpl stats_store; + auto health_transport_socket_stats = TransportSocketMatchStats{ + ALL_TRANSPORT_SOCKET_MATCH_STATS(POOL_COUNTER_PREFIX(stats_store, "test"))}; + auto health_check_only_socket_factory = std::make_unique(); + + // We expect resolve() to be called twice, once for endpoint socket matching (with no metadata in + // this test) and once for health check socket matching. In the latter we expect metadata that + // matches the above object. + EXPECT_CALL(*transport_socket_match, resolve(nullptr)); + EXPECT_CALL(*transport_socket_match, resolve(MetadataEq(metadata))) + .WillOnce(Return(TransportSocketMatcher::MatchData( + *health_check_only_socket_factory, health_transport_socket_stats, "health_check_only"))); + // The health_check_only_socket_factory should be used to create a transport socket for the health + // check connection. 
+ EXPECT_CALL(*health_check_only_socket_factory, createTransportSocket(_)); + + cluster_->info_->transport_socket_matcher_ = std::move(transport_socket_match); + + allocHealthChecker(yaml); + + cluster_->prioritySet().getMockHostSet(0)->hosts_ = { + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + cluster_->info_->stats().upstream_cx_total_.inc(); + expectSessionCreate(); + expectStreamCreate(0); + EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _)); + health_checker_->start(); + EXPECT_EQ(health_transport_socket_stats.total_match_count_.value(), 1); +} + +TEST_F(HttpHealthCheckerImplTest, NoTransportSocketMatchCriteria) { + const std::string host = "fake_cluster"; + const std::string path = "/healthcheck"; + const std::string yaml = R"EOF( + timeout: 1s + interval: 1s + no_traffic_interval: 1s + interval_jitter_percent: 40 + unhealthy_threshold: 2 + healthy_threshold: 2 + http_health_check: + service_name_matcher: + prefix: locations + path: /healthcheck + )EOF"; + + auto default_socket_factory = std::make_unique(); + // The default_socket_factory should be used to create a transport socket for the health check + // connection. + EXPECT_CALL(*default_socket_factory, createTransportSocket(_)); + EXPECT_CALL(*default_socket_factory, implementsSecureTransport()); + auto transport_socket_match = + std::make_unique(std::move(default_socket_factory)); + // We expect resolve() to be called exactly once for endpoint socket matching. We should not + // attempt to match again for health checks since there is not match criteria in the config. 
+ EXPECT_CALL(*transport_socket_match, resolve(nullptr)); + + cluster_->info_->transport_socket_matcher_ = std::move(transport_socket_match); + + allocHealthChecker(yaml); + + cluster_->prioritySet().getMockHostSet(0)->hosts_ = { + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + cluster_->info_->stats().upstream_cx_total_.inc(); + expectSessionCreate(); + expectStreamCreate(0); + EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _)); + health_checker_->start(); +} + class TestProdHttpHealthChecker : public ProdHttpHealthCheckerImpl { public: using ProdHttpHealthCheckerImpl::ProdHttpHealthCheckerImpl; @@ -2480,8 +2567,21 @@ class TestProdHttpHealthChecker : public ProdHttpHealthCheckerImpl { } }; -class ProdHttpHealthCheckerTest : public HttpHealthCheckerImplTest { +class ProdHttpHealthCheckerTest : public testing::Test, public HealthCheckerTestBase { public: + void allocHealthChecker(const std::string& yaml, bool avoid_boosting = true) { + health_checker_ = std::make_shared( + *cluster_, parseHealthCheckFromV3Yaml(yaml, avoid_boosting), dispatcher_, runtime_, random_, + HealthCheckEventLoggerPtr(event_logger_storage_.release())); + } + + void addCompletionCallback() { + health_checker_->addHostCheckCompleteCb( + [this](HostSharedPtr host, HealthTransition changed_state) -> void { + onHostStatus(host, changed_state); + }); + } + void setupNoServiceValidationHCWithHttp2() { const std::string yaml = R"EOF( timeout: 1s @@ -2497,13 +2597,8 @@ class ProdHttpHealthCheckerTest : public HttpHealthCheckerImplTest { codec_client_type: Http2 )EOF"; - health_checker_ = std::make_shared( - *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_)); - health_checker_->addHostCheckCompleteCb( - [this](HostSharedPtr host, HealthTransition changed_state) -> void { - onHostStatus(host, changed_state); - }); + allocHealthChecker(yaml); + addCompletionCallback(); } void setupNoServiceValidationHC() { @@ 
-2520,15 +2615,11 @@ class ProdHttpHealthCheckerTest : public HttpHealthCheckerImplTest { path: /healthcheck )EOF"; - health_checker_ = std::make_shared( - *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_)); - health_checker_->addHostCheckCompleteCb( - [this](HostSharedPtr host, HealthTransition changed_state) -> void { - onHostStatus(host, changed_state); - }); + allocHealthChecker(yaml); + addCompletionCallback(); } + MOCK_METHOD(void, onHostStatus, (HostSharedPtr host, HealthTransition changed_state)); std::unique_ptr connection_ = std::make_unique>(); std::shared_ptr health_checker_; @@ -2555,13 +2646,8 @@ TEST_F(HttpHealthCheckerImplTest, DEPRECATED_FEATURE_TEST(Http1CodecClient)) { use_http2: false )EOF"; - health_checker_ = std::make_shared( - *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_)); - health_checker_->addHostCheckCompleteCb( - [this](HostSharedPtr host, HealthTransition changed_state) -> void { - onHostStatus(host, changed_state); - }); + allocHealthChecker(yaml, false); + addCompletionCallback(); EXPECT_EQ(Http::CodecClient::Type::HTTP1, health_checker_->codecClientType()); } @@ -2580,13 +2666,8 @@ TEST_F(HttpHealthCheckerImplTest, DEPRECATED_FEATURE_TEST(Http2CodecClient)) { use_http2: true )EOF"; - health_checker_ = std::make_shared( - *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_)); - health_checker_->addHostCheckCompleteCb( - [this](HostSharedPtr host, HealthTransition changed_state) -> void { - onHostStatus(host, changed_state); - }); + allocHealthChecker(yaml, false); + addCompletionCallback(); EXPECT_EQ(Http::CodecClient::Type::HTTP2, health_checker_->codecClientType()); } @@ -2607,10 +2688,9 @@ TEST_F(HttpHealthCheckerImplTest, DEPRECATED_FEATURE_TEST(ServiceNameMatch)) { EXPECT_CALL(*test_sessions_[0]->timeout_timer_, 
enableTimer(_, _)); EXPECT_CALL(test_sessions_[0]->request_encoder_, encodeHeaders(_, true)) .WillOnce(Invoke([&](const Http::RequestHeaderMap& headers, bool) { - EXPECT_EQ(headers.Host()->value().getStringView(), host); - EXPECT_EQ(headers.Path()->value().getStringView(), path); - EXPECT_EQ(headers.Scheme()->value().getStringView(), - Http::Headers::get().SchemeValues.Http); + EXPECT_EQ(headers.getHostValue(), host); + EXPECT_EQ(headers.getPathValue(), path); + EXPECT_EQ(headers.getSchemeValue(), Http::Headers::get().SchemeValues.Http); })); health_checker_->start(); @@ -2627,12 +2707,12 @@ TEST_F(HttpHealthCheckerImplTest, DEPRECATED_FEATURE_TEST(ServiceNameMatch)) { TEST_F(HttpHealthCheckerImplTest, DEPRECATED_FEATURE_TEST(ServiceNameMismatch)) { setupDeprecatedServiceNameValidationHC("locations"); - EXPECT_CALL(*event_logger_, logUnhealthy(_, _, _, true)); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); EXPECT_CALL(runtime_.snapshot_, featureEnabled("health_check.verify_cluster", 100)) .WillOnce(Return(true)); EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)).Times(1); - EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _)); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; @@ -2675,7 +2755,7 @@ TEST(HttpStatusChecker, Default) { )EOF"; HttpHealthCheckerImpl::HttpStatusChecker http_status_checker( - parseHealthCheckFromV2Yaml(yaml).http_health_check().expected_statuses(), 200); + parseHealthCheckFromV3Yaml(yaml).http_health_check().expected_statuses(), 200); EXPECT_TRUE(http_status_checker.inRange(200)); EXPECT_FALSE(http_status_checker.inRange(204)); @@ -2697,7 +2777,7 @@ TEST(HttpStatusChecker, Single100) { )EOF"; HttpHealthCheckerImpl::HttpStatusChecker http_status_checker( - parseHealthCheckFromV2Yaml(yaml).http_health_check().expected_statuses(), 200); + 
parseHealthCheckFromV3Yaml(yaml).http_health_check().expected_statuses(), 200); EXPECT_FALSE(http_status_checker.inRange(200)); @@ -2722,7 +2802,7 @@ TEST(HttpStatusChecker, Single599) { )EOF"; HttpHealthCheckerImpl::HttpStatusChecker http_status_checker( - parseHealthCheckFromV2Yaml(yaml).http_health_check().expected_statuses(), 200); + parseHealthCheckFromV3Yaml(yaml).http_health_check().expected_statuses(), 200); EXPECT_FALSE(http_status_checker.inRange(200)); @@ -2749,7 +2829,7 @@ TEST(HttpStatusChecker, Ranges_204_304) { )EOF"; HttpHealthCheckerImpl::HttpStatusChecker http_status_checker( - parseHealthCheckFromV2Yaml(yaml).http_health_check().expected_statuses(), 200); + parseHealthCheckFromV3Yaml(yaml).http_health_check().expected_statuses(), 200); EXPECT_FALSE(http_status_checker.inRange(200)); @@ -2778,7 +2858,7 @@ TEST(HttpStatusChecker, Below100) { EXPECT_THROW_WITH_MESSAGE( HttpHealthCheckerImpl::HttpStatusChecker http_status_checker( - parseHealthCheckFromV2Yaml(yaml).http_health_check().expected_statuses(), 200), + parseHealthCheckFromV3Yaml(yaml).http_health_check().expected_statuses(), 200), EnvoyException, "Invalid http status range: expecting start >= 100, but found start=99"); } @@ -2799,7 +2879,7 @@ TEST(HttpStatusChecker, Above599) { EXPECT_THROW_WITH_MESSAGE( HttpHealthCheckerImpl::HttpStatusChecker http_status_checker( - parseHealthCheckFromV2Yaml(yaml).http_health_check().expected_statuses(), 200), + parseHealthCheckFromV3Yaml(yaml).http_health_check().expected_statuses(), 200), EnvoyException, "Invalid http status range: expecting end <= 600, but found end=601"); } @@ -2820,7 +2900,7 @@ TEST(HttpStatusChecker, InvalidRange) { EXPECT_THROW_WITH_MESSAGE( HttpHealthCheckerImpl::HttpStatusChecker http_status_checker( - parseHealthCheckFromV2Yaml(yaml).http_health_check().expected_statuses(), 200), + parseHealthCheckFromV3Yaml(yaml).http_health_check().expected_statuses(), 200), EnvoyException, "Invalid http status range: expecting start < end, 
but found start=200 and end=200"); } @@ -2842,7 +2922,7 @@ TEST(HttpStatusChecker, InvalidRange2) { EXPECT_THROW_WITH_MESSAGE( HttpHealthCheckerImpl::HttpStatusChecker http_status_checker( - parseHealthCheckFromV2Yaml(yaml).http_health_check().expected_statuses(), 200), + parseHealthCheckFromV3Yaml(yaml).http_health_check().expected_statuses(), 200), EnvoyException, "Invalid http status range: expecting start < end, but found start=201 and end=200"); } @@ -2906,11 +2986,13 @@ TEST(TcpHealthCheckMatcher, match) { EXPECT_TRUE(TcpHealthCheckMatcher::match(segments, buffer)); } -class TcpHealthCheckerImplTest : public testing::Test { +class TcpHealthCheckerImplTest : public testing::Test, public HealthCheckerTestBase { public: - TcpHealthCheckerImplTest() - : cluster_(new NiceMock()), - event_logger_(new MockHealthCheckEventLogger()) {} + void allocHealthChecker(const std::string& yaml, bool avoid_boosting = true) { + health_checker_ = std::make_shared( + *cluster_, parseHealthCheckFromV3Yaml(yaml, avoid_boosting), dispatcher_, runtime_, random_, + HealthCheckEventLoggerPtr(event_logger_storage_.release())); + } void setupData(unsigned int unhealthy_threshold = 2) { std::ostringstream yaml; @@ -2927,9 +3009,7 @@ class TcpHealthCheckerImplTest : public testing::Test { - text: "02" )EOF"; - health_checker_ = std::make_shared( - *cluster_, parseHealthCheckFromV2Yaml(yaml.str()), dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_)); + allocHealthChecker(yaml.str()); } void setupNoData() { @@ -2941,9 +3021,7 @@ class TcpHealthCheckerImplTest : public testing::Test { tcp_health_check: {} )EOF"; - health_checker_ = std::make_shared( - *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_)); + allocHealthChecker(yaml); } void setupDataDontReuseConnection() { @@ -2960,9 +3038,7 @@ class TcpHealthCheckerImplTest : public testing::Test { - text: "02" )EOF"; - health_checker_ = 
std::make_shared( - *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_)); + allocHealthChecker(yaml); } void expectSessionCreate() { @@ -2976,16 +3052,11 @@ class TcpHealthCheckerImplTest : public testing::Test { EXPECT_CALL(*connection_, addReadFilter(_)).WillOnce(SaveArg<0>(&read_filter_)); } - std::shared_ptr cluster_; - NiceMock dispatcher_; std::shared_ptr health_checker_; - MockHealthCheckEventLogger* event_logger_{}; Network::MockClientConnection* connection_{}; Event::MockTimer* timeout_timer_{}; Event::MockTimer* interval_timer_{}; Network::ReadFilterSharedPtr read_filter_; - NiceMock runtime_; - NiceMock random_; }; TEST_F(TcpHealthCheckerImplTest, Success) { @@ -3091,7 +3162,7 @@ TEST_F(TcpHealthCheckerImplTest, TimeoutThenRemoteClose) { read_filter_->onData(response, false); EXPECT_CALL(*connection_, close(_)); - EXPECT_CALL(*event_logger_, logUnhealthy(_, _, _, true)); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); EXPECT_CALL(*timeout_timer_, disableTimer()); EXPECT_CALL(*interval_timer_, enableTimer(_, _)); timeout_timer_->invokeCallback(); @@ -3106,7 +3177,7 @@ TEST_F(TcpHealthCheckerImplTest, TimeoutThenRemoteClose) { connection_->raiseEvent(Network::ConnectionEvent::Connected); - EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _)); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); EXPECT_CALL(*timeout_timer_, disableTimer()); EXPECT_CALL(*interval_timer_, enableTimer(_, _)); connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); @@ -3151,8 +3222,8 @@ TEST_F(TcpHealthCheckerImplTest, Timeout) { read_filter_->onData(response, false); EXPECT_CALL(*connection_, close(_)); - EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _)); - EXPECT_CALL(*event_logger_, logUnhealthy(_, _, _, true)); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); EXPECT_CALL(*timeout_timer_, disableTimer()); 
EXPECT_CALL(*interval_timer_, enableTimer(_, _)); timeout_timer_->invokeCallback(); @@ -3185,7 +3256,7 @@ TEST_F(TcpHealthCheckerImplTest, DoubleTimeout) { read_filter_->onData(response, false); EXPECT_CALL(*connection_, close(_)); - EXPECT_CALL(*event_logger_, logUnhealthy(_, _, _, true)); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); EXPECT_CALL(*timeout_timer_, disableTimer()); EXPECT_CALL(*interval_timer_, enableTimer(_, _)); timeout_timer_->invokeCallback(); @@ -3201,7 +3272,7 @@ TEST_F(TcpHealthCheckerImplTest, DoubleTimeout) { connection_->raiseEvent(Network::ConnectionEvent::Connected); EXPECT_CALL(*connection_, close(_)); - EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _)); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); EXPECT_CALL(*timeout_timer_, disableTimer()); EXPECT_CALL(*interval_timer_, enableTimer(_, _)); timeout_timer_->invokeCallback(); @@ -3280,7 +3351,7 @@ TEST_F(TcpHealthCheckerImplTest, TimeoutWithoutReusingConnection) { connection_->raiseEvent(Network::ConnectionEvent::Connected); // Expected flow when a healthcheck times out. - EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _)); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); EXPECT_CALL(*timeout_timer_, disableTimer()); EXPECT_CALL(*interval_timer_, enableTimer(_, _)); connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); @@ -3327,8 +3398,8 @@ TEST_F(TcpHealthCheckerImplTest, PassiveFailure) { expectClientCreate(); EXPECT_CALL(*connection_, write(_, _)).Times(0); EXPECT_CALL(*timeout_timer_, enableTimer(_, _)); - EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _)); - EXPECT_CALL(*event_logger_, logUnhealthy(_, _, _, true)); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); health_checker_->start(); // Do multiple passive failures. This will not reset the active HC timers. 
@@ -3425,7 +3496,7 @@ TEST_F(TcpHealthCheckerImplTest, ConnectionLocalFailure) { health_checker_->start(); // Expect the LocalClose to be handled as a health check failure - EXPECT_CALL(*event_logger_, logUnhealthy(_, _, _, true)); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); EXPECT_CALL(*timeout_timer_, disableTimer()); EXPECT_CALL(*interval_timer_, enableTimer(_, _)); @@ -3452,7 +3523,7 @@ class TestGrpcHealthCheckerImpl : public GrpcHealthCheckerImpl { MOCK_METHOD(Http::CodecClient*, createCodecClient_, (Upstream::Host::CreateConnectionData&)); }; -class GrpcHealthCheckerImplTestBase { +class GrpcHealthCheckerImplTestBase : public HealthCheckerTestBase { public: struct TestSession { TestSession() = default; @@ -3523,34 +3594,35 @@ class GrpcHealthCheckerImplTestBase { std::vector> trailers; }; - GrpcHealthCheckerImplTestBase() - : cluster_(new NiceMock()), - event_logger_(new MockHealthCheckEventLogger()) { + GrpcHealthCheckerImplTestBase() { EXPECT_CALL(*cluster_->info_, features()) .WillRepeatedly(Return(Upstream::ClusterInfo::Features::HTTP2)); } - void setupHC() { - const auto config = createGrpcHealthCheckConfig(); + void allocHealthChecker(const envoy::config::core::v3::HealthCheck& config) { health_checker_ = std::make_shared( *cluster_, config, dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_)); + HealthCheckEventLoggerPtr(event_logger_storage_.release())); + } + + void addCompletionCallback() { health_checker_->addHostCheckCompleteCb( [this](HostSharedPtr host, HealthTransition changed_state) -> void { onHostStatus(host, changed_state); }); } + void setupHC() { + const auto config = createGrpcHealthCheckConfig(); + allocHealthChecker(config); + addCompletionCallback(); + } + void setupHCWithUnhealthyThreshold(int value) { auto config = createGrpcHealthCheckConfig(); config.mutable_unhealthy_threshold()->set_value(value); - health_checker_ = std::make_shared( - *cluster_, config, dispatcher_, runtime_, random_, - 
HealthCheckEventLoggerPtr(event_logger_)); - health_checker_->addHostCheckCompleteCb( - [this](HostSharedPtr host, HealthTransition changed_state) -> void { - onHostStatus(host, changed_state); - }); + allocHealthChecker(config); + addCompletionCallback(); } void setupServiceNameHC(const absl::optional& authority) { @@ -3559,25 +3631,15 @@ class GrpcHealthCheckerImplTestBase { if (authority.has_value()) { config.mutable_grpc_health_check()->set_authority(authority.value()); } - health_checker_ = std::make_shared( - *cluster_, config, dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_)); - health_checker_->addHostCheckCompleteCb( - [this](HostSharedPtr host, HealthTransition changed_state) -> void { - onHostStatus(host, changed_state); - }); + allocHealthChecker(config); + addCompletionCallback(); } void setupNoReuseConnectionHC() { auto config = createGrpcHealthCheckConfig(); config.mutable_reuse_connection()->set_value(false); - health_checker_ = std::make_shared( - *cluster_, config, dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_)); - health_checker_->addHostCheckCompleteCb( - [this](HostSharedPtr host, HealthTransition changed_state) -> void { - onHostStatus(host, changed_state); - }); + allocHealthChecker(config); + addCompletionCallback(); } void setupHealthCheckIntervalOverridesHC() { @@ -3590,13 +3652,8 @@ class GrpcHealthCheckerImplTestBase { config.mutable_interval_jitter()->set_seconds(0); config.mutable_unhealthy_threshold()->set_value(3); config.mutable_healthy_threshold()->set_value(3); - health_checker_ = std::make_shared( - *cluster_, config, dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_)); - health_checker_->addHostCheckCompleteCb( - [this](HostSharedPtr host, HealthTransition changed_state) -> void { - onHostStatus(host, changed_state); - }); + allocHealthChecker(config); + addCompletionCallback(); } void expectSessionCreate() { @@ -3764,14 +3821,11 @@ class 
GrpcHealthCheckerImplTestBase { EXPECT_CALL(test_sessions_[0]->request_encoder_, encodeHeaders(_, false)) .WillOnce(Invoke([&](const Http::RequestHeaderMap& headers, bool) { - EXPECT_EQ(Http::Headers::get().ContentTypeValues.Grpc, - headers.ContentType()->value().getStringView()); - EXPECT_EQ(std::string("/grpc.health.v1.Health/Check"), - headers.Path()->value().getStringView()); - EXPECT_EQ(Http::Headers::get().SchemeValues.Http, - headers.Scheme()->value().getStringView()); + EXPECT_EQ(Http::Headers::get().ContentTypeValues.Grpc, headers.getContentTypeValue()); + EXPECT_EQ(std::string("/grpc.health.v1.Health/Check"), headers.getPathValue()); + EXPECT_EQ(Http::Headers::get().SchemeValues.Http, headers.getSchemeValue()); EXPECT_NE(nullptr, headers.Method()); - EXPECT_EQ(expected_host, headers.Host()->value().getStringView()); + EXPECT_EQ(expected_host, headers.getHostValue()); EXPECT_EQ(std::chrono::milliseconds(1000).count(), Envoy::Grpc::Common::getGrpcTimeout(headers).count()); })); @@ -3803,13 +3857,8 @@ class GrpcHealthCheckerImplTestBase { MOCK_METHOD(void, onHostStatus, (HostSharedPtr host, HealthTransition changed_state)); - std::shared_ptr cluster_; - NiceMock dispatcher_; std::vector test_sessions_; std::shared_ptr health_checker_; - NiceMock runtime_; - NiceMock random_; - MockHealthCheckEventLogger* event_logger_{}; std::list connection_index_{}; std::list codec_index_{}; }; @@ -3980,7 +4029,7 @@ TEST_F(GrpcHealthCheckerImplTest, SuccessStartFailedSuccessFirst) { expectHealthcheckStop(0, 500); // Fast success immediately moves us to healthy. 
EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)); - EXPECT_CALL(*event_logger_, logAddHealthy(_, _, true)); + EXPECT_CALL(event_logger_, logAddHealthy(_, _, true)); respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::SERVING); expectHostHealthy(true); EXPECT_FALSE(cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagGet( @@ -4006,7 +4055,7 @@ TEST_F(GrpcHealthCheckerImplTest, SuccessStartFailedFailFirst) { // Host was unhealthy from the start, but we expect a state change due to the pending active hc // flag changing. EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)); - EXPECT_CALL(*event_logger_, logUnhealthy(_, _, _, true)); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::NOT_SERVING); expectHostHealthy(false); EXPECT_FALSE(cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagGet( @@ -4029,7 +4078,7 @@ TEST_F(GrpcHealthCheckerImplTest, SuccessStartFailedFailFirst) { expectHealthcheckStop(0); EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)); - EXPECT_CALL(*event_logger_, logAddHealthy(_, _, false)); + EXPECT_CALL(event_logger_, logAddHealthy(_, _, false)); respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::SERVING); expectHostHealthy(true); } @@ -4042,13 +4091,13 @@ TEST_F(GrpcHealthCheckerImplTest, GrpcHealthFail) { expectSessionCreate(); expectHealthcheckStart(0); - EXPECT_CALL(*event_logger_, logUnhealthy(_, _, _, true)); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); health_checker_->start(); // Explicit healthcheck failure immediately renders host unhealthy. 
expectHealthcheckStop(0); EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)); - EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _)); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::NOT_SERVING); expectHostHealthy(false); @@ -4068,7 +4117,7 @@ TEST_F(GrpcHealthCheckerImplTest, GrpcHealthFail) { expectHealthcheckStop(0); // Host should has become healthy. EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)); - EXPECT_CALL(*event_logger_, logAddHealthy(_, _, false)); + EXPECT_CALL(event_logger_, logAddHealthy(_, _, false)); respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::SERVING); expectHostHealthy(true); } @@ -4081,7 +4130,7 @@ TEST_F(GrpcHealthCheckerImplTest, Disconnect) { expectSessionCreate(); expectHealthcheckStart(0); - EXPECT_CALL(*event_logger_, logUnhealthy(_, _, _, true)); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); health_checker_->start(); expectHealthcheckStop(0); @@ -4097,7 +4146,7 @@ TEST_F(GrpcHealthCheckerImplTest, Disconnect) { expectHealthcheckStop(0); // Now, host should be unhealthy. 
EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)); - EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _)); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); test_sessions_[0]->client_connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); expectHostHealthy(false); } @@ -4109,13 +4158,13 @@ TEST_F(GrpcHealthCheckerImplTest, Timeout) { expectSessionCreate(); expectHealthcheckStart(0); - EXPECT_CALL(*event_logger_, logUnhealthy(_, _, _, true)); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); health_checker_->start(); expectHealthcheckStop(0); // Unhealthy threshold is 1 so first timeout causes unhealthy EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)); - EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _)); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); test_sessions_[0]->timeout_timer_->invokeCallback(); expectHostHealthy(false); } @@ -4128,7 +4177,7 @@ TEST_F(GrpcHealthCheckerImplTest, DoubleTimeout) { expectSessionCreate(); expectHealthcheckStart(0); - EXPECT_CALL(*event_logger_, logUnhealthy(_, _, _, true)); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); health_checker_->start(); expectHealthcheckStop(0); @@ -4142,7 +4191,7 @@ TEST_F(GrpcHealthCheckerImplTest, DoubleTimeout) { expectHealthcheckStop(0); EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)); - EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _)); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); // Close connection. Timeouts and connection closes counts together. test_sessions_[0]->client_connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); expectHostHealthy(false); @@ -4214,7 +4263,7 @@ TEST_F(GrpcHealthCheckerImplTest, HealthCheckIntervals) { // ignored and health state changes immediately. Since the threshold is ignored, next health // check respects "unhealthy_interval". 
EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)); - EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _)); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(std::chrono::milliseconds(2000), _)); EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer()); respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::NOT_SERVING); @@ -4271,7 +4320,7 @@ TEST_F(GrpcHealthCheckerImplTest, HealthCheckIntervals) { // After the healthy threshold is reached, health state should change while checks should respect // the default interval. EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)); - EXPECT_CALL(*event_logger_, logAddHealthy(_, _, false)); + EXPECT_CALL(event_logger_, logAddHealthy(_, _, false)); EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(std::chrono::milliseconds(1000), _)); EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer()); respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::SERVING); @@ -4318,7 +4367,7 @@ TEST_F(GrpcHealthCheckerImplTest, HealthCheckIntervals) { // Subsequent failing checks should respect unhealthy_interval. As the unhealthy threshold is // reached, health state should also change. EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)); - EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _)); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(std::chrono::milliseconds(2000), _)); EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer()); test_sessions_[0]->timeout_timer_->invokeCallback(); @@ -4363,7 +4412,7 @@ TEST_F(GrpcHealthCheckerImplTest, HealthCheckIntervals) { // After the healthy threshold is reached, health state should change while checks should respect // the default interval. 
EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)); - EXPECT_CALL(*event_logger_, logAddHealthy(_, _, false)); + EXPECT_CALL(event_logger_, logAddHealthy(_, _, false)); EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(std::chrono::milliseconds(1000), _)); EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer()); respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::SERVING); @@ -4438,12 +4487,72 @@ TEST_F(GrpcHealthCheckerImplTest, DontReuseConnectionBetweenChecks) { expectHostHealthy(true); } +// Test that we close connections when a timeout occurs and reuse_connection is false. +TEST_F(GrpcHealthCheckerImplTest, DontReuseConnectionTimeout) { + setupNoReuseConnectionHC(); + cluster_->prioritySet().getMockHostSet(0)->hosts_ = { + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + + expectSessionCreate(); + expectHealthcheckStart(0); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); + health_checker_->start(); + + expectHealthcheckStop(0); + // Timeouts are considered network failures and make host unhealthy also after 2nd event. + EXPECT_CALL(*this, onHostStatus(_, HealthTransition::ChangePending)); + test_sessions_[0]->timeout_timer_->invokeCallback(); + expectHostHealthy(true); + + // A new client is created because we close the connection + // when a timeout occurs and connection reuse is disabled. + expectClientCreate(0); + expectHealthcheckStart(0); + test_sessions_[0]->interval_timer_->invokeCallback(); + + expectHealthcheckStop(0); + // Test host state haven't changed. + EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)); + respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::SERVING); + expectHostHealthy(true); +} + +// Test that we close connections when a stream reset occurs and reuse_connection is false. 
+TEST_F(GrpcHealthCheckerImplTest, DontReuseConnectionStreamReset) { + setupNoReuseConnectionHC(); + cluster_->prioritySet().getMockHostSet(0)->hosts_ = { + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + + expectSessionCreate(); + expectHealthcheckStart(0); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); + health_checker_->start(); + + expectHealthcheckStop(0); + // Resets are considered network failures and make host unhealthy also after 2nd event. + EXPECT_CALL(*this, onHostStatus(_, HealthTransition::ChangePending)); + test_sessions_[0]->request_encoder_.stream_.resetStream(Http::StreamResetReason::RemoteReset); + expectHostHealthy(true); + + // A new client is created because we close the connection + // when a stream reset occurs and connection reuse is disabled. + expectClientCreate(0); + expectHealthcheckStart(0); + test_sessions_[0]->interval_timer_->invokeCallback(); + + expectHealthcheckStop(0); + // Test host state haven't changed. + EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)); + respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::SERVING); + expectHostHealthy(true); +} + // Test UNKNOWN health status is considered unhealthy. 
TEST_F(GrpcHealthCheckerImplTest, GrpcFailUnknown) { setupHC(); expectSingleHealthcheck(HealthTransition::Changed); - EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _)); - EXPECT_CALL(*event_logger_, logUnhealthy(_, _, _, true)); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::UNKNOWN); EXPECT_TRUE(cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagGet( @@ -4456,8 +4565,8 @@ TEST_F(GrpcHealthCheckerImplTest, GrpcFailUnknown) { TEST_F(GrpcHealthCheckerImplTest, GrpcFailServiceUnknown) { setupHC(); expectSingleHealthcheck(HealthTransition::Changed); - EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _)); - EXPECT_CALL(*event_logger_, logUnhealthy(_, _, _, true)); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::SERVICE_UNKNOWN); EXPECT_TRUE(cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagGet( @@ -4470,8 +4579,8 @@ TEST_F(GrpcHealthCheckerImplTest, GrpcFailServiceUnknown) { TEST_F(GrpcHealthCheckerImplTest, GrpcFailUnknownHealthStatus) { setupHC(); expectSingleHealthcheck(HealthTransition::Changed); - EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _)); - EXPECT_CALL(*event_logger_, logUnhealthy(_, _, _, true)); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); respondServiceStatus(0, static_cast(999)); EXPECT_TRUE(cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagGet( @@ -4480,16 +4589,18 @@ TEST_F(GrpcHealthCheckerImplTest, GrpcFailUnknownHealthStatus) { cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health()); } -// Test receiving GOAWAY is interpreted as connection close event. 
-TEST_F(GrpcHealthCheckerImplTest, GoAwayProbeInProgress) { +// Test receiving GOAWAY (error) is interpreted as connection close event. +TEST_F(GrpcHealthCheckerImplTest, GoAwayErrorProbeInProgress) { // FailureType::Network will be issued, it will render host unhealthy only if unhealthy_threshold // is reached. setupHCWithUnhealthyThreshold(1); expectSingleHealthcheck(HealthTransition::Changed); - EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _)); - EXPECT_CALL(*event_logger_, logUnhealthy(_, _, _, true)); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); - test_sessions_[0]->codec_client_->raiseGoAway(); + // GOAWAY with non-NO_ERROR code will result in a healthcheck failure + // and the connection closing. + test_sessions_[0]->codec_client_->raiseGoAway(Http::GoAwayErrorCode::Other); EXPECT_TRUE(cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagGet( Host::HealthFlag::FAILED_ACTIVE_HC)); @@ -4497,6 +4608,178 @@ TEST_F(GrpcHealthCheckerImplTest, GoAwayProbeInProgress) { cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health()); } +// Test receiving GOAWAY (no error) is handled gracefully while a check is in progress. +TEST_F(GrpcHealthCheckerImplTest, GoAwayProbeInProgress) { + setupHCWithUnhealthyThreshold(/*threshold=*/1); + cluster_->prioritySet().getMockHostSet(0)->hosts_ = { + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + + expectSessionCreate(); + expectHealthcheckStart(0); + health_checker_->start(); + + expectHealthcheckStop(0); + EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)); + + // GOAWAY with NO_ERROR code during check should be handle gracefully. + test_sessions_[0]->codec_client_->raiseGoAway(Http::GoAwayErrorCode::NoError); + respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::SERVING); + expectHostHealthy(true); + + // GOAWAY should cause a new connection to be created. 
+ expectClientCreate(0); + expectHealthcheckStart(0); + test_sessions_[0]->interval_timer_->invokeCallback(); + + expectHealthcheckStop(0); + // Test host state haven't changed. + EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)); + respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::SERVING); + expectHostHealthy(true); +} + +// Test receiving GOAWAY (no error) closes connection after an in progress probe times outs. +TEST_F(GrpcHealthCheckerImplTest, GoAwayProbeInProgressTimeout) { + setupHCWithUnhealthyThreshold(/*threshold=*/1); + cluster_->prioritySet().getMockHostSet(0)->hosts_ = { + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + + expectSessionCreate(); + expectHealthcheckStart(0); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); + health_checker_->start(); + + expectHealthcheckStop(0); + // Unhealthy threshold is 1 so first timeout causes unhealthy + EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); + + // GOAWAY during check should be handled gracefully. + test_sessions_[0]->codec_client_->raiseGoAway(Http::GoAwayErrorCode::NoError); + expectHostHealthy(true); + + test_sessions_[0]->timeout_timer_->invokeCallback(); + expectHostHealthy(false); + + // GOAWAY should cause a new connection to be created. + expectClientCreate(0); + expectHealthcheckStart(0); + test_sessions_[0]->interval_timer_->invokeCallback(); + + expectHealthcheckStop(0); + // Healthy threshold is 2, so the we'ere pending a state change. + EXPECT_CALL(*this, onHostStatus(_, HealthTransition::ChangePending)); + respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::SERVING); + expectHostHealthy(false); +} + +// Test receiving GOAWAY (no error) closes connection after an unexpected stream reset. 
+TEST_F(GrpcHealthCheckerImplTest, GoAwayProbeInProgressStreamReset) { + setupHCWithUnhealthyThreshold(/*threshold=*/1); + cluster_->prioritySet().getMockHostSet(0)->hosts_ = { + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + + expectSessionCreate(); + expectHealthcheckStart(0); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); + health_checker_->start(); + + expectHealthcheckStop(0); + // Unhealthy threshold is 1 so first stream reset causes unhealthy + EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); + + // GOAWAY during check should be handled gracefully. + test_sessions_[0]->codec_client_->raiseGoAway(Http::GoAwayErrorCode::NoError); + expectHostHealthy(true); + + test_sessions_[0]->request_encoder_.stream_.resetStream(Http::StreamResetReason::RemoteReset); + expectHostHealthy(false); + + // GOAWAY should cause a new connection to be created. + expectClientCreate(0); + expectHealthcheckStart(0); + test_sessions_[0]->interval_timer_->invokeCallback(); + + expectHealthcheckStop(0); + // Healthy threshold is 2, so the we'ere pending a state change. + EXPECT_CALL(*this, onHostStatus(_, HealthTransition::ChangePending)); + respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::SERVING); + expectHostHealthy(false); +} + +// Test receiving GOAWAY (no error) closes connection after a bad response. 
+TEST_F(GrpcHealthCheckerImplTest, GoAwayProbeInProgressBadResponse) { + setupHCWithUnhealthyThreshold(/*threshold=*/1); + cluster_->prioritySet().getMockHostSet(0)->hosts_ = { + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + + expectSessionCreate(); + expectHealthcheckStart(0); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); + health_checker_->start(); + + expectHealthcheckStop(0); + // Unhealthy threshold is 1 so first bad response causes unhealthy + EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); + + // GOAWAY during check should be handled gracefully. + test_sessions_[0]->codec_client_->raiseGoAway(Http::GoAwayErrorCode::NoError); + expectHostHealthy(true); + + respondResponseSpec(0, ResponseSpec{{{":status", "200"}, {"content-type", "application/grpc"}}, + {ResponseSpec::invalidChunk()}, + {}}); + expectHostHealthy(false); + + // GOAWAY should cause a new connection to be created. + expectClientCreate(0); + expectHealthcheckStart(0); + test_sessions_[0]->interval_timer_->invokeCallback(); + + expectHealthcheckStop(0); + // Healthy threshold is 2, so the we'ere pending a state change. + EXPECT_CALL(*this, onHostStatus(_, HealthTransition::ChangePending)); + respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::SERVING); + expectHostHealthy(false); +} + +// Test receiving GOAWAY (no error) and a connection close. 
+TEST_F(GrpcHealthCheckerImplTest, GoAwayProbeInProgressConnectionClose) { + setupHCWithUnhealthyThreshold(/*threshold=*/1); + cluster_->prioritySet().getMockHostSet(0)->hosts_ = { + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + + expectSessionCreate(); + expectHealthcheckStart(0); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); + health_checker_->start(); + + expectHealthcheckStop(0); + // Unhealthy threshold is 1 so first bad response causes unhealthy + EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); + + // GOAWAY during check should be handled gracefully. + test_sessions_[0]->codec_client_->raiseGoAway(Http::GoAwayErrorCode::NoError); + expectHostHealthy(true); + + test_sessions_[0]->client_connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + expectHostHealthy(false); + + // GOAWAY should cause a new connection to be created. + expectClientCreate(0); + expectHealthcheckStart(0); + test_sessions_[0]->interval_timer_->invokeCallback(); + + expectHealthcheckStop(0); + // Healthy threshold is 2, so the we'ere pending a state change. + EXPECT_CALL(*this, onHostStatus(_, HealthTransition::ChangePending)); + respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::SERVING); + expectHostHealthy(false); +} + // Test receiving GOAWAY between checks affects nothing. TEST_F(GrpcHealthCheckerImplTest, GoAwayBetweenChecks) { setupHC(); @@ -4513,7 +4796,7 @@ TEST_F(GrpcHealthCheckerImplTest, GoAwayBetweenChecks) { expectHostHealthy(true); // GOAWAY between checks should go unnoticed. 
- test_sessions_[0]->codec_client_->raiseGoAway(); + test_sessions_[0]->codec_client_->raiseGoAway(Http::GoAwayErrorCode::NoError); expectClientCreate(0); expectHealthcheckStart(0); @@ -4625,8 +4908,8 @@ INSTANTIATE_TEST_SUITE_P( TEST_P(BadResponseGrpcHealthCheckerImplTest, GrpcBadResponse) { setupHC(); expectSingleHealthcheck(HealthTransition::Changed); - EXPECT_CALL(*event_logger_, logUnhealthy(_, _, _, true)); - EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _)); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); ResponseSpec spec = GetParam(); respondResponseSpec(0, std::move(spec)); @@ -4726,7 +5009,7 @@ TEST(HealthCheckProto, Validation) { path: /healthcheck )EOF"; envoy::config::core::v3::HealthCheck health_check_proto; - EXPECT_THROW_WITH_REGEX(TestUtility::validate(parseHealthCheckFromV2Yaml(yaml)), EnvoyException, + EXPECT_THROW_WITH_REGEX(TestUtility::validate(parseHealthCheckFromV3Yaml(yaml)), EnvoyException, "Proto constraint validation failed.*value must be greater than.*"); } { @@ -4742,7 +5025,7 @@ TEST(HealthCheckProto, Validation) { path: /healthcheck )EOF"; envoy::config::core::v3::HealthCheck health_check_proto; - EXPECT_THROW_WITH_REGEX(TestUtility::validate(parseHealthCheckFromV2Yaml(yaml)), EnvoyException, + EXPECT_THROW_WITH_REGEX(TestUtility::validate(parseHealthCheckFromV3Yaml(yaml)), EnvoyException, "Proto constraint validation failed.*value must be greater than.*"); } { @@ -4758,7 +5041,7 @@ TEST(HealthCheckProto, Validation) { path: /healthcheck )EOF"; envoy::config::core::v3::HealthCheck health_check_proto; - EXPECT_THROW_WITH_REGEX(TestUtility::validate(parseHealthCheckFromV2Yaml(yaml)), EnvoyException, + EXPECT_THROW_WITH_REGEX(TestUtility::validate(parseHealthCheckFromV3Yaml(yaml)), EnvoyException, "Proto constraint validation failed.*value must be greater than.*"); } { @@ -4774,7 +5057,7 @@ TEST(HealthCheckProto, Validation) { path: /healthcheck )EOF"; 
envoy::config::core::v3::HealthCheck health_check_proto; - EXPECT_THROW_WITH_REGEX(TestUtility::validate(parseHealthCheckFromV2Yaml(yaml)), EnvoyException, + EXPECT_THROW_WITH_REGEX(TestUtility::validate(parseHealthCheckFromV3Yaml(yaml)), EnvoyException, "Proto constraint validation failed.*value must be greater than.*"); } { @@ -4788,7 +5071,7 @@ TEST(HealthCheckProto, Validation) { path: /healthcheck )EOF"; envoy::config::core::v3::HealthCheck health_check_proto; - EXPECT_THROW_WITH_REGEX(TestUtility::validate(parseHealthCheckFromV2Yaml(yaml)), EnvoyException, + EXPECT_THROW_WITH_REGEX(TestUtility::validate(parseHealthCheckFromV3Yaml(yaml)), EnvoyException, "Proto constraint validation failed.*value is required.*"); } { @@ -4802,7 +5085,7 @@ TEST(HealthCheckProto, Validation) { path: /healthcheck )EOF"; envoy::config::core::v3::HealthCheck health_check_proto; - EXPECT_THROW_WITH_REGEX(TestUtility::validate(parseHealthCheckFromV2Yaml(yaml)), EnvoyException, + EXPECT_THROW_WITH_REGEX(TestUtility::validate(parseHealthCheckFromV3Yaml(yaml)), EnvoyException, "Proto constraint validation failed.*value is required.*"); } } diff --git a/test/common/upstream/load_balancer_benchmark.cc b/test/common/upstream/load_balancer_benchmark.cc index f8e1177da0f98..bb491a788a161 100644 --- a/test/common/upstream/load_balancer_benchmark.cc +++ b/test/common/upstream/load_balancer_benchmark.cc @@ -4,8 +4,8 @@ #include "envoy/config/cluster/v3/cluster.pb.h" +#include "common/common/random_generator.h" #include "common/memory/stats.h" -#include "common/runtime/runtime_impl.h" #include "common/upstream/maglev_lb.h" #include "common/upstream/ring_hash_lb.h" #include "common/upstream/upstream_impl.h" @@ -51,7 +51,7 @@ class BaseTester { Stats::IsolatedStoreImpl stats_store_; ClusterStats stats_{ClusterInfoImpl::generateStats(stats_store_)}; NiceMock runtime_; - Runtime::RandomGeneratorImpl random_; + Random::RandomGeneratorImpl random_; envoy::config::cluster::v3::Cluster::CommonLbConfig 
common_config_; std::shared_ptr info_{new NiceMock()}; }; @@ -214,7 +214,7 @@ class TestLoadBalancerContext : public LoadBalancerContextBase { }; void computeHitStats(benchmark::State& state, - const std::unordered_map& hit_counter) { + const absl::node_hash_map& hit_counter) { double mean = 0; for (const auto& pair : hit_counter) { mean += pair.second; @@ -240,7 +240,7 @@ void BM_LeastRequestLoadBalancerChooseHost(benchmark::State& state) { const uint64_t choice_count = state.range(1); const uint64_t keys_to_simulate = state.range(2); LeastRequestTester tester(num_hosts, choice_count); - std::unordered_map hit_counter; + absl::node_hash_map hit_counter; TestLoadBalancerContext context; state.ResumeTiming(); @@ -273,12 +273,12 @@ void BM_RingHashLoadBalancerChooseHost(benchmark::State& state) { RingHashTester tester(num_hosts, min_ring_size); tester.ring_hash_lb_->initialize(); LoadBalancerPtr lb = tester.ring_hash_lb_->factory()->create(); - std::unordered_map hit_counter; + absl::node_hash_map hit_counter; TestLoadBalancerContext context; state.ResumeTiming(); // Note: To a certain extent this is benchmarking the performance of xxhash as well as - // std::unordered_map. However, it should be roughly equivalent to the work done when + // absl::node_hash_map. However, it should be roughly equivalent to the work done when // comparing different hashing algorithms. // TODO(mattklein123): When Maglev is a real load balancer, further share code with the // other test. @@ -311,12 +311,12 @@ void BM_MaglevLoadBalancerChooseHost(benchmark::State& state) { MaglevTester tester(num_hosts); tester.maglev_lb_->initialize(); LoadBalancerPtr lb = tester.maglev_lb_->factory()->create(); - std::unordered_map hit_counter; + absl::node_hash_map hit_counter; TestLoadBalancerContext context; state.ResumeTiming(); // Note: To a certain extent this is benchmarking the performance of xxhash as well as - // std::unordered_map. 
However, it should be roughly equivalent to the work done when + // absl::node_hash_map. However, it should be roughly equivalent to the work done when // comparing different hashing algorithms. for (uint64_t i = 0; i < keys_to_simulate; i++) { context.hash_key_ = hashInt(i); diff --git a/test/common/upstream/load_balancer_impl_test.cc b/test/common/upstream/load_balancer_impl_test.cc index 3ad095de9bc4b..ed89540e2968a 100644 --- a/test/common/upstream/load_balancer_impl_test.cc +++ b/test/common/upstream/load_balancer_impl_test.cc @@ -13,6 +13,8 @@ #include "test/common/upstream/utility.h" #include "test/mocks/runtime/mocks.h" #include "test/mocks/upstream/mocks.h" +#include "test/test_common/logging.h" +#include "test/test_common/test_runtime.h" #include "gmock/gmock.h" #include "gtest/gtest.h" @@ -39,7 +41,7 @@ class LoadBalancerTestBase : public testing::TestWithParam { Stats::IsolatedStoreImpl stats_store_; ClusterStats stats_; NiceMock runtime_; - NiceMock random_; + NiceMock random_; NiceMock priority_set_; MockHostSet& host_set_ = *priority_set_.getMockHostSet(0); MockHostSet& failover_host_set_ = *priority_set_.getMockHostSet(1); @@ -51,7 +53,7 @@ class LoadBalancerTestBase : public testing::TestWithParam { class TestLb : public LoadBalancerBase { public: TestLb(const PrioritySet& priority_set, ClusterStats& stats, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, + Random::RandomGenerator& random, const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config) : LoadBalancerBase(priority_set, stats, runtime, random, common_config) {} using LoadBalancerBase::chooseHostSet; @@ -128,7 +130,7 @@ TEST_P(LoadBalancerBaseTest, PrioritySelection) { HealthyAndDegradedLoad priority_load{Upstream::HealthyLoad({100, 0, 0}), Upstream::DegradedLoad({0, 0, 0})}; - EXPECT_CALL(context, determinePriorityLoad(_, _)).WillRepeatedly(ReturnRef(priority_load)); + EXPECT_CALL(context, determinePriorityLoad(_, _, 
_)).WillRepeatedly(ReturnRef(priority_load)); // Primary and failover are in panic mode. Load distribution is based // on the number of hosts regardless of their health. EXPECT_EQ(50, lb_.percentageLoad(0)); @@ -205,11 +207,10 @@ TEST_P(LoadBalancerBaseTest, PrioritySelectionFuzz) { updateHostSet(failover_host_set_, failover_set_hosts, unhealthy_hosts, degraded_hosts); } - EXPECT_CALL(context, determinePriorityLoad(_, _)) + EXPECT_CALL(context, determinePriorityLoad(_, _, _)) .WillRepeatedly( - Invoke([](const auto&, const auto& original_load) -> const HealthyAndDegradedLoad& { - return original_load; - })); + Invoke([](const auto&, const auto& original_load, + const auto&) -> const HealthyAndDegradedLoad& { return original_load; })); for (uint64_t i = 0; i < total_hosts; ++i) { const auto hs = lb_.chooseHostSet(&context); @@ -234,7 +235,7 @@ TEST_P(LoadBalancerBaseTest, PrioritySelectionWithFilter) { HealthyAndDegradedLoad priority_load{Upstream::HealthyLoad({0u, 100u}), Upstream::DegradedLoad({0, 0})}; // return a filter that excludes priority 0 - EXPECT_CALL(context, determinePriorityLoad(_, _)).WillRepeatedly(ReturnRef(priority_load)); + EXPECT_CALL(context, determinePriorityLoad(_, _, _)).WillRepeatedly(ReturnRef(priority_load)); updateHostSet(host_set_, 1 /* num_hosts */, 1 /* num_healthy_hosts */); updateHostSet(failover_host_set_, 1, 1); @@ -1009,7 +1010,7 @@ TEST_P(RoundRobinLoadBalancerTest, HostSelectionWithFilter) { } else { priority_load.healthy_priority_load_ = HealthyLoad({0u, 100u}); } - EXPECT_CALL(context, determinePriorityLoad(_, _)).WillRepeatedly(ReturnRef(priority_load)); + EXPECT_CALL(context, determinePriorityLoad(_, _, _)).WillRepeatedly(ReturnRef(priority_load)); EXPECT_CALL(context, hostSelectionRetryCount()).WillRepeatedly(Return(2)); // Calling chooseHost multiple times always returns host one, since the filter will reject @@ -1533,6 +1534,89 @@ TEST_P(LeastRequestLoadBalancerTest, WeightImbalance) { 
EXPECT_EQ(hostSet().healthy_hosts_[0], lb_.chooseHost(nullptr)); } +// Validate that the load balancer defaults to an active request bias value of 1.0 if the runtime +// value is invalid (less than 0.0). +TEST_P(LeastRequestLoadBalancerTest, WeightImbalanceWithInvalidActiveRequestBias) { + envoy::config::cluster::v3::Cluster::LeastRequestLbConfig lr_lb_config; + lr_lb_config.mutable_active_request_bias()->set_runtime_key("ar_bias"); + lr_lb_config.mutable_active_request_bias()->set_default_value(1.0); + LeastRequestLoadBalancer lb_2{priority_set_, nullptr, stats_, runtime_, + random_, common_config_, lr_lb_config}; + + EXPECT_CALL(runtime_.snapshot_, getDouble("ar_bias", 1.0)).WillRepeatedly(Return(-1.0)); + + hostSet().healthy_hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80", 1), + makeTestHost(info_, "tcp://127.0.0.1:81", 2)}; + + hostSet().hosts_ = hostSet().healthy_hosts_; + + // Trigger callbacks. The added/removed lists are not relevant. + EXPECT_LOG_CONTAINS( + "warn", "upstream: invalid active request bias supplied (runtime key ar_bias), using 1.0", + hostSet().runCallbacks({}, {})); + + EXPECT_CALL(random_, random()).WillRepeatedly(Return(0)); + + // We should see 2:1 ratio for hosts[1] to hosts[0]. + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr)); + + // Bringing hosts[1] to an active request should yield a 1:1 ratio. 
+ hostSet().healthy_hosts_[1]->stats().rq_active_.set(1); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr)); + + // Settings hosts[0] to an active request and hosts[1] to no active requests should yield a 4:1 + // ratio. + hostSet().healthy_hosts_[0]->stats().rq_active_.set(1); + hostSet().healthy_hosts_[1]->stats().rq_active_.set(0); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); +} + +TEST_P(LeastRequestLoadBalancerTest, WeightImbalanceWithCustomActiveRequestBias) { + // Create a load balancer with a custom active request bias. 
+ envoy::config::cluster::v3::Cluster::LeastRequestLbConfig lr_lb_config; + lr_lb_config.mutable_active_request_bias()->set_runtime_key("ar_bias"); + lr_lb_config.mutable_active_request_bias()->set_default_value(1.0); + LeastRequestLoadBalancer lb_2{priority_set_, nullptr, stats_, runtime_, + random_, common_config_, lr_lb_config}; + + EXPECT_CALL(runtime_.snapshot_, getDouble("ar_bias", 1.0)).WillRepeatedly(Return(0.0)); + + hostSet().healthy_hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80", 1), + makeTestHost(info_, "tcp://127.0.0.1:81", 2)}; + + hostSet().hosts_ = hostSet().healthy_hosts_; + hostSet().runCallbacks({}, {}); // Trigger callbacks. The added/removed lists are not relevant. + + EXPECT_CALL(random_, random()).WillRepeatedly(Return(0)); + + // We should see 2:1 ratio for hosts[1] to hosts[0], regardless of the active request count. + hostSet().healthy_hosts_[1]->stats().rq_active_.set(1); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr)); +} + TEST_P(LeastRequestLoadBalancerTest, WeightImbalanceCallbacks) { hostSet().healthy_hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80", 1), makeTestHost(info_, "tcp://127.0.0.1:81", 2)}; diff --git a/test/common/upstream/load_balancer_simulation_test.cc b/test/common/upstream/load_balancer_simulation_test.cc index d220d14107313..0f86debac3b88 100644 --- a/test/common/upstream/load_balancer_simulation_test.cc +++ b/test/common/upstream/load_balancer_simulation_test.cc @@ -8,8 +8,8 @@ #include "envoy/config/endpoint/v3/endpoint_components.pb.h" #include "common/common/fmt.h" +#include "common/common/random_generator.h" #include "common/network/utility.h" -#include 
"common/runtime/runtime_impl.h" #include "common/upstream/load_balancer_impl.h" #include "common/upstream/upstream_impl.h" @@ -69,19 +69,19 @@ TEST(DISABLED_LeastRequestLoadBalancerWeightTest, Weight) { ClusterStats stats{ClusterInfoImpl::generateStats(stats_store)}; stats.max_host_weight_.set(weight); NiceMock runtime; - Runtime::RandomGeneratorImpl random; + Random::RandomGeneratorImpl random; envoy::config::cluster::v3::Cluster::LeastRequestLbConfig least_request_lb_config; envoy::config::cluster::v3::Cluster::CommonLbConfig common_config; LeastRequestLoadBalancer lb_{ priority_set, nullptr, stats, runtime, random, common_config, least_request_lb_config}; - std::unordered_map host_hits; + absl::node_hash_map host_hits; const uint64_t total_requests = 100; for (uint64_t i = 0; i < total_requests; i++) { host_hits[lb_.chooseHost(nullptr)]++; } - std::unordered_map weight_to_percent; + absl::node_hash_map weight_to_percent; for (const auto& host : host_hits) { std::cout << fmt::format("url:{}, weight:{}, hits:{}, percent_of_total:{}\n", host.first->address()->asString(), host.first->weight(), host.second, @@ -235,7 +235,7 @@ class DISABLED_SimulationTest : public testing::Test { MockHostSet& host_set_ = *priority_set_.getMockHostSet(0); std::shared_ptr info_{new NiceMock()}; NiceMock runtime_; - Runtime::RandomGeneratorImpl random_; + Random::RandomGeneratorImpl random_; Stats::IsolatedStoreImpl stats_store_; ClusterStats stats_; envoy::config::cluster::v3::Cluster::CommonLbConfig common_config_; diff --git a/test/common/upstream/load_stats_reporter_test.cc b/test/common/upstream/load_stats_reporter_test.cc index b87b9e751f1d2..2fd28c3806619 100644 --- a/test/common/upstream/load_stats_reporter_test.cc +++ b/test/common/upstream/load_stats_reporter_test.cc @@ -52,9 +52,12 @@ class LoadStatsReporterTest : public testing::Test { const std::vector& expected_cluster_stats) { envoy::service::load_stats::v3::LoadStatsRequest expected_request; 
expected_request.mutable_node()->MergeFrom(local_info_.node()); + expected_request.mutable_node()->add_client_features("envoy.lrs.supports_send_all_clusters"); std::copy(expected_cluster_stats.begin(), expected_cluster_stats.end(), Protobuf::RepeatedPtrFieldBackInserter(expected_request.mutable_cluster_stats())); - EXPECT_CALL(async_stream_, sendMessageRaw_(Grpc::ProtoBufferEq(expected_request), false)); + EXPECT_CALL( + async_stream_, + sendMessageRaw_(Grpc::ProtoBufferEqIgnoreRepeatedFieldOrdering(expected_request), false)); } void deliverLoadStatsResponse(const std::vector& cluster_names) { diff --git a/test/common/upstream/logical_dns_cluster_test.cc b/test/common/upstream/logical_dns_cluster_test.cc index 74154fd825105..54404aab2c5f6 100644 --- a/test/common/upstream/logical_dns_cluster_test.cc +++ b/test/common/upstream/logical_dns_cluster_test.cc @@ -20,7 +20,8 @@ #include "test/mocks/network/mocks.h" #include "test/mocks/protobuf/mocks.h" #include "test/mocks/runtime/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/admin.h" +#include "test/mocks/server/instance.h" #include "test/mocks/ssl/mocks.h" #include "test/mocks/thread_local/mocks.h" #include "test/mocks/upstream/mocks.h" @@ -42,10 +43,11 @@ class LogicalDnsClusterTest : public testing::Test { protected: LogicalDnsClusterTest() : api_(Api::createApiForTest(stats_store_)) {} - void setupFromV2Yaml(const std::string& yaml) { + void setupFromV3Yaml(const std::string& yaml, bool avoid_boosting = true) { resolve_timer_ = new Event::MockTimer(&dispatcher_); NiceMock cm; - envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + envoy::config::cluster::v3::Cluster cluster_config = + parseClusterFromV3Yaml(yaml, avoid_boosting); Envoy::Stats::ScopePtr scope = stats_store_.createScope(fmt::format( "cluster.{}.", cluster_config.alt_stat_name().empty() ? 
cluster_config.name() : cluster_config.alt_stat_name())); @@ -74,7 +76,7 @@ class LogicalDnsClusterTest : public testing::Test { void testBasicSetup(const std::string& config, const std::string& expected_address, uint32_t expected_port, uint32_t expected_hc_port) { expectResolve(Network::DnsLookupFamily::V4Only, expected_address); - setupFromV2Yaml(config); + setupFromV3Yaml(config); EXPECT_CALL(membership_updated_, ready()); EXPECT_CALL(initialized_, ready()); @@ -196,7 +198,7 @@ class LogicalDnsClusterTest : public testing::Test { std::shared_ptr> dns_resolver_{ new NiceMock}; Network::MockActiveDnsQuery active_dns_query_; - NiceMock random_; + NiceMock random_; Network::DnsResolver::ResolveCb dns_callback_; NiceMock tls_; Event::MockTimer* resolve_timer_; @@ -263,10 +265,14 @@ TEST_P(LogicalDnsParamTest, ImmediateResolve) { lb_policy: round_robin )EOF" + std::get<0>(GetParam()) + R"EOF( - hosts: - - socket_address: - address: foo.bar.com - port_value: 443 + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: foo.bar.com + port_value: 443 )EOF"; EXPECT_CALL(membership_updated_, ready()); @@ -279,7 +285,7 @@ TEST_P(LogicalDnsParamTest, ImmediateResolve) { TestUtility::makeDnsResponse(std::get<2>(GetParam()))); return nullptr; })); - setupFromV2Yaml(yaml); + setupFromV3Yaml(yaml); EXPECT_EQ(1UL, cluster_->prioritySet().hostSetsPerPriority()[0]->hosts().size()); EXPECT_EQ(1UL, cluster_->prioritySet().hostSetsPerPriority()[0]->healthyHosts().size()); EXPECT_EQ("foo.bar.com", @@ -301,14 +307,18 @@ TEST_F(LogicalDnsParamTest, FailureRefreshRateBackoffResetsWhenSuccessHappens) { # Since the following expectResolve() requires Network::DnsLookupFamily::V4Only we need to set # dns_lookup_family to V4_ONLY explicitly for v2 .yaml config. 
dns_lookup_family: V4_ONLY - hosts: - - socket_address: - address: foo.bar.com - port_value: 443 + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: foo.bar.com + port_value: 443 )EOF"; expectResolve(Network::DnsLookupFamily::V4Only, "foo.bar.com"); - setupFromV2Yaml(yaml); + setupFromV3Yaml(yaml); // Failing response kicks the failure refresh backoff strategy. ON_CALL(random_, random()).WillByDefault(Return(8000)); @@ -341,14 +351,18 @@ TEST_F(LogicalDnsParamTest, TtlAsDnsRefreshRate) { # Since the following expectResolve() requires Network::DnsLookupFamily::V4Only we need to set # dns_lookup_family to V4_ONLY explicitly for v2 .yaml config. dns_lookup_family: V4_ONLY - hosts: - - socket_address: - address: foo.bar.com - port_value: 443 + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: foo.bar.com + port_value: 443 )EOF"; expectResolve(Network::DnsLookupFamily::V4Only, "foo.bar.com"); - setupFromV2Yaml(yaml); + setupFromV3Yaml(yaml); // TTL is recorded when the DNS response is successful and not empty EXPECT_CALL(membership_updated_, ready()); @@ -377,17 +391,25 @@ TEST_F(LogicalDnsClusterTest, BadConfig) { dns_refresh_rate: 4s connect_timeout: 0.25s lb_policy: ROUND_ROBIN - hosts: - - socket_address: - address: foo.bar.com - port_value: 443 - - socket_address: - address: foo2.bar.com - port_value: 443 + load_assignment: + cluster_name: name + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: foo.bar.com + port_value: 443 + - endpoint: + address: + socket_address: + address: foo2.bar.com + port_value: 443 )EOF"; - EXPECT_THROW_WITH_MESSAGE(setupFromV2Yaml(multiple_hosts_yaml), EnvoyException, - "LOGICAL_DNS clusters must have a single host"); + EXPECT_THROW_WITH_MESSAGE( + setupFromV3Yaml(multiple_hosts_yaml), EnvoyException, + "LOGICAL_DNS clusters must have a single locality_lb_endpoint and a single lb_endpoint"); const 
std::string multiple_lb_endpoints_yaml = R"EOF( name: name @@ -417,7 +439,7 @@ TEST_F(LogicalDnsClusterTest, BadConfig) { )EOF"; EXPECT_THROW_WITH_MESSAGE( - setupFromV2Yaml(multiple_lb_endpoints_yaml), EnvoyException, + setupFromV3Yaml(multiple_lb_endpoints_yaml), EnvoyException, "LOGICAL_DNS clusters must have a single locality_lb_endpoint and a single lb_endpoint"); const std::string multiple_endpoints_yaml = R"EOF( @@ -450,7 +472,7 @@ TEST_F(LogicalDnsClusterTest, BadConfig) { )EOF"; EXPECT_THROW_WITH_MESSAGE( - setupFromV2Yaml(multiple_endpoints_yaml), EnvoyException, + setupFromV3Yaml(multiple_endpoints_yaml), EnvoyException, "LOGICAL_DNS clusters must have a single locality_lb_endpoint and a single lb_endpoint"); const std::string custom_resolver_yaml = R"EOF( @@ -474,7 +496,7 @@ TEST_F(LogicalDnsClusterTest, BadConfig) { port_value: 8000 )EOF"; - EXPECT_THROW_WITH_MESSAGE(setupFromV2Yaml(custom_resolver_yaml), EnvoyException, + EXPECT_THROW_WITH_MESSAGE(setupFromV3Yaml(custom_resolver_yaml), EnvoyException, "LOGICAL_DNS clusters must NOT have a custom resolver name set"); } @@ -491,10 +513,14 @@ TEST_F(LogicalDnsClusterTest, Basic) { # Since the following expectResolve() requires Network::DnsLookupFamily::V4Only we need to set # dns_lookup_family to V4_ONLY explicitly for v2 .yaml config. 
dns_lookup_family: V4_ONLY - hosts: - - socket_address: - address: foo.bar.com - port_value: 443 + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: foo.bar.com + port_value: 443 )EOF"; const std::string basic_yaml_load_assignment = R"EOF( diff --git a/test/common/upstream/maglev_lb_test.cc b/test/common/upstream/maglev_lb_test.cc index 3fce26252ac49..25456820dd4d8 100644 --- a/test/common/upstream/maglev_lb_test.cc +++ b/test/common/upstream/maglev_lb_test.cc @@ -53,7 +53,7 @@ class MaglevLoadBalancerTest : public testing::Test { ClusterStats stats_; envoy::config::cluster::v3::Cluster::CommonLbConfig common_config_; NiceMock runtime_; - NiceMock random_; + NiceMock random_; std::unique_ptr lb_; }; diff --git a/test/common/upstream/original_dst_cluster_test.cc b/test/common/upstream/original_dst_cluster_test.cc index 72fb82425b029..6e920a6e63707 100644 --- a/test/common/upstream/original_dst_cluster_test.cc +++ b/test/common/upstream/original_dst_cluster_test.cc @@ -21,7 +21,8 @@ #include "test/mocks/network/mocks.h" #include "test/mocks/protobuf/mocks.h" #include "test/mocks/runtime/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/admin.h" +#include "test/mocks/server/instance.h" #include "test/mocks/ssl/mocks.h" #include "test/mocks/upstream/mocks.h" #include "test/test_common/utility.h" @@ -69,7 +70,9 @@ class OriginalDstClusterTest : public testing::Test { : cleanup_timer_(new Event::MockTimer(&dispatcher_)), api_(Api::createApiForTest(stats_store_)) {} - void setupFromYaml(const std::string& yaml) { setup(parseClusterFromV2Yaml(yaml)); } + void setupFromYaml(const std::string& yaml, bool avoid_boosting = true) { + setup(parseClusterFromV3Yaml(yaml, avoid_boosting)); + } void setup(const envoy::config::cluster::v3::Cluster& cluster_config) { NiceMock cm; @@ -96,7 +99,7 @@ class OriginalDstClusterTest : public testing::Test { NiceMock runtime_; NiceMock dispatcher_; Event::MockTimer* 
cleanup_timer_; - NiceMock random_; + NiceMock random_; NiceMock local_info_; NiceMock admin_; Singleton::ManagerImpl singleton_manager_{Thread::threadFactoryForTest()}; @@ -114,7 +117,7 @@ TEST(OriginalDstClusterConfigTest, GoodConfig) { cleanup_interval: 1s )EOF"; // Help Emacs balance quotation marks: " - EXPECT_TRUE(parseClusterFromV2Yaml(yaml).has_cleanup_interval()); + EXPECT_TRUE(parseClusterFromV3Yaml(yaml).has_cleanup_interval()); } TEST_F(OriginalDstClusterTest, BadConfigWithLoadAssignment) { @@ -122,7 +125,7 @@ TEST_F(OriginalDstClusterTest, BadConfigWithLoadAssignment) { name: name connect_timeout: 0.25s type: ORIGINAL_DST - lb_policy: ORIGINAL_DST_LB + lb_policy: CLUSTER_PROVIDED cleanup_interval: 1s load_assignment: cluster_name: name @@ -154,7 +157,7 @@ TEST_F(OriginalDstClusterTest, BadConfigWithDeprecatedHosts) { )EOF"; EXPECT_THROW_WITH_MESSAGE( - setupFromYaml(yaml), EnvoyException, + setupFromYaml(yaml, false), EnvoyException, "ORIGINAL_DST clusters must have no load assignment or hosts configured"); } @@ -163,7 +166,7 @@ TEST_F(OriginalDstClusterTest, CleanupInterval) { name: name connect_timeout: 1.250s type: ORIGINAL_DST - lb_policy: ORIGINAL_DST_LB + lb_policy: CLUSTER_PROVIDED cleanup_interval: 1s )EOF"; // Help Emacs balance quotation marks: " @@ -181,7 +184,7 @@ TEST_F(OriginalDstClusterTest, NoContext) { name: name, connect_timeout: 0.125s type: ORIGINAL_DST - lb_policy: ORIGINAL_DST_LB + lb_policy: CLUSTER_PROVIDED )EOF"; EXPECT_CALL(initialized_, ready()); @@ -239,7 +242,7 @@ TEST_F(OriginalDstClusterTest, Membership) { name: name connect_timeout: 1.250s type: ORIGINAL_DST - lb_policy: ORIGINAL_DST_LB + lb_policy: CLUSTER_PROVIDED )EOF"; EXPECT_CALL(initialized_, ready()); @@ -330,7 +333,7 @@ TEST_F(OriginalDstClusterTest, Membership2) { name: name connect_timeout: 1.250s type: ORIGINAL_DST - lb_policy: ORIGINAL_DST_LB + lb_policy: CLUSTER_PROVIDED )EOF"; EXPECT_CALL(initialized_, ready()); @@ -418,7 +421,7 @@ 
TEST_F(OriginalDstClusterTest, Connection) { name: name connect_timeout: 1.250s type: ORIGINAL_DST - lb_policy: ORIGINAL_DST_LB + lb_policy: CLUSTER_PROVIDED )EOF"; EXPECT_CALL(initialized_, ready()); @@ -458,7 +461,7 @@ TEST_F(OriginalDstClusterTest, MultipleClusters) { name: name connect_timeout: 1.250s type: ORIGINAL_DST - lb_policy: ORIGINAL_DST_LB + lb_policy: CLUSTER_PROVIDED )EOF"; EXPECT_CALL(initialized_, ready()); @@ -510,7 +513,7 @@ TEST_F(OriginalDstClusterTest, UseHttpHeaderEnabled) { name: name connect_timeout: 1.250s type: ORIGINAL_DST - lb_policy: ORIGINAL_DST_LB + lb_policy: CLUSTER_PROVIDED original_dst_lb_config: use_http_header: true )EOF"; @@ -583,7 +586,7 @@ TEST_F(OriginalDstClusterTest, UseHttpHeaderDisabled) { name: name connect_timeout: 1.250s type: ORIGINAL_DST - lb_policy: ORIGINAL_DST_LB + lb_policy: CLUSTER_PROVIDED )EOF"; EXPECT_CALL(initialized_, ready()); diff --git a/test/common/upstream/outlier_detection_impl_test.cc b/test/common/upstream/outlier_detection_impl_test.cc index a79b1a1b31d6d..74a96a074fc87 100644 --- a/test/common/upstream/outlier_detection_impl_test.cc +++ b/test/common/upstream/outlier_detection_impl_test.cc @@ -1352,6 +1352,44 @@ TEST_F(OutlierDetectorImplTest, NotEnforcing) { .value()); } +TEST_F(OutlierDetectorImplTest, EjectionActiveValueIsAccountedWithoutMetricStorage) { + EXPECT_CALL(cluster_.prioritySet(), addMemberUpdateCb(_)); + addHosts({"tcp://127.0.0.1:80", "tcp://127.0.0.1:81"}); + EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _)); + std::shared_ptr detector(DetectorImpl::create( + cluster_, empty_outlier_detection_, dispatcher_, runtime_, time_system_, event_logger_)); + detector->addChangedStateCb([&](HostSharedPtr host) -> void { checker_.check(host); }); + + ON_CALL(runtime_.snapshot_, getInteger("outlier_detection.max_ejection_percent", _)) + .WillByDefault(Return(1)); + + loadRq(hosts_[0], 4, 500); + + time_system_.setMonotonicTime(std::chrono::milliseconds(0)); + + 
// Manually increase the gauge. From metric's perspective it's overflowed. + outlier_detection_ejections_active_.inc(); + + // Since the overflow is not determined by the metric. Host[0] can be ejected. + EXPECT_CALL(checker_, check(hosts_[0])); + EXPECT_CALL(*event_logger_, logEject(std::static_pointer_cast(hosts_[0]), + _, envoy::data::cluster::v2alpha::CONSECUTIVE_5XX, true)); + hosts_[0]->outlierDetector().putHttpResponseCode(500); + EXPECT_TRUE(hosts_[0]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK)); + + // Expect active helper_ has the value 1. However, helper is private and it cannot be tested. + EXPECT_EQ(2UL, outlier_detection_ejections_active_.value()); + EXPECT_EQ(0UL, + cluster_.info_->stats_store_.counter("outlier_detection.ejections_overflow").value()); + + // Now it starts to overflow. + loadRq(hosts_[1], 5, 500); + EXPECT_FALSE(hosts_[1]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK)); + EXPECT_EQ(2UL, outlier_detection_ejections_active_.value()); + EXPECT_EQ(1UL, + cluster_.info_->stats_store_.counter("outlier_detection.ejections_overflow").value()); +} + TEST_F(OutlierDetectorImplTest, CrossThreadRemoveRace) { EXPECT_CALL(cluster_.prioritySet(), addMemberUpdateCb(_)); addHosts({"tcp://127.0.0.1:80"}); diff --git a/test/common/upstream/ring_hash_lb_test.cc b/test/common/upstream/ring_hash_lb_test.cc index fbd4906e01fe0..9c9413233e024 100644 --- a/test/common/upstream/ring_hash_lb_test.cc +++ b/test/common/upstream/ring_hash_lb_test.cc @@ -2,7 +2,6 @@ #include #include #include -#include #include "envoy/config/cluster/v3/cluster.pb.h" #include "envoy/router/router.h" @@ -15,6 +14,7 @@ #include "test/mocks/runtime/mocks.h" #include "test/mocks/upstream/mocks.h" +#include "absl/container/node_hash_map.h" #include "gmock/gmock.h" #include "gtest/gtest.h" @@ -71,7 +71,7 @@ class RingHashLoadBalancerTest : public testing::TestWithParam { absl::optional config_; envoy::config::cluster::v3::Cluster::CommonLbConfig common_config_; NiceMock 
runtime_; - NiceMock random_; + NiceMock random_; std::unique_ptr lb_; }; @@ -468,7 +468,7 @@ TEST_P(RingHashLoadBalancerTest, HostWeightedTinyRing) { LoadBalancerPtr lb = lb_->factory()->create(); // :90 should appear once, :91 should appear twice and :92 should appear three times. - std::unordered_map expected{ + absl::node_hash_map expected{ {928266305478181108UL, 2}, {4443673547860492590UL, 2}, {5583722120771150861UL, 1}, {6311230543546372928UL, 1}, {13444792449719432967UL, 2}, {16117243373044804889UL, 0}}; for (const auto& entry : expected) { @@ -547,7 +547,7 @@ TEST_P(RingHashLoadBalancerTest, LocalityWeightedTinyRing) { // :90 should appear once, :91 should appear twice, :92 should appear three times, // and :93 shouldn't appear at all. - std::unordered_map expected{ + absl::node_hash_map expected{ {928266305478181108UL, 2}, {4443673547860492590UL, 2}, {5583722120771150861UL, 1}, {6311230543546372928UL, 1}, {13444792449719432967UL, 2}, {16117243373044804889UL, 0}}; for (const auto& entry : expected) { @@ -617,7 +617,7 @@ TEST_P(RingHashLoadBalancerTest, HostAndLocalityWeightedTinyRing) { // :90 should appear once, :91 and :92 should each appear two times, and :93 should appear four // times, to get the correct overall proportions. - std::unordered_map expected{ + absl::node_hash_map expected{ {928266305478181108UL, 2}, {3851675632748031481UL, 3}, {5583722120771150861UL, 1}, {6311230543546372928UL, 1}, {7700377290971790572UL, 3}, {12559126875973811811UL, 3}, {13444792449719432967UL, 2}, {13784988426630141778UL, 3}, {16117243373044804889UL, 0}}; @@ -763,7 +763,7 @@ TEST_P(RingHashLoadBalancerTest, LopsidedWeightSmallScale) { // Every 128th host in the light-but-dense locality should have an entry on the ring, for a total // of 8 entries. This gives us the right ratio of 1/128. 
- std::unordered_map expected{ + absl::node_hash_map expected{ {11664790346325243808UL, 1}, {15894554872961148518UL, 128}, {13958138884277627155UL, 256}, {15803774069438192949UL, 384}, {3829253010855396576UL, 512}, {17918147347826565154UL, 640}, {6442769608292299103UL, 768}, {5881074926069334434UL, 896}}; diff --git a/test/common/upstream/subset_lb_test.cc b/test/common/upstream/subset_lb_test.cc index 5c4cf7c5faf5b..e9acf9f407bca 100644 --- a/test/common/upstream/subset_lb_test.cc +++ b/test/common/upstream/subset_lb_test.cc @@ -296,10 +296,11 @@ class SubsetLoadBalancerTest : public testing::TestWithParam { fallback_keys_subset_mapped); } - SubsetSelectorPtr - makeSelector(const std::set& selector_keys, - envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector:: - LbSubsetSelectorFallbackPolicy fallback_policy) { + SubsetSelectorPtr makeSelector( + const std::set& selector_keys, + envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector:: + LbSubsetSelectorFallbackPolicy fallback_policy = + envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::NOT_DEFINED) { return makeSelector(selector_keys, fallback_policy, {}); } @@ -434,6 +435,25 @@ class SubsetLoadBalancerTest : public testing::TestWithParam { return std::make_shared(metadata); } + MetadataConstSharedPtr buildMetadataWithStage(const std::string& version, + const std::string& stage = "") const { + envoy::config::core::v3::Metadata metadata; + + if (!version.empty()) { + Envoy::Config::Metadata::mutableMetadataValue( + metadata, Config::MetadataFilters::get().ENVOY_LB, "version") + .set_string_value(version); + } + + if (!stage.empty()) { + Envoy::Config::Metadata::mutableMetadataValue( + metadata, Config::MetadataFilters::get().ENVOY_LB, "stage") + .set_string_value(stage); + } + + return std::make_shared(metadata); + } + LoadBalancerType lb_type_{LoadBalancerType::RoundRobin}; NiceMock priority_set_; MockHostSet& host_set_ = *priority_set_.getMockHostSet(0); @@ 
-443,7 +463,7 @@ class SubsetLoadBalancerTest : public testing::TestWithParam { envoy::config::cluster::v3::Cluster::LeastRequestLbConfig least_request_lb_config_; envoy::config::cluster::v3::Cluster::CommonLbConfig common_config_; NiceMock runtime_; - NiceMock random_; + NiceMock random_; Stats::IsolatedStoreImpl stats_store_; ClusterStats stats_; PrioritySetImpl local_priority_set_; @@ -936,6 +956,115 @@ TEST_P(SubsetLoadBalancerTest, OnlyMetadataChanged) { EXPECT_EQ(host_set_.hosts_[1], lb_->chooseHost(&context_13)); } +TEST_P(SubsetLoadBalancerTest, EmptySubsetsPurged) { + std::vector subset_selectors = {makeSelector({"version"}), + makeSelector({"version", "stage"})}; + EXPECT_CALL(subset_info_, subsetSelectors()).WillRepeatedly(ReturnRef(subset_selectors)); + + // Simple add and remove. + init({{"tcp://127.0.0.1:8000", {{"version", "1.2"}}}, + {"tcp://127.0.0.1:8001", {{"version", "1.0"}, {"stage", "prod"}}}}); + EXPECT_EQ(3U, stats_.lb_subsets_active_.value()); + EXPECT_EQ(3U, stats_.lb_subsets_created_.value()); + EXPECT_EQ(0U, stats_.lb_subsets_removed_.value()); + + host_set_.hosts_[0]->metadata(buildMetadataWithStage("1.3")); + host_set_.runCallbacks({}, {}); + EXPECT_EQ(3U, stats_.lb_subsets_active_.value()); + EXPECT_EQ(4U, stats_.lb_subsets_created_.value()); + EXPECT_EQ(1U, stats_.lb_subsets_removed_.value()); + + // Move host that was in the version + stage subset into a new version only subset. + host_set_.hosts_[1]->metadata(buildMetadataWithStage("1.4")); + host_set_.runCallbacks({}, {}); + EXPECT_EQ(2U, stats_.lb_subsets_active_.value()); + EXPECT_EQ(5U, stats_.lb_subsets_created_.value()); + EXPECT_EQ(3U, stats_.lb_subsets_removed_.value()); + + // Create a new version + stage subset. 
+ host_set_.hosts_[1]->metadata(buildMetadataWithStage("1.5", "devel")); + host_set_.runCallbacks({}, {}); + EXPECT_EQ(3U, stats_.lb_subsets_active_.value()); + EXPECT_EQ(7U, stats_.lb_subsets_created_.value()); + EXPECT_EQ(4U, stats_.lb_subsets_removed_.value()); + + // Now move it back to its original version + stage subset. + host_set_.hosts_[1]->metadata(buildMetadataWithStage("1.0", "prod")); + host_set_.runCallbacks({}, {}); + EXPECT_EQ(3U, stats_.lb_subsets_active_.value()); + EXPECT_EQ(9U, stats_.lb_subsets_created_.value()); + EXPECT_EQ(6U, stats_.lb_subsets_removed_.value()); + + // Finally, remove the original version + stage subset again. + host_set_.hosts_[1]->metadata(buildMetadataWithStage("1.6")); + host_set_.runCallbacks({}, {}); + EXPECT_EQ(2U, stats_.lb_subsets_active_.value()); + EXPECT_EQ(10U, stats_.lb_subsets_created_.value()); + EXPECT_EQ(8U, stats_.lb_subsets_removed_.value()); +} + +TEST_P(SubsetLoadBalancerTest, EmptySubsetsPurgedCollapsed) { + std::vector subset_selectors = {makeSelector({"version"}), + makeSelector({"version", "stage"})}; + EXPECT_CALL(subset_info_, subsetSelectors()).WillRepeatedly(ReturnRef(subset_selectors)); + + // Init subsets. + init({{"tcp://127.0.0.1:8000", {{"version", "1.2"}}}, + {"tcp://127.0.0.1:8001", {{"version", "1.0"}, {"stage", "prod"}}}}); + EXPECT_EQ(3U, stats_.lb_subsets_active_.value()); + EXPECT_EQ(3U, stats_.lb_subsets_created_.value()); + EXPECT_EQ(0U, stats_.lb_subsets_removed_.value()); + + // Get rid of 1.0. + host_set_.hosts_[1]->metadata(buildMetadataWithStage("1.2", "prod")); + host_set_.runCallbacks({}, {}); + EXPECT_EQ(2U, stats_.lb_subsets_active_.value()); + EXPECT_EQ(4U, stats_.lb_subsets_created_.value()); + EXPECT_EQ(2U, stats_.lb_subsets_removed_.value()); + + // Get rid of stage prod. 
+ host_set_.hosts_[1]->metadata(buildMetadataWithStage("1.2")); + host_set_.runCallbacks({}, {}); + EXPECT_EQ(1U, stats_.lb_subsets_active_.value()); + EXPECT_EQ(4U, stats_.lb_subsets_created_.value()); + EXPECT_EQ(3U, stats_.lb_subsets_removed_.value()); + + // Add stage prod back. + host_set_.hosts_[1]->metadata(buildMetadataWithStage("1.2", "prod")); + host_set_.runCallbacks({}, {}); + EXPECT_EQ(2U, stats_.lb_subsets_active_.value()); + EXPECT_EQ(5U, stats_.lb_subsets_created_.value()); + EXPECT_EQ(3U, stats_.lb_subsets_removed_.value()); +} + +TEST_P(SubsetLoadBalancerTest, EmptySubsetsPurgedVersionChanged) { + std::vector subset_selectors = {makeSelector({"version"}), + makeSelector({"version", "stage"})}; + EXPECT_CALL(subset_info_, subsetSelectors()).WillRepeatedly(ReturnRef(subset_selectors)); + + // Init subsets. + init({{"tcp://127.0.0.1:8000", {{"version", "1.2"}}}, + {"tcp://127.0.0.1:8001", {{"version", "1.0"}, {"stage", "prod"}}}}); + EXPECT_EQ(3U, stats_.lb_subsets_active_.value()); + EXPECT_EQ(3U, stats_.lb_subsets_created_.value()); + EXPECT_EQ(0U, stats_.lb_subsets_removed_.value()); + + // Get rid of 1.0. + host_set_.hosts_[1]->metadata(buildMetadataWithStage("1.2", "prod")); + host_set_.runCallbacks({}, {}); + EXPECT_EQ(2U, stats_.lb_subsets_active_.value()); + EXPECT_EQ(4U, stats_.lb_subsets_created_.value()); + EXPECT_EQ(2U, stats_.lb_subsets_removed_.value()); + + // Change versions. 
+ host_set_.hosts_[0]->metadata(buildMetadataWithStage("1.3")); + host_set_.hosts_[1]->metadata(buildMetadataWithStage("1.4", "prod")); + host_set_.runCallbacks({}, {}); + EXPECT_EQ(3U, stats_.lb_subsets_active_.value()); + EXPECT_EQ(7U, stats_.lb_subsets_created_.value()); + EXPECT_EQ(4U, stats_.lb_subsets_removed_.value()); +} + TEST_P(SubsetLoadBalancerTest, MetadataChangedHostsAddedRemoved) { TestLoadBalancerContext context_10({{"version", "1.0"}}); TestLoadBalancerContext context_12({{"version", "1.2"}}); diff --git a/test/common/upstream/test_cluster_manager.h b/test/common/upstream/test_cluster_manager.h index a71d276471fa1..20edfaa9b59be 100644 --- a/test/common/upstream/test_cluster_manager.h +++ b/test/common/upstream/test_cluster_manager.h @@ -35,7 +35,8 @@ #include "test/mocks/protobuf/mocks.h" #include "test/mocks/runtime/mocks.h" #include "test/mocks/secret/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/admin.h" +#include "test/mocks/server/instance.h" #include "test/mocks/tcp/mocks.h" #include "test/mocks/thread_local/mocks.h" #include "test/mocks/upstream/mocks.h" @@ -126,7 +127,7 @@ class TestClusterManagerFactory : public ClusterManagerFactory { std::shared_ptr> dns_resolver_{ new NiceMock}; NiceMock runtime_; - NiceMock random_; + NiceMock random_; NiceMock dispatcher_; Extensions::TransportSockets::Tls::ContextManagerImpl ssl_context_manager_{ dispatcher_.timeSource()}; @@ -160,7 +161,7 @@ class TestClusterManagerImpl : public ClusterManagerImpl { TestClusterManagerImpl(const envoy::config::bootstrap::v3::Bootstrap& bootstrap, ClusterManagerFactory& factory, Stats::Store& stats, ThreadLocal::Instance& tls, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, const LocalInfo::LocalInfo& local_info, + Random::RandomGenerator& random, const LocalInfo::LocalInfo& local_info, AccessLog::AccessLogManager& log_manager, Event::Dispatcher& main_thread_dispatcher, Server::Admin& admin, 
ProtobufMessage::ValidationContext& validation_context, Api::Api& api, @@ -185,7 +186,7 @@ class MockedUpdatedClusterManagerImpl : public TestClusterManagerImpl { MockedUpdatedClusterManagerImpl( const envoy::config::bootstrap::v3::Bootstrap& bootstrap, ClusterManagerFactory& factory, Stats::Store& stats, ThreadLocal::Instance& tls, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, const LocalInfo::LocalInfo& local_info, + Random::RandomGenerator& random, const LocalInfo::LocalInfo& local_info, AccessLog::AccessLogManager& log_manager, Event::Dispatcher& main_thread_dispatcher, Server::Admin& admin, ProtobufMessage::ValidationContext& validation_context, Api::Api& api, MockLocalClusterUpdate& local_cluster_update, MockLocalHostsRemoved& local_hosts_removed, diff --git a/test/common/upstream/transport_socket_matcher_test.cc b/test/common/upstream/transport_socket_matcher_test.cc index a506ab014d7b8..cfde130d1d1f8 100644 --- a/test/common/upstream/transport_socket_matcher_test.cc +++ b/test/common/upstream/transport_socket_matcher_test.cc @@ -15,7 +15,7 @@ #include "server/transport_socket_config_impl.h" #include "test/mocks/network/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/transport_socket_factory_context.h" #include "test/test_common/registry.h" #include "test/test_common/utility.h" diff --git a/test/common/upstream/upstream_impl_test.cc b/test/common/upstream/upstream_impl_test.cc index 28c658c76b276..a0707d9cad2b0 100644 --- a/test/common/upstream/upstream_impl_test.cc +++ b/test/common/upstream/upstream_impl_test.cc @@ -13,6 +13,7 @@ #include "envoy/http/codec.h" #include "envoy/stats/scope.h" #include "envoy/upstream/cluster_manager.h" +#include "envoy/upstream/upstream.h" #include "common/config/metadata.h" #include "common/network/utility.h" @@ -29,7 +30,8 @@ #include "test/mocks/network/mocks.h" #include "test/mocks/protobuf/mocks.h" #include "test/mocks/runtime/mocks.h" -#include "test/mocks/server/mocks.h" 
+#include "test/mocks/server/admin.h" +#include "test/mocks/server/instance.h" #include "test/mocks/ssl/mocks.h" #include "test/mocks/upstream/mocks.h" #include "test/test_common/registry.h" @@ -58,7 +60,7 @@ class UpstreamImplTestBase { NiceMock local_info_; NiceMock dispatcher_; NiceMock runtime_; - NiceMock random_; + NiceMock random_; Stats::TestUtil::TestStore stats_; Singleton::ManagerImpl singleton_manager_{Thread::threadFactoryForTest()}; NiceMock tls_; @@ -160,10 +162,14 @@ TEST_P(StrictDnsParamTest, ImmediateResolve) { )EOF" + std::get<0>(GetParam()) + R"EOF( lb_policy: round_robin - hosts: - - socket_address: - address: foo.bar.com - port_value: 443 + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: foo.bar.com + port_value: 443 )EOF"; EXPECT_CALL(initialized, ready()); EXPECT_CALL(*dns_resolver, resolve("foo.bar.com", std::get<1>(GetParam()), _)) @@ -173,7 +179,7 @@ TEST_P(StrictDnsParamTest, ImmediateResolve) { TestUtility::makeDnsResponse(std::get<2>(GetParam()))); return nullptr; })); - envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format( "cluster.{}.", cluster_config.alt_stat_name().empty() ? cluster_config.name() : cluster_config.alt_stat_name())); @@ -207,7 +213,7 @@ TEST_F(StrictDnsClusterImplTest, ZeroHostsIsInializedImmediately) { - lb_endpoints: )EOF"; - envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format( "cluster.{}.", cluster_config.alt_stat_name().empty() ? 
cluster_config.name() : cluster_config.alt_stat_name())); @@ -231,11 +237,18 @@ TEST_F(StrictDnsClusterImplTest, ZeroHostsHealthChecker) { connect_timeout: 0.25s type: STRICT_DNS lb_policy: ROUND_ROBIN - hosts: [{ socket_address: { address: foo.bar.com, port_value: 443 }}] + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: foo.bar.com + port_value: 443 )EOF"; ResolverData resolver(*dns_resolver_, dispatcher_); - envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format( "cluster.{}.", cluster_config.alt_stat_name().empty() ? cluster_config.name() : cluster_config.alt_stat_name())); @@ -291,12 +304,22 @@ TEST_F(StrictDnsClusterImplTest, Basic) { http_protocol_options: header_key_format: proper_case_words: {} - hosts: - - { socket_address: { address: localhost1, port_value: 11001 }} - - { socket_address: { address: localhost2, port_value: 11002 }} + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: localhost1 + port_value: 11001 + - endpoint: + address: + socket_address: + address: localhost2 + port_value: 11002 )EOF"; - envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format( "cluster.{}.", cluster_config.alt_stat_name().empty() ? 
cluster_config.name() : cluster_config.alt_stat_name())); @@ -436,12 +459,19 @@ TEST_F(StrictDnsClusterImplTest, HostRemovalActiveHealthSkipped) { connect_timeout: 0.25s type: STRICT_DNS lb_policy: ROUND_ROBIN - drain_connections_on_host_removal: true - hosts: [{ socket_address: { address: foo.bar.com, port_value: 443 }}] + ignore_health_on_host_removal: true + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: foo.bar.com + port_value: 443 )EOF"; ResolverData resolver(*dns_resolver_, dispatcher_); - envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format( "cluster.{}.", cluster_config.alt_stat_name().empty() ? cluster_config.name() : cluster_config.alt_stat_name())); @@ -491,11 +521,18 @@ TEST_F(StrictDnsClusterImplTest, HostRemovalAfterHcFail) { connect_timeout: 0.25s type: STRICT_DNS lb_policy: ROUND_ROBIN - hosts: [{ socket_address: { address: foo.bar.com, port_value: 443 }}] + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: foo.bar.com + port_value: 443 )EOF"; ResolverData resolver(*dns_resolver_, dispatcher_); - envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format( "cluster.{}.", cluster_config.alt_stat_name().empty() ? 
cluster_config.name() : cluster_config.alt_stat_name())); @@ -622,7 +659,7 @@ TEST_F(StrictDnsClusterImplTest, LoadAssignmentBasic) { port_value: 8000 )EOF"; - envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format( "cluster.{}.", cluster_config.alt_stat_name().empty() ? cluster_config.name() : cluster_config.alt_stat_name())); @@ -777,7 +814,7 @@ TEST_F(StrictDnsClusterImplTest, LoadAssignmentBasic) { // Remove the duplicated hosts from both resolve targets and ensure that we don't see the same // host multiple times. - std::unordered_set removed_hosts; + absl::node_hash_set removed_hosts; cluster.prioritySet().addPriorityUpdateCb( [&](uint32_t, const HostVector&, const HostVector& hosts_removed) -> void { for (const auto& host : hosts_removed) { @@ -857,7 +894,7 @@ TEST_F(StrictDnsClusterImplTest, LoadAssignmentBasicMultiplePriorities) { port_value: 8000 )EOF"; - envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format( "cluster.{}.", cluster_config.alt_stat_name().empty() ? 
cluster_config.name() : cluster_config.alt_stat_name())); @@ -958,11 +995,19 @@ TEST_F(StrictDnsClusterImplTest, CustomResolverFails) { connect_timeout: 0.25s type: STRICT_DNS lb_policy: ROUND_ROBIN - drain_connections_on_host_removal: true - hosts: [{ socket_address: { address: foo.bar.com, port_value: 443, resolver_name: customresolver }}] + ignore_health_on_host_removal: true + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: foo.bar.com + port_value: 443 + resolver_name: customresolver )EOF"; - envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format("cluster.{}.", cluster_config.name())); Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context( @@ -987,10 +1032,17 @@ TEST_F(StrictDnsClusterImplTest, FailureRefreshRateBackoffResetsWhenSuccessHappe dns_failure_refresh_rate: base_interval: 7s max_interval: 10s - hosts: [{ socket_address: { address: localhost1, port_value: 11001 }}] + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: localhost1 + port_value: 11001 )EOF"; - envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format( "cluster.{}.", cluster_config.alt_stat_name().empty() ? 
cluster_config.name() : cluster_config.alt_stat_name())); @@ -1029,10 +1081,17 @@ TEST_F(StrictDnsClusterImplTest, TtlAsDnsRefreshRate) { lb_policy: ROUND_ROBIN dns_refresh_rate: 4s respect_dns_ttl: true - hosts: [{ socket_address: { address: localhost1, port_value: 11001 }}] + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: localhost1 + port_value: 11001 )EOF"; - envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format( "cluster.{}.", cluster_config.alt_stat_name().empty() ? cluster_config.name() : cluster_config.alt_stat_name())); @@ -1097,12 +1156,22 @@ TEST_F(StrictDnsClusterImplTest, Http2UserDefinedSettingsParametersValidation) { http_protocol_options: header_key_format: proper_case_words: {} - hosts: - - { socket_address: { address: localhost1, port_value: 11001 }} - - { socket_address: { address: localhost2, port_value: 11002 }} + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: localhost1 + port_value: 11001 + - endpoint: + address: + socket_address: + address: localhost2 + port_value: 11002 )EOF"; - envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format( "cluster.{}.", cluster_config.alt_stat_name().empty() ? 
cluster_config.name() : cluster_config.alt_stat_name())); @@ -1235,13 +1304,17 @@ TEST_F(StaticClusterImplTest, InitialHosts) { connect_timeout: 0.25s type: STATIC lb_policy: ROUND_ROBIN - hosts: - - socket_address: - address: 10.0.0.1 - port_value: 443 + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 10.0.0.1 + port_value: 443 )EOF"; - envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format( "cluster.{}.", cluster_config.alt_stat_name().empty() ? cluster_config.name() : cluster_config.alt_stat_name())); @@ -1276,7 +1349,7 @@ TEST_F(StaticClusterImplTest, LoadAssignmentEmptyHostname) { port_value: 8000 )EOF"; - envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format( "cluster.{}.", cluster_config.alt_stat_name().empty() ? cluster_config.name() : cluster_config.alt_stat_name())); @@ -1311,7 +1384,7 @@ TEST_F(StaticClusterImplTest, LoadAssignmentNonEmptyHostname) { port_value: 8000 )EOF"; - envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format( "cluster.{}.", cluster_config.alt_stat_name().empty() ? 
cluster_config.name() : cluster_config.alt_stat_name())); @@ -1346,7 +1419,7 @@ TEST_F(StaticClusterImplTest, LoadAssignmentNonEmptyHostnameWithHealthChecks) { hostname: "foo2" )EOF"; - envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format( "cluster.{}.", cluster_config.alt_stat_name().empty() ? cluster_config.name() : cluster_config.alt_stat_name())); @@ -1399,7 +1472,7 @@ TEST_F(StaticClusterImplTest, LoadAssignmentMultiplePriorities) { port_value: 8000 )EOF"; - envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format( "cluster.{}.", cluster_config.alt_stat_name().empty() ? cluster_config.name() : cluster_config.alt_stat_name())); @@ -1444,7 +1517,7 @@ TEST_F(StaticClusterImplTest, LoadAssignmentLocality) { port_value: 8000 )EOF"; - envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format( "cluster.{}.", cluster_config.alt_stat_name().empty() ? cluster_config.name() : cluster_config.alt_stat_name())); @@ -1490,7 +1563,7 @@ TEST_F(StaticClusterImplTest, LoadAssignmentEdsHealth) { )EOF"; NiceMock cm; - envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format( "cluster.{}.", cluster_config.alt_stat_name().empty() ? 
cluster_config.name() : cluster_config.alt_stat_name())); @@ -1512,10 +1585,17 @@ TEST_F(StaticClusterImplTest, AltStatName) { connect_timeout: 0.25s type: STRICT_DNS lb_policy: ROUND_ROBIN - hosts: [{ socket_address: { address: 10.0.0.1, port_value: 443 }}] + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 10.0.0.1 + port_value: 443 )EOF"; - envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format( "cluster.{}.", cluster_config.alt_stat_name().empty() ? cluster_config.name() : cluster_config.alt_stat_name())); @@ -1535,10 +1615,17 @@ TEST_F(StaticClusterImplTest, RingHash) { connect_timeout: 0.25s type: static lb_policy: ring_hash - hosts: [{ socket_address: { address: 10.0.0.1, port_value: 11001 }}] + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 10.0.0.1 + port_value: 11001 )EOF"; - envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format( "cluster.{}.", cluster_config.alt_stat_name().empty() ? 
cluster_config.name() : cluster_config.alt_stat_name())); @@ -1559,12 +1646,22 @@ TEST_F(StaticClusterImplTest, OutlierDetector) { connect_timeout: 0.25s type: static lb_policy: random - hosts: - - { socket_address: { address: 10.0.0.1, port_value: 11001 }} - - { socket_address: { address: 10.0.0.1, port_value: 11002 }} + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 10.0.0.1 + port_value: 11001 + - endpoint: + address: + socket_address: + address: 10.0.0.1 + port_value: 11002 )EOF"; - envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format( "cluster.{}.", cluster_config.alt_stat_name().empty() ? cluster_config.name() : cluster_config.alt_stat_name())); @@ -1607,12 +1704,22 @@ TEST_F(StaticClusterImplTest, HealthyStat) { connect_timeout: 0.25s type: static lb_policy: random - hosts: - - { socket_address: { address: 10.0.0.1, port_value: 11001 }} - - { socket_address: { address: 10.0.0.1, port_value: 11002 }} + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 10.0.0.1 + port_value: 11001 + - endpoint: + address: + socket_address: + address: 10.0.0.1 + port_value: 11002 )EOF"; - envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format( "cluster.{}.", cluster_config.alt_stat_name().empty() ? 
cluster_config.name() : cluster_config.alt_stat_name())); @@ -1738,12 +1845,22 @@ TEST_F(StaticClusterImplTest, UrlConfig) { connect_timeout: 0.25s type: static lb_policy: random - hosts: - - { socket_address: { address: 10.0.0.1, port_value: 11001 }} - - { socket_address: { address: 10.0.0.2, port_value: 11002 }} + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 10.0.0.1 + port_value: 11001 + - endpoint: + address: + socket_address: + address: 10.0.0.2 + port_value: 11002 )EOF"; - envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format( "cluster.{}.", cluster_config.alt_stat_name().empty() ? cluster_config.name() : cluster_config.alt_stat_name())); @@ -1794,7 +1911,7 @@ TEST_F(StaticClusterImplTest, UnsupportedLBType) { EXPECT_THROW_WITH_MESSAGE( { - envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format("cluster.{}.", cluster_config.alt_stat_name().empty() ? cluster_config.name() @@ -1817,10 +1934,16 @@ TEST_F(StaticClusterImplTest, MalformedHostIP) { connect_timeout: 0.25s type: STATIC lb_policy: ROUND_ROBIN - hosts: [{ socket_address: { address: foo.bar.com }}] + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: foo.bar.com )EOF"; - envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format( "cluster.{}.", cluster_config.alt_stat_name().empty() ? 
cluster_config.name() : cluster_config.alt_stat_name())); @@ -1848,7 +1971,7 @@ TEST_F(StaticClusterImplTest, NoHostsTest) { - priority: 1 )EOF"; - envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format("cluster.{}.", cluster_config.name())); Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context( @@ -1917,9 +2040,16 @@ TEST_F(ClusterImplTest, CloseConnectionsOnHostHealthFailure) { type: STRICT_DNS lb_policy: ROUND_ROBIN close_connections_on_host_health_failure: true - hosts: [{ socket_address: { address: foo.bar.com, port_value: 443 }}] + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: foo.bar.com + port_value: 443 )EOF"; - envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format( "cluster.{}.", cluster_config.alt_stat_name().empty() ? cluster_config.name() : cluster_config.alt_stat_name())); @@ -2049,8 +2179,9 @@ class ClusterInfoImplTest : public testing::Test { public: ClusterInfoImplTest() : api_(Api::createApiForTest(stats_)) {} - std::unique_ptr makeCluster(const std::string& yaml) { - cluster_config_ = parseClusterFromV2Yaml(yaml); + std::unique_ptr makeCluster(const std::string& yaml, + bool avoid_boosting = true) { + cluster_config_ = parseClusterFromV3Yaml(yaml, avoid_boosting); scope_ = stats_.createScope(fmt::format("cluster.{}.", cluster_config_.alt_stat_name().empty() ? 
cluster_config_.name() : cluster_config_.alt_stat_name())); @@ -2069,7 +2200,7 @@ class ClusterInfoImplTest : public testing::Test { NiceMock runtime_; NiceMock cm_; NiceMock local_info_; - NiceMock random_; + NiceMock random_; NiceMock admin_; Singleton::ManagerImpl singleton_manager_{Thread::threadFactoryForTest()}; NiceMock tls_; @@ -2108,7 +2239,14 @@ TEST_F(ClusterInfoImplTest, Metadata) { connect_timeout: 0.25s type: STRICT_DNS lb_policy: MAGLEV - hosts: [{ socket_address: { address: foo.bar.com, port_value: 443 }}] + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: foo.bar.com + port_value: 443 metadata: { filter_metadata: { com.bar.foo: { baz: test_value }, baz: {name: meh } } } common_lb_config: @@ -2138,13 +2276,20 @@ TEST_F(ClusterInfoImplTest, EdsServiceNamePopulation) { lb_policy: MAGLEV eds_cluster_config: service_name: service_foo - hosts: [{ socket_address: { address: foo.bar.com, port_value: 443 }}] + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: foo.bar.com + port_value: 443 common_lb_config: healthy_panic_threshold: value: 0.3 )EOF"; auto cluster = makeCluster(yaml); - EXPECT_EQ(cluster->info()->eds_service_name(), "service_foo"); + EXPECT_EQ(cluster->info()->edsServiceName(), "service_foo"); const std::string unexpected_eds_config_yaml = R"EOF( name: name @@ -2153,7 +2298,14 @@ TEST_F(ClusterInfoImplTest, EdsServiceNamePopulation) { lb_policy: MAGLEV eds_cluster_config: service_name: service_foo - hosts: [{ socket_address: { address: foo.bar.com, port_value: 443 }}] + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: foo.bar.com + port_value: 443 common_lb_config: healthy_panic_threshold: value: 0.3 @@ -2169,7 +2321,14 @@ TEST_F(ClusterInfoImplTest, BrokenTypedMetadata) { connect_timeout: 0.25s type: STRICT_DNS lb_policy: MAGLEV - hosts: [{ socket_address: { address: foo.bar.com, 
port_value: 443 }}] + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: foo.bar.com + port_value: 443 metadata: { filter_metadata: { com.bar.foo: { baz: test_value }, baz: {boom: meh} } } common_lb_config: @@ -2190,12 +2349,22 @@ TEST_F(ClusterInfoImplTest, ExtensionProtocolOptionsForUnknownFilter) { connect_timeout: 0.25s type: STRICT_DNS lb_policy: ROUND_ROBIN - hosts: [{ socket_address: { address: foo.bar.com, port_value: 443 }}] - extension_protocol_options: - no_such_filter: { option: value } + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: foo.bar.com + port_value: 443 + typed_extension_protocol_options: + no_such_filter: + "@type": type.googleapis.com/google.protobuf.Struct + value: + option: "value" )EOF"; - EXPECT_THROW_WITH_MESSAGE(makeCluster(yaml), EnvoyException, + EXPECT_THROW_WITH_MESSAGE(makeCluster(yaml, false), EnvoyException, "Didn't find a registered network or http filter implementation for " "name: 'no_such_filter'"); } @@ -2206,7 +2375,14 @@ TEST_F(ClusterInfoImplTest, TypedExtensionProtocolOptionsForUnknownFilter) { connect_timeout: 0.25s type: STRICT_DNS lb_policy: ROUND_ROBIN - hosts: [{ socket_address: { address: foo.bar.com, port_value: 443 }}] + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: foo.bar.com + port_value: 443 typed_extension_protocol_options: no_such_filter: "@type": type.googleapis.com/google.protobuf.Struct @@ -2217,6 +2393,7 @@ TEST_F(ClusterInfoImplTest, TypedExtensionProtocolOptionsForUnknownFilter) { "name: 'no_such_filter'"); } +// This test case can't be converted for V3 API as it is specific for extension_protocol_options TEST_F(ClusterInfoImplTest, OneofExtensionProtocolOptionsForUnknownFilter) { const std::string yaml = R"EOF( name: name @@ -2231,10 +2408,67 @@ TEST_F(ClusterInfoImplTest, OneofExtensionProtocolOptionsForUnknownFilter) { "@type": 
type.googleapis.com/google.protobuf.Struct )EOF"; - EXPECT_THROW_WITH_MESSAGE(makeCluster(yaml), EnvoyException, + EXPECT_THROW_WITH_MESSAGE(makeCluster(yaml, false), EnvoyException, "Only one of typed_extension_protocol_options or " "extension_protocol_options can be specified"); } + +TEST_F(ClusterInfoImplTest, TestTrackRequestResponseSizesNotSetInConfig) { + const std::string yaml_disabled = R"EOF( + name: name + connect_timeout: 0.25s + type: STRICT_DNS + lb_policy: ROUND_ROBIN + )EOF"; + + auto cluster = makeCluster(yaml_disabled); + // By default, histograms tracking request/response sizes are not published. + EXPECT_FALSE(cluster->info()->requestResponseSizeStats().has_value()); + + const std::string yaml_disabled2 = R"EOF( + name: name + connect_timeout: 0.25s + type: STRICT_DNS + lb_policy: ROUND_ROBIN + track_cluster_stats: { timeout_budgets : true } + )EOF"; + + cluster = makeCluster(yaml_disabled2); + EXPECT_FALSE(cluster->info()->requestResponseSizeStats().has_value()); + + const std::string yaml_disabled3 = R"EOF( + name: name + connect_timeout: 0.25s + type: STRICT_DNS + lb_policy: ROUND_ROBIN + track_cluster_stats: { request_response_sizes : false } + )EOF"; + + cluster = makeCluster(yaml_disabled3); + EXPECT_FALSE(cluster->info()->requestResponseSizeStats().has_value()); +} + +TEST_F(ClusterInfoImplTest, TestTrackRequestResponseSizes) { + const std::string yaml = R"EOF( + name: name + connect_timeout: 0.25s + type: STRICT_DNS + lb_policy: ROUND_ROBIN + track_cluster_stats: { request_response_sizes : true } + )EOF"; + + auto cluster = makeCluster(yaml); + // The stats should be created. 
+ ASSERT_TRUE(cluster->info()->requestResponseSizeStats().has_value()); + + Upstream::ClusterRequestResponseSizeStats req_resp_stats = + cluster->info()->requestResponseSizeStats()->get(); + + EXPECT_EQ(Stats::Histogram::Unit::Bytes, req_resp_stats.upstream_rq_headers_size_.unit()); + EXPECT_EQ(Stats::Histogram::Unit::Bytes, req_resp_stats.upstream_rq_body_size_.unit()); + EXPECT_EQ(Stats::Histogram::Unit::Bytes, req_resp_stats.upstream_rs_body_size_.unit()); +} + TEST_F(ClusterInfoImplTest, TestTrackRemainingResourcesGauges) { const std::string yaml = R"EOF( name: name @@ -2288,7 +2522,14 @@ TEST_F(ClusterInfoImplTest, Timeouts) { connect_timeout: 0.25s type: STRICT_DNS lb_policy: MAGLEV - hosts: [{ socket_address: { address: foo.bar.com, port_value: 443 }}] + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: foo.bar.com + port_value: 443 metadata: { filter_metadata: { com.bar.foo: { baz: test_value }, baz: {name: meh } } } common_lb_config: @@ -2319,7 +2560,64 @@ TEST_F(ClusterInfoImplTest, Timeouts) { EXPECT_FALSE(cluster3->info()->idleTimeout().has_value()); } +TEST_F(ClusterInfoImplTest, TestTrackTimeoutBudgetsNotSetInConfig) { + // Check that without the flag specified, the histogram is null. + const std::string yaml_disabled = R"EOF( + name: name + connect_timeout: 0.25s + type: STRICT_DNS + lb_policy: ROUND_ROBIN + )EOF"; + + auto cluster = makeCluster(yaml_disabled); + // The stats will be null if they have not been explicitly turned on. 
+ EXPECT_FALSE(cluster->info()->timeoutBudgetStats().has_value()); + + const std::string yaml_disabled2 = R"EOF( + name: name + connect_timeout: 0.25s + type: STRICT_DNS + lb_policy: ROUND_ROBIN + track_cluster_stats: { request_response_sizes : true } + )EOF"; + + cluster = makeCluster(yaml_disabled2); + EXPECT_FALSE(cluster->info()->timeoutBudgetStats().has_value()); + + const std::string yaml_disabled3 = R"EOF( + name: name + connect_timeout: 0.25s + type: STRICT_DNS + lb_policy: ROUND_ROBIN + track_cluster_stats: { timeout_budgets : false } + )EOF"; + + cluster = makeCluster(yaml_disabled3); + EXPECT_FALSE(cluster->info()->timeoutBudgetStats().has_value()); +} + TEST_F(ClusterInfoImplTest, TestTrackTimeoutBudgets) { + // Check that with the flag, the histogram is created. + const std::string yaml = R"EOF( + name: name + connect_timeout: 0.25s + type: STRICT_DNS + lb_policy: ROUND_ROBIN + track_cluster_stats: { timeout_budgets : true } + )EOF"; + + auto cluster = makeCluster(yaml); + // The stats should be created. + ASSERT_TRUE(cluster->info()->timeoutBudgetStats().has_value()); + + Upstream::ClusterTimeoutBudgetStats tb_stats = cluster->info()->timeoutBudgetStats()->get(); + EXPECT_EQ(Stats::Histogram::Unit::Unspecified, + tb_stats.upstream_rq_timeout_budget_percent_used_.unit()); + EXPECT_EQ(Stats::Histogram::Unit::Unspecified, + tb_stats.upstream_rq_timeout_budget_per_try_percent_used_.unit()); +} + +TEST_F(ClusterInfoImplTest, DEPRECATED_FEATURE_TEST(TestTrackTimeoutBudgetsOld)) { // Check that without the flag specified, the histogram is null. const std::string yaml_disabled = R"EOF( name: name @@ -2343,9 +2641,13 @@ TEST_F(ClusterInfoImplTest, TestTrackTimeoutBudgets) { cluster = makeCluster(yaml); // The stats should be created. 
- EXPECT_TRUE(cluster->info()->timeoutBudgetStats().has_value()); + ASSERT_TRUE(cluster->info()->timeoutBudgetStats().has_value()); + + Upstream::ClusterTimeoutBudgetStats tb_stats = cluster->info()->timeoutBudgetStats()->get(); + EXPECT_EQ(Stats::Histogram::Unit::Unspecified, + tb_stats.upstream_rq_timeout_budget_percent_used_.unit()); EXPECT_EQ(Stats::Histogram::Unit::Unspecified, - cluster->info()->timeoutBudgetStats()->upstream_rq_timeout_budget_percent_used_.unit()); + tb_stats.upstream_rq_timeout_budget_per_try_percent_used_.unit()); } // Validates HTTP2 SETTINGS config. @@ -2410,7 +2712,7 @@ class TestNetworkFilterConfigFactory } Upstream::ProtocolOptionsConfigConstSharedPtr createProtocolOptionsConfig(const Protobuf::Message& msg, - ProtobufMessage::ValidationVisitor&) override { + Server::Configuration::ProtocolOptionsFactoryContext&) override { return parent_.createProtocolOptionsConfig(msg); } std::string name() const override { CONSTRUCT_ON_FIRST_USE(std::string, "envoy.test.filter"); } @@ -2445,7 +2747,7 @@ class TestHttpFilterConfigFactory : public Server::Configuration::NamedHttpFilte } Upstream::ProtocolOptionsConfigConstSharedPtr createProtocolOptionsConfig(const Protobuf::Message& msg, - ProtobufMessage::ValidationVisitor&) override { + Server::Configuration::ProtocolOptionsFactoryContext&) override { return parent_.createProtocolOptionsConfig(msg); } std::string name() const override { CONSTRUCT_ON_FIRST_USE(std::string, "envoy.test.filter"); } @@ -2468,22 +2770,32 @@ TEST_F(ClusterInfoImplTest, ExtensionProtocolOptionsForFilterWithoutOptions) { connect_timeout: 0.25s type: STRICT_DNS lb_policy: ROUND_ROBIN - hosts: [{ socket_address: { address: foo.bar.com, port_value: 443 }}] - extension_protocol_options: - envoy.test.filter: { option: value } + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: foo.bar.com + port_value: 443 + typed_extension_protocol_options: + envoy.test.filter: + "@type": 
type.googleapis.com/google.protobuf.Struct + value: + option: "value" )EOF"; { TestNetworkFilterConfigFactory factory(factoryBase); Registry::InjectFactory registry( factory); - EXPECT_THROW_WITH_MESSAGE(makeCluster(yaml), EnvoyException, + EXPECT_THROW_WITH_MESSAGE(makeCluster(yaml, false), EnvoyException, "filter envoy.test.filter does not support protocol options"); } { TestHttpFilterConfigFactory factory(factoryBase); Registry::InjectFactory registry(factory); - EXPECT_THROW_WITH_MESSAGE(makeCluster(yaml), EnvoyException, + EXPECT_THROW_WITH_MESSAGE(makeCluster(yaml, false), EnvoyException, "filter envoy.test.filter does not support protocol options"); } } @@ -2499,7 +2811,14 @@ TEST_F(ClusterInfoImplTest, TypedExtensionProtocolOptionsForFilterWithoutOptions connect_timeout: 0.25s type: STRICT_DNS lb_policy: ROUND_ROBIN - hosts: [{ socket_address: { address: foo.bar.com, port_value: 443 }}] + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: foo.bar.com + port_value: 443 typed_extension_protocol_options: envoy.test.filter: { "@type": type.googleapis.com/google.protobuf.Struct } )EOF"; @@ -2537,9 +2856,19 @@ TEST_F(ClusterInfoImplTest, ExtensionProtocolOptionsForFilterWithOptions) { connect_timeout: 0.25s type: STRICT_DNS lb_policy: ROUND_ROBIN - hosts: [{ socket_address: { address: foo.bar.com, port_value: 443 }}] - extension_protocol_options: - envoy.test.filter: { option: "value" } + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: foo.bar.com + port_value: 443 + typed_extension_protocol_options: + envoy.test.filter: + "@type": type.googleapis.com/google.protobuf.Struct + value: + option: "value" )EOF"; const std::string typed_yaml = R"EOF( @@ -2547,7 +2876,14 @@ TEST_F(ClusterInfoImplTest, ExtensionProtocolOptionsForFilterWithOptions) { connect_timeout: 0.25s type: STRICT_DNS lb_policy: ROUND_ROBIN - hosts: [{ socket_address: { address: foo.bar.com, 
port_value: 443 }}] + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: foo.bar.com + port_value: 443 typed_extension_protocol_options: envoy.test.filter: "@type": type.googleapis.com/google.protobuf.Struct diff --git a/test/common/upstream/utility.h b/test/common/upstream/utility.h index a181985b1c075..ef3dd210adf38 100644 --- a/test/common/upstream/utility.h +++ b/test/common/upstream/utility.h @@ -27,12 +27,21 @@ constexpr static const char* kDefaultStaticClusterTmpl = R"EOF( "connect_timeout": "0.250s", "type": "static", "lb_policy": "round_robin", - "hosts": [ + "load_assignment": { + "endpoints": [ { - %s, + "lb_endpoints": [ + { + "endpoint": { + "address": { + %s, } + } + } + ] } ] } + } )EOF"; inline std::string defaultStaticClusterJson(const std::string& name) { @@ -44,26 +53,28 @@ inline std::string defaultStaticClusterJson(const std::string& name) { } inline envoy::config::bootstrap::v3::Bootstrap -parseBootstrapFromV2Json(const std::string& json_string) { +parseBootstrapFromV3Json(const std::string& json_string, bool avoid_boosting = true) { envoy::config::bootstrap::v3::Bootstrap bootstrap; - TestUtility::loadFromJson(json_string, bootstrap, true); + TestUtility::loadFromJson(json_string, bootstrap, true, avoid_boosting); return bootstrap; } -inline envoy::config::cluster::v3::Cluster parseClusterFromV2Json(const std::string& json_string) { +inline envoy::config::cluster::v3::Cluster parseClusterFromV3Json(const std::string& json_string, + bool avoid_boosting = true) { envoy::config::cluster::v3::Cluster cluster; - TestUtility::loadFromJson(json_string, cluster, true); + TestUtility::loadFromJson(json_string, cluster, true, avoid_boosting); return cluster; } -inline envoy::config::cluster::v3::Cluster parseClusterFromV2Yaml(const std::string& yaml) { +inline envoy::config::cluster::v3::Cluster parseClusterFromV3Yaml(const std::string& yaml, + bool avoid_boosting = true) { 
envoy::config::cluster::v3::Cluster cluster; - TestUtility::loadFromYaml(yaml, cluster, true); + TestUtility::loadFromYaml(yaml, cluster, true, avoid_boosting); return cluster; } inline envoy::config::cluster::v3::Cluster defaultStaticCluster(const std::string& name) { - return parseClusterFromV2Json(defaultStaticClusterJson(name)); + return parseClusterFromV3Json(defaultStaticClusterJson(name)); } inline HostSharedPtr makeTestHost(ClusterInfoConstSharedPtr cluster, const std::string& hostname, @@ -76,12 +87,12 @@ inline HostSharedPtr makeTestHost(ClusterInfoConstSharedPtr cluster, const std:: } inline HostSharedPtr makeTestHost(ClusterInfoConstSharedPtr cluster, const std::string& url, - uint32_t weight = 1) { + uint32_t weight = 1, uint32_t priority = 0) { return HostSharedPtr{ new HostImpl(cluster, "", Network::Utility::resolveUrl(url), nullptr, weight, envoy::config::core::v3::Locality(), - envoy::config::endpoint::v3::Endpoint::HealthCheckConfig::default_instance(), 0, - envoy::config::core::v3::UNKNOWN)}; + envoy::config::endpoint::v3::Endpoint::HealthCheckConfig::default_instance(), + priority, envoy::config::core::v3::UNKNOWN)}; } inline HostSharedPtr makeTestHost(ClusterInfoConstSharedPtr cluster, const std::string& url, @@ -123,6 +134,14 @@ makeLocalityWeights(std::initializer_list locality_weights) { return std::make_shared(locality_weights); } +inline envoy::config::core::v3::HealthCheck +parseHealthCheckFromV3Yaml(const std::string& yaml_string, bool avoid_boosting = true) { + envoy::config::core::v3::HealthCheck health_check; + TestUtility::loadFromYamlAndValidate(yaml_string, health_check, false, avoid_boosting); + return health_check; +} + +// For DEPRECATED TEST CASES inline envoy::config::core::v3::HealthCheck parseHealthCheckFromV2Yaml(const std::string& yaml_string) { envoy::config::core::v3::HealthCheck health_check; diff --git a/test/config/BUILD b/test/config/BUILD index baf14f36e68fe..33746b1c0cec0 100644 --- a/test/config/BUILD +++ 
b/test/config/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test_library( diff --git a/test/config/integration/BUILD b/test/config/integration/BUILD index 29fe25251783c..684312b35b20d 100644 --- a/test/config/integration/BUILD +++ b/test/config/integration/BUILD @@ -1,10 +1,10 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() exports_files([ diff --git a/test/config/integration/certs/BUILD b/test/config/integration/certs/BUILD index a4350864d8d3f..8e80a2f1d2f76 100644 --- a/test/config/integration/certs/BUILD +++ b/test/config/integration/certs/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() filegroup( diff --git a/test/config/integration/google_com_proxy_port_0.v2.yaml b/test/config/integration/google_com_proxy_port_0.v2.yaml index 47b1cfd0f6de1..c67b6845960d3 100644 --- a/test/config/integration/google_com_proxy_port_0.v2.yaml +++ b/test/config/integration/google_com_proxy_port_0.v2.yaml @@ -1,5 +1,5 @@ admin: - access_log_path: /dev/null + access_log_path: "{{ null_device_path }}" address: socket_address: address: "{{ ip_any_address }}" diff --git a/test/config/integration/server.yaml b/test/config/integration/server.yaml index 10e1175c17d9e..78d8f24fd8f5f 100644 --- a/test/config/integration/server.yaml +++ b/test/config/integration/server.yaml @@ -53,7 +53,7 @@ static_resources: - name: accesslog typed_config: "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog - path: /dev/null + path: {{ null_device_path }} filter: or_filter: filters: @@ -69,21 +69,6 @@ static_resources: value: default_value: 1000 runtime_key: access_log.access_error.duration - - 
address: - socket_address: - address: {{ ip_loopback_address }} - port_value: 0 - filter_chains: - - filters: - - name: redis - typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.redis_proxy.v2.RedisProxy - settings: - op_timeout: 0.4s - stat_prefix: redis - prefix_routes: - catch_all_route: - cluster: redis clusters: - name: cluster_1 connect_timeout: 5s @@ -181,7 +166,7 @@ layered_runtime: - name: admin admin_layer: {} admin: - access_log_path: "/dev/null" + access_log_path: "{{ null_device_path }}" profile_path: "{{ test_tmpdir }}/envoy.prof" address: socket_address: diff --git a/test/config/integration/server_unix_listener.yaml b/test/config/integration/server_unix_listener.yaml index b4f3d15becf0b..2c3328cd10268 100644 --- a/test/config/integration/server_unix_listener.yaml +++ b/test/config/integration/server_unix_listener.yaml @@ -39,7 +39,7 @@ static_resources: cluster_manager: {} watchdog: {} admin: - access_log_path: "/dev/null" + access_log_path: "{{ null_device_path }}" address: socket_address: address: "{{ ip_loopback_address }}" diff --git a/test/config/integration/server_xds.bootstrap.yaml b/test/config/integration/server_xds.bootstrap.yaml index 26eafc3e79d64..70c4302e37251 100644 --- a/test/config/integration/server_xds.bootstrap.yaml +++ b/test/config/integration/server_xds.bootstrap.yaml @@ -4,7 +4,7 @@ dynamic_resources: cds_config: path: {{ cds_json_path }} admin: - access_log_path: /dev/null + access_log_path: {{ null_device_path }} address: socket_address: address: {{ ntop_ip_loopback_address }} diff --git a/test/config/utility.cc b/test/config/utility.cc index 9f80e901662b4..598c9ade7ba8a 100644 --- a/test/config/utility.cc +++ b/test/config/utility.cc @@ -31,16 +31,16 @@ namespace Envoy { std::string ConfigHelper::baseConfig() { - return R"EOF( + return fmt::format(R"EOF( admin: - access_log_path: /dev/null + access_log_path: {} address: socket_address: address: 127.0.0.1 port_value: 0 dynamic_resources: lds_config: - 
path: /dev/null + path: {} static_resources: secrets: - name: "secret_static_0" @@ -68,13 +68,14 @@ std::string ConfigHelper::baseConfig() { socket_address: address: 127.0.0.1 port_value: 0 -)EOF"; +)EOF", + TestEnvironment::nullDevicePath(), TestEnvironment::nullDevicePath()); } std::string ConfigHelper::baseUdpListenerConfig() { - return R"EOF( + return fmt::format(R"EOF( admin: - access_log_path: /dev/null + access_log_path: {} address: socket_address: address: 127.0.0.1 @@ -98,7 +99,8 @@ std::string ConfigHelper::baseUdpListenerConfig() { address: 0.0.0.0 port_value: 0 protocol: udp -)EOF"; +)EOF", + TestEnvironment::nullDevicePath()); } std::string ConfigHelper::tcpProxyConfig() { @@ -121,23 +123,25 @@ name: "envoy.filters.listener.tls_inspector" } std::string ConfigHelper::httpProxyConfig() { - return absl::StrCat(baseConfig(), R"EOF( + return absl::StrCat(baseConfig(), fmt::format(R"EOF( filter_chains: filters: name: http typed_config: "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager stat_prefix: config_test + delayed_close_timeout: + nanos: 100 http_filters: name: envoy.filters.http.router codec_type: HTTP1 access_log: name: accesslog filter: - not_health_check_filter: {} + not_health_check_filter: {{}} typed_config: "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog - path: /dev/null + path: {} route_config: virtual_hosts: name: integration @@ -148,14 +152,15 @@ std::string ConfigHelper::httpProxyConfig() { prefix: "/" domains: "*" name: route_config_0 -)EOF"); +)EOF", + TestEnvironment::nullDevicePath())); } // TODO(danzh): For better compatibility with HTTP integration test framework, // it's better to combine with HTTP_PROXY_CONFIG, and use config modifiers to // specify quic specific things. 
std::string ConfigHelper::quicHttpProxyConfig() { - return absl::StrCat(baseUdpListenerConfig(), R"EOF( + return absl::StrCat(baseUdpListenerConfig(), fmt::format(R"EOF( filter_chains: transport_socket: name: envoy.transport_sockets.quic @@ -170,10 +175,10 @@ std::string ConfigHelper::quicHttpProxyConfig() { access_log: name: file_access_log filter: - not_health_check_filter: {} + not_health_check_filter: {{}} typed_config: "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog - path: /dev/null + path: {} route_config: virtual_hosts: name: integration @@ -186,7 +191,8 @@ std::string ConfigHelper::quicHttpProxyConfig() { name: route_config_0 udp_listener_config: udp_listener_name: "quiche_quic_listener" -)EOF"); +)EOF", + TestEnvironment::nullDevicePath())); } std::string ConfigHelper::defaultBufferFilter() { @@ -246,7 +252,7 @@ std::string ConfigHelper::discoveredClustersBootstrap(const std::string& api_typ return fmt::format( R"EOF( admin: - access_log_path: /dev/null + access_log_path: {} address: socket_address: address: 127.0.0.1 @@ -303,20 +309,23 @@ std::string ConfigHelper::discoveredClustersBootstrap(const std::string& api_typ prefix: "/cluster2" domains: "*" )EOF", - api_type); + TestEnvironment::nullDevicePath(), api_type); } // TODO(#6327) cleaner approach to testing with static config. 
-std::string ConfigHelper::adsBootstrap(const std::string& api_type) { - return fmt::format( - R"EOF( +std::string ConfigHelper::adsBootstrap(const std::string& api_type, + envoy::config::core::v3::ApiVersion api_version) { + return fmt::format(R"EOF( dynamic_resources: lds_config: + resource_api_version: {1} ads: {{}} cds_config: + resource_api_version: {1} ads: {{}} ads_config: - api_type: {} + transport_api_version: {1} + api_type: {0} static_resources: clusters: name: dummy_cluster @@ -335,17 +344,19 @@ std::string ConfigHelper::adsBootstrap(const std::string& api_type) { lb_policy: ROUND_ROBIN http2_protocol_options: {{}} admin: - access_log_path: /dev/null + access_log_path: {2} address: socket_address: address: 127.0.0.1 port_value: 0 )EOF", - api_type); + api_type, api_version == envoy::config::core::v3::ApiVersion::V2 ? "V2" : "V3", + TestEnvironment::nullDevicePath()); } -envoy::config::cluster::v3::Cluster ConfigHelper::buildCluster(const std::string& name, int port, - const std::string& ip_version) { +// TODO(samflattery): bundle this up with buildCluster +envoy::config::cluster::v3::Cluster +ConfigHelper::buildStaticCluster(const std::string& name, int port, const std::string& address) { return TestUtility::parseYaml(fmt::format(R"EOF( name: {} connect_timeout: 5s @@ -363,7 +374,139 @@ envoy::config::cluster::v3::Cluster ConfigHelper::buildCluster(const std::string http2_protocol_options: {{}} )EOF", name, name, - ip_version, port)); + address, port)); +} + +envoy::config::cluster::v3::Cluster +ConfigHelper::buildCluster(const std::string& name, const std::string& lb_policy, + envoy::config::core::v3::ApiVersion api_version) { + API_NO_BOOST(envoy::config::cluster::v3::Cluster) cluster; + TestUtility::loadFromYaml(fmt::format(R"EOF( + name: {} + connect_timeout: 5s + type: EDS + eds_cluster_config: + eds_config: + resource_api_version: {} + ads: {{}} + lb_policy: {} + http2_protocol_options: {{}} + )EOF", + name, apiVersionStr(api_version), lb_policy), 
+ cluster, shouldBoost(api_version)); + return cluster; +} + +envoy::config::cluster::v3::Cluster +ConfigHelper::buildTlsCluster(const std::string& name, const std::string& lb_policy, + envoy::config::core::v3::ApiVersion api_version) { + API_NO_BOOST(envoy::config::cluster::v3::Cluster) cluster; + TestUtility::loadFromYaml( + fmt::format(R"EOF( + name: {} + connect_timeout: 5s + type: EDS + eds_cluster_config: + eds_config: + resource_api_version: {} + ads: {{}} + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + validation_context: + trusted_ca: + filename: {} + lb_policy: {} + http2_protocol_options: {{}} + )EOF", + name, apiVersionStr(api_version), + TestEnvironment::runfilesPath("test/config/integration/certs/upstreamcacert.pem"), + lb_policy), + cluster, shouldBoost(api_version)); + return cluster; +} + +envoy::config::endpoint::v3::ClusterLoadAssignment +ConfigHelper::buildClusterLoadAssignment(const std::string& name, const std::string& address, + uint32_t port, + envoy::config::core::v3::ApiVersion api_version) { + API_NO_BOOST(envoy::config::endpoint::v3::ClusterLoadAssignment) cluster_load_assignment; + TestUtility::loadFromYaml(fmt::format(R"EOF( + cluster_name: {} + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: {} + port_value: {} + )EOF", + name, address, port), + cluster_load_assignment, shouldBoost(api_version)); + return cluster_load_assignment; +} + +envoy::config::listener::v3::Listener +ConfigHelper::buildBaseListener(const std::string& name, const std::string& address, + const std::string& filter_chains, + envoy::config::core::v3::ApiVersion api_version) { + API_NO_BOOST(envoy::config::listener::v3::Listener) listener; + TestUtility::loadFromYaml(fmt::format( + R"EOF( + name: {} + address: + socket_address: + address: {} + port_value: 0 + filter_chains: + {} + )EOF", + name, 
address, filter_chains), + listener, shouldBoost(api_version)); + return listener; +} + +envoy::config::listener::v3::Listener +ConfigHelper::buildListener(const std::string& name, const std::string& route_config, + const std::string& address, const std::string& stat_prefix, + envoy::config::core::v3::ApiVersion api_version) { + std::string hcm = fmt::format( + R"EOF( + filters: + - name: http + typed_config: + "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + stat_prefix: {} + codec_type: HTTP2 + rds: + route_config_name: {} + config_source: + resource_api_version: {} + ads: {{}} + http_filters: [{{ name: envoy.filters.http.router }}] + )EOF", + stat_prefix, route_config, apiVersionStr(api_version)); + return buildBaseListener(name, address, hcm, api_version); +} + +envoy::config::route::v3::RouteConfiguration +ConfigHelper::buildRouteConfig(const std::string& name, const std::string& cluster, + envoy::config::core::v3::ApiVersion api_version) { + API_NO_BOOST(envoy::config::route::v3::RouteConfiguration) route; + TestUtility::loadFromYaml(fmt::format(R"EOF( + name: {} + virtual_hosts: + - name: integration + domains: ["*"] + routes: + - match: {{ prefix: "/" }} + route: {{ cluster: {} }} + )EOF", + name, cluster), + route, shouldBoost(api_version)); + return route; } envoy::config::endpoint::v3::Endpoint ConfigHelper::buildEndpoint(const std::string& address) { @@ -433,6 +576,26 @@ void ConfigHelper::addClusterFilterMetadata(absl::string_view metadata_yaml, } } +void ConfigHelper::setConnectConfig( + envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& hcm, + bool terminate_connect) { + auto* route_config = hcm.mutable_route_config(); + ASSERT_EQ(1, route_config->virtual_hosts_size()); + auto* route = route_config->mutable_virtual_hosts(0)->mutable_routes(0); + auto* match = route->mutable_match(); + match->Clear(); + match->mutable_connect_matcher(); + + if 
(terminate_connect) { + auto* upgrade = route->mutable_route()->add_upgrade_configs(); + upgrade->set_upgrade_type("CONNECT"); + upgrade->mutable_connect_config(); + } + + hcm.add_upgrade_configs()->set_upgrade_type("CONNECT"); + hcm.mutable_http2_protocol_options()->set_allow_connect(true); +} + void ConfigHelper::applyConfigModifiers() { for (const auto& config_modifier : config_modifiers_) { config_modifier(bootstrap_); @@ -454,6 +617,10 @@ void ConfigHelper::addRuntimeOverride(const std::string& key, const std::string& (*static_layer->mutable_fields())[std::string(key)] = ValueUtil::stringValue(std::string(value)); } +void ConfigHelper::setNewCodecs() { + addRuntimeOverride("envoy.reloadable_features.new_codec_behavior", "true"); +} + void ConfigHelper::finalize(const std::vector& ports) { RELEASE_ASSERT(!finalized_, ""); @@ -504,11 +671,16 @@ void ConfigHelper::finalize(const std::vector& ports) { for (int k = 0; k < locality_lb->lb_endpoints_size(); ++k) { auto lb_endpoint = locality_lb->mutable_lb_endpoints(k); if (lb_endpoint->endpoint().address().has_socket_address()) { - RELEASE_ASSERT(ports.size() > port_idx, ""); - lb_endpoint->mutable_endpoint() - ->mutable_address() - ->mutable_socket_address() - ->set_port_value(ports[port_idx++]); + if (lb_endpoint->endpoint().address().socket_address().port_value() == 0) { + RELEASE_ASSERT(ports.size() > port_idx, ""); + lb_endpoint->mutable_endpoint() + ->mutable_address() + ->mutable_socket_address() + ->set_port_value(ports[port_idx++]); + } else { + ENVOY_LOG_MISC(debug, "Not overriding preset port", + lb_endpoint->endpoint().address().socket_address().port_value()); + } } } } @@ -740,13 +912,13 @@ bool ConfigHelper::setAccessLog(const std::string& filename, absl::string_view f if (getFilterFromListener("http") == nullptr) { return false; } - // Replace /dev/null with a real path for the file access log. + // Replace null device with a real path for the file access log. 
envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager hcm_config; loadHttpConnectionManager(hcm_config); envoy::extensions::access_loggers::file::v3::FileAccessLog access_log_config; if (!format.empty()) { - access_log_config.set_format(std::string(format)); + access_log_config.mutable_log_format()->set_text_format(absl::StrCat(format, "\n")); } access_log_config.set_path(filename); hcm_config.mutable_access_log(0)->mutable_typed_config()->PackFrom(access_log_config); @@ -761,7 +933,7 @@ bool ConfigHelper::setListenerAccessLog(const std::string& filename, absl::strin } envoy::extensions::access_loggers::file::v3::FileAccessLog access_log_config; if (!format.empty()) { - access_log_config.set_format(std::string(format)); + access_log_config.mutable_log_format()->set_text_format(std::string(format)); } access_log_config.set_path(filename); bootstrap_.mutable_static_resources() @@ -775,8 +947,8 @@ bool ConfigHelper::setListenerAccessLog(const std::string& filename, absl::strin void ConfigHelper::initializeTls( const ServerSslOptions& options, envoy::extensions::transport_sockets::tls::v3::CommonTlsContext& common_tls_context) { - common_tls_context.add_alpn_protocols("h2"); - common_tls_context.add_alpn_protocols("http/1.1"); + common_tls_context.add_alpn_protocols(Http::Utility::AlpnNames::get().Http2); + common_tls_context.add_alpn_protocols(Http::Utility::AlpnNames::get().Http11); auto* validation_context = common_tls_context.mutable_validation_context(); validation_context->mutable_trusted_ca()->set_filename( @@ -925,6 +1097,16 @@ void ConfigHelper::setOutboundFramesLimits(uint32_t max_all_frames, uint32_t max } } +void ConfigHelper::setLocalReply( + const envoy::extensions::filters::network::http_connection_manager::v3::LocalReplyConfig& + config) { + envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager + hcm_config; + loadHttpConnectionManager(hcm_config); + 
hcm_config.mutable_local_reply_config()->MergeFrom(config); + storeHttpConnectionManager(hcm_config); +} + CdsHelper::CdsHelper() : cds_path_(TestEnvironment::writeStringToFileForTest("cds.pb_text", "")) {} void CdsHelper::setCds(const std::vector& clusters) { @@ -967,6 +1149,8 @@ void EdsHelper::setEds(const std::vector& cluster_load_assignments, IntegrationTestServerStats& server_stats) { + // Make sure the last version has been accepted before setting a new one. + server_stats.waitForCounterGe("cluster.cluster_0.update_success", update_successes_); setEds(cluster_load_assignments); // Make sure Envoy has consumed the update now that it is running. ++update_successes_; diff --git a/test/config/utility.h b/test/config/utility.h index 77f0553a7bbfc..ff338722630ec 100644 --- a/test/config/utility.h +++ b/test/config/utility.h @@ -16,6 +16,7 @@ #include "envoy/extensions/transport_sockets/tls/v3/cert.pb.h" #include "envoy/http/codes.h" +#include "common/config/api_version.h" #include "common/network/address_impl.h" #include "common/protobuf/protobuf.h" @@ -27,6 +28,8 @@ namespace Envoy { class ConfigHelper { public: + using HttpConnectionManager = + envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager; struct ServerSslOptions { ServerSslOptions& setRsaCert(bool rsa_cert) { rsa_cert_ = rsa_cert; @@ -67,8 +70,7 @@ class ConfigHelper { envoy::extensions::transport_sockets::tls::v3::CommonTlsContext& common_context); using ConfigModifierFunction = std::function; - using HttpModifierFunction = std::function; + using HttpModifierFunction = std::function; // A basic configuration (admin port, cluster_0, one listener) with no network filters. static std::string baseConfig(); @@ -97,10 +99,37 @@ class ConfigHelper { // Configuration for L7 proxying, with clusters cluster_1 and cluster_2 meant to be added via CDS. // api_type should be REST, GRPC, or DELTA_GRPC. 
static std::string discoveredClustersBootstrap(const std::string& api_type); - static std::string adsBootstrap(const std::string& api_type); + static std::string adsBootstrap(const std::string& api_type, + envoy::config::core::v3::ApiVersion api_version); // Builds a standard Cluster config fragment, with a single endpoint (at address:port). - static envoy::config::cluster::v3::Cluster buildCluster(const std::string& name, int port, - const std::string& address); + static envoy::config::cluster::v3::Cluster buildStaticCluster(const std::string& name, int port, + const std::string& address); + + // ADS configurations + static envoy::config::cluster::v3::Cluster buildCluster( + const std::string& name, const std::string& lb_policy = "ROUND_ROBIN", + envoy::config::core::v3::ApiVersion api_version = envoy::config::core::v3::ApiVersion::V3); + + static envoy::config::cluster::v3::Cluster buildTlsCluster( + const std::string& name, const std::string& lb_policy = "ROUND_ROBIN", + envoy::config::core::v3::ApiVersion api_version = envoy::config::core::v3::ApiVersion::V3); + + static envoy::config::endpoint::v3::ClusterLoadAssignment buildClusterLoadAssignment( + const std::string& name, const std::string& ip_version, uint32_t port, + envoy::config::core::v3::ApiVersion api_version = envoy::config::core::v3::ApiVersion::V3); + + static envoy::config::listener::v3::Listener buildBaseListener( + const std::string& name, const std::string& address, const std::string& filter_chains = "", + envoy::config::core::v3::ApiVersion api_version = envoy::config::core::v3::ApiVersion::V3); + + static envoy::config::listener::v3::Listener buildListener( + const std::string& name, const std::string& route_config, const std::string& address, + const std::string& stat_prefix, + envoy::config::core::v3::ApiVersion api_version = envoy::config::core::v3::ApiVersion::V3); + + static envoy::config::route::v3::RouteConfiguration buildRouteConfig( + const std::string& name, const std::string& 
cluster, + envoy::config::core::v3::ApiVersion api_version = envoy::config::core::v3::ApiVersion::V3); // Builds a standard Endpoint suitable for population by finalize(). static envoy::config::endpoint::v3::Endpoint buildEndpoint(const std::string& address); @@ -155,7 +184,7 @@ class ConfigHelper { void addSslConfig() { addSslConfig({}); } // Set the HTTP access log for the first HCM (if present) to a given file. The default is - // /dev/null. + // the platform's null device. bool setAccessLog(const std::string& filename, absl::string_view format = ""); // Set the listener access log for the first listener to a given file. @@ -196,15 +225,31 @@ class ConfigHelper { void addClusterFilterMetadata(absl::string_view metadata_yaml, absl::string_view cluster_name = "cluster_0"); + // Given an HCM with the default config, set the matcher to be a connect matcher and enable + // CONNECT requests. + static void setConnectConfig(HttpConnectionManager& hcm, bool terminate_connect); + + void setLocalReply( + const envoy::extensions::filters::network::http_connection_manager::v3::LocalReplyConfig& + config); + + // Set new codecs to use for upstream and downstream codecs. + void setNewCodecs(); + private: + static bool shouldBoost(envoy::config::core::v3::ApiVersion api_version) { + return api_version == envoy::config::core::v3::ApiVersion::V2; + } + + static std::string apiVersionStr(envoy::config::core::v3::ApiVersion api_version) { + return api_version == envoy::config::core::v3::ApiVersion::V2 ? "V2" : "V3"; + } + // Load the first HCM struct from the first listener into a parsed proto. - bool loadHttpConnectionManager( - envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& hcm); + bool loadHttpConnectionManager(HttpConnectionManager& hcm); // Take the contents of the provided HCM proto and stuff them into the first HCM // struct of the first listener. 
- void storeHttpConnectionManager( - const envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& - hcm); + void storeHttpConnectionManager(const HttpConnectionManager& hcm); // Finds the filter named 'name' from the first filter chain from the first listener. envoy::config::listener::v3::Filter* getFilterFromListener(const std::string& name); diff --git a/test/config_test/BUILD b/test/config_test/BUILD index a00de6bb96c41..304e92005a497 100644 --- a/test/config_test/BUILD +++ b/test/config_test/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", @@ -9,6 +7,8 @@ load( load("//source/extensions:all_extensions.bzl", "envoy_all_extensions") load("//bazel:repositories.bzl", "PPC_SKIP_TARGETS", "WINDOWS_SKIP_TARGETS") +licenses(["notice"]) # Apache 2 + envoy_package() exports_files(["example_configs_test_setup.sh"]) @@ -22,6 +22,7 @@ envoy_cc_test( "example_configs_test_setup.sh", "//configs:example_configs", ], + tags = ["fails_on_windows"], deps = [ ":config_test_lib", "//test/test_common:environment_lib", @@ -39,7 +40,10 @@ envoy_cc_test_library( "//source/server:configuration_lib", "//source/server/config_validation:server_lib", "//test/integration:integration_lib", - "//test/mocks/server:server_mocks", + "//test/mocks/server:instance_mocks", + "//test/mocks/server:worker_factory_mocks", + "//test/mocks/server:listener_component_factory_mocks", + "//test/mocks/server:worker_mocks", "//test/mocks/ssl:ssl_mocks", "//test/test_common:threadsafe_singleton_injector_lib", "//test/test_common:simulated_time_system_lib", @@ -49,3 +53,19 @@ envoy_cc_test_library( "//conditions:default": envoy_all_extensions(), }), ) + +envoy_cc_test( + name = "deprecated_configs_test", + srcs = [ + "deprecated_configs_test.cc", + ], + deps = [ + ":config_test_lib", + "//source/common/config:api_version_lib", + "//test/test_common:environment_lib", + "//test/test_common:logging_lib", + 
"//test/test_common:utility_lib", + "@envoy_api//envoy/config/bootstrap/v2:pkg_cc_proto", + "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", + ], +) diff --git a/test/config_test/config_test.cc b/test/config_test/config_test.cc index 71f30f2eb11a2..47f7e88037ba5 100644 --- a/test/config_test/config_test.cc +++ b/test/config_test/config_test.cc @@ -15,7 +15,10 @@ #include "server/options_impl.h" #include "test/integration/server.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/instance.h" +#include "test/mocks/server/listener_component_factory.h" +#include "test/mocks/server/worker.h" +#include "test/mocks/server/worker_factory.h" #include "test/mocks/ssl/mocks.h" #include "test/test_common/simulated_time_system.h" #include "test/test_common/threadsafe_singleton_injector.h" @@ -76,6 +79,9 @@ class ConfigTest { ScopedRuntimeInjector scoped_runtime(server_.runtime()); ON_CALL(server_.runtime_loader_.snapshot_, deprecatedFeatureEnabled(_, _)) .WillByDefault(Invoke([](absl::string_view, bool default_value) { return default_value; })); + ON_CALL(server_.runtime_loader_, threadsafeSnapshot()).WillByDefault(Invoke([this]() { + return snapshot_; + })); envoy::config::bootstrap::v3::Bootstrap bootstrap; Server::InstanceUtil::loadBootstrapConfig( @@ -143,7 +149,8 @@ class ConfigTest { NiceMock worker_factory_; Server::ListenerManagerImpl listener_manager_{server_, component_factory_, worker_factory_, false}; - Runtime::RandomGeneratorImpl random_; + Random::RandomGeneratorImpl random_; + Runtime::SnapshotConstSharedPtr snapshot_{std::make_shared>()}; NiceMock os_sys_calls_; TestThreadsafeSingletonInjector os_calls{&os_sys_calls_}; NiceMock file_system_; @@ -179,5 +186,32 @@ uint32_t run(const std::string& directory) { return num_tested; } +void loadVersionedBootstrapFile(const std::string& filename, + envoy::config::bootstrap::v3::Bootstrap& bootstrap_message, + absl::optional bootstrap_version) { + Api::ApiPtr api = Api::createApiForTest(); + 
OptionsImpl options( + Envoy::Server::createTestOptionsImpl(filename, "", Network::Address::IpVersion::v6)); + // Avoid contention issues with other tests over the hot restart domain socket. + options.setHotRestartDisabled(true); + if (bootstrap_version.has_value()) { + options.setBootstrapVersion(*bootstrap_version); + } + Server::InstanceUtil::loadBootstrapConfig(bootstrap_message, options, + ProtobufMessage::getStrictValidationVisitor(), *api); +} + +void loadBootstrapConfigProto(const envoy::config::bootstrap::v3::Bootstrap& in_proto, + envoy::config::bootstrap::v3::Bootstrap& bootstrap_message) { + Api::ApiPtr api = Api::createApiForTest(); + OptionsImpl options( + Envoy::Server::createTestOptionsImpl("", "", Network::Address::IpVersion::v6)); + options.setConfigProto(in_proto); + // Avoid contention issues with other tests over the hot restart domain socket. + options.setHotRestartDisabled(true); + Server::InstanceUtil::loadBootstrapConfig(bootstrap_message, options, + ProtobufMessage::getStrictValidationVisitor(), *api); +} + } // namespace ConfigTest } // namespace Envoy diff --git a/test/config_test/config_test.h b/test/config_test/config_test.h index fafa0cd1fd101..551fffadce33e 100644 --- a/test/config_test/config_test.h +++ b/test/config_test/config_test.h @@ -3,6 +3,10 @@ #include #include +#include "envoy/config/bootstrap/v3/bootstrap.pb.h" + +#include "absl/types/optional.h" + namespace Envoy { namespace ConfigTest { @@ -18,5 +22,20 @@ uint32_t run(const std::string& path); */ void testMerge(); +/** + * Loads the given bootstrap file with an optional bootstrap_version into the + * given bootstrap protobuf message using the server's loadBootstrapConfig. 
+ */ +void loadVersionedBootstrapFile(const std::string& filename, + envoy::config::bootstrap::v3::Bootstrap& bootstrap_message, + absl::optional bootstrap_version = absl::nullopt); + +/** + * Loads the given bootstrap proto into the given bootstrap protobuf message + * using the server's loadBootstrapConfig. + */ +void loadBootstrapConfigProto(const envoy::config::bootstrap::v3::Bootstrap& in_proto, + envoy::config::bootstrap::v3::Bootstrap& bootstrap_message); + } // namespace ConfigTest } // namespace Envoy diff --git a/test/config_test/deprecated_configs_test.cc b/test/config_test/deprecated_configs_test.cc new file mode 100644 index 0000000000000..dbb10707912eb --- /dev/null +++ b/test/config_test/deprecated_configs_test.cc @@ -0,0 +1,239 @@ +#include "envoy/config/bootstrap/v2/bootstrap.pb.h" +#include "envoy/config/bootstrap/v3/bootstrap.pb.h" + +#include "common/config/api_version.h" + +#include "test/config_test/config_test.h" +#include "test/test_common/environment.h" +#include "test/test_common/logging.h" +#include "test/test_common/utility.h" + +#include "gtest/gtest.h" + +using testing::HasSubstr; +using testing::StartsWith; + +namespace Envoy { + +// A deprecated field can be used in previous version text proto and upgraded. 
+TEST(DeprecatedConfigsTest, DEPRECATED_FEATURE_TEST(LoadV2BootstrapTextProtoDeprecatedField)) { + API_NO_BOOST(envoy::config::bootstrap::v2::Bootstrap) + bootstrap = TestUtility::parseYaml(R"EOF( + node: + build_version: foo + )EOF"); + + std::string bootstrap_text; + ASSERT_TRUE(Protobuf::TextFormat::PrintToString(bootstrap, &bootstrap_text)); + const std::string filename = + TestEnvironment::writeStringToFileForTest("proto.pb_text", bootstrap_text); + + // Loading as previous version should work (after upgrade) + API_NO_BOOST(envoy::config::bootstrap::v3::Bootstrap) proto_v2_from_file; + EXPECT_LOG_CONTAINS("warning", "Using deprecated option 'envoy.api.v2.core.Node.build_version'", + ConfigTest::loadVersionedBootstrapFile(filename, proto_v2_from_file, 2)); + EXPECT_EQ("foo", proto_v2_from_file.node().hidden_envoy_deprecated_build_version()); + + // Loading as current version should fail + API_NO_BOOST(envoy::config::bootstrap::v3::Bootstrap) proto_v3_from_file; + EXPECT_THAT_THROWS_MESSAGE( + ConfigTest::loadVersionedBootstrapFile(filename, proto_v3_from_file, 3), EnvoyException, + AllOf(StartsWith("Unable to parse file"), + HasSubstr("as a text protobuf (type envoy.config.bootstrap.v3.Bootstrap)"))); + + API_NO_BOOST(envoy::config::bootstrap::v3::Bootstrap) + bootstrap_v3 = TestUtility::parseYaml(R"EOF( + node: + hidden_envoy_deprecated_build_version: foo + )EOF"); + + std::string bootstrap_text_v3; + ASSERT_TRUE(Protobuf::TextFormat::PrintToString(bootstrap_v3, &bootstrap_text_v3)); + const std::string filename_v3 = + TestEnvironment::writeStringToFileForTest("proto_v3.pb_text", bootstrap_text_v3); + + // Loading v3 with hidden-deprecated field as current version should fail + EXPECT_THAT_THROWS_MESSAGE( + ConfigTest::loadVersionedBootstrapFile(filename_v3, proto_v3_from_file, 3), EnvoyException, + HasSubstr("Illegal use of hidden_envoy_deprecated_ V2 field " + "'envoy.config.core.v3.Node.hidden_envoy_deprecated_build_version'")); + + // Loading v3 with 
hidden-deprecated field with boosting should fail as it + // doesn't appear in v2 and only in v3 but marked as hidden_envoy_deprecated + EXPECT_THAT_THROWS_MESSAGE( + ConfigTest::loadVersionedBootstrapFile(filename_v3, proto_v3_from_file), EnvoyException, + HasSubstr("Illegal use of hidden_envoy_deprecated_ V2 field " + "'envoy.config.core.v3.Node.hidden_envoy_deprecated_build_version'")); +} + +// A deprecated field can be used in previous version binary proto and upgraded. +TEST(DeprecatedConfigsTest, DEPRECATED_FEATURE_TEST(LoadV2BootstrapBinaryProtoDeprecatedField)) { + API_NO_BOOST(envoy::config::bootstrap::v2::Bootstrap) + bootstrap = TestUtility::parseYaml(R"EOF( + node: + build_version: foo + )EOF"); + + std::string bootstrap_binary_str; + bootstrap_binary_str.reserve(bootstrap.ByteSizeLong()); + bootstrap.SerializeToString(&bootstrap_binary_str); + const std::string filename = + TestEnvironment::writeStringToFileForTest("proto.pb", bootstrap_binary_str); + + // Loading as previous version should work (after upgrade) + API_NO_BOOST(envoy::config::bootstrap::v3::Bootstrap) proto_v2_from_file; + EXPECT_LOG_CONTAINS("warning", "Using deprecated option 'envoy.api.v2.core.Node.build_version'", + ConfigTest::loadVersionedBootstrapFile(filename, proto_v2_from_file, 2)); + EXPECT_EQ("foo", proto_v2_from_file.node().hidden_envoy_deprecated_build_version()); + + // Loading as current version should fail + API_NO_BOOST(envoy::config::bootstrap::v3::Bootstrap) proto_v3_from_file; + EXPECT_THAT_THROWS_MESSAGE( + ConfigTest::loadVersionedBootstrapFile(filename, proto_v3_from_file, 3), EnvoyException, + HasSubstr("Illegal use of hidden_envoy_deprecated_ V2 field " + "'envoy.config.core.v3.Node.hidden_envoy_deprecated_build_version'")); + + API_NO_BOOST(envoy::config::bootstrap::v3::Bootstrap) + bootstrap_v3 = TestUtility::parseYaml(R"EOF( + node: + hidden_envoy_deprecated_build_version: foo + )EOF"); + + std::string bootstrap_binary_str_v3; + 
bootstrap_binary_str_v3.reserve(bootstrap.ByteSizeLong()); + bootstrap.SerializeToString(&bootstrap_binary_str_v3); + const std::string filename_v3 = + TestEnvironment::writeStringToFileForTest("proto_v3.pb", bootstrap_binary_str_v3); + + // Loading v3 with hidden-deprecated field as current version should fail + EXPECT_THAT_THROWS_MESSAGE( + ConfigTest::loadVersionedBootstrapFile(filename_v3, proto_v3_from_file, 3), EnvoyException, + HasSubstr("Illegal use of hidden_envoy_deprecated_ V2 field " + "'envoy.config.core.v3.Node.hidden_envoy_deprecated_build_version'")); + + // Loading binary proto v3 with hidden-deprecated field with boosting will + // succeed as it cannot differentiate between v2 with the deprecated field and + // v3 with hidden_envoy_deprecated field + ConfigTest::loadVersionedBootstrapFile(filename_v3, proto_v3_from_file); + EXPECT_EQ("foo", proto_v3_from_file.node().hidden_envoy_deprecated_build_version()); +} + +// A deprecated field can be used in previous version yaml and upgraded. 
+TEST(DeprecatedConfigsTest, DEPRECATED_FEATURE_TEST(LoadV2BootstrapYamlDeprecatedField)) { + API_NO_BOOST(envoy::config::bootstrap::v2::Bootstrap) + bootstrap = TestUtility::parseYaml(R"EOF( + node: + build_version: foo + )EOF"); + + EXPECT_EQ("node:\n build_version: foo", + MessageUtil::getYamlStringFromMessage(bootstrap, true, false)); + const std::string filename = TestEnvironment::writeStringToFileForTest( + "proto.yaml", MessageUtil::getYamlStringFromMessage(bootstrap, false, false)); + + // Loading as previous version should work (after upgrade) + API_NO_BOOST(envoy::config::bootstrap::v3::Bootstrap) proto_v2_from_file; + EXPECT_LOG_CONTAINS("warning", "Using deprecated option 'envoy.api.v2.core.Node.build_version'", + ConfigTest::loadVersionedBootstrapFile(filename, proto_v2_from_file, 2)); + EXPECT_EQ("foo", proto_v2_from_file.node().hidden_envoy_deprecated_build_version()); + + // Loading as current version should fail + API_NO_BOOST(envoy::config::bootstrap::v3::Bootstrap) proto_v3_from_file; + EXPECT_THAT_THROWS_MESSAGE( + ConfigTest::loadVersionedBootstrapFile(filename, proto_v3_from_file, 3), EnvoyException, + AllOf(HasSubstr("type envoy.config.bootstrap.v3.Bootstrap"), + HasSubstr("build_version: Cannot find field"))); + + API_NO_BOOST(envoy::config::bootstrap::v3::Bootstrap) + bootstrap_v3 = TestUtility::parseYaml(R"EOF( + node: + hidden_envoy_deprecated_build_version: foo + )EOF"); + + EXPECT_EQ("node:\n hidden_envoy_deprecated_build_version: foo", + MessageUtil::getYamlStringFromMessage(bootstrap_v3, true, false)); + const std::string filename_v3 = TestEnvironment::writeStringToFileForTest( + "proto_v3.yaml", MessageUtil::getYamlStringFromMessage(bootstrap_v3, false, false)); + + // Loading v3 with hidden-deprecated field as current version should fail + EXPECT_THAT_THROWS_MESSAGE( + ConfigTest::loadVersionedBootstrapFile(filename_v3, proto_v3_from_file, 3), EnvoyException, + HasSubstr("Illegal use of hidden_envoy_deprecated_ V2 field " + 
"'envoy.config.core.v3.Node.hidden_envoy_deprecated_build_version'")); + + // Loading v3 with hidden-deprecated field with boosting should fail as the name + // doesn't appear in v2 and only in v3 but marked as hidden_envoy_deprecated + EXPECT_THAT_THROWS_MESSAGE( + ConfigTest::loadVersionedBootstrapFile(filename_v3, proto_v3_from_file), EnvoyException, + HasSubstr("Illegal use of hidden_envoy_deprecated_ V2 field " + "'envoy.config.core.v3.Node.hidden_envoy_deprecated_build_version'")); +} + +// A deprecated field can be used in previous version json and upgraded. +TEST(DeprecatedConfigsTest, DEPRECATED_FEATURE_TEST(LoadV2BootstrapJsonDeprecatedField)) { + API_NO_BOOST(envoy::config::bootstrap::v2::Bootstrap) + bootstrap = TestUtility::parseYaml(R"EOF( + node: + build_version: foo + )EOF"); + + EXPECT_EQ("{\"node\":{\"build_version\":\"foo\"}}", + MessageUtil::getJsonStringFromMessage(bootstrap, false, false)); + const std::string filename = TestEnvironment::writeStringToFileForTest( + "proto.json", MessageUtil::getJsonStringFromMessage(bootstrap, false, false)); + + // Loading as previous version should work (after upgrade) + API_NO_BOOST(envoy::config::bootstrap::v3::Bootstrap) proto_v2_from_file; + EXPECT_LOG_CONTAINS("warning", "Using deprecated option 'envoy.api.v2.core.Node.build_version'", + ConfigTest::loadVersionedBootstrapFile(filename, proto_v2_from_file, 2)); + EXPECT_EQ("foo", proto_v2_from_file.node().hidden_envoy_deprecated_build_version()); + + // Loading as current version should fail + API_NO_BOOST(envoy::config::bootstrap::v3::Bootstrap) proto_v3_from_file; + EXPECT_THROW_WITH_MESSAGE( + ConfigTest::loadVersionedBootstrapFile(filename, proto_v3_from_file, 3), EnvoyException, + "Protobuf message (type envoy.config.bootstrap.v3.Bootstrap reason INVALID_ARGUMENT:(node) " + "build_version: Cannot find field.) 
has unknown fields"); + + API_NO_BOOST(envoy::config::bootstrap::v3::Bootstrap) + bootstrap_v3 = TestUtility::parseYaml(R"EOF( + node: + hidden_envoy_deprecated_build_version: foo + )EOF"); + + EXPECT_EQ("{\"node\":{\"hidden_envoy_deprecated_build_version\":\"foo\"}}", + MessageUtil::getJsonStringFromMessage(bootstrap_v3, false, false)); + const std::string filename_v3 = TestEnvironment::writeStringToFileForTest( + "proto_v3.json", MessageUtil::getYamlStringFromMessage(bootstrap_v3, false, false)); + + // Loading v3 with hidden-deprecated field as current version should fail + EXPECT_THAT_THROWS_MESSAGE( + ConfigTest::loadVersionedBootstrapFile(filename_v3, proto_v3_from_file, 3), EnvoyException, + AllOf(StartsWith("Unable to parse JSON as proto"), + HasSubstr("hidden_envoy_deprecated_build_version: foo"))); + + // Loading v3 with hidden-deprecated field with boosting should fail as the name + // doesn't appear in v2 and only in v3 but marked as hidden_envoy_deprecated + EXPECT_THAT_THROWS_MESSAGE( + ConfigTest::loadVersionedBootstrapFile(filename_v3, proto_v3_from_file), EnvoyException, + AllOf(StartsWith("Unable to parse JSON as proto"), + HasSubstr("hidden_envoy_deprecated_build_version: foo"))); +} + +// Test the config_proto option when loading from bootstrap +TEST(DeprecatedConfigsTest, DEPRECATED_FEATURE_TEST(LoadV2BootstrapConfigProtoDeprecatedField)) { + API_NO_BOOST(envoy::config::bootstrap::v3::Bootstrap) + in_bootstrap_v3 = TestUtility::parseYaml(R"EOF( + node: + hidden_envoy_deprecated_build_version: foo + )EOF"); + + // Loading v3 with hidden-deprecated field as current version should fail + API_NO_BOOST(envoy::config::bootstrap::v3::Bootstrap) proto_v3_from_file; + EXPECT_THAT_THROWS_MESSAGE( + ConfigTest::loadBootstrapConfigProto(in_bootstrap_v3, proto_v3_from_file), EnvoyException, + HasSubstr("Illegal use of hidden_envoy_deprecated_ V2 field " + "'envoy.config.core.v3.Node.hidden_envoy_deprecated_build_version'")); +} + +} // namespace Envoy diff 
--git a/test/config_test/example_configs_test.cc b/test/config_test/example_configs_test.cc index e6255dcee21ad..788b04f293c84 100644 --- a/test/config_test/example_configs_test.cc +++ b/test/config_test/example_configs_test.cc @@ -21,9 +21,9 @@ TEST(ExampleConfigsTest, All) { #ifdef __APPLE__ // freebind/freebind.yaml is not supported on macOS and disabled via Bazel. - EXPECT_EQ(21UL, ConfigTest::run(directory)); + EXPECT_EQ(35UL, ConfigTest::run(directory)); #else - EXPECT_EQ(22UL, ConfigTest::run(directory)); + EXPECT_EQ(36UL, ConfigTest::run(directory)); #endif ConfigTest::testMerge(); diff --git a/test/coverage/gen_build.sh b/test/coverage/gen_build.sh deleted file mode 100755 index e262c4e99bdc0..0000000000000 --- a/test/coverage/gen_build.sh +++ /dev/null @@ -1,87 +0,0 @@ -#!/bin/bash - -# Generate test/coverage/BUILD, which contains a single envoy_cc_test target -# that contains all C++ based tests suitable for performing the coverage run. A -# single binary (as opposed to multiple test targets) is require to work around -# the crazy in https://github.com/bazelbuild/bazel/issues/1118. This is used by -# the coverage runner script. - -set -e - -[ -z "${BAZEL_BIN}" ] && BAZEL_BIN=bazel -[ -z "${BUILDIFIER_BIN}" ] && BUILDIFIER_BIN=buildifier - -# Path to the generated BUILD file for the coverage target. -[ -z "${BUILD_PATH}" ] && BUILD_PATH="$(dirname "$0")"/BUILD - -# Extra repository information to include when generating coverage targets. This is useful for -# consuming projects. E.g., "@envoy". -[ -z "${REPOSITORY}" ] && REPOSITORY="" - -# This is an extra bazel path to query for additional targets. This is useful for consuming projects -# that want to run coverage over the public envoy code as well as private extensions. -# E.g., "//envoy-lyft/test/..." -[ -z "${EXTRA_QUERY_PATHS}" ] && EXTRA_QUERY_PATHS="" - -rm -f "${BUILD_PATH}" - -if [[ $# -gt 0 ]]; then - COVERAGE_TARGETS=$* -else - COVERAGE_TARGETS=//test/... 
-fi - -# This setting allows consuming projects to only run coverage over private extensions. -if [[ -z "${ONLY_EXTRA_QUERY_PATHS}" ]]; then - for target in ${COVERAGE_TARGETS}; do - TARGETS="$TARGETS $("${BAZEL_BIN}" query ${BAZEL_QUERY_OPTIONS} "attr('tags', 'coverage_test_lib', ${REPOSITORY}${target})" | grep "^//")" - done - - # Run the QUICHE platform api tests for coverage. - if [[ "${COVERAGE_TARGETS}" == "//test/..." ]]; then - TARGETS="$TARGETS $("${BAZEL_BIN}" query ${BAZEL_QUERY_OPTIONS} "attr('tags', 'coverage_test_lib', '@com_googlesource_quiche//:all')" | grep "^@com_googlesource_quiche")" - fi -fi - -if [ -n "${EXTRA_QUERY_PATHS}" ]; then - TARGETS="$TARGETS $("${BAZEL_BIN}" query ${BAZEL_QUERY_OPTIONS} "attr('tags', 'coverage_test_lib', ${EXTRA_QUERY_PATHS})" | grep "^//")" -fi - -( - cat << EOF -# This file is generated by test/coverage/gen_build.sh automatically prior to -# coverage runs. It is under .gitignore. DO NOT EDIT, DO NOT CHECK IN. -load( - "${REPOSITORY}//bazel:envoy_build_system.bzl", - "envoy_cc_test", - "envoy_package", -) - -envoy_package() - -envoy_cc_test( - name = "coverage_tests", - repository = "${REPOSITORY}", - deps = [ -EOF - for t in ${TARGETS} - do - echo " \"$t\"," - done - cat << EOF - ], - # no-remote due to https://github.com/bazelbuild/bazel/issues/4685 - tags = ["manual", "no-remote"], - coverage = False, - # Due to the nature of coverage_tests, the shard of coverage_tests are very uneven, some of - # shard can take 100s and some takes only 10s, so we use the maximum sharding to here to let - # Bazel scheduling them across CPU cores. - # Sharding can be disabled by --test_sharding_strategy=disabled. 
- shard_count = 50, -) -EOF - -) > "${BUILD_PATH}" - -echo "Generated coverage BUILD file at: ${BUILD_PATH}" -"${BUILDIFIER_BIN}" "${BUILD_PATH}" diff --git a/test/dependencies/BUILD b/test/dependencies/BUILD index 2e6ae296b760f..1ef365b90fc2b 100644 --- a/test/dependencies/BUILD +++ b/test/dependencies/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( diff --git a/test/dependencies/curl_test.cc b/test/dependencies/curl_test.cc index 6e11ee6dc7dec..859e68c90d9cd 100644 --- a/test/dependencies/curl_test.cc +++ b/test/dependencies/curl_test.cc @@ -9,28 +9,32 @@ TEST(CurlTest, BuiltWithExpectedFeatures) { // https://curl.haxx.se/libcurl/c/curl_version_info.html. curl_version_info_data* info = curl_version_info(CURLVERSION_NOW); - EXPECT_NE(0, info->features & CURL_VERSION_ASYNCHDNS); - EXPECT_NE(0, info->ares_num); - EXPECT_NE(0, info->features & CURL_VERSION_HTTP2); - EXPECT_NE(0, info->features & CURL_VERSION_LIBZ); + // In sequence as declared in curl.h. 
Overlook any toggle of the + // developer or os elections for DEBUG, CURL DEBUG and LARGE FILE EXPECT_NE(0, info->features & CURL_VERSION_IPV6); - -#ifndef WIN32 - EXPECT_NE(0, info->features & CURL_VERSION_UNIX_SOCKETS); -#else - EXPECT_EQ(0, info->features & CURL_VERSION_UNIX_SOCKETS); -#endif - - EXPECT_EQ(0, info->features & CURL_VERSION_BROTLI); - EXPECT_EQ(0, info->features & CURL_VERSION_GSSAPI); - EXPECT_EQ(0, info->features & CURL_VERSION_GSSNEGOTIATE); EXPECT_EQ(0, info->features & CURL_VERSION_KERBEROS4); - EXPECT_EQ(0, info->features & CURL_VERSION_KERBEROS5); + EXPECT_EQ(0, info->features & CURL_VERSION_SSL); + EXPECT_NE(0, info->features & CURL_VERSION_LIBZ); EXPECT_EQ(0, info->features & CURL_VERSION_NTLM); - EXPECT_EQ(0, info->features & CURL_VERSION_NTLM_WB); + EXPECT_EQ(0, info->features & CURL_VERSION_GSSNEGOTIATE); + EXPECT_NE(0, info->features & CURL_VERSION_ASYNCHDNS); EXPECT_EQ(0, info->features & CURL_VERSION_SPNEGO); - EXPECT_EQ(0, info->features & CURL_VERSION_SSL); + EXPECT_EQ(0, info->features & CURL_VERSION_IDN); EXPECT_EQ(0, info->features & CURL_VERSION_SSPI); + EXPECT_EQ(0, info->features & CURL_VERSION_CONV); + EXPECT_EQ(0, info->features & CURL_VERSION_TLSAUTH_SRP); + EXPECT_EQ(0, info->features & CURL_VERSION_NTLM_WB); + EXPECT_NE(0, info->features & CURL_VERSION_HTTP2); + EXPECT_EQ(0, info->features & CURL_VERSION_GSSAPI); + EXPECT_EQ(0, info->features & CURL_VERSION_KERBEROS5); + EXPECT_NE(0, info->features & CURL_VERSION_UNIX_SOCKETS); + EXPECT_EQ(0, info->features & CURL_VERSION_PSL); + EXPECT_EQ(0, info->features & CURL_VERSION_HTTPS_PROXY); + EXPECT_EQ(0, info->features & CURL_VERSION_MULTI_SSL); + EXPECT_EQ(0, info->features & CURL_VERSION_BROTLI); + EXPECT_EQ(0, info->features & CURL_VERSION_ALTSVC); + EXPECT_EQ(0, info->features & CURL_VERSION_HTTP3); + EXPECT_NE(0, info->ares_num); } } // namespace Dependencies diff --git a/test/exe/BUILD b/test/exe/BUILD index 7540aafa7525f..283086ab4799b 100644 --- a/test/exe/BUILD +++ 
b/test/exe/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", @@ -7,6 +5,8 @@ load( "envoy_sh_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_sh_test( @@ -17,8 +17,9 @@ envoy_sh_test( "//bazel:raw_build_id.ldscript", "//source/exe:envoy-static", ], - # The sh_test helper from Bazel does not work as expected, see: https://github.com/bazelbuild/bazel/issues/10959 - tags = ["fails_on_windows"], + # The Windows equivalent of a binaries' "link stamp" is a resource file descriptor of the + # executable. Our build revision API and output of --version flags are sufficient for now. + tags = ["skip_on_windows"], ) envoy_sh_test( @@ -26,13 +27,12 @@ envoy_sh_test( srcs = ["envoy_static_test.sh"], coverage = False, data = ["//source/exe:envoy-static"], - # For windows, we expect to use a .ps1 script that leverages dumpbin.exe, see: + # TODO(Windows): expect to test to leverage dumpbin.exe to confirm we avoid msvcrt, see # https://github.com/envoyproxy/envoy/pull/8280#pullrequestreview-290187328 - # The sh_test helper from Bazel does not work as expected, see: https://github.com/bazelbuild/bazel/issues/10959 # Sanitizers doesn't like statically linked lib(std)c++ and libgcc, skip this test in that context. 
tags = [ - "fails_on_windows", "no_san", + "skip_on_windows", ], ) @@ -57,8 +57,6 @@ envoy_sh_test( "//bazel:raw_build_id.ldscript", "//source/exe:envoy-static", ], - # The sh_test helper from Bazel does not work as expected, see: https://github.com/bazelbuild/bazel/issues/10959 - tags = ["fails_on_windows"], ) envoy_cc_test( @@ -67,7 +65,8 @@ envoy_cc_test( data = ["//test/config/integration:google_com_proxy_port_0"], deps = [ "//source/common/api:api_lib", - "//source/exe:envoy_main_common_lib", + "//source/exe:main_common_lib", + "//test/mocks/runtime:runtime_mocks", "//test/test_common:contention_lib", "//test/test_common:environment_lib", ], diff --git a/test/exe/main_common_test.cc b/test/exe/main_common_test.cc index 78fe3aeebfcfe..39d0486683d7d 100644 --- a/test/exe/main_common_test.cc +++ b/test/exe/main_common_test.cc @@ -2,6 +2,7 @@ #include "common/common/lock_guard.h" #include "common/common/mutex_tracer_impl.h" +#include "common/common/random_generator.h" #include "common/common/thread.h" #include "common/runtime/runtime_impl.h" @@ -9,10 +10,12 @@ #include "server/options_impl.h" +#include "test/mocks/common.h" #include "test/test_common/contention.h" #include "test/test_common/environment.h" #include "test/test_common/utility.h" +#include "gmock/gmock.h" #include "gtest/gtest.h" #ifdef ENVOY_HANDLE_SIGNALS @@ -23,12 +26,13 @@ using testing::HasSubstr; using testing::IsEmpty; +using testing::NiceMock; +using testing::Return; namespace Envoy { /** - * Captures common functions needed for invoking MainCommon. Generates a - * unique --base-id setting based on the pid and a random number. Maintains + * Captures common functions needed for invoking MainCommon.Maintains * an argv array that is terminated with nullptr. Identifies the config * file relative to runfiles directory. 
*/ @@ -38,34 +42,7 @@ class MainCommonTest : public testing::TestWithParam argv_; }; +INSTANTIATE_TEST_SUITE_P(IpVersions, MainCommonTest, + testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + TestUtility::ipTestParamsToString); // Exercise the codepath to instantiate MainCommon and destruct it, with hot restart. TEST_P(MainCommonTest, ConstructDestructHotRestartEnabled) { @@ -109,6 +88,54 @@ TEST_P(MainCommonTest, ConstructDestructHotRestartDisabledNoInit) { EXPECT_TRUE(main_common.run()); } +// Exercise base-id-path option. +TEST_P(MainCommonTest, ConstructWritesBasePathId) { +#ifdef ENVOY_HOT_RESTART + const std::string base_id_path = TestEnvironment::temporaryPath("base-id-file"); + addArg("--base-id-path"); + addArg(base_id_path.c_str()); + VERBOSE_EXPECT_NO_THROW(MainCommon main_common(argc(), argv())); + + EXPECT_NE("", TestEnvironment::readFileToStringForTest(base_id_path)); +#endif +} + +// Test that an in-use base id triggers a retry and that we eventually give up. +TEST_P(MainCommonTest, RetryDynamicBaseIdFails) { +#ifdef ENVOY_HOT_RESTART + PlatformImpl platform; + Event::TestRealTimeSystem real_time_system; + DefaultListenerHooks default_listener_hooks; + ProdComponentFactory prod_component_factory; + + const std::string base_id_path = TestEnvironment::temporaryPath("base-id-file"); + + const auto first_args = std::vector({"envoy-static", "--use-dynamic-base-id", "-c", + config_file_, "--base-id-path", base_id_path}); + OptionsImpl first_options(first_args, &MainCommon::hotRestartVersion, spdlog::level::info); + MainCommonBase first(first_options, real_time_system, default_listener_hooks, + prod_component_factory, std::make_unique(), + platform.threadFactory(), platform.fileSystem(), nullptr); + + const std::string base_id_str = TestEnvironment::readFileToStringForTest(base_id_path); + uint32_t base_id; + ASSERT_TRUE(absl::SimpleAtoi(base_id_str, &base_id)); + + auto* mock_rng = new NiceMock(); + EXPECT_CALL(*mock_rng, 
random()).WillRepeatedly(Return(base_id)); + + const auto second_args = + std::vector({"envoy-static", "--use-dynamic-base-id", "-c", config_file_}); + OptionsImpl second_options(second_args, &MainCommon::hotRestartVersion, spdlog::level::info); + + EXPECT_THROW_WITH_MESSAGE( + MainCommonBase(second_options, real_time_system, default_listener_hooks, + prod_component_factory, std::unique_ptr{mock_rng}, + platform.threadFactory(), platform.fileSystem(), nullptr), + EnvoyException, "unable to select a dynamic base id"); +#endif +} + // Test that std::set_new_handler() was called and the callback functions as expected. // This test fails under TSAN and ASAN, so don't run it in that build: // [ DEATH ] ==845==ERROR: ThreadSanitizer: requested allocation size 0x3e800000000 @@ -119,9 +146,14 @@ TEST_P(MainCommonTest, ConstructDestructHotRestartDisabledNoInit) { // of 0x10000000000 (thread T0) class MainCommonDeathTest : public MainCommonTest {}; +INSTANTIATE_TEST_SUITE_P(IpVersions, MainCommonDeathTest, + testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + TestUtility::ipTestParamsToString); TEST_P(MainCommonDeathTest, OutOfMemoryHandler) { -#if defined(__has_feature) && (__has_feature(thread_sanitizer) || __has_feature(address_sanitizer)) +#if defined(__clang_analyzer__) || (defined(__has_feature) && (__has_feature(thread_sanitizer) || \ + __has_feature(address_sanitizer) || \ + __has_feature(memory_sanitizer))) ENVOY_LOG_MISC(critical, "MainCommonTest::OutOfMemoryHandler not supported by this compiler configuration"); #else @@ -131,7 +163,7 @@ TEST_P(MainCommonDeathTest, OutOfMemoryHandler) { // so disable handling that signal. 
signal(SIGABRT, SIG_DFL); #endif - EXPECT_DEATH_LOG_TO_STDERR( + EXPECT_DEATH( []() { // Allocating a fixed-size large array that results in OOM on gcc // results in a compile-time error on clang of "array size too big", @@ -140,17 +172,15 @@ TEST_P(MainCommonDeathTest, OutOfMemoryHandler) { for (uint64_t size = initial; size >= initial; // Disallow wraparound to avoid infinite loops on failure. size *= 1000) { - new int[size]; + int* p = new int[size]; + // Use the pointer to prevent clang from optimizing the allocation away in opt mode. + ENVOY_LOG_MISC(debug, "p={}", reinterpret_cast(p)); } }(), ".*panic: out of memory.*"); #endif } -INSTANTIATE_TEST_SUITE_P(IpVersions, MainCommonTest, - testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), - TestUtility::ipTestParamsToString); - class AdminRequestTest : public MainCommonTest { protected: AdminRequestTest() { addArg("--disable-hot-restart"); } @@ -242,6 +272,9 @@ class AdminRequestTest : public MainCommonTest { bool pause_before_run_{false}; bool pause_after_run_{false}; }; +INSTANTIATE_TEST_SUITE_P(IpVersions, AdminRequestTest, + testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + TestUtility::ipTestParamsToString); TEST_P(AdminRequestTest, AdminRequestGetStatsAndQuit) { startEnvoy(); @@ -405,8 +438,4 @@ TEST_P(MainCommonTest, ConstructDestructLogger) { Logger::Registry::getSink()->log(log_msg); } -INSTANTIATE_TEST_SUITE_P(IpVersions, AdminRequestTest, - testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), - TestUtility::ipTestParamsToString); - } // namespace Envoy diff --git a/test/exe/terminate_handler_test.cc b/test/exe/terminate_handler_test.cc index f37774352b3a3..d48782242ae99 100644 --- a/test/exe/terminate_handler_test.cc +++ b/test/exe/terminate_handler_test.cc @@ -8,7 +8,7 @@ namespace Envoy { TEST(TerminateHandlerDeathTest, HandlerInstalledTest) { TerminateHandler handler; - EXPECT_DEATH_LOG_TO_STDERR([]() -> void { std::terminate(); }(), ".*std::terminate called!.*"); + 
EXPECT_DEATH([]() -> void { std::terminate(); }(), ".*std::terminate called!.*"); } } // namespace Envoy diff --git a/test/extensions/access_loggers/common/BUILD b/test/extensions/access_loggers/common/BUILD index 9dbb3c91c70ff..a6f87344a490e 100644 --- a/test/extensions/access_loggers/common/BUILD +++ b/test/extensions/access_loggers/common/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( diff --git a/test/extensions/access_loggers/file/BUILD b/test/extensions/access_loggers/file/BUILD index 78434bcf535ba..4fe61e7764834 100644 --- a/test/extensions/access_loggers/file/BUILD +++ b/test/extensions/access_loggers/file/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( @@ -17,8 +17,9 @@ envoy_extension_cc_test( extension_name = "envoy.access_loggers.file", deps = [ "//source/extensions/access_loggers/file:config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "//test/test_common:environment_lib", + "//test/test_common:utility_lib", "@envoy_api//envoy/config/accesslog/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/access_loggers/file/v3:pkg_cc_proto", ], diff --git a/test/extensions/access_loggers/file/config_test.cc b/test/extensions/access_loggers/file/config_test.cc index 19343c80bdbec..9d7215e3f7136 100644 --- a/test/extensions/access_loggers/file/config_test.cc +++ b/test/extensions/access_loggers/file/config_test.cc @@ -9,18 +9,21 @@ #include "extensions/access_loggers/file/file_access_log_impl.h" #include "extensions/access_loggers/well_known_names.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" +#include "test/test_common/utility.h" #include 
"gmock/gmock.h" #include "gtest/gtest.h" +using testing::Return; + namespace Envoy { namespace Extensions { namespace AccessLoggers { namespace File { namespace { -TEST(FileAccessLogConfigTest, ValidateFail) { +TEST(FileAccessLogNegativeTest, ValidateFail) { NiceMock context; EXPECT_THROW(FileAccessLogFactory().createAccessLogInstance( @@ -28,7 +31,7 @@ TEST(FileAccessLogConfigTest, ValidateFail) { ProtoValidationException); } -TEST(FileAccessLogConfigTest, ConfigureFromProto) { +TEST(FileAccessLogNegativeTest, InvalidNameFail) { envoy::config::accesslog::v3::AccessLog config; NiceMock context; @@ -39,149 +42,137 @@ TEST(FileAccessLogConfigTest, ConfigureFromProto) { EXPECT_THROW_WITH_MESSAGE(AccessLog::AccessLogFactory::fromProto(config, context), EnvoyException, "Didn't find a registered implementation for name: 'INVALID'"); - - envoy::extensions::access_loggers::file::v3::FileAccessLog fal_config; - fal_config.set_path("/dev/null"); - - config.mutable_typed_config()->PackFrom(fal_config); - - config.set_name(AccessLogNames::get().File); - - AccessLog::InstanceSharedPtr log = AccessLog::AccessLogFactory::fromProto(config, context); - - EXPECT_NE(nullptr, log); - EXPECT_NE(nullptr, dynamic_cast(log.get())); } -TEST(FileAccessLogConfigTest, FileAccessLogTest) { - auto factory = - Registry::FactoryRegistry::getFactory( - AccessLogNames::get().File); - ASSERT_NE(nullptr, factory); +class FileAccessLogTest : public testing::Test { +public: + FileAccessLogTest() = default; - ProtobufTypes::MessagePtr message = factory->createEmptyConfigProto(); - ASSERT_NE(nullptr, message); + void runTest(const std::string& yaml, absl::string_view expected, bool is_json) { + envoy::extensions::access_loggers::file::v3::FileAccessLog fal_config; + TestUtility::loadFromYaml(yaml, fal_config); - envoy::extensions::access_loggers::file::v3::FileAccessLog file_access_log; - file_access_log.set_path("/dev/null"); - file_access_log.set_format("%START_TIME%"); - 
TestUtility::jsonConvert(file_access_log, *message); + envoy::config::accesslog::v3::AccessLog config; + config.mutable_typed_config()->PackFrom(fal_config); - AccessLog::FilterPtr filter; - NiceMock context; + auto file = std::make_shared(); + EXPECT_CALL(context_.access_log_manager_, createAccessLog(fal_config.path())) + .WillOnce(Return(file)); + + AccessLog::InstanceSharedPtr logger = AccessLog::AccessLogFactory::fromProto(config, context_); + + absl::Time abslStartTime = + TestUtility::parseTime("Dec 18 01:50:34 2018 GMT", "%b %e %H:%M:%S %Y GMT"); + stream_info_.start_time_ = absl::ToChronoTime(abslStartTime); + EXPECT_CALL(stream_info_, upstreamHost()).WillRepeatedly(Return(nullptr)); + stream_info_.response_code_ = 200; + + EXPECT_CALL(*file, write(_)).WillOnce(Invoke([expected, is_json](absl::string_view got) { + if (is_json) { + EXPECT_TRUE(TestUtility::jsonStringEqual(std::string(got), std::string(expected))); + } else { + EXPECT_EQ(got, expected); + } + })); + logger->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_); + } - AccessLog::InstanceSharedPtr instance = - factory->createAccessLogInstance(*message, std::move(filter), context); - EXPECT_NE(nullptr, instance); - EXPECT_NE(nullptr, dynamic_cast(instance.get())); + Http::TestRequestHeaderMapImpl request_headers_{{":method", "GET"}, {":path", "/bar/foo"}}; + Http::TestResponseHeaderMapImpl response_headers_; + Http::TestResponseTrailerMapImpl response_trailers_; + NiceMock stream_info_; + + NiceMock context_; +}; + +TEST_F(FileAccessLogTest, DEPRECATED_FEATURE_TEST(LegacyFormatEmpty)) { + runTest( + R"( + path: "/foo" + format: "" +)", + "[2018-12-18T01:50:34.000Z] \"GET /bar/foo -\" 200 - 0 0 - - \"-\" \"-\" \"-\" \"-\" \"-\"\n", + false); } -TEST(FileAccessLogConfigTest, FileAccessLogJsonTest) { - envoy::config::accesslog::v3::AccessLog config; - - NiceMock context; - EXPECT_THROW_WITH_MESSAGE(AccessLog::AccessLogFactory::fromProto(config, context), EnvoyException, - 
"Provided name for static registration lookup was empty."); - - config.set_name("INVALID"); - - EXPECT_THROW_WITH_MESSAGE(AccessLog::AccessLogFactory::fromProto(config, context), EnvoyException, - "Didn't find a registered implementation for name: 'INVALID'"); - - envoy::extensions::access_loggers::file::v3::FileAccessLog fal_config; - fal_config.set_path("/dev/null"); - - ProtobufWkt::Value string_value; - string_value.set_string_value("%PROTOCOL%"); - - auto json_format = fal_config.mutable_json_format(); - (*json_format->mutable_fields())["protocol"] = string_value; - - EXPECT_EQ( - fal_config.access_log_format_case(), - envoy::extensions::access_loggers::file::v3::FileAccessLog::AccessLogFormatCase::kJsonFormat); - config.mutable_typed_config()->PackFrom(fal_config); - - config.set_name(AccessLogNames::get().File); - - AccessLog::InstanceSharedPtr log = AccessLog::AccessLogFactory::fromProto(config, context); - - EXPECT_NE(nullptr, log); - EXPECT_NE(nullptr, dynamic_cast(log.get())); +TEST_F(FileAccessLogTest, DEPRECATED_FEATURE_TEST(LegacyFormatPlainText)) { + runTest( + R"( + path: "/foo" + format: "plain_text" +)", + "plain_text", false); } -TEST(FileAccessLogConfigTest, FileAccessLogTypedJsonTest) { - envoy::config::accesslog::v3::AccessLog config; - - envoy::extensions::access_loggers::file::v3::FileAccessLog fal_config; - fal_config.set_path("/dev/null"); - - ProtobufWkt::Value string_value; - string_value.set_string_value("%PROTOCOL%"); - - auto json_format = fal_config.mutable_typed_json_format(); - (*json_format->mutable_fields())["protocol"] = string_value; - - EXPECT_EQ(fal_config.access_log_format_case(), - envoy::extensions::access_loggers::file::v3::FileAccessLog::AccessLogFormatCase:: - kTypedJsonFormat); - config.mutable_typed_config()->PackFrom(fal_config); - - config.set_name(AccessLogNames::get().File); - - NiceMock context; - AccessLog::InstanceSharedPtr log = AccessLog::AccessLogFactory::fromProto(config, context); - - EXPECT_NE(nullptr, 
log); - EXPECT_NE(nullptr, dynamic_cast(log.get())); +TEST_F(FileAccessLogTest, DEPRECATED_FEATURE_TEST(LegacyJsonFormat)) { + runTest( + R"( + path: "/foo" + json_format: + text: "plain text" + path: "%REQ(:path)%" + code: "%RESPONSE_CODE%" +)", + R"({ + "text": "plain text", + "path": "/bar/foo", + "code": "200" +})", + true); } -TEST(FileAccessLogConfigTest, FileAccessLogJsonWithBoolValueTest) { - { - // Make sure we fail if you set a bool value in the format dictionary - envoy::config::accesslog::v3::AccessLog config; - config.set_name(AccessLogNames::get().File); - envoy::extensions::access_loggers::file::v3::FileAccessLog fal_config; - fal_config.set_path("/dev/null"); - - ProtobufWkt::Value bool_value; - bool_value.set_bool_value(false); - auto json_format = fal_config.mutable_json_format(); - (*json_format->mutable_fields())["protocol"] = bool_value; - - config.mutable_typed_config()->PackFrom(fal_config); - NiceMock context; - - EXPECT_THROW_WITH_MESSAGE(AccessLog::AccessLogFactory::fromProto(config, context), - EnvoyException, - "Only string values are supported in the JSON access log format."); - } +TEST_F(FileAccessLogTest, DEPRECATED_FEATURE_TEST(LegacyTypedJsonFormat)) { + runTest( + R"( + path: "/foo" + typed_json_format: + text: "plain text" + path: "%REQ(:path)%" + code: "%RESPONSE_CODE%" +)", + R"({ + "text": "plain text", + "path": "/bar/foo", + "code": 200 +})", + true); } -TEST(FileAccessLogConfigTest, FileAccessLogJsonWithNestedKeyTest) { - { - // Make sure we fail if you set a nested Struct value in the format dictionary - envoy::config::accesslog::v3::AccessLog config; - config.set_name(AccessLogNames::get().File); - envoy::extensions::access_loggers::file::v3::FileAccessLog fal_config; - fal_config.set_path("/dev/null"); - - ProtobufWkt::Value string_value; - string_value.set_string_value("some_nested_value"); - - ProtobufWkt::Value struct_value; - (*struct_value.mutable_struct_value()->mutable_fields())["some_nested_key"] = string_value; - 
- auto json_format = fal_config.mutable_json_format(); - (*json_format->mutable_fields())["top_level_key"] = struct_value; +TEST_F(FileAccessLogTest, EmptyFormat) { + runTest( + R"( + path: "/foo" +)", + "[2018-12-18T01:50:34.000Z] \"GET /bar/foo -\" 200 - 0 0 - - \"-\" \"-\" \"-\" \"-\" \"-\"\n", + false); +} - config.mutable_typed_config()->PackFrom(fal_config); - NiceMock context; +TEST_F(FileAccessLogTest, LogFormatText) { + runTest( + R"( + path: "/foo" + log_format: + text_format: "plain_text - %REQ(:path)% - %RESPONSE_CODE%" +)", + "plain_text - /bar/foo - 200", false); +} - EXPECT_THROW_WITH_MESSAGE(AccessLog::AccessLogFactory::fromProto(config, context), - EnvoyException, - "Only string values are supported in the JSON access log format."); - } +TEST_F(FileAccessLogTest, LogFormatJson) { + runTest( + R"( + path: "/foo" + log_format: + json_format: + text: "plain text" + path: "%REQ(:path)%" + code: "%RESPONSE_CODE%" +)", + R"({ + "text": "plain text", + "path": "/bar/foo", + "code": 200 +})", + true); } } // namespace diff --git a/test/extensions/access_loggers/grpc/BUILD b/test/extensions/access_loggers/grpc/BUILD index c79d115396655..b573bc6082f5f 100644 --- a/test/extensions/access_loggers/grpc/BUILD +++ b/test/extensions/access_loggers/grpc/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( @@ -23,6 +23,7 @@ envoy_extension_cc_test( "//test/mocks/ssl:ssl_mocks", "//test/mocks/stream_info:stream_info_mocks", "//test/mocks/thread_local:thread_local_mocks", + "//test/test_common:test_runtime_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", "@envoy_api//envoy/data/accesslog/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/access_loggers/grpc/v3:pkg_cc_proto", @@ -67,7 +68,7 @@ envoy_extension_cc_test( extension_name = "envoy.access_loggers.http_grpc", deps = [ 
"//source/extensions/access_loggers/grpc:http_config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/access_loggers/grpc/v3:pkg_cc_proto", ], @@ -77,6 +78,7 @@ envoy_extension_cc_test( name = "http_grpc_access_log_integration_test", srcs = ["http_grpc_access_log_integration_test.cc"], extension_name = "envoy.access_loggers.http_grpc", + tags = ["fails_on_windows"], deps = [ "//source/common/buffer:zero_copy_input_stream_lib", "//source/common/grpc:codec_lib", @@ -96,6 +98,7 @@ envoy_extension_cc_test( name = "tcp_grpc_access_log_integration_test", srcs = ["tcp_grpc_access_log_integration_test.cc"], extension_name = "envoy.access_loggers.http_grpc", + tags = ["fails_on_windows"], deps = [ "//source/common/buffer:zero_copy_input_stream_lib", "//source/common/grpc:codec_lib", diff --git a/test/extensions/access_loggers/grpc/grpc_access_log_impl_test.cc b/test/extensions/access_loggers/grpc/grpc_access_log_impl_test.cc index 1a5ac2d7f61e5..6ea00508fc7fb 100644 --- a/test/extensions/access_loggers/grpc/grpc_access_log_impl_test.cc +++ b/test/extensions/access_loggers/grpc/grpc_access_log_impl_test.cc @@ -16,11 +16,14 @@ #include "test/mocks/ssl/mocks.h" #include "test/mocks/stream_info/mocks.h" #include "test/mocks/thread_local/mocks.h" +#include "test/test_common/test_runtime.h" using testing::_; +using testing::AnyNumber; using testing::InSequence; using testing::Invoke; using testing::NiceMock; +using testing::Return; namespace Envoy { namespace Extensions { @@ -39,9 +42,10 @@ class GrpcAccessLoggerImplTest : public testing::Test { void initLogger(std::chrono::milliseconds buffer_flush_interval_msec, size_t buffer_size_bytes) { timer_ = new Event::MockTimer(&dispatcher_); EXPECT_CALL(*timer_, enableTimer(buffer_flush_interval_msec, _)); - logger_ = std::make_unique(Grpc::RawAsyncClientPtr{async_client_}, - log_name_, buffer_flush_interval_msec, - 
buffer_size_bytes, dispatcher_, local_info_); + logger_ = std::make_unique( + Grpc::RawAsyncClientPtr{async_client_}, log_name_, buffer_flush_interval_msec, + buffer_size_bytes, dispatcher_, local_info_, stats_store_, + envoy::config::core::v3::ApiVersion::AUTO); } void expectStreamStart(MockAccessLogStream& stream, AccessLogCallbacks** callbacks_to_set) { @@ -57,6 +61,7 @@ class GrpcAccessLoggerImplTest : public testing::Test { void expectStreamMessage(MockAccessLogStream& stream, const std::string& expected_message_yaml) { envoy::service::accesslog::v3::StreamAccessLogsMessage expected_message; TestUtility::loadFromYaml(expected_message_yaml, expected_message); + EXPECT_CALL(stream, isAboveWriteBufferHighWatermark()).WillOnce(Return(false)); EXPECT_CALL(stream, sendMessageRaw_(_, false)) .WillOnce(Invoke([expected_message](Buffer::InstancePtr& request, bool) { envoy::service::accesslog::v3::StreamAccessLogsMessage message; @@ -66,12 +71,13 @@ class GrpcAccessLoggerImplTest : public testing::Test { })); } + Stats::IsolatedStoreImpl stats_store_; std::string log_name_ = "test_log_name"; LocalInfo::MockLocalInfo local_info_; Event::MockTimer* timer_ = nullptr; Event::MockDispatcher dispatcher_; Grpc::MockAsyncClient* async_client_{new Grpc::MockAsyncClient}; - std::unique_ptr logger_; + GrpcAccessLoggerImplPtr logger_; }; // Test basic stream logging flow. 
@@ -100,6 +106,9 @@ TEST_F(GrpcAccessLoggerImplTest, BasicFlow) { envoy::data::accesslog::v3::HTTPAccessLogEntry entry; entry.mutable_request()->set_path("/test/path1"); logger_->log(envoy::data::accesslog::v3::HTTPAccessLogEntry(entry)); + EXPECT_EQ( + 1, + TestUtility::findCounter(stats_store_, "access_logs.grpc_access_log.logs_written")->value()); expectStreamMessage(stream, R"EOF( http_logs: @@ -109,6 +118,9 @@ TEST_F(GrpcAccessLoggerImplTest, BasicFlow) { )EOF"); entry.mutable_request()->set_path("/test/path2"); logger_->log(envoy::data::accesslog::v3::HTTPAccessLogEntry(entry)); + EXPECT_EQ( + 2, + TestUtility::findCounter(stats_store_, "access_logs.grpc_access_log.logs_written")->value()); // Verify that sending an empty response message doesn't do anything bad. callbacks->onReceiveMessage( @@ -133,8 +145,103 @@ TEST_F(GrpcAccessLoggerImplTest, BasicFlow) { )EOF"); entry.mutable_request()->set_path("/test/path3"); logger_->log(envoy::data::accesslog::v3::HTTPAccessLogEntry(entry)); + EXPECT_EQ( + 0, + TestUtility::findCounter(stats_store_, "access_logs.grpc_access_log.logs_dropped")->value()); + EXPECT_EQ( + 3, + TestUtility::findCounter(stats_store_, "access_logs.grpc_access_log.logs_written")->value()); } +TEST_F(GrpcAccessLoggerImplTest, WatermarksOverrun) { + InSequence s; + initLogger(FlushInterval, 1); + + // Start a stream for the first log. + MockAccessLogStream stream; + AccessLogCallbacks* callbacks; + expectStreamStart(stream, &callbacks); + EXPECT_CALL(local_info_, node()); + + // Fail to flush, so the log stays buffered up. 
+ envoy::data::accesslog::v3::HTTPAccessLogEntry entry; + entry.mutable_request()->set_path("/test/path1"); + EXPECT_CALL(stream, isAboveWriteBufferHighWatermark()).WillOnce(Return(true)); + EXPECT_CALL(stream, sendMessageRaw_(_, false)).Times(0); + logger_->log(envoy::data::accesslog::v3::HTTPAccessLogEntry(entry)); + EXPECT_EQ( + 1, + TestUtility::findCounter(stats_store_, "access_logs.grpc_access_log.logs_written")->value()); + EXPECT_EQ( + 0, + TestUtility::findCounter(stats_store_, "access_logs.grpc_access_log.logs_dropped")->value()); + + // Now canLogMore will fail, and the next log will be dropped. + EXPECT_CALL(stream, isAboveWriteBufferHighWatermark()).WillOnce(Return(true)); + EXPECT_CALL(stream, sendMessageRaw_(_, _)).Times(0); + logger_->log(envoy::data::accesslog::v3::HTTPAccessLogEntry(entry)); + EXPECT_EQ( + 1, + TestUtility::findCounter(stats_store_, "access_logs.grpc_access_log.logs_written")->value()); + EXPECT_EQ( + 1, + TestUtility::findCounter(stats_store_, "access_logs.grpc_access_log.logs_dropped")->value()); + + // Now allow the flush to happen. The stored log will get logged, and the next log will succeed. + EXPECT_CALL(stream, isAboveWriteBufferHighWatermark()).WillOnce(Return(false)); + EXPECT_CALL(stream, sendMessageRaw_(_, _)).Times(1); + EXPECT_CALL(stream, isAboveWriteBufferHighWatermark()).WillOnce(Return(false)); + EXPECT_CALL(stream, sendMessageRaw_(_, _)).Times(1); + logger_->log(envoy::data::accesslog::v3::HTTPAccessLogEntry(entry)); + EXPECT_EQ( + 2, + TestUtility::findCounter(stats_store_, "access_logs.grpc_access_log.logs_written")->value()); + EXPECT_EQ( + 1, + TestUtility::findCounter(stats_store_, "access_logs.grpc_access_log.logs_dropped")->value()); +} + +// Test legacy behavior of unbounded access logs. 
+TEST_F(GrpcAccessLoggerImplTest, WatermarksLegacy) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.disallow_unbounded_access_logs", "false"}}); + + InSequence s; + initLogger(FlushInterval, 1); + + // Start a stream for the first log. + MockAccessLogStream stream; + AccessLogCallbacks* callbacks; + expectStreamStart(stream, &callbacks); + EXPECT_CALL(local_info_, node()); + + EXPECT_CALL(stream, isAboveWriteBufferHighWatermark()) + .Times(AnyNumber()) + .WillRepeatedly(Return(true)); + + // Fail to flush, so the log stays buffered up. + envoy::data::accesslog::v3::HTTPAccessLogEntry entry; + entry.mutable_request()->set_path("/test/path1"); + EXPECT_CALL(stream, sendMessageRaw_(_, false)).Times(0); + logger_->log(envoy::data::accesslog::v3::HTTPAccessLogEntry(entry)); + EXPECT_EQ( + 1, + TestUtility::findCounter(stats_store_, "access_logs.grpc_access_log.logs_written")->value()); + EXPECT_EQ( + 0, + TestUtility::findCounter(stats_store_, "access_logs.grpc_access_log.logs_dropped")->value()); + + // As with the above test, try to log more. The log will not be dropped. + EXPECT_CALL(stream, sendMessageRaw_(_, _)).Times(0); + logger_->log(envoy::data::accesslog::v3::HTTPAccessLogEntry(entry)); + EXPECT_EQ( + 2, + TestUtility::findCounter(stats_store_, "access_logs.grpc_access_log.logs_written")->value()); + EXPECT_EQ( + 0, + TestUtility::findCounter(stats_store_, "access_logs.grpc_access_log.logs_dropped")->value()); +} // Test that stream failure is handled correctly. 
TEST_F(GrpcAccessLoggerImplTest, StreamFailure) { InSequence s; @@ -265,11 +372,12 @@ class GrpcAccessLoggerCacheImplTest : public testing::Test { Grpc::MockAsyncClientManager async_client_manager_; Grpc::MockAsyncClient* async_client_ = nullptr; Grpc::MockAsyncClientFactory* factory_ = nullptr; - std::unique_ptr logger_cache_; + GrpcAccessLoggerCacheImplPtr logger_cache_; NiceMock scope_; }; TEST_F(GrpcAccessLoggerCacheImplTest, Deduplication) { + Stats::IsolatedStoreImpl scope; InSequence s; envoy::extensions::access_loggers::grpc::v3::CommonGrpcAccessLogConfig config; @@ -278,25 +386,25 @@ TEST_F(GrpcAccessLoggerCacheImplTest, Deduplication) { expectClientCreation(); GrpcAccessLoggerSharedPtr logger1 = - logger_cache_->getOrCreateLogger(config, GrpcAccessLoggerType::HTTP); - EXPECT_EQ(logger1, logger_cache_->getOrCreateLogger(config, GrpcAccessLoggerType::HTTP)); + logger_cache_->getOrCreateLogger(config, GrpcAccessLoggerType::HTTP, scope); + EXPECT_EQ(logger1, logger_cache_->getOrCreateLogger(config, GrpcAccessLoggerType::HTTP, scope)); // Do not deduplicate different types of logger expectClientCreation(); - EXPECT_NE(logger1, logger_cache_->getOrCreateLogger(config, GrpcAccessLoggerType::TCP)); + EXPECT_NE(logger1, logger_cache_->getOrCreateLogger(config, GrpcAccessLoggerType::TCP, scope)); // Changing log name leads to another logger. config.set_log_name("log-2"); expectClientCreation(); - EXPECT_NE(logger1, logger_cache_->getOrCreateLogger(config, GrpcAccessLoggerType::HTTP)); + EXPECT_NE(logger1, logger_cache_->getOrCreateLogger(config, GrpcAccessLoggerType::HTTP, scope)); config.set_log_name("log-1"); - EXPECT_EQ(logger1, logger_cache_->getOrCreateLogger(config, GrpcAccessLoggerType::HTTP)); + EXPECT_EQ(logger1, logger_cache_->getOrCreateLogger(config, GrpcAccessLoggerType::HTTP, scope)); // Changing cluster name leads to another logger. 
config.mutable_grpc_service()->mutable_envoy_grpc()->set_cluster_name("cluster-2"); expectClientCreation(); - EXPECT_NE(logger1, logger_cache_->getOrCreateLogger(config, GrpcAccessLoggerType::HTTP)); + EXPECT_NE(logger1, logger_cache_->getOrCreateLogger(config, GrpcAccessLoggerType::HTTP, scope)); } } // namespace diff --git a/test/extensions/access_loggers/grpc/grpc_access_log_utils_test.cc b/test/extensions/access_loggers/grpc/grpc_access_log_utils_test.cc index 90d18811c43ac..5e3a4460e6bf0 100644 --- a/test/extensions/access_loggers/grpc/grpc_access_log_utils_test.cc +++ b/test/extensions/access_loggers/grpc/grpc_access_log_utils_test.cc @@ -40,6 +40,10 @@ TEST(UtilityResponseFlagsToAccessLogResponseFlagsTest, All) { common_access_log_expected.mutable_response_flags()->set_stream_idle_timeout(true); common_access_log_expected.mutable_response_flags()->set_invalid_envoy_request_headers(true); common_access_log_expected.mutable_response_flags()->set_downstream_protocol_error(true); + common_access_log_expected.mutable_response_flags()->set_upstream_max_stream_duration_reached( + true); + common_access_log_expected.mutable_response_flags()->set_response_from_cache_filter(true); + common_access_log_expected.mutable_response_flags()->set_no_filter_config_found(true); EXPECT_EQ(common_access_log_expected.DebugString(), common_access_log.DebugString()); } diff --git a/test/extensions/access_loggers/grpc/http_config_test.cc b/test/extensions/access_loggers/grpc/http_config_test.cc index 1c6be1e2dec48..37ba5220244f6 100644 --- a/test/extensions/access_loggers/grpc/http_config_test.cc +++ b/test/extensions/access_loggers/grpc/http_config_test.cc @@ -7,7 +7,7 @@ #include "extensions/access_loggers/grpc/http_grpc_access_log_impl.h" #include "extensions/access_loggers/well_known_names.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git 
a/test/extensions/access_loggers/grpc/http_grpc_access_log_impl_test.cc b/test/extensions/access_loggers/grpc/http_grpc_access_log_impl_test.cc index 13e0b1641cf0c..d6cf63b113953 100644 --- a/test/extensions/access_loggers/grpc/http_grpc_access_log_impl_test.cc +++ b/test/extensions/access_loggers/grpc/http_grpc_access_log_impl_test.cc @@ -45,7 +45,7 @@ class MockGrpcAccessLoggerCache : public GrpcCommon::GrpcAccessLoggerCache { // GrpcAccessLoggerCache MOCK_METHOD(GrpcCommon::GrpcAccessLoggerSharedPtr, getOrCreateLogger, (const envoy::extensions::access_loggers::grpc::v3::CommonGrpcAccessLogConfig& config, - GrpcCommon::GrpcAccessLoggerType logger_type)); + GrpcCommon::GrpcAccessLoggerType logger_type, Stats::Scope& scope)); }; class HttpGrpcAccessLogTest : public testing::Test { @@ -55,17 +55,17 @@ class HttpGrpcAccessLogTest : public testing::Test { config_.mutable_common_config()->set_log_name("hello_log"); config_.mutable_common_config()->add_filter_state_objects_to_log("string_accessor"); config_.mutable_common_config()->add_filter_state_objects_to_log("serialized"); - EXPECT_CALL(*logger_cache_, getOrCreateLogger(_, _)) + EXPECT_CALL(*logger_cache_, getOrCreateLogger(_, _, _)) .WillOnce( [this](const envoy::extensions::access_loggers::grpc::v3::CommonGrpcAccessLogConfig& config, - GrpcCommon::GrpcAccessLoggerType logger_type) { + GrpcCommon::GrpcAccessLoggerType logger_type, Stats::Scope&) { EXPECT_EQ(config.DebugString(), config_.common_config().DebugString()); EXPECT_EQ(GrpcCommon::GrpcAccessLoggerType::HTTP, logger_type); return logger_; }); access_log_ = std::make_unique(AccessLog::FilterPtr{filter_}, config_, tls_, - logger_cache_); + logger_cache_, scope_); } void expectLog(const std::string& expected_log_entry_yaml) { @@ -116,12 +116,13 @@ response: {{}} access_log_->log(&request_headers, nullptr, nullptr, stream_info); } + Stats::IsolatedStoreImpl scope_; AccessLog::MockFilter* filter_{new NiceMock()}; NiceMock tls_; 
envoy::extensions::access_loggers::grpc::v3::HttpGrpcAccessLogConfig config_; std::shared_ptr logger_{new MockGrpcAccessLogger()}; std::shared_ptr logger_cache_{new MockGrpcAccessLoggerCache()}; - std::unique_ptr access_log_; + HttpGrpcAccessLogPtr access_log_; }; class TestSerializedFilterState : public StreamInfo::FilterState::Object { diff --git a/test/extensions/access_loggers/grpc/http_grpc_access_log_integration_test.cc b/test/extensions/access_loggers/grpc/http_grpc_access_log_integration_test.cc index e66655b92c023..98096bb2386eb 100644 --- a/test/extensions/access_loggers/grpc/http_grpc_access_log_integration_test.cc +++ b/test/extensions/access_loggers/grpc/http_grpc_access_log_integration_test.cc @@ -4,9 +4,9 @@ #include "envoy/service/accesslog/v3/als.pb.h" #include "common/buffer/zero_copy_input_stream_impl.h" -#include "common/common/version.h" #include "common/grpc/codec.h" #include "common/grpc/common.h" +#include "common/version/version.h" #include "test/common/grpc/grpc_client_integration.h" #include "test/integration/http_integration.h" @@ -19,7 +19,7 @@ using testing::AssertionResult; namespace Envoy { namespace { -class AccessLogIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, +class AccessLogIntegrationTest : public Grpc::VersionedGrpcClientIntegrationParamTest, public HttpIntegrationTest { public: AccessLogIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, ipVersion()) {} @@ -48,6 +48,7 @@ class AccessLogIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, envoy::extensions::access_loggers::grpc::v3::HttpGrpcAccessLogConfig config; auto* common_config = config.mutable_common_config(); common_config->set_log_name("foo"); + common_config->set_transport_api_version(apiVersion()); setGrpcService(*common_config->mutable_grpc_service(), "accesslog", fake_upstreams_.back()->localAddress()); access_log->mutable_typed_config()->PackFrom(config); @@ -56,6 +57,14 @@ class AccessLogIntegrationTest : public 
Grpc::GrpcClientIntegrationParamTest, HttpIntegrationTest::initialize(); } + static ProtobufTypes::MessagePtr scrubHiddenEnvoyDeprecated(const Protobuf::Message& message) { + ProtobufTypes::MessagePtr mutable_clone; + mutable_clone.reset(message.New()); + mutable_clone->MergeFrom(message); + Config::VersionUtil::scrubHiddenEnvoyDeprecated(*mutable_clone); + return mutable_clone; + } + ABSL_MUST_USE_RESULT AssertionResult waitForAccessLogConnection() { return fake_upstreams_[1]->waitForHttpConnection(*dispatcher_, fake_access_log_connection_); @@ -70,11 +79,11 @@ class AccessLogIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, AssertionResult waitForAccessLogRequest(const std::string& expected_request_msg_yaml) { envoy::service::accesslog::v3::StreamAccessLogsMessage request_msg; VERIFY_ASSERTION(access_log_request_->waitForGrpcMessage(*dispatcher_, request_msg)); - EXPECT_EQ("POST", access_log_request_->headers().Method()->value().getStringView()); - EXPECT_EQ("/envoy.service.accesslog.v2.AccessLogService/StreamAccessLogs", - access_log_request_->headers().Path()->value().getStringView()); - EXPECT_EQ("application/grpc", - access_log_request_->headers().ContentType()->value().getStringView()); + EXPECT_EQ("POST", access_log_request_->headers().getMethodValue()); + EXPECT_EQ(TestUtility::getVersionedMethodPath("envoy.service.accesslog.{}.AccessLogService", + "StreamAccessLogs", apiVersion()), + access_log_request_->headers().getPathValue()); + EXPECT_EQ("application/grpc", access_log_request_->headers().getContentTypeValue()); envoy::service::accesslog::v3::StreamAccessLogsMessage expected_request_msg; TestUtility::loadFromYaml(expected_request_msg_yaml, expected_request_msg); @@ -94,8 +103,10 @@ class AccessLogIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, node->clear_extensions(); node->clear_user_agent_build_version(); } - EXPECT_EQ(request_msg.DebugString(), expected_request_msg.DebugString()); - + 
Config::VersionUtil::scrubHiddenEnvoyDeprecated(request_msg); + Config::VersionUtil::scrubHiddenEnvoyDeprecated(expected_request_msg); + EXPECT_TRUE(TestUtility::protoEqual(request_msg, expected_request_msg, + /*ignore_repeated_field_ordering=*/false)); return AssertionSuccess(); } @@ -113,7 +124,7 @@ class AccessLogIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, }; INSTANTIATE_TEST_SUITE_P(IpVersionsCientType, AccessLogIntegrationTest, - GRPC_CLIENT_INTEGRATION_PARAMS); + VERSIONED_GRPC_CLIENT_INTEGRATION_PARAMS); // Test a basic full access logging flow. TEST_P(AccessLogIntegrationTest, BasicAccessLogFlow) { @@ -152,7 +163,7 @@ TEST_P(AccessLogIntegrationTest, BasicAccessLogFlow) { BufferingStreamDecoderPtr response = IntegrationUtil::makeSingleRequest( lookupPort("http"), "GET", "/notfound", "", downstream_protocol_, version_); EXPECT_TRUE(response->complete()); - EXPECT_EQ("404", response->headers().Status()->value().getStringView()); + EXPECT_EQ("404", response->headers().getStatusValue()); ASSERT_TRUE(waitForAccessLogRequest(R"EOF( http_logs: log_entry: @@ -191,7 +202,7 @@ TEST_P(AccessLogIntegrationTest, BasicAccessLogFlow) { response = IntegrationUtil::makeSingleRequest(lookupPort("http"), "GET", "/notfound", "", downstream_protocol_, version_); EXPECT_TRUE(response->complete()); - EXPECT_EQ("404", response->headers().Status()->value().getStringView()); + EXPECT_EQ("404", response->headers().getStatusValue()); ASSERT_TRUE(waitForAccessLogStream()); ASSERT_TRUE(waitForAccessLogRequest(fmt::format(R"EOF( identifier: diff --git a/test/extensions/access_loggers/grpc/tcp_grpc_access_log_integration_test.cc b/test/extensions/access_loggers/grpc/tcp_grpc_access_log_integration_test.cc index a83f27484641c..e79fb234eaa1c 100644 --- a/test/extensions/access_loggers/grpc/tcp_grpc_access_log_integration_test.cc +++ b/test/extensions/access_loggers/grpc/tcp_grpc_access_log_integration_test.cc @@ -5,9 +5,9 @@ #include "envoy/service/accesslog/v3/als.pb.h" 
#include "common/buffer/zero_copy_input_stream_impl.h" -#include "common/common/version.h" #include "common/grpc/codec.h" #include "common/grpc/common.h" +#include "common/version/version.h" #include "test/common/grpc/grpc_client_integration.h" #include "test/integration/http_integration.h" @@ -24,7 +24,7 @@ void clearPort(envoy::config::core::v3::Address& address) { address.mutable_socket_address()->clear_port_specifier(); } -class TcpGrpcAccessLogIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, +class TcpGrpcAccessLogIntegrationTest : public Grpc::VersionedGrpcClientIntegrationParamTest, public BaseIntegrationTest { public: TcpGrpcAccessLogIntegrationTest() @@ -32,11 +32,6 @@ class TcpGrpcAccessLogIntegrationTest : public Grpc::GrpcClientIntegrationParamT enable_half_close_ = true; } - ~TcpGrpcAccessLogIntegrationTest() override { - test_server_.reset(); - fake_upstreams_.clear(); - } - void createUpstreams() override { BaseIntegrationTest::createUpstreams(); fake_upstreams_.emplace_back( @@ -59,6 +54,7 @@ class TcpGrpcAccessLogIntegrationTest : public Grpc::GrpcClientIntegrationParamT envoy::extensions::access_loggers::grpc::v3::TcpGrpcAccessLogConfig access_log_config; auto* common_config = access_log_config.mutable_common_config(); common_config->set_log_name("foo"); + common_config->set_transport_api_version(apiVersion()); setGrpcService(*common_config->mutable_grpc_service(), "accesslog", fake_upstreams_.back()->localAddress()); access_log->mutable_typed_config()->PackFrom(access_log_config); @@ -80,11 +76,11 @@ class TcpGrpcAccessLogIntegrationTest : public Grpc::GrpcClientIntegrationParamT AssertionResult waitForAccessLogRequest(const std::string& expected_request_msg_yaml) { envoy::service::accesslog::v3::StreamAccessLogsMessage request_msg; VERIFY_ASSERTION(access_log_request_->waitForGrpcMessage(*dispatcher_, request_msg)); - EXPECT_EQ("POST", access_log_request_->headers().Method()->value().getStringView()); - 
EXPECT_EQ("/envoy.service.accesslog.v2.AccessLogService/StreamAccessLogs", - access_log_request_->headers().Path()->value().getStringView()); - EXPECT_EQ("application/grpc", - access_log_request_->headers().ContentType()->value().getStringView()); + EXPECT_EQ("POST", access_log_request_->headers().getMethodValue()); + EXPECT_EQ(TestUtility::getVersionedMethodPath("envoy.service.accesslog.{}.AccessLogService", + "StreamAccessLogs", apiVersion()), + access_log_request_->headers().getPathValue()); + EXPECT_EQ("application/grpc", access_log_request_->headers().getContentTypeValue()); envoy::service::accesslog::v3::StreamAccessLogsMessage expected_request_msg; TestUtility::loadFromYaml(expected_request_msg_yaml, expected_request_msg); @@ -105,7 +101,10 @@ class TcpGrpcAccessLogIntegrationTest : public Grpc::GrpcClientIntegrationParamT node->clear_extensions(); node->clear_user_agent_build_version(); } - EXPECT_EQ(request_msg.DebugString(), expected_request_msg.DebugString()); + Config::VersionUtil::scrubHiddenEnvoyDeprecated(request_msg); + Config::VersionUtil::scrubHiddenEnvoyDeprecated(expected_request_msg); + EXPECT_TRUE(TestUtility::protoEqual(request_msg, expected_request_msg, + /*ignore_repeated_field_ordering=*/false)); return AssertionSuccess(); } @@ -125,7 +124,7 @@ class TcpGrpcAccessLogIntegrationTest : public Grpc::GrpcClientIntegrationParamT }; INSTANTIATE_TEST_SUITE_P(IpVersionsCientType, TcpGrpcAccessLogIntegrationTest, - GRPC_CLIENT_INTEGRATION_PARAMS); + VERSIONED_GRPC_CLIENT_INTEGRATION_PARAMS); // Test a basic full access logging flow. 
TEST_P(TcpGrpcAccessLogIntegrationTest, BasicAccessLogFlow) { @@ -137,11 +136,11 @@ TEST_P(TcpGrpcAccessLogIntegrationTest, BasicAccessLogFlow) { ASSERT_TRUE(fake_upstream_connection->write("hello")); tcp_client->waitForData("hello"); - tcp_client->write("bar", false); + ASSERT_TRUE(tcp_client->write("bar", false)); ASSERT_TRUE(fake_upstream_connection->write("", true)); tcp_client->waitForHalfClose(); - tcp_client->write("", true); + ASSERT_TRUE(tcp_client->write("", true)); ASSERT_TRUE(fake_upstream_connection->waitForData(3)); ASSERT_TRUE(fake_upstream_connection->waitForHalfClose()); diff --git a/test/extensions/clusters/aggregate/BUILD b/test/extensions/clusters/aggregate/BUILD index f5885980ab369..4e75753f133b1 100644 --- a/test/extensions/clusters/aggregate/BUILD +++ b/test/extensions/clusters/aggregate/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( @@ -20,7 +20,8 @@ envoy_extension_cc_test( "//source/extensions/transport_sockets/raw_buffer:config", "//test/common/upstream:utility_lib", "//test/mocks/protobuf:protobuf_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:admin_mocks", + "//test/mocks/server:instance_mocks", "//test/mocks/ssl:ssl_mocks", "//test/test_common:environment_lib", "@envoy_api//envoy/config/cluster/v3:pkg_cc_proto", @@ -40,7 +41,7 @@ envoy_extension_cc_test( "//test/common/upstream:test_cluster_manager", "//test/common/upstream:utility_lib", "//test/mocks/protobuf:protobuf_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:admin_mocks", "//test/mocks/ssl:ssl_mocks", "//test/test_common:environment_lib", "//test/test_common:simulated_time_system_lib", @@ -53,16 +54,17 @@ envoy_extension_cc_test( name = "cluster_integration_test", srcs = ["cluster_integration_test.cc"], extension_name = 
"envoy.clusters.aggregate", + tags = ["fails_on_windows"], deps = [ "//source/common/config:protobuf_link_hacks", "//source/common/protobuf:utility_lib", "//source/extensions/clusters/aggregate:cluster", "//source/extensions/filters/network/tcp_proxy:config", + "//source/extensions/retry/priority/previous_priorities:config", "//test/common/grpc:grpc_client_integration_lib", "//test/integration:http_integration_lib", "//test/integration:integration_lib", "//test/mocks/runtime:runtime_mocks", - "//test/mocks/server:server_mocks", "//test/test_common:network_utility_lib", "//test/test_common:resources_lib", "//test/test_common:utility_lib", diff --git a/test/extensions/clusters/aggregate/cluster_integration_test.cc b/test/extensions/clusters/aggregate/cluster_integration_test.cc index 4f6f8b6edb3d9..abebeadc82c91 100644 --- a/test/extensions/clusters/aggregate/cluster_integration_test.cc +++ b/test/extensions/clusters/aggregate/cluster_integration_test.cc @@ -9,7 +9,6 @@ #include "test/common/grpc/grpc_client_integration.h" #include "test/integration/http_integration.h" #include "test/integration/utility.h" -#include "test/mocks/server/mocks.h" #include "test/test_common/network_utility.h" #include "test/test_common/resources.h" #include "test/test_common/simulated_time_system.h" @@ -30,9 +29,9 @@ const int FirstUpstreamIndex = 2; const int SecondUpstreamIndex = 3; const std::string& config() { - CONSTRUCT_ON_FIRST_USE(std::string, R"EOF( + CONSTRUCT_ON_FIRST_USE(std::string, fmt::format(R"EOF( admin: - access_log_path: /dev/null + access_log_path: {} address: socket_address: address: 127.0.0.1 @@ -48,7 +47,7 @@ const std::string& config() { static_resources: clusters: - name: my_cds_cluster - http2_protocol_options: {} + http2_protocol_options: {{}} load_assignment: cluster_name: my_cds_cluster endpoints: @@ -61,6 +60,7 @@ const std::string& config() { - name: aggregate_cluster connect_timeout: 0.25s lb_policy: CLUSTER_PROVIDED + protocol_selection: 
USE_DOWNSTREAM_PROTOCOL # this should be ignored, as cluster_1 and cluster_2 specify HTTP/2. cluster_type: name: envoy.clusters.aggregate typed_config: @@ -99,10 +99,17 @@ const std::string& config() { prefix: "/cluster2" - route: cluster: aggregate_cluster + retry_policy: + retry_priority: + name: envoy.retry_priorities.previous_priorities + typed_config: + "@type": type.googleapis.com/envoy.config.retry.previous_priorities.PreviousPrioritiesConfig + update_frequency: 1 match: prefix: "/aggregatecluster" domains: "*" -)EOF"); +)EOF", + TestEnvironment::nullDevicePath())); } class AggregateIntegrationTest : public testing::TestWithParam, @@ -113,11 +120,7 @@ class AggregateIntegrationTest : public testing::TestWithParamset_allow_unexpected_disconnects(false); - fake_upstreams_.emplace_back(new FakeUpstream(0, FakeHttpConnection::Type::HTTP1, version_, + fake_upstreams_.emplace_back(new FakeUpstream(0, FakeHttpConnection::Type::HTTP2, version_, timeSystem(), enable_half_close_)); fake_upstreams_[SecondUpstreamIndex]->set_allow_unexpected_disconnects(false); - cluster1_ = ConfigHelper::buildCluster( + cluster1_ = ConfigHelper::buildStaticCluster( FirstClusterName, fake_upstreams_[FirstUpstreamIndex]->localAddress()->ip()->port(), Network::Test::getLoopbackAddressString(GetParam())); - cluster2_ = ConfigHelper::buildCluster( + cluster2_ = ConfigHelper::buildStaticCluster( SecondClusterName, fake_upstreams_[SecondUpstreamIndex]->localAddress()->ip()->port(), Network::Test::getLoopbackAddressString(GetParam())); @@ -190,10 +193,10 @@ TEST_P(AggregateIntegrationTest, ClusterUpDownUp) { IntegrationUtil::makeSingleRequest(lookupPort("http"), "GET", "/aggregatecluster", "", downstream_protocol_, version_, "foo.com"); ASSERT_TRUE(response->complete()); - EXPECT_EQ("503", response->headers().Status()->value().getStringView()); + EXPECT_EQ("503", response->headers().getStatusValue()); cleanupUpstreamAndDownstream(); - codec_client_->waitForDisconnect(); + 
ASSERT_TRUE(codec_client_->waitForDisconnect()); // Tell Envoy that cluster_1 is back. EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "42", {}, {}, {})); @@ -212,7 +215,7 @@ TEST_P(AggregateIntegrationTest, TwoClusters) { testRouterHeaderOnlyRequestAndResponse(nullptr, FirstUpstreamIndex, "/aggregatecluster"); cleanupUpstreamAndDownstream(); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); // Tell Envoy that cluster_2 is here. EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "55", {}, {}, {})); @@ -224,7 +227,7 @@ TEST_P(AggregateIntegrationTest, TwoClusters) { // A request for aggregate cluster should be fine. testRouterHeaderOnlyRequestAndResponse(nullptr, FirstUpstreamIndex, "/aggregatecluster"); cleanupUpstreamAndDownstream(); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); // Tell Envoy that cluster_1 is gone. EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "42", {}, {}, {})); @@ -236,7 +239,7 @@ TEST_P(AggregateIntegrationTest, TwoClusters) { testRouterHeaderOnlyRequestAndResponse(nullptr, SecondUpstreamIndex, "/aggregatecluster"); cleanupUpstreamAndDownstream(); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); // Tell Envoy that cluster_1 is back. EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "42", {}, {}, {})); @@ -249,5 +252,45 @@ TEST_P(AggregateIntegrationTest, TwoClusters) { cleanupUpstreamAndDownstream(); } +// Test that the PreviousPriorities retry predicate works as expected. It is configured +// in this test to exclude a priority after a single failure, so the first failure +// on cluster_1 results in the retry going to cluster_2. +TEST_P(AggregateIntegrationTest, PreviousPrioritiesRetryPredicate) { + initialize(); + + // Tell Envoy that cluster_2 is here. 
+ sendDiscoveryResponse( + Config::TypeUrl::get().Cluster, {cluster1_, cluster2_}, {cluster2_}, {}, "42"); + // The '4' includes the fake CDS server and aggregate cluster. + test_server_->waitForGaugeGe("cluster_manager.active_clusters", 4); + + codec_client_ = makeHttpConnection(lookupPort("http")); + auto response = codec_client_->makeRequestWithBody( + Http::TestRequestHeaderMapImpl{{":method", "GET"}, + {":path", "/aggregatecluster"}, + {":scheme", "http"}, + {":authority", "host"}, + {"x-forwarded-for", "10.0.0.1"}, + {"x-envoy-retry-on", "5xx"}}, + 1024); + waitForNextUpstreamRequest(FirstUpstreamIndex); + upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "503"}}, false); + + ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_)); + ASSERT_TRUE(fake_upstream_connection_->close()); + ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); + fake_upstream_connection_.reset(); + + waitForNextUpstreamRequest(SecondUpstreamIndex); + upstream_request_->encodeHeaders(default_response_headers_, true); + + response->waitForEndStream(); + EXPECT_TRUE(upstream_request_->complete()); + + EXPECT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().getStatusValue()); + cleanupUpstreamAndDownstream(); +} + } // namespace } // namespace Envoy diff --git a/test/extensions/clusters/aggregate/cluster_test.cc b/test/extensions/clusters/aggregate/cluster_test.cc index 1c173b6ac0cce..b2c4174fbd655 100644 --- a/test/extensions/clusters/aggregate/cluster_test.cc +++ b/test/extensions/clusters/aggregate/cluster_test.cc @@ -8,7 +8,8 @@ #include "test/common/upstream/utility.h" #include "test/mocks/protobuf/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/admin.h" +#include "test/mocks/server/instance.h" #include "test/mocks/ssl/mocks.h" #include "test/test_common/environment.h" @@ -21,24 +22,35 @@ namespace Extensions { namespace Clusters { namespace Aggregate { +namespace { +const std::string 
primary_name("primary"); +const std::string secondary_name("secondary"); +} // namespace + class AggregateClusterTest : public testing::Test { public: - AggregateClusterTest() : stats_(Upstream::ClusterInfoImpl::generateStats(stats_store_)) {} + AggregateClusterTest() : stats_(Upstream::ClusterInfoImpl::generateStats(stats_store_)) { + ON_CALL(*primary_info_, name()).WillByDefault(ReturnRef(primary_name)); + ON_CALL(*secondary_info_, name()).WillByDefault(ReturnRef(secondary_name)); + } - Upstream::HostVector setupHostSet(int healthy_hosts, int degraded_hosts, int unhealthy_hosts) { + Upstream::HostVector setupHostSet(Upstream::ClusterInfoConstSharedPtr cluster, int healthy_hosts, + int degraded_hosts, int unhealthy_hosts, uint32_t priority) { Upstream::HostVector hosts; for (int i = 0; i < healthy_hosts; ++i) { - hosts.emplace_back(Upstream::makeTestHost(info_, "tcp://127.0.0.1:80")); + hosts.emplace_back(Upstream::makeTestHost(cluster, "tcp://127.0.0.1:80", 1, priority)); } for (int i = 0; i < degraded_hosts; ++i) { - Upstream::HostSharedPtr host = Upstream::makeTestHost(info_, "tcp://127.0.0.2:80"); + Upstream::HostSharedPtr host = + Upstream::makeTestHost(cluster, "tcp://127.0.0.2:80", 1, priority); host->healthFlagSet(Upstream::HostImpl::HealthFlag::DEGRADED_ACTIVE_HC); hosts.emplace_back(host); } for (int i = 0; i < unhealthy_hosts; ++i) { - Upstream::HostSharedPtr host = Upstream::makeTestHost(info_, "tcp://127.0.0.3:80"); + Upstream::HostSharedPtr host = + Upstream::makeTestHost(cluster, "tcp://127.0.0.3:80", 1, priority); host->healthFlagSet(Upstream::HostImpl::HealthFlag::FAILED_ACTIVE_HC); hosts.emplace_back(host); } @@ -47,7 +59,8 @@ class AggregateClusterTest : public testing::Test { } void setupPrimary(int priority, int healthy_hosts, int degraded_hosts, int unhealthy_hosts) { - auto hosts = setupHostSet(healthy_hosts, degraded_hosts, unhealthy_hosts); + auto hosts = + setupHostSet(primary_info_, healthy_hosts, degraded_hosts, unhealthy_hosts, 
priority); primary_ps_.updateHosts( priority, Upstream::HostSetImpl::partitionHosts(std::make_shared(hosts), @@ -57,7 +70,8 @@ class AggregateClusterTest : public testing::Test { } void setupSecondary(int priority, int healthy_hosts, int degraded_hosts, int unhealthy_hosts) { - auto hosts = setupHostSet(healthy_hosts, degraded_hosts, unhealthy_hosts); + auto hosts = + setupHostSet(secondary_info_, healthy_hosts, degraded_hosts, unhealthy_hosts, priority); secondary_ps_.updateHosts( priority, Upstream::HostSetImpl::partitionHosts(std::make_shared(hosts), @@ -75,7 +89,7 @@ class AggregateClusterTest : public testing::Test { void initialize(const std::string& yaml_config) { envoy::config::cluster::v3::Cluster cluster_config = - Upstream::parseClusterFromV2Yaml(yaml_config); + Upstream::parseClusterFromV3Yaml(yaml_config); envoy::extensions::clusters::aggregate::v3::ClusterConfig config; Config::Utility::translateOpaqueConfig(cluster_config.cluster_type().typed_config(), ProtobufWkt::Struct::default_instance(), @@ -109,7 +123,7 @@ class AggregateClusterTest : public testing::Test { Stats::IsolatedStoreImpl stats_store_; Ssl::MockContextManager ssl_context_manager_; NiceMock cm_; - NiceMock random_; + NiceMock random_; NiceMock tls_; NiceMock runtime_; NiceMock dispatcher_; @@ -123,7 +137,10 @@ class AggregateClusterTest : public testing::Test { Upstream::LoadBalancerFactorySharedPtr lb_factory_; Upstream::LoadBalancerPtr lb_; Upstream::ClusterStats stats_; - std::shared_ptr info_{new NiceMock()}; + std::shared_ptr primary_info_{ + new NiceMock()}; + std::shared_ptr secondary_info_{ + new NiceMock()}; NiceMock aggregate_cluster_, primary_, secondary_; Upstream::PrioritySetImpl primary_ps_, secondary_ps_; NiceMock primary_load_balancer_, secondary_load_balancer_; @@ -151,7 +168,7 @@ TEST_F(AggregateClusterTest, LoadBalancerTest) { // Cluster 2: // Priority 0: 33.3% // Priority 1: 33.3% - Upstream::HostSharedPtr host = Upstream::makeTestHost(info_, "tcp://127.0.0.1:80"); 
+ Upstream::HostSharedPtr host = Upstream::makeTestHost(primary_info_, "tcp://127.0.0.1:80"); EXPECT_CALL(primary_load_balancer_, chooseHost(_)).WillRepeatedly(Return(host)); EXPECT_CALL(secondary_load_balancer_, chooseHost(_)).WillRepeatedly(Return(nullptr)); @@ -199,7 +216,7 @@ TEST_F(AggregateClusterTest, LoadBalancerTest) { TEST_F(AggregateClusterTest, AllHostAreUnhealthyTest) { initialize(default_yaml_config_); - Upstream::HostSharedPtr host = Upstream::makeTestHost(info_, "tcp://127.0.0.1:80"); + Upstream::HostSharedPtr host = Upstream::makeTestHost(primary_info_, "tcp://127.0.0.1:80"); // Set up the HostSet with 0 healthy, 0 degraded and 2 unhealthy. setupPrimary(0, 0, 0, 2); setupPrimary(1, 0, 0, 2); @@ -237,7 +254,7 @@ TEST_F(AggregateClusterTest, AllHostAreUnhealthyTest) { TEST_F(AggregateClusterTest, ClusterInPanicTest) { initialize(default_yaml_config_); - Upstream::HostSharedPtr host = Upstream::makeTestHost(info_, "tcp://127.0.0.1:80"); + Upstream::HostSharedPtr host = Upstream::makeTestHost(primary_info_, "tcp://127.0.0.1:80"); setupPrimary(0, 1, 0, 4); setupPrimary(1, 1, 0, 4); setupSecondary(0, 1, 0, 4); @@ -310,6 +327,63 @@ TEST_F(AggregateClusterTest, LBContextTest) { EXPECT_EQ(context.upstreamTransportSocketOptions(), nullptr); } +TEST_F(AggregateClusterTest, ContextDeterminePriorityLoad) { + Upstream::MockLoadBalancerContext lb_context; + initialize(default_yaml_config_); + setupPrimary(0, 1, 0, 0); + setupPrimary(1, 1, 0, 0); + setupSecondary(0, 1, 0, 0); + setupSecondary(1, 1, 0, 0); + + const uint32_t invalid_priority = 42; + Upstream::HostSharedPtr host = + Upstream::makeTestHost(primary_info_, "tcp://127.0.0.1:80", 1, invalid_priority); + + // The linearized priorities are [P0, P1, S0, S1]. + Upstream::HealthyAndDegradedLoad secondary_priority_1{Upstream::HealthyLoad({0, 0, 0, 100}), + Upstream::DegradedLoad()}; + + // Validate that lb_context->determinePriorityLoad() is called and that the mapping function + // passed in works correctly. 
+ EXPECT_CALL(lb_context, determinePriorityLoad(_, _, _)) + .WillOnce(Invoke([&](const Upstream::PrioritySet&, const Upstream::HealthyAndDegradedLoad&, + const Upstream::RetryPriority::PriorityMappingFunc& mapping_func) + -> const Upstream::HealthyAndDegradedLoad& { + // This one isn't part of the mapping due to an invalid priority. + EXPECT_FALSE(mapping_func(*host).has_value()); + + // Helper to get a host from the given set and priority + auto host_from_priority = [](Upstream::PrioritySetImpl& ps, + uint32_t priority) -> const Upstream::HostDescription& { + return *(ps.hostSetsPerPriority()[priority]->hosts()[0]); + }; + + EXPECT_EQ(mapping_func(host_from_priority(primary_ps_, 0)), absl::optional(0)); + EXPECT_EQ(mapping_func(host_from_priority(primary_ps_, 1)), absl::optional(1)); + EXPECT_EQ(mapping_func(host_from_priority(secondary_ps_, 0)), absl::optional(2)); + EXPECT_EQ(mapping_func(host_from_priority(secondary_ps_, 1)), absl::optional(3)); + + return secondary_priority_1; + })); + + // Validate that the AggregateLoadBalancerContext is initialized with the weights from + // lb_context->determinePriorityLoad(). 
+ EXPECT_CALL(secondary_load_balancer_, chooseHost(_)) + .WillOnce(Invoke([this, &host]( + Upstream::LoadBalancerContext* context) -> Upstream::HostConstSharedPtr { + const Upstream::HealthyAndDegradedLoad& adjusted_load = context->determinePriorityLoad( + secondary_ps_, {Upstream::HealthyLoad({100, 0}), Upstream::DegradedLoad()}, nullptr); + + EXPECT_EQ(adjusted_load.healthy_priority_load_.get().size(), 2); + EXPECT_EQ(adjusted_load.healthy_priority_load_.get().at(0), 0); + EXPECT_EQ(adjusted_load.healthy_priority_load_.get().at(1), 100); + + return host; + })); + + lb_->chooseHost(&lb_context); +} + } // namespace Aggregate } // namespace Clusters } // namespace Extensions diff --git a/test/extensions/clusters/aggregate/cluster_update_test.cc b/test/extensions/clusters/aggregate/cluster_update_test.cc index e7cbbcb4311de..cf80d8599b7fc 100644 --- a/test/extensions/clusters/aggregate/cluster_update_test.cc +++ b/test/extensions/clusters/aggregate/cluster_update_test.cc @@ -11,7 +11,7 @@ #include "test/common/upstream/test_cluster_manager.h" #include "test/common/upstream/utility.h" #include "test/mocks/protobuf/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/admin.h" #include "test/mocks/ssl/mocks.h" #include "test/test_common/environment.h" #include "test/test_common/simulated_time_system.h" diff --git a/test/extensions/clusters/dynamic_forward_proxy/BUILD b/test/extensions/clusters/dynamic_forward_proxy/BUILD index 1668ea02f6367..126bb1aa78403 100644 --- a/test/extensions/clusters/dynamic_forward_proxy/BUILD +++ b/test/extensions/clusters/dynamic_forward_proxy/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( @@ -23,7 +23,8 @@ envoy_extension_cc_test( "//test/common/upstream:utility_lib", "//test/extensions/common/dynamic_forward_proxy:mocks", 
"//test/mocks/protobuf:protobuf_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:admin_mocks", + "//test/mocks/server:instance_mocks", "//test/mocks/ssl:ssl_mocks", "//test/test_common:environment_lib", "@envoy_api//envoy/config/cluster/v3:pkg_cc_proto", diff --git a/test/extensions/clusters/dynamic_forward_proxy/cluster_test.cc b/test/extensions/clusters/dynamic_forward_proxy/cluster_test.cc index e44a7a7d18e91..725e62590c85d 100644 --- a/test/extensions/clusters/dynamic_forward_proxy/cluster_test.cc +++ b/test/extensions/clusters/dynamic_forward_proxy/cluster_test.cc @@ -10,7 +10,8 @@ #include "test/common/upstream/utility.h" #include "test/extensions/common/dynamic_forward_proxy/mocks.h" #include "test/mocks/protobuf/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/admin.h" +#include "test/mocks/server/instance.h" #include "test/mocks/ssl/mocks.h" #include "test/test_common/environment.h" @@ -30,7 +31,7 @@ class ClusterTest : public testing::Test, public: void initialize(const std::string& yaml_config, bool uses_tls) { envoy::config::cluster::v3::Cluster cluster_config = - Upstream::parseClusterFromV2Yaml(yaml_config); + Upstream::parseClusterFromV3Yaml(yaml_config); envoy::extensions::clusters::dynamic_forward_proxy::v3::ClusterConfig config; Config::Utility::translateOpaqueConfig(cluster_config.cluster_type().typed_config(), ProtobufWkt::Struct::default_instance(), @@ -107,7 +108,7 @@ class ClusterTest : public testing::Test, Stats::IsolatedStoreImpl stats_store_; Ssl::MockContextManager ssl_context_manager_; NiceMock cm_; - NiceMock random_; + NiceMock random_; NiceMock tls_; NiceMock runtime_; NiceMock dispatcher_; @@ -199,9 +200,9 @@ TEST_F(ClusterTest, PopulatedCache) { class ClusterFactoryTest : public testing::Test { protected: - void createCluster(const std::string& yaml_config) { + void createCluster(const std::string& yaml_config, bool avoid_boosting = true) { envoy::config::cluster::v3::Cluster 
cluster_config = - Upstream::parseClusterFromV2Yaml(yaml_config); + Upstream::parseClusterFromV3Yaml(yaml_config, avoid_boosting); Upstream::ClusterFactoryContextImpl cluster_factory_context( cm_, stats_store_, tls_, nullptr, ssl_context_manager_, runtime_, random_, dispatcher_, log_manager_, local_info_, admin_, singleton_manager_, nullptr, true, validation_visitor_, @@ -216,7 +217,7 @@ class ClusterFactoryTest : public testing::Test { Stats::IsolatedStoreImpl stats_store_; NiceMock ssl_context_manager_; NiceMock cm_; - NiceMock random_; + NiceMock random_; NiceMock tls_; NiceMock runtime_; NiceMock dispatcher_; @@ -250,7 +251,7 @@ connect_timeout: 0.25s )EOF"); EXPECT_THROW_WITH_MESSAGE( - createCluster(yaml_config), EnvoyException, + createCluster(yaml_config, false), EnvoyException, "dynamic_forward_proxy cluster cannot configure 'sni' or 'verify_subject_alt_name'"); } @@ -274,7 +275,7 @@ connect_timeout: 0.25s )EOF"); EXPECT_THROW_WITH_MESSAGE( - createCluster(yaml_config), EnvoyException, + createCluster(yaml_config, false), EnvoyException, "dynamic_forward_proxy cluster cannot configure 'sni' or 'verify_subject_alt_name'"); } @@ -297,6 +298,23 @@ upstream_http_protocol_options: {} "configured with upstream_http_protocol_options"); } +TEST_F(ClusterFactoryTest, InsecureUpstreamHttpProtocolOptions) { + const std::string yaml_config = TestEnvironment::substitute(R"EOF( +name: name +connect_timeout: 0.25s +cluster_type: + name: dynamic_forward_proxy + typed_config: + "@type": type.googleapis.com/envoy.extensions.clusters.dynamic_forward_proxy.v3.ClusterConfig + allow_insecure_cluster_options: true + dns_cache_config: + name: foo +upstream_http_protocol_options: {} +)EOF"); + + createCluster(yaml_config); +} + } // namespace DynamicForwardProxy } // namespace Clusters } // namespace Extensions diff --git a/test/extensions/clusters/redis/BUILD b/test/extensions/clusters/redis/BUILD index 80476cd921ee7..6c88308d93d63 100644 --- 
a/test/extensions/clusters/redis/BUILD +++ b/test/extensions/clusters/redis/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", @@ -11,6 +9,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( @@ -36,7 +36,8 @@ envoy_extension_cc_test( "//test/mocks/network:network_mocks", "//test/mocks/protobuf:protobuf_mocks", "//test/mocks/runtime:runtime_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:admin_mocks", + "//test/mocks/server:instance_mocks", "//test/mocks/ssl:ssl_mocks", "//test/mocks/thread_local:thread_local_mocks", "//test/mocks/upstream:upstream_mocks", @@ -88,6 +89,7 @@ envoy_extension_cc_test( size = "small", srcs = ["redis_cluster_integration_test.cc"], extension_name = "envoy.clusters.redis", + tags = ["fails_on_windows"], deps = [ "//source/extensions/clusters/redis:redis_cluster", "//source/extensions/clusters/redis:redis_cluster_lb", diff --git a/test/extensions/clusters/redis/redis_cluster_integration_test.cc b/test/extensions/clusters/redis/redis_cluster_integration_test.cc index 8746c86bbe5ff..7cddc336c3f26 100644 --- a/test/extensions/clusters/redis/redis_cluster_integration_test.cc +++ b/test/extensions/clusters/redis/redis_cluster_integration_test.cc @@ -18,9 +18,9 @@ namespace { // in the cluster. The load balancing policy must be set // to random for proper test operation. 
const std::string& listenerConfig() { - CONSTRUCT_ON_FIRST_USE(std::string, R"EOF( + CONSTRUCT_ON_FIRST_USE(std::string, fmt::format(R"EOF( admin: - access_log_path: /dev/null + access_log_path: {} address: socket_address: address: 127.0.0.1 @@ -44,7 +44,8 @@ const std::string& listenerConfig() { settings: op_timeout: 5s enable_redirection: true -)EOF"); +)EOF", + TestEnvironment::nullDevicePath())); } const std::string& clusterConfig() { @@ -143,11 +144,6 @@ class RedisClusterIntegrationTest : public testing::TestWithParam(&(test_server.server().random())); + mock_rng_ = dynamic_cast(&(test_server.server().random())); // Abort now if we cannot downcast the server's random number generator pointer. ASSERT_TRUE(mock_rng_ != nullptr); // Ensure that fake_upstreams_[0] is the load balancer's host of choice by default. @@ -199,13 +195,14 @@ class RedisClusterIntegrationTest : public testing::TestWithParamclearData(); - redis_client->write(request); + ASSERT_TRUE(redis_client->write(request)); if (fake_upstream_connection.get() == nullptr) { expect_auth_command = (!auth_password.empty()); @@ -213,7 +210,10 @@ class RedisClusterIntegrationTest : public testing::TestWithParamwaitForData(auth_command.size() + request.size(), &proxy_to_server)); // The original request should be the same as the data received by the server. @@ -221,6 +221,13 @@ class RedisClusterIntegrationTest : public testing::TestWithParamwrite(ok)); + } else if (expect_readonly) { + std::string readonly_command = makeBulkStringArray({"readonly"}); + EXPECT_TRUE(fake_upstream_connection->waitForData(readonly_command.size() + request.size(), + &proxy_to_server)); + EXPECT_EQ(readonly_command + request, proxy_to_server); + // Send back an OK for the readonly command. + EXPECT_TRUE(fake_upstream_connection->write(ok)); } else { EXPECT_TRUE(fake_upstream_connection->waitForData(request.size(), &proxy_to_server)); // The original request should be the same as the data received by the server. 
@@ -239,18 +246,19 @@ class RedisClusterIntegrationTest : public testing::TestWithParamclose(); EXPECT_TRUE(fake_upstream_connection->close()); } void expectCallClusterSlot(int stream_index, std::string& response, + const std::string& auth_username = "", const std::string& auth_password = "") { std::string cluster_slot_request = makeBulkStringArray({"CLUSTER", "SLOTS"}); @@ -264,10 +272,18 @@ class RedisClusterIntegrationTest : public testing::TestWithParamwaitForData(cluster_slot_request.size(), &proxied_cluster_slot_request)); EXPECT_EQ(cluster_slot_request, proxied_cluster_slot_request); - } else { + } else if (auth_username.empty()) { std::string auth_request = makeBulkStringArray({"auth", auth_password}); std::string ok = "+OK\r\n"; + EXPECT_TRUE(fake_upstream_connection_->waitForData( + auth_request.size() + cluster_slot_request.size(), &proxied_cluster_slot_request)); + EXPECT_EQ(auth_request + cluster_slot_request, proxied_cluster_slot_request); + EXPECT_TRUE(fake_upstream_connection_->write(ok)); + } else { + std::string auth_request = makeBulkStringArray({"auth", auth_username, auth_password}); + std::string ok = "+OK\r\n"; + EXPECT_TRUE(fake_upstream_connection_->waitForData( auth_request.size() + cluster_slot_request.size(), &proxied_cluster_slot_request)); EXPECT_EQ(auth_request + cluster_slot_request, proxied_cluster_slot_request); @@ -279,13 +295,13 @@ class RedisClusterIntegrationTest : public testing::TestWithParamaddressAsString(), master->port()) + << makeIp(primary->addressAsString(), primary->port()) << makeIp(replica->addressAsString(), replica->port()); return resp.str(); @@ -302,8 +318,8 @@ class RedisClusterIntegrationTest : public testing::TestWithParamlocalAddress()->ip(), fake_upstreams_[1]->localAddress()->ip()); expectCallClusterSlot(random_index_, cluster_slot_response); }; @@ -429,7 +453,7 @@ TEST_P(RedisClusterIntegrationTest, ClusterSlotRequestAfterRedirection) { random_index_ = 0; on_server_init_function_ = [this]() { - 
std::string cluster_slot_response = singleSlotMasterReplica( + std::string cluster_slot_response = singleSlotPrimaryReplica( fake_upstreams_[0]->localAddress()->ip(), fake_upstreams_[1]->localAddress()->ip()); expectCallClusterSlot(random_index_, cluster_slot_response); }; @@ -437,7 +461,7 @@ TEST_P(RedisClusterIntegrationTest, ClusterSlotRequestAfterRedirection) { initialize(); // foo hashes to slot 12182 which the proxy believes is at the server reachable via - // fake_upstreams_[0], based on the singleSlotMasterReplica() response above. + // fake_upstreams_[0], based on the singleSlotPrimaryReplica() response above. std::string request = makeBulkStringArray({"get", "foo"}); // The actual moved redirection error that redirects to the fake_upstreams_[1] server. std::string redirection_response = @@ -448,7 +472,7 @@ TEST_P(RedisClusterIntegrationTest, ClusterSlotRequestAfterRedirection) { std::string proxy_to_server; IntegrationTcpClientPtr redis_client = makeTcpConnection(lookupPort("redis_proxy")); - redis_client->write(request); + ASSERT_TRUE(redis_client->write(request)); FakeRawConnectionPtr fake_upstream_connection_1, fake_upstream_connection_2, fake_upstream_connection_3; @@ -491,30 +515,30 @@ TEST_P(RedisClusterIntegrationTest, ClusterSlotRequestAfterRedirection) { // This test sends simple "set foo" and "get foo" command from a fake // downstream client through the proxy to a fake upstream -// Redis cluster with a single slot with master and replica. +// Redis cluster with a single slot with primary and replica. 
// The envoy proxy is set with read_policy to read from replica, the expected result -// is that the set command will be sent to the master and the get command will be sent +// is that the set command will be sent to the primary and the get command will be sent // to the replica -TEST_P(RedisClusterWithReadPolicyIntegrationTest, SingleSlotMasterReplicaReadReplica) { +TEST_P(RedisClusterWithReadPolicyIntegrationTest, SingleSlotPrimaryReplicaReadReplica) { random_index_ = 0; on_server_init_function_ = [this]() { - std::string cluster_slot_response = singleSlotMasterReplica( + std::string cluster_slot_response = singleSlotPrimaryReplica( fake_upstreams_[0]->localAddress()->ip(), fake_upstreams_[1]->localAddress()->ip()); expectCallClusterSlot(random_index_, cluster_slot_response); }; initialize(); - // foo hashes to slot 12182 which has master node in upstream 0 and replica in upstream 1 - simpleRequestAndResponse(0, makeBulkStringArray({"set", "foo", "bar"}), ":1\r\n"); - simpleRequestAndResponse(1, makeBulkStringArray({"get", "foo"}), "$3\r\nbar\r\n"); + // foo hashes to slot 12182 which has primary node in upstream 0 and replica in upstream 1 + simpleRequestAndResponse(0, makeBulkStringArray({"set", "foo", "bar"}), ":1\r\n", true); + simpleRequestAndResponse(1, makeBulkStringArray({"get", "foo"}), "$3\r\nbar\r\n", true); } // This test sends a simple "get foo" command from a fake // downstream client through the proxy to a fake upstream -// Redis cluster with a single slot with master and replica. +// Redis cluster with a single slot with primary and replica. // The fake server sends a valid response back to the client. // The request and response should make it through the envoy // proxy server code unchanged. @@ -524,13 +548,13 @@ TEST_P(RedisClusterWithReadPolicyIntegrationTest, SingleSlotMasterReplicaReadRep // "cluster slots" command), and one to authenticate the connection // that carries the "get foo" request. 
-TEST_P(RedisClusterWithAuthIntegrationTest, SingleSlotMasterReplica) { +TEST_P(RedisClusterWithAuthIntegrationTest, SingleSlotPrimaryReplica) { random_index_ = 0; on_server_init_function_ = [this]() { - std::string cluster_slot_response = singleSlotMasterReplica( + std::string cluster_slot_response = singleSlotPrimaryReplica( fake_upstreams_[0]->localAddress()->ip(), fake_upstreams_[1]->localAddress()->ip()); - expectCallClusterSlot(0, cluster_slot_response, "somepassword"); + expectCallClusterSlot(0, cluster_slot_response, "", "somepassword"); }; initialize(); @@ -539,7 +563,8 @@ TEST_P(RedisClusterWithAuthIntegrationTest, SingleSlotMasterReplica) { FakeRawConnectionPtr fake_upstream_connection; roundtripToUpstreamStep(fake_upstreams_[random_index_], makeBulkStringArray({"get", "foo"}), - "$3\r\nbar\r\n", redis_client, fake_upstream_connection, "somepassword"); + "$3\r\nbar\r\n", redis_client, fake_upstream_connection, "", + "somepassword"); redis_client->close(); EXPECT_TRUE(fake_upstream_connection->close()); @@ -552,7 +577,7 @@ TEST_P(RedisClusterWithRefreshIntegrationTest, ClusterSlotRequestAfterFailure) { random_index_ = 0; on_server_init_function_ = [this]() { - std::string cluster_slot_response = singleSlotMasterReplica( + std::string cluster_slot_response = singleSlotPrimaryReplica( fake_upstreams_[0]->localAddress()->ip(), fake_upstreams_[1]->localAddress()->ip()); expectCallClusterSlot(random_index_, cluster_slot_response); }; @@ -560,15 +585,16 @@ TEST_P(RedisClusterWithRefreshIntegrationTest, ClusterSlotRequestAfterFailure) { initialize(); // foo hashes to slot 12182 which the proxy believes is at the server reachable via - // fake_upstreams_[0], based on the singleSlotMasterReplica() response above. + // fake_upstreams_[0], based on the singleSlotPrimaryReplica() response above. std::string request = makeBulkStringArray({"get", "foo"}); // The actual error response. 
std::string error_response = "-CLUSTERDOWN The cluster is down\r\n"; + std::string upstream_error_response = "-upstream failure\r\n"; std::string cluster_slots_request = makeBulkStringArray({"CLUSTER", "SLOTS"}); std::string proxy_to_server; IntegrationTcpClientPtr redis_client = makeTcpConnection(lookupPort("redis_proxy")); - redis_client->write(request); + ASSERT_TRUE(redis_client->write(request)); FakeRawConnectionPtr fake_upstream_connection_1, fake_upstream_connection_2; @@ -581,9 +607,9 @@ TEST_P(RedisClusterWithRefreshIntegrationTest, ClusterSlotRequestAfterFailure) { // Send the server down error response from the first fake Redis server back to the proxy. EXPECT_TRUE(fake_upstream_connection_1->write(error_response)); - redis_client->waitForData(error_response); + redis_client->waitForData(upstream_error_response); // The client should receive response unchanged. - EXPECT_EQ(error_response, redis_client->data()); + EXPECT_EQ(upstream_error_response, redis_client->data()); // A new connection should be created to fake_upstreams_[0] for topology discovery. 
proxy_to_server.clear(); diff --git a/test/extensions/clusters/redis/redis_cluster_lb_test.cc b/test/extensions/clusters/redis/redis_cluster_lb_test.cc index bfc1ae6e16be4..b261687454525 100644 --- a/test/extensions/clusters/redis/redis_cluster_lb_test.cc +++ b/test/extensions/clusters/redis/redis_cluster_lb_test.cc @@ -51,7 +51,7 @@ class RedisClusterLoadBalancerTest : public testing::Test { const std::vector>& expected_assignments, bool read_command = false, NetworkFilters::Common::Redis::Client::ReadPolicy read_policy = - NetworkFilters::Common::Redis::Client::ReadPolicy::Master) { + NetworkFilters::Common::Redis::Client::ReadPolicy::Primary) { Upstream::LoadBalancerPtr lb = lb_->factory()->create(); for (auto& assignment : expected_assignments) { @@ -75,7 +75,7 @@ class RedisClusterLoadBalancerTest : public testing::Test { std::shared_ptr factory_; std::unique_ptr lb_; std::shared_ptr info_{new NiceMock()}; - NiceMock random_; + NiceMock random_; }; class RedisLoadBalancerContextImplTest : public testing::Test { @@ -173,22 +173,22 @@ TEST_F(RedisClusterLoadBalancerTest, ReadStrategiesHealthy) { validateAssignment(hosts, replica_assignments, true, NetworkFilters::Common::Redis::Client::ReadPolicy::PreferReplica); - const std::vector> master_assignments = { + const std::vector> primary_assignments = { {0, 0}, {1100, 0}, {2000, 0}, {18382, 0}, {2001, 1}, {2100, 1}, {16383, 1}, {19382, 1}}; - validateAssignment(hosts, master_assignments, true, - NetworkFilters::Common::Redis::Client::ReadPolicy::Master); - validateAssignment(hosts, master_assignments, true, - NetworkFilters::Common::Redis::Client::ReadPolicy::PreferMaster); + validateAssignment(hosts, primary_assignments, true, + NetworkFilters::Common::Redis::Client::ReadPolicy::Primary); + validateAssignment(hosts, primary_assignments, true, + NetworkFilters::Common::Redis::Client::ReadPolicy::PreferPrimary); ON_CALL(random_, random()).WillByDefault(Return(0)); - validateAssignment(hosts, master_assignments, 
true, + validateAssignment(hosts, primary_assignments, true, NetworkFilters::Common::Redis::Client::ReadPolicy::Any); ON_CALL(random_, random()).WillByDefault(Return(1)); validateAssignment(hosts, replica_assignments, true, NetworkFilters::Common::Redis::Client::ReadPolicy::Any); } -TEST_F(RedisClusterLoadBalancerTest, ReadStrategiesUnhealthyMaster) { +TEST_F(RedisClusterLoadBalancerTest, ReadStrategiesUnhealthyPrimary) { Upstream::HostVector hosts{ Upstream::makeTestHost(info_, "tcp://127.0.0.1:90"), Upstream::makeTestHost(info_, "tcp://127.0.0.1:91"), @@ -215,17 +215,17 @@ TEST_F(RedisClusterLoadBalancerTest, ReadStrategiesUnhealthyMaster) { // A list of (hash: host_index) pair const std::vector> replica_assignments = { {0, 2}, {1100, 2}, {2000, 2}, {18382, 2}, {2001, 3}, {2100, 3}, {16383, 3}, {19382, 3}}; - const std::vector> master_assignments = { + const std::vector> primary_assignments = { {0, 0}, {1100, 0}, {2000, 0}, {18382, 0}, {2001, 1}, {2100, 1}, {16383, 1}, {19382, 1}}; validateAssignment(hosts, replica_assignments, true, NetworkFilters::Common::Redis::Client::ReadPolicy::Replica); validateAssignment(hosts, replica_assignments, true, NetworkFilters::Common::Redis::Client::ReadPolicy::PreferReplica); - validateAssignment(hosts, master_assignments, true, - NetworkFilters::Common::Redis::Client::ReadPolicy::Master); + validateAssignment(hosts, primary_assignments, true, + NetworkFilters::Common::Redis::Client::ReadPolicy::Primary); validateAssignment(hosts, replica_assignments, true, - NetworkFilters::Common::Redis::Client::ReadPolicy::PreferMaster); + NetworkFilters::Common::Redis::Client::ReadPolicy::PreferPrimary); ON_CALL(random_, random()).WillByDefault(Return(0)); validateAssignment(hosts, replica_assignments, true, @@ -262,23 +262,23 @@ TEST_F(RedisClusterLoadBalancerTest, ReadStrategiesUnhealthyReplica) { // A list of (hash: host_index) pair const std::vector> replica_assignments = { {0, 2}, {1100, 2}, {2000, 2}, {18382, 2}, {2001, 3}, {2100, 3}, 
{16383, 3}, {19382, 3}}; - const std::vector> master_assignments = { + const std::vector> primary_assignments = { {0, 0}, {1100, 0}, {2000, 0}, {18382, 0}, {2001, 1}, {2100, 1}, {16383, 1}, {19382, 1}}; validateAssignment(hosts, replica_assignments, true, NetworkFilters::Common::Redis::Client::ReadPolicy::Replica); - validateAssignment(hosts, master_assignments, true, + validateAssignment(hosts, primary_assignments, true, NetworkFilters::Common::Redis::Client::ReadPolicy::PreferReplica); - validateAssignment(hosts, master_assignments, true, - NetworkFilters::Common::Redis::Client::ReadPolicy::Master); - validateAssignment(hosts, master_assignments, true, - NetworkFilters::Common::Redis::Client::ReadPolicy::PreferMaster); + validateAssignment(hosts, primary_assignments, true, + NetworkFilters::Common::Redis::Client::ReadPolicy::Primary); + validateAssignment(hosts, primary_assignments, true, + NetworkFilters::Common::Redis::Client::ReadPolicy::PreferPrimary); ON_CALL(random_, random()).WillByDefault(Return(0)); - validateAssignment(hosts, master_assignments, true, + validateAssignment(hosts, primary_assignments, true, NetworkFilters::Common::Redis::Client::ReadPolicy::Any); ON_CALL(random_, random()).WillByDefault(Return(1)); - validateAssignment(hosts, master_assignments, true, + validateAssignment(hosts, primary_assignments, true, NetworkFilters::Common::Redis::Client::ReadPolicy::Any); } @@ -296,15 +296,15 @@ TEST_F(RedisClusterLoadBalancerTest, ReadStrategiesNoReplica) { factory_->onClusterSlotUpdate(std::move(slots), all_hosts); // A list of (hash: host_index) pair - const std::vector> master_assignments = { + const std::vector> primary_assignments = { {0, 0}, {1100, 0}, {2000, 0}, {18382, 0}, {2001, 1}, {2100, 1}, {16383, 1}, {19382, 1}}; - validateAssignment(hosts, master_assignments, true, - NetworkFilters::Common::Redis::Client::ReadPolicy::Master); - validateAssignment(hosts, master_assignments, true, - 
NetworkFilters::Common::Redis::Client::ReadPolicy::PreferMaster); - validateAssignment(hosts, master_assignments, true, + validateAssignment(hosts, primary_assignments, true, + NetworkFilters::Common::Redis::Client::ReadPolicy::Primary); + validateAssignment(hosts, primary_assignments, true, + NetworkFilters::Common::Redis::Client::ReadPolicy::PreferPrimary); + validateAssignment(hosts, primary_assignments, true, NetworkFilters::Common::Redis::Client::ReadPolicy::Any); - validateAssignment(hosts, master_assignments, true, + validateAssignment(hosts, primary_assignments, true, NetworkFilters::Common::Redis::Client::ReadPolicy::PreferReplica); Upstream::LoadBalancerPtr lb = lb_->factory()->create(); @@ -393,11 +393,11 @@ TEST_F(RedisLoadBalancerContextImplTest, Basic) { get_request.asArray().swap(get_foo); RedisLoadBalancerContextImpl context1("foo", true, true, get_request, - NetworkFilters::Common::Redis::Client::ReadPolicy::Master); + NetworkFilters::Common::Redis::Client::ReadPolicy::Primary); EXPECT_EQ(absl::optional(44950), context1.computeHashKey()); EXPECT_EQ(true, context1.isReadCommand()); - EXPECT_EQ(NetworkFilters::Common::Redis::Client::ReadPolicy::Master, context1.readPolicy()); + EXPECT_EQ(NetworkFilters::Common::Redis::Client::ReadPolicy::Primary, context1.readPolicy()); // Simple write command std::vector set_foo(3); @@ -413,11 +413,11 @@ TEST_F(RedisLoadBalancerContextImplTest, Basic) { set_request.asArray().swap(set_foo); RedisLoadBalancerContextImpl context2("foo", true, true, set_request, - NetworkFilters::Common::Redis::Client::ReadPolicy::Master); + NetworkFilters::Common::Redis::Client::ReadPolicy::Primary); EXPECT_EQ(absl::optional(44950), context2.computeHashKey()); EXPECT_EQ(false, context2.isReadCommand()); - EXPECT_EQ(NetworkFilters::Common::Redis::Client::ReadPolicy::Master, context2.readPolicy()); + EXPECT_EQ(NetworkFilters::Common::Redis::Client::ReadPolicy::Primary, context2.readPolicy()); } TEST_F(RedisLoadBalancerContextImplTest, 
CompositeArray) { @@ -435,18 +435,18 @@ TEST_F(RedisLoadBalancerContextImplTest, CompositeArray) { NetworkFilters::Common::Redis::RespValue get_request2{base, get_command, 2, 2}; RedisLoadBalancerContextImpl context1("foo", true, true, get_request1, - NetworkFilters::Common::Redis::Client::ReadPolicy::Master); + NetworkFilters::Common::Redis::Client::ReadPolicy::Primary); EXPECT_EQ(absl::optional(44950), context1.computeHashKey()); EXPECT_EQ(true, context1.isReadCommand()); - EXPECT_EQ(NetworkFilters::Common::Redis::Client::ReadPolicy::Master, context1.readPolicy()); + EXPECT_EQ(NetworkFilters::Common::Redis::Client::ReadPolicy::Primary, context1.readPolicy()); RedisLoadBalancerContextImpl context2("bar", true, true, get_request2, - NetworkFilters::Common::Redis::Client::ReadPolicy::Master); + NetworkFilters::Common::Redis::Client::ReadPolicy::Primary); EXPECT_EQ(absl::optional(37829), context2.computeHashKey()); EXPECT_EQ(true, context2.isReadCommand()); - EXPECT_EQ(NetworkFilters::Common::Redis::Client::ReadPolicy::Master, context2.readPolicy()); + EXPECT_EQ(NetworkFilters::Common::Redis::Client::ReadPolicy::Primary, context2.readPolicy()); // Composite write command NetworkFilters::Common::Redis::RespValue set_command; @@ -455,11 +455,11 @@ TEST_F(RedisLoadBalancerContextImplTest, CompositeArray) { NetworkFilters::Common::Redis::RespValue set_request{base, set_command, 1, 2}; RedisLoadBalancerContextImpl context3("foo", true, true, set_request, - NetworkFilters::Common::Redis::Client::ReadPolicy::Master); + NetworkFilters::Common::Redis::Client::ReadPolicy::Primary); EXPECT_EQ(absl::optional(44950), context3.computeHashKey()); EXPECT_EQ(false, context3.isReadCommand()); - EXPECT_EQ(NetworkFilters::Common::Redis::Client::ReadPolicy::Master, context3.readPolicy()); + EXPECT_EQ(NetworkFilters::Common::Redis::Client::ReadPolicy::Primary, context3.readPolicy()); } TEST_F(RedisLoadBalancerContextImplTest, UpperCaseCommand) { @@ -475,11 +475,11 @@ 
TEST_F(RedisLoadBalancerContextImplTest, UpperCaseCommand) { get_request.asArray().swap(get_foo); RedisLoadBalancerContextImpl context1("foo", true, true, get_request, - NetworkFilters::Common::Redis::Client::ReadPolicy::Master); + NetworkFilters::Common::Redis::Client::ReadPolicy::Primary); EXPECT_EQ(absl::optional(44950), context1.computeHashKey()); EXPECT_EQ(true, context1.isReadCommand()); - EXPECT_EQ(NetworkFilters::Common::Redis::Client::ReadPolicy::Master, context1.readPolicy()); + EXPECT_EQ(NetworkFilters::Common::Redis::Client::ReadPolicy::Primary, context1.readPolicy()); // Simple write command std::vector set_foo(3); @@ -495,11 +495,11 @@ TEST_F(RedisLoadBalancerContextImplTest, UpperCaseCommand) { set_request.asArray().swap(set_foo); RedisLoadBalancerContextImpl context2("foo", true, true, set_request, - NetworkFilters::Common::Redis::Client::ReadPolicy::Master); + NetworkFilters::Common::Redis::Client::ReadPolicy::Primary); EXPECT_EQ(absl::optional(44950), context2.computeHashKey()); EXPECT_EQ(false, context2.isReadCommand()); - EXPECT_EQ(NetworkFilters::Common::Redis::Client::ReadPolicy::Master, context2.readPolicy()); + EXPECT_EQ(NetworkFilters::Common::Redis::Client::ReadPolicy::Primary, context2.readPolicy()); } TEST_F(RedisLoadBalancerContextImplTest, UnsupportedCommand) { @@ -511,11 +511,11 @@ TEST_F(RedisLoadBalancerContextImplTest, UnsupportedCommand) { unknown_request.asArray().swap(unknown); RedisLoadBalancerContextImpl context3("foo", true, true, unknown_request, - NetworkFilters::Common::Redis::Client::ReadPolicy::Master); + NetworkFilters::Common::Redis::Client::ReadPolicy::Primary); EXPECT_EQ(absl::optional(44950), context3.computeHashKey()); EXPECT_EQ(false, context3.isReadCommand()); - EXPECT_EQ(NetworkFilters::Common::Redis::Client::ReadPolicy::Master, context3.readPolicy()); + EXPECT_EQ(NetworkFilters::Common::Redis::Client::ReadPolicy::Primary, context3.readPolicy()); } TEST_F(RedisLoadBalancerContextImplTest, EnforceHashTag) { @@ 
-534,11 +534,11 @@ TEST_F(RedisLoadBalancerContextImplTest, EnforceHashTag) { // Enable_hash tagging should be override when is_redis_cluster is true. This is treated like // "foo" RedisLoadBalancerContextImpl context2("{foo}bar", false, true, set_request, - NetworkFilters::Common::Redis::Client::ReadPolicy::Master); + NetworkFilters::Common::Redis::Client::ReadPolicy::Primary); EXPECT_EQ(absl::optional(44950), context2.computeHashKey()); EXPECT_EQ(false, context2.isReadCommand()); - EXPECT_EQ(NetworkFilters::Common::Redis::Client::ReadPolicy::Master, context2.readPolicy()); + EXPECT_EQ(NetworkFilters::Common::Redis::Client::ReadPolicy::Primary, context2.readPolicy()); } } // namespace Redis diff --git a/test/extensions/clusters/redis/redis_cluster_test.cc b/test/extensions/clusters/redis/redis_cluster_test.cc index 6b9a87ab778a3..42a77b8445bdd 100644 --- a/test/extensions/clusters/redis/redis_cluster_test.cc +++ b/test/extensions/clusters/redis/redis_cluster_test.cc @@ -21,7 +21,8 @@ #include "test/extensions/filters/network/common/redis/mocks.h" #include "test/mocks/local_info/mocks.h" #include "test/mocks/protobuf/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/admin.h" +#include "test/mocks/server/instance.h" #include "test/mocks/ssl/mocks.h" using testing::_; @@ -41,10 +42,14 @@ const std::string BasicConfig = R"EOF( name: name connect_timeout: 0.25s dns_lookup_family: V4_ONLY - hosts: - - socket_address: - address: foo.bar.com - port_value: 22120 + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: foo.bar.com + port_value: 22120 cluster_type: name: envoy.clusters.redis typed_config: @@ -65,7 +70,7 @@ class RedisClusterTest : public testing::Test, create(Upstream::HostConstSharedPtr host, Event::Dispatcher&, const Extensions::NetworkFilters::Common::Redis::Client::Config&, const Extensions::NetworkFilters::Common::Redis::RedisCommandStatsSharedPtr&, - Stats::Scope&, const 
std::string&) override { + Stats::Scope&, const std::string&, const std::string&) override { EXPECT_EQ(22120, host->address()->ip()->port()); return Extensions::NetworkFilters::Common::Redis::Client::ClientPtr{ create_(host->address()->asString())}; @@ -85,10 +90,11 @@ class RedisClusterTest : public testing::Test, return addresses; } - void setupFromV2Yaml(const std::string& yaml) { + void setupFromV3Yaml(const std::string& yaml, bool avoid_boosting = true) { expectRedisSessionCreated(); NiceMock cm; - envoy::config::cluster::v3::Cluster cluster_config = Upstream::parseClusterFromV2Yaml(yaml); + envoy::config::cluster::v3::Cluster cluster_config = + Upstream::parseClusterFromV3Yaml(yaml, avoid_boosting); Envoy::Stats::ScopePtr scope = stats_store_.createScope(fmt::format( "cluster.{}.", cluster_config.alt_stat_name().empty() ? cluster_config.name() : cluster_config.alt_stat_name())); @@ -116,9 +122,9 @@ class RedisClusterTest : public testing::Test, }); } - void setupFactoryFromV2Yaml(const std::string& yaml) { + void setupFactoryFromV3Yaml(const std::string& yaml) { NiceMock cm; - envoy::config::cluster::v3::Cluster cluster_config = Upstream::parseClusterFromV2Yaml(yaml); + envoy::config::cluster::v3::Cluster cluster_config = Upstream::parseClusterFromV3Yaml(yaml); Envoy::Stats::ScopePtr scope = stats_store_.createScope(fmt::format( "cluster.{}.", cluster_config.alt_stat_name().empty() ? 
cluster_config.name() : cluster_config.alt_stat_name())); @@ -160,6 +166,7 @@ class RedisClusterTest : public testing::Test, void expectRedisSessionCreated() { resolve_timer_ = new Event::MockTimer(&dispatcher_); + EXPECT_CALL(*resolve_timer_, disableTimer()); ON_CALL(random_, random()).WillByDefault(Return(0)); } @@ -184,14 +191,14 @@ class RedisClusterTest : public testing::Test, pool_callbacks_->onFailure(); } - NetworkFilters::Common::Redis::RespValuePtr singleSlotMasterReplica(const std::string& master, - const std::string& replica, - int64_t port) const { - std::vector master_1(2); - master_1[0].type(NetworkFilters::Common::Redis::RespType::BulkString); - master_1[0].asString() = master; - master_1[1].type(NetworkFilters::Common::Redis::RespType::Integer); - master_1[1].asInteger() = port; + NetworkFilters::Common::Redis::RespValuePtr singleSlotPrimaryReplica(const std::string& primary, + const std::string& replica, + int64_t port) const { + std::vector primary_1(2); + primary_1[0].type(NetworkFilters::Common::Redis::RespType::BulkString); + primary_1[0].asString() = primary; + primary_1[1].type(NetworkFilters::Common::Redis::RespType::Integer); + primary_1[1].asInteger() = port; std::vector replica_1(2); replica_1[0].type(NetworkFilters::Common::Redis::RespType::BulkString); @@ -205,7 +212,7 @@ class RedisClusterTest : public testing::Test, slot_1[1].type(NetworkFilters::Common::Redis::RespType::Integer); slot_1[1].asInteger() = 16383; slot_1[2].type(NetworkFilters::Common::Redis::RespType::Array); - slot_1[2].asArray().swap(master_1); + slot_1[2].asArray().swap(primary_1); slot_1[3].type(NetworkFilters::Common::Redis::RespType::Array); slot_1[3].asArray().swap(replica_1); @@ -220,18 +227,18 @@ class RedisClusterTest : public testing::Test, return response; } - NetworkFilters::Common::Redis::RespValuePtr twoSlotsMasters() const { - std::vector master_1(2); - master_1[0].type(NetworkFilters::Common::Redis::RespType::BulkString); - master_1[0].asString() = 
"127.0.0.1"; - master_1[1].type(NetworkFilters::Common::Redis::RespType::Integer); - master_1[1].asInteger() = 22120; + NetworkFilters::Common::Redis::RespValuePtr twoSlotsPrimaries() const { + std::vector primary_1(2); + primary_1[0].type(NetworkFilters::Common::Redis::RespType::BulkString); + primary_1[0].asString() = "127.0.0.1"; + primary_1[1].type(NetworkFilters::Common::Redis::RespType::Integer); + primary_1[1].asInteger() = 22120; - std::vector master_2(2); - master_2[0].type(NetworkFilters::Common::Redis::RespType::BulkString); - master_2[0].asString() = "127.0.0.2"; - master_2[1].type(NetworkFilters::Common::Redis::RespType::Integer); - master_2[1].asInteger() = 22120; + std::vector primary_2(2); + primary_2[0].type(NetworkFilters::Common::Redis::RespType::BulkString); + primary_2[0].asString() = "127.0.0.2"; + primary_2[1].type(NetworkFilters::Common::Redis::RespType::Integer); + primary_2[1].asInteger() = 22120; std::vector slot_1(3); slot_1[0].type(NetworkFilters::Common::Redis::RespType::Integer); @@ -239,7 +246,7 @@ class RedisClusterTest : public testing::Test, slot_1[1].type(NetworkFilters::Common::Redis::RespType::Integer); slot_1[1].asInteger() = 9999; slot_1[2].type(NetworkFilters::Common::Redis::RespType::Array); - slot_1[2].asArray().swap(master_1); + slot_1[2].asArray().swap(primary_1); std::vector slot_2(3); slot_2[0].type(NetworkFilters::Common::Redis::RespType::Integer); @@ -247,7 +254,7 @@ class RedisClusterTest : public testing::Test, slot_2[1].type(NetworkFilters::Common::Redis::RespType::Integer); slot_2[1].asInteger() = 16383; slot_2[2].type(NetworkFilters::Common::Redis::RespType::Array); - slot_2[2].asArray().swap(master_2); + slot_2[2].asArray().swap(primary_2); std::vector slots(2); slots[0].type(NetworkFilters::Common::Redis::RespType::Array); @@ -262,18 +269,18 @@ class RedisClusterTest : public testing::Test, return response; } - NetworkFilters::Common::Redis::RespValuePtr twoSlotsMastersWithReplica() const { - std::vector 
master_1(2); - master_1[0].type(NetworkFilters::Common::Redis::RespType::BulkString); - master_1[0].asString() = "127.0.0.1"; - master_1[1].type(NetworkFilters::Common::Redis::RespType::Integer); - master_1[1].asInteger() = 22120; + NetworkFilters::Common::Redis::RespValuePtr twoSlotsPrimariesWithReplica() const { + std::vector primary_1(2); + primary_1[0].type(NetworkFilters::Common::Redis::RespType::BulkString); + primary_1[0].asString() = "127.0.0.1"; + primary_1[1].type(NetworkFilters::Common::Redis::RespType::Integer); + primary_1[1].asInteger() = 22120; - std::vector master_2(2); - master_2[0].type(NetworkFilters::Common::Redis::RespType::BulkString); - master_2[0].asString() = "127.0.0.2"; - master_2[1].type(NetworkFilters::Common::Redis::RespType::Integer); - master_2[1].asInteger() = 22120; + std::vector primary_2(2); + primary_2[0].type(NetworkFilters::Common::Redis::RespType::BulkString); + primary_2[0].asString() = "127.0.0.2"; + primary_2[1].type(NetworkFilters::Common::Redis::RespType::Integer); + primary_2[1].asInteger() = 22120; std::vector replica_1(2); replica_1[0].type(NetworkFilters::Common::Redis::RespType::BulkString); @@ -293,7 +300,7 @@ class RedisClusterTest : public testing::Test, slot_1[1].type(NetworkFilters::Common::Redis::RespType::Integer); slot_1[1].asInteger() = 9999; slot_1[2].type(NetworkFilters::Common::Redis::RespType::Array); - slot_1[2].asArray().swap(master_1); + slot_1[2].asArray().swap(primary_1); slot_1[3].type(NetworkFilters::Common::Redis::RespType::Array); slot_1[3].asArray().swap(replica_1); @@ -303,7 +310,7 @@ class RedisClusterTest : public testing::Test, slot_2[1].type(NetworkFilters::Common::Redis::RespType::Integer); slot_2[1].asInteger() = 16383; slot_2[2].type(NetworkFilters::Common::Redis::RespType::Array); - slot_2[2].asArray().swap(master_2); + slot_2[2].asArray().swap(primary_2); slot_2[3].type(NetworkFilters::Common::Redis::RespType::Array); slot_2[3].asArray().swap(replica_2); @@ -372,27 +379,27 @@ class 
RedisClusterTest : public testing::Test, int64_t slot1_size = idx++; int64_t slot1_range_start_type = idx++; int64_t slot1_range_end_type = idx++; - int64_t master_type = idx++; - int64_t master_size = idx++; - int64_t master_ip_type = idx++; - int64_t master_ip_value = idx++; - int64_t master_port_type = idx++; + int64_t primary_type = idx++; + int64_t primary_size = idx++; + int64_t primary_ip_type = idx++; + int64_t primary_ip_value = idx++; + int64_t primary_port_type = idx++; idx = 0; int64_t replica_size = idx++; int64_t replica_ip_type = idx++; int64_t replica_ip_value = idx++; int64_t replica_port_type = idx++; - std::vector master_1_array; - if (flags.test(master_size)) { + std::vector primary_1_array; + if (flags.test(primary_size)) { // Ip field. - if (flags.test(master_ip_value)) { - master_1_array.push_back(createStringField(flags.test(master_ip_type), "127.0.0.1")); + if (flags.test(primary_ip_value)) { + primary_1_array.push_back(createStringField(flags.test(primary_ip_type), "127.0.0.1")); } else { - master_1_array.push_back(createStringField(flags.test(master_ip_type), "bad ip foo")); + primary_1_array.push_back(createStringField(flags.test(primary_ip_type), "bad ip foo")); } // Port field. 
- master_1_array.push_back(createIntegerField(flags.test(master_port_type), 22120)); + primary_1_array.push_back(createIntegerField(flags.test(primary_port_type), 22120)); } std::vector replica_1_array; @@ -413,7 +420,7 @@ class RedisClusterTest : public testing::Test, if (flags.test(slot1_size)) { slot_1_array.push_back(createIntegerField(flags.test(slot1_range_start_type), 0)); slot_1_array.push_back(createIntegerField(flags.test(slot1_range_end_type), 16383)); - slot_1_array.push_back(createArrayField(flags.test(master_type), master_1_array)); + slot_1_array.push_back(createArrayField(flags.test(primary_type), primary_1_array)); if (replica_flags.any()) { slot_1_array.push_back(createArrayField(replica_flags.test(replica_size), replica_1_array)); } @@ -452,7 +459,7 @@ class RedisClusterTest : public testing::Test, } void testBasicSetup(const std::string& config, const std::string& expected_discovery_address) { - setupFromV2Yaml(config); + setupFromV3Yaml(config); const std::list resolved_addresses{"127.0.0.1", "127.0.0.2"}; expectResolveDiscovery(Network::DnsLookupFamily::V4Only, expected_discovery_address, resolved_addresses); @@ -463,30 +470,30 @@ class RedisClusterTest : public testing::Test, cluster_->initialize([&]() -> void { initialized_.ready(); }); EXPECT_CALL(*cluster_callback_, onClusterSlotUpdate(_, _)).Times(1); - expectClusterSlotResponse(singleSlotMasterReplica("127.0.0.1", "127.0.0.2", 22120)); + expectClusterSlotResponse(singleSlotPrimaryReplica("127.0.0.1", "127.0.0.2", 22120)); expectHealthyHosts(std::list({"127.0.0.1:22120", "127.0.0.2:22120"})); - // Promote replica to master + // Promote replica to primary expectRedisResolve(); EXPECT_CALL(membership_updated_, ready()); resolve_timer_->invokeCallback(); EXPECT_CALL(*cluster_callback_, onClusterSlotUpdate(_, _)).Times(1); - expectClusterSlotResponse(twoSlotsMasters()); + expectClusterSlotResponse(twoSlotsPrimaries()); expectHealthyHosts(std::list({"127.0.0.1:22120", "127.0.0.2:22120"})); // 
No change. expectRedisResolve(); resolve_timer_->invokeCallback(); EXPECT_CALL(*cluster_callback_, onClusterSlotUpdate(_, _)).Times(1).WillOnce(Return(false)); - expectClusterSlotResponse(twoSlotsMasters()); + expectClusterSlotResponse(twoSlotsPrimaries()); expectHealthyHosts(std::list({"127.0.0.1:22120", "127.0.0.2:22120"})); - // Add replicas to masters + // Add replicas to primaries expectRedisResolve(); EXPECT_CALL(membership_updated_, ready()); resolve_timer_->invokeCallback(); EXPECT_CALL(*cluster_callback_, onClusterSlotUpdate(_, _)).Times(1); - expectClusterSlotResponse(twoSlotsMastersWithReplica()); + expectClusterSlotResponse(twoSlotsPrimariesWithReplica()); expectHealthyHosts(std::list( {"127.0.0.1:22120", "127.0.0.3:22120", "127.0.0.2:22120", "127.0.0.4:22120"})); @@ -494,7 +501,7 @@ class RedisClusterTest : public testing::Test, expectRedisResolve(); resolve_timer_->invokeCallback(); EXPECT_CALL(*cluster_callback_, onClusterSlotUpdate(_, _)).Times(1).WillOnce(Return(false)); - expectClusterSlotResponse(twoSlotsMastersWithReplica()); + expectClusterSlotResponse(twoSlotsPrimariesWithReplica()); expectHealthyHosts(std::list( {"127.0.0.1:22120", "127.0.0.3:22120", "127.0.0.2:22120", "127.0.0.4:22120"})); @@ -503,7 +510,7 @@ class RedisClusterTest : public testing::Test, EXPECT_CALL(membership_updated_, ready()); resolve_timer_->invokeCallback(); EXPECT_CALL(*cluster_callback_, onClusterSlotUpdate(_, _)).Times(1); - expectClusterSlotResponse(singleSlotMasterReplica("127.0.0.1", "127.0.0.2", 22120)); + expectClusterSlotResponse(singleSlotPrimaryReplica("127.0.0.1", "127.0.0.2", 22120)); expectHealthyHosts(std::list({"127.0.0.1:22120", "127.0.0.2:22120"})); } @@ -542,7 +549,7 @@ class RedisClusterTest : public testing::Test, Ssl::MockContextManager ssl_context_manager_; std::shared_ptr> dns_resolver_{ new NiceMock}; - NiceMock random_; + NiceMock random_; NiceMock tls_; Event::MockTimer* resolve_timer_; ReadyWatcher membership_updated_; @@ -614,10 +621,14 @@ 
TEST_P(RedisDnsParamTest, ImmediateResolveDns) { connect_timeout: 0.25s )EOF" + std::get<0>(GetParam()) + R"EOF( - hosts: - - socket_address: - address: foo.bar.com - port_value: 22120 + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: foo.bar.com + port_value: 22120 cluster_type: name: envoy.clusters.redis typed_config: @@ -627,7 +638,7 @@ TEST_P(RedisDnsParamTest, ImmediateResolveDns) { cluster_refresh_timeout: 0.25s )EOF"; - setupFromV2Yaml(config); + setupFromV3Yaml(config); expectRedisResolve(true); EXPECT_CALL(*dns_resolver_, resolve("foo.bar.com", std::get<1>(GetParam()), _)) @@ -638,7 +649,7 @@ TEST_P(RedisDnsParamTest, ImmediateResolveDns) { TestUtility::makeDnsResponse(address_pair)); EXPECT_CALL(*cluster_callback_, onClusterSlotUpdate(_, _)).Times(1); expectClusterSlotResponse( - singleSlotMasterReplica(address_pair.front(), address_pair.back(), 22120)); + singleSlotPrimaryReplica(address_pair.front(), address_pair.back(), 22120)); return nullptr; })); @@ -651,7 +662,7 @@ TEST_P(RedisDnsParamTest, ImmediateResolveDns) { TEST_F(RedisClusterTest, EmptyDnsResponse) { Event::MockTimer* dns_timer = new NiceMock(&dispatcher_); - setupFromV2Yaml(BasicConfig); + setupFromV3Yaml(BasicConfig); const std::list resolved_addresses{}; EXPECT_CALL(*dns_timer, enableTimer(_, _)); expectResolveDiscovery(Network::DnsLookupFamily::V4Only, "foo.bar.com", resolved_addresses); @@ -675,7 +686,7 @@ TEST_F(RedisClusterTest, EmptyDnsResponse) { TEST_F(RedisClusterTest, FailedDnsResponse) { Event::MockTimer* dns_timer = new NiceMock(&dispatcher_); - setupFromV2Yaml(BasicConfig); + setupFromV3Yaml(BasicConfig); const std::list resolved_addresses{}; EXPECT_CALL(*dns_timer, enableTimer(_, _)); expectResolveDiscovery(Network::DnsLookupFamily::V4Only, "foo.bar.com", resolved_addresses, @@ -732,7 +743,7 @@ TEST_F(RedisClusterTest, Basic) { } TEST_F(RedisClusterTest, RedisResolveFailure) { - setupFromV2Yaml(BasicConfig); + 
setupFromV3Yaml(BasicConfig); const std::list resolved_addresses{"127.0.0.1", "127.0.0.2"}; expectResolveDiscovery(Network::DnsLookupFamily::V4Only, "foo.bar.com", resolved_addresses); expectRedisResolve(true); @@ -749,7 +760,7 @@ TEST_F(RedisClusterTest, RedisResolveFailure) { EXPECT_CALL(membership_updated_, ready()); EXPECT_CALL(initialized_, ready()); EXPECT_CALL(*cluster_callback_, onClusterSlotUpdate(_, _)).Times(1); - expectClusterSlotResponse(singleSlotMasterReplica("127.0.0.1", "127.0.0.2", 22120)); + expectClusterSlotResponse(singleSlotPrimaryReplica("127.0.0.1", "127.0.0.2", 22120)); expectHealthyHosts(std::list({"127.0.0.1:22120", "127.0.0.2:22120"})); // Expect no change if resolve failed. @@ -766,10 +777,14 @@ TEST_F(RedisClusterTest, FactoryInitNotRedisClusterTypeFailure) { name: name connect_timeout: 0.25s dns_lookup_family: V4_ONLY - hosts: - - socket_address: - address: foo.bar.com - port_value: 22120 + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: foo.bar.com + port_value: 22120 cluster_type: name: envoy.clusters.memcached typed_config: @@ -779,16 +794,16 @@ TEST_F(RedisClusterTest, FactoryInitNotRedisClusterTypeFailure) { cluster_refresh_timeout: 0.25s )EOF"; - EXPECT_THROW_WITH_MESSAGE(setupFactoryFromV2Yaml(basic_yaml_hosts), EnvoyException, + EXPECT_THROW_WITH_MESSAGE(setupFactoryFromV3Yaml(basic_yaml_hosts), EnvoyException, "Redis cluster can only created with redis cluster type."); } TEST_F(RedisClusterTest, FactoryInitRedisClusterTypeSuccess) { - setupFactoryFromV2Yaml(BasicConfig); + setupFactoryFromV3Yaml(BasicConfig); } TEST_F(RedisClusterTest, RedisErrorResponse) { - setupFromV2Yaml(BasicConfig); + setupFromV3Yaml(BasicConfig); const std::list resolved_addresses{"127.0.0.1", "127.0.0.2"}; expectResolveDiscovery(Network::DnsLookupFamily::V4Only, "foo.bar.com", resolved_addresses); expectRedisResolve(true); @@ -817,9 +832,9 @@ TEST_F(RedisClusterTest, RedisErrorResponse) { 
EXPECT_CALL(membership_updated_, ready()); EXPECT_CALL(initialized_, ready()); EXPECT_CALL(*cluster_callback_, onClusterSlotUpdate(_, _)).Times(1); - std::bitset single_slot_master(0xfff); + std::bitset single_slot_primary(0xfff); std::bitset no_replica(0); - expectClusterSlotResponse(createResponse(single_slot_master, no_replica)); + expectClusterSlotResponse(createResponse(single_slot_primary, no_replica)); expectHealthyHosts(std::list({"127.0.0.1:22120"})); // Expect no change if resolve failed. @@ -843,7 +858,7 @@ TEST_F(RedisClusterTest, RedisErrorResponse) { } TEST_F(RedisClusterTest, RedisReplicaErrorResponse) { - setupFromV2Yaml(BasicConfig); + setupFromV3Yaml(BasicConfig); const std::list resolved_addresses{"127.0.0.1", "127.0.0.2"}; expectResolveDiscovery(Network::DnsLookupFamily::V4Only, "foo.bar.com", resolved_addresses); expectRedisResolve(true); @@ -853,9 +868,9 @@ TEST_F(RedisClusterTest, RedisReplicaErrorResponse) { EXPECT_CALL(membership_updated_, ready()); EXPECT_CALL(initialized_, ready()); EXPECT_CALL(*cluster_callback_, onClusterSlotUpdate(_, _)).Times(1); - std::bitset single_slot_master(0xfff); + std::bitset single_slot_primary(0xfff); std::bitset no_replica(0); - expectClusterSlotResponse(createResponse(single_slot_master, no_replica)); + expectClusterSlotResponse(createResponse(single_slot_primary, no_replica)); expectHealthyHosts(std::list({"127.0.0.1:22120"})); // Expect no change if resolve failed. 
@@ -871,7 +886,7 @@ TEST_F(RedisClusterTest, RedisReplicaErrorResponse) { EXPECT_CALL(*cluster_callback_, onClusterSlotUpdate(_, _)).Times(1).WillOnce(Return(false)); } expectHealthyHosts(std::list({"127.0.0.1:22120"})); - expectClusterSlotResponse(createResponse(single_slot_master, replica_flags)); + expectClusterSlotResponse(createResponse(single_slot_primary, replica_flags)); EXPECT_EQ(++update_attempt, cluster_->info()->stats().update_attempt_.value()); if (!(replica_flags.all() || replica_flags.none())) { EXPECT_EQ(++update_failure, cluster_->info()->stats().update_failure_.value()); @@ -880,7 +895,7 @@ TEST_F(RedisClusterTest, RedisReplicaErrorResponse) { } TEST_F(RedisClusterTest, DnsDiscoveryResolverBasic) { - setupFromV2Yaml(BasicConfig); + setupFromV3Yaml(BasicConfig); testDnsResolve("foo.bar.com", 22120); } @@ -889,13 +904,19 @@ TEST_F(RedisClusterTest, MultipleDnsDiscovery) { name: name connect_timeout: 0.25s dns_lookup_family: V4_ONLY - hosts: - - socket_address: - address: foo.bar.com - port_value: 22120 - - socket_address: - address: foo1.bar.com - port_value: 22120 + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: foo.bar.com + port_value: 22120 + - endpoint: + address: + socket_address: + address: foo1.bar.com + port_value: 22120 cluster_type: name: envoy.clusters.redis typed_config: @@ -905,7 +926,7 @@ TEST_F(RedisClusterTest, MultipleDnsDiscovery) { cluster_refresh_timeout: 0.25s )EOF"; - setupFromV2Yaml(config); + setupFromV3Yaml(config); // Only single in-flight "cluster slots" call. 
expectRedisResolve(true); @@ -936,7 +957,7 @@ TEST_F(RedisClusterTest, MultipleDnsDiscovery) { } TEST_F(RedisClusterTest, HostRemovalAfterHcFail) { - setupFromV2Yaml(BasicConfig); + setupFromV3Yaml(BasicConfig); auto health_checker = std::make_shared(); EXPECT_CALL(*health_checker, start()); EXPECT_CALL(*health_checker, addHostCheckCompleteCb(_)).Times(2); @@ -951,7 +972,7 @@ TEST_F(RedisClusterTest, HostRemovalAfterHcFail) { cluster_->initialize([&]() -> void { initialized_.ready(); }); EXPECT_CALL(*cluster_callback_, onClusterSlotUpdate(_, _)).Times(1); - expectClusterSlotResponse(singleSlotMasterReplica("127.0.0.1", "127.0.0.2", 22120)); + expectClusterSlotResponse(singleSlotPrimaryReplica("127.0.0.1", "127.0.0.2", 22120)); // Verify that both hosts are initially marked with FAILED_ACTIVE_HC, then // clear the flag to simulate that these hosts have been successfully health diff --git a/test/extensions/common/BUILD b/test/extensions/common/BUILD index 216c9b56b094f..e976a4fc4c1ba 100644 --- a/test/extensions/common/BUILD +++ b/test/extensions/common/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( diff --git a/test/extensions/common/aws/BUILD b/test/extensions/common/aws/BUILD index a6c37b7001019..eae532ee27f90 100644 --- a/test/extensions/common/aws/BUILD +++ b/test/extensions/common/aws/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", @@ -7,6 +5,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_mock( @@ -76,6 +76,7 @@ envoy_cc_test( srcs = [ "aws_metadata_fetcher_integration_test.cc", ], + tags = ["fails_on_windows"], deps = [ "//source/common/common:fmt_lib", "//source/extensions/common/aws:utility_lib", diff --git a/test/extensions/common/aws/aws_metadata_fetcher_integration_test.cc 
b/test/extensions/common/aws/aws_metadata_fetcher_integration_test.cc index 499dec138ce9b..d6bd8e2b698d4 100644 --- a/test/extensions/common/aws/aws_metadata_fetcher_integration_test.cc +++ b/test/extensions/common/aws/aws_metadata_fetcher_integration_test.cc @@ -71,11 +71,6 @@ class AwsMetadataIntegrationTestBase : public ::testing::Test, public BaseIntegr } void SetUp() override { BaseIntegrationTest::initialize(); } - - void TearDown() override { - test_server_.reset(); - fake_upstreams_.clear(); - } }; class AwsMetadataIntegrationTestSuccess : public AwsMetadataIntegrationTestBase { diff --git a/test/extensions/common/aws/signer_impl_test.cc b/test/extensions/common/aws/signer_impl_test.cc index 31ed9f7cbd9dd..857399749fb1d 100644 --- a/test/extensions/common/aws/signer_impl_test.cc +++ b/test/extensions/common/aws/signer_impl_test.cc @@ -41,6 +41,27 @@ class SignerImplTest : public testing::Test { message_->body() = std::make_unique(body); } + void expectSignHeaders(absl::string_view service_name, absl::string_view signature, + absl::string_view payload) { + auto* credentials_provider = new NiceMock(); + EXPECT_CALL(*credentials_provider, getCredentials()).WillOnce(Return(credentials_)); + Http::TestRequestHeaderMapImpl headers{}; + headers.setMethod("GET"); + headers.setPath("/"); + headers.addCopy(Http::LowerCaseString("host"), "www.example.com"); + + SignerImpl signer(service_name, "region", CredentialsProviderSharedPtr{credentials_provider}, + time_system_); + signer.sign(headers); + + EXPECT_EQ(fmt::format("AWS4-HMAC-SHA256 Credential=akid/20180102/region/{}/aws4_request, " + "SignedHeaders=host;x-amz-content-sha256;x-amz-date, " + "Signature={}", + service_name, signature), + headers.get(Http::CustomHeaders::get().Authorization)->value().getStringView()); + EXPECT_EQ(payload, headers.get(SignatureHeaders::get().ContentSha256)->value().getStringView()); + } + NiceMock* credentials_provider_; Event::SimulatedTimeSystem time_system_; Http::RequestMessagePtr 
message_; @@ -54,7 +75,7 @@ class SignerImplTest : public testing::Test { TEST_F(SignerImplTest, AnonymousCredentials) { EXPECT_CALL(*credentials_provider_, getCredentials()).WillOnce(Return(Credentials())); signer_.sign(*message_); - EXPECT_EQ(nullptr, message_->headers().Authorization()); + EXPECT_EQ(nullptr, message_->headers().get(Http::CustomHeaders::get().Authorization)); } // HTTP :method header is required @@ -62,7 +83,7 @@ TEST_F(SignerImplTest, MissingMethodException) { EXPECT_CALL(*credentials_provider_, getCredentials()).WillOnce(Return(credentials_)); EXPECT_THROW_WITH_MESSAGE(signer_.sign(*message_), EnvoyException, "Message is missing :method header"); - EXPECT_EQ(nullptr, message_->headers().Authorization()); + EXPECT_EQ(nullptr, message_->headers().get(Http::CustomHeaders::get().Authorization)); } // HTTP :path header is required @@ -71,7 +92,7 @@ TEST_F(SignerImplTest, MissingPathException) { addMethod("GET"); EXPECT_THROW_WITH_MESSAGE(signer_.sign(*message_), EnvoyException, "Message is missing :path header"); - EXPECT_EQ(nullptr, message_->headers().Authorization()); + EXPECT_EQ(nullptr, message_->headers().get(Http::CustomHeaders::get().Authorization)); } // Verify we sign the date header @@ -83,10 +104,11 @@ TEST_F(SignerImplTest, SignDateHeader) { EXPECT_NE(nullptr, message_->headers().get(SignatureHeaders::get().ContentSha256)); EXPECT_EQ("20180102T030400Z", message_->headers().get(SignatureHeaders::get().Date)->value().getStringView()); - EXPECT_EQ("AWS4-HMAC-SHA256 Credential=akid/20180102/region/service/aws4_request, " - "SignedHeaders=x-amz-content-sha256;x-amz-date, " - "Signature=4ee6aa9355259c18133f150b139ea9aeb7969c9408ad361b2151f50a516afe42", - message_->headers().Authorization()->value().getStringView()); + EXPECT_EQ( + "AWS4-HMAC-SHA256 Credential=akid/20180102/region/service/aws4_request, " + "SignedHeaders=x-amz-content-sha256;x-amz-date, " + "Signature=4ee6aa9355259c18133f150b139ea9aeb7969c9408ad361b2151f50a516afe42", + 
message_->headers().get(Http::CustomHeaders::get().Authorization)->value().getStringView()); } // Verify we sign the security token header if the token is present in the credentials @@ -98,10 +120,11 @@ TEST_F(SignerImplTest, SignSecurityTokenHeader) { EXPECT_EQ( "token", message_->headers().get(SignatureHeaders::get().SecurityToken)->value().getStringView()); - EXPECT_EQ("AWS4-HMAC-SHA256 Credential=akid/20180102/region/service/aws4_request, " - "SignedHeaders=x-amz-content-sha256;x-amz-date;x-amz-security-token, " - "Signature=1d42526aabf7d8b6d7d33d9db43b03537300cc7e6bb2817e349749e0a08f5b5e", - message_->headers().Authorization()->value().getStringView()); + EXPECT_EQ( + "AWS4-HMAC-SHA256 Credential=akid/20180102/region/service/aws4_request, " + "SignedHeaders=x-amz-content-sha256;x-amz-date;x-amz-security-token, " + "Signature=1d42526aabf7d8b6d7d33d9db43b03537300cc7e6bb2817e349749e0a08f5b5e", + message_->headers().get(Http::CustomHeaders::get().Authorization)->value().getStringView()); } // Verify we sign the content header as the hashed empty string if the body is empty @@ -113,10 +136,11 @@ TEST_F(SignerImplTest, SignEmptyContentHeader) { EXPECT_EQ( SignatureConstants::get().HashedEmptyString, message_->headers().get(SignatureHeaders::get().ContentSha256)->value().getStringView()); - EXPECT_EQ("AWS4-HMAC-SHA256 Credential=akid/20180102/region/service/aws4_request, " - "SignedHeaders=x-amz-content-sha256;x-amz-date, " - "Signature=4ee6aa9355259c18133f150b139ea9aeb7969c9408ad361b2151f50a516afe42", - message_->headers().Authorization()->value().getStringView()); + EXPECT_EQ( + "AWS4-HMAC-SHA256 Credential=akid/20180102/region/service/aws4_request, " + "SignedHeaders=x-amz-content-sha256;x-amz-date, " + "Signature=4ee6aa9355259c18133f150b139ea9aeb7969c9408ad361b2151f50a516afe42", + message_->headers().get(Http::CustomHeaders::get().Authorization)->value().getStringView()); } // Verify we sign the content header correctly when we have a body @@ -129,10 +153,11 @@ 
TEST_F(SignerImplTest, SignContentHeader) { EXPECT_EQ( "937e8d5fbb48bd4949536cd65b8d35c426b80d2f830c5c308e2cdec422ae2244", message_->headers().get(SignatureHeaders::get().ContentSha256)->value().getStringView()); - EXPECT_EQ("AWS4-HMAC-SHA256 Credential=akid/20180102/region/service/aws4_request, " - "SignedHeaders=x-amz-content-sha256;x-amz-date, " - "Signature=4eab89c36f45f2032d6010ba1adab93f8510ddd6afe540821f3a05bb0253e27b", - message_->headers().Authorization()->value().getStringView()); + EXPECT_EQ( + "AWS4-HMAC-SHA256 Credential=akid/20180102/region/service/aws4_request, " + "SignedHeaders=x-amz-content-sha256;x-amz-date, " + "Signature=4eab89c36f45f2032d6010ba1adab93f8510ddd6afe540821f3a05bb0253e27b", + message_->headers().get(Http::CustomHeaders::get().Authorization)->value().getStringView()); } // Verify we sign some extra headers @@ -144,10 +169,11 @@ TEST_F(SignerImplTest, SignExtraHeaders) { addHeader("b", "b_value"); addHeader("c", "c_value"); signer_.sign(*message_); - EXPECT_EQ("AWS4-HMAC-SHA256 Credential=akid/20180102/region/service/aws4_request, " - "SignedHeaders=a;b;c;x-amz-content-sha256;x-amz-date, " - "Signature=0940025fcecfef5d7ee30e0a26a0957e116560e374878cd86ef4316c53ae9e81", - message_->headers().Authorization()->value().getStringView()); + EXPECT_EQ( + "AWS4-HMAC-SHA256 Credential=akid/20180102/region/service/aws4_request, " + "SignedHeaders=a;b;c;x-amz-content-sha256;x-amz-date, " + "Signature=0940025fcecfef5d7ee30e0a26a0957e116560e374878cd86ef4316c53ae9e81", + message_->headers().get(Http::CustomHeaders::get().Authorization)->value().getStringView()); } // Verify signing a host header @@ -157,52 +183,23 @@ TEST_F(SignerImplTest, SignHostHeader) { addPath("/"); addHeader("host", "www.example.com"); signer_.sign(*message_); - EXPECT_EQ("AWS4-HMAC-SHA256 Credential=akid/20180102/region/service/aws4_request, " - "SignedHeaders=host;x-amz-content-sha256;x-amz-date, " - 
"Signature=d9fd9be575a254c924d843964b063d770181d938ae818f5b603ef0575a5ce2cd", - message_->headers().Authorization()->value().getStringView()); -} - -// Verify signing headers for S3 -TEST_F(SignerImplTest, SignHeadersS3) { - auto* credentials_provider = new NiceMock(); - EXPECT_CALL(*credentials_provider, getCredentials()).WillOnce(Return(credentials_)); - Http::TestRequestHeaderMapImpl headers{}; - headers.setMethod("GET"); - headers.setPath("/"); - headers.addCopy(Http::LowerCaseString("host"), "www.example.com"); - - SignerImpl signer("s3", "region", CredentialsProviderSharedPtr{credentials_provider}, - time_system_); - signer.sign(headers); - - EXPECT_EQ("AWS4-HMAC-SHA256 Credential=akid/20180102/region/s3/aws4_request, " - "SignedHeaders=host;x-amz-content-sha256;x-amz-date, " - "Signature=d97cae067345792b78d2bad746f25c729b9eb4701127e13a7c80398f8216a167", - headers.Authorization()->value().getStringView()); - EXPECT_EQ(SignatureConstants::get().UnsignedPayload, - headers.get(SignatureHeaders::get().ContentSha256)->value().getStringView()); + EXPECT_EQ( + "AWS4-HMAC-SHA256 Credential=akid/20180102/region/service/aws4_request, " + "SignedHeaders=host;x-amz-content-sha256;x-amz-date, " + "Signature=d9fd9be575a254c924d843964b063d770181d938ae818f5b603ef0575a5ce2cd", + message_->headers().get(Http::CustomHeaders::get().Authorization)->value().getStringView()); } -// Verify signing headers for non S3 -TEST_F(SignerImplTest, SignHeadersNonS3) { - auto* credentials_provider = new NiceMock(); - EXPECT_CALL(*credentials_provider, getCredentials()).WillOnce(Return(credentials_)); - Http::TestRequestHeaderMapImpl headers{}; - headers.setMethod("GET"); - headers.setPath("/"); - headers.addCopy(Http::LowerCaseString("host"), "www.example.com"); - - SignerImpl signer("service", "region", CredentialsProviderSharedPtr{credentials_provider}, - time_system_); - signer.sign(headers); - - EXPECT_EQ("AWS4-HMAC-SHA256 Credential=akid/20180102/region/service/aws4_request, " - 
"SignedHeaders=host;x-amz-content-sha256;x-amz-date, " - "Signature=d9fd9be575a254c924d843964b063d770181d938ae818f5b603ef0575a5ce2cd", - headers.Authorization()->value().getStringView()); - EXPECT_EQ(SignatureConstants::get().HashedEmptyString, - headers.get(SignatureHeaders::get().ContentSha256)->value().getStringView()); +// Verify signing headers for services. +TEST_F(SignerImplTest, SignHeadersByService) { + expectSignHeaders("s3", "d97cae067345792b78d2bad746f25c729b9eb4701127e13a7c80398f8216a167", + SignatureConstants::get().UnsignedPayload); + expectSignHeaders("service", "d9fd9be575a254c924d843964b063d770181d938ae818f5b603ef0575a5ce2cd", + SignatureConstants::get().HashedEmptyString); + expectSignHeaders("es", "0fd9c974bb2ad16c8d8a314dca4f6db151d32cbd04748d9c018afee2a685a02e", + SignatureConstants::get().UnsignedPayload); + expectSignHeaders("glacier", "8d1f241d77c64cda57b042cd312180f16e98dbd7a96e5545681430f8dbde45a0", + SignatureConstants::get().UnsignedPayload); } } // namespace diff --git a/test/extensions/common/dynamic_forward_proxy/BUILD b/test/extensions/common/dynamic_forward_proxy/BUILD index eb51afa1ce875..3452905e285bf 100644 --- a/test/extensions/common/dynamic_forward_proxy/BUILD +++ b/test/extensions/common/dynamic_forward_proxy/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", @@ -7,6 +5,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( @@ -21,17 +21,34 @@ envoy_cc_test( "//test/mocks/runtime:runtime_mocks", "//test/mocks/thread_local:thread_local_mocks", "//test/test_common:simulated_time_system_lib", + "//test/test_common:test_runtime_lib", "@envoy_api//envoy/config/cluster/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/common/dynamic_forward_proxy/v3:pkg_cc_proto", ], ) +envoy_cc_test( + name = "dns_cache_resource_manager_test", + srcs = ["dns_cache_resource_manager_test.cc"], + deps = [ + ":mocks", + 
"//source/common/config:utility_lib", + "//source/extensions/common/dynamic_forward_proxy:dns_cache_impl", + "//source/extensions/common/dynamic_forward_proxy:dns_cache_resource_manager", + "//test/mocks/runtime:runtime_mocks", + "//test/mocks/stats:stats_mocks", + "//test/test_common:utility_lib", + "@envoy_api//envoy/extensions/common/dynamic_forward_proxy/v3:pkg_cc_proto", + ], +) + envoy_cc_mock( name = "mocks", srcs = ["mocks.cc"], hdrs = ["mocks.h"], deps = [ - "//source/extensions/common/dynamic_forward_proxy:dns_cache_interface", + "//source/extensions/common/dynamic_forward_proxy:dns_cache_impl", + "//test/mocks/upstream:upstream_mocks", "@envoy_api//envoy/extensions/common/dynamic_forward_proxy/v3:pkg_cc_proto", ], ) diff --git a/test/extensions/common/dynamic_forward_proxy/dns_cache_impl_test.cc b/test/extensions/common/dynamic_forward_proxy/dns_cache_impl_test.cc index d3bf78619891c..c12f94d4e99b0 100644 --- a/test/extensions/common/dynamic_forward_proxy/dns_cache_impl_test.cc +++ b/test/extensions/common/dynamic_forward_proxy/dns_cache_impl_test.cc @@ -11,6 +11,7 @@ #include "test/mocks/runtime/mocks.h" #include "test/mocks/thread_local/mocks.h" #include "test/test_common/simulated_time_system.h" +#include "test/test_common/test_runtime.h" #include "test/test_common/utility.h" using testing::InSequence; @@ -30,7 +31,8 @@ class DnsCacheImplTest : public testing::Test, public Event::TestUsingSimulatedT config_.set_dns_lookup_family(envoy::config::cluster::v3::Cluster::V4_ONLY); EXPECT_CALL(dispatcher_, createDnsResolver(_, _)).WillOnce(Return(resolver_)); - dns_cache_ = std::make_unique(dispatcher_, tls_, random_, store_, config_); + dns_cache_ = + std::make_unique(dispatcher_, tls_, random_, loader_, store_, config_); update_callbacks_handle_ = dns_cache_->addUpdateCallbacks(update_callbacks_); } @@ -58,7 +60,8 @@ class DnsCacheImplTest : public testing::Test, public Event::TestUsingSimulatedT NiceMock dispatcher_; std::shared_ptr 
resolver_{std::make_shared()}; NiceMock tls_; - NiceMock random_; + NiceMock random_; + NiceMock loader_; Stats::IsolatedStoreImpl store_; std::unique_ptr dns_cache_; MockUpdateCallbacks update_callbacks_; @@ -642,13 +645,68 @@ TEST_F(DnsCacheImplTest, MaxHostOverflow) { EXPECT_EQ(1, TestUtility::findCounter(store_, "dns_cache.foo.host_overflow")->value()); } +TEST_F(DnsCacheImplTest, CircuitBreakersNotInvoked) { + initialize(); + + auto raii_ptr = dns_cache_->canCreateDnsRequest(absl::nullopt); + EXPECT_NE(raii_ptr.get(), nullptr); +} + +TEST_F(DnsCacheImplTest, DnsCacheCircuitBreakersOverflow) { + config_.mutable_dns_cache_circuit_breaker()->mutable_max_pending_requests()->set_value(0); + initialize(); + + auto raii_ptr = dns_cache_->canCreateDnsRequest(absl::nullopt); + EXPECT_EQ(raii_ptr.get(), nullptr); + EXPECT_EQ(1, TestUtility::findCounter(store_, "dns_cache.foo.dns_rq_pending_overflow")->value()); +} + +TEST_F(DnsCacheImplTest, ClustersCircuitBreakersOverflow) { + initialize(); + NiceMock pending_requests_; + + EXPECT_CALL(pending_requests_, canCreate()).WillOnce(Return(false)); + auto raii_ptr = dns_cache_->canCreateDnsRequest(pending_requests_); + EXPECT_EQ(raii_ptr.get(), nullptr); + EXPECT_EQ(0, TestUtility::findCounter(store_, "dns_cache.foo.dns_rq_pending_overflow")->value()); +} + +TEST(DnsCacheImplOptionsTest, UseTcpForDnsLookupsOptionSet) { + NiceMock dispatcher; + std::shared_ptr resolver{std::make_shared()}; + NiceMock tls; + NiceMock random; + NiceMock loader; + Stats::IsolatedStoreImpl store; + + envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig config; + config.set_use_tcp_for_dns_lookups(true); + EXPECT_CALL(dispatcher, createDnsResolver(_, true)).WillOnce(Return(resolver)); + DnsCacheImpl dns_cache_(dispatcher, tls, random, loader, store, config); +} + +TEST(DnsCacheImplOptionsTest, UseTcpForDnsLookupsOptionUnSet) { + NiceMock dispatcher; + std::shared_ptr resolver{std::make_shared()}; + NiceMock tls; + NiceMock random; + 
NiceMock loader; + Stats::IsolatedStoreImpl store; + + envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig config; + config.set_use_tcp_for_dns_lookups(false); + EXPECT_CALL(dispatcher, createDnsResolver(_, false)).WillOnce(Return(resolver)); + DnsCacheImpl dns_cache_(dispatcher, tls, random, loader, store, config); +} + // DNS cache manager config tests. TEST(DnsCacheManagerImplTest, LoadViaConfig) { NiceMock dispatcher; NiceMock tls; - NiceMock random; + NiceMock random; + NiceMock loader; Stats::IsolatedStoreImpl store; - DnsCacheManagerImpl cache_manager(dispatcher, tls, random, store); + DnsCacheManagerImpl cache_manager(dispatcher, tls, random, loader, store); envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig config1; config1.set_name("foo"); @@ -680,7 +738,7 @@ TEST(DnsCacheManagerImplTest, LoadViaConfig) { // I spent too much time trying to figure this out. So for the moment I have copied this test body // here. I will spend some more time fixing this, but wanted to land unblocking functionality first. TEST(UtilityTest, PrepareDnsRefreshStrategy) { - NiceMock random; + NiceMock random; { // dns_failure_refresh_rate not set. 
diff --git a/test/extensions/common/dynamic_forward_proxy/dns_cache_resource_manager_test.cc b/test/extensions/common/dynamic_forward_proxy/dns_cache_resource_manager_test.cc new file mode 100644 index 0000000000000..04127f486fff0 --- /dev/null +++ b/test/extensions/common/dynamic_forward_proxy/dns_cache_resource_manager_test.cc @@ -0,0 +1,77 @@ +#include "envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.pb.h" + +#include "common/config/utility.h" + +#include "extensions/common/dynamic_forward_proxy/dns_cache_impl.h" +#include "extensions/common/dynamic_forward_proxy/dns_cache_resource_manager.h" + +#include "test/extensions/common/dynamic_forward_proxy/mocks.h" +#include "test/mocks/runtime/mocks.h" +#include "test/mocks/stats/mocks.h" +#include "test/test_common/utility.h" + +using testing::_; +using testing::NiceMock; + +namespace Envoy { +namespace Extensions { +namespace Common { +namespace DynamicForwardProxy { +namespace { + +class DnsCacheResourceManagerTest : public testing::Test { +public: + DnsCacheResourceManagerTest() { ON_CALL(store_, gauge(_, _)).WillByDefault(ReturnRef(gauge_)); } + + void setupResourceManager(std::string& config_yaml) { + envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheCircuitBreakers cb_config; + TestUtility::loadFromYaml(config_yaml, cb_config); + + resource_manager_ = + std::make_unique(store_, loader_, "dummy", cb_config); + } + + void cleanup() { + auto& pending_requests = resource_manager_->pendingRequests(); + while (pending_requests.count() != 0) { + pending_requests.dec(); + } + } + + std::unique_ptr resource_manager_; + NiceMock store_; + NiceMock gauge_; + NiceMock loader_; +}; + +TEST_F(DnsCacheResourceManagerTest, CheckDnsResource) { + std::string config_yaml = R"EOF( + max_pending_requests: 3 + )EOF"; + setupResourceManager(config_yaml); + + auto& pending_requests = resource_manager_->pendingRequests(); + EXPECT_EQ(3, pending_requests.max()); + EXPECT_EQ(0, pending_requests.count()); + 
EXPECT_TRUE(pending_requests.canCreate()); + + pending_requests.inc(); + EXPECT_EQ(1, pending_requests.count()); + EXPECT_TRUE(pending_requests.canCreate()); + + pending_requests.inc(); + pending_requests.inc(); + EXPECT_EQ(3, pending_requests.count()); + EXPECT_FALSE(pending_requests.canCreate()); + + pending_requests.dec(); + EXPECT_EQ(2, pending_requests.count()); + EXPECT_TRUE(pending_requests.canCreate()); + + cleanup(); +} +} // namespace +} // namespace DynamicForwardProxy +} // namespace Common +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/test/extensions/common/dynamic_forward_proxy/mocks.cc b/test/extensions/common/dynamic_forward_proxy/mocks.cc index 9fc2137943343..ef27a4de5b00a 100644 --- a/test/extensions/common/dynamic_forward_proxy/mocks.cc +++ b/test/extensions/common/dynamic_forward_proxy/mocks.cc @@ -10,7 +10,14 @@ namespace Extensions { namespace Common { namespace DynamicForwardProxy { -MockDnsCache::MockDnsCache() = default; +MockDnsCacheResourceManager::MockDnsCacheResourceManager() { + ON_CALL(*this, pendingRequests()).WillByDefault(ReturnRef(pending_requests_)); +} +MockDnsCacheResourceManager::~MockDnsCacheResourceManager() = default; + +MockDnsCache::MockDnsCache() { + ON_CALL(*this, canCreateDnsRequest_(_)).WillByDefault(Return(nullptr)); +} MockDnsCache::~MockDnsCache() = default; MockLoadDnsCacheEntryHandle::MockLoadDnsCacheEntryHandle() = default; diff --git a/test/extensions/common/dynamic_forward_proxy/mocks.h b/test/extensions/common/dynamic_forward_proxy/mocks.h index 88b09cf69ef97..1a9e8c77e7c2d 100644 --- a/test/extensions/common/dynamic_forward_proxy/mocks.h +++ b/test/extensions/common/dynamic_forward_proxy/mocks.h @@ -2,15 +2,30 @@ #include "envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.pb.h" -#include "extensions/common/dynamic_forward_proxy/dns_cache.h" +#include "extensions/common/dynamic_forward_proxy/dns_cache_impl.h" + +#include "test/mocks/upstream/mocks.h" 
#include "gmock/gmock.h" +using testing::NiceMock; + namespace Envoy { namespace Extensions { namespace Common { namespace DynamicForwardProxy { +class MockDnsCacheResourceManager : public DnsCacheResourceManager { +public: + MockDnsCacheResourceManager(); + ~MockDnsCacheResourceManager() override; + + MOCK_METHOD(ResourceLimit&, pendingRequests, ()); + MOCK_METHOD(DnsCacheCircuitBreakersStats&, stats, ()); + + NiceMock pending_requests_; +}; + class MockDnsCache : public DnsCache { public: MockDnsCache(); @@ -26,6 +41,11 @@ class MockDnsCache : public DnsCache { MockLoadDnsCacheEntryResult result = loadDnsCacheEntry_(host, default_port, callbacks); return {result.status_, LoadDnsCacheEntryHandlePtr{result.handle_}}; } + Upstream::ResourceAutoIncDecPtr + canCreateDnsRequest(ResourceLimitOptRef pending_requests) override { + Upstream::ResourceAutoIncDec* raii_ptr = canCreateDnsRequest_(pending_requests); + return std::unique_ptr(raii_ptr); + } MOCK_METHOD(MockLoadDnsCacheEntryResult, loadDnsCacheEntry_, (absl::string_view host, uint16_t default_port, LoadDnsCacheEntryCallbacks& callbacks)); @@ -37,6 +57,7 @@ class MockDnsCache : public DnsCache { (UpdateCallbacks & callbacks)); MOCK_METHOD((absl::flat_hash_map), hosts, ()); + MOCK_METHOD(Upstream::ResourceAutoIncDec*, canCreateDnsRequest_, (ResourceLimitOptRef)); }; class MockLoadDnsCacheEntryHandle : public DnsCache::LoadDnsCacheEntryHandle { @@ -55,7 +76,7 @@ class MockDnsCacheManager : public DnsCacheManager { MOCK_METHOD(DnsCacheSharedPtr, getCache, (const envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig& config)); - std::shared_ptr dns_cache_{new MockDnsCache()}; + std::shared_ptr> dns_cache_{new NiceMock()}; }; class MockDnsHostInfo : public DnsHostInfo { diff --git a/test/extensions/common/matcher/BUILD b/test/extensions/common/matcher/BUILD new file mode 100644 index 0000000000000..a2723b48da781 --- /dev/null +++ b/test/extensions/common/matcher/BUILD @@ -0,0 +1,19 @@ +load( + 
"//bazel:envoy_build_system.bzl", + "envoy_cc_test", + "envoy_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_cc_test( + name = "matcher_test", + srcs = ["matcher_test.cc"], + deps = [ + "//source/extensions/common/matcher:matcher_lib", + "//test/test_common:utility_lib", + "@envoy_api//envoy/config/common/matcher/v3:pkg_cc_proto", + ], +) diff --git a/test/extensions/common/matcher/matcher_test.cc b/test/extensions/common/matcher/matcher_test.cc new file mode 100644 index 0000000000000..28f6752eb24a0 --- /dev/null +++ b/test/extensions/common/matcher/matcher_test.cc @@ -0,0 +1,506 @@ +#include "envoy/config/common/matcher/v3/matcher.pb.h" + +#include "common/protobuf/utility.h" + +#include "extensions/common/matcher/matcher.h" + +#include "test/test_common/utility.h" + +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace Common { +namespace Matcher { +namespace { + +class MatcherTestBase { +public: + std::vector matchers_; + Matcher::MatchStatusVector statuses_; + envoy::config::common::matcher::v3::MatchPredicate config_; + + enum class Direction { Request, Response }; +}; + +class TapMatcherTest : public MatcherTestBase, public testing::Test { +public: + Http::TestRequestHeaderMapImpl request_headers_; + Http::TestRequestTrailerMapImpl request_trailers_; + Http::TestResponseHeaderMapImpl response_headers_; + Http::TestResponseTrailerMapImpl response_trailers_; +}; + +class TapMatcherGenericBodyConfigTest : public MatcherTestBase, public ::testing::Test {}; + +class TapMatcherGenericBodyTest + : public MatcherTestBase, + public ::testing::TestWithParam< + std::tuple, std::list>, + std::pair>>> { +public: + TapMatcherGenericBodyTest(); + + Buffer::OwnedImpl data_; + std::vector body_parts_; +}; + +TEST_F(TapMatcherTest, Any) { + const std::string matcher_yaml = + R"EOF( +any_match: true +)EOF"; + + TestUtility::loadFromYaml(matcher_yaml, config_); + buildMatcher(config_, matchers_); + EXPECT_EQ(1, 
matchers_.size()); + statuses_.resize(matchers_.size()); + matchers_[0]->onNewStream(statuses_); + EXPECT_EQ((Matcher::MatchStatus{true, false}), matchers_[0]->matchStatus(statuses_)); + matchers_[0]->onHttpRequestHeaders(request_headers_, statuses_); + EXPECT_EQ((Matcher::MatchStatus{true, false}), matchers_[0]->matchStatus(statuses_)); + matchers_[0]->onHttpRequestTrailers(request_trailers_, statuses_); + EXPECT_EQ((Matcher::MatchStatus{true, false}), matchers_[0]->matchStatus(statuses_)); + matchers_[0]->onHttpResponseHeaders(response_headers_, statuses_); + EXPECT_EQ((Matcher::MatchStatus{true, false}), matchers_[0]->matchStatus(statuses_)); + matchers_[0]->onHttpResponseTrailers(response_trailers_, statuses_); + EXPECT_EQ((Matcher::MatchStatus{true, false}), matchers_[0]->matchStatus(statuses_)); +} + +TEST_F(TapMatcherTest, Not) { + const std::string matcher_yaml = + R"EOF( +not_match: + any_match: true +)EOF"; + + TestUtility::loadFromYaml(matcher_yaml, config_); + buildMatcher(config_, matchers_); + EXPECT_EQ(2, matchers_.size()); + statuses_.resize(matchers_.size()); + matchers_[0]->onNewStream(statuses_); + EXPECT_EQ((Matcher::MatchStatus{false, false}), matchers_[0]->matchStatus(statuses_)); + matchers_[0]->onHttpRequestHeaders(request_headers_, statuses_); + EXPECT_EQ((Matcher::MatchStatus{false, false}), matchers_[0]->matchStatus(statuses_)); + matchers_[0]->onHttpRequestTrailers(request_trailers_, statuses_); + EXPECT_EQ((Matcher::MatchStatus{false, false}), matchers_[0]->matchStatus(statuses_)); + matchers_[0]->onHttpResponseHeaders(response_headers_, statuses_); + EXPECT_EQ((Matcher::MatchStatus{false, false}), matchers_[0]->matchStatus(statuses_)); + matchers_[0]->onHttpResponseTrailers(response_trailers_, statuses_); + EXPECT_EQ((Matcher::MatchStatus{false, false}), matchers_[0]->matchStatus(statuses_)); +} + +TEST_F(TapMatcherTest, AndMightChangeStatus) { + const std::string matcher_yaml = + R"EOF( +and_match: + rules: + - 
http_response_headers_match: + headers: + - name: bar + exact_match: baz +)EOF"; + + TestUtility::loadFromYaml(matcher_yaml, config_); + buildMatcher(config_, matchers_); + EXPECT_EQ(2, matchers_.size()); + statuses_.resize(matchers_.size()); + matchers_[0]->onNewStream(statuses_); + EXPECT_EQ((Matcher::MatchStatus{false, true}), matchers_[0]->matchStatus(statuses_)); + matchers_[0]->onHttpRequestHeaders(request_headers_, statuses_); + EXPECT_EQ((Matcher::MatchStatus{false, true}), matchers_[0]->matchStatus(statuses_)); + matchers_[0]->onHttpRequestTrailers(request_trailers_, statuses_); + EXPECT_EQ((Matcher::MatchStatus{false, true}), matchers_[0]->matchStatus(statuses_)); + matchers_[0]->onHttpResponseHeaders(response_headers_, statuses_); + EXPECT_EQ((Matcher::MatchStatus{false, false}), matchers_[0]->matchStatus(statuses_)); + matchers_[0]->onHttpResponseTrailers(response_trailers_, statuses_); + EXPECT_EQ((Matcher::MatchStatus{false, false}), matchers_[0]->matchStatus(statuses_)); +} + +TapMatcherGenericBodyTest::TapMatcherGenericBodyTest() { + std::string hex; + body_parts_.push_back("This is generic body matcher test for envoy"); // Index 0 + body_parts_.push_back("proxy used to create and assemble http body"); // Index 1 + body_parts_.push_back("env"); // Index 2 + body_parts_.push_back("oyp"); // Index 3 + body_parts_.push_back("roxy"); // Index 4 + body_parts_.push_back("roxy layer 7"); // Index 5 + body_parts_.push_back("blah"); // Index 6 + hex = "xx"; + unsigned char buf[] = {0xde, 0xad}; + memcpy(const_cast(hex.data()), buf, 2); + body_parts_.push_back(hex); // Index 7 + unsigned char buf1[] = {0xbe, 0xef}; + memcpy(const_cast(hex.data()), buf1, 2); + body_parts_.push_back(hex); // Index 8 +} + +// This test initializes matcher with several patterns. The length of the longest +// pattern is used to initialize overlap_ buffer. +// The longest pattern is found first. This should result in less buffering +// required for locating remaining patterns. 
+TEST_F(TapMatcherGenericBodyTest, ResizeOverlap) { + std::string matcher_yaml = R"EOF( +http_request_generic_body_match: + patterns: + - string_match: generic + - string_match: lay +)EOF"; + TestUtility::loadFromYaml(matcher_yaml, config_); + buildMatcher(config_, matchers_); + EXPECT_EQ(1, matchers_.size()); + statuses_.resize(matchers_.size()); + matchers_[0]->onNewStream(statuses_); + + const auto& ctx = reinterpret_cast(statuses_[0].ctx_.get()); + // 6 is length of "generic" + ASSERT_THAT(ctx->overlap_.capacity(), 6); + // 2 patterns must be located + ASSERT_THAT(ctx->patterns_index_.size(), 2); + + // Process body chunk which produces no match. + // It should fill the overlap_ buffer to full capacity. + data_.add(body_parts_[1].data(), body_parts_[1].length()); + matchers_[0]->onRequestBody(data_, statuses_); + ASSERT_THAT(ctx->overlap_.size(), 6); + ASSERT_THAT(ctx->capacity_, 6); + + // Now pass the chunk which matches "generic" pattern. + data_.drain(data_.length()); + data_.add(body_parts_[0].data(), body_parts_[0].length()); + matchers_[0]->onRequestBody(data_, statuses_); + + // Size of patterns_index_ should drop down to one. + // Capacity of the overlap_ should drop to to 2, as the longest pattern not found yet is 3 chars + // long. Also 2 bytes should have been copied to overlap, so its size is 2. + ASSERT_THAT(ctx->patterns_index_.size(), 1); + ASSERT_THAT(ctx->overlap_.size(), 2); + ASSERT_THAT(ctx->capacity_, 2); +} + +// Test the case when hex string is not even number of characters +TEST_F(TapMatcherGenericBodyTest, WrongConfigTest) { + std::string matcher_yaml = R"EOF( +http_request_generic_body_match: + patterns: + - binary_match: 4rdHFh%2 +)EOF"; + ASSERT_ANY_THROW(TestUtility::loadFromYaml(matcher_yaml, config_)); +} + +// Test different configurations against the body. +// Parameterized test passes various configurations +// which are appended to the yaml string. 
+TEST_P(TapMatcherGenericBodyTest, GenericBodyTest) { + Direction dir = std::get<0>(GetParam()); + std::string matcher_yaml; + if (Direction::Request == dir) { + matcher_yaml = + R"EOF(http_request_generic_body_match: + patterns:)EOF"; + } else { + matcher_yaml = + R"EOF(http_response_generic_body_match: + patterns:)EOF"; + } + + auto text_and_result = std::get<1>(GetParam()); + // Append vector of matchers + for (const auto& i : std::get<0>(text_and_result)) { + matcher_yaml += '\n'; + matcher_yaml += i; + matcher_yaml += '\n'; + } + + TestUtility::loadFromYaml(matcher_yaml, config_); + buildMatcher(config_, matchers_); + EXPECT_EQ(1, matchers_.size()); + statuses_.resize(matchers_.size()); + matchers_[0]->onNewStream(statuses_); + + // Now create data. The data is passed to matcher in several + // steps to simulate that body was not received in one continuous + // chunk. Data for each step is reassembled from body_parts_. + for (const auto& i : std::get<1>(text_and_result)) { + data_.drain(data_.length()); + for (const auto& j : i) { + data_.add(body_parts_[j].data(), body_parts_[j].length()); + } + + if (Direction::Request == dir) { + matchers_[0]->onRequestBody(data_, statuses_); + } else { + matchers_[0]->onResponseBody(data_, statuses_); + } + } + const std::pair& expected = std::get<2>(text_and_result); + EXPECT_EQ((Matcher::MatchStatus{expected.first, expected.second}), + matchers_[0]->matchStatus(statuses_)); +} + +INSTANTIATE_TEST_SUITE_P( + TapMatcherGenericBodyTestSuite, TapMatcherGenericBodyTest, + ::testing::Combine( + ::testing::Values(MatcherTestBase::Direction::Request, + MatcherTestBase::Direction::Response), + ::testing::Values( + // SEARCHING FOR SINGLE PATTERN - no limit + // Should match - there is a single body chunk and envoy is in the body + std::make_tuple(std::vector{" - string_match: \"envoy\""}, + std::list>{{0}}, std::make_pair(true, false)), + // Should match - single body and `envoyproxy` is there + std::make_tuple(std::vector{" - 
string_match: \"envoyproxy\""}, + std::list>{{0, 1}}, std::make_pair(true, false)), + // Should match - 2 body chunks. First contains 'envoy' at the end and the second + // chunk contains 'proxy' at the beginning. + std::make_tuple(std::vector{" - string_match: \"envoyproxy\""}, + std::list>{{0}, {1}}, std::make_pair(true, false)), + // Should not match - 2 body chunks. First chunk does not contain 'enwoy' at the end but + // should match 'en' and then bail out. + std::make_tuple(std::vector{" - string_match: \"enwoyproxy\""}, + std::list>{{0}, {1}}, std::make_pair(false, true)), + // Should match - 3 body chunks containing string `envoyproxy` when reassembled. + std::make_tuple(std::vector{" - string_match: \"envoyproxy\""}, + std::list>{{2}, {3}, {4}}, + std::make_pair(true, false)), + // Should match - 3 body chunks containing string ``envoyproxy layer`` when reassembled. + std::make_tuple(std::vector{" - string_match: \"envoyproxy\""}, + std::list>{{2}, {3}, {5}}, + std::make_pair(true, false)), + // Should match - 4 body chunks The last 3 contain string ``envoyproxy layer`` when + // reassembled. + std::make_tuple(std::vector{" - string_match: \"envoyproxy\""}, + std::list>{{6}, {2}, {3}, {5}}, + std::make_pair(true, false)), + // Should match - First few chunks does not match, then 3 reassembled match + // `envoyproxy`. + std::make_tuple(std::vector{" - string_match: \"envoyproxy\""}, + std::list>{{6}, {6}, {6}, {2}, {3}, {5}, {6}}, + std::make_pair(true, false)), + // Should match - chunk #7 contains hex '0xdead (3q0= in base64 format)'. + std::make_tuple(std::vector{" - binary_match: \"3q0=\""}, + std::list>{{6}, {6}, {7}, {6}}, + std::make_pair(true, false)), + // Should match - chunk #7 contains 0xdead and chunk 8 contains 0xbeef + // 0xdeadbeef encoded in base64 format is '3q2+7w=='. 
+ std::make_tuple(std::vector{" - binary_match: \"3q2+7w==\""}, + std::list>{{6}, {6}, {7}, {8}, {6}}, + std::make_pair(true, false)), + // Should NOT match - hex 0xdeed (3u0= in base64 format) is not there + std::make_tuple(std::vector{" - binary_match: \"3u0=\""}, + std::list>{{6}, {6}, {7}, {8}, {6}}, + std::make_pair(false, true)), + + // SEARCHING FOR SINGLE PATTERN - with limit + // Should match - there is a single body chunk and 'This' is within + // search limit. + std::make_tuple(std::vector{" - string_match: \"This\"", + " bytes_limit: 10"}, + std::list>{{0}}, std::make_pair(true, false)), + // Should NOT match - there is a single body chunk and envoy is in the body + // but outside of the limit + std::make_tuple(std::vector{" - string_match: \"envoy\"", + " bytes_limit: 10"}, + std::list>{{0}}, std::make_pair(false, false)), + // Should NOT match - 2 body chunks. First contains 'envoy' at the end and the second + // chunk contains 'proxy' at the beginning. Search is limited to the first 10 bytes + // - 'proxy' in the second chunk should not be found as it is outside of the search + // limit. + std::make_tuple(std::vector{" - string_match: \"proxy\"", + " bytes_limit: 10"}, + std::list>{{0}, {1}}, std::make_pair(false, false)), + // Should match - 2 body chunks. First contains 'envoy' at the end and the second + // chunk contains 'proxy' at the beginning. 'proxy' is located at bytes 44-48 + // so should be found when search limit is 48. + std::make_tuple(std::vector{" - string_match: \"proxy\"", + " bytes_limit: 48"}, + std::list>{{0}, {1}}, std::make_pair(true, false)), + // Should NOT match - 2 body chunks. First contains 'envoy' at the end and the second + // chunk contains 'proxy' at the beginning. 'proxy' is located at bytes 44-48. + // Search limit is 47 bytes, so the last character of 'proxy' is outside of the search + // limit. 
+ std::make_tuple(std::vector{" - string_match: \"proxy\"", + " bytes_limit: 47"}, + std::list>{{0}, {1}}, std::make_pair(false, false)), + // Should match - 2 body chunks. First contains 'envoy' at the end and the second + // chunk contains 'proxy' at the beginning. 'proxy' is located at bytes 44-48. + // Search limit is 46 bytes, which is enough to include 'envoypro' in search. + std::make_tuple(std::vector{" - string_match: \"envoypro\"", + " bytes_limit: 46"}, + std::list>{{0}, {1}}, std::make_pair(true, false)), + // Should NOT match - 2 body chunks. First contains 'envoy' at the end and the second + // chunk contains 'proxy' at the beginning. 'proxy' is located at bytes 44-48. + // Search limit is 45 bytes, so the last character of `envoyproxy` is outside of the + // search limit. + std::make_tuple(std::vector{" - string_match: \"envoypro\"", + " bytes_limit: 45"}, + std::list>{{0}, {1}}, std::make_pair(false, false)), + + // SEARCHING FOR MULTIPLE PATTERNS - no limit + // Should NOT match. None of the patterns is in the body. + std::make_tuple(std::vector{" - string_match: \"balancer\"", + " - string_match: \"error\""}, + std::list>{{0}}, std::make_pair(false, true)), + // Should NOT match. One pattern is in the body but the second is not. + std::make_tuple(std::vector{" - string_match: \"envoy\"", + " - string_match: \"error\""}, + std::list>{{0}}, std::make_pair(false, true)), + // Should match. Both patterns are in the body (concatenated frags 0 and 1). + std::make_tuple(std::vector{" - string_match: \"envoy\"", + " - string_match: \"proxy\""}, + std::list>{{0, 1}}, std::make_pair(true, false)), + // SPELLCHECKER(off) + // Should match. Both patterns should be found. 'envoy' is in the first + // chunk and '0xbeef' (`vu8=` in base64 format) is in the chunk 8. + std::make_tuple(std::vector{" - string_match: \"envoy\"", + " - binary_match: \"vu8=\""}, + std::list>{{0, 1}, {8}, {6}}, + std::make_pair(true, false)), + // Should match. 
Both patterns should be found. '0xdeadbeef' is spread + // across two chunks - 7 and 8. The second pattern 'envoy' is in chunk 0. + std::make_tuple(std::vector{" - string_match: \"envoy\"", + " - binary_match: \"3q2+7w==\""}, + std::list>{{7}, {8}, {6, 0}}, + std::make_pair(true, false)), + // Should match. One pattern is substring of the other and they both + // are located part in chunk 0 and part in chunk 1. + std::make_tuple(std::vector{" - string_match: \"envoyproxy\"", + " - string_match: \"voypro\""}, + std::list>{{6}, {0}, {1}, {8}, {6}}, + std::make_pair(true, false)), + // Should match. Duplicated pattern which is found in the body. + std::make_tuple(std::vector{" - string_match: \"envoyproxy\"", + " - string_match: \"envoyproxy\""}, + std::list>{{6}, {0}, {1}, {8}, {6}}, + std::make_pair(true, false)), + // Test starting search from some offset for shorter patterns. + // Overlap buffer size will be initialized for longest pattern but + // search for shorter patterns should start from some index in overlap + // buffer. Make sure that the index is enough for the shorter pattern to be found. + std::make_tuple(std::vector{" - string_match: \"assemble\"", + " - string_match: \"envoyp\""}, + std::list>{{0, 1}}, std::make_pair(true, false)), + // SEARCHING FOR MULTIPLE PATTERNS - with limit + // Should NOT match. None of the patterns is in the body. + std::make_tuple(std::vector{" - string_match: \"balancer\"", + " - string_match: \"error\"", + " bytes_limit: 15"}, + std::list>{{0}}, std::make_pair(false, false)), + // Should NOT match. One pattern is in the body but the second is not. + // Search limit is large enough to find the first pattern. + std::make_tuple(std::vector{" - string_match: \"envoy\"", + " - string_match: \"error\"", + " bytes_limit: 35"}, + std::list>{{0}}, std::make_pair(false, false)), + // Should NOT match. One pattern is in the body but the second is not. + // Search limit is small so none of the patterns should be found. 
+ std::make_tuple(std::vector{" - string_match: \"envoy\"", + " - string_match: \"error\"", + " bytes_limit: 5"}, + std::list>{{0}}, std::make_pair(false, false)), + // Should NOT match. Both patterns are in the body (concatenated frags 0 and 1). + // Limit includes only the first pattern. + std::make_tuple(std::vector{" - string_match: \"envoy\"", + " - string_match: \"proxy\"", + " bytes_limit: 30"}, + std::list>{{0, 1}}, std::make_pair(false, false)), + // Should match. Both patterns should be found. 'envoy' is in the first + // chunk and '0xbeef (vu8= in base64 format)' is in the chunk 8 and search limit is + // large enough to include 2 patterns + std::make_tuple( + std::vector{" - string_match: \"envoy\"", + " - binary_match: \"vu8=\"", " bytes_limit: 90"}, + std::list>{{0, 1}, {8}, {6}}, std::make_pair(true, false)), + // Should match. Both patterns should be found. '0xdeadbeef (3q2+7w== in base64)' is + // spread across two chunks - 7 and 8. The second pattern 'envoy' is in chunk 0. + std::make_tuple( + std::vector{" - string_match: \"envoy\"", + " - binary_match: \"3q2+7w==\"", " bytes_limit: 85"}, + std::list>{{7}, {8}, {6, 0}}, std::make_pair(true, false)), + // Should match. Search limit ends exactly where '0xdeadbeef (3q2+7w== in base64)' ends. + std::make_tuple( + std::vector{" - string_match: \"envoy\"", + " - binary_match: \"3q2+7w==\"", " bytes_limit: 47"}, + std::list>{{0}, {7}, {8}, {6, 0}}, std::make_pair(true, false)), + // Should NOT match. Search limit ends exactly one byte before end of '0xdeadbeef + // (3q2+7w== in base64)'. + std::make_tuple(std::vector{" - string_match: \"envoy\"", + " - binary_match: \"3q2+7w==\"", + " bytes_limit: 46"}, + std::list>{{0}, {7}, {8}, {6, 0}}, + std::make_pair(false, false)), + // Test the situation when end of the search limit overlaps with end of first chunk. + // Should NOT match. The second pattern should not be found. 
+ std::make_tuple(std::vector{" - string_match: \"envoy\"", + " - binary_match: \"3q2+7w==\"", + " bytes_limit: 43"}, + std::list>{{0}, {7}, {8}, {6, 0}}, + std::make_pair(false, false)), + + // SPELLCHECKER(on) + // Now pass enormously large value. It should work just fine. + std::make_tuple(std::vector{" - string_match: \"envoy\"", + " - binary_match: \"3q2+7w==\"", + " bytes_limit: 50000000"}, + std::list>{{0}, {7}, {8}, {6, 0}}, + std::make_pair(true, false))))); + +// Test takes one long pattern existing on the boundary of two body chunks and generates random +// number of substrings of various lengths. All substrings and original long pattern are added to +// the matcher's config. Next the two body chunks are passed to the matcher. In all cases the +// matcher should report that match was found. +TEST_F(TapMatcherGenericBodyTest, RandomLengthOverlappingPatterns) { + std::string pattern = "envoyproxy"; + + // Loop through fairly large number of tests + for (size_t i = 0; i < 10 * pattern.length(); i++) { + std::string matcher_yaml = R"EOF( +http_request_generic_body_match: + patterns: +)EOF"; + // generate number of substrings which will be derived from pattern + uint32_t num = std::rand() % 10; + for (size_t j = 0; j < num; j++) { + std::string yaml_line = " - string_match: "; + + // Generate random start index. + const uint32_t start = std::rand() % (pattern.length() - 1); + // Generate random length. Minimum 1 character. + const uint32_t len = 1 + std::rand() % (pattern.length() - start - 1); + yaml_line += "\"" + pattern.substr(start, len) + "\"\n"; + matcher_yaml += yaml_line; + } + // Finally add the original pattern, but not in all cases + if (0 == (num % 2)) { + matcher_yaml += " - string_match: " + pattern + "\n"; + } + + // Initialize matcher. 
+ TestUtility::loadFromYaml(matcher_yaml, config_); + buildMatcher(config_, matchers_); + EXPECT_EQ(1, matchers_.size()); + statuses_.resize(matchers_.size()); + matchers_[0]->onNewStream(statuses_); + + EXPECT_EQ((Matcher::MatchStatus{false, true}), matchers_[0]->matchStatus(statuses_)); + + // Use body chunks #0 and #1 + data_.drain(data_.length()); + data_.add(body_parts_[0].data(), body_parts_[0].length()); + matchers_[0]->onRequestBody(data_, statuses_); + data_.drain(data_.length()); + data_.add(body_parts_[1].data(), body_parts_[1].length()); + matchers_[0]->onRequestBody(data_, statuses_); + + // Check the result. All patterns should be found. + EXPECT_EQ((Matcher::MatchStatus{true, false}), matchers_[0]->matchStatus(statuses_)); + + matchers_.clear(); + } +} +} // namespace +} // namespace Matcher +} // namespace Common +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/common/proxy_protocol/BUILD b/test/extensions/common/proxy_protocol/BUILD index bd269493ddf50..414674d847115 100644 --- a/test/extensions/common/proxy_protocol/BUILD +++ b/test/extensions/common/proxy_protocol/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( @@ -14,6 +14,7 @@ envoy_cc_test( deps = [ "//source/common/buffer:buffer_lib", "//source/extensions/common/proxy_protocol:proxy_protocol_header_lib", + "//test/mocks/network:connection_mocks", "//test/test_common:utility_lib", ], ) @@ -21,6 +22,7 @@ envoy_cc_test( envoy_cc_test( name = "proxy_protocol_regression_test", srcs = ["proxy_protocol_regression_test.cc"], + tags = ["fails_on_windows"], deps = [ "//source/common/buffer:buffer_lib", "//source/common/event:dispatcher_includes", @@ -32,7 +34,6 @@ envoy_cc_test( "//source/server:connection_handler_lib", "//test/mocks/buffer:buffer_mocks", "//test/mocks/network:network_mocks", - 
"//test/mocks/server:server_mocks", "//test/test_common:environment_lib", "//test/test_common:network_utility_lib", "//test/test_common:utility_lib", diff --git a/test/extensions/common/proxy_protocol/proxy_protocol_header_test.cc b/test/extensions/common/proxy_protocol/proxy_protocol_header_test.cc index 052544a4a99a9..61ac2f70946be 100644 --- a/test/extensions/common/proxy_protocol/proxy_protocol_header_test.cc +++ b/test/extensions/common/proxy_protocol/proxy_protocol_header_test.cc @@ -4,6 +4,7 @@ #include "extensions/common/proxy_protocol/proxy_protocol_header.h" +#include "test/mocks/network/connection.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" @@ -28,6 +29,16 @@ TEST(ProxyProtocolHeaderTest, GeneratesV1IPv4Header) { generateV1Header(src_addr, dst_addr, src_port, dst_port, version, buff); EXPECT_TRUE(TestUtility::buffersEqual(expectedBuff, buff)); + + // Make sure the wrapper utility generates the same output. + testing::NiceMock connection; + connection.remote_address_ = Network::Utility::resolveUrl("tcp://174.2.2.222:50000"); + connection.local_address_ = Network::Utility::resolveUrl("tcp://172.0.0.1:80"); + Buffer::OwnedImpl util_buf; + envoy::config::core::v3::ProxyProtocolConfig config; + config.set_version(envoy::config::core::v3::ProxyProtocolConfig::V1); + generateProxyProtoHeader(config, connection, util_buf); + EXPECT_TRUE(TestUtility::buffersEqual(expectedBuff, util_buf)); } TEST(ProxyProtocolHeaderTest, GeneratesV1IPv6Header) { @@ -79,6 +90,16 @@ TEST(ProxyProtocolHeaderTest, GeneratesV2IPv6Header) { generateV2Header(src_addr, dst_addr, src_port, dst_port, version, buff); EXPECT_TRUE(TestUtility::buffersEqual(expectedBuff, buff)); + + // Make sure the wrapper utility generates the same output. 
+ testing::NiceMock connection; + connection.remote_address_ = Network::Utility::resolveUrl("tcp://[1:2:3::4]:8"); + connection.local_address_ = Network::Utility::resolveUrl("tcp://[1:100:200:3::]:2"); + Buffer::OwnedImpl util_buf; + envoy::config::core::v3::ProxyProtocolConfig config; + config.set_version(envoy::config::core::v3::ProxyProtocolConfig::V2); + generateProxyProtoHeader(config, connection, util_buf); + EXPECT_TRUE(TestUtility::buffersEqual(expectedBuff, util_buf)); } TEST(ProxyProtocolHeaderTest, GeneratesV2LocalHeader) { @@ -96,4 +117,4 @@ TEST(ProxyProtocolHeaderTest, GeneratesV2LocalHeader) { } // namespace ProxyProtocol } // namespace Common } // namespace Extensions -} // namespace Envoy \ No newline at end of file +} // namespace Envoy diff --git a/test/extensions/common/proxy_protocol/proxy_protocol_regression_test.cc b/test/extensions/common/proxy_protocol/proxy_protocol_regression_test.cc index 701df21eb9014..7c1bd0d80ae18 100644 --- a/test/extensions/common/proxy_protocol/proxy_protocol_regression_test.cc +++ b/test/extensions/common/proxy_protocol/proxy_protocol_regression_test.cc @@ -1,6 +1,7 @@ #include "envoy/network/address.h" #include "common/buffer/buffer_impl.h" +#include "common/common/basic_resource_impl.h" #include "common/event/dispatcher_impl.h" #include "common/network/connection_balancer_impl.h" #include "common/network/listen_socket_impl.h" @@ -12,7 +13,6 @@ #include "test/mocks/buffer/mocks.h" #include "test/mocks/network/mocks.h" -#include "test/mocks/server/mocks.h" #include "test/test_common/environment.h" #include "test/test_common/network_utility.h" #include "test/test_common/utility.h" @@ -47,8 +47,7 @@ class ProxyProtocolRegressionTest : public testing::TestWithParamlocalAddress())); EXPECT_CALL(socket_factory_, getListenSocket()).WillOnce(Return(socket_)); connection_handler_->addListener(absl::nullopt, *this); @@ -71,6 +70,8 @@ class ProxyProtocolRegressionTest : public testing::TestWithParam( - 
std::make_shared(listenerScope()))); + std::make_shared( + listenerScope(), + envoy::extensions::filters::listener::proxy_protocol::v3::ProxyProtocol()))); maybeExitDispatcher(); return true; })); @@ -162,6 +165,7 @@ class ProxyProtocolRegressionTest : public testing::TestWithParam connection_callbacks_; + BasicResourceLimitImpl open_connections_; Network::Connection* server_connection_; Network::MockConnectionCallbacks server_callbacks_; std::shared_ptr read_filter_; diff --git a/test/extensions/common/redis/BUILD b/test/extensions/common/redis/BUILD index a2185d580451d..07bc8e5dc11d6 100644 --- a/test/extensions/common/redis/BUILD +++ b/test/extensions/common/redis/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_platform_dep", @@ -11,6 +9,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test_library( @@ -26,6 +26,7 @@ envoy_extension_cc_test( name = "cluster_refresh_manager_test", srcs = ["cluster_refresh_manager_test.cc"], extension_name = "envoy.filters.network.redis_proxy", + flaky = True, deps = [ "//source/common/common:lock_guard_lib", "//source/common/common:thread_lib", diff --git a/test/extensions/common/redis/cluster_refresh_manager_test.cc b/test/extensions/common/redis/cluster_refresh_manager_test.cc index d4bca7edeadef..e58f6d6ca728f 100644 --- a/test/extensions/common/redis/cluster_refresh_manager_test.cc +++ b/test/extensions/common/redis/cluster_refresh_manager_test.cc @@ -25,6 +25,7 @@ namespace Extensions { namespace Common { namespace Redis { +// TODO: rewrite the tests to fix the flaky test class ClusterRefreshManagerTest : public testing::Test { public: ClusterRefreshManagerTest() diff --git a/test/extensions/common/sqlutils/BUILD b/test/extensions/common/sqlutils/BUILD new file mode 100644 index 0000000000000..0277e47706b2d --- /dev/null +++ b/test/extensions/common/sqlutils/BUILD @@ -0,0 +1,20 @@ +load( + 
"//bazel:envoy_build_system.bzl", + "envoy_cc_test", + "envoy_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_cc_test( + name = "sqlutils_tests", + srcs = [ + "sqlutils_test.cc", + ], + external_deps = ["sqlparser"], + deps = [ + "//source/extensions/common/sqlutils:sqlutils_lib", + ], +) diff --git a/test/extensions/common/sqlutils/sqlutils_test.cc b/test/extensions/common/sqlutils/sqlutils_test.cc new file mode 100644 index 0000000000000..2ab95360367fa --- /dev/null +++ b/test/extensions/common/sqlutils/sqlutils_test.cc @@ -0,0 +1,191 @@ +#include "extensions/common/sqlutils/sqlutils.h" + +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace Common { +namespace SQLUtils { + +// MetadataFromSQLTest class is used for parameterized tests. +// The values in the tests are: +// std::string - SQL query +// bool - whether to expect SQL parsing to be successful +// std::map> map of expected tables accessed based on the query. +// The map is checked only when parsing was successful. Map is indexed by table name and points to +// list of operations performed on the table. For example table1: "select", "insert" says that there +// was SELECT and INSERT operations on table1. +// DecoderAttributes is a map containing additional attributes which augment creating metadata. +class MetadataFromSQLTest + : public ::testing::TestWithParam< + std::tuple>, + SQLUtils::DecoderAttributes>> {}; + +// Test takes SQL query as a parameter and checks if the parsing +// produces the correct metadata. +// Metadata is 2-level structure. First layer is list of resources +// over which the SQL query operates: in our case is list of tables. +// Under each table there is secondary list which contains operations performed +// on the table, like "select", "insert", etc. +TEST_P(MetadataFromSQLTest, ParsingAndMetadataTest) { + // Get the SQL query + const std::string& query = std::get<0>(GetParam()); + // vector of queries to check. 
+ std::vector test_queries; + test_queries.push_back(query); + + // Create uppercase and lowercase versions of the queries and put + // them into vector of queries to check + test_queries.push_back(absl::AsciiStrToLower(query)); + test_queries.push_back(absl::AsciiStrToUpper(query)); + + while (!test_queries.empty()) { + std::string test_query = test_queries.back(); + ProtobufWkt::Struct metadata; + + // Check if the parsing result is what expected. + ASSERT_EQ(std::get<1>(GetParam()), + SQLUtils::setMetadata(test_query, std::get<3>(GetParam()), metadata)); + + // If parsing was expected to fail do not check parsing values. + if (!std::get<1>(GetParam())) { + return; + } + + // Access metadata fields, where parsing results are stored. + auto& fields = *metadata.mutable_fields(); + + // Get the names of resources which SQL query operates on. + std::map> expected_tables = std::get<2>(GetParam()); + // Check if query results return the same number of resources as expected. + ASSERT_EQ(expected_tables.size(), fields.size()); + for (const auto& i : fields) { + // Get from created metadata the list of operations on the resource + const auto& operations = i; + std::string table_name = operations.first; + + std::transform(table_name.begin(), table_name.end(), table_name.begin(), + [](unsigned char c) { return std::tolower(c); }); + // Get the list of expected operations on the same resource from test param. + const auto& table_name_it = expected_tables.find(table_name); + // Make sure that a resource (table) found in metadata is expected. + ASSERT_NE(expected_tables.end(), table_name_it); + auto& operations_list = table_name_it->second; + // The number of expected operations and created in metadata must be the same. + ASSERT_EQ(operations_list.size(), operations.second.list_value().values().size()); + // Now iterate over the operations list found in metadata and check if the same operation + // is listed as expected in test param. 
+ for (const auto& j : operations.second.list_value().values()) { + // Find that operation in test params. + const auto operation_it = + std::find(operations_list.begin(), operations_list.end(), j.string_value()); + ASSERT_NE(operations_list.end(), operation_it); + // Erase the operation. At the end of the test this list should be empty what means + // that we found all expected operations. + operations_list.erase(operation_it); + } + // Make sure that we went through all expected operations. + ASSERT_TRUE(operations_list.empty()); + // Remove the table from the list. At the end of the test this list must be empty. + expected_tables.erase(table_name_it); + } + + ASSERT_TRUE(expected_tables.empty()); + test_queries.pop_back(); + } +} + +// Note: This parameterized test's queries are converted to all lowercase and all uppercase +// to validate that parser is case-insensitive. The test routine converts to uppercase and +// lowercase entire query string, not only SQL keywords. This introduces a problem when comparing +// tables' names when verifying parsing result. Therefore the test converts table names to lowercase +// before comparing. It however requires that all table names in the queries below use lowercase +// only. +#define TEST_VALUE(...) 
\ + std::tuple>, \ + SQLUtils::DecoderAttributes> { \ + __VA_ARGS__ \ + } +INSTANTIATE_TEST_SUITE_P( + SQLUtilsTestSuite, MetadataFromSQLTest, + ::testing::Values( + TEST_VALUE("blahblah;", false, {}, {}), + + TEST_VALUE("CREATE TABLE IF NOT EXISTS table1(Usr VARCHAR(40),Count INT);", true, + {{"table1", {"create"}}}, {}), + TEST_VALUE("CREATE TABLE IF NOT EXISTS `table number 1`(Usr VARCHAR(40),Count INT);", true, + {{"table number 1.testdb", {"create"}}}, {{"database", "testdb"}}), + TEST_VALUE( + "CREATE TABLE IF NOT EXISTS table1(Usr VARCHAR(40),Count INT); SELECT * from table1;", + true, {{"table1", {"select", "create"}}}, {}), + TEST_VALUE( + "CREATE TABLE IF NOT EXISTS table1(Usr VARCHAR(40),Count INT); SELECT * from table2;", + true, {{"table1", {"create"}}, {"table2", {"select"}}}, {{"user", "testusr"}}), + + TEST_VALUE("CREATE TABLE table1(Usr VARCHAR(40),Count INT);", true, + {{"table1", {"create"}}}, {}), + TEST_VALUE("CREATE TABLE;", false, {}, {}), + TEST_VALUE("CREATE TEMPORARY table table1(Usr VARCHAR(40),Count INT);", true, + {{"table1", {"create"}}}, {}), + TEST_VALUE("DROP TABLE IF EXISTS table1", true, {{"table1", {"drop"}}}, {}), + TEST_VALUE("ALTER TABLE table1 add column Id varchar (20);", true, {{"table1", {"alter"}}}, + {}), + TEST_VALUE("INSERT INTO table1 (Usr, Count) VALUES ('allsp2', 3);", true, + {{"table1", {"insert"}}}, {}), + TEST_VALUE("INSERT LOW_PRIORITY INTO table1 (Usr, Count) VALUES ('allsp2', 3);", true, + {{"table1", {"insert"}}}, {}), + TEST_VALUE("INSERT IGNORE INTO table1 (Usr, Count) VALUES ('allsp2', 3);", true, + {{"table1", {"insert"}}}, {}), + TEST_VALUE("INSERT INTO table1 (Usr, Count) VALUES ('allsp2', 3);SELECT * from table1", + true, {{"table1", {"insert", "select"}}}, {}), + TEST_VALUE("DELETE FROM table1 WHERE Count > 3;", true, {{"table1", {"delete"}}}, {}), + TEST_VALUE("DELETE LOW_PRIORITY FROM table1 WHERE Count > 3;", true, + {{"table1", {"delete"}}}, {}), + TEST_VALUE("DELETE QUICK FROM table1 WHERE Count 
> 3;", true, {{"table1", {"delete"}}}, {}), + TEST_VALUE("DELETE IGNORE FROM table1 WHERE Count > 3;", true, {{"table1", {"delete"}}}, + {}), + + TEST_VALUE("SELECT * FROM table1 WHERE Count = 1;", true, {{"table1", {"select"}}}, {}), + TEST_VALUE("SELECT * FROM table1 WHERE Count = 1;", true, {{"table1", {"select"}}}, {}), + TEST_VALUE("SELECT product.category FROM table1 WHERE Count = 1;", true, + {{"table1", {"select"}}, {"product", {"unknown"}}}, {}), + TEST_VALUE("SELECT DISTINCT Usr FROM table1;", true, {{"table1", {"select"}}}, {}), + TEST_VALUE("SELECT Usr, Count FROM table1 ORDER BY Count DESC;", true, + {{"table1.testdb", {"select"}}}, {{"user", "testuser"}, {"database", "testdb"}}), + TEST_VALUE("SELECT 12 AS a, a FROM table1 GROUP BY a;", true, {{"table1", {"select"}}}, {}), + TEST_VALUE("SELECT;", false, {}, {}), TEST_VALUE("SELECT Usr, Count FROM;", false, {}, {}), + TEST_VALUE("INSERT INTO table1 SELECT * FROM table2;", true, + {{"table1", {"insert"}}, {"table2", {"select"}}}, {}), + TEST_VALUE("INSERT INTO table1 SELECT tbl_temp1.fld_order_id FROM table2;", true, + {{"tbl_temp1", {"unknown"}}, {"table2", {"select"}}, {"table1", {"insert"}}}, + {}), + TEST_VALUE("UPDATE table1 SET col1 = col1 + 1", true, {{"table1", {"update"}}}, {}), + TEST_VALUE("UPDATE LOW_PRIORITY table1 SET col1 = col1 + 1", true, {{"table1", {"update"}}}, + {}), + TEST_VALUE("UPDATE IGNORE table1 SET col1 = col1 + 1", true, {{"table1", {"update"}}}, {}), + TEST_VALUE("UPDATE table1 SET column1=(SELECT * columnX from table2);", true, + {{"table1", {"update"}}, {"table2", {"select"}}}, {}), + + // operations on database should not create any metadata + TEST_VALUE("CREATE DATABASE testdb;", true, {}, {}), + TEST_VALUE("CREATE DATABASE IF NOT EXISTS testdb;", true, {}, {}), + TEST_VALUE("ALTER DATABASE testdb CHARACTER SET charset_name;", true, {}, {}), + TEST_VALUE("ALTER DATABASE testdb default CHARACTER SET charset_name;", true, {}, {}), + TEST_VALUE("ALTER DATABASE testdb 
default CHARACTER SET = charset_name;", true, {}, {}), + TEST_VALUE("ALTER SCHEMA testdb default CHARACTER SET = charset_name;", true, {}, {}), + + // The following DROP DATABASE tests should not produce metadata. + TEST_VALUE("DROP DATABASE testdb;", true, {}, {}), + TEST_VALUE("DROP DATABASE IF EXISTS testdb;", true, {}, {}), + + // Schema. Should be parsed fine, but should not produce any metadata + TEST_VALUE("SHOW databases;", true, {}, {}), TEST_VALUE("SHOW tables;", true, {}, {}), + TEST_VALUE("SELECT * FROM;", false, {}, {}), + TEST_VALUE("SELECT 1 FROM tabletest1;", true, {{"tabletest1", {"select"}}}, {}) + + )); + +} // namespace SQLUtils +} // namespace Common +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/common/tap/BUILD b/test/extensions/common/tap/BUILD index 833f4bbb566c4..c5a459721fafc 100644 --- a/test/extensions/common/tap/BUILD +++ b/test/extensions/common/tap/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", @@ -7,6 +5,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test_library( @@ -25,17 +25,8 @@ envoy_cc_test( srcs = ["admin_test.cc"], deps = [ "//source/extensions/common/tap:admin", - "//test/mocks/server:server_mocks", - "@envoy_api//envoy/config/tap/v3:pkg_cc_proto", - ], -) - -envoy_cc_test( - name = "tap_matcher_test", - srcs = ["tap_matcher_test.cc"], - deps = [ - "//source/extensions/common/tap:tap_matcher", - "//test/test_common:utility_lib", + "//test/mocks/server:admin_mocks", + "//test/mocks/server:admin_stream_mocks", "@envoy_api//envoy/config/tap/v3:pkg_cc_proto", ], ) diff --git a/test/extensions/common/tap/admin_test.cc b/test/extensions/common/tap/admin_test.cc index 0ee2bc2af042f..bffe69944cbe1 100644 --- a/test/extensions/common/tap/admin_test.cc +++ b/test/extensions/common/tap/admin_test.cc @@ -2,20 +2,22 @@ #include "extensions/common/tap/admin.h" -#include "test/mocks/server/mocks.h" 
+#include "test/mocks/server/admin.h" +#include "test/mocks/server/admin_stream.h" #include "gtest/gtest.h" -using testing::_; -using testing::Return; -using testing::SaveArg; - namespace Envoy { namespace Extensions { namespace Common { namespace Tap { namespace { +using ::testing::_; +using ::testing::DoAll; +using ::testing::Return; +using ::testing::SaveArg; + class MockExtensionConfig : public ExtensionConfig { public: MOCK_METHOD(const absl::string_view, adminId, ()); @@ -48,7 +50,7 @@ class AdminHandlerTest : public testing::Test { R"EOF( config_id: test_config_id tap_config: - match_config: + match: any_match: true output_config: sinks: diff --git a/test/extensions/common/tap/common.h b/test/extensions/common/tap/common.h index 7e8ca455d0946..03b0d0b428409 100644 --- a/test/extensions/common/tap/common.h +++ b/test/extensions/common/tap/common.h @@ -64,6 +64,9 @@ class MockMatcher : public Matcher { MOCK_METHOD(void, onHttpResponseTrailers, (const Http::ResponseTrailerMap& response_trailers, MatchStatusVector& statuses), (const)); + MOCK_METHOD(void, onRequestBody, (const Buffer::Instance& data, MatchStatusVector& statuses)); + MOCK_METHOD(void, onResponseBody, (const Buffer::Instance& data, MatchStatusVector& statuses), + ()); }; } // namespace Tap diff --git a/test/extensions/common/tap/tap_config_base_test.cc b/test/extensions/common/tap/tap_config_base_test.cc index 74cf5074b3f5a..75ccec5a1595f 100644 --- a/test/extensions/common/tap/tap_config_base_test.cc +++ b/test/extensions/common/tap/tap_config_base_test.cc @@ -91,6 +91,8 @@ TEST(AddBufferToProtoBytes, All) { } TEST(TrimSlice, All) { + std::string slice_mem = "static base slice memory that is long enough"; + void* test_base = static_cast(&slice_mem[0]); { std::vector slices; Utility::trimSlices(slices, 0, 100); @@ -98,63 +100,63 @@ TEST(TrimSlice, All) { } { - std::vector slices = {{nullptr, 5}}; + std::vector slices = {{test_base, 5}}; Utility::trimSlices(slices, 0, 100); - const std::vector 
expected{{nullptr, 5}}; + const std::vector expected{{test_base, 5}}; EXPECT_EQ(expected, slices); } { - std::vector slices = {{nullptr, 5}}; + std::vector slices = {{test_base, 5}}; Utility::trimSlices(slices, 3, 3); - const std::vector expected{{reinterpret_cast(0x3), 2}}; + const std::vector expected{{static_cast(&slice_mem[3]), 2}}; EXPECT_EQ(expected, slices); } { - std::vector slices = {{nullptr, 5}, {nullptr, 4}}; + std::vector slices = {{test_base, 5}, {test_base, 4}}; Utility::trimSlices(slices, 3, 3); - const std::vector expected{{reinterpret_cast(0x3), 2}, - {reinterpret_cast(0x0), 1}}; + const std::vector expected{{static_cast(&slice_mem[3]), 2}, + {static_cast(&slice_mem[0]), 1}}; EXPECT_EQ(expected, slices); } { - std::vector slices = {{nullptr, 5}, {nullptr, 4}}; + std::vector slices = {{test_base, 5}, {test_base, 4}}; Utility::trimSlices(slices, 6, 3); - const std::vector expected{{reinterpret_cast(0x5), 0}, - {reinterpret_cast(0x1), 3}}; + const std::vector expected{{static_cast(&slice_mem[5]), 0}, + {static_cast(&slice_mem[1]), 3}}; EXPECT_EQ(expected, slices); } { - std::vector slices = {{nullptr, 5}, {nullptr, 4}}; + std::vector slices = {{test_base, 5}, {test_base, 4}}; Utility::trimSlices(slices, 0, 0); - const std::vector expected{{reinterpret_cast(0x0), 0}, - {reinterpret_cast(0x0), 0}}; + const std::vector expected{{static_cast(&slice_mem[0]), 0}, + {static_cast(&slice_mem[0]), 0}}; EXPECT_EQ(expected, slices); } { - std::vector slices = {{nullptr, 5}, {nullptr, 4}}; + std::vector slices = {{test_base, 5}, {test_base, 4}}; Utility::trimSlices(slices, 0, 3); - const std::vector expected{{reinterpret_cast(0x0), 3}, - {reinterpret_cast(0x0), 0}}; + const std::vector expected{{static_cast(&slice_mem[0]), 3}, + {static_cast(&slice_mem[0]), 0}}; EXPECT_EQ(expected, slices); } { - std::vector slices = {{nullptr, 5}, {nullptr, 4}}; + std::vector slices = {{test_base, 5}, {test_base, 4}}; Utility::trimSlices(slices, 1, 3); - const std::vector 
expected{{reinterpret_cast(0x1), 3}, - {reinterpret_cast(0x0), 0}}; + const std::vector expected{{static_cast(&slice_mem[1]), 3}, + {static_cast(&slice_mem[0]), 0}}; EXPECT_EQ(expected, slices); } } diff --git a/test/extensions/common/tap/tap_matcher_test.cc b/test/extensions/common/tap/tap_matcher_test.cc deleted file mode 100644 index 04fb50227e397..0000000000000 --- a/test/extensions/common/tap/tap_matcher_test.cc +++ /dev/null @@ -1,104 +0,0 @@ -#include "envoy/config/tap/v3/common.pb.h" - -#include "common/protobuf/utility.h" - -#include "extensions/common/tap/tap_matcher.h" - -#include "test/test_common/utility.h" - -#include "gtest/gtest.h" - -namespace Envoy { -namespace Extensions { -namespace Common { -namespace Tap { -namespace { - -class TapMatcherTest : public testing::Test { -public: - std::vector matchers_; - Matcher::MatchStatusVector statuses_; - envoy::config::tap::v3::MatchPredicate config_; - Http::TestRequestHeaderMapImpl request_headers_; - Http::TestRequestTrailerMapImpl request_trailers_; - Http::TestResponseHeaderMapImpl response_headers_; - Http::TestResponseTrailerMapImpl response_trailers_; -}; - -TEST_F(TapMatcherTest, Any) { - const std::string matcher_yaml = - R"EOF( -any_match: true -)EOF"; - - TestUtility::loadFromYaml(matcher_yaml, config_); - buildMatcher(config_, matchers_); - EXPECT_EQ(1, matchers_.size()); - statuses_.resize(matchers_.size()); - matchers_[0]->onNewStream(statuses_); - EXPECT_EQ((Matcher::MatchStatus{true, false}), matchers_[0]->matchStatus(statuses_)); - matchers_[0]->onHttpRequestHeaders(request_headers_, statuses_); - EXPECT_EQ((Matcher::MatchStatus{true, false}), matchers_[0]->matchStatus(statuses_)); - matchers_[0]->onHttpRequestTrailers(request_trailers_, statuses_); - EXPECT_EQ((Matcher::MatchStatus{true, false}), matchers_[0]->matchStatus(statuses_)); - matchers_[0]->onHttpResponseHeaders(response_headers_, statuses_); - EXPECT_EQ((Matcher::MatchStatus{true, false}), 
matchers_[0]->matchStatus(statuses_)); - matchers_[0]->onHttpResponseTrailers(response_trailers_, statuses_); - EXPECT_EQ((Matcher::MatchStatus{true, false}), matchers_[0]->matchStatus(statuses_)); -} - -TEST_F(TapMatcherTest, Not) { - const std::string matcher_yaml = - R"EOF( -not_match: - any_match: true -)EOF"; - - TestUtility::loadFromYaml(matcher_yaml, config_); - buildMatcher(config_, matchers_); - EXPECT_EQ(2, matchers_.size()); - statuses_.resize(matchers_.size()); - matchers_[0]->onNewStream(statuses_); - EXPECT_EQ((Matcher::MatchStatus{false, false}), matchers_[0]->matchStatus(statuses_)); - matchers_[0]->onHttpRequestHeaders(request_headers_, statuses_); - EXPECT_EQ((Matcher::MatchStatus{false, false}), matchers_[0]->matchStatus(statuses_)); - matchers_[0]->onHttpRequestTrailers(request_trailers_, statuses_); - EXPECT_EQ((Matcher::MatchStatus{false, false}), matchers_[0]->matchStatus(statuses_)); - matchers_[0]->onHttpResponseHeaders(response_headers_, statuses_); - EXPECT_EQ((Matcher::MatchStatus{false, false}), matchers_[0]->matchStatus(statuses_)); - matchers_[0]->onHttpResponseTrailers(response_trailers_, statuses_); - EXPECT_EQ((Matcher::MatchStatus{false, false}), matchers_[0]->matchStatus(statuses_)); -} - -TEST_F(TapMatcherTest, AndMightChangeStatus) { - const std::string matcher_yaml = - R"EOF( -and_match: - rules: - - http_response_headers_match: - headers: - - name: bar - exact_match: baz -)EOF"; - - TestUtility::loadFromYaml(matcher_yaml, config_); - buildMatcher(config_, matchers_); - EXPECT_EQ(2, matchers_.size()); - statuses_.resize(matchers_.size()); - matchers_[0]->onNewStream(statuses_); - EXPECT_EQ((Matcher::MatchStatus{false, true}), matchers_[0]->matchStatus(statuses_)); - matchers_[0]->onHttpRequestHeaders(request_headers_, statuses_); - EXPECT_EQ((Matcher::MatchStatus{false, true}), matchers_[0]->matchStatus(statuses_)); - matchers_[0]->onHttpRequestTrailers(request_trailers_, statuses_); - EXPECT_EQ((Matcher::MatchStatus{false, 
true}), matchers_[0]->matchStatus(statuses_)); - matchers_[0]->onHttpResponseHeaders(response_headers_, statuses_); - EXPECT_EQ((Matcher::MatchStatus{false, false}), matchers_[0]->matchStatus(statuses_)); - matchers_[0]->onHttpResponseTrailers(response_trailers_, statuses_); - EXPECT_EQ((Matcher::MatchStatus{false, false}), matchers_[0]->matchStatus(statuses_)); -} - -} // namespace -} // namespace Tap -} // namespace Common -} // namespace Extensions -} // namespace Envoy diff --git a/test/extensions/common/wasm/BUILD b/test/extensions/common/wasm/BUILD index 4a4c0bdda7a6c..e85cf73322e44 100644 --- a/test/extensions/common/wasm/BUILD +++ b/test/extensions/common/wasm/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( diff --git a/test/extensions/common/wasm/test_data/BUILD b/test/extensions/common/wasm/test_data/BUILD index ef4f37386280f..f46c28bbd63e7 100644 --- a/test/extensions/common/wasm/test_data/BUILD +++ b/test/extensions/common/wasm/test_data/BUILD @@ -1,10 +1,10 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() filegroup( diff --git a/test/extensions/common/wasm/test_data/Makefile b/test/extensions/common/wasm/test_data/Makefile new file mode 100644 index 0000000000000..03707a7f42d5e --- /dev/null +++ b/test/extensions/common/wasm/test_data/Makefile @@ -0,0 +1,5 @@ +all: test_rust.wasm + +test_rust.wasm: test_rust.rs + rustc -C lto -C opt-level=3 -C panic=abort -C link-arg=-S -C link-arg=-zstack-size=32768 --crate-type cdylib --target wasm32-unknown-unknown test_rust.rs + ../../../../../bazel-bin/test/tools/wee8_compile/wee8_compile_tool test_rust.wasm test_rust.wasm diff --git a/test/extensions/common/wasm/test_data/test_rust.wasm b/test/extensions/common/wasm/test_data/test_rust.wasm index 
2396b5badfaf8..68c30b0da4fa8 100755 Binary files a/test/extensions/common/wasm/test_data/test_rust.wasm and b/test/extensions/common/wasm/test_data/test_rust.wasm differ diff --git a/test/extensions/common/wasm/wasm_vm_test.cc b/test/extensions/common/wasm/wasm_vm_test.cc index a628aa43baed4..b07b684a0ba4b 100644 --- a/test/extensions/common/wasm/wasm_vm_test.cc +++ b/test/extensions/common/wasm/wasm_vm_test.cc @@ -150,6 +150,13 @@ TEST_P(WasmVmTest, V8BadCode) { } TEST_P(WasmVmTest, V8Code) { +#ifndef NDEBUG + // Do not execute pre-compilation tests in debug mode because V8 will fail to load because the + // flags do not match. TODO: restore this test when the rust toolchain is integrated. + if (GetParam() == 1) { + return; + } +#endif auto wasm_vm = createWasmVm("envoy.wasm.runtime.v8", scope_); ASSERT_TRUE(wasm_vm != nullptr); EXPECT_TRUE(wasm_vm->runtime() == "envoy.wasm.runtime.v8"); @@ -170,6 +177,13 @@ TEST_P(WasmVmTest, V8Code) { } TEST_P(WasmVmTest, V8BadHostFunctions) { +#ifndef NDEBUG + // Do not execute pre-compilation tests in debug mode because V8 will fail to load because the + // flags do not match. TODO: restore this test when the rust toolchain is integrated. + if (GetParam() == 1) { + return; + } +#endif auto wasm_vm = createWasmVm("envoy.wasm.runtime.v8", scope_); ASSERT_TRUE(wasm_vm != nullptr); @@ -198,6 +212,13 @@ TEST_P(WasmVmTest, V8BadHostFunctions) { } TEST_P(WasmVmTest, V8BadModuleFunctions) { +#ifndef NDEBUG + // Do not execute pre-compilation tests in debug mode because V8 will fail to load because the + // flags do not match. TODO: restore this test when the rust toolchain is integrated. 
+ if (GetParam() == 1) { + return; + } +#endif auto wasm_vm = createWasmVm("envoy.wasm.runtime.v8", scope_); ASSERT_TRUE(wasm_vm != nullptr); @@ -226,6 +247,13 @@ TEST_P(WasmVmTest, V8BadModuleFunctions) { } TEST_P(WasmVmTest, V8FunctionCalls) { +#ifndef NDEBUG + // Do not execute pre-compilation tests in debug mode because V8 will fail to load because the + // flags do not match. TODO: restore this test when the rust toolchain is integrated. + if (GetParam() == 1) { + return; + } +#endif auto wasm_vm = createWasmVm("envoy.wasm.runtime.v8", scope_); ASSERT_TRUE(wasm_vm != nullptr); @@ -264,6 +292,13 @@ TEST_P(WasmVmTest, V8FunctionCalls) { } TEST_P(WasmVmTest, V8Memory) { +#ifndef NDEBUG + // Do not execute pre-compilation tests in debug mode because V8 will fail to load because the + // flags do not match. TODO: restore this test when the rust toolchain is integrated. + if (GetParam() == 1) { + return; + } +#endif auto wasm_vm = createWasmVm("envoy.wasm.runtime.v8", scope_); ASSERT_TRUE(wasm_vm != nullptr); diff --git a/test/extensions/compression/gzip/BUILD b/test/extensions/compression/gzip/BUILD new file mode 100644 index 0000000000000..290209d3eb610 --- /dev/null +++ b/test/extensions/compression/gzip/BUILD @@ -0,0 +1,21 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_fuzz_test", + "envoy_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_cc_fuzz_test( + name = "compressor_fuzz_test", + srcs = ["compressor_fuzz_test.cc"], + corpus = "compressor_corpus", + deps = [ + "//source/common/buffer:buffer_lib", + "//source/common/common:assert_lib", + "//source/extensions/compression/gzip/compressor:compressor_lib", + "//source/extensions/compression/gzip/decompressor:zlib_decompressor_impl_lib", + ], +) diff --git a/test/extensions/compression/gzip/compressor/BUILD b/test/extensions/compression/gzip/compressor/BUILD new file mode 100644 index 0000000000000..6d101cd2aafd0 --- /dev/null +++ 
b/test/extensions/compression/gzip/compressor/BUILD @@ -0,0 +1,24 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_package", +) +load( + "//test/extensions:extensions_build_system.bzl", + "envoy_extension_cc_test", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_extension_cc_test( + name = "compressor_test", + srcs = ["zlib_compressor_impl_test.cc"], + extension_name = "envoy.compression.gzip.compressor", + deps = [ + "//source/common/common:assert_lib", + "//source/common/common:hex_lib", + "//source/extensions/compression/gzip/compressor:config", + "//test/test_common:utility_lib", + ], +) diff --git a/test/extensions/compression/gzip/compressor/zlib_compressor_impl_test.cc b/test/extensions/compression/gzip/compressor/zlib_compressor_impl_test.cc new file mode 100644 index 0000000000000..7d5046e1c7447 --- /dev/null +++ b/test/extensions/compression/gzip/compressor/zlib_compressor_impl_test.cc @@ -0,0 +1,266 @@ +#include "common/buffer/buffer_impl.h" +#include "common/common/hex.h" + +#include "extensions/compression/gzip/compressor/config.h" +#include "extensions/compression/gzip/compressor/zlib_compressor_impl.h" + +#include "test/test_common/utility.h" + +#include "absl/container/fixed_array.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace Compression { +namespace Gzip { +namespace Compressor { +namespace { + +// Test helpers + +void expectValidFlushedBuffer(const Buffer::OwnedImpl& output_buffer) { + Buffer::RawSliceVector compressed_slices = output_buffer.getRawSlices(); + const uint64_t num_comp_slices = compressed_slices.size(); + + const std::string header_hex_str = Hex::encode( + reinterpret_cast(compressed_slices[0].mem_), compressed_slices[0].len_); + + // HEADER 0x1f = 31 (window_bits) + EXPECT_EQ("1f8b", header_hex_str.substr(0, 4)); + // CM 0x8 = deflate (compression method) + EXPECT_EQ("08", header_hex_str.substr(4, 2)); + + const std::string footer_hex_str = + 
Hex::encode(reinterpret_cast(compressed_slices[num_comp_slices - 1].mem_), + compressed_slices[num_comp_slices - 1].len_); + // FOOTER four-byte sequence (sync flush) + EXPECT_EQ("0000ffff", footer_hex_str.substr(footer_hex_str.size() - 8, 10)); +} + +void expectEqualInputSize(const std::string& footer_bytes, const uint32_t input_size) { + const std::string size_bytes = footer_bytes.substr(footer_bytes.size() - 8, 8); + uint64_t size; + StringUtil::atoull(size_bytes.c_str(), size, 16); + EXPECT_EQ(TestUtility::flipOrder(size), input_size); +} + +void expectValidFinishedBuffer(const Buffer::OwnedImpl& output_buffer, const uint32_t input_size) { + Buffer::RawSliceVector compressed_slices = output_buffer.getRawSlices(); + const uint64_t num_comp_slices = compressed_slices.size(); + + const std::string header_hex_str = Hex::encode( + reinterpret_cast(compressed_slices[0].mem_), compressed_slices[0].len_); + // HEADER 0x1f = 31 (window_bits) + EXPECT_EQ("1f8b", header_hex_str.substr(0, 4)); + // CM 0x8 = deflate (compression method) + EXPECT_EQ("08", header_hex_str.substr(4, 2)); + + const std::string footer_bytes_str = + Hex::encode(reinterpret_cast(compressed_slices[num_comp_slices - 1].mem_), + compressed_slices[num_comp_slices - 1].len_); + + // A valid finished compressed buffer should have trailer with input size in it. 
+ expectEqualInputSize(footer_bytes_str, input_size); +} + +void drainBuffer(Buffer::OwnedImpl& buffer) { buffer.drain(buffer.length()); } + +class ZlibCompressorImplTester : public ZlibCompressorImpl { +public: + ZlibCompressorImplTester() = default; + ZlibCompressorImplTester(uint64_t chunk_size) : ZlibCompressorImpl(chunk_size) {} + void compressThenFlush(Buffer::OwnedImpl& buffer) { + compress(buffer, Envoy::Compression::Compressor::State::Flush); + } + void finish(Buffer::OwnedImpl& buffer) { + compress(buffer, Envoy::Compression::Compressor::State::Finish); + } +}; + +// Fixtures + +class ZlibCompressorImplTest : public testing::Test { +protected: + static constexpr int64_t gzip_window_bits{31}; + static constexpr int64_t memory_level{8}; + static constexpr uint64_t default_input_size{796}; +}; + +class ZlibCompressorImplDeathTest : public ZlibCompressorImplTest { +protected: + static void compressorBadInitTestHelper(int64_t window_bits, int64_t mem_level) { + ZlibCompressorImpl compressor; + compressor.init(ZlibCompressorImpl::CompressionLevel::Standard, + ZlibCompressorImpl::CompressionStrategy::Standard, window_bits, mem_level); + } + + static void uninitializedCompressorTestHelper() { + Buffer::OwnedImpl buffer; + ZlibCompressorImplTester compressor; + TestUtility::feedBufferWithRandomCharacters(buffer, 100); + compressor.finish(buffer); + } + + static void uninitializedCompressorFlushTestHelper() { + Buffer::OwnedImpl buffer; + ZlibCompressorImplTester compressor; + compressor.compressThenFlush(buffer); + } + + static void uninitializedCompressorFinishTestHelper() { + Buffer::OwnedImpl buffer; + ZlibCompressorImplTester compressor; + compressor.finish(buffer); + } +}; + +class ZlibCompressorImplFactoryTest + : public ::testing::TestWithParam> {}; + +INSTANTIATE_TEST_SUITE_P( + CreateCompressorTests, ZlibCompressorImplFactoryTest, + ::testing::Values(std::make_tuple("", ""), std::make_tuple("FILTERED", "BEST_COMPRESSION"), + 
std::make_tuple("HUFFMAN_ONLY", "BEST_COMPRESSION"), + std::make_tuple("RLE", "BEST_SPEED"), + std::make_tuple("DEFAULT_STRATEGY", "DEFAULT_COMPRESSION"), + std::make_tuple("FIXED", "COMPRESSION_LEVEL_1"), + std::make_tuple("FIXED", "COMPRESSION_LEVEL_2"), + std::make_tuple("FIXED", "COMPRESSION_LEVEL_3"), + std::make_tuple("FIXED", "COMPRESSION_LEVEL_4"), + std::make_tuple("FIXED", "COMPRESSION_LEVEL_5"), + std::make_tuple("FIXED", "COMPRESSION_LEVEL_6"), + std::make_tuple("FIXED", "COMPRESSION_LEVEL_7"), + std::make_tuple("FIXED", "COMPRESSION_LEVEL_8"), + std::make_tuple("FIXED", "COMPRESSION_LEVEL_9"))); + +TEST_P(ZlibCompressorImplFactoryTest, CreateCompressorTest) { + Buffer::OwnedImpl buffer; + envoy::extensions::compression::gzip::compressor::v3::Gzip gzip; + std::string json{"{}"}; + absl::string_view strategy = std::get<0>(GetParam()); + absl::string_view compression_level = std::get<1>(GetParam()); + + if (!strategy.empty()) { + json = fmt::format(R"EOF({{ + "compression_strategy": "{}", + "compression_level": "{}", + "memory_level": 6, + "window_bits": 27, + "chunk_size": 10000 + }})EOF", + strategy, compression_level); + } + TestUtility::loadFromJson(json, gzip); + Envoy::Compression::Compressor::CompressorPtr compressor = + GzipCompressorFactory(gzip).createCompressor(); + // Check the created compressor produces valid output. + TestUtility::feedBufferWithRandomCharacters(buffer, 4096); + compressor->compress(buffer, Envoy::Compression::Compressor::State::Flush); + expectValidFlushedBuffer(buffer); + drainBuffer(buffer); +} + +// Exercises death by passing bad initialization params or by calling +// compress before init. 
+TEST_F(ZlibCompressorImplDeathTest, CompressorDeathTest) { + EXPECT_DEATH(compressorBadInitTestHelper(100, 8), "assert failure: result >= 0"); + EXPECT_DEATH(compressorBadInitTestHelper(31, 10), "assert failure: result >= 0"); + EXPECT_DEATH(uninitializedCompressorTestHelper(), "assert failure: result == Z_OK"); + EXPECT_DEATH(uninitializedCompressorFlushTestHelper(), "assert failure: result == Z_OK"); + EXPECT_DEATH(uninitializedCompressorFinishTestHelper(), "assert failure: result == Z_STREAM_END"); +} + +// Exercises compressor's checksum by calling it before init or compress. +TEST_F(ZlibCompressorImplTest, CallingChecksum) { + Buffer::OwnedImpl buffer; + + ZlibCompressorImplTester compressor; + EXPECT_EQ(0, compressor.checksum()); + + compressor.init(ZlibCompressorImpl::CompressionLevel::Standard, + ZlibCompressorImpl::CompressionStrategy::Standard, gzip_window_bits, + memory_level); + EXPECT_EQ(0, compressor.checksum()); + + TestUtility::feedBufferWithRandomCharacters(buffer, 4096); + compressor.compressThenFlush(buffer); + expectValidFlushedBuffer(buffer); + + drainBuffer(buffer); + EXPECT_TRUE(compressor.checksum() > 0); +} + +// Exercises compressor's checksum by calling it before init or compress. 
+TEST_F(ZlibCompressorImplTest, CallingFinishOnly) { + Buffer::OwnedImpl buffer; + + ZlibCompressorImplTester compressor; + compressor.init(ZlibCompressorImpl::CompressionLevel::Standard, + ZlibCompressorImpl::CompressionStrategy::Standard, gzip_window_bits, + memory_level); + EXPECT_EQ(0, compressor.checksum()); + + TestUtility::feedBufferWithRandomCharacters(buffer, 4096); + compressor.finish(buffer); + expectValidFinishedBuffer(buffer, 4096); +} + +TEST_F(ZlibCompressorImplTest, CompressWithSmallChunkSize) { + Buffer::OwnedImpl buffer; + Buffer::OwnedImpl accumulation_buffer; + + ZlibCompressorImplTester compressor(8); + compressor.init(ZlibCompressorImpl::CompressionLevel::Standard, + ZlibCompressorImpl::CompressionStrategy::Standard, gzip_window_bits, + memory_level); + + uint64_t input_size = 0; + for (uint64_t i = 0; i < 10; i++) { + TestUtility::feedBufferWithRandomCharacters(buffer, default_input_size * i, i); + ASSERT_EQ(default_input_size * i, buffer.length()); + input_size += buffer.length(); + compressor.compressThenFlush(buffer); + accumulation_buffer.add(buffer); + drainBuffer(buffer); + ASSERT_EQ(0, buffer.length()); + } + expectValidFlushedBuffer(accumulation_buffer); + + compressor.finish(buffer); + accumulation_buffer.add(buffer); + expectValidFinishedBuffer(accumulation_buffer, input_size); +} + +// Exercises compression with other supported zlib initialization params. 
+TEST_F(ZlibCompressorImplTest, CompressWithNotCommonParams) { + Buffer::OwnedImpl buffer; + Buffer::OwnedImpl accumulation_buffer; + + ZlibCompressorImplTester compressor; + compressor.init(ZlibCompressorImpl::CompressionLevel::Speed, + ZlibCompressorImpl::CompressionStrategy::Rle, gzip_window_bits, 1); + + uint64_t input_size = 0; + for (uint64_t i = 0; i < 10; i++) { + TestUtility::feedBufferWithRandomCharacters(buffer, default_input_size * i, i); + ASSERT_EQ(default_input_size * i, buffer.length()); + input_size += buffer.length(); + compressor.compressThenFlush(buffer); + accumulation_buffer.add(buffer); + drainBuffer(buffer); + ASSERT_EQ(0, buffer.length()); + } + + expectValidFlushedBuffer(accumulation_buffer); + + compressor.finish(buffer); + accumulation_buffer.add(buffer); + expectValidFinishedBuffer(accumulation_buffer, input_size); +} + +} // namespace +} // namespace Compressor +} // namespace Gzip +} // namespace Compression +} // namespace Extensions +} // namespace Envoy diff --git a/test/common/compressor/compressor_corpus/clusterfuzz-testcase-minimized-compressor_fuzz_test-5149986500640768 b/test/extensions/compression/gzip/compressor_corpus/clusterfuzz-testcase-minimized-compressor_fuzz_test-5149986500640768 similarity index 100% rename from test/common/compressor/compressor_corpus/clusterfuzz-testcase-minimized-compressor_fuzz_test-5149986500640768 rename to test/extensions/compression/gzip/compressor_corpus/clusterfuzz-testcase-minimized-compressor_fuzz_test-5149986500640768 diff --git a/test/common/compressor/compressor_corpus/clusterfuzz-testcase-minimized-compressor_fuzz_test-5407695477932032 b/test/extensions/compression/gzip/compressor_corpus/clusterfuzz-testcase-minimized-compressor_fuzz_test-5407695477932032 similarity index 100% rename from test/common/compressor/compressor_corpus/clusterfuzz-testcase-minimized-compressor_fuzz_test-5407695477932032 rename to 
test/extensions/compression/gzip/compressor_corpus/clusterfuzz-testcase-minimized-compressor_fuzz_test-5407695477932032 diff --git a/test/common/compressor/compressor_corpus/clusterfuzz-testcase-minimized-compressor_fuzz_test-5644831560302592 b/test/extensions/compression/gzip/compressor_corpus/clusterfuzz-testcase-minimized-compressor_fuzz_test-5644831560302592 similarity index 100% rename from test/common/compressor/compressor_corpus/clusterfuzz-testcase-minimized-compressor_fuzz_test-5644831560302592 rename to test/extensions/compression/gzip/compressor_corpus/clusterfuzz-testcase-minimized-compressor_fuzz_test-5644831560302592 diff --git a/test/common/compressor/compressor_corpus/clusterfuzz-testcase-minimized-compressor_fuzz_test-6005942746873856 b/test/extensions/compression/gzip/compressor_corpus/clusterfuzz-testcase-minimized-compressor_fuzz_test-6005942746873856 similarity index 100% rename from test/common/compressor/compressor_corpus/clusterfuzz-testcase-minimized-compressor_fuzz_test-6005942746873856 rename to test/extensions/compression/gzip/compressor_corpus/clusterfuzz-testcase-minimized-compressor_fuzz_test-6005942746873856 diff --git a/test/common/compressor/compressor_corpus/empty b/test/extensions/compression/gzip/compressor_corpus/empty similarity index 100% rename from test/common/compressor/compressor_corpus/empty rename to test/extensions/compression/gzip/compressor_corpus/empty diff --git a/test/common/compressor/compressor_corpus/noise b/test/extensions/compression/gzip/compressor_corpus/noise similarity index 100% rename from test/common/compressor/compressor_corpus/noise rename to test/extensions/compression/gzip/compressor_corpus/noise diff --git a/test/common/compressor/compressor_corpus/simple b/test/extensions/compression/gzip/compressor_corpus/simple similarity index 100% rename from test/common/compressor/compressor_corpus/simple rename to test/extensions/compression/gzip/compressor_corpus/simple diff --git 
a/test/common/compressor/compressor_fuzz_test.cc b/test/extensions/compression/gzip/compressor_fuzz_test.cc similarity index 82% rename from test/common/compressor/compressor_fuzz_test.cc rename to test/extensions/compression/gzip/compressor_fuzz_test.cc index 1c28ac5bcc5c5..bdaa5283e53af 100644 --- a/test/common/compressor/compressor_fuzz_test.cc +++ b/test/extensions/compression/gzip/compressor_fuzz_test.cc @@ -1,11 +1,16 @@ #include "common/buffer/buffer_impl.h" #include "common/common/assert.h" -#include "common/compressor/zlib_compressor_impl.h" -#include "common/decompressor/zlib_decompressor_impl.h" +#include "common/stats/isolated_store_impl.h" + +#include "extensions/compression/gzip/compressor/zlib_compressor_impl.h" +#include "extensions/compression/gzip/decompressor/zlib_decompressor_impl.h" #include "test/fuzz/fuzz_runner.h" namespace Envoy { +namespace Extensions { +namespace Compression { +namespace Gzip { namespace Compressor { namespace Fuzz { @@ -15,9 +20,11 @@ namespace Fuzz { // trip compress-decompress pair; the decompressor itself is not fuzzed beyond // whatever the compressor emits, as it exists only as a test utility today. DEFINE_FUZZER(const uint8_t* buf, size_t len) { + FuzzedDataProvider provider(buf, len); ZlibCompressorImpl compressor; - Decompressor::ZlibDecompressorImpl decompressor; + Stats::IsolatedStoreImpl stats_store; + Decompressor::ZlibDecompressorImpl decompressor{stats_store, "test"}; // Select target compression level. We can't use ConsumeEnum() since the range // is non-contiguous. @@ -61,7 +68,8 @@ DEFINE_FUZZER(const uint8_t* buf, size_t len) { full_input.add(next_data); Buffer::OwnedImpl buffer{next_data.data(), next_data.size()}; provider_empty = provider.remaining_bytes() == 0; - compressor.compress(buffer, provider_empty ? State::Finish : State::Flush); + compressor.compress(buffer, provider_empty ? 
Envoy::Compression::Compressor::State::Finish + : Envoy::Compression::Compressor::State::Flush); decompressor.decompress(buffer, full_output); } RELEASE_ASSERT(full_input.toString() == full_output.toString(), ""); @@ -70,4 +78,7 @@ DEFINE_FUZZER(const uint8_t* buf, size_t len) { } // namespace Fuzz } // namespace Compressor +} // namespace Gzip +} // namespace Compression +} // namespace Extensions } // namespace Envoy diff --git a/test/extensions/compression/gzip/decompressor/BUILD b/test/extensions/compression/gzip/decompressor/BUILD new file mode 100644 index 0000000000000..bc732fc1a7c37 --- /dev/null +++ b/test/extensions/compression/gzip/decompressor/BUILD @@ -0,0 +1,26 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_package", +) +load( + "//test/extensions:extensions_build_system.bzl", + "envoy_extension_cc_test", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_extension_cc_test( + name = "zlib_decompressor_impl_test", + srcs = ["zlib_decompressor_impl_test.cc"], + extension_name = "envoy.compression.gzip.decompressor", + deps = [ + "//source/common/common:assert_lib", + "//source/common/common:hex_lib", + "//source/common/stats:isolated_store_lib", + "//source/extensions/compression/gzip/compressor:compressor_lib", + "//source/extensions/compression/gzip/decompressor:zlib_decompressor_impl_lib", + "//test/test_common:utility_lib", + ], +) diff --git a/test/common/decompressor/zlib_decompressor_impl_test.cc b/test/extensions/compression/gzip/decompressor/zlib_decompressor_impl_test.cc similarity index 57% rename from test/common/decompressor/zlib_decompressor_impl_test.cc rename to test/extensions/compression/gzip/decompressor/zlib_decompressor_impl_test.cc index 93ff4e07729dd..43ae89d42fd0b 100644 --- a/test/common/decompressor/zlib_decompressor_impl_test.cc +++ b/test/extensions/compression/gzip/decompressor/zlib_decompressor_impl_test.cc @@ -1,47 +1,53 @@ #include "common/buffer/buffer_impl.h" #include "common/common/hex.h" 
-#include "common/compressor/zlib_compressor_impl.h" -#include "common/decompressor/zlib_decompressor_impl.h" +#include "common/stats/isolated_store_impl.h" + +#include "extensions/compression/gzip/compressor/zlib_compressor_impl.h" +#include "extensions/compression/gzip/decompressor/zlib_decompressor_impl.h" #include "test/test_common/utility.h" #include "gtest/gtest.h" namespace Envoy { +namespace Extensions { +namespace Compression { +namespace Gzip { namespace Decompressor { -namespace { class ZlibDecompressorImplTest : public testing::Test { protected: void drainBuffer(Buffer::OwnedImpl& buffer) { buffer.drain(buffer.length()); } void testcompressDecompressWithUncommonParams( - Compressor::ZlibCompressorImpl::CompressionLevel comp_level, - Compressor::ZlibCompressorImpl::CompressionStrategy comp_strategy, int64_t window_bits, - uint64_t memory_level) { + Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel comp_level, + Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy + comp_strategy, + int64_t window_bits, uint64_t memory_level) { Buffer::OwnedImpl buffer; Buffer::OwnedImpl accumulation_buffer; - Envoy::Compressor::ZlibCompressorImpl compressor; + Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl compressor; compressor.init(comp_level, comp_strategy, window_bits, memory_level); std::string original_text{}; for (uint64_t i = 0; i < 30; ++i) { TestUtility::feedBufferWithRandomCharacters(buffer, default_input_size * i, i); original_text.append(buffer.toString()); - compressor.compress(buffer, Compressor::State::Flush); + compressor.compress(buffer, Envoy::Compression::Compressor::State::Flush); accumulation_buffer.add(buffer); drainBuffer(buffer); } ASSERT_EQ(0, buffer.length()); - compressor.compress(buffer, Compressor::State::Finish); + compressor.compress(buffer, Envoy::Compression::Compressor::State::Finish); accumulation_buffer.add(buffer); drainBuffer(buffer); ASSERT_EQ(0, 
buffer.length()); - ZlibDecompressorImpl decompressor; + Stats::IsolatedStoreImpl stats_store{}; + ZlibDecompressorImpl decompressor{stats_store, "test."}; decompressor.init(window_bits); decompressor.decompress(accumulation_buffer, buffer); @@ -61,24 +67,27 @@ class ZlibDecompressorImplTest : public testing::Test { class ZlibDecompressorImplFailureTest : public ZlibDecompressorImplTest { protected: static void decompressorBadInitTestHelper(int64_t window_bits) { - ZlibDecompressorImpl decompressor; + Stats::IsolatedStoreImpl stats_store{}; + ZlibDecompressorImpl decompressor{stats_store, "test."}; decompressor.init(window_bits); } static void uninitializedDecompressorTestHelper() { Buffer::OwnedImpl input_buffer; Buffer::OwnedImpl output_buffer; - ZlibDecompressorImpl decompressor; + Stats::IsolatedStoreImpl stats_store{}; + ZlibDecompressorImpl decompressor{stats_store, "test."}; TestUtility::feedBufferWithRandomCharacters(input_buffer, 100); decompressor.decompress(input_buffer, output_buffer); ASSERT_TRUE(decompressor.decompression_error_ < 0); + ASSERT_EQ(stats_store.counterFromString("test.zlib_stream_error").value(), 1); } }; // Test different failures by passing bad initialization params or by calling decompress before // init. 
TEST_F(ZlibDecompressorImplFailureTest, DecompressorFailureTest) { - EXPECT_DEATH_LOG_TO_STDERR(decompressorBadInitTestHelper(100), "assert failure: result >= 0"); + EXPECT_DEATH(decompressorBadInitTestHelper(100), "assert failure: result >= 0"); uninitializedDecompressorTestHelper(); } @@ -87,19 +96,21 @@ TEST_F(ZlibDecompressorImplTest, CallingChecksum) { Buffer::OwnedImpl compressor_buffer; Buffer::OwnedImpl decompressor_output_buffer; - Envoy::Compressor::ZlibCompressorImpl compressor; + Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl compressor; ASSERT_EQ(0, compressor.checksum()); - compressor.init(Envoy::Compressor::ZlibCompressorImpl::CompressionLevel::Standard, - Envoy::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, - gzip_window_bits, memory_level); + compressor.init( + Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Standard, + Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, + gzip_window_bits, memory_level); ASSERT_EQ(0, compressor.checksum()); TestUtility::feedBufferWithRandomCharacters(compressor_buffer, 4096); - compressor.compress(compressor_buffer, Compressor::State::Flush); + compressor.compress(compressor_buffer, Envoy::Compression::Compressor::State::Flush); ASSERT_TRUE(compressor.checksum() > 0); - ZlibDecompressorImpl decompressor; + Stats::IsolatedStoreImpl stats_store{}; + ZlibDecompressorImpl decompressor{stats_store, "test."}; decompressor.init(gzip_window_bits); EXPECT_EQ(0, decompressor.checksum()); @@ -119,23 +130,24 @@ TEST_F(ZlibDecompressorImplTest, CompressAndDecompress) { Buffer::OwnedImpl accumulation_buffer; Buffer::OwnedImpl empty_buffer; - Envoy::Compressor::ZlibCompressorImpl compressor; - compressor.init(Envoy::Compressor::ZlibCompressorImpl::CompressionLevel::Standard, - Envoy::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, - gzip_window_bits, memory_level); + 
Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl compressor; + compressor.init( + Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Standard, + Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, + gzip_window_bits, memory_level); std::string original_text{}; for (uint64_t i = 0; i < 20; ++i) { TestUtility::feedBufferWithRandomCharacters(buffer, default_input_size * i, i); original_text.append(buffer.toString()); - compressor.compress(buffer, Compressor::State::Flush); + compressor.compress(buffer, Envoy::Compression::Compressor::State::Flush); accumulation_buffer.add(buffer); drainBuffer(buffer); } ASSERT_EQ(0, buffer.length()); - compressor.compress(buffer, Compressor::State::Finish); + compressor.compress(buffer, Envoy::Compression::Compressor::State::Finish); ASSERT_GE(10, buffer.length()); accumulation_buffer.add(buffer); @@ -143,7 +155,8 @@ TEST_F(ZlibDecompressorImplTest, CompressAndDecompress) { drainBuffer(buffer); ASSERT_EQ(0, buffer.length()); - ZlibDecompressorImpl decompressor; + Stats::IsolatedStoreImpl stats_store{}; + ZlibDecompressorImpl decompressor{stats_store, "test."}; decompressor.init(gzip_window_bits); decompressor.decompress(accumulation_buffer, buffer); @@ -173,12 +186,14 @@ TEST_F(ZlibDecompressorImplTest, FailedDecompression) { accumulation_buffer.add(buffer); drainBuffer(buffer); } - ZlibDecompressorImpl decompressor; + Stats::IsolatedStoreImpl stats_store{}; + ZlibDecompressorImpl decompressor{stats_store, "test."}; decompressor.init(gzip_window_bits); decompressor.decompress(accumulation_buffer, buffer); ASSERT_TRUE(decompressor.decompression_error_ < 0); + ASSERT_EQ(stats_store.counterFromString("test.zlib_data_error").value(), 17); } // Exercises decompression with a very small output buffer. 
@@ -186,23 +201,24 @@ TEST_F(ZlibDecompressorImplTest, DecompressWithSmallOutputBuffer) { Buffer::OwnedImpl buffer; Buffer::OwnedImpl accumulation_buffer; - Envoy::Compressor::ZlibCompressorImpl compressor; - compressor.init(Envoy::Compressor::ZlibCompressorImpl::CompressionLevel::Standard, - Envoy::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, - gzip_window_bits, memory_level); + Envoy::Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl compressor; + compressor.init( + Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Standard, + Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, + gzip_window_bits, memory_level); std::string original_text{}; for (uint64_t i = 0; i < 20; ++i) { TestUtility::feedBufferWithRandomCharacters(buffer, default_input_size * i, i); original_text.append(buffer.toString()); - compressor.compress(buffer, Compressor::State::Flush); + compressor.compress(buffer, Envoy::Compression::Compressor::State::Flush); accumulation_buffer.add(buffer); drainBuffer(buffer); } ASSERT_EQ(0, buffer.length()); - compressor.compress(buffer, Compressor::State::Finish); + compressor.compress(buffer, Envoy::Compression::Compressor::State::Finish); ASSERT_GE(10, buffer.length()); accumulation_buffer.add(buffer); @@ -210,7 +226,8 @@ TEST_F(ZlibDecompressorImplTest, DecompressWithSmallOutputBuffer) { drainBuffer(buffer); ASSERT_EQ(0, buffer.length()); - ZlibDecompressorImpl decompressor(16); + Stats::IsolatedStoreImpl stats_store{}; + ZlibDecompressorImpl decompressor{stats_store, "test.", 16}; decompressor.init(gzip_window_bits); decompressor.decompress(accumulation_buffer, buffer); @@ -227,20 +244,25 @@ TEST_F(ZlibDecompressorImplTest, CompressDecompressWithUncommonParams) { // Test with different memory levels. 
for (uint64_t i = 1; i < 10; ++i) { testcompressDecompressWithUncommonParams( - Compressor::ZlibCompressorImpl::CompressionLevel::Best, - Envoy::Compressor::ZlibCompressorImpl::CompressionStrategy::Rle, 15, i); + Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Best, + Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Rle, 15, + i); testcompressDecompressWithUncommonParams( - Compressor::ZlibCompressorImpl::CompressionLevel::Best, - Envoy::Compressor::ZlibCompressorImpl::CompressionStrategy::Rle, 15, i); + Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Best, + Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Rle, 15, + i); testcompressDecompressWithUncommonParams( - Compressor::ZlibCompressorImpl::CompressionLevel::Speed, - Envoy::Compressor::ZlibCompressorImpl::CompressionStrategy::Huffman, 15, i); + Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Speed, + Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Huffman, + 15, i); testcompressDecompressWithUncommonParams( - Compressor::ZlibCompressorImpl::CompressionLevel::Speed, - Envoy::Compressor::ZlibCompressorImpl::CompressionStrategy::Filtered, 15, i); + Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Speed, + Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy:: + Filtered, + 15, i); } } @@ -262,15 +284,17 @@ TEST_F(ZlibDecompressorImplTest, CompressDecompressOfMultipleSlices) { const uint64_t num_slices = buffer.getRawSlices().size(); EXPECT_EQ(num_slices, 20); - Envoy::Compressor::ZlibCompressorImpl compressor; - compressor.init(Envoy::Compressor::ZlibCompressorImpl::CompressionLevel::Standard, - Envoy::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, - gzip_window_bits, memory_level); + 
Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl compressor; + compressor.init( + Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Standard, + Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, + gzip_window_bits, memory_level); - compressor.compress(buffer, Compressor::State::Flush); + compressor.compress(buffer, Envoy::Compression::Compressor::State::Flush); accumulation_buffer.add(buffer); - ZlibDecompressorImpl decompressor; + Stats::IsolatedStoreImpl stats_store{}; + ZlibDecompressorImpl decompressor{stats_store, "test."}; decompressor.init(gzip_window_bits); drainBuffer(buffer); @@ -284,6 +308,33 @@ TEST_F(ZlibDecompressorImplTest, CompressDecompressOfMultipleSlices) { EXPECT_EQ(original_text, decompressed_text); } -} // namespace +class ZlibDecompressorStatsTest : public testing::Test { +protected: + void chargeErrorStats(const int result) { decompressor_.chargeErrorStats(result); } + + Stats::IsolatedStoreImpl stats_store_{}; + ZlibDecompressorImpl decompressor_{stats_store_, "test."}; +}; + +TEST_F(ZlibDecompressorStatsTest, ChargeErrorStats) { + decompressor_.init(31); + + chargeErrorStats(Z_ERRNO); + ASSERT_EQ(stats_store_.counterFromString("test.zlib_errno").value(), 1); + chargeErrorStats(Z_STREAM_ERROR); + ASSERT_EQ(stats_store_.counterFromString("test.zlib_stream_error").value(), 1); + chargeErrorStats(Z_DATA_ERROR); + ASSERT_EQ(stats_store_.counterFromString("test.zlib_data_error").value(), 1); + chargeErrorStats(Z_MEM_ERROR); + ASSERT_EQ(stats_store_.counterFromString("test.zlib_mem_error").value(), 1); + chargeErrorStats(Z_BUF_ERROR); + ASSERT_EQ(stats_store_.counterFromString("test.zlib_buf_error").value(), 1); + chargeErrorStats(Z_VERSION_ERROR); + ASSERT_EQ(stats_store_.counterFromString("test.zlib_version_error").value(), 1); +} + } // namespace Decompressor +} // namespace Gzip +} // namespace Compression +} // namespace Extensions } // namespace Envoy 
diff --git a/test/extensions/filters/common/expr/BUILD b/test/extensions/filters/common/expr/BUILD index c6af64c0a0f17..fe758380dc379 100644 --- a/test/extensions/filters/common/expr/BUILD +++ b/test/extensions/filters/common/expr/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_fuzz_test", @@ -11,6 +9,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/filters/common/expr/context_test.cc b/test/extensions/filters/common/expr/context_test.cc index 9ce4c6fcc756e..e187a54d70801 100644 --- a/test/extensions/filters/common/expr/context_test.cc +++ b/test/extensions/filters/common/expr/context_test.cc @@ -367,6 +367,8 @@ TEST(Context, ConnectionAttributes) { Network::Utility::parseInternetAddress("10.20.30.40", 456, false); Network::Address::InstanceConstSharedPtr upstream_address = Network::Utility::parseInternetAddress("10.1.2.3", 679, false); + Network::Address::InstanceConstSharedPtr upstream_local_address = + Network::Utility::parseInternetAddress("10.1.2.3", 1000, false); const std::string sni_name = "kittens.com"; EXPECT_CALL(info, downstreamLocalAddress()).WillRepeatedly(ReturnRef(local)); EXPECT_CALL(info, downstreamRemoteAddress()).WillRepeatedly(ReturnRef(remote)); @@ -374,6 +376,10 @@ TEST(Context, ConnectionAttributes) { EXPECT_CALL(info, upstreamSslConnection()).WillRepeatedly(Return(upstream_ssl_info)); EXPECT_CALL(info, upstreamHost()).WillRepeatedly(Return(upstream_host)); EXPECT_CALL(info, requestedServerName()).WillRepeatedly(ReturnRef(sni_name)); + EXPECT_CALL(info, upstreamLocalAddress()).WillRepeatedly(ReturnRef(upstream_local_address)); + const std::string upstream_transport_failure_reason = "ConnectionTermination"; + EXPECT_CALL(info, upstreamTransportFailureReason()) + .WillRepeatedly(ReturnRef(upstream_transport_failure_reason)); EXPECT_CALL(*downstream_ssl_info, 
peerCertificatePresented()).WillRepeatedly(Return(true)); EXPECT_CALL(*upstream_host, address()).WillRepeatedly(Return(upstream_address)); @@ -577,6 +583,20 @@ TEST(Context, ConnectionAttributes) { ASSERT_TRUE(value.value().IsString()); EXPECT_EQ(subject_peer, value.value().StringOrDie().value()); } + + { + auto value = upstream[CelValue::CreateStringView(UpstreamLocalAddress)]; + EXPECT_TRUE(value.has_value()); + ASSERT_TRUE(value.value().IsString()); + EXPECT_EQ(upstream_local_address->asStringView(), value.value().StringOrDie().value()); + } + + { + auto value = upstream[CelValue::CreateStringView(UpstreamTransportFailureReason)]; + EXPECT_TRUE(value.has_value()); + ASSERT_TRUE(value.value().IsString()); + EXPECT_EQ(upstream_transport_failure_reason, value.value().StringOrDie().value()); + } } } // namespace diff --git a/test/extensions/filters/common/ext_authz/BUILD b/test/extensions/filters/common/ext_authz/BUILD index c43c822f14b1c..9f983589bd63d 100644 --- a/test/extensions/filters/common/ext_authz/BUILD +++ b/test/extensions/filters/common/ext_authz/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", @@ -7,6 +5,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( diff --git a/test/extensions/filters/common/ext_authz/check_request_utils_test.cc b/test/extensions/filters/common/ext_authz/check_request_utils_test.cc index 8252defad35fa..d05f2585e39ba 100644 --- a/test/extensions/filters/common/ext_authz/check_request_utils_test.cc +++ b/test/extensions/filters/common/ext_authz/check_request_utils_test.cc @@ -167,7 +167,7 @@ TEST_F(CheckRequestUtilsTest, BasicHttp) { // Verify that check request object has only a portion of the request data. 
TEST_F(CheckRequestUtilsTest, BasicHttpWithPartialBody) { const uint64_t size = 4049; - Http::RequestHeaderMapImpl headers_; + Http::TestRequestHeaderMapImpl headers_; envoy::service::auth::v3::CheckRequest request_; EXPECT_CALL(*ssl_, uriSanPeerCertificate()).WillOnce(Return(std::vector{"source"})); @@ -185,7 +185,7 @@ TEST_F(CheckRequestUtilsTest, BasicHttpWithPartialBody) { // Verify that check request object has all the request data. TEST_F(CheckRequestUtilsTest, BasicHttpWithFullBody) { - Http::RequestHeaderMapImpl headers_; + Http::TestRequestHeaderMapImpl headers_; envoy::service::auth::v3::CheckRequest request_; EXPECT_CALL(*ssl_, uriSanPeerCertificate()).WillOnce(Return(std::vector{"source"})); diff --git a/test/extensions/filters/common/ext_authz/ext_authz_grpc_impl_test.cc b/test/extensions/filters/common/ext_authz/ext_authz_grpc_impl_test.cc index 7a4137437ca13..ab0f7b37d6fd2 100644 --- a/test/extensions/filters/common/ext_authz/ext_authz_grpc_impl_test.cc +++ b/test/extensions/filters/common/ext_authz/ext_authz_grpc_impl_test.cc @@ -31,17 +31,17 @@ namespace Filters { namespace Common { namespace ExtAuthz { -constexpr char V2[] = "envoy.service.auth.v2.Authorization"; -constexpr char V2Alpha[] = "envoy.service.auth.v2alpha.Authorization"; +using Params = std::tuple; -class ExtAuthzGrpcClientTest : public testing::TestWithParam { +class ExtAuthzGrpcClientTest : public testing::TestWithParam { public: ExtAuthzGrpcClientTest() : async_client_(new Grpc::MockAsyncClient()), timeout_(10) {} - void initialize(bool use_alpha) { - use_alpha_ = use_alpha; + void initialize(const Params& param) { + api_version_ = std::get<0>(param); + use_alpha_ = std::get<1>(param); client_ = std::make_unique(Grpc::RawAsyncClientPtr{async_client_}, timeout_, - use_alpha_); + api_version_, use_alpha_); } void expectCallSend(envoy::service::auth::v3::CheckRequest& request) { @@ -51,7 +51,9 @@ class ExtAuthzGrpcClientTest : public testing::TestWithParam { 
Invoke([this](absl::string_view service_full_name, absl::string_view method_name, Buffer::InstancePtr&&, Grpc::RawAsyncRequestCallbacks&, Tracing::Span&, const Http::AsyncClient::RequestOptions& options) -> Grpc::AsyncRequest* { - EXPECT_EQ(use_alpha_ ? V2Alpha : V2, service_full_name); + EXPECT_EQ(TestUtility::getVersionedServiceFullName( + "envoy.service.auth.{}.Authorization", api_version_, use_alpha_), + service_full_name); EXPECT_EQ("Check", method_name); EXPECT_EQ(timeout_->count(), options.timeout->count()); return &async_request_; @@ -61,14 +63,19 @@ class ExtAuthzGrpcClientTest : public testing::TestWithParam { Grpc::MockAsyncClient* async_client_; absl::optional timeout_; Grpc::MockAsyncRequest async_request_; - std::unique_ptr client_; + GrpcClientImplPtr client_; MockRequestCallbacks request_callbacks_; Tracing::MockSpan span_; bool use_alpha_{}; NiceMock stream_info_; + envoy::config::core::v3::ApiVersion api_version_; }; -INSTANTIATE_TEST_SUITE_P(Parameterized, ExtAuthzGrpcClientTest, Values(true, false)); +INSTANTIATE_TEST_SUITE_P(Parameterized, ExtAuthzGrpcClientTest, + Values(Params(envoy::config::core::v3::ApiVersion::AUTO, false), + Params(envoy::config::core::v3::ApiVersion::V2, false), + Params(envoy::config::core::v3::ApiVersion::V2, true), + Params(envoy::config::core::v3::ApiVersion::V3, false))); // Test the client when an ok response is received. 
TEST_P(ExtAuthzGrpcClientTest, AuthorizationOk) { @@ -84,7 +91,7 @@ TEST_P(ExtAuthzGrpcClientTest, AuthorizationOk) { expectCallSend(request); client_->check(request_callbacks_, request, Tracing::NullSpan::instance(), stream_info_); - Http::RequestHeaderMapImpl headers; + Http::TestRequestHeaderMapImpl headers; client_->onCreateInitialMetadata(headers); EXPECT_CALL(span_, setTag(Eq("ext_authz_status"), Eq("ext_authz_ok"))); @@ -108,7 +115,7 @@ TEST_P(ExtAuthzGrpcClientTest, AuthorizationOkWithAllAtributes) { expectCallSend(request); client_->check(request_callbacks_, request, Tracing::NullSpan::instance(), stream_info_); - Http::RequestHeaderMapImpl headers; + Http::TestRequestHeaderMapImpl headers; client_->onCreateInitialMetadata(headers); EXPECT_CALL(span_, setTag(Eq("ext_authz_status"), Eq("ext_authz_ok"))); @@ -131,7 +138,7 @@ TEST_P(ExtAuthzGrpcClientTest, AuthorizationDenied) { expectCallSend(request); client_->check(request_callbacks_, request, Tracing::NullSpan::instance(), stream_info_); - Http::RequestHeaderMapImpl headers; + Http::TestRequestHeaderMapImpl headers; client_->onCreateInitialMetadata(headers); EXPECT_EQ(nullptr, headers.RequestId()); EXPECT_CALL(span_, setTag(Eq("ext_authz_status"), Eq("ext_authz_unauthorized"))); @@ -155,7 +162,7 @@ TEST_P(ExtAuthzGrpcClientTest, AuthorizationDeniedGrpcUnknownStatus) { expectCallSend(request); client_->check(request_callbacks_, request, Tracing::NullSpan::instance(), stream_info_); - Http::RequestHeaderMapImpl headers; + Http::TestRequestHeaderMapImpl headers; client_->onCreateInitialMetadata(headers); EXPECT_EQ(nullptr, headers.RequestId()); EXPECT_CALL(span_, setTag(Eq("ext_authz_status"), Eq("ext_authz_unauthorized"))); @@ -182,7 +189,7 @@ TEST_P(ExtAuthzGrpcClientTest, AuthorizationDeniedWithAllAttributes) { expectCallSend(request); client_->check(request_callbacks_, request, Tracing::NullSpan::instance(), stream_info_); - Http::RequestHeaderMapImpl headers; + Http::TestRequestHeaderMapImpl headers; 
client_->onCreateInitialMetadata(headers); EXPECT_EQ(nullptr, headers.RequestId()); EXPECT_CALL(span_, setTag(Eq("ext_authz_status"), Eq("ext_authz_unauthorized"))); diff --git a/test/extensions/filters/common/ext_authz/ext_authz_http_impl_test.cc b/test/extensions/filters/common/ext_authz/ext_authz_http_impl_test.cc index 9b075d3bbf291..602f5836919a8 100644 --- a/test/extensions/filters/common/ext_authz/ext_authz_http_impl_test.cc +++ b/test/extensions/filters/common/ext_authz/ext_authz_http_impl_test.cc @@ -35,14 +35,11 @@ namespace { class ExtAuthzHttpClientTest : public testing::Test { public: - ExtAuthzHttpClientTest() - : async_request_{&async_client_}, time_source_{async_client_.dispatcher().timeSource()} { - initialize(EMPTY_STRING); - } + ExtAuthzHttpClientTest() : async_request_{&async_client_} { initialize(EMPTY_STRING); } void initialize(const std::string& yaml) { config_ = createConfig(yaml); - client_ = std::make_unique(cm_, config_, time_source_); + client_ = std::make_unique(cm_, config_); ON_CALL(cm_, httpAsyncClientForCluster(config_->cluster())) .WillByDefault(ReturnRef(async_client_)); } @@ -81,6 +78,12 @@ class ExtAuthzHttpClientTest : public testing::Test { ignore_case: true - prefix: "X-" ignore_case: true + allowed_upstream_headers_to_append: + patterns: + - exact: Alice + ignore_case: true + - prefix: "Append-" + ignore_case: true allowed_client_headers: patterns: - exact: Foo @@ -96,7 +99,7 @@ class ExtAuthzHttpClientTest : public testing::Test { return std::make_shared(proto_config, timeout, path_prefix); } - Http::RequestMessagePtr sendRequest(std::unordered_map&& headers) { + Http::RequestMessagePtr sendRequest(absl::node_hash_map&& headers) { envoy::service::auth::v3::CheckRequest request{}; auto mutable_headers = request.mutable_attributes()->mutable_request()->mutable_http()->mutable_headers(); @@ -117,7 +120,7 @@ class ExtAuthzHttpClientTest : public testing::Test { const auto authz_response = 
TestCommon::makeAuthzResponse(CheckStatus::OK); auto check_response = TestCommon::makeMessageResponse(expected_headers); - client_->check(request_callbacks_, request, Tracing::NullSpan::instance(), stream_info_); + client_->check(request_callbacks_, request, parent_span_, stream_info_); EXPECT_CALL(request_callbacks_, onComplete_(WhenDynamicCastTo(AuthzOkResponse(authz_response)))); client_->onSuccess(async_request_, std::move(check_response)); @@ -129,10 +132,10 @@ class ExtAuthzHttpClientTest : public testing::Test { NiceMock async_client_; NiceMock async_request_; ClientConfigSharedPtr config_; - TimeSource& time_source_; std::unique_ptr client_; MockRequestCallbacks request_callbacks_; - Tracing::MockSpan active_span_; + Tracing::MockSpan parent_span_; + Tracing::MockSpan child_span_; NiceMock stream_info_; }; @@ -141,27 +144,32 @@ TEST_F(ExtAuthzHttpClientTest, ClientConfig) { const Http::LowerCaseString foo{"foo"}; const Http::LowerCaseString baz{"baz"}; const Http::LowerCaseString bar{"bar"}; + const Http::LowerCaseString alice{"alice"}; // Check allowed request headers. EXPECT_TRUE(config_->requestHeaderMatchers()->matches(Http::Headers::get().Method.get())); EXPECT_TRUE(config_->requestHeaderMatchers()->matches(Http::Headers::get().Host.get())); - EXPECT_TRUE(config_->requestHeaderMatchers()->matches(Http::Headers::get().Authorization.get())); + EXPECT_TRUE( + config_->requestHeaderMatchers()->matches(Http::CustomHeaders::get().Authorization.get())); EXPECT_FALSE(config_->requestHeaderMatchers()->matches(Http::Headers::get().ContentLength.get())); EXPECT_TRUE(config_->requestHeaderMatchers()->matches(baz.get())); - // // Check allowed client headers. + // Check allowed client headers. 
EXPECT_TRUE(config_->clientHeaderMatchers()->matches(Http::Headers::get().Status.get())); EXPECT_TRUE(config_->clientHeaderMatchers()->matches(Http::Headers::get().ContentLength.get())); EXPECT_FALSE(config_->clientHeaderMatchers()->matches(Http::Headers::get().Path.get())); EXPECT_FALSE(config_->clientHeaderMatchers()->matches(Http::Headers::get().Host.get())); EXPECT_TRUE(config_->clientHeaderMatchers()->matches(Http::Headers::get().WWWAuthenticate.get())); - EXPECT_FALSE(config_->clientHeaderMatchers()->matches(Http::Headers::get().Origin.get())); + EXPECT_FALSE(config_->clientHeaderMatchers()->matches(Http::CustomHeaders::get().Origin.get())); EXPECT_TRUE(config_->clientHeaderMatchers()->matches(foo.get())); - // // Check allowed upstream headers. + // Check allowed upstream headers. EXPECT_TRUE(config_->upstreamHeaderMatchers()->matches(bar.get())); - // // Check other attributes. + // Check allowed upstream headers to append. + EXPECT_TRUE(config_->upstreamHeaderToAppendMatchers()->matches(alice.get())); + + // Check other attributes. EXPECT_EQ(config_->pathPrefix(), "/bar"); EXPECT_EQ(config_->cluster(), "ext_authz"); EXPECT_EQ(config_->tracingName(), "async ext_authz egress"); @@ -184,7 +192,8 @@ TEST_F(ExtAuthzHttpClientTest, TestDefaultAllowedHeaders) { // Check allowed request headers. EXPECT_TRUE(config_->requestHeaderMatchers()->matches(Http::Headers::get().Method.get())); EXPECT_TRUE(config_->requestHeaderMatchers()->matches(Http::Headers::get().Host.get())); - EXPECT_TRUE(config_->requestHeaderMatchers()->matches(Http::Headers::get().Authorization.get())); + EXPECT_TRUE( + config_->requestHeaderMatchers()->matches(Http::CustomHeaders::get().Authorization.get())); EXPECT_FALSE(config_->requestHeaderMatchers()->matches(Http::Headers::get().ContentLength.get())); // Check allowed client headers. 
@@ -201,9 +210,7 @@ TEST_F(ExtAuthzHttpClientTest, TestDefaultAllowedHeaders) { TEST_F(ExtAuthzHttpClientTest, AuthorizationOkWithPathRewrite) { Http::RequestMessagePtr message_ptr = sendRequest({{":path", "/foo"}, {"foo", "bar"}}); - const auto* path = message_ptr->headers().get(Http::Headers::get().Path); - ASSERT_NE(path, nullptr); - EXPECT_EQ(path->value().getStringView(), "/bar/foo"); + EXPECT_EQ(message_ptr->headers().getPathValue(), "/bar/foo"); } // Test the client when a request contains Content-Length greater than 0. @@ -212,13 +219,8 @@ TEST_F(ExtAuthzHttpClientTest, ContentLengthEqualZero) { sendRequest({{Http::Headers::get().ContentLength.get(), std::string{"47"}}, {Http::Headers::get().Method.get(), std::string{"POST"}}}); - const auto* content_length = message_ptr->headers().get(Http::Headers::get().ContentLength); - ASSERT_NE(content_length, nullptr); - EXPECT_EQ(content_length->value().getStringView(), "0"); - - const auto* method = message_ptr->headers().get(Http::Headers::get().Method); - ASSERT_NE(method, nullptr); - EXPECT_EQ(method->value().getStringView(), "POST"); + EXPECT_EQ(message_ptr->headers().getContentLengthValue(), "0"); + EXPECT_EQ(message_ptr->headers().getMethodValue(), "POST"); } // Test the client when a request contains Content-Length greater than 0. 
@@ -244,13 +246,8 @@ TEST_F(ExtAuthzHttpClientTest, ContentLengthEqualZeroWithAllowedHeaders) { sendRequest({{Http::Headers::get().ContentLength.get(), std::string{"47"}}, {Http::Headers::get().Method.get(), std::string{"POST"}}}); - const auto* content_length = message_ptr->headers().get(Http::Headers::get().ContentLength); - ASSERT_NE(content_length, nullptr); - EXPECT_EQ(content_length->value().getStringView(), "0"); - - const auto* method = message_ptr->headers().get(Http::Headers::get().Method); - ASSERT_NE(method, nullptr); - EXPECT_EQ(method->value().getStringView(), "POST"); + EXPECT_EQ(message_ptr->headers().getContentLengthValue(), "0"); + EXPECT_EQ(message_ptr->headers().getMethodValue(), "POST"); } // Test the client when a request contains headers in the prefix matchers. @@ -284,24 +281,14 @@ TEST_F(ExtAuthzHttpClientTest, AllowedRequestHeadersPrefix) { // Verify client response when authorization server returns a 200 OK. TEST_F(ExtAuthzHttpClientTest, AuthorizationOk) { - Tracing::MockSpan* child_span{new Tracing::MockSpan()}; const auto expected_headers = TestCommon::makeHeaderValueOption({{":status", "200", false}}); const auto authz_response = TestCommon::makeAuthzResponse(CheckStatus::OK); auto check_response = TestCommon::makeMessageResponse(expected_headers); envoy::service::auth::v3::CheckRequest request; - - EXPECT_CALL(active_span_, spawnChild_(_, config_->tracingName(), _)).WillOnce(Return(child_span)); - EXPECT_CALL(*child_span, - setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(config_->cluster()))); - EXPECT_CALL(*child_span, injectContext(_)); - - client_->check(request_callbacks_, request, active_span_, stream_info_); + client_->check(request_callbacks_, request, parent_span_, stream_info_); EXPECT_CALL(request_callbacks_, onComplete_(WhenDynamicCastTo(AuthzOkResponse(authz_response)))); - EXPECT_CALL(*child_span, setTag(Eq("ext_authz_status"), Eq("ext_authz_ok"))); - EXPECT_CALL(*child_span, setTag(Eq("ext_authz_http_status"), 
Eq("OK"))); - EXPECT_CALL(*child_span, finishSpan()); client_->onSuccess(async_request_, std::move(check_response)); } @@ -309,7 +296,6 @@ using HeaderValuePair = std::pairmutable_request()->mutable_http()->mutable_headers(); (*mutable_headers)[std::string{":x-authz-header2"}] = std::string{"forged-value"}; - - EXPECT_CALL(active_span_, spawnChild_(_, config_->tracingName(), _)).WillOnce(Return(child_span)); - EXPECT_CALL(*child_span, - setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(config_->cluster()))); - EXPECT_CALL(*child_span, injectContext(_)); // Expect that header1 will be added and header2 correctly overwritten. Due to this behavior, the // append property of header value option should always be false. const HeaderValuePair header1{"x-authz-header1", "value"}; const HeaderValuePair header2{"x-authz-header2", "value"}; EXPECT_CALL(async_client_, send_(AllOf(ContainsPairAsHeader(header1), ContainsPairAsHeader(header2)), _, _)); - client_->check(request_callbacks_, request, active_span_, stream_info_); + client_->check(request_callbacks_, request, parent_span_, stream_info_); + + // Check for child span tagging when the request is allowed. 
+ EXPECT_CALL(child_span_, setTag(Eq("ext_authz_http_status"), Eq("OK"))); + EXPECT_CALL(child_span_, setTag(Eq("ext_authz_status"), Eq("ext_authz_ok"))); + client_->onBeforeFinalizeUpstreamSpan(child_span_, &check_response->headers()); EXPECT_CALL(request_callbacks_, onComplete_(WhenDynamicCastTo(AuthzOkResponse(authz_response)))); - EXPECT_CALL(*child_span, setTag(Eq("ext_authz_status"), Eq("ext_authz_ok"))); - EXPECT_CALL(*child_span, setTag(Eq("ext_authz_http_status"), Eq("OK"))); - EXPECT_CALL(*child_span, finishSpan()); client_->onSuccess(async_request_, std::move(check_response)); } @@ -356,20 +339,14 @@ TEST_F(ExtAuthzHttpClientTest, AuthorizationOkWithAddedAuthzHeadersFromStreamInf initialize(yaml); - Tracing::MockSpan* child_span{new Tracing::MockSpan()}; const auto expected_headers = TestCommon::makeHeaderValueOption({{":status", "200", false}}); const auto authz_response = TestCommon::makeAuthzResponse(CheckStatus::OK); auto check_response = TestCommon::makeMessageResponse(expected_headers); - EXPECT_CALL(active_span_, spawnChild_(_, config_->tracingName(), _)).WillOnce(Return(child_span)); - EXPECT_CALL(*child_span, - setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(config_->cluster()))); - EXPECT_CALL(*child_span, injectContext(_)); - const HeaderValuePair expected_header{"x-authz-header1", "123"}; EXPECT_CALL(async_client_, send_(ContainsPairAsHeader(expected_header), _, _)); - Http::RequestHeaderMapImpl request_headers; + Http::TestRequestHeaderMapImpl request_headers; request_headers.addCopy(Http::LowerCaseString(std::string("x-request-id")), expected_header.second); @@ -377,19 +354,15 @@ TEST_F(ExtAuthzHttpClientTest, AuthorizationOkWithAddedAuthzHeadersFromStreamInf EXPECT_CALL(stream_info, getRequestHeaders()).WillOnce(Return(&request_headers)); envoy::service::auth::v3::CheckRequest request; - client_->check(request_callbacks_, request, active_span_, stream_info); + client_->check(request_callbacks_, request, parent_span_, stream_info); 
EXPECT_CALL(request_callbacks_, onComplete_(WhenDynamicCastTo(AuthzOkResponse(authz_response)))); - EXPECT_CALL(*child_span, setTag(Eq("ext_authz_status"), Eq("ext_authz_ok"))); - EXPECT_CALL(*child_span, setTag(Eq("ext_authz_http_status"), Eq("OK"))); - EXPECT_CALL(*child_span, finishSpan()); client_->onSuccess(async_request_, std::move(check_response)); } // Verify client response headers when allow_upstream_headers is configured. TEST_F(ExtAuthzHttpClientTest, AuthorizationOkWithAllowHeader) { - Tracing::MockSpan* child_span{new Tracing::MockSpan()}; const std::string empty_body{}; const auto expected_headers = TestCommon::makeHeaderValueOption({{"x-baz", "foo", false}, {"bar", "foo", false}}); @@ -399,11 +372,7 @@ TEST_F(ExtAuthzHttpClientTest, AuthorizationOkWithAllowHeader) { envoy::service::auth::v3::CheckRequest request; EXPECT_CALL(request_callbacks_, onComplete_(WhenDynamicCastTo(AuthzOkResponse(authz_response)))); - EXPECT_CALL(active_span_, spawnChild_(_, config_->tracingName(), _)).WillOnce(Return(child_span)); - EXPECT_CALL(*child_span, - setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(config_->cluster()))); - EXPECT_CALL(*child_span, injectContext(_)); - client_->check(request_callbacks_, request, active_span_, stream_info_); + client_->check(request_callbacks_, request, parent_span_, stream_info_); const auto check_response_headers = TestCommon::makeHeaderValueOption({{":status", "200", false}, @@ -414,30 +383,25 @@ TEST_F(ExtAuthzHttpClientTest, AuthorizationOkWithAllowHeader) { {"x-baz", "foo", false}, {"foobar", "foo", false}}); - EXPECT_CALL(*child_span, setTag(Eq("ext_authz_status"), Eq("ext_authz_ok"))); - EXPECT_CALL(*child_span, setTag(Eq("ext_authz_http_status"), Eq("OK"))); - EXPECT_CALL(*child_span, finishSpan()); auto message_response = TestCommon::makeMessageResponse(check_response_headers); client_->onSuccess(async_request_, std::move(message_response)); } // Test the client when a denied response is received. 
TEST_F(ExtAuthzHttpClientTest, AuthorizationDenied) { - Tracing::MockSpan* child_span{new Tracing::MockSpan()}; const auto expected_headers = TestCommon::makeHeaderValueOption({{":status", "403", false}}); const auto authz_response = TestCommon::makeAuthzResponse( CheckStatus::Denied, Http::Code::Forbidden, EMPTY_STRING, expected_headers); + auto check_response = TestCommon::makeMessageResponse(expected_headers); envoy::service::auth::v3::CheckRequest request; - EXPECT_CALL(active_span_, spawnChild_(_, config_->tracingName(), _)).WillOnce(Return(child_span)); - EXPECT_CALL(*child_span, - setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(config_->cluster()))); - EXPECT_CALL(*child_span, injectContext(_)); - client_->check(request_callbacks_, request, active_span_, stream_info_); - - EXPECT_CALL(*child_span, setTag(Eq("ext_authz_status"), Eq("ext_authz_unauthorized"))); - EXPECT_CALL(*child_span, setTag(Eq("ext_authz_http_status"), Eq("Forbidden"))); - EXPECT_CALL(*child_span, finishSpan()); + client_->check(request_callbacks_, request, parent_span_, stream_info_); + + // Check for child span tagging when the request is denied. + EXPECT_CALL(child_span_, setTag(Eq("ext_authz_http_status"), Eq("Forbidden"))); + EXPECT_CALL(child_span_, setTag(Eq("ext_authz_status"), Eq("ext_authz_unauthorized"))); + client_->onBeforeFinalizeUpstreamSpan(child_span_, &check_response->headers()); + EXPECT_CALL(request_callbacks_, onComplete_(WhenDynamicCastTo(AuthzDeniedResponse(authz_response)))); client_->onSuccess(async_request_, TestCommon::makeMessageResponse(expected_headers)); @@ -445,24 +409,15 @@ TEST_F(ExtAuthzHttpClientTest, AuthorizationDenied) { // Verify client response headers and body when the authorization server denies the request. 
TEST_F(ExtAuthzHttpClientTest, AuthorizationDeniedWithAllAttributes) { - Tracing::MockSpan* child_span{new Tracing::MockSpan()}; const auto expected_body = std::string{"test"}; const auto expected_headers = TestCommon::makeHeaderValueOption( {{":status", "401", false}, {"foo", "bar", false}, {"x-foobar", "bar", false}}); const auto authz_response = TestCommon::makeAuthzResponse( CheckStatus::Denied, Http::Code::Unauthorized, expected_body, expected_headers); - EXPECT_CALL(active_span_, spawnChild_(_, config_->tracingName(), _)).WillOnce(Return(child_span)); - EXPECT_CALL(*child_span, - setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(config_->cluster()))); - EXPECT_CALL(*child_span, injectContext(_)); - envoy::service::auth::v3::CheckRequest request; - client_->check(request_callbacks_, request, active_span_, stream_info_); + client_->check(request_callbacks_, request, parent_span_, stream_info_); - EXPECT_CALL(*child_span, setTag(Eq("ext_authz_status"), Eq("ext_authz_unauthorized"))); - EXPECT_CALL(*child_span, setTag(Eq("ext_authz_http_status"), Eq("Unauthorized"))); - EXPECT_CALL(*child_span, finishSpan()); EXPECT_CALL(request_callbacks_, onComplete_(WhenDynamicCastTo(AuthzDeniedResponse(authz_response)))); client_->onSuccess(async_request_, @@ -472,25 +427,16 @@ TEST_F(ExtAuthzHttpClientTest, AuthorizationDeniedWithAllAttributes) { // Verify client response headers when the authorization server denies the request and // allowed_client_headers is configured. 
TEST_F(ExtAuthzHttpClientTest, AuthorizationDeniedAndAllowedClientHeaders) { - Tracing::MockSpan* child_span{new Tracing::MockSpan()}; const auto expected_body = std::string{"test"}; const auto authz_response = TestCommon::makeAuthzResponse( CheckStatus::Denied, Http::Code::Unauthorized, expected_body, TestCommon::makeHeaderValueOption( {{"x-foo", "bar", false}, {":status", "401", false}, {"foo", "bar", false}})); - EXPECT_CALL(active_span_, spawnChild_(_, config_->tracingName(), _)).WillOnce(Return(child_span)); - EXPECT_CALL(*child_span, - setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(config_->cluster()))); - EXPECT_CALL(*child_span, injectContext(_)); - envoy::service::auth::v3::CheckRequest request; - client_->check(request_callbacks_, request, active_span_, stream_info_); + client_->check(request_callbacks_, request, parent_span_, stream_info_); EXPECT_CALL(request_callbacks_, onComplete_(WhenDynamicCastTo(AuthzDeniedResponse(authz_response)))); - EXPECT_CALL(*child_span, setTag(Eq("ext_authz_status"), Eq("ext_authz_unauthorized"))); - EXPECT_CALL(*child_span, setTag(Eq("ext_authz_http_status"), Eq("Unauthorized"))); - EXPECT_CALL(*child_span, finishSpan()); const auto check_response_headers = TestCommon::makeHeaderValueOption({{":method", "post", false}, {"x-foo", "bar", false}, {":status", "401", false}, @@ -501,20 +447,12 @@ TEST_F(ExtAuthzHttpClientTest, AuthorizationDeniedAndAllowedClientHeaders) { // Test the client when an unknown error occurs. 
TEST_F(ExtAuthzHttpClientTest, AuthorizationRequestError) { - Tracing::MockSpan* child_span{new Tracing::MockSpan()}; envoy::service::auth::v3::CheckRequest request; - EXPECT_CALL(active_span_, spawnChild_(_, config_->tracingName(), _)).WillOnce(Return(child_span)); - EXPECT_CALL(*child_span, - setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(config_->cluster()))); - EXPECT_CALL(*child_span, injectContext(_)); - - client_->check(request_callbacks_, request, active_span_, stream_info_); + client_->check(request_callbacks_, request, parent_span_, stream_info_); EXPECT_CALL(request_callbacks_, onComplete_(WhenDynamicCastTo(AuthzErrorResponse(CheckStatus::Error)))); - EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().Error), Eq(Tracing::Tags::get().True))); - EXPECT_CALL(*child_span, finishSpan()); client_->onFailure(async_request_, Http::AsyncClient::FailureReason::Reset); } @@ -522,79 +460,35 @@ TEST_F(ExtAuthzHttpClientTest, AuthorizationRequestError) { TEST_F(ExtAuthzHttpClientTest, AuthorizationRequest5xxError) { Http::ResponseMessagePtr check_response(new Http::ResponseMessageImpl( Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{":status", "503"}}})); - Tracing::MockSpan* child_span{new Tracing::MockSpan()}; envoy::service::auth::v3::CheckRequest request; - EXPECT_CALL(active_span_, spawnChild_(_, config_->tracingName(), _)).WillOnce(Return(child_span)); - EXPECT_CALL(*child_span, - setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(config_->cluster()))); - EXPECT_CALL(*child_span, injectContext(_)); - - client_->check(request_callbacks_, request, active_span_, stream_info_); - - EXPECT_CALL(request_callbacks_, - onComplete_(WhenDynamicCastTo(AuthzErrorResponse(CheckStatus::Error)))); - EXPECT_CALL(*child_span, setTag(Eq("ext_authz_http_status"), Eq("Service Unavailable"))); - EXPECT_CALL(*child_span, finishSpan()); - client_->onSuccess(async_request_, std::move(check_response)); -} - -// Test the client when a call to authorization 
server returns a status code that cannot be -// parsed. -TEST_F(ExtAuthzHttpClientTest, AuthorizationRequestErrorParsingStatusCode) { - Http::ResponseMessagePtr check_response(new Http::ResponseMessageImpl( - Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{":status", "foo"}}})); - Tracing::MockSpan* child_span{new Tracing::MockSpan()}; - envoy::service::auth::v3::CheckRequest request; - - EXPECT_CALL(active_span_, spawnChild_(_, config_->tracingName(), _)).WillOnce(Return(child_span)); - EXPECT_CALL(*child_span, - setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(config_->cluster()))); - EXPECT_CALL(*child_span, injectContext(_)); - - client_->check(request_callbacks_, request, active_span_, stream_info_); + client_->check(request_callbacks_, request, parent_span_, stream_info_); EXPECT_CALL(request_callbacks_, onComplete_(WhenDynamicCastTo(AuthzErrorResponse(CheckStatus::Error)))); - EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().Error), Eq(Tracing::Tags::get().True))); - EXPECT_CALL(*child_span, finishSpan()); client_->onSuccess(async_request_, std::move(check_response)); } // Test the client when the request is canceled. 
TEST_F(ExtAuthzHttpClientTest, CancelledAuthorizationRequest) { - Tracing::MockSpan* child_span{new Tracing::MockSpan()}; envoy::service::auth::v3::CheckRequest request; - EXPECT_CALL(active_span_, spawnChild_(_, config_->tracingName(), _)).WillOnce(Return(child_span)); - EXPECT_CALL(*child_span, - setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(config_->cluster()))); - EXPECT_CALL(*child_span, injectContext(_)); EXPECT_CALL(async_client_, send_(_, _, _)).WillOnce(Return(&async_request_)); - client_->check(request_callbacks_, request, active_span_, stream_info_); + client_->check(request_callbacks_, request, parent_span_, stream_info_); EXPECT_CALL(async_request_, cancel()); - EXPECT_CALL(*child_span, - setTag(Eq(Tracing::Tags::get().Status), Eq(Tracing::Tags::get().Canceled))); - EXPECT_CALL(*child_span, finishSpan()); client_->cancel(); } // Test the client when the configured cluster is missing/removed. TEST_F(ExtAuthzHttpClientTest, NoCluster) { InSequence s; - Tracing::MockSpan* child_span{new Tracing::MockSpan()}; - EXPECT_CALL(active_span_, spawnChild_(_, config_->tracingName(), _)).WillOnce(Return(child_span)); - EXPECT_CALL(*child_span, - setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(config_->cluster()))); EXPECT_CALL(cm_, get(Eq("ext_authz"))).WillOnce(Return(nullptr)); EXPECT_CALL(cm_, httpAsyncClientForCluster("ext_authz")).Times(0); EXPECT_CALL(request_callbacks_, onComplete_(WhenDynamicCastTo(AuthzErrorResponse(CheckStatus::Error)))); - EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().Error), Eq(Tracing::Tags::get().True))); - EXPECT_CALL(*child_span, finishSpan()); - client_->check(request_callbacks_, envoy::service::auth::v3::CheckRequest{}, active_span_, + client_->check(request_callbacks_, envoy::service::auth::v3::CheckRequest{}, parent_span_, stream_info_); } diff --git a/test/extensions/filters/common/ext_authz/test_common.cc b/test/extensions/filters/common/ext_authz/test_common.cc index 9e8af02f38079..f23c363d2ed3f 100644 --- 
a/test/extensions/filters/common/ext_authz/test_common.cc +++ b/test/extensions/filters/common/ext_authz/test_common.cc @@ -65,7 +65,7 @@ Response TestCommon::makeAuthzResponse(CheckStatus status, Http::Code status_cod authz_response.headers_to_append.emplace_back(Http::LowerCaseString(header.header().key()), header.header().value()); } else { - authz_response.headers_to_add.emplace_back(Http::LowerCaseString(header.header().key()), + authz_response.headers_to_set.emplace_back(Http::LowerCaseString(header.header().key()), header.header().value()); } } @@ -98,7 +98,7 @@ Http::ResponseMessagePtr TestCommon::makeMessageResponse(const HeaderValueOption return response; }; -bool TestCommon::CompareHeaderVector(const Http::HeaderVector& lhs, const Http::HeaderVector& rhs) { +bool TestCommon::compareHeaderVector(const Http::HeaderVector& lhs, const Http::HeaderVector& rhs) { return std::set>(lhs.begin(), lhs.end()) == std::set>(rhs.begin(), rhs.end()); } diff --git a/test/extensions/filters/common/ext_authz/test_common.h b/test/extensions/filters/common/ext_authz/test_common.h index 47a4ad6e3bb85..07348b838dfac 100644 --- a/test/extensions/filters/common/ext_authz/test_common.h +++ b/test/extensions/filters/common/ext_authz/test_common.h @@ -44,7 +44,7 @@ class TestCommon { static HeaderValueOptionVector makeHeaderValueOption(KeyValueOptionVector&& headers); - static bool CompareHeaderVector(const Http::HeaderVector& lhs, const Http::HeaderVector& rhs); + static bool compareHeaderVector(const Http::HeaderVector& lhs, const Http::HeaderVector& rhs); }; MATCHER_P(AuthzErrorResponse, status, "") { @@ -77,7 +77,7 @@ MATCHER_P(AuthzDeniedResponse, response, "") { return false; } // Compare headers_to_add. 
- return TestCommon::CompareHeaderVector(response.headers_to_add, arg->headers_to_add); + return TestCommon::compareHeaderVector(response.headers_to_add, arg->headers_to_add); } MATCHER_P(AuthzOkResponse, response, "") { @@ -85,12 +85,12 @@ MATCHER_P(AuthzOkResponse, response, "") { return false; } // Compare headers_to_append. - if (!TestCommon::CompareHeaderVector(response.headers_to_append, arg->headers_to_append)) { + if (!TestCommon::compareHeaderVector(response.headers_to_append, arg->headers_to_append)) { return false; } // Compare headers_to_add. - return TestCommon::CompareHeaderVector(response.headers_to_add, arg->headers_to_add); + return TestCommon::compareHeaderVector(response.headers_to_add, arg->headers_to_add); ; } diff --git a/test/extensions/filters/common/fault/BUILD b/test/extensions/filters/common/fault/BUILD index a2b3a89d9acb7..da4af82caf984 100644 --- a/test/extensions/filters/common/fault/BUILD +++ b/test/extensions/filters/common/fault/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( diff --git a/test/extensions/filters/common/fault/fault_config_test.cc b/test/extensions/filters/common/fault/fault_config_test.cc index 340cd2c3dd70b..784aa2d88ab46 100644 --- a/test/extensions/filters/common/fault/fault_config_test.cc +++ b/test/extensions/filters/common/fault/fault_config_test.cc @@ -20,19 +20,41 @@ TEST(FaultConfigTest, FaultAbortHeaderConfig) { // Header with bad data. Http::TestRequestHeaderMapImpl bad_headers{{"x-envoy-fault-abort-request", "abc"}}; - EXPECT_EQ(absl::nullopt, config.statusCode(&bad_headers)); + EXPECT_EQ(absl::nullopt, config.httpStatusCode(&bad_headers)); // Out of range header - value too low. 
Http::TestRequestHeaderMapImpl too_low_headers{{"x-envoy-fault-abort-request", "199"}}; - EXPECT_EQ(absl::nullopt, config.statusCode(&too_low_headers)); + EXPECT_EQ(absl::nullopt, config.httpStatusCode(&too_low_headers)); // Out of range header - value too high. Http::TestRequestHeaderMapImpl too_high_headers{{"x-envoy-fault-abort-request", "600"}}; - EXPECT_EQ(absl::nullopt, config.statusCode(&too_high_headers)); + EXPECT_EQ(absl::nullopt, config.httpStatusCode(&too_high_headers)); // Valid header. Http::TestRequestHeaderMapImpl good_headers{{"x-envoy-fault-abort-request", "401"}}; - EXPECT_EQ(Http::Code::Unauthorized, config.statusCode(&good_headers).value()); + EXPECT_EQ(Http::Code::Unauthorized, config.httpStatusCode(&good_headers)); +} + +TEST(FaultConfigTest, FaultAbortGrpcHeaderConfig) { + envoy::extensions::filters::http::fault::v3::FaultAbort proto_config; + proto_config.mutable_header_abort(); + FaultAbortConfig config(proto_config); + + // Header with bad data. + Http::TestRequestHeaderMapImpl bad_headers{{"x-envoy-fault-abort-grpc-request", "abc"}}; + EXPECT_EQ(absl::nullopt, config.grpcStatusCode(&bad_headers)); + + // Out of range header - value too low. + Http::TestRequestHeaderMapImpl too_low_headers{{"x-envoy-fault-abort-grpc-request", "-1"}}; + EXPECT_EQ(absl::nullopt, config.grpcStatusCode(&too_low_headers)); + + // Valid header - with well-defined gRPC status code in [0,16] range. + Http::TestRequestHeaderMapImpl good_headers{{"x-envoy-fault-abort-grpc-request", "5"}}; + EXPECT_EQ(Grpc::Status::NotFound, config.grpcStatusCode(&good_headers)); + + // Valid header - with not well-defined gRPC status code (> 16). 
+ Http::TestRequestHeaderMapImpl too_high_headers{{"x-envoy-fault-abort-grpc-request", "100"}}; + EXPECT_EQ(100, config.grpcStatusCode(&too_high_headers)); } TEST(FaultConfigTest, FaultAbortPercentageHeaderConfig) { diff --git a/test/extensions/filters/common/lua/BUILD b/test/extensions/filters/common/lua/BUILD index df115309d63c3..88d42f01aab0b 100644 --- a/test/extensions/filters/common/lua/BUILD +++ b/test/extensions/filters/common/lua/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", @@ -7,12 +5,16 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( name = "lua_test", srcs = ["lua_test.cc"], + tags = ["skip_on_windows"], deps = [ + "//source/common/thread_local:thread_local_lib", "//source/extensions/filters/common/lua:lua_lib", "//test/mocks:common_lib", "//test/mocks/thread_local:thread_local_mocks", @@ -23,6 +25,7 @@ envoy_cc_test( envoy_cc_test( name = "wrappers_test", srcs = ["wrappers_test.cc"], + tags = ["skip_on_windows"], deps = [ ":lua_wrappers_lib", "//source/common/buffer:buffer_lib", @@ -37,6 +40,7 @@ envoy_cc_test( envoy_cc_test_library( name = "lua_wrappers_lib", hdrs = ["lua_wrappers.h"], + tags = ["skip_on_windows"], deps = [ "//source/extensions/filters/common/lua:lua_lib", "//test/mocks/thread_local:thread_local_mocks", diff --git a/test/extensions/filters/common/lua/lua_test.cc b/test/extensions/filters/common/lua/lua_test.cc index b5770a0b20d79..27c9e35f8cfe6 100644 --- a/test/extensions/filters/common/lua/lua_test.cc +++ b/test/extensions/filters/common/lua/lua_test.cc @@ -1,5 +1,7 @@ #include +#include "common/thread_local/thread_local_impl.h" + #include "extensions/filters/common/lua/lua.h" #include "test/mocks/common.h" @@ -46,7 +48,7 @@ class LuaTest : public testing::Test { } NiceMock tls_; - std::unique_ptr state_; + ThreadLocalStatePtr state_; std::function yield_callback_; ReadyWatcher on_yield_; }; @@ -157,6 +159,55 @@ 
TEST_F(LuaTest, MarkDead) { lua_gc(cr1->luaState(), LUA_GCCOLLECT, 0); } +class ThreadSafeTest : public testing::Test { +public: + ThreadSafeTest() + : api_(Api::createApiForTest()), main_dispatcher_(api_->allocateDispatcher("main")), + worker_dispatcher_(api_->allocateDispatcher("worker")) {} + + // Use real dispatchers to verify that callback functions can be executed correctly. + Api::ApiPtr api_; + Event::DispatcherPtr main_dispatcher_; + Event::DispatcherPtr worker_dispatcher_; + ThreadLocal::InstanceImpl tls_; + + std::unique_ptr state_; +}; + +// Test whether ThreadLocalState can be safely released. +TEST_F(ThreadSafeTest, StateDestructedBeforeWorkerRun) { + const std::string SCRIPT{R"EOF( + function HelloWorld() + print("Hello World!") + end + )EOF"}; + + tls_.registerThread(*main_dispatcher_, true); + EXPECT_EQ(main_dispatcher_.get(), &tls_.dispatcher()); + tls_.registerThread(*worker_dispatcher_, false); + + // Some callback functions waiting to be executed will be added to the dispatcher of the Worker + // thread. The callback functions in the main thread will be executed directly. + state_ = std::make_unique(SCRIPT, tls_); + state_->registerType(); + + main_dispatcher_->run(Event::Dispatcher::RunType::Block); + + // Destroy state_. + state_.reset(nullptr); + + // Start a new worker thread to execute the callback functions in the worker dispatcher. + Thread::ThreadPtr thread = Thread::threadFactoryForTest().createThread([this]() { + worker_dispatcher_->run(Event::Dispatcher::RunType::Block); + // Verify we have the expected dispatcher for the new worker thread. 
+ EXPECT_EQ(worker_dispatcher_.get(), &tls_.dispatcher()); + }); + thread->join(); + + tls_.shutdownGlobalThreading(); + tls_.shutdownThread(); +} + } // namespace } // namespace Lua } // namespace Common diff --git a/test/extensions/filters/common/lua/lua_wrappers.h b/test/extensions/filters/common/lua/lua_wrappers.h index 4791f9e5109a9..e13f1914c48d7 100644 --- a/test/extensions/filters/common/lua/lua_wrappers.h +++ b/test/extensions/filters/common/lua/lua_wrappers.h @@ -1,5 +1,7 @@ #pragma once +#include + #include "extensions/filters/common/lua/lua.h" #include "test/mocks/thread_local/mocks.h" @@ -18,7 +20,7 @@ template class LuaWrappersTestBase : public testing::Test { public: virtual void setup(const std::string& code) { coroutine_.reset(); - state_.reset(new ThreadLocalState(code, tls_)); + state_ = std::make_unique(code, tls_); state_->registerType(); coroutine_ = state_->createCoroutine(); lua_pushlightuserdata(coroutine_->luaState(), this); @@ -41,7 +43,7 @@ template class LuaWrappersTestBase : public testing::Test { MOCK_METHOD(void, testPrint, (const std::string&)); NiceMock tls_; - std::unique_ptr state_; + ThreadLocalStatePtr state_; std::function yield_callback_; CoroutinePtr coroutine_; }; diff --git a/test/extensions/filters/common/original_src/BUILD b/test/extensions/filters/common/original_src/BUILD index 02da243f0ca22..1d3a5d28847d5 100644 --- a/test/extensions/filters/common/original_src/BUILD +++ b/test/extensions/filters/common/original_src/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( diff --git a/test/extensions/filters/common/ratelimit/BUILD b/test/extensions/filters/common/ratelimit/BUILD index 2bc6d08d7e8b3..652af79831f95 100644 --- a/test/extensions/filters/common/ratelimit/BUILD +++ b/test/extensions/filters/common/ratelimit/BUILD @@ -1,12 +1,13 @@ -licenses(["notice"]) # Apache 2 - 
load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", "envoy_cc_test", + "envoy_cc_test_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( @@ -32,3 +33,11 @@ envoy_cc_mock( "//source/extensions/filters/common/ratelimit:ratelimit_client_interface", ], ) + +envoy_cc_test_library( + name = "ratelimit_utils", + hdrs = ["utils.h"], + deps = [ + "@envoy_api//envoy/service/ratelimit/v3:pkg_cc_proto", + ], +) diff --git a/test/extensions/filters/common/ratelimit/ratelimit_impl_test.cc b/test/extensions/filters/common/ratelimit/ratelimit_impl_test.cc index 021c17ebffe55..bb4545583993c 100644 --- a/test/extensions/filters/common/ratelimit/ratelimit_impl_test.cc +++ b/test/extensions/filters/common/ratelimit/ratelimit_impl_test.cc @@ -35,13 +35,16 @@ namespace { class MockRequestCallbacks : public RequestCallbacks { public: - void complete(LimitStatus status, Http::ResponseHeaderMapPtr&& response_headers_to_add, + void complete(LimitStatus status, DescriptorStatusListPtr&& descriptor_statuses, + Http::ResponseHeaderMapPtr&& response_headers_to_add, Http::RequestHeaderMapPtr&& request_headers_to_add) override { - complete_(status, response_headers_to_add.get(), request_headers_to_add.get()); + complete_(status, descriptor_statuses.get(), response_headers_to_add.get(), + request_headers_to_add.get()); } MOCK_METHOD(void, complete_, - (LimitStatus status, const Http::ResponseHeaderMap* response_headers_to_add, + (LimitStatus status, const DescriptorStatusList* descriptor_statuses, + const Http::ResponseHeaderMap* response_headers_to_add, const Http::RequestHeaderMap* request_headers_to_add)); }; @@ -49,8 +52,8 @@ class RateLimitGrpcClientTest : public testing::Test { public: RateLimitGrpcClientTest() : async_client_(new Grpc::MockAsyncClient()), - client_(Grpc::RawAsyncClientPtr{async_client_}, - absl::optional()) {} + client_(Grpc::RawAsyncClientPtr{async_client_}, absl::optional(), + envoy::config::core::v3::ApiVersion::AUTO) {} 
Grpc::MockAsyncClient* async_client_; Grpc::MockAsyncRequest async_request_; @@ -64,7 +67,7 @@ TEST_F(RateLimitGrpcClientTest, Basic) { { envoy::service::ratelimit::v3::RateLimitRequest request; - Http::RequestHeaderMapImpl headers; + Http::TestRequestHeaderMapImpl headers; GrpcClientImpl::createRequest(request, "foo", {{{{"foo", "bar"}}}}); EXPECT_CALL(*async_client_, sendRaw(_, _, Grpc::ProtoBufferEq(request), Ref(client_), _, _)) .WillOnce( @@ -85,13 +88,13 @@ TEST_F(RateLimitGrpcClientTest, Basic) { response = std::make_unique(); response->set_overall_code(envoy::service::ratelimit::v3::RateLimitResponse::OVER_LIMIT); EXPECT_CALL(span_, setTag(Eq("ratelimit_status"), Eq("over_limit"))); - EXPECT_CALL(request_callbacks_, complete_(LimitStatus::OverLimit, _, _)); + EXPECT_CALL(request_callbacks_, complete_(LimitStatus::OverLimit, _, _, _)); client_.onSuccess(std::move(response), span_); } { envoy::service::ratelimit::v3::RateLimitRequest request; - Http::RequestHeaderMapImpl headers; + Http::TestRequestHeaderMapImpl headers; GrpcClientImpl::createRequest(request, "foo", {{{{"foo", "bar"}, {"bar", "baz"}}}}); EXPECT_CALL(*async_client_, sendRaw(_, _, Grpc::ProtoBufferEq(request), _, _, _)) .WillOnce(Return(&async_request_)); @@ -104,7 +107,7 @@ TEST_F(RateLimitGrpcClientTest, Basic) { response = std::make_unique(); response->set_overall_code(envoy::service::ratelimit::v3::RateLimitResponse::OK); EXPECT_CALL(span_, setTag(Eq("ratelimit_status"), Eq("ok"))); - EXPECT_CALL(request_callbacks_, complete_(LimitStatus::OK, _, _)); + EXPECT_CALL(request_callbacks_, complete_(LimitStatus::OK, _, _, _)); client_.onSuccess(std::move(response), span_); } @@ -121,9 +124,32 @@ TEST_F(RateLimitGrpcClientTest, Basic) { Tracing::NullSpan::instance()); response = std::make_unique(); - EXPECT_CALL(request_callbacks_, complete_(LimitStatus::Error, _, _)); + EXPECT_CALL(request_callbacks_, complete_(LimitStatus::Error, _, _, _)); client_.onFailure(Grpc::Status::Unknown, "", span_); } 
+ + { + envoy::service::ratelimit::v3::RateLimitRequest request; + Http::TestRequestHeaderMapImpl headers; + GrpcClientImpl::createRequest( + request, "foo", + {{{{"foo", "bar"}, {"bar", "baz"}}, {{42, envoy::type::v3::RateLimitUnit::MINUTE}}}}); + EXPECT_CALL(*async_client_, sendRaw(_, _, Grpc::ProtoBufferEq(request), _, _, _)) + .WillOnce(Return(&async_request_)); + + client_.limit( + request_callbacks_, "foo", + {{{{"foo", "bar"}, {"bar", "baz"}}, {{42, envoy::type::v3::RateLimitUnit::MINUTE}}}}, + Tracing::NullSpan::instance()); + + client_.onCreateInitialMetadata(headers); + + response = std::make_unique(); + response->set_overall_code(envoy::service::ratelimit::v3::RateLimitResponse::OK); + EXPECT_CALL(span_, setTag(Eq("ratelimit_status"), Eq("ok"))); + EXPECT_CALL(request_callbacks_, complete_(LimitStatus::OK, _, _, _)); + client_.onSuccess(std::move(response), span_); + } } TEST_F(RateLimitGrpcClientTest, Cancel) { diff --git a/test/extensions/filters/common/ratelimit/utils.h b/test/extensions/filters/common/ratelimit/utils.h new file mode 100644 index 0000000000000..d993ed35e4a75 --- /dev/null +++ b/test/extensions/filters/common/ratelimit/utils.h @@ -0,0 +1,28 @@ +#pragma once + +#include + +#include "envoy/service/ratelimit/v3/rls.pb.h" + +namespace Envoy { +namespace RateLimit { + +inline envoy::service::ratelimit::v3::RateLimitResponse_DescriptorStatus +buildDescriptorStatus(uint32_t requests_per_unit, + envoy::service::ratelimit::v3::RateLimitResponse_RateLimit_Unit unit, + std::string name, uint32_t limit_remaining, uint32_t seconds_until_reset) { + envoy::service::ratelimit::v3::RateLimitResponse_DescriptorStatus statusMsg; + statusMsg.set_limit_remaining(limit_remaining); + statusMsg.mutable_duration_until_reset()->set_seconds(seconds_until_reset); + if (requests_per_unit) { + envoy::service::ratelimit::v3::RateLimitResponse_RateLimit* limitMsg = + statusMsg.mutable_current_limit(); + limitMsg->set_requests_per_unit(requests_per_unit); + 
limitMsg->set_unit(unit); + limitMsg->set_name(name); + } + return statusMsg; +} + +} // namespace RateLimit +} // namespace Envoy diff --git a/test/extensions/filters/common/rbac/BUILD b/test/extensions/filters/common/rbac/BUILD index 6454c69e159f4..64e405da4d913 100644 --- a/test/extensions/filters/common/rbac/BUILD +++ b/test/extensions/filters/common/rbac/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -10,6 +8,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/filters/common/rbac/engine_impl_test.cc b/test/extensions/filters/common/rbac/engine_impl_test.cc index 42306d4bc7f4b..b9d8608a92083 100644 --- a/test/extensions/filters/common/rbac/engine_impl_test.cc +++ b/test/extensions/filters/common/rbac/engine_impl_test.cc @@ -24,20 +24,56 @@ namespace Common { namespace RBAC { namespace { -void checkEngine(const RBAC::RoleBasedAccessControlEngineImpl& engine, bool expected, - const Envoy::Network::Connection& connection = Envoy::Network::MockConnection(), - const Envoy::Http::RequestHeaderMap& headers = Envoy::Http::RequestHeaderMapImpl(), - const StreamInfo::StreamInfo& info = NiceMock()) { - EXPECT_EQ(expected, engine.allowed(connection, headers, info, nullptr)); +enum class LogResult { Yes, No, Undecided }; + +void checkEngine( + RBAC::RoleBasedAccessControlEngineImpl& engine, bool expected, LogResult expected_log, + StreamInfo::StreamInfo& info, + const Envoy::Network::Connection& connection = Envoy::Network::MockConnection(), + const Envoy::Http::RequestHeaderMap& headers = Envoy::Http::TestRequestHeaderMapImpl()) { + + bool engineRes = engine.handleAction(connection, headers, info, nullptr); + EXPECT_EQ(expected, engineRes); + + if (expected_log != LogResult::Undecided) { + auto filter_meta = info.dynamicMetadata().filter_metadata().at( + RBAC::DynamicMetadataKeysSingleton::get().CommonNamespace); 
+ EXPECT_EQ(expected_log == LogResult::Yes, + filter_meta.fields() + .at(RBAC::DynamicMetadataKeysSingleton::get().AccessLogKey) + .bool_value()); + } else { + EXPECT_EQ(info.dynamicMetadata().filter_metadata().end(), + info.dynamicMetadata().filter_metadata().find( + Filters::Common::RBAC::DynamicMetadataKeysSingleton::get().CommonNamespace)); + } +} + +void checkEngine( + RBAC::RoleBasedAccessControlEngineImpl& engine, bool expected, LogResult expected_log, + const Envoy::Network::Connection& connection = Envoy::Network::MockConnection(), + const Envoy::Http::RequestHeaderMap& headers = Envoy::Http::TestRequestHeaderMapImpl()) { + + NiceMock empty_info; + checkEngine(engine, expected, expected_log, empty_info, connection, headers); +} + +void onMetadata(NiceMock& info) { + ON_CALL(info, setDynamicMetadata("envoy.common", _)) + .WillByDefault(Invoke([&info](const std::string&, const ProtobufWkt::Struct& obj) { + (*info.metadata_.mutable_filter_metadata())["envoy.common"] = obj; + })); } TEST(RoleBasedAccessControlEngineImpl, Disabled) { envoy::config::rbac::v3::RBAC rbac; rbac.set_action(envoy::config::rbac::v3::RBAC::ALLOW); - checkEngine(RBAC::RoleBasedAccessControlEngineImpl(rbac), false); + RBAC::RoleBasedAccessControlEngineImpl engine_allow(rbac); + checkEngine(engine_allow, false, LogResult::Undecided); rbac.set_action(envoy::config::rbac::v3::RBAC::DENY); - checkEngine(RBAC::RoleBasedAccessControlEngineImpl(rbac), true); + RBAC::RoleBasedAccessControlEngineImpl engine_deny(rbac); + checkEngine(engine_deny, true, LogResult::Undecided); } // Test various invalid policies to validate the fix for @@ -126,7 +162,7 @@ TEST(RoleBasedAccessControlEngineImpl, InvalidConfig) { } } -TEST(RoleBasedAccessControlEngineImpl, AllowedWhitelist) { +TEST(RoleBasedAccessControlEngineImpl, AllowedAllowlist) { envoy::config::rbac::v3::Policy policy; policy.add_permissions()->set_destination_port(123); policy.add_principals()->set_any(true); @@ -137,19 +173,19 @@ 
TEST(RoleBasedAccessControlEngineImpl, AllowedWhitelist) { RBAC::RoleBasedAccessControlEngineImpl engine(rbac); Envoy::Network::MockConnection conn; - Envoy::Http::RequestHeaderMapImpl headers; + Envoy::Http::TestRequestHeaderMapImpl headers; NiceMock info; Envoy::Network::Address::InstanceConstSharedPtr addr = Envoy::Network::Utility::parseInternetAddress("1.2.3.4", 123, false); EXPECT_CALL(Const(info), downstreamLocalAddress()).WillOnce(ReturnRef(addr)); - checkEngine(engine, true, conn, headers, info); + checkEngine(engine, true, LogResult::Undecided, info, conn, headers); addr = Envoy::Network::Utility::parseInternetAddress("1.2.3.4", 456, false); EXPECT_CALL(Const(info), downstreamLocalAddress()).WillOnce(ReturnRef(addr)); - checkEngine(engine, false, conn, headers, info); + checkEngine(engine, false, LogResult::Undecided, info, conn, headers); } -TEST(RoleBasedAccessControlEngineImpl, DeniedBlacklist) { +TEST(RoleBasedAccessControlEngineImpl, DeniedDenylist) { envoy::config::rbac::v3::Policy policy; policy.add_permissions()->set_destination_port(123); policy.add_principals()->set_any(true); @@ -160,16 +196,16 @@ TEST(RoleBasedAccessControlEngineImpl, DeniedBlacklist) { RBAC::RoleBasedAccessControlEngineImpl engine(rbac); Envoy::Network::MockConnection conn; - Envoy::Http::RequestHeaderMapImpl headers; + Envoy::Http::TestRequestHeaderMapImpl headers; NiceMock info; Envoy::Network::Address::InstanceConstSharedPtr addr = Envoy::Network::Utility::parseInternetAddress("1.2.3.4", 123, false); EXPECT_CALL(Const(info), downstreamLocalAddress()).WillOnce(ReturnRef(addr)); - checkEngine(engine, false, conn, headers, info); + checkEngine(engine, false, LogResult::Undecided, info, conn, headers); addr = Envoy::Network::Utility::parseInternetAddress("1.2.3.4", 456, false); EXPECT_CALL(Const(info), downstreamLocalAddress()).WillOnce(ReturnRef(addr)); - checkEngine(engine, true, conn, headers, info); + checkEngine(engine, true, LogResult::Undecided, info, conn, headers); } 
TEST(RoleBasedAccessControlEngineImpl, BasicCondition) { @@ -186,7 +222,7 @@ TEST(RoleBasedAccessControlEngineImpl, BasicCondition) { rbac.set_action(envoy::config::rbac::v3::RBAC::ALLOW); (*rbac.mutable_policies())["foo"] = policy; RBAC::RoleBasedAccessControlEngineImpl engine(rbac); - checkEngine(engine, false); + checkEngine(engine, false, LogResult::Undecided); } TEST(RoleBasedAccessControlEngineImpl, MalformedCondition) { @@ -208,6 +244,10 @@ TEST(RoleBasedAccessControlEngineImpl, MalformedCondition) { EXPECT_THROW_WITH_REGEX(RBAC::RoleBasedAccessControlEngineImpl engine(rbac), EnvoyException, "failed to create an expression: .*"); + + rbac.set_action(envoy::config::rbac::v3::RBAC::LOG); + EXPECT_THROW_WITH_REGEX(RBAC::RoleBasedAccessControlEngineImpl engine_log(rbac), EnvoyException, + "failed to create an expression: .*"); } TEST(RoleBasedAccessControlEngineImpl, MistypedCondition) { @@ -224,7 +264,7 @@ TEST(RoleBasedAccessControlEngineImpl, MistypedCondition) { rbac.set_action(envoy::config::rbac::v3::RBAC::ALLOW); (*rbac.mutable_policies())["foo"] = policy; RBAC::RoleBasedAccessControlEngineImpl engine(rbac); - checkEngine(engine, false); + checkEngine(engine, false, LogResult::Undecided); } TEST(RoleBasedAccessControlEngineImpl, ErrorCondition) { @@ -249,7 +289,7 @@ TEST(RoleBasedAccessControlEngineImpl, ErrorCondition) { rbac.set_action(envoy::config::rbac::v3::RBAC::ALLOW); (*rbac.mutable_policies())["foo"] = policy; RBAC::RoleBasedAccessControlEngineImpl engine(rbac); - checkEngine(engine, false, Envoy::Network::MockConnection()); + checkEngine(engine, false, LogResult::Undecided, Envoy::Network::MockConnection()); } TEST(RoleBasedAccessControlEngineImpl, HeaderCondition) { @@ -280,12 +320,12 @@ TEST(RoleBasedAccessControlEngineImpl, HeaderCondition) { (*rbac.mutable_policies())["foo"] = policy; RBAC::RoleBasedAccessControlEngineImpl engine(rbac); - Envoy::Http::RequestHeaderMapImpl headers; + Envoy::Http::TestRequestHeaderMapImpl headers; 
Envoy::Http::LowerCaseString key("foo"); std::string value = "bar"; headers.setReference(key, value); - checkEngine(engine, true, Envoy::Network::MockConnection(), headers); + checkEngine(engine, true, LogResult::Undecided, Envoy::Network::MockConnection(), headers); } TEST(RoleBasedAccessControlEngineImpl, MetadataCondition) { @@ -321,7 +361,7 @@ TEST(RoleBasedAccessControlEngineImpl, MetadataCondition) { (*rbac.mutable_policies())["foo"] = policy; RBAC::RoleBasedAccessControlEngineImpl engine(rbac); - Envoy::Http::RequestHeaderMapImpl headers; + Envoy::Http::TestRequestHeaderMapImpl headers; NiceMock info; auto label = MessageUtil::keyValueStruct("label", "prod"); @@ -330,7 +370,7 @@ TEST(RoleBasedAccessControlEngineImpl, MetadataCondition) { Protobuf::MapPair("other", label)); EXPECT_CALL(Const(info), dynamicMetadata()).WillRepeatedly(ReturnRef(metadata)); - checkEngine(engine, true, Envoy::Network::MockConnection(), headers, info); + checkEngine(engine, true, LogResult::Undecided, info, Envoy::Network::MockConnection(), headers); } TEST(RoleBasedAccessControlEngineImpl, ConjunctiveCondition) { @@ -349,12 +389,48 @@ TEST(RoleBasedAccessControlEngineImpl, ConjunctiveCondition) { RBAC::RoleBasedAccessControlEngineImpl engine(rbac); Envoy::Network::MockConnection conn; - Envoy::Http::RequestHeaderMapImpl headers; + Envoy::Http::TestRequestHeaderMapImpl headers; + NiceMock info; + Envoy::Network::Address::InstanceConstSharedPtr addr = + Envoy::Network::Utility::parseInternetAddress("1.2.3.4", 123, false); + EXPECT_CALL(Const(info), downstreamLocalAddress()).Times(1).WillRepeatedly(ReturnRef(addr)); + checkEngine(engine, false, LogResult::Undecided, info, conn, headers); +} + +// Log tests +TEST(RoleBasedAccessControlEngineImpl, DisabledLog) { + NiceMock info; + onMetadata(info); + + envoy::config::rbac::v3::RBAC rbac; + rbac.set_action(envoy::config::rbac::v3::RBAC::LOG); + RBAC::RoleBasedAccessControlEngineImpl engine(rbac); + checkEngine(engine, true, 
RBAC::LogResult::No, info); +} + +TEST(RoleBasedAccessControlEngineImpl, LogIfMatched) { + envoy::config::rbac::v3::Policy policy; + policy.add_permissions()->set_destination_port(123); + policy.add_principals()->set_any(true); + + envoy::config::rbac::v3::RBAC rbac; + rbac.set_action(envoy::config::rbac::v3::RBAC::LOG); + (*rbac.mutable_policies())["foo"] = policy; + RBAC::RoleBasedAccessControlEngineImpl engine(rbac); + + Envoy::Network::MockConnection conn; + Envoy::Http::TestRequestHeaderMapImpl headers; NiceMock info; + onMetadata(info); + Envoy::Network::Address::InstanceConstSharedPtr addr = Envoy::Network::Utility::parseInternetAddress("1.2.3.4", 123, false); EXPECT_CALL(Const(info), downstreamLocalAddress()).WillOnce(ReturnRef(addr)); - checkEngine(engine, false, conn, headers, info); + checkEngine(engine, true, RBAC::LogResult::Yes, info, conn, headers); + + addr = Envoy::Network::Utility::parseInternetAddress("1.2.3.4", 456, false); + EXPECT_CALL(Const(info), downstreamLocalAddress()).WillOnce(ReturnRef(addr)); + checkEngine(engine, true, RBAC::LogResult::No, info, conn, headers); } } // namespace diff --git a/test/extensions/filters/common/rbac/matchers_test.cc b/test/extensions/filters/common/rbac/matchers_test.cc index d710859f9d733..52947461ae691 100644 --- a/test/extensions/filters/common/rbac/matchers_test.cc +++ b/test/extensions/filters/common/rbac/matchers_test.cc @@ -29,7 +29,7 @@ namespace { void checkMatcher( const RBAC::Matcher& matcher, bool expected, const Envoy::Network::Connection& connection = Envoy::Network::MockConnection(), - const Envoy::Http::RequestHeaderMap& headers = Envoy::Http::RequestHeaderMapImpl(), + const Envoy::Http::RequestHeaderMap& headers = Envoy::Http::TestRequestHeaderMapImpl(), const StreamInfo::StreamInfo& info = NiceMock()) { EXPECT_EQ(expected, matcher.matches(connection, headers, info)); } @@ -47,7 +47,7 @@ TEST(AndMatcher, Permission_Set) { perm->set_destination_port(123); Envoy::Network::MockConnection conn; 
- Envoy::Http::RequestHeaderMapImpl headers; + Envoy::Http::TestRequestHeaderMapImpl headers; NiceMock info; Envoy::Network::Address::InstanceConstSharedPtr addr = Envoy::Network::Utility::parseInternetAddress("1.2.3.4", 123, false); @@ -74,7 +74,7 @@ TEST(AndMatcher, Principal_Set) { cidr->mutable_prefix_len()->set_value(24); Envoy::Network::MockConnection conn; - Envoy::Http::RequestHeaderMapImpl headers; + Envoy::Http::TestRequestHeaderMapImpl headers; NiceMock info; Envoy::Network::Address::InstanceConstSharedPtr addr = Envoy::Network::Utility::parseInternetAddress("1.2.3.4", 123, false); @@ -94,7 +94,7 @@ TEST(OrMatcher, Permission_Set) { perm->set_destination_port(123); Envoy::Network::MockConnection conn; - Envoy::Http::RequestHeaderMapImpl headers; + Envoy::Http::TestRequestHeaderMapImpl headers; NiceMock info; Envoy::Network::Address::InstanceConstSharedPtr addr = Envoy::Network::Utility::parseInternetAddress("1.2.3.4", 456, false); @@ -116,7 +116,7 @@ TEST(OrMatcher, Principal_Set) { cidr->mutable_prefix_len()->set_value(24); Envoy::Network::MockConnection conn; - Envoy::Http::RequestHeaderMapImpl headers; + Envoy::Http::TestRequestHeaderMapImpl headers; NiceMock info; Envoy::Network::Address::InstanceConstSharedPtr addr = Envoy::Network::Utility::parseInternetAddress("1.2.4.6", 456, false); @@ -151,7 +151,7 @@ TEST(HeaderMatcher, HeaderMatcher) { config.set_name("foo"); config.set_exact_match("bar"); - Envoy::Http::RequestHeaderMapImpl headers; + Envoy::Http::TestRequestHeaderMapImpl headers; Envoy::Http::LowerCaseString key("foo"); std::string value = "bar"; headers.setReference(key, value); @@ -169,7 +169,7 @@ TEST(HeaderMatcher, HeaderMatcher) { TEST(IPMatcher, IPMatcher) { Envoy::Network::MockConnection conn; - Envoy::Http::RequestHeaderMapImpl headers; + Envoy::Http::TestRequestHeaderMapImpl headers; NiceMock info; Envoy::Network::Address::InstanceConstSharedPtr connectionRemote = Envoy::Network::Utility::parseInternetAddress("12.13.14.15", 789, 
false); @@ -232,7 +232,7 @@ TEST(IPMatcher, IPMatcher) { TEST(PortMatcher, PortMatcher) { Envoy::Network::MockConnection conn; - Envoy::Http::RequestHeaderMapImpl headers; + Envoy::Http::TestRequestHeaderMapImpl headers; NiceMock info; Envoy::Network::Address::InstanceConstSharedPtr addr = Envoy::Network::Utility::parseInternetAddress("1.2.3.4", 123, false); @@ -336,7 +336,7 @@ TEST(AuthenticatedMatcher, NoSSL) { TEST(MetadataMatcher, MetadataMatcher) { Envoy::Network::MockConnection conn; - Envoy::Http::RequestHeaderMapImpl header; + Envoy::Http::TestRequestHeaderMapImpl header; NiceMock info; auto label = MessageUtil::keyValueStruct("label", "prod"); @@ -368,7 +368,7 @@ TEST(PolicyMatcher, PolicyMatcher) { RBAC::PolicyMatcher matcher(policy, builder.get()); Envoy::Network::MockConnection conn; - Envoy::Http::RequestHeaderMapImpl headers; + Envoy::Http::TestRequestHeaderMapImpl headers; NiceMock info; auto ssl = std::make_shared(); Envoy::Network::Address::InstanceConstSharedPtr addr = @@ -431,7 +431,7 @@ TEST(RequestedServerNameMatcher, EmptyRequestedServerName) { } TEST(PathMatcher, NoPathInHeader) { - Envoy::Http::RequestHeaderMapImpl headers; + Envoy::Http::TestRequestHeaderMapImpl headers; envoy::type::matcher::v3::PathMatcher matcher; matcher.mutable_path()->mutable_safe_regex()->mutable_google_re2(); matcher.mutable_path()->mutable_safe_regex()->set_regex(".*"); @@ -443,7 +443,7 @@ TEST(PathMatcher, NoPathInHeader) { } TEST(PathMatcher, ValidPathInHeader) { - Envoy::Http::RequestHeaderMapImpl headers; + Envoy::Http::TestRequestHeaderMapImpl headers; envoy::type::matcher::v3::PathMatcher matcher; matcher.mutable_path()->set_exact("/exact"); diff --git a/test/extensions/filters/common/rbac/mocks.h b/test/extensions/filters/common/rbac/mocks.h index fda95244c8933..a99e97aa9ea69 100644 --- a/test/extensions/filters/common/rbac/mocks.h +++ b/test/extensions/filters/common/rbac/mocks.h @@ -14,16 +14,17 @@ namespace RBAC { class MockEngine : public 
RoleBasedAccessControlEngineImpl { public: - MockEngine(const envoy::config::rbac::v3::RBAC& rules) - : RoleBasedAccessControlEngineImpl(rules){}; + MockEngine(const envoy::config::rbac::v3::RBAC& rules, + const EnforcementMode mode = EnforcementMode::Enforced) + : RoleBasedAccessControlEngineImpl(rules, mode){}; - MOCK_METHOD(bool, allowed, + MOCK_METHOD(bool, handleAction, (const Envoy::Network::Connection&, const Envoy::Http::RequestHeaderMap&, - const StreamInfo::StreamInfo&, std::string* effective_policy_id), + StreamInfo::StreamInfo&, std::string* effective_policy_id), (const)); - MOCK_METHOD(bool, allowed, - (const Envoy::Network::Connection&, const StreamInfo::StreamInfo&, + MOCK_METHOD(bool, handleAction, + (const Envoy::Network::Connection&, StreamInfo::StreamInfo&, std::string* effective_policy_id), (const)); }; diff --git a/test/extensions/filters/http/adaptive_concurrency/BUILD b/test/extensions/filters/http/adaptive_concurrency/BUILD index fbe81cc26327f..c91f90dcfcf51 100644 --- a/test/extensions/filters/http/adaptive_concurrency/BUILD +++ b/test/extensions/filters/http/adaptive_concurrency/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( @@ -34,6 +34,7 @@ envoy_extension_cc_test( "adaptive_concurrency_filter_integration_test.h", ], extension_name = "envoy.filters.http.adaptive_concurrency", + tags = ["fails_on_windows"], deps = [ "//source/extensions/filters/http/adaptive_concurrency:config", "//source/extensions/filters/http/fault:config", diff --git a/test/extensions/filters/http/adaptive_concurrency/adaptive_concurrency_filter_integration_test.h b/test/extensions/filters/http/adaptive_concurrency/adaptive_concurrency_filter_integration_test.h index c367b33263199..a4f6d35b3dd2b 100644 --- 
a/test/extensions/filters/http/adaptive_concurrency/adaptive_concurrency_filter_integration_test.h +++ b/test/extensions/filters/http/adaptive_concurrency/adaptive_concurrency_filter_integration_test.h @@ -67,11 +67,11 @@ class AdaptiveConcurrencyIntegrationTest void respondToRequest(bool expect_forwarded); void verifyResponseForwarded(IntegrationStreamDecoderPtr response) { - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } void verifyResponseBlocked(IntegrationStreamDecoderPtr response) { - EXPECT_EQ("503", response->headers().Status()->value().getStringView()); + EXPECT_EQ("503", response->headers().getStatusValue()); } std::deque responses_; diff --git a/test/extensions/filters/http/adaptive_concurrency/adaptive_concurrency_filter_test.cc b/test/extensions/filters/http/adaptive_concurrency/adaptive_concurrency_filter_test.cc index bcfed37a24097..5742385d5cc57 100644 --- a/test/extensions/filters/http/adaptive_concurrency/adaptive_concurrency_filter_test.cc +++ b/test/extensions/filters/http/adaptive_concurrency/adaptive_concurrency_filter_test.cc @@ -29,7 +29,7 @@ class MockConcurrencyController : public Controller::ConcurrencyController { public: MOCK_METHOD(RequestForwardingAction, forwardingDecision, ()); MOCK_METHOD(void, cancelLatencySample, ()); - MOCK_METHOD(void, recordLatencySample, (std::chrono::nanoseconds)); + MOCK_METHOD(void, recordLatencySample, (MonotonicTime)); uint32_t concurrencyLimit() const override { return 0; } }; @@ -223,12 +223,12 @@ TEST_F(AdaptiveConcurrencyFilterTest, OnDestroyCleanupTest) { .WillOnce(Return(RequestForwardingAction::Forward)); EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true)); - const auto advance_time = std::chrono::nanoseconds(42); - time_system_.advanceTimeWait(advance_time); + const auto rq_rcv_time = time_system_.monotonicTime(); + 
time_system_.advanceTimeWait(std::chrono::nanoseconds(42)); Http::TestResponseHeaderMapImpl response_headers; EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers, true)); - EXPECT_CALL(*controller_, recordLatencySample(advance_time)); + EXPECT_CALL(*controller_, recordLatencySample(rq_rcv_time)); filter_->encodeComplete(); filter_->onDestroy(); @@ -248,16 +248,16 @@ TEST_F(AdaptiveConcurrencyFilterTest, EncodeHeadersValidTestWithBody) { Http::TestRequestTrailerMapImpl request_trailers; EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers)); - const auto advance_time = std::chrono::nanoseconds(42); + const auto rq_rcv_time = time_system_.monotonicTime(); mt = time_system_.monotonicTime(); - time_system_.setMonotonicTime(mt + advance_time); + time_system_.setMonotonicTime(mt + std::chrono::nanoseconds(42)); Http::TestResponseHeaderMapImpl response_headers; EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers, false)); EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(data, false)); Http::TestResponseTrailerMapImpl response_trailers; EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers)); - EXPECT_CALL(*controller_, recordLatencySample(advance_time)); + EXPECT_CALL(*controller_, recordLatencySample(rq_rcv_time)); filter_->encodeComplete(); } @@ -271,13 +271,13 @@ TEST_F(AdaptiveConcurrencyFilterTest, EncodeHeadersValidTest) { .WillOnce(Return(RequestForwardingAction::Forward)); EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true)); - const auto advance_time = std::chrono::nanoseconds(42); + const auto rq_rcv_time = time_system_.monotonicTime(); mt = time_system_.monotonicTime(); - time_system_.setMonotonicTime(mt + advance_time); + time_system_.setMonotonicTime(mt + std::chrono::nanoseconds(42)); Http::TestResponseHeaderMapImpl response_headers; 
EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers, true)); - EXPECT_CALL(*controller_, recordLatencySample(advance_time)); + EXPECT_CALL(*controller_, recordLatencySample(rq_rcv_time)); filter_->encodeComplete(); } diff --git a/test/extensions/filters/http/adaptive_concurrency/controller/BUILD b/test/extensions/filters/http/adaptive_concurrency/controller/BUILD index 94592fb47acf8..63bf457be4f30 100644 --- a/test/extensions/filters/http/adaptive_concurrency/controller/BUILD +++ b/test/extensions/filters/http/adaptive_concurrency/controller/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/filters/http/adaptive_concurrency/controller/gradient_controller_test.cc b/test/extensions/filters/http/adaptive_concurrency/controller/gradient_controller_test.cc index 812e305297773..3134e30c906c1 100644 --- a/test/extensions/filters/http/adaptive_concurrency/controller/gradient_controller_test.cc +++ b/test/extensions/filters/http/adaptive_concurrency/controller/gradient_controller_test.cc @@ -11,6 +11,7 @@ #include "extensions/filters/http/adaptive_concurrency/controller/gradient_controller.h" #include "test/common/stats/stat_test_utility.h" +#include "test/mocks/common.h" #include "test/mocks/event/mocks.h" #include "test/mocks/runtime/mocks.h" #include "test/test_common/simulated_time_system.h" @@ -51,11 +52,22 @@ class GradientControllerTest : public testing::Test { dispatcher_(api_->allocateDispatcher("test_thread")) {} GradientControllerSharedPtr makeController(const std::string& yaml_config) { - return std::make_shared(makeConfig(yaml_config, runtime_), *dispatcher_, - runtime_, "test_prefix.", stats_, random_); + const auto config = std::make_shared(makeConfig(yaml_config, runtime_), + *dispatcher_, runtime_, 
"test_prefix.", + stats_, random_, time_system_); + + // Advance time so that the latency sample calculations don't underflow if monotonic time is 0. + time_system_.advanceTimeAsync(std::chrono::hours(42)); + + return config; } protected: + void sampleLatency(const GradientControllerSharedPtr& controller, + std::chrono::microseconds latency) { + controller->recordLatencySample(time_system_.monotonicTime() - latency); + } + // Helper function that will attempt to pull forwarding decisions. void tryForward(const GradientControllerSharedPtr& controller, const bool expect_forward_response) { @@ -71,16 +83,36 @@ class GradientControllerTest : public testing::Test { const auto config = makeConfig(yaml_config, runtime_); for (uint32_t i = 0; i <= config.minRTTAggregateRequestCount(); ++i) { tryForward(controller, true); - controller->recordLatencySample(latency); + sampleLatency(controller, latency); } } + void verifyMinRTTValue(std::chrono::milliseconds min_rtt) { + EXPECT_EQ( + min_rtt.count(), + stats_.gauge("test_prefix.min_rtt_msecs", Stats::Gauge::ImportMode::NeverImport).value()); + } + + void verifyMinRTTActive() { + EXPECT_EQ( + 1, + stats_.gauge("test_prefix.min_rtt_calculation_active", Stats::Gauge::ImportMode::Accumulate) + .value()); + } + + void verifyMinRTTInactive() { + EXPECT_EQ( + 0, + stats_.gauge("test_prefix.min_rtt_calculation_active", Stats::Gauge::ImportMode::Accumulate) + .value()); + } + Event::SimulatedTimeSystem time_system_; Stats::TestUtil::TestStore stats_; NiceMock runtime_; Api::ApiPtr api_; Event::DispatcherPtr dispatcher_; - NiceMock random_; + NiceMock random_; }; TEST_F(GradientControllerConfigTest, BasicTest) { @@ -206,6 +238,71 @@ TEST_F(GradientControllerConfigTest, DefaultValuesTest) { EXPECT_EQ(config.minRTTBufferPercent(), 0.25); } +// Verify that requests started in the previous minRTT window are not sampled in the next. 
+TEST_F(GradientControllerTest, MinRTTEpoch) { + const std::string yaml = R"EOF( +sample_aggregate_percentile: + value: 50 +concurrency_limit_params: + concurrency_update_interval: 0.1s +min_rtt_calc_params: + jitter: + value: 0.0 + interval: 30s + request_count: 25 + min_concurrency: 2 + buffer: + value: 0.0 +)EOF"; + + const int min_concurrency = 2; + auto controller = makeController(yaml); + const auto min_rtt = std::chrono::milliseconds(1350); + time_system_.advanceTimeAsync(min_rtt); + + verifyMinRTTActive(); + EXPECT_EQ(controller->concurrencyLimit(), min_concurrency); + advancePastMinRTTStage(controller, yaml, std::chrono::milliseconds(1350)); + verifyMinRTTInactive(); + verifyMinRTTValue(std::chrono::milliseconds(1350)); + + // Advance time to just before the end of the epoch and inflate the concurrency limit. + uint32_t last_limit = controller->concurrencyLimit(); + for (int i = 0; i < 29; ++i) { + tryForward(controller, true); + time_system_.advanceTimeAsync(std::chrono::seconds(1)); + sampleLatency(controller, min_rtt); + dispatcher_->run(Event::Dispatcher::RunType::Block); + EXPECT_GT(controller->concurrencyLimit(), last_limit); + last_limit = controller->concurrencyLimit(); + } + + int active_rq_counter = 0; + // Send out requests that we won't attempt to sample until the next minRTT window so the requests + // will be disregarded as they were started in the previous minRTT window. + for (uint32_t i = 0; i < controller->concurrencyLimit(); ++i) { + tryForward(controller, true); + ++active_rq_counter; + } + + // Move into the next minRTT window while the requests are outstanding. + time_system_.advanceTimeAsync(std::chrono::seconds(5)); + dispatcher_->run(Event::Dispatcher::RunType::Block); + verifyMinRTTActive(); + EXPECT_EQ(controller->concurrencyLimit(), min_concurrency); + + // Sample more than enough requests to break out of the minRTT measurement window (>25). 
These are + // expected to be disregarded since they would have started in the previous minRTT epoch. + // Therefore, we expect the minRTT window to still be active. + EXPECT_GT(active_rq_counter, 25); + for (int i = 0; i < active_rq_counter; ++i) { + // Sample requests that were send "5 minutes ago," which would surely be from an older minRTT + // epoch. + sampleLatency(controller, std::chrono::minutes(5)); + } + verifyMinRTTActive(); +} + TEST_F(GradientControllerTest, MinRTTLogicTest) { const std::string yaml = R"EOF( sample_aggregate_percentile: @@ -226,34 +323,28 @@ TEST_F(GradientControllerTest, MinRTTLogicTest) { // The controller should be measuring minRTT upon creation, so the concurrency window is 7 (the // min concurrency). - EXPECT_EQ( - 1, - stats_.gauge("test_prefix.min_rtt_calculation_active", Stats::Gauge::ImportMode::Accumulate) - .value()); + verifyMinRTTActive(); EXPECT_EQ(controller->concurrencyLimit(), 7); for (int i = 0; i < 7; ++i) { tryForward(controller, true); } tryForward(controller, false); tryForward(controller, false); + time_system_.advanceTimeAsync(min_rtt); for (int i = 0; i < 7; ++i) { - controller->recordLatencySample(min_rtt); + sampleLatency(controller, min_rtt); } // 43 more requests should cause the minRTT to be done calculating. for (int i = 0; i < 43; ++i) { EXPECT_EQ(controller->concurrencyLimit(), 7); tryForward(controller, true); - controller->recordLatencySample(min_rtt); + sampleLatency(controller, min_rtt); } // Verify the minRTT value measured is accurate. 
- EXPECT_EQ( - 0, - stats_.gauge("test_prefix.min_rtt_calculation_active", Stats::Gauge::ImportMode::Accumulate) - .value()); - EXPECT_EQ( - 13, stats_.gauge("test_prefix.min_rtt_msecs", Stats::Gauge::ImportMode::NeverImport).value()); + verifyMinRTTInactive(); + verifyMinRTTValue(std::chrono::milliseconds(13)); } TEST_F(GradientControllerTest, CancelLatencySample) { @@ -274,10 +365,9 @@ TEST_F(GradientControllerTest, CancelLatencySample) { for (int i = 1; i <= 5; ++i) { tryForward(controller, true); - controller->recordLatencySample(std::chrono::milliseconds(i)); + sampleLatency(controller, std::chrono::milliseconds(i)); } - EXPECT_EQ( - 3, stats_.gauge("test_prefix.min_rtt_msecs", Stats::Gauge::ImportMode::NeverImport).value()); + verifyMinRTTValue(std::chrono::milliseconds(3)); } TEST_F(GradientControllerTest, SamplePercentileProcessTest) { @@ -326,8 +416,7 @@ TEST_F(GradientControllerTest, MinRTTBufferTest) { // Force a minRTT of 5ms. advancePastMinRTTStage(controller, yaml, std::chrono::milliseconds(5)); - EXPECT_EQ( - 5, stats_.gauge("test_prefix.min_rtt_msecs", Stats::Gauge::ImportMode::NeverImport).value()); + verifyMinRTTValue(std::chrono::milliseconds(5)); // Ensure that the minRTT doesn't decrease due to the buffer added. for (int recalcs = 0; recalcs < 10; ++recalcs) { @@ -336,7 +425,7 @@ TEST_F(GradientControllerTest, MinRTTBufferTest) { tryForward(controller, true); // Recording sample that's technically higher than the minRTT, but the 50% buffer should // prevent the concurrency limit from decreasing. - controller->recordLatencySample(std::chrono::milliseconds(6)); + sampleLatency(controller, std::chrono::milliseconds(6)); } time_system_.advanceTimeAsync(std::chrono::milliseconds(101)); dispatcher_->run(Event::Dispatcher::RunType::Block); @@ -366,8 +455,7 @@ TEST_F(GradientControllerTest, ConcurrencyLimitBehaviorTestBasic) { // Force a minRTT of 5ms. 
advancePastMinRTTStage(controller, yaml, std::chrono::milliseconds(5)); - EXPECT_EQ( - 5, stats_.gauge("test_prefix.min_rtt_msecs", Stats::Gauge::ImportMode::NeverImport).value()); + verifyMinRTTValue(std::chrono::milliseconds(5)); // Ensure that the concurrency window increases on its own due to the headroom calculation with // the max gradient. @@ -382,7 +470,7 @@ TEST_F(GradientControllerTest, ConcurrencyLimitBehaviorTestBasic) { const auto last_concurrency = controller->concurrencyLimit(); for (int i = 1; i <= 5; ++i) { tryForward(controller, true); - controller->recordLatencySample(std::chrono::milliseconds(4)); + sampleLatency(controller, std::chrono::milliseconds(4)); } time_system_.advanceTimeAsync(std::chrono::milliseconds(101)); dispatcher_->run(Event::Dispatcher::RunType::Block); @@ -396,7 +484,7 @@ TEST_F(GradientControllerTest, ConcurrencyLimitBehaviorTestBasic) { const auto last_concurrency = controller->concurrencyLimit(); for (int i = 1; i <= 5; ++i) { tryForward(controller, true); - controller->recordLatencySample(std::chrono::milliseconds(6)); + sampleLatency(controller, std::chrono::milliseconds(6)); } time_system_.advanceTimeAsync(std::chrono::milliseconds(101)); dispatcher_->run(Event::Dispatcher::RunType::Block); @@ -422,15 +510,17 @@ TEST_F(GradientControllerTest, MinRTTReturnToPreviousLimit) { auto controller = makeController(yaml); EXPECT_EQ(controller->concurrencyLimit(), 3); - // Get initial minRTT measurement out of the way. + // Get initial minRTT measurement out of the way and advance time so request samples are not + // thought to come from the previous minRTT epoch. advancePastMinRTTStage(controller, yaml, std::chrono::milliseconds(5)); + time_system_.advanceTimeAsync(std::chrono::seconds(1)); // Force the limit calculation to run a few times from some measurements. 
for (int sample_iters = 0; sample_iters < 5; ++sample_iters) { const auto last_concurrency = controller->concurrencyLimit(); for (int i = 1; i <= 5; ++i) { tryForward(controller, true); - controller->recordLatencySample(std::chrono::milliseconds(4)); + sampleLatency(controller, std::chrono::milliseconds(4)); } time_system_.advanceTimeAsync(std::chrono::milliseconds(101)); dispatcher_->run(Event::Dispatcher::RunType::Block); @@ -445,11 +535,14 @@ TEST_F(GradientControllerTest, MinRTTReturnToPreviousLimit) { dispatcher_->run(Event::Dispatcher::RunType::Block); EXPECT_EQ(controller->concurrencyLimit(), 3); + // Advance time again for request samples to appear from the current epoch. + time_system_.advanceTimeAsync(std::chrono::seconds(1)); + // 49 more requests should cause the minRTT to be done calculating. for (int i = 0; i < 5; ++i) { EXPECT_EQ(controller->concurrencyLimit(), 3); tryForward(controller, true); - controller->recordLatencySample(std::chrono::milliseconds(13)); + sampleLatency(controller, std::chrono::milliseconds(13)); } // Check that we restored the old concurrency limit value. @@ -473,15 +566,17 @@ TEST_F(GradientControllerTest, MinRTTRescheduleTest) { auto controller = makeController(yaml); EXPECT_EQ(controller->concurrencyLimit(), 3); - // Get initial minRTT measurement out of the way. + // Get initial minRTT measurement out of the way and advance time so request samples are not + // thought to come from the previous minRTT epoch. advancePastMinRTTStage(controller, yaml, std::chrono::milliseconds(5)); + time_system_.advanceTimeAsync(std::chrono::seconds(1)); // Force the limit calculation to run a few times from some measurements. 
for (int sample_iters = 0; sample_iters < 5; ++sample_iters) { const auto last_concurrency = controller->concurrencyLimit(); for (int i = 1; i <= 5; ++i) { tryForward(controller, true); - controller->recordLatencySample(std::chrono::milliseconds(4)); + sampleLatency(controller, std::chrono::milliseconds(4)); } time_system_.advanceTimeAsync(std::chrono::milliseconds(101)); dispatcher_->run(Event::Dispatcher::RunType::Block); @@ -525,7 +620,7 @@ TEST_F(GradientControllerTest, NoSamplesTest) { const auto last_concurrency = controller->concurrencyLimit(); for (int i = 1; i <= 5; ++i) { tryForward(controller, true); - controller->recordLatencySample(std::chrono::milliseconds(4)); + sampleLatency(controller, std::chrono::milliseconds(4)); } time_system_.advanceTimeAsync(std::chrono::milliseconds(101)); dispatcher_->run(Event::Dispatcher::RunType::Block); @@ -567,8 +662,9 @@ TEST_F(GradientControllerTest, TimerAccuracyTest) { .WillOnce(Return(rtt_timer)) .WillOnce(Return(sample_timer)); EXPECT_CALL(*sample_timer, enableTimer(std::chrono::milliseconds(123), _)); - auto controller = std::make_shared( - makeConfig(yaml, runtime_), fake_dispatcher, runtime_, "test_prefix.", stats_, random_); + auto controller = + std::make_shared(makeConfig(yaml, runtime_), fake_dispatcher, runtime_, + "test_prefix.", stats_, random_, time_system_); // Set the minRTT- this will trigger the timer for the next minRTT calculation. 
@@ -580,7 +676,8 @@ TEST_F(GradientControllerTest, TimerAccuracyTest) { EXPECT_CALL(*sample_timer, enableTimer(std::chrono::milliseconds(123), _)); for (int i = 0; i < 6; ++i) { tryForward(controller, true); - controller->recordLatencySample(std::chrono::milliseconds(5)); + time_system_.advanceTimeAsync(std::chrono::milliseconds(5)); + sampleLatency(controller, std::chrono::milliseconds(5)); } } @@ -609,8 +706,9 @@ TEST_F(GradientControllerTest, TimerAccuracyTestNoJitter) { .WillOnce(Return(rtt_timer)) .WillOnce(Return(sample_timer)); EXPECT_CALL(*sample_timer, enableTimer(std::chrono::milliseconds(123), _)); - auto controller = std::make_shared( - makeConfig(yaml, runtime_), fake_dispatcher, runtime_, "test_prefix.", stats_, random_); + auto controller = + std::make_shared(makeConfig(yaml, runtime_), fake_dispatcher, runtime_, + "test_prefix.", stats_, random_, time_system_); // Set the minRTT- this will trigger the timer for the next minRTT calculation. EXPECT_CALL(*rtt_timer, enableTimer(std::chrono::milliseconds(45000), _)); @@ -618,7 +716,8 @@ TEST_F(GradientControllerTest, TimerAccuracyTestNoJitter) { EXPECT_CALL(*sample_timer, enableTimer(std::chrono::milliseconds(123), _)); for (int i = 0; i < 6; ++i) { tryForward(controller, true); - controller->recordLatencySample(std::chrono::milliseconds(5)); + time_system_.advanceTimeAsync(std::chrono::milliseconds(5)); + sampleLatency(controller, std::chrono::milliseconds(5)); } } @@ -646,8 +745,7 @@ TEST_F(GradientControllerTest, ConsecutiveMinConcurrencyReset) { // Force a minRTT of 5ms. advancePastMinRTTStage(controller, yaml, std::chrono::milliseconds(5)); - EXPECT_EQ( - 5, stats_.gauge("test_prefix.min_rtt_msecs", Stats::Gauge::ImportMode::NeverImport).value()); + verifyMinRTTValue(std::chrono::milliseconds(5)); // Ensure that the concurrency window increases on its own due to the headroom calculation with // the max gradient. 
@@ -662,7 +760,7 @@ TEST_F(GradientControllerTest, ConsecutiveMinConcurrencyReset) { for (int recalcs = 0; recalcs < 5; ++recalcs) { for (int i = 1; i <= 5; ++i) { tryForward(controller, true); - controller->recordLatencySample(elevated_latency); + sampleLatency(controller, elevated_latency); } time_system_.advanceTimeAsync(std::chrono::milliseconds(101)); dispatcher_->run(Event::Dispatcher::RunType::Block); @@ -673,7 +771,7 @@ TEST_F(GradientControllerTest, ConsecutiveMinConcurrencyReset) { const auto last_concurrency = controller->concurrencyLimit(); for (int i = 1; i <= 5; ++i) { tryForward(controller, true); - controller->recordLatencySample(elevated_latency); + sampleLatency(controller, elevated_latency); } time_system_.advanceTimeAsync(std::chrono::milliseconds(101)); dispatcher_->run(Event::Dispatcher::RunType::Block); diff --git a/test/extensions/filters/http/admission_control/BUILD b/test/extensions/filters/http/admission_control/BUILD new file mode 100644 index 0000000000000..fea174f10b1c3 --- /dev/null +++ b/test/extensions/filters/http/admission_control/BUILD @@ -0,0 +1,82 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_package", +) +load( + "//test/extensions:extensions_build_system.bzl", + "envoy_extension_cc_test", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_extension_cc_test( + name = "admission_control_filter_test", + srcs = ["admission_control_filter_test.cc"], + extension_name = "envoy.filters.http.admission_control", + deps = [ + "//source/common/common:enum_to_int", + "//source/common/http:header_map_lib", + "//source/common/http:headers_lib", + "//source/extensions/filters/http/admission_control:admission_control_filter_lib", + "//test/mocks/http:http_mocks", + "//test/mocks/server:factory_context_mocks", + "//test/mocks/thread_local:thread_local_mocks", + "//test/test_common:simulated_time_system_lib", + "//test/test_common:utility_lib", + 
"@envoy_api//envoy/extensions/filters/http/admission_control/v3alpha:pkg_cc_proto", + ], +) + +envoy_extension_cc_test( + name = "config_test", + srcs = ["config_test.cc"], + extension_name = "envoy.filters.http.admission_control", + deps = [ + "//source/common/http:header_map_lib", + "//source/common/http:headers_lib", + "//source/extensions/filters/http/admission_control:admission_control_filter_lib", + "//test/mocks/http:http_mocks", + "//test/mocks/server:factory_context_mocks", + "//test/mocks/thread_local:thread_local_mocks", + "//test/test_common:simulated_time_system_lib", + "//test/test_common:utility_lib", + "@envoy_api//envoy/extensions/filters/http/admission_control/v3alpha:pkg_cc_proto", + ], +) + +envoy_extension_cc_test( + name = "success_criteria_evaluator_test", + srcs = ["success_criteria_evaluator_test.cc"], + extension_name = "envoy.filters.http.admission_control", + deps = [ + "//source/extensions/filters/http/admission_control:admission_control_filter_lib", + "@envoy_api//envoy/extensions/filters/http/admission_control/v3alpha:pkg_cc_proto", + ], +) + +envoy_extension_cc_test( + name = "admission_control_integration_test", + srcs = ["admission_control_integration_test.cc"], + extension_name = "envoy.filters.http.admission_control", + tags = ["fails_on_windows"], + deps = [ + "//source/extensions/filters/http/admission_control:config", + "//test/integration:http_integration_lib", + "//test/test_common:utility_lib", + ], +) + +envoy_extension_cc_test( + name = "admission_controller_test", + srcs = ["controller_test.cc"], + extension_name = "envoy.filters.http.admission_control", + deps = [ + "//source/common/http:headers_lib", + "//source/extensions/filters/http/admission_control:admission_control_filter_lib", + "//test/test_common:simulated_time_system_lib", + "//test/test_common:utility_lib", + "@envoy_api//envoy/extensions/filters/http/admission_control/v3alpha:pkg_cc_proto", + ], +) diff --git 
a/test/extensions/filters/http/admission_control/admission_control_filter_test.cc b/test/extensions/filters/http/admission_control/admission_control_filter_test.cc new file mode 100644 index 0000000000000..d8ba63e723828 --- /dev/null +++ b/test/extensions/filters/http/admission_control/admission_control_filter_test.cc @@ -0,0 +1,289 @@ +#include + +#include "envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.h" +#include "envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.validate.h" +#include "envoy/grpc/status.h" + +#include "common/common/enum_to_int.h" +#include "common/stats/isolated_store_impl.h" + +#include "extensions/filters/http/admission_control/admission_control.h" +#include "extensions/filters/http/admission_control/evaluators/response_evaluator.h" +#include "extensions/filters/http/admission_control/thread_local_controller.h" + +#include "test/mocks/runtime/mocks.h" +#include "test/mocks/server/factory_context.h" +#include "test/mocks/thread_local/mocks.h" +#include "test/test_common/simulated_time_system.h" +#include "test/test_common/utility.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::NiceMock; +using testing::Return; + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace AdmissionControl { +namespace { + +using RequestData = ThreadLocalController::RequestData; + +class MockThreadLocalController : public ThreadLocal::ThreadLocalObject, + public ThreadLocalController { +public: + MOCK_METHOD(RequestData, requestCounts, ()); + MOCK_METHOD(void, recordSuccess, ()); + MOCK_METHOD(void, recordFailure, ()); +}; + +class MockResponseEvaluator : public ResponseEvaluator { +public: + MOCK_METHOD(bool, isHttpSuccess, (uint64_t code), (const)); + MOCK_METHOD(bool, isGrpcSuccess, (uint32_t status), (const)); +}; + +class TestConfig : public AdmissionControlFilterConfig { +public: + TestConfig(const AdmissionControlProto& proto_config, Runtime::Loader& 
runtime, + Random::RandomGenerator& random, Stats::Scope& scope, ThreadLocal::SlotPtr&& tls, + MockThreadLocalController& controller, std::shared_ptr evaluator) + : AdmissionControlFilterConfig(proto_config, runtime, random, scope, std::move(tls), + std::move(evaluator)), + controller_(controller) {} + ThreadLocalController& getController() const override { return controller_; } + +private: + MockThreadLocalController& controller_; +}; + +class AdmissionControlTest : public testing::Test { +public: + AdmissionControlTest() = default; + + std::shared_ptr makeConfig(const std::string& yaml) { + AdmissionControlProto proto; + TestUtility::loadFromYamlAndValidate(yaml, proto); + auto tls = context_.threadLocal().allocateSlot(); + evaluator_ = std::make_shared(); + + return std::make_shared(proto, runtime_, random_, scope_, std::move(tls), + controller_, evaluator_); + } + + void setupFilter(std::shared_ptr config) { + filter_ = std::make_shared(config, "test_prefix."); + filter_->setDecoderFilterCallbacks(decoder_callbacks_); + } + + void sampleGrpcRequest(const Grpc::Status::WellKnownGrpcStatus status) { + Http::TestResponseHeaderMapImpl headers{{"content-type", "application/grpc"}, + {"grpc-status", std::to_string(enumToInt(status))}}; + filter_->encodeHeaders(headers, true); + } + + void sampleGrpcRequestTrailer(const Grpc::Status::WellKnownGrpcStatus status) { + Http::TestResponseHeaderMapImpl headers{{"content-type", "application/grpc"}, + {":status", "200"}}; + filter_->encodeHeaders(headers, false); + Http::TestResponseTrailerMapImpl trailers{{"grpc-message", "foo"}, + {"grpc-status", std::to_string(enumToInt(status))}}; + filter_->encodeTrailers(trailers); + } + + void sampleHttpRequest(const std::string& http_error_code) { + Http::TestResponseHeaderMapImpl headers{{":status", http_error_code}}; + filter_->encodeHeaders(headers, true); + } + +protected: + std::string stats_prefix_; + NiceMock runtime_; + NiceMock context_; + Stats::IsolatedStoreImpl scope_; + 
Event::SimulatedTimeSystem time_system_; + NiceMock random_; + std::shared_ptr filter_; + NiceMock decoder_callbacks_; + NiceMock controller_; + std::shared_ptr evaluator_; + const std::string default_yaml_{R"EOF( +enabled: + default_value: true + runtime_key: "foo.enabled" +sampling_window: 10s +aggression_coefficient: + default_value: 1.0 + runtime_key: "foo.aggression" +success_criteria: + http_criteria: + grpc_criteria: +)EOF"}; +}; + +// Ensure the filter can be disabled/enabled via runtime. +TEST_F(AdmissionControlTest, FilterRuntimeOverride) { + const std::string yaml = R"EOF( +enabled: + default_value: true + runtime_key: "foo.enabled" +sampling_window: 10s +aggression_coefficient: + default_value: 1.0 + runtime_key: "foo.aggression" +success_criteria: + http_criteria: + grpc_criteria: +)EOF"; + + auto config = makeConfig(yaml); + setupFilter(config); + + // "Disable" the filter via runtime. + EXPECT_CALL(runtime_.snapshot_, getBoolean("foo.enabled", true)).WillRepeatedly(Return(false)); + + // The filter is bypassed via runtime. + EXPECT_CALL(controller_, requestCounts()).Times(0); + + // We expect no rejections. + Http::TestRequestHeaderMapImpl request_headers; + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true)); +} + +// Ensure the filter disregards healthcheck traffic. +TEST_F(AdmissionControlTest, DisregardHealthChecks) { + auto config = makeConfig(default_yaml_); + setupFilter(config); + + StreamInfo::MockStreamInfo stream_info; + EXPECT_CALL(decoder_callbacks_, streamInfo()).WillOnce(testing::ReturnRef(stream_info)); + EXPECT_CALL(stream_info, healthCheck()).WillOnce(Return(true)); + + // We do not make admission decisions for health checks, so we expect no lookup of request success + // counts. 
+ EXPECT_CALL(controller_, requestCounts()).Times(0); + + Http::TestRequestHeaderMapImpl request_headers; + Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}}; + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true)); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers, true)); +} + +// Validate simple HTTP failure case. +TEST_F(AdmissionControlTest, HttpFailureBehavior) { + auto config = makeConfig(default_yaml_); + setupFilter(config); + + // We expect rejection counter to increment upon failure. + TestUtility::waitForCounterEq(scope_, "test_prefix.rq_rejected", 0, time_system_); + + EXPECT_CALL(controller_, requestCounts()).WillRepeatedly(Return(RequestData(100, 0))); + EXPECT_CALL(*evaluator_, isHttpSuccess(500)).WillRepeatedly(Return(false)); + + Http::TestRequestHeaderMapImpl request_headers; + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + filter_->decodeHeaders(request_headers, true)); + sampleHttpRequest("500"); + + TestUtility::waitForCounterEq(scope_, "test_prefix.rq_rejected", 1, time_system_); +} + +// Validate simple HTTP success case. +TEST_F(AdmissionControlTest, HttpSuccessBehavior) { + auto config = makeConfig(default_yaml_); + setupFilter(config); + + // We expect rejection counter to NOT increment upon success. + TestUtility::waitForCounterEq(scope_, "test_prefix.rq_rejected", 0, time_system_); + + EXPECT_CALL(controller_, requestCounts()).WillRepeatedly(Return(RequestData(100, 100))); + EXPECT_CALL(*evaluator_, isHttpSuccess(200)).WillRepeatedly(Return(true)); + + Http::TestRequestHeaderMapImpl request_headers; + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true)); + sampleHttpRequest("200"); + + TestUtility::waitForCounterEq(scope_, "test_prefix.rq_rejected", 0, time_system_); +} + +// Validate simple gRPC failure case. 
+TEST_F(AdmissionControlTest, GrpcFailureBehavior) { + auto config = makeConfig(default_yaml_); + setupFilter(config); + + TestUtility::waitForCounterEq(scope_, "test_prefix.rq_rejected", 0, time_system_); + + EXPECT_CALL(controller_, requestCounts()).WillRepeatedly(Return(RequestData(100, 0))); + EXPECT_CALL(*evaluator_, isGrpcSuccess(7)).WillRepeatedly(Return(false)); + + Http::TestRequestHeaderMapImpl request_headers; + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + filter_->decodeHeaders(request_headers, true)); + sampleGrpcRequest(Grpc::Status::WellKnownGrpcStatus::PermissionDenied); + + // We expect rejection counter to increment upon failure. + TestUtility::waitForCounterEq(scope_, "test_prefix.rq_rejected", 1, time_system_); +} + +// Validate simple gRPC success case with status in the trailer. +TEST_F(AdmissionControlTest, GrpcSuccessBehaviorTrailer) { + auto config = makeConfig(default_yaml_); + setupFilter(config); + + TestUtility::waitForCounterEq(scope_, "test_prefix.rq_rejected", 0, time_system_); + + EXPECT_CALL(controller_, requestCounts()).WillRepeatedly(Return(RequestData(100, 100))); + EXPECT_CALL(*evaluator_, isGrpcSuccess(0)).WillRepeatedly(Return(true)); + + Http::TestRequestHeaderMapImpl request_headers; + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true)); + sampleGrpcRequestTrailer(Grpc::Status::WellKnownGrpcStatus::Ok); + + // We expect rejection counter to NOT increment upon success. + TestUtility::waitForCounterEq(scope_, "test_prefix.rq_rejected", 0, time_system_); +} + +// Validate simple gRPC failure case with status in the trailer. 
+TEST_F(AdmissionControlTest, GrpcFailureBehaviorTrailer) { + auto config = makeConfig(default_yaml_); + setupFilter(config); + + TestUtility::waitForCounterEq(scope_, "test_prefix.rq_rejected", 0, time_system_); + + EXPECT_CALL(controller_, requestCounts()).WillRepeatedly(Return(RequestData(100, 0))); + EXPECT_CALL(*evaluator_, isGrpcSuccess(7)).WillRepeatedly(Return(false)); + + Http::TestRequestHeaderMapImpl request_headers; + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + filter_->decodeHeaders(request_headers, true)); + sampleGrpcRequestTrailer(Grpc::Status::WellKnownGrpcStatus::PermissionDenied); + + // We expect rejection counter to increment upon failure. + TestUtility::waitForCounterEq(scope_, "test_prefix.rq_rejected", 1, time_system_); +} + +// Validate simple gRPC success case. +TEST_F(AdmissionControlTest, GrpcSuccessBehavior) { + auto config = makeConfig(default_yaml_); + setupFilter(config); + + TestUtility::waitForCounterEq(scope_, "test_prefix.rq_rejected", 0, time_system_); + + EXPECT_CALL(controller_, requestCounts()).WillRepeatedly(Return(RequestData(100, 100))); + EXPECT_CALL(*evaluator_, isGrpcSuccess(0)).WillRepeatedly(Return(true)); + + Http::TestRequestHeaderMapImpl request_headers; + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true)); + sampleGrpcRequest(Grpc::Status::WellKnownGrpcStatus::Ok); + + // We expect rejection counter to NOT increment upon success. 
+ TestUtility::waitForCounterEq(scope_, "test_prefix.rq_rejected", 0, time_system_); +} + +} // namespace +} // namespace AdmissionControl +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/http/admission_control/admission_control_integration_test.cc b/test/extensions/filters/http/admission_control/admission_control_integration_test.cc new file mode 100644 index 0000000000000..578f39db10c39 --- /dev/null +++ b/test/extensions/filters/http/admission_control/admission_control_integration_test.cc @@ -0,0 +1,171 @@ +#include "common/grpc/common.h" + +#include "test/integration/autonomous_upstream.h" +#include "test/integration/http_integration.h" +#include "test/test_common/simulated_time_system.h" +#include "test/test_common/utility.h" + +namespace Envoy { +namespace { + +const std::string ADMISSION_CONTROL_CONFIG = + R"EOF( +name: envoy.filters.http.admission_control +typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.admission_control.v3alpha.AdmissionControl + success_criteria: + http_criteria: + grpc_criteria: + sampling_window: 120s + aggression_coefficient: + default_value: 1.0 + runtime_key: "foo.aggression" + enabled: + default_value: true + runtime_key: "foo.enabled" +)EOF"; + +class AdmissionControlIntegrationTest : public Event::TestUsingSimulatedTime, + public testing::TestWithParam, + public HttpIntegrationTest { +public: + AdmissionControlIntegrationTest() + : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, GetParam(), realTime()) {} + + void SetUp() override {} + + void initialize() override { + config_helper_.addConfigModifier(setEnableDownstreamTrailersHttp1()); + config_helper_.addFilter(ADMISSION_CONTROL_CONFIG); + HttpIntegrationTest::initialize(); + } + +protected: + void verifyGrpcSuccess(IntegrationStreamDecoderPtr response) { + EXPECT_EQ("0", response->trailers()->GrpcStatus()->value().getStringView()); + } + + void 
verifyHttpSuccess(IntegrationStreamDecoderPtr response) { + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + } + + IntegrationStreamDecoderPtr sendGrpcRequestWithReturnCode(uint64_t code) { + codec_client_ = makeHttpConnection(lookupPort("http")); + + // Set the response headers on the autonomous upstream. + auto headers = std::make_unique(); + headers->setStatus(200); + headers->setContentType("application/grpc"); + + auto trailers = std::make_unique(); + trailers->setGrpcMessage("this is a message"); + trailers->setGrpcStatus(code); + + auto* au = reinterpret_cast(fake_upstreams_.front().get()); + au->setResponseHeaders(std::move(headers)); + au->setResponseTrailers(std::move(trailers)); + + auto response = codec_client_->makeHeaderOnlyRequest(default_request_headers_); + response->waitForEndStream(); + codec_client_->close(); + return response; + } + + IntegrationStreamDecoderPtr sendRequestWithReturnCode(std::string&& code) { + codec_client_ = makeHttpConnection(lookupPort("http")); + + // Set the response headers on the autonomous upstream. + auto* au = reinterpret_cast(fake_upstreams_.front().get()); + au->setResponseHeaders(std::make_unique( + Http::TestResponseHeaderMapImpl({{":status", code}}))); + + auto response = codec_client_->makeHeaderOnlyRequest(default_request_headers_); + response->waitForEndStream(); + codec_client_->close(); + return response; + } +}; + +INSTANTIATE_TEST_SUITE_P(IpVersions, AdmissionControlIntegrationTest, + testing::ValuesIn(TestEnvironment::getIpVersionsForTest())); + +TEST_P(AdmissionControlIntegrationTest, HttpTest) { + autonomous_upstream_ = true; + initialize(); + + // Drop the success rate to a very low value. + ENVOY_LOG(info, "dropping success rate"); + for (int i = 0; i < 300; ++i) { + sendRequestWithReturnCode("500"); + } + + // Measure throttling rate from the admission control filter. 
+ double throttle_count = 0; + double request_count = 0; + ENVOY_LOG(info, "validating throttling rate"); + for (int i = 0; i < 300; ++i) { + auto response = sendRequestWithReturnCode("500"); + auto rc = response->headers().Status()->value().getStringView(); + if (rc == "503") { + ++throttle_count; + } else { + ASSERT_EQ(rc, "500"); + } + ++request_count; + } + + // Given the current throttling rate formula with an aggression of 1, it should result in a ~98% + // throttling rate. Allowing an error of 3%. + EXPECT_NEAR(throttle_count / request_count, 0.98, 0.03); + + // We now wait for the history to become stale. + timeSystem().advanceTimeWait(std::chrono::seconds(120)); + + // We expect a 100% success rate after waiting. No throttling should occur. + for (int i = 0; i < 100; ++i) { + verifyHttpSuccess(sendRequestWithReturnCode("200")); + } +} + +TEST_P(AdmissionControlIntegrationTest, GrpcTest) { + autonomous_upstream_ = true; + setUpstreamProtocol(FakeHttpConnection::Type::HTTP2); + initialize(); + + // Drop the success rate to a very low value. + for (int i = 0; i < 300; ++i) { + sendGrpcRequestWithReturnCode(14); + } + + // Measure throttling rate from the admission control filter. + double throttle_count = 0; + double request_count = 0; + for (int i = 0; i < 300; ++i) { + auto response = sendGrpcRequestWithReturnCode(10); + + // When the filter is throttling, it returns an HTTP code 503 and the GRPC status is unset. + // Otherwise, we expect a GRPC status of "Unknown" as set above. + if (response->headers().Status()->value().getStringView() == "503") { + ++throttle_count; + } else { + auto grpc_status = Grpc::Common::getGrpcStatus(*(response->trailers())); + ASSERT_EQ(grpc_status, Grpc::Status::WellKnownGrpcStatus::Aborted); + } + ++request_count; + } + + // Given the current throttling rate formula with an aggression of 1, it should result in a ~98% + // throttling rate. Allowing an error of 3%. 
+ EXPECT_NEAR(throttle_count / request_count, 0.98, 0.03); + + // We now wait for the history to become stale. + timeSystem().advanceTimeWait(std::chrono::seconds(120)); + + // We expect a 100% success rate after waiting. No throttling should occur. + for (int i = 0; i < 100; ++i) { + verifyGrpcSuccess(sendGrpcRequestWithReturnCode(0)); + } +} + +} // namespace +} // namespace Envoy diff --git a/test/extensions/filters/http/admission_control/config_test.cc b/test/extensions/filters/http/admission_control/config_test.cc new file mode 100644 index 0000000000000..cd7b6b212f1e7 --- /dev/null +++ b/test/extensions/filters/http/admission_control/config_test.cc @@ -0,0 +1,113 @@ +#include + +#include "envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.h" +#include "envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.validate.h" + +#include "common/stats/isolated_store_impl.h" + +#include "extensions/filters/http/admission_control/admission_control.h" +#include "extensions/filters/http/admission_control/evaluators/success_criteria_evaluator.h" + +#include "test/mocks/runtime/mocks.h" +#include "test/mocks/server/factory_context.h" +#include "test/mocks/thread_local/mocks.h" +#include "test/test_common/simulated_time_system.h" +#include "test/test_common/utility.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::NiceMock; +using testing::Return; + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace AdmissionControl { +namespace { + +class AdmissionControlConfigTest : public testing::Test { +public: + AdmissionControlConfigTest() = default; + + std::shared_ptr makeConfig(const std::string& yaml) { + AdmissionControlProto proto; + TestUtility::loadFromYamlAndValidate(yaml, proto); + auto tls = context_.threadLocal().allocateSlot(); + auto evaluator = std::make_unique(proto.success_criteria()); + return std::make_shared(proto, runtime_, random_, scope_, + std::move(tls), 
std::move(evaluator)); + } + +protected: + NiceMock runtime_; + NiceMock context_; + Stats::IsolatedStoreImpl scope_; + NiceMock random_; +}; + +// Verify the configuration when all fields are set. +TEST_F(AdmissionControlConfigTest, BasicTestAllConfigured) { + const std::string yaml = R"EOF( +enabled: + default_value: false + runtime_key: "foo.enabled" +sampling_window: 1337s +aggression_coefficient: + default_value: 4.2 + runtime_key: "foo.aggression" +success_criteria: + http_criteria: + grpc_criteria: +)EOF"; + + auto config = makeConfig(yaml); + + EXPECT_FALSE(config->filterEnabled()); + EXPECT_EQ(4.2, config->aggression()); +} + +// Verify the config defaults when not specified. +TEST_F(AdmissionControlConfigTest, BasicTestMinimumConfigured) { + // Empty config. No fields are required. + AdmissionControlProto proto; + + const std::string yaml = R"EOF( +success_criteria: + http_criteria: + grpc_criteria: +)EOF"; + auto config = makeConfig(yaml); + + EXPECT_TRUE(config->filterEnabled()); + EXPECT_EQ(2.0, config->aggression()); +} + +// Ensure runtime fields are honored. 
+TEST_F(AdmissionControlConfigTest, VerifyRuntime) { + const std::string yaml = R"EOF( +enabled: + default_value: false + runtime_key: "foo.enabled" +sampling_window: 1337s +aggression_coefficient: + default_value: 4.2 + runtime_key: "foo.aggression" +success_criteria: + http_criteria: + grpc_criteria: +)EOF"; + + auto config = makeConfig(yaml); + + EXPECT_CALL(runtime_.snapshot_, getBoolean("foo.enabled", false)).WillOnce(Return(true)); + EXPECT_TRUE(config->filterEnabled()); + EXPECT_CALL(runtime_.snapshot_, getDouble("foo.aggression", 4.2)).WillOnce(Return(1.3)); + EXPECT_EQ(1.3, config->aggression()); +} + +} // namespace +} // namespace AdmissionControl +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/http/admission_control/controller_test.cc b/test/extensions/filters/http/admission_control/controller_test.cc new file mode 100644 index 0000000000000..bf88a7037431d --- /dev/null +++ b/test/extensions/filters/http/admission_control/controller_test.cc @@ -0,0 +1,107 @@ +#include + +#include "envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.h" +#include "envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.validate.h" + +#include "extensions/filters/http/admission_control/thread_local_controller.h" + +#include "test/test_common/simulated_time_system.h" +#include "test/test_common/utility.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace AdmissionControl { +namespace { + +using RequestData = ThreadLocalController::RequestData; + +class ThreadLocalControllerTest : public testing::Test { +public: + ThreadLocalControllerTest() : window_(5), tlc_(time_system_, window_) {} + +protected: + // Submit a single request per entry in the historical data (this comes out to a single request + // each second). 
The final sample does not advance time to allow for testing of this transition. + void fillHistorySlots(const bool successes = true) { + std::function record; + if (successes) { + record = [this]() { tlc_.recordSuccess(); }; + } else { + record = [this]() { tlc_.recordFailure(); }; + } + for (int tick = 0; tick < window_.count(); ++tick) { + record(); + time_system_.advanceTimeWait(std::chrono::seconds(1)); + } + // Don't sleep after the final sample to allow for measurements. + record(); + } + + Event::SimulatedTimeSystem time_system_; + std::chrono::seconds window_; + ThreadLocalControllerImpl tlc_; +}; + +// Test the basic functionality of the admission controller. +TEST_F(ThreadLocalControllerTest, BasicRecord) { + EXPECT_EQ(RequestData(0, 0), tlc_.requestCounts()); + + tlc_.recordFailure(); + EXPECT_EQ(RequestData(1, 0), tlc_.requestCounts()); + + tlc_.recordSuccess(); + EXPECT_EQ(RequestData(2, 1), tlc_.requestCounts()); +} + +// Verify that stale historical samples are removed when they grow stale. +TEST_F(ThreadLocalControllerTest, RemoveStaleSamples) { + fillHistorySlots(); + + // We expect a single request counted in each second of the window. + EXPECT_EQ(RequestData(window_.count(), window_.count()), tlc_.requestCounts()); + + time_system_.advanceTimeWait(std::chrono::seconds(1)); + + // Continuing to sample requests at 1 per second should maintain the same request counts. We'll + // record failures here. + fillHistorySlots(false); + EXPECT_EQ(RequestData(window_.count(), 0), tlc_.requestCounts()); + + // Expect the oldest entry to go stale. + time_system_.advanceTimeWait(std::chrono::seconds(1)); + EXPECT_EQ(RequestData(window_.count() - 1, 0), tlc_.requestCounts()); +} + +// Verify that stale historical samples are removed when they grow stale. +TEST_F(ThreadLocalControllerTest, RemoveStaleSamples2) { + fillHistorySlots(); + + // We expect a single request counted in each second of the window. 
+ EXPECT_EQ(RequestData(window_.count(), window_.count()), tlc_.requestCounts()); + + // Let's just sit here for a full day. We expect all samples to become stale. + time_system_.advanceTimeWait(std::chrono::hours(24)); + + EXPECT_EQ(RequestData(0, 0), tlc_.requestCounts()); +} + +// Verify that historical samples are made only when there is data to record. +TEST_F(ThreadLocalControllerTest, VerifyMemoryUsage) { + // Make sure we don't add any null data to the history if there are sparse requests. + tlc_.recordSuccess(); + time_system_.advanceTimeWait(std::chrono::seconds(1)); + tlc_.recordSuccess(); + time_system_.advanceTimeWait(std::chrono::seconds(3)); + tlc_.recordSuccess(); + EXPECT_EQ(RequestData(3, 3), tlc_.requestCounts()); +} + +} // namespace +} // namespace AdmissionControl +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/http/admission_control/success_criteria_evaluator_test.cc b/test/extensions/filters/http/admission_control/success_criteria_evaluator_test.cc new file mode 100644 index 0000000000000..888497a1363e9 --- /dev/null +++ b/test/extensions/filters/http/admission_control/success_criteria_evaluator_test.cc @@ -0,0 +1,178 @@ +#include + +#include "envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.h" +#include "envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.validate.h" + +#include "common/common/enum_to_int.h" + +#include "extensions/filters/http/admission_control/admission_control.h" +#include "extensions/filters/http/admission_control/evaluators/success_criteria_evaluator.h" + +#include "test/test_common/utility.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace AdmissionControl { +namespace { + +class SuccessCriteriaTest : public testing::Test { +public: + SuccessCriteriaTest() = default; + + void makeEvaluator(const std::string& yaml) { 
+ AdmissionControlProto::SuccessCriteria proto; + TestUtility::loadFromYamlAndValidate(yaml, proto); + + evaluator_ = std::make_unique(proto); + } + + void expectHttpSuccess(int code) { EXPECT_TRUE(evaluator_->isHttpSuccess(code)); } + + void expectHttpFail(int code) { EXPECT_FALSE(evaluator_->isHttpSuccess(code)); } + + void expectGrpcSuccess(int code) { EXPECT_TRUE(evaluator_->isGrpcSuccess(code)); } + + void expectGrpcFail(int code) { EXPECT_FALSE(evaluator_->isGrpcSuccess(code)); } + + void verifyGrpcDefaultEval() { + expectGrpcSuccess(Grpc::Status::WellKnownGrpcStatus::AlreadyExists); + expectGrpcSuccess(Grpc::Status::WellKnownGrpcStatus::Canceled); + expectGrpcSuccess(Grpc::Status::WellKnownGrpcStatus::FailedPrecondition); + expectGrpcSuccess(Grpc::Status::WellKnownGrpcStatus::InvalidArgument); + expectGrpcSuccess(Grpc::Status::WellKnownGrpcStatus::NotFound); + expectGrpcSuccess(Grpc::Status::WellKnownGrpcStatus::Ok); + expectGrpcSuccess(Grpc::Status::WellKnownGrpcStatus::OutOfRange); + expectGrpcSuccess(Grpc::Status::WellKnownGrpcStatus::PermissionDenied); + expectGrpcSuccess(Grpc::Status::WellKnownGrpcStatus::Unauthenticated); + expectGrpcSuccess(Grpc::Status::WellKnownGrpcStatus::Unimplemented); + expectGrpcSuccess(Grpc::Status::WellKnownGrpcStatus::Unknown); + + expectGrpcFail(enumToInt(Grpc::Status::WellKnownGrpcStatus::Aborted)); + expectGrpcFail(enumToInt(Grpc::Status::WellKnownGrpcStatus::DataLoss)); + expectGrpcFail(enumToInt(Grpc::Status::WellKnownGrpcStatus::DeadlineExceeded)); + expectGrpcFail(enumToInt(Grpc::Status::WellKnownGrpcStatus::Internal)); + expectGrpcFail(enumToInt(Grpc::Status::WellKnownGrpcStatus::ResourceExhausted)); + expectGrpcFail(enumToInt(Grpc::Status::WellKnownGrpcStatus::Unavailable)); + } + + void verifyHttpDefaultEval() { + for (int code = 200; code < 600; ++code) { + if (code < 500) { + expectHttpSuccess(code); + } else { + expectHttpFail(code); + } + } + } + +protected: + std::unique_ptr evaluator_; +}; + +// Ensure the 
HTTP code successful range configurations are honored. +TEST_F(SuccessCriteriaTest, HttpErrorCodes) { + const std::string yaml = R"EOF( +http_criteria: + http_success_status: + - start: 200 + end: 300 + - start: 400 + end: 500 +)EOF"; + + makeEvaluator(yaml); + + for (int code = 200; code < 600; ++code) { + if ((code < 300 && code >= 200) || (code < 500 && code >= 400)) { + expectHttpSuccess(code); + continue; + } + + expectHttpFail(code); + } + + verifyGrpcDefaultEval(); +} + +// Verify default success values of the evaluator. +TEST_F(SuccessCriteriaTest, DefaultBehaviorTest) { + const std::string yaml = R"EOF( +http_criteria: +grpc_criteria: +)EOF"; + + makeEvaluator(yaml); + verifyGrpcDefaultEval(); + verifyHttpDefaultEval(); +} + +// Check that GRPC error code configurations are honored. +TEST_F(SuccessCriteriaTest, GrpcErrorCodes) { + const std::string yaml = R"EOF( +grpc_criteria: + grpc_success_status: + - 7 + - 13 +)EOF"; + + makeEvaluator(yaml); + + using GrpcStatus = Grpc::Status::WellKnownGrpcStatus; + for (int code = GrpcStatus::Ok; code <= GrpcStatus::MaximumKnown; ++code) { + if (code == 7 || code == 13) { + expectGrpcSuccess(code); + } else { + expectGrpcFail(code); + } + } + + verifyHttpDefaultEval(); +} + +// Verify correct gRPC range validation. +TEST_F(SuccessCriteriaTest, GrpcRangeValidation) { + const std::string yaml = R"EOF( +grpc_criteria: + grpc_success_status: + - 17 +)EOF"; + EXPECT_THROW_WITH_REGEX(makeEvaluator(yaml), EnvoyException, "invalid gRPC code*"); +} + +// Verify correct HTTP range validation. 
+TEST_F(SuccessCriteriaTest, HttpRangeValidation) { + auto check_ranges = [this](std::string&& yaml) { + EXPECT_THROW_WITH_REGEX(makeEvaluator(yaml), EnvoyException, "invalid HTTP range*"); + }; + + check_ranges(R"EOF( +http_criteria: + http_success_status: + - start: 300 + end: 200 +)EOF"); + + check_ranges(R"EOF( +http_criteria: + http_success_status: + - start: 600 + end: 600 +)EOF"); + + check_ranges(R"EOF( +http_criteria: + http_success_status: + - start: 99 + end: 99 +)EOF"); +} + +} // namespace +} // namespace AdmissionControl +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/http/aws_lambda/BUILD b/test/extensions/filters/http/aws_lambda/BUILD index 4486b02c12795..5c35774f44384 100644 --- a/test/extensions/filters/http/aws_lambda/BUILD +++ b/test/extensions/filters/http/aws_lambda/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( @@ -27,6 +27,7 @@ envoy_extension_cc_test( name = "aws_lambda_filter_integration_test", srcs = ["aws_lambda_filter_integration_test.cc"], extension_name = "envoy.filters.http.aws_lambda", + tags = ["fails_on_windows"], deps = [ "//source/common/http:header_map_lib", "//source/extensions/filters/http/aws_lambda:aws_lambda_filter_lib", @@ -53,7 +54,8 @@ envoy_extension_cc_test( extension_name = "envoy.filters.http.aws_lambda", deps = [ "//source/extensions/filters/http/aws_lambda:config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", + "//test/mocks/server:instance_mocks", "@envoy_api//envoy/extensions/filters/http/aws_lambda/v3:pkg_cc_proto", ], ) diff --git a/test/extensions/filters/http/aws_lambda/aws_lambda_filter_integration_test.cc b/test/extensions/filters/http/aws_lambda/aws_lambda_filter_integration_test.cc index 
b8e552a85283d..c752f4ca651b0 100644 --- a/test/extensions/filters/http/aws_lambda/aws_lambda_filter_integration_test.cc +++ b/test/extensions/filters/http/aws_lambda/aws_lambda_filter_integration_test.cc @@ -27,11 +27,7 @@ class AwsLambdaFilterIntegrationTest : public testing::TestWithParam(ctx); + [actual_headers = &response->headers()](const Http::HeaderEntry& expected_entry) { const auto* actual_entry = actual_headers->get( Http::LowerCaseString(std::string(expected_entry.key().getStringView()))); EXPECT_EQ(actual_entry->value().getStringView(), expected_entry.value().getStringView()); return Http::HeaderMap::Iterate::Continue; - }, - // Because headers() returns a pointer to const we have to cast it - // away to match the callback signature. This is safe because we do - // not call any non-const functions on the headers in the callback. - const_cast(&response->headers())); + }); // verify cookies if we have any if (!expected_response_cookies.empty()) { std::vector actual_cookies; - response->headers().iterate( - [](const Http::HeaderEntry& entry, void* ctx) { - auto* list = static_cast*>(ctx); - if (entry.key().getStringView() == Http::Headers::get().SetCookie.get()) { - list->emplace_back(entry.value().getStringView()); - } - return Http::HeaderMap::Iterate::Continue; - }, - &actual_cookies); + response->headers().iterate([&actual_cookies](const Http::HeaderEntry& entry) { + if (entry.key().getStringView() == Http::Headers::get().SetCookie.get()) { + actual_cookies.emplace_back(entry.value().getStringView()); + } + return Http::HeaderMap::Iterate::Continue; + }); EXPECT_EQ(expected_response_cookies, actual_cookies); } diff --git a/test/extensions/filters/http/aws_lambda/aws_lambda_filter_test.cc b/test/extensions/filters/http/aws_lambda/aws_lambda_filter_test.cc index de30165175b2f..ab0cf4c2c9004 100644 --- a/test/extensions/filters/http/aws_lambda/aws_lambda_filter_test.cc +++ b/test/extensions/filters/http/aws_lambda/aws_lambda_filter_test.cc @@ -194,16 
+194,13 @@ TEST_F(AwsLambdaFilterTest, DecodeHeadersInvocationModeSetsHeader) { EXPECT_EQ(Http::FilterHeadersStatus::Continue, header_result); std::string invocation_header_value; - headers.iterate( - [](const Http::HeaderEntry& entry, void* ctx) { - auto* out = static_cast(ctx); - if (entry.key().getStringView() == "x-amz-invocation-type") { - out->append(std::string(entry.value().getStringView())); - return Http::HeaderMap::Iterate::Break; - } - return Http::HeaderMap::Iterate::Continue; - }, - &invocation_header_value); + headers.iterate([&invocation_header_value](const Http::HeaderEntry& entry) { + if (entry.key().getStringView() == "x-amz-invocation-type") { + invocation_header_value.append(std::string(entry.value().getStringView())); + return Http::HeaderMap::Iterate::Break; + } + return Http::HeaderMap::Iterate::Continue; + }); EXPECT_EQ("RequestResponse", invocation_header_value); } @@ -235,11 +232,11 @@ TEST_F(AwsLambdaFilterTest, DecodeHeadersOnlyRequestWithJsonOn) { ASSERT_GT(json_buf.length(), 0); ASSERT_NE(headers.ContentType(), nullptr); - EXPECT_EQ("application/json", headers.ContentType()->value().getStringView()); + EXPECT_EQ("application/json", headers.getContentTypeValue()); // Assert the true (post-transformation) content-length sent to the Lambda endpoint. ASSERT_NE(headers.ContentLength(), nullptr); - EXPECT_EQ(fmt::format("{}", json_buf.length()), headers.ContentLength()->value().getStringView()); + EXPECT_EQ(fmt::format("{}", json_buf.length()), headers.getContentLengthValue()); // The best way to verify the generated JSON is to deserialize it and inspect it. 
Request req; @@ -298,12 +295,11 @@ TEST_F(AwsLambdaFilterTest, DecodeDataWithTextualBodyWithJsonOn) { ASSERT_GT(decoded_buf.length(), 0); ASSERT_NE(headers.ContentType(), nullptr); - EXPECT_EQ("application/json", headers.ContentType()->value().getStringView()); + EXPECT_EQ("application/json", headers.getContentTypeValue()); // Assert the true (post-transformation) content-length sent to the Lambda endpoint. ASSERT_NE(headers.ContentLength(), nullptr); - EXPECT_EQ(fmt::format("{}", decoded_buf.length()), - headers.ContentLength()->value().getStringView()); + EXPECT_EQ(fmt::format("{}", decoded_buf.length()), headers.getContentLengthValue()); // The best way to verify the generated JSON is to deserialize it and inspect it. Request req; @@ -471,6 +467,19 @@ TEST_F(AwsLambdaFilterTest, EncodeDataJsonModeStopIterationAndBuffer) { EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, result); } +TEST_F(AwsLambdaFilterTest, EncodeDataAddsLastChunk) { + setupFilter({arn_, InvocationMode::Synchronous, false /*passthrough*/}); + filter_->resolveSettings(); + Http::TestResponseHeaderMapImpl headers; + headers.setStatus(200); + filter_->encodeHeaders(headers, false /*end_stream*/); + + Buffer::OwnedImpl buf(std::string("foobar")); + EXPECT_CALL(encoder_callbacks_, addEncodedData(_, false)); + EXPECT_CALL(encoder_callbacks_, encodingBuffer).WillRepeatedly(Return(&buf)); + filter_->encodeData(buf, true /*end_stream*/); +} + /** * encodeData() data in JSON mode without a 'body' key should translate the 'headers' key to HTTP * headers while ignoring any HTTP/2 pseudo-headers. 
@@ -506,7 +515,7 @@ TEST_F(AwsLambdaFilterTest, EncodeDataJsonModeTransformToHttp) { EXPECT_EQ(Http::FilterDataStatus::Continue, result); ASSERT_NE(nullptr, headers.Status()); - EXPECT_EQ("201", headers.Status()->value().getStringView()); + EXPECT_EQ("201", headers.getStatusValue()); EXPECT_EQ(nullptr, headers.get(Http::LowerCaseString(":other"))); @@ -515,15 +524,12 @@ TEST_F(AwsLambdaFilterTest, EncodeDataJsonModeTransformToHttp) { EXPECT_EQ("awesome value", custom_header->value().getStringView()); std::vector cookies; - headers.iterate( - [](const Http::HeaderEntry& entry, void* ctx) { - auto* list = static_cast*>(ctx); - if (entry.key().getStringView() == Http::Headers::get().SetCookie.get()) { - list->emplace_back(entry.value().getStringView()); - } - return Http::HeaderMap::Iterate::Continue; - }, - &cookies); + headers.iterate([&cookies](const Http::HeaderEntry& entry) { + if (entry.key().getStringView() == Http::Headers::get().SetCookie.get()) { + cookies.emplace_back(entry.value().getStringView()); + } + return Http::HeaderMap::Iterate::Continue; + }); EXPECT_THAT(cookies, ElementsAre("session-id=42; Secure; HttpOnly", "user=joe")); } @@ -608,7 +614,7 @@ TEST_F(AwsLambdaFilterTest, EncodeDataJsonModeInvalidJson) { EXPECT_EQ(0, encoded_buf.length()); ASSERT_NE(nullptr, headers.Status()); - EXPECT_EQ("500", headers.Status()->value().getStringView()); + EXPECT_EQ("500", headers.getStatusValue()); EXPECT_EQ(1ul, filter_->stats().server_error_.value()); } diff --git a/test/extensions/filters/http/aws_lambda/config_test.cc b/test/extensions/filters/http/aws_lambda/config_test.cc index 178fe1ff7c6ad..752f763292b61 100644 --- a/test/extensions/filters/http/aws_lambda/config_test.cc +++ b/test/extensions/filters/http/aws_lambda/config_test.cc @@ -4,7 +4,8 @@ #include "extensions/filters/http/aws_lambda/aws_lambda_filter.h" #include "extensions/filters/http/aws_lambda/config.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" 
+#include "test/mocks/server/instance.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" diff --git a/test/extensions/filters/http/aws_request_signing/BUILD b/test/extensions/filters/http/aws_request_signing/BUILD index 5a3194877bc29..25301010ded56 100644 --- a/test/extensions/filters/http/aws_request_signing/BUILD +++ b/test/extensions/filters/http/aws_request_signing/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( @@ -28,7 +28,7 @@ envoy_extension_cc_test( extension_name = "envoy.filters.http.aws_request_signing", deps = [ "//source/extensions/filters/http/aws_request_signing:config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "@envoy_api//envoy/extensions/filters/http/aws_request_signing/v3:pkg_cc_proto", ], ) diff --git a/test/extensions/filters/http/aws_request_signing/aws_request_signing_filter_test.cc b/test/extensions/filters/http/aws_request_signing/aws_request_signing_filter_test.cc index f6225ec8ee413..b280b21eee92e 100644 --- a/test/extensions/filters/http/aws_request_signing/aws_request_signing_filter_test.cc +++ b/test/extensions/filters/http/aws_request_signing/aws_request_signing_filter_test.cc @@ -56,7 +56,7 @@ TEST_F(AwsRequestSigningFilterTest, SignWithHostRewrite) { Http::TestRequestHeaderMapImpl headers; EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false)); - EXPECT_EQ("foo", headers.Host()->value().getStringView()); + EXPECT_EQ("foo", headers.getHostValue()); EXPECT_EQ(1UL, filter_config_->stats_.signing_added_.value()); } @@ -72,6 +72,18 @@ TEST_F(AwsRequestSigningFilterTest, SignFails) { EXPECT_EQ(1UL, filter_config_->stats_.signing_failed_.value()); } +// Verify FilterConfigImpl's getters. 
+TEST_F(AwsRequestSigningFilterTest, FilterConfigImplGetters) { + Stats::IsolatedStoreImpl stats; + auto signer = std::make_unique(); + const auto* signer_ptr = signer.get(); + FilterConfigImpl config(std::move(signer), "prefix", stats, "foo"); + + EXPECT_EQ(signer_ptr, &config.signer()); + EXPECT_EQ(0UL, config.stats().signing_added_.value()); + EXPECT_EQ("foo", config.hostRewrite()); +} + } // namespace } // namespace AwsRequestSigningFilter } // namespace HttpFilters diff --git a/test/extensions/filters/http/aws_request_signing/config_test.cc b/test/extensions/filters/http/aws_request_signing/config_test.cc index 424524b999efa..7d86e759b77e3 100644 --- a/test/extensions/filters/http/aws_request_signing/config_test.cc +++ b/test/extensions/filters/http/aws_request_signing/config_test.cc @@ -3,7 +3,7 @@ #include "extensions/filters/http/aws_request_signing/config.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" diff --git a/test/extensions/filters/http/buffer/BUILD b/test/extensions/filters/http/buffer/BUILD index 20351ba4a1fb7..d04528928c192 100644 --- a/test/extensions/filters/http/buffer/BUILD +++ b/test/extensions/filters/http/buffer/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( @@ -36,6 +36,7 @@ envoy_extension_cc_test( name = "buffer_filter_integration_test", srcs = ["buffer_filter_integration_test.cc"], extension_name = "envoy.filters.http.buffer", + tags = ["fails_on_windows"], deps = [ "//source/extensions/filters/http/buffer:config", "//test/config:utility_lib", @@ -51,7 +52,8 @@ envoy_extension_cc_test( extension_name = "envoy.filters.http.buffer", deps = [ "//source/extensions/filters/http/buffer:config", - "//test/mocks/server:server_mocks", + 
"//test/mocks/server:factory_context_mocks", + "//test/mocks/server:instance_mocks", "//test/test_common:utility_lib", "@envoy_api//envoy/extensions/filters/http/buffer/v3:pkg_cc_proto", ], diff --git a/test/extensions/filters/http/buffer/buffer_filter_integration_test.cc b/test/extensions/filters/http/buffer/buffer_filter_integration_test.cc index dfde16d77699f..c61b6e1753689 100644 --- a/test/extensions/filters/http/buffer/buffer_filter_integration_test.cc +++ b/test/extensions/filters/http/buffer/buffer_filter_integration_test.cc @@ -63,7 +63,7 @@ TEST_P(BufferIntegrationTest, RouterRequestPopulateContentLength) { response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } TEST_P(BufferIntegrationTest, RouterRequestPopulateContentLengthOnTrailers) { @@ -92,10 +92,17 @@ TEST_P(BufferIntegrationTest, RouterRequestPopulateContentLengthOnTrailers) { response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } TEST_P(BufferIntegrationTest, RouterRequestBufferLimitExceeded) { + // Make sure the connection isn't closed during request upload. + // Without a large drain-close it's possible that the local reply will be sent + // during request upload, and continued upload will result in TCP reset before + // the response is read. 
+ config_helper_.addConfigModifier( + [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) { hcm.mutable_delayed_close_timeout()->set_seconds(2000 * 1000); }); config_helper_.addFilter(ConfigHelper::smallBufferFilter()); initialize(); @@ -112,7 +119,7 @@ TEST_P(BufferIntegrationTest, RouterRequestBufferLimitExceeded) { response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ("413", response->headers().Status()->value().getStringView()); + EXPECT_EQ("413", response->headers().getStatusValue()); } ConfigHelper::HttpModifierFunction overrideConfig(const std::string& json_config) { @@ -154,7 +161,7 @@ TEST_P(BufferIntegrationTest, RouteDisabled) { response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } TEST_P(BufferIntegrationTest, RouteOverride) { @@ -180,7 +187,7 @@ TEST_P(BufferIntegrationTest, RouteOverride) { response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } } // namespace diff --git a/test/extensions/filters/http/buffer/buffer_filter_test.cc b/test/extensions/filters/http/buffer/buffer_filter_test.cc index 11f85e138f767..34ce1e2211b6d 100644 --- a/test/extensions/filters/http/buffer/buffer_filter_test.cc +++ b/test/extensions/filters/http/buffer/buffer_filter_test.cc @@ -106,8 +106,7 @@ TEST_F(BufferFilterTest, ContentLengthPopulation) { Buffer::OwnedImpl data2(" world"); EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data2, true)); - ASSERT_NE(headers.ContentLength(), nullptr); - EXPECT_EQ(headers.ContentLength()->value().getStringView(), "11"); + EXPECT_EQ(headers.getContentLengthValue(), "11"); } TEST_F(BufferFilterTest, ContentLengthPopulationInTrailers) { @@ -122,8 +121,7 @@ 
TEST_F(BufferFilterTest, ContentLengthPopulationInTrailers) { Http::TestRequestTrailerMapImpl trailers; EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(trailers)); - ASSERT_NE(headers.ContentLength(), nullptr); - EXPECT_EQ(headers.ContentLength()->value().getStringView(), "5"); + EXPECT_EQ(headers.getContentLengthValue(), "5"); } TEST_F(BufferFilterTest, ContentLengthPopulationAlreadyPresent) { @@ -134,8 +132,7 @@ TEST_F(BufferFilterTest, ContentLengthPopulationAlreadyPresent) { Buffer::OwnedImpl data("foo"); EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data, true)); - ASSERT_NE(headers.ContentLength(), nullptr); - EXPECT_EQ(headers.ContentLength()->value().getStringView(), "3"); + EXPECT_EQ(headers.getContentLengthValue(), "3"); } TEST_F(BufferFilterTest, RouteConfigOverride) { diff --git a/test/extensions/filters/http/buffer/config_test.cc b/test/extensions/filters/http/buffer/config_test.cc index d2c8e0c85633c..268fe142931b8 100644 --- a/test/extensions/filters/http/buffer/config_test.cc +++ b/test/extensions/filters/http/buffer/config_test.cc @@ -4,7 +4,8 @@ #include "extensions/filters/http/buffer/buffer_filter.h" #include "extensions/filters/http/buffer/config.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" +#include "test/mocks/server/instance.h" #include "gmock/gmock.h" #include "gtest/gtest.h" @@ -47,9 +48,9 @@ TEST(BufferFilterFactoryTest, BufferFilterCorrectProto) { TEST(BufferFilterFactoryTest, BufferFilterEmptyProto) { BufferFilterFactory factory; + auto empty_proto = factory.createEmptyConfigProto(); envoy::extensions::filters::http::buffer::v3::Buffer config = - *dynamic_cast( - factory.createEmptyConfigProto().get()); + *dynamic_cast(empty_proto.get()); config.mutable_max_request_bytes()->set_value(1028); @@ -62,9 +63,9 @@ TEST(BufferFilterFactoryTest, BufferFilterEmptyProto) { TEST(BufferFilterFactoryTest, BufferFilterNoMaxRequestBytes) { BufferFilterFactory factory; 
+ auto empty_proto = factory.createEmptyConfigProto(); envoy::extensions::filters::http::buffer::v3::Buffer config = - *dynamic_cast( - factory.createEmptyConfigProto().get()); + *dynamic_cast(empty_proto.get()); NiceMock context; EXPECT_THROW_WITH_REGEX(factory.createFilterFactoryFromProto(config, "stats", context), @@ -74,10 +75,8 @@ TEST(BufferFilterFactoryTest, BufferFilterNoMaxRequestBytes) { TEST(BufferFilterFactoryTest, BufferFilterEmptyRouteProto) { BufferFilterFactory factory; EXPECT_NO_THROW({ - envoy::extensions::filters::http::buffer::v3::BufferPerRoute* config = - dynamic_cast( - factory.createEmptyRouteConfigProto().get()); - EXPECT_NE(nullptr, config); + EXPECT_NE(nullptr, dynamic_cast( + factory.createEmptyRouteConfigProto().get())); }); } diff --git a/test/extensions/filters/http/cache/BUILD b/test/extensions/filters/http/cache/BUILD index faa47807b25f7..db5d5ea50fd55 100644 --- a/test/extensions/filters/http/cache/BUILD +++ b/test/extensions/filters/http/cache/BUILD @@ -1,21 +1,21 @@ -licenses(["notice"]) # Apache 2 - load("//bazel:envoy_build_system.bzl", "envoy_package") load( "//test/extensions:extensions_build_system.bzl", "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( - name = "http_cache_utils_test", - srcs = ["http_cache_utils_test.cc"], + name = "cache_headers_utils_test", + srcs = ["cache_headers_utils_test.cc"], extension_name = "envoy.filters.http.cache", deps = [ "//include/envoy/http:header_map_interface", "//source/common/http:header_map_lib", - "//source/extensions/filters/http/cache:http_cache_utils_lib", + "//source/extensions/filters/http/cache:cache_headers_utils_lib", "//test/test_common:utility_lib", ], ) @@ -40,12 +40,22 @@ envoy_extension_cc_test( deps = [ "//source/extensions/filters/http/cache:cache_filter_lib", "//source/extensions/filters/http/cache/simple_http_cache:simple_http_cache_lib", - "//test/mocks/server:server_mocks", + 
"//test/mocks/server:factory_context_mocks", "//test/test_common:simulated_time_system_lib", "//test/test_common:utility_lib", ], ) +envoy_extension_cc_test( + name = "cacheability_utils_test", + srcs = ["cacheability_utils_test.cc"], + extension_name = "envoy.filters.http.cache", + deps = [ + "//source/extensions/filters/http/cache:cacheability_utils_lib", + "//test/test_common:utility_lib", + ], +) + envoy_extension_cc_test( name = "config_test", srcs = ["config_test.cc"], @@ -54,7 +64,7 @@ envoy_extension_cc_test( "//source/extensions/filters/http/cache:config", "//source/extensions/filters/http/cache/simple_http_cache:simple_http_cache_lib", "//test/mocks/http:http_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "//test/test_common:utility_lib", ], ) @@ -65,6 +75,7 @@ envoy_extension_cc_test( "cache_filter_integration_test.cc", ], extension_name = "envoy.filters.http.cache", + tags = ["fails_on_windows"], deps = [ "//source/extensions/filters/http/cache:config", "//source/extensions/filters/http/cache:http_cache_lib", diff --git a/test/extensions/filters/http/cache/cache_filter_integration_test.cc b/test/extensions/filters/http/cache/cache_filter_integration_test.cc index a645734486467..d4113c78c7a8b 100644 --- a/test/extensions/filters/http/cache/cache_filter_integration_test.cc +++ b/test/extensions/filters/http/cache/cache_filter_integration_test.cc @@ -40,6 +40,7 @@ INSTANTIATE_TEST_SUITE_P(Protocols, CacheIntegrationTest, HttpProtocolIntegrationTest::protocolTestParamsToString); TEST_P(CacheIntegrationTest, MissInsertHit) { + useAccessLog("%RESPONSE_FLAGS% %RESPONSE_CODE_DETAILS%"); // Set system time to cause Envoy's cached formatted time to match time on this thread. 
simTime().setSystemTime(std::chrono::hours(1)); initializeFilter(default_config); @@ -68,8 +69,13 @@ TEST_P(CacheIntegrationTest, MissInsertHit) { EXPECT_THAT(request->headers(), IsSupersetOfHeaders(response_headers)); EXPECT_EQ(request->headers().get(Http::Headers::get().Age), nullptr); EXPECT_EQ(request->body(), std::string(42, 'a')); + EXPECT_EQ(waitForAccessLog(access_log_name_), + fmt::format("- via_upstream{}", TestEnvironment::newLine)); } + // Advance time, to verify the original date header is preserved. + simTime().advanceTimeWait(std::chrono::seconds(10)); + // Send second request, and get response from cache. IntegrationStreamDecoderPtr request = codec_client_->makeHeaderOnlyRequest(request_headers); request->waitForEndStream(); @@ -77,9 +83,159 @@ TEST_P(CacheIntegrationTest, MissInsertHit) { EXPECT_THAT(request->headers(), IsSupersetOfHeaders(response_headers)); EXPECT_EQ(request->body(), std::string(42, 'a')); EXPECT_NE(request->headers().get(Http::Headers::get().Age), nullptr); + // Advance time to force a log flush. + simTime().advanceTimeWait(std::chrono::seconds(1)); + EXPECT_EQ(waitForAccessLog(access_log_name_, 1), + fmt::format("RFCF cache.response_from_cache_filter{}", TestEnvironment::newLine)); +} + +TEST_P(CacheIntegrationTest, SuccessfulValidation) { + useAccessLog("%RESPONSE_FLAGS% %RESPONSE_CODE_DETAILS%"); + // Set system time to cause Envoy's cached formatted time to match time on this thread. + simTime().setSystemTime(std::chrono::hours(1)); + initializeFilter(default_config); + + // Include test name and params in URL to make each test's requests unique. 
+ const Http::TestRequestHeaderMapImpl request_headers = { + {":method", "GET"}, + {":path", absl::StrCat("/", protocolTestParamsToString({GetParam(), 0}))}, + {":scheme", "http"}, + {":authority", "SuccessfulValidation"}}; + + const std::string original_response_date = formatter_.now(simTime()); + Http::TestResponseHeaderMapImpl response_headers = {{":status", "200"}, + {"date", original_response_date}, + {"cache-control", "max-age=0"}, + {"content-length", "42"}, + {"etag", "abc123"}}; + + // Send first request, and get response from upstream. + { + IntegrationStreamDecoderPtr response_decoder = + codec_client_->makeHeaderOnlyRequest(request_headers); + waitForNextUpstreamRequest(); + upstream_request_->encodeHeaders(response_headers, /*end_stream=*/false); + // send 42 'a's + upstream_request_->encodeData(42, true); + // Wait for the response to be read by the codec client. + response_decoder->waitForEndStream(); + EXPECT_TRUE(response_decoder->complete()); + EXPECT_THAT(response_decoder->headers(), IsSupersetOfHeaders(response_headers)); + EXPECT_EQ(response_decoder->headers().get(Http::Headers::get().Age), nullptr); + EXPECT_EQ(response_decoder->body(), std::string(42, 'a')); + EXPECT_EQ(waitForAccessLog(access_log_name_), "- via_upstream\n"); + } + + simTime().advanceTimeWait(std::chrono::seconds(10)); + const std::string not_modified_date = formatter_.now(simTime()); + + // Send second request, the cached response should be validated then served. + IntegrationStreamDecoderPtr response_decoder = + codec_client_->makeHeaderOnlyRequest(request_headers); + waitForNextUpstreamRequest(); + + // Check for injected conditional headers -- no "Last-Modified" header so should fallback to + // "Date". 
+ Http::TestRequestHeaderMapImpl injected_headers = {{"if-none-match", "abc123"}, + {"if-modified-since", original_response_date}}; + EXPECT_THAT(upstream_request_->headers(), IsSupersetOfHeaders(injected_headers)); + + // Create a 304 (not modified) response -> cached response is valid. + Http::TestResponseHeaderMapImpl not_modified_response_headers = {{":status", "304"}, + {"date", not_modified_date}}; + upstream_request_->encodeHeaders(not_modified_response_headers, /*end_stream=*/true); + + // The original response headers should be updated with 304 response headers. + response_headers.setDate(not_modified_date); + + // Wait for the response to be read by the codec client. + response_decoder->waitForEndStream(); + + // Check that the served response is the cached response. + EXPECT_TRUE(response_decoder->complete()); + EXPECT_THAT(response_decoder->headers(), IsSupersetOfHeaders(response_headers)); + EXPECT_EQ(response_decoder->body(), std::string(42, 'a')); + // Check that age header exists as this is a cached response. + EXPECT_NE(response_decoder->headers().get(Http::Headers::get().Age), nullptr); + + // Advance time to force a log flush. + simTime().advanceTimeWait(std::chrono::seconds(1)); + EXPECT_EQ(waitForAccessLog(access_log_name_, 1), "RFCF cache.response_from_cache_filter\n"); +} + +TEST_P(CacheIntegrationTest, UnsuccessfulValidation) { + useAccessLog("%RESPONSE_FLAGS% %RESPONSE_CODE_DETAILS%"); + // Set system time to cause Envoy's cached formatted time to match time on this thread. + simTime().setSystemTime(std::chrono::hours(1)); + initializeFilter(default_config); + + // Include test name and params in URL to make each test's requests unique. 
+ const Http::TestRequestHeaderMapImpl request_headers = { + {":method", "GET"}, + {":path", absl::StrCat("/", protocolTestParamsToString({GetParam(), 0}))}, + {":scheme", "http"}, + {":authority", "UnsuccessfulValidation"}}; + + Http::TestResponseHeaderMapImpl original_response_headers = {{":status", "200"}, + {"date", formatter_.now(simTime())}, + {"cache-control", "max-age=0"}, + {"content-length", "10"}, + {"etag", "a1"}}; + + // Send first request, and get response from upstream. + { + IntegrationStreamDecoderPtr response_decoder = + codec_client_->makeHeaderOnlyRequest(request_headers); + waitForNextUpstreamRequest(); + upstream_request_->encodeHeaders(original_response_headers, /*end_stream=*/false); + // send 10 'a's + upstream_request_->encodeData(10, true); + // Wait for the response to be read by the codec client. + response_decoder->waitForEndStream(); + EXPECT_TRUE(response_decoder->complete()); + EXPECT_THAT(response_decoder->headers(), IsSupersetOfHeaders(original_response_headers)); + EXPECT_EQ(response_decoder->headers().get(Http::Headers::get().Age), nullptr); + EXPECT_EQ(response_decoder->body(), std::string(10, 'a')); + EXPECT_EQ(waitForAccessLog(access_log_name_), "- via_upstream\n"); + } + + simTime().advanceTimeWait(std::chrono::seconds(10)); + // Any response with status other than 304 should be passed to the client as-is. + Http::TestResponseHeaderMapImpl updated_response_headers = {{":status", "200"}, + {"date", formatter_.now(simTime())}, + {"cache-control", "max-age=0"}, + {"content-length", "20"}, + {"etag", "a2"}}; + + // Send second request, validation of the cached response should be attempted but should fail. + IntegrationStreamDecoderPtr response_decoder = + codec_client_->makeHeaderOnlyRequest(request_headers); + waitForNextUpstreamRequest(); + + // Check for injected precondition headers. 
+ Http::TestRequestHeaderMapImpl injected_headers = {{"if-none-match", "a1"}}; + EXPECT_THAT(upstream_request_->headers(), IsSupersetOfHeaders(injected_headers)); + + // Reply with the updated response -> cached response is invalid. + upstream_request_->encodeHeaders(updated_response_headers, /*end_stream=*/false); + // send 20 'a's + upstream_request_->encodeData(20, true); + + // Wait for the response to be read by the codec client. + response_decoder->waitForEndStream(); + // Check that the served response is the updated response. + EXPECT_TRUE(response_decoder->complete()); + EXPECT_THAT(response_decoder->headers(), IsSupersetOfHeaders(updated_response_headers)); + EXPECT_EQ(response_decoder->body(), std::string(20, 'a')); + // Check that age header does not exist as this is not a cached response. + EXPECT_EQ(response_decoder->headers().get(Http::Headers::get().Age), nullptr); + + // Advance time to force a log flush. + simTime().advanceTimeWait(std::chrono::seconds(1)); + EXPECT_EQ(waitForAccessLog(access_log_name_, 1), "- via_upstream\n"); } -// Send the same GET request twice with body and trailers twice, then check that the response +// Send the same GET request with body and trailers twice, then check that the response // doesn't have an age header, to confirm that it wasn't served from cache. TEST_P(CacheIntegrationTest, GetRequestWithBodyAndTrailers) { // Set system time to cause Envoy's cached formatted time to match time on this thread. 
diff --git a/test/extensions/filters/http/cache/cache_filter_test.cc b/test/extensions/filters/http/cache/cache_filter_test.cc index 99d79a1e478a0..0ab4034cc7997 100644 --- a/test/extensions/filters/http/cache/cache_filter_test.cc +++ b/test/extensions/filters/http/cache/cache_filter_test.cc @@ -1,7 +1,7 @@ #include "extensions/filters/http/cache/cache_filter.h" #include "extensions/filters/http/cache/simple_http_cache/simple_http_cache.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "test/test_common/simulated_time_system.h" #include "test/test_common/utility.h" @@ -79,21 +79,21 @@ TEST_F(CacheFilterTest, ImmediateHitNoBody) { ON_CALL(context_.dispatcher_, post(_)).WillByDefault(::testing::InvokeArgument<0>()); { - // Create filter for request 1 + // Create filter for request 1. CacheFilter filter = makeFilter(simple_cache_); - // Decode request 1 header + // Decode request 1 header. EXPECT_EQ(filter.decodeHeaders(request_headers_, true), Http::FilterHeadersStatus::Continue); - // Encode response header + // Encode response header. EXPECT_EQ(filter.encodeHeaders(response_headers_, true), Http::FilterHeadersStatus::Continue); filter.onDestroy(); } { - // Create filter for request 2 + // Create filter for request 2. CacheFilter filter = makeFilter(simple_cache_); - // Decode request 2 header + // Decode request 2 header. EXPECT_CALL(decoder_callbacks_, encodeHeaders_(testing::AllOf(IsSupersetOfHeaders(response_headers_), HeaderHasValueRef("age", "0")), @@ -111,25 +111,25 @@ TEST_F(CacheFilterTest, DelayedHitNoBody) { ON_CALL(context_.dispatcher_, post(_)).WillByDefault(::testing::InvokeArgument<0>()); { - // Create filter for request 1 + // Create filter for request 1. CacheFilter filter = makeFilter(delayed_cache_); - // Decode request 1 header + // Decode request 1 header. 
EXPECT_EQ(filter.decodeHeaders(request_headers_, true), Http::FilterHeadersStatus::StopAllIterationAndWatermark); EXPECT_CALL(decoder_callbacks_, continueDecoding); delayed_cache_.delayed_cb_(); ::testing::Mock::VerifyAndClearExpectations(&decoder_callbacks_); - // Encode response header + // Encode response header. EXPECT_EQ(filter.encodeHeaders(response_headers_, true), Http::FilterHeadersStatus::Continue); filter.onDestroy(); } { - // Create filter for request 2 + // Create filter for request 2. CacheFilter filter = makeFilter(delayed_cache_); - // Decode request 2 header + // Decode request 2 header. EXPECT_EQ(filter.decodeHeaders(request_headers_, true), Http::FilterHeadersStatus::StopAllIterationAndWatermark); EXPECT_CALL(decoder_callbacks_, @@ -149,13 +149,13 @@ TEST_F(CacheFilterTest, ImmediateHitBody) { const std::string body = "abc"; { - // Create filter for request 1 + // Create filter for request 1. CacheFilter filter = makeFilter(simple_cache_); - // Decode request 1 header + // Decode request 1 header. EXPECT_EQ(filter.decodeHeaders(request_headers_, true), Http::FilterHeadersStatus::Continue); - // Encode response header + // Encode response header. Buffer::OwnedImpl buffer(body); response_headers_.setContentLength(body.size()); EXPECT_EQ(filter.encodeHeaders(response_headers_, false), Http::FilterHeadersStatus::Continue); @@ -163,7 +163,7 @@ TEST_F(CacheFilterTest, ImmediateHitBody) { filter.onDestroy(); } { - // Create filter for request 2 + // Create filter for request 2. 
CacheFilter filter = makeFilter(simple_cache_); // Decode request 2 header diff --git a/test/extensions/filters/http/cache/cache_headers_utils_test.cc b/test/extensions/filters/http/cache/cache_headers_utils_test.cc new file mode 100644 index 0000000000000..dd3f0a78e52b4 --- /dev/null +++ b/test/extensions/filters/http/cache/cache_headers_utils_test.cc @@ -0,0 +1,349 @@ +#include +#include +#include + +#include "envoy/common/time.h" + +#include "common/common/macros.h" +#include "common/http/header_map_impl.h" + +#include "extensions/filters/http/cache/cache_headers_utils.h" + +#include "test/test_common/utility.h" + +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace Cache { +namespace { + +struct TestRequestCacheControl : public RequestCacheControl { + TestRequestCacheControl(bool must_validate, bool no_store, bool no_transform, bool only_if_cached, + OptionalDuration max_age, OptionalDuration min_fresh, + OptionalDuration max_stale) { + must_validate_ = must_validate; + no_store_ = no_store; + no_transform_ = no_transform; + only_if_cached_ = only_if_cached; + max_age_ = max_age; + min_fresh_ = min_fresh; + max_stale_ = max_stale; + } +}; + +struct TestResponseCacheControl : public ResponseCacheControl { + TestResponseCacheControl(bool must_validate, bool no_store, bool no_transform, bool no_stale, + bool is_public, OptionalDuration max_age) { + must_validate_ = must_validate; + no_store_ = no_store; + no_transform_ = no_transform; + no_stale_ = no_stale; + is_public_ = is_public; + max_age_ = max_age; + } +}; + +struct RequestCacheControlTestCase { + absl::string_view cache_control_header; + TestRequestCacheControl request_cache_control; +}; + +struct ResponseCacheControlTestCase { + absl::string_view cache_control_header; + TestResponseCacheControl response_cache_control; +}; + +class RequestCacheControlTest : public testing::TestWithParam { +public: + static const std::vector& getTestCases() { + // 
clang-format off + CONSTRUCT_ON_FIRST_USE(std::vector, + // Empty header + { + "", + // {must_validate_, no_store_, no_transform_, only_if_cached_, max_age_, min_fresh_, max_stale_} + {false, false, false, false, absl::nullopt, absl::nullopt, absl::nullopt} + }, + // Valid cache-control headers + { + "max-age=3600, min-fresh=10, no-transform, only-if-cached, no-store", + // {must_validate_, no_store_, no_transform_, only_if_cached_, max_age_, min_fresh_, max_stale_} + {false, true, true, true, std::chrono::seconds(3600), std::chrono::seconds(10), absl::nullopt} + }, + { + "min-fresh=100, max-stale, no-cache", + // {must_validate_, no_store_, no_transform_, only_if_cached_, max_age_, min_fresh_, max_stale_} + {true, false, false, false, absl::nullopt, std::chrono::seconds(100), SystemTime::duration::max()} + }, + { + "max-age=10, max-stale=50", + // {must_validate_, no_store_, no_transform_, only_if_cached_, max_age_, min_fresh_, max_stale_} + {false, false, false, false, std::chrono::seconds(10), absl::nullopt, std::chrono::seconds(50)} + }, + // Quoted arguments are interpreted correctly + { + "max-age=\"3600\", min-fresh=\"10\", no-transform, only-if-cached, no-store", + // {must_validate_, no_store_, no_transform_, only_if_cached_, max_age_, min_fresh_, max_stale_} + {false, true, true, true, std::chrono::seconds(3600), std::chrono::seconds(10), absl::nullopt} + }, + { + "max-age=\"10\", max-stale=\"50\", only-if-cached", + // {must_validate_, no_store_, no_transform_, only_if_cached_, max_age_, min_fresh_, max_stale_} + {false, false, false, true, std::chrono::seconds(10), absl::nullopt, std::chrono::seconds(50)} + }, + // Unknown directives are ignored + { + "max-age=10, max-stale=50, unknown-directive", + // {must_validate_, no_store_, no_transform_, only_if_cached_, max_age_, min_fresh_, max_stale_} + {false, false, false, false, std::chrono::seconds(10), absl::nullopt, std::chrono::seconds(50)} + }, + { + "max-age=10, max-stale=50, 
unknown-directive-with-arg=arg1", + // {must_validate_, no_store_, no_transform_, only_if_cached_, max_age_, min_fresh_, max_stale_} + {false, false, false, false, std::chrono::seconds(10), absl::nullopt, std::chrono::seconds(50)} + }, + { + "max-age=10, max-stale=50, unknown-directive-with-quoted-arg=\"arg1\"", + // {must_validate_, no_store_, no_transform_, only_if_cached_, max_age_, min_fresh_, max_stale_} + {false, false, false, false, std::chrono::seconds(10), absl::nullopt, std::chrono::seconds(50)} + }, + { + "max-age=10, max-stale=50, unknown-directive, unknown-directive-with-quoted-arg=\"arg1\"", + // {must_validate_, no_store_, no_transform_, only_if_cached_, max_age_, min_fresh_, max_stale_} + {false, false, false, false, std::chrono::seconds(10), absl::nullopt, std::chrono::seconds(50)} + }, + // Invalid durations are ignored + { + "max-age=five, min-fresh=30, no-store", + // {must_validate_, no_store_, no_transform_, only_if_cached_, max_age_, min_fresh_, max_stale_} + {false, true, false, false, absl::nullopt, std::chrono::seconds(30), absl::nullopt} + }, + { + "max-age=five, min-fresh=30s, max-stale=-2", + // {must_validate_, no_store_, no_transform_, only_if_cached_, max_age_, min_fresh_, max_stale_} + {false, false, false, false, absl::nullopt, absl::nullopt, absl::nullopt} + }, + { + "max-age=\"", + // {must_validate_, no_store_, no_transform_, only_if_cached_, max_age_, min_fresh_, max_stale_} + {false, false, false, false, absl::nullopt, absl::nullopt, absl::nullopt} + }, + // Invalid parts of the header are ignored + { + "no-cache, ,,,fjfwioen3298, max-age=20, min-fresh=30=40", + // {must_validate_, no_store_, no_transform_, only_if_cached_, max_age_, min_fresh_, max_stale_} + {true, false, false, false, std::chrono::seconds(20), absl::nullopt, absl::nullopt} + }, + // If a directive argument contains a comma by mistake + // the part before the comma will be interpreted as the argument + // and the part after it will be ignored + { + "no-cache, 
max-age=10,0, no-store", + // {must_validate_, no_store_, no_transform_, only_if_cached_, max_age_, min_fresh_, max_stale_} + {true, true, false, false, std::chrono::seconds(10), absl::nullopt, absl::nullopt} + }, + ); + // clang-format on + } +}; + +class ResponseCacheControlTest : public testing::TestWithParam { +public: + static const std::vector& getTestCases() { + // clang-format off + CONSTRUCT_ON_FIRST_USE(std::vector, + // Empty header + { + "", + // {must_validate_, no_store_, no_transform_, no_stale_, is_public_, max_age_} + {false, false, false, false, false, absl::nullopt} + }, + // Valid cache-control headers + { + "s-maxage=1000, max-age=2000, proxy-revalidate, no-store", + // {must_validate_, no_store_, no_transform_, no_stale_, is_public_, max_age_} + {false, true, false, true, false, std::chrono::seconds(1000)} + }, + { + "max-age=500, must-revalidate, no-cache, no-transform", + // {must_validate_, no_store_, no_transform_, no_stale_, is_public_, max_age_} + {true, false, true, true, false, std::chrono::seconds(500)} + }, + { + "s-maxage=10, private=content-length, no-cache=content-encoding", + // {must_validate_, no_store_, no_transform_, no_stale_, is_public_, max_age_} + {true, true, false, false, false, std::chrono::seconds(10)} + }, + { + "private", + // {must_validate_, no_store_, no_transform_, no_stale_, is_public_, max_age_} + {false, true, false, false, false, absl::nullopt} + }, + { + "public, max-age=0", + // {must_validate_, no_store_, no_transform_, no_stale_, is_public_, max_age_} + {false, false, false, false, true, std::chrono::seconds(0)} + }, + // Quoted arguments are interpreted correctly + { + "s-maxage=\"20\", max-age=\"10\", public", + // {must_validate_, no_store_, no_transform_, no_stale_, is_public_, max_age_} + {false, false, false, false, true, std::chrono::seconds(20)} + }, + { + "max-age=\"50\", private", + // {must_validate_, no_store_, no_transform_, no_stale_, is_public_, max_age_} + {false, true, false, false, 
false, std::chrono::seconds(50)} + }, + { + "s-maxage=\"0\"", + // {must_validate_, no_store_, no_transform_, no_stale_, is_public_, max_age_} + {false, false, false, false, false, std::chrono::seconds(0)} + }, + // Unknown directives are ignored + { + "private, no-cache, max-age=30, unknown-directive", + // {must_validate_, no_store_, no_transform_, no_stale_, is_public_, max_age_} + {true, true, false, false, false, std::chrono::seconds(30)} + }, + { + "private, no-cache, max-age=30, unknown-directive-with-arg=arg", + // {must_validate_, no_store_, no_transform_, no_stale_, is_public_, max_age_} + {true, true, false, false, false, std::chrono::seconds(30)} + }, + { + "private, no-cache, max-age=30, unknown-directive-with-quoted-arg=\"arg\"", + // {must_validate_, no_store_, no_transform_, no_stale_, is_public_, max_age_} + {true, true, false, false, false, std::chrono::seconds(30)} + }, + { + "private, no-cache, max-age=30, unknown-directive, unknown-directive-with-quoted-arg=\"arg\"", + // {must_validate_, no_store_, no_transform_, no_stale_, is_public_, max_age_} + {true, true, false, false, false, std::chrono::seconds(30)} + }, + // Invalid durations are ignored + { + "max-age=five", + // {must_validate_, no_store_, no_transform_, no_stale_, is_public_, max_age_} + {false, false, false, false, false, absl::nullopt} + }, + { + "max-age=10s, private", + // {must_validate_, no_store_, no_transform_, no_stale_, is_public_, max_age_} + {false, true, false, false, false, absl::nullopt} + }, + { + "s-maxage=\"50s\", max-age=\"zero\", no-cache", + // {must_validate_, no_store_, no_transform_, no_stale_, is_public_, max_age_} + {true, false, false, false, false, absl::nullopt} + }, + { + "s-maxage=five, max-age=10, no-transform", + // {must_validate_, no_store_, no_transform_, no_stale_, is_public_, max_age_} + {false, false, true, false, false, std::chrono::seconds(10)} + }, + { + "max-age=\"", + // {must_validate_, no_store_, no_transform_, no_stale_, is_public_, 
max_age_} + {false, false, false, false, false, absl::nullopt} + }, + // Invalid parts of the header are ignored + { + "no-cache, ,,,fjfwioen3298, max-age=20", + // {must_validate_, no_store_, no_transform_, no_stale_, is_public_, max_age_} + {true, false, false, false, false, std::chrono::seconds(20)} + }, + // If a directive argument contains a comma by mistake + // the part before the comma will be interpreted as the argument + // and the part after it will be ignored + { + "no-cache, max-age=10,0, no-store", + // {must_validate_, no_store_, no_transform_, no_stale_, is_public_, max_age_} + {true, true, false, false, false, std::chrono::seconds(10)} + }, + ); + // clang-format on + } +}; + +// TODO(#9872): More tests for httpTime. +class HttpTimeTest : public testing::TestWithParam { +public: + static const std::vector& getOkTestCases() { + // clang-format off + CONSTRUCT_ON_FIRST_USE(std::vector, + "Sun, 06 Nov 1994 08:49:37 GMT", // IMF-fixdate. + "Sunday, 06-Nov-94 08:49:37 GMT", // obsolete RFC 850 format. + "Sun Nov 6 08:49:37 1994" // ANSI C's asctime() format. 
+ ); + // clang-format on + } +}; + +INSTANTIATE_TEST_SUITE_P(RequestCacheControlTest, RequestCacheControlTest, + testing::ValuesIn(RequestCacheControlTest::getTestCases())); + +TEST_P(RequestCacheControlTest, RequestCacheControlTest) { + const absl::string_view cache_control_header = GetParam().cache_control_header; + const RequestCacheControl expected_request_cache_control = GetParam().request_cache_control; + EXPECT_EQ(expected_request_cache_control, RequestCacheControl(cache_control_header)); +} + +INSTANTIATE_TEST_SUITE_P(ResponseCacheControlTest, ResponseCacheControlTest, + testing::ValuesIn(ResponseCacheControlTest::getTestCases())); + +TEST_P(ResponseCacheControlTest, ResponseCacheControlTest) { + const absl::string_view cache_control_header = GetParam().cache_control_header; + const ResponseCacheControl expected_response_cache_control = GetParam().response_cache_control; + EXPECT_EQ(expected_response_cache_control, ResponseCacheControl(cache_control_header)); +} + +INSTANTIATE_TEST_SUITE_P(Ok, HttpTimeTest, testing::ValuesIn(HttpTimeTest::getOkTestCases())); + +TEST_P(HttpTimeTest, Ok) { + const Http::TestResponseHeaderMapImpl response_headers{{"date", GetParam()}}; + // Manually confirmed that 784111777 is 11/6/94, 8:46:37. 
+ EXPECT_EQ(784111777, + SystemTime::clock::to_time_t(CacheHeadersUtils::httpTime(response_headers.Date()))); +} + +TEST(HttpTime, Null) { EXPECT_EQ(CacheHeadersUtils::httpTime(nullptr), SystemTime()); } + +void testReadAndRemoveLeadingDigits(absl::string_view input, int64_t expected, + absl::string_view remaining) { + absl::string_view test_input(input); + auto output = CacheHeadersUtils::readAndRemoveLeadingDigits(test_input); + if (output) { + EXPECT_EQ(output, static_cast(expected)) << "input=" << input; + EXPECT_EQ(test_input, remaining) << "input=" << input; + } else { + EXPECT_LT(expected, 0) << "input=" << input; + EXPECT_EQ(test_input, remaining) << "input=" << input; + } +} + +TEST(ReadAndRemoveLeadingDigits, ComprehensiveTest) { + testReadAndRemoveLeadingDigits("123", 123, ""); + testReadAndRemoveLeadingDigits("a123", -1, "a123"); + testReadAndRemoveLeadingDigits("9_", 9, "_"); + testReadAndRemoveLeadingDigits("11111111111xyz", 11111111111ll, "xyz"); + + // Overflow case + testReadAndRemoveLeadingDigits("1111111111111111111111111111111xyz", -1, + "1111111111111111111111111111111xyz"); + + // 2^64 + testReadAndRemoveLeadingDigits("18446744073709551616xyz", -1, "18446744073709551616xyz"); + // 2^64-1 + testReadAndRemoveLeadingDigits("18446744073709551615xyz", 18446744073709551615ull, "xyz"); + // (2^64-1)*10+9 + testReadAndRemoveLeadingDigits("184467440737095516159yz", -1, "184467440737095516159yz"); +} + +} // namespace +} // namespace Cache +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/http/cache/cacheability_utils_test.cc b/test/extensions/filters/http/cache/cacheability_utils_test.cc new file mode 100644 index 0000000000000..f4647e8bfc3f1 --- /dev/null +++ b/test/extensions/filters/http/cache/cacheability_utils_test.cc @@ -0,0 +1,136 @@ +#include "envoy/http/header_map.h" + +#include "extensions/filters/http/cache/cacheability_utils.h" + +#include "test/test_common/utility.h" + 
+#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace Cache { +namespace { + +class IsCacheableRequestTest : public testing::TestWithParam { +protected: + const Http::TestRequestHeaderMapImpl cacheable_request_headers_ = {{":path", "/"}, + {":method", "GET"}, + {"x-forwarded-proto", "http"}, + {":authority", "test.com"}}; +}; + +class IsCacheableResponseTest : public testing::Test { +protected: + std::string cache_control_ = "max-age=3600"; + const Http::TestResponseHeaderMapImpl cacheable_response_headers_ = { + {":status", "200"}, + {"date", "Sun, 06 Nov 1994 08:49:37 GMT"}, + {"cache-control", cache_control_}}; +}; + +TEST_F(IsCacheableRequestTest, CacheableRequest) { + EXPECT_TRUE(CacheabilityUtils::isCacheableRequest(cacheable_request_headers_)); +} + +TEST_F(IsCacheableRequestTest, PathHeader) { + Http::TestRequestHeaderMapImpl request_headers = cacheable_request_headers_; + EXPECT_TRUE(CacheabilityUtils::isCacheableRequest(request_headers)); + request_headers.removePath(); + EXPECT_FALSE(CacheabilityUtils::isCacheableRequest(request_headers)); +} + +TEST_F(IsCacheableRequestTest, HostHeader) { + Http::TestRequestHeaderMapImpl request_headers = cacheable_request_headers_; + EXPECT_TRUE(CacheabilityUtils::isCacheableRequest(request_headers)); + request_headers.removeHost(); + EXPECT_FALSE(CacheabilityUtils::isCacheableRequest(request_headers)); +} + +TEST_F(IsCacheableRequestTest, MethodHeader) { + const Http::HeaderValues& header_values = Http::Headers::get(); + Http::TestRequestHeaderMapImpl request_headers = cacheable_request_headers_; + EXPECT_TRUE(CacheabilityUtils::isCacheableRequest(request_headers)); + request_headers.setMethod(header_values.MethodValues.Post); + EXPECT_FALSE(CacheabilityUtils::isCacheableRequest(request_headers)); + request_headers.setMethod(header_values.MethodValues.Put); + EXPECT_FALSE(CacheabilityUtils::isCacheableRequest(request_headers)); + request_headers.removeMethod(); + 
EXPECT_FALSE(CacheabilityUtils::isCacheableRequest(request_headers)); +} + +TEST_F(IsCacheableRequestTest, ForwardedProtoHeader) { + Http::TestRequestHeaderMapImpl request_headers = cacheable_request_headers_; + EXPECT_TRUE(CacheabilityUtils::isCacheableRequest(request_headers)); + request_headers.setForwardedProto("ftp"); + EXPECT_FALSE(CacheabilityUtils::isCacheableRequest(request_headers)); + request_headers.removeForwardedProto(); + EXPECT_FALSE(CacheabilityUtils::isCacheableRequest(request_headers)); +} + +TEST_F(IsCacheableRequestTest, AuthorizationHeader) { + Http::TestRequestHeaderMapImpl request_headers = cacheable_request_headers_; + EXPECT_TRUE(CacheabilityUtils::isCacheableRequest(request_headers)); + request_headers.setCopy(Http::CustomHeaders::get().Authorization, + "basic YWxhZGRpbjpvcGVuc2VzYW1l"); + EXPECT_FALSE(CacheabilityUtils::isCacheableRequest(request_headers)); +} + +INSTANTIATE_TEST_SUITE_P(ConditionalHeaders, IsCacheableRequestTest, + testing::Values("if-match", "if-none-match", "if-modified-since", + "if-unmodified-since", "if-range")); + +TEST_P(IsCacheableRequestTest, ConditionalHeaders) { + Http::TestRequestHeaderMapImpl request_headers = cacheable_request_headers_; + EXPECT_TRUE(CacheabilityUtils::isCacheableRequest(request_headers)); + request_headers.setCopy(Http::LowerCaseString{GetParam()}, "test-value"); + EXPECT_FALSE(CacheabilityUtils::isCacheableRequest(request_headers)); +} + +TEST_F(IsCacheableResponseTest, CacheableResponse) { + EXPECT_TRUE(CacheabilityUtils::isCacheableResponse(cacheable_response_headers_)); +} + +TEST_F(IsCacheableResponseTest, UncacheableStatusCode) { + Http::TestResponseHeaderMapImpl response_headers = cacheable_response_headers_; + EXPECT_TRUE(CacheabilityUtils::isCacheableResponse(response_headers)); + response_headers.setStatus("700"); + EXPECT_FALSE(CacheabilityUtils::isCacheableResponse(response_headers)); + response_headers.removeStatus(); + 
EXPECT_FALSE(CacheabilityUtils::isCacheableResponse(response_headers)); +} + +TEST_F(IsCacheableResponseTest, ValidationData) { + Http::TestResponseHeaderMapImpl response_headers = cacheable_response_headers_; + EXPECT_TRUE(CacheabilityUtils::isCacheableResponse(response_headers)); + response_headers.setCopy(Http::CustomHeaders::get().CacheControl, "s-maxage=1000"); + EXPECT_TRUE(CacheabilityUtils::isCacheableResponse(response_headers)); + response_headers.setCopy(Http::CustomHeaders::get().CacheControl, "public, no-transform"); + EXPECT_FALSE(CacheabilityUtils::isCacheableResponse(response_headers)); + response_headers.remove(Http::CustomHeaders::get().CacheControl); + EXPECT_FALSE(CacheabilityUtils::isCacheableResponse(response_headers)); + response_headers.setCopy(Http::Headers::get().Expires, "Sun, 06 Nov 1994 09:49:37 GMT"); + EXPECT_TRUE(CacheabilityUtils::isCacheableResponse(response_headers)); +} + +TEST_F(IsCacheableResponseTest, ResponseNoStore) { + Http::TestResponseHeaderMapImpl response_headers = cacheable_response_headers_; + EXPECT_TRUE(CacheabilityUtils::isCacheableResponse(response_headers)); + std::string cache_control_no_store = absl::StrCat(cache_control_, ", no-store"); + response_headers.setCopy(Http::CustomHeaders::get().CacheControl, cache_control_no_store); + EXPECT_FALSE(CacheabilityUtils::isCacheableResponse(response_headers)); +} + +TEST_F(IsCacheableResponseTest, ResponsePrivate) { + Http::TestResponseHeaderMapImpl response_headers = cacheable_response_headers_; + EXPECT_TRUE(CacheabilityUtils::isCacheableResponse(response_headers)); + std::string cache_control_private = absl::StrCat(cache_control_, ", private"); + response_headers.setCopy(Http::CustomHeaders::get().CacheControl, cache_control_private); + EXPECT_FALSE(CacheabilityUtils::isCacheableResponse(response_headers)); +} + +} // namespace +} // namespace Cache +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git 
a/test/extensions/filters/http/cache/config_test.cc b/test/extensions/filters/http/cache/config_test.cc index c314897c33a4e..2b05de0072561 100644 --- a/test/extensions/filters/http/cache/config_test.cc +++ b/test/extensions/filters/http/cache/config_test.cc @@ -3,7 +3,7 @@ #include "extensions/filters/http/cache/cache_filter.h" #include "extensions/filters/http/cache/config.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "test/test_common/utility.h" #include "gtest/gtest.h" diff --git a/test/extensions/filters/http/cache/http_cache_test.cc b/test/extensions/filters/http/cache/http_cache_test.cc index 3fd9cf6992939..fac7c099f73de 100644 --- a/test/extensions/filters/http/cache/http_cache_test.cc +++ b/test/extensions/filters/http/cache/http_cache_test.cc @@ -1,3 +1,4 @@ +#include "extensions/filters/http/cache/cache_headers_utils.h" #include "extensions/filters/http/cache/http_cache.h" #include "test/mocks/http/mocks.h" @@ -105,19 +106,6 @@ TEST_F(LookupRequestTest, MakeLookupResultBody) { EXPECT_FALSE(lookup_response.has_trailers_); } -TEST_F(LookupRequestTest, MakeLookupResultNoDate) { - const LookupRequest lookup_request(request_headers_, current_time_); - const Http::TestResponseHeaderMapImpl response_headers( - {{"cache-control", "public, max-age=3600"}}); - const LookupResult lookup_response = makeLookupResult(lookup_request, response_headers); - EXPECT_EQ(CacheEntryStatus::RequiresValidation, lookup_response.cache_entry_status_); - ASSERT_TRUE(lookup_response.headers_); - EXPECT_THAT(*lookup_response.headers_, Http::IsSupersetOfHeaders(response_headers)); - EXPECT_EQ(lookup_response.content_length_, 0); - EXPECT_TRUE(lookup_response.response_ranges_.empty()); - EXPECT_FALSE(lookup_response.has_trailers_); -} - TEST_F(LookupRequestTest, PrivateResponse) { const LookupRequest lookup_request(request_headers_, current_time_); const Http::TestResponseHeaderMapImpl response_headers( @@ -170,9 +158,56 @@ 
TEST_F(LookupRequestTest, NotExpiredViaFallbackheader) { EXPECT_EQ(CacheEntryStatus::Ok, lookup_response.cache_entry_status_); } -TEST_F(LookupRequestTest, FullRange) { - request_headers_.addCopy("Range", "0-99"); +// If request Cache-Control header is missing, +// "Pragma:no-cache" is equivalent to "Cache-Control:no-cache". +// https://httpwg.org/specs/rfc7234.html#header.pragma +TEST_F(LookupRequestTest, PragmaNoCacheFallback) { + request_headers_.addCopy("pragma", "no-cache"); + const LookupRequest lookup_request(request_headers_, current_time_); + const Http::TestResponseHeaderMapImpl response_headers( + {{"date", formatter_.fromTime(current_time_)}, {"cache-control", "public, max-age=3600"}}); + const LookupResult lookup_response = makeLookupResult(lookup_request, response_headers); + // Response is not expired but the request requires revalidation through Pragma: no-cache. + EXPECT_EQ(CacheEntryStatus::RequiresValidation, lookup_response.cache_entry_status_); +} + +TEST_F(LookupRequestTest, PragmaNoCacheFallbackExtraDirectivesIgnored) { + request_headers_.addCopy("pragma", "no-cache, custom-directive=custom-value"); + const LookupRequest lookup_request(request_headers_, current_time_); + const Http::TestResponseHeaderMapImpl response_headers( + {{"date", formatter_.fromTime(current_time_)}, {"cache-control", "public, max-age=3600"}}); + const LookupResult lookup_response = makeLookupResult(lookup_request, response_headers); + // Response is not expired but the request requires revalidation through Pragma: no-cache. 
+ EXPECT_EQ(CacheEntryStatus::RequiresValidation, lookup_response.cache_entry_status_); +} + +TEST_F(LookupRequestTest, PragmaFallbackOtherValuesIgnored) { + request_headers_.addCopy("pragma", "max-age=0"); + const LookupRequest lookup_request(request_headers_, current_time_ + std::chrono::seconds(5)); + const Http::TestResponseHeaderMapImpl response_headers( + {{"date", formatter_.fromTime(current_time_)}, {"cache-control", "public, max-age=3600"}}); + const LookupResult lookup_response = makeLookupResult(lookup_request, response_headers); + // Response is fresh, Pragma header with values other than "no-cache" is ignored. + EXPECT_EQ(CacheEntryStatus::Ok, lookup_response.cache_entry_status_); +} + +TEST_F(LookupRequestTest, PragmaNoFallback) { + request_headers_.addCopy("pragma", "no-cache"); + request_headers_.addCopy("cache-control", "max-age=10"); + const LookupRequest lookup_request(request_headers_, current_time_ + std::chrono::seconds(5)); + const Http::TestResponseHeaderMapImpl response_headers( + {{"date", formatter_.fromTime(current_time_)}, {"cache-control", "public, max-age=3600"}}); + const LookupResult lookup_response = makeLookupResult(lookup_request, response_headers); + // Pragma header is ignored when Cache-Control header is present. 
+ EXPECT_EQ(CacheEntryStatus::Ok, lookup_response.cache_entry_status_); +} + +TEST_F(LookupRequestTest, SatisfiableRange) { + // add method (GET) and range to headers + request_headers_.addReference(Http::Headers::get().Method, Http::Headers::get().MethodValues.Get); + request_headers_.addReference(Http::Headers::get().Range, "bytes=1-99,3-,-2"); const LookupRequest lookup_request(request_headers_, current_time_); + const Http::TestResponseHeaderMapImpl response_headers( {{"date", formatter_.fromTime(current_time_)}, {"cache-control", "public, max-age=3600"}, @@ -180,11 +215,50 @@ TEST_F(LookupRequestTest, FullRange) { const uint64_t content_length = 4; const LookupResult lookup_response = makeLookupResult(lookup_request, response_headers, content_length); - ASSERT_EQ(CacheEntryStatus::Ok, lookup_response.cache_entry_status_); + ASSERT_EQ(CacheEntryStatus::SatisfiableRange, lookup_response.cache_entry_status_); + ASSERT_TRUE(lookup_response.headers_); EXPECT_THAT(*lookup_response.headers_, Http::IsSupersetOfHeaders(response_headers)); EXPECT_EQ(lookup_response.content_length_, 4); - EXPECT_TRUE(lookup_response.response_ranges_.empty()); + + // checks that the ranges have been adjusted to the content's length + EXPECT_EQ(lookup_response.response_ranges_.size(), 3); + + EXPECT_EQ(lookup_response.response_ranges_[0].begin(), 1); + EXPECT_EQ(lookup_response.response_ranges_[0].end(), 4); + EXPECT_EQ(lookup_response.response_ranges_[0].length(), 3); + + EXPECT_EQ(lookup_response.response_ranges_[1].begin(), 3); + EXPECT_EQ(lookup_response.response_ranges_[1].end(), 4); + EXPECT_EQ(lookup_response.response_ranges_[1].length(), 1); + + EXPECT_EQ(lookup_response.response_ranges_[2].begin(), 2); + EXPECT_EQ(lookup_response.response_ranges_[2].end(), 4); + EXPECT_EQ(lookup_response.response_ranges_[2].length(), 2); + + EXPECT_FALSE(lookup_response.has_trailers_); +} + +TEST_F(LookupRequestTest, NotSatisfiableRange) { + // add method (GET) and range headers + 
request_headers_.addReference(Http::Headers::get().Method, Http::Headers::get().MethodValues.Get); + request_headers_.addReference(Http::Headers::get().Range, "bytes=5-99,100-"); + + const LookupRequest lookup_request(request_headers_, current_time_); + + const Http::TestResponseHeaderMapImpl response_headers( + {{"date", formatter_.fromTime(current_time_)}, + {"cache-control", "public, max-age=3600"}, + {"content-length", "4"}}); + const uint64_t content_length = 4; + const LookupResult lookup_response = + makeLookupResult(lookup_request, response_headers, content_length); + ASSERT_EQ(CacheEntryStatus::NotSatisfiableRange, lookup_response.cache_entry_status_); + + ASSERT_TRUE(lookup_response.headers_); + EXPECT_THAT(*lookup_response.headers_, Http::IsSupersetOfHeaders(response_headers)); + EXPECT_EQ(lookup_response.content_length_, 4); + ASSERT_TRUE(lookup_response.response_ranges_.empty()); EXPECT_FALSE(lookup_response.has_trailers_); } @@ -253,6 +327,148 @@ TEST(AdjustByteRange, NoRangeRequest) { EXPECT_THAT(result, ContainerEq(std::vector{})); } +namespace { +Http::TestRequestHeaderMapImpl makeTestHeaderMap(std::string range_value) { + return Http::TestRequestHeaderMapImpl{{":method", "GET"}, {"range", range_value}}; +} +} // namespace + +TEST(ParseRangesTest, NoRangeHeader) { + Http::TestRequestHeaderMapImpl headers = Http::TestRequestHeaderMapImpl{{":method", "GET"}}; + std::vector result_vector = RangeRequests::parseRanges(headers, 5); + + ASSERT_EQ(0, result_vector.size()); +} + +TEST(ParseRangesTest, InvalidUnit) { + Http::TestRequestHeaderMapImpl headers = makeTestHeaderMap("bits=3-4"); + std::vector result_vector = RangeRequests::parseRanges(headers, 5); + + ASSERT_EQ(0, result_vector.size()); +} + +TEST(ParseRangesTest, SingleRange) { + Http::TestRequestHeaderMapImpl headers = makeTestHeaderMap("bytes=3-4"); + std::vector result_vector = RangeRequests::parseRanges(headers, 5); + + ASSERT_EQ(1, result_vector.size()); + + ASSERT_EQ(3, 
result_vector[0].firstBytePos()); + ASSERT_EQ(4, result_vector[0].lastBytePos()); +} + +TEST(ParseRangesTest, MissingFirstBytePos) { + Http::TestRequestHeaderMapImpl headers = makeTestHeaderMap("bytes=-5"); + std::vector result_vector = RangeRequests::parseRanges(headers, 5); + + ASSERT_EQ(1, result_vector.size()); + + ASSERT_TRUE(result_vector[0].isSuffix()); + ASSERT_EQ(5, result_vector[0].suffixLength()); +} + +TEST(ParseRangesTest, MissingLastBytePos) { + Http::TestRequestHeaderMapImpl headers = makeTestHeaderMap("bytes=6-"); + std::vector result_vector = RangeRequests::parseRanges(headers, 5); + + ASSERT_EQ(1, result_vector.size()); + + ASSERT_EQ(6, result_vector[0].firstBytePos()); + ASSERT_EQ(std::numeric_limits::max(), result_vector[0].lastBytePos()); +} + +TEST(ParseRangesTest, MultipleRanges) { + Http::TestRequestHeaderMapImpl headers = makeTestHeaderMap("bytes=345-456,-567,6789-"); + std::vector result_vector = RangeRequests::parseRanges(headers, 5); + + ASSERT_EQ(3, result_vector.size()); + + ASSERT_EQ(345, result_vector[0].firstBytePos()); + ASSERT_EQ(456, result_vector[0].lastBytePos()); + + ASSERT_TRUE(result_vector[1].isSuffix()); + ASSERT_EQ(567, result_vector[1].suffixLength()); + + ASSERT_EQ(6789, result_vector[2].firstBytePos()); + ASSERT_EQ(UINT64_MAX, result_vector[2].lastBytePos()); +} + +TEST(ParseRangesTest, LongRangeHeaderValue) { + Http::TestRequestHeaderMapImpl headers = + makeTestHeaderMap("bytes=1000-1000,1001-1001,1002-1002,1003-1003,1004-1004,1005-" + "1005,1006-1006,1007-1007,1008-1008,100-"); + std::vector result_vector = RangeRequests::parseRanges(headers, 10); + + ASSERT_EQ(10, result_vector.size()); +} + +TEST(ParseRangesTest, ZeroRangeLimit) { + Http::TestRequestHeaderMapImpl headers = makeTestHeaderMap("bytes=1000-1000"); + std::vector result_vector = RangeRequests::parseRanges(headers, 0); + + ASSERT_EQ(0, result_vector.size()); +} + +TEST(ParseRangesTest, OverRangeLimit) { + Http::TestRequestHeaderMapImpl headers = 
makeTestHeaderMap("bytes=1000-1000,1001-1001"); + std::vector result_vector = RangeRequests::parseRanges(headers, 1); + + ASSERT_EQ(0, result_vector.size()); +} + +class ParseInvalidRangeHeaderTest : public testing::Test, + public testing::WithParamInterface { +protected: + Http::TestRequestHeaderMapImpl range() { return makeTestHeaderMap(GetParam()); } +}; + +// clang-format off +INSTANTIATE_TEST_SUITE_P( + Default, ParseInvalidRangeHeaderTest, + testing::Values("-", + "1-2", + "12", + "a", + "a1", + "bytes=", + "bytes=-", + "bytes1-2", + "bytes=12", + "bytes=1-2-3", + "bytes=1-2-", + "bytes=1--3", + "bytes=--2", + "bytes=2--", + "bytes=-2-", + "bytes=-1-2", + "bytes=a-2", + "bytes=2-a", + "bytes=-a", + "bytes=a-", + "bytes=a1-2", + "bytes=1-a2", + "bytes=1a-2", + "bytes=1-2a", + "bytes=1-2,3-a", + "bytes=1-a,3-4", + "bytes=1-2,3a-4", + "bytes=1-2,3-4a", + "bytes=1-2,3-4-5", + "bytes=1-2,bytes=3-4", + "bytes=1-2,3-4,a", + // too many byte ranges (test sets the limit as 5) + "bytes=0-1,1-2,2-3,3-4,4-5,5-6", + // UINT64_MAX-UINT64_MAX+1 + "bytes=18446744073709551615-18446744073709551616", + // UINT64_MAX+1-UINT64_MAX+2 + "bytes=18446744073709551616-18446744073709551617")); +// clang-format on + +TEST_P(ParseInvalidRangeHeaderTest, InvalidRangeReturnsEmpty) { + std::vector result_vector = RangeRequests::parseRanges(range(), 5); + ASSERT_EQ(0, result_vector.size()); +} + } // namespace Cache } // namespace HttpFilters } // namespace Extensions diff --git a/test/extensions/filters/http/cache/http_cache_utils_test.cc b/test/extensions/filters/http/cache/http_cache_utils_test.cc deleted file mode 100644 index 09a825662277e..0000000000000 --- a/test/extensions/filters/http/cache/http_cache_utils_test.cc +++ /dev/null @@ -1,81 +0,0 @@ -#include - -#include "common/http/header_map_impl.h" - -#include "extensions/filters/http/cache/http_cache_utils.h" - -#include "test/test_common/utility.h" - -#include "gtest/gtest.h" - -namespace Envoy { -namespace Extensions { -namespace 
HttpFilters { -namespace Cache { -namespace { - -// TODO(#9872): Add tests for eat* functions -// TODO(#9872): More tests for httpTime, effectiveMaxAge - -class HttpTimeTest : public testing::TestWithParam {}; - -const char* const ok_times[] = { - "Sun, 06 Nov 1994 08:49:37 GMT", // IMF-fixdate - "Sunday, 06-Nov-94 08:49:37 GMT", // obsolete RFC 850 format - "Sun Nov 6 08:49:37 1994" // ANSI C's asctime() format -}; - -INSTANTIATE_TEST_SUITE_P(Ok, HttpTimeTest, testing::ValuesIn(ok_times)); - -TEST_P(HttpTimeTest, Ok) { - Http::TestResponseHeaderMapImpl response_headers{{"date", GetParam()}}; - // Manually confirmed that 784111777 is 11/6/94, 8:46:37. - EXPECT_EQ(784111777, SystemTime::clock::to_time_t(Utils::httpTime(response_headers.Date()))); -} - -TEST(HttpTime, Null) { EXPECT_EQ(Utils::httpTime(nullptr), SystemTime()); } - -struct EffectiveMaxAgeParams { - absl::string_view cache_control; - int effective_max_age_secs; -}; - -EffectiveMaxAgeParams params[] = { - {"public, max-age=3600", 3600}, - {"public, max-age=-1", 0}, - {"max-age=20", 20}, - {"max-age=86400, public", 86400}, - {"public,max-age=\"0\"", 0}, - {"public,max-age=8", 8}, - {"public,max-age=3,no-cache", 0}, - {"s-maxage=0", 0}, - {"max-age=10,s-maxage=0", 0}, - {"s-maxage=10", 10}, - {"no-cache", 0}, - {"max-age=0", 0}, - {"no-cache", 0}, - {"public", 0}, - // TODO(#9833): parse quoted forms - // {"max-age=20, s-maxage=\"25\"",25}, - // {"public,max-age=\"8\",foo=11",8}, - // {"public,max-age=\"8\",bar=\"11\"",8}, - // TODO(#9833): parse public/private - // {"private,max-age=10",0} - // {"private",0}, - // {"private,s-maxage=8",0}, -}; - -class EffectiveMaxAgeTest : public testing::TestWithParam {}; - -INSTANTIATE_TEST_SUITE_P(EffectiveMaxAgeTest, EffectiveMaxAgeTest, testing::ValuesIn(params)); - -TEST_P(EffectiveMaxAgeTest, EffectiveMaxAgeTest) { - EXPECT_EQ(Utils::effectiveMaxAge(GetParam().cache_control), - std::chrono::seconds(GetParam().effective_max_age_secs)); -} - -} // namespace -} // 
namespace Cache -} // namespace HttpFilters -} // namespace Extensions -} // namespace Envoy diff --git a/test/extensions/filters/http/cache/simple_http_cache/BUILD b/test/extensions/filters/http/cache/simple_http_cache/BUILD index 89198975f330e..3030d84eeae9e 100644 --- a/test/extensions/filters/http/cache/simple_http_cache/BUILD +++ b/test/extensions/filters/http/cache/simple_http_cache/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load("//bazel:envoy_build_system.bzl", "envoy_package") load( "//test/extensions:extensions_build_system.bzl", "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/filters/http/cache/simple_http_cache/simple_http_cache_test.cc b/test/extensions/filters/http/cache/simple_http_cache/simple_http_cache_test.cc index a60b1bb723af1..301009223163d 100644 --- a/test/extensions/filters/http/cache/simple_http_cache/simple_http_cache_test.cc +++ b/test/extensions/filters/http/cache/simple_http_cache/simple_http_cache_test.cc @@ -3,6 +3,7 @@ #include "common/buffer/buffer_impl.h" +#include "extensions/filters/http/cache/cache_headers_utils.h" #include "extensions/filters/http/cache/simple_http_cache/simple_http_cache.h" #include "test/test_common/simulated_time_system.h" @@ -24,7 +25,7 @@ class SimpleHttpCacheTest : public testing::Test { request_headers_.setMethod("GET"); request_headers_.setHost("example.com"); request_headers_.setForwardedProto("https"); - request_headers_.setCacheControl("max-age=3600"); + request_headers_.setCopy(Http::CustomHeaders::get().CacheControl, "max-age=3600"); } // Performs a cache lookup. 
@@ -160,7 +161,7 @@ TEST_F(SimpleHttpCacheTest, Stale) { } TEST_F(SimpleHttpCacheTest, RequestSmallMinFresh) { - request_headers_.setReferenceKey(Http::Headers::get().CacheControl, "min-fresh=1000"); + request_headers_.setReferenceKey(Http::CustomHeaders::get().CacheControl, "min-fresh=1000"); const std::string request_path("Name"); LookupContextPtr name_lookup_context = lookup(request_path); EXPECT_EQ(CacheEntryStatus::Unusable, lookup_result_.cache_entry_status_); @@ -174,7 +175,7 @@ TEST_F(SimpleHttpCacheTest, RequestSmallMinFresh) { } TEST_F(SimpleHttpCacheTest, ResponseStaleWithRequestLargeMaxStale) { - request_headers_.setReferenceKey(Http::Headers::get().CacheControl, "max-stale=9000"); + request_headers_.setReferenceKey(Http::CustomHeaders::get().CacheControl, "max-stale=9000"); const std::string request_path("Name"); LookupContextPtr name_lookup_context = lookup(request_path); diff --git a/test/extensions/filters/http/common/BUILD b/test/extensions/filters/http/common/BUILD index 99dca40599e63..9c5b60eb9789e 100644 --- a/test/extensions/filters/http/common/BUILD +++ b/test/extensions/filters/http/common/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", @@ -11,6 +9,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test_library( @@ -23,7 +23,7 @@ envoy_cc_test_library( ], deps = [ "//source/extensions/filters/http/common:jwks_fetcher_lib", - "//test/mocks/server:server_mocks", + "//test/mocks/upstream:upstream_mocks", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], ) @@ -46,6 +46,7 @@ envoy_extension_cc_test( "//source/extensions/filters/http/common:jwks_fetcher_lib", "//test/extensions/filters/http/common:mock_lib", "//test/mocks/http:http_mocks", + "//test/mocks/server:factory_context_mocks", "//test/test_common:utility_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], diff --git 
a/test/extensions/filters/http/common/compressor/BUILD b/test/extensions/filters/http/common/compressor/BUILD index b03a3cf39122b..a6b214dd6b503 100644 --- a/test/extensions/filters/http/common/compressor/BUILD +++ b/test/extensions/filters/http/common/compressor/BUILD @@ -1,11 +1,13 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", + "envoy_benchmark_test", + "envoy_cc_benchmark_binary", "envoy_cc_test", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( @@ -13,11 +15,38 @@ envoy_cc_test( srcs = ["compressor_filter_test.cc"], deps = [ "//source/common/protobuf:utility_lib", + "//source/extensions/compression/gzip/compressor:config", + "//source/extensions/filters/http/common/compressor:compressor_lib", + "//test/mocks/compression/compressor:compressor_mocks", + "//test/mocks/http:http_mocks", + "//test/mocks/protobuf:protobuf_mocks", + "//test/mocks/runtime:runtime_mocks", + "//test/test_common:utility_lib", + "@envoy_api//envoy/extensions/filters/http/compressor/v3:pkg_cc_proto", + ], +) + +envoy_cc_benchmark_binary( + name = "compressor_filter_speed_test", + srcs = ["compressor_filter_speed_test.cc"], + external_deps = [ + "benchmark", + "googletest", + ], + deps = [ + "//source/common/protobuf:utility_lib", + "//source/extensions/compression/gzip/compressor:compressor_lib", "//source/extensions/filters/http/common/compressor:compressor_lib", "//test/mocks/http:http_mocks", "//test/mocks/protobuf:protobuf_mocks", "//test/mocks/runtime:runtime_mocks", + "//test/test_common:printers_lib", "//test/test_common:utility_lib", "@envoy_api//envoy/extensions/filters/http/compressor/v3:pkg_cc_proto", ], ) + +envoy_benchmark_test( + name = "compressor_filter_speed_test_benchmark_test", + benchmark_binary = "compressor_filter_speed_test", +) diff --git a/test/extensions/filters/http/common/compressor/compressor_filter_speed_test.cc b/test/extensions/filters/http/common/compressor/compressor_filter_speed_test.cc 
new file mode 100644 index 0000000000000..9056ddc0ac3d7 --- /dev/null +++ b/test/extensions/filters/http/common/compressor/compressor_filter_speed_test.cc @@ -0,0 +1,293 @@ +#include "envoy/extensions/filters/http/compressor/v3/compressor.pb.h" + +#include "extensions/compression/gzip/compressor/zlib_compressor_impl.h" +#include "extensions/filters/http/common/compressor/compressor.h" + +#include "test/mocks/http/mocks.h" +#include "test/mocks/runtime/mocks.h" +#include "test/mocks/stats/mocks.h" + +#include "benchmark/benchmark.h" +#include "gmock/gmock.h" + +using testing::Return; + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace Common { +namespace Compressors { + +class MockCompressorFilterConfig : public CompressorFilterConfig { +public: + MockCompressorFilterConfig( + const envoy::extensions::filters::http::compressor::v3::Compressor& compressor, + const std::string& stats_prefix, Stats::Scope& scope, Runtime::Loader& runtime, + const std::string& compressor_name, + Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel level, + Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy strategy, + int64_t window_bits, uint64_t memory_level) + : CompressorFilterConfig(compressor, stats_prefix + compressor_name + ".", scope, runtime, + compressor_name), + level_(level), strategy_(strategy), window_bits_(window_bits), memory_level_(memory_level) { + } + + Envoy::Compression::Compressor::CompressorPtr makeCompressor() override { + auto compressor = std::make_unique(); + compressor->init(level_, strategy_, window_bits_, memory_level_); + return compressor; + } + + const Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel level_; + const Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy strategy_; + const int64_t window_bits_; + const uint64_t memory_level_; +}; + +using CompressionParams = + std::tuple; + +static constexpr uint64_t TestDataSize = 122880; + 
+Buffer::OwnedImpl generateTestData() { + Buffer::OwnedImpl data; + TestUtility::feedBufferWithRandomCharacters(data, TestDataSize); + return data; +} + +const Buffer::OwnedImpl& testData() { + CONSTRUCT_ON_FIRST_USE(Buffer::OwnedImpl, generateTestData()); +} + +static std::vector generateChunks(const uint64_t chunk_count, + const uint64_t chunk_size) { + std::vector vec; + vec.reserve(chunk_count); + + const auto& test_data = testData(); + uint64_t added = 0; + + for (uint64_t i = 0; i < chunk_count; ++i) { + Buffer::OwnedImpl chunk; + std::unique_ptr data(new char[chunk_size]); + + test_data.copyOut(added, chunk_size, data.get()); + chunk.add(absl::string_view(data.get(), chunk_size)); + vec.push_back(std::move(chunk)); + + added += chunk_size; + } + + return vec; +} + +struct Result { + uint64_t total_uncompressed_bytes = 0; + uint64_t total_compressed_bytes = 0; +}; + +static Result compressWith(std::vector&& chunks, CompressionParams params, + NiceMock& decoder_callbacks, + benchmark::State& state) { + auto start = std::chrono::high_resolution_clock::now(); + Stats::IsolatedStoreImpl stats; + testing::NiceMock runtime; + envoy::extensions::filters::http::compressor::v3::Compressor compressor; + + const auto level = std::get<0>(params); + const auto strategy = std::get<1>(params); + const auto window_bits = std::get<2>(params); + const auto memory_level = std::get<3>(params); + CompressorFilterConfigSharedPtr config = std::make_shared( + compressor, "test.", stats, runtime, "gzip", level, strategy, window_bits, memory_level); + + ON_CALL(runtime.snapshot_, featureEnabled("test.filter_enabled", 100)) + .WillByDefault(Return(true)); + + auto filter = std::make_unique(config); + filter->setDecoderFilterCallbacks(decoder_callbacks); + + Http::TestRequestHeaderMapImpl headers = {{":method", "get"}, {"accept-encoding", "gzip"}}; + filter->decodeHeaders(headers, false); + + Http::TestResponseHeaderMapImpl response_headers = { + {":method", "get"}, + {"content-length", 
"122880"}, + {"content-type", "application/json;charset=utf-8"}}; + filter->encodeHeaders(response_headers, false); + + uint64_t idx = 0; + Result res; + for (auto& data : chunks) { + res.total_uncompressed_bytes += data.length(); + + if (idx == (chunks.size() - 1)) { + filter->encodeData(data, true); + } else { + filter->encodeData(data, false); + } + + res.total_compressed_bytes += data.length(); + ++idx; + } + + EXPECT_EQ(res.total_uncompressed_bytes, + stats.counterFromString("test.gzip.total_uncompressed_bytes").value()); + EXPECT_EQ(res.total_compressed_bytes, + stats.counterFromString("test.gzip.total_compressed_bytes").value()); + + EXPECT_EQ(1U, stats.counterFromString("test.gzip.compressed").value()); + auto end = std::chrono::high_resolution_clock::now(); + const auto elapsed = std::chrono::duration_cast>(end - start); + state.SetIterationTime(elapsed.count()); + + return res; +} + +// SPELLCHECKER(off) +/* +Running ./bazel-bin/test/extensions/filters/http/common/compressor/compressor_filter_speed_test +Run on (8 X 2300 MHz CPU s) +CPU Caches: +L1 Data 32K (x4) +L1 Instruction 32K (x4) +L2 Unified 262K (x4) +L3 Unified 6291K (x1) +Load Average: 1.82, 1.72, 1.74 +***WARNING*** Library was built as DEBUG. Timings may be affected. +------------------------------------------------------------ +Benchmark Time CPU Iterations +------------------------------------------------------------ +.... 
+compressFull/0/manual_time 14.1 ms 14.3 ms 48 +compressFull/1/manual_time 7.06 ms 7.22 ms 104 +compressFull/2/manual_time 5.17 ms 5.33 ms 123 +compressFull/3/manual_time 15.4 ms 15.5 ms 45 +compressFull/4/manual_time 10.1 ms 10.3 ms 69 +compressFull/5/manual_time 15.8 ms 16.0 ms 40 +compressFull/6/manual_time 15.3 ms 15.5 ms 42 +compressFull/7/manual_time 9.91 ms 10.1 ms 71 +compressFull/8/manual_time 15.8 ms 16.0 ms 45 +compressChunks16384/0/manual_time 13.4 ms 13.5 ms 52 +compressChunks16384/1/manual_time 6.33 ms 6.48 ms 111 +compressChunks16384/2/manual_time 5.09 ms 5.27 ms 147 +compressChunks16384/3/manual_time 15.1 ms 15.3 ms 46 +compressChunks16384/4/manual_time 9.61 ms 9.78 ms 71 +compressChunks16384/5/manual_time 14.5 ms 14.6 ms 47 +compressChunks16384/6/manual_time 14.0 ms 14.1 ms 48 +compressChunks16384/7/manual_time 9.20 ms 9.36 ms 76 +compressChunks16384/8/manual_time 14.5 ms 14.6 ms 48 +compressChunks8192/0/manual_time 14.3 ms 14.5 ms 50 +compressChunks8192/1/manual_time 6.80 ms 6.96 ms 100 +compressChunks8192/2/manual_time 5.21 ms 5.36 ms 135 +compressChunks8192/3/manual_time 14.9 ms 15.0 ms 47 +compressChunks8192/4/manual_time 9.71 ms 9.87 ms 68 +compressChunks8192/5/manual_time 15.9 ms 16.1 ms 45 +.... 
+*/ +// SPELLCHECKER(on) + +static std::vector compression_params = { + // Speed + Standard + Small Window + Low mem level + {Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Speed, + Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, 9, 1}, + + // Speed + Standard + Med window + Med mem level + {Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Speed, + Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, 12, 5}, + + // Speed + Standard + Big window + High mem level + {Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Speed, + Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, 15, 9}, + + // Standard + Standard + Small window + Low mem level + {Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Standard, + Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, 9, 1}, + + // Standard + Standard + Med window + Med mem level + {Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Standard, + Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, 12, 5}, + + // Standard + Standard + High window + High mem level + {Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Standard, + Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, 15, 9}, + + // Best + Standard + Small window + Low mem level + {Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Best, + Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, 9, 1}, + + // Best + Standard + Med window + Med mem level + {Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Best, + Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, 12, 5}, + + // Best + Standard + High window + High mem level + 
{Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Best, + Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, 15, 9}}; + +static void compressFull(benchmark::State& state) { + NiceMock decoder_callbacks; + const auto idx = state.range(0); + const auto& params = compression_params[idx]; + + for (auto _ : state) { + std::vector chunks = generateChunks(1, 122880); + compressWith(std::move(chunks), params, decoder_callbacks, state); + } +} +BENCHMARK(compressFull)->DenseRange(0, 8, 1)->UseManualTime()->Unit(benchmark::kMillisecond); + +static void compressChunks16384(benchmark::State& state) { + NiceMock decoder_callbacks; + const auto idx = state.range(0); + const auto& params = compression_params[idx]; + + for (auto _ : state) { + std::vector chunks = generateChunks(7, 16384); + compressWith(std::move(chunks), params, decoder_callbacks, state); + } +} +BENCHMARK(compressChunks16384)->DenseRange(0, 8, 1)->UseManualTime()->Unit(benchmark::kMillisecond); + +static void compressChunks8192(benchmark::State& state) { + NiceMock decoder_callbacks; + const auto idx = state.range(0); + const auto& params = compression_params[idx]; + + for (auto _ : state) { + std::vector chunks = generateChunks(15, 8192); + compressWith(std::move(chunks), params, decoder_callbacks, state); + } +} +BENCHMARK(compressChunks8192)->DenseRange(0, 8, 1)->UseManualTime()->Unit(benchmark::kMillisecond); + +static void compressChunks4096(benchmark::State& state) { + NiceMock decoder_callbacks; + const auto idx = state.range(0); + const auto& params = compression_params[idx]; + + for (auto _ : state) { + std::vector chunks = generateChunks(30, 4096); + compressWith(std::move(chunks), params, decoder_callbacks, state); + } +} +BENCHMARK(compressChunks4096)->DenseRange(0, 8, 1)->UseManualTime()->Unit(benchmark::kMillisecond); + +static void compressChunks1024(benchmark::State& state) { + NiceMock decoder_callbacks; + const auto idx = state.range(0); + 
const auto& params = compression_params[idx]; + + for (auto _ : state) { + std::vector chunks = generateChunks(120, 1024); + compressWith(std::move(chunks), params, decoder_callbacks, state); + } +} +BENCHMARK(compressChunks1024)->DenseRange(0, 8, 1)->UseManualTime()->Unit(benchmark::kMillisecond); + +} // namespace Compressors +} // namespace Common +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/http/common/compressor/compressor_filter_test.cc b/test/extensions/filters/http/common/compressor/compressor_filter_test.cc index a82c86444d0a2..92c8d45190aba 100644 --- a/test/extensions/filters/http/common/compressor/compressor_filter_test.cc +++ b/test/extensions/filters/http/common/compressor/compressor_filter_test.cc @@ -6,6 +6,7 @@ #include "extensions/filters/http/common/compressor/compressor.h" +#include "test/mocks/compression/compressor/mocks.h" #include "test/mocks/http/mocks.h" #include "test/mocks/protobuf/mocks.h" #include "test/mocks/runtime/mocks.h" @@ -14,30 +15,34 @@ #include "gtest/gtest.h" -using testing::Return; - namespace Envoy { namespace Extensions { namespace HttpFilters { namespace Common { namespace Compressors { -class MockCompressor : public Compressor::Compressor { - void compress(Buffer::Instance&, ::Envoy::Compressor::State) override {} -}; +using testing::_; +using testing::Return; -class MockCompressorFilterConfig : public CompressorFilterConfig { +class TestCompressorFilterConfig : public CompressorFilterConfig { public: - MockCompressorFilterConfig( + TestCompressorFilterConfig( const envoy::extensions::filters::http::compressor::v3::Compressor& compressor, const std::string& stats_prefix, Stats::Scope& scope, Runtime::Loader& runtime, const std::string& compressor_name) : CompressorFilterConfig(compressor, stats_prefix + compressor_name + ".", scope, runtime, compressor_name) {} - std::unique_ptr makeCompressor() override { - return std::make_unique(); + 
Envoy::Compression::Compressor::CompressorPtr makeCompressor() override { + auto compressor = std::make_unique(); + EXPECT_CALL(*compressor, compress(_, _)).Times(expected_compress_calls_); + return compressor; } + + void setExpectedCompressCalls(uint32_t calls) { expected_compress_calls_ = calls; } + +private: + uint32_t expected_compress_calls_{1}; }; class CompressorFilterTest : public testing::Test { @@ -47,7 +52,17 @@ class CompressorFilterTest : public testing::Test { .WillByDefault(Return(true)); } - void SetUp() override { setUpFilter("{}"); } + void SetUp() override { + setUpFilter(R"EOF( +{ + "compressor_library": { + "typed_config": { + "@type": "type.googleapis.com/envoy.extensions.compression.gzip.compressor.v3.Gzip" + } + } +} +)EOF"); + } // CompressorFilter private member functions void sanitizeEtagHeader(Http::ResponseHeaderMap& headers) { @@ -93,7 +108,7 @@ class CompressorFilterTest : public testing::Test { envoy::extensions::filters::http::compressor::v3::Compressor compressor; TestUtility::loadFromJson(json, compressor); config_ = - std::make_shared(compressor, "test.", stats_, runtime_, "test"); + std::make_shared(compressor, "test.", stats_, runtime_, "test"); filter_ = std::make_unique(config_); filter_->setEncoderFilterCallbacks(encoder_callbacks_); } @@ -117,7 +132,7 @@ class CompressorFilterTest : public testing::Test { EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, end_stream)); } - void doResponseCompression(Http::TestResponseHeaderMapImpl&& headers, bool with_trailers) { + void doResponseCompression(Http::TestResponseHeaderMapImpl& headers, bool with_trailers) { NiceMock decoder_callbacks; filter_->setDecoderFilterCallbacks(decoder_callbacks); uint64_t content_length; @@ -139,7 +154,7 @@ class CompressorFilterTest : public testing::Test { EXPECT_EQ(1U, stats_.counter("test.test.compressed").value()); } - void doResponseNoCompression(Http::TestResponseHeaderMapImpl&& headers) { + void 
doResponseNoCompression(Http::TestResponseHeaderMapImpl& headers) { NiceMock decoder_callbacks; filter_->setDecoderFilterCallbacks(decoder_callbacks); uint64_t content_length; @@ -158,7 +173,7 @@ class CompressorFilterTest : public testing::Test { EXPECT_EQ(1, stats_.counter("test.test.not_compressed").value()); } - CompressorFilterConfigSharedPtr config_; + std::shared_ptr config_; std::unique_ptr filter_; Buffer::OwnedImpl data_; std::string expected_str_; @@ -174,6 +189,11 @@ TEST_F(CompressorFilterTest, DecodeHeadersWithRuntimeDisabled) { "runtime_enabled": { "default_value": true, "runtime_key": "foo_key" + }, + "compressor_library": { + "typed_config": { + "@type": "type.googleapis.com/envoy.extensions.compression.gzip.compressor.v3.Gzip" + } } } )EOF"); @@ -181,7 +201,9 @@ TEST_F(CompressorFilterTest, DecodeHeadersWithRuntimeDisabled) { .Times(2) .WillRepeatedly(Return(false)); doRequest({{":method", "get"}, {"accept-encoding", "deflate, test"}}, false); - doResponseNoCompression({{":method", "get"}, {"content-length", "256"}}); + Http::TestResponseHeaderMapImpl headers{{":method", "get"}, {"content-length", "256"}}; + doResponseNoCompression(headers); + EXPECT_FALSE(headers.has("vary")); } // Default config values. 
@@ -199,7 +221,9 @@ TEST_F(CompressorFilterTest, AcceptanceTestEncoding) { EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data, false)); Http::TestRequestTrailerMapImpl trailers; EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(trailers)); - doResponseCompression({{":method", "get"}, {"content-length", "256"}}, false); + + Http::TestResponseHeaderMapImpl headers{{":method", "get"}, {"content-length", "256"}}; + doResponseCompression(headers, false); } TEST_F(CompressorFilterTest, AcceptanceTestEncodingWithTrailers) { @@ -208,7 +232,9 @@ TEST_F(CompressorFilterTest, AcceptanceTestEncodingWithTrailers) { EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data, false)); Http::TestRequestTrailerMapImpl trailers; EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(trailers)); - doResponseCompression({{":method", "get"}, {"content-length", "256"}}, true); + Http::TestResponseHeaderMapImpl headers{{":method", "get"}, {"content-length", "256"}}; + config_->setExpectedCompressCalls(2); + doResponseCompression(headers, true); } // Verifies hasCacheControlNoTransform function. @@ -230,22 +256,27 @@ TEST_F(CompressorFilterTest, HasCacheControlNoTransform) { // Verifies that compression is skipped when cache-control header has no-transform value. TEST_F(CompressorFilterTest, HasCacheControlNoTransformNoCompression) { doRequest({{":method", "get"}, {"accept-encoding", "test;q=1, deflate"}}, true); - doResponseNoCompression( - {{":method", "get"}, {"content-length", "256"}, {"cache-control", "no-transform"}}); + Http::TestResponseHeaderMapImpl headers{ + {":method", "get"}, {"content-length", "256"}, {"cache-control", "no-transform"}}; + doResponseNoCompression(headers); + EXPECT_FALSE(headers.has("vary")); } // Verifies that compression is NOT skipped when cache-control header does NOT have no-transform // value. 
TEST_F(CompressorFilterTest, HasCacheControlNoTransformCompression) { doRequest({{":method", "get"}, {"accept-encoding", "test, deflate"}}, true); - doResponseCompression( - {{":method", "get"}, {"content-length", "256"}, {"cache-control", "no-cache"}}, false); + Http::TestResponseHeaderMapImpl headers{ + {":method", "get"}, {"content-length", "256"}, {"cache-control", "no-cache"}}; + doResponseCompression(headers, false); } TEST_F(CompressorFilterTest, NoAcceptEncodingHeader) { doRequest({{":method", "get"}, {}}, true); - doResponseNoCompression({{":method", "get"}, {"content-length", "256"}}); + Http::TestResponseHeaderMapImpl headers{{":method", "get"}, {"content-length", "256"}}; + doResponseNoCompression(headers); EXPECT_EQ(1, stats_.counter("test.test.no_accept_header").value()); + EXPECT_EQ("Accept-Encoding", headers.get_("vary")); } // Verifies isAcceptEncodingAllowed function. @@ -351,10 +382,19 @@ TEST_F(CompressorFilterTest, IsAcceptEncodingAllowed) { Stats::TestUtil::TestStore stats; NiceMock runtime; envoy::extensions::filters::http::compressor::v3::Compressor compressor; - TestUtility::loadFromJson("{}", compressor); + TestUtility::loadFromJson(R"EOF( +{ + "compressor_library": { + "typed_config": { + "@type": "type.googleapis.com/envoy.extensions.compression.gzip.compressor.v3.Gzip" + } + } +} +)EOF", + compressor); CompressorFilterConfigSharedPtr config2; config2 = - std::make_shared(compressor, "test2.", stats, runtime, "test2"); + std::make_shared(compressor, "test2.", stats, runtime, "test2"); std::unique_ptr filter2 = std::make_unique(config2); NiceMock decoder_callbacks; filter2->setDecoderFilterCallbacks(decoder_callbacks); @@ -375,10 +415,19 @@ TEST_F(CompressorFilterTest, IsAcceptEncodingAllowed) { ; NiceMock runtime; envoy::extensions::filters::http::compressor::v3::Compressor compressor; - TestUtility::loadFromJson("{}", compressor); + TestUtility::loadFromJson(R"EOF( +{ + "compressor_library": { + "typed_config": { + "@type": 
"type.googleapis.com/envoy.extensions.compression.gzip.compressor.v3.Gzip" + } + } +} +)EOF", + compressor); CompressorFilterConfigSharedPtr config2; config2 = - std::make_shared(compressor, "test2.", stats, runtime, "gzip"); + std::make_shared(compressor, "test2.", stats, runtime, "gzip"); std::unique_ptr gzip_filter = std::make_unique(config2); NiceMock decoder_callbacks; gzip_filter->setDecoderFilterCallbacks(decoder_callbacks); @@ -395,10 +444,19 @@ TEST_F(CompressorFilterTest, IsAcceptEncodingAllowed) { ; NiceMock runtime; envoy::extensions::filters::http::compressor::v3::Compressor compressor; - TestUtility::loadFromJson("{}", compressor); + TestUtility::loadFromJson(R"EOF( +{ + "compressor_library": { + "typed_config": { + "@type": "type.googleapis.com/envoy.extensions.compression.gzip.compressor.v3.Gzip" + } + } +} +)EOF", + compressor); CompressorFilterConfigSharedPtr config2; config2 = - std::make_shared(compressor, "test2.", stats, runtime, "test"); + std::make_shared(compressor, "test2.", stats, runtime, "test"); std::unique_ptr filter2 = std::make_unique(config2); NiceMock decoder_callbacks; filter2->setDecoderFilterCallbacks(decoder_callbacks); @@ -415,10 +473,19 @@ TEST_F(CompressorFilterTest, IsAcceptEncodingAllowed) { ; NiceMock runtime; envoy::extensions::filters::http::compressor::v3::Compressor compressor; - TestUtility::loadFromJson("{}", compressor); + TestUtility::loadFromJson(R"EOF( +{ + "compressor_library": { + "typed_config": { + "@type": "type.googleapis.com/envoy.extensions.compression.gzip.compressor.v3.Gzip" + } + } +} +)EOF", + compressor); CompressorFilterConfigSharedPtr config2; config2 = - std::make_shared(compressor, "test2.", stats, runtime, "test"); + std::make_shared(compressor, "test2.", stats, runtime, "test"); std::unique_ptr filter2 = std::make_unique(config2); NiceMock decoder_callbacks; filter2->setDecoderFilterCallbacks(decoder_callbacks); @@ -435,14 +502,23 @@ TEST_F(CompressorFilterTest, IsAcceptEncodingAllowed) { ; 
NiceMock runtime; envoy::extensions::filters::http::compressor::v3::Compressor compressor; - TestUtility::loadFromJson("{}", compressor); + TestUtility::loadFromJson(R"EOF( +{ + "compressor_library": { + "typed_config": { + "@type": "type.googleapis.com/envoy.extensions.compression.gzip.compressor.v3.Gzip" + } + } +} +)EOF", + compressor); CompressorFilterConfigSharedPtr config1; config1 = - std::make_shared(compressor, "test1.", stats, runtime, "test1"); + std::make_shared(compressor, "test1.", stats, runtime, "test1"); std::unique_ptr filter1 = std::make_unique(config1); CompressorFilterConfigSharedPtr config2; config2 = - std::make_shared(compressor, "test2.", stats, runtime, "test2"); + std::make_shared(compressor, "test2.", stats, runtime, "test2"); std::unique_ptr filter2 = std::make_unique(config2); NiceMock decoder_callbacks; filter1->setDecoderFilterCallbacks(decoder_callbacks); @@ -465,14 +541,23 @@ TEST_F(CompressorFilterTest, IsAcceptEncodingAllowed) { ; NiceMock runtime; envoy::extensions::filters::http::compressor::v3::Compressor compressor; - TestUtility::loadFromJson("{}", compressor); + TestUtility::loadFromJson(R"EOF( +{ + "compressor_library": { + "typed_config": { + "@type": "type.googleapis.com/envoy.extensions.compression.gzip.compressor.v3.Gzip" + } + } +} +)EOF", + compressor); CompressorFilterConfigSharedPtr config1; config1 = - std::make_shared(compressor, "test1.", stats, runtime, "test1"); + std::make_shared(compressor, "test1.", stats, runtime, "test1"); std::unique_ptr filter1 = std::make_unique(config1); CompressorFilterConfigSharedPtr config2; config2 = - std::make_shared(compressor, "test2.", stats, runtime, "test2"); + std::make_shared(compressor, "test2.", stats, runtime, "test2"); std::unique_ptr filter2 = std::make_unique(config2); NiceMock decoder_callbacks; filter1->setDecoderFilterCallbacks(decoder_callbacks); @@ -489,13 +574,18 @@ TEST_F(CompressorFilterTest, IsAcceptEncodingAllowed) { // Verifies that compression is skipped 
when accept-encoding header is not allowed. TEST_F(CompressorFilterTest, AcceptEncodingNoCompression) { doRequest({{":method", "get"}, {"accept-encoding", "test;q=0, deflate"}}, true); - doResponseNoCompression({{":method", "get"}, {"content-length", "256"}}); + Http::TestResponseHeaderMapImpl headers{{":method", "get"}, {"content-length", "256"}}; + doResponseNoCompression(headers); + // Even if compression is disallowed by a client we must let her know the resource is + // compressible. + EXPECT_EQ("Accept-Encoding", headers.get_("vary")); } // Verifies that compression is NOT skipped when accept-encoding header is allowed. TEST_F(CompressorFilterTest, AcceptEncodingCompression) { doRequest({{":method", "get"}, {"accept-encoding", "test, deflate"}}, true); - doResponseCompression({{":method", "get"}, {"content-length", "256"}}, false); + Http::TestResponseHeaderMapImpl headers{{":method", "get"}, {"content-length", "256"}}; + doResponseCompression(headers, false); } // Verifies isMinimumContentLength function. @@ -517,7 +607,16 @@ TEST_F(CompressorFilterTest, IsMinimumContentLength) { EXPECT_TRUE(isMinimumContentLength(headers)); } - setUpFilter(R"EOF({"content_length": 500})EOF"); + setUpFilter(R"EOF( +{ + "content_length": 500, + "compressor_library": { + "typed_config": { + "@type": "type.googleapis.com/envoy.extensions.compression.gzip.compressor.v3.Gzip" + } + } +} +)EOF"); { Http::TestResponseHeaderMapImpl headers = {{"content-length", "501"}}; EXPECT_TRUE(isMinimumContentLength(headers)); @@ -535,14 +634,26 @@ TEST_F(CompressorFilterTest, IsMinimumContentLength) { // Verifies that compression is skipped when content-length header is NOT allowed. 
TEST_F(CompressorFilterTest, ContentLengthNoCompression) { doRequest({{":method", "get"}, {"accept-encoding", "test"}}, true); - doResponseNoCompression({{":method", "get"}, {"content-length", "10"}}); + Http::TestResponseHeaderMapImpl headers{{":method", "get"}, {"content-length", "10"}}; + doResponseNoCompression(headers); + EXPECT_FALSE(headers.has("vary")); } // Verifies that compression is NOT skipped when content-length header is allowed. TEST_F(CompressorFilterTest, ContentLengthCompression) { - setUpFilter(R"EOF({"content_length": 500})EOF"); + setUpFilter(R"EOF( +{ + "content_length": 500, + "compressor_library": { + "typed_config": { + "@type": "type.googleapis.com/envoy.extensions.compression.gzip.compressor.v3.Gzip" + } + } +} +)EOF"); doRequest({{":method", "get"}, {"accept-encoding", "test"}}, true); - doResponseCompression({{":method", "get"}, {"content-length", "1000"}}, false); + Http::TestResponseHeaderMapImpl headers{{":method", "get"}, {"content-length", "1000"}}; + doResponseCompression(headers, false); } // Verifies isContentTypeAllowed function. 
@@ -603,7 +714,12 @@ TEST_F(CompressorFilterTest, IsContentTypeAllowed) { "text/html", "xyz/svg+xml", "Test/INSENSITIVE" - ] + ], + "compressor_library": { + "typed_config": { + "@type": "type.googleapis.com/envoy.extensions.compression.gzip.compressor.v3.Gzip" + } + } } )EOF"); @@ -641,22 +757,30 @@ TEST_F(CompressorFilterTest, ContentTypeNoCompression) { "application/json", "font/eot", "image/svg+xml" - ] + ], + "compressor_library": { + "typed_config": { + "@type": "type.googleapis.com/envoy.extensions.compression.gzip.compressor.v3.Gzip" + } + } } )EOF"); doRequest({{":method", "get"}, {"accept-encoding", "test"}}, true); - doResponseNoCompression( - {{":method", "get"}, {"content-length", "256"}, {"content-type", "image/jpeg"}}); + Http::TestResponseHeaderMapImpl headers{ + {":method", "get"}, {"content-length", "256"}, {"content-type", "image/jpeg"}}; + doResponseNoCompression(headers); EXPECT_EQ(1, stats_.counter("test.test.header_not_valid").value()); + // Assert the resource is not compressible. + EXPECT_FALSE(headers.has("vary")); } // Verifies that compression is NOT skipped when content-encoding header is allowed. TEST_F(CompressorFilterTest, ContentTypeCompression) { doRequest({{":method", "get"}, {"accept-encoding", "test"}}, true); - doResponseCompression({{":method", "get"}, - {"content-length", "256"}, - {"content-type", "application/json;charset=utf-8"}}, - false); + Http::TestResponseHeaderMapImpl headers{{":method", "get"}, + {"content-length", "256"}, + {"content-type", "application/json;charset=utf-8"}}; + doResponseCompression(headers, false); } // Verifies sanitizeEtagHeader function. 
@@ -698,7 +822,16 @@ TEST_F(CompressorFilterTest, IsEtagAllowed) { EXPECT_EQ(0, stats_.counter("test.test.not_compressed_etag").value()); } - setUpFilter(R"EOF({ "disable_on_etag_header": true })EOF"); + setUpFilter(R"EOF( +{ + "disable_on_etag_header": true, + "compressor_library": { + "typed_config": { + "@type": "type.googleapis.com/envoy.extensions.compression.gzip.compressor.v3.Gzip" + } + } +} +)EOF"); { Http::TestResponseHeaderMapImpl headers = {{"etag", R"EOF(W/"686897696a7c876b7e")EOF"}}; EXPECT_FALSE(isEtagAllowed(headers)); @@ -718,22 +851,30 @@ TEST_F(CompressorFilterTest, IsEtagAllowed) { // Verifies that compression is skipped when etag header is NOT allowed. TEST_F(CompressorFilterTest, EtagNoCompression) { - setUpFilter(R"EOF({ "disable_on_etag_header": true })EOF"); + setUpFilter(R"EOF( +{ + "disable_on_etag_header": true, + "compressor_library": { + "typed_config": { + "@type": "type.googleapis.com/envoy.extensions.compression.gzip.compressor.v3.Gzip" + } + } +} +)EOF"); doRequest({{":method", "get"}, {"accept-encoding", "test"}}, true); - doResponseNoCompression( - {{":method", "get"}, {"content-length", "256"}, {"etag", R"EOF(W/"686897696a7c876b7e")EOF"}}); + Http::TestResponseHeaderMapImpl headers{ + {":method", "get"}, {"content-length", "256"}, {"etag", R"EOF(W/"686897696a7c876b7e")EOF"}}; + doResponseNoCompression(headers); EXPECT_EQ(1, stats_.counter("test.test.not_compressed_etag").value()); + EXPECT_FALSE(headers.has("vary")); } -// Verifies that compression is skipped when etag header is NOT allowed. +// Verifies that compression is not skipped when strong etag header is present. 
TEST_F(CompressorFilterTest, EtagCompression) { doRequest({{":method", "get"}, {"accept-encoding", "test"}}, true); Http::TestResponseHeaderMapImpl headers{ {":method", "get"}, {"content-length", "256"}, {"etag", "686897696a7c876b7e"}}; - feedBuffer(256); - NiceMock decoder_callbacks; - filter_->setDecoderFilterCallbacks(decoder_callbacks); - EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(headers, false)); + doResponseCompression(headers, false); EXPECT_FALSE(headers.has("etag")); EXPECT_EQ("test", headers.get_("content-encoding")); } @@ -777,16 +918,19 @@ TEST_F(CompressorFilterTest, IsTransferEncodingAllowed) { // Tests compression when Transfer-Encoding header exists. TEST_F(CompressorFilterTest, TransferEncodingChunked) { doRequest({{":method", "get"}, {"accept-encoding", "test"}}, true); - doResponseCompression( - {{":method", "get"}, {"content-length", "256"}, {"transfer-encoding", "chunked"}}, false); + Http::TestResponseHeaderMapImpl headers{ + {":method", "get"}, {"content-length", "256"}, {"transfer-encoding", "chunked"}}; + doResponseCompression(headers, false); } // Tests compression when Transfer-Encoding header exists. TEST_F(CompressorFilterTest, AcceptanceTransferEncoding) { doRequest({{":method", "get"}, {"accept-encoding", "test"}}, true); - doResponseNoCompression( - {{":method", "get"}, {"content-length", "256"}, {"transfer-encoding", "chunked, deflate"}}); + Http::TestResponseHeaderMapImpl headers{ + {":method", "get"}, {"content-length", "256"}, {"transfer-encoding", "chunked, deflate"}}; + doResponseNoCompression(headers); + EXPECT_EQ("Accept-Encoding", headers.get_("vary")); } // Content-Encoding: upstream response is already encoded. 
@@ -848,8 +992,7 @@ TEST_F(CompressorFilterTest, NoVaryHeader) { filter_->setDecoderFilterCallbacks(decoder_callbacks); doRequest({{":method", "get"}, {"accept-encoding", "test"}}, true); Http::TestResponseHeaderMapImpl headers{{":method", "get"}, {"content-length", "256"}}; - feedBuffer(256); - EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(headers, false)); + doResponseCompression(headers, false); EXPECT_TRUE(headers.has("vary")); EXPECT_EQ("Accept-Encoding", headers.get_("vary")); } @@ -861,8 +1004,7 @@ TEST_F(CompressorFilterTest, VaryOtherValues) { doRequest({{":method", "get"}, {"accept-encoding", "test"}}, true); Http::TestResponseHeaderMapImpl headers{ {":method", "get"}, {"content-length", "256"}, {"vary", "User-Agent, Cookie"}}; - feedBuffer(256); - EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(headers, false)); + doResponseCompression(headers, false); EXPECT_TRUE(headers.has("vary")); EXPECT_EQ("User-Agent, Cookie, Accept-Encoding", headers.get_("vary")); } @@ -874,8 +1016,7 @@ TEST_F(CompressorFilterTest, VaryAlreadyHasAcceptEncoding) { doRequest({{":method", "get"}, {"accept-encoding", "test"}}, true); Http::TestResponseHeaderMapImpl headers{ {":method", "get"}, {"content-length", "256"}, {"vary", "accept-encoding"}}; - feedBuffer(256); - EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(headers, false)); + doResponseCompression(headers, false); EXPECT_TRUE(headers.has("vary")); EXPECT_EQ("accept-encoding, Accept-Encoding", headers.get_("vary")); } @@ -886,13 +1027,30 @@ TEST_F(CompressorFilterTest, RemoveAcceptEncodingHeader) { filter_->setDecoderFilterCallbacks(decoder_callbacks); { Http::TestRequestHeaderMapImpl headers = {{"accept-encoding", "deflate, test, gzip, br"}}; - setUpFilter(R"EOF({"remove_accept_encoding_header": true})EOF"); + setUpFilter(R"EOF( +{ + "remove_accept_encoding_header": true, + "compressor_library": { + "typed_config": { + "@type": 
"type.googleapis.com/envoy.extensions.compression.gzip.compressor.v3.Gzip" + } + } +} +)EOF"); EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, true)); EXPECT_FALSE(headers.has("accept-encoding")); } { Http::TestRequestHeaderMapImpl headers = {{"accept-encoding", "deflate, test, gzip, br"}}; - setUpFilter("{}"); + setUpFilter(R"EOF( +{ + "compressor_library": { + "typed_config": { + "@type": "type.googleapis.com/envoy.extensions.compression.gzip.compressor.v3.Gzip" + } + } +} +)EOF"); EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, true)); EXPECT_TRUE(headers.has("accept-encoding")); EXPECT_EQ("deflate, test, gzip, br", headers.get_("accept-encoding")); diff --git a/test/extensions/filters/http/common/fuzz/BUILD b/test/extensions/filters/http/common/fuzz/BUILD index 0e99f5e76546f..cd99336234b8a 100644 --- a/test/extensions/filters/http/common/fuzz/BUILD +++ b/test/extensions/filters/http/common/fuzz/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_fuzz_test", @@ -9,9 +7,11 @@ load( ) load( "//source/extensions:all_extensions.bzl", - "envoy_all_extensions", + "envoy_all_http_filters", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_proto_library( @@ -25,15 +25,27 @@ envoy_proto_library( envoy_cc_test_library( name = "uber_filter_lib", + srcs = [ + "uber_filter.cc", + "uber_per_filter.cc", + ], hdrs = ["uber_filter.h"], deps = [ ":filter_fuzz_proto_cc_proto", "//source/common/config:utility_lib", + "//source/common/http:utility_lib", "//source/common/protobuf:utility_lib", + "//source/extensions/filters/http:well_known_names", + "//source/extensions/filters/http/common:utility_lib", "//test/fuzz:utility_lib", "//test/mocks/buffer:buffer_mocks", "//test/mocks/http:http_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", + "//test/proto:bookstore_proto_cc_proto", + 
"@envoy_api//envoy/extensions/filters/http/grpc_json_transcoder/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/jwt_authn/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/squash/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/tap/v3:pkg_cc_proto", ], ) @@ -47,8 +59,9 @@ envoy_cc_fuzz_test( ":uber_filter_lib", "//source/common/config:utility_lib", "//source/common/protobuf:utility_lib", + "//source/extensions/upstreams/http/generic:config", "//test/config:utility_lib", "@envoy_api//envoy/service/auth/v3:pkg_cc_proto", "@envoy_api//envoy/service/auth/v2alpha:pkg_cc_proto", - ] + envoy_all_extensions(), + ] + envoy_all_http_filters(), ) diff --git a/test/extensions/filters/http/common/fuzz/filter_corpus/buffer1 b/test/extensions/filters/http/common/fuzz/filter_corpus/buffer1 index a1bf00f67a61c..9b8bf63c7ea9c 100644 --- a/test/extensions/filters/http/common/fuzz/filter_corpus/buffer1 +++ b/test/extensions/filters/http/common/fuzz/filter_corpus/buffer1 @@ -15,5 +15,7 @@ data { "a" value : "b" } } - data: "hello" + http_body { + data: "hello" + } } diff --git a/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-filter_fuzz_test-5082368313655296 b/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-filter_fuzz_test-5082368313655296 new file mode 100644 index 0000000000000..2ac06ba4abef4 --- /dev/null +++ b/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-filter_fuzz_test-5082368313655296 @@ -0,0 +1,7 @@ +config { + name: "envoy.filters.http.header_to_metadata" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.http.header_to_metadata.v3.Config" + value: "\n\033\n\002;;\032\023\022\001;\032\014stanotcci_fi \t \001\n+\n\001;\022\021\022\001;\032\014static_confi\032\023\022\001;\032\014static_confi \t\022\031\n\002m;\032\023\022\001;\032\014stanotcci_fi \t" + } +} diff --git 
a/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-5143098977157120.fuzz b/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-5143098977157120.fuzz new file mode 100644 index 0000000000000..d212ffdb4e19e --- /dev/null +++ b/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-5143098977157120.fuzz @@ -0,0 +1,7 @@ +config { + name: "envoy.squash" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.http.squash.v3.Squash" + value: "\n\002Ae\022\356\n\n\342\n\n\001\017\022\334\n2\331\n\n\305\n2\302\n\n\0022\000\n\267\n*\264\n\n\261\n\n\004o\177\177\177\022\250\n2\245\n\n\216\n2\213\n\n\0022\000\n\200\n*\375\t\n\372\t\n\001\017\022\364\t2\361\t\nE2C\n\0022\000\n9*7\n5\n\004o\177\177\177\022-2+\n\0252\023\n\0022\000\n\t*\007\n\005\n\001@\022\000\n\002*\000\n\003\032\001#\n\t\021\010\000\000\000\000\000\0002\n\002*\000\n\002*\000\n\230\t2\225\t\n\375\0102\372\010\n\357\0102\354\010\n\365\0072\362\007\n\0022\000\n\347\007*\344\007\n\341\007\n\004o\177\177\177\022\330\0072\325\007\n\276\0072\273\007\n\0022\000\n\260\007*\255\007\n\252\007\n\001\017\022\244\0072\241\007\nE2C\n\0022\000\n9*7\n5\n\004o\177\177\177\022-2+\n\0252\023\n\0022\000\n\t*\007\n\005\n\001@\022\000\n\002*\000\n\003\032\001#\n\t\021\010\000\000\000\000\000\0002\n\002*\000\n\002*\000\n\310\0062\305\006\n\255\0062\252\006\n\237\0062\234\006\n\0142\n\n\000\n\0022\000\n\002*\000\n\374\0052\371\005\n\366\0052\363\005\n\337\0052\334\005\n\0022\000\n\321\005*\316\005\n\313\005\n\004o\177\177\177\022\302\0052\277\005\n\0022\000\n\0302\026\n\0022\000\n\014*\n\n\010\n\004o\177\177\177\022\000\n\002*\000\n\003\032\001#\n\231\005*\226\005\n\223\005\n\004o\177\177\177\022\212\0052\207\005\n\360\0042\355\004\n\0022\000\n\342\004*\337\004\n\334\004\n\001\017\022\326\0042\323\004\nE2C\n\0022\000\n9*7\n5\n\004o\177\177\177\022-2+\n\0252\023\n\002
2\000\n\t*\007\n\005\n\001@\022\000\n\002*\000\n\003\032\001#\n\t\021\010\000\000\000\000\000\0002\n\002*\000\n\002*\000\n\372\0032\367\003\n\337\0032\334\003\n\321\0032\316\003\n\327\0022\324\002\n\0022\000\n\311\002*\306\002\n\303\002\n\004o\177\177\177\021\272\0022\267\002\n\240\0022\235\002\n\0022\000\n\222\002*\217\002\n\214\002\n\001\017\022\206\0022\203\002\nE2C\n\0022\000\n9*7\n5\n\004o\177\177\177\022-2+\n\0252\023\n\0022\000\n\t*\007\n\005\n\001@\022\000\n\002*\000\n\003\032\001#\n\t\021\010\000\000\000\000\000\0002\n\002*\000\n\002*\000\n\252\0012\247\001\n\217\0012\214\001\n\201\0012\177\n\0142\n\n\000\n\0022\000\n\002*\000\n`2^\n\\2Z\nG2E\n\0022\000\n;*9\n7\n\004o\177\177\177\022/2-\n\0022\000\n\0302\026\n\0022\000\n\014*\n\n\010\n\004o\177\177\177\022\000\n\002*\000\n\003\032\001#\n\010*\006\n\004\n\000\022\000\n\002*\000\n\000\n\t\021\010\000\000\000\000\000\0002\n\002*\000\n\t\021\010\000\000\000\000\000\0002\n\002*\000\n\0022\000\n\002*\000\n\0042\002\n\000\n\t\021\010\000\000\000\000\000\0002\n\002*\000\n\t\021\010\000\000\000\000\000\0002\n\002*\000\n\002*\000\n\003\032\001#\n\t\021\010\000\000\000\000\000\0002\n\002*\000\n\002*\000\nc2a\n_2]\nJ2H\n\0022\000\n>*<\n:\n\004o\177\177\177\022220\n\0022\000\n!2\037\n\0022\000\n\014*\n\n\010\n\004o\177\177\177\022\000\n\013*\t\n\007\n\001\001\022\002\010\000\n\003\032\001#\n\002*\000\n\002*\000\n\000\n\t\021\010\000\000\000\000\000\0002\n\002*\000\n\t\021\010\000\000\000\000\000\0002\n\002*\000\n\0022\000\n\002*\000\n\0042\002\n\000\n\t\021\010\000\000\000\000\000\0002\n\002*\000\n\t\021\010\000\000\000\000\000\0002\n\002*\000\n\002*\000\n\003\032\001#\n\t\021\010\000\000\000\000\000\0002\n\002*\000\n\002*\000\n\000\n\t\021\010\000\000\000\000\000\0002\n\002*\000\n\t\021\010\000\000\000\000\000\0002\n\002*\000\n\0022\000\n\002*\000\n\0042\002\n\000\n\t\021\010\000\000\000\000\000\0002\n\002*\000\n\t\021\010\000\000\000\000\000\0002\n\002*\000\n\002*\000\n\003\032\001#\n\t\021\010\000\000\000\000\000\000
2\n\002*\000\n\002*\000\nc2a\n_2]\nJ2H\n\0022\000\n>*<\n:\n\004o\177\177\177\022220\n\0022\000\n!2\037\n\0022\000\n\014*\n\n\010\n\004o\177\177\177\022\000\n\013*\t\n\007\n\001\001\022\002\010\000\n\003\032\001#\n\002*\000\n\002*\000\n\000\n\t\021\010\000\000\000\000\000\0002\n\002*\000\n\t\021\010\000\000\000\000\000\0002\n\002*\000\n\0022\000\n\002*\000\n\0042\002\n\000\n\t\021\010\000\000\000\000\000\0002\n\002*\000\n\t\021\010\000\000\000\000\000\0002\n\002*\000\n\002*\000\n\003\032\001#\n\t\021\010\000\000\000\000\000\0002\n\002*\000\n\002*\000\n\000\n\t\021\010\000\000\000\000\000\0002\n\002*\000\n\007\n\001\001\022\002\010\000*\007\010 \020\261\300\334\001" + } +} \ No newline at end of file diff --git a/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-5144919410999296 b/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-5144919410999296 new file mode 100644 index 0000000000000..4178a4b002867 --- /dev/null +++ b/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-5144919410999296 @@ -0,0 +1,7 @@ +config { + name: "envoy.ext_authz" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz" + value: "\020\001\032\356\001\n\317\001\n\177\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\022Gtype.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz\032\003\020\200`\022\032envoy.ext_aeny.ext_aututhz" + 
} +} \ No newline at end of file diff --git a/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-5635252589690880 b/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-5635252589690880 new file mode 100644 index 0000000000000..6d7a709ef7ac7 --- /dev/null +++ b/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-5635252589690880 @@ -0,0 +1,7 @@ +config { + name: "envoy.filters.http.tap" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.http.tap.v3.Tap" + value: "\nZ\022X\n\010\032\006\032\004\032\002 \001\022L\nH\"F\n)envoy.service.health.v3.HealthCheckReques\022\031\022\027\n\010BB\017\000\000\000\000\000\"\001R*\010P\000\000\000\000\000\000\000 \001" + } +} diff --git a/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-5713820013297664 b/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-5713820013297664 new file mode 100644 index 0000000000000..58adf9a302923 --- /dev/null +++ b/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-5713820013297664 @@ -0,0 +1,7 @@ +config { + name: "envoy.filters.http.adaptive_concurrency" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.http.adaptive_concurrency.v3.AdaptiveConcurrency" + value: "\n\016\022\005\032\003\010\200\001\032\005\n\003\010\200\001" + } +} \ No newline at end of file diff --git a/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-5714246842449920.fuzz b/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-5714246842449920.fuzz new file mode 100644 index 0000000000000..069f873d8896a --- /dev/null +++ 
b/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-5714246842449920.fuzz @@ -0,0 +1,65 @@ +config { + name: "envoy.router" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + value: "*\023x-envoy-max-retries" + } +} +data { + headers { + headers { + key: "x-envoy-max-retries" + value: "?" + } + headers { + key: "x-envoy-max-retries" + value: "ffffffffffffffffffffffffffffffffffffffffffvfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + } + headers { + key: "x-envoy-max-retries" + value: "ffffffffffffffffffffffffffffffffffffffffffvfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + } + headers { + key: "x-envoy-max-retries" + value: "fff\002fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffbffffffffffffffffmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + } + headers { + key: 
"fff\002fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffbffffffffffffffffmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + value: "fff\002ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmtmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm}mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + } + headers { + key: "x-envoy-max-retries" + value: "ffffffffffffffffffffffffffffffffffffffffffvfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + } + } + trailers { + headers { + key: "x-envoy-max-retries" + value: "?" 
+ } + headers { + key: "x-envoy-max-retries" + value: "&&&&&&&&&&&" + } + headers { + key: "x-envoy-max-retries" + value: "x-envoy-max-retries" + } + headers { + key: "x-envoy-max-retries" + value: "fff\002fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffbffffffffffffffffmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + } + headers { + key: "x-envoy-max-retries" + value: "fff\002ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmtmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm}mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + } + headers { + key: "x-envoy-max-retries" + value: "ffffffffffffffffffffffffffffffffffffffffffvfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + } + headers { + key: "x-envoy-max-retries" + value: "?" 
+ } + } +} \ No newline at end of file diff --git a/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-5726031248621568 b/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-5726031248621568 new file mode 100644 index 0000000000000..a3aa016972ed0 --- /dev/null +++ b/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-5726031248621568 @@ -0,0 +1,7 @@ +config { + name: "envoy.filters.http.tap" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.http.tap.v3.Tap" + value: "\n\002\022\000" + } +} diff --git a/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-5728217898680320 b/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-5728217898680320 new file mode 100644 index 0000000000000..b59917510f205 --- /dev/null +++ b/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-5728217898680320 @@ -0,0 +1,7 @@ +config { + name: "envoy.filters.http.tap" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.http.tap.v3.Tap" + value: "\no\022m\nb\n`\nD\032B\n@\n\030\032\026\n\024\n\n\032\010\032\006J\004\022\002\n\000\n\006\032\004\032\002*\000\n$\n\"\n\002 \001\n\034\032\032\032\030\n\026\n\002 \001\n\020\032\016\032\014\n\n\n\002 \001\n\004\032\002B\000\n\030\n\026\n\002 \001\n\020\032\016\032\014\n\n\n\002 \001\n\004\032\002B\000\022\007\n\005\032\003\n\001(" + } +} \ No newline at end of file diff --git a/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-6133921480966144 b/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-6133921480966144 new file mode 100644 index 0000000000000..1d3dd81ed0ecb --- /dev/null +++ 
b/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-6133921480966144 @@ -0,0 +1,11 @@ +config { + name: "envoy.filters.http.admission_control" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.http.admission_control.v3alpha.AdmissionControl" + value: "\022\000" + } +} +upstream_data { + trailers { + } +} diff --git a/test/extensions/filters/http/common/fuzz/filter_corpus/crash-3014465358f0947e73ac12ccb40b299d5b0646b3 b/test/extensions/filters/http/common/fuzz/filter_corpus/crash-3014465358f0947e73ac12ccb40b299d5b0646b3 index fb63866ea5a51..72bcfa0b0baed 100644 --- a/test/extensions/filters/http/common/fuzz/filter_corpus/crash-3014465358f0947e73ac12ccb40b299d5b0646b3 +++ b/test/extensions/filters/http/common/fuzz/filter_corpus/crash-3014465358f0947e73ac12ccb40b299d5b0646b3 @@ -7,8 +7,10 @@ data { value: "\360\240\240\240\360\240\240\240\360\240\240\240\360\240\240\240\360\240\240\240\360\240\240\240\360\240\240\240\360\240\240\240\360\240\240\240\360\240\240\240\360\240\240\240\360\240\240\240\360\240\240\240\360\240\240\240\360\240\240\240\360\240\240\240\360\240\240\240\360\240\240\240\360\240\240\240\360\240\240\240\360\240\240\240\360\240\240\240\360\240\240\240\314\255" } } - data: "\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177" - data: 
"\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177" + http_body { + data: "\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177" + data: "\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177" + } trailers { headers { key: "6" diff --git a/test/extensions/filters/http/common/fuzz/filter_corpus/crash-bb74d7280823776808e881b20c0a9c87f7a2163b b/test/extensions/filters/http/common/fuzz/filter_corpus/crash-bb74d7280823776808e881b20c0a9c87f7a2163b new file mode 100644 index 0000000000000..7c2bbfbae7f7e --- /dev/null +++ b/test/extensions/filters/http/common/fuzz/filter_corpus/crash-bb74d7280823776808e881b20c0a9c87f7a2163b @@ -0,0 +1,18 @@ +config { + name: "envoy.grpc_json_transcoder" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.http.grpc_json_transcoder.v3.GrpcJsonTranscoder" + value: "\n\001%8\001" + } +} +data { + http_body { + data: "\001\000\000\t" + } + trailers { + headers { + key: "0" + value: 
"||||||||||||||||||||||||||||||||||||||||" + } + } +} diff --git a/test/extensions/filters/http/common/fuzz/filter_corpus/grpc_stats b/test/extensions/filters/http/common/fuzz/filter_corpus/grpc_stats new file mode 100644 index 0000000000000..10704daac17bb --- /dev/null +++ b/test/extensions/filters/http/common/fuzz/filter_corpus/grpc_stats @@ -0,0 +1,47 @@ +config { + name: "envoy.filters.http.grpc_stats" + typed_config: {} +} +data { + headers { + headers { + key: ":method" + value: "POST" + } + headers { + key: ":path" + value: "/bookstore.Bookstore/CreateShelfWithPackageServiceAndMethod" + } + headers { + key: "content-type" + value: "application/grpc" + } + } +} +upstream_data { + headers { + headers { + key: ":status" + value: "200" + } + headers { + key: "content-type" + value: "application/grpc" + } + } + proto_body { + message { + [type.googleapis.com/bookstore.Book] { + id: 16 + title: "Hardy Boys" + } + } + chunk_size: 4 + } + trailers { + headers { + key: "grpc-status" + value: "0" + } + } +} \ No newline at end of file diff --git a/test/extensions/filters/http/common/fuzz/filter_corpus/grpc_transcoding_decode_encode b/test/extensions/filters/http/common/fuzz/filter_corpus/grpc_transcoding_decode_encode new file mode 100644 index 0000000000000..d1a907e186fc1 --- /dev/null +++ b/test/extensions/filters/http/common/fuzz/filter_corpus/grpc_transcoding_decode_encode @@ -0,0 +1,50 @@ +config { + name: "envoy.filters.http.grpc_json_transcoder" + typed_config: {} +} +data { + headers { + headers { + key: "content-type" + value: "application/json" + } + headers { + key: ":method" + value: "POST" + } + headers { + key: ":path" + value: "/bookstore.Bookstore/CreateShelfWithPackageServiceAndMethod" + } + } + http_body { + data: "{\"theme\": \"Children\"}" + } +} +upstream_data { + headers { + headers { + key: ":status" + value: "200" + } + headers { + key: "content-type" + value: "application/grpc" + } + } + proto_body { + message { + 
[type.googleapis.com/bookstore.Book] { + id: 16 + title: "Hardy Boys" + } + } + chunk_size: 100 + } + trailers { + headers { + key: "grpc-status" + value: "0" + } + } +} \ No newline at end of file diff --git a/test/extensions/filters/http/common/fuzz/filter_corpus/grpc_transcoding_http_data b/test/extensions/filters/http/common/fuzz/filter_corpus/grpc_transcoding_http_data new file mode 100644 index 0000000000000..cf0e8282a0830 --- /dev/null +++ b/test/extensions/filters/http/common/fuzz/filter_corpus/grpc_transcoding_http_data @@ -0,0 +1,24 @@ +config { + name: "envoy.filters.http.grpc_json_transcoder" + typed_config: {} +} + +data { + headers { + headers { + key: "content-type" + value: "application/json" + } + headers { + key: ":method" + value: "POST" + } + headers { + key: ":path" + value: "/bookstore.Bookstore/CreateShelfWithPackageServiceAndMethod" + } + } + http_body { + data: "{\"theme\": \"Children\"}" + } +} \ No newline at end of file diff --git a/test/extensions/filters/http/common/fuzz/filter_corpus/grpc_transcoding_proto_data b/test/extensions/filters/http/common/fuzz/filter_corpus/grpc_transcoding_proto_data new file mode 100644 index 0000000000000..3adc75ba874e2 --- /dev/null +++ b/test/extensions/filters/http/common/fuzz/filter_corpus/grpc_transcoding_proto_data @@ -0,0 +1,38 @@ +config { + name: "envoy.filters.http.grpc_json_transcoder" + typed_config: {} +} + +data { + headers { + headers { + key: ":method" + value: "POST" + } + headers { + key: ":path" + value: "/bookstore.Bookstore/CreateShelf" + } + headers { + key: "content-type" + value: "application/grpc" + } + } + proto_body { + message { + [type.googleapis.com/bookstore.CreateShelfRequest] { + shelf: { + id: 32 + theme: "Children" + } + } + } + chunk_size: 3 + } + trailers { + headers { + key: "grpc-status" + value: "0" + } + } +} \ No newline at end of file diff --git a/test/extensions/filters/http/common/fuzz/filter_corpus/jwt_connect 
b/test/extensions/filters/http/common/fuzz/filter_corpus/jwt_connect new file mode 100644 index 0000000000000..2b2d00ecda5b4 --- /dev/null +++ b/test/extensions/filters/http/common/fuzz/filter_corpus/jwt_connect @@ -0,0 +1,7 @@ +config { + name: "envoy.filters.http.jwt_authn" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.http.jwt_authn.v3.JwtAuthentication" + value: "\022\004\n\002b\000" + } +} \ No newline at end of file diff --git a/test/extensions/filters/http/common/fuzz/filter_corpus/metadata_not_reached b/test/extensions/filters/http/common/fuzz/filter_corpus/metadata_not_reached new file mode 100644 index 0000000000000..0e714ec32f42e --- /dev/null +++ b/test/extensions/filters/http/common/fuzz/filter_corpus/metadata_not_reached @@ -0,0 +1,7 @@ +config { + name: "envoy.router" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + value: "\020\001\032\200\005\n\0012\022\372\004:\367\004\022\207\004:\204\004\022\255\001:\252\001\022\004\n\002\n\000\022[2Y\n\006\n\004\n\002\010\001\nK:I\022927\n\002R\000\n12/\n):\'\022\tB\007\n\005\n\001)@\001\022\0222\020\n\006\n\004\n\002\010\002\n\002\032\000\n\002b\000\022\006\n\004\n\002\010\001\n\002\032\000\022\006\n\004\n\002\010\001\022\004\n\002\n\000\n\002\032\000\022\006\n\004\n\002\010\001\022725\n\004\n\002\n\000\n\002\032\000\n)2\'\n!:\037\022\tB\007\n\005\n\001)@\001\022\n2\010\n\002\032\000\n\002\"\000\022\006\n\004\n\002\010\001\n\002\032\000\022\004\n\002\n\000\022\004\n\002\n\000\022\263\0022\260\002\n\002R\000\n\245\0022\242\002\n\206\0022\203\002\nw2u\nj:h\022\004\n\002\n\000\022@2>\n8:6\022&2$\n\002R\000\n\0362\034\n\026:\024\022\n2\010\n\002\032\000\n\002\"\000\022\006\n\004\n\002\010\001\n\002\032\000\022\006\n\004\n\002\010\001\022\004\n\002\n\000\n\002\032\000\022\006\n\004\n\002\010\001\022\0202\016\n\004\n\002\n\000\n\002\032\000\n\002\"\000\022\004\n\002\n\000\n\007R\005\n\001\002\020\001\n.:,\022\0342\032\n\005R\003\n\001\004
\n\r2\013\n\005R\003\n\001\002\n\002\032\000\n\002\032\000\022\006\n\004\n\002\010\001\022\004\n\002\n\000\nR:P\022<2:\n\006\n\004\n\002\010\001\n,:*\022\0342\032\n\005R\003\n\001\004\n\r2\013\n\005R\003\n\001\002\n\002\032\000\n\002\032\000\022\004\n\002\n\000\022\004\n\002\n\000\n\002\032\000\022\006\n\004\n\002\010\001\022\002\"\000\022\004\n\002\n\000\n\004R\002\020\001\n\027:\025\022\r2\013\n\005R\003\n\001\004\n\002\032\000\022\004\n\002\n\000\n\002\032\000\022\006\n\004\n\002\010\001\022\006\n\004\n\002\010\001\022\006\n\004\n\002\010\002\022\002\032\000\022J*H\nD\n\000\032@\022>2<\n:28\n6:4\022220\n.2,\n*2(\n&:$\022\"2 \n\000\n\034:\032\022\0302\026\n\n2\010\n\006:\004\022\0022\000\n\010:\006\022\0042\002\n\000\030\001\022\033:\031\022\002J\000\022\013\n\t\n\007\010\001\022\003\032\001/\022\006\n\004\n\002\010\002\032(\n\016\177\177\177\177\177\177\177\177\177\177\177\177\177\177\022\0262\024\n\002R\000\n\n2\010\n\002R\000\n\002\032\000\n\002\032\0000\001" + } +} \ No newline at end of file diff --git a/test/extensions/filters/http/common/fuzz/filter_corpus/not_implemented_tap b/test/extensions/filters/http/common/fuzz/filter_corpus/not_implemented_tap new file mode 100644 index 0000000000000..0b0167248818c --- /dev/null +++ b/test/extensions/filters/http/common/fuzz/filter_corpus/not_implemented_tap @@ -0,0 +1,7 @@ +config { + name: "envoy.filters.http.tap" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.http.tap.v3.Tap" + value: "\ns\n\000\032o\nf\nd\nb\032`\022^\n$\022\"\n\034\022\032\n\n\032\010\032\006\n\004\n\002\032\000\n\006\022\004\n\002 \001\n\004\032\002 \001\n\002*\000\n2\n0\n.\032,\022*\n\"\022 \n\032\022\030\n\n\032\010\032\006\n\004\n\002\032\000\n\004\022\002\n\000\n\004\032\002 \001\n\002 \001\n\000\n\002 \001\n\002 \001\022\005\n\000\022s\006" + } +} \ No newline at end of file diff --git a/test/extensions/filters/http/common/fuzz/filter_corpus/oom-da39a3ee5e6b4b0d3255bfef95601890afd80709 
b/test/extensions/filters/http/common/fuzz/filter_corpus/oom-da39a3ee5e6b4b0d3255bfef95601890afd80709 new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/test/extensions/filters/http/common/fuzz/filter_corpus/router_buffering b/test/extensions/filters/http/common/fuzz/filter_corpus/router_buffering new file mode 100644 index 0000000000000..04bb89d6e8423 --- /dev/null +++ b/test/extensions/filters/http/common/fuzz/filter_corpus/router_buffering @@ -0,0 +1,74 @@ +config { + name: "envoy.router" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + value: "*\023x-envoy-max-retries" + } +} +data { + headers { + headers { + key: "x-envoy-max-retries" + value: "?" + } + headers { + key: "x-envoy-max-retries" + value: "ffffffffffffffffffffffffffffffffffffffffffvfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + } + headers { + key: "x-envoy-max-retries" + value: "ffffffffffffffffffffffffffffffffffffffffffvfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + } + headers { + key: "x-envoy-max-retries" + value: "&&&&&&&&&&&" + } + headers { + key: 
"fff\002fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffbffffffffffffffffmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + value: "&&&&&&&&&&&" + } + headers { + key: "=" + value: "ffffffffffffffffffffffffffffffffffffffffffvfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + } + } + trailers { + headers { + key: "x-envoy-max!-retries" + value: "&" + } + headers { + key: "type.googleapis.com/envoy.extensions.filters.http.rbac.v3.RBAC" + value: "&&&&&&&&&&&" + } + headers { + key: "x-envoy-max-retries" + } + headers { + key: "x-env-max-retries" + value: "fff\002fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffbffffffffffffffffmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + } + headers { + key: "x-envoy-max-retries" + value: 
"ffffffffffffffffffffffffffffffffffffffffffvfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + } + headers { + key: "x-envoy-max-retries" + value: "fff\002ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmtmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm}mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmfffffffffffffffffffffffffffffffffffffffffffffffffffffmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm}mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmffffffffffffffffffffffffffffffff" + } + headers { + key: "x-envoy-m_x-retries" + value: "x-envoy-max-retries" + } + headers { + key: "x-envoy-max-retries" + value: "?" 
+ } + } + proto_body { + message { + type_url: "type.googleapis.com/google.protobuf.Empty" + } + chunk_size: 32 + } +} \ No newline at end of file diff --git a/test/extensions/filters/http/common/fuzz/filter_fuzz.proto b/test/extensions/filters/http/common/fuzz/filter_fuzz.proto index a97d9dcfd2bbb..20f036684161c 100644 --- a/test/extensions/filters/http/common/fuzz/filter_fuzz.proto +++ b/test/extensions/filters/http/common/fuzz/filter_fuzz.proto @@ -8,5 +8,9 @@ import "envoy/extensions/filters/network/http_connection_manager/v3/http_connect message FilterFuzzTestCase { envoy.extensions.filters.network.http_connection_manager.v3.HttpFilter config = 1; + // Downstream data (named for backwards compatibility). test.fuzz.HttpData data = 2; + + // Upstream data. + test.fuzz.HttpData upstream_data = 3; } diff --git a/test/extensions/filters/http/common/fuzz/filter_fuzz_test.cc b/test/extensions/filters/http/common/fuzz/filter_fuzz_test.cc index 6b076f0da3003..7e773b4f13115 100644 --- a/test/extensions/filters/http/common/fuzz/filter_fuzz_test.cc +++ b/test/extensions/filters/http/common/fuzz/filter_fuzz_test.cc @@ -13,37 +13,44 @@ namespace Extensions { namespace HttpFilters { DEFINE_PROTO_FUZZER(const test::extensions::filters::http::FilterFuzzTestCase& input) { - static PostProcessorRegistration reg = {[](test::extensions::filters::http::FilterFuzzTestCase* - input, - unsigned int seed) { - // This ensures that the mutated configs all have valid filter names and type_urls. The list of - // names and type_urls is pulled from the NamedHttpFilterConfigFactory. All Envoy extensions are - // built with this test (see BUILD file). - // This post-processor mutation is applied only when libprotobuf-mutator calls mutate on an - // input, and *not* during fuzz target execution. Replaying a corpus through the fuzzer will not - // be affected by the post-processor mutation. 
- static const std::vector filter_names = Registry::FactoryRegistry< - Server::Configuration::NamedHttpFilterConfigFactory>::registeredNames(); - static const auto factories = - Registry::FactoryRegistry::factories(); - // Choose a valid filter name. - if (std::find(filter_names.begin(), filter_names.end(), input->config().name()) == - std::end(filter_names)) { - absl::string_view filter_name = filter_names[seed % filter_names.size()]; - input->mutable_config()->set_name(std::string(filter_name)); - } - // Set the corresponding type_url for Any. - auto& factory = factories.at(input->config().name()); - input->mutable_config()->mutable_typed_config()->set_type_url(absl::StrCat( - "type.googleapis.com/", factory->createEmptyConfigProto()->GetDescriptor()->full_name())); - }}; + ABSL_ATTRIBUTE_UNUSED static PostProcessorRegistration reg = { + [](test::extensions::filters::http::FilterFuzzTestCase* input, unsigned int seed) { + // This ensures that the mutated configs all have valid filter names and type_urls. The list + // of names and type_urls is pulled from the NamedHttpFilterConfigFactory. All Envoy + // extensions are built with this test (see BUILD file). This post-processor mutation is + // applied only when libprotobuf-mutator calls mutate on an input, and *not* during fuzz + // target execution. Replaying a corpus through the fuzzer will not be affected by the + // post-processor mutation. + static const std::vector filter_names = Registry::FactoryRegistry< + Server::Configuration::NamedHttpFilterConfigFactory>::registeredNames(); + static const auto factories = Registry::FactoryRegistry< + Server::Configuration::NamedHttpFilterConfigFactory>::factories(); + // Choose a valid filter name. 
+ if (std::find(filter_names.begin(), filter_names.end(), input->config().name()) == + std::end(filter_names)) { + absl::string_view filter_name = filter_names[seed % filter_names.size()]; + input->mutable_config()->set_name(std::string(filter_name)); + } + // Set the corresponding type_url for Any. + auto& factory = factories.at(input->config().name()); + input->mutable_config()->mutable_typed_config()->set_type_url( + absl::StrCat("type.googleapis.com/", + factory->createEmptyConfigProto()->GetDescriptor()->full_name())); + + // For fuzzing proto data, guide the mutator to useful 'Any' types half + // the time. The other half the time, let the fuzzing engine choose + // any message to serialize. + if (seed % 2 == 0 && input->data().has_proto_body()) { + UberFilterFuzzer::guideAnyProtoType(input->mutable_data(), seed / 2); + } + }}; try { // Catch invalid header characters. TestUtility::validate(input); // Fuzz filter. static UberFilterFuzzer fuzzer; - fuzzer.fuzz(input.config(), input.data()); + fuzzer.fuzz(input.config(), input.data(), input.upstream_data()); } catch (const ProtoValidationException& e) { ENVOY_LOG_MISC(debug, "ProtoValidationException: {}", e.what()); } diff --git a/test/extensions/filters/http/common/fuzz/uber_filter.cc b/test/extensions/filters/http/common/fuzz/uber_filter.cc new file mode 100644 index 0000000000000..da0e353d1b204 --- /dev/null +++ b/test/extensions/filters/http/common/fuzz/uber_filter.cc @@ -0,0 +1,241 @@ +#include "test/extensions/filters/http/common/fuzz/uber_filter.h" + +#include "common/config/utility.h" +#include "common/config/version_converter.h" +#include "common/http/message_impl.h" +#include "common/http/utility.h" +#include "common/protobuf/protobuf.h" +#include "common/protobuf/utility.h" + +#include "test/test_common/utility.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { + +UberFilterFuzzer::UberFilterFuzzer() : async_request_{&cluster_manager_.async_client_} { + // This is a decoder 
filter. + ON_CALL(filter_callback_, addStreamDecoderFilter(_)) + .WillByDefault(Invoke([&](Http::StreamDecoderFilterSharedPtr filter) -> void { + decoder_filter_ = filter; + decoder_filter_->setDecoderFilterCallbacks(decoder_callbacks_); + })); + // This is an encoded filter. + ON_CALL(filter_callback_, addStreamEncoderFilter(_)) + .WillByDefault(Invoke([&](Http::StreamEncoderFilterSharedPtr filter) -> void { + encoder_filter_ = filter; + encoder_filter_->setEncoderFilterCallbacks(encoder_callbacks_); + })); + // This is a decoder and encoder filter. + ON_CALL(filter_callback_, addStreamFilter(_)) + .WillByDefault(Invoke([&](Http::StreamFilterSharedPtr filter) -> void { + decoder_filter_ = filter; + decoder_filter_->setDecoderFilterCallbacks(decoder_callbacks_); + encoder_filter_ = filter; + encoder_filter_->setEncoderFilterCallbacks(encoder_callbacks_); + })); + // This filter supports access logging. + ON_CALL(filter_callback_, addAccessLogHandler(_)) + .WillByDefault( + Invoke([&](AccessLog::InstanceSharedPtr handler) -> void { access_logger_ = handler; })); + // This handles stopping execution after a direct response is sent. + ON_CALL(decoder_callbacks_, sendLocalReply(_, _, _, _, _)) + .WillByDefault( + Invoke([this](Http::Code code, absl::string_view body, + std::function modify_headers, + const absl::optional grpc_status, + absl::string_view details) { + enabled_ = false; + decoder_callbacks_.sendLocalReply_(code, body, modify_headers, grpc_status, details); + })); + // Set expectations for particular filters that may get fuzzed. 
+ perFilterSetup(); +} + +std::vector UberFilterFuzzer::parseHttpData(const test::fuzz::HttpData& data) { + std::vector data_chunks; + + if (data.has_http_body()) { + data_chunks.reserve(data.http_body().data_size()); + for (const std::string& http_data : data.http_body().data()) { + data_chunks.push_back(http_data); + } + } else if (data.has_proto_body()) { + const std::string serialized = data.proto_body().message().value(); + data_chunks = absl::StrSplit(serialized, absl::ByLength(data.proto_body().chunk_size())); + } + + return data_chunks; +} + +template +void UberFilterFuzzer::runData(FilterType* filter, const test::fuzz::HttpData& data) { + bool end_stream = false; + enabled_ = true; + if (data.body_case() == test::fuzz::HttpData::BODY_NOT_SET && !data.has_trailers()) { + end_stream = true; + } + const auto& headersStatus = sendHeaders(filter, data, end_stream); + ENVOY_LOG_MISC(debug, "Finished with FilterHeadersStatus: {}", headersStatus); + if ((headersStatus != Http::FilterHeadersStatus::Continue && + headersStatus != Http::FilterHeadersStatus::StopIteration) || + !enabled_) { + return; + } + + const std::vector data_chunks = parseHttpData(data); + for (size_t i = 0; i < data_chunks.size(); i++) { + if (!data.has_trailers() && i == data_chunks.size() - 1) { + end_stream = true; + } + Buffer::OwnedImpl buffer(data_chunks[i]); + const auto& dataStatus = sendData(filter, buffer, end_stream); + ENVOY_LOG_MISC(debug, "Finished with FilterDataStatus: {}", dataStatus); + if (dataStatus != Http::FilterDataStatus::Continue || !enabled_) { + return; + } + } + + if (data.has_trailers() && enabled_) { + sendTrailers(filter, data); + } +} + +template <> +Http::FilterHeadersStatus UberFilterFuzzer::sendHeaders(Http::StreamDecoderFilter* filter, + const test::fuzz::HttpData& data, + bool end_stream) { + request_headers_ = Fuzz::fromHeaders(data.headers()); + if (request_headers_.Path() == nullptr) { + request_headers_.setPath("/foo"); + } + if 
(request_headers_.Method() == nullptr) { + request_headers_.setMethod("GET"); + } + if (request_headers_.Host() == nullptr) { + request_headers_.setHost("foo.com"); + } + + ENVOY_LOG_MISC(debug, "Decoding headers (end_stream={}):\n{} ", end_stream, request_headers_); + return filter->decodeHeaders(request_headers_, end_stream); +} + +template <> +Http::FilterHeadersStatus UberFilterFuzzer::sendHeaders(Http::StreamEncoderFilter* filter, + const test::fuzz::HttpData& data, + bool end_stream) { + response_headers_ = Fuzz::fromHeaders(data.headers()); + + // Status must be a valid unsigned long. If not set, the utility function below will throw + // an exception on the data path of some filters. This should never happen in production, so catch + // the exception and set to a default value. + try { + (void)Http::Utility::getResponseStatus(response_headers_); + } catch (const Http::CodecClientException& e) { + response_headers_.setStatus(200); + } + + ENVOY_LOG_MISC(debug, "Encoding headers (end_stream={}):\n{} ", end_stream, response_headers_); + Http::FilterHeadersStatus status = filter->encodeHeaders(response_headers_, end_stream); + if (end_stream) { + filter->encodeComplete(); + } + return status; +} + +template <> +Http::FilterDataStatus UberFilterFuzzer::sendData(Http::StreamDecoderFilter* filter, + Buffer::Instance& buffer, bool end_stream) { + ENVOY_LOG_MISC(debug, "Decoding data (end_stream={}): {} ", end_stream, buffer.toString()); + return filter->decodeData(buffer, end_stream); +} + +template <> +Http::FilterDataStatus UberFilterFuzzer::sendData(Http::StreamEncoderFilter* filter, + Buffer::Instance& buffer, bool end_stream) { + ENVOY_LOG_MISC(debug, "Encoding data (end_stream={}): {} ", end_stream, buffer.toString()); + Http::FilterDataStatus status = filter->encodeData(buffer, end_stream); + if (end_stream) { + filter->encodeComplete(); + } + return status; +} + +template <> +void UberFilterFuzzer::sendTrailers(Http::StreamDecoderFilter* filter, + const 
test::fuzz::HttpData& data) { + request_trailers_ = Fuzz::fromHeaders(data.trailers()); + ENVOY_LOG_MISC(debug, "Decoding trailers:\n{} ", request_trailers_); + filter->decodeTrailers(request_trailers_); +} + +template <> +void UberFilterFuzzer::sendTrailers(Http::StreamEncoderFilter* filter, + const test::fuzz::HttpData& data) { + response_trailers_ = Fuzz::fromHeaders(data.trailers()); + ENVOY_LOG_MISC(debug, "Encoding trailers:\n{} ", response_trailers_); + filter->encodeTrailers(response_trailers_); + filter->encodeComplete(); +} + +void UberFilterFuzzer::accessLog(AccessLog::Instance* access_logger, + const StreamInfo::StreamInfo& stream_info) { + ENVOY_LOG_MISC(debug, "Access logging"); + access_logger->log(&request_headers_, &response_headers_, &response_trailers_, stream_info); +} + +void UberFilterFuzzer::fuzz( + const envoy::extensions::filters::network::http_connection_manager::v3::HttpFilter& + proto_config, + const test::fuzz::HttpData& downstream_data, const test::fuzz::HttpData& upstream_data) { + try { + // Try to create the filter. Exit early if the config is invalid or violates PGV constraints. + ENVOY_LOG_MISC(info, "filter name {}", proto_config.name()); + auto& factory = Config::Utility::getAndCheckFactoryByName< + Server::Configuration::NamedHttpFilterConfigFactory>(proto_config.name()); + ProtobufTypes::MessagePtr message = Config::Utility::translateToFactoryConfig( + proto_config, factory_context_.messageValidationVisitor(), factory); + // Clean-up config with filter-specific logic before it runs through validations. + cleanFuzzedConfig(proto_config.name(), message.get()); + cb_ = factory.createFilterFactoryFromProto(*message, "stats", factory_context_); + cb_(filter_callback_); + } catch (const EnvoyException& e) { + ENVOY_LOG_MISC(debug, "Controlled exception {}", e.what()); + return; + } + + // Data path should not throw exceptions. 
+ if (decoder_filter_ != nullptr) { + runData(decoder_filter_.get(), downstream_data); + } + if (encoder_filter_ != nullptr) { + runData(encoder_filter_.get(), upstream_data); + } + if (access_logger_ != nullptr) { + accessLog(access_logger_.get(), stream_info_); + } + + reset(); +} + +void UberFilterFuzzer::reset() { + if (decoder_filter_ != nullptr) { + decoder_filter_->onDestroy(); + } + decoder_filter_.reset(); + + if (encoder_filter_ != nullptr) { + encoder_filter_->onDestroy(); + } + encoder_filter_.reset(); + + access_logger_.reset(); + request_headers_.clear(); + response_headers_.clear(); + request_trailers_.clear(); + response_trailers_.clear(); +} + +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/http/common/fuzz/uber_filter.h b/test/extensions/filters/http/common/fuzz/uber_filter.h index 2df4483a4a610..07aa3322de795 100644 --- a/test/extensions/filters/http/common/fuzz/uber_filter.h +++ b/test/extensions/filters/http/common/fuzz/uber_filter.h @@ -1,11 +1,8 @@ -#include "common/config/utility.h" -#include "common/config/version_converter.h" -#include "common/protobuf/utility.h" - #include "test/fuzz/utility.h" #include "test/mocks/buffer/mocks.h" #include "test/mocks/http/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" +#include "test/mocks/stream_info/mocks.h" namespace Envoy { namespace Extensions { @@ -13,134 +10,77 @@ namespace HttpFilters { class UberFilterFuzzer { public: - UberFilterFuzzer() { - // Need to set for both a decoder filter and an encoder/decoder filter. 
- ON_CALL(filter_callback_, addStreamDecoderFilter(_)) - .WillByDefault( - Invoke([&](std::shared_ptr filter) -> void { - filter_ = filter; - filter_->setDecoderFilterCallbacks(callbacks_); - })); - ON_CALL(filter_callback_, addStreamFilter(_)) - .WillByDefault( - Invoke([&](std::shared_ptr filter) -> void { - filter_ = filter; - filter_->setDecoderFilterCallbacks(callbacks_); - })); - setExpectations(); - } - - void setExpectations() { - // Ext-authz setup - prepareExtAuthz(); - prepareCache(); - prepareTap(); - } - - void prepareExtAuthz() { - // Preparing the expectations for the ext_authz filter. - addr_ = std::make_shared("1.2.3.4", 1111); - ON_CALL(connection_, remoteAddress()).WillByDefault(testing::ReturnRef(addr_)); - ON_CALL(connection_, localAddress()).WillByDefault(testing::ReturnRef(addr_)); - ON_CALL(callbacks_, connection()).WillByDefault(testing::Return(&connection_)); - ON_CALL(callbacks_, activeSpan()) - .WillByDefault(testing::ReturnRef(Tracing::NullSpan::instance())); - callbacks_.stream_info_.protocol_ = Envoy::Http::Protocol::Http2; - } - - void prepareCache() { - // Prepare expectations for dynamic forward proxy. - ON_CALL(factory_context_.dispatcher_, createDnsResolver(_, _)) - .WillByDefault(testing::Return(resolver_)); - } - - void prepareTap() { - ON_CALL(factory_context_, admin()).WillByDefault(testing::ReturnRef(factory_context_.admin_)); - ON_CALL(factory_context_.admin_, addHandler(_, _, _, _, _)) - .WillByDefault(testing::Return(true)); - ON_CALL(factory_context_.admin_, removeHandler(_)).WillByDefault(testing::Return(true)); - } - - // This executes the decode methods to be fuzzed. 
- void decode(Http::StreamDecoderFilter* filter, const test::fuzz::HttpData& data) { - bool end_stream = false; - - auto headers = Fuzz::fromHeaders(data.headers()); - if (headers.Path() == nullptr) { - headers.setPath("/foo"); - } - if (headers.Method() == nullptr) { - headers.setMethod("GET"); - } - if (headers.Host() == nullptr) { - headers.setHost("foo.com"); - } - - if (data.data().empty() && !data.has_trailers()) { - end_stream = true; - } - ENVOY_LOG_MISC(debug, "Decoding headers: {} ", data.headers().DebugString()); - const auto& headersStatus = filter->decodeHeaders(headers, end_stream); - if (headersStatus != Http::FilterHeadersStatus::Continue && - headersStatus != Http::FilterHeadersStatus::StopIteration) { - return; - } - - for (int i = 0; i < data.data().size(); i++) { - if (i == data.data().size() - 1 && !data.has_trailers()) { - end_stream = true; - } - Buffer::OwnedImpl buffer(data.data().Get(i)); - ENVOY_LOG_MISC(debug, "Decoding data: {} ", buffer.toString()); - if (filter->decodeData(buffer, end_stream) != Http::FilterDataStatus::Continue) { - return; - } - } - - if (data.has_trailers()) { - ENVOY_LOG_MISC(debug, "Decoding trailers: {} ", data.trailers().DebugString()); - auto trailers = Fuzz::fromHeaders(data.trailers()); - filter->decodeTrailers(trailers); - } - } - - // This creates the filter config and runs decode. + UberFilterFuzzer(); + + // This creates the filter config and runs the fuzzed data against the filter. void fuzz(const envoy::extensions::filters::network::http_connection_manager::v3::HttpFilter& proto_config, - const test::fuzz::HttpData& data) { - try { - // Try to create the filter. Exit early if the config is invalid or violates PGV constraints. 
- ENVOY_LOG_MISC(info, "filter name {}", proto_config.name()); - auto& factory = Config::Utility::getAndCheckFactoryByName< - Server::Configuration::NamedHttpFilterConfigFactory>(proto_config.name()); - ProtobufTypes::MessagePtr message = Config::Utility::translateToFactoryConfig( - proto_config, factory_context_.messageValidationVisitor(), factory); - cb_ = factory.createFilterFactoryFromProto(*message, "stats", factory_context_); - cb_(filter_callback_); - } catch (const EnvoyException& e) { - ENVOY_LOG_MISC(debug, "Controlled exception {}", e.what()); - return; - } - - decode(filter_.get(), data); - reset(); - } - - void reset() { - if (filter_ != nullptr) { - filter_->onDestroy(); - } - filter_.reset(); - } + const test::fuzz::HttpData& downstream_data, const test::fuzz::HttpData& upstream_data); + + // This executes the filter decoders/encoders with the fuzzed data. + template void runData(FilterType* filter, const test::fuzz::HttpData& data); + + // This executes the access logger with the fuzzed headers/trailers. + void accessLog(AccessLog::Instance* access_logger, const StreamInfo::StreamInfo& stream_info); + + // For fuzzing proto data, guide the mutator to useful 'Any' types. + static void guideAnyProtoType(test::fuzz::HttpData* mutable_data, uint choice); + + // Resets cached data (request headers, etc.). Should be called for each fuzz iteration. + void reset(); + +protected: + // Set-up filter specific mock expectations in constructor. + void perFilterSetup(); + // Filter specific input cleanup. + void cleanFuzzedConfig(absl::string_view filter_name, Protobuf::Message* message); + + // Parses http or proto body into chunks. + static std::vector parseHttpData(const test::fuzz::HttpData& data); + + // Templated functions to validate and send headers/data/trailers for decoders/encoders. + // General functions are deleted, but templated specializations for encoders/decoders are defined + // in the cc file. 
+ template + Http::FilterHeadersStatus sendHeaders(FilterType* filter, const test::fuzz::HttpData& data, + bool end_stream) = delete; + + template + Http::FilterDataStatus sendData(FilterType* filter, Buffer::Instance& buffer, + bool end_stream) = delete; + + template + void sendTrailers(FilterType* filter, const test::fuzz::HttpData& data) = delete; +private: + // This keeps track of when a filter will stop decoding due to direct responses. + bool enabled_ = true; NiceMock factory_context_; - NiceMock callbacks_; NiceMock filter_callback_; std::shared_ptr resolver_{std::make_shared()}; - std::shared_ptr filter_; Http::FilterFactoryCb cb_; NiceMock connection_; Network::Address::InstanceConstSharedPtr addr_; + NiceMock cluster_manager_; + NiceMock async_request_; + NiceMock stream_info_; + + // Mocked callbacks. + NiceMock decoder_callbacks_; + NiceMock encoder_callbacks_; + + // Filter constructed from the config. + Http::StreamDecoderFilterSharedPtr decoder_filter_; + Http::StreamEncoderFilterSharedPtr encoder_filter_; + AccessLog::InstanceSharedPtr access_logger_; + + // Headers/trailers need to be saved for the lifetime of the filter, + // so save them as member variables. + // TODO(nareddyt): Use for access logging in a followup PR. 
+ Http::TestRequestHeaderMapImpl request_headers_; + Http::TestResponseHeaderMapImpl response_headers_; + Http::TestRequestTrailerMapImpl request_trailers_; + Http::TestResponseTrailerMapImpl response_trailers_; }; } // namespace HttpFilters diff --git a/test/extensions/filters/http/common/fuzz/uber_per_filter.cc b/test/extensions/filters/http/common/fuzz/uber_per_filter.cc new file mode 100644 index 0000000000000..d816d5a26ab0a --- /dev/null +++ b/test/extensions/filters/http/common/fuzz/uber_per_filter.cc @@ -0,0 +1,175 @@ +#include "envoy/extensions/filters/http/grpc_json_transcoder/v3/transcoder.pb.h" +#include "envoy/extensions/filters/http/jwt_authn/v3/config.pb.h" +#include "envoy/extensions/filters/http/squash/v3/squash.pb.h" +#include "envoy/extensions/filters/http/tap/v3/tap.pb.h" + +#include "extensions/filters/http/common/utility.h" +#include "extensions/filters/http/well_known_names.h" + +#include "test/extensions/filters/http/common/fuzz/uber_filter.h" +#include "test/proto/bookstore.pb.h" + +// This file contains any filter-specific setup and input clean-up needed in the generic filter fuzz +// target. + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace { + +void addFileDescriptorsRecursively(const Protobuf::FileDescriptor& descriptor, + Protobuf::FileDescriptorSet& set, + absl::flat_hash_set& added_descriptors) { + if (!added_descriptors.insert(descriptor.name()).second) { + // Already added. 
+ return; + } + for (int i = 0; i < descriptor.dependency_count(); i++) { + addFileDescriptorsRecursively(*descriptor.dependency(i), set, added_descriptors); + } + descriptor.CopyTo(set.add_file()); +} + +void addBookstoreProtoDescriptor(Protobuf::Message* message) { + envoy::extensions::filters::http::grpc_json_transcoder::v3::GrpcJsonTranscoder& config = + dynamic_cast( + *message); + config.clear_services(); + config.add_services("bookstore.Bookstore"); + + Protobuf::FileDescriptorSet descriptor_set; + const auto* file_descriptor = + Protobuf::DescriptorPool::generated_pool()->FindFileByName("test/proto/bookstore.proto"); + ASSERT(file_descriptor != nullptr); + // Create a set to keep track of descriptors as they are added. + absl::flat_hash_set added_descriptors; + addFileDescriptorsRecursively(*file_descriptor, descriptor_set, added_descriptors); + descriptor_set.SerializeToString(config.mutable_proto_descriptor_bin()); +} +} // namespace + +void UberFilterFuzzer::guideAnyProtoType(test::fuzz::HttpData* mutable_data, uint choice) { + // These types are request/response from the test Bookstore service + // for the gRPC Transcoding filter. 
+ static const std::vector expected_types = { + "type.googleapis.com/bookstore.ListShelvesResponse", + "type.googleapis.com/bookstore.CreateShelfRequest", + "type.googleapis.com/bookstore.GetShelfRequest", + "type.googleapis.com/bookstore.DeleteShelfRequest", + "type.googleapis.com/bookstore.ListBooksRequest", + "type.googleapis.com/bookstore.CreateBookRequest", + "type.googleapis.com/bookstore.GetBookRequest", + "type.googleapis.com/bookstore.UpdateBookRequest", + "type.googleapis.com/bookstore.DeleteBookRequest", + "type.googleapis.com/bookstore.GetAuthorRequest", + "type.googleapis.com/bookstore.EchoBodyRequest", + "type.googleapis.com/bookstore.EchoStructReqResp", + "type.googleapis.com/bookstore.Shelf", + "type.googleapis.com/bookstore.Book", + "type.googleapis.com/google.protobuf.Empty", + "type.googleapis.com/google.api.HttpBody", + }; + ProtobufWkt::Any* mutable_any = mutable_data->mutable_proto_body()->mutable_message(); + const std::string& type_url = expected_types[choice % expected_types.size()]; + mutable_any->set_type_url(type_url); +} + +void removeConnectMatcher(Protobuf::Message* message) { + envoy::extensions::filters::http::jwt_authn::v3::JwtAuthentication& config = + dynamic_cast(*message); + for (auto& rules : *config.mutable_rules()) { + if (rules.match().has_connect_matcher()) { + rules.mutable_match()->set_path("/"); + } + } +} + +void cleanAttachmentTemplate(Protobuf::Message* message) { + envoy::extensions::filters::http::squash::v3::Squash& config = + dynamic_cast(*message); + std::string json; + Protobuf::util::JsonPrintOptions json_options; + if (!Protobuf::util::MessageToJsonString(config.attachment_template(), &json, json_options) + .ok()) { + config.clear_attachment_template(); + } +} + +void cleanTapConfig(Protobuf::Message* message) { + envoy::extensions::filters::http::tap::v3::Tap& config = + dynamic_cast(*message); + if (config.common_config().config_type_case() == + 
envoy::extensions::common::tap::v3::CommonExtensionConfig::ConfigTypeCase::kTapdsConfig) { + config.mutable_common_config()->mutable_static_config()->mutable_match_config()->set_any_match( + true); + } + // TODO(samflattery): remove once StreamingGrpcSink is implemented + // a static config filter is required to have one sink, but since validation isn't performed on + // the filter until after this function runs, we have to manually check that there are sinks + // before checking that they are not StreamingGrpc + else if (config.common_config().config_type_case() == + envoy::extensions::common::tap::v3::CommonExtensionConfig::ConfigTypeCase:: + kStaticConfig && + !config.common_config().static_config().output_config().sinks().empty() && + config.common_config() + .static_config() + .output_config() + .sinks(0) + .output_sink_type_case() == + envoy::config::tap::v3::OutputSink::OutputSinkTypeCase::kStreamingGrpc) { + // will be caught in UberFilterFuzzer::fuzz + throw EnvoyException("received input with not implemented output_sink_type StreamingGrpcSink"); + } +} + +void UberFilterFuzzer::cleanFuzzedConfig(absl::string_view filter_name, + Protobuf::Message* message) { + const std::string name = Extensions::HttpFilters::Common::FilterNameUtil::canonicalFilterName( + std::string(filter_name)); + // Map filter name to clean-up function. + if (filter_name == HttpFilterNames::get().GrpcJsonTranscoder) { + // Add a valid service proto descriptor. + addBookstoreProtoDescriptor(message); + } else if (name == HttpFilterNames::get().Squash) { + cleanAttachmentTemplate(message); + } else if (name == HttpFilterNames::get().Tap) { + // TapDS oneof field and OutputSinkType StreamingGrpc not implemented + cleanTapConfig(message); + } + if (filter_name == HttpFilterNames::get().JwtAuthn) { + // Remove when connect matcher is implemented for Jwt Authentication filter. 
+ removeConnectMatcher(message); + } +} + +void UberFilterFuzzer::perFilterSetup() { + // Prepare expectations for the ext_authz filter. + addr_ = std::make_shared("1.2.3.4", 1111); + ON_CALL(connection_, remoteAddress()).WillByDefault(testing::ReturnRef(addr_)); + ON_CALL(connection_, localAddress()).WillByDefault(testing::ReturnRef(addr_)); + ON_CALL(factory_context_, clusterManager()).WillByDefault(testing::ReturnRef(cluster_manager_)); + ON_CALL(cluster_manager_.async_client_, send_(_, _, _)).WillByDefault(Return(&async_request_)); + + ON_CALL(decoder_callbacks_, connection()).WillByDefault(testing::Return(&connection_)); + ON_CALL(decoder_callbacks_, activeSpan()) + .WillByDefault(testing::ReturnRef(Tracing::NullSpan::instance())); + decoder_callbacks_.stream_info_.protocol_ = Envoy::Http::Protocol::Http2; + + ON_CALL(encoder_callbacks_, connection()).WillByDefault(testing::Return(&connection_)); + ON_CALL(encoder_callbacks_, activeSpan()) + .WillByDefault(testing::ReturnRef(Tracing::NullSpan::instance())); + encoder_callbacks_.stream_info_.protocol_ = Envoy::Http::Protocol::Http2; + + // Prepare expectations for dynamic forward proxy. + ON_CALL(factory_context_.dispatcher_, createDnsResolver(_, _)) + .WillByDefault(testing::Return(resolver_)); + + // Prepare expectations for TAP config. 
+ ON_CALL(factory_context_, admin()).WillByDefault(testing::ReturnRef(factory_context_.admin_)); + ON_CALL(factory_context_.admin_, addHandler(_, _, _, _, _)).WillByDefault(testing::Return(true)); + ON_CALL(factory_context_.admin_, removeHandler(_)).WillByDefault(testing::Return(true)); +} + +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/http/common/jwks_fetcher_test.cc b/test/extensions/filters/http/common/jwks_fetcher_test.cc index 6cfcd8f14af46..79a04018e8cbb 100644 --- a/test/extensions/filters/http/common/jwks_fetcher_test.cc +++ b/test/extensions/filters/http/common/jwks_fetcher_test.cc @@ -10,6 +10,7 @@ #include "test/extensions/filters/http/common/mock.h" #include "test/mocks/http/mocks.h" +#include "test/mocks/server/factory_context.h" #include "test/test_common/utility.h" using envoy::config::core::v3::HttpUri; diff --git a/test/extensions/filters/http/common/mock.h b/test/extensions/filters/http/common/mock.h index 5f8ef99004309..804c00b0eb660 100644 --- a/test/extensions/filters/http/common/mock.h +++ b/test/extensions/filters/http/common/mock.h @@ -4,7 +4,7 @@ #include "extensions/filters/http/common/jwks_fetcher.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/upstream/mocks.h" #include "gmock/gmock.h" diff --git a/test/extensions/filters/http/compressor/BUILD b/test/extensions/filters/http/compressor/BUILD new file mode 100644 index 0000000000000..6c9a99ca3db90 --- /dev/null +++ b/test/extensions/filters/http/compressor/BUILD @@ -0,0 +1,64 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_package", + "envoy_proto_library", +) +load( + "//test/extensions:extensions_build_system.bzl", + "envoy_extension_cc_test", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_extension_cc_test( + name = "compressor_filter_test", + srcs = [ + "compressor_filter_test.cc", + ], + extension_name = "envoy.filters.http.compressor", + deps = [ + 
"//source/extensions/filters/http/compressor:compressor_filter_lib", + "//test/mocks/compression/compressor:compressor_mocks", + "//test/mocks/runtime:runtime_mocks", + "//test/test_common:utility_lib", + ], +) + +envoy_extension_cc_test( + name = "compressor_filter_integration_test", + srcs = [ + "compressor_filter_integration_test.cc", + ], + extension_name = "envoy.filters.http.compressor", + tags = ["fails_on_windows"], + deps = [ + "//source/extensions/compression/gzip/compressor:config", + "//source/extensions/compression/gzip/decompressor:config", + "//source/extensions/filters/http/compressor:config", + "//test/integration:http_integration_lib", + "//test/test_common:simulated_time_system_lib", + "//test/test_common:utility_lib", + ], +) + +envoy_proto_library( + name = "mock_config", + srcs = ["mock_compressor_library.proto"], +) + +envoy_extension_cc_test( + name = "config_test", + srcs = [ + "config_test.cc", + ], + extension_name = "envoy.filters.http.compressor", + deps = [ + ":mock_config_cc_proto", + "//source/extensions/filters/http/compressor:config", + "//test/mocks/runtime:runtime_mocks", + "//test/mocks/server:factory_context_mocks", + "//test/test_common:utility_lib", + ], +) diff --git a/test/extensions/filters/http/compressor/compressor_filter_integration_test.cc b/test/extensions/filters/http/compressor/compressor_filter_integration_test.cc new file mode 100644 index 0000000000000..0ded467631a26 --- /dev/null +++ b/test/extensions/filters/http/compressor/compressor_filter_integration_test.cc @@ -0,0 +1,327 @@ +#include "envoy/event/timer.h" + +#include "extensions/compression/gzip/decompressor/zlib_decompressor_impl.h" + +#include "test/integration/http_integration.h" +#include "test/test_common/simulated_time_system.h" +#include "test/test_common/utility.h" + +#include "gtest/gtest.h" + +namespace Envoy { + +class CompressorIntegrationTest : public testing::TestWithParam, + public Event::SimulatedTimeSystem, + public HttpIntegrationTest { 
+public: + CompressorIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, GetParam()) {} + + void SetUp() override { decompressor_.init(window_bits); } + void TearDown() override { cleanupUpstreamAndDownstream(); } + + void initializeFilter(const std::string& config) { + config_helper_.addFilter(config); + initialize(); + codec_client_ = makeHttpConnection(makeClientConnection((lookupPort("http")))); + } + + void doRequestAndCompression(Http::TestRequestHeaderMapImpl&& request_headers, + Http::TestResponseHeaderMapImpl&& response_headers) { + uint64_t content_length; + ASSERT_TRUE(absl::SimpleAtoi(response_headers.get_("content-length"), &content_length)); + const Buffer::OwnedImpl expected_response{std::string(content_length, 'a')}; + auto response = + sendRequestAndWaitForResponse(request_headers, 0, response_headers, content_length); + EXPECT_TRUE(upstream_request_->complete()); + EXPECT_EQ(0U, upstream_request_->bodyLength()); + EXPECT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().getStatusValue()); + EXPECT_EQ(Http::CustomHeaders::get().ContentEncodingValues.Gzip, + response->headers() + .get(Http::CustomHeaders::get().ContentEncoding) + ->value() + .getStringView()); + EXPECT_EQ(Http::Headers::get().TransferEncodingValues.Chunked, + response->headers().getTransferEncodingValue()); + + Buffer::OwnedImpl decompressed_response{}; + const Buffer::OwnedImpl compressed_response{response->body()}; + decompressor_.decompress(compressed_response, decompressed_response); + ASSERT_EQ(content_length, decompressed_response.length()); + EXPECT_TRUE(TestUtility::buffersEqual(expected_response, decompressed_response)); + } + + void doRequestAndNoCompression(Http::TestRequestHeaderMapImpl&& request_headers, + Http::TestResponseHeaderMapImpl&& response_headers) { + uint64_t content_length; + ASSERT_TRUE(absl::SimpleAtoi(response_headers.get_("content-length"), &content_length)); + auto response = + 
sendRequestAndWaitForResponse(request_headers, 0, response_headers, content_length); + EXPECT_TRUE(upstream_request_->complete()); + EXPECT_EQ(0U, upstream_request_->bodyLength()); + EXPECT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().getStatusValue()); + ASSERT_TRUE(response->headers().get(Http::CustomHeaders::get().ContentEncoding) == nullptr); + ASSERT_EQ(content_length, response->body().size()); + EXPECT_EQ(response->body(), std::string(content_length, 'a')); + } + + const std::string full_config{R"EOF( + name: compressor + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.compressor.v3.Compressor + disable_on_etag_header: true + content_length: 100 + content_type: + - text/html + - application/json + compressor_library: + name: testlib + typed_config: + "@type": type.googleapis.com/envoy.extensions.compression.gzip.compressor.v3.Gzip + memory_level: 3 + window_bits: 10 + compression_level: best_compression + compression_strategy: rle + )EOF"}; + + const std::string default_config{R"EOF( + name: envoy.filters.http.compressor + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.compressor.v3.Compressor + compressor_library: + name: testlib + typed_config: + "@type": type.googleapis.com/envoy.extensions.compression.gzip.compressor.v3.Gzip + )EOF"}; + + const uint64_t window_bits{15 | 16}; + + Stats::IsolatedStoreImpl stats_store_; + Extensions::Compression::Gzip::Decompressor::ZlibDecompressorImpl decompressor_{stats_store_, + "test"}; +}; + +INSTANTIATE_TEST_SUITE_P(IpVersions, CompressorIntegrationTest, + testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + TestUtility::ipTestParamsToString); + +/** + * Exercises gzip compression with default configuration. 
+ */ +TEST_P(CompressorIntegrationTest, AcceptanceDefaultConfigTest) { + initializeFilter(default_config); + doRequestAndCompression(Http::TestRequestHeaderMapImpl{{":method", "GET"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}, + {"accept-encoding", "deflate, gzip"}}, + Http::TestResponseHeaderMapImpl{{":status", "200"}, + {"content-length", "4400"}, + {"content-type", "text/xml"}}); +} + +/** + * Exercises gzip compression with full configuration. + */ +TEST_P(CompressorIntegrationTest, AcceptanceFullConfigTest) { + initializeFilter(full_config); + doRequestAndCompression(Http::TestRequestHeaderMapImpl{{":method", "GET"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}, + {"accept-encoding", "deflate, gzip"}}, + Http::TestResponseHeaderMapImpl{{":status", "200"}, + {"content-length", "4400"}, + {"content-type", "application/json"}}); +} + +/** + * Exercises filter when client request contains 'identity' type. + */ +TEST_P(CompressorIntegrationTest, IdentityAcceptEncoding) { + initializeFilter(default_config); + doRequestAndNoCompression(Http::TestRequestHeaderMapImpl{{":method", "GET"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}, + {"accept-encoding", "identity"}}, + Http::TestResponseHeaderMapImpl{{":status", "200"}, + {"content-length", "128"}, + {"content-type", "text/plain"}}); +} + +/** + * Exercises filter when client request contains unsupported 'accept-encoding' type. + */ +TEST_P(CompressorIntegrationTest, NotSupportedAcceptEncoding) { + initializeFilter(default_config); + doRequestAndNoCompression(Http::TestRequestHeaderMapImpl{{":method", "GET"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}, + {"accept-encoding", "deflate, br"}}, + Http::TestResponseHeaderMapImpl{{":status", "200"}, + {"content-length", "128"}, + {"content-type", "text/plain"}}); +} + +/** + * Exercises filter when upstream response is already encoded. 
+ */ +TEST_P(CompressorIntegrationTest, UpstreamResponseAlreadyEncoded) { + initializeFilter(default_config); + Http::TestRequestHeaderMapImpl request_headers{{":method", "GET"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}, + {"accept-encoding", "deflate, gzip"}}; + + Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}, + {"content-encoding", "br"}, + {"content-length", "128"}, + {"content-type", "application/json"}}; + + auto response = sendRequestAndWaitForResponse(request_headers, 0, response_headers, 128); + + EXPECT_TRUE(upstream_request_->complete()); + EXPECT_EQ(0U, upstream_request_->bodyLength()); + EXPECT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().getStatusValue()); + ASSERT_EQ( + "br", + response->headers().get(Http::CustomHeaders::get().ContentEncoding)->value().getStringView()); + EXPECT_EQ(128U, response->body().size()); +} + +/** + * Exercises filter when upstream responds with content length below the default threshold. + */ +TEST_P(CompressorIntegrationTest, NotEnoughContentLength) { + initializeFilter(default_config); + Http::TestRequestHeaderMapImpl request_headers{{":method", "GET"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}, + {"accept-encoding", "deflate, gzip"}}; + + Http::TestResponseHeaderMapImpl response_headers{ + {":status", "200"}, {"content-length", "10"}, {"content-type", "application/json"}}; + + auto response = sendRequestAndWaitForResponse(request_headers, 0, response_headers, 10); + + EXPECT_TRUE(upstream_request_->complete()); + EXPECT_EQ(0U, upstream_request_->bodyLength()); + EXPECT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().getStatusValue()); + ASSERT_TRUE(response->headers().get(Http::CustomHeaders::get().ContentEncoding) == nullptr); + EXPECT_EQ(10U, response->body().size()); +} + +/** + * Exercises filter when response from upstream service is empty. 
+ */ +TEST_P(CompressorIntegrationTest, EmptyResponse) { + initializeFilter(default_config); + Http::TestRequestHeaderMapImpl request_headers{{":method", "GET"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}, + {"accept-encoding", "deflate, gzip"}}; + + Http::TestResponseHeaderMapImpl response_headers{{":status", "204"}, {"content-length", "0"}}; + + auto response = sendRequestAndWaitForResponse(request_headers, 0, response_headers, 0); + + EXPECT_TRUE(upstream_request_->complete()); + EXPECT_EQ(0U, upstream_request_->bodyLength()); + EXPECT_TRUE(response->complete()); + EXPECT_EQ("204", response->headers().getStatusValue()); + ASSERT_TRUE(response->headers().get(Http::CustomHeaders::get().ContentEncoding) == nullptr); + EXPECT_EQ(0U, response->body().size()); +} + +/** + * Exercises filter when upstream responds with restricted content-type value. + */ +TEST_P(CompressorIntegrationTest, SkipOnContentType) { + initializeFilter(full_config); + doRequestAndNoCompression(Http::TestRequestHeaderMapImpl{{":method", "GET"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}, + {"accept-encoding", "deflate, gzip"}}, + Http::TestResponseHeaderMapImpl{{":status", "200"}, + {"content-length", "128"}, + {"content-type", "application/xml"}}); +} + +/** + * Exercises filter when upstream responds with restricted cache-control value. + */ +TEST_P(CompressorIntegrationTest, SkipOnCacheControl) { + initializeFilter(full_config); + doRequestAndNoCompression(Http::TestRequestHeaderMapImpl{{":method", "GET"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}, + {"accept-encoding", "deflate, gzip"}}, + Http::TestResponseHeaderMapImpl{{":status", "200"}, + {"content-length", "128"}, + {"cache-control", "no-transform"}, + {"content-type", "application/json"}}); +} + +/** + * Exercises gzip compression when upstream returns a chunked response. 
+ */ +TEST_P(CompressorIntegrationTest, AcceptanceFullConfigChunkedResponse) { + initializeFilter(full_config); + Http::TestRequestHeaderMapImpl request_headers{{":method", "GET"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}, + {"accept-encoding", "deflate, gzip"}}; + + Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}, + {"content-type", "application/json"}}; + + auto response = sendRequestAndWaitForResponse(request_headers, 0, response_headers, 1024); + + EXPECT_TRUE(upstream_request_->complete()); + EXPECT_EQ(0U, upstream_request_->bodyLength()); + EXPECT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().getStatusValue()); + ASSERT_EQ( + "gzip", + response->headers().get(Http::CustomHeaders::get().ContentEncoding)->value().getStringView()); + ASSERT_EQ("chunked", response->headers().getTransferEncodingValue()); +} + +/** + * Verify Vary header values are preserved. + */ +TEST_P(CompressorIntegrationTest, AcceptanceFullConfigVaryHeader) { + initializeFilter(default_config); + Http::TestRequestHeaderMapImpl request_headers{{":method", "GET"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}, + {"accept-encoding", "deflate, gzip"}}; + + Http::TestResponseHeaderMapImpl response_headers{ + {":status", "200"}, {"content-type", "application/json"}, {"vary", "Cookie"}}; + + auto response = sendRequestAndWaitForResponse(request_headers, 0, response_headers, 1024); + + EXPECT_TRUE(upstream_request_->complete()); + EXPECT_EQ(0U, upstream_request_->bodyLength()); + EXPECT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().getStatusValue()); + ASSERT_EQ( + "gzip", + response->headers().get(Http::CustomHeaders::get().ContentEncoding)->value().getStringView()); + ASSERT_EQ("Cookie, Accept-Encoding", + response->headers().get(Http::CustomHeaders::get().Vary)->value().getStringView()); +} +} // namespace Envoy diff --git 
a/test/extensions/filters/http/compressor/compressor_filter_test.cc b/test/extensions/filters/http/compressor/compressor_filter_test.cc new file mode 100644 index 0000000000000..a8f2571f62667 --- /dev/null +++ b/test/extensions/filters/http/compressor/compressor_filter_test.cc @@ -0,0 +1,34 @@ +#include "extensions/filters/http/compressor/compressor_filter.h" + +#include "test/mocks/compression/compressor/mocks.h" +#include "test/mocks/runtime/mocks.h" +#include "test/mocks/stats/mocks.h" + +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace Compressor { +namespace { + +using testing::NiceMock; + +TEST(CompressorFilterConfigTests, MakeCompressorTest) { + const envoy::extensions::filters::http::compressor::v3::Compressor compressor_cfg; + NiceMock runtime; + Stats::TestUtil::TestStore stats; + auto compressor_factory(std::make_unique()); + EXPECT_CALL(*compressor_factory, createCompressor()).Times(1); + EXPECT_CALL(*compressor_factory, statsPrefix()).Times(1); + EXPECT_CALL(*compressor_factory, contentEncoding()).Times(1); + CompressorFilterConfig config(compressor_cfg, "test.compressor.", stats, runtime, + std::move(compressor_factory)); + Envoy::Compression::Compressor::CompressorPtr compressor = config.makeCompressor(); +} + +} // namespace +} // namespace Compressor +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/http/compressor/config_test.cc b/test/extensions/filters/http/compressor/config_test.cc new file mode 100644 index 0000000000000..cea48bc00cffc --- /dev/null +++ b/test/extensions/filters/http/compressor/config_test.cc @@ -0,0 +1,46 @@ +#include "extensions/filters/http/compressor/config.h" + +#include "test/mocks/server/factory_context.h" + +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace Compressor { +namespace { + +using testing::NiceMock; + 
+TEST(CompressorFilterFactoryTests, MissingCompressorLibraryConfig) { + const envoy::extensions::filters::http::compressor::v3::Compressor proto_config; + CompressorFilterFactory factory; + NiceMock context; + EXPECT_THROW_WITH_MESSAGE(factory.createFilterFactoryFromProto(proto_config, "stats", context), + EnvoyException, + "Compressor filter doesn't have compressor_library defined"); +} + +TEST(CompressorFilterFactoryTests, UnregisteredCompressorLibraryConfig) { + const std::string yaml_string = R"EOF( + compressor_library: + name: fake_compressor + typed_config: + "@type": type.googleapis.com/test.mock_compressor_library.Unregistered + )EOF"; + + envoy::extensions::filters::http::compressor::v3::Compressor proto_config; + TestUtility::loadFromYaml(yaml_string, proto_config); + CompressorFilterFactory factory; + NiceMock context; + EXPECT_THROW_WITH_MESSAGE(factory.createFilterFactoryFromProto(proto_config, "stats", context), + EnvoyException, + "Didn't find a registered implementation for type: " + "'test.mock_compressor_library.Unregistered'"); +} + +} // namespace +} // namespace Compressor +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/http/compressor/mock_compressor_library.proto b/test/extensions/filters/http/compressor/mock_compressor_library.proto new file mode 100644 index 0000000000000..b6d5ea18d1c08 --- /dev/null +++ b/test/extensions/filters/http/compressor/mock_compressor_library.proto @@ -0,0 +1,6 @@ +syntax = "proto3"; + +package test.mock_compressor_library; + +message Unregistered { +} \ No newline at end of file diff --git a/test/extensions/filters/http/cors/BUILD b/test/extensions/filters/http/cors/BUILD index 6aab69d2f6d97..a91934cb12496 100644 --- a/test/extensions/filters/http/cors/BUILD +++ b/test/extensions/filters/http/cors/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( 
"envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( @@ -30,6 +30,7 @@ envoy_extension_cc_test( name = "cors_filter_integration_test", srcs = ["cors_filter_integration_test.cc"], extension_name = "envoy.filters.http.cors", + tags = ["fails_on_windows"], deps = [ "//source/common/buffer:buffer_lib", "//source/common/http:header_map_lib", diff --git a/test/extensions/filters/http/cors/cors_filter_integration_test.cc b/test/extensions/filters/http/cors/cors_filter_integration_test.cc index 37a366e4a70c6..de715800ccb4d 100644 --- a/test/extensions/filters/http/cors/cors_filter_integration_test.cc +++ b/test/extensions/filters/http/cors/cors_filter_integration_test.cc @@ -130,7 +130,7 @@ TEST_P(CorsFilterIntegrationTest, DEPRECATED_FEATURE_TEST(TestVHostConfigSuccess "CorsPolicy.hidden_envoy_deprecated_enabled", "true"); testPreflight( - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":method", "OPTIONS"}, {":path", "/cors-vhost-config/test"}, {":scheme", "http"}, @@ -138,7 +138,7 @@ TEST_P(CorsFilterIntegrationTest, DEPRECATED_FEATURE_TEST(TestVHostConfigSuccess {"access-control-request-method", "GET"}, {"origin", "test-origin"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"access-control-allow-origin", "test-origin"}, {"access-control-allow-methods", "GET,POST"}, {"access-control-allow-headers", "content-type,x-grpc-web"}, @@ -150,7 +150,7 @@ TEST_P(CorsFilterIntegrationTest, DEPRECATED_FEATURE_TEST(TestVHostConfigSuccess TEST_P(CorsFilterIntegrationTest, DEPRECATED_FEATURE_TEST(TestRouteConfigSuccess)) { testPreflight( - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":method", "OPTIONS"}, {":path", "/cors-route-config/test"}, {":scheme", "http"}, @@ -158,7 +158,7 @@ TEST_P(CorsFilterIntegrationTest, DEPRECATED_FEATURE_TEST(TestRouteConfigSuccess {"access-control-request-method", "GET"}, {"origin", "test-origin-1"}, }, - Http::TestHeaderMapImpl{ + 
Http::TestResponseHeaderMapImpl{ {"access-control-allow-origin", "test-origin-1"}, {"access-control-allow-methods", "POST"}, {"access-control-allow-headers", "content-type"}, @@ -174,7 +174,7 @@ TEST_P(CorsFilterIntegrationTest, DEPRECATED_FEATURE_TEST(TestRouteConfigBadOrig "CorsPolicy.hidden_envoy_deprecated_enabled", "true"); testNormalRequest( - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":method", "OPTIONS"}, {":path", "/cors-route-config/test"}, {":scheme", "http"}, @@ -182,7 +182,7 @@ TEST_P(CorsFilterIntegrationTest, DEPRECATED_FEATURE_TEST(TestRouteConfigBadOrig {"access-control-request-method", "GET"}, {"origin", "test-origin"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"content-length", "0"}, {":status", "200"}, @@ -191,7 +191,7 @@ TEST_P(CorsFilterIntegrationTest, DEPRECATED_FEATURE_TEST(TestRouteConfigBadOrig TEST_P(CorsFilterIntegrationTest, DEPRECATED_FEATURE_TEST(TestCorsDisabled)) { testNormalRequest( - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":method", "OPTIONS"}, {":path", "/no-cors/test"}, {":scheme", "http"}, @@ -199,7 +199,7 @@ TEST_P(CorsFilterIntegrationTest, DEPRECATED_FEATURE_TEST(TestCorsDisabled)) { {"access-control-request-method", "GET"}, {"origin", "test-origin"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"content-length", "0"}, {":status", "200"}, @@ -225,7 +225,7 @@ TEST_P(CorsFilterIntegrationTest, DEPRECATED_FEATURE_TEST(TestLegacyCorsDisabled ->set_value(false); }); testNormalRequest( - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":method", "OPTIONS"}, {":path", "/legacy-no-cors/test"}, {":scheme", "http"}, @@ -233,7 +233,7 @@ TEST_P(CorsFilterIntegrationTest, DEPRECATED_FEATURE_TEST(TestLegacyCorsDisabled {"access-control-request-method", "GET"}, {"origin", "test-origin"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"content-length", "0"}, 
{":status", "200"}, @@ -242,14 +242,14 @@ TEST_P(CorsFilterIntegrationTest, DEPRECATED_FEATURE_TEST(TestLegacyCorsDisabled TEST_P(CorsFilterIntegrationTest, DEPRECATED_FEATURE_TEST(TestEncodeHeaders)) { testNormalRequest( - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":method", "GET"}, {":path", "/cors-vhost-config/test"}, {":scheme", "http"}, {":authority", "test-host"}, {"origin", "test-origin"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"access-control-allow-origin", "test-origin"}, {"server", "envoy"}, {"content-length", "0"}, @@ -259,14 +259,14 @@ TEST_P(CorsFilterIntegrationTest, DEPRECATED_FEATURE_TEST(TestEncodeHeaders)) { TEST_P(CorsFilterIntegrationTest, DEPRECATED_FEATURE_TEST(TestEncodeHeadersCredentialsAllowed)) { testNormalRequest( - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":method", "GET"}, {":path", "/cors-credentials-allowed/test"}, {":scheme", "http"}, {":authority", "test-host"}, {"origin", "test-origin"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"access-control-allow-origin", "test-origin"}, {"access-control-allow-credentials", "true"}, {"server", "envoy"}, @@ -277,14 +277,14 @@ TEST_P(CorsFilterIntegrationTest, DEPRECATED_FEATURE_TEST(TestEncodeHeadersCrede TEST_P(CorsFilterIntegrationTest, DEPRECATED_FEATURE_TEST(TestAllowedOriginRegex)) { testNormalRequest( - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":method", "GET"}, {":path", "/cors-allow-origin-regex/test"}, {":scheme", "http"}, {":authority", "test-host"}, {"origin", "www.envoyproxy.io"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"access-control-allow-origin", "www.envoyproxy.io"}, {"access-control-allow-credentials", "true"}, {"server", "envoy"}, @@ -295,14 +295,14 @@ TEST_P(CorsFilterIntegrationTest, DEPRECATED_FEATURE_TEST(TestAllowedOriginRegex TEST_P(CorsFilterIntegrationTest, DEPRECATED_FEATURE_TEST(TestExposeHeaders)) { testNormalRequest( - 
Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":method", "GET"}, {":path", "/cors-expose-headers/test"}, {":scheme", "http"}, {":authority", "test-host"}, {"origin", "test-origin-1"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"access-control-allow-origin", "test-origin-1"}, {"access-control-expose-headers", "custom-header-1,custom-header-2"}, {"server", "envoy"}, diff --git a/test/extensions/filters/http/cors/cors_filter_test.cc b/test/extensions/filters/http/cors/cors_filter_test.cc index b045ed78987dd..1c49f88eed1b6 100644 --- a/test/extensions/filters/http/cors/cors_filter_test.cc +++ b/test/extensions/filters/http/cors/cors_filter_test.cc @@ -253,6 +253,8 @@ TEST_F(CorsFilterTest, OptionsRequestMatchingOriginByWildcard) { EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.encodeHeaders(response_headers_, false)); EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.encodeData(data_, false)); EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.encodeTrailers(response_trailers_)); + ASSERT_TRUE(decoder_callbacks_.stream_info_.responseCodeDetails().has_value()); + EXPECT_EQ(decoder_callbacks_.stream_info_.responseCodeDetails().value(), "cors_response"); } TEST_F(CorsFilterTest, OptionsRequestWithOriginCorsEnabledShadowDisabled) { diff --git a/test/extensions/filters/http/csrf/BUILD b/test/extensions/filters/http/csrf/BUILD index 984cccc2b122d..a7e4b25859682 100644 --- a/test/extensions/filters/http/csrf/BUILD +++ b/test/extensions/filters/http/csrf/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( @@ -31,6 +31,7 @@ envoy_extension_cc_test( name = "csrf_filter_integration_test", srcs = ["csrf_filter_integration_test.cc"], extension_name = "envoy.filters.http.csrf", + tags = ["fails_on_windows"], deps = [ 
"//source/extensions/filters/http/csrf:config", "//test/config:utility_lib", diff --git a/test/extensions/filters/http/csrf/csrf_filter_integration_test.cc b/test/extensions/filters/http/csrf/csrf_filter_integration_test.cc index 0b4b81ec2fa50..6500bf77b61a0 100644 --- a/test/extensions/filters/http/csrf/csrf_filter_integration_test.cc +++ b/test/extensions/filters/http/csrf/csrf_filter_integration_test.cc @@ -84,12 +84,12 @@ TEST_P(CsrfFilterIntegrationTest, TestCsrfSuccess) { {":method", "PUT"}, {":path", "/"}, {":scheme", "http"}, - {"origin", "localhost"}, + {"origin", "http://localhost"}, {"host", "localhost"}, }}; const auto& response = sendRequestAndWaitForResponse(headers); EXPECT_TRUE(response->complete()); - EXPECT_EQ(response->headers().Status()->value().getStringView(), "200"); + EXPECT_EQ(response->headers().getStatusValue(), "200"); } TEST_P(CsrfFilterIntegrationTest, TestCsrfDisabled) { @@ -98,12 +98,12 @@ TEST_P(CsrfFilterIntegrationTest, TestCsrfDisabled) { {":method", "PUT"}, {":path", "/"}, {":scheme", "http"}, - {"origin", "cross-origin"}, + {"origin", "http://cross-origin"}, {"host", "test-origin"}, }}; const auto& response = sendRequestAndWaitForResponse(headers); EXPECT_TRUE(response->complete()); - EXPECT_EQ(response->headers().Status()->value().getStringView(), "200"); + EXPECT_EQ(response->headers().getStatusValue(), "200"); } TEST_P(CsrfFilterIntegrationTest, TestNonMutationMethod) { @@ -112,12 +112,12 @@ TEST_P(CsrfFilterIntegrationTest, TestNonMutationMethod) { {":method", "GET"}, {":path", "/"}, {":scheme", "http"}, - {"origin", "cross-origin"}, + {"origin", "http://cross-origin"}, {"host", "test-origin"}, }}; const auto& response = sendRequestAndWaitForResponse(headers); EXPECT_TRUE(response->complete()); - EXPECT_EQ(response->headers().Status()->value().getStringView(), "200"); + EXPECT_EQ(response->headers().getStatusValue(), "200"); } TEST_P(CsrfFilterIntegrationTest, TestOriginMismatch) { @@ -126,12 +126,12 @@ 
TEST_P(CsrfFilterIntegrationTest, TestOriginMismatch) { {":method", "PUT"}, {":path", "/"}, {":scheme", "http"}, - {"origin", "cross-origin"}, + {"origin", "http://cross-origin"}, {"host", "test-origin"}, }}; const auto& response = sendRequest(headers); EXPECT_TRUE(response->complete()); - EXPECT_EQ(response->headers().Status()->value().getStringView(), "403"); + EXPECT_EQ(response->headers().getStatusValue(), "403"); } TEST_P(CsrfFilterIntegrationTest, TestEnforcesPost) { @@ -140,12 +140,12 @@ TEST_P(CsrfFilterIntegrationTest, TestEnforcesPost) { {":method", "POST"}, {":path", "/"}, {":scheme", "http"}, - {"origin", "cross-origin"}, + {"origin", "http://cross-origin"}, {"host", "test-origin"}, }}; const auto& response = sendRequest(headers); EXPECT_TRUE(response->complete()); - EXPECT_EQ(response->headers().Status()->value().getStringView(), "403"); + EXPECT_EQ(response->headers().getStatusValue(), "403"); } TEST_P(CsrfFilterIntegrationTest, TestEnforcesDelete) { @@ -154,12 +154,12 @@ TEST_P(CsrfFilterIntegrationTest, TestEnforcesDelete) { {":method", "DELETE"}, {":path", "/"}, {":scheme", "http"}, - {"origin", "cross-origin"}, + {"origin", "http://cross-origin"}, {"host", "test-origin"}, }}; const auto& response = sendRequest(headers); EXPECT_TRUE(response->complete()); - EXPECT_EQ(response->headers().Status()->value().getStringView(), "403"); + EXPECT_EQ(response->headers().getStatusValue(), "403"); } TEST_P(CsrfFilterIntegrationTest, TestEnforcesPatch) { @@ -168,12 +168,12 @@ TEST_P(CsrfFilterIntegrationTest, TestEnforcesPatch) { {":method", "PATCH"}, {":path", "/"}, {":scheme", "http"}, - {"origin", "cross-origin"}, + {"origin", "http://cross-origin"}, {"host", "test-origin"}, }}; const auto& response = sendRequest(headers); EXPECT_TRUE(response->complete()); - EXPECT_EQ(response->headers().Status()->value().getStringView(), "403"); + EXPECT_EQ(response->headers().getStatusValue(), "403"); } TEST_P(CsrfFilterIntegrationTest, TestRefererFallback) { @@ -181,11 
+181,11 @@ TEST_P(CsrfFilterIntegrationTest, TestRefererFallback) { Http::TestRequestHeaderMapImpl headers = {{":method", "DELETE"}, {":path", "/"}, {":scheme", "http"}, - {"referer", "test-origin"}, + {"referer", "http://test-origin"}, {"host", "test-origin"}}; const auto& response = sendRequestAndWaitForResponse(headers); EXPECT_TRUE(response->complete()); - EXPECT_EQ(response->headers().Status()->value().getStringView(), "200"); + EXPECT_EQ(response->headers().getStatusValue(), "200"); } TEST_P(CsrfFilterIntegrationTest, TestMissingOrigin) { @@ -194,7 +194,7 @@ TEST_P(CsrfFilterIntegrationTest, TestMissingOrigin) { {{":method", "DELETE"}, {":path", "/"}, {":scheme", "http"}, {"host", "test-origin"}}}; const auto& response = sendRequest(headers); EXPECT_TRUE(response->complete()); - EXPECT_EQ(response->headers().Status()->value().getStringView(), "403"); + EXPECT_EQ(response->headers().getStatusValue(), "403"); } TEST_P(CsrfFilterIntegrationTest, TestShadowOnlyMode) { @@ -203,12 +203,12 @@ TEST_P(CsrfFilterIntegrationTest, TestShadowOnlyMode) { {":method", "PUT"}, {":path", "/"}, {":scheme", "http"}, - {"origin", "cross-origin"}, + {"origin", "http://cross-origin"}, {"host", "localhost"}, }}; const auto& response = sendRequestAndWaitForResponse(headers); EXPECT_TRUE(response->complete()); - EXPECT_EQ(response->headers().Status()->value().getStringView(), "200"); + EXPECT_EQ(response->headers().getStatusValue(), "200"); } TEST_P(CsrfFilterIntegrationTest, TestFilterAndShadowEnabled) { @@ -217,12 +217,12 @@ TEST_P(CsrfFilterIntegrationTest, TestFilterAndShadowEnabled) { {":method", "PUT"}, {":path", "/"}, {":scheme", "http"}, - {"origin", "cross-origin"}, + {"origin", "http://cross-origin"}, {"host", "localhost"}, }}; const auto& response = sendRequest(headers); EXPECT_TRUE(response->complete()); - EXPECT_EQ(response->headers().Status()->value().getStringView(), "403"); + EXPECT_EQ(response->headers().getStatusValue(), "403"); } } // namespace } // namespace Envoy 
diff --git a/test/extensions/filters/http/csrf/csrf_filter_test.cc b/test/extensions/filters/http/csrf/csrf_filter_test.cc index 634a01401ea9f..dbac2d629e2ed 100644 --- a/test/extensions/filters/http/csrf/csrf_filter_test.cc +++ b/test/extensions/filters/http/csrf/csrf_filter_test.cc @@ -124,7 +124,8 @@ TEST_F(CsrfFilterTest, RequestWithoutOrigin) { } TEST_F(CsrfFilterTest, RequestWithoutDestination) { - Http::TestRequestHeaderMapImpl request_headers{{":method", "PUT"}, {"origin", "localhost"}}; + Http::TestRequestHeaderMapImpl request_headers{{":method", "PUT"}, + {"origin", "http://localhost"}}; EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, filter_.decodeHeaders(request_headers, false)); @@ -138,7 +139,31 @@ TEST_F(CsrfFilterTest, RequestWithoutDestination) { TEST_F(CsrfFilterTest, RequestWithInvalidOrigin) { Http::TestRequestHeaderMapImpl request_headers{ - {":method", "PUT"}, {"origin", "cross-origin"}, {":authority", "localhost"}}; + {":method", "PUT"}, {"origin", "http://cross-origin"}, {":authority", "localhost"}}; + + Http::TestResponseHeaderMapImpl response_headers{ + {":status", "403"}, + {"content-length", "14"}, + {"content-type", "text/plain"}, + }; + EXPECT_CALL(decoder_callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), false)); + + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + filter_.decodeHeaders(request_headers, false)); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data_, false)); + EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers_)); + + EXPECT_EQ(0U, config_->stats().missing_source_origin_.value()); + EXPECT_EQ(1U, config_->stats().request_invalid_.value()); + EXPECT_EQ(0U, config_->stats().request_valid_.value()); + EXPECT_EQ("csrf_origin_mismatch", decoder_callbacks_.details_); +} + +TEST_F(CsrfFilterTest, RequestWithInvalidOriginDifferentNonStandardPorts) { + Http::TestRequestHeaderMapImpl request_headers{{":method", "PUT"}, + {"origin", 
"http://localhost:90"}, + {":authority", "localhost:91"}, + {":scheme", "http"}}; Http::TestResponseHeaderMapImpl response_headers{ {":status", "403"}, @@ -159,8 +184,42 @@ TEST_F(CsrfFilterTest, RequestWithInvalidOrigin) { } TEST_F(CsrfFilterTest, RequestWithValidOrigin) { - Http::TestRequestHeaderMapImpl request_headers{ - {":method", "PUT"}, {"origin", "localhost"}, {"host", "localhost"}}; + Http::TestRequestHeaderMapImpl request_headers{{":method", "PUT"}, + {"origin", "http://localhost"}, + {"host", "localhost"}, + {":scheme", "http"}}; + + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false)); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data_, false)); + EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers_)); + + EXPECT_EQ(0U, config_->stats().missing_source_origin_.value()); + EXPECT_EQ(0U, config_->stats().request_invalid_.value()); + EXPECT_EQ(1U, config_->stats().request_valid_.value()); +} + +TEST_F(CsrfFilterTest, RequestWithValidOriginNonStandardPort) { + Http::TestRequestHeaderMapImpl request_headers{{":method", "PUT"}, + {"origin", "http://localhost:88"}, + {"host", "localhost:88"}, + {":scheme", "http"}}; + + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false)); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data_, false)); + EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers_)); + + EXPECT_EQ(0U, config_->stats().missing_source_origin_.value()); + EXPECT_EQ(0U, config_->stats().request_invalid_.value()); + EXPECT_EQ(1U, config_->stats().request_valid_.value()); +} + +// This works because gURL drops the port for hostAndPort() when they are standard +// ports (e.g.: 80 & 443). 
+TEST_F(CsrfFilterTest, RequestWithValidOriginHttpVsHttps) { + Http::TestRequestHeaderMapImpl request_headers{{":method", "PUT"}, + {"origin", "https://localhost"}, + {"host", "localhost"}, + {":scheme", "http"}}; EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false)); EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data_, false)); @@ -173,7 +232,7 @@ TEST_F(CsrfFilterTest, RequestWithValidOrigin) { TEST_F(CsrfFilterTest, RequestWithInvalidOriginCsrfDisabledShadowDisabled) { Http::TestRequestHeaderMapImpl request_headers{ - {":method", "PUT"}, {"origin", "cross-origin"}, {"host", "localhost"}}; + {":method", "PUT"}, {"origin", "http://cross-origin"}, {"host", "localhost"}}; setFilterEnabled(false); @@ -188,8 +247,10 @@ TEST_F(CsrfFilterTest, RequestWithInvalidOriginCsrfDisabledShadowDisabled) { } TEST_F(CsrfFilterTest, RequestWithInvalidOriginCsrfDisabledShadowEnabled) { - Http::TestRequestHeaderMapImpl request_headers{ - {":method", "PUT"}, {"origin", "cross-origin"}, {"host", "localhost"}}; + Http::TestRequestHeaderMapImpl request_headers{{":method", "PUT"}, + {"origin", "http://cross-origin"}, + {"host", "localhost"}, + {":scheme", "http"}}; setFilterEnabled(false); setShadowEnabled(true); @@ -204,8 +265,10 @@ TEST_F(CsrfFilterTest, RequestWithInvalidOriginCsrfDisabledShadowEnabled) { } TEST_F(CsrfFilterTest, RequestWithValidOriginCsrfDisabledShadowEnabled) { - Http::TestRequestHeaderMapImpl request_headers{ - {":method", "PUT"}, {"origin", "localhost"}, {"host", "localhost"}}; + Http::TestRequestHeaderMapImpl request_headers{{":method", "PUT"}, + {"origin", "http://localhost"}, + {"host", "localhost"}, + {":scheme", "http"}}; setFilterEnabled(false); setShadowEnabled(true); @@ -220,8 +283,10 @@ TEST_F(CsrfFilterTest, RequestWithValidOriginCsrfDisabledShadowEnabled) { } TEST_F(CsrfFilterTest, RequestWithInvalidOriginCsrfEnabledShadowEnabled) { - Http::TestRequestHeaderMapImpl request_headers{ - {":method", 
"PUT"}, {"origin", "cross-origin"}, {"host", "localhost"}}; + Http::TestRequestHeaderMapImpl request_headers{{":method", "PUT"}, + {"origin", "http://cross-origin"}, + {"host", "localhost"}, + {":scheme", "http"}}; setShadowEnabled(true); @@ -243,8 +308,10 @@ TEST_F(CsrfFilterTest, RequestWithInvalidOriginCsrfEnabledShadowEnabled) { } TEST_F(CsrfFilterTest, RequestWithValidOriginCsrfEnabledShadowEnabled) { - Http::TestRequestHeaderMapImpl request_headers{ - {":method", "PUT"}, {"origin", "localhost"}, {"host", "localhost"}}; + Http::TestRequestHeaderMapImpl request_headers{{":method", "PUT"}, + {"origin", "http://localhost"}, + {"host", "localhost"}, + {":scheme", "http"}}; setShadowEnabled(true); @@ -295,8 +362,10 @@ TEST_F(CsrfFilterTest, EmptyRouteEntry) { } TEST_F(CsrfFilterTest, NoCsrfEntry) { - Http::TestRequestHeaderMapImpl request_headers{ - {":method", "PUT"}, {"origin", "cross-origin"}, {"host", "localhost"}}; + Http::TestRequestHeaderMapImpl request_headers{{":method", "PUT"}, + {"origin", "http://cross-origin"}, + {"host", "localhost"}, + {":scheme", "http"}}; setRoutePolicy(nullptr); setVirtualHostPolicy(nullptr); @@ -311,7 +380,8 @@ TEST_F(CsrfFilterTest, NoCsrfEntry) { } TEST_F(CsrfFilterTest, NoRouteCsrfEntry) { - Http::TestRequestHeaderMapImpl request_headers{{":method", "POST"}, {"origin", "localhost"}}; + Http::TestRequestHeaderMapImpl request_headers{{":method", "POST"}, + {"origin", "http://localhost"}}; setRoutePolicy(nullptr); @@ -326,7 +396,8 @@ TEST_F(CsrfFilterTest, NoRouteCsrfEntry) { } TEST_F(CsrfFilterTest, NoVHostCsrfEntry) { - Http::TestRequestHeaderMapImpl request_headers{{":method", "DELETE"}, {"origin", "localhost"}}; + Http::TestRequestHeaderMapImpl request_headers{{":method", "DELETE"}, + {"origin", "http://localhost"}}; setVirtualHostPolicy(nullptr); @@ -341,7 +412,8 @@ TEST_F(CsrfFilterTest, NoVHostCsrfEntry) { } TEST_F(CsrfFilterTest, RequestFromAdditionalExactOrigin) { - Http::TestRequestHeaderMapImpl 
request_headers{{":method", "PUT"}, {"origin", "additionalhost"}}; + Http::TestRequestHeaderMapImpl request_headers{{":method", "PUT"}, + {"origin", "http://additionalhost"}}; EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false)); EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data_, false)); @@ -353,7 +425,8 @@ TEST_F(CsrfFilterTest, RequestFromAdditionalExactOrigin) { } TEST_F(CsrfFilterTest, RequestFromAdditionalRegexOrigin) { - Http::TestRequestHeaderMapImpl request_headers{{":method", "PUT"}, {"origin", "www-1.allow.com"}}; + Http::TestRequestHeaderMapImpl request_headers{{":method", "PUT"}, + {"origin", "http://www-1.allow.com"}}; EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false)); EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data_, false)); @@ -365,7 +438,8 @@ TEST_F(CsrfFilterTest, RequestFromAdditionalRegexOrigin) { } TEST_F(CsrfFilterTest, RequestFromInvalidAdditionalRegexOrigin) { - Http::TestRequestHeaderMapImpl request_headers{{":method", "PUT"}, {"origin", "www.allow.com"}}; + Http::TestRequestHeaderMapImpl request_headers{{":method", "PUT"}, + {"origin", "http://www.allow.com"}}; EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, filter_.decodeHeaders(request_headers, false)); diff --git a/test/extensions/filters/http/decompressor/BUILD b/test/extensions/filters/http/decompressor/BUILD new file mode 100644 index 0000000000000..cb78711b4020e --- /dev/null +++ b/test/extensions/filters/http/decompressor/BUILD @@ -0,0 +1,49 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_package", +) +load( + "//test/extensions:extensions_build_system.bzl", + "envoy_extension_cc_test", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_extension_cc_test( + name = "decompressor_filter_test", + srcs = ["decompressor_filter_test.cc"], + extension_name = "envoy.filters.http.decompressor", + deps = [ + "//source/common/http:headers_lib", 
+ "//source/common/protobuf:utility_lib", + "//source/extensions/compression/gzip/decompressor:config", + "//source/extensions/filters/http:well_known_names", + "//source/extensions/filters/http/decompressor:config", + "//test/mocks/compression/decompressor:decompressor_mocks", + "//test/mocks/http:http_mocks", + "//test/mocks/protobuf:protobuf_mocks", + "//test/mocks/runtime:runtime_mocks", + "//test/test_common:utility_lib", + "@envoy_api//envoy/extensions/filters/http/decompressor/v3:pkg_cc_proto", + ], +) + +envoy_extension_cc_test( + name = "decompressor_filter_integration_test", + srcs = [ + "decompressor_filter_integration_test.cc", + ], + extension_name = "envoy.filters.http.decompressor", + tags = ["fails_on_windows"], + deps = [ + "//source/extensions/compression/gzip/compressor:config", + "//source/extensions/compression/gzip/decompressor:config", + "//source/extensions/filters/http/decompressor:config", + "//test/integration:http_integration_lib", + "//test/mocks/server:factory_context_mocks", + "//test/test_common:simulated_time_system_lib", + "//test/test_common:utility_lib", + ], +) diff --git a/test/extensions/filters/http/decompressor/decompressor_filter_integration_test.cc b/test/extensions/filters/http/decompressor/decompressor_filter_integration_test.cc new file mode 100644 index 0000000000000..c924bb4a5fc21 --- /dev/null +++ b/test/extensions/filters/http/decompressor/decompressor_filter_integration_test.cc @@ -0,0 +1,260 @@ +#include "envoy/event/timer.h" + +#include "extensions/compression/gzip/compressor/config.h" + +#include "test/integration/http_integration.h" +#include "test/mocks/server/factory_context.h" +#include "test/test_common/simulated_time_system.h" +#include "test/test_common/utility.h" + +#include "gtest/gtest.h" + +namespace Envoy { + +class DecompressorIntegrationTest : public testing::TestWithParam, + public HttpIntegrationTest { +public: + DecompressorIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP2, 
GetParam()) { + Extensions::Compression::Gzip::Compressor::GzipCompressorLibraryFactory + compressor_library_factory; + envoy::extensions::compression::gzip::compressor::v3::Gzip factory_config; + testing::NiceMock context; + + auto compressor_factory = + compressor_library_factory.createCompressorFactoryFromProto(factory_config, context); + request_compressor_ = compressor_factory->createCompressor(); + response_compressor_ = compressor_factory->createCompressor(); + } + + void TearDown() override { cleanupUpstreamAndDownstream(); } + + void initializeFilter(const std::string& config) { + config_helper_.addFilter(config); + HttpIntegrationTest::initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + } + + const std::string default_config{R"EOF( + name: default_decompressor + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.decompressor.v3.Decompressor + decompressor_library: + name: testlib + typed_config: + "@type": type.googleapis.com/envoy.extensions.compression.gzip.decompressor.v3.Gzip + )EOF"}; + + Envoy::Compression::Compressor::CompressorPtr request_compressor_{}; + Envoy::Compression::Compressor::CompressorPtr response_compressor_{}; +}; + +INSTANTIATE_TEST_SUITE_P(IpVersions, DecompressorIntegrationTest, + testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + TestUtility::ipTestParamsToString); + +/** + * Exercises gzip decompression bidirectionally with default configuration. + */ +TEST_P(DecompressorIntegrationTest, BidirectionalDecompression) { + // Use gzip for decompression. + initializeFilter(default_config); + + // Enable request decompression by setting the Content-Encoding header to gzip. 
+ auto encoder_decoder = + codec_client_->startRequest(Http::TestRequestHeaderMapImpl{{":method", "POST"}, + {":scheme", "http"}, + {":path", "/test/long/url"}, + {":authority", "host"}, + {"content-encoding", "gzip"}}); + auto request_encoder = &encoder_decoder.first; + auto response = std::move(encoder_decoder.second); + + // Send first data chunk upstream. + Buffer::OwnedImpl request_data1; + TestUtility::feedBufferWithRandomCharacters(request_data1, 8192); + auto uncompressed_request_length = request_data1.length(); + request_compressor_->compress(request_data1, Envoy::Compression::Compressor::State::Flush); + auto compressed_request_length = request_data1.length(); + codec_client_->sendData(*request_encoder, request_data1, false); + + // Send second data chunk upstream and finish the request stream. + Buffer::OwnedImpl request_data2; + TestUtility::feedBufferWithRandomCharacters(request_data2, 16384); + uncompressed_request_length += request_data2.length(); + request_compressor_->compress(request_data2, Envoy::Compression::Compressor::State::Finish); + compressed_request_length += request_data2.length(); + codec_client_->sendData(*request_encoder, request_data2, true); + + // Wait for frames to arrive upstream. + ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); + ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); + ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_)); + + // Assert that the total bytes received upstream equal the sum of the uncompressed byte buffers + // sent. 
+ EXPECT_TRUE(upstream_request_->complete()); + EXPECT_EQ("chunked", upstream_request_->headers().TransferEncoding()->value().getStringView()); + EXPECT_EQ("gzip", upstream_request_->headers() + .get(Http::LowerCaseString("accept-encoding")) + ->value() + .getStringView()); + EXPECT_EQ(nullptr, upstream_request_->headers().get(Http::LowerCaseString("content-encoding"))); + EXPECT_EQ(uncompressed_request_length, upstream_request_->bodyLength()); + + // Verify stats + test_server_->waitForCounterEq("http.config_test.decompressor.testlib.gzip.request.decompressed", + 1); + test_server_->waitForCounterEq( + "http.config_test.decompressor.testlib.gzip.request.not_decompressed", 0); + test_server_->waitForCounterEq( + "http.config_test.decompressor.testlib.gzip.request.total_compressed_bytes", + compressed_request_length); + test_server_->waitForCounterEq( + "http.config_test.decompressor.testlib.gzip.request.total_uncompressed_bytes", + uncompressed_request_length); + + // Enable response decompression by setting the Content-Encoding header to gzip. + upstream_request_->encodeHeaders( + Http::TestResponseHeaderMapImpl{{":status", "200"}, {"content-encoding", "gzip"}}, false); + + // Send first data chunk downstream. + Buffer::OwnedImpl response_data1; + TestUtility::feedBufferWithRandomCharacters(response_data1, 4096); + auto uncompressed_response_length = response_data1.length(); + response_compressor_->compress(response_data1, Envoy::Compression::Compressor::State::Flush); + auto compressed_response_length = response_data1.length(); + upstream_request_->encodeData(response_data1, false); + + // Send second data chunk downstream and finish the response stream. 
+ Buffer::OwnedImpl response_data2; + TestUtility::feedBufferWithRandomCharacters(response_data2, 8192); + uncompressed_response_length += response_data2.length(); + response_compressor_->compress(response_data2, Envoy::Compression::Compressor::State::Flush); + compressed_response_length += response_data2.length(); + upstream_request_->encodeData(response_data2, true); + + // Wait for frames to arrive downstream. + response->waitForEndStream(); + + // Assert that the total bytes received downstream equal the sum of the uncompressed byte buffers + // sent. + EXPECT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ(uncompressed_response_length, response->body().length()); + + // Verify stats + test_server_->waitForCounterEq("http.config_test.decompressor.testlib.gzip.response.decompressed", + 1); + test_server_->waitForCounterEq( + "http.config_test.decompressor.testlib.gzip.response.not_decompressed", 0); + test_server_->waitForCounterEq( + "http.config_test.decompressor.testlib.gzip.response.total_compressed_bytes", + compressed_response_length); + test_server_->waitForCounterEq( + "http.config_test.decompressor.testlib.gzip.response.total_uncompressed_bytes", + uncompressed_response_length); +} + +/** + * Exercises gzip decompression bidirectionally with configuration using incompatible window bits + * resulting in an error. + */ +TEST_P(DecompressorIntegrationTest, BidirectionalDecompressionError) { + const std::string bad_config{R"EOF( + name: default_decompressor + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.decompressor.v3.Decompressor + decompressor_library: + name: testlib + typed_config: + "@type": type.googleapis.com/envoy.extensions.compression.gzip.decompressor.v3.Gzip + window_bits: 10 + )EOF"}; + // Use gzip for decompression. + initializeFilter(bad_config); + + // Enable request decompression by setting the Content-Encoding header to gzip. 
+ auto encoder_decoder = + codec_client_->startRequest(Http::TestRequestHeaderMapImpl{{":method", "POST"}, + {":scheme", "http"}, + {":path", "/test/long/url"}, + {":authority", "host"}, + {"content-encoding", "gzip"}}); + auto request_encoder = &encoder_decoder.first; + auto response = std::move(encoder_decoder.second); + + // Send first data chunk upstream. + Buffer::OwnedImpl request_data1; + TestUtility::feedBufferWithRandomCharacters(request_data1, 8192); + request_compressor_->compress(request_data1, Envoy::Compression::Compressor::State::Flush); + auto compressed_request_length = request_data1.length(); + codec_client_->sendData(*request_encoder, request_data1, false); + + // Send second data chunk upstream and finish the request stream. + Buffer::OwnedImpl request_data2; + TestUtility::feedBufferWithRandomCharacters(request_data2, 16384); + request_compressor_->compress(request_data2, Envoy::Compression::Compressor::State::Finish); + compressed_request_length += request_data2.length(); + codec_client_->sendData(*request_encoder, request_data2, true); + + // Wait for frames to arrive upstream. + ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); + ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); + ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_)); + + EXPECT_TRUE(upstream_request_->complete()); + EXPECT_EQ("chunked", upstream_request_->headers().TransferEncoding()->value().getStringView()); + EXPECT_EQ("gzip", upstream_request_->headers() + .get(Http::LowerCaseString("accept-encoding")) + ->value() + .getStringView()); + EXPECT_EQ(nullptr, upstream_request_->headers().get(Http::LowerCaseString("content-encoding"))); + + // Verify stats. While the stream was decompressed, there should be a decompression failure. 
+ test_server_->waitForCounterEq("http.config_test.decompressor.testlib.gzip.request.decompressed", + 1); + test_server_->waitForCounterEq( + "http.config_test.decompressor.testlib.gzip.request.not_decompressed", 0); + test_server_->waitForCounterEq( + "http.config_test.decompressor.testlib.gzip.request.total_compressed_bytes", + compressed_request_length); + test_server_->waitForCounterEq( + "http.config_test.decompressor.testlib.gzip.decompressor_library.zlib_data_error", 2); + + // Enable response decompression by setting the Content-Encoding header to gzip. + upstream_request_->encodeHeaders( + Http::TestResponseHeaderMapImpl{{":status", "200"}, {"content-encoding", "gzip"}}, false); + + // Send first data chunk downstream. + Buffer::OwnedImpl response_data1; + TestUtility::feedBufferWithRandomCharacters(response_data1, 4096); + response_compressor_->compress(response_data1, Envoy::Compression::Compressor::State::Flush); + auto compressed_response_length = response_data1.length(); + upstream_request_->encodeData(response_data1, false); + + // Send second data chunk downstream and finish the response stream. + Buffer::OwnedImpl response_data2; + TestUtility::feedBufferWithRandomCharacters(response_data2, 8192); + response_compressor_->compress(response_data2, Envoy::Compression::Compressor::State::Flush); + compressed_response_length += response_data2.length(); + upstream_request_->encodeData(response_data2, true); + + // Wait for frames to arrive downstream. + response->waitForEndStream(); + + EXPECT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + + // Verify stats. While the stream was decompressed, there should be a decompression failure. 
+ test_server_->waitForCounterEq("http.config_test.decompressor.testlib.gzip.response.decompressed", + 1); + test_server_->waitForCounterEq( + "http.config_test.decompressor.testlib.gzip.response.not_decompressed", 0); + test_server_->waitForCounterEq( + "http.config_test.decompressor.testlib.gzip.response.total_compressed_bytes", + compressed_response_length); + test_server_->waitForCounterGe( + "http.config_test.decompressor.testlib.gzip.decompressor_library.zlib_data_error", 3); +} + +} // namespace Envoy diff --git a/test/extensions/filters/http/decompressor/decompressor_filter_test.cc b/test/extensions/filters/http/decompressor/decompressor_filter_test.cc new file mode 100644 index 0000000000000..871b8f28b7518 --- /dev/null +++ b/test/extensions/filters/http/decompressor/decompressor_filter_test.cc @@ -0,0 +1,447 @@ +#include "envoy/extensions/filters/http/decompressor/v3/decompressor.pb.h" + +#include "common/http/headers.h" +#include "common/protobuf/utility.h" + +#include "extensions/filters/http/decompressor/decompressor_filter.h" + +#include "test/mocks/buffer/mocks.h" +#include "test/mocks/compression/decompressor/mocks.h" +#include "test/mocks/http/mocks.h" +#include "test/mocks/protobuf/mocks.h" +#include "test/mocks/runtime/mocks.h" +#include "test/mocks/stats/mocks.h" +#include "test/test_common/utility.h" + +#include "gtest/gtest.h" + +using testing::ByMove; +using testing::Return; + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace Decompressor { +namespace { + +class DecompressorFilterTest : public testing::TestWithParam { +public: + void SetUp() override { + setUpFilter(R"EOF( +decompressor_library: + typed_config: + "@type": "type.googleapis.com/envoy.extensions.compression.gzip.decompressor.v3.Gzip" +)EOF"); + } + + void setUpFilter(std::string&& yaml) { + envoy::extensions::filters::http::decompressor::v3::Decompressor decompressor; + TestUtility::loadFromYaml(yaml, decompressor); + auto decompressor_factory = + 
std::make_unique>(); + decompressor_factory_ = decompressor_factory.get(); + config_ = std::make_shared(decompressor, "test.", stats_, runtime_, + std::move(decompressor_factory)); + filter_ = std::make_unique(config_); + filter_->setDecoderFilterCallbacks(decoder_callbacks_); + filter_->setEncoderFilterCallbacks(encoder_callbacks_); + } + + bool isRequestDirection() { return GetParam(); } + + std::unique_ptr doHeaders(const Http::HeaderMap& headers, + const bool end_stream) { + if (isRequestDirection()) { + auto request_headers = std::make_unique(headers); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, + filter_->decodeHeaders(*request_headers, end_stream)); + return request_headers; + } else { + auto response_headers = std::make_unique(headers); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, + filter_->encodeHeaders(*response_headers, end_stream)); + return response_headers; + } + } + + void doData(Buffer::Instance& buffer, const bool end_stream) { + if (isRequestDirection()) { + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(buffer, end_stream)); + } else { + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(buffer, end_stream)); + } + } + + void expectDecompression(Compression::Decompressor::MockDecompressor* decompressor_ptr) { + EXPECT_CALL(*decompressor_ptr, decompress(_, _)) + .Times(2) + .WillRepeatedly( + Invoke([&](const Buffer::Instance& input_buffer, Buffer::Instance& output_buffer) { + TestUtility::feedBufferWithRandomCharacters(output_buffer, 2 * input_buffer.length()); + })); + Buffer::OwnedImpl buffer; + TestUtility::feedBufferWithRandomCharacters(buffer, 10); + EXPECT_EQ(10, buffer.length()); + doData(buffer, false /* end_stream */); + EXPECT_EQ(20, buffer.length()); + doData(buffer, true /* end_stream */); + EXPECT_EQ(40, buffer.length()); + } + + void expectNoDecompression() { + Buffer::OwnedImpl buffer; + TestUtility::feedBufferWithRandomCharacters(buffer, 10); + EXPECT_EQ(10, buffer.length()); + 
doData(buffer, true /* end_stream */); + EXPECT_EQ(10, buffer.length()); + } + + void decompressionActive(const Http::HeaderMap& headers_before_filter, + const absl::optional expected_content_encoding, + const absl::optional expected_accept_encoding = "mock") { + // Keep the decompressor to set expectations about it + auto decompressor = std::make_unique(); + auto* decompressor_ptr = decompressor.get(); + EXPECT_CALL(*decompressor_factory_, createDecompressor(_)) + .WillOnce(Return(ByMove(std::move(decompressor)))); + + std::unique_ptr headers_after_filter = + doHeaders(headers_before_filter, false /* end_stream */); + + // The filter removes Content-Length + EXPECT_EQ(nullptr, headers_after_filter->ContentLength()); + + // The filter removes the decompressor's content encoding from the Content-Encoding header. + if (expected_content_encoding.has_value()) { + EXPECT_EQ(expected_content_encoding.value(), + headers_after_filter->get(Http::CustomHeaders::get().ContentEncoding) + ->value() + .getStringView()); + } else { + EXPECT_EQ(nullptr, headers_after_filter->get(Http::CustomHeaders::get().ContentEncoding)); + } + + // The filter adds the decompressor's content encoding to the Accept-Encoding header on the + // request direction. 
+ const auto* accept_encoding = + headers_after_filter->get(Http::LowerCaseString{"accept-encoding"}); + if (isRequestDirection() && expected_accept_encoding.has_value()) { + EXPECT_EQ(expected_accept_encoding.value(), accept_encoding->value().getStringView()); + } else { + EXPECT_EQ(nullptr, accept_encoding); + } + + expectDecompression(decompressor_ptr); + } + + Compression::Decompressor::MockDecompressorFactory* decompressor_factory_{}; + DecompressorFilterConfigSharedPtr config_; + std::unique_ptr filter_; + Stats::TestUtil::TestStore stats_; + NiceMock runtime_; + NiceMock decoder_callbacks_; + NiceMock encoder_callbacks_; +}; + +INSTANTIATE_TEST_SUITE_P(IsRequestDirection, DecompressorFilterTest, + ::testing::Values(true, false)); + +TEST_P(DecompressorFilterTest, DecompressionActive) { + Http::TestRequestHeaderMapImpl headers_before_filter{{"content-encoding", "mock"}, + {"content-length", "256"}}; + decompressionActive(headers_before_filter, absl::nullopt /* expected_content_encoding */); +} + +TEST_P(DecompressorFilterTest, DecompressionActiveContentEncodingSpacing) { + // Additional spacing should still match. + Http::TestRequestHeaderMapImpl headers_before_filter{{"content-encoding", " mock "}, + {"content-length", "256"}}; + decompressionActive(headers_before_filter, absl::nullopt /* expected_content_encoding */); +} + +TEST_P(DecompressorFilterTest, DecompressionActiveContentEncodingCasing) { + // Different casing should still match. + Http::TestRequestHeaderMapImpl headers_before_filter{{"content-encoding", "MOCK"}, + {"content-length", "256"}}; + decompressionActive(headers_before_filter, absl::nullopt /* expected_content_encoding */); +} + +TEST_P(DecompressorFilterTest, DecompressionActiveMultipleEncodings) { + // If the first encoding in the Content-Encoding header is the configured value, the filter should + // still be active. 
+ Http::TestRequestHeaderMapImpl headers_before_filter{{"content-encoding", "mock, br"}, + {"content-length", "256"}}; + decompressionActive(headers_before_filter, "br"); +} + +TEST_P(DecompressorFilterTest, DecompressionActiveMultipleEncodings2) { + // If the first encoding in the Content-Encoding header is the configured value, the filter should + // still be active. + Http::TestRequestHeaderMapImpl headers_before_filter{{"content-encoding", "mock, br , gzip "}, + {"content-length", "256"}}; + decompressionActive(headers_before_filter, "br , gzip"); +} + +TEST_P(DecompressorFilterTest, DisableAdvertiseAcceptEncoding) { + setUpFilter(R"EOF( +decompressor_library: + typed_config: + "@type": "type.googleapis.com/envoy.extensions.compression.gzip.decompressor.v3.Gzip" +request_direction_config: + advertise_accept_encoding: false +)EOF"); + + Http::TestRequestHeaderMapImpl headers_before_filter{{"content-encoding", "mock"}, + {"content-length", "256"}}; + decompressionActive(headers_before_filter, absl::nullopt /* expected_content_encoding*/, + absl::nullopt /* expected_accept_encoding */); +} + +TEST_P(DecompressorFilterTest, ExplicitlyEnableAdvertiseAcceptEncoding) { + setUpFilter(R"EOF( +decompressor_library: + typed_config: + "@type": "type.googleapis.com/envoy.extensions.compression.gzip.decompressor.v3.Gzip" +request_direction_config: + advertise_accept_encoding: true +)EOF"); + + Http::TestRequestHeaderMapImpl headers_before_filter{{"content-encoding", "mock"}, + {"content-length", "256"}}; + if (isRequestDirection()) { + // Also test that the filter appends to an already existing header. 
+ headers_before_filter.addCopy("accept-encoding", "br"); + } + decompressionActive(headers_before_filter, absl::nullopt /* expected_content_encoding*/, + "br,mock" /* expected_accept_encoding */); +} + +TEST_P(DecompressorFilterTest, DecompressionDisabled) { + setUpFilter(R"EOF( +decompressor_library: + typed_config: + "@type": "type.googleapis.com/envoy.extensions.compression.gzip.decompressor.v3.Gzip" +request_direction_config: + common_config: + enabled: + default_value: false + runtime_key: does_not_exist +response_direction_config: + common_config: + enabled: + default_value: false + runtime_key: does_not_exist +)EOF"); + + EXPECT_CALL(*decompressor_factory_, createDecompressor(_)).Times(0); + Http::TestRequestHeaderMapImpl headers_before_filter{{"content-encoding", "mock"}, + {"content-length", "256"}}; + std::unique_ptr headers_after_filter = + doHeaders(headers_before_filter, false /* end_stream */); + EXPECT_THAT(headers_after_filter, HeaderMapEqualIgnoreOrder(&headers_before_filter)); + + expectNoDecompression(); +} + +TEST_P(DecompressorFilterTest, RequestDecompressionDisabled) { + setUpFilter(R"EOF( +decompressor_library: + typed_config: + "@type": "type.googleapis.com/envoy.extensions.compression.gzip.decompressor.v3.Gzip" +request_direction_config: + common_config: + enabled: + default_value: false + runtime_key: does_not_exist +)EOF"); + + Http::TestRequestHeaderMapImpl headers_before_filter{{"content-encoding", "mock"}, + {"content-length", "256"}}; + + if (isRequestDirection()) { + EXPECT_CALL(*decompressor_factory_, createDecompressor(_)).Times(0); + std::unique_ptr headers_after_filter = + doHeaders(headers_before_filter, false /* end_stream */); + + // The request direction adds Accept-Encoding by default. Other than this header, the rest of + // the headers should be the same before and after the filter. 
+ headers_after_filter->remove(Http::LowerCaseString("accept-encoding")); + EXPECT_THAT(headers_after_filter, HeaderMapEqualIgnoreOrder(&headers_before_filter)); + + expectNoDecompression(); + } else { + decompressionActive(headers_before_filter, absl::nullopt /* expected_content_encoding*/, + "mock" /* expected_accept_encoding */); + } +} + +TEST_P(DecompressorFilterTest, ResponseDecompressionDisabled) { + setUpFilter(R"EOF( +decompressor_library: + typed_config: + "@type": "type.googleapis.com/envoy.extensions.compression.gzip.decompressor.v3.Gzip" +response_direction_config: + common_config: + enabled: + default_value: false + runtime_key: does_not_exist +)EOF"); + + Http::TestRequestHeaderMapImpl headers_before_filter{{"content-encoding", "mock"}, + {"content-length", "256"}}; + + if (isRequestDirection()) { + // Accept-Encoding is not advertised in the request headers when response decompression is + // disabled. + decompressionActive(headers_before_filter, absl::nullopt /* expected_content_encoding*/, + absl::nullopt /* expected_accept_encoding */); + } else { + EXPECT_CALL(*decompressor_factory_, createDecompressor(_)).Times(0); + std::unique_ptr headers_after_filter = + doHeaders(headers_before_filter, false /* end_stream */); + + EXPECT_THAT(headers_after_filter, HeaderMapEqualIgnoreOrder(&headers_before_filter)); + + expectNoDecompression(); + } +} + +TEST_P(DecompressorFilterTest, NoDecompressionHeadersOnly) { + EXPECT_CALL(*decompressor_factory_, createDecompressor(_)).Times(0); + Http::TestRequestHeaderMapImpl headers_before_filter; + std::unique_ptr headers_after_filter = + doHeaders(headers_before_filter, true /* end_stream */); + + if (isRequestDirection()) { + ASSERT_EQ(headers_after_filter->get(Http::LowerCaseString("accept-encoding")) + ->value() + .getStringView(), + "mock"); + // The request direction adds Accept-Encoding by default, even for header-only requests. 
+ // Other than this header, the rest of the headers should be the same before and after the
+ // filter.
+ headers_after_filter->remove(Http::LowerCaseString("accept-encoding"));
+ }
+ EXPECT_THAT(headers_after_filter, HeaderMapEqualIgnoreOrder(&headers_before_filter));
+}
+
+TEST_P(DecompressorFilterTest, NoDecompressionContentEncodingAbsent) {
+ EXPECT_CALL(*decompressor_factory_, createDecompressor(_)).Times(0);
+ Http::TestRequestHeaderMapImpl headers_before_filter{{"content-length", "256"}};
+ std::unique_ptr headers_after_filter =
+ doHeaders(headers_before_filter, false /* end_stream */);
+
+ if (isRequestDirection()) {
+ ASSERT_EQ(headers_after_filter->get(Http::LowerCaseString("accept-encoding"))
+ ->value()
+ .getStringView(),
+ "mock");
+ // The request direction adds Accept-Encoding by default. Other than this header, the rest of
+ // the headers should be the same before and after the filter.
+ headers_after_filter->remove(Http::LowerCaseString("accept-encoding"));
+ }
+ EXPECT_THAT(headers_after_filter, HeaderMapEqualIgnoreOrder(&headers_before_filter));
+
+ expectNoDecompression();
+}
+
+TEST_P(DecompressorFilterTest, NoDecompressionContentEncodingDoesNotMatch) {
+ EXPECT_CALL(*decompressor_factory_, createDecompressor(_)).Times(0);
+ Http::TestRequestHeaderMapImpl headers_before_filter{{"content-encoding", "not-matching"},
+ {"content-length", "256"}};
+ std::unique_ptr headers_after_filter =
+ doHeaders(headers_before_filter, false /* end_stream */);
+
+ expectNoDecompression();
+}
+
+TEST_P(DecompressorFilterTest, NoDecompressionContentEncodingNotCurrent) {
+ EXPECT_CALL(*decompressor_factory_, createDecompressor(_)).Times(0);
+ // The decompressor's content scheme is not the first value in the comma-delimited list in the
+ // Content-Encoding header. Therefore, decompression will not occur. 
+ Http::TestRequestHeaderMapImpl headers_before_filter{{"content-encoding", "gzip,mock"}, + {"content-length", "256"}}; + std::unique_ptr headers_after_filter = + doHeaders(headers_before_filter, false /* end_stream */); + + if (isRequestDirection()) { + ASSERT_EQ(headers_after_filter->get(Http::LowerCaseString("accept-encoding")) + ->value() + .getStringView(), + "mock"); + // The request direction adds Accept-Encoding by default. Other than this header, the rest of + // the headers should be the same before and after the filter. + headers_after_filter->remove(Http::LowerCaseString("accept-encoding")); + } + EXPECT_THAT(headers_after_filter, HeaderMapEqualIgnoreOrder(&headers_before_filter)); + + expectNoDecompression(); +} + +TEST_P(DecompressorFilterTest, NoResponseDecompressionNoTransformPresent) { + EXPECT_CALL(*decompressor_factory_, createDecompressor(_)).Times(0); + Http::TestRequestHeaderMapImpl headers_before_filter{ + {"cache-control", Http::CustomHeaders::get().CacheControlValues.NoTransform}, + {"content-encoding", "mock"}, + {"content-length", "256"}}; + std::unique_ptr headers_after_filter = + doHeaders(headers_before_filter, false /* end_stream */); + + if (isRequestDirection()) { + ASSERT_EQ(headers_after_filter->get(Http::LowerCaseString("accept-encoding")) + ->value() + .getStringView(), + "mock"); + // The request direction adds Accept-Encoding by default. Other than this header, the rest of + // the headers should be the same before and after the filter. 
+ headers_after_filter->remove(Http::LowerCaseString("accept-encoding")); + } + EXPECT_THAT(headers_after_filter, HeaderMapEqualIgnoreOrder(&headers_before_filter)); + + expectNoDecompression(); +} + +TEST_P(DecompressorFilterTest, NoResponseDecompressionNoTransformPresentInList) { + EXPECT_CALL(*decompressor_factory_, createDecompressor(_)).Times(0); + Http::TestRequestHeaderMapImpl headers_before_filter{ + {"cache-control", fmt::format("{}, {}", Http::CustomHeaders::get().CacheControlValues.NoCache, + Http::CustomHeaders::get().CacheControlValues.NoTransform)}, + {"content-encoding", "mock"}, + {"content-length", "256"}}; + std::unique_ptr headers_after_filter = + doHeaders(headers_before_filter, false /* end_stream */); + + if (isRequestDirection()) { + ASSERT_EQ(headers_after_filter->get(Http::LowerCaseString("accept-encoding")) + ->value() + .getStringView(), + "mock"); + // The request direction adds Accept-Encoding by default. Other than this header, the rest of + // the headers should be the same before and after the filter. 
+ headers_after_filter->remove(Http::LowerCaseString("accept-encoding")); + } + EXPECT_THAT(headers_after_filter, HeaderMapEqualIgnoreOrder(&headers_before_filter)); + + expectNoDecompression(); +} + +TEST_P(DecompressorFilterTest, DecompressionLibraryNotRegistered) { + EXPECT_THROW_WITH_MESSAGE( + setUpFilter(R"EOF( +decompressor_library: + typed_config: + "@type": "type.googleapis.com/envoy.extensions.compression.does_not_exist" +)EOF"), + EnvoyException, + "Unable to parse JSON as proto (INVALID_ARGUMENT:(decompressor_library.typed_config): " + "invalid value Invalid type URL, unknown type: envoy.extensions.compression.does_not_exist " + "for type Any): " + "{\"decompressor_library\":{\"typed_config\":{\"@type\":\"type.googleapis.com/" + "envoy.extensions.compression.does_not_exist\"}}}"); +} + +} // namespace +} // namespace Decompressor +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/http/dynamic_forward_proxy/BUILD b/test/extensions/filters/http/dynamic_forward_proxy/BUILD index 6902c9dd718f3..58d9e9a922466 100644 --- a/test/extensions/filters/http/dynamic_forward_proxy/BUILD +++ b/test/extensions/filters/http/dynamic_forward_proxy/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( @@ -16,11 +16,16 @@ envoy_extension_cc_test( srcs = ["proxy_filter_test.cc"], extension_name = "envoy.filters.http.dynamic_forward_proxy", deps = [ + "//source/common/stats:isolated_store_lib", + "//source/extensions/clusters:well_known_names", + "//source/extensions/common/dynamic_forward_proxy:dns_cache_impl", "//source/extensions/filters/http:well_known_names", "//source/extensions/filters/http/dynamic_forward_proxy:config", "//test/extensions/common/dynamic_forward_proxy:mocks", "//test/mocks/http:http_mocks", 
"//test/mocks/upstream:upstream_mocks", + "//test/test_common:test_runtime_lib", + "@envoy_api//envoy/config/cluster/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/http/dynamic_forward_proxy/v3:pkg_cc_proto", ], ) @@ -32,6 +37,7 @@ envoy_extension_cc_test( "//test/config/integration/certs", ], extension_name = "envoy.filters.http.dynamic_forward_proxy", + tags = ["fails_on_windows"], deps = [ "//source/extensions/clusters/dynamic_forward_proxy:cluster", "//source/extensions/filters/http/dynamic_forward_proxy:config", diff --git a/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_integration_test.cc b/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_integration_test.cc index dc607359ca09e..e066cf482805a 100644 --- a/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_integration_test.cc +++ b/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_integration_test.cc @@ -18,20 +18,22 @@ class ProxyFilterIntegrationTest : public testing::TestWithParammutable_typed_config()->PackFrom(tls_context); } - const std::string cluster_type_config = - fmt::format(R"EOF( + const std::string cluster_type_config = fmt::format( + R"EOF( name: envoy.clusters.dynamic_forward_proxy typed_config: - "@type": type.googleapis.com/envoy.config.cluster.dynamic_forward_proxy.v2alpha.ClusterConfig + "@type": type.googleapis.com/envoy.extensions.clusters.dynamic_forward_proxy.v3.ClusterConfig dns_cache_config: name: foo dns_lookup_family: {} max_hosts: {} + dns_cache_circuit_breaker: + max_pending_requests: {} )EOF", - Network::Test::ipVersionToDnsFamily(GetParam()), max_hosts); + Network::Test::ipVersionToDnsFamily(GetParam()), max_hosts, max_pending_requests); TestUtility::loadFromYaml(cluster_type_config, *cluster_.mutable_cluster_type()); + cluster_.mutable_circuit_breakers() + ->add_thresholds() + ->mutable_max_pending_requests() + ->set_value(max_pending_requests); // Load the CDS cluster and wait for it to initialize. 
cds_helper_.setCds({cluster_}); @@ -93,6 +101,11 @@ name: envoy.clusters.dynamic_forward_proxy } } + void disableDnsCacheCircuitBreakers() { + config_helper_.addRuntimeOverride("envoy.reloadable_features.enable_dns_cache_circuit_breakers", + "false"); + } + bool upstream_tls_{}; std::string upstream_cert_name_{"upstreamlocalhost"}; CdsHelper cds_helper_; @@ -128,6 +141,30 @@ TEST_P(ProxyFilterIntegrationTest, RequestWithBody) { EXPECT_EQ(1, test_server_->counter("dns_cache.foo.host_added")->value()); } +TEST_P(ProxyFilterIntegrationTest, RequestWithBodyWithClusterCircuitBreaker) { + disableDnsCacheCircuitBreakers(); + setup(); + codec_client_ = makeHttpConnection(lookupPort("http")); + const Http::TestRequestHeaderMapImpl request_headers{ + {":method", "POST"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", + fmt::format("localhost:{}", fake_upstreams_[0]->localAddress()->ip()->port())}}; + + auto response = + sendRequestAndWaitForResponse(request_headers, 1024, default_response_headers_, 1024); + checkSimpleRequestSuccess(1024, 1024, response.get()); + EXPECT_EQ(1, test_server_->counter("dns_cache.foo.dns_query_attempt")->value()); + EXPECT_EQ(1, test_server_->counter("dns_cache.foo.host_added")->value()); + + // Now send another request. This should hit the DNS cache. + response = sendRequestAndWaitForResponse(request_headers, 512, default_response_headers_, 512); + checkSimpleRequestSuccess(512, 512, response.get()); + EXPECT_EQ(1, test_server_->counter("dns_cache.foo.dns_query_attempt")->value()); + EXPECT_EQ(1, test_server_->counter("dns_cache.foo.host_added")->value()); +} + // Verify that after we populate the cache and reload the cluster we reattach to the cache with // its existing hosts. 
TEST_P(ProxyFilterIntegrationTest, ReloadClusterAndAttachToCache) { @@ -214,7 +251,7 @@ TEST_P(ProxyFilterIntegrationTest, DNSCacheHostOverflow) { {":authority", fmt::format("localhost2", fake_upstreams_[0]->localAddress()->ip()->port())}}; response = codec_client_->makeHeaderOnlyRequest(request_headers2); response->waitForEndStream(); - EXPECT_EQ("503", response->headers().Status()->value().getStringView()); + EXPECT_EQ("503", response->headers().getStatusValue()); EXPECT_EQ(1, test_server_->counter("dns_cache.foo.host_overflow")->value()); } @@ -236,8 +273,7 @@ TEST_P(ProxyFilterIntegrationTest, UpstreamTls) { const Extensions::TransportSockets::Tls::SslSocketInfo* ssl_socket = dynamic_cast( fake_upstream_connection_->connection().ssl().get()); - EXPECT_STREQ("localhost", - SSL_get_servername(ssl_socket->rawSslForTest(), TLSEXT_NAMETYPE_host_name)); + EXPECT_STREQ("localhost", SSL_get_servername(ssl_socket->ssl(), TLSEXT_NAMETYPE_host_name)); upstream_request_->encodeHeaders(default_response_headers_, true); response->waitForEndStream(); @@ -262,7 +298,7 @@ TEST_P(ProxyFilterIntegrationTest, UpstreamTlsWithIpHost) { const Extensions::TransportSockets::Tls::SslSocketInfo* ssl_socket = dynamic_cast( fake_upstream_connection_->connection().ssl().get()); - EXPECT_STREQ(nullptr, SSL_get_servername(ssl_socket->rawSslForTest(), TLSEXT_NAMETYPE_host_name)); + EXPECT_STREQ(nullptr, SSL_get_servername(ssl_socket->ssl(), TLSEXT_NAMETYPE_host_name)); upstream_request_->encodeHeaders(default_response_headers_, true); response->waitForEndStream(); @@ -289,10 +325,49 @@ TEST_P(ProxyFilterIntegrationTest, UpstreamTlsInvalidSAN) { auto response = codec_client_->makeHeaderOnlyRequest(request_headers); response->waitForEndStream(); - EXPECT_EQ("503", response->headers().Status()->value().getStringView()); + EXPECT_EQ("503", response->headers().getStatusValue()); EXPECT_EQ(1, test_server_->counter("cluster.cluster_0.ssl.fail_verify_san")->value()); } 
+TEST_P(ProxyFilterIntegrationTest, DnsCacheCircuitBreakersInvoked) { + setup(1024, 0); + + codec_client_ = makeHttpConnection(lookupPort("http")); + const Http::TestRequestHeaderMapImpl request_headers{ + {":method", "POST"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", + fmt::format("localhost:{}", fake_upstreams_[0]->localAddress()->ip()->port())}}; + + auto response = codec_client_->makeRequestWithBody(request_headers, 1024); + response->waitForEndStream(); + EXPECT_EQ(1, test_server_->counter("dns_cache.foo.dns_rq_pending_overflow")->value()); + + EXPECT_TRUE(response->complete()); + EXPECT_EQ("503", response->headers().Status()->value().getStringView()); +} + +TEST_P(ProxyFilterIntegrationTest, ClusterCircuitBreakersInvoked) { + disableDnsCacheCircuitBreakers(); + setup(1024, 0); + + codec_client_ = makeHttpConnection(lookupPort("http")); + const Http::TestRequestHeaderMapImpl request_headers{ + {":method", "POST"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", + fmt::format("localhost:{}", fake_upstreams_[0]->localAddress()->ip()->port())}}; + + auto response = codec_client_->makeRequestWithBody(request_headers, 1024); + response->waitForEndStream(); + EXPECT_EQ(1, test_server_->counter("cluster.cluster_0.upstream_rq_pending_overflow")->value()); + + EXPECT_TRUE(response->complete()); + EXPECT_EQ("503", response->headers().Status()->value().getStringView()); +} + } // namespace } // namespace Envoy diff --git a/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_test.cc b/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_test.cc index 17dd35d8cd3ef..7a2bffbbcaec5 100644 --- a/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_test.cc +++ b/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_test.cc @@ -1,5 +1,8 @@ +#include "envoy/config/cluster/v3/cluster.pb.h" #include "envoy/extensions/filters/http/dynamic_forward_proxy/v3/dynamic_forward_proxy.pb.h" 
+#include "extensions/clusters/well_known_names.h" +#include "extensions/common/dynamic_forward_proxy/dns_cache_impl.h" #include "extensions/filters/http/dynamic_forward_proxy/proxy_filter.h" #include "extensions/filters/http/well_known_names.h" @@ -7,6 +10,7 @@ #include "test/mocks/http/mocks.h" #include "test/mocks/upstream/mocks.h" #include "test/mocks/upstream/transport_socket_match.h" +#include "test/test_common/test_runtime.h" using testing::AtLeast; using testing::Eq; @@ -19,6 +23,8 @@ namespace HttpFilters { namespace DynamicForwardProxy { namespace { +using CustomClusterType = envoy::config::cluster::v3::Cluster::CustomClusterType; + using LoadDnsCacheEntryStatus = Common::DynamicForwardProxy::DnsCache::LoadDnsCacheEntryStatus; using MockLoadDnsCacheEntryResult = Common::DynamicForwardProxy::MockDnsCache::MockLoadDnsCacheEntryResult; @@ -42,6 +48,12 @@ class ProxyFilterTest : public testing::Test, EXPECT_CALL(callbacks_, connection()).Times(AtLeast(0)); EXPECT_CALL(callbacks_, streamId()).Times(AtLeast(0)); + // Configure upstream cluster to be a Dynamic Forward Proxy since that's the + // kind we need to do DNS entries for. + CustomClusterType cluster_type; + cluster_type.set_name(Envoy::Extensions::Clusters::ClusterTypes::get().DynamicForwardProxy); + cm_.thread_local_cluster_.cluster_.info_->cluster_type_ = cluster_type; + // Configure max pending to 1 so we can test circuit breaking. cm_.thread_local_cluster_.cluster_.info_->resetResourceManager(0, 1, 0, 0, 0); } @@ -65,14 +77,19 @@ class ProxyFilterTest : public testing::Test, std::unique_ptr filter_; Http::MockStreamDecoderFilterCallbacks callbacks_; Http::TestRequestHeaderMapImpl request_headers_{{":authority", "foo"}}; + NiceMock pending_requests_; }; // Default port 80 if upstream TLS not configured. 
TEST_F(ProxyFilterTest, HttpDefaultPort) { + Upstream::ResourceAutoIncDec* circuit_breakers_( + new Upstream::ResourceAutoIncDec(pending_requests_)); InSequence s; EXPECT_CALL(callbacks_, route()); EXPECT_CALL(cm_, get(_)); + EXPECT_CALL(*dns_cache_manager_->dns_cache_, canCreateDnsRequest_(_)) + .WillOnce(Return(circuit_breakers_)); EXPECT_CALL(*transport_socket_factory_, implementsSecureTransport()).WillOnce(Return(false)); Extensions::Common::DynamicForwardProxy::MockLoadDnsCacheEntryHandle* handle = new Extensions::Common::DynamicForwardProxy::MockLoadDnsCacheEntryHandle(); @@ -87,10 +104,14 @@ TEST_F(ProxyFilterTest, HttpDefaultPort) { // Default port 443 if upstream TLS is configured. TEST_F(ProxyFilterTest, HttpsDefaultPort) { + Upstream::ResourceAutoIncDec* circuit_breakers_( + new Upstream::ResourceAutoIncDec(pending_requests_)); InSequence s; EXPECT_CALL(callbacks_, route()); EXPECT_CALL(cm_, get(_)); + EXPECT_CALL(*dns_cache_manager_->dns_cache_, canCreateDnsRequest_(_)) + .WillOnce(Return(circuit_breakers_)); EXPECT_CALL(*transport_socket_factory_, implementsSecureTransport()).WillOnce(Return(true)); Extensions::Common::DynamicForwardProxy::MockLoadDnsCacheEntryHandle* handle = new Extensions::Common::DynamicForwardProxy::MockLoadDnsCacheEntryHandle(); @@ -105,10 +126,14 @@ TEST_F(ProxyFilterTest, HttpsDefaultPort) { // Cache overflow. 
TEST_F(ProxyFilterTest, CacheOverflow) { + Upstream::ResourceAutoIncDec* circuit_breakers_( + new Upstream::ResourceAutoIncDec(pending_requests_)); InSequence s; EXPECT_CALL(callbacks_, route()); EXPECT_CALL(cm_, get(_)); + EXPECT_CALL(*dns_cache_manager_->dns_cache_, canCreateDnsRequest_(_)) + .WillOnce(Return(circuit_breakers_)); EXPECT_CALL(*transport_socket_factory_, implementsSecureTransport()).WillOnce(Return(true)); EXPECT_CALL(*dns_cache_manager_->dns_cache_, loadDnsCacheEntry_(Eq("foo"), 443, _)) .WillOnce(Return(MockLoadDnsCacheEntryResult{LoadDnsCacheEntryStatus::Overflow, nullptr})); @@ -124,10 +149,18 @@ TEST_F(ProxyFilterTest, CacheOverflow) { // Circuit breaker overflow TEST_F(ProxyFilterTest, CircuitBreakerOverflow) { + // Disable dns cache circuit breakers because we expect the cluster circuit breakers to be used. + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.enable_dns_cache_circuit_breakers", "false"}}); + Upstream::ResourceAutoIncDec* circuit_breakers_( + new Upstream::ResourceAutoIncDec(pending_requests_)); InSequence s; EXPECT_CALL(callbacks_, route()); EXPECT_CALL(cm_, get(_)); + EXPECT_CALL(*dns_cache_manager_->dns_cache_, canCreateDnsRequest_(_)) + .WillOnce(Return(circuit_breakers_)); EXPECT_CALL(*transport_socket_factory_, implementsSecureTransport()).WillOnce(Return(true)); Extensions::Common::DynamicForwardProxy::MockLoadDnsCacheEntryHandle* handle = new Extensions::Common::DynamicForwardProxy::MockLoadDnsCacheEntryHandle(); @@ -141,6 +174,7 @@ TEST_F(ProxyFilterTest, CircuitBreakerOverflow) { filter2->setDecoderFilterCallbacks(callbacks_); EXPECT_CALL(callbacks_, route()); EXPECT_CALL(cm_, get(_)); + EXPECT_CALL(*dns_cache_manager_->dns_cache_, canCreateDnsRequest_(_)); EXPECT_CALL(callbacks_, sendLocalReply(Http::Code::ServiceUnavailable, Eq("Dynamic forward proxy pending request overflow"), _, _, Eq("Dynamic forward proxy pending request overflow"))); @@
-156,6 +190,46 @@ TEST_F(ProxyFilterTest, CircuitBreakerOverflow) { filter_->onDestroy(); } +// Circuit breaker overflow with DNS Cache resource manager +TEST_F(ProxyFilterTest, CircuitBreakerOverflowWithDnsCacheResourceManager) { + Upstream::ResourceAutoIncDec* circuit_breakers_( + new Upstream::ResourceAutoIncDec(pending_requests_)); + InSequence s; + + EXPECT_CALL(callbacks_, route()); + EXPECT_CALL(cm_, get(_)); + EXPECT_CALL(*dns_cache_manager_->dns_cache_, canCreateDnsRequest_(_)) + .WillOnce(Return(circuit_breakers_)); + EXPECT_CALL(*transport_socket_factory_, implementsSecureTransport()).WillOnce(Return(true)); + Extensions::Common::DynamicForwardProxy::MockLoadDnsCacheEntryHandle* handle = + new Extensions::Common::DynamicForwardProxy::MockLoadDnsCacheEntryHandle(); + EXPECT_CALL(*dns_cache_manager_->dns_cache_, loadDnsCacheEntry_(Eq("foo"), 443, _)) + .WillOnce(Return(MockLoadDnsCacheEntryResult{LoadDnsCacheEntryStatus::Loading, handle})); + EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark, + filter_->decodeHeaders(request_headers_, false)); + + // Create a second filter for a 2nd request. + auto filter2 = std::make_unique(filter_config_); + filter2->setDecoderFilterCallbacks(callbacks_); + EXPECT_CALL(callbacks_, route()); + EXPECT_CALL(cm_, get(_)); + EXPECT_CALL(*dns_cache_manager_->dns_cache_, canCreateDnsRequest_(_)); + EXPECT_CALL(callbacks_, sendLocalReply(Http::Code::ServiceUnavailable, + Eq("Dynamic forward proxy pending request overflow"), _, _, + Eq("Dynamic forward proxy pending request overflow"))); + EXPECT_CALL(callbacks_, encodeHeaders_(_, false)); + EXPECT_CALL(callbacks_, encodeData(_, true)); + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + filter2->decodeHeaders(request_headers_, false)); + + // Cluster circuit breaker overflow counter won't be incremented. 
+ EXPECT_EQ(0, + cm_.thread_local_cluster_.cluster_.info_->stats_.upstream_rq_pending_overflow_.value()); + filter2->onDestroy(); + EXPECT_CALL(*handle, onDestroy()); + filter_->onDestroy(); +} + // No route handling. TEST_F(ProxyFilterTest, NoRoute) { InSequence s; @@ -173,7 +247,33 @@ TEST_F(ProxyFilterTest, NoCluster) { EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false)); } +// No cluster type leads to skipping DNS lookups. +TEST_F(ProxyFilterTest, NoClusterType) { + cm_.thread_local_cluster_.cluster_.info_->cluster_type_ = absl::nullopt; + + InSequence s; + + EXPECT_CALL(callbacks_, route()); + EXPECT_CALL(cm_, get(_)); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false)); +} + +// Cluster that isn't a dynamic forward proxy cluster +TEST_F(ProxyFilterTest, NonDynamicForwardProxy) { + CustomClusterType cluster_type; + cluster_type.set_name(Envoy::Extensions::Clusters::ClusterTypes::get().Static); + cm_.thread_local_cluster_.cluster_.info_->cluster_type_ = cluster_type; + + InSequence s; + + EXPECT_CALL(callbacks_, route()); + EXPECT_CALL(cm_, get(_)); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false)); +} + TEST_F(ProxyFilterTest, HostRewrite) { + Upstream::ResourceAutoIncDec* circuit_breakers_( + new Upstream::ResourceAutoIncDec(pending_requests_)); InSequence s; envoy::extensions::filters::http::dynamic_forward_proxy::v3::PerRouteConfig proto_config; @@ -182,6 +282,8 @@ TEST_F(ProxyFilterTest, HostRewrite) { EXPECT_CALL(callbacks_, route()); EXPECT_CALL(cm_, get(_)); + EXPECT_CALL(*dns_cache_manager_->dns_cache_, canCreateDnsRequest_(_)) + .WillOnce(Return(circuit_breakers_)); EXPECT_CALL(*transport_socket_factory_, implementsSecureTransport()).WillOnce(Return(false)); Extensions::Common::DynamicForwardProxy::MockLoadDnsCacheEntryHandle* handle = new Extensions::Common::DynamicForwardProxy::MockLoadDnsCacheEntryHandle(); @@ 
-198,6 +300,8 @@ TEST_F(ProxyFilterTest, HostRewrite) { } TEST_F(ProxyFilterTest, HostRewriteViaHeader) { + Upstream::ResourceAutoIncDec* circuit_breakers_( + new Upstream::ResourceAutoIncDec(pending_requests_)); InSequence s; envoy::extensions::filters::http::dynamic_forward_proxy::v3::PerRouteConfig proto_config; @@ -206,6 +310,8 @@ TEST_F(ProxyFilterTest, HostRewriteViaHeader) { EXPECT_CALL(callbacks_, route()); EXPECT_CALL(cm_, get(_)); + EXPECT_CALL(*dns_cache_manager_->dns_cache_, canCreateDnsRequest_(_)) + .WillOnce(Return(circuit_breakers_)); EXPECT_CALL(*transport_socket_factory_, implementsSecureTransport()).WillOnce(Return(false)); Extensions::Common::DynamicForwardProxy::MockLoadDnsCacheEntryHandle* handle = new Extensions::Common::DynamicForwardProxy::MockLoadDnsCacheEntryHandle(); diff --git a/test/extensions/filters/http/dynamo/BUILD b/test/extensions/filters/http/dynamo/BUILD index 4fcc77be74124..1fb51d2ffff22 100644 --- a/test/extensions/filters/http/dynamo/BUILD +++ b/test/extensions/filters/http/dynamo/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( @@ -56,7 +56,7 @@ envoy_extension_cc_test( extension_name = "envoy.filters.http.dynamo", deps = [ "//source/extensions/filters/http/dynamo:config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "@envoy_api//envoy/extensions/filters/http/dynamo/v3:pkg_cc_proto", ], ) diff --git a/test/extensions/filters/http/dynamo/config_test.cc b/test/extensions/filters/http/dynamo/config_test.cc index 54d5dbc6f1437..e1ed56fbcb8a0 100644 --- a/test/extensions/filters/http/dynamo/config_test.cc +++ b/test/extensions/filters/http/dynamo/config_test.cc @@ -3,7 +3,7 @@ #include "extensions/filters/http/dynamo/config.h" -#include "test/mocks/server/mocks.h" +#include 
"test/mocks/server/factory_context.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git a/test/extensions/filters/http/dynamo/dynamo_request_parser_test.cc b/test/extensions/filters/http/dynamo/dynamo_request_parser_test.cc index 7d5f26d05095a..69d616ddebc6d 100644 --- a/test/extensions/filters/http/dynamo/dynamo_request_parser_test.cc +++ b/test/extensions/filters/http/dynamo/dynamo_request_parser_test.cc @@ -20,25 +20,26 @@ namespace { TEST(DynamoRequestParser, parseOperation) { // Well formed x-amz-target header, in a format, Version.Operation { - Http::TestHeaderMapImpl headers{{"X", "X"}, {"x-amz-target", "X.Operation"}}; + Http::TestRequestHeaderMapImpl headers{{"X", "X"}, {"x-amz-target", "X.Operation"}}; EXPECT_EQ("Operation", RequestParser::parseOperation(headers)); } // Not well formed x-amz-target header. { - Http::TestHeaderMapImpl headers{{"X", "X"}, {"x-amz-target", "X,Operation"}}; + Http::TestRequestHeaderMapImpl headers{{"X", "X"}, {"x-amz-target", "X,Operation"}}; EXPECT_EQ("", RequestParser::parseOperation(headers)); } // Too many entries in the Version.Operation. 
{ - Http::TestHeaderMapImpl headers{{"X", "X"}, {"x-amz-target", "NOT_VALID.NOT_VALID.NOT_VALID"}}; + Http::TestRequestHeaderMapImpl headers{{"X", "X"}, + {"x-amz-target", "NOT_VALID.NOT_VALID.NOT_VALID"}}; EXPECT_EQ("", RequestParser::parseOperation(headers)); } // Required header is not present in the headers { - Http::TestHeaderMapImpl headers{{"Z", "Z"}}; + Http::TestRequestHeaderMapImpl headers{{"Z", "Z"}}; EXPECT_EQ("", RequestParser::parseOperation(headers)); } } diff --git a/test/extensions/filters/http/ext_authz/BUILD b/test/extensions/filters/http/ext_authz/BUILD index 75bd5bec2fab1..d6e0ff429e5a5 100644 --- a/test/extensions/filters/http/ext_authz/BUILD +++ b/test/extensions/filters/http/ext_authz/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( @@ -47,7 +47,7 @@ envoy_extension_cc_test( extension_name = "envoy.filters.http.ext_authz", deps = [ "//source/extensions/filters/http/ext_authz:config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/http/ext_authz/v3:pkg_cc_proto", ], @@ -57,6 +57,7 @@ envoy_extension_cc_test( name = "ext_authz_integration_test", srcs = ["ext_authz_integration_test.cc"], extension_name = "envoy.filters.http.ext_authz", + tags = ["fails_on_windows"], deps = [ "//source/extensions/filters/http/ext_authz:config", "//test/integration:http_integration_lib", diff --git a/test/extensions/filters/http/ext_authz/config_test.cc b/test/extensions/filters/http/ext_authz/config_test.cc index c6f8f18b5b4ca..7a3f011032fef 100644 --- a/test/extensions/filters/http/ext_authz/config_test.cc +++ b/test/extensions/filters/http/ext_authz/config_test.cc @@ -5,7 +5,8 @@ #include "extensions/filters/http/ext_authz/config.h" -#include 
"test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" +#include "test/test_common/utility.h" #include "gmock/gmock.h" #include "gtest/gtest.h" @@ -19,18 +20,20 @@ namespace HttpFilters { namespace ExtAuthz { namespace { -TEST(HttpExtAuthzConfigTest, CorrectProtoGrpc) { +void expectCorrectProtoGrpc(envoy::config::core::v3::ApiVersion api_version) { std::string yaml = R"EOF( grpc_service: google_grpc: target_uri: ext_authz_server stat_prefix: google failure_mode_allow: false + transport_api_version: {} )EOF"; ExtAuthzFilterConfig factory; ProtobufTypes::MessagePtr proto_config = factory.createEmptyConfigProto(); - TestUtility::loadFromYaml(yaml, *proto_config); + TestUtility::loadFromYaml( + fmt::format(yaml, TestUtility::getVersionStringFromApiVersion(api_version)), *proto_config); testing::StrictMock context; EXPECT_CALL(context, messageValidationVisitor()).Times(1); @@ -48,6 +51,14 @@ TEST(HttpExtAuthzConfigTest, CorrectProtoGrpc) { cb(filter_callback); } +} // namespace + +TEST(HttpExtAuthzConfigTest, CorrectProtoGrpc) { + expectCorrectProtoGrpc(envoy::config::core::v3::ApiVersion::AUTO); + expectCorrectProtoGrpc(envoy::config::core::v3::ApiVersion::V2); + expectCorrectProtoGrpc(envoy::config::core::v3::ApiVersion::V3); +} + TEST(HttpExtAuthzConfigTest, CorrectProtoHttp) { std::string yaml = R"EOF( http_service: @@ -76,6 +87,10 @@ TEST(HttpExtAuthzConfigTest, CorrectProtoHttp) { patterns: - exact: baz - prefix: x-fail + allowed_upstream_headers_to_append: + patterns: + - exact: baz-append + - prefix: x-append path_prefix: /extauth @@ -93,7 +108,6 @@ TEST(HttpExtAuthzConfigTest, CorrectProtoHttp) { EXPECT_CALL(context, clusterManager()).Times(1); EXPECT_CALL(context, runtime()).Times(1); EXPECT_CALL(context, scope()).Times(1); - EXPECT_CALL(context, timeSource()).Times(1); Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(*proto_config, "stats", context); testing::StrictMock filter_callback; EXPECT_CALL(filter_callback, 
addStreamDecoderFilter(_)); @@ -110,7 +124,6 @@ TEST(HttpExtAuthzConfigTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterNa deprecated_name)); } -} // namespace } // namespace ExtAuthz } // namespace HttpFilters } // namespace Extensions diff --git a/test/extensions/filters/http/ext_authz/ext_authz_integration_test.cc b/test/extensions/filters/http/ext_authz/ext_authz_integration_test.cc index e24056c15957d..e1d4d2e059ac9 100644 --- a/test/extensions/filters/http/ext_authz/ext_authz_integration_test.cc +++ b/test/extensions/filters/http/ext_authz/ext_authz_integration_test.cc @@ -3,6 +3,8 @@ #include "envoy/extensions/filters/http/ext_authz/v3/ext_authz.pb.h" #include "envoy/service/auth/v3/external_auth.pb.h" +#include "common/common/macros.h" + #include "extensions/filters/http/well_known_names.h" #include "test/common/grpc/grpc_client_integration.h" @@ -13,11 +15,15 @@ #include "gtest/gtest.h" using testing::AssertionResult; +using testing::Not; +using testing::TestWithParam; +using testing::ValuesIn; namespace Envoy { -namespace { -class ExtAuthzGrpcIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, +using Headers = std::vector>; + +class ExtAuthzGrpcIntegrationTest : public Grpc::VersionedGrpcClientIntegrationParamTest, public HttpIntegrationTest { public: ExtAuthzGrpcIntegrationTest() @@ -29,7 +35,7 @@ class ExtAuthzGrpcIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, new FakeUpstream(0, FakeHttpConnection::Type::HTTP2, version_, timeSystem())); } - void initializeWithDownstreamProtocol(Http::CodecClient::Type downstream_protocol) { + void initializeConfig() { config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { auto* ext_authz_cluster = bootstrap.mutable_static_resources()->add_clusters(); ext_authz_cluster->MergeFrom(bootstrap.static_resources().clusters()[0]); @@ -40,21 +46,49 @@ class ExtAuthzGrpcIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, 
setGrpcService(*proto_config_.mutable_grpc_service(), "ext_authz", fake_upstreams_.back()->localAddress()); + proto_config_.mutable_filter_enabled()->set_runtime_key("envoy.ext_authz.enable"); + proto_config_.mutable_filter_enabled()->mutable_default_value()->set_numerator(100); + proto_config_.mutable_deny_at_disable()->set_runtime_key("envoy.ext_authz.deny_at_disable"); + proto_config_.mutable_deny_at_disable()->mutable_default_value()->set_value(false); + proto_config_.set_transport_api_version(apiVersion()); + envoy::config::listener::v3::Filter ext_authz_filter; ext_authz_filter.set_name(Extensions::HttpFilters::HttpFilterNames::get().ExtAuthorization); ext_authz_filter.mutable_typed_config()->PackFrom(proto_config_); config_helper_.addFilter(MessageUtil::getJsonStringFromMessage(ext_authz_filter)); }); + } - setDownstreamProtocol(downstream_protocol); - HttpIntegrationTest::initialize(); + void setDenyAtDisableRuntimeConfig(bool deny_at_disable) { + config_helper_.addRuntimeOverride("envoy.ext_authz.enable", "numerator: 0"); + if (deny_at_disable) { + config_helper_.addRuntimeOverride("envoy.ext_authz.deny_at_disable", "true"); + } else { + config_helper_.addRuntimeOverride("envoy.ext_authz.deny_at_disable", "false"); + } } - void initiateClientConnection(uint64_t request_body_length) { + void initiateClientConnection(uint64_t request_body_length, + const Headers& headers_to_add = Headers{}, + const Headers& headers_to_append = Headers{}) { auto conn = makeClientConnection(lookupPort("http")); codec_client_ = makeHttpConnection(std::move(conn)); Http::TestRequestHeaderMapImpl headers{ {":method", "POST"}, {":path", "/test"}, {":scheme", "http"}, {":authority", "host"}}; + + // Initialize headers to append. If the authorization server returns any matching keys with one + // of value in headers_to_add, the header entry from authorization server replaces the one in + // headers_to_add. 
+ for (const auto& header_to_add : headers_to_add) { + headers.addCopy(header_to_add.first, header_to_add.second); + } + + // Initialize headers to append. If the authorization server returns any matching keys with one + // of value in headers_to_append, it will be appended. + for (const auto& headers_to_append : headers_to_append) { + headers.addCopy(headers_to_append.first, headers_to_append.second); + } + TestUtility::feedBufferWithRandomCharacters(request_body_, request_body_length); response_ = codec_client_->makeRequestWithBody(headers, request_body_.toString()); } @@ -71,11 +105,11 @@ class ExtAuthzGrpcIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, result = ext_authz_request_->waitForGrpcMessage(*dispatcher_, check_request); RELEASE_ASSERT(result, result.message()); - EXPECT_EQ("POST", ext_authz_request_->headers().Method()->value().getStringView()); - EXPECT_EQ("/envoy.service.auth.v2.Authorization/Check", - ext_authz_request_->headers().Path()->value().getStringView()); - EXPECT_EQ("application/grpc", - ext_authz_request_->headers().ContentType()->value().getStringView()); + EXPECT_EQ("POST", ext_authz_request_->headers().getMethodValue()); + EXPECT_EQ(TestUtility::getVersionedMethodPath("envoy.service.auth.{}.Authorization", "Check", + apiVersion()), + ext_authz_request_->headers().getPathValue()); + EXPECT_EQ("application/grpc", ext_authz_request_->headers().getContentTypeValue()); envoy::service::auth::v3::CheckRequest expected_check_request; TestUtility::loadFromYaml(expected_check_request_yaml, expected_check_request); @@ -100,7 +134,13 @@ class ExtAuthzGrpcIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, RELEASE_ASSERT(result, result.message()); } - void waitForSuccessfulUpstreamResponse() { + void waitForSuccessfulUpstreamResponse( + const std::string& expected_response_code, const Headers& headers_to_add = Headers{}, + const Headers& headers_to_append = Headers{}, + const Http::TestRequestHeaderMapImpl& 
new_headers_from_upstream = + Http::TestRequestHeaderMapImpl{}, + const Http::TestRequestHeaderMapImpl& headers_to_append_multiple = + Http::TestRequestHeaderMapImpl{}) { AssertionResult result = fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_); RELEASE_ASSERT(result, result.message()); @@ -111,20 +151,109 @@ class ExtAuthzGrpcIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, false); upstream_request_->encodeData(response_size_, true); + + for (const auto& header_to_add : headers_to_add) { + EXPECT_THAT(upstream_request_->headers(), + Http::HeaderValueOf(header_to_add.first, header_to_add.second)); + // For headers_to_add (with append = false), the original request headers have no "-replaced" + // suffix, but the ones from the authorization server have it. + EXPECT_TRUE(absl::EndsWith(header_to_add.second, "-replaced")); + } + + for (const auto& header_to_append : headers_to_append) { + // The current behavior of appending is using the "appendCopy", which ALWAYS combines entries + // with the same key into one key, and the values are separated by "," (regardless it is an + // inline-header or not). In addition to that, it only applies to the existing headers (the + // header is existed in the original request headers). + EXPECT_THAT( + upstream_request_->headers(), + Http::HeaderValueOf( + header_to_append.first, + // In this test, the keys and values of the original request headers have the same + // string value. Hence for "header2" key, the value is "header2,header2-appended". 
+ absl::StrCat(header_to_append.first, ",", header_to_append.second))); + const auto value = upstream_request_->headers() + .get(Http::LowerCaseString(header_to_append.first)) + ->value() + .getStringView(); + EXPECT_TRUE(absl::EndsWith(value, "-appended")); + const auto values = StringUtil::splitToken(value, ","); + EXPECT_EQ(2, values.size()); + } + + if (!new_headers_from_upstream.empty()) { + // new_headers_from_upstream has append = true. The current implementation ignores to set + // multiple headers that are not present in the original request headers. In order to add + // headers with the same key multiple times, setting response headers with append = false and + // append = true is required. + EXPECT_THAT(new_headers_from_upstream, + Not(Http::IsSubsetOfHeaders(upstream_request_->headers()))); + } + + if (!headers_to_append_multiple.empty()) { + // headers_to_append_multiple has append = false for the first entry of multiple entries, and + // append = true for the rest entries. 
+ EXPECT_THAT(upstream_request_->headers(), + Http::HeaderValueOf("multiple", "multiple-first,multiple-second")); + } + response_->waitForEndStream(); EXPECT_TRUE(upstream_request_->complete()); EXPECT_EQ(request_body_.length(), upstream_request_->bodyLength()); EXPECT_TRUE(response_->complete()); - EXPECT_EQ("200", response_->headers().Status()->value().getStringView()); + EXPECT_EQ(expected_response_code, response_->headers().getStatusValue()); EXPECT_EQ(response_size_, response_->body().size()); } - void sendExtAuthzResponse() { + void sendExtAuthzResponse(const Headers& headers_to_add, const Headers& headers_to_append, + const Http::TestRequestHeaderMapImpl& new_headers_from_upstream, + const Http::TestRequestHeaderMapImpl& headers_to_append_multiple) { ext_authz_request_->startGrpcStream(); envoy::service::auth::v3::CheckResponse check_response; check_response.mutable_status()->set_code(Grpc::Status::WellKnownGrpcStatus::Ok); + + for (const auto& header_to_add : headers_to_add) { + auto* entry = check_response.mutable_ok_response()->mutable_headers()->Add(); + entry->mutable_append()->set_value(false); + entry->mutable_header()->set_key(header_to_add.first); + entry->mutable_header()->set_value(header_to_add.second); + } + + for (const auto& header_to_append : headers_to_append) { + auto* entry = check_response.mutable_ok_response()->mutable_headers()->Add(); + entry->mutable_append()->set_value(true); + entry->mutable_header()->set_key(header_to_append.first); + entry->mutable_header()->set_value(header_to_append.second); + } + + // Entries in this headers are not present in the original request headers. + new_headers_from_upstream.iterate( + [&check_response](const Http::HeaderEntry& h) -> Http::HeaderMap::Iterate { + auto* entry = check_response.mutable_ok_response()->mutable_headers()->Add(); + // Try to append to a non-existent field. 
+ entry->mutable_append()->set_value(true); + entry->mutable_header()->set_key(std::string(h.key().getStringView())); + entry->mutable_header()->set_value(std::string(h.value().getStringView())); + return Http::HeaderMap::Iterate::Continue; + }); + + // Entries in this headers are not present in the original request headers. But we set append = + // true and append = false. + headers_to_append_multiple.iterate( + [&check_response](const Http::HeaderEntry& h) -> Http::HeaderMap::Iterate { + auto* entry = check_response.mutable_ok_response()->mutable_headers()->Add(); + const auto key = std::string(h.key().getStringView()); + const auto value = std::string(h.value().getStringView()); + + // This scenario makes sure we have set the headers to be appended later. + entry->mutable_append()->set_value(!absl::EndsWith(value, "-first")); + entry->mutable_header()->set_key(key); + entry->mutable_header()->set_value(value); + return Http::HeaderMap::Iterate::Continue; + }); + ext_authz_request_->sendGrpcMessage(check_response); ext_authz_request_->finishGrpcStream(Grpc::Status::Ok); } @@ -170,11 +299,49 @@ class ExtAuthzGrpcIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, void expectCheckRequestWithBody(Http::CodecClient::Type downstream_protocol, uint64_t request_size) { - initializeWithDownstreamProtocol(downstream_protocol); - initiateClientConnection(request_size); + expectCheckRequestWithBodyWithHeaders(downstream_protocol, request_size, Headers{}, Headers{}, + Http::TestRequestHeaderMapImpl{}, + Http::TestRequestHeaderMapImpl{}); + } + + void expectCheckRequestWithBodyWithHeaders( + Http::CodecClient::Type downstream_protocol, uint64_t request_size, + const Headers& headers_to_add, const Headers& headers_to_append, + const Http::TestRequestHeaderMapImpl& new_headers_from_upstream, + const Http::TestRequestHeaderMapImpl& headers_to_append_multiple) { + initializeConfig(); + setDownstreamProtocol(downstream_protocol); + HttpIntegrationTest::initialize(); + 
initiateClientConnection(request_size, headers_to_add, headers_to_append); waitForExtAuthzRequest(expectedCheckRequest(downstream_protocol)); - sendExtAuthzResponse(); - waitForSuccessfulUpstreamResponse(); + + Headers updated_headers_to_add; + for (auto& header_to_add : headers_to_add) { + updated_headers_to_add.push_back( + std::make_pair(header_to_add.first, header_to_add.second + "-replaced")); + } + Headers updated_headers_to_append; + for (const auto& header_to_append : headers_to_append) { + updated_headers_to_append.push_back( + std::make_pair(header_to_append.first, header_to_append.second + "-appended")); + } + sendExtAuthzResponse(updated_headers_to_add, updated_headers_to_append, + new_headers_from_upstream, headers_to_append_multiple); + + waitForSuccessfulUpstreamResponse("200", updated_headers_to_add, updated_headers_to_append, + new_headers_from_upstream, headers_to_append_multiple); + cleanup(); + } + + void expectFilterDisableCheck(bool deny_at_disable, const std::string& expected_status) { + initializeConfig(); + setDenyAtDisableRuntimeConfig(deny_at_disable); + setDownstreamProtocol(Http::CodecClient::Type::HTTP2); + HttpIntegrationTest::initialize(); + initiateClientConnection(4); + if (!deny_at_disable) { + waitForSuccessfulUpstreamResponse(expected_status); + } cleanup(); } @@ -194,7 +361,7 @@ class ExtAuthzGrpcIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, }; class ExtAuthzHttpIntegrationTest : public HttpIntegrationTest, - public testing::TestWithParam { + public TestWithParam { public: ExtAuthzHttpIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, GetParam()) {} @@ -221,6 +388,8 @@ class ExtAuthzHttpIntegrationTest : public HttpIntegrationTest, {":scheme", "http"}, {":authority", "host"}, {"x-case-sensitive-header", case_sensitive_header_value_}, + {"baz", "foo"}, + {"bat", "foo"}, }); } @@ -232,6 +401,17 @@ class ExtAuthzHttpIntegrationTest : public HttpIntegrationTest, RELEASE_ASSERT(result, 
result.message()); result = ext_authz_request_->waitForEndStream(*dispatcher_); RELEASE_ASSERT(result, result.message()); + + // Send back authorization response with "baz" and "bat" headers. + // Also add multiple values "append-foo" and "append-bar" for key "x-append-bat". + Http::TestResponseHeaderMapImpl response_headers{ + {":status", "200"}, + {"baz", "baz"}, + {"bat", "bar"}, + {"x-append-bat", "append-foo"}, + {"x-append-bat", "append-bar"}, + }; + ext_authz_request_->encodeHeaders(response_headers, true); } void cleanup() { @@ -267,6 +447,34 @@ class ExtAuthzHttpIntegrationTest : public HttpIntegrationTest, initiateClientConnection(); waitForExtAuthzRequest(); + AssertionResult result = + fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_); + RELEASE_ASSERT(result, result.message()); + result = fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_); + RELEASE_ASSERT(result, result.message()); + result = upstream_request_->waitForEndStream(*dispatcher_); + RELEASE_ASSERT(result, result.message()); + + // The original client request header value of "baz" is "foo". Since we configure to "override" + // the value of "baz", we expect the request headers to be sent to upstream contain only one + // "baz" with value "baz" (set by the authorization server). + EXPECT_THAT(upstream_request_->headers(), Http::HeaderValueOf("baz", "baz")); + + // The original client request header value of "bat" is "foo". Since we configure to "append" + // the value of "bat", we expect the request headers to be sent to upstream contain two "bat"s, + // with values: "foo" and "bar" (the "bat: bar" header is appended by the authorization server). + const auto& request_existed_headers = + Http::TestRequestHeaderMapImpl{{"bat", "foo"}, {"bat", "bar"}}; + EXPECT_THAT(request_existed_headers, Http::IsSubsetOfHeaders(upstream_request_->headers())); + + // The original client request header does not contain x-append-bat. 
Since we configure to + // "append" the value of "x-append-bat", we expect the headers to be sent to upstream contain + // two "x-append-bat"s, instead of replacing the first with the last one, with values: + // "append-foo" and "append-bar" + const auto& request_nonexisted_headers = Http::TestRequestHeaderMapImpl{ + {"x-append-bat", "append-foo"}, {"x-append-bat", "append-bar"}}; + EXPECT_THAT(request_nonexisted_headers, Http::IsSubsetOfHeaders(upstream_request_->headers())); + response_->waitForEndStream(); EXPECT_TRUE(response_->complete()); @@ -285,16 +493,29 @@ class ExtAuthzHttpIntegrationTest : public HttpIntegrationTest, uri: "ext_authz:9000" cluster: "ext_authz" timeout: 0.25s + authorization_request: allowed_headers: patterns: - exact: X-Case-Sensitive-Header + + authorization_response: + allowed_upstream_headers: + patterns: + - exact: baz + - prefix: x-success + + allowed_upstream_headers_to_append: + patterns: + - exact: bat + - prefix: x-append + failure_mode_allow: true )EOF"; }; INSTANTIATE_TEST_SUITE_P(IpVersionsCientType, ExtAuthzGrpcIntegrationTest, - GRPC_CLIENT_INTEGRATION_PARAMS); + VERSIONED_GRPC_CLIENT_INTEGRATION_PARAMS); // Verifies that the request body is included in the CheckRequest when the downstream protocol is // HTTP/1.1. @@ -320,11 +541,28 @@ TEST_P(ExtAuthzGrpcIntegrationTest, HTTP2DownstreamRequestWithLargeBody) { expectCheckRequestWithBody(Http::CodecClient::Type::HTTP2, 2048); } +// Verifies that the original request headers will be added and appended when the authorization +// server returns headers_to_add and headers_to_append in OkResponse message. 
+TEST_P(ExtAuthzGrpcIntegrationTest, SendHeadersToAddAndToAppendToUpstream) { + expectCheckRequestWithBodyWithHeaders( + Http::CodecClient::Type::HTTP1, 4, + /*headers_to_add=*/Headers{{"header1", "header1"}}, + /*headers_to_append=*/Headers{{"header2", "header2"}}, + /*new_headers_from_upstream=*/Http::TestRequestHeaderMapImpl{{"new1", "new1"}}, + /*headers_to_append_multiple=*/ + Http::TestRequestHeaderMapImpl{{"multiple", "multiple-first"}, + {"multiple", "multiple-second"}}); +} + +TEST_P(ExtAuthzGrpcIntegrationTest, AllowAtDisable) { expectFilterDisableCheck(false, "200"); } + +TEST_P(ExtAuthzGrpcIntegrationTest, DenyAtDisable) { expectFilterDisableCheck(true, "403"); } + INSTANTIATE_TEST_SUITE_P(IpVersions, ExtAuthzHttpIntegrationTest, - testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + ValuesIn(TestEnvironment::getIpVersionsForTest()), TestUtility::ipTestParamsToString); -// Verifies that by default HTTP service uses the case sensitive string matcher. +// Verifies that by default HTTP service uses the case-sensitive string matcher. TEST_P(ExtAuthzHttpIntegrationTest, DefaultCaseSensitiveStringMatcher) { setupWithDisabledCaseSensitiveStringMatcher(false); const auto* header_entry = ext_authz_request_->headers().get(case_sensitive_header_name_); @@ -333,7 +571,7 @@ TEST_P(ExtAuthzHttpIntegrationTest, DefaultCaseSensitiveStringMatcher) { // Verifies that by setting "false" to // envoy.reloadable_features.ext_authz_http_service_enable_case_sensitive_string_matcher, the string -// matcher used by HTTP service will case insensitive. +// matcher used by HTTP service will be case-insensitive. 
TEST_P(ExtAuthzHttpIntegrationTest, DisableCaseSensitiveStringMatcher) { setupWithDisabledCaseSensitiveStringMatcher(true); const auto* header_entry = ext_authz_request_->headers().get(case_sensitive_header_name_); @@ -341,5 +579,4 @@ TEST_P(ExtAuthzHttpIntegrationTest, DisableCaseSensitiveStringMatcher) { EXPECT_EQ(case_sensitive_header_value_, header_entry->value().getStringView()); } -} // namespace } // namespace Envoy diff --git a/test/extensions/filters/http/ext_authz/ext_authz_test.cc b/test/extensions/filters/http/ext_authz/ext_authz_test.cc index 957ed37467e74..3948822584f82 100644 --- a/test/extensions/filters/http/ext_authz/ext_authz_test.cc +++ b/test/extensions/filters/http/ext_authz/ext_authz_test.cc @@ -189,8 +189,7 @@ TEST_F(HttpFilterTest, ErrorFailClose) { EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0); EXPECT_CALL(filter_callbacks_, encodeHeaders_(_, true)) .WillOnce(Invoke([&](const Http::ResponseHeaderMap& headers, bool) -> void { - EXPECT_EQ(headers.Status()->value().getStringView(), - std::to_string(enumToInt(Http::Code::Forbidden))); + EXPECT_EQ(headers.getStatusValue(), std::to_string(enumToInt(Http::Code::Forbidden))); })); Filters::Common::ExtAuthz::Response response{}; @@ -228,7 +227,7 @@ TEST_F(HttpFilterTest, ErrorCustomStatusCode) { EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0); EXPECT_CALL(filter_callbacks_, encodeHeaders_(_, true)) .WillOnce(Invoke([&](const Http::ResponseHeaderMap& headers, bool) -> void { - EXPECT_EQ(headers.Status()->value().getStringView(), + EXPECT_EQ(headers.getStatusValue(), std::to_string(enumToInt(Http::Code::ServiceUnavailable))); })); @@ -598,7 +597,7 @@ TEST_F(HttpFilterTest, ClearCache) { Filters::Common::ExtAuthz::Response response{}; response.status = Filters::Common::ExtAuthz::CheckStatus::OK; response.headers_to_append = Http::HeaderVector{{Http::LowerCaseString{"foo"}, "bar"}}; - response.headers_to_add = Http::HeaderVector{{Http::LowerCaseString{"bar"}, "foo"}}; + 
response.headers_to_set = Http::HeaderVector{{Http::LowerCaseString{"bar"}, "foo"}}; request_callbacks_->onComplete(std::make_unique(response)); EXPECT_EQ( 1U, filter_callbacks_.clusterInfo()->statsScope().counterFromString("ext_authz.ok").value()); @@ -678,7 +677,7 @@ TEST_F(HttpFilterTest, ClearCacheRouteHeadersToAddOnly) { Filters::Common::ExtAuthz::Response response{}; response.status = Filters::Common::ExtAuthz::CheckStatus::OK; - response.headers_to_add = Http::HeaderVector{{Http::LowerCaseString{"foo"}, "bar"}}; + response.headers_to_set = Http::HeaderVector{{Http::LowerCaseString{"foo"}, "bar"}}; request_callbacks_->onComplete(std::make_unique(response)); EXPECT_EQ( 1U, filter_callbacks_.clusterInfo()->statsScope().counterFromString("ext_authz.ok").value()); @@ -753,7 +752,7 @@ TEST_F(HttpFilterTest, NoClearCacheRouteConfig) { Filters::Common::ExtAuthz::Response response{}; response.status = Filters::Common::ExtAuthz::CheckStatus::OK; response.headers_to_append = Http::HeaderVector{{Http::LowerCaseString{"foo"}, "bar"}}; - response.headers_to_add = Http::HeaderVector{{Http::LowerCaseString{"bar"}, "foo"}}; + response.headers_to_set = Http::HeaderVector{{Http::LowerCaseString{"bar"}, "foo"}}; request_callbacks_->onComplete(std::make_unique(response)); EXPECT_EQ( 1U, filter_callbacks_.clusterInfo()->statsScope().counterFromString("ext_authz.ok").value()); @@ -776,7 +775,7 @@ TEST_F(HttpFilterTest, NoClearCacheRouteDeniedResponse) { Filters::Common::ExtAuthz::Response response{}; response.status = Filters::Common::ExtAuthz::CheckStatus::Denied; response.status_code = Http::Code::Unauthorized; - response.headers_to_add = Http::HeaderVector{{Http::LowerCaseString{"foo"}, "bar"}}; + response.headers_to_set = Http::HeaderVector{{Http::LowerCaseString{"foo"}, "bar"}}; auto response_ptr = std::make_unique(response); EXPECT_CALL(*client_, check(_, _, testing::A(), _)) @@ -910,12 +909,75 @@ TEST_F(HttpFilterTest, FilterEnabled) { 
filter_->decodeHeaders(request_headers_, false)); } +// Test that filter can deny for protected path when filter is disabled via filter_enabled field. +TEST_F(HttpFilterTest, FilterDenyAtDisable) { + initialize(R"EOF( + grpc_service: + envoy_grpc: + cluster_name: "ext_authz_server" + filter_enabled: + runtime_key: "http.ext_authz.enabled" + default_value: + numerator: 0 + denominator: HUNDRED + deny_at_disable: + runtime_key: "http.ext_authz.deny_at_disable" + default_value: + value: true + )EOF"); + + ON_CALL(runtime_.snapshot_, + featureEnabled("http.ext_authz.enabled", + testing::Matcher(Percent(0)))) + .WillByDefault(Return(false)); + + ON_CALL(runtime_.snapshot_, featureEnabled("http.ext_authz.enabled", false)) + .WillByDefault(Return(true)); + + // Make sure check is not called. + EXPECT_CALL(*client_, check(_, _, _, _)).Times(0); + // Engage the filter. + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + filter_->decodeHeaders(request_headers_, false)); +} + +// Test that filter allows for protected path when filter is disabled via filter_enabled field. +TEST_F(HttpFilterTest, FilterAllowAtDisable) { + initialize(R"EOF( + grpc_service: + envoy_grpc: + cluster_name: "ext_authz_server" + filter_enabled: + runtime_key: "http.ext_authz.enabled" + default_value: + numerator: 0 + denominator: HUNDRED + deny_at_disable: + runtime_key: "http.ext_authz.deny_at_disable" + default_value: + value: false + )EOF"); + + ON_CALL(runtime_.snapshot_, + featureEnabled("http.ext_authz.enabled", + testing::Matcher(Percent(0)))) + .WillByDefault(Return(false)); + + ON_CALL(runtime_.snapshot_, featureEnabled("http.ext_authz.enabled", false)) + .WillByDefault(Return(false)); + + // Make sure check is not called. + EXPECT_CALL(*client_, check(_, _, _, _)).Times(0); + // Engage the filter. 
+ EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false)); +} + // ------------------- // Parameterized Tests // ------------------- // Test that context extensions make it into the check request. -TEST_F(HttpFilterTestParam, ContextExtensions) { +TEST_P(HttpFilterTestParam, ContextExtensions) { // Place something in the context extensions on the virtualhost. envoy::extensions::filters::http::ext_authz::v3::ExtAuthzPerRoute settingsvhost; (*settingsvhost.mutable_check_settings()->mutable_context_extensions())["key_vhost"] = @@ -957,7 +1019,7 @@ TEST_F(HttpFilterTestParam, ContextExtensions) { } // Test that filter can be disabled with route config. -TEST_F(HttpFilterTestParam, DisabledOnRoute) { +TEST_P(HttpFilterTestParam, DisabledOnRoute) { envoy::extensions::filters::http::ext_authz::v3::ExtAuthzPerRoute settings; FilterConfigPerRoute auth_per_route(settings); @@ -990,7 +1052,7 @@ TEST_F(HttpFilterTestParam, DisabledOnRoute) { } // Test that filter can be disabled with route config. -TEST_F(HttpFilterTestParam, DisabledOnRouteWithRequestBody) { +TEST_P(HttpFilterTestParam, DisabledOnRouteWithRequestBody) { envoy::extensions::filters::http::ext_authz::v3::ExtAuthzPerRoute settings; FilterConfigPerRoute auth_per_route(settings); @@ -1036,7 +1098,7 @@ TEST_F(HttpFilterTestParam, DisabledOnRouteWithRequestBody) { } // Test that the request continues when the filter_callbacks has no route. -TEST_F(HttpFilterTestParam, NoRoute) { +TEST_P(HttpFilterTestParam, NoRoute) { EXPECT_CALL(*filter_callbacks_.route_, routeEntry()).WillOnce(Return(nullptr)); EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false)); EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false)); @@ -1044,7 +1106,7 @@ TEST_F(HttpFilterTestParam, NoRoute) { } // Test that the request is stopped till there is an OK response back after which it continues on. 
-TEST_F(HttpFilterTestParam, OkResponse) { +TEST_P(HttpFilterTestParam, OkResponse) { InSequence s; prepareCheck(); @@ -1064,6 +1126,8 @@ TEST_F(HttpFilterTestParam, OkResponse) { Filters::Common::ExtAuthz::Response response{}; response.status = Filters::Common::ExtAuthz::CheckStatus::OK; + // Send an OK response Without setting the dynamic metadata field. + EXPECT_CALL(filter_callbacks_.stream_info_, setDynamicMetadata(_, _)).Times(0); request_callbacks_->onComplete(std::make_unique(response)); EXPECT_EQ( 1U, filter_callbacks_.clusterInfo()->statsScope().counterFromString("ext_authz.ok").value()); @@ -1075,7 +1139,7 @@ TEST_F(HttpFilterTestParam, OkResponse) { // Test that an synchronous OK response from the authorization service, on the call stack, results // in request continuing on. -TEST_F(HttpFilterTestParam, ImmediateOkResponse) { +TEST_P(HttpFilterTestParam, ImmediateOkResponse) { InSequence s; prepareCheck(); @@ -1099,7 +1163,7 @@ TEST_F(HttpFilterTestParam, ImmediateOkResponse) { // Test that an synchronous denied response from the authorization service passing additional HTTP // attributes to the downstream. 
-TEST_F(HttpFilterTestParam, ImmediateDeniedResponseWithHttpAttributes) { +TEST_P(HttpFilterTestParam, ImmediateDeniedResponseWithHttpAttributes) { InSequence s; prepareCheck(); @@ -1107,7 +1171,7 @@ TEST_F(HttpFilterTestParam, ImmediateDeniedResponseWithHttpAttributes) { Filters::Common::ExtAuthz::Response response{}; response.status = Filters::Common::ExtAuthz::CheckStatus::Denied; response.status_code = Http::Code::Unauthorized; - response.headers_to_add = Http::HeaderVector{{Http::LowerCaseString{"foo"}, "bar"}}; + response.headers_to_set = Http::HeaderVector{{Http::LowerCaseString{"foo"}, "bar"}}; response.body = std::string{"baz"}; auto response_ptr = std::make_unique(response); @@ -1130,7 +1194,7 @@ TEST_F(HttpFilterTestParam, ImmediateDeniedResponseWithHttpAttributes) { // Test that an synchronous ok response from the authorization service passing additional HTTP // attributes to the upstream. -TEST_F(HttpFilterTestParam, ImmediateOkResponseWithHttpAttributes) { +TEST_P(HttpFilterTestParam, ImmediateOkResponseWithHttpAttributes) { InSequence s; // `bar` will be appended to this header. @@ -1149,7 +1213,7 @@ TEST_F(HttpFilterTestParam, ImmediateOkResponseWithHttpAttributes) { Filters::Common::ExtAuthz::Response response{}; response.status = Filters::Common::ExtAuthz::CheckStatus::OK; response.headers_to_append = Http::HeaderVector{{request_header_key, "bar"}}; - response.headers_to_add = Http::HeaderVector{{key_to_add, "foo"}, {key_to_override, "bar"}}; + response.headers_to_set = Http::HeaderVector{{key_to_add, "foo"}, {key_to_override, "bar"}}; auto response_ptr = std::make_unique(response); @@ -1170,7 +1234,7 @@ TEST_F(HttpFilterTestParam, ImmediateOkResponseWithHttpAttributes) { // Test that an synchronous denied response from the authorization service, on the call stack, // results in request not continuing. 
-TEST_F(HttpFilterTestParam, ImmediateDeniedResponse) { +TEST_P(HttpFilterTestParam, ImmediateDeniedResponse) { InSequence s; prepareCheck(); @@ -1194,7 +1258,7 @@ TEST_F(HttpFilterTestParam, ImmediateDeniedResponse) { } // Test that a denied response results in the connection closing with a 401 response to the client. -TEST_F(HttpFilterTestParam, DeniedResponseWith401) { +TEST_P(HttpFilterTestParam, DeniedResponseWith401) { InSequence s; prepareCheck(); @@ -1226,7 +1290,7 @@ TEST_F(HttpFilterTestParam, DeniedResponseWith401) { } // Test that a denied response results in the connection closing with a 403 response to the client. -TEST_F(HttpFilterTestParam, DeniedResponseWith403) { +TEST_P(HttpFilterTestParam, DeniedResponseWith403) { InSequence s; prepareCheck(); @@ -1261,14 +1325,14 @@ TEST_F(HttpFilterTestParam, DeniedResponseWith403) { } // Verify that authz response memory is not used after free. -TEST_F(HttpFilterTestParam, DestroyResponseBeforeSendLocalReply) { +TEST_P(HttpFilterTestParam, DestroyResponseBeforeSendLocalReply) { InSequence s; Filters::Common::ExtAuthz::Response response{}; response.status = Filters::Common::ExtAuthz::CheckStatus::Denied; response.status_code = Http::Code::Forbidden; response.body = std::string{"foo"}; - response.headers_to_add = Http::HeaderVector{{Http::LowerCaseString{"foo"}, "bar"}, + response.headers_to_set = Http::HeaderVector{{Http::LowerCaseString{"foo"}, "bar"}, {Http::LowerCaseString{"bar"}, "foo"}}; Filters::Common::ExtAuthz::ResponsePtr response_ptr = std::make_unique(response); @@ -1293,7 +1357,7 @@ TEST_F(HttpFilterTestParam, DestroyResponseBeforeSendLocalReply) { EXPECT_CALL(filter_callbacks_, encodeData(_, true)) .WillOnce(Invoke([&](Buffer::Instance& data, bool) { response_ptr.reset(); - Http::TestHeaderMapImpl test_headers{*saved_headers}; + Http::TestRequestHeaderMapImpl test_headers{*saved_headers}; EXPECT_EQ(test_headers.get_("foo"), "bar"); EXPECT_EQ(test_headers.get_("bar"), "foo"); 
EXPECT_EQ(data.toString(), "foo"); @@ -1315,14 +1379,14 @@ TEST_F(HttpFilterTestParam, DestroyResponseBeforeSendLocalReply) { // Verify that authz denied response headers overrides the existing encoding headers, // and that it adds repeated header names using the standard method of comma concatenation of values // for predefined inline headers while repeating other headers -TEST_F(HttpFilterTestParam, OverrideEncodingHeaders) { +TEST_P(HttpFilterTestParam, OverrideEncodingHeaders) { InSequence s; Filters::Common::ExtAuthz::Response response{}; response.status = Filters::Common::ExtAuthz::CheckStatus::Denied; response.status_code = Http::Code::Forbidden; response.body = std::string{"foo"}; - response.headers_to_add = + response.headers_to_set = Http::HeaderVector{{Http::LowerCaseString{"foo"}, "bar"}, {Http::LowerCaseString{"bar"}, "foo"}, {Http::LowerCaseString{"set-cookie"}, "cookie1=value"}, @@ -1358,7 +1422,7 @@ TEST_F(HttpFilterTestParam, OverrideEncodingHeaders) { EXPECT_CALL(filter_callbacks_, encodeData(_, true)) .WillOnce(Invoke([&](Buffer::Instance& data, bool) { response_ptr.reset(); - Http::TestHeaderMapImpl test_headers{*saved_headers}; + Http::TestRequestHeaderMapImpl test_headers{*saved_headers}; EXPECT_EQ(test_headers.get_("foo"), "bar"); EXPECT_EQ(test_headers.get_("bar"), "foo"); EXPECT_EQ(test_headers.get_("foobar"), "DO_NOT_OVERRIDE"); @@ -1383,9 +1447,58 @@ TEST_F(HttpFilterTestParam, OverrideEncodingHeaders) { filter_callbacks_.clusterInfo()->statsScope().counterFromString("upstream_rq_403").value()); } +// Verify that when returning an OK response with dynamic_metadata field set, the filter emits +// dynamic metadata. 
+TEST_F(HttpFilterTest, EmitDynamicMetadata) { + InSequence s; + + initialize(R"EOF( + grpc_service: + envoy_grpc: + cluster_name: "ext_authz_server" + )EOF"); + + prepareCheck(); + + EXPECT_CALL(*client_, check(_, _, testing::A(), _)) + .WillOnce( + WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { + request_callbacks_ = &callbacks; + }))); + EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark, + filter_->decodeHeaders(request_headers_, false)); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false)); + EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_)); + + Filters::Common::ExtAuthz::Response response{}; + response.status = Filters::Common::ExtAuthz::CheckStatus::OK; + response.headers_to_set = Http::HeaderVector{{Http::LowerCaseString{"foo"}, "bar"}}; + + auto* fields = response.dynamic_metadata.mutable_fields(); + (*fields)["foo"] = ValueUtil::stringValue("ok"); + (*fields)["bar"] = ValueUtil::numberValue(1); + + EXPECT_CALL(filter_callbacks_.stream_info_, setDynamicMetadata(_, _)) + .WillOnce(Invoke([&response](const std::string& ns, + const ProtobufWkt::Struct& returned_dynamic_metadata) { + EXPECT_EQ(ns, HttpFilterNames::get().ExtAuthorization); + EXPECT_TRUE(TestUtility::protoEqual(returned_dynamic_metadata, response.dynamic_metadata)); + })); + + EXPECT_CALL(filter_callbacks_, continueDecoding()); + EXPECT_CALL(filter_callbacks_.stream_info_, + setResponseFlag(Envoy::StreamInfo::ResponseFlag::UnauthorizedExternalService)) + .Times(0); + request_callbacks_->onComplete(std::make_unique(response)); + + EXPECT_EQ( + 1U, filter_callbacks_.clusterInfo()->statsScope().counterFromString("ext_authz.ok").value()); + EXPECT_EQ(1U, config_->stats().ok_.value()); +} + // Test that when a connection awaiting a authorization response is canceled then the // authorization call is closed. 
-TEST_F(HttpFilterTestParam, ResetDuringCall) { +TEST_P(HttpFilterTestParam, ResetDuringCall) { InSequence s; prepareCheck(); @@ -1403,7 +1516,7 @@ TEST_F(HttpFilterTestParam, ResetDuringCall) { // Regression test for https://github.com/envoyproxy/envoy/pull/8436. // Test that ext_authz filter is not in noop mode when cluster is not specified per route // (this could be the case when route is configured with redirect or direct response action). -TEST_F(HttpFilterTestParam, NoCluster) { +TEST_P(HttpFilterTestParam, NoCluster) { ON_CALL(filter_callbacks_, clusterInfo()).WillByDefault(Return(nullptr)); diff --git a/test/extensions/filters/http/fault/BUILD b/test/extensions/filters/http/fault/BUILD index 04a48ea06dc57..f545e439b5c85 100644 --- a/test/extensions/filters/http/fault/BUILD +++ b/test/extensions/filters/http/fault/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test_library", @@ -10,6 +8,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( @@ -44,7 +44,7 @@ envoy_extension_cc_test( deps = [ ":utility_lib", "//source/extensions/filters/http/fault:config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "@envoy_api//envoy/extensions/filters/http/fault/v3:pkg_cc_proto", "@envoy_api//envoy/type/v3:pkg_cc_proto", ], @@ -54,6 +54,7 @@ envoy_extension_cc_test( name = "fault_filter_integration_test", srcs = ["fault_filter_integration_test.cc"], extension_name = "envoy.filters.http.fault", + tags = ["fails_on_windows"], deps = [ "//source/extensions/filters/http/fault:config", "//test/integration:http_protocol_integration_lib", diff --git a/test/extensions/filters/http/fault/config_test.cc b/test/extensions/filters/http/fault/config_test.cc index cc70285a578de..080e5a3e41de7 100644 --- a/test/extensions/filters/http/fault/config_test.cc +++ b/test/extensions/filters/http/fault/config_test.cc @@ -5,7 +5,7 @@ 
#include "extensions/filters/http/fault/config.h" #include "test/extensions/filters/http/fault/utility.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git a/test/extensions/filters/http/fault/fault_filter_integration_test.cc b/test/extensions/filters/http/fault/fault_filter_integration_test.cc index 6059287a66c72..a8cd15e973813 100644 --- a/test/extensions/filters/http/fault/fault_filter_integration_test.cc +++ b/test/extensions/filters/http/fault/fault_filter_integration_test.cc @@ -46,6 +46,17 @@ name: fault percentage: numerator: 100 )EOF"; + + const std::string abort_grpc_fault_config_ = + R"EOF( +name: fault +typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.fault.v3.HTTPFault + abort: + grpc_status: 5 + percentage: + numerator: 100 +)EOF"; }; // Fault integration tests that should run with all protocols, useful for testing various @@ -72,6 +83,7 @@ name: fault EXPECT_EQ(0UL, test_server_->counter("http.config_test.fault.aborts_injected")->value()); EXPECT_EQ(0UL, test_server_->counter("http.config_test.fault.delays_injected")->value()); EXPECT_EQ(0UL, test_server_->counter("http.config_test.fault.response_rl_injected")->value()); + EXPECT_EQ(0UL, test_server_->gauge("http.config_test.fault.active_faults")->value()); } // Response rate limited with no trailers. @@ -81,6 +93,10 @@ TEST_P(FaultIntegrationTestAllProtocols, ResponseRateLimitNoTrailers) { IntegrationStreamDecoderPtr decoder = codec_client_->makeHeaderOnlyRequest(default_request_headers_); waitForNextUpstreamRequest(); + + // Active faults gauge is incremented. 
+ EXPECT_EQ(1UL, test_server_->gauge("http.config_test.fault.active_faults")->value()); + upstream_request_->encodeHeaders(default_response_headers_, false); Buffer::OwnedImpl data(std::string(127, 'a')); upstream_request_->encodeData(data, true); @@ -96,6 +112,7 @@ TEST_P(FaultIntegrationTestAllProtocols, ResponseRateLimitNoTrailers) { EXPECT_EQ(0UL, test_server_->counter("http.config_test.fault.aborts_injected")->value()); EXPECT_EQ(0UL, test_server_->counter("http.config_test.fault.delays_injected")->value()); EXPECT_EQ(1UL, test_server_->counter("http.config_test.fault.response_rl_injected")->value()); + EXPECT_EQ(0UL, test_server_->gauge("http.config_test.fault.active_faults")->value()); } // Request delay and response rate limited via header configuration. @@ -114,6 +131,8 @@ TEST_P(FaultIntegrationTestAllProtocols, HeaderFaultConfig) { // At least 200ms of simulated time should have elapsed before we got the upstream request. EXPECT_LE(std::chrono::milliseconds(200), simTime().monotonicTime() - current_time); + // Active faults gauge is incremented. + EXPECT_EQ(1UL, test_server_->gauge("http.config_test.fault.active_faults")->value()); // Verify response body throttling. upstream_request_->encodeHeaders(default_response_headers_, false); @@ -131,6 +150,7 @@ TEST_P(FaultIntegrationTestAllProtocols, HeaderFaultConfig) { EXPECT_EQ(0UL, test_server_->counter("http.config_test.fault.aborts_injected")->value()); EXPECT_EQ(1UL, test_server_->counter("http.config_test.fault.delays_injected")->value()); EXPECT_EQ(1UL, test_server_->counter("http.config_test.fault.response_rl_injected")->value()); + EXPECT_EQ(0UL, test_server_->gauge("http.config_test.fault.active_faults")->value()); } // Request abort controlled via header configuration. 
@@ -152,6 +172,7 @@ TEST_P(FaultIntegrationTestAllProtocols, HeaderFaultAbortConfig) { EXPECT_EQ(1UL, test_server_->counter("http.config_test.fault.aborts_injected")->value()); EXPECT_EQ(0UL, test_server_->counter("http.config_test.fault.delays_injected")->value()); EXPECT_EQ(0UL, test_server_->counter("http.config_test.fault.response_rl_injected")->value()); + EXPECT_EQ(0UL, test_server_->gauge("http.config_test.fault.active_faults")->value()); } // Request faults controlled via header configuration. @@ -177,6 +198,7 @@ TEST_P(FaultIntegrationTestAllProtocols, HeaderFaultsConfig0PercentageHeaders) { EXPECT_EQ(0UL, test_server_->counter("http.config_test.fault.aborts_injected")->value()); EXPECT_EQ(0UL, test_server_->counter("http.config_test.fault.delays_injected")->value()); EXPECT_EQ(0UL, test_server_->counter("http.config_test.fault.response_rl_injected")->value()); + EXPECT_EQ(0UL, test_server_->gauge("http.config_test.fault.active_faults")->value()); } // Request faults controlled via header configuration. @@ -200,6 +222,7 @@ TEST_P(FaultIntegrationTestAllProtocols, HeaderFaultsConfig100PercentageHeaders) EXPECT_EQ(0UL, test_server_->counter("http.config_test.fault.aborts_injected")->value()); EXPECT_EQ(1UL, test_server_->counter("http.config_test.fault.delays_injected")->value()); EXPECT_EQ(1UL, test_server_->counter("http.config_test.fault.response_rl_injected")->value()); + EXPECT_EQ(0UL, test_server_->gauge("http.config_test.fault.active_faults")->value()); } // Header configuration with no headers, so no fault injection. 
@@ -212,6 +235,87 @@ TEST_P(FaultIntegrationTestAllProtocols, HeaderFaultConfigNoHeaders) { EXPECT_EQ(0UL, test_server_->counter("http.config_test.fault.aborts_injected")->value()); EXPECT_EQ(0UL, test_server_->counter("http.config_test.fault.delays_injected")->value()); EXPECT_EQ(0UL, test_server_->counter("http.config_test.fault.response_rl_injected")->value()); + EXPECT_EQ(0UL, test_server_->gauge("http.config_test.fault.active_faults")->value()); +} + +// Request abort with grpc status, controlled via header configuration. +TEST_P(FaultIntegrationTestAllProtocols, HeaderFaultAbortGrpcConfig) { + initializeFilter(header_fault_config_); + codec_client_ = makeHttpConnection(makeClientConnection(lookupPort("http"))); + + auto response = codec_client_->makeHeaderOnlyRequest( + Http::TestRequestHeaderMapImpl{{":method", "GET"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}, + {"x-envoy-fault-abort-grpc-request", "5"}, + {"content-type", "application/grpc"}}); + response->waitForEndStream(); + + EXPECT_TRUE(response->complete()); + EXPECT_THAT(response->headers(), Envoy::Http::HttpStatusIs("200")); + EXPECT_THAT(response->headers(), + HeaderValueOf(Http::Headers::get().ContentType, "application/grpc")); + EXPECT_THAT(response->headers(), HeaderValueOf(Http::Headers::get().GrpcStatus, "5")); + EXPECT_THAT(response->headers(), + HeaderValueOf(Http::Headers::get().GrpcMessage, "fault filter abort")); + EXPECT_EQ(nullptr, response->trailers()); + + EXPECT_EQ(1UL, test_server_->counter("http.config_test.fault.aborts_injected")->value()); + EXPECT_EQ(0UL, test_server_->counter("http.config_test.fault.delays_injected")->value()); + EXPECT_EQ(0UL, test_server_->counter("http.config_test.fault.response_rl_injected")->value()); + EXPECT_EQ(0UL, test_server_->gauge("http.config_test.fault.active_faults")->value()); +} + +// Request abort with grpc status, controlled via header configuration. 
+TEST_P(FaultIntegrationTestAllProtocols, HeaderFaultAbortGrpcConfig0PercentageHeader) { + initializeFilter(header_fault_config_); + codec_client_ = makeHttpConnection(makeClientConnection(lookupPort("http"))); + + auto response = codec_client_->makeHeaderOnlyRequest( + Http::TestRequestHeaderMapImpl{{":method", "GET"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}, + {"x-envoy-fault-abort-grpc-request", "5"}, + {"x-envoy-fault-abort-request-percentage", "0"}, + {"content-type", "application/grpc"}}); + waitForNextUpstreamRequest(); + upstream_request_->encodeHeaders(default_response_headers_, true); + response->waitForEndStream(); + + EXPECT_EQ(0UL, test_server_->counter("http.config_test.fault.aborts_injected")->value()); + EXPECT_EQ(0UL, test_server_->counter("http.config_test.fault.delays_injected")->value()); + EXPECT_EQ(0UL, test_server_->counter("http.config_test.fault.response_rl_injected")->value()); + EXPECT_EQ(0UL, test_server_->gauge("http.config_test.fault.active_faults")->value()); +} + +// Request abort with grpc status, controlled via configuration. 
+TEST_P(FaultIntegrationTestAllProtocols, FaultAbortGrpcConfig) { + initializeFilter(abort_grpc_fault_config_); + codec_client_ = makeHttpConnection(makeClientConnection(lookupPort("http"))); + + auto response = codec_client_->makeHeaderOnlyRequest( + Http::TestRequestHeaderMapImpl{{":method", "GET"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}, + {"content-type", "application/grpc"}}); + response->waitForEndStream(); + + EXPECT_TRUE(response->complete()); + EXPECT_THAT(response->headers(), Envoy::Http::HttpStatusIs("200")); + EXPECT_THAT(response->headers(), + HeaderValueOf(Http::Headers::get().ContentType, "application/grpc")); + EXPECT_THAT(response->headers(), HeaderValueOf(Http::Headers::get().GrpcStatus, "5")); + EXPECT_THAT(response->headers(), + HeaderValueOf(Http::Headers::get().GrpcMessage, "fault filter abort")); + EXPECT_EQ(nullptr, response->trailers()); + + EXPECT_EQ(1UL, test_server_->counter("http.config_test.fault.aborts_injected")->value()); + EXPECT_EQ(0UL, test_server_->counter("http.config_test.fault.delays_injected")->value()); + EXPECT_EQ(0UL, test_server_->counter("http.config_test.fault.response_rl_injected")->value()); + EXPECT_EQ(0UL, test_server_->gauge("http.config_test.fault.active_faults")->value()); } // Fault integration tests that run with HTTP/2 only, used for fully testing trailers. @@ -228,6 +332,10 @@ TEST_P(FaultIntegrationTestHttp2, ResponseRateLimitTrailersBodyFlushed) { IntegrationStreamDecoderPtr decoder = codec_client_->makeHeaderOnlyRequest(default_request_headers_); waitForNextUpstreamRequest(); + + // Active fault gauge is incremented. 
+ EXPECT_EQ(1UL, test_server_->gauge("http.config_test.fault.active_faults")->value()); + upstream_request_->encodeHeaders(default_response_headers_, false); Buffer::OwnedImpl data(std::string(127, 'a')); upstream_request_->encodeData(data, false); @@ -240,7 +348,7 @@ TEST_P(FaultIntegrationTestHttp2, ResponseRateLimitTrailersBodyFlushed) { decoder->waitForBodyData(127); // Send trailers and wait for end stream. - Http::TestHeaderMapImpl trailers{{"hello", "world"}}; + Http::TestResponseTrailerMapImpl trailers{{"hello", "world"}}; upstream_request_->encodeTrailers(trailers); decoder->waitForEndStream(); EXPECT_NE(nullptr, decoder->trailers()); @@ -248,6 +356,7 @@ TEST_P(FaultIntegrationTestHttp2, ResponseRateLimitTrailersBodyFlushed) { EXPECT_EQ(0UL, test_server_->counter("http.config_test.fault.aborts_injected")->value()); EXPECT_EQ(0UL, test_server_->counter("http.config_test.fault.delays_injected")->value()); EXPECT_EQ(1UL, test_server_->counter("http.config_test.fault.response_rl_injected")->value()); + EXPECT_EQ(0UL, test_server_->gauge("http.config_test.fault.active_faults")->value()); } // Rate limiting with trailers received before the body has been flushed. @@ -260,7 +369,7 @@ TEST_P(FaultIntegrationTestHttp2, ResponseRateLimitTrailersBodyNotFlushed) { upstream_request_->encodeHeaders(default_response_headers_, false); Buffer::OwnedImpl data(std::string(128, 'a')); upstream_request_->encodeData(data, false); - Http::TestHeaderMapImpl trailers{{"hello", "world"}}; + Http::TestResponseTrailerMapImpl trailers{{"hello", "world"}}; upstream_request_->encodeTrailers(trailers); // Wait for a tick worth of data. 
@@ -275,6 +384,7 @@ TEST_P(FaultIntegrationTestHttp2, ResponseRateLimitTrailersBodyNotFlushed) { EXPECT_EQ(0UL, test_server_->counter("http.config_test.fault.aborts_injected")->value()); EXPECT_EQ(0UL, test_server_->counter("http.config_test.fault.delays_injected")->value()); EXPECT_EQ(1UL, test_server_->counter("http.config_test.fault.response_rl_injected")->value()); + EXPECT_EQ(0UL, test_server_->gauge("http.config_test.fault.active_faults")->value()); } } // namespace diff --git a/test/extensions/filters/http/fault/fault_filter_test.cc b/test/extensions/filters/http/fault/fault_filter_test.cc index ce783bdca8e53..a29c5cc858161 100644 --- a/test/extensions/filters/http/fault/fault_filter_test.cc +++ b/test/extensions/filters/http/fault/fault_filter_test.cc @@ -326,6 +326,159 @@ TEST_F(FaultFilterTest, HeaderAbortWithHttpStatus) { EXPECT_EQ("fault_filter_abort", decoder_filter_callbacks_.details_); } +TEST_F(FaultFilterTest, AbortWithGrpcStatus) { + decoder_filter_callbacks_.is_grpc_request_ = true; + + envoy::extensions::filters::http::fault::v3::HTTPFault fault; + fault.mutable_abort()->mutable_percentage()->set_numerator(100); + fault.mutable_abort()->mutable_percentage()->set_denominator( + envoy::type::v3::FractionalPercent::HUNDRED); + fault.mutable_abort()->set_grpc_status(5); + SetUpTest(fault); + + EXPECT_CALL(runtime_.snapshot_, + getInteger("fault.http.max_active_faults", std::numeric_limits::max())) + .WillOnce(Return(std::numeric_limits::max())); + + EXPECT_CALL(decoder_filter_callbacks_, continueDecoding()).Times(0); + EXPECT_CALL(decoder_filter_callbacks_.stream_info_, + setResponseFlag(StreamInfo::ResponseFlag::DelayInjected)) + .Times(0); + + // Abort related calls + EXPECT_CALL(runtime_.snapshot_, + featureEnabled("fault.http.abort.abort_percent", + Matcher(Percent(100)))) + .WillOnce(Return(true)); + + EXPECT_CALL(runtime_.snapshot_, getInteger("fault.http.abort.grpc_status", 5)) + .WillOnce(Return(5)); + + Http::TestResponseHeaderMapImpl 
response_headers{{":status", "200"}, + {"content-type", "application/grpc"}, + {"grpc-status", "5"}, + {"grpc-message", "fault filter abort"}}; + EXPECT_CALL(decoder_filter_callbacks_, + encodeHeaders_(HeaderMapEqualRef(&response_headers), true)); + + EXPECT_CALL(decoder_filter_callbacks_.stream_info_, + setResponseFlag(StreamInfo::ResponseFlag::FaultInjected)); + + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + filter_->decodeHeaders(request_headers_, false)); + Http::MetadataMap metadata_map{{"metadata", "metadata"}}; + EXPECT_EQ(Http::FilterMetadataStatus::Continue, filter_->decodeMetadata(metadata_map)); + EXPECT_EQ(1UL, config_->stats().active_faults_.value()); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false)); + EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_)); + filter_->onDestroy(); + + EXPECT_EQ(0UL, config_->stats().delays_injected_.value()); + EXPECT_EQ(1UL, config_->stats().aborts_injected_.value()); + EXPECT_EQ(0UL, config_->stats().active_faults_.value()); + EXPECT_EQ("fault_filter_abort", decoder_filter_callbacks_.details_); +} + +TEST_F(FaultFilterTest, HeaderAbortWithGrpcStatus) { + decoder_filter_callbacks_.is_grpc_request_ = true; + SetUpTest(header_abort_only_yaml); + + request_headers_.addCopy("x-envoy-fault-abort-grpc-request", "5"); + + EXPECT_CALL(runtime_.snapshot_, + getInteger("fault.http.max_active_faults", std::numeric_limits::max())) + .WillOnce(Return(std::numeric_limits::max())); + + EXPECT_CALL(decoder_filter_callbacks_, continueDecoding()).Times(0); + EXPECT_CALL(decoder_filter_callbacks_.stream_info_, + setResponseFlag(StreamInfo::ResponseFlag::DelayInjected)) + .Times(0); + + // Abort related calls + EXPECT_CALL(runtime_.snapshot_, + featureEnabled("fault.http.abort.abort_percent", + Matcher(Percent(100)))) + .WillOnce(Return(true)); + + EXPECT_CALL(runtime_.snapshot_, getInteger("fault.http.abort.grpc_status", 5)) + .WillOnce(Return(5)); + + 
Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}, + {"content-type", "application/grpc"}, + {"grpc-status", "5"}, + {"grpc-message", "fault filter abort"}}; + + EXPECT_CALL(decoder_filter_callbacks_, + encodeHeaders_(HeaderMapEqualRef(&response_headers), true)); + + EXPECT_CALL(decoder_filter_callbacks_.stream_info_, + setResponseFlag(StreamInfo::ResponseFlag::FaultInjected)); + + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + filter_->decodeHeaders(request_headers_, false)); + Http::MetadataMap metadata_map{{"metadata", "metadata"}}; + EXPECT_EQ(Http::FilterMetadataStatus::Continue, filter_->decodeMetadata(metadata_map)); + EXPECT_EQ(1UL, config_->stats().active_faults_.value()); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false)); + EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_)); + filter_->onDestroy(); + + EXPECT_EQ(0UL, config_->stats().delays_injected_.value()); + EXPECT_EQ(1UL, config_->stats().aborts_injected_.value()); + EXPECT_EQ(0UL, config_->stats().active_faults_.value()); + EXPECT_EQ("fault_filter_abort", decoder_filter_callbacks_.details_); +} + +TEST_F(FaultFilterTest, HeaderAbortWithHttpAndGrpcStatus) { + SetUpTest(header_abort_only_yaml); + + request_headers_.addCopy("x-envoy-fault-abort-request", "429"); + request_headers_.addCopy("x-envoy-fault-abort-grpc-request", "5"); + + EXPECT_CALL(runtime_.snapshot_, + getInteger("fault.http.max_active_faults", std::numeric_limits::max())) + .WillOnce(Return(std::numeric_limits::max())); + + EXPECT_CALL(decoder_filter_callbacks_, continueDecoding()).Times(0); + EXPECT_CALL(decoder_filter_callbacks_.stream_info_, + setResponseFlag(StreamInfo::ResponseFlag::DelayInjected)) + .Times(0); + + // Abort related calls + EXPECT_CALL(runtime_.snapshot_, + featureEnabled("fault.http.abort.abort_percent", + Matcher(Percent(100)))) + .WillOnce(Return(true)); + + EXPECT_CALL(runtime_.snapshot_, 
getInteger("fault.http.abort.http_status", 429)) + .WillOnce(Return(429)); + + EXPECT_CALL(runtime_.snapshot_, getInteger("fault.http.abort.grpc_status", 5)).Times(0); + + Http::TestResponseHeaderMapImpl response_headers{ + {":status", "429"}, {"content-length", "18"}, {"content-type", "text/plain"}}; + EXPECT_CALL(decoder_filter_callbacks_, + encodeHeaders_(HeaderMapEqualRef(&response_headers), false)); + EXPECT_CALL(decoder_filter_callbacks_, encodeData(_, true)); + + EXPECT_CALL(decoder_filter_callbacks_.stream_info_, + setResponseFlag(StreamInfo::ResponseFlag::FaultInjected)); + + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + filter_->decodeHeaders(request_headers_, false)); + Http::MetadataMap metadata_map{{"metadata", "metadata"}}; + EXPECT_EQ(Http::FilterMetadataStatus::Continue, filter_->decodeMetadata(metadata_map)); + EXPECT_EQ(1UL, config_->stats().active_faults_.value()); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false)); + EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_)); + filter_->onDestroy(); + + EXPECT_EQ(0UL, config_->stats().delays_injected_.value()); + EXPECT_EQ(1UL, config_->stats().aborts_injected_.value()); + EXPECT_EQ(0UL, config_->stats().active_faults_.value()); + EXPECT_EQ("fault_filter_abort", decoder_filter_callbacks_.details_); +} + TEST_F(FaultFilterTest, FixedDelayZeroDuration) { SetUpTest(fixed_delay_only_yaml); @@ -1096,6 +1249,7 @@ TEST_F(FaultFilterSettingsTest, CheckDefaultRuntimeKeys) { EXPECT_EQ("fault.http.abort.abort_percent", settings.abortPercentRuntime()); EXPECT_EQ("fault.http.delay.fixed_duration_ms", settings.delayDurationRuntime()); EXPECT_EQ("fault.http.abort.http_status", settings.abortHttpStatusRuntime()); + EXPECT_EQ("fault.http.abort.grpc_status", settings.abortGrpcStatusRuntime()); EXPECT_EQ("fault.http.max_active_faults", settings.maxActiveFaultsRuntime()); EXPECT_EQ("fault.http.rate_limit.response_percent", 
settings.responseRateLimitPercentRuntime()); } @@ -1105,6 +1259,7 @@ TEST_F(FaultFilterSettingsTest, CheckOverrideRuntimeKeys) { fault.set_abort_percent_runtime(std::string("fault.abort_percent_runtime")); fault.set_delay_percent_runtime(std::string("fault.delay_percent_runtime")); fault.set_abort_http_status_runtime(std::string("fault.abort_http_status_runtime")); + fault.set_abort_grpc_status_runtime(std::string("fault.abort_grpc_status_runtime")); fault.set_delay_duration_runtime(std::string("fault.delay_duration_runtime")); fault.set_max_active_faults_runtime(std::string("fault.max_active_faults_runtime")); fault.set_response_rate_limit_percent_runtime( @@ -1116,6 +1271,7 @@ TEST_F(FaultFilterSettingsTest, CheckOverrideRuntimeKeys) { EXPECT_EQ("fault.abort_percent_runtime", settings.abortPercentRuntime()); EXPECT_EQ("fault.delay_duration_runtime", settings.delayDurationRuntime()); EXPECT_EQ("fault.abort_http_status_runtime", settings.abortHttpStatusRuntime()); + EXPECT_EQ("fault.abort_grpc_status_runtime", settings.abortGrpcStatusRuntime()); EXPECT_EQ("fault.max_active_faults_runtime", settings.maxActiveFaultsRuntime()); EXPECT_EQ("fault.response_rate_limit_percent_runtime", settings.responseRateLimitPercentRuntime()); diff --git a/test/extensions/filters/http/grpc_http1_bridge/BUILD b/test/extensions/filters/http/grpc_http1_bridge/BUILD index d429303dbb84a..d8ee636ac045b 100644 --- a/test/extensions/filters/http/grpc_http1_bridge/BUILD +++ b/test/extensions/filters/http/grpc_http1_bridge/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( @@ -33,6 +33,6 @@ envoy_extension_cc_test( extension_name = "envoy.filters.http.grpc_http1_bridge", deps = [ "//source/extensions/filters/http/grpc_http1_bridge:config", - "//test/mocks/server:server_mocks", + 
"//test/mocks/server:factory_context_mocks", ], ) diff --git a/test/extensions/filters/http/grpc_http1_bridge/config_test.cc b/test/extensions/filters/http/grpc_http1_bridge/config_test.cc index c7894d2390aec..8c890afcaf7eb 100644 --- a/test/extensions/filters/http/grpc_http1_bridge/config_test.cc +++ b/test/extensions/filters/http/grpc_http1_bridge/config_test.cc @@ -1,6 +1,6 @@ #include "extensions/filters/http/grpc_http1_bridge/config.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git a/test/extensions/filters/http/grpc_http1_reverse_bridge/BUILD b/test/extensions/filters/http/grpc_http1_reverse_bridge/BUILD index 648f2e473dc74..71291b0623e74 100644 --- a/test/extensions/filters/http/grpc_http1_reverse_bridge/BUILD +++ b/test/extensions/filters/http/grpc_http1_reverse_bridge/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( @@ -28,6 +28,7 @@ envoy_extension_cc_test( name = "reverse_bridge_integration_test", srcs = ["reverse_bridge_integration_test.cc"], extension_name = "envoy.filters.http.grpc_http1_reverse_bridge", + tags = ["fails_on_windows"], deps = [ "//source/common/buffer:buffer_lib", "//source/common/http:header_map_lib", @@ -36,6 +37,7 @@ envoy_extension_cc_test( "//test/integration:http_integration_lib", "//test/mocks/upstream:upstream_mocks", "//test/test_common:utility_lib", + "@envoy_api//envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3:pkg_cc_proto", ], ) @@ -45,7 +47,8 @@ envoy_extension_cc_test( extension_name = "envoy.filters.http.grpc_http1_reverse_bridge", deps = [ "//source/extensions/filters/http/grpc_http1_reverse_bridge:config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", + 
"//test/mocks/server:instance_mocks", "//test/test_common:utility_lib", "@envoy_api//envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3:pkg_cc_proto", ], diff --git a/test/extensions/filters/http/grpc_http1_reverse_bridge/config_test.cc b/test/extensions/filters/http/grpc_http1_reverse_bridge/config_test.cc index ea47b673187f0..33acd2fef2563 100644 --- a/test/extensions/filters/http/grpc_http1_reverse_bridge/config_test.cc +++ b/test/extensions/filters/http/grpc_http1_reverse_bridge/config_test.cc @@ -3,7 +3,8 @@ #include "extensions/filters/http/grpc_http1_reverse_bridge/config.h" #include "extensions/filters/http/grpc_http1_reverse_bridge/filter.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" +#include "test/mocks/server/instance.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git a/test/extensions/filters/http/grpc_http1_reverse_bridge/reverse_bridge_integration_test.cc b/test/extensions/filters/http/grpc_http1_reverse_bridge/reverse_bridge_integration_test.cc index f774044bdf8af..febd3d40a3ed2 100644 --- a/test/extensions/filters/http/grpc_http1_reverse_bridge/reverse_bridge_integration_test.cc +++ b/test/extensions/filters/http/grpc_http1_reverse_bridge/reverse_bridge_integration_test.cc @@ -1,5 +1,7 @@ #include +#include "envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3/config.pb.h" + #include "common/http/message_impl.h" #include "extensions/filters/http/well_known_names.h" @@ -13,6 +15,9 @@ using Envoy::Http::HeaderValueOf; +// for ::operator""s (which Windows compiler does not support): +using namespace std::string_literals; + namespace Envoy { namespace { @@ -24,27 +29,35 @@ class ReverseBridgeIntegrationTest : public testing::TestWithParammutable_typed_per_filter_config())["envoy.filters.http.grpc_http1_reverse_bridge"] + .PackFrom(route_config); + config_helper_.addVirtualHost(vhost); + HttpIntegrationTest::initialize(); } - void TearDown() override { - test_server_.reset(); - 
fake_upstream_connection_.reset(); - fake_upstreams_.clear(); - } + void TearDown() override { fake_upstream_connection_.reset(); } + +protected: + FakeHttpConnection::Type upstream_protocol_; }; INSTANTIATE_TEST_SUITE_P(IpVersions, ReverseBridgeIntegrationTest, @@ -53,7 +66,60 @@ INSTANTIATE_TEST_SUITE_P(IpVersions, ReverseBridgeIntegrationTest, // Verifies that we don't do anything with the request when it's hitting a route that // doesn't enable the bridge. +// Regression test of https://github.com/envoyproxy/envoy/issues/9922 +TEST_P(ReverseBridgeIntegrationTest, DisabledRoute) { + upstream_protocol_ = FakeHttpConnection::Type::HTTP2; + initialize(); + + codec_client_ = makeHttpConnection(lookupPort("http")); + + Http::TestRequestHeaderMapImpl request_headers({{":scheme", "http"}, + {":method", "POST"}, + {":authority", "disabled"}, + {":path", "/testing.ExampleService/Print"}, + {"content-type", "application/grpc"}}); + auto response = codec_client_->makeRequestWithBody(request_headers, "abcdef"); + + // Wait for upstream to finish the request. + ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); + ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); + ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_)); + + // Ensure that we don't do anything + EXPECT_EQ("abcdef", upstream_request_->body().toString()); + EXPECT_THAT(upstream_request_->headers(), + HeaderValueOf(Http::Headers::get().ContentType, "application/grpc")); + + // Respond to the request. 
+ Http::TestResponseHeaderMapImpl response_headers; + response_headers.setStatus(200); + response_headers.setContentType("application/grpc"); + upstream_request_->encodeHeaders(response_headers, false); + + Buffer::OwnedImpl response_data{"defgh"}; + upstream_request_->encodeData(response_data, false); + + Http::TestResponseTrailerMapImpl response_trailers; + response_trailers.setGrpcStatus(std::string("0")); + upstream_request_->encodeTrailers(response_trailers); + + response->waitForEndStream(); + EXPECT_TRUE(response->complete()); + + EXPECT_EQ(response->body(), response_data.toString()); + EXPECT_THAT(response->headers(), + HeaderValueOf(Http::Headers::get().ContentType, "application/grpc")); + EXPECT_THAT(*response->trailers(), HeaderValueOf(Http::Headers::get().GrpcStatus, "0")); + + codec_client_->close(); + ASSERT_TRUE(fake_upstream_connection_->close()); + ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); +} + TEST_P(ReverseBridgeIntegrationTest, EnabledRoute) { + upstream_protocol_ = FakeHttpConnection::Type::HTTP1; + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); Http::TestRequestHeaderMapImpl request_headers({{":scheme", "http"}, @@ -61,12 +127,8 @@ TEST_P(ReverseBridgeIntegrationTest, EnabledRoute) { {":authority", "foo"}, {":path", "/testing.ExampleService/Print"}, {"content-type", "application/grpc"}}); - auto encoder_decoder = codec_client_->startRequest(request_headers); - request_encoder_ = &encoder_decoder.first; - IntegrationStreamDecoderPtr response = std::move(encoder_decoder.second); - Buffer::OwnedImpl request_data{"abcdef"}; - codec_client_->sendData(*request_encoder_, request_data, true); + auto response = codec_client_->makeRequestWithBody(request_headers, "abcdef"); // Wait for upstream to finish the request. 
ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); @@ -79,7 +141,7 @@ TEST_P(ReverseBridgeIntegrationTest, EnabledRoute) { EXPECT_THAT(upstream_request_->headers(), HeaderValueOf(Http::Headers::get().ContentType, "application/x-protobuf")); EXPECT_THAT(upstream_request_->headers(), - HeaderValueOf(Http::Headers::get().Accept, "application/x-protobuf")); + HeaderValueOf(Http::CustomHeaders::get().Accept, "application/x-protobuf")); // Respond to the request. Http::TestResponseHeaderMapImpl response_headers; @@ -88,11 +150,7 @@ TEST_P(ReverseBridgeIntegrationTest, EnabledRoute) { upstream_request_->encodeHeaders(response_headers, false); Buffer::OwnedImpl response_data{"defgh"}; - upstream_request_->encodeData(response_data, false); - - Http::TestResponseTrailerMapImpl response_trailers; - response_trailers.setGrpcStatus(std::string("0")); - upstream_request_->encodeTrailers(response_trailers); + upstream_request_->encodeData(response_data, true); response->waitForEndStream(); EXPECT_TRUE(response->complete()); @@ -103,8 +161,6 @@ TEST_P(ReverseBridgeIntegrationTest, EnabledRoute) { // Comparing strings embedded zero literals is hard. Use string literal and std::equal to avoid // truncating the string when it's converted to const char *. 
- using namespace std::string_literals; - const auto expected_prefix = "\0\0\0\0\4"s; EXPECT_TRUE( std::equal(response->body().begin(), response->body().begin() + 4, expected_prefix.begin())); diff --git a/test/extensions/filters/http/grpc_http1_reverse_bridge/reverse_bridge_test.cc b/test/extensions/filters/http/grpc_http1_reverse_bridge/reverse_bridge_test.cc index d264d37ff06d9..54ee07792c480 100644 --- a/test/extensions/filters/http/grpc_http1_reverse_bridge/reverse_bridge_test.cc +++ b/test/extensions/filters/http/grpc_http1_reverse_bridge/reverse_bridge_test.cc @@ -38,7 +38,7 @@ class ReverseBridgeTest : public testing::Test { filter_->setEncoderFilterCallbacks(encoder_callbacks_); } - std::unique_ptr filter_; + FilterPtr filter_; std::shared_ptr route_ = std::make_shared(); Router::RouteSpecificFilterConfig filter_config_; Http::MockStreamDecoderFilterCallbacks decoder_callbacks_; @@ -60,7 +60,8 @@ TEST_F(ReverseBridgeTest, InvalidGrpcRequest) { EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentType, "application/x-protobuf")); EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentLength, "20")); - EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().Accept, "application/x-protobuf")); + EXPECT_THAT(headers, + HeaderValueOf(Http::CustomHeaders::get().Accept, "application/x-protobuf")); } { @@ -174,7 +175,8 @@ TEST_F(ReverseBridgeTest, GrpcRequestNoManageFrameHeader) { EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentType, "application/x-protobuf")); EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentLength, "25")); - EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().Accept, "application/x-protobuf")); + EXPECT_THAT(headers, + HeaderValueOf(Http::CustomHeaders::get().Accept, "application/x-protobuf")); } { @@ -234,7 +236,8 @@ TEST_F(ReverseBridgeTest, GrpcRequest) { EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentType, "application/x-protobuf")); EXPECT_THAT(headers, 
HeaderValueOf(Http::Headers::get().ContentLength, "20")); - EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().Accept, "application/x-protobuf")); + EXPECT_THAT(headers, + HeaderValueOf(Http::CustomHeaders::get().Accept, "application/x-protobuf")); } { @@ -313,7 +316,8 @@ TEST_F(ReverseBridgeTest, GrpcRequestNoContentLength) { EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false)); EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentType, "application/x-protobuf")); - EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().Accept, "application/x-protobuf")); + EXPECT_THAT(headers, + HeaderValueOf(Http::CustomHeaders::get().Accept, "application/x-protobuf")); // Ensure that we don't insert a content-length header. EXPECT_EQ(nullptr, headers.ContentLength()); } @@ -379,6 +383,55 @@ TEST_F(ReverseBridgeTest, GrpcRequestNoContentLength) { EXPECT_EQ(12, frames[0].length_); } } + +// Regression tests that header-only responses do not get the content-length +// adjusted (https://github.com/envoyproxy/envoy/issues/11099) +TEST_F(ReverseBridgeTest, GrpcRequestHeaderOnlyResponse) { + initialize(); + decoder_callbacks_.is_grpc_request_ = true; + + { + EXPECT_CALL(decoder_callbacks_, route()).WillRepeatedly(testing::Return(nullptr)); + EXPECT_CALL(decoder_callbacks_, clearRouteCache()); + Http::TestRequestHeaderMapImpl headers({{"content-type", "application/grpc"}, + {"content-length", "25"}, + {":path", "/testing.ExampleService/SendData"}}); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false)); + + EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentType, "application/x-protobuf")); + EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentLength, "20")); + EXPECT_THAT(headers, + HeaderValueOf(Http::CustomHeaders::get().Accept, "application/x-protobuf")); + } + + { + // We should remove the first five bytes. 
+ Envoy::Buffer::OwnedImpl buffer; + buffer.add("abcdefgh", 8); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(buffer, false)); + EXPECT_EQ("fgh", buffer.toString()); + } + + { + // Subsequent calls to decodeData should do nothing. + Envoy::Buffer::OwnedImpl buffer; + buffer.add("abcdefgh", 8); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(buffer, false)); + EXPECT_EQ("abcdefgh", buffer.toString()); + } + + { + Http::TestRequestTrailerMapImpl trailers; + EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(trailers)); + } + + Http::TestResponseHeaderMapImpl headers( + {{":status", "200"}, {"content-length", "0"}, {"content-type", "application/x-protobuf"}}); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(headers, true)); + EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentType, "application/grpc")); + EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentLength, "0")); +} + // Tests that a gRPC is downgraded to application/x-protobuf and upgraded back // to gRPC, and that the upstream 400 is converted into an internal (13) // grpc-status. 
@@ -393,7 +446,8 @@ TEST_F(ReverseBridgeTest, GrpcRequestInternalError) { {{"content-type", "application/grpc"}, {":path", "/testing.ExampleService/SendData"}}); EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false)); EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentType, "application/x-protobuf")); - EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().Accept, "application/x-protobuf")); + EXPECT_THAT(headers, + HeaderValueOf(Http::CustomHeaders::get().Accept, "application/x-protobuf")); } { @@ -468,7 +522,8 @@ TEST_F(ReverseBridgeTest, GrpcRequestBadResponseNoContentType) { {{"content-type", "application/grpc"}, {":path", "/testing.ExampleService/SendData"}}); EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false)); EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentType, "application/x-protobuf")); - EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().Accept, "application/x-protobuf")); + EXPECT_THAT(headers, + HeaderValueOf(Http::CustomHeaders::get().Accept, "application/x-protobuf")); } { @@ -518,7 +573,8 @@ TEST_F(ReverseBridgeTest, GrpcRequestBadResponse) { {{"content-type", "application/grpc"}, {":path", "/testing.ExampleService/SendData"}}); EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false)); EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentType, "application/x-protobuf")); - EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().Accept, "application/x-protobuf")); + EXPECT_THAT(headers, + HeaderValueOf(Http::CustomHeaders::get().Accept, "application/x-protobuf")); } { @@ -608,7 +664,8 @@ TEST_F(ReverseBridgeTest, FilterConfigPerRouteEnabled) { EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentType, "application/x-protobuf")); EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentLength, "20")); - EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().Accept, "application/x-protobuf")); + 
EXPECT_THAT(headers, + HeaderValueOf(Http::CustomHeaders::get().Accept, "application/x-protobuf")); } { @@ -694,7 +751,8 @@ TEST_F(ReverseBridgeTest, RouteWithTrailers) { EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false)); EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentType, "application/x-protobuf")); EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentLength, "20")); - EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().Accept, "application/x-protobuf")); + EXPECT_THAT(headers, + HeaderValueOf(Http::CustomHeaders::get().Accept, "application/x-protobuf")); } { diff --git a/test/extensions/filters/http/grpc_json_transcoder/BUILD b/test/extensions/filters/http/grpc_json_transcoder/BUILD index 977669d16fd8e..51068ae50e990 100644 --- a/test/extensions/filters/http/grpc_json_transcoder/BUILD +++ b/test/extensions/filters/http/grpc_json_transcoder/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( @@ -61,6 +61,7 @@ envoy_extension_cc_test( "//test/proto:bookstore_proto_descriptor", ], extension_name = "envoy.filters.http.grpc_json_transcoder", + tags = ["fails_on_windows"], deps = [ "//source/common/grpc:codec_lib", "//source/common/http:header_map_lib", @@ -77,7 +78,7 @@ envoy_extension_cc_test( extension_name = "envoy.filters.http.grpc_json_transcoder", deps = [ "//source/extensions/filters/http/grpc_json_transcoder:config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "@envoy_api//envoy/extensions/filters/http/grpc_json_transcoder/v3:pkg_cc_proto", ], ) diff --git a/test/extensions/filters/http/grpc_json_transcoder/config_test.cc b/test/extensions/filters/http/grpc_json_transcoder/config_test.cc index 601b627aecd69..164649e3228ab 100644 --- 
a/test/extensions/filters/http/grpc_json_transcoder/config_test.cc +++ b/test/extensions/filters/http/grpc_json_transcoder/config_test.cc @@ -3,7 +3,7 @@ #include "extensions/filters/http/grpc_json_transcoder/config.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git a/test/extensions/filters/http/grpc_json_transcoder/grpc_json_transcoder_integration_test.cc b/test/extensions/filters/http/grpc_json_transcoder/grpc_json_transcoder_integration_test.cc index 3760cbb6cb418..c0384a71dc947 100644 --- a/test/extensions/filters/http/grpc_json_transcoder/grpc_json_transcoder_integration_test.cc +++ b/test/extensions/filters/http/grpc_json_transcoder/grpc_json_transcoder_integration_test.cc @@ -28,9 +28,7 @@ class GrpcJsonTranscoderIntegrationTest public: GrpcJsonTranscoderIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, GetParam()) {} - /** - * Global initializer for all integration tests. - */ + void SetUp() override { setUpstreamProtocol(FakeHttpConnection::Type::HTTP2); const std::string filter = @@ -45,15 +43,6 @@ class GrpcJsonTranscoderIntegrationTest fmt::format(filter, TestEnvironment::runfilesPath("test/proto/bookstore.descriptor"))); } - /** - * Global destructor for all integration tests. 
- */ - void TearDown() override { - test_server_.reset(); - fake_upstream_connection_.reset(); - fake_upstreams_.clear(); - } - protected: template void testTranscoding(Http::RequestHeaderMap&& request_headers, const std::string& request_body, @@ -141,8 +130,7 @@ class GrpcJsonTranscoderIntegrationTest } response_headers.iterate( - [](const Http::HeaderEntry& entry, void* context) -> Http::HeaderMap::Iterate { - auto* response = static_cast(context); + [response = response.get()](const Http::HeaderEntry& entry) -> Http::HeaderMap::Iterate { Http::LowerCaseString lower_key{std::string(entry.key().getStringView())}; if (entry.value() == UnexpectedHeaderValue) { EXPECT_FALSE(response->headers().get(lower_key)); @@ -151,8 +139,7 @@ class GrpcJsonTranscoderIntegrationTest response->headers().get(lower_key)->value().getStringView()); } return Http::HeaderMap::Iterate::Continue; - }, - response.get()); + }); if (!response_body.empty()) { if (full_response) { EXPECT_EQ(response_body, response->body()); @@ -330,6 +317,7 @@ TEST_P(GrpcJsonTranscoderIntegrationTest, UnaryGetHttpBody) { TEST_P(GrpcJsonTranscoderIntegrationTest, StreamGetHttpBody) { HttpIntegrationTest::initialize(); + // 1. Normal streaming get testTranscoding( Http::TestRequestHeaderMapImpl{ {":method", "GET"}, {":path", "/indexStream"}, {":authority", "host"}}, @@ -339,6 +327,107 @@ TEST_P(GrpcJsonTranscoderIntegrationTest, StreamGetHttpBody) { Status(), Http::TestResponseHeaderMapImpl{{":status", "200"}, {"content-type", "text/html"}}, R"(

Hello!

)" R"(Hello!)"); + + // 2. Empty response (trailers only) from streaming backend, with a gRPC error. + testTranscoding( + Http::TestRequestHeaderMapImpl{ + {":method", "GET"}, {":path", "/indexStream"}, {":authority", "host"}}, + "", {""}, {}, Status(Code::NOT_FOUND, "Not Found"), + Http::TestResponseHeaderMapImpl{{":status", "404"}, {"content-type", "application/json"}}, + ""); +} + +TEST_P(GrpcJsonTranscoderIntegrationTest, StreamGetHttpBodyMultipleFramesInData) { + HttpIntegrationTest::initialize(); + + // testTranscoding() does not provide grpc multiframe support. + // Since this is one-off it does not make sense to even more + // complicate this function. + // + // Make request to gRPC upstream + codec_client_ = makeHttpConnection(lookupPort("http")); + auto response = codec_client_->makeHeaderOnlyRequest(Http::TestRequestHeaderMapImpl{ + {":method", "GET"}, + {":path", "/indexStream"}, + {":authority", "host"}, + }); + waitForNextUpstreamRequest(); + + // Send multi-framed gRPC response + // Headers + Http::TestResponseHeaderMapImpl response_headers; + response_headers.setStatus(200); + response_headers.setContentType("application/grpc"); + upstream_request_->encodeHeaders(response_headers, false); + // Payload + google::api::HttpBody grpcMsg; + EXPECT_TRUE(TextFormat::ParseFromString(R"(content_type: "text/plain" data: "Hello")", &grpcMsg)); + Buffer::OwnedImpl response_buffer; + for (size_t i = 0; i < 3; i++) { + auto frame = Grpc::Common::serializeToGrpcFrame(grpcMsg); + response_buffer.add(*frame); + } + upstream_request_->encodeData(response_buffer, false); + // Trailers + Http::TestResponseTrailerMapImpl response_trailers; + auto grpc_status = Status(); + response_trailers.setGrpcStatus(static_cast(grpc_status.error_code())); + response_trailers.setGrpcMessage( + absl::string_view(grpc_status.error_message().data(), grpc_status.error_message().size())); + upstream_request_->encodeTrailers(response_trailers); + EXPECT_TRUE(upstream_request_->complete()); 
+ + // Wait for complete / check body to have 3 frames joined + response->waitForEndStream(); + EXPECT_TRUE(response->complete()); + EXPECT_EQ(response->body(), "HelloHelloHello"); +} + +TEST_P(GrpcJsonTranscoderIntegrationTest, StreamGetHttpBodyFragmented) { + HttpIntegrationTest::initialize(); + + // Make request to gRPC upstream + codec_client_ = makeHttpConnection(lookupPort("http")); + auto response = codec_client_->makeHeaderOnlyRequest(Http::TestRequestHeaderMapImpl{ + {":method", "GET"}, + {":path", "/indexStream"}, + {":authority", "host"}, + }); + waitForNextUpstreamRequest(); + + // Send fragmented gRPC response + // Headers + Http::TestResponseHeaderMapImpl response_headers; + response_headers.setStatus(200); + response_headers.setContentType("application/grpc"); + upstream_request_->encodeHeaders(response_headers, false); + // Fragmented payload + google::api::HttpBody http_body; + http_body.set_content_type("text/plain"); + http_body.set_data(std::string(1024, 'a')); + // Fragment gRPC frame into 2 buffers equally divided + Buffer::OwnedImpl fragment1; + auto fragment2 = Grpc::Common::serializeToGrpcFrame(http_body); + fragment1.move(*fragment2, fragment2->length() / 2); + upstream_request_->encodeData(fragment1, false); + upstream_request_->encodeData(*fragment2, false); + // Trailers + Http::TestResponseTrailerMapImpl response_trailers; + auto grpc_status = Status(); + response_trailers.setGrpcStatus(static_cast(grpc_status.error_code())); + response_trailers.setGrpcMessage( + absl::string_view(grpc_status.error_message().data(), grpc_status.error_message().size())); + upstream_request_->encodeTrailers(response_trailers); + EXPECT_TRUE(upstream_request_->complete()); + + // Wait for complete + response->waitForEndStream(); + EXPECT_TRUE(response->complete()); + // Ensure that body was actually replaced + EXPECT_EQ(response->body(), http_body.data()); + // As well as content-type header + auto content_type = 
response->headers().get(Http::LowerCaseString("content-type")); + EXPECT_EQ("text/plain", content_type->value().getStringView()); } TEST_P(GrpcJsonTranscoderIntegrationTest, UnaryEchoHttpBody) { @@ -549,7 +638,7 @@ TEST_P(GrpcJsonTranscoderIntegrationTest, ServerStreamingGet) { Http::TestRequestHeaderMapImpl{ {":method", "GET"}, {":path", "/shelves/37/books"}, {":authority", "host"}}, "", {"shelf: 37"}, {}, Status(Code::NOT_FOUND, "Shelf 37 not found"), - Http::TestResponseHeaderMapImpl{{":status", "200"}, {"content-type", "application/json"}}, + Http::TestResponseHeaderMapImpl{{":status", "404"}, {"content-type", "application/json"}}, "[]"); } @@ -716,7 +805,9 @@ TEST_P(GrpcJsonTranscoderIntegrationTest, LargeStruct) { R"({"content":)" + largeJson + R"(})"); } -TEST_P(GrpcJsonTranscoderIntegrationTest, UnknownField) { +TEST_P(GrpcJsonTranscoderIntegrationTest, UnknownFieldInRequest) { + // Request JSON has many fields that are unknown to the request proto message. + // They are discarded. HttpIntegrationTest::initialize(); testTranscoding( Http::TestRequestHeaderMapImpl{{":method", "POST"}, @@ -732,6 +823,33 @@ TEST_P(GrpcJsonTranscoderIntegrationTest, UnknownField) { R"({"id":"20","theme":"Children"})"); } +// Test proto to json transcoding with an unknown field in the response message. +// gRPC server may use a updated proto with a new field, but Envoy transcoding +// filter could use an old proto descriptor without that field. That fields is unknown +// to the Envoy transcoder filter. Expected result: the unknown field is discarded, +// other fields should be transcoded properly. +TEST_P(GrpcJsonTranscoderIntegrationTest, UnknownResponse) { + // The mocked upstream proto response message is bookstore::BigBook which has + // all 3 fields. But the proto descriptor used by the Envoy transcoder filter is using + // bookstore::OldBigBook which is missing the `field1` field. 
+ HttpIntegrationTest::initialize(); + // The bug is ZeroCopyInputStreamImpl::Skip() which is not implemented. + // In order to trigger a call to that function, the response message has to be big enough + // so it is stored in multiple slices. + const std::string field1_value = std::string(32 * 1024, 'O'); + const std::string response_body = + fmt::format(R"(field1: "{}" field2: "field2_value" field3: "field3_value" )", field1_value); + testTranscoding( + Http::TestRequestHeaderMapImpl{ + {":method", "GET"}, {":path", "/bigbook"}, {":authority", "host"}}, + "", {""}, {response_body}, Status(), + Http::TestResponseHeaderMapImpl{{":status", "200"}, + {"content-type", "application/json"}, + {"content-length", "49"}, + {"grpc-status", "0"}}, + R"({"field2":"field2_value","field3":"field3_value"})"); +} + TEST_P(GrpcJsonTranscoderIntegrationTest, UTF8) { HttpIntegrationTest::initialize(); testTranscoding( diff --git a/test/extensions/filters/http/grpc_json_transcoder/http_body_utils_test.cc b/test/extensions/filters/http/grpc_json_transcoder/http_body_utils_test.cc index 1446c4d8874d0..e1a3ed763c61c 100644 --- a/test/extensions/filters/http/grpc_json_transcoder/http_body_utils_test.cc +++ b/test/extensions/filters/http/grpc_json_transcoder/http_body_utils_test.cc @@ -18,10 +18,7 @@ class HttpBodyUtilsTest : public testing::Test { public: HttpBodyUtilsTest() = default; - template - void basicTest(const std::string& content, const std::string& content_type, - const std::vector& body_field_path, - std::function get_http_body) { + void setBodyFieldPath(const std::vector& body_field_path) { for (int field_number : body_field_path) { Protobuf::Field field; field.set_number(field_number); @@ -30,20 +27,53 @@ class HttpBodyUtilsTest : public testing::Test { for (auto& field : raw_body_field_path_) { body_field_path_.push_back(&field); } + } - Buffer::InstancePtr message_buffer = std::make_unique(); - HttpBodyUtils::appendHttpBodyEnvelope(*message_buffer, body_field_path_, 
content_type, - content.length()); - message_buffer->add(content); + template + void basicTest(const std::string& content, const std::string& content_type, + const std::vector& body_field_path, + std::function get_http_body) { + setBodyFieldPath(body_field_path); - Buffer::ZeroCopyInputStreamImpl stream(std::move(message_buffer)); + // Parse using concrete message type. + { + Buffer::InstancePtr message_buffer = std::make_unique(); + HttpBodyUtils::appendHttpBodyEnvelope(*message_buffer, body_field_path_, content_type, + content.length()); + message_buffer->add(content); - Message message; - message.ParseFromZeroCopyStream(&stream); + Buffer::ZeroCopyInputStreamImpl stream(std::move(message_buffer)); + + Message message; + message.ParseFromZeroCopyStream(&stream); + + google::api::HttpBody http_body = get_http_body(std::move(message)); + EXPECT_EQ(http_body.content_type(), content_type); + EXPECT_EQ(http_body.data(), content); + } - google::api::HttpBody http_body = get_http_body(std::move(message)); - EXPECT_EQ(http_body.content_type(), content_type); - EXPECT_EQ(http_body.data(), content); + // Parse message dynamically by field path. 
+ { + Buffer::InstancePtr message_buffer = std::make_unique(); + HttpBodyUtils::appendHttpBodyEnvelope(*message_buffer, body_field_path_, content_type, + content.length()); + message_buffer->add(content); + + google::api::HttpBody http_body; + Buffer::ZeroCopyInputStreamImpl stream(std::move(message_buffer)); + EXPECT_TRUE(HttpBodyUtils::parseMessageByFieldPath(&stream, body_field_path_, &http_body)); + EXPECT_EQ(http_body.content_type(), content_type); + EXPECT_EQ(http_body.data(), content); + } + } + + void testInvalidMessage(const std::string& content, const std::vector& body_field_path) { + setBodyFieldPath(body_field_path); + Buffer::InstancePtr message_buffer = std::make_unique(); + message_buffer->add(content); + google::api::HttpBody http_body; + Buffer::ZeroCopyInputStreamImpl stream(std::move(message_buffer)); + EXPECT_FALSE(HttpBodyUtils::parseMessageByFieldPath(&stream, body_field_path_, &http_body)); } std::vector raw_body_field_path_; @@ -77,6 +107,60 @@ TEST_F(HttpBodyUtilsTest, NestedFieldsList) { [](bookstore::DeepNestedBody message) { return message.nested().nested().nested().body(); }); } +TEST_F(HttpBodyUtilsTest, SkipUnknownFields) { + bookstore::DeepNestedBody message; + auto* body = message.mutable_nested()->mutable_nested()->mutable_nested()->mutable_body(); + body->set_content_type("text/nested"); + body->set_data("abcd"); + message.mutable_extra()->set_field("test"); + message.mutable_nested()->mutable_extra()->set_field(123); + + Buffer::InstancePtr message_buffer = std::make_unique(); + std::string serialized_message; + EXPECT_TRUE(message.SerializeToString(&serialized_message)); + message_buffer->add(serialized_message); + setBodyFieldPath({1, 1000000, 100000000, 500000000}); + + google::api::HttpBody http_body; + Buffer::ZeroCopyInputStreamImpl stream(std::move(message_buffer)); + EXPECT_TRUE(HttpBodyUtils::parseMessageByFieldPath(&stream, body_field_path_, &http_body)); + EXPECT_EQ(http_body.content_type(), "text/nested"); + 
EXPECT_EQ(http_body.data(), "abcd"); +} + +TEST_F(HttpBodyUtilsTest, FailInvalidLength) { + std::string message; + // First field tag. + message += static_cast((1 << 3) | 2); + // Invalid length. + message += '\x02'; + // Second field tag. + message += static_cast((2 << 3) | 2); + // Invalid length. + message += '\x80'; + testInvalidMessage(message, {1, 2}); +} + +TEST_F(HttpBodyUtilsTest, FailSkipField) { + std::string message; + // Field tag. + message += static_cast((2 << 3) | 2); + // Invalid length. + message += '\x80'; + testInvalidMessage(message, {1}); +} + +TEST_F(HttpBodyUtilsTest, FailShortMessage) { + std::string message; + // Field tag. + message += static_cast((1 << 3) | 2); + // Length less then remaining message size. + message += '\x02'; + // Invalid tag. + message += '\x00'; + testInvalidMessage(message, {1, 2}); +} + } // namespace } // namespace GrpcJsonTranscoder } // namespace HttpFilters diff --git a/test/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter_test.cc b/test/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter_test.cc index 2170a79cf38e0..c0fbd516472d4 100644 --- a/test/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter_test.cc +++ b/test/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter_test.cc @@ -1,5 +1,6 @@ #include #include +#include #include "envoy/extensions/filters/http/grpc_json_transcoder/v3/transcoder.pb.h" @@ -31,6 +32,7 @@ using Envoy::Protobuf::util::MessageDifferencer; using Envoy::ProtobufUtil::error::Code; using google::api::HttpRule; using google::grpc::transcoding::Transcoder; +using TranscoderPtr = std::unique_ptr; namespace Envoy { namespace Extensions { @@ -165,6 +167,33 @@ TEST_F(GrpcJsonTranscoderConfigTest, NonProto) { EnvoyException, "transcoding_filter: Unable to parse proto descriptor"); } +TEST_F(GrpcJsonTranscoderConfigTest, JsonResponseBody) { + EXPECT_THROW_WITH_REGEX( + JsonTranscoderConfig config( + 
getProtoConfig(TestEnvironment::runfilesPath("test/proto/bookstore.descriptor"), + "bookstore.ServiceWithResponseBody"), + *api_), + EnvoyException, "Setting \"response_body\" is not supported yet for non-HttpBody fields"); +} + +TEST_F(GrpcJsonTranscoderConfigTest, InvalidRequestBodyPath) { + EXPECT_THROW_WITH_REGEX( + JsonTranscoderConfig config( + getProtoConfig(TestEnvironment::runfilesPath("test/proto/bookstore.descriptor"), + "bookstore.ServiceWithInvalidRequestBodyPath"), + *api_), + EnvoyException, "Could not find field"); +} + +TEST_F(GrpcJsonTranscoderConfigTest, InvalidResponseBodyPath) { + EXPECT_THROW_WITH_REGEX( + JsonTranscoderConfig config( + getProtoConfig(TestEnvironment::runfilesPath("test/proto/bookstore.descriptor"), + "bookstore.ServiceWithInvalidResponseBodyPath"), + *api_), + EnvoyException, "Could not find field"); +} + TEST_F(GrpcJsonTranscoderConfigTest, NonBinaryProto) { envoy::extensions::filters::http::grpc_json_transcoder::v3::GrpcJsonTranscoder proto_config; proto_config.set_proto_descriptor_bin("This is invalid proto"); @@ -195,7 +224,7 @@ TEST_F(GrpcJsonTranscoderConfigTest, CreateTranscoder) { Http::TestRequestHeaderMapImpl headers{{":method", "GET"}, {":path", "/shelves"}}; TranscoderInputStreamImpl request_in, response_in; - std::unique_ptr transcoder; + TranscoderPtr transcoder; MethodInfoSharedPtr method_info; const auto status = config.createTranscoder(headers, request_in, response_in, transcoder, method_info); @@ -216,7 +245,7 @@ TEST_F(GrpcJsonTranscoderConfigTest, CreateTranscoderAutoMap) { {":path", "/bookstore.Bookstore/DeleteShelf"}}; TranscoderInputStreamImpl request_in, response_in; - std::unique_ptr transcoder; + TranscoderPtr transcoder; MethodInfoSharedPtr method_info; const auto status = config.createTranscoder(headers, request_in, response_in, transcoder, method_info); @@ -235,7 +264,7 @@ TEST_F(GrpcJsonTranscoderConfigTest, InvalidQueryParameter) { Http::TestRequestHeaderMapImpl headers{{":method", "GET"}, 
{":path", "/shelves?foo=bar"}}; TranscoderInputStreamImpl request_in, response_in; - std::unique_ptr transcoder; + TranscoderPtr transcoder; MethodInfoSharedPtr method_info; const auto status = config.createTranscoder(headers, request_in, response_in, transcoder, method_info); @@ -255,7 +284,7 @@ TEST_F(GrpcJsonTranscoderConfigTest, UnknownQueryParameterIsIgnored) { Http::TestRequestHeaderMapImpl headers{{":method", "GET"}, {":path", "/shelves?foo=bar"}}; TranscoderInputStreamImpl request_in, response_in; - std::unique_ptr transcoder; + TranscoderPtr transcoder; MethodInfoSharedPtr method_info; const auto status = config.createTranscoder(headers, request_in, response_in, transcoder, method_info); @@ -274,7 +303,7 @@ TEST_F(GrpcJsonTranscoderConfigTest, IgnoredQueryParameter) { Http::TestRequestHeaderMapImpl headers{{":method", "GET"}, {":path", "/shelves?key=API_KEY"}}; TranscoderInputStreamImpl request_in, response_in; - std::unique_ptr transcoder; + TranscoderPtr transcoder; MethodInfoSharedPtr method_info; const auto status = config.createTranscoder(headers, request_in, response_in, transcoder, method_info); @@ -296,7 +325,7 @@ TEST_F(GrpcJsonTranscoderConfigTest, InvalidVariableBinding) { Http::TestRequestHeaderMapImpl headers{{":method", "GET"}, {":path", "/book/1"}}; TranscoderInputStreamImpl request_in, response_in; - std::unique_ptr transcoder; + TranscoderPtr transcoder; MethodInfoSharedPtr method_info; const auto status = config.createTranscoder(headers, request_in, response_in, transcoder, method_info); @@ -342,9 +371,10 @@ TEST_F(GrpcJsonTranscoderFilterTest, NoTranscoding) { {":method", "POST"}, {":path", "/grpc.service/UnknownGrpcMethod"}}; - Http::TestHeaderMapImpl expected_request_headers{{"content-type", "application/grpc"}, - {":method", "POST"}, - {":path", "/grpc.service/UnknownGrpcMethod"}}; + Http::TestRequestHeaderMapImpl expected_request_headers{ + {"content-type", "application/grpc"}, + {":method", "POST"}, + {":path", 
"/grpc.service/UnknownGrpcMethod"}}; EXPECT_CALL(decoder_callbacks_, clearRouteCache()).Times(0); @@ -363,8 +393,8 @@ TEST_F(GrpcJsonTranscoderFilterTest, NoTranscoding) { Http::TestResponseHeaderMapImpl response_headers{{"content-type", "application/grpc"}, {":status", "200"}}; - Http::TestHeaderMapImpl expected_response_headers{{"content-type", "application/grpc"}, - {":status", "200"}}; + Http::TestResponseHeaderMapImpl expected_response_headers{{"content-type", "application/grpc"}, + {":status", "200"}}; EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.encodeHeaders(response_headers, false)); EXPECT_EQ(expected_response_headers, response_headers); @@ -374,7 +404,7 @@ TEST_F(GrpcJsonTranscoderFilterTest, NoTranscoding) { EXPECT_EQ(2, response_data.length()); Http::TestResponseTrailerMapImpl response_trailers{{"grpc-status", "0"}}; - Http::TestHeaderMapImpl expected_response_trailers{{"grpc-status", "0"}}; + Http::TestResponseTrailerMapImpl expected_response_trailers{{"grpc-status", "0"}}; EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.encodeTrailers(response_trailers)); EXPECT_EQ(expected_response_trailers, response_trailers); } @@ -388,6 +418,7 @@ TEST_F(GrpcJsonTranscoderFilterTest, TranscodingUnaryPost) { EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false)); EXPECT_EQ("application/grpc", request_headers.get_("content-type")); EXPECT_EQ("/shelf", request_headers.get_("x-envoy-original-path")); + EXPECT_EQ("POST", request_headers.get_("x-envoy-original-method")); EXPECT_EQ("/bookstore.Bookstore/CreateShelf", request_headers.get_(":path")); EXPECT_EQ("trailers", request_headers.get_("te")); @@ -454,6 +485,7 @@ TEST_F(GrpcJsonTranscoderFilterTest, TranscodingUnaryPostWithPackageServiceMetho EXPECT_EQ("application/grpc", request_headers.get_("content-type")); EXPECT_EQ("/bookstore.Bookstore/CreateShelfWithPackageServiceAndMethod", request_headers.get_("x-envoy-original-path")); + EXPECT_EQ("POST", 
request_headers.get_("x-envoy-original-method")); EXPECT_EQ("/bookstore.Bookstore/CreateShelfWithPackageServiceAndMethod", request_headers.get_(":path")); EXPECT_EQ("trailers", request_headers.get_("te")); @@ -597,6 +629,7 @@ TEST_F(GrpcJsonTranscoderFilterSkipRecalculatingTest, TranscodingUnaryPostSkipRe EXPECT_EQ("application/grpc", request_headers.get_("content-type")); EXPECT_EQ("/shelf", request_headers.get_("x-envoy-original-path")); + EXPECT_EQ("POST", request_headers.get_("x-envoy-original-method")); EXPECT_EQ("/bookstore.Bookstore/CreateShelf", request_headers.get_(":path")); EXPECT_EQ("trailers", request_headers.get_("te")); @@ -618,7 +651,7 @@ TEST_F(GrpcJsonTranscoderFilterTest, TranscodingUnaryError) { EXPECT_CALL(decoder_callbacks_, encodeHeaders_(_, false)) .WillOnce(Invoke([](Http::ResponseHeaderMap& headers, bool end_stream) { - EXPECT_EQ("400", headers.Status()->value().getStringView()); + EXPECT_EQ("400", headers.getStatusValue()); EXPECT_FALSE(end_stream); })); EXPECT_CALL(decoder_callbacks_, encodeData(_, true)); @@ -674,6 +707,7 @@ TEST_F(GrpcJsonTranscoderFilterTest, TranscodingUnaryWithHttpBodyAsOutput) { EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false)); EXPECT_EQ("application/grpc", request_headers.get_("content-type")); EXPECT_EQ("/index", request_headers.get_("x-envoy-original-path")); + EXPECT_EQ("GET", request_headers.get_("x-envoy-original-method")); EXPECT_EQ("/bookstore.Bookstore/GetIndex", request_headers.get_(":path")); EXPECT_EQ("trailers", request_headers.get_("te")); @@ -700,6 +734,40 @@ TEST_F(GrpcJsonTranscoderFilterTest, TranscodingUnaryWithHttpBodyAsOutput) { EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers)); } +TEST_F(GrpcJsonTranscoderFilterTest, TranscodingUnaryWithInvalidHttpBodyAsOutput) { + Http::TestRequestHeaderMapImpl request_headers{{":method", "GET"}, + {":path", "/echoResponseBodyPath"}}; + + EXPECT_CALL(decoder_callbacks_, 
clearRouteCache()); + + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false)); + EXPECT_EQ("application/grpc", request_headers.get_("content-type")); + EXPECT_EQ("/echoResponseBodyPath", request_headers.get_("x-envoy-original-path")); + EXPECT_EQ("GET", request_headers.get_("x-envoy-original-method")); + EXPECT_EQ("/bookstore.Bookstore/EchoResponseBodyPath", request_headers.get_(":path")); + EXPECT_EQ("trailers", request_headers.get_("te")); + + Http::TestResponseHeaderMapImpl response_headers{{"content-type", "application/grpc"}, + {":status", "200"}}; + + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + filter_.encodeHeaders(response_headers, false)); + EXPECT_EQ("application/json", response_headers.get_("content-type")); + + google::api::HttpBody response; + response.set_content_type("text/html"); + response.set_data("

Hello, world!

"); + + Buffer::OwnedImpl response_data; + // Some invalid message. + response_data.add("\x10\x80"); + Grpc::Common::prependGrpcFrameHeader(response_data); + + EXPECT_CALL(encoder_callbacks_, resetStream()); + EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, + filter_.encodeData(response_data, false)); +} + TEST_F(GrpcJsonTranscoderFilterTest, TranscodingUnaryWithHttpBodyAsOutputAndSplitTwoEncodeData) { Http::TestRequestHeaderMapImpl request_headers{{":method", "GET"}, {":path", "/index"}}; @@ -708,6 +776,7 @@ TEST_F(GrpcJsonTranscoderFilterTest, TranscodingUnaryWithHttpBodyAsOutputAndSpli EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false)); EXPECT_EQ("application/grpc", request_headers.get_("content-type")); EXPECT_EQ("/index", request_headers.get_("x-envoy-original-path")); + EXPECT_EQ("GET", request_headers.get_("x-envoy-original-method")); EXPECT_EQ("/bookstore.Bookstore/GetIndex", request_headers.get_(":path")); EXPECT_EQ("trailers", request_headers.get_("te")); @@ -753,6 +822,7 @@ TEST_F(GrpcJsonTranscoderFilterTest, TranscodingUnaryPostWithHttpBody) { EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false)); EXPECT_EQ("application/grpc", request_headers.get_("content-type")); EXPECT_EQ("/postBody?arg=hi", request_headers.get_("x-envoy-original-path")); + EXPECT_EQ("POST", request_headers.get_("x-envoy-original-method")); EXPECT_EQ("/bookstore.Bookstore/PostBody", request_headers.get_(":path")); EXPECT_EQ("trailers", request_headers.get_("te")); @@ -800,6 +870,7 @@ TEST_F(GrpcJsonTranscoderFilterTest, TranscodingStreamPostWithHttpBody) { EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false)); EXPECT_EQ("application/grpc", request_headers.get_("content-type")); EXPECT_EQ("/streamBody?arg=hi", request_headers.get_("x-envoy-original-path")); + EXPECT_EQ("POST", request_headers.get_("x-envoy-original-method")); 
EXPECT_EQ("/bookstore.Bookstore/StreamBody", request_headers.get_(":path")); EXPECT_EQ("trailers", request_headers.get_("te")); @@ -855,6 +926,7 @@ TEST_F(GrpcJsonTranscoderFilterTest, TranscodingStreamWithHttpBodyAsOutput) { EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false)); EXPECT_EQ("application/grpc", request_headers.get_("content-type")); EXPECT_EQ("/indexStream", request_headers.get_("x-envoy-original-path")); + EXPECT_EQ("GET", request_headers.get_("x-envoy-original-method")); EXPECT_EQ("/bookstore.Bookstore/GetIndexStream", request_headers.get_(":path")); EXPECT_EQ("trailers", request_headers.get_("te")); @@ -884,10 +956,57 @@ TEST_F(GrpcJsonTranscoderFilterTest, TranscodingStreamWithHttpBodyAsOutput) { EXPECT_EQ(nullptr, response_headers.ContentLength()); EXPECT_EQ(response.data(), response_data->toString()); + // "Send" 3rd multiframe message ("msgmsgmsg") + Buffer::OwnedImpl multiframe_data; + response.set_data("msg"); + for (size_t i = 0; i < 3; i++) { + auto frame = Grpc::Common::serializeToGrpcFrame(response); + multiframe_data.add(*frame); + } + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.encodeData(multiframe_data, false)); + // 3 grpc frames joined + EXPECT_EQ("msgmsgmsg", multiframe_data.toString()); + Http::TestRequestTrailerMapImpl request_trailers; EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers)); } +TEST_F(GrpcJsonTranscoderFilterTest, TranscodingStreamWithFragmentedHttpBody) { + Http::TestRequestHeaderMapImpl request_headers{{":method", "GET"}, {":path", "/indexStream"}}; + + EXPECT_CALL(decoder_callbacks_, clearRouteCache()); + + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false)); + EXPECT_EQ("application/grpc", request_headers.get_("content-type")); + EXPECT_EQ("/indexStream", request_headers.get_("x-envoy-original-path")); + EXPECT_EQ("GET", request_headers.get_("x-envoy-original-method")); + 
EXPECT_EQ("/bookstore.Bookstore/GetIndexStream", request_headers.get_(":path")); + EXPECT_EQ("trailers", request_headers.get_("te")); + + Http::TestResponseHeaderMapImpl response_headers{{"content-type", "application/grpc"}, + {":status", "200"}}; + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + filter_.encodeHeaders(response_headers, false)); + + // "Send" one fragmented gRPC frame + google::api::HttpBody http_body; + http_body.set_content_type("text/html"); + http_body.set_data("

Fragmented Message!

"); + auto fragment2 = Grpc::Common::serializeToGrpcFrame(http_body); + Buffer::OwnedImpl fragment1; + fragment1.move(*fragment2, fragment2->length() / 2); + EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter_.encodeData(fragment1, false)); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.encodeData(*fragment2, false)); + + // Ensure that content-type is correct (taken from httpBody) + EXPECT_EQ("text/html", response_headers.get_("content-type")); + + // Fragment1 is buffered by transcoder + EXPECT_EQ(0, fragment1.length()); + // Second fragment contains entire body + EXPECT_EQ(http_body.data(), fragment2->toString()); +} + class GrpcJsonTranscoderFilterGrpcStatusTest : public GrpcJsonTranscoderFilterTest { public: GrpcJsonTranscoderFilterGrpcStatusTest( diff --git a/test/extensions/filters/http/grpc_stats/BUILD b/test/extensions/filters/http/grpc_stats/BUILD index 15dd7ab9aeff4..df0dd7b0f8774 100644 --- a/test/extensions/filters/http/grpc_stats/BUILD +++ b/test/extensions/filters/http/grpc_stats/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( @@ -20,7 +20,7 @@ envoy_extension_cc_test( "//source/extensions/filters/http/grpc_stats:config", "//test/common/buffer:utility_lib", "//test/common/stream_info:test_util", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "//test/test_common:logging_lib", "@envoy_api//envoy/extensions/filters/http/grpc_stats/v3:pkg_cc_proto", ], diff --git a/test/extensions/filters/http/grpc_stats/config_test.cc b/test/extensions/filters/http/grpc_stats/config_test.cc index b75737b06cd28..68bf0bde27f0c 100644 --- a/test/extensions/filters/http/grpc_stats/config_test.cc +++ b/test/extensions/filters/http/grpc_stats/config_test.cc @@ -7,7 +7,7 @@ #include "test/common/buffer/utility.h" #include 
"test/common/stream_info/test_util.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "test/test_common/logging.h" #include "gmock/gmock.h" @@ -64,7 +64,7 @@ class GrpcStatsFilterConfigTest : public testing::Test { envoy::extensions::filters::http::grpc_stats::v3::FilterConfig config_; NiceMock context_; - std::shared_ptr filter_; + Http::StreamFilterSharedPtr filter_; NiceMock decoder_callbacks_; NiceMock stream_info_; NiceMock stats_store_; @@ -358,11 +358,18 @@ TEST_F(GrpcStatsFilterConfigTest, MessageCounts) { .counterFromString( "grpc.lyft.users.BadCompanions.GetBadCompanions.request_message_count") .value()); - EXPECT_EQ(0U, decoder_callbacks_.clusterInfo() - ->statsScope() - .counterFromString( - "grpc.lyft.users.BadCompanions.GetBadCompanions.response_message_count") - .value()); + + // Check that there is response_message_count stat yet. We use + // stats_store_.findCounterByString rather than looking on + // clusterInfo()->statsScope() because findCounterByString is not an API on + // Stats::Store, and there is no prefix so the names will match. We verify + // that by double-checking we can find the request_message_count using the + // same API. 
+ EXPECT_FALSE(stats_store_.findCounterByString( + "grpc.lyft.users.BadCompanions.GetBadCompanions.response_message_count")); + EXPECT_TRUE(stats_store_.findCounterByString( + "grpc.lyft.users.BadCompanions.GetBadCompanions.request_message_count")); + const auto& data = stream_info_.filterState()->getDataReadOnly( HttpFilterNames::get().GrpcStats); EXPECT_EQ(2U, data.request_message_count); diff --git a/test/extensions/filters/http/grpc_web/BUILD b/test/extensions/filters/http/grpc_web/BUILD index 5c35c5c6c3574..cf5187b684554 100644 --- a/test/extensions/filters/http/grpc_web/BUILD +++ b/test/extensions/filters/http/grpc_web/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( @@ -31,7 +31,7 @@ envoy_extension_cc_test( extension_name = "envoy.filters.http.grpc_web", deps = [ "//source/extensions/filters/http/grpc_web:config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", ], ) @@ -39,9 +39,11 @@ envoy_extension_cc_test( name = "grpc_web_integration_test", srcs = ["grpc_web_filter_integration_test.cc"], extension_name = "envoy.filters.http.grpc_web", + tags = ["fails_on_windows"], deps = [ "//source/common/buffer:buffer_lib", "//source/common/http:header_map_lib", + "//source/extensions/filters/http/grpc_web:config", "//source/extensions/filters/http/grpc_web:grpc_web_filter_lib", "//test/integration:http_integration_lib", "//test/mocks/upstream:upstream_mocks", diff --git a/test/extensions/filters/http/grpc_web/config_test.cc b/test/extensions/filters/http/grpc_web/config_test.cc index 76d1e435d4aba..0ad9da56c86f5 100644 --- a/test/extensions/filters/http/grpc_web/config_test.cc +++ b/test/extensions/filters/http/grpc_web/config_test.cc @@ -1,6 +1,6 @@ #include "extensions/filters/http/grpc_web/config.h" -#include "test/mocks/server/mocks.h" 
+#include "test/mocks/server/factory_context.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git a/test/extensions/filters/http/grpc_web/grpc_web_filter_integration_test.cc b/test/extensions/filters/http/grpc_web/grpc_web_filter_integration_test.cc index 773a5f0354332..2eaf4ef7fdb1d 100644 --- a/test/extensions/filters/http/grpc_web/grpc_web_filter_integration_test.cc +++ b/test/extensions/filters/http/grpc_web/grpc_web_filter_integration_test.cc @@ -9,20 +9,55 @@ namespace Envoy { namespace { -class GrpcWebFilterIntegrationTest : public ::testing::TestWithParam, +using SkipEncodingEmptyTrailers = bool; +using TestParams = + std::tuple; + +class GrpcWebFilterIntegrationTest : public testing::TestWithParam, public HttpIntegrationTest { public: GrpcWebFilterIntegrationTest() - : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, GetParam()) {} + : HttpIntegrationTest(std::get<1>(GetParam()), std::get<0>(GetParam())) {} void SetUp() override { setUpstreamProtocol(FakeHttpConnection::Type::HTTP2); config_helper_.addFilter("name: envoy.filters.http.grpc_web"); } + + void skipEncodingEmptyTrailers(SkipEncodingEmptyTrailers http2_skip_encoding_empty_trailers) { + config_helper_.addRuntimeOverride( + "envoy.reloadable_features.http2_skip_encoding_empty_trailers", + http2_skip_encoding_empty_trailers ? "true" : "false"); + } + + static std::string testParamsToString(const testing::TestParamInfo params) { + return fmt::format( + "{}_{}_{}", + TestUtility::ipTestParamsToString(testing::TestParamInfo( + std::get<0>(params.param), params.index)), + std::get<1>(params.param) == Http::CodecClient::Type::HTTP2 ? "Http2" : "Http", + std::get<2>(params.param) ? 
"SkipEncodingEmptyTrailers" : "SubmitEncodingEmptyTrailers"); + } }; -TEST_P(GrpcWebFilterIntegrationTest, GRPCWebTrailersNotDuplicated) { - config_helper_.addConfigModifier(setEnableDownstreamTrailersHttp1()); +INSTANTIATE_TEST_SUITE_P( + Params, GrpcWebFilterIntegrationTest, + testing::Combine( + testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + testing::Values(Http::CodecClient::Type::HTTP1, Http::CodecClient::Type::HTTP2), + testing::Values(SkipEncodingEmptyTrailers{true}, SkipEncodingEmptyTrailers{false})), + GrpcWebFilterIntegrationTest::testParamsToString); + +TEST_P(GrpcWebFilterIntegrationTest, GrpcWebTrailersNotDuplicated) { + const auto downstream_protocol = std::get<1>(GetParam()); + const bool http2_skip_encoding_empty_trailers = std::get<2>(GetParam()); + + if (downstream_protocol == Http::CodecClient::Type::HTTP1) { + config_helper_.addConfigModifier(setEnableDownstreamTrailersHttp1()); + } else { + skipEncodingEmptyTrailers(http2_skip_encoding_empty_trailers); + } + setUpstreamProtocol(FakeHttpConnection::Type::HTTP2); Http::TestRequestTrailerMapImpl request_trailers{{"request1", "trailer1"}, @@ -53,11 +88,26 @@ TEST_P(GrpcWebFilterIntegrationTest, GRPCWebTrailersNotDuplicated) { EXPECT_THAT(*upstream_request_->trailers(), HeaderMapEqualRef(&request_trailers)); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_TRUE(absl::StrContains(response->body(), "response1:trailer1")); EXPECT_TRUE(absl::StrContains(response->body(), "response2:trailer2")); - // Expect that the trailers be in the response-body instead - EXPECT_EQ(response->trailers(), nullptr); + + if (downstream_protocol == Http::CodecClient::Type::HTTP1) { + // When the downstream protocol is HTTP/1.1 we expect the trailers to be in the response-body. 
+ EXPECT_EQ(nullptr, response->trailers()); + } + + if (downstream_protocol == Http::CodecClient::Type::HTTP2) { + if (http2_skip_encoding_empty_trailers) { + // When the downstream protocol is HTTP/2 and the feature-flag to skip encoding empty trailers + // is turned on, expect that the trailers are included in the response-body. + EXPECT_EQ(nullptr, response->trailers()); + } else { + // Otherwise, we send empty trailers. + ASSERT_NE(nullptr, response->trailers()); + EXPECT_TRUE(response->trailers()->empty()); + } + } } } // namespace diff --git a/test/extensions/filters/http/grpc_web/grpc_web_filter_test.cc b/test/extensions/filters/http/grpc_web/grpc_web_filter_test.cc index 8828f47c62c10..32f29094010dd 100644 --- a/test/extensions/filters/http/grpc_web/grpc_web_filter_test.cc +++ b/test/extensions/filters/http/grpc_web/grpc_web_filter_test.cc @@ -87,7 +87,7 @@ class GrpcWebFilterTest : public testing::TestWithParamvalue().getStringView(), &code)); + ASSERT_TRUE(absl::SimpleAtoi(headers.getStatusValue(), &code)); EXPECT_EQ(static_cast(expected_code), code); })); EXPECT_CALL(decoder_callbacks_, encodeData(_, _)) @@ -95,15 +95,13 @@ class GrpcWebFilterTest : public testing::TestWithParamvalue().getStringView()); + void expectRequiredGrpcUpstreamHeaders(const Http::TestRequestHeaderMapImpl& request_headers) { + EXPECT_EQ(Http::Headers::get().ContentTypeValues.Grpc, request_headers.getContentTypeValue()); // Ensure we never send content-length upstream EXPECT_EQ(nullptr, request_headers.ContentLength()); - EXPECT_EQ(Http::Headers::get().TEValues.Trailers, - request_headers.TE()->value().getStringView()); - EXPECT_EQ(Http::Headers::get().GrpcAcceptEncodingValues.Default, - request_headers.GrpcAcceptEncoding()->value().getStringView()); + EXPECT_EQ(Http::Headers::get().TEValues.Trailers, request_headers.getTEValue()); + EXPECT_EQ(Http::CustomHeaders::get().GrpcAcceptEncodingValues.Default, + request_headers.get_(Http::CustomHeaders::get().GrpcAcceptEncoding)); } 
Stats::TestSymbolTable symbol_table_; @@ -114,6 +112,7 @@ class GrpcWebFilterTest : public testing::TestWithParamvalue().getStringView()); + EXPECT_EQ(Http::Headers::get().ContentTypeValues.Grpc, request_headers.getContentTypeValue()); } } TEST_F(GrpcWebFilterTest, UnsupportedContentType) { Buffer::OwnedImpl data; - Http::TestRequestHeaderMapImpl request_headers; - request_headers.addCopy(Http::Headers::get().ContentType, "unsupported"); - EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false)); + request_headers_.addCopy(Http::Headers::get().ContentType, "unsupported"); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers_, false)); EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data, false)); EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers_)); @@ -148,7 +145,18 @@ TEST_F(GrpcWebFilterTest, UnsupportedContentType) { TEST_F(GrpcWebFilterTest, NoContentType) { Buffer::OwnedImpl data; - Http::TestRequestHeaderMapImpl request_headers; + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers_, false)); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data, false)); + EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers_)); + + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.encodeHeaders(response_headers_, false)); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.encodeData(data, false)); + EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.encodeTrailers(response_trailers_)); +} + +TEST_F(GrpcWebFilterTest, NoPath) { + Http::TestRequestHeaderMapImpl request_headers{}; + Buffer::OwnedImpl data; EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false)); EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data, false)); EXPECT_EQ(Http::FilterTrailersStatus::Continue, 
filter_.decodeTrailers(request_trailers_)); @@ -159,12 +167,11 @@ TEST_F(GrpcWebFilterTest, NoContentType) { } TEST_F(GrpcWebFilterTest, InvalidBase64) { - Http::TestRequestHeaderMapImpl request_headers; - request_headers.addCopy(Http::Headers::get().ContentType, - Http::Headers::get().ContentTypeValues.GrpcWebText); + request_headers_.addCopy(Http::Headers::get().ContentType, + Http::Headers::get().ContentTypeValues.GrpcWebText); expectErrorResponse(Http::Code::BadRequest, "Bad gRPC-web request, invalid base64 data."); - EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false)); - expectRequiredGrpcUpstreamHeaders(request_headers); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers_, false)); + expectRequiredGrpcUpstreamHeaders(request_headers_); Buffer::OwnedImpl request_buffer; Buffer::OwnedImpl decoded_buffer; @@ -175,12 +182,11 @@ TEST_F(GrpcWebFilterTest, InvalidBase64) { } TEST_F(GrpcWebFilterTest, Base64NoPadding) { - Http::TestRequestHeaderMapImpl request_headers; - request_headers.addCopy(Http::Headers::get().ContentType, - Http::Headers::get().ContentTypeValues.GrpcWebText); + request_headers_.addCopy(Http::Headers::get().ContentType, + Http::Headers::get().ContentTypeValues.GrpcWebText); expectErrorResponse(Http::Code::BadRequest, "Bad gRPC-web request, invalid base64 data."); - EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false)); - expectRequiredGrpcUpstreamHeaders(request_headers); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers_, false)); + expectRequiredGrpcUpstreamHeaders(request_headers_); Buffer::OwnedImpl request_buffer; Buffer::OwnedImpl decoded_buffer; @@ -254,13 +260,12 @@ TEST_P(GrpcWebFilterTest, StatsErrorResponse) { TEST_P(GrpcWebFilterTest, Unary) { // Tests request headers. 
- Http::TestRequestHeaderMapImpl request_headers; - request_headers.addCopy(Http::Headers::get().ContentType, request_content_type()); - request_headers.addCopy(Http::Headers::get().Accept, request_accept()); - request_headers.addCopy(Http::Headers::get().ContentLength, uint64_t(8)); + request_headers_.addCopy(Http::Headers::get().ContentType, request_content_type()); + request_headers_.addCopy(Http::CustomHeaders::get().Accept, request_accept()); + request_headers_.addCopy(Http::Headers::get().ContentLength, uint64_t(8)); - EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false)); - expectRequiredGrpcUpstreamHeaders(request_headers); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers_, false)); + expectRequiredGrpcUpstreamHeaders(request_headers_); // Tests request data. if (isBinaryRequest()) { @@ -310,10 +315,10 @@ TEST_P(GrpcWebFilterTest, Unary) { EXPECT_EQ("200", response_headers.get_(Http::Headers::get().Status.get())); if (accept_binary_response()) { EXPECT_EQ(Http::Headers::get().ContentTypeValues.GrpcWebProto, - response_headers.ContentType()->value().getStringView()); + response_headers.getContentTypeValue()); } else if (accept_text_response()) { EXPECT_EQ(Http::Headers::get().ContentTypeValues.GrpcWebTextProto, - response_headers.ContentType()->value().getStringView()); + response_headers.getContentTypeValue()); } else { FAIL() << "Unsupported gRPC-Web request accept: " << request_accept(); } @@ -344,7 +349,7 @@ TEST_P(GrpcWebFilterTest, Unary) { EXPECT_EQ(std::string(B64_MESSAGE, B64_MESSAGE_SIZE), encoded_buffer.toString()); } else { FAIL() << "Unsupported gRPC-Web response content-type: " - << response_headers.ContentType()->value().getStringView(); + << response_headers.getContentTypeValue(); } // Tests response trailers. 
@@ -361,7 +366,7 @@ TEST_P(GrpcWebFilterTest, Unary) { EXPECT_EQ(std::string(TRAILERS, TRAILERS_SIZE), Base64::decode(trailers_buffer.toString())); } else { FAIL() << "Unsupported gRPC-Web response content-type: " - << response_headers.ContentType()->value().getStringView(); + << response_headers.getContentTypeValue(); } EXPECT_EQ(0, response_trailers.size()); } diff --git a/test/extensions/filters/http/gzip/BUILD b/test/extensions/filters/http/gzip/BUILD index 72941b500b9fa..28f6d0d14792a 100644 --- a/test/extensions/filters/http/gzip/BUILD +++ b/test/extensions/filters/http/gzip/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( @@ -16,13 +16,15 @@ envoy_extension_cc_test( srcs = ["gzip_filter_test.cc"], extension_name = "envoy.filters.http.gzip", deps = [ - "//source/common/compressor:compressor_lib", - "//source/common/decompressor:decompressor_lib", "//source/common/protobuf:utility_lib", + "//source/extensions/compression/gzip/compressor:compressor_lib", + "//source/extensions/compression/gzip/decompressor:zlib_decompressor_impl_lib", "//source/extensions/filters/http/gzip:config", "//source/extensions/filters/http/gzip:gzip_filter_lib", "//test/mocks/http:http_mocks", "//test/mocks/runtime:runtime_mocks", + "//test/mocks/server:factory_context_mocks", + "//test/test_common:logging_lib", "//test/test_common:utility_lib", "@envoy_api//envoy/extensions/filters/http/gzip/v3:pkg_cc_proto", ], @@ -34,8 +36,9 @@ envoy_extension_cc_test( "gzip_filter_integration_test.cc", ], extension_name = "envoy.filters.http.gzip", + tags = ["fails_on_windows"], deps = [ - "//source/common/decompressor:decompressor_lib", + "//source/extensions/compression/gzip/decompressor:zlib_decompressor_impl_lib", "//source/extensions/filters/http/gzip:config", "//test/integration:http_integration_lib", 
"//test/test_common:simulated_time_system_lib", diff --git a/test/extensions/filters/http/gzip/gzip_filter_integration_test.cc b/test/extensions/filters/http/gzip/gzip_filter_integration_test.cc index 83ecbb2c22ad3..8996e2aa06845 100644 --- a/test/extensions/filters/http/gzip/gzip_filter_integration_test.cc +++ b/test/extensions/filters/http/gzip/gzip_filter_integration_test.cc @@ -1,6 +1,6 @@ #include "envoy/event/timer.h" -#include "common/decompressor/zlib_decompressor_impl.h" +#include "extensions/compression/gzip/decompressor/zlib_decompressor_impl.h" #include "test/integration/http_integration.h" #include "test/test_common/simulated_time_system.h" @@ -25,8 +25,8 @@ class GzipIntegrationTest : public testing::TestWithParamcomplete()); EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); - ASSERT_TRUE(response->headers().ContentEncoding() != nullptr); - EXPECT_EQ(Http::Headers::get().ContentEncodingValues.Gzip, - response->headers().ContentEncoding()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); + ASSERT_TRUE(response->headers().get(Http::CustomHeaders::get().ContentEncoding) != nullptr); + EXPECT_EQ(Http::CustomHeaders::get().ContentEncodingValues.Gzip, + response->headers() + .get(Http::CustomHeaders::get().ContentEncoding) + ->value() + .getStringView()); ASSERT_TRUE(response->headers().TransferEncoding() != nullptr); EXPECT_EQ(Http::Headers::get().TransferEncodingValues.Chunked, - response->headers().TransferEncoding()->value().getStringView()); + response->headers().getTransferEncodingValue()); Buffer::OwnedImpl decompressed_response{}; const Buffer::OwnedImpl compressed_response{response->body()}; @@ -50,8 +53,8 @@ class GzipIntegrationTest : public testing::TestWithParamcomplete()); EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", 
response->headers().Status()->value().getStringView()); - ASSERT_TRUE(response->headers().ContentEncoding() == nullptr); + EXPECT_EQ("200", response->headers().getStatusValue()); + ASSERT_TRUE(response->headers().get(Http::CustomHeaders::get().ContentEncoding) == nullptr); ASSERT_EQ(content_length, response->body().size()); EXPECT_EQ(response->body(), std::string(content_length, 'a')); } @@ -100,7 +103,9 @@ class GzipIntegrationTest : public testing::TestWithParamcomplete()); EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); - ASSERT_EQ("br", response->headers().ContentEncoding()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); + ASSERT_EQ( + "br", + response->headers().get(Http::CustomHeaders::get().ContentEncoding)->value().getStringView()); EXPECT_EQ(128U, response->body().size()); } /** * Exercises filter when upstream responds with content length below the default threshold. */ -TEST_P(GzipIntegrationTest, NotEnoughContentLength) { +TEST_P(GzipIntegrationTest, DEPRECATED_FEATURE_TEST(NotEnoughContentLength)) { initializeFilter(default_config); Http::TestRequestHeaderMapImpl request_headers{{":method", "GET"}, {":path", "/test/long/url"}, @@ -227,15 +234,15 @@ TEST_P(GzipIntegrationTest, NotEnoughContentLength) { EXPECT_TRUE(upstream_request_->complete()); EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); - ASSERT_TRUE(response->headers().ContentEncoding() == nullptr); + EXPECT_EQ("200", response->headers().getStatusValue()); + ASSERT_TRUE(response->headers().get(Http::CustomHeaders::get().ContentEncoding) == nullptr); EXPECT_EQ(10U, response->body().size()); } /** * Exercises filter when response from upstream service is empty. 
*/ -TEST_P(GzipIntegrationTest, EmptyResponse) { +TEST_P(GzipIntegrationTest, DEPRECATED_FEATURE_TEST(EmptyResponse)) { initializeFilter(default_config); Http::TestRequestHeaderMapImpl request_headers{{":method", "GET"}, {":path", "/test/long/url"}, @@ -250,15 +257,15 @@ TEST_P(GzipIntegrationTest, EmptyResponse) { EXPECT_TRUE(upstream_request_->complete()); EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("204", response->headers().Status()->value().getStringView()); - ASSERT_TRUE(response->headers().ContentEncoding() == nullptr); + EXPECT_EQ("204", response->headers().getStatusValue()); + ASSERT_TRUE(response->headers().get(Http::CustomHeaders::get().ContentEncoding) == nullptr); EXPECT_EQ(0U, response->body().size()); } /** * Exercises filter when upstream responds with restricted content-type value. */ -TEST_P(GzipIntegrationTest, SkipOnContentType) { +TEST_P(GzipIntegrationTest, DEPRECATED_FEATURE_TEST(SkipOnContentType)) { initializeFilter(full_config); doRequestAndNoCompression(Http::TestRequestHeaderMapImpl{{":method", "GET"}, {":path", "/test/long/url"}, @@ -273,7 +280,7 @@ TEST_P(GzipIntegrationTest, SkipOnContentType) { /** * Exercises filter when upstream responds with restricted cache-control value. */ -TEST_P(GzipIntegrationTest, SkipOnCacheControl) { +TEST_P(GzipIntegrationTest, DEPRECATED_FEATURE_TEST(SkipOnCacheControl)) { initializeFilter(full_config); doRequestAndNoCompression(Http::TestRequestHeaderMapImpl{{":method", "GET"}, {":path", "/test/long/url"}, @@ -289,7 +296,7 @@ TEST_P(GzipIntegrationTest, SkipOnCacheControl) { /** * Exercises gzip compression when upstream returns a chunked response. 
*/ -TEST_P(GzipIntegrationTest, AcceptanceFullConfigChunkedResponse) { +TEST_P(GzipIntegrationTest, DEPRECATED_FEATURE_TEST(AcceptanceFullConfigChunkedResponse)) { initializeFilter(full_config); Http::TestRequestHeaderMapImpl request_headers{{":method", "GET"}, {":path", "/test/long/url"}, @@ -305,15 +312,17 @@ TEST_P(GzipIntegrationTest, AcceptanceFullConfigChunkedResponse) { EXPECT_TRUE(upstream_request_->complete()); EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); - ASSERT_EQ("gzip", response->headers().ContentEncoding()->value().getStringView()); - ASSERT_EQ("chunked", response->headers().TransferEncoding()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); + ASSERT_EQ( + "gzip", + response->headers().get(Http::CustomHeaders::get().ContentEncoding)->value().getStringView()); + ASSERT_EQ("chunked", response->headers().getTransferEncodingValue()); } /** * Verify Vary header values are preserved. 
*/ -TEST_P(GzipIntegrationTest, AcceptanceFullConfigVeryHeader) { +TEST_P(GzipIntegrationTest, DEPRECATED_FEATURE_TEST(AcceptanceFullConfigVaryHeader)) { initializeFilter(default_config); Http::TestRequestHeaderMapImpl request_headers{{":method", "GET"}, {":path", "/test/long/url"}, @@ -329,8 +338,11 @@ TEST_P(GzipIntegrationTest, AcceptanceFullConfigVeryHeader) { EXPECT_TRUE(upstream_request_->complete()); EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); - ASSERT_EQ("gzip", response->headers().ContentEncoding()->value().getStringView()); - ASSERT_EQ("Cookie, Accept-Encoding", response->headers().Vary()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); + ASSERT_EQ( + "gzip", + response->headers().get(Http::CustomHeaders::get().ContentEncoding)->value().getStringView()); + ASSERT_EQ("Cookie, Accept-Encoding", + response->headers().get(Http::CustomHeaders::get().Vary)->value().getStringView()); } } // namespace Envoy diff --git a/test/extensions/filters/http/gzip/gzip_filter_test.cc b/test/extensions/filters/http/gzip/gzip_filter_test.cc index 24f715b569e7f..7f92d1c06e46e 100644 --- a/test/extensions/filters/http/gzip/gzip_filter_test.cc +++ b/test/extensions/filters/http/gzip/gzip_filter_test.cc @@ -3,15 +3,18 @@ #include "envoy/extensions/filters/http/gzip/v3/gzip.pb.h" #include "common/common/hex.h" -#include "common/compressor/zlib_compressor_impl.h" -#include "common/decompressor/zlib_decompressor_impl.h" #include "common/protobuf/utility.h" +#include "extensions/compression/gzip/compressor/zlib_compressor_impl.h" +#include "extensions/compression/gzip/decompressor/zlib_decompressor_impl.h" +#include "extensions/filters/http/gzip/config.h" #include "extensions/filters/http/gzip/gzip_filter.h" #include "test/mocks/http/mocks.h" #include "test/mocks/runtime/mocks.h" +#include "test/mocks/server/factory_context.h" #include 
"test/mocks/stats/mocks.h" +#include "test/test_common/logging.h" #include "test/test_common/utility.h" #include "absl/container/fixed_array.h" @@ -78,7 +81,8 @@ class GzipFilterTest : public testing::Test { feedBuffer(content_length); EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(headers, false)); EXPECT_EQ("", headers.get_("content-length")); - EXPECT_EQ(Http::Headers::get().ContentEncodingValues.Gzip, headers.get_("content-encoding")); + EXPECT_EQ(Http::CustomHeaders::get().ContentEncodingValues.Gzip, + headers.get_("content-encoding")); EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(data_, !with_trailers)); if (with_trailers) { Buffer::OwnedImpl trailers_buffer; @@ -120,8 +124,10 @@ class GzipFilterTest : public testing::Test { } void expectValidCompressionStrategyAndLevel( - Compressor::ZlibCompressorImpl::CompressionStrategy strategy, absl::string_view strategy_name, - Compressor::ZlibCompressorImpl::CompressionLevel level, absl::string_view level_name) { + Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy strategy, + absl::string_view strategy_name, + Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel level, + absl::string_view level_name) { setUpFilter(fmt::format(R"EOF({{"compression_strategy": "{}", "compression_level": "{}"}})EOF", strategy_name, level_name)); EXPECT_EQ(strategy, config_->compressionStrategy()); @@ -154,7 +160,8 @@ class GzipFilterTest : public testing::Test { std::shared_ptr config_; std::unique_ptr filter_; Buffer::OwnedImpl data_; - Decompressor::ZlibDecompressorImpl decompressor_; + Stats::IsolatedStoreImpl stats_store_; + Compression::Gzip::Decompressor::ZlibDecompressorImpl decompressor_{stats_store_, "test"}; Buffer::OwnedImpl decompressed_data_; std::string expected_str_; Stats::TestUtil::TestStore stats_; @@ -189,26 +196,26 @@ TEST_F(GzipFilterTest, DefaultConfigValues) { EXPECT_EQ(28, config_->windowBits()); EXPECT_EQ(false, 
config_->disableOnEtagHeader()); EXPECT_EQ(false, config_->removeAcceptEncodingHeader()); - EXPECT_EQ(Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, + EXPECT_EQ(Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, config_->compressionStrategy()); - EXPECT_EQ(Compressor::ZlibCompressorImpl::CompressionLevel::Standard, + EXPECT_EQ(Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Standard, config_->compressionLevel()); EXPECT_EQ(18, config_->contentTypeValues().size()); } TEST_F(GzipFilterTest, AvailableCombinationCompressionStrategyAndLevelConfig) { expectValidCompressionStrategyAndLevel( - Compressor::ZlibCompressorImpl::CompressionStrategy::Filtered, "FILTERED", - Compressor::ZlibCompressorImpl::CompressionLevel::Best, "BEST"); + Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Filtered, "FILTERED", + Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Best, "BEST"); expectValidCompressionStrategyAndLevel( - Compressor::ZlibCompressorImpl::CompressionStrategy::Huffman, "HUFFMAN", - Compressor::ZlibCompressorImpl::CompressionLevel::Best, "BEST"); + Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Huffman, "HUFFMAN", + Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Best, "BEST"); expectValidCompressionStrategyAndLevel( - Compressor::ZlibCompressorImpl::CompressionStrategy::Rle, "RLE", - Compressor::ZlibCompressorImpl::CompressionLevel::Speed, "SPEED"); + Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Rle, "RLE", + Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Speed, "SPEED"); expectValidCompressionStrategyAndLevel( - Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, "DEFAULT", - Compressor::ZlibCompressorImpl::CompressionLevel::Standard, "DEFAULT"); + Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, "DEFAULT", + 
Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Standard, "DEFAULT"); } // Acceptance Testing with default configuration. @@ -407,6 +414,21 @@ TEST_F(GzipFilterTest, RemoveAcceptEncodingHeader) { } } +// Test setting zlib's chunk size. +TEST_F(GzipFilterTest, ChunkSize) { + // Default + setUpFilter("{}"); + EXPECT_EQ(config_->chunkSize(), 4096); + + // Override + setUpFilter(R"EOF( +{ + "chunk_size": 8192 +} +)EOF"); + EXPECT_EQ(config_->chunkSize(), 8192); +} + // Test that the deprecated extension name still functions. TEST(GzipFilterConfigTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) { const std::string deprecated_name = "envoy.gzip"; @@ -417,6 +439,39 @@ TEST(GzipFilterConfigTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName deprecated_name)); } +// Test that the deprecated extension triggers an exception. +TEST(GzipFilterFactoryTest, DEPRECATED_FEATURE_TEST(TestCheckDeprecatedExtensionThrows)) { + NiceMock context; + GzipFilterFactory factory; + envoy::extensions::filters::http::gzip::v3::Gzip config; + + EXPECT_CALL( + context.runtime_loader_.snapshot_, + deprecatedFeatureEnabled("envoy.deprecated_features.allow_deprecated_gzip_http_filter", _)) + .WillRepeatedly(Return(false)); + + EXPECT_THROW_WITH_REGEX(factory.createFilterFactoryFromProto(config, "stats.", context), + EnvoyException, + "Using deprecated extension 'envoy.extensions.filters.http.gzip'.*"); +} + +// Test that the deprecated extension gives a deprecation warning. 
+TEST(GzipFilterFactoryTest, DEPRECATED_FEATURE_TEST(TestCheckDeprecatedExtensionWarns)) { + NiceMock context; + GzipFilterFactory factory; + envoy::extensions::filters::http::gzip::v3::Gzip config; + + EXPECT_CALL( + context.runtime_loader_.snapshot_, + deprecatedFeatureEnabled("envoy.deprecated_features.allow_deprecated_gzip_http_filter", _)) + .WillRepeatedly(Return(true)); + + EXPECT_NO_THROW(factory.createFilterFactoryFromProto(config, "stats.", context)); + + EXPECT_LOG_CONTAINS("warn", "Using deprecated extension 'envoy.extensions.filters.http.gzip'.", + factory.createFilterFactoryFromProto(config, "stats.", context)); +} + } // namespace Gzip } // namespace HttpFilters } // namespace Extensions diff --git a/test/extensions/filters/http/header_to_metadata/BUILD b/test/extensions/filters/http/header_to_metadata/BUILD index 20e85fba58cef..9b976fe7f7724 100644 --- a/test/extensions/filters/http/header_to_metadata/BUILD +++ b/test/extensions/filters/http/header_to_metadata/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( @@ -29,7 +29,8 @@ envoy_extension_cc_test( extension_name = "envoy.filters.http.header_to_metadata", deps = [ "//source/extensions/filters/http/header_to_metadata:config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", + "//test/mocks/server:instance_mocks", "//test/test_common:utility_lib", "@envoy_api//envoy/extensions/filters/http/header_to_metadata/v3:pkg_cc_proto", ], diff --git a/test/extensions/filters/http/header_to_metadata/config_test.cc b/test/extensions/filters/http/header_to_metadata/config_test.cc index 861e4ee545a76..3b7771b4ce231 100644 --- a/test/extensions/filters/http/header_to_metadata/config_test.cc +++ b/test/extensions/filters/http/header_to_metadata/config_test.cc @@ -6,7 +6,8 @@ #include 
"extensions/filters/http/header_to_metadata/config.h" #include "extensions/filters/http/header_to_metadata/header_to_metadata_filter.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" +#include "test/mocks/server/instance.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" @@ -20,20 +21,37 @@ namespace HeaderToMetadataFilter { using HeaderToMetadataProtoConfig = envoy::extensions::filters::http::header_to_metadata::v3::Config; -TEST(HeaderToMetadataFilterConfigTest, InvalidEmptyHeader) { +void testForbiddenConfig(const std::string& yaml) { + HeaderToMetadataProtoConfig proto_config; + TestUtility::loadFromYamlAndValidate(yaml, proto_config); + + testing::NiceMock context; + HeaderToMetadataConfig factory; + + EXPECT_THROW(factory.createFilterFactoryFromProto(proto_config, "stats", context), + EnvoyException); +} + +// Tests that empty (metadata) keys are rejected. +TEST(HeaderToMetadataFilterConfigTest, InvalidEmptyKey) { const std::string yaml = R"EOF( request_rules: -- header: "" + - header: x-version + on_header_present: + metadata_namespace: envoy.lb + key: "" + type: STRING )EOF"; HeaderToMetadataProtoConfig proto_config; EXPECT_THROW(TestUtility::loadFromYamlAndValidate(yaml, proto_config), ProtoValidationException); } -TEST(HeaderToMetadataFilterConfigTest, InvalidEmptyKey) { +// Tests that empty (metadata) keys are rejected in case of cookie. +TEST(HeaderToMetadataFilterConfigTest, InvalidEmptyCookieKey) { const std::string yaml = R"EOF( request_rules: - - header: x-version + - cookie: x-cookie on_header_present: metadata_namespace: envoy.lb key: "" @@ -44,6 +62,7 @@ TEST(HeaderToMetadataFilterConfigTest, InvalidEmptyKey) { EXPECT_THROW(TestUtility::loadFromYamlAndValidate(yaml, proto_config), ProtoValidationException); } +// Tests that a valid config with header is properly consumed. 
TEST(HeaderToMetadataFilterConfigTest, SimpleConfig) { const std::string yaml = R"EOF( request_rules: @@ -71,6 +90,35 @@ TEST(HeaderToMetadataFilterConfigTest, SimpleConfig) { cb(filter_callbacks); } +// Tests that a valid config with cookie is properly consumed. +TEST(HeaderToMetadataFilterConfigTest, SimpleCookieConfig) { + const std::string yaml = R"EOF( +request_rules: + - cookie: x-cookie + on_header_present: + metadata_namespace: envoy.lb + key: version1 + type: STRING + on_header_missing: + metadata_namespace: envoy.lb + key: default + value: 'true' + type: STRING + )EOF"; + + HeaderToMetadataProtoConfig proto_config; + TestUtility::loadFromYamlAndValidate(yaml, proto_config); + + testing::NiceMock context; + HeaderToMetadataConfig factory; + + Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, "stats", context); + Http::MockFilterChainFactoryCallbacks filter_callbacks; + EXPECT_CALL(filter_callbacks, addStreamFilter(_)); + cb(filter_callbacks); +} + +// Tests that per route config properly overrides the global config. TEST(HeaderToMetadataFilterConfigTest, PerRouteConfig) { const std::string yaml = R"EOF( request_rules: @@ -99,6 +147,72 @@ TEST(HeaderToMetadataFilterConfigTest, PerRouteConfig) { EXPECT_FALSE(config->doResponse()); } +// Tests that configuration does not allow value and regex_value_rewrite in the same rule. +TEST(HeaderToMetadataFilterConfigTest, ValueAndRegex) { + const std::string yaml = R"EOF( +request_rules: + - header: x-version + on_header_present: + metadata_namespace: envoy.lb + key: cluster + value: foo + regex_value_rewrite: + pattern: + google_re2: {} + regex: "^/(cluster[\\d\\w-]+)/?.*$" + substitution: "\\1" + )EOF"; + + testForbiddenConfig(yaml); +} + +// Tests that cookie configuration does not allow value and regex_value_rewrite in the same rule. 
+TEST(HeaderToMetadataFilterConfigTest, CookieValueAndRegex) { + const std::string yaml = R"EOF( +request_rules: + - cookie: x-cookie + on_header_present: + metadata_namespace: envoy.lb + key: cluster + value: foo + regex_value_rewrite: + pattern: + google_re2: {} + regex: "^/(cluster[\\d\\w-]+)/?.*$" + substitution: "\\1" + )EOF"; + + testForbiddenConfig(yaml); +} + +// Tests that on_header_missing rules don't allow an empty value. +TEST(HeaderToMetadataFilterConfigTest, OnHeaderMissingEmptyValue) { + const std::string yaml = R"EOF( +request_rules: + - header: x-version + on_header_missing: + metadata_namespace: envoy.lb + key: "foo" + type: STRING + )EOF"; + + testForbiddenConfig(yaml); +} + +// Tests that on_header_missing rules don't allow an empty cookie value. +TEST(HeaderToMetadataFilterConfigTest, CookieOnHeaderMissingEmptyValue) { + const std::string yaml = R"EOF( +request_rules: + - cookie: x-cookie + on_header_missing: + metadata_namespace: envoy.lb + key: "foo" + type: STRING + )EOF"; + + testForbiddenConfig(yaml); +} + } // namespace HeaderToMetadataFilter } // namespace HttpFilters } // namespace Extensions diff --git a/test/extensions/filters/http/header_to_metadata/header_to_metadata_filter_test.cc b/test/extensions/filters/http/header_to_metadata/header_to_metadata_filter_test.cc index ad3ee7af60edb..fc9c81431ae1b 100644 --- a/test/extensions/filters/http/header_to_metadata/header_to_metadata_filter_test.cc +++ b/test/extensions/filters/http/header_to_metadata/header_to_metadata_filter_test.cc @@ -181,7 +181,7 @@ TEST_F(HeaderToMetadataTest, HeaderRemovedTest) { initializeFilter(response_config_yaml); Http::TestResponseHeaderMapImpl incoming_headers{{"x-authenticated", "1"}}; std::map expected = {{"auth", "1"}}; - Http::TestHeaderMapImpl empty_headers; + Http::TestResponseHeaderMapImpl empty_headers; EXPECT_CALL(encoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(req_info_)); EXPECT_CALL(req_info_, @@ -213,7 +213,7 @@ 
TEST_F(HeaderToMetadataTest, NumberTypeTest) { initializeFilter(response_config_yaml); Http::TestResponseHeaderMapImpl incoming_headers{{"x-authenticated", "1"}}; std::map expected = {{"auth", 1}}; - Http::TestHeaderMapImpl empty_headers; + Http::TestResponseHeaderMapImpl empty_headers; EXPECT_CALL(encoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(req_info_)); EXPECT_CALL(req_info_, @@ -238,7 +238,7 @@ TEST_F(HeaderToMetadataTest, StringTypeInBase64UrlTest) { const auto encoded = Base64::encode(data.c_str(), data.size()); Http::TestResponseHeaderMapImpl incoming_headers{{"x-authenticated", encoded}}; std::map expected = {{"auth", data}}; - Http::TestHeaderMapImpl empty_headers; + Http::TestResponseHeaderMapImpl empty_headers; EXPECT_CALL(encoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(req_info_)); EXPECT_CALL(req_info_, @@ -416,7 +416,7 @@ TEST_F(HeaderToMetadataTest, IgnoreHeaderValueUseConstant) { initializeFilter(response_config_yaml); Http::TestResponseHeaderMapImpl incoming_headers{{"x-something", "thing"}}; std::map expected = {{"something", "else"}}; - Http::TestHeaderMapImpl empty_headers; + Http::TestResponseHeaderMapImpl empty_headers; EXPECT_CALL(encoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(req_info_)); EXPECT_CALL(req_info_, @@ -435,7 +435,7 @@ TEST_F(HeaderToMetadataTest, RejectInvalidRule) { )EOF"; auto expected = "header to metadata filter: rule for header 'x-something' has neither " "`on_header_present` nor `on_header_missing` set"; - EXPECT_THROW_WITH_MESSAGE(initializeFilter(config), Envoy::EnvoyException, expected); + EXPECT_THROW_WITH_MESSAGE(initializeFilter(config), EnvoyException, expected); } TEST_F(HeaderToMetadataTest, PerRouteEmtpyRules) { @@ -443,6 +443,55 @@ TEST_F(HeaderToMetadataTest, PerRouteEmtpyRules) { EXPECT_THROW(std::make_shared(config_proto, true), EnvoyException); } +/** + * Invalid empty header or cookie should be rejected. 
+ */ +TEST_F(HeaderToMetadataTest, RejectEmptyHeader) { + const std::string config = R"EOF( +request_rules: + - header: "" + +)EOF"; + auto expected = "One of Cookie or Header option needs to be specified"; + EXPECT_THROW_WITH_MESSAGE(initializeFilter(config), EnvoyException, expected); +} + +/** + * Rules with both header and cookie fields should be rejected. + */ +TEST_F(HeaderToMetadataTest, RejectBothCookieHeader) { + const std::string config = R"EOF( +request_rules: + - header: x-something + cookie: something-else + on_header_present: + key: something + value: else + type: STRING + remove: false + +)EOF"; + auto expected = "Cannot specify both header and cookie"; + EXPECT_THROW_WITH_MESSAGE(initializeFilter(config), EnvoyException, expected); +} + +/** + * Rules with remove field should be rejected in case of a cookie. + */ +TEST_F(HeaderToMetadataTest, RejectRemoveForCookie) { + const std::string config = R"EOF( +request_rules: + - cookie: cookie + on_header_present: + metadata_namespace: envoy.lb + key: version + type: STRING + remove: true +)EOF"; + auto expected = "Cannot specify remove for cookie"; + EXPECT_THROW_WITH_MESSAGE(initializeFilter(config), EnvoyException, expected); +} + /** * Empty values not added to metadata. */ @@ -463,6 +512,243 @@ TEST_F(HeaderToMetadataTest, NoEmptyValues) { EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false)); } +/** + * Regex substitution on header value. + */ +TEST_F(HeaderToMetadataTest, RegexSubstitution) { + const std::string config = R"EOF( +request_rules: + - header: :path + on_header_present: + metadata_namespace: envoy.lb + key: cluster + regex_value_rewrite: + pattern: + google_re2: {} + regex: "^/(cluster[\\d\\w-]+)/?.*$" + substitution: "\\1" +)EOF"; + initializeFilter(config); + + // Match with additional path elements. 
+ { + Http::TestRequestHeaderMapImpl headers{{":path", "/cluster-prod-001/x/y"}}; + std::map expected = {{"cluster", "cluster-prod-001"}}; + + EXPECT_CALL(decoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(req_info_)); + EXPECT_CALL(req_info_, setDynamicMetadata("envoy.lb", MapEq(expected))); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false)); + } + + // Match with no additional path elements. + { + Http::TestRequestHeaderMapImpl headers{{":path", "/cluster-prod-001"}}; + std::map expected = {{"cluster", "cluster-prod-001"}}; + + EXPECT_CALL(decoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(req_info_)); + EXPECT_CALL(req_info_, setDynamicMetadata("envoy.lb", MapEq(expected))); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false)); + } + + // No match. + { + Http::TestRequestHeaderMapImpl headers{{":path", "/foo"}}; + std::map expected = {{"cluster", "/foo"}}; + + EXPECT_CALL(decoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(req_info_)); + EXPECT_CALL(req_info_, setDynamicMetadata("envoy.lb", MapEq(expected))); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false)); + } + + // No match with additional path elements. + { + Http::TestRequestHeaderMapImpl headers{{":path", "/foo/bar?x=2"}}; + std::map expected = {{"cluster", "/foo/bar?x=2"}}; + + EXPECT_CALL(decoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(req_info_)); + EXPECT_CALL(req_info_, setDynamicMetadata("envoy.lb", MapEq(expected))); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false)); + } +} + +/** + * Missing case is not executed when header is present. 
+ */ +TEST_F(HeaderToMetadataTest, NoMissingWhenHeaderIsPresent) { + const std::string config = R"EOF( +request_rules: + - header: x-version + on_header_missing: + metadata_namespace: envoy.lb + key: version + value: some_value + type: STRING +)EOF"; + initializeFilter(config); + Http::TestRequestHeaderMapImpl headers{{"x-version", "19"}}; + + EXPECT_CALL(decoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(req_info_)); + EXPECT_CALL(req_info_, setDynamicMetadata(_, _)).Times(0); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false)); +} + +/** + * on header missing case with no header data + */ + +TEST_F(HeaderToMetadataTest, OnMissingWhenHeaderIsPresent) { + const std::string config = R"EOF( +request_rules: + - header: x-version + on_header_missing: + metadata_namespace: envoy.lb + key: version + value: some_value + type: STRING +)EOF"; + initializeFilter(config); + Http::TestRequestHeaderMapImpl headers{{"x-version", ""}}; + + EXPECT_CALL(decoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(req_info_)); + EXPECT_CALL(req_info_, setDynamicMetadata(_, _)).Times(0); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false)); +} + +/** + * on header present case, when the regex replacement turns the header into an empty string + */ +TEST_F(HeaderToMetadataTest, HeaderIsPresentButRegexEmptiesIt) { + const std::string config = R"EOF( +request_rules: + - header: x-version + on_header_present: + metadata_namespace: envoy.lb + key: cluster + regex_value_rewrite: + pattern: + google_re2: {} + regex: "^foo" + substitution: "" + on_header_missing: + metadata_namespace: envoy.lb + key: version + value: some_value + type: STRING +)EOF"; + initializeFilter(config); + Http::TestRequestHeaderMapImpl headers{{"x-version", "foo"}}; + + EXPECT_CALL(decoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(req_info_)); + EXPECT_CALL(req_info_, setDynamicMetadata(_, _)).Times(0); + 
EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false)); +} + +/** + * cookie value extracted and stored + */ +TEST_F(HeaderToMetadataTest, CookieValueUsed) { + const std::string response_config_yaml = R"EOF( +response_rules: + - cookie: bar + on_header_present: + key: bar + type: STRING + remove: false +)EOF"; + initializeFilter(response_config_yaml); + Http::TestResponseHeaderMapImpl incoming_headers{{"cookie", "bar=foo"}}; + std::map expected = {{"bar", "foo"}}; + + EXPECT_CALL(encoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(req_info_)); + EXPECT_CALL(req_info_, + setDynamicMetadata(HttpFilterNames::get().HeaderToMetadata, MapEq(expected))); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(incoming_headers, false)); +} + +/** + * Ignore the cookie's value, use a given constant value. + */ +TEST_F(HeaderToMetadataTest, IgnoreCookieValueUseConstant) { + const std::string response_config_yaml = R"EOF( +response_rules: + - cookie: meh + on_header_present: + key: meh + value: some_value + type: STRING + remove: false +)EOF"; + initializeFilter(response_config_yaml); + Http::TestResponseHeaderMapImpl incoming_headers{{"cookie", "meh=foo"}}; + std::map expected = {{"meh", "some_value"}}; + + EXPECT_CALL(encoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(req_info_)); + EXPECT_CALL(req_info_, + setDynamicMetadata(HttpFilterNames::get().HeaderToMetadata, MapEq(expected))); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(incoming_headers, false)); +} + +/** + * No cookie value, no metadata + */ +TEST_F(HeaderToMetadataTest, NoCookieValue) { + const std::string config = R"EOF( +request_rules: + - cookie: foo + on_header_missing: + metadata_namespace: envoy.lb + key: foo + value: some_value + type: STRING +)EOF"; + initializeFilter(config); + Http::TestRequestHeaderMapImpl headers{{"cookie", ""}}; + std::map expected = {{"foo", "some_value"}}; + + EXPECT_CALL(decoder_callbacks_, 
streamInfo()).WillRepeatedly(ReturnRef(req_info_)); + EXPECT_CALL(req_info_, setDynamicMetadata("envoy.lb", MapEq(expected))); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false)); +} + +/** + * Regex substitution on cookie value. + */ +TEST_F(HeaderToMetadataTest, CookieRegexSubstitution) { + const std::string config = R"EOF( +request_rules: + - cookie: foo + on_header_present: + metadata_namespace: envoy.lb + key: cluster + regex_value_rewrite: + pattern: + google_re2: {} + regex: "^(cluster[\\d\\w-]+)$" + substitution: "\\1 matched" +)EOF"; + initializeFilter(config); + + // match. + { + Http::TestRequestHeaderMapImpl headers{{"cookie", "foo=cluster-prod-001"}}; + std::map expected = {{"cluster", "cluster-prod-001 matched"}}; + + EXPECT_CALL(decoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(req_info_)); + EXPECT_CALL(req_info_, setDynamicMetadata("envoy.lb", MapEq(expected))); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false)); + } + + // No match. 
+ { + Http::TestRequestHeaderMapImpl headers{{"cookie", "foo=cluster"}}; + std::map expected = {{"cluster", "cluster"}}; + + EXPECT_CALL(decoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(req_info_)); + EXPECT_CALL(req_info_, setDynamicMetadata("envoy.lb", MapEq(expected))); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false)); + } +} + } // namespace HeaderToMetadataFilter } // namespace HttpFilters } // namespace Extensions diff --git a/test/extensions/filters/http/health_check/BUILD b/test/extensions/filters/http/health_check/BUILD index 924cea4f29707..30beabc500f08 100644 --- a/test/extensions/filters/http/health_check/BUILD +++ b/test/extensions/filters/http/health_check/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( @@ -19,7 +19,7 @@ envoy_extension_cc_test( "//source/common/buffer:buffer_lib", "//source/common/http:header_utility_lib", "//source/extensions/filters/http/health_check:health_check_lib", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "//test/test_common:utility_lib", "@envoy_api//envoy/config/route/v3:pkg_cc_proto", ], @@ -31,7 +31,7 @@ envoy_extension_cc_test( extension_name = "envoy.filters.http.health_check", deps = [ "//source/extensions/filters/http/health_check:config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "//test/test_common:utility_lib", "@envoy_api//envoy/config/route/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/http/health_check/v3:pkg_cc_proto", diff --git a/test/extensions/filters/http/health_check/config_test.cc b/test/extensions/filters/http/health_check/config_test.cc index 1a66473ba05a0..dcf4e37fb0c88 100644 --- a/test/extensions/filters/http/health_check/config_test.cc +++ 
b/test/extensions/filters/http/health_check/config_test.cc @@ -6,7 +6,7 @@ #include "extensions/filters/http/health_check/config.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" diff --git a/test/extensions/filters/http/health_check/health_check_test.cc b/test/extensions/filters/http/health_check/health_check_test.cc index 54f5306cdb477..c8f4da639e5e1 100644 --- a/test/extensions/filters/http/health_check/health_check_test.cc +++ b/test/extensions/filters/http/health_check/health_check_test.cc @@ -9,7 +9,7 @@ #include "extensions/filters/http/health_check/health_check.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "test/mocks/upstream/cluster_info.h" #include "test/test_common/printers.h" #include "test/test_common/utility.h" @@ -115,7 +115,7 @@ TEST_F(HealthCheckFilterNoPassThroughTest, NotHcRequest) { EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(body, false)); Http::TestResponseTrailerMapImpl response_trailers; EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers)); - EXPECT_EQ("true", service_response.EnvoyImmediateHealthCheckFail()->value().getStringView()); + EXPECT_EQ("true", service_response.getEnvoyImmediateHealthCheckFailValue()); } TEST_F(HealthCheckFilterNoPassThroughTest, ComputedHealth) { @@ -227,8 +227,7 @@ TEST_F(HealthCheckFilterNoPassThroughTest, HealthCheckFailedCallbackCalled) { .Times(1) .WillRepeatedly(Invoke([&](Http::ResponseHeaderMap& headers, bool end_stream) { filter_->encodeHeaders(headers, end_stream); - EXPECT_EQ("cluster_name", - headers.EnvoyUpstreamHealthCheckedCluster()->value().getStringView()); + EXPECT_EQ("cluster_name", headers.getEnvoyUpstreamHealthCheckedClusterValue()); EXPECT_EQ(nullptr, headers.EnvoyImmediateHealthCheckFail()); })); @@ -250,10 +249,9 @@ TEST_F(HealthCheckFilterPassThroughTest, Ok) { 
EXPECT_CALL(callbacks_, encodeHeaders_(_, _)).Times(0); EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false)); - Http::TestResponseHeaderMapImpl service_hc_respnose{{":status", "200"}}; - EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(service_hc_respnose, true)); - EXPECT_EQ("cluster_name", - service_hc_respnose.EnvoyUpstreamHealthCheckedCluster()->value().getStringView()); + Http::TestResponseHeaderMapImpl service_hc_response{{":status", "200"}}; + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(service_hc_response, true)); + EXPECT_EQ("cluster_name", service_hc_response.getEnvoyUpstreamHealthCheckedClusterValue()); } TEST_F(HealthCheckFilterPassThroughTest, OkWithContinue) { @@ -270,10 +268,9 @@ TEST_F(HealthCheckFilterPassThroughTest, OkWithContinue) { filter_->encode100ContinueHeaders(continue_response)); Http::MetadataMap metadata_map{{"metadata", "metadata"}}; EXPECT_EQ(Http::FilterMetadataStatus::Continue, filter_->encodeMetadata(metadata_map)); - Http::TestResponseHeaderMapImpl service_hc_respnose{{":status", "200"}}; - EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(service_hc_respnose, true)); - EXPECT_EQ("cluster_name", - service_hc_respnose.EnvoyUpstreamHealthCheckedCluster()->value().getStringView()); + Http::TestResponseHeaderMapImpl service_hc_response{{":status", "200"}}; + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(service_hc_response, true)); + EXPECT_EQ("cluster_name", service_hc_response.getEnvoyUpstreamHealthCheckedClusterValue()); } TEST_F(HealthCheckFilterPassThroughTest, Failed) { @@ -302,8 +299,7 @@ TEST_F(HealthCheckFilterCachingTest, CachedServiceUnavailableCallbackCalled) { .Times(1) .WillRepeatedly(Invoke([&](Http::ResponseHeaderMap& headers, bool end_stream) { filter_->encodeHeaders(headers, end_stream); - EXPECT_EQ("cluster_name", - 
headers.EnvoyUpstreamHealthCheckedCluster()->value().getStringView()); + EXPECT_EQ("cluster_name", headers.getEnvoyUpstreamHealthCheckedClusterValue()); })); EXPECT_CALL(callbacks_.stream_info_, @@ -324,8 +320,7 @@ TEST_F(HealthCheckFilterCachingTest, CachedOkCallbackNotCalled) { .Times(1) .WillRepeatedly(Invoke([&](Http::ResponseHeaderMap& headers, bool end_stream) { filter_->encodeHeaders(headers, end_stream); - EXPECT_EQ("cluster_name", - headers.EnvoyUpstreamHealthCheckedCluster()->value().getStringView()); + EXPECT_EQ("cluster_name", headers.getEnvoyUpstreamHealthCheckedClusterValue()); })); EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, @@ -353,8 +348,7 @@ TEST_F(HealthCheckFilterCachingTest, All) { .Times(1) .WillRepeatedly(Invoke([&](Http::ResponseHeaderMap& headers, bool end_stream) { filter_->encodeHeaders(headers, end_stream); - EXPECT_EQ("cluster_name", - headers.EnvoyUpstreamHealthCheckedCluster()->value().getStringView()); + EXPECT_EQ("cluster_name", headers.getEnvoyUpstreamHealthCheckedClusterValue()); })); EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, filter_->decodeHeaders(request_headers_, true)); @@ -388,8 +382,7 @@ TEST_F(HealthCheckFilterCachingTest, DegradedHeader) { .Times(1) .WillRepeatedly(Invoke([&](Http::ResponseHeaderMap& headers, bool end_stream) { filter_->encodeHeaders(headers, end_stream); - EXPECT_EQ("cluster_name", - headers.EnvoyUpstreamHealthCheckedCluster()->value().getStringView()); + EXPECT_EQ("cluster_name", headers.getEnvoyUpstreamHealthCheckedClusterValue()); })); EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, filter_->decodeHeaders(request_headers_, true)); diff --git a/test/extensions/filters/http/ip_tagging/BUILD b/test/extensions/filters/http/ip_tagging/BUILD index fdfa8d58cc989..7625367f3e62c 100644 --- a/test/extensions/filters/http/ip_tagging/BUILD +++ b/test/extensions/filters/http/ip_tagging/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", 
"envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/filters/http/ip_tagging/ip_tagging_filter_test.cc b/test/extensions/filters/http/ip_tagging/ip_tagging_filter_test.cc index e99f427275a8b..492600503f607 100644 --- a/test/extensions/filters/http/ip_tagging/ip_tagging_filter_test.cc +++ b/test/extensions/filters/http/ip_tagging/ip_tagging_filter_test.cc @@ -3,7 +3,6 @@ #include "envoy/extensions/filters/http/ip_tagging/v3/ip_tagging.pb.h" #include "common/buffer/buffer_impl.h" -#include "common/http/header_map_impl.h" #include "common/network/address_impl.h" #include "common/network/utility.h" @@ -81,7 +80,7 @@ TEST_F(IpTaggingFilterTest, InternalRequest) { EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers)); // Check external requests don't get a tag. - request_headers = {}; + request_headers = Http::TestRequestHeaderMapImpl{}; EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false)); EXPECT_FALSE(request_headers.has(Http::Headers::get().EnvoyIpTags)); } @@ -147,7 +146,7 @@ request_type: both EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false)); EXPECT_EQ("internal_request", request_headers.get_(Http::Headers::get().EnvoyIpTags)); - request_headers = {}; + request_headers = Http::TestRequestHeaderMapImpl{}; remote_address = Network::Utility::parseInternetAddress("1.2.3.4"); EXPECT_CALL(filter_callbacks_.stream_info_, downstreamRemoteAddress()) .WillOnce(ReturnRef(remote_address)); @@ -283,7 +282,7 @@ TEST_F(IpTaggingFilterTest, ClearRouteCache) { // no tags, no call EXPECT_CALL(filter_callbacks_, clearRouteCache()).Times(0); - request_headers = {}; + request_headers = Http::TestRequestHeaderMapImpl{}; EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false)); 
EXPECT_FALSE(request_headers.has(Http::Headers::get().EnvoyIpTags)); } diff --git a/test/extensions/filters/http/jwt_authn/BUILD b/test/extensions/filters/http/jwt_authn/BUILD index c3641befd989a..dd23ce92ae54d 100644 --- a/test/extensions/filters/http/jwt_authn/BUILD +++ b/test/extensions/filters/http/jwt_authn/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", @@ -11,6 +9,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( @@ -47,7 +47,6 @@ envoy_extension_cc_test( deps = [ ":mock_lib", "//source/extensions/filters/http/jwt_authn:filter_lib", - "//test/mocks/server:server_mocks", "//test/test_common:utility_lib", "@envoy_api//envoy/extensions/filters/http/jwt_authn/v3:pkg_cc_proto", ], @@ -62,7 +61,8 @@ envoy_extension_cc_test( "//source/common/stream_info:filter_state_lib", "//source/extensions/filters/http/jwt_authn:config", "//test/extensions/filters/http/jwt_authn:test_common_lib", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", + "//test/mocks/server:instance_mocks", "@envoy_api//envoy/extensions/filters/http/jwt_authn/v3:pkg_cc_proto", ], ) @@ -74,7 +74,7 @@ envoy_extension_cc_test( deps = [ "//source/extensions/filters/http/jwt_authn:config", "//test/extensions/filters/http/jwt_authn:test_common_lib", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "@envoy_api//envoy/extensions/filters/http/jwt_authn/v3:pkg_cc_proto", ], ) @@ -106,7 +106,7 @@ envoy_extension_cc_test( "//source/extensions/filters/http/jwt_authn:matchers_lib", "//test/extensions/filters/http/common:mock_lib", "//test/extensions/filters/http/jwt_authn:test_common_lib", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "//test/test_common:utility_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", 
"@envoy_api//envoy/extensions/filters/http/jwt_authn/v3:pkg_cc_proto", @@ -117,6 +117,7 @@ envoy_extension_cc_test( name = "filter_integration_test", srcs = ["filter_integration_test.cc"], extension_name = "envoy.filters.http.jwt_authn", + tags = ["fails_on_windows"], deps = [ "//source/common/router:string_accessor_lib", "//source/extensions/filters/http/common:pass_through_filter_lib", @@ -169,7 +170,7 @@ envoy_extension_cc_test( ":test_common_lib", "//source/extensions/filters/http/jwt_authn:filter_config_interface", "//source/extensions/filters/http/jwt_authn:matchers_lib", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "//test/test_common:utility_lib", "@envoy_api//envoy/extensions/filters/http/jwt_authn/v3:pkg_cc_proto", ], diff --git a/test/extensions/filters/http/jwt_authn/all_verifier_test.cc b/test/extensions/filters/http/jwt_authn/all_verifier_test.cc index 148550c1a2ebb..9f9d1685e2397 100644 --- a/test/extensions/filters/http/jwt_authn/all_verifier_test.cc +++ b/test/extensions/filters/http/jwt_authn/all_verifier_test.cc @@ -5,7 +5,7 @@ #include "test/extensions/filters/http/jwt_authn/mock.h" #include "test/extensions/filters/http/jwt_authn/test_common.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "test/test_common/utility.h" #include "absl/strings/string_view.h" diff --git a/test/extensions/filters/http/jwt_authn/authenticator_test.cc b/test/extensions/filters/http/jwt_authn/authenticator_test.cc index 1cf6c1e893556..de34b89618293 100644 --- a/test/extensions/filters/http/jwt_authn/authenticator_test.cc +++ b/test/extensions/filters/http/jwt_authn/authenticator_test.cc @@ -11,7 +11,7 @@ #include "test/extensions/filters/http/common/mock.h" #include "test/extensions/filters/http/jwt_authn/mock.h" #include "test/extensions/filters/http/jwt_authn/test_common.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include 
"test/test_common/utility.h" #include "gtest/gtest.h" @@ -36,18 +36,20 @@ class AuthenticatorTest : public testing::Test { public: void SetUp() override { TestUtility::loadFromYaml(ExampleConfig, proto_config_); - CreateAuthenticator(); + createAuthenticator(); } - void CreateAuthenticator(::google::jwt_verify::CheckAudience* check_audience = nullptr, - const absl::optional& provider = - absl::make_optional(ProviderName)) { + void createAuthenticator( + ::google::jwt_verify::CheckAudience* check_audience = nullptr, + const absl::optional& provider = absl::make_optional(ProviderName), + bool allow_failed = false, bool allow_missing = false) { filter_config_ = FilterConfigImpl::create(proto_config_, "", mock_factory_ctx_); raw_fetcher_ = new MockJwksFetcher; fetcher_.reset(raw_fetcher_); auth_ = Authenticator::create( - check_audience, provider, !provider, !provider, filter_config_->getCache().getJwksCache(), - filter_config_->cm(), [this](Upstream::ClusterManager&) { return std::move(fetcher_); }, + check_audience, provider, allow_failed, allow_missing, + filter_config_->getCache().getJwksCache(), filter_config_->cm(), + [this](Upstream::ClusterManager&) { return std::move(fetcher_); }, filter_config_->timeSource()); jwks_ = Jwks::createFrom(PublicKey, Jwks::JWKS); EXPECT_TRUE(jwks_->getStatus() == Status::Ok); @@ -99,14 +101,13 @@ TEST_F(AuthenticatorTest, TestOkJWTandCache) { // Test OK pubkey and its cache for (int i = 0; i < 10; i++) { - auto headers = - Http::TestRequestHeaderMapImpl{{"Authorization", "Bearer " + std::string(GoodToken)}}; + Http::TestRequestHeaderMapImpl headers{{"Authorization", "Bearer " + std::string(GoodToken)}}; expectVerifyStatus(Status::Ok, headers); EXPECT_EQ(headers.get_("sec-istio-auth-userinfo"), ExpectedPayloadValue); // Verify the token is removed. 
- EXPECT_FALSE(headers.Authorization()); + EXPECT_FALSE(headers.has(Http::CustomHeaders::get().Authorization)); } } @@ -114,7 +115,7 @@ TEST_F(AuthenticatorTest, TestOkJWTandCache) { TEST_F(AuthenticatorTest, TestForwardJwt) { // Config forward_jwt flag (*proto_config_.mutable_providers())[std::string(ProviderName)].set_forward(true); - CreateAuthenticator(); + createAuthenticator(); EXPECT_CALL(*raw_fetcher_, fetch(_, _, _)) .WillOnce(Invoke([this](const envoy::config::core::v3::HttpUri&, Tracing::Span&, JwksFetcher::JwksReceiver& receiver) { @@ -122,13 +123,12 @@ TEST_F(AuthenticatorTest, TestForwardJwt) { })); // Test OK pubkey and its cache - auto headers = - Http::TestRequestHeaderMapImpl{{"Authorization", "Bearer " + std::string(GoodToken)}}; + Http::TestRequestHeaderMapImpl headers{{"Authorization", "Bearer " + std::string(GoodToken)}}; expectVerifyStatus(Status::Ok, headers); // Verify the token is NOT removed. - EXPECT_TRUE(headers.Authorization()); + EXPECT_TRUE(headers.has(Http::CustomHeaders::get().Authorization)); // Payload not set by default EXPECT_EQ(out_name_, ""); @@ -139,7 +139,7 @@ TEST_F(AuthenticatorTest, TestSetPayload) { // Config payload_in_metadata flag (*proto_config_.mutable_providers())[std::string(ProviderName)].set_payload_in_metadata( "my_payload"); - CreateAuthenticator(); + createAuthenticator(); EXPECT_CALL(*raw_fetcher_, fetch(_, _, _)) .WillOnce(Invoke([this](const envoy::config::core::v3::HttpUri&, Tracing::Span&, JwksFetcher::JwksReceiver& receiver) { @@ -147,8 +147,7 @@ TEST_F(AuthenticatorTest, TestSetPayload) { })); // Test OK pubkey and its cache - auto headers = - Http::TestRequestHeaderMapImpl{{"Authorization", "Bearer " + std::string(GoodToken)}}; + Http::TestRequestHeaderMapImpl headers{{"Authorization", "Bearer " + std::string(GoodToken)}}; expectVerifyStatus(Status::Ok, headers); @@ -169,8 +168,8 @@ TEST_F(AuthenticatorTest, TestJwtWithNonExistKid) { })); // Test OK pubkey and its cache - auto headers = - 
Http::TestRequestHeaderMapImpl{{"Authorization", "Bearer " + std::string(NonExistKidToken)}}; + Http::TestRequestHeaderMapImpl headers{ + {"Authorization", "Bearer " + std::string(NonExistKidToken)}}; expectVerifyStatus(Status::JwtVerificationFail, headers); } @@ -180,17 +179,83 @@ TEST_F(AuthenticatorTest, TestMissedJWT) { EXPECT_CALL(*raw_fetcher_, fetch(_, _, _)).Times(0); // Empty headers. - auto headers = Http::TestRequestHeaderMapImpl{}; + Http::TestRequestHeaderMapImpl headers{}; expectVerifyStatus(Status::JwtMissed, headers); } +// Test multiple tokens; the one from query parameter is bad, verification should fail. +TEST_F(AuthenticatorTest, TestMultipleJWTOneBadFromQuery) { + EXPECT_CALL(*raw_fetcher_, fetch(_, _, _)).Times(1); + + // headers with multiple tokens: one good, one bad + Http::TestRequestHeaderMapImpl headers{ + {"Authorization", "Bearer " + std::string(GoodToken)}, + {":path", "/foo?access_token=" + std::string(NonExistKidToken)}, + }; + + expectVerifyStatus(Status::JwtVerificationFail, headers); +} + +// Test multiple tokens; the one from header is bad, verification should fail. +TEST_F(AuthenticatorTest, TestMultipleJWTOneBadFromHeader) { + EXPECT_CALL(*raw_fetcher_, fetch(_, _, _)).Times(1); + + // headers with multiple tokens: one good, one bad + Http::TestRequestHeaderMapImpl headers{ + {"Authorization", "Bearer " + std::string(NonExistKidToken)}, + {":path", "/foo?access_token=" + std::string(GoodToken)}, + }; + + expectVerifyStatus(Status::JwtVerificationFail, headers); +} + +// Test multiple tokens; all are good, verification is ok. 
+TEST_F(AuthenticatorTest, TestMultipleJWTAllGood) { + EXPECT_CALL(*raw_fetcher_, fetch(_, _, _)).Times(1); + + // headers with multiple tokens: all are good + Http::TestRequestHeaderMapImpl headers{ + {"Authorization", "Bearer " + std::string(GoodToken)}, + {":path", "/foo?access_token=" + std::string(GoodToken)}, + }; + + expectVerifyStatus(Status::Ok, headers); +} + +// Test multiple tokens; one of them is bad and allow_failed, verification is ok. +TEST_F(AuthenticatorTest, TestMultipleJWTOneBadAllowFails) { + createAuthenticator(nullptr, absl::make_optional(ProviderName), + /*allow_failed=*/true, /*allow_missing=*/false); + EXPECT_CALL(*raw_fetcher_, fetch(_, _, _)).Times(1); + + // headers with multiple tokens: one good, one bad + Http::TestRequestHeaderMapImpl headers{ + {"Authorization", "Bearer " + std::string(GoodToken)}, + {":path", "/foo?access_token=" + std::string(NonExistKidToken)}, + }; + + expectVerifyStatus(Status::Ok, headers); +} + +// Test empty header and allow_missing, verification is ok. +TEST_F(AuthenticatorTest, TestAllowMissingWithEmptyHeader) { + createAuthenticator(nullptr, absl::make_optional(ProviderName), + /*allow_failed=*/false, /*allow_missing=*/true); + EXPECT_CALL(*raw_fetcher_, fetch(_, _, _)).Times(0); + + // Empty headers + Http::TestRequestHeaderMapImpl headers{}; + + expectVerifyStatus(Status::Ok, headers); +} + // This test verifies if Jwt is invalid, JwtBadFormat status is returned.
TEST_F(AuthenticatorTest, TestInvalidJWT) { EXPECT_CALL(*raw_fetcher_, fetch(_, _, _)).Times(0); std::string token = "invalidToken"; - auto headers = Http::TestRequestHeaderMapImpl{{"Authorization", "Bearer " + token}}; + Http::TestRequestHeaderMapImpl headers{{"Authorization", "Bearer " + token}}; expectVerifyStatus(Status::JwtBadFormat, headers); } @@ -198,7 +263,7 @@ TEST_F(AuthenticatorTest, TestInvalidJWT) { TEST_F(AuthenticatorTest, TestInvalidPrefix) { EXPECT_CALL(*raw_fetcher_, fetch(_, _, _)).Times(0); - auto headers = Http::TestRequestHeaderMapImpl{{"Authorization", "Bearer-invalid"}}; + Http::TestRequestHeaderMapImpl headers{{"Authorization", "Bearer-invalid"}}; expectVerifyStatus(Status::JwtMissed, headers); } @@ -207,8 +272,8 @@ TEST_F(AuthenticatorTest, TestInvalidPrefix) { TEST_F(AuthenticatorTest, TestNonExpiringJWT) { EXPECT_CALL(mock_factory_ctx_.cluster_manager_, httpAsyncClientForCluster(_)).Times(0); - auto headers = - Http::TestRequestHeaderMapImpl{{"Authorization", "Bearer " + std::string(NonExpiringToken)}}; + Http::TestRequestHeaderMapImpl headers{ + {"Authorization", "Bearer " + std::string(NonExpiringToken)}}; expectVerifyStatus(Status::JwtAudienceNotAllowed, headers); } @@ -216,8 +281,7 @@ TEST_F(AuthenticatorTest, TestNonExpiringJWT) { TEST_F(AuthenticatorTest, TestExpiredJWT) { EXPECT_CALL(*raw_fetcher_, fetch(_, _, _)).Times(0); - auto headers = - Http::TestRequestHeaderMapImpl{{"Authorization", "Bearer " + std::string(ExpiredToken)}}; + Http::TestRequestHeaderMapImpl headers{{"Authorization", "Bearer " + std::string(ExpiredToken)}}; expectVerifyStatus(Status::JwtExpired, headers); } @@ -225,8 +289,8 @@ TEST_F(AuthenticatorTest, TestExpiredJWT) { TEST_F(AuthenticatorTest, TestNotYetValidJWT) { EXPECT_CALL(*raw_fetcher_, fetch(_, _, _)).Times(0); - auto headers = - Http::TestRequestHeaderMapImpl{{"Authorization", "Bearer " + std::string(NotYetValidToken)}}; + Http::TestRequestHeaderMapImpl headers{ + {"Authorization", "Bearer " + 
std::string(NotYetValidToken)}}; expectVerifyStatus(Status::JwtNotYetValid, headers); } @@ -235,12 +299,11 @@ TEST_F(AuthenticatorTest, TestInvalidLocalJwks) { auto& provider = (*proto_config_.mutable_providers())[std::string(ProviderName)]; provider.clear_remote_jwks(); provider.mutable_local_jwks()->set_inline_string("invalid"); - CreateAuthenticator(); + createAuthenticator(); EXPECT_CALL(*raw_fetcher_, fetch(_, _, _)).Times(0); - auto headers = - Http::TestRequestHeaderMapImpl{{"Authorization", "Bearer " + std::string(GoodToken)}}; + Http::TestRequestHeaderMapImpl headers{{"Authorization", "Bearer " + std::string(GoodToken)}}; expectVerifyStatus(Status::JwksNoValidKeys, headers); } @@ -248,8 +311,8 @@ TEST_F(AuthenticatorTest, TestInvalidLocalJwks) { TEST_F(AuthenticatorTest, TestNonMatchAudJWT) { EXPECT_CALL(*raw_fetcher_, fetch(_, _, _)).Times(0); - auto headers = - Http::TestRequestHeaderMapImpl{{"Authorization", "Bearer " + std::string(InvalidAudToken)}}; + Http::TestRequestHeaderMapImpl headers{ + {"Authorization", "Bearer " + std::string(InvalidAudToken)}}; expectVerifyStatus(Status::JwtAudienceNotAllowed, headers); } @@ -257,12 +320,11 @@ TEST_F(AuthenticatorTest, TestNonMatchAudJWT) { TEST_F(AuthenticatorTest, TestIssuerNotFound) { // Create a config with an other issuer. 
(*proto_config_.mutable_providers())[std::string(ProviderName)].set_issuer("other_issuer"); - CreateAuthenticator(); + createAuthenticator(); EXPECT_CALL(*raw_fetcher_, fetch(_, _, _)).Times(0); - auto headers = - Http::TestRequestHeaderMapImpl{{"Authorization", "Bearer " + std::string(GoodToken)}}; + Http::TestRequestHeaderMapImpl headers{{"Authorization", "Bearer " + std::string(GoodToken)}}; expectVerifyStatus(Status::JwtUnknownIssuer, headers); } @@ -274,8 +336,7 @@ TEST_F(AuthenticatorTest, TestPubkeyFetchFail) { receiver.onJwksError(JwksFetcher::JwksReceiver::Failure::InvalidJwks); })); - auto headers = - Http::TestRequestHeaderMapImpl{{"Authorization", "Bearer " + std::string(GoodToken)}}; + Http::TestRequestHeaderMapImpl headers{{"Authorization", "Bearer " + std::string(GoodToken)}}; expectVerifyStatus(Status::JwksFetchFail, headers); Http::ResponseMessagePtr response_message(new Http::ResponseMessageImpl( @@ -291,8 +352,7 @@ TEST_F(AuthenticatorTest, TestOnDestroy) { // Cancel is called once. EXPECT_CALL(*raw_fetcher_, cancel()).Times(1); - auto headers = - Http::TestRequestHeaderMapImpl{{"Authorization", "Bearer " + std::string(GoodToken)}}; + Http::TestRequestHeaderMapImpl headers{{"Authorization", "Bearer " + std::string(GoodToken)}}; initTokenExtractor(); auto tokens = extractor_->extract(headers); // callback should not be called. 
@@ -308,15 +368,14 @@ TEST_F(AuthenticatorTest, TestNoForwardPayloadHeader) { // In this config, there is no forward_payload_header auto& provider0 = (*proto_config_.mutable_providers())[std::string(ProviderName)]; provider0.clear_forward_payload_header(); - CreateAuthenticator(); + createAuthenticator(); EXPECT_CALL(*raw_fetcher_, fetch(_, _, _)) .WillOnce(Invoke([this](const envoy::config::core::v3::HttpUri&, Tracing::Span&, JwksFetcher::JwksReceiver& receiver) { receiver.onJwksSuccess(std::move(jwks_)); })); - auto headers = - Http::TestRequestHeaderMapImpl{{"Authorization", "Bearer " + std::string(GoodToken)}}; + Http::TestRequestHeaderMapImpl headers{{"Authorization", "Bearer " + std::string(GoodToken)}}; expectVerifyStatus(Status::Ok, headers); // Test when forward_payload_header is not set, the output should NOT @@ -334,36 +393,36 @@ TEST_F(AuthenticatorTest, TestAllowFailedMultipleTokens) { header->set_value_prefix("Bearer "); } - CreateAuthenticator(nullptr, absl::nullopt); + createAuthenticator(nullptr, absl::nullopt, /*allow_failed=*/true); EXPECT_CALL(*raw_fetcher_, fetch(_, _, _)) .WillOnce(Invoke([this](const envoy::config::core::v3::HttpUri&, Tracing::Span&, JwksFetcher::JwksReceiver& receiver) { receiver.onJwksSuccess(std::move(jwks_)); })); - auto headers = Http::TestRequestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl headers1{ {"a", "Bearer " + std::string(ExpiredToken)}, {"b", "Bearer " + std::string(GoodToken)}, {"c", "Bearer " + std::string(InvalidAudToken)}, {":path", "/"}, }; - expectVerifyStatus(Status::Ok, headers); + expectVerifyStatus(Status::Ok, headers1); - EXPECT_TRUE(headers.has("a")); - EXPECT_FALSE(headers.has("b")); - EXPECT_TRUE(headers.has("c")); + EXPECT_TRUE(headers1.has("a")); + EXPECT_FALSE(headers1.has("b")); + EXPECT_TRUE(headers1.has("c")); - headers = Http::TestRequestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl headers2{ {"a", "Bearer " + std::string(GoodToken)}, {"b", "Bearer " + std::string(GoodToken)}, {"c", 
"Bearer " + std::string(GoodToken)}, {":path", "/"}, }; - expectVerifyStatus(Status::Ok, headers); + expectVerifyStatus(Status::Ok, headers2); - EXPECT_FALSE(headers.has("a")); - EXPECT_FALSE(headers.has("b")); - EXPECT_FALSE(headers.has("c")); + EXPECT_FALSE(headers2.has("a")); + EXPECT_FALSE(headers2.has("b")); + EXPECT_FALSE(headers2.has("c")); } // This test verifies that allow failed authenticator will verify all tokens. @@ -381,7 +440,7 @@ TEST_F(AuthenticatorTest, TestAllowFailedMultipleIssuers) { header->set_name("other-auth"); header->set_value_prefix("Bearer "); - CreateAuthenticator(nullptr, absl::nullopt); + createAuthenticator(nullptr, absl::nullopt, /*allow_failed=*/true); EXPECT_CALL(*raw_fetcher_, fetch(_, _, _)) .Times(2) .WillRepeatedly(Invoke([](const envoy::config::core::v3::HttpUri&, Tracing::Span&, @@ -391,7 +450,7 @@ TEST_F(AuthenticatorTest, TestAllowFailedMultipleIssuers) { receiver.onJwksSuccess(std::move(jwks)); })); - auto headers = Http::TestRequestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl headers{ {"Authorization", "Bearer " + std::string(GoodToken)}, {"expired-auth", "Bearer " + std::string(ExpiredToken)}, {"other-auth", "Bearer " + std::string(OtherGoodToken)}, @@ -408,19 +467,19 @@ TEST_F(AuthenticatorTest, TestAllowFailedMultipleIssuers) { TEST_F(AuthenticatorTest, TestCustomCheckAudience) { auto check_audience = std::make_unique<::google::jwt_verify::CheckAudience>( std::vector{"invalid_service"}); - CreateAuthenticator(check_audience.get()); + createAuthenticator(check_audience.get()); EXPECT_CALL(*raw_fetcher_, fetch(_, _, _)) .WillOnce(Invoke([this](const envoy::config::core::v3::HttpUri&, Tracing::Span&, JwksFetcher::JwksReceiver& receiver) { receiver.onJwksSuccess(std::move(jwks_)); })); - auto headers = - Http::TestRequestHeaderMapImpl{{"Authorization", "Bearer " + std::string(InvalidAudToken)}}; - expectVerifyStatus(Status::Ok, headers); + Http::TestRequestHeaderMapImpl headers1{ + {"Authorization", "Bearer " + 
std::string(InvalidAudToken)}}; + expectVerifyStatus(Status::Ok, headers1); - headers = Http::TestRequestHeaderMapImpl{{"Authorization", "Bearer " + std::string(GoodToken)}}; - expectVerifyStatus(Status::JwtAudienceNotAllowed, headers); + Http::TestRequestHeaderMapImpl headers2{{"Authorization", "Bearer " + std::string(GoodToken)}}; + expectVerifyStatus(Status::JwtAudienceNotAllowed, headers2); } // This test verifies that when invalid JWKS is fetched, an JWKS error status is returned. @@ -432,8 +491,7 @@ TEST_F(AuthenticatorTest, TestInvalidPubkeyKey) { receiver.onJwksSuccess(std::move(jwks)); })); - auto headers = - Http::TestRequestHeaderMapImpl{{"Authorization", "Bearer " + std::string(GoodToken)}}; + Http::TestRequestHeaderMapImpl headers{{"Authorization", "Bearer " + std::string(GoodToken)}}; expectVerifyStatus(Status::JwksPemBadBase64, headers); } diff --git a/test/extensions/filters/http/jwt_authn/extractor_test.cc b/test/extensions/filters/http/jwt_authn/extractor_test.cc index f32af1b6706ef..d91f2c7dfee19 100644 --- a/test/extensions/filters/http/jwt_authn/extractor_test.cc +++ b/test/extensions/filters/http/jwt_authn/extractor_test.cc @@ -112,7 +112,7 @@ TEST_F(ExtractorTest, TestDefaultHeaderLocation) { // Test token remove tokens[0]->removeJwt(headers); - EXPECT_FALSE(headers.Authorization()); + EXPECT_FALSE(headers.has(Http::CustomHeaders::get().Authorization)); } // Test extracting JWT as Bearer token from the default header location: "Authorization" - diff --git a/test/extensions/filters/http/jwt_authn/filter_config_test.cc b/test/extensions/filters/http/jwt_authn/filter_config_test.cc index 662b8a97a87e5..45da3fea600f7 100644 --- a/test/extensions/filters/http/jwt_authn/filter_config_test.cc +++ b/test/extensions/filters/http/jwt_authn/filter_config_test.cc @@ -6,7 +6,8 @@ #include "extensions/filters/http/jwt_authn/filter_config.h" #include "test/extensions/filters/http/jwt_authn/test_common.h" -#include "test/mocks/server/mocks.h" +#include 
"test/mocks/server/factory_context.h" +#include "test/mocks/server/instance.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git a/test/extensions/filters/http/jwt_authn/filter_factory_test.cc b/test/extensions/filters/http/jwt_authn/filter_factory_test.cc index 3095be69e32c4..ea89ea6162253 100644 --- a/test/extensions/filters/http/jwt_authn/filter_factory_test.cc +++ b/test/extensions/filters/http/jwt_authn/filter_factory_test.cc @@ -4,7 +4,7 @@ #include "extensions/filters/http/jwt_authn/filter_factory.h" #include "test/extensions/filters/http/jwt_authn/test_common.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git a/test/extensions/filters/http/jwt_authn/filter_integration_test.cc b/test/extensions/filters/http/jwt_authn/filter_integration_test.cc index bb40a649bc882..5b72bb7e2adc0 100644 --- a/test/extensions/filters/http/jwt_authn/filter_integration_test.cc +++ b/test/extensions/filters/http/jwt_authn/filter_integration_test.cc @@ -114,11 +114,11 @@ TEST_P(LocalJwksIntegrationTest, WithGoodToken) { EXPECT_TRUE(payload_entry != nullptr); EXPECT_EQ(payload_entry->value().getStringView(), ExpectedPayloadValue); // Verify the token is removed. - EXPECT_FALSE(upstream_request_->headers().Authorization()); + EXPECT_EQ(nullptr, upstream_request_->headers().get(Http::CustomHeaders::get().Authorization)); upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, true); response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } // With local Jwks, this test verifies a request is rejected with an expired Jwt token. 
@@ -138,7 +138,7 @@ TEST_P(LocalJwksIntegrationTest, ExpiredToken) { response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ("401", response->headers().Status()->value().getStringView()); + EXPECT_EQ("401", response->headers().getStatusValue()); } TEST_P(LocalJwksIntegrationTest, MissingToken) { @@ -156,7 +156,7 @@ TEST_P(LocalJwksIntegrationTest, MissingToken) { response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ("401", response->headers().Status()->value().getStringView()); + EXPECT_EQ("401", response->headers().getStatusValue()); } TEST_P(LocalJwksIntegrationTest, ExpiredTokenHeadReply) { @@ -175,8 +175,8 @@ TEST_P(LocalJwksIntegrationTest, ExpiredTokenHeadReply) { response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ("401", response->headers().Status()->value().getStringView()); - EXPECT_NE("0", response->headers().ContentLength()->value().getStringView()); + EXPECT_EQ("401", response->headers().getStatusValue()); + EXPECT_NE("0", response->headers().getContentLengthValue()); EXPECT_THAT(response->body(), ::testing::IsEmpty()); } @@ -199,7 +199,7 @@ TEST_P(LocalJwksIntegrationTest, NoRequiresPath) { response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } // This test verifies a CORS preflight request without JWT token is allowed. 
@@ -222,7 +222,7 @@ TEST_P(LocalJwksIntegrationTest, CorsPreflight) { upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, true); response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } // This test verifies JwtRequirement specified from filer state rules @@ -299,7 +299,7 @@ TEST_P(LocalJwksIntegrationTest, FilterStateRequirement) { response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ(test.expected_status, response->headers().Status()->value().getStringView()); + EXPECT_EQ(test.expected_status, response->headers().getStatusValue()); } } @@ -392,13 +392,13 @@ TEST_P(RemoteJwksIntegrationTest, WithGoodToken) { EXPECT_TRUE(payload_entry != nullptr); EXPECT_EQ(payload_entry->value().getStringView(), ExpectedPayloadValue); // Verify the token is removed. - EXPECT_FALSE(upstream_request_->headers().Authorization()); + EXPECT_EQ(nullptr, upstream_request_->headers().get(Http::CustomHeaders::get().Authorization)); upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, true); response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); cleanup(); } @@ -423,7 +423,7 @@ TEST_P(RemoteJwksIntegrationTest, FetchFailedJwks) { response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ("401", response->headers().Status()->value().getStringView()); + EXPECT_EQ("401", response->headers().getStatusValue()); cleanup(); } @@ -443,7 +443,7 @@ TEST_P(RemoteJwksIntegrationTest, FetchFailedMissingCluster) { response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ("401", response->headers().Status()->value().getStringView()); + EXPECT_EQ("401", response->headers().getStatusValue()); 
cleanup(); } diff --git a/test/extensions/filters/http/jwt_authn/filter_test.cc b/test/extensions/filters/http/jwt_authn/filter_test.cc index 9881cd25bab20..0f8f1ff9c5491 100644 --- a/test/extensions/filters/http/jwt_authn/filter_test.cc +++ b/test/extensions/filters/http/jwt_authn/filter_test.cc @@ -4,7 +4,6 @@ #include "extensions/filters/http/well_known_names.h" #include "test/extensions/filters/http/jwt_authn/mock.h" -#include "test/mocks/server/mocks.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" @@ -110,6 +109,33 @@ TEST_F(FilterTest, CorsPreflight) { EXPECT_EQ(0U, mock_config_->stats().denied_.value()); } +TEST_F(FilterTest, CorsPreflightMssingOrigin) { + auto headers = Http::TestRequestHeaderMapImpl{ + {":method", "OPTIONS"}, + {":path", "/"}, + {":scheme", "http"}, + {":authority", "host"}, + {"access-control-request-method", "GET"}, + }; + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false)); + EXPECT_EQ(1U, mock_config_->stats().allowed_.value()); + // Should not be bypassed by cors_preflight since missing origin. + EXPECT_EQ(0U, mock_config_->stats().cors_preflight_bypassed_.value()); + EXPECT_EQ(0U, mock_config_->stats().denied_.value()); +} + +TEST_F(FilterTest, CorsPreflightMssingAccessControlRequestMethod) { + auto headers = Http::TestRequestHeaderMapImpl{ + {":method", "OPTIONS"}, {":path", "/"}, {":scheme", "http"}, {":authority", "host"}, + {"origin", "test-origin"}, + }; + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false)); + EXPECT_EQ(1U, mock_config_->stats().allowed_.value()); + // Should not be bypassed by cors_preflight since missing access-control-request-method. 
+ EXPECT_EQ(0U, mock_config_->stats().cors_preflight_bypassed_.value()); + EXPECT_EQ(0U, mock_config_->stats().denied_.value()); +} + // This test verifies the setPayload call is handled correctly TEST_F(FilterTest, TestSetPayloadCall) { setupMockConfig(); diff --git a/test/extensions/filters/http/jwt_authn/group_verifier_test.cc b/test/extensions/filters/http/jwt_authn/group_verifier_test.cc index 10ca0909555ed..fec4c14305163 100644 --- a/test/extensions/filters/http/jwt_authn/group_verifier_test.cc +++ b/test/extensions/filters/http/jwt_authn/group_verifier_test.cc @@ -63,7 +63,7 @@ const char AnyWithAll[] = R"( - provider_name: "provider_4" )"; -using StatusMap = std::unordered_map; +using StatusMap = absl::node_hash_map; constexpr auto allowfailed = "_allow_failed_"; @@ -109,9 +109,9 @@ class GroupVerifierTest : public testing::Test { return struct_obj; } - std::unordered_map + absl::node_hash_map createAsyncMockAuthsAndVerifier(const std::vector& providers) { - std::unordered_map callbacks; + absl::node_hash_map callbacks; for (const auto& provider : providers) { auto mock_auth = std::make_unique(); EXPECT_CALL(*mock_auth, doVerify(_, _, _, _, _)) @@ -130,7 +130,7 @@ class GroupVerifierTest : public testing::Test { JwtAuthentication proto_config_; VerifierConstPtr verifier_; MockVerifierCallbacks mock_cb_; - std::unordered_map> mock_auths_; + absl::node_hash_map> mock_auths_; NiceMock mock_factory_; ContextSharedPtr context_; NiceMock parent_span_; diff --git a/test/extensions/filters/http/jwt_authn/provider_verifier_test.cc b/test/extensions/filters/http/jwt_authn/provider_verifier_test.cc index ef6f24de8785a..effc6ae0f7b28 100644 --- a/test/extensions/filters/http/jwt_authn/provider_verifier_test.cc +++ b/test/extensions/filters/http/jwt_authn/provider_verifier_test.cc @@ -5,7 +5,7 @@ #include "test/extensions/filters/http/jwt_authn/mock.h" #include "test/extensions/filters/http/jwt_authn/test_common.h" -#include "test/mocks/server/mocks.h" +#include 
"test/mocks/server/factory_context.h" #include "test/mocks/upstream/mocks.h" #include "test/test_common/utility.h" diff --git a/test/extensions/filters/http/lua/BUILD b/test/extensions/filters/http/lua/BUILD index ea708b4420f82..dcb41ab264e74 100644 --- a/test/extensions/filters/http/lua/BUILD +++ b/test/extensions/filters/http/lua/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,15 +7,19 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( name = "lua_filter_test", srcs = ["lua_filter_test.cc"], extension_name = "envoy.filters.http.lua", + tags = ["skip_on_windows"], deps = [ "//source/common/stream_info:stream_info_lib", "//source/extensions/filters/http/lua:lua_filter_lib", + "//test/mocks/api:api_mocks", "//test/mocks/http:http_mocks", "//test/mocks/network:network_mocks", "//test/mocks/ssl:ssl_mocks", @@ -33,6 +35,7 @@ envoy_extension_cc_test( name = "wrappers_test", srcs = ["wrappers_test.cc"], extension_name = "envoy.filters.http.lua", + tags = ["skip_on_windows"], deps = [ "//source/common/stream_info:stream_info_lib", "//source/extensions/filters/http/lua:wrappers_lib", @@ -47,6 +50,7 @@ envoy_extension_cc_test( name = "lua_integration_test", srcs = ["lua_integration_test.cc"], extension_name = "envoy.filters.http.lua", + tags = ["skip_on_windows"], deps = [ "//source/extensions/filters/http/lua:config", "//test/integration:http_integration_lib", @@ -60,9 +64,10 @@ envoy_extension_cc_test( name = "config_test", srcs = ["config_test.cc"], extension_name = "envoy.filters.http.lua", + tags = ["skip_on_windows"], deps = [ "//source/extensions/filters/http/lua:config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "//test/test_common:utility_lib", "@envoy_api//envoy/extensions/filters/http/lua/v3:pkg_cc_proto", ], diff --git a/test/extensions/filters/http/lua/config_test.cc 
b/test/extensions/filters/http/lua/config_test.cc index 5addb9ca26935..c01772b2a2222 100644 --- a/test/extensions/filters/http/lua/config_test.cc +++ b/test/extensions/filters/http/lua/config_test.cc @@ -5,7 +5,7 @@ #include "extensions/filters/http/lua/config.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" diff --git a/test/extensions/filters/http/lua/lua_filter_test.cc b/test/extensions/filters/http/lua/lua_filter_test.cc index 3d2e35eb3d0ee..f0893c1e8c0a8 100644 --- a/test/extensions/filters/http/lua/lua_filter_test.cc +++ b/test/extensions/filters/http/lua/lua_filter_test.cc @@ -1,3 +1,4 @@ +#include #include #include "envoy/config/core/v3/base.pb.h" @@ -8,6 +9,7 @@ #include "extensions/filters/http/lua/lua_filter.h" +#include "test/mocks/api/mocks.h" #include "test/mocks/http/mocks.h" #include "test/mocks/network/mocks.h" #include "test/mocks/ssl/mocks.h" @@ -54,6 +56,7 @@ class LuaHttpFilterTest : public testing::Test { decoder_callbacks_.buffer_->move(data); })); + EXPECT_CALL(decoder_callbacks_, activeSpan()).Times(AtLeast(0)); EXPECT_CALL(decoder_callbacks_, decodingBuffer()).Times(AtLeast(0)); EXPECT_CALL(decoder_callbacks_, route()).Times(AtLeast(0)); @@ -65,16 +68,31 @@ class LuaHttpFilterTest : public testing::Test { } encoder_callbacks_.buffer_->move(data); })); + EXPECT_CALL(encoder_callbacks_, activeSpan()).Times(AtLeast(0)); EXPECT_CALL(encoder_callbacks_, encodingBuffer()).Times(AtLeast(0)); + EXPECT_CALL(decoder_callbacks_, streamInfo()).Times(testing::AnyNumber()); } ~LuaHttpFilterTest() override { filter_->onDestroy(); } + // Quickly set up a global configuration. In order to avoid extensive modification of existing + // test cases, the existing configuration methods must be compatible. 
void setup(const std::string& lua_code) { - config_ = std::make_shared(lua_code, tls_, cluster_manager_); + envoy::extensions::filters::http::lua::v3::Lua proto_config; + proto_config.set_inline_code(lua_code); + envoy::extensions::filters::http::lua::v3::LuaPerRoute per_route_proto_config; + setupConfig(proto_config, per_route_proto_config); setupFilter(); } + void setupConfig(envoy::extensions::filters::http::lua::v3::Lua& proto_config, + envoy::extensions::filters::http::lua::v3::LuaPerRoute& per_route_proto_config) { + // Setup filter config for Lua filter. + config_ = std::make_shared(proto_config, tls_, cluster_manager_, api_); + // Setup per route config for Lua filter. + per_route_config_ = std::make_shared(per_route_proto_config, tls_, api_); + } + void setupFilter() { filter_ = std::make_unique(config_); filter_->setDecoderFilterCallbacks(decoder_callbacks_); @@ -94,8 +112,10 @@ class LuaHttpFilterTest : public testing::Test { } NiceMock tls_; + NiceMock api_; Upstream::MockClusterManager cluster_manager_; std::shared_ptr config_; + std::shared_ptr per_route_config_; std::unique_ptr filter_; Http::MockStreamDecoderFilterCallbacks decoder_callbacks_; Http::MockStreamEncoderFilterCallbacks encoder_callbacks_; @@ -103,6 +123,7 @@ class LuaHttpFilterTest : public testing::Test { std::shared_ptr> ssl_; NiceMock connection_; NiceMock stream_info_; + Tracing::MockSpan child_span_; const std::string HEADER_ONLY_SCRIPT{R"EOF( function envoy_on_request(request_handle) @@ -180,6 +201,12 @@ class LuaHttpFilterTest : public testing::Test { end end )EOF"}; + + const std::string ADD_HEADERS_SCRIPT{R"EOF( + function envoy_on_request(request_handle) + request_handle:headers():add("hello", "world") + end + )EOF"}; }; // Bad code in initial config. 
@@ -190,7 +217,12 @@ TEST(LuaHttpFilterConfigTest, BadCode) { NiceMock tls; NiceMock cluster_manager; - EXPECT_THROW_WITH_MESSAGE(FilterConfig(SCRIPT, tls, cluster_manager), + NiceMock api; + + envoy::extensions::filters::http::lua::v3::Lua proto_config; + proto_config.set_inline_code(SCRIPT); + + EXPECT_THROW_WITH_MESSAGE(FilterConfig(proto_config, tls, cluster_manager, api), Filters::Common::Lua::LuaException, "script load error: [string \"...\"]:3: '=' expected near ''"); } @@ -771,12 +803,12 @@ TEST_F(LuaHttpFilterTest, HttpCall) { .WillOnce( Invoke([&](Http::RequestMessagePtr& message, Http::AsyncClient::Callbacks& cb, const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { - EXPECT_EQ((Http::TestHeaderMapImpl{{":path", "/"}, - {":method", "POST"}, - {":authority", "foo"}, - {"set-cookie", "flavor=chocolate; Path=/"}, - {"set-cookie", "variant=chewy; Path=/"}, - {"content-length", "11"}}), + EXPECT_EQ((Http::TestRequestHeaderMapImpl{{":path", "/"}, + {":method", "POST"}, + {":authority", "foo"}, + {"set-cookie", "flavor=chocolate; Path=/"}, + {"set-cookie", "variant=chewy; Path=/"}, + {"content-length", "11"}}), message->headers()); callbacks = &cb; return &request; @@ -797,6 +829,7 @@ TEST_F(LuaHttpFilterTest, HttpCall) { EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(":status 200"))); EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq("response"))); EXPECT_CALL(decoder_callbacks_, continueDecoding()); + callbacks->onBeforeFinalizeUpstreamSpan(child_span_, &response_message->headers()); callbacks->onSuccess(request, std::move(response_message)); } @@ -834,12 +867,12 @@ TEST_F(LuaHttpFilterTest, HttpCallAsyncFalse) { .WillOnce( Invoke([&](Http::RequestMessagePtr& message, Http::AsyncClient::Callbacks& cb, const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { - EXPECT_EQ((Http::TestHeaderMapImpl{{":path", "/"}, - {":method", "POST"}, - {":authority", "foo"}, - {"set-cookie", "flavor=chocolate; 
Path=/"}, - {"set-cookie", "variant=chewy; Path=/"}, - {"content-length", "11"}}), + EXPECT_EQ((Http::TestRequestHeaderMapImpl{{":path", "/"}, + {":method", "POST"}, + {":authority", "foo"}, + {"set-cookie", "flavor=chocolate; Path=/"}, + {"set-cookie", "variant=chewy; Path=/"}, + {"content-length", "11"}}), message->headers()); callbacks = &cb; return &request; @@ -893,12 +926,12 @@ TEST_F(LuaHttpFilterTest, HttpCallAsynchronous) { .WillOnce( Invoke([&](Http::RequestMessagePtr& message, Http::AsyncClient::Callbacks& cb, const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { - EXPECT_EQ((Http::TestHeaderMapImpl{{":path", "/"}, - {":method", "POST"}, - {":authority", "foo"}, - {"set-cookie", "flavor=chocolate; Path=/"}, - {"set-cookie", "variant=chewy; Path=/"}, - {"content-length", "11"}}), + EXPECT_EQ((Http::TestRequestHeaderMapImpl{{":path", "/"}, + {":method", "POST"}, + {":authority", "foo"}, + {"set-cookie", "flavor=chocolate; Path=/"}, + {"set-cookie", "variant=chewy; Path=/"}, + {"content-length", "11"}}), message->headers()); callbacks = &cb; return &request; @@ -961,10 +994,10 @@ TEST_F(LuaHttpFilterTest, DoubleHttpCall) { .WillOnce( Invoke([&](Http::RequestMessagePtr& message, Http::AsyncClient::Callbacks& cb, const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { - EXPECT_EQ((Http::TestHeaderMapImpl{{":path", "/"}, - {":method", "POST"}, - {":authority", "foo"}, - {"content-length", "11"}}), + EXPECT_EQ((Http::TestRequestHeaderMapImpl{{":path", "/"}, + {":method", "POST"}, + {":authority", "foo"}, + {"content-length", "11"}}), message->headers()); callbacks = &cb; return &request; @@ -984,7 +1017,7 @@ TEST_F(LuaHttpFilterTest, DoubleHttpCall) { .WillOnce( Invoke([&](Http::RequestMessagePtr& message, Http::AsyncClient::Callbacks& cb, const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { - EXPECT_EQ((Http::TestHeaderMapImpl{ + EXPECT_EQ((Http::TestRequestHeaderMapImpl{ {":path", "/bar"}, 
{":method", "GET"}, {":authority", "foo"}}), message->headers()); callbacks = &cb; @@ -997,6 +1030,7 @@ TEST_F(LuaHttpFilterTest, DoubleHttpCall) { EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(":status 403"))); EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq("no body"))); EXPECT_CALL(decoder_callbacks_, continueDecoding()); + callbacks->onBeforeFinalizeUpstreamSpan(child_span_, &response_message->headers()); callbacks->onSuccess(request, std::move(response_message)); Buffer::OwnedImpl data("hello"); @@ -1040,7 +1074,7 @@ TEST_F(LuaHttpFilterTest, HttpCallNoBody) { .WillOnce( Invoke([&](Http::RequestMessagePtr& message, Http::AsyncClient::Callbacks& cb, const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { - EXPECT_EQ((Http::TestHeaderMapImpl{ + EXPECT_EQ((Http::TestRequestHeaderMapImpl{ {":path", "/"}, {":method", "GET"}, {":authority", "foo"}}), message->headers()); callbacks = &cb; @@ -1098,7 +1132,7 @@ TEST_F(LuaHttpFilterTest, HttpCallImmediateResponse) { .WillOnce( Invoke([&](Http::RequestMessagePtr& message, Http::AsyncClient::Callbacks& cb, const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { - EXPECT_EQ((Http::TestHeaderMapImpl{ + EXPECT_EQ((Http::TestRequestHeaderMapImpl{ {":path", "/"}, {":method", "GET"}, {":authority", "foo"}}), message->headers()); callbacks = &cb; @@ -1110,9 +1144,9 @@ TEST_F(LuaHttpFilterTest, HttpCallImmediateResponse) { Http::ResponseMessagePtr response_message(new Http::ResponseMessageImpl( Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{":status", "200"}}})); - Http::TestHeaderMapImpl expected_headers{{":status", "403"}, - {"set-cookie", "flavor=chocolate; Path=/"}, - {"set-cookie", "variant=chewy; Path=/"}}; + Http::TestResponseHeaderMapImpl expected_headers{{":status", "403"}, + {"set-cookie", "flavor=chocolate; Path=/"}, + {"set-cookie", "variant=chewy; Path=/"}}; EXPECT_CALL(decoder_callbacks_, 
encodeHeaders_(HeaderMapEqualRef(&expected_headers), true)); callbacks->onSuccess(request, std::move(response_message)); } @@ -1410,8 +1444,9 @@ TEST_F(LuaHttpFilterTest, ImmediateResponse) { setup(SCRIPT); // Perform a GC and snap bytes currently used by the runtime. - config_->runtimeGC(); - const uint64_t mem_use_at_start = config_->runtimeBytesUsed(); + auto script_config = config_->perLuaCodeSetup(GLOBAL_SCRIPT_NAME); + script_config->runtimeGC(); + const uint64_t mem_use_at_start = script_config->runtimeBytesUsed(); uint64_t num_loops = 2000; #if defined(__has_feature) && (__has_feature(thread_sanitizer)) @@ -1422,7 +1457,7 @@ TEST_F(LuaHttpFilterTest, ImmediateResponse) { for (uint64_t i = 0; i < num_loops; i++) { Http::TestRequestHeaderMapImpl request_headers{{":path", "/"}}; - Http::TestHeaderMapImpl expected_headers{{":status", "503"}, {"content-length", "4"}}; + Http::TestResponseHeaderMapImpl expected_headers{{":status", "503"}, {"content-length", "4"}}; EXPECT_CALL(decoder_callbacks_, encodeHeaders_(HeaderMapEqualRef(&expected_headers), false)); EXPECT_CALL(decoder_callbacks_, encodeData(_, true)); EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, @@ -1439,8 +1474,8 @@ TEST_F(LuaHttpFilterTest, ImmediateResponse) { // to do a soft comparison here. In my own testing, without a fix for #3570, the memory // usage after is at least 20x higher after 2000 iterations so we just check to see if it's // within 2x. - config_->runtimeGC(); - EXPECT_TRUE(config_->runtimeBytesUsed() < mem_use_at_start * 2); + script_config->runtimeGC(); + EXPECT_TRUE(script_config->runtimeBytesUsed() < mem_use_at_start * 2); } // Respond with bad status. @@ -1793,6 +1828,151 @@ TEST_F(LuaHttpFilterTest, CheckConnection) { EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true)); } +// Inspect stream info downstream SSL connection. 
+TEST_F(LuaHttpFilterTest, InspectStreamInfoDowstreamSslConnection) { + const std::string SCRIPT{R"EOF( + function envoy_on_request(request_handle) + if request_handle:streamInfo():downstreamSslConnection() == nil then + else + if request_handle:streamInfo():downstreamSslConnection():peerCertificatePresented() then + request_handle:logTrace("peerCertificatePresented") + end + + if request_handle:streamInfo():downstreamSslConnection():peerCertificateValidated() then + request_handle:logTrace("peerCertificateValidated") + end + + request_handle:logTrace(table.concat(request_handle:streamInfo():downstreamSslConnection():uriSanPeerCertificate(), ",")) + request_handle:logTrace(table.concat(request_handle:streamInfo():downstreamSslConnection():uriSanLocalCertificate(), ",")) + request_handle:logTrace(table.concat(request_handle:streamInfo():downstreamSslConnection():dnsSansPeerCertificate(), ",")) + request_handle:logTrace(table.concat(request_handle:streamInfo():downstreamSslConnection():dnsSansLocalCertificate(), ",")) + + request_handle:logTrace(request_handle:streamInfo():downstreamSslConnection():ciphersuiteId()) + + request_handle:logTrace(request_handle:streamInfo():downstreamSslConnection():validFromPeerCertificate()) + request_handle:logTrace(request_handle:streamInfo():downstreamSslConnection():expirationPeerCertificate()) + + request_handle:logTrace(request_handle:streamInfo():downstreamSslConnection():subjectLocalCertificate()) + request_handle:logTrace(request_handle:streamInfo():downstreamSslConnection():sha256PeerCertificateDigest()) + request_handle:logTrace(request_handle:streamInfo():downstreamSslConnection():serialNumberPeerCertificate()) + request_handle:logTrace(request_handle:streamInfo():downstreamSslConnection():issuerPeerCertificate()) + request_handle:logTrace(request_handle:streamInfo():downstreamSslConnection():subjectPeerCertificate()) + request_handle:logTrace(request_handle:streamInfo():downstreamSslConnection():ciphersuiteString()) + 
request_handle:logTrace(request_handle:streamInfo():downstreamSslConnection():tlsVersion()) + request_handle:logTrace(request_handle:streamInfo():downstreamSslConnection():urlEncodedPemEncodedPeerCertificate()) + request_handle:logTrace(request_handle:streamInfo():downstreamSslConnection():urlEncodedPemEncodedPeerCertificateChain()) + end + end + )EOF"}; + + setup(SCRIPT); + + Http::TestRequestHeaderMapImpl request_headers{{":path", "/"}}; + + auto connection_info = std::make_shared(); + EXPECT_CALL(decoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(stream_info_)); + EXPECT_CALL(stream_info_, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + + EXPECT_CALL(*connection_info, peerCertificatePresented()).WillOnce(Return(true)); + EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq("peerCertificatePresented"))); + + EXPECT_CALL(*connection_info, peerCertificateValidated()).WillOnce(Return(true)); + EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq("peerCertificateValidated"))); + + const std::vector peer_uri_sans{"peer-uri-sans-1", "peer-uri-sans-2"}; + EXPECT_CALL(*connection_info, uriSanPeerCertificate()).WillOnce(Return(peer_uri_sans)); + EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq("peer-uri-sans-1,peer-uri-sans-2"))); + + const std::vector local_uri_sans{"local-uri-sans-1", "local-uri-sans-2"}; + EXPECT_CALL(*connection_info, uriSanLocalCertificate()).WillOnce(Return(local_uri_sans)); + EXPECT_CALL(*filter_, + scriptLog(spdlog::level::trace, StrEq("local-uri-sans-1,local-uri-sans-2"))); + + const std::vector peer_dns_sans{"peer-dns-sans-1", "peer-dns-sans-2"}; + EXPECT_CALL(*connection_info, dnsSansPeerCertificate()).WillOnce(Return(peer_dns_sans)); + EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq("peer-dns-sans-1,peer-dns-sans-2"))); + + const std::vector local_dns_sans{"local-dns-sans-1", "local-dns-sans-2"}; + EXPECT_CALL(*connection_info, 
dnsSansLocalCertificate()).WillOnce(Return(local_dns_sans)); + EXPECT_CALL(*filter_, + scriptLog(spdlog::level::trace, StrEq("local-dns-sans-1,local-dns-sans-2"))); + + const std::string subject_local = "subject-local"; + EXPECT_CALL(*connection_info, subjectLocalCertificate()).WillOnce(ReturnRef(subject_local)); + EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(subject_local))); + + const uint64_t cipher_suite_id = 0x0707; + EXPECT_CALL(*connection_info, ciphersuiteId()).WillRepeatedly(Return(cipher_suite_id)); + EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq("0x0707"))); + + const SystemTime validity(std::chrono::seconds(1522796777)); + EXPECT_CALL(*connection_info, validFromPeerCertificate()).WillRepeatedly(Return(validity)); + EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq("1522796777"))); + + const SystemTime expiry(std::chrono::seconds(1522796776)); + EXPECT_CALL(*connection_info, expirationPeerCertificate()).WillRepeatedly(Return(expiry)); + EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq("1522796776"))); + + const std::string peer_cert_digest = "peer-cert-digest"; + EXPECT_CALL(*connection_info, sha256PeerCertificateDigest()) + .WillOnce(ReturnRef(peer_cert_digest)); + EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(peer_cert_digest))); + + const std::string peer_cert_serial_number = "peer-cert-serial-number"; + EXPECT_CALL(*connection_info, serialNumberPeerCertificate()) + .WillOnce(ReturnRef(peer_cert_serial_number)); + EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(peer_cert_serial_number))); + + const std::string peer_cert_issuer = "peer-cert-issuer"; + EXPECT_CALL(*connection_info, issuerPeerCertificate()).WillOnce(ReturnRef(peer_cert_issuer)); + EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(peer_cert_issuer))); + + const std::string peer_cert_subject = "peer-cert-subject"; + EXPECT_CALL(*connection_info, 
subjectPeerCertificate()).WillOnce(ReturnRef(peer_cert_subject)); + EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(peer_cert_subject))); + + const std::string cipher_suite = "cipher-suite"; + EXPECT_CALL(*connection_info, ciphersuiteString()).WillOnce(Return(cipher_suite)); + EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(cipher_suite))); + + const std::string tls_version = "tls-version"; + EXPECT_CALL(*connection_info, tlsVersion()).WillOnce(ReturnRef(tls_version)); + EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(tls_version))); + + const std::string peer_cert = "peer-cert"; + EXPECT_CALL(*connection_info, urlEncodedPemEncodedPeerCertificate()) + .WillOnce(ReturnRef(peer_cert)); + EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(peer_cert))); + + const std::string peer_cert_chain = "peer-cert-chain"; + EXPECT_CALL(*connection_info, urlEncodedPemEncodedPeerCertificateChain()) + .WillOnce(ReturnRef(peer_cert_chain)); + EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(peer_cert_chain))); + + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true)); +} + +// Inspect stream info downstream SSL connection in a plain connection. 
+TEST_F(LuaHttpFilterTest, InspectStreamInfoDowstreamSslConnectionOnPlainConnection) { + const std::string SCRIPT{R"EOF( + function envoy_on_request(request_handle) + if request_handle:streamInfo():downstreamSslConnection() == nil then + request_handle:logTrace("downstreamSslConnection is nil") + end + end + )EOF"}; + + setup(SCRIPT); + + EXPECT_CALL(decoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(stream_info_)); + EXPECT_CALL(stream_info_, downstreamSslConnection()).WillRepeatedly(Return(nullptr)); + + EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq("downstreamSslConnection is nil"))); + + Http::TestRequestHeaderMapImpl request_headers{{":path", "/"}}; + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true)); +} + TEST_F(LuaHttpFilterTest, ImportPublicKey) { const std::string SCRIPT{R"EOF( function string.fromhex(str) @@ -1918,6 +2098,99 @@ TEST_F(LuaHttpFilterTest, SignatureVerify) { EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true)); } +// Test whether the route configuration can properly disable the Lua filter. 
+TEST_F(LuaHttpFilterTest, LuaFilterDisabled) { + envoy::extensions::filters::http::lua::v3::Lua proto_config; + proto_config.set_inline_code(ADD_HEADERS_SCRIPT); + envoy::extensions::filters::http::lua::v3::LuaPerRoute per_route_proto_config; + per_route_proto_config.set_disabled(true); + + setupConfig(proto_config, per_route_proto_config); + setupFilter(); + + EXPECT_CALL(decoder_callbacks_, clearRouteCache()); + + ON_CALL(decoder_callbacks_.route_->route_entry_, perFilterConfig(HttpFilterNames::get().Lua)) + .WillByDefault(Return(nullptr)); + + Http::TestRequestHeaderMapImpl request_headers_1{{":path", "/"}}; + + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_1, true)); + EXPECT_EQ("world", request_headers_1.get_("hello")); + + ON_CALL(decoder_callbacks_.route_->route_entry_, perFilterConfig(HttpFilterNames::get().Lua)) + .WillByDefault(Return(per_route_config_.get())); + + Http::TestRequestHeaderMapImpl request_headers_2{{":path", "/"}}; + + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_2, true)); + EXPECT_EQ(nullptr, request_headers_2.get(Http::LowerCaseString("hello"))); +} + +// Test whether the route can directly reuse the Lua code in the global configuration. 
+TEST_F(LuaHttpFilterTest, LuaFilterRefSourceCodes) { + const std::string SCRIPT_FOR_ROUTE_ONE{R"EOF( + function envoy_on_request(request_handle) + request_handle:headers():add("route_info", "This request is routed by ROUTE_ONE"); + end + )EOF"}; + const std::string SCRIPT_FOR_ROUTE_TWO{R"EOF( + function envoy_on_request(request_handle) + request_handle:headers():add("route_info", "This request is routed by ROUTE_TWO"); + end + )EOF"}; + EXPECT_CALL(decoder_callbacks_, clearRouteCache()); + envoy::extensions::filters::http::lua::v3::Lua proto_config; + proto_config.set_inline_code(ADD_HEADERS_SCRIPT); + envoy::config::core::v3::DataSource source1, source2; + source1.set_inline_string(SCRIPT_FOR_ROUTE_ONE); + source2.set_inline_string(SCRIPT_FOR_ROUTE_TWO); + proto_config.mutable_source_codes()->insert({"route_one.lua", source1}); + proto_config.mutable_source_codes()->insert({"route_two.lua", source2}); + + envoy::extensions::filters::http::lua::v3::LuaPerRoute per_route_proto_config; + per_route_proto_config.set_name("route_two.lua"); + + setupConfig(proto_config, per_route_proto_config); + setupFilter(); + + ON_CALL(decoder_callbacks_.route_->route_entry_, perFilterConfig(HttpFilterNames::get().Lua)) + .WillByDefault(Return(per_route_config_.get())); + + Http::TestRequestHeaderMapImpl request_headers{{":path", "/"}}; + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true)); + EXPECT_EQ("This request is routed by ROUTE_TWO", request_headers.get_("route_info")); +} + +// Lua filter do nothing when the referenced name does not exist. 
+TEST_F(LuaHttpFilterTest, LuaFilterRefSourceCodeNotExist) { + const std::string SCRIPT_FOR_ROUTE_ONE{R"EOF( + function envoy_on_request(request_handle) + request_handle:headers():add("route_info", "This request is routed by ROUTE_ONE"); + end + )EOF"}; + + envoy::extensions::filters::http::lua::v3::Lua proto_config; + proto_config.set_inline_code(ADD_HEADERS_SCRIPT); + envoy::config::core::v3::DataSource source1; + source1.set_inline_string(SCRIPT_FOR_ROUTE_ONE); + proto_config.mutable_source_codes()->insert({"route_one.lua", source1}); + + envoy::extensions::filters::http::lua::v3::LuaPerRoute per_route_proto_config; + // The global source codes do not contain a script named 'route_two.lua'. + per_route_proto_config.set_name("route_two.lua"); + + setupConfig(proto_config, per_route_proto_config); + setupFilter(); + + ON_CALL(decoder_callbacks_.route_->route_entry_, perFilterConfig(HttpFilterNames::get().Lua)) + .WillByDefault(Return(per_route_config_.get())); + + Http::TestRequestHeaderMapImpl request_headers{{":path", "/"}}; + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true)); + EXPECT_EQ(nullptr, request_headers.get(Http::LowerCaseString("hello"))); +} + } // namespace } // namespace Lua } // namespace HttpFilters diff --git a/test/extensions/filters/http/lua/lua_integration_test.cc b/test/extensions/filters/http/lua/lua_integration_test.cc index 082f6217e5718..b1c7bb61b0f8c 100644 --- a/test/extensions/filters/http/lua/lua_integration_test.cc +++ b/test/extensions/filters/http/lua/lua_integration_test.cc @@ -29,15 +29,8 @@ class LuaIntegrationTest : public testing::TestWithParamadd_clusters(); - lua_cluster->MergeFrom(bootstrap.static_resources().clusters()[0]); - lua_cluster->set_name("lua_cluster"); - - auto* alt_cluster = bootstrap.mutable_static_resources()->add_clusters(); - alt_cluster->MergeFrom(bootstrap.static_resources().clusters()[0]); - alt_cluster->set_name("alt_cluster"); - }); + // Create static 
clusters. + createClusters(); config_helper_.addConfigModifier( [domain]( @@ -79,6 +72,31 @@ class LuaIntegrationTest : public testing::TestWithParamadd_clusters(); + lua_cluster->MergeFrom(bootstrap.static_resources().clusters()[0]); + lua_cluster->set_name("lua_cluster"); + + auto* alt_cluster = bootstrap.mutable_static_resources()->add_clusters(); + alt_cluster->MergeFrom(bootstrap.static_resources().clusters()[0]); + alt_cluster->set_name("alt_cluster"); + }); + } + void cleanup() { codec_client_->close(); if (fake_lua_connection_ != nullptr) { @@ -353,7 +371,7 @@ name: lua cleanup(); EXPECT_TRUE(response->complete()); - EXPECT_EQ("403", response->headers().Status()->value().getStringView()); + EXPECT_EQ("403", response->headers().getStatusValue()); EXPECT_EQ("nope", response->body()); } @@ -404,7 +422,7 @@ name: envoy.filters.http.lua cleanup(); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } // Filter alters headers and changes route. @@ -437,7 +455,7 @@ name: lua cleanup(); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } // Should survive from 30 calls when calling streamInfo():dynamicMetadata(). This is a regression @@ -471,7 +489,7 @@ name: lua response->waitForEndStream(); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } cleanup(); @@ -571,8 +589,143 @@ name: lua response->waitForEndStream(); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); + + cleanup(); +} + +// Test whether LuaPerRoute works properly. 
Since this test is mainly for configuration, the Lua +// script can be very simple. +TEST_P(LuaIntegrationTest, BasicTestOfLuaPerRoute) { + const std::string FILTER_AND_CODE = + R"EOF( +name: lua +typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.lua.v3.Lua + inline_code: | + function envoy_on_request(request_handle) + request_handle:headers():add("code", "code_from_global") + end + source_codes: + hello.lua: + inline_string: | + function envoy_on_request(request_handle) + request_handle:headers():add("code", "code_from_hello") + end + byebye.lua: + inline_string: | + function envoy_on_request(request_handle) + request_handle:headers():add("code", "code_from_byebye") + end +)EOF"; + const std::string INITIAL_ROUTE_CONFIG = + R"EOF( +name: basic_lua_routes +virtual_hosts: +- name: rds_vhost_1 + domains: ["lua.per.route"] + routes: + - match: + prefix: "/lua/per/route/default" + route: + cluster: lua_cluster + - match: + prefix: "/lua/per/route/disabled" + route: + cluster: lua_cluster + typed_per_filter_config: + envoy.filters.http.lua: + "@type": type.googleapis.com/envoy.extensions.filters.http.lua.v3.LuaPerRoute + disabled: true + - match: + prefix: "/lua/per/route/hello" + route: + cluster: lua_cluster + typed_per_filter_config: + envoy.filters.http.lua: + "@type": type.googleapis.com/envoy.extensions.filters.http.lua.v3.LuaPerRoute + name: hello.lua + - match: + prefix: "/lua/per/route/byebye" + route: + cluster: lua_cluster + typed_per_filter_config: + envoy.filters.http.lua: + "@type": type.googleapis.com/envoy.extensions.filters.http.lua.v3.LuaPerRoute + name: byebye.lua + - match: + prefix: "/lua/per/route/nocode" + route: + cluster: lua_cluster + typed_per_filter_config: + envoy.filters.http.lua: + "@type": type.googleapis.com/envoy.extensions.filters.http.lua.v3.LuaPerRoute + name: nocode.lua +)EOF"; + + initializeWithYaml(FILTER_AND_CODE, INITIAL_ROUTE_CONFIG); + + codec_client_ = makeHttpConnection(lookupPort("http")); + + auto 
check_request = [this](const Http::TestRequestHeaderMapImpl& request_headers, + const std::string& expected_value) { + auto response = codec_client_->makeHeaderOnlyRequest(request_headers); + waitForNextUpstreamRequest(1); + + auto* entry = upstream_request_->headers().get(Http::LowerCaseString("code")); + if (!expected_value.empty()) { + EXPECT_EQ(expected_value, entry->value().getStringView()); + } else { + EXPECT_EQ(nullptr, entry); + } + upstream_request_->encodeHeaders(default_response_headers_, true); + response->waitForEndStream(); + + EXPECT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().getStatusValue()); + }; + + // Lua code defined in 'inline_code' will be executed by default. + Http::TestRequestHeaderMapImpl default_headers{{":method", "GET"}, + {":path", "/lua/per/route/default"}, + {":scheme", "http"}, + {":authority", "lua.per.route"}, + {"x-forwarded-for", "10.0.0.1"}}; + check_request(default_headers, "code_from_global"); + + // Test whether LuaPerRoute can disable the Lua filter. + Http::TestRequestHeaderMapImpl disabled_headers{{":method", "GET"}, + {":path", "/lua/per/route/disabled"}, + {":scheme", "http"}, + {":authority", "lua.per.route"}, + {"x-forwarded-for", "10.0.0.1"}}; + check_request(disabled_headers, ""); + + // Test whether LuaPerRoute can correctly reference Lua code defined in filter config. + Http::TestRequestHeaderMapImpl hello_headers{{":method", "GET"}, + {":path", "/lua/per/route/hello"}, + {":scheme", "http"}, + {":authority", "lua.per.route"}, + {"x-forwarded-for", "10.0.0.1"}}; + + check_request(hello_headers, "code_from_hello"); + + Http::TestRequestHeaderMapImpl byebye_headers{{":method", "GET"}, + {":path", "/lua/per/route/byebye"}, + {":scheme", "http"}, + {":authority", "lua.per.route"}, + {"x-forwarded-for", "10.0.0.1"}}; + check_request(byebye_headers, "code_from_byebye"); + + // When the name referenced by LuaPerRoute does not exist, Lua filter does nothing. 
+ Http::TestRequestHeaderMapImpl nocode_headers{{":method", "GET"}, + {":path", "/lua/per/route/nocode"}, + {":scheme", "http"}, + {":authority", "lua.per.route"}, + {"x-forwarded-for", "10.0.0.1"}}; + + check_request(nocode_headers, ""); cleanup(); } diff --git a/test/extensions/filters/http/lua/wrappers_test.cc b/test/extensions/filters/http/lua/wrappers_test.cc index 32c003951e1f4..bc4c3200d5207 100644 --- a/test/extensions/filters/http/lua/wrappers_test.cc +++ b/test/extensions/filters/http/lua/wrappers_test.cc @@ -50,7 +50,7 @@ TEST_F(LuaHeaderMapWrapperTest, Methods) { InSequence s; setup(SCRIPT); - Http::TestHeaderMapImpl headers; + Http::TestRequestHeaderMapImpl headers; HeaderMapWrapper::create(coroutine_->luaState(), headers, []() { return true; }); EXPECT_CALL(*this, testPrint("WORLD")); EXPECT_CALL(*this, testPrint("'hello' 'WORLD'")); @@ -86,7 +86,7 @@ TEST_F(LuaHeaderMapWrapperTest, ModifiableMethods) { InSequence s; setup(SCRIPT); - Http::TestHeaderMapImpl headers; + Http::TestRequestHeaderMapImpl headers; HeaderMapWrapper::create(coroutine_->luaState(), headers, []() { return false; }); start("shouldBeOk"); @@ -119,13 +119,13 @@ TEST_F(LuaHeaderMapWrapperTest, Replace) { InSequence s; setup(SCRIPT); - Http::TestHeaderMapImpl headers{{":path", "/"}, {"other_header", "hello"}}; + Http::TestRequestHeaderMapImpl headers{{":path", "/"}, {"other_header", "hello"}}; HeaderMapWrapper::create(coroutine_->luaState(), headers, []() { return true; }); start("callMe"); - EXPECT_EQ((Http::TestHeaderMapImpl{{":path", "/new_path"}, - {"other_header", "other_header_value"}, - {"new_header", "new_header_value"}}), + EXPECT_EQ((Http::TestRequestHeaderMapImpl{{":path", "/new_path"}, + {"other_header", "other_header_value"}, + {"new_header", "new_header_value"}}), headers); } @@ -142,7 +142,7 @@ TEST_F(LuaHeaderMapWrapperTest, ModifyDuringIteration) { InSequence s; setup(SCRIPT); - Http::TestHeaderMapImpl headers{{"foo", "bar"}}; + Http::TestRequestHeaderMapImpl 
headers{{"foo", "bar"}}; HeaderMapWrapper::create(coroutine_->luaState(), headers, []() { return true; }); EXPECT_THROW_WITH_MESSAGE(start("callMe"), Filters::Common::Lua::LuaException, "[string \"...\"]:4: header map cannot be modified while iterating"); @@ -167,7 +167,7 @@ TEST_F(LuaHeaderMapWrapperTest, ModifyAfterIteration) { InSequence s; setup(SCRIPT); - Http::TestHeaderMapImpl headers{{"foo", "bar"}}; + Http::TestRequestHeaderMapImpl headers{{"foo", "bar"}}; HeaderMapWrapper::create(coroutine_->luaState(), headers, []() { return true; }); EXPECT_CALL(*this, testPrint("'foo' 'bar'")); EXPECT_CALL(*this, testPrint("'foo' 'bar'")); @@ -188,7 +188,7 @@ TEST_F(LuaHeaderMapWrapperTest, DontFinishIteration) { InSequence s; setup(SCRIPT); - Http::TestHeaderMapImpl headers{{"foo", "bar"}, {"hello", "world"}}; + Http::TestRequestHeaderMapImpl headers{{"foo", "bar"}, {"hello", "world"}}; HeaderMapWrapper::create(coroutine_->luaState(), headers, []() { return true; }); EXPECT_THROW_WITH_MESSAGE( start("callMe"), Filters::Common::Lua::LuaException, @@ -208,7 +208,7 @@ TEST_F(LuaHeaderMapWrapperTest, IteratorAcrossYield) { InSequence s; setup(SCRIPT); - Http::TestHeaderMapImpl headers{{"foo", "bar"}, {"hello", "world"}}; + Http::TestRequestHeaderMapImpl headers{{"foo", "bar"}, {"hello", "world"}}; Filters::Common::Lua::LuaDeathRef wrapper( HeaderMapWrapper::create(coroutine_->luaState(), headers, []() { return true; }), true); yield_callback_ = [] {}; diff --git a/test/extensions/filters/http/on_demand/BUILD b/test/extensions/filters/http/on_demand/BUILD index 9a5acca7688f2..d9412a1370395 100644 --- a/test/extensions/filters/http/on_demand/BUILD +++ b/test/extensions/filters/http/on_demand/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git 
a/test/extensions/filters/http/on_demand/on_demand_filter_test.cc b/test/extensions/filters/http/on_demand/on_demand_filter_test.cc index dae119f291ef0..d226ee4dcec60 100644 --- a/test/extensions/filters/http/on_demand/on_demand_filter_test.cc +++ b/test/extensions/filters/http/on_demand/on_demand_filter_test.cc @@ -31,7 +31,7 @@ class OnDemandFilterTest : public testing::Test { // tests decodeHeaders() when no cached route is available and vhds is configured TEST_F(OnDemandFilterTest, TestDecodeHeaders) { - Http::RequestHeaderMapImpl headers; + Http::TestRequestHeaderMapImpl headers; std::shared_ptr route_config_ptr{new NiceMock()}; EXPECT_CALL(decoder_callbacks_, route()).WillOnce(Return(nullptr)); EXPECT_CALL(decoder_callbacks_, routeConfig()).Times(2).WillRepeatedly(Return(route_config_ptr)); @@ -42,13 +42,13 @@ TEST_F(OnDemandFilterTest, TestDecodeHeaders) { // tests decodeHeaders() when no cached route is available TEST_F(OnDemandFilterTest, TestDecodeHeadersWhenRouteAvailable) { - Http::RequestHeaderMapImpl headers; + Http::TestRequestHeaderMapImpl headers; EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, true)); } // tests decodeHeaders() when no route configuration is available TEST_F(OnDemandFilterTest, TestDecodeHeadersWhenRouteConfigIsNotAvailable) { - Http::RequestHeaderMapImpl headers; + Http::TestRequestHeaderMapImpl headers; std::shared_ptr route_config_ptr{new NiceMock()}; EXPECT_CALL(decoder_callbacks_, route()).WillOnce(Return(nullptr)); EXPECT_CALL(decoder_callbacks_, routeConfig()).WillOnce(Return(absl::nullopt)); @@ -56,7 +56,7 @@ TEST_F(OnDemandFilterTest, TestDecodeHeadersWhenRouteConfigIsNotAvailable) { } TEST_F(OnDemandFilterTest, TestDecodeTrailers) { - Http::RequestTrailerMapImpl headers; + Http::TestRequestTrailerMapImpl headers; EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(headers)); } diff --git a/test/extensions/filters/http/original_src/BUILD 
b/test/extensions/filters/http/original_src/BUILD index 18cfcae4e112a..86ba61d9d6bf7 100644 --- a/test/extensions/filters/http/original_src/BUILD +++ b/test/extensions/filters/http/original_src/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( @@ -29,7 +29,7 @@ envoy_extension_cc_test( "//source/extensions/filters/http/original_src:config", "//source/extensions/filters/http/original_src:config_lib", "//source/extensions/filters/http/original_src:original_src_lib", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "@envoy_api//envoy/extensions/filters/http/original_src/v3:pkg_cc_proto", ], ) diff --git a/test/extensions/filters/http/original_src/original_src_config_factory_test.cc b/test/extensions/filters/http/original_src/original_src_config_factory_test.cc index 7f52f2e566e54..4967371847dd8 100644 --- a/test/extensions/filters/http/original_src/original_src_config_factory_test.cc +++ b/test/extensions/filters/http/original_src/original_src_config_factory_test.cc @@ -5,7 +5,7 @@ #include "extensions/filters/http/original_src/original_src.h" #include "extensions/filters/http/original_src/original_src_config_factory.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git a/test/extensions/filters/http/original_src/original_src_test.cc b/test/extensions/filters/http/original_src/original_src_test.cc index 33b3ba002f116..def891c4094cb 100644 --- a/test/extensions/filters/http/original_src/original_src_test.cc +++ b/test/extensions/filters/http/original_src/original_src_test.cc @@ -134,7 +134,7 @@ TEST_F(OriginalSrcHttpTest, DecodeHeadersIpv4AddressBleachesPort) { } TEST_F(OriginalSrcHttpTest, FilterAddsTransparentOption) { - if 
(!ENVOY_SOCKET_IP_TRANSPARENT.has_value()) { + if (!ENVOY_SOCKET_IP_TRANSPARENT.hasValue()) { // The option isn't supported on this platform. Just skip the test. return; } @@ -153,7 +153,7 @@ TEST_F(OriginalSrcHttpTest, FilterAddsTransparentOption) { } TEST_F(OriginalSrcHttpTest, FilterAddsMarkOption) { - if (!ENVOY_SOCKET_SO_MARK.has_value()) { + if (!ENVOY_SOCKET_SO_MARK.hasValue()) { // The option isn't supported on this platform. Just skip the test. return; } @@ -175,7 +175,7 @@ TEST_F(OriginalSrcHttpTest, FilterAddsMarkOption) { } TEST_F(OriginalSrcHttpTest, Mark0NotAdded) { - if (!ENVOY_SOCKET_SO_MARK.has_value()) { + if (!ENVOY_SOCKET_SO_MARK.hasValue()) { // The option isn't supported on this platform. Just skip the test. return; } diff --git a/test/extensions/filters/http/ratelimit/BUILD b/test/extensions/filters/http/ratelimit/BUILD index becbc98059fb5..e209aa081608e 100644 --- a/test/extensions/filters/http/ratelimit/BUILD +++ b/test/extensions/filters/http/ratelimit/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( @@ -23,6 +23,7 @@ envoy_extension_cc_test( "//source/extensions/filters/common/ratelimit:ratelimit_lib", "//source/extensions/filters/http/ratelimit:ratelimit_lib", "//test/extensions/filters/common/ratelimit:ratelimit_mocks", + "//test/extensions/filters/common/ratelimit:ratelimit_utils", "//test/mocks/http:http_mocks", "//test/mocks/local_info:local_info_mocks", "//test/mocks/ratelimit:ratelimit_mocks", @@ -40,8 +41,42 @@ envoy_extension_cc_test( extension_name = "envoy.filters.http.ratelimit", deps = [ "//source/extensions/filters/http/ratelimit:config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", + "//test/mocks/server:instance_mocks", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", 
"@envoy_api//envoy/extensions/filters/http/ratelimit/v3:pkg_cc_proto", ], ) + +envoy_extension_cc_test( + name = "ratelimit_integration_test", + srcs = ["ratelimit_integration_test.cc"], + extension_name = "envoy.filters.http.ratelimit", + tags = ["fails_on_windows"], + deps = [ + "//source/common/buffer:zero_copy_input_stream_lib", + "//source/common/grpc:codec_lib", + "//source/common/grpc:common_lib", + "//source/extensions/filters/http/ratelimit:config", + "//test/common/grpc:grpc_client_integration_lib", + "//test/extensions/filters/common/ratelimit:ratelimit_utils", + "//test/integration:http_integration_lib", + "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", + "@envoy_api//envoy/config/listener/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/ratelimit/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto", + "@envoy_api//envoy/service/ratelimit/v3:pkg_cc_proto", + ], +) + +envoy_extension_cc_test( + name = "ratelimit_headers_test", + srcs = ["ratelimit_headers_test.cc"], + extension_name = "envoy.filters.http.cache", + deps = [ + "//source/extensions/filters/http/ratelimit:ratelimit_headers_lib", + "//test/extensions/filters/common/ratelimit:ratelimit_utils", + "//test/mocks/http:http_mocks", + "//test/test_common:utility_lib", + ], +) diff --git a/test/extensions/filters/http/ratelimit/config_test.cc b/test/extensions/filters/http/ratelimit/config_test.cc index fa0a8cf207696..a611082d43836 100644 --- a/test/extensions/filters/http/ratelimit/config_test.cc +++ b/test/extensions/filters/http/ratelimit/config_test.cc @@ -4,7 +4,8 @@ #include "extensions/filters/http/ratelimit/config.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" +#include "test/mocks/server/instance.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git a/test/extensions/filters/http/ratelimit/ratelimit_headers_test.cc 
b/test/extensions/filters/http/ratelimit/ratelimit_headers_test.cc new file mode 100644 index 0000000000000..9acc0ca72cbaa --- /dev/null +++ b/test/extensions/filters/http/ratelimit/ratelimit_headers_test.cc @@ -0,0 +1,90 @@ +#include +#include + +#include "extensions/filters/http/ratelimit/ratelimit_headers.h" + +#include "test/extensions/filters/common/ratelimit/utils.h" +#include "test/mocks/http/mocks.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace RateLimitFilter { +namespace { + +using Envoy::RateLimit::buildDescriptorStatus; +using Filters::Common::RateLimit::DescriptorStatusList; + +struct RateLimitHeadersTestCase { + Http::TestResponseHeaderMapImpl expected_headers; + DescriptorStatusList descriptor_statuses; +}; + +class RateLimitHeadersTest : public testing::TestWithParam { +public: + static const std::vector& getTestCases() { + CONSTRUCT_ON_FIRST_USE( + std::vector, + // Empty descriptor statuses + {{}, {}}, + // Status with no current limit is ignored + {{{"x-ratelimit-limit", "4, 4;w=3600;name=\"second\""}, + {"x-ratelimit-remaining", "5"}, + {"x-ratelimit-reset", "6"}}, + {// passing 0 will cause it not to set a current limit + buildDescriptorStatus(0, + envoy::service::ratelimit::v3::RateLimitResponse::RateLimit::MINUTE, + "first", 2, 3), + buildDescriptorStatus(4, + envoy::service::ratelimit::v3::RateLimitResponse::RateLimit::HOUR, + "second", 5, 6)}}, + // Empty name is not appended + {{{"x-ratelimit-limit", "1, 1;w=60"}, + {"x-ratelimit-remaining", "2"}, + {"x-ratelimit-reset", "3"}}, + { + // passing 0 will cause it not to set a current limit + buildDescriptorStatus( + 1, envoy::service::ratelimit::v3::RateLimitResponse::RateLimit::MINUTE, "", 2, 3), + }}, + // Unknown unit is ignored in window, but not overall + {{{"x-ratelimit-limit", "1, 4;w=3600;name=\"second\""}, + {"x-ratelimit-remaining", "2"}, + {"x-ratelimit-reset", "3"}}, + {// passing 0 will 
cause it not to set a current limit + buildDescriptorStatus( + 1, envoy::service::ratelimit::v3::RateLimitResponse::RateLimit::UNKNOWN, "first", 2, + 3), + buildDescriptorStatus(4, + envoy::service::ratelimit::v3::RateLimitResponse::RateLimit::HOUR, + "second", 5, 6)}}, + // Normal case, multiple arguments + {{{"x-ratelimit-limit", "1, 1;w=60;name=\"first\", 4;w=3600;name=\"second\""}, + {"x-ratelimit-remaining", "2"}, + {"x-ratelimit-reset", "3"}}, + {buildDescriptorStatus(1, + envoy::service::ratelimit::v3::RateLimitResponse::RateLimit::MINUTE, + "first", 2, 3), + buildDescriptorStatus(4, + envoy::service::ratelimit::v3::RateLimitResponse::RateLimit::HOUR, + "second", 5, 6)}}, ); + } +}; + +INSTANTIATE_TEST_SUITE_P(RateLimitHeadersTest, RateLimitHeadersTest, + testing::ValuesIn(RateLimitHeadersTest::getTestCases())); + +TEST_P(RateLimitHeadersTest, RateLimitHeadersTest) { + Http::ResponseHeaderMapPtr result = XRateLimitHeaderUtils::create( + std::make_unique(GetParam().descriptor_statuses)); + EXPECT_THAT(result, HeaderMapEqual(&GetParam().expected_headers)); +} + +} // namespace +} // namespace RateLimitFilter +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/integration/ratelimit_integration_test.cc b/test/extensions/filters/http/ratelimit/ratelimit_integration_test.cc similarity index 66% rename from test/integration/ratelimit_integration_test.cc rename to test/extensions/filters/http/ratelimit/ratelimit_integration_test.cc index 76ff3a39819b6..e8c86a00e06d4 100644 --- a/test/integration/ratelimit_integration_test.cc +++ b/test/extensions/filters/http/ratelimit/ratelimit_integration_test.cc @@ -10,8 +10,10 @@ #include "common/grpc/common.h" #include "extensions/filters/http/ratelimit/config.h" +#include "extensions/filters/http/ratelimit/ratelimit_headers.h" #include "test/common/grpc/grpc_client_integration.h" +#include "test/extensions/filters/common/ratelimit/utils.h" #include 
"test/integration/http_integration.h" #include "gtest/gtest.h" @@ -20,7 +22,7 @@ namespace Envoy { namespace { // Tests Ratelimit functionality with config in filter. -class RatelimitIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, +class RatelimitIntegrationTest : public Grpc::VersionedGrpcClientIntegrationParamTest, public HttpIntegrationTest { public: RatelimitIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, ipVersion()) {} @@ -44,8 +46,10 @@ class RatelimitIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, // enhance rate limit filter config based on the configuration of test. TestUtility::loadFromYaml(base_filter_config_, proto_config_); proto_config_.set_failure_mode_deny(failure_mode_deny_); + proto_config_.set_enable_x_ratelimit_headers(enable_x_ratelimit_headers_); setGrpcService(*proto_config_.mutable_rate_limit_service()->mutable_grpc_service(), "ratelimit", fake_upstreams_.back()->localAddress()); + proto_config_.mutable_rate_limit_service()->set_transport_api_version(apiVersion()); envoy::config::listener::v3::Filter ratelimit_filter; ratelimit_filter.set_name("envoy.filters.http.ratelimit"); @@ -85,11 +89,11 @@ class RatelimitIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, RELEASE_ASSERT(result, result.message()); result = ratelimit_request_->waitForEndStream(*dispatcher_); RELEASE_ASSERT(result, result.message()); - EXPECT_EQ("POST", ratelimit_request_->headers().Method()->value().getStringView()); - EXPECT_EQ("/envoy.service.ratelimit.v2.RateLimitService/ShouldRateLimit", - ratelimit_request_->headers().Path()->value().getStringView()); - EXPECT_EQ("application/grpc", - ratelimit_request_->headers().ContentType()->value().getStringView()); + EXPECT_EQ("POST", ratelimit_request_->headers().getMethodValue()); + EXPECT_EQ(TestUtility::getVersionedMethodPath("envoy.service.ratelimit.{}.RateLimitService", + "ShouldRateLimit", apiVersion()), + ratelimit_request_->headers().getPathValue()); + 
EXPECT_EQ("application/grpc", ratelimit_request_->headers().getContentTypeValue()); envoy::service::ratelimit::v3::RateLimitRequest expected_request_msg; expected_request_msg.set_domain("some_domain"); @@ -116,44 +120,40 @@ class RatelimitIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, EXPECT_EQ(request_size_, upstream_request_->bodyLength()); EXPECT_TRUE(response_->complete()); - EXPECT_EQ("200", response_->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response_->headers().getStatusValue()); EXPECT_EQ(response_size_, response_->body().size()); } void waitForFailedUpstreamResponse(uint32_t response_code) { response_->waitForEndStream(); EXPECT_TRUE(response_->complete()); - EXPECT_EQ(std::to_string(response_code), - response_->headers().Status()->value().getStringView()); + EXPECT_EQ(std::to_string(response_code), response_->headers().getStatusValue()); } - void sendRateLimitResponse(envoy::service::ratelimit::v3::RateLimitResponse::Code code, - const Http::ResponseHeaderMap& response_headers_to_add, - const Http::RequestHeaderMap& request_headers_to_add) { + void sendRateLimitResponse( + envoy::service::ratelimit::v3::RateLimitResponse::Code code, + const Extensions::Filters::Common::RateLimit::DescriptorStatusList& descriptor_statuses, + const Http::ResponseHeaderMap& response_headers_to_add, + const Http::RequestHeaderMap& request_headers_to_add) { ratelimit_request_->startGrpcStream(); envoy::service::ratelimit::v3::RateLimitResponse response_msg; response_msg.set_overall_code(code); + *response_msg.mutable_statuses() = {descriptor_statuses.begin(), descriptor_statuses.end()}; response_headers_to_add.iterate( - [](const Http::HeaderEntry& h, void* context) -> Http::HeaderMap::Iterate { - auto header = static_cast(context) - ->mutable_response_headers_to_add() - ->Add(); + [&response_msg](const Http::HeaderEntry& h) -> Http::HeaderMap::Iterate { + auto header = response_msg.mutable_response_headers_to_add()->Add(); 
header->set_key(std::string(h.key().getStringView())); header->set_value(std::string(h.value().getStringView())); return Http::HeaderMap::Iterate::Continue; - }, - &response_msg); + }); request_headers_to_add.iterate( - [](const Http::HeaderEntry& h, void* context) -> Http::HeaderMap::Iterate { - auto header = static_cast(context) - ->mutable_request_headers_to_add() - ->Add(); + [&response_msg](const Http::HeaderEntry& h) -> Http::HeaderMap::Iterate { + auto header = response_msg.mutable_request_headers_to_add()->Add(); header->set_key(std::string(h.key().getStringView())); header->set_value(std::string(h.value().getStringView())); return Http::HeaderMap::Iterate::Continue; - }, - &response_msg); + }); ratelimit_request_->sendGrpcMessage(response_msg); ratelimit_request_->finishGrpcStream(Grpc::Status::Ok); } @@ -174,8 +174,8 @@ class RatelimitIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, void basicFlow() { initiateClientConnection(); waitForRatelimitRequest(); - sendRateLimitResponse(envoy::service::ratelimit::v3::RateLimitResponse::OK, - Http::ResponseHeaderMapImpl{}, Http::RequestHeaderMapImpl{}); + sendRateLimitResponse(envoy::service::ratelimit::v3::RateLimitResponse::OK, {}, + Http::TestResponseHeaderMapImpl{}, Http::TestRequestHeaderMapImpl{}); waitForSuccessfulUpstreamResponse(); cleanup(); @@ -191,6 +191,8 @@ class RatelimitIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, const uint64_t request_size_ = 1024; const uint64_t response_size_ = 512; bool failure_mode_deny_ = false; + envoy::extensions::filters::http::ratelimit::v3::RateLimit::XRateLimitHeadersRFCVersion + enable_x_ratelimit_headers_ = envoy::extensions::filters::http::ratelimit::v3::RateLimit::OFF; envoy::extensions::filters::http::ratelimit::v3::RateLimit proto_config_{}; const std::string base_filter_config_ = R"EOF( domain: some_domain @@ -204,10 +206,21 @@ class RatelimitFailureModeIntegrationTest : public RatelimitIntegrationTest { 
RatelimitFailureModeIntegrationTest() { failure_mode_deny_ = true; } }; +// Test verifies that response headers provided by filter work. +class RatelimitFilterHeadersEnabledIntegrationTest : public RatelimitIntegrationTest { +public: + RatelimitFilterHeadersEnabledIntegrationTest() { + enable_x_ratelimit_headers_ = + envoy::extensions::filters::http::ratelimit::v3::RateLimit::DRAFT_VERSION_03; + } +}; + INSTANTIATE_TEST_SUITE_P(IpVersionsClientType, RatelimitIntegrationTest, - GRPC_CLIENT_INTEGRATION_PARAMS); + VERSIONED_GRPC_CLIENT_INTEGRATION_PARAMS); INSTANTIATE_TEST_SUITE_P(IpVersionsClientType, RatelimitFailureModeIntegrationTest, - GRPC_CLIENT_INTEGRATION_PARAMS); + VERSIONED_GRPC_CLIENT_INTEGRATION_PARAMS); +INSTANTIATE_TEST_SUITE_P(IpVersionsClientType, RatelimitFilterHeadersEnabledIntegrationTest, + VERSIONED_GRPC_CLIENT_INTEGRATION_PARAMS); TEST_P(RatelimitIntegrationTest, Ok) { basicFlow(); } @@ -218,27 +231,23 @@ TEST_P(RatelimitIntegrationTest, OkWithHeaders) { {"x-ratelimit-remaining", "500"}}; Http::TestRequestHeaderMapImpl request_headers_to_add{{"x-ratelimit-done", "true"}}; - sendRateLimitResponse(envoy::service::ratelimit::v3::RateLimitResponse::OK, + sendRateLimitResponse(envoy::service::ratelimit::v3::RateLimitResponse::OK, {}, ratelimit_response_headers, request_headers_to_add); waitForSuccessfulUpstreamResponse(); ratelimit_response_headers.iterate( - [](const Http::HeaderEntry& entry, void* context) -> Http::HeaderMap::Iterate { - IntegrationStreamDecoder* response = static_cast(context); + [response = response_.get()](const Http::HeaderEntry& entry) -> Http::HeaderMap::Iterate { Http::LowerCaseString lower_key{std::string(entry.key().getStringView())}; EXPECT_EQ(entry.value(), response->headers().get(lower_key)->value().getStringView()); return Http::HeaderMap::Iterate::Continue; - }, - response_.get()); + }); - request_headers_to_add.iterate( - [](const Http::HeaderEntry& entry, void* context) -> Http::HeaderMap::Iterate { - FakeStream* 
upstream = static_cast(context); - Http::LowerCaseString lower_key{std::string(entry.key().getStringView())}; - EXPECT_EQ(entry.value(), upstream->headers().get(lower_key)->value().getStringView()); - return Http::HeaderMap::Iterate::Continue; - }, - upstream_request_.get()); + request_headers_to_add.iterate([upstream = upstream_request_.get()]( + const Http::HeaderEntry& entry) -> Http::HeaderMap::Iterate { + Http::LowerCaseString lower_key{std::string(entry.key().getStringView())}; + EXPECT_EQ(entry.value(), upstream->headers().get(lower_key)->value().getStringView()); + return Http::HeaderMap::Iterate::Continue; + }); cleanup(); @@ -250,8 +259,8 @@ TEST_P(RatelimitIntegrationTest, OkWithHeaders) { TEST_P(RatelimitIntegrationTest, OverLimit) { initiateClientConnection(); waitForRatelimitRequest(); - sendRateLimitResponse(envoy::service::ratelimit::v3::RateLimitResponse::OVER_LIMIT, - Http::ResponseHeaderMapImpl{}, Http::RequestHeaderMapImpl{}); + sendRateLimitResponse(envoy::service::ratelimit::v3::RateLimitResponse::OVER_LIMIT, {}, + Http::TestResponseHeaderMapImpl{}, Http::TestRequestHeaderMapImpl{}); waitForFailedUpstreamResponse(429); cleanup(); @@ -265,18 +274,16 @@ TEST_P(RatelimitIntegrationTest, OverLimitWithHeaders) { waitForRatelimitRequest(); Http::TestResponseHeaderMapImpl ratelimit_response_headers{ {"x-ratelimit-limit", "1000"}, {"x-ratelimit-remaining", "0"}, {"retry-after", "33"}}; - sendRateLimitResponse(envoy::service::ratelimit::v3::RateLimitResponse::OVER_LIMIT, - ratelimit_response_headers, Http::RequestHeaderMapImpl{}); + sendRateLimitResponse(envoy::service::ratelimit::v3::RateLimitResponse::OVER_LIMIT, {}, + ratelimit_response_headers, Http::TestRequestHeaderMapImpl{}); waitForFailedUpstreamResponse(429); ratelimit_response_headers.iterate( - [](const Http::HeaderEntry& entry, void* context) -> Http::HeaderMap::Iterate { - IntegrationStreamDecoder* response = static_cast(context); + [response = response_.get()](const Http::HeaderEntry& 
entry) -> Http::HeaderMap::Iterate { Http::LowerCaseString lower_key{std::string(entry.key().getStringView())}; EXPECT_EQ(entry.value(), response->headers().get(lower_key)->value().getStringView()); return Http::HeaderMap::Iterate::Continue; - }, - response_.get()); + }); cleanup(); @@ -359,5 +366,76 @@ TEST_P(RatelimitFailureModeIntegrationTest, ErrorWithFailureModeOff) { EXPECT_EQ(nullptr, test_server_->counter("cluster.cluster_0.ratelimit.failure_mode_allowed")); } +TEST_P(RatelimitFilterHeadersEnabledIntegrationTest, OkWithFilterHeaders) { + initiateClientConnection(); + waitForRatelimitRequest(); + + Extensions::Filters::Common::RateLimit::DescriptorStatusList descriptor_statuses{ + Envoy::RateLimit::buildDescriptorStatus( + 1, envoy::service::ratelimit::v3::RateLimitResponse::RateLimit::MINUTE, "first", 2, 3), + Envoy::RateLimit::buildDescriptorStatus( + 4, envoy::service::ratelimit::v3::RateLimitResponse::RateLimit::HOUR, "second", 5, 6)}; + sendRateLimitResponse(envoy::service::ratelimit::v3::RateLimitResponse::OK, descriptor_statuses, + Http::TestResponseHeaderMapImpl{}, Http::TestRequestHeaderMapImpl{}); + waitForSuccessfulUpstreamResponse(); + + EXPECT_THAT( + response_.get()->headers(), + Http::HeaderValueOf( + Extensions::HttpFilters::RateLimitFilter::XRateLimitHeaders::get().XRateLimitLimit, + "1, 1;w=60;name=\"first\", 4;w=3600;name=\"second\"")); + EXPECT_THAT( + response_.get()->headers(), + Http::HeaderValueOf( + Extensions::HttpFilters::RateLimitFilter::XRateLimitHeaders::get().XRateLimitRemaining, + "2")); + EXPECT_THAT( + response_.get()->headers(), + Http::HeaderValueOf( + Extensions::HttpFilters::RateLimitFilter::XRateLimitHeaders::get().XRateLimitReset, "3")); + + cleanup(); + + EXPECT_EQ(1, test_server_->counter("cluster.cluster_0.ratelimit.ok")->value()); + EXPECT_EQ(nullptr, test_server_->counter("cluster.cluster_0.ratelimit.over_limit")); + EXPECT_EQ(nullptr, test_server_->counter("cluster.cluster_0.ratelimit.error")); +} + 
+TEST_P(RatelimitFilterHeadersEnabledIntegrationTest, OverLimitWithFilterHeaders) { + initiateClientConnection(); + waitForRatelimitRequest(); + + Extensions::Filters::Common::RateLimit::DescriptorStatusList descriptor_statuses{ + Envoy::RateLimit::buildDescriptorStatus( + 1, envoy::service::ratelimit::v3::RateLimitResponse::RateLimit::MINUTE, "first", 2, 3), + Envoy::RateLimit::buildDescriptorStatus( + 4, envoy::service::ratelimit::v3::RateLimitResponse::RateLimit::HOUR, "second", 5, 6)}; + sendRateLimitResponse(envoy::service::ratelimit::v3::RateLimitResponse::OVER_LIMIT, + descriptor_statuses, Http::TestResponseHeaderMapImpl{}, + Http::TestRequestHeaderMapImpl{}); + waitForFailedUpstreamResponse(429); + + EXPECT_THAT( + response_.get()->headers(), + Http::HeaderValueOf( + Extensions::HttpFilters::RateLimitFilter::XRateLimitHeaders::get().XRateLimitLimit, + "1, 1;w=60;name=\"first\", 4;w=3600;name=\"second\"")); + EXPECT_THAT( + response_.get()->headers(), + Http::HeaderValueOf( + Extensions::HttpFilters::RateLimitFilter::XRateLimitHeaders::get().XRateLimitRemaining, + "2")); + EXPECT_THAT( + response_.get()->headers(), + Http::HeaderValueOf( + Extensions::HttpFilters::RateLimitFilter::XRateLimitHeaders::get().XRateLimitReset, "3")); + + cleanup(); + + EXPECT_EQ(nullptr, test_server_->counter("cluster.cluster_0.ratelimit.ok")); + EXPECT_EQ(1, test_server_->counter("cluster.cluster_0.ratelimit.over_limit")->value()); + EXPECT_EQ(nullptr, test_server_->counter("cluster.cluster_0.ratelimit.error")); +} + } // namespace } // namespace Envoy diff --git a/test/extensions/filters/http/ratelimit/ratelimit_test.cc b/test/extensions/filters/http/ratelimit/ratelimit_test.cc index 625801433f743..a0d3a31d8a2c1 100644 --- a/test/extensions/filters/http/ratelimit/ratelimit_test.cc +++ b/test/extensions/filters/http/ratelimit/ratelimit_test.cc @@ -12,6 +12,7 @@ #include "extensions/filters/http/ratelimit/ratelimit.h" #include "test/extensions/filters/common/ratelimit/mocks.h" 
+#include "test/extensions/filters/common/ratelimit/utils.h" #include "test/mocks/http/mocks.h" #include "test/mocks/local_info/mocks.h" #include "test/mocks/ratelimit/mocks.h" @@ -74,6 +75,11 @@ class HttpRateLimitFilterTest : public testing::Test { failure_mode_deny: true )EOF"; + const std::string enable_x_ratelimit_headers_config_ = R"EOF( + domain: foo + enable_x_ratelimit_headers: DRAFT_VERSION_03 + )EOF"; + const std::string filter_config_ = R"EOF( domain: foo )EOF"; @@ -155,8 +161,8 @@ TEST_F(HttpRateLimitFilterTest, NoApplicableRateLimit) { TEST_F(HttpRateLimitFilterTest, NoDescriptor) { SetUpTest(filter_config_); - EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _)).Times(1); - EXPECT_CALL(vh_rate_limit_, populateDescriptors(_, _, _, _, _)).Times(1); + EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _, _)).Times(1); + EXPECT_CALL(vh_rate_limit_, populateDescriptors(_, _, _, _, _, _)).Times(1); EXPECT_CALL(*client_, limit(_, _, _, _)).Times(0); EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false)); EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false)); @@ -190,7 +196,7 @@ TEST_F(HttpRateLimitFilterTest, OkResponse) { EXPECT_CALL(filter_callbacks_.route_->route_entry_.rate_limit_policy_, getApplicableRateLimit(0)) .Times(1); - EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _)) + EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _, _)) .WillOnce(SetArgReferee<1>(descriptor_)); EXPECT_CALL(filter_callbacks_.route_->route_entry_.virtual_host_.rate_limit_policy_, @@ -223,7 +229,8 @@ TEST_F(HttpRateLimitFilterTest, OkResponse) { EXPECT_CALL(filter_callbacks_.stream_info_, setResponseFlag(StreamInfo::ResponseFlag::RateLimited)) .Times(0); - request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OK, nullptr, nullptr); + request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OK, nullptr, nullptr, + nullptr); 
EXPECT_EQ( 1U, filter_callbacks_.clusterInfo()->statsScope().counterFromStatName(ratelimit_ok_).value()); @@ -236,7 +243,7 @@ TEST_F(HttpRateLimitFilterTest, OkResponseWithHeaders) { EXPECT_CALL(filter_callbacks_.route_->route_entry_.rate_limit_policy_, getApplicableRateLimit(0)) .Times(1); - EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _)) + EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _, _)) .WillOnce(SetArgReferee<1>(descriptor_)); EXPECT_CALL(filter_callbacks_.route_->route_entry_.virtual_host_.rate_limit_policy_, @@ -269,15 +276,15 @@ TEST_F(HttpRateLimitFilterTest, OkResponseWithHeaders) { .Times(0); Http::HeaderMapPtr request_headers_to_add{ - new Http::TestHeaderMapImpl{{"x-rls-rate-limited", "true"}}}; - Http::HeaderMapPtr rl_headers{ - new Http::TestHeaderMapImpl{{"x-ratelimit-limit", "1000"}, {"x-ratelimit-remaining", "500"}}}; + new Http::TestRequestHeaderMapImpl{{"x-rls-rate-limited", "true"}}}; + Http::HeaderMapPtr rl_headers{new Http::TestResponseHeaderMapImpl{ + {"x-ratelimit-limit", "1000"}, {"x-ratelimit-remaining", "500"}}}; request_callbacks_->complete( - Filters::Common::RateLimit::LimitStatus::OK, + Filters::Common::RateLimit::LimitStatus::OK, nullptr, Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl(*rl_headers)}, Http::RequestHeaderMapPtr{new Http::TestRequestHeaderMapImpl(*request_headers_to_add)}); - Http::TestHeaderMapImpl expected_headers(*rl_headers); + Http::TestResponseHeaderMapImpl expected_headers(*rl_headers); Http::TestResponseHeaderMapImpl response_headers; EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers, false)); EXPECT_EQ(true, (expected_headers == response_headers)); @@ -287,11 +294,71 @@ TEST_F(HttpRateLimitFilterTest, OkResponseWithHeaders) { 1U, filter_callbacks_.clusterInfo()->statsScope().counterFromStatName(ratelimit_ok_).value()); } +TEST_F(HttpRateLimitFilterTest, OkResponseWithFilterHeaders) { + 
SetUpTest(enable_x_ratelimit_headers_config_); + InSequence s; + + EXPECT_CALL(filter_callbacks_.route_->route_entry_.rate_limit_policy_, getApplicableRateLimit(0)) + .Times(1); + + EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _, _)) + .WillOnce(SetArgReferee<1>(descriptor_)); + + EXPECT_CALL(filter_callbacks_.route_->route_entry_.virtual_host_.rate_limit_policy_, + getApplicableRateLimit(0)) + .Times(1); + + EXPECT_CALL(*client_, limit(_, "foo", + testing::ContainerEq(std::vector{ + {{{"descriptor_key", "descriptor_value"}}}}), + _)) + .WillOnce( + WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { + request_callbacks_ = &callbacks; + }))); + + request_headers_.addCopy(Http::Headers::get().RequestId, "requestid"); + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + filter_->decodeHeaders(request_headers_, false)); + EXPECT_EQ(Http::FilterDataStatus::StopIterationAndWatermark, filter_->decodeData(data_, false)); + EXPECT_EQ(Http::FilterTrailersStatus::StopIteration, filter_->decodeTrailers(request_trailers_)); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, + filter_->encode100ContinueHeaders(response_headers_)); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers_, false)); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(response_data_, false)); + EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers_)); + + EXPECT_CALL(filter_callbacks_, continueDecoding()); + EXPECT_CALL(filter_callbacks_.stream_info_, + setResponseFlag(StreamInfo::ResponseFlag::RateLimited)) + .Times(0); + + auto descriptor_statuses = { + Envoy::RateLimit::buildDescriptorStatus( + 1, envoy::service::ratelimit::v3::RateLimitResponse::RateLimit::MINUTE, "first", 2, 3), + Envoy::RateLimit::buildDescriptorStatus( + 4, envoy::service::ratelimit::v3::RateLimitResponse::RateLimit::HOUR, "second", 5, 6)}; + auto descriptor_statuses_ptr = + 
std::make_unique(descriptor_statuses); + request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OK, + std::move(descriptor_statuses_ptr), nullptr, nullptr); + + Http::TestResponseHeaderMapImpl expected_headers{ + {"x-ratelimit-limit", "1, 1;w=60;name=\"first\", 4;w=3600;name=\"second\""}, + {"x-ratelimit-remaining", "2"}, + {"x-ratelimit-reset", "3"}}; + Http::TestResponseHeaderMapImpl response_headers; + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers, false)); + EXPECT_THAT(response_headers, HeaderMapEqualRef(&expected_headers)); + EXPECT_EQ( + 1U, filter_callbacks_.clusterInfo()->statsScope().counterFromStatName(ratelimit_ok_).value()); +} + TEST_F(HttpRateLimitFilterTest, ImmediateOkResponse) { SetUpTest(filter_config_); InSequence s; - EXPECT_CALL(vh_rate_limit_, populateDescriptors(_, _, _, _, _)) + EXPECT_CALL(vh_rate_limit_, populateDescriptors(_, _, _, _, _, _)) .WillOnce(SetArgReferee<1>(descriptor_)); EXPECT_CALL(*client_, limit(_, "foo", @@ -300,7 +367,8 @@ TEST_F(HttpRateLimitFilterTest, ImmediateOkResponse) { _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { - callbacks.complete(Filters::Common::RateLimit::LimitStatus::OK, nullptr, nullptr); + callbacks.complete(Filters::Common::RateLimit::LimitStatus::OK, nullptr, nullptr, + nullptr); }))); EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0); @@ -321,7 +389,7 @@ TEST_F(HttpRateLimitFilterTest, ImmediateErrorResponse) { SetUpTest(filter_config_); InSequence s; - EXPECT_CALL(vh_rate_limit_, populateDescriptors(_, _, _, _, _)) + EXPECT_CALL(vh_rate_limit_, populateDescriptors(_, _, _, _, _, _)) .WillOnce(SetArgReferee<1>(descriptor_)); EXPECT_CALL(*client_, limit(_, "foo", @@ -330,7 +398,8 @@ TEST_F(HttpRateLimitFilterTest, ImmediateErrorResponse) { _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { - 
callbacks.complete(Filters::Common::RateLimit::LimitStatus::Error, nullptr, nullptr); + callbacks.complete(Filters::Common::RateLimit::LimitStatus::Error, nullptr, nullptr, + nullptr); }))); EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0); @@ -356,7 +425,7 @@ TEST_F(HttpRateLimitFilterTest, ErrorResponse) { SetUpTest(filter_config_); InSequence s; - EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _)) + EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _, _)) .WillOnce(SetArgReferee<1>(descriptor_)); EXPECT_CALL(*client_, limit(_, _, _, _)) .WillOnce( @@ -368,7 +437,8 @@ TEST_F(HttpRateLimitFilterTest, ErrorResponse) { filter_->decodeHeaders(request_headers_, false)); EXPECT_CALL(filter_callbacks_, continueDecoding()); - request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::Error, nullptr, nullptr); + request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::Error, nullptr, nullptr, + nullptr); EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false)); EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_)); @@ -389,7 +459,7 @@ TEST_F(HttpRateLimitFilterTest, ErrorResponseWithFailureModeAllowOff) { SetUpTest(fail_close_config_); InSequence s; - EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _)) + EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _, _)) .WillOnce(SetArgReferee<1>(descriptor_)); EXPECT_CALL(*client_, limit(_, _, _, _)) .WillOnce( @@ -400,7 +470,8 @@ TEST_F(HttpRateLimitFilterTest, ErrorResponseWithFailureModeAllowOff) { EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, filter_->decodeHeaders(request_headers_, false)); - request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::Error, nullptr, nullptr); + request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::Error, nullptr, nullptr, + nullptr); EXPECT_CALL(filter_callbacks_.stream_info_, 
setResponseFlag(StreamInfo::ResponseFlag::RateLimitServiceError)) @@ -420,7 +491,7 @@ TEST_F(HttpRateLimitFilterTest, LimitResponse) { SetUpTest(filter_config_); InSequence s; - EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _)) + EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _, _)) .WillOnce(SetArgReferee<1>(descriptor_)); EXPECT_CALL(*client_, limit(_, _, _, _)) .WillOnce( @@ -440,8 +511,8 @@ TEST_F(HttpRateLimitFilterTest, LimitResponse) { EXPECT_CALL(filter_callbacks_.stream_info_, setResponseFlag(StreamInfo::ResponseFlag::RateLimited)); - request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OverLimit, std::move(h), - nullptr); + request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OverLimit, nullptr, + std::move(h), nullptr); EXPECT_EQ(1U, filter_callbacks_.clusterInfo() ->statsScope() @@ -460,7 +531,7 @@ TEST_F(HttpRateLimitFilterTest, LimitResponseWithHeaders) { SetUpTest(filter_config_); InSequence s; - EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _)) + EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _, _)) .WillOnce(SetArgReferee<1>(descriptor_)); EXPECT_CALL(*client_, limit(_, _, _, _)) .WillOnce( @@ -476,9 +547,9 @@ TEST_F(HttpRateLimitFilterTest, LimitResponseWithHeaders) { EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(response_data_, false)); EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers_)); - Http::HeaderMapPtr rl_headers{new Http::TestHeaderMapImpl{ + Http::HeaderMapPtr rl_headers{new Http::TestResponseHeaderMapImpl{ {"x-ratelimit-limit", "1000"}, {"x-ratelimit-remaining", "0"}, {"retry-after", "33"}}}; - Http::TestHeaderMapImpl expected_headers(*rl_headers); + Http::TestResponseHeaderMapImpl expected_headers(*rl_headers); expected_headers.addCopy(":status", "429"); expected_headers.addCopy("x-envoy-ratelimited", Http::Headers::get().EnvoyRateLimitedValues.True); @@ -488,12 +559,12 @@ 
TEST_F(HttpRateLimitFilterTest, LimitResponseWithHeaders) { setResponseFlag(StreamInfo::ResponseFlag::RateLimited)); Http::HeaderMapPtr request_headers_to_add{ - new Http::TestHeaderMapImpl{{"x-rls-rate-limited", "true"}}}; + new Http::TestRequestHeaderMapImpl{{"x-rls-rate-limited", "true"}}}; Http::ResponseHeaderMapPtr h{new Http::TestResponseHeaderMapImpl(*rl_headers)}; Http::RequestHeaderMapPtr uh{new Http::TestRequestHeaderMapImpl(*request_headers_to_add)}; - request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OverLimit, std::move(h), - std::move(uh)); + request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OverLimit, nullptr, + std::move(h), std::move(uh)); EXPECT_THAT(*request_headers_to_add, Not(IsSubsetOfHeaders(request_headers_))); EXPECT_EQ(1U, filter_callbacks_.clusterInfo() @@ -508,11 +579,63 @@ TEST_F(HttpRateLimitFilterTest, LimitResponseWithHeaders) { filter_callbacks_.clusterInfo()->statsScope().counterFromStatName(upstream_rq_429_).value()); } +TEST_F(HttpRateLimitFilterTest, LimitResponseWithFilterHeaders) { + SetUpTest(enable_x_ratelimit_headers_config_); + InSequence s; + + EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _, _)) + .WillOnce(SetArgReferee<1>(descriptor_)); + EXPECT_CALL(*client_, limit(_, _, _, _)) + .WillOnce( + WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { + request_callbacks_ = &callbacks; + }))); + + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + filter_->decodeHeaders(request_headers_, false)); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, + filter_->encode100ContinueHeaders(response_headers_)); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers_, false)); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(response_data_, false)); + EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers_)); + + Http::TestResponseHeaderMapImpl 
expected_headers{ + {":status", "429"}, + {"x-envoy-ratelimited", Http::Headers::get().EnvoyRateLimitedValues.True}, + {"x-ratelimit-limit", "1, 1;w=60;name=\"first\", 4;w=3600;name=\"second\""}, + {"x-ratelimit-remaining", "2"}, + {"x-ratelimit-reset", "3"}}; + EXPECT_CALL(filter_callbacks_, encodeHeaders_(HeaderMapEqualRef(&expected_headers), true)); + EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0); + EXPECT_CALL(filter_callbacks_.stream_info_, + setResponseFlag(StreamInfo::ResponseFlag::RateLimited)); + + auto descriptor_statuses = { + Envoy::RateLimit::buildDescriptorStatus( + 1, envoy::service::ratelimit::v3::RateLimitResponse::RateLimit::MINUTE, "first", 2, 3), + Envoy::RateLimit::buildDescriptorStatus( + 4, envoy::service::ratelimit::v3::RateLimitResponse::RateLimit::HOUR, "second", 5, 6)}; + auto descriptor_statuses_ptr = + std::make_unique(descriptor_statuses); + request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OverLimit, + std::move(descriptor_statuses_ptr), nullptr, nullptr); + EXPECT_EQ(1U, filter_callbacks_.clusterInfo() + ->statsScope() + .counterFromStatName(ratelimit_over_limit_) + .value()); + EXPECT_EQ( + 1U, + filter_callbacks_.clusterInfo()->statsScope().counterFromStatName(upstream_rq_4xx_).value()); + EXPECT_EQ( + 1U, + filter_callbacks_.clusterInfo()->statsScope().counterFromStatName(upstream_rq_429_).value()); +} + TEST_F(HttpRateLimitFilterTest, LimitResponseRuntimeDisabled) { SetUpTest(filter_config_); InSequence s; - EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _)) + EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _, _)) .WillOnce(SetArgReferee<1>(descriptor_)); EXPECT_CALL(*client_, limit(_, _, _, _)) .WillOnce( @@ -527,8 +650,8 @@ TEST_F(HttpRateLimitFilterTest, LimitResponseRuntimeDisabled) { .WillOnce(Return(false)); EXPECT_CALL(filter_callbacks_, continueDecoding()); Http::ResponseHeaderMapPtr h{new Http::TestResponseHeaderMapImpl()}; - 
request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OverLimit, std::move(h), - nullptr); + request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OverLimit, nullptr, + std::move(h), nullptr); EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false)); EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_)); @@ -554,7 +677,7 @@ TEST_F(HttpRateLimitFilterTest, ResetDuringCall) { SetUpTest(filter_config_); InSequence s; - EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _)) + EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _, _)) .WillOnce(SetArgReferee<1>(descriptor_)); EXPECT_CALL(*client_, limit(_, _, _, _)) .WillOnce( @@ -576,7 +699,7 @@ TEST_F(HttpRateLimitFilterTest, RouteRateLimitDisabledForRouteKey) { ON_CALL(runtime_.snapshot_, featureEnabled("ratelimit.test_key.http_filter_enabled", 100)) .WillByDefault(Return(false)); - EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _)).Times(0); + EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _, _)).Times(0); EXPECT_CALL(*client_, limit(_, _, _, _)).Times(0); EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false)); @@ -596,7 +719,7 @@ TEST_F(HttpRateLimitFilterTest, VirtualHostRateLimitDisabledForRouteKey) { ON_CALL(runtime_.snapshot_, featureEnabled("ratelimit.test_vh_key.http_filter_enabled", 100)) .WillByDefault(Return(false)); - EXPECT_CALL(vh_rate_limit_, populateDescriptors(_, _, _, _, _)).Times(0); + EXPECT_CALL(vh_rate_limit_, populateDescriptors(_, _, _, _, _, _)).Times(0); EXPECT_CALL(*client_, limit(_, _, _, _)).Times(0); EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false)); @@ -618,8 +741,8 @@ TEST_F(HttpRateLimitFilterTest, IncorrectRequestType) { )EOF"; SetUpTest(internal_filter_config); - EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _)).Times(0); - 
EXPECT_CALL(vh_rate_limit_, populateDescriptors(_, _, _, _, _)).Times(0); + EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _, _)).Times(0); + EXPECT_CALL(vh_rate_limit_, populateDescriptors(_, _, _, _, _, _)).Times(0); EXPECT_CALL(*client_, limit(_, _, _, _)).Times(0); EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false)); EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false)); @@ -639,8 +762,8 @@ TEST_F(HttpRateLimitFilterTest, IncorrectRequestType) { SetUpTest(external_filter_config); Http::TestRequestHeaderMapImpl request_headers{{"x-envoy-internal", "true"}}; - EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _)).Times(0); - EXPECT_CALL(vh_rate_limit_, populateDescriptors(_, _, _, _, _)).Times(0); + EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _, _)).Times(0); + EXPECT_CALL(vh_rate_limit_, populateDescriptors(_, _, _, _, _, _)).Times(0); EXPECT_CALL(*client_, limit(_, _, _, _)).Times(0); EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false)); EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false)); @@ -666,7 +789,7 @@ TEST_F(HttpRateLimitFilterTest, InternalRequestType) { EXPECT_CALL(filter_callbacks_.route_->route_entry_.rate_limit_policy_, getApplicableRateLimit(0)) .Times(1); - EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _)) + EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _, _)) .WillOnce(SetArgReferee<1>(descriptor_)); EXPECT_CALL(filter_callbacks_.route_->route_entry_.virtual_host_.rate_limit_policy_, @@ -679,7 +802,8 @@ TEST_F(HttpRateLimitFilterTest, InternalRequestType) { _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { - callbacks.complete(Filters::Common::RateLimit::LimitStatus::OK, nullptr, nullptr); + callbacks.complete(Filters::Common::RateLimit::LimitStatus::OK, nullptr, nullptr, + 
nullptr); }))); EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0); @@ -711,7 +835,7 @@ TEST_F(HttpRateLimitFilterTest, ExternalRequestType) { EXPECT_CALL(filter_callbacks_.route_->route_entry_.rate_limit_policy_, getApplicableRateLimit(0)) .Times(1); - EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _)) + EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _, _)) .WillOnce(SetArgReferee<1>(descriptor_)); EXPECT_CALL(filter_callbacks_.route_->route_entry_.virtual_host_.rate_limit_policy_, @@ -724,7 +848,8 @@ TEST_F(HttpRateLimitFilterTest, ExternalRequestType) { _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { - callbacks.complete(Filters::Common::RateLimit::LimitStatus::OK, nullptr, nullptr); + callbacks.complete(Filters::Common::RateLimit::LimitStatus::OK, nullptr, nullptr, + nullptr); }))); EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0); @@ -751,7 +876,7 @@ TEST_F(HttpRateLimitFilterTest, ExcludeVirtualHost) { InSequence s; EXPECT_CALL(filter_callbacks_.route_->route_entry_.rate_limit_policy_, getApplicableRateLimit(0)); - EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _)) + EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _, _)) .WillOnce(SetArgReferee<1>(descriptor_)); EXPECT_CALL(filter_callbacks_.route_->route_entry_, includeVirtualHostRateLimits()) @@ -766,7 +891,8 @@ TEST_F(HttpRateLimitFilterTest, ExcludeVirtualHost) { _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { - callbacks.complete(Filters::Common::RateLimit::LimitStatus::OK, nullptr, nullptr); + callbacks.complete(Filters::Common::RateLimit::LimitStatus::OK, nullptr, nullptr, + nullptr); }))); EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0); diff --git a/test/extensions/filters/http/rbac/BUILD b/test/extensions/filters/http/rbac/BUILD index 372b2110d0cf0..f08fffcd9683c 100644 --- 
a/test/extensions/filters/http/rbac/BUILD +++ b/test/extensions/filters/http/rbac/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -10,6 +8,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( @@ -18,7 +18,8 @@ envoy_extension_cc_test( extension_name = "envoy.filters.http.rbac", deps = [ "//source/extensions/filters/http/rbac:config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", + "//test/mocks/server:instance_mocks", "@envoy_api//envoy/config/rbac/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/http/rbac/v3:pkg_cc_proto", ], @@ -44,6 +45,7 @@ envoy_extension_cc_test( name = "rbac_filter_integration_test", srcs = ["rbac_filter_integration_test.cc"], extension_name = "envoy.filters.http.rbac", + tags = ["fails_on_windows"], deps = [ "//source/extensions/filters/http/rbac:config", "//test/config:utility_lib", diff --git a/test/extensions/filters/http/rbac/config_test.cc b/test/extensions/filters/http/rbac/config_test.cc index 7617a99c6136f..318a6e2f10334 100644 --- a/test/extensions/filters/http/rbac/config_test.cc +++ b/test/extensions/filters/http/rbac/config_test.cc @@ -5,7 +5,8 @@ #include "extensions/filters/common/rbac/engine.h" #include "extensions/filters/http/rbac/config.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" +#include "test/mocks/server/instance.h" #include "gmock/gmock.h" #include "gtest/gtest.h" @@ -36,16 +37,14 @@ TEST(RoleBasedAccessControlFilterConfigFactoryTest, ValidProto) { TEST(RoleBasedAccessControlFilterConfigFactoryTest, EmptyProto) { RoleBasedAccessControlFilterConfigFactory factory; - auto* config = dynamic_cast( - factory.createEmptyConfigProto().get()); - EXPECT_NE(nullptr, config); + EXPECT_NE(nullptr, dynamic_cast( + factory.createEmptyConfigProto().get())); } TEST(RoleBasedAccessControlFilterConfigFactoryTest, 
EmptyRouteProto) { RoleBasedAccessControlFilterConfigFactory factory; - auto* config = dynamic_cast( - factory.createEmptyRouteConfigProto().get()); - EXPECT_NE(nullptr, config); + EXPECT_NE(nullptr, dynamic_cast( + factory.createEmptyRouteConfigProto().get())); } TEST(RoleBasedAccessControlFilterConfigFactoryTest, RouteSpecificConfig) { diff --git a/test/extensions/filters/http/rbac/rbac_filter_integration_test.cc b/test/extensions/filters/http/rbac/rbac_filter_integration_test.cc index a7b8f89151590..b7fcd3ebcbb78 100644 --- a/test/extensions/filters/http/rbac/rbac_filter_integration_test.cc +++ b/test/extensions/filters/http/rbac/rbac_filter_integration_test.cc @@ -64,6 +64,20 @@ name: rbac - any: true )EOF"; +const std::string RBAC_CONFIG_WITH_LOG_ACTION = R"EOF( +name: rbac +typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.rbac.v3.RBAC + rules: + action: LOG + policies: + foo: + permissions: + - header: { name: ":method", exact_match: "GET" } + principals: + - any: true +)EOF"; + using RBACIntegrationTest = HttpProtocolIntegrationTest; INSTANTIATE_TEST_SUITE_P(Protocols, RBACIntegrationTest, @@ -90,7 +104,7 @@ TEST_P(RBACIntegrationTest, Allowed) { response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } TEST_P(RBACIntegrationTest, Denied) { @@ -110,7 +124,7 @@ TEST_P(RBACIntegrationTest, Denied) { 1024); response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ("403", response->headers().Status()->value().getStringView()); + EXPECT_EQ("403", response->headers().getStatusValue()); } TEST_P(RBACIntegrationTest, DeniedWithPrefixRule) { @@ -136,7 +150,7 @@ TEST_P(RBACIntegrationTest, DeniedWithPrefixRule) { response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", 
response->headers().getStatusValue()); } TEST_P(RBACIntegrationTest, RbacPrefixRuleUseNormalizePath) { @@ -160,7 +174,7 @@ TEST_P(RBACIntegrationTest, RbacPrefixRuleUseNormalizePath) { response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ("403", response->headers().Status()->value().getStringView()); + EXPECT_EQ("403", response->headers().getStatusValue()); } TEST_P(RBACIntegrationTest, DeniedHeadReply) { @@ -180,9 +194,9 @@ TEST_P(RBACIntegrationTest, DeniedHeadReply) { 1024); response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ("403", response->headers().Status()->value().getStringView()); + EXPECT_EQ("403", response->headers().getStatusValue()); ASSERT_TRUE(response->headers().ContentLength()); - EXPECT_NE("0", response->headers().ContentLength()->value().getStringView()); + EXPECT_NE("0", response->headers().getContentLengthValue()); EXPECT_THAT(response->body(), ::testing::IsEmpty()); } @@ -220,7 +234,7 @@ TEST_P(RBACIntegrationTest, RouteOverride) { response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } TEST_P(RBACIntegrationTest, PathWithQueryAndFragment) { @@ -246,7 +260,7 @@ TEST_P(RBACIntegrationTest, PathWithQueryAndFragment) { response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } } @@ -273,9 +287,32 @@ TEST_P(RBACIntegrationTest, PathIgnoreCase) { response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } } +TEST_P(RBACIntegrationTest, LogConnectionAllow) { + config_helper_.addFilter(RBAC_CONFIG_WITH_LOG_ACTION); + initialize(); + + codec_client_ = 
makeHttpConnection(lookupPort("http")); + + auto response = codec_client_->makeRequestWithBody( + Http::TestRequestHeaderMapImpl{ + {":method", "POST"}, + {":path", "/"}, + {":scheme", "http"}, + {":authority", "host"}, + {"x-forwarded-for", "10.0.0.1"}, + }, + 1024); + waitForNextUpstreamRequest(); + upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, true); + + response->waitForEndStream(); + ASSERT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().getStatusValue()); +} + } // namespace } // namespace Envoy diff --git a/test/extensions/filters/http/rbac/rbac_filter_test.cc b/test/extensions/filters/http/rbac/rbac_filter_test.cc index d445860f83946..519a49126bbb7 100644 --- a/test/extensions/filters/http/rbac/rbac_filter_test.cc +++ b/test/extensions/filters/http/rbac/rbac_filter_test.cc @@ -24,9 +24,12 @@ namespace HttpFilters { namespace RBACFilter { namespace { +enum class LogResult { Yes, No, Undecided }; + class RoleBasedAccessControlFilterTest : public testing::Test { public: - RoleBasedAccessControlFilterConfigSharedPtr setupConfig() { + RoleBasedAccessControlFilterConfigSharedPtr + setupConfig(envoy::config::rbac::v3::RBAC::Action action) { envoy::extensions::filters::http::rbac::v3::RBAC config; envoy::config::rbac::v3::Policy policy; @@ -36,7 +39,7 @@ class RoleBasedAccessControlFilterTest : public testing::Test { policy_rules->add_rules()->set_destination_port(123); policy_rules->add_rules()->mutable_url_path()->mutable_path()->set_suffix("suffix"); policy.add_principals()->set_any(true); - config.mutable_rules()->set_action(envoy::config::rbac::v3::RBAC::ALLOW); + config.mutable_rules()->set_action(action); (*config.mutable_rules()->mutable_policies())["foo"] = policy; envoy::config::rbac::v3::Policy shadow_policy; @@ -44,13 +47,14 @@ class RoleBasedAccessControlFilterTest : public testing::Test { shadow_policy_rules->add_rules()->mutable_requested_server_name()->set_exact("xyz.cncf.io"); 
shadow_policy_rules->add_rules()->set_destination_port(456); shadow_policy.add_principals()->set_any(true); - config.mutable_shadow_rules()->set_action(envoy::config::rbac::v3::RBAC::ALLOW); + config.mutable_shadow_rules()->set_action(action); (*config.mutable_shadow_rules()->mutable_policies())["bar"] = shadow_policy; return std::make_shared(config, "test", store_); } - RoleBasedAccessControlFilterTest() : config_(setupConfig()), filter_(config_) {} + RoleBasedAccessControlFilterTest() + : config_(setupConfig(envoy::config::rbac::v3::RBAC::ALLOW)), filter_(config_) {} void SetUp() override { EXPECT_CALL(callbacks_, connection()).WillRepeatedly(Return(&connection_)); @@ -68,6 +72,21 @@ class RoleBasedAccessControlFilterTest : public testing::Test { ON_CALL(connection_, requestedServerName()).WillByDefault(Return(requested_server_name_)); } + void checkAccessLogMetadata(LogResult expected) { + if (expected != LogResult::Undecided) { + auto filter_meta = req_info_.dynamicMetadata().filter_metadata().at( + Filters::Common::RBAC::DynamicMetadataKeysSingleton::get().CommonNamespace); + EXPECT_EQ(expected == LogResult::Yes, + filter_meta.fields() + .at(Filters::Common::RBAC::DynamicMetadataKeysSingleton::get().AccessLogKey) + .bool_value()); + } else { + EXPECT_EQ(req_info_.dynamicMetadata().filter_metadata().end(), + req_info_.dynamicMetadata().filter_metadata().find( + Filters::Common::RBAC::DynamicMetadataKeysSingleton::get().CommonNamespace)); + } + } + void setMetadata() { ON_CALL(req_info_, setDynamicMetadata(HttpFilterNames::get().Rbac, _)) .WillByDefault(Invoke([this](const std::string&, const ProtobufWkt::Struct& obj) { @@ -75,6 +94,15 @@ class RoleBasedAccessControlFilterTest : public testing::Test { Protobuf::MapPair(HttpFilterNames::get().Rbac, obj)); })); + + ON_CALL(req_info_, + setDynamicMetadata( + Filters::Common::RBAC::DynamicMetadataKeysSingleton::get().CommonNamespace, _)) + .WillByDefault(Invoke([this](const std::string&, const ProtobufWkt::Struct& 
obj) { + req_info_.metadata_.mutable_filter_metadata()->insert( + Protobuf::MapPair( + Filters::Common::RBAC::DynamicMetadataKeysSingleton::get().CommonNamespace, obj)); + })); } NiceMock callbacks_; @@ -82,8 +110,8 @@ class RoleBasedAccessControlFilterTest : public testing::Test { NiceMock req_info_; Stats::IsolatedStoreImpl store_; RoleBasedAccessControlFilterConfigSharedPtr config_; - RoleBasedAccessControlFilter filter_; + Network::Address::InstanceConstSharedPtr address_; std::string requested_server_name_; Http::TestRequestHeaderMapImpl headers_; @@ -92,6 +120,7 @@ class RoleBasedAccessControlFilterTest : public testing::Test { TEST_F(RoleBasedAccessControlFilterTest, Allowed) { setDestinationPort(123); + setMetadata(); EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(headers_, false)); Http::MetadataMap metadata_map{{"metadata", "metadata"}}; @@ -102,11 +131,14 @@ TEST_F(RoleBasedAccessControlFilterTest, Allowed) { Buffer::OwnedImpl data(""); EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data, false)); EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(trailers_)); + + checkAccessLogMetadata(LogResult::Undecided); } TEST_F(RoleBasedAccessControlFilterTest, RequestedServerName) { setDestinationPort(999); setRequestedServerName("www.cncf.io"); + setMetadata(); EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(headers_, false)); EXPECT_EQ(1U, config_->stats().allowed_.value()); @@ -117,10 +149,13 @@ TEST_F(RoleBasedAccessControlFilterTest, RequestedServerName) { Buffer::OwnedImpl data(""); EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data, false)); EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(trailers_)); + + checkAccessLogMetadata(LogResult::Undecided); } TEST_F(RoleBasedAccessControlFilterTest, Path) { setDestinationPort(999); + setMetadata(); auto headers = Http::TestRequestHeaderMapImpl{ {":method", "GET"}, @@ -129,6 +164,7 @@ 
TEST_F(RoleBasedAccessControlFilterTest, Path) { {":authority", "host"}, }; EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(headers, false)); + checkAccessLogMetadata(LogResult::Undecided); } TEST_F(RoleBasedAccessControlFilterTest, Denied) { @@ -151,23 +187,65 @@ TEST_F(RoleBasedAccessControlFilterTest, Denied) { EXPECT_EQ("allowed", filter_meta.fields().at("shadow_engine_result").string_value()); EXPECT_EQ("bar", filter_meta.fields().at("shadow_effective_policy_id").string_value()); EXPECT_EQ("rbac_access_denied", callbacks_.details_); + checkAccessLogMetadata(LogResult::Undecided); } TEST_F(RoleBasedAccessControlFilterTest, RouteLocalOverride) { setDestinationPort(456); + setMetadata(); envoy::extensions::filters::http::rbac::v3::RBACPerRoute route_config; route_config.mutable_rbac()->mutable_rules()->set_action(envoy::config::rbac::v3::RBAC::DENY); NiceMock engine{route_config.rbac().rules()}; NiceMock per_route_config_{route_config}; - EXPECT_CALL(engine, allowed(_, _, _, _)).WillRepeatedly(Return(true)); + EXPECT_CALL(engine, handleAction(_, _, _, _)).WillRepeatedly(Return(true)); EXPECT_CALL(per_route_config_, engine()).WillRepeatedly(ReturnRef(engine)); EXPECT_CALL(callbacks_.route_->route_entry_, perFilterConfig(HttpFilterNames::get().Rbac)) .WillRepeatedly(Return(&per_route_config_)); EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(headers_, true)); + checkAccessLogMetadata(LogResult::Undecided); +} + +// Log Tests +TEST_F(RoleBasedAccessControlFilterTest, ShouldLog) { + config_ = setupConfig(envoy::config::rbac::v3::RBAC::LOG); + filter_ = RoleBasedAccessControlFilter(config_); + filter_.setDecoderFilterCallbacks(callbacks_); + + setDestinationPort(123); + setMetadata(); + + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(headers_, false)); + EXPECT_EQ(1U, config_->stats().allowed_.value()); + EXPECT_EQ(0U, config_->stats().shadow_denied_.value()); + + Buffer::OwnedImpl data(""); + 
EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data, false)); + EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(trailers_)); + + checkAccessLogMetadata(LogResult::Yes); +} + +TEST_F(RoleBasedAccessControlFilterTest, ShouldNotLog) { + config_ = setupConfig(envoy::config::rbac::v3::RBAC::LOG); + filter_ = RoleBasedAccessControlFilter(config_); + filter_.setDecoderFilterCallbacks(callbacks_); + + setDestinationPort(456); + setMetadata(); + + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(headers_, false)); + EXPECT_EQ(1U, config_->stats().allowed_.value()); + EXPECT_EQ(0U, config_->stats().shadow_denied_.value()); + + Buffer::OwnedImpl data(""); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data, false)); + EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(trailers_)); + + checkAccessLogMetadata(LogResult::No); } } // namespace diff --git a/test/extensions/filters/http/router/BUILD b/test/extensions/filters/http/router/BUILD index 0fedbe15d3aea..1410160daf465 100644 --- a/test/extensions/filters/http/router/BUILD +++ b/test/extensions/filters/http/router/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( @@ -17,7 +17,7 @@ envoy_extension_cc_test( extension_name = "envoy.filters.http.router", deps = [ "//source/extensions/filters/http/router:config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "//test/test_common:utility_lib", "@envoy_api//envoy/extensions/filters/http/router/v3:pkg_cc_proto", ], @@ -30,6 +30,7 @@ envoy_extension_cc_test( "//test/config/integration/certs", ], extension_name = "envoy.filters.http.router", + tags = ["fails_on_windows"], deps = [ "//source/extensions/filters/http/router:config", 
"//test/integration:http_integration_lib", diff --git a/test/extensions/filters/http/router/auto_sni_integration_test.cc b/test/extensions/filters/http/router/auto_sni_integration_test.cc index 9a7770c353c31..10f0d7818e3fd 100644 --- a/test/extensions/filters/http/router/auto_sni_integration_test.cc +++ b/test/extensions/filters/http/router/auto_sni_integration_test.cc @@ -69,7 +69,7 @@ TEST_P(AutoSniIntegrationTest, BasicAutoSniTest) { setup(); codec_client_ = makeHttpConnection(lookupPort("http")); const auto response_ = sendRequestAndWaitForResponse( - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":method", "GET"}, {":path", "/"}, {":scheme", "http"}, {":authority", "localhost"}}, 0, default_response_headers_, 0); @@ -79,15 +79,14 @@ TEST_P(AutoSniIntegrationTest, BasicAutoSniTest) { const Extensions::TransportSockets::Tls::SslSocketInfo* ssl_socket = dynamic_cast( fake_upstream_connection_->connection().ssl().get()); - EXPECT_STREQ("localhost", - SSL_get_servername(ssl_socket->rawSslForTest(), TLSEXT_NAMETYPE_host_name)); + EXPECT_STREQ("localhost", SSL_get_servername(ssl_socket->ssl(), TLSEXT_NAMETYPE_host_name)); } TEST_P(AutoSniIntegrationTest, PassingNotDNS) { setup(); codec_client_ = makeHttpConnection(lookupPort("http")); const auto response_ = sendRequestAndWaitForResponse( - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":method", "GET"}, {":path", "/"}, {":scheme", "http"}, {":authority", "127.0.0.1"}}, 0, default_response_headers_, 0); @@ -97,7 +96,7 @@ TEST_P(AutoSniIntegrationTest, PassingNotDNS) { const Extensions::TransportSockets::Tls::SslSocketInfo* ssl_socket = dynamic_cast( fake_upstream_connection_->connection().ssl().get()); - EXPECT_STREQ(nullptr, SSL_get_servername(ssl_socket->rawSslForTest(), TLSEXT_NAMETYPE_host_name)); + EXPECT_STREQ(nullptr, SSL_get_servername(ssl_socket->ssl(), TLSEXT_NAMETYPE_host_name)); } TEST_P(AutoSniIntegrationTest, PassingHostWithoutPort) { @@ -116,8 +115,7 @@ 
TEST_P(AutoSniIntegrationTest, PassingHostWithoutPort) { const Extensions::TransportSockets::Tls::SslSocketInfo* ssl_socket = dynamic_cast( fake_upstream_connection_->connection().ssl().get()); - EXPECT_STREQ("example.com", - SSL_get_servername(ssl_socket->rawSslForTest(), TLSEXT_NAMETYPE_host_name)); + EXPECT_STREQ("example.com", SSL_get_servername(ssl_socket->ssl(), TLSEXT_NAMETYPE_host_name)); } } // namespace diff --git a/test/extensions/filters/http/router/config_test.cc b/test/extensions/filters/http/router/config_test.cc index 68ebadaa6ef9f..1808920fd6107 100644 --- a/test/extensions/filters/http/router/config_test.cc +++ b/test/extensions/filters/http/router/config_test.cc @@ -6,7 +6,7 @@ #include "extensions/filters/http/router/config.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" @@ -27,7 +27,7 @@ TEST(RouterFilterConfigTest, SimpleRouterFilterConfig) { )EOF"; envoy::extensions::filters::http::router::v3::Router proto_config; - TestUtility::loadFromYaml(yaml_string, proto_config); + TestUtility::loadFromYaml(yaml_string, proto_config, false, true); NiceMock context; RouterFilterConfig factory; Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, "stats.", context); @@ -43,8 +43,8 @@ TEST(RouterFilterConfigTest, BadRouterFilterConfig) { )EOF"; envoy::extensions::filters::http::router::v3::Router proto_config; - EXPECT_THROW_WITH_REGEX(TestUtility::loadFromYaml(yaml_string, proto_config), EnvoyException, - "route: Cannot find field"); + EXPECT_THROW_WITH_REGEX(TestUtility::loadFromYaml(yaml_string, proto_config, false, true), + EnvoyException, "route: Cannot find field"); } TEST(RouterFilterConfigTest, RouterFilterWithUnsupportedStrictHeaderCheck) { @@ -54,7 +54,7 @@ TEST(RouterFilterConfigTest, RouterFilterWithUnsupportedStrictHeaderCheck) { )EOF"; envoy::extensions::filters::http::router::v3::Router router_config; - 
TestUtility::loadFromYaml(yaml, router_config); + TestUtility::loadFromYaml(yaml, router_config, false, true); NiceMock context; RouterFilterConfig factory; diff --git a/test/extensions/filters/http/squash/BUILD b/test/extensions/filters/http/squash/BUILD index ae0eb427ea843..9ddeb07480f07 100644 --- a/test/extensions/filters/http/squash/BUILD +++ b/test/extensions/filters/http/squash/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( @@ -21,7 +21,7 @@ envoy_extension_cc_test( "//source/common/stats:stats_lib", "//source/extensions/filters/http/squash:squash_filter_lib", "//test/mocks/http:http_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "//test/mocks/upstream:upstream_mocks", "//test/test_common:environment_lib", "@envoy_api//envoy/extensions/filters/http/squash/v3:pkg_cc_proto", @@ -32,6 +32,7 @@ envoy_extension_cc_test( name = "squash_filter_integration_test", srcs = ["squash_filter_integration_test.cc"], extension_name = "envoy.filters.http.squash", + tags = ["fails_on_windows"], deps = [ "//source/extensions/filters/http/squash:config", "//test/integration:http_integration_lib", @@ -45,7 +46,7 @@ envoy_extension_cc_test( extension_name = "envoy.filters.http.squash", deps = [ "//source/extensions/filters/http/squash:config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "//test/test_common:utility_lib", "@envoy_api//envoy/extensions/filters/http/squash/v3:pkg_cc_proto", ], diff --git a/test/extensions/filters/http/squash/config_test.cc b/test/extensions/filters/http/squash/config_test.cc index 62e7a18826e5b..f7f9dda19b046 100644 --- a/test/extensions/filters/http/squash/config_test.cc +++ b/test/extensions/filters/http/squash/config_test.cc @@ -3,7 +3,7 @@ #include 
"extensions/filters/http/squash/config.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" diff --git a/test/extensions/filters/http/squash/squash_filter_integration_test.cc b/test/extensions/filters/http/squash/squash_filter_integration_test.cc index 79cf9a91c23cf..fa8dd4ec1a754 100644 --- a/test/extensions/filters/http/squash/squash_filter_integration_test.cc +++ b/test/extensions/filters/http/squash/squash_filter_integration_test.cc @@ -133,8 +133,8 @@ TEST_P(SquashFilterIntegrationTest, TestHappyPath) { response->waitForEndStream(); - EXPECT_EQ("POST", create_stream->headers().Method()->value().getStringView()); - EXPECT_EQ("/api/v2/debugattachment/", create_stream->headers().Path()->value().getStringView()); + EXPECT_EQ("POST", create_stream->headers().getMethodValue()); + EXPECT_EQ("/api/v2/debugattachment/", create_stream->headers().getPathValue()); // Make sure the env var was replaced ProtobufWkt::Struct actualbody; TestUtility::loadFromJson(create_stream->body().toString(), actualbody); @@ -146,11 +146,10 @@ TEST_P(SquashFilterIntegrationTest, TestHappyPath) { EXPECT_TRUE(MessageDifferencer::Equals(expectedbody, actualbody)); // The second request should be for the created object - EXPECT_EQ("GET", get_stream->headers().Method()->value().getStringView()); - EXPECT_EQ("/api/v2/debugattachment/oF8iVdiJs5", - get_stream->headers().Path()->value().getStringView()); + EXPECT_EQ("GET", get_stream->headers().getMethodValue()); + EXPECT_EQ("/api/v2/debugattachment/oF8iVdiJs5", get_stream->headers().getPathValue()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } TEST_P(SquashFilterIntegrationTest, ErrorAttaching) { @@ -164,7 +163,7 @@ TEST_P(SquashFilterIntegrationTest, ErrorAttaching) { response->waitForEndStream(); 
EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } TEST_P(SquashFilterIntegrationTest, TimeoutAttaching) { @@ -180,7 +179,7 @@ TEST_P(SquashFilterIntegrationTest, TimeoutAttaching) { response->waitForEndStream(); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } TEST_P(SquashFilterIntegrationTest, ErrorNoSquashServer) { @@ -191,7 +190,7 @@ TEST_P(SquashFilterIntegrationTest, ErrorNoSquashServer) { response->waitForEndStream(); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } TEST_P(SquashFilterIntegrationTest, BadCreateResponse) { @@ -203,7 +202,7 @@ TEST_P(SquashFilterIntegrationTest, BadCreateResponse) { response->waitForEndStream(); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } TEST_P(SquashFilterIntegrationTest, BadGetResponse) { @@ -217,7 +216,7 @@ TEST_P(SquashFilterIntegrationTest, BadGetResponse) { response->waitForEndStream(); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } } // namespace Envoy diff --git a/test/extensions/filters/http/squash/squash_filter_test.cc b/test/extensions/filters/http/squash/squash_filter_test.cc index d2cb53bf52b63..84d6ac9cb41ca 100644 --- a/test/extensions/filters/http/squash/squash_filter_test.cc +++ b/test/extensions/filters/http/squash/squash_filter_test.cc @@ -9,7 +9,7 @@ #include "extensions/filters/http/squash/squash_filter.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include 
"test/mocks/upstream/mocks.h" #include "test/test_common/environment.h" #include "test/test_common/utility.h" diff --git a/test/extensions/filters/http/tap/BUILD b/test/extensions/filters/http/tap/BUILD index b5aaea4b14807..f134caaf53568 100644 --- a/test/extensions/filters/http/tap/BUILD +++ b/test/extensions/filters/http/tap/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test_library", @@ -10,6 +8,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test_library( @@ -29,7 +29,7 @@ envoy_extension_cc_test( "//source/extensions/filters/http/tap:config", "//source/extensions/filters/http/tap:tap_config_interface", "//test/mocks/http:http_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "//test/mocks/stream_info:stream_info_mocks", "//test/test_common:utility_lib", ], @@ -52,9 +52,11 @@ envoy_extension_cc_test( name = "tap_filter_integration_test", srcs = ["tap_filter_integration_test.cc"], extension_name = "envoy.filters.http.tap", + tags = ["fails_on_windows"], deps = [ "//source/extensions/filters/http/tap:config", "//test/integration:http_integration_lib", + "//test/test_common:utility_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", "@envoy_api//envoy/data/tap/v3:pkg_cc_proto", ], diff --git a/test/extensions/filters/http/tap/tap_config_impl_test.cc b/test/extensions/filters/http/tap/tap_config_impl_test.cc index 2033c1cd240d3..4e3763e9520e0 100644 --- a/test/extensions/filters/http/tap/tap_config_impl_test.cc +++ b/test/extensions/filters/http/tap/tap_config_impl_test.cc @@ -7,6 +7,7 @@ using testing::_; using testing::Assign; +using testing::ByMove; using testing::InSequence; using testing::Return; using testing::ReturnRef; @@ -24,7 +25,7 @@ class HttpPerRequestTapperImplTest : public testing::Test { HttpPerRequestTapperImplTest() { EXPECT_CALL(*config_, 
createPerTapSinkHandleManager_(1)).WillOnce(Return(sink_manager_)); EXPECT_CALL(*config_, createMatchStatusVector()) - .WillOnce(Return(TapCommon::Matcher::MatchStatusVector(1))); + .WillOnce(Return(ByMove(TapCommon::Matcher::MatchStatusVector(1)))); EXPECT_CALL(*config_, rootMatcher()).WillRepeatedly(ReturnRef(matcher_)); EXPECT_CALL(matcher_, onNewStream(_)).WillOnce(SaveArgAddress(&statuses_)); tapper_ = std::make_unique(config_, 1); @@ -53,11 +54,13 @@ TEST_F(HttpPerRequestTapperImplTest, BufferedFlowNoTap) { InSequence s; EXPECT_CALL(matcher_, onHttpRequestHeaders(_, _)); tapper_->onRequestHeaders(request_headers_); + EXPECT_CALL(matcher_, onRequestBody(_, _)); tapper_->onRequestBody(Buffer::OwnedImpl("hello")); EXPECT_CALL(matcher_, onHttpRequestTrailers(_, _)); tapper_->onRequestTrailers(request_trailers_); EXPECT_CALL(matcher_, onHttpResponseHeaders(_, _)); tapper_->onResponseHeaders(response_headers_); + EXPECT_CALL(matcher_, onResponseBody(_, _)); tapper_->onResponseBody(Buffer::OwnedImpl("world")); EXPECT_CALL(matcher_, onHttpResponseTrailers(_, _)); tapper_->onResponseTrailers(response_trailers_); @@ -73,11 +76,13 @@ TEST_F(HttpPerRequestTapperImplTest, BufferedFlowTap) { InSequence s; EXPECT_CALL(matcher_, onHttpRequestHeaders(_, _)); tapper_->onRequestHeaders(request_headers_); + EXPECT_CALL(matcher_, onRequestBody(_, _)); tapper_->onRequestBody(Buffer::OwnedImpl("hello")); EXPECT_CALL(matcher_, onHttpRequestTrailers(_, _)); tapper_->onRequestTrailers(request_trailers_); EXPECT_CALL(matcher_, onHttpResponseHeaders(_, _)); tapper_->onResponseHeaders(response_headers_); + EXPECT_CALL(matcher_, onResponseBody(_, _)); tapper_->onResponseBody(Buffer::OwnedImpl("world")); EXPECT_CALL(matcher_, onHttpResponseTrailers(_, _)); tapper_->onResponseTrailers(response_trailers_); @@ -116,6 +121,7 @@ TEST_F(HttpPerRequestTapperImplTest, StreamedMatchRequestTrailers) { InSequence s; EXPECT_CALL(matcher_, onHttpRequestHeaders(_, _)); 
tapper_->onRequestHeaders(request_headers_); + EXPECT_CALL(matcher_, onRequestBody(_, _)); tapper_->onRequestBody(Buffer::OwnedImpl("hello")); EXPECT_CALL(matcher_, onHttpRequestTrailers(_, _)) .WillOnce(Assign(&(*statuses_)[0].matches_, true)); @@ -156,6 +162,7 @@ TEST_F(HttpPerRequestTapperImplTest, StreamedMatchRequestTrailers) { value: f )EOF"))); tapper_->onResponseHeaders(response_headers_); + EXPECT_CALL(matcher_, onResponseBody(_, _)); EXPECT_CALL(*sink_manager_, submitTrace_(TraceEqual( R"EOF( http_streamed_trace_segment: @@ -187,11 +194,13 @@ TEST_F(HttpPerRequestTapperImplTest, StreamedMatchResponseTrailers) { InSequence s; EXPECT_CALL(matcher_, onHttpRequestHeaders(_, _)); tapper_->onRequestHeaders(request_headers_); + EXPECT_CALL(matcher_, onRequestBody(_, _)); tapper_->onRequestBody(Buffer::OwnedImpl("hello")); EXPECT_CALL(matcher_, onHttpRequestTrailers(_, _)); tapper_->onRequestTrailers(request_trailers_); EXPECT_CALL(matcher_, onHttpResponseHeaders(_, _)); tapper_->onResponseHeaders(response_headers_); + EXPECT_CALL(matcher_, onResponseBody(_, _)); tapper_->onResponseBody(Buffer::OwnedImpl("world")); EXPECT_CALL(matcher_, onHttpResponseTrailers(_, _)) .WillOnce(Assign(&(*statuses_)[0].matches_, true)); diff --git a/test/extensions/filters/http/tap/tap_filter_integration_test.cc b/test/extensions/filters/http/tap/tap_filter_integration_test.cc index 048b369a7d28c..fdfa591bc62e5 100644 --- a/test/extensions/filters/http/tap/tap_filter_integration_test.cc +++ b/test/extensions/filters/http/tap/tap_filter_integration_test.cc @@ -4,6 +4,7 @@ #include "envoy/data/tap/v3/wrapper.pb.h" #include "test/integration/http_integration.h" +#include "test/test_common/utility.h" #include "absl/strings/match.h" #include "gtest/gtest.h" @@ -87,7 +88,7 @@ class TapIntegrationTest : public testing::TestWithParammakeRequestWithBody(admin_request_headers, admin_request_yaml); admin_response_->waitForHeaders(); - EXPECT_EQ("200", 
admin_response_->headers().Status()->value().getStringView()); + EXPECT_EQ("200", admin_response_->headers().getStatusValue()); EXPECT_FALSE(admin_response_->complete()); } @@ -127,6 +128,27 @@ class TapIntegrationTest : public testing::TestWithParamclose(); + test_server_->waitForCounterGe("http.config_test.downstream_cx_destroy", 1); + + // Find the written .pb file and verify it. + auto files = TestUtility::listFiles(path_prefix, false); + auto pb_file = std::find_if(files.begin(), files.end(), + [](const std::string& s) { return absl::EndsWith(s, ".pb"); }); + ASSERT_NE(pb_file, files.end()); + + envoy::data::tap::v3::TraceWrapper trace; + TestUtility::loadFromFile(*pb_file, trace, *api_); + EXPECT_TRUE(trace.has_http_buffered_trace()); + } + const Http::TestRequestHeaderMapImpl request_headers_tap_{{":method", "GET"}, {":path", "/"}, {":scheme", "http"}, @@ -168,10 +190,10 @@ TEST_P(TapIntegrationTest, StaticFilePerTap) { R"EOF( name: tap typed_config: - "@type": type.googleapis.com/envoy.config.filter.http.tap.v2alpha.Tap + "@type": type.googleapis.com/envoy.extensions.filters.http.tap.v3.Tap common_config: static_config: - match_config: + match: any_match: true output_config: sinks: @@ -180,24 +202,53 @@ name: tap path_prefix: {} )EOF"; - const std::string path_prefix = getTempPathPrefix(); - initializeFilter(fmt::format(filter_config, path_prefix)); + verifyStaticFilePerTap(filter_config); +} - // Initial request/response with tap. - codec_client_ = makeHttpConnection(makeClientConnection(lookupPort("http"))); - makeRequest(request_headers_tap_, {}, nullptr, response_headers_no_tap_, {}, nullptr); - codec_client_->close(); - test_server_->waitForCounterGe("http.config_test.downstream_cx_destroy", 1); +// Verify the match field takes precedence over the deprecated match_config field. 
+TEST_P(TapIntegrationTest, DEPRECATED_FEATURE_TEST(StaticFilePerTapWithMatchConfigAndMatch)) { + const std::string filter_config = + R"EOF( +name: tap +typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.tap.v3.Tap + common_config: + static_config: + # match_config should be ignored by the match field. + match_config: + not_match: + any_match: true + match: + any_match: true + output_config: + sinks: + - format: PROTO_BINARY + file_per_tap: + path_prefix: {} +)EOF"; - // Find the written .pb file and verify it. - auto files = TestUtility::listFiles(path_prefix, false); - auto pb_file = std::find_if(files.begin(), files.end(), - [](const std::string& s) { return absl::EndsWith(s, ".pb"); }); - ASSERT_NE(pb_file, files.end()); + verifyStaticFilePerTap(filter_config); +} - envoy::data::tap::v3::TraceWrapper trace; - TestUtility::loadFromFile(*pb_file, trace, *api_); - EXPECT_TRUE(trace.has_http_buffered_trace()); +// Verify the deprecated match_config field. +TEST_P(TapIntegrationTest, DEPRECATED_FEATURE_TEST(StaticFilePerTapWithMatchConfig)) { + const std::string filter_config = + R"EOF( +name: tap +typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.tap.v3.Tap + common_config: + static_config: + match_config: + any_match: true + output_config: + sinks: + - format: PROTO_BINARY + file_per_tap: + path_prefix: {} +)EOF"; + + verifyStaticFilePerTap(filter_config); } // Verify a basic tap flow using the admin handler. 
@@ -212,7 +263,7 @@ TEST_P(TapIntegrationTest, AdminBasicFlow) { R"EOF( config_id: test_config_id tap_config: - match_config: + match: or_match: rules: - http_request_headers_match: @@ -275,7 +326,7 @@ config_id: test_config_id R"EOF( config_id: test_config_id tap_config: - match_config: + match: and_match: rules: - http_request_headers_match: @@ -308,6 +359,7 @@ config_id: test_config_id admin_client_->close(); EXPECT_EQ(3UL, test_server_->counter("http.config_test.tap.rq_tapped")->value()); + test_server_->waitForGaugeEq("http.admin.downstream_rq_active", 0); } // Verify both request and response trailer matching works. @@ -318,7 +370,7 @@ TEST_P(TapIntegrationTest, AdminTrailers) { R"EOF( config_id: test_config_id tap_config: - match_config: + match: and_match: rules: - http_request_trailers_match: @@ -359,7 +411,7 @@ TEST_P(TapIntegrationTest, AdminBodyAsBytes) { R"EOF( config_id: test_config_id tap_config: - match_config: + match: any_match: true output_config: sinks: @@ -390,7 +442,7 @@ TEST_P(TapIntegrationTest, AdminBodyAsString) { R"EOF( config_id: test_config_id tap_config: - match_config: + match: any_match: true output_config: sinks: @@ -422,7 +474,7 @@ TEST_P(TapIntegrationTest, AdminBodyAsBytesTruncated) { R"EOF( config_id: test_config_id tap_config: - match_config: + match: any_match: true output_config: max_buffered_rx_bytes: 3 @@ -537,5 +589,52 @@ name: tap EXPECT_EQ(1UL, test_server_->counter("http.config_test.tap.rq_tapped")->value()); } +// Verify that body matching works. 
+TEST_P(TapIntegrationTest, AdminBodyMatching) { + initializeFilter(admin_filter_config_); + + const std::string admin_request_yaml = + R"EOF( +config_id: test_config_id +tap_config: + match: + and_match: + rules: + - http_request_generic_body_match: + patterns: + - string_match: request + - http_response_generic_body_match: + patterns: + - string_match: response + output_config: + sinks: + - format: JSON_BODY_AS_STRING + streaming_admin: {} +)EOF"; + + startAdminRequest(admin_request_yaml); + + codec_client_ = makeHttpConnection(makeClientConnection(lookupPort("http"))); + // Should not tap, request and response body do not match. + makeRequest(request_headers_no_tap_, {{"This is test payload"}}, nullptr, + response_headers_no_tap_, {{"This is test payload"}}, nullptr); + // Should not tap, request matches but response body does not match. + makeRequest(request_headers_no_tap_, {{"This is request payload"}}, nullptr, + response_headers_no_tap_, {{"This is test payload"}}, nullptr); + // Should tap, request and response body match. 
+ makeRequest(request_headers_no_tap_, {{"This is request payload"}}, nullptr, + response_headers_no_tap_, {{"This is resp"}, {"onse payload"}}, nullptr); + + envoy::data::tap::v3::TraceWrapper trace; + admin_response_->waitForBodyData(1); + TestUtility::loadFromYaml(admin_response_->body(), trace); + EXPECT_NE(std::string::npos, + trace.http_buffered_trace().request().body().as_string().find("request")); + EXPECT_NE(std::string::npos, + trace.http_buffered_trace().response().body().as_string().find("response")); + + admin_client_->close(); +} + } // namespace } // namespace Envoy diff --git a/test/extensions/filters/http/tap/tap_filter_test.cc b/test/extensions/filters/http/tap/tap_filter_test.cc index 1f305ddbe79e6..7ed0f1afb77fe 100644 --- a/test/extensions/filters/http/tap/tap_filter_test.cc +++ b/test/extensions/filters/http/tap/tap_filter_test.cc @@ -3,7 +3,7 @@ #include "test/extensions/filters/http/tap/common.h" #include "test/mocks/http/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "test/mocks/stream_info/mocks.h" #include "test/test_common/utility.h" @@ -101,7 +101,7 @@ TEST_F(TapFilterTest, Config) { Http::TestRequestHeaderMapImpl request_headers; EXPECT_CALL(*http_per_request_tapper_, onRequestHeaders(_)); EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false)); - Buffer::OwnedImpl request_body; + Buffer::OwnedImpl request_body("hello"); EXPECT_CALL(*http_per_request_tapper_, onRequestBody(_)); EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(request_body, false)); Http::TestRequestTrailerMapImpl request_trailers; @@ -113,7 +113,7 @@ TEST_F(TapFilterTest, Config) { filter_->encode100ContinueHeaders(response_headers)); EXPECT_CALL(*http_per_request_tapper_, onResponseHeaders(_)); EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers, false)); - Buffer::OwnedImpl response_body; + Buffer::OwnedImpl 
response_body("hello"); EXPECT_CALL(*http_per_request_tapper_, onResponseBody(_)); EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(response_body, false)); Http::TestResponseTrailerMapImpl response_trailers; @@ -133,7 +133,7 @@ TEST(TapFilterConfigTest, InvalidProto) { R"EOF( common_config: static_config: - match_config: + match: any_match: true output_config: sinks: @@ -150,6 +150,29 @@ TEST(TapFilterConfigTest, InvalidProto) { "Error: Specifying admin streaming output without configuring admin."); } +TEST(TapFilterConfigTest, NeitherMatchNorMatchConfig) { + const std::string filter_config = + R"EOF( + common_config: + static_config: + output_config: + sinks: + - format: PROTO_BINARY + file_per_tap: + path_prefix: abc +)EOF"; + + envoy::extensions::filters::http::tap::v3::Tap config; + TestUtility::loadFromYaml(filter_config, config); + NiceMock context; + TapFilterFactory factory; + + EXPECT_THROW_WITH_MESSAGE(factory.createFilterFactoryFromProto(config, "stats", context), + EnvoyException, + fmt::format("Neither match nor match_config is set in TapConfig: {}", + config.common_config().static_config().DebugString())); +} + } // namespace } // namespace TapFilter } // namespace HttpFilters diff --git a/test/extensions/filters/listener/common/fuzz/BUILD b/test/extensions/filters/listener/common/fuzz/BUILD new file mode 100644 index 0000000000000..85ed3cbf7304a --- /dev/null +++ b/test/extensions/filters/listener/common/fuzz/BUILD @@ -0,0 +1,38 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_test_library", + "envoy_package", + "envoy_proto_library", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_proto_library( + name = "listener_filter_fuzzer_proto", + srcs = ["listener_filter_fuzzer.proto"], +) + +envoy_cc_test_library( + name = "listener_filter_fuzzer_lib", + srcs = ["listener_filter_fuzzer.cc"], + hdrs = ["listener_filter_fuzzer.h"], + deps = [ + ":listener_filter_fakes", + ":listener_filter_fuzzer_proto_cc_proto", + 
"//include/envoy/network:filter_interface", + "//test/mocks/network:network_mocks", + "//test/test_common:threadsafe_singleton_injector_lib", + ], +) + +envoy_cc_test_library( + name = "listener_filter_fakes", + srcs = ["listener_filter_fakes.cc"], + hdrs = ["listener_filter_fakes.h"], + deps = [ + "//source/common/api:os_sys_calls_lib", + "//test/mocks/network:network_mocks", + ], +) diff --git a/test/extensions/filters/listener/common/fuzz/listener_filter_fakes.cc b/test/extensions/filters/listener/common/fuzz/listener_filter_fakes.cc new file mode 100644 index 0000000000000..f0546c7950fe8 --- /dev/null +++ b/test/extensions/filters/listener/common/fuzz/listener_filter_fakes.cc @@ -0,0 +1,86 @@ +#include "test/extensions/filters/listener/common/fuzz/listener_filter_fakes.h" + +namespace Envoy { +namespace Extensions { +namespace ListenerFilters { + +Network::IoHandle& FakeConnectionSocket::ioHandle() { return *io_handle_; } + +const Network::IoHandle& FakeConnectionSocket::ioHandle() const { return *io_handle_; } + +void FakeConnectionSocket::setLocalAddress( + const Network::Address::InstanceConstSharedPtr& local_address) { + local_address_ = local_address; + if (local_address_ != nullptr) { + addr_type_ = local_address_->type(); + } +} + +void FakeConnectionSocket::setRemoteAddress( + const Network::Address::InstanceConstSharedPtr& remote_address) { + remote_address_ = remote_address; +} + +const Network::Address::InstanceConstSharedPtr& FakeConnectionSocket::localAddress() const { + return local_address_; +} + +const Network::Address::InstanceConstSharedPtr& FakeConnectionSocket::remoteAddress() const { + return remote_address_; +} + +Network::Address::Type FakeConnectionSocket::addressType() const { return addr_type_; } + +absl::optional FakeConnectionSocket::ipVersion() const { + if (local_address_ == nullptr || addr_type_ != Network::Address::Type::Ip) { + return absl::nullopt; + } + + return local_address_->ip()->version(); +} + +void 
FakeConnectionSocket::setDetectedTransportProtocol(absl::string_view protocol) { + transport_protocol_ = std::string(protocol); +} + +absl::string_view FakeConnectionSocket::detectedTransportProtocol() const { + return transport_protocol_; +} + +void FakeConnectionSocket::setRequestedApplicationProtocols( + const std::vector& protocols) { + application_protocols_.clear(); + for (const auto& protocol : protocols) { + application_protocols_.emplace_back(protocol); + } +} + +const std::vector& FakeConnectionSocket::requestedApplicationProtocols() const { + return application_protocols_; +} + +void FakeConnectionSocket::setRequestedServerName(absl::string_view server_name) { + server_name_ = std::string(server_name); +} + +absl::string_view FakeConnectionSocket::requestedServerName() const { return server_name_; } + +Api::SysCallIntResult FakeConnectionSocket::getSocketOption(int level, int, void* optval, + socklen_t*) const { + switch (level) { + case SOL_IPV6: + static_cast(optval)->ss_family = AF_INET6; + break; + case SOL_IP: + static_cast(optval)->ss_family = AF_INET; + break; + default: + NOT_REACHED_GCOVR_EXCL_LINE; + } + + return Api::SysCallIntResult{0, 0}; +} + +} // namespace ListenerFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/listener/common/fuzz/listener_filter_fakes.h b/test/extensions/filters/listener/common/fuzz/listener_filter_fakes.h new file mode 100644 index 0000000000000..4e13b4e6f4189 --- /dev/null +++ b/test/extensions/filters/listener/common/fuzz/listener_filter_fakes.h @@ -0,0 +1,70 @@ +#include "common/api/os_sys_calls_impl.h" +#include "common/network/io_socket_handle_impl.h" + +#include "test/mocks/network/mocks.h" + +#include "gmock/gmock.h" + +namespace Envoy { +namespace Extensions { +namespace ListenerFilters { + +static constexpr int kFakeSocketFd = 42; + +class FakeConnectionSocket : public Network::MockConnectionSocket { +public: + FakeConnectionSocket() + : 
io_handle_(std::make_unique(kFakeSocketFd)), + local_address_(nullptr), remote_address_(nullptr) {} + + ~FakeConnectionSocket() override { io_handle_->close(); } + + Network::IoHandle& ioHandle() override; + + const Network::IoHandle& ioHandle() const override; + + void setLocalAddress(const Network::Address::InstanceConstSharedPtr& local_address) override; + + void setRemoteAddress(const Network::Address::InstanceConstSharedPtr& remote_address) override; + + const Network::Address::InstanceConstSharedPtr& localAddress() const override; + + const Network::Address::InstanceConstSharedPtr& remoteAddress() const override; + + Network::Address::Type addressType() const override; + + absl::optional ipVersion() const override; + + void setRequestedApplicationProtocols(const std::vector& protocols) override; + + const std::vector& requestedApplicationProtocols() const override; + + void setDetectedTransportProtocol(absl::string_view protocol) override; + + absl::string_view detectedTransportProtocol() const override; + + void setRequestedServerName(absl::string_view server_name) override; + + absl::string_view requestedServerName() const override; + + Api::SysCallIntResult getSocketOption(int level, int, void* optval, socklen_t*) const override; + +private: + const Network::IoHandlePtr io_handle_; + Network::Address::InstanceConstSharedPtr local_address_; + Network::Address::InstanceConstSharedPtr remote_address_; + Network::Address::Type addr_type_; + std::vector application_protocols_; + std::string transport_protocol_; + std::string server_name_; +}; + +// TODO: Move over to Fake (name is confusing) +class FakeOsSysCalls : public Api::OsSysCallsImpl { +public: + MOCK_METHOD(Api::SysCallSizeResult, recv, (os_fd_t, void*, size_t, int)); +}; + +} // namespace ListenerFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.cc 
b/test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.cc new file mode 100644 index 0000000000000..0f5aa60b8d44d --- /dev/null +++ b/test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.cc @@ -0,0 +1,99 @@ +#include "test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.h" + +namespace Envoy { +namespace Extensions { +namespace ListenerFilters { + +void ListenerFilterFuzzer::fuzz( + Network::ListenerFilter& filter, + const test::extensions::filters::listener::FilterFuzzTestCase& input) { + try { + socket_.setLocalAddress(Network::Utility::resolveUrl(input.sock().local_address())); + } catch (const EnvoyException& e) { + // Socket's local address will be nullptr by default if fuzzed local address is malformed + // or missing - local address field in proto is optional + } + try { + socket_.setRemoteAddress(Network::Utility::resolveUrl(input.sock().remote_address())); + } catch (const EnvoyException& e) { + // Socket's remote address will be nullptr by default if fuzzed remote address is malformed + // or missing - remote address field in proto is optional + } + + FuzzedHeader header(input); + + if (!header.empty()) { + ON_CALL(os_sys_calls_, recv(kFakeSocketFd, _, _, MSG_PEEK)) + .WillByDefault(testing::Return(Api::SysCallSizeResult{static_cast(0), 0})); + + ON_CALL(dispatcher_, + createFileEvent_(_, _, Event::FileTriggerType::Edge, + Event::FileReadyType::Read | Event::FileReadyType::Closed)) + .WillByDefault(testing::DoAll(testing::SaveArg<1>(&file_event_callback_), + testing::ReturnNew>())); + } + + filter.onAccept(cb_); + + if (file_event_callback_ == nullptr) { + // If filter does not call createFileEvent (i.e. 
original_dst and original_src) + return; + } + + if (!header.empty()) { + { + testing::InSequence s; + + EXPECT_CALL(os_sys_calls_, recv(kFakeSocketFd, _, _, MSG_PEEK)) + .Times(testing::AnyNumber()) + .WillRepeatedly(Invoke( + [&header](os_fd_t, void* buffer, size_t length, int) -> Api::SysCallSizeResult { + return header.next(buffer, length); + })); + } + + bool got_continue = false; + + ON_CALL(cb_, continueFilterChain(true)) + .WillByDefault(testing::InvokeWithoutArgs([&got_continue]() { got_continue = true; })); + + while (!got_continue) { + if (header.done()) { // End of stream reached but not done + file_event_callback_(Event::FileReadyType::Closed); + } else { + file_event_callback_(Event::FileReadyType::Read); + } + } + } +} + +FuzzedHeader::FuzzedHeader(const test::extensions::filters::listener::FilterFuzzTestCase& input) + : nreads_(input.data_size()), nread_(0) { + size_t len = 0; + for (int i = 0; i < nreads_; i++) { + len += input.data(i).size(); + } + + header_.reserve(len); + + for (int i = 0; i < nreads_; i++) { + header_ += input.data(i); + indices_.push_back(header_.size()); + } +} + +Api::SysCallSizeResult FuzzedHeader::next(void* buffer, size_t length) { + if (done()) { // End of stream reached + nread_ = nreads_ - 1; // Decrement to avoid out-of-range for last recv() call + } + memcpy(buffer, header_.data(), std::min(indices_[nread_], length)); + return Api::SysCallSizeResult{static_cast(indices_[nread_++]), 0}; +} + +bool FuzzedHeader::done() { return nread_ >= nreads_; } + +bool FuzzedHeader::empty() { return nreads_ == 0; } + +} // namespace ListenerFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.h b/test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.h new file mode 100644 index 0000000000000..66b6f8707bfd2 --- /dev/null +++ b/test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.h @@ -0,0 +1,55 @@ +#include 
"envoy/network/filter.h" + +#include "test/extensions/filters/listener/common/fuzz/listener_filter_fakes.h" +#include "test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.pb.validate.h" +#include "test/mocks/event/mocks.h" +#include "test/mocks/network/mocks.h" +#include "test/test_common/threadsafe_singleton_injector.h" + +#include "gmock/gmock.h" + +namespace Envoy { +namespace Extensions { +namespace ListenerFilters { + +class ListenerFilterFuzzer { +public: + ListenerFilterFuzzer() { + ON_CALL(cb_, socket()).WillByDefault(testing::ReturnRef(socket_)); + ON_CALL(cb_, dispatcher()).WillByDefault(testing::ReturnRef(dispatcher_)); + } + + void fuzz(Network::ListenerFilter& filter, + const test::extensions::filters::listener::FilterFuzzTestCase& input); + +private: + FakeOsSysCalls os_sys_calls_; + TestThreadsafeSingletonInjector os_calls_{&os_sys_calls_}; + NiceMock cb_; + FakeConnectionSocket socket_; + NiceMock dispatcher_; + Event::FileReadyCb file_event_callback_; +}; + +class FuzzedHeader { +public: + FuzzedHeader(const test::extensions::filters::listener::FilterFuzzTestCase& input); + + // Copies next read into buffer and returns the number of bytes written + Api::SysCallSizeResult next(void* buffer, size_t length); + + bool done(); + + // Returns true if data field in proto is empty + bool empty(); + +private: + const int nreads_; // Number of reads + int nread_; // Counter of current read + std::string header_; // Construct header from single or multiple reads + std::vector indices_; // Ending indices for each read +}; + +} // namespace ListenerFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.proto b/test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.proto new file mode 100644 index 0000000000000..5741ed9edfa3e --- /dev/null +++ b/test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.proto @@ -0,0 +1,13 @@ +syntax = 
"proto3"; + +package test.extensions.filters.listener; + +message Socket { + string local_address = 1; + string remote_address = 2; +} + +message FilterFuzzTestCase { + Socket sock = 1; + repeated string data = 2; +} \ No newline at end of file diff --git a/test/extensions/filters/listener/http_inspector/BUILD b/test/extensions/filters/listener/http_inspector/BUILD index ddfac953f6b9a..05f898a7bf90f 100644 --- a/test/extensions/filters/listener/http_inspector/BUILD +++ b/test/extensions/filters/listener/http_inspector/BUILD @@ -1,7 +1,6 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", + "envoy_cc_fuzz_test", "envoy_package", ) load( @@ -9,14 +8,18 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( name = "http_inspector_test", srcs = ["http_inspector_test.cc"], extension_name = "envoy.filters.listener.http_inspector", + tags = ["fails_on_windows"], deps = [ "//source/common/common:hex_lib", + "//source/common/http:utility_lib", "//source/extensions/filters/listener/http_inspector:http_inspector_lib", "//test/mocks/api:api_mocks", "//test/mocks/network:network_mocks", @@ -35,8 +38,18 @@ envoy_extension_cc_test( "//source/extensions/filters/listener/http_inspector:http_inspector_lib", "//test/mocks/api:api_mocks", "//test/mocks/network:network_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:listener_factory_context_mocks", "//test/mocks/stats:stats_mocks", "//test/test_common:threadsafe_singleton_injector_lib", ], ) + +envoy_cc_fuzz_test( + name = "http_inspector_fuzz_test", + srcs = ["http_inspector_fuzz_test.cc"], + corpus = "http_inspector_corpus", + deps = [ + "//source/extensions/filters/listener/http_inspector:http_inspector_lib", + "//test/extensions/filters/listener/common/fuzz:listener_filter_fuzzer_lib", + ], +) diff --git a/test/extensions/filters/listener/http_inspector/http_inspector_config_test.cc 
b/test/extensions/filters/listener/http_inspector/http_inspector_config_test.cc index c2037ed8ca675..4fba684513d27 100644 --- a/test/extensions/filters/listener/http_inspector/http_inspector_config_test.cc +++ b/test/extensions/filters/listener/http_inspector/http_inspector_config_test.cc @@ -1,7 +1,7 @@ #include "extensions/filters/listener/http_inspector/http_inspector.h" #include "extensions/filters/listener/well_known_names.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/listener_factory_context.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git a/test/extensions/filters/listener/http_inspector/http_inspector_corpus/bad_header b/test/extensions/filters/listener/http_inspector/http_inspector_corpus/bad_header new file mode 100644 index 0000000000000..a84991228ff57 --- /dev/null +++ b/test/extensions/filters/listener/http_inspector/http_inspector_corpus/bad_header @@ -0,0 +1 @@ +data: "X" \ No newline at end of file diff --git a/test/extensions/filters/listener/http_inspector/http_inspector_corpus/incomplete_header b/test/extensions/filters/listener/http_inspector/http_inspector_corpus/incomplete_header new file mode 100644 index 0000000000000..db337b0c762ae --- /dev/null +++ b/test/extensions/filters/listener/http_inspector/http_inspector_corpus/incomplete_header @@ -0,0 +1 @@ +data: "GE" \ No newline at end of file diff --git a/test/extensions/filters/listener/http_inspector/http_inspector_corpus/invalid_method b/test/extensions/filters/listener/http_inspector/http_inspector_corpus/invalid_method new file mode 100644 index 0000000000000..b14ffd72e1168 --- /dev/null +++ b/test/extensions/filters/listener/http_inspector/http_inspector_corpus/invalid_method @@ -0,0 +1 @@ +data: "BAD /anything HTTP/1.1" \ No newline at end of file diff --git a/test/extensions/filters/listener/http_inspector/http_inspector_corpus/invalid_request b/test/extensions/filters/listener/http_inspector/http_inspector_corpus/invalid_request new file mode 
100644 index 0000000000000..a7943ddb30b19 --- /dev/null +++ b/test/extensions/filters/listener/http_inspector/http_inspector_corpus/invalid_request @@ -0,0 +1 @@ +data: "BAD /anything HTTP/1.1\r\n" \ No newline at end of file diff --git a/test/extensions/filters/listener/http_inspector/http_inspector_corpus/multiple_http10 b/test/extensions/filters/listener/http_inspector/http_inspector_corpus/multiple_http10 new file mode 100644 index 0000000000000..42fa7434ebbb0 --- /dev/null +++ b/test/extensions/filters/listener/http_inspector/http_inspector_corpus/multiple_http10 @@ -0,0 +1,3 @@ +data: "GET /anyt" +data: "hing HT" +data: "TP/1.0\r" \ No newline at end of file diff --git a/test/extensions/filters/listener/http_inspector/http_inspector_corpus/multiple_incomplete b/test/extensions/filters/listener/http_inspector/http_inspector_corpus/multiple_incomplete new file mode 100644 index 0000000000000..58c5d8ad86130 --- /dev/null +++ b/test/extensions/filters/listener/http_inspector/http_inspector_corpus/multiple_incomplete @@ -0,0 +1,2 @@ +data: "G" +data: "E" \ No newline at end of file diff --git a/test/extensions/filters/listener/http_inspector/http_inspector_corpus/valid_http10 b/test/extensions/filters/listener/http_inspector/http_inspector_corpus/valid_http10 new file mode 100644 index 0000000000000..5512c5504dd90 --- /dev/null +++ b/test/extensions/filters/listener/http_inspector/http_inspector_corpus/valid_http10 @@ -0,0 +1 @@ +data: "GET /anything HTTP/1.0\r\nhost: google.com\r\nuser-agent: curl/7.64.0\r\naccept: */*\r\nx-forwarded-proto: http\r\nx-request-id: a52df4a0-ed00-4a19-86a7-80e5049c6c84\r\nx-envoy-expected-rq-timeout-ms: 15000\r\ncontent-length: 0\r\n\r\n" \ No newline at end of file diff --git a/test/extensions/filters/listener/http_inspector/http_inspector_corpus/valid_http11 b/test/extensions/filters/listener/http_inspector/http_inspector_corpus/valid_http11 new file mode 100644 index 0000000000000..56906d74b1c14 --- /dev/null +++ 
b/test/extensions/filters/listener/http_inspector/http_inspector_corpus/valid_http11 @@ -0,0 +1 @@ +data: "GET /anything HTTP/1.1\r\nhost: google.com\r\nuser-agent: curl/7.64.0\r\naccept: */*\r\nx-forwarded-proto: http\r\nx-request-id: a52df4a0-ed00-4a19-86a7-80e5049c6c84\r\nx-envoy-expected-rq-timeout-ms: 15000\r\ncontent-length: 3\r\n\r\nfoo" \ No newline at end of file diff --git a/test/extensions/filters/listener/http_inspector/http_inspector_corpus/valid_http2 b/test/extensions/filters/listener/http_inspector/http_inspector_corpus/valid_http2 new file mode 100644 index 0000000000000..0e1faf044c0fa --- /dev/null +++ b/test/extensions/filters/listener/http_inspector/http_inspector_corpus/valid_http2 @@ -0,0 +1 @@ +data: "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n\x00\x00\x0c\x04\x00\x00\x00\x00\x00\x00\x04\x10\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x04\x08\x00\x00\x00\x00\x00\x0f\xff\x00\x01\x00\x00}\x01\x05\x00\x00\x00\x01A\x8a\xa0\xe4\x1d\x13\x9d\t\xb8\xf0\x00\x0f\x04\x88`uzL\xe6\xaaf\x05\x82\x86z\x88%\xb6P\xc3\xab\xb8\xd2\xe0S\x03*/*@\x8d\xf2\xb4\xa7\xb3\xc0\xec\x90\xb2-]\x87I\xff\x83\x9d)\xaf@\x89\xf2\xb5\x85\xediP\x95\x8d\'\x9a\x18\x9e\x03\xf1\xcaU\x82&_Y\xa7[\n\xc3\x11\x19Y\xc7\xe4\x90\x04\x90\x8d\xb6\xe8?@\x96\xf2\xb1j\xee\x7fK\x17\xcde\"K\"\xd6vY&\xa4\xa7\xb5+R\x8f\x84\x0b`\x00?" 
\ No newline at end of file diff --git a/test/extensions/filters/listener/http_inspector/http_inspector_fuzz_test.cc b/test/extensions/filters/listener/http_inspector/http_inspector_fuzz_test.cc new file mode 100644 index 0000000000000..5f867c22b179f --- /dev/null +++ b/test/extensions/filters/listener/http_inspector/http_inspector_fuzz_test.cc @@ -0,0 +1,32 @@ +#include "extensions/filters/listener/http_inspector/http_inspector.h" + +#include "test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.h" +#include "test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.pb.validate.h" +#include "test/fuzz/fuzz_runner.h" + +namespace Envoy { +namespace Extensions { +namespace ListenerFilters { +namespace HttpInspector { + +DEFINE_PROTO_FUZZER(const test::extensions::filters::listener::FilterFuzzTestCase& input) { + + try { + TestUtility::validate(input); + } catch (const ProtoValidationException& e) { + ENVOY_LOG_MISC(debug, "ProtoValidationException: {}", e.what()); + return; + } + + Stats::IsolatedStoreImpl store; + ConfigSharedPtr cfg = std::make_shared(store); + auto filter = std::make_unique(cfg); + + ListenerFilterFuzzer fuzzer; + fuzzer.fuzz(*filter, input); +} + +} // namespace HttpInspector +} // namespace ListenerFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/listener/http_inspector/http_inspector_test.cc b/test/extensions/filters/listener/http_inspector/http_inspector_test.cc index c55189470427f..a6638892f26fc 100644 --- a/test/extensions/filters/listener/http_inspector/http_inspector_test.cc +++ b/test/extensions/filters/listener/http_inspector/http_inspector_test.cc @@ -1,4 +1,5 @@ #include "common/common/hex.h" +#include "common/http/utility.h" #include "common/network/io_socket_handle_impl.h" #include "extensions/filters/listener/http_inspector/http_inspector.h" @@ -104,7 +105,7 @@ TEST_F(HttpInspectorTest, InlineReadInspectHttp10) { memcpy(buffer, header.data(), header.size()); 
return Api::SysCallSizeResult{ssize_t(header.size()), 0}; })); - const std::vector alpn_protos{absl::string_view("http/1.0")}; + const std::vector alpn_protos{Http::Utility::AlpnNames::get().Http10}; EXPECT_CALL(dispatcher_, createFileEvent_(_, _, _, _)).Times(0); @@ -152,7 +153,7 @@ TEST_F(HttpInspectorTest, InspectHttp10) { return Api::SysCallSizeResult{ssize_t(header.size()), 0}; })); - const std::vector alpn_protos{absl::string_view("http/1.0")}; + const std::vector alpn_protos{Http::Utility::AlpnNames::get().Http10}; EXPECT_CALL(socket_, setRequestedApplicationProtocols(alpn_protos)); EXPECT_CALL(cb_, continueFilterChain(true)); @@ -176,7 +177,7 @@ TEST_F(HttpInspectorTest, InspectHttp11) { return Api::SysCallSizeResult{ssize_t(header.size()), 0}; })); - const std::vector alpn_protos{absl::string_view("http/1.1")}; + const std::vector alpn_protos{Http::Utility::AlpnNames::get().Http11}; EXPECT_CALL(socket_, setRequestedApplicationProtocols(alpn_protos)); EXPECT_CALL(cb_, continueFilterChain(true)); @@ -200,7 +201,7 @@ TEST_F(HttpInspectorTest, InspectHttp11WithNonEmptyRequestBody) { return Api::SysCallSizeResult{ssize_t(header.size()), 0}; })); - const std::vector alpn_protos{absl::string_view("http/1.1")}; + const std::vector alpn_protos{Http::Utility::AlpnNames::get().Http11}; EXPECT_CALL(socket_, setRequestedApplicationProtocols(alpn_protos)); EXPECT_CALL(cb_, continueFilterChain(true)); @@ -221,7 +222,7 @@ TEST_F(HttpInspectorTest, ExtraSpaceInRequestLine) { return Api::SysCallSizeResult{ssize_t(header.size()), 0}; })); - const std::vector alpn_protos{absl::string_view("http/1.1")}; + const std::vector alpn_protos{Http::Utility::AlpnNames::get().Http11}; EXPECT_CALL(socket_, setRequestedApplicationProtocols(alpn_protos)); EXPECT_CALL(cb_, continueFilterChain(true)); @@ -277,7 +278,7 @@ TEST_F(HttpInspectorTest, OldHttpProtocol) { return Api::SysCallSizeResult{ssize_t(header.size()), 0}; })); - const std::vector alpn_protos{absl::string_view("http/1.0")}; + 
const std::vector alpn_protos{Http::Utility::AlpnNames::get().Http10}; EXPECT_CALL(socket_, setRequestedApplicationProtocols(alpn_protos)); EXPECT_CALL(cb_, continueFilterChain(true)); file_event_callback_(Event::FileReadyType::Read); @@ -321,7 +322,7 @@ TEST_F(HttpInspectorTest, InspectHttp2) { return Api::SysCallSizeResult{ssize_t(data.size()), 0}; })); - const std::vector alpn_protos{absl::string_view("h2c")}; + const std::vector alpn_protos{Http::Utility::AlpnNames::get().Http2c}; EXPECT_CALL(socket_, setRequestedApplicationProtocols(alpn_protos)); EXPECT_CALL(cb_, continueFilterChain(true)); @@ -353,7 +354,7 @@ TEST_F(HttpInspectorTest, ReadError) { init(); EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK)).WillOnce(InvokeWithoutArgs([]() { - return Api::SysCallSizeResult{ssize_t(-1), ENOTSUP}; + return Api::SysCallSizeResult{ssize_t(-1), SOCKET_ERROR_NOT_SUP}; })); EXPECT_CALL(cb_, continueFilterChain(false)); file_event_callback_(Event::FileReadyType::Read); @@ -362,7 +363,7 @@ TEST_F(HttpInspectorTest, ReadError) { TEST_F(HttpInspectorTest, MultipleReadsHttp2) { init(); - const std::vector alpn_protos = {absl::string_view("h2c")}; + const std::vector alpn_protos{Http::Utility::AlpnNames::get().Http2c}; const std::string header = "505249202a20485454502f322e300d0a0d0a534d0d0a0d0a00000c04000000000000041000000000020000000000" @@ -375,7 +376,7 @@ TEST_F(HttpInspectorTest, MultipleReadsHttp2) { InSequence s; EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK)).WillOnce(InvokeWithoutArgs([]() { - return Api::SysCallSizeResult{ssize_t(-1), EAGAIN}; + return Api::SysCallSizeResult{ssize_t(-1), SOCKET_ERROR_AGAIN}; })); for (size_t i = 1; i <= 24; i++) { @@ -408,7 +409,7 @@ TEST_F(HttpInspectorTest, MultipleReadsHttp2BadPreface) { InSequence s; EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK)).WillOnce(InvokeWithoutArgs([]() { - return Api::SysCallSizeResult{ssize_t(-1), EAGAIN}; + return Api::SysCallSizeResult{ssize_t(-1), SOCKET_ERROR_AGAIN}; })); for (size_t 
i = 1; i <= data.size(); i++) { @@ -440,7 +441,7 @@ TEST_F(HttpInspectorTest, MultipleReadsHttp1) { InSequence s; EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK)).WillOnce(InvokeWithoutArgs([]() { - return Api::SysCallSizeResult{ssize_t(-1), EAGAIN}; + return Api::SysCallSizeResult{ssize_t(-1), SOCKET_ERROR_AGAIN}; })); for (size_t i = 1; i <= data.size(); i++) { @@ -455,7 +456,7 @@ TEST_F(HttpInspectorTest, MultipleReadsHttp1) { } bool got_continue = false; - const std::vector alpn_protos = {absl::string_view("http/1.0")}; + const std::vector alpn_protos{Http::Utility::AlpnNames::get().Http10}; EXPECT_CALL(socket_, setRequestedApplicationProtocols(alpn_protos)); EXPECT_CALL(cb_, continueFilterChain(true)).WillOnce(InvokeWithoutArgs([&got_continue]() { got_continue = true; @@ -474,7 +475,7 @@ TEST_F(HttpInspectorTest, MultipleReadsHttp1IncompleteHeader) { InSequence s; EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK)).WillOnce(InvokeWithoutArgs([]() { - return Api::SysCallSizeResult{ssize_t(-1), EAGAIN}; + return Api::SysCallSizeResult{ssize_t(-1), SOCKET_ERROR_AGAIN}; })); for (size_t i = 1; i <= data.size(); i++) { @@ -506,7 +507,7 @@ TEST_F(HttpInspectorTest, MultipleReadsHttp1IncompleteBadHeader) { InSequence s; EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK)).WillOnce(InvokeWithoutArgs([]() { - return Api::SysCallSizeResult{ssize_t(-1), EAGAIN}; + return Api::SysCallSizeResult{ssize_t(-1), SOCKET_ERROR_AGAIN}; })); for (size_t i = 1; i <= data.size(); i++) { @@ -540,7 +541,7 @@ TEST_F(HttpInspectorTest, MultipleReadsHttp1BadProtocol) { InSequence s; EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK)).WillOnce(InvokeWithoutArgs([]() { - return Api::SysCallSizeResult{ssize_t(-1), EAGAIN}; + return Api::SysCallSizeResult{ssize_t(-1), SOCKET_ERROR_AGAIN}; })); for (size_t i = 1; i <= truncate_header.size(); i++) { @@ -574,7 +575,7 @@ TEST_F(HttpInspectorTest, Http1WithLargeRequestLine) { InSequence s; EXPECT_CALL(os_sys_calls_, recv(42, _, _, 
MSG_PEEK)).WillOnce(InvokeWithoutArgs([]() { - return Api::SysCallSizeResult{ssize_t(-1), EAGAIN}; + return Api::SysCallSizeResult{ssize_t(-1), SOCKET_ERROR_AGAIN}; })); uint64_t num_loops = Config::MAX_INSPECT_SIZE; @@ -599,7 +600,7 @@ TEST_F(HttpInspectorTest, Http1WithLargeRequestLine) { } bool got_continue = false; - const std::vector alpn_protos = {absl::string_view("http/1.0")}; + const std::vector alpn_protos{Http::Utility::AlpnNames::get().Http10}; EXPECT_CALL(socket_, setRequestedApplicationProtocols(alpn_protos)); EXPECT_CALL(cb_, continueFilterChain(true)).WillOnce(InvokeWithoutArgs([&got_continue]() { got_continue = true; @@ -620,7 +621,7 @@ TEST_F(HttpInspectorTest, Http1WithLargeHeader) { InSequence s; EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK)).WillOnce(InvokeWithoutArgs([]() { - return Api::SysCallSizeResult{ssize_t(-1), EAGAIN}; + return Api::SysCallSizeResult{ssize_t(-1), SOCKET_ERROR_AGAIN}; })); for (size_t i = 1; i <= 20; i++) { @@ -635,7 +636,7 @@ TEST_F(HttpInspectorTest, Http1WithLargeHeader) { } bool got_continue = false; - const std::vector alpn_protos = {absl::string_view("http/1.0")}; + const std::vector alpn_protos{Http::Utility::AlpnNames::get().Http10}; EXPECT_CALL(socket_, setRequestedApplicationProtocols(alpn_protos)); EXPECT_CALL(cb_, continueFilterChain(true)).WillOnce(InvokeWithoutArgs([&got_continue]() { got_continue = true; diff --git a/test/extensions/filters/listener/original_dst/BUILD b/test/extensions/filters/listener/original_dst/BUILD index aaed1667cdf19..ba71d8f6bbb9f 100644 --- a/test/extensions/filters/listener/original_dst/BUILD +++ b/test/extensions/filters/listener/original_dst/BUILD @@ -1,7 +1,6 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", + "envoy_cc_fuzz_test", "envoy_package", ) load( @@ -9,6 +8,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( @@ -20,3 +21,13 @@ envoy_extension_cc_test( 
"//test/test_common:utility_lib", ], ) + +envoy_cc_fuzz_test( + name = "original_dst_fuzz_test", + srcs = ["original_dst_fuzz_test.cc"], + corpus = "original_dst_corpus", + deps = [ + "//source/extensions/filters/listener/original_dst:original_dst_lib", + "//test/extensions/filters/listener/common/fuzz:listener_filter_fuzzer_lib", + ], +) diff --git a/test/extensions/filters/listener/original_dst/original_dst_corpus/invalid_scheme b/test/extensions/filters/listener/original_dst/original_dst_corpus/invalid_scheme new file mode 100644 index 0000000000000..67994b567f870 --- /dev/null +++ b/test/extensions/filters/listener/original_dst/original_dst_corpus/invalid_scheme @@ -0,0 +1,3 @@ +sock { + local_address: "hello world" +} \ No newline at end of file diff --git a/test/extensions/filters/listener/original_dst/original_dst_corpus/invalid_unix b/test/extensions/filters/listener/original_dst/original_dst_corpus/invalid_unix new file mode 100644 index 0000000000000..ee8917b15305f --- /dev/null +++ b/test/extensions/filters/listener/original_dst/original_dst_corpus/invalid_unix @@ -0,0 +1,3 @@ +sock { + local_address: "unix://tmp/server" +} \ No newline at end of file diff --git a/test/extensions/filters/listener/original_dst/original_dst_corpus/valid_ipv4 b/test/extensions/filters/listener/original_dst/original_dst_corpus/valid_ipv4 new file mode 100644 index 0000000000000..a0510b8c253c3 --- /dev/null +++ b/test/extensions/filters/listener/original_dst/original_dst_corpus/valid_ipv4 @@ -0,0 +1,3 @@ +sock { + local_address: "tcp://0.0.0.0:0" +} \ No newline at end of file diff --git a/test/extensions/filters/listener/original_dst/original_dst_corpus/valid_ipv6 b/test/extensions/filters/listener/original_dst/original_dst_corpus/valid_ipv6 new file mode 100644 index 0000000000000..32bdadc805ce7 --- /dev/null +++ b/test/extensions/filters/listener/original_dst/original_dst_corpus/valid_ipv6 @@ -0,0 +1,3 @@ +sock { + local_address: "tcp://[a:b:c:d::]:0" +} \ No newline at 
end of file diff --git a/test/extensions/filters/listener/original_dst/original_dst_fuzz_test.cc b/test/extensions/filters/listener/original_dst/original_dst_fuzz_test.cc new file mode 100644 index 0000000000000..4eb1899f3b351 --- /dev/null +++ b/test/extensions/filters/listener/original_dst/original_dst_fuzz_test.cc @@ -0,0 +1,29 @@ +#include "extensions/filters/listener/original_dst/original_dst.h" + +#include "test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.h" +#include "test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.pb.validate.h" +#include "test/fuzz/fuzz_runner.h" + +namespace Envoy { +namespace Extensions { +namespace ListenerFilters { +namespace OriginalDst { + +DEFINE_PROTO_FUZZER(const test::extensions::filters::listener::FilterFuzzTestCase& input) { + + try { + TestUtility::validate(input); + } catch (const ProtoValidationException& e) { + ENVOY_LOG_MISC(debug, "ProtoValidationException: {}", e.what()); + return; + } + + auto filter = std::make_unique(); + ListenerFilterFuzzer fuzzer; + fuzzer.fuzz(*filter, input); +} + +} // namespace OriginalDst +} // namespace ListenerFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/listener/original_src/BUILD b/test/extensions/filters/listener/original_src/BUILD index 0961acdb25947..217e8b2586deb 100644 --- a/test/extensions/filters/listener/original_src/BUILD +++ b/test/extensions/filters/listener/original_src/BUILD @@ -1,14 +1,16 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", + "envoy_cc_fuzz_test", "envoy_package", + "envoy_proto_library", ) load( "//test/extensions:extensions_build_system.bzl", "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( @@ -25,11 +27,12 @@ envoy_extension_cc_test( name = "original_src_config_factory_test", srcs = ["original_src_config_factory_test.cc"], extension_name = "envoy.filters.listener.original_src", + tags = 
["fails_on_windows"], deps = [ "//source/extensions/filters/listener/original_src:config", "//source/extensions/filters/listener/original_src:config_lib", "//source/extensions/filters/listener/original_src:original_src_lib", - "//test/mocks/server:server_mocks", + "//test/mocks/server:listener_factory_context_mocks", ], ) @@ -48,3 +51,23 @@ envoy_extension_cc_test( "@envoy_api//envoy/extensions/filters/listener/original_src/v3:pkg_cc_proto", ], ) + +envoy_proto_library( + name = "original_src_fuzz_test_proto", + srcs = ["original_src_fuzz_test.proto"], + deps = [ + "//test/extensions/filters/listener/common/fuzz:listener_filter_fuzzer_proto", + "@envoy_api//envoy/extensions/filters/listener/original_src/v3:pkg", + ], +) + +envoy_cc_fuzz_test( + name = "original_src_fuzz_test", + srcs = ["original_src_fuzz_test.cc"], + corpus = "original_src_corpus", + deps = [ + ":original_src_fuzz_test_proto_cc_proto", + "//source/extensions/filters/listener/original_src:original_src_lib", + "//test/extensions/filters/listener/common/fuzz:listener_filter_fuzzer_lib", + ], +) diff --git a/test/extensions/filters/listener/original_src/original_src_config_factory_test.cc b/test/extensions/filters/listener/original_src/original_src_config_factory_test.cc index dca16ca9e4556..eac36949aa96d 100644 --- a/test/extensions/filters/listener/original_src/original_src_config_factory_test.cc +++ b/test/extensions/filters/listener/original_src/original_src_config_factory_test.cc @@ -2,7 +2,7 @@ #include "extensions/filters/listener/original_src/original_src.h" #include "extensions/filters/listener/original_src/original_src_config_factory.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/listener_factory_context.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git a/test/extensions/filters/listener/original_src/original_src_corpus/valid_ipv4 b/test/extensions/filters/listener/original_src/original_src_corpus/valid_ipv4 new file mode 100644 index 
0000000000000..e9acd000b4634 --- /dev/null +++ b/test/extensions/filters/listener/original_src/original_src_corpus/valid_ipv4 @@ -0,0 +1,10 @@ +config { + bind_port: false + mark: 0 +} + +fuzzed { + sock { + remote_address: "tcp://1.2.3.4:0" + } +} \ No newline at end of file diff --git a/test/extensions/filters/listener/original_src/original_src_corpus/valid_unix b/test/extensions/filters/listener/original_src/original_src_corpus/valid_unix new file mode 100644 index 0000000000000..9726394370c6a --- /dev/null +++ b/test/extensions/filters/listener/original_src/original_src_corpus/valid_unix @@ -0,0 +1,10 @@ +config { + bind_port: true + mark: 15 +} + +fuzzed { + sock { + remote_address: "unix://domain.socket" + } +} \ No newline at end of file diff --git a/test/extensions/filters/listener/original_src/original_src_fuzz_test.cc b/test/extensions/filters/listener/original_src/original_src_fuzz_test.cc new file mode 100644 index 0000000000000..0116a7a98b362 --- /dev/null +++ b/test/extensions/filters/listener/original_src/original_src_fuzz_test.cc @@ -0,0 +1,31 @@ +#include "extensions/filters/listener/original_src/original_src.h" + +#include "test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.h" +#include "test/extensions/filters/listener/original_src/original_src_fuzz_test.pb.validate.h" +#include "test/fuzz/fuzz_runner.h" + +namespace Envoy { +namespace Extensions { +namespace ListenerFilters { +namespace OriginalSrc { + +DEFINE_PROTO_FUZZER( + const envoy::extensions::filters::listener::original_src::OriginalSrcTestCase& input) { + + try { + TestUtility::validate(input); + } catch (const ProtoValidationException& e) { + ENVOY_LOG_MISC(debug, "ProtoValidationException: {}", e.what()); + return; + } + + Config config(input.config()); + auto filter = std::make_unique(config); + ListenerFilterFuzzer fuzzer; + fuzzer.fuzz(*filter, input.fuzzed()); +} + +} // namespace OriginalSrc +} // namespace ListenerFilters +} // namespace Extensions +} // 
namespace Envoy diff --git a/test/extensions/filters/listener/original_src/original_src_fuzz_test.proto b/test/extensions/filters/listener/original_src/original_src_fuzz_test.proto new file mode 100644 index 0000000000000..093378b090454 --- /dev/null +++ b/test/extensions/filters/listener/original_src/original_src_fuzz_test.proto @@ -0,0 +1,14 @@ +syntax = "proto3"; + +package envoy.extensions.filters.listener.original_src; + +import "envoy/extensions/filters/listener/original_src/v3/original_src.proto"; +import "test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.proto"; +import "validate/validate.proto"; + +message OriginalSrcTestCase { + envoy.extensions.filters.listener.original_src.v3.OriginalSrc config = 1 + [(validate.rules).message.required = true]; + test.extensions.filters.listener.FilterFuzzTestCase fuzzed = 2 + [(validate.rules).message.required = true]; +} \ No newline at end of file diff --git a/test/extensions/filters/listener/original_src/original_src_test.cc b/test/extensions/filters/listener/original_src/original_src_test.cc index b206bb722b66c..0e9180012fc27 100644 --- a/test/extensions/filters/listener/original_src/original_src_test.cc +++ b/test/extensions/filters/listener/original_src/original_src_test.cc @@ -119,7 +119,7 @@ TEST_F(OriginalSrcTest, OnNewConnectionIpv4AddressBleachesPort) { } TEST_F(OriginalSrcTest, FilterAddsTransparentOption) { - if (!ENVOY_SOCKET_IP_TRANSPARENT.has_value()) { + if (!ENVOY_SOCKET_IP_TRANSPARENT.hasValue()) { // The option isn't supported on this platform. Just skip the test. return; } @@ -138,7 +138,7 @@ TEST_F(OriginalSrcTest, FilterAddsTransparentOption) { } TEST_F(OriginalSrcTest, FilterAddsMarkOption) { - if (!ENVOY_SOCKET_SO_MARK.has_value()) { + if (!ENVOY_SOCKET_SO_MARK.hasValue()) { // The option isn't supported on this platform. Just skip the test. 
return; } @@ -160,7 +160,7 @@ TEST_F(OriginalSrcTest, FilterAddsMarkOption) { } TEST_F(OriginalSrcTest, Mark0NotAdded) { - if (!ENVOY_SOCKET_SO_MARK.has_value()) { + if (!ENVOY_SOCKET_SO_MARK.hasValue()) { // The option isn't supported on this platform. Just skip the test. return; } diff --git a/test/extensions/filters/listener/proxy_protocol/BUILD b/test/extensions/filters/listener/proxy_protocol/BUILD index 0eb0eb4983ebd..f373897957784 100644 --- a/test/extensions/filters/listener/proxy_protocol/BUILD +++ b/test/extensions/filters/listener/proxy_protocol/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,12 +7,15 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( name = "proxy_protocol_test", srcs = ["proxy_protocol_test.cc"], extension_name = "envoy.filters.listener.proxy_protocol", + tags = ["fails_on_windows"], deps = [ "//source/common/buffer:buffer_lib", "//source/common/event:dispatcher_includes", @@ -29,7 +30,7 @@ envoy_extension_cc_test( "//test/mocks/api:api_mocks", "//test/mocks/buffer:buffer_mocks", "//test/mocks/network:network_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:listener_factory_context_mocks", "//test/test_common:environment_lib", "//test/test_common:network_utility_lib", "//test/test_common:threadsafe_singleton_injector_lib", diff --git a/test/extensions/filters/listener/proxy_protocol/proxy_protocol_test.cc b/test/extensions/filters/listener/proxy_protocol/proxy_protocol_test.cc index 74dc142304e28..a270ee2f569d0 100644 --- a/test/extensions/filters/listener/proxy_protocol/proxy_protocol_test.cc +++ b/test/extensions/filters/listener/proxy_protocol/proxy_protocol_test.cc @@ -18,11 +18,12 @@ #include "server/connection_handler_impl.h" #include "extensions/filters/listener/proxy_protocol/proxy_protocol.h" +#include "extensions/filters/listener/well_known_names.h" #include 
"test/mocks/api/mocks.h" #include "test/mocks/buffer/mocks.h" #include "test/mocks/network/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/listener_factory_context.h" #include "test/test_common/environment.h" #include "test/test_common/network_utility.h" #include "test/test_common/printers.h" @@ -35,6 +36,7 @@ using testing::_; using testing::AnyNumber; using testing::AtLeast; +using testing::ElementsAre; using testing::Invoke; using testing::NiceMock; using testing::Return; @@ -60,8 +62,7 @@ class ProxyProtocolTest : public testing::TestWithParamlocalAddress())); EXPECT_CALL(socket_factory_, getListenSocket()).WillOnce(Return(socket_)); connection_handler_->addListener(absl::nullopt, *this); @@ -82,8 +83,10 @@ class ProxyProtocolTest : public testing::TestWithParam void { expected_callbacks--; @@ -109,7 +114,11 @@ class ProxyProtocolTest : public testing::TestWithParam bool { filter_manager.addAcceptFilter( - nullptr, std::make_unique(std::make_shared(listenerScope()))); + nullptr, std::make_unique(std::make_shared( + listenerScope(), (nullptr != proto_config) + ? 
*proto_config + : envoy::extensions::filters::listener:: + proxy_protocol::v3::ProxyProtocol()))); maybeExitDispatcher(); return true; })); @@ -184,6 +193,7 @@ class ProxyProtocolTest : public testing::TestWithParam connection_callbacks_; Network::Connection* server_connection_; Network::MockConnectionCallbacks server_callbacks_; + BasicResourceLimitImpl open_connections_; std::shared_ptr read_filter_; std::string name_; Api::OsSysCallsImpl os_sys_calls_actual_; @@ -876,6 +886,244 @@ TEST_P(ProxyProtocolTest, V2PartialRead) { disconnect(); } +TEST_P(ProxyProtocolTest, V2ExtractTlvOfInterest) { + // A well-formed ipv4/tcp with a pair of TLV extensions is accepted + constexpr uint8_t buffer[] = {0x0d, 0x0a, 0x0d, 0x0a, 0x00, 0x0d, 0x0a, 0x51, 0x55, 0x49, + 0x54, 0x0a, 0x21, 0x11, 0x00, 0x1a, 0x01, 0x02, 0x03, 0x04, + 0x00, 0x01, 0x01, 0x02, 0x03, 0x05, 0x00, 0x02}; + constexpr uint8_t tlv1[] = {0x0, 0x0, 0x1, 0xff}; + constexpr uint8_t tlv_type_authority[] = {0x02, 0x00, 0x07, 0x66, 0x6f, + 0x6f, 0x2e, 0x63, 0x6f, 0x6d}; + constexpr uint8_t data[] = {'D', 'A', 'T', 'A'}; + + envoy::extensions::filters::listener::proxy_protocol::v3::ProxyProtocol proto_config; + auto rule = proto_config.add_rules(); + rule->set_tlv_type(0x02); + rule->mutable_on_tlv_present()->set_key("PP2 type authority"); + + connect(true, &proto_config); + write(buffer, sizeof(buffer)); + dispatcher_->run(Event::Dispatcher::RunType::NonBlock); + + write(tlv1, sizeof(tlv1)); + write(tlv_type_authority, sizeof(tlv_type_authority)); + write(data, sizeof(data)); + expectData("DATA"); + + EXPECT_EQ(1, server_connection_->streamInfo().dynamicMetadata().filter_metadata_size()); + + auto metadata = server_connection_->streamInfo().dynamicMetadata().filter_metadata(); + EXPECT_EQ(1, metadata.size()); + EXPECT_EQ(1, metadata.count(ListenerFilters::ListenerFilterNames::get().ProxyProtocol)); + + auto fields = metadata.at(ListenerFilters::ListenerFilterNames::get().ProxyProtocol).fields(); + EXPECT_EQ(1, 
fields.size()); + EXPECT_EQ(1, fields.count("PP2 type authority")); + + auto value_s = fields.at("PP2 type authority").string_value(); + ASSERT_THAT(value_s, ElementsAre(0x66, 0x6f, 0x6f, 0x2e, 0x63, 0x6f, 0x6d)); + disconnect(); +} + +TEST_P(ProxyProtocolTest, V2ExtractTlvOfInterestAndEmitWithSpecifiedMetadataNamespace) { + // A well-formed ipv4/tcp with a pair of TLV extensions is accepted + constexpr uint8_t buffer[] = {0x0d, 0x0a, 0x0d, 0x0a, 0x00, 0x0d, 0x0a, 0x51, 0x55, 0x49, + 0x54, 0x0a, 0x21, 0x11, 0x00, 0x1a, 0x01, 0x02, 0x03, 0x04, + 0x00, 0x01, 0x01, 0x02, 0x03, 0x05, 0x00, 0x02}; + constexpr uint8_t tlv1[] = {0x0, 0x0, 0x1, 0xff}; + constexpr uint8_t tlv_type_authority[] = {0x02, 0x00, 0x07, 0x66, 0x6f, + 0x6f, 0x2e, 0x63, 0x6f, 0x6d}; + constexpr uint8_t data[] = {'D', 'A', 'T', 'A'}; + + envoy::extensions::filters::listener::proxy_protocol::v3::ProxyProtocol proto_config; + auto rule = proto_config.add_rules(); + rule->set_tlv_type(0x02); + rule->mutable_on_tlv_present()->set_key("PP2 type authority"); + rule->mutable_on_tlv_present()->set_metadata_namespace("We need a different metadata namespace"); + + connect(true, &proto_config); + write(buffer, sizeof(buffer)); + dispatcher_->run(Event::Dispatcher::RunType::NonBlock); + + write(tlv1, sizeof(tlv1)); + write(tlv_type_authority, sizeof(tlv_type_authority)); + write(data, sizeof(data)); + expectData("DATA"); + + EXPECT_EQ(1, server_connection_->streamInfo().dynamicMetadata().filter_metadata_size()); + + auto metadata = server_connection_->streamInfo().dynamicMetadata().filter_metadata(); + EXPECT_EQ(1, metadata.size()); + EXPECT_EQ(1, metadata.count("We need a different metadata namespace")); + + auto fields = metadata.at("We need a different metadata namespace").fields(); + EXPECT_EQ(1, fields.size()); + EXPECT_EQ(1, fields.count("PP2 type authority")); + + auto value_s = fields.at("PP2 type authority").string_value(); + ASSERT_THAT(value_s, ElementsAre(0x66, 0x6f, 0x6f, 0x2e, 0x63, 0x6f, 0x6d)); + 
disconnect(); +} + +TEST_P(ProxyProtocolTest, V2ExtractMultipleTlvsOfInterest) { + // A well-formed ipv4/tcp with a pair of TLV extensions is accepted + constexpr uint8_t buffer[] = {0x0d, 0x0a, 0x0d, 0x0a, 0x00, 0x0d, 0x0a, 0x51, 0x55, 0x49, + 0x54, 0x0a, 0x21, 0x11, 0x00, 0x39, 0x01, 0x02, 0x03, 0x04, + 0x00, 0x01, 0x01, 0x02, 0x03, 0x05, 0x00, 0x02}; + // a TLV of type 0x00 with size of 4 (1 byte is value) + constexpr uint8_t tlv1[] = {0x00, 0x00, 0x01, 0xff}; + // a TLV of type 0x02 with size of 10 bytes (7 bytes are value) + constexpr uint8_t tlv_type_authority[] = {0x02, 0x00, 0x07, 0x66, 0x6f, + 0x6f, 0x2e, 0x63, 0x6f, 0x6d}; + // a TLV of type 0x0f with size of 6 bytes (3 bytes are value) + constexpr uint8_t tlv3[] = {0x0f, 0x00, 0x03, 0xf0, 0x00, 0x0f}; + // a TLV of type 0xea with size of 25 bytes (22 bytes are value) + constexpr uint8_t tlv_vpc_id[] = {0xea, 0x00, 0x16, 0x01, 0x76, 0x70, 0x63, 0x2d, 0x30, + 0x32, 0x35, 0x74, 0x65, 0x73, 0x74, 0x32, 0x66, 0x61, + 0x36, 0x63, 0x36, 0x33, 0x68, 0x61, 0x37}; + constexpr uint8_t data[] = {'D', 'A', 'T', 'A'}; + + envoy::extensions::filters::listener::proxy_protocol::v3::ProxyProtocol proto_config; + auto rule_type_authority = proto_config.add_rules(); + rule_type_authority->set_tlv_type(0x02); + rule_type_authority->mutable_on_tlv_present()->set_key("PP2 type authority"); + + auto rule_vpc_id = proto_config.add_rules(); + rule_vpc_id->set_tlv_type(0xea); + rule_vpc_id->mutable_on_tlv_present()->set_key("PP2 vpc id"); + + connect(true, &proto_config); + write(buffer, sizeof(buffer)); + dispatcher_->run(Event::Dispatcher::RunType::NonBlock); + + write(tlv1, sizeof(tlv1)); + write(tlv_type_authority, sizeof(tlv_type_authority)); + write(tlv3, sizeof(tlv3)); + write(tlv_vpc_id, sizeof(tlv_vpc_id)); + write(data, sizeof(data)); + expectData("DATA"); + + EXPECT_EQ(1, server_connection_->streamInfo().dynamicMetadata().filter_metadata_size()); + + auto metadata = 
server_connection_->streamInfo().dynamicMetadata().filter_metadata(); + EXPECT_EQ(1, metadata.size()); + EXPECT_EQ(1, metadata.count(ListenerFilters::ListenerFilterNames::get().ProxyProtocol)); + + auto fields = metadata.at(ListenerFilters::ListenerFilterNames::get().ProxyProtocol).fields(); + EXPECT_EQ(2, fields.size()); + EXPECT_EQ(1, fields.count("PP2 type authority")); + EXPECT_EQ(1, fields.count("PP2 vpc id")); + + auto value_type_authority = fields.at("PP2 type authority").string_value(); + ASSERT_THAT(value_type_authority, ElementsAre(0x66, 0x6f, 0x6f, 0x2e, 0x63, 0x6f, 0x6d)); + + auto value_vpc_id = fields.at("PP2 vpc id").string_value(); + ASSERT_THAT(value_vpc_id, + ElementsAre(0x01, 0x76, 0x70, 0x63, 0x2d, 0x30, 0x32, 0x35, 0x74, 0x65, 0x73, 0x74, + 0x32, 0x66, 0x61, 0x36, 0x63, 0x36, 0x33, 0x68, 0x61, 0x37)); + disconnect(); +} + +TEST_P(ProxyProtocolTest, V2WillNotOverwriteTLV) { + // A well-formed ipv4/tcp with a pair of TLV extensions is accepted + constexpr uint8_t buffer[] = {0x0d, 0x0a, 0x0d, 0x0a, 0x00, 0x0d, 0x0a, 0x51, 0x55, 0x49, + 0x54, 0x0a, 0x21, 0x11, 0x00, 0x2a, 0x01, 0x02, 0x03, 0x04, + 0x00, 0x01, 0x01, 0x02, 0x03, 0x05, 0x00, 0x02}; + // a TLV of type 0x00 with size of 4 (1 byte is value) + constexpr uint8_t tlv1[] = {0x00, 0x00, 0x01, 0xff}; + // a TLV of type 0x02 with size of 10 bytes (7 bytes are value) + constexpr uint8_t tlv_type_authority1[] = {0x02, 0x00, 0x07, 0x66, 0x6f, + 0x6f, 0x2e, 0x63, 0x6f, 0x6d}; + // a TLV of type 0x0f with size of 6 bytes (3 bytes are value) + constexpr uint8_t tlv3[] = {0x0f, 0x00, 0x03, 0xf0, 0x00, 0x0f}; + // a TLV of type 0x02 (again) with size of 10 bytes (7 bytes are value) and different values + constexpr uint8_t tlv_type_authority2[] = {0x02, 0x00, 0x07, 0x62, 0x61, + 0x72, 0x2e, 0x6e, 0x65, 0x74}; + constexpr uint8_t data[] = {'D', 'A', 'T', 'A'}; + + envoy::extensions::filters::listener::proxy_protocol::v3::ProxyProtocol proto_config; + auto rule_type_authority = proto_config.add_rules(); 
+ rule_type_authority->set_tlv_type(0x02); + rule_type_authority->mutable_on_tlv_present()->set_key("PP2 type authority"); + + connect(true, &proto_config); + write(buffer, sizeof(buffer)); + dispatcher_->run(Event::Dispatcher::RunType::NonBlock); + + write(tlv1, sizeof(tlv1)); + write(tlv_type_authority1, sizeof(tlv_type_authority1)); + write(tlv3, sizeof(tlv3)); + write(tlv_type_authority2, sizeof(tlv_type_authority2)); + write(data, sizeof(data)); + expectData("DATA"); + + EXPECT_EQ(1, server_connection_->streamInfo().dynamicMetadata().filter_metadata_size()); + + auto metadata = server_connection_->streamInfo().dynamicMetadata().filter_metadata(); + EXPECT_EQ(1, metadata.size()); + EXPECT_EQ(1, metadata.count(ListenerFilters::ListenerFilterNames::get().ProxyProtocol)); + + auto fields = metadata.at(ListenerFilters::ListenerFilterNames::get().ProxyProtocol).fields(); + EXPECT_EQ(1, fields.size()); + EXPECT_EQ(1, fields.count("PP2 type authority")); + + auto value_type_authority = fields.at("PP2 type authority").string_value(); + ASSERT_THAT(value_type_authority, ElementsAre(0x66, 0x6f, 0x6f, 0x2e, 0x63, 0x6f, 0x6d)); + + disconnect(); +} + +TEST_P(ProxyProtocolTest, V2WrongTLVLength) { + // A well-formed ipv4/tcp with buffer[14]15] being 0x00 and 0x10. It says we should have 16 bytes + // following. + constexpr uint8_t buffer[] = {0x0d, 0x0a, 0x0d, 0x0a, 0x00, 0x0d, 0x0a, 0x51, 0x55, 0x49, + 0x54, 0x0a, 0x21, 0x11, 0x00, 0x10, 0x01, 0x02, 0x03, 0x04, + 0x00, 0x01, 0x01, 0x02, 0x03, 0x05, 0x00, 0x02}; + + // tlv[2] should be 0x1 since there's only one byte for tlv value. 
+ constexpr uint8_t tlv[] = {0x0, 0x0, 0x2, 0xff}; + + envoy::extensions::filters::listener::proxy_protocol::v3::ProxyProtocol proto_config; + auto rule_00 = proto_config.add_rules(); + rule_00->set_tlv_type(0x00); + rule_00->mutable_on_tlv_present()->set_key("00"); + + connect(false, &proto_config); + write(buffer, sizeof(buffer)); + write(tlv, sizeof(tlv)); + + expectProxyProtoError(); +} + +TEST_P(ProxyProtocolTest, V2IncompleteTLV) { + // A ipv4/tcp with buffer[14]15] being 0x00 and 0x11. It says we should have 17 bytes following, + // however we have 20. + constexpr uint8_t buffer[] = {0x0d, 0x0a, 0x0d, 0x0a, 0x00, 0x0d, 0x0a, 0x51, 0x55, 0x49, + 0x54, 0x0a, 0x21, 0x11, 0x00, 0x11, 0x01, 0x02, 0x03, 0x04, + 0x00, 0x01, 0x01, 0x02, 0x03, 0x05, 0x00, 0x02}; + + // a TLV of type 0x00 with size of 4 (1 byte is value) + constexpr uint8_t tlv1[] = {0x0, 0x0, 0x1, 0xff}; + // a TLV of type 0x01 with size of 4 (1 byte is value) + constexpr uint8_t tlv2[] = {0x1, 0x0, 0x1, 0xff}; + + envoy::extensions::filters::listener::proxy_protocol::v3::ProxyProtocol proto_config; + auto rule_00 = proto_config.add_rules(); + rule_00->set_tlv_type(0x00); + rule_00->mutable_on_tlv_present()->set_key("00"); + + auto rule_01 = proto_config.add_rules(); + rule_01->set_tlv_type(0x01); + rule_01->mutable_on_tlv_present()->set_key("01"); + + connect(false, &proto_config); + write(buffer, sizeof(buffer)); + write(tlv1, sizeof(tlv1)); + write(tlv2, sizeof(tlv2)); + + expectProxyProtoError(); +} + TEST_P(ProxyProtocolTest, MalformedProxyLine) { connect(false); @@ -1001,8 +1249,7 @@ class WildcardProxyProtocolTest : public testing::TestWithParamlocalAddress()->ip()->port())), connection_handler_(new Server::ConnectionHandlerImpl(*dispatcher_)), name_("proxy"), filter_chain_(Network::Test::createEmptyFilterChainWithRawBufferSockets()) { - EXPECT_CALL(socket_factory_, socketType()) - .WillOnce(Return(Network::Address::SocketType::Stream)); + EXPECT_CALL(socket_factory_, 
socketType()).WillOnce(Return(Network::Socket::Type::Stream)); EXPECT_CALL(socket_factory_, localAddress()).WillOnce(ReturnRef(socket_->localAddress())); EXPECT_CALL(socket_factory_, getListenSocket()).WillOnce(Return(socket_)); connection_handler_->addListener(absl::nullopt, *this); @@ -1014,7 +1261,10 @@ class WildcardProxyProtocolTest : public testing::TestWithParam bool { filter_manager.addAcceptFilter( - nullptr, std::make_unique(std::make_shared(listenerScope()))); + nullptr, + std::make_unique(std::make_shared( + listenerScope(), + envoy::extensions::filters::listener::proxy_protocol::v3::ProxyProtocol()))); return true; })); } @@ -1027,11 +1277,13 @@ class WildcardProxyProtocolTest : public testing::TestWithParam socket_; Network::Address::InstanceConstSharedPtr local_dst_address_; @@ -1138,6 +1391,45 @@ TEST_P(WildcardProxyProtocolTest, BasicV6) { disconnect(); } +TEST(ProxyProtocolConfigFactoryTest, TestCreateFactory) { + Server::Configuration::NamedListenerFilterConfigFactory* factory = + Registry::FactoryRegistry:: + getFactory(ListenerFilters::ListenerFilterNames::get().ProxyProtocol); + + EXPECT_EQ(factory->name(), ListenerFilters::ListenerFilterNames::get().ProxyProtocol); + + const std::string yaml = R"EOF( + rules: + - tlv_type: 0x01 + on_tlv_present: + key: "PP2_TYPE_ALPN" + - tlv_type: 0x1a + on_tlv_present: + key: "PP2_TYPE_CUSTOMER_A" +)EOF"; + + ProtobufTypes::MessagePtr proto_config = factory->createEmptyConfigProto(); + TestUtility::loadFromYaml(yaml, *proto_config); + + Server::Configuration::MockListenerFactoryContext context; + EXPECT_CALL(context, scope()).Times(1); + EXPECT_CALL(context, messageValidationVisitor()).Times(1); + Network::ListenerFilterFactoryCb cb = + factory->createListenerFilterFactoryFromProto(*proto_config, nullptr, context); + + Network::MockListenerFilterManager manager; + Network::ListenerFilterPtr added_filter; + EXPECT_CALL(manager, addAcceptFilter_(_, _)) + .WillOnce(Invoke([&added_filter](const 
Network::ListenerFilterMatcherSharedPtr&, + Network::ListenerFilterPtr& filter) { + added_filter = std::move(filter); + })); + cb(manager); + + // Make sure we actually create the correct type! + EXPECT_NE(dynamic_cast(added_filter.get()), nullptr); +} + // Test that the deprecated extension name still functions. TEST(ProxyProtocolConfigFactoryTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) { const std::string deprecated_name = "envoy.listener.proxy_protocol"; diff --git a/test/extensions/filters/listener/tls_inspector/BUILD b/test/extensions/filters/listener/tls_inspector/BUILD index 8e2b7360a5ad3..0f654911f6723 100644 --- a/test/extensions/filters/listener/tls_inspector/BUILD +++ b/test/extensions/filters/listener/tls_inspector/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", @@ -12,6 +10,8 @@ load( "envoy_extension_cc_benchmark_binary", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( @@ -19,6 +19,7 @@ envoy_cc_test( srcs = ["tls_inspector_test.cc"], deps = [ ":tls_utility_lib", + "//source/common/http:utility_lib", "//source/extensions/filters/listener/tls_inspector:config", "//source/extensions/filters/listener/tls_inspector:tls_inspector_lib", "//test/mocks/api:api_mocks", @@ -37,6 +38,7 @@ envoy_extension_cc_benchmark_binary( ], deps = [ ":tls_utility_lib", + "//source/common/http:utility_lib", "//source/common/network:listen_socket_lib", "//source/extensions/filters/listener/tls_inspector:tls_inspector_lib", "//test/mocks/api:api_mocks", diff --git a/test/extensions/filters/listener/tls_inspector/tls_inspector_benchmark.cc b/test/extensions/filters/listener/tls_inspector/tls_inspector_benchmark.cc index 7cb0915a89d07..5bed094fc34c8 100644 --- a/test/extensions/filters/listener/tls_inspector/tls_inspector_benchmark.cc +++ b/test/extensions/filters/listener/tls_inspector/tls_inspector_benchmark.cc @@ -1,6 +1,7 @@ #include #include 
"common/api/os_sys_calls_impl.h" +#include "common/http/utility.h" #include "common/network/io_socket_handle_impl.h" #include "common/network/listen_socket_impl.h" @@ -85,7 +86,8 @@ static void BM_TlsInspector(benchmark::State& state) { RELEASE_ASSERT(socket.detectedTransportProtocol() == "tls", ""); RELEASE_ASSERT(socket.requestedServerName() == "example.com", ""); RELEASE_ASSERT(socket.requestedApplicationProtocols().size() == 2 && - socket.requestedApplicationProtocols().front() == "h2", + socket.requestedApplicationProtocols().front() == + Http::Utility::AlpnNames::get().Http2, ""); socket.setDetectedTransportProtocol(""); socket.setRequestedServerName(""); diff --git a/test/extensions/filters/listener/tls_inspector/tls_inspector_test.cc b/test/extensions/filters/listener/tls_inspector/tls_inspector_test.cc index 8d1da06977a42..56f2e637e0fd8 100644 --- a/test/extensions/filters/listener/tls_inspector/tls_inspector_test.cc +++ b/test/extensions/filters/listener/tls_inspector/tls_inspector_test.cc @@ -1,3 +1,4 @@ +#include "common/http/utility.h" #include "common/network/io_socket_handle_impl.h" #include "extensions/filters/listener/tls_inspector/tls_inspector.h" @@ -94,7 +95,7 @@ TEST_P(TlsInspectorTest, ConnectionClosed) { TEST_P(TlsInspectorTest, ReadError) { init(); EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK)).WillOnce(InvokeWithoutArgs([]() { - return Api::SysCallSizeResult{ssize_t(-1), ENOTSUP}; + return Api::SysCallSizeResult{ssize_t(-1), SOCKET_ERROR_NOT_SUP}; })); EXPECT_CALL(cb_, continueFilterChain(false)); file_event_callback_(Event::FileReadyType::Read); @@ -127,8 +128,8 @@ TEST_P(TlsInspectorTest, SniRegistered) { // Test that a ClientHello with an ALPN value causes the correct name notification. 
TEST_P(TlsInspectorTest, AlpnRegistered) { init(); - const std::vector alpn_protos = {absl::string_view("h2"), - absl::string_view("http/1.1")}; + const auto alpn_protos = std::vector{Http::Utility::AlpnNames::get().Http2, + Http::Utility::AlpnNames::get().Http11}; std::vector client_hello = Tls::Test::generateClientHello( std::get<0>(GetParam()), std::get<1>(GetParam()), "", "\x02h2\x08http/1.1"); EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK)) @@ -151,7 +152,7 @@ TEST_P(TlsInspectorTest, AlpnRegistered) { // Test with the ClientHello spread over multiple socket reads. TEST_P(TlsInspectorTest, MultipleReads) { init(); - const std::vector alpn_protos = {absl::string_view("h2")}; + const auto alpn_protos = std::vector{Http::Utility::AlpnNames::get().Http2}; const std::string servername("example.com"); std::vector client_hello = Tls::Test::generateClientHello( std::get<0>(GetParam()), std::get<1>(GetParam()), servername, "\x02h2"); @@ -159,7 +160,7 @@ TEST_P(TlsInspectorTest, MultipleReads) { InSequence s; EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK)) .WillOnce(InvokeWithoutArgs([]() -> Api::SysCallSizeResult { - return Api::SysCallSizeResult{ssize_t(-1), EAGAIN}; + return Api::SysCallSizeResult{ssize_t(-1), SOCKET_ERROR_AGAIN}; })); for (size_t i = 1; i <= client_hello.size(); i++) { EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK)) @@ -256,7 +257,7 @@ TEST_P(TlsInspectorTest, InlineReadSucceed) { EXPECT_CALL(cb_, socket()).WillRepeatedly(ReturnRef(socket_)); EXPECT_CALL(cb_, dispatcher()).WillRepeatedly(ReturnRef(dispatcher_)); EXPECT_CALL(socket_, ioHandle()).WillRepeatedly(ReturnRef(*io_handle_)); - const std::vector alpn_protos = {absl::string_view("h2")}; + const auto alpn_protos = std::vector{Http::Utility::AlpnNames::get().Http2}; const std::string servername("example.com"); std::vector client_hello = Tls::Test::generateClientHello( std::get<0>(GetParam()), std::get<1>(GetParam()), servername, "\x02h2"); diff --git 
a/test/extensions/filters/network/client_ssl_auth/BUILD b/test/extensions/filters/network/client_ssl_auth/BUILD index ffde1a55d3c41..a168a2a0b3b03 100644 --- a/test/extensions/filters/network/client_ssl_auth/BUILD +++ b/test/extensions/filters/network/client_ssl_auth/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( @@ -36,7 +36,7 @@ envoy_extension_cc_test( deps = [ "//source/common/protobuf:utility_lib", "//source/extensions/filters/network/client_ssl_auth:config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "@envoy_api//envoy/extensions/filters/network/client_ssl_auth/v3:pkg_cc_proto", ], ) diff --git a/test/extensions/filters/network/client_ssl_auth/client_ssl_auth_test.cc b/test/extensions/filters/network/client_ssl_auth/client_ssl_auth_test.cc index 634d780a01709..cd18c5da8c549 100644 --- a/test/extensions/filters/network/client_ssl_auth/client_ssl_auth_test.cc +++ b/test/extensions/filters/network/client_ssl_auth/client_ssl_auth_test.cc @@ -113,7 +113,7 @@ stat_prefix: vpn Event::MockTimer* interval_timer_; Http::AsyncClient::Callbacks* callbacks_; Stats::TestUtil::TestStore stats_store_; - NiceMock random_; + NiceMock random_; Api::ApiPtr api_; std::shared_ptr ssl_; }; @@ -215,7 +215,7 @@ TEST_F(ClientSslAuthFilterTest, Ssl) { filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); EXPECT_EQ(1U, stats_store_.counter("auth.clientssl.vpn.update_success").value()); - EXPECT_EQ(2U, stats_store_.counter("auth.clientssl.vpn.auth_ip_white_list").value()); + EXPECT_EQ(2U, stats_store_.counter("auth.clientssl.vpn.auth_ip_allowlist").value()); EXPECT_EQ(1U, stats_store_.counter("auth.clientssl.vpn.auth_digest_match").value()); EXPECT_EQ(1U, stats_store_.counter("auth.clientssl.vpn.auth_digest_no_match").value()); 
diff --git a/test/extensions/filters/network/client_ssl_auth/config_test.cc b/test/extensions/filters/network/client_ssl_auth/config_test.cc index 7426428f75936..0acd58fa1bbc9 100644 --- a/test/extensions/filters/network/client_ssl_auth/config_test.cc +++ b/test/extensions/filters/network/client_ssl_auth/config_test.cc @@ -7,7 +7,7 @@ #include "extensions/filters/network/client_ssl_auth/config.h" #include "extensions/filters/network/well_known_names.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git a/test/extensions/filters/network/common/BUILD b/test/extensions/filters/network/common/BUILD index 246a1c9f8a0d4..046af1acac14a 100644 --- a/test/extensions/filters/network/common/BUILD +++ b/test/extensions/filters/network/common/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( diff --git a/test/extensions/filters/network/common/fuzz/BUILD b/test/extensions/filters/network/common/fuzz/BUILD new file mode 100644 index 0000000000000..8f54f57e5de81 --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/BUILD @@ -0,0 +1,105 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_fuzz_test", + "envoy_cc_test_library", + "envoy_package", + "envoy_proto_library", +) +load( + "//source/extensions:all_extensions.bzl", + "envoy_all_network_filters", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_proto_library( + name = "network_readfilter_fuzz_proto", + srcs = ["network_readfilter_fuzz.proto"], + deps = [ + "//test/fuzz:common_proto", + "@envoy_api//envoy/config/listener/v3:pkg", + ], +) + +envoy_proto_library( + name = "network_writefilter_fuzz_proto", + srcs = ["network_writefilter_fuzz.proto"], + deps = [ + "//test/fuzz:common_proto", + "@envoy_api//envoy/config/listener/v3:pkg", + ], +) + 
+envoy_cc_test_library( + name = "uber_readfilter_lib", + srcs = [ + "uber_per_readfilter.cc", + "uber_readfilter.cc", + ], + hdrs = ["uber_readfilter.h"], + deps = [ + ":network_readfilter_fuzz_proto_cc_proto", + "//source/common/config:utility_lib", + "//source/extensions/filters/common/ratelimit:ratelimit_lib", + "//source/extensions/filters/network:well_known_names", + "//source/extensions/filters/network/common:utility_lib", + "//test/extensions/filters/common/ext_authz:ext_authz_test_common", + "//test/extensions/filters/network/common/fuzz/utils:network_filter_fuzzer_fakes_lib", + "//test/fuzz:utility_lib", + "//test/mocks/network:network_mocks", + "@envoy_api//envoy/extensions/filters/network/direct_response/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/network/local_ratelimit/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/network/thrift_proxy/v3:pkg_cc_proto", + ], +) + +envoy_cc_fuzz_test( + name = "network_readfilter_fuzz_test", + srcs = ["network_readfilter_fuzz_test.cc"], + corpus = "network_readfilter_corpus", + dictionaries = ["network_readfilter_fuzz_test.dict"], + # All Envoy network filters must be linked to the test in order for the fuzzer to pick + # these up via the NamedNetworkFilterConfigFactory. 
+ deps = [ + ":uber_readfilter_lib", + "//source/common/config:utility_lib", + "//test/config:utility_lib", + ] + envoy_all_network_filters(), +) + +envoy_cc_test_library( + name = "uber_writefilter_lib", + srcs = [ + "uber_per_writefilter.cc", + "uber_writefilter.cc", + ], + hdrs = ["uber_writefilter.h"], + deps = [ + ":network_writefilter_fuzz_proto_cc_proto", + "//source/common/config:utility_lib", + "//source/extensions/filters/network:well_known_names", + "//source/extensions/filters/network/common:utility_lib", + "//test/extensions/filters/network/common/fuzz/utils:network_filter_fuzzer_fakes_lib", + "//test/fuzz:utility_lib", + "//test/mocks/network:network_mocks", + ], +) + +envoy_cc_fuzz_test( + name = "network_writefilter_fuzz_test", + srcs = ["network_writefilter_fuzz_test.cc"], + corpus = "network_writefilter_corpus", + # All Envoy network filters must be linked to the test in order for the fuzzer to pick + # these up via the NamedNetworkFilterConfigFactory. + deps = [ + ":uber_writefilter_lib", + "//source/common/config:utility_lib", + "//source/extensions/filters/network/kafka:kafka_broker_config_lib", + "//source/extensions/filters/network/mongo_proxy:config", + "//source/extensions/filters/network/mysql_proxy:config", + "//source/extensions/filters/network/zookeeper_proxy:config", + "//test/config:utility_lib", + ], +) diff --git a/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/client_sslL_auth_2 b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/client_sslL_auth_2 new file mode 100644 index 0000000000000..dd24c6c6c4daa --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/client_sslL_auth_2 @@ -0,0 +1,47 @@ +config { + name: "envoy.filters.network.client_ssl_auth" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.network.client_ssl_auth.v3.ClientSSLAuth" + value: "\n\010\177\177_p\000O\002@\022\007x-clien" + } +} +actions { + advance_time { + 
milliseconds: 524288 + } +} +actions { + on_new_connection { + } +} +actions { + advance_time { + milliseconds: 524288 + } +} +actions { + advance_time { + milliseconds: 524288 + } +} +actions { + on_new_connection { + } +} +actions { + on_data { + data: "ppu" + end_stream: true + } +} +actions { + advance_time { + milliseconds: 524288 + } +} +actions { + on_data { + data: "type.googleapis.com/envoy.extensions.filters.network.client_ssl_auth.v3.ClientSSLAuth" + end_stream: true + } +} diff --git a/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/client_ssl_authz_1 b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/client_ssl_authz_1 new file mode 100644 index 0000000000000..44f4dfaf34d18 --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/client_ssl_authz_1 @@ -0,0 +1,44 @@ +config { + name: "envoy.filters.network.client_ssl_auth" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.network.client_ssl_auth.v3.ClientSSLAuth" + value: "\n%envoy.filters.network.client_ssl_auth\022\0011" + } +} +actions { + on_new_connection { + } +} +actions { + advance_time { + milliseconds: 4 + } +} +actions { + on_data { + data: "u\360" + } +} +actions { + on_data { + data: "u\360" + } +} +actions { + on_data { + data: "u\360" + } +} +actions { + advance_time { + milliseconds: 4 + } +} +actions { + on_new_connection { + } +} +actions { + on_new_connection { + } +} diff --git a/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/direct_response_1 b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/direct_response_1 new file mode 100644 index 0000000000000..c65354895b289 --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/direct_response_1 @@ -0,0 +1,36 @@ +config { + name: "envoy.filters.network.direct_response" + typed_config { + type_url: 
"type.googleapis.com/envoy.extensions.filters.network.direct_response.v3.Config" + } +} +actions { + on_new_connection { + } +} +actions { + on_data { + } +} +actions { + on_data { + data: "y" + } +} +actions { + on_data { + } +} +actions { + on_data { + } +} +actions { + on_data { + data: "\006" + } +} +actions { + on_data { + } +} diff --git a/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/direct_response_open_file b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/direct_response_open_file new file mode 100644 index 0000000000000..26df2e4de4ec6 --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/direct_response_open_file @@ -0,0 +1,19 @@ +config { + name: "envoy.filters.network.direct_response" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.network.direct_response.v3.Config" + value: "\n\032\n\030*\014\n\002\020\001\"\006\020\001\"\002\030\0012\003\032\001\':\003\032\001\'" + } +} +actions { + on_new_connection { + } +} +actions{ + on_data{ + } +} +actions { + on_new_connection { + } +} diff --git a/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/dubbo_proxy_1 b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/dubbo_proxy_1 new file mode 100644 index 0000000000000..b9c6f893f556b --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/dubbo_proxy_1 @@ -0,0 +1,53 @@ +config { + name: "envoy.filters.network.dubbo_proxy" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.network.dubbo_proxy.v3.DubboProxy" + value: "\n!envoy.filters.network.dubbo_proxy" + } +} +actions { + on_new_connection { + } +} +actions { + advance_time { + milliseconds: 268435 + } +} +actions { + on_data { + data: "eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee" + } +} +actions { + on_new_connection { + } +} +actions { + 
on_data { + data: "\000\013" + } +} +actions { + on_data { + data: "\000\013" + } +} +actions { + on_data { + data: "\000\013" + } +} +actions { + on_data { + data: "\000\013" + } +} +actions { + on_new_connection { + } +} +actions { + on_new_connection { + } +} diff --git a/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/echo_1 b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/echo_1 new file mode 100644 index 0000000000000..fd15fde5a83f8 --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/echo_1 @@ -0,0 +1,31 @@ +config { + name: "envoy.filters.network.echo" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.network.echo.v3.Echo" + } +} +actions { + on_new_connection { + } +} +actions { + advance_time { + milliseconds: 2097152 + } +} +actions { + advance_time { + milliseconds: 4194304 + } +} +actions { + on_data { + data: "y" + } +} +actions { + advance_time { + milliseconds: 2097152 + } +} + diff --git a/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/empty b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/empty new file mode 100644 index 0000000000000..9933bd3fed12a --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/empty @@ -0,0 +1,14 @@ +config { + name: "envoy.filters.network.local_ratelimit" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.network.local_ratelimit.v3.LocalRateLimit" + 
value:"\001\n\311\001type.googleapis.com/envoy.extensions.filters.netwe\360\231\201\270\362\251\212\211\361\263\275\271\363\206\215\263\361\255\230\252\362\265\266\243\364\203\217\266\362\211\226\227\362\232\255\221\362\227\227\210\362\255\274\232\363\220\256\256\364\206\217\231\363\246\273\262\363\214\207\237\360\255\215\236\364\206\232\207\361\273\210\256\362\234\204\234\361\256\236\207\361\225\240\253\363\255\231\272\363\254\256\273\360\276\201\214\361\231\215\216\363\233\202\226\361\252\222\256\362\217\241\265\363\200\257\245voy.api.v2.route.RouteActlRateLimit\022\017\010\200\312\002\022\004\010\200\312\002\032\003\010\200^" + } +} + +actions { + on_data { + data: "\nVtype.googleapis.com/envoy.extensions.filters.network.local_ratelimit.v3.LocalRateLimit\022\002\010 \032d\n\002\010\001\022^\n2\n%envoy.filters.network.local_ratelimit\022\000\032\007\n\002\010\001\022\001+\022\000\032&\n\000\022\"\000\000\000\000\000voy.filters.network.lo\000\000\000\000\000\000+" + end_stream: true + } +} diff --git a/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/ext_authz_1 b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/ext_authz_1 new file mode 100644 index 0000000000000..fabd48ca01501 --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/ext_authz_1 @@ -0,0 +1,20 @@ +config { + name: "envoy.filters.network.ext_authz" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.network.ext_authz.v3.ExtAuthz" + } +} +actions { + on_data { + data: "y" + } +} +actions { + on_new_connection { + } +} +actions { + advance_time { + milliseconds: 655360 + } +} diff --git a/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/ext_authz_2 b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/ext_authz_2 new file mode 100644 index 0000000000000..cc8199f166f42 --- /dev/null +++ 
b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/ext_authz_2 @@ -0,0 +1,16 @@ +config { + name: "envoy.filters.network.ext_authz" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.network.ext_authz.v3.ExtAuthz" + value: "\n\037envoy.filters.network.ext_authz\030\001(\001" + } +} +actions { + on_new_connection { + } +} +actions { + on_data { + data: ":" + } +} diff --git a/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/http_connection_manager_1 b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/http_connection_manager_1 new file mode 100644 index 0000000000000..cae9fbab67007 --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/http_connection_manager_1 @@ -0,0 +1,21 @@ +config { + name: "envoy.filters.network.http_connection_manager" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager" + value: "\022\002B\001\"\000J\004(\001J\000z\002\010\001\220\001\001" + } +} +actions { + on_data { + data: "y" + } +} +actions { + on_new_connection { + } +} +actions { + advance_time { + milliseconds: 655360 + } +} \ No newline at end of file diff --git a/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/http_connection_manager_2 b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/http_connection_manager_2 new file mode 100644 index 0000000000000..d4012d30d3847 --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/http_connection_manager_2 @@ -0,0 +1,21 @@ +config { + name: "envoy.filters.network.http_connection_manager" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager" + value: 
"\010\002\022\001-\"5\n\001\000\032\001~\032\'envoy.type.matcher.v3.ListStringMatcherB\001-B\001~:\013\"\t\t\000\000\000\004\000\000\000\000B\002(\001\312\001\000\362\001\002\010\001" + } +} +actions { + on_data { + data: "y" + } +} +actions { + on_new_connection { + } +} +actions { + advance_time { + milliseconds: 655360 + } +} diff --git a/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/kafka_1 b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/kafka_1 new file mode 100644 index 0000000000000..dd8c619f9d2f6 --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/kafka_1 @@ -0,0 +1,20 @@ +config { + name: "envoy.filters.network.kafka_broker" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.network.kafka_broker.v3.KafkaBroker" + value: "\n\"envoy.filters.network.kafka_broker" + } +} +actions { + on_new_connection { + } +} +actions { + on_data { + } +} +actions { + advance_time { + milliseconds: 10000 + } +} diff --git a/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/local_ratelimit_1 b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/local_ratelimit_1 new file mode 100644 index 0000000000000..ab8d73afbd8f8 --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/local_ratelimit_1 @@ -0,0 +1,39 @@ +config { + name: "envoy.filters.network.local_ratelimit" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.network.local_ratelimit.v3.LocalRateLimit" + value: "\nVtype.googleapis.com/envoy.extensions.filters.network.local_ratelimit.v3.LocalRateLimit\022\013\010\001\032\007\010\200^\020\200\306\001" + } +} +actions { + on_new_connection { + } +} +actions { + on_data { + data: "\000\000" + } +} +actions { + on_data { + data: "\000\000" + } +} +actions { + on_new_connection { + } +} +actions { + advance_time { + milliseconds: 12035000 + } +} +actions { + on_data { + data: 
"\000\000" + } +} +actions { + on_new_connection { + } +} diff --git a/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/local_ratelimit_time_overflow b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/local_ratelimit_time_overflow new file mode 100644 index 0000000000000..a450f763024bd --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/local_ratelimit_time_overflow @@ -0,0 +1,44 @@ +config { + name: "envoy.filters.network.local_ratelimit" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.network.local_ratelimit.v3.LocalRateLimit" + value: "\nVtype.googleapis.com/envoy.extensions.filters.network.local_ratelimit.v3.LocalRateLimit\022\017\010\001\032\013\010\200\336\200\200\240\007\020\200\306!" + } +} +actions { + advance_time { + milliseconds: 12035000 + } +} +actions { + on_data { + data: "\000\013" + } +} +actions { + on_data { + data: "\000\000" + } +} +actions { + on_new_connection { + } +} +actions { + advance_time { + milliseconds: 12035000 + } +} +actions { + on_new_connection { + } +} +actions { + advance_time { + milliseconds: 53 + } +} +actions { + on_new_connection { + } +} diff --git a/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/ratelimit_1 b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/ratelimit_1 new file mode 100644 index 0000000000000..967d64df713d0 --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/ratelimit_1 @@ -0,0 +1,26 @@ +config { + name: "envoy.filters.network.ratelimit" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.network.ratelimit.v3.RateLimit" + value: 
"\nP\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\022Y\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\032W\nU\n\001[\022P\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\"\005\020\200\200\214\001(\0012e\022c\022Y\n\010\001\000\000\000\000\000\000\002\"M\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\032\006\010\200\200\204\360\002" + } +} +actions { + on_new_connection { + } +} +actions { + on_data { + data: "\000" + } +} +actions { + on_data { + data: "\000\000" + } +} +actions { + advance_time { + milliseconds: 7299840 + } +} diff --git a/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/rbac_1 b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/rbac_1 new file mode 100644 index 0000000000000..61f1adaedc4d8 --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/rbac_1 @@ -0,0 +1,20 @@ +config { 
+ name: "envoy.filters.network.rbac" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.network.rbac.v3.RBAC" + value: "\032\010\177\177\177\177\177\177\177\177" + } +} +actions { + on_new_connection { + } +} +actions { + on_data { + } +} +actions { + on_data { + end_stream: true + } +} diff --git a/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/redis_proxy_1 b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/redis_proxy_1 new file mode 100644 index 0000000000000..15ac639614e89 --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/redis_proxy_1 @@ -0,0 +1,28 @@ +config { + name: "envoy.filters.network.redis_proxy" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.network.redis_proxy.v3.RedisProxy" + value: "\n\001N\032\032\n\005\020\200\200\200\030\030\001 \377\377\377\337\017*\005\020\200\200\200\0302\000@\001*\010\n\006\032\004\001\000\000\010" + } +} +actions { + on_new_connection { + + } +} +actions { + on_new_connection { + } +} +actions { + on_data { + data: "0" + end_stream: true + } +} +actions { + on_data { + data: "0" + end_stream: true + } +} diff --git a/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/rocketmq_proxy_crash b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/rocketmq_proxy_crash new file mode 100644 index 0000000000000..b1a71216b2fc4 --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/rocketmq_proxy_crash @@ -0,0 +1,15 @@ +config { + name: "envoy.filters.network.rocketmq_proxy" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.network.rocketmq_proxy.v3.RocketmqProxy" + value: "\n \022\034\n\032__________________________ \001 \001" + } +} + +actions { + on_data { + data: "\000\000\000\000\000\000\000\000\000" + end_stream: false + } +} + diff --git 
a/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/sni_cluster_1 b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/sni_cluster_1 new file mode 100644 index 0000000000000..e657e3b116a2f --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/sni_cluster_1 @@ -0,0 +1,35 @@ +config { + name: "envoy.filters.network.sni_cluster" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.network.sni_cluster.v3.SniCluster" + } +} +actions { + on_new_connection { + } +} +actions { + advance_time { + milliseconds: 268435 + } +} +actions { + on_data { + data: "eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee" + } +} +actions { + on_data { + data: "IIIIIIIIIIIIIIIIIIII\000\000\000\000\000\000\000;IIIIIIIIIIIIIIIIIIIIIIIIIIIIII" + } +} +actions { + advance_time { + milliseconds: 16384 + } +} +actions { + advance_time { + milliseconds: 13 + } +} diff --git a/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/sni_cluster_2 b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/sni_cluster_2 new file mode 100644 index 0000000000000..25a5c974299ad --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/sni_cluster_2 @@ -0,0 +1,25 @@ +config { + name: "envoy.filters.network.sni_cluster" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.network.sni_cluster.v3.SniCluster" + } +} +actions { + on_new_connection { + } +} +actions { + advance_time { + milliseconds: 268435 + } +} +actions { + on_data { + data: "eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee" + } +} +actions { + advance_time { + milliseconds: 1677721 + } +} diff --git a/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/sni_dynamic_forward_proxy_1 
b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/sni_dynamic_forward_proxy_1 new file mode 100644 index 0000000000000..21ad6d880835a --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/sni_dynamic_forward_proxy_1 @@ -0,0 +1,36 @@ +config { + name: "envoy.filters.network.sni_dynamic_forward_proxy" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.network.sni_dynamic_forward_proxy.v3alpha.FilterConfig" + value: "\nP\nFenvoy.network.sni_dynamic_fo.filters.network.sni_dynamic_forward_proxy*\006\010\200\200\200\260\002" + } +} +actions { + on_new_connection { + } +} +actions { + advance_time { + milliseconds: 30976 + } +} +actions { + advance_time { + milliseconds: 262144 + } +} +actions { + on_data { + data: "\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030c.googlers.com\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030" + } +} +actions { + on_data { + data: "\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030c.googlers.com\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030" + } +} +actions { + on_data { + data: "\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030c.googlers.com\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030" + } +} diff --git 
a/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/thrift_proxy_1 b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/thrift_proxy_1 new file mode 100644 index 0000000000000..a194b7f990310 --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/thrift_proxy_1 @@ -0,0 +1,7 @@ +config { + name: "envoy.filters.network.thrift_proxy" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.network.thrift_proxy.v3.ThriftProxy" + value: "\nYtype.googleapis.com/envoy.extensions.filters.network.thrift_proxy.vLLLLLLLLL3.ThriftProxy\020\003\030\003\"\231\002\022\226\002\n\003\n\001A\022\216\002\032\201\002\n\361\001\n\010@\000\000\000\000\000\000\000\022\344\001\nc\n_*]\032[\nPtype.googleapis.com/envoy.extensions.filters.network.thrift_proxy.v3.ThriftProxy\022\007\020\002\"\003\n\001A\022\000\n}\nyenvoy.filters.network.thrift_prox\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177y\022\000\n\013\n\000\022\007\n\005\n\001#\022\0002\010A\177\177\177\177\177\177\177" + } +} diff --git a/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/thrift_proxy_3 b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/thrift_proxy_3 new file mode 100644 index 0000000000000..78a87924ae34e --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/thrift_proxy_3 @@ -0,0 +1,34 @@ +config { + name: "envoy.filters.network.thrift_proxy" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.network.thrift_proxy.v3.ThriftProxy" + value: 
"\nz\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177" + } +} +actions { + on_new_connection { + } +} +actions { + advance_time { + milliseconds: 10 + } +} +actions { + on_new_connection { + } +} +actions { + on_new_connection { + } +} +actions { + on_data { + } +} +actions { + on_data { + data: "type.googleapis.com/envoy.extensions.filters.network.thrift_proxy.v3.Thrif~tProxy" + end_stream: true + } +} diff --git a/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/thrift_proxy_assert_failure b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/thrift_proxy_assert_failure new file mode 100644 index 0000000000000..ca2772ee0e71d --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/thrift_proxy_assert_failure @@ -0,0 +1,7 @@ +config { + name: "envoy.filters.network.thrift_proxy" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.network.thrift_proxy.v3.ThriftProxy" + value: 
"\nYtype.googleapis.com/envoy.extensions.filters.network.thrift_proxy.vLLLLLLLLL3.ThriftProxy\020\003\030\003\"\231\002\022\226\002\n\003\n\001A\022\216\002\032\201\002\n\361\001\n\010@\000\000\000\000\000\000\000\022\344\001\nc\n_*]\032[\nPtype.googleapis.com/envoy.extensions.filters.network.thrift_proxy.v3.ThriftProxy\022\007\020\002\"\003\n\001A\022\000\n}\nyenvoy.filters.network.thrift_prox\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177y\022\000\n\013\n\000\022\007\n\005\n\001#\022\0002\010A\000\000\000\000\000\000\000" + } +} diff --git a/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/zookeeper_proxy_1 b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/zookeeper_proxy_1 new file mode 100644 index 0000000000000..fb16dbd750df4 --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/zookeeper_proxy_1 @@ -0,0 +1,34 @@ +config { + name: "envoy.filters.network.zookeeper_proxy" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.network.zookeeper_proxy.v3.ZooKeeperProxy" + value: "\nVtype.googleapis.com/envoy.extensions.filters.network.zookeeper_proxy.v3.ZooKeeperProxy\032\000" + } +} +actions { + advance_time { + milliseconds: 8257536 + } +} +actions { + on_new_connection { + } +} +actions { + on_new_connection { + } +} +actions { + advance_time { + milliseconds: 8257536 + } +} +actions { + on_data { + } +} +actions { + advance_time { + milliseconds: 83886080 + } +} diff --git a/test/extensions/filters/network/common/fuzz/network_readfilter_fuzz.proto b/test/extensions/filters/network/common/fuzz/network_readfilter_fuzz.proto new file mode 100644 index 
0000000000000..e8205658d25e1 --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/network_readfilter_fuzz.proto @@ -0,0 +1,34 @@ +syntax = "proto3"; + +package test.extensions.filters.network; +import "google/protobuf/empty.proto"; +import "validate/validate.proto"; +import "envoy/config/listener/v3/listener_components.proto"; + +message OnData { + bytes data = 1; + bool end_stream = 2; +} + +message AdvanceTime { + // Advance the system time by (0,24] hours. + uint32 milliseconds = 1 [(validate.rules).uint32 = {gt: 0 lt: 86400000}]; +} + +message Action { + oneof action_selector { + option (validate.required) = true; + // Call onNewConnection() + google.protobuf.Empty on_new_connection = 1; + // Call onData() + OnData on_data = 2; + // Advance time_source_ + AdvanceTime advance_time = 3; + } +} + +message FilterFuzzTestCase { + // This is actually a protobuf type for the config of network filters. + envoy.config.listener.v3.Filter config = 1; + repeated Action actions = 2; +} diff --git a/test/extensions/filters/network/common/fuzz/network_readfilter_fuzz_test.cc b/test/extensions/filters/network/common/fuzz/network_readfilter_fuzz_test.cc new file mode 100644 index 0000000000000..cacff3aa8938e --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/network_readfilter_fuzz_test.cc @@ -0,0 +1,61 @@ +#include "common/config/utility.h" +#include "common/protobuf/utility.h" + +#include "extensions/filters/network/well_known_names.h" + +#include "test/config/utility.h" +#include "test/extensions/filters/network/common/fuzz/network_readfilter_fuzz.pb.validate.h" +#include "test/extensions/filters/network/common/fuzz/uber_readfilter.h" +#include "test/fuzz/fuzz_runner.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +DEFINE_PROTO_FUZZER(const test::extensions::filters::network::FilterFuzzTestCase& input) { + ABSL_ATTRIBUTE_UNUSED static PostProcessorRegistration reg = { + 
[](test::extensions::filters::network::FilterFuzzTestCase* input, unsigned int seed) { + // This post-processor mutation is applied only when libprotobuf-mutator + // calls mutate on an input, and *not* during fuzz target execution. + // Replaying a corpus through the fuzzer will not be affected by the + // post-processor mutation. + + // TODO(jianwendong): After extending to cover all the filters, we can use + // `Registry::FactoryRegistry< + // Server::Configuration::NamedNetworkFilterConfigFactory>::registeredNames()` + // to get all the filter names instead of calling `UberFilterFuzzer::filter_names()`. + static const auto filter_names = UberFilterFuzzer::filterNames(); + static const auto factories = Registry::FactoryRegistry< + Server::Configuration::NamedNetworkFilterConfigFactory>::factories(); + // Choose a valid filter name. + if (std::find(filter_names.begin(), filter_names.end(), input->config().name()) == + std::end(filter_names)) { + absl::string_view filter_name = filter_names[seed % filter_names.size()]; + input->mutable_config()->set_name(std::string(filter_name)); + } + // Set the corresponding type_url for Any. + auto& factory = factories.at(input->config().name()); + input->mutable_config()->mutable_typed_config()->set_type_url( + absl::StrCat("type.googleapis.com/", + factory->createEmptyConfigProto()->GetDescriptor()->full_name())); + }}; + + try { + TestUtility::validate(input); + // Check the filter's name in case some filters are not supported yet. + static const auto filter_names = UberFilterFuzzer::filterNames(); + // TODO(jianwendong): remove this if block after covering all the filters. 
+ if (std::find(filter_names.begin(), filter_names.end(), input.config().name()) == + std::end(filter_names)) { + ENVOY_LOG_MISC(debug, "Test case with unsupported filter type: {}", input.config().name()); + return; + } + static UberFilterFuzzer fuzzer; + fuzzer.fuzz(input.config(), input.actions()); + } catch (const ProtoValidationException& e) { + ENVOY_LOG_MISC(debug, "ProtoValidationException: {}", e.what()); + } +} + +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/test/extensions/filters/network/common/fuzz/network_readfilter_fuzz_test.dict b/test/extensions/filters/network/common/fuzz/network_readfilter_fuzz_test.dict new file mode 100644 index 0000000000000..41d6703efd993 --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/network_readfilter_fuzz_test.dict @@ -0,0 +1,3 @@ +# The names of supported thrift_filters in ThriftProxy +"envoy.filters.thrift.router" +"envoy.filters.thrift.rate_limit" diff --git a/test/extensions/filters/network/common/fuzz/network_writefilter_corpus/kafka_broker_1 b/test/extensions/filters/network/common/fuzz/network_writefilter_corpus/kafka_broker_1 new file mode 100644 index 0000000000000..a20c58dd2d4a1 --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/network_writefilter_corpus/kafka_broker_1 @@ -0,0 +1,110 @@ +config { + name: "envoy.filters.network.kafka_broker" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.network.kafka_broker.v3.KafkaBroker" + value: 
"\n}\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177" + } +} +actions { + on_write { + data: "-" + end_stream: true + } +} +actions { + advance_time { + milliseconds: 268435 + } +} +actions { + on_write { + data: "-" + end_stream: true + } +} +actions { + on_write { + data: "\312\312\312\312\312\312\312\312\312\312\312\312\315\312\312\312\312\312\312\312\312\312\312" + end_stream: true + } +} +actions { + on_write { + data: "-" + } +} +actions { + on_write { + data: "\312\312\312\312\312\312\312\312\312\312\312\312\312\312\312\312\312\312\312\312\312\312\312" + end_stream: true + } +} +actions { + on_write { + data: "-" + end_stream: true + } +} +actions { + on_write { + data: "-" + end_stream: true + } +} +actions { + on_write { + data: "-" + } +} +actions { + on_write { + data: "-" + } +} +actions { + on_write { + data: "\n\002\315\265" + } +} +actions { + on_write { + end_stream: true + } +} +actions { + on_write { + data: "\020\000\000\000" + } +} +actions { + on_write { + data: "-" + end_stream: true + } +} +actions { + on_write { + data: "-" + end_stream: true + } +} +actions { + on_write { + data: "p" + } +} +actions { + on_write { + data: "-" + } +} +actions { + on_write { + data: "-" + end_stream: true + } +} +actions { + on_write { + } +} diff --git a/test/extensions/filters/network/common/fuzz/network_writefilter_corpus/mongodb_proxy_1 b/test/extensions/filters/network/common/fuzz/network_writefilter_corpus/mongodb_proxy_1 new file mode 100644 index 0000000000000..20a344f8fe351 --- /dev/null +++ 
b/test/extensions/filters/network/common/fuzz/network_writefilter_corpus/mongodb_proxy_1 @@ -0,0 +1,107 @@ +config { + name: "envoy.filters.network.mongo_proxy" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.network.mongo_proxy.v3.MongoProxy" + value: "\n\001\\\032\007\"\003\010\200t*\000 \001" + } +} +actions { + on_write { + data: "]\000" + } +} +actions { + on_write { + data: "\004\000" + end_stream: true + } +} +actions { + advance_time { + milliseconds: 14848 + } +} +actions { + on_write { + data: "\004\000" + end_stream: true + } +} +actions { + advance_time { + milliseconds: 14848 + } +} +actions { + on_write { + data: "\004\000\001\000\000\000\000\000\000\001" + end_stream: true + } +} +actions { + on_write { + data: "<" + end_stream: true + } +} +actions { + on_write { + data: "\004\000" + } +} +actions { + on_write { + data: "\004\000" + } +} +actions { + advance_time { + milliseconds: 14848 + } +} +actions { + on_write { + data: "type.googleapis.com/envoy.extensions.filters.network.mongo_proxy.v3.MongoProxy" + end_stream: true + } +} +actions { + on_write { + data: "\004\000" + end_stream: true + } +} +actions { + on_write { + data: "\004\000" + } +} +actions { + on_write { + data: "pH\037\000 `\000\000" + end_stream: true + } +} +actions { + on_write { + data: "\004\000" + end_stream: true + } +} +actions { + advance_time { + milliseconds: 14848 + } +} +actions { + on_write { + data: "=" + end_stream: true + } +} +actions { + on_write { + data: "\004\000" + end_stream: true + } +} diff --git a/test/extensions/filters/network/common/fuzz/network_writefilter_corpus/mysql_proxy_1 b/test/extensions/filters/network/common/fuzz/network_writefilter_corpus/mysql_proxy_1 new file mode 100644 index 0000000000000..f58ad110b8b9d --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/network_writefilter_corpus/mysql_proxy_1 @@ -0,0 +1,86 @@ +config { + name: "envoy.filters.network.mysql_proxy" + typed_config { + type_url: 
"type.googleapis.com/envoy.extensions.filters.network.mysql_proxy.v3.MySQLProxy" + value: "\n\006#\336\215\302\246\001" + } +} +actions { + on_write { + data: "\031\031\031\031" + } +} +actions { + on_write { + data: "\031\031\031\031\031\031\031\031" + end_stream: true + } +} +actions { + on_write { + data: "3" + } +} +actions { + on_write { + data: "#" + } +} +actions { + on_write { + data: "#" + end_stream: true + } +} +actions { + on_write { + data: "3" + } +} +actions { + on_write { + data: "#" + end_stream: true + } +} +actions { + on_write { + data: "#" + } +} +actions { + on_write { + data: "#" + } +} +actions { + on_write { + data: "\031\031\031\031\031\031\031\031" + end_stream: true + } +} +actions { + on_write { + end_stream: true + } +} +actions { + on_write { + end_stream: true + } +} +actions { + on_write { + data: "3" + } +} +actions { + on_write { + end_stream: true + } +} +actions { + on_write { + data: "3" + } +} diff --git a/test/extensions/filters/network/common/fuzz/network_writefilter_corpus/zookeeper_proxy_1 b/test/extensions/filters/network/common/fuzz/network_writefilter_corpus/zookeeper_proxy_1 new file mode 100644 index 0000000000000..2e2e6c1bfb8dc --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/network_writefilter_corpus/zookeeper_proxy_1 @@ -0,0 +1,17 @@ +config { + name: "envoy.filters.network.zookeeper_proxy" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.network.zookeeper_proxy.v3.ZooKeeperProxy" + value: "\nVtype.googleapis.com/envoy.extensions.filters.network.zookeeper_proxy.v3.ZooKeeperProxy\032\000" + } +} +actions { + on_write { + data: 
"\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030c.googlers.com\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030" + } +} +actions { + on_write { + } +} + diff --git a/test/extensions/filters/network/common/fuzz/network_writefilter_corpus/zookeeper_proxy_assert_failure_onwrite b/test/extensions/filters/network/common/fuzz/network_writefilter_corpus/zookeeper_proxy_assert_failure_onwrite new file mode 100644 index 0000000000000..ae270c6fe26cc --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/network_writefilter_corpus/zookeeper_proxy_assert_failure_onwrite @@ -0,0 +1,12 @@ +config { + name: "envoy.filters.network.zookeeper_proxy" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.network.zookeeper_proxy.v3.ZooKeeperProxy" + value: "\nVtype.googleapis.com/envoy.extensions.filters.network.zookeeper_proxy.v3.ZooKeeperProxy\022\001!\032\006\010\377\376\377\317\017" + } +} +actions { + on_write { + data: "\030\030\030\030\030\030\030\030" + } +} diff --git a/test/extensions/filters/network/common/fuzz/network_writefilter_fuzz.proto b/test/extensions/filters/network/common/fuzz/network_writefilter_fuzz.proto new file mode 100644 index 0000000000000..77de32b5858f8 --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/network_writefilter_fuzz.proto @@ -0,0 +1,31 @@ +syntax = "proto3"; + +package test.extensions.filters.network; +import "validate/validate.proto"; +import "envoy/config/listener/v3/listener_components.proto"; + +message OnWrite { + bytes data = 1; + bool end_stream = 2; +} + +message AdvanceTime { + // Advance the system time by (0,24] hours. 
+ uint32 milliseconds = 1 [(validate.rules).uint32 = {gt: 0 lt: 86400000}]; +} + +message WriteAction { + oneof action_selector { + option (validate.required) = true; + // Call onWrite() + OnWrite on_write = 2; + // Advance time_source_ + AdvanceTime advance_time = 3; + } +} + +message FilterFuzzTestCase { + // This is actually a protobuf type for the config of network filters. + envoy.config.listener.v3.Filter config = 1; + repeated WriteAction actions = 2; +} diff --git a/test/extensions/filters/network/common/fuzz/network_writefilter_fuzz_test.cc b/test/extensions/filters/network/common/fuzz/network_writefilter_fuzz_test.cc new file mode 100644 index 0000000000000..702cb4078db46 --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/network_writefilter_fuzz_test.cc @@ -0,0 +1,58 @@ +#include "common/config/utility.h" +#include "common/protobuf/utility.h" + +#include "extensions/filters/network/well_known_names.h" + +#include "test/config/utility.h" +#include "test/extensions/filters/network/common/fuzz/network_writefilter_fuzz.pb.validate.h" +#include "test/extensions/filters/network/common/fuzz/uber_writefilter.h" +#include "test/fuzz/fuzz_runner.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +DEFINE_PROTO_FUZZER(const test::extensions::filters::network::FilterFuzzTestCase& input) { + ABSL_ATTRIBUTE_UNUSED static PostProcessorRegistration reg = { + [](test::extensions::filters::network::FilterFuzzTestCase* input, unsigned int seed) { + // This post-processor mutation is applied only when libprotobuf-mutator + // calls mutate on an input, and *not* during fuzz target execution. + // Replaying a corpus through the fuzzer will not be affected by the + // post-processor mutation. + + // TODO(jianwendong): consider using a factory to store the names of all + // writeFilters. 
+ static const auto filter_names = UberWriteFilterFuzzer::filterNames(); + static const auto factories = Registry::FactoryRegistry< + Server::Configuration::NamedNetworkFilterConfigFactory>::factories(); + // Choose a valid filter name. + if (std::find(filter_names.begin(), filter_names.end(), input->config().name()) == + std::end(filter_names)) { + absl::string_view filter_name = filter_names[seed % filter_names.size()]; + input->mutable_config()->set_name(std::string(filter_name)); + } + // Set the corresponding type_url for Any. + auto& factory = factories.at(input->config().name()); + input->mutable_config()->mutable_typed_config()->set_type_url( + absl::StrCat("type.googleapis.com/", + factory->createEmptyConfigProto()->GetDescriptor()->full_name())); + }}; + try { + TestUtility::validate(input); + // Check the filter's name in case some filters are not supported yet. + // TODO(jianwendong): remove this if block when we have a factory for writeFilters. + static const auto filter_names = UberWriteFilterFuzzer::filterNames(); + if (std::find(filter_names.begin(), filter_names.end(), input.config().name()) == + std::end(filter_names)) { + ENVOY_LOG_MISC(debug, "Test case with unsupported filter type: {}", input.config().name()); + return; + } + static UberWriteFilterFuzzer fuzzer; + fuzzer.fuzz(input.config(), input.actions()); + } catch (const ProtoValidationException& e) { + ENVOY_LOG_MISC(debug, "ProtoValidationException: {}", e.what()); + } +} + +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/test/extensions/filters/network/common/fuzz/uber_per_readfilter.cc b/test/extensions/filters/network/common/fuzz/uber_per_readfilter.cc new file mode 100644 index 0000000000000..ce8d04e51fc40 --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/uber_per_readfilter.cc @@ -0,0 +1,165 @@ +#include "envoy/extensions/filters/network/direct_response/v3/config.pb.h" +#include 
"envoy/extensions/filters/network/local_ratelimit/v3/local_rate_limit.pb.h" +#include "envoy/extensions/filters/network/thrift_proxy/v3/thrift_proxy.pb.h" + +#include "extensions/filters/common/ratelimit/ratelimit_impl.h" +#include "extensions/filters/network/common/utility.h" +#include "extensions/filters/network/well_known_names.h" + +#include "test/extensions/filters/common/ext_authz/test_common.h" +#include "test/extensions/filters/network/common/fuzz/uber_readfilter.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace { +// Limit the fill_interval in the config of local_ratelimit filter prevent overflow in +// std::chrono::time_point. +static const int SecondsPerDay = 86400; +} // namespace +std::vector UberFilterFuzzer::filterNames() { + // These filters have already been covered by this fuzzer. + // Will extend to cover other network filters one by one. + static std::vector filter_names; + if (filter_names.empty()) { + const auto factories = Registry::FactoryRegistry< + Server::Configuration::NamedNetworkFilterConfigFactory>::factories(); + const std::vector supported_filter_names = { + NetworkFilterNames::get().ExtAuthorization, NetworkFilterNames::get().LocalRateLimit, + NetworkFilterNames::get().RedisProxy, NetworkFilterNames::get().ClientSslAuth, + NetworkFilterNames::get().Echo, NetworkFilterNames::get().DirectResponse, + NetworkFilterNames::get().DubboProxy, NetworkFilterNames::get().SniCluster, + // A dedicated http_connection_manager fuzzer can be found in + // test/common/http/conn_manager_impl_fuzz_test.cc + NetworkFilterNames::get().HttpConnectionManager, NetworkFilterNames::get().ThriftProxy, + NetworkFilterNames::get().ZooKeeperProxy, NetworkFilterNames::get().SniDynamicForwardProxy, + NetworkFilterNames::get().KafkaBroker, NetworkFilterNames::get().RocketmqProxy, + NetworkFilterNames::get().RateLimit, NetworkFilterNames::get().Rbac + // TODO(jianwendong): cover mongo_proxy, mysql_proxy, postgres_proxy, 
tcp_proxy. + }; + // Check whether each filter is loaded into Envoy. + // Some customers build Envoy without some filters. When they run fuzzing, the use of a filter + // that does not exist will cause fatal errors. + for (auto& filter_name : supported_filter_names) { + if (factories.contains(filter_name)) { + filter_names.push_back(filter_name); + } else { + ENVOY_LOG_MISC(debug, "Filter name not found in the factory: {}", filter_name); + } + } + } + return filter_names; +} + +void UberFilterFuzzer::perFilterSetup(const std::string& filter_name) { + // Set up response for ext_authz filter + if (filter_name == NetworkFilterNames::get().ExtAuthorization) { + + async_client_factory_ = std::make_unique(); + async_client_ = std::make_unique(); + // TODO(jianwendong): consider testing on different kinds of responses. + ON_CALL(*async_client_, sendRaw(_, _, _, _, _, _)) + .WillByDefault(testing::WithArgs<3>(Invoke([&](Grpc::RawAsyncRequestCallbacks& callbacks) { + Filters::Common::ExtAuthz::GrpcClientImpl* grpc_client_impl = + dynamic_cast(&callbacks); + const std::string empty_body{}; + const auto expected_headers = + Filters::Common::ExtAuthz::TestCommon::makeHeaderValueOption({}); + auto check_response = Filters::Common::ExtAuthz::TestCommon::makeCheckResponse( + Grpc::Status::WellKnownGrpcStatus::Ok, envoy::type::v3::OK, empty_body, + expected_headers); + // Give response to the grpc_client by calling onSuccess(). 
+ grpc_client_impl->onSuccess(std::move(check_response), span_); + return async_request_.get(); + }))); + + EXPECT_CALL(*async_client_factory_, create()).WillOnce(Invoke([&] { + return std::move(async_client_); + })); + + EXPECT_CALL(factory_context_.cluster_manager_.async_client_manager_, + factoryForGrpcService(_, _, _)) + .WillOnce(Invoke([&](const envoy::config::core::v3::GrpcService&, Stats::Scope&, bool) { + return std::move(async_client_factory_); + })); + read_filter_callbacks_->connection_.local_address_ = pipe_addr_; + read_filter_callbacks_->connection_.remote_address_ = pipe_addr_; + } else if (filter_name == NetworkFilterNames::get().HttpConnectionManager) { + read_filter_callbacks_->connection_.local_address_ = pipe_addr_; + read_filter_callbacks_->connection_.remote_address_ = pipe_addr_; + } else if (filter_name == NetworkFilterNames::get().RateLimit) { + async_client_factory_ = std::make_unique(); + async_client_ = std::make_unique(); + // TODO(jianwendong): consider testing on different kinds of responses. + ON_CALL(*async_client_, sendRaw(_, _, _, _, _, _)) + .WillByDefault(testing::WithArgs<3>(Invoke([&](Grpc::RawAsyncRequestCallbacks& callbacks) { + Filters::Common::RateLimit::GrpcClientImpl* grpc_client_impl = + dynamic_cast(&callbacks); + // Response OK + auto response = std::make_unique(); + // Give response to the grpc_client by calling onSuccess(). 
+ grpc_client_impl->onSuccess(std::move(response), span_); + return async_request_.get(); + }))); + + EXPECT_CALL(*async_client_factory_, create()).WillOnce(Invoke([&] { + return std::move(async_client_); + })); + + EXPECT_CALL(factory_context_.cluster_manager_.async_client_manager_, + factoryForGrpcService(_, _, _)) + .WillOnce(Invoke([&](const envoy::config::core::v3::GrpcService&, Stats::Scope&, bool) { + return std::move(async_client_factory_); + })); + read_filter_callbacks_->connection_.local_address_ = pipe_addr_; + read_filter_callbacks_->connection_.remote_address_ = pipe_addr_; + } +} + +void UberFilterFuzzer::checkInvalidInputForFuzzer(const std::string& filter_name, + Protobuf::Message* config_message) { + // System calls such as reading files are prohibited in this fuzzer. Some input that crashes the + // mock/fake objects are also prohibited. We could also avoid fuzzing some unfinished features by + // checking them here. For now there are only three filters {DirectResponse, LocalRateLimit, + // HttpConnectionManager} on which we have constraints. + const std::string name = Extensions::NetworkFilters::Common::FilterNameUtil::canonicalFilterName( + std::string(filter_name)); + if (filter_name == NetworkFilterNames::get().DirectResponse) { + envoy::extensions::filters::network::direct_response::v3::Config& config = + dynamic_cast( + *config_message); + if (config.response().specifier_case() == + envoy::config::core::v3::DataSource::SpecifierCase::kFilename) { + throw EnvoyException( + absl::StrCat("direct_response trying to open a file. 
Config:\n{}", config.DebugString())); + } + } else if (filter_name == NetworkFilterNames::get().LocalRateLimit) { + envoy::extensions::filters::network::local_ratelimit::v3::LocalRateLimit& config = + dynamic_cast( + *config_message); + if (config.token_bucket().fill_interval().seconds() > SecondsPerDay) { + // Too large fill_interval may cause "c++/v1/chrono" overflow when simulated_time_system_ is + // converting it to a smaller unit. Constraining fill_interval to no greater than one day is + // reasonable. + throw EnvoyException( + absl::StrCat("local_ratelimit trying to set a large fill_interval. Config:\n{}", + config.DebugString())); + } + } else if (filter_name == NetworkFilterNames::get().HttpConnectionManager) { + envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + config = dynamic_cast(*config_message); + if (config.codec_type() == envoy::extensions::filters::network::http_connection_manager::v3:: + HttpConnectionManager::HTTP3) { + // Quiche is still in progress and http_conn_manager has a dedicated fuzzer. + // So we won't fuzz it here with complex mocks. + throw EnvoyException(absl::StrCat( + "http_conn_manager trying to use Quiche which we won't fuzz here. 
Config:\n{}", + config.DebugString())); + } + } +} + +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/network/common/fuzz/uber_per_writefilter.cc b/test/extensions/filters/network/common/fuzz/uber_per_writefilter.cc new file mode 100644 index 0000000000000..911caa250c522 --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/uber_per_writefilter.cc @@ -0,0 +1,35 @@ +#include "extensions/filters/network/common/utility.h" +#include "extensions/filters/network/well_known_names.h" + +#include "test/extensions/filters/network/common/fuzz/uber_writefilter.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +std::vector UberWriteFilterFuzzer::filterNames() { + // These filters have already been covered by this fuzzer. + // Will extend to cover other network filters one by one. + static std::vector filter_names; + if (filter_names.empty()) { + const auto factories = Registry::FactoryRegistry< + Server::Configuration::NamedNetworkFilterConfigFactory>::factories(); + const std::vector supported_filter_names = { + NetworkFilterNames::get().ZooKeeperProxy, NetworkFilterNames::get().KafkaBroker, + NetworkFilterNames::get().MongoProxy, NetworkFilterNames::get().MySQLProxy + // TODO(jianwendong) Add "NetworkFilterNames::get().Postgres" after it supports untrusted + // data. 
+ }; + for (auto& filter_name : supported_filter_names) { + if (factories.contains(filter_name)) { + filter_names.push_back(filter_name); + } else { + ENVOY_LOG_MISC(debug, "Filter name not found in the factory: {}", filter_name); + } + } + } + return filter_names; +} + +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/network/common/fuzz/uber_readfilter.cc b/test/extensions/filters/network/common/fuzz/uber_readfilter.cc new file mode 100644 index 0000000000000..a5b2faa1ab26e --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/uber_readfilter.cc @@ -0,0 +1,114 @@ +#include "test/extensions/filters/network/common/fuzz/uber_readfilter.h" + +#include "common/config/utility.h" +#include "common/config/version_converter.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +void UberFilterFuzzer::reset() { + // Reset some changes made by current filter on some mock objects. + + // Close the connection to make sure the filter's callback is set to nullptr. + read_filter_callbacks_->connection_.raiseEvent(Network::ConnectionEvent::LocalClose); + // Clear the filter's raw pointer stored inside the connection_ and reset the connection_'s state. + read_filter_callbacks_->connection_.callbacks_.clear(); + read_filter_callbacks_->connection_.bytes_sent_callbacks_.clear(); + read_filter_callbacks_->connection_.state_ = Network::Connection::State::Open; + // Clear the pointers inside the mock_dispatcher + Event::MockDispatcher& mock_dispatcher = + dynamic_cast(read_filter_callbacks_->connection_.dispatcher_); + mock_dispatcher.clearDeferredDeleteList(); + read_filter_.reset(); +} + +void UberFilterFuzzer::fuzzerSetup() { + // Setup process when this fuzzer object is constructed. + // For a static fuzzer, this will only be executed once. + + // Get the pointer of read_filter when the read_filter is being added to connection_. 
+ read_filter_callbacks_ = std::make_shared>(); + ON_CALL(read_filter_callbacks_->connection_, addReadFilter(_)) + .WillByDefault(Invoke([&](Network::ReadFilterSharedPtr read_filter) -> void { + read_filter_ = read_filter; + read_filter_->initializeReadFilterCallbacks(*read_filter_callbacks_); + })); + ON_CALL(read_filter_callbacks_->connection_, addFilter(_)) + .WillByDefault(Invoke([&](Network::FilterSharedPtr read_filter) -> void { + read_filter_ = read_filter; + read_filter_->initializeReadFilterCallbacks(*read_filter_callbacks_); + })); + // Prepare sni for sni_cluster filter and sni_dynamic_forward_proxy filter. + ON_CALL(read_filter_callbacks_->connection_, requestedServerName()) + .WillByDefault(testing::Return("fake_cluster")); + // Prepare time source for filters such as local_ratelimit filter. + factory_context_.prepareSimulatedSystemTime(); + // Prepare address for filters such as ext_authz filter. + pipe_addr_ = std::make_shared("/test/test.sock"); + async_request_ = std::make_unique(); +} + +UberFilterFuzzer::UberFilterFuzzer() : time_source_(factory_context_.simulatedTimeSystem()) { + fuzzerSetup(); +} + +void UberFilterFuzzer::fuzz( + const envoy::config::listener::v3::Filter& proto_config, + const Protobuf::RepeatedPtrField<::test::extensions::filters::network::Action>& actions) { + try { + // Try to create the filter callback(cb_). Exit early if the config is invalid or violates PGV + // constraints. + const std::string& filter_name = proto_config.name(); + ENVOY_LOG_MISC(info, "filter name {}", filter_name); + auto& factory = Config::Utility::getAndCheckFactoryByName< + Server::Configuration::NamedNetworkFilterConfigFactory>(filter_name); + ProtobufTypes::MessagePtr message = Config::Utility::translateToFactoryConfig( + proto_config, factory_context_.messageValidationVisitor(), factory); + // Make sure no invalid system calls are executed in fuzzer. 
+ checkInvalidInputForFuzzer(filter_name, message.get()); + ENVOY_LOG_MISC(info, "Config content after decoded: {}", message->DebugString()); + cb_ = factory.createFilterFactoryFromProto(*message, factory_context_); + + } catch (const EnvoyException& e) { + ENVOY_LOG_MISC(debug, "Controlled exception in filter setup {}", e.what()); + return; + } + perFilterSetup(proto_config.name()); + // Add filter to connection_. + cb_(read_filter_callbacks_->connection_); + for (const auto& action : actions) { + ENVOY_LOG_MISC(trace, "action {}", action.DebugString()); + switch (action.action_selector_case()) { + case test::extensions::filters::network::Action::kOnData: { + ASSERT(read_filter_ != nullptr); + Buffer::OwnedImpl buffer(action.on_data().data()); + read_filter_->onData(buffer, action.on_data().end_stream()); + + break; + } + case test::extensions::filters::network::Action::kOnNewConnection: { + ASSERT(read_filter_ != nullptr); + read_filter_->onNewConnection(); + + break; + } + case test::extensions::filters::network::Action::kAdvanceTime: { + time_source_.advanceTimeAsync( + std::chrono::milliseconds(action.advance_time().milliseconds())); + factory_context_.dispatcher().run(Event::Dispatcher::RunType::NonBlock); + break; + } + default: { + // Unhandled actions. 
+ ENVOY_LOG_MISC(debug, "Action support is missing for:\n{}", action.DebugString()); + PANIC("A case is missing for an action"); + } + } + } + + reset(); +} + +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/network/common/fuzz/uber_readfilter.h b/test/extensions/filters/network/common/fuzz/uber_readfilter.h new file mode 100644 index 0000000000000..d055c5e4451ae --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/uber_readfilter.h @@ -0,0 +1,49 @@ +#include "envoy/network/filter.h" + +#include "common/protobuf/protobuf.h" + +#include "test/extensions/filters/network/common/fuzz/network_readfilter_fuzz.pb.validate.h" +#include "test/extensions/filters/network/common/fuzz/utils/fakes.h" +#include "test/mocks/network/mocks.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { + +class UberFilterFuzzer { +public: + UberFilterFuzzer(); + // This creates the filter config and runs the fuzzed data against the filter. + void + fuzz(const envoy::config::listener::v3::Filter& proto_config, + const Protobuf::RepeatedPtrField<::test::extensions::filters::network::Action>& actions); + // Get the name of filters which has been covered by this fuzzer. + static std::vector filterNames(); + // Check whether the filter's config is invalid for fuzzer(e.g. system call). + void checkInvalidInputForFuzzer(const std::string& filter_name, + Protobuf::Message* config_message); + +protected: + // Set-up filter specific mock expectations in constructor. + void fuzzerSetup(); + // Reset the states of the mock objects. + void reset(); + // Mock behaviors for specific filters. 
+ void perFilterSetup(const std::string& filter_name); + +private: + Server::Configuration::FakeFactoryContext factory_context_; + Network::ReadFilterSharedPtr read_filter_; + Network::FilterFactoryCb cb_; + Network::Address::InstanceConstSharedPtr pipe_addr_; + Event::SimulatedTimeSystem& time_source_; + std::shared_ptr> read_filter_callbacks_; + std::unique_ptr async_request_; + std::unique_ptr async_client_; + std::unique_ptr async_client_factory_; + Tracing::MockSpan span_; +}; + +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/network/common/fuzz/uber_writefilter.cc b/test/extensions/filters/network/common/fuzz/uber_writefilter.cc new file mode 100644 index 0000000000000..517429a1dd4bd --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/uber_writefilter.cc @@ -0,0 +1,123 @@ +#include "test/extensions/filters/network/common/fuzz/uber_writefilter.h" + +#include "common/config/utility.h" +#include "common/config/version_converter.h" + +using testing::_; +using testing::Return; + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +void UberWriteFilterFuzzer::reset() { + // Reset the state of dependencies so that a new fuzz input starts in a clean state. + + // Close the connection to make sure the filter's callback is set to nullptr. + write_filter_callbacks_->connection_.raiseEvent(Network::ConnectionEvent::LocalClose); + // Clear the filter's raw pointer stored inside the connection_ and reset the connection_'s state. 
+ write_filter_callbacks_->connection_.callbacks_.clear(); + write_filter_callbacks_->connection_.bytes_sent_callbacks_.clear(); + write_filter_callbacks_->connection_.state_ = Network::Connection::State::Open; + // Clear the pointers inside the mock_dispatcher + Event::MockDispatcher& mock_dispatcher = + dynamic_cast(write_filter_callbacks_->connection_.dispatcher_); + mock_dispatcher.clearDeferredDeleteList(); + write_filter_.reset(); +} + +void UberWriteFilterFuzzer::fuzzerSetup() { + // Setup process when this fuzzer object is constructed. + // For a static fuzzer, this will only be executed once. + + // Get the pointer of write_filter when the write_filter is being added to connection_. + write_filter_callbacks_ = std::make_shared>(); + read_filter_callbacks_ = std::make_shared>(); + ON_CALL(write_filter_callbacks_->connection_, addWriteFilter(_)) + .WillByDefault(Invoke([&](Network::WriteFilterSharedPtr write_filter) -> void { + write_filter->initializeWriteFilterCallbacks(*write_filter_callbacks_); + write_filter_ = write_filter; + })); + ON_CALL(write_filter_callbacks_->connection_, addFilter(_)) + .WillByDefault(Invoke([&](Network::FilterSharedPtr filter) -> void { + filter->initializeReadFilterCallbacks(*read_filter_callbacks_); + filter->initializeWriteFilterCallbacks(*write_filter_callbacks_); + write_filter_ = filter; + })); + factory_context_.prepareSimulatedSystemTime(); + + // Set featureEnabled for mongo_proxy + ON_CALL(factory_context_.runtime_loader_.snapshot_, featureEnabled("mongo.proxy_enabled", 100)) + .WillByDefault(Return(true)); + ON_CALL(factory_context_.runtime_loader_.snapshot_, + featureEnabled("mongo.connection_logging_enabled", 100)) + .WillByDefault(Return(true)); + ON_CALL(factory_context_.runtime_loader_.snapshot_, featureEnabled("mongo.logging_enabled", 100)) + .WillByDefault(Return(true)); + + // Set featureEnabled for thrift_proxy + ON_CALL(factory_context_.runtime_loader_.snapshot_, + 
featureEnabled("ratelimit.thrift_filter_enabled", 100)) + .WillByDefault(Return(true)); + ON_CALL(factory_context_.runtime_loader_.snapshot_, + featureEnabled("ratelimit.thrift_filter_enforcing", 100)) + .WillByDefault(Return(true)); + ON_CALL(factory_context_.runtime_loader_.snapshot_, + featureEnabled("ratelimit.test_key.thrift_filter_enabled", 100)) + .WillByDefault(Return(true)); +} + +UberWriteFilterFuzzer::UberWriteFilterFuzzer() + : time_source_(factory_context_.simulatedTimeSystem()) { + fuzzerSetup(); +} + +void UberWriteFilterFuzzer::fuzz( + const envoy::config::listener::v3::Filter& proto_config, + const Protobuf::RepeatedPtrField<::test::extensions::filters::network::WriteAction>& actions) { + try { + // Try to create the filter callback(cb_). Exit early if the config is invalid or violates PGV + // constraints. + const std::string& filter_name = proto_config.name(); + ENVOY_LOG_MISC(debug, "filter name {}", filter_name); + auto& factory = Config::Utility::getAndCheckFactoryByName< + Server::Configuration::NamedNetworkFilterConfigFactory>(filter_name); + ProtobufTypes::MessagePtr message = Config::Utility::translateToFactoryConfig( + proto_config, factory_context_.messageValidationVisitor(), factory); + ENVOY_LOG_MISC(debug, "Config content after decoded: {}", message->DebugString()); + cb_ = factory.createFilterFactoryFromProto(*message, factory_context_); + // Add filter to connection_. 
+ cb_(write_filter_callbacks_->connection_); + } catch (const EnvoyException& e) { + ENVOY_LOG_MISC(debug, "Controlled exception in filter setup {}", e.what()); + return; + } + for (const auto& action : actions) { + ENVOY_LOG_MISC(debug, "action {}", action.DebugString()); + switch (action.action_selector_case()) { + case test::extensions::filters::network::WriteAction::kOnWrite: { + ASSERT(write_filter_ != nullptr); + Buffer::OwnedImpl buffer(action.on_write().data()); + write_filter_->onWrite(buffer, action.on_write().end_stream()); + + break; + } + case test::extensions::filters::network::WriteAction::kAdvanceTime: { + time_source_.advanceTimeAsync( + std::chrono::milliseconds(action.advance_time().milliseconds())); + factory_context_.dispatcher().run(Event::Dispatcher::RunType::NonBlock); + break; + } + default: { + // Unhandled actions. + ENVOY_LOG_MISC(debug, "Action support is missing for:\n{}", action.DebugString()); + PANIC("A case is missing for an action"); + } + } + } + + reset(); +} + +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/network/common/fuzz/uber_writefilter.h b/test/extensions/filters/network/common/fuzz/uber_writefilter.h new file mode 100644 index 0000000000000..9f6c34eb60e93 --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/uber_writefilter.h @@ -0,0 +1,40 @@ +#include "envoy/network/filter.h" + +#include "common/protobuf/protobuf.h" + +#include "test/extensions/filters/network/common/fuzz/network_writefilter_fuzz.pb.validate.h" +#include "test/extensions/filters/network/common/fuzz/utils/fakes.h" +#include "test/mocks/network/mocks.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { + +class UberWriteFilterFuzzer { +public: + UberWriteFilterFuzzer(); + // This creates the filter config and runs the fuzzed data against the filter. 
+ void fuzz( + const envoy::config::listener::v3::Filter& proto_config, + const Protobuf::RepeatedPtrField<::test::extensions::filters::network::WriteAction>& actions); + // Get the name of filters which has been covered by this fuzzer. + static std::vector filterNames(); + +protected: + // Set-up filter specific mock expectations in constructor. + void fuzzerSetup(); + // Reset the states of the mock objects. + void reset(); + +private: + Server::Configuration::FakeFactoryContext factory_context_; + Event::SimulatedTimeSystem& time_source_; + Network::WriteFilterSharedPtr write_filter_; + Network::FilterFactoryCb cb_; + std::shared_ptr> write_filter_callbacks_; + std::shared_ptr> read_filter_callbacks_; +}; + +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/network/common/fuzz/utils/BUILD b/test/extensions/filters/network/common/fuzz/utils/BUILD new file mode 100644 index 0000000000000..6c231c2a185f0 --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/utils/BUILD @@ -0,0 +1,17 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_test_library", + "envoy_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_cc_test_library( + name = "network_filter_fuzzer_fakes_lib", + hdrs = ["fakes.h"], + deps = [ + "//test/mocks/server:factory_context_mocks", + ], +) diff --git a/test/extensions/filters/network/common/fuzz/utils/fakes.h b/test/extensions/filters/network/common/fuzz/utils/fakes.h new file mode 100644 index 0000000000000..035dcb3e29cac --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/utils/fakes.h @@ -0,0 +1,49 @@ +#include "test/mocks/server/factory_context.h" + +namespace Envoy { +namespace Server { +namespace Configuration { +class FakeFactoryContext : public MockFactoryContext { +public: + void prepareSimulatedSystemTime() { + api_ = Api::createApiForTest(time_system_); + dispatcher_ = api_->allocateDispatcher("test_thread"); + } + 
AccessLog::AccessLogManager& accessLogManager() override { return access_log_manager_; } + Upstream::ClusterManager& clusterManager() override { return cluster_manager_; } + Event::Dispatcher& dispatcher() override { return *dispatcher_; } + const Network::DrainDecision& drainDecision() override { return drain_manager_; } + Init::Manager& initManager() override { return init_manager_; } + ServerLifecycleNotifier& lifecycleNotifier() override { return lifecycle_notifier_; } + const LocalInfo::LocalInfo& localInfo() const override { return local_info_; } + Envoy::Random::RandomGenerator& random() override { return random_; } + Envoy::Runtime::Loader& runtime() override { return runtime_loader_; } + Stats::Scope& scope() override { return scope_; } + Singleton::Manager& singletonManager() override { return *singleton_manager_; } + ThreadLocal::Instance& threadLocal() override { return thread_local_; } + Server::Admin& admin() override { return admin_; } + Stats::Scope& listenerScope() override { return listener_scope_; } + Api::Api& api() override { return *api_; } + TimeSource& timeSource() override { return time_system_; } + OverloadManager& overloadManager() override { return overload_manager_; } + ProtobufMessage::ValidationContext& messageValidationContext() override { + return validation_context_; + } + ProtobufMessage::ValidationVisitor& messageValidationVisitor() override { + return ProtobufMessage::getStrictValidationVisitor(); + } + Event::SimulatedTimeSystem& simulatedTimeSystem() { + return dynamic_cast(time_system_); + } + Event::TestTimeSystem& timeSystem() { return time_system_; } + Grpc::Context& grpcContext() override { return grpc_context_; } + Http::Context& httpContext() override { return http_context_; } + + Event::DispatcherPtr dispatcher_; + Event::SimulatedTimeSystem time_system_; + Api::ApiPtr api_; +}; + +} // namespace Configuration +} // namespace Server +} // namespace Envoy diff --git a/test/extensions/filters/network/common/redis/BUILD 
b/test/extensions/filters/network/common/redis/BUILD index a2d076a1b4b9a..dffc239544886 100644 --- a/test/extensions/filters/network/common/redis/BUILD +++ b/test/extensions/filters/network/common/redis/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", @@ -8,6 +6,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_mock( diff --git a/test/extensions/filters/network/common/redis/client_impl_test.cc b/test/extensions/filters/network/common/redis/client_impl_test.cc index c9028a1da42ac..fc9c8131aa07e 100644 --- a/test/extensions/filters/network/common/redis/client_impl_test.cc +++ b/test/extensions/filters/network/common/redis/client_impl_test.cc @@ -116,7 +116,7 @@ class RedisClientImplTest : public testing::Test, Common::Redis::RespValue readonly_request = Utility::ReadOnlyRequest::instance(); EXPECT_CALL(*encoder_, encode(Eq(readonly_request), _)); EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false)); - client_->initialize(auth_password_); + client_->initialize(auth_username_, auth_password_); EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_rq_total_.value()); EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_rq_active_.value()); @@ -143,6 +143,7 @@ class RedisClientImplTest : public testing::Test, NiceMock stats_; Stats::ScopePtr stats_scope_; Common::Redis::RedisCommandStatsSharedPtr redis_command_stats_; + std::string auth_username_; std::string auth_password_; }; @@ -189,7 +190,7 @@ class ConfigBufferSizeGTSingleRequest : public Config { } uint32_t maxUpstreamUnknownConnections() const override { return 0; } bool enableCommandStats() const override { return false; } - ReadPolicy readPolicy() const override { return ReadPolicy::Master; } + ReadPolicy readPolicy() const override { return ReadPolicy::Primary; } }; TEST_F(RedisClientImplTest, BatchWithTimerFiring) { @@ -290,7 +291,7 @@ TEST_F(RedisClientImplTest, Basic) { setup(); - 
client_->initialize(auth_password_); + client_->initialize(auth_username_, auth_password_); Common::Redis::RespValue request1; MockClientCallbacks callbacks1; @@ -346,7 +347,7 @@ class ConfigEnableCommandStats : public Config { std::chrono::milliseconds bufferFlushTimeoutInMs() const override { return std::chrono::milliseconds(0); } - ReadPolicy readPolicy() const override { return ReadPolicy::Master; } + ReadPolicy readPolicy() const override { return ReadPolicy::Primary; } uint32_t maxUpstreamUnknownConnections() const override { return 0; } bool enableCommandStats() const override { return true; } }; @@ -370,7 +371,7 @@ TEST_F(RedisClientImplTest, CommandStatsDisabledSingleRequest) { setup(); - client_->initialize(auth_password_); + client_->initialize(auth_username_, auth_password_); std::string get_command = "get"; @@ -426,7 +427,7 @@ TEST_F(RedisClientImplTest, CommandStatsEnabledTwoRequests) { setup(std::make_unique()); - client_->initialize(auth_password_); + client_->initialize(auth_username_, auth_password_); std::string get_command = "get"; @@ -511,7 +512,7 @@ TEST_F(RedisClientImplTest, InitializedWithAuthPassword) { Utility::AuthRequest auth_request(auth_password_); EXPECT_CALL(*encoder_, encode(Eq(auth_request), _)); EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false)); - client_->initialize(auth_password_); + client_->initialize(auth_username_, auth_password_); EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_rq_total_.value()); EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_rq_active_.value()); @@ -523,7 +524,29 @@ TEST_F(RedisClientImplTest, InitializedWithAuthPassword) { client_->close(); } -TEST_F(RedisClientImplTest, InitializedWithPreferMasterReadPolicy) { +TEST_F(RedisClientImplTest, InitializedWithAuthAcl) { + InSequence s; + + setup(); + + auth_username_ = "testing username"; + auth_password_ = "testing password"; + Utility::AuthRequest auth_request(auth_username_, auth_password_); + EXPECT_CALL(*encoder_, encode(Eq(auth_request), 
_)); + EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false)); + client_->initialize(auth_username_, auth_password_); + + EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_rq_total_.value()); + EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_rq_active_.value()); + EXPECT_EQ(1UL, host_->stats_.rq_total_.value()); + EXPECT_EQ(1UL, host_->stats_.rq_active_.value()); + + EXPECT_CALL(*upstream_connection_, close(Network::ConnectionCloseType::NoFlush)); + EXPECT_CALL(*connect_or_op_timer_, disableTimer()); + client_->close(); +} + +TEST_F(RedisClientImplTest, InitializedWithPreferPrimaryReadPolicy) { testInitializeReadPolicy(envoy::extensions::filters::network::redis_proxy::v3::RedisProxy:: ConnPoolSettings::PREFER_MASTER); } @@ -709,7 +732,7 @@ class ConfigOutlierDisabled : public Config { std::chrono::milliseconds bufferFlushTimeoutInMs() const override { return std::chrono::milliseconds(0); } - ReadPolicy readPolicy() const override { return ReadPolicy::Master; } + ReadPolicy readPolicy() const override { return ReadPolicy::Primary; } uint32_t maxUpstreamUnknownConnections() const override { return 0; } bool enableCommandStats() const override { return false; } }; @@ -1188,9 +1211,10 @@ TEST(RedisClientFactoryImplTest, Basic) { Stats::IsolatedStoreImpl stats_; auto redis_command_stats = Common::Redis::RedisCommandStats::createRedisCommandStats(stats_.symbolTable()); + const std::string auth_username; const std::string auth_password; - ClientPtr client = - factory.create(host, dispatcher, config, redis_command_stats, stats_, auth_password); + ClientPtr client = factory.create(host, dispatcher, config, redis_command_stats, stats_, + auth_username, auth_password); client->close(); } } // namespace Client diff --git a/test/extensions/filters/network/common/redis/mocks.h b/test/extensions/filters/network/common/redis/mocks.h index 0561c7bb57e0a..4f8f11bdaa4b5 100644 --- a/test/extensions/filters/network/common/redis/mocks.h +++ 
b/test/extensions/filters/network/common/redis/mocks.h @@ -87,7 +87,7 @@ class MockClient : public Client { MOCK_METHOD(void, close, ()); MOCK_METHOD(PoolRequest*, makeRequest_, (const Common::Redis::RespValue& request, ClientCallbacks& callbacks)); - MOCK_METHOD(void, initialize, (const std::string& password)); + MOCK_METHOD(void, initialize, (const std::string& username, const std::string& password)); std::list callbacks_; std::list client_callbacks_; diff --git a/test/extensions/filters/network/direct_response/BUILD b/test/extensions/filters/network/direct_response/BUILD index a828acc3d659b..8ebb790617e37 100644 --- a/test/extensions/filters/network/direct_response/BUILD +++ b/test/extensions/filters/network/direct_response/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( @@ -29,3 +29,15 @@ envoy_extension_cc_test( "//test/test_common:utility_lib", ], ) + +envoy_extension_cc_test( + name = "direct_response_test", + srcs = ["direct_response_test.cc"], + extension_name = "envoy.filters.network.direct_response", + deps = [ + "//source/extensions/filters/network/direct_response:filter", + "//test/mocks/api:api_mocks", + "//test/mocks/network:network_mocks", + "@envoy_api//envoy/extensions/filters/network/direct_response/v3:pkg_cc_proto", + ], +) diff --git a/test/extensions/filters/network/direct_response/direct_response_integration_test.cc b/test/extensions/filters/network/direct_response/direct_response_integration_test.cc index 6b6272165ae3b..8ff030275c417 100644 --- a/test/extensions/filters/network/direct_response/direct_response_integration_test.cc +++ b/test/extensions/filters/network/direct_response/direct_response_integration_test.cc @@ -21,21 +21,10 @@ class DirectResponseIntegrationTest : public testing::TestWithParam void { + auto connection = 
createConnectionDriver( + lookupPort("listener_0"), "hello", + [&response](Network::ClientConnection& conn, const Buffer::Instance& data) -> void { response.append(data.toString()); - connection.close(); - }, - version_); - - connection.run(); + conn.close(Network::ConnectionCloseType::FlushWrite); + }); + connection->run(); EXPECT_EQ("hello, world!\n", response); EXPECT_THAT(waitForAccessLog(listener_access_log_name_), testing::HasSubstr(StreamInfo::ResponseCodeDetails::get().DirectResponse)); diff --git a/test/extensions/filters/network/direct_response/direct_response_test.cc b/test/extensions/filters/network/direct_response/direct_response_test.cc new file mode 100644 index 0000000000000..7218bd194426c --- /dev/null +++ b/test/extensions/filters/network/direct_response/direct_response_test.cc @@ -0,0 +1,61 @@ +#include "envoy/extensions/filters/network/direct_response/v3/config.pb.validate.h" + +#include "extensions/filters/network/direct_response/filter.h" + +#include "test/mocks/api/mocks.h" +#include "test/mocks/network/mocks.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::_; +using testing::NiceMock; + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace DirectResponse { + +class DirectResponseFilterTest : public testing::Test { +public: + void initialize(const std::string& response) { + filter_ = std::make_shared(response); + filter_->initializeReadFilterCallbacks(read_filter_callbacks_); + } + std::shared_ptr filter_; + NiceMock read_filter_callbacks_; +}; + +// Test the filter's onNewConnection() with a non-empty response +TEST_F(DirectResponseFilterTest, OnNewConnection) { + initialize("hello"); + Buffer::OwnedImpl response("hello"); + EXPECT_CALL(read_filter_callbacks_.connection_, write(BufferEqual(&response), false)); + EXPECT_CALL(read_filter_callbacks_.connection_, close(Network::ConnectionCloseType::FlushWrite)); + EXPECT_CALL(read_filter_callbacks_.connection_.stream_info_, + 
setResponseCodeDetails(StreamInfo::ResponseCodeDetails::get().DirectResponse)); + EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onNewConnection()); +} + +// Test the filter's onNewConnection() with an empty response +TEST_F(DirectResponseFilterTest, OnNewConnectionEmptyResponse) { + initialize(""); + EXPECT_CALL(read_filter_callbacks_.connection_, write(_, _)).Times(0); + EXPECT_CALL(read_filter_callbacks_.connection_, close(Network::ConnectionCloseType::FlushWrite)); + EXPECT_CALL(read_filter_callbacks_.connection_.stream_info_, + setResponseCodeDetails(StreamInfo::ResponseCodeDetails::get().DirectResponse)); + EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onNewConnection()); +} + +// Test the filter's onData() +TEST_F(DirectResponseFilterTest, OnData) { + initialize("hello"); + Buffer::OwnedImpl data("data"); + EXPECT_CALL(read_filter_callbacks_.connection_, write(_, _)).Times(0); + EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(data, false)); +} + +} // namespace DirectResponse +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/network/dubbo_proxy/BUILD b/test/extensions/filters/network/dubbo_proxy/BUILD index 00cbebf6780af..1a2b5e0aa8790 100644 --- a/test/extensions/filters/network/dubbo_proxy/BUILD +++ b/test/extensions/filters/network/dubbo_proxy/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", @@ -11,6 +9,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_mock( @@ -89,7 +89,7 @@ envoy_extension_cc_test( ":utility_lib", "//source/extensions/filters/network/dubbo_proxy:config", "//source/extensions/filters/network/dubbo_proxy/filters:filter_config_interface", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "//test/test_common:registry_lib", 
"@envoy_api//envoy/extensions/filters/network/dubbo_proxy/v3:pkg_cc_proto", ], @@ -112,7 +112,7 @@ envoy_extension_cc_test( deps = [ "//source/extensions/filters/network/dubbo_proxy:metadata_lib", "//source/extensions/filters/network/dubbo_proxy/router:route_matcher", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "@envoy_api//envoy/extensions/filters/network/dubbo_proxy/v3:pkg_cc_proto", ], ) @@ -128,7 +128,7 @@ envoy_extension_cc_test( "//source/extensions/filters/network/dubbo_proxy:dubbo_protocol_impl_lib", "//source/extensions/filters/network/dubbo_proxy:metadata_lib", "//source/extensions/filters/network/dubbo_proxy/router:config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "//test/test_common:registry_lib", ], ) @@ -156,7 +156,7 @@ envoy_extension_cc_test( ":mocks_lib", "//source/extensions/filters/network/dubbo_proxy/filters:well_known_names", "//source/extensions/filters/network/dubbo_proxy/router:config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "@envoy_api//envoy/extensions/filters/network/dubbo_proxy/router/v3:pkg_cc_proto", ], ) @@ -185,7 +185,7 @@ envoy_extension_cc_test( "//source/extensions/filters/network/dubbo_proxy:dubbo_hessian2_serializer_impl_lib", "//source/extensions/filters/network/dubbo_proxy:dubbo_protocol_impl_lib", "//test/common/stats:stat_test_utility_lib", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "@envoy_api//envoy/extensions/filters/network/dubbo_proxy/v3:pkg_cc_proto", ], ) diff --git a/test/extensions/filters/network/dubbo_proxy/app_exception_test.cc b/test/extensions/filters/network/dubbo_proxy/app_exception_test.cc index 3856f893bf2cf..d58bea0e47e6d 100644 --- a/test/extensions/filters/network/dubbo_proxy/app_exception_test.cc +++ b/test/extensions/filters/network/dubbo_proxy/app_exception_test.cc @@ -49,9 +49,9 @@ TEST_F(AppExceptionTest, Encode) { 
EXPECT_TRUE(result.second); const ContextImpl* context = static_cast(result.first.get()); - EXPECT_EQ(expect_body_size, context->body_size()); - EXPECT_EQ(metadata->message_type(), MessageType::Response); - buffer.drain(context->header_size()); + EXPECT_EQ(expect_body_size, context->bodySize()); + EXPECT_EQ(metadata->messageType(), MessageType::Response); + buffer.drain(context->headerSize()); // Verify the response type and content. size_t hessian_int_size; diff --git a/test/extensions/filters/network/dubbo_proxy/config_test.cc b/test/extensions/filters/network/dubbo_proxy/config_test.cc index 559d5995fe17b..bdf4b37204d0f 100644 --- a/test/extensions/filters/network/dubbo_proxy/config_test.cc +++ b/test/extensions/filters/network/dubbo_proxy/config_test.cc @@ -5,7 +5,7 @@ #include "extensions/filters/network/dubbo_proxy/filters/filter_config.h" #include "test/extensions/filters/network/dubbo_proxy/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "test/test_common/registry.h" #include "gmock/gmock.h" @@ -22,9 +22,9 @@ using DubboProxyProto = envoy::extensions::filters::network::dubbo_proxy::v3::Du namespace { -DubboProxyProto parseDubboProxyFromV2Yaml(const std::string& yaml) { +DubboProxyProto parseDubboProxyFromV3Yaml(const std::string& yaml, bool avoid_boosting = true) { DubboProxyProto dubbo_proxy; - TestUtility::loadFromYaml(yaml, dubbo_proxy); + TestUtility::loadFromYaml(yaml, dubbo_proxy, false, avoid_boosting); return dubbo_proxy; } @@ -92,7 +92,7 @@ TEST_F(DubboFilterConfigTest, DubboProxyWithExplicitRouterConfig) { - name: envoy.filters.dubbo.router )EOF"; - DubboProxyProto config = parseDubboProxyFromV2Yaml(yaml); + DubboProxyProto config = parseDubboProxyFromV3Yaml(yaml); testConfig(config); } @@ -107,7 +107,7 @@ TEST_F(DubboFilterConfigTest, DubboProxyWithUnknownFilter) { - name: envoy.filters.dubbo.router )EOF"; - DubboProxyProto config = parseDubboProxyFromV2Yaml(yaml); + DubboProxyProto config = 
parseDubboProxyFromV3Yaml(yaml); EXPECT_THROW_WITH_REGEX(factory_.createFilterFactoryFromProto(config, context_), EnvoyException, "no_such_filter"); @@ -131,7 +131,7 @@ TEST_F(DubboFilterConfigTest, DubboProxyWithMultipleFilters) { DubboFilters::MockFilterConfigFactory factory; Registry::InjectFactory registry(factory); - DubboProxyProto config = parseDubboProxyFromV2Yaml(yaml); + DubboProxyProto config = parseDubboProxyFromV3Yaml(yaml); testConfig(config); EXPECT_EQ(1, factory.config_struct_.fields_size()); @@ -156,7 +156,7 @@ TEST_F(DubboFilterConfigTest, CreateFilterChain) { DubboFilters::MockFilterConfigFactory factory; Registry::InjectFactory registry(factory); - DubboProxyProto dubbo_config = parseDubboProxyFromV2Yaml(yaml); + DubboProxyProto dubbo_config = parseDubboProxyFromV3Yaml(yaml); NiceMock context; DubboFilters::MockFilterChainFactoryCallbacks callbacks; diff --git a/test/extensions/filters/network/dubbo_proxy/conn_manager_test.cc b/test/extensions/filters/network/dubbo_proxy/conn_manager_test.cc index 226a9bc5161b7..54951ea41c316 100644 --- a/test/extensions/filters/network/dubbo_proxy/conn_manager_test.cc +++ b/test/extensions/filters/network/dubbo_proxy/conn_manager_test.cc @@ -14,7 +14,7 @@ #include "test/extensions/filters/network/dubbo_proxy/mocks.h" #include "test/extensions/filters/network/dubbo_proxy/utility.h" #include "test/mocks/network/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "test/test_common/printers.h" #include "gmock/gmock.h" @@ -318,7 +318,7 @@ class ConnectionManagerTest : public testing::Test { Buffer::OwnedImpl buffer_; Buffer::OwnedImpl write_buffer_; NiceMock filter_callbacks_; - NiceMock random_; + NiceMock random_; std::unique_ptr conn_manager_; MockSerializer* custom_serializer_{}; MockProtocol* custom_protocol_{}; @@ -366,13 +366,13 @@ TEST_F(ConnectionManagerTest, OnDataHandlesHeartbeatEvent) { auto result = protocol->decodeHeader(buffer, metadata); 
EXPECT_TRUE(result.second); const DubboProxy::ContextImpl& ctx = *static_cast(result.first.get()); - EXPECT_TRUE(ctx.is_heartbeat()); + EXPECT_TRUE(ctx.isHeartbeat()); EXPECT_TRUE(metadata->hasResponseStatus()); - EXPECT_FALSE(metadata->is_two_way()); - EXPECT_EQ(ProtocolType::Dubbo, metadata->protocol_type()); - EXPECT_EQ(metadata->response_status(), ResponseStatus::Ok); - EXPECT_EQ(metadata->message_type(), MessageType::HeartbeatResponse); - buffer.drain(ctx.header_size()); + EXPECT_FALSE(metadata->isTwoWay()); + EXPECT_EQ(ProtocolType::Dubbo, metadata->protocolType()); + EXPECT_EQ(metadata->responseStatus(), ResponseStatus::Ok); + EXPECT_EQ(metadata->messageType(), MessageType::HeartbeatResponse); + buffer.drain(ctx.headerSize()); })); EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration); @@ -1187,7 +1187,7 @@ serialization_type: Hessian2 .WillOnce(Invoke([&](DubboFilters::DecoderFilterCallbacks& cb) -> void { callbacks = &cb; })); EXPECT_CALL(*decoder_filter, onMessageDecoded(_, _)) .WillOnce(Invoke([&](MessageMetadataSharedPtr metadata, ContextSharedPtr) -> FilterStatus { - auto invo = static_cast(&metadata->invocation_info()); + auto invo = static_cast(&metadata->invocationInfo()); auto data = const_cast(invo); data->setServiceName("org.apache.dubbo.demo.DemoService"); data->setMethodName("test"); @@ -1248,7 +1248,7 @@ TEST_F(ConnectionManagerTest, MessageDecodedReturnStopIteration) { size_t buf_size = buffer_.length(); EXPECT_CALL(*decoder_filter, onMessageDecoded(_, _)) .WillOnce(Invoke([&](MessageMetadataSharedPtr, ContextSharedPtr ctx) -> FilterStatus { - EXPECT_EQ(ctx->message_size(), buf_size); + EXPECT_EQ(ctx->messageSize(), buf_size); return FilterStatus::StopIteration; })); @@ -1336,8 +1336,8 @@ TEST_F(ConnectionManagerTest, HandleResponseWithEncoderFilter) { EXPECT_CALL(*encoder_filter, onMessageEncoded(_, _)) .WillOnce( Invoke([&](MessageMetadataSharedPtr metadata, ContextSharedPtr ctx) -> FilterStatus { - 
EXPECT_EQ(metadata->request_id(), request_id); - EXPECT_EQ(ctx->message_size(), expect_response_length); + EXPECT_EQ(metadata->requestId(), request_id); + EXPECT_EQ(ctx->messageSize(), expect_response_length); return FilterStatus::Continue; })); @@ -1364,7 +1364,7 @@ TEST_F(ConnectionManagerTest, HandleResponseWithCodecFilter) { .WillOnce(Invoke([&](DubboFilters::DecoderFilterCallbacks& cb) -> void { callbacks = &cb; })); EXPECT_CALL(*mock_codec_filter, onMessageDecoded(_, _)) .WillOnce(Invoke([&](MessageMetadataSharedPtr metadata, ContextSharedPtr) -> FilterStatus { - EXPECT_EQ(metadata->request_id(), request_id); + EXPECT_EQ(metadata->requestId(), request_id); return FilterStatus::Continue; })); @@ -1386,8 +1386,8 @@ TEST_F(ConnectionManagerTest, HandleResponseWithCodecFilter) { EXPECT_CALL(*mock_codec_filter, onMessageEncoded(_, _)) .WillOnce( Invoke([&](MessageMetadataSharedPtr metadata, ContextSharedPtr ctx) -> FilterStatus { - EXPECT_EQ(metadata->request_id(), request_id); - EXPECT_EQ(ctx->message_size(), expect_response_length); + EXPECT_EQ(metadata->requestId(), request_id); + EXPECT_EQ(ctx->messageSize(), expect_response_length); return FilterStatus::Continue; })); @@ -1410,7 +1410,7 @@ TEST_F(ConnectionManagerTest, AddDataWithStopAndContinue) { EXPECT_CALL(*config_->decoder_filters_[0], onMessageDecoded(_, _)) .WillOnce(Invoke([&](MessageMetadataSharedPtr metadata, ContextSharedPtr) -> FilterStatus { - EXPECT_EQ(metadata->request_id(), request_id); + EXPECT_EQ(metadata->requestId(), request_id); return FilterStatus::Continue; })); EXPECT_CALL(*config_->decoder_filters_[1], onMessageDecoded(_, _)) @@ -1425,7 +1425,7 @@ TEST_F(ConnectionManagerTest, AddDataWithStopAndContinue) { // For encode direction EXPECT_CALL(*config_->encoder_filters_[0], onMessageEncoded(_, _)) .WillOnce(Invoke([&](MessageMetadataSharedPtr metadata, ContextSharedPtr) -> FilterStatus { - EXPECT_EQ(metadata->request_id(), request_id); + EXPECT_EQ(metadata->requestId(), request_id); 
return FilterStatus::Continue; })); EXPECT_CALL(*config_->encoder_filters_[1], onMessageEncoded(_, _)) diff --git a/test/extensions/filters/network/dubbo_proxy/decoder_test.cc b/test/extensions/filters/network/dubbo_proxy/decoder_test.cc index cf285ba5bb777..bcc3997772cdc 100644 --- a/test/extensions/filters/network/dubbo_proxy/decoder_test.cc +++ b/test/extensions/filters/network/dubbo_proxy/decoder_test.cc @@ -38,8 +38,8 @@ class DecoderStateMachineTestBase { Invoke([=](Buffer::Instance&, MessageMetadataSharedPtr metadata) -> std::pair { auto context = std::make_shared(); - context->set_header_size(16); - context->set_body_size(body_size); + context->setHeaderSize(16); + context->setBodySize(body_size); metadata->setMessageType(type); return std::pair(context, true); @@ -99,7 +99,7 @@ TEST_F(DubboDecoderStateMachineTest, RequestMessageCallbacks) { Buffer::OwnedImpl buffer; EXPECT_EQ(dsm.run(buffer), ProtocolState::Done); - EXPECT_EQ(active_stream_->metadata_->message_type(), MessageType::Request); + EXPECT_EQ(active_stream_->metadata_->messageType(), MessageType::Request); } TEST_F(DubboDecoderStateMachineTest, ResponseMessageCallbacks) { @@ -114,7 +114,7 @@ TEST_F(DubboDecoderStateMachineTest, ResponseMessageCallbacks) { Buffer::OwnedImpl buffer; EXPECT_EQ(dsm.run(buffer), ProtocolState::Done); - EXPECT_EQ(active_stream_->metadata_->message_type(), MessageType::Response); + EXPECT_EQ(active_stream_->metadata_->messageType(), MessageType::Response); } TEST_F(DubboDecoderStateMachineTest, SerializeRpcInvocationException) { @@ -194,8 +194,8 @@ TEST_F(DubboDecoderTest, NeedMoreDataForProtocolBody) { MessageMetadataSharedPtr metadate) -> std::pair { metadate->setMessageType(MessageType::Response); auto context = std::make_shared(); - context->set_header_size(16); - context->set_body_size(10); + context->setHeaderSize(16); + context->setBodySize(10); return std::pair(context, true); })); EXPECT_CALL(protocol_, decodeData(_, _, _)) @@ -228,8 +228,8 @@ 
TEST_F(DubboDecoderTest, DecodeResponseMessage) { MessageMetadataSharedPtr metadate) -> std::pair { metadate->setMessageType(MessageType::Response); auto context = std::make_shared(); - context->set_header_size(16); - context->set_body_size(10); + context->setHeaderSize(16); + context->setBodySize(10); return std::pair(context, true); })); EXPECT_CALL(protocol_, decodeData(_, _, _)).WillOnce(Return(true)); @@ -251,8 +251,8 @@ TEST_F(DubboDecoderTest, DecodeResponseMessage) { MessageMetadataSharedPtr metadate) -> std::pair { metadate->setMessageType(MessageType::Response); auto context = std::make_shared(); - context->set_header_size(16); - context->set_body_size(10); + context->setHeaderSize(16); + context->setBodySize(10); return std::pair(context, true); })); EXPECT_CALL(protocol_, decodeData(_, _, _)).WillOnce(Return(true)); diff --git a/test/extensions/filters/network/dubbo_proxy/dubbo_hessian2_serializer_impl_test.cc b/test/extensions/filters/network/dubbo_proxy/dubbo_hessian2_serializer_impl_test.cc index 17a500b4719bf..94ff9f3f7360e 100644 --- a/test/extensions/filters/network/dubbo_proxy/dubbo_hessian2_serializer_impl_test.cc +++ b/test/extensions/filters/network/dubbo_proxy/dubbo_hessian2_serializer_impl_test.cc @@ -32,14 +32,14 @@ TEST(HessianProtocolTest, deserializeRpcInvocation) { 0x04, 't', 'e', 's', 't', // method name })); std::shared_ptr context = std::make_shared(); - context->set_body_size(buffer.length()); + context->setBodySize(buffer.length()); auto result = serializer.deserializeRpcInvocation(buffer, context); EXPECT_TRUE(result.second); auto invo = result.first; - EXPECT_STREQ("test", invo->method_name().c_str()); - EXPECT_STREQ("test", invo->service_name().c_str()); - EXPECT_STREQ("0.0.0", invo->service_version().value().c_str()); + EXPECT_STREQ("test", invo->methodName().c_str()); + EXPECT_STREQ("test", invo->serviceName().c_str()); + EXPECT_STREQ("0.0.0", invo->serviceVersion().value().c_str()); } // incorrect body size @@ -54,7 +54,7 @@ 
TEST(HessianProtocolTest, deserializeRpcInvocation) { std::string exception_string = fmt::format("RpcInvocation size({}) large than body size({})", buffer.length(), buffer.length() - 1); std::shared_ptr context = std::make_shared(); - context->set_body_size(buffer.length() - 1); + context->setBodySize(buffer.length() - 1); EXPECT_THROW_WITH_MESSAGE(serializer.deserializeRpcInvocation(buffer, context), EnvoyException, exception_string); } @@ -70,7 +70,7 @@ TEST(HessianProtocolTest, deserializeRpcResult) { '\x94', // return type 0x04, 't', 'e', 's', 't', // return body })); - context->set_body_size(4); + context->setBodySize(4); auto result = serializer.deserializeRpcResult(buffer, context); EXPECT_TRUE(result.second); EXPECT_FALSE(result.first->hasException()); @@ -82,7 +82,7 @@ TEST(HessianProtocolTest, deserializeRpcResult) { '\x93', // return type 0x04, 't', 'e', 's', 't', // return body })); - context->set_body_size(4); + context->setBodySize(4); auto result = serializer.deserializeRpcResult(buffer, context); EXPECT_TRUE(result.second); EXPECT_TRUE(result.first->hasException()); @@ -94,7 +94,7 @@ TEST(HessianProtocolTest, deserializeRpcResult) { '\x90', // return type 0x04, 't', 'e', 's', 't', // return body })); - context->set_body_size(4); + context->setBodySize(4); auto result = serializer.deserializeRpcResult(buffer, context); EXPECT_TRUE(result.second); EXPECT_TRUE(result.first->hasException()); @@ -106,7 +106,7 @@ TEST(HessianProtocolTest, deserializeRpcResult) { '\x91', // return type 0x04, 't', 'e', 's', 't', // return body })); - context->set_body_size(4); + context->setBodySize(4); auto result = serializer.deserializeRpcResult(buffer, context); EXPECT_TRUE(result.second); EXPECT_TRUE(result.first->hasException()); @@ -119,7 +119,7 @@ TEST(HessianProtocolTest, deserializeRpcResult) { '\x94', // return type 0x05, 't', 'e', 's', 't', // return body })); - context->set_body_size(0); + context->setBodySize(0); 
EXPECT_THROW_WITH_MESSAGE(serializer.deserializeRpcResult(buffer, context), EnvoyException, "RpcResult size(1) large than body size(0)"); } @@ -131,7 +131,7 @@ TEST(HessianProtocolTest, deserializeRpcResult) { '\x96', // incorrect return type 0x05, 't', 'e', 's', 't', // return body })); - context->set_body_size(buffer.length()); + context->setBodySize(buffer.length()); EXPECT_THROW_WITH_MESSAGE(serializer.deserializeRpcResult(buffer, context), EnvoyException, "not supported return type 6"); } @@ -146,7 +146,7 @@ TEST(HessianProtocolTest, deserializeRpcResult) { std::string exception_string = fmt::format("RpcResult is no value, but the rest of the body size({}) not equal 0", buffer.length() - 1); - context->set_body_size(buffer.length()); + context->setBodySize(buffer.length()); EXPECT_THROW_WITH_MESSAGE(serializer.deserializeRpcResult(buffer, context), EnvoyException, exception_string); } @@ -180,7 +180,7 @@ TEST(HessianProtocolTest, serializeRpcResult) { size_t body_size = mock_response.size() + sizeof(mock_response_type); std::shared_ptr context = std::make_shared(); - context->set_body_size(body_size); + context->setBodySize(body_size); auto result = serializer.deserializeRpcResult(buffer, context); EXPECT_TRUE(result.first->hasException()); } diff --git a/test/extensions/filters/network/dubbo_proxy/dubbo_protocol_impl_test.cc b/test/extensions/filters/network/dubbo_proxy/dubbo_protocol_impl_test.cc index 9aadf58aeffdd..0dafbc8fef707 100644 --- a/test/extensions/filters/network/dubbo_proxy/dubbo_protocol_impl_test.cc +++ b/test/extensions/filters/network/dubbo_proxy/dubbo_protocol_impl_test.cc @@ -44,9 +44,9 @@ TEST(DubboProtocolImplTest, Normal) { auto result = dubbo_protocol.decodeHeader(buffer, metadata); auto context = result.first; EXPECT_TRUE(result.second); - EXPECT_EQ(1, metadata->request_id()); - EXPECT_EQ(1, context->body_size()); - EXPECT_EQ(MessageType::Request, metadata->message_type()); + EXPECT_EQ(1, metadata->requestId()); + EXPECT_EQ(1, 
context->bodySize()); + EXPECT_EQ(MessageType::Request, metadata->messageType()); } // Normal dubbo response message @@ -59,9 +59,9 @@ TEST(DubboProtocolImplTest, Normal) { auto result = dubbo_protocol.decodeHeader(buffer, metadata); auto context = result.first; EXPECT_TRUE(result.second); - EXPECT_EQ(1, metadata->request_id()); - EXPECT_EQ(1, context->body_size()); - EXPECT_EQ(MessageType::Response, metadata->message_type()); + EXPECT_EQ(1, metadata->requestId()); + EXPECT_EQ(1, context->bodySize()); + EXPECT_EQ(MessageType::Response, metadata->messageType()); } } @@ -136,20 +136,20 @@ TEST(DubboProtocolImplTest, encode) { auto result = dubbo_protocol.decodeHeader(buffer, output_metadata); EXPECT_TRUE(result.second); - EXPECT_EQ(metadata.message_type(), output_metadata->message_type()); - EXPECT_EQ(metadata.response_status(), output_metadata->response_status()); - EXPECT_EQ(metadata.serialization_type(), output_metadata->serialization_type()); - EXPECT_EQ(metadata.request_id(), output_metadata->request_id()); + EXPECT_EQ(metadata.messageType(), output_metadata->messageType()); + EXPECT_EQ(metadata.responseStatus(), output_metadata->responseStatus()); + EXPECT_EQ(metadata.serializationType(), output_metadata->serializationType()); + EXPECT_EQ(metadata.requestId(), output_metadata->requestId()); Buffer::OwnedImpl body_buffer; size_t serialized_body_size = dubbo_protocol.serializer()->serializeRpcResult( body_buffer, content, RpcResponseType::ResponseWithValue); auto context = result.first; - EXPECT_EQ(context->body_size(), serialized_body_size); + EXPECT_EQ(context->bodySize(), serialized_body_size); EXPECT_EQ(false, context->hasAttachments()); EXPECT_EQ(0, context->attachments().size()); - buffer.drain(context->header_size()); + buffer.drain(context->headerSize()); EXPECT_TRUE(dubbo_protocol.decodeData(buffer, context, output_metadata)); } @@ -216,10 +216,10 @@ TEST(DubboProtocolImplTest, decode) { auto result = dubbo_protocol.decodeHeader(buffer, metadata); 
EXPECT_TRUE(result.second); auto context = result.first; - EXPECT_EQ(1, context->body_size()); - EXPECT_EQ(MessageType::Request, metadata->message_type()); - EXPECT_EQ(1, metadata->request_id()); - EXPECT_EQ(SerializationType::Hessian2, metadata->serialization_type()); + EXPECT_EQ(1, context->bodySize()); + EXPECT_EQ(MessageType::Request, metadata->messageType()); + EXPECT_EQ(1, metadata->requestId()); + EXPECT_EQ(SerializationType::Hessian2, metadata->serializationType()); buffer.drain(buffer.length()); } @@ -231,10 +231,10 @@ TEST(DubboProtocolImplTest, decode) { auto result = dubbo_protocol.decodeHeader(buffer, metadata); EXPECT_TRUE(result.second); auto context = result.first; - EXPECT_EQ(1, context->body_size()); - EXPECT_EQ(MessageType::Oneway, metadata->message_type()); - EXPECT_EQ(1, metadata->request_id()); - EXPECT_EQ(SerializationType::Hessian2, metadata->serialization_type()); + EXPECT_EQ(1, context->bodySize()); + EXPECT_EQ(MessageType::Oneway, metadata->messageType()); + EXPECT_EQ(1, metadata->requestId()); + EXPECT_EQ(SerializationType::Hessian2, metadata->serializationType()); } } diff --git a/test/extensions/filters/network/dubbo_proxy/metadata_test.cc b/test/extensions/filters/network/dubbo_proxy/metadata_test.cc index ab94547762f7f..4ac1e14e6301a 100644 --- a/test/extensions/filters/network/dubbo_proxy/metadata_test.cc +++ b/test/extensions/filters/network/dubbo_proxy/metadata_test.cc @@ -22,19 +22,19 @@ TEST(MessageMetadataTest, Fields) { EXPECT_TRUE(metadata.timeout().has_value()); invo->setMethodName("method"); - EXPECT_EQ("method", invo->method_name()); + EXPECT_EQ("method", invo->methodName()); - EXPECT_FALSE(invo->service_version().has_value()); - EXPECT_THROW(invo->service_version().value(), absl::bad_optional_access); + EXPECT_FALSE(invo->serviceVersion().has_value()); + EXPECT_THROW(invo->serviceVersion().value(), absl::bad_optional_access); invo->setServiceVersion("1.0.0"); - EXPECT_TRUE(invo->service_version().has_value()); - 
EXPECT_EQ("1.0.0", invo->service_version().value()); + EXPECT_TRUE(invo->serviceVersion().has_value()); + EXPECT_EQ("1.0.0", invo->serviceVersion().value()); - EXPECT_FALSE(invo->service_group().has_value()); - EXPECT_THROW(invo->service_group().value(), absl::bad_optional_access); + EXPECT_FALSE(invo->serviceGroup().has_value()); + EXPECT_THROW(invo->serviceGroup().value(), absl::bad_optional_access); invo->setServiceGroup("group"); - EXPECT_TRUE(invo->service_group().has_value()); - EXPECT_EQ("group", invo->service_group().value()); + EXPECT_TRUE(invo->serviceGroup().has_value()); + EXPECT_EQ("group", invo->serviceGroup().value()); } TEST(MessageMetadataTest, Headers) { diff --git a/test/extensions/filters/network/dubbo_proxy/route_matcher_test.cc b/test/extensions/filters/network/dubbo_proxy/route_matcher_test.cc index 07592bcad8333..7aab9ba075538 100644 --- a/test/extensions/filters/network/dubbo_proxy/route_matcher_test.cc +++ b/test/extensions/filters/network/dubbo_proxy/route_matcher_test.cc @@ -8,7 +8,7 @@ #include "extensions/filters/network/dubbo_proxy/router/route_matcher.h" #include "extensions/filters/network/dubbo_proxy/serializer_impl.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git a/test/extensions/filters/network/dubbo_proxy/router_filter_config_test.cc b/test/extensions/filters/network/dubbo_proxy/router_filter_config_test.cc index 1ef5e61dacdaa..6f9c65ffb4290 100644 --- a/test/extensions/filters/network/dubbo_proxy/router_filter_config_test.cc +++ b/test/extensions/filters/network/dubbo_proxy/router_filter_config_test.cc @@ -5,7 +5,7 @@ #include "extensions/filters/network/dubbo_proxy/router/config.h" #include "test/extensions/filters/network/dubbo_proxy/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git 
a/test/extensions/filters/network/dubbo_proxy/router_test.cc b/test/extensions/filters/network/dubbo_proxy/router_test.cc index ad5f1d5b90049..d812ac0546055 100644 --- a/test/extensions/filters/network/dubbo_proxy/router_test.cc +++ b/test/extensions/filters/network/dubbo_proxy/router_test.cc @@ -9,7 +9,7 @@ #include "test/extensions/filters/network/dubbo_proxy/mocks.h" #include "test/mocks/network/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "test/test_common/printers.h" #include "test/test_common/registry.h" @@ -380,7 +380,7 @@ TEST_F(DubboRouterTest, UnexpectedRouterDestroy) { buffer.add("test"); // Body auto ctx = static_cast(message_context_.get()); - ctx->message_origin_data().move(buffer, buffer.length()); + ctx->messageOriginData().move(buffer, buffer.length()); startRequest(MessageType::Request); connectUpstream(); destroyRouter(); @@ -484,7 +484,7 @@ TEST_F(DubboRouterTest, DestroyWhileConnecting) { initializeRouter(); initializeMetadata(MessageType::Request); - NiceMock conn_pool_handle; + NiceMock conn_pool_handle; EXPECT_CALL(context_.cluster_manager_.tcp_conn_pool_, newConnection(_)) .WillOnce(Invoke([&](Tcp::ConnectionPool::Callbacks&) -> Tcp::ConnectionPool::Cancellable* { return &conn_pool_handle; diff --git a/test/extensions/filters/network/ext_authz/BUILD b/test/extensions/filters/network/ext_authz/BUILD index 4d003eb26decf..df951f392f32e 100644 --- a/test/extensions/filters/network/ext_authz/BUILD +++ b/test/extensions/filters/network/ext_authz/BUILD @@ -1,14 +1,16 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", + "envoy_cc_fuzz_test", "envoy_package", + "envoy_proto_library", ) load( "//test/extensions:extensions_build_system.bzl", "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( @@ -22,6 +24,7 @@ envoy_extension_cc_test( "//source/common/network:address_lib", "//source/common/protobuf:utility_lib", 
"//source/common/stats:stats_lib", + "//source/extensions/filters/network:well_known_names", "//source/extensions/filters/network/ext_authz", "//test/extensions/filters/common/ext_authz:ext_authz_mocks", "//test/mocks/network:network_mocks", @@ -38,8 +41,31 @@ envoy_extension_cc_test( extension_name = "envoy.filters.network.ext_authz", deps = [ "//source/extensions/filters/network/ext_authz:config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/network/ext_authz/v3:pkg_cc_proto", ], ) + +envoy_proto_library( + name = "ext_authz_fuzz_proto", + srcs = ["ext_authz_fuzz.proto"], + deps = [ + "@envoy_api//envoy/extensions/filters/network/ext_authz/v3:pkg", + ], +) + +envoy_cc_fuzz_test( + name = "ext_authz_fuzz_test", + srcs = ["ext_authz_fuzz_test.cc"], + corpus = "ext_authz_corpus", + deps = [ + ":ext_authz_fuzz_proto_cc_proto", + "//source/common/buffer:buffer_lib", + "//source/extensions/filters/network/ext_authz", + "//test/extensions/filters/common/ext_authz:ext_authz_mocks", + "//test/mocks/network:network_mocks", + "//test/mocks/runtime:runtime_mocks", + "@envoy_api//envoy/extensions/filters/network/ext_authz/v3:pkg_cc_proto", + ], +) diff --git a/test/extensions/filters/network/ext_authz/config_test.cc b/test/extensions/filters/network/ext_authz/config_test.cc index 010ad2018e850..d1093ccdb4834 100644 --- a/test/extensions/filters/network/ext_authz/config_test.cc +++ b/test/extensions/filters/network/ext_authz/config_test.cc @@ -5,7 +5,8 @@ #include "extensions/filters/network/ext_authz/config.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" +#include "test/test_common/utility.h" #include "gmock/gmock.h" #include "gtest/gtest.h" @@ -18,14 +19,8 @@ namespace Extensions { namespace NetworkFilters { namespace ExtAuthz { -TEST(ExtAuthzFilterConfigTest, ValidateFail) { - NiceMock context; - 
EXPECT_THROW(ExtAuthzConfigFactory().createFilterFactoryFromProto( - envoy::extensions::filters::network::ext_authz::v3::ExtAuthz(), context), - ProtoValidationException); -} - -TEST(ExtAuthzFilterConfigTest, ExtAuthzCorrectProto) { +namespace { +void expectCorrectProto(envoy::config::core::v3::ApiVersion api_version) { std::string yaml = R"EOF( grpc_service: google_grpc: @@ -33,11 +28,13 @@ TEST(ExtAuthzFilterConfigTest, ExtAuthzCorrectProto) { stat_prefix: google failure_mode_allow: false stat_prefix: name + transport_api_version: {} )EOF"; ExtAuthzConfigFactory factory; ProtobufTypes::MessagePtr proto_config = factory.createEmptyConfigProto(); - TestUtility::loadFromYaml(yaml, *proto_config); + TestUtility::loadFromYaml( + fmt::format(yaml, TestUtility::getVersionStringFromApiVersion(api_version)), *proto_config); NiceMock context; @@ -50,6 +47,20 @@ TEST(ExtAuthzFilterConfigTest, ExtAuthzCorrectProto) { EXPECT_CALL(connection, addReadFilter(_)); cb(connection); } +} // namespace + +TEST(ExtAuthzFilterConfigTest, ValidateFail) { + NiceMock context; + EXPECT_THROW(ExtAuthzConfigFactory().createFilterFactoryFromProto( + envoy::extensions::filters::network::ext_authz::v3::ExtAuthz(), context), + ProtoValidationException); +} + +TEST(ExtAuthzFilterConfigTest, ExtAuthzCorrectProto) { + expectCorrectProto(envoy::config::core::v3::ApiVersion::AUTO); + expectCorrectProto(envoy::config::core::v3::ApiVersion::V2); + expectCorrectProto(envoy::config::core::v3::ApiVersion::V3); +} // Test that the deprecated extension name still functions. 
TEST(ExtAuthzConfigTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) { diff --git a/test/extensions/filters/network/ext_authz/ext_authz_corpus/crash-309531f09ce8c0c71f272c7145da9d5528c3e8fc b/test/extensions/filters/network/ext_authz/ext_authz_corpus/crash-309531f09ce8c0c71f272c7145da9d5528c3e8fc new file mode 100644 index 0000000000000..840c6e068f8c2 --- /dev/null +++ b/test/extensions/filters/network/ext_authz/ext_authz_corpus/crash-309531f09ce8c0c71f272c7145da9d5528c3e8fc @@ -0,0 +1,23 @@ +config { + stat_prefix: "\361\261\261\261\361\261\261\261\361\261\261\261\361\261\261\261\361\261\261\261\361\261\261\261\321\261" + failure_mode_allow: true + include_peer_certificate: true +} +actions { + on_data { + result { + check_status_ok { + } + } + data: "123" + } +} +actions { + remote_close { + } +} +actions { + local_close { + } +} + diff --git a/test/extensions/filters/network/ext_authz/ext_authz_corpus/crash-72c994c40b30ff66b72f401055681e9851fea7a2 b/test/extensions/filters/network/ext_authz/ext_authz_corpus/crash-72c994c40b30ff66b72f401055681e9851fea7a2 new file mode 100644 index 0000000000000..b20e1a96bece1 --- /dev/null +++ b/test/extensions/filters/network/ext_authz/ext_authz_corpus/crash-72c994c40b30ff66b72f401055681e9851fea7a2 @@ -0,0 +1,38 @@ +config { + stat_prefix: "envoy.extensions.filters.network.e" + failure_mode_allow: true +} +actions { + on_new_connection { + } +} +actions { + on_data { + data: "\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377" + end_stream: true + result { + check_status_denied { + } + } + } +} +actions { + on_data { + data: "CCCCCCCCCCCC" + end_stream: true + result { + check_status_denied { + } + } + } +} +actions { + on_data { + data: 
"\000\000\000\000" + end_stream: true + result { + check_status_error { + } + } + } +} diff --git a/test/extensions/filters/network/ext_authz/ext_authz_fuzz.proto b/test/extensions/filters/network/ext_authz/ext_authz_fuzz.proto new file mode 100644 index 0000000000000..a590752fcbd50 --- /dev/null +++ b/test/extensions/filters/network/ext_authz/ext_authz_fuzz.proto @@ -0,0 +1,41 @@ +syntax = "proto3"; +package envoy.extensions.filters.network.ext_authz; + +import "envoy/extensions/filters/network/ext_authz/v3/ext_authz.proto"; +import "google/protobuf/empty.proto"; +import "validate/validate.proto"; + +message Result { + oneof result_selector { + option (validate.required) = true; + // Authorization check status + google.protobuf.Empty check_status_error = 1; + google.protobuf.Empty check_status_denied = 2; + google.protobuf.Empty check_status_ok = 3; + } +} + +message OnData { + bytes data = 1; + bool end_stream = 2; + // optional: to set the default authorization check result for this and the following onData() + Result result = 3; +} + +message Action { + oneof action_selector { + option (validate.required) = true; + // Call onNewConnection(). + google.protobuf.Empty on_new_connection = 1; + // Call onData(). 
+ OnData on_data = 2; + // Connection close + google.protobuf.Empty remote_close = 3; + google.protobuf.Empty local_close = 4; + } +} +message ExtAuthzTestCase { + envoy.extensions.filters.network.ext_authz.v3.ExtAuthz config = 1 + [(validate.rules).message = {required: true}]; + repeated Action actions = 2; +} diff --git a/test/extensions/filters/network/ext_authz/ext_authz_fuzz_test.cc b/test/extensions/filters/network/ext_authz/ext_authz_fuzz_test.cc new file mode 100644 index 0000000000000..c2e816c748d51 --- /dev/null +++ b/test/extensions/filters/network/ext_authz/ext_authz_fuzz_test.cc @@ -0,0 +1,120 @@ +#include "envoy/extensions/filters/network/ext_authz/v3/ext_authz.pb.h" + +#include "common/buffer/buffer_impl.h" +#include "common/network/address_impl.h" + +#include "extensions/filters/network/ext_authz/ext_authz.h" + +#include "test/extensions/filters/common/ext_authz/mocks.h" +#include "test/extensions/filters/network/ext_authz/ext_authz_fuzz.pb.validate.h" +#include "test/fuzz/fuzz_runner.h" +#include "test/mocks/network/mocks.h" +#include "test/mocks/runtime/mocks.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::ReturnRef; +using testing::WithArgs; + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace ExtAuthz { + +Filters::Common::ExtAuthz::ResponsePtr +makeAuthzResponse(Filters::Common::ExtAuthz::CheckStatus status) { + Filters::Common::ExtAuthz::ResponsePtr response = + std::make_unique(); + response->status = status; + return response; +} + +Filters::Common::ExtAuthz::CheckStatus resultCaseToCheckStatus( + envoy::extensions::filters::network::ext_authz::Result::ResultSelectorCase result_case) { + Filters::Common::ExtAuthz::CheckStatus check_status; + switch (result_case) { + case envoy::extensions::filters::network::ext_authz::Result::kCheckStatusOk: { + check_status = Filters::Common::ExtAuthz::CheckStatus::OK; + break; + } + case 
envoy::extensions::filters::network::ext_authz::Result::kCheckStatusError: { + check_status = Filters::Common::ExtAuthz::CheckStatus::Error; + break; + } + case envoy::extensions::filters::network::ext_authz::Result::kCheckStatusDenied: { + check_status = Filters::Common::ExtAuthz::CheckStatus::Denied; + break; + } + default: { + // Unhandled status + PANIC("A check status handle is missing"); + } + } + return check_status; +} + +DEFINE_PROTO_FUZZER(const envoy::extensions::filters::network::ext_authz::ExtAuthzTestCase& input) { + try { + TestUtility::validate(input); + } catch (const EnvoyException& e) { + ENVOY_LOG_MISC(debug, "EnvoyException during validation: {}", e.what()); + return; + } + + Stats::TestUtil::TestStore stats_store; + Filters::Common::ExtAuthz::MockClient* client = new Filters::Common::ExtAuthz::MockClient(); + envoy::extensions::filters::network::ext_authz::v3::ExtAuthz proto_config = input.config(); + + ConfigSharedPtr config = std::make_shared(proto_config, stats_store); + std::unique_ptr filter = + std::make_unique(config, Filters::Common::ExtAuthz::ClientPtr{client}); + + NiceMock filter_callbacks; + filter->initializeReadFilterCallbacks(filter_callbacks); + static Network::Address::InstanceConstSharedPtr addr = + std::make_shared("/test/test.sock"); + + ON_CALL(filter_callbacks.connection_, remoteAddress()).WillByDefault(ReturnRef(addr)); + ON_CALL(filter_callbacks.connection_, localAddress()).WillByDefault(ReturnRef(addr)); + + for (const auto& action : input.actions()) { + switch (action.action_selector_case()) { + case envoy::extensions::filters::network::ext_authz::Action::kOnData: { + // Optional input field to set default authorization check result for the following "onData()" + if (action.on_data().has_result()) { + ON_CALL(*client, check(_, _, _, _)) + .WillByDefault(WithArgs<0>( + Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { + callbacks.onComplete(makeAuthzResponse( + 
resultCaseToCheckStatus(action.on_data().result().result_selector_case()))); + }))); + } + Buffer::OwnedImpl buffer(action.on_data().data()); + filter->onData(buffer, action.on_data().end_stream()); + break; + } + case envoy::extensions::filters::network::ext_authz::Action::kOnNewConnection: { + filter->onNewConnection(); + break; + } + case envoy::extensions::filters::network::ext_authz::Action::kRemoteClose: { + filter_callbacks.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); + break; + } + case envoy::extensions::filters::network::ext_authz::Action::kLocalClose: { + filter_callbacks.connection_.raiseEvent(Network::ConnectionEvent::LocalClose); + break; + } + default: { + // Unhandled actions + PANIC("A case is missing for an action"); + } + } + } +} + +} // namespace ExtAuthz +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/test/extensions/filters/network/ext_authz/ext_authz_test.cc b/test/extensions/filters/network/ext_authz/ext_authz_test.cc index 47a208dc8c3bd..b977c88d0176b 100644 --- a/test/extensions/filters/network/ext_authz/ext_authz_test.cc +++ b/test/extensions/filters/network/ext_authz/ext_authz_test.cc @@ -12,6 +12,7 @@ #include "common/protobuf/utility.h" #include "extensions/filters/network/ext_authz/ext_authz.h" +#include "extensions/filters/network/well_known_names.h" #include "test/extensions/filters/common/ext_authz/mocks.h" #include "test/mocks/network/mocks.h" @@ -37,19 +38,11 @@ namespace ExtAuthz { class ExtAuthzFilterTest : public testing::Test { public: - ExtAuthzFilterTest() { - std::string json = R"EOF( - { - "grpc_service": { - "envoy_grpc": { "cluster_name": "ext_authz_server" } - }, - "failure_mode_allow": true, - "stat_prefix": "name" - } - )EOF"; + ExtAuthzFilterTest() { initialize(); } + void initialize() { envoy::extensions::filters::network::ext_authz::v3::ExtAuthz proto_config{}; - TestUtility::loadFromJson(json, proto_config); + 
TestUtility::loadFromYaml(default_yaml_string_, proto_config); config_ = std::make_shared(proto_config, stats_store_); client_ = new Filters::Common::ExtAuthz::MockClient(); filter_ = std::make_unique(config_, Filters::Common::ExtAuthz::ClientPtr{client_}); @@ -82,18 +75,24 @@ class ExtAuthzFilterTest : public testing::Test { NiceMock filter_callbacks_; Network::Address::InstanceConstSharedPtr addr_; Filters::Common::ExtAuthz::RequestCallbacks* request_callbacks_{}; + const std::string default_yaml_string_ = R"EOF( +grpc_service: + envoy_grpc: + cluster_name: ext_authz_server + +failure_mode_allow: true +stat_prefix: name + )EOF"; }; TEST_F(ExtAuthzFilterTest, BadExtAuthzConfig) { - std::string json_string = R"EOF( - { - "stat_prefix": "my_stat_prefix", - "grpc_service": {} - } + std::string yaml_string = R"EOF( +grpc_service: {} +stat_prefix: name )EOF"; envoy::extensions::filters::network::ext_authz::v3::ExtAuthz proto_config{}; - TestUtility::loadFromJson(json_string, proto_config); + TestUtility::loadFromYaml(yaml_string, proto_config); EXPECT_THROW( TestUtility::downcastAndValidate< @@ -102,8 +101,6 @@ TEST_F(ExtAuthzFilterTest, BadExtAuthzConfig) { } TEST_F(ExtAuthzFilterTest, OKWithOnData) { - InSequence s; - EXPECT_CALL(filter_callbacks_.connection_, remoteAddress()).WillOnce(ReturnRef(addr_)); EXPECT_CALL(filter_callbacks_.connection_, localAddress()).WillOnce(ReturnRef(addr_)); EXPECT_CALL(*client_, check(_, _, testing::A(), _)) @@ -126,8 +123,23 @@ TEST_F(ExtAuthzFilterTest, OKWithOnData) { 1U, stats_store_.gauge("ext_authz.name.active", Stats::Gauge::ImportMode::Accumulate).value()); + Filters::Common::ExtAuthz::Response response{}; + response.status = Filters::Common::ExtAuthz::CheckStatus::OK; + response.headers_to_set = Http::HeaderVector{{Http::LowerCaseString{"foo"}, "bar"}}; + + auto* fields = response.dynamic_metadata.mutable_fields(); + (*fields)["foo"] = ValueUtil::stringValue("ok"); + (*fields)["bar"] = ValueUtil::numberValue(1); + + 
EXPECT_CALL(filter_callbacks_.connection_.stream_info_, setDynamicMetadata(_, _)) + .WillOnce(Invoke([&response](const std::string& ns, + const ProtobufWkt::Struct& returned_dynamic_metadata) { + EXPECT_EQ(ns, NetworkFilterNames::get().ExtAuthorization); + EXPECT_TRUE(TestUtility::protoEqual(returned_dynamic_metadata, response.dynamic_metadata)); + })); + EXPECT_CALL(filter_callbacks_, continueReading()); - request_callbacks_->onComplete(makeAuthzResponse(Filters::Common::ExtAuthz::CheckStatus::OK)); + request_callbacks_->onComplete(std::make_unique(response)); EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(data, false)); @@ -314,7 +326,7 @@ TEST_F(ExtAuthzFilterTest, ImmediateOK) { WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { callbacks.onComplete(makeAuthzResponse(Filters::Common::ExtAuthz::CheckStatus::OK)); }))); - + EXPECT_CALL(filter_callbacks_.connection_.stream_info_, setDynamicMetadata(_, _)).Times(0); EXPECT_EQ(Network::FilterStatus::Continue, filter_->onNewConnection()); Buffer::OwnedImpl data("hello"); EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(data, false)); diff --git a/test/extensions/filters/network/http_connection_manager/BUILD b/test/extensions/filters/network/http_connection_manager/BUILD index 0b8e125be0c56..15a050c217116 100644 --- a/test/extensions/filters/network/http_connection_manager/BUILD +++ b/test/extensions/filters/network/http_connection_manager/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -10,6 +8,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_proto_library( @@ -25,14 +25,14 @@ envoy_extension_cc_test( ":config_cc_proto", "//source/common/buffer:buffer_lib", "//source/common/event:dispatcher_lib", + "//source/common/filter/http:filter_config_discovery_lib", "//source/extensions/access_loggers/file:config", 
"//source/extensions/filters/http/health_check:config", - "//source/extensions/filters/http/ratelimit:config", "//source/extensions/filters/http/router:config", "//source/extensions/filters/network/http_connection_manager:config", "//test/integration/filters:encoder_decoder_buffer_filter_lib", "//test/mocks/network:network_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "//test/test_common:registry_lib", "//test/test_common:utility_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", diff --git a/test/extensions/filters/network/http_connection_manager/config_test.cc b/test/extensions/filters/network/http_connection_manager/config_test.cc index 014459f6d73ea..170246b40eb9a 100644 --- a/test/extensions/filters/network/http_connection_manager/config_test.cc +++ b/test/extensions/filters/network/http_connection_manager/config_test.cc @@ -8,6 +8,7 @@ #include "envoy/type/v3/percent.pb.h" #include "common/buffer/buffer_impl.h" +#include "common/filter/http/filter_config_discovery_impl.h" #include "common/http/date_provider_impl.h" #include "common/http/request_id_extension_uuid_impl.h" @@ -18,7 +19,7 @@ #include "test/mocks/config/mocks.h" #include "test/mocks/http/mocks.h" #include "test/mocks/network/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "test/test_common/printers.h" #include "test/test_common/registry.h" #include "test/test_common/utility.h" @@ -40,10 +41,10 @@ namespace NetworkFilters { namespace HttpConnectionManager { envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager -parseHttpConnectionManagerFromV2Yaml(const std::string& yaml) { +parseHttpConnectionManagerFromYaml(const std::string& yaml, bool avoid_boosting = true) { envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager http_connection_manager; - TestUtility::loadFromYamlAndValidate(yaml, http_connection_manager); + 
TestUtility::loadFromYamlAndValidate(yaml, http_connection_manager, false, avoid_boosting); return http_connection_manager; } @@ -54,8 +55,15 @@ class HttpConnectionManagerConfigTest : public testing::Test { NiceMock route_config_provider_manager_; NiceMock scoped_routes_config_provider_manager_; NiceMock http_tracer_manager_; + Filter::Http::FilterConfigProviderManagerImpl filter_config_provider_manager_; std::shared_ptr> http_tracer_{ std::make_shared>()}; + void createHttpConnectionManagerConfig(const std::string& yaml) { + HttpConnectionManagerConfig(parseHttpConnectionManagerFromYaml(yaml), context_, date_provider_, + route_config_provider_manager_, + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); + } }; TEST_F(HttpConnectionManagerConfigTest, ValidateFail) { @@ -84,11 +92,8 @@ stat_prefix: router - name: foo )EOF"; - EXPECT_THROW_WITH_MESSAGE( - HttpConnectionManagerConfig(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, - date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_), - EnvoyException, "Didn't find a registered implementation for name: 'foo'"); + EXPECT_THROW_WITH_MESSAGE(createHttpConnectionManagerConfig(yaml_string), EnvoyException, + "Didn't find a registered implementation for name: 'foo'"); } TEST_F(HttpConnectionManagerConfigTest, RouterInverted) { @@ -115,10 +120,7 @@ stat_prefix: router )EOF"; EXPECT_THROW_WITH_MESSAGE( - HttpConnectionManagerConfig(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, - date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_), - EnvoyException, + createHttpConnectionManagerConfig(yaml_string), EnvoyException, "Error: terminal filter named envoy.filters.http.router of type envoy.filters.http.router " "must be the last filter in a http filter chain."); } @@ -145,17 +147,15 @@ stat_prefix: router pass_through_mode: false )EOF"; - 
EXPECT_THROW_WITH_MESSAGE( - HttpConnectionManagerConfig(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, - date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_), - EnvoyException, - "Error: non-terminal filter named health_check of type " - "envoy.filters.http.health_check is the last filter in a http filter " - "chain."); + EXPECT_THROW_WITH_MESSAGE(createHttpConnectionManagerConfig(yaml_string), EnvoyException, + "Error: non-terminal filter named health_check of type " + "envoy.filters.http.health_check is the last filter in a http filter " + "chain."); } -TEST_F(HttpConnectionManagerConfigTest, MiscConfig) { +// When deprecating v2, remove the old style "operation_name: egress" config +// but retain the rest of the test. +TEST_F(HttpConnectionManagerConfigTest, DEPRECATED_FEATURE_TEST(MiscConfig)) { const std::string yaml_string = R"EOF( codec_type: http1 server_name: foo @@ -171,15 +171,16 @@ stat_prefix: router route: cluster: cluster tracing: - operation_name: ingress + operation_name: egress max_path_tag_length: 128 http_filters: - name: envoy.filters.http.router )EOF"; - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, - date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string, false), + context_, date_provider_, route_config_provider_manager_, + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); EXPECT_EQ(128, config.tracingConfig()->max_path_tag_length_); EXPECT_EQ(*context_.local_info_.address_, config.localAddress()); @@ -212,9 +213,10 @@ stat_prefix: router // there is no reason to obtain an actual HttpTracer. 
EXPECT_CALL(http_tracer_manager_, getOrCreateHttpTracer(_)).Times(0); - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); // By default, tracer must be a null object (Tracing::HttpNullTracer) rather than nullptr. EXPECT_THAT(config.tracer().get(), WhenDynamicCastTo(NotNull())); @@ -250,9 +252,10 @@ stat_prefix: router // there is no reason to obtain an actual HttpTracer. EXPECT_CALL(http_tracer_manager_, getOrCreateHttpTracer(_)).Times(0); - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); // Even though tracer provider is configured in the bootstrap config, a given filter instance // should not have a tracer associated with it. @@ -285,9 +288,10 @@ tracing: {} # notice that tracing is enabled // an actual HttpTracer must be obtained from the HttpTracerManager. 
EXPECT_CALL(http_tracer_manager_, getOrCreateHttpTracer(nullptr)).WillOnce(Return(http_tracer_)); - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); // Actual HttpTracer must be obtained from the HttpTracerManager. EXPECT_THAT(config.tracer(), Eq(http_tracer_)); @@ -325,9 +329,10 @@ tracing: {} # notice that tracing is enabled EXPECT_CALL(http_tracer_manager_, getOrCreateHttpTracer(Pointee(ProtoEq(tracing_config.http())))) .WillOnce(Return(http_tracer_)); - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); // Actual HttpTracer must be obtained from the HttpTracerManager. 
EXPECT_THAT(config.tracer(), Eq(http_tracer_)); @@ -384,9 +389,10 @@ stat_prefix: router EXPECT_CALL(http_tracer_manager_, getOrCreateHttpTracer(Pointee(ProtoEq(inlined_tracing_config)))) .WillOnce(Return(http_tracer_)); - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, - date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string, false), + context_, date_provider_, route_config_provider_manager_, + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); // Actual HttpTracer must be obtained from the HttpTracerManager. EXPECT_THAT(config.tracer(), Eq(http_tracer_)); @@ -415,9 +421,10 @@ stat_prefix: router key: com.bar.foo path: [ { key: xx }, { key: yy } ] )EOF"; - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); std::vector custom_tags{"ltag", "etag", "rtag", "mtag"}; const Tracing::CustomTagMap& custom_tag_map = config.tracingConfig()->custom_tags_; @@ -435,9 +442,10 @@ stat_prefix: router request_headers_for_tags: - foo )EOF"; - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, - date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string, false), + context_, date_provider_, route_config_provider_manager_, + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); 
const Tracing::CustomTagMap& custom_tag_map = config.tracingConfig()->custom_tags_; const Tracing::RequestHeaderCustomTag* foo = dynamic_cast( @@ -467,9 +475,10 @@ stat_prefix: router )EOF"; ON_CALL(context_, direction()).WillByDefault(Return(envoy::config::core::v3::OUTBOUND)); - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, - date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string, false), + context_, date_provider_, route_config_provider_manager_, + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); EXPECT_EQ(Tracing::OperationName::Egress, config.tracingConfig()->operation_name_); } @@ -493,9 +502,10 @@ stat_prefix: router )EOF"; ON_CALL(context_, direction()).WillByDefault(Return(envoy::config::core::v3::INBOUND)); - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, - date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string, false), + context_, date_provider_, route_config_provider_manager_, + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); EXPECT_EQ(Tracing::OperationName::Ingress, config.tracingConfig()->operation_name_); } @@ -512,9 +522,10 @@ TEST_F(HttpConnectionManagerConfigTest, SamplingDefault) { - name: envoy.filters.http.router )EOF"; - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, - date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string, false), + context_, date_provider_, 
route_config_provider_manager_, + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); EXPECT_EQ(100, config.tracingConfig()->client_sampling_.numerator()); EXPECT_EQ(Tracing::DefaultMaxPathTagLength, config.tracingConfig()->max_path_tag_length_); @@ -547,9 +558,10 @@ TEST_F(HttpConnectionManagerConfigTest, SamplingConfigured) { - name: envoy.filters.http.router )EOF"; - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, - date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string, false), + context_, date_provider_, route_config_provider_manager_, + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); EXPECT_EQ(1, config.tracingConfig()->client_sampling_.numerator()); EXPECT_EQ(envoy::type::v3::FractionalPercent::HUNDRED, @@ -581,9 +593,10 @@ TEST_F(HttpConnectionManagerConfigTest, FractionalSamplingConfigured) { - name: envoy.filters.http.router )EOF"; - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, - date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string, false), + context_, date_provider_, route_config_provider_manager_, + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); EXPECT_EQ(0, config.tracingConfig()->client_sampling_.numerator()); EXPECT_EQ(envoy::type::v3::FractionalPercent::HUNDRED, @@ -607,9 +620,10 @@ TEST_F(HttpConnectionManagerConfigTest, UnixSocketInternalAddress) { - name: envoy.filters.http.router )EOF"; - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + HttpConnectionManagerConfig 
config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); Network::Address::PipeInstance unixAddress{"/foo"}; Network::Address::Ipv4Instance internalIpAddress{"127.0.0.1", 0}; Network::Address::Ipv4Instance externalIpAddress{"12.0.0.1", 0}; @@ -627,9 +641,10 @@ TEST_F(HttpConnectionManagerConfigTest, MaxRequestHeadersKbDefault) { - name: envoy.filters.http.router )EOF"; - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); EXPECT_EQ(60, config.maxRequestHeadersKb()); } @@ -643,9 +658,10 @@ TEST_F(HttpConnectionManagerConfigTest, MaxRequestHeadersKbConfigured) { - name: envoy.filters.http.router )EOF"; - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); EXPECT_EQ(16, config.maxRequestHeadersKb()); } @@ -659,9 +675,10 @@ TEST_F(HttpConnectionManagerConfigTest, MaxRequestHeadersKbMaxConfigurable) { - name: envoy.filters.http.router )EOF"; - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, 
route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); EXPECT_EQ(96, config.maxRequestHeadersKb()); } @@ -676,9 +693,10 @@ TEST_F(HttpConnectionManagerConfigTest, DisabledStreamIdleTimeout) { - name: envoy.filters.http.router )EOF"; - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); EXPECT_EQ(0, config.streamIdleTimeout().count()); } @@ -693,9 +711,10 @@ TEST_F(HttpConnectionManagerConfigTest, DEPRECATED_FEATURE_TEST(IdleTimeout)) { - name: envoy.filters.http.router )EOF"; - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, - date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string, false), + context_, date_provider_, route_config_provider_manager_, + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); EXPECT_EQ(1000, config.idleTimeout().value().count()); } @@ -711,9 +730,10 @@ TEST_F(HttpConnectionManagerConfigTest, CommonHttpProtocolIdleTimeout) { - name: envoy.filters.http.router )EOF"; - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, 
http_tracer_manager_, + filter_config_provider_manager_); EXPECT_EQ(1000, config.idleTimeout().value().count()); } @@ -727,9 +747,10 @@ TEST_F(HttpConnectionManagerConfigTest, CommonHttpProtocolIdleTimeoutDefault) { - name: envoy.filters.http.router )EOF"; - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); EXPECT_EQ(std::chrono::hours(1), config.idleTimeout().value()); } @@ -745,9 +766,10 @@ TEST_F(HttpConnectionManagerConfigTest, CommonHttpProtocolIdleTimeoutOff) { - name: envoy.filters.http.router )EOF"; - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); EXPECT_FALSE(config.idleTimeout().has_value()); } @@ -761,9 +783,10 @@ TEST_F(HttpConnectionManagerConfigTest, DefaultMaxRequestHeaderCount) { - name: envoy.filters.http.router )EOF"; - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); EXPECT_EQ(100, config.maxRequestHeadersCount()); } @@ -779,9 +802,10 @@ TEST_F(HttpConnectionManagerConfigTest, 
MaxRequestHeaderCountConfigurable) { - name: envoy.filters.http.router )EOF"; - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); EXPECT_EQ(200, config.maxRequestHeadersCount()); } @@ -798,9 +822,10 @@ TEST_F(HttpConnectionManagerConfigTest, ServerOverwrite) { EXPECT_CALL(context_.runtime_loader_.snapshot_, featureEnabled(_, An())) .WillOnce(Invoke(&context_.runtime_loader_.snapshot_, &Runtime::MockSnapshot::featureEnabledDefault)); - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); EXPECT_EQ(HttpConnectionManagerConfig::HttpConnectionManagerProto::OVERWRITE, config.serverHeaderTransformation()); } @@ -818,9 +843,10 @@ TEST_F(HttpConnectionManagerConfigTest, ServerAppendIfAbsent) { EXPECT_CALL(context_.runtime_loader_.snapshot_, featureEnabled(_, An())) .WillOnce(Invoke(&context_.runtime_loader_.snapshot_, &Runtime::MockSnapshot::featureEnabledDefault)); - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); 
EXPECT_EQ(HttpConnectionManagerConfig::HttpConnectionManagerProto::APPEND_IF_ABSENT, config.serverHeaderTransformation()); } @@ -838,9 +864,10 @@ TEST_F(HttpConnectionManagerConfigTest, ServerPassThrough) { EXPECT_CALL(context_.runtime_loader_.snapshot_, featureEnabled(_, An())) .WillOnce(Invoke(&context_.runtime_loader_.snapshot_, &Runtime::MockSnapshot::featureEnabledDefault)); - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); EXPECT_EQ(HttpConnectionManagerConfig::HttpConnectionManagerProto::PASS_THROUGH, config.serverHeaderTransformation()); } @@ -859,9 +886,10 @@ TEST_F(HttpConnectionManagerConfigTest, NormalizePathDefault) { EXPECT_CALL(context_.runtime_loader_.snapshot_, featureEnabled(_, An())) .WillOnce(Invoke(&context_.runtime_loader_.snapshot_, &Runtime::MockSnapshot::featureEnabledDefault)); - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); #ifdef ENVOY_NORMALIZE_PATH_BY_DEFAULT EXPECT_TRUE(config.shouldNormalizePath()); #else @@ -882,9 +910,10 @@ TEST_F(HttpConnectionManagerConfigTest, NormalizePathRuntime) { EXPECT_CALL(context_.runtime_loader_.snapshot_, featureEnabled("http_connection_manager.normalize_path", An())) .WillOnce(Return(true)); - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + 
HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); EXPECT_TRUE(config.shouldNormalizePath()); } @@ -902,9 +931,10 @@ TEST_F(HttpConnectionManagerConfigTest, NormalizePathTrue) { EXPECT_CALL(context_.runtime_loader_.snapshot_, featureEnabled("http_connection_manager.normalize_path", An())) .Times(0); - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); EXPECT_TRUE(config.shouldNormalizePath()); } @@ -922,9 +952,10 @@ TEST_F(HttpConnectionManagerConfigTest, NormalizePathFalse) { EXPECT_CALL(context_.runtime_loader_.snapshot_, featureEnabled("http_connection_manager.normalize_path", An())) .Times(0); - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); EXPECT_FALSE(config.shouldNormalizePath()); } @@ -938,9 +969,10 @@ TEST_F(HttpConnectionManagerConfigTest, MergeSlashesDefault) { - name: envoy.filters.http.router )EOF"; - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, 
date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); EXPECT_FALSE(config.shouldMergeSlashes()); } @@ -955,9 +987,10 @@ TEST_F(HttpConnectionManagerConfigTest, MergeSlashesTrue) { - name: envoy.filters.http.router )EOF"; - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); EXPECT_TRUE(config.shouldMergeSlashes()); } @@ -972,12 +1005,66 @@ TEST_F(HttpConnectionManagerConfigTest, MergeSlashesFalse) { - name: envoy.filters.http.router )EOF"; - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); EXPECT_FALSE(config.shouldMergeSlashes()); } +// Validated that by default we don't remove port. 
+TEST_F(HttpConnectionManagerConfigTest, RemovePortDefault) { + const std::string yaml_string = R"EOF( + stat_prefix: ingress_http + route_config: + name: local_route + http_filters: + - name: envoy.filters.http.router + )EOF"; + + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, + date_provider_, route_config_provider_manager_, + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); + EXPECT_FALSE(config.shouldStripMatchingPort()); +} + +// Validated that when configured, we remove port. +TEST_F(HttpConnectionManagerConfigTest, RemovePortTrue) { + const std::string yaml_string = R"EOF( + stat_prefix: ingress_http + route_config: + name: local_route + strip_matching_host_port: true + http_filters: + - name: envoy.filters.http.router + )EOF"; + + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, + date_provider_, route_config_provider_manager_, + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); + EXPECT_TRUE(config.shouldStripMatchingPort()); +} + +// Validated that when explicitly set false, we don't remove port. +TEST_F(HttpConnectionManagerConfigTest, RemovePortFalse) { + const std::string yaml_string = R"EOF( + stat_prefix: ingress_http + route_config: + name: local_route + strip_matching_host_port: false + http_filters: + - name: envoy.filters.http.router + )EOF"; + + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, + date_provider_, route_config_provider_manager_, + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); + EXPECT_FALSE(config.shouldStripMatchingPort()); +} + // Validated that by default we allow requests with header names containing underscores. 
TEST_F(HttpConnectionManagerConfigTest, HeadersWithUnderscoresAllowedByDefault) { const std::string yaml_string = R"EOF( @@ -988,9 +1075,10 @@ TEST_F(HttpConnectionManagerConfigTest, HeadersWithUnderscoresAllowedByDefault) - name: envoy.filters.http.router )EOF"; - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); EXPECT_EQ(envoy::config::core::v3::HttpProtocolOptions::ALLOW, config.headersWithUnderscoresAction()); } @@ -1007,9 +1095,10 @@ TEST_F(HttpConnectionManagerConfigTest, HeadersWithUnderscoresDroppedByConfig) { - name: envoy.filters.http.router )EOF"; - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); EXPECT_EQ(envoy::config::core::v3::HttpProtocolOptions::DROP_HEADER, config.headersWithUnderscoresAction()); } @@ -1026,9 +1115,10 @@ TEST_F(HttpConnectionManagerConfigTest, HeadersWithUnderscoresRequestRejectedByC - name: envoy.filters.http.router )EOF"; - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); 
EXPECT_EQ(envoy::config::core::v3::HttpProtocolOptions::REJECT_REQUEST, config.headersWithUnderscoresAction()); } @@ -1043,9 +1133,10 @@ TEST_F(HttpConnectionManagerConfigTest, ConfiguredRequestTimeout) { - name: envoy.filters.http.router )EOF"; - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); EXPECT_EQ(53 * 1000, config.requestTimeout().count()); } @@ -1059,9 +1150,10 @@ TEST_F(HttpConnectionManagerConfigTest, DisabledRequestTimeout) { - name: envoy.filters.http.router )EOF"; - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); EXPECT_EQ(0, config.requestTimeout().count()); } @@ -1074,9 +1166,10 @@ TEST_F(HttpConnectionManagerConfigTest, UnconfiguredRequestTimeout) { - name: envoy.filters.http.router )EOF"; - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); EXPECT_EQ(0, config.requestTimeout().count()); } @@ -1101,7 +1194,7 @@ stat_prefix: router - name: envoy.filters.http.router )EOF"; - auto proto_config = 
parseHttpConnectionManagerFromV2Yaml(yaml_string); + auto proto_config = parseHttpConnectionManagerFromYaml(yaml_string); HttpConnectionManagerFilterConfigFactory factory; // We expect a single slot allocation vs. multiple. EXPECT_CALL(context_.thread_local_, allocateSlot()); @@ -1128,7 +1221,7 @@ stat_prefix: my_stat_prefix - {} )EOF"; - EXPECT_THROW(parseHttpConnectionManagerFromV2Yaml(yaml_string), EnvoyException); + EXPECT_THROW(parseHttpConnectionManagerFromYaml(yaml_string), EnvoyException); } TEST_F(HttpConnectionManagerConfigTest, BadAccessLogConfig) { @@ -1156,7 +1249,7 @@ stat_prefix: my_stat_prefix filter: [] )EOF"; - EXPECT_THROW_WITH_REGEX(parseHttpConnectionManagerFromV2Yaml(yaml_string), EnvoyException, + EXPECT_THROW_WITH_REGEX(parseHttpConnectionManagerFromYaml(yaml_string), EnvoyException, "filter: Proto field is not repeating, cannot start list."); } @@ -1186,7 +1279,7 @@ stat_prefix: my_stat_prefix bad_type: {} )EOF"; - EXPECT_THROW_WITH_REGEX(parseHttpConnectionManagerFromV2Yaml(yaml_string), EnvoyException, + EXPECT_THROW_WITH_REGEX(parseHttpConnectionManagerFromYaml(yaml_string), EnvoyException, "bad_type: Cannot find field"); } @@ -1224,7 +1317,7 @@ stat_prefix: my_stat_prefix - not_health_check_filter: {} )EOF"; - EXPECT_THROW_WITH_REGEX(parseHttpConnectionManagerFromV2Yaml(yaml_string), EnvoyException, + EXPECT_THROW_WITH_REGEX(parseHttpConnectionManagerFromYaml(yaml_string), EnvoyException, "bad_type: Cannot find field"); } @@ -1252,9 +1345,7 @@ stat_prefix: my_stat_prefix custom_settings_parameters: { identifier: 3, value: 2048 } )EOF"; // This will throw when Http2ProtocolOptions validation fails. 
- HttpConnectionManagerConfig(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, - date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + createHttpConnectionManagerConfig(yaml_string); } // Validates that named and user defined parameter collisions will trigger a config validation @@ -1286,10 +1377,7 @@ stat_prefix: my_stat_prefix - { identifier: 3, value: 1024 } )EOF"; EXPECT_THROW_WITH_REGEX( - HttpConnectionManagerConfig(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, - date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_), - EnvoyException, + createHttpConnectionManagerConfig(yaml_string), EnvoyException, R"(the \{hpack_table_size,max_concurrent_streams\} HTTP/2 SETTINGS parameter\(s\) can not be)" " configured"); } @@ -1320,10 +1408,7 @@ stat_prefix: my_stat_prefix - { identifier: 8, value: 0 } )EOF"; EXPECT_THROW_WITH_REGEX( - HttpConnectionManagerConfig(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, - date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_), - EnvoyException, + createHttpConnectionManagerConfig(yaml_string), EnvoyException, "the \"allow_connect\" SETTINGS parameter must only be configured through the named field"); const std::string yaml_string2 = R"EOF( @@ -1345,9 +1430,7 @@ stat_prefix: my_stat_prefix http2_protocol_options: allow_connect: true )EOF"; - HttpConnectionManagerConfig(parseHttpConnectionManagerFromV2Yaml(yaml_string2), context_, - date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + createHttpConnectionManagerConfig(yaml_string2); } // Validates that setting the server push parameter via user defined parameters is disallowed. 
@@ -1373,10 +1456,7 @@ stat_prefix: my_stat_prefix )EOF"; EXPECT_THROW_WITH_REGEX( - HttpConnectionManagerConfig(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, - date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_), - EnvoyException, + createHttpConnectionManagerConfig(yaml_string), EnvoyException, "server push is not supported by Envoy and can not be enabled via a SETTINGS parameter."); // Specify both the server push parameter and colliding named and user defined parameters. @@ -1407,10 +1487,7 @@ stat_prefix: my_stat_prefix // The server push exception is thrown first. EXPECT_THROW_WITH_REGEX( - HttpConnectionManagerConfig(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, - date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_), - EnvoyException, + createHttpConnectionManagerConfig(yaml_string), EnvoyException, "server push is not supported by Envoy and can not be enabled via a SETTINGS parameter."); } @@ -1441,10 +1518,7 @@ stat_prefix: my_stat_prefix - { identifier: 12, value: 10 } )EOF"; EXPECT_THROW_WITH_REGEX( - HttpConnectionManagerConfig(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, - date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_), - EnvoyException, + createHttpConnectionManagerConfig(yaml_string), EnvoyException, R"(inconsistent HTTP/2 custom SETTINGS parameter\(s\) detected; identifiers = \{0x0a\})"); } @@ -1458,6 +1532,39 @@ TEST_F(HttpConnectionManagerConfigTest, DEPRECATED_FEATURE_TEST(DeprecatedExtens deprecated_name)); } +TEST_F(HttpConnectionManagerConfigTest, AlwaysSetRequestIdInResponseDefault) { + const std::string yaml_string = R"EOF( + stat_prefix: ingress_http + route_config: + name: local_route + http_filters: + - name: envoy.filters.http.router + )EOF"; + + HttpConnectionManagerConfig 
config(parseHttpConnectionManagerFromYaml(yaml_string), context_, + date_provider_, route_config_provider_manager_, + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); + EXPECT_FALSE(config.alwaysSetRequestIdInResponse()); +} + +TEST_F(HttpConnectionManagerConfigTest, AlwaysSetRequestIdInResponseConfigured) { + const std::string yaml_string = R"EOF( + stat_prefix: ingress_http + always_set_request_id_in_response: true + route_config: + name: local_route + http_filters: + - name: envoy.filters.http.router + )EOF"; + + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, + date_provider_, route_config_provider_manager_, + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); + EXPECT_TRUE(config.alwaysSetRequestIdInResponse()); +} + namespace { class TestRequestIDExtension : public Http::RequestIDExtension { @@ -1517,9 +1624,10 @@ TEST_F(HttpConnectionManagerConfigTest, CustomRequestIDExtension) { TestRequestIDExtensionFactory factory; Registry::InjectFactory registration(factory); - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); auto request_id_extension = dynamic_cast(config.requestIDExtension().get()); ASSERT_NE(nullptr, request_id_extension); @@ -1538,11 +1646,8 @@ TEST_F(HttpConnectionManagerConfigTest, UnknownRequestIDExtension) { - name: envoy.filters.http.router )EOF"; - EXPECT_THROW_WITH_REGEX( - HttpConnectionManagerConfig(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, - date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, 
http_tracer_manager_), - EnvoyException, "Didn't find a registered implementation for type"); + EXPECT_THROW_WITH_REGEX(createHttpConnectionManagerConfig(yaml_string), EnvoyException, + "Didn't find a registered implementation for type"); } TEST_F(HttpConnectionManagerConfigTest, DefaultRequestIDExtension) { @@ -1555,14 +1660,282 @@ TEST_F(HttpConnectionManagerConfigTest, DefaultRequestIDExtension) { - name: envoy.filters.http.router )EOF"; - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); auto request_id_extension = dynamic_cast(config.requestIDExtension().get()); ASSERT_NE(nullptr, request_id_extension); } +TEST_F(HttpConnectionManagerConfigTest, LegacyH1Codecs) { + const std::string yaml_string = R"EOF( +codec_type: http1 +server_name: foo +stat_prefix: router +route_config: + virtual_hosts: + - name: service + domains: + - "*" + routes: + - match: + prefix: "/" + route: + cluster: cluster +http_filters: +- name: envoy.filters.http.router + )EOF"; + + envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager + proto_config; + TestUtility::loadFromYaml(yaml_string, proto_config); + NiceMock filter_callbacks; + EXPECT_CALL(context_.runtime_loader_.snapshot_, runtimeFeatureEnabled(_)).WillOnce(Return(false)); + auto http_connection_manager_factory = + HttpConnectionManagerFactory::createHttpConnectionManagerFactoryFromProto( + proto_config, context_, filter_callbacks); + http_connection_manager_factory(); +} + +TEST_F(HttpConnectionManagerConfigTest, LegacyH2Codecs) { + const std::string yaml_string = R"EOF( +codec_type: http2 +server_name: foo +stat_prefix: router +route_config: + 
virtual_hosts: + - name: service + domains: + - "*" + routes: + - match: + prefix: "/" + route: + cluster: cluster +http_filters: +- name: envoy.filters.http.router + )EOF"; + + envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager + proto_config; + TestUtility::loadFromYaml(yaml_string, proto_config); + NiceMock filter_callbacks; + EXPECT_CALL(context_.runtime_loader_.snapshot_, runtimeFeatureEnabled(_)).WillOnce(Return(false)); + auto http_connection_manager_factory = + HttpConnectionManagerFactory::createHttpConnectionManagerFactoryFromProto( + proto_config, context_, filter_callbacks); + http_connection_manager_factory(); +} + +TEST_F(HttpConnectionManagerConfigTest, DynamicFilterWarmingNoDefault) { + const std::string yaml_string = R"EOF( +codec_type: http1 +stat_prefix: router +route_config: + virtual_hosts: + - name: service + domains: + - "*" + routes: + - match: + prefix: "/" + route: + cluster: cluster +http_filters: +- name: foo + config_discovery: + config_source: { ads: {} } + apply_default_config_without_warming: true + type_urls: + - type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + )EOF"; + + EXPECT_THROW_WITH_MESSAGE( + createHttpConnectionManagerConfig(yaml_string), EnvoyException, + "Error: filter config foo applied without warming but has no default config."); +} + +TEST_F(HttpConnectionManagerConfigTest, DynamicFilterBadDefault) { + const std::string yaml_string = R"EOF( +codec_type: http1 +stat_prefix: router +route_config: + virtual_hosts: + - name: service + domains: + - "*" + routes: + - match: + prefix: "/" + route: + cluster: cluster +http_filters: +- name: foo + config_discovery: + config_source: { ads: {} } + default_config: + "@type": type.googleapis.com/google.protobuf.Value + type_urls: + - type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + )EOF"; + + EXPECT_THROW_WITH_MESSAGE( + createHttpConnectionManagerConfig(yaml_string), EnvoyException, + "Error: cannot find 
filter factory foo for default filter configuration with type URL " + "type.googleapis.com/google.protobuf.Value."); +} + +TEST_F(HttpConnectionManagerConfigTest, DynamicFilterDefaultNotTerminal) { + const std::string yaml_string = R"EOF( +codec_type: http1 +stat_prefix: router +route_config: + virtual_hosts: + - name: service + domains: + - "*" + routes: + - match: + prefix: "/" + route: + cluster: cluster +http_filters: +- name: foo + config_discovery: + config_source: { ads: {} } + default_config: + "@type": type.googleapis.com/envoy.config.filter.http.health_check.v2.HealthCheck + type_urls: + - type.googleapis.com/envoy.config.filter.http.health_check.v2.HealthCheck + )EOF"; + + EXPECT_THROW_WITH_MESSAGE( + createHttpConnectionManagerConfig(yaml_string), EnvoyException, + "Error: non-terminal filter named foo of type envoy.filters.http.health_check is the last " + "filter in a http filter chain."); +} + +TEST_F(HttpConnectionManagerConfigTest, DynamicFilterDefaultTerminal) { + const std::string yaml_string = R"EOF( +codec_type: http1 +stat_prefix: router +route_config: + virtual_hosts: + - name: service + domains: + - "*" + routes: + - match: + prefix: "/" + route: + cluster: cluster +http_filters: +- name: foo + config_discovery: + config_source: { ads: {} } + default_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + type_urls: + - type.googleapis.com/envoy.extensions.filters.http.router.v3.Router +- name: envoy.filters.http.router + )EOF"; + + EXPECT_THROW_WITH_MESSAGE(createHttpConnectionManagerConfig(yaml_string), EnvoyException, + "Error: terminal filter named foo of type envoy.filters.http.router " + "must be the last filter in a http filter chain."); +} + +TEST_F(HttpConnectionManagerConfigTest, DynamicFilterDefaultRequireTypeUrl) { + const std::string yaml_string = R"EOF( +codec_type: http1 +stat_prefix: router +route_config: + virtual_hosts: + - name: service + domains: + - "*" + routes: + - match: + prefix: "/" + 
route: + cluster: cluster +http_filters: +- name: foo + config_discovery: + config_source: { ads: {} } + default_config: + "@type": type.googleapis.com/udpa.type.v1.TypedStruct + type_url: type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + type_urls: + - type.googleapis.com/envoy.config.filter.http.health_check.v2.HealthCheck +- name: envoy.filters.http.router + )EOF"; + + EXPECT_THROW_WITH_MESSAGE( + createHttpConnectionManagerConfig(yaml_string), EnvoyException, + "Error: filter config has type URL envoy.extensions.filters.http.router.v3.Router but " + "expect envoy.config.filter.http.health_check.v2.HealthCheck."); +} + +TEST_F(HttpConnectionManagerConfigTest, DynamicFilterRequireTypeUrlMissingFactory) { + const std::string yaml_string = R"EOF( +codec_type: http1 +stat_prefix: router +route_config: + virtual_hosts: + - name: service + domains: + - "*" + routes: + - match: + prefix: "/" + route: + cluster: cluster +http_filters: +- name: foo + config_discovery: + config_source: { ads: {} } + type_urls: + - type.googleapis.com/google.protobuf.Value + )EOF"; + + EXPECT_THROW_WITH_MESSAGE( + createHttpConnectionManagerConfig(yaml_string), EnvoyException, + "Error: no factory found for a required type URL google.protobuf.Value."); +} + +TEST_F(HttpConnectionManagerConfigTest, DynamicFilterDefaultValid) { + const std::string yaml_string = R"EOF( +codec_type: http1 +stat_prefix: router +route_config: + virtual_hosts: + - name: service + domains: + - "*" + routes: + - match: + prefix: "/" + route: + cluster: cluster +http_filters: +- name: foo + config_discovery: + config_source: { ads: {} } + default_config: + "@type": type.googleapis.com/envoy.config.filter.http.health_check.v2.HealthCheck + pass_through_mode: false + type_urls: + - type.googleapis.com/envoy.config.filter.http.health_check.v2.HealthCheck + apply_default_config_without_warming: true +- name: envoy.filters.http.router + )EOF"; + + createHttpConnectionManagerConfig(yaml_string); +} + 
class FilterChainTest : public HttpConnectionManagerConfigTest { public: const std::string basic_config_ = R"EOF( @@ -1587,9 +1960,10 @@ stat_prefix: router }; TEST_F(FilterChainTest, CreateFilterChain) { - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(basic_config_), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(basic_config_), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); Http::MockFilterChainFactoryCallbacks callbacks; EXPECT_CALL(callbacks, addStreamFilter(_)); // Buffer @@ -1597,14 +1971,66 @@ TEST_F(FilterChainTest, CreateFilterChain) { config.createFilterChain(callbacks); } +TEST_F(FilterChainTest, CreateDynamicFilterChain) { + const std::string yaml_string = R"EOF( +codec_type: http1 +stat_prefix: router +route_config: + virtual_hosts: + - name: service + domains: + - "*" + routes: + - match: + prefix: "/" + route: + cluster: cluster +http_filters: +- name: foo + config_discovery: + config_source: { ads: {} } + type_urls: + - type.googleapis.com/envoy.config.filter.http.health_check.v2.HealthCheck +- name: bar + config_discovery: + config_source: { ads: {} } + type_urls: + - type.googleapis.com/envoy.config.filter.http.health_check.v2.HealthCheck +- name: envoy.filters.http.router + )EOF"; + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, + date_provider_, route_config_provider_manager_, + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); + + Http::MockFilterChainFactoryCallbacks callbacks; + Http::StreamDecoderFilterSharedPtr missing_config_filter; + EXPECT_CALL(callbacks, addStreamDecoderFilter(_)) + .Times(2) + .WillOnce(testing::SaveArg<0>(&missing_config_filter)) + .WillOnce(Return()); // MissingConfigFilter (only once) and 
router + config.createFilterChain(callbacks); + + Http::MockStreamDecoderFilterCallbacks decoder_callbacks; + NiceMock stream_info; + EXPECT_CALL(decoder_callbacks, streamInfo()).WillRepeatedly(ReturnRef(stream_info)); + EXPECT_CALL(decoder_callbacks, sendLocalReply(Http::Code::InternalServerError, _, _, _, _)) + .WillRepeatedly(Return()); + Http::TestRequestHeaderMapImpl headers; + missing_config_filter->setDecoderFilterCallbacks(decoder_callbacks); + missing_config_filter->decodeHeaders(headers, false); + EXPECT_TRUE(stream_info.hasResponseFlag(StreamInfo::ResponseFlag::NoFilterConfigFound)); +} + // Tests where upgrades are configured on via the HCM. TEST_F(FilterChainTest, CreateUpgradeFilterChain) { - auto hcm_config = parseHttpConnectionManagerFromV2Yaml(basic_config_); + auto hcm_config = parseHttpConnectionManagerFromYaml(basic_config_); hcm_config.add_upgrade_configs()->set_upgrade_type("websocket"); HttpConnectionManagerConfig config(hcm_config, context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); NiceMock callbacks; // Check the case where WebSockets are configured in the HCM, and no router @@ -1645,13 +2071,14 @@ TEST_F(FilterChainTest, CreateUpgradeFilterChain) { // Tests where upgrades are configured off via the HCM. 
TEST_F(FilterChainTest, CreateUpgradeFilterChainHCMDisabled) { - auto hcm_config = parseHttpConnectionManagerFromV2Yaml(basic_config_); + auto hcm_config = parseHttpConnectionManagerFromYaml(basic_config_); hcm_config.add_upgrade_configs()->set_upgrade_type("websocket"); hcm_config.mutable_upgrade_configs(0)->mutable_enabled()->set_value(false); HttpConnectionManagerConfig config(hcm_config, context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); NiceMock callbacks; // Check the case where WebSockets are off in the HCM, and no router config is present. @@ -1683,7 +2110,7 @@ TEST_F(FilterChainTest, CreateUpgradeFilterChainHCMDisabled) { } TEST_F(FilterChainTest, CreateCustomUpgradeFilterChain) { - auto hcm_config = parseHttpConnectionManagerFromV2Yaml(basic_config_); + auto hcm_config = parseHttpConnectionManagerFromYaml(basic_config_); auto websocket_config = hcm_config.add_upgrade_configs(); websocket_config->set_upgrade_type("websocket"); @@ -1705,7 +2132,8 @@ TEST_F(FilterChainTest, CreateCustomUpgradeFilterChain) { HttpConnectionManagerConfig config(hcm_config, context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); { Http::MockFilterChainFactoryCallbacks callbacks; @@ -1729,7 +2157,7 @@ TEST_F(FilterChainTest, CreateCustomUpgradeFilterChain) { } TEST_F(FilterChainTest, CreateCustomUpgradeFilterChainWithRouterNotLast) { - auto hcm_config = parseHttpConnectionManagerFromV2Yaml(basic_config_); + auto hcm_config = parseHttpConnectionManagerFromYaml(basic_config_); auto websocket_config = hcm_config.add_upgrade_configs(); websocket_config->set_upgrade_type("websocket"); @@ -1749,21 +2177,23 @@ TEST_F(FilterChainTest, 
CreateCustomUpgradeFilterChainWithRouterNotLast) { EXPECT_THROW_WITH_MESSAGE( HttpConnectionManagerConfig(hcm_config, context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_), + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_), EnvoyException, "Error: terminal filter named envoy.filters.http.router of type envoy.filters.http.router " "must be the last filter in a http upgrade filter chain."); } TEST_F(FilterChainTest, InvalidConfig) { - auto hcm_config = parseHttpConnectionManagerFromV2Yaml(basic_config_); + auto hcm_config = parseHttpConnectionManagerFromYaml(basic_config_); hcm_config.add_upgrade_configs()->set_upgrade_type("WEBSOCKET"); hcm_config.add_upgrade_configs()->set_upgrade_type("websocket"); EXPECT_THROW_WITH_MESSAGE( HttpConnectionManagerConfig(hcm_config, context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_), + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_), EnvoyException, "Error: multiple upgrade configs with the same name: 'websocket'"); } diff --git a/test/extensions/filters/network/kafka/BUILD b/test/extensions/filters/network/kafka/BUILD index 19ac80b6947f1..de260c365d7d8 100644 --- a/test/extensions/filters/network/kafka/BUILD +++ b/test/extensions/filters/network/kafka/BUILD @@ -1,5 +1,4 @@ -licenses(["notice"]) # Apache 2 - +load("@rules_python//python:defs.bzl", "py_binary") load( "//bazel:envoy_build_system.bzl", "envoy_cc_test_library", @@ -10,6 +9,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test_library( @@ -100,7 +101,6 @@ envoy_extension_cc_test( ":buffer_based_test_lib", ":serialization_utilities_lib", "//source/extensions/filters/network/kafka:kafka_request_parser_lib", - "//test/mocks/server:server_mocks", ], ) @@ -111,7 +111,6 @@ 
envoy_extension_cc_test( deps = [ ":buffer_based_test_lib", "//source/extensions/filters/network/kafka:kafka_request_codec_lib", - "//test/mocks/server:server_mocks", ], ) @@ -123,7 +122,6 @@ envoy_extension_cc_test( ":buffer_based_test_lib", ":serialization_utilities_lib", "//source/extensions/filters/network/kafka:kafka_request_codec_lib", - "//test/mocks/server:server_mocks", ], ) @@ -135,7 +133,6 @@ envoy_extension_cc_test( ":buffer_based_test_lib", ":serialization_utilities_lib", "//source/extensions/filters/network/kafka:kafka_request_codec_lib", - "//test/mocks/server:server_mocks", ], ) @@ -146,7 +143,6 @@ envoy_extension_cc_test( deps = [ ":buffer_based_test_lib", "//source/extensions/filters/network/kafka:kafka_request_codec_lib", - "//test/mocks/server:server_mocks", ], ) @@ -180,7 +176,6 @@ envoy_extension_cc_test( ":buffer_based_test_lib", ":serialization_utilities_lib", "//source/extensions/filters/network/kafka:kafka_response_parser_lib", - "//test/mocks/server:server_mocks", ], ) @@ -191,7 +186,6 @@ envoy_extension_cc_test( deps = [ ":buffer_based_test_lib", "//source/extensions/filters/network/kafka:kafka_response_codec_lib", - "//test/mocks/server:server_mocks", ], ) @@ -203,7 +197,6 @@ envoy_extension_cc_test( ":buffer_based_test_lib", ":serialization_utilities_lib", "//source/extensions/filters/network/kafka:kafka_response_codec_lib", - "//test/mocks/server:server_mocks", ], ) @@ -215,7 +208,6 @@ envoy_extension_cc_test( ":buffer_based_test_lib", ":serialization_utilities_lib", "//source/extensions/filters/network/kafka:kafka_response_codec_lib", - "//test/mocks/server:server_mocks", ], ) @@ -226,7 +218,6 @@ envoy_extension_cc_test( deps = [ ":buffer_based_test_lib", "//source/extensions/filters/network/kafka:kafka_response_codec_lib", - "//test/mocks/server:server_mocks", ], ) diff --git a/test/extensions/filters/network/kafka/broker/BUILD b/test/extensions/filters/network/kafka/broker/BUILD index 89664fb909b8d..cc64251e3f593 100644 --- 
a/test/extensions/filters/network/kafka/broker/BUILD +++ b/test/extensions/filters/network/kafka/broker/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( @@ -17,7 +17,7 @@ envoy_extension_cc_test( extension_name = "envoy.filters.network.kafka_broker", deps = [ "//source/extensions/filters/network/kafka:kafka_broker_config_lib", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", ], ) @@ -26,8 +26,10 @@ envoy_extension_cc_test( srcs = ["filter_unit_test.cc"], extension_name = "envoy.filters.network.kafka_broker", deps = [ + "//include/envoy/event:timer_interface", "//source/extensions/filters/network/kafka:kafka_broker_filter_lib", - "//test/mocks/server:server_mocks", + "//test/mocks/network:network_mocks", + "//test/mocks/stats:stats_mocks", ], ) diff --git a/test/extensions/filters/network/kafka/broker/config_unit_test.cc b/test/extensions/filters/network/kafka/broker/config_unit_test.cc index d316bb65d5d9d..e464acad0b960 100644 --- a/test/extensions/filters/network/kafka/broker/config_unit_test.cc +++ b/test/extensions/filters/network/kafka/broker/config_unit_test.cc @@ -1,6 +1,6 @@ #include "extensions/filters/network/kafka/broker/config.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git a/test/extensions/filters/network/kafka/broker/filter_unit_test.cc b/test/extensions/filters/network/kafka/broker/filter_unit_test.cc index 13f4c6cd271f8..0555fe75f625e 100644 --- a/test/extensions/filters/network/kafka/broker/filter_unit_test.cc +++ b/test/extensions/filters/network/kafka/broker/filter_unit_test.cc @@ -1,7 +1,9 @@ +#include "envoy/event/timer.h" + #include "extensions/filters/network/kafka/broker/filter.h" #include 
"extensions/filters/network/kafka/external/requests.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/network/mocks.h" #include "test/mocks/stats/mocks.h" #include "gmock/gmock.h" @@ -218,7 +220,7 @@ TEST_F(KafkaMetricsFacadeImplUnitTest, ShouldRegisterRequest) { EXPECT_CALL(*request_metrics_, onRequest(api_key)); - MonotonicTime time_point{MonotonicTime::duration(1234)}; + MonotonicTime time_point{Event::TimeSystem::Milliseconds(1234)}; EXPECT_CALL(time_source_, monotonicTime()).WillOnce(Return(time_point)); // when @@ -248,10 +250,10 @@ TEST_F(KafkaMetricsFacadeImplUnitTest, ShouldRegisterResponse) { const int32_t correlation_id = 1234; AbstractResponseSharedPtr response = std::make_shared(api_key, correlation_id); - MonotonicTime request_time_point{MonotonicTime::duration(1234000000)}; + MonotonicTime request_time_point{Event::TimeSystem::Milliseconds(1234)}; testee_.getRequestArrivalsForTest()[correlation_id] = request_time_point; - MonotonicTime response_time_point{MonotonicTime::duration(2345000000)}; + MonotonicTime response_time_point{Event::TimeSystem::Milliseconds(2345)}; EXPECT_CALL(*response_metrics_, onResponse(api_key, 1111)); EXPECT_CALL(time_source_, monotonicTime()).WillOnce(Return(response_time_point)); diff --git a/test/extensions/filters/network/kafka/broker/integration_test/BUILD b/test/extensions/filters/network/kafka/broker/integration_test/BUILD index 14fec8991db52..d82d6b95c0115 100644 --- a/test/extensions/filters/network/kafka/broker/integration_test/BUILD +++ b/test/extensions/filters/network/kafka/broker/integration_test/BUILD @@ -1,10 +1,11 @@ -licenses(["notice"]) # Apache 2 - +load("@rules_python//python:defs.bzl", "py_test") load( "//bazel:envoy_build_system.bzl", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() py_test( diff --git a/test/extensions/filters/network/kafka/kafka_request_parser_test.cc b/test/extensions/filters/network/kafka/kafka_request_parser_test.cc index 
42bd15436ac31..cb20b878098f6 100644 --- a/test/extensions/filters/network/kafka/kafka_request_parser_test.cc +++ b/test/extensions/filters/network/kafka/kafka_request_parser_test.cc @@ -2,7 +2,6 @@ #include "test/extensions/filters/network/kafka/buffer_based_test.h" #include "test/extensions/filters/network/kafka/serialization_utilities.h" -#include "test/mocks/server/mocks.h" #include "gmock/gmock.h" diff --git a/test/extensions/filters/network/kafka/kafka_response_parser_test.cc b/test/extensions/filters/network/kafka/kafka_response_parser_test.cc index 4028423635c47..b905fc326f449 100644 --- a/test/extensions/filters/network/kafka/kafka_response_parser_test.cc +++ b/test/extensions/filters/network/kafka/kafka_response_parser_test.cc @@ -2,7 +2,6 @@ #include "test/extensions/filters/network/kafka/buffer_based_test.h" #include "test/extensions/filters/network/kafka/serialization_utilities.h" -#include "test/mocks/server/mocks.h" #include "gmock/gmock.h" diff --git a/test/extensions/filters/network/kafka/protocol/request_codec_request_test_cc.j2 b/test/extensions/filters/network/kafka/protocol/request_codec_request_test_cc.j2 index aa03d24ea8423..4c29ff3730766 100644 --- a/test/extensions/filters/network/kafka/protocol/request_codec_request_test_cc.j2 +++ b/test/extensions/filters/network/kafka/protocol/request_codec_request_test_cc.j2 @@ -14,7 +14,6 @@ #include "test/extensions/filters/network/kafka/buffer_based_test.h" #include "test/extensions/filters/network/kafka/serialization_utilities.h" -#include "test/mocks/server/mocks.h" #include "gtest/gtest.h" diff --git a/test/extensions/filters/network/kafka/protocol/requests_test_cc.j2 b/test/extensions/filters/network/kafka/protocol/requests_test_cc.j2 index b6af59d8962ce..ec96a4d90047c 100644 --- a/test/extensions/filters/network/kafka/protocol/requests_test_cc.j2 +++ b/test/extensions/filters/network/kafka/protocol/requests_test_cc.j2 @@ -7,7 +7,6 @@ #include "extensions/filters/network/kafka/request_codec.h" 
#include "test/extensions/filters/network/kafka/buffer_based_test.h" -#include "test/mocks/server/mocks.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git a/test/extensions/filters/network/kafka/protocol/response_codec_response_test_cc.j2 b/test/extensions/filters/network/kafka/protocol/response_codec_response_test_cc.j2 index 060d1c64f3709..f366452cf19f6 100644 --- a/test/extensions/filters/network/kafka/protocol/response_codec_response_test_cc.j2 +++ b/test/extensions/filters/network/kafka/protocol/response_codec_response_test_cc.j2 @@ -14,7 +14,6 @@ #include "test/extensions/filters/network/kafka/buffer_based_test.h" #include "test/extensions/filters/network/kafka/serialization_utilities.h" -#include "test/mocks/server/mocks.h" #include "gtest/gtest.h" diff --git a/test/extensions/filters/network/kafka/protocol/responses_test_cc.j2 b/test/extensions/filters/network/kafka/protocol/responses_test_cc.j2 index f12a21f846c7f..84fff592eb344 100644 --- a/test/extensions/filters/network/kafka/protocol/responses_test_cc.j2 +++ b/test/extensions/filters/network/kafka/protocol/responses_test_cc.j2 @@ -7,7 +7,6 @@ #include "extensions/filters/network/kafka/response_codec.h" #include "test/extensions/filters/network/kafka/buffer_based_test.h" -#include "test/mocks/server/mocks.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git a/test/extensions/filters/network/kafka/request_codec_integration_test.cc b/test/extensions/filters/network/kafka/request_codec_integration_test.cc index 69e77a7f8d2b9..8a7ae9b7a7a36 100644 --- a/test/extensions/filters/network/kafka/request_codec_integration_test.cc +++ b/test/extensions/filters/network/kafka/request_codec_integration_test.cc @@ -2,7 +2,6 @@ #include "test/extensions/filters/network/kafka/buffer_based_test.h" #include "test/extensions/filters/network/kafka/serialization_utilities.h" -#include "test/mocks/server/mocks.h" #include "gtest/gtest.h" diff --git 
a/test/extensions/filters/network/kafka/request_codec_unit_test.cc b/test/extensions/filters/network/kafka/request_codec_unit_test.cc index eec27d7152150..9d9c0734262f5 100644 --- a/test/extensions/filters/network/kafka/request_codec_unit_test.cc +++ b/test/extensions/filters/network/kafka/request_codec_unit_test.cc @@ -1,7 +1,6 @@ #include "extensions/filters/network/kafka/request_codec.h" #include "test/extensions/filters/network/kafka/buffer_based_test.h" -#include "test/mocks/server/mocks.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git a/test/extensions/filters/network/kafka/response_codec_integration_test.cc b/test/extensions/filters/network/kafka/response_codec_integration_test.cc index 08be46a156454..287ba9f1855a7 100644 --- a/test/extensions/filters/network/kafka/response_codec_integration_test.cc +++ b/test/extensions/filters/network/kafka/response_codec_integration_test.cc @@ -2,7 +2,6 @@ #include "test/extensions/filters/network/kafka/buffer_based_test.h" #include "test/extensions/filters/network/kafka/serialization_utilities.h" -#include "test/mocks/server/mocks.h" #include "gtest/gtest.h" diff --git a/test/extensions/filters/network/kafka/response_codec_unit_test.cc b/test/extensions/filters/network/kafka/response_codec_unit_test.cc index 0327b1ef738ca..07dc116aab068 100644 --- a/test/extensions/filters/network/kafka/response_codec_unit_test.cc +++ b/test/extensions/filters/network/kafka/response_codec_unit_test.cc @@ -1,7 +1,6 @@ #include "extensions/filters/network/kafka/response_codec.h" #include "test/extensions/filters/network/kafka/buffer_based_test.h" -#include "test/mocks/server/mocks.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git a/test/extensions/filters/network/kafka/serialization_test.cc b/test/extensions/filters/network/kafka/serialization_test.cc index 3d8a5af14e53b..2cc656926d645 100644 --- a/test/extensions/filters/network/kafka/serialization_test.cc +++ 
b/test/extensions/filters/network/kafka/serialization_test.cc @@ -323,7 +323,8 @@ TEST(NullableBytesDeserializer, ShouldDeserialize) { } TEST(NullableBytesDeserializer, ShouldDeserializeEmptyBytes) { - const NullableBytes value{{}}; + // gcc refuses to initialize optional with empty vector with value{{}} + const NullableBytes value = {{}}; serializeThenDeserializeAndCheckEquality(value); } diff --git a/test/extensions/filters/network/local_ratelimit/BUILD b/test/extensions/filters/network/local_ratelimit/BUILD index 854bfae8c8bcc..ee8bcec55d8b7 100644 --- a/test/extensions/filters/network/local_ratelimit/BUILD +++ b/test/extensions/filters/network/local_ratelimit/BUILD @@ -1,14 +1,16 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", + "envoy_cc_fuzz_test", "envoy_package", + "envoy_proto_library", ) load( "//test/extensions:extensions_build_system.bzl", "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( @@ -28,9 +30,34 @@ envoy_extension_cc_test( name = "local_ratelimit_integration_test", srcs = ["local_ratelimit_integration_test.cc"], extension_name = "envoy.filters.network.local_ratelimit", + tags = ["fails_on_windows"], deps = [ "//source/extensions/filters/network/local_ratelimit:config", "//source/extensions/filters/network/tcp_proxy:config", "//test/integration:integration_lib", ], ) + +envoy_proto_library( + name = "local_ratelimit_fuzz_proto", + srcs = ["local_ratelimit_fuzz.proto"], + deps = [ + "@envoy_api//envoy/extensions/filters/network/local_ratelimit/v3:pkg", + ], +) + +envoy_cc_fuzz_test( + name = "local_ratelimit_fuzz_test", + srcs = ["local_ratelimit_fuzz_test.cc"], + corpus = "local_ratelimit_corpus", + deps = [ + ":local_ratelimit_fuzz_proto_cc_proto", + "//source/common/buffer:buffer_lib", + "//source/extensions/filters/network/local_ratelimit:local_ratelimit_lib", + "//test/fuzz:utility_lib", + "//test/mocks/event:event_mocks", + 
"//test/mocks/network:network_mocks", + "//test/mocks/runtime:runtime_mocks", + "@envoy_api//envoy/extensions/filters/network/local_ratelimit/v3:pkg_cc_proto", + ], +) diff --git a/test/extensions/filters/network/local_ratelimit/local_ratelimit_corpus/basic_test_case b/test/extensions/filters/network/local_ratelimit/local_ratelimit_corpus/basic_test_case new file mode 100644 index 0000000000000..282b7dc8fd0ca --- /dev/null +++ b/test/extensions/filters/network/local_ratelimit/local_ratelimit_corpus/basic_test_case @@ -0,0 +1,39 @@ +config{ + stat_prefix: "local_rate_limit_stats" + token_bucket:{ + max_tokens: 1 + fill_interval{ + seconds: 1 + } + } + runtime_enabled:{ + default_value: { + value: true + } + runtime_key: "foo_key" + } + +} +actions { + on_new_connection { + } +} +actions { + on_data { + data: "\000\000" + } +} +actions { + on_data { + data: "\000\000" + } +} +actions { + on_new_connection { + } +} +actions { + on_data { + data: "\000\000" + } +} \ No newline at end of file diff --git a/test/extensions/filters/network/local_ratelimit/local_ratelimit_fuzz.proto b/test/extensions/filters/network/local_ratelimit/local_ratelimit_fuzz.proto new file mode 100644 index 0000000000000..d6b2896d06c94 --- /dev/null +++ b/test/extensions/filters/network/local_ratelimit/local_ratelimit_fuzz.proto @@ -0,0 +1,28 @@ +syntax = "proto3"; +package envoy.extensions.filters.network.local_ratelimit; + +import "envoy/extensions/filters/network/local_ratelimit/v3/local_rate_limit.proto"; +import "google/protobuf/empty.proto"; +import "validate/validate.proto"; + +message OnData { + bytes data = 1; + bool end_stream = 2; +} + +message Action { + oneof action_selector { + option (validate.required) = true; + // Call onNewConnection(). + google.protobuf.Empty on_new_connection = 1; + // Call onData(). + OnData on_data = 2; + // Timer ends and refill the bucket. 
+ google.protobuf.Empty refill = 3; + } +} +message LocalRateLimitTestCase { + envoy.extensions.filters.network.local_ratelimit.v3.LocalRateLimit config = 1 + [(validate.rules).message = {required: true}]; + repeated Action actions = 2; +} diff --git a/test/extensions/filters/network/local_ratelimit/local_ratelimit_fuzz_test.cc b/test/extensions/filters/network/local_ratelimit/local_ratelimit_fuzz_test.cc new file mode 100644 index 0000000000000..5e597bb552ade --- /dev/null +++ b/test/extensions/filters/network/local_ratelimit/local_ratelimit_fuzz_test.cc @@ -0,0 +1,102 @@ +#include "envoy/common/exception.h" +#include "envoy/extensions/filters/network/local_ratelimit/v3/local_rate_limit.pb.h" + +#include "common/buffer/buffer_impl.h" +#include "common/stats/isolated_store_impl.h" + +#include "extensions/filters/network/local_ratelimit/local_ratelimit.h" + +#include "test/extensions/filters/network/local_ratelimit/local_ratelimit_fuzz.pb.validate.h" +#include "test/fuzz/fuzz_runner.h" +#include "test/mocks/event/mocks.h" +#include "test/mocks/network/mocks.h" +#include "test/mocks/runtime/mocks.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::NiceMock; + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace LocalRateLimitFilter { +struct ActiveFilter { + ActiveFilter(const ConfigSharedPtr& config) : filter_(config) { + filter_.initializeReadFilterCallbacks(read_filter_callbacks_); + } + + NiceMock read_filter_callbacks_; + Filter filter_; +}; + +DEFINE_PROTO_FUZZER( + const envoy::extensions::filters::network::local_ratelimit::LocalRateLimitTestCase& input) { + + try { + TestUtility::validate(input); + } catch (const ProtoValidationException& e) { + ENVOY_LOG_MISC(debug, "ProtoValidationException: {}", e.what()); + return; + } catch (const ProtobufMessage::DeprecatedProtoFieldException& e) { + ENVOY_LOG_MISC(debug, "DeprecatedProtoFieldException: {}", e.what()); + return; + } + if 
(input.config().token_bucket().fill_interval().nanos() < 0) { + // TODO: + // protoc-gen-validate has an issue on type "Duration" which may generate interval with seconds + // > 0 while "nanos" < 0. And negative "nanos" will cause validation inside the filter to fail. + // see https://github.com/envoyproxy/protoc-gen-validate/issues/348 for detail. + ENVOY_LOG_MISC(debug, "In fill_interval, nanos should not be negative!"); + return; + } + static NiceMock dispatcher; + Stats::IsolatedStoreImpl stats_store; + static NiceMock runtime; + Event::MockTimer* fill_timer = new Event::MockTimer(&dispatcher); + envoy::extensions::filters::network::local_ratelimit::v3::LocalRateLimit proto_config = + input.config(); + ConfigSharedPtr config = nullptr; + try { + config = std::make_shared(proto_config, dispatcher, stats_store, runtime); + } catch (EnvoyException& e) { + ENVOY_LOG_MISC(debug, "EnvoyException in config's constructor: {}", e.what()); + return; + } + + ActiveFilter active_filter(config); + std::chrono::milliseconds fill_interval( + PROTOBUF_GET_MS_REQUIRED(proto_config.token_bucket(), fill_interval)); + + for (const auto& action : input.actions()) { + ENVOY_LOG_MISC(trace, "action {}", action.DebugString()); + + switch (action.action_selector_case()) { + case envoy::extensions::filters::network::local_ratelimit::Action::kOnData: { + Buffer::OwnedImpl buffer(action.on_data().data()); + active_filter.filter_.onData(buffer, action.on_data().end_stream()); + break; + } + case envoy::extensions::filters::network::local_ratelimit::Action::kOnNewConnection: { + active_filter.filter_.onNewConnection(); + break; + } + case envoy::extensions::filters::network::local_ratelimit::Action::kRefill: { + EXPECT_CALL(*fill_timer, enableTimer(fill_interval, nullptr)); + fill_timer->invokeCallback(); + break; + } + default: + // Unhandled actions + PANIC("A case is missing for an action"); + } + } +} // NOLINT(clang-analyzer-cplusplus.NewDeleteLeaks) + // Silence clang-tidy here 
because it thinks there is a memory leak for "fill_timer" + // However, ownership of each MockTimer instance is transferred to the (caller of) dispatcher's + // createTimer_(), so to avoid destructing it twice, the MockTimer must have been dynamically + // allocated and must not be deleted by it's creator. See test/mocks/event/mocks.cc for detail. +} // namespace LocalRateLimitFilter +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/network/local_ratelimit/local_ratelimit_integration_test.cc b/test/extensions/filters/network/local_ratelimit/local_ratelimit_integration_test.cc index bb1fb3be37a5d..63b684f49d860 100644 --- a/test/extensions/filters/network/local_ratelimit/local_ratelimit_integration_test.cc +++ b/test/extensions/filters/network/local_ratelimit/local_ratelimit_integration_test.cc @@ -10,11 +10,6 @@ class LocalRateLimitIntegrationTest : public Event::TestUsingSimulatedTime, LocalRateLimitIntegrationTest() : BaseIntegrationTest(GetParam(), ConfigHelper::tcpProxyConfig()) {} - ~LocalRateLimitIntegrationTest() override { - test_server_.reset(); - fake_upstreams_.clear(); - } - void setup(const std::string& filter_yaml) { config_helper_.addNetworkFilter(filter_yaml); BaseIntegrationTest::initialize(); @@ -40,7 +35,7 @@ name: ratelimit IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("listener_0")); FakeRawConnectionPtr fake_upstream_connection; ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection)); - tcp_client->write("hello"); + ASSERT_TRUE(tcp_client->write("hello")); ASSERT_TRUE(fake_upstream_connection->waitForData(5)); ASSERT_TRUE(fake_upstream_connection->write("world")); tcp_client->waitForData("world"); diff --git a/test/extensions/filters/network/mongo_proxy/BUILD b/test/extensions/filters/network/mongo_proxy/BUILD index aa836af608c96..b8ed6e190b3bb 100644 --- a/test/extensions/filters/network/mongo_proxy/BUILD +++ 
b/test/extensions/filters/network/mongo_proxy/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( @@ -71,7 +71,7 @@ envoy_extension_cc_test( extension_name = "envoy.filters.network.mongo_proxy", deps = [ "//source/extensions/filters/network/mongo_proxy:config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "//test/test_common:utility_lib", "@envoy_api//envoy/extensions/filters/network/mongo_proxy/v3:pkg_cc_proto", "@envoy_api//envoy/type/v3:pkg_cc_proto", diff --git a/test/extensions/filters/network/mongo_proxy/config_test.cc b/test/extensions/filters/network/mongo_proxy/config_test.cc index bab4fcc446619..35f46fad6cbc1 100644 --- a/test/extensions/filters/network/mongo_proxy/config_test.cc +++ b/test/extensions/filters/network/mongo_proxy/config_test.cc @@ -6,7 +6,7 @@ #include "extensions/filters/network/mongo_proxy/config.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" diff --git a/test/extensions/filters/network/mongo_proxy/utility_test.cc b/test/extensions/filters/network/mongo_proxy/utility_test.cc index cdffb8608dc9b..ad28e35e9cc13 100644 --- a/test/extensions/filters/network/mongo_proxy/utility_test.cc +++ b/test/extensions/filters/network/mongo_proxy/utility_test.cc @@ -137,7 +137,7 @@ TEST(QueryMessageInfoTest, MaxTime) { q.fullCollectionName("db.foo"); q.query(Bson::DocumentImpl::create()); QueryMessageInfo info(q); - EXPECT_EQ(0, info.max_time()); + EXPECT_EQ(0, info.maxTime()); } { @@ -145,7 +145,7 @@ TEST(QueryMessageInfoTest, MaxTime) { q.fullCollectionName("db.foo"); q.query(Bson::DocumentImpl::create()->addInt32("$maxTimeMS", 1212)); QueryMessageInfo info(q); - EXPECT_EQ(1212, info.max_time()); + EXPECT_EQ(1212, 
info.maxTime()); } { @@ -153,7 +153,7 @@ TEST(QueryMessageInfoTest, MaxTime) { q.fullCollectionName("db.foo"); q.query(Bson::DocumentImpl::create()->addInt64("$maxTimeMS", 1212)); QueryMessageInfo info(q); - EXPECT_EQ(1212, info.max_time()); + EXPECT_EQ(1212, info.maxTime()); } { @@ -161,7 +161,7 @@ TEST(QueryMessageInfoTest, MaxTime) { q.fullCollectionName("db.foo"); q.query(Bson::DocumentImpl::create()->addInt64("maxTimeMS", 2400)); QueryMessageInfo info(q); - EXPECT_EQ(2400, info.max_time()); + EXPECT_EQ(2400, info.maxTime()); } } diff --git a/test/extensions/filters/network/mysql_proxy/BUILD b/test/extensions/filters/network/mysql_proxy/BUILD index afdb5f84a2c5f..17e3c8c204d89 100644 --- a/test/extensions/filters/network/mysql_proxy/BUILD +++ b/test/extensions/filters/network/mysql_proxy/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -10,6 +8,8 @@ load( "envoy_extension_cc_test_library", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test_library( @@ -40,6 +40,7 @@ envoy_extension_cc_test( "mysql_filter_test.cc", ], extension_name = "envoy.filters.network.mysql_proxy", + tags = ["fails_on_windows"], deps = [ ":mysql_test_utils_lib", "//source/extensions/filters/network/mysql_proxy:config", @@ -56,6 +57,7 @@ envoy_extension_cc_test( "mysql_test_config.yaml", ], extension_name = "envoy.filters.network.mysql_proxy", + tags = ["fails_on_windows"], deps = [ ":mysql_test_utils_lib", "//source/common/tcp_proxy", diff --git a/test/extensions/filters/network/mysql_proxy/mysql_command_test.cc b/test/extensions/filters/network/mysql_proxy/mysql_command_test.cc index cce430facd773..55cacdc2ab532 100644 --- a/test/extensions/filters/network/mysql_proxy/mysql_command_test.cc +++ b/test/extensions/filters/network/mysql_proxy/mysql_command_test.cc @@ -203,7 +203,7 @@ class MySQLCommandTest : public testing::Test, public MySQLTestUtils { EXPECT_EQ(1UL, result.size()); 
EXPECT_EQ(statement_type, result.getStatement(0)->type()); hsql::TableAccessMap table_access_map; - if (expected_table_access_map.empty()) { + if (expected_table_access_map.empty() && (statement_type == hsql::StatementType::kStmtShow)) { return; } result.getStatement(0)->tablesAccessed(table_access_map); @@ -454,7 +454,8 @@ TEST_F(MySQLCommandTest, MySQLTest20) { std::string command = buildAlter(TestResource::TABLE, table, "add column Id varchar (20)"); hsql::SQLParserResult result; EXPECT_EQ(MYSQL_SUCCESS, encodeQuery(command, result)); - expectStatementTypeAndTableAccessMap(result, hsql::StatementType::kStmtAlter, {}); + expectStatementTypeAndTableAccessMap(result, hsql::StatementType::kStmtAlter, + {{table, {"alter"}}}); } /* diff --git a/test/extensions/filters/network/mysql_proxy/mysql_integration_test.cc b/test/extensions/filters/network/mysql_proxy/mysql_integration_test.cc index d970fdae5ddd7..6bbb5bacab3e6 100644 --- a/test/extensions/filters/network/mysql_proxy/mysql_integration_test.cc +++ b/test/extensions/filters/network/mysql_proxy/mysql_integration_test.cc @@ -27,6 +27,7 @@ class MySQLIntegrationTest : public testing::TestWithParamwrite(login); + ASSERT_TRUE(tcp_client->write(login)); ASSERT_TRUE(fake_upstream_connection->waitForData(login.length(), &rcvd_data)); EXPECT_EQ(login, rcvd_data); @@ -137,7 +131,7 @@ TEST_P(MySQLIntegrationTest, MySQLUnitTestMultiClientsLoop) { // Client username/password and capabilities std::string login = encodeClientLogin(MYSQL_CLIENT_CAPAB_41VS320, user, CHALLENGE_SEQ_NUM); - tcp_client->write(login); + ASSERT_TRUE(tcp_client->write(login)); ASSERT_TRUE(fake_upstream_connection->waitForData(login.length(), &rcvd_data)); EXPECT_EQ(login, rcvd_data); diff --git a/test/extensions/filters/network/mysql_proxy/mysql_test_config.yaml b/test/extensions/filters/network/mysql_proxy/mysql_test_config.yaml index 9fa20096e1633..d65f3e3aea423 100644 --- a/test/extensions/filters/network/mysql_proxy/mysql_test_config.yaml +++ 
b/test/extensions/filters/network/mysql_proxy/mysql_test_config.yaml @@ -1,5 +1,5 @@ admin: - access_log_path: /dev/null + access_log_path: {} address: socket_address: address: "{}" diff --git a/test/extensions/filters/network/postgres_proxy/BUILD b/test/extensions/filters/network/postgres_proxy/BUILD index f319540a50e9c..0e58a294742b2 100644 --- a/test/extensions/filters/network/postgres_proxy/BUILD +++ b/test/extensions/filters/network/postgres_proxy/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -10,6 +8,8 @@ load( "envoy_extension_cc_test_library", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test_library( @@ -57,6 +57,7 @@ envoy_extension_cc_test( "postgres_test_config.yaml", ], extension_name = "envoy.filters.network.postgres_proxy", + tags = ["fails_on_windows"], deps = [ "//source/common/tcp_proxy", "//source/extensions/filters/network/postgres_proxy:config", diff --git a/test/extensions/filters/network/postgres_proxy/postgres_decoder_test.cc b/test/extensions/filters/network/postgres_proxy/postgres_decoder_test.cc index 0714e5fd99749..aa2d9ff2c7b7b 100644 --- a/test/extensions/filters/network/postgres_proxy/postgres_decoder_test.cc +++ b/test/extensions/filters/network/postgres_proxy/postgres_decoder_test.cc @@ -23,6 +23,7 @@ class DecoderCallbacksMock : public DecoderCallbacks { MOCK_METHOD(void, incTransactionsRollback, (), (override)); MOCK_METHOD(void, incNotices, (NoticeType), (override)); MOCK_METHOD(void, incErrors, (ErrorType), (override)); + MOCK_METHOD(void, processQuery, (const std::string&), (override)); }; // Define fixture class with decoder and mock callbacks. 
@@ -40,7 +41,7 @@ class PostgresProxyDecoderTestBase { // fields often used Buffer::OwnedImpl data_; - char buf_[256]; + char buf_[256]{}; std::string payload_; }; @@ -75,12 +76,36 @@ class PostgresProxyNoticeTest TEST_F(PostgresProxyDecoderTest, StartupMessage) { decoder_->setStartup(true); - // Start with length. - data_.writeBEInt(12); - // Add 8 bytes of some data. - data_.add(buf_, 8); + buf_[0] = '\0'; + // Startup message has the following structure: + // Length (4 bytes) - payload and length field + // version (4 bytes) + // Attributes: key/value pairs separated by '\0' + data_.writeBEInt(53); + // Add version code + data_.writeBEInt(0x00030000); + // user-postgres key-pair + data_.add("user"); // 4 bytes + data_.add(buf_, 1); + data_.add("postgres"); // 8 bytes + data_.add(buf_, 1); + // database-test-db key-pair + data_.add("database"); // 8 bytes + data_.add(buf_, 1); + data_.add("testdb"); // 6 bytes + data_.add(buf_, 1); + // Some other attribute + data_.add("attribute"); // 9 bytes + data_.add(buf_, 1); + data_.add("blah"); // 4 bytes + data_.add(buf_, 1); decoder_->onData(data_, true); ASSERT_THAT(data_.length(), 0); + // Verify parsing attributes + ASSERT_THAT(decoder_->getAttributes().at("user"), "postgres"); + ASSERT_THAT(decoder_->getAttributes().at("database"), "testdb"); + // This attribute should not be found + ASSERT_THAT(decoder_->getAttributes().find("no"), decoder_->getAttributes().end()); // Now feed normal message with 1bytes as command. data_.add("P"); @@ -91,6 +116,40 @@ TEST_F(PostgresProxyDecoderTest, StartupMessage) { ASSERT_THAT(data_.length(), 0); } +// Test verifies that when Startup message does not carry +// "database" attribute, it is derived from "user". 
+TEST_F(PostgresProxyDecoderTest, StartupMessageNoAttr) { + decoder_->setStartup(true); + + buf_[0] = '\0'; + // Startup message has the following structure: + // Length (4 bytes) - payload and length field + // version (4 bytes) + // Attributes: key/value pairs separated by '\0' + data_.writeBEInt(37); + // Add version code + data_.writeBEInt(0x00030000); + // user-postgres key-pair + data_.add("user"); // 4 bytes + data_.add(buf_, 1); + data_.add("postgres"); // 8 bytes + data_.add(buf_, 1); + // database-test-db key-pair + // Some other attribute + data_.add("attribute"); // 9 bytes + data_.add(buf_, 1); + data_.add("blah"); // 4 bytes + data_.add(buf_, 1); + decoder_->onData(data_, true); + ASSERT_THAT(data_.length(), 0); + + // Verify parsing attributes + ASSERT_THAT(decoder_->getAttributes().at("user"), "postgres"); + ASSERT_THAT(decoder_->getAttributes().at("database"), "postgres"); + // This attribute should not be found + ASSERT_THAT(decoder_->getAttributes().find("no"), decoder_->getAttributes().end()); +} + // Test processing messages which map 1:1 with buffer. // The buffer contains just a single entire message and // nothing more. @@ -181,7 +240,7 @@ TEST_F(PostgresProxyDecoderTest, Unknown) { // Test if each frontend command calls incMessagesFrontend() method. 
TEST_P(PostgresProxyFrontendDecoderTest, FrontendInc) { EXPECT_CALL(callbacks_, incMessagesFrontend()).Times(1); - createPostgresMsg(data_, GetParam(), "Some message just to create payload"); + createPostgresMsg(data_, GetParam(), "SELECT 1;"); decoder_->onData(data_, true); } @@ -206,6 +265,43 @@ TEST_F(PostgresProxyFrontendDecoderTest, TerminateMessage) { ASSERT_FALSE(decoder_->getSession().inTransaction()); } +// Query message should invoke filter's callback message +TEST_F(PostgresProxyFrontendDecoderTest, QueryMessage) { + EXPECT_CALL(callbacks_, processQuery).Times(1); + createPostgresMsg(data_, "Q", "SELECT * FROM whatever;"); + decoder_->onData(data_, true); +} + +// Parse message has optional Query name which may be in front of actual +// query statement. This test verifies that both formats are processed +// correctly. +TEST_F(PostgresProxyFrontendDecoderTest, ParseMessage) { + std::string query = "SELECT * FROM whatever;"; + std::string query_name, query_params; + + // Should be called twice with the same query. + EXPECT_CALL(callbacks_, processQuery(query)).Times(2); + + // Set params to be zero. + query_params.reserve(2); + query_params += '\0'; + query_params += '\0'; + + // Message without optional query name. + query_name.reserve(1); + query_name += '\0'; + createPostgresMsg(data_, "P", query_name + query + query_params); + decoder_->onData(data_, true); + + // Message with optional name query_name + query_name.clear(); + query_name.reserve(5); + query_name += "P0_8"; + query_name += '\0'; + createPostgresMsg(data_, "P", query_name + query + query_params); + decoder_->onData(data_, true); +} + // Test if each backend command calls incMessagesBackend()) method. 
TEST_P(PostgresProxyBackendDecoderTest, BackendInc) { EXPECT_CALL(callbacks_, incMessagesBackend()).Times(1); diff --git a/test/extensions/filters/network/postgres_proxy/postgres_filter_test.cc b/test/extensions/filters/network/postgres_proxy/postgres_filter_test.cc index c44d32bf94c32..5536189b84fae 100644 --- a/test/extensions/filters/network/postgres_proxy/postgres_filter_test.cc +++ b/test/extensions/filters/network/postgres_proxy/postgres_filter_test.cc @@ -4,6 +4,7 @@ #include #include "extensions/filters/network/postgres_proxy/postgres_filter.h" +#include "extensions/filters/network/well_known_names.h" #include "test/extensions/filters/network/postgres_proxy/postgres_test_utils.h" #include "test/mocks/network/mocks.h" @@ -13,6 +14,7 @@ namespace Extensions { namespace NetworkFilters { namespace PostgresProxy { +using testing::ReturnRef; using ::testing::WithArgs; // Decoder mock. @@ -29,17 +31,30 @@ class PostgresFilterTest std::function>> { public: PostgresFilterTest() { - config_ = std::make_shared(stat_prefix_, scope_); + config_ = std::make_shared(stat_prefix_, true, scope_); filter_ = std::make_unique(config_); filter_->initializeReadFilterCallbacks(filter_callbacks_); } + void setMetadata() { + EXPECT_CALL(filter_callbacks_, connection()).WillRepeatedly(ReturnRef(connection_)); + EXPECT_CALL(connection_, streamInfo()).WillRepeatedly(ReturnRef(stream_info_)); + ON_CALL(stream_info_, setDynamicMetadata(NetworkFilterNames::get().PostgresProxy, _)) + .WillByDefault(Invoke([this](const std::string&, const ProtobufWkt::Struct& obj) { + stream_info_.metadata_.mutable_filter_metadata()->insert( + Protobuf::MapPair( + NetworkFilterNames::get().PostgresProxy, obj)); + })); + } + Stats::IsolatedStoreImpl scope_; std::string stat_prefix_{"test."}; std::unique_ptr filter_; PostgresFilterConfigSharedPtr config_; NiceMock filter_callbacks_; + NiceMock connection_; + NiceMock stream_info_; // These variables are used internally in tests. 
Buffer::OwnedImpl data_; @@ -238,6 +253,60 @@ TEST_F(PostgresFilterTest, EncryptedSessionStats) { ASSERT_THAT(filter_->getStats().sessions_encrypted_.value(), 1); } +// Test verifies that incorrect SQL statement does not create +// Postgres metadata. +TEST_F(PostgresFilterTest, MetadataIncorrectSQL) { + // Pretend that startup message has been received. + static_cast(filter_->getDecoder())->setStartup(false); + setMetadata(); + + createPostgresMsg(data_, "Q", "BLAH blah blah"); + filter_->onData(data_, false); + + // SQL statement was wrong. No metadata should have been created. + ASSERT_THAT(filter_->connection().streamInfo().dynamicMetadata().filter_metadata().contains( + NetworkFilterNames::get().PostgresProxy), + false); + ASSERT_THAT(filter_->getStats().statements_parse_error_.value(), 1); + ASSERT_THAT(filter_->getStats().statements_parsed_.value(), 0); +} + +// Test verifies that Postgres metadata is created for correct SQL statement. +// and it happens only when parse_sql flag is true. +TEST_F(PostgresFilterTest, QueryMessageMetadata) { + // Pretend that startup message has been received. + static_cast(filter_->getDecoder())->setStartup(false); + setMetadata(); + + // Disable creating parsing SQL and creating metadata. + filter_->getConfig()->enable_sql_parsing_ = false; + createPostgresMsg(data_, "Q", "SELECT * FROM whatever"); + filter_->onData(data_, false); + + ASSERT_THAT(filter_->connection().streamInfo().dynamicMetadata().filter_metadata().contains( + NetworkFilterNames::get().PostgresProxy), + false); + ASSERT_THAT(filter_->getStats().statements_parse_error_.value(), 0); + ASSERT_THAT(filter_->getStats().statements_parsed_.value(), 0); + + // Now enable SQL parsing and creating metadata. 
+ filter_->getConfig()->enable_sql_parsing_ = true; + filter_->onData(data_, false); + + auto& filter_meta = filter_->connection().streamInfo().dynamicMetadata().filter_metadata().at( + NetworkFilterNames::get().PostgresProxy); + auto& fields = filter_meta.fields(); + + ASSERT_THAT(fields.size(), 1); + ASSERT_THAT(fields.contains("whatever"), true); + + const auto& operations = fields.at("whatever").list_value(); + ASSERT_EQ("select", operations.values(0).string_value()); + + ASSERT_THAT(filter_->getStats().statements_parse_error_.value(), 0); + ASSERT_THAT(filter_->getStats().statements_parsed_.value(), 1); +} + } // namespace PostgresProxy } // namespace NetworkFilters } // namespace Extensions diff --git a/test/extensions/filters/network/postgres_proxy/postgres_integration_test.cc b/test/extensions/filters/network/postgres_proxy/postgres_integration_test.cc index 02229fc1ea1e8..cd96f73d61af5 100644 --- a/test/extensions/filters/network/postgres_proxy/postgres_integration_test.cc +++ b/test/extensions/filters/network/postgres_proxy/postgres_integration_test.cc @@ -1,5 +1,3 @@ -#include - #include "test/integration/fake_upstream.h" #include "test/integration/integration.h" #include "test/integration/utility.h" @@ -21,7 +19,7 @@ class PostgresIntegrationTest : public testing::TestWithParam(12); // Add 8 bytes of some data. 
data.add(buf, 8); - tcp_client->write(data.toString()); + ASSERT_TRUE(tcp_client->write(data.toString())); ASSERT_TRUE(fake_upstream_connection->waitForData(data.toString().length(), &rcvd)); data.drain(data.length()); diff --git a/test/extensions/filters/network/postgres_proxy/postgres_test_config.yaml b/test/extensions/filters/network/postgres_proxy/postgres_test_config.yaml index 16202b9027a29..6c2877ebc56c1 100644 --- a/test/extensions/filters/network/postgres_proxy/postgres_test_config.yaml +++ b/test/extensions/filters/network/postgres_proxy/postgres_test_config.yaml @@ -1,5 +1,5 @@ admin: - access_log_path: /dev/null + access_log_path: {} address: socket_address: address: "{}" diff --git a/test/extensions/filters/network/ratelimit/BUILD b/test/extensions/filters/network/ratelimit/BUILD index c1905934b4b0c..99a0f31eb85b8 100644 --- a/test/extensions/filters/network/ratelimit/BUILD +++ b/test/extensions/filters/network/ratelimit/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( @@ -35,7 +35,8 @@ envoy_extension_cc_test( extension_name = "envoy.filters.network.ratelimit", deps = [ "//source/extensions/filters/network/ratelimit:config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", + "//test/mocks/server:instance_mocks", "//test/test_common:utility_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/network/ratelimit/v3:pkg_cc_proto", diff --git a/test/extensions/filters/network/ratelimit/config_test.cc b/test/extensions/filters/network/ratelimit/config_test.cc index 9920c9dbfe1e3..ab6b01d753c89 100644 --- a/test/extensions/filters/network/ratelimit/config_test.cc +++ b/test/extensions/filters/network/ratelimit/config_test.cc @@ -4,7 +4,8 @@ #include 
"extensions/filters/network/ratelimit/config.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" +#include "test/mocks/server/instance.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" @@ -75,12 +76,12 @@ domain: fake_domain - entries: - key: my_key value: my_value -ip_white_list: '12' +ip_allowlist: '12' )EOF"; envoy::extensions::filters::network::ratelimit::v3::RateLimit proto_config; EXPECT_THROW_WITH_REGEX(TestUtility::loadFromYaml(yaml_string, proto_config), EnvoyException, - "ip_white_list: Cannot find field"); + "ip_allowlist: Cannot find field"); } // Test that the deprecated extension name still functions. diff --git a/test/extensions/filters/network/ratelimit/ratelimit_test.cc b/test/extensions/filters/network/ratelimit/ratelimit_test.cc index e77f6080cba8d..ac64a1d6d1087 100644 --- a/test/extensions/filters/network/ratelimit/ratelimit_test.cc +++ b/test/extensions/filters/network/ratelimit/ratelimit_test.cc @@ -40,7 +40,7 @@ class RateLimitFilterTest : public testing::Test { .WillByDefault(Return(true)); envoy::extensions::filters::network::ratelimit::v3::RateLimit proto_config{}; - TestUtility::loadFromYaml(yaml, proto_config); + TestUtility::loadFromYaml(yaml, proto_config, false, true); config_ = std::make_shared(proto_config, stats_store_, runtime_); client_ = new Filters::Common::RateLimit::MockClient(); filter_ = std::make_unique(config_, Filters::Common::RateLimit::ClientPtr{client_}); @@ -114,7 +114,8 @@ TEST_F(RateLimitFilterTest, OK) { EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onData(data, false)); EXPECT_CALL(filter_callbacks_, continueReading()); - request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OK, nullptr, nullptr); + request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OK, nullptr, nullptr, + nullptr); EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(data, false)); @@ -141,7 +142,7 @@ TEST_F(RateLimitFilterTest, OverLimit) { 
EXPECT_CALL(filter_callbacks_.connection_, close(Network::ConnectionCloseType::NoFlush)); EXPECT_CALL(*client_, cancel()).Times(0); - request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OverLimit, nullptr, + request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OverLimit, nullptr, nullptr, nullptr); EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(data, false)); @@ -170,7 +171,7 @@ TEST_F(RateLimitFilterTest, OverLimitNotEnforcing) { EXPECT_CALL(filter_callbacks_.connection_, close(_)).Times(0); EXPECT_CALL(*client_, cancel()).Times(0); EXPECT_CALL(filter_callbacks_, continueReading()); - request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OverLimit, nullptr, + request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OverLimit, nullptr, nullptr, nullptr); EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(data, false)); @@ -195,7 +196,8 @@ TEST_F(RateLimitFilterTest, Error) { EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onData(data, false)); EXPECT_CALL(filter_callbacks_, continueReading()); - request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::Error, nullptr, nullptr); + request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::Error, nullptr, nullptr, + nullptr); EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(data, false)); @@ -235,7 +237,8 @@ TEST_F(RateLimitFilterTest, ImmediateOK) { EXPECT_CALL(*client_, limit(_, "foo", _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { - callbacks.complete(Filters::Common::RateLimit::LimitStatus::OK, nullptr, nullptr); + callbacks.complete(Filters::Common::RateLimit::LimitStatus::OK, nullptr, nullptr, + nullptr); }))); EXPECT_EQ(Network::FilterStatus::Continue, filter_->onNewConnection()); @@ -258,7 +261,8 @@ TEST_F(RateLimitFilterTest, ImmediateError) { EXPECT_CALL(*client_, limit(_, "foo", _, _)) .WillOnce( 
WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { - callbacks.complete(Filters::Common::RateLimit::LimitStatus::Error, nullptr, nullptr); + callbacks.complete(Filters::Common::RateLimit::LimitStatus::Error, nullptr, nullptr, + nullptr); }))); EXPECT_EQ(Network::FilterStatus::Continue, filter_->onNewConnection()); @@ -300,7 +304,8 @@ TEST_F(RateLimitFilterTest, ErrorResponseWithFailureModeAllowOff) { EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onNewConnection()); Buffer::OwnedImpl data("hello"); EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onData(data, false)); - request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::Error, nullptr, nullptr); + request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::Error, nullptr, nullptr, + nullptr); EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(data, false)); diff --git a/test/extensions/filters/network/rbac/BUILD b/test/extensions/filters/network/rbac/BUILD index 9e2c4fec27fda..d4db04fee9725 100644 --- a/test/extensions/filters/network/rbac/BUILD +++ b/test/extensions/filters/network/rbac/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( @@ -17,7 +17,7 @@ envoy_extension_cc_test( extension_name = "envoy.filters.network.rbac", deps = [ "//source/extensions/filters/network/rbac:config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "@envoy_api//envoy/config/rbac/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/network/rbac/v3:pkg_cc_proto", ], @@ -41,6 +41,7 @@ envoy_extension_cc_test( name = "integration_test", srcs = ["integration_test.cc"], extension_name = "envoy.filters.network.rbac", + tags = ["fails_on_windows"], deps = [ "//source/extensions/filters/network/echo:config", 
"//source/extensions/filters/network/rbac:config", diff --git a/test/extensions/filters/network/rbac/config_test.cc b/test/extensions/filters/network/rbac/config_test.cc index 06eb5b30a1824..ace7d65fb7b15 100644 --- a/test/extensions/filters/network/rbac/config_test.cc +++ b/test/extensions/filters/network/rbac/config_test.cc @@ -4,7 +4,7 @@ #include "extensions/filters/network/rbac/config.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "fmt/printf.h" #include "gmock/gmock.h" @@ -68,9 +68,8 @@ TEST_F(RoleBasedAccessControlNetworkFilterConfigFactoryTest, ValidProto) { TEST_F(RoleBasedAccessControlNetworkFilterConfigFactoryTest, EmptyProto) { RoleBasedAccessControlNetworkFilterConfigFactory factory; - auto* config = dynamic_cast( - factory.createEmptyConfigProto().get()); - EXPECT_NE(nullptr, config); + EXPECT_NE(nullptr, dynamic_cast( + factory.createEmptyConfigProto().get())); } TEST_F(RoleBasedAccessControlNetworkFilterConfigFactoryTest, InvalidPermission) { diff --git a/test/extensions/filters/network/rbac/filter_test.cc b/test/extensions/filters/network/rbac/filter_test.cc index 2e8fd2642da33..fe042854baa7a 100644 --- a/test/extensions/filters/network/rbac/filter_test.cc +++ b/test/extensions/filters/network/rbac/filter_test.cc @@ -22,8 +22,10 @@ namespace RBACFilter { class RoleBasedAccessControlNetworkFilterTest : public testing::Test { public: - RoleBasedAccessControlFilterConfigSharedPtr setupConfig(bool with_policy = true, - bool continuous = false) { + RoleBasedAccessControlFilterConfigSharedPtr + setupConfig(bool with_policy = true, bool continuous = false, + envoy::config::rbac::v3::RBAC::Action action = envoy::config::rbac::v3::RBAC::ALLOW) { + envoy::extensions::filters::network::rbac::v3::RBAC config; config.set_stat_prefix("tcp."); @@ -34,7 +36,7 @@ class RoleBasedAccessControlNetworkFilterTest : public testing::Test { ".*cncf.io"); policy_rules->add_rules()->set_destination_port(123); 
policy.add_principals()->set_any(true); - config.mutable_rules()->set_action(envoy::config::rbac::v3::RBAC::ALLOW); + config.mutable_rules()->set_action(action); (*config.mutable_rules()->mutable_policies())["foo"] = policy; envoy::config::rbac::v3::Policy shadow_policy; @@ -42,7 +44,7 @@ class RoleBasedAccessControlNetworkFilterTest : public testing::Test { shadow_policy_rules->add_rules()->mutable_requested_server_name()->set_exact("xyz.cncf.io"); shadow_policy_rules->add_rules()->set_destination_port(456); shadow_policy.add_principals()->set_any(true); - config.mutable_shadow_rules()->set_action(envoy::config::rbac::v3::RBAC::ALLOW); + config.mutable_shadow_rules()->set_action(action); (*config.mutable_shadow_rules()->mutable_policies())["bar"] = shadow_policy; } @@ -72,6 +74,15 @@ class RoleBasedAccessControlNetworkFilterTest : public testing::Test { .WillByDefault(Return(requested_server_name_)); } + void checkAccessLogMetadata(bool expected) { + auto filter_meta = stream_info_.dynamicMetadata().filter_metadata().at( + Filters::Common::RBAC::DynamicMetadataKeysSingleton::get().CommonNamespace); + EXPECT_EQ(expected, + filter_meta.fields() + .at(Filters::Common::RBAC::DynamicMetadataKeysSingleton::get().AccessLogKey) + .bool_value()); + } + void setMetadata() { ON_CALL(stream_info_, setDynamicMetadata(NetworkFilterNames::get().Rbac, _)) .WillByDefault(Invoke([this](const std::string&, const ProtobufWkt::Struct& obj) { @@ -79,6 +90,15 @@ class RoleBasedAccessControlNetworkFilterTest : public testing::Test { Protobuf::MapPair(NetworkFilterNames::get().Rbac, obj)); })); + + ON_CALL(stream_info_, + setDynamicMetadata( + Filters::Common::RBAC::DynamicMetadataKeysSingleton::get().CommonNamespace, _)) + .WillByDefault(Invoke([this](const std::string&, const ProtobufWkt::Struct& obj) { + stream_info_.metadata_.mutable_filter_metadata()->insert( + Protobuf::MapPair( + Filters::Common::RBAC::DynamicMetadataKeysSingleton::get().CommonNamespace, obj)); + })); } NiceMock 
callbacks_; @@ -173,6 +193,49 @@ TEST_F(RoleBasedAccessControlNetworkFilterTest, Denied) { EXPECT_EQ("allowed", filter_meta.fields().at("shadow_engine_result").string_value()); } +// Log Tests +TEST_F(RoleBasedAccessControlNetworkFilterTest, ShouldLog) { + config_ = setupConfig(true, false, envoy::config::rbac::v3::RBAC::LOG); + filter_ = std::make_unique(config_); + filter_->initializeReadFilterCallbacks(callbacks_); + + setDestinationPort(123); + setMetadata(); + + EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(data_, false)); + EXPECT_EQ(1U, config_->stats().allowed_.value()); + EXPECT_EQ(0U, config_->stats().shadow_denied_.value()); + + checkAccessLogMetadata(true); +} + +TEST_F(RoleBasedAccessControlNetworkFilterTest, ShouldNotLog) { + config_ = setupConfig(true, false, envoy::config::rbac::v3::RBAC::LOG); + filter_ = std::make_unique(config_); + filter_->initializeReadFilterCallbacks(callbacks_); + + setDestinationPort(456); + setMetadata(); + + EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(data_, false)); + EXPECT_EQ(1U, config_->stats().allowed_.value()); + EXPECT_EQ(0U, config_->stats().shadow_denied_.value()); + + checkAccessLogMetadata(false); +} + +TEST_F(RoleBasedAccessControlNetworkFilterTest, AllowNoChangeLog) { + setDestinationPort(123); + setMetadata(); + + EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(data_, false)); + + // Check that Allow action does not set access log metadata + EXPECT_EQ(stream_info_.dynamicMetadata().filter_metadata().end(), + stream_info_.dynamicMetadata().filter_metadata().find( + Filters::Common::RBAC::DynamicMetadataKeysSingleton::get().CommonNamespace)); +} + } // namespace RBACFilter } // namespace NetworkFilters } // namespace Extensions diff --git a/test/extensions/filters/network/rbac/integration_test.cc b/test/extensions/filters/network/rbac/integration_test.cc index 571ba94b678fb..7f90867687b59 100644 --- a/test/extensions/filters/network/rbac/integration_test.cc +++ 
b/test/extensions/filters/network/rbac/integration_test.cc @@ -60,11 +60,6 @@ class RoleBasedAccessControlNetworkFilterIntegrationTest BaseIntegrationTest::initialize(); } - - void TearDown() override { - test_server_.reset(); - fake_upstreams_.clear(); - } }; INSTANTIATE_TEST_SUITE_P(IpVersions, RoleBasedAccessControlNetworkFilterIntegrationTest, @@ -94,7 +89,7 @@ name: rbac any: true )EOF"); IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("listener_0")); - tcp_client->write("hello"); + ASSERT_TRUE(tcp_client->write("hello")); ASSERT_TRUE(tcp_client->connected()); tcp_client->close(); @@ -127,7 +122,7 @@ name: rbac - any: true )EOF"); IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("listener_0")); - tcp_client->write("hello"); + ASSERT_TRUE(tcp_client->write("hello", false, false)); tcp_client->waitForDisconnect(); EXPECT_EQ(0U, test_server_->counter("tcp.rbac.allowed")->value()); diff --git a/test/extensions/filters/network/redis_proxy/BUILD b/test/extensions/filters/network/redis_proxy/BUILD index eb74d4d17bc84..13980d9b57caf 100644 --- a/test/extensions/filters/network/redis_proxy/BUILD +++ b/test/extensions/filters/network/redis_proxy/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", @@ -12,6 +10,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( @@ -97,7 +97,7 @@ envoy_extension_cc_test( deps = [ "//source/common/protobuf:utility_lib", "//source/extensions/filters/network/redis_proxy:config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "//test/test_common:test_runtime_lib", "@envoy_api//envoy/extensions/filters/network/redis_proxy/v3:pkg_cc_proto", ], @@ -145,6 +145,7 @@ envoy_extension_cc_test( name = "redis_proxy_integration_test", srcs = ["redis_proxy_integration_test.cc"], extension_name = "envoy.filters.network.redis_proxy", + tags = 
["fails_on_windows"], deps = [ "//source/extensions/filters/network/redis_proxy:config", "//test/integration:integration_lib", diff --git a/test/extensions/filters/network/redis_proxy/command_lookup_speed_test.cc b/test/extensions/filters/network/redis_proxy/command_lookup_speed_test.cc index a6ccc4f1b59d2..edf29c9730921 100644 --- a/test/extensions/filters/network/redis_proxy/command_lookup_speed_test.cc +++ b/test/extensions/filters/network/redis_proxy/command_lookup_speed_test.cc @@ -28,6 +28,7 @@ class NoOpSplitCallbacks : public CommandSplitter::SplitCallbacks { bool connectionAllowed() override { return true; } void onAuth(const std::string&) override {} + void onAuth(const std::string&, const std::string&) override {} void onResponse(Common::Redis::RespValuePtr&&) override {} }; diff --git a/test/extensions/filters/network/redis_proxy/command_splitter_impl_test.cc b/test/extensions/filters/network/redis_proxy/command_splitter_impl_test.cc index b8020b2a29c1b..097cb3d49f4c2 100644 --- a/test/extensions/filters/network/redis_proxy/command_splitter_impl_test.cc +++ b/test/extensions/filters/network/redis_proxy/command_splitter_impl_test.cc @@ -485,6 +485,7 @@ class FragmentedRequestCommandHandlerTest : public RedisCommandSplitterImplTest EXPECT_CALL(callbacks_, connectionAllowed()).WillOnce(Return(true)); + std::vector dummy_requests(num_gets); for (uint32_t i = 0; i < num_gets; i++) { Common::Redis::Client::PoolRequest* request_to_use = nullptr; if (std::find(null_handle_indexes.begin(), null_handle_indexes.end(), i) == @@ -494,7 +495,7 @@ class FragmentedRequestCommandHandlerTest : public RedisCommandSplitterImplTest Common::Redis::Client::PoolRequest* mirror_request_to_use = nullptr; if (std::find(null_handle_indexes.begin(), null_handle_indexes.end(), i) == null_handle_indexes.end()) { - mirror_request_to_use = &mirror_request_to_use[i]; + mirror_request_to_use = &dummy_requests[i]; } EXPECT_CALL(*conn_pool_, makeRequest_(std::to_string(i), 
CompositeArrayEq(expected_requests_[i]), _)) diff --git a/test/extensions/filters/network/redis_proxy/config_test.cc b/test/extensions/filters/network/redis_proxy/config_test.cc index 9270cc637c99e..a9043af8cd6eb 100644 --- a/test/extensions/filters/network/redis_proxy/config_test.cc +++ b/test/extensions/filters/network/redis_proxy/config_test.cc @@ -5,7 +5,7 @@ #include "extensions/filters/network/redis_proxy/config.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "test/test_common/test_runtime.h" #include "gmock/gmock.h" @@ -85,7 +85,7 @@ stat_prefix: foo )EOF"; envoy::extensions::filters::network::redis_proxy::v3::RedisProxy proto_config{}; - TestUtility::loadFromYamlAndValidate(yaml, proto_config); + TestUtility::loadFromYamlAndValidate(yaml, proto_config, true, false); NiceMock context; RedisProxyFilterConfigFactory factory; Network::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, context); @@ -114,7 +114,7 @@ stat_prefix: foo )EOF"; envoy::extensions::filters::network::redis_proxy::v3::RedisProxy proto_config{}; - TestUtility::loadFromYamlAndValidate(yaml, proto_config); + TestUtility::loadFromYamlAndValidate(yaml, proto_config, true, false); NiceMock context; RedisProxyFilterConfigFactory factory; Network::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, context); diff --git a/test/extensions/filters/network/redis_proxy/conn_pool_impl_test.cc b/test/extensions/filters/network/redis_proxy/conn_pool_impl_test.cc index 639a9b8313c88..8b56ba1f695f3 100644 --- a/test/extensions/filters/network/redis_proxy/conn_pool_impl_test.cc +++ b/test/extensions/filters/network/redis_proxy/conn_pool_impl_test.cc @@ -78,12 +78,14 @@ class RedisConnPoolImplTest : public testing::Test, public Common::Redis::Client std::make_shared>(); auto redis_command_stats = Common::Redis::RedisCommandStats::createRedisCommandStats(store->symbolTable()); - std::unique_ptr conn_pool_impl = 
std::make_unique( + std::shared_ptr conn_pool_impl = std::make_shared( cluster_name_, cm_, *this, tls_, Common::Redis::Client::createConnPoolSettings(20, hashtagging, true, max_unknown_conns, read_policy_), api_, std::move(store), redis_command_stats, cluster_refresh_manager_); + conn_pool_impl->init(); // Set the authentication password for this connection pool. + conn_pool_impl->tls_->getTyped().auth_username_ = auth_username_; conn_pool_impl->tls_->getTyped().auth_password_ = auth_password_; conn_pool_ = std::move(conn_pool_impl); test_address_ = Network::Utility::resolveUrl("tcp://127.0.0.1:3000"); @@ -132,7 +134,7 @@ class RedisConnPoolImplTest : public testing::Test, public Common::Redis::Client EXPECT_CALL(cm_.thread_local_cluster_.lb_, chooseHost(_)) .WillOnce( Invoke([&](Upstream::LoadBalancerContext* context) -> Upstream::HostConstSharedPtr { - EXPECT_EQ(context->computeHashKey().value(), MurmurHash::murmurHash2_64("hash_key")); + EXPECT_EQ(context->computeHashKey().value(), MurmurHash::murmurHash2("hash_key")); EXPECT_EQ(context->metadataMatchCriteria(), nullptr); EXPECT_EQ(context->downstreamConnection(), nullptr); return this->cm_.thread_local_cluster_.lb_.host_; @@ -148,7 +150,7 @@ class RedisConnPoolImplTest : public testing::Test, public Common::Redis::Client EXPECT_NE(nullptr, request); } - std::unordered_map& + absl::node_hash_map& clientMap() { InstanceImpl* conn_pool_impl = dynamic_cast(conn_pool_.get()); return conn_pool_impl->tls_->getTyped().client_map_; @@ -159,7 +161,7 @@ class RedisConnPoolImplTest : public testing::Test, public Common::Redis::Client return conn_pool_impl->tls_->getTyped().client_map_[host].get(); } - std::unordered_map& hostAddressMap() { + absl::node_hash_map& hostAddressMap() { InstanceImpl* conn_pool_impl = dynamic_cast(conn_pool_.get()); return conn_pool_impl->tls_->getTyped().host_address_map_; } @@ -175,6 +177,11 @@ class RedisConnPoolImplTest : public testing::Test, public Common::Redis::Client return 
conn_pool_impl->tls_->getTyped().clients_to_drain_; } + InstanceImpl::ThreadLocalPool& threadLocalPool() { + InstanceImpl* conn_pool_impl = dynamic_cast(conn_pool_.get()); + return conn_pool_impl->tls_->getTyped(); + } + Event::TimerPtr& drainTimer() { InstanceImpl* conn_pool_impl = dynamic_cast(conn_pool_.get()); return conn_pool_impl->tls_->getTyped().drain_timer_; @@ -199,7 +206,9 @@ class RedisConnPoolImplTest : public testing::Test, public Common::Redis::Client Common::Redis::Client::ClientPtr create(Upstream::HostConstSharedPtr host, Event::Dispatcher&, const Common::Redis::Client::Config&, const Common::Redis::RedisCommandStatsSharedPtr&, - Stats::Scope&, const std::string& password) override { + Stats::Scope&, const std::string& username, + const std::string& password) override { + EXPECT_EQ(auth_username_, username); EXPECT_EQ(auth_password_, password); return Common::Redis::Client::ClientPtr{create_(host)}; } @@ -221,7 +230,7 @@ class RedisConnPoolImplTest : public testing::Test, public Common::Redis::Client EXPECT_CALL(cm_.thread_local_cluster_.lb_, chooseHost(_)) .WillOnce( Invoke([&](Upstream::LoadBalancerContext* context) -> Upstream::HostConstSharedPtr { - EXPECT_EQ(context->computeHashKey().value(), MurmurHash::murmurHash2_64("hash_key")); + EXPECT_EQ(context->computeHashKey().value(), MurmurHash::murmurHash2("hash_key")); EXPECT_EQ(context->metadataMatchCriteria(), nullptr); EXPECT_EQ(context->downstreamConnection(), nullptr); auto redis_context = @@ -273,6 +282,7 @@ class RedisConnPoolImplTest : public testing::Test, public Common::Redis::Client Upstream::ClusterUpdateCallbacks* update_callbacks_{}; Common::Redis::Client::MockClient* client_{}; Network::Address::InstanceConstSharedPtr test_address_; + std::string auth_username_; std::string auth_password_; NiceMock api_; envoy::extensions::filters::network::redis_proxy::v3::RedisProxy::ConnPoolSettings::ReadPolicy @@ -296,7 +306,7 @@ TEST_F(RedisConnPoolImplTest, Basic) { 
EXPECT_CALL(cm_.thread_local_cluster_.lb_, chooseHost(_)) .WillOnce(Invoke([&](Upstream::LoadBalancerContext* context) -> Upstream::HostConstSharedPtr { - EXPECT_EQ(context->computeHashKey().value(), MurmurHash::murmurHash2_64("hash_key")); + EXPECT_EQ(context->computeHashKey().value(), MurmurHash::murmurHash2("hash_key")); EXPECT_EQ(context->metadataMatchCriteria(), nullptr); EXPECT_EQ(context->downstreamConnection(), nullptr); return cm_.thread_local_cluster_.lb_.host_; @@ -327,7 +337,7 @@ TEST_F(RedisConnPoolImplTest, BasicRespVariant) { EXPECT_CALL(cm_.thread_local_cluster_.lb_, chooseHost(_)) .WillOnce(Invoke([&](Upstream::LoadBalancerContext* context) -> Upstream::HostConstSharedPtr { - EXPECT_EQ(context->computeHashKey().value(), MurmurHash::murmurHash2_64("hash_key")); + EXPECT_EQ(context->computeHashKey().value(), MurmurHash::murmurHash2("hash_key")); EXPECT_EQ(context->metadataMatchCriteria(), nullptr); EXPECT_EQ(context->downstreamConnection(), nullptr); return cm_.thread_local_cluster_.lb_.host_; @@ -357,7 +367,7 @@ TEST_F(RedisConnPoolImplTest, ClientRequestFailed) { EXPECT_CALL(cm_.thread_local_cluster_.lb_, chooseHost(_)) .WillOnce(Invoke([&](Upstream::LoadBalancerContext* context) -> Upstream::HostConstSharedPtr { - EXPECT_EQ(context->computeHashKey().value(), MurmurHash::murmurHash2_64("hash_key")); + EXPECT_EQ(context->computeHashKey().value(), MurmurHash::murmurHash2("hash_key")); EXPECT_EQ(context->metadataMatchCriteria(), nullptr); EXPECT_EQ(context->downstreamConnection(), nullptr); return cm_.thread_local_cluster_.lb_.host_; @@ -378,7 +388,7 @@ TEST_F(RedisConnPoolImplTest, ClientRequestFailed) { TEST_F(RedisConnPoolImplTest, BasicWithReadPolicy) { testReadPolicy(envoy::extensions::filters::network::redis_proxy::v3::RedisProxy:: ConnPoolSettings::PREFER_MASTER, - NetworkFilters::Common::Redis::Client::ReadPolicy::PreferMaster); + NetworkFilters::Common::Redis::Client::ReadPolicy::PreferPrimary); testReadPolicy( 
envoy::extensions::filters::network::redis_proxy::v3::RedisProxy::ConnPoolSettings::REPLICA, NetworkFilters::Common::Redis::Client::ReadPolicy::Replica); @@ -400,7 +410,7 @@ TEST_F(RedisConnPoolImplTest, Hashtagging) { auto expectHashKey = [](const std::string& s) { return [s](Upstream::LoadBalancerContext* context) -> Upstream::HostConstSharedPtr { - EXPECT_EQ(context->computeHashKey().value(), MurmurHash::murmurHash2_64(s)); + EXPECT_EQ(context->computeHashKey().value(), MurmurHash::murmurHash2(s)); return nullptr; }; }; @@ -431,7 +441,7 @@ TEST_F(RedisConnPoolImplTest, HashtaggingNotEnabled) { auto expectHashKey = [](const std::string& s) { return [s](Upstream::LoadBalancerContext* context) -> Upstream::HostConstSharedPtr { - EXPECT_EQ(context->computeHashKey().value(), MurmurHash::murmurHash2_64(s)); + EXPECT_EQ(context->computeHashKey().value(), MurmurHash::murmurHash2(s)); return nullptr; }; }; @@ -621,10 +631,6 @@ TEST_F(RedisConnPoolImplTest, RemoteClose) { } TEST_F(RedisConnPoolImplTest, MakeRequestToHost) { - InSequence s; - - setup(false); - Common::Redis::RespValue value; Common::Redis::Client::MockPoolRequest active_request1; Common::Redis::Client::MockPoolRequest active_request2; @@ -635,48 +641,55 @@ TEST_F(RedisConnPoolImplTest, MakeRequestToHost) { Upstream::HostConstSharedPtr host1; Upstream::HostConstSharedPtr host2; - // There is no cluster yet, so makeRequestToHost() should fail. - EXPECT_EQ(nullptr, conn_pool_->makeRequestToHost("10.0.0.1:3000", value, callbacks1)); - // Add the cluster now. 
- update_callbacks_->onClusterAddOrUpdate(cm_.thread_local_cluster_); - - EXPECT_CALL(*this, create_(_)).WillOnce(DoAll(SaveArg<0>(&host1), Return(client1))); - EXPECT_CALL(*client1, makeRequest_(Ref(value), Ref(callbacks1))) - .WillOnce(Return(&active_request1)); - Common::Redis::Client::PoolRequest* request1 = - conn_pool_->makeRequestToHost("10.0.0.1:3000", value, callbacks1); - EXPECT_EQ(&active_request1, request1); - EXPECT_EQ(host1->address()->asString(), "10.0.0.1:3000"); - - // IPv6 address returned from Redis server will not have square brackets - // around it, while Envoy represents Address::Ipv6Instance addresses with square brackets around - // the address. - EXPECT_CALL(*this, create_(_)).WillOnce(DoAll(SaveArg<0>(&host2), Return(client2))); - EXPECT_CALL(*client2, makeRequest_(Ref(value), Ref(callbacks2))) - .WillOnce(Return(&active_request2)); - Common::Redis::Client::PoolRequest* request2 = - conn_pool_->makeRequestToHost("2001:470:813B:0:0:0:0:1:3333", value, callbacks2); - EXPECT_EQ(&active_request2, request2); - EXPECT_EQ(host2->address()->asString(), "[2001:470:813b::1]:3333"); + { + InSequence s; - // Test with a badly specified host address (no colon, no address, no port). - EXPECT_EQ(conn_pool_->makeRequestToHost("bad", value, callbacks1), nullptr); - // Test with a badly specified IPv4 address. - EXPECT_EQ(conn_pool_->makeRequestToHost("10.0.bad:3000", value, callbacks1), nullptr); - // Test with a badly specified TCP port. - EXPECT_EQ(conn_pool_->makeRequestToHost("10.0.0.1:bad", value, callbacks1), nullptr); - // Test with a TCP port outside of the acceptable range for a 32-bit integer. - EXPECT_EQ(conn_pool_->makeRequestToHost("10.0.0.1:4294967297", value, callbacks1), - nullptr); // 2^32 + 1 - // Test with a TCP port outside of the acceptable range for a TCP port (0 .. 65535). - EXPECT_EQ(conn_pool_->makeRequestToHost("10.0.0.1:65536", value, callbacks1), nullptr); - // Test with a badly specified IPv6-like address. 
- EXPECT_EQ(conn_pool_->makeRequestToHost("bad:ipv6:3000", value, callbacks1), nullptr); - // Test with a valid IPv6 address and a badly specified TCP port (out of range). - EXPECT_EQ(conn_pool_->makeRequestToHost("2001:470:813b:::70000", value, callbacks1), nullptr); + setup(false); + + // There is no cluster yet, so makeRequestToHost() should fail. + EXPECT_EQ(nullptr, conn_pool_->makeRequestToHost("10.0.0.1:3000", value, callbacks1)); + // Add the cluster now. + update_callbacks_->onClusterAddOrUpdate(cm_.thread_local_cluster_); + + EXPECT_CALL(*this, create_(_)).WillOnce(DoAll(SaveArg<0>(&host1), Return(client1))); + EXPECT_CALL(*client1, makeRequest_(Ref(value), Ref(callbacks1))) + .WillOnce(Return(&active_request1)); + Common::Redis::Client::PoolRequest* request1 = + conn_pool_->makeRequestToHost("10.0.0.1:3000", value, callbacks1); + EXPECT_EQ(&active_request1, request1); + EXPECT_EQ(host1->address()->asString(), "10.0.0.1:3000"); + + // IPv6 address returned from Redis server will not have square brackets + // around it, while Envoy represents Address::Ipv6Instance addresses with square brackets around + // the address. + EXPECT_CALL(*this, create_(_)).WillOnce(DoAll(SaveArg<0>(&host2), Return(client2))); + EXPECT_CALL(*client2, makeRequest_(Ref(value), Ref(callbacks2))) + .WillOnce(Return(&active_request2)); + Common::Redis::Client::PoolRequest* request2 = + conn_pool_->makeRequestToHost("2001:470:813B:0:0:0:0:1:3333", value, callbacks2); + EXPECT_EQ(&active_request2, request2); + EXPECT_EQ(host2->address()->asString(), "[2001:470:813b::1]:3333"); + + // Test with a badly specified host address (no colon, no address, no port). + EXPECT_EQ(conn_pool_->makeRequestToHost("bad", value, callbacks1), nullptr); + // Test with a badly specified IPv4 address. + EXPECT_EQ(conn_pool_->makeRequestToHost("10.0.bad:3000", value, callbacks1), nullptr); + // Test with a badly specified TCP port. 
+ EXPECT_EQ(conn_pool_->makeRequestToHost("10.0.0.1:bad", value, callbacks1), nullptr); + // Test with a TCP port outside of the acceptable range for a 32-bit integer. + EXPECT_EQ(conn_pool_->makeRequestToHost("10.0.0.1:4294967297", value, callbacks1), + nullptr); // 2^32 + 1 + // Test with a TCP port outside of the acceptable range for a TCP port (0 .. 65535). + EXPECT_EQ(conn_pool_->makeRequestToHost("10.0.0.1:65536", value, callbacks1), nullptr); + // Test with a badly specified IPv6-like address. + EXPECT_EQ(conn_pool_->makeRequestToHost("bad:ipv6:3000", value, callbacks1), nullptr); + // Test with a valid IPv6 address and a badly specified TCP port (out of range). + EXPECT_EQ(conn_pool_->makeRequestToHost("2001:470:813b:::70000", value, callbacks1), nullptr); + } - EXPECT_CALL(*client2, close()); + // We cannot guarantee which order close will be called, perform these checks unsequenced EXPECT_CALL(*client1, close()); + EXPECT_CALL(*client2, close()); tls_.shutdownThread(); } @@ -731,7 +744,7 @@ TEST_F(RedisConnPoolImplTest, HostsAddedAndRemovedWithDraining) { EXPECT_EQ(&active_request2, request2); EXPECT_EQ(host2->address()->asString(), "[2001:470:813b::1]:3333"); - std::unordered_map& host_address_map = + absl::node_hash_map& host_address_map = hostAddressMap(); EXPECT_EQ(host_address_map.size(), 2); // host1 and host2 have been created. EXPECT_EQ(host_address_map[host1->address()->asString()], host1); @@ -830,7 +843,7 @@ TEST_F(RedisConnPoolImplTest, HostsAddedAndEndWithNoDraining) { EXPECT_EQ(&active_request2, request2); EXPECT_EQ(host2->address()->asString(), "[2001:470:813b::1]:3333"); - std::unordered_map& host_address_map = + absl::node_hash_map& host_address_map = hostAddressMap(); EXPECT_EQ(host_address_map.size(), 2); // host1 and host2 have been created. 
EXPECT_EQ(host_address_map[host1->address()->asString()], host1); @@ -908,7 +921,7 @@ TEST_F(RedisConnPoolImplTest, HostsAddedAndEndWithClusterRemoval) { EXPECT_EQ(&active_request2, request2); EXPECT_EQ(host2->address()->asString(), "[2001:470:813b::1]:3333"); - std::unordered_map& host_address_map = + absl::node_hash_map& host_address_map = hostAddressMap(); EXPECT_EQ(host_address_map.size(), 2); // host1 and host2 have been created. EXPECT_EQ(host_address_map[host1->address()->asString()], host1); @@ -1152,6 +1165,61 @@ TEST_F(RedisConnPoolImplTest, AskRedirectionFailure) { tls_.shutdownThread(); } +TEST_F(RedisConnPoolImplTest, MakeRequestAndRedirectFollowedByDelete) { + tls_.defer_delete = true; + std::unique_ptr> store = + std::make_unique>(); + cluster_refresh_manager_ = + std::make_shared>(); + auto redis_command_stats = + Common::Redis::RedisCommandStats::createRedisCommandStats(store->symbolTable()); + conn_pool_ = std::make_shared( + cluster_name_, cm_, *this, tls_, + Common::Redis::Client::createConnPoolSettings(20, true, true, 100, read_policy_), api_, + std::move(store), redis_command_stats, cluster_refresh_manager_); + conn_pool_->init(); + + auto& local_pool = threadLocalPool(); + conn_pool_.reset(); + + // Request + Common::Redis::Client::MockClient* client = new NiceMock(); + Common::Redis::RespValueSharedPtr value = std::make_shared(); + Common::Redis::Client::MockPoolRequest active_request; + MockPoolCallbacks callbacks; + EXPECT_CALL(cm_.thread_local_cluster_.lb_, chooseHost(_)) + .WillOnce(Invoke([&](Upstream::LoadBalancerContext* context) -> Upstream::HostConstSharedPtr { + EXPECT_EQ(context->computeHashKey().value(), MurmurHash::murmurHash2("hash_key")); + EXPECT_EQ(context->metadataMatchCriteria(), nullptr); + EXPECT_EQ(context->downstreamConnection(), nullptr); + return this->cm_.thread_local_cluster_.lb_.host_; + })); + EXPECT_CALL(*this, create_(_)).WillOnce(Return(client)); + EXPECT_CALL(*cm_.thread_local_cluster_.lb_.host_, address()) + 
.WillRepeatedly(Return(this->test_address_)); + EXPECT_CALL(*client, makeRequest_(Ref(*value), _)).WillOnce(Return(&active_request)); + EXPECT_NE(nullptr, local_pool.makeRequest("hash_key", value, callbacks)); + + // Move redirection. + Common::Redis::Client::MockPoolRequest active_request2; + Common::Redis::Client::MockClient* client2 = new NiceMock(); + Upstream::HostConstSharedPtr host1; + Common::Redis::RespValuePtr moved_response{new Common::Redis::RespValue()}; + moved_response->type(Common::Redis::RespType::Error); + moved_response->asString() = "MOVED 1111 10.1.2.3:4000"; + + EXPECT_CALL(*this, create_(_)).WillOnce(DoAll(SaveArg<0>(&host1), Return(client2))); + EXPECT_CALL(*client2, makeRequest_(Ref(*value), _)).WillOnce(Return(&active_request2)); + EXPECT_TRUE(client->client_callbacks_.back()->onRedirection(std::move(moved_response), + "10.1.2.3:4000", false)); + EXPECT_EQ(host1->address()->asString(), "10.1.2.3:4000"); + EXPECT_CALL(callbacks, onResponse_(_)); + client2->client_callbacks_.back()->onResponse(std::make_unique()); + + EXPECT_CALL(*client, close()); + tls_.shutdownThread(); +} + } // namespace ConnPool } // namespace RedisProxy } // namespace NetworkFilters diff --git a/test/extensions/filters/network/redis_proxy/mocks.h b/test/extensions/filters/network/redis_proxy/mocks.h index 5bb208bfa9019..b093ad35b9b94 100644 --- a/test/extensions/filters/network/redis_proxy/mocks.h +++ b/test/extensions/filters/network/redis_proxy/mocks.h @@ -101,6 +101,7 @@ class MockSplitCallbacks : public SplitCallbacks { MOCK_METHOD(bool, connectionAllowed, ()); MOCK_METHOD(void, onAuth, (const std::string& password)); + MOCK_METHOD(void, onAuth, (const std::string& username, const std::string& password)); MOCK_METHOD(void, onResponse_, (Common::Redis::RespValuePtr & value)); }; diff --git a/test/extensions/filters/network/redis_proxy/proxy_filter_test.cc b/test/extensions/filters/network/redis_proxy/proxy_filter_test.cc index 72cebf97fcd28..f094c02b665a0 100644 
--- a/test/extensions/filters/network/redis_proxy/proxy_filter_test.cc +++ b/test/extensions/filters/network/redis_proxy/proxy_filter_test.cc @@ -63,6 +63,7 @@ TEST_F(RedisProxyFilterConfigTest, Normal) { parseProtoFromYaml(yaml_string); ProxyFilterConfig config(proto_config, store_, drain_decision_, runtime_, api_); EXPECT_EQ("redis.foo.", config.stat_prefix_); + EXPECT_TRUE(config.downstream_auth_username_.empty()); EXPECT_TRUE(config.downstream_auth_password_.empty()); } @@ -93,6 +94,27 @@ TEST_F(RedisProxyFilterConfigTest, DownstreamAuthPasswordSet) { EXPECT_EQ(config.downstream_auth_password_, "somepassword"); } +TEST_F(RedisProxyFilterConfigTest, DownstreamAuthAclSet) { + const std::string yaml_string = R"EOF( + prefix_routes: + catch_all_route: + cluster: fake_cluster + stat_prefix: foo + settings: + op_timeout: 0.01s + downstream_auth_username: + inline_string: someusername + downstream_auth_password: + inline_string: somepassword + )EOF"; + + envoy::extensions::filters::network::redis_proxy::v3::RedisProxy proto_config = + parseProtoFromYaml(yaml_string); + ProxyFilterConfig config(proto_config, store_, drain_decision_, runtime_, api_); + EXPECT_EQ(config.downstream_auth_username_, "someusername"); + EXPECT_EQ(config.downstream_auth_password_, "somepassword"); +} + class RedisProxyFilterTest : public testing::Test, public Common::Redis::DecoderFactory { public: static constexpr const char* DefaultConfig = R"EOF( @@ -310,6 +332,33 @@ TEST_F(RedisProxyFilterTest, AuthWhenNotRequired) { EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(fake_data, false)); } +TEST_F(RedisProxyFilterTest, AuthAclWhenNotRequired) { + InSequence s; + + Buffer::OwnedImpl fake_data; + Common::Redis::RespValuePtr request(new Common::Redis::RespValue()); + EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void { + decoder_callbacks_->onRespValue(std::move(request)); + })); + EXPECT_CALL(splitter_, makeRequest_(Ref(*request), _)) + 
.WillOnce( + Invoke([&](const Common::Redis::RespValue&, + CommandSplitter::SplitCallbacks& callbacks) -> CommandSplitter::SplitRequest* { + EXPECT_TRUE(callbacks.connectionAllowed()); + Common::Redis::RespValuePtr error(new Common::Redis::RespValue()); + error->type(Common::Redis::RespType::Error); + error->asString() = "ERR Client sent AUTH, but no username-password pair is set"; + EXPECT_CALL(*encoder_, encode(Eq(ByRef(*error)), _)); + EXPECT_CALL(filter_callbacks_.connection_, write(_, _)); + callbacks.onAuth("foo", "bar"); + // callbacks cannot be accessed now. + EXPECT_TRUE(filter_->connectionAllowed()); + return nullptr; + })); + + EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(fake_data, false)); +} + const std::string downstream_auth_password_config = R"EOF( prefix_routes: catch_all_route: @@ -380,6 +429,105 @@ TEST_F(RedisProxyFilterWithAuthPasswordTest, AuthPasswordIncorrect) { EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(fake_data, false)); } +const std::string downstream_auth_acl_config = R"EOF( +prefix_routes: + catch_all_route: + cluster: fake_cluster +stat_prefix: foo +settings: + op_timeout: 0.01s +downstream_auth_username: + inline_string: someusername +downstream_auth_password: + inline_string: somepassword +)EOF"; + +class RedisProxyFilterWithAuthAclTest : public RedisProxyFilterTest { +public: + RedisProxyFilterWithAuthAclTest() : RedisProxyFilterTest(downstream_auth_acl_config) {} +}; + +TEST_F(RedisProxyFilterWithAuthAclTest, AuthAclCorrect) { + InSequence s; + + Buffer::OwnedImpl fake_data; + Common::Redis::RespValuePtr request(new Common::Redis::RespValue()); + EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void { + decoder_callbacks_->onRespValue(std::move(request)); + })); + EXPECT_CALL(splitter_, makeRequest_(Ref(*request), _)) + .WillOnce( + Invoke([&](const Common::Redis::RespValue&, + CommandSplitter::SplitCallbacks& callbacks) -> CommandSplitter::SplitRequest* { + 
EXPECT_FALSE(callbacks.connectionAllowed()); + Common::Redis::RespValuePtr reply(new Common::Redis::RespValue()); + reply->type(Common::Redis::RespType::SimpleString); + reply->asString() = "OK"; + EXPECT_CALL(*encoder_, encode(Eq(ByRef(*reply)), _)); + EXPECT_CALL(filter_callbacks_.connection_, write(_, _)); + callbacks.onAuth("someusername", "somepassword"); + // callbacks cannot be accessed now. + EXPECT_TRUE(filter_->connectionAllowed()); + return nullptr; + })); + + EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(fake_data, false)); +} + +TEST_F(RedisProxyFilterWithAuthAclTest, AuthAclUsernameIncorrect) { + InSequence s; + + Buffer::OwnedImpl fake_data; + Common::Redis::RespValuePtr request(new Common::Redis::RespValue()); + EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void { + decoder_callbacks_->onRespValue(std::move(request)); + })); + EXPECT_CALL(splitter_, makeRequest_(Ref(*request), _)) + .WillOnce( + Invoke([&](const Common::Redis::RespValue&, + CommandSplitter::SplitCallbacks& callbacks) -> CommandSplitter::SplitRequest* { + EXPECT_FALSE(callbacks.connectionAllowed()); + Common::Redis::RespValuePtr reply(new Common::Redis::RespValue()); + reply->type(Common::Redis::RespType::Error); + reply->asString() = "WRONGPASS invalid username-password pair"; + EXPECT_CALL(*encoder_, encode(Eq(ByRef(*reply)), _)); + EXPECT_CALL(filter_callbacks_.connection_, write(_, _)); + callbacks.onAuth("wrongusername", "somepassword"); + // callbacks cannot be accessed now. 
+ EXPECT_FALSE(filter_->connectionAllowed()); + return nullptr; + })); + + EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(fake_data, false)); +} + +TEST_F(RedisProxyFilterWithAuthAclTest, AuthAclPasswordIncorrect) { + InSequence s; + + Buffer::OwnedImpl fake_data; + Common::Redis::RespValuePtr request(new Common::Redis::RespValue()); + EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void { + decoder_callbacks_->onRespValue(std::move(request)); + })); + EXPECT_CALL(splitter_, makeRequest_(Ref(*request), _)) + .WillOnce( + Invoke([&](const Common::Redis::RespValue&, + CommandSplitter::SplitCallbacks& callbacks) -> CommandSplitter::SplitRequest* { + EXPECT_FALSE(callbacks.connectionAllowed()); + Common::Redis::RespValuePtr reply(new Common::Redis::RespValue()); + reply->type(Common::Redis::RespType::Error); + reply->asString() = "WRONGPASS invalid username-password pair"; + EXPECT_CALL(*encoder_, encode(Eq(ByRef(*reply)), _)); + EXPECT_CALL(filter_callbacks_.connection_, write(_, _)); + callbacks.onAuth("someusername", "wrongpassword"); + // callbacks cannot be accessed now. + EXPECT_FALSE(filter_->connectionAllowed()); + return nullptr; + })); + + EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(fake_data, false)); +} + } // namespace RedisProxy } // namespace NetworkFilters } // namespace Extensions diff --git a/test/extensions/filters/network/redis_proxy/redis_proxy_integration_test.cc b/test/extensions/filters/network/redis_proxy/redis_proxy_integration_test.cc index b1e33acfe8203..8d9a09c6b464d 100644 --- a/test/extensions/filters/network/redis_proxy/redis_proxy_integration_test.cc +++ b/test/extensions/filters/network/redis_proxy/redis_proxy_integration_test.cc @@ -16,9 +16,9 @@ namespace { // in the cluster. The load balancing policy must be set // to random for proper test operation. 
-const std::string CONFIG = R"EOF( +const std::string CONFIG = fmt::format(R"EOF( admin: - access_log_path: /dev/null + access_log_path: {} address: socket_address: address: 127.0.0.1 @@ -59,7 +59,8 @@ const std::string CONFIG = R"EOF( cluster: cluster_0 settings: op_timeout: 5s -)EOF"; +)EOF", + TestEnvironment::nullDevicePath()); // This is a configuration with command stats enabled. const std::string CONFIG_WITH_COMMAND_STATS = CONFIG + R"EOF( @@ -77,9 +78,9 @@ const std::string CONFIG_WITH_BATCHING = CONFIG + R"EOF( buffer_flush_timeout: 0.003s )EOF"; -const std::string CONFIG_WITH_ROUTES_BASE = R"EOF( +const std::string CONFIG_WITH_ROUTES_BASE = fmt::format(R"EOF( admin: - access_log_path: /dev/null + access_log_path: {} address: socket_address: address: 127.0.0.1 @@ -114,12 +115,12 @@ const std::string CONFIG_WITH_ROUTES_BASE = R"EOF( address: socket_address: address: 127.0.0.1 - port_value: 1 + port_value: 0 - endpoint: address: socket_address: address: 127.0.0.1 - port_value: 1 + port_value: 0 - name: cluster_2 type: STATIC lb_policy: RANDOM @@ -131,12 +132,12 @@ const std::string CONFIG_WITH_ROUTES_BASE = R"EOF( address: socket_address: address: 127.0.0.1 - port_value: 2 + port_value: 0 - endpoint: address: socket_address: address: 127.0.0.1 - port_value: 2 + port_value: 0 listeners: name: listener_0 address: @@ -151,7 +152,8 @@ const std::string CONFIG_WITH_ROUTES_BASE = R"EOF( stat_prefix: redis_stats settings: op_timeout: 5s -)EOF"; +)EOF", + TestEnvironment::nullDevicePath()); const std::string CONFIG_WITH_ROUTES = CONFIG_WITH_ROUTES_BASE + R"EOF( prefix_routes: @@ -192,9 +194,10 @@ const std::string CONFIG_WITH_DOWNSTREAM_AUTH_PASSWORD_SET = CONFIG + R"EOF( downstream_auth_password: { inline_string: somepassword } )EOF"; -const std::string CONFIG_WITH_ROUTES_AND_AUTH_PASSWORDS = R"EOF( +const std::string CONFIG_WITH_ROUTES_AND_AUTH_PASSWORDS = + fmt::format(R"EOF( admin: - access_log_path: /dev/null + access_log_path: {} address: socket_address: 
address: 127.0.0.1 @@ -206,7 +209,7 @@ const std::string CONFIG_WITH_ROUTES_AND_AUTH_PASSWORDS = R"EOF( typed_extension_protocol_options: envoy.filters.network.redis_proxy: "@type": type.googleapis.com/envoy.config.filter.network.redis_proxy.v2.RedisProtocolOptions - auth_password: { inline_string: cluster_0_password } + auth_password: {{ inline_string: cluster_0_password }} lb_policy: RANDOM load_assignment: cluster_name: cluster_0 @@ -223,7 +226,7 @@ const std::string CONFIG_WITH_ROUTES_AND_AUTH_PASSWORDS = R"EOF( typed_extension_protocol_options: envoy.filters.network.redis_proxy: "@type": type.googleapis.com/envoy.config.filter.network.redis_proxy.v2.RedisProtocolOptions - auth_password: { inline_string: cluster_1_password } + auth_password: {{ inline_string: cluster_1_password }} load_assignment: cluster_name: cluster_1 endpoints: @@ -232,13 +235,13 @@ const std::string CONFIG_WITH_ROUTES_AND_AUTH_PASSWORDS = R"EOF( address: socket_address: address: 127.0.0.1 - port_value: 1 + port_value: 0 - name: cluster_2 type: STATIC typed_extension_protocol_options: envoy.filters.network.redis_proxy: "@type": type.googleapis.com/envoy.config.filter.network.redis_proxy.v2.RedisProtocolOptions - auth_password: { inline_string: cluster_2_password } + auth_password: {{ inline_string: cluster_2_password }} lb_policy: RANDOM load_assignment: cluster_name: cluster_2 @@ -248,7 +251,7 @@ const std::string CONFIG_WITH_ROUTES_AND_AUTH_PASSWORDS = R"EOF( address: socket_address: address: 127.0.0.1 - port_value: 2 + port_value: 0 listeners: name: listener_0 address: @@ -271,7 +274,8 @@ const std::string CONFIG_WITH_ROUTES_AND_AUTH_PASSWORDS = R"EOF( cluster: cluster_1 - prefix: "baz:" cluster: cluster_2 -)EOF"; +)EOF", + TestEnvironment::nullDevicePath()); // This function encodes commands as an array of bulkstrings as transmitted by Redis clients to // Redis servers, according to the Redis protocol. 
@@ -294,11 +298,6 @@ class RedisProxyIntegrationTest : public testing::TestWithParamclearData(); - redis_client->write(request); + ASSERT_TRUE(redis_client->write(request)); expectUpstreamRequestResponse(upstream, request, response, fake_upstream_connection, - auth_password); + auth_username, auth_password); redis_client->waitForData(response); // The original response should be received by the fake Redis client. @@ -494,7 +496,8 @@ void RedisProxyIntegrationTest::roundtripToUpstreamStep( void RedisProxyIntegrationTest::expectUpstreamRequestResponse( FakeUpstreamPtr& upstream, const std::string& request, const std::string& response, - FakeRawConnectionPtr& fake_upstream_connection, const std::string& auth_password) { + FakeRawConnectionPtr& fake_upstream_connection, const std::string& auth_username, + const std::string& auth_password) { std::string proxy_to_server; bool expect_auth_command = false; std::string ok = "+OK\r\n"; @@ -504,7 +507,9 @@ void RedisProxyIntegrationTest::expectUpstreamRequestResponse( EXPECT_TRUE(upstream->waitForRawConnection(fake_upstream_connection)); } if (expect_auth_command) { - std::string auth_command = makeBulkStringArray({"auth", auth_password}); + std::string auth_command = (auth_username.empty()) + ? makeBulkStringArray({"auth", auth_password}) + : makeBulkStringArray({"auth", auth_username, auth_password}); EXPECT_TRUE(fake_upstream_connection->waitForData(auth_command.size() + request.size(), &proxy_to_server)); // The original request should be the same as the data received by the server. 
@@ -527,7 +532,8 @@ void RedisProxyIntegrationTest::simpleRoundtripToUpstream(FakeUpstreamPtr& upstr IntegrationTcpClientPtr redis_client = makeTcpConnection(lookupPort("redis_proxy")); FakeRawConnectionPtr fake_upstream_connection; - roundtripToUpstreamStep(upstream, request, response, redis_client, fake_upstream_connection, ""); + roundtripToUpstreamStep(upstream, request, response, redis_client, fake_upstream_connection, "", + ""); EXPECT_TRUE(fake_upstream_connection->close()); redis_client->close(); @@ -537,7 +543,7 @@ void RedisProxyIntegrationTest::proxyResponseStep(const std::string& request, const std::string& proxy_response, IntegrationTcpClientPtr& redis_client) { redis_client->clearData(); - redis_client->write(request); + ASSERT_TRUE(redis_client->write(request)); redis_client->waitForData(proxy_response); // After sending the request to the proxy, the fake redis client should receive proxy_response. EXPECT_EQ(proxy_response, redis_client->data()); @@ -558,7 +564,7 @@ void RedisProxyWithRedirectionIntegrationTest::simpleRedirection( bool asking = (redirection_response.find("-ASK") != std::string::npos); std::string proxy_to_server; IntegrationTcpClientPtr redis_client = makeTcpConnection(lookupPort("redis_proxy")); - redis_client->write(request); + ASSERT_TRUE(redis_client->write(request)); FakeRawConnectionPtr fake_upstream_connection_1, fake_upstream_connection_2; @@ -621,15 +627,16 @@ TEST_P(RedisProxyWithCommandStatsIntegrationTest, MGETRequestAndResponse) { // Make MGET request from downstream IntegrationTcpClientPtr redis_client = makeTcpConnection(lookupPort("redis_proxy")); redis_client->clearData(); - redis_client->write(request); + ASSERT_TRUE(redis_client->write(request)); // Make GET request to upstream (MGET is turned into GETs for upstream) FakeUpstreamPtr& upstream = fake_upstreams_[0]; FakeRawConnectionPtr fake_upstream_connection; + std::string auth_username = ""; std::string auth_password = ""; std::string upstream_request = 
makeBulkStringArray({"get", "foo"}); expectUpstreamRequestResponse(upstream, upstream_request, upstream_response, - fake_upstream_connection, auth_password); + fake_upstream_connection, auth_username, auth_password); // Downstream response for MGET redis_client->waitForData(downstream_response); @@ -785,7 +792,7 @@ TEST_P(RedisProxyWithRedirectionIntegrationTest, ConnectionFailureBeforeAskingRe std::string proxy_to_server; IntegrationTcpClientPtr redis_client = makeTcpConnection(lookupPort("redis_proxy")); - redis_client->write(request); + ASSERT_TRUE(redis_client->write(request)); FakeRawConnectionPtr fake_upstream_connection_1, fake_upstream_connection_2; @@ -848,8 +855,8 @@ TEST_P(RedisProxyWithBatchingIntegrationTest, SimpleBatching) { std::string proxy_to_server; IntegrationTcpClientPtr redis_client_1 = makeTcpConnection(lookupPort("redis_proxy")); IntegrationTcpClientPtr redis_client_2 = makeTcpConnection(lookupPort("redis_proxy")); - redis_client_1->write(request); - redis_client_2->write(request); + ASSERT_TRUE(redis_client_1->write(request)); + ASSERT_TRUE(redis_client_2->write(request)); FakeRawConnectionPtr fake_upstream_connection; EXPECT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection)); @@ -915,7 +922,7 @@ TEST_P(RedisProxyWithDownstreamAuthIntegrationTest, ErrorsUntilCorrectPasswordSe proxyResponseStep(makeBulkStringArray({"auth", "somepassword"}), "+OK\r\n", redis_client); roundtripToUpstreamStep(fake_upstreams_[0], makeBulkStringArray({"get", "foo"}), "$3\r\nbar\r\n", - redis_client, fake_upstream_connection, ""); + redis_client, fake_upstream_connection, "", ""); EXPECT_TRUE(fake_upstream_connection->close()); redis_client->close(); @@ -932,16 +939,16 @@ TEST_P(RedisProxyWithRoutesAndAuthPasswordsIntegrationTest, TransparentAuthentic // roundtrip to cluster_0 (catch_all route) roundtripToUpstreamStep(fake_upstreams_[0], makeBulkStringArray({"get", "toto"}), "$3\r\nbar\r\n", - redis_client, fake_upstream_connection[0], 
"cluster_0_password"); + redis_client, fake_upstream_connection[0], "", "cluster_0_password"); // roundtrip to cluster_1 (prefix "foo:" route) roundtripToUpstreamStep(fake_upstreams_[1], makeBulkStringArray({"get", "foo:123"}), - "$3\r\nbar\r\n", redis_client, fake_upstream_connection[1], + "$3\r\nbar\r\n", redis_client, fake_upstream_connection[1], "", "cluster_1_password"); // roundtrip to cluster_2 (prefix "baz:" route) roundtripToUpstreamStep(fake_upstreams_[2], makeBulkStringArray({"get", "baz:123"}), - "$3\r\nbar\r\n", redis_client, fake_upstream_connection[2], + "$3\r\nbar\r\n", redis_client, fake_upstream_connection[2], "", "cluster_2_password"); EXPECT_TRUE(fake_upstream_connection[0]->close()); @@ -958,7 +965,7 @@ TEST_P(RedisProxyWithMirrorsIntegrationTest, MirroredCatchAllRequest) { const std::string& response = "$3\r\nbar\r\n"; // roundtrip to cluster_0 (catch_all route) IntegrationTcpClientPtr redis_client = makeTcpConnection(lookupPort("redis_proxy")); - redis_client->write(request); + ASSERT_TRUE(redis_client->write(request)); expectUpstreamRequestResponse(fake_upstreams_[0], request, response, fake_upstream_connection[0]); @@ -988,7 +995,7 @@ TEST_P(RedisProxyWithMirrorsIntegrationTest, MirroredWriteOnlyRequest) { // roundtrip to cluster_0 (write_only route) IntegrationTcpClientPtr redis_client = makeTcpConnection(lookupPort("redis_proxy")); - redis_client->write(set_request); + ASSERT_TRUE(redis_client->write(set_request)); expectUpstreamRequestResponse(fake_upstreams_[0], set_request, set_response, fake_upstream_connection[0]); @@ -1015,7 +1022,7 @@ TEST_P(RedisProxyWithMirrorsIntegrationTest, ExcludeReadCommands) { // roundtrip to cluster_0 (write_only route) IntegrationTcpClientPtr redis_client = makeTcpConnection(lookupPort("redis_proxy")); - redis_client->write(get_request); + ASSERT_TRUE(redis_client->write(get_request)); expectUpstreamRequestResponse(fake_upstreams_[0], get_request, get_response, cluster_0_connection); @@ -1041,7 +1048,7 @@ 
TEST_P(RedisProxyWithMirrorsIntegrationTest, EnabledViaRuntimeFraction) { const std::string& response = "$3\r\nbar\r\n"; // roundtrip to cluster_0 (catch_all route) IntegrationTcpClientPtr redis_client = makeTcpConnection(lookupPort("redis_proxy")); - redis_client->write(request); + ASSERT_TRUE(redis_client->write(request)); expectUpstreamRequestResponse(fake_upstreams_[0], request, response, fake_upstream_connection[0]); diff --git a/test/extensions/filters/network/rocketmq_proxy/BUILD b/test/extensions/filters/network/rocketmq_proxy/BUILD new file mode 100644 index 0000000000000..82a70612767f3 --- /dev/null +++ b/test/extensions/filters/network/rocketmq_proxy/BUILD @@ -0,0 +1,136 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_mock", + "envoy_cc_test_library", + "envoy_package", +) +load( + "//test/extensions:extensions_build_system.bzl", + "envoy_extension_cc_test", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_cc_mock( + name = "mocks_lib", + srcs = ["mocks.cc"], + hdrs = ["mocks.h"], + deps = [ + "//source/extensions/filters/network/rocketmq_proxy:config", + "//source/extensions/filters/network/rocketmq_proxy/router:router_lib", + "//test/mocks/upstream:upstream_mocks", + ], +) + +envoy_cc_test_library( + name = "utility_lib", + srcs = ["utility.cc"], + hdrs = ["utility.h"], + deps = [ + "//source/extensions/filters/network/rocketmq_proxy:config", + ], +) + +envoy_extension_cc_test( + name = "protocol_test", + srcs = ["protocol_test.cc"], + extension_name = "envoy.filters.network.rocketmq_proxy", + deps = [ + "//source/extensions/filters/network/rocketmq_proxy:config", + "//test/test_common:utility_lib", + ], +) + +envoy_extension_cc_test( + name = "router_test", + srcs = ["router_test.cc"], + extension_name = "envoy.filters.network.rocketmq_proxy", + deps = [ + ":mocks_lib", + ":utility_lib", + "//source/extensions/filters/network/rocketmq_proxy:config", + "//test/mocks/server:factory_context_mocks", + 
"//test/test_common:utility_lib", + ], +) + +envoy_extension_cc_test( + name = "topic_route_test", + srcs = ["topic_route_test.cc"], + extension_name = "envoy.filters.network.rocketmq_proxy", + deps = [ + "//source/common/protobuf:utility_lib", + "//source/extensions/filters/network/rocketmq_proxy:config", + "//test/test_common:utility_lib", + ], +) + +envoy_extension_cc_test( + name = "conn_manager_test", + srcs = ["conn_manager_test.cc"], + extension_name = "envoy.filters.network.rocketmq_proxy", + deps = [ + ":utility_lib", + "//test/common/stats:stat_test_utility_lib", + "//test/common/upstream:utility_lib", + "//test/mocks/network:network_mocks", + "//test/mocks/server:factory_context_mocks", + "//test/mocks/server:instance_mocks", + "//test/mocks/stream_info:stream_info_mocks", + "//test/test_common:utility_lib", + ], +) + +envoy_extension_cc_test( + name = "active_message_test", + srcs = ["active_message_test.cc"], + extension_name = "envoy.filters.network.rocketmq_proxy", + deps = [ + ":utility_lib", + "//source/extensions/filters/network/rocketmq_proxy:config", + "//test/mocks/network:network_mocks", + "//test/mocks/server:factory_context_mocks", + "//test/mocks/stream_info:stream_info_mocks", + "//test/test_common:utility_lib", + ], +) + +envoy_extension_cc_test( + name = "config_test", + srcs = ["config_test.cc"], + extension_name = "envoy.filters.network.rocketmq_proxy", + deps = [ + "//source/extensions/filters/network/rocketmq_proxy:config", + "//test/mocks/local_info:local_info_mocks", + "//test/mocks/server:factory_context_mocks", + "//test/mocks/server:instance_mocks", + "//test/test_common:registry_lib", + "@envoy_api//envoy/extensions/filters/network/rocketmq_proxy/v3:pkg_cc_proto", + ], +) + +envoy_extension_cc_test( + name = "codec_test", + srcs = ["codec_test.cc"], + extension_name = "envoy.filters.network.rocketmq_proxy", + deps = [ + ":utility_lib", + "//source/common/network:address_lib", + "//source/common/protobuf:utility_lib", + 
"//test/mocks/server:server_mocks", + "//test/test_common:registry_lib", + ], +) + +envoy_extension_cc_test( + name = "route_matcher_test", + srcs = ["route_matcher_test.cc"], + extension_name = "envoy.filters.network.rocketmq_proxy", + deps = [ + "//source/extensions/filters/network/rocketmq_proxy/router:route_matcher", + "//test/test_common:utility_lib", + "@envoy_api//envoy/extensions/filters/network/rocketmq_proxy/v3:pkg_cc_proto", + ], +) diff --git a/test/extensions/filters/network/rocketmq_proxy/active_message_test.cc b/test/extensions/filters/network/rocketmq_proxy/active_message_test.cc new file mode 100644 index 0000000000000..54f01cfaeea04 --- /dev/null +++ b/test/extensions/filters/network/rocketmq_proxy/active_message_test.cc @@ -0,0 +1,209 @@ +#include "extensions/filters/network/rocketmq_proxy/active_message.h" +#include "extensions/filters/network/rocketmq_proxy/config.h" +#include "extensions/filters/network/rocketmq_proxy/conn_manager.h" +#include "extensions/filters/network/rocketmq_proxy/protocol.h" +#include "extensions/filters/network/rocketmq_proxy/well_known_names.h" + +#include "test/extensions/filters/network/rocketmq_proxy/utility.h" +#include "test/mocks/network/mocks.h" +#include "test/mocks/server/factory_context.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::Return; + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RocketmqProxy { + +class ActiveMessageTest : public testing::Test { +public: + ActiveMessageTest() + : stats_(RocketmqFilterStats::generateStats("test.", store_)), + config_(rocketmq_proxy_config_, factory_context_), + connection_manager_(config_, factory_context_.dispatcher().timeSource()) { + connection_manager_.initializeReadFilterCallbacks(filter_callbacks_); + } + + ~ActiveMessageTest() override { + filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList(); + } + +protected: + ConfigImpl::RocketmqProxyConfig rocketmq_proxy_config_; + NiceMock 
filter_callbacks_; + NiceMock factory_context_; + Stats::IsolatedStoreImpl store_; + RocketmqFilterStats stats_; + ConfigImpl config_; + ConnectionManager connection_manager_; +}; + +TEST_F(ActiveMessageTest, ClusterName) { + std::string json = R"EOF( + { + "opaque": 1, + "code": 35, + "version": 1, + "language": "JAVA", + "serializeTypeCurrentRPC": "JSON", + "flag": 0, + "extFields": { + "clientID": "SampleClient_01", + "producerGroup": "PG_Example_01", + "consumerGroup": "CG_001" + } + } + )EOF"; + + Buffer::OwnedImpl buffer; + buffer.writeBEInt(4 + 4 + json.size()); + buffer.writeBEInt(json.size()); + buffer.add(json); + + bool underflow = false; + bool has_error = false; + auto cmd = Decoder::decode(buffer, underflow, has_error); + EXPECT_FALSE(underflow); + EXPECT_FALSE(has_error); + + ActiveMessage activeMessage(connection_manager_, std::move(cmd)); + EXPECT_FALSE(activeMessage.metadata()->hasTopicName()); +} + +TEST_F(ActiveMessageTest, FillBrokerData) { + + absl::node_hash_map address; + address.emplace(0, "1.2.3.4:10911"); + BrokerData broker_data("DefaultCluster", "broker-a", std::move(address)); + + std::vector list; + list.push_back(broker_data); + + ActiveMessage::fillBrokerData(list, "DefaultCluster", "broker-a", 1, "localhost:10911"); + ActiveMessage::fillBrokerData(list, "DefaultCluster", "broker-a", 0, "localhost:10911"); + EXPECT_EQ(1, list.size()); + for (auto& it : list) { + auto& address = it.brokerAddresses(); + EXPECT_EQ(2, address.size()); + EXPECT_STREQ("1.2.3.4:10911", address[0].c_str()); + } +} + +TEST_F(ActiveMessageTest, FillAckMessageDirectiveSuccess) { + RemotingCommandPtr cmd = std::make_unique(); + ActiveMessage active_message(connection_manager_, std::move(cmd)); + + Buffer::OwnedImpl buffer; + // frame length + buffer.writeBEInt(98); + + // magic code + buffer.writeBEInt(enumToSignedInt(MessageVersion::V1)); + + // body CRC + buffer.writeBEInt(1); + + // queue Id + buffer.writeBEInt(2); + + // flag + buffer.writeBEInt(3); + + // 
queue offset + buffer.writeBEInt(4); + + // physical offset + buffer.writeBEInt(5); + + // system flag + buffer.writeBEInt(6); + + // born timestamp + buffer.writeBEInt(7); + + // born host + buffer.writeBEInt(8); + + // born host port + buffer.writeBEInt(9); + + // store timestamp + buffer.writeBEInt(10); + + // store host address ip:port --> long + Network::Address::Ipv4Instance host_address("127.0.0.1", 10911); + const sockaddr_in* sock_addr = reinterpret_cast(host_address.sockAddr()); + buffer.writeBEInt(sock_addr->sin_addr.s_addr); + buffer.writeBEInt(sock_addr->sin_port); + + // re-consume times + buffer.writeBEInt(11); + + // transaction offset + buffer.writeBEInt(12); + + // body size + buffer.writeBEInt(0); + + const std::string topic = "TopicTest"; + + // topic length + buffer.writeBEInt(topic.length()); + + // topic data + buffer.add(topic); + + AckMessageDirective directive("broker-a", 0, connection_manager_.timeSource().monotonicTime()); + const std::string group = "Group"; + active_message.fillAckMessageDirective(buffer, group, topic, directive); + + const std::string fake_topic = "FakeTopic"; + active_message.fillAckMessageDirective(buffer, group, fake_topic, directive); + + EXPECT_EQ(connection_manager_.getAckDirectiveTableForTest().size(), 1); +} + +TEST_F(ActiveMessageTest, RecordPopRouteInfo) { + auto host_description = new NiceMock(); + + auto metadata = std::make_shared(); + ProtobufWkt::Struct topic_route_data; + auto* fields = topic_route_data.mutable_fields(); + + std::string broker_name = "broker-a"; + int32_t broker_id = 0; + + (*fields)[RocketmqConstants::get().ReadQueueNum] = ValueUtil::numberValue(4); + (*fields)[RocketmqConstants::get().WriteQueueNum] = ValueUtil::numberValue(4); + (*fields)[RocketmqConstants::get().ClusterName] = ValueUtil::stringValue("DefaultCluster"); + (*fields)[RocketmqConstants::get().BrokerName] = ValueUtil::stringValue(broker_name); + (*fields)[RocketmqConstants::get().BrokerId] = 
ValueUtil::numberValue(broker_id); + (*fields)[RocketmqConstants::get().Perm] = ValueUtil::numberValue(6); + metadata->mutable_filter_metadata()->insert(Protobuf::MapPair( + NetworkFilterNames::get().RocketmqProxy, topic_route_data)); + + EXPECT_CALL(*host_description, metadata()).WillRepeatedly(Return(metadata)); + + Upstream::HostDescriptionConstSharedPtr host_description_ptr(host_description); + + Buffer::OwnedImpl buffer; + BufferUtility::fillRequestBuffer(buffer, RequestCode::PopMessage); + + bool underflow = false; + bool has_error = false; + + RemotingCommandPtr cmd = Decoder::decode(buffer, underflow, has_error); + ActiveMessage active_message(connection_manager_, std::move(cmd)); + active_message.recordPopRouteInfo(host_description_ptr); + auto custom_header = active_message.downstreamRequest()->typedCustomHeader(); + EXPECT_EQ(custom_header->targetBrokerName(), broker_name); + EXPECT_EQ(custom_header->targetBrokerId(), broker_id); +} + +} // namespace RocketmqProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/network/rocketmq_proxy/codec_test.cc b/test/extensions/filters/network/rocketmq_proxy/codec_test.cc new file mode 100644 index 0000000000000..902584b5b3fc5 --- /dev/null +++ b/test/extensions/filters/network/rocketmq_proxy/codec_test.cc @@ -0,0 +1,799 @@ +#include "common/common/empty_string.h" +#include "common/common/enum_to_int.h" +#include "common/network/address_impl.h" +#include "common/protobuf/utility.h" + +#include "extensions/filters/network/rocketmq_proxy/codec.h" + +#include "test/extensions/filters/network/rocketmq_proxy/utility.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RocketmqProxy { + +class RocketmqCodecTest : public testing::Test { +public: + RocketmqCodecTest() = default; + ~RocketmqCodecTest() override = default; +}; + +TEST_F(RocketmqCodecTest, 
DecodeWithMinFrameSize) { + Buffer::OwnedImpl buffer; + + buffer.add(std::string({'\x00', '\x00', '\x01', '\x8b'})); + buffer.add(std::string({'\x00', '\x00', '\x01', '\x76'})); + + bool underflow = false; + bool has_error = false; + + auto cmd = Decoder::decode(buffer, underflow, has_error); + + EXPECT_TRUE(underflow); + EXPECT_FALSE(has_error); + EXPECT_TRUE(nullptr == cmd); +} + +TEST_F(RocketmqCodecTest, DecodeWithOverMaxFrameSizeData) { + Buffer::OwnedImpl buffer; + + buffer.add(std::string({'\x00', '\x40', '\x00', '\x01'})); + buffer.add(std::string({'\x00', '\x20', '\x00', '\x00', '\x00'})); + + bool underflow = false; + bool has_error = false; + + auto cmd = Decoder::decode(buffer, underflow, has_error); + + EXPECT_FALSE(underflow); + EXPECT_TRUE(has_error); + EXPECT_TRUE(nullptr == cmd); +} + +TEST_F(RocketmqCodecTest, DecodeUnsupportHeaderSerialization) { + Buffer::OwnedImpl buffer; + std::string header = "random text suffices"; + + buffer.writeBEInt(4 + 4 + header.size()); + uint32_t mark = header.size(); + mark |= (1u << 24u); + buffer.writeBEInt(mark); + buffer.add(header); + + bool underflow = false; + bool has_error = false; + + auto cmd = Decoder::decode(buffer, underflow, has_error); + + EXPECT_FALSE(underflow); + EXPECT_TRUE(has_error); + EXPECT_TRUE(nullptr == cmd); +} + +TEST_F(RocketmqCodecTest, DecodeInvalidJson) { + Buffer::OwnedImpl buffer; + // Invalid json string. + std::string invalid_json = R"EOF({a: 3)EOF"; + + buffer.writeBEInt(4 + 4 + invalid_json.size()); + buffer.writeBEInt(invalid_json.size()); + buffer.add(invalid_json); + + bool underflow = false; + bool has_error = false; + + auto cmd = Decoder::decode(buffer, underflow, has_error); + + EXPECT_FALSE(underflow); + EXPECT_TRUE(has_error); + EXPECT_TRUE(cmd == nullptr); +} + +TEST_F(RocketmqCodecTest, DecodeCodeMissing) { + Buffer::OwnedImpl buffer; + // Invalid json string. 
+ std::string invalid_json = R"EOF({"a": 3})EOF"; + + buffer.writeBEInt(4 + 4 + invalid_json.size()); + buffer.writeBEInt(invalid_json.size()); + buffer.add(invalid_json); + + bool underflow = false; + bool has_error = false; + + auto cmd = Decoder::decode(buffer, underflow, has_error); + + EXPECT_FALSE(underflow); + EXPECT_TRUE(has_error); + EXPECT_TRUE(cmd == nullptr); +} + +TEST_F(RocketmqCodecTest, DecodeVersionMissing) { + Buffer::OwnedImpl buffer; + // Invalid json string. + std::string invalid_json = R"EOF({"code": 3})EOF"; + + buffer.writeBEInt(4 + 4 + invalid_json.size()); + buffer.writeBEInt(invalid_json.size()); + buffer.add(invalid_json); + + bool underflow = false; + bool has_error = false; + + auto cmd = Decoder::decode(buffer, underflow, has_error); + + EXPECT_FALSE(underflow); + EXPECT_TRUE(has_error); + EXPECT_TRUE(cmd == nullptr); +} + +TEST_F(RocketmqCodecTest, DecodeOpaqueMissing) { + Buffer::OwnedImpl buffer; + // Invalid json string. + std::string invalid_json = R"EOF( + { + "code": 3, + "version": 1 + } + )EOF"; + + buffer.writeBEInt(4 + 4 + invalid_json.size()); + buffer.writeBEInt(invalid_json.size()); + buffer.add(invalid_json); + + bool underflow = false; + bool has_error = false; + + auto cmd = Decoder::decode(buffer, underflow, has_error); + + EXPECT_FALSE(underflow); + EXPECT_TRUE(has_error); + EXPECT_TRUE(cmd == nullptr); +} + +TEST_F(RocketmqCodecTest, DecodeFlagMissing) { + Buffer::OwnedImpl buffer; + // Invalid json string. 
+ std::string invalid_json = R"EOF( + { + "code": 3, + "version": 1, + "opaque": 1 + } + )EOF"; + + buffer.writeBEInt(4 + 4 + invalid_json.size()); + buffer.writeBEInt(invalid_json.size()); + buffer.add(invalid_json); + + bool underflow = false; + bool has_error = false; + + auto cmd = Decoder::decode(buffer, underflow, has_error); + + EXPECT_FALSE(underflow); + EXPECT_TRUE(has_error); + EXPECT_TRUE(cmd == nullptr); +} + +TEST_F(RocketmqCodecTest, DecodeRequestSendMessage) { + Buffer::OwnedImpl buffer; + BufferUtility::fillRequestBuffer(buffer, RequestCode::SendMessage); + + bool underflow = false; + bool has_error = false; + + RemotingCommandPtr request = Decoder::decode(buffer, underflow, has_error); + + EXPECT_FALSE(underflow || has_error); + EXPECT_EQ(request->opaque(), BufferUtility::opaque_); + Buffer::Instance& body = request->body(); + EXPECT_EQ(body.toString(), BufferUtility::msg_body_); + + auto header = request->typedCustomHeader(); + + EXPECT_EQ(header->topic(), BufferUtility::topic_name_); + EXPECT_EQ(header->version(), SendMessageRequestVersion::V1); + EXPECT_EQ(header->queueId(), -1); +} + +TEST_F(RocketmqCodecTest, DecodeRequestSendMessageV2) { + Buffer::OwnedImpl buffer; + + BufferUtility::fillRequestBuffer(buffer, RequestCode::SendMessageV2); + + bool underflow = false; + bool has_error = false; + + RemotingCommandPtr request = Decoder::decode(buffer, underflow, has_error); + + EXPECT_FALSE(underflow || has_error); + EXPECT_EQ(request->opaque(), BufferUtility::opaque_); + + Buffer::Instance& body = request->body(); + + EXPECT_EQ(body.toString(), BufferUtility::msg_body_); + + auto header = request->typedCustomHeader(); + + EXPECT_EQ(header->topic(), BufferUtility::topic_name_); + EXPECT_EQ(header->version(), SendMessageRequestVersion::V2); + EXPECT_EQ(header->queueId(), -1); +} + +TEST_F(RocketmqCodecTest, DecodeRequestSendMessageV1) { + std::string json = R"EOF( + { + "code": 10, + "version": 1, + "opaque": 1, + "flag": 0, + "extFields": { + 
"batch": false, + "bornTimestamp": 1575872212297, + "defaultTopic": "TBW102", + "defaultTopicQueueNums": 3, + "flag": 124, + "producerGroup": "FooBarGroup", + "queueId": 1, + "reconsumeTimes": 0, + "sysFlag": 0, + "topic": "FooBar", + "unitMode": false, + "properties": "mock_properties", + "maxReconsumeTimes": 32 + } + } + )EOF"; + Buffer::OwnedImpl buffer; + + buffer.writeBEInt(4 + 4 + json.size()); + buffer.writeBEInt(json.size()); + buffer.add(json); + + bool underflow = false; + bool has_error = false; + + auto cmd = Decoder::decode(buffer, underflow, has_error); + + EXPECT_FALSE(underflow); + EXPECT_FALSE(has_error); + EXPECT_TRUE(nullptr != cmd); + EXPECT_EQ(10, cmd->code()); + EXPECT_EQ(1, cmd->version()); + EXPECT_EQ(1, cmd->opaque()); +} + +TEST_F(RocketmqCodecTest, DecodeSendMessageResponseWithSystemError) { + std::string json = R"EOF( + { + "code": 1, + "language": "JAVA", + "version": 2, + "opaque": 1, + "flag": 1, + "remark": "System error", + "serializeTypeCurrentRPC": "JSON" + } + )EOF"; + Buffer::OwnedImpl buffer; + + buffer.writeBEInt(4 + 4 + json.size()); + buffer.writeBEInt(json.size()); + buffer.add(json); + + bool underflow = false; + bool has_error = false; + + auto cmd = + Decoder::decode(buffer, underflow, has_error, static_cast(RequestCode::SendMessage)); + + EXPECT_FALSE(has_error); + EXPECT_FALSE(underflow); + EXPECT_TRUE(nullptr != cmd); + EXPECT_STREQ("JAVA", cmd->language().c_str()); + EXPECT_STREQ("JSON", cmd->serializeTypeCurrentRPC().c_str()); + EXPECT_STREQ("System error", cmd->remark().c_str()); + EXPECT_TRUE(nullptr == cmd->customHeader()); +} + +TEST_F(RocketmqCodecTest, DecodeSendMessageResponseWithSystemBusy) { + std::string json = R"EOF( + { + "code": 2, + "language": "JAVA", + "version": 2, + "opaque": 1, + "flag": 1, + "remark": "System busy", + "serializeTypeCurrentRPC": "JSON" + } + )EOF"; + Buffer::OwnedImpl buffer; + + buffer.writeBEInt(4 + 4 + json.size()); + buffer.writeBEInt(json.size()); + buffer.add(json); + + bool 
underflow = false; + bool has_error = false; + + auto cmd = + Decoder::decode(buffer, underflow, has_error, static_cast(RequestCode::SendMessage)); + + EXPECT_FALSE(has_error); + EXPECT_FALSE(underflow); + EXPECT_TRUE(nullptr != cmd); + EXPECT_STREQ("JAVA", cmd->language().c_str()); + EXPECT_STREQ("JSON", cmd->serializeTypeCurrentRPC().c_str()); + EXPECT_STREQ("System busy", cmd->remark().c_str()); + EXPECT_TRUE(nullptr == cmd->customHeader()); +} + +TEST_F(RocketmqCodecTest, DecodeSendMessageResponseWithCodeNotSupported) { + std::string json = R"EOF( + { + "code": 3, + "language": "JAVA", + "version": 2, + "opaque": 1, + "flag": 1, + "remark": "Code not supported", + "serializeTypeCurrentRPC": "JSON" + } + )EOF"; + Buffer::OwnedImpl buffer; + + buffer.writeBEInt(4 + 4 + json.size()); + buffer.writeBEInt(json.size()); + buffer.add(json); + + bool underflow = false; + bool has_error = false; + + auto cmd = + Decoder::decode(buffer, underflow, has_error, static_cast(RequestCode::SendMessage)); + + EXPECT_FALSE(has_error); + EXPECT_FALSE(underflow); + EXPECT_TRUE(nullptr != cmd); + EXPECT_STREQ("JAVA", cmd->language().c_str()); + EXPECT_STREQ("JSON", cmd->serializeTypeCurrentRPC().c_str()); + EXPECT_STREQ("Code not supported", cmd->remark().c_str()); + EXPECT_TRUE(nullptr == cmd->customHeader()); +} + +TEST_F(RocketmqCodecTest, DecodeSendMessageResponseNormal) { + std::string json = R"EOF( + { + "code": 0, + "language": "JAVA", + "version": 2, + "opaque": 1, + "flag": 1, + "remark": "OK", + "serializeTypeCurrentRPC": "JSON", + "extFields": { + "msgId": "A001", + "queueId": "10", + "queueOffset": "2", + "transactionId": "" + } + } + )EOF"; + Buffer::OwnedImpl buffer; + + buffer.writeBEInt(4 + 4 + json.size()); + buffer.writeBEInt(json.size()); + buffer.add(json); + + bool underflow = false; + bool has_error = false; + + auto cmd = + Decoder::decode(buffer, underflow, has_error, static_cast(RequestCode::SendMessage)); + + EXPECT_FALSE(has_error); + 
EXPECT_FALSE(underflow); + EXPECT_TRUE(nullptr != cmd); + EXPECT_STREQ("JAVA", cmd->language().c_str()); + EXPECT_STREQ("JSON", cmd->serializeTypeCurrentRPC().c_str()); + EXPECT_STREQ("OK", cmd->remark().c_str()); + EXPECT_TRUE(nullptr != cmd->customHeader()); + + auto extHeader = cmd->typedCustomHeader(); + + EXPECT_STREQ("A001", extHeader->msgId().c_str()); + EXPECT_EQ(10, extHeader->queueId()); + EXPECT_EQ(2, extHeader->queueOffset()); +} + +TEST_F(RocketmqCodecTest, DecodePopMessageResponseNormal) { + std::string json = R"EOF( + { + "code": 0, + "language": "JAVA", + "version": 2, + "opaque": 1, + "flag": 1, + "remark": "OK", + "serializeTypeCurrentRPC": "JSON", + "extFields": { + "popTime": "1234", + "invisibleTime": "10", + "reviveQid": "2", + "restNum": "10", + "startOffsetInfo": "3", + "msgOffsetInfo": "mock_msg_offset_info", + "orderCountInfo": "mock_order_count_info" + } + } + )EOF"; + Buffer::OwnedImpl buffer; + + buffer.writeBEInt(4 + 4 + json.size()); + buffer.writeBEInt(json.size()); + buffer.add(json); + + bool underflow = false; + bool has_error = false; + + auto cmd = + Decoder::decode(buffer, underflow, has_error, static_cast(RequestCode::PopMessage)); + + EXPECT_FALSE(has_error); + EXPECT_FALSE(underflow); + EXPECT_TRUE(nullptr != cmd); + EXPECT_STREQ("JAVA", cmd->language().c_str()); + EXPECT_STREQ("JSON", cmd->serializeTypeCurrentRPC().c_str()); + EXPECT_STREQ("OK", cmd->remark().c_str()); + EXPECT_TRUE(nullptr != cmd->customHeader()); + + auto extHeader = cmd->typedCustomHeader(); + + EXPECT_EQ(1234, extHeader->popTimeForTest()); + EXPECT_EQ(10, extHeader->invisibleTime()); + EXPECT_EQ(2, extHeader->reviveQid()); + EXPECT_EQ(10, extHeader->restNum()); + EXPECT_STREQ("3", extHeader->startOffsetInfo().c_str()); + EXPECT_STREQ("mock_msg_offset_info", extHeader->msgOffsetInfo().c_str()); + EXPECT_STREQ("mock_order_count_info", extHeader->orderCountInfo().c_str()); +} + +TEST_F(RocketmqCodecTest, DecodeRequestSendMessageV2underflow) { + 
Buffer::OwnedImpl buffer; + + buffer.add(std::string({'\x00', '\x00', '\x01', '\x8b'})); + buffer.add(std::string({'\x00', '\x00', '\x01', '\x76'})); + + std::string header_json = R"EOF( + { + "code": 310, + "extFields": { + "a": "GID_LINGCHU_TEST_0" + } + )EOF"; + + buffer.add(header_json); + buffer.add(std::string{"_Apache_RocketMQ_"}); + + bool underflow = false; + bool has_error = false; + + RemotingCommandPtr request = Decoder::decode(buffer, underflow, has_error); + + EXPECT_EQ(underflow, true); + EXPECT_EQ(has_error, false); +} + +TEST_F(RocketmqCodecTest, EncodeResponseSendMessageSuccess) { + const int version = 285; + const int opaque = 4; + const std::string msg_id = "1E05789ABD1F18B4AAC2895B8BE60003"; + + RemotingCommandPtr response = + std::make_unique(static_cast(ResponseCode::Success), version, opaque); + + response->markAsResponse(); + + const int queue_id = 0; + const int queue_offset = 0; + + std::unique_ptr sendMessageResponseHeader = + std::make_unique(msg_id, queue_id, queue_offset, EMPTY_STRING); + CommandCustomHeaderPtr extHeader(sendMessageResponseHeader.release()); + response->customHeader(extHeader); + + Buffer::OwnedImpl response_buffer; + Encoder::encode(response, response_buffer); + + uint32_t frame_length = response_buffer.peekBEInt(); + uint32_t header_length = + response_buffer.peekBEInt(Decoder::FRAME_HEADER_LENGTH_FIELD_SIZE); + + EXPECT_EQ(header_length + Decoder::FRAME_HEADER_LENGTH_FIELD_SIZE, frame_length); + + std::unique_ptr header_data = std::make_unique(header_length); + const uint32_t frame_header_content_offset = + Decoder::FRAME_LENGTH_FIELD_SIZE + Decoder::FRAME_HEADER_LENGTH_FIELD_SIZE; + response_buffer.copyOut(frame_header_content_offset, header_length, header_data.get()); + std::string header_json(header_data.get(), header_length); + ProtobufWkt::Struct doc; + MessageUtil::loadFromJson(header_json, doc); + const auto& members = doc.fields(); + + EXPECT_EQ(members.at("code").number_value(), 0); + 
EXPECT_EQ(members.at("version").number_value(), version); + EXPECT_EQ(members.at("opaque").number_value(), opaque); + + const auto& extFields = members.at("extFields").struct_value().fields(); + + EXPECT_EQ(extFields.at("msgId").string_value(), msg_id); + EXPECT_EQ(extFields.at("queueId").number_value(), queue_id); + EXPECT_EQ(extFields.at("queueOffset").number_value(), queue_offset); +} + +TEST_F(RocketmqCodecTest, DecodeQueueIdWithIncompleteBuffer) { + Buffer::OwnedImpl buffer; + // incomplete buffer + buffer.add(std::string({'\x00'})); + + EXPECT_EQ(Decoder::decodeQueueId(buffer, 0), -1); +} + +TEST_F(RocketmqCodecTest, DecodeQueueIdSuccess) { + Buffer::OwnedImpl buffer; + // frame length + buffer.writeBEInt(16); + + for (int i = 0; i < 3; i++) { + buffer.writeBEInt(i); + } + EXPECT_EQ(Decoder::decodeQueueId(buffer, 0), 2); +} + +TEST_F(RocketmqCodecTest, DecodeQueueIdFailure) { + Buffer::OwnedImpl buffer; + buffer.writeBEInt(128); + + // Some random data, but incomplete frame + buffer.writeBEInt(12); + + EXPECT_EQ(Decoder::decodeQueueId(buffer, 0), -1); +} + +TEST_F(RocketmqCodecTest, DecodeQueueOffsetSuccess) { + Buffer::OwnedImpl buffer; + // frame length + buffer.writeBEInt(28); + + // frame data + for (int i = 0; i < 4; i++) { + buffer.writeBEInt(i); + } + // write queue offset which takes up 8 bytes + buffer.writeBEInt(4); + + EXPECT_EQ(Decoder::decodeQueueOffset(buffer, 0), 4); +} + +TEST_F(RocketmqCodecTest, DecodeQueueOffsetFailure) { + Buffer::OwnedImpl buffer; + + // Define length of the frame as 128 bytes + buffer.writeBEInt(128); + + // some random data, just make sure the frame is incomplete + for (int i = 0; i < 6; i++) { + buffer.writeBEInt(i); + } + + EXPECT_EQ(Decoder::decodeQueueOffset(buffer, 0), -1); +} + +TEST_F(RocketmqCodecTest, DecodeMsgIdSuccess) { + Buffer::OwnedImpl buffer; + + // frame length + buffer.writeBEInt(64); + + // magic code + buffer.writeBEInt(0); + + // body CRC + buffer.writeBEInt(1); + + // queue Id + 
buffer.writeBEInt(2); + + // flag + buffer.writeBEInt(3); + + // queue offset + buffer.writeBEInt(4); + + // physical offset + buffer.writeBEInt(5); + + // system flag + buffer.writeBEInt(6); + + // born timestamp + buffer.writeBEInt(7); + + // born host + buffer.writeBEInt(8); + + // born host port + buffer.writeBEInt(9); + + // store timestamp + buffer.writeBEInt(10); + + // store host address ip:port --> long + Network::Address::Ipv4Instance host_address("127.0.0.1", 10911); + const sockaddr_in* sock_addr = reinterpret_cast(host_address.sockAddr()); + buffer.writeBEInt(sock_addr->sin_addr.s_addr); + buffer.writeBEInt(sock_addr->sin_port); + EXPECT_EQ(Decoder::decodeMsgId(buffer, 0).empty(), false); +} + +TEST_F(RocketmqCodecTest, DecodeMsgIdFailure) { + Buffer::OwnedImpl buffer; + + // frame length + buffer.writeBEInt(101); + + // magic code + buffer.writeBEInt(0); + EXPECT_EQ(Decoder::decodeMsgId(buffer, 0).empty(), true); +} + +TEST_F(RocketmqCodecTest, DecodeTopicSuccessV1) { + Buffer::OwnedImpl buffer; + + // frame length + buffer.writeBEInt(98); + + // magic code + buffer.writeBEInt(enumToSignedInt(MessageVersion::V1)); + + // body CRC + buffer.writeBEInt(1); + + // queue Id + buffer.writeBEInt(2); + + // flag + buffer.writeBEInt(3); + + // queue offset + buffer.writeBEInt(4); + + // physical offset + buffer.writeBEInt(5); + + // system flag + buffer.writeBEInt(6); + + // born timestamp + buffer.writeBEInt(7); + + // born host + buffer.writeBEInt(8); + + // born host port + buffer.writeBEInt(9); + + // store timestamp + buffer.writeBEInt(10); + + // store host address ip:port --> long + Network::Address::Ipv4Instance host_address("127.0.0.1", 10911); + const sockaddr_in* sock_addr = reinterpret_cast(host_address.sockAddr()); + buffer.writeBEInt(sock_addr->sin_addr.s_addr); + buffer.writeBEInt(sock_addr->sin_port); + + // re-consume times + buffer.writeBEInt(11); + + // transaction offset + buffer.writeBEInt(12); + + // body size + buffer.writeBEInt(0); + + 
const std::string topic = "TopicTest"; + + // topic length + buffer.writeBEInt(topic.length()); + + // topic data + buffer.add(topic); + + EXPECT_STREQ(Decoder::decodeTopic(buffer, 0).c_str(), topic.c_str()); +} + +TEST_F(RocketmqCodecTest, DecodeTopicSuccessV2) { + Buffer::OwnedImpl buffer; + + // frame length + buffer.writeBEInt(99); + + // magic code + buffer.writeBEInt(enumToSignedInt(MessageVersion::V2)); + + // body CRC + buffer.writeBEInt(1); + + // queue Id + buffer.writeBEInt(2); + + // flag + buffer.writeBEInt(3); + + // queue offset + buffer.writeBEInt(4); + + // physical offset + buffer.writeBEInt(5); + + // system flag + buffer.writeBEInt(6); + + // born timestamp + buffer.writeBEInt(7); + + // born host + buffer.writeBEInt(8); + + // born host port + buffer.writeBEInt(9); + + // store timestamp + buffer.writeBEInt(10); + + // store host address ip:port --> long + Network::Address::Ipv4Instance host_address("127.0.0.1", 10911); + const sockaddr_in* sock_addr = reinterpret_cast(host_address.sockAddr()); + buffer.writeBEInt(sock_addr->sin_addr.s_addr); + buffer.writeBEInt(sock_addr->sin_port); + + // re-consume times + buffer.writeBEInt(11); + + // transaction offset + buffer.writeBEInt(12); + + // body size + buffer.writeBEInt(0); + + const std::string topic = "TopicTest"; + + // topic length + buffer.writeBEInt(topic.length()); + + // topic data + buffer.add(topic); + + EXPECT_STREQ(Decoder::decodeTopic(buffer, 0).c_str(), topic.c_str()); +} + +TEST_F(RocketmqCodecTest, DecodeTopicFailure) { + Buffer::OwnedImpl buffer; + + // frame length + buffer.writeBEInt(64); + + // magic code + buffer.writeBEInt(0); + EXPECT_EQ(Decoder::decodeTopic(buffer, 0).empty(), true); +} + +} // namespace RocketmqProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/network/rocketmq_proxy/config_test.cc b/test/extensions/filters/network/rocketmq_proxy/config_test.cc new file mode 100644 index 
0000000000000..3030522c9eb72 --- /dev/null +++ b/test/extensions/filters/network/rocketmq_proxy/config_test.cc @@ -0,0 +1,172 @@ +#include "envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.pb.h" +#include "envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.pb.validate.h" + +#include "extensions/filters/network/rocketmq_proxy/config.h" + +#include "test/mocks/local_info/mocks.h" +#include "test/mocks/server/factory_context.h" +#include "test/mocks/server/instance.h" +#include "test/test_common/registry.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::_; +using testing::Return; +using testing::ReturnRef; + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RocketmqProxy { + +using RocketmqProxyProto = envoy::extensions::filters::network::rocketmq_proxy::v3::RocketmqProxy; + +RocketmqProxyProto parseRocketmqProxyFromV3Yaml(const std::string& yaml, + bool avoid_boosting = true) { + RocketmqProxyProto rocketmq_proxy; + TestUtility::loadFromYaml(yaml, rocketmq_proxy, false, avoid_boosting); + return rocketmq_proxy; +} + +class RocketmqFilterConfigTestBase { +public: + void testConfig(RocketmqProxyProto& config) { + Network::FilterFactoryCb cb; + EXPECT_NO_THROW({ cb = factory_.createFilterFactoryFromProto(config, context_); }); + Network::MockConnection connection; + EXPECT_CALL(connection, addReadFilter(_)); + cb(connection); + } + + NiceMock context_; + RocketmqProxyFilterConfigFactory factory_; +}; + +class RocketmqFilterConfigTest : public RocketmqFilterConfigTestBase, public testing::Test { +public: + ~RocketmqFilterConfigTest() override = default; +}; + +TEST_F(RocketmqFilterConfigTest, ValidateFail) { + NiceMock context; + EXPECT_THROW( + RocketmqProxyFilterConfigFactory().createFilterFactoryFromProto( + envoy::extensions::filters::network::rocketmq_proxy::v3::RocketmqProxy(), context), + ProtoValidationException); +} + +TEST_F(RocketmqFilterConfigTest, 
ValidProtoConfiguration) { + envoy::extensions::filters::network::rocketmq_proxy::v3::RocketmqProxy config{}; + config.set_stat_prefix("my_stat_prefix"); + NiceMock context; + RocketmqProxyFilterConfigFactory factory; + Network::FilterFactoryCb cb = factory.createFilterFactoryFromProto(config, context); + Network::MockConnection connection; + EXPECT_CALL(connection, addReadFilter(_)); + cb(connection); +} + +TEST_F(RocketmqFilterConfigTest, RocketmqProxyWithEmptyProto) { + NiceMock context; + RocketmqProxyFilterConfigFactory factory; + envoy::extensions::filters::network::rocketmq_proxy::v3::RocketmqProxy config = + *dynamic_cast( + factory.createEmptyConfigProto().get()); + config.set_stat_prefix("my_stat_prefix"); + Network::FilterFactoryCb cb = factory.createFilterFactoryFromProto(config, context); + Network::MockConnection connection; + EXPECT_CALL(connection, addReadFilter(_)); + cb(connection); +} + +TEST_F(RocketmqFilterConfigTest, RocketmqProxyWithFullConfig) { + const std::string yaml = R"EOF( + stat_prefix: rocketmq_incomming_stats + develop_mode: true + transient_object_life_span: + seconds: 30 + )EOF"; + RocketmqProxyProto config = parseRocketmqProxyFromV3Yaml(yaml); + testConfig(config); +} + +TEST_F(RocketmqFilterConfigTest, ProxyAddress) { + NiceMock context; + Server::Configuration::MockServerFactoryContext factory_context; + EXPECT_CALL(context, getServerFactoryContext()).WillRepeatedly(ReturnRef(factory_context)); + + LocalInfo::MockLocalInfo local_info; + EXPECT_CALL(factory_context, localInfo()).WillRepeatedly(ReturnRef(local_info)); + std::shared_ptr instance = + std::make_shared("logical", "physical"); + EXPECT_CALL(local_info, address()).WillRepeatedly(Return(instance)); + EXPECT_CALL(*instance, type()).WillRepeatedly(Return(Network::Address::Type::Ip)); + + Network::MockIp* ip = new Network::MockIp(); + EXPECT_CALL(*instance, ip()).WillRepeatedly(testing::Return(ip)); + + std::string address("1.2.3.4"); + EXPECT_CALL(*ip, 
addressAsString()).WillRepeatedly(ReturnRef(address)); + EXPECT_CALL(*ip, port()).WillRepeatedly(Return(1234)); + ConfigImpl::RocketmqProxyConfig proxyConfig; + ConfigImpl configImpl(proxyConfig, context); + + EXPECT_STREQ("1.2.3.4:1234", configImpl.proxyAddress().c_str()); + delete ip; +} + +TEST_F(RocketmqFilterConfigTest, ProxyAddressWithDefaultPort) { + NiceMock context; + Server::Configuration::MockServerFactoryContext factory_context; + EXPECT_CALL(context, getServerFactoryContext()).WillRepeatedly(ReturnRef(factory_context)); + + LocalInfo::MockLocalInfo local_info; + EXPECT_CALL(factory_context, localInfo()).WillRepeatedly(ReturnRef(local_info)); + std::shared_ptr instance = + std::make_shared("logical", "physical"); + EXPECT_CALL(local_info, address()).WillRepeatedly(Return(instance)); + EXPECT_CALL(*instance, type()).WillRepeatedly(Return(Network::Address::Type::Ip)); + + Network::MockIp* ip = new Network::MockIp(); + EXPECT_CALL(*instance, ip()).WillRepeatedly(testing::Return(ip)); + + std::string address("1.2.3.4"); + EXPECT_CALL(*ip, addressAsString()).WillRepeatedly(ReturnRef(address)); + EXPECT_CALL(*ip, port()).WillRepeatedly(Return(0)); + ConfigImpl::RocketmqProxyConfig proxyConfig; + ConfigImpl configImpl(proxyConfig, context); + + EXPECT_STREQ("1.2.3.4:10000", configImpl.proxyAddress().c_str()); + delete ip; +} + +TEST_F(RocketmqFilterConfigTest, ProxyAddressWithNonIpType) { + NiceMock context; + Server::Configuration::MockServerFactoryContext factory_context; + EXPECT_CALL(context, getServerFactoryContext()).WillRepeatedly(ReturnRef(factory_context)); + + LocalInfo::MockLocalInfo local_info; + EXPECT_CALL(factory_context, localInfo()).WillRepeatedly(ReturnRef(local_info)); + std::shared_ptr instance = + std::make_shared("logical", "physical"); + EXPECT_CALL(local_info, address()).WillRepeatedly(Return(instance)); + EXPECT_CALL(*instance, type()).WillRepeatedly(Return(Network::Address::Type::Pipe)); + + Network::MockIp* ip = new 
Network::MockIp(); + EXPECT_CALL(*instance, ip()).WillRepeatedly(testing::Return(ip)); + + std::string address("1.2.3.4"); + EXPECT_CALL(*ip, addressAsString()).WillRepeatedly(ReturnRef(address)); + EXPECT_CALL(*ip, port()).WillRepeatedly(Return(0)); + ConfigImpl::RocketmqProxyConfig proxyConfig; + ConfigImpl configImpl(proxyConfig, context); + + EXPECT_STREQ("physical", configImpl.proxyAddress().c_str()); + delete ip; +} + +} // namespace RocketmqProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/network/rocketmq_proxy/conn_manager_test.cc b/test/extensions/filters/network/rocketmq_proxy/conn_manager_test.cc new file mode 100644 index 0000000000000..3470b4c348189 --- /dev/null +++ b/test/extensions/filters/network/rocketmq_proxy/conn_manager_test.cc @@ -0,0 +1,705 @@ +#include "envoy/network/connection.h" + +#include "extensions/filters/network/rocketmq_proxy/config.h" +#include "extensions/filters/network/rocketmq_proxy/conn_manager.h" +#include "extensions/filters/network/rocketmq_proxy/well_known_names.h" + +#include "test/common/stats/stat_test_utility.h" +#include "test/common/upstream/utility.h" +#include "test/extensions/filters/network/rocketmq_proxy/utility.h" +#include "test/mocks/network/connection.h" +#include "test/mocks/network/mocks.h" +#include "test/mocks/server/factory_context.h" +#include "test/mocks/server/instance.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::_; +using testing::NiceMock; +using testing::Return; +using testing::ReturnRef; + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RocketmqProxy { + +using ConfigRocketmqProxy = envoy::extensions::filters::network::rocketmq_proxy::v3::RocketmqProxy; + +class TestConfigImpl : public ConfigImpl { +public: + TestConfigImpl(RocketmqProxyConfig config, Server::Configuration::MockFactoryContext& context, + RocketmqFilterStats& stats) + : ConfigImpl(config, 
context), stats_(stats) {} + + RocketmqFilterStats& stats() override { return stats_; } + +private: + RocketmqFilterStats stats_; +}; + +class RocketmqConnectionManagerTest : public testing::Test { +public: + RocketmqConnectionManagerTest() : stats_(RocketmqFilterStats::generateStats("test.", store_)) {} + + ~RocketmqConnectionManagerTest() override { + filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList(); + } + + void initializeFilter() { initializeFilter(""); } + + void initializeFilter(const std::string& yaml) { + if (!yaml.empty()) { + TestUtility::loadFromYaml(yaml, proto_config_); + TestUtility::validate(proto_config_); + } + config_ = std::make_unique(proto_config_, factory_context_, stats_); + conn_manager_ = + std::make_unique(*config_, factory_context_.dispatcher().timeSource()); + conn_manager_->initializeReadFilterCallbacks(filter_callbacks_); + conn_manager_->onNewConnection(); + current_ = factory_context_.dispatcher().timeSource().monotonicTime(); + } + + void initializeCluster() { + Upstream::HostVector hosts; + hosts.emplace_back(host_); + priority_set_.updateHosts( + 1, + Upstream::HostSetImpl::partitionHosts(std::make_shared(hosts), + Upstream::HostsPerLocalityImpl::empty()), + nullptr, hosts, {}, 100); + ON_CALL(thread_local_cluster_, prioritySet()).WillByDefault(ReturnRef(priority_set_)); + EXPECT_CALL(factory_context_.cluster_manager_, get(_)) + .WillRepeatedly(Return(&thread_local_cluster_)); + } + + NiceMock factory_context_; + Stats::TestUtil::TestStore store_; + RocketmqFilterStats stats_; + ConfigRocketmqProxy proto_config_; + + std::unique_ptr config_; + + Buffer::OwnedImpl buffer_; + NiceMock filter_callbacks_; + std::unique_ptr conn_manager_; + + Encoder encoder_; + Decoder decoder_; + + MonotonicTime current_; + + std::shared_ptr cluster_info_{ + new NiceMock()}; + Upstream::HostSharedPtr host_{Upstream::makeTestHost(cluster_info_, "tcp://127.0.0.1:80")}; + Upstream::PrioritySetImpl priority_set_; + NiceMock 
thread_local_cluster_; +}; + +TEST_F(RocketmqConnectionManagerTest, OnHeartbeat) { + initializeFilter(); + + BufferUtility::fillRequestBuffer(buffer_, RequestCode::HeartBeat); + EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(1U, store_.counter("test.request").value()); + EXPECT_EQ(1U, store_.counter("test.heartbeat").value()); + + buffer_.drain(buffer_.length()); +} + +TEST_F(RocketmqConnectionManagerTest, OnHeartbeatWithDecodeError) { + initializeFilter(); + + std::string json = R"EOF( + { + "language": "JAVA", + "version": 2, + "opaque": 1, + "flag": 1, + "serializeTypeCurrentRPC": "JSON" + } + )EOF"; + + buffer_.writeBEInt(4 + 4 + json.size()); + buffer_.writeBEInt(json.size()); + buffer_.add(json); + EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(1U, store_.counter("test.request").value()); + EXPECT_EQ(1U, store_.counter("test.request_decoding_error").value()); + + buffer_.drain(buffer_.length()); +} + +TEST_F(RocketmqConnectionManagerTest, OnHeartbeatWithInvalidBodyJson) { + initializeFilter(); + + RemotingCommandPtr cmd = std::make_unique(); + cmd->code(static_cast(RequestCode::HeartBeat)); + std::string heartbeat_data = R"EOF({"clientID": "127})EOF"; + cmd->body().add(heartbeat_data); + encoder_.encode(cmd, buffer_); + + EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(1U, store_.counter("test.request").value()); + EXPECT_EQ(0U, store_.counter("test.request_decoding_error").value()); + + buffer_.drain(buffer_.length()); +} + +TEST_F(RocketmqConnectionManagerTest, OnHeartbeatWithBodyJsonLackofClientId) { + initializeFilter(); + + RemotingCommandPtr cmd = std::make_unique(); + cmd->code(static_cast(RequestCode::HeartBeat)); + std::string heartbeat_data = R"EOF( + { + "consumerDataSet": [{}] + } + )EOF"; + cmd->body().add(heartbeat_data); + encoder_.encode(cmd, buffer_); + + 
EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(1U, store_.counter("test.request").value()); + EXPECT_EQ(0U, store_.counter("test.request_decoding_error").value()); + + buffer_.drain(buffer_.length()); +} + +TEST_F(RocketmqConnectionManagerTest, OnHeartbeatWithGroupMembersMapExists) { + initializeFilter(); + + auto& group_members_map = conn_manager_->groupMembersForTest(); + std::vector group_members; + ConsumerGroupMember group_member("127.0.0.1@90330", *conn_manager_); + group_member.setLastForTest(current_); + group_members.emplace_back(group_member); + group_members_map["test_cg"] = group_members; + + BufferUtility::fillRequestBuffer(buffer_, RequestCode::HeartBeat); + EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(1U, store_.counter("test.request").value()); + EXPECT_EQ(1U, store_.counter("test.heartbeat").value()); + EXPECT_FALSE(group_member.expired()); + EXPECT_FALSE(group_members_map.at("test_cg").empty()); + + buffer_.drain(buffer_.length()); +} + +TEST_F(RocketmqConnectionManagerTest, OnHeartbeatWithGroupMembersMapExistsButExpired) { + initializeFilter(); + + auto& group_members_map = conn_manager_->groupMembersForTest(); + std::vector group_members; + ConsumerGroupMember group_member("127.0.0.2@90330", *conn_manager_); + group_member.setLastForTest(current_ - std::chrono::seconds(31)); + group_members.emplace_back(group_member); + group_members_map["test_cg"] = group_members; + + BufferUtility::fillRequestBuffer(buffer_, RequestCode::HeartBeat); + EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(1U, store_.counter("test.request").value()); + EXPECT_EQ(1U, store_.counter("test.heartbeat").value()); + EXPECT_TRUE(group_member.expired()); + EXPECT_TRUE(group_members_map.empty()); + + buffer_.drain(buffer_.length()); +} + +TEST_F(RocketmqConnectionManagerTest, 
OnHeartbeatWithGroupMembersMapExistsButLackOfClientID) { + initializeFilter(); + + auto& group_members_map = conn_manager_->groupMembersForTest(); + std::vector group_members; + ConsumerGroupMember group_member("127.0.0.2@90330", *conn_manager_); + group_member.setLastForTest(current_); + group_members.emplace_back(group_member); + group_members_map["test_cg"] = group_members; + + BufferUtility::fillRequestBuffer(buffer_, RequestCode::HeartBeat); + EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(1U, store_.counter("test.request").value()); + EXPECT_EQ(1U, store_.counter("test.heartbeat").value()); + EXPECT_FALSE(group_member.expired()); + EXPECT_FALSE(group_members_map.at("test_cg").empty()); + + buffer_.drain(buffer_.length()); +} + +TEST_F(RocketmqConnectionManagerTest, OnHeartbeatWithDownstreamConnecitonClosed) { + initializeFilter(); + + BufferUtility::fillRequestBuffer(buffer_, RequestCode::HeartBeat); + NiceMock connection; + EXPECT_CALL(connection, state()).Times(1).WillOnce(Invoke([&]() -> Network::Connection::State { + return Network::Connection::State::Closed; + })); + EXPECT_CALL(filter_callbacks_, connection()).WillRepeatedly(Invoke([&]() -> Network::Connection& { + return connection; + })); + EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(1U, store_.counter("test.request").value()); + EXPECT_EQ(1U, store_.counter("test.heartbeat").value()); + + buffer_.drain(buffer_.length()); +} + +TEST_F(RocketmqConnectionManagerTest, OnHeartbeatWithPurgeDirectiveTable) { + initializeFilter(); + + std::string broker_name = "broker_name"; + int32_t broker_id = 0; + std::chrono::milliseconds delay_0(31 * 1000); + AckMessageDirective directive_0(broker_name, broker_id, + conn_manager_->timeSource().monotonicTime() - delay_0); + std::string directive_key_0 = "key_0"; + conn_manager_->insertAckDirective(directive_key_0, directive_0); + + std::chrono::milliseconds 
delay_1(29 * 1000); + AckMessageDirective directive_1(broker_name, broker_id, + conn_manager_->timeSource().monotonicTime() - delay_1); + std::string directive_key_1 = "key_1"; + conn_manager_->insertAckDirective(directive_key_1, directive_1); + + EXPECT_EQ(2, conn_manager_->getAckDirectiveTableForTest().size()); + + BufferUtility::fillRequestBuffer(buffer_, RequestCode::HeartBeat); + EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(1U, store_.counter("test.request").value()); + EXPECT_EQ(1U, store_.counter("test.heartbeat").value()); + + EXPECT_EQ(1, conn_manager_->getAckDirectiveTableForTest().size()); + buffer_.drain(buffer_.length()); +} + +TEST_F(RocketmqConnectionManagerTest, OnUnregisterClient) { + initializeFilter(); + + auto& group_members_map = conn_manager_->groupMembersForTest(); + BufferUtility::fillRequestBuffer(buffer_, RequestCode::UnregisterClient); + EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(1U, store_.counter("test.request").value()); + EXPECT_EQ(1U, store_.counter("test.unregister").value()); + EXPECT_TRUE(group_members_map.empty()); + + buffer_.drain(buffer_.length()); +} + +TEST_F(RocketmqConnectionManagerTest, OnUnregisterClientWithGroupMembersMapExists) { + initializeFilter(); + + auto& group_members_map = conn_manager_->groupMembersForTest(); + std::vector group_members; + ConsumerGroupMember group_member("test_client_id", *conn_manager_); + group_member.setLastForTest(current_); + group_members.emplace_back(group_member); + group_members_map["test_cg"] = group_members; + + BufferUtility::fillRequestBuffer(buffer_, RequestCode::UnregisterClient); + EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(1U, store_.counter("test.request").value()); + EXPECT_EQ(1U, store_.counter("test.unregister").value()); + EXPECT_FALSE(group_member.expired()); + EXPECT_TRUE(group_members_map.empty()); + + 
buffer_.drain(buffer_.length()); +} + +TEST_F(RocketmqConnectionManagerTest, OnUnregisterClientWithGroupMembersMapExistsButExpired) { + initializeFilter(); + + auto& group_members_map = conn_manager_->groupMembersForTest(); + std::vector group_members; + ConsumerGroupMember group_member("127.0.0.2@90330", *conn_manager_); + group_member.setLastForTest(current_ - std::chrono::seconds(31)); + group_members.emplace_back(group_member); + group_members_map["test_cg"] = group_members; + + BufferUtility::fillRequestBuffer(buffer_, RequestCode::UnregisterClient); + EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(1U, store_.counter("test.request").value()); + EXPECT_EQ(1U, store_.counter("test.unregister").value()); + EXPECT_TRUE(group_member.expired()); + EXPECT_TRUE(group_members_map.empty()); + + buffer_.drain(buffer_.length()); +} + +TEST_F(RocketmqConnectionManagerTest, + OnUnregisterClientWithGroupMembersMapExistsButLackOfClientID) { + initializeFilter(); + + auto& group_members_map = conn_manager_->groupMembersForTest(); + std::vector group_members; + ConsumerGroupMember group_member("127.0.0.2@90330", *conn_manager_); + group_member.setLastForTest(current_); + group_members.emplace_back(group_member); + group_members_map["test_cg"] = group_members; + + BufferUtility::fillRequestBuffer(buffer_, RequestCode::UnregisterClient); + EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(1U, store_.counter("test.request").value()); + EXPECT_EQ(1U, store_.counter("test.unregister").value()); + EXPECT_FALSE(group_member.expired()); + EXPECT_FALSE(group_members_map.empty()); + + buffer_.drain(buffer_.length()); +} + +TEST_F(RocketmqConnectionManagerTest, OnGetTopicRoute) { + const std::string yaml = R"EOF( +stat_prefix: test +route_config: + name: default_route + routes: + - match: + topic: + exact: test_topic + route: + cluster: fake_cluster +)EOF"; + initializeFilter(yaml); + + 
auto metadata = std::make_shared(); + ProtobufWkt::Struct topic_route_data; + auto* fields = topic_route_data.mutable_fields(); + (*fields)[RocketmqConstants::get().ReadQueueNum] = ValueUtil::numberValue(4); + (*fields)[RocketmqConstants::get().WriteQueueNum] = ValueUtil::numberValue(4); + (*fields)[RocketmqConstants::get().ClusterName] = ValueUtil::stringValue("DefaultCluster"); + (*fields)[RocketmqConstants::get().BrokerName] = ValueUtil::stringValue("broker-a"); + (*fields)[RocketmqConstants::get().BrokerId] = ValueUtil::numberValue(0); + (*fields)[RocketmqConstants::get().Perm] = ValueUtil::numberValue(6); + metadata->mutable_filter_metadata()->insert(Protobuf::MapPair( + NetworkFilterNames::get().RocketmqProxy, topic_route_data)); + host_->metadata(metadata); + initializeCluster(); + + BufferUtility::fillRequestBuffer(buffer_, RequestCode::GetRouteInfoByTopic); + EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(1U, store_.counter("test.request").value()); + EXPECT_EQ(1U, store_.counter("test.get_topic_route").value()); + + buffer_.drain(buffer_.length()); +} + +TEST_F(RocketmqConnectionManagerTest, OnGetTopicRouteWithoutRoutes) { + const std::string yaml = R"EOF( +stat_prefix: test +route_config: + name: default_route + routes: + - match: + topic: + exact: test_another_topic + route: + cluster: fake_cluster +)EOF"; + initializeFilter(yaml); + + BufferUtility::fillRequestBuffer(buffer_, RequestCode::GetRouteInfoByTopic); + EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(1U, store_.counter("test.request").value()); + EXPECT_EQ(1U, store_.counter("test.get_topic_route").value()); + + buffer_.drain(buffer_.length()); +} + +TEST_F(RocketmqConnectionManagerTest, OnGetTopicRouteWithoutCluster) { + const std::string yaml = R"EOF( +stat_prefix: test +route_config: + name: default_route + routes: + - match: + topic: + exact: test_topic + route: + cluster: fake_cluster 
+)EOF"; + initializeFilter(yaml); + + EXPECT_CALL(factory_context_.cluster_manager_, get(_)).WillRepeatedly(Return(nullptr)); + + BufferUtility::fillRequestBuffer(buffer_, RequestCode::GetRouteInfoByTopic); + EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(1U, store_.counter("test.request").value()); + EXPECT_EQ(1U, store_.counter("test.get_topic_route").value()); + + buffer_.drain(buffer_.length()); +} + +TEST_F(RocketmqConnectionManagerTest, OnGetTopicRouteInDevelopMode) { + const std::string yaml = R"EOF( +stat_prefix: test +develop_mode: true +route_config: + name: default_route + routes: + - match: + topic: + exact: test_topic + route: + cluster: fake_cluster +)EOF"; + NiceMock server_factory_context; + NiceMock local_info; + NiceMock ip; + std::shared_ptr instance = + std::make_shared("logical", "physical"); + EXPECT_CALL(factory_context_, getServerFactoryContext()) + .WillRepeatedly(ReturnRef(server_factory_context)); + EXPECT_CALL(server_factory_context, localInfo()).WillRepeatedly(ReturnRef(local_info)); + EXPECT_CALL(local_info, address()).WillRepeatedly(Return(instance)); + EXPECT_CALL(*instance, type()).WillRepeatedly(Return(Network::Address::Type::Ip)); + EXPECT_CALL(*instance, ip()).WillRepeatedly(testing::Return(&ip)); + const std::string address{"1.2.3.4"}; + EXPECT_CALL(ip, addressAsString()).WillRepeatedly(ReturnRef(address)); + EXPECT_CALL(ip, port()).WillRepeatedly(Return(1234)); + initializeFilter(yaml); + + auto metadata = std::make_shared(); + ProtobufWkt::Struct topic_route_data; + auto* fields = topic_route_data.mutable_fields(); + (*fields)[RocketmqConstants::get().ReadQueueNum] = ValueUtil::numberValue(4); + (*fields)[RocketmqConstants::get().WriteQueueNum] = ValueUtil::numberValue(4); + (*fields)[RocketmqConstants::get().ClusterName] = ValueUtil::stringValue("DefaultCluster"); + (*fields)[RocketmqConstants::get().BrokerName] = ValueUtil::stringValue("broker-a"); + 
(*fields)[RocketmqConstants::get().BrokerId] = ValueUtil::numberValue(0); + (*fields)[RocketmqConstants::get().Perm] = ValueUtil::numberValue(6); + metadata->mutable_filter_metadata()->insert(Protobuf::MapPair( + NetworkFilterNames::get().RocketmqProxy, topic_route_data)); + host_->metadata(metadata); + initializeCluster(); + + BufferUtility::fillRequestBuffer(buffer_, RequestCode::GetRouteInfoByTopic); + EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(1U, store_.counter("test.request").value()); + EXPECT_EQ(1U, store_.counter("test.get_topic_route").value()); + + buffer_.drain(buffer_.length()); +} + +TEST_F(RocketmqConnectionManagerTest, OnGetConsumerListByGroup) { + initializeFilter(); + + BufferUtility::fillRequestBuffer(buffer_, RequestCode::GetConsumerListByGroup); + EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(1U, store_.counter("test.request").value()); + EXPECT_EQ(1U, store_.counter("test.get_consumer_list").value()); + + buffer_.drain(buffer_.length()); +} + +TEST_F(RocketmqConnectionManagerTest, OnGetConsumerListByGroupWithGroupMemberMapExists) { + initializeFilter(); + + auto& group_members_map = conn_manager_->groupMembersForTest(); + std::vector group_members; + ConsumerGroupMember group_member("127.0.0.2@90330", *conn_manager_); + group_member.setLastForTest(current_ - std::chrono::seconds(31)); + group_members.emplace_back(group_member); + group_members_map["test_cg"] = group_members; + + BufferUtility::fillRequestBuffer(buffer_, RequestCode::GetConsumerListByGroup); + EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(1U, store_.counter("test.request").value()); + EXPECT_EQ(1U, store_.counter("test.get_consumer_list").value()); + + buffer_.drain(buffer_.length()); +} + +TEST_F(RocketmqConnectionManagerTest, OnPopMessage) { + const std::string yaml = R"EOF( +stat_prefix: test +route_config: + 
name: default_route + routes: + - match: + topic: + exact: test_topic + route: + cluster: fake_cluster +)EOF"; + initializeFilter(yaml); + + BufferUtility::fillRequestBuffer(buffer_, RequestCode::PopMessage); + EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(1U, store_.counter("test.request").value()); + EXPECT_EQ(1U, store_.counter("test.pop_message").value()); + + buffer_.drain(buffer_.length()); +} + +TEST_F(RocketmqConnectionManagerTest, OnAckMessage) { + const std::string yaml = R"EOF( +stat_prefix: test +route_config: + name: default_route + routes: + - match: + topic: + exact: test_topic + route: + cluster: fake_cluster +)EOF"; + initializeFilter(yaml); + + BufferUtility::fillRequestBuffer(buffer_, RequestCode::AckMessage); + EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(1U, store_.counter("test.request").value()); + EXPECT_EQ(1U, store_.counter("test.ack_message").value()); + + buffer_.drain(buffer_.length()); +} + +TEST_F(RocketmqConnectionManagerTest, OnData) { + initializeFilter(); + + EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(0, buffer_.length()); + EXPECT_EQ(0U, store_.counter("test.request").value()); + + buffer_.drain(buffer_.length()); +} + +TEST_F(RocketmqConnectionManagerTest, OnDataWithEndStream) { + initializeFilter(); + + Buffer::OwnedImpl buffer; + BufferUtility::fillRequestBuffer(buffer, RequestCode::SendMessageV2); + bool underflow, has_error; + RemotingCommandPtr request = Decoder::decode(buffer, underflow, has_error); + conn_manager_->createActiveMessage(request); + EXPECT_EQ(1, conn_manager_->activeMessageList().size()); + conn_manager_->onData(buffer_, true); + EXPECT_TRUE(conn_manager_->activeMessageList().empty()); + + buffer_.drain(buffer_.length()); +} + +TEST_F(RocketmqConnectionManagerTest, OnDataWithMinFrameSize) { + initializeFilter(); + + 
buffer_.add(std::string({'\x00', '\x00', '\x01', '\x8b'})); + buffer_.add(std::string({'\x00', '\x00', '\x01', '\x76'})); + EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(0U, store_.counter("test.request").value()); + + buffer_.drain(buffer_.length()); +} + +TEST_F(RocketmqConnectionManagerTest, OnDataSendMessage) { + const std::string yaml = R"EOF( +stat_prefix: test +route_config: + name: default_route + routes: + - match: + topic: + exact: test_topic + route: + cluster: fake_cluster +)EOF"; + initializeFilter(yaml); + + BufferUtility::fillRequestBuffer(buffer_, RequestCode::SendMessage); + EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(1U, store_.counter("test.request").value()); + EXPECT_EQ(1U, store_.counter("test.send_message_v1").value()); + EXPECT_EQ( + 1U, + store_.gauge("test.send_message_v1_active", Stats::Gauge::ImportMode::Accumulate).value()); + + buffer_.drain(buffer_.length()); +} + +TEST_F(RocketmqConnectionManagerTest, OnDataSendMessageV2) { + const std::string yaml = R"EOF( +stat_prefix: test +route_config: + name: default_route + routes: + - match: + topic: + exact: test_topic + route: + cluster: fake_cluster +)EOF"; + initializeFilter(yaml); + + BufferUtility::fillRequestBuffer(buffer_, RequestCode::SendMessageV2); + EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(1U, store_.counter("test.request").value()); + EXPECT_EQ(1U, store_.counter("test.send_message_v2").value()); + EXPECT_EQ( + 1U, + store_.gauge("test.send_message_v2_active", Stats::Gauge::ImportMode::Accumulate).value()); + + buffer_.drain(buffer_.length()); +} + +TEST_F(RocketmqConnectionManagerTest, OnDataWithUnsupportedCode) { + initializeFilter(); + + BufferUtility::fillRequestBuffer(buffer_, RequestCode::Unsupported); + EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(1U, 
store_.counter("test.request").value()); + + buffer_.drain(buffer_.length()); +} + +TEST_F(RocketmqConnectionManagerTest, OnDataInvalidFrameLength) { + // Test against the invalid input where frame_length <= header_length. + const std::string yaml = R"EOF( + stat_prefix: test + )EOF"; + initializeFilter(yaml); + buffer_.add( + std::string({'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00'})); + EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(1U, store_.counter("test.request").value()); + + buffer_.drain(buffer_.length()); +} + +TEST_F(RocketmqConnectionManagerTest, ConsumerGroupMemberEqual) { + initializeFilter(); + + ConsumerGroupMember m1("abc", *conn_manager_); + ConsumerGroupMember m2("abc", *conn_manager_); + EXPECT_TRUE(m1 == m2); +} + +TEST_F(RocketmqConnectionManagerTest, ConsumerGroupMemberLessThan) { + initializeFilter(); + + ConsumerGroupMember m1("abc", *conn_manager_); + ConsumerGroupMember m2("def", *conn_manager_); + EXPECT_TRUE(m1 < m2); +} + +TEST_F(RocketmqConnectionManagerTest, ConsumerGroupMemberExpired) { + initializeFilter(); + + ConsumerGroupMember member("Mock", *conn_manager_); + EXPECT_FALSE(member.expired()); + EXPECT_STREQ("Mock", member.clientId().data()); +} + +TEST_F(RocketmqConnectionManagerTest, ConsumerGroupMemberRefresh) { + initializeFilter(); + + ConsumerGroupMember member("Mock", *conn_manager_); + EXPECT_FALSE(member.expired()); + member.setLastForTest(current_ - std::chrono::seconds(31)); + EXPECT_TRUE(member.expired()); + member.refresh(); + EXPECT_FALSE(member.expired()); +} + +} // namespace RocketmqProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/network/rocketmq_proxy/mocks.cc b/test/extensions/filters/network/rocketmq_proxy/mocks.cc new file mode 100644 index 0000000000000..d346364491d77 --- /dev/null +++ b/test/extensions/filters/network/rocketmq_proxy/mocks.cc @@ -0,0 +1,57 
@@ +#include "test/extensions/filters/network/rocketmq_proxy/mocks.h" + +#include "extensions/filters/network/rocketmq_proxy/router/router_impl.h" + +#include "gtest/gtest.h" + +using testing::_; +using testing::ByMove; +using testing::Return; +using testing::ReturnRef; + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RocketmqProxy { + +MockActiveMessage::MockActiveMessage(ConnectionManager& conn_manager, RemotingCommandPtr&& request) + : ActiveMessage(conn_manager, std::move(request)) { + route_ = std::make_shared>(); + + ON_CALL(*this, onError(_)).WillByDefault(Invoke([&](absl::string_view error_message) { + ActiveMessage::onError(error_message); + })); + ON_CALL(*this, onReset()).WillByDefault(Return()); + ON_CALL(*this, sendResponseToDownstream()).WillByDefault(Invoke([&]() { + ActiveMessage::sendResponseToDownstream(); + })); + ON_CALL(*this, metadata()).WillByDefault(Invoke([&]() { return ActiveMessage::metadata(); })); + ON_CALL(*this, route()).WillByDefault(Return(route_)); +} +MockActiveMessage::~MockActiveMessage() = default; + +MockConfig::MockConfig() : stats_(RocketmqFilterStats::generateStats("test.", store_)) { + ON_CALL(*this, stats()).WillByDefault(ReturnRef(stats_)); + ON_CALL(*this, clusterManager()).WillByDefault(ReturnRef(cluster_manager_)); + ON_CALL(*this, createRouter()) + .WillByDefault(Return(ByMove(std::make_unique(cluster_manager_)))); + ON_CALL(*this, developMode()).WillByDefault(Return(false)); + ON_CALL(*this, proxyAddress()).WillByDefault(Return(std::string{"1.2.3.4:1234"})); +} + +namespace Router { + +MockRouteEntry::MockRouteEntry() { + ON_CALL(*this, clusterName()).WillByDefault(ReturnRef(cluster_name_)); +} + +MockRouteEntry::~MockRouteEntry() = default; + +MockRoute::MockRoute() { ON_CALL(*this, routeEntry()).WillByDefault(Return(&route_entry_)); } +MockRoute::~MockRoute() = default; + +} // namespace Router +} // namespace RocketmqProxy +} // namespace NetworkFilters +} // namespace 
Extensions +} // namespace Envoy \ No newline at end of file diff --git a/test/extensions/filters/network/rocketmq_proxy/mocks.h b/test/extensions/filters/network/rocketmq_proxy/mocks.h new file mode 100644 index 0000000000000..2cef60f964f0b --- /dev/null +++ b/test/extensions/filters/network/rocketmq_proxy/mocks.h @@ -0,0 +1,88 @@ +#pragma once + +#include "extensions/filters/network/rocketmq_proxy/active_message.h" +#include "extensions/filters/network/rocketmq_proxy/conn_manager.h" + +#include "test/mocks/upstream/mocks.h" + +#include "gmock/gmock.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RocketmqProxy { + +namespace Router { +class MockRoute; +} // namespace Router + +class MockActiveMessage : public ActiveMessage { +public: + MockActiveMessage(ConnectionManager& conn_manager, RemotingCommandPtr&& request); + ~MockActiveMessage() override; + + MOCK_METHOD(void, createFilterChain, ()); + MOCK_METHOD(void, sendRequestToUpstream, ()); + MOCK_METHOD(RemotingCommandPtr&, downstreamRequest, ()); + MOCK_METHOD(void, sendResponseToDownstream, ()); + MOCK_METHOD(void, onQueryTopicRoute, ()); + MOCK_METHOD(void, onError, (absl::string_view)); + MOCK_METHOD(ConnectionManager&, connectionManager, ()); + MOCK_METHOD(void, onReset, ()); + MOCK_METHOD(bool, onUpstreamData, + (Buffer::Instance&, bool, Tcp::ConnectionPool::ConnectionDataPtr&)); + MOCK_METHOD(MessageMetadataSharedPtr, metadata, (), (const)); + MOCK_METHOD(Router::RouteConstSharedPtr, route, ()); + + std::shared_ptr route_; +}; + +class MockConfig : public Config { +public: + MockConfig(); + ~MockConfig() override = default; + + MOCK_METHOD(RocketmqFilterStats&, stats, ()); + MOCK_METHOD(Upstream::ClusterManager&, clusterManager, ()); + MOCK_METHOD(Router::RouterPtr, createRouter, ()); + MOCK_METHOD(bool, developMode, (), (const)); + MOCK_METHOD(std::string, proxyAddress, ()); + MOCK_METHOD(Router::Config&, routerConfig, ()); + +private: + Stats::IsolatedStoreImpl 
store_; + RocketmqFilterStats stats_; + NiceMock cluster_manager_; + Router::RouterPtr router_; +}; + +namespace Router { + +class MockRouteEntry : public RouteEntry { +public: + MockRouteEntry(); + ~MockRouteEntry() override; + + // RocketmqProxy::Router::RouteEntry + MOCK_METHOD(const std::string&, clusterName, (), (const)); + MOCK_METHOD(Envoy::Router::MetadataMatchCriteria*, metadataMatchCriteria, (), (const)); + + std::string cluster_name_{"fake_cluster"}; +}; + +class MockRoute : public Route { +public: + MockRoute(); + ~MockRoute() override; + + // RocketmqProxy::Router::Route + MOCK_METHOD(const RouteEntry*, routeEntry, (), (const)); + + NiceMock route_entry_; +}; +} // namespace Router + +} // namespace RocketmqProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/test/extensions/filters/network/rocketmq_proxy/protocol_test.cc b/test/extensions/filters/network/rocketmq_proxy/protocol_test.cc new file mode 100644 index 0000000000000..ac2aa63a0d813 --- /dev/null +++ b/test/extensions/filters/network/rocketmq_proxy/protocol_test.cc @@ -0,0 +1,927 @@ +#include "common/protobuf/utility.h" + +#include "extensions/filters/network/rocketmq_proxy/protocol.h" + +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RocketmqProxy { + +class UnregisterClientRequestHeaderTest : public testing::Test { +public: + std::string client_id_{"SampleClient_01"}; + std::string producer_group_{"PG_Example_01"}; + std::string consumer_group_{"CG_001"}; +}; + +TEST_F(UnregisterClientRequestHeaderTest, Encode) { + UnregisterClientRequestHeader request_header; + request_header.clientId(client_id_); + request_header.producerGroup(producer_group_); + request_header.consumerGroup(consumer_group_); + + ProtobufWkt::Value doc; + request_header.encode(doc); + + const auto& members = doc.struct_value().fields(); + EXPECT_STREQ(client_id_.c_str(), 
members.at("clientID").string_value().c_str()); + EXPECT_STREQ(producer_group_.c_str(), members.at("producerGroup").string_value().c_str()); + EXPECT_STREQ(consumer_group_.c_str(), members.at("consumerGroup").string_value().c_str()); +} + +TEST_F(UnregisterClientRequestHeaderTest, Decode) { + + std::string json = R"EOF( + { + "clientID": "SampleClient_01", + "producerGroup": "PG_Example_01", + "consumerGroup": "CG_001" + } + )EOF"; + + ProtobufWkt::Value doc; + MessageUtil::loadFromJson(json, *(doc.mutable_struct_value())); + UnregisterClientRequestHeader unregister_client_request_header; + unregister_client_request_header.decode(doc); + EXPECT_STREQ(client_id_.c_str(), unregister_client_request_header.clientId().c_str()); + EXPECT_STREQ(producer_group_.c_str(), unregister_client_request_header.producerGroup().c_str()); + EXPECT_STREQ(consumer_group_.c_str(), unregister_client_request_header.consumerGroup().c_str()); +} + +TEST(GetConsumerListByGroupResponseBodyTest, Encode) { + GetConsumerListByGroupResponseBody response_body; + response_body.add("localhost@1"); + response_body.add("localhost@2"); + + ProtobufWkt::Struct doc; + response_body.encode(doc); + + const auto& members = doc.fields(); + EXPECT_TRUE(members.contains("consumerIdList")); + EXPECT_EQ(2, members.at("consumerIdList").list_value().values_size()); +} + +class AckMessageRequestHeaderTest : public testing::Test { +public: + std::string consumer_group{"CG_Unit_Test"}; + std::string topic{"T_UnitTest"}; + int32_t queue_id{1}; + std::string extra_info{"extra_info_UT"}; + int64_t offset{100}; +}; + +TEST_F(AckMessageRequestHeaderTest, Encode) { + AckMessageRequestHeader ack_header; + ack_header.consumerGroup(consumer_group); + ack_header.topic(topic); + ack_header.queueId(queue_id); + ack_header.extraInfo(extra_info); + ack_header.offset(offset); + + ProtobufWkt::Value doc; + ack_header.encode(doc); + + const auto& members = doc.struct_value().fields(); + + 
EXPECT_TRUE(members.contains("consumerGroup")); + EXPECT_STREQ(consumer_group.c_str(), members.at("consumerGroup").string_value().c_str()); + + EXPECT_TRUE(members.contains("topic")); + EXPECT_STREQ(topic.c_str(), members.at("topic").string_value().c_str()); + + EXPECT_TRUE(members.contains("queueId")); + EXPECT_EQ(queue_id, members.at("queueId").number_value()); + + EXPECT_TRUE(members.contains("extraInfo")); + EXPECT_STREQ(extra_info.c_str(), members.at("extraInfo").string_value().c_str()); + + EXPECT_TRUE(members.contains("offset")); + EXPECT_EQ(offset, members.at("offset").number_value()); +} + +TEST_F(AckMessageRequestHeaderTest, Decode) { + std::string json = R"EOF( + { + "consumerGroup": "CG_Unit_Test", + "topic": "T_UnitTest", + "queueId": 1, + "extraInfo": "extra_info_UT", + "offset": 100 + } + )EOF"; + + ProtobufWkt::Value doc; + MessageUtil::loadFromJson(json, *(doc.mutable_struct_value())); + + AckMessageRequestHeader ack_header; + ack_header.decode(doc); + ASSERT_STREQ(consumer_group.c_str(), ack_header.consumerGroup().data()); + ASSERT_STREQ(topic.c_str(), ack_header.topic().c_str()); + ASSERT_EQ(queue_id, ack_header.queueId()); + ASSERT_STREQ(extra_info.c_str(), ack_header.extraInfo().data()); + ASSERT_EQ(offset, ack_header.offset()); +} + +TEST_F(AckMessageRequestHeaderTest, DecodeNumSerializedAsString) { + std::string json = R"EOF( + { + "consumerGroup": "CG_Unit_Test", + "topic": "T_UnitTest", + "queueId": "1", + "extraInfo": "extra_info_UT", + "offset": "100" + } + )EOF"; + + ProtobufWkt::Value doc; + MessageUtil::loadFromJson(json, *(doc.mutable_struct_value())); + + AckMessageRequestHeader ack_header; + ack_header.decode(doc); + ASSERT_STREQ(consumer_group.c_str(), ack_header.consumerGroup().data()); + ASSERT_STREQ(topic.c_str(), ack_header.topic().c_str()); + ASSERT_EQ(queue_id, ack_header.queueId()); + ASSERT_STREQ(extra_info.c_str(), ack_header.extraInfo().data()); + ASSERT_EQ(offset, ack_header.offset()); +} + +class 
PopMessageRequestHeaderTest : public testing::Test { +public: + std::string consumer_group{"CG_UT"}; + std::string topic{"T_UT"}; + int32_t queue_id{1}; + int32_t max_msg_nums{2}; + int64_t invisible_time{3}; + int64_t poll_time{4}; + int64_t born_time{5}; + int32_t init_mode{6}; + + std::string exp_type{"exp_type_UT"}; + std::string exp{"exp_UT"}; +}; + +TEST_F(PopMessageRequestHeaderTest, Encode) { + PopMessageRequestHeader pop_request_header; + pop_request_header.consumerGroup(consumer_group); + pop_request_header.topic(topic); + pop_request_header.queueId(queue_id); + pop_request_header.maxMsgNum(max_msg_nums); + pop_request_header.invisibleTime(invisible_time); + pop_request_header.pollTime(poll_time); + pop_request_header.bornTime(born_time); + pop_request_header.initMode(init_mode); + pop_request_header.expType(exp_type); + pop_request_header.exp(exp); + + ProtobufWkt::Value doc; + pop_request_header.encode(doc); + + const auto& members = doc.struct_value().fields(); + + EXPECT_TRUE(members.contains("consumerGroup")); + EXPECT_STREQ(consumer_group.c_str(), members.at("consumerGroup").string_value().c_str()); + + EXPECT_TRUE(members.contains("topic")); + EXPECT_STREQ(topic.c_str(), members.at("topic").string_value().c_str()); + + EXPECT_TRUE(members.contains("queueId")); + EXPECT_EQ(queue_id, members.at("queueId").number_value()); + + EXPECT_TRUE(members.contains("maxMsgNums")); + EXPECT_EQ(max_msg_nums, members.at("maxMsgNums").number_value()); + + EXPECT_TRUE(members.contains("invisibleTime")); + EXPECT_EQ(invisible_time, members.at("invisibleTime").number_value()); + + EXPECT_TRUE(members.contains("pollTime")); + EXPECT_EQ(poll_time, members.at("pollTime").number_value()); + + EXPECT_TRUE(members.contains("bornTime")); + EXPECT_EQ(born_time, members.at("bornTime").number_value()); + + EXPECT_TRUE(members.contains("initMode")); + EXPECT_EQ(init_mode, members.at("initMode").number_value()); + + EXPECT_TRUE(members.contains("expType")); + 
EXPECT_STREQ(exp_type.c_str(), members.at("expType").string_value().c_str()); + + EXPECT_TRUE(members.contains("exp")); + EXPECT_STREQ(exp.c_str(), members.at("exp").string_value().c_str()); +} + +TEST_F(PopMessageRequestHeaderTest, Decode) { + std::string json = R"EOF( + { + "consumerGroup": "CG_UT", + "topic": "T_UT", + "queueId": 1, + "maxMsgNums": 2, + "invisibleTime": 3, + "pollTime": 4, + "bornTime": 5, + "initMode": 6, + "expType": "exp_type_UT", + "exp": "exp_UT" + } + )EOF"; + + ProtobufWkt::Value doc; + MessageUtil::loadFromJson(json, *(doc.mutable_struct_value())); + PopMessageRequestHeader pop_request_header; + pop_request_header.decode(doc); + + ASSERT_STREQ(consumer_group.c_str(), pop_request_header.consumerGroup().data()); + ASSERT_STREQ(topic.c_str(), pop_request_header.topic().c_str()); + ASSERT_EQ(queue_id, pop_request_header.queueId()); + ASSERT_EQ(max_msg_nums, pop_request_header.maxMsgNum()); + ASSERT_EQ(invisible_time, pop_request_header.invisibleTime()); + ASSERT_EQ(poll_time, pop_request_header.pollTime()); + ASSERT_EQ(born_time, pop_request_header.bornTime()); + ASSERT_EQ(init_mode, pop_request_header.initMode()); + ASSERT_STREQ(exp_type.c_str(), pop_request_header.expType().c_str()); + ASSERT_STREQ(exp.c_str(), pop_request_header.exp().c_str()); +} + +TEST_F(PopMessageRequestHeaderTest, DecodeNumSerializedAsString) { + std::string json = R"EOF( + { + "consumerGroup": "CG_UT", + "topic": "T_UT", + "queueId": "1", + "maxMsgNums": "2", + "invisibleTime": "3", + "pollTime": "4", + "bornTime": "5", + "initMode": "6", + "expType": "exp_type_UT", + "exp": "exp_UT" + } + )EOF"; + + ProtobufWkt::Value doc; + MessageUtil::loadFromJson(json, *(doc.mutable_struct_value())); + PopMessageRequestHeader pop_request_header; + pop_request_header.decode(doc); + + ASSERT_STREQ(consumer_group.c_str(), pop_request_header.consumerGroup().data()); + ASSERT_STREQ(topic.c_str(), pop_request_header.topic().c_str()); + ASSERT_EQ(queue_id, 
pop_request_header.queueId()); + ASSERT_EQ(max_msg_nums, pop_request_header.maxMsgNum()); + ASSERT_EQ(invisible_time, pop_request_header.invisibleTime()); + ASSERT_EQ(poll_time, pop_request_header.pollTime()); + ASSERT_EQ(born_time, pop_request_header.bornTime()); + ASSERT_EQ(init_mode, pop_request_header.initMode()); + ASSERT_STREQ(exp_type.c_str(), pop_request_header.expType().c_str()); + ASSERT_STREQ(exp.c_str(), pop_request_header.exp().c_str()); +} + +class PopMessageResponseHeaderTest : public testing::Test { +public: + int64_t pop_time{1}; + int64_t invisible_time{2}; + int32_t revive_qid{3}; + int64_t rest_num{4}; + + std::string start_offset_info{"start"}; + std::string msg_offset_info{"msg"}; + std::string order_count_info{"order"}; +}; + +TEST_F(PopMessageResponseHeaderTest, Encode) { + PopMessageResponseHeader pop_response_header; + pop_response_header.popTime(pop_time); + pop_response_header.invisibleTime(invisible_time); + pop_response_header.reviveQid(revive_qid); + pop_response_header.restNum(rest_num); + pop_response_header.startOffsetInfo(start_offset_info); + pop_response_header.msgOffsetInfo(msg_offset_info); + pop_response_header.orderCountInfo(order_count_info); + + ProtobufWkt::Value doc; + pop_response_header.encode(doc); + + const auto& members = doc.struct_value().fields(); + + EXPECT_TRUE(members.contains("popTime")); + EXPECT_TRUE(members.contains("invisibleTime")); + EXPECT_TRUE(members.contains("reviveQid")); + EXPECT_TRUE(members.contains("restNum")); + EXPECT_TRUE(members.contains("startOffsetInfo")); + EXPECT_TRUE(members.contains("msgOffsetInfo")); + EXPECT_TRUE(members.contains("orderCountInfo")); + + EXPECT_EQ(pop_time, members.at("popTime").number_value()); + EXPECT_EQ(invisible_time, members.at("invisibleTime").number_value()); + EXPECT_EQ(revive_qid, members.at("reviveQid").number_value()); + EXPECT_EQ(rest_num, members.at("restNum").number_value()); + EXPECT_STREQ(start_offset_info.c_str(), 
members.at("startOffsetInfo").string_value().c_str()); + EXPECT_STREQ(msg_offset_info.c_str(), members.at("msgOffsetInfo").string_value().c_str()); + EXPECT_STREQ(order_count_info.c_str(), members.at("orderCountInfo").string_value().c_str()); +} + +TEST_F(PopMessageResponseHeaderTest, Decode) { + std::string json = R"EOF( + { + "popTime": 1, + "invisibleTime": 2, + "reviveQid": 3, + "restNum": 4, + "startOffsetInfo": "start", + "msgOffsetInfo": "msg", + "orderCountInfo": "order" + } + )EOF"; + + ProtobufWkt::Value doc; + MessageUtil::loadFromJson(json, *(doc.mutable_struct_value())); + + PopMessageResponseHeader header; + header.decode(doc); + + EXPECT_EQ(pop_time, header.popTimeForTest()); + EXPECT_EQ(invisible_time, header.invisibleTime()); + EXPECT_EQ(revive_qid, header.reviveQid()); + EXPECT_EQ(rest_num, header.restNum()); + + EXPECT_STREQ(start_offset_info.c_str(), header.startOffsetInfo().data()); + EXPECT_STREQ(msg_offset_info.c_str(), header.msgOffsetInfo().data()); + EXPECT_STREQ(order_count_info.c_str(), header.orderCountInfo().data()); +} + +TEST_F(PopMessageResponseHeaderTest, DecodeNumSerializedAsString) { + std::string json = R"EOF( + { + "popTime": "1", + "invisibleTime": "2", + "reviveQid": "3", + "restNum": "4", + "startOffsetInfo": "start", + "msgOffsetInfo": "msg", + "orderCountInfo": "order" + } + )EOF"; + + ProtobufWkt::Value doc; + MessageUtil::loadFromJson(json, *(doc.mutable_struct_value())); + + PopMessageResponseHeader header; + header.decode(doc); + + EXPECT_EQ(pop_time, header.popTimeForTest()); + EXPECT_EQ(invisible_time, header.invisibleTime()); + EXPECT_EQ(revive_qid, header.reviveQid()); + EXPECT_EQ(rest_num, header.restNum()); + + EXPECT_STREQ(start_offset_info.c_str(), header.startOffsetInfo().data()); + EXPECT_STREQ(msg_offset_info.c_str(), header.msgOffsetInfo().data()); + EXPECT_STREQ(order_count_info.c_str(), header.orderCountInfo().data()); +} + +class SendMessageResponseHeaderTest : public testing::Test { +public: + 
SendMessageResponseHeader response_header_; +}; + +TEST_F(SendMessageResponseHeaderTest, Encode) { + response_header_.msgIdForTest("MSG_ID_01"); + response_header_.queueId(1); + response_header_.queueOffset(100); + response_header_.transactionId("TX_01"); + ProtobufWkt::Value doc; + response_header_.encode(doc); + + const auto& members = doc.struct_value().fields(); + EXPECT_TRUE(members.contains("msgId")); + EXPECT_TRUE(members.contains("queueId")); + EXPECT_TRUE(members.contains("queueOffset")); + EXPECT_TRUE(members.contains("transactionId")); + + EXPECT_STREQ("MSG_ID_01", members.at("msgId").string_value().c_str()); + EXPECT_STREQ("TX_01", members.at("transactionId").string_value().c_str()); + EXPECT_EQ(1, members.at("queueId").number_value()); + EXPECT_EQ(100, members.at("queueOffset").number_value()); +} + +TEST_F(SendMessageResponseHeaderTest, Decode) { + std::string json = R"EOF( + { + "msgId": "abc", + "queueId": 1, + "queueOffset": 10, + "transactionId": "TX_1" + } + )EOF"; + ProtobufWkt::Value doc; + MessageUtil::loadFromJson(json, *(doc.mutable_struct_value())); + response_header_.decode(doc); + EXPECT_STREQ("abc", response_header_.msgId().c_str()); + EXPECT_EQ(1, response_header_.queueId()); + EXPECT_EQ(10, response_header_.queueOffset()); + EXPECT_STREQ("TX_1", response_header_.transactionId().c_str()); +} + +TEST_F(SendMessageResponseHeaderTest, DecodeNumSerializedAsString) { + std::string json = R"EOF( + { + "msgId": "abc", + "queueId": "1", + "queueOffset": "10", + "transactionId": "TX_1" + } + )EOF"; + ProtobufWkt::Value doc; + MessageUtil::loadFromJson(json, *(doc.mutable_struct_value())); + response_header_.decode(doc); + EXPECT_STREQ("abc", response_header_.msgId().c_str()); + EXPECT_EQ(1, response_header_.queueId()); + EXPECT_EQ(10, response_header_.queueOffset()); + EXPECT_STREQ("TX_1", response_header_.transactionId().c_str()); +} + +class SendMessageRequestHeaderTest : public testing::Test {}; + +TEST_F(SendMessageRequestHeaderTest, 
EncodeDefault) { + SendMessageRequestHeader header; + ProtobufWkt::Value doc; + header.encode(doc); + const auto& members = doc.struct_value().fields(); + EXPECT_TRUE(members.contains("producerGroup")); + EXPECT_TRUE(members.contains("topic")); + EXPECT_TRUE(members.contains("defaultTopic")); + EXPECT_TRUE(members.contains("defaultTopicQueueNums")); + EXPECT_TRUE(members.contains("queueId")); + EXPECT_TRUE(members.contains("sysFlag")); + EXPECT_TRUE(members.contains("bornTimestamp")); + EXPECT_TRUE(members.contains("flag")); + EXPECT_FALSE(members.contains("properties")); + EXPECT_FALSE(members.contains("reconsumeTimes")); + EXPECT_FALSE(members.contains("unitMode")); + EXPECT_FALSE(members.contains("batch")); + EXPECT_FALSE(members.contains("maxReconsumeTimes")); +} + +TEST_F(SendMessageRequestHeaderTest, EncodeOptional) { + SendMessageRequestHeader header; + header.properties("mock"); + header.reconsumeTimes(1); + header.unitMode(true); + header.batch(true); + header.maxReconsumeTimes(32); + ProtobufWkt::Value doc; + header.encode(doc); + const auto& members = doc.struct_value().fields(); + EXPECT_TRUE(members.contains("producerGroup")); + EXPECT_TRUE(members.contains("topic")); + EXPECT_TRUE(members.contains("defaultTopic")); + EXPECT_TRUE(members.contains("defaultTopicQueueNums")); + EXPECT_TRUE(members.contains("queueId")); + EXPECT_TRUE(members.contains("sysFlag")); + EXPECT_TRUE(members.contains("bornTimestamp")); + EXPECT_TRUE(members.contains("flag")); + EXPECT_TRUE(members.contains("properties")); + EXPECT_TRUE(members.contains("reconsumeTimes")); + EXPECT_TRUE(members.contains("unitMode")); + EXPECT_TRUE(members.contains("batch")); + EXPECT_TRUE(members.contains("maxReconsumeTimes")); + + EXPECT_STREQ("mock", members.at("properties").string_value().c_str()); + EXPECT_EQ(1, members.at("reconsumeTimes").number_value()); + EXPECT_TRUE(members.at("unitMode").bool_value()); + EXPECT_TRUE(members.at("batch").bool_value()); + EXPECT_EQ(32, 
members.at("maxReconsumeTimes").number_value()); +} + +TEST_F(SendMessageRequestHeaderTest, EncodeDefaultV2) { + SendMessageRequestHeader header; + header.version(SendMessageRequestVersion::V2); + ProtobufWkt::Value doc; + header.encode(doc); + const auto& members = doc.struct_value().fields(); + EXPECT_TRUE(members.contains("a")); + EXPECT_TRUE(members.contains("b")); + EXPECT_TRUE(members.contains("c")); + EXPECT_TRUE(members.contains("d")); + EXPECT_TRUE(members.contains("e")); + EXPECT_TRUE(members.contains("f")); + EXPECT_TRUE(members.contains("g")); + EXPECT_TRUE(members.contains("h")); + EXPECT_FALSE(members.contains("i")); + EXPECT_FALSE(members.contains("j")); + EXPECT_FALSE(members.contains("k")); + EXPECT_FALSE(members.contains("l")); + EXPECT_FALSE(members.contains("m")); +} + +TEST_F(SendMessageRequestHeaderTest, EncodeOptionalV2) { + SendMessageRequestHeader header; + header.properties("mock"); + header.reconsumeTimes(1); + header.unitMode(true); + header.batch(true); + header.maxReconsumeTimes(32); + header.version(SendMessageRequestVersion::V2); + ProtobufWkt::Value doc; + header.encode(doc); + + const auto& members = doc.struct_value().fields(); + EXPECT_TRUE(members.contains("a")); + EXPECT_TRUE(members.contains("b")); + EXPECT_TRUE(members.contains("c")); + EXPECT_TRUE(members.contains("d")); + EXPECT_TRUE(members.contains("e")); + EXPECT_TRUE(members.contains("f")); + EXPECT_TRUE(members.contains("g")); + EXPECT_TRUE(members.contains("h")); + EXPECT_TRUE(members.contains("i")); + EXPECT_TRUE(members.contains("j")); + EXPECT_TRUE(members.contains("k")); + EXPECT_TRUE(members.contains("l")); + EXPECT_TRUE(members.contains("m")); + + EXPECT_STREQ("mock", members.at("i").string_value().c_str()); + EXPECT_EQ(1, members.at("j").number_value()); + EXPECT_TRUE(members.at("k").bool_value()); + EXPECT_TRUE(members.at("m").bool_value()); + EXPECT_EQ(32, members.at("l").number_value()); +} + +TEST_F(SendMessageRequestHeaderTest, EncodeV3) { + 
SendMessageRequestHeader header; + header.version(SendMessageRequestVersion::V3); + ProtobufWkt::Value doc; + header.encode(doc); +} + +TEST_F(SendMessageRequestHeaderTest, DecodeV1) { + std::string json = R"EOF( + { + "batch": false, + "bornTimestamp": 1575872212297, + "defaultTopic": "TBW102", + "defaultTopicQueueNums": 3, + "flag": 124, + "producerGroup": "FooBarGroup", + "queueId": 1, + "reconsumeTimes": 0, + "sysFlag": 0, + "topic": "FooBar", + "unitMode": false + } + )EOF"; + + SendMessageRequestHeader header; + ProtobufWkt::Value doc; + MessageUtil::loadFromJson(json, *(doc.mutable_struct_value())); + header.decode(doc); + EXPECT_STREQ("FooBar", header.topic().c_str()); + EXPECT_EQ(1, header.queueId()); + EXPECT_STREQ("FooBarGroup", header.producerGroup().c_str()); + EXPECT_STREQ("TBW102", header.defaultTopic().c_str()); + EXPECT_EQ(3, header.defaultTopicQueueNumber()); + EXPECT_EQ(0, header.sysFlag()); + EXPECT_EQ(1575872212297, header.bornTimestamp()); + EXPECT_EQ(124, header.flag()); + EXPECT_STREQ("", header.properties().c_str()); + EXPECT_EQ(0, header.reconsumeTimes()); + EXPECT_FALSE(header.unitMode()); + EXPECT_FALSE(header.batch()); + EXPECT_EQ(0, header.maxReconsumeTimes()); +} + +TEST_F(SendMessageRequestHeaderTest, DecodeV1Optional) { + std::string json = R"EOF( + { + "batch": false, + "bornTimestamp": 1575872212297, + "defaultTopic": "TBW102", + "defaultTopicQueueNums": 3, + "flag": 124, + "producerGroup": "FooBarGroup", + "queueId": 1, + "reconsumeTimes": 0, + "sysFlag": 0, + "topic": "FooBar", + "unitMode": false, + "properties": "mock_properties", + "maxReconsumeTimes": 32 + } + )EOF"; + + SendMessageRequestHeader header; + ProtobufWkt::Value doc; + MessageUtil::loadFromJson(json, *(doc.mutable_struct_value())); + header.decode(doc); + EXPECT_STREQ("FooBar", header.topic().c_str()); + EXPECT_EQ(1, header.queueId()); + EXPECT_STREQ("FooBarGroup", header.producerGroup().c_str()); + EXPECT_STREQ("TBW102", header.defaultTopic().c_str()); + 
EXPECT_EQ(3, header.defaultTopicQueueNumber()); + EXPECT_EQ(0, header.sysFlag()); + EXPECT_EQ(1575872212297, header.bornTimestamp()); + EXPECT_EQ(124, header.flag()); + EXPECT_STREQ("mock_properties", header.properties().c_str()); + EXPECT_EQ(0, header.reconsumeTimes()); + EXPECT_FALSE(header.unitMode()); + EXPECT_FALSE(header.batch()); + EXPECT_EQ(32, header.maxReconsumeTimes()); +} + +TEST_F(SendMessageRequestHeaderTest, DecodeV1OptionalNumSerializedAsString) { + std::string json = R"EOF( + { + "batch": "false", + "bornTimestamp": "1575872212297", + "defaultTopic": "TBW102", + "defaultTopicQueueNums": "3", + "flag": "124", + "producerGroup": "FooBarGroup", + "queueId": "1", + "reconsumeTimes": "0", + "sysFlag": "0", + "topic": "FooBar", + "unitMode": "false", + "properties": "mock_properties", + "maxReconsumeTimes": "32" + } + )EOF"; + + SendMessageRequestHeader header; + ProtobufWkt::Value doc; + MessageUtil::loadFromJson(json, *(doc.mutable_struct_value())); + header.decode(doc); + EXPECT_STREQ("FooBar", header.topic().c_str()); + EXPECT_EQ(1, header.queueId()); + EXPECT_STREQ("FooBarGroup", header.producerGroup().c_str()); + EXPECT_STREQ("TBW102", header.defaultTopic().c_str()); + EXPECT_EQ(3, header.defaultTopicQueueNumber()); + EXPECT_EQ(0, header.sysFlag()); + EXPECT_EQ(1575872212297, header.bornTimestamp()); + EXPECT_EQ(124, header.flag()); + EXPECT_STREQ("mock_properties", header.properties().c_str()); + EXPECT_EQ(0, header.reconsumeTimes()); + EXPECT_FALSE(header.unitMode()); + EXPECT_FALSE(header.batch()); + EXPECT_EQ(32, header.maxReconsumeTimes()); +} + +TEST_F(SendMessageRequestHeaderTest, DecodeV2) { + std::string json = R"EOF( + { + "a": "FooBarGroup", + "b": "FooBar", + "c": "TBW102", + "d": 3, + "e": 1, + "f": 0, + "g": 1575872563203, + "h": 124, + "j": 0, + "k": false, + "m": false + } + )EOF"; + + SendMessageRequestHeader header; + header.version(SendMessageRequestVersion::V2); + ProtobufWkt::Value doc; + MessageUtil::loadFromJson(json, 
*(doc.mutable_struct_value())); + header.decode(doc); + EXPECT_STREQ("FooBar", header.topic().c_str()); + EXPECT_EQ(1, header.queueId()); + EXPECT_STREQ("FooBarGroup", header.producerGroup().c_str()); + EXPECT_STREQ("TBW102", header.defaultTopic().c_str()); + EXPECT_EQ(3, header.defaultTopicQueueNumber()); + EXPECT_EQ(0, header.sysFlag()); + EXPECT_EQ(1575872563203, header.bornTimestamp()); + EXPECT_EQ(124, header.flag()); + EXPECT_STREQ("", header.properties().c_str()); + EXPECT_EQ(0, header.reconsumeTimes()); + EXPECT_FALSE(header.unitMode()); + EXPECT_FALSE(header.batch()); + EXPECT_EQ(0, header.maxReconsumeTimes()); +} + +TEST_F(SendMessageRequestHeaderTest, DecodeV2Optional) { + std::string json = R"EOF( + { + "a": "FooBarGroup", + "b": "FooBar", + "c": "TBW102", + "d": 3, + "e": 1, + "f": 0, + "g": 1575872563203, + "h": 124, + "i": "mock_properties", + "j": 0, + "k": false, + "l": 1, + "m": false + } + )EOF"; + + SendMessageRequestHeader header; + header.version(SendMessageRequestVersion::V2); + ProtobufWkt::Value doc; + MessageUtil::loadFromJson(json, *(doc.mutable_struct_value())); + header.decode(doc); + EXPECT_STREQ("FooBar", header.topic().c_str()); + EXPECT_EQ(1, header.queueId()); + EXPECT_STREQ("FooBarGroup", header.producerGroup().c_str()); + EXPECT_STREQ("TBW102", header.defaultTopic().c_str()); + EXPECT_EQ(3, header.defaultTopicQueueNumber()); + EXPECT_EQ(0, header.sysFlag()); + EXPECT_EQ(1575872563203, header.bornTimestamp()); + EXPECT_EQ(124, header.flag()); + EXPECT_STREQ("mock_properties", header.properties().c_str()); + EXPECT_EQ(0, header.reconsumeTimes()); + EXPECT_FALSE(header.unitMode()); + EXPECT_FALSE(header.batch()); + EXPECT_EQ(1, header.maxReconsumeTimes()); +} + +TEST_F(SendMessageRequestHeaderTest, DecodeV2OptionalNumSerializedAsString) { + std::string json = R"EOF( + { + "a": "FooBarGroup", + "b": "FooBar", + "c": "TBW102", + "d": "3", + "e": "1", + "f": "0", + "g": "1575872563203", + "h": "124", + "i": "mock_properties", + "j": 
"0", + "k": "false", + "l": "1", + "m": "false" + } + )EOF"; + + SendMessageRequestHeader header; + header.version(SendMessageRequestVersion::V2); + ProtobufWkt::Value doc; + MessageUtil::loadFromJson(json, *(doc.mutable_struct_value())); + header.decode(doc); + EXPECT_STREQ("FooBar", header.topic().c_str()); + EXPECT_EQ(1, header.queueId()); + EXPECT_STREQ("FooBarGroup", header.producerGroup().c_str()); + EXPECT_STREQ("TBW102", header.defaultTopic().c_str()); + EXPECT_EQ(3, header.defaultTopicQueueNumber()); + EXPECT_EQ(0, header.sysFlag()); + EXPECT_EQ(1575872563203, header.bornTimestamp()); + EXPECT_EQ(124, header.flag()); + EXPECT_STREQ("mock_properties", header.properties().c_str()); + EXPECT_EQ(0, header.reconsumeTimes()); + EXPECT_FALSE(header.unitMode()); + EXPECT_FALSE(header.batch()); + EXPECT_EQ(1, header.maxReconsumeTimes()); +} + +TEST_F(SendMessageRequestHeaderTest, DecodeV3) { + std::string json = R"EOF( + { + "batch": false, + "bornTimestamp": 1575872212297, + "defaultTopic": "TBW102", + "defaultTopicQueueNums": 3, + "flag": 124, + "producerGroup": "FooBarGroup", + "queueId": 1, + "reconsumeTimes": 0, + "sysFlag": 0, + "topic": "FooBar", + "unitMode": false + } + )EOF"; + + SendMessageRequestHeader header; + ProtobufWkt::Value doc; + MessageUtil::loadFromJson(json, *(doc.mutable_struct_value())); + header.version(SendMessageRequestVersion::V3); + header.decode(doc); +} + +class HeartbeatDataTest : public testing::Test { +public: + HeartbeatData data_; +}; + +TEST_F(HeartbeatDataTest, Decoding) { + std::string json = R"EOF( + { + "clientID": "127.0.0.1@23606", + "consumerDataSet": [ + { + "consumeFromWhere": "CONSUME_FROM_LAST_OFFSET", + "consumeType": "CONSUME_ACTIVELY", + "groupName": "please_rename_unique_group_name_4", + "messageModel": "CLUSTERING", + "subscriptionDataSet": [ + { + "classFilterMode": false, + "codeSet": [], + "expressionType": "TAG", + "subString": "*", + "subVersion": 0, + "tagsSet": [], + "topic": "test_topic" + } + ], + 
"unitMode": false + } + ], + "producerDataSet": [ + { + "groupName": "CLIENT_INNER_PRODUCER" + } + ] + } + )EOF"; + + const char* clientId = "127.0.0.1@23606"; + const char* consumerGroup = "please_rename_unique_group_name_4"; + + HeartbeatData heart_beat_data; + ProtobufWkt::Struct doc; + MessageUtil::loadFromJson(json, doc); + + heart_beat_data.decode(doc); + EXPECT_STREQ(clientId, heart_beat_data.clientId().c_str()); + EXPECT_EQ(1, heart_beat_data.consumerGroups().size()); + EXPECT_STREQ(consumerGroup, heart_beat_data.consumerGroups()[0].c_str()); +} + +TEST_F(HeartbeatDataTest, DecodeClientIdMissing) { + std::string json = R"EOF( + { + "consumerDataSet": [ + { + "consumeFromWhere": "CONSUME_FROM_LAST_OFFSET", + "consumeType": "CONSUME_ACTIVELY", + "groupName": "please_rename_unique_group_name_4", + "messageModel": "CLUSTERING", + "subscriptionDataSet": [ + { + "classFilterMode": false, + "codeSet": [], + "expressionType": "TAG", + "subString": "*", + "subVersion": 0, + "tagsSet": [], + "topic": "test_topic" + } + ], + "unitMode": false + } + ], + "producerDataSet": [ + { + "groupName": "CLIENT_INNER_PRODUCER" + } + ] + } + )EOF"; + + ProtobufWkt::Struct doc; + MessageUtil::loadFromJson(json, doc); + EXPECT_FALSE(data_.decode(doc)); +} + +TEST_F(HeartbeatDataTest, Encode) { + data_.clientId("CID_01"); + ProtobufWkt::Struct doc; + data_.encode(doc); + const auto& members = doc.fields(); + EXPECT_TRUE(members.contains("clientID")); + EXPECT_STREQ("CID_01", members.at("clientID").string_value().c_str()); +} + +class RemotingCommandTest : public testing::Test { +public: + RemotingCommand cmd_; +}; + +TEST_F(RemotingCommandTest, FlagResponse) { + cmd_.markAsResponse(); + EXPECT_EQ(1, cmd_.flag()); +} + +TEST_F(RemotingCommandTest, FlagOneway) { + cmd_.markAsOneway(); + EXPECT_EQ(2, cmd_.flag()); +} + +TEST_F(RemotingCommandTest, Remark) { + const char* remark = "OK"; + cmd_.remark(remark); + EXPECT_STREQ(remark, cmd_.remark().c_str()); +} + +} // namespace 
RocketmqProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/test/extensions/filters/network/rocketmq_proxy/route_matcher_test.cc b/test/extensions/filters/network/rocketmq_proxy/route_matcher_test.cc new file mode 100644 index 0000000000000..947e67481f9bc --- /dev/null +++ b/test/extensions/filters/network/rocketmq_proxy/route_matcher_test.cc @@ -0,0 +1,74 @@ +#include "envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.pb.h" +#include "envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.pb.validate.h" +#include "envoy/extensions/filters/network/rocketmq_proxy/v3/route.pb.h" +#include "envoy/extensions/filters/network/rocketmq_proxy/v3/route.pb.validate.h" + +#include "extensions/filters/network/rocketmq_proxy/metadata.h" +#include "extensions/filters/network/rocketmq_proxy/router/route_matcher.h" + +#include "test/test_common/utility.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RocketmqProxy { +namespace Router { + +using RouteConfigurationProto = + envoy::extensions::filters::network::rocketmq_proxy::v3::RouteConfiguration; + +RouteConfigurationProto parseRouteConfigurationFromV2Yaml(const std::string& yaml) { + RouteConfigurationProto route_config; + TestUtility::loadFromYaml(yaml, route_config); + TestUtility::validate(route_config); + return route_config; +} + +TEST(RocketmqRouteMatcherTest, RouteWithHeaders) { + const std::string yaml = R"EOF( +name: default_route +routes: + - match: + topic: + exact: test_topic + headers: + - name: code + exact_match: '310' + route: + cluster: fake_cluster + metadata_match: + filter_metadata: + envoy.lb: + k1: v1 +)EOF"; + + RouteConfigurationProto config = parseRouteConfigurationFromV2Yaml(yaml); + + MessageMetadata metadata; + std::string topic_name = "test_topic"; + metadata.setTopicName(topic_name); + uint64_t code = 310; + 
metadata.headers().addCopy(Http::LowerCaseString("code"), code); + RouteMatcher matcher(config); + const Envoy::Router::MetadataMatchCriteria* criteria = + matcher.route(metadata)->routeEntry()->metadataMatchCriteria(); + const std::vector& mmc = + criteria->metadataMatchCriteria(); + + ProtobufWkt::Value v1; + v1.set_string_value("v1"); + HashedValue hv1(v1); + + EXPECT_EQ(1, mmc.size()); + EXPECT_EQ("k1", mmc[0]->name()); + EXPECT_EQ(hv1, mmc[0]->value()); +} + +} // namespace Router +} // namespace RocketmqProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/network/rocketmq_proxy/router_test.cc b/test/extensions/filters/network/rocketmq_proxy/router_test.cc new file mode 100644 index 0000000000000..6a5c3c2336b2b --- /dev/null +++ b/test/extensions/filters/network/rocketmq_proxy/router_test.cc @@ -0,0 +1,470 @@ +#include "extensions/filters/network/rocketmq_proxy/config.h" +#include "extensions/filters/network/rocketmq_proxy/conn_manager.h" +#include "extensions/filters/network/rocketmq_proxy/router/router.h" +#include "extensions/filters/network/rocketmq_proxy/well_known_names.h" + +#include "test/extensions/filters/network/rocketmq_proxy/mocks.h" +#include "test/extensions/filters/network/rocketmq_proxy/utility.h" +#include "test/mocks/server/factory_context.h" + +#include "gtest/gtest.h" + +using testing::_; +using testing::ContainsRegex; +using testing::Return; + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RocketmqProxy { +namespace Router { + +class RocketmqRouterTestBase { +public: + RocketmqRouterTestBase() + : config_(rocketmq_proxy_config_, context_), + cluster_info_(std::make_shared()) { + conn_manager_ = + std::make_unique(config_, context_.dispatcher().timeSource()); + conn_manager_->initializeReadFilterCallbacks(filter_callbacks_); + } + + ~RocketmqRouterTestBase() { filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList(); } + + 
void initializeRouter() { + router_ = std::make_unique(context_.clusterManager()); + EXPECT_EQ(nullptr, router_->downstreamConnection()); + } + + void initSendMessageRequest(std::string topic_name = "test_topic", bool is_oneway = false) { + RemotingCommandPtr request = std::make_unique(); + request->code(static_cast(RequestCode::SendMessageV2)); + if (is_oneway) { + request->flag(2); + } + SendMessageRequestHeader* header = new SendMessageRequestHeader(); + absl::string_view t = topic_name; + header->topic(t); + CommandCustomHeaderPtr custom_header(header); + request->customHeader(custom_header); + active_message_ = + std::make_unique>(*conn_manager_, std::move(request)); + + // Not yet implemented: + EXPECT_EQ(nullptr, router_->metadataMatchCriteria()); + } + + void initPopMessageRequest() { + Buffer::OwnedImpl buffer; + BufferUtility::fillRequestBuffer(buffer, RequestCode::PopMessage); + + bool underflow = false; + bool has_error = false; + + RemotingCommandPtr request = Decoder::decode(buffer, underflow, has_error); + + active_message_ = + std::make_unique>(*conn_manager_, std::move(request)); + } + + void initAckMessageRequest() { + Buffer::OwnedImpl buffer; + BufferUtility::fillRequestBuffer(buffer, RequestCode::AckMessage); + + bool underflow = false; + bool has_error = false; + + RemotingCommandPtr request = Decoder::decode(buffer, underflow, has_error); + + active_message_ = + std::make_unique>(*conn_manager_, std::move(request)); + } + + void initOneWayAckMessageRequest() { + RemotingCommandPtr request = std::make_unique(); + request->code(static_cast(RequestCode::AckMessage)); + request->flag(2); + std::unique_ptr header = std::make_unique(); + header->consumerGroup("test_cg"); + header->topic("test_topic"); + header->queueId(0); + header->extraInfo("test_extra"); + header->offset(1); + CommandCustomHeaderPtr ptr(header.release()); + request->customHeader(ptr); + active_message_ = + std::make_unique>(*conn_manager_, std::move(request)); + } + + void 
startRequest() { router_->sendRequestToUpstream(*active_message_); } + + void connectUpstream() { + context_.cluster_manager_.tcp_conn_pool_.poolReady(upstream_connection_); + } + + void startRequestWithExistingConnection() { + EXPECT_CALL(context_.cluster_manager_.tcp_conn_pool_, newConnection(_)) + .WillOnce( + Invoke([&](Tcp::ConnectionPool::Callbacks& cb) -> Tcp::ConnectionPool::Cancellable* { + context_.cluster_manager_.tcp_conn_pool_.newConnectionImpl(cb); + context_.cluster_manager_.tcp_conn_pool_.poolReady(upstream_connection_); + return nullptr; + })); + router_->sendRequestToUpstream(*active_message_); + } + + void receiveEmptyResponse() { + Buffer::OwnedImpl buffer; + router_->onAboveWriteBufferHighWatermark(); + router_->onBelowWriteBufferLowWatermark(); + router_->onUpstreamData(buffer, false); + } + + void receiveSendMessageResponse(bool end_stream) { + Buffer::OwnedImpl buffer; + BufferUtility::fillResponseBuffer(buffer, RequestCode::SendMessageV2, ResponseCode::Success); + router_->onUpstreamData(buffer, end_stream); + } + + void receivePopMessageResponse() { + Buffer::OwnedImpl buffer; + BufferUtility::fillResponseBuffer(buffer, RequestCode::PopMessage, ResponseCode::Success); + router_->onUpstreamData(buffer, false); + } + + void receiveAckMessageResponse() { + Buffer::OwnedImpl buffer; + BufferUtility::fillResponseBuffer(buffer, RequestCode::AckMessage, ResponseCode::Success); + router_->onUpstreamData(buffer, false); + } + + NiceMock filter_callbacks_; + NiceMock context_; + ConfigImpl::RocketmqProxyConfig rocketmq_proxy_config_; + ConfigImpl config_; + std::unique_ptr conn_manager_; + + std::unique_ptr router_; + + std::unique_ptr> active_message_; + NiceMock upstream_connection_; + + std::shared_ptr cluster_info_; + NiceMock thread_local_cluster_; +}; + +class RocketmqRouterTest : public RocketmqRouterTestBase, public testing::Test {}; + +TEST_F(RocketmqRouterTest, PoolRemoteConnectionFailure) { + initializeRouter(); + 
initSendMessageRequest(); + + EXPECT_CALL(*active_message_, onError(_)) + .Times(1) + .WillOnce(Invoke([&](absl::string_view error_message) -> void { + EXPECT_THAT(error_message, ContainsRegex(".*remote connection failure*.")); + })); + + startRequest(); + context_.cluster_manager_.tcp_conn_pool_.poolFailure( + Tcp::ConnectionPool::PoolFailureReason::RemoteConnectionFailure); +} + +TEST_F(RocketmqRouterTest, PoolTimeout) { + initializeRouter(); + initSendMessageRequest(); + + EXPECT_CALL(*active_message_, onError(_)) + .Times(1) + .WillOnce(Invoke([&](absl::string_view error_message) -> void { + EXPECT_THAT(error_message, ContainsRegex(".*timeout*.")); + })); + EXPECT_CALL(*active_message_, onReset()); + + startRequest(); + context_.cluster_manager_.tcp_conn_pool_.poolFailure( + Tcp::ConnectionPool::PoolFailureReason::Timeout); +} + +TEST_F(RocketmqRouterTest, PoolLocalConnectionFailure) { + initializeRouter(); + initSendMessageRequest(); + + EXPECT_CALL(*active_message_, onError(_)) + .Times(1) + .WillOnce(Invoke([&](absl::string_view error_message) -> void { + EXPECT_THAT(error_message, ContainsRegex(".*local connection failure*.")); + })); + EXPECT_CALL(*active_message_, onReset()); + + startRequest(); + context_.cluster_manager_.tcp_conn_pool_.poolFailure( + Tcp::ConnectionPool::PoolFailureReason::LocalConnectionFailure); +} + +TEST_F(RocketmqRouterTest, PoolOverflowFailure) { + initializeRouter(); + initSendMessageRequest(); + + EXPECT_CALL(*active_message_, onError(_)) + .Times(1) + .WillOnce(Invoke([&](absl::string_view error_message) -> void { + EXPECT_THAT(error_message, ContainsRegex(".*overflow*.")); + })); + EXPECT_CALL(*active_message_, onReset()); + + startRequest(); + context_.cluster_manager_.tcp_conn_pool_.poolFailure( + Tcp::ConnectionPool::PoolFailureReason::Overflow); +} + +TEST_F(RocketmqRouterTest, ClusterMaintenanceMode) { + initializeRouter(); + initSendMessageRequest(); + + EXPECT_CALL(*active_message_, onError(_)) + .Times(1) + 
.WillOnce(Invoke([&](absl::string_view error_message) -> void { + EXPECT_THAT(error_message, ContainsRegex(".*Cluster under maintenance*.")); + })); + EXPECT_CALL(*context_.cluster_manager_.thread_local_cluster_.cluster_.info_, maintenanceMode()) + .WillOnce(Return(true)); + EXPECT_CALL(*active_message_, onReset()); + + startRequest(); +} + +TEST_F(RocketmqRouterTest, NoHealthyHosts) { + initializeRouter(); + initSendMessageRequest(); + + EXPECT_CALL(*active_message_, onError(_)) + .Times(1) + .WillOnce(Invoke([&](absl::string_view error_message) -> void { + EXPECT_THAT(error_message, ContainsRegex(".*No host available*.")); + })); + EXPECT_CALL(context_.cluster_manager_, tcpConnPoolForCluster("fake_cluster", _, _)) + .WillOnce(Return(nullptr)); + EXPECT_CALL(*active_message_, onReset()); + + startRequest(); +} + +TEST_F(RocketmqRouterTest, NoRouteForRequest) { + initializeRouter(); + initSendMessageRequest(); + + EXPECT_CALL(*active_message_, onError(_)) + .Times(1) + .WillOnce(Invoke([&](absl::string_view error_message) -> void { + EXPECT_THAT(error_message, ContainsRegex(".*No route for current request*.")); + })); + EXPECT_CALL(*active_message_, route()).WillRepeatedly(Return(nullptr)); + EXPECT_CALL(*active_message_, onReset()); + + startRequest(); +} + +TEST_F(RocketmqRouterTest, NoCluster) { + initializeRouter(); + initSendMessageRequest(); + + EXPECT_CALL(*active_message_, onReset()); + EXPECT_CALL(context_.cluster_manager_, get(_)).WillRepeatedly(Return(nullptr)); + + startRequest(); +} + +TEST_F(RocketmqRouterTest, CallWithEmptyResponse) { + initializeRouter(); + initSendMessageRequest(); + + startRequest(); + connectUpstream(); + + EXPECT_CALL(*active_message_, sendResponseToDownstream()).Times(0); + EXPECT_CALL(*active_message_, onReset()).Times(0); + + receiveEmptyResponse(); +} + +TEST_F(RocketmqRouterTest, OneWayRequest) { + initializeRouter(); + initSendMessageRequest("test_topic", true); + startRequest(); + + EXPECT_CALL(*active_message_, 
onReset()); + + connectUpstream(); + + EXPECT_TRUE(active_message_->metadata()->isOneWay()); +} + +TEST_F(RocketmqRouterTest, ReceiveSendMessageResponse) { + initializeRouter(); + initSendMessageRequest(); + + startRequest(); + connectUpstream(); + + EXPECT_CALL(*active_message_, sendResponseToDownstream()); + EXPECT_CALL(*active_message_, onReset()); + + receiveSendMessageResponse(false); +} + +TEST_F(RocketmqRouterTest, ReceivePopMessageResponse) { + initializeRouter(); + initPopMessageRequest(); + + startRequest(); + connectUpstream(); + + EXPECT_CALL(*active_message_, sendResponseToDownstream()); + EXPECT_CALL(*active_message_, onReset()); + + receivePopMessageResponse(); +} + +TEST_F(RocketmqRouterTest, ReceiveAckMessageResponse) { + initializeRouter(); + initAckMessageRequest(); + + startRequest(); + connectUpstream(); + + EXPECT_CALL(*active_message_, sendResponseToDownstream()); + EXPECT_CALL(*active_message_, onReset()); + + receiveAckMessageResponse(); +} + +TEST_F(RocketmqRouterTest, OneWayAckMessage) { + initializeRouter(); + initOneWayAckMessageRequest(); + + startRequest(); + + EXPECT_CALL(*active_message_, onReset()); + + connectUpstream(); +} + +TEST_F(RocketmqRouterTest, ReceivedSendMessageResponseWithDecodeError) { + initializeRouter(); + initSendMessageRequest(); + + EXPECT_CALL(*active_message_, onError(_)) + .Times(1) + .WillOnce(Invoke([&](absl::string_view error_message) -> void { + EXPECT_THAT(error_message, ContainsRegex(".*Failed to decode response*.")); + })); + + EXPECT_CALL(upstream_connection_, close(Network::ConnectionCloseType::NoFlush)); + + startRequest(); + connectUpstream(); + std::string json = R"EOF( + { + "language": "JAVA", + "version": 2, + "opaque": 1, + "flag": 1, + "serializeTypeCurrentRPC": "JSON" + } + )EOF"; + Buffer::OwnedImpl buffer; + buffer.writeBEInt(4 + 4 + json.size()); + buffer.writeBEInt(json.size()); + buffer.add(json); + + EXPECT_CALL(*active_message_, onReset()).WillRepeatedly(Invoke([&]() -> void { + 
conn_manager_->deferredDelete(**conn_manager_->activeMessageList().begin()); + })); + EXPECT_CALL(*active_message_, onReset()); + + LinkedList::moveIntoList(std::move(active_message_), conn_manager_->activeMessageList()); + router_->onUpstreamData(buffer, false); +} + +TEST_F(RocketmqRouterTest, ReceivedSendMessageResponseWithStreamEnd) { + initializeRouter(); + initSendMessageRequest(); + + EXPECT_CALL(upstream_connection_, close(Network::ConnectionCloseType::NoFlush)); + + startRequest(); + connectUpstream(); + + EXPECT_CALL(*active_message_, sendResponseToDownstream()); + EXPECT_CALL(*active_message_, onReset()); + + receiveSendMessageResponse(true); +} + +TEST_F(RocketmqRouterTest, UpstreamRemoteCloseMidResponse) { + initializeRouter(); + initSendMessageRequest(); + + EXPECT_CALL(*active_message_, onError(_)) + .Times(1) + .WillOnce(Invoke([&](absl::string_view error_message) -> void { + EXPECT_THAT(error_message, ContainsRegex(".*Connection to upstream is closed*.")); + })); + + startRequest(); + connectUpstream(); + + EXPECT_CALL(*active_message_, sendResponseToDownstream()).Times(0); + EXPECT_CALL(*active_message_, onReset()); + + router_->onEvent(Network::ConnectionEvent::RemoteClose); +} + +TEST_F(RocketmqRouterTest, UpstreamLocalCloseMidResponse) { + initializeRouter(); + initSendMessageRequest(); + + EXPECT_CALL(*active_message_, onError(_)) + .Times(1) + .WillOnce(Invoke([&](absl::string_view error_message) -> void { + EXPECT_THAT(error_message, ContainsRegex(".*Connection to upstream has been closed*.")); + })); + + startRequest(); + connectUpstream(); + + EXPECT_CALL(*active_message_, sendResponseToDownstream()).Times(0); + EXPECT_CALL(*active_message_, onReset()); + + router_->onEvent(Network::ConnectionEvent::LocalClose); +} + +TEST_F(RocketmqRouterTest, UpstreamConnected) { + initializeRouter(); + initSendMessageRequest(); + + startRequest(); + connectUpstream(); + + EXPECT_CALL(*active_message_, sendResponseToDownstream()).Times(0); + 
EXPECT_CALL(*active_message_, onReset()).Times(0); + + router_->onEvent(Network::ConnectionEvent::Connected); +} + +TEST_F(RocketmqRouterTest, StartRequestWithExistingConnection) { + initializeRouter(); + initSendMessageRequest(); + + EXPECT_CALL(*active_message_, onError(_)).Times(0); + EXPECT_CALL(*active_message_, onReset()).Times(0); + + startRequestWithExistingConnection(); +} + +} // namespace Router +} // namespace RocketmqProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/test/extensions/filters/network/rocketmq_proxy/topic_route_test.cc b/test/extensions/filters/network/rocketmq_proxy/topic_route_test.cc new file mode 100644 index 0000000000000..a337b89ead692 --- /dev/null +++ b/test/extensions/filters/network/rocketmq_proxy/topic_route_test.cc @@ -0,0 +1,73 @@ +#include "common/protobuf/utility.h" + +#include "extensions/filters/network/rocketmq_proxy/topic_route.h" + +#include "absl/container/node_hash_map.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RocketmqProxy { + +TEST(TopicRouteTest, Serialization) { + QueueData queue_data("broker-a", 8, 8, 6); + ProtobufWkt::Struct doc; + queue_data.encode(doc); + + const auto& members = doc.fields(); + + ASSERT_STREQ("broker-a", members.at("brokerName").string_value().c_str()); + ASSERT_EQ(queue_data.brokerName(), members.at("brokerName").string_value()); + ASSERT_EQ(queue_data.readQueueNum(), members.at("readQueueNums").number_value()); + ASSERT_EQ(queue_data.writeQueueNum(), members.at("writeQueueNums").number_value()); + ASSERT_EQ(queue_data.perm(), members.at("perm").number_value()); +} + +TEST(BrokerDataTest, Serialization) { + absl::node_hash_map broker_addrs; + std::string dummy_address("127.0.0.1:10911"); + for (int64_t i = 0; i < 3; i++) { + broker_addrs[i] = dummy_address; + } + std::string cluster("DefaultCluster"); + std::string broker_name("broker-a"); + 
BrokerData broker_data(cluster, broker_name, std::move(broker_addrs)); + + ProtobufWkt::Struct doc; + broker_data.encode(doc); + + const auto& members = doc.fields(); + + ASSERT_STREQ(cluster.c_str(), members.at("cluster").string_value().c_str()); + ASSERT_STREQ(broker_name.c_str(), members.at("brokerName").string_value().c_str()); +} + +TEST(TopicRouteDataTest, Serialization) { + TopicRouteData topic_route_data; + + for (int i = 0; i < 16; i++) { + topic_route_data.queueData().push_back(QueueData("broker-a", 8, 8, 6)); + } + + std::string cluster("DefaultCluster"); + std::string broker_name("broker-a"); + std::string dummy_address("127.0.0.1:10911"); + + for (int i = 0; i < 16; i++) { + absl::node_hash_map broker_addrs; + for (int64_t i = 0; i < 3; i++) { + broker_addrs[i] = dummy_address; + } + topic_route_data.brokerData().emplace_back( + BrokerData(cluster, broker_name, std::move(broker_addrs))); + } + ProtobufWkt::Struct doc; + EXPECT_NO_THROW(topic_route_data.encode(doc)); + MessageUtil::getJsonStringFromMessage(doc); +} + +} // namespace RocketmqProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/test/extensions/filters/network/rocketmq_proxy/utility.cc b/test/extensions/filters/network/rocketmq_proxy/utility.cc new file mode 100644 index 0000000000000..a44f0cd0acb37 --- /dev/null +++ b/test/extensions/filters/network/rocketmq_proxy/utility.cc @@ -0,0 +1,240 @@ +#include "test/extensions/filters/network/rocketmq_proxy/utility.h" + +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RocketmqProxy { + +const std::string BufferUtility::topic_name_ = "test_topic"; +const std::string BufferUtility::client_id_ = "test_client_id"; +const std::string BufferUtility::producer_group_ = "test_pg"; +const std::string BufferUtility::consumer_group_ = "test_cg"; +const std::string BufferUtility::extra_info_ = "test_extra"; +const std::string 
BufferUtility::msg_body_ = "_Apache_RocketMQ_"; +const int BufferUtility::queue_id_ = 1; +int BufferUtility::opaque_ = 0; + +void BufferUtility::fillRequestBuffer(Buffer::OwnedImpl& buffer, RequestCode code) { + + RemotingCommandPtr cmd = std::make_unique(); + cmd->code(static_cast(code)); + cmd->opaque(++opaque_); + + switch (code) { + case RequestCode::SendMessage: { + std::unique_ptr header = std::make_unique(); + header->topic(topic_name_); + header->version(SendMessageRequestVersion::V1); + std::string msg_body = msg_body_; + cmd->body().add(msg_body); + CommandCustomHeaderPtr ptr(header.release()); + cmd->customHeader(ptr); + } break; + + case RequestCode::HeartBeat: { + std::string heartbeat_data = R"EOF( + { + "clientID": "127.0.0.1@90330", + "consumerDataSet": [ + { + "consumeFromWhere": "CONSUME_FROM_FIRST_OFFSET", + "consumeType": "CONSUME_PASSIVELY", + "groupName": "test_cg", + "messageModel": "CLUSTERING", + "subscriptionDataSet": [ + { + "classFilterMode": false, + "codeSet": [], + "expressionType": "TAG", + "subString": "*", + "subVersion": 1575630587925, + "tagsSet": [], + "topic": "test_topic" + }, + { + "classFilterMode": false, + "codeSet": [], + "expressionType": "TAG", + "subString": "*", + "subVersion": 1575630587945, + "tagsSet": [], + "topic": "%RETRY%please_rename_unique_group_name_4" + } + ], + "unitMode": false + } + ], + "producerDataSet": [ + { + "groupName": "CLIENT_INNER_PRODUCER" + } + ] + } + )EOF"; + cmd->body().add(heartbeat_data); + } break; + + case RequestCode::UnregisterClient: { + std::unique_ptr header = + std::make_unique(); + header->clientId(client_id_); + header->consumerGroup(consumer_group_); + CommandCustomHeaderPtr ptr(header.release()); + cmd->customHeader(ptr); + break; + } + + case RequestCode::GetRouteInfoByTopic: { + std::unique_ptr header = + std::make_unique(); + header->topic(topic_name_); + CommandCustomHeaderPtr ptr(header.release()); + cmd->customHeader(ptr); + break; + } + + case 
RequestCode::GetConsumerListByGroup: { + std::unique_ptr header = + std::make_unique(); + header->consumerGroup(consumer_group_); + CommandCustomHeaderPtr ptr(header.release()); + cmd->customHeader(ptr); + break; + } + + case RequestCode::SendMessageV2: { + std::unique_ptr header = std::make_unique(); + header->topic(topic_name_); + header->version(SendMessageRequestVersion::V2); + header->producerGroup(producer_group_); + std::string msg_body = msg_body_; + cmd->body().add(msg_body); + CommandCustomHeaderPtr ptr(header.release()); + cmd->customHeader(ptr); + break; + } + + case RequestCode::PopMessage: { + std::unique_ptr header = std::make_unique(); + header->consumerGroup(consumer_group_); + header->topic(topic_name_); + header->queueId(queue_id_); + header->maxMsgNum(32); + header->invisibleTime(6000); + header->pollTime(3000); + header->bornTime(1000); + header->initMode(4); + + CommandCustomHeaderPtr ptr(header.release()); + cmd->customHeader(ptr); + break; + } + + case RequestCode::AckMessage: { + std::unique_ptr header = std::make_unique(); + header->consumerGroup(consumer_group_); + header->topic(topic_name_); + header->queueId(queue_id_); + header->extraInfo(extra_info_); + header->offset(1); + CommandCustomHeaderPtr ptr(header.release()); + cmd->customHeader(ptr); + break; + } + + default: + break; + } + Encoder encoder_; + buffer.drain(buffer.length()); + encoder_.encode(cmd, buffer); +} + +void BufferUtility::fillResponseBuffer(Buffer::OwnedImpl& buffer, RequestCode req_code, + ResponseCode resp_code) { + RemotingCommandPtr cmd = std::make_unique(); + cmd->code(static_cast(resp_code)); + cmd->opaque(opaque_); + + switch (req_code) { + case RequestCode::SendMessageV2: { + std::unique_ptr header = + std::make_unique(); + header->msgIdForTest("MSG_ID_01"); + header->queueId(1); + header->queueOffset(100); + header->transactionId("TX_01"); + break; + } + case RequestCode::PopMessage: { + std::unique_ptr header = std::make_unique(); + 
header->popTime(1587386521445); + header->invisibleTime(50000); + header->reviveQid(5); + std::string msg_offset_info = "0 6 147"; + header->msgOffsetInfo(msg_offset_info); + std::string start_offset_info = "0 6 147"; + header->startOffsetInfo(start_offset_info); + CommandCustomHeaderPtr ptr(header.release()); + cmd->customHeader(ptr); + cmd->body().add(std::string({'\x00', '\x00', '\x00', '\xD5'})); + cmd->body().add(std::string({'\xDA', '\xA3', '\x20', '\xA7'})); + cmd->body().add(std::string({'\x01', '\xE5', '\x9A', '\x3E'})); + cmd->body().add(std::string({'\x00', '\x00', '\x00', '\x06'})); + cmd->body().add(std::string({'\x00', '\x00', '\x00', '\x00'})); + cmd->body().add(std::string({'\x00', '\x00', '\x00', '\x00'})); + cmd->body().add(std::string({'\x00', '\x00', '\x00', '\x93'})); + cmd->body().add(std::string({'\x00', '\x00', '\x00', '\x00'})); + cmd->body().add(std::string({'\x00', '\x4A', '\xE0', '\x46'})); + cmd->body().add(std::string({'\x00', '\x00', '\x00', '\x00'})); + cmd->body().add(std::string({'\x00', '\x00', '\x01', '\x71'})); + cmd->body().add(std::string({'\x97', '\x98', '\x71', '\xB6'})); + cmd->body().add(std::string({'\x0A', '\x65', '\xC4', '\x91'})); + cmd->body().add(std::string({'\x00', '\x00', '\x1A', '\xF4'})); + cmd->body().add(std::string({'\x00', '\x00', '\x01', '\x71'})); + cmd->body().add(std::string({'\x97', '\x98', '\x71', '\xAF'})); + cmd->body().add(std::string({'\x0A', '\x65', '\xC1', '\x2D'})); + cmd->body().add(std::string({'\x00', '\x00', '\x1F', '\x53'})); + cmd->body().add(std::string({'\x00', '\x00', '\x00', '\x00'})); + cmd->body().add(std::string({'\x00', '\x00', '\x00', '\x00'})); + cmd->body().add(std::string({'\x00', '\x00', '\x00', '\x00'})); + cmd->body().add(std::string({'\x00', '\x00', '\x00', '\x11'})); + cmd->body().add(std::string("Hello RocketMQ 52")); + cmd->body().add(std::string({'\x04'})); + cmd->body().add(std::string("mesh")); + cmd->body().add(std::string({'\x00', '\x65'})); + 
cmd->body().add(std::string("TRACE_ON")); + cmd->body().add(std::string({'\x01'})); + cmd->body().add(std::string("true")); + cmd->body().add(std::string({'\x02'})); + cmd->body().add(std::string("MSG_REGION")); + cmd->body().add(std::string({'\x01'})); + cmd->body().add(std::string("DefaultRegion")); + cmd->body().add(std::string({'\x02'})); + cmd->body().add(std::string("UNIQ_KEY")); + cmd->body().add(std::string({'\x01'})); + cmd->body().add(std::string("1EE10882893E18B4AAC2664649B60034")); + cmd->body().add(std::string({'\x02'})); + cmd->body().add(std::string("WAIT")); + cmd->body().add(std::string({'\x01'})); + cmd->body().add(std::string("true")); + cmd->body().add(std::string({'\x02'})); + cmd->body().add(std::string("TAGS")); + cmd->body().add(std::string({'\x01'})); + cmd->body().add(std::string("TagA")); + cmd->body().add(std::string({'\x02'})); + break; + } + default: + break; + } + Encoder encoder_; + buffer.drain(buffer.length()); + encoder_.encode(cmd, buffer); +} +} // namespace RocketmqProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/test/extensions/filters/network/rocketmq_proxy/utility.h b/test/extensions/filters/network/rocketmq_proxy/utility.h new file mode 100644 index 0000000000000..ad3809fd07ccf --- /dev/null +++ b/test/extensions/filters/network/rocketmq_proxy/utility.h @@ -0,0 +1,31 @@ +#pragma once + +#include "extensions/filters/network/rocketmq_proxy/config.h" +#include "extensions/filters/network/rocketmq_proxy/conn_manager.h" + +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RocketmqProxy { + +class BufferUtility { +public: + static void fillRequestBuffer(Buffer::OwnedImpl& buffer, RequestCode code); + static void fillResponseBuffer(Buffer::OwnedImpl& buffer, RequestCode req_code, + ResponseCode resp_code); + + const static std::string topic_name_; + const static std::string client_id_; + const 
static std::string producer_group_; + const static std::string consumer_group_; + const static std::string msg_body_; + const static std::string extra_info_; + const static int queue_id_; + static int opaque_; +}; +} // namespace RocketmqProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/test/extensions/filters/network/sni_cluster/BUILD b/test/extensions/filters/network/sni_cluster/BUILD index a521d1f071453..b0024fecf442e 100644 --- a/test/extensions/filters/network/sni_cluster/BUILD +++ b/test/extensions/filters/network/sni_cluster/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( @@ -19,7 +19,7 @@ envoy_extension_cc_test( "//source/extensions/filters/network/sni_cluster", "//source/extensions/filters/network/sni_cluster:config", "//test/mocks/network:network_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "//test/mocks/stream_info:stream_info_mocks", ], ) diff --git a/test/extensions/filters/network/sni_cluster/sni_cluster_test.cc b/test/extensions/filters/network/sni_cluster/sni_cluster_test.cc index a047869ee0544..61c9a79ee681c 100644 --- a/test/extensions/filters/network/sni_cluster/sni_cluster_test.cc +++ b/test/extensions/filters/network/sni_cluster/sni_cluster_test.cc @@ -4,7 +4,7 @@ #include "extensions/filters/network/sni_cluster/sni_cluster.h" #include "test/mocks/network/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "test/mocks/stream_info/mocks.h" #include "gmock/gmock.h" diff --git a/test/extensions/filters/network/sni_dynamic_forward_proxy/BUILD b/test/extensions/filters/network/sni_dynamic_forward_proxy/BUILD index a931923403d25..c9981ba5f70c4 100644 --- 
a/test/extensions/filters/network/sni_dynamic_forward_proxy/BUILD +++ b/test/extensions/filters/network/sni_dynamic_forward_proxy/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( @@ -32,6 +32,7 @@ envoy_extension_cc_test( "//test/config/integration/certs", ], extension_name = "envoy.filters.network.sni_dynamic_forward_proxy", + tags = ["fails_on_windows"], deps = [ "//source/extensions/clusters/dynamic_forward_proxy:cluster", "//source/extensions/filters/listener/tls_inspector:config", diff --git a/test/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter_integration_test.cc b/test/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter_integration_test.cc index 53ff3c2fd6f2e..39a57e7781c7e 100644 --- a/test/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter_integration_test.cc +++ b/test/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter_integration_test.cc @@ -23,12 +23,12 @@ class SniDynamicProxyFilterIntegrationTest : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, GetParam(), ConfigHelper::tcpProxyConfig()) {} - void setup(uint64_t max_hosts = 1024) { + void setup(uint64_t max_hosts = 1024, uint32_t max_pending_requests = 1024) { setUpstreamProtocol(FakeHttpConnection::Type::HTTP1); config_helper_.addListenerFilter(ConfigHelper::tlsInspectorFilter()); - config_helper_.addConfigModifier([this, max_hosts]( + config_helper_.addConfigModifier([this, max_hosts, max_pending_requests]( envoy::config::bootstrap::v3::Bootstrap& bootstrap) { // Switch predefined cluster_0 to CDS filesystem sourcing. 
bootstrap.mutable_dynamic_resources()->mutable_cds_config()->set_path(cds_helper_.cds_path()); @@ -43,10 +43,12 @@ name: envoy.filters.http.dynamic_forward_proxy name: foo dns_lookup_family: {} max_hosts: {} + dns_cache_circuit_breaker: + max_pending_requests: {} port_value: {} )EOF", Network::Test::ipVersionToDnsFamily(GetParam()), max_hosts, - fake_upstreams_[0]->localAddress()->ip()->port()); + max_pending_requests, fake_upstreams_[0]->localAddress()->ip()->port()); config_helper_.addNetworkFilter(filter); }); @@ -56,8 +58,8 @@ name: envoy.filters.http.dynamic_forward_proxy cluster_.set_name("cluster_0"); cluster_.set_lb_policy(envoy::config::cluster::v3::Cluster::CLUSTER_PROVIDED); - const std::string cluster_type_config = - fmt::format(R"EOF( + const std::string cluster_type_config = fmt::format( + R"EOF( name: envoy.clusters.dynamic_forward_proxy typed_config: "@type": type.googleapis.com/envoy.extensions.clusters.dynamic_forward_proxy.v3.ClusterConfig @@ -65,8 +67,10 @@ name: envoy.clusters.dynamic_forward_proxy name: foo dns_lookup_family: {} max_hosts: {} + dns_cache_circuit_breaker: + max_pending_requests: {} )EOF", - Network::Test::ipVersionToDnsFamily(GetParam()), max_hosts); + Network::Test::ipVersionToDnsFamily(GetParam()), max_hosts, max_pending_requests); TestUtility::loadFromYaml(cluster_type_config, *cluster_.mutable_cluster_type()); @@ -129,5 +133,15 @@ TEST_P(SniDynamicProxyFilterIntegrationTest, UpstreamTls) { response->waitForEndStream(); checkSimpleRequestSuccess(0, 0, response.get()); } + +TEST_P(SniDynamicProxyFilterIntegrationTest, CircuitBreakerInvokedUpstreamTls) { + setup(1024, 0); + + codec_client_ = makeRawHttpConnection( + makeSslClientConnection(Ssl::ClientSslTransportOptions().setSni("localhost")), absl::nullopt); + ASSERT_FALSE(codec_client_->connected()); + EXPECT_EQ(1, test_server_->counter("dns_cache.foo.dns_rq_pending_overflow")->value()); +} + } // namespace } // namespace Envoy diff --git 
a/test/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter_test.cc b/test/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter_test.cc index 12755253776da..fa28ec75df9b9 100644 --- a/test/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter_test.cc +++ b/test/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter_test.cc @@ -40,10 +40,6 @@ class SniDynamicProxyFilterTest // Allow for an otherwise strict mock. ON_CALL(callbacks_, connection()).WillByDefault(ReturnRef(connection_)); EXPECT_CALL(callbacks_, connection()).Times(AtLeast(0)); - - // Configure max pending to 1 so we can test circuit breaking. - // TODO(lizan): implement circuit breaker in SNI dynamic forward proxy - cm_.thread_local_cluster_.cluster_.info_->resetResourceManager(0, 1, 0, 0, 0); } ~SniDynamicProxyFilterTest() override { @@ -62,6 +58,7 @@ class SniDynamicProxyFilterTest std::unique_ptr filter_; Network::MockReadFilterCallbacks callbacks_; NiceMock connection_; + NiceMock pending_requests_; }; // No SNI handling. 
@@ -72,6 +69,10 @@ TEST_F(SniDynamicProxyFilterTest, NoSNI) { TEST_F(SniDynamicProxyFilterTest, LoadDnsCache) { EXPECT_CALL(connection_, requestedServerName()).WillRepeatedly(Return("foo")); + Upstream::ResourceAutoIncDec* circuit_breakers_{ + new Upstream::ResourceAutoIncDec(pending_requests_)}; + EXPECT_CALL(*dns_cache_manager_->dns_cache_, canCreateDnsRequest_(_)) + .WillOnce(Return(circuit_breakers_)); Extensions::Common::DynamicForwardProxy::MockLoadDnsCacheEntryHandle* handle = new Extensions::Common::DynamicForwardProxy::MockLoadDnsCacheEntryHandle(); EXPECT_CALL(*dns_cache_manager_->dns_cache_, loadDnsCacheEntry_(Eq("foo"), 443, _)) @@ -86,6 +87,10 @@ TEST_F(SniDynamicProxyFilterTest, LoadDnsCache) { TEST_F(SniDynamicProxyFilterTest, LoadDnsInCache) { EXPECT_CALL(connection_, requestedServerName()).WillRepeatedly(Return("foo")); + Upstream::ResourceAutoIncDec* circuit_breakers_{ + new Upstream::ResourceAutoIncDec(pending_requests_)}; + EXPECT_CALL(*dns_cache_manager_->dns_cache_, canCreateDnsRequest_(_)) + .WillOnce(Return(circuit_breakers_)); EXPECT_CALL(*dns_cache_manager_->dns_cache_, loadDnsCacheEntry_(Eq("foo"), 443, _)) .WillOnce(Return(MockLoadDnsCacheEntryResult{LoadDnsCacheEntryStatus::InCache, nullptr})); @@ -95,12 +100,23 @@ TEST_F(SniDynamicProxyFilterTest, LoadDnsInCache) { // Cache overflow. 
TEST_F(SniDynamicProxyFilterTest, CacheOverflow) { EXPECT_CALL(connection_, requestedServerName()).WillRepeatedly(Return("foo")); + Upstream::ResourceAutoIncDec* circuit_breakers_{ + new Upstream::ResourceAutoIncDec(pending_requests_)}; + EXPECT_CALL(*dns_cache_manager_->dns_cache_, canCreateDnsRequest_(_)) + .WillOnce(Return(circuit_breakers_)); EXPECT_CALL(*dns_cache_manager_->dns_cache_, loadDnsCacheEntry_(Eq("foo"), 443, _)) .WillOnce(Return(MockLoadDnsCacheEntryResult{LoadDnsCacheEntryStatus::Overflow, nullptr})); EXPECT_CALL(connection_, close(Network::ConnectionCloseType::NoFlush)); EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onNewConnection()); } +TEST_F(SniDynamicProxyFilterTest, CircuitBreakerInvoked) { + EXPECT_CALL(connection_, requestedServerName()).WillRepeatedly(Return("foo")); + EXPECT_CALL(*dns_cache_manager_->dns_cache_, canCreateDnsRequest_(_)).WillOnce(Return(nullptr)); + EXPECT_CALL(connection_, close(Network::ConnectionCloseType::NoFlush)); + EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onNewConnection()); +} + } // namespace } // namespace SniDynamicForwardProxy diff --git a/test/extensions/filters/network/tcp_proxy/BUILD b/test/extensions/filters/network/tcp_proxy/BUILD index 318a2aa9b78a7..ad332adc27ac8 100644 --- a/test/extensions/filters/network/tcp_proxy/BUILD +++ b/test/extensions/filters/network/tcp_proxy/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( @@ -17,7 +17,7 @@ envoy_extension_cc_test( extension_name = "envoy.filters.network.tcp_proxy", deps = [ "//source/extensions/filters/network/tcp_proxy:config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "//test/test_common:utility_lib", "@envoy_api//envoy/extensions/filters/network/tcp_proxy/v3:pkg_cc_proto", ], diff --git 
a/test/extensions/filters/network/tcp_proxy/config_test.cc b/test/extensions/filters/network/tcp_proxy/config_test.cc index 4104f8540ec33..ff74cf1cb0f87 100644 --- a/test/extensions/filters/network/tcp_proxy/config_test.cc +++ b/test/extensions/filters/network/tcp_proxy/config_test.cc @@ -5,7 +5,7 @@ #include "extensions/filters/network/tcp_proxy/config.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" @@ -91,7 +91,7 @@ TEST_P(RouteIpListConfigTest, DEPRECATED_FEATURE_TEST(TcpProxy)) { )EOF"; envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy proto_config; - TestUtility::loadFromJson(json_string, proto_config); + TestUtility::loadFromJson(json_string, proto_config, true, false); NiceMock context; ConfigFactory factory; diff --git a/test/extensions/filters/network/thrift_proxy/BUILD b/test/extensions/filters/network/thrift_proxy/BUILD index ac219f01fae9b..5311f28d5f3c8 100644 --- a/test/extensions/filters/network/thrift_proxy/BUILD +++ b/test/extensions/filters/network/thrift_proxy/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -11,6 +9,8 @@ load( "envoy_extension_cc_test_library", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_mock( @@ -209,7 +209,7 @@ envoy_extension_cc_test( ":mocks", "//source/extensions/filters/network/thrift_proxy:config", "//source/extensions/filters/network/thrift_proxy/router:config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "//test/test_common:registry_lib", "@envoy_api//envoy/extensions/filters/network/thrift_proxy/v3:pkg_cc_proto", ], @@ -228,7 +228,7 @@ envoy_extension_cc_test( "//source/extensions/filters/network/thrift_proxy/router:config", "//source/extensions/filters/network/thrift_proxy/router:router_interface", "//test/mocks/network:network_mocks", - 
"//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "//test/mocks/upstream:upstream_mocks", "//test/test_common:printers_lib", "@envoy_api//envoy/extensions/filters/network/thrift_proxy/v3:pkg_cc_proto", @@ -271,7 +271,7 @@ envoy_extension_cc_test( "//source/extensions/filters/network/thrift_proxy/router:config", "//source/extensions/filters/network/thrift_proxy/router:router_lib", "//test/mocks/network:network_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "//test/mocks/upstream:upstream_mocks", "//test/test_common:printers_lib", "//test/test_common:registry_lib", @@ -291,7 +291,7 @@ envoy_extension_cc_test( "//source/extensions/filters/network/thrift_proxy/router:config", "//source/extensions/filters/network/thrift_proxy/router:router_ratelimit_lib", "//test/mocks/ratelimit:ratelimit_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "//test/test_common:printers_lib", "@envoy_api//envoy/config/route/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/network/thrift_proxy/v3:pkg_cc_proto", @@ -333,6 +333,7 @@ envoy_extension_cc_test( "//test/extensions/filters/network/thrift_proxy/driver:generate_fixture", ], extension_name = "envoy.filters.network.thrift_proxy", + tags = ["fails_on_windows"], deps = [ ":integration_lib", ":utility_lib", @@ -349,6 +350,7 @@ envoy_extension_cc_test( "//test/extensions/filters/network/thrift_proxy/driver:generate_fixture", ], extension_name = "envoy.filters.network.thrift_proxy", + tags = ["fails_on_windows"], deps = [ ":integration_lib", ":utility_lib", diff --git a/test/extensions/filters/network/thrift_proxy/config_test.cc b/test/extensions/filters/network/thrift_proxy/config_test.cc index 8a37946c2383f..6bf4afbd3f7af 100644 --- a/test/extensions/filters/network/thrift_proxy/config_test.cc +++ b/test/extensions/filters/network/thrift_proxy/config_test.cc @@ -5,7 +5,7 @@ #include 
"extensions/filters/network/thrift_proxy/filters/factory_base.h" #include "test/extensions/filters/network/thrift_proxy/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "test/test_common/registry.h" #include "gmock/gmock.h" @@ -122,6 +122,30 @@ TEST_F(ThriftFilterConfigTest, ThriftProxyWithEmptyProto) { testConfig(config); } +// Test config with an invalid cluster_header. +TEST_F(ThriftFilterConfigTest, RouterConfigWithInvalidClusterHeader) { + const std::string yaml = R"EOF( +stat_prefix: thrift +route_config: + name: local_route + routes: + match: + method_name: A + route: + cluster_header: A +thrift_filters: + - name: envoy.filters.thrift.router +)EOF"; + + envoy::extensions::filters::network::thrift_proxy::v3::ThriftProxy config = + parseThriftProxyFromV2Yaml(yaml); + std::string header = "A"; + header.push_back('\000'); // Add an invalid character for http header. + config.mutable_route_config()->mutable_routes()->at(0).mutable_route()->set_cluster_header( + header); + EXPECT_THROW(factory_.createFilterFactoryFromProto(config, context_), ProtoValidationException); +} + // Test config with an explicitly defined router filter. 
TEST_F(ThriftFilterConfigTest, ThriftProxyWithExplicitRouterConfig) { const std::string yaml = R"EOF( diff --git a/test/extensions/filters/network/thrift_proxy/conn_manager_test.cc b/test/extensions/filters/network/thrift_proxy/conn_manager_test.cc index 5de6de271950b..6d60f9cf09638 100644 --- a/test/extensions/filters/network/thrift_proxy/conn_manager_test.cc +++ b/test/extensions/filters/network/thrift_proxy/conn_manager_test.cc @@ -16,7 +16,7 @@ #include "test/extensions/filters/network/thrift_proxy/mocks.h" #include "test/extensions/filters/network/thrift_proxy/utility.h" #include "test/mocks/network/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "test/mocks/upstream/mocks.h" #include "test/test_common/printers.h" @@ -237,6 +237,26 @@ class ThriftConnectionManagerTest : public testing::Test { } } + void writeVoidFramedBinaryMessage(Buffer::Instance& buffer, int32_t seq_id) { + Buffer::OwnedImpl msg; + ProtocolPtr proto = + NamedProtocolConfigFactory::getFactory(ProtocolType::Binary).createProtocol(); + MessageMetadata metadata; + metadata.setMethodName("name"); + metadata.setMessageType(MessageType::Reply); + metadata.setSequenceId(seq_id); + + proto->writeMessageBegin(msg, metadata); + proto->writeStructBegin(msg, ""); + proto->writeFieldBegin(msg, "", FieldType::Stop, 0); + proto->writeStructEnd(msg); + proto->writeMessageEnd(msg); + + TransportPtr transport = + NamedTransportConfigFactory::getFactory(TransportType::Framed).createTransport(); + transport->encodeFrame(buffer, metadata, msg); + } + void writeFramedBinaryTApplicationException(Buffer::Instance& buffer, int32_t seq_id) { Buffer::OwnedImpl msg; ProtocolPtr proto = @@ -304,7 +324,7 @@ class ThriftConnectionManagerTest : public testing::Test { Buffer::OwnedImpl buffer_; Buffer::OwnedImpl write_buffer_; NiceMock filter_callbacks_; - NiceMock random_; + NiceMock random_; std::unique_ptr filter_; MockTransport* custom_transport_{}; MockProtocol* 
custom_protocol_{}; @@ -676,6 +696,40 @@ TEST_F(ThriftConnectionManagerTest, RequestAndResponse) { EXPECT_EQ(0U, store_.counter("test.response_error").value()); } +TEST_F(ThriftConnectionManagerTest, RequestAndVoidResponse) { + initializeFilter(); + writeComplexFramedBinaryMessage(buffer_, MessageType::Call, 0x0F); + + ThriftFilters::DecoderFilterCallbacks* callbacks{}; + EXPECT_CALL(*decoder_filter_, setDecoderFilterCallbacks(_)) + .WillOnce( + Invoke([&](ThriftFilters::DecoderFilterCallbacks& cb) -> void { callbacks = &cb; })); + + EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(1U, store_.counter("test.request_call").value()); + + writeVoidFramedBinaryMessage(write_buffer_, 0x0F); + + FramedTransportImpl transport; + BinaryProtocolImpl proto; + callbacks->startUpstreamResponse(transport, proto); + + EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)).Times(1); + EXPECT_EQ(ThriftFilters::ResponseStatus::Complete, callbacks->upstreamData(write_buffer_)); + + filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList(); + + EXPECT_EQ(1U, store_.counter("test.request").value()); + EXPECT_EQ(1U, store_.counter("test.request_call").value()); + EXPECT_EQ(0U, stats_.request_active_.value()); + EXPECT_EQ(1U, store_.counter("test.response").value()); + EXPECT_EQ(1U, store_.counter("test.response_reply").value()); + EXPECT_EQ(0U, store_.counter("test.response_exception").value()); + EXPECT_EQ(0U, store_.counter("test.response_invalid_type").value()); + EXPECT_EQ(1U, store_.counter("test.response_success").value()); + EXPECT_EQ(0U, store_.counter("test.response_error").value()); +} + // Tests that the downstream request's sequence number is used for the response. 
TEST_F(ThriftConnectionManagerTest, RequestAndResponseSequenceIdHandling) { initializeFilter(); diff --git a/test/extensions/filters/network/thrift_proxy/decoder_test.cc b/test/extensions/filters/network/thrift_proxy/decoder_test.cc index 1dc42a1a116b4..4699f3d94a911 100644 --- a/test/extensions/filters/network/thrift_proxy/decoder_test.cc +++ b/test/extensions/filters/network/thrift_proxy/decoder_test.cc @@ -336,6 +336,28 @@ TEST_P(DecoderStateMachineValueTest, ListValue) { EXPECT_EQ(dsm.currentState(), ProtocolState::ListEnd); } +TEST_P(DecoderStateMachineValueTest, IncompleteListValue) { + FieldType field_type = GetParam(); + Buffer::OwnedImpl buffer; + InSequence dummy; + + EXPECT_CALL(proto_, readListBegin(Ref(buffer), _, _)) + .WillOnce(DoAll(SetArgReferee<1>(field_type), SetArgReferee<2>(1), Return(true))); + + expectValue(proto_, handler_, field_type, false); + + DecoderStateMachine dsm(proto_, metadata_, handler_); + + dsm.setCurrentState(ProtocolState::ListBegin); + EXPECT_EQ(dsm.run(buffer), ProtocolState::WaitForData); + EXPECT_EQ(dsm.currentState(), ProtocolState::ListValue); + + expectValue(proto_, handler_, field_type); + + EXPECT_EQ(dsm.run(buffer), ProtocolState::WaitForData); + EXPECT_EQ(dsm.currentState(), ProtocolState::ListEnd); +} + TEST_P(DecoderStateMachineValueTest, MultipleListValues) { FieldType field_type = GetParam(); Buffer::OwnedImpl buffer; @@ -448,6 +470,54 @@ TEST_P(DecoderStateMachineValueTest, MapValueValue) { EXPECT_EQ(dsm.currentState(), ProtocolState::MapEnd); } +TEST_P(DecoderStateMachineValueTest, IncompleteMapKey) { + FieldType field_type = GetParam(); + Buffer::OwnedImpl buffer; + InSequence dummy; + + EXPECT_CALL(proto_, readMapBegin(Ref(buffer), _, _, _)) + .WillOnce(DoAll(SetArgReferee<1>(field_type), SetArgReferee<2>(FieldType::I32), + SetArgReferee<3>(1), Return(true))); + + expectValue(proto_, handler_, field_type, false); // key + + DecoderStateMachine dsm(proto_, metadata_, handler_); + + 
dsm.setCurrentState(ProtocolState::MapBegin); + EXPECT_EQ(dsm.run(buffer), ProtocolState::WaitForData); + EXPECT_EQ(dsm.currentState(), ProtocolState::MapKey); + + expectValue(proto_, handler_, field_type); // key + expectValue(proto_, handler_, FieldType::I32); // value + + EXPECT_EQ(dsm.run(buffer), ProtocolState::WaitForData); + EXPECT_EQ(dsm.currentState(), ProtocolState::MapEnd); +} + +TEST_P(DecoderStateMachineValueTest, IncompleteMapValue) { + FieldType field_type = GetParam(); + Buffer::OwnedImpl buffer; + InSequence dummy; + + EXPECT_CALL(proto_, readMapBegin(Ref(buffer), _, _, _)) + .WillOnce(DoAll(SetArgReferee<1>(FieldType::I32), SetArgReferee<2>(field_type), + SetArgReferee<3>(1), Return(true))); + + expectValue(proto_, handler_, FieldType::I32); // key + expectValue(proto_, handler_, field_type, false); // value + + DecoderStateMachine dsm(proto_, metadata_, handler_); + + dsm.setCurrentState(ProtocolState::MapBegin); + EXPECT_EQ(dsm.run(buffer), ProtocolState::WaitForData); + EXPECT_EQ(dsm.currentState(), ProtocolState::MapValue); + + expectValue(proto_, handler_, field_type); // value + + EXPECT_EQ(dsm.run(buffer), ProtocolState::WaitForData); + EXPECT_EQ(dsm.currentState(), ProtocolState::MapEnd); +} + TEST_P(DecoderStateMachineValueTest, MultipleMapKeyValues) { FieldType field_type = GetParam(); Buffer::OwnedImpl buffer; @@ -520,6 +590,28 @@ TEST_P(DecoderStateMachineValueTest, SetValue) { EXPECT_EQ(dsm.currentState(), ProtocolState::SetEnd); } +TEST_P(DecoderStateMachineValueTest, IncompleteSetValue) { + FieldType field_type = GetParam(); + Buffer::OwnedImpl buffer; + InSequence dummy; + + EXPECT_CALL(proto_, readSetBegin(Ref(buffer), _, _)) + .WillOnce(DoAll(SetArgReferee<1>(field_type), SetArgReferee<2>(1), Return(true))); + + expectValue(proto_, handler_, field_type, false); + + DecoderStateMachine dsm(proto_, metadata_, handler_); + + dsm.setCurrentState(ProtocolState::SetBegin); + EXPECT_EQ(dsm.run(buffer), ProtocolState::WaitForData); + 
EXPECT_EQ(dsm.currentState(), ProtocolState::SetValue); + + expectValue(proto_, handler_, field_type); + + EXPECT_EQ(dsm.run(buffer), ProtocolState::WaitForData); + EXPECT_EQ(dsm.currentState(), ProtocolState::SetEnd); +} + TEST_P(DecoderStateMachineValueTest, MultipleSetValues) { FieldType field_type = GetParam(); Buffer::OwnedImpl buffer; diff --git a/test/extensions/filters/network/thrift_proxy/driver/BUILD b/test/extensions/filters/network/thrift_proxy/driver/BUILD index b0461509c7a19..4e5d0f47d1d4e 100644 --- a/test/extensions/filters/network/thrift_proxy/driver/BUILD +++ b/test/extensions/filters/network/thrift_proxy/driver/BUILD @@ -1,7 +1,8 @@ -licenses(["notice"]) # Apache 2 - +load("@rules_python//python:defs.bzl", "py_binary") load("//bazel:envoy_build_system.bzl", "envoy_package") +licenses(["notice"]) # Apache 2 + envoy_package() filegroup( diff --git a/test/extensions/filters/network/thrift_proxy/driver/fbthrift/BUILD b/test/extensions/filters/network/thrift_proxy/driver/fbthrift/BUILD index a1b33006f10f3..82b251aeac773 100644 --- a/test/extensions/filters/network/thrift_proxy/driver/fbthrift/BUILD +++ b/test/extensions/filters/network/thrift_proxy/driver/fbthrift/BUILD @@ -1,7 +1,8 @@ -licenses(["notice"]) # Apache 2 - +load("@rules_python//python:defs.bzl", "py_library") load("//bazel:envoy_build_system.bzl", "envoy_package") +licenses(["notice"]) # Apache 2 + envoy_package() py_library( diff --git a/test/extensions/filters/network/thrift_proxy/driver/finagle/BUILD b/test/extensions/filters/network/thrift_proxy/driver/finagle/BUILD index 71fa29d640635..e2f159ae992d4 100644 --- a/test/extensions/filters/network/thrift_proxy/driver/finagle/BUILD +++ b/test/extensions/filters/network/thrift_proxy/driver/finagle/BUILD @@ -1,7 +1,8 @@ -licenses(["notice"]) # Apache 2 - +load("@rules_python//python:defs.bzl", "py_library") load("//bazel:envoy_build_system.bzl", "envoy_package") +licenses(["notice"]) # Apache 2 + envoy_package() py_library( diff --git 
a/test/extensions/filters/network/thrift_proxy/driver/generated/example/BUILD b/test/extensions/filters/network/thrift_proxy/driver/generated/example/BUILD index 6c9595737b16f..d3a7029ab41d3 100644 --- a/test/extensions/filters/network/thrift_proxy/driver/generated/example/BUILD +++ b/test/extensions/filters/network/thrift_proxy/driver/generated/example/BUILD @@ -1,7 +1,8 @@ -licenses(["notice"]) # Apache 2 - +load("@rules_python//python:defs.bzl", "py_library") load("//bazel:envoy_build_system.bzl", "envoy_package") +licenses(["notice"]) # Apache 2 + envoy_package() py_library( diff --git a/test/extensions/filters/network/thrift_proxy/filters/BUILD b/test/extensions/filters/network/thrift_proxy/filters/BUILD new file mode 100644 index 0000000000000..0af3863cfb2cd --- /dev/null +++ b/test/extensions/filters/network/thrift_proxy/filters/BUILD @@ -0,0 +1,18 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_test", + "envoy_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_cc_test( + name = "pass_through_filter_test", + srcs = ["pass_through_filter_test.cc"], + deps = [ + "//source/extensions/filters/network/thrift_proxy/filters:pass_through_filter_lib", + "//test/extensions/filters/network/thrift_proxy:mocks", + ], +) diff --git a/test/extensions/filters/network/thrift_proxy/filters/pass_through_filter_test.cc b/test/extensions/filters/network/thrift_proxy/filters/pass_through_filter_test.cc new file mode 100644 index 0000000000000..bd0952258bbce --- /dev/null +++ b/test/extensions/filters/network/thrift_proxy/filters/pass_through_filter_test.cc @@ -0,0 +1,118 @@ +#include +#include + +#include "extensions/filters/network/thrift_proxy/filters/pass_through_filter.h" + +#include "test/extensions/filters/network/thrift_proxy/mocks.h" +#include "test/test_common/printers.h" +#include "test/test_common/utility.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::NiceMock; + +namespace Envoy { +namespace Extensions { 
+namespace NetworkFilters { +namespace ThriftProxy { +namespace ThriftFilters { + +using namespace Envoy::Extensions::NetworkFilters; + +class ThriftPassThroughDecoderFilterTest : public testing::Test { +public: + class Filter : public PassThroughDecoderFilter { + public: + DecoderFilterCallbacks* decoderFilterCallbacks() { return decoder_callbacks_; } + }; + + void initialize() { + filter_ = std::make_unique(); + filter_->setDecoderFilterCallbacks(filter_callbacks_); + } + + std::unique_ptr filter_; + NiceMock filter_callbacks_; + ThriftProxy::MessageMetadataSharedPtr request_metadata_; +}; + +// Tests that each method returns ThriftProxy::FilterStatus::Continue. +TEST_F(ThriftPassThroughDecoderFilterTest, AllMethodsAreImplementedTrivially) { + initialize(); + + EXPECT_EQ(&filter_callbacks_, filter_->decoderFilterCallbacks()); + + EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->transportBegin(request_metadata_)); + EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->messageBegin(request_metadata_)); + { + std::string dummy_str = "dummy"; + EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->structBegin(dummy_str)); + } + { + std::string dummy_str = "dummy"; + ThriftProxy::FieldType dummy_ft{ThriftProxy::FieldType::I32}; + int16_t dummy_id{1}; + EXPECT_EQ(ThriftProxy::FilterStatus::Continue, + filter_->fieldBegin(dummy_str, dummy_ft, dummy_id)); + } + { + bool dummy_val{false}; + EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->boolValue(dummy_val)); + } + { + uint8_t dummy_val{0}; + EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->byteValue(dummy_val)); + } + { + int16_t dummy_val{0}; + EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->int16Value(dummy_val)); + } + { + int32_t dummy_val{0}; + EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->int32Value(dummy_val)); + } + { + int64_t dummy_val{0}; + EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->int64Value(dummy_val)); + } + { + double dummy_val{0.0}; + 
EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->doubleValue(dummy_val)); + } + { + std::string dummy_str = "dummy"; + EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->stringValue(dummy_str)); + } + { + ThriftProxy::FieldType dummy_ft = ThriftProxy::FieldType::I32; + uint32_t dummy_size{1}; + EXPECT_EQ(ThriftProxy::FilterStatus::Continue, + filter_->mapBegin(dummy_ft, dummy_ft, dummy_size)); + EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->mapEnd()); + } + { + ThriftProxy::FieldType dummy_ft = ThriftProxy::FieldType::I32; + uint32_t dummy_size{1}; + EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->listBegin(dummy_ft, dummy_size)); + EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->listEnd()); + } + { + ThriftProxy::FieldType dummy_ft = ThriftProxy::FieldType::I32; + uint32_t dummy_size{1}; + EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->setBegin(dummy_ft, dummy_size)); + EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->setEnd()); + } + EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->structEnd()); + EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->fieldEnd()); + EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->messageEnd()); + EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->transportEnd()); + + EXPECT_NO_THROW(filter_->onDestroy()); +} + +} // namespace ThriftFilters +} // namespace ThriftProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/network/thrift_proxy/filters/ratelimit/BUILD b/test/extensions/filters/network/thrift_proxy/filters/ratelimit/BUILD index 06542bc5cbb58..bff8a2ae1a215 100644 --- a/test/extensions/filters/network/thrift_proxy/filters/ratelimit/BUILD +++ b/test/extensions/filters/network/thrift_proxy/filters/ratelimit/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) 
+licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( @@ -38,7 +38,7 @@ envoy_extension_cc_test( deps = [ "//source/extensions/filters/network/thrift_proxy/filters/ratelimit:config", "//test/extensions/filters/network/thrift_proxy:mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3:pkg_cc_proto", ], diff --git a/test/extensions/filters/network/thrift_proxy/filters/ratelimit/config_test.cc b/test/extensions/filters/network/thrift_proxy/filters/ratelimit/config_test.cc index b18981195ab93..98bcf3d482dba 100644 --- a/test/extensions/filters/network/thrift_proxy/filters/ratelimit/config_test.cc +++ b/test/extensions/filters/network/thrift_proxy/filters/ratelimit/config_test.cc @@ -5,7 +5,7 @@ #include "extensions/filters/network/thrift_proxy/filters/ratelimit/config.h" #include "test/extensions/filters/network/thrift_proxy/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "gmock/gmock.h" #include "gtest/gtest.h" @@ -19,9 +19,9 @@ namespace RateLimitFilter { namespace { envoy::extensions::filters::network::thrift_proxy::filters::ratelimit::v3::RateLimit -parseRateLimitFromV2Yaml(const std::string& yaml) { +parseRateLimitFromV3Yaml(const std::string& yaml, bool avoid_boosting = true) { envoy::extensions::filters::network::thrift_proxy::filters::ratelimit::v3::RateLimit rate_limit; - TestUtility::loadFromYaml(yaml, rate_limit); + TestUtility::loadFromYaml(yaml, rate_limit, false, avoid_boosting); return rate_limit; } @@ -46,7 +46,7 @@ timeout: "1.337s" cluster_name: ratelimit_cluster )EOF"; - auto proto_config = parseRateLimitFromV2Yaml(yaml_string); + auto proto_config = parseRateLimitFromV3Yaml(yaml_string); NiceMock context; diff --git a/test/extensions/filters/network/thrift_proxy/filters/ratelimit/ratelimit_test.cc 
b/test/extensions/filters/network/thrift_proxy/filters/ratelimit/ratelimit_test.cc index df6fabc70d9a3..447076e9bd159 100644 --- a/test/extensions/filters/network/thrift_proxy/filters/ratelimit/ratelimit_test.cc +++ b/test/extensions/filters/network/thrift_proxy/filters/ratelimit/ratelimit_test.cc @@ -50,10 +50,10 @@ class ThriftRateLimitFilterTest : public testing::Test { .WillByDefault(Return(true)); } - void SetUpTest(const std::string& yaml) { + void setupTest(const std::string& yaml) { envoy::extensions::filters::network::thrift_proxy::filters::ratelimit::v3::RateLimit proto_config{}; - TestUtility::loadFromYaml(yaml, proto_config); + TestUtility::loadFromYaml(yaml, proto_config, false, true); config_ = std::make_shared(proto_config, local_info_, stats_store_, runtime_, cm_); @@ -94,7 +94,7 @@ class ThriftRateLimitFilterTest : public testing::Test { }; TEST_F(ThriftRateLimitFilterTest, NoRoute) { - SetUpTest(filter_config_); + setupTest(filter_config_); EXPECT_CALL(*filter_callbacks_.route_, routeEntry()).WillOnce(Return(nullptr)); @@ -165,7 +165,7 @@ TEST_F(ThriftRateLimitFilterTest, NoRoute) { } TEST_F(ThriftRateLimitFilterTest, NoCluster) { - SetUpTest(filter_config_); + setupTest(filter_config_); ON_CALL(cm_, get(_)).WillByDefault(Return(nullptr)); @@ -173,7 +173,7 @@ TEST_F(ThriftRateLimitFilterTest, NoCluster) { } TEST_F(ThriftRateLimitFilterTest, NoApplicableRateLimit) { - SetUpTest(filter_config_); + setupTest(filter_config_); filter_callbacks_.route_->route_entry_.rate_limit_policy_.rate_limit_policy_entry_.clear(); EXPECT_CALL(*client_, limit(_, _, _, _)).Times(0); @@ -182,7 +182,7 @@ TEST_F(ThriftRateLimitFilterTest, NoApplicableRateLimit) { } TEST_F(ThriftRateLimitFilterTest, NoDescriptor) { - SetUpTest(filter_config_); + setupTest(filter_config_); EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _)).Times(1); EXPECT_CALL(*client_, limit(_, _, _, _)).Times(0); @@ -191,7 +191,7 @@ TEST_F(ThriftRateLimitFilterTest, NoDescriptor) { } 
TEST_F(ThriftRateLimitFilterTest, RuntimeDisabled) { - SetUpTest(filter_config_); + setupTest(filter_config_); EXPECT_CALL(runtime_.snapshot_, featureEnabled("ratelimit.thrift_filter_enabled", 100)) .WillOnce(Return(false)); @@ -200,7 +200,7 @@ TEST_F(ThriftRateLimitFilterTest, RuntimeDisabled) { } TEST_F(ThriftRateLimitFilterTest, OkResponse) { - SetUpTest(filter_config_); + setupTest(filter_config_); InSequence s; EXPECT_CALL(filter_callbacks_.route_->route_entry_.rate_limit_policy_, getApplicableRateLimit(0)) @@ -226,14 +226,15 @@ TEST_F(ThriftRateLimitFilterTest, OkResponse) { EXPECT_CALL(filter_callbacks_.stream_info_, setResponseFlag(StreamInfo::ResponseFlag::RateLimited)) .Times(0); - request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OK, nullptr, nullptr); + request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OK, nullptr, nullptr, + nullptr); EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_.counter("ratelimit.ok").value()); } TEST_F(ThriftRateLimitFilterTest, ImmediateOkResponse) { - SetUpTest(filter_config_); + setupTest(filter_config_); InSequence s; EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _)) @@ -245,7 +246,8 @@ TEST_F(ThriftRateLimitFilterTest, ImmediateOkResponse) { _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { - callbacks.complete(Filters::Common::RateLimit::LimitStatus::OK, nullptr, nullptr); + callbacks.complete(Filters::Common::RateLimit::LimitStatus::OK, nullptr, nullptr, + nullptr); }))); EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0); @@ -256,7 +258,7 @@ TEST_F(ThriftRateLimitFilterTest, ImmediateOkResponse) { } TEST_F(ThriftRateLimitFilterTest, ImmediateErrorResponse) { - SetUpTest(filter_config_); + setupTest(filter_config_); InSequence s; EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _)) @@ -268,7 +270,8 @@ TEST_F(ThriftRateLimitFilterTest, ImmediateErrorResponse) { _)) 
.WillOnce( WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { - callbacks.complete(Filters::Common::RateLimit::LimitStatus::Error, nullptr, nullptr); + callbacks.complete(Filters::Common::RateLimit::LimitStatus::Error, nullptr, nullptr, + nullptr); }))); EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0); @@ -283,7 +286,7 @@ TEST_F(ThriftRateLimitFilterTest, ImmediateErrorResponse) { } TEST_F(ThriftRateLimitFilterTest, ErrorResponse) { - SetUpTest(filter_config_); + setupTest(filter_config_); InSequence s; EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _)) @@ -297,7 +300,8 @@ TEST_F(ThriftRateLimitFilterTest, ErrorResponse) { EXPECT_EQ(ThriftProxy::FilterStatus::StopIteration, filter_->messageBegin(request_metadata_)); EXPECT_CALL(filter_callbacks_, continueDecoding()); - request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::Error, nullptr, nullptr); + request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::Error, nullptr, nullptr, + nullptr); EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->messageEnd()); EXPECT_CALL(filter_callbacks_.stream_info_, @@ -313,7 +317,7 @@ TEST_F(ThriftRateLimitFilterTest, ErrorResponse) { } TEST_F(ThriftRateLimitFilterTest, ErrorResponseWithFailureModeAllowOff) { - SetUpTest(fail_close_config_); + setupTest(fail_close_config_); InSequence s; EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _)) @@ -334,7 +338,8 @@ TEST_F(ThriftRateLimitFilterTest, ErrorResponseWithFailureModeAllowOff) { })); EXPECT_CALL(filter_callbacks_.stream_info_, setResponseFlag(StreamInfo::ResponseFlag::RateLimitServiceError)); - request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::Error, nullptr, nullptr); + request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::Error, nullptr, nullptr, + nullptr); EXPECT_EQ( 1U, @@ -345,7 +350,7 @@ TEST_F(ThriftRateLimitFilterTest, ErrorResponseWithFailureModeAllowOff) { } 
TEST_F(ThriftRateLimitFilterTest, LimitResponse) { - SetUpTest(filter_config_); + setupTest(filter_config_); InSequence s; EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _)) @@ -367,7 +372,7 @@ TEST_F(ThriftRateLimitFilterTest, LimitResponse) { EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0); EXPECT_CALL(filter_callbacks_.stream_info_, setResponseFlag(StreamInfo::ResponseFlag::RateLimited)); - request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OverLimit, nullptr, + request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OverLimit, nullptr, nullptr, nullptr); EXPECT_EQ(1U, @@ -376,7 +381,7 @@ TEST_F(ThriftRateLimitFilterTest, LimitResponse) { } TEST_F(ThriftRateLimitFilterTest, LimitResponseWithHeaders) { - SetUpTest(filter_config_); + setupTest(filter_config_); InSequence s; EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _)) @@ -389,7 +394,7 @@ TEST_F(ThriftRateLimitFilterTest, LimitResponseWithHeaders) { EXPECT_EQ(ThriftProxy::FilterStatus::StopIteration, filter_->messageBegin(request_metadata_)); - Http::HeaderMapPtr rl_headers{new Http::TestHeaderMapImpl{ + Http::HeaderMapPtr rl_headers{new Http::TestRequestHeaderMapImpl{ {"x-ratelimit-limit", "1000"}, {"x-ratelimit-remaining", "0"}, {"retry-after", "33"}}}; EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0); @@ -400,8 +405,8 @@ TEST_F(ThriftRateLimitFilterTest, LimitResponseWithHeaders) { setResponseFlag(StreamInfo::ResponseFlag::RateLimited)); Http::ResponseHeaderMapPtr h{new Http::TestResponseHeaderMapImpl(*rl_headers)}; - request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OverLimit, std::move(h), - nullptr); + request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OverLimit, nullptr, + std::move(h), nullptr); EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_.counter("ratelimit.over_limit") @@ -409,7 +414,7 @@ TEST_F(ThriftRateLimitFilterTest, LimitResponseWithHeaders) { } 
TEST_F(ThriftRateLimitFilterTest, LimitResponseRuntimeDisabled) { - SetUpTest(filter_config_); + setupTest(filter_config_); InSequence s; EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _)) @@ -425,7 +430,7 @@ TEST_F(ThriftRateLimitFilterTest, LimitResponseRuntimeDisabled) { EXPECT_CALL(runtime_.snapshot_, featureEnabled("ratelimit.thrift_filter_enforcing", 100)) .WillOnce(Return(false)); EXPECT_CALL(filter_callbacks_, continueDecoding()); - request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OverLimit, nullptr, + request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OverLimit, nullptr, nullptr, nullptr); EXPECT_EQ(1U, @@ -434,7 +439,7 @@ TEST_F(ThriftRateLimitFilterTest, LimitResponseRuntimeDisabled) { } TEST_F(ThriftRateLimitFilterTest, ResetDuringCall) { - SetUpTest(filter_config_); + setupTest(filter_config_); InSequence s; EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _)) @@ -453,7 +458,7 @@ TEST_F(ThriftRateLimitFilterTest, ResetDuringCall) { TEST_F(ThriftRateLimitFilterTest, RouteRateLimitDisabledForRouteKey) { route_rate_limit_.disable_key_ = "test_key"; - SetUpTest(filter_config_); + setupTest(filter_config_); ON_CALL(runtime_.snapshot_, featureEnabled("ratelimit.test_key.thrift_filter_enabled", 100)) .WillByDefault(Return(false)); @@ -472,7 +477,7 @@ TEST_F(ThriftRateLimitFilterTest, ConfigValueTest) { } )EOF"; - SetUpTest(stage_filter_config); + setupTest(stage_filter_config); EXPECT_EQ(5UL, config_->stage()); EXPECT_EQ("foo", config_->domain()); @@ -485,7 +490,7 @@ TEST_F(ThriftRateLimitFilterTest, DefaultConfigValueTest) { } )EOF"; - SetUpTest(stage_filter_config); + setupTest(stage_filter_config); EXPECT_EQ(0UL, config_->stage()); EXPECT_EQ("foo", config_->domain()); diff --git a/test/extensions/filters/network/thrift_proxy/header_transport_impl_test.cc b/test/extensions/filters/network/thrift_proxy/header_transport_impl_test.cc index 5dd0416bc8b22..73a60b91ec5d5 100644 --- 
a/test/extensions/filters/network/thrift_proxy/header_transport_impl_test.cc +++ b/test/extensions/filters/network/thrift_proxy/header_transport_impl_test.cc @@ -458,7 +458,7 @@ TEST(HeaderTransportTest, InfoBlock) { buffer.writeByte(0); // empty value buffer.writeByte(0); // padding - Http::HeaderMapImpl expected_headers; + Http::TestRequestHeaderMapImpl expected_headers; expected_headers.addCopy(Http::LowerCaseString("not"), "empty"); expected_headers.addCopy(Http::LowerCaseString("key"), "value"); expected_headers.addCopy(Http::LowerCaseString("key2"), std::string(128, 'x')); @@ -467,8 +467,7 @@ TEST(HeaderTransportTest, InfoBlock) { EXPECT_TRUE(transport.decodeFrameStart(buffer, metadata)); EXPECT_THAT(metadata, HasFrameSize(38U)); - Http::HeaderMapImpl& actual_headers = dynamic_cast(metadata.headers()); - EXPECT_EQ(expected_headers, actual_headers); + EXPECT_EQ(expected_headers, metadata.headers()); EXPECT_EQ(buffer.length(), 0); } diff --git a/test/extensions/filters/network/thrift_proxy/integration_test.cc b/test/extensions/filters/network/thrift_proxy/integration_test.cc index 19ba17e4eb7e9..29dc1790df4a1 100644 --- a/test/extensions/filters/network/thrift_proxy/integration_test.cc +++ b/test/extensions/filters/network/thrift_proxy/integration_test.cc @@ -122,11 +122,6 @@ class ThriftConnManagerIntegrationTest BaseThriftIntegrationTest::initialize(); } - void TearDown() override { - test_server_.reset(); - fake_upstreams_.clear(); - } - protected: // Multiplexed requests are handled by the service name route match, // while oneway's are handled by the "poke" method. 
All other requests @@ -180,7 +175,7 @@ TEST_P(ThriftConnManagerIntegrationTest, Success) { initializeCall(DriverMode::Success); IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("listener_0")); - tcp_client->write(request_bytes_.toString()); + ASSERT_TRUE(tcp_client->write(request_bytes_.toString())); FakeRawConnectionPtr fake_upstream_connection; FakeUpstream* expected_upstream = getExpectedUpstream(false); @@ -207,7 +202,7 @@ TEST_P(ThriftConnManagerIntegrationTest, IDLException) { initializeCall(DriverMode::IDLException); IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("listener_0")); - tcp_client->write(request_bytes_.toString()); + ASSERT_TRUE(tcp_client->write(request_bytes_.toString())); FakeUpstream* expected_upstream = getExpectedUpstream(false); FakeRawConnectionPtr fake_upstream_connection; @@ -234,7 +229,7 @@ TEST_P(ThriftConnManagerIntegrationTest, Exception) { initializeCall(DriverMode::Exception); IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("listener_0")); - tcp_client->write(request_bytes_.toString()); + ASSERT_TRUE(tcp_client->write(request_bytes_.toString())); FakeUpstream* expected_upstream = getExpectedUpstream(false); FakeRawConnectionPtr fake_upstream_connection; @@ -267,7 +262,7 @@ TEST_P(ThriftConnManagerIntegrationTest, EarlyClose) { expected_upstream->set_allow_unexpected_disconnects(true); IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("listener_0")); - tcp_client->write(partial_request); + ASSERT_TRUE(tcp_client->write(partial_request)); tcp_client->close(); FakeRawConnectionPtr fake_upstream_connection; @@ -289,7 +284,7 @@ TEST_P(ThriftConnManagerIntegrationTest, EarlyCloseWithUpstream) { request_bytes_.toString().substr(0, request_bytes_.length() - 5); IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("listener_0")); - tcp_client->write(partial_request); + ASSERT_TRUE(tcp_client->write(partial_request)); FakeUpstream* expected_upstream = 
getExpectedUpstream(false); FakeRawConnectionPtr fake_upstream_connection; @@ -312,7 +307,7 @@ TEST_P(ThriftConnManagerIntegrationTest, EarlyUpstreamClose) { request_bytes_.toString().substr(0, request_bytes_.length() - 5); IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("listener_0")); - tcp_client->write(request_bytes_.toString()); + ASSERT_TRUE(tcp_client->write(request_bytes_.toString())); FakeUpstream* expected_upstream = getExpectedUpstream(false); FakeRawConnectionPtr fake_upstream_connection; @@ -339,7 +334,7 @@ TEST_P(ThriftConnManagerIntegrationTest, Oneway) { initializeOneway(); IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("listener_0")); - tcp_client->write(request_bytes_.toString()); + ASSERT_TRUE(tcp_client->write(request_bytes_.toString())); FakeUpstream* expected_upstream = getExpectedUpstream(true); FakeRawConnectionPtr fake_upstream_connection; @@ -360,7 +355,7 @@ TEST_P(ThriftConnManagerIntegrationTest, OnewayEarlyClose) { initializeOneway(); IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("listener_0")); - tcp_client->write(request_bytes_.toString()); + ASSERT_TRUE(tcp_client->write(request_bytes_.toString())); tcp_client->close(); FakeUpstream* expected_upstream = getExpectedUpstream(true); @@ -385,7 +380,7 @@ TEST_P(ThriftConnManagerIntegrationTest, OnewayEarlyClosePartialRequest) { expected_upstream->set_allow_unexpected_disconnects(true); IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("listener_0")); - tcp_client->write(partial_request); + ASSERT_TRUE(tcp_client->write(partial_request)); tcp_client->close(); FakeRawConnectionPtr fake_upstream_connection; @@ -420,13 +415,13 @@ TEST_P(ThriftTwitterConnManagerIntegrationTest, Success) { // Upgrade request/response happens without an upstream. 
IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("listener_0")); - tcp_client->write(upgrade_request_bytes.toString()); + ASSERT_TRUE(tcp_client->write(upgrade_request_bytes.toString())); tcp_client->waitForData(upgrade_response_bytes.toString()); EXPECT_TRUE( TestUtility::buffersEqual(Buffer::OwnedImpl(tcp_client->data()), upgrade_response_bytes)); // First real request triggers upstream connection. - tcp_client->write(request_bytes_.toString()); + ASSERT_TRUE(tcp_client->write(request_bytes_.toString())); FakeRawConnectionPtr fake_upstream_connection; FakeUpstream* expected_upstream = getExpectedUpstream(false); ASSERT_TRUE(expected_upstream->waitForRawConnection(fake_upstream_connection)); diff --git a/test/extensions/filters/network/thrift_proxy/route_matcher_test.cc b/test/extensions/filters/network/thrift_proxy/route_matcher_test.cc index b39e45d391c38..079f646d3a913 100644 --- a/test/extensions/filters/network/thrift_proxy/route_matcher_test.cc +++ b/test/extensions/filters/network/thrift_proxy/route_matcher_test.cc @@ -3,6 +3,8 @@ #include "envoy/extensions/filters/network/thrift_proxy/v3/route.pb.h" #include "envoy/extensions/filters/network/thrift_proxy/v3/route.pb.validate.h" +#include "common/config/metadata.h" + #include "extensions/filters/network/thrift_proxy/router/config.h" #include "extensions/filters/network/thrift_proxy/router/router_impl.h" @@ -19,9 +21,9 @@ namespace Router { namespace { envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration -parseRouteConfigurationFromV2Yaml(const std::string& yaml) { +parseRouteConfigurationFromV3Yaml(const std::string& yaml, bool avoid_boosting = true) { envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration route_config; - TestUtility::loadFromYaml(yaml, route_config); + TestUtility::loadFromYaml(yaml, route_config, false, avoid_boosting); TestUtility::validate(route_config); return route_config; } @@ -41,7 +43,7 @@ name: config )EOF"; 
envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration config = - parseRouteConfigurationFromV2Yaml(yaml); + parseRouteConfigurationFromV3Yaml(yaml); RouteMatcher matcher(config); MessageMetadata metadata; @@ -78,7 +80,7 @@ name: config )EOF"; envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration config = - parseRouteConfigurationFromV2Yaml(yaml); + parseRouteConfigurationFromV3Yaml(yaml); RouteMatcher matcher(config); MessageMetadata metadata; @@ -121,7 +123,7 @@ name: config )EOF"; envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration config = - parseRouteConfigurationFromV2Yaml(yaml); + parseRouteConfigurationFromV3Yaml(yaml); RouteMatcher matcher(config); @@ -158,7 +160,7 @@ name: config )EOF"; envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration config = - parseRouteConfigurationFromV2Yaml(yaml); + parseRouteConfigurationFromV3Yaml(yaml); EXPECT_THROW(new RouteMatcher(config), EnvoyException); } @@ -178,7 +180,7 @@ name: config )EOF"; envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration config = - parseRouteConfigurationFromV2Yaml(yaml); + parseRouteConfigurationFromV3Yaml(yaml); RouteMatcher matcher(config); MessageMetadata metadata; @@ -215,7 +217,7 @@ name: config )EOF"; envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration config = - parseRouteConfigurationFromV2Yaml(yaml); + parseRouteConfigurationFromV3Yaml(yaml); RouteMatcher matcher(config); MessageMetadata metadata; @@ -258,7 +260,7 @@ name: config )EOF"; envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration config = - parseRouteConfigurationFromV2Yaml(yaml); + parseRouteConfigurationFromV3Yaml(yaml); RouteMatcher matcher(config); @@ -295,7 +297,7 @@ name: config )EOF"; envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration config = - parseRouteConfigurationFromV2Yaml(yaml); + parseRouteConfigurationFromV3Yaml(yaml); EXPECT_THROW(new 
RouteMatcher(config), EnvoyException); } @@ -314,7 +316,7 @@ name: config )EOF"; envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration config = - parseRouteConfigurationFromV2Yaml(yaml); + parseRouteConfigurationFromV3Yaml(yaml); RouteMatcher matcher(config); MessageMetadata metadata; @@ -347,7 +349,7 @@ name: config )EOF"; envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration config = - parseRouteConfigurationFromV2Yaml(yaml); + parseRouteConfigurationFromV3Yaml(yaml); RouteMatcher matcher(config); MessageMetadata metadata; @@ -385,7 +387,7 @@ name: config )EOF"; envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration config = - parseRouteConfigurationFromV2Yaml(yaml); + parseRouteConfigurationFromV3Yaml(yaml); RouteMatcher matcher(config); MessageMetadata metadata; @@ -421,7 +423,7 @@ name: config )EOF"; envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration config = - parseRouteConfigurationFromV2Yaml(yaml); + parseRouteConfigurationFromV3Yaml(yaml); RouteMatcher matcher(config); MessageMetadata metadata; @@ -458,7 +460,7 @@ name: config )EOF"; envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration config = - parseRouteConfigurationFromV2Yaml(yaml); + parseRouteConfigurationFromV3Yaml(yaml); RouteMatcher matcher(config); MessageMetadata metadata; @@ -494,7 +496,7 @@ name: config )EOF"; envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration config = - parseRouteConfigurationFromV2Yaml(yaml); + parseRouteConfigurationFromV3Yaml(yaml); RouteMatcher matcher(config); MessageMetadata metadata; @@ -532,7 +534,7 @@ name: config )EOF"; envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration config = - parseRouteConfigurationFromV2Yaml(yaml); + parseRouteConfigurationFromV3Yaml(yaml); RouteMatcher matcher(config); MessageMetadata metadata; @@ -588,7 +590,7 @@ name: config )EOF"; 
envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration config = - parseRouteConfigurationFromV2Yaml(yaml); + parseRouteConfigurationFromV3Yaml(yaml); RouteMatcher matcher(config); MessageMetadata metadata; @@ -632,7 +634,7 @@ name: config )EOF"; const envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration config = - parseRouteConfigurationFromV2Yaml(yaml); + parseRouteConfigurationFromV3Yaml(yaml); EXPECT_THROW(RouteMatcher m(config), EnvoyException); } @@ -656,7 +658,7 @@ name: config )EOF"; const envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration config = - parseRouteConfigurationFromV2Yaml(yaml); + parseRouteConfigurationFromV3Yaml(yaml); RouteMatcher matcher(config); MessageMetadata metadata; @@ -728,7 +730,7 @@ name: config )EOF"; const envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration config = - parseRouteConfigurationFromV2Yaml(yaml); + parseRouteConfigurationFromV3Yaml(yaml); RouteMatcher matcher(config); MessageMetadata metadata; metadata.setMethodName("method1"); @@ -756,6 +758,8 @@ name: config EXPECT_EQ("k2", mmc[1]->name()); EXPECT_EQ(hv2, mmc[1]->value()); + + EXPECT_EQ(Http::LowerCaseString{""}, route->routeEntry()->clusterHeader()); } // match with weighted cluster with different metadata key @@ -815,7 +819,7 @@ name: config )EOF"; const envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration config = - parseRouteConfigurationFromV2Yaml(yaml); + parseRouteConfigurationFromV3Yaml(yaml); RouteMatcher matcher(config); MessageMetadata metadata; metadata.setMethodName("method1"); @@ -889,6 +893,118 @@ name: config } } +// Test that the route entry has metadata match criteria when using a cluster header. 
+TEST(ThriftRouteMatcherTest, ClusterHeaderMetadataMatch) { + envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration config; + { + config.set_name("config"); + auto* route = config.add_routes(); + route->mutable_match()->set_method_name("method1"); + auto* action = route->mutable_route(); + action->set_cluster_header("header_name"); + auto* metadata = action->mutable_metadata_match(); + Envoy::Config::Metadata::mutableMetadataValue(*metadata, "envoy.lb", "k1") + .set_string_value("v1"); + Envoy::Config::Metadata::mutableMetadataValue(*metadata, "envoy.lb", "k2") + .set_string_value("v2"); + + auto* route2 = config.add_routes(); + route2->mutable_match()->set_method_name("method2"); + auto* action2 = route2->mutable_route(); + action2->set_cluster("cluster2"); + } + + RouteMatcher matcher(config); + + // match with metadata + { + MessageMetadata metadata; + metadata.setMethodName("method1"); + metadata.headers().addCopy(Http::LowerCaseString{"header_name"}, "cluster1"); + RouteConstSharedPtr route = matcher.route(metadata, 0); + EXPECT_NE(nullptr, route); + EXPECT_NE(nullptr, route->routeEntry()); + + EXPECT_EQ(Http::LowerCaseString{"header_name"}, route->routeEntry()->clusterHeader()); + + const Envoy::Router::MetadataMatchCriteria* criteria = + route->routeEntry()->metadataMatchCriteria(); + EXPECT_NE(nullptr, criteria); + const std::vector& mmc = + criteria->metadataMatchCriteria(); + EXPECT_EQ(2, mmc.size()); + + ProtobufWkt::Value v1, v2; + v1.set_string_value("v1"); + v2.set_string_value("v2"); + HashedValue hv1(v1), hv2(v2); + + EXPECT_EQ("k1", mmc[0]->name()); + EXPECT_EQ(hv1, mmc[0]->value()); + + EXPECT_EQ("k2", mmc[1]->name()); + EXPECT_EQ(hv2, mmc[1]->value()); + } + + // match with no metadata + { + MessageMetadata metadata; + metadata.setMethodName("method2"); + RouteConstSharedPtr route = matcher.route(metadata, 0); + EXPECT_NE(nullptr, route); + EXPECT_NE(nullptr, route->routeEntry()); + EXPECT_EQ(nullptr, 
route->routeEntry()->metadataMatchCriteria()); + + EXPECT_EQ(Http::LowerCaseString{""}, route->routeEntry()->clusterHeader()); + } +} + +// Tests that weighted cluster route entries can be configured to strip the service name. +TEST(RouteMatcherTest, WeightedClusterWithStripServiceEnabled) { + envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration config; + { + config.set_name("config"); + auto* route = config.add_routes(); + route->mutable_match()->set_method_name("method1"); + auto* action = route->mutable_route(); + auto* cluster1 = action->mutable_weighted_clusters()->add_clusters(); + cluster1->set_name("cluster1"); + cluster1->mutable_weight()->set_value(50); + auto* cluster2 = action->mutable_weighted_clusters()->add_clusters(); + cluster2->set_name("cluster2"); + cluster2->mutable_weight()->set_value(50); + action->set_strip_service_name(true); + } + + RouteMatcher matcher(config); + + MessageMetadata metadata; + metadata.setMethodName("method1"); + + EXPECT_TRUE(matcher.route(metadata, 0)->routeEntry()->stripServiceName()); +} + +// Tests that dynamic route entries can be configured to strip the service name. 
+TEST(RouteMatcherTest, ClusterHeaderWithStripServiceEnabled) { + envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration config; + { + config.set_name("config"); + auto* route = config.add_routes(); + route->mutable_match()->set_method_name("method1"); + auto* action = route->mutable_route(); + action->set_cluster_header("header_name"); + action->set_strip_service_name(true); + } + + RouteMatcher matcher(config); + + MessageMetadata metadata; + metadata.setMethodName("method1"); + metadata.headers().addCopy(Http::LowerCaseString{"header_name"}, "cluster1"); + + EXPECT_TRUE(matcher.route(metadata, 0)->routeEntry()->stripServiceName()); +} + } // namespace } // namespace Router } // namespace ThriftProxy diff --git a/test/extensions/filters/network/thrift_proxy/router_ratelimit_test.cc b/test/extensions/filters/network/thrift_proxy/router_ratelimit_test.cc index 458412b7d0e05..0813d58590260 100644 --- a/test/extensions/filters/network/thrift_proxy/router_ratelimit_test.cc +++ b/test/extensions/filters/network/thrift_proxy/router_ratelimit_test.cc @@ -14,7 +14,7 @@ #include "test/extensions/filters/network/thrift_proxy/mocks.h" #include "test/mocks/ratelimit/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "gmock/gmock.h" #include "gtest/gtest.h" @@ -31,9 +31,13 @@ namespace { class ThriftRateLimitConfigurationTest : public testing::Test { public: - void initialize(const std::string& yaml) { + void initialize(const std::string& yaml, bool avoid_boosting = true) { envoy::extensions::filters::network::thrift_proxy::v3::ThriftProxy config; - TestUtility::loadFromYaml(yaml, config); + TestUtility::loadFromYaml(yaml, config, false, avoid_boosting); + initialize(config); + } + + void initialize(envoy::extensions::filters::network::thrift_proxy::v3::ThriftProxy& config) { config_ = std::make_unique(config, factory_context_); } @@ -170,6 +174,125 @@ TEST_F(ThriftRateLimitConfigurationTest, Stages) { 
EXPECT_TRUE(rate_limits.empty()); } +// Test that rate limiter stages work with weighted cluster route entries. +TEST_F(ThriftRateLimitConfigurationTest, WeightedClusterStages) { + envoy::extensions::filters::network::thrift_proxy::v3::ThriftProxy config; + { + auto* route_config = config.mutable_route_config(); + route_config->set_name("config"); + auto* route = route_config->add_routes(); + route->mutable_match()->set_method_name("foo"); + auto* action = route->mutable_route(); + auto* cluster1 = action->mutable_weighted_clusters()->add_clusters(); + cluster1->set_name("thrift"); + cluster1->mutable_weight()->set_value(50); + auto* cluster2 = action->mutable_weighted_clusters()->add_clusters(); + cluster2->set_name("thrift2"); + cluster2->mutable_weight()->set_value(50); + + auto* limit1 = action->add_rate_limits(); + limit1->mutable_stage()->set_value(1); + limit1->add_actions()->mutable_remote_address(); + + action->add_rate_limits()->add_actions()->mutable_destination_cluster(); + + auto* limit3 = action->add_rate_limits(); + limit3->add_actions()->mutable_destination_cluster(); + limit3->add_actions()->mutable_source_cluster(); + } + initialize(config); + + auto route = config_->route(genMetadata("foo"), 0)->routeEntry(); + std::vector> rate_limits = + route->rateLimitPolicy().getApplicableRateLimit(0); + EXPECT_EQ(2U, rate_limits.size()); + + std::vector descriptors; + for (const RateLimitPolicyEntry& rate_limit : rate_limits) { + rate_limit.populateDescriptors(*route, descriptors, "service_cluster", *metadata_, + default_remote_address_); + } + EXPECT_THAT(std::vector( + {{{{"destination_cluster", "thrift"}}}, + {{{"destination_cluster", "thrift"}, {"source_cluster", "service_cluster"}}}}), + testing::ContainerEq(descriptors)); + + descriptors.clear(); + rate_limits = route->rateLimitPolicy().getApplicableRateLimit(1); + EXPECT_EQ(1U, rate_limits.size()); + + for (const RateLimitPolicyEntry& rate_limit : rate_limits) { + 
rate_limit.populateDescriptors(*route, descriptors, "service_cluster", *metadata_, + default_remote_address_); + } + EXPECT_THAT(std::vector({{{{"remote_address", "10.0.0.1"}}}}), + testing::ContainerEq(descriptors)); + + rate_limits = route->rateLimitPolicy().getApplicableRateLimit(10); + EXPECT_TRUE(rate_limits.empty()); +} + +// Test that rate limiter stages work with dynamic route entries. +TEST_F(ThriftRateLimitConfigurationTest, ClusterHeaderStages) { + envoy::extensions::filters::network::thrift_proxy::v3::ThriftProxy config; + { + auto* route_config = config.mutable_route_config(); + route_config->set_name("config"); + auto* route = route_config->add_routes(); + route->mutable_match()->set_method_name("foo"); + auto* action = route->mutable_route(); + action->set_cluster_header("header_name"); + + auto* limit1 = action->add_rate_limits(); + limit1->mutable_stage()->set_value(1); + limit1->add_actions()->mutable_remote_address(); + + action->add_rate_limits()->add_actions()->mutable_destination_cluster(); + + auto* limit3 = action->add_rate_limits(); + limit3->add_actions()->mutable_destination_cluster(); + limit3->add_actions()->mutable_source_cluster(); + } + initialize(config); + + auto& metadata = genMetadata("foo"); + metadata.headers().addCopy(Http::LowerCaseString{"header_name"}, "thrift"); + + // Keep hold of route, it's a newly minted shared pointer. 
+ auto route = config_->route(metadata, 0); + auto* route_entry = route->routeEntry(); + + std::vector> rate_limits = + route_entry->rateLimitPolicy().getApplicableRateLimit(0); + + EXPECT_EQ(2U, rate_limits.size()); + + std::vector descriptors; + for (const RateLimitPolicyEntry& rate_limit : rate_limits) { + rate_limit.populateDescriptors(*route_entry, descriptors, "service_cluster", *metadata_, + default_remote_address_); + } + + EXPECT_THAT(std::vector( + {{{{"destination_cluster", "thrift"}}}, + {{{"destination_cluster", "thrift"}, {"source_cluster", "service_cluster"}}}}), + testing::ContainerEq(descriptors)); + + descriptors.clear(); + rate_limits = route_entry->rateLimitPolicy().getApplicableRateLimit(1); + EXPECT_EQ(1U, rate_limits.size()); + + for (const RateLimitPolicyEntry& rate_limit : rate_limits) { + rate_limit.populateDescriptors(*route_entry, descriptors, "service_cluster", *metadata_, + default_remote_address_); + } + EXPECT_THAT(std::vector({{{{"remote_address", "10.0.0.1"}}}}), + testing::ContainerEq(descriptors)); + + rate_limits = route_entry->rateLimitPolicy().getApplicableRateLimit(10); + EXPECT_TRUE(rate_limits.empty()); +} + class ThriftRateLimitPolicyEntryTest : public testing::Test { public: void initialize(const std::string& yaml) { diff --git a/test/extensions/filters/network/thrift_proxy/router_test.cc b/test/extensions/filters/network/thrift_proxy/router_test.cc index ff7e57a5e14e2..2034941d59a34 100644 --- a/test/extensions/filters/network/thrift_proxy/router_test.cc +++ b/test/extensions/filters/network/thrift_proxy/router_test.cc @@ -13,7 +13,7 @@ #include "test/extensions/filters/network/thrift_proxy/mocks.h" #include "test/extensions/filters/network/thrift_proxy/utility.h" #include "test/mocks/network/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "test/mocks/upstream/mocks.h" #include "test/test_common/printers.h" #include "test/test_common/registry.h" @@ -700,6 +700,75 @@ 
TEST_F(ThriftRouterTest, ProtocolUpgrade) { destroyRouter(); } +// Test the case where an upgrade will occur, but the conn pool +// returns immediately with a valid, but never, used connection. +TEST_F(ThriftRouterTest, ProtocolUpgradeOnExistingUnusedConnection) { + initializeRouter(); + + EXPECT_CALL(*context_.cluster_manager_.tcp_conn_pool_.connection_data_, addUpstreamCallbacks(_)) + .WillOnce(Invoke( + [&](Tcp::ConnectionPool::UpstreamCallbacks& cb) -> void { upstream_callbacks_ = &cb; })); + + conn_state_.reset(); + EXPECT_CALL(*context_.cluster_manager_.tcp_conn_pool_.connection_data_, connectionState()) + .WillRepeatedly( + Invoke([&]() -> Tcp::ConnectionPool::ConnectionState* { return conn_state_.get(); })); + EXPECT_CALL(*context_.cluster_manager_.tcp_conn_pool_.connection_data_, setConnectionState_(_)) + .WillOnce(Invoke( + [&](Tcp::ConnectionPool::ConnectionStatePtr& cs) -> void { conn_state_.swap(cs); })); + + MockThriftObject* upgrade_response = new NiceMock(); + + EXPECT_CALL(upstream_connection_, write(_, false)) + .WillOnce(Invoke([&](Buffer::Instance& buffer, bool) -> void { + EXPECT_EQ("upgrade request", buffer.toString()); + })); + + // Simulate an existing connection that's never been used. 
+ EXPECT_CALL(context_.cluster_manager_.tcp_conn_pool_, newConnection(_)) + .WillOnce( + Invoke([&](Tcp::ConnectionPool::Callbacks& cb) -> Tcp::ConnectionPool::Cancellable* { + context_.cluster_manager_.tcp_conn_pool_.newConnectionImpl(cb); + + EXPECT_CALL(*protocol_, supportsUpgrade()).WillOnce(Return(true)); + + EXPECT_CALL(*protocol_, attemptUpgrade(_, _, _)) + .WillOnce(Invoke([&](Transport&, ThriftConnectionState&, + Buffer::Instance& buffer) -> ThriftObjectPtr { + buffer.add("upgrade request"); + return ThriftObjectPtr{upgrade_response}; + })); + + context_.cluster_manager_.tcp_conn_pool_.poolReady(upstream_connection_); + return nullptr; + })); + + startRequest(MessageType::Call); + + EXPECT_NE(nullptr, upstream_callbacks_); + + Buffer::OwnedImpl buffer; + EXPECT_CALL(*upgrade_response, onData(Ref(buffer))).WillOnce(Return(false)); + upstream_callbacks_->onUpstreamData(buffer, false); + + EXPECT_CALL(*upgrade_response, onData(Ref(buffer))).WillOnce(Return(true)); + EXPECT_CALL(*protocol_, completeUpgrade(_, Ref(*upgrade_response))); + EXPECT_CALL(callbacks_, continueDecoding()); + EXPECT_CALL(*protocol_, writeMessageBegin(_, _)) + .WillOnce(Invoke([&](Buffer::Instance&, const MessageMetadata& metadata) -> void { + EXPECT_EQ(metadata_->methodName(), metadata.methodName()); + EXPECT_EQ(metadata_->messageType(), metadata.messageType()); + EXPECT_EQ(metadata_->sequenceId(), metadata.sequenceId()); + })); + upstream_callbacks_->onUpstreamData(buffer, false); + + // Then the actual request... 
+ sendTrivialStruct(FieldType::String); + completeRequest(); + returnResponse(); + destroyRouter(); +} + TEST_F(ThriftRouterTest, ProtocolUpgradeSkippedOnExistingConnection) { initializeRouter(); startRequest(MessageType::Call); diff --git a/test/extensions/filters/network/thrift_proxy/translation_integration_test.cc b/test/extensions/filters/network/thrift_proxy/translation_integration_test.cc index d60c6dacd415b..7b07ad7ab623d 100644 --- a/test/extensions/filters/network/thrift_proxy/translation_integration_test.cc +++ b/test/extensions/filters/network/thrift_proxy/translation_integration_test.cc @@ -82,11 +82,6 @@ class ThriftTranslationIntegrationTest BaseThriftIntegrationTest::initialize(); } - void TearDown() override { - test_server_.reset(); - fake_upstreams_.clear(); - } - Buffer::OwnedImpl downstream_request_bytes_; Buffer::OwnedImpl downstream_response_bytes_; Buffer::OwnedImpl upstream_request_bytes_; @@ -121,7 +116,7 @@ TEST_P(ThriftTranslationIntegrationTest, Translates) { initialize(); IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("listener_0")); - tcp_client->write(downstream_request_bytes_.toString()); + ASSERT_TRUE(tcp_client->write(downstream_request_bytes_.toString())); FakeRawConnectionPtr fake_upstream_connection; ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection)); diff --git a/test/extensions/filters/network/thrift_proxy/twitter_protocol_impl_test.cc b/test/extensions/filters/network/thrift_proxy/twitter_protocol_impl_test.cc index 3d08c0eb95e23..061fbe1ddb1b0 100644 --- a/test/extensions/filters/network/thrift_proxy/twitter_protocol_impl_test.cc +++ b/test/extensions/filters/network/thrift_proxy/twitter_protocol_impl_test.cc @@ -99,8 +99,8 @@ class TwitterProtocolTest : public testing::Test { TestTwitterProtocolImpl proto; - metadata_->mutable_spans().emplace_back(trace_id, "", span_id, absl::optional(), - AnnotationList(), BinaryAnnotationList(), false); + 
metadata_->mutableSpans().emplace_back(trace_id, "", span_id, absl::optional(), + AnnotationList(), BinaryAnnotationList(), false); metadata_->headers().addCopy(Http::LowerCaseString("test-header"), "test-header-value"); proto.writeResponseHeaderForTest(buffer, *metadata_); @@ -487,7 +487,7 @@ TEST_F(TwitterProtocolTest, ParseRequestHeader) { EXPECT_TRUE(metadata_->flags()); EXPECT_EQ(5, *metadata_->flags()); - Http::TestHeaderMapImpl test_headers(metadata_->headers()); + Http::TestRequestHeaderMapImpl test_headers(metadata_->headers()); EXPECT_EQ(6, test_headers.size()); EXPECT_EQ("thrift-client-id", test_headers.get_(":client-id")); @@ -523,7 +523,7 @@ TEST_F(TwitterProtocolTest, ParseEmptyRequestHeader) { EXPECT_FALSE(metadata_->flags()); EXPECT_TRUE(metadata_->spans().empty()); - Http::TestHeaderMapImpl test_headers(metadata_->headers()); + Http::TestRequestHeaderMapImpl test_headers(metadata_->headers()); EXPECT_EQ(0, test_headers.size()); } @@ -556,7 +556,7 @@ TEST_F(TwitterProtocolTest, WriteRequestHeader) { EXPECT_TRUE(*metadata_->sampled()); EXPECT_EQ(5, *metadata_->flags()); - Http::TestHeaderMapImpl test_headers(metadata_->headers()); + Http::TestRequestHeaderMapImpl test_headers(metadata_->headers()); EXPECT_EQ(4, test_headers.size()); EXPECT_EQ("thrift-client-id", test_headers.get_(":client-id")); EXPECT_EQ("dest", test_headers.get_(":dest")); @@ -581,7 +581,7 @@ TEST_F(TwitterProtocolTest, WriteMostlyEmptyRequestHeader) { EXPECT_FALSE(metadata_->sampled()); EXPECT_FALSE(metadata_->flags()); - Http::TestHeaderMapImpl test_headers(metadata_->headers()); + Http::TestRequestHeaderMapImpl test_headers(metadata_->headers()); EXPECT_EQ(0, test_headers.size()); } @@ -696,7 +696,7 @@ TEST_F(TwitterProtocolTest, ParseResponseHeader) { EXPECT_FALSE(span.debug_); } - Http::TestHeaderMapImpl test_headers(metadata_->headers()); + Http::TestRequestHeaderMapImpl test_headers(metadata_->headers()); EXPECT_EQ(2, test_headers.size()); EXPECT_EQ("v1", 
test_headers.get_("k1")); EXPECT_EQ("v2", test_headers.get_("k2")); @@ -714,7 +714,7 @@ TEST_F(TwitterProtocolTest, ParseEmptyResponseHeader) { EXPECT_TRUE(metadata_->spans().empty()); - Http::TestHeaderMapImpl test_headers(metadata_->headers()); + Http::TestRequestHeaderMapImpl test_headers(metadata_->headers()); EXPECT_EQ(0, test_headers.size()); } @@ -724,7 +724,7 @@ TEST_F(TwitterProtocolTest, WriteResponseHeader) { headers.addCopy(Http::LowerCaseString("key1"), "value1"); headers.addCopy(Http::LowerCaseString("key2"), "value2"); - SpanList& spans = metadata_->mutable_spans(); + SpanList& spans = metadata_->mutableSpans(); spans.emplace_back(1, "s1", 100, absl::optional(10), AnnotationList({ Annotation(100000, "a1", {Endpoint(0xC0A80001, 0, "")}), @@ -798,7 +798,7 @@ TEST_F(TwitterProtocolTest, WriteResponseHeader) { EXPECT_TRUE(span2.binary_annotations_.empty()); EXPECT_FALSE(span2.debug_); - Http::TestHeaderMapImpl test_headers(metadata_->headers()); + Http::TestRequestHeaderMapImpl test_headers(metadata_->headers()); EXPECT_EQ("value1", test_headers.get_("key1")); EXPECT_EQ("value2", test_headers.get_("key2")); } @@ -822,7 +822,7 @@ TEST_F(TwitterProtocolTest, WriteEmptyResponseHeader) { EXPECT_TRUE(metadata_->spans().empty()); - Http::TestHeaderMapImpl test_headers(metadata_->headers()); + Http::TestRequestHeaderMapImpl test_headers(metadata_->headers()); EXPECT_EQ(0, test_headers.size()); } @@ -840,7 +840,7 @@ TEST_F(TwitterProtocolTest, TestUpgradedRequestMessageBegin) { EXPECT_EQ(101, metadata_->sequenceId()); EXPECT_EQ(1, *metadata_->traceId()); EXPECT_EQ(2, *metadata_->spanId()); - Http::TestHeaderMapImpl test_headers(metadata_->headers()); + Http::TestRequestHeaderMapImpl test_headers(metadata_->headers()); EXPECT_EQ("test_client", test_headers.get_(":client-id")); } @@ -865,7 +865,7 @@ TEST_F(TwitterProtocolTest, TestUpgradedRequestMessageContinuation) { EXPECT_EQ(101, metadata_->sequenceId()); EXPECT_EQ(1, *metadata_->traceId()); EXPECT_EQ(2, 
*metadata_->spanId()); - Http::TestHeaderMapImpl test_headers(metadata_->headers()); + Http::TestRequestHeaderMapImpl test_headers(metadata_->headers()); EXPECT_EQ("test_client", test_headers.get_(":client-id")); } } @@ -885,7 +885,7 @@ TEST_F(TwitterProtocolTest, TestUpgradedReplyMessageBegin) { EXPECT_EQ(1, metadata_->spans().size()); EXPECT_EQ(1, metadata_->spans().front().trace_id_); EXPECT_EQ(2, metadata_->spans().front().span_id_); - Http::TestHeaderMapImpl test_headers(metadata_->headers()); + Http::TestRequestHeaderMapImpl test_headers(metadata_->headers()); EXPECT_EQ("test-header-value", test_headers.get_("test-header")); } @@ -912,7 +912,7 @@ TEST_F(TwitterProtocolTest, TestUpgradedReplyMessageContinuation) { EXPECT_EQ(1, metadata_->spans().size()); EXPECT_EQ(1, metadata_->spans().front().trace_id_); EXPECT_EQ(2, metadata_->spans().front().span_id_); - Http::TestHeaderMapImpl test_headers(metadata_->headers()); + Http::TestRequestHeaderMapImpl test_headers(metadata_->headers()); EXPECT_EQ("test-header-value", test_headers.get_("test-header")); } } @@ -924,8 +924,8 @@ TEST_F(TwitterProtocolTest, TestUpgradedWriteMessageBegin) { metadata_->setMethodName("message"); metadata_->setSequenceId(1); metadata_->setTraceId(1); - metadata_->mutable_spans().emplace_back(100, "", 100, absl::optional(), AnnotationList(), - BinaryAnnotationList(), false); + metadata_->mutableSpans().emplace_back(100, "", 100, absl::optional(), AnnotationList(), + BinaryAnnotationList(), false); { // Call diff --git a/test/extensions/filters/network/zookeeper_proxy/BUILD b/test/extensions/filters/network/zookeeper_proxy/BUILD index d4d9cb5cef8c2..33767ff1f06c1 100644 --- a/test/extensions/filters/network/zookeeper_proxy/BUILD +++ b/test/extensions/filters/network/zookeeper_proxy/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + 
envoy_package() envoy_extension_cc_test( @@ -32,7 +32,7 @@ envoy_extension_cc_test( extension_name = "envoy.filters.network.zookeeper_proxy", deps = [ "//source/extensions/filters/network/zookeeper_proxy:config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "//test/test_common:utility_lib", "@envoy_api//envoy/extensions/filters/network/zookeeper_proxy/v3:pkg_cc_proto", ], diff --git a/test/extensions/filters/network/zookeeper_proxy/config_test.cc b/test/extensions/filters/network/zookeeper_proxy/config_test.cc index 274287b07d6e3..b133a8f5075b2 100644 --- a/test/extensions/filters/network/zookeeper_proxy/config_test.cc +++ b/test/extensions/filters/network/zookeeper_proxy/config_test.cc @@ -3,7 +3,7 @@ #include "extensions/filters/network/zookeeper_proxy/config.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" @@ -49,7 +49,7 @@ stat_prefix: test_prefix )EOF"; ZooKeeperProxyProtoConfig proto_config; - TestUtility::loadFromYamlAndValidate(yaml, proto_config); + TestUtility::loadFromYamlAndValidate(yaml, proto_config, false, true); testing::NiceMock context; ZooKeeperConfigFactory factory; diff --git a/test/extensions/filters/network/zookeeper_proxy/filter_test.cc b/test/extensions/filters/network/zookeeper_proxy/filter_test.cc index f818f403a0af8..3a6eefd8e28bb 100644 --- a/test/extensions/filters/network/zookeeper_proxy/filter_test.cc +++ b/test/extensions/filters/network/zookeeper_proxy/filter_test.cc @@ -953,6 +953,18 @@ TEST_F(ZooKeeperFilterTest, WatchEvent) { EXPECT_EQ(0UL, config_->stats().decoder_error_.value()); } +TEST_F(ZooKeeperFilterTest, MissingXid) { + initialize(); + + const auto& stat = config_->stats().getdata_resp_; + Buffer::OwnedImpl data = encodeResponseHeader(1000, 2000, 0); + + EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onWrite(data, false)); + EXPECT_EQ(0UL, stat.value()); + 
EXPECT_EQ(0UL, config_->stats().response_bytes_.value()); + EXPECT_EQ(1UL, config_->stats().decoder_error_.value()); +} + } // namespace ZooKeeperProxy } // namespace NetworkFilters } // namespace Extensions diff --git a/test/extensions/filters/udp/dns_filter/BUILD b/test/extensions/filters/udp/dns_filter/BUILD index a7c21842cd4c5..31583a527a633 100644 --- a/test/extensions/filters/udp/dns_filter/BUILD +++ b/test/extensions/filters/udp/dns_filter/BUILD @@ -1,25 +1,66 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", + "envoy_cc_fuzz_test", "envoy_package", ) load( "//test/extensions:extensions_build_system.bzl", "envoy_extension_cc_test", + "envoy_extension_cc_test_library", ) +licenses(["notice"]) # Apache 2 + envoy_package() +envoy_extension_cc_test_library( + name = "dns_filter_test_lib", + srcs = ["dns_filter_test_utils.cc"], + hdrs = ["dns_filter_test_utils.h"], + extension_name = "envoy.filters.udp_listener.dns_filter", + deps = [ + "//source/extensions/filters/udp/dns_filter:dns_filter_lib", + "//test/test_common:environment_lib", + ], +) + envoy_extension_cc_test( name = "dns_filter_test", srcs = ["dns_filter_test.cc"], extension_name = "envoy.filters.udp_listener.dns_filter", + tags = ["fails_on_windows"], deps = [ + ":dns_filter_test_lib", "//source/extensions/filters/udp/dns_filter:dns_filter_lib", - "//test/mocks/server:server_mocks", + "//test/mocks/server:instance_mocks", + "//test/mocks/server:listener_factory_context_mocks", "//test/mocks/upstream:upstream_mocks", "//test/test_common:environment_lib", - "@envoy_api//envoy/config/filter/udp/dns_filter/v2alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/udp/dns_filter/v3alpha:pkg_cc_proto", + ], +) + +envoy_extension_cc_test( + name = "dns_filter_integration_test", + srcs = ["dns_filter_integration_test.cc"], + extension_name = "envoy.filters.udp_listener.dns_filter", + tags = ["fails_on_windows"], + deps = [ + ":dns_filter_test_lib", + 
"//source/extensions/filters/udp/dns_filter:config", + "//source/extensions/filters/udp/dns_filter:dns_filter_lib", + "//test/integration:integration_lib", + "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", + ], +) + +envoy_cc_fuzz_test( + name = "dns_filter_fuzz_test", + srcs = ["dns_filter_fuzz_test.cc"], + corpus = "dns_filter_corpus", + deps = [ + "//source/extensions/filters/udp/dns_filter:dns_filter_lib", + "//test/fuzz:utility_lib", + "//test/test_common:environment_lib", ], ) diff --git a/test/extensions/filters/udp/dns_filter/dns_filter_corpus/016fac1e4a40199b26b08df73179f9249e6a680b b/test/extensions/filters/udp/dns_filter/dns_filter_corpus/016fac1e4a40199b26b08df73179f9249e6a680b new file mode 100644 index 0000000000000..a78515c64c5fc Binary files /dev/null and b/test/extensions/filters/udp/dns_filter/dns_filter_corpus/016fac1e4a40199b26b08df73179f9249e6a680b differ diff --git a/test/extensions/filters/udp/dns_filter/dns_filter_corpus/110be4738f0cc29218ba95bd16a1442b57b3caaf b/test/extensions/filters/udp/dns_filter/dns_filter_corpus/110be4738f0cc29218ba95bd16a1442b57b3caaf new file mode 100644 index 0000000000000..ebba765bd1540 Binary files /dev/null and b/test/extensions/filters/udp/dns_filter/dns_filter_corpus/110be4738f0cc29218ba95bd16a1442b57b3caaf differ diff --git a/test/extensions/filters/udp/dns_filter/dns_filter_corpus/497a3f29c3a53a65853a9e0ab3dd315fb92ac025 b/test/extensions/filters/udp/dns_filter/dns_filter_corpus/497a3f29c3a53a65853a9e0ab3dd315fb92ac025 new file mode 100644 index 0000000000000..20ec57ca096c8 Binary files /dev/null and b/test/extensions/filters/udp/dns_filter/dns_filter_corpus/497a3f29c3a53a65853a9e0ab3dd315fb92ac025 differ diff --git a/test/extensions/filters/udp/dns_filter/dns_filter_corpus/7c09f450b6667337fd111fad0049bf4601c1aece b/test/extensions/filters/udp/dns_filter/dns_filter_corpus/7c09f450b6667337fd111fad0049bf4601c1aece new file mode 100644 index 0000000000000..a7644df43a1cc Binary files /dev/null and 
b/test/extensions/filters/udp/dns_filter/dns_filter_corpus/7c09f450b6667337fd111fad0049bf4601c1aece differ diff --git a/test/extensions/filters/udp/dns_filter/dns_filter_corpus/fb9282f0af3341cfc98d56f10fffffd5529d8802 b/test/extensions/filters/udp/dns_filter/dns_filter_corpus/fb9282f0af3341cfc98d56f10fffffd5529d8802 new file mode 100644 index 0000000000000..2e9c5cade09f0 Binary files /dev/null and b/test/extensions/filters/udp/dns_filter/dns_filter_corpus/fb9282f0af3341cfc98d56f10fffffd5529d8802 differ diff --git a/test/extensions/filters/udp/dns_filter/dns_filter_fuzz_test.cc b/test/extensions/filters/udp/dns_filter/dns_filter_fuzz_test.cc new file mode 100644 index 0000000000000..4eceac1226eb3 --- /dev/null +++ b/test/extensions/filters/udp/dns_filter/dns_filter_fuzz_test.cc @@ -0,0 +1,71 @@ +#include "common/common/logger.h" + +#include "extensions/filters/udp/dns_filter/dns_filter.h" + +#include "test/fuzz/fuzz_runner.h" +#include "test/fuzz/utility.h" +#include "test/mocks/common.h" +#include "test/mocks/event/mocks.h" +#include "test/test_common/environment.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace UdpFilters { +namespace DnsFilter { +namespace { + +DEFINE_FUZZER(const uint8_t* buf, size_t len) { + static const auto local = Network::Utility::parseInternetAddressAndPort("127.0.2.1:5353"); + static const auto peer = Network::Utility::parseInternetAddressAndPort("127.0.2.1:55088"); + + static NiceMock random; + static NiceMock histogram; + histogram.unit_ = Stats::Histogram::Unit::Milliseconds; + static Api::ApiPtr api = Api::createApiForTest(); + static NiceMock mock_query_buffer_underflow; + static NiceMock mock_record_name_overflow; + static NiceMock query_parsing_failure; + static DnsParserCounters counters(mock_query_buffer_underflow, mock_record_name_overflow, + query_parsing_failure); + + FuzzedDataProvider data_provider(buf, len); + Buffer::InstancePtr query_buffer = 
std::make_unique(); + + while (data_provider.remaining_bytes()) { + const std::string query = data_provider.ConsumeRandomLengthString(1024); + query_buffer->add(query.data(), query.size()); + + const uint16_t retry_count = data_provider.ConsumeIntegralInRange(0, 3); + DnsMessageParser message_parser(true, api->timeSource(), retry_count, random, histogram); + uint64_t offset = data_provider.ConsumeIntegralInRange(0, query.size()); + + const uint8_t fuzz_function = data_provider.ConsumeIntegralInRange(0, 2); + switch (fuzz_function) { + case 0: { + DnsQueryContextPtr query_context = + std::make_unique(local, peer, counters, retry_count); + bool result = message_parser.parseDnsObject(query_context, query_buffer); + UNREFERENCED_PARAMETER(result); + } break; + + case 1: { + DnsQueryRecordPtr ptr = message_parser.parseDnsQueryRecord(query_buffer, &offset); + UNREFERENCED_PARAMETER(ptr); + } break; + + case 2: { + DnsAnswerRecordPtr ptr = message_parser.parseDnsAnswerRecord(query_buffer, &offset); + UNREFERENCED_PARAMETER(ptr); + } break; + } // end case + query_buffer->drain(query_buffer->length()); + } +} +} // namespace +} // namespace DnsFilter +} // namespace UdpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/udp/dns_filter/dns_filter_integration_test.cc b/test/extensions/filters/udp/dns_filter/dns_filter_integration_test.cc new file mode 100644 index 0000000000000..26628682a2279 --- /dev/null +++ b/test/extensions/filters/udp/dns_filter/dns_filter_integration_test.cc @@ -0,0 +1,306 @@ +#include "envoy/config/bootstrap/v3/bootstrap.pb.h" + +#include "extensions/filters/udp/dns_filter/dns_filter.h" + +#include "test/integration/integration.h" +#include "test/test_common/network_utility.h" + +#include "dns_filter_test_utils.h" + +namespace Envoy { +namespace Extensions { +namespace UdpFilters { +namespace DnsFilter { +namespace { + +class DnsFilterIntegrationTest : public testing::TestWithParam, + public 
BaseIntegrationTest { +public: + DnsFilterIntegrationTest() + : BaseIntegrationTest(GetParam(), configToUse()), api_(Api::createApiForTest()), + counters_(mock_query_buffer_underflow_, mock_record_name_overflow_, + query_parsing_failure_) { + setupResponseParser(); + } + + void setupResponseParser() { + histogram_.unit_ = Stats::Histogram::Unit::Milliseconds; + response_parser_ = std::make_unique( + true /* recursive queries */, api_->timeSource(), 0 /* retries */, random_, histogram_); + } + + static std::string configToUse() { + return fmt::format(R"EOF( +admin: + access_log_path: {} + address: + socket_address: + address: 127.0.0.1 + port_value: 0 +static_resources: + clusters: + name: cluster_0 + load_assignment: + cluster_name: cluster_0 + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 0 + )EOF", + TestEnvironment::nullDevicePath()); + } + + Network::Address::InstanceConstSharedPtr getListenerBindAddressAndPort() { + auto addr = Network::Utility::parseInternetAddressAndPort( + fmt::format("{}:{}", Envoy::Network::Test::getLoopbackAddressUrlString(version_), 0), + false); + + ASSERT(addr != nullptr); + + addr = Network::Test::findOrCheckFreePort(addr, Network::Socket::Type::Datagram); + ASSERT(addr != nullptr && addr->ip() != nullptr); + + return addr; + } + + envoy::config::listener::v3::Listener + getListener0(Network::Address::InstanceConstSharedPtr& addr) { + auto config = fmt::format(R"EOF( +name: listener_0 +address: + socket_address: + address: {} + port_value: 0 + protocol: udp +listener_filters: + name: "envoy.filters.udp.dns_filter" + typed_config: + '@type': 'type.googleapis.com/envoy.extensions.filters.udp.dns_filter.v3alpha.DnsFilterConfig' + stat_prefix: "my_prefix" + client_config: + resolver_timeout: 1s + upstream_resolvers: + - socket_address: + address: {} + port_value: {} + max_pending_lookups: 256 + server_config: + inline_dns_table: + external_retry_count: 0 + known_suffixes: + - 
suffix: "foo1.com" + - suffix: "cluster_0" + virtual_domains: + - name: "www.foo1.com" + endpoint: + address_list: + address: + - 10.0.0.1 + - 10.0.0.2 + - 10.0.0.3 + - 10.0.0.4 + - name: "cluster.foo1.com" + endpoint: + cluster_name: "cluster_0" +)EOF", + addr->ip()->addressAsString(), addr->ip()->addressAsString(), + addr->ip()->port()); + return TestUtility::parseYaml(config); + } + + envoy::config::listener::v3::Listener + getListener1(Network::Address::InstanceConstSharedPtr& addr) { + auto config = fmt::format(R"EOF( +name: listener_1 +address: + socket_address: + address: {} + port_value: {} + protocol: udp +listener_filters: + name: "envoy.filters.udp.dns_filter" + typed_config: + '@type': 'type.googleapis.com/envoy.extensions.filters.udp.dns_filter.v3alpha.DnsFilterConfig' + stat_prefix: "external_resolver" + server_config: + inline_dns_table: + external_retry_count: 0 + known_suffixes: + - suffix: "google.com" + virtual_domains: + - name: "www.google.com" + endpoint: + address_list: + address: + - 42.42.42.42 + - 2607:42:42::42:42 +)EOF", + addr->ip()->addressAsString(), addr->ip()->port()); + return TestUtility::parseYaml(config); + } + + void setup(uint32_t upstream_count) { + udp_fake_upstream_ = true; + if (upstream_count > 1) { + setDeterministic(); + setUpstreamCount(upstream_count); + config_helper_.addConfigModifier( + [upstream_count](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + for (uint32_t i = 1; i < upstream_count; i++) { + bootstrap.mutable_static_resources() + ->mutable_clusters(0) + ->mutable_load_assignment() + ->mutable_endpoints(0) + ->add_lb_endpoints() + ->mutable_endpoint() + ->MergeFrom(ConfigHelper::buildEndpoint( + Network::Test::getLoopbackAddressString(GetParam()))); + } + }); + } + + config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + auto addr_port = getListenerBindAddressAndPort(); + auto listener_0 = getListener0(addr_port); + auto listener_1 = getListener1(addr_port); + 
bootstrap.mutable_static_resources()->add_listeners()->MergeFrom(listener_0); + bootstrap.mutable_static_resources()->add_listeners()->MergeFrom(listener_1); + }); + + BaseIntegrationTest::initialize(); + } + + void requestResponseWithListenerAddress(const Network::Address::Instance& listener_address, + const std::string& data_to_send, + Network::UdpRecvData& response_datagram) { + Network::Test::UdpSyncPeer client(version_); + client.write(data_to_send, listener_address); + client.recv(response_datagram); + } + + Api::ApiPtr api_; + NiceMock histogram_; + NiceMock random_; + NiceMock mock_query_buffer_underflow_; + NiceMock mock_record_name_overflow_; + NiceMock query_parsing_failure_; + DnsParserCounters counters_; + std::unique_ptr response_parser_; + DnsQueryContextPtr query_ctx_; +}; + +INSTANTIATE_TEST_SUITE_P(IpVersions, DnsFilterIntegrationTest, + testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + TestUtility::ipTestParamsToString); + +TEST_P(DnsFilterIntegrationTest, ExternalLookupTest) { + setup(0); + const uint32_t port = lookupPort("listener_0"); + const auto listener_address = Network::Utility::resolveUrl( + fmt::format("tcp://{}:{}", Network::Test::getLoopbackAddressUrlString(version_), port)); + + Network::UdpRecvData response; + std::string query = + Utils::buildQueryForDomain("www.google.com", DNS_RECORD_TYPE_A, DNS_RECORD_CLASS_IN); + requestResponseWithListenerAddress(*listener_address, query, response); + + query_ctx_ = response_parser_->createQueryContext(response, counters_); + EXPECT_TRUE(query_ctx_->parse_status_); + + EXPECT_EQ(1, query_ctx_->answers_.size()); + EXPECT_EQ(DNS_RESPONSE_CODE_NO_ERROR, response_parser_->getQueryResponseCode()); +} + +TEST_P(DnsFilterIntegrationTest, ExternalLookupTestIPv6) { + setup(0); + const uint32_t port = lookupPort("listener_0"); + const auto listener_address = Network::Utility::resolveUrl( + fmt::format("tcp://{}:{}", Network::Test::getLoopbackAddressUrlString(version_), port)); + + 
Network::UdpRecvData response; + std::string query = + Utils::buildQueryForDomain("www.google.com", DNS_RECORD_TYPE_AAAA, DNS_RECORD_CLASS_IN); + requestResponseWithListenerAddress(*listener_address, query, response); + + query_ctx_ = response_parser_->createQueryContext(response, counters_); + EXPECT_TRUE(query_ctx_->parse_status_); + + EXPECT_EQ(1, query_ctx_->answers_.size()); + EXPECT_EQ(DNS_RESPONSE_CODE_NO_ERROR, response_parser_->getQueryResponseCode()); +} + +TEST_P(DnsFilterIntegrationTest, LocalLookupTest) { + setup(0); + const uint32_t port = lookupPort("listener_0"); + const auto listener_address = Network::Utility::resolveUrl( + fmt::format("tcp://{}:{}", Network::Test::getLoopbackAddressUrlString(version_), port)); + + Network::UdpRecvData response; + std::string query = + Utils::buildQueryForDomain("www.foo1.com", DNS_RECORD_TYPE_A, DNS_RECORD_CLASS_IN); + requestResponseWithListenerAddress(*listener_address, query, response); + + query_ctx_ = response_parser_->createQueryContext(response, counters_); + EXPECT_TRUE(query_ctx_->parse_status_); + + EXPECT_EQ(4, query_ctx_->answers_.size()); + EXPECT_EQ(DNS_RESPONSE_CODE_NO_ERROR, response_parser_->getQueryResponseCode()); +} + +TEST_P(DnsFilterIntegrationTest, ClusterLookupTest) { + setup(2); + const uint32_t port = lookupPort("listener_0"); + const auto listener_address = Network::Utility::resolveUrl( + fmt::format("tcp://{}:{}", Network::Test::getLoopbackAddressUrlString(version_), port)); + + uint16_t record_type; + if (listener_address->ip()->ipv6()) { + record_type = DNS_RECORD_TYPE_AAAA; + } else { + record_type = DNS_RECORD_TYPE_A; + } + + Network::UdpRecvData response; + std::string query = Utils::buildQueryForDomain("cluster_0", record_type, DNS_RECORD_CLASS_IN); + requestResponseWithListenerAddress(*listener_address, query, response); + + query_ctx_ = response_parser_->createQueryContext(response, counters_); + EXPECT_TRUE(query_ctx_->parse_status_); + + EXPECT_EQ(2, 
query_ctx_->answers_.size()); + EXPECT_EQ(DNS_RESPONSE_CODE_NO_ERROR, response_parser_->getQueryResponseCode()); +} + +TEST_P(DnsFilterIntegrationTest, ClusterEndpointLookupTest) { + setup(2); + const uint32_t port = lookupPort("listener_0"); + const auto listener_address = Network::Utility::resolveUrl( + fmt::format("tcp://{}:{}", Network::Test::getLoopbackAddressUrlString(version_), port)); + + uint16_t record_type; + if (listener_address->ip()->ipv6()) { + record_type = DNS_RECORD_TYPE_AAAA; + } else { + record_type = DNS_RECORD_TYPE_A; + } + + Network::UdpRecvData response; + std::string query = + Utils::buildQueryForDomain("cluster.foo1.com", record_type, DNS_RECORD_CLASS_IN); + requestResponseWithListenerAddress(*listener_address, query, response); + + query_ctx_ = response_parser_->createQueryContext(response, counters_); + EXPECT_TRUE(query_ctx_->parse_status_); + + EXPECT_EQ(2, query_ctx_->answers_.size()); + EXPECT_EQ(DNS_RESPONSE_CODE_NO_ERROR, response_parser_->getQueryResponseCode()); +} + +} // namespace +} // namespace DnsFilter +} // namespace UdpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/udp/dns_filter/dns_filter_test.cc b/test/extensions/filters/udp/dns_filter/dns_filter_test.cc index 72f349ff196aa..9da9c52ced952 100644 --- a/test/extensions/filters/udp/dns_filter/dns_filter_test.cc +++ b/test/extensions/filters/udp/dns_filter/dns_filter_test.cc @@ -1,19 +1,25 @@ -#include "envoy/config/filter/udp/dns_filter/v2alpha/dns_filter.pb.h" -#include "envoy/config/filter/udp/dns_filter/v2alpha/dns_filter.pb.validate.h" +#include "envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.pb.h" +#include "envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.pb.validate.h" #include "common/common/logger.h" -#include "extensions/filters/udp/dns_filter/dns_filter.h" - -#include "test/mocks/server/mocks.h" +#include "test/mocks/event/mocks.h" +#include "test/mocks/server/instance.h" +#include 
"test/mocks/server/listener_factory_context.h" #include "test/test_common/environment.h" +#include "test/test_common/simulated_time_system.h" +#include "dns_filter_test_utils.h" #include "gmock/gmock.h" #include "gtest/gtest.h" +using testing::AnyNumber; using testing::AtLeast; using testing::InSequence; +using testing::Mock; +using testing::Return; using testing::ReturnRef; +using testing::SaveArg; namespace Envoy { namespace Extensions { @@ -21,66 +27,1129 @@ namespace UdpFilters { namespace DnsFilter { namespace { -class DnsFilterTest : public testing::Test { +Api::IoCallUint64Result makeNoError(uint64_t rc) { + auto no_error = Api::ioCallUint64ResultNoError(); + no_error.rc_ = rc; + return no_error; +} + +class DnsFilterTest : public testing::Test, public Event::TestUsingSimulatedTime { public: DnsFilterTest() - : listener_address_(Network::Utility::parseInternetAddressAndPort("127.0.2.1:5353")) { - - Logger::Registry::setLogLevel(spdlog::level::info); + : listener_address_(Network::Utility::parseInternetAddressAndPort("127.0.2.1:5353")), + api_(Api::createApiForTest()), + counters_(mock_query_buffer_underflow_, mock_record_name_overflow_, + query_parsing_failure_) { + udp_response_.addresses_.local_ = listener_address_; + udp_response_.addresses_.peer_ = listener_address_; + udp_response_.buffer_ = std::make_unique(); + setupResponseParser(); EXPECT_CALL(callbacks_, udpListener()).Times(AtLeast(0)); + EXPECT_CALL(callbacks_.udp_listener_, send(_)) + .WillRepeatedly( + Invoke([this](const Network::UdpSendData& send_data) -> Api::IoCallUint64Result { + udp_response_.buffer_->move(send_data.buffer_); + return makeNoError(udp_response_.buffer_->length()); + })); + EXPECT_CALL(callbacks_.udp_listener_, dispatcher()).WillRepeatedly(ReturnRef(dispatcher_)); } ~DnsFilterTest() override { EXPECT_CALL(callbacks_.udp_listener_, onDestroy()); } + void setupResponseParser() { + histogram_.unit_ = Stats::Histogram::Unit::Milliseconds; + response_parser_ = std::make_unique( 
+ true /* recursive queries */, api_->timeSource(), 0 /* retries */, random_, histogram_); + } + void setup(const std::string& yaml) { - envoy::config::filter::udp::dns_filter::v2alpha::DnsFilterConfig config; + envoy::extensions::filters::udp::dns_filter::v3alpha::DnsFilterConfig config; TestUtility::loadFromYamlAndValidate(yaml, config); auto store = stats_store_.createScope("dns_scope"); - EXPECT_CALL(listener_factory_, scope()).WillOnce(ReturnRef(*store)); + ON_CALL(listener_factory_, scope()).WillByDefault(ReturnRef(*store)); + ON_CALL(listener_factory_, api()).WillByDefault(ReturnRef(*api_)); + ON_CALL(random_, random()).WillByDefault(Return(3)); + ON_CALL(listener_factory_, random()).WillByDefault(ReturnRef(random_)); + + resolver_ = std::make_shared(); + ON_CALL(dispatcher_, createDnsResolver(_, _)).WillByDefault(Return(resolver_)); config_ = std::make_shared(listener_factory_, config); filter_ = std::make_unique(callbacks_, config_); } + void sendQueryFromClient(const std::string& peer_address, const std::string& buffer) { + Network::UdpRecvData data; + data.addresses_.peer_ = Network::Utility::parseInternetAddressAndPort(peer_address); + data.addresses_.local_ = listener_address_; + data.buffer_ = std::make_unique(buffer); + data.receive_time_ = MonotonicTime(std::chrono::seconds(0)); + filter_->onData(data); + } + const Network::Address::InstanceConstSharedPtr listener_address_; - Server::Configuration::MockListenerFactoryContext listener_factory_; + Api::ApiPtr api_; DnsFilterEnvoyConfigSharedPtr config_; - - std::unique_ptr filter_; + NiceMock mock_query_buffer_underflow_; + NiceMock mock_record_name_overflow_; + NiceMock query_parsing_failure_; + DnsParserCounters counters_; + DnsQueryContextPtr query_ctx_; + NiceMock dispatcher_; Network::MockUdpReadFilterCallbacks callbacks_; + Network::UdpRecvData udp_response_; + NiceMock file_system_; + NiceMock histogram_; + NiceMock random_; + NiceMock listener_factory_; Stats::IsolatedStoreImpl stats_store_; - 
Runtime::RandomGeneratorImpl rng_; + std::shared_ptr resolver_; + std::unique_ptr filter_; + std::unique_ptr response_parser_; - const std::string config_yaml = R"EOF( + const std::string forward_query_off_config = R"EOF( stat_prefix: "my_prefix" server_config: inline_dns_table: external_retry_count: 3 + known_suffixes: + - suffix: foo1.com + - suffix: foo2.com + - suffix: foo3.com + - suffix: foo16.com + - suffix: thisismydomainforafivehundredandtwelvebytetest.com + virtual_domains: + - name: "www.foo1.com" + endpoint: + address_list: + address: + - "10.0.0.1" + - "10.0.0.2" + - name: "www.foo2.com" + endpoint: + address_list: + address: + - "2001:8a:c1::2800:7" + - "2001:8a:c1::2800:8" + - "2001:8a:c1::2800:9" + - name: "www.foo3.com" + endpoint: + address_list: + address: + - "10.0.3.1" + - name: "www.foo16.com" + endpoint: + address_list: + address: + - "10.0.16.1" + - "10.0.16.2" + - "10.0.16.3" + - "10.0.16.4" + - "10.0.16.5" + - "10.0.16.6" + - "10.0.16.7" + - "10.0.16.8" + - "10.0.16.9" + - "10.0.16.10" + - "10.0.16.11" + - "10.0.16.12" + - "10.0.16.13" + - "10.0.16.14" + - "10.0.16.15" + - "10.0.16.16" + - name: www.supercalifragilisticexpialidocious.thisismydomainforafivehundredandtwelvebytetest.com + endpoint: + address_list: + address: + - "2001:8a:c1::2801:0001" + - "2001:8a:c1::2801:0002" + - "2001:8a:c1::2801:0003" + - "2001:8a:c1::2801:0004" + - "2001:8a:c1::2801:0005" + - "2001:8a:c1::2801:0006" + - "2001:8a:c1::2801:0007" + - "2001:8a:c1::2801:0008" +)EOF"; + + const std::string forward_query_on_config = R"EOF( +stat_prefix: "my_prefix" +client_config: + resolver_timeout: 1s + upstream_resolvers: + - socket_address: + address: "1.1.1.1" + port_value: 53 + - socket_address: + address: "8.8.8.8" + port_value: 53 + - socket_address: + address: "8.8.4.4" + port_value: 53 + max_pending_lookups: 1 +server_config: + inline_dns_table: + external_retry_count: 0 + known_suffixes: + - suffix: foo1.com + - suffix: foo2.com virtual_domains: - name: 
"www.foo1.com" endpoint: address_list: address: - - 10.0.0.1 - - 10.0.0.2 - - name: "www.foo2.com" - endpoint: - address_list: - address: - - 2001:8a:c1::2800:7 - - name: "www.foo3.com" - endpoint: - address_list: - address: - - 10.0.3.1 - )EOF"; + - "10.0.0.1" +)EOF"; + + const std::string external_dns_table_config = R"EOF( +stat_prefix: "my_prefix" +client_config: + resolver_timeout: 1s + upstream_resolvers: + - socket_address: + address: "1.1.1.1" + port_value: 53 + max_pending_lookups: 256 +server_config: + external_dns_table: + filename: {} +)EOF"; + + const std::string external_dns_table_json = R"EOF( +{ + "external_retry_count": 3, + "known_suffixes": [ { "suffix": "com" } ], + "virtual_domains": [ + { + "name": "www.external_foo1.com", + "endpoint": { "address_list": { "address": [ "10.0.0.1", "10.0.0.2" ] } } + }, + { + "name": "www.external_foo2.com", + "endpoint": { "address_list": { "address": [ "2001:8a:c1::2800:7" ] } } + }, + { + "name": "www.external_foo3.com", + "endpoint": { "address_list": { "address": [ "10.0.3.1" ] } } + } + ] +} +)EOF"; + + const std::string external_dns_table_yaml = R"EOF( +external_retry_count: 3 +known_suffixes: + - suffix: "com" +virtual_domains: + - name: "www.external_foo1.com" + endpoint: + address_list: + address: + - "10.0.0.1" + - "10.0.0.2" + - name: "www.external_foo2.com" + endpoint: + address_list: + address: + - "2001:8a:c1::2800:7" + - name: "www.external_foo3.com" + endpoint: + address_list: + address: + - "10.0.3.1" +)EOF"; }; -TEST_F(DnsFilterTest, TestConfig) { +TEST_F(DnsFilterTest, InvalidQuery) { + InSequence s; + + setup(forward_query_off_config); + sendQueryFromClient("10.0.0.1:1000", "hello"); + query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_); + EXPECT_FALSE(query_ctx_->parse_status_); + + EXPECT_EQ(DNS_RESPONSE_CODE_FORMAT_ERROR, response_parser_->getQueryResponseCode()); + EXPECT_EQ(0, query_ctx_->answers_.size()); + + // Validate stats + EXPECT_EQ(0, 
config_->stats().a_record_queries_.value()); + EXPECT_EQ(1, config_->stats().downstream_rx_invalid_queries_.value()); + EXPECT_TRUE(config_->stats().downstream_rx_bytes_.used()); + EXPECT_TRUE(config_->stats().downstream_tx_bytes_.used()); + + EXPECT_EQ(DNS_RESPONSE_CODE_FORMAT_ERROR, response_parser_->getQueryResponseCode()); + EXPECT_EQ(0, query_ctx_->answers_.size()); +} + +TEST_F(DnsFilterTest, MaxQueryAndResponseSizeTest) { + InSequence s; + + setup(forward_query_off_config); + std::string domain( + "www.supercalifragilisticexpialidocious.thisismydomainforafivehundredandtwelvebytetest.com"); + const std::string query = + Utils::buildQueryForDomain(domain, DNS_RECORD_TYPE_AAAA, DNS_RECORD_CLASS_IN); + ASSERT_FALSE(query.empty()); + + sendQueryFromClient("10.0.0.1:1000", query); + EXPECT_LT(udp_response_.buffer_->length(), Utils::MAX_UDP_DNS_SIZE); + + query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_); + EXPECT_TRUE(query_ctx_->parse_status_); + + EXPECT_EQ(DNS_RESPONSE_CODE_NO_ERROR, response_parser_->getQueryResponseCode()); + // There are 8 addresses, however, since the domain is part of the answer record, each + // serialized answer is over 100 bytes in size, there is room for 3 before the next + // serialized answer puts the buffer over the 512 byte limit. The query itself is also + // around 100 bytes. + EXPECT_EQ(3, query_ctx_->answers_.size()); + + // Validate stats + EXPECT_EQ(1, config_->stats().aaaa_record_queries_.value()); + + // Although there are only 3 answers returned, the filter did find 8 records for the query + EXPECT_EQ(8, config_->stats().local_aaaa_record_answers_.value()); + EXPECT_EQ(0, config_->stats().downstream_rx_invalid_queries_.value()); + EXPECT_TRUE(config_->stats().downstream_rx_bytes_.used()); + EXPECT_TRUE(config_->stats().downstream_tx_bytes_.used()); +} + +TEST_F(DnsFilterTest, InvalidQueryNameTooLongTest) { + InSequence s; + + setup(forward_query_off_config); + std::string domain = "www." 
+ std::string(256, 'a') + ".com"; + const std::string query = + Utils::buildQueryForDomain(domain, DNS_RECORD_TYPE_A, DNS_RECORD_CLASS_IN); + ASSERT_FALSE(query.empty()); + + sendQueryFromClient("10.0.0.1:1000", query); + + query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_); + EXPECT_FALSE(query_ctx_->parse_status_); + + EXPECT_EQ(DNS_RESPONSE_CODE_FORMAT_ERROR, response_parser_->getQueryResponseCode()); + EXPECT_EQ(0, query_ctx_->answers_.size()); + + // Validate stats + EXPECT_EQ(0, config_->stats().a_record_queries_.value()); + EXPECT_EQ(1, config_->stats().downstream_rx_invalid_queries_.value()); + EXPECT_TRUE(config_->stats().downstream_rx_bytes_.used()); + EXPECT_TRUE(config_->stats().downstream_tx_bytes_.used()); + + EXPECT_EQ(DNS_RESPONSE_CODE_FORMAT_ERROR, response_parser_->getQueryResponseCode()); + EXPECT_EQ(0, query_ctx_->answers_.size()); +} + +TEST_F(DnsFilterTest, InvalidLabelNameTooLongTest) { + InSequence s; + + setup(forward_query_off_config); + std::string domain(64, 'a'); + domain += ".com"; + const std::string query = + Utils::buildQueryForDomain(domain, DNS_RECORD_TYPE_A, DNS_RECORD_CLASS_IN); + ASSERT_FALSE(query.empty()); + + sendQueryFromClient("10.0.0.1:1000", query); + + query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_); + EXPECT_FALSE(query_ctx_->parse_status_); + + EXPECT_EQ(DNS_RESPONSE_CODE_FORMAT_ERROR, response_parser_->getQueryResponseCode()); + EXPECT_EQ(0, query_ctx_->answers_.size()); + + // Validate stats + EXPECT_EQ(0, config_->stats().a_record_queries_.value()); + EXPECT_EQ(1, config_->stats().downstream_rx_invalid_queries_.value()); + EXPECT_TRUE(config_->stats().downstream_rx_bytes_.used()); + EXPECT_TRUE(config_->stats().downstream_tx_bytes_.used()); + + EXPECT_EQ(DNS_RESPONSE_CODE_FORMAT_ERROR, response_parser_->getQueryResponseCode()); + EXPECT_EQ(0, query_ctx_->answers_.size()); +} + +TEST_F(DnsFilterTest, SingleTypeAQuery) { + InSequence s; + + 
setup(forward_query_off_config); + + const std::string domain("www.foo3.com"); + const std::string query = + Utils::buildQueryForDomain(domain, DNS_RECORD_TYPE_A, DNS_RECORD_CLASS_IN); + ASSERT_FALSE(query.empty()); + + sendQueryFromClient("10.0.0.1:1000", query); + + query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_); + EXPECT_TRUE(query_ctx_->parse_status_); + + EXPECT_EQ(DNS_RESPONSE_CODE_NO_ERROR, response_parser_->getQueryResponseCode()); + EXPECT_EQ(1, query_ctx_->answers_.size()); + + // Verify that we have an answer record for the queried domain + + const DnsAnswerRecordPtr& answer = query_ctx_->answers_.find(domain)->second; + + // Verify the address returned + const std::list expected{"10.0.3.1"}; + + Utils::verifyAddress(expected, answer); + + // Validate stats + EXPECT_EQ(1, config_->stats().downstream_rx_queries_.value()); + EXPECT_EQ(1, config_->stats().known_domain_queries_.value()); + EXPECT_EQ(1, config_->stats().local_a_record_answers_.value()); + EXPECT_EQ(1, config_->stats().a_record_queries_.value()); + EXPECT_TRUE(config_->stats().downstream_rx_bytes_.used()); + EXPECT_TRUE(config_->stats().downstream_tx_bytes_.used()); +} + +TEST_F(DnsFilterTest, RepeatedTypeAQuerySuccess) { + InSequence s; + + setup(forward_query_off_config); + constexpr size_t loopCount = 5; + const std::string domain("www.foo3.com"); + size_t total_query_bytes = 0; + + for (size_t i = 0; i < loopCount; i++) { + const std::string query = + Utils::buildQueryForDomain(domain, DNS_RECORD_TYPE_A, DNS_RECORD_CLASS_IN); + total_query_bytes += query.size(); + ASSERT_FALSE(query.empty()); + sendQueryFromClient("10.0.0.1:1000", query); + + query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_); + EXPECT_TRUE(query_ctx_->parse_status_); + + EXPECT_EQ(DNS_RESPONSE_CODE_NO_ERROR, response_parser_->getQueryResponseCode()); + EXPECT_EQ(1, query_ctx_->answers_.size()); + + // Verify that we have an answer record for the queried domain + const 
DnsAnswerRecordPtr& answer = query_ctx_->answers_.find(domain)->second; + + // Verify the address returned + std::list expected{"10.0.3.1"}; + Utils::verifyAddress(expected, answer); + } + + // Validate stats + EXPECT_EQ(loopCount, config_->stats().downstream_rx_queries_.value()); + EXPECT_EQ(loopCount, config_->stats().known_domain_queries_.value()); + EXPECT_EQ(loopCount, config_->stats().local_a_record_answers_.value()); + EXPECT_EQ(loopCount, config_->stats().a_record_queries_.value()); +} + +TEST_F(DnsFilterTest, LocalTypeAQueryFail) { + InSequence s; + + setup(forward_query_off_config); + const std::string query = + Utils::buildQueryForDomain("www.foo2.com", DNS_RECORD_TYPE_A, DNS_RECORD_CLASS_IN); + ASSERT_FALSE(query.empty()); + + sendQueryFromClient("10.0.0.1:1000", query); + query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_); + EXPECT_TRUE(query_ctx_->parse_status_); + + EXPECT_EQ(DNS_RESPONSE_CODE_NAME_ERROR, response_parser_->getQueryResponseCode()); + EXPECT_EQ(0, query_ctx_->answers_.size()); + + // Validate stats + EXPECT_EQ(1, config_->stats().downstream_rx_queries_.value()); + EXPECT_EQ(1, config_->stats().known_domain_queries_.value()); + EXPECT_EQ(3, config_->stats().local_a_record_answers_.value()); + EXPECT_EQ(1, config_->stats().a_record_queries_.value()); + EXPECT_EQ(1, config_->stats().unanswered_queries_.value()); +} + +TEST_F(DnsFilterTest, LocalTypeAAAAQuerySuccess) { + InSequence s; + + setup(forward_query_off_config); + std::list expected{"2001:8a:c1::2800:7", "2001:8a:c1::2800:8", "2001:8a:c1::2800:9"}; + const std::string domain("www.foo2.com"); + const std::string query = + Utils::buildQueryForDomain(domain, DNS_RECORD_TYPE_AAAA, DNS_RECORD_CLASS_IN); + ASSERT_FALSE(query.empty()); + + sendQueryFromClient("10.0.0.1:1000", query); + query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_); + EXPECT_TRUE(query_ctx_->parse_status_); + + EXPECT_EQ(DNS_RESPONSE_CODE_NO_ERROR, 
response_parser_->getQueryResponseCode()); + EXPECT_EQ(expected.size(), query_ctx_->answers_.size()); + + // Verify the address returned + for (const auto& answer : query_ctx_->answers_) { + EXPECT_EQ(answer.first, domain); + Utils::verifyAddress(expected, answer.second); + } + + // Validate stats + EXPECT_EQ(1, config_->stats().downstream_rx_queries_.value()); + EXPECT_EQ(1, config_->stats().known_domain_queries_.value()); + EXPECT_EQ(3, config_->stats().local_aaaa_record_answers_.value()); + EXPECT_EQ(1, config_->stats().aaaa_record_queries_.value()); +} + +TEST_F(DnsFilterTest, ExternalResolutionReturnSingleAddress) { + InSequence s; + + auto timeout_timer = new NiceMock(&dispatcher_); + EXPECT_CALL(*timeout_timer, enableTimer(_, _)).Times(1); + + const std::string expected_address("130.207.244.251"); + const std::string domain("www.foobaz.com"); + setup(forward_query_on_config); + + const std::string query = + Utils::buildQueryForDomain(domain, DNS_RECORD_TYPE_A, DNS_RECORD_CLASS_IN); + ASSERT_FALSE(query.empty()); + + // Verify that we are calling the resolver with the expected name + Network::DnsResolver::ResolveCb resolve_cb; + EXPECT_CALL(*resolver_, resolve(domain, _, _)) + .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_))); + + // Send a query to for a name not in our configuration + sendQueryFromClient("10.0.0.1:1000", query); + + EXPECT_CALL(*timeout_timer, disableTimer()).Times(AnyNumber()); + + // Execute resolve callback + resolve_cb(Network::DnsResolver::ResolutionStatus::Success, + TestUtility::makeDnsResponse({expected_address})); + + // parse the result + query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_); + EXPECT_TRUE(query_ctx_->parse_status_); + + EXPECT_EQ(DNS_RESPONSE_CODE_NO_ERROR, response_parser_->getQueryResponseCode()); + EXPECT_EQ(1, query_ctx_->answers_.size()); + + std::list expected{expected_address}; + for (const auto& answer : query_ctx_->answers_) { + EXPECT_EQ(answer.first, 
domain); + Utils::verifyAddress(expected, answer.second); + } + + // Validate stats + EXPECT_EQ(1, config_->stats().downstream_rx_queries_.value()); + EXPECT_EQ(1, config_->stats().external_a_record_queries_.value()); + EXPECT_EQ(1, config_->stats().external_a_record_answers_.value()); + EXPECT_EQ(1, config_->stats().a_record_queries_.value()); + EXPECT_EQ(0, config_->stats().aaaa_record_queries_.value()); + EXPECT_EQ(0, config_->stats().unanswered_queries_.value()); + + EXPECT_TRUE(Mock::VerifyAndClearExpectations(resolver_.get())); +} + +TEST_F(DnsFilterTest, ExternalResolutionIpv6SingleAddress) { + InSequence s; + + auto timeout_timer = new NiceMock(&dispatcher_); + EXPECT_CALL(*timeout_timer, enableTimer(_, _)).Times(1); + + const std::string expected_address("2a04:4e42:d::323"); + const std::string domain("www.foobaz.com"); + + setup(forward_query_on_config); + + // Verify that we are calling the resolver with the expected name + Network::DnsResolver::ResolveCb resolve_cb; + EXPECT_CALL(*resolver_, resolve(domain, _, _)) + .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_))); + + const std::string query = + Utils::buildQueryForDomain(domain, DNS_RECORD_TYPE_AAAA, DNS_RECORD_CLASS_IN); + ASSERT_FALSE(query.empty()); + + // Send a query to for a name not in our configuration + sendQueryFromClient("10.0.0.1:1000", query); + + EXPECT_CALL(*timeout_timer, disableTimer()).Times(1); + + // Execute resolve callback + resolve_cb(Network::DnsResolver::ResolutionStatus::Success, + TestUtility::makeDnsResponse({expected_address})); + + // parse the result + query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_); + EXPECT_TRUE(query_ctx_->parse_status_); + + EXPECT_EQ(DNS_RESPONSE_CODE_NO_ERROR, response_parser_->getQueryResponseCode()); + EXPECT_EQ(1, query_ctx_->answers_.size()); + + std::list expected{expected_address}; + for (const auto& answer : query_ctx_->answers_) { + EXPECT_EQ(answer.first, domain); + 
Utils::verifyAddress(expected, answer.second); + } + + // Validate stats + EXPECT_EQ(1, config_->stats().downstream_rx_queries_.value()); + EXPECT_EQ(1, config_->stats().external_aaaa_record_queries_.value()); + EXPECT_EQ(1, config_->stats().external_aaaa_record_answers_.value()); + EXPECT_EQ(1, config_->stats().aaaa_record_queries_.value()); + EXPECT_EQ(0, config_->stats().a_record_queries_.value()); + EXPECT_EQ(0, config_->stats().unanswered_queries_.value()); + + EXPECT_TRUE(Mock::VerifyAndClearExpectations(resolver_.get())); +} + +TEST_F(DnsFilterTest, ExternalResolutionReturnMultipleAddresses) { + InSequence s; + + auto timeout_timer = new NiceMock(&dispatcher_); + EXPECT_CALL(*timeout_timer, enableTimer(_, _)).Times(1); + + const std::list expected_address{"130.207.244.251", "130.207.244.252", + "130.207.244.253", "130.207.244.254"}; + const std::string domain("www.foobaz.com"); + setup(forward_query_on_config); + + // Verify that we are calling the resolver with the expected name + Network::DnsResolver::ResolveCb resolve_cb; + EXPECT_CALL(*resolver_, resolve(domain, _, _)) + .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_))); + + const std::string query = + Utils::buildQueryForDomain(domain, DNS_RECORD_TYPE_A, DNS_RECORD_CLASS_IN); + ASSERT_FALSE(query.empty()); + + // Send a query to for a name not in our configuration + sendQueryFromClient("10.0.0.1:1000", query); + + EXPECT_CALL(*timeout_timer, disableTimer()).Times(1); + + // Execute resolve callback + resolve_cb(Network::DnsResolver::ResolutionStatus::Success, + TestUtility::makeDnsResponse({expected_address})); + + // parse the result + query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_); + EXPECT_TRUE(query_ctx_->parse_status_); + + EXPECT_EQ(DNS_RESPONSE_CODE_NO_ERROR, response_parser_->getQueryResponseCode()); + EXPECT_EQ(expected_address.size(), query_ctx_->answers_.size()); + + EXPECT_LT(udp_response_.buffer_->length(), Utils::MAX_UDP_DNS_SIZE); + 
+ for (const auto& answer : query_ctx_->answers_) { + EXPECT_EQ(answer.first, domain); + Utils::verifyAddress(expected_address, answer.second); + } + + // Validate stats + EXPECT_EQ(1, config_->stats().downstream_rx_queries_.value()); + EXPECT_EQ(1, config_->stats().external_a_record_queries_.value()); + EXPECT_EQ(expected_address.size(), config_->stats().external_a_record_answers_.value()); + EXPECT_EQ(1, config_->stats().a_record_queries_.value()); + EXPECT_EQ(0, config_->stats().aaaa_record_queries_.value()); + EXPECT_EQ(0, config_->stats().unanswered_queries_.value()); + + EXPECT_TRUE(Mock::VerifyAndClearExpectations(resolver_.get())); +} + +TEST_F(DnsFilterTest, ExternalResolutionReturnNoAddresses) { + InSequence s; + + auto timeout_timer = new NiceMock(&dispatcher_); + EXPECT_CALL(*timeout_timer, enableTimer(_, _)).Times(1); + + const std::string domain("www.foobaz.com"); + setup(forward_query_on_config); + + // Verify that we are calling the resolver with the expected name + Network::DnsResolver::ResolveCb resolve_cb; + EXPECT_CALL(*resolver_, resolve(domain, _, _)) + .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_))); + + const std::string query = + Utils::buildQueryForDomain(domain, DNS_RECORD_TYPE_A, DNS_RECORD_CLASS_IN); + ASSERT_FALSE(query.empty()); + + // Send a query to for a name not in our configuration + sendQueryFromClient("10.0.0.1:1000", query); + + EXPECT_CALL(*timeout_timer, disableTimer()).Times(1); + + // Execute resolve callback + resolve_cb(Network::DnsResolver::ResolutionStatus::Success, TestUtility::makeDnsResponse({})); + + // parse the result + query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_); + EXPECT_TRUE(query_ctx_->parse_status_); + EXPECT_EQ(DNS_RESPONSE_CODE_NAME_ERROR, response_parser_->getQueryResponseCode()); + EXPECT_EQ(0, query_ctx_->answers_.size()); + + // Validate stats + EXPECT_EQ(1, config_->stats().downstream_rx_queries_.value()); + EXPECT_EQ(1, 
config_->stats().external_a_record_queries_.value()); + EXPECT_EQ(0, config_->stats().external_a_record_answers_.value()); + EXPECT_EQ(1, config_->stats().a_record_queries_.value()); + EXPECT_EQ(0, config_->stats().aaaa_record_queries_.value()); + EXPECT_EQ(1, config_->stats().unanswered_queries_.value()); + + EXPECT_TRUE(Mock::VerifyAndClearExpectations(resolver_.get())); +} + +TEST_F(DnsFilterTest, ExternalResolutionTimeout) { + InSequence s; + + auto timeout_timer = new NiceMock(&dispatcher_); + EXPECT_CALL(*timeout_timer, enableTimer(_, _)).Times(1); + + const std::string domain("www.foobaz.com"); + setup(forward_query_on_config); + + const std::string query = + Utils::buildQueryForDomain(domain, DNS_RECORD_TYPE_A, DNS_RECORD_CLASS_IN); + ASSERT_FALSE(query.empty()); + + EXPECT_CALL(*resolver_, resolve(domain, _, _)).WillOnce(Return(&resolver_->active_query_)); + + // Send a query to for a name not in our configuration + sendQueryFromClient("10.0.0.1:1000", query); + simTime().advanceTimeWait(std::chrono::milliseconds(1500)); + + // Execute timeout timer callback + timeout_timer->invokeCallback(); + + // parse the result + query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_); + EXPECT_TRUE(query_ctx_->parse_status_); + EXPECT_EQ(DNS_RESPONSE_CODE_NAME_ERROR, response_parser_->getQueryResponseCode()); + EXPECT_EQ(0, query_ctx_->answers_.size()); + + // Validate stats + EXPECT_EQ(1, config_->stats().downstream_rx_queries_.value()); + EXPECT_EQ(1, config_->stats().external_a_record_queries_.value()); + EXPECT_EQ(0, config_->stats().external_a_record_answers_.value()); + EXPECT_EQ(1, config_->stats().a_record_queries_.value()); + EXPECT_EQ(0, config_->stats().aaaa_record_queries_.value()); + EXPECT_EQ(1, config_->stats().unanswered_queries_.value()); + + EXPECT_TRUE(Mock::VerifyAndClearExpectations(resolver_.get())); +} + +TEST_F(DnsFilterTest, ExternalResolutionTimeout2) { + InSequence s; + + auto timeout_timer = new NiceMock(&dispatcher_); 
+ EXPECT_CALL(*timeout_timer, enableTimer(_, _)).Times(1); + + const std::string domain("www.foobaz.com"); + setup(forward_query_on_config); + + const std::string query = + Utils::buildQueryForDomain(domain, DNS_RECORD_TYPE_A, DNS_RECORD_CLASS_IN); + ASSERT_FALSE(query.empty()); + + // Verify that we are calling the resolver with the expected name + Network::DnsResolver::ResolveCb resolve_cb; + EXPECT_CALL(*resolver_, resolve(domain, _, _)) + .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_))); + + // Send a query to for a name not in our configuration + sendQueryFromClient("10.0.0.1:1000", query); + simTime().advanceTimeWait(std::chrono::milliseconds(1500)); + + // Execute timeout timer callback + timeout_timer->invokeCallback(); + + // Execute resolve callback. This should harmlessly return and not alter + // the response received by the client. Even though we are returning a successful + // response, the client does not get an answer + resolve_cb(Network::DnsResolver::ResolutionStatus::Success, + TestUtility::makeDnsResponse({"130.207.244.251"})); + + // parse the result + query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_); + EXPECT_TRUE(query_ctx_->parse_status_); + EXPECT_EQ(DNS_RESPONSE_CODE_NAME_ERROR, response_parser_->getQueryResponseCode()); + EXPECT_EQ(0, query_ctx_->answers_.size()); + + // Validate stats + EXPECT_EQ(1, config_->stats().downstream_rx_queries_.value()); + EXPECT_EQ(1, config_->stats().external_a_record_queries_.value()); + EXPECT_EQ(0, config_->stats().external_a_record_answers_.value()); + EXPECT_EQ(1, config_->stats().a_record_queries_.value()); + EXPECT_EQ(0, config_->stats().aaaa_record_queries_.value()); + EXPECT_EQ(1, config_->stats().unanswered_queries_.value()); + + EXPECT_TRUE(Mock::VerifyAndClearExpectations(resolver_.get())); +} + +TEST_F(DnsFilterTest, ExternalResolutionExceedMaxPendingLookups) { + InSequence s; + + const std::string domain("www.foobaz.com"); + 
setup(forward_query_on_config); + const std::string query1 = + Utils::buildQueryForDomain(domain, DNS_RECORD_TYPE_A, DNS_RECORD_CLASS_IN); + ASSERT_FALSE(query1.empty()); + + const std::string query2 = + Utils::buildQueryForDomain(domain, DNS_RECORD_TYPE_AAAA, DNS_RECORD_CLASS_IN); + ASSERT_FALSE(query2.empty()); + + const std::string query3 = + Utils::buildQueryForDomain(domain, DNS_RECORD_TYPE_A, DNS_RECORD_CLASS_IN); + ASSERT_FALSE(query3.empty()); + + // Send the first query. This will remain 'in-flight' + EXPECT_CALL(dispatcher_, createTimer_(_)); + EXPECT_CALL(*resolver_, resolve(domain, _, _)); + sendQueryFromClient("10.0.0.1:1000", query1); + + // Send the second query. This will remain 'in-flight' also + EXPECT_CALL(dispatcher_, createTimer_(_)); + EXPECT_CALL(*resolver_, resolve(domain, _, _)); + sendQueryFromClient("10.0.0.1:1000", query2); + + // The third query should be rejected since pending queries (2) > 1, and + // we've disabled retries. The client will get a response for this single + // query + sendQueryFromClient("10.0.0.1:1000", query3); + + // Parse the result for the third query. 
Since the first two queries are + // still in flight, the third query is the only one to generate a response + query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_); + EXPECT_TRUE(query_ctx_->parse_status_); + EXPECT_EQ(0, query_ctx_->answers_.size()); + EXPECT_EQ(DNS_RESPONSE_CODE_NAME_ERROR, response_parser_->getQueryResponseCode()); + + // Validate stats + EXPECT_EQ(3, config_->stats().downstream_rx_queries_.value()); + EXPECT_EQ(1, config_->stats().external_a_record_queries_.value()); + EXPECT_EQ(0, config_->stats().external_a_record_answers_.value()); + EXPECT_EQ(2, config_->stats().a_record_queries_.value()); + EXPECT_EQ(1, config_->stats().aaaa_record_queries_.value()); + EXPECT_EQ(1, config_->stats().unanswered_queries_.value()); +} + +TEST_F(DnsFilterTest, ConsumeExternalJsonTableTest) { + InSequence s; + + std::string temp_path = + TestEnvironment::writeStringToFileForTest("dns_table.json", external_dns_table_json); + std::string config_to_use = fmt::format(external_dns_table_config, temp_path); + setup(config_to_use); + + const std::string domain("www.external_foo1.com"); + const std::string query = + Utils::buildQueryForDomain(domain, DNS_RECORD_TYPE_A, DNS_RECORD_CLASS_IN); + + ASSERT_FALSE(query.empty()); + sendQueryFromClient("10.0.0.1:1000", query); + + query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_); + EXPECT_TRUE(query_ctx_->parse_status_); + EXPECT_EQ(DNS_RESPONSE_CODE_NO_ERROR, response_parser_->getQueryResponseCode()); + EXPECT_EQ(2, query_ctx_->answers_.size()); + + // Verify the address returned + const std::list expected{"10.0.0.1", "10.0.0.2"}; + for (const auto& answer : query_ctx_->answers_) { + EXPECT_EQ(answer.first, domain); + Utils::verifyAddress(expected, answer.second); + } + + // Validate stats + ASSERT_EQ(1, config_->stats().downstream_rx_queries_.value()); + ASSERT_EQ(1, config_->stats().known_domain_queries_.value()); + ASSERT_EQ(2, 
config_->stats().local_a_record_answers_.value()); + ASSERT_EQ(1, config_->stats().a_record_queries_.value()); +} + +TEST_F(DnsFilterTest, ConsumeExternalYamlTableTest) { + InSequence s; + + std::string temp_path = + TestEnvironment::writeStringToFileForTest("dns_table.yaml", external_dns_table_yaml); + std::string config_to_use = fmt::format(external_dns_table_config, temp_path); + setup(config_to_use); + + const std::string domain("www.external_foo1.com"); + const std::string query = + Utils::buildQueryForDomain(domain, DNS_RECORD_TYPE_A, DNS_RECORD_CLASS_IN); + + ASSERT_FALSE(query.empty()); + sendQueryFromClient("10.0.0.1:1000", query); + + query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_); + EXPECT_TRUE(query_ctx_->parse_status_); + EXPECT_EQ(DNS_RESPONSE_CODE_NO_ERROR, response_parser_->getQueryResponseCode()); + EXPECT_EQ(2, query_ctx_->answers_.size()); + + // Verify the address returned + const std::list expected{"10.0.0.1", "10.0.0.2"}; + for (const auto& answer : query_ctx_->answers_) { + EXPECT_EQ(answer.first, domain); + Utils::verifyAddress(expected, answer.second); + } + + // Validate stats + EXPECT_EQ(1, config_->stats().downstream_rx_queries_.value()); + EXPECT_EQ(1, config_->stats().known_domain_queries_.value()); + EXPECT_EQ(2, config_->stats().local_a_record_answers_.value()); + EXPECT_EQ(1, config_->stats().a_record_queries_.value()); +} + +TEST_F(DnsFilterTest, RawBufferTest) { + InSequence s; + + setup(forward_query_off_config); + const std::string domain("www.foo3.com"); + + constexpr char dns_request[] = { + 0x36, 0x6b, // Transaction ID + 0x01, 0x20, // Flags + 0x00, 0x01, // Questions + 0x00, 0x00, // Answers + 0x00, 0x00, // Authority RRs + 0x00, 0x00, // Additional RRs + 0x03, 0x77, 0x77, 0x77, 0x04, 0x66, 0x6f, // Query record for + 0x6f, 0x33, 0x03, 0x63, 0x6f, 0x6d, 0x00, // www.foo3.com + 0x00, 0x01, // Query Type - A + 0x00, 0x01, // Query Class - IN + }; + + constexpr size_t count = sizeof(dns_request) / 
sizeof(dns_request[0]); + const std::string query = Utils::buildQueryFromBytes(dns_request, count); + + sendQueryFromClient("10.0.0.1:1000", query); + + query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_); + EXPECT_TRUE(query_ctx_->parse_status_); + EXPECT_EQ(DNS_RESPONSE_CODE_NO_ERROR, response_parser_->getQueryResponseCode()); + EXPECT_EQ(1, query_ctx_->answers_.size()); + + // Verify that we have an answer record for the queried domain + const DnsAnswerRecordPtr& answer = query_ctx_->answers_.find(domain)->second; + + // Verify the address returned + const std::list expected{"10.0.3.1"}; + Utils::verifyAddress(expected, answer); +} + +TEST_F(DnsFilterTest, InvalidQueryNameTest) { + InSequence s; + + setup(forward_query_off_config); + + // In this buffer the name segment sizes are incorrect. The filter will indicate that the parsing + // failed + constexpr char dns_request[] = { + 0x36, 0x6c, // Transaction ID + 0x01, 0x20, // Flags + 0x00, 0x01, // Questions + 0x00, 0x00, // Answers + 0x00, 0x00, // Authority RRs + 0x00, 0x00, // Additional RRs + 0x02, 0x77, 0x77, 0x77, 0x03, 0x66, 0x6f, // Query record for + 0x6f, 0x33, 0x01, 0x63, 0x6f, 0x6d, 0x00, // www.foo3.com + 0x00, 0x01, // Query Type - A + 0x00, 0x01, // Query Class - IN + }; + + constexpr size_t count = sizeof(dns_request) / sizeof(dns_request[0]); + const std::string query = Utils::buildQueryFromBytes(dns_request, count); + + sendQueryFromClient("10.0.0.1:1000", query); + + query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_); + EXPECT_FALSE(query_ctx_->parse_status_); + EXPECT_EQ(DNS_RESPONSE_CODE_FORMAT_ERROR, response_parser_->getQueryResponseCode()); + + // TODO(abaptiste): underflow stats + EXPECT_EQ(1, config_->stats().downstream_rx_invalid_queries_.value()); +} + +TEST_F(DnsFilterTest, InvalidQueryNameTest2) { + InSequence s; + + setup(forward_query_off_config); + // In this buffer the name segment sizes are incorrect. 
The first segment points + // past the end of the buffer. The filter will indicate that the parsing failed. + constexpr char dns_request[] = { + 0x36, 0x6c, // Transaction ID + 0x01, 0x20, // Flags + 0x00, 0x01, // Questions + 0x00, 0x00, // Answers + 0x00, 0x00, // Authority RRs + 0x00, 0x00, // Additional RRs + 0x4c, 0x77, 0x77, 0x77, 0x03, 0x66, 0x6f, // Query record for + 0x6f, 0x33, 0x01, 0x63, 0x6f, 0x6d, 0x00, // www.foo3.com + 0x00, 0x01, // Query Type - A + 0x00, 0x01, // Query Class - IN + }; + + constexpr size_t count = sizeof(dns_request) / sizeof(dns_request[0]); + const std::string query = Utils::buildQueryFromBytes(dns_request, count); + + sendQueryFromClient("10.0.0.1:1000", query); + + query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_); + EXPECT_FALSE(query_ctx_->parse_status_); + EXPECT_EQ(DNS_RESPONSE_CODE_FORMAT_ERROR, response_parser_->getQueryResponseCode()); + + // TODO(abaptiste): underflow/overflow stats + EXPECT_EQ(1, config_->stats().downstream_rx_invalid_queries_.value()); +} + +TEST_F(DnsFilterTest, MultipleQueryCountTest) { InSequence s; - setup(config_yaml); + setup(forward_query_off_config); + // In this buffer we have 2 queries for two different domains. This is a rare case + // and serves to validate that we handle the protocol correctly. 
We will return an + // error to the client since most implementations will send the two questions as two + // separate DNS queries + constexpr char dns_request[] = { + 0x36, 0x6d, // Transaction ID + 0x01, 0x20, // Flags + 0x00, 0x02, // Questions + 0x00, 0x00, // Answers + 0x00, 0x00, // Authority RRs + 0x00, 0x00, // Additional RRs + 0x03, 0x77, 0x77, 0x77, 0x04, 0x66, 0x6f, // begin query record for + 0x6f, 0x33, 0x03, 0x63, 0x6f, 0x6d, 0x00, // www.foo3.com + 0x00, 0x01, // Query Type - A + 0x00, 0x01, // Query Class - IN + 0x03, 0x77, 0x77, 0x77, 0x04, 0x66, 0x6f, // Query record for + 0x6f, 0x31, 0x03, 0x63, 0x6f, 0x6d, 0x00, // www.foo1.com + 0x00, 0x01, // Query Type - A + 0x00, 0x01, // Query Class - IN + }; + + constexpr size_t count = sizeof(dns_request) / sizeof(dns_request[0]); + const std::string query = Utils::buildQueryFromBytes(dns_request, count); + + sendQueryFromClient("10.0.0.1:1000", query); + + query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_); + EXPECT_FALSE(query_ctx_->parse_status_); + EXPECT_EQ(DNS_RESPONSE_CODE_FORMAT_ERROR, response_parser_->getQueryResponseCode()); + + EXPECT_EQ(1, config_->stats().downstream_rx_invalid_queries_.value()); + EXPECT_EQ(0, config_->stats().a_record_queries_.value()); + EXPECT_EQ(0, query_ctx_->answers_.size()); +} + +TEST_F(DnsFilterTest, InvalidQueryCountTest) { + InSequence s; + + setup(forward_query_off_config); + // In this buffer the Questions count is zero. This is an invalid query and is handled as such. 
+ constexpr char dns_request[] = { + 0x36, 0x6f, // Transaction ID + 0x01, 0x20, // Flags + 0x00, 0x00, // Questions + 0x00, 0x00, // Answers + 0x00, 0x00, // Authority RRs + 0x00, 0x00, // Additional RRs + 0x03, 0x77, 0x77, 0x77, 0x04, 0x66, 0x6f, // Query record for + 0x6f, 0x33, 0x03, 0x63, 0x6f, 0x6d, 0x00, // www.foo3.com + 0x00, 0x01, // Query Type - A + 0x00, 0x01, // Query Class - IN + }; + + constexpr size_t count = sizeof(dns_request) / sizeof(dns_request[0]); + const std::string query = Utils::buildQueryFromBytes(dns_request, count); + + sendQueryFromClient("10.0.0.1:1000", query); + + query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_); + EXPECT_FALSE(query_ctx_->parse_status_); + EXPECT_EQ(DNS_RESPONSE_CODE_FORMAT_ERROR, response_parser_->getQueryResponseCode()); + + EXPECT_EQ(0, config_->stats().a_record_queries_.value()); + EXPECT_EQ(1, config_->stats().downstream_rx_invalid_queries_.value()); + EXPECT_EQ(0, query_ctx_->answers_.size()); +} + +TEST_F(DnsFilterTest, NotImplementedQueryTest) { + InSequence s; + + setup(forward_query_off_config); + // This buffer requests a CNAME record which we do not support. 
We respond to the client with a + // "not implemented" response code + constexpr char dns_request[] = { + 0x36, 0x70, // Transaction ID + 0x01, 0x20, // Flags + 0x00, 0x01, // Questions + 0x00, 0x00, // Answers + 0x00, 0x00, // Authority RRs + 0x00, 0x00, // Additional RRs + 0x03, 0x77, 0x77, 0x77, 0x04, 0x66, 0x6f, // Query record for + 0x6f, 0x33, 0x03, 0x63, 0x6f, 0x6d, 0x00, // www.foo3.com + 0x00, 0x05, // Query Type - CNAME + 0x00, 0x01, // Query Class - IN + }; + + constexpr size_t count = sizeof(dns_request) / sizeof(dns_request[0]); + const std::string query = Utils::buildQueryFromBytes(dns_request, count); + + sendQueryFromClient("10.0.0.1:1000", query); + + query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_); + EXPECT_TRUE(query_ctx_->parse_status_); + EXPECT_EQ(DNS_RESPONSE_CODE_NOT_IMPLEMENTED, response_parser_->getQueryResponseCode()); + + EXPECT_EQ(0, config_->stats().a_record_queries_.value()); + EXPECT_EQ(0, config_->stats().downstream_rx_invalid_queries_.value()); +} + +TEST_F(DnsFilterTest, InvalidShortBufferTest) { + InSequence s; + + setup(forward_query_off_config); + // This is an invalid query. 
Envoy should handle the packet and indicate a parsing failure + constexpr char dns_request[] = {0x1c}; + const std::string query = Utils::buildQueryFromBytes(dns_request, 1); + sendQueryFromClient("10.0.0.1:1000", query); + + query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_); + EXPECT_FALSE(query_ctx_->parse_status_); + EXPECT_EQ(DNS_RESPONSE_CODE_FORMAT_ERROR, response_parser_->getQueryResponseCode()); + + EXPECT_EQ(0, config_->stats().a_record_queries_.value()); + EXPECT_EQ(1, config_->stats().downstream_rx_invalid_queries_.value()); +} + +TEST_F(DnsFilterTest, RandomizeFirstAnswerTest) { + InSequence s; + + setup(forward_query_off_config); + const std::string domain("www.foo16.com"); + + const std::string query = + Utils::buildQueryForDomain(domain, DNS_RECORD_TYPE_A, DNS_RECORD_CLASS_IN); + ASSERT_FALSE(query.empty()); + sendQueryFromClient("10.0.0.1:1000", query); + + query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_); + EXPECT_TRUE(query_ctx_->parse_status_); + EXPECT_EQ(DNS_RESPONSE_CODE_NO_ERROR, response_parser_->getQueryResponseCode()); + + // Although 16 addresses are defined, only 8 are returned + EXPECT_EQ(8, query_ctx_->answers_.size()); + + // We shuffle the list of addresses when we read the config, and in the case of more than + // 8 defined addresses, we randomize the initial starting index. We should not end up with + // the first answer being the first defined address, or the answers appearing in the same + // order as they are defined. 
+ const std::list defined_order{"10.0.16.1", "10.0.16.2", "10.0.16.3", "10.0.16.4", + "10.0.16.5", "10.0.16.6", "10.0.16.7", "10.0.16.8"}; + auto defined_answer_iter = defined_order.begin(); + for (const auto& answer : query_ctx_->answers_) { + const auto resolved_address = answer.second->ip_addr_->ip()->addressAsString(); + EXPECT_NE(0L, resolved_address.compare(*defined_answer_iter++)); + } } } // namespace diff --git a/test/extensions/filters/udp/dns_filter/dns_filter_test_utils.cc b/test/extensions/filters/udp/dns_filter/dns_filter_test_utils.cc new file mode 100644 index 0000000000000..364ee8b3094c7 --- /dev/null +++ b/test/extensions/filters/udp/dns_filter/dns_filter_test_utils.cc @@ -0,0 +1,90 @@ +#include "dns_filter_test_utils.h" + +#include "common/common/random_generator.h" + +#include "test/test_common/utility.h" + +namespace Envoy { +namespace Extensions { +namespace UdpFilters { +namespace DnsFilter { +namespace Utils { + +std::string buildQueryFromBytes(const char* bytes, const size_t count) { + std::string query; + for (size_t i = 0; i < count; i++) { + query.append(static_cast(&bytes[i]), 1); + } + return query; +} + +std::string buildQueryForDomain(const std::string& name, uint16_t rec_type, uint16_t rec_class) { + Random::RandomGeneratorImpl random_; + struct DnsMessageParser::DnsHeader query {}; + uint16_t id = random_.random() & 0xFFFF; + + // Generate a random query ID + query.id = id; + + // Signify that this is a query + query.flags.qr = 0; + + // This should usually be zero + query.flags.opcode = 0; + + query.flags.aa = 0; + query.flags.tc = 0; + + // Set Recursion flags (at least one bit set so that the flags are not all zero) + query.flags.rd = 1; + query.flags.ra = 0; + + // reserved flag is not set + query.flags.z = 0; + + // Set the authenticated flags to zero + query.flags.ad = 0; + query.flags.cd = 0; + + query.questions = 1; + query.answers = 0; + query.authority_rrs = 0; + query.additional_rrs = 0; + + Buffer::OwnedImpl buffer; + 
buffer.writeBEInt(query.id); + + uint16_t flags; + ::memcpy(&flags, static_cast(&query.flags), sizeof(uint16_t)); + buffer.writeBEInt(flags); + + buffer.writeBEInt(query.questions); + buffer.writeBEInt(query.answers); + buffer.writeBEInt(query.authority_rrs); + buffer.writeBEInt(query.additional_rrs); + + DnsQueryRecord query_rec(name, rec_type, rec_class); + query_rec.serialize(buffer); + return buffer.toString(); +} + +void verifyAddress(const std::list& addresses, const DnsAnswerRecordPtr& answer) { + ASSERT_TRUE(answer != nullptr); + ASSERT_TRUE(answer->ip_addr_ != nullptr); + + const auto resolved_address = answer->ip_addr_->ip()->addressAsString(); + if (addresses.size() == 1) { + const auto expected = addresses.begin(); + ASSERT_EQ(*expected, resolved_address); + return; + } + + const auto iter = std::find(addresses.begin(), addresses.end(), resolved_address); + ASSERT_TRUE(iter != addresses.end()); +} + +} // namespace Utils +} // namespace DnsFilter +} // namespace UdpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/udp/dns_filter/dns_filter_test_utils.h b/test/extensions/filters/udp/dns_filter/dns_filter_test_utils.h new file mode 100644 index 0000000000000..f3bced0ff2629 --- /dev/null +++ b/test/extensions/filters/udp/dns_filter/dns_filter_test_utils.h @@ -0,0 +1,22 @@ +#pragma once + +#include "extensions/filters/udp/dns_filter/dns_filter.h" + +namespace Envoy { +namespace Extensions { +namespace UdpFilters { +namespace DnsFilter { +namespace Utils { + +static constexpr uint64_t MAX_UDP_DNS_SIZE{512}; + +std::string buildQueryFromBytes(const char* bytes, const size_t count); +std::string buildQueryForDomain(const std::string& name, uint16_t rec_type, uint16_t rec_class); +void verifyAddress(const std::list& addresses, const DnsAnswerRecordPtr& answer); +size_t getResponseQueryCount(DnsMessageParser& parser); + +} // namespace Utils +} // namespace DnsFilter +} // namespace UdpFilters +} // namespace 
Extensions +} // namespace Envoy diff --git a/test/extensions/filters/udp/udp_proxy/BUILD b/test/extensions/filters/udp/udp_proxy/BUILD index f08621cfc0228..9205ec9237a77 100644 --- a/test/extensions/filters/udp/udp_proxy/BUILD +++ b/test/extensions/filters/udp/udp_proxy/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( @@ -19,7 +19,7 @@ envoy_extension_cc_test( "//source/extensions/filters/udp/udp_proxy:udp_proxy_filter_lib", "//test/mocks/network:io_handle_mocks", "//test/mocks/upstream:upstream_mocks", - "@envoy_api//envoy/config/filter/udp/udp_proxy/v2alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/udp/udp_proxy/v3:pkg_cc_proto", ], ) @@ -27,6 +27,7 @@ envoy_extension_cc_test( name = "udp_proxy_integration_test", srcs = ["udp_proxy_integration_test.cc"], extension_name = "envoy.filters.udp_listener.udp_proxy", + tags = ["fails_on_windows"], deps = [ "//source/extensions/filters/udp/udp_proxy:config", "//test/integration:integration_lib", diff --git a/test/extensions/filters/udp/udp_proxy/udp_proxy_filter_test.cc b/test/extensions/filters/udp/udp_proxy/udp_proxy_filter_test.cc index 3e773189bcc9e..449a98c0a5cc5 100644 --- a/test/extensions/filters/udp/udp_proxy/udp_proxy_filter_test.cc +++ b/test/extensions/filters/udp/udp_proxy/udp_proxy_filter_test.cc @@ -1,5 +1,5 @@ -#include "envoy/config/filter/udp/udp_proxy/v2alpha/udp_proxy.pb.h" -#include "envoy/config/filter/udp/udp_proxy/v2alpha/udp_proxy.pb.validate.h" +#include "envoy/extensions/filters/udp/udp_proxy/v3/udp_proxy.pb.h" +#include "envoy/extensions/filters/udp/udp_proxy/v3/udp_proxy.pb.validate.h" #include "extensions/filters/udp/udp_proxy/udp_proxy_filter.h" @@ -12,6 +12,7 @@ using testing::AtLeast; using testing::ByMove; using testing::InSequence; +using testing::InvokeWithoutArgs; using 
testing::Return; using testing::ReturnNew; using testing::SaveArg; @@ -66,6 +67,7 @@ class UdpProxyFilterTest : public testing::Test { int send_sys_errno = 0) { EXPECT_CALL(*idle_timer_, enableTimer(parent_.config_->sessionTimeout(), nullptr)); + EXPECT_CALL(*io_handle_, supportsUdpGro()); EXPECT_CALL(*io_handle_, supportsMmsg()); // Return the datagram. EXPECT_CALL(*io_handle_, recvmsg(_, 1, _, _)) @@ -97,6 +99,7 @@ class UdpProxyFilterTest : public testing::Test { } })); // Return an EAGAIN result. + EXPECT_CALL(*io_handle_, supportsUdpGro()); EXPECT_CALL(*io_handle_, supportsMmsg()); EXPECT_CALL(*io_handle_, recvmsg(_, 1, _, _)) .WillOnce(Return(ByMove(Api::IoCallUint64Result( @@ -128,7 +131,7 @@ class UdpProxyFilterTest : public testing::Test { ~UdpProxyFilterTest() override { EXPECT_CALL(callbacks_.udp_listener_, onDestroy()); } void setup(const std::string& yaml, bool has_cluster = true) { - envoy::config::filter::udp::udp_proxy::v2alpha::UdpProxyConfig config; + envoy::extensions::filters::udp::udp_proxy::v3::UdpProxyConfig config; TestUtility::loadFromYamlAndValidate(yaml, config); config_ = std::make_shared(cluster_manager_, time_system_, stats_store_, config); @@ -163,6 +166,10 @@ class UdpProxyFilterTest : public testing::Test { EXPECT_CALL(callbacks_.udp_listener_.dispatcher_, createFileEvent_(_, _, Event::FileTriggerType::Edge, Event::FileReadyType::Read)) .WillOnce(DoAll(SaveArg<1>(&new_session.file_event_cb_), Return(nullptr))); + // Internal Buffer is Empty, flush will be a no-op + ON_CALL(callbacks_.udp_listener_, flush()) + .WillByDefault( + InvokeWithoutArgs([]() -> Api::IoCallUint64Result { return makeNoError(0); })); } void checkTransferStats(uint64_t rx_bytes, uint64_t rx_datagrams, uint64_t tx_bytes, @@ -260,13 +267,13 @@ cluster: fake_cluster EXPECT_EQ(5, cluster_manager_.thread_local_cluster_.cluster_.info_->stats_ .upstream_cx_tx_bytes_total_.value()); - test_sessions_[0].recvDataFromUpstream("world2", 0, EMSGSIZE); + 
test_sessions_[0].recvDataFromUpstream("world2", 0, SOCKET_ERROR_MSG_SIZE); checkTransferStats(5 /*rx_bytes*/, 1 /*rx_datagrams*/, 0 /*tx_bytes*/, 0 /*tx_datagrams*/); EXPECT_EQ(6, cluster_manager_.thread_local_cluster_.cluster_.info_->stats_ .upstream_cx_rx_bytes_total_.value()); EXPECT_EQ(1, config_->stats().downstream_sess_tx_errors_.value()); - test_sessions_[0].recvDataFromUpstream("world2", EMSGSIZE, 0); + test_sessions_[0].recvDataFromUpstream("world2", SOCKET_ERROR_MSG_SIZE, 0); checkTransferStats(5 /*rx_bytes*/, 1 /*rx_datagrams*/, 0 /*tx_bytes*/, 0 /*tx_datagrams*/); EXPECT_EQ(6, cluster_manager_.thread_local_cluster_.cluster_.info_->stats_ .upstream_cx_rx_bytes_total_.value()); @@ -275,7 +282,7 @@ cluster: fake_cluster "udp.sess_rx_errors") ->value()); - test_sessions_[0].expectUpstreamWrite("hello", EMSGSIZE); + test_sessions_[0].expectUpstreamWrite("hello", SOCKET_ERROR_MSG_SIZE); recvDataFromDownstream("10.0.0.1:1000", "10.0.0.2:80", "hello"); checkTransferStats(10 /*rx_bytes*/, 2 /*rx_datagrams*/, 0 /*tx_bytes*/, 0 /*tx_datagrams*/); EXPECT_EQ(5, cluster_manager_.thread_local_cluster_.cluster_.info_->stats_ diff --git a/test/extensions/filters/udp/udp_proxy/udp_proxy_integration_test.cc b/test/extensions/filters/udp/udp_proxy/udp_proxy_integration_test.cc index 88b51986362b5..19ee0597a1474 100644 --- a/test/extensions/filters/udp/udp_proxy/udp_proxy_integration_test.cc +++ b/test/extensions/filters/udp/udp_proxy/udp_proxy_integration_test.cc @@ -18,7 +18,7 @@ class UdpProxyIntegrationTest : public testing::TestWithParam 1. +TEST_P(UdpProxyIntegrationTest, NoReusePort) { + concurrency_ = 2; + // Do not wait for listeners to start as the listener will fail. + defer_listener_finalization_ = true; + setup(1); + test_server_->waitForCounterGe("listener_manager.lds.update_rejected", 1); +} + // Basic loopback test. 
TEST_P(UdpProxyIntegrationTest, HelloWorldOnLoopback) { setup(1); diff --git a/test/extensions/grpc_credentials/aws_iam/BUILD b/test/extensions/grpc_credentials/aws_iam/BUILD index 07afc2edd9717..0796f78a871cd 100644 --- a/test/extensions/grpc_credentials/aws_iam/BUILD +++ b/test/extensions/grpc_credentials/aws_iam/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", @@ -7,12 +5,15 @@ load( "envoy_select_google_grpc", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( name = "aws_iam_grpc_credentials_test", srcs = envoy_select_google_grpc(["aws_iam_grpc_credentials_test.cc"]), data = ["//test/config/integration/certs"], + tags = ["fails_on_windows"], deps = [ "//source/extensions/grpc_credentials:well_known_names", "//source/extensions/grpc_credentials/aws_iam:config", diff --git a/test/extensions/grpc_credentials/aws_iam/aws_iam_grpc_credentials_test.cc b/test/extensions/grpc_credentials/aws_iam/aws_iam_grpc_credentials_test.cc index 8dcfd96b25f04..6001a7bfdacd9 100644 --- a/test/extensions/grpc_credentials/aws_iam/aws_iam_grpc_credentials_test.cc +++ b/test/extensions/grpc_credentials/aws_iam/aws_iam_grpc_credentials_test.cc @@ -34,9 +34,12 @@ class GrpcAwsIamClientIntegrationTest : public GrpcSslClientIntegrationTest { } void expectExtraHeaders(FakeStream& fake_stream) override { + if (call_credentials_ != CallCredentials::FromPlugin) { + return; + } AssertionResult result = fake_stream.waitForHeadersComplete(); RELEASE_ASSERT(result, result.message()); - Http::TestHeaderMapImpl stream_headers(fake_stream.headers()); + Http::TestRequestHeaderMapImpl stream_headers(fake_stream.headers()); const auto auth_header = stream_headers.get_("Authorization"); const auto auth_parts = StringUtil::splitToken(auth_header, ", ", false); ASSERT_EQ(4, auth_parts.size()); @@ -57,31 +60,55 @@ class GrpcAwsIamClientIntegrationTest : public GrpcSslClientIntegrationTest { 
ssl_creds->mutable_root_certs()->set_filename( TestEnvironment::runfilesPath("test/config/integration/certs/upstreamcacert.pem")); - std::string config_yaml; - if (region_in_env_) { - TestEnvironment::setEnvVar("AWS_REGION", region_name_, 1); - config_yaml = fmt::format(R"EOF( -"@type": type.googleapis.com/envoy.config.grpc_credential.v2alpha.AwsIamConfig -service_name: {} -)EOF", - service_name_); - } else { - config_yaml = fmt::format(R"EOF( -"@type": type.googleapis.com/envoy.config.grpc_credential.v2alpha.AwsIamConfig -service_name: {} -region: {} -)EOF", - service_name_, region_name_); + switch (call_credentials_) { + case CallCredentials::FromPlugin: { + std::string config_yaml; + switch (region_location_) { + case RegionLocation::InEnvironment: + TestEnvironment::setEnvVar("AWS_REGION", region_name_, 1); + ABSL_FALLTHROUGH_INTENDED; + case RegionLocation::NotProvided: + config_yaml = fmt::format(R"EOF( + "@type": type.googleapis.com/envoy.config.grpc_credential.v2alpha.AwsIamConfig + service_name: {} + )EOF", + service_name_); + break; + case RegionLocation::InConfig: + config_yaml = fmt::format(R"EOF( + "@type": type.googleapis.com/envoy.config.grpc_credential.v2alpha.AwsIamConfig + service_name: {} + region: {} + )EOF", + service_name_, region_name_); + break; + } + + auto* plugin_config = google_grpc->add_call_credentials()->mutable_from_plugin(); + plugin_config->set_name(credentials_factory_name_); + Envoy::TestUtility::loadFromYaml(config_yaml, *plugin_config->mutable_typed_config()); + return config; + } + case CallCredentials::AccessToken: + google_grpc->add_call_credentials()->mutable_access_token()->assign("foo"); + return config; + default: + return config; } - - auto* plugin_config = google_grpc->add_call_credentials()->mutable_from_plugin(); - plugin_config->set_name(credentials_factory_name_); - envoy::config::grpc_credential::v3::AwsIamConfig metadata_config; - Envoy::TestUtility::loadFromYaml(config_yaml, 
*plugin_config->mutable_typed_config()); - return config; } - - bool region_in_env_{}; + enum class RegionLocation { + NotProvided, + InEnvironment, + InConfig, + }; + + enum class CallCredentials { + FromPlugin, + AccessToken, + }; + + RegionLocation region_location_ = RegionLocation::NotProvided; + CallCredentials call_credentials_ = CallCredentials::FromPlugin; std::string service_name_{}; std::string region_name_{}; std::string credentials_factory_name_{}; @@ -94,6 +121,7 @@ TEST_P(GrpcAwsIamClientIntegrationTest, AwsIamGrpcAuth_ConfigRegion) { SKIP_IF_GRPC_CLIENT(ClientType::EnvoyGrpc); service_name_ = "test_service"; region_name_ = "test_region_static"; + region_location_ = RegionLocation::InConfig; credentials_factory_name_ = Extensions::GrpcCredentials::GrpcCredentialsNames::get().AwsIam; initialize(); auto request = createRequest(empty_metadata_); @@ -105,7 +133,26 @@ TEST_P(GrpcAwsIamClientIntegrationTest, AwsIamGrpcAuth_EnvRegion) { SKIP_IF_GRPC_CLIENT(ClientType::EnvoyGrpc); service_name_ = "test_service"; region_name_ = "test_region_env"; - region_in_env_ = true; + region_location_ = RegionLocation::InEnvironment; + credentials_factory_name_ = Extensions::GrpcCredentials::GrpcCredentialsNames::get().AwsIam; + initialize(); + auto request = createRequest(empty_metadata_); + request->sendReply(); + dispatcher_helper_.runDispatcher(); +} + +TEST_P(GrpcAwsIamClientIntegrationTest, AwsIamGrpcAuth_NoRegion) { + SKIP_IF_GRPC_CLIENT(ClientType::EnvoyGrpc); + service_name_ = "test_service"; + region_name_ = "test_region_env"; + region_location_ = RegionLocation::NotProvided; + credentials_factory_name_ = Extensions::GrpcCredentials::GrpcCredentialsNames::get().AwsIam; + EXPECT_THROW_WITH_REGEX(initialize();, EnvoyException, "AWS region"); +} + +TEST_P(GrpcAwsIamClientIntegrationTest, AwsIamGrpcAuth_UnexpectedCallCredentials) { + SKIP_IF_GRPC_CLIENT(ClientType::EnvoyGrpc); + call_credentials_ = CallCredentials::AccessToken; credentials_factory_name_ = 
Extensions::GrpcCredentials::GrpcCredentialsNames::get().AwsIam; initialize(); auto request = createRequest(empty_metadata_); diff --git a/test/extensions/grpc_credentials/file_based_metadata/BUILD b/test/extensions/grpc_credentials/file_based_metadata/BUILD index 74d148c643d16..53cff427b2fe9 100644 --- a/test/extensions/grpc_credentials/file_based_metadata/BUILD +++ b/test/extensions/grpc_credentials/file_based_metadata/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", @@ -7,12 +5,15 @@ load( "envoy_select_google_grpc", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( name = "file_based_metadata_grpc_credentials_test", srcs = ["file_based_metadata_grpc_credentials_test.cc"], data = ["//test/config/integration/certs"], + tags = ["fails_on_windows"], deps = [ "//source/extensions/grpc_credentials:well_known_names", "//source/extensions/grpc_credentials/file_based_metadata:config", diff --git a/test/extensions/grpc_credentials/file_based_metadata/file_based_metadata_grpc_credentials_test.cc b/test/extensions/grpc_credentials/file_based_metadata/file_based_metadata_grpc_credentials_test.cc index f567b3d3e258b..f9be83ec5b6e3 100644 --- a/test/extensions/grpc_credentials/file_based_metadata/file_based_metadata_grpc_credentials_test.cc +++ b/test/extensions/grpc_credentials/file_based_metadata/file_based_metadata_grpc_credentials_test.cc @@ -23,7 +23,7 @@ class GrpcFileBasedMetadataClientIntegrationTest : public GrpcSslClientIntegrati void expectExtraHeaders(FakeStream& fake_stream) override { AssertionResult result = fake_stream.waitForHeadersComplete(); RELEASE_ASSERT(result, result.message()); - Http::TestHeaderMapImpl stream_headers(fake_stream.headers()); + Http::TestRequestHeaderMapImpl stream_headers(fake_stream.headers()); if (!header_value_1_.empty()) { EXPECT_EQ(header_prefix_1_ + header_value_1_, stream_headers.get_(header_key_1_)); } diff --git 
a/test/extensions/health_checkers/redis/BUILD b/test/extensions/health_checkers/redis/BUILD index 73eafe90384a8..e03d6efc0a52a 100644 --- a/test/extensions/health_checkers/redis/BUILD +++ b/test/extensions/health_checkers/redis/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( @@ -39,9 +39,10 @@ envoy_extension_cc_test( "//source/common/upstream:health_checker_lib", "//source/extensions/health_checkers/redis:config", "//test/common/upstream:utility_lib", + "//test/mocks/access_log:access_log_mocks", "//test/mocks/network:network_mocks", "//test/mocks/runtime:runtime_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:health_checker_factory_context_mocks", "//test/mocks/upstream:upstream_mocks", "@envoy_api//envoy/extensions/filters/network/redis_proxy/v3:pkg_cc_proto", ], diff --git a/test/extensions/health_checkers/redis/config_test.cc b/test/extensions/health_checkers/redis/config_test.cc index 0e01e8e9a1560..cd18ecc1644c8 100644 --- a/test/extensions/health_checkers/redis/config_test.cc +++ b/test/extensions/health_checkers/redis/config_test.cc @@ -6,9 +6,10 @@ #include "extensions/health_checkers/redis/config.h" #include "test/common/upstream/utility.h" +#include "test/mocks/access_log/mocks.h" #include "test/mocks/network/mocks.h" #include "test/mocks/runtime/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/health_checker_factory_context.h" #include "test/mocks/upstream/mocks.h" namespace Envoy { @@ -36,11 +37,11 @@ TEST(HealthCheckerFactoryTest, DEPRECATED_FEATURE_TEST(CreateRedisDeprecated)) { NiceMock context; RedisHealthCheckerFactory factory; - EXPECT_NE( - nullptr, - dynamic_cast( - factory.createCustomHealthChecker(Upstream::parseHealthCheckFromV2Yaml(yaml), context) - .get())); + EXPECT_NE(nullptr, dynamic_cast( + factory 
+ .createCustomHealthChecker( + Upstream::parseHealthCheckFromV3Yaml(yaml, false), context) + .get())); } TEST(HealthCheckerFactoryTest, CreateRedis) { @@ -64,7 +65,7 @@ TEST(HealthCheckerFactoryTest, CreateRedis) { EXPECT_NE( nullptr, dynamic_cast( - factory.createCustomHealthChecker(Upstream::parseHealthCheckFromV2Yaml(yaml), context) + factory.createCustomHealthChecker(Upstream::parseHealthCheckFromV3Yaml(yaml), context) .get())); } @@ -84,11 +85,11 @@ TEST(HealthCheckerFactoryTest, DEPRECATED_FEATURE_TEST(CreateRedisWithoutKeyDepr NiceMock context; RedisHealthCheckerFactory factory; - EXPECT_NE( - nullptr, - dynamic_cast( - factory.createCustomHealthChecker(Upstream::parseHealthCheckFromV2Yaml(yaml), context) - .get())); + EXPECT_NE(nullptr, dynamic_cast( + factory + .createCustomHealthChecker( + Upstream::parseHealthCheckFromV3Yaml(yaml, false), context) + .get())); } TEST(HealthCheckerFactoryTest, CreateRedisWithoutKey) { @@ -111,7 +112,7 @@ TEST(HealthCheckerFactoryTest, CreateRedisWithoutKey) { EXPECT_NE( nullptr, dynamic_cast( - factory.createCustomHealthChecker(Upstream::parseHealthCheckFromV2Yaml(yaml), context) + factory.createCustomHealthChecker(Upstream::parseHealthCheckFromV3Yaml(yaml), context) .get())); } @@ -136,7 +137,7 @@ TEST(HealthCheckerFactoryTest, CreateRedisWithLogHCFailure) { EXPECT_NE( nullptr, dynamic_cast( - factory.createCustomHealthChecker(Upstream::parseHealthCheckFromV2Yaml(yaml), context) + factory.createCustomHealthChecker(Upstream::parseHealthCheckFromV3Yaml(yaml), context) .get())); } @@ -157,7 +158,7 @@ TEST(HealthCheckerFactoryTest, CreateRedisViaUpstreamHealthCheckerFactory) { NiceMock cluster; Runtime::MockLoader runtime; - Runtime::MockRandomGenerator random; + Random::MockRandomGenerator random; Event::MockDispatcher dispatcher; AccessLog::MockAccessLogManager log_manager; NiceMock api; @@ -165,7 +166,7 @@ TEST(HealthCheckerFactoryTest, CreateRedisViaUpstreamHealthCheckerFactory) { EXPECT_NE(nullptr, dynamic_cast( 
Upstream::HealthCheckerFactory::create( - Upstream::parseHealthCheckFromV2Yaml(yaml), cluster, runtime, random, + Upstream::parseHealthCheckFromV3Yaml(yaml), cluster, runtime, random, dispatcher, log_manager, ProtobufMessage::getStrictValidationVisitor(), api) .get())); } diff --git a/test/extensions/health_checkers/redis/redis_test.cc b/test/extensions/health_checkers/redis/redis_test.cc index 9d413879998ca..a16fad088aa85 100644 --- a/test/extensions/health_checkers/redis/redis_test.cc +++ b/test/extensions/health_checkers/redis/redis_test.cc @@ -49,10 +49,46 @@ class RedisHealthCheckerTest "@type": type.googleapis.com/envoy.config.health_checker.redis.v2.Redis )EOF"; + const auto& health_check_config = Upstream::parseHealthCheckFromV3Yaml(yaml); + const auto& redis_config = getRedisHealthCheckConfig( + health_check_config, ProtobufMessage::getStrictValidationVisitor()); + + health_checker_ = std::make_shared( + *cluster_, health_check_config, redis_config, dispatcher_, runtime_, random_, + Upstream::HealthCheckEventLoggerPtr(event_logger_), *api_, *this); + } + + void setupWithAuth() { + const std::string yaml = R"EOF( + timeout: 1s + interval: 1s + no_traffic_interval: 5s + interval_jitter: 1s + unhealthy_threshold: 1 + healthy_threshold: 1 + custom_health_check: + name: redis + typed_config: + "@type": type.googleapis.com/envoy.config.health_checker.redis.v2.Redis + )EOF"; + const auto& health_check_config = Upstream::parseHealthCheckFromV2Yaml(yaml); const auto& redis_config = getRedisHealthCheckConfig( health_check_config, ProtobufMessage::getStrictValidationVisitor()); + std::string auth_yaml = R"EOF( + auth_username: { inline_string: "test user" } + auth_password: { inline_string: "test password" } + )EOF"; + envoy::extensions::filters::network::redis_proxy::v3::RedisProtocolOptions proto_config{}; + TestUtility::loadFromYaml(auth_yaml, proto_config); + + Upstream::ProtocolOptionsConfigConstSharedPtr options = std::make_shared< + const 
Envoy::Extensions::NetworkFilters::RedisProxy::ProtocolOptionsConfigImpl>( + proto_config); + + EXPECT_CALL(*cluster_->info_, extensionProtocolOptions(_)).WillRepeatedly(Return(options)); + health_checker_ = std::make_shared( *cluster_, health_check_config, redis_config, dispatcher_, runtime_, random_, Upstream::HealthCheckEventLoggerPtr(event_logger_), *api_, *this); @@ -73,7 +109,7 @@ class RedisHealthCheckerTest "@type": type.googleapis.com/envoy.config.health_checker.redis.v2.Redis )EOF"; - const auto& health_check_config = Upstream::parseHealthCheckFromV2Yaml(yaml); + const auto& health_check_config = Upstream::parseHealthCheckFromV3Yaml(yaml); const auto& redis_config = getRedisHealthCheckConfig( health_check_config, ProtobufMessage::getStrictValidationVisitor()); @@ -97,16 +133,53 @@ class RedisHealthCheckerTest key: foo )EOF"; + const auto& health_check_config = Upstream::parseHealthCheckFromV3Yaml(yaml); + const auto& redis_config = getRedisHealthCheckConfig( + health_check_config, ProtobufMessage::getStrictValidationVisitor()); + + health_checker_ = std::make_shared( + *cluster_, health_check_config, redis_config, dispatcher_, runtime_, random_, + Upstream::HealthCheckEventLoggerPtr(event_logger_), *api_, *this); + } + + void setupExistsHealthcheckWithAuth() { + const std::string yaml = R"EOF( + timeout: 1s + interval: 1s + no_traffic_interval: 5s + interval_jitter: 1s + unhealthy_threshold: 1 + healthy_threshold: 1 + custom_health_check: + name: redis + typed_config: + "@type": type.googleapis.com/envoy.config.health_checker.redis.v2.Redis + key: foo + )EOF"; + const auto& health_check_config = Upstream::parseHealthCheckFromV2Yaml(yaml); const auto& redis_config = getRedisHealthCheckConfig( health_check_config, ProtobufMessage::getStrictValidationVisitor()); + std::string auth_yaml = R"EOF( + auth_username: { inline_string: "test user" } + auth_password: { inline_string: "test password" } + )EOF"; + 
envoy::extensions::filters::network::redis_proxy::v3::RedisProtocolOptions proto_config{}; + TestUtility::loadFromYaml(auth_yaml, proto_config); + + Upstream::ProtocolOptionsConfigConstSharedPtr options = std::make_shared< + const Envoy::Extensions::NetworkFilters::RedisProxy::ProtocolOptionsConfigImpl>( + proto_config); + + EXPECT_CALL(*cluster_->info_, extensionProtocolOptions(_)).WillRepeatedly(Return(options)); + health_checker_ = std::make_shared( *cluster_, health_check_config, redis_config, dispatcher_, runtime_, random_, Upstream::HealthCheckEventLoggerPtr(event_logger_), *api_, *this); } - void setupExistsHealthcheckDeprecated() { + void setupExistsHealthcheckDeprecated(bool avoid_boosting = true) { const std::string yaml = R"EOF( timeout: 1s interval: 1s @@ -120,7 +193,7 @@ class RedisHealthCheckerTest key: foo )EOF"; - const auto& health_check_config = Upstream::parseHealthCheckFromV2Yaml(yaml); + const auto& health_check_config = Upstream::parseHealthCheckFromV3Yaml(yaml, avoid_boosting); const auto& redis_config = getRedisHealthCheckConfig( health_check_config, ProtobufMessage::getStrictValidationVisitor()); @@ -144,7 +217,7 @@ class RedisHealthCheckerTest "@type": type.googleapis.com/envoy.config.health_checker.redis.v2.Redis )EOF"; - const auto& health_check_config = Upstream::parseHealthCheckFromV2Yaml(yaml); + const auto& health_check_config = Upstream::parseHealthCheckFromV3Yaml(yaml); const auto& redis_config = getRedisHealthCheckConfig( health_check_config, ProtobufMessage::getStrictValidationVisitor()); @@ -157,7 +230,9 @@ class RedisHealthCheckerTest create(Upstream::HostConstSharedPtr, Event::Dispatcher&, const Extensions::NetworkFilters::Common::Redis::Client::Config&, const Extensions::NetworkFilters::Common::Redis::RedisCommandStatsSharedPtr&, - Stats::Scope&, const std::string&) override { + Stats::Scope&, const std::string& username, const std::string& password) override { + EXPECT_EQ(auth_username_, username); + 
EXPECT_EQ(auth_password_, password); return Extensions::NetworkFilters::Common::Redis::Client::ClientPtr{create_()}; } @@ -206,7 +281,7 @@ class RedisHealthCheckerTest std::shared_ptr cluster_; NiceMock dispatcher_; NiceMock runtime_; - NiceMock random_; + NiceMock random_; Upstream::MockHealthCheckEventLogger* event_logger_{}; Event::MockTimer* timeout_timer_{}; Event::MockTimer* interval_timer_{}; @@ -215,8 +290,105 @@ class RedisHealthCheckerTest Extensions::NetworkFilters::Common::Redis::Client::ClientCallbacks* pool_callbacks_{}; std::shared_ptr health_checker_; Api::ApiPtr api_; + std::string auth_username_; + std::string auth_password_; }; +TEST_F(RedisHealthCheckerTest, PingWithAuth) { + InSequence s; + + auth_username_ = "test user"; + auth_password_ = "test password"; + + setupWithAuth(); + + cluster_->prioritySet().getMockHostSet(0)->hosts_ = { + Upstream::makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + + expectSessionCreate(); + expectClientCreate(); + expectPingRequestCreate(); + health_checker_->start(); + + client_->runHighWatermarkCallbacks(); + client_->runLowWatermarkCallbacks(); + + // Success + EXPECT_CALL(*timeout_timer_, disableTimer()); + EXPECT_CALL(*interval_timer_, enableTimer(_, _)); + NetworkFilters::Common::Redis::RespValuePtr response( + new NetworkFilters::Common::Redis::RespValue()); + response->type(NetworkFilters::Common::Redis::RespType::SimpleString); + response->asString() = "PONG"; + pool_callbacks_->onResponse(std::move(response)); + + expectPingRequestCreate(); + interval_timer_->invokeCallback(); + + // Failure, invalid auth + EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _)); + EXPECT_CALL(*timeout_timer_, disableTimer()); + EXPECT_CALL(*interval_timer_, enableTimer(_, _)); + response = std::make_unique(); + response->type(NetworkFilters::Common::Redis::RespType::Error); + response->asString() = "WRONGPASS invalid username-password pair"; + pool_callbacks_->onResponse(std::move(response)); + + 
EXPECT_CALL(*client_, close()); + + EXPECT_EQ(2UL, cluster_->info_->stats_store_.counter("health_check.attempt").value()); + EXPECT_EQ(1UL, cluster_->info_->stats_store_.counter("health_check.success").value()); + EXPECT_EQ(1UL, cluster_->info_->stats_store_.counter("health_check.failure").value()); + EXPECT_EQ(0UL, cluster_->info_->stats_store_.counter("health_check.network_failure").value()); +} + +TEST_F(RedisHealthCheckerTest, ExistsWithAuth) { + InSequence s; + + auth_username_ = "test user"; + auth_password_ = "test password"; + + setupExistsHealthcheckWithAuth(); + + cluster_->prioritySet().getMockHostSet(0)->hosts_ = { + Upstream::makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + + expectSessionCreate(); + expectClientCreate(); + expectExistsRequestCreate(); + health_checker_->start(); + + client_->runHighWatermarkCallbacks(); + client_->runLowWatermarkCallbacks(); + + // Success + EXPECT_CALL(*timeout_timer_, disableTimer()); + EXPECT_CALL(*interval_timer_, enableTimer(_, _)); + NetworkFilters::Common::Redis::RespValuePtr response( + new NetworkFilters::Common::Redis::RespValue()); + response->type(NetworkFilters::Common::Redis::RespType::Integer); + response->asInteger() = 0; + pool_callbacks_->onResponse(std::move(response)); + + expectExistsRequestCreate(); + interval_timer_->invokeCallback(); + + // Failure, invalid auth + EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _)); + EXPECT_CALL(*timeout_timer_, disableTimer()); + EXPECT_CALL(*interval_timer_, enableTimer(_, _)); + response = std::make_unique(); + response->type(NetworkFilters::Common::Redis::RespType::Error); + response->asString() = "WRONGPASS invalid username-password pair"; + pool_callbacks_->onResponse(std::move(response)); + + EXPECT_CALL(*client_, close()); + + EXPECT_EQ(2UL, cluster_->info_->stats_store_.counter("health_check.attempt").value()); + EXPECT_EQ(1UL, cluster_->info_->stats_store_.counter("health_check.success").value()); + EXPECT_EQ(1UL, 
cluster_->info_->stats_store_.counter("health_check.failure").value()); +} + TEST_F(RedisHealthCheckerTest, PingAndVariousFailures) { InSequence s; setup(); @@ -398,7 +570,7 @@ TEST_F(RedisHealthCheckerTest, LogInitialFailure) { TEST_F(RedisHealthCheckerTest, DEPRECATED_FEATURE_TEST(ExistsDeprecated)) { InSequence s; - setupExistsHealthcheckDeprecated(); + setupExistsHealthcheckDeprecated(false); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { Upstream::makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; diff --git a/test/extensions/internal_redirect/previous_routes/BUILD b/test/extensions/internal_redirect/previous_routes/BUILD new file mode 100644 index 0000000000000..5ec2358246a0e --- /dev/null +++ b/test/extensions/internal_redirect/previous_routes/BUILD @@ -0,0 +1,24 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_package", +) +load( + "//test/extensions:extensions_build_system.bzl", + "envoy_extension_cc_test", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_extension_cc_test( + name = "config_test", + srcs = ["config_test.cc"], + extension_name = "envoy.internal_redirect_predicates.previous_routes", + deps = [ + "//source/common/stream_info:filter_state_lib", + "//source/extensions/internal_redirect:well_known_names", + "//source/extensions/internal_redirect/previous_routes:config", + "@envoy_api//envoy/extensions/internal_redirect/previous_routes/v3:pkg_cc_proto", + ], +) diff --git a/test/extensions/internal_redirect/previous_routes/config_test.cc b/test/extensions/internal_redirect/previous_routes/config_test.cc new file mode 100644 index 0000000000000..1d69320fc2ed7 --- /dev/null +++ b/test/extensions/internal_redirect/previous_routes/config_test.cc @@ -0,0 +1,83 @@ +#include "envoy/extensions/internal_redirect/previous_routes/v3/previous_routes_config.pb.h" +#include "envoy/registry/registry.h" +#include "envoy/router/internal_redirect.h" + +#include "common/stream_info/filter_state_impl.h" + +#include 
"extensions/internal_redirect/previous_routes/config.h" +#include "extensions/internal_redirect/well_known_names.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using namespace testing; + +namespace Envoy { +namespace Extensions { +namespace InternalRedirect { +namespace { + +class PreviousRoutesTest : public testing::Test { +protected: + PreviousRoutesTest() : filter_state_(StreamInfo::FilterState::LifeSpan::FilterChain) { + factory_ = Registry::FactoryRegistry::getFactory( + InternalRedirectPredicateValues::get().PreviousRoutesPredicate); + config_ = factory_->createEmptyConfigProto(); + } + + StreamInfo::FilterStateImpl filter_state_; + Router::InternalRedirectPredicateFactory* factory_; + ProtobufTypes::MessagePtr config_; +}; + +TEST_F(PreviousRoutesTest, TargetIsOnlyTakenOnce) { + std::string current_route_name = "fake_current_route"; + // Create the predicate for the first time. It should remember nothing in the + // filter state, so it allows the redirect. + { + auto predicate = factory_->createInternalRedirectPredicate(*config_, current_route_name); + ASSERT(predicate); + + EXPECT_TRUE(predicate->acceptTargetRoute(filter_state_, "route_1", false, false)); + // New filter state data is created with route name. + EXPECT_TRUE(filter_state_.hasDataWithName( + "envoy.internal_redirect.previous_routes_predicate_state.fake_current_route")); + } + + // The second predicate should see the previously taken route. + { + auto predicate = factory_->createInternalRedirectPredicate(*config_, current_route_name); + ASSERT(predicate); + + EXPECT_FALSE(predicate->acceptTargetRoute(filter_state_, "route_1", false, false)); + } +} + +TEST_F(PreviousRoutesTest, RoutesAreIndependent) { + // Create the predicate on route_0. 
+ { + auto predicate = factory_->createInternalRedirectPredicate(*config_, "route_0"); + ASSERT(predicate); + + EXPECT_TRUE(predicate->acceptTargetRoute(filter_state_, "route_2", false, false)); + // New filter state data is created with route name. + EXPECT_TRUE(filter_state_.hasDataWithName( + "envoy.internal_redirect.previous_routes_predicate_state.route_0")); + } + + // The predicate created on route_1 should also allow a redirect to route_2 + { + auto predicate = factory_->createInternalRedirectPredicate(*config_, "route_1"); + ASSERT(predicate); + + EXPECT_TRUE(predicate->acceptTargetRoute(filter_state_, "route_2", false, false)); + // New filter state data is created with route name. + EXPECT_TRUE(filter_state_.hasDataWithName( + "envoy.internal_redirect.previous_routes_predicate_state.route_1")); + } +} + +} // namespace +} // namespace InternalRedirect +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/quic_listeners/quiche/BUILD b/test/extensions/quic_listeners/quiche/BUILD index 7e24810fe50b9..bb259455a7c1b 100644 --- a/test/extensions/quic_listeners/quiche/BUILD +++ b/test/extensions/quic_listeners/quiche/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", @@ -7,6 +5,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( @@ -34,6 +34,7 @@ envoy_cc_test( ], deps = [ "//source/common/network:io_socket_error_lib", + "//source/common/network:udp_packet_writer_handler_lib", "//source/extensions/quic_listeners/quiche:envoy_quic_packet_writer_lib", "//test/mocks/api:api_mocks", "//test/mocks/network:network_mocks", @@ -49,7 +50,24 @@ envoy_cc_test( deps = [ "//source/extensions/quic_listeners/quiche:envoy_quic_proof_source_lib", "//source/extensions/quic_listeners/quiche:envoy_quic_proof_verifier_lib", + "//source/extensions/transport_sockets/tls:context_config_lib", + "//test/mocks/network:network_mocks", + 
"//test/mocks/ssl:ssl_mocks", "@com_googlesource_quiche//:quic_core_versions_lib", + "@com_googlesource_quiche//:quic_test_tools_test_certificates_lib", + ], +) + +envoy_cc_test( + name = "envoy_quic_proof_verifier_test", + srcs = ["envoy_quic_proof_verifier_test.cc"], + external_deps = ["quiche_quic_platform"], + tags = ["nofips"], + deps = [ + "//source/extensions/quic_listeners/quiche:envoy_quic_proof_verifier_lib", + "//source/extensions/transport_sockets/tls:context_config_lib", + "//test/mocks/ssl:ssl_mocks", + "@com_googlesource_quiche//:quic_test_tools_test_certificates_lib", ], ) @@ -74,6 +92,7 @@ envoy_cc_test( "//test/mocks/network:network_mocks", "//test/test_common:utility_lib", "@com_googlesource_quiche//:quic_core_http_spdy_session_lib", + "@com_googlesource_quiche//:quic_test_tools_session_peer_lib", ], ) @@ -111,11 +130,12 @@ envoy_cc_test( ], deps = [ ":quic_test_utils_for_envoy_lib", + ":test_proof_source_lib", + ":test_utils_lib", "//include/envoy/stats:stats_macros", "//source/extensions/quic_listeners/quiche:codec_lib", "//source/extensions/quic_listeners/quiche:envoy_quic_alarm_factory_lib", "//source/extensions/quic_listeners/quiche:envoy_quic_connection_helper_lib", - "//source/extensions/quic_listeners/quiche:envoy_quic_proof_source_lib", "//source/extensions/quic_listeners/quiche:envoy_quic_server_connection_lib", "//source/extensions/quic_listeners/quiche:envoy_quic_server_session_lib", "//source/server:configuration_lib", @@ -127,6 +147,7 @@ envoy_cc_test( "//test/test_common:global_lib", "//test/test_common:logging_lib", "//test/test_common:simulated_time_system_lib", + "@com_googlesource_quiche//:quic_test_tools_config_peer_lib", "@com_googlesource_quiche//:quic_test_tools_server_session_base_peer", "@com_googlesource_quiche//:quic_test_tools_test_utils_interface_lib", ], @@ -142,6 +163,7 @@ envoy_cc_test( ], deps = [ ":quic_test_utils_for_envoy_lib", + ":test_utils_lib", "//include/envoy/stats:stats_macros", 
"//source/extensions/quic_listeners/quiche:codec_lib", "//source/extensions/quic_listeners/quiche:envoy_quic_alarm_factory_lib", @@ -167,13 +189,18 @@ envoy_cc_test( ], deps = [ ":quic_test_utils_for_envoy_lib", + ":test_utils_lib", + "//source/extensions/quic_listeners/quiche:active_quic_listener_config_lib", "//source/extensions/quic_listeners/quiche:active_quic_listener_lib", "//source/extensions/quic_listeners/quiche:envoy_quic_utils_lib", + "//source/extensions/quic_listeners/quiche:udp_gso_batch_writer_lib", "//source/server:configuration_lib", "//test/mocks/network:network_mocks", - "//test/test_common:environment_lib", + "//test/mocks/server:instance_mocks", "//test/test_common:network_utility_lib", "//test/test_common:simulated_time_system_lib", + "@com_googlesource_quiche//:quic_test_tools_crypto_server_config_peer_lib", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], ) @@ -187,6 +214,8 @@ envoy_cc_test( ], deps = [ ":quic_test_utils_for_envoy_lib", + ":test_proof_source_lib", + ":test_utils_lib", "//include/envoy/stats:stats_macros", "//source/extensions/quic_listeners/quiche:envoy_quic_alarm_factory_lib", "//source/extensions/quic_listeners/quiche:envoy_quic_connection_helper_lib", @@ -198,19 +227,38 @@ envoy_cc_test( "//test/mocks/http:http_mocks", "//test/mocks/network:network_mocks", "//test/mocks/stats:stats_mocks", - "//test/test_common:environment_lib", "//test/test_common:global_lib", "//test/test_common:simulated_time_system_lib", ], ) +envoy_cc_test_library( + name = "test_proof_source_lib", + hdrs = ["test_proof_source.h"], + tags = ["nofips"], + deps = [ + "//source/extensions/quic_listeners/quiche:envoy_quic_proof_source_base_lib", + "//test/mocks/network:network_mocks", + "@com_googlesource_quiche//:quic_test_tools_test_certificates_lib", + ], +) + +envoy_cc_test_library( + name = "test_proof_verifier_lib", + hdrs = ["test_proof_verifier.h"], + tags = ["nofips"], + deps = [ + 
"//source/extensions/quic_listeners/quiche:envoy_quic_proof_verifier_base_lib", + ], +) + envoy_cc_test_library( name = "quic_test_utils_for_envoy_lib", srcs = ["crypto_test_utils_for_envoy.cc"], tags = ["nofips"], deps = [ - "//source/extensions/quic_listeners/quiche:envoy_quic_proof_source_lib", - "//source/extensions/quic_listeners/quiche:envoy_quic_proof_verifier_lib", + ":test_proof_source_lib", + ":test_proof_verifier_lib", "@com_googlesource_quiche//:quic_test_tools_test_utils_interface_lib", ], ) @@ -272,9 +320,12 @@ envoy_cc_test( envoy_cc_test_library( name = "test_utils_lib", hdrs = ["test_utils.h"], + external_deps = ["bazel_runfiles"], tags = ["nofips"], deps = [ "//source/extensions/quic_listeners/quiche:quic_filter_manager_connection_lib", + "//test/test_common:environment_lib", "@com_googlesource_quiche//:quic_core_http_spdy_session_lib", + "@com_googlesource_quiche//:quic_test_tools_first_flight_lib", ], ) diff --git a/test/extensions/quic_listeners/quiche/active_quic_listener_config_test.cc b/test/extensions/quic_listeners/quiche/active_quic_listener_config_test.cc index b93afa375ea32..d116f816b6cad 100644 --- a/test/extensions/quic_listeners/quiche/active_quic_listener_config_test.cc +++ b/test/extensions/quic_listeners/quiche/active_quic_listener_config_test.cc @@ -15,6 +15,10 @@ class ActiveQuicListenerFactoryPeer { static quic::QuicConfig& quicConfig(ActiveQuicListenerFactory& factory) { return factory.quic_config_; } + static envoy::config::core::v3::RuntimeFeatureFlag& + runtimeEnabled(ActiveQuicListenerFactory& factory) { + return factory.enabled_; + } }; TEST(ActiveQuicListenerConfigTest, CreateActiveQuicListenerFactory) { @@ -29,6 +33,9 @@ TEST(ActiveQuicListenerConfigTest, CreateActiveQuicListenerFactory) { idle_timeout: { seconds: 2 } + enabled: + default_value: true + runtime_key: foo_key )EOF"; TestUtility::loadFromYaml(yaml, *config); Network::ActiveUdpListenerFactoryPtr listener_factory = @@ -41,6 +48,38 @@ 
TEST(ActiveQuicListenerConfigTest, CreateActiveQuicListenerFactory) { EXPECT_EQ(2000u, quic_config.IdleNetworkTimeout().ToMilliseconds()); // Default value if not present in config. EXPECT_EQ(20000u, quic_config.max_time_before_crypto_handshake().ToMilliseconds()); + envoy::config::core::v3::RuntimeFeatureFlag& runtime_enabled = + ActiveQuicListenerFactoryPeer::runtimeEnabled( + dynamic_cast(*listener_factory)); + EXPECT_EQ(true, runtime_enabled.default_value().value()); + EXPECT_EQ("foo_key", runtime_enabled.runtime_key()); +} + +TEST(ActiveQuicListenerConfigTest, QuicListenerFlagNotConfigured) { + std::string listener_name = QuicListenerName; + auto& config_factory = + Config::Utility::getAndCheckFactoryByName( + listener_name); + ProtobufTypes::MessagePtr config = config_factory.createEmptyConfigProto(); + + std::string yaml = R"EOF( + max_concurrent_streams: 10 + idle_timeout: { + seconds: 2 + } + )EOF"; + TestUtility::loadFromYaml(yaml, *config); + Network::ActiveUdpListenerFactoryPtr listener_factory = + config_factory.createActiveUdpListenerFactory(*config, /*concurrency=*/1); + EXPECT_NE(nullptr, listener_factory); + envoy::config::core::v3::RuntimeFeatureFlag& runtime_enabled = + ActiveQuicListenerFactoryPeer::runtimeEnabled( + dynamic_cast(*listener_factory)); + auto& quic_config = + dynamic_cast(*config); + EXPECT_FALSE(runtime_enabled.has_default_value()); + EXPECT_FALSE(quic_config.has_enabled()); + EXPECT_EQ("", runtime_enabled.runtime_key()); } } // namespace Quic diff --git a/test/extensions/quic_listeners/quiche/active_quic_listener_test.cc b/test/extensions/quic_listeners/quiche/active_quic_listener_test.cc index a9850565fa0fe..b41b6bdd311d1 100644 --- a/test/extensions/quic_listeners/quiche/active_quic_listener_test.cc +++ b/test/extensions/quic_listeners/quiche/active_quic_listener_test.cc @@ -8,10 +8,17 @@ #include +#include "common/runtime/runtime_impl.h" + +#include "envoy/config/core/v3/base.pb.h" +#include 
"envoy/config/core/v3/base.pb.validate.h" +#include "envoy/network/exception.h" + #include "quiche/quic/core/crypto/crypto_protocol.h" #include "quiche/quic/test_tools/crypto_test_utils.h" #include "quiche/quic/test_tools/quic_dispatcher_peer.h" #include "quiche/quic/test_tools/quic_test_utils.h" +#include "quiche/quic/test_tools/quic_crypto_server_config_peer.h" #pragma GCC diagnostic pop @@ -20,16 +27,23 @@ #include "common/network/listen_socket_impl.h" #include "common/network/socket_option_factory.h" #include "extensions/quic_listeners/quiche/active_quic_listener.h" +#include "test/extensions/quic_listeners/quiche/test_utils.h" +#include "test/extensions/quic_listeners/quiche/test_proof_source.h" #include "test/test_common/simulated_time_system.h" #include "test/test_common/environment.h" #include "test/mocks/network/mocks.h" +#include "test/mocks/server/instance.h" + +#include "test/mocks/runtime/mocks.h" #include "test/test_common/utility.h" #include "test/test_common/network_utility.h" #include "absl/time/time.h" #include "gtest/gtest.h" #include "gmock/gmock.h" +#include "extensions/quic_listeners/quiche/active_quic_listener_config.h" #include "extensions/quic_listeners/quiche/platform/envoy_quic_clock.h" #include "extensions/quic_listeners/quiche/envoy_quic_utils.h" +#include "extensions/quic_listeners/quiche/udp_gso_batch_writer.h" using testing::Return; using testing::ReturnRef; @@ -39,41 +53,106 @@ namespace Quic { class ActiveQuicListenerPeer { public: - static EnvoyQuicDispatcher* quic_dispatcher(ActiveQuicListener& listener) { + static EnvoyQuicDispatcher* quicDispatcher(ActiveQuicListener& listener) { return listener.quic_dispatcher_.get(); } - static quic::QuicCryptoServerConfig& crypto_config(ActiveQuicListener& listener) { + static quic::QuicCryptoServerConfig& cryptoConfig(ActiveQuicListener& listener) { return *listener.crypto_config_; } + + static bool enabled(ActiveQuicListener& listener) { return listener.enabled_.enabled(); } +}; + +class 
ActiveQuicListenerFactoryPeer { +public: + static envoy::config::core::v3::RuntimeFeatureFlag& + runtimeEnabled(ActiveQuicListenerFactory* factory) { + return factory->enabled_; + } }; -class ActiveQuicListenerTest : public testing::TestWithParam { +class ActiveQuicListenerTest : public QuicMultiVersionTest { protected: - using Socket = Network::NetworkListenSocket< - Network::NetworkSocketTrait>; + using Socket = + Network::NetworkListenSocket>; ActiveQuicListenerTest() - : version_(GetParam()), api_(Api::createApiForTest(simulated_time_system_)), + : version_(GetParam().first), api_(Api::createApiForTest(simulated_time_system_)), dispatcher_(api_->allocateDispatcher("test_thread")), clock_(*dispatcher_), local_address_(Network::Test::getCanonicalLoopbackAddress(version_)), - connection_handler_(*dispatcher_) {} + connection_handler_(*dispatcher_), quic_version_([]() { + if (GetParam().second == QuicVersionType::GquicQuicCrypto) { + return quic::CurrentSupportedVersionsWithQuicCrypto(); + } + bool use_http3 = GetParam().second == QuicVersionType::Iquic; + SetQuicReloadableFlag(quic_disable_version_draft_29, !use_http3); + SetQuicReloadableFlag(quic_disable_version_draft_27, !use_http3); + SetQuicReloadableFlag(quic_disable_version_draft_25, !use_http3); + return quic::CurrentSupportedVersions(); + }()[0]) {} + + template + std::unique_ptr staticUniquePointerCast(std::unique_ptr&& source) { + return std::unique_ptr{static_cast(source.release())}; + } void SetUp() override { + envoy::config::bootstrap::v3::LayeredRuntime config; + config.add_layers()->mutable_admin_layer(); + loader_ = std::make_unique( + Runtime::LoaderPtr{new Runtime::LoaderImpl(*dispatcher_, tls_, config, local_info_, store_, + generator_, validation_visitor_, *api_)}); + listen_socket_ = std::make_shared(local_address_, nullptr, /*bind*/ true); listen_socket_->addOptions(Network::SocketOptionFactory::buildIpPacketInfoOptions()); 
listen_socket_->addOptions(Network::SocketOptionFactory::buildRxQueueOverFlowOptions()); - quic_listener_ = std::make_unique( - *dispatcher_, connection_handler_, listen_socket_, listener_config_, quic_config_, nullptr); + ON_CALL(listener_config_, listenSocketFactory()).WillByDefault(ReturnRef(socket_factory_)); + ON_CALL(socket_factory_, getListenSocket()).WillByDefault(Return(listen_socket_)); + + // Use UdpGsoBatchWriter to perform non-batched writes for the purpose of this test + ON_CALL(listener_config_, udpPacketWriterFactory()) + .WillByDefault(Return( + std::reference_wrapper(udp_packet_writer_factory_))); + ON_CALL(udp_packet_writer_factory_, createUdpPacketWriter(_, _)) + .WillByDefault(Invoke( + [&](Network::IoHandle& io_handle, Stats::Scope& scope) -> Network::UdpPacketWriterPtr { + Network::UdpPacketWriterPtr udp_packet_writer = + std::make_unique(io_handle, scope); + return udp_packet_writer; + })); + + listener_factory_ = createQuicListenerFactory(yamlForQuicConfig()); + EXPECT_CALL(listener_config_, filterChainManager()).WillOnce(ReturnRef(filter_chain_manager_)); + quic_listener_ = + staticUniquePointerCast(listener_factory_->createActiveUdpListener( + connection_handler_, *dispatcher_, listener_config_)); + quic_dispatcher_ = ActiveQuicListenerPeer::quicDispatcher(*quic_listener_); + quic::QuicCryptoServerConfig& crypto_config = + ActiveQuicListenerPeer::cryptoConfig(*quic_listener_); + quic::test::QuicCryptoServerConfigPeer crypto_config_peer(&crypto_config); + auto proof_source = std::make_unique(); + filter_chain_ = &proof_source->filterChain(); + crypto_config_peer.ResetProofSource(std::move(proof_source)); simulated_time_system_.advanceTimeWait(std::chrono::milliseconds(100)); } - void configureMocks(int connection_count) { - EXPECT_CALL(listener_config_, filterChainManager()) - .Times(connection_count) - .WillRepeatedly(ReturnRef(filter_chain_manager_)); + Network::ActiveUdpListenerFactoryPtr createQuicListenerFactory(const std::string& 
yaml) { + std::string listener_name = QuicListenerName; + auto& config_factory = + Config::Utility::getAndCheckFactoryByName( + listener_name); + ProtobufTypes::MessagePtr config_proto = config_factory.createEmptyConfigProto(); + TestUtility::loadFromYaml(yaml, *config_proto); + return config_factory.createActiveUdpListenerFactory(*config_proto, /*concurrency=*/1); + } + + void maybeConfigureMocks(int connection_count) { + if (quic_version_.UsesTls()) { + return; + } EXPECT_CALL(listener_config_, filterChainFactory()).Times(connection_count); EXPECT_CALL(listener_config_.filter_chain_factory_, createNetworkFilterChain(_, _)) .Times(connection_count) @@ -101,60 +180,29 @@ class ActiveQuicListenerTest : public testing::TestWithParam(local_address_, nullptr, /*bind*/ false)); - quic::CryptoHandshakeMessage chlo = quic::test::crypto_test_utils::GenerateDefaultInchoateCHLO( - &clock_, quic::AllSupportedVersions()[0].transport_version, - &ActiveQuicListenerPeer::crypto_config(*quic_listener_)); - chlo.SetVector(quic::kCOPT, quic::QuicTagVector{quic::kREJ}); - quic::CryptoHandshakeMessage full_chlo; - quic::QuicReferenceCountedPointer signed_config( - new quic::QuicSignedServerConfig); - quic::QuicCompressedCertsCache cache( - quic::QuicCompressedCertsCache::kQuicCompressedCertsCacheSize); - quic::test::crypto_test_utils::GenerateFullCHLO( - chlo, &ActiveQuicListenerPeer::crypto_config(*quic_listener_), - envoyAddressInstanceToQuicSocketAddress(local_address_), - envoyAddressInstanceToQuicSocketAddress(local_address_), - quic::AllSupportedVersions()[0].transport_version, &clock_, signed_config, &cache, - &full_chlo); - // Overwrite version label to highest current supported version. 
- full_chlo.SetVersion(quic::kVER, quic::CurrentSupportedVersions()[0]); - quic::QuicConfig quic_config; - quic_config.ToHandshakeMessage(&full_chlo, - quic::CurrentSupportedVersions()[0].transport_version); - - std::string packet_content(full_chlo.GetSerialized().AsStringPiece()); - auto encrypted_packet = std::unique_ptr( - quic::test::ConstructEncryptedPacket(connection_id, quic::EmptyQuicConnectionId(), - /*version_flag=*/true, /*reset_flag*/ false, - /*packet_number=*/1, packet_content)); - - Buffer::RawSlice first_slice{ - reinterpret_cast(const_cast(encrypted_packet->data())), - encrypted_packet->length()}; + Buffer::OwnedImpl payload = generateChloPacketToSend( + quic_version_, quic_config_, ActiveQuicListenerPeer::cryptoConfig(*quic_listener_), + connection_id, clock_, envoyIpAddressToQuicSocketAddress(local_address_->ip()), + envoyIpAddressToQuicSocketAddress(local_address_->ip()), "test.example.org"); + Buffer::RawSliceVector slice = payload.getRawSlices(); + ASSERT_EQ(1u, slice.size()); // Send a full CHLO to finish 0-RTT handshake. - auto send_rc = Network::Utility::writeToSocket(client_sockets_.back()->ioHandle(), &first_slice, + auto send_rc = Network::Utility::writeToSocket(client_sockets_.back()->ioHandle(), slice.data(), 1, nullptr, *listen_socket_->localAddress()); - ASSERT_EQ(encrypted_packet->length(), send_rc.rc_); + ASSERT_EQ(slice[0].len_, send_rc.rc_); } - void ReadFromClientSockets() { + void readFromClientSockets() { for (auto& client_socket : client_sockets_) { Buffer::InstancePtr result_buffer(new Buffer::OwnedImpl()); const uint64_t bytes_to_read = 11; @@ -185,6 +233,16 @@ class ActiveQuicListenerTest : public testing::TestWithParamonListenerShutdown(); // Trigger alarm to fire before listener destruction. 
dispatcher_->run(Event::Dispatcher::RunType::NonBlock); + Runtime::LoaderSingleton::clear(); + } + +protected: + virtual std::string yamlForQuicConfig() { + return R"EOF( + enabled: + default_value: true + runtime_key: quic.enabled +)EOF"; } Network::Address::IpVersion version_; @@ -198,9 +256,22 @@ class ActiveQuicListenerTest : public testing::TestWithParam read_filter_; Network::MockConnectionCallbacks network_connection_callbacks_; NiceMock listener_config_; + NiceMock udp_packet_writer_factory_; quic::QuicConfig quic_config_; Server::ConnectionHandlerImpl connection_handler_; std::unique_ptr quic_listener_; + Network::ActiveUdpListenerFactoryPtr listener_factory_; + NiceMock socket_factory_; + EnvoyQuicDispatcher* quic_dispatcher_; + std::unique_ptr loader_; + + NiceMock tls_; + Stats::TestUtil::TestStore store_; + Random::MockRandomGenerator generator_; + Random::MockRandomGenerator rand_; + NiceMock local_info_; + Init::MockManager init_manager_; + NiceMock validation_visitor_; std::list> client_sockets_; std::list> read_filters_; @@ -208,12 +279,12 @@ class ActiveQuicListenerTest : public testing::TestWithParam> filter_factories_; - std::list filter_chains_; + const Network::MockFilterChain* filter_chain_; + quic::ParsedQuicVersion quic_version_; }; -INSTANTIATE_TEST_SUITE_P(IpVersions, ActiveQuicListenerTest, - testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), - TestUtility::ipTestParamsToString); +INSTANTIATE_TEST_SUITE_P(ActiveQuicListenerTests, ActiveQuicListenerTest, + testing::ValuesIn(generateTestParam()), testParamsToString); TEST_P(ActiveQuicListenerTest, FailSocketOptionUponCreation) { auto option = std::make_unique(); @@ -221,30 +292,34 @@ TEST_P(ActiveQuicListenerTest, FailSocketOptionUponCreation) { .WillOnce(Return(false)); auto options = std::make_shared>(); options->emplace_back(std::move(option)); - EXPECT_THROW_WITH_REGEX(std::make_unique(*dispatcher_, connection_handler_, - listen_socket_, listener_config_, - quic_config_, 
options), - EnvoyException, "Failed to apply socket options."); + EXPECT_THROW_WITH_REGEX( + std::make_unique( + *dispatcher_, connection_handler_, listen_socket_, listener_config_, quic_config_, + options, + ActiveQuicListenerFactoryPeer::runtimeEnabled( + static_cast(listener_factory_.get()))), + Network::CreateListenerException, "Failed to apply socket options."); } -TEST_P(ActiveQuicListenerTest, ReceiveFullQuicCHLO) { - configureMocks(/* connection_count = */ 1); - SendFullCHLO(quic::test::TestConnectionId(1)); +TEST_P(ActiveQuicListenerTest, ReceiveCHLO) { + quic::QuicBufferedPacketStore* const buffered_packets = + quic::test::QuicDispatcherPeer::GetBufferedPackets(quic_dispatcher_); + maybeConfigureMocks(/* connection_count = */ 1); + sendCHLO(quic::test::TestConnectionId(1)); dispatcher_->run(Event::Dispatcher::RunType::NonBlock); - ReadFromClientSockets(); + EXPECT_FALSE(buffered_packets->HasChlosBuffered()); + EXPECT_FALSE(quic_dispatcher_->session_map().empty()); + readFromClientSockets(); } TEST_P(ActiveQuicListenerTest, ProcessBufferedChlos) { - EnvoyQuicDispatcher* const envoy_quic_dispatcher = - ActiveQuicListenerPeer::quic_dispatcher(*quic_listener_); quic::QuicBufferedPacketStore* const buffered_packets = - quic::test::QuicDispatcherPeer::GetBufferedPackets(envoy_quic_dispatcher); - - configureMocks(ActiveQuicListener::kNumSessionsToCreatePerLoop + 2); + quic::test::QuicDispatcherPeer::GetBufferedPackets(quic_dispatcher_); + maybeConfigureMocks(ActiveQuicListener::kNumSessionsToCreatePerLoop + 2); // Generate one more CHLO than can be processed immediately. 
for (size_t i = 1; i <= ActiveQuicListener::kNumSessionsToCreatePerLoop + 1; ++i) { - SendFullCHLO(quic::test::TestConnectionId(i)); + sendCHLO(quic::test::TestConnectionId(i)); } dispatcher_->run(Event::Dispatcher::RunType::NonBlock); @@ -256,9 +331,10 @@ TEST_P(ActiveQuicListenerTest, ProcessBufferedChlos) { EXPECT_TRUE(buffered_packets->HasBufferedPackets( quic::test::TestConnectionId(ActiveQuicListener::kNumSessionsToCreatePerLoop + 1))); EXPECT_TRUE(buffered_packets->HasChlosBuffered()); + EXPECT_FALSE(quic_dispatcher_->session_map().empty()); // Generate more data to trigger a socket read during the next event loop. - SendFullCHLO(quic::test::TestConnectionId(ActiveQuicListener::kNumSessionsToCreatePerLoop + 2)); + sendCHLO(quic::test::TestConnectionId(ActiveQuicListener::kNumSessionsToCreatePerLoop + 2)); dispatcher_->run(Event::Dispatcher::RunType::NonBlock); // The socket read results in processing all CHLOs. @@ -267,7 +343,49 @@ TEST_P(ActiveQuicListenerTest, ProcessBufferedChlos) { } EXPECT_FALSE(buffered_packets->HasChlosBuffered()); - ReadFromClientSockets(); + readFromClientSockets(); +} + +TEST_P(ActiveQuicListenerTest, QuicProcessingDisabledAndEnabled) { + EXPECT_TRUE(ActiveQuicListenerPeer::enabled(*quic_listener_)); + Runtime::LoaderSingleton::getExisting()->mergeValues({{"quic.enabled", " false"}}); + sendCHLO(quic::test::TestConnectionId(1)); + dispatcher_->run(Event::Dispatcher::RunType::NonBlock); + // If listener was enabled, there should have been session created for active connection. 
+ EXPECT_TRUE(quic_dispatcher_->session_map().empty()); + EXPECT_FALSE(ActiveQuicListenerPeer::enabled(*quic_listener_)); + Runtime::LoaderSingleton::getExisting()->mergeValues({{"quic.enabled", " true"}}); + maybeConfigureMocks(/* connection_count = */ 1); + sendCHLO(quic::test::TestConnectionId(1)); + dispatcher_->run(Event::Dispatcher::RunType::NonBlock); + EXPECT_FALSE(quic_dispatcher_->session_map().empty()); + EXPECT_TRUE(ActiveQuicListenerPeer::enabled(*quic_listener_)); +} + +class ActiveQuicListenerEmptyFlagConfigTest : public ActiveQuicListenerTest { +protected: + std::string yamlForQuicConfig() override { + return R"EOF( + max_concurrent_streams: 10 + )EOF"; + } +}; + +INSTANTIATE_TEST_SUITE_P(ActiveQuicListenerEmptyFlagConfigTests, + ActiveQuicListenerEmptyFlagConfigTest, + testing::ValuesIn(generateTestParam()), testParamsToString); + +// Quic listener should be enabled by default, if not enabled explicitly in config. +TEST_P(ActiveQuicListenerEmptyFlagConfigTest, ReceiveFullQuicCHLO) { + quic::QuicBufferedPacketStore* const buffered_packets = + quic::test::QuicDispatcherPeer::GetBufferedPackets(quic_dispatcher_); + maybeConfigureMocks(/* connection_count = */ 1); + sendCHLO(quic::test::TestConnectionId(1)); + dispatcher_->run(Event::Dispatcher::RunType::NonBlock); + EXPECT_FALSE(buffered_packets->HasChlosBuffered()); + EXPECT_FALSE(quic_dispatcher_->session_map().empty()); + EXPECT_TRUE(ActiveQuicListenerPeer::enabled(*quic_listener_)); + readFromClientSockets(); } } // namespace Quic diff --git a/test/extensions/quic_listeners/quiche/crypto_test_utils_for_envoy.cc b/test/extensions/quic_listeners/quiche/crypto_test_utils_for_envoy.cc index 22df487392d4b..cafdce0c6227d 100644 --- a/test/extensions/quic_listeners/quiche/crypto_test_utils_for_envoy.cc +++ b/test/extensions/quic_listeners/quiche/crypto_test_utils_for_envoy.cc @@ -19,20 +19,23 @@ #endif #include -#include "extensions/quic_listeners/quiche/envoy_quic_fake_proof_source.h" -#include 
"extensions/quic_listeners/quiche/envoy_quic_fake_proof_verifier.h" +#include "test/extensions/quic_listeners/quiche/test_proof_verifier.h" +#include "test/extensions/quic_listeners/quiche/test_proof_source.h" namespace quic { namespace test { namespace crypto_test_utils { +// NOLINTNEXTLINE(readability-identifier-naming) std::unique_ptr ProofSourceForTesting() { - return std::make_unique(); + return std::make_unique(); } +// NOLINTNEXTLINE(readability-identifier-naming) std::unique_ptr ProofVerifierForTesting() { - return std::make_unique(); + return std::make_unique(); } +// NOLINTNEXTLINE(readability-identifier-naming) std::unique_ptr ProofVerifyContextForTesting() { // No context needed for fake verifier. return nullptr; diff --git a/test/extensions/quic_listeners/quiche/envoy_quic_alarm_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_alarm_test.cc index 0b020afb7888a..0e0ab28bb48d7 100644 --- a/test/extensions/quic_listeners/quiche/envoy_quic_alarm_test.cc +++ b/test/extensions/quic_listeners/quiche/envoy_quic_alarm_test.cc @@ -154,10 +154,11 @@ TEST_F(EnvoyQuicAlarmTest, SetAlarmToPastTime) { EXPECT_EQ(100, (clock_.Now() - quic::QuicTime::Zero()).ToMilliseconds()); auto unowned_delegate = new TestDelegate(); quic::QuicArenaScopedPtr alarm(alarm_factory_.CreateAlarm(unowned_delegate)); - // alarm becomes active upon Set(). + // Alarm will be active 1ms after Update() for the purpose of avoiding firing + // in the same event loop. alarm->Set(clock_.Now() - QuicTime::Delta::FromMilliseconds(10)); EXPECT_FALSE(unowned_delegate->fired()); - dispatcher_->run(Dispatcher::RunType::NonBlock); + advanceMsAndLoop(1); EXPECT_TRUE(unowned_delegate->fired()); } @@ -168,9 +169,10 @@ TEST_F(EnvoyQuicAlarmTest, UpdateAlarmWithPastDeadline) { advanceMsAndLoop(9); EXPECT_EQ(9, (clock_.Now() - quic::QuicTime::Zero()).ToMilliseconds()); EXPECT_FALSE(unowned_delegate->fired()); - // alarm becomes active upon Update(). 
+ // Alarm will be active 1ms after Update() for the purpose of avoiding firing + // in the same event loop. alarm->Update(clock_.Now() - QuicTime::Delta::FromMilliseconds(1), quic::QuicTime::Delta::Zero()); - dispatcher_->run(Dispatcher::RunType::NonBlock); + advanceMsAndLoop(1); EXPECT_TRUE(unowned_delegate->fired()); unowned_delegate->set_fired(false); advanceMsAndLoop(1); diff --git a/test/extensions/quic_listeners/quiche/envoy_quic_client_session_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_client_session_test.cc index 39cf43207efbb..488fe023354e1 100644 --- a/test/extensions/quic_listeners/quiche/envoy_quic_client_session_test.cc +++ b/test/extensions/quic_listeners/quiche/envoy_quic_client_session_test.cc @@ -16,6 +16,7 @@ #include "extensions/quic_listeners/quiche/envoy_quic_connection_helper.h" #include "extensions/quic_listeners/quiche/envoy_quic_alarm_factory.h" #include "extensions/quic_listeners/quiche/envoy_quic_utils.h" +#include "test/extensions/quic_listeners/quiche/test_utils.h" #include "envoy/stats/stats_macros.h" #include "test/mocks/event/mocks.h" @@ -62,9 +63,9 @@ class TestQuicCryptoClientStream : public quic::QuicCryptoClientStream { TestQuicCryptoClientStream(const quic::QuicServerId& server_id, quic::QuicSession* session, std::unique_ptr verify_context, quic::QuicCryptoClientConfig* crypto_config, - ProofHandler* proof_handler) + ProofHandler* proof_handler, bool has_application_state) : quic::QuicCryptoClientStream(server_id, session, std::move(verify_context), crypto_config, - proof_handler) {} + proof_handler, has_application_state) {} bool encryption_established() const override { return true; } }; @@ -84,7 +85,7 @@ class TestEnvoyQuicClientSession : public EnvoyQuicClientSession { std::unique_ptr CreateQuicCryptoStream() override { return std::make_unique( server_id(), this, crypto_config()->proof_verifier()->CreateDefaultContext(), - crypto_config(), this); + crypto_config(), this, true); } }; @@ -94,7 +95,9 @@ class 
EnvoyQuicClientSessionTest : public testing::TestWithParam { : api_(Api::createApiForTest(time_system_)), dispatcher_(api_->allocateDispatcher("test_thread")), connection_helper_(*dispatcher_), alarm_factory_(*dispatcher_, *connection_helper_.GetClock()), quic_version_([]() { - SetQuicReloadableFlag(quic_enable_version_draft_27, GetParam()); + SetQuicReloadableFlag(quic_disable_version_draft_29, !GetParam()); + SetQuicReloadableFlag(quic_disable_version_draft_27, !GetParam()); + SetQuicReloadableFlag(quic_disable_version_draft_25, !GetParam()); return quic::ParsedVersionOfIndex(quic::CurrentSupportedVersions(), 0); }()), peer_addr_(Network::Utility::getAddressWithPort(*Network::Utility::getIpv6LoopbackAddress(), @@ -122,6 +125,8 @@ class EnvoyQuicClientSessionTest : public testing::TestWithParam { void SetUp() override { envoy_quic_session_.Initialize(); + setQuicConfigWithDefaultValues(envoy_quic_session_.config()); + envoy_quic_session_.OnConfigNegotiated(); envoy_quic_session_.addConnectionCallbacks(network_connection_callbacks_); envoy_quic_session_.setConnectionStats( {read_total_, read_current_, write_total_, write_current_, nullptr, nullptr}); @@ -188,7 +193,7 @@ TEST_P(EnvoyQuicClientSessionTest, NewStream) { // Response headers should be propagated to decoder. 
EXPECT_CALL(response_decoder, decodeHeaders_(_, /*end_stream=*/true)) .WillOnce(Invoke([](const Http::ResponseHeaderMapPtr& decoded_headers, bool) { - EXPECT_EQ("200", decoded_headers->Status()->value().getStringView()); + EXPECT_EQ("200", decoded_headers->getStatusValue()); })); stream.OnStreamHeaderList(/*fin=*/true, headers.uncompressed_header_bytes(), headers); } diff --git a/test/extensions/quic_listeners/quiche/envoy_quic_client_stream_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_client_stream_test.cc index 5aa201c4a1f87..9784c7231ff28 100644 --- a/test/extensions/quic_listeners/quiche/envoy_quic_client_stream_test.cc +++ b/test/extensions/quic_listeners/quiche/envoy_quic_client_stream_test.cc @@ -25,7 +25,9 @@ class EnvoyQuicClientStreamTest : public testing::TestWithParam { : api_(Api::createApiForTest()), dispatcher_(api_->allocateDispatcher("test_thread")), connection_helper_(*dispatcher_), alarm_factory_(*dispatcher_, *connection_helper_.GetClock()), quic_version_([]() { - SetQuicReloadableFlag(quic_enable_version_draft_27, GetParam()); + SetQuicReloadableFlag(quic_disable_version_draft_29, !GetParam()); + SetQuicReloadableFlag(quic_disable_version_draft_27, !GetParam()); + SetQuicReloadableFlag(quic_disable_version_draft_25, !GetParam()); return quic::CurrentSupportedVersions()[0]; }()), peer_addr_(Network::Utility::getAddressWithPort(*Network::Utility::getIpv6LoopbackAddress(), @@ -38,12 +40,14 @@ class EnvoyQuicClientStreamTest : public testing::TestWithParam { createConnectionSocket(peer_addr_, self_addr_, nullptr))), quic_session_(quic_config_, {quic_version_}, quic_connection_, *dispatcher_, quic_config_.GetInitialStreamFlowControlWindowToSend() * 2), - stream_id_(quic_version_.transport_version == quic::QUIC_VERSION_IETF_DRAFT_27 ? 4u : 5u), + stream_id_(quic::VersionUsesHttp3(quic_version_.transport_version) ? 
4u : 5u), quic_stream_(new EnvoyQuicClientStream(stream_id_, &quic_session_, quic::BIDIRECTIONAL)), - request_headers_{{":authority", host_}, {":method", "POST"}, {":path", "/"}} { + request_headers_{{":authority", host_}, {":method", "POST"}, {":path", "/"}}, + request_trailers_{{"trailer-key", "trailer-value"}} { quic_stream_->setResponseDecoder(stream_decoder_); quic_stream_->addCallbacks(stream_callbacks_); quic_session_.ActivateStream(std::unique_ptr(quic_stream_)); + EXPECT_CALL(quic_session_, ShouldYield(_)).WillRepeatedly(testing::Return(false)); EXPECT_CALL(quic_session_, WritevData(_, _, _, _, _, _)) .WillRepeatedly(Invoke([](quic::QuicStreamId, size_t write_length, quic::QuicStreamOffset, quic::StreamSendingState state, bool, @@ -59,6 +63,8 @@ class EnvoyQuicClientStreamTest : public testing::TestWithParam { void SetUp() override { quic_session_.Initialize(); + setQuicConfigWithDefaultValues(quic_session_.config()); + quic_session_.OnConfigNegotiated(); quic_connection_->setUpConnectionSocket(); response_headers_.OnHeaderBlockStart(); response_headers_.OnHeader(":status", "200"); @@ -67,7 +73,7 @@ class EnvoyQuicClientStreamTest : public testing::TestWithParam { trailers_.OnHeaderBlockStart(); trailers_.OnHeader("key1", "value1"); - if (quic_version_.transport_version != quic::QUIC_VERSION_IETF_DRAFT_27) { + if (!quic::VersionUsesHttp3(quic_version_.transport_version)) { // ":final-offset" is required and stripped off by quic. 
trailers_.OnHeader(":final-offset", absl::StrCat("", response_body_.length())); } @@ -100,6 +106,7 @@ class EnvoyQuicClientStreamTest : public testing::TestWithParam { Http::MockStreamCallbacks stream_callbacks_; std::string host_{"www.abc.com"}; Http::TestRequestHeaderMapImpl request_headers_; + Http::TestRequestTrailerMapImpl request_trailers_; quic::QuicHeaderList response_headers_; quic::QuicHeaderList trailers_; Buffer::OwnedImpl request_body_{"Hello world"}; @@ -112,11 +119,12 @@ INSTANTIATE_TEST_SUITE_P(EnvoyQuicClientStreamTests, EnvoyQuicClientStreamTest, TEST_P(EnvoyQuicClientStreamTest, PostRequestAndResponse) { EXPECT_EQ(absl::nullopt, quic_stream_->http1StreamEncoderOptions()); quic_stream_->encodeHeaders(request_headers_, false); - quic_stream_->encodeData(request_body_, true); + quic_stream_->encodeData(request_body_, false); + quic_stream_->encodeTrailers(request_trailers_); EXPECT_CALL(stream_decoder_, decodeHeaders_(_, /*end_stream=*/false)) .WillOnce(Invoke([](const Http::ResponseHeaderMapPtr& headers, bool) { - EXPECT_EQ("200", headers->Status()->value().getStringView()); + EXPECT_EQ("200", headers->getStatusValue()); })); quic_stream_->OnStreamHeaderList(/*fin=*/false, response_headers_.uncompressed_header_bytes(), response_headers_); @@ -135,7 +143,7 @@ TEST_P(EnvoyQuicClientStreamTest, PostRequestAndResponse) { EXPECT_EQ(0, buffer.length()); })); std::string data = response_body_; - if (quic_version_.transport_version == quic::QUIC_VERSION_IETF_DRAFT_27) { + if (quic::VersionUsesHttp3(quic_version_.transport_version)) { std::unique_ptr data_buffer; quic::QuicByteCount data_frame_header_length = quic::HttpEncoder::SerializeDataFrameHeader(response_body_.length(), &data_buffer); @@ -163,7 +171,7 @@ TEST_P(EnvoyQuicClientStreamTest, OutOfOrderTrailers) { quic_stream_->encodeHeaders(request_headers_, true); EXPECT_CALL(stream_decoder_, decodeHeaders_(_, /*end_stream=*/false)) .WillOnce(Invoke([](const Http::ResponseHeaderMapPtr& headers, bool) { 
- EXPECT_EQ("200", headers->Status()->value().getStringView()); + EXPECT_EQ("200", headers->getStatusValue()); })); quic_stream_->OnStreamHeaderList(/*fin=*/false, response_headers_.uncompressed_header_bytes(), response_headers_); @@ -173,7 +181,7 @@ TEST_P(EnvoyQuicClientStreamTest, OutOfOrderTrailers) { quic_stream_->OnStreamHeaderList(/*fin=*/true, trailers_.uncompressed_header_bytes(), trailers_); std::string data = response_body_; - if (quic_version_.transport_version == quic::QUIC_VERSION_IETF_DRAFT_27) { + if (quic::VersionUsesHttp3(quic_version_.transport_version)) { std::unique_ptr data_buffer; quic::QuicByteCount data_frame_header_length = quic::HttpEncoder::SerializeDataFrameHeader(response_body_.length(), &data_buffer); @@ -258,5 +266,81 @@ TEST_P(EnvoyQuicClientStreamTest, WatermarkSendBuffer) { EXPECT_CALL(stream_callbacks_, onResetStream(_, _)); } +// Tests that headers and trailers buffered in send buffer contribute towards buffer watermark +// limits. Only IETF QUIC writes them on data stream, gQUIC writes them on dedicated headers stream +// and only contributes to connection watermark buffer. +TEST_P(EnvoyQuicClientStreamTest, HeadersContributeToWatermarkIquic) { + if (!quic::VersionUsesHttp3(quic_version_.transport_version)) { + EXPECT_CALL(stream_callbacks_, onResetStream(_, _)); + return; + } + + // Bump connection flow control window large enough not to cause connection level flow control + // blocked + quic::QuicWindowUpdateFrame window_update( + quic::kInvalidControlFrameId, + quic::QuicUtils::GetInvalidStreamId(quic_version_.transport_version), 1024 * 1024); + quic_session_.OnWindowUpdateFrame(window_update); + + // Make the stream blocked by congestion control. 
+ EXPECT_CALL(quic_session_, WritevData(_, _, _, _, _, _)) + .WillOnce(Invoke([](quic::QuicStreamId, size_t /*write_length*/, quic::QuicStreamOffset, + quic::StreamSendingState state, bool, + quiche::QuicheOptional) { + return quic::QuicConsumedData{0u, state != quic::NO_FIN}; + })); + quic_stream_->encodeHeaders(request_headers_, /*end_stream=*/false); + + // Encode 16kB -10 bytes request body. Because the high watermark is 16KB, with previously + // buffered headers, this call should make the send buffers reach their high watermark. + std::string request(16 * 1024 - 10, 'a'); + Buffer::OwnedImpl buffer(request); + EXPECT_CALL(stream_callbacks_, onAboveWriteBufferHighWatermark()); + quic_stream_->encodeData(buffer, false); + EXPECT_EQ(0u, buffer.length()); + + // Unblock writing now, and this will write out 16kB data and cause stream to + // be blocked by the flow control limit. + EXPECT_CALL(quic_session_, WritevData(_, _, _, _, _, _)) + .WillOnce(Invoke([](quic::QuicStreamId, size_t write_length, quic::QuicStreamOffset, + quic::StreamSendingState state, bool, + quiche::QuicheOptional) { + return quic::QuicConsumedData{write_length, state != quic::NO_FIN}; + })); + EXPECT_CALL(stream_callbacks_, onBelowWriteBufferLowWatermark()); + quic_session_.OnCanWrite(); + EXPECT_TRUE(quic_stream_->flow_controller()->IsBlocked()); + + // Update flow control window to write all the buffered data. + quic::QuicWindowUpdateFrame window_update1(quic::kInvalidControlFrameId, quic_stream_->id(), + 32 * 1024); + quic_stream_->OnWindowUpdateFrame(window_update1); + EXPECT_CALL(quic_session_, WritevData(_, _, _, _, _, _)) + .WillOnce(Invoke([](quic::QuicStreamId, size_t write_length, quic::QuicStreamOffset, + quic::StreamSendingState state, bool, + quiche::QuicheOptional) { + return quic::QuicConsumedData{write_length, state != quic::NO_FIN}; + })); + quic_session_.OnCanWrite(); + // No data should be buffered at this point. 
+ + EXPECT_CALL(quic_session_, WritevData(_, _, _, _, _, _)) + .WillOnce(Invoke([](quic::QuicStreamId, size_t, quic::QuicStreamOffset, + quic::StreamSendingState state, bool, + quiche::QuicheOptional) { + return quic::QuicConsumedData{0u, state != quic::NO_FIN}; + })); + // Send more data. If watermark bytes counting were not cleared in previous + // OnCanWrite, this write would have caused the stream to exceed its high watermark. + std::string request1(16 * 1024 - 3, 'a'); + Buffer::OwnedImpl buffer1(request1); + quic_stream_->encodeData(buffer1, false); + // Buffering more trailers will cause stream to reach high watermark, but + // because trailers closes the stream, no callback should be triggered. + quic_stream_->encodeTrailers(request_trailers_); + + EXPECT_CALL(stream_callbacks_, onResetStream(_, _)); +} + } // namespace Quic } // namespace Envoy diff --git a/test/extensions/quic_listeners/quiche/envoy_quic_dispatcher_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_dispatcher_test.cc index 6162165935dbd..c5b6e6c2e7af0 100644 --- a/test/extensions/quic_listeners/quiche/envoy_quic_dispatcher_test.cc +++ b/test/extensions/quic_listeners/quiche/envoy_quic_dispatcher_test.cc @@ -9,6 +9,7 @@ #include "quiche/quic/core/quic_dispatcher.h" #include "quiche/quic/test_tools/quic_dispatcher_peer.h" #include "quiche/quic/test_tools/crypto_test_utils.h" + #include "quiche/quic/test_tools/quic_test_utils.h" #include "quiche/common/platform/api/quiche_text_utils.h" #pragma GCC diagnostic pop @@ -25,7 +26,9 @@ #include "extensions/quic_listeners/quiche/platform/envoy_quic_clock.h" #include "extensions/quic_listeners/quiche/envoy_quic_utils.h" #include "extensions/quic_listeners/quiche/envoy_quic_dispatcher.h" -#include "extensions/quic_listeners/quiche/envoy_quic_fake_proof_source.h" +#include "extensions/quic_listeners/quiche/envoy_quic_server_session.h" +#include "test/extensions/quic_listeners/quiche/test_proof_source.h" +#include 
"test/extensions/quic_listeners/quiche/test_utils.h" #include "extensions/quic_listeners/quiche/envoy_quic_alarm_factory.h" #include "extensions/quic_listeners/quiche/envoy_quic_utils.h" #include "extensions/transport_sockets/well_known_names.h" @@ -44,20 +47,30 @@ namespace { const size_t kNumSessionsToCreatePerLoopForTests = 16; } -class EnvoyQuicDispatcherTest : public testing::TestWithParam, +class EnvoyQuicDispatcherTest : public QuicMultiVersionTest, protected Logger::Loggable { public: EnvoyQuicDispatcherTest() - : version_(GetParam()), api_(Api::createApiForTest(time_system_)), + : version_(GetParam().first), api_(Api::createApiForTest(time_system_)), dispatcher_(api_->allocateDispatcher("test_thread")), listen_socket_(std::make_unique>>( + Network::NetworkSocketTrait>>( Network::Test::getCanonicalLoopbackAddress(version_), nullptr, /*bind*/ true)), - connection_helper_(*dispatcher_), + connection_helper_(*dispatcher_), proof_source_(new TestProofSource()), crypto_config_(quic::QuicCryptoServerConfig::TESTING, quic::QuicRandom::GetInstance(), - std::make_unique(), + std::unique_ptr(proof_source_), quic::KeyExchangeSource::Default()), - version_manager_(quic::CurrentSupportedVersions()), + version_manager_([]() { + if (GetParam().second == QuicVersionType::GquicQuicCrypto) { + return quic::CurrentSupportedVersionsWithQuicCrypto(); + } + bool use_http3 = GetParam().second == QuicVersionType::Iquic; + SetQuicReloadableFlag(quic_disable_version_draft_29, !use_http3); + SetQuicReloadableFlag(quic_disable_version_draft_27, !use_http3); + SetQuicReloadableFlag(quic_disable_version_draft_25, !use_http3); + return quic::CurrentSupportedVersions(); + }()), + quic_version_(version_manager_.GetSupportedVersions()[0]), listener_stats_({ALL_LISTENER_STATS(POOL_COUNTER(listener_config_.listenerScope()), POOL_GAUGE(listener_config_.listenerScope()), POOL_HISTOGRAM(listener_config_.listenerScope()))}), @@ -95,40 +108,108 @@ class EnvoyQuicDispatcherTest : public 
testing::TestWithParamrun(Event::Dispatcher::RunType::NonBlock); } - // TODO(bencebeky): Factor out parts common with - // ActiveQuicListenerTest::SendFullCHLO() to test_utils. - std::unique_ptr - createFullChloPacket(quic::QuicSocketAddress client_address) { + void processValidChloPacketAndCheckStatus(bool should_buffer) { + quic::QuicSocketAddress peer_addr(version_ == Network::Address::IpVersion::v4 + ? quic::QuicIpAddress::Loopback4() + : quic::QuicIpAddress::Loopback6(), + 54321); + quic::QuicBufferedPacketStore* buffered_packets = + quic::test::QuicDispatcherPeer::GetBufferedPackets(&envoy_quic_dispatcher_); + if (!should_buffer) { + // Set QuicDispatcher::new_sessions_allowed_per_event_loop_ to + // |kNumSessionsToCreatePerLoopForTests| so that received CHLOs can be + // processed immediately. + envoy_quic_dispatcher_.ProcessBufferedChlos(kNumSessionsToCreatePerLoopForTests); + EXPECT_FALSE(buffered_packets->HasChlosBuffered()); + EXPECT_FALSE(buffered_packets->HasBufferedPackets(connection_id_)); + } + + // Create a Quic Crypto or TLS1.3 CHLO packet. EnvoyQuicClock clock(*dispatcher_); - quic::CryptoHandshakeMessage chlo = quic::test::crypto_test_utils::GenerateDefaultInchoateCHLO( - &clock, quic::AllSupportedVersions()[0].transport_version, &crypto_config_); - chlo.SetVector(quic::kCOPT, quic::QuicTagVector{quic::kREJ}); - chlo.SetStringPiece(quic::kSNI, "www.abc.com"); - quic::CryptoHandshakeMessage full_chlo; - quic::QuicReferenceCountedPointer signed_config( - new quic::QuicSignedServerConfig); - quic::QuicCompressedCertsCache cache( - quic::QuicCompressedCertsCache::kQuicCompressedCertsCacheSize); - quic::test::crypto_test_utils::GenerateFullCHLO( - chlo, &crypto_config_, - envoyAddressInstanceToQuicSocketAddress(listen_socket_->localAddress()), client_address, - quic::AllSupportedVersions()[0].transport_version, &clock, signed_config, &cache, - &full_chlo); - // Overwrite version label to highest current supported version. 
- full_chlo.SetVersion(quic::kVER, quic::CurrentSupportedVersions()[0]); - quic::QuicConfig quic_config; - quic_config.ToHandshakeMessage(&full_chlo, - quic::CurrentSupportedVersions()[0].transport_version); + Buffer::OwnedImpl payload = generateChloPacketToSend( + quic_version_, quic_config_, crypto_config_, connection_id_, clock, + envoyIpAddressToQuicSocketAddress(listen_socket_->localAddress()->ip()), peer_addr, + "test.example.org"); + Buffer::RawSliceVector slice = payload.getRawSlices(); + ASSERT(slice.size() == 1); + auto encrypted_packet = std::make_unique( + static_cast(slice[0].mem_), slice[0].len_); + std::unique_ptr received_packet = + std::unique_ptr( + quic::test::ConstructReceivedPacket(*encrypted_packet, clock.Now())); + + envoy_quic_dispatcher_.ProcessPacket( + envoyIpAddressToQuicSocketAddress(listen_socket_->localAddress()->ip()), peer_addr, + *received_packet); + + if (should_buffer) { + // Incoming CHLO packet is buffered, because ProcessPacket() is called before + // ProcessBufferedChlos(). + EXPECT_TRUE(buffered_packets->HasChlosBuffered()); + EXPECT_TRUE(buffered_packets->HasBufferedPackets(connection_id_)); + + // Process the buffered CHLO now. + envoy_quic_dispatcher_.ProcessBufferedChlos(kNumSessionsToCreatePerLoopForTests); + } + + EXPECT_FALSE(buffered_packets->HasChlosBuffered()); + EXPECT_FALSE(buffered_packets->HasBufferedPackets(connection_id_)); + + // A new QUIC connection is created and its filter installed based on self and peer address. 
+ EXPECT_EQ(1u, envoy_quic_dispatcher_.session_map().size()); + quic::QuicSession* session = + envoy_quic_dispatcher_.session_map().find(connection_id_)->second.get(); + ASSERT(session != nullptr); + EXPECT_TRUE(session->IsEncryptionEstablished()); + EXPECT_EQ(1u, connection_handler_.numConnections()); + auto envoy_connection = static_cast(session); + EXPECT_EQ("test.example.org", envoy_connection->requestedServerName()); + EXPECT_EQ(peer_addr, + envoyIpAddressToQuicSocketAddress(envoy_connection->remoteAddress()->ip())); + ASSERT(envoy_connection->localAddress() != nullptr); + EXPECT_EQ(*listen_socket_->localAddress(), *envoy_connection->localAddress()); + } - std::string packet_content(full_chlo.GetSerialized().AsStringPiece()); - std::unique_ptr encrypted_packet( - quic::test::ConstructEncryptedPacket(connection_id_, quic::EmptyQuicConnectionId(), - /*version_flag=*/true, /*reset_flag*/ false, - /*packet_number=*/1, packet_content)); - return std::unique_ptr( - quic::test::ConstructReceivedPacket(*encrypted_packet, clock.Now())); + void processValidChloPacketAndInitializeFilters(bool should_buffer) { + Network::MockFilterChainManager filter_chain_manager; + std::shared_ptr read_filter(new Network::MockReadFilter()); + Network::MockConnectionCallbacks network_connection_callbacks; + testing::StrictMock read_total; + testing::StrictMock read_current; + testing::StrictMock write_total; + testing::StrictMock write_current; + + std::vector filter_factory( + {[&](Network::FilterManager& filter_manager) { + filter_manager.addReadFilter(read_filter); + read_filter->callbacks_->connection().addConnectionCallbacks( + network_connection_callbacks); + read_filter->callbacks_->connection().setConnectionStats( + {read_total, read_current, write_total, write_current, nullptr, nullptr}); + }}); + EXPECT_CALL(proof_source_->filterChain(), networkFilterFactories()) + .WillOnce(ReturnRef(filter_factory)); + EXPECT_CALL(listener_config_, filterChainFactory()); + 
EXPECT_CALL(listener_config_.filter_chain_factory_, createNetworkFilterChain(_, _)) + .WillOnce(Invoke([](Network::Connection& connection, + const std::vector& filter_factories) { + EXPECT_EQ(1u, filter_factories.size()); + Server::Configuration::FilterChainUtility::buildFilterChain(connection, filter_factories); + return true; + })); + EXPECT_CALL(*read_filter, onNewConnection()) + // Stop iteration to avoid calling getRead/WriteBuffer(). + .WillOnce(Return(Network::FilterStatus::StopIteration)); + EXPECT_CALL(network_connection_callbacks, onEvent(Network::ConnectionEvent::Connected)); + + processValidChloPacketAndCheckStatus(should_buffer); + EXPECT_CALL(network_connection_callbacks, onEvent(Network::ConnectionEvent::LocalClose)); + // Shutdown() to close the connection. + envoy_quic_dispatcher_.Shutdown(); } + bool quicVersionUsesTls() { return quic_version_.UsesTls(); } + protected: Network::Address::IpVersion version_; Event::SimulatedTimeSystemHelper time_system_; @@ -136,10 +217,11 @@ class EnvoyQuicDispatcherTest : public testing::TestWithParam listener_config_; Server::ListenerStats listener_stats_; Server::PerHandlerListenerStats per_worker_stats_; @@ -148,217 +230,29 @@ class EnvoyQuicDispatcherTest : public testing::TestWithParamlocalAddress(), *socket.localAddress()); - EXPECT_EQ(Extensions::TransportSockets::TransportProtocolNames::get().Quic, - socket.detectedTransportProtocol()); - EXPECT_EQ(peer_addr, envoyAddressInstanceToQuicSocketAddress(socket.remoteAddress())); - return &filter_chain; - })); - std::shared_ptr read_filter(new Network::MockReadFilter()); - Network::MockConnectionCallbacks network_connection_callbacks; - testing::StrictMock read_total; - testing::StrictMock read_current; - testing::StrictMock write_total; - testing::StrictMock write_current; - - std::vector filter_factory( - {[&](Network::FilterManager& filter_manager) { - filter_manager.addReadFilter(read_filter); - 
read_filter->callbacks_->connection().addConnectionCallbacks(network_connection_callbacks); - read_filter->callbacks_->connection().setConnectionStats( - {read_total, read_current, write_total, write_current, nullptr, nullptr}); - }}); - EXPECT_CALL(filter_chain, networkFilterFactories()).WillOnce(ReturnRef(filter_factory)); - EXPECT_CALL(listener_config_, filterChainFactory()); - EXPECT_CALL(listener_config_.filter_chain_factory_, createNetworkFilterChain(_, _)) - .WillOnce(Invoke([](Network::Connection& connection, - const std::vector& filter_factories) { - EXPECT_EQ(1u, filter_factories.size()); - Server::Configuration::FilterChainUtility::buildFilterChain(connection, filter_factories); - return true; - })); - EXPECT_CALL(*read_filter, onNewConnection()) - // Stop iteration to avoid calling getRead/WriteBuffer(). - .WillOnce(Return(Network::FilterStatus::StopIteration)); - EXPECT_CALL(network_connection_callbacks, onEvent(Network::ConnectionEvent::Connected)); - - quic::QuicBufferedPacketStore* buffered_packets = - quic::test::QuicDispatcherPeer::GetBufferedPackets(&envoy_quic_dispatcher_); - EXPECT_FALSE(buffered_packets->HasChlosBuffered()); - EXPECT_FALSE(buffered_packets->HasBufferedPackets(connection_id_)); - - // Set QuicDispatcher::new_sessions_allowed_per_event_loop_ to - // |kNumSessionsToCreatePerLoopForTests| so that received CHLOs can be - // processed immediately. - envoy_quic_dispatcher_.ProcessBufferedChlos(kNumSessionsToCreatePerLoopForTests); - - std::unique_ptr received_packet = createFullChloPacket(peer_addr); - envoy_quic_dispatcher_.ProcessPacket( - envoyAddressInstanceToQuicSocketAddress(listen_socket_->localAddress()), peer_addr, - *received_packet); - - EXPECT_FALSE(buffered_packets->HasChlosBuffered()); - EXPECT_FALSE(buffered_packets->HasBufferedPackets(connection_id_)); - - // A new QUIC connection is created and its filter installed based on self and peer address. 
- EXPECT_EQ(1u, envoy_quic_dispatcher_.session_map().size()); - EXPECT_TRUE( - envoy_quic_dispatcher_.session_map().find(connection_id_)->second->IsEncryptionEstablished()); - EXPECT_EQ(1u, connection_handler_.numConnections()); - EXPECT_EQ("www.abc.com", read_filter->callbacks_->connection().requestedServerName()); - EXPECT_EQ(peer_addr, envoyAddressInstanceToQuicSocketAddress( - read_filter->callbacks_->connection().remoteAddress())); - EXPECT_EQ(*listen_socket_->localAddress(), *read_filter->callbacks_->connection().localAddress()); - EXPECT_CALL(network_connection_callbacks, onEvent(Network::ConnectionEvent::LocalClose)); - // Shutdown() to close the connection. - envoy_quic_dispatcher_.Shutdown(); + if (quicVersionUsesTls()) { + // QUICHE doesn't support 0-RTT TLS1.3 handshake yet. + processValidChloPacketAndCheckStatus(false); + // Shutdown() to close the connection. + envoy_quic_dispatcher_.Shutdown(); + return; + } + processValidChloPacketAndInitializeFilters(false); } TEST_P(EnvoyQuicDispatcherTest, CreateNewConnectionUponBufferedCHLO) { - quic::QuicSocketAddress peer_addr(version_ == Network::Address::IpVersion::v4 - ? 
quic::QuicIpAddress::Loopback4() - : quic::QuicIpAddress::Loopback6(), - 54321); - Network::MockFilterChain filter_chain; - Network::MockFilterChainManager filter_chain_manager; - EXPECT_CALL(listener_config_, filterChainManager()).WillOnce(ReturnRef(filter_chain_manager)); - EXPECT_CALL(filter_chain_manager, findFilterChain(_)) - .WillOnce(Invoke([&](const Network::ConnectionSocket& socket) { - EXPECT_EQ(*listen_socket_->localAddress(), *socket.localAddress()); - EXPECT_EQ(Extensions::TransportSockets::TransportProtocolNames::get().Quic, - socket.detectedTransportProtocol()); - EXPECT_EQ(peer_addr, envoyAddressInstanceToQuicSocketAddress(socket.remoteAddress())); - return &filter_chain; - })); - std::shared_ptr read_filter(new Network::MockReadFilter()); - Network::MockConnectionCallbacks network_connection_callbacks; - testing::StrictMock read_total; - testing::StrictMock read_current; - testing::StrictMock write_total; - testing::StrictMock write_current; - - std::vector filter_factory( - {[&](Network::FilterManager& filter_manager) { - filter_manager.addReadFilter(read_filter); - read_filter->callbacks_->connection().addConnectionCallbacks(network_connection_callbacks); - read_filter->callbacks_->connection().setConnectionStats( - {read_total, read_current, write_total, write_current, nullptr, nullptr}); - }}); - EXPECT_CALL(filter_chain, networkFilterFactories()).WillOnce(ReturnRef(filter_factory)); - EXPECT_CALL(listener_config_, filterChainFactory()); - EXPECT_CALL(listener_config_.filter_chain_factory_, createNetworkFilterChain(_, _)) - .WillOnce(Invoke([](Network::Connection& connection, - const std::vector& filter_factories) { - EXPECT_EQ(1u, filter_factories.size()); - Server::Configuration::FilterChainUtility::buildFilterChain(connection, filter_factories); - return true; - })); - EXPECT_CALL(*read_filter, onNewConnection()) - // Stop iteration to avoid calling getRead/WriteBuffer(). 
- .WillOnce(Return(Network::FilterStatus::StopIteration)); - EXPECT_CALL(network_connection_callbacks, onEvent(Network::ConnectionEvent::Connected)); - - quic::QuicBufferedPacketStore* buffered_packets = - quic::test::QuicDispatcherPeer::GetBufferedPackets(&envoy_quic_dispatcher_); - EXPECT_FALSE(buffered_packets->HasChlosBuffered()); - EXPECT_FALSE(buffered_packets->HasBufferedPackets(connection_id_)); - - // Incoming CHLO packet is buffered, because ProcessPacket() is called before - // ProcessBufferedChlos(). - std::unique_ptr received_packet = createFullChloPacket(peer_addr); - envoy_quic_dispatcher_.ProcessPacket( - envoyAddressInstanceToQuicSocketAddress(listen_socket_->localAddress()), peer_addr, - *received_packet); - EXPECT_TRUE(buffered_packets->HasChlosBuffered()); - EXPECT_TRUE(buffered_packets->HasBufferedPackets(connection_id_)); - - // Process buffered CHLO. - envoy_quic_dispatcher_.ProcessBufferedChlos(kNumSessionsToCreatePerLoopForTests); - EXPECT_FALSE(buffered_packets->HasChlosBuffered()); - EXPECT_FALSE(buffered_packets->HasBufferedPackets(connection_id_)); - - // A new QUIC connection is created and its filter installed based on self and peer address. - EXPECT_EQ(1u, envoy_quic_dispatcher_.session_map().size()); - EXPECT_TRUE( - envoy_quic_dispatcher_.session_map().find(connection_id_)->second->IsEncryptionEstablished()); - EXPECT_EQ(1u, connection_handler_.numConnections()); - EXPECT_EQ("www.abc.com", read_filter->callbacks_->connection().requestedServerName()); - EXPECT_EQ(peer_addr, envoyAddressInstanceToQuicSocketAddress( - read_filter->callbacks_->connection().remoteAddress())); - EXPECT_EQ(*listen_socket_->localAddress(), *read_filter->callbacks_->connection().localAddress()); - EXPECT_CALL(network_connection_callbacks, onEvent(Network::ConnectionEvent::LocalClose)); - // Shutdown() to close the connection. 
- envoy_quic_dispatcher_.Shutdown(); -} - -TEST_P(EnvoyQuicDispatcherTest, CloseConnectionDueToMissingFilterChain) { - quic::QuicSocketAddress peer_addr(version_ == Network::Address::IpVersion::v4 - ? quic::QuicIpAddress::Loopback4() - : quic::QuicIpAddress::Loopback6(), - 54321); - Network::MockFilterChainManager filter_chain_manager; - EXPECT_CALL(listener_config_, filterChainManager()).WillOnce(ReturnRef(filter_chain_manager)); - EXPECT_CALL(filter_chain_manager, findFilterChain(_)) - .WillOnce(Invoke([&](const Network::ConnectionSocket& socket) { - EXPECT_EQ(*listen_socket_->localAddress(), *socket.localAddress()); - EXPECT_EQ(peer_addr, envoyAddressInstanceToQuicSocketAddress(socket.remoteAddress())); - return nullptr; - })); - std::unique_ptr received_packet = createFullChloPacket(peer_addr); - envoy_quic_dispatcher_.ProcessBufferedChlos(kNumSessionsToCreatePerLoopForTests); - envoy_quic_dispatcher_.ProcessPacket( - envoyAddressInstanceToQuicSocketAddress(listen_socket_->localAddress()), peer_addr, - *received_packet); - EXPECT_EQ(0u, envoy_quic_dispatcher_.session_map().size()); - EXPECT_EQ(0u, connection_handler_.numConnections()); - EXPECT_TRUE(quic::test::QuicDispatcherPeer::GetTimeWaitListManager(&envoy_quic_dispatcher_) - ->IsConnectionIdInTimeWait(connection_id_)); - EXPECT_EQ(1u, listener_stats_.downstream_cx_total_.value()); - EXPECT_EQ(0u, listener_stats_.downstream_cx_active_.value()); - EXPECT_EQ(1u, listener_stats_.no_filter_chain_match_.value()); -} - -TEST_P(EnvoyQuicDispatcherTest, CloseConnectionDueToEmptyFilterChain) { - quic::QuicSocketAddress peer_addr(version_ == Network::Address::IpVersion::v4 - ? 
quic::QuicIpAddress::Loopback4() - : quic::QuicIpAddress::Loopback6(), - 54321); - Network::MockFilterChain filter_chain; - Network::MockFilterChainManager filter_chain_manager; - EXPECT_CALL(listener_config_, filterChainManager()).WillOnce(ReturnRef(filter_chain_manager)); - EXPECT_CALL(filter_chain_manager, findFilterChain(_)) - .WillOnce(Invoke([&](const Network::ConnectionSocket& socket) { - EXPECT_EQ(*listen_socket_->localAddress(), *socket.localAddress()); - EXPECT_EQ(peer_addr, envoyAddressInstanceToQuicSocketAddress(socket.remoteAddress())); - return &filter_chain; - })); - // Empty filter_factory should cause connection close. - std::vector filter_factory; - EXPECT_CALL(filter_chain, networkFilterFactories()).WillOnce(ReturnRef(filter_factory)); - - std::unique_ptr received_packet = createFullChloPacket(peer_addr); - envoy_quic_dispatcher_.ProcessBufferedChlos(kNumSessionsToCreatePerLoopForTests); - envoy_quic_dispatcher_.ProcessPacket( - envoyAddressInstanceToQuicSocketAddress(listen_socket_->localAddress()), peer_addr, - *received_packet); - EXPECT_EQ(0u, envoy_quic_dispatcher_.session_map().size()); - EXPECT_EQ(0u, connection_handler_.numConnections()); - EXPECT_TRUE(quic::test::QuicDispatcherPeer::GetTimeWaitListManager(&envoy_quic_dispatcher_) - ->IsConnectionIdInTimeWait(connection_id_)); + if (quicVersionUsesTls()) { + // QUICHE doesn't support 0-RTT TLS1.3 handshake yet. + processValidChloPacketAndCheckStatus(true); + // Shutdown() to close the connection. 
+ envoy_quic_dispatcher_.Shutdown(); + return; + } + processValidChloPacketAndInitializeFilters(true); } } // namespace Quic diff --git a/test/extensions/quic_listeners/quiche/envoy_quic_proof_source_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_proof_source_test.cc index d25e190f5a7ab..cbf66f511f503 100644 --- a/test/extensions/quic_listeners/quiche/envoy_quic_proof_source_test.cc +++ b/test/extensions/quic_listeners/quiche/envoy_quic_proof_source_test.cc @@ -1,11 +1,22 @@ +#include #include #include -#include "extensions/quic_listeners/quiche/envoy_quic_fake_proof_source.h" -#include "extensions/quic_listeners/quiche/envoy_quic_fake_proof_verifier.h" +#include "extensions/quic_listeners/quiche/envoy_quic_proof_source.h" +#include "extensions/quic_listeners/quiche/envoy_quic_proof_verifier.h" +#include "extensions/quic_listeners/quiche/envoy_quic_utils.h" +#include "extensions/transport_sockets/tls/context_config_impl.h" + +#include "test/mocks/network/mocks.h" +#include "test/mocks/ssl/mocks.h" #include "gmock/gmock.h" #include "gtest/gtest.h" +#include "quiche/quic/test_tools/test_certificates.h" + +using testing::Invoke; +using testing::Return; +using testing::ReturnRef; namespace Envoy { @@ -13,62 +24,280 @@ namespace Quic { class TestGetProofCallback : public quic::ProofSource::Callback { public: - TestGetProofCallback(bool& called, std::string signature, std::string leaf_cert_scts, - std::vector certs) - : called_(called), expected_signature_(std::move(signature)), - expected_leaf_certs_scts_(std::move(leaf_cert_scts)), expected_certs_(std::move(certs)) {} + TestGetProofCallback(bool& called, bool should_succeed, const std::string& server_config, + quic::QuicTransportVersion& version, quiche::QuicheStringPiece chlo_hash, + Network::FilterChain& filter_chain) + : called_(called), should_succeed_(should_succeed), server_config_(server_config), + version_(version), chlo_hash_(chlo_hash), expected_filter_chain_(filter_chain) { + 
ON_CALL(client_context_config_, cipherSuites) + .WillByDefault(ReturnRef( + Extensions::TransportSockets::Tls::ClientContextConfigImpl::DEFAULT_CIPHER_SUITES)); + ON_CALL(client_context_config_, ecdhCurves) + .WillByDefault( + ReturnRef(Extensions::TransportSockets::Tls::ClientContextConfigImpl::DEFAULT_CURVES)); + const std::string alpn("h2,http/1.1"); + ON_CALL(client_context_config_, alpnProtocols()).WillByDefault(ReturnRef(alpn)); + const std::string empty_string; + ON_CALL(client_context_config_, serverNameIndication()).WillByDefault(ReturnRef(empty_string)); + ON_CALL(client_context_config_, signingAlgorithmsForTest()) + .WillByDefault(ReturnRef(empty_string)); + ON_CALL(client_context_config_, certificateValidationContext()) + .WillByDefault(Return(&cert_validation_ctx_config_)); + + // Getting the last cert in the chain as the root CA cert. + std::string cert_chain(quic::test::kTestCertificateChainPem); + const std::string& root_ca_cert = + cert_chain.substr(cert_chain.rfind("-----BEGIN CERTIFICATE-----")); + const std::string path_string("some_path"); + ON_CALL(cert_validation_ctx_config_, caCert()).WillByDefault(ReturnRef(root_ca_cert)); + ON_CALL(cert_validation_ctx_config_, caCertPath()).WillByDefault(ReturnRef(path_string)); + ON_CALL(cert_validation_ctx_config_, trustChainVerification) + .WillByDefault(Return(envoy::extensions::transport_sockets::tls::v3:: + CertificateValidationContext::VERIFY_TRUST_CHAIN)); + ON_CALL(cert_validation_ctx_config_, allowExpiredCertificate()).WillByDefault(Return(true)); + const std::string crl_list; + ON_CALL(cert_validation_ctx_config_, certificateRevocationList()) + .WillByDefault(ReturnRef(crl_list)); + ON_CALL(cert_validation_ctx_config_, certificateRevocationListPath()) + .WillByDefault(ReturnRef(path_string)); + const std::vector empty_string_list; + ON_CALL(cert_validation_ctx_config_, verifySubjectAltNameList()) + .WillByDefault(ReturnRef(empty_string_list)); + const std::vector san_matchers; + 
ON_CALL(cert_validation_ctx_config_, subjectAltNameMatchers()) + .WillByDefault(ReturnRef(san_matchers)); + ON_CALL(cert_validation_ctx_config_, verifyCertificateHashList()) + .WillByDefault(ReturnRef(empty_string_list)); + ON_CALL(cert_validation_ctx_config_, verifyCertificateSpkiList()) + .WillByDefault(ReturnRef(empty_string_list)); + verifier_ = + std::make_unique(store_, client_context_config_, time_system_); + } // quic::ProofSource::Callback void Run(bool ok, const quic::QuicReferenceCountedPointer& chain, const quic::QuicCryptoProof& proof, std::unique_ptr details) override { - EXPECT_TRUE(ok); - EXPECT_EQ(expected_signature_, proof.signature); - EXPECT_EQ(expected_leaf_certs_scts_, proof.leaf_cert_scts); - EXPECT_EQ(expected_certs_, chain->certs); - EXPECT_EQ(nullptr, details); called_ = true; + if (!should_succeed_) { + EXPECT_FALSE(ok); + return; + }; + EXPECT_TRUE(ok); + EXPECT_EQ(2, chain->certs.size()); + std::string error; + EXPECT_EQ(quic::QUIC_SUCCESS, + verifier_->VerifyProof("www.example.org", 54321, server_config_, version_, chlo_hash_, + chain->certs, proof.leaf_cert_scts, proof.signature, nullptr, + &error, nullptr, nullptr)) + << error; + EXPECT_EQ(&expected_filter_chain_, + &static_cast(details.get())->filterChain()); } private: bool& called_; - std::string expected_signature_; - std::string expected_leaf_certs_scts_; - std::vector expected_certs_; + bool should_succeed_; + const std::string& server_config_; + const quic::QuicTransportVersion& version_; + quiche::QuicheStringPiece chlo_hash_; + Network::FilterChain& expected_filter_chain_; + NiceMock store_; + Event::GlobalTimeSystem time_system_; + NiceMock client_context_config_; + NiceMock cert_validation_ctx_config_; + std::unique_ptr verifier_; }; -class EnvoyQuicFakeProofSourceTest : public ::testing::Test { +class TestSignatureCallback : public quic::ProofSource::SignatureCallback { +public: + TestSignatureCallback(bool expect_success) : expect_success_(expect_success) {} + 
~TestSignatureCallback() override { EXPECT_TRUE(run_called_); } + + // quic::ProofSource::SignatureCallback + void Run(bool ok, std::string, std::unique_ptr) override { + EXPECT_EQ(expect_success_, ok); + run_called_ = true; + } + +private: + bool expect_success_; + bool run_called_{false}; +}; + +class EnvoyQuicProofSourceTest : public ::testing::Test { +public: + EnvoyQuicProofSourceTest() + : server_address_(quic::QuicIpAddress::Loopback4(), 12345), + client_address_(quic::QuicIpAddress::Loopback4(), 54321), + transport_socket_factory_(std::make_unique()), + listener_stats_({ALL_LISTENER_STATS(POOL_COUNTER(listener_config_.listenerScope()), + POOL_GAUGE(listener_config_.listenerScope()), + POOL_HISTOGRAM(listener_config_.listenerScope()))}), + proof_source_(listen_socket_, filter_chain_manager_, listener_stats_) {} + + void expectCertChainAndPrivateKey(const std::string& cert, bool expect_private_key) { + EXPECT_CALL(listen_socket_, ioHandle()).Times(expect_private_key ? 2u : 1u); + EXPECT_CALL(filter_chain_manager_, findFilterChain(_)) + .WillRepeatedly(Invoke([&](const Network::ConnectionSocket& connection_socket) { + EXPECT_EQ(*quicAddressToEnvoyAddressInstance(server_address_), + *connection_socket.localAddress()); + EXPECT_EQ(*quicAddressToEnvoyAddressInstance(client_address_), + *connection_socket.remoteAddress()); + EXPECT_EQ(Extensions::TransportSockets::TransportProtocolNames::get().Quic, + connection_socket.detectedTransportProtocol()); + EXPECT_EQ("h2", connection_socket.requestedApplicationProtocols()[0]); + return &filter_chain_; + })); + EXPECT_CALL(filter_chain_, transportSocketFactory()) + .WillRepeatedly(ReturnRef(transport_socket_factory_)); + + std::vector> tls_cert_configs{ + std::reference_wrapper(tls_cert_config_)}; + EXPECT_CALL(dynamic_cast( + transport_socket_factory_.serverContextConfig()), + tlsCertificates()) + .WillRepeatedly(Return(tls_cert_configs)); + EXPECT_CALL(tls_cert_config_, certificateChain()).WillOnce(ReturnRef(cert)); + 
if (expect_private_key) { + EXPECT_CALL(tls_cert_config_, privateKey()).WillOnce(ReturnRef(pkey_)); + } + } + + void testGetProof(bool expect_success) { + bool called = false; + auto callback = std::make_unique(called, expect_success, server_config_, + version_, chlo_hash_, filter_chain_); + proof_source_.GetProof(server_address_, client_address_, hostname_, server_config_, version_, + chlo_hash_, std::move(callback)); + EXPECT_TRUE(called); + } + protected: std::string hostname_{"www.fake.com"}; quic::QuicSocketAddress server_address_; + quic::QuicSocketAddress client_address_; quic::QuicTransportVersion version_{quic::QUIC_VERSION_UNSUPPORTED}; - quiche::QuicheStringPiece chlo_hash_{""}; + quiche::QuicheStringPiece chlo_hash_{"aaaaa"}; std::string server_config_{"Server Config"}; - std::vector expected_certs_{"Fake cert"}; - std::string expected_signature_{absl::StrCat("Fake signature for { ", server_config_, " }")}; - EnvoyQuicFakeProofSource proof_source_; - EnvoyQuicFakeProofVerifier proof_verifier_; + std::string expected_certs_{quic::test::kTestCertificateChainPem}; + std::string pkey_{quic::test::kTestCertificatePrivateKeyPem}; + Network::MockFilterChain filter_chain_; + Network::MockFilterChainManager filter_chain_manager_; + Network::MockListenSocket listen_socket_; + testing::NiceMock listener_config_; + QuicServerTransportSocketFactory transport_socket_factory_; + Ssl::MockTlsCertificateConfig tls_cert_config_; + Server::ListenerStats listener_stats_; + EnvoyQuicProofSource proof_source_; }; -TEST_F(EnvoyQuicFakeProofSourceTest, TestGetProof) { +TEST_F(EnvoyQuicProofSourceTest, TestGetProof) { + expectCertChainAndPrivateKey(expected_certs_, true); + testGetProof(true); +} + +TEST_F(EnvoyQuicProofSourceTest, GetProofFailNoFilterChain) { bool called = false; - auto callback = std::make_unique(called, expected_signature_, - "Fake timestamp", expected_certs_); - proof_source_.GetProof(server_address_, hostname_, server_config_, version_, chlo_hash_, - 
std::move(callback)); + auto callback = std::make_unique(called, false, server_config_, version_, + chlo_hash_, filter_chain_); + EXPECT_CALL(listen_socket_, ioHandle()); + EXPECT_CALL(filter_chain_manager_, findFilterChain(_)) + .WillRepeatedly(Invoke([&](const Network::ConnectionSocket&) { return nullptr; })); + proof_source_.GetProof(server_address_, client_address_, hostname_, server_config_, version_, + chlo_hash_, std::move(callback)); EXPECT_TRUE(called); } -TEST_F(EnvoyQuicFakeProofSourceTest, TestVerifyProof) { - EXPECT_EQ(quic::QUIC_SUCCESS, - proof_verifier_.VerifyProof(hostname_, /*port=*/0, server_config_, version_, chlo_hash_, - expected_certs_, "", expected_signature_, nullptr, nullptr, - nullptr, nullptr)); - std::vector wrong_certs{"wrong cert"}; - EXPECT_EQ(quic::QUIC_FAILURE, - proof_verifier_.VerifyProof(hostname_, /*port=*/0, server_config_, version_, chlo_hash_, - wrong_certs, "Fake timestamp", expected_signature_, nullptr, - nullptr, nullptr, nullptr)); +TEST_F(EnvoyQuicProofSourceTest, GetProofFailInvalidCert) { + std::string invalid_cert{R"(-----BEGIN CERTIFICATE----- + invalid certificate + -----END CERTIFICATE-----)"}; + expectCertChainAndPrivateKey(invalid_cert, false); + testGetProof(false); +} + +TEST_F(EnvoyQuicProofSourceTest, GetProofFailInvalidPublicKeyInCert) { + // This is a valid cert with RSA public key. But we don't support RSA key with + // length < 1024. 
+ std::string cert_with_rsa_1024{R"(-----BEGIN CERTIFICATE----- +MIIC2jCCAkOgAwIBAgIUDBHEwlCvLGh3w0O8VwIW+CjYXY8wDQYJKoZIhvcNAQEL +BQAwfzELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAk1BMRIwEAYDVQQHDAlDYW1icmlk +Z2UxDzANBgNVBAoMBkdvb2dsZTEOMAwGA1UECwwFZW52b3kxDTALBgNVBAMMBHRl +c3QxHzAdBgkqhkiG9w0BCQEWEGRhbnpoQGdvb2dsZS5jb20wHhcNMjAwODA0MTg1 +OTQ4WhcNMjEwODA0MTg1OTQ4WjB/MQswCQYDVQQGEwJVUzELMAkGA1UECAwCTUEx +EjAQBgNVBAcMCUNhbWJyaWRnZTEPMA0GA1UECgwGR29vZ2xlMQ4wDAYDVQQLDAVl +bnZveTENMAsGA1UEAwwEdGVzdDEfMB0GCSqGSIb3DQEJARYQZGFuemhAZ29vZ2xl +LmNvbTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAykCZNjxws+sNfnp18nsp ++7LN81J/RSwAHLkGnwEtd3OxSUuiCYHgYlyuEAwJdf99+SaFrgcA4LvYJ/Mhm/fZ +msnpfsAvoQ49+ax0fm1x56ii4KgNiu9iFsWwwVmkHkgjlRcRsmhr4WeIf14Yvpqs +JNsbNVSCZ4GLQ2V6BqIHlhcCAwEAAaNTMFEwHQYDVR0OBBYEFDO1KPYcdRmeKDvL +H2Yzj8el2Xe1MB8GA1UdIwQYMBaAFDO1KPYcdRmeKDvLH2Yzj8el2Xe1MA8GA1Ud +EwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADgYEAnwWVmwSK9TDml7oHGBavzOC1 +f/lOd5zz2e7Tu2pUtx1sX1tlKph1D0ANpJwxRV78R2hjmynLSl7h4Ual9NMubqkD +x96rVeUbRJ/qU4//nNM/XQa9vIAIcTZ0jFhmb0c3R4rmoqqC3vkSDwtaE5yuS5T4 +GUy+n0vQNB0cXGzgcGI= +-----END CERTIFICATE-----)"}; + expectCertChainAndPrivateKey(cert_with_rsa_1024, false); + testGetProof(false); +} + +TEST_F(EnvoyQuicProofSourceTest, UnexpectedPrivateKey) { + EXPECT_CALL(listen_socket_, ioHandle()); + EXPECT_CALL(filter_chain_manager_, findFilterChain(_)) + .WillOnce(Invoke([&](const Network::ConnectionSocket&) { return &filter_chain_; })); + auto server_context_config = std::make_unique(); + auto server_context_config_ptr = server_context_config.get(); + QuicServerTransportSocketFactory transport_socket_factory(std::move(server_context_config)); + EXPECT_CALL(filter_chain_, transportSocketFactory()) + .WillRepeatedly(ReturnRef(transport_socket_factory)); + + Ssl::MockTlsCertificateConfig tls_cert_config; + std::vector> tls_cert_configs{ + std::reference_wrapper(tls_cert_config)}; + EXPECT_CALL(*server_context_config_ptr, tlsCertificates()) + .WillRepeatedly(Return(tls_cert_configs)); + 
std::string rsa_pkey_1024_len(R"(-----BEGIN RSA PRIVATE KEY----- +MIICWwIBAAKBgQC79hDq/OwN3ke3EF6Ntdi9R+VSrl9MStk992l1us8lZhq+e0zU +OlvxbUeZ8wyVkzs1gqI1it1IwF+EpdGhHhjggZjg040GD3HWSuyCzpHh+nLwJxtQ +D837PCg0zl+TnKv1YjY3I1F3trGhIqfd2B6pgaJ4hpr+0hdqnKP0Htd4DwIDAQAB +AoGASNypUD59Tx70k+1fifWNMEq3heacgJmfPxsyoXWqKSg8g8yOStLYo20mTXJf +VXg+go7CTJkpELOqE2SoL5nYMD0D/YIZCgDx85k0GWHdA6udNn4to95ZTeZPrBHx +T0QNQHnZI3A7RwLinO60IRY0NYzhkTEBxIuvIY6u0DVbrAECQQDpshbxK3DHc7Yi +Au7BUsxP8RbG4pP5IIVoD4YvJuwUkdrfrwejqTdkfchJJc+Gu/+h8vy7eASPHLLT +NBk5wFoPAkEAzeaKnx0CgNs0RX4+sSF727FroD98VUM38OFEJQ6U9OAWGvaKd8ey +yAYUjR2Sl5ZRyrwWv4IqyWgUGhZqNG0CAQJAPTjjm8DGpenhcB2WkNzxG4xMbEQV +gfGMIYvXmmi29liTn4AKH00IbvIo00jtih2cRcATh8VUZG2fR4dhiGik7wJAWSwS +NwzaS7IjtkERp6cHvELfiLxV/Zsp/BGjcKUbD96I1E6X834ySHyRo/f9x9bbP4Es +HO6j1yxTIGU6w8++AQJACdFPnRidOaj5oJmcZq0s6WGTYfegjTOKgi5KQzO0FTwG +qGm130brdD+1U1EJnEFmleLZ/W6mEi3MxcKpWOpTqQ== +-----END RSA PRIVATE KEY-----)"); + EXPECT_CALL(tls_cert_config, privateKey()).WillOnce(ReturnRef(rsa_pkey_1024_len)); + proof_source_.ComputeTlsSignature(server_address_, client_address_, hostname_, + SSL_SIGN_RSA_PSS_RSAE_SHA256, "payload", + std::make_unique(false)); +} + +TEST_F(EnvoyQuicProofSourceTest, InvalidPrivateKey) { + EXPECT_CALL(listen_socket_, ioHandle()); + EXPECT_CALL(filter_chain_manager_, findFilterChain(_)) + .WillOnce(Invoke([&](const Network::ConnectionSocket&) { return &filter_chain_; })); + auto server_context_config = std::make_unique(); + auto server_context_config_ptr = server_context_config.get(); + QuicServerTransportSocketFactory transport_socket_factory(std::move(server_context_config)); + EXPECT_CALL(filter_chain_, transportSocketFactory()) + .WillRepeatedly(ReturnRef(transport_socket_factory)); + + Ssl::MockTlsCertificateConfig tls_cert_config; + std::vector> tls_cert_configs{ + std::reference_wrapper(tls_cert_config)}; + EXPECT_CALL(*server_context_config_ptr, tlsCertificates()) + .WillRepeatedly(Return(tls_cert_configs)); + std::string 
invalid_pkey("abcdefg"); + EXPECT_CALL(tls_cert_config, privateKey()).WillOnce(ReturnRef(invalid_pkey)); + proof_source_.ComputeTlsSignature(server_address_, client_address_, hostname_, + SSL_SIGN_RSA_PSS_RSAE_SHA256, "payload", + std::make_unique(false)); } } // namespace Quic diff --git a/test/extensions/quic_listeners/quiche/envoy_quic_proof_verifier_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_proof_verifier_test.cc new file mode 100644 index 0000000000000..4a1dfe144dd3c --- /dev/null +++ b/test/extensions/quic_listeners/quiche/envoy_quic_proof_verifier_test.cc @@ -0,0 +1,252 @@ +#include +#include + +#include "extensions/quic_listeners/quiche/envoy_quic_proof_verifier.h" +#include "extensions/transport_sockets/tls/context_config_impl.h" + +#include "test/mocks/ssl/mocks.h" +#include "test/mocks/stats/mocks.h" +#include "test/test_common/test_time.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "quiche/quic/core/crypto/certificate_view.h" +#include "quiche/quic/test_tools/test_certificates.h" + +using testing::NiceMock; +using testing::Return; +using testing::ReturnRef; + +namespace Envoy { +namespace Quic { + +class EnvoyQuicProofVerifierTest : public testing::Test { +public: + EnvoyQuicProofVerifierTest() + : root_ca_cert_(cert_chain_.substr(cert_chain_.rfind("-----BEGIN CERTIFICATE-----"))), + leaf_cert_([=]() { + std::stringstream pem_stream(cert_chain_); + std::vector chain = quic::CertificateView::LoadPemFromStream(&pem_stream); + return chain[0]; + }()) { + ON_CALL(client_context_config_, cipherSuites) + .WillByDefault(ReturnRef( + Extensions::TransportSockets::Tls::ClientContextConfigImpl::DEFAULT_CIPHER_SUITES)); + ON_CALL(client_context_config_, ecdhCurves) + .WillByDefault( + ReturnRef(Extensions::TransportSockets::Tls::ClientContextConfigImpl::DEFAULT_CURVES)); + ON_CALL(client_context_config_, alpnProtocols()).WillByDefault(ReturnRef(alpn_)); + ON_CALL(client_context_config_, 
serverNameIndication()).WillByDefault(ReturnRef(empty_string_)); + ON_CALL(client_context_config_, signingAlgorithmsForTest()).WillByDefault(ReturnRef(sig_algs_)); + ON_CALL(client_context_config_, certificateValidationContext()) + .WillByDefault(Return(&cert_validation_ctx_config_)); + } + + // Since this cert chain contains an expired cert, we can flip allow_expired_cert to test the code + // paths for BoringSSL cert verification success and failure. + void configCertVerificationDetails(bool allow_expired_cert) { + // Getting the last cert in the chain as the root CA cert. + EXPECT_CALL(cert_validation_ctx_config_, caCert()).WillRepeatedly(ReturnRef(root_ca_cert_)); + EXPECT_CALL(cert_validation_ctx_config_, caCertPath()).WillRepeatedly(ReturnRef(path_string_)); + EXPECT_CALL(cert_validation_ctx_config_, trustChainVerification) + .WillRepeatedly(Return(envoy::extensions::transport_sockets::tls::v3:: + CertificateValidationContext::VERIFY_TRUST_CHAIN)); + EXPECT_CALL(cert_validation_ctx_config_, allowExpiredCertificate()) + .WillRepeatedly(Return(allow_expired_cert)); + EXPECT_CALL(cert_validation_ctx_config_, certificateRevocationList()) + .WillRepeatedly(ReturnRef(empty_string_)); + EXPECT_CALL(cert_validation_ctx_config_, certificateRevocationListPath()) + .WillRepeatedly(ReturnRef(path_string_)); + EXPECT_CALL(cert_validation_ctx_config_, verifySubjectAltNameList()) + .WillRepeatedly(ReturnRef(empty_string_list_)); + EXPECT_CALL(cert_validation_ctx_config_, subjectAltNameMatchers()) + .WillRepeatedly(ReturnRef(san_matchers_)); + EXPECT_CALL(cert_validation_ctx_config_, verifyCertificateHashList()) + .WillRepeatedly(ReturnRef(empty_string_list_)); + EXPECT_CALL(cert_validation_ctx_config_, verifyCertificateSpkiList()) + .WillRepeatedly(ReturnRef(empty_string_list_)); + verifier_ = + std::make_unique(store_, client_context_config_, time_system_); + } + +protected: + const std::string path_string_{"some_path"}; + const std::string alpn_{"h2,http/1.1"}; + const 
std::string sig_algs_{"rsa_pss_rsae_sha256"}; + const std::vector san_matchers_; + const std::string empty_string_; + const std::vector empty_string_list_; + const std::string cert_chain_{quic::test::kTestCertificateChainPem}; + const std::string root_ca_cert_; + const std::string leaf_cert_; + NiceMock store_; + Event::GlobalTimeSystem time_system_; + NiceMock client_context_config_; + Ssl::MockCertificateValidationContextConfig cert_validation_ctx_config_; + std::unique_ptr verifier_; +}; + +TEST_F(EnvoyQuicProofVerifierTest, VerifyCertChainSuccess) { + configCertVerificationDetails(true); + std::unique_ptr cert_view = + quic::CertificateView::ParseSingleCertificate(leaf_cert_); + const std::string ocsp_response; + const std::string cert_sct; + std::string error_details; + EXPECT_EQ(quic::QUIC_SUCCESS, + verifier_->VerifyCertChain(std::string(cert_view->subject_alt_name_domains()[0]), 54321, + {leaf_cert_}, ocsp_response, cert_sct, nullptr, + &error_details, nullptr, nullptr)) + << error_details; +} + +TEST_F(EnvoyQuicProofVerifierTest, VerifyCertChainFailureFromSsl) { + configCertVerificationDetails(false); + std::unique_ptr cert_view = + quic::CertificateView::ParseSingleCertificate(leaf_cert_); + const std::string ocsp_response; + const std::string cert_sct; + std::string error_details; + EXPECT_EQ(quic::QUIC_FAILURE, + verifier_->VerifyCertChain(std::string(cert_view->subject_alt_name_domains()[0]), 54321, + {leaf_cert_}, ocsp_response, cert_sct, nullptr, + &error_details, nullptr, nullptr)) + << error_details; + EXPECT_EQ("X509_verify_cert: certificate verification error at depth 1: certificate has expired", + error_details); +} + +TEST_F(EnvoyQuicProofVerifierTest, VerifyCertChainFailureInvalidLeafCert) { + configCertVerificationDetails(true); + const std::string ocsp_response; + const std::string cert_sct; + std::string error_details; + const std::vector certs{"invalid leaf cert"}; + EXPECT_EQ(quic::QUIC_FAILURE, + 
verifier_->VerifyCertChain("www.google.com", 54321, certs, ocsp_response, cert_sct, + nullptr, &error_details, nullptr, nullptr)); + EXPECT_EQ("d2i_X509: fail to parse DER", error_details); +} + +TEST_F(EnvoyQuicProofVerifierTest, VerifyCertChainFailureLeafCertWithGarbage) { + configCertVerificationDetails(true); + std::unique_ptr cert_view = + quic::CertificateView::ParseSingleCertificate(leaf_cert_); + const std::string ocsp_response; + const std::string cert_sct; + std::string cert_with_trailing_garbage = absl::StrCat(leaf_cert_, "AAAAAA"); + std::string error_details; + EXPECT_EQ(quic::QUIC_FAILURE, + verifier_->VerifyCertChain(std::string(cert_view->subject_alt_name_domains()[0]), 54321, + {cert_with_trailing_garbage}, ocsp_response, cert_sct, + nullptr, &error_details, nullptr, nullptr)) + << error_details; + EXPECT_EQ("There is trailing garbage in DER.", error_details); +} + +TEST_F(EnvoyQuicProofVerifierTest, VerifyCertChainFailureInvalidHost) { + configCertVerificationDetails(true); + std::unique_ptr cert_view = + quic::CertificateView::ParseSingleCertificate(leaf_cert_); + const std::string ocsp_response; + const std::string cert_sct; + std::string error_details; + EXPECT_EQ(quic::QUIC_FAILURE, + verifier_->VerifyCertChain("unknown.org", 54321, {leaf_cert_}, ocsp_response, cert_sct, + nullptr, &error_details, nullptr, nullptr)) + << error_details; + EXPECT_EQ("Leaf certificate doesn't match hostname: unknown.org", error_details); +} + +TEST_F(EnvoyQuicProofVerifierTest, VerifyProofFailureEmptyCertChain) { + configCertVerificationDetails(true); + std::unique_ptr cert_view = + quic::CertificateView::ParseSingleCertificate(leaf_cert_); + quic::QuicTransportVersion version{quic::QUIC_VERSION_UNSUPPORTED}; + quiche::QuicheStringPiece chlo_hash{"aaaaa"}; + std::string server_config{"Server Config"}; + const std::string ocsp_response; + const std::string cert_sct; + std::string error_details; + const std::vector certs; + EXPECT_EQ(quic::QUIC_FAILURE, + 
verifier_->VerifyProof(std::string(cert_view->subject_alt_name_domains()[0]), 54321, + server_config, version, chlo_hash, certs, cert_sct, "signature", + nullptr, &error_details, nullptr, nullptr)); + EXPECT_EQ("Received empty cert chain.", error_details); +} + +TEST_F(EnvoyQuicProofVerifierTest, VerifyProofFailureInvalidLeafCert) { + configCertVerificationDetails(true); + std::unique_ptr cert_view = + quic::CertificateView::ParseSingleCertificate(leaf_cert_); + quic::QuicTransportVersion version{quic::QUIC_VERSION_UNSUPPORTED}; + quiche::QuicheStringPiece chlo_hash{"aaaaa"}; + std::string server_config{"Server Config"}; + const std::string ocsp_response; + const std::string cert_sct; + std::string error_details; + const std::vector certs{"invalid leaf cert"}; + EXPECT_EQ(quic::QUIC_FAILURE, + verifier_->VerifyProof(std::string(cert_view->subject_alt_name_domains()[0]), 54321, + server_config, version, chlo_hash, certs, cert_sct, "signature", + nullptr, &error_details, nullptr, nullptr)); + EXPECT_EQ("Invalid leaf cert.", error_details); +} + +TEST_F(EnvoyQuicProofVerifierTest, VerifyProofFailureUnsupportedECKey) { + configCertVerificationDetails(true); + quic::QuicTransportVersion version{quic::QUIC_VERSION_UNSUPPORTED}; + quiche::QuicheStringPiece chlo_hash{"aaaaa"}; + std::string server_config{"Server Config"}; + const std::string ocsp_response; + const std::string cert_sct; + std::string error_details; + // This is a EC cert with secp384r1 curve which is not supported by Envoy. 
+ const std::string certs{R"(-----BEGIN CERTIFICATE----- +MIICkDCCAhagAwIBAgIUTZbykU9eQL3GdrNlodxrOJDecIQwCgYIKoZIzj0EAwIw +fzELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAk1BMRIwEAYDVQQHDAlDYW1icmlkZ2Ux +DzANBgNVBAoMBkdvb2dsZTEOMAwGA1UECwwFZW52b3kxDTALBgNVBAMMBHRlc3Qx +HzAdBgkqhkiG9w0BCQEWEGRhbnpoQGdvb2dsZS5jb20wHhcNMjAwODA1MjAyMDI0 +WhcNMjIwODA1MjAyMDI0WjB/MQswCQYDVQQGEwJVUzELMAkGA1UECAwCTUExEjAQ +BgNVBAcMCUNhbWJyaWRnZTEPMA0GA1UECgwGR29vZ2xlMQ4wDAYDVQQLDAVlbnZv +eTENMAsGA1UEAwwEdGVzdDEfMB0GCSqGSIb3DQEJARYQZGFuemhAZ29vZ2xlLmNv +bTB2MBAGByqGSM49AgEGBSuBBAAiA2IABGRaEAtVq+xHXfsF4R/j+mqVN2E29ZYL +oFlvnelKeeT2B51bSfUv+X+Ci1BSa2OxPCVS6o0vpcF6YOlz4CS7QcXZIoRfhsv7 +O2Hz/IdxAPhX/gdK/70T1x+V/6nvIHiiw6NTMFEwHQYDVR0OBBYEFF75rDce6xNJ +GfpKbUg4emG2KWRMMB8GA1UdIwQYMBaAFF75rDce6xNJGfpKbUg4emG2KWRMMA8G +A1UdEwEB/wQFMAMBAf8wCgYIKoZIzj0EAwIDaAAwZQIxAIyZghTK3cmyrRWkxfQ7 +xEc11gujcT8nbytYbM6jodKwcbtR6SOmLx2ychXrCMm2ZAIwXqmrTYBtrbqb3mBx +VdGXMAjeXhnOnPvmDi5hUz/uvI+Pg6cNmUoCRwSCnK/DazhA +-----END CERTIFICATE-----)"}; + std::stringstream pem_stream(certs); + std::vector chain = quic::CertificateView::LoadPemFromStream(&pem_stream); + std::unique_ptr cert_view = + quic::CertificateView::ParseSingleCertificate(chain[0]); + ASSERT(cert_view); + EXPECT_EQ(quic::QUIC_FAILURE, + verifier_->VerifyProof("www.google.com", 54321, server_config, version, chlo_hash, + chain, cert_sct, "signature", nullptr, &error_details, nullptr, + nullptr)); + EXPECT_EQ("Invalid leaf cert, only P-256 ECDSA certificates are supported", error_details); +} + +TEST_F(EnvoyQuicProofVerifierTest, VerifyProofFailureInvalidSignature) { + configCertVerificationDetails(true); + std::unique_ptr cert_view = + quic::CertificateView::ParseSingleCertificate(leaf_cert_); + quic::QuicTransportVersion version{quic::QUIC_VERSION_UNSUPPORTED}; + quiche::QuicheStringPiece chlo_hash{"aaaaa"}; + std::string server_config{"Server Config"}; + const std::string ocsp_response; + const std::string cert_sct; + std::string error_details; + 
EXPECT_EQ(quic::QUIC_FAILURE, + verifier_->VerifyProof(std::string(cert_view->subject_alt_name_domains()[0]), 54321, + server_config, version, chlo_hash, {leaf_cert_}, cert_sct, + "signature", nullptr, &error_details, nullptr, nullptr)); + EXPECT_EQ("Signature is not valid.", error_details); +} + +} // namespace Quic +} // namespace Envoy diff --git a/test/extensions/quic_listeners/quiche/envoy_quic_server_session_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_server_session_test.cc index 52727dc8dcbb3..f2ef9fae069e0 100644 --- a/test/extensions/quic_listeners/quiche/envoy_quic_server_session_test.cc +++ b/test/extensions/quic_listeners/quiche/envoy_quic_server_session_test.cc @@ -9,7 +9,6 @@ #include "quiche/quic/core/quic_utils.h" #include "quiche/quic/core/quic_versions.h" #include "quiche/quic/test_tools/crypto_test_utils.h" -#include "quiche/quic/test_tools/quic_config_peer.h" #include "quiche/quic/test_tools/quic_connection_peer.h" #include "quiche/quic/test_tools/quic_server_session_base_peer.h" #include "quiche/quic/test_tools/quic_test_utils.h" @@ -25,7 +24,8 @@ #include "extensions/quic_listeners/quiche/envoy_quic_connection_helper.h" #include "extensions/quic_listeners/quiche/envoy_quic_alarm_factory.h" #include "extensions/quic_listeners/quiche/envoy_quic_utils.h" -#include "extensions/quic_listeners/quiche/envoy_quic_fake_proof_source.h" +#include "test/extensions/quic_listeners/quiche/test_proof_source.h" +#include "test/extensions/quic_listeners/quiche/test_utils.h" #include "extensions/transport_sockets/well_known_names.h" #include "envoy/stats/stats_macros.h" @@ -57,12 +57,11 @@ class TestEnvoyQuicServerConnection : public EnvoyQuicServerConnection { quic::QuicAlarmFactory& alarm_factory, quic::QuicPacketWriter& writer, const quic::ParsedQuicVersionVector& supported_versions, - Network::ListenerConfig& listener_config, - Server::ListenerStats& stats, Network::Socket& listen_socket) + Network::Socket& listen_socket) : 
EnvoyQuicServerConnection(quic::test::TestConnectionId(), quic::QuicSocketAddress(quic::QuicIpAddress::Loopback4(), 12345), helper, alarm_factory, &writer, /*owns_writer=*/false, - supported_versions, listener_config, stats, listen_socket) {} + supported_versions, listen_socket) {} Network::Connection::ConnectionStats& connectionStats() const { return EnvoyQuicConnection::connectionStats(); @@ -84,17 +83,60 @@ class TestEnvoyQuicServerSession : public EnvoyQuicServerSession { } }; -class TestQuicCryptoServerStream : public quic::QuicCryptoServerStream { +class ProofSourceDetailsSetter { public: + virtual ~ProofSourceDetailsSetter() = default; + + virtual void setProofSourceDetails(std::unique_ptr details) = 0; +}; + +class TestQuicCryptoServerStream : public EnvoyQuicCryptoServerStream, + public ProofSourceDetailsSetter { +public: + ~TestQuicCryptoServerStream() override = default; + explicit TestQuicCryptoServerStream(const quic::QuicCryptoServerConfig* crypto_config, quic::QuicCompressedCertsCache* compressed_certs_cache, quic::QuicSession* session, quic::QuicCryptoServerStreamBase::Helper* helper) - : quic::QuicCryptoServerStream(crypto_config, compressed_certs_cache, session, helper) {} + : EnvoyQuicCryptoServerStream(crypto_config, compressed_certs_cache, session, helper) {} + + bool encryption_established() const override { return true; } + + const EnvoyQuicProofSourceDetails* proofSourceDetails() const override { return details_.get(); } + + void setProofSourceDetails(std::unique_ptr details) override { + details_ = std::move(details); + } + +private: + std::unique_ptr details_; +}; + +class TestEnvoyQuicTlsServerHandshaker : public EnvoyQuicTlsServerHandshaker, + public ProofSourceDetailsSetter { +public: + ~TestEnvoyQuicTlsServerHandshaker() override = default; - using quic::QuicCryptoServerStream::QuicCryptoServerStream; + TestEnvoyQuicTlsServerHandshaker(quic::QuicSession* session, + const quic::QuicCryptoServerConfig& crypto_config) + : 
EnvoyQuicTlsServerHandshaker(session, crypto_config), + params_(new quic::QuicCryptoNegotiatedParameters) { + params_->cipher_suite = 1; + } bool encryption_established() const override { return true; } + const EnvoyQuicProofSourceDetails* proofSourceDetails() const override { return details_.get(); } + void setProofSourceDetails(std::unique_ptr details) override { + details_ = std::move(details); + } + const quic::QuicCryptoNegotiatedParameters& crypto_negotiated_params() const override { + return *params_; + } + +private: + std::unique_ptr details_; + quic::QuicReferenceCountedPointer params_; }; class EnvoyQuicServerSessionTest : public testing::TestWithParam { @@ -103,23 +145,21 @@ class EnvoyQuicServerSessionTest : public testing::TestWithParam { : api_(Api::createApiForTest(time_system_)), dispatcher_(api_->allocateDispatcher("test_thread")), connection_helper_(*dispatcher_), alarm_factory_(*dispatcher_, *connection_helper_.GetClock()), quic_version_([]() { - SetQuicReloadableFlag(quic_enable_version_draft_27, GetParam()); + SetQuicReloadableFlag(quic_disable_version_draft_29, !GetParam()); + SetQuicReloadableFlag(quic_disable_version_draft_27, !GetParam()); + SetQuicReloadableFlag(quic_disable_version_draft_25, !GetParam()); return quic::ParsedVersionOfIndex(quic::CurrentSupportedVersions(), 0); }()), - listener_stats_({ALL_LISTENER_STATS(POOL_COUNTER(listener_config_.listenerScope()), - POOL_GAUGE(listener_config_.listenerScope()), - POOL_HISTOGRAM(listener_config_.listenerScope()))}), quic_connection_(new TestEnvoyQuicServerConnection( - connection_helper_, alarm_factory_, writer_, quic_version_, listener_config_, - listener_stats_, *listener_config_.socket_)), + connection_helper_, alarm_factory_, writer_, quic_version_, *listener_config_.socket_)), crypto_config_(quic::QuicCryptoServerConfig::TESTING, quic::QuicRandom::GetInstance(), - std::make_unique(), - quic::KeyExchangeSource::Default()), + std::make_unique(), quic::KeyExchangeSource::Default()), 
envoy_quic_session_(quic_config_, quic_version_, std::unique_ptr(quic_connection_), /*visitor=*/nullptr, &crypto_stream_helper_, &crypto_config_, &compressed_certs_cache_, *dispatcher_, - /*send_buffer_limit*/ quic::kDefaultFlowControlSendWindow * 1.5), + /*send_buffer_limit*/ quic::kDefaultFlowControlSendWindow * 1.5, + listener_config_), read_filter_(new Network::MockReadFilter()) { EXPECT_EQ(time_system_.systemTime(), envoy_quic_session_.streamInfo().startTime()); @@ -138,7 +178,32 @@ class EnvoyQuicServerSessionTest : public testing::TestWithParam { ON_CALL(crypto_stream_helper_, CanAcceptClientHello(_, _, _, _, _)).WillByDefault(Return(true)); } - void SetUp() override { envoy_quic_session_.Initialize(); } + void SetUp() override { + envoy_quic_session_.Initialize(); + setQuicConfigWithDefaultValues(envoy_quic_session_.config()); + envoy_quic_session_.OnConfigNegotiated(); + quic::test::QuicConfigPeer::SetNegotiated(envoy_quic_session_.config(), true); + quic::test::QuicConnectionPeer::SetAddressValidated(quic_connection_); + // Switch to a encryption forward secure crypto stream. 
+ quic::test::QuicServerSessionBasePeer::SetCryptoStream(&envoy_quic_session_, nullptr); + quic::QuicCryptoServerStreamBase* crypto_stream = nullptr; + if (quic_version_[0].handshake_protocol == quic::PROTOCOL_QUIC_CRYPTO) { + auto test_crypto_stream = new TestQuicCryptoServerStream( + &crypto_config_, &compressed_certs_cache_, &envoy_quic_session_, &crypto_stream_helper_); + crypto_stream = test_crypto_stream; + crypto_stream_ = test_crypto_stream; + } else { + auto test_crypto_stream = + new TestEnvoyQuicTlsServerHandshaker(&envoy_quic_session_, crypto_config_); + crypto_stream = test_crypto_stream; + crypto_stream_ = test_crypto_stream; + } + quic::test::QuicServerSessionBasePeer::SetCryptoStream(&envoy_quic_session_, crypto_stream); + quic_connection_->SetDefaultEncryptionLevel(quic::ENCRYPTION_FORWARD_SECURE); + quic_connection_->SetEncrypter( + quic::ENCRYPTION_FORWARD_SECURE, + std::make_unique(quic::Perspective::IS_SERVER)); + } bool installReadFilter() { // Setup read filter. @@ -171,7 +236,7 @@ class EnvoyQuicServerSessionTest : public testing::TestWithParam { return request_decoder; })); quic::QuicStreamId stream_id = - quic_version_[0].transport_version == quic::QUIC_VERSION_IETF_DRAFT_27 ? 4u : 5u; + quic::VersionUsesHttp3(quic_version_[0].transport_version) ? 
4u : 5u; return envoy_quic_session_.GetOrCreateStream(stream_id); } @@ -196,11 +261,11 @@ class EnvoyQuicServerSessionTest : public testing::TestWithParam { quic::ParsedQuicVersionVector quic_version_; testing::NiceMock writer_; testing::NiceMock listener_config_; - Server::ListenerStats listener_stats_; TestEnvoyQuicServerConnection* quic_connection_; quic::QuicConfig quic_config_; quic::QuicCryptoServerConfig crypto_config_; testing::NiceMock crypto_stream_helper_; + ProofSourceDetailsSetter* crypto_stream_; TestEnvoyQuicServerSession envoy_quic_session_; quic::QuicCompressedCertsCache compressed_certs_cache_{100}; std::shared_ptr read_filter_; @@ -223,7 +288,7 @@ TEST_P(EnvoyQuicServerSessionTest, NewStream) { EXPECT_CALL(http_connection_callbacks_, newStream(_, false)) .WillOnce(testing::ReturnRef(request_decoder)); quic::QuicStreamId stream_id = - quic_version_[0].transport_version == quic::QUIC_VERSION_IETF_DRAFT_27 ? 4u : 5u; + quic::VersionUsesHttp3(quic_version_[0].transport_version) ? 4u : 5u; auto stream = reinterpret_cast(envoy_quic_session_.GetOrCreateStream(stream_id)); // Receive a GET request on created stream. @@ -237,10 +302,9 @@ TEST_P(EnvoyQuicServerSessionTest, NewStream) { // Request headers should be propagated to decoder. 
EXPECT_CALL(request_decoder, decodeHeaders_(_, /*end_stream=*/true)) .WillOnce(Invoke([&host](const Http::RequestHeaderMapPtr& decoded_headers, bool) { - EXPECT_EQ(host, decoded_headers->Host()->value().getStringView()); - EXPECT_EQ("/", decoded_headers->Path()->value().getStringView()); - EXPECT_EQ(Http::Headers::get().MethodValues.Get, - decoded_headers->Method()->value().getStringView()); + EXPECT_EQ(host, decoded_headers->getHostValue()); + EXPECT_EQ("/", decoded_headers->getPathValue()); + EXPECT_EQ(Http::Headers::get().MethodValues.Get, decoded_headers->getMethodValue()); })); stream->OnStreamHeaderList(/*fin=*/true, headers.uncompressed_header_bytes(), headers); } @@ -252,12 +316,15 @@ TEST_P(EnvoyQuicServerSessionTest, InvalidIncomingStreamId) { Http::MockStreamCallbacks stream_callbacks; // IETF stream 5 and G-Quic stream 2 are server initiated. quic::QuicStreamId stream_id = - quic_version_[0].transport_version == quic::QUIC_VERSION_IETF_DRAFT_27 ? 5u : 2u; + quic::VersionUsesHttp3(quic_version_[0].transport_version) ? 5u : 2u; std::string data("aaaa"); quic::QuicStreamFrame stream_frame(stream_id, false, 0, data); EXPECT_CALL(http_connection_callbacks_, newStream(_, false)).Times(0); - EXPECT_CALL(*quic_connection_, SendConnectionClosePacket(quic::QUIC_INVALID_STREAM_ID, - "Data for nonexistent stream")); + EXPECT_CALL(*quic_connection_, + SendConnectionClosePacket((quic::VersionUsesHttp3(quic_version_[0].transport_version) + ? quic::QUIC_HTTP_STREAM_WRONG_DIRECTION + : quic::QUIC_INVALID_STREAM_ID), + "Data for nonexistent stream")); EXPECT_CALL(network_connection_callbacks_, onEvent(Network::ConnectionEvent::LocalClose)); envoy_quic_session_.OnStreamFrame(stream_frame); @@ -269,10 +336,13 @@ TEST_P(EnvoyQuicServerSessionTest, NoNewStreamForInvalidIncomingStream) { Http::MockStreamCallbacks stream_callbacks; // IETF stream 5 and G-Quic stream 2 are server initiated. 
quic::QuicStreamId stream_id = - quic_version_[0].transport_version == quic::QUIC_VERSION_IETF_DRAFT_27 ? 5u : 2u; + quic::VersionUsesHttp3(quic_version_[0].transport_version) ? 5u : 2u; EXPECT_CALL(http_connection_callbacks_, newStream(_, false)).Times(0); - EXPECT_CALL(*quic_connection_, SendConnectionClosePacket(quic::QUIC_INVALID_STREAM_ID, - "Data for nonexistent stream")); + EXPECT_CALL(*quic_connection_, + SendConnectionClosePacket(quic::VersionUsesHttp3(quic_version_[0].transport_version) + ? quic::QUIC_HTTP_STREAM_WRONG_DIRECTION + : quic::QUIC_INVALID_STREAM_ID, + "Data for nonexistent stream")); EXPECT_CALL(network_connection_callbacks_, onEvent(Network::ConnectionEvent::LocalClose)); // Stream creation on closed connection should fail. @@ -287,13 +357,14 @@ TEST_P(EnvoyQuicServerSessionTest, OnResetFrame) { quic::QuicRstStreamFrame rst1(/*control_frame_id=*/1u, stream1->id(), quic::QUIC_ERROR_PROCESSING_STREAM, /*bytes_written=*/0u); EXPECT_CALL(stream_callbacks, onResetStream(Http::StreamResetReason::RemoteReset, _)); - if (quic_version_[0].transport_version < quic::QUIC_VERSION_IETF_DRAFT_27) { + if (!quic::VersionUsesHttp3(quic_version_[0].transport_version)) { EXPECT_CALL(*quic_connection_, SendControlFrame(_)) .WillOnce(Invoke([stream_id = stream1->id()](const quic::QuicFrame& frame) { EXPECT_EQ(stream_id, frame.rst_stream_frame->stream_id); EXPECT_EQ(quic::QUIC_RST_ACKNOWLEDGEMENT, frame.rst_stream_frame->error_code); return false; })); + } else { } stream1->OnStreamReset(rst1); @@ -383,16 +454,6 @@ TEST_P(EnvoyQuicServerSessionTest, FlushCloseWithDataToWrite) { // timer. TEST_P(EnvoyQuicServerSessionTest, WriteUpdatesDelayCloseTimer) { installReadFilter(); - // Switch to a encryption forward secure crypto stream. 
- quic::test::QuicServerSessionBasePeer::SetCryptoStream(&envoy_quic_session_, nullptr); - quic::test::QuicServerSessionBasePeer::SetCryptoStream( - &envoy_quic_session_, - new TestQuicCryptoServerStream(&crypto_config_, &compressed_certs_cache_, - &envoy_quic_session_, &crypto_stream_helper_)); - quic_connection_->SetDefaultEncryptionLevel(quic::ENCRYPTION_FORWARD_SECURE); - quic_connection_->SetEncrypter( - quic::ENCRYPTION_FORWARD_SECURE, - std::make_unique(quic::Perspective::IS_SERVER)); // Drive congestion control manually. auto send_algorithm = new testing::NiceMock; quic::test::QuicConnectionPeer::SetSendAlgorithm(quic_connection_, send_algorithm); @@ -427,10 +488,9 @@ TEST_P(EnvoyQuicServerSessionTest, WriteUpdatesDelayCloseTimer) { // Request headers should be propagated to decoder. EXPECT_CALL(request_decoder, decodeHeaders_(_, /*end_stream=*/true)) .WillOnce(Invoke([&host](const Http::RequestHeaderMapPtr& decoded_headers, bool) { - EXPECT_EQ(host, decoded_headers->Host()->value().getStringView()); - EXPECT_EQ("/", decoded_headers->Path()->value().getStringView()); - EXPECT_EQ(Http::Headers::get().MethodValues.Get, - decoded_headers->Method()->value().getStringView()); + EXPECT_EQ(host, decoded_headers->getHostValue()); + EXPECT_EQ("/", decoded_headers->getPathValue()); + EXPECT_EQ(Http::Headers::get().MethodValues.Get, decoded_headers->getMethodValue()); })); stream->OnStreamHeaderList(/*fin=*/true, request_headers.uncompressed_header_bytes(), request_headers); @@ -522,10 +582,9 @@ TEST_P(EnvoyQuicServerSessionTest, FlushCloseNoTimeout) { // Request headers should be propagated to decoder. 
EXPECT_CALL(request_decoder, decodeHeaders_(_, /*end_stream=*/true)) .WillOnce(Invoke([&host](const Http::RequestHeaderMapPtr& decoded_headers, bool) { - EXPECT_EQ(host, decoded_headers->Host()->value().getStringView()); - EXPECT_EQ("/", decoded_headers->Path()->value().getStringView()); - EXPECT_EQ(Http::Headers::get().MethodValues.Get, - decoded_headers->Method()->value().getStringView()); + EXPECT_EQ(host, decoded_headers->getHostValue()); + EXPECT_EQ("/", decoded_headers->getPathValue()); + EXPECT_EQ(Http::Headers::get().MethodValues.Get, decoded_headers->getMethodValue()); })); stream->OnStreamHeaderList(/*fin=*/true, request_headers.uncompressed_header_bytes(), request_headers); @@ -696,41 +755,20 @@ TEST_P(EnvoyQuicServerSessionTest, ShutdownNotice) { TEST_P(EnvoyQuicServerSessionTest, GoAway) { installReadFilter(); - EXPECT_CALL(*quic_connection_, SendControlFrame(_)); + testing::NiceMock debug_visitor; + envoy_quic_session_.set_debug_visitor(&debug_visitor); + if (quic::VersionUsesHttp3(quic_version_[0].transport_version)) { + EXPECT_CALL(debug_visitor, OnGoAwayFrameSent(_)); + } else { + EXPECT_CALL(*quic_connection_, SendControlFrame(_)); + } http_connection_->goAway(); } TEST_P(EnvoyQuicServerSessionTest, InitializeFilterChain) { - // Generate a CHLO packet. 
- quic::CryptoHandshakeMessage chlo = quic::test::crypto_test_utils::GenerateDefaultInchoateCHLO( - connection_helper_.GetClock(), quic::CurrentSupportedVersions()[0].transport_version, - &crypto_config_); - chlo.SetVector(quic::kCOPT, quic::QuicTagVector{quic::kREJ}); - std::string packet_content(chlo.GetSerialized().AsStringPiece()); - auto encrypted_packet = - std::unique_ptr(quic::test::ConstructEncryptedPacket( - quic_connection_->connection_id(), quic::EmptyQuicConnectionId(), /*version_flag=*/true, - /*reset_flag*/ false, /*packet_number=*/1, packet_content)); - - quic::QuicSocketAddress self_address( - envoyAddressInstanceToQuicSocketAddress(listener_config_.socket_->localAddress())); - auto packet = std::unique_ptr( - quic::test::ConstructReceivedPacket(*encrypted_packet, connection_helper_.GetClock()->Now())); - - // Receiving above packet should trigger filter chain retrieval. - Network::MockFilterChainManager filter_chain_manager; - EXPECT_CALL(listener_config_, filterChainManager()).WillOnce(ReturnRef(filter_chain_manager)); Network::MockFilterChain filter_chain; - EXPECT_CALL(filter_chain_manager, findFilterChain(_)) - .WillOnce(Invoke([&](const Network::ConnectionSocket& socket) { - EXPECT_EQ(*quicAddressToEnvoyAddressInstance(quic_connection_->peer_address()), - *socket.remoteAddress()); - EXPECT_EQ(*quicAddressToEnvoyAddressInstance(self_address), *socket.localAddress()); - EXPECT_EQ(listener_config_.socket_->ioHandle().fd(), socket.ioHandle().fd()); - EXPECT_EQ(Extensions::TransportSockets::TransportProtocolNames::get().Quic, - socket.detectedTransportProtocol()); - return &filter_chain; - })); + crypto_stream_->setProofSourceDetails( + std::make_unique(filter_chain)); std::vector filter_factory{[this]( Network::FilterManager& filter_manager) { filter_manager.addReadFilter(read_filter_); @@ -749,13 +787,16 @@ TEST_P(EnvoyQuicServerSessionTest, InitializeFilterChain) { Server::Configuration::FilterChainUtility::buildFilterChain(connection, 
filter_factories); return true; })); - // A reject should be sent because of the inchoate CHLO. - EXPECT_CALL(writer_, WritePacket(_, _, _, _, _)) - .WillOnce(testing::Return(quic::WriteResult(quic::WRITE_STATUS_OK, 1))); - quic_connection_->ProcessUdpPacket(self_address, quic_connection_->peer_address(), *packet); - EXPECT_TRUE(quic_connection_->connected()); + EXPECT_CALL(network_connection_callbacks_, onEvent(Network::ConnectionEvent::Connected)); + if (!quic_version_[0].UsesTls()) { + envoy_quic_session_.SetDefaultEncryptionLevel(quic::ENCRYPTION_FORWARD_SECURE); + } else { + if (quic::VersionUsesHttp3(quic_version_[0].transport_version)) { + EXPECT_CALL(*quic_connection_, SendControlFrame(_)); + } + envoy_quic_session_.OnOneRttKeysAvailable(); + } EXPECT_EQ(nullptr, envoy_quic_session_.socketOptions()); - EXPECT_FALSE(envoy_quic_session_.IsEncryptionEstablished()); EXPECT_TRUE(quic_connection_->connectionSocket()->ioHandle().isOpen()); EXPECT_TRUE(quic_connection_->connectionSocket()->ioHandle().close().ok()); EXPECT_FALSE(quic_connection_->connectionSocket()->ioHandle().isOpen()); @@ -802,7 +843,7 @@ TEST_P(EnvoyQuicServerSessionTest, SendBufferWatermark) { return request_decoder; })); quic::QuicStreamId stream_id = - quic_version_[0].transport_version == quic::QUIC_VERSION_IETF_DRAFT_27 ? 4u : 5u; + quic::VersionUsesHttp3(quic_version_[0].transport_version) ? 4u : 5u; auto stream1 = dynamic_cast(envoy_quic_session_.GetOrCreateStream(stream_id)); @@ -817,10 +858,9 @@ TEST_P(EnvoyQuicServerSessionTest, SendBufferWatermark) { // Request headers should be propagated to decoder. 
EXPECT_CALL(request_decoder, decodeHeaders_(_, /*end_stream=*/true)) .WillOnce(Invoke([&host](const Http::RequestHeaderMapPtr& decoded_headers, bool) { - EXPECT_EQ(host, decoded_headers->Host()->value().getStringView()); - EXPECT_EQ("/", decoded_headers->Path()->value().getStringView()); - EXPECT_EQ(Http::Headers::get().MethodValues.Get, - decoded_headers->Method()->value().getStringView()); + EXPECT_EQ(host, decoded_headers->getHostValue()); + EXPECT_EQ("/", decoded_headers->getPathValue()); + EXPECT_EQ(Http::Headers::get().MethodValues.Get, decoded_headers->getMethodValue()); })); stream1->OnStreamHeaderList(/*fin=*/true, request_headers.uncompressed_header_bytes(), request_headers); @@ -850,10 +890,9 @@ TEST_P(EnvoyQuicServerSessionTest, SendBufferWatermark) { dynamic_cast(envoy_quic_session_.GetOrCreateStream(stream_id + 4)); EXPECT_CALL(request_decoder2, decodeHeaders_(_, /*end_stream=*/true)) .WillOnce(Invoke([&host](const Http::RequestHeaderMapPtr& decoded_headers, bool) { - EXPECT_EQ(host, decoded_headers->Host()->value().getStringView()); - EXPECT_EQ("/", decoded_headers->Path()->value().getStringView()); - EXPECT_EQ(Http::Headers::get().MethodValues.Get, - decoded_headers->Method()->value().getStringView()); + EXPECT_EQ(host, decoded_headers->getHostValue()); + EXPECT_EQ("/", decoded_headers->getPathValue()); + EXPECT_EQ(Http::Headers::get().MethodValues.Get, decoded_headers->getMethodValue()); })); stream2->OnStreamHeaderList(/*fin=*/true, request_headers.uncompressed_header_bytes(), request_headers); @@ -882,10 +921,9 @@ TEST_P(EnvoyQuicServerSessionTest, SendBufferWatermark) { dynamic_cast(envoy_quic_session_.GetOrCreateStream(stream_id + 8)); EXPECT_CALL(request_decoder3, decodeHeaders_(_, /*end_stream=*/true)) .WillOnce(Invoke([&host](const Http::RequestHeaderMapPtr& decoded_headers, bool) { - EXPECT_EQ(host, decoded_headers->Host()->value().getStringView()); - EXPECT_EQ("/", decoded_headers->Path()->value().getStringView()); - 
EXPECT_EQ(Http::Headers::get().MethodValues.Get, - decoded_headers->Method()->value().getStringView()); + EXPECT_EQ(host, decoded_headers->getHostValue()); + EXPECT_EQ("/", decoded_headers->getPathValue()); + EXPECT_EQ(Http::Headers::get().MethodValues.Get, decoded_headers->getMethodValue()); })); stream3->OnStreamHeaderList(/*fin=*/true, request_headers.uncompressed_header_bytes(), request_headers); @@ -955,5 +993,115 @@ TEST_P(EnvoyQuicServerSessionTest, SendBufferWatermark) { EXPECT_TRUE(stream2->write_side_closed()); } +TEST_P(EnvoyQuicServerSessionTest, HeadersContributeToWatermarkGquic) { + if (quic::VersionUsesHttp3(quic_version_[0].transport_version)) { + installReadFilter(); + return; + } + // Switch to a encryption forward secure crypto stream. + quic::test::QuicServerSessionBasePeer::SetCryptoStream(&envoy_quic_session_, nullptr); + quic::test::QuicServerSessionBasePeer::SetCryptoStream( + &envoy_quic_session_, + new TestQuicCryptoServerStream(&crypto_config_, &compressed_certs_cache_, + &envoy_quic_session_, &crypto_stream_helper_)); + quic_connection_->SetDefaultEncryptionLevel(quic::ENCRYPTION_FORWARD_SECURE); + quic_connection_->SetEncrypter( + quic::ENCRYPTION_FORWARD_SECURE, + std::make_unique(quic::Perspective::IS_SERVER)); + // Drive congestion control manually. + auto send_algorithm = new testing::NiceMock; + quic::test::QuicConnectionPeer::SetSendAlgorithm(quic_connection_, send_algorithm); + EXPECT_CALL(*send_algorithm, PacingRate(_)).WillRepeatedly(Return(quic::QuicBandwidth::Zero())); + EXPECT_CALL(*send_algorithm, BandwidthEstimate()) + .WillRepeatedly(Return(quic::QuicBandwidth::Zero())); + EXPECT_CALL(*quic_connection_, SendControlFrame(_)).Times(AnyNumber()); + + // Bump connection flow control window large enough not to interfere + // stream writing. 
+ envoy_quic_session_.flow_controller()->UpdateSendWindowOffset( + 10 * quic::kDefaultFlowControlSendWindow); + installReadFilter(); + Http::MockRequestDecoder request_decoder; + Http::MockStreamCallbacks stream_callbacks; + EXPECT_CALL(http_connection_callbacks_, newStream(_, false)) + .WillOnce(Invoke([&request_decoder, &stream_callbacks](Http::ResponseEncoder& encoder, + bool) -> Http::RequestDecoder& { + encoder.getStream().addCallbacks(stream_callbacks); + return request_decoder; + })); + quic::QuicStreamId stream_id = + quic::VersionUsesHttp3(quic_version_[0].transport_version) ? 4u : 5u; + auto stream1 = + dynamic_cast(envoy_quic_session_.GetOrCreateStream(stream_id)); + + // Receive a GET request on created stream. + quic::QuicHeaderList request_headers; + request_headers.OnHeaderBlockStart(); + std::string host("www.abc.com"); + request_headers.OnHeader(":authority", host); + request_headers.OnHeader(":method", "GET"); + request_headers.OnHeader(":path", "/"); + request_headers.OnHeaderBlockEnd(/*uncompressed_header_bytes=*/0, /*compressed_header_bytes=*/0); + // Request headers should be propagated to decoder. + EXPECT_CALL(request_decoder, decodeHeaders_(_, /*end_stream=*/true)) + .WillOnce(Invoke([&host](const Http::RequestHeaderMapPtr& decoded_headers, bool) { + EXPECT_EQ(host, decoded_headers->getHostValue()); + EXPECT_EQ("/", decoded_headers->getPathValue()); + EXPECT_EQ(Http::Headers::get().MethodValues.Get, decoded_headers->getMethodValue()); + })); + stream1->OnStreamHeaderList(/*fin=*/true, request_headers.uncompressed_header_bytes(), + request_headers); + + Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}}; + // Make connection congestion control blocked so headers are buffered. 
+ EXPECT_CALL(*send_algorithm, CanSend(_)).WillRepeatedly(Return(false)); + stream1->encodeHeaders(response_headers, false); + // Buffer a response slightly smaller than connection level watermark, but + // with the previously buffered headers, this write should reach high + // watermark. + std::string response(24 * 1024 - 1, 'a'); + Buffer::OwnedImpl buffer(response); + // Triggered twice, once by stream, the other time by connection. + EXPECT_CALL(stream_callbacks, onAboveWriteBufferHighWatermark()).Times(2); + EXPECT_CALL(network_connection_callbacks_, onAboveWriteBufferHighWatermark) + .WillOnce(Invoke( + [this]() { http_connection_->onUnderlyingConnectionAboveWriteBufferHighWatermark(); })); + stream1->encodeData(buffer, false); + EXPECT_FALSE(envoy_quic_session_.IsConnectionFlowControlBlocked()); + + // Write the buffered data out till stream is flow control blocked. Both + // stream and connection level buffers should drop below watermark. + EXPECT_CALL(*send_algorithm, CanSend(_)).WillRepeatedly(Return(true)); + EXPECT_CALL(*send_algorithm, GetCongestionWindow()).WillRepeatedly(Return(quic::kDefaultTCPMSS)); + EXPECT_CALL(network_connection_callbacks_, onBelowWriteBufferLowWatermark) + .WillOnce(Invoke( + [this]() { http_connection_->onUnderlyingConnectionBelowWriteBufferLowWatermark(); })); + EXPECT_CALL(stream_callbacks, onBelowWriteBufferLowWatermark()).Times(2); + envoy_quic_session_.OnCanWrite(); + EXPECT_TRUE(stream1->flow_controller()->IsBlocked()); + + // Buffer more response because of flow control. The buffered bytes become just below connection + // level high watermark. + std::string response1(16 * 1024 - 20, 'a'); + Buffer::OwnedImpl buffer1(response1); + EXPECT_CALL(stream_callbacks, onAboveWriteBufferHighWatermark()); + stream1->encodeData(buffer1, false); + + // Make connection congestion control blocked again. 
+ EXPECT_CALL(*send_algorithm, CanSend(_)).WillRepeatedly(Return(false)); + // Buffering the trailers will cause connection to reach high watermark. + EXPECT_CALL(network_connection_callbacks_, onAboveWriteBufferHighWatermark) + .WillOnce(Invoke( + [this]() { http_connection_->onUnderlyingConnectionAboveWriteBufferHighWatermark(); })); + Http::TestResponseTrailerMapImpl response_trailers{{"trailer-key", "trailer-value"}}; + stream1->encodeTrailers(response_trailers); + + EXPECT_CALL(network_connection_callbacks_, onBelowWriteBufferLowWatermark) + .WillOnce(Invoke( + [this]() { http_connection_->onUnderlyingConnectionBelowWriteBufferLowWatermark(); })); + EXPECT_CALL(stream_callbacks, onResetStream(Http::StreamResetReason::LocalReset, _)); + stream1->resetStream(Http::StreamResetReason::LocalReset); +} + } // namespace Quic } // namespace Envoy diff --git a/test/extensions/quic_listeners/quiche/envoy_quic_server_stream_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_server_stream_test.cc index 735f2690d031c..4a4236737bd00 100644 --- a/test/extensions/quic_listeners/quiche/envoy_quic_server_stream_test.cc +++ b/test/extensions/quic_listeners/quiche/envoy_quic_server_stream_test.cc @@ -1,5 +1,15 @@ #include +#pragma GCC diagnostic push +// QUICHE allows unused parameters. +#pragma GCC diagnostic ignored "-Wunused-parameter" +// QUICHE uses offsetof(). 
+#pragma GCC diagnostic ignored "-Winvalid-offsetof" + +#include "quiche/quic/test_tools/quic_connection_peer.h" +#include "quiche/quic/test_tools/quic_session_peer.h" +#pragma GCC diagnostic pop + #include "common/event/libevent_scheduler.h" #include "common/http/headers.h" @@ -30,7 +40,9 @@ class EnvoyQuicServerStreamTest : public testing::TestWithParam { : api_(Api::createApiForTest()), dispatcher_(api_->allocateDispatcher("test_thread")), connection_helper_(*dispatcher_), alarm_factory_(*dispatcher_, *connection_helper_.GetClock()), quic_version_([]() { - SetQuicReloadableFlag(quic_enable_version_draft_27, GetParam()); + SetQuicReloadableFlag(quic_disable_version_draft_29, !GetParam()); + SetQuicReloadableFlag(quic_disable_version_draft_27, !GetParam()); + SetQuicReloadableFlag(quic_disable_version_draft_25, !GetParam()); return quic::CurrentSupportedVersions()[0]; }()), listener_stats_({ALL_LISTENER_STATS(POOL_COUNTER(listener_config_.listenerScope()), @@ -39,16 +51,18 @@ class EnvoyQuicServerStreamTest : public testing::TestWithParam { quic_connection_(quic::test::TestConnectionId(), quic::QuicSocketAddress(quic::QuicIpAddress::Any6(), 12345), connection_helper_, alarm_factory_, &writer_, - /*owns_writer=*/false, {quic_version_}, listener_config_, listener_stats_, - *listener_config_.socket_), + /*owns_writer=*/false, {quic_version_}, *listener_config_.socket_), quic_session_(quic_config_, {quic_version_}, &quic_connection_, *dispatcher_, quic_config_.GetInitialStreamFlowControlWindowToSend() * 2), stream_id_(VersionUsesHttp3(quic_version_.transport_version) ? 
4u : 5u), quic_stream_(new EnvoyQuicServerStream(stream_id_, &quic_session_, quic::BIDIRECTIONAL)), - response_headers_{{":status", "200"}} { + response_headers_{{":status", "200"}, {"response-key", "response-value"}}, + response_trailers_{{"trailer-key", "trailer-value"}} { quic_stream_->setRequestDecoder(stream_decoder_); quic_stream_->addCallbacks(stream_callbacks_); + quic::test::QuicConnectionPeer::SetAddressValidated(&quic_connection_); quic_session_.ActivateStream(std::unique_ptr(quic_stream_)); + EXPECT_CALL(quic_session_, ShouldYield(_)).WillRepeatedly(testing::Return(false)); EXPECT_CALL(quic_session_, WritevData(_, _, _, _, _, _)) .WillRepeatedly(Invoke([](quic::QuicStreamId, size_t write_length, quic::QuicStreamOffset, quic::StreamSendingState state, bool, @@ -64,6 +78,8 @@ class EnvoyQuicServerStreamTest : public testing::TestWithParam { void SetUp() override { quic_session_.Initialize(); + setQuicConfigWithDefaultValues(quic_session_.config()); + quic_session_.OnConfigNegotiated(); request_headers_.OnHeaderBlockStart(); request_headers_.OnHeader(":authority", host_); request_headers_.OnHeader(":method", "POST"); @@ -101,10 +117,9 @@ class EnvoyQuicServerStreamTest : public testing::TestWithParam { size_t sendRequest(const std::string& payload, bool fin, size_t decoder_buffer_high_watermark) { EXPECT_CALL(stream_decoder_, decodeHeaders_(_, /*end_stream=*/false)) .WillOnce(Invoke([this](const Http::RequestHeaderMapPtr& headers, bool) { - EXPECT_EQ(host_, headers->Host()->value().getStringView()); - EXPECT_EQ("/", headers->Path()->value().getStringView()); - EXPECT_EQ(Http::Headers::get().MethodValues.Post, - headers->Method()->value().getStringView()); + EXPECT_EQ(host_, headers->getHostValue()); + EXPECT_EQ("/", headers->getPathValue()); + EXPECT_EQ(Http::Headers::get().MethodValues.Post, headers->getMethodValue()); })); quic_stream_->OnStreamHeaderList(/*fin=*/false, request_headers_.uncompressed_header_bytes(), request_headers_); @@ -142,6 +157,7 @@ 
class EnvoyQuicServerStreamTest : public testing::TestWithParam { Http::MockStreamCallbacks stream_callbacks_; quic::QuicHeaderList request_headers_; Http::TestResponseHeaderMapImpl response_headers_; + Http::TestResponseTrailerMapImpl response_trailers_; quic::QuicHeaderList trailers_; std::string host_{"www.abc.com"}; std::string request_body_{"Hello world"}; @@ -161,10 +177,9 @@ TEST_P(EnvoyQuicServerStreamTest, GetRequestAndResponse) { EXPECT_CALL(stream_decoder_, decodeHeaders_(_, /*end_stream=*/true)) .WillOnce(Invoke([this](const Http::RequestHeaderMapPtr& headers, bool) { - EXPECT_EQ(host_, headers->Host()->value().getStringView()); - EXPECT_EQ("/", headers->Path()->value().getStringView()); - EXPECT_EQ(Http::Headers::get().MethodValues.Get, - headers->Method()->value().getStringView()); + EXPECT_EQ(host_, headers->getHostValue()); + EXPECT_EQ("/", headers->getPathValue()); + EXPECT_EQ(Http::Headers::get().MethodValues.Get, headers->getMethodValue()); })); quic_stream_->OnStreamHeaderList(/*fin=*/true, request_headers.uncompressed_header_bytes(), request_headers); @@ -175,7 +190,8 @@ TEST_P(EnvoyQuicServerStreamTest, GetRequestAndResponse) { TEST_P(EnvoyQuicServerStreamTest, PostRequestAndResponse) { EXPECT_EQ(absl::nullopt, quic_stream_->http1StreamEncoderOptions()); sendRequest(request_body_, true, request_body_.size() * 2); - quic_stream_->encodeHeaders(response_headers_, /*end_stream=*/true); + quic_stream_->encodeHeaders(response_headers_, /*end_stream=*/false); + quic_stream_->encodeTrailers(response_trailers_); } TEST_P(EnvoyQuicServerStreamTest, DecodeHeadersBodyAndTrailers) { @@ -198,10 +214,9 @@ TEST_P(EnvoyQuicServerStreamTest, OutOfOrderTrailers) { } EXPECT_CALL(stream_decoder_, decodeHeaders_(_, /*end_stream=*/false)) .WillOnce(Invoke([this](const Http::RequestHeaderMapPtr& headers, bool) { - EXPECT_EQ(host_, headers->Host()->value().getStringView()); - EXPECT_EQ("/", headers->Path()->value().getStringView()); - 
EXPECT_EQ(Http::Headers::get().MethodValues.Post, - headers->Method()->value().getStringView()); + EXPECT_EQ(host_, headers->getHostValue()); + EXPECT_EQ("/", headers->getPathValue()); + EXPECT_EQ(Http::Headers::get().MethodValues.Post, headers->getMethodValue()); })); quic_stream_->OnStreamHeaderList(/*fin=*/false, request_headers_.uncompressed_header_bytes(), request_headers_); @@ -271,10 +286,9 @@ TEST_P(EnvoyQuicServerStreamTest, ReadDisableUponLargePost) { TEST_P(EnvoyQuicServerStreamTest, ReadDisableAndReEnableImmediately) { EXPECT_CALL(stream_decoder_, decodeHeaders_(_, /*end_stream=*/false)) .WillOnce(Invoke([this](const Http::RequestHeaderMapPtr& headers, bool) { - EXPECT_EQ(host_, headers->Host()->value().getStringView()); - EXPECT_EQ("/", headers->Path()->value().getStringView()); - EXPECT_EQ(Http::Headers::get().MethodValues.Post, - headers->Method()->value().getStringView()); + EXPECT_EQ(host_, headers->getHostValue()); + EXPECT_EQ("/", headers->getPathValue()); + EXPECT_EQ(Http::Headers::get().MethodValues.Post, headers->getMethodValue()); })); quic_stream_->OnStreamHeaderList(/*fin=*/false, request_headers_.uncompressed_header_bytes(), request_headers_); @@ -364,5 +378,80 @@ TEST_P(EnvoyQuicServerStreamTest, WatermarkSendBuffer) { EXPECT_TRUE(quic_stream_->write_side_closed()); } +TEST_P(EnvoyQuicServerStreamTest, HeadersContributeToWatermarkIquic) { + if (!quic::VersionUsesHttp3(quic_version_.transport_version)) { + EXPECT_CALL(stream_callbacks_, onResetStream(_, _)); + return; + } + + sendRequest(request_body_, true, request_body_.size() * 2); + + // Bump connection flow control window large enough not to cause connection level flow control + // blocked + quic::QuicWindowUpdateFrame window_update( + quic::kInvalidControlFrameId, + quic::QuicUtils::GetInvalidStreamId(quic_version_.transport_version), 1024 * 1024); + quic_session_.OnWindowUpdateFrame(window_update); + + // Make the stream blocked by congestion control. 
+ EXPECT_CALL(quic_session_, WritevData(_, _, _, _, _, _)) + .WillOnce(Invoke([](quic::QuicStreamId, size_t /*write_length*/, quic::QuicStreamOffset, + quic::StreamSendingState state, bool, + quiche::QuicheOptional) { + return quic::QuicConsumedData{0u, state != quic::NO_FIN}; + })); + quic_stream_->encodeHeaders(response_headers_, /*end_stream=*/false); + + // Encode 16kB -10 bytes request body. Because the high watermark is 16KB, with previously + // buffered headers, this call should make the send buffers reach their high watermark. + std::string response(16 * 1024 - 10, 'a'); + Buffer::OwnedImpl buffer(response); + EXPECT_CALL(stream_callbacks_, onAboveWriteBufferHighWatermark()); + quic_stream_->encodeData(buffer, false); + EXPECT_EQ(0u, buffer.length()); + + // Unblock writing now, and this will write out 16kB data and cause stream to + // be blocked by the flow control limit. + EXPECT_CALL(quic_session_, WritevData(_, _, _, _, _, _)) + .WillOnce(Invoke([](quic::QuicStreamId, size_t write_length, quic::QuicStreamOffset, + quic::StreamSendingState state, bool, + quiche::QuicheOptional) { + return quic::QuicConsumedData{write_length, state != quic::NO_FIN}; + })); + EXPECT_CALL(stream_callbacks_, onBelowWriteBufferLowWatermark()); + quic_session_.OnCanWrite(); + EXPECT_TRUE(quic_stream_->flow_controller()->IsBlocked()); + + // Update flow control window to write all the buffered data. + quic::QuicWindowUpdateFrame window_update1(quic::kInvalidControlFrameId, quic_stream_->id(), + 32 * 1024); + quic_stream_->OnWindowUpdateFrame(window_update1); + EXPECT_CALL(quic_session_, WritevData(_, _, _, _, _, _)) + .WillOnce(Invoke([](quic::QuicStreamId, size_t write_length, quic::QuicStreamOffset, + quic::StreamSendingState state, bool, + quiche::QuicheOptional) { + return quic::QuicConsumedData{write_length, state != quic::NO_FIN}; + })); + quic_session_.OnCanWrite(); + // No data should be buffered at this point. 
+ + EXPECT_CALL(quic_session_, WritevData(_, _, _, _, _, _)) + .WillRepeatedly(Invoke([](quic::QuicStreamId, size_t, quic::QuicStreamOffset, + quic::StreamSendingState state, bool, + quiche::QuicheOptional) { + return quic::QuicConsumedData{0u, state != quic::NO_FIN}; + })); + // Send more data. If watermark bytes counting were not cleared in previous + // OnCanWrite, this write would have caused the stream to exceed its high watermark. + std::string response1(16 * 1024 - 3, 'a'); + Buffer::OwnedImpl buffer1(response1); + quic_stream_->encodeData(buffer1, false); + // Buffering more trailers will cause stream to reach high watermark, but + // because trailers closes the stream, no callback should be triggered. + quic_stream_->encodeTrailers(response_trailers_); + + EXPECT_CALL(stream_callbacks_, onResetStream(_, _)); +} + } // namespace Quic } // namespace Envoy diff --git a/test/extensions/quic_listeners/quiche/envoy_quic_utils_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_utils_test.cc index de9883880c2e4..68d606ea54b4e 100644 --- a/test/extensions/quic_listeners/quiche/envoy_quic_utils_test.cc +++ b/test/extensions/quic_listeners/quiche/envoy_quic_utils_test.cc @@ -39,22 +39,20 @@ TEST(EnvoyQuicUtilsTest, ConversionBetweenQuicAddressAndEnvoyAddress) { Network::Address::InstanceConstSharedPtr envoy_addr = quicAddressToEnvoyAddressInstance(quic_addr); EXPECT_EQ(quic_addr.ToString(), envoy_addr->asStringView()); - EXPECT_EQ(quic_addr, envoyAddressInstanceToQuicSocketAddress(envoy_addr)); + EXPECT_EQ(quic_addr, envoyIpAddressToQuicSocketAddress(envoy_addr->ip())); } } TEST(EnvoyQuicUtilsTest, HeadersConversion) { spdy::SpdyHeaderBlock headers_block; - headers_block[":host"] = "www.google.com"; + headers_block[":authority"] = "www.google.com"; headers_block[":path"] = "/index.hml"; headers_block[":scheme"] = "https"; auto envoy_headers = spdyHeaderBlockToEnvoyHeaders(headers_block); EXPECT_EQ(headers_block.size(), envoy_headers->size()); - 
EXPECT_EQ("www.google.com", - envoy_headers->get(Http::LowerCaseString(":host"))->value().getStringView()); - EXPECT_EQ("/index.hml", - envoy_headers->get(Http::LowerCaseString(":path"))->value().getStringView()); - EXPECT_EQ("https", envoy_headers->get(Http::LowerCaseString(":scheme"))->value().getStringView()); + EXPECT_EQ("www.google.com", envoy_headers->getHostValue()); + EXPECT_EQ("/index.hml", envoy_headers->getPathValue()); + EXPECT_EQ("https", envoy_headers->getSchemeValue()); quic::QuicHeaderList quic_headers = quic::test::AsHeaderList(headers_block); auto envoy_headers2 = quicHeadersToEnvoyHeaders(quic_headers); diff --git a/test/extensions/quic_listeners/quiche/envoy_quic_writer_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_writer_test.cc index d418531ba3596..cb22532e69bb3 100644 --- a/test/extensions/quic_listeners/quiche/envoy_quic_writer_test.cc +++ b/test/extensions/quic_listeners/quiche/envoy_quic_writer_test.cc @@ -5,6 +5,7 @@ #include "common/network/address_impl.h" #include "common/network/io_socket_error_impl.h" +#include "common/network/udp_packet_writer_handler_impl.h" #include "extensions/quic_listeners/quiche/envoy_quic_packet_writer.h" @@ -22,7 +23,8 @@ namespace Quic { class EnvoyQuicWriterTest : public ::testing::Test { public: - EnvoyQuicWriterTest() : envoy_quic_writer_(socket_) { + EnvoyQuicWriterTest() + : envoy_quic_writer_(std::make_unique(socket_.ioHandle())) { self_address_.FromString("::"); quic::QuicIpAddress peer_ip; peer_ip.FromString("::1"); @@ -82,7 +84,7 @@ TEST_F(EnvoyQuicWriterTest, SendBlocked) { EXPECT_CALL(os_sys_calls_, sendmsg(_, _, _)) .WillOnce(testing::Invoke([this, str](int, const msghdr* message, int) { verifySendData(str, message); - return Api::SysCallSizeResult{-1, EAGAIN}; + return Api::SysCallSizeResult{-1, SOCKET_ERROR_AGAIN}; })); quic::WriteResult result = envoy_quic_writer_.WritePacket(str.data(), str.length(), self_address_, peer_address_, nullptr); @@ -94,7 +96,7 @@ 
TEST_F(EnvoyQuicWriterTest, SendBlocked) { EXPECT_CALL(os_sys_calls_, sendmsg(_, _, _)) .WillOnce(testing::Invoke([this, str](int, const msghdr* message, int) { verifySendData(str, message); - return Api::SysCallSizeResult{-1, EAGAIN}; + return Api::SysCallSizeResult{-1, SOCKET_ERROR_AGAIN}; })); #endif EXPECT_DEBUG_DEATH(envoy_quic_writer_.WritePacket(str.data(), str.length(), self_address_, @@ -109,7 +111,7 @@ TEST_F(EnvoyQuicWriterTest, SendFailure) { EXPECT_CALL(os_sys_calls_, sendmsg(_, _, _)) .WillOnce(testing::Invoke([this, str](int, const msghdr* message, int) { verifySendData(str, message); - return Api::SysCallSizeResult{-1, ENOTSUP}; + return Api::SysCallSizeResult{-1, SOCKET_ERROR_NOT_SUP}; })); quic::WriteResult result = envoy_quic_writer_.WritePacket(str.data(), str.length(), self_address_, peer_address_, nullptr); @@ -123,7 +125,7 @@ TEST_F(EnvoyQuicWriterTest, SendFailureMessageTooBig) { EXPECT_CALL(os_sys_calls_, sendmsg(_, _, _)) .WillOnce(testing::Invoke([this, str](int, const msghdr* message, int) { verifySendData(str, message); - return Api::SysCallSizeResult{-1, EMSGSIZE}; + return Api::SysCallSizeResult{-1, SOCKET_ERROR_MSG_SIZE}; })); quic::WriteResult result = envoy_quic_writer_.WritePacket(str.data(), str.length(), self_address_, peer_address_, nullptr); diff --git a/test/extensions/quic_listeners/quiche/integration/BUILD b/test/extensions/quic_listeners/quiche/integration/BUILD index ec7a3cd5dcfad..a36af5d08dee5 100644 --- a/test/extensions/quic_listeners/quiche/integration/BUILD +++ b/test/extensions/quic_listeners/quiche/integration/BUILD @@ -1,15 +1,16 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( name = "quic_http_integration_test", + size = "medium", srcs = ["quic_http_integration_test.cc"], data = ["//test/config/integration/certs"], # Skipping as quiche quic_stream_send_buffer.cc does not currently 
compile on Windows @@ -28,10 +29,11 @@ envoy_cc_test( "//source/extensions/quic_listeners/quiche:quic_transport_socket_factory_lib", "//source/extensions/resource_monitors/injected_resource:config", "//test/extensions/quic_listeners/quiche:quic_test_utils_for_envoy_lib", + "//test/extensions/quic_listeners/quiche:test_utils_lib", "//test/integration:http_integration_lib", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", "@envoy_api//envoy/config/overload/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto", - "@envoy_api//envoy/extensions/transport_sockets/tls/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/transport_sockets/quic/v3:pkg_cc_proto", ], ) diff --git a/test/extensions/quic_listeners/quiche/integration/quic_http_integration_test.cc b/test/extensions/quic_listeners/quiche/integration/quic_http_integration_test.cc index 0b754471987c4..85688dbd0835c 100644 --- a/test/extensions/quic_listeners/quiche/integration/quic_http_integration_test.cc +++ b/test/extensions/quic_listeners/quiche/integration/quic_http_integration_test.cc @@ -1,12 +1,15 @@ +#include + #include #include "envoy/config/bootstrap/v3/bootstrap.pb.h" #include "envoy/config/overload/v3/overload.pb.h" #include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" -#include "envoy/extensions/transport_sockets/tls/v3/cert.pb.h" +#include "envoy/extensions/transport_sockets/quic/v3/quic_transport.pb.h" #include "test/config/utility.h" #include "test/integration/http_integration.h" +#include "test/integration/ssl_utility.h" #include "test/test_common/utility.h" #pragma GCC diagnostic push @@ -23,11 +26,14 @@ #include "extensions/quic_listeners/quiche/envoy_quic_client_session.h" #include "extensions/quic_listeners/quiche/envoy_quic_client_connection.h" -#include "extensions/quic_listeners/quiche/envoy_quic_fake_proof_verifier.h" +#include "extensions/quic_listeners/quiche/envoy_quic_proof_verifier.h" 
#include "extensions/quic_listeners/quiche/envoy_quic_connection_helper.h" #include "extensions/quic_listeners/quiche/envoy_quic_alarm_factory.h" #include "extensions/quic_listeners/quiche/envoy_quic_packet_writer.h" #include "extensions/quic_listeners/quiche/envoy_quic_utils.h" +#include "extensions/quic_listeners/quiche/quic_transport_socket_factory.h" +#include "test/extensions/quic_listeners/quiche/test_utils.h" +#include "extensions/transport_sockets/tls/context_config_impl.h" namespace Envoy { namespace Quic { @@ -43,15 +49,59 @@ class CodecClientCallbacksForTest : public Http::CodecClientCallbacks { Http::StreamResetReason last_stream_reset_reason_{Http::StreamResetReason::LocalReset}; }; -class QuicHttpIntegrationTest : public HttpIntegrationTest, - public testing::TestWithParam { +std::unique_ptr +createQuicClientTransportSocketFactory(const Ssl::ClientSslTransportOptions& options, Api::Api& api, + const std::string& san_to_match) { + std::string yaml_plain = R"EOF( + common_tls_context: + validation_context: + trusted_ca: + filename: "{{ test_rundir }}/test/config/integration/certs/cacert.pem" +)EOF"; + envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext tls_context; + TestUtility::loadFromYaml(TestEnvironment::substitute(yaml_plain), tls_context); + auto* common_context = tls_context.mutable_common_tls_context(); + + if (options.alpn_) { + common_context->add_alpn_protocols("h3"); + } + if (options.san_) { + common_context->mutable_validation_context()->add_match_subject_alt_names()->set_exact( + san_to_match); + } + for (const std::string& cipher_suite : options.cipher_suites_) { + common_context->mutable_tls_params()->add_cipher_suites(cipher_suite); + } + if (!options.sni_.empty()) { + tls_context.set_sni(options.sni_); + } + + common_context->mutable_tls_params()->set_tls_minimum_protocol_version(options.tls_version_); + common_context->mutable_tls_params()->set_tls_maximum_protocol_version(options.tls_version_); + + NiceMock 
mock_factory_ctx; + ON_CALL(mock_factory_ctx, api()).WillByDefault(testing::ReturnRef(api)); + auto cfg = std::make_unique( + tls_context, options.sigalgs_, mock_factory_ctx); + return std::make_unique(std::move(cfg)); +} + +class QuicHttpIntegrationTest : public HttpIntegrationTest, public QuicMultiVersionTest { public: QuicHttpIntegrationTest() - : HttpIntegrationTest(Http::CodecClient::Type::HTTP3, GetParam(), + : HttpIntegrationTest(Http::CodecClient::Type::HTTP3, GetParam().first, ConfigHelper::quicHttpProxyConfig()), - supported_versions_(quic::CurrentSupportedVersions()), - crypto_config_(std::make_unique()), conn_helper_(*dispatcher_), - alarm_factory_(*dispatcher_, *conn_helper_.GetClock()), + supported_versions_([]() { + if (GetParam().second == QuicVersionType::GquicQuicCrypto) { + return quic::CurrentSupportedVersionsWithQuicCrypto(); + } + bool use_http3 = GetParam().second == QuicVersionType::Iquic; + SetQuicReloadableFlag(quic_disable_version_draft_29, !use_http3); + SetQuicReloadableFlag(quic_disable_version_draft_27, !use_http3); + SetQuicReloadableFlag(quic_disable_version_draft_25, !use_http3); + return quic::CurrentSupportedVersions(); + }()), + conn_helper_(*dispatcher_), alarm_factory_(*dispatcher_, *conn_helper_.GetClock()), injected_resource_filename_(TestEnvironment::temporaryPath("injected_resource")), file_updater_(injected_resource_filename_) {} @@ -68,11 +118,11 @@ class QuicHttpIntegrationTest : public HttpIntegrationTest, // TODO(danzh) Implement retry upon version mismatch and modify test frame work to specify a // different version set on server side to test that. 
auto connection = std::make_unique( - getNextServerDesignatedConnectionId(), server_addr_, conn_helper_, alarm_factory_, + getNextConnectionId(), server_addr_, conn_helper_, alarm_factory_, quic::ParsedQuicVersionVector{supported_versions_[0]}, local_addr, *dispatcher_, nullptr); quic_connection_ = connection.get(); auto session = std::make_unique( - quic_config_, supported_versions_, std::move(connection), server_id_, &crypto_config_, + quic_config_, supported_versions_, std::move(connection), server_id_, crypto_config_.get(), &push_promise_index_, *dispatcher_, 0); session->Initialize(); return session; @@ -83,8 +133,11 @@ class QuicHttpIntegrationTest : public HttpIntegrationTest, // TODO(#8479) Propagate INVALID_VERSION error to caller and let caller to use server advertised // version list to create a new connection with mutually supported version and make client codec // again. - IntegrationCodecClientPtr makeRawHttpConnection(Network::ClientConnectionPtr&& conn) override { - IntegrationCodecClientPtr codec = HttpIntegrationTest::makeRawHttpConnection(std::move(conn)); + IntegrationCodecClientPtr makeRawHttpConnection( + Network::ClientConnectionPtr&& conn, + absl::optional http2_options) override { + IntegrationCodecClientPtr codec = + HttpIntegrationTest::makeRawHttpConnection(std::move(conn), http2_options); if (codec->disconnected()) { // Connection may get closed during version negotiation or handshake. ENVOY_LOG(error, "Fail to connect to server with error: {}", @@ -95,24 +148,26 @@ class QuicHttpIntegrationTest : public HttpIntegrationTest, return codec; } - quic::QuicConnectionId getNextServerDesignatedConnectionId() { - quic::QuicCryptoClientConfig::CachedState* cached = crypto_config_.LookupOrCreate(server_id_); - // If the cached state indicates that we should use a server-designated - // connection ID, then return that connection ID. - quic::QuicConnectionId conn_id = cached->has_server_designated_connection_id() - ? 
cached->GetNextServerDesignatedConnectionId() - : quic::EmptyQuicConnectionId(); - return conn_id.IsEmpty() ? quic::QuicUtils::CreateRandomConnectionId() : conn_id; + quic::QuicConnectionId getNextConnectionId() { + if (designated_connection_ids_.empty()) { + return quic::QuicUtils::CreateRandomConnectionId(); + } + quic::QuicConnectionId cid = designated_connection_ids_.front(); + designated_connection_ids_.pop_front(); + return cid; } void initialize() override { config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { - envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context; - ConfigHelper::initializeTls({}, *tls_context.mutable_common_tls_context()); + envoy::extensions::transport_sockets::quic::v3::QuicDownstreamTransport + quic_transport_socket_config; + auto tls_context = quic_transport_socket_config.mutable_downstream_tls_context(); + ConfigHelper::initializeTls(ConfigHelper::ServerSslOptions().setRsaCert(true).setTlsV13(true), + *tls_context->mutable_common_tls_context()); auto* filter_chain = bootstrap.mutable_static_resources()->mutable_listeners(0)->mutable_filter_chains(0); auto* transport_socket = filter_chain->mutable_transport_socket(); - transport_socket->mutable_typed_config()->PackFrom(tls_context); + transport_socket->mutable_typed_config()->PackFrom(quic_transport_socket_config); bootstrap.mutable_static_resources()->mutable_listeners(0)->set_reuse_port(set_reuse_port_); @@ -156,16 +211,24 @@ class QuicHttpIntegrationTest : public HttpIntegrationTest, updateResource(0); HttpIntegrationTest::initialize(); registerTestServerPorts({"http"}); + crypto_config_ = + std::make_unique(std::make_unique( + stats_store_, + createQuicClientTransportSocketFactory( + Ssl::ClientSslTransportOptions().setAlpn(true).setSan(true), *api_, san_to_match_) + ->clientContextConfig(), + timeSystem())); } void updateResource(double pressure) { file_updater_.update(absl::StrCat(pressure)); } protected: 
quic::QuicConfig quic_config_; - quic::QuicServerId server_id_{"example.com", 443, false}; + quic::QuicServerId server_id_{"lyft.com", 443, false}; + std::string san_to_match_{"spiffe://lyft.com/backend-team"}; quic::QuicClientPushPromiseIndex push_promise_index_; quic::ParsedQuicVersionVector supported_versions_; - quic::QuicCryptoClientConfig crypto_config_; + std::unique_ptr crypto_config_; EnvoyQuicConnectionHelper conn_helper_; EnvoyQuicAlarmFactory alarm_factory_; CodecClientCallbacksForTest client_codec_callback_; @@ -174,11 +237,11 @@ class QuicHttpIntegrationTest : public HttpIntegrationTest, bool set_reuse_port_{false}; const std::string injected_resource_filename_; AtomicFileUpdater file_updater_; + std::list designated_connection_ids_; }; -INSTANTIATE_TEST_SUITE_P(IpVersions, QuicHttpIntegrationTest, - testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), - TestUtility::ipTestParamsToString); +INSTANTIATE_TEST_SUITE_P(QuicHttpIntegrationTests, QuicHttpIntegrationTest, + testing::ValuesIn(generateTestParam()), testParamsToString); TEST_P(QuicHttpIntegrationTest, GetRequestAndEmptyResponse) { testRouterHeaderOnlyRequestAndResponse(); @@ -265,7 +328,7 @@ TEST_P(QuicHttpIntegrationTest, TestDelayedConnectionTeardownTimeoutTrigger) { response->waitForEndStream(); // The delayed close timeout should trigger since client is not closing the connection. 
EXPECT_TRUE(codec_client_->waitForDisconnect(std::chrono::milliseconds(5000))); - EXPECT_EQ(codec_client_->last_connection_event(), Network::ConnectionEvent::RemoteClose); + EXPECT_EQ(codec_client_->lastConnectionEvent(), Network::ConnectionEvent::RemoteClose); EXPECT_EQ(test_server_->counter("http.config_test.downstream_cx_delayed_close_timeout")->value(), 1); } @@ -276,22 +339,21 @@ TEST_P(QuicHttpIntegrationTest, MultipleQuicListenersWithBPF) { set_reuse_port_ = true; initialize(); std::vector codec_clients; - quic::QuicCryptoClientConfig::CachedState* cached = crypto_config_.LookupOrCreate(server_id_); for (size_t i = 1; i <= concurrency_; ++i) { // The BPF filter looks at the 1st word of connection id in the packet // header. And currently all QUIC versions support 8 bytes connection id. So // create connections with the first 4 bytes of connection id different from each // other so they should be evenly distributed. - cached->add_server_designated_connection_id(quic::test::TestConnectionId(i << 32)); + designated_connection_ids_.push_back(quic::test::TestConnectionId(i << 32)); codec_clients.push_back(makeHttpConnection(lookupPort("http"))); } - if (GetParam() == Network::Address::IpVersion::v4) { + if (GetParam().first == Network::Address::IpVersion::v4) { test_server_->waitForCounterEq("listener.0.0.0.0_0.downstream_cx_total", 8u); } else { test_server_->waitForCounterEq("listener.[__]_0.downstream_cx_total", 8u); } for (size_t i = 0; i < concurrency_; ++i) { - if (GetParam() == Network::Address::IpVersion::v4) { + if (GetParam().first == Network::Address::IpVersion::v4) { test_server_->waitForGaugeEq( fmt::format("listener.0.0.0.0_0.worker_{}.downstream_cx_active", i), 1u); test_server_->waitForCounterEq( @@ -319,16 +381,15 @@ TEST_P(QuicHttpIntegrationTest, MultipleQuicListenersNoBPF) { #undef SO_ATTACH_REUSEPORT_CBPF #endif std::vector codec_clients; - quic::QuicCryptoClientConfig::CachedState* cached = crypto_config_.LookupOrCreate(server_id_); for 
(size_t i = 1; i <= concurrency_; ++i) { // The BPF filter looks at the 1st byte of connection id in the packet // header. And currently all QUIC versions support 8 bytes connection id. So // create connections with the first 4 bytes of connection id different from each // other so they should be evenly distributed. - cached->add_server_designated_connection_id(quic::test::TestConnectionId(i << 32)); + designated_connection_ids_.push_back(quic::test::TestConnectionId(i << 32)); codec_clients.push_back(makeHttpConnection(lookupPort("http"))); } - if (GetParam() == Network::Address::IpVersion::v4) { + if (GetParam().first == Network::Address::IpVersion::v4) { test_server_->waitForCounterEq("listener.0.0.0.0_0.downstream_cx_total", 8u); } else { test_server_->waitForCounterEq("listener.[__]_0.downstream_cx_total", 8u); @@ -336,7 +397,7 @@ TEST_P(QuicHttpIntegrationTest, MultipleQuicListenersNoBPF) { // Even without BPF support, these connections should more or less distributed // across different workers. for (size_t i = 0; i < concurrency_; ++i) { - if (GetParam() == Network::Address::IpVersion::v4) { + if (GetParam().first == Network::Address::IpVersion::v4) { EXPECT_LT( test_server_->gauge(fmt::format("listener.0.0.0.0_0.worker_{}.downstream_cx_active", i)) ->value(), @@ -413,7 +474,7 @@ TEST_P(QuicHttpIntegrationTest, StopAcceptingConnectionsWhenOverloaded) { updateResource(0.9); test_server_->waitForGaugeEq("overload.envoy.overload_actions.stop_accepting_connections.active", 1); - codec_client_ = makeRawHttpConnection(makeClientConnection((lookupPort("http")))); + codec_client_ = makeRawHttpConnection(makeClientConnection((lookupPort("http"))), absl::nullopt); EXPECT_TRUE(codec_client_->disconnected()); // Reduce load a little to allow the connection to be accepted connection. 
@@ -432,21 +493,36 @@ TEST_P(QuicHttpIntegrationTest, StopAcceptingConnectionsWhenOverloaded) { upstream_request_->encodeData(10, true); response->waitForEndStream(); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); // New request should be rejected. auto response2 = codec_client_->makeHeaderOnlyRequest(default_request_headers_); response2->waitForEndStream(); - EXPECT_EQ("503", response2->headers().Status()->value().getStringView()); + EXPECT_EQ("503", response2->headers().getStatusValue()); EXPECT_EQ("envoy overloaded", response2->body()); codec_client_->close(); - EXPECT_TRUE(makeRawHttpConnection(makeClientConnection((lookupPort("http"))))->disconnected()); + EXPECT_TRUE(makeRawHttpConnection(makeClientConnection((lookupPort("http"))), absl::nullopt) + ->disconnected()); } TEST_P(QuicHttpIntegrationTest, AdminDrainDrainsListeners) { testAdminDrain(Http::CodecClient::Type::HTTP1); } +TEST_P(QuicHttpIntegrationTest, CertVerificationFailure) { + san_to_match_ = "www.random_domain.com"; + initialize(); + codec_client_ = makeRawHttpConnection(makeClientConnection((lookupPort("http"))), absl::nullopt); + EXPECT_FALSE(codec_client_->connected()); + std::string failure_reason = + GetParam().second == QuicVersionType::GquicQuicCrypto + ? 
"QUIC_PROOF_INVALID with details: Proof invalid: X509_verify_cert: certificate " + "verification error at depth 0: ok" + : "QUIC_HANDSHAKE_FAILED with details: TLS handshake failure (ENCRYPTION_HANDSHAKE) 46: " + "certificate unknown"; + EXPECT_EQ(failure_reason, codec_client_->connection()->transportFailureReason()); +} + } // namespace Quic } // namespace Envoy diff --git a/test/extensions/quic_listeners/quiche/platform/BUILD b/test/extensions/quic_listeners/quiche/platform/BUILD index f48ced9263f2a..9ccaf9cdedc21 100644 --- a/test/extensions/quic_listeners/quiche/platform/BUILD +++ b/test/extensions/quic_listeners/quiche/platform/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", @@ -7,6 +5,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( @@ -224,11 +224,6 @@ envoy_cc_test_library( ], ) -envoy_cc_test_library( - name = "spdy_platform_test_impl_lib", - hdrs = ["spdy_test_impl.h"], -) - envoy_cc_test( name = "envoy_quic_clock_test", srcs = ["envoy_quic_clock_test.cc"], diff --git a/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc b/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc index d15743831a8aa..68141aa940394 100644 --- a/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc +++ b/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc @@ -7,9 +7,9 @@ #include #include -#include #include "common/memory/stats.h" +#include "common/network/socket_impl.h" #include "common/network/utility.h" #include "extensions/quic_listeners/quiche/platform/flags_impl.h" @@ -239,7 +239,12 @@ TEST_F(QuicPlatformTest, QuicServerStats) { } TEST_F(QuicPlatformTest, QuicStackTraceTest) { +#if !defined(ENVOY_CONFIG_COVERAGE) && !defined(GCC_COMPILER) + // This doesn't work in coverage build because part of the stacktrace will be overwritten by + // __llvm_coverage_mapping + // Stack trace under gcc with 
optimizations on (-c opt) doesn't include the test name EXPECT_THAT(QuicStackTrace(), HasSubstr("QuicStackTraceTest")); +#endif } TEST_F(QuicPlatformTest, QuicSleep) { QuicSleep(QuicTime::Delta::FromMilliseconds(20)); } @@ -275,8 +280,8 @@ TEST_F(QuicPlatformTest, QuicThread) { EXPECT_EQ(1, value); // QuicThread will panic if it's started but not joined. - EXPECT_DEATH_LOG_TO_STDERR({ AdderThread(&value, 2).Start(); }, - "QuicThread should be joined before destruction"); + EXPECT_DEATH({ AdderThread(&value, 2).Start(); }, + "QuicThread should be joined before destruction"); } TEST_F(QuicPlatformTest, QuicUint128) { @@ -323,7 +328,7 @@ TEST_F(QuicPlatformTest, QuicLog) { EXPECT_LOG_CONTAINS("info", "i=1", QUIC_VLOG(1) << "i=" << (i = 1)); EXPECT_EQ(1, i); - errno = EINVAL; + errno = SOCKET_ERROR_INVAL; EXPECT_LOG_CONTAINS("info", "i=3:", QUIC_PLOG(INFO) << "i=" << (i = 3)); EXPECT_EQ(3, i); } @@ -392,9 +397,9 @@ TEST_F(QuicPlatformTest, QuicCHECK) { "CHECK failed:.* Supposed to fail in debug mode."); EXPECT_DEBUG_DEATH({ DCHECK(false); }, "CHECK failed"); - EXPECT_DEATH_LOG_TO_STDERR({ CHECK(false) << " Supposed to fail in all modes."; }, - "CHECK failed:.* Supposed to fail in all modes."); - EXPECT_DEATH_LOG_TO_STDERR({ CHECK(false); }, "CHECK failed"); + EXPECT_DEATH({ CHECK(false) << " Supposed to fail in all modes."; }, + "CHECK failed:.* Supposed to fail in all modes."); + EXPECT_DEATH({ CHECK(false); }, "CHECK failed"); } // Test the behaviors of the cross products of @@ -403,16 +408,16 @@ TEST_F(QuicPlatformTest, QuicCHECK) { TEST_F(QuicPlatformTest, QuicFatalLog) { #ifdef NDEBUG // Release build - EXPECT_DEATH_LOG_TO_STDERR(QUIC_LOG(FATAL) << "Should abort 0", "Should abort 0"); + EXPECT_DEATH(QUIC_LOG(FATAL) << "Should abort 0", "Should abort 0"); QUIC_LOG(DFATAL) << "Should not abort"; QUIC_DLOG(FATAL) << "Should compile out"; QUIC_DLOG(DFATAL) << "Should compile out"; #else // Debug build - EXPECT_DEATH_LOG_TO_STDERR(QUIC_LOG(FATAL) << "Should abort 1", 
"Should abort 1"); - EXPECT_DEATH_LOG_TO_STDERR(QUIC_LOG(DFATAL) << "Should abort 2", "Should abort 2"); - EXPECT_DEATH_LOG_TO_STDERR(QUIC_DLOG(FATAL) << "Should abort 3", "Should abort 3"); - EXPECT_DEATH_LOG_TO_STDERR(QUIC_DLOG(DFATAL) << "Should abort 4", "Should abort 4"); + EXPECT_DEATH(QUIC_LOG(FATAL) << "Should abort 1", "Should abort 1"); + EXPECT_DEATH(QUIC_LOG(DFATAL) << "Should abort 2", "Should abort 2"); + EXPECT_DEATH(QUIC_DLOG(FATAL) << "Should abort 3", "Should abort 3"); + EXPECT_DEATH(QUIC_DLOG(DFATAL) << "Should abort 4", "Should abort 4"); #endif } @@ -430,7 +435,7 @@ TEST_F(QuicPlatformTest, QuicNotReached) { #ifdef NDEBUG QUIC_NOTREACHED(); // Expect no-op. #else - EXPECT_DEATH_LOG_TO_STDERR(QUIC_NOTREACHED(), "not reached"); + EXPECT_DEATH(QUIC_NOTREACHED(), "not reached"); #endif } @@ -592,12 +597,12 @@ TEST_F(QuicPlatformTest, QuicFlags) { } TEST_F(QuicPlatformTest, QuicPccSender) { - EXPECT_DEATH_LOG_TO_STDERR(quic::CreatePccSender(/*clock=*/nullptr, /*rtt_stats=*/nullptr, - /*unacked_packets=*/nullptr, /*random=*/nullptr, - /*stats=*/nullptr, - /*initial_congestion_window=*/0, - /*max_congestion_window=*/0), - "PccSender is not supported."); + EXPECT_DEATH(quic::CreatePccSender(/*clock=*/nullptr, /*rtt_stats=*/nullptr, + /*unacked_packets=*/nullptr, /*random=*/nullptr, + /*stats=*/nullptr, + /*initial_congestion_window=*/0, + /*max_congestion_window=*/0), + "PccSender is not supported."); } class FileUtilsTest : public testing::Test { @@ -665,10 +670,9 @@ TEST_F(QuicPlatformTest, PickUnsedPort) { Envoy::Network::Test::getCanonicalLoopbackAddress(ip_version); Envoy::Network::Address::InstanceConstSharedPtr addr_with_port = Envoy::Network::Utility::getAddressWithPort(*addr, port); - Envoy::Network::IoHandlePtr io_handle = - addr_with_port->socket(Envoy::Network::Address::SocketType::Datagram); + Envoy::Network::SocketImpl sock(Envoy::Network::Socket::Type::Datagram, addr_with_port); // binding of given port should success. 
- EXPECT_EQ(0, addr_with_port->bind(io_handle->fd()).rc_); + EXPECT_EQ(0, sock.bind(addr_with_port).rc_); } } @@ -682,8 +686,8 @@ TEST_F(QuicPlatformTest, FailToPickUnsedPort) { }); // Fail bind call's to mimic port exhaustion. EXPECT_CALL(os_sys_calls, bind(_, _, _)) - .WillRepeatedly(Return(Envoy::Api::SysCallIntResult{-1, EADDRINUSE})); - EXPECT_DEATH_LOG_TO_STDERR(QuicPickServerPortForTestsOrDie(), "Failed to pick a port for test."); + .WillRepeatedly(Return(Envoy::Api::SysCallIntResult{-1, SOCKET_ERROR_ADDR_IN_USE})); + EXPECT_DEATH(QuicPickServerPortForTestsOrDie(), "Failed to pick a port for test."); } TEST_F(QuicPlatformTest, TestEnvoyQuicBufferAllocator) { diff --git a/test/extensions/quic_listeners/quiche/platform/quic_port_utils_impl.cc b/test/extensions/quic_listeners/quiche/platform/quic_port_utils_impl.cc index de3f39a001a7d..ab902f5460737 100644 --- a/test/extensions/quic_listeners/quiche/platform/quic_port_utils_impl.cc +++ b/test/extensions/quic_listeners/quiche/platform/quic_port_utils_impl.cc @@ -32,8 +32,8 @@ int QuicPickServerPortForTestsOrDieImpl() { fmt::format("{}:{}", Envoy::Network::Test::getAnyAddressUrlString(ip_version), /*port*/ 0), /*v6only*/ false); ASSERT(addr_port != nullptr); - addr_port = Envoy::Network::Test::findOrCheckFreePort( - addr_port, Envoy::Network::Address::SocketType::Datagram); + addr_port = + Envoy::Network::Test::findOrCheckFreePort(addr_port, Envoy::Network::Socket::Type::Datagram); if (addr_port != nullptr && addr_port->ip() != nullptr) { // Find a port. 
return addr_port->ip()->port(); diff --git a/test/extensions/quic_listeners/quiche/quic_io_handle_wrapper_test.cc b/test/extensions/quic_listeners/quiche/quic_io_handle_wrapper_test.cc index 63d2aef7b3924..2361a0399b0dd 100644 --- a/test/extensions/quic_listeners/quiche/quic_io_handle_wrapper_test.cc +++ b/test/extensions/quic_listeners/quiche/quic_io_handle_wrapper_test.cc @@ -57,6 +57,25 @@ TEST_F(QuicIoHandleWrapperTest, DelegateIoHandleCalls) { EXPECT_CALL(os_sys_calls_, sendmsg(fd, _, 0)).WillOnce(Return(Api::SysCallSizeResult{5u, 0})); wrapper_->sendmsg(&slice, 1, 0, /*self_ip=*/nullptr, *addr); + EXPECT_CALL(os_sys_calls_, getsockname(_, _, _)).WillOnce(Return(Api::SysCallIntResult{0, 0})); + wrapper_->domain(); + + EXPECT_CALL(os_sys_calls_, getsockname(_, _, _)) + .WillOnce(Invoke([](os_fd_t, sockaddr* addr, socklen_t* addrlen) -> Api::SysCallIntResult { + addr->sa_family = AF_INET6; + *addrlen = sizeof(sockaddr_in6); + return Api::SysCallIntResult{0, 0}; + })); + addr = wrapper_->localAddress(); + + EXPECT_CALL(os_sys_calls_, getpeername(_, _, _)) + .WillOnce(Invoke([](os_fd_t, sockaddr* addr, socklen_t* addrlen) -> Api::SysCallIntResult { + addr->sa_family = AF_INET6; + *addrlen = sizeof(sockaddr_in6); + return Api::SysCallIntResult{0, 0}; + })); + addr = wrapper_->peerAddress(); + Network::IoHandle::RecvMsgOutput output(1, nullptr); EXPECT_CALL(os_sys_calls_, recvmsg(fd, _, 0)).WillOnce(Invoke([](os_fd_t, msghdr* msg, int) { sockaddr_storage ss; @@ -93,6 +112,9 @@ TEST_F(QuicIoHandleWrapperTest, DelegateIoHandleCalls) { EXPECT_DEBUG_DEATH(wrapper_->recvmmsg(slices, /*self_port=*/12345, output2), "recvmmsg is called after close"); + EXPECT_CALL(os_sys_calls_, supportsUdpGro()); + wrapper_->supportsUdpGro(); + EXPECT_CALL(os_sys_calls_, supportsMmsg()); wrapper_->supportsMmsg(); } diff --git a/test/extensions/quic_listeners/quiche/test_proof_source.h b/test/extensions/quic_listeners/quiche/test_proof_source.h new file mode 100644 index 
0000000000000..8b1baf920d69b --- /dev/null +++ b/test/extensions/quic_listeners/quiche/test_proof_source.h @@ -0,0 +1,55 @@ +#ifdef __GNUC__ +#pragma GCC diagnostic push +// QUICHE allows unused parameters. +#pragma GCC diagnostic ignored "-Wunused-parameter" +// QUICHE uses offsetof(). +#pragma GCC diagnostic ignored "-Winvalid-offsetof" +#pragma GCC diagnostic ignored "-Wtype-limits" +#include "quiche/quic/test_tools/test_certificates.h" + +#pragma GCC diagnostic pop +#else +#include "quiche/quic/test_tools/test_certificates.h" +#endif + +#include + +#include "test/mocks/network/mocks.h" +#include "extensions/quic_listeners/quiche/envoy_quic_proof_source_base.h" + +namespace Envoy { +namespace Quic { + +// A test ProofSource which always provide a hard-coded test certificate in +// QUICHE and a fake signature. +class TestProofSource : public EnvoyQuicProofSourceBase { +public: + quic::QuicReferenceCountedPointer + GetCertChain(const quic::QuicSocketAddress& /*server_address*/, + const quic::QuicSocketAddress& /*client_address*/, + const std::string& /*hostname*/) override { + return cert_chain_; + } + + const Network::MockFilterChain& filterChain() const { return filter_chain_; } + +protected: + void signPayload(const quic::QuicSocketAddress& /*server_address*/, + const quic::QuicSocketAddress& /*client_address*/, + const std::string& /*hostname*/, uint16_t /*signature_algorithm*/, + quiche::QuicheStringPiece in, + std::unique_ptr callback) override { + callback->Run(true, absl::StrCat("Fake signature for { ", in, " }"), + std::make_unique(filter_chain_)); + } + +private: + quic::QuicReferenceCountedPointer cert_chain_{ + new quic::ProofSource::Chain( + std::vector{std::string(quic::test::kTestCertificate)})}; + + Network::MockFilterChain filter_chain_; +}; + +} // namespace Quic +} // namespace Envoy diff --git a/test/extensions/quic_listeners/quiche/test_proof_verifier.h b/test/extensions/quic_listeners/quiche/test_proof_verifier.h new file mode 100644 index 
0000000000000..77dada22d1cdc --- /dev/null +++ b/test/extensions/quic_listeners/quiche/test_proof_verifier.h @@ -0,0 +1,30 @@ +#include "extensions/quic_listeners/quiche/envoy_quic_proof_verifier_base.h" + +namespace Envoy { +namespace Quic { + +// A test quic::ProofVerifier which always approves the certs and signature. +class TestProofVerifier : public EnvoyQuicProofVerifierBase { +public: + // quic::ProofVerifier + quic::QuicAsyncStatus + VerifyCertChain(const std::string& /*hostname*/, const uint16_t /*port*/, + const std::vector& /*certs*/, const std::string& /*ocsp_response*/, + const std::string& /*cert_sct*/, const quic::ProofVerifyContext* /*context*/, + std::string* /*error_details*/, + std::unique_ptr* /*details*/, + std::unique_ptr /*callback*/) override { + return quic::QUIC_SUCCESS; + } + +protected: + // EnvoyQuicProofVerifierBase + bool verifySignature(const std::string& /*server_config*/, absl::string_view /*chlo_hash*/, + const std::string& /*cert*/, const std::string& /*signature*/, + std::string* /*error_details*/) override { + return true; + } +}; + +} // namespace Quic +} // namespace Envoy diff --git a/test/extensions/quic_listeners/quiche/test_utils.h b/test/extensions/quic_listeners/quiche/test_utils.h index 3e07ec1a5c1a5..b9cc942af8401 100644 --- a/test/extensions/quic_listeners/quiche/test_utils.h +++ b/test/extensions/quic_listeners/quiche/test_utils.h @@ -9,11 +9,15 @@ #include "quiche/quic/core/http/quic_spdy_session.h" #include "quiche/quic/core/http/quic_spdy_client_session.h" #include "quiche/quic/test_tools/quic_test_utils.h" +#include "quiche/quic/test_tools/first_flight.h" #include "quiche/quic/core/quic_utils.h" #include "quiche/quic/test_tools/crypto_test_utils.h" - +#include "quiche/quic/test_tools/quic_config_peer.h" #pragma GCC diagnostic pop +#include "extensions/quic_listeners/quiche/envoy_quic_utils.h" +#include "test/test_common/environment.h" + namespace Envoy { namespace Quic { @@ -40,6 +44,7 @@ class 
MockEnvoyQuicSession : public quic::QuicSpdySession, public QuicFilterMana (quic::QuicStreamId id, size_t write_length, quic::QuicStreamOffset offset, quic::StreamSendingState state, quic::TransmissionType type, quiche::QuicheOptional level)); + MOCK_METHOD(bool, ShouldYield, (quic::QuicStreamId id)); absl::string_view requestedServerName() const override { return {GetCryptoStream()->crypto_negotiated_params().sni}; @@ -83,6 +88,7 @@ class MockEnvoyQuicClientSession : public quic::QuicSpdyClientSession, (quic::QuicStreamId id, size_t write_length, quic::QuicStreamOffset offset, quic::StreamSendingState state, quic::TransmissionType type, quiche::QuicheOptional level)); + MOCK_METHOD(bool, ShouldYield, (quic::QuicStreamId id)); absl::string_view requestedServerName() const override { return {GetCryptoStream()->crypto_negotiated_params().sni}; @@ -97,5 +103,96 @@ class MockEnvoyQuicClientSession : public quic::QuicSpdyClientSession, quic::QuicCryptoClientConfig crypto_config_; }; +Buffer::OwnedImpl +generateChloPacketToSend(quic::ParsedQuicVersion quic_version, quic::QuicConfig& quic_config, + quic::QuicCryptoServerConfig& crypto_config, + quic::QuicConnectionId connection_id, quic::QuicClock& clock, + const quic::QuicSocketAddress& server_address, + const quic::QuicSocketAddress& client_address, std::string sni) { + if (quic_version.UsesTls()) { + std::unique_ptr packet = + std::move(quic::test::GetFirstFlightOfPackets(quic_version, quic_config, connection_id)[0]); + return Buffer::OwnedImpl(packet->data(), packet->length()); + } + quic::CryptoHandshakeMessage chlo = quic::test::crypto_test_utils::GenerateDefaultInchoateCHLO( + &clock, quic_version.transport_version, &crypto_config); + chlo.SetVector(quic::kCOPT, quic::QuicTagVector{quic::kREJ}); + chlo.SetStringPiece(quic::kSNI, sni); + quic::CryptoHandshakeMessage full_chlo; + quic::QuicReferenceCountedPointer signed_config( + new quic::QuicSignedServerConfig); + quic::QuicCompressedCertsCache cache( + 
quic::QuicCompressedCertsCache::kQuicCompressedCertsCacheSize); + quic::test::crypto_test_utils::GenerateFullCHLO(chlo, &crypto_config, server_address, + client_address, quic_version.transport_version, + &clock, signed_config, &cache, &full_chlo); + // Overwrite version label to the version passed in. + full_chlo.SetVersion(quic::kVER, quic_version); + quic::QuicConfig quic_config_tmp; + quic_config_tmp.ToHandshakeMessage(&full_chlo, quic_version.transport_version); + + std::string packet_content(full_chlo.GetSerialized().AsStringPiece()); + quic::ParsedQuicVersionVector supported_versions{quic_version}; + auto encrypted_packet = + std::unique_ptr(quic::test::ConstructEncryptedPacket( + connection_id, quic::EmptyQuicConnectionId(), + /*version_flag=*/true, /*reset_flag*/ false, + /*packet_number=*/1, packet_content, quic::CONNECTION_ID_PRESENT, + quic::CONNECTION_ID_ABSENT, quic::PACKET_4BYTE_PACKET_NUMBER, &supported_versions)); + + return Buffer::OwnedImpl(encrypted_packet->data(), encrypted_packet->length()); +} + +void setQuicConfigWithDefaultValues(quic::QuicConfig* config) { + quic::test::QuicConfigPeer::SetReceivedMaxBidirectionalStreams( + config, quic::kDefaultMaxStreamsPerConnection); + quic::test::QuicConfigPeer::SetReceivedMaxUnidirectionalStreams( + config, quic::kDefaultMaxStreamsPerConnection); + quic::test::QuicConfigPeer::SetReceivedInitialMaxStreamDataBytesUnidirectional( + config, quic::kMinimumFlowControlSendWindow); + quic::test::QuicConfigPeer::SetReceivedInitialMaxStreamDataBytesIncomingBidirectional( + config, quic::kMinimumFlowControlSendWindow); + quic::test::QuicConfigPeer::SetReceivedInitialMaxStreamDataBytesOutgoingBidirectional( + config, quic::kMinimumFlowControlSendWindow); + quic::test::QuicConfigPeer::SetReceivedInitialSessionFlowControlWindow( + config, quic::kMinimumFlowControlSendWindow); +} + +enum class QuicVersionType { + GquicQuicCrypto, + GquicTls, + Iquic, +}; + +// A test suite with variation of ip version and a knob to 
turn on/off IETF QUIC implementation. +class QuicMultiVersionTest + : public testing::TestWithParam> {}; + +std::vector> generateTestParam() { + std::vector> param; + for (auto ip_version : TestEnvironment::getIpVersionsForTest()) { + param.emplace_back(ip_version, QuicVersionType::GquicQuicCrypto); + param.emplace_back(ip_version, QuicVersionType::GquicTls); + param.emplace_back(ip_version, QuicVersionType::Iquic); + } + + return param; +} + +std::string testParamsToString( + const ::testing::TestParamInfo>& + params) { + std::string ip_version = params.param.first == Network::Address::IpVersion::v4 ? "IPv4" : "IPv6"; + switch (params.param.second) { + case QuicVersionType::GquicQuicCrypto: + return absl::StrCat(ip_version, "_UseGQuicWithQuicCrypto"); + case QuicVersionType::GquicTls: + return absl::StrCat(ip_version, "_UseGQuicWithTLS"); + case QuicVersionType::Iquic: + return absl::StrCat(ip_version, "_UseHttp3"); + } + NOT_IMPLEMENTED_GCOVR_EXCL_LINE; +} + } // namespace Quic } // namespace Envoy diff --git a/test/extensions/resource_monitors/fixed_heap/BUILD b/test/extensions/resource_monitors/fixed_heap/BUILD index 4f8594bbe4ea6..2d28542abde3b 100644 --- a/test/extensions/resource_monitors/fixed_heap/BUILD +++ b/test/extensions/resource_monitors/fixed_heap/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/resource_monitors/fixed_heap/fixed_heap_monitor_test.cc b/test/extensions/resource_monitors/fixed_heap/fixed_heap_monitor_test.cc index 6b71e7bb5cede..905f083e15aaa 100644 --- a/test/extensions/resource_monitors/fixed_heap/fixed_heap_monitor_test.cc +++ b/test/extensions/resource_monitors/fixed_heap/fixed_heap_monitor_test.cc @@ -53,6 +53,21 @@ TEST(FixedHeapMonitorTest, ComputesCorrectUsage) { EXPECT_EQ(resource.pressure(), 0.7); } 
+TEST(FixedHeapMonitorTest, ComputeUsageWithRealMemoryStats) { + envoy::config::resource_monitor::fixed_heap::v2alpha::FixedHeapConfig config; + uint64_t max_heap = 1024 * 1024 * 1024; + config.set_max_heap_size_bytes(max_heap); + auto stats_reader = std::make_unique(); + const double expected_usage = + (stats_reader->reservedHeapBytes() - stats_reader->unmappedHeapBytes()) / + static_cast(max_heap); + std::unique_ptr monitor(new FixedHeapMonitor(config, std::move(stats_reader))); + + ResourcePressure resource; + monitor->updateResourceUsage(resource); + EXPECT_NEAR(resource.pressure(), expected_usage, 0.0005); +} + } // namespace } // namespace FixedHeapMonitor } // namespace ResourceMonitors diff --git a/test/extensions/resource_monitors/injected_resource/BUILD b/test/extensions/resource_monitors/injected_resource/BUILD index 034d9b1365f07..e8f32dc41060d 100644 --- a/test/extensions/resource_monitors/injected_resource/BUILD +++ b/test/extensions/resource_monitors/injected_resource/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", @@ -10,11 +8,14 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( name = "injected_resource_monitor_test", srcs = ["injected_resource_monitor_test.cc"], + tags = ["fails_on_windows"], deps = [ "//source/common/event:dispatcher_lib", "//source/common/stats:isolated_store_lib", diff --git a/test/extensions/retry/host/omit_canary_hosts/BUILD b/test/extensions/retry/host/omit_canary_hosts/BUILD index 605fc111a64c1..98d0b8e0d8230 100644 --- a/test/extensions/retry/host/omit_canary_hosts/BUILD +++ b/test/extensions/retry/host/omit_canary_hosts/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git 
a/test/extensions/retry/host/omit_canary_hosts/config_test.cc b/test/extensions/retry/host/omit_canary_hosts/config_test.cc index 4794aefa32a1e..5ce5870057ee5 100644 --- a/test/extensions/retry/host/omit_canary_hosts/config_test.cc +++ b/test/extensions/retry/host/omit_canary_hosts/config_test.cc @@ -2,7 +2,6 @@ #include "envoy/upstream/retry.h" #include "extensions/retry/host/omit_canary_hosts/config.h" -#include "extensions/retry/host/well_known_names.h" #include "test/mocks/upstream/mocks.h" @@ -19,7 +18,7 @@ namespace { TEST(OmitCanaryHostsRetryPredicateTest, PredicateTest) { auto factory = Registry::FactoryRegistry::getFactory( - RetryHostPredicateValues::get().OmitCanaryHostsPredicate); + "envoy.retry_host_predicates.omit_canary_hosts"); ASSERT_NE(nullptr, factory); @@ -34,6 +33,18 @@ TEST(OmitCanaryHostsRetryPredicateTest, PredicateTest) { ASSERT_FALSE(predicate->shouldSelectAnotherHost(*host1)); ASSERT_TRUE(predicate->shouldSelectAnotherHost(*host2)); + predicate->onHostAttempted(host1); +} + +TEST(OmitCanaryHostsRetryPredicateTest, EmptyConfig) { + auto factory = Registry::FactoryRegistry::getFactory( + "envoy.retry_host_predicates.omit_canary_hosts"); + + ASSERT_NE(nullptr, factory); + + ProtobufTypes::MessagePtr config = factory->createEmptyConfigProto(); + EXPECT_TRUE(dynamic_cast( + config.get())); } } // namespace diff --git a/test/extensions/retry/host/omit_host_metadata/BUILD b/test/extensions/retry/host/omit_host_metadata/BUILD index c219d6695b294..feb3fbbdeff2a 100644 --- a/test/extensions/retry/host/omit_host_metadata/BUILD +++ b/test/extensions/retry/host/omit_host_metadata/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( @@ -16,7 +16,6 @@ envoy_extension_cc_test( srcs = ["config_test.cc"], extension_name = "envoy.retry_host_predicates.omit_host_metadata", 
deps = [ - "//source/extensions/retry/host:well_known_names", "//source/extensions/retry/host/omit_host_metadata:config", "//test/mocks/upstream:upstream_mocks", "@envoy_api//envoy/extensions/retry/host/omit_host_metadata/v3:pkg_cc_proto", diff --git a/test/extensions/retry/host/omit_host_metadata/config_test.cc b/test/extensions/retry/host/omit_host_metadata/config_test.cc index 69a2b47e8e3dd..039f9d231eb01 100644 --- a/test/extensions/retry/host/omit_host_metadata/config_test.cc +++ b/test/extensions/retry/host/omit_host_metadata/config_test.cc @@ -3,7 +3,6 @@ #include "envoy/upstream/retry.h" #include "extensions/retry/host/omit_host_metadata/omit_host_metadata.h" -#include "extensions/retry/host/well_known_names.h" #include "test/mocks/upstream/mocks.h" @@ -20,7 +19,7 @@ namespace { TEST(OmitHostsRetryPredicateTest, PredicateTest) { auto factory = Registry::FactoryRegistry::getFactory( - RetryHostPredicateValues::get().OmitHostMetadataPredicate); + "envoy.retry_host_predicates.omit_host_metadata"); ASSERT_NE(nullptr, factory); @@ -82,6 +81,8 @@ TEST(OmitHostsRetryPredicateTest, PredicateTest) { )EOF")))); ASSERT_FALSE(predicate->shouldSelectAnotherHost(*host)); + + predicate->onHostAttempted(host); } } // namespace } // namespace Host diff --git a/test/extensions/retry/host/previous_hosts/BUILD b/test/extensions/retry/host/previous_hosts/BUILD index 961c3897038f2..308fa11445d81 100644 --- a/test/extensions/retry/host/previous_hosts/BUILD +++ b/test/extensions/retry/host/previous_hosts/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/retry/host/previous_hosts/config_test.cc b/test/extensions/retry/host/previous_hosts/config_test.cc index 5e83f445f63a0..ed97542fab7b4 100644 --- a/test/extensions/retry/host/previous_hosts/config_test.cc +++ 
b/test/extensions/retry/host/previous_hosts/config_test.cc @@ -2,7 +2,6 @@ #include "envoy/upstream/retry.h" #include "extensions/retry/host/previous_hosts/config.h" -#include "extensions/retry/host/well_known_names.h" #include "test/mocks/upstream/mocks.h" @@ -19,7 +18,7 @@ namespace { TEST(PreviousHostsRetryPredicateConfigTest, PredicateTest) { auto factory = Registry::FactoryRegistry::getFactory( - RetryHostPredicateValues::get().PreviousHostsPredicate); + "envoy.retry_host_predicates.previous_hosts"); ASSERT_NE(nullptr, factory); @@ -48,6 +47,17 @@ TEST(PreviousHostsRetryPredicateConfigTest, PredicateTest) { ASSERT_TRUE(predicate->shouldSelectAnotherHost(*host2)); } +TEST(PreviousHostsRetryPredicateConfigTest, EmptyConfig) { + auto factory = Registry::FactoryRegistry::getFactory( + "envoy.retry_host_predicates.previous_hosts"); + + ASSERT_NE(nullptr, factory); + + ProtobufTypes::MessagePtr config = factory->createEmptyConfigProto(); + EXPECT_TRUE(dynamic_cast( + config.get())); +} + } // namespace } // namespace Host } // namespace Retry diff --git a/test/extensions/retry/priority/previous_priorities/BUILD b/test/extensions/retry/priority/previous_priorities/BUILD index 712f9874b54c5..f06784b4b7a3b 100644 --- a/test/extensions/retry/priority/previous_priorities/BUILD +++ b/test/extensions/retry/priority/previous_priorities/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/retry/priority/previous_priorities/config_test.cc b/test/extensions/retry/priority/previous_priorities/config_test.cc index d715b3c12653b..89b00310acfad 100644 --- a/test/extensions/retry/priority/previous_priorities/config_test.cc +++ b/test/extensions/retry/priority/previous_priorities/config_test.cc @@ -49,9 +49,12 @@ class RetryPriorityTest : public testing::Test { } void 
verifyPriorityLoads(const Upstream::HealthyLoad& expected_healthy_priority_load, - const Upstream::DegradedLoad& expected_degraded_priority_load) { - const auto& priority_loads = - retry_priority_->determinePriorityLoad(priority_set_, original_priority_load_); + const Upstream::DegradedLoad& expected_degraded_priority_load, + absl::optional + priority_mapping_func = absl::nullopt) { + const auto& priority_loads = retry_priority_->determinePriorityLoad( + priority_set_, original_priority_load_, + priority_mapping_func.value_or(Upstream::RetryPriority::defaultPriorityMapping)); // Unwrapping gives a nicer gtest error. ASSERT_EQ(priority_loads.healthy_priority_load_.get(), expected_healthy_priority_load.get()); ASSERT_EQ(priority_loads.degraded_priority_load_.get(), expected_degraded_priority_load.get()); @@ -94,6 +97,57 @@ TEST_F(RetryPriorityTest, DefaultFrequency) { verifyPriorityLoads(original_priority_load, original_degraded_priority_load); } +TEST_F(RetryPriorityTest, PriorityMappingCallback) { + const Upstream::HealthyLoad original_priority_load({100, 0}); + const Upstream::DegradedLoad original_degraded_priority_load({0, 0}); + + initialize(original_priority_load, original_degraded_priority_load); + addHosts(0, 2, 2); + addHosts(1, 2, 2); + + auto host1 = std::make_shared>(); + EXPECT_CALL(*host1, priority()).Times(0); + + auto host2 = std::make_shared>(); + EXPECT_CALL(*host2, priority()).Times(0); + + Upstream::RetryPriority::PriorityMappingFunc priority_mapping_func = + [&](const Upstream::HostDescription& host) -> absl::optional { + if (&host == host1.get()) { + return 0; + } + ASSERT(&host == host2.get()); + return 1; + }; + + const Upstream::HealthyLoad expected_priority_load({0, 100}); + const Upstream::DegradedLoad expected_degraded_priority_load({0, 0}); + + // After attempting a host in P0, P1 should receive all the load. 
+ retry_priority_->onHostAttempted(host1); + verifyPriorityLoads(expected_priority_load, expected_degraded_priority_load, + priority_mapping_func); + + // With a mapping function that doesn't recognize host2, results will remain the same as after + // only trying host1. + retry_priority_->onHostAttempted(host2); + Upstream::RetryPriority::PriorityMappingFunc priority_mapping_func_no_host2 = + [&](const Upstream::HostDescription& host) -> absl::optional { + if (&host == host1.get()) { + return 0; + } + ASSERT(&host == host2.get()); + return absl::nullopt; + }; + verifyPriorityLoads(expected_priority_load, expected_degraded_priority_load, + priority_mapping_func_no_host2); + + // After we've tried host2, we've attempted all priorities and should reset back to the original + // priority load. + verifyPriorityLoads(original_priority_load, original_degraded_priority_load, + priority_mapping_func); +} + // Tests that we handle all hosts being unhealthy in the original priority set. TEST_F(RetryPriorityTest, NoHealthyUpstreams) { const Upstream::HealthyLoad original_priority_load({0, 0, 0}); diff --git a/test/extensions/stats_sinks/common/statsd/BUILD b/test/extensions/stats_sinks/common/statsd/BUILD index fdd7b8b28df25..49afc25b4e941 100644 --- a/test/extensions/stats_sinks/common/statsd/BUILD +++ b/test/extensions/stats_sinks/common/statsd/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( @@ -30,6 +30,7 @@ envoy_cc_test( envoy_cc_test( name = "udp_statsd_test", srcs = ["udp_statsd_test.cc"], + tags = ["fails_on_windows"], deps = [ "//source/common/network:address_lib", "//source/common/network:utility_lib", diff --git a/test/extensions/stats_sinks/common/statsd/udp_statsd_test.cc b/test/extensions/stats_sinks/common/statsd/udp_statsd_test.cc index c2b3ea48542cc..a97f07dbce007 100644 --- 
a/test/extensions/stats_sinks/common/statsd/udp_statsd_test.cc +++ b/test/extensions/stats_sinks/common/statsd/udp_statsd_test.cc @@ -5,6 +5,7 @@ #include "common/api/os_sys_calls_impl.h" #include "common/network/address_impl.h" +#include "common/network/socket_impl.h" #include "common/network/utility.h" #include "extensions/stat_sinks/common/statsd/statsd.h" @@ -31,6 +32,15 @@ namespace { class MockWriter : public UdpStatsdSink::Writer { public: MOCK_METHOD(void, write, (const std::string& message)); + MOCK_METHOD(void, writeBuffer, (Buffer::Instance & buffer)); + + void delegateBufferFake() { + ON_CALL(*this, writeBuffer).WillByDefault([this](Buffer::Instance& buffer) { + this->buffer_writes.push_back(buffer.toString()); + }); + } + + std::vector buffer_writes; }; // Regression test for https://github.com/envoyproxy/envoy/issues/8911 @@ -51,19 +61,14 @@ TEST(UdpOverUdsStatsdSinkTest, InitWithPipeAddress) { sink.flush(snapshot); // Start the server. - // TODO(mattklein123): Right now all sockets are non-blocking. Move this non-blocking - // modification back to the abstraction layer so it will work for multiple platforms. Additionally - // this uses low level networking calls because our abstractions in this area only work for IP - // sockets. Revisit this also. - auto io_handle = uds_address->socket(Network::Address::SocketType::Datagram); - RELEASE_ASSERT( - Api::OsSysCallsSingleton::get().setsocketblocking(io_handle->fd(), false).rc_ != -1, ""); - uds_address->bind(io_handle->fd()); + Network::SocketImpl sock(Network::Socket::Type::Datagram, uds_address); + RELEASE_ASSERT(sock.setBlockingForTest(false).rc_ != -1, ""); + sock.bind(uds_address); // Do the flush which should have somewhere to write now. 
sink.flush(snapshot); Buffer::OwnedImpl receive_buffer; - receive_buffer.read(*io_handle, 32); + receive_buffer.read(sock.ioHandle(), 32); EXPECT_EQ("envoy.test_counter:1|c", receive_buffer.toString()); } @@ -156,8 +161,9 @@ TEST_P(UdpStatsdSinkWithTagsTest, InitWithIpAddress) { TEST(UdpStatsdSinkTest, CheckActualStats) { NiceMock snapshot; auto writer_ptr = std::make_shared>(); + writer_ptr->delegateBufferFake(); NiceMock tls_; - UdpStatsdSink sink(tls_, writer_ptr, false); + UdpStatsdSink sink(tls_, writer_ptr, false, getDefaultPrefix(), 1024); NiceMock counter; counter.name_ = "test_counter"; @@ -165,9 +171,11 @@ TEST(UdpStatsdSinkTest, CheckActualStats) { counter.latch_ = 1; snapshot.counters_.push_back({1, counter}); - EXPECT_CALL(*std::dynamic_pointer_cast>(writer_ptr), - write("envoy.test_counter:1|c")); + EXPECT_CALL(*std::dynamic_pointer_cast>(writer_ptr), writeBuffer(_)) + .Times(1); sink.flush(snapshot); + EXPECT_EQ(writer_ptr->buffer_writes.size(), 1); + EXPECT_EQ(writer_ptr->buffer_writes.at(0), "envoy.test_counter:1|c"); counter.used_ = false; NiceMock gauge; @@ -176,9 +184,10 @@ TEST(UdpStatsdSinkTest, CheckActualStats) { gauge.used_ = true; snapshot.gauges_.push_back(gauge); - EXPECT_CALL(*std::dynamic_pointer_cast>(writer_ptr), - write("envoy.test_gauge:1|g")); + EXPECT_CALL(*std::dynamic_pointer_cast>(writer_ptr), writeBuffer(_)); sink.flush(snapshot); + EXPECT_EQ(writer_ptr->buffer_writes.size(), 2); + EXPECT_EQ(writer_ptr->buffer_writes.at(1), "envoy.test_gauge:1|g"); NiceMock timer; timer.name_ = "test_timer"; @@ -189,11 +198,13 @@ TEST(UdpStatsdSinkTest, CheckActualStats) { tls_.shutdownThread(); } -TEST(UdpStatsdSinkTest, CheckActualStatsWithCustomPrefix) { +TEST(UdpStatsdSinkTest, CheckMetricLargerThanBuffer) { NiceMock snapshot; auto writer_ptr = std::make_shared>(); + writer_ptr->delegateBufferFake(); NiceMock tls_; - UdpStatsdSink sink(tls_, writer_ptr, false, "test_prefix"); + uint64_t buffer_size = 4; + UdpStatsdSink sink(tls_, 
writer_ptr, false, getDefaultPrefix(), buffer_size); NiceMock counter; counter.name_ = "test_counter"; @@ -201,11 +212,112 @@ TEST(UdpStatsdSinkTest, CheckActualStatsWithCustomPrefix) { counter.latch_ = 1; snapshot.counters_.push_back({1, counter}); + // Expect the metric to skip the buffer EXPECT_CALL(*std::dynamic_pointer_cast>(writer_ptr), - write("test_prefix.test_counter:1|c")); + write("envoy.test_counter:1|c")); sink.flush(snapshot); counter.used_ = false; + NiceMock gauge; + gauge.name_ = "test_gauge"; + gauge.value_ = 1; + gauge.used_ = true; + snapshot.gauges_.push_back(gauge); + + // Expect the metric to skip the buffer + EXPECT_CALL(*std::dynamic_pointer_cast>(writer_ptr), + write("envoy.test_gauge:1|g")); + sink.flush(snapshot); + + tls_.shutdownThread(); +} + +TEST(UdpStatsdSinkTest, CheckBufferedWritesWithinBufferSize) { + NiceMock snapshot; + auto writer_ptr = std::make_shared>(); + writer_ptr->delegateBufferFake(); + NiceMock tls_; + uint64_t buffer_size = 1024; + UdpStatsdSink sink(tls_, writer_ptr, false, getDefaultPrefix(), buffer_size); + + NiceMock counter; + counter.name_ = "test_counter"; + counter.used_ = true; + counter.latch_ = 1; + snapshot.counters_.push_back({1, counter}); + + NiceMock gauge; + gauge.name_ = "test_gauge"; + gauge.value_ = 1; + gauge.used_ = true; + snapshot.gauges_.push_back(gauge); + + // Expect both metrics to be present in single write + EXPECT_CALL(*std::dynamic_pointer_cast>(writer_ptr), writeBuffer(_)) + .Times(1); + sink.flush(snapshot); + EXPECT_EQ(writer_ptr->buffer_writes.size(), 1); + EXPECT_EQ(writer_ptr->buffer_writes.at(0), "envoy.test_counter:1|c\nenvoy.test_gauge:1|g"); + + tls_.shutdownThread(); +} + +TEST(UdpStatsdSinkTest, CheckBufferedWritesExceedingBufferSize) { + NiceMock snapshot; + auto writer_ptr = std::make_shared>(); + writer_ptr->delegateBufferFake(); + NiceMock tls_; + uint64_t buffer_size = 64; + UdpStatsdSink sink(tls_, writer_ptr, false, getDefaultPrefix(), buffer_size); + + NiceMock 
counter_1; + counter_1.name_ = "test_counter_1"; + counter_1.used_ = true; + counter_1.latch_ = 1; + snapshot.counters_.push_back({1, counter_1}); + + NiceMock counter_2; + counter_2.name_ = "test_counter_2"; + counter_2.used_ = true; + counter_2.latch_ = 1; + snapshot.counters_.push_back({1, counter_2}); + + NiceMock gauge; + gauge.name_ = "test_gauge"; + gauge.value_ = 1; + gauge.used_ = true; + snapshot.gauges_.push_back(gauge); + + // Expect both metrics to be present in single write + EXPECT_CALL(*std::dynamic_pointer_cast>(writer_ptr), writeBuffer(_)) + .Times(2); + sink.flush(snapshot); + EXPECT_EQ(writer_ptr->buffer_writes.size(), 2); + EXPECT_EQ(writer_ptr->buffer_writes.at(0), "envoy.test_counter_1:1|c\nenvoy.test_counter_2:1|c"); + EXPECT_EQ(writer_ptr->buffer_writes.at(1), "envoy.test_gauge:1|g"); + + tls_.shutdownThread(); +} + +TEST(UdpStatsdSinkTest, CheckActualStatsWithCustomPrefix) { + NiceMock snapshot; + auto writer_ptr = std::make_shared>(); + writer_ptr->delegateBufferFake(); + NiceMock tls_; + UdpStatsdSink sink(tls_, writer_ptr, false, "test_prefix", 1024); + + NiceMock counter; + counter.name_ = "test_counter"; + counter.used_ = true; + counter.latch_ = 1; + snapshot.counters_.push_back({1, counter}); + + EXPECT_CALL(*std::dynamic_pointer_cast>(writer_ptr), writeBuffer(_)); + sink.flush(snapshot); + EXPECT_EQ(writer_ptr->buffer_writes.size(), 1); + EXPECT_EQ(writer_ptr->buffer_writes.at(0), "test_prefix.test_counter:1|c"); + counter.used_ = false; + tls_.shutdownThread(); } @@ -253,8 +365,9 @@ TEST(UdpStatsdSinkTest, SiSuffix) { TEST(UdpStatsdSinkWithTagsTest, CheckActualStats) { NiceMock snapshot; auto writer_ptr = std::make_shared>(); + writer_ptr->delegateBufferFake(); NiceMock tls_; - UdpStatsdSink sink(tls_, writer_ptr, true); + UdpStatsdSink sink(tls_, writer_ptr, true, getDefaultPrefix(), 1024); std::vector tags = {Stats::Tag{"key1", "value1"}, Stats::Tag{"key2", "value2"}}; NiceMock counter; @@ -264,9 +377,10 @@ 
TEST(UdpStatsdSinkWithTagsTest, CheckActualStats) { counter.setTags(tags); snapshot.counters_.push_back({1, counter}); - EXPECT_CALL(*std::dynamic_pointer_cast>(writer_ptr), - write("envoy.test_counter:1|c|#key1:value1,key2:value2")); + EXPECT_CALL(*std::dynamic_pointer_cast>(writer_ptr), writeBuffer(_)); sink.flush(snapshot); + EXPECT_EQ(writer_ptr->buffer_writes.size(), 1); + EXPECT_EQ(writer_ptr->buffer_writes.at(0), "envoy.test_counter:1|c|#key1:value1,key2:value2"); counter.used_ = false; NiceMock gauge; @@ -276,9 +390,10 @@ TEST(UdpStatsdSinkWithTagsTest, CheckActualStats) { gauge.setTags(tags); snapshot.gauges_.push_back(gauge); - EXPECT_CALL(*std::dynamic_pointer_cast>(writer_ptr), - write("envoy.test_gauge:1|g|#key1:value1,key2:value2")); + EXPECT_CALL(*std::dynamic_pointer_cast>(writer_ptr), writeBuffer(_)); sink.flush(snapshot); + EXPECT_EQ(writer_ptr->buffer_writes.size(), 2); + EXPECT_EQ(writer_ptr->buffer_writes.at(1), "envoy.test_gauge:1|g|#key1:value1,key2:value2"); NiceMock timer; timer.name_ = "test_timer"; diff --git a/test/extensions/stats_sinks/dog_statsd/BUILD b/test/extensions/stats_sinks/dog_statsd/BUILD index 003c7853fddab..a9a0057336fac 100644 --- a/test/extensions/stats_sinks/dog_statsd/BUILD +++ b/test/extensions/stats_sinks/dog_statsd/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( @@ -19,7 +19,7 @@ envoy_extension_cc_test( "//include/envoy/registry", "//source/common/protobuf:utility_lib", "//source/extensions/stat_sinks/dog_statsd:config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:instance_mocks", "//test/test_common:environment_lib", "//test/test_common:network_utility_lib", "//test/test_common:utility_lib", diff --git a/test/extensions/stats_sinks/dog_statsd/config_test.cc 
b/test/extensions/stats_sinks/dog_statsd/config_test.cc index 8108dd6618d2e..cdb68d6e938b9 100644 --- a/test/extensions/stats_sinks/dog_statsd/config_test.cc +++ b/test/extensions/stats_sinks/dog_statsd/config_test.cc @@ -9,7 +9,7 @@ #include "extensions/stat_sinks/dog_statsd/config.h" #include "extensions/stat_sinks/well_known_names.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/instance.h" #include "test/test_common/environment.h" #include "test/test_common/network_utility.h" #include "test/test_common/utility.h" @@ -37,7 +37,8 @@ TEST_P(DogStatsdConfigLoopbackTest, ValidUdpIp) { envoy::config::core::v3::Address& address = *sink_config.mutable_address(); envoy::config::core::v3::SocketAddress& socket_address = *address.mutable_socket_address(); socket_address.set_protocol(envoy::config::core::v3::SocketAddress::UDP); - auto loopback_flavor = Network::Test::getCanonicalLoopbackAddress(GetParam()); + Network::Address::InstanceConstSharedPtr loopback_flavor = + Network::Test::getCanonicalLoopbackAddress(GetParam()); socket_address.set_address(loopback_flavor->ip()->addressAsString()); socket_address.set_port_value(8125); @@ -48,7 +49,7 @@ TEST_P(DogStatsdConfigLoopbackTest, ValidUdpIp) { ProtobufTypes::MessagePtr message = factory->createEmptyConfigProto(); TestUtility::jsonConvert(sink_config, *message); - NiceMock server; + NiceMock server; Stats::SinkPtr sink = factory->createStatsSink(*message, server); EXPECT_NE(sink, nullptr); auto udp_sink = dynamic_cast(sink.get()); @@ -59,12 +60,68 @@ TEST_P(DogStatsdConfigLoopbackTest, ValidUdpIp) { // Negative test for protoc-gen-validate constraints for dog_statsd. 
TEST(DogStatsdConfigTest, ValidateFail) { - NiceMock server; + NiceMock server; EXPECT_THROW( DogStatsdSinkFactory().createStatsSink(envoy::config::metrics::v3::DogStatsdSink(), server), ProtoValidationException); } +TEST_P(DogStatsdConfigLoopbackTest, CustomBufferSize) { + const std::string name = StatsSinkNames::get().DogStatsd; + + envoy::config::metrics::v3::DogStatsdSink sink_config; + sink_config.mutable_max_bytes_per_datagram()->set_value(128); + envoy::config::core::v3::Address& address = *sink_config.mutable_address(); + envoy::config::core::v3::SocketAddress& socket_address = *address.mutable_socket_address(); + socket_address.set_protocol(envoy::config::core::v3::SocketAddress::UDP); + Network::Address::InstanceConstSharedPtr loopback_flavor = + Network::Test::getCanonicalLoopbackAddress(GetParam()); + socket_address.set_address(loopback_flavor->ip()->addressAsString()); + socket_address.set_port_value(8125); + + Server::Configuration::StatsSinkFactory* factory = + Registry::FactoryRegistry::getFactory(name); + ASSERT_NE(factory, nullptr); + + ProtobufTypes::MessagePtr message = factory->createEmptyConfigProto(); + TestUtility::jsonConvert(sink_config, *message); + + NiceMock server; + Stats::SinkPtr sink = factory->createStatsSink(*message, server); + ASSERT_NE(sink, nullptr); + auto udp_sink = dynamic_cast(sink.get()); + ASSERT_NE(udp_sink, nullptr); + EXPECT_EQ(udp_sink->getBufferSizeForTest(), 128); +} + +TEST_P(DogStatsdConfigLoopbackTest, DefaultBufferSize) { + const std::string name = StatsSinkNames::get().DogStatsd; + + envoy::config::metrics::v3::DogStatsdSink sink_config; + envoy::config::core::v3::Address& address = *sink_config.mutable_address(); + envoy::config::core::v3::SocketAddress& socket_address = *address.mutable_socket_address(); + socket_address.set_protocol(envoy::config::core::v3::SocketAddress::UDP); + Network::Address::InstanceConstSharedPtr loopback_flavor = + Network::Test::getCanonicalLoopbackAddress(GetParam()); + 
socket_address.set_address(loopback_flavor->ip()->addressAsString()); + socket_address.set_port_value(8125); + + Server::Configuration::StatsSinkFactory* factory = + Registry::FactoryRegistry::getFactory(name); + ASSERT_NE(factory, nullptr); + + ProtobufTypes::MessagePtr message = factory->createEmptyConfigProto(); + TestUtility::jsonConvert(sink_config, *message); + + NiceMock server; + Stats::SinkPtr sink = factory->createStatsSink(*message, server); + ASSERT_NE(sink, nullptr); + auto udp_sink = dynamic_cast(sink.get()); + ASSERT_NE(udp_sink, nullptr); + // Expect default buffer size of 0 (no buffering) + EXPECT_EQ(udp_sink->getBufferSizeForTest(), 0); +} + TEST_P(DogStatsdConfigLoopbackTest, WithCustomPrefix) { const std::string name = StatsSinkNames::get().DogStatsd; @@ -72,7 +129,8 @@ TEST_P(DogStatsdConfigLoopbackTest, WithCustomPrefix) { envoy::config::core::v3::Address& address = *sink_config.mutable_address(); envoy::config::core::v3::SocketAddress& socket_address = *address.mutable_socket_address(); socket_address.set_protocol(envoy::config::core::v3::SocketAddress::UDP); - auto loopback_flavor = Network::Test::getCanonicalLoopbackAddress(GetParam()); + Network::Address::InstanceConstSharedPtr loopback_flavor = + Network::Test::getCanonicalLoopbackAddress(GetParam()); socket_address.set_address(loopback_flavor->ip()->addressAsString()); socket_address.set_port_value(8125); @@ -86,7 +144,7 @@ TEST_P(DogStatsdConfigLoopbackTest, WithCustomPrefix) { ProtobufTypes::MessagePtr message = factory->createEmptyConfigProto(); TestUtility::jsonConvert(sink_config, *message); - NiceMock server; + NiceMock server; Stats::SinkPtr sink = factory->createStatsSink(*message, server); ASSERT_NE(sink, nullptr); auto udp_sink = dynamic_cast(sink.get()); diff --git a/test/extensions/stats_sinks/hystrix/BUILD b/test/extensions/stats_sinks/hystrix/BUILD index 148a62fad7c55..127f263d8d423 100644 --- a/test/extensions/stats_sinks/hystrix/BUILD +++ 
b/test/extensions/stats_sinks/hystrix/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( @@ -19,7 +19,7 @@ envoy_extension_cc_test( "//include/envoy/registry", "//source/common/protobuf:utility_lib", "//source/extensions/stat_sinks/hystrix:config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:instance_mocks", "//test/test_common:environment_lib", "//test/test_common:network_utility_lib", "//test/test_common:utility_lib", @@ -34,7 +34,9 @@ envoy_extension_cc_test( deps = [ "//source/common/stats:stats_lib", "//source/extensions/stat_sinks/hystrix:hystrix_lib", - "//test/mocks/server:server_mocks", + "//test/mocks/server:admin_mocks", + "//test/mocks/server:admin_stream_mocks", + "//test/mocks/server:instance_mocks", "//test/mocks/stats:stats_mocks", "//test/mocks/upstream:upstream_mocks", ], @@ -44,6 +46,7 @@ envoy_extension_cc_test( name = "hystrix_integration_test", srcs = ["hystrix_integration_test.cc"], extension_name = "envoy.stat_sinks.hystrix", + tags = ["fails_on_windows"], deps = [ "//source/extensions/stat_sinks/hystrix:config", "//test/integration:http_protocol_integration_lib", diff --git a/test/extensions/stats_sinks/hystrix/config_test.cc b/test/extensions/stats_sinks/hystrix/config_test.cc index e997234210220..5a3c4c007e9f3 100644 --- a/test/extensions/stats_sinks/hystrix/config_test.cc +++ b/test/extensions/stats_sinks/hystrix/config_test.cc @@ -7,7 +7,7 @@ #include "extensions/stat_sinks/hystrix/hystrix.h" #include "extensions/stat_sinks/well_known_names.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/instance.h" #include "test/test_common/environment.h" #include "test/test_common/network_utility.h" #include "test/test_common/utility.h" @@ -35,7 +35,7 @@ TEST(StatsConfigTest, ValidHystrixSink) { ProtobufTypes::MessagePtr 
message = factory->createEmptyConfigProto(); TestUtility::jsonConvert(sink_config, *message); - NiceMock server; + NiceMock server; Stats::SinkPtr sink = factory->createStatsSink(*message, server); EXPECT_NE(sink, nullptr); EXPECT_NE(dynamic_cast(sink.get()), nullptr); diff --git a/test/extensions/stats_sinks/hystrix/hystrix_integration_test.cc b/test/extensions/stats_sinks/hystrix/hystrix_integration_test.cc index 9a5667e6d5815..221201bff5c8b 100644 --- a/test/extensions/stats_sinks/hystrix/hystrix_integration_test.cc +++ b/test/extensions/stats_sinks/hystrix/hystrix_integration_test.cc @@ -25,26 +25,23 @@ TEST_P(HystrixIntegrationTest, NoChunkEncoding) { if (downstreamProtocol() == Http::CodecClient::Type::HTTP1) { // For HTTP/1.1 we use a raw client to make absolutely sure there is no chunk encoding. - Buffer::OwnedImpl buffer("GET /hystrix_event_stream HTTP/1.1\r\nHost: admin\r\n\r\n"); std::string response; - RawConnectionDriver connection( - lookupPort("admin"), buffer, - [&](Network::ClientConnection& client, const Buffer::Instance& data) -> void { + auto connection = createConnectionDriver( + lookupPort("admin"), "GET /hystrix_event_stream HTTP/1.1\r\nHost: admin\r\n\r\n", + [&response](Network::ClientConnection& conn, const Buffer::Instance& data) -> void { response.append(data.toString()); - // Wait until there is a flush. if (response.find("rollingCountCollapsedRequests") != std::string::npos) { - client.close(Network::ConnectionCloseType::NoFlush); + conn.close(Network::ConnectionCloseType::NoFlush); } - }, - version_); - connection.run(); + }); + connection->run(); EXPECT_THAT(response, StartsWith("HTTP/1.1 200 OK\r\n")); // Make sure that the response is not actually chunk encoded, but it does have the hystrix flush // trailer. 
EXPECT_THAT(response, Not(HasSubstr("chunked"))); EXPECT_THAT(response, Not(HasSubstr("3\r\n:\n\n"))); EXPECT_THAT(response, HasSubstr(":\n\n")); - connection.close(); + connection->close(); } else { codec_client_ = makeHttpConnection(lookupPort("admin")); auto response = codec_client_->makeHeaderOnlyRequest( diff --git a/test/extensions/stats_sinks/hystrix/hystrix_test.cc b/test/extensions/stats_sinks/hystrix/hystrix_test.cc index f8527b34e0998..463d437c3d460 100644 --- a/test/extensions/stats_sinks/hystrix/hystrix_test.cc +++ b/test/extensions/stats_sinks/hystrix/hystrix_test.cc @@ -5,7 +5,9 @@ #include "extensions/stat_sinks/hystrix/hystrix.h" #include "test/mocks/network/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/admin.h" +#include "test/mocks/server/admin_stream.h" +#include "test/mocks/server/instance.h" #include "test/mocks/stats/mocks.h" #include "test/mocks/upstream/mocks.h" @@ -155,7 +157,7 @@ class HystrixSinkTest : public testing::Test { addClusterToMap(cluster2_name_, cluster2_.cluster_); } - std::unordered_map + absl::node_hash_map addSecondClusterAndSendDataHelper(Buffer::OwnedImpl& buffer, const uint64_t success_step, const uint64_t error_step, const uint64_t timeout_step, const uint64_t success_step2, const uint64_t error_step2, @@ -214,8 +216,8 @@ class HystrixSinkTest : public testing::Test { } } - std::unordered_map buildClusterMap(absl::string_view data_message) { - std::unordered_map cluster_message_map; + absl::node_hash_map buildClusterMap(absl::string_view data_message) { + absl::node_hash_map cluster_message_map; std::vector messages = absl::StrSplit(data_message, "data: ", absl::SkipWhitespace()); for (auto message : messages) { @@ -241,7 +243,7 @@ class HystrixSinkTest : public testing::Test { ClusterTestInfo cluster2_{cluster2_name_}; NiceMock callbacks_; - NiceMock server_; + NiceMock server_; Upstream::ClusterManager::ClusterInfoMap cluster_map_; std::unique_ptr sink_; @@ -255,7 +257,7 @@ 
TEST_F(HystrixSinkTest, EmptyFlush) { // Register callback to sink. sink_->registerConnection(&callbacks_); sink_->flush(snapshot_); - std::unordered_map cluster_message_map = + absl::node_hash_map cluster_message_map = buildClusterMap(buffer.toString()); validateResults(cluster_message_map[cluster1_name_], 0, 0, 0, 0, 0, window_size_); } @@ -278,7 +280,7 @@ TEST_F(HystrixSinkTest, BasicFlow) { sink_->flush(snapshot_); } - std::unordered_map cluster_message_map = + absl::node_hash_map cluster_message_map = buildClusterMap(buffer.toString()); Json::ObjectSharedPtr json_buffer = @@ -350,7 +352,7 @@ TEST_F(HystrixSinkTest, Disconnect) { } EXPECT_NE(buffer.length(), 0); - std::unordered_map cluster_message_map = + absl::node_hash_map cluster_message_map = buildClusterMap(buffer.toString()); Json::ObjectSharedPtr json_buffer = Json::Factory::loadFromString(cluster_message_map[cluster1_name_]); @@ -390,7 +392,7 @@ TEST_F(HystrixSinkTest, AddCluster) { Buffer::OwnedImpl buffer = createClusterAndCallbacks(); // Add cluster and "run" some traffic. - std::unordered_map cluster_message_map = + absl::node_hash_map cluster_message_map = addSecondClusterAndSendDataHelper(buffer, success_step, error_step, timeout_step, success_step2, error_step2, timeout_step2); @@ -431,7 +433,7 @@ TEST_F(HystrixSinkTest, AddAndRemoveClusters) { removeSecondClusterHelper(buffer); // Check that removed worked. 
- std::unordered_map cluster_message_map = + absl::node_hash_map cluster_message_map = buildClusterMap(buffer.toString()); ASSERT_NE(cluster_message_map.find(cluster1_name_), cluster_message_map.end()) << "cluster1_name = " << cluster1_name_; @@ -483,7 +485,7 @@ TEST_F(HystrixSinkTest, HistogramTest) { sink_->registerConnection(&callbacks_); sink_->flush(snapshot_); - std::unordered_map cluster_message_map = + absl::node_hash_map cluster_message_map = buildClusterMap(buffer.toString()); Json::ObjectSharedPtr latency = Json::Factory::loadFromString(cluster_message_map[cluster1_name_]) @@ -506,7 +508,7 @@ TEST_F(HystrixSinkTest, HystrixEventStreamHandler) { // This value doesn't matter in handlerHystrixEventStream absl::string_view path_and_query; - Http::ResponseHeaderMapImpl response_headers; + Http::TestResponseHeaderMapImpl response_headers; NiceMock admin_stream_mock; NiceMock connection_mock; @@ -527,13 +529,10 @@ TEST_F(HystrixSinkTest, HystrixEventStreamHandler) { // Check that response_headers has been set correctly EXPECT_EQ(response_headers.ContentType()->value(), "text/event-stream"); - EXPECT_EQ(response_headers.CacheControl()->value(), "no-cache"); + EXPECT_EQ(response_headers.get_("cache-control"), "no-cache"); EXPECT_EQ(response_headers.Connection()->value(), "close"); - EXPECT_EQ(response_headers.AccessControlAllowOrigin()->value(), "*"); - - std::string access_control_allow_headers = - std::string(response_headers.AccessControlAllowHeaders()->value().getStringView()); - EXPECT_THAT(access_control_allow_headers, HasSubstr("Accept")); + EXPECT_EQ(response_headers.get_("access-control-allow-origin"), "*"); + EXPECT_THAT(response_headers.get_("access-control-allow-headers"), HasSubstr("Accept")); } } // namespace diff --git a/test/extensions/stats_sinks/metrics_service/BUILD b/test/extensions/stats_sinks/metrics_service/BUILD index e8ff78cc02c59..9cf530605be06 100644 --- a/test/extensions/stats_sinks/metrics_service/BUILD +++ 
b/test/extensions/stats_sinks/metrics_service/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( @@ -45,6 +45,7 @@ envoy_extension_cc_test( name = "metrics_service_integration_test", srcs = ["metrics_service_integration_test.cc"], extension_name = "envoy.stat_sinks.metrics_service", + tags = ["fails_on_windows"], deps = [ "//source/common/buffer:zero_copy_input_stream_lib", "//source/common/grpc:codec_lib", diff --git a/test/extensions/stats_sinks/metrics_service/grpc_metrics_service_impl_test.cc b/test/extensions/stats_sinks/metrics_service/grpc_metrics_service_impl_test.cc index c7543903d0f90..ce940b650136d 100644 --- a/test/extensions/stats_sinks/metrics_service/grpc_metrics_service_impl_test.cc +++ b/test/extensions/stats_sinks/metrics_service/grpc_metrics_service_impl_test.cc @@ -31,8 +31,9 @@ class GrpcMetricsStreamerImplTest : public testing::Test { EXPECT_CALL(*factory_, create()).WillOnce(Invoke([this] { return Grpc::RawAsyncClientPtr{async_client_}; })); - streamer_ = std::make_unique(Grpc::AsyncClientFactoryPtr{factory_}, - local_info_); + streamer_ = std::make_unique( + Grpc::AsyncClientFactoryPtr{factory_}, local_info_, + envoy::config::core::v3::ApiVersion::AUTO); } void expectStreamStart(MockMetricsStream& stream, MetricsServiceCallbacks** callbacks_to_set) { @@ -48,7 +49,7 @@ class GrpcMetricsStreamerImplTest : public testing::Test { LocalInfo::MockLocalInfo local_info_; Grpc::MockAsyncClient* async_client_{new NiceMock}; Grpc::MockAsyncClientFactory* factory_{new Grpc::MockAsyncClientFactory}; - std::unique_ptr streamer_; + GrpcMetricsStreamerImplPtr streamer_; }; // Test basic metrics streaming flow. 
@@ -90,35 +91,29 @@ class MockGrpcMetricsStreamer : public GrpcMetricsStreamer { MOCK_METHOD(void, send, (envoy::service::metrics::v3::StreamMetricsMessage & message)); }; -class TestGrpcMetricsStreamer : public GrpcMetricsStreamer { +class MetricsServiceSinkTest : public testing::Test { public: - int metric_count; - // GrpcMetricsStreamer - void send(envoy::service::metrics::v3::StreamMetricsMessage& message) override { - metric_count = message.envoy_metrics_size(); - } -}; + MetricsServiceSinkTest() = default; -class MetricsServiceSinkTest : public testing::Test {}; - -TEST(MetricsServiceSinkTest, CheckSendCall) { - NiceMock snapshot; - Event::SimulatedTimeSystem time_system; + NiceMock snapshot_; + Event::SimulatedTimeSystem time_system_; std::shared_ptr streamer_{new MockGrpcMetricsStreamer()}; +}; - MetricsServiceSink sink(streamer_, time_system); +TEST_F(MetricsServiceSinkTest, CheckSendCall) { + MetricsServiceSink sink(streamer_, time_system_, false); auto counter = std::make_shared>(); counter->name_ = "test_counter"; counter->latch_ = 1; counter->used_ = true; - snapshot.counters_.push_back({1, *counter}); + snapshot_.counters_.push_back({1, *counter}); auto gauge = std::make_shared>(); gauge->name_ = "test_gauge"; gauge->value_ = 1; gauge->used_ = true; - snapshot.gauges_.push_back(*gauge); + snapshot_.gauges_.push_back(*gauge); auto histogram = std::make_shared>(); histogram->name_ = "test_histogram"; @@ -126,35 +121,73 @@ TEST(MetricsServiceSinkTest, CheckSendCall) { EXPECT_CALL(*streamer_, send(_)); - sink.flush(snapshot); + sink.flush(snapshot_); } -TEST(MetricsServiceSinkTest, CheckStatsCount) { - NiceMock snapshot; - Event::SimulatedTimeSystem time_system; - std::shared_ptr streamer_{new TestGrpcMetricsStreamer()}; - - MetricsServiceSink sink(streamer_, time_system); +TEST_F(MetricsServiceSinkTest, CheckStatsCount) { + MetricsServiceSink sink(streamer_, time_system_, false); auto counter = std::make_shared>(); counter->name_ = "test_counter"; - 
counter->latch_ = 1; + counter->value_ = 100; counter->used_ = true; - snapshot.counters_.push_back({1, *counter}); + snapshot_.counters_.push_back({1, *counter}); auto gauge = std::make_shared>(); gauge->name_ = "test_gauge"; gauge->value_ = 1; gauge->used_ = true; - snapshot.gauges_.push_back(*gauge); + snapshot_.gauges_.push_back(*gauge); - sink.flush(snapshot); - EXPECT_EQ(2, (*streamer_).metric_count); + EXPECT_CALL(*streamer_, send(_)) + .WillOnce(Invoke([](envoy::service::metrics::v3::StreamMetricsMessage& message) { + EXPECT_EQ(2, message.envoy_metrics_size()); + })); + sink.flush(snapshot_); // Verify only newly added metrics come after endFlush call. gauge->used_ = false; - sink.flush(snapshot); - EXPECT_EQ(1, (*streamer_).metric_count); + EXPECT_CALL(*streamer_, send(_)) + .WillOnce(Invoke([](envoy::service::metrics::v3::StreamMetricsMessage& message) { + EXPECT_EQ(1, message.envoy_metrics_size()); + })); + sink.flush(snapshot_); +} + +// Test that verifies counters are correctly reported as current value when configured to do so. +TEST_F(MetricsServiceSinkTest, ReportCountersValues) { + MetricsServiceSink sink(streamer_, time_system_, false); + + auto counter = std::make_shared>(); + counter->name_ = "test_counter"; + counter->value_ = 100; + counter->used_ = true; + snapshot_.counters_.push_back({1, *counter}); + + EXPECT_CALL(*streamer_, send(_)) + .WillOnce(Invoke([](envoy::service::metrics::v3::StreamMetricsMessage& message) { + EXPECT_EQ(1, message.envoy_metrics_size()); + EXPECT_EQ(100, message.envoy_metrics(0).metric(0).counter().value()); + })); + sink.flush(snapshot_); +} + +// Test that verifies counters are reported as the delta between flushes when configured to do so. 
+TEST_F(MetricsServiceSinkTest, ReportCountersAsDeltas) { + MetricsServiceSink sink(streamer_, time_system_, true); + + auto counter = std::make_shared>(); + counter->name_ = "test_counter"; + counter->value_ = 100; + counter->used_ = true; + snapshot_.counters_.push_back({1, *counter}); + + EXPECT_CALL(*streamer_, send(_)) + .WillOnce(Invoke([](envoy::service::metrics::v3::StreamMetricsMessage& message) { + EXPECT_EQ(1, message.envoy_metrics_size()); + EXPECT_EQ(1, message.envoy_metrics(0).metric(0).counter().value()); + })); + sink.flush(snapshot_); } } // namespace diff --git a/test/extensions/stats_sinks/metrics_service/metrics_service_integration_test.cc b/test/extensions/stats_sinks/metrics_service/metrics_service_integration_test.cc index 5539ece5fdf89..560b52552d3f2 100644 --- a/test/extensions/stats_sinks/metrics_service/metrics_service_integration_test.cc +++ b/test/extensions/stats_sinks/metrics_service/metrics_service_integration_test.cc @@ -2,10 +2,10 @@ #include "envoy/config/metrics/v3/metrics_service.pb.h" #include "envoy/service/metrics/v3/metrics_service.pb.h" -#include "common/common/version.h" #include "common/grpc/codec.h" #include "common/grpc/common.h" #include "common/stats/histogram_impl.h" +#include "common/version/version.h" #include "test/common/grpc/grpc_client_integration.h" #include "test/integration/http_integration.h" @@ -18,7 +18,7 @@ using testing::AssertionResult; namespace Envoy { namespace { -class MetricsServiceIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, +class MetricsServiceIntegrationTest : public Grpc::VersionedGrpcClientIntegrationParamTest, public HttpIntegrationTest { public: MetricsServiceIntegrationTest() @@ -44,6 +44,7 @@ class MetricsServiceIntegrationTest : public Grpc::GrpcClientIntegrationParamTes envoy::config::metrics::v3::MetricsServiceConfig config; setGrpcService(*config.mutable_grpc_service(), "metrics_service", fake_upstreams_.back()->localAddress()); + 
config.set_transport_api_version(apiVersion()); metrics_sink->mutable_typed_config()->PackFrom(config); // Shrink reporting period down to 1s to make test not take forever. bootstrap.mutable_stats_flush_interval()->CopyFrom( @@ -79,11 +80,11 @@ class MetricsServiceIntegrationTest : public Grpc::GrpcClientIntegrationParamTes while (!(known_counter_exists && known_gauge_exists && known_histogram_exists)) { envoy::service::metrics::v3::StreamMetricsMessage request_msg; VERIFY_ASSERTION(metrics_service_request_->waitForGrpcMessage(*dispatcher_, request_msg)); - EXPECT_EQ("POST", metrics_service_request_->headers().Method()->value().getStringView()); - EXPECT_EQ("/envoy.service.metrics.v2.MetricsService/StreamMetrics", - metrics_service_request_->headers().Path()->value().getStringView()); - EXPECT_EQ("application/grpc", - metrics_service_request_->headers().ContentType()->value().getStringView()); + EXPECT_EQ("POST", metrics_service_request_->headers().getMethodValue()); + EXPECT_EQ(TestUtility::getVersionedMethodPath("envoy.service.metrics.{}.MetricsService", + "StreamMetrics", apiVersion()), + metrics_service_request_->headers().getPathValue()); + EXPECT_EQ("application/grpc", metrics_service_request_->headers().getContentTypeValue()); EXPECT_TRUE(request_msg.envoy_metrics_size() > 0); const Protobuf::RepeatedPtrField<::io::prometheus::client::MetricFamily>& envoy_metrics = request_msg.envoy_metrics(); @@ -109,9 +110,8 @@ class MetricsServiceIntegrationTest : public Grpc::GrpcClientIntegrationParamTes if (metrics_family.name() == "cluster.cluster_0.upstream_rq_time" && metrics_family.type() == ::io::prometheus::client::MetricType::HISTOGRAM) { known_histogram_exists = true; - Stats::HistogramStatisticsImpl empty_statistics; EXPECT_EQ(metrics_family.metric(0).histogram().bucket_size(), - empty_statistics.supportedBuckets().size()); + Stats::HistogramSettingsImpl::defaultBuckets().size()); } ASSERT(metrics_family.metric(0).has_timestamp_ms()); if (known_counter_exists 
&& known_gauge_exists && known_histogram_exists) { @@ -141,7 +141,7 @@ class MetricsServiceIntegrationTest : public Grpc::GrpcClientIntegrationParamTes }; INSTANTIATE_TEST_SUITE_P(IpVersionsClientType, MetricsServiceIntegrationTest, - GRPC_CLIENT_INTEGRATION_PARAMS); + VERSIONED_GRPC_CLIENT_INTEGRATION_PARAMS); // Test a basic metric service flow. TEST_P(MetricsServiceIntegrationTest, BasicFlow) { diff --git a/test/extensions/stats_sinks/statsd/BUILD b/test/extensions/stats_sinks/statsd/BUILD index c081621d74033..b1c56f0466d8b 100644 --- a/test/extensions/stats_sinks/statsd/BUILD +++ b/test/extensions/stats_sinks/statsd/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( @@ -19,7 +19,7 @@ envoy_extension_cc_test( "//include/envoy/registry", "//source/common/protobuf:utility_lib", "//source/extensions/stat_sinks/statsd:config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:instance_mocks", "//test/test_common:environment_lib", "//test/test_common:network_utility_lib", "//test/test_common:utility_lib", diff --git a/test/extensions/stats_sinks/statsd/config_test.cc b/test/extensions/stats_sinks/statsd/config_test.cc index 44c89aa5e5795..91a4b4db77bee 100644 --- a/test/extensions/stats_sinks/statsd/config_test.cc +++ b/test/extensions/stats_sinks/statsd/config_test.cc @@ -10,7 +10,7 @@ #include "extensions/stat_sinks/statsd/config.h" #include "extensions/stat_sinks/well_known_names.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/instance.h" #include "test/test_common/environment.h" #include "test/test_common/network_utility.h" #include "test/test_common/utility.h" @@ -39,7 +39,7 @@ TEST(StatsConfigTest, ValidTcpStatsd) { ProtobufTypes::MessagePtr message = factory->createEmptyConfigProto(); TestUtility::jsonConvert(sink_config, *message); - 
NiceMock server; + NiceMock server; Stats::SinkPtr sink = factory->createStatsSink(*message, server); EXPECT_NE(sink, nullptr); EXPECT_NE(dynamic_cast(sink.get()), nullptr); @@ -81,7 +81,7 @@ TEST_P(StatsConfigParameterizedTest, UdpSinkDefaultPrefix) { ProtobufTypes::MessagePtr message = factory->createEmptyConfigProto(); TestUtility::jsonConvert(sink_config, *message); - NiceMock server; + NiceMock server; Stats::SinkPtr sink = factory->createStatsSink(*message, server); ASSERT_NE(sink, nullptr); @@ -113,7 +113,7 @@ TEST_P(StatsConfigParameterizedTest, UdpSinkCustomPrefix) { ProtobufTypes::MessagePtr message = factory->createEmptyConfigProto(); TestUtility::jsonConvert(sink_config, *message); - NiceMock server; + NiceMock server; Stats::SinkPtr sink = factory->createStatsSink(*message, server); ASSERT_NE(sink, nullptr); @@ -136,7 +136,7 @@ TEST(StatsConfigTest, TcpSinkDefaultPrefix) { ProtobufTypes::MessagePtr message = factory->createEmptyConfigProto(); TestUtility::jsonConvert(sink_config, *message); - NiceMock server; + NiceMock server; Stats::SinkPtr sink = factory->createStatsSink(*message, server); ASSERT_NE(sink, nullptr); @@ -161,7 +161,7 @@ TEST(StatsConfigTest, TcpSinkCustomPrefix) { ProtobufTypes::MessagePtr message = factory->createEmptyConfigProto(); TestUtility::jsonConvert(sink_config, *message); - NiceMock server; + NiceMock server; Stats::SinkPtr sink = factory->createStatsSink(*message, server); ASSERT_NE(sink, nullptr); @@ -193,7 +193,7 @@ TEST_P(StatsConfigLoopbackTest, ValidUdpIpStatsd) { ProtobufTypes::MessagePtr message = factory->createEmptyConfigProto(); TestUtility::jsonConvert(sink_config, *message); - NiceMock server; + NiceMock server; Stats::SinkPtr sink = factory->createStatsSink(*message, server); EXPECT_NE(sink, nullptr); EXPECT_NE(dynamic_cast(sink.get()), nullptr); @@ -202,7 +202,7 @@ TEST_P(StatsConfigLoopbackTest, ValidUdpIpStatsd) { // Negative test for protoc-gen-validate constraints for statsd. 
TEST(StatsdConfigTest, ValidateFail) { - NiceMock server; + NiceMock server; EXPECT_THROW( StatsdSinkFactory().createStatsSink(envoy::config::metrics::v3::StatsdSink(), server), ProtoValidationException); diff --git a/test/extensions/tracers/common/ot/BUILD b/test/extensions/tracers/common/ot/BUILD index 76fd9d8b13784..3e56002a89a16 100644 --- a/test/extensions/tracers/common/ot/BUILD +++ b/test/extensions/tracers/common/ot/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/tracers/common/ot/opentracing_driver_impl_test.cc b/test/extensions/tracers/common/ot/opentracing_driver_impl_test.cc index d159f0de0a58e..011030dff5a49 100644 --- a/test/extensions/tracers/common/ot/opentracing_driver_impl_test.cc +++ b/test/extensions/tracers/common/ot/opentracing_driver_impl_test.cc @@ -99,6 +99,20 @@ TEST_F(OpenTracingDriverTest, FlushSpanWithLog) { EXPECT_EQ(expected_logs, driver_->recorder().top().logs); } +TEST_F(OpenTracingDriverTest, FlushSpanWithBaggage) { + setupValidDriver(); + + Tracing::SpanPtr first_span = driver_->startSpan(config_, request_headers_, operation_name_, + start_time_, {Tracing::Reason::Sampling, true}); + first_span->setBaggage("abc", "123"); + first_span->finishSpan(); + + const std::map expected_baggage = {{"abc", "123"}}; + + EXPECT_EQ(1, driver_->recorder().spans().size()); + EXPECT_EQ(expected_baggage, driver_->recorder().top().span_context.baggage); +} + TEST_F(OpenTracingDriverTest, TagSamplingFalseByDecision) { setupValidDriver(OpenTracingDriver::PropagationMode::TracerNative, {}); @@ -175,7 +189,7 @@ TEST_F(OpenTracingDriverTest, InjectFailure) { const auto span_context_injection_error_count = stats_.counter("tracing.opentracing.span_context_injection_error").value(); - EXPECT_EQ(nullptr, request_headers_.OtSpanContext()); + 
EXPECT_FALSE(request_headers_.has(Http::CustomHeaders::get().OtSpanContext)); span->injectContext(request_headers_); EXPECT_EQ(span_context_injection_error_count + 1, diff --git a/test/extensions/tracers/datadog/BUILD b/test/extensions/tracers/datadog/BUILD index 1e6b94c6e0f09..f362c834eb395 100644 --- a/test/extensions/tracers/datadog/BUILD +++ b/test/extensions/tracers/datadog/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/tracers/datadog/config_test.cc b/test/extensions/tracers/datadog/config_test.cc index 87d259c0f4e75..52a44719367ca 100644 --- a/test/extensions/tracers/datadog/config_test.cc +++ b/test/extensions/tracers/datadog/config_test.cc @@ -4,7 +4,8 @@ #include "extensions/tracers/datadog/config.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/tracer_factory.h" +#include "test/mocks/server/tracer_factory_context.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git a/test/extensions/tracers/datadog/datadog_tracer_impl_test.cc b/test/extensions/tracers/datadog/datadog_tracer_impl_test.cc index d8fec3cbadbbb..97414ee5a7f25 100644 --- a/test/extensions/tracers/datadog/datadog_tracer_impl_test.cc +++ b/test/extensions/tracers/datadog/datadog_tracer_impl_test.cc @@ -84,7 +84,7 @@ class DatadogDriverTest : public testing::Test { NiceMock* timer_; Stats::TestUtil::TestStore stats_; NiceMock cm_; - NiceMock random_; + NiceMock random_; NiceMock runtime_; NiceMock local_info_; @@ -154,9 +154,8 @@ TEST_F(DatadogDriverTest, FlushSpansTimer) { const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { callback = &callbacks; - EXPECT_EQ("fake_cluster", message->headers().Host()->value().getStringView()); - EXPECT_EQ("application/msgpack", - message->headers().ContentType()->value().getStringView()); + 
EXPECT_EQ("fake_cluster", message->headers().getHostValue()); + EXPECT_EQ("application/msgpack", message->headers().getContentTypeValue()); return &request; })); diff --git a/test/extensions/tracers/dynamic_ot/BUILD b/test/extensions/tracers/dynamic_ot/BUILD index 4037befe21f9d..48e8d4a97c67f 100644 --- a/test/extensions/tracers/dynamic_ot/BUILD +++ b/test/extensions/tracers/dynamic_ot/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/tracers/dynamic_ot/config_test.cc b/test/extensions/tracers/dynamic_ot/config_test.cc index 90c77529f568d..a655b23e5f6c3 100644 --- a/test/extensions/tracers/dynamic_ot/config_test.cc +++ b/test/extensions/tracers/dynamic_ot/config_test.cc @@ -4,7 +4,8 @@ #include "extensions/tracers/dynamic_ot/config.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/tracer_factory.h" +#include "test/mocks/server/tracer_factory_context.h" #include "test/test_common/environment.h" #include "fmt/printf.h" diff --git a/test/extensions/tracers/dynamic_ot/dynamic_opentracing_driver_impl_test.cc b/test/extensions/tracers/dynamic_ot/dynamic_opentracing_driver_impl_test.cc index 422ffed9728d2..775270a09b698 100644 --- a/test/extensions/tracers/dynamic_ot/dynamic_opentracing_driver_impl_test.cc +++ b/test/extensions/tracers/dynamic_ot/dynamic_opentracing_driver_impl_test.cc @@ -70,19 +70,27 @@ TEST_F(DynamicOpenTracingDriverTest, InitializeDriver) { } } +// This test fails under gcc, please see https://github.com/envoyproxy/envoy/issues/7647 +// for more details. 
+#ifndef GCC_COMPILER TEST_F(DynamicOpenTracingDriverTest, FlushSpans) { setupValidDriver(); - Tracing::SpanPtr first_span = driver_->startSpan(config_, request_headers_, operation_name_, - start_time_, {Tracing::Reason::Sampling, true}); - first_span->finishSpan(); - driver_->tracer().Close(); + { + Tracing::SpanPtr first_span = driver_->startSpan( + config_, request_headers_, operation_name_, start_time_, {Tracing::Reason::Sampling, true}); + first_span->finishSpan(); + driver_->tracer().Close(); + } + + driver_ = nullptr; const Json::ObjectSharedPtr spans_json = TestEnvironment::jsonLoadFromString(TestEnvironment::readFileToStringForTest(spans_file_)); EXPECT_NE(spans_json, nullptr); EXPECT_EQ(spans_json->asObjectArray().size(), 1); } +#endif } // namespace } // namespace DynamicOt diff --git a/test/extensions/tracers/lightstep/BUILD b/test/extensions/tracers/lightstep/BUILD index 40815b572d758..86849c5f84e02 100644 --- a/test/extensions/tracers/lightstep/BUILD +++ b/test/extensions/tracers/lightstep/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/tracers/lightstep/config_test.cc b/test/extensions/tracers/lightstep/config_test.cc index ec09bf27c6c9e..e56ff7b0c507f 100644 --- a/test/extensions/tracers/lightstep/config_test.cc +++ b/test/extensions/tracers/lightstep/config_test.cc @@ -4,7 +4,8 @@ #include "extensions/tracers/lightstep/config.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/tracer_factory.h" +#include "test/mocks/server/tracer_factory_context.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git a/test/extensions/tracers/lightstep/lightstep_tracer_impl_test.cc b/test/extensions/tracers/lightstep/lightstep_tracer_impl_test.cc index 48c50dd51bbf2..ef657d6d54f5a 100644 --- 
a/test/extensions/tracers/lightstep/lightstep_tracer_impl_test.cc +++ b/test/extensions/tracers/lightstep/lightstep_tracer_impl_test.cc @@ -124,7 +124,7 @@ class LightStepDriverTest : public testing::Test { std::unique_ptr driver_; NiceMock* timer_; NiceMock cm_; - NiceMock random_; + NiceMock random_; NiceMock runtime_; NiceMock local_info_; @@ -245,10 +245,9 @@ TEST_F(LightStepDriverTest, FlushSeveralSpans) { callback = &callbacks; EXPECT_EQ("/lightstep.collector.CollectorService/Report", - message->headers().Path()->value().getStringView()); - EXPECT_EQ("fake_cluster", message->headers().Host()->value().getStringView()); - EXPECT_EQ("application/grpc", - message->headers().ContentType()->value().getStringView()); + message->headers().getPathValue()); + EXPECT_EQ("fake_cluster", message->headers().getHostValue()); + EXPECT_EQ("application/grpc", message->headers().getContentTypeValue()); return &request; })); @@ -416,10 +415,9 @@ TEST_F(LightStepDriverTest, FlushOneFailure) { callback = &callbacks; EXPECT_EQ("/lightstep.collector.CollectorService/Report", - message->headers().Path()->value().getStringView()); - EXPECT_EQ("fake_cluster", message->headers().Host()->value().getStringView()); - EXPECT_EQ("application/grpc", - message->headers().ContentType()->value().getStringView()); + message->headers().getPathValue()); + EXPECT_EQ("fake_cluster", message->headers().getHostValue()); + EXPECT_EQ("application/grpc", message->headers().getContentTypeValue()); return &request; })); @@ -464,10 +462,9 @@ TEST_F(LightStepDriverTest, FlushWithActiveReport) { callback = &callbacks; EXPECT_EQ("/lightstep.collector.CollectorService/Report", - message->headers().Path()->value().getStringView()); - EXPECT_EQ("fake_cluster", message->headers().Host()->value().getStringView()); - EXPECT_EQ("application/grpc", - message->headers().ContentType()->value().getStringView()); + message->headers().getPathValue()); + EXPECT_EQ("fake_cluster", message->headers().getHostValue()); + 
EXPECT_EQ("application/grpc", message->headers().getContentTypeValue()); return &request; })); @@ -510,10 +507,9 @@ TEST_F(LightStepDriverTest, OnFullWithActiveReport) { callback = &callbacks; EXPECT_EQ("/lightstep.collector.CollectorService/Report", - message->headers().Path()->value().getStringView()); - EXPECT_EQ("fake_cluster", message->headers().Host()->value().getStringView()); - EXPECT_EQ("application/grpc", - message->headers().ContentType()->value().getStringView()); + message->headers().getPathValue()); + EXPECT_EQ("fake_cluster", message->headers().getHostValue()); + EXPECT_EQ("application/grpc", message->headers().getContentTypeValue()); return &request; })); @@ -622,24 +618,24 @@ TEST_F(LightStepDriverTest, SerializeAndDeserializeContext) { // Supply bogus context, that will be simply ignored. const std::string invalid_context = "notvalidcontext"; - request_headers_.setOtSpanContext(invalid_context); + request_headers_.setCopy(Http::CustomHeaders::get().OtSpanContext, invalid_context); stats_.counter("tracing.opentracing.span_context_extraction_error").reset(); driver_->startSpan(config_, request_headers_, operation_name_, start_time_, {Tracing::Reason::Sampling, true}); EXPECT_EQ(1U, stats_.counter("tracing.opentracing.span_context_extraction_error").value()); - std::string injected_ctx(request_headers_.OtSpanContext()->value().getStringView()); + std::string injected_ctx(request_headers_.get_(Http::CustomHeaders::get().OtSpanContext)); EXPECT_FALSE(injected_ctx.empty()); // Supply empty context. 
- request_headers_.removeOtSpanContext(); + request_headers_.remove(Http::CustomHeaders::get().OtSpanContext); Tracing::SpanPtr span = driver_->startSpan(config_, request_headers_, operation_name_, start_time_, {Tracing::Reason::Sampling, true}); - EXPECT_EQ(nullptr, request_headers_.OtSpanContext()); + EXPECT_FALSE(request_headers_.has(Http::CustomHeaders::get().OtSpanContext)); span->injectContext(request_headers_); - injected_ctx = std::string(request_headers_.OtSpanContext()->value().getStringView()); + injected_ctx = std::string(request_headers_.get_(Http::CustomHeaders::get().OtSpanContext)); EXPECT_FALSE(injected_ctx.empty()); // Context can be parsed fine. @@ -651,9 +647,9 @@ TEST_F(LightStepDriverTest, SerializeAndDeserializeContext) { // Supply parent context, request_headers has properly populated x-ot-span-context. Tracing::SpanPtr span_with_parent = driver_->startSpan( config_, request_headers_, operation_name_, start_time_, {Tracing::Reason::Sampling, true}); - request_headers_.removeOtSpanContext(); + request_headers_.remove(Http::CustomHeaders::get().OtSpanContext); span_with_parent->injectContext(request_headers_); - injected_ctx = std::string(request_headers_.OtSpanContext()->value().getStringView()); + injected_ctx = std::string(request_headers_.get_(Http::CustomHeaders::get().OtSpanContext)); EXPECT_FALSE(injected_ctx.empty()); } } @@ -688,9 +684,9 @@ TEST_F(LightStepDriverTest, MultiplePropagationModes) { Tracing::SpanPtr span = driver_->startSpan(config_, request_headers_, operation_name_, start_time_, {Tracing::Reason::Sampling, true}); - EXPECT_EQ(nullptr, request_headers_.OtSpanContext()); + EXPECT_FALSE(request_headers_.has(Http::CustomHeaders::get().OtSpanContext)); span->injectContext(request_headers_); - EXPECT_TRUE(request_headers_.has("x-ot-span-context")); + EXPECT_TRUE(request_headers_.has(Http::CustomHeaders::get().OtSpanContext)); EXPECT_TRUE(request_headers_.has("ot-tracer-traceid")); 
EXPECT_TRUE(request_headers_.has("x-b3-traceid")); EXPECT_TRUE(request_headers_.has("traceparent")); @@ -714,14 +710,25 @@ TEST_F(LightStepDriverTest, SpawnChild) { childViaSpawn->injectContext(base2); std::string base1_context = - Base64::decode(std::string(base1.OtSpanContext()->value().getStringView())); + Base64::decode(std::string(base1.get_(Http::CustomHeaders::get().OtSpanContext))); std::string base2_context = - Base64::decode(std::string(base2.OtSpanContext()->value().getStringView())); + Base64::decode(std::string(base2.get_(Http::CustomHeaders::get().OtSpanContext))); EXPECT_FALSE(base1_context.empty()); EXPECT_FALSE(base2_context.empty()); } +TEST_F(LightStepDriverTest, GetAndSetBaggage) { + setupValidDriver(); + Tracing::SpanPtr span = driver_->startSpan(config_, request_headers_, operation_name_, + start_time_, {Tracing::Reason::Sampling, true}); + + std::string key = "key1"; + std::string value = "value1"; + span->setBaggage(key, value); + EXPECT_EQ(span->getBaggage(key), value); +} + } // namespace } // namespace Lightstep } // namespace Tracers diff --git a/test/extensions/tracers/opencensus/BUILD b/test/extensions/tracers/opencensus/BUILD index 9aa809b29f8df..abdaa5fc7d8e8 100644 --- a/test/extensions/tracers/opencensus/BUILD +++ b/test/extensions/tracers/opencensus/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/tracers/opencensus/config_test.cc b/test/extensions/tracers/opencensus/config_test.cc index 29888485e3de1..227ed0d353c86 100644 --- a/test/extensions/tracers/opencensus/config_test.cc +++ b/test/extensions/tracers/opencensus/config_test.cc @@ -5,7 +5,8 @@ #include "extensions/tracers/opencensus/config.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/tracer_factory.h" +#include 
"test/mocks/server/tracer_factory_context.h" #include "gmock/gmock.h" #include "gtest/gtest.h" @@ -154,12 +155,18 @@ TEST(OpenCensusTracerConfigTest, OpenCensusHttpTracerGrpc) { OpenCensusTracerFactory factory; auto message = Config::Utility::translateToFactoryConfig( configuration.http(), ProtobufMessage::getStrictValidationVisitor(), factory); +#ifdef ENVOY_GOOGLE_GRPC Tracing::HttpTracerSharedPtr tracer = factory.createHttpTracer(*message, context); EXPECT_NE(nullptr, tracer); // Reset TraceParams back to default. ::opencensus::trace::TraceConfig::SetCurrentTraceParams( {32, 32, 128, 32, ::opencensus::trace::ProbabilitySampler(1e-4)}); +#else + EXPECT_THROW_WITH_MESSAGE( + (factory.createHttpTracer(*message, context)), EnvoyException, + "Opencensus tracer: cannot handle ocagent google grpc service, google grpc is not built in."); +#endif } TEST(OpenCensusTracerConfigTest, ShouldCreateAtMostOneOpenCensusTracer) { @@ -253,9 +260,15 @@ TEST(OpenCensusTracerConfigTest, ShouldNotCacheInvalidConfiguration) { auto message_two = Config::Utility::translateToFactoryConfig( configuration_two.http(), ProtobufMessage::getStrictValidationVisitor(), factory); +#ifdef ENVOY_GOOGLE_GRPC Tracing::HttpTracerSharedPtr tracer_two = factory.createHttpTracer(*message_two, context); // Verify that a new tracer has been created despite an earlier failed attempt. 
EXPECT_NE(nullptr, tracer_two); +#else + EXPECT_THROW_WITH_MESSAGE( + (factory.createHttpTracer(*message_two, context)), EnvoyException, + "Opencensus tracer: cannot handle ocagent google grpc service, google grpc is not built in."); +#endif } TEST(OpenCensusTracerConfigTest, ShouldRejectSubsequentCreateAttemptsWithDifferentConfig) { @@ -298,6 +311,39 @@ TEST(OpenCensusTracerConfigTest, ShouldRejectSubsequentCreateAttemptsWithDiffere "Opencensus has already been configured with a different config."); } +TEST(OpenCensusTracerConfigTest, OpenCensusHttpTracerStackdriverGrpc) { + NiceMock context; + const std::string yaml_string = R"EOF( + http: + name: opencensus + typed_config: + "@type": type.googleapis.com/envoy.config.trace.v2.OpenCensusConfig + stackdriver_exporter_enabled: true + stackdriver_grpc_service: + google_grpc: + target_uri: 127.0.0.1:55678 + stat_prefix: test + initial_metadata: + - key: foo + value: bar + )EOF"; + + envoy::config::trace::v3::Tracing configuration; + TestUtility::loadFromYaml(yaml_string, configuration); + + OpenCensusTracerFactory factory; + auto message = Config::Utility::translateToFactoryConfig( + configuration.http(), ProtobufMessage::getStrictValidationVisitor(), factory); +#ifdef ENVOY_GOOGLE_GRPC + Tracing::HttpTracerSharedPtr tracer = factory.createHttpTracer(*message, context); + EXPECT_NE(nullptr, tracer); +#else + EXPECT_THROW_WITH_MESSAGE((factory.createHttpTracer(*message, context)), EnvoyException, + "Opencensus tracer: cannot handle stackdriver google grpc service, " + "google grpc is not built in."); +#endif +} + } // namespace OpenCensus } // namespace Tracers } // namespace Extensions diff --git a/test/extensions/tracers/opencensus/tracer_test.cc b/test/extensions/tracers/opencensus/tracer_test.cc index 7ee0a23184c2f..88ed7f2f5983e 100644 --- a/test/extensions/tracers/opencensus/tracer_test.cc +++ b/test/extensions/tracers/opencensus/tracer_test.cc @@ -76,7 +76,7 @@ class SpanCatcher : public SpanExporter::Handler { 
private: mutable absl::Mutex mu_; - std::vector spans_ GUARDED_BY(mu_); + std::vector spans_ ABSL_GUARDED_BY(mu_); }; // Use a Singleton SpanCatcher. @@ -123,6 +123,10 @@ TEST(OpenCensusTracerTest, Span) { child->finishSpan(); span->setSampled(false); // Abandon tracer. span->finishSpan(); + + // Baggage methods are a noop in opencensus and won't affect events. + span->setBaggage("baggage_key", "baggage_value"); + ASSERT_EQ("", span->getBaggage("baggage_key")); } // Retrieve SpanData from the OpenCensus trace exporter. diff --git a/test/extensions/tracers/xray/BUILD b/test/extensions/tracers/xray/BUILD index e00d7e395bb9f..bf4e36dcd7bec 100644 --- a/test/extensions/tracers/xray/BUILD +++ b/test/extensions/tracers/xray/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_fuzz_test", @@ -10,6 +8,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( @@ -27,10 +27,13 @@ envoy_extension_cc_test( "//test/mocks/http:http_mocks", "//test/mocks/local_info:local_info_mocks", "//test/mocks/runtime:runtime_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:instance_mocks", + "//test/mocks/server:tracer_factory_context_mocks", "//test/mocks/stats:stats_mocks", "//test/mocks/thread_local:thread_local_mocks", "//test/mocks/tracing:tracing_mocks", + "//test/test_common:environment_lib", + "//test/test_common:network_utility_lib", "//test/test_common:simulated_time_system_lib", ], ) @@ -41,7 +44,9 @@ envoy_extension_cc_test( extension_name = "envoy.tracers.xray", deps = [ "//source/extensions/tracers/xray:config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:instance_mocks", + "//test/mocks/server:tracer_factory_context_mocks", + "//test/mocks/server:tracer_factory_mocks", "//test/test_common:environment_lib", "//test/test_common:utility_lib", "@envoy_api//envoy/config/trace/v3:pkg_cc_proto", diff --git 
a/test/extensions/tracers/xray/config_test.cc b/test/extensions/tracers/xray/config_test.cc index b71092f60eda3..ff39c0dbeaf89 100644 --- a/test/extensions/tracers/xray/config_test.cc +++ b/test/extensions/tracers/xray/config_test.cc @@ -5,7 +5,9 @@ #include "extensions/tracers/xray/config.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/instance.h" +#include "test/mocks/server/tracer_factory.h" +#include "test/mocks/server/tracer_factory_context.h" #include "test/test_common/environment.h" #include "gmock/gmock.h" diff --git a/test/extensions/tracers/xray/localized_sampling_test.cc b/test/extensions/tracers/xray/localized_sampling_test.cc index 1d291ab603066..31b7530326720 100644 --- a/test/extensions/tracers/xray/localized_sampling_test.cc +++ b/test/extensions/tracers/xray/localized_sampling_test.cc @@ -1,6 +1,6 @@ #include "extensions/tracers/xray/localized_sampling.h" -#include "test/mocks/runtime/mocks.h" +#include "test/mocks/common.h" #include "test/test_common/simulated_time_system.h" #include "gmock/gmock.h" @@ -22,19 +22,19 @@ class LocalizedSamplingStrategyTest : public ::testing::Test { }; TEST_F(LocalizedSamplingStrategyTest, EmptyRules) { - NiceMock random_generator; + NiceMock random_generator; LocalizedSamplingStrategy strategy{"", random_generator, time_system_}; ASSERT_TRUE(strategy.usingDefaultManifest()); } TEST_F(LocalizedSamplingStrategyTest, BadJson) { - NiceMock random_generator; + NiceMock random_generator; LocalizedSamplingStrategy strategy{"{{}", random_generator, time_system_}; ASSERT_TRUE(strategy.usingDefaultManifest()); } TEST_F(LocalizedSamplingStrategyTest, ValidCustomRules) { - NiceMock random_generator; + NiceMock random_generator; constexpr auto rules_json = R"EOF( { "version": 2, @@ -59,7 +59,7 @@ TEST_F(LocalizedSamplingStrategyTest, ValidCustomRules) { } TEST_F(LocalizedSamplingStrategyTest, InvalidRate) { - NiceMock random_generator; + NiceMock random_generator; constexpr auto rules_json = R"EOF( { 
"version": 2, @@ -84,7 +84,7 @@ TEST_F(LocalizedSamplingStrategyTest, InvalidRate) { } TEST_F(LocalizedSamplingStrategyTest, InvalidFixedTarget) { - NiceMock random_generator; + NiceMock random_generator; constexpr auto rules_json = R"EOF( { "version": 2, @@ -109,7 +109,7 @@ TEST_F(LocalizedSamplingStrategyTest, InvalidFixedTarget) { } TEST_F(LocalizedSamplingStrategyTest, DefaultRuleMissingRate) { - NiceMock random_generator; + NiceMock random_generator; constexpr auto rules_json = R"EOF( { "version": 2, @@ -133,7 +133,7 @@ TEST_F(LocalizedSamplingStrategyTest, DefaultRuleMissingRate) { } TEST_F(LocalizedSamplingStrategyTest, DefaultRuleMissingFixedTarget) { - NiceMock random_generator; + NiceMock random_generator; constexpr auto rules_json = R"EOF( { "version": 2, @@ -157,7 +157,7 @@ TEST_F(LocalizedSamplingStrategyTest, DefaultRuleMissingFixedTarget) { } TEST_F(LocalizedSamplingStrategyTest, WrongVersion) { - NiceMock random_generator; + NiceMock random_generator; constexpr auto wrong_version = R"EOF( { "version": 1, @@ -182,7 +182,7 @@ TEST_F(LocalizedSamplingStrategyTest, WrongVersion) { } TEST_F(LocalizedSamplingStrategyTest, MissingVersion) { - NiceMock random_generator; + NiceMock random_generator; constexpr auto missing_version = R"EOF( { "rules": [ @@ -206,7 +206,7 @@ TEST_F(LocalizedSamplingStrategyTest, MissingVersion) { } TEST_F(LocalizedSamplingStrategyTest, MissingDefaultRules) { - NiceMock random_generator; + NiceMock random_generator; constexpr auto rules_json = R"EOF( { "version": 2, @@ -227,7 +227,7 @@ TEST_F(LocalizedSamplingStrategyTest, MissingDefaultRules) { } TEST_F(LocalizedSamplingStrategyTest, CustomRuleHostIsNotString) { - NiceMock random_generator; + NiceMock random_generator; constexpr auto rules_json = R"EOF( { "version": 2, @@ -252,7 +252,7 @@ TEST_F(LocalizedSamplingStrategyTest, CustomRuleHostIsNotString) { } TEST_F(LocalizedSamplingStrategyTest, CustomRuleHttpMethodIsNotString) { - NiceMock random_generator; + NiceMock 
random_generator; constexpr auto rules_json = R"EOF( { "version": 2, @@ -277,7 +277,7 @@ TEST_F(LocalizedSamplingStrategyTest, CustomRuleHttpMethodIsNotString) { } TEST_F(LocalizedSamplingStrategyTest, CustomRuleUrlPathIsNotString) { - NiceMock random_generator; + NiceMock random_generator; constexpr auto rules_json = R"EOF( { "version": 2, @@ -302,7 +302,7 @@ TEST_F(LocalizedSamplingStrategyTest, CustomRuleUrlPathIsNotString) { } TEST_F(LocalizedSamplingStrategyTest, CustomRuleMissingFixedTarget) { - NiceMock random_generator; + NiceMock random_generator; constexpr auto rules_json = R"EOF( { "version": 2, @@ -326,7 +326,7 @@ TEST_F(LocalizedSamplingStrategyTest, CustomRuleMissingFixedTarget) { } TEST_F(LocalizedSamplingStrategyTest, CustomRuleMissingRate) { - NiceMock random_generator; + NiceMock random_generator; constexpr auto rules_json = R"EOF( { "version": 2, @@ -350,7 +350,7 @@ TEST_F(LocalizedSamplingStrategyTest, CustomRuleMissingRate) { } TEST_F(LocalizedSamplingStrategyTest, CustomRuleArrayElementWithWrongType) { - NiceMock random_generator; + NiceMock random_generator; constexpr auto rules_json = R"EOF( { "version": 2, @@ -375,7 +375,7 @@ TEST_F(LocalizedSamplingStrategyTest, CustomRuleArrayElementWithWrongType) { } TEST_F(LocalizedSamplingStrategyTest, CustomRuleNegativeFixedRate) { - NiceMock random_generator; + NiceMock random_generator; constexpr auto rules_json = R"EOF( { "version": 2, @@ -400,7 +400,7 @@ TEST_F(LocalizedSamplingStrategyTest, CustomRuleNegativeFixedRate) { } TEST_F(LocalizedSamplingStrategyTest, CustomRuleNegativeRate) { - NiceMock random_generator; + NiceMock random_generator; constexpr auto rules_json = R"EOF( { "version": 2, @@ -425,7 +425,7 @@ TEST_F(LocalizedSamplingStrategyTest, CustomRuleNegativeRate) { } TEST_F(LocalizedSamplingStrategyTest, TraceOnlyFromReservoir) { - NiceMock rng; + NiceMock rng; EXPECT_CALL(rng, random()).WillRepeatedly(Return(90)); constexpr auto rules_json = R"EOF( { @@ -459,7 +459,7 @@ 
TEST_F(LocalizedSamplingStrategyTest, TraceOnlyFromReservoir) { } TEST_F(LocalizedSamplingStrategyTest, TraceFromReservoirAndByRate) { - NiceMock rng; + NiceMock rng; EXPECT_CALL(rng, random()).WillRepeatedly(Return(1)); constexpr auto rules_json = R"EOF( { @@ -492,7 +492,7 @@ TEST_F(LocalizedSamplingStrategyTest, TraceFromReservoirAndByRate) { } TEST_F(LocalizedSamplingStrategyTest, NoMatchingHost) { - NiceMock rng; + NiceMock rng; // this following value doesn't affect the test EXPECT_CALL(rng, random()).WillRepeatedly(Return(50 /*50 percent*/)); // the following rules say: @@ -530,7 +530,7 @@ TEST_F(LocalizedSamplingStrategyTest, NoMatchingHost) { } TEST_F(LocalizedSamplingStrategyTest, NoMatchingHttpMethod) { - NiceMock rng; + NiceMock rng; // this following value doesn't affect the test EXPECT_CALL(rng, random()).WillRepeatedly(Return(50 /*50 percent*/)); // the following rules say: @@ -568,7 +568,7 @@ TEST_F(LocalizedSamplingStrategyTest, NoMatchingHttpMethod) { } TEST_F(LocalizedSamplingStrategyTest, NoMatchingPath) { - NiceMock rng; + NiceMock rng; // this following value doesn't affect the test EXPECT_CALL(rng, random()).WillRepeatedly(Return(50 /*50 percent*/)); // the following rules say: diff --git a/test/extensions/tracers/xray/tracer_test.cc b/test/extensions/tracers/xray/tracer_test.cc index ef7b721c565c3..caeb153def476 100644 --- a/test/extensions/tracers/xray/tracer_test.cc +++ b/test/extensions/tracers/xray/tracer_test.cc @@ -10,9 +10,12 @@ #include "extensions/tracers/xray/tracer.h" #include "extensions/tracers/xray/xray_configuration.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/instance.h" #include "test/mocks/tracing/mocks.h" +#include "test/test_common/environment.h" +#include "test/test_common/network_utility.h" +#include "absl/strings/str_format.h" #include "absl/strings/str_split.h" #include "gmock/gmock.h" #include "gtest/gtest.h" @@ -46,10 +49,10 @@ TEST_F(XRayTracerTest, SerializeSpanTest) { constexpr auto 
expected_http_method = "POST"; constexpr auto expected_http_url = "/first/second"; constexpr auto expected_user_agent = "Mozilla/5.0 (Macintosh; Intel Mac OS X)"; - constexpr auto expected_status_code = "202"; - constexpr auto expected_content_length = "1337"; + constexpr uint32_t expected_status_code = 202; + constexpr uint32_t expected_content_length = 1337; constexpr auto expected_client_ip = "10.0.0.100"; - constexpr auto expected_x_forwarded_for = "false"; + constexpr auto expected_x_forwarded_for = false; constexpr auto expected_upstream_address = "10.0.0.200"; auto on_send = [&](const std::string& json) { @@ -61,13 +64,19 @@ TEST_F(XRayTracerTest, SerializeSpanTest) { ASSERT_EQ(1, s.annotations().size()); ASSERT_TRUE(s.parent_id().empty()); ASSERT_STREQ(expected_span_name, s.name().c_str()); - ASSERT_STREQ(expected_http_method, s.http().request().at("method").c_str()); - ASSERT_STREQ(expected_http_url, s.http().request().at("url").c_str()); - ASSERT_STREQ(expected_user_agent, s.http().request().at("user_agent").c_str()); - ASSERT_STREQ(expected_status_code, s.http().response().at("status").c_str()); - ASSERT_STREQ(expected_content_length, s.http().response().at("content_length").c_str()); - ASSERT_STREQ(expected_client_ip, s.http().request().at("client_ip").c_str()); - ASSERT_STREQ(expected_x_forwarded_for, s.http().request().at("x_forwarded_for").c_str()); + ASSERT_STREQ(expected_http_method, + s.http().request().fields().at("method").string_value().c_str()); + ASSERT_STREQ(expected_http_url, s.http().request().fields().at("url").string_value().c_str()); + ASSERT_STREQ(expected_user_agent, + s.http().request().fields().at("user_agent").string_value().c_str()); + ASSERT_DOUBLE_EQ(expected_status_code, + s.http().response().fields().at("status").number_value()); + ASSERT_DOUBLE_EQ(expected_content_length, + s.http().response().fields().at("content_length").number_value()); + ASSERT_STREQ(expected_client_ip, + 
s.http().request().fields().at("client_ip").string_value().c_str()); + ASSERT_EQ(expected_x_forwarded_for, + s.http().request().fields().at("x_forwarded_for").bool_value()); ASSERT_STREQ(expected_upstream_address, s.annotations().at("upstream_address").c_str()); }; @@ -78,8 +87,8 @@ TEST_F(XRayTracerTest, SerializeSpanTest) { span->setTag("http.method", expected_http_method); span->setTag("http.url", expected_http_url); span->setTag("user_agent", expected_user_agent); - span->setTag("http.status_code", expected_status_code); - span->setTag("response_size", expected_content_length); + span->setTag("http.status_code", absl::StrFormat("%d", expected_status_code)); + span->setTag("response_size", absl::StrFormat("%d", expected_content_length)); span->setTag("peer.address", expected_client_ip); span->setTag("upstream_address", expected_upstream_address); span->finishSpan(); @@ -91,6 +100,16 @@ TEST_F(XRayTracerTest, NonSampledSpansNotSerialized) { span->finishSpan(); } +TEST_F(XRayTracerTest, BaggageNotImplemented) { + Tracer tracer{"" /*span name*/, std::move(broker_), server_.timeSource()}; + auto span = tracer.createNonSampledSpan(); + span->setBaggage("baggage_key", "baggage_value"); + span->finishSpan(); + + // Baggage isn't supported so getBaggage should always return empty + ASSERT_EQ("", span->getBaggage("baggage_key")); +} + TEST_F(XRayTracerTest, ChildSpanHasParentInfo) { NiceMock config; constexpr auto expected_span_name = "Service 1"; @@ -101,7 +120,7 @@ TEST_F(XRayTracerTest, ChildSpanHasParentInfo) { absl::nullopt /*headers*/); const XRay::Span* xray_parent_span = static_cast(parent_span.get()); - const std::string expected_parent_id = xray_parent_span->Id(); + const std::string expected_parent_id = xray_parent_span->id(); auto on_send = [&](const std::string& json) { ASSERT_FALSE(json.empty()); daemon::Segment s; @@ -109,7 +128,7 @@ TEST_F(XRayTracerTest, ChildSpanHasParentInfo) { ASSERT_STREQ(expected_parent_id.c_str(), s.parent_id().c_str()); 
ASSERT_STREQ(expected_span_name, s.name().c_str()); ASSERT_STREQ(xray_parent_span->traceId().c_str(), s.trace_id().c_str()); - ASSERT_STRNE(xray_parent_span->Id().c_str(), s.id().c_str()); + ASSERT_STRNE(xray_parent_span->id().c_str(), s.id().c_str()); }; EXPECT_CALL(broker, send(_)).WillOnce(Invoke(on_send)); @@ -141,7 +160,7 @@ TEST_F(XRayTracerTest, SpanInjectContextHasXRayHeader) { Tracer tracer{span_name, std::move(broker_), server_.timeSource()}; auto span = tracer.startSpan(operation_name, server_.timeSource().systemTime(), absl::nullopt /*headers*/); - Http::RequestHeaderMapImpl request_headers; + Http::TestRequestHeaderMapImpl request_headers; span->injectContext(request_headers); auto* header = request_headers.get(Http::LowerCaseString{XRayTraceHeader}); ASSERT_NE(header, nullptr); @@ -154,7 +173,7 @@ TEST_F(XRayTracerTest, SpanInjectContextHasXRayHeaderNonSampled) { constexpr auto span_name = "my span"; Tracer tracer{span_name, std::move(broker_), server_.timeSource()}; auto span = tracer.createNonSampledSpan(); - Http::RequestHeaderMapImpl request_headers; + Http::TestRequestHeaderMapImpl request_headers; span->injectContext(request_headers); auto* header = request_headers.get(Http::LowerCaseString{XRayTraceHeader}); ASSERT_NE(header, nullptr); @@ -176,6 +195,47 @@ TEST_F(XRayTracerTest, TraceIDFormatTest) { ASSERT_EQ(24, parts[2].length()); } +class XRayDaemonTest : public testing::TestWithParam {}; + +INSTANTIATE_TEST_SUITE_P(IpVersions, XRayDaemonTest, + testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + TestUtility::ipTestParamsToString); + +TEST_P(XRayDaemonTest, VerifyUdpPacketContents) { + NiceMock server; + Network::Test::UdpSyncPeer xray_fake_daemon(GetParam()); + const std::string daemon_endpoint = xray_fake_daemon.localAddress()->asString(); + Tracer tracer{"my_segment", std::make_unique(daemon_endpoint), + server.timeSource()}; + auto span = tracer.startSpan("ingress" /*operation name*/, server.timeSource().systemTime(), + 
absl::nullopt /*headers*/); + + span->setTag("http.status_code", "202"); + span->finishSpan(); + + Network::UdpRecvData datagram; + xray_fake_daemon.recv(datagram); + + const std::string header_json = R"EOF({"format":"json","version":1})EOF"; + // The UDP datagram contains two independent, consecutive JSON documents; a header and a body. + const std::string payload = datagram.buffer_->toString(); + // Make sure the payload has enough data. + ASSERT_GT(payload.length(), header_json.length()); + // Skip the header since we're only interested in the body. + const std::string body = payload.substr(header_json.length()); + + EXPECT_EQ(0, payload.find(header_json)); + + // Deserialize the body to verify it. + source::extensions::tracers::xray::daemon::Segment seg; + MessageUtil::loadFromJson(body, seg, ProtobufMessage::getNullValidationVisitor()); + EXPECT_STREQ("my_segment", seg.name().c_str()); + for (auto&& f : seg.http().request().fields()) { + // there should only be a single field + EXPECT_EQ(202, f.second.number_value()); + } +} + } // namespace } // namespace XRay } // namespace Tracers diff --git a/test/extensions/tracers/xray/xray_tracer_impl_test.cc b/test/extensions/tracers/xray/xray_tracer_impl_test.cc index 81f9532cb4a8a..0019ef66544e3 100644 --- a/test/extensions/tracers/xray/xray_tracer_impl_test.cc +++ b/test/extensions/tracers/xray/xray_tracer_impl_test.cc @@ -5,7 +5,9 @@ #include "extensions/tracers/xray/xray_tracer_impl.h" #include "test/mocks/runtime/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/instance.h" +#include "test/mocks/server/tracer_factory.h" +#include "test/mocks/server/tracer_factory_context.h" #include "test/mocks/thread_local/mocks.h" #include "test/mocks/tracing/mocks.h" #include "test/test_common/utility.h" diff --git a/test/extensions/tracers/zipkin/BUILD b/test/extensions/tracers/zipkin/BUILD index 385c4f194759c..efa94415b0316 100644 --- a/test/extensions/tracers/zipkin/BUILD +++ 
b/test/extensions/tracers/zipkin/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( @@ -52,7 +52,8 @@ envoy_extension_cc_test( extension_name = "envoy.tracers.zipkin", deps = [ "//source/extensions/tracers/zipkin:config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:tracer_factory_context_mocks", + "//test/mocks/server:tracer_factory_mocks", "//test/test_common:utility_lib", "@envoy_api//envoy/config/trace/v3:pkg_cc_proto", ], diff --git a/test/extensions/tracers/zipkin/config_test.cc b/test/extensions/tracers/zipkin/config_test.cc index 352a923ffe23c..0f62b8f7fd7f8 100644 --- a/test/extensions/tracers/zipkin/config_test.cc +++ b/test/extensions/tracers/zipkin/config_test.cc @@ -5,7 +5,8 @@ #include "extensions/tracers/zipkin/config.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/tracer_factory.h" +#include "test/mocks/server/tracer_factory_context.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git a/test/extensions/tracers/zipkin/span_buffer_test.cc b/test/extensions/tracers/zipkin/span_buffer_test.cc index 05563a02de01f..210e9df37b253 100644 --- a/test/extensions/tracers/zipkin/span_buffer_test.cc +++ b/test/extensions/tracers/zipkin/span_buffer_test.cc @@ -51,8 +51,9 @@ Annotation createAnnotation(const absl::string_view value, const IpType ip_type) BinaryAnnotation createTag() { BinaryAnnotation tag; - tag.setKey("component"); - tag.setValue("proxy"); + tag.setKey("response_size"); + // ensure duration replacement doesn't override this value. 
+ tag.setValue(std::to_string(DEFAULT_TEST_DURATION)); return tag; } @@ -134,11 +135,12 @@ TEST(ZipkinSpanBufferTest, TestSerializeTimestamp) { ProtobufWkt::Struct object; auto* fields = object.mutable_fields(); Util::Replacements replacements; - (*fields)["timestamp"] = Util::uint64Value(DEFAULT_TEST_TIMESTAMP, replacements); + (*fields)["timestamp"] = Util::uint64Value(DEFAULT_TEST_TIMESTAMP, "timestamp", replacements); ASSERT_EQ(1, replacements.size()); - EXPECT_EQ(absl::StrCat("\"", default_timestamp_string, "\""), replacements.at(0).first); - EXPECT_EQ(default_timestamp_string, replacements.at(0).second); + EXPECT_EQ(absl::StrCat("\"timestamp\":\"", default_timestamp_string, "\""), + replacements.at(0).first); + EXPECT_EQ(absl::StrCat("\"timestamp\":", default_timestamp_string), replacements.at(0).second); } TEST(ZipkinSpanBufferTest, ConstructBuffer) { @@ -157,8 +159,8 @@ TEST(ZipkinSpanBufferTest, ConstructBuffer) { R"("endpoint":{"ipv4":"1.2.3.4",)" R"("port":8080,)" R"("serviceName":"service1"}}],)" - R"("binaryAnnotations":[{"key":"component",)" - R"("value":"proxy"}]}])"); + R"("binaryAnnotations":[{"key":"response_size",)" + R"("value":"DEFAULT_TEST_DURATION"}]}])"); const std::string expected2 = withDefaultTimestampAndDuration(R"([{"traceId":"0000000000000001",)" @@ -175,8 +177,8 @@ TEST(ZipkinSpanBufferTest, ConstructBuffer) { R"("endpoint":{"ipv4":"1.2.3.4",)" R"("port":8080,)" R"("serviceName":"service1"}}],)" - R"("binaryAnnotations":[{"key":"component",)" - R"("value":"proxy"}]},)" + R"("binaryAnnotations":[{"key":"response_size",)" + R"("value":"DEFAULT_TEST_DURATION"}]},)" R"({"traceId":"0000000000000001",)" R"("name":"",)" R"("id":"0000000000000001",)" @@ -191,8 +193,8 @@ TEST(ZipkinSpanBufferTest, ConstructBuffer) { R"("endpoint":{"ipv4":"1.2.3.4",)" R"("port":8080,)" R"("serviceName":"service1"}}],)" - R"("binaryAnnotations":[{"key":"component",)" - R"("value":"proxy"}]}])"); + R"("binaryAnnotations":[{"key":"response_size",)" + 
R"("value":"DEFAULT_TEST_DURATION"}]}])"); const bool shared = true; const bool delay_allocation = true; @@ -221,7 +223,7 @@ TEST(ZipkinSpanBufferTest, SerializeSpan) { R"("ipv4":"1.2.3.4",)" R"("port":8080},)" R"("tags":{)" - R"("component":"proxy"})" + R"("response_size":"DEFAULT_TEST_DURATION"},)" "}]"), JsonStringEq(wrapAsObject(buffer1.serialize()))); @@ -238,7 +240,7 @@ TEST(ZipkinSpanBufferTest, SerializeSpan) { R"("ipv6":"2001:db8:85a3::8a2e:370:4444",)" R"("port":7334},)" R"("tags":{)" - R"("component":"proxy"})" + R"("response_size":"DEFAULT_TEST_DURATION"},)" "}]"), JsonStringEq(wrapAsObject(buffer1_v6.serialize()))); @@ -255,7 +257,7 @@ TEST(ZipkinSpanBufferTest, SerializeSpan) { R"("ipv4":"1.2.3.4",)" R"("port":8080},)" R"("tags":{)" - R"("component":"proxy"}},)" + R"("response_size":"DEFAULT_TEST_DURATION"}},)" R"({)" R"("traceId":"0000000000000001",)" R"("id":"0000000000000001",)" @@ -267,7 +269,7 @@ TEST(ZipkinSpanBufferTest, SerializeSpan) { R"("ipv4":"1.2.3.4",)" R"("port":8080},)" R"("tags":{)" - R"("component":"proxy"},)" + R"("response_size":"DEFAULT_TEST_DURATION"},)" R"("shared":true)" "}]"), JsonStringEq(wrapAsObject(buffer2.serialize()))); @@ -285,7 +287,7 @@ TEST(ZipkinSpanBufferTest, SerializeSpan) { R"("ipv4":"1.2.3.4",)" R"("port":8080},)" R"("tags":{)" - R"("component":"proxy"}},)" + R"("response_size":"DEFAULT_TEST_DURATION"}},)" R"({)" R"("traceId":"0000000000000001",)" R"("id":"0000000000000001",)" @@ -297,7 +299,7 @@ TEST(ZipkinSpanBufferTest, SerializeSpan) { R"("ipv4":"1.2.3.4",)" R"("port":8080},)" R"("tags":{)" - R"("component":"proxy"})" + R"("response_size":"DEFAULT_TEST_DURATION"})" "}]"), JsonStringEq(wrapAsObject(buffer3.serialize()))); @@ -315,7 +317,7 @@ TEST(ZipkinSpanBufferTest, SerializeSpan) { R"("ipv4":"AQIDBA==",)" R"("port":8080},)" R"("tags":{)" - R"("component":"proxy"})" + R"("response_size":"DEFAULT_TEST_DURATION"})" "}]}"), serializedMessageToJson(buffer4.serialize())); @@ -333,7 +335,7 @@ 
TEST(ZipkinSpanBufferTest, SerializeSpan) { R"("ipv6":"IAENuIWjAAAAAIouA3BERA==",)" R"("port":7334},)" R"("tags":{)" - R"("component":"proxy"})" + R"("response_size":"DEFAULT_TEST_DURATION"})" "}]}"), serializedMessageToJson(buffer4_v6.serialize())); @@ -351,7 +353,7 @@ TEST(ZipkinSpanBufferTest, SerializeSpan) { R"("ipv4":"AQIDBA==",)" R"("port":8080},)" R"("tags":{)" - R"("component":"proxy"}},)" + R"("response_size":"DEFAULT_TEST_DURATION"}},)" R"({)" R"("traceId":"AAAAAAAAAAE=",)" R"("id":"AQAAAAAAAAA=",)" @@ -363,7 +365,7 @@ TEST(ZipkinSpanBufferTest, SerializeSpan) { R"("ipv4":"AQIDBA==",)" R"("port":8080},)" R"("tags":{)" - R"("component":"proxy"},)" + R"("response_size":"DEFAULT_TEST_DURATION"},)" R"("shared":true)" "}]}"), serializedMessageToJson(buffer5.serialize())); @@ -382,7 +384,7 @@ TEST(ZipkinSpanBufferTest, SerializeSpan) { R"("ipv4":"AQIDBA==",)" R"("port":8080},)" R"("tags":{)" - R"("component":"proxy"}},)" + R"("response_size":"DEFAULT_TEST_DURATION"}},)" R"({)" R"("traceId":"AAAAAAAAAAE=",)" R"("id":"AQAAAAAAAAA=",)" @@ -394,7 +396,7 @@ TEST(ZipkinSpanBufferTest, SerializeSpan) { R"("ipv4":"AQIDBA==",)" R"("port":8080},)" R"("tags":{)" - R"("component":"proxy"})" + R"("response_size":"DEFAULT_TEST_DURATION"})" "}]}"), serializedMessageToJson(buffer6.serialize())); } @@ -413,7 +415,8 @@ TEST(ZipkinSpanBufferTest, TestSerializeTimestampInTheFuture) { ProtobufWkt::Struct object; auto* objectFields = object.mutable_fields(); Util::Replacements replacements; - (*objectFields)["timestamp"] = Util::uint64Value(DEFAULT_TEST_TIMESTAMP, replacements); + (*objectFields)["timestamp"] = + Util::uint64Value(DEFAULT_TEST_TIMESTAMP, "timestamp", replacements); const auto objectJson = MessageUtil::getJsonStringFromMessage(object, false, true); // We still have "1584324295476870" from MessageUtil::getJsonStringFromMessage here. 
EXPECT_EQ(R"({"timestamp":"1584324295476870"})", objectJson); diff --git a/test/extensions/tracers/zipkin/span_context_extractor_test.cc b/test/extensions/tracers/zipkin/span_context_extractor_test.cc index 0dfa4b8ade85b..17977d4451c9f 100644 --- a/test/extensions/tracers/zipkin/span_context_extractor_test.cc +++ b/test/extensions/tracers/zipkin/span_context_extractor_test.cc @@ -27,10 +27,10 @@ TEST(ZipkinSpanContextExtractorTest, Largest) { auto context = extractor.extractSpanContext(true); EXPECT_TRUE(context.second); EXPECT_EQ(3, context.first.id()); - EXPECT_EQ(2, context.first.parent_id()); + EXPECT_EQ(2, context.first.parentId()); EXPECT_TRUE(context.first.is128BitTraceId()); - EXPECT_EQ(1, context.first.trace_id()); - EXPECT_EQ(9, context.first.trace_id_high()); + EXPECT_EQ(1, context.first.traceId()); + EXPECT_EQ(9, context.first.traceIdHigh()); EXPECT_TRUE(context.first.sampled()); EXPECT_TRUE(extractor.extractSampled({Tracing::Reason::Sampling, false})); } @@ -42,10 +42,10 @@ TEST(ZipkinSpanContextExtractorTest, WithoutParentDebug) { auto context = extractor.extractSpanContext(true); EXPECT_TRUE(context.second); EXPECT_EQ(3, context.first.id()); - EXPECT_EQ(0, context.first.parent_id()); + EXPECT_EQ(0, context.first.parentId()); EXPECT_TRUE(context.first.is128BitTraceId()); - EXPECT_EQ(1, context.first.trace_id()); - EXPECT_EQ(9, context.first.trace_id_high()); + EXPECT_EQ(1, context.first.traceId()); + EXPECT_EQ(9, context.first.traceIdHigh()); EXPECT_TRUE(context.first.sampled()); EXPECT_TRUE(extractor.extractSampled({Tracing::Reason::Sampling, false})); } @@ -73,10 +73,10 @@ TEST(ZipkinSpanContextExtractorTest, DebugOnly) { auto context = extractor.extractSpanContext(true); EXPECT_FALSE(context.second); EXPECT_EQ(0, context.first.id()); - EXPECT_EQ(0, context.first.parent_id()); + EXPECT_EQ(0, context.first.parentId()); EXPECT_FALSE(context.first.is128BitTraceId()); - EXPECT_EQ(0, context.first.trace_id()); - EXPECT_EQ(0, 
context.first.trace_id_high()); + EXPECT_EQ(0, context.first.traceId()); + EXPECT_EQ(0, context.first.traceIdHigh()); EXPECT_FALSE(context.first.sampled()); EXPECT_TRUE(extractor.extractSampled({Tracing::Reason::Sampling, false})); } @@ -87,10 +87,10 @@ TEST(ZipkinSpanContextExtractorTest, Sampled) { auto context = extractor.extractSpanContext(true); EXPECT_FALSE(context.second); EXPECT_EQ(0, context.first.id()); - EXPECT_EQ(0, context.first.parent_id()); + EXPECT_EQ(0, context.first.parentId()); EXPECT_FALSE(context.first.is128BitTraceId()); - EXPECT_EQ(0, context.first.trace_id()); - EXPECT_EQ(0, context.first.trace_id_high()); + EXPECT_EQ(0, context.first.traceId()); + EXPECT_EQ(0, context.first.traceIdHigh()); EXPECT_FALSE(context.first.sampled()); EXPECT_TRUE(extractor.extractSampled({Tracing::Reason::Sampling, false})); } @@ -101,10 +101,10 @@ TEST(ZipkinSpanContextExtractorTest, SampledFalse) { auto context = extractor.extractSpanContext(true); EXPECT_FALSE(context.second); EXPECT_EQ(0, context.first.id()); - EXPECT_EQ(0, context.first.parent_id()); + EXPECT_EQ(0, context.first.parentId()); EXPECT_FALSE(context.first.is128BitTraceId()); - EXPECT_EQ(0, context.first.trace_id()); - EXPECT_EQ(0, context.first.trace_id_high()); + EXPECT_EQ(0, context.first.traceId()); + EXPECT_EQ(0, context.first.traceIdHigh()); EXPECT_FALSE(context.first.sampled()); EXPECT_FALSE(extractor.extractSampled({Tracing::Reason::Sampling, true})); } @@ -116,10 +116,10 @@ TEST(ZipkinSpanContextExtractorTest, IdNotYetSampled128) { auto context = extractor.extractSpanContext(true); EXPECT_TRUE(context.second); EXPECT_EQ(3, context.first.id()); - EXPECT_EQ(0, context.first.parent_id()); + EXPECT_EQ(0, context.first.parentId()); EXPECT_TRUE(context.first.is128BitTraceId()); - EXPECT_EQ(1, context.first.trace_id()); - EXPECT_EQ(9, context.first.trace_id_high()); + EXPECT_EQ(1, context.first.traceId()); + EXPECT_EQ(9, context.first.traceIdHigh()); EXPECT_TRUE(context.first.sampled()); 
EXPECT_FALSE(extractor.extractSampled({Tracing::Reason::Sampling, false})); } @@ -130,10 +130,10 @@ TEST(ZipkinSpanContextExtractorTest, IdsUnsampled) { auto context = extractor.extractSpanContext(true); EXPECT_TRUE(context.second); EXPECT_EQ(3, context.first.id()); - EXPECT_EQ(0, context.first.parent_id()); + EXPECT_EQ(0, context.first.parentId()); EXPECT_FALSE(context.first.is128BitTraceId()); - EXPECT_EQ(1, context.first.trace_id()); - EXPECT_EQ(0, context.first.trace_id_high()); + EXPECT_EQ(1, context.first.traceId()); + EXPECT_EQ(0, context.first.traceIdHigh()); EXPECT_TRUE(context.first.sampled()); EXPECT_FALSE(extractor.extractSampled({Tracing::Reason::Sampling, true})); } @@ -145,10 +145,10 @@ TEST(ZipkinSpanContextExtractorTest, ParentUnsampled) { auto context = extractor.extractSpanContext(true); EXPECT_TRUE(context.second); EXPECT_EQ(3, context.first.id()); - EXPECT_EQ(2, context.first.parent_id()); + EXPECT_EQ(2, context.first.parentId()); EXPECT_FALSE(context.first.is128BitTraceId()); - EXPECT_EQ(1, context.first.trace_id()); - EXPECT_EQ(0, context.first.trace_id_high()); + EXPECT_EQ(1, context.first.traceId()); + EXPECT_EQ(0, context.first.traceIdHigh()); EXPECT_TRUE(context.first.sampled()); EXPECT_FALSE(extractor.extractSampled({Tracing::Reason::Sampling, true})); } @@ -160,10 +160,10 @@ TEST(ZipkinSpanContextExtractorTest, ParentDebug) { auto context = extractor.extractSpanContext(true); EXPECT_TRUE(context.second); EXPECT_EQ(3, context.first.id()); - EXPECT_EQ(2, context.first.parent_id()); + EXPECT_EQ(2, context.first.parentId()); EXPECT_FALSE(context.first.is128BitTraceId()); - EXPECT_EQ(1, context.first.trace_id()); - EXPECT_EQ(0, context.first.trace_id_high()); + EXPECT_EQ(1, context.first.traceId()); + EXPECT_EQ(0, context.first.traceIdHigh()); EXPECT_TRUE(context.first.sampled()); EXPECT_TRUE(extractor.extractSampled({Tracing::Reason::Sampling, false})); } @@ -174,10 +174,10 @@ TEST(ZipkinSpanContextExtractorTest, IdsWithDebug) { auto 
context = extractor.extractSpanContext(true); EXPECT_TRUE(context.second); EXPECT_EQ(3, context.first.id()); - EXPECT_EQ(0, context.first.parent_id()); + EXPECT_EQ(0, context.first.parentId()); EXPECT_FALSE(context.first.is128BitTraceId()); - EXPECT_EQ(1, context.first.trace_id()); - EXPECT_EQ(0, context.first.trace_id_high()); + EXPECT_EQ(1, context.first.traceId()); + EXPECT_EQ(0, context.first.traceIdHigh()); EXPECT_TRUE(context.first.sampled()); EXPECT_TRUE(extractor.extractSampled({Tracing::Reason::Sampling, false})); } @@ -188,10 +188,10 @@ TEST(ZipkinSpanContextExtractorTest, WithoutSampled) { auto context = extractor.extractSpanContext(false); EXPECT_TRUE(context.second); EXPECT_EQ(3, context.first.id()); - EXPECT_EQ(0, context.first.parent_id()); + EXPECT_EQ(0, context.first.parentId()); EXPECT_FALSE(context.first.is128BitTraceId()); - EXPECT_EQ(1, context.first.trace_id()); - EXPECT_EQ(0, context.first.trace_id_high()); + EXPECT_EQ(1, context.first.traceId()); + EXPECT_EQ(0, context.first.traceIdHigh()); EXPECT_FALSE(context.first.sampled()); EXPECT_TRUE(extractor.extractSampled({Tracing::Reason::Sampling, true})); } diff --git a/test/extensions/tracers/zipkin/tracer_test.cc b/test/extensions/tracers/zipkin/tracer_test.cc index 878af93da6991..549437dc0bb45 100644 --- a/test/extensions/tracers/zipkin/tracer_test.cc +++ b/test/extensions/tracers/zipkin/tracer_test.cc @@ -45,7 +45,7 @@ class ZipkinTracerTest : public testing::Test { TEST_F(ZipkinTracerTest, SpanCreation) { Network::Address::InstanceConstSharedPtr addr = Network::Utility::parseInternetAddressAndPort("127.0.0.1:9000"); - NiceMock random_generator; + NiceMock random_generator; Tracer tracer("my_service_name", addr, random_generator, false, true, time_system_); SystemTime timestamp = time_system_.systemTime(); @@ -185,8 +185,8 @@ TEST_F(ZipkinTracerTest, SpanCreation) { ON_CALL(config, operationName()).WillByDefault(Return(Tracing::OperationName::Ingress)); TestRandomGenerator generator; const 
uint64_t generated_parent_id = generator.random(); - SpanContext modified_root_span_context(root_span_context.trace_id_high(), - root_span_context.trace_id(), root_span_context.id(), + SpanContext modified_root_span_context(root_span_context.traceIdHigh(), + root_span_context.traceId(), root_span_context.id(), generated_parent_id, root_span_context.sampled()); SpanPtr new_shared_context_span = tracer.startSpan(config, "new_shared_context_span", timestamp, modified_root_span_context); @@ -202,7 +202,7 @@ TEST_F(ZipkinTracerTest, SpanCreation) { // The parent should be the same as in the CS side EXPECT_TRUE(new_shared_context_span->isSetParentId()); - EXPECT_EQ(modified_root_span_context.parent_id(), new_shared_context_span->parentId()); + EXPECT_EQ(modified_root_span_context.parentId(), new_shared_context_span->parentId()); // span timestamp should not be set (it was set in the CS side) EXPECT_FALSE(new_shared_context_span->isSetTimestamp()); @@ -229,7 +229,7 @@ TEST_F(ZipkinTracerTest, SpanCreation) { TEST_F(ZipkinTracerTest, FinishSpan) { Network::Address::InstanceConstSharedPtr addr = Network::Utility::parseInternetAddressAndPort("127.0.0.1:9000"); - NiceMock random_generator; + NiceMock random_generator; Tracer tracer("my_service_name", addr, random_generator, false, true, time_system_); SystemTime timestamp = time_system_.systemTime(); @@ -312,7 +312,7 @@ TEST_F(ZipkinTracerTest, FinishSpan) { TEST_F(ZipkinTracerTest, FinishNotSampledSpan) { Network::Address::InstanceConstSharedPtr addr = Network::Utility::parseInternetAddressAndPort("127.0.0.1:9000"); - NiceMock random_generator; + NiceMock random_generator; Tracer tracer("my_service_name", addr, random_generator, false, true, time_system_); SystemTime timestamp = time_system_.systemTime(); @@ -340,7 +340,7 @@ TEST_F(ZipkinTracerTest, FinishNotSampledSpan) { TEST_F(ZipkinTracerTest, SpanSampledPropagatedToChild) { Network::Address::InstanceConstSharedPtr addr = 
Network::Utility::parseInternetAddressAndPort("127.0.0.1:9000"); - NiceMock random_generator; + NiceMock random_generator; Tracer tracer("my_service_name", addr, random_generator, false, true, time_system_); SystemTime timestamp = time_system_.systemTime(); @@ -368,7 +368,7 @@ TEST_F(ZipkinTracerTest, SpanSampledPropagatedToChild) { TEST_F(ZipkinTracerTest, RootSpan128bitTraceId) { Network::Address::InstanceConstSharedPtr addr = Network::Utility::parseInternetAddressAndPort("127.0.0.1:9000"); - NiceMock random_generator; + NiceMock random_generator; Tracer tracer("my_service_name", addr, random_generator, true, true, time_system_); SystemTime timestamp = time_system_.systemTime(); @@ -387,7 +387,7 @@ TEST_F(ZipkinTracerTest, RootSpan128bitTraceId) { TEST_F(ZipkinTracerTest, SharedSpanContext) { Network::Address::InstanceConstSharedPtr addr = Network::Utility::parseInternetAddressAndPort("127.0.0.1:9000"); - NiceMock random_generator; + NiceMock random_generator; const bool shared_span_context = true; Tracer tracer("my_service_name", addr, random_generator, false, shared_span_context, @@ -411,7 +411,7 @@ TEST_F(ZipkinTracerTest, SharedSpanContext) { TEST_F(ZipkinTracerTest, NotSharedSpanContext) { Network::Address::InstanceConstSharedPtr addr = Network::Utility::parseInternetAddressAndPort("127.0.0.1:9000"); - NiceMock random_generator; + NiceMock random_generator; const bool shared_span_context = false; Tracer tracer("my_service_name", addr, random_generator, false, shared_span_context, diff --git a/test/extensions/tracers/zipkin/zipkin_tracer_impl_test.cc b/test/extensions/tracers/zipkin/zipkin_tracer_impl_test.cc index 96078b70c8988..0d1488e63bff3 100644 --- a/test/extensions/tracers/zipkin/zipkin_tracer_impl_test.cc +++ b/test/extensions/tracers/zipkin/zipkin_tracer_impl_test.cc @@ -2,7 +2,6 @@ #include #include #include -#include #include "envoy/config/trace/v3/zipkin.pb.h" @@ -90,9 +89,9 @@ class ZipkinDriverTest : public testing::Test { const 
Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { callback = &callbacks; - EXPECT_EQ("/api/v1/spans", message->headers().Path()->value().getStringView()); - EXPECT_EQ("fake_cluster", message->headers().Host()->value().getStringView()); - EXPECT_EQ(content_type, message->headers().ContentType()->value().getStringView()); + EXPECT_EQ("/api/v1/spans", message->headers().getPathValue()); + EXPECT_EQ("fake_cluster", message->headers().getHostValue()); + EXPECT_EQ(content_type, message->headers().getContentTypeValue()); return &request; })); @@ -146,7 +145,7 @@ class ZipkinDriverTest : public testing::Test { NiceMock cm_; NiceMock runtime_; NiceMock local_info_; - NiceMock random_; + NiceMock random_; NiceMock config_; Event::SimulatedTimeSystem test_time_; @@ -235,10 +234,9 @@ TEST_F(ZipkinDriverTest, FlushOneSpanReportFailure) { const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { callback = &callbacks; - EXPECT_EQ("/api/v1/spans", message->headers().Path()->value().getStringView()); - EXPECT_EQ("fake_cluster", message->headers().Host()->value().getStringView()); - EXPECT_EQ("application/json", - message->headers().ContentType()->value().getStringView()); + EXPECT_EQ("/api/v1/spans", message->headers().getPathValue()); + EXPECT_EQ("fake_cluster", message->headers().getHostValue()); + EXPECT_EQ("application/json", message->headers().getContentTypeValue()); return &request; })); @@ -621,7 +619,7 @@ TEST_F(ZipkinDriverTest, ZipkinSpanTest) { // Test effective setTag() // ==== - request_headers_.removeOtSpanContext(); + request_headers_.remove(Http::CustomHeaders::get().OtSpanContext); // New span will have a CS annotation Tracing::SpanPtr span = driver_->startSpan(config_, request_headers_, operation_name_, @@ -644,7 +642,7 @@ TEST_F(ZipkinDriverTest, ZipkinSpanTest) { const std::string parent_id = Hex::uint64ToHex(generateRandom64()); const std::string context = trace_id + ";" + span_id + ";" + parent_id + ";" + CLIENT_SEND; - 
request_headers_.setOtSpanContext(context); + request_headers_.setCopy(Http::CustomHeaders::get().OtSpanContext, context); // New span will have an SR annotation Tracing::SpanPtr span2 = driver_->startSpan(config_, request_headers_, operation_name_, @@ -691,6 +689,14 @@ TEST_F(ZipkinDriverTest, ZipkinSpanTest) { EXPECT_FALSE(zipkin_zipkin_span4.annotations().empty()); EXPECT_EQ(timestamp_count, zipkin_zipkin_span4.annotations().back().timestamp()); EXPECT_EQ("abc", zipkin_zipkin_span4.annotations().back().value()); + + // ==== + // Test baggage noop + // ==== + Tracing::SpanPtr span5 = driver_->startSpan(config_, request_headers_, operation_name_, + start_time_, {Tracing::Reason::Sampling, true}); + span5->setBaggage("baggage_key", "baggage_value"); + EXPECT_EQ("", span5->getBaggage("baggage_key")); } TEST_F(ZipkinDriverTest, ZipkinSpanContextFromB3HeadersTest) { @@ -866,11 +872,10 @@ TEST_F(ZipkinDriverTest, DuplicatedHeader) { span->setSampled(true); span->injectContext(request_headers_); request_headers_.iterate( - [](const Http::HeaderEntry& header, void* cb) -> Http::HeaderMap::Iterate { - EXPECT_FALSE(static_cast(cb)->operator()(header.key().getStringView())); + [&dup_callback](const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate { + dup_callback(header.key().getStringView()); return Http::HeaderMap::Iterate::Continue; - }, - &dup_callback); + }); } } // namespace diff --git a/test/extensions/transport_sockets/alts/BUILD b/test/extensions/transport_sockets/alts/BUILD index 8c7cbfa6f27e4..489c29f782733 100644 --- a/test/extensions/transport_sockets/alts/BUILD +++ b/test/extensions/transport_sockets/alts/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -10,6 +8,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( @@ -17,10 +17,11 @@ envoy_extension_cc_test( srcs = ["config_test.cc"], extension_name = 
"envoy.transport_sockets.alts", deps = [ + "//source/common/singleton:manager_impl_lib", "//source/extensions/transport_sockets/alts:config", "//test/mocks/event:event_mocks", "//test/mocks/network:network_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:transport_socket_factory_context_mocks", ], ) @@ -75,7 +76,10 @@ envoy_extension_cc_test( extension_name = "envoy.transport_sockets.alts", external_deps = [ "grpc_alts_fake_handshaker_server", + "grpc_alts_handshaker_proto", + "grpc_alts_transport_security_common_proto", ], + tags = ["fails_on_windows"], deps = [ "//source/common/common:utility_lib", "//source/common/event:dispatcher_includes", @@ -86,6 +90,7 @@ envoy_extension_cc_test( "//test/integration:http_integration_lib", "//test/mocks/runtime:runtime_mocks", "//test/mocks/secret:secret_mocks", + "//test/mocks/server:server_mocks", "//test/test_common:utility_lib", "@envoy_api//envoy/config/transport_socket/alts/v2alpha:pkg_cc_proto", ], diff --git a/test/extensions/transport_sockets/alts/alts_integration_test.cc b/test/extensions/transport_sockets/alts/alts_integration_test.cc index 587fd3b8e490b..609d70b73b3b4 100644 --- a/test/extensions/transport_sockets/alts/alts_integration_test.cc +++ b/test/extensions/transport_sockets/alts/alts_integration_test.cc @@ -5,12 +5,24 @@ #include "extensions/transport_sockets/alts/config.h" +#ifdef major +#undef major +#endif +#ifdef minor +#undef minor +#endif + #include "test/core/tsi/alts/fake_handshaker/fake_handshaker_server.h" +#include "test/core/tsi/alts/fake_handshaker/handshaker.grpc.pb.h" +#include "test/core/tsi/alts/fake_handshaker/handshaker.pb.h" +#include "test/core/tsi/alts/fake_handshaker/transport_security_common.pb.h" + #include "test/integration/http_integration.h" #include "test/integration/integration.h" #include "test/integration/server.h" #include "test/integration/utility.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/transport_socket_factory_context.h" 
+ #include "test/test_common/network_utility.h" #include "test/test_common/utility.h" @@ -29,16 +41,55 @@ namespace TransportSockets { namespace Alts { namespace { +// Fake handshaker message, copied from grpc::gcp::FakeHandshakerService implementation. +constexpr char kClientInitFrame[] = "ClientInit"; + +// Hollowed out implementation of HandshakerService that is dysfunctional, but +// responds correctly to the first client request, capturing client and server +// ALTS versions in the process. +class CapturingHandshakerService : public grpc::gcp::HandshakerService::Service { +public: + CapturingHandshakerService() = default; + + grpc::Status + DoHandshake(grpc::ServerContext*, + grpc::ServerReaderWriter* stream) + override { + grpc::gcp::HandshakerReq request; + grpc::gcp::HandshakerResp response; + while (stream->Read(&request)) { + if (request.has_client_start()) { + client_versions = request.client_start().rpc_versions(); + // Sets response to make first request successful. + response.set_out_frames(kClientInitFrame); + response.set_bytes_consumed(0); + response.mutable_status()->set_code(grpc::StatusCode::OK); + } else if (request.has_server_start()) { + server_versions = request.server_start().rpc_versions(); + response.mutable_status()->set_code(grpc::StatusCode::CANCELLED); + } + stream->Write(response); + request.Clear(); + } + return grpc::Status::OK; + } + + // Storing client and server RPC versions for later verification. 
+ grpc::gcp::RpcProtocolVersions client_versions; + grpc::gcp::RpcProtocolVersions server_versions; +}; + class AltsIntegrationTestBase : public testing::TestWithParam, public HttpIntegrationTest { public: AltsIntegrationTestBase(const std::string& server_peer_identity, const std::string& client_peer_identity, bool server_connect_handshaker, - bool client_connect_handshaker) + bool client_connect_handshaker, bool capturing_handshaker = false) : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, GetParam()), server_peer_identity_(server_peer_identity), client_peer_identity_(client_peer_identity), server_connect_handshaker_(server_connect_handshaker), - client_connect_handshaker_(client_connect_handshaker) {} + client_connect_handshaker_(client_connect_handshaker), + capturing_handshaker_(capturing_handshaker) {} void initialize() override { config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { @@ -60,7 +111,14 @@ class AltsIntegrationTestBase : public testing::TestWithParamthreadFactory().createThread([this]() { - std::unique_ptr service = grpc::gcp::CreateFakeHandshakerService(); + std::unique_ptr service; + if (capturing_handshaker_) { + capturing_handshaker_service_ = new CapturingHandshakerService(); + service = std::unique_ptr{capturing_handshaker_service_}; + } else { + capturing_handshaker_service_ = nullptr; + service = grpc::gcp::CreateFakeHandshakerService(); + } std::string server_address = Network::Test::getLoopbackAddressUrlString(version_) + ":0"; grpc::ServerBuilder builder; @@ -143,6 +201,8 @@ class AltsIntegrationTestBase : public testing::TestWithParamconnected()); } @@ -253,8 +313,40 @@ INSTANTIATE_TEST_SUITE_P(IpVersions, AltsIntegrationTestClientWrongHandshaker, // and connection closes. 
TEST_P(AltsIntegrationTestClientWrongHandshaker, ConnectToWrongHandshakerAddress) { initialize(); - codec_client_ = makeRawHttpConnection(makeAltsConnection()); + codec_client_ = makeRawHttpConnection(makeAltsConnection(), absl::nullopt); + EXPECT_FALSE(codec_client_->connected()); +} + +class AltsIntegrationTestCapturingHandshaker : public AltsIntegrationTestBase { +public: + AltsIntegrationTestCapturingHandshaker() + : AltsIntegrationTestBase("", "", + /* server_connect_handshaker */ true, + /* client_connect_handshaker */ true, + /* capturing_handshaker */ true) {} +}; + +INSTANTIATE_TEST_SUITE_P(IpVersions, AltsIntegrationTestCapturingHandshaker, + testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + TestUtility::ipTestParamsToString); + +// Verifies that handshake request should include ALTS version. +TEST_P(AltsIntegrationTestCapturingHandshaker, CheckAltsVersion) { + initialize(); + codec_client_ = makeRawHttpConnection(makeAltsConnection(), absl::nullopt); EXPECT_FALSE(codec_client_->connected()); + EXPECT_EQ(capturing_handshaker_service_->client_versions.max_rpc_version().major(), + capturing_handshaker_service_->server_versions.max_rpc_version().major()); + EXPECT_EQ(capturing_handshaker_service_->client_versions.max_rpc_version().minor(), + capturing_handshaker_service_->server_versions.max_rpc_version().minor()); + EXPECT_EQ(capturing_handshaker_service_->client_versions.min_rpc_version().major(), + capturing_handshaker_service_->server_versions.min_rpc_version().major()); + EXPECT_EQ(capturing_handshaker_service_->client_versions.min_rpc_version().minor(), + capturing_handshaker_service_->server_versions.min_rpc_version().minor()); + EXPECT_NE(0, capturing_handshaker_service_->client_versions.max_rpc_version().major()); + EXPECT_NE(0, capturing_handshaker_service_->client_versions.max_rpc_version().minor()); + EXPECT_NE(0, capturing_handshaker_service_->client_versions.min_rpc_version().major()); + EXPECT_NE(0, 
capturing_handshaker_service_->client_versions.min_rpc_version().minor()); } } // namespace diff --git a/test/extensions/transport_sockets/alts/config_test.cc b/test/extensions/transport_sockets/alts/config_test.cc index 6e1ae044c4698..a3c233ad78224 100644 --- a/test/extensions/transport_sockets/alts/config_test.cc +++ b/test/extensions/transport_sockets/alts/config_test.cc @@ -3,7 +3,7 @@ #include "extensions/transport_sockets/alts/config.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/transport_socket_factory_context.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git a/test/extensions/transport_sockets/common/BUILD b/test/extensions/transport_sockets/common/BUILD new file mode 100644 index 0000000000000..f30b8bf8bb2f0 --- /dev/null +++ b/test/extensions/transport_sockets/common/BUILD @@ -0,0 +1,20 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_test", + "envoy_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_cc_test( + name = "passthrough_test", + srcs = ["passthrough_test.cc"], + deps = [ + "//source/extensions/transport_sockets/common:passthrough_lib", + "//test/mocks/buffer:buffer_mocks", + "//test/mocks/network:network_mocks", + "//test/mocks/network:transport_socket_mocks", + ], +) diff --git a/test/extensions/transport_sockets/common/passthrough_test.cc b/test/extensions/transport_sockets/common/passthrough_test.cc new file mode 100644 index 0000000000000..067caab6611ea --- /dev/null +++ b/test/extensions/transport_sockets/common/passthrough_test.cc @@ -0,0 +1,90 @@ +#include "extensions/transport_sockets/common/passthrough.h" + +#include "test/mocks/buffer/mocks.h" +#include "test/mocks/network/mocks.h" +#include "test/mocks/network/transport_socket.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::NiceMock; + +namespace Envoy { +namespace Extensions { +namespace TransportSockets { +namespace { + +class PassthroughTest : public testing::Test { +protected: + 
void SetUp() override { + auto inner_socket = std::make_unique>(); + inner_socket_ = inner_socket.get(); + passthrough_socket_ = std::make_unique(std::move(inner_socket)); + } + + NiceMock* inner_socket_; + std::unique_ptr passthrough_socket_; +}; + +// Test setTransportSocketCallbacks method defers to inner socket +TEST_F(PassthroughTest, SetTransportSocketCallbacksDefersToInnerSocket) { + auto transport_callbacks = std::make_unique>(); + EXPECT_CALL(*inner_socket_, setTransportSocketCallbacks(Ref(*transport_callbacks))).Times(1); + passthrough_socket_->setTransportSocketCallbacks(*transport_callbacks); +} + +// Test protocol method defers to inner socket +TEST_F(PassthroughTest, ProtocolDefersToInnerSocket) { + EXPECT_CALL(*inner_socket_, protocol()).Times(1); + passthrough_socket_->protocol(); +} + +// Test failureReason method defers to inner socket +TEST_F(PassthroughTest, FailureReasonDefersToInnerSocket) { + EXPECT_CALL(*inner_socket_, failureReason()).Times(1); + passthrough_socket_->failureReason(); +} + +// Test canFlushClose method defers to inner socket +TEST_F(PassthroughTest, CanFlushCloseDefersToInnerSocket) { + EXPECT_CALL(*inner_socket_, canFlushClose()).Times(1); + passthrough_socket_->canFlushClose(); +} + +// Test closeSocket method defers to inner socket +TEST_F(PassthroughTest, CloseSocketDefersToInnerSocket) { + EXPECT_CALL(*inner_socket_, closeSocket(testing::Eq(Network::ConnectionEvent::LocalClose))) + .Times(1); + passthrough_socket_->closeSocket(Network::ConnectionEvent::LocalClose); +} + +// Test doRead method defers to inner socket +TEST_F(PassthroughTest, DoReadDefersToInnerSocket) { + auto buff = Buffer::OwnedImpl("data"); + EXPECT_CALL(*inner_socket_, doRead(BufferEqual(&buff))).Times(1); + passthrough_socket_->doRead(buff); +} + +// Test doWrite method defers to inner socket +TEST_F(PassthroughTest, DoWriteDefersToInnerSocket) { + auto buff = Buffer::OwnedImpl("data"); + EXPECT_CALL(*inner_socket_, doWrite(BufferEqual(&buff), 
false)).Times(1); + passthrough_socket_->doWrite(buff, false); +} + +// Test onConnected method defers to inner socket +TEST_F(PassthroughTest, OnConnectedDefersToInnerSocket) { + EXPECT_CALL(*inner_socket_, onConnected()).Times(1); + passthrough_socket_->onConnected(); +} + +// Test ssl method defers to inner socket +TEST_F(PassthroughTest, SslDefersToInnerSocket) { + EXPECT_CALL(*inner_socket_, ssl()).Times(1); + passthrough_socket_->ssl(); +} + +} // namespace +} // namespace TransportSockets +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/test/extensions/transport_sockets/proxy_protocol/BUILD b/test/extensions/transport_sockets/proxy_protocol/BUILD new file mode 100644 index 0000000000000..dbbdb719f507d --- /dev/null +++ b/test/extensions/transport_sockets/proxy_protocol/BUILD @@ -0,0 +1,27 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_package", +) +load( + "//test/extensions:extensions_build_system.bzl", + "envoy_extension_cc_test", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_extension_cc_test( + name = "proxy_protocol_test", + srcs = ["proxy_protocol_test.cc"], + extension_name = "envoy.transport_sockets.upstream_proxy_protocol", + deps = [ + "//include/envoy/network:proxy_protocol_options_lib", + "//source/extensions/common/proxy_protocol:proxy_protocol_header_lib", + "//source/extensions/transport_sockets/proxy_protocol:upstream_proxy_protocol", + "//test/mocks/buffer:buffer_mocks", + "//test/mocks/network:io_handle_mocks", + "//test/mocks/network:network_mocks", + "//test/mocks/network:transport_socket_mocks", + ], +) diff --git a/test/extensions/transport_sockets/proxy_protocol/proxy_protocol_test.cc b/test/extensions/transport_sockets/proxy_protocol/proxy_protocol_test.cc new file mode 100644 index 0000000000000..2823d218c992d --- /dev/null +++ b/test/extensions/transport_sockets/proxy_protocol/proxy_protocol_test.cc @@ -0,0 +1,398 @@ +#include "envoy/network/proxy_protocol.h" + 
+#include "common/buffer/buffer_impl.h" +#include "common/network/address_impl.h" +#include "common/network/transport_socket_options_impl.h" + +#include "extensions/common/proxy_protocol/proxy_protocol_header.h" +#include "extensions/transport_sockets/proxy_protocol/proxy_protocol.h" + +#include "test/mocks/buffer/mocks.h" +#include "test/mocks/network/io_handle.h" +#include "test/mocks/network/mocks.h" +#include "test/mocks/network/transport_socket.h" +#include "test/test_common/utility.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::_; +using testing::InSequence; +using testing::NiceMock; +using testing::Return; +using testing::ReturnRef; + +using envoy::config::core::v3::ProxyProtocolConfig_Version; + +namespace Envoy { +namespace Extensions { +namespace TransportSockets { +namespace ProxyProtocol { +namespace { + +constexpr uint64_t MaxSlices = 16; + +class ProxyProtocolTest : public testing::Test { +public: + void initialize(ProxyProtocolConfig_Version version, + Network::TransportSocketOptionsSharedPtr socket_options) { + auto inner_socket = std::make_unique>(); + inner_socket_ = inner_socket.get(); + ON_CALL(transport_callbacks_, ioHandle()).WillByDefault(ReturnRef(io_handle_)); + proxy_protocol_socket_ = std::make_unique(std::move(inner_socket), + socket_options, version); + proxy_protocol_socket_->setTransportSocketCallbacks(transport_callbacks_); + } + + NiceMock* inner_socket_; + NiceMock io_handle_; + std::unique_ptr proxy_protocol_socket_; + NiceMock transport_callbacks_; +}; + +// Test injects PROXY protocol header only once +TEST_F(ProxyProtocolTest, InjectesHeaderOnlyOnce) { + transport_callbacks_.connection_.local_address_ = + Network::Utility::resolveUrl("tcp://174.2.2.222:50000"); + transport_callbacks_.connection_.remote_address_ = + Network::Utility::resolveUrl("tcp://172.0.0.1:80"); + Buffer::OwnedImpl expected_buff{}; + Common::ProxyProtocol::generateV1Header("174.2.2.222", "172.0.0.1", 50000, 80, + 
Network::Address::IpVersion::v4, expected_buff); + auto expected_slices = expected_buff.getRawSlices(MaxSlices); + initialize(ProxyProtocolConfig_Version::ProxyProtocolConfig_Version_V1, nullptr); + + EXPECT_CALL(io_handle_, writev(RawSliceVectorEqual(expected_slices), expected_slices.size())) + .WillOnce(Return(testing::ByMove(Api::IoCallUint64Result( + expected_buff.length(), Api::IoErrorPtr(nullptr, [](Api::IoError*) {}))))); + auto msg = Buffer::OwnedImpl("some data"); + auto msg2 = Buffer::OwnedImpl("more data"); + { + InSequence s; + EXPECT_CALL(*inner_socket_, doWrite(BufferEqual(&msg), false)).Times(1); + EXPECT_CALL(*inner_socket_, doWrite(BufferEqual(&msg2), false)).Times(1); + } + + proxy_protocol_socket_->doWrite(msg, false); + proxy_protocol_socket_->doWrite(msg2, false); +} + +// Test returned bytes processed includes the PROXY protocol header +TEST_F(ProxyProtocolTest, BytesProcessedIncludesProxyProtocolHeader) { + transport_callbacks_.connection_.local_address_ = + Network::Utility::resolveUrl("tcp://174.2.2.222:50000"); + transport_callbacks_.connection_.remote_address_ = + Network::Utility::resolveUrl("tcp://172.0.0.1:80"); + Buffer::OwnedImpl expected_buff{}; + Common::ProxyProtocol::generateV1Header("174.2.2.222", "172.0.0.1", 50000, 80, + Network::Address::IpVersion::v4, expected_buff); + auto expected_slices = expected_buff.getRawSlices(MaxSlices); + initialize(ProxyProtocolConfig_Version::ProxyProtocolConfig_Version_V1, nullptr); + + EXPECT_CALL(io_handle_, writev(RawSliceVectorEqual(expected_slices), expected_slices.size())) + .WillOnce(Return(testing::ByMove(Api::IoCallUint64Result( + expected_buff.length(), Api::IoErrorPtr(nullptr, [](Api::IoError*) {}))))); + auto msg = Buffer::OwnedImpl("some data"); + auto msg2 = Buffer::OwnedImpl("more data"); + { + InSequence s; + EXPECT_CALL(*inner_socket_, doWrite(BufferEqual(&msg), false)) + .WillOnce(Return(Network::IoResult{Network::PostIoAction::KeepOpen, msg.length(), false})); + 
EXPECT_CALL(*inner_socket_, doWrite(BufferEqual(&msg2), false)) + .WillOnce(Return(Network::IoResult{Network::PostIoAction::KeepOpen, msg2.length(), false})); + } + + auto resp = proxy_protocol_socket_->doWrite(msg, false); + EXPECT_EQ(expected_buff.length() + msg.length(), resp.bytes_processed_); + auto resp2 = proxy_protocol_socket_->doWrite(msg2, false); + EXPECT_EQ(msg2.length(), resp2.bytes_processed_); +} + +// Test returns KeepOpen action when write error is Again +TEST_F(ProxyProtocolTest, ReturnsKeepOpenWhenWriteErrorIsAgain) { + transport_callbacks_.connection_.local_address_ = + Network::Utility::resolveUrl("tcp://174.2.2.222:50000"); + transport_callbacks_.connection_.remote_address_ = + Network::Utility::resolveUrl("tcp://172.0.0.1:80"); + Buffer::OwnedImpl expected_buff{}; + Common::ProxyProtocol::generateV1Header("174.2.2.222", "172.0.0.1", 50000, 80, + Network::Address::IpVersion::v4, expected_buff); + auto expected_slices = expected_buff.getRawSlices(MaxSlices); + initialize(ProxyProtocolConfig_Version::ProxyProtocolConfig_Version_V1, nullptr); + + auto msg = Buffer::OwnedImpl("some data"); + { + InSequence s; + EXPECT_CALL(io_handle_, writev(RawSliceVectorEqual(expected_slices), expected_slices.size())) + .WillOnce(Return(testing::ByMove(Api::IoCallUint64Result( + 0, Api::IoErrorPtr(Network::IoSocketError::getIoSocketEagainInstance(), + Network::IoSocketError::deleteIoError))))); + EXPECT_CALL(io_handle_, writev(RawSliceVectorEqual(expected_slices), expected_slices.size())) + .WillOnce(Return(testing::ByMove(Api::IoCallUint64Result( + expected_buff.length(), Api::IoErrorPtr(nullptr, [](Api::IoError*) {}))))); + EXPECT_CALL(*inner_socket_, doWrite(BufferEqual(&msg), false)) + .WillOnce(Return(Network::IoResult{Network::PostIoAction::KeepOpen, msg.length(), false})); + } + + auto resp = proxy_protocol_socket_->doWrite(msg, false); + EXPECT_EQ(Network::PostIoAction::KeepOpen, resp.action_); + auto resp2 = proxy_protocol_socket_->doWrite(msg, false); 
+ EXPECT_EQ(Network::PostIoAction::KeepOpen, resp2.action_); +} + +// Test returns Close action when write error is not Again +TEST_F(ProxyProtocolTest, ReturnsCloseWhenWriteErrorIsNotAgain) { + transport_callbacks_.connection_.local_address_ = + Network::Utility::resolveUrl("tcp://174.2.2.222:50000"); + transport_callbacks_.connection_.remote_address_ = + Network::Utility::resolveUrl("tcp://172.0.0.1:80"); + Buffer::OwnedImpl expected_buff{}; + Common::ProxyProtocol::generateV1Header("174.2.2.222", "172.0.0.1", 50000, 80, + Network::Address::IpVersion::v4, expected_buff); + auto expected_slices = expected_buff.getRawSlices(MaxSlices); + initialize(ProxyProtocolConfig_Version::ProxyProtocolConfig_Version_V1, nullptr); + + auto msg = Buffer::OwnedImpl("some data"); + { + InSequence s; + EXPECT_CALL(io_handle_, writev(_, _)) + .WillOnce(Return(testing::ByMove( + Api::IoCallUint64Result(0, Api::IoErrorPtr(new Network::IoSocketError(EADDRNOTAVAIL), + [](Api::IoError* err) { delete err; }))))); + } + + auto resp = proxy_protocol_socket_->doWrite(msg, false); + EXPECT_EQ(Network::PostIoAction::Close, resp.action_); +} + +// Test injects V1 PROXY protocol using upstream addresses when transport options are null +TEST_F(ProxyProtocolTest, V1IPV4LocalAddressWhenTransportOptionsAreNull) { + transport_callbacks_.connection_.local_address_ = + Network::Utility::resolveUrl("tcp://174.2.2.222:50000"); + transport_callbacks_.connection_.remote_address_ = + Network::Utility::resolveUrl("tcp://172.0.0.1:80"); + Buffer::OwnedImpl expected_buff{}; + Common::ProxyProtocol::generateV1Header("174.2.2.222", "172.0.0.1", 50000, 80, + Network::Address::IpVersion::v4, expected_buff); + auto expected_slices = expected_buff.getRawSlices(MaxSlices); + initialize(ProxyProtocolConfig_Version::ProxyProtocolConfig_Version_V1, nullptr); + + EXPECT_CALL(io_handle_, writev(RawSliceVectorEqual(expected_slices), expected_slices.size())) + .WillOnce(Return(testing::ByMove(Api::IoCallUint64Result( + 
expected_buff.length(), Api::IoErrorPtr(nullptr, [](Api::IoError*) {}))))); + auto msg = Buffer::OwnedImpl("some data"); + EXPECT_CALL(*inner_socket_, doWrite(BufferEqual(&msg), false)).Times(1); + + proxy_protocol_socket_->doWrite(msg, false); +} + +// Test injects V1 PROXY protocol using upstream addresses when header options are null +TEST_F(ProxyProtocolTest, V1IPV4LocalAddressesWhenHeaderOptionsAreNull) { + transport_callbacks_.connection_.local_address_ = + Network::Utility::resolveUrl("tcp://174.2.2.222:50000"); + transport_callbacks_.connection_.remote_address_ = + Network::Utility::resolveUrl("tcp://172.0.0.1:80"); + Buffer::OwnedImpl expected_buff{}; + Common::ProxyProtocol::generateV1Header("174.2.2.222", "172.0.0.1", 50000, 80, + Network::Address::IpVersion::v4, expected_buff); + auto expected_slices = expected_buff.getRawSlices(MaxSlices); + initialize(ProxyProtocolConfig_Version::ProxyProtocolConfig_Version_V1, + std::make_shared()); + + EXPECT_CALL(io_handle_, writev(RawSliceVectorEqual(expected_slices), 1)) + .WillOnce(Return(testing::ByMove( + Api::IoCallUint64Result(43, Api::IoErrorPtr(nullptr, [](Api::IoError*) {}))))); + auto msg = Buffer::OwnedImpl("some data"); + EXPECT_CALL(*inner_socket_, doWrite(BufferEqual(&msg), false)).Times(1); + + proxy_protocol_socket_->doWrite(msg, false); +} + +// Test injects V1 PROXY protocol using upstream IPv6 addresses when header options are null +TEST_F(ProxyProtocolTest, V1IPV6LocalAddressesWhenHeaderOptionsAreNull) { + transport_callbacks_.connection_.local_address_ = + Network::Utility::resolveUrl("tcp://[a:b:c:d::]:50000"); + transport_callbacks_.connection_.remote_address_ = + Network::Utility::resolveUrl("tcp://[e:b:c:f::]:8080"); + Buffer::OwnedImpl expected_buff{}; + Common::ProxyProtocol::generateV1Header("a:b:c:d::", "e:b:c:f::", 50000, 8080, + Network::Address::IpVersion::v6, expected_buff); + auto expected_slices = expected_buff.getRawSlices(MaxSlices); + 
initialize(ProxyProtocolConfig_Version::ProxyProtocolConfig_Version_V1, + std::make_shared()); + + EXPECT_CALL(io_handle_, writev(RawSliceVectorEqual(expected_slices), expected_slices.size())) + .WillOnce(Return(testing::ByMove(Api::IoCallUint64Result( + expected_buff.length(), Api::IoErrorPtr(nullptr, [](Api::IoError*) {}))))); + auto msg = Buffer::OwnedImpl("some data"); + EXPECT_CALL(*inner_socket_, doWrite(BufferEqual(&msg), false)).Times(1); + + proxy_protocol_socket_->doWrite(msg, false); +} + +// Test injects V1 PROXY protocol for downstream IPV4 addresses +TEST_F(ProxyProtocolTest, V1IPV4DownstreamAddresses) { + auto src_addr = Network::Address::InstanceConstSharedPtr( + new Network::Address::Ipv4Instance("202.168.0.13", 52000)); + auto dst_addr = Network::Address::InstanceConstSharedPtr( + new Network::Address::Ipv4Instance("174.2.2.222", 80)); + Network::TransportSocketOptionsSharedPtr socket_options = + std::make_shared( + "", std::vector{}, std::vector{}, absl::nullopt, + absl::optional( + Network::ProxyProtocolData{src_addr, dst_addr})); + transport_callbacks_.connection_.local_address_ = + Network::Utility::resolveUrl("tcp://174.2.2.222:50000"); + transport_callbacks_.connection_.remote_address_ = + Network::Utility::resolveUrl("tcp://172.0.0.1:8080"); + Buffer::OwnedImpl expected_buff{}; + Common::ProxyProtocol::generateV1Header("202.168.0.13", "174.2.2.222", 52000, 80, + Network::Address::IpVersion::v4, expected_buff); + auto expected_slices = expected_buff.getRawSlices(MaxSlices); + initialize(ProxyProtocolConfig_Version::ProxyProtocolConfig_Version_V1, socket_options); + + EXPECT_CALL(io_handle_, writev(RawSliceVectorEqual(expected_slices), expected_slices.size())) + .WillOnce(Return(testing::ByMove(Api::IoCallUint64Result( + expected_buff.length(), Api::IoErrorPtr(nullptr, [](Api::IoError*) {}))))); + auto msg = Buffer::OwnedImpl("some data"); + EXPECT_CALL(*inner_socket_, doWrite(BufferEqual(&msg), false)).Times(1); + + 
proxy_protocol_socket_->doWrite(msg, false); +} + +// Test injects V1 PROXY protocol for downstream IPV6 addresses +TEST_F(ProxyProtocolTest, V1IPV6DownstreamAddresses) { + auto src_addr = + Network::Address::InstanceConstSharedPtr(new Network::Address::Ipv6Instance("1::2:3", 52000)); + auto dst_addr = + Network::Address::InstanceConstSharedPtr(new Network::Address::Ipv6Instance("a:b:c:d::", 80)); + Network::TransportSocketOptionsSharedPtr socket_options = + std::make_shared( + "", std::vector{}, std::vector{}, absl::nullopt, + absl::optional( + Network::ProxyProtocolData{src_addr, dst_addr})); + transport_callbacks_.connection_.local_address_ = + Network::Utility::resolveUrl("tcp://[a:b:c:d::]:50000"); + transport_callbacks_.connection_.remote_address_ = + Network::Utility::resolveUrl("tcp://[e:b:c:f::]:8080"); + Buffer::OwnedImpl expected_buff{}; + Common::ProxyProtocol::generateV1Header("1::2:3", "a:b:c:d::", 52000, 80, + Network::Address::IpVersion::v6, expected_buff); + auto expected_slices = expected_buff.getRawSlices(MaxSlices); + initialize(ProxyProtocolConfig_Version::ProxyProtocolConfig_Version_V1, socket_options); + + EXPECT_CALL(io_handle_, writev(RawSliceVectorEqual(expected_slices), expected_slices.size())) + .WillOnce(Return(testing::ByMove(Api::IoCallUint64Result( + expected_buff.length(), Api::IoErrorPtr(nullptr, [](Api::IoError*) {}))))); + auto msg = Buffer::OwnedImpl("some data"); + EXPECT_CALL(*inner_socket_, doWrite(BufferEqual(&msg), false)).Times(1); + + proxy_protocol_socket_->doWrite(msg, false); +} + +// Test injects V2 PROXY protocol using upstream addresses when transport options are null +TEST_F(ProxyProtocolTest, V2IPV4LocalCommandWhenTransportOptionsAreNull) { + transport_callbacks_.connection_.local_address_ = + Network::Utility::resolveUrl("tcp://1.2.3.4:773"); + transport_callbacks_.connection_.remote_address_ = + Network::Utility::resolveUrl("tcp://0.1.1.2:513"); + Buffer::OwnedImpl expected_buff{}; + 
Common::ProxyProtocol::generateV2LocalHeader(expected_buff); + auto expected_slices = expected_buff.getRawSlices(MaxSlices); + initialize(ProxyProtocolConfig_Version::ProxyProtocolConfig_Version_V2, nullptr); + + EXPECT_CALL(io_handle_, writev(RawSliceVectorEqual(expected_slices), expected_slices.size())) + .WillOnce(Return(testing::ByMove(Api::IoCallUint64Result( + expected_buff.length(), Api::IoErrorPtr(nullptr, [](Api::IoError*) {}))))); + auto msg = Buffer::OwnedImpl("some data"); + EXPECT_CALL(*inner_socket_, doWrite(BufferEqual(&msg), false)).Times(1); + + proxy_protocol_socket_->doWrite(msg, false); +} + +// Test injects V2 PROXY protocol using upstream addresses when header options are null +TEST_F(ProxyProtocolTest, V2IPV4LocalCommandWhenHeaderOptionsAreNull) { + transport_callbacks_.connection_.local_address_ = + Network::Utility::resolveUrl("tcp://1.2.3.4:773"); + transport_callbacks_.connection_.remote_address_ = + Network::Utility::resolveUrl("tcp://0.1.1.2:513"); + Buffer::OwnedImpl expected_buff{}; + Common::ProxyProtocol::generateV2LocalHeader(expected_buff); + auto expected_slices = expected_buff.getRawSlices(MaxSlices); + initialize(ProxyProtocolConfig_Version::ProxyProtocolConfig_Version_V2, + std::make_shared()); + + EXPECT_CALL(io_handle_, writev(RawSliceVectorEqual(expected_slices), expected_slices.size())) + .WillOnce(Return(testing::ByMove(Api::IoCallUint64Result( + expected_buff.length(), Api::IoErrorPtr(nullptr, [](Api::IoError*) {}))))); + auto msg = Buffer::OwnedImpl("some data"); + EXPECT_CALL(*inner_socket_, doWrite(BufferEqual(&msg), false)).Times(1); + + proxy_protocol_socket_->doWrite(msg, false); +} + +// Test injects V2 PROXY protocol for downstream IPV4 addresses +TEST_F(ProxyProtocolTest, V2IPV4DownstreamAddresses) { + auto src_addr = + Network::Address::InstanceConstSharedPtr(new Network::Address::Ipv4Instance("1.2.3.4", 773)); + auto dst_addr = + Network::Address::InstanceConstSharedPtr(new 
Network::Address::Ipv4Instance("0.1.1.2", 513)); + Network::TransportSocketOptionsSharedPtr socket_options = + std::make_shared( + "", std::vector{}, std::vector{}, absl::nullopt, + absl::optional( + Network::ProxyProtocolData{src_addr, dst_addr})); + transport_callbacks_.connection_.local_address_ = + Network::Utility::resolveUrl("tcp://0.1.1.2:50000"); + transport_callbacks_.connection_.remote_address_ = + Network::Utility::resolveUrl("tcp://3.3.3.3:80"); + Buffer::OwnedImpl expected_buff{}; + Common::ProxyProtocol::generateV2Header("1.2.3.4", "0.1.1.2", 773, 513, + Network::Address::IpVersion::v4, expected_buff); + auto expected_slices = expected_buff.getRawSlices(MaxSlices); + initialize(ProxyProtocolConfig_Version::ProxyProtocolConfig_Version_V2, socket_options); + + EXPECT_CALL(io_handle_, writev(RawSliceVectorEqual(expected_slices), expected_slices.size())) + .WillOnce(Return(testing::ByMove(Api::IoCallUint64Result( + expected_buff.length(), Api::IoErrorPtr(nullptr, [](Api::IoError*) {}))))); + auto msg = Buffer::OwnedImpl("some data"); + EXPECT_CALL(*inner_socket_, doWrite(BufferEqual(&msg), false)).Times(1); + + proxy_protocol_socket_->doWrite(msg, false); +} + +// Test injects V2 PROXY protocol for downstream IPV6 addresses +TEST_F(ProxyProtocolTest, V2IPV6DownstreamAddresses) { + auto src_addr = + Network::Address::InstanceConstSharedPtr(new Network::Address::Ipv6Instance("1:2:3::4", 8)); + auto dst_addr = Network::Address::InstanceConstSharedPtr( + new Network::Address::Ipv6Instance("1:100:200:3::", 2)); + Network::TransportSocketOptionsSharedPtr socket_options = + std::make_shared( + "", std::vector{}, std::vector{}, absl::nullopt, + absl::optional( + Network::ProxyProtocolData{src_addr, dst_addr})); + transport_callbacks_.connection_.local_address_ = + Network::Utility::resolveUrl("tcp://[1:100:200:3::]:50000"); + transport_callbacks_.connection_.remote_address_ = + Network::Utility::resolveUrl("tcp://[e:b:c:f::]:8080"); + Buffer::OwnedImpl 
expected_buff{}; + Common::ProxyProtocol::generateV2Header("1:2:3::4", "1:100:200:3::", 8, 2, + Network::Address::IpVersion::v6, expected_buff); + auto expected_slices = expected_buff.getRawSlices(MaxSlices); + initialize(ProxyProtocolConfig_Version::ProxyProtocolConfig_Version_V2, socket_options); + + EXPECT_CALL(io_handle_, writev(RawSliceVectorEqual(expected_slices), expected_slices.size())) + .WillOnce(Return(testing::ByMove(Api::IoCallUint64Result( + expected_buff.length(), Api::IoErrorPtr(nullptr, [](Api::IoError*) {}))))); + auto msg = Buffer::OwnedImpl("some data"); + EXPECT_CALL(*inner_socket_, doWrite(BufferEqual(&msg), false)).Times(1); + + proxy_protocol_socket_->doWrite(msg, false); +} + +} // namespace +} // namespace ProxyProtocol +} // namespace TransportSockets +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/test/extensions/transport_sockets/tap/BUILD b/test/extensions/transport_sockets/tap/BUILD index 1aaba1cdb5d57..be4dd9ba9c952 100644 --- a/test/extensions/transport_sockets/tap/BUILD +++ b/test/extensions/transport_sockets/tap/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/transport_sockets/tap/tap_config_impl_test.cc b/test/extensions/transport_sockets/tap/tap_config_impl_test.cc index ddf53b8258a56..c8bcefbf623de 100644 --- a/test/extensions/transport_sockets/tap/tap_config_impl_test.cc +++ b/test/extensions/transport_sockets/tap/tap_config_impl_test.cc @@ -7,6 +7,7 @@ #include "test/test_common/simulated_time_system.h" using testing::_; +using testing::ByMove; using testing::InSequence; using testing::Invoke; using testing::Return; @@ -52,7 +53,7 @@ class PerSocketTapperImplTest : public testing::Test { ON_CALL(connection_, id()).WillByDefault(Return(1)); EXPECT_CALL(*config_, 
createPerTapSinkHandleManager_(1)).WillOnce(Return(sink_manager_)); EXPECT_CALL(*config_, createMatchStatusVector()) - .WillOnce(Return(TapCommon::Matcher::MatchStatusVector(1))); + .WillOnce(Return(ByMove(TapCommon::Matcher::MatchStatusVector(1)))); EXPECT_CALL(*config_, rootMatcher()).WillRepeatedly(ReturnRef(matcher_)); EXPECT_CALL(matcher_, onNewStream(_)) .WillOnce(Invoke([this](TapCommon::Matcher::MatchStatusVector& statuses) { diff --git a/test/extensions/transport_sockets/tls/BUILD b/test/extensions/transport_sockets/tls/BUILD index bc1b9b9cbf0f7..4a7a2cd9481ec 100644 --- a/test/extensions/transport_sockets/tls/BUILD +++ b/test/extensions/transport_sockets/tls/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", @@ -7,6 +5,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( @@ -46,6 +46,8 @@ envoy_cc_test( "//source/extensions/transport_sockets/tls/private_key:private_key_manager_lib", "//test/extensions/transport_sockets/tls/test_data:cert_infos", "//test/mocks/buffer:buffer_mocks", + "//test/mocks/init:init_mocks", + "//test/mocks/local_info:local_info_mocks", "//test/mocks/network:network_mocks", "//test/mocks/runtime:runtime_mocks", "//test/mocks/server:server_mocks", @@ -79,12 +81,15 @@ envoy_cc_test( "//source/extensions/transport_sockets/tls:context_config_lib", "//source/extensions/transport_sockets/tls:context_lib", "//test/extensions/transport_sockets/tls/test_data:cert_infos", + "//test/mocks/init:init_mocks", + "//test/mocks/local_info:local_info_mocks", "//test/mocks/runtime:runtime_mocks", "//test/mocks/secret:secret_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:transport_socket_factory_context_mocks", "//test/mocks/ssl:ssl_mocks", "//test/test_common:environment_lib", "//test/test_common:simulated_time_system_lib", + "//test/test_common:test_runtime_lib", "@envoy_api//envoy/admin/v3:pkg_cc_proto", 
"@envoy_api//envoy/extensions/transport_sockets/tls/v3:pkg_cc_proto", "@envoy_api//envoy/type/matcher/v3:pkg_cc_proto", diff --git a/test/extensions/transport_sockets/tls/context_impl_test.cc b/test/extensions/transport_sockets/tls/context_impl_test.cc index 2b6c67057c28f..60cec6e1fe172 100644 --- a/test/extensions/transport_sockets/tls/context_impl_test.cc +++ b/test/extensions/transport_sockets/tls/context_impl_test.cc @@ -3,7 +3,7 @@ #include "envoy/admin/v3/certs.pb.h" #include "envoy/extensions/transport_sockets/tls/v3/cert.pb.h" -#include "envoy/extensions/transport_sockets/tls/v3/cert.pb.validate.h" +#include "envoy/extensions/transport_sockets/tls/v3/tls.pb.validate.h" #include "envoy/type/matcher/v3/string.pb.h" #include "common/json/json_loader.h" @@ -19,11 +19,14 @@ #include "test/extensions/transport_sockets/tls/test_data/no_san_cert_info.h" #include "test/extensions/transport_sockets/tls/test_data/san_dns3_cert_info.h" #include "test/extensions/transport_sockets/tls/test_data/san_ip_cert_info.h" +#include "test/mocks/init/mocks.h" +#include "test/mocks/local_info/mocks.h" #include "test/mocks/secret/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/transport_socket_factory_context.h" #include "test/mocks/ssl/mocks.h" #include "test/test_common/environment.h" #include "test/test_common/simulated_time_system.h" +#include "test/test_common/test_runtime.h" #include "test/test_common/utility.h" #include "gtest/gtest.h" @@ -39,6 +42,57 @@ namespace Extensions { namespace TransportSockets { namespace Tls { +namespace { +const std::vector& knownCipherSuites() { + CONSTRUCT_ON_FIRST_USE(std::vector, {"ECDHE-ECDSA-AES128-GCM-SHA256", + "ECDHE-RSA-AES128-GCM-SHA256", + "ECDHE-ECDSA-AES256-GCM-SHA384", + "ECDHE-RSA-AES256-GCM-SHA384", + "ECDHE-ECDSA-CHACHA20-POLY1305", + "ECDHE-RSA-CHACHA20-POLY1305", + "ECDHE-PSK-CHACHA20-POLY1305", + "ECDHE-ECDSA-AES128-SHA", + "ECDHE-RSA-AES128-SHA", + "ECDHE-PSK-AES128-CBC-SHA", + 
"ECDHE-ECDSA-AES256-SHA", + "ECDHE-RSA-AES256-SHA", + "ECDHE-PSK-AES256-CBC-SHA", + "AES128-GCM-SHA256", + "AES256-GCM-SHA384", + "AES128-SHA", + "PSK-AES128-CBC-SHA", + "AES256-SHA", + "PSK-AES256-CBC-SHA", + "DES-CBC3-SHA"}); +} +} // namespace + +class SslLibraryCipherSuiteSupport : public ::testing::TestWithParam {}; + +INSTANTIATE_TEST_SUITE_P(CipherSuites, SslLibraryCipherSuiteSupport, + ::testing::ValuesIn(knownCipherSuites())); + +// Tests for whether new cipher suites are added. When they are, they must be added to +// knownCipherSuites() so that this test can detect if they are removed in the future. +TEST_F(SslLibraryCipherSuiteSupport, CipherSuitesNotAdded) { + bssl::UniquePtr ctx(SSL_CTX_new(TLS_method())); + EXPECT_NE(0, SSL_CTX_set_strict_cipher_list(ctx.get(), "ALL")); + + std::vector present_cipher_suites; + for (const SSL_CIPHER* cipher : SSL_CTX_get_ciphers(ctx.get())) { + present_cipher_suites.push_back(SSL_CIPHER_get_name(cipher)); + } + EXPECT_THAT(present_cipher_suites, testing::IsSubsetOf(knownCipherSuites())); +} + +// Test that no previously supported cipher suites were removed from the SSL library. If a cipher +// suite is removed, it must be added to the release notes as an incompatible change, because it can +// cause previously loadable configurations to no longer load if they reference the cipher suite. 
+TEST_P(SslLibraryCipherSuiteSupport, CipherSuitesNotRemoved) { + bssl::UniquePtr ctx(SSL_CTX_new(TLS_method())); + EXPECT_NE(0, SSL_CTX_set_strict_cipher_list(ctx.get(), GetParam().c_str())); +} + class SslContextImplTest : public SslCertsTest { protected: Event::SimulatedTimeSystem time_system_; @@ -48,6 +102,23 @@ class SslContextImplTest : public SslCertsTest { TEST_F(SslContextImplTest, TestDnsNameMatching) { EXPECT_TRUE(ContextImpl::dnsNameMatch("lyft.com", "lyft.com")); EXPECT_TRUE(ContextImpl::dnsNameMatch("a.lyft.com", "*.lyft.com")); + EXPECT_FALSE(ContextImpl::dnsNameMatch("a.b.lyft.com", "*.lyft.com")); + EXPECT_FALSE(ContextImpl::dnsNameMatch("foo.test.com", "*.lyft.com")); + EXPECT_FALSE(ContextImpl::dnsNameMatch("lyft.com", "*.lyft.com")); + EXPECT_FALSE(ContextImpl::dnsNameMatch("alyft.com", "*.lyft.com")); + EXPECT_FALSE(ContextImpl::dnsNameMatch("alyft.com", "*lyft.com")); + EXPECT_FALSE(ContextImpl::dnsNameMatch("lyft.com", "*lyft.com")); + EXPECT_FALSE(ContextImpl::dnsNameMatch("", "*lyft.com")); + EXPECT_FALSE(ContextImpl::dnsNameMatch("lyft.com", "")); +} + +TEST_F(SslContextImplTest, TestDnsNameMatchingLegacy) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.fix_wildcard_matching", "false"}}); + EXPECT_TRUE(ContextImpl::dnsNameMatch("lyft.com", "lyft.com")); + EXPECT_TRUE(ContextImpl::dnsNameMatch("a.lyft.com", "*.lyft.com")); + // Legacy behavior EXPECT_TRUE(ContextImpl::dnsNameMatch("a.b.lyft.com", "*.lyft.com")); EXPECT_FALSE(ContextImpl::dnsNameMatch("foo.test.com", "*.lyft.com")); EXPECT_FALSE(ContextImpl::dnsNameMatch("lyft.com", "*.lyft.com")); @@ -87,6 +158,32 @@ TEST_F(SslContextImplTest, TestMatchSubjectAltNameWildcardDNSMatched) { EXPECT_TRUE(ContextImpl::matchSubjectAltName(cert.get(), subject_alt_name_matchers)); } +TEST_F(SslContextImplTest, TestMultiLevelMatch) { + // san_multiple_dns_cert matches *.example.com + bssl::UniquePtr cert = 
readCertFromFile(TestEnvironment::substitute( + "{{ test_rundir " + "}}/test/extensions/transport_sockets/tls/test_data/san_multiple_dns_cert.pem")); + envoy::type::matcher::v3::StringMatcher matcher; + matcher.set_exact("foo.api.example.com"); + std::vector subject_alt_name_matchers; + subject_alt_name_matchers.push_back(Matchers::StringMatcherImpl(matcher)); + EXPECT_FALSE(ContextImpl::matchSubjectAltName(cert.get(), subject_alt_name_matchers)); +} + +TEST_F(SslContextImplTest, TestMultiLevelMatchLegacy) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.fix_wildcard_matching", "false"}}); + bssl::UniquePtr cert = readCertFromFile(TestEnvironment::substitute( + "{{ test_rundir " + "}}/test/extensions/transport_sockets/tls/test_data/san_multiple_dns_cert.pem")); + envoy::type::matcher::v3::StringMatcher matcher; + matcher.set_exact("foo.api.example.com"); + std::vector subject_alt_name_matchers; + subject_alt_name_matchers.push_back(Matchers::StringMatcherImpl(matcher)); + EXPECT_TRUE(ContextImpl::matchSubjectAltName(cert.get(), subject_alt_name_matchers)); +} + TEST_F(SslContextImplTest, TestVerifySubjectAltNameURIMatched) { bssl::UniquePtr cert = readCertFromFile(TestEnvironment::substitute( "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_cert.pem")); @@ -95,6 +192,25 @@ TEST_F(SslContextImplTest, TestVerifySubjectAltNameURIMatched) { EXPECT_TRUE(ContextImpl::verifySubjectAltName(cert.get(), verify_subject_alt_name_list)); } +TEST_F(SslContextImplTest, TestVerifySubjectAltMultiDomain) { + bssl::UniquePtr cert = readCertFromFile(TestEnvironment::substitute( + "{{ test_rundir " + "}}/test/extensions/transport_sockets/tls/test_data/san_multiple_dns_cert.pem")); + std::vector verify_subject_alt_name_list = {"https://a.www.example.com"}; + EXPECT_FALSE(ContextImpl::verifySubjectAltName(cert.get(), verify_subject_alt_name_list)); +} + +TEST_F(SslContextImplTest, 
TestVerifySubjectAltMultiDomainLegacy) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.fix_wildcard_matching", "false"}}); + bssl::UniquePtr cert = readCertFromFile(TestEnvironment::substitute( + "{{ test_rundir " + "}}/test/extensions/transport_sockets/tls/test_data/san_multiple_dns_cert.pem")); + std::vector verify_subject_alt_name_list = {"https://a.www.example.com"}; + EXPECT_TRUE(ContextImpl::verifySubjectAltName(cert.get(), verify_subject_alt_name_list)); +} + TEST_F(SslContextImplTest, TestMatchSubjectAltNameURIMatched) { bssl::UniquePtr cert = readCertFromFile(TestEnvironment::substitute( "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_cert.pem")); @@ -126,16 +242,17 @@ TEST_F(SslContextImplTest, TestCipherSuites) { const std::string yaml = R"EOF( common_tls_context: tls_params: - cipher_suites: "-ALL:+[AES128-SHA|BOGUS1]:BOGUS2:AES256-SHA" + cipher_suites: "-ALL:+[AES128-SHA|BOGUS1-SHA256]:BOGUS2-SHA:AES256-SHA" )EOF"; envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext tls_context; TestUtility::loadFromYaml(TestEnvironment::substitute(yaml), tls_context); ClientContextConfigImpl cfg(tls_context, factory_context_); - EXPECT_THROW_WITH_MESSAGE(manager_.createSslClientContext(store_, cfg), EnvoyException, - "Failed to initialize cipher suites " - "-ALL:+[AES128-SHA|BOGUS1]:BOGUS2:AES256-SHA. The following " - "ciphers were rejected when tried individually: BOGUS1, BOGUS2"); + EXPECT_THROW_WITH_MESSAGE( + manager_.createSslClientContext(store_, cfg), EnvoyException, + "Failed to initialize cipher suites " + "-ALL:+[AES128-SHA|BOGUS1-SHA256]:BOGUS2-SHA:AES256-SHA. 
The following " + "ciphers were rejected when tried individually: BOGUS1-SHA256, BOGUS2-SHA"); } TEST_F(SslContextImplTest, TestExpiringCert) { @@ -474,9 +591,10 @@ class SslServerContextImplTicketTest : public SslContextImplTest { loadConfig(server_context_config); } - void loadConfigYaml(const std::string& yaml) { + void loadConfigYaml(const std::string& yaml, bool avoid_boosting = true) { envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context; - TestUtility::loadFromYaml(TestEnvironment::substitute(yaml), tls_context); + TestUtility::loadFromYaml(TestEnvironment::substitute(yaml), tls_context, false, + avoid_boosting); ServerContextConfigImpl cfg(tls_context, factory_context_); loadConfig(cfg); } @@ -583,7 +701,7 @@ TEST_F(SslServerContextImplTicketTest, TicketKeySdsNotReady) { NiceMock local_info; NiceMock dispatcher; - NiceMock random; + NiceMock random; Stats::IsolatedStoreImpl stats; NiceMock cluster_manager; NiceMock init_manager; @@ -592,7 +710,7 @@ TEST_F(SslServerContextImplTicketTest, TicketKeySdsNotReady) { // EXPECT_CALL(factory_context_, random()).WillOnce(ReturnRef(random)); EXPECT_CALL(factory_context_, stats()).WillOnce(ReturnRef(stats)); EXPECT_CALL(factory_context_, clusterManager()).WillOnce(ReturnRef(cluster_manager)); - EXPECT_CALL(factory_context_, initManager()).WillRepeatedly(Return(&init_manager)); + EXPECT_CALL(factory_context_, initManager()).WillRepeatedly(ReturnRef(init_manager)); auto* sds_secret_configs = tls_context.mutable_session_ticket_keys_sds_secret_config(); sds_secret_configs->set_name("abc.com"); sds_secret_configs->mutable_sds_config(); @@ -691,14 +809,15 @@ TEST_F(SslServerContextImplTicketTest, CRLWithNoCA) { TEST_F(SslServerContextImplTicketTest, VerifySanWithNoCA) { const std::string yaml = R"EOF( - common_tls_context: - tls_certificates: - certificate_chain: - filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem" - private_key: - filename: "{{ 
test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem" - validation_context: - verify_subject_alt_name: "spiffe://lyft.com/testclient" + common_tls_context: + tls_certificates: + certificate_chain: + filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem" + private_key: + filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem" + validation_context: + match_subject_alt_names: + exact : "spiffe://lyft.com/testclient" )EOF"; EXPECT_THROW_WITH_MESSAGE(loadConfigYaml(yaml), EnvoyException, "SAN-based verification of peer certificates without trusted CA " @@ -1000,7 +1119,7 @@ TEST_F(ClientContextConfigImplTest, SecretNotReady) { NiceMock dispatcher; EXPECT_CALL(factory_context_, localInfo()).WillOnce(ReturnRef(local_info)); EXPECT_CALL(factory_context_, stats()).WillOnce(ReturnRef(stats)); - EXPECT_CALL(factory_context_, initManager()).WillRepeatedly(Return(&init_manager)); + EXPECT_CALL(factory_context_, initManager()).WillRepeatedly(ReturnRef(init_manager)); EXPECT_CALL(factory_context_, dispatcher()).WillRepeatedly(ReturnRef(dispatcher)); auto sds_secret_configs = tls_context.mutable_common_tls_context()->mutable_tls_certificate_sds_secret_configs()->Add(); @@ -1032,7 +1151,7 @@ TEST_F(ClientContextConfigImplTest, ValidationContextNotReady) { NiceMock dispatcher; EXPECT_CALL(factory_context_, localInfo()).WillOnce(ReturnRef(local_info)); EXPECT_CALL(factory_context_, stats()).WillOnce(ReturnRef(stats)); - EXPECT_CALL(factory_context_, initManager()).WillRepeatedly(Return(&init_manager)); + EXPECT_CALL(factory_context_, initManager()).WillRepeatedly(ReturnRef(init_manager)); EXPECT_CALL(factory_context_, dispatcher()).WillRepeatedly(ReturnRef(dispatcher)); auto sds_secret_configs = tls_context.mutable_common_tls_context()->mutable_validation_context_sds_secret_config(); @@ -1338,7 +1457,7 @@ TEST_F(ServerContextConfigImplTest, SecretNotReady) { NiceMock dispatcher; 
EXPECT_CALL(factory_context_, localInfo()).WillOnce(ReturnRef(local_info)); EXPECT_CALL(factory_context_, stats()).WillOnce(ReturnRef(stats)); - EXPECT_CALL(factory_context_, initManager()).WillRepeatedly(Return(&init_manager)); + EXPECT_CALL(factory_context_, initManager()).WillRepeatedly(ReturnRef(init_manager)); EXPECT_CALL(factory_context_, dispatcher()).WillRepeatedly(ReturnRef(dispatcher)); auto sds_secret_configs = tls_context.mutable_common_tls_context()->mutable_tls_certificate_sds_secret_configs()->Add(); @@ -1370,7 +1489,7 @@ TEST_F(ServerContextConfigImplTest, ValidationContextNotReady) { NiceMock dispatcher; EXPECT_CALL(factory_context_, localInfo()).WillOnce(ReturnRef(local_info)); EXPECT_CALL(factory_context_, stats()).WillOnce(ReturnRef(stats)); - EXPECT_CALL(factory_context_, initManager()).WillRepeatedly(Return(&init_manager)); + EXPECT_CALL(factory_context_, initManager()).WillRepeatedly(ReturnRef(init_manager)); EXPECT_CALL(factory_context_, dispatcher()).WillRepeatedly(ReturnRef(dispatcher)); auto sds_secret_configs = tls_context.mutable_common_tls_context()->mutable_validation_context_sds_secret_config(); diff --git a/test/extensions/transport_sockets/tls/integration/BUILD b/test/extensions/transport_sockets/tls/integration/BUILD index 830d37771bd3f..4425448c624f4 100644 --- a/test/extensions/transport_sockets/tls/integration/BUILD +++ b/test/extensions/transport_sockets/tls/integration/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( @@ -17,6 +17,7 @@ envoy_cc_test( data = [ "//test/config/integration/certs", ], + tags = ["fails_on_windows"], deps = [ "//source/common/event:dispatcher_includes", "//source/common/event:dispatcher_lib", diff --git a/test/extensions/transport_sockets/tls/integration/ssl_integration_test.cc 
b/test/extensions/transport_sockets/tls/integration/ssl_integration_test.cc index bd755736ef457..db9b0afd9ec58 100644 --- a/test/extensions/transport_sockets/tls/integration/ssl_integration_test.cc +++ b/test/extensions/transport_sockets/tls/integration/ssl_integration_test.cc @@ -174,7 +174,7 @@ TEST_P(SslIntegrationTest, AdminCertEndpoint) { BufferingStreamDecoderPtr response = IntegrationUtil::makeSingleRequest( lookupPort("admin"), "GET", "/certs", "", downstreamProtocol(), version_); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } // Validate certificate selection across different certificate types and client TLS versions. @@ -286,7 +286,8 @@ TEST_P(SslCertficateIntegrationTest, ServerEcdsaClientRsaOnly) { server_rsa_cert_ = false; server_ecdsa_cert_ = true; initialize(); - auto codec_client = makeRawHttpConnection(makeSslClientConnection(rsaOnlyClientOptions())); + auto codec_client = + makeRawHttpConnection(makeSslClientConnection(rsaOnlyClientOptions()), absl::nullopt); EXPECT_FALSE(codec_client->connected()); const std::string counter_name = listenerStatPrefix("ssl.connection_error"); Stats::CounterSharedPtr counter = test_server_->counter(counter_name); @@ -313,7 +314,8 @@ TEST_P(SslCertficateIntegrationTest, ServerRsaClientEcdsaOnly) { client_ecdsa_cert_ = true; initialize(); EXPECT_FALSE( - makeRawHttpConnection(makeSslClientConnection(ecdsaOnlyClientOptions()))->connected()); + makeRawHttpConnection(makeSslClientConnection(ecdsaOnlyClientOptions()), absl::nullopt) + ->connected()); const std::string counter_name = listenerStatPrefix("ssl.connection_error"); Stats::CounterSharedPtr counter = test_server_->counter(counter_name); test_server_->waitForCounterGe(counter_name, 1); @@ -394,10 +396,8 @@ class SslTapIntegrationTest : public SslIntegrationTest { envoy::extensions::transport_sockets::tap::v3::Tap createTapConfig(const 
envoy::config::core::v3::TransportSocket& inner_transport) { envoy::extensions::transport_sockets::tap::v3::Tap tap_config; - tap_config.mutable_common_config() - ->mutable_static_config() - ->mutable_match_config() - ->set_any_match(true); + tap_config.mutable_common_config()->mutable_static_config()->mutable_match()->set_any_match( + true); auto* output_config = tap_config.mutable_common_config()->mutable_static_config()->mutable_output_config(); if (max_rx_bytes_.has_value()) { @@ -436,7 +436,7 @@ TEST_P(SslTapIntegrationTest, TwoRequestsWithBinaryProto) { // First request (ID will be +1 since the client will also bump). const uint64_t first_id = Network::ConnectionImpl::nextGlobalIdForTest() + 1; codec_client_ = makeHttpConnection(creator()); - Http::TestHeaderMapImpl post_request_headers{ + Http::TestRequestHeaderMapImpl post_request_headers{ {":method", "POST"}, {":path", "/test/long/url"}, {":scheme", "http"}, {":authority", "host"}, {"x-lyft-user-id", "123"}, {"x-forwarded-for", "10.0.0.1"}}; auto response = @@ -444,7 +444,7 @@ TEST_P(SslTapIntegrationTest, TwoRequestsWithBinaryProto) { EXPECT_TRUE(upstream_request_->complete()); EXPECT_EQ(128, upstream_request_->bodyLength()); ASSERT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ(256, response->body().size()); checkStats(); envoy::config::core::v3::Address expected_local_address; @@ -474,7 +474,7 @@ TEST_P(SslTapIntegrationTest, TwoRequestsWithBinaryProto) { // Verify a second request hits a different file. 
const uint64_t second_id = Network::ConnectionImpl::nextGlobalIdForTest() + 1; codec_client_ = makeHttpConnection(creator()); - Http::TestHeaderMapImpl get_request_headers{ + Http::TestRequestHeaderMapImpl get_request_headers{ {":method", "GET"}, {":path", "/test/long/url"}, {":scheme", "http"}, {":authority", "host"}, {"x-lyft-user-id", "123"}, {"x-forwarded-for", "10.0.0.1"}}; response = @@ -482,7 +482,7 @@ TEST_P(SslTapIntegrationTest, TwoRequestsWithBinaryProto) { EXPECT_TRUE(upstream_request_->complete()); EXPECT_EQ(128, upstream_request_->bodyLength()); ASSERT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ(256, response->body().size()); checkStats(); codec_client_->close(); diff --git a/test/extensions/transport_sockets/tls/ssl_certs_test.h b/test/extensions/transport_sockets/tls/ssl_certs_test.h index d6d450ca748d4..843273acfcfae 100644 --- a/test/extensions/transport_sockets/tls/ssl_certs_test.h +++ b/test/extensions/transport_sockets/tls/ssl_certs_test.h @@ -1,6 +1,6 @@ #pragma once -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/transport_socket_factory_context.h" #include "test/test_common/environment.h" #include "test/test_common/simulated_time_system.h" diff --git a/test/extensions/transport_sockets/tls/ssl_socket_test.cc b/test/extensions/transport_sockets/tls/ssl_socket_test.cc index ebaf803d8de2f..76f3a16b56b1b 100644 --- a/test/extensions/transport_sockets/tls/ssl_socket_test.cc +++ b/test/extensions/transport_sockets/tls/ssl_socket_test.cc @@ -32,9 +32,11 @@ #include "test/extensions/transport_sockets/tls/test_data/selfsigned_ecdsa_p256_cert_info.h" #include "test/extensions/transport_sockets/tls/test_private_key_method_provider.h" #include "test/mocks/buffer/mocks.h" +#include "test/mocks/init/mocks.h" +#include "test/mocks/local_info/mocks.h" #include "test/mocks/network/mocks.h" #include 
"test/mocks/secret/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/transport_socket_factory_context.h" #include "test/mocks/ssl/mocks.h" #include "test/mocks/stats/mocks.h" #include "test/test_common/environment.h" @@ -140,12 +142,19 @@ class TestUtilOptions : public TestUtilOptionsBase { return *this; } - TestUtilOptions& setExpectedDigest(const std::string& expected_digest) { - expected_digest_ = expected_digest; + TestUtilOptions& setExpectedSha256Digest(const std::string& expected_sha256_digest) { + expected_sha256_digest_ = expected_sha256_digest; return *this; } - const std::string& expectedDigest() const { return expected_digest_; } + const std::string& expectedSha256Digest() const { return expected_sha256_digest_; } + + TestUtilOptions& setExpectedSha1Digest(const std::string& expected_sha1_digest) { + expected_sha1_digest_ = expected_sha1_digest; + return *this; + } + + const std::string& expectedSha1Digest() const { return expected_sha1_digest_; } TestUtilOptions& setExpectedLocalUri(const std::string& expected_local_uri) { expected_local_uri_ = {expected_local_uri}; @@ -248,7 +257,8 @@ class TestUtilOptions : public TestUtilOptionsBase { bool expect_no_cert_chain_; bool expect_private_key_method_; Network::ConnectionEvent expected_server_close_event_; - std::string expected_digest_; + std::string expected_sha256_digest_; + std::string expected_sha1_digest_; std::vector expected_local_uri_; std::string expected_serial_number_; std::string expected_peer_issuer_; @@ -336,15 +346,27 @@ void testUtil(const TestUtilOptions& options) { size_t connect_count = 0; auto connect_second_time = [&]() { if (++connect_count == 2) { - if (!options.expectedDigest().empty()) { + if (!options.expectedSha256Digest().empty()) { // Assert twice to ensure a cached value is returned and still valid. 
- EXPECT_EQ(options.expectedDigest(), + EXPECT_EQ(options.expectedSha256Digest(), server_connection->ssl()->sha256PeerCertificateDigest()); - EXPECT_EQ(options.expectedDigest(), + EXPECT_EQ(options.expectedSha256Digest(), server_connection->ssl()->sha256PeerCertificateDigest()); } + if (!options.expectedSha1Digest().empty()) { + // Assert twice to ensure a cached value is returned and still valid. + EXPECT_EQ(options.expectedSha1Digest(), + server_connection->ssl()->sha1PeerCertificateDigest()); + EXPECT_EQ(options.expectedSha1Digest(), + server_connection->ssl()->sha1PeerCertificateDigest()); + } + // Assert twice to ensure a cached value is returned and still valid. EXPECT_EQ(options.expectedClientCertUri(), server_connection->ssl()->uriSanPeerCertificate()); + EXPECT_EQ(options.expectedClientCertUri(), server_connection->ssl()->uriSanPeerCertificate()); + if (!options.expectedLocalUri().empty()) { + // Assert twice to ensure a cached value is returned and still valid. + EXPECT_EQ(options.expectedLocalUri(), server_connection->ssl()->uriSanLocalCertificate()); EXPECT_EQ(options.expectedLocalUri(), server_connection->ssl()->uriSanLocalCertificate()); } EXPECT_EQ(options.expectedSerialNumber(), @@ -391,6 +413,7 @@ void testUtil(const TestUtilOptions& options) { EXPECT_FALSE(server_connection->ssl()->validFromPeerCertificate().has_value()); EXPECT_FALSE(server_connection->ssl()->expirationPeerCertificate().has_value()); EXPECT_EQ(EMPTY_STRING, server_connection->ssl()->sha256PeerCertificateDigest()); + EXPECT_EQ(EMPTY_STRING, server_connection->ssl()->sha1PeerCertificateDigest()); EXPECT_EQ(EMPTY_STRING, server_connection->ssl()->urlEncodedPemEncodedPeerCertificate()); EXPECT_EQ(EMPTY_STRING, server_connection->ssl()->subjectPeerCertificate()); EXPECT_EQ(std::vector{}, server_connection->ssl()->dnsSansPeerCertificate()); @@ -606,7 +629,7 @@ const std::string testUtilV2(const TestUtilOptionsV2& options) { if (!options.clientSession().empty()) { const SslSocketInfo* 
ssl_socket = dynamic_cast(client_connection->ssl().get()); - SSL* client_ssl_socket = ssl_socket->rawSslForTest(); + SSL* client_ssl_socket = ssl_socket->ssl(); SSL_CTX* client_ssl_context = SSL_get_SSL_CTX(client_ssl_socket); SSL_SESSION* client_ssl_session = SSL_SESSION_from_bytes(reinterpret_cast(options.clientSession().data()), @@ -649,8 +672,10 @@ const std::string testUtilV2(const TestUtilOptionsV2& options) { EXPECT_EQ(options.expectedClientCertUri(), server_connection->ssl()->uriSanPeerCertificate()); const SslSocketInfo* ssl_socket = dynamic_cast(client_connection->ssl().get()); - SSL* client_ssl_socket = ssl_socket->rawSslForTest(); + SSL* client_ssl_socket = ssl_socket->ssl(); if (!options.expectedProtocolVersion().empty()) { + // Assert twice to ensure a cached value is returned and still valid. + EXPECT_EQ(options.expectedProtocolVersion(), client_connection->ssl()->tlsVersion()); EXPECT_EQ(options.expectedProtocolVersion(), client_connection->ssl()->tlsVersion()); } if (!options.expectedCiphersuite().empty()) { @@ -664,7 +689,7 @@ const std::string testUtilV2(const TestUtilOptionsV2& options) { absl::optional server_ssl_requested_server_name; const SslSocketInfo* server_ssl_socket = dynamic_cast(server_connection->ssl().get()); - SSL* server_ssl = server_ssl_socket->rawSslForTest(); + SSL* server_ssl = server_ssl_socket->ssl(); auto requested_server_name = SSL_get_servername(server_ssl, TLSEXT_NAMETYPE_host_name); if (requested_server_name != nullptr) { server_ssl_requested_server_name = std::string(requested_server_name); @@ -812,10 +837,35 @@ TEST_P(SslSocketTest, GetCertDigest) { )EOF"; TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, true, GetParam()); - testUtil(test_options.setExpectedDigest(TEST_NO_SAN_CERT_HASH) + testUtil(test_options.setExpectedSha256Digest(TEST_NO_SAN_CERT_256_HASH) + .setExpectedSha1Digest(TEST_NO_SAN_CERT_1_HASH) .setExpectedSerialNumber(TEST_NO_SAN_CERT_SERIAL)); } +TEST_P(SslSocketTest, 
GetCertDigestInvalidFiles) { + const std::string client_ctx_yaml = R"EOF( + common_tls_context: + tls_certificates: +)EOF"; + + const std::string server_ctx_yaml = R"EOF( + common_tls_context: + tls_certificates: + certificate_chain: + filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem" + private_key: + filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem" + validation_context: + trusted_ca: + filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem" +)EOF"; + + TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, true, GetParam()); + testUtil( + test_options.setExpectedSha256Digest("").setExpectedSha1Digest("").setExpectedSerialNumber( + "")); +} + TEST_P(SslSocketTest, GetCertDigestInline) { envoy::config::listener::v3::Listener listener; envoy::config::listener::v3::FilterChain* filter_chain = listener.add_filter_chains(); @@ -885,7 +935,8 @@ TEST_P(SslSocketTest, GetCertDigestServerCertWithIntermediateCA) { )EOF"; TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, true, GetParam()); - testUtil(test_options.setExpectedDigest(TEST_NO_SAN_CERT_HASH) + testUtil(test_options.setExpectedSha256Digest(TEST_NO_SAN_CERT_256_HASH) + .setExpectedSha1Digest(TEST_NO_SAN_CERT_1_HASH) .setExpectedSerialNumber(TEST_NO_SAN_CERT_SERIAL)); } @@ -912,7 +963,8 @@ TEST_P(SslSocketTest, GetCertDigestServerCertWithoutCommonName) { )EOF"; TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, true, GetParam()); - testUtil(test_options.setExpectedDigest(TEST_NO_SAN_CERT_HASH) + testUtil(test_options.setExpectedSha256Digest(TEST_NO_SAN_CERT_256_HASH) + .setExpectedSha1Digest(TEST_NO_SAN_CERT_1_HASH) .setExpectedSerialNumber(TEST_NO_SAN_CERT_SERIAL)); } @@ -2511,7 +2563,7 @@ TEST_P(SslSocketTest, ClientAuthMultipleCAs) { const SslSocketInfo* ssl_socket = dynamic_cast(client_connection->ssl().get()); SSL_set_cert_cb( - ssl_socket->rawSslForTest(), 
+ ssl_socket->ssl(), [](SSL* ssl, void*) -> int { STACK_OF(X509_NAME)* list = SSL_get_client_CA_list(ssl); EXPECT_NE(nullptr, list); @@ -2624,7 +2676,7 @@ void testTicketSessionResumption(const std::string& server_ctx_yaml1, .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { const SslSocketInfo* ssl_socket = dynamic_cast(client_connection->ssl().get()); - ssl_session = SSL_get1_session(ssl_socket->rawSslForTest()); + ssl_session = SSL_get1_session(ssl_socket->ssl()); EXPECT_TRUE(SSL_SESSION_is_resumable(ssl_session)); if (expected_lifetime_hint) { auto lifetime_hint = SSL_SESSION_get_ticket_lifetime_hint(ssl_session); @@ -2647,7 +2699,7 @@ void testTicketSessionResumption(const std::string& server_ctx_yaml1, client_connection->addConnectionCallbacks(client_connection_callbacks); const SslSocketInfo* ssl_socket = dynamic_cast(client_connection->ssl().get()); - SSL_set_session(ssl_socket->rawSslForTest(), ssl_session); + SSL_set_session(ssl_socket->ssl(), ssl_session); SSL_SESSION_free(ssl_session); client_connection->connect(); @@ -2753,7 +2805,7 @@ void testSupportForStatelessSessionResumption(const std::string& server_ctx_yaml const SslSocketInfo* ssl_socket = dynamic_cast(server_connection->ssl().get()); - SSL* server_ssl_socket = ssl_socket->rawSslForTest(); + SSL* server_ssl_socket = ssl_socket->ssl(); SSL_CTX* server_ssl_context = SSL_get_SSL_CTX(server_ssl_socket); if (expect_support) { EXPECT_EQ(0, (SSL_CTX_get_options(server_ssl_context) & SSL_OP_NO_TICKET)); @@ -3207,7 +3259,7 @@ TEST_P(SslSocketTest, ClientAuthCrossListenerSessionResumption) { .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { const SslSocketInfo* ssl_socket = dynamic_cast(client_connection->ssl().get()); - ssl_session = SSL_get1_session(ssl_socket->rawSslForTest()); + ssl_session = SSL_get1_session(ssl_socket->ssl()); EXPECT_TRUE(SSL_SESSION_is_resumable(ssl_session)); server_connection->close(Network::ConnectionCloseType::NoFlush); 
client_connection->close(Network::ConnectionCloseType::NoFlush); @@ -3226,7 +3278,7 @@ TEST_P(SslSocketTest, ClientAuthCrossListenerSessionResumption) { client_connection->addConnectionCallbacks(client_connection_callbacks); const SslSocketInfo* ssl_socket = dynamic_cast(client_connection->ssl().get()); - SSL_set_session(ssl_socket->rawSslForTest(), ssl_session); + SSL_set_session(ssl_socket->ssl(), ssl_session); SSL_SESSION_free(ssl_session); client_connection->connect(); @@ -3640,7 +3692,7 @@ TEST_P(SslSocketTest, ProtocolVersions) { client_params->clear_tls_minimum_protocol_version(); client_params->clear_tls_maximum_protocol_version(); - // Connection using TLSv1.3 (client) and defaults (server) succeeds (non-FIPS) or fails (FIPS). + // Connection using TLSv1.3 (client) and defaults (server) succeeds. client_params->set_tls_minimum_protocol_version( envoy::extensions::transport_sockets::tls::v3::TlsParameters::TLSv1_3); client_params->set_tls_maximum_protocol_version( @@ -3650,11 +3702,7 @@ TEST_P(SslSocketTest, ProtocolVersions) { TestUtilOptionsV2 error_test_options(listener, client, false, GetParam()); error_test_options.setExpectedServerStats("ssl.connection_error") .setExpectedTransportFailureReasonContains("TLSV1_ALERT_PROTOCOL_VERSION"); -#ifndef BORINGSSL_FIPS testUtilV2(tls_v1_3_test_options); -#else // BoringSSL FIPS - testUtilV2(error_test_options); -#endif client_params->clear_tls_minimum_protocol_version(); client_params->clear_tls_maximum_protocol_version(); @@ -3663,11 +3711,7 @@ TEST_P(SslSocketTest, ProtocolVersions) { envoy::extensions::transport_sockets::tls::v3::TlsParameters::TLSv1_0); client_params->set_tls_maximum_protocol_version( envoy::extensions::transport_sockets::tls::v3::TlsParameters::TLSv1_3); -#ifndef BORINGSSL_FIPS testUtilV2(tls_v1_3_test_options); -#else // BoringSSL FIPS - testUtilV2(tls_v1_2_test_options); -#endif client_params->clear_tls_minimum_protocol_version(); client_params->clear_tls_maximum_protocol_version(); @@ 
-3847,6 +3891,11 @@ TEST_P(SslSocketTest, ALPN) { testUtilV2(test_options); client_ctx->clear_alpn_protocols(); server_ctx->clear_alpn_protocols(); + + // Client attempts to configure ALPN that is too large. + client_ctx->add_alpn_protocols(std::string(100000, 'a')); + EXPECT_THROW_WITH_MESSAGE(testUtilV2(test_options), EnvoyException, + "Invalid ALPN protocol string"); } TEST_P(SslSocketTest, CipherSuites) { @@ -4189,15 +4238,29 @@ TEST_P(SslSocketTest, OverrideApplicationProtocols) { server_ctx->add_alpn_protocols("test"); testUtilV2(test_options); server_ctx->clear_alpn_protocols(); - // Override client side ALPN, "test" ALPN is used. server_ctx->add_alpn_protocols("test"); - Network::TransportSocketOptionsSharedPtr transport_socket_options( - new Network::TransportSocketOptionsImpl("", {}, {"foo", "test", "bar"})); + auto transport_socket_options = std::make_shared( + "", std::vector{}, std::vector{"foo", "test", "bar"}); testUtilV2(test_options.setExpectedALPNProtocol("test").setTransportSocketOptions( transport_socket_options)); - server_ctx->clear_alpn_protocols(); + + // Set fallback ALPN on the client side ALPN, "test" ALPN is used since no ALPN is specified + // in the config. + server_ctx->add_alpn_protocols("test"); + transport_socket_options = std::make_shared( + "", std::vector{}, std::vector{}, "test"); + testUtilV2(test_options.setExpectedALPNProtocol("test").setTransportSocketOptions( + transport_socket_options)); + + // Update the client TLS config to specify ALPN. The fallback value should no longer be used. + // Note that the server prefers "test" over "bar", but since the client only configures "bar", + // the resulting ALPN will be "bar" even though "test" is included in the fallback. 
+ server_ctx->add_alpn_protocols("bar"); + client.mutable_common_tls_context()->add_alpn_protocols("bar"); + testUtilV2(test_options.setExpectedALPNProtocol("bar").setTransportSocketOptions( + transport_socket_options)); } // Validate that if downstream secrets are not yet downloaded from SDS server, Envoy creates @@ -4211,7 +4274,7 @@ TEST_P(SslSocketTest, DownstreamNotReadySslSocket) { EXPECT_CALL(factory_context, dispatcher()).WillRepeatedly(ReturnRef(dispatcher)); EXPECT_CALL(factory_context, localInfo()).WillOnce(ReturnRef(local_info)); EXPECT_CALL(factory_context, stats()).WillOnce(ReturnRef(stats_store)); - EXPECT_CALL(factory_context, initManager()).WillRepeatedly(Return(&init_manager)); + EXPECT_CALL(factory_context, initManager()).WillRepeatedly(ReturnRef(init_manager)); envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context; auto sds_secret_configs = @@ -4246,7 +4309,7 @@ TEST_P(SslSocketTest, UpstreamNotReadySslSocket) { NiceMock dispatcher; EXPECT_CALL(factory_context, localInfo()).WillOnce(ReturnRef(local_info)); EXPECT_CALL(factory_context, stats()).WillOnce(ReturnRef(stats_store)); - EXPECT_CALL(factory_context, initManager()).WillRepeatedly(Return(&init_manager)); + EXPECT_CALL(factory_context, initManager()).WillRepeatedly(ReturnRef(init_manager)); EXPECT_CALL(factory_context, dispatcher()).WillRepeatedly(ReturnRef(dispatcher)); envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext tls_context; @@ -4368,16 +4431,16 @@ class SslReadBufferLimitTest : public SslSocketTest { dispatcher_ = api_->allocateDispatcher("test_thread", Buffer::WatermarkFactoryPtr{factory}); // By default, expect 4 buffers to be created - the client and server read and write buffers. 
- EXPECT_CALL(*factory, create_(_, _)) + EXPECT_CALL(*factory, create_(_, _, _)) .Times(2) - .WillOnce(Invoke([&](std::function below_low, - std::function above_high) -> Buffer::Instance* { - client_write_buffer = new MockWatermarkBuffer(below_low, above_high); + .WillOnce(Invoke([&](std::function below_low, std::function above_high, + std::function above_overflow) -> Buffer::Instance* { + client_write_buffer = new MockWatermarkBuffer(below_low, above_high, above_overflow); return client_write_buffer; })) - .WillRepeatedly(Invoke([](std::function below_low, - std::function above_high) -> Buffer::Instance* { - return new Buffer::WatermarkBuffer(below_low, above_high); + .WillRepeatedly(Invoke([](std::function below_low, std::function above_high, + std::function above_overflow) -> Buffer::Instance* { + return new Buffer::WatermarkBuffer(below_low, above_high, above_overflow); })); initialize(); diff --git a/test/extensions/transport_sockets/tls/test_data/BUILD b/test/extensions/transport_sockets/tls/test_data/BUILD index 2397d1c8f6339..e37742552d78d 100644 --- a/test/extensions/transport_sockets/tls/test_data/BUILD +++ b/test/extensions/transport_sockets/tls/test_data/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() filegroup( diff --git a/test/extensions/transport_sockets/tls/test_data/no_san_cert_info.h b/test/extensions/transport_sockets/tls/test_data/no_san_cert_info.h index bb1ec52cbe6ac..b1b23f3b5ba5c 100644 --- a/test/extensions/transport_sockets/tls/test_data/no_san_cert_info.h +++ b/test/extensions/transport_sockets/tls/test_data/no_san_cert_info.h @@ -1,6 +1,7 @@ // NOLINT(namespace-envoy) -constexpr char TEST_NO_SAN_CERT_HASH[] = +constexpr char TEST_NO_SAN_CERT_256_HASH[] = "0035c2f2cefc21bd5e1e52b945ff26c474dad33343ae00aa8f86f4877aa02eca"; +constexpr char TEST_NO_SAN_CERT_1_HASH[] = 
"7bf61b89caf51c49c3dfaf6209b6a7ad900b352b"; constexpr char TEST_NO_SAN_CERT_SPKI[] = "xVbSFNk3uh/hr0XoZArX7fc1RrKx0oQ+OkVcGa1HCzY="; constexpr char TEST_NO_SAN_CERT_SERIAL[] = "b8b5ecc898f21249"; constexpr char TEST_NO_SAN_CERT_NOT_BEFORE[] = "Dec 18 01:50:34 2018 GMT"; diff --git a/test/extensions/upstreams/http/tcp/BUILD b/test/extensions/upstreams/http/tcp/BUILD new file mode 100644 index 0000000000000..70bea0f751776 --- /dev/null +++ b/test/extensions/upstreams/http/tcp/BUILD @@ -0,0 +1,32 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_test", + "envoy_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_cc_test( + name = "upstream_request_test", + srcs = ["upstream_request_test.cc"], + deps = [ + "//source/common/buffer:buffer_lib", + "//source/common/router:router_lib", + "//source/common/upstream:upstream_includes", + "//source/common/upstream:upstream_lib", + "//source/extensions/upstreams/http/tcp:upstream_request_lib", + "//test/common/http:common_lib", + "//test/mocks:common_lib", + "//test/mocks/network:network_mocks", + "//test/mocks/router:router_filter_interface", + "//test/mocks/router:router_mocks", + "//test/mocks/server:factory_context_mocks", + "//test/mocks/server:instance_mocks", + "//test/mocks/upstream:upstream_mocks", + "//test/test_common:environment_lib", + "//test/test_common:simulated_time_system_lib", + "//test/test_common:utility_lib", + ], +) diff --git a/test/extensions/upstreams/http/tcp/upstream_request_test.cc b/test/extensions/upstreams/http/tcp/upstream_request_test.cc new file mode 100644 index 0000000000000..1672f700f3c77 --- /dev/null +++ b/test/extensions/upstreams/http/tcp/upstream_request_test.cc @@ -0,0 +1,234 @@ +#include "common/buffer/buffer_impl.h" +#include "common/router/config_impl.h" +#include "common/router/router.h" +#include "common/router/upstream_request.h" + +#include "extensions/common/proxy_protocol/proxy_protocol_header.h" +#include 
"extensions/upstreams/http/tcp/upstream_request.h" + +#include "test/common/http/common.h" +#include "test/mocks/common.h" +#include "test/mocks/router/mocks.h" +#include "test/mocks/router/router_filter_interface.h" +#include "test/mocks/server/factory_context.h" +#include "test/mocks/server/instance.h" +#include "test/mocks/tcp/mocks.h" +#include "test/test_common/utility.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using Envoy::Http::TestRequestHeaderMapImpl; +using Envoy::Router::UpstreamRequest; +using testing::_; +using testing::AnyNumber; +using testing::NiceMock; +using testing::Return; +using testing::ReturnRef; + +namespace Envoy { +namespace Extensions { +namespace Upstreams { +namespace Http { +namespace Tcp { + +class TcpConnPoolTest : public ::testing::Test { +public: + TcpConnPoolTest() : host_(std::make_shared>()) { + NiceMock route_entry; + NiceMock cm; + EXPECT_CALL(cm, tcpConnPoolForCluster(_, _, _)).WillOnce(Return(&mock_pool_)); + conn_pool_ = std::make_unique(cm, true, route_entry, Envoy::Http::Protocol::Http11, + nullptr); + } + + std::unique_ptr conn_pool_; + Envoy::Tcp::ConnectionPool::MockInstance mock_pool_; + Router::MockGenericConnectionPoolCallbacks mock_generic_callbacks_; + std::shared_ptr> host_; + NiceMock cancellable_; +}; + +TEST_F(TcpConnPoolTest, Basic) { + NiceMock connection; + + EXPECT_CALL(mock_pool_, newConnection(_)).WillOnce(Return(&cancellable_)); + conn_pool_->newStream(&mock_generic_callbacks_); + + EXPECT_CALL(mock_generic_callbacks_, upstreamToDownstream()); + EXPECT_CALL(mock_generic_callbacks_, onPoolReady(_, _, _, _)); + auto data = std::make_unique>(); + EXPECT_CALL(*data, connection()).Times(AnyNumber()).WillRepeatedly(ReturnRef(connection)); + conn_pool_->onPoolReady(std::move(data), host_); +} + +TEST_F(TcpConnPoolTest, OnPoolFailure) { + EXPECT_CALL(mock_pool_, newConnection(_)).WillOnce(Return(&cancellable_)); + conn_pool_->newStream(&mock_generic_callbacks_); + + 
EXPECT_CALL(mock_generic_callbacks_, onPoolFailure(_, _, _)); + conn_pool_->onPoolFailure(Envoy::Tcp::ConnectionPool::PoolFailureReason::LocalConnectionFailure, + host_); + + // Make sure that the pool failure nulled out the pending request. + EXPECT_FALSE(conn_pool_->cancelAnyPendingRequest()); +} + +TEST_F(TcpConnPoolTest, Cancel) { + // Initially cancel should fail as there is no pending request. + EXPECT_FALSE(conn_pool_->cancelAnyPendingRequest()); + + EXPECT_CALL(mock_pool_, newConnection(_)).WillOnce(Return(&cancellable_)); + conn_pool_->newStream(&mock_generic_callbacks_); + + // Canceling should now return true as there was an active request. + EXPECT_TRUE(conn_pool_->cancelAnyPendingRequest()); + + // A second cancel should return false as there is not a pending request. + EXPECT_FALSE(conn_pool_->cancelAnyPendingRequest()); +} + +class TcpUpstreamTest : public ::testing::Test { +public: + TcpUpstreamTest() { + mock_router_filter_.requests_.push_back(std::make_unique( + mock_router_filter_, std::make_unique>())); + auto data = std::make_unique>(); + EXPECT_CALL(*data, connection()).Times(AnyNumber()).WillRepeatedly(ReturnRef(connection_)); + tcp_upstream_ = + std::make_unique(mock_router_filter_.requests_.front().get(), std::move(data)); + } + ~TcpUpstreamTest() override { EXPECT_CALL(mock_router_filter_, config()).Times(AnyNumber()); } + +protected: + NiceMock connection_; + NiceMock mock_router_filter_; + Envoy::Tcp::ConnectionPool::MockConnectionData* mock_connection_data_; + std::unique_ptr tcp_upstream_; + TestRequestHeaderMapImpl request_{{":method", "CONNECT"}, + {":path", "/"}, + {":protocol", "bytestream"}, + {":scheme", "https"}, + {":authority", "host"}}; +}; + +TEST_F(TcpUpstreamTest, Basic) { + // Swallow the request headers and generate response headers. 
+ EXPECT_CALL(connection_, write(_, false)).Times(0); + EXPECT_CALL(mock_router_filter_, onUpstreamHeaders(200, _, _, false)); + tcp_upstream_->encodeHeaders(request_, false); + + // Proxy the data. + EXPECT_CALL(connection_, write(BufferStringEqual("foo"), false)); + Buffer::OwnedImpl buffer("foo"); + tcp_upstream_->encodeData(buffer, false); + + // Metadata is swallowed. + Envoy::Http::MetadataMapVector metadata_map_vector; + tcp_upstream_->encodeMetadata(metadata_map_vector); + + // Forward data. + Buffer::OwnedImpl response1("bar"); + EXPECT_CALL(mock_router_filter_, onUpstreamData(BufferStringEqual("bar"), _, false)); + tcp_upstream_->onUpstreamData(response1, false); + + Buffer::OwnedImpl response2("eep"); + EXPECT_CALL(mock_router_filter_, onUpstreamHeaders(_, _, _, _)).Times(0); + EXPECT_CALL(mock_router_filter_, onUpstreamData(BufferStringEqual("eep"), _, false)); + tcp_upstream_->onUpstreamData(response2, false); +} + +TEST_F(TcpUpstreamTest, V1Header) { + envoy::config::core::v3::ProxyProtocolConfig* proxy_config = + mock_router_filter_.route_entry_.connect_config_->mutable_proxy_protocol_config(); + proxy_config->set_version(envoy::config::core::v3::ProxyProtocolConfig::V1); + mock_router_filter_.client_connection_.remote_address_ = + std::make_shared("1.2.3.4", 5); + mock_router_filter_.client_connection_.local_address_ = + std::make_shared("4.5.6.7", 8); + + Buffer::OwnedImpl expected_data; + Extensions::Common::ProxyProtocol::generateProxyProtoHeader( + *proxy_config, mock_router_filter_.client_connection_, expected_data); + + // encodeHeaders now results in the proxy proto header being sent. + EXPECT_CALL(connection_, write(BufferEqual(&expected_data), false)); + tcp_upstream_->encodeHeaders(request_, false); + + // Data is proxied as usual. 
+ EXPECT_CALL(connection_, write(BufferStringEqual("foo"), false)); + Buffer::OwnedImpl buffer("foo"); + tcp_upstream_->encodeData(buffer, false); +} + +TEST_F(TcpUpstreamTest, V2Header) { + envoy::config::core::v3::ProxyProtocolConfig* proxy_config = + mock_router_filter_.route_entry_.connect_config_->mutable_proxy_protocol_config(); + proxy_config->set_version(envoy::config::core::v3::ProxyProtocolConfig::V2); + mock_router_filter_.client_connection_.remote_address_ = + std::make_shared("1.2.3.4", 5); + mock_router_filter_.client_connection_.local_address_ = + std::make_shared("4.5.6.7", 8); + + Buffer::OwnedImpl expected_data; + Extensions::Common::ProxyProtocol::generateProxyProtoHeader( + *proxy_config, mock_router_filter_.client_connection_, expected_data); + + // encodeHeaders now results in the proxy proto header being sent. + EXPECT_CALL(connection_, write(BufferEqual(&expected_data), false)); + tcp_upstream_->encodeHeaders(request_, false); + + // Data is proxied as usual. + EXPECT_CALL(connection_, write(BufferStringEqual("foo"), false)); + Buffer::OwnedImpl buffer("foo"); + tcp_upstream_->encodeData(buffer, false); +} + +TEST_F(TcpUpstreamTest, TrailersEndStream) { + // Swallow the headers. + tcp_upstream_->encodeHeaders(request_, false); + + EXPECT_CALL(connection_, write(BufferStringEqual(""), true)); + Envoy::Http::TestRequestTrailerMapImpl trailers{{"foo", "bar"}}; + tcp_upstream_->encodeTrailers(trailers); +} + +TEST_F(TcpUpstreamTest, HeaderEndStreamHalfClose) { + EXPECT_CALL(connection_, write(BufferStringEqual(""), true)); + tcp_upstream_->encodeHeaders(request_, true); +} + +TEST_F(TcpUpstreamTest, ReadDisable) { + EXPECT_CALL(connection_, readDisable(true)); + tcp_upstream_->readDisable(true); + + EXPECT_CALL(connection_, readDisable(false)); + tcp_upstream_->readDisable(false); + + // Once the connection is closed, don't touch it. 
+ connection_.state_ = Network::Connection::State::Closed; + EXPECT_CALL(connection_, readDisable(_)).Times(0); + tcp_upstream_->readDisable(true); +} + +TEST_F(TcpUpstreamTest, UpstreamEvent) { + // Make sure upstream disconnects result in stream reset. + EXPECT_CALL(mock_router_filter_, + onUpstreamReset(Envoy::Http::StreamResetReason::ConnectionTermination, "", _)); + tcp_upstream_->onEvent(Network::ConnectionEvent::RemoteClose); +} + +TEST_F(TcpUpstreamTest, Watermarks) { + EXPECT_CALL(mock_router_filter_, callbacks()).Times(AnyNumber()); + EXPECT_CALL(mock_router_filter_.callbacks_, onDecoderFilterAboveWriteBufferHighWatermark()); + tcp_upstream_->onAboveWriteBufferHighWatermark(); + + EXPECT_CALL(mock_router_filter_.callbacks_, onDecoderFilterBelowWriteBufferLowWatermark()); + tcp_upstream_->onBelowWriteBufferLowWatermark(); +} + +} // namespace Tcp +} // namespace Http +} // namespace Upstreams +} // namespace Extensions +} // namespace Envoy diff --git a/test/fuzz/BUILD b/test/fuzz/BUILD index d46ae39a51b34..35bd8e0ac197a 100644 --- a/test/fuzz/BUILD +++ b/test/fuzz/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test_library", @@ -7,6 +5,8 @@ load( "envoy_proto_library", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_proto_library( diff --git a/test/fuzz/README.md b/test/fuzz/README.md index 10849dd15c9b4..0104affcfd3d0 100644 --- a/test/fuzz/README.md +++ b/test/fuzz/README.md @@ -45,7 +45,7 @@ The fuzz test will be executed in three environments: and basic sanitizers just on the supplied corpus. 1. Using the libFuzzer fuzzing engine and ASAN when run in the Envoy repository with `bazel run - //test/path/to/some_fuzz_test_with_libfuzzer --config asan-fuzzer`. This is where real fuzzing + //test/path/to/some_fuzz_test --config asan-fuzzer`. This is where real fuzzing takes place locally. 
The built binary can take libFuzzer command-line flags, including the number of runs and the maximum input length. @@ -66,7 +66,7 @@ The fuzz test will be executed in three environments: 4. Run the `envoy_cc_fuzz_test` target to test against the seed corpus. E.g. `bazel test //test/common/common:base64_fuzz_test`. -5. Run the `*_fuzz_test_with_libfuzzer` target against libFuzzer. E.g. `bazel run +5. Run the `*_fuzz_test` target against libFuzzer. E.g. `bazel run //test/common/common:base64_fuzz_test --config asan-fuzzer`. ## Protobuf fuzz tests @@ -88,15 +88,15 @@ Within the Envoy repository, we have various `*_fuzz_test` targets. When run und these will exercise the corpus as inputs but not actually link and run against any fuzzer (e.g. [`libfuzzer`](https://llvm.org/docs/LibFuzzer.html)). -To get actual fuzzing performed, the `*_fuzz_test_with_libfuzzer` target needs to be built with -`--config asan-fuzzer`. This links the target to the libFuzzer fuzzing engine. This is recommended -when writing new fuzz tests to check if they pick up any low hanging fruit (i.e. what you can find -on your local machine vs. the fuzz cluster). The binary takes the location of the seed corpus +To get actual fuzzing performed, the `*_fuzz_test` target needs to be built with `--config +asan-fuzzer`. This links the target to the libFuzzer fuzzing engine. This is recommended when +writing new fuzz tests to check if they pick up any low hanging fruit (i.e. what you can find on +your local machine vs. the fuzz cluster). The binary takes the location of the seed corpus directory. Fuzzing continues indefinitely until a bug is found or the number of iterations it should perform is specified with `-runs`. 
For example, -`bazel run //test/common/common:base64_fuzz_test_with_libfuzzer --config asan-fuzzer -- -test/common/common/base64_corpus -runs=1000` +`bazel run //test/common/common:base64_fuzz_test --config asan-fuzzer +--test/common/common/base64_corpus -runs=1000` The fuzzer prints information to stderr: @@ -158,7 +158,15 @@ to provide fuzzers some interesting starting points for invalid inputs. ## Coverage reports Coverage reports, where individual lines are annotated with fuzzing hit counts, are a useful way to -understand the scope and efficacy of the Envoy fuzzers. You can generate such reports from the +understand the scope and efficacy of the Envoy fuzzers. You can generate fuzz coverage reports both locally, and using the OSS-Fuzz infrastructure. + +To generate fuzz coverage reports locally (see [Coverage builds](bazel/README.md), run +``` +FUZZ_COVERAGE=true test/run_envoy_bazel_coverage.sh +``` +This generates a coverage report after running the fuzz targets for one minute against the fuzzing engine libfuzzer and using the checked-in corpus as an initial seed. + +Otherwise, you can generate reports from the ClusterFuzz corpus following the general ClusterFuzz [instructions for profiling setup](https://github.com/google/oss-fuzz/blob/master/docs/code_coverage.md). diff --git a/test/fuzz/common.proto b/test/fuzz/common.proto index b32db65c98e09..7b8bc1f83c61d 100644 --- a/test/fuzz/common.proto +++ b/test/fuzz/common.proto @@ -5,6 +5,7 @@ package test.fuzz; import "envoy/config/core/v3/base.proto"; import "envoy/config/core/v3/address.proto"; +import "google/protobuf/any.proto"; import "google/protobuf/wrappers.proto"; import "validate/validate.proto"; @@ -15,9 +16,33 @@ message Headers { repeated envoy.config.core.v3.HeaderValue headers = 1; } +message Metadata { + map metadata = 1; +} + +message HttpBody { + // The bytes that will be used as the request body. 
+ repeated string data = 1 [(validate.rules).repeated .min_items = 1]; +} + +// HttpBody cannot efficiently create serialized protos. +// Use ProtoBody instead to test grpc data. +message ProtoBody { + // The proto message that will be serialized and used as the request body. + google.protobuf.Any message = 1 [(validate.rules).any.required = true]; + + // The size (in bytes) of each buffer when forming the requests. + uint64 chunk_size = 2 [(validate.rules).uint64 = {gt: 0, lt: 8192}]; +} + message HttpData { Headers headers = 1; - repeated string data = 2; + + oneof body { + HttpBody http_body = 2; + ProtoBody proto_body = 4; + } + Headers trailers = 3; } diff --git a/test/fuzz/fuzz_runner.cc b/test/fuzz/fuzz_runner.cc index 508f1e922c437..bda9446b39e9c 100644 --- a/test/fuzz/fuzz_runner.cc +++ b/test/fuzz/fuzz_runner.cc @@ -47,6 +47,14 @@ void Runner::setupEnvironment(int argc, char** argv, spdlog::level::level_enum d static auto* logging_context = new Logger::Context(log_level_, TestEnvironment::getOptions().logFormat(), *lock, false); UNREFERENCED_PARAMETER(logging_context); + + // Suppress all libprotobuf non-fatal logging as long as this object exists. + // For fuzzing, this prevents logging when parsing text-format protos fails, + // deprecated fields are used, etc. 
+ // https://github.com/protocolbuffers/protobuf/blob/204f99488ce1ef74565239cf3963111ae4c774b7/src/google/protobuf/stubs/logging.h#L223 + if (log_level_ > spdlog::level::debug) { + ABSL_ATTRIBUTE_UNUSED static auto* log_silencer = new Protobuf::LogSilencer(); + } } } // namespace Fuzz diff --git a/test/fuzz/fuzz_runner.h b/test/fuzz/fuzz_runner.h index 31a317a220c0c..5349d1241cd58 100644 --- a/test/fuzz/fuzz_runner.h +++ b/test/fuzz/fuzz_runner.h @@ -8,7 +8,7 @@ #include "libprotobuf_mutator/src/libfuzzer/libfuzzer_macro.h" // Bring in FuzzedDataProvider, see // https://github.com/google/fuzzing/blob/master/docs/split-inputs.md#fuzzed-data-provider -#include "fuzzer/utils/FuzzedDataProvider.h" +#include "fuzzer/FuzzedDataProvider.h" #include "spdlog/spdlog.h" namespace Envoy { diff --git a/test/fuzz/main.cc b/test/fuzz/main.cc index 98e30e63cbb8a..d1c98eb6eed9f 100644 --- a/test/fuzz/main.cc +++ b/test/fuzz/main.cc @@ -54,13 +54,8 @@ INSTANTIATE_TEST_SUITE_P(CorpusExamples, FuzzerCorpusTest, testing::ValuesIn(tes } // namespace Envoy int main(int argc, char** argv) { -#ifndef __APPLE__ - absl::InitializeSymbolizer(argv[0]); -#endif -#ifdef ENVOY_HANDLE_SIGNALS - // Enabled by default. Control with "bazel --define=signal_trace=disabled" - Envoy::SignalAction handle_sigs; -#endif + Envoy::TestEnvironment::initializeTestMain(argv[0]); + // Expected usage: [other gtest flags] RELEASE_ASSERT(argc >= 2, ""); // Consider any file after the test path which doesn't have a - prefix to be a corpus entry. diff --git a/test/fuzz/utility.h b/test/fuzz/utility.h index ac1354648b64e..534e5f1f88503 100644 --- a/test/fuzz/utility.h +++ b/test/fuzz/utility.h @@ -12,6 +12,8 @@ #include "test/mocks/upstream/host.h" #include "test/test_common/utility.h" +#include "nghttp2/nghttp2.h" + // Strong assertion that applies across all compilation modes and doesn't rely // on gtest, which only provides soft fails that don't trip oss-fuzz failures. 
#define FUZZ_ASSERT(x) RELEASE_ASSERT(x, "") @@ -50,12 +52,10 @@ inline std::string replaceInvalidHostCharacters(absl::string_view string) { std::string filtered; filtered.reserve(string.length()); for (const char& c : string) { - switch (c) { - case ' ': - filtered.push_back('0'); - break; - default: + if (nghttp2_check_authority(reinterpret_cast(&c), 1)) { filtered.push_back(c); + } else { + filtered.push_back('0'); } } return filtered; @@ -83,27 +83,45 @@ replaceInvalidStringValues(const envoy::config::core::v3::Metadata& upstream_met template inline T fromHeaders( const test::fuzz::Headers& headers, - const std::unordered_set& ignore_headers = std::unordered_set()) { + const absl::node_hash_set& ignore_headers = absl::node_hash_set(), + absl::node_hash_set include_headers = absl::node_hash_set()) { T header_map; for (const auto& header : headers.headers()) { if (ignore_headers.find(absl::AsciiStrToLower(header.key())) == ignore_headers.end()) { header_map.addCopy(header.key(), header.value()); } + include_headers.erase(absl::AsciiStrToLower(header.key())); + } + // Add dummy headers for non-present headers that must be included. + for (const auto& header : include_headers) { + header_map.addCopy(header, "dummy"); } return header_map; } +// Convert from test proto Metadata to MetadataMap +inline Http::MetadataMapVector fromMetadata(const test::fuzz::Metadata& metadata) { + Http::MetadataMapVector metadata_map_vector; + if (!metadata.metadata().empty()) { + Http::MetadataMap metadata_map; + Http::MetadataMapPtr metadata_map_ptr = std::make_unique(metadata_map); + for (const auto& pair : metadata.metadata()) { + metadata_map_ptr->insert(pair); + } + metadata_map_vector.push_back(std::move(metadata_map_ptr)); + } + return metadata_map_vector; +} + // Convert from HeaderMap to test proto Headers. 
inline test::fuzz::Headers toHeaders(const Http::HeaderMap& headers) { test::fuzz::Headers fuzz_headers; - headers.iterate( - [](const Http::HeaderEntry& header, void* ctxt) -> Http::HeaderMap::Iterate { - auto* fuzz_header = static_cast(ctxt)->add_headers(); - fuzz_header->set_key(std::string(header.key().getStringView())); - fuzz_header->set_value(std::string(header.value().getStringView())); - return Http::HeaderMap::Iterate::Continue; - }, - &fuzz_headers); + headers.iterate([&fuzz_headers](const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate { + auto* fuzz_header = fuzz_headers.add_headers(); + fuzz_header->set_key(std::string(header.key().getStringView())); + fuzz_header->set_value(std::string(header.value().getStringView())); + return Http::HeaderMap::Iterate::Continue; + }); return fuzz_headers; } diff --git a/test/integration/BUILD b/test/integration/BUILD index ad714ee651671..f54848a6e70cb 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -1,16 +1,19 @@ -licenses(["notice"]) # Apache 2 - +load("@rules_python//python:defs.bzl", "py_binary") load( "//bazel:envoy_build_system.bzl", "envoy_cc_fuzz_test", "envoy_cc_test", + "envoy_cc_test_binary", "envoy_cc_test_library", "envoy_package", "envoy_proto_library", "envoy_select_hot_restart", + "envoy_select_new_codecs_in_integration_tests", "envoy_sh_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test_library( @@ -27,10 +30,10 @@ envoy_cc_test_library( ], deps = [ ":http_integration_lib", - "//source/common/common:version_lib", "//source/common/config:protobuf_link_hacks", "//source/common/config:version_converter_lib", "//source/common/protobuf:utility_lib", + "//source/common/version:version_lib", "//source/extensions/filters/network/redis_proxy:config", "//test/common/grpc:grpc_client_integration_lib", "//test/test_common:network_utility_lib", @@ -48,7 +51,9 @@ envoy_cc_test_library( envoy_cc_test( name = "ads_integration_test", + size = "enormous", srcs = 
["ads_integration_test.cc"], + tags = ["fails_on_windows"], deps = [ ":ads_integration_lib", ":http_integration_lib", @@ -69,6 +74,7 @@ envoy_cc_test( envoy_cc_test( name = "api_listener_integration_test", srcs = ["api_listener_integration_test.cc"], + tags = ["fails_on_windows"], deps = [ ":http_integration_lib", "//test/mocks/http:stream_encoder_mock", @@ -79,6 +85,7 @@ envoy_cc_test( envoy_cc_test( name = "api_version_integration_test", srcs = ["api_version_integration_test.cc"], + tags = ["fails_on_windows"], deps = [ ":http_integration_lib", "@envoy_api//envoy/api/v2:pkg_cc_proto", @@ -103,19 +110,24 @@ envoy_proto_library( srcs = [":capture_fuzz.proto"], ) +envoy_proto_library( + name = "h2_capture_fuzz_proto", + srcs = [":h2_capture_fuzz.proto"], +) + envoy_cc_test( name = "cds_integration_test", srcs = ["cds_integration_test.cc"], data = [ "//test/config/integration/certs", ], + tags = ["fails_on_windows"], deps = [ ":http_integration_lib", "//source/common/config:protobuf_link_hacks", "//source/common/protobuf:utility_lib", "//test/common/grpc:grpc_client_integration_lib", "//test/mocks/runtime:runtime_mocks", - "//test/mocks/server:server_mocks", "//test/test_common:network_utility_lib", "//test/test_common:resources_lib", "//test/test_common:utility_lib", @@ -154,6 +166,7 @@ envoy_cc_test( srcs = [ "filter_manager_integration_test.cc", ], + tags = ["fails_on_windows"], deps = [ ":filter_manager_integration_proto_cc_proto", ":http_integration_lib", @@ -173,6 +186,7 @@ envoy_cc_test( envoy_cc_test( name = "cluster_filter_integration_test", srcs = ["cluster_filter_integration_test.cc"], + tags = ["fails_on_windows"], deps = [ ":integration_lib", "//include/envoy/network:filter_interface", @@ -186,6 +200,7 @@ envoy_cc_test( envoy_cc_test( name = "custom_cluster_integration_test", srcs = ["custom_cluster_integration_test.cc"], + tags = ["fails_on_windows"], deps = [ ":http_integration_lib", "//source/common/upstream:load_balancer_lib", @@ -203,13 +218,15 @@ 
envoy_cc_test( data = [ "//test/config/integration/certs", ], + tags = [ + "fails_on_windows", + ], deps = [ ":http_integration_lib", "//source/common/config:protobuf_link_hacks", "//source/common/protobuf:utility_lib", "//test/common/grpc:grpc_client_integration_lib", "//test/mocks/runtime:runtime_mocks", - "//test/mocks/server:server_mocks", "//test/test_common:network_utility_lib", "//test/test_common:resources_lib", "//test/test_common:utility_lib", @@ -217,16 +234,43 @@ envoy_cc_test( ], ) +envoy_cc_test( + name = "drain_close_integration_test", + srcs = [ + "drain_close_integration_test.cc", + ], + tags = ["fails_on_windows"], + deps = [ + ":http_protocol_integration_lib", + "//source/extensions/filters/http/health_check:config", + "//test/test_common:utility_lib", + ], +) + exports_files(["test_utility.sh"]) +envoy_cc_test_binary( + name = "hotrestart_main", + srcs = ["hotrestart_main.cc"], + external_deps = [ + "abseil_symbolize", + ], + stamped = True, + deps = [ + "//source/exe:envoy_main_common_with_core_extensions_lib", + "//source/exe:platform_impl_lib", + ], +) + envoy_sh_test( name = "hotrestart_test", + size = "enormous", srcs = envoy_select_hot_restart([ "hotrestart_test.sh", ]), + cc_binary = [":hotrestart_main"], data = [ "test_utility.sh", - "//source/exe:envoy-static", "//test/config/integration:server_config_files", "//tools:socket_passing", ], @@ -237,13 +281,29 @@ envoy_sh_test( envoy_sh_test( name = "run_envoy_test", srcs = ["run_envoy_test.sh"], + cc_binary = [":hotrestart_main"], data = [ "test_utility.sh", - "//source/exe:envoy-static", "//test/config/integration:server_config_files", ], - # TODO: This script invocation does not work on Windows, see: https://github.com/bazelbuild/bazel/issues/10959 +) + +envoy_cc_test( + name = "alpn_selection_integration_test", + srcs = [ + "alpn_selection_integration_test.cc", + ], + data = [ + "//test/config/integration/certs", + ], tags = ["fails_on_windows"], + deps = [ + ":http_integration_lib", + 
"//test/test_common:utility_lib", + "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", + "@envoy_api//envoy/config/route/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/transport_sockets/tls/v3:pkg_cc_proto", + ], ) envoy_cc_test( @@ -251,6 +311,7 @@ envoy_cc_test( srcs = [ "header_integration_test.cc", ], + tags = ["fails_on_windows"], deps = [ ":http_integration_lib", "//source/common/config:api_version_lib", @@ -271,6 +332,8 @@ envoy_cc_test( "http2_integration_test.cc", "http2_integration_test.h", ], + shard_count = 4, + tags = ["fails_on_windows"], deps = [ ":http_integration_lib", "//source/common/buffer:buffer_lib", @@ -296,6 +359,7 @@ envoy_cc_test( srcs = [ "http_subset_lb_integration_test.cc", ], + tags = ["fails_on_windows"], deps = [ ":http_integration_lib", "//test/common/upstream:utility_lib", @@ -314,6 +378,7 @@ envoy_cc_test( data = [ "//test/config/integration/certs", ], + tags = ["fails_on_windows"], deps = [ ":http_integration_lib", "//source/extensions/transport_sockets/tls:context_lib", @@ -330,6 +395,7 @@ envoy_cc_test( srcs = [ "header_casing_integration_test.cc", ], + tags = ["fails_on_windows"], deps = [ ":http_integration_lib", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", @@ -343,6 +409,7 @@ envoy_cc_test( "http_timeout_integration_test.cc", "http_timeout_integration_test.h", ], + tags = ["fails_on_windows"], deps = [ ":http_integration_lib", "@envoy_api//envoy/extensions/filters/http/router/v3:pkg_cc_proto", @@ -358,6 +425,7 @@ envoy_cc_test( # As this test has many H1/H2/v4/v6 tests it takes a while to run. # Shard it enough to bring the run time in line with other integration tests. 
shard_count = 5, + tags = ["fails_on_windows"], deps = [ ":http_protocol_integration_lib", "//source/common/http:header_map_lib", @@ -378,6 +446,7 @@ envoy_cc_test( "http2_upstream_integration_test.cc", "http2_upstream_integration_test.h", ], + tags = ["fails_on_windows"], deps = [ ":http_integration_lib", "//source/common/http:header_map_lib", @@ -398,9 +467,11 @@ envoy_cc_test( "integration_admin_test.cc", "integration_admin_test.h", ], + tags = ["fails_on_windows"], deps = [ ":http_protocol_integration_lib", "//include/envoy/http:header_map_interface", + "//source/common/stats:histogram_lib", "//source/common/stats:stats_matcher_lib", "//source/extensions/filters/http/buffer:config", "//source/extensions/filters/http/health_check:config", @@ -441,6 +512,7 @@ envoy_cc_test_library( "//source/extensions/filters/http/router:config", "//source/extensions/filters/network/http_connection_manager:config", "//test/common/upstream:utility_lib", + "//test/integration/filters:add_body_filter_config_lib", "//test/integration/filters:add_trailers_filter_config_lib", "//test/integration/filters:call_decodedata_once_filter_config_lib", "//test/integration/filters:decode_headers_return_stop_all_filter_config_lib", @@ -449,6 +521,7 @@ envoy_cc_test_library( "//test/integration/filters:modify_buffer_filter_config_lib", "//test/integration/filters:passthrough_filter_config_lib", "//test/integration/filters:pause_filter_lib", + "//test/integration/filters:wait_for_whole_request_and_response_config_lib", "//test/test_common:registry_lib", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto", @@ -475,8 +548,10 @@ envoy_cc_test( # As this test has many pauses for idle timeouts, it takes a while to run. # Shard it enough to bring the run time in line with other integration tests. 
shard_count = 2, + tags = ["fails_on_windows"], deps = [ ":http_protocol_integration_lib", + "//test/integration/filters:backpressure_filter_config_lib", "//test/test_common:test_time_lib", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto", @@ -501,6 +576,10 @@ envoy_cc_test_library( "ssl_utility.h", "utility.h", ], + copts = envoy_select_new_codecs_in_integration_tests( + ["-DENVOY_USE_NEW_CODECS_IN_INTEGRATION_TESTS"], + "@envoy", + ), data = ["//test/common/runtime:filesystem_test_data"], deps = [ ":server_stats_interface", @@ -522,6 +601,7 @@ envoy_cc_test_library( "//source/common/buffer:buffer_lib", "//source/common/buffer:zero_copy_input_stream_lib", "//source/common/common:assert_lib", + "//source/common/common:basic_resource_lib", "//source/common/common:minimal_logger_lib", "//source/common/config:api_version_lib", "//source/common/config:version_converter_lib", @@ -531,7 +611,9 @@ envoy_cc_test_library( "//source/common/http:codec_client_lib", "//source/common/http:header_map_lib", "//source/common/http:headers_lib", + "//source/common/http/http1:codec_legacy_lib", "//source/common/http/http1:codec_lib", + "//source/common/http/http2:codec_legacy_lib", "//source/common/http/http2:codec_lib", "//source/common/local_info:local_info_lib", "//source/common/network:filter_lib", @@ -548,7 +630,9 @@ envoy_cc_test_library( "//source/extensions/transport_sockets/raw_buffer:config", "//source/extensions/transport_sockets/tap:config", "//source/extensions/transport_sockets/tls:config", + "//source/extensions/transport_sockets/tls:context_lib", "//source/server:connection_handler_lib", + "//source/server:drain_manager_lib", "//source/server:hot_restart_nop_lib", "//source/server:listener_hooks_lib", "//source/server:process_context_lib", @@ -558,7 +642,7 @@ envoy_cc_test_library( "//test/common/upstream:utility_lib", "//test/config:utility_lib", 
"//test/mocks/buffer:buffer_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:transport_socket_factory_context_mocks", "//test/mocks/stats:stats_mocks", "//test/mocks/upstream:upstream_mocks", "//test/test_common:environment_lib", @@ -573,6 +657,7 @@ envoy_cc_test_library( "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", "@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto", + "@envoy_api//envoy/config/listener/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/transport_sockets/tls/v3:pkg_cc_proto", ], ) @@ -596,6 +681,7 @@ envoy_cc_test( "//test/integration/filters:clear_route_cache_filter_lib", "//test/integration/filters:encoder_decoder_buffer_filter_lib", "//test/integration/filters:process_context_lib", + "//test/integration/filters:stop_iteration_and_continue", "//test/mocks/http:http_mocks", "//test/test_common:utility_lib", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", @@ -609,12 +695,19 @@ envoy_cc_test( srcs = [ "redirect_integration_test.cc", ], + tags = ["fails_on_windows"], deps = [ ":http_protocol_integration_lib", "//source/common/http:header_map_lib", + "//source/extensions/internal_redirect/allow_listed_routes:config", + "//source/extensions/internal_redirect/previous_routes:config", + "//source/extensions/internal_redirect/safe_cross_scheme:config", "//test/test_common:utility_lib", "@envoy_api//envoy/config/route/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/internal_redirect/allow_listed_routes/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/internal_redirect/previous_routes/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/internal_redirect/safe_cross_scheme/v3:pkg_cc_proto", ], ) @@ -659,12 +752,24 @@ envoy_cc_test( ], ) +envoy_cc_test( + name = "socket_interface_integration_test", + srcs = ["socket_interface_integration_test.cc"], + tags = ["fails_on_windows"], + deps = [ + 
":http_integration_lib", + "//source/common/network:socket_interface_lib", + "//source/extensions/filters/network/echo:config", + ], +) + envoy_cc_test( name = "stats_integration_test", srcs = ["stats_integration_test.cc"], # The symbol table cluster memory tests take a while to run specially under tsan. # Shard it to avoid test timeout. shard_count = 2, + tags = ["fails_on_windows"], deps = [ ":integration_lib", "//source/common/memory:stats_lib", @@ -682,6 +787,7 @@ envoy_cc_test( envoy_cc_test( name = "load_stats_integration_test", srcs = ["load_stats_integration_test.cc"], + tags = ["fails_on_windows"], deps = [ ":http_integration_lib", "//test/config:utility_lib", @@ -699,6 +805,8 @@ envoy_cc_test( envoy_cc_test( name = "hds_integration_test", srcs = ["hds_integration_test.cc"], + shard_count = 2, + tags = ["fails_on_windows"], deps = [ ":http_integration_lib", ":integration_lib", @@ -722,6 +830,7 @@ envoy_cc_test( name = "header_prefix_integration_test", srcs = ["header_prefix_integration_test.cc"], coverage = False, + tags = ["fails_on_windows"], deps = [ ":http_protocol_integration_lib", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", @@ -731,6 +840,7 @@ envoy_cc_test( envoy_cc_test( name = "overload_integration_test", srcs = ["overload_integration_test.cc"], + tags = ["fails_on_windows"], deps = [ ":http_protocol_integration_lib", "//source/extensions/resource_monitors/injected_resource:config", @@ -745,6 +855,7 @@ envoy_cc_test( "proxy_proto_integration_test.cc", "proxy_proto_integration_test.h", ], + tags = ["fails_on_windows"], deps = [ ":http_integration_lib", "//source/common/buffer:buffer_lib", @@ -757,30 +868,28 @@ envoy_cc_test( ) envoy_cc_test( - name = "ratelimit_integration_test", - srcs = ["ratelimit_integration_test.cc"], + name = "rtds_integration_test", + srcs = ["rtds_integration_test.cc"], + tags = ["fails_on_windows"], deps = [ ":http_integration_lib", - "//source/common/buffer:zero_copy_input_stream_lib", - 
"//source/common/grpc:codec_lib", - "//source/common/grpc:common_lib", - "//source/extensions/filters/http/ratelimit:config", "//test/common/grpc:grpc_client_integration_lib", - "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", - "@envoy_api//envoy/config/listener/v3:pkg_cc_proto", - "@envoy_api//envoy/extensions/filters/http/ratelimit/v3:pkg_cc_proto", - "@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto", - "@envoy_api//envoy/service/ratelimit/v3:pkg_cc_proto", + "@envoy_api//envoy/service/runtime/v3:pkg_cc_proto", ], ) envoy_cc_test( - name = "rtds_integration_test", - srcs = ["rtds_integration_test.cc"], + name = "extension_discovery_integration_test", + srcs = ["extension_discovery_integration_test.cc"], + tags = ["fails_on_windows"], deps = [ ":http_integration_lib", "//test/common/grpc:grpc_client_integration_lib", - "@envoy_api//envoy/service/runtime/v3:pkg_cc_proto", + "//test/integration/filters:set_response_code_filter_config_proto_cc_proto", + "//test/integration/filters:set_response_code_filter_lib", + "//test/test_common:utility_lib", + "@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto", + "@envoy_api//envoy/service/extension/v3:pkg_cc_proto", ], ) @@ -798,6 +907,7 @@ envoy_cc_test( data = [ "//test/config/integration/certs", ], + tags = ["fails_on_windows"], deps = [ ":http_integration_lib", "//source/common/event:dispatcher_includes", @@ -822,6 +932,7 @@ envoy_cc_test( data = [ "//test/config/integration/certs", ], + tags = ["fails_on_windows"], deps = [ ":http_integration_lib", "//source/common/config:api_version_lib", @@ -851,6 +962,7 @@ envoy_cc_test( srcs = [ "sds_generic_secret_integration_test.cc", ], + tags = ["fails_on_windows"], deps = [ ":http_integration_lib", "//include/envoy/registry", @@ -872,6 +984,8 @@ envoy_cc_test( data = [ "//test/config/integration/certs", ], + shard_count = 2, + tags = ["fails_on_windows"], deps = [ ":integration_lib", 
"//source/common/config:api_version_lib", @@ -903,6 +1017,7 @@ envoy_cc_test( data = [ "//test/config/integration/certs", ], + tags = ["fails_on_windows"], deps = [ ":http_integration_lib", ":http_protocol_integration_lib", @@ -917,6 +1032,7 @@ envoy_cc_test( srcs = [ "tcp_conn_pool_integration_test.cc", ], + tags = ["fails_on_windows"], deps = [ ":integration_lib", "//include/envoy/server:filter_config_interface", @@ -943,6 +1059,7 @@ envoy_cc_test( "uds_integration_test.cc", "uds_integration_test.h", ], + tags = ["fails_on_windows"], deps = [ ":http_integration_lib", "//source/common/event:dispatcher_includes", @@ -967,6 +1084,7 @@ envoy_cc_test( name = "dynamic_validation_integration_test", srcs = ["dynamic_validation_integration_test.cc"], data = ["//test/config/integration:server_xds_files"], + tags = ["fails_on_windows"], deps = [ ":http_integration_lib", "//source/common/stats:stats_lib", @@ -978,10 +1096,20 @@ envoy_cc_test( envoy_cc_test( name = "xds_integration_test", srcs = ["xds_integration_test.cc"], - data = ["//test/config/integration:server_xds_files"], + data = [ + "//test/config/integration:server_xds_files", + "//test/config/integration/certs", + ], + tags = ["fails_on_windows"], deps = [ ":http_integration_lib", ":http_protocol_integration_lib", + "//source/extensions/filters/listener/tls_inspector:config", + "//source/extensions/filters/listener/tls_inspector:tls_inspector_lib", + "//source/extensions/filters/network/tcp_proxy:config", + "//source/extensions/transport_sockets/tls:config", + "//source/extensions/transport_sockets/tls:context_config_lib", + "//source/extensions/transport_sockets/tls:context_lib", "//test/test_common:environment_lib", "//test/test_common:utility_lib", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", @@ -998,11 +1126,12 @@ envoy_cc_test( data = [ "//test/config/integration/certs", ], + tags = ["fails_on_windows"], deps = [ ":http_integration_lib", "//source/common/http:header_map_lib", 
"//source/extensions/transport_sockets/tls:config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:transport_socket_factory_context_mocks", "//test/test_common:utility_lib", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto", @@ -1071,11 +1200,75 @@ envoy_cc_fuzz_test( ], ) +H2_FUZZ_LIB_DEPS = [ + ":h2_capture_fuzz_proto_cc_proto", + ":http_integration_lib", + "//source/common/common:assert_lib", + "//source/common/common:logger_lib", + "//test/common/http/http2:http2_frame", + "//test/fuzz:fuzz_runner_lib", + "//test/fuzz:utility_lib", + "//test/integration:integration_lib", + "//test/test_common:environment_lib", +] + +envoy_cc_test_library( + name = "h2_fuzz_lib", + srcs = ["h2_fuzz.cc"], + hdrs = ["h2_fuzz.h"], + deps = H2_FUZZ_LIB_DEPS, +) + +envoy_cc_test_library( + name = "h2_fuzz_persistent_lib", + srcs = ["h2_fuzz.cc"], + hdrs = ["h2_fuzz.h"], + copts = ["-DPERSISTENT_FUZZER"], + deps = H2_FUZZ_LIB_DEPS, +) + +envoy_cc_fuzz_test( + name = "h2_capture_fuzz_test", + srcs = ["h2_capture_fuzz_test.cc"], + corpus = "h2_corpus", + deps = [":h2_fuzz_lib"], +) + +envoy_cc_fuzz_test( + name = "h2_capture_persistent_fuzz_test", + srcs = ["h2_capture_fuzz_test.cc"], + copts = ["-DPERSISTENT_FUZZER"], + corpus = "h2_corpus", + deps = [":h2_fuzz_persistent_lib"], +) + +envoy_cc_fuzz_test( + name = "h2_capture_direct_response_fuzz_test", + srcs = ["h2_capture_direct_response_fuzz_test.cc"], + corpus = "h2_corpus", + deps = [ + ":h2_fuzz_lib", + "@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto", + ], +) + +envoy_cc_fuzz_test( + name = "h2_capture_direct_response_persistent_fuzz_test", + srcs = ["h2_capture_direct_response_fuzz_test.cc"], + copts = ["-DPERSISTENT_FUZZER"], + corpus = "h2_corpus", + deps = [ + ":h2_fuzz_persistent_lib", + "@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto", + ], +) + 
envoy_cc_test( name = "scoped_rds_integration_test", srcs = [ "scoped_rds_integration_test.cc", ], + tags = ["fails_on_windows"], deps = [ ":http_integration_lib", "//source/common/config:api_version_lib", @@ -1099,6 +1292,7 @@ envoy_cc_test( srcs = [ "listener_lds_integration_test.cc", ], + tags = ["fails_on_windows"], deps = [ ":http_integration_lib", "//source/common/config:api_version_lib", @@ -1125,6 +1319,7 @@ envoy_cc_test( data = [ "//test/config/integration/certs", ], + tags = ["fails_on_windows"], deps = [ ":integration_lib", "//source/common/config:api_version_lib", @@ -1147,3 +1342,33 @@ envoy_cc_test( "@envoy_api//envoy/extensions/access_loggers/file/v3:pkg_cc_proto", ], ) + +envoy_cc_test( + name = "cx_limit_integration_test", + srcs = ["cx_limit_integration_test.cc"], + # TODO(11841) See if this can be reenabled once the test is deflaked. + tags = ["fails_on_windows"], + deps = [ + ":http_integration_lib", + "//include/envoy/network:filter_interface", + "//include/envoy/registry", + "//source/extensions/filters/network/tcp_proxy:config", + "//test/config:utility_lib", + "//test/test_common:logging_lib", + "//test/test_common:simulated_time_system_lib", + "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", + ], +) + +envoy_cc_test( + name = "local_reply_integration_test", + srcs = [ + "local_reply_integration_test.cc", + ], + tags = ["fails_on_windows"], + deps = [ + ":http_integration_lib", + ":http_protocol_integration_lib", + "//test/test_common:utility_lib", + ], +) diff --git a/test/integration/README.md b/test/integration/README.md index ef24fa0e439ef..d3cffc3d5412c 100644 --- a/test/integration/README.md +++ b/test/integration/README.md @@ -160,7 +160,7 @@ The full command might look something like ``` bazel test //test/integration:http2_upstream_integration_test \ --test_arg=--gtest_filter="IpVersions/Http2UpstreamIntegrationTest.RouterRequestAndResponseWithBodyNoBuffer/IPv6" \ ---jobs 60 --local_resources 100000000000,100000000000,10000000 
--runs_per_test=1000 --test_arg="-l trace" +--jobs 60 --local_test_jobs=60 --runs_per_test=1000 --test_arg="-l trace" ``` ## Debugging test flakes diff --git a/test/integration/ads_integration.cc b/test/integration/ads_integration.cc index 6c91a4df608cc..7d81b1de0a1b4 100644 --- a/test/integration/ads_integration.cc +++ b/test/integration/ads_integration.cc @@ -20,124 +20,76 @@ using testing::AssertionResult; namespace Envoy { -AdsIntegrationTest::AdsIntegrationTest() +AdsIntegrationTest::AdsIntegrationTest(const envoy::config::core::v3::ApiVersion api_version) : HttpIntegrationTest( Http::CodecClient::Type::HTTP2, ipVersion(), - AdsIntegrationConfig(sotwOrDelta() == Grpc::SotwOrDelta::Sotw ? "GRPC" : "DELTA_GRPC")) { + ConfigHelper::adsBootstrap( + sotwOrDelta() == Grpc::SotwOrDelta::Sotw ? "GRPC" : "DELTA_GRPC", api_version)) { use_lds_ = false; create_xds_upstream_ = true; tls_xds_upstream_ = true; sotw_or_delta_ = sotwOrDelta(); + api_version_ = api_version; } -void AdsIntegrationTest::TearDown() { - cleanUpXdsConnection(); - test_server_.reset(); - fake_upstreams_.clear(); -} +void AdsIntegrationTest::TearDown() { cleanUpXdsConnection(); } envoy::config::cluster::v3::Cluster AdsIntegrationTest::buildCluster(const std::string& name) { - return TestUtility::parseYaml(fmt::format(R"EOF( - name: {} - connect_timeout: 5s - type: EDS - eds_cluster_config: {{ eds_config: {{ ads: {{}} }} }} - lb_policy: ROUND_ROBIN - http2_protocol_options: {{}} - )EOF", - name)); + return ConfigHelper::buildCluster(name, "ROUND_ROBIN", api_version_); +} + +envoy::config::cluster::v3::Cluster AdsIntegrationTest::buildTlsCluster(const std::string& name) { + return ConfigHelper::buildTlsCluster(name, "ROUND_ROBIN", api_version_); } envoy::config::cluster::v3::Cluster AdsIntegrationTest::buildRedisCluster(const std::string& name) { - return TestUtility::parseYaml(fmt::format(R"EOF( - name: {} - connect_timeout: 5s - type: EDS - eds_cluster_config: {{ eds_config: {{ ads: {{}} }} }} - 
lb_policy: MAGLEV - )EOF", - name)); + return ConfigHelper::buildCluster(name, "MAGLEV", api_version_); } envoy::config::endpoint::v3::ClusterLoadAssignment AdsIntegrationTest::buildClusterLoadAssignment(const std::string& name) { - return TestUtility::parseYaml( - fmt::format(R"EOF( - cluster_name: {} - endpoints: - - lb_endpoints: - - endpoint: - address: - socket_address: - address: {} - port_value: {} - )EOF", - name, Network::Test::getLoopbackAddressString(ipVersion()), - fake_upstreams_[0]->localAddress()->ip()->port())); + return ConfigHelper::buildClusterLoadAssignment( + name, Network::Test::getLoopbackAddressString(ipVersion()), + fake_upstreams_[0]->localAddress()->ip()->port(), api_version_); +} + +envoy::config::endpoint::v3::ClusterLoadAssignment +AdsIntegrationTest::buildTlsClusterLoadAssignment(const std::string& name) { + return ConfigHelper::buildClusterLoadAssignment( + name, Network::Test::getLoopbackAddressString(ipVersion()), 8443, api_version_); } envoy::config::listener::v3::Listener AdsIntegrationTest::buildListener(const std::string& name, const std::string& route_config, const std::string& stat_prefix) { - return TestUtility::parseYaml(fmt::format( - R"EOF( - name: {} - address: - socket_address: - address: {} - port_value: 0 - filter_chains: - filters: - - name: http - typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager - stat_prefix: {} - codec_type: HTTP2 - rds: - route_config_name: {} - config_source: {{ ads: {{}} }} - http_filters: [{{ name: envoy.filters.http.router }}] - )EOF", - name, Network::Test::getLoopbackAddressString(ipVersion()), stat_prefix, route_config)); + return ConfigHelper::buildListener(name, route_config, + Network::Test::getLoopbackAddressString(ipVersion()), + stat_prefix, api_version_); } envoy::config::listener::v3::Listener AdsIntegrationTest::buildRedisListener(const std::string& name, const std::string& cluster) { - return 
TestUtility::parseYaml(fmt::format( + std::string redis = fmt::format( R"EOF( - name: {} - address: - socket_address: - address: {} - port_value: 0 - filter_chains: filters: - name: redis typed_config: "@type": type.googleapis.com/envoy.config.filter.network.redis_proxy.v2.RedisProxy - settings: + settings: op_timeout: 1s stat_prefix: {} prefix_routes: - catch_all_route: + catch_all_route: cluster: {} )EOF", - name, Network::Test::getLoopbackAddressString(ipVersion()), name, cluster)); + name, cluster); + return ConfigHelper::buildBaseListener(name, Network::Test::getLoopbackAddressString(ipVersion()), + redis, api_version_); } envoy::config::route::v3::RouteConfiguration AdsIntegrationTest::buildRouteConfig(const std::string& name, const std::string& cluster) { - return TestUtility::parseYaml(fmt::format(R"EOF( - name: {} - virtual_hosts: - - name: integration - domains: ["*"] - routes: - - match: {{ prefix: "/" }} - route: {{ cluster: {} }} - )EOF", - name, - cluster)); + return ConfigHelper::buildRouteConfig(name, cluster, api_version_); } void AdsIntegrationTest::makeSingleRequest() { diff --git a/test/integration/ads_integration.h b/test/integration/ads_integration.h index c9f71fa5234c3..0da99aea566a1 100644 --- a/test/integration/ads_integration.h +++ b/test/integration/ads_integration.h @@ -10,62 +10,30 @@ #include "envoy/config/route/v3/route.pb.h" #include "test/common/grpc/grpc_client_integration.h" +#include "test/config/utility.h" #include "test/integration/http_integration.h" -// TODO(fredlas) set_node_on_first_message_only was true; the delta+SotW unification -// work restores it here. namespace Envoy { -static std::string AdsIntegrationConfig(const std::string& api_type) { - // Note: do not use CONSTRUCT_ON_FIRST_USE here! 
- return fmt::format(R"EOF( -dynamic_resources: - lds_config: - ads: {{}} - cds_config: - ads: {{}} - ads_config: - api_type: {} - set_node_on_first_message_only: false -static_resources: - clusters: - name: dummy_cluster - connect_timeout: - seconds: 5 - type: STATIC - load_assignment: - cluster_name: dummy_cluster - endpoints: - - lb_endpoints: - - endpoint: - address: - socket_address: - address: 127.0.0.1 - port_value: 0 - lb_policy: ROUND_ROBIN - http2_protocol_options: {{}} -admin: - access_log_path: /dev/null - address: - socket_address: - address: 127.0.0.1 - port_value: 0 -)EOF", - api_type); -} class AdsIntegrationTest : public Grpc::DeltaSotwIntegrationParamTest, public HttpIntegrationTest { public: - AdsIntegrationTest(); + AdsIntegrationTest(const envoy::config::core::v3::ApiVersion api_version); + AdsIntegrationTest() : AdsIntegrationTest(envoy::config::core::v3::ApiVersion::V2) {} void TearDown() override; envoy::config::cluster::v3::Cluster buildCluster(const std::string& name); + envoy::config::cluster::v3::Cluster buildTlsCluster(const std::string& name); + envoy::config::cluster::v3::Cluster buildRedisCluster(const std::string& name); envoy::config::endpoint::v3::ClusterLoadAssignment buildClusterLoadAssignment(const std::string& name); + envoy::config::endpoint::v3::ClusterLoadAssignment + buildTlsClusterLoadAssignment(const std::string& name); + envoy::config::listener::v3::Listener buildListener(const std::string& name, const std::string& route_config, const std::string& stat_prefix = "ads_test"); @@ -86,6 +54,8 @@ class AdsIntegrationTest : public Grpc::DeltaSotwIntegrationParamTest, public Ht envoy::admin::v3::ClustersConfigDump getClustersConfigDump(); envoy::admin::v3::ListenersConfigDump getListenersConfigDump(); envoy::admin::v3::RoutesConfigDump getRoutesConfigDump(); + + envoy::config::core::v3::ApiVersion api_version_; }; } // namespace Envoy diff --git a/test/integration/ads_integration_test.cc 
b/test/integration/ads_integration_test.cc index 9153bce772a05..bf413b9d91d59 100644 --- a/test/integration/ads_integration_test.cc +++ b/test/integration/ads_integration_test.cc @@ -5,11 +5,11 @@ #include "envoy/config/route/v3/route.pb.h" #include "envoy/grpc/status.h" -#include "common/common/version.h" #include "common/config/protobuf_link_hacks.h" #include "common/config/version_converter.h" #include "common/protobuf/protobuf.h" #include "common/protobuf/utility.h" +#include "common/version/version.h" #include "test/common/grpc/grpc_client_integration.h" #include "test/integration/ads_integration.h" @@ -182,6 +182,60 @@ TEST_P(AdsIntegrationTest, RdsAfterLdsWithNoRdsChanges) { makeSingleRequest(); } +// Regression test for #11877, validate behavior of EDS updates when a cluster is updated and +// an active cluster is replaced by a newer cluster undergoing warming. +TEST_P(AdsIntegrationTest, CdsEdsReplacementWarming) { + initialize(); + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "", {}, {}, {}, true)); + sendDiscoveryResponse(Config::TypeUrl::get().Cluster, + {buildCluster("cluster_0")}, + {buildCluster("cluster_0")}, {}, "1"); + + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, "", + {"cluster_0"}, {"cluster_0"}, {})); + sendDiscoveryResponse( + Config::TypeUrl::get().ClusterLoadAssignment, {buildClusterLoadAssignment("cluster_0")}, + {buildClusterLoadAssignment("cluster_0")}, {}, "1"); + + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "1", {}, {}, {})); + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Listener, "", {}, {}, {})); + sendDiscoveryResponse( + Config::TypeUrl::get().Listener, {buildListener("listener_0", "route_config_0")}, + {buildListener("listener_0", "route_config_0")}, {}, "1"); + + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, "1", + {"cluster_0"}, {}, {})); + 
EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().RouteConfiguration, "", + {"route_config_0"}, {"route_config_0"}, {})); + sendDiscoveryResponse( + Config::TypeUrl::get().RouteConfiguration, {buildRouteConfig("route_config_0", "cluster_0")}, + {buildRouteConfig("route_config_0", "cluster_0")}, {}, "1"); + + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Listener, "1", {}, {}, {})); + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().RouteConfiguration, "1", + {"route_config_0"}, {}, {})); + + test_server_->waitForCounterGe("listener_manager.listener_create_success", 1); + makeSingleRequest(); + + sendDiscoveryResponse( + Config::TypeUrl::get().Cluster, {buildTlsCluster("cluster_0")}, + {buildTlsCluster("cluster_0")}, {}, "2"); + // Inconsistent SotW and delta behaviors for warming, see + // https://github.com/envoyproxy/envoy/issues/11477#issuecomment-657855029. + if (sotw_or_delta_ != Grpc::SotwOrDelta::Delta) { + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, "1", + {"cluster_0"}, {}, {})); + } + sendDiscoveryResponse( + Config::TypeUrl::get().ClusterLoadAssignment, {buildTlsClusterLoadAssignment("cluster_0")}, + {buildTlsClusterLoadAssignment("cluster_0")}, {}, "2"); + + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "2", {}, {}, {}, true)); + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, "2", + {"cluster_0"}, {}, {})); +} + // Validate that the request with duplicate clusters in the initial request during server init is // rejected. TEST_P(AdsIntegrationTest, DuplicateInitialClusters) { @@ -321,16 +375,12 @@ TEST_P(AdsIntegrationTest, CdsPausedDuringWarming) { test_server_->waitForCounterGe("listener_manager.listener_create_success", 1); makeSingleRequest(); - EXPECT_FALSE( - test_server_->server().clusterManager().adsMux()->paused(Config::TypeUrl::get().Cluster)); // Send the first warming cluster. 
sendDiscoveryResponse( Config::TypeUrl::get().Cluster, {buildCluster("warming_cluster_1")}, {buildCluster("warming_cluster_1")}, {"cluster_0"}, "2"); test_server_->waitForGaugeEq("cluster_manager.warming_clusters", 1); - EXPECT_TRUE( - test_server_->server().clusterManager().adsMux()->paused(Config::TypeUrl::get().Cluster)); EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, "1", {"warming_cluster_1"}, {"warming_cluster_1"}, {"cluster_0"})); @@ -346,8 +396,6 @@ TEST_P(AdsIntegrationTest, CdsPausedDuringWarming) { {"warming_cluster_2", "warming_cluster_1"}, {"warming_cluster_2"}, {})); - EXPECT_TRUE( - test_server_->server().clusterManager().adsMux()->paused(Config::TypeUrl::get().Cluster)); // Finish warming the clusters. sendDiscoveryResponse( Config::TypeUrl::get().ClusterLoadAssignment, @@ -359,8 +407,6 @@ TEST_P(AdsIntegrationTest, CdsPausedDuringWarming) { // Validate that clusters are warmed. test_server_->waitForGaugeEq("cluster_manager.warming_clusters", 0); - EXPECT_FALSE( - test_server_->server().clusterManager().adsMux()->paused(Config::TypeUrl::get().Cluster)); // CDS is resumed and EDS response was acknowledged. if (sotw_or_delta_ == Grpc::SotwOrDelta::Delta) { @@ -374,6 +420,68 @@ TEST_P(AdsIntegrationTest, CdsPausedDuringWarming) { {"warming_cluster_2", "warming_cluster_1"}, {}, {})); } +// Validate that warming listeners are removed when left out of SOTW update. +TEST_P(AdsIntegrationTest, RemoveWarmingListener) { + initialize(); + + // Send initial configuration to start workers, validate we can process a request. 
+ EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "", {}, {}, {}, true)); + sendDiscoveryResponse(Config::TypeUrl::get().Cluster, + {buildCluster("cluster_0")}, + {buildCluster("cluster_0")}, {}, "1"); + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, "", + {"cluster_0"}, {"cluster_0"}, {})); + + sendDiscoveryResponse( + Config::TypeUrl::get().ClusterLoadAssignment, {buildClusterLoadAssignment("cluster_0")}, + {buildClusterLoadAssignment("cluster_0")}, {}, "1"); + + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "1", {}, {}, {})); + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Listener, "", {}, {}, {})); + sendDiscoveryResponse( + Config::TypeUrl::get().Listener, {buildListener("listener_0", "route_config_0")}, + {buildListener("listener_0", "route_config_0")}, {}, "1"); + + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, "1", + {"cluster_0"}, {}, {})); + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().RouteConfiguration, "", + {"route_config_0"}, {"route_config_0"}, {})); + sendDiscoveryResponse( + Config::TypeUrl::get().RouteConfiguration, {buildRouteConfig("route_config_0", "cluster_0")}, + {buildRouteConfig("route_config_0", "cluster_0")}, {}, "1"); + + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Listener, "1", {}, {}, {})); + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().RouteConfiguration, "1", + {"route_config_0"}, {}, {})); + + test_server_->waitForCounterGe("listener_manager.listener_create_success", 1); + makeSingleRequest(); + + // Send a listener without its route, so it will be added as warming. 
+ sendDiscoveryResponse( + Config::TypeUrl::get().Listener, + {buildListener("listener_0", "route_config_0"), + buildListener("warming_listener_1", "nonexistent_route")}, + {buildListener("warming_listener_1", "nonexistent_route")}, {}, "2"); + test_server_->waitForGaugeEq("listener_manager.total_listeners_warming", 1); + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().RouteConfiguration, "1", + {"nonexistent_route", "route_config_0"}, + {"nonexistent_route"}, {})); + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Listener, "2", {}, {}, {})); + + // Send a request removing the warming listener. + sendDiscoveryResponse( + Config::TypeUrl::get().Listener, {buildListener("listener_0", "route_config_0")}, + {buildListener("listener_0", "route_config_0")}, {"warming_listener_1"}, "3"); + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().RouteConfiguration, "1", + {"route_config_0"}, {}, {"nonexistent_route"})); + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Listener, "3", {}, {}, {})); + + // The warming listener should be successfully removed. + test_server_->waitForCounterEq("listener_manager.listener_removed", 1); + test_server_->waitForGaugeEq("listener_manager.total_listeners_warming", 0); +} + // Verify cluster warming is finished only on named EDS response. TEST_P(AdsIntegrationTest, ClusterWarmingOnNamedResponse) { initialize(); @@ -594,18 +702,15 @@ class AdsFailIntegrationTest : public Grpc::DeltaSotwIntegrationParamTest, public: AdsFailIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP2, ipVersion(), - AdsIntegrationConfig( - sotwOrDelta() == Grpc::SotwOrDelta::Sotw ? "GRPC" : "DELTA_GRPC")) { + ConfigHelper::adsBootstrap( + sotwOrDelta() == Grpc::SotwOrDelta::Sotw ? 
"GRPC" : "DELTA_GRPC", + envoy::config::core::v3::ApiVersion::V2)) { create_xds_upstream_ = true; use_lds_ = false; sotw_or_delta_ = sotwOrDelta(); } - void TearDown() override { - cleanUpXdsConnection(); - test_server_.reset(); - fake_upstreams_.clear(); - } + void TearDown() override { cleanUpXdsConnection(); } void initialize() override { config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { @@ -638,18 +743,15 @@ class AdsConfigIntegrationTest : public Grpc::DeltaSotwIntegrationParamTest, public: AdsConfigIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP2, ipVersion(), - AdsIntegrationConfig( - sotwOrDelta() == Grpc::SotwOrDelta::Sotw ? "GRPC" : "DELTA_GRPC")) { + ConfigHelper::adsBootstrap( + sotwOrDelta() == Grpc::SotwOrDelta::Sotw ? "GRPC" : "DELTA_GRPC", + envoy::config::core::v3::ApiVersion::V2)) { create_xds_upstream_ = true; use_lds_ = false; sotw_or_delta_ = sotwOrDelta(); } - void TearDown() override { - cleanUpXdsConnection(); - test_server_.reset(); - fake_upstreams_.clear(); - } + void TearDown() override { cleanUpXdsConnection(); } void initialize() override { config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { @@ -803,18 +905,15 @@ class AdsClusterFromFileIntegrationTest : public Grpc::DeltaSotwIntegrationParam public: AdsClusterFromFileIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP2, ipVersion(), - AdsIntegrationConfig( - sotwOrDelta() == Grpc::SotwOrDelta::Sotw ? "GRPC" : "DELTA_GRPC")) { + ConfigHelper::adsBootstrap( + sotwOrDelta() == Grpc::SotwOrDelta::Sotw ? 
"GRPC" : "DELTA_GRPC", + envoy::config::core::v3::ApiVersion::V2)) { create_xds_upstream_ = true; use_lds_ = false; sotw_or_delta_ = sotwOrDelta(); } - void TearDown() override { - cleanUpXdsConnection(); - test_server_.reset(); - fake_upstreams_.clear(); - } + void TearDown() override { cleanUpXdsConnection(); } void initialize() override { config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { @@ -992,4 +1091,146 @@ TEST_P(AdsIntegrationTestWithRtdsAndSecondaryClusters, Basic) { testBasicFlow(); } +// Check if EDS cluster defined in file is loaded before ADS request and used as xDS server +class AdsClusterV3Test : public AdsIntegrationTest { +public: + AdsClusterV3Test() : AdsIntegrationTest(envoy::config::core::v3::ApiVersion::V3) {} +}; + +INSTANTIATE_TEST_SUITE_P(IpVersionsClientTypeDelta, AdsClusterV3Test, + DELTA_SOTW_GRPC_CLIENT_INTEGRATION_PARAMS); + +// Verify CDS is paused during cluster warming. +TEST_P(AdsClusterV3Test, CdsPausedDuringWarming) { + initialize(); + + const auto cds_type_url = Config::getTypeUrl( + envoy::config::core::v3::ApiVersion::V3); + const auto eds_type_url = Config::getTypeUrl( + envoy::config::core::v3::ApiVersion::V3); + const auto lds_type_url = Config::getTypeUrl( + envoy::config::core::v3::ApiVersion::V3); + const auto rds_type_url = Config::getTypeUrl( + envoy::config::core::v3::ApiVersion::V3); + + // Send initial configuration, validate we can process a request. 
+ EXPECT_TRUE(compareDiscoveryRequest(cds_type_url, "", {}, {}, {}, true)); + sendDiscoveryResponse( + cds_type_url, {buildCluster("cluster_0")}, {buildCluster("cluster_0")}, {}, "1", false); + EXPECT_TRUE(compareDiscoveryRequest(eds_type_url, "", {"cluster_0"}, {"cluster_0"}, {})); + + sendDiscoveryResponse( + eds_type_url, {buildClusterLoadAssignment("cluster_0")}, + {buildClusterLoadAssignment("cluster_0")}, {}, "1", false); + + EXPECT_TRUE(compareDiscoveryRequest(cds_type_url, "1", {}, {}, {})); + EXPECT_TRUE(compareDiscoveryRequest(lds_type_url, "", {}, {}, {})); + sendDiscoveryResponse( + lds_type_url, {buildListener("listener_0", "route_config_0")}, + {buildListener("listener_0", "route_config_0")}, {}, "1", false); + + EXPECT_TRUE(compareDiscoveryRequest(eds_type_url, "1", {"cluster_0"}, {}, {})); + EXPECT_TRUE( + compareDiscoveryRequest(rds_type_url, "", {"route_config_0"}, {"route_config_0"}, {})); + sendDiscoveryResponse( + rds_type_url, {buildRouteConfig("route_config_0", "cluster_0")}, + {buildRouteConfig("route_config_0", "cluster_0")}, {}, "1", false); + + EXPECT_TRUE(compareDiscoveryRequest(lds_type_url, "1", {}, {}, {})); + EXPECT_TRUE(compareDiscoveryRequest(rds_type_url, "1", {"route_config_0"}, {}, {})); + + test_server_->waitForCounterGe("listener_manager.listener_create_success", 1); + makeSingleRequest(); + + // Send the first warming cluster. + sendDiscoveryResponse( + cds_type_url, {buildCluster("warming_cluster_1")}, {buildCluster("warming_cluster_1")}, + {"cluster_0"}, "2", false); + + test_server_->waitForGaugeEq("cluster_manager.warming_clusters", 1); + + EXPECT_TRUE(compareDiscoveryRequest(eds_type_url, "1", {"warming_cluster_1"}, + {"warming_cluster_1"}, {"cluster_0"})); + + // Send the second warming cluster. 
+ sendDiscoveryResponse( + cds_type_url, {buildCluster("warming_cluster_2")}, {buildCluster("warming_cluster_2")}, {}, + "3", false); + test_server_->waitForGaugeEq("cluster_manager.warming_clusters", 2); + // We would've got a Cluster discovery request with version 2 here, had the CDS not been paused. + + EXPECT_TRUE(compareDiscoveryRequest(eds_type_url, "1", {"warming_cluster_2", "warming_cluster_1"}, + {"warming_cluster_2"}, {})); + + // Finish warming the clusters. + sendDiscoveryResponse( + eds_type_url, + {buildClusterLoadAssignment("warming_cluster_1"), + buildClusterLoadAssignment("warming_cluster_2")}, + {buildClusterLoadAssignment("warming_cluster_1"), + buildClusterLoadAssignment("warming_cluster_2")}, + {"cluster_0"}, "2", false); + + // Validate that clusters are warmed. + test_server_->waitForGaugeEq("cluster_manager.warming_clusters", 0); + + // CDS is resumed and EDS response was acknowledged. + if (sotw_or_delta_ == Grpc::SotwOrDelta::Delta) { + // Envoy will ACK both Cluster messages. Since they arrived while CDS was paused, they aren't + // sent until CDS is unpaused. Since version 3 has already arrived by the time the version 2 + // ACK goes out, they're both acknowledging version 3. 
+ EXPECT_TRUE(compareDiscoveryRequest(cds_type_url, "3", {}, {}, {})); + } + EXPECT_TRUE(compareDiscoveryRequest(cds_type_url, "3", {}, {}, {})); + EXPECT_TRUE(compareDiscoveryRequest(eds_type_url, "2", {"warming_cluster_2", "warming_cluster_1"}, + {}, {})); +} + +// Validates that the initial xDS request batches all resources referred to in static config +TEST_P(AdsClusterV3Test, XdsBatching) { + config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + bootstrap.mutable_dynamic_resources()->clear_cds_config(); + bootstrap.mutable_dynamic_resources()->clear_lds_config(); + + auto static_resources = bootstrap.mutable_static_resources(); + static_resources->add_clusters()->MergeFrom(buildCluster("eds_cluster")); + static_resources->add_clusters()->MergeFrom(buildCluster("eds_cluster2")); + + static_resources->add_listeners()->MergeFrom(buildListener("rds_listener", "route_config")); + static_resources->add_listeners()->MergeFrom(buildListener("rds_listener2", "route_config2")); + }); + + on_server_init_function_ = [this]() { + createXdsConnection(); + ASSERT_TRUE(xds_connection_->waitForNewStream(*dispatcher_, xds_stream_)); + xds_stream_->startGrpcStream(); + + const auto eds_type_url = + Config::getTypeUrl( + envoy::config::core::v3::ApiVersion::V3); + const auto rds_type_url = Config::getTypeUrl( + envoy::config::core::v3::ApiVersion::V3); + + EXPECT_TRUE(compareDiscoveryRequest(eds_type_url, "", {"eds_cluster2", "eds_cluster"}, + {"eds_cluster2", "eds_cluster"}, {}, true)); + sendDiscoveryResponse( + eds_type_url, + {buildClusterLoadAssignment("eds_cluster"), buildClusterLoadAssignment("eds_cluster2")}, + {buildClusterLoadAssignment("eds_cluster"), buildClusterLoadAssignment("eds_cluster2")}, {}, + "1", false); + + EXPECT_TRUE(compareDiscoveryRequest(rds_type_url, "", {"route_config2", "route_config"}, + {"route_config2", "route_config"}, {})); + sendDiscoveryResponse( + rds_type_url, + {buildRouteConfig("route_config2", 
"eds_cluster2"), + buildRouteConfig("route_config", "dummy_cluster")}, + {buildRouteConfig("route_config2", "eds_cluster2"), + buildRouteConfig("route_config", "dummy_cluster")}, + {}, "1", false); + }; + + initialize(); +} + } // namespace Envoy diff --git a/test/integration/alpn_selection_integration_test.cc b/test/integration/alpn_selection_integration_test.cc new file mode 100644 index 0000000000000..a576f51d3e166 --- /dev/null +++ b/test/integration/alpn_selection_integration_test.cc @@ -0,0 +1,213 @@ +#include "envoy/config/bootstrap/v3/bootstrap.pb.h" +#include "envoy/config/route/v3/route_components.pb.h" +#include "envoy/extensions/transport_sockets/tls/v3/cert.pb.h" + +#include "common/http/utility.h" + +#include "extensions/transport_sockets/tls/context_config_impl.h" +#include "extensions/transport_sockets/tls/context_impl.h" +#include "extensions/transport_sockets/tls/ssl_socket.h" + +#include "test/integration/http_integration.h" + +#include "absl/strings/str_replace.h" +#include "gtest/gtest.h" + +namespace Envoy { + +class AlpnSelectionIntegrationTest : public testing::Test, public HttpIntegrationTest { +public: + AlpnSelectionIntegrationTest() + : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, + TestEnvironment::getIpVersionsForTest().front(), + ConfigHelper::httpProxyConfig()) {} + + void initialize() override { + setDownstreamProtocol(Http::CodecClient::Type::HTTP1); + setUpstreamProtocol(use_h2_ ? 
FakeHttpConnection::Type::HTTP2 + : FakeHttpConnection::Type::HTTP1); + config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + auto* static_resources = bootstrap.mutable_static_resources(); + auto* cluster = static_resources->mutable_clusters(0); + + if (use_h2_) { + cluster->mutable_http2_protocol_options(); + } + const std::string transport_socket_yaml = absl::StrFormat( + R"EOF( +name: tls +typed_config: + "@type": type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext + common_tls_context: + alpn_protocols: [ %s ] + tls_certificates: + - certificate_chain: { filename: "%s" } + private_key: { filename: "%s" } + )EOF", + absl::StrJoin(configured_alpn_, ","), + TestEnvironment::runfilesPath("test/config/integration/certs/clientcert.pem"), + TestEnvironment::runfilesPath("test/config/integration/certs/clientkey.pem")); + auto* transport_socket = cluster->mutable_transport_socket(); + TestUtility::loadFromYaml(transport_socket_yaml, *transport_socket); + }); + HttpIntegrationTest::initialize(); + } + + Network::TransportSocketFactoryPtr createUpstreamSslContext() { + envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context; + const std::string yaml = absl::StrFormat( + R"EOF( +common_tls_context: + alpn_protocols: [%s] + tls_certificates: + - certificate_chain: { filename: "%s" } + private_key: { filename: "%s" } + validation_context: + trusted_ca: { filename: "%s" } +require_client_certificate: true +)EOF", + absl::StrJoin(upstream_alpn_, ","), + TestEnvironment::runfilesPath("test/config/integration/certs/upstreamcert.pem"), + TestEnvironment::runfilesPath("test/config/integration/certs/upstreamkey.pem"), + TestEnvironment::runfilesPath("test/config/integration/certs/cacert.pem")); + TestUtility::loadFromYaml(yaml, tls_context); + auto cfg = std::make_unique( + tls_context, factory_context_); + static Stats::Scope* upstream_stats_store = new Stats::IsolatedStoreImpl(); + return std::make_unique( + 
std::move(cfg), context_manager_, *upstream_stats_store, std::vector{}); + } + + void createUpstreams() override { + auto endpoint = upstream_address_fn_(0); + fake_upstreams_.emplace_back(new FakeUpstream( + createUpstreamSslContext(), endpoint->ip()->port(), + use_h2_ ? FakeHttpConnection::Type::HTTP2 : FakeHttpConnection::Type::HTTP1, + endpoint->ip()->version(), timeSystem())); + } + + bool use_h2_{}; + std::vector upstream_alpn_; + std::vector configured_alpn_; +}; + +// No upstream ALPN is specified in the protocol, but we successfully negotiate h2 ALPN +// due to the default ALPN set through the HTTP/2 conn pool. +TEST_F(AlpnSelectionIntegrationTest, Http2UpstreamMatchingAlpn) { + use_h2_ = true; + upstream_alpn_.emplace_back(Http::Utility::AlpnNames::get().Http2); + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + IntegrationStreamDecoderPtr response = + codec_client_->makeHeaderOnlyRequest(default_request_headers_); + + waitForNextUpstreamRequest(); + EXPECT_EQ(Http::Utility::AlpnNames::get().Http2, + fake_upstream_connection_->connection().nextProtocol()); + + upstream_request_->encodeHeaders(default_response_headers_, true); + response->waitForEndStream(); + EXPECT_EQ("200", response->headers().getStatusValue()); +} + +// No upstream ALPN is specified in the protocol and we fail to negotiate h2 ALPN +// since the upstream doesn't list h2 in its ALPN list. Note that the call still goes +// through because ALPN negotiation failure doesn't necessarily fail the call. +// TODO(snowp): We should actually fail the handshake in case of negotiation failure, +// fix that and update these tests. 
+TEST_F(AlpnSelectionIntegrationTest, Http2UpstreamMismatchingAlpn) { + use_h2_ = true; + upstream_alpn_.emplace_back(Http::Utility::AlpnNames::get().Http11); + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + IntegrationStreamDecoderPtr response = + codec_client_->makeHeaderOnlyRequest(default_request_headers_); + + waitForNextUpstreamRequest(); + // No ALPN negotiated. + EXPECT_EQ("", fake_upstream_connection_->connection().nextProtocol()); + + upstream_request_->encodeHeaders(default_response_headers_, true); + response->waitForEndStream(); + EXPECT_EQ("200", response->headers().getStatusValue()); +} + +// The upstream supports h2,custom-alpn, and we configure the upstream TLS context to negotiate +// custom-alpn. No attempt to negotiate h2 should happen, so we should select custom-alpn. +TEST_F(AlpnSelectionIntegrationTest, Http2UpstreamConfiguredALPN) { + use_h2_ = true; + upstream_alpn_.emplace_back(Http::Utility::AlpnNames::get().Http2); + upstream_alpn_.emplace_back("custom-alpn"); + configured_alpn_.emplace_back("custom-alpn"); + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + IntegrationStreamDecoderPtr response = + codec_client_->makeHeaderOnlyRequest(default_request_headers_); + + waitForNextUpstreamRequest(); + EXPECT_EQ("custom-alpn", fake_upstream_connection_->connection().nextProtocol()); + + upstream_request_->encodeHeaders(default_response_headers_, true); + response->waitForEndStream(); + EXPECT_EQ("200", response->headers().getStatusValue()); +} + +// No upstream ALPN is specified in the protocol, but we successfully negotiate http/1.1 ALPN +// due to the default ALPN set through the HTTP/1.1 conn pool. 
+TEST_F(AlpnSelectionIntegrationTest, Http11UpstreaMatchingAlpn) { + upstream_alpn_.emplace_back(Http::Utility::AlpnNames::get().Http11); + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + IntegrationStreamDecoderPtr response = + codec_client_->makeHeaderOnlyRequest(default_request_headers_); + + waitForNextUpstreamRequest(); + EXPECT_EQ(Http::Utility::AlpnNames::get().Http11, + fake_upstream_connection_->connection().nextProtocol()); + + upstream_request_->encodeHeaders(default_response_headers_, true); + response->waitForEndStream(); + EXPECT_EQ("200", response->headers().getStatusValue()); +} + +// The upstream only lists h2 but we attempt to negotiate http/1.1 due to the default ALPN set by +// the conn pool. This results in no protocol being negotiated. Note that the call still goes +// through because ALPN negotiation failure doesn't necessarily fail the call. +TEST_F(AlpnSelectionIntegrationTest, Http11UpstreaMismatchingAlpn) { + upstream_alpn_.emplace_back(Http::Utility::AlpnNames::get().Http2); + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + IntegrationStreamDecoderPtr response = + codec_client_->makeHeaderOnlyRequest(default_request_headers_); + + waitForNextUpstreamRequest(); + // No ALPN selected. + EXPECT_EQ("", fake_upstream_connection_->connection().nextProtocol()); + + upstream_request_->encodeHeaders(default_response_headers_, true); + response->waitForEndStream(); + EXPECT_EQ("200", response->headers().getStatusValue()); +} + +// The upstream supports http/1.1,custom-alpn, and we configure the upstream TLS context to +// negotiate custom-alpn. No attempt to negotiate http/1.1 should happen, so we should select +// custom-alpn. +// TODO(snowp): We should actually fail the handshake in case of negotiation failure, +// fix that and update these tests. 
+TEST_F(AlpnSelectionIntegrationTest, Http11UpstreamConfiguredALPN) { + upstream_alpn_.emplace_back(Http::Utility::AlpnNames::get().Http11); + upstream_alpn_.emplace_back("custom-alpn"); + configured_alpn_.emplace_back("custom-alpn"); + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + IntegrationStreamDecoderPtr response = + codec_client_->makeHeaderOnlyRequest(default_request_headers_); + + waitForNextUpstreamRequest(); + EXPECT_EQ("custom-alpn", fake_upstream_connection_->connection().nextProtocol()); + + upstream_request_->encodeHeaders(default_response_headers_, true); + response->waitForEndStream(); + EXPECT_EQ("200", response->headers().getStatusValue()); +} +} // namespace Envoy diff --git a/test/integration/api_listener_integration_test.cc b/test/integration/api_listener_integration_test.cc index f00a7bd1fe0da..e4a206a41a15c 100644 --- a/test/integration/api_listener_integration_test.cc +++ b/test/integration/api_listener_integration_test.cc @@ -19,6 +19,7 @@ class ApiListenerIntegrationTest : public BaseIntegrationTest, ApiListenerIntegrationTest() : BaseIntegrationTest(GetParam(), bootstrapConfig()) { use_lds_ = false; autonomous_upstream_ = true; + defer_listener_finalization_ = true; } void SetUp() override { @@ -27,8 +28,8 @@ class ApiListenerIntegrationTest : public BaseIntegrationTest, // https://github.com/envoyproxy/envoy/blob/0b92c58d08d28ba7ef0ed5aaf44f90f0fccc5dce/test/integration/integration.cc#L454 // Thus, the ApiListener has to be added in addition to the already existing listener in the // config. - bootstrap.mutable_static_resources()->add_listeners()->MergeFrom( - Server::parseListenerFromV2Yaml(apiListenerConfig())); + bootstrap.mutable_static_resources()->mutable_listeners(0)->MergeFrom( + Server::parseListenerFromV3Yaml(apiListenerConfig())); }); } @@ -97,7 +98,7 @@ TEST_P(ApiListenerIntegrationTest, Basic) { // The AutonomousUpstream responds with 200 OK and a body of 10 bytes. 
// In the http1 codec the end stream is encoded with encodeData and 0 bytes. - Http::TestHeaderMapImpl expected_response_headers{{":status", "200"}}; + Http::TestResponseHeaderMapImpl expected_response_headers{{":status", "200"}}; EXPECT_CALL(stream_encoder_, encodeHeaders(_, false)); EXPECT_CALL(stream_encoder_, encodeData(_, false)); EXPECT_CALL(stream_encoder_, encodeData(BufferStringEqual(""), true)).WillOnce(Notify(&done)); diff --git a/test/integration/api_version_integration_test.cc b/test/integration/api_version_integration_test.cc index d7681cfd4e98f..952c095a820e6 100644 --- a/test/integration/api_version_integration_test.cc +++ b/test/integration/api_version_integration_test.cc @@ -75,7 +75,7 @@ class ApiVersionIntegrationTest : public testing::TestWithParam, RELEASE_ASSERT(result, result.message()); result = xds_stream_->waitForHeadersComplete(); RELEASE_ASSERT(result, result.message()); - endpoint_ = std::string(xds_stream_->headers().Path()->value().getStringView()); + endpoint_ = std::string(xds_stream_->headers().getPathValue()); ENVOY_LOG_MISC(debug, "xDS endpoint {}", endpoint_); } } @@ -218,8 +218,6 @@ class ApiVersionIntegrationTest : public testing::TestWithParam, if (xds_stream_ != nullptr) { cleanUpXdsConnection(); } - test_server_.reset(); - fake_upstreams_.clear(); } std::string endpoint_; @@ -318,9 +316,11 @@ TEST_P(ApiVersionIntegrationTest, Eds) { TEST_P(ApiVersionIntegrationTest, Rtds) { config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { - auto* admin_layer = bootstrap.mutable_layered_runtime()->add_layers(); - admin_layer->set_name("admin layer"); - admin_layer->mutable_admin_layer(); + if (bootstrap.mutable_layered_runtime()->layers_size() == 0) { + auto* admin_layer = bootstrap.mutable_layered_runtime()->add_layers(); + admin_layer->set_name("admin layer"); + admin_layer->mutable_admin_layer(); + } auto* rtds_layer = bootstrap.mutable_layered_runtime()->add_layers(); 
rtds_layer->set_name("rtds_layer"); setupConfigSource(*rtds_layer->mutable_rtds_layer()->mutable_rtds_config()); diff --git a/test/integration/autonomous_upstream.cc b/test/integration/autonomous_upstream.cc index 70c14b75de7d4..45a467dcf7e76 100644 --- a/test/integration/autonomous_upstream.cc +++ b/test/integration/autonomous_upstream.cc @@ -3,7 +3,8 @@ namespace Envoy { namespace { -void HeaderToInt(const char header_name[], int32_t& return_int, Http::TestHeaderMapImpl& headers) { +void HeaderToInt(const char header_name[], int32_t& return_int, + Http::TestResponseHeaderMapImpl& headers) { const std::string header_value(headers.get_(header_name)); if (!header_value.empty()) { uint64_t parsed_value; @@ -41,7 +42,7 @@ void AutonomousStream::setEndStream(bool end_stream) { // Check all the special headers and send a customized response based on them. void AutonomousStream::sendResponse() { - Http::TestHeaderMapImpl headers(*headers_); + Http::TestResponseHeaderMapImpl headers(*headers_); upstream_.setLastRequestHeaders(*headers_); int32_t request_body_length = -1; @@ -59,13 +60,14 @@ void AutonomousStream::sendResponse() { HeaderToInt(RESPONSE_SIZE_BYTES, response_body_length, headers); encodeHeaders(upstream_.responseHeaders(), false); - encodeData(response_body_length, true); + encodeData(response_body_length, false); + encodeTrailers(upstream_.responseTrailers()); } -AutonomousHttpConnection::AutonomousHttpConnection(SharedConnectionWrapper& shared_connection, - Stats::Store& store, Type type, - AutonomousUpstream& upstream) - : FakeHttpConnection(shared_connection, store, type, upstream.timeSystem(), +AutonomousHttpConnection::AutonomousHttpConnection(AutonomousUpstream& autonomous_upstream, + SharedConnectionWrapper& shared_connection, + Type type, AutonomousUpstream& upstream) + : FakeHttpConnection(autonomous_upstream, shared_connection, type, upstream.timeSystem(), Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT, 
envoy::config::core::v3::HttpProtocolOptions::ALLOW), upstream_(upstream) {} @@ -88,7 +90,7 @@ bool AutonomousUpstream::createNetworkFilterChain(Network::Connection& connectio const std::vector&) { shared_connections_.emplace_back(new SharedConnectionWrapper(connection, true)); AutonomousHttpConnectionPtr http_connection( - new AutonomousHttpConnection(*shared_connections_.back(), stats_store_, http_type_, *this)); + new AutonomousHttpConnection(*this, *shared_connections_.back(), http_type_, *this)); testing::AssertionResult result = http_connection->initialize(); RELEASE_ASSERT(result, result.message()); http_connections_.push_back(std::move(http_connection)); @@ -110,15 +112,27 @@ std::unique_ptr AutonomousUpstream::lastRequestH return std::move(last_request_headers_); } +void AutonomousUpstream::setResponseTrailers( + std::unique_ptr&& response_trailers) { + Thread::LockGuard lock(headers_lock_); + response_trailers_ = std::move(response_trailers); +} + void AutonomousUpstream::setResponseHeaders( std::unique_ptr&& response_headers) { Thread::LockGuard lock(headers_lock_); response_headers_ = std::move(response_headers); } -Http::TestHeaderMapImpl AutonomousUpstream::responseHeaders() { +Http::TestResponseTrailerMapImpl AutonomousUpstream::responseTrailers() { + Thread::LockGuard lock(headers_lock_); + Http::TestResponseTrailerMapImpl return_trailers = *response_trailers_; + return return_trailers; +} + +Http::TestResponseHeaderMapImpl AutonomousUpstream::responseHeaders() { Thread::LockGuard lock(headers_lock_); - Http::TestHeaderMapImpl return_headers = *response_headers_; + Http::TestResponseHeaderMapImpl return_headers = *response_headers_; return return_headers; } diff --git a/test/integration/autonomous_upstream.h b/test/integration/autonomous_upstream.h index 6c51abb002177..e9d247a4ba952 100644 --- a/test/integration/autonomous_upstream.h +++ b/test/integration/autonomous_upstream.h @@ -35,8 +35,9 @@ class AutonomousStream : public FakeStream { // An 
upstream which creates AutonomousStreams for new incoming streams. class AutonomousHttpConnection : public FakeHttpConnection { public: - AutonomousHttpConnection(SharedConnectionWrapper& shared_connection, Stats::Store& store, - Type type, AutonomousUpstream& upstream); + AutonomousHttpConnection(AutonomousUpstream& autonomous_upstream, + SharedConnectionWrapper& shared_connection, Type type, + AutonomousUpstream& upstream); Http::RequestDecoder& newStream(Http::ResponseEncoder& response_encoder, bool) override; @@ -55,16 +56,18 @@ class AutonomousUpstream : public FakeUpstream { bool allow_incomplete_streams) : FakeUpstream(address, type, time_system), allow_incomplete_streams_(allow_incomplete_streams), + response_trailers_(std::make_unique()), response_headers_(std::make_unique( - Http::TestHeaderMapImpl({{":status", "200"}}))) {} + Http::TestResponseHeaderMapImpl({{":status", "200"}}))) {} AutonomousUpstream(Network::TransportSocketFactoryPtr&& transport_socket_factory, uint32_t port, FakeHttpConnection::Type type, Network::Address::IpVersion version, Event::TestTimeSystem& time_system, bool allow_incomplete_streams) : FakeUpstream(std::move(transport_socket_factory), port, type, version, time_system), allow_incomplete_streams_(allow_incomplete_streams), + response_trailers_(std::make_unique()), response_headers_(std::make_unique( - Http::TestHeaderMapImpl({{":status", "200"}}))) {} + Http::TestResponseHeaderMapImpl({{":status", "200"}}))) {} ~AutonomousUpstream() override; bool @@ -76,13 +79,16 @@ class AutonomousUpstream : public FakeUpstream { void setLastRequestHeaders(const Http::HeaderMap& headers); std::unique_ptr lastRequestHeaders(); + void setResponseTrailers(std::unique_ptr&& response_trailers); void setResponseHeaders(std::unique_ptr&& response_headers); - Http::TestHeaderMapImpl responseHeaders(); + Http::TestResponseTrailerMapImpl responseTrailers(); + Http::TestResponseHeaderMapImpl responseHeaders(); const bool allow_incomplete_streams_{false}; 
private: Thread::MutexBasicLockable headers_lock_; std::unique_ptr last_request_headers_; + std::unique_ptr response_trailers_; std::unique_ptr response_headers_; std::vector http_connections_; std::vector shared_connections_; diff --git a/test/integration/cds_integration_test.cc b/test/integration/cds_integration_test.cc index 851a6ed9f3a31..93bfc5075d70e 100644 --- a/test/integration/cds_integration_test.cc +++ b/test/integration/cds_integration_test.cc @@ -10,7 +10,6 @@ #include "test/common/grpc/grpc_client_integration.h" #include "test/integration/http_integration.h" #include "test/integration/utility.h" -#include "test/mocks/server/mocks.h" #include "test/test_common/network_utility.h" #include "test/test_common/resources.h" #include "test/test_common/simulated_time_system.h" @@ -42,8 +41,6 @@ class CdsIntegrationTest : public Grpc::DeltaSotwIntegrationParamTest, public Ht void TearDown() override { if (!test_skipped_) { cleanUpXdsConnection(); - test_server_.reset(); - fake_upstreams_.clear(); } } @@ -80,10 +77,10 @@ class CdsIntegrationTest : public Grpc::DeltaSotwIntegrationParamTest, public Ht fake_upstreams_.emplace_back(new FakeUpstream(0, FakeHttpConnection::Type::HTTP2, version_, timeSystem(), enable_half_close_)); fake_upstreams_[UpstreamIndex2]->set_allow_unexpected_disconnects(false); - cluster1_ = ConfigHelper::buildCluster( + cluster1_ = ConfigHelper::buildStaticCluster( ClusterName1, fake_upstreams_[UpstreamIndex1]->localAddress()->ip()->port(), Network::Test::getLoopbackAddressString(ipVersion())); - cluster2_ = ConfigHelper::buildCluster( + cluster2_ = ConfigHelper::buildStaticCluster( ClusterName2, fake_upstreams_[UpstreamIndex2]->localAddress()->ip()->port(), Network::Test::getLoopbackAddressString(ipVersion())); @@ -147,6 +144,7 @@ INSTANTIATE_TEST_SUITE_P(IpVersionsClientTypeDelta, CdsIntegrationTest, TEST_P(CdsIntegrationTest, CdsClusterUpDownUp) { // Calls our initialize(), which includes establishing a listener, route, and cluster. 
testRouterHeaderOnlyRequestAndResponse(nullptr, UpstreamIndex1, "/cluster1"); + test_server_->waitForCounterGe("cluster_manager.cluster_added", 1); // Tell Envoy that cluster_1 is gone. EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "55", {}, {}, {})); @@ -160,10 +158,10 @@ TEST_P(CdsIntegrationTest, CdsClusterUpDownUp) { BufferingStreamDecoderPtr response = IntegrationUtil::makeSingleRequest( lookupPort("http"), "GET", "/cluster1", "", downstream_protocol_, version_, "foo.com"); ASSERT_TRUE(response->complete()); - EXPECT_EQ("503", response->headers().Status()->value().getStringView()); + EXPECT_EQ("503", response->headers().getStatusValue()); cleanupUpstreamAndDownstream(); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); // Tell Envoy that cluster_1 is back. EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "42", {}, {}, {})); @@ -186,7 +184,7 @@ TEST_P(CdsIntegrationTest, TwoClusters) { testRouterHeaderOnlyRequestAndResponse(nullptr, UpstreamIndex1, "/cluster1"); cleanupUpstreamAndDownstream(); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); // Tell Envoy that cluster_2 is here. EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "55", {}, {}, {})); @@ -198,7 +196,7 @@ TEST_P(CdsIntegrationTest, TwoClusters) { // A request for cluster_2 should be fine. testRouterHeaderOnlyRequestAndResponse(nullptr, UpstreamIndex2, "/cluster2"); cleanupUpstreamAndDownstream(); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); // Tell Envoy that cluster_1 is gone. EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "42", {}, {}, {})); @@ -211,7 +209,7 @@ TEST_P(CdsIntegrationTest, TwoClusters) { // Even with cluster_1 gone, a request for cluster_2 should be fine. 
testRouterHeaderOnlyRequestAndResponse(nullptr, UpstreamIndex2, "/cluster2"); cleanupUpstreamAndDownstream(); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); // Tell Envoy that cluster_1 is back. EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "42", {}, {}, {})); @@ -236,7 +234,7 @@ TEST_P(CdsIntegrationTest, VersionsRememberedAfterReconnect) { // Calls our initialize(), which includes establishing a listener, route, and cluster. testRouterHeaderOnlyRequestAndResponse(nullptr, UpstreamIndex1, "/cluster1"); cleanupUpstreamAndDownstream(); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); // Close the connection carrying Envoy's xDS gRPC stream... AssertionResult result = xds_connection_->close(); @@ -265,11 +263,11 @@ TEST_P(CdsIntegrationTest, VersionsRememberedAfterReconnect) { // A request for cluster_1 should be fine. testRouterHeaderOnlyRequestAndResponse(nullptr, UpstreamIndex1, "/cluster1"); cleanupUpstreamAndDownstream(); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); // A request for cluster_2 should be fine. 
testRouterHeaderOnlyRequestAndResponse(nullptr, UpstreamIndex2, "/cluster2"); cleanupUpstreamAndDownstream(); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); } } // namespace diff --git a/test/integration/cluster_filter_integration_test.cc b/test/integration/cluster_filter_integration_test.cc index 61eb7bbc7520b..4162bc9273cff 100644 --- a/test/integration/cluster_filter_integration_test.cc +++ b/test/integration/cluster_filter_integration_test.cc @@ -109,19 +109,19 @@ TEST_P(ClusterFilterIntegrationTest, TestClusterFilter) { ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection)); std::string observed_data; - tcp_client->write("test"); + ASSERT_TRUE(tcp_client->write("test")); ASSERT_TRUE(fake_upstream_connection->waitForData(11, &observed_data)); EXPECT_EQ("please test", observed_data); observed_data.clear(); - tcp_client->write(" everything"); + ASSERT_TRUE(tcp_client->write(" everything")); ASSERT_TRUE(fake_upstream_connection->waitForData(22, &observed_data)); EXPECT_EQ("please test everything", observed_data); ASSERT_TRUE(fake_upstream_connection->write("yes")); tcp_client->waitForData("surely yes"); - tcp_client->write("", true); + ASSERT_TRUE(tcp_client->write("", true)); ASSERT_TRUE(fake_upstream_connection->waitForHalfClose()); ASSERT_TRUE(fake_upstream_connection->write("", true)); ASSERT_TRUE(fake_upstream_connection->waitForDisconnect(true)); diff --git a/test/integration/clusters/BUILD b/test/integration/clusters/BUILD index f7c33d4be98a2..97b137eb75f70 100644 --- a/test/integration/clusters/BUILD +++ b/test/integration/clusters/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test_library", @@ -7,6 +5,8 @@ load( "envoy_proto_library", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test_library( diff --git a/test/integration/cx_limit_integration_test.cc b/test/integration/cx_limit_integration_test.cc new file mode 
100644 index 0000000000000..126c5c2366646 --- /dev/null +++ b/test/integration/cx_limit_integration_test.cc @@ -0,0 +1,167 @@ +#include "envoy/config/bootstrap/v3/bootstrap.pb.h" +#include "envoy/network/filter.h" +#include "envoy/registry/registry.h" + +#include "common/network/utility.h" + +#include "test/config/utility.h" +#include "test/integration/integration.h" +#include "test/test_common/logging.h" +#include "test/test_common/simulated_time_system.h" + +#include "gtest/gtest.h" + +namespace Envoy { +namespace { + +class ConnectionLimitIntegrationTest : public testing::TestWithParam, + public Event::TestUsingSimulatedTime, + public BaseIntegrationTest { +public: + ConnectionLimitIntegrationTest() + : BaseIntegrationTest(GetParam(), ConfigHelper::tcpProxyConfig()) {} + + void setEmptyListenerLimit() { + config_helper_.addRuntimeOverride("envoy.resource_limits.listener.listener_0.connection_limit", + ""); + } + + void setListenerLimit(const uint32_t num_conns) { + config_helper_.addRuntimeOverride("envoy.resource_limits.listener.listener_0.connection_limit", + std::to_string(num_conns)); + } + + void setGlobalLimit(std::string&& num_conns) { + config_helper_.addRuntimeOverride("overload.global_downstream_max_connections", num_conns); + } + + void initialize() override { BaseIntegrationTest::initialize(); } + + AssertionResult waitForConnections(uint32_t envoy_downstream_connections) { + // The multiplier of 2 is because both Envoy's downstream connections and + // the test server's downstream connections are counted by the global + // counter. 
+ uint32_t expected_connections = envoy_downstream_connections * 2; + + for (int i = 0; i < 10; ++i) { + if (Network::AcceptedSocketImpl::acceptedSocketCount() == expected_connections) { + return AssertionSuccess(); + } + timeSystem().advanceTimeWait(std::chrono::milliseconds(500)); + } + if (Network::AcceptedSocketImpl::acceptedSocketCount() == expected_connections) { + return AssertionSuccess(); + } + return AssertionFailure(); + } + + // Assumes a limit of 2 connections. + void doTest(std::function init_func, std::string&& check_stat) { + init_func(); + + std::vector tcp_clients; + std::vector raw_conns; + tcp_clients.emplace_back(makeTcpConnection(lookupPort("listener_0"))); + raw_conns.emplace_back(); + ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(raw_conns.back())); + ASSERT_TRUE(tcp_clients.back()->connected()); + + tcp_clients.emplace_back(makeTcpConnection(lookupPort("listener_0"))); + raw_conns.emplace_back(); + ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(raw_conns.back())); + ASSERT_TRUE(tcp_clients.back()->connected()); + + tcp_clients.emplace_back(makeTcpConnection(lookupPort("listener_0"))); + raw_conns.emplace_back(); + ASSERT_FALSE( + fake_upstreams_[0]->waitForRawConnection(raw_conns.back(), std::chrono::milliseconds(500))); + tcp_clients.back()->waitForDisconnect(); + + // Get rid of the client that failed to connect. + tcp_clients.back()->close(); + tcp_clients.pop_back(); + + // Close the first connection that was successful so that we can open a new successful + // connection. + tcp_clients.front()->close(); + ASSERT_TRUE(raw_conns.front()->waitForDisconnect()); + + // Make sure to not try to connect again until the acceptedSocketCount is updated. 
+ ASSERT_TRUE(waitForConnections(1)); + tcp_clients.emplace_back(makeTcpConnection(lookupPort("listener_0"))); + raw_conns.emplace_back(); + ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(raw_conns.back())); + ASSERT_TRUE(tcp_clients.back()->connected()); + + const bool isV4 = (version_ == Network::Address::IpVersion::v4); + auto local_address = isV4 ? Network::Utility::getCanonicalIpv4LoopbackAddress() + : Network::Utility::getIpv6LoopbackAddress(); + + const std::string counter_prefix = (isV4 ? "listener.127.0.0.1_0." : "listener.[__1]_0."); + + test_server_->waitForCounterEq(counter_prefix + check_stat, 1); + + for (auto& tcp_client : tcp_clients) { + tcp_client->close(); + } + + tcp_clients.clear(); + raw_conns.clear(); + } +}; + +INSTANTIATE_TEST_SUITE_P(IpVersions, ConnectionLimitIntegrationTest, + testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + TestUtility::ipTestParamsToString); + +TEST_P(ConnectionLimitIntegrationTest, TestListenerLimit) { + std::function init_func = [this]() { + setListenerLimit(2); + initialize(); + }; + + doTest(init_func, "downstream_cx_overflow"); +} + +TEST_P(ConnectionLimitIntegrationTest, TestEmptyGlobalCxRuntimeLimit) { + const std::string log_line = "no configured limit to the number of allowed active connections."; + EXPECT_LOG_CONTAINS("warn", log_line, { initialize(); }); +} + +TEST_P(ConnectionLimitIntegrationTest, TestEmptyListenerRuntimeLimit) { + const std::string log_line = + "Listener connection limit runtime key " + "envoy.resource_limits.listener.listener_0.connection_limit is empty. 
There are currently " + "no limitations on the number of accepted connections for listener listener_0."; + EXPECT_LOG_CONTAINS("warn", log_line, { + setEmptyListenerLimit(); + initialize(); + }); +} + +TEST_P(ConnectionLimitIntegrationTest, TestGlobalLimit) { + std::function init_func = [this]() { + // Includes twice the number of connections expected because the tracking is performed via a + // static variable and the fake upstream has a listener. This causes upstream connections to the + // fake upstream to also be tracked as part of the global downstream connection tracking. + setGlobalLimit("4"); + initialize(); + }; + + doTest(init_func, "downstream_global_cx_overflow"); +} + +TEST_P(ConnectionLimitIntegrationTest, TestBothLimits) { + std::function init_func = [this]() { + // Setting the listener limit to a much higher value and making sure the right stat gets + // incremented when both limits are set. + setGlobalLimit("4"); + setListenerLimit(100); + initialize(); + }; + + doTest(init_func, "downstream_global_cx_overflow"); +} + +} // namespace +} // namespace Envoy diff --git a/test/integration/drain_close_integration_test.cc b/test/integration/drain_close_integration_test.cc new file mode 100644 index 0000000000000..51d6e95de1a28 --- /dev/null +++ b/test/integration/drain_close_integration_test.cc @@ -0,0 +1,179 @@ +#include "test/integration/http_protocol_integration.h" + +namespace Envoy { +namespace { + +using DrainCloseIntegrationTest = HttpProtocolIntegrationTest; + +// Add a health check filter and verify correct behavior when draining. +TEST_P(DrainCloseIntegrationTest, DrainCloseGradual) { + // The probability of drain close increases over time. With a high timeout, + // the probability will be very low, but the rapid retries prevent this from + // increasing total test time. 
+ drain_time_ = std::chrono::seconds(100); + config_helper_.addFilter(ConfigHelper::defaultHealthCheckFilter()); + initialize(); + + absl::Notification drain_sequence_started; + test_server_->server().dispatcher().post([this, &drain_sequence_started]() { + test_server_->drainManager().startDrainSequence([] {}); + drain_sequence_started.Notify(); + }); + drain_sequence_started.WaitForNotification(); + + codec_client_ = makeHttpConnection(lookupPort("http")); + EXPECT_FALSE(codec_client_->disconnected()); + + IntegrationStreamDecoderPtr response; + while (!test_server_->counter("http.config_test.downstream_cx_drain_close")->value()) { + response = codec_client_->makeHeaderOnlyRequest(default_request_headers_); + response->waitForEndStream(); + } + EXPECT_EQ(test_server_->counter("http.config_test.downstream_cx_drain_close")->value(), 1L); + + ASSERT_TRUE(codec_client_->waitForDisconnect()); + EXPECT_TRUE(response->complete()); + + EXPECT_EQ("200", response->headers().getStatusValue()); + if (downstream_protocol_ == Http::CodecClient::Type::HTTP2) { + EXPECT_TRUE(codec_client_->sawGoAway()); + } else { + EXPECT_EQ("close", response->headers().getConnectionValue()); + } +} + +TEST_P(DrainCloseIntegrationTest, DrainCloseImmediate) { + drain_strategy_ = Server::DrainStrategy::Immediate; + drain_time_ = std::chrono::seconds(100); + config_helper_.addFilter(ConfigHelper::defaultHealthCheckFilter()); + initialize(); + + absl::Notification drain_sequence_started; + test_server_->server().dispatcher().post([this, &drain_sequence_started]() { + test_server_->drainManager().startDrainSequence([] {}); + drain_sequence_started.Notify(); + }); + drain_sequence_started.WaitForNotification(); + + codec_client_ = makeHttpConnection(lookupPort("http")); + EXPECT_FALSE(codec_client_->disconnected()); + + IntegrationStreamDecoderPtr response; + response = codec_client_->makeHeaderOnlyRequest(default_request_headers_); + response->waitForEndStream(); + + 
ASSERT_TRUE(codec_client_->waitForDisconnect()); + EXPECT_TRUE(response->complete()); + + EXPECT_EQ("200", response->headers().getStatusValue()); + if (downstream_protocol_ == Http::CodecClient::Type::HTTP2) { + EXPECT_TRUE(codec_client_->sawGoAway()); + } else { + EXPECT_EQ("close", response->headers().getConnectionValue()); + } +} + +TEST_P(DrainCloseIntegrationTest, AdminDrain) { testAdminDrain(downstreamProtocol()); } + +TEST_P(DrainCloseIntegrationTest, AdminGracefulDrain) { + drain_strategy_ = Server::DrainStrategy::Immediate; + drain_time_ = std::chrono::seconds(999); + initialize(); + fake_upstreams_[0]->set_allow_unexpected_disconnects(true); + uint32_t http_port = lookupPort("http"); + codec_client_ = makeHttpConnection(http_port); + + auto response = codec_client_->makeHeaderOnlyRequest(default_request_headers_); + waitForNextUpstreamRequest(0); + upstream_request_->encodeHeaders(default_response_headers_, true); + response->waitForEndStream(); + ASSERT_TRUE(response->complete()); + EXPECT_THAT(response->headers(), Http::HttpStatusIs("200")); + // The request is completed but the connection remains open. + EXPECT_TRUE(codec_client_->connected()); + + // Invoke /drain_listeners with graceful drain + BufferingStreamDecoderPtr admin_response = IntegrationUtil::makeSingleRequest( + lookupPort("admin"), "POST", "/drain_listeners?graceful", "", downstreamProtocol(), version_); + EXPECT_EQ(admin_response->headers().Status()->value().getStringView(), "200"); + + // With a 999s graceful drain period, the listener should still be open. 
+ EXPECT_EQ(test_server_->counter("listener_manager.listener_stopped")->value(), 0); + + response = codec_client_->makeHeaderOnlyRequest(default_request_headers_); + waitForNextUpstreamRequest(0); + upstream_request_->encodeHeaders(default_response_headers_, true); + response->waitForEndStream(); + ASSERT_TRUE(response->complete()); + EXPECT_THAT(response->headers(), Http::HttpStatusIs("200")); + + // Connections will terminate on request complete + ASSERT_TRUE(codec_client_->waitForDisconnect()); + if (downstream_protocol_ == Http::CodecClient::Type::HTTP2) { + EXPECT_TRUE(codec_client_->sawGoAway()); + } else { + EXPECT_EQ("close", response->headers().getConnectionValue()); + } + + // New connections can still be made. + auto second_codec_client_ = makeRawHttpConnection(makeClientConnection(http_port), absl::nullopt); + EXPECT_TRUE(second_codec_client_->connected()); + + // Invoke /drain_listeners and shut down listeners. + second_codec_client_->rawConnection().close(Network::ConnectionCloseType::NoFlush); + admin_response = IntegrationUtil::makeSingleRequest( + lookupPort("admin"), "POST", "/drain_listeners", "", downstreamProtocol(), version_); + EXPECT_EQ(admin_response->headers().Status()->value().getStringView(), "200"); + + test_server_->waitForCounterEq("listener_manager.listener_stopped", 1); + ASSERT_TRUE(waitForPortAvailable(http_port)); +} + +TEST_P(DrainCloseIntegrationTest, RepeatedAdminGracefulDrain) { + // Use the default gradual probabilistic DrainStrategy so drainClose() + // behaviour isn't conflated with whether the drain sequence has started. 
+ drain_time_ = std::chrono::seconds(999); + initialize(); + fake_upstreams_[0]->set_allow_unexpected_disconnects(true); + uint32_t http_port = lookupPort("http"); + codec_client_ = makeHttpConnection(http_port); + + auto response = codec_client_->makeHeaderOnlyRequest(default_request_headers_); + waitForNextUpstreamRequest(0); + upstream_request_->encodeHeaders(default_response_headers_, true); + response->waitForEndStream(); + + // Invoke /drain_listeners with graceful drain + BufferingStreamDecoderPtr admin_response = IntegrationUtil::makeSingleRequest( + lookupPort("admin"), "POST", "/drain_listeners?graceful", "", downstreamProtocol(), version_); + EXPECT_EQ(admin_response->headers().Status()->value().getStringView(), "200"); + EXPECT_EQ(test_server_->counter("listener_manager.listener_stopped")->value(), 0); + + admin_response = IntegrationUtil::makeSingleRequest( + lookupPort("admin"), "POST", "/drain_listeners?graceful", "", downstreamProtocol(), version_); + EXPECT_EQ(admin_response->headers().Status()->value().getStringView(), "200"); + EXPECT_EQ(admin_response->headers().Status()->value().getStringView(), "200"); + + response = codec_client_->makeHeaderOnlyRequest(default_request_headers_); + waitForNextUpstreamRequest(0); + upstream_request_->encodeHeaders(default_response_headers_, true); + response->waitForEndStream(); + ASSERT_TRUE(response->complete()); + EXPECT_THAT(response->headers(), Http::HttpStatusIs("200")); + + admin_response = IntegrationUtil::makeSingleRequest( + lookupPort("admin"), "POST", "/drain_listeners", "", downstreamProtocol(), version_); + EXPECT_EQ(admin_response->headers().Status()->value().getStringView(), "200"); + + test_server_->waitForCounterEq("listener_manager.listener_stopped", 1); + ASSERT_TRUE(waitForPortAvailable(http_port)); +} + +INSTANTIATE_TEST_SUITE_P(Protocols, DrainCloseIntegrationTest, + testing::ValuesIn(HttpProtocolIntegrationTest::getProtocolTestParams( + {Http::CodecClient::Type::HTTP1, 
Http::CodecClient::Type::HTTP2}, + {FakeHttpConnection::Type::HTTP1})), + HttpProtocolIntegrationTest::protocolTestParamsToString); + +} // namespace +} // namespace Envoy diff --git a/test/integration/dynamic_validation_integration_test.cc b/test/integration/dynamic_validation_integration_test.cc index 3363b5341f4f4..aab7833a53724 100644 --- a/test/integration/dynamic_validation_integration_test.cc +++ b/test/integration/dynamic_validation_integration_test.cc @@ -38,41 +38,47 @@ class TestDynamicValidationNetworkFilterConfigFactory return Network::FilterFactoryCb(); } - Upstream::ProtocolOptionsConfigConstSharedPtr createProtocolOptionsTyped( - const envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy&) override { + Upstream::ProtocolOptionsConfigConstSharedPtr + createProtocolOptionsTyped(const envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy&, + Server::Configuration::ProtocolOptionsFactoryContext&) override { return nullptr; } }; // Pretty-printing of parameterized test names. std::string dynamicValidationTestParamsToString( - const ::testing::TestParamInfo>& params) { + const ::testing::TestParamInfo>& params) { return fmt::format( - "{}_{}", + "{}_{}_{}", TestUtility::ipTestParamsToString( ::testing::TestParamInfo(std::get<0>(params.param), 0)), - std::get<1>(params.param) ? "with_reject_unknown_fields" : "without_reject_unknown_fields"); + std::get<1>(params.param) ? "with_reject_unknown_fields" : "without_reject_unknown_fields", + std::get<2>(params.param) ? "with_ignore_unknown_fields" : "without_ignore_unknown_fields"); } // Validate unknown field handling in dynamic configuration. 
class DynamicValidationIntegrationTest - : public testing::TestWithParam>, + : public testing::TestWithParam>, public HttpIntegrationTest { public: DynamicValidationIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP2, std::get<0>(GetParam())), - reject_unknown_dynamic_fields_(std::get<1>(GetParam())) { + reject_unknown_dynamic_fields_(std::get<1>(GetParam())), + ignore_unknown_dynamic_fields_(std::get<2>(GetParam())) { setUpstreamProtocol(FakeHttpConnection::Type::HTTP2); } void createEnvoy() override { registerPort("upstream_0", fake_upstreams_.back()->localAddress()->ip()->port()); - createApiTestServer(api_filesystem_config_, {"http"}, reject_unknown_dynamic_fields_, - reject_unknown_dynamic_fields_, allow_lds_rejection_); + createApiTestServer(api_filesystem_config_, {"http"}, + {reject_unknown_dynamic_fields_, reject_unknown_dynamic_fields_, + ignore_unknown_dynamic_fields_}, + allow_lds_rejection_); } ApiFilesystemConfig api_filesystem_config_; const bool reject_unknown_dynamic_fields_; + const bool ignore_unknown_dynamic_fields_; bool allow_lds_rejection_{}; private: @@ -83,7 +89,8 @@ class DynamicValidationIntegrationTest INSTANTIATE_TEST_SUITE_P( IpVersions, DynamicValidationIntegrationTest, - testing::Combine(testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), testing::Bool()), + testing::Combine(testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), testing::Bool(), + testing::Bool()), dynamicValidationTestParamsToString); // Protocol options in CDS with unknown fields are rejected if and only if strict. 
@@ -103,7 +110,11 @@ TEST_P(DynamicValidationIntegrationTest, CdsProtocolOptionsRejected) { EXPECT_EQ(0, test_server_->counter("server.dynamic_unknown_fields")->value()); } else { EXPECT_EQ(1, test_server_->counter("cluster_manager.cds.update_success")->value()); - EXPECT_EQ(1, test_server_->counter("server.dynamic_unknown_fields")->value()); + if (ignore_unknown_dynamic_fields_) { + EXPECT_EQ(0, test_server_->counter("server.dynamic_unknown_fields")->value()); + } else { + EXPECT_EQ(1, test_server_->counter("server.dynamic_unknown_fields")->value()); + } } } @@ -127,7 +138,11 @@ TEST_P(DynamicValidationIntegrationTest, LdsFilterRejected) { } else { EXPECT_EQ(1, test_server_->counter("listener_manager.lds.update_success")->value()); EXPECT_EQ(1, test_server_->counter("http.router.rds.route_config_0.update_success")->value()); - EXPECT_EQ(1, test_server_->counter("server.dynamic_unknown_fields")->value()); + if (ignore_unknown_dynamic_fields_) { + EXPECT_EQ(0, test_server_->counter("server.dynamic_unknown_fields")->value()); + } else { + EXPECT_EQ(1, test_server_->counter("server.dynamic_unknown_fields")->value()); + } } EXPECT_EQ(1, test_server_->counter("cluster_manager.cds.update_success")->value()); EXPECT_EQ(1, test_server_->counter("cluster.cluster_1.update_success")->value()); @@ -154,7 +169,11 @@ TEST_P(DynamicValidationIntegrationTest, LdsFilterRejectedTypedStruct) { } else { EXPECT_EQ(1, test_server_->counter("listener_manager.lds.update_success")->value()); EXPECT_EQ(1, test_server_->counter("http.router.rds.route_config_0.update_success")->value()); - EXPECT_EQ(1, test_server_->counter("server.dynamic_unknown_fields")->value()); + if (ignore_unknown_dynamic_fields_) { + EXPECT_EQ(0, test_server_->counter("server.dynamic_unknown_fields")->value()); + } else { + EXPECT_EQ(1, test_server_->counter("server.dynamic_unknown_fields")->value()); + } } EXPECT_EQ(1, test_server_->counter("cluster_manager.cds.update_success")->value()); EXPECT_EQ(1, 
test_server_->counter("cluster.cluster_1.update_success")->value()); @@ -178,7 +197,11 @@ TEST_P(DynamicValidationIntegrationTest, RdsFailedBySubscription) { EXPECT_EQ(0, test_server_->counter("server.dynamic_unknown_fields")->value()); } else { EXPECT_EQ(1, test_server_->counter("http.router.rds.route_config_0.update_success")->value()); - EXPECT_EQ(1, test_server_->counter("server.dynamic_unknown_fields")->value()); + if (ignore_unknown_dynamic_fields_) { + EXPECT_EQ(0, test_server_->counter("server.dynamic_unknown_fields")->value()); + } else { + EXPECT_EQ(1, test_server_->counter("server.dynamic_unknown_fields")->value()); + } } EXPECT_EQ(1, test_server_->counter("cluster_manager.cds.update_success")->value()); EXPECT_EQ(1, test_server_->counter("cluster.cluster_1.update_success")->value()); @@ -204,7 +227,11 @@ TEST_P(DynamicValidationIntegrationTest, EdsFailedBySubscription) { EXPECT_EQ(0, test_server_->counter("server.dynamic_unknown_fields")->value()); } else { EXPECT_EQ(1, test_server_->counter("cluster.cluster_1.update_success")->value()); - EXPECT_EQ(1, test_server_->counter("server.dynamic_unknown_fields")->value()); + if (ignore_unknown_dynamic_fields_) { + EXPECT_EQ(0, test_server_->counter("server.dynamic_unknown_fields")->value()); + } else { + EXPECT_EQ(1, test_server_->counter("server.dynamic_unknown_fields")->value()); + } } } diff --git a/test/integration/echo_integration_test.cc b/test/integration/echo_integration_test.cc index 4009e6cacdb03..b965d3254d1e1 100644 --- a/test/integration/echo_integration_test.cc +++ b/test/integration/echo_integration_test.cc @@ -28,18 +28,7 @@ class EchoIntegrationTest : public testing::TestWithParam void { + auto connection = createConnectionDriver( + lookupPort("listener_0"), "hello", + [&response](Network::ClientConnection& conn, const Buffer::Instance& data) -> void { response.append(data.toString()); - connection.close(); - }, - version_); - - connection.run(); + 
conn.close(Network::ConnectionCloseType::FlushWrite); + }); + connection->run(); EXPECT_EQ("hello", response); } @@ -81,7 +67,7 @@ name: new_listener [&listener_added_by_worker]() -> void { listener_added_by_worker.setReady(); }); test_server_->server().dispatcher().post([this, json, &listener_added_by_manager]() -> void { EXPECT_TRUE(test_server_->server().listenerManager().addOrUpdateListener( - Server::parseListenerFromV2Yaml(json), "", true)); + Server::parseListenerFromV3Yaml(json), "", true)); listener_added_by_manager.setReady(); }); listener_added_by_worker.waitReady(); @@ -97,16 +83,14 @@ name: new_listener ->ip() ->port(); - Buffer::OwnedImpl buffer("hello"); std::string response; - RawConnectionDriver connection( - new_listener_port, buffer, - [&](Network::ClientConnection&, const Buffer::Instance& data) -> void { + auto connection = createConnectionDriver( + lookupPort("listener_0"), "hello", + [&response](Network::ClientConnection& conn, const Buffer::Instance& data) -> void { response.append(data.toString()); - connection.close(); - }, - version_); - connection.run(); + conn.close(Network::ConnectionCloseType::FlushWrite); + }); + connection->run(); EXPECT_EQ("hello", response); // Remove the listener. @@ -128,20 +112,15 @@ name: new_listener // connect would unexpectedly succeed. bool connect_fail = false; for (int i = 0; i < 10; ++i) { - RawConnectionDriver connection2( - new_listener_port, buffer, - [&](Network::ClientConnection&, const Buffer::Instance&) -> void { FAIL(); }, version_); - while (connection2.connecting()) { - // Don't busy loop, but macOS often needs a moment to decide this connection isn't happening. 
- timeSystem().advanceTimeWait(std::chrono::milliseconds(10)); - - connection2.run(Event::Dispatcher::RunType::NonBlock); - } - if (connection2.connection().state() == Network::Connection::State::Closed) { + auto connection2 = createConnectionDriver( + new_listener_port, "hello", + [](Network::ClientConnection&, const Buffer::Instance&) -> void { FAIL(); }); + connection2->waitForConnection(); + if (connection2->connection().state() == Network::Connection::State::Closed) { connect_fail = true; break; } else { - connection2.close(); + connection2->close(); } } ASSERT_TRUE(connect_fail); diff --git a/test/integration/eds_integration_test.cc b/test/integration/eds_integration_test.cc index b9f51205fc7d1..16a694eadafc0 100644 --- a/test/integration/eds_integration_test.cc +++ b/test/integration/eds_integration_test.cc @@ -390,7 +390,7 @@ TEST_P(EdsIntegrationTest, StatsReadyFilter) { BufferingStreamDecoderPtr response = IntegrationUtil::makeSingleRequest( lookupPort("http"), "GET", "/cluster1", "", downstream_protocol_, version_, "foo.com"); ASSERT_TRUE(response->complete()); - EXPECT_EQ("500", response->headers().Status()->value().getStringView()); + EXPECT_EQ("500", response->headers().getStatusValue()); EXPECT_EQ("EDS not ready", response->body()); cleanupUpstreamAndDownstream(); @@ -401,7 +401,7 @@ TEST_P(EdsIntegrationTest, StatsReadyFilter) { response = IntegrationUtil::makeSingleRequest(lookupPort("http"), "GET", "/cluster1", "", downstream_protocol_, version_, "foo.com"); ASSERT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ("EDS is ready", response->body()); cleanupUpstreamAndDownstream(); diff --git a/test/integration/extension_discovery_integration_test.cc b/test/integration/extension_discovery_integration_test.cc new file mode 100644 index 0000000000000..467922b3e1236 --- /dev/null +++ 
b/test/integration/extension_discovery_integration_test.cc @@ -0,0 +1,316 @@ +#include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" +#include "envoy/service/extension/v3/config_discovery.pb.h" + +#include "test/common/grpc/grpc_client_integration.h" +#include "test/integration/filters/set_response_code_filter_config.pb.h" +#include "test/integration/http_integration.h" +#include "test/test_common/utility.h" + +#include "gtest/gtest.h" + +namespace Envoy { +namespace { + +std::string denyPrivateConfig() { + return R"EOF( + prefix: "/private" + code: 403 +)EOF"; +} + +std::string allowAllConfig() { return "code: 200"; } + +std::string invalidConfig() { return "code: 90"; } + +class ExtensionDiscoveryIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, + public HttpIntegrationTest { +public: + ExtensionDiscoveryIntegrationTest() + : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, ipVersion()) {} + + void addDynamicFilter(const std::string& name, bool apply_without_warming, + bool set_default_config = true, bool rate_limit = false) { + config_helper_.addConfigModifier( + [this, name, apply_without_warming, set_default_config, rate_limit]( + envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + http_connection_manager) { + auto* filter = http_connection_manager.mutable_http_filters()->Add(); + filter->set_name(name); + auto* discovery = filter->mutable_config_discovery(); + discovery->add_type_urls( + "type.googleapis.com/test.integration.filters.SetResponseCodeFilterConfig"); + if (set_default_config) { + const auto default_configuration = + TestUtility::parseYaml( + "code: 403"); + discovery->mutable_default_config()->PackFrom(default_configuration); + } + discovery->set_apply_default_config_without_warming(apply_without_warming); + auto* api_config_source = discovery->mutable_config_source()->mutable_api_config_source(); + 
api_config_source->set_api_type(envoy::config::core::v3::ApiConfigSource::GRPC); + api_config_source->set_transport_api_version(envoy::config::core::v3::ApiVersion::V3); + if (rate_limit) { + api_config_source->mutable_rate_limit_settings()->mutable_max_tokens()->set_value(10); + } + auto* grpc_service = api_config_source->add_grpc_services(); + setGrpcService(*grpc_service, "ecds_cluster", getEcdsFakeUpstream().localAddress()); + // keep router the last + auto size = http_connection_manager.http_filters_size(); + http_connection_manager.mutable_http_filters()->SwapElements(size - 2, size - 1); + }); + } + + void initialize() override { + defer_listener_finalization_ = true; + setUpstreamCount(1); + // Add an xDS cluster for extension config discovery. + config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + auto* ecds_cluster = bootstrap.mutable_static_resources()->add_clusters(); + ecds_cluster->MergeFrom(bootstrap.static_resources().clusters()[0]); + ecds_cluster->set_name("ecds_cluster"); + ecds_cluster->mutable_http2_protocol_options(); + }); + // Make HCM do a direct response to avoid timing issues with the upstream. 
+ config_helper_.addConfigModifier( + [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + http_connection_manager) { + http_connection_manager.mutable_route_config() + ->mutable_virtual_hosts(0) + ->mutable_routes(0) + ->mutable_direct_response() + ->set_status(200); + }); + HttpIntegrationTest::initialize(); + } + + ~ExtensionDiscoveryIntegrationTest() override { + if (ecds_connection_ != nullptr) { + AssertionResult result = ecds_connection_->close(); + RELEASE_ASSERT(result, result.message()); + result = ecds_connection_->waitForDisconnect(); + RELEASE_ASSERT(result, result.message()); + ecds_connection_.reset(); + } + } + + void createUpstreams() override { + HttpIntegrationTest::createUpstreams(); + // Create the extension config discovery upstream (fake_upstreams_[1]). + fake_upstreams_.emplace_back(new FakeUpstream(0, FakeHttpConnection::Type::HTTP2, version_, + timeSystem(), enable_half_close_)); + for (auto& upstream : fake_upstreams_) { + upstream->set_allow_unexpected_disconnects(true); + } + } + + void waitXdsStream() { + auto& upstream = getEcdsFakeUpstream(); + AssertionResult result = upstream.waitForHttpConnection(*dispatcher_, ecds_connection_); + RELEASE_ASSERT(result, result.message()); + result = ecds_connection_->waitForNewStream(*dispatcher_, ecds_stream_); + RELEASE_ASSERT(result, result.message()); + ecds_stream_->startGrpcStream(); + } + + void sendXdsResponse(const std::string& name, const std::string& version, + const std::string& yaml_config) { + envoy::service::discovery::v3::DiscoveryResponse response; + response.set_version_info(version); + response.set_type_url("type.googleapis.com/envoy.config.core.v3.TypedExtensionConfig"); + const auto configuration = + TestUtility::parseYaml( + yaml_config); + envoy::config::core::v3::TypedExtensionConfig typed_config; + typed_config.set_name(name); + typed_config.mutable_typed_config()->PackFrom(configuration); + 
response.add_resources()->PackFrom(typed_config); + ecds_stream_->sendGrpcMessage(response); + } + + FakeUpstream& getEcdsFakeUpstream() const { return *fake_upstreams_[1]; } + + FakeHttpConnectionPtr ecds_connection_{nullptr}; + FakeStreamPtr ecds_stream_{nullptr}; +}; + +INSTANTIATE_TEST_SUITE_P(IpVersionsClientType, ExtensionDiscoveryIntegrationTest, + GRPC_CLIENT_INTEGRATION_PARAMS); + +TEST_P(ExtensionDiscoveryIntegrationTest, BasicSuccess) { + on_server_init_function_ = [&]() { waitXdsStream(); }; + addDynamicFilter("foo", false); + initialize(); + test_server_->waitForCounterGe("listener_manager.lds.update_success", 1); + EXPECT_EQ(test_server_->server().initManager().state(), Init::Manager::State::Initializing); + registerTestServerPorts({"http"}); + sendXdsResponse("foo", "1", denyPrivateConfig()); + test_server_->waitForCounterGe("http.config_test.extension_config_discovery.foo.config_reload", + 1); + test_server_->waitUntilListenersReady(); + test_server_->waitForGaugeGe("listener_manager.workers_started", 1); + EXPECT_EQ(test_server_->server().initManager().state(), Init::Manager::State::Initialized); + codec_client_ = makeHttpConnection(makeClientConnection((lookupPort("http")))); + { + Http::TestRequestHeaderMapImpl request_headers{ + {":method", "GET"}, {":path", "/"}, {":scheme", "http"}, {":authority", "host"}}; + auto response = codec_client_->makeHeaderOnlyRequest(request_headers); + response->waitForEndStream(); + ASSERT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().getStatusValue()); + } + Http::TestRequestHeaderMapImpl banned_request_headers{ + {":method", "GET"}, {":path", "/private/key"}, {":scheme", "http"}, {":authority", "host"}}; + { + auto response = codec_client_->makeHeaderOnlyRequest(banned_request_headers); + response->waitForEndStream(); + ASSERT_TRUE(response->complete()); + EXPECT_EQ("403", response->headers().getStatusValue()); + } + // Update again but keep the connection. 
+ { + sendXdsResponse("foo", "2", allowAllConfig()); + test_server_->waitForCounterGe("http.config_test.extension_config_discovery.foo.config_reload", + 2); + auto response = codec_client_->makeHeaderOnlyRequest(banned_request_headers); + response->waitForEndStream(); + ASSERT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().getStatusValue()); + } +} + +TEST_P(ExtensionDiscoveryIntegrationTest, BasicFailWithDefault) { + on_server_init_function_ = [&]() { waitXdsStream(); }; + addDynamicFilter("foo", false); + initialize(); + test_server_->waitForCounterGe("listener_manager.lds.update_success", 1); + EXPECT_EQ(test_server_->server().initManager().state(), Init::Manager::State::Initializing); + registerTestServerPorts({"http"}); + sendXdsResponse("foo", "1", invalidConfig()); + test_server_->waitForCounterGe("http.config_test.extension_config_discovery.foo.config_fail", 1); + test_server_->waitUntilListenersReady(); + test_server_->waitForGaugeGe("listener_manager.workers_started", 1); + EXPECT_EQ(test_server_->server().initManager().state(), Init::Manager::State::Initialized); + codec_client_ = makeHttpConnection(makeClientConnection((lookupPort("http")))); + Http::TestRequestHeaderMapImpl request_headers{ + {":method", "GET"}, {":path", "/"}, {":scheme", "http"}, {":authority", "host"}}; + auto response = codec_client_->makeHeaderOnlyRequest(request_headers); + response->waitForEndStream(); + ASSERT_TRUE(response->complete()); + EXPECT_EQ("403", response->headers().getStatusValue()); +} + +TEST_P(ExtensionDiscoveryIntegrationTest, BasicFailWithoutDefault) { + on_server_init_function_ = [&]() { waitXdsStream(); }; + addDynamicFilter("foo", false, false); + initialize(); + test_server_->waitForCounterGe("listener_manager.lds.update_success", 1); + EXPECT_EQ(test_server_->server().initManager().state(), Init::Manager::State::Initializing); + registerTestServerPorts({"http"}); + sendXdsResponse("foo", "1", invalidConfig()); + 
test_server_->waitForCounterGe("http.config_test.extension_config_discovery.foo.config_fail", 1); + test_server_->waitUntilListenersReady(); + test_server_->waitForGaugeGe("listener_manager.workers_started", 1); + EXPECT_EQ(test_server_->server().initManager().state(), Init::Manager::State::Initialized); + codec_client_ = makeHttpConnection(makeClientConnection((lookupPort("http")))); + Http::TestRequestHeaderMapImpl request_headers{ + {":method", "GET"}, {":path", "/"}, {":scheme", "http"}, {":authority", "host"}}; + auto response = codec_client_->makeHeaderOnlyRequest(request_headers); + response->waitForEndStream(); + ASSERT_TRUE(response->complete()); + EXPECT_EQ("500", response->headers().getStatusValue()); +} + +TEST_P(ExtensionDiscoveryIntegrationTest, BasicWithoutWarming) { + on_server_init_function_ = [&]() { waitXdsStream(); }; + addDynamicFilter("bar", true); + initialize(); + test_server_->waitForCounterGe("listener_manager.lds.update_success", 1); + EXPECT_EQ(test_server_->server().initManager().state(), Init::Manager::State::Initialized); + registerTestServerPorts({"http"}); + test_server_->waitUntilListenersReady(); + test_server_->waitForGaugeGe("listener_manager.workers_started", 1); + codec_client_ = makeHttpConnection(makeClientConnection((lookupPort("http")))); + // Initial request uses the default config. + Http::TestRequestHeaderMapImpl request_headers{ + {":method", "GET"}, {":path", "/"}, {":scheme", "http"}, {":authority", "host"}}; + { + auto response = codec_client_->makeHeaderOnlyRequest(request_headers); + response->waitForEndStream(); + ASSERT_TRUE(response->complete()); + EXPECT_EQ("403", response->headers().getStatusValue()); + } + + // Update should cause a different response. 
+ sendXdsResponse("bar", "1", denyPrivateConfig()); + test_server_->waitForCounterGe("http.config_test.extension_config_discovery.bar.config_reload", + 1); + { + auto response = codec_client_->makeHeaderOnlyRequest(request_headers); + response->waitForEndStream(); + ASSERT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().getStatusValue()); + } +} + +TEST_P(ExtensionDiscoveryIntegrationTest, BasicWithoutWarmingFail) { + on_server_init_function_ = [&]() { waitXdsStream(); }; + addDynamicFilter("bar", true); + initialize(); + test_server_->waitForCounterGe("listener_manager.lds.update_success", 1); + EXPECT_EQ(test_server_->server().initManager().state(), Init::Manager::State::Initialized); + registerTestServerPorts({"http"}); + test_server_->waitUntilListenersReady(); + test_server_->waitForGaugeGe("listener_manager.workers_started", 1); + codec_client_ = makeHttpConnection(makeClientConnection((lookupPort("http")))); + // Update should not cause a different response. + sendXdsResponse("bar", "1", invalidConfig()); + test_server_->waitForCounterGe("http.config_test.extension_config_discovery.bar.config_fail", 1); + Http::TestRequestHeaderMapImpl request_headers{ + {":method", "GET"}, {":path", "/"}, {":scheme", "http"}, {":authority", "host"}}; + auto response = codec_client_->makeHeaderOnlyRequest(request_headers); + response->waitForEndStream(); + ASSERT_TRUE(response->complete()); + EXPECT_EQ("403", response->headers().getStatusValue()); +} + +TEST_P(ExtensionDiscoveryIntegrationTest, BasicTwoSubscriptionsSameName) { + on_server_init_function_ = [&]() { waitXdsStream(); }; + addDynamicFilter("baz", true); + addDynamicFilter("baz", false); + initialize(); + test_server_->waitForCounterGe("listener_manager.lds.update_success", 1); + EXPECT_EQ(test_server_->server().initManager().state(), Init::Manager::State::Initializing); + registerTestServerPorts({"http"}); + sendXdsResponse("baz", "1", denyPrivateConfig()); + 
test_server_->waitForCounterGe("http.config_test.extension_config_discovery.baz.config_reload", + 1); + test_server_->waitUntilListenersReady(); + test_server_->waitForGaugeGe("listener_manager.workers_started", 1); + EXPECT_EQ(test_server_->server().initManager().state(), Init::Manager::State::Initialized); + codec_client_ = makeHttpConnection(makeClientConnection((lookupPort("http")))); + Http::TestRequestHeaderMapImpl request_headers{ + {":method", "GET"}, {":path", "/"}, {":scheme", "http"}, {":authority", "host"}}; + auto response = codec_client_->makeHeaderOnlyRequest(request_headers); + response->waitForEndStream(); + ASSERT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().getStatusValue()); +} + +TEST_P(ExtensionDiscoveryIntegrationTest, DestroyDuringInit) { + // If rate limiting is enabled on the config source, gRPC mux drainage updates the requests + // queue size on destruction. The update calls out to stats scope nested under the extension + // config subscription stats scope. This test verifies that the stats scope outlasts the gRPC + // subscription. 
+ on_server_init_function_ = [&]() { waitXdsStream(); }; + addDynamicFilter("foo", false, true); + initialize(); + test_server_->waitForCounterGe("listener_manager.lds.update_success", 1); + EXPECT_EQ(test_server_->server().initManager().state(), Init::Manager::State::Initializing); + test_server_.reset(); + auto result = ecds_connection_->waitForDisconnect(); + RELEASE_ASSERT(result, result.message()); + ecds_connection_.reset(); +} + +} // namespace +} // namespace Envoy diff --git a/test/integration/fake_upstream.cc b/test/integration/fake_upstream.cc index 8562edc01ca3b..b20ff0318398f 100644 --- a/test/integration/fake_upstream.cc +++ b/test/integration/fake_upstream.cc @@ -12,7 +12,9 @@ #include "common/common/fmt.h" #include "common/http/header_map_impl.h" #include "common/http/http1/codec_impl.h" +#include "common/http/http1/codec_impl_legacy.h" #include "common/http/http2/codec_impl.h" +#include "common/http/http2/codec_impl_legacy.h" #include "common/network/address_impl.h" #include "common/network/listen_socket_impl.h" #include "common/network/raw_buffer_socket.h" @@ -73,6 +75,10 @@ void FakeStream::decodeMetadata(Http::MetadataMapPtr&& metadata_map_ptr) { } } +void FakeStream::postToConnectionThread(std::function cb) { + parent_.connection().dispatcher().post(cb); +} + void FakeStream::encode100ContinueHeaders(const Http::ResponseHeaderMap& headers) { std::shared_ptr headers_copy( Http::createHeaderMap(headers)); @@ -217,12 +223,59 @@ void FakeStream::startGrpcStream() { } void FakeStream::finishGrpcStream(Grpc::Status::GrpcStatus status) { - encodeTrailers( - Http::TestHeaderMapImpl{{"grpc-status", std::to_string(static_cast(status))}}); + encodeTrailers(Http::TestResponseTrailerMapImpl{ + {"grpc-status", std::to_string(static_cast(status))}}); } +// The TestHttp1ServerConnectionImpl outlives its underlying Network::Connection +// so must not access the Connection on teardown. 
To achieve this, clear the +// read disable calls to avoid checking / editing the Connection blocked state. +class TestHttp1ServerConnectionImpl : public Http::Http1::ServerConnectionImpl { +public: + using Http::Http1::ServerConnectionImpl::ServerConnectionImpl; + + void onMessageComplete() override { + ServerConnectionImpl::onMessageComplete(); + + if (activeRequest().has_value() && activeRequest().value().request_decoder_) { + // Undo the read disable from the base class - we have many tests which + // waitForDisconnect after a full request has been read which will not + // receive the disconnect if reading is disabled. + activeRequest().value().response_encoder_.readDisable(false); + } + } + ~TestHttp1ServerConnectionImpl() override { + if (activeRequest().has_value()) { + activeRequest().value().response_encoder_.clearReadDisableCallsForTests(); + } + } +}; + +namespace Legacy { +class TestHttp1ServerConnectionImpl : public Http::Legacy::Http1::ServerConnectionImpl { +public: + using Http::Legacy::Http1::ServerConnectionImpl::ServerConnectionImpl; + + void onMessageComplete() override { + ServerConnectionImpl::onMessageComplete(); + + if (activeRequest().has_value() && activeRequest().value().request_decoder_) { + // Undo the read disable from the base class - we have many tests which + // waitForDisconnect after a full request has been read which will not + // receive the disconnect if reading is disabled. 
+ activeRequest().value().response_encoder_.readDisable(false); + } + } + ~TestHttp1ServerConnectionImpl() override { + if (activeRequest().has_value()) { + activeRequest().value().response_encoder_.clearReadDisableCallsForTests(); + } + } +}; +} // namespace Legacy + FakeHttpConnection::FakeHttpConnection( - SharedConnectionWrapper& shared_connection, Stats::Store& store, Type type, + FakeUpstream& fake_upstream, SharedConnectionWrapper& shared_connection, Type type, Event::TestTimeSystem& time_system, uint32_t max_request_headers_kb, uint32_t max_request_headers_count, envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction @@ -232,26 +285,42 @@ FakeHttpConnection::FakeHttpConnection( Http::Http1Settings http1_settings; // For the purpose of testing, we always have the upstream encode the trailers if any http1_settings.enable_trailers_ = true; - codec_ = std::make_unique( - shared_connection_.connection(), store, *this, http1_settings, max_request_headers_kb, + Http::Http1::CodecStats& stats = fake_upstream.http1CodecStats(); +#ifdef ENVOY_USE_NEW_CODECS_IN_INTEGRATION_TESTS + codec_ = std::make_unique( + shared_connection_.connection(), stats, *this, http1_settings, max_request_headers_kb, + max_request_headers_count, headers_with_underscores_action); +#else + codec_ = std::make_unique( + shared_connection_.connection(), stats, *this, http1_settings, max_request_headers_kb, max_request_headers_count, headers_with_underscores_action); +#endif } else { envoy::config::core::v3::Http2ProtocolOptions http2_options = ::Envoy::Http2::Utility::initializeAndValidateOptions( envoy::config::core::v3::Http2ProtocolOptions()); http2_options.set_allow_connect(true); http2_options.set_allow_metadata(true); + Http::Http2::CodecStats& stats = fake_upstream.http2CodecStats(); +#ifdef ENVOY_USE_NEW_CODECS_IN_INTEGRATION_TESTS codec_ = std::make_unique( - shared_connection_.connection(), *this, store, http2_options, max_request_headers_kb, + 
shared_connection_.connection(), *this, stats, http2_options, max_request_headers_kb, + max_request_headers_count, headers_with_underscores_action); +#else + codec_ = std::make_unique( + shared_connection_.connection(), *this, stats, http2_options, max_request_headers_kb, max_request_headers_count, headers_with_underscores_action); +#endif ASSERT(type == Type::HTTP2); } - shared_connection_.connection().addReadFilter( Network::ReadFilterSharedPtr{new ReadFilter(*this)}); } AssertionResult FakeConnectionBase::close(std::chrono::milliseconds timeout) { + if (!shared_connection_.connected()) { + return AssertionSuccess(); + } return shared_connection_.executeOnDispatcher( [](Network::Connection& connection) { connection.close(Network::ConnectionCloseType::FlushWrite); @@ -450,7 +519,7 @@ bool FakeUpstream::createNetworkFilterChain(Network::Connection& connection, } auto connection_wrapper = std::make_unique(connection, allow_unexpected_disconnects_); - connection_wrapper->moveIntoListBack(std::move(connection_wrapper), new_connections_); + LinkedList::moveIntoListBack(std::move(connection_wrapper), new_connections_); upstream_event_.notifyOne(); return true; } @@ -498,7 +567,7 @@ AssertionResult FakeUpstream::waitForHttpConnection( return AssertionFailure() << "Got a new connection event, but didn't create a connection."; } connection = std::make_unique( - consumeConnection(), stats_store_, http_type_, time_system, max_request_headers_kb, + *this, consumeConnection(), http_type_, time_system, max_request_headers_kb, max_request_headers_count, headers_with_underscores_action); } VERIFY_ASSERTION(connection->initialize()); @@ -530,9 +599,9 @@ FakeUpstream::waitForHttpConnection(Event::Dispatcher& client_dispatcher, client_dispatcher.run(Event::Dispatcher::RunType::NonBlock); } else { connection = std::make_unique( - upstream.consumeConnection(), upstream.stats_store_, upstream.http_type_, - upstream.timeSystem(), Http::DEFAULT_MAX_REQUEST_HEADERS_KB, - 
Http::DEFAULT_MAX_HEADERS_COUNT, envoy::config::core::v3::HttpProtocolOptions::ALLOW); + upstream, upstream.consumeConnection(), upstream.http_type_, upstream.timeSystem(), + Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT, + envoy::config::core::v3::HttpProtocolOptions::ALLOW); lock.release(); VERIFY_ASSERTION(connection->initialize()); VERIFY_ASSERTION(connection->readDisable(false)); @@ -640,7 +709,7 @@ FakeRawConnection::waitForData(const std::function& da AssertionResult FakeRawConnection::write(const std::string& data, bool end_stream, milliseconds timeout) { return shared_connection_.executeOnDispatcher( - [&data, end_stream](Network::Connection& connection) { + [data, end_stream](Network::Connection& connection) { Buffer::OwnedImpl to_write(data); connection.write(to_write, end_stream); }, diff --git a/test/integration/fake_upstream.h b/test/integration/fake_upstream.h index 26379a3d31f49..0c23c42e4b418 100644 --- a/test/integration/fake_upstream.h +++ b/test/integration/fake_upstream.h @@ -19,6 +19,7 @@ #include "common/buffer/buffer_impl.h" #include "common/buffer/zero_copy_input_stream_impl.h" +#include "common/common/basic_resource_impl.h" #include "common/common/callback_impl.h" #include "common/common/linked_object.h" #include "common/common/lock_guard.h" @@ -26,9 +27,12 @@ #include "common/grpc/codec.h" #include "common/grpc/common.h" #include "common/http/exception.h" +#include "common/http/http1/codec_impl.h" +#include "common/http/http2/codec_impl.h" #include "common/network/connection_balancer_impl.h" #include "common/network/filter_impl.h" #include "common/network/listen_socket_impl.h" +#include "common/network/udp_default_writer_config.h" #include "common/stats/isolated_store_impl.h" #include "server/active_raw_udp_listener_config.h" @@ -38,7 +42,9 @@ #include "test/test_common/utility.h" namespace Envoy { + class FakeHttpConnection; +class FakeUpstream; /** * Provides a fake HTTP stream for integration testing. 
@@ -56,6 +62,11 @@ class FakeStream : public Http::RequestDecoder, Thread::LockGuard lock(lock_); return end_stream_; } + + // Execute a callback using the dispatcher associated with the FakeStream's connection. This + // allows execution of non-interrupted sequences of operations on the fake stream which may run + // into trouble if client-side events are interleaved. + void postToConnectionThread(std::function cb); void encode100ContinueHeaders(const Http::ResponseHeaderMap& headers); void encodeHeaders(const Http::HeaderMap& headers, bool end_stream); void encodeData(uint64_t size, bool end_stream); @@ -72,6 +83,25 @@ class FakeStream : public Http::RequestDecoder, Http::Http1StreamEncoderOptionsOptRef http1StreamEncoderOptions() { return encoder_.http1StreamEncoderOptions(); } + void + sendLocalReply(bool is_grpc_request, Http::Code code, absl::string_view body, + const std::function& /*modify_headers*/, + const absl::optional grpc_status, + absl::string_view /*details*/) override { + const bool is_head_request = + headers_ != nullptr && headers_->getMethodValue() == Http::Headers::get().MethodValues.Head; + Http::Utility::sendLocalReply( + false, + Http::Utility::EncodeFunctions( + {nullptr, + [&](Http::ResponseHeaderMapPtr&& headers, bool end_stream) -> void { + encoder_.encodeHeaders(*headers, end_stream); + }, + [&](Buffer::Instance& data, bool end_stream) -> void { + encoder_.encodeData(data, end_stream); + }}), + Http::Utility::LocalReplyData({is_grpc_request, code, body, grpc_status, is_head_request})); + } ABSL_MUST_USE_RESULT testing::AssertionResult @@ -173,8 +203,8 @@ class FakeStream : public Http::RequestDecoder, Event::TestTimeSystem& timeSystem() { return time_system_; } - Http::MetadataMap& metadata_map() { return metadata_map_; } - std::unordered_map& duplicated_metadata_key_count() { + Http::MetadataMap& metadataMap() { return metadata_map_; } + absl::node_hash_map& duplicatedMetadataKeyCount() { return duplicated_metadata_key_count_; } @@ 
-195,7 +225,7 @@ class FakeStream : public Http::RequestDecoder, bool add_served_by_header_{}; Event::TestTimeSystem& time_system_; Http::MetadataMap metadata_map_; - std::unordered_map duplicated_metadata_key_count_; + absl::node_hash_map duplicated_metadata_key_count_; bool received_data_{false}; }; @@ -270,7 +300,7 @@ class SharedConnectionWrapper : public Network::ConnectionCallbacks { return testing::AssertionSuccess(); } Thread::CondVar callback_ready_event; - bool unexpected_disconnect = false; + std::atomic unexpected_disconnect = false; connection_.dispatcher().post( [this, f, &callback_ready_event, &unexpected_disconnect]() -> void { // The use of connected() here, vs. !disconnected_, is because we want to use the lock_ @@ -420,8 +450,8 @@ class FakeHttpConnection : public Http::ServerConnectionCallbacks, public FakeCo public: enum class Type { HTTP1, HTTP2 }; - FakeHttpConnection(SharedConnectionWrapper& shared_connection, Stats::Store& store, Type type, - Event::TestTimeSystem& time_system, uint32_t max_request_headers_kb, + FakeHttpConnection(FakeUpstream& fake_upstream, SharedConnectionWrapper& shared_connection, + Type type, Event::TestTimeSystem& time_system, uint32_t max_request_headers_kb, uint32_t max_request_headers_count, envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction headers_with_underscores_action); @@ -438,7 +468,7 @@ class FakeHttpConnection : public Http::ServerConnectionCallbacks, public FakeCo // Http::ServerConnectionCallbacks Http::RequestDecoder& newStream(Http::ResponseEncoder& response_encoder, bool) override; - void onGoAway() override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } + void onGoAway(Http::GoAwayErrorCode) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } private: struct ReadFilter : public Network::ReadFilterBaseImpl { @@ -446,10 +476,10 @@ class FakeHttpConnection : public Http::ServerConnectionCallbacks, public FakeCo // Network::ReadFilter Network::FilterStatus onData(Buffer::Instance& data, bool) 
override { - try { - parent_.codec_->dispatch(data); - } catch (const Http::CodecProtocolException& e) { - ENVOY_LOG(debug, "FakeUpstream dispatch error: {}", e.what()); + Http::Status status = parent_.codec_->dispatch(data); + + if (Http::isCodecProtocolError(status)) { + ENVOY_LOG(debug, "FakeUpstream dispatch error: {}", status.message()); // We don't do a full stream shutdown like HCM, but just shutdown the // connection for now. read_filter_callbacks_->connection().close( @@ -617,6 +647,14 @@ class FakeUpstream : Logger::Loggable, // Stops the dispatcher loop and joins the listening thread. void cleanUp(); + Http::Http1::CodecStats& http1CodecStats() { + return Http::Http1::CodecStats::atomicGet(http1_codec_stats_, stats_store_); + } + + Http::Http2::CodecStats& http2CodecStats() { + return Http::Http2::CodecStats::atomicGet(http2_codec_stats_, stats_store_); + } + protected: Stats::IsolatedStoreImpl stats_store_; const FakeHttpConnection::Type http_type_; @@ -631,7 +669,7 @@ class FakeUpstream : Logger::Loggable, FakeListenSocketFactory(Network::SocketSharedPtr socket) : socket_(socket) {} // Network::ListenSocketFactory - Network::Address::SocketType socketType() const override { return socket_->socketType(); } + Network::Socket::Type socketType() const override { return socket_->socketType(); } const Network::Address::InstanceConstSharedPtr& localAddress() const override { return socket_->localAddress(); @@ -661,7 +699,8 @@ class FakeUpstream : Logger::Loggable, public: FakeListener(FakeUpstream& parent) : parent_(parent), name_("fake_upstream"), - udp_listener_factory_(std::make_unique()) {} + udp_listener_factory_(std::make_unique()), + udp_writer_factory_(std::make_unique()) {} private: // Network::ListenerConfig @@ -681,6 +720,9 @@ class FakeUpstream : Logger::Loggable, Network::ActiveUdpListenerFactory* udpListenerFactory() override { return udp_listener_factory_.get(); } + Network::UdpPacketWriterFactoryOptRef udpPacketWriterFactory() override { + 
return Network::UdpPacketWriterFactoryOptRef(std::ref(*udp_writer_factory_)); + } Network::ConnectionBalancer& connectionBalancer() override { return connection_balancer_; } envoy::config::core::v3::TrafficDirection direction() const override { return envoy::config::core::v3::UNSPECIFIED; @@ -688,11 +730,19 @@ class FakeUpstream : Logger::Loggable, const std::vector& accessLogs() const override { return empty_access_logs_; } + ResourceLimit& openConnections() override { return connection_resource_; } + + void setMaxConnections(const uint32_t num_connections) { + connection_resource_.setMax(num_connections); + } + void clearMaxConnections() { connection_resource_.resetMax(); } FakeUpstream& parent_; const std::string name_; Network::NopConnectionBalancerImpl connection_balancer_; const Network::ActiveUdpListenerFactoryPtr udp_listener_factory_; + const Network::UdpPacketWriterFactoryPtr udp_writer_factory_; + BasicResourceLimitImpl connection_resource_; const std::vector empty_access_logs_; }; @@ -723,6 +773,8 @@ class FakeUpstream : Logger::Loggable, FakeListener listener_; const Network::FilterChainSharedPtr filter_chain_; std::list received_datagrams_ ABSL_GUARDED_BY(lock_); + Http::Http1::CodecStats::AtomicPtr http1_codec_stats_; + Http::Http2::CodecStats::AtomicPtr http2_codec_stats_; }; using FakeUpstreamPtr = std::unique_ptr; diff --git a/test/integration/filter_manager_integration_test.cc b/test/integration/filter_manager_integration_test.cc index 5a9dbbc523664..0d1c55afa896b 100644 --- a/test/integration/filter_manager_integration_test.cc +++ b/test/integration/filter_manager_integration_test.cc @@ -410,11 +410,6 @@ class InjectDataToFilterChainIntegrationTest void SetUp() override { addAuxiliaryFilter(config_helper_); } - void TearDown() override { - test_server_.reset(); - fake_upstreams_.clear(); - } - protected: // Returns configuration for a given auxiliary filter std::string filterConfig(const std::string& auxiliary_filter_name) override { @@ -455,7 
+450,7 @@ TEST_P(InjectDataWithEchoFilterIntegrationTest, UsageOfInjectDataMethodsShouldBe initialize(); auto tcp_client = makeTcpConnection(lookupPort("listener_0")); - tcp_client->write("hello"); + ASSERT_TRUE(tcp_client->write("hello")); tcp_client->waitForData("hello"); tcp_client->close(); @@ -473,12 +468,12 @@ TEST_P(InjectDataWithEchoFilterIntegrationTest, FilterChainMismatch) { initialize(); auto tcp_client = makeTcpConnection(lookupPort("listener_0")); - tcp_client->write("hello"); + ASSERT_TRUE(tcp_client->write("hello", false, false)); std::string access_log = absl::StrCat("NR ", StreamInfo::ResponseCodeDetails::get().FilterChainNotFound); EXPECT_THAT(waitForAccessLog(listener_access_log_name_), testing::HasSubstr(access_log)); - tcp_client->close(); + tcp_client->waitForDisconnect(); } /** @@ -504,7 +499,7 @@ TEST_P(InjectDataWithTcpProxyFilterIntegrationTest, UsageOfInjectDataMethodsShou FakeRawConnectionPtr fake_upstream_connection; ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection)); - tcp_client->write("hello"); + ASSERT_TRUE(tcp_client->write("hello")); std::string observed_data; ASSERT_TRUE(fake_upstream_connection->waitForData(5, &observed_data)); @@ -513,7 +508,7 @@ TEST_P(InjectDataWithTcpProxyFilterIntegrationTest, UsageOfInjectDataMethodsShou ASSERT_TRUE(fake_upstream_connection->write("hi")); tcp_client->waitForData("hi"); - tcp_client->write(" world!", true); + ASSERT_TRUE(tcp_client->write(" world!", true)); observed_data.clear(); ASSERT_TRUE(fake_upstream_connection->waitForData(12, &observed_data)); EXPECT_EQ("hello world!", observed_data); @@ -603,7 +598,7 @@ TEST_P(InjectDataWithHttpConnectionManagerIntegrationTest, response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ("greetings", response->body()); } diff --git a/test/integration/filters/BUILD 
b/test/integration/filters/BUILD index b26d5ffb1835d..197dfc897cfce 100644 --- a/test/integration/filters/BUILD +++ b/test/integration/filters/BUILD @@ -1,13 +1,44 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test_library", "envoy_package", + "envoy_proto_library", ) +licenses(["notice"]) # Apache 2 + envoy_package() +envoy_cc_test_library( + name = "add_body_filter_config_lib", + srcs = [ + "add_body_filter.cc", + ], + deps = [ + ":common_lib", + "//include/envoy/http:filter_interface", + "//include/envoy/registry", + "//include/envoy/server:filter_config_interface", + "//source/extensions/filters/http/common:pass_through_filter_lib", + "//test/extensions/filters/http/common:empty_http_filter_config_lib", + ], +) + +envoy_cc_test_library( + name = "wait_for_whole_request_and_response_config_lib", + srcs = [ + "wait_for_whole_request_and_response.cc", + ], + deps = [ + ":common_lib", + "//include/envoy/http:filter_interface", + "//include/envoy/registry", + "//include/envoy/server:filter_config_interface", + "//source/extensions/filters/http/common:pass_through_filter_lib", + "//test/extensions/filters/http/common:empty_http_filter_config_lib", + ], +) + envoy_cc_test_library( name = "add_trailers_filter_config_lib", srcs = [ @@ -22,6 +53,20 @@ envoy_cc_test_library( ], ) +envoy_cc_test_library( + name = "backpressure_filter_config_lib", + srcs = [ + "backpressure_filter.cc", + ], + deps = [ + "//include/envoy/http:filter_interface", + "//include/envoy/registry", + "//include/envoy/server:filter_config_interface", + "//source/extensions/filters/http/common:pass_through_filter_lib", + "//test/extensions/filters/http/common:empty_http_filter_config_lib", + ], +) + envoy_cc_test_library( name = "clear_route_cache_filter_lib", srcs = [ @@ -128,6 +173,25 @@ envoy_cc_test_library( ], ) +envoy_cc_test_library( + name = "set_response_code_filter_lib", + srcs = [ + "set_response_code_filter.cc", + ], + deps = [ + 
":set_response_code_filter_config_proto_cc_proto", + "//include/envoy/http:filter_interface", + "//include/envoy/registry", + "//source/extensions/filters/http/common:factory_base_lib", + "//source/extensions/filters/http/common:pass_through_filter_lib", + ], +) + +envoy_proto_library( + name = "set_response_code_filter_config_proto", + srcs = [":set_response_code_filter_config.proto"], +) + envoy_cc_test_library( name = "stop_iteration_and_continue", srcs = [ diff --git a/test/integration/filters/add_body_filter.cc b/test/integration/filters/add_body_filter.cc new file mode 100644 index 0000000000000..c319f0f5f7295 --- /dev/null +++ b/test/integration/filters/add_body_filter.cc @@ -0,0 +1,49 @@ +#include + +#include "envoy/http/filter.h" +#include "envoy/registry/registry.h" +#include "envoy/server/filter_config.h" + +#include "common/buffer/buffer_impl.h" + +#include "extensions/filters/http/common/pass_through_filter.h" + +#include "test/extensions/filters/http/common/empty_http_filter_config.h" +#include "test/integration/filters/common.h" + +namespace Envoy { + +// A test filter that inserts body to a header only request/response. 
+class AddBodyStreamFilter : public Http::PassThroughFilter { +public: + constexpr static char name[] = "add-body-filter"; + + Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap& headers, + bool end_stream) override { + if (end_stream) { + Buffer::OwnedImpl body("body"); + headers.setContentLength(body.length()); + decoder_callbacks_->addDecodedData(body, false); + } + + return Http::FilterHeadersStatus::Continue; + } + + Http::FilterHeadersStatus encodeHeaders(Http::ResponseHeaderMap& headers, + bool end_stream) override { + if (end_stream) { + Buffer::OwnedImpl body("body"); + headers.setContentLength(body.length()); + encoder_callbacks_->addEncodedData(body, false); + } + + return Http::FilterHeadersStatus::Continue; + } +}; + +constexpr char AddBodyStreamFilter::name[]; + +static Registry::RegisterFactory, + Server::Configuration::NamedHttpFilterConfigFactory> + encoder_register_; +} // namespace Envoy diff --git a/test/integration/filters/backpressure_filter.cc b/test/integration/filters/backpressure_filter.cc new file mode 100644 index 0000000000000..1d6f8ce92be5c --- /dev/null +++ b/test/integration/filters/backpressure_filter.cc @@ -0,0 +1,42 @@ +#include + +#include "envoy/http/filter.h" +#include "envoy/registry/registry.h" +#include "envoy/server/filter_config.h" + +#include "extensions/filters/http/common/pass_through_filter.h" + +#include "test/extensions/filters/http/common/empty_http_filter_config.h" + +namespace Envoy { + +// A filter that buffers the entire request/response, then doubles +// the content of the filter buffer. 
+class BackpressureFilter : public Http::PassThroughFilter { +public: + void onDestroy() override { decoder_callbacks_->onDecoderFilterBelowWriteBufferLowWatermark(); } + + Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap&, bool) override { + decoder_callbacks_->onDecoderFilterAboveWriteBufferHighWatermark(); + return Http::FilterHeadersStatus::Continue; + } +}; + +class BackpressureConfig : public Extensions::HttpFilters::Common::EmptyHttpFilterConfig { +public: + BackpressureConfig() : EmptyHttpFilterConfig("backpressure-filter") {} + + Http::FilterFactoryCb createFilter(const std::string&, + Server::Configuration::FactoryContext&) override { + return [](Http::FilterChainFactoryCallbacks& callbacks) -> void { + callbacks.addStreamFilter(std::make_shared<::Envoy::BackpressureFilter>()); + }; + } +}; + +// perform static registration +static Registry::RegisterFactory + register_; + +} // namespace Envoy diff --git a/test/integration/filters/pause_filter.cc b/test/integration/filters/pause_filter.cc index 45eaeefbb593e..b7f9aa1a3c360 100644 --- a/test/integration/filters/pause_filter.cc +++ b/test/integration/filters/pause_filter.cc @@ -30,7 +30,7 @@ class TestPauseFilter : public Http::PassThroughFilter { // If this is the second stream to decode headers and we're at high watermark. force low // watermark state if (number_of_decode_calls_ref_ == 2 && connection()->aboveHighWatermark()) { - connection()->onLowWatermark(); + connection()->onWriteBufferLowWatermark(); } } return PassThroughFilter::decodeData(buf, end_stream); @@ -43,7 +43,7 @@ class TestPauseFilter : public Http::PassThroughFilter { // If this is the first stream to encode headers and we're not at high watermark, force high // watermark state. 
if (number_of_encode_calls_ref_ == 1 && !connection()->aboveHighWatermark()) { - connection()->onHighWatermark(); + connection()->onWriteBufferHighWatermark(); } } return PassThroughFilter::encodeData(buf, end_stream); diff --git a/test/integration/filters/random_pause_filter.cc b/test/integration/filters/random_pause_filter.cc index 6b883bef5341e..e1a370ca015ca 100644 --- a/test/integration/filters/random_pause_filter.cc +++ b/test/integration/filters/random_pause_filter.cc @@ -25,9 +25,9 @@ class RandomPauseFilter : public Http::PassThroughFilter { // Roughly every 5th encode (5 being arbitrary) swap the watermark state. if (random % 5 == 0) { if (connection()->aboveHighWatermark()) { - connection()->onLowWatermark(); + connection()->onWriteBufferLowWatermark(); } else { - connection()->onHighWatermark(); + connection()->onWriteBufferHighWatermark(); } } return Http::PassThroughFilter::encodeData(buf, end_stream); diff --git a/test/integration/filters/set_response_code_filter.cc b/test/integration/filters/set_response_code_filter.cc new file mode 100644 index 0000000000000..28653c0ba080b --- /dev/null +++ b/test/integration/filters/set_response_code_filter.cc @@ -0,0 +1,64 @@ +#include + +#include "envoy/http/filter.h" +#include "envoy/registry/registry.h" + +#include "extensions/filters/http/common/factory_base.h" +#include "extensions/filters/http/common/pass_through_filter.h" + +#include "test/integration/filters/set_response_code_filter_config.pb.h" +#include "test/integration/filters/set_response_code_filter_config.pb.validate.h" + +#include "absl/strings/match.h" + +namespace Envoy { + +// A test filter that responds directly with a code on a prefix match. 
+class SetResponseCodeFilterConfig { +public: + SetResponseCodeFilterConfig(const std::string& prefix, uint32_t code, + Server::Configuration::FactoryContext& context) + : prefix_(prefix), code_(code), tls_slot_(context.threadLocal().allocateSlot()) {} + + const std::string prefix_; + const uint32_t code_; + // Allocate a slot to validate that it is destroyed on a main thread only. + ThreadLocal::SlotPtr tls_slot_; +}; + +class SetResponseCodeFilter : public Http::PassThroughFilter { +public: + SetResponseCodeFilter(std::shared_ptr config) : config_(config) {} + + Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap& headers, bool) override { + if (absl::StartsWith(headers.Path()->value().getStringView(), config_->prefix_)) { + decoder_callbacks_->sendLocalReply(static_cast(config_->code_), "", nullptr, + absl::nullopt, ""); + return Http::FilterHeadersStatus::StopIteration; + } + return Http::FilterHeadersStatus::Continue; + } + +private: + const std::shared_ptr config_; +}; + +class SetResponseCodeFilterFactory : public Extensions::HttpFilters::Common::FactoryBase< + test::integration::filters::SetResponseCodeFilterConfig> { +public: + SetResponseCodeFilterFactory() : FactoryBase("set-response-code-filter") {} + +private: + Http::FilterFactoryCb createFilterFactoryFromProtoTyped( + const test::integration::filters::SetResponseCodeFilterConfig& proto_config, + const std::string&, Server::Configuration::FactoryContext& context) override { + auto filter_config = std::make_shared( + proto_config.prefix(), proto_config.code(), context); + return [filter_config](Http::FilterChainFactoryCallbacks& callbacks) -> void { + callbacks.addStreamFilter(std::make_shared(filter_config)); + }; + } +}; + +REGISTER_FACTORY(SetResponseCodeFilterFactory, Server::Configuration::NamedHttpFilterConfigFactory); +} // namespace Envoy diff --git a/test/integration/filters/set_response_code_filter_config.proto b/test/integration/filters/set_response_code_filter_config.proto new 
file mode 100644 index 0000000000000..f952981ab7a42 --- /dev/null +++ b/test/integration/filters/set_response_code_filter_config.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; + +package test.integration.filters; + +import "validate/validate.proto"; + +message SetResponseCodeFilterConfig { + string prefix = 1; + uint32 code = 2 [(validate.rules).uint32 = {lt: 600 gte: 200}]; +} diff --git a/test/integration/filters/wait_for_whole_request_and_response.cc b/test/integration/filters/wait_for_whole_request_and_response.cc new file mode 100644 index 0000000000000..c9fd346073250 --- /dev/null +++ b/test/integration/filters/wait_for_whole_request_and_response.cc @@ -0,0 +1,52 @@ +#include + +#include "envoy/http/filter.h" +#include "envoy/registry/registry.h" +#include "envoy/server/filter_config.h" + +#include "extensions/filters/http/common/pass_through_filter.h" + +#include "test/extensions/filters/http/common/empty_http_filter_config.h" +#include "test/integration/filters/common.h" + +namespace Envoy { + +// A test filter that waits for the request/response to finish before continuing. 
+class WaitForWholeRequestAndResponseStreamFilter : public Http::PassThroughFilter { +public: + constexpr static char name[] = "wait-for-whole-request-and-response-filter"; + + Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap&, bool end_stream) override { + if (end_stream) { + return Http::FilterHeadersStatus::Continue; + } + return Http::FilterHeadersStatus::StopIteration; + } + Http::FilterDataStatus decodeData(Buffer::Instance&, bool end_stream) override { + if (end_stream) { + return Http::FilterDataStatus::Continue; + } + return Http::FilterDataStatus::StopIterationAndBuffer; + } + + Http::FilterHeadersStatus encodeHeaders(Http::ResponseHeaderMap&, bool end_stream) override { + if (end_stream) { + return Http::FilterHeadersStatus::Continue; + } + return Http::FilterHeadersStatus::StopIteration; + } + + Http::FilterDataStatus encodeData(Buffer::Instance&, bool end_stream) override { + if (end_stream) { + return Http::FilterDataStatus::Continue; + } + return Http::FilterDataStatus::StopIterationAndBuffer; + } +}; + +constexpr char WaitForWholeRequestAndResponseStreamFilter::name[]; + +static Registry::RegisterFactory, + Server::Configuration::NamedHttpFilterConfigFactory> + encoder_register_; +} // namespace Envoy diff --git a/test/integration/h1_corpus/stream_info_destructor b/test/integration/h1_corpus/stream_info_destructor new file mode 100644 index 0000000000000..63f9a21a8fb73 --- /dev/null +++ b/test/integration/h1_corpus/stream_info_destructor @@ -0,0 +1,24 @@ +events { + downstream_send_bytes: "POST /test/long/url HTTP/1.1\r\nhost: host\r\nx-lyft-user-id: -063%\nuser-agent: /4302450943\n\t\t08856android\363\243x-lyft-user-id: -063%\nuser-agent: /4;02450943\n\t\t08856android\363\243\201$80\n\t\t\t\n\t\t\t\tAe1\201\24180\n\t\t\t\n\t\t\t\tAe118\tefts " +} +events { +} +events { + downstream_send_bytes: "POST //urk HTTP/1.1\r\nshhfot: ost\r\n -253%\nuser-agent: /0%\nuser-agent: 
/430%\nuser-agent:4967:18446744073709551615iOS~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~>~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\201~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~a~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~-7749978774642053139~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~|~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt
tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt
ttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttuttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt
tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt
tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt\326Uttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt
tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt
tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt
ttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt-4017153681670550988ttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt
ttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt|tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt
tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttstttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt\364tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt
tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt
tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt
ttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt\364ttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt
tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt
tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt
tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt
tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttaaaaaaaaaaaaaaaaaaaaaaa-6742158280474489582aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n\r\n" +} +events { +} +events { +} +events { + 
downstream_send_bytes: "POST /test/lon\nte: e: h" +} +events { +} +events { + downstream_send_bytes: "POST //urk HTTP/1.1\r\nshhfot: ost\r\n -253%\nuser-agent: /0%\nuser-agent: /430%\nuser-agent:4967:18446744073709551615iOS~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~>~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\201~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~a~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~-7749978774642053139~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~|~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt
tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt
tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt
tttttttttttuttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt + +ttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt
ttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt\326Utttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt
tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt
tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt
tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt-4017153681670550988tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt
tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt|ttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt
ttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttstttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt\364ttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt
tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt
tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt
tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt\364tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt
tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt
tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt
tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt
ttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttaaaaaaaaaaaaaaaaaaaaaaa-6742158280474489582aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n\r\n" +} +events { +} \ No newline at end of file diff --git a/test/integration/h1_fuzz.cc b/test/integration/h1_fuzz.cc index f1abeb9481a1f..3fe7886bd9709 100644 --- a/test/integration/h1_fuzz.cc +++ b/test/integration/h1_fuzz.cc @@ -30,7 +30,7 @@ void H1FuzzIntegrationTest::replay(const test::integration::CaptureFuzzTestCase& } switch (event.event_selector_case()) { case test::integration::Event::kDownstreamSendBytes: - tcp_client->write(event.downstream_send_bytes(), false, false); + ASSERT_TRUE(tcp_client->write(event.downstream_send_bytes(), false, false)); break; case test::integration::Event::kDownstreamRecvBytes: // TODO(htuch): Should we wait for some data? diff --git a/test/integration/h2_capture_direct_response_fuzz_test.cc b/test/integration/h2_capture_direct_response_fuzz_test.cc new file mode 100644 index 0000000000000..78bd2d3f6ef7c --- /dev/null +++ b/test/integration/h2_capture_direct_response_fuzz_test.cc @@ -0,0 +1,42 @@ +#include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" + +#include "test/integration/h2_fuzz.h" + +namespace Envoy { + +void H2FuzzIntegrationTest::initialize() { + const std::string body = "Response body"; + const std::string file_path = TestEnvironment::writeStringToFileForTest("test_envoy", body); + const std::string prefix("/"); + const Http::Code status(Http::Code::OK); + + setDownstreamProtocol(Http::CodecClient::Type::HTTP2); + setUpstreamProtocol(FakeHttpConnection::Type::HTTP2); + + config_helper_.addConfigModifier( + [&file_path, &prefix]( + envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) -> void { + auto* route_config = hcm.mutable_route_config(); + // adding direct response mode to the default route + auto* default_route = + hcm.mutable_route_config()->mutable_virtual_hosts(0)->mutable_routes(0); 
+ default_route->mutable_match()->set_prefix(prefix); + default_route->mutable_direct_response()->set_status(static_cast(status)); + default_route->mutable_direct_response()->mutable_body()->set_filename(file_path); + // adding headers to the default route + auto* header_value_option = route_config->mutable_response_headers_to_add()->Add(); + header_value_option->mutable_header()->set_value("direct-response-enabled"); + header_value_option->mutable_header()->set_key("x-direct-response-header"); + }); + HttpIntegrationTest::initialize(); +} + +DEFINE_PROTO_FUZZER(const test::integration::H2CaptureFuzzTestCase& input) { + RELEASE_ASSERT(!TestEnvironment::getIpVersionsForTest().empty(), ""); + const auto ip_version = TestEnvironment::getIpVersionsForTest()[0]; + PERSISTENT_FUZZ_VAR H2FuzzIntegrationTest h2_fuzz_integration_test(ip_version); + h2_fuzz_integration_test.replay(input, true); +} + +} // namespace Envoy diff --git a/test/integration/h2_capture_fuzz.proto b/test/integration/h2_capture_fuzz.proto new file mode 100644 index 0000000000000..cbf5f6702367e --- /dev/null +++ b/test/integration/h2_capture_fuzz.proto @@ -0,0 +1,165 @@ +syntax = "proto3"; + +package test.integration; + +message H2FramePing { + enum Flags { + NONE = 0; + ACK = 1; + } + Flags flags = 1; + bytes data = 2; +} + +message H2FrameSettings { + enum Flags { + NONE = 0; + ACK = 1; + } + Flags flags = 1; +} + +enum H2HeadersFlags { + NONE = 0; + END_STREAM = 1; + END_HEADERS = 4; +} + +message H2FrameHeaders { + repeated H2HeadersFlags flags = 1; + uint32 stream_index = 2; +} + +message H2FrameContinuation { + repeated H2HeadersFlags flags = 1; + uint32 stream_index = 2; +} + +message H2FrameData { + enum Flags { + NONE = 0; + END_STREAM = 1; + } + Flags flags = 1; + uint32 stream_index = 2; +} + +message H2FramePriority { + uint32 stream_index = 1; + uint32 dependent_index = 2; +} + +// These map to the errors defined in: https://tools.ietf.org/html/rfc7540#section-7 +enum H2ErrorCode { + 
NO_ERROR = 0; + PROTOCOL_ERROR = 1; + INTERNAL_ERROR = 2; + FLOW_CONTROL_ERROR = 3; + SETTINGS_TIMEOUT = 4; + STREAM_CLOSED = 5; + FRAME_SIZE_ERROR = 6; + REFUSED_STREAM = 7; + CANCEL = 8; + COMPRESSION_ERROR = 9; + CONNECT_ERROR = 10; + ENHANCE_YOUR_CLAIM = 11; + INADEQUATE_SECURITY = 12; + HTTP_1_1_REQUIRED = 13; +} + +message H2FramePushPromise { + repeated H2HeadersFlags flags = 1; + uint32 stream_index = 2; + uint32 promised_stream_index = 3; +} + +message H2FrameResetStream { + uint32 stream_index = 1; + H2ErrorCode error_code = 2; +} + +message H2FrameGoAway { + uint32 last_stream_index = 1; + H2ErrorCode error_code = 2; +} + +message H2FrameWindowUpdate { + uint32 stream_index = 1; + uint32 increment = 2; +} + +// A header that contains invalid status +message H2FrameMalformedRequest { + uint32 stream_index = 1; +} + +// A request that is comprised of a header that has HTTP GET request with a given host and path and +// an additional zero length header (making this a malformed request) +message H2FrameMalformedRequestWithZerolenHeader { + uint32 stream_index = 1; + string host = 2; + string path = 3; +} + +// A request that is comprised of a header that has HTTP GET request with a given host and path +message H2FrameRequest { + uint32 stream_index = 1; + string host = 2; + string path = 3; +} + +// A request that is comprised of a header that has HTTP POST request with a given host and path +message H2FramePostRequest { + uint32 stream_index = 1; + string host = 2; + string path = 3; +} + +// A generic frame to emit a malformed frame +message H2FrameGeneric { + bytes frame_bytes = 1; +} + +message H2TestFrame { + // These values map to the frame creation methods in: + // test/common/http/http2/http2_frame.h + oneof frame_type { + H2FramePing ping = 1; + H2FrameSettings settings = 2; + H2FrameHeaders headers = 3; + H2FrameContinuation continuation = 4; + H2FrameData data = 5; + H2FramePriority priority = 6; + H2FramePushPromise push_promise = 7; + 
H2FrameResetStream reset_stream = 8; + H2FrameGoAway go_away = 9; + H2FrameWindowUpdate window_update = 10; + H2FrameMalformedRequest malformed_request = 11; + H2FrameMalformedRequestWithZerolenHeader malformed_request_with_zerolen_header = 12; + H2FrameRequest request = 13; + H2FramePostRequest post_request = 14; + H2FrameGeneric generic = 15; + } +} + +message DownstreamSendEvent { + repeated H2TestFrame h2_frames = 1; +} + +message UpstreamSendEvent { + repeated H2TestFrame h2_frames = 1; +} + +message Event { + oneof event_selector { + // Downstream sent given frames. + DownstreamSendEvent downstream_send_event = 1; + // Upstream sent given frames. + UpstreamSendEvent upstream_send_event = 2; + } +} + +// Test case in corpus for *_h2_capture_fuzz_test. +message H2CaptureFuzzTestCase { + repeated Event events = 1; +} diff --git a/test/integration/h2_capture_fuzz_test.cc b/test/integration/h2_capture_fuzz_test.cc new file mode 100644 index 0000000000000..f31da4f2345f4 --- /dev/null +++ b/test/integration/h2_capture_fuzz_test.cc @@ -0,0 +1,19 @@ +#include "test/integration/h2_fuzz.h" + +namespace Envoy { +void H2FuzzIntegrationTest::initialize() { + setDownstreamProtocol(Http::CodecClient::Type::HTTP2); + setUpstreamProtocol(FakeHttpConnection::Type::HTTP2); + + HttpIntegrationTest::initialize(); +} + +DEFINE_PROTO_FUZZER(const test::integration::H2CaptureFuzzTestCase& input) { + // Pick an IP version to use for loopback, it doesn't matter which. 
+ FUZZ_ASSERT(!TestEnvironment::getIpVersionsForTest().empty()); + const auto ip_version = TestEnvironment::getIpVersionsForTest()[0]; + PERSISTENT_FUZZ_VAR H2FuzzIntegrationTest h2_fuzz_integration_test(ip_version); + h2_fuzz_integration_test.replay(input, false); +} + +} // namespace Envoy diff --git a/test/integration/h2_corpus/simple_test b/test/integration/h2_corpus/simple_test new file mode 100644 index 0000000000000..8556a3bdd9834 --- /dev/null +++ b/test/integration/h2_corpus/simple_test @@ -0,0 +1,48 @@ +events { + downstream_send_event { + h2_frames { + settings { + flags: NONE + } + } + h2_frames { + settings { + flags: ACK + } + } + h2_frames { + request { + stream_index: 1 + host: "host" + path: "/path/to/long/url" + } + } + } +} +events { + upstream_send_event { + h2_frames { + settings { + flags: NONE + } + } + h2_frames { + settings { + flags: ACK + } + } + h2_frames { + headers { + flags: NONE + flags: END_STREAM + stream_index: 1 + } + } + h2_frames { + data { + flags: NONE + stream_index: 1 + } + } + } +} diff --git a/test/integration/h2_fuzz.cc b/test/integration/h2_fuzz.cc new file mode 100644 index 0000000000000..c0eeae08152e3 --- /dev/null +++ b/test/integration/h2_fuzz.cc @@ -0,0 +1,255 @@ +#include "test/integration/h2_fuzz.h" + +#include + +#include "common/common/assert.h" +#include "common/common/base64.h" +#include "common/common/logger.h" + +#include "test/test_common/environment.h" + +namespace Envoy { + +using namespace Envoy::Http::Http2; + +namespace { + +static Http2Frame::HeadersFlags +unifyHeadersFlags(const Protobuf::RepeatedField& headers_flags) { + int unified_flags = 0; + for (const auto& flag : headers_flags) { + unified_flags |= flag; + } + return static_cast(unified_flags); +} + +} // namespace + +void H2FuzzIntegrationTest::sendFrame(const test::integration::H2TestFrame& proto_frame, + std::function write_func) { + Http2Frame h2_frame; + switch (proto_frame.frame_type_case()) { + case 
test::integration::H2TestFrame::kPing: + ENVOY_LOG_MISC(trace, "Sending ping frame"); + h2_frame = Http2Frame::makePingFrame(proto_frame.ping().data()); + break; + case test::integration::H2TestFrame::kSettings: { + const Http2Frame::SettingsFlags settings_flags = + static_cast(proto_frame.settings().flags()); + ENVOY_LOG_MISC(trace, "Sending settings frame"); + h2_frame = Http2Frame::makeEmptySettingsFrame(settings_flags); + break; + } + case test::integration::H2TestFrame::kHeaders: { + const Http2Frame::HeadersFlags headers_flags = unifyHeadersFlags(proto_frame.headers().flags()); + const uint32_t stream_idx = proto_frame.headers().stream_index(); + ENVOY_LOG_MISC(trace, "Sending headers frame"); + h2_frame = Http2Frame::makeEmptyHeadersFrame(stream_idx, headers_flags); + break; + } + case test::integration::H2TestFrame::kContinuation: { + const Http2Frame::HeadersFlags headers_flags = + unifyHeadersFlags(proto_frame.continuation().flags()); + const uint32_t stream_idx = proto_frame.continuation().stream_index(); + ENVOY_LOG_MISC(trace, "Sending continuation frame"); + h2_frame = Http2Frame::makeEmptyContinuationFrame(stream_idx, headers_flags); + break; + } + case test::integration::H2TestFrame::kData: { + const Http2Frame::DataFlags data_flags = + static_cast(proto_frame.data().flags()); + const uint32_t stream_idx = proto_frame.data().stream_index(); + ENVOY_LOG_MISC(trace, "Sending data frame"); + h2_frame = Http2Frame::makeEmptyDataFrame(stream_idx, data_flags); + break; + } + case test::integration::H2TestFrame::kPriority: { + const uint32_t stream_idx = proto_frame.priority().stream_index(); + const uint32_t dependent_idx = proto_frame.priority().dependent_index(); + ENVOY_LOG_MISC(trace, "Sending priority frame"); + h2_frame = Http2Frame::makePriorityFrame(stream_idx, dependent_idx); + break; + } + case test::integration::H2TestFrame::kPushPromise: { + const Http2Frame::HeadersFlags headers_flags = + unifyHeadersFlags(proto_frame.push_promise().flags()); 
+ const uint32_t stream_idx = proto_frame.push_promise().stream_index(); + const uint32_t promised_stream_idx = proto_frame.push_promise().promised_stream_index(); + ENVOY_LOG_MISC(trace, "Sending push promise frame"); + h2_frame = + Http2Frame::makeEmptyPushPromiseFrame(stream_idx, promised_stream_idx, headers_flags); + break; + } + case test::integration::H2TestFrame::kResetStream: { + const uint32_t stream_idx = proto_frame.reset_stream().stream_index(); + const Http2Frame::ErrorCode error_code = + static_cast(proto_frame.reset_stream().error_code()); + ENVOY_LOG_MISC(trace, "Sending reset stream frame"); + h2_frame = Http2Frame::makeResetStreamFrame(stream_idx, error_code); + break; + } + case test::integration::H2TestFrame::kGoAway: { + const uint32_t last_stream_idx = proto_frame.go_away().last_stream_index(); + const Http2Frame::ErrorCode error_code = + static_cast(proto_frame.go_away().error_code()); + ENVOY_LOG_MISC(trace, "Sending go-away frame"); + h2_frame = Http2Frame::makeEmptyGoAwayFrame(last_stream_idx, error_code); + break; + } + case test::integration::H2TestFrame::kWindowUpdate: { + const uint32_t stream_idx = proto_frame.window_update().stream_index(); + const uint32_t increment = proto_frame.window_update().increment(); + ENVOY_LOG_MISC(trace, "Sending windows_update frame"); + h2_frame = Http2Frame::makeWindowUpdateFrame(stream_idx, increment); + break; + } + case test::integration::H2TestFrame::kMalformedRequest: { + const uint32_t stream_idx = proto_frame.malformed_request().stream_index(); + ENVOY_LOG_MISC(trace, "Sending malformed_request frame"); + h2_frame = Http2Frame::makeMalformedRequest(stream_idx); + break; + } + case test::integration::H2TestFrame::kMalformedRequestWithZerolenHeader: { + const uint32_t stream_idx = proto_frame.malformed_request_with_zerolen_header().stream_index(); + const absl::string_view host = proto_frame.malformed_request_with_zerolen_header().host(); + const absl::string_view path = 
proto_frame.malformed_request_with_zerolen_header().path(); + ENVOY_LOG_MISC(trace, "Sending malformed_request_with_zerolen_header"); + h2_frame = Http2Frame::makeMalformedRequestWithZerolenHeader(stream_idx, host, path); + break; + } + case test::integration::H2TestFrame::kRequest: { + const uint32_t stream_idx = proto_frame.request().stream_index(); + const absl::string_view host = proto_frame.request().host(); + const absl::string_view path = proto_frame.request().path(); + ENVOY_LOG_MISC(trace, "Sending request"); + h2_frame = Http2Frame::makeRequest(stream_idx, host, path); + break; + } + case test::integration::H2TestFrame::kPostRequest: { + const uint32_t stream_idx = proto_frame.post_request().stream_index(); + const absl::string_view host = proto_frame.post_request().host(); + const absl::string_view path = proto_frame.post_request().path(); + ENVOY_LOG_MISC(trace, "Sending post request"); + h2_frame = Http2Frame::makePostRequest(stream_idx, host, path); + break; + } + case test::integration::H2TestFrame::kGeneric: { + const absl::string_view frame_bytes = proto_frame.generic().frame_bytes(); + ENVOY_LOG_MISC(trace, "Sending generic frame"); + h2_frame = Http2Frame::makeGenericFrame(frame_bytes); + break; + } + default: + ENVOY_LOG_MISC(debug, "Proto-frame not supported!"); + break; + } + + write_func(h2_frame); +} + +void H2FuzzIntegrationTest::replay(const test::integration::H2CaptureFuzzTestCase& input, + bool ignore_response) { + PERSISTENT_FUZZ_VAR bool initialized = [this]() -> bool { + initialize(); + fake_upstreams_[0]->set_allow_unexpected_disconnects(true); + return true; + }(); + UNREFERENCED_PARAMETER(initialized); + IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("http")); + FakeRawConnectionPtr fake_upstream_connection; + bool stop_further_inputs = false; + bool preamble_sent = false; + for (int i = 0; i < input.events().size(); ++i) { + if (stop_further_inputs) { + break; + } + const auto& event = input.events(i); + 
ENVOY_LOG_MISC(debug, "Processing event: {}", event.DebugString()); + // If we're disconnected, we fail out. + if (!tcp_client->connected()) { + ENVOY_LOG_MISC(debug, "Disconnected, no further event processing."); + break; + } + switch (event.event_selector_case()) { + case test::integration::Event::kDownstreamSendEvent: { + auto downstream_write_func = [&](const Http2Frame& h2_frame) -> void { + ASSERT_TRUE(tcp_client->write(std::string(h2_frame), false, false)); + }; + if (!preamble_sent) { + // Start H2 session - send hello string + ASSERT_TRUE(tcp_client->write(Http2Frame::Preamble, false, false)); + preamble_sent = true; + } + for (auto& frame : event.downstream_send_event().h2_frames()) { + if (!tcp_client->connected()) { + ENVOY_LOG_MISC(debug, + "Disconnected, avoiding sending data, no further event processing."); + break; + } + + ENVOY_LOG_MISC(trace, "sending downstream frame"); + sendFrame(frame, downstream_write_func); + } + break; + } + case test::integration::Event::kUpstreamSendEvent: { + if (ignore_response) { + break; + } + if (fake_upstream_connection == nullptr) { + if (!fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection, max_wait_ms_)) { + // If we timed out, we fail out. + if (tcp_client->connected()) { + tcp_client->close(); + } + stop_further_inputs = true; + break; + } + } + // If we're no longer connected, we're done. 
+ if (!fake_upstream_connection->connected()) { + if (tcp_client->connected()) { + tcp_client->close(); + } + stop_further_inputs = true; + break; + } + { + auto upstream_write_func = [&](const Http2Frame& h2_frame) -> void { + AssertionResult result = fake_upstream_connection->write(std::string(h2_frame)); + RELEASE_ASSERT(result, result.message()); + }; + for (auto& frame : event.upstream_send_event().h2_frames()) { + if (!fake_upstream_connection->connected()) { + ENVOY_LOG_MISC( + debug, + "Upstream disconnected, avoiding sending data, no further event processing."); + stop_further_inputs = true; + break; + } + + ENVOY_LOG_MISC(trace, "sending upstream frame"); + sendFrame(frame, upstream_write_func); + } + } + break; + } + default: + // Maybe nothing is set? + break; + } + } + if (fake_upstream_connection != nullptr) { + if (fake_upstream_connection->connected()) { + AssertionResult result = fake_upstream_connection->close(); + RELEASE_ASSERT(result, result.message()); + } + AssertionResult result = fake_upstream_connection->waitForDisconnect(true); + RELEASE_ASSERT(result, result.message()); + } + if (tcp_client->connected()) { + tcp_client->close(); + } +} + +} // namespace Envoy diff --git a/test/integration/h2_fuzz.h b/test/integration/h2_fuzz.h new file mode 100644 index 0000000000000..b73ca0ffbfa23 --- /dev/null +++ b/test/integration/h2_fuzz.h @@ -0,0 +1,27 @@ +#pragma once + +#include "common/common/assert.h" +#include "common/common/logger.h" + +#include "test/common/http/http2/http2_frame.h" +#include "test/fuzz/fuzz_runner.h" +#include "test/fuzz/utility.h" +#include "test/integration/h2_capture_fuzz.pb.h" +#include "test/integration/http_integration.h" + +namespace Envoy { + +class H2FuzzIntegrationTest : public HttpIntegrationTest { +public: + H2FuzzIntegrationTest(Network::Address::IpVersion version) + : HttpIntegrationTest(Http::CodecClient::Type::HTTP2, version) {} + + void initialize() override; + void replay(const 
test::integration::H2CaptureFuzzTestCase&, bool ignore_response); + const std::chrono::milliseconds max_wait_ms_{10}; + +private: + void sendFrame(const test::integration::H2TestFrame&, + std::function); +}; +} // namespace Envoy diff --git a/test/integration/hds_integration_test.cc b/test/integration/hds_integration_test.cc index dfdc804beaee8..1c950e2499b16 100644 --- a/test/integration/hds_integration_test.cc +++ b/test/integration/hds_integration_test.cc @@ -26,10 +26,10 @@ namespace Envoy { namespace { // TODO(jmarantz): switch this to simulated-time after debugging flakes. -class HdsIntegrationTest : public testing::TestWithParam, +class HdsIntegrationTest : public Grpc::VersionedGrpcClientIntegrationParamTest, public HttpIntegrationTest { public: - HdsIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, GetParam()) {} + HdsIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, ipVersion()) {} void createUpstreams() override { fake_upstreams_.emplace_back( @@ -41,11 +41,12 @@ class HdsIntegrationTest : public testing::TestWithParamset_api_type(envoy::config::core::v3::ApiConfigSource::GRPC); hds_config->add_grpc_services()->mutable_envoy_grpc()->set_cluster_name("hds_cluster"); + hds_config->set_transport_api_version(apiVersion()); auto* hds_cluster = bootstrap.mutable_static_resources()->add_clusters(); hds_cluster->MergeFrom(bootstrap.static_resources().clusters()[0]); hds_cluster->mutable_circuit_breakers()->Clear(); @@ -60,8 +61,10 @@ class HdsIntegrationTest : public testing::TestWithParam(0, FakeHttpConnection::Type::HTTP1, version_, timeSystem()); + host_upstream_->set_allow_unexpected_disconnects(true); host2_upstream_ = std::make_unique(0, FakeHttpConnection::Type::HTTP1, version_, timeSystem()); + host2_upstream_->set_allow_unexpected_disconnects(true); } // Sets up a connection between Envoy and the management server. 
@@ -79,20 +82,18 @@ class HdsIntegrationTest : public testing::TestWithParamwaitForNewStream(*dispatcher_, host_stream_)); ASSERT_TRUE(host_stream_->waitForEndStream(*dispatcher_)); - host_upstream_->set_allow_unexpected_disconnects(true); - EXPECT_EQ(host_stream_->headers().Path()->value().getStringView(), "/healthcheck"); - EXPECT_EQ(host_stream_->headers().Method()->value().getStringView(), "GET"); - EXPECT_EQ(host_stream_->headers().Host()->value().getStringView(), "anna"); + EXPECT_EQ(host_stream_->headers().getPathValue(), "/healthcheck"); + EXPECT_EQ(host_stream_->headers().getMethodValue(), "GET"); + EXPECT_EQ(host_stream_->headers().getHostValue(), "anna"); if (!cluster2.empty()) { ASSERT_TRUE(host2_upstream_->waitForHttpConnection(*dispatcher_, host2_fake_connection_)); ASSERT_TRUE(host2_fake_connection_->waitForNewStream(*dispatcher_, host2_stream_)); ASSERT_TRUE(host2_stream_->waitForEndStream(*dispatcher_)); - host2_upstream_->set_allow_unexpected_disconnects(true); - EXPECT_EQ(host2_stream_->headers().Path()->value().getStringView(), "/healthcheck"); - EXPECT_EQ(host2_stream_->headers().Method()->value().getStringView(), "GET"); - EXPECT_EQ(host2_stream_->headers().Host()->value().getStringView(), cluster2); + EXPECT_EQ(host2_stream_->headers().getPathValue(), "/healthcheck"); + EXPECT_EQ(host2_stream_->headers().getMethodValue(), "GET"); + EXPECT_EQ(host2_stream_->headers().getHostValue(), cluster2); } } @@ -209,6 +210,25 @@ class HdsIntegrationTest : public testing::TestWithParamlocalAddress())) { ASSERT_TRUE(hds_stream_->waitForGrpcMessage(*dispatcher_, response_)); + EXPECT_EQ("POST", hds_stream_->headers().getMethodValue()); + EXPECT_EQ(TestUtility::getVersionedMethodPath("envoy.service.{1}.{0}.HealthDiscoveryService", + "StreamHealthCheck", apiVersion(), + /*use_alpha=*/false, serviceNamespace()), + hds_stream_->headers().getPathValue()); + EXPECT_EQ("application/grpc", hds_stream_->headers().getContentTypeValue()); + } + } + + const std::string 
serviceNamespace() const { + switch (apiVersion()) { + case envoy::config::core::v3::ApiVersion::AUTO: + FALLTHRU; + case envoy::config::core::v3::ApiVersion::V2: + return "discovery"; + case envoy::config::core::v3::ApiVersion::V3: + return "health"; + default: + NOT_REACHED_GCOVR_EXCL_LINE; } } @@ -232,9 +252,8 @@ class HdsIntegrationTest : public testing::TestWithParamwaitForCounterGe("hds_delegate.requests", ++hds_requests_); // Envoy sends a health check message to an endpoint - healthcheckEndpoints(); + ASSERT_TRUE(host_upstream_->waitForRawConnection(host_fake_raw_connection_)); // Endpoint doesn't respond to the health check + ASSERT_TRUE(host_fake_raw_connection_->waitForDisconnect(true)); // Receive updates until the one we expect arrives waitForEndpointHealthResponse(envoy::config::core::v3::TIMEOUT); @@ -361,19 +381,17 @@ TEST_P(HdsIntegrationTest, SingleEndpointTimeoutTcp) { server_health_check_specifier_.mutable_cluster_health_checks(0) ->mutable_health_checks(0) ->mutable_timeout() - ->set_nanos(500000000); // 0.5 seconds + ->set_nanos(100000000); // 0.1 seconds hds_stream_->startGrpcStream(); hds_stream_->sendGrpcMessage(server_health_check_specifier_); test_server_->waitForCounterGe("hds_delegate.requests", ++hds_requests_); // Envoys asks the endpoint if it's healthy - host_upstream_->set_allow_unexpected_disconnects(true); ASSERT_TRUE(host_upstream_->waitForRawConnection(host_fake_raw_connection_)); - ASSERT_TRUE( - host_fake_raw_connection_->waitForData(FakeRawConnection::waitForInexactMatch("Ping"))); // No response from the endpoint + ASSERT_TRUE(host_fake_raw_connection_->waitForDisconnect(true)); // Receive updates until the one we expect arrives waitForEndpointHealthResponse(envoy::config::core::v3::TIMEOUT); @@ -399,7 +417,6 @@ TEST_P(HdsIntegrationTest, SingleEndpointHealthyTcp) { test_server_->waitForCounterGe("hds_delegate.requests", ++hds_requests_); // Envoy asks the endpoint if it's healthy - 
host_upstream_->set_allow_unexpected_disconnects(true); ASSERT_TRUE(host_upstream_->waitForRawConnection(host_fake_raw_connection_)); ASSERT_TRUE( host_fake_raw_connection_->waitForData(FakeRawConnection::waitForInexactMatch("Ping"))); @@ -434,7 +451,6 @@ TEST_P(HdsIntegrationTest, SingleEndpointUnhealthyTcp) { test_server_->waitForCounterGe("hds_delegate.requests", ++hds_requests_); // Envoy asks the endpoint if it's healthy - host_upstream_->set_allow_unexpected_disconnects(true); ASSERT_TRUE(host_upstream_->waitForRawConnection(host_fake_raw_connection_)); ASSERT_TRUE( host_fake_raw_connection_->waitForData(FakeRawConnection::waitForInexactMatch("Ping"))); @@ -672,7 +688,6 @@ TEST_P(HdsIntegrationTest, TestUpdateMessage) { test_server_->waitForCounterGe("hds_delegate.requests", ++hds_requests_); // Envoy sends a health check message to an endpoint - host2_upstream_->set_allow_unexpected_disconnects(true); ASSERT_TRUE(host2_upstream_->waitForHttpConnection(*dispatcher_, host2_fake_connection_)); ASSERT_TRUE(host2_fake_connection_->waitForNewStream(*dispatcher_, host2_stream_)); ASSERT_TRUE(host2_stream_->waitForEndStream(*dispatcher_)); diff --git a/test/integration/header_casing_integration_test.cc b/test/integration/header_casing_integration_test.cc index a40e789de191f..7700e48ab3650 100644 --- a/test/integration/header_casing_integration_test.cc +++ b/test/integration/header_casing_integration_test.cc @@ -50,7 +50,7 @@ TEST_P(HeaderCasingIntegrationTest, VerifyCasedHeaders) { IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("http")); auto request = "GET / HTTP/1.1\r\nhost: host\r\nmy-header: foo\r\n\r\n"; - tcp_client->write(request, false); + ASSERT_TRUE(tcp_client->write(request, false)); Envoy::FakeRawConnectionPtr upstream_connection; ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(upstream_connection)); diff --git a/test/integration/header_integration_test.cc b/test/integration/header_integration_test.cc index 
08ebe7b2a8069..0dd832e21c43d 100644 --- a/test/integration/header_integration_test.cc +++ b/test/integration/header_integration_test.cc @@ -138,7 +138,7 @@ stat_prefix: header_test key: "x-foo" value: "value1" - header: - key: "authorization" + key: "user-agent" value: "token1" routes: - match: { prefix: "/test" } @@ -149,7 +149,7 @@ stat_prefix: header_test key: "x-foo" value: "value2" - header: - key: "authorization" + key: "user-agent" value: "token2" - name: path-sanitization domains: ["path-sanitization.com"] @@ -196,9 +196,6 @@ class HeaderIntegrationTest RELEASE_ASSERT(result, result.message()); eds_connection_.reset(); } - cleanupUpstreamAndDownstream(); - test_server_.reset(); - fake_upstreams_.clear(); } void addHeader(Protobuf::RepeatedPtrField* field, @@ -419,20 +416,21 @@ class HeaderIntegrationTest } protected: - void performRequest(Http::TestHeaderMapImpl&& request_headers, - Http::TestHeaderMapImpl&& expected_request_headers, - Http::TestHeaderMapImpl&& response_headers, - Http::TestHeaderMapImpl&& expected_response_headers) { + void performRequest(Http::TestRequestHeaderMapImpl&& request_headers, + Http::TestRequestHeaderMapImpl&& expected_request_headers, + Http::TestResponseHeaderMapImpl&& response_headers, + Http::TestResponseHeaderMapImpl&& expected_response_headers) { registerTestServerPorts({"http"}); codec_client_ = makeHttpConnection(makeClientConnection(lookupPort("http"))); auto response = sendRequestAndWaitForResponse(request_headers, 0, response_headers, 0); - compareHeaders(upstream_request_->headers(), expected_request_headers); - compareHeaders(response->headers(), expected_response_headers); + compareHeaders(Http::TestRequestHeaderMapImpl(upstream_request_->headers()), + expected_request_headers); + compareHeaders(Http::TestResponseHeaderMapImpl(response->headers()), expected_response_headers); } - void compareHeaders(Http::TestHeaderMapImpl&& headers, - Http::TestHeaderMapImpl& expected_headers) { + template + void 
compareHeaders(Headers&& headers, ExpectedHeaders& expected_headers) { headers.remove(Envoy::Http::LowerCaseString{"content-length"}); headers.remove(Envoy::Http::LowerCaseString{"date"}); if (!routerSuppressEnvoyHeaders()) { @@ -462,26 +460,26 @@ INSTANTIATE_TEST_SUITE_P( TEST_P(HeaderIntegrationTest, TestRequestAndResponseHeaderPassThrough) { initializeFilter(HeaderMode::Append, false); performRequest( - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":method", "GET"}, {":path", "/"}, {":scheme", "http"}, {":authority", "no-headers.com"}, {"x-request-foo", "downstram"}, }, - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":authority", "no-headers.com"}, {"x-request-foo", "downstram"}, {":path", "/"}, {":method", "GET"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"content-length", "0"}, {":status", "200"}, {"x-return-foo", "upstream"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"x-return-foo", "upstream"}, {":status", "200"}, @@ -493,7 +491,7 @@ TEST_P(HeaderIntegrationTest, TestRequestAndResponseHeaderPassThrough) { TEST_P(HeaderIntegrationTest, TestVirtualHostAppendHeaderManipulation) { initializeFilter(HeaderMode::Append, false); performRequest( - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":method", "GET"}, {":path", "/vhost-only"}, {":scheme", "http"}, @@ -501,21 +499,21 @@ TEST_P(HeaderIntegrationTest, TestVirtualHostAppendHeaderManipulation) { {"x-vhost-request", "downstream"}, {"x-vhost-request-remove", "downstream"}, }, - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":authority", "vhost-headers.com"}, {"x-vhost-request", "downstream"}, {"x-vhost-request", "vhost"}, {":path", "/vhost-only"}, {":method", "GET"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"content-length", "0"}, {":status", "200"}, {"x-vhost-response", "upstream"}, {"x-vhost-response-remove", "upstream"}, 
}, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"x-vhost-response", "upstream"}, {"x-vhost-response", "vhost"}, @@ -527,7 +525,7 @@ TEST_P(HeaderIntegrationTest, TestVirtualHostAppendHeaderManipulation) { TEST_P(HeaderIntegrationTest, TestVirtualHostReplaceHeaderManipulation) { initializeFilter(HeaderMode::Replace, false); performRequest( - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":method", "GET"}, {":path", "/vhost-only"}, {":scheme", "http"}, @@ -535,21 +533,21 @@ TEST_P(HeaderIntegrationTest, TestVirtualHostReplaceHeaderManipulation) { {"x-vhost-request", "downstream"}, {"x-unmodified", "downstream"}, }, - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":authority", "vhost-headers.com"}, {"x-unmodified", "downstream"}, {"x-vhost-request", "vhost"}, {":path", "/vhost-only"}, {":method", "GET"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"content-length", "0"}, {":status", "200"}, {"x-vhost-response", "upstream"}, {"x-unmodified", "upstream"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"x-unmodified", "upstream"}, {"x-vhost-response", "vhost"}, @@ -561,7 +559,7 @@ TEST_P(HeaderIntegrationTest, TestVirtualHostReplaceHeaderManipulation) { TEST_P(HeaderIntegrationTest, TestRouteAppendHeaderManipulation) { initializeFilter(HeaderMode::Append, false); performRequest( - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":method", "GET"}, {":path", "/route-only"}, {":scheme", "http"}, @@ -569,21 +567,21 @@ TEST_P(HeaderIntegrationTest, TestRouteAppendHeaderManipulation) { {"x-route-request", "downstream"}, {"x-route-request-remove", "downstream"}, }, - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":authority", "route-headers.com"}, {"x-route-request", "downstream"}, {"x-route-request", "route"}, {":path", "/route-only"}, {":method", "GET"}, }, - Http::TestHeaderMapImpl{ + 
Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"content-length", "0"}, {":status", "200"}, {"x-route-response", "upstream"}, {"x-route-response-remove", "upstream"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"x-route-response", "upstream"}, {"x-route-response", "route"}, @@ -595,7 +593,7 @@ TEST_P(HeaderIntegrationTest, TestRouteAppendHeaderManipulation) { TEST_P(HeaderIntegrationTest, TestRouteReplaceHeaderManipulation) { initializeFilter(HeaderMode::Replace, false); performRequest( - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":method", "GET"}, {":path", "/route-only"}, {":scheme", "http"}, @@ -604,14 +602,14 @@ TEST_P(HeaderIntegrationTest, TestRouteReplaceHeaderManipulation) { {"x-route-request-remove", "downstream"}, {"x-unmodified", "downstream"}, }, - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":authority", "route-headers.com"}, {"x-unmodified", "downstream"}, {"x-route-request", "route"}, {":path", "/route-only"}, {":method", "GET"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"content-length", "0"}, {":status", "200"}, @@ -619,7 +617,7 @@ TEST_P(HeaderIntegrationTest, TestRouteReplaceHeaderManipulation) { {"x-route-response-remove", "upstream"}, {"x-unmodified", "upstream"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"x-unmodified", "upstream"}, {"x-route-response", "route"}, @@ -631,7 +629,7 @@ TEST_P(HeaderIntegrationTest, TestRouteReplaceHeaderManipulation) { TEST_P(HeaderIntegrationTest, TestVirtualHostAndRouteAppendHeaderManipulation) { initializeFilter(HeaderMode::Append, false); performRequest( - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":method", "GET"}, {":path", "/vhost-and-route"}, {":scheme", "http"}, @@ -641,7 +639,7 @@ TEST_P(HeaderIntegrationTest, TestVirtualHostAndRouteAppendHeaderManipulation) { {"x-route-request", "downstream"}, 
{"x-route-request-remove", "downstream"}, }, - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":authority", "vhost-headers.com"}, {"x-vhost-request", "downstream"}, {"x-route-request", "downstream"}, @@ -650,7 +648,7 @@ TEST_P(HeaderIntegrationTest, TestVirtualHostAndRouteAppendHeaderManipulation) { {":path", "/vhost-and-route"}, {":method", "GET"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"content-length", "0"}, {":status", "200"}, @@ -659,7 +657,7 @@ TEST_P(HeaderIntegrationTest, TestVirtualHostAndRouteAppendHeaderManipulation) { {"x-route-response", "upstream"}, {"x-route-response-remove", "upstream"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"x-vhost-response", "upstream"}, {"x-route-response", "upstream"}, @@ -673,7 +671,7 @@ TEST_P(HeaderIntegrationTest, TestVirtualHostAndRouteAppendHeaderManipulation) { TEST_P(HeaderIntegrationTest, TestVirtualHostAndRouteReplaceHeaderManipulation) { initializeFilter(HeaderMode::Replace, false); performRequest( - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":method", "GET"}, {":path", "/vhost-and-route"}, {":scheme", "http"}, @@ -682,7 +680,7 @@ TEST_P(HeaderIntegrationTest, TestVirtualHostAndRouteReplaceHeaderManipulation) {"x-route-request", "downstream"}, {"x-unmodified", "request"}, }, - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":authority", "vhost-headers.com"}, {"x-unmodified", "request"}, {"x-route-request", "route"}, @@ -690,7 +688,7 @@ TEST_P(HeaderIntegrationTest, TestVirtualHostAndRouteReplaceHeaderManipulation) {":path", "/vhost-and-route"}, {":method", "GET"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"content-length", "0"}, {":status", "200"}, @@ -698,7 +696,7 @@ TEST_P(HeaderIntegrationTest, TestVirtualHostAndRouteReplaceHeaderManipulation) {"x-route-response", "upstream"}, {"x-unmodified", "response"}, }, - 
Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"x-unmodified", "response"}, {"x-route-response", "route"}, @@ -712,7 +710,7 @@ TEST_P(HeaderIntegrationTest, TestVirtualHostAndRouteReplaceHeaderManipulation) TEST_P(HeaderIntegrationTest, TestRouteConfigVirtualHostAndRouteAppendHeaderManipulation) { initializeFilter(HeaderMode::Append, true); performRequest( - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":method", "GET"}, {":path", "/vhost-and-route"}, {":scheme", "http"}, @@ -724,7 +722,7 @@ TEST_P(HeaderIntegrationTest, TestRouteConfigVirtualHostAndRouteAppendHeaderMani {"x-route-request", "downstream"}, {"x-route-request-remove", "downstream"}, }, - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":authority", "vhost-headers.com"}, {"x-routeconfig-request", "downstream"}, {"x-vhost-request", "downstream"}, @@ -735,7 +733,7 @@ TEST_P(HeaderIntegrationTest, TestRouteConfigVirtualHostAndRouteAppendHeaderMani {":path", "/vhost-and-route"}, {":method", "GET"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"content-length", "0"}, {":status", "200"}, @@ -746,7 +744,7 @@ TEST_P(HeaderIntegrationTest, TestRouteConfigVirtualHostAndRouteAppendHeaderMani {"x-route-response", "upstream"}, {"x-route-response-remove", "upstream"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"x-routeconfig-response", "upstream"}, {"x-vhost-response", "upstream"}, @@ -763,7 +761,7 @@ TEST_P(HeaderIntegrationTest, TestRouteConfigVirtualHostAndRouteAppendHeaderMani TEST_P(HeaderIntegrationTest, TestRouteConfigVirtualHostAndRouteReplaceHeaderManipulation) { initializeFilter(HeaderMode::Replace, true); performRequest( - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":method", "GET"}, {":path", "/vhost-and-route"}, {":scheme", "http"}, @@ -773,7 +771,7 @@ TEST_P(HeaderIntegrationTest, TestRouteConfigVirtualHostAndRouteReplaceHeaderMan 
{"x-route-request", "downstream"}, {"x-unmodified", "request"}, }, - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":authority", "vhost-headers.com"}, {"x-unmodified", "request"}, {"x-route-request", "route"}, @@ -782,7 +780,7 @@ TEST_P(HeaderIntegrationTest, TestRouteConfigVirtualHostAndRouteReplaceHeaderMan {":path", "/vhost-and-route"}, {":method", "GET"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"content-length", "0"}, {":status", "200"}, @@ -791,7 +789,7 @@ TEST_P(HeaderIntegrationTest, TestRouteConfigVirtualHostAndRouteReplaceHeaderMan {"x-route-response", "upstream"}, {"x-unmodified", "response"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"x-unmodified", "response"}, {"x-route-response", "route"}, @@ -806,7 +804,7 @@ TEST_P(HeaderIntegrationTest, TestRouteConfigVirtualHostAndRouteReplaceHeaderMan TEST_P(HeaderIntegrationTest, TestRouteConfigVirtualHostRouteAndClusterAppendHeaderManipulation) { initializeFilter(HeaderMode::Append, true); performRequest( - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":method", "GET"}, {":path", "/vhost-route-and-weighted-clusters"}, {":scheme", "http"}, @@ -820,7 +818,7 @@ TEST_P(HeaderIntegrationTest, TestRouteConfigVirtualHostRouteAndClusterAppendHea {"x-weighted-cluster-request", "downstream"}, {"x-weighted-cluster-request-remove", "downstream"}, }, - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":authority", "vhost-headers.com"}, {"x-routeconfig-request", "downstream"}, {"x-vhost-request", "downstream"}, @@ -833,7 +831,7 @@ TEST_P(HeaderIntegrationTest, TestRouteConfigVirtualHostRouteAndClusterAppendHea {":path", "/vhost-route-and-weighted-clusters"}, {":method", "GET"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"content-length", "0"}, {":status", "200"}, @@ -846,7 +844,7 @@ TEST_P(HeaderIntegrationTest, 
TestRouteConfigVirtualHostRouteAndClusterAppendHea {"x-weighted-cluster-response", "upstream"}, {"x-weighted-cluster-response-remove", "upstream"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"x-routeconfig-response", "upstream"}, {"x-vhost-response", "upstream"}, @@ -865,7 +863,7 @@ TEST_P(HeaderIntegrationTest, TestRouteConfigVirtualHostRouteAndClusterAppendHea TEST_P(HeaderIntegrationTest, TestRouteConfigVirtualHostRouteAndClusterReplaceHeaderManipulation) { initializeFilter(HeaderMode::Replace, true); performRequest( - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":method", "GET"}, {":path", "/vhost-route-and-weighted-clusters"}, {":scheme", "http"}, @@ -876,7 +874,7 @@ TEST_P(HeaderIntegrationTest, TestRouteConfigVirtualHostRouteAndClusterReplaceHe {"x-weighted-cluster-request", "downstream"}, {"x-unmodified", "request"}, }, - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":authority", "vhost-headers.com"}, {"x-unmodified", "request"}, {"x-weighted-cluster-request", "weighted-cluster-1"}, @@ -886,7 +884,7 @@ TEST_P(HeaderIntegrationTest, TestRouteConfigVirtualHostRouteAndClusterReplaceHe {":path", "/vhost-route-and-weighted-clusters"}, {":method", "GET"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"content-length", "0"}, {":status", "200"}, @@ -896,7 +894,7 @@ TEST_P(HeaderIntegrationTest, TestRouteConfigVirtualHostRouteAndClusterReplaceHe {"x-weighted-cluster-response", "upstream"}, {"x-unmodified", "response"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"x-unmodified", "response"}, {"x-weighted-cluster-response", "weighted-cluster-1"}, @@ -912,7 +910,7 @@ TEST_P(HeaderIntegrationTest, TestDynamicHeaders) { prepareEDS(); initializeFilter(HeaderMode::Replace, true); performRequest( - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":method", "GET"}, {":path", 
"/vhost-route-and-weighted-clusters"}, {":scheme", "http"}, @@ -923,7 +921,7 @@ TEST_P(HeaderIntegrationTest, TestDynamicHeaders) { {"x-weighted-cluster-request", "downstream"}, {"x-unmodified", "request"}, }, - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":authority", "vhost-headers.com"}, {"x-unmodified", "request"}, {"x-weighted-cluster-request", "weighted-cluster-1"}, @@ -933,7 +931,7 @@ TEST_P(HeaderIntegrationTest, TestDynamicHeaders) { {":path", "/vhost-route-and-weighted-clusters"}, {":method", "GET"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"content-length", "0"}, {":status", "200"}, @@ -943,7 +941,7 @@ TEST_P(HeaderIntegrationTest, TestDynamicHeaders) { {"x-weighted-cluster-response", "upstream"}, {"x-unmodified", "response"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"x-unmodified", "response"}, {"x-weighted-cluster-response", "weighted-cluster-1"}, @@ -962,27 +960,27 @@ TEST_P(HeaderIntegrationTest, TestDynamicHeaders) { TEST_P(HeaderIntegrationTest, TestXFFParsing) { initializeFilter(HeaderMode::Replace, false); performRequest( - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":method", "GET"}, {":path", "/test"}, {":scheme", "http"}, {":authority", "xff-headers.com"}, {"x-forwarded-for", "1.2.3.4, 5.6.7.8 ,9.10.11.12"}, }, - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":authority", "xff-headers.com"}, {"x-forwarded-for", "1.2.3.4, 5.6.7.8 ,9.10.11.12"}, {"x-real-ip", "5.6.7.8"}, {":path", "/test"}, {":method", "GET"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"content-length", "0"}, {":status", "200"}, {"x-unmodified", "response"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"x-unmodified", "response"}, {":status", "200"}, @@ -994,30 +992,30 @@ TEST_P(HeaderIntegrationTest, TestXFFParsing) { TEST_P(HeaderIntegrationTest, 
TestAppendSameHeaders) { initializeFilter(HeaderMode::Append, false); performRequest( - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":method", "GET"}, {":path", "/test"}, {":scheme", "http"}, {":authority", "append-same-headers.com"}, - {"authorization", "token3"}, + {"user-agent", "token3"}, {"x-foo", "value3"}, }, - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":authority", "append-same-headers.com"}, {":path", "/test"}, {":method", "GET"}, - {"authorization", "token3,token2,token1"}, + {"user-agent", "token3,token2,token1"}, {"x-foo", "value3"}, {"x-foo", "value2"}, {"x-foo", "value1"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"content-length", "0"}, {":status", "200"}, {"x-unmodified", "response"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"x-unmodified", "response"}, {":status", "200"}, @@ -1031,23 +1029,23 @@ TEST_P(HeaderIntegrationTest, TestPathAndRouteWhenNormalizePathOff) { normalize_path_ = false; initializeFilter(HeaderMode::Append, false); performRequest( - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":method", "GET"}, {":path", "/private/../public"}, {":scheme", "http"}, {":authority", "path-sanitization.com"}, }, - Http::TestHeaderMapImpl{{":authority", "path-sanitization.com"}, - {":path", "/private/../public"}, - {":method", "GET"}, - {"x-site", "private"}}, - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{{":authority", "path-sanitization.com"}, + {":path", "/private/../public"}, + {":method", "GET"}, + {"x-site", "private"}}, + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"content-length", "0"}, {":status", "200"}, {"x-unmodified", "response"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"x-unmodified", "response"}, {":status", "200"}, @@ -1061,23 +1059,23 @@ TEST_P(HeaderIntegrationTest, TestPathAndRouteOnNormalizedPath) { normalize_path_ = true; 
initializeFilter(HeaderMode::Append, false); performRequest( - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":method", "GET"}, {":path", "/private/../public"}, {":scheme", "http"}, {":authority", "path-sanitization.com"}, }, - Http::TestHeaderMapImpl{{":authority", "path-sanitization.com"}, - {":path", "/public"}, - {":method", "GET"}, - {"x-site", "public"}}, - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{{":authority", "path-sanitization.com"}, + {":path", "/public"}, + {":method", "GET"}, + {"x-site", "public"}}, + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"content-length", "0"}, {":status", "200"}, {"x-unmodified", "response"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"x-unmodified", "response"}, {":status", "200"}, @@ -1088,7 +1086,7 @@ TEST_P(HeaderIntegrationTest, TestPathAndRouteOnNormalizedPath) { TEST_P(HeaderIntegrationTest, TestTeHeaderPassthrough) { initializeFilter(HeaderMode::Append, false); performRequest( - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":method", "GET"}, {":path", "/"}, {":scheme", "http"}, @@ -1097,23 +1095,24 @@ TEST_P(HeaderIntegrationTest, TestTeHeaderPassthrough) { {"connection", "te, close"}, {"te", "trailers"}, }, - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":authority", "no-headers.com"}, {":path", "/"}, {":method", "GET"}, {"x-request-foo", "downstram"}, {"te", "trailers"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"content-length", "0"}, {":status", "200"}, {"x-return-foo", "upstream"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"x-return-foo", "upstream"}, {":status", "200"}, + {"connection", "close"}, }); } @@ -1121,7 +1120,7 @@ TEST_P(HeaderIntegrationTest, TestTeHeaderPassthrough) { TEST_P(HeaderIntegrationTest, TestTeHeaderSanitized) { initializeFilter(HeaderMode::Append, false); performRequest( - 
Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":method", "GET"}, {":path", "/"}, {":scheme", "http"}, @@ -1133,19 +1132,19 @@ TEST_P(HeaderIntegrationTest, TestTeHeaderSanitized) { {"sam", "bar"}, {"will", "baz"}, }, - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":authority", "no-headers.com"}, {":path", "/"}, {":method", "GET"}, {"x-request-foo", "downstram"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"content-length", "0"}, {":status", "200"}, {"x-return-foo", "upstream"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"x-return-foo", "upstream"}, {":status", "200"}, diff --git a/test/integration/header_prefix_integration_test.cc b/test/integration/header_prefix_integration_test.cc index 0400d2f25a3cf..e2e47831a27ce 100644 --- a/test/integration/header_prefix_integration_test.cc +++ b/test/integration/header_prefix_integration_test.cc @@ -12,7 +12,10 @@ namespace Envoy { // bootstrap proto it's too late to set it. // // Instead, set the value early and regression test the bootstrap proto's validation of prefix -// injection. +// injection. We also register a custom header to make sure that registered headers interact well +// with the prefix override. +Http::RegisterCustomInlineHeader + cache_control_handle(Http::CustomHeaders::get().CacheControl); static const char* custom_prefix_ = "x-custom"; diff --git a/test/integration/hotrestart_main.cc b/test/integration/hotrestart_main.cc new file mode 100644 index 0000000000000..75f72cc364366 --- /dev/null +++ b/test/integration/hotrestart_main.cc @@ -0,0 +1,21 @@ +#include "common/stats/utility.h" + +#include "exe/main_common.h" + +// NOLINT(namespace-envoy) + +/** + * Custom main() for hotrestart_test. This should be identical to + * source/exe/main.cc, except for the registration and increment of a new gauge + * specifically for hot_restart.test.sh. 
+ */ +int main(int argc, char** argv) { + return Envoy::MainCommon::main(argc, argv, [](Envoy::Server::Instance& server) { + // Creates a gauge that will be incremented once and then never touched. This is + // for testing parent-gauge accumulation in hot_restart_test.sh. + Envoy::Stats::Utility::gaugeFromElements(server.stats(), + {Envoy::Stats::DynamicName("hotrestart_test_gauge")}, + Envoy::Stats::Gauge::ImportMode::Accumulate) + .inc(); + }); +} diff --git a/test/integration/hotrestart_test.sh b/test/integration/hotrestart_test.sh index 13a5a59731d53..a09aee64e5cbe 100755 --- a/test/integration/hotrestart_test.sh +++ b/test/integration/hotrestart_test.sh @@ -1,5 +1,10 @@ #!/bin/bash +# For this test we use a slightly modiified test binary, based on +# source/exe/enovy-static. If this starts failing to run or build, ensure that +# source/exe/main.cc and ./hotrestart_main.cc have not diverged except for +# adding the new gauge. +export ENVOY_BIN="${TEST_SRCDIR}"/envoy/test/integration/hotrestart_main source "$TEST_SRCDIR/envoy/test/integration/test_utility.sh" # TODO(htuch): In this test script, we are duplicating work done in test_environment.cc via sed. 
@@ -23,6 +28,7 @@ if [[ -z "${ENVOY_IP_TEST_VERSIONS}" ]] || [[ "${ENVOY_IP_TEST_VERSIONS}" == "al sed -e "s#{{ ip_loopback_address }}#127.0.0.1#" | \ sed -e "s#{{ reuse_port }}#false#" | \ sed -e "s#{{ dns_lookup_family }}#V4_ONLY#" | \ + sed -e "s#{{ null_device_path }}#/dev/null#" | \ cat > "${HOT_RESTART_JSON_V4}" JSON_TEST_ARRAY+=("${HOT_RESTART_JSON_V4}") fi @@ -37,6 +43,7 @@ if [[ -z "${ENVOY_IP_TEST_VERSIONS}" ]] || [[ "${ENVOY_IP_TEST_VERSIONS}" == "al sed -e "s#{{ ip_loopback_address }}#::1#" | \ sed -e "s#{{ reuse_port }}#false#" | \ sed -e "s#{{ dns_lookup_family }}#v6_only#" | \ + sed -e "s#{{ null_device_path }}#/dev/null#" | \ cat > "${HOT_RESTART_JSON_V6}" JSON_TEST_ARRAY+=("${HOT_RESTART_JSON_V6}") fi @@ -48,6 +55,7 @@ SOCKET_DIR="$(mktemp -d /tmp/envoy_test_hotrestart.XXXXXX)" cat "${TEST_SRCDIR}/envoy"/test/config/integration/server_unix_listener.yaml | sed -e "s#{{ socket_dir }}#${SOCKET_DIR}#" | \ sed -e "s#{{ ip_loopback_address }}#127.0.0.1#" | \ + sed -e "s#{{ null_device_path }}#/dev/null#" | \ cat > "${HOT_RESTART_JSON_UDS}" JSON_TEST_ARRAY+=("${HOT_RESTART_JSON_UDS}") @@ -61,37 +69,48 @@ cat "${TEST_SRCDIR}/envoy"/test/config/integration/server.yaml | sed -e "s#{{ ip_loopback_address }}#127.0.0.1#" | \ sed -e "s#{{ reuse_port }}#true#" | \ sed -e "s#{{ dns_lookup_family }}#V4_ONLY#" | \ + sed -e "s#{{ null_device_path }}#/dev/null#" | \ cat > "${HOT_RESTART_JSON_REUSE_PORT}" JSON_TEST_ARRAY+=("${HOT_RESTART_JSON_REUSE_PORT}") -# Enable this test to work with --runs_per_test -if [[ -z "${TEST_RANDOM_SEED}" ]]; then - BASE_ID=1 -else - BASE_ID="${TEST_RANDOM_SEED}" -fi +# Shared memory size varies by architecture +SHARED_MEMORY_SIZE="104" +[[ "$(uname -m)" == "aarch64" ]] && SHARED_MEMORY_SIZE="120" -echo "Hot restart test using --base-id ${BASE_ID}" +echo "Hot restart test using dynamic base id" TEST_INDEX=0 function run_testsuite() { local HOT_RESTART_JSON="$1" local FAKE_SYMBOL_TABLE="$2" - # TODO(jun03): instead of setting the base-id, 
the validate server should use the nop hot restart start_test validation check "${ENVOY_BIN}" -c "${HOT_RESTART_JSON}" --mode validate --service-cluster cluster \ - --use-fake-symbol-table "$FAKE_SYMBOL_TABLE" --service-node node --base-id "${BASE_ID}" + --use-fake-symbol-table "$FAKE_SYMBOL_TABLE" --service-node node --disable-hot-restart + + local BASE_ID_PATH=$(mktemp 'envoy_test_base_id.XXXXXX') + echo "Selected dynamic base id path ${BASE_ID_PATH}" - # Now start the real server, hot restart it twice, and shut it all down as a basic hot restart - # sanity test. + # Now start the real server, hot restart it twice, and shut it all down as a + # basic hot restart sanity test. We expect SERVER_0 to exit quickly when + # SERVER_2 starts, and are not relying on timeouts. start_test Starting epoch 0 ADMIN_ADDRESS_PATH_0="${TEST_TMPDIR}"/admin.0."${TEST_INDEX}".address run_in_background_saving_pid "${ENVOY_BIN}" -c "${HOT_RESTART_JSON}" \ - --restart-epoch 0 --base-id "${BASE_ID}" --service-cluster cluster --service-node node \ - --use-fake-symbol-table "$FAKE_SYMBOL_TABLE" --admin-address-path "${ADMIN_ADDRESS_PATH_0}" + --restart-epoch 0 --use-dynamic-base-id --base-id-path "${BASE_ID_PATH}" \ + --service-cluster cluster --service-node node --use-fake-symbol-table "$FAKE_SYMBOL_TABLE" \ + --admin-address-path "${ADMIN_ADDRESS_PATH_0}" + + local BASE_ID=$(cat "${BASE_ID_PATH}") + while [ -z "${BASE_ID}" ]; do + echo "Waiting for base id" + sleep 0.5 + BASE_ID=$(cat "${BASE_ID_PATH}") + done - FIRST_SERVER_PID=$BACKGROUND_PID + echo "Selected dynamic base id ${BASE_ID}" + + SERVER_0_PID=$BACKGROUND_PID start_test Updating original config listener addresses sleep 3 @@ -103,8 +122,8 @@ function run_testsuite() { # Send SIGUSR1 signal to the first server, this should not kill it. Also send SIGHUP which should # get eaten. 
echo "Sending SIGUSR1/SIGHUP to first server" - kill -SIGUSR1 ${FIRST_SERVER_PID} - kill -SIGHUP ${FIRST_SERVER_PID} + kill -SIGUSR1 ${SERVER_0_PID} + kill -SIGHUP ${SERVER_0_PID} sleep 3 disableHeapCheck @@ -113,7 +132,7 @@ function run_testsuite() { # string, compare it against a hard-coded string. start_test Checking for consistency of /hot_restart_version CLI_HOT_RESTART_VERSION=$("${ENVOY_BIN}" --hot-restart-version --base-id "${BASE_ID}" 2>&1) - EXPECTED_CLI_HOT_RESTART_VERSION="11.104" + EXPECTED_CLI_HOT_RESTART_VERSION="11.${SHARED_MEMORY_SIZE}" echo "The Envoy's hot restart version is ${CLI_HOT_RESTART_VERSION}" echo "Now checking that the above version is what we expected." check [ "${CLI_HOT_RESTART_VERSION}" = "${EXPECTED_CLI_HOT_RESTART_VERSION}" ] @@ -121,7 +140,7 @@ function run_testsuite() { start_test Checking for consistency of /hot_restart_version with --use-fake-symbol-table "$FAKE_SYMBOL_TABLE" CLI_HOT_RESTART_VERSION=$("${ENVOY_BIN}" --hot-restart-version --base-id "${BASE_ID}" \ --use-fake-symbol-table "$FAKE_SYMBOL_TABLE" 2>&1) - EXPECTED_CLI_HOT_RESTART_VERSION="11.104" + EXPECTED_CLI_HOT_RESTART_VERSION="11.${SHARED_MEMORY_SIZE}" check [ "${CLI_HOT_RESTART_VERSION}" = "${EXPECTED_CLI_HOT_RESTART_VERSION}" ] start_test Checking for match of --hot-restart-version and admin /hot_restart_version @@ -134,29 +153,40 @@ function run_testsuite() { check [ "${ADMIN_HOT_RESTART_VERSION}" = "${CLI_HOT_RESTART_VERSION}" ] start_test Checking server.hot_restart_generation 1 - GENERATION_0=$(curl -sg http://${ADMIN_ADDRESS_0}/stats | grep server.hot_restart_generation) - check [ "$GENERATION_0" = "server.hot_restart_generation: 1" ]; + GENERATION_0=$(scrape_stat "${ADMIN_ADDRESS_0}" "server.hot_restart_generation") + check [ "$GENERATION_0" = "1" ]; # Verify we can see server.live in the admin port. 
- SERVER_LIVE_0=$(curl -sg http://${ADMIN_ADDRESS_0}/stats | grep server.live) - check [ "$SERVER_LIVE_0" = "server.live: 1" ]; + SERVER_LIVE_0=$(scrape_stat "${ADMIN_ADDRESS_0}" "server.live") + check [ "$SERVER_LIVE_0" = "1" ]; + + # Capture the value of test_gauge from the initial parent: it should be 1. + TEST_GAUGE_0=$(scrape_stat "${ADMIN_ADDRESS_0}" "hotrestart_test_gauge") + check [ "$TEST_GAUGE_0" = "1" ]; enableHeapCheck - start_test Starting epoch 1 ADMIN_ADDRESS_PATH_1="${TEST_TMPDIR}"/admin.1."${TEST_INDEX}".address run_in_background_saving_pid "${ENVOY_BIN}" -c "${UPDATED_HOT_RESTART_JSON}" \ --restart-epoch 1 --base-id "${BASE_ID}" --service-cluster cluster --service-node node \ --use-fake-symbol-table "$FAKE_SYMBOL_TABLE" --admin-address-path "${ADMIN_ADDRESS_PATH_1}" - SECOND_SERVER_PID=$BACKGROUND_PID + SERVER_1_PID=$BACKGROUND_PID # Wait for stat flushing sleep 7 ADMIN_ADDRESS_1=$(cat "${ADMIN_ADDRESS_PATH_1}") - SERVER_LIVE_1=$(curl -sg http://${ADMIN_ADDRESS_1}/stats | grep server.live) - check [ "$SERVER_LIVE_1" = "server.live: 1" ]; + SERVER_LIVE_1=$(scrape_stat "${ADMIN_ADDRESS_1}" "server.live") + check [ "$SERVER_LIVE_1" = "1" ]; + + # Check to see that the SERVER_1 accumulates the test_gauge value from + # SERVER_0, This will be erased once SERVER_0 terminates. + if [ "$TEST_GAUGE_0" != 0 ]; then + start_test Checking that the hotrestart_test_gauge incorporates SERVER_0 and SERVER_1. + TEST_GAUGE_1=$(scrape_stat "${ADMIN_ADDRESS_1}" "hotrestart_test_gauge") + check [ $TEST_GAUGE_1 = "2" ] + fi start_test Checking that listener addresses have not changed HOT_RESTART_JSON_1="${TEST_TMPDIR}"/hot_restart.1."${TEST_INDEX}".yaml @@ -165,18 +195,58 @@ function run_testsuite() { CONFIG_DIFF=$(diff "${UPDATED_HOT_RESTART_JSON}" "${HOT_RESTART_JSON_1}") [[ -z "${CONFIG_DIFF}" ]] + # Send SIGUSR1 signal to the second server, this should not kill it, and + # we prove that by checking its stats after having sent it a signal. 
+ start_test Sending SIGUSR1 to SERVER_1. + kill -SIGUSR1 ${SERVER_1_PID} + sleep 3 + start_test Checking server.hot_restart_generation 2 - GENERATION_1=$(curl -sg http://${ADMIN_ADDRESS_1}/stats | grep server.hot_restart_generation) - check [ "$GENERATION_1" = "server.hot_restart_generation: 2" ]; + GENERATION_1=$(scrape_stat "${ADMIN_ADDRESS_1}" "server.hot_restart_generation") + check [ "$GENERATION_1" = "2" ]; ADMIN_ADDRESS_PATH_2="${TEST_TMPDIR}"/admin.2."${TEST_INDEX}".address start_test Starting epoch 2 run_in_background_saving_pid "${ENVOY_BIN}" -c "${UPDATED_HOT_RESTART_JSON}" \ --restart-epoch 2 --base-id "${BASE_ID}" --service-cluster cluster --service-node node \ - --use-fake-symbol-table "$FAKE_SYMBOL_TABLE" --admin-address-path "${ADMIN_ADDRESS_PATH_2}" + --use-fake-symbol-table "$FAKE_SYMBOL_TABLE" --admin-address-path "${ADMIN_ADDRESS_PATH_2}" \ + --parent-shutdown-time-s 3 - THIRD_SERVER_PID=$BACKGROUND_PID - sleep 3 + SERVER_2_PID=$BACKGROUND_PID + + # Now wait for the SERVER_0 to exit. It should occur immediately when SERVER_2 starts, as + # SERVER_1 will terminate SERVER_0 when it becomes the parent. + start_test Waiting for epoch 0 to finish. + echo time wait ${SERVER_0_PID} + time wait ${SERVER_0_PID} + [[ $? == 0 ]] + + # Then wait for the SERVER_1 to exit, which should happen within a few seconds + # due to '--parent-shutdown-time-s 3' on SERVER_2. + start_test Waiting for epoch 1 to finish. + echo time wait ${SERVER_1_PID} + time wait ${SERVER_1_PID} + [[ $? == 0 ]] + + # This tests that we are retaining the generation count. For most Gauges, + # we erase the parent contribution when the parent exits, but + # server.hot_restart_generation is excluded. Commenting out the call to + # stat_merger_->retainParentGaugeValue(hot_restart_generation_stat_name_) + # in source/server/hot_restarting_child.cc results in this test failing, + # with the generation being decremented back to 1. 
+ start_test Checking server.hot_restart_generation 2 + ADMIN_ADDRESS_2=$(cat "${ADMIN_ADDRESS_PATH_2}") + GENERATION_2=$(scrape_stat "${ADMIN_ADDRESS_2}" "server.hot_restart_generation") + check [ "$GENERATION_2" = "3" ]; + + # Check to see that the SERVER_2's test_gauge value reverts bac to 1, since + # its parents have now exited and we have erased their gauge contributions. + start_test Check that the hotrestart_test_gauge reported in SERVER_2 excludes parent contribution + wait_status=$(wait_for_stat "$ADMIN_ADDRESS_2" "hotrestart_test_gauge" -eq 1 5) + echo $wait_status + if [[ "$wait_status" != success* ]]; then + handle_failure timeout + fi start_test Checking that listener addresses have not changed HOT_RESTART_JSON_2="${TEST_TMPDIR}"/hot_restart.2."${TEST_INDEX}".yaml @@ -185,24 +255,10 @@ function run_testsuite() { CONFIG_DIFF=$(diff "${UPDATED_HOT_RESTART_JSON}" "${HOT_RESTART_JSON_2}") [[ -z "${CONFIG_DIFF}" ]] - # First server should already be gone. - start_test Waiting for epoch 0 - wait ${FIRST_SERVER_PID} - [[ $? == 0 ]] - - #Send SIGUSR1 signal to the second server, this should not kill it - start_test Sending SIGUSR1 to the second server - kill -SIGUSR1 ${SECOND_SERVER_PID} - sleep 3 - # Now term the last server, and the other one should exit also. start_test Killing and waiting for epoch 2 - kill ${THIRD_SERVER_PID} - wait ${THIRD_SERVER_PID} - [[ $? == 0 ]] - - start_test Waiting for epoch 1 - wait ${SECOND_SERVER_PID} + kill ${SERVER_2_PID} + wait ${SERVER_2_PID} [[ $? 
== 0 ]] } diff --git a/test/integration/http2_integration_test.cc b/test/integration/http2_integration_test.cc index 2ca27de3d0c38..2cc24c148bc76 100644 --- a/test/integration/http2_integration_test.cc +++ b/test/integration/http2_integration_test.cc @@ -8,7 +8,9 @@ #include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" #include "common/buffer/buffer_impl.h" +#include "common/common/random_generator.h" #include "common/http/header_map_impl.h" +#include "common/network/socket_option_impl.h" #include "test/integration/utility.h" #include "test/mocks/http/mocks.h" @@ -99,6 +101,28 @@ TEST_P(Http2IntegrationTest, RetryAttemptCount) { testRetryAttemptCountHeader(); TEST_P(Http2IntegrationTest, LargeRequestTrailersRejected) { testLargeRequestTrailers(66, 60); } +// Verify downstream codec stream flush timeout. +TEST_P(Http2IntegrationTest, CodecStreamIdleTimeout) { + config_helper_.setBufferLimits(1024, 1024); + config_helper_.addConfigModifier( + [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) -> void { + hcm.mutable_stream_idle_timeout()->set_seconds(0); + constexpr uint64_t IdleTimeoutMs = 400; + hcm.mutable_stream_idle_timeout()->set_nanos(IdleTimeoutMs * 1000 * 1000); + }); + initialize(); + envoy::config::core::v3::Http2ProtocolOptions http2_options; + http2_options.mutable_initial_stream_window_size()->set_value(65535); + codec_client_ = makeRawHttpConnection(makeClientConnection(lookupPort("http")), http2_options); + auto response = codec_client_->makeHeaderOnlyRequest(default_request_headers_); + waitForNextUpstreamRequest(); + upstream_request_->encodeHeaders(default_response_headers_, false); + upstream_request_->encodeData(70000, true); + test_server_->waitForCounterEq("http2.tx_flush_timeout", 1); + response->waitForReset(); +} + static std::string response_metadata_filter = R"EOF( name: response-metadata-filter typed_config: @@ -128,7 +152,7 @@ 
TEST_P(Http2MetadataIntegrationTest, ProxyMetadataInResponse) { // Verifies metadata is received by the client. response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ(response->metadata_map().find(key)->second, value); + EXPECT_EQ(response->metadataMap().find(key)->second, value); // Sends the second request. response = codec_client_->makeRequestWithBody(default_request_headers_, 10); @@ -147,7 +171,7 @@ TEST_P(Http2MetadataIntegrationTest, ProxyMetadataInResponse) { // Verifies metadata is received by the client. response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ(response->metadata_map().find(key)->second, value); + EXPECT_EQ(response->metadataMap().find(key)->second, value); // Sends the third request. response = codec_client_->makeRequestWithBody(default_request_headers_, 10); @@ -166,7 +190,7 @@ TEST_P(Http2MetadataIntegrationTest, ProxyMetadataInResponse) { // Verifies metadata is received by the client. response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ(response->metadata_map().find(key)->second, value); + EXPECT_EQ(response->metadataMap().find(key)->second, value); // Sends the fourth request. response = codec_client_->makeRequestWithBody(default_request_headers_, 10); @@ -186,7 +210,7 @@ TEST_P(Http2MetadataIntegrationTest, ProxyMetadataInResponse) { // Verifies metadata is received by the client. response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ(response->metadata_map().find(key)->second, value); + EXPECT_EQ(response->metadataMap().find(key)->second, value); // Sends the fifth request. response = codec_client_->makeRequestWithBody(default_request_headers_, 10); @@ -206,7 +230,7 @@ TEST_P(Http2MetadataIntegrationTest, ProxyMetadataInResponse) { // Verifies metadata is received by the client. 
response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ(response->metadata_map().find(key)->second, value); + EXPECT_EQ(response->metadataMap().find(key)->second, value); // Sends the sixth request. response = codec_client_->makeRequestWithBody(default_request_headers_, 10); @@ -239,7 +263,7 @@ TEST_P(Http2MetadataIntegrationTest, ProxyMultipleMetadata) { const int size = 4; std::vector multiple_vecs(size); for (int i = 0; i < size; i++) { - Runtime::RandomGeneratorImpl random; + Random::RandomGeneratorImpl random; int value_size = random.random() % Http::METADATA_MAX_PAYLOAD_SIZE + 1; Http::MetadataMap metadata_map = {{std::string(i, 'a'), std::string(value_size, 'b')}}; Http::MetadataMapPtr metadata_map_ptr = std::make_unique(metadata_map); @@ -259,10 +283,10 @@ TEST_P(Http2MetadataIntegrationTest, ProxyMultipleMetadata) { ASSERT_TRUE(response->complete()); for (int i = 0; i < size; i++) { for (const auto& metadata : *multiple_vecs[i][0]) { - EXPECT_EQ(response->metadata_map().find(metadata.first)->second, metadata.second); + EXPECT_EQ(response->metadataMap().find(metadata.first)->second, metadata.second); } } - EXPECT_EQ(response->metadata_map().size(), multiple_vecs.size()); + EXPECT_EQ(response->metadataMap().size(), multiple_vecs.size()); } TEST_P(Http2MetadataIntegrationTest, ProxyInvalidMetadata) { @@ -290,7 +314,7 @@ TEST_P(Http2MetadataIntegrationTest, ProxyInvalidMetadata) { // Verifies metadata is not received by the client. 
response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ(response->metadata_map().size(), 0); + EXPECT_EQ(response->metadataMap().size(), 0); } void verifyExpectedMetadata(Http::MetadataMap metadata_map, std::set keys) { @@ -318,7 +342,7 @@ TEST_P(Http2MetadataIntegrationTest, TestResponseMetadata) { response->waitForEndStream(); ASSERT_TRUE(response->complete()); std::set expected_metadata_keys = {"headers", "duplicate"}; - verifyExpectedMetadata(response->metadata_map(), expected_metadata_keys); + verifyExpectedMetadata(response->metadataMap(), expected_metadata_keys); // Upstream responds with headers and data. response = codec_client_->makeRequestWithBody(default_request_headers_, 10); @@ -329,7 +353,7 @@ TEST_P(Http2MetadataIntegrationTest, TestResponseMetadata) { response->waitForEndStream(); ASSERT_TRUE(response->complete()); expected_metadata_keys.insert("data"); - verifyExpectedMetadata(response->metadata_map(), expected_metadata_keys); + verifyExpectedMetadata(response->metadataMap(), expected_metadata_keys); EXPECT_EQ(response->keyCount("duplicate"), 2); // Upstream responds with headers, data and trailers. @@ -343,7 +367,7 @@ TEST_P(Http2MetadataIntegrationTest, TestResponseMetadata) { response->waitForEndStream(); ASSERT_TRUE(response->complete()); expected_metadata_keys.insert("trailers"); - verifyExpectedMetadata(response->metadata_map(), expected_metadata_keys); + verifyExpectedMetadata(response->metadataMap(), expected_metadata_keys); EXPECT_EQ(response->keyCount("duplicate"), 3); // Upstream responds with headers, 100-continue and data. 
@@ -365,7 +389,7 @@ TEST_P(Http2MetadataIntegrationTest, TestResponseMetadata) { ASSERT_TRUE(response->complete()); expected_metadata_keys.erase("trailers"); expected_metadata_keys.insert("100-continue"); - verifyExpectedMetadata(response->metadata_map(), expected_metadata_keys); + verifyExpectedMetadata(response->metadataMap(), expected_metadata_keys); EXPECT_EQ(response->keyCount("duplicate"), 4); // Upstream responds with headers and metadata that will not be consumed. @@ -384,7 +408,7 @@ TEST_P(Http2MetadataIntegrationTest, TestResponseMetadata) { expected_metadata_keys.erase("100-continue"); expected_metadata_keys.insert("aaa"); expected_metadata_keys.insert("keep"); - verifyExpectedMetadata(response->metadata_map(), expected_metadata_keys); + verifyExpectedMetadata(response->metadataMap(), expected_metadata_keys); // Upstream responds with headers, data and metadata that will be consumed. response = codec_client_->makeRequestWithBody(default_request_headers_, 10); @@ -402,7 +426,7 @@ TEST_P(Http2MetadataIntegrationTest, TestResponseMetadata) { expected_metadata_keys.erase("aaa"); expected_metadata_keys.insert("data"); expected_metadata_keys.insert("replace"); - verifyExpectedMetadata(response->metadata_map(), expected_metadata_keys); + verifyExpectedMetadata(response->metadataMap(), expected_metadata_keys); EXPECT_EQ(response->keyCount("duplicate"), 2); } @@ -452,9 +476,9 @@ TEST_P(Http2MetadataIntegrationTest, ProxySmallMetadataInRequest) { // Verifies metadata is received by upstream. 
upstream_request_->encodeHeaders(default_response_headers_, true); - EXPECT_EQ(upstream_request_->metadata_map().find("key")->second, "value"); - EXPECT_EQ(upstream_request_->metadata_map().size(), 1); - EXPECT_EQ(upstream_request_->duplicated_metadata_key_count().find("key")->second, 3); + EXPECT_EQ(upstream_request_->metadataMap().find("key")->second, "value"); + EXPECT_EQ(upstream_request_->metadataMap().size(), 1); + EXPECT_EQ(upstream_request_->duplicatedMetadataKeyCount().find("key")->second, 3); response->waitForEndStream(); ASSERT_TRUE(response->complete()); @@ -482,9 +506,9 @@ TEST_P(Http2MetadataIntegrationTest, ProxyLargeMetadataInRequest) { // Verifies metadata is received upstream. upstream_request_->encodeHeaders(default_response_headers_, true); - EXPECT_EQ(upstream_request_->metadata_map().find("key")->second, value); - EXPECT_EQ(upstream_request_->metadata_map().size(), 1); - EXPECT_EQ(upstream_request_->duplicated_metadata_key_count().find("key")->second, 3); + EXPECT_EQ(upstream_request_->metadataMap().find("key")->second, value); + EXPECT_EQ(upstream_request_->metadataMap().size(), 1); + EXPECT_EQ(upstream_request_->duplicatedMetadataKeyCount().find("key")->second, 3); response->waitForEndStream(); ASSERT_TRUE(response->complete()); @@ -512,7 +536,7 @@ TEST_P(Http2MetadataIntegrationTest, RequestMetadataReachSizeLimit) { } // Verifies client connection will be closed. - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); ASSERT_FALSE(response->complete()); } @@ -541,7 +565,7 @@ TEST_P(Http2MetadataIntegrationTest, ConsumeAndInsertRequestMetadata) { // Verifies a headers metadata added. std::set expected_metadata_keys = {"headers"}; expected_metadata_keys.insert("metadata"); - verifyExpectedMetadata(upstream_request_->metadata_map(), expected_metadata_keys); + verifyExpectedMetadata(upstream_request_->metadataMap(), expected_metadata_keys); // Sends a headers only request with metadata. 
An empty data frame carries end_stream. auto encoder_decoder = codec_client_->startRequest(default_request_headers_); @@ -558,8 +582,8 @@ TEST_P(Http2MetadataIntegrationTest, ConsumeAndInsertRequestMetadata) { expected_metadata_keys.insert("data"); expected_metadata_keys.insert("metadata"); expected_metadata_keys.insert("replace"); - verifyExpectedMetadata(upstream_request_->metadata_map(), expected_metadata_keys); - EXPECT_EQ(upstream_request_->duplicated_metadata_key_count().find("metadata")->second, 3); + verifyExpectedMetadata(upstream_request_->metadataMap(), expected_metadata_keys); + EXPECT_EQ(upstream_request_->duplicatedMetadataKeyCount().find("metadata")->second, 3); // Verifies zero length data received, and end_stream is true. EXPECT_EQ(true, upstream_request_->receivedData()); EXPECT_EQ(0, upstream_request_->bodyLength()); @@ -580,8 +604,8 @@ TEST_P(Http2MetadataIntegrationTest, ConsumeAndInsertRequestMetadata) { response->waitForEndStream(); ASSERT_TRUE(response->complete()); expected_metadata_keys.insert("trailers"); - verifyExpectedMetadata(upstream_request_->metadata_map(), expected_metadata_keys); - EXPECT_EQ(upstream_request_->duplicated_metadata_key_count().find("metadata")->second, 4); + verifyExpectedMetadata(upstream_request_->metadataMap(), expected_metadata_keys); + EXPECT_EQ(upstream_request_->duplicatedMetadataKeyCount().find("metadata")->second, 4); // Sends headers, large data, metadata. Large data triggers decodeData() multiple times, and each // time, a "data" metadata is added. 
@@ -598,9 +622,9 @@ TEST_P(Http2MetadataIntegrationTest, ConsumeAndInsertRequestMetadata) { ASSERT_TRUE(response->complete()); expected_metadata_keys.erase("trailers"); - verifyExpectedMetadata(upstream_request_->metadata_map(), expected_metadata_keys); - EXPECT_GE(upstream_request_->duplicated_metadata_key_count().find("data")->second, 2); - EXPECT_GE(upstream_request_->duplicated_metadata_key_count().find("metadata")->second, 3); + verifyExpectedMetadata(upstream_request_->metadataMap(), expected_metadata_keys); + EXPECT_GE(upstream_request_->duplicatedMetadataKeyCount().find("data")->second, 2); + EXPECT_GE(upstream_request_->duplicatedMetadataKeyCount().find("metadata")->second, 3); // Sends multiple metadata. auto encoder_decoder_4 = codec_client_->startRequest(default_request_headers_); @@ -622,8 +646,8 @@ TEST_P(Http2MetadataIntegrationTest, ConsumeAndInsertRequestMetadata) { expected_metadata_keys.insert("metadata1"); expected_metadata_keys.insert("metadata2"); expected_metadata_keys.insert("trailers"); - verifyExpectedMetadata(upstream_request_->metadata_map(), expected_metadata_keys); - EXPECT_EQ(upstream_request_->duplicated_metadata_key_count().find("metadata")->second, 6); + verifyExpectedMetadata(upstream_request_->metadataMap(), expected_metadata_keys); + EXPECT_EQ(upstream_request_->duplicatedMetadataKeyCount().find("metadata")->second, 6); } static std::string decode_headers_only = R"EOF( @@ -667,7 +691,7 @@ void Http2MetadataIntegrationTest::verifyHeadersOnlyTest() { // Verifies a headers metadata added. std::set expected_metadata_keys = {"headers"}; expected_metadata_keys.insert("metadata"); - verifyExpectedMetadata(upstream_request_->metadata_map(), expected_metadata_keys); + verifyExpectedMetadata(upstream_request_->metadataMap(), expected_metadata_keys); // Verifies zero length data received, and end_stream is true. 
EXPECT_EQ(true, upstream_request_->receivedData()); @@ -730,8 +754,8 @@ void Http2MetadataIntegrationTest::testRequestMetadataWithStopAllFilter() { ASSERT_TRUE(response->complete()); std::set expected_metadata_keys = {"headers", "data", "metadata", "metadata1", "metadata2", "replace", "trailers"}; - verifyExpectedMetadata(upstream_request_->metadata_map(), expected_metadata_keys); - EXPECT_EQ(upstream_request_->duplicated_metadata_key_count().find("metadata")->second, 6); + verifyExpectedMetadata(upstream_request_->metadataMap(), expected_metadata_keys); + EXPECT_EQ(upstream_request_->duplicatedMetadataKeyCount().find("metadata")->second, 6); } static std::string metadata_stop_all_filter = R"EOF( @@ -781,10 +805,10 @@ name: encode-headers-return-stop-all-filter response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ(response->metadata_map().find("headers")->second, "headers"); - EXPECT_EQ(response->metadata_map().find("data")->second, "data"); - EXPECT_EQ(response->metadata_map().find("trailers")->second, "trailers"); - EXPECT_EQ(response->metadata_map().size(), 3); + EXPECT_EQ(response->metadataMap().find("headers")->second, "headers"); + EXPECT_EQ(response->metadataMap().find("data")->second, "data"); + EXPECT_EQ(response->metadataMap().find("trailers")->second, "trailers"); + EXPECT_EQ(response->metadataMap().size(), 3); EXPECT_EQ(count * size + added_decoded_data_size * 2, response->body().size()); } @@ -796,10 +820,9 @@ TEST_P(Http2IntegrationTest, GrpcRouterNotFound) { lookupPort("http"), "POST", "/service/notfound", "", downstream_protocol_, version_, "host", Http::Headers::get().ContentTypeValues.Grpc); ASSERT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); - EXPECT_EQ(Http::Headers::get().ContentTypeValues.Grpc, - response->headers().ContentType()->value().getStringView()); - EXPECT_EQ("12", response->headers().GrpcStatus()->value().getStringView()); + EXPECT_EQ("200", 
response->headers().getStatusValue()); + EXPECT_EQ(Http::Headers::get().ContentTypeValues.Grpc, response->headers().getContentTypeValue()); + EXPECT_EQ("12", response->headers().getGrpcStatusValue()); } TEST_P(Http2IntegrationTest, GrpcRetry) { testGrpcRetry(); } @@ -807,6 +830,7 @@ TEST_P(Http2IntegrationTest, GrpcRetry) { testGrpcRetry(); } // Verify the case where there is an HTTP/2 codec/protocol error with an active stream. TEST_P(Http2IntegrationTest, CodecErrorAfterStreamStart) { initialize(); + fake_upstreams_[0]->set_allow_unexpected_disconnects(true); codec_client_ = makeHttpConnection(lookupPort("http")); // Sends a request. @@ -823,31 +847,25 @@ TEST_P(Http2IntegrationTest, CodecErrorAfterStreamStart) { TEST_P(Http2IntegrationTest, BadMagic) { initialize(); - Buffer::OwnedImpl buffer("hello"); std::string response; - RawConnectionDriver connection( - lookupPort("http"), buffer, - [&](Network::ClientConnection&, const Buffer::Instance& data) -> void { + auto connection = createConnectionDriver( + lookupPort("http"), "hello", + [&response](Network::ClientConnection&, const Buffer::Instance& data) -> void { response.append(data.toString()); - }, - version_); - - connection.run(); + }); + connection->run(); EXPECT_EQ("", response); } TEST_P(Http2IntegrationTest, BadFrame) { initialize(); - Buffer::OwnedImpl buffer("PRI * HTTP/2.0\r\n\r\nSM\r\n\r\nhelloworldcauseanerror"); std::string response; - RawConnectionDriver connection( - lookupPort("http"), buffer, - [&](Network::ClientConnection&, const Buffer::Instance& data) -> void { + auto connection = createConnectionDriver( + lookupPort("http"), "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\nhelloworldcauseanerror", + [&response](Network::ClientConnection&, const Buffer::Instance& data) -> void { response.append(data.toString()); - }, - version_); - - connection.run(); + }); + connection->run(); EXPECT_TRUE(response.find("SETTINGS expected") != std::string::npos); } @@ -868,7 +886,7 @@ TEST_P(Http2IntegrationTest, GoAway) 
{ codec_client_->close(); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } TEST_P(Http2IntegrationTest, Trailers) { testTrailers(1024, 2048, false, false); } @@ -904,9 +922,9 @@ TEST_P(Http2IntegrationTest, GrpcRequestTimeout) { {"content-type", "application/grpc"}}); response->waitForEndStream(); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_NE(response->headers().GrpcStatus(), nullptr); - EXPECT_EQ("14", response->headers().GrpcStatus()->value().getStringView()); // Service Unavailable + EXPECT_EQ("14", response->headers().getGrpcStatusValue()); // Service Unavailable EXPECT_LT(0, test_server_->counter("cluster.cluster_0.upstream_rq_timeout")->value()); } @@ -973,7 +991,7 @@ TEST_P(Http2IntegrationTest, IdleTimeoutWithSimultaneousRequests) { EXPECT_TRUE(upstream_request2->complete()); EXPECT_EQ(request2_bytes, upstream_request2->bodyLength()); EXPECT_TRUE(response2->complete()); - EXPECT_EQ("200", response2->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response2->headers().getStatusValue()); EXPECT_EQ(request2_bytes, response2->body().size()); // Validate that idle time is not kicked in. @@ -987,7 +1005,7 @@ TEST_P(Http2IntegrationTest, IdleTimeoutWithSimultaneousRequests) { EXPECT_TRUE(upstream_request1->complete()); EXPECT_EQ(request1_bytes, upstream_request1->bodyLength()); EXPECT_TRUE(response1->complete()); - EXPECT_EQ("200", response1->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response1->headers().getStatusValue()); EXPECT_EQ(request1_bytes, response1->body().size()); // Do not send any requests and validate idle timeout kicks in after both the requests are done. 
@@ -1032,12 +1050,12 @@ TEST_P(Http2IntegrationTest, RequestMirrorWithBody) { // Make sure both requests have a body. Also check the shadow for the shadow headers. EXPECT_EQ("hello", upstream_request_->body().toString()); EXPECT_EQ("hello", upstream_request2->body().toString()); - EXPECT_EQ("host-shadow", upstream_request2->headers().Host()->value().getStringView()); + EXPECT_EQ("host-shadow", upstream_request2->headers().getHostValue()); upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, true); upstream_request2->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, true); request->waitForEndStream(); - EXPECT_EQ("200", request->headers().Status()->value().getStringView()); + EXPECT_EQ("200", request->headers().getStatusValue()); // Cleanup. ASSERT_TRUE(fake_upstream_connection2->close()); @@ -1093,7 +1111,7 @@ void Http2IntegrationTest::simultaneousRequest(int32_t request1_bytes, int32_t r EXPECT_TRUE(upstream_request2->complete()); EXPECT_EQ(request2_bytes, upstream_request2->bodyLength()); EXPECT_TRUE(response2->complete()); - EXPECT_EQ("200", response2->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response2->headers().getStatusValue()); EXPECT_EQ(request2_bytes, response2->body().size()); // Respond to request 1 @@ -1103,7 +1121,7 @@ void Http2IntegrationTest::simultaneousRequest(int32_t request1_bytes, int32_t r EXPECT_TRUE(upstream_request1->complete()); EXPECT_EQ(request1_bytes, upstream_request1->bodyLength()); EXPECT_TRUE(response1->complete()); - EXPECT_EQ("200", response1->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response1->headers().getStatusValue()); EXPECT_EQ(request2_bytes, response1->body().size()); // Cleanup both downstream and upstream @@ -1123,27 +1141,29 @@ TEST_P(Http2IntegrationTest, SimultaneousRequestWithBufferLimits) { // Test downstream connection delayed close processing. 
TEST_P(Http2IntegrationTest, DelayedCloseAfterBadFrame) { + config_helper_.addConfigModifier( + [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) { hcm.mutable_delayed_close_timeout()->set_nanos(1000 * 1000); }); initialize(); - Buffer::OwnedImpl buffer("PRI * HTTP/2.0\r\n\r\nSM\r\n\r\nhelloworldcauseanerror"); std::string response; - RawConnectionDriver connection( - lookupPort("http"), buffer, + + auto connection = createConnectionDriver( + lookupPort("http"), "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\nhelloworldcauseanerror", [&](Network::ClientConnection& connection, const Buffer::Instance& data) -> void { response.append(data.toString()); connection.dispatcher().exit(); - }, - version_); + }); - connection.run(); + connection->run(); EXPECT_THAT(response, HasSubstr("SETTINGS expected")); // Due to the multiple dispatchers involved (one for the RawConnectionDriver and another for the // Envoy server), it's possible the delayed close timer could fire and close the server socket // prior to the data callback above firing. Therefore, we may either still be connected, or have // received a remote close. 
- if (connection.last_connection_event() == Network::ConnectionEvent::Connected) { - connection.run(); + if (connection->lastConnectionEvent() == Network::ConnectionEvent::Connected) { + connection->run(); } - EXPECT_EQ(connection.last_connection_event(), Network::ConnectionEvent::RemoteClose); + EXPECT_EQ(connection->lastConnectionEvent(), Network::ConnectionEvent::RemoteClose); EXPECT_EQ(test_server_->counter("http.config_test.downstream_cx_delayed_close_timeout")->value(), 1); } @@ -1154,25 +1174,23 @@ TEST_P(Http2IntegrationTest, DelayedCloseDisabled) { [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& hcm) { hcm.mutable_delayed_close_timeout()->set_seconds(0); }); initialize(); - Buffer::OwnedImpl buffer("PRI * HTTP/2.0\r\n\r\nSM\r\n\r\nhelloworldcauseanerror"); std::string response; - RawConnectionDriver connection( - lookupPort("http"), buffer, + auto connection = createConnectionDriver( + lookupPort("http"), "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\nhelloworldcauseanerror", [&](Network::ClientConnection& connection, const Buffer::Instance& data) -> void { response.append(data.toString()); connection.dispatcher().exit(); - }, - version_); + }); - connection.run(); + connection->run(); EXPECT_THAT(response, HasSubstr("SETTINGS expected")); // Due to the multiple dispatchers involved (one for the RawConnectionDriver and another for the // Envoy server), it's possible for the 'connection' to receive the data and exit the dispatcher // prior to the FIN being received from the server. 
- if (connection.last_connection_event() == Network::ConnectionEvent::Connected) { - connection.run(); + if (connection->lastConnectionEvent() == Network::ConnectionEvent::Connected) { + connection->run(); } - EXPECT_EQ(connection.last_connection_event(), Network::ConnectionEvent::RemoteClose); + EXPECT_EQ(connection->lastConnectionEvent(), Network::ConnectionEvent::RemoteClose); EXPECT_EQ(test_server_->counter("http.config_test.downstream_cx_delayed_close_timeout")->value(), 0); } @@ -1346,7 +1364,7 @@ TEST_P(Http2RingHashIntegrationTest, CookieRoutingNoCookieNoTtl) { {":scheme", "http"}, {":authority", "host"}}, [&](IntegrationStreamDecoder& response) { - EXPECT_EQ("200", response.headers().Status()->value().getStringView()); + EXPECT_EQ("200", response.headers().getStatusValue()); EXPECT_TRUE(response.headers().get(Http::Headers::get().SetCookie) == nullptr); served_by.insert(std::string( response.headers().get(Http::LowerCaseString("x-served-by"))->value().getStringView())); @@ -1376,7 +1394,7 @@ TEST_P(Http2RingHashIntegrationTest, CookieRoutingNoCookieWithNonzeroTtlSet) { {":scheme", "http"}, {":authority", "host"}}, [&](IntegrationStreamDecoder& response) { - EXPECT_EQ("200", response.headers().Status()->value().getStringView()); + EXPECT_EQ("200", response.headers().getStatusValue()); std::string value( response.headers().get(Http::Headers::get().SetCookie)->value().getStringView()); set_cookies.insert(value); @@ -1407,7 +1425,7 @@ TEST_P(Http2RingHashIntegrationTest, CookieRoutingNoCookieWithZeroTtlSet) { {":scheme", "http"}, {":authority", "host"}}, [&](IntegrationStreamDecoder& response) { - EXPECT_EQ("200", response.headers().Status()->value().getStringView()); + EXPECT_EQ("200", response.headers().getStatusValue()); std::string value( response.headers().get(Http::Headers::get().SetCookie)->value().getStringView()); set_cookies.insert(value); @@ -1438,7 +1456,7 @@ TEST_P(Http2RingHashIntegrationTest, CookieRoutingWithCookieNoTtl) { {":scheme", "http"}, 
{":authority", "host"}}, [&](IntegrationStreamDecoder& response) { - EXPECT_EQ("200", response.headers().Status()->value().getStringView()); + EXPECT_EQ("200", response.headers().getStatusValue()); EXPECT_TRUE(response.headers().get(Http::Headers::get().SetCookie) == nullptr); served_by.insert(std::string( response.headers().get(Http::LowerCaseString("x-served-by"))->value().getStringView())); @@ -1469,7 +1487,7 @@ TEST_P(Http2RingHashIntegrationTest, CookieRoutingWithCookieWithTtlSet) { {":scheme", "http"}, {":authority", "host"}}, [&](IntegrationStreamDecoder& response) { - EXPECT_EQ("200", response.headers().Status()->value().getStringView()); + EXPECT_EQ("200", response.headers().getStatusValue()); EXPECT_TRUE(response.headers().get(Http::Headers::get().SetCookie) == nullptr); served_by.insert(std::string( response.headers().get(Http::LowerCaseString("x-served-by"))->value().getStringView())); @@ -1477,74 +1495,120 @@ TEST_P(Http2RingHashIntegrationTest, CookieRoutingWithCookieWithTtlSet) { EXPECT_EQ(served_by.size(), 1); } -namespace { -const int64_t TransmitThreshold = 100 * 1024 * 1024; -} // namespace +void Http2FrameIntegrationTest::startHttp2Session() { + ASSERT_TRUE(tcp_client_->write(Http2Frame::Preamble, false, false)); -void Http2FloodMitigationTest::setNetworkConnectionBufferSize() { - // nghttp2 library has its own internal mitigation for outbound control frames (see - // NGHTTP2_DEFAULT_MAX_OBQ_FLOOD_ITEM). The default nghttp2 mitigation threshold of 1K is modified - // to 10K in the ConnectionImpl::Http2Options::Http2Options. The mitigation is triggered when - // there are more than 10000 PING or SETTINGS frames with ACK flag in the nghttp2 internal - // outbound queue. It is possible to trigger this mitigation in nghttp2 before triggering Envoy's - // own flood mitigation. This can happen when a buffer large enough to contain over 10K PING or - // SETTINGS frames is dispatched to the nghttp2 library. 
To prevent this from happening the - // network connection receive buffer needs to be smaller than 90Kb (which is 10K SETTINGS frames). - // Set it to the arbitrarily chosen value of 32K. Note that this buffer has 16K lower bound. - config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { - RELEASE_ASSERT(bootstrap.mutable_static_resources()->listeners_size() >= 1, ""); - auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0); + // Send empty initial SETTINGS frame. + auto settings = Http2Frame::makeEmptySettingsFrame(); + ASSERT_TRUE(tcp_client_->write(std::string(settings), false, false)); - listener->mutable_per_connection_buffer_limit_bytes()->set_value(32 * 1024); - }); + // Read initial SETTINGS frame from the server. + readFrame(); + + // Send an SETTINGS ACK. + settings = Http2Frame::makeEmptySettingsFrame(Http2Frame::SettingsFlags::Ack); + ASSERT_TRUE(tcp_client_->write(std::string(settings), false, false)); + + // read pending SETTINGS and WINDOW_UPDATE frames + readFrame(); + readFrame(); } -void Http2FloodMitigationTest::beginSession() { +void Http2FrameIntegrationTest::beginSession() { setDownstreamProtocol(Http::CodecClient::Type::HTTP2); setUpstreamProtocol(FakeHttpConnection::Type::HTTP2); // set lower outbound frame limits to make tests run faster config_helper_.setOutboundFramesLimits(1000, 100); initialize(); - tcp_client_ = makeTcpConnection(lookupPort("http")); + // Set up a raw connection to easily send requests without reading responses. 
+ auto options = std::make_shared(); + options->emplace_back(std::make_shared( + envoy::config::core::v3::SocketOption::STATE_PREBIND, + ENVOY_MAKE_SOCKET_OPTION_NAME(SOL_SOCKET, SO_RCVBUF), 1024)); + tcp_client_ = makeTcpConnection(lookupPort("http"), options); startHttp2Session(); } -Http2Frame Http2FloodMitigationTest::readFrame() { +Http2Frame Http2FrameIntegrationTest::readFrame() { Http2Frame frame; - tcp_client_->waitForData(frame.HeaderSize); + EXPECT_TRUE(tcp_client_->waitForData(frame.HeaderSize)); frame.setHeader(tcp_client_->data()); tcp_client_->clearData(frame.HeaderSize); auto len = frame.payloadSize(); if (len) { - tcp_client_->waitForData(len); + EXPECT_TRUE(tcp_client_->waitForData(len)); frame.setPayload(tcp_client_->data()); tcp_client_->clearData(len); } return frame; } -void Http2FloodMitigationTest::sendFame(const Http2Frame& frame) { +void Http2FrameIntegrationTest::sendFrame(const Http2Frame& frame) { ASSERT_TRUE(tcp_client_->connected()); - tcp_client_->write(std::string(frame), false, false); + ASSERT_TRUE(tcp_client_->write(std::string(frame), false, false)); } -void Http2FloodMitigationTest::startHttp2Session() { - tcp_client_->write(Http2Frame::Preamble, false, false); +// Regression test. +TEST_P(Http2FrameIntegrationTest, SetDetailsTwice) { + autonomous_upstream_ = true; + useAccessLog("%RESPONSE_FLAGS% %RESPONSE_CODE_DETAILS%"); + beginSession(); + fake_upstreams_[0]->set_allow_unexpected_disconnects(true); - // Send empty initial SETTINGS frame. 
- auto settings = Http2Frame::makeEmptySettingsFrame(); - tcp_client_->write(std::string(settings), false, false); + // Send two concatenated frames, the first with too many headers, and the second an invalid frame + // (push_promise) + std::string bad_frame = + "00006d0104000000014083a8749783ee3a3fbebebebebebebebebebebebebebebebebebebebebebebebebebebebe" + "bebebebebebebebebebebebebebebebebebebebebebebebebebebebebebebebebebebebebebebebebebebebebebe" + "bebebebebebebebebebebebebebebebebebebebebebebebebebe0001010500000000018800a065"; + Http2Frame request = Http2Frame::makeGenericFrameFromHexDump(bad_frame); + sendFrame(request); + tcp_client_->close(); - // Read initial SETTINGS frame from the server. - readFrame(); + // Expect that the details for the first frame are kept. + EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr("too_many_headers")); +} - // Send an SETTINGS ACK. - settings = Http2Frame::makeEmptySettingsFrame(Http2Frame::SettingsFlags::Ack); - tcp_client_->write(std::string(settings), false, false); +INSTANTIATE_TEST_SUITE_P(IpVersions, Http2FrameIntegrationTest, + testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + TestUtility::ipTestParamsToString); - // read pending SETTINGS and WINDOW_UPDATE frames - readFrame(); - readFrame(); +namespace { +const int64_t TransmitThreshold = 100 * 1024 * 1024; +} // namespace + +void Http2FloodMitigationTest::setNetworkConnectionBufferSize() { + // nghttp2 library has its own internal mitigation for outbound control frames (see + // NGHTTP2_DEFAULT_MAX_OBQ_FLOOD_ITEM). The default nghttp2 mitigation threshold of 1K is modified + // to 10K in the ConnectionImpl::Http2Options::Http2Options. The mitigation is triggered when + // there are more than 10000 PING or SETTINGS frames with ACK flag in the nghttp2 internal + // outbound queue. It is possible to trigger this mitigation in nghttp2 before triggering Envoy's + // own flood mitigation. 
This can happen when a buffer large enough to contain over 10K PING or + // SETTINGS frames is dispatched to the nghttp2 library. To prevent this from happening the + // network connection receive buffer needs to be smaller than 90Kb (which is 10K SETTINGS frames). + // Set it to the arbitrarily chosen value of 32K. Note that this buffer has 16K lower bound. + config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { + RELEASE_ASSERT(bootstrap.mutable_static_resources()->listeners_size() >= 1, ""); + auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0); + + listener->mutable_per_connection_buffer_limit_bytes()->set_value(32 * 1024); + }); +} + +void Http2FloodMitigationTest::beginSession() { + setDownstreamProtocol(Http::CodecClient::Type::HTTP2); + setUpstreamProtocol(FakeHttpConnection::Type::HTTP2); + // set lower outbound frame limits to make tests run faster + config_helper_.setOutboundFramesLimits(1000, 100); + initialize(); + // Set up a raw connection to easily send requests without reading responses. Also, set a small + // TCP receive buffer to speed up connection backup. + auto options = std::make_shared(); + options->emplace_back(std::make_shared( + envoy::config::core::v3::SocketOption::STATE_PREBIND, + ENVOY_MAKE_SOCKET_OPTION_NAME(SOL_SOCKET, SO_RCVBUF), 1024)); + tcp_client_ = makeTcpConnection(lookupPort("http"), options); + startHttp2Session(); } // Verify that the server detects the flood of the given frame. @@ -1563,14 +1627,13 @@ void Http2FloodMitigationTest::floodServer(const Http2Frame& frame, const std::s // Add early stop if we have sent more than 100M of frames, as it this // point it is obvious something is wrong. 
while (total_bytes_sent < TransmitThreshold && tcp_client_->connected()) { - tcp_client_->write({buf.begin(), buf.end()}, false, false); + ASSERT_TRUE(tcp_client_->write({buf.begin(), buf.end()}, false, false)); total_bytes_sent += buf.size(); } EXPECT_LE(total_bytes_sent, TransmitThreshold) << "Flood mitigation is broken."; EXPECT_EQ(1, test_server_->counter(flood_stat)->value()); - EXPECT_EQ(1, - test_server_->counter("http.config_test.downstream_cx_delayed_close_timeout")->value()); + test_server_->waitForCounterGe("http.config_test.downstream_cx_delayed_close_timeout", 1); } // Verify that the server detects the flood using specified request parameters. @@ -1579,7 +1642,7 @@ void Http2FloodMitigationTest::floodServer(absl::string_view host, absl::string_ const std::string& flood_stat) { uint32_t request_idx = 0; auto request = Http2Frame::makeRequest(request_idx, host, path); - sendFame(request); + sendFrame(request); auto frame = readFrame(); EXPECT_EQ(Http2Frame::Type::Headers, frame.type()); EXPECT_EQ(expected_http_status, frame.responseStatus()); @@ -1587,7 +1650,7 @@ void Http2FloodMitigationTest::floodServer(absl::string_view host, absl::string_ uint64_t total_bytes_sent = 0; while (total_bytes_sent < TransmitThreshold && tcp_client_->connected()) { request = Http2Frame::makeRequest(++request_idx, host, path); - sendFame(request); + sendFrame(request); total_bytes_sent += request.size(); } EXPECT_LE(total_bytes_sent, TransmitThreshold) << "Flood mitigation is broken."; @@ -1629,6 +1692,7 @@ TEST_P(Http2FloodMitigationTest, Data) { // Set large buffer limits so the test is not affected by the flow control. 
config_helper_.setBufferLimits(1024 * 1024 * 1024, 1024 * 1024 * 1024); autonomous_upstream_ = true; + autonomous_allow_incomplete_streams_ = true; beginSession(); fake_upstreams_[0]->set_allow_unexpected_disconnects(true); @@ -1641,13 +1705,15 @@ TEST_P(Http2FloodMitigationTest, RST_STREAM) { config_helper_.addConfigModifier( [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& hcm) -> void { - hcm.mutable_http2_protocol_options()->set_stream_error_on_invalid_http_messaging(true); + hcm.mutable_http2_protocol_options() + ->mutable_override_stream_error_on_invalid_http_message() + ->set_value(true); }); beginSession(); int i = 0; auto request = Http::Http2::Http2Frame::makeMalformedRequest(i); - sendFame(request); + sendFrame(request); auto response = readFrame(); // Make sure we've got RST_STREAM from the server EXPECT_EQ(Http2Frame::Type::RstStream, response.type()); @@ -1658,7 +1724,7 @@ TEST_P(Http2FloodMitigationTest, RST_STREAM) { uint64_t total_bytes_sent = 0; while (total_bytes_sent < TransmitThreshold && tcp_client_->connected()) { request = Http::Http2::Http2Frame::makeMalformedRequest(++i); - sendFame(request); + sendFrame(request); total_bytes_sent += request.size(); } EXPECT_LE(total_bytes_sent, TransmitThreshold) << "Flood mitigation is broken."; @@ -1695,7 +1761,7 @@ TEST_P(Http2FloodMitigationTest, EmptyHeaders) { uint32_t request_idx = 0; auto request = Http2Frame::makeEmptyHeadersFrame(request_idx); - sendFame(request); + sendFrame(request); tcp_client_->waitForDisconnect(); @@ -1705,39 +1771,43 @@ TEST_P(Http2FloodMitigationTest, EmptyHeaders) { } TEST_P(Http2FloodMitigationTest, EmptyHeadersContinuation) { + useAccessLog("%RESPONSE_FLAGS% %RESPONSE_CODE_DETAILS%"); beginSession(); uint32_t request_idx = 0; auto request = Http2Frame::makeEmptyHeadersFrame(request_idx); - sendFame(request); + sendFrame(request); for (int i = 0; i < 2; i++) { request = Http2Frame::makeEmptyContinuationFrame(request_idx); - 
sendFame(request); + sendFrame(request); } tcp_client_->waitForDisconnect(); + EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr("http2.inbound_empty_frames_flood")); EXPECT_EQ(1, test_server_->counter("http2.inbound_empty_frames_flood")->value()); EXPECT_EQ(1, test_server_->counter("http.config_test.downstream_cx_delayed_close_timeout")->value()); } TEST_P(Http2FloodMitigationTest, EmptyData) { + useAccessLog("%RESPONSE_FLAGS% %RESPONSE_CODE_DETAILS%"); beginSession(); fake_upstreams_[0]->set_allow_unexpected_disconnects(true); uint32_t request_idx = 0; auto request = Http2Frame::makePostRequest(request_idx, "host", "/"); - sendFame(request); + sendFrame(request); for (int i = 0; i < 2; i++) { request = Http2Frame::makeEmptyDataFrame(request_idx); - sendFame(request); + sendFrame(request); } tcp_client_->waitForDisconnect(); + EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr("http2.inbound_empty_frames_flood")); EXPECT_EQ(1, test_server_->counter("http2.inbound_empty_frames_flood")->value()); EXPECT_EQ(1, test_server_->counter("http.config_test.downstream_cx_delayed_close_timeout")->value()); @@ -1756,7 +1826,7 @@ TEST_P(Http2FloodMitigationTest, PriorityOpenStream) { // Open stream. uint32_t request_idx = 0; auto request = Http2Frame::makeRequest(request_idx, "host", "/"); - sendFame(request); + sendFrame(request); floodServer(Http2Frame::makePriorityFrame(request_idx, request_idx + 1), "http2.inbound_priority_frames_flood"); @@ -1770,7 +1840,7 @@ TEST_P(Http2FloodMitigationTest, PriorityClosedStream) { // Open stream. uint32_t request_idx = 0; auto request = Http2Frame::makeRequest(request_idx, "host", "/"); - sendFame(request); + sendFrame(request); // Reading response marks this stream as closed in nghttp2. auto frame = readFrame(); EXPECT_EQ(Http2Frame::Type::Headers, frame.type()); @@ -1786,7 +1856,7 @@ TEST_P(Http2FloodMitigationTest, WindowUpdate) { // Open stream. 
uint32_t request_idx = 0; auto request = Http2Frame::makeRequest(request_idx, "host", "/"); - sendFame(request); + sendFrame(request); floodServer(Http2Frame::makeWindowUpdateFrame(request_idx, 1), "http2.inbound_window_update_frames_flood"); @@ -1800,7 +1870,7 @@ TEST_P(Http2FloodMitigationTest, ZerolenHeader) { // Send invalid request. uint32_t request_idx = 0; auto request = Http2Frame::makeMalformedRequestWithZerolenHeader(request_idx, "host", "/"); - sendFame(request); + sendFrame(request); tcp_client_->waitForDisconnect(); @@ -1818,7 +1888,9 @@ TEST_P(Http2FloodMitigationTest, ZerolenHeaderAllowed) { config_helper_.addConfigModifier( [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& hcm) -> void { - hcm.mutable_http2_protocol_options()->set_stream_error_on_invalid_http_messaging(true); + hcm.mutable_http2_protocol_options() + ->mutable_override_stream_error_on_invalid_http_message() + ->set_value(true); }); autonomous_upstream_ = true; beginSession(); @@ -1827,7 +1899,7 @@ TEST_P(Http2FloodMitigationTest, ZerolenHeaderAllowed) { // Send invalid request. uint32_t request_idx = 0; auto request = Http2Frame::makeMalformedRequestWithZerolenHeader(request_idx, "host", "/"); - sendFame(request); + sendFrame(request); // Make sure we've got RST_STREAM from the server. auto response = readFrame(); EXPECT_EQ(Http2Frame::Type::RstStream, response.type()); @@ -1835,7 +1907,7 @@ TEST_P(Http2FloodMitigationTest, ZerolenHeaderAllowed) { // Send valid request using the same connection. 
request_idx++; request = Http2Frame::makeRequest(request_idx, "host", "/"); - sendFame(request); + sendFrame(request); response = readFrame(); EXPECT_EQ(Http2Frame::Type::Headers, response.type()); EXPECT_EQ(Http2Frame::ResponseStatus::Ok, response.responseStatus()); diff --git a/test/integration/http2_integration_test.h b/test/integration/http2_integration_test.h index d19d2d6436b36..bcafbf0c78666 100644 --- a/test/integration/http2_integration_test.h +++ b/test/integration/http2_integration_test.h @@ -67,21 +67,33 @@ class Http2MetadataIntegrationTest : public Http2IntegrationTest { void runHeaderOnlyTest(bool send_request_body, size_t body_size); }; -class Http2FloodMitigationTest : public testing::TestWithParam, - public HttpIntegrationTest { +class Http2FrameIntegrationTest : public testing::TestWithParam, + public HttpIntegrationTest { public: - Http2FloodMitigationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP2, GetParam()) {} + Http2FrameIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP2, GetParam()) {} protected: void startHttp2Session(); + Http2Frame readFrame(); + void sendFrame(const Http2Frame& frame); + virtual void beginSession(); + + IntegrationTcpClientPtr tcp_client_; +}; + +class Http2FloodMitigationTest : public Http2FrameIntegrationTest { +public: + Http2FloodMitigationTest() { + config_helper_.addConfigModifier( + [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) { hcm.mutable_delayed_close_timeout()->set_seconds(1); }); + } + +protected: void floodServer(const Http2Frame& frame, const std::string& flood_stat); void floodServer(absl::string_view host, absl::string_view path, Http2Frame::ResponseStatus expected_http_status, const std::string& flood_stat); - Http2Frame readFrame(); - void sendFame(const Http2Frame& frame); void setNetworkConnectionBufferSize(); - void beginSession(); - - IntegrationTcpClientPtr tcp_client_; + void beginSession() override; }; } // 
namespace Envoy diff --git a/test/integration/http2_upstream_integration_test.cc b/test/integration/http2_upstream_integration_test.cc index 37cda22965f25..ba32580ec24b7 100644 --- a/test/integration/http2_upstream_integration_test.cc +++ b/test/integration/http2_upstream_integration_test.cc @@ -88,7 +88,7 @@ void Http2UpstreamIntegrationTest::bidirectionalStreaming(uint32_t bytes) { ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_)); // Finish the response. - upstream_request_->encodeTrailers(Http::TestHeaderMapImpl{{"trailer", "bar"}}); + upstream_request_->encodeTrailers(Http::TestResponseTrailerMapImpl{{"trailer", "bar"}}); response->waitForEndStream(); EXPECT_TRUE(response->complete()); } @@ -180,7 +180,7 @@ void Http2UpstreamIntegrationTest::simultaneousRequest(uint32_t request1_bytes, EXPECT_TRUE(upstream_request2->complete()); EXPECT_EQ(request2_bytes, upstream_request2->bodyLength()); EXPECT_TRUE(response2->complete()); - EXPECT_EQ("200", response2->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response2->headers().getStatusValue()); EXPECT_EQ(response2_bytes, response2->body().size()); // Respond to request 1 @@ -190,7 +190,7 @@ void Http2UpstreamIntegrationTest::simultaneousRequest(uint32_t request1_bytes, EXPECT_TRUE(upstream_request1->complete()); EXPECT_EQ(request1_bytes, upstream_request1->bodyLength()); EXPECT_TRUE(response1->complete()); - EXPECT_EQ("200", response1->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response1->headers().getStatusValue()); EXPECT_EQ(response1_bytes, response1->body().size()); } @@ -235,13 +235,16 @@ void Http2UpstreamIntegrationTest::manySimultaneousRequests(uint32_t request_byt responses[i]->waitForEndStream(); if (i % 2 != 0) { EXPECT_TRUE(responses[i]->complete()); - EXPECT_EQ("200", responses[i]->headers().Status()->value().getStringView()); + EXPECT_EQ("200", responses[i]->headers().getStatusValue()); EXPECT_EQ(response_bytes[i], responses[i]->body().length()); } 
else { // Upstream stream reset. - EXPECT_EQ("503", responses[i]->headers().Status()->value().getStringView()); + EXPECT_EQ("503", responses[i]->headers().getStatusValue()); } } + + EXPECT_EQ(0, test_server_->gauge("http2.streams_active")->value()); + EXPECT_EQ(0, test_server_->gauge("http2.pending_send_bytes")->value()); } TEST_P(Http2UpstreamIntegrationTest, ManySimultaneousRequest) { @@ -386,7 +389,7 @@ TEST_P(Http2UpstreamIntegrationTest, TestManyResponseHeadersRejected) { initialize(); codec_client_ = makeHttpConnection(lookupPort("http")); - Http::TestHeaderMapImpl many_headers(default_response_headers_); + Http::TestResponseHeaderMapImpl many_headers(default_response_headers_); for (int i = 0; i < 100; i++) { many_headers.addCopy("many", std::string(1, 'a')); } @@ -396,7 +399,7 @@ TEST_P(Http2UpstreamIntegrationTest, TestManyResponseHeadersRejected) { upstream_request_->encodeHeaders(many_headers, true); response->waitForEndStream(); // Upstream stream reset triggered. - EXPECT_EQ("503", response->headers().Status()->value().getStringView()); + EXPECT_EQ("503", response->headers().getStatusValue()); } // Tests bootstrap configuration of max response headers. @@ -431,7 +434,7 @@ TEST_P(Http2UpstreamIntegrationTest, LargeResponseHeadersRejected) { initialize(); codec_client_ = makeHttpConnection(lookupPort("http")); - Http::TestHeaderMapImpl large_headers(default_response_headers_); + Http::TestResponseHeaderMapImpl large_headers(default_response_headers_); large_headers.addCopy("large", std::string(60 * 1024, 'a')); auto response = codec_client_->makeHeaderOnlyRequest(default_request_headers_); waitForNextUpstreamRequest(); @@ -439,7 +442,7 @@ TEST_P(Http2UpstreamIntegrationTest, LargeResponseHeadersRejected) { upstream_request_->encodeHeaders(large_headers, true); response->waitForEndStream(); // Upstream stream reset. 
- EXPECT_EQ("503", response->headers().Status()->value().getStringView()); + EXPECT_EQ("503", response->headers().getStatusValue()); } // Regression test to make sure that configuring upstream logs over gRPC will not crash Envoy. @@ -483,7 +486,7 @@ name: router // Send the response headers. upstream_request_->encodeHeaders(default_response_headers_, true); response->waitForEndStream(); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } } // namespace Envoy diff --git a/test/integration/http_integration.cc b/test/integration/http_integration.cc index f485f3083c640..56d738e3b9cef 100644 --- a/test/integration/http_integration.cc +++ b/test/integration/http_integration.cc @@ -40,6 +40,8 @@ namespace Envoy { namespace { +using testing::HasSubstr; + envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager::CodecType typeToCodecType(Http::CodecClient::Type type) { switch (type) { @@ -152,7 +154,10 @@ IntegrationCodecClient::startRequest(const Http::RequestHeaderMap& headers) { return {encoder, std::move(response)}; } -bool IntegrationCodecClient::waitForDisconnect(std::chrono::milliseconds time_to_wait) { +AssertionResult IntegrationCodecClient::waitForDisconnect(std::chrono::milliseconds time_to_wait) { + if (disconnected_) { + return AssertionSuccess(); + } Event::TimerPtr wait_timer; bool wait_timer_triggered = false; if (time_to_wait.count()) { @@ -171,11 +176,11 @@ bool IntegrationCodecClient::waitForDisconnect(std::chrono::milliseconds time_to } if (wait_timer_triggered && !disconnected_) { - return false; + return AssertionFailure() << "Timed out waiting for disconnect"; } EXPECT_TRUE(disconnected_); - return true; + return AssertionSuccess(); } void IntegrationCodecClient::ConnectionCallbacks::onEvent(Network::ConnectionEvent event) { @@ -202,12 +207,18 @@ IntegrationCodecClientPtr HttpIntegrationTest::makeHttpConnection(uint32_t port) return 
makeHttpConnection(makeClientConnection(port)); } -IntegrationCodecClientPtr -HttpIntegrationTest::makeRawHttpConnection(Network::ClientConnectionPtr&& conn) { +IntegrationCodecClientPtr HttpIntegrationTest::makeRawHttpConnection( + Network::ClientConnectionPtr&& conn, + absl::optional http2_options) { std::shared_ptr cluster{new NiceMock()}; cluster->max_response_headers_count_ = 200; - cluster->http2_options_.set_allow_connect(true); - cluster->http2_options_.set_allow_metadata(true); + if (!http2_options.has_value()) { + http2_options = Http2::Utility::initializeAndValidateOptions( + envoy::config::core::v3::Http2ProtocolOptions()); + http2_options.value().set_allow_connect(true); + http2_options.value().set_allow_metadata(true); + } + cluster->http2_options_ = http2_options.value(); cluster->http1_settings_.enable_trailers_ = true; Upstream::HostDescriptionConstSharedPtr host_description{Upstream::makeTestHostDescription( cluster, fmt::format("tcp://{}:80", Network::Test::getLoopbackAddressUrlString(version_)))}; @@ -217,7 +228,7 @@ HttpIntegrationTest::makeRawHttpConnection(Network::ClientConnectionPtr&& conn) IntegrationCodecClientPtr HttpIntegrationTest::makeHttpConnection(Network::ClientConnectionPtr&& conn) { - auto codec = makeRawHttpConnection(std::move(conn)); + auto codec = makeRawHttpConnection(std::move(conn), absl::nullopt); EXPECT_TRUE(codec->connected()) << codec->connection()->transportFailureReason(); return codec; } @@ -250,11 +261,7 @@ void HttpIntegrationTest::useAccessLog(absl::string_view format) { ASSERT_TRUE(config_helper_.setAccessLog(access_log_name_, format)); } -HttpIntegrationTest::~HttpIntegrationTest() { - cleanupUpstreamAndDownstream(); - test_server_.reset(); - fake_upstreams_.clear(); -} +HttpIntegrationTest::~HttpIntegrationTest() { cleanupUpstreamAndDownstream(); } void HttpIntegrationTest::setDownstreamProtocol(Http::CodecClient::Type downstream_protocol) { downstream_protocol_ = downstream_protocol; @@ -336,17 +343,15 @@ 
void HttpIntegrationTest::verifyResponse(IntegrationStreamDecoderPtr response, const Http::TestResponseHeaderMapImpl& expected_headers, const std::string& expected_body) { EXPECT_TRUE(response->complete()); - EXPECT_EQ(response_code, response->headers().Status()->value().getStringView()); - expected_headers.iterate( - [](const Http::HeaderEntry& header, void* context) -> Http::HeaderMap::Iterate { - auto response_headers = static_cast(context); - const Http::HeaderEntry* entry = - response_headers->get(Http::LowerCaseString{std::string(header.key().getStringView())}); - EXPECT_NE(entry, nullptr); - EXPECT_EQ(header.value().getStringView(), entry->value().getStringView()); - return Http::HeaderMap::Iterate::Continue; - }, - const_cast(static_cast(&response->headers()))); + EXPECT_EQ(response_code, response->headers().getStatusValue()); + expected_headers.iterate([response_headers = &response->headers()]( + const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate { + const Http::HeaderEntry* entry = + response_headers->get(Http::LowerCaseString{std::string(header.key().getStringView())}); + EXPECT_NE(entry, nullptr); + EXPECT_EQ(header.value().getStringView(), entry->value().getStringView()); + return Http::HeaderMap::Iterate::Continue; + }); EXPECT_EQ(response->body(), expected_body); } @@ -357,16 +362,24 @@ HttpIntegrationTest::waitForNextUpstreamRequest(const std::vector& ups absl::optional upstream_with_request; // If there is no upstream connection, wait for it to be established. 
if (!fake_upstream_connection_) { - AssertionResult result = AssertionFailure(); - for (auto upstream_index : upstream_indices) { - result = fake_upstreams_[upstream_index]->waitForHttpConnection( - *dispatcher_, fake_upstream_connection_, connection_wait_timeout, max_request_headers_kb_, - max_request_headers_count_); + int upstream_index = 0; + Event::TestTimeSystem& time_system = timeSystem(); + auto end_time = time_system.monotonicTime() + connection_wait_timeout; + // Loop over the upstreams until the call times out or an upstream request is received. + while (!result) { + upstream_index = upstream_index % upstream_indices.size(); + result = fake_upstreams_[upstream_indices[upstream_index]]->waitForHttpConnection( + *dispatcher_, fake_upstream_connection_, std::chrono::milliseconds(5), + max_request_headers_kb_, max_request_headers_count_); if (result) { upstream_with_request = upstream_index; break; + } else if (time_system.monotonicTime() >= end_time) { + result = (AssertionFailure() << "Timed out waiting for new connection."); + break; } + ++upstream_index; } RELEASE_ASSERT(result, result.message()); } @@ -393,7 +406,7 @@ void HttpIntegrationTest::checkSimpleRequestSuccess(uint64_t expected_request_si EXPECT_EQ(expected_request_size, upstream_request_->bodyLength()); ASSERT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ(expected_response_size, response->body().size()); } @@ -454,7 +467,7 @@ void HttpIntegrationTest::testRouterNotFound() { BufferingStreamDecoderPtr response = IntegrationUtil::makeSingleRequest( lookupPort("http"), "GET", "/notfound", "", downstream_protocol_, version_); ASSERT_TRUE(response->complete()); - EXPECT_EQ("404", response->headers().Status()->value().getStringView()); + EXPECT_EQ("404", response->headers().getStatusValue()); } // Change the default route to be restrictive, and send a POST to an alternate 
route. @@ -465,7 +478,7 @@ void HttpIntegrationTest::testRouterNotFoundWithBody() { BufferingStreamDecoderPtr response = IntegrationUtil::makeSingleRequest( lookupPort("http"), "POST", "/notfound", "foo", downstream_protocol_, version_); ASSERT_TRUE(response->complete()); - EXPECT_EQ("404", response->headers().Status()->value().getStringView()); + EXPECT_EQ("404", response->headers().getStatusValue()); } // Make sure virtual cluster stats are charged to the appropriate virtual cluster. @@ -529,7 +542,7 @@ void HttpIntegrationTest::testRouterUpstreamDisconnectBeforeRequestComplete() { response->waitForEndStream(); if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); } else { codec_client_->close(); } @@ -538,7 +551,7 @@ void HttpIntegrationTest::testRouterUpstreamDisconnectBeforeRequestComplete() { EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("503", response->headers().Status()->value().getStringView()); + EXPECT_EQ("503", response->headers().getStatusValue()); EXPECT_EQ("upstream connect error or disconnect/reset before headers. 
reset reason: connection " "termination", response->body()); @@ -556,7 +569,7 @@ void HttpIntegrationTest::testRouterUpstreamDisconnectBeforeResponseComplete( ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); } else { response->waitForReset(); codec_client_->close(); @@ -566,7 +579,7 @@ void HttpIntegrationTest::testRouterUpstreamDisconnectBeforeResponseComplete( EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_FALSE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ(0U, response->body().size()); } @@ -628,7 +641,7 @@ void HttpIntegrationTest::testRouterDownstreamDisconnectBeforeResponseComplete( EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_FALSE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ(512U, response->body().size()); } @@ -653,7 +666,7 @@ void HttpIntegrationTest::testRouterUpstreamResponseBeforeRequestComplete() { } if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); } else { codec_client_->close(); } @@ -662,7 +675,7 @@ void HttpIntegrationTest::testRouterUpstreamResponseBeforeRequestComplete() { EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ(512U, response->body().size()); } @@ -695,7 +708,7 @@ void HttpIntegrationTest::testRetry() { EXPECT_EQ(1024U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", 
response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ(512U, response->body().size()); } @@ -720,10 +733,7 @@ void HttpIntegrationTest::testRetryAttemptCountHeader() { waitForNextUpstreamRequest(); upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "503"}}, false); - EXPECT_EQ( - atoi(std::string(upstream_request_->headers().EnvoyAttemptCount()->value().getStringView()) - .c_str()), - 1); + EXPECT_EQ(atoi(std::string(upstream_request_->headers().getEnvoyAttemptCountValue()).c_str()), 1); if (fake_upstreams_[0]->httpType() == FakeHttpConnection::Type::HTTP1) { ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); @@ -732,10 +742,7 @@ void HttpIntegrationTest::testRetryAttemptCountHeader() { ASSERT_TRUE(upstream_request_->waitForReset()); } waitForNextUpstreamRequest(); - EXPECT_EQ( - atoi(std::string(upstream_request_->headers().EnvoyAttemptCount()->value().getStringView()) - .c_str()), - 2); + EXPECT_EQ(atoi(std::string(upstream_request_->headers().getEnvoyAttemptCountValue()).c_str()), 2); upstream_request_->encodeHeaders(default_response_headers_, false); upstream_request_->encodeData(512, true); @@ -744,11 +751,9 @@ void HttpIntegrationTest::testRetryAttemptCountHeader() { EXPECT_EQ(1024U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ(512U, response->body().size()); - EXPECT_EQ( - 2, - atoi(std::string(response->headers().EnvoyAttemptCount()->value().getStringView()).c_str())); + EXPECT_EQ(2, atoi(std::string(response->headers().getEnvoyAttemptCountValue()).c_str())); } void HttpIntegrationTest::testGrpcRetry() { @@ -789,7 +794,7 @@ void HttpIntegrationTest::testGrpcRetry() { EXPECT_EQ(1024U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", 
response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ(512U, response->body().size()); if (fake_upstreams_[0]->httpType() == FakeHttpConnection::Type::HTTP2) { EXPECT_THAT(*response->trailers(), HeaderMapEqualRef(&response_trailers)); @@ -837,19 +842,20 @@ void HttpIntegrationTest::testEnvoyHandling100Continue(bool additional_continue_ response->waitForEndStream(); ASSERT_TRUE(response->complete()); - ASSERT(response->continue_headers() != nullptr); - EXPECT_EQ("100", response->continue_headers()->Status()->value().getStringView()); - EXPECT_EQ(nullptr, response->continue_headers()->Via()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + ASSERT(response->continueHeaders() != nullptr); + EXPECT_EQ("100", response->continueHeaders()->getStatusValue()); + EXPECT_EQ(nullptr, response->continueHeaders()->Via()); + EXPECT_EQ("200", response->headers().getStatusValue()); if (via.empty()) { EXPECT_EQ(nullptr, response->headers().Via()); } else { - EXPECT_EQ(via.c_str(), response->headers().Via()->value().getStringView()); + EXPECT_EQ(via.c_str(), response->headers().getViaValue()); } } -void HttpIntegrationTest::testEnvoyProxying100Continue(bool continue_before_upstream_complete, - bool with_encoder_filter) { +void HttpIntegrationTest::testEnvoyProxying1xx(bool continue_before_upstream_complete, + bool with_encoder_filter, + bool with_multiple_1xx_headers) { if (with_encoder_filter) { // Because 100-continue only affects encoder filters, make sure it plays well with one. 
config_helper_.addFilter("name: envoy.filters.http.cors"); @@ -886,6 +892,13 @@ void HttpIntegrationTest::testEnvoyProxying100Continue(bool continue_before_upst ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); if (continue_before_upstream_complete) { + if (with_multiple_1xx_headers) { + upstream_request_->encode100ContinueHeaders( + Http::TestResponseHeaderMapImpl{{":status", "100"}}); + upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "102"}}, false); + upstream_request_->encode100ContinueHeaders( + Http::TestResponseHeaderMapImpl{{":status", "100"}}); + } // This case tests sending on 100-Continue headers before the client has sent all the // request data. upstream_request_->encode100ContinueHeaders( @@ -897,6 +910,13 @@ void HttpIntegrationTest::testEnvoyProxying100Continue(bool continue_before_upst ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_)); if (!continue_before_upstream_complete) { + if (with_multiple_1xx_headers) { + upstream_request_->encode100ContinueHeaders( + Http::TestResponseHeaderMapImpl{{":status", "100"}}); + upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "102"}}, false); + upstream_request_->encode100ContinueHeaders( + Http::TestResponseHeaderMapImpl{{":status", "100"}}); + } // This case tests forwarding 100-Continue after the client has sent all data. 
upstream_request_->encode100ContinueHeaders( Http::TestResponseHeaderMapImpl{{":status", "100"}}); @@ -906,10 +926,10 @@ void HttpIntegrationTest::testEnvoyProxying100Continue(bool continue_before_upst upstream_request_->encodeHeaders(default_response_headers_, true); response->waitForEndStream(); EXPECT_TRUE(response->complete()); - ASSERT(response->continue_headers() != nullptr); - EXPECT_EQ("100", response->continue_headers()->Status()->value().getStringView()); + ASSERT(response->continueHeaders() != nullptr); + EXPECT_EQ("100", response->continueHeaders()->getStatusValue()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } void HttpIntegrationTest::testTwoRequests(bool network_backup) { @@ -942,7 +962,7 @@ void HttpIntegrationTest::testTwoRequests(bool network_backup) { EXPECT_TRUE(upstream_request_->complete()); EXPECT_EQ(1024U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ(512U, response->body().size()); // Request 2. @@ -955,12 +975,51 @@ void HttpIntegrationTest::testTwoRequests(bool network_backup) { EXPECT_TRUE(upstream_request_->complete()); EXPECT_EQ(512U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ(1024U, response->body().size()); } +void HttpIntegrationTest::testLargeRequestUrl(uint32_t url_size, uint32_t max_headers_size) { + // `size` parameter dictates the size of each header that will be added to the request and `count` + // parameter is the number of headers to be added. The actual request byte size will exceed `size` + // due to the keys and other headers. 
The actual request header count will exceed `count` by four + // due to default headers. + + config_helper_.addConfigModifier( + [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) -> void { hcm.mutable_max_request_headers_kb()->set_value(max_headers_size); }); + max_request_headers_kb_ = max_headers_size; + + Http::TestRequestHeaderMapImpl big_headers{{":method", "GET"}, + {":path", "/" + std::string(url_size * 1024, 'a')}, + {":scheme", "http"}, + {":authority", "host"}}; + + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + if (url_size >= max_headers_size) { + // header size includes keys too, so expect rejection when equal + auto encoder_decoder = codec_client_->startRequest(big_headers); + auto response = std::move(encoder_decoder.second); + + if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { + ASSERT_TRUE(codec_client_->waitForDisconnect()); + EXPECT_TRUE(response->complete()); + EXPECT_EQ("431", response->headers().Status()->value().getStringView()); + } else { + response->waitForReset(); + codec_client_->close(); + } + } else { + auto response = sendRequestAndWaitForResponse(big_headers, 0, default_response_headers_, 0); + EXPECT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + } +} + void HttpIntegrationTest::testLargeRequestHeaders(uint32_t size, uint32_t count, uint32_t max_size, uint32_t max_count) { + useAccessLog("%RESPONSE_CODE_DETAILS%"); // `size` parameter dictates the size of each header that will be added to the request and `count` // parameter is the number of headers to be added. The actual request byte size will exceed `size` // due to the keys and other headers. 
The actual request header count will exceed `count` by four @@ -992,9 +1051,9 @@ void HttpIntegrationTest::testLargeRequestHeaders(uint32_t size, uint32_t count, auto response = std::move(encoder_decoder.second); if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("431", response->headers().Status()->value().getStringView()); + EXPECT_EQ("431", response->headers().getStatusValue()); } else { response->waitForReset(); codec_client_->close(); @@ -1002,7 +1061,10 @@ void HttpIntegrationTest::testLargeRequestHeaders(uint32_t size, uint32_t count, } else { auto response = sendRequestAndWaitForResponse(big_headers, 0, default_response_headers_, 0); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); + } + if (count > max_count) { + EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr("too_many_headers")); } } @@ -1031,9 +1093,9 @@ void HttpIntegrationTest::testLargeRequestTrailers(uint32_t size, uint32_t max_s if (size >= max_size) { if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("431", response->headers().Status()->value().getStringView()); + EXPECT_EQ("431", response->headers().getStatusValue()); } else { // Expect a stream reset when the size of the trailers is larger than the maximum // limit. @@ -1053,7 +1115,7 @@ void HttpIntegrationTest::testManyRequestHeaders(std::chrono::milliseconds time) // This test uses an Http::HeaderMapImpl instead of an Http::TestHeaderMapImpl to avoid // time-consuming asserts when using a large number of headers. 
max_request_headers_kb_ = 96; - max_request_headers_count_ = 20005; + max_request_headers_count_ = 10005; config_helper_.addConfigModifier( [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& @@ -1069,7 +1131,7 @@ void HttpIntegrationTest::testManyRequestHeaders(std::chrono::milliseconds time) {Http::Headers::get().Scheme, "http"}, {Http::Headers::get().Host, "host"}}); - for (int i = 0; i < 20000; i++) { + for (int i = 0; i < 10000; i++) { big_headers->addCopy(Http::LowerCaseString(std::to_string(i)), std::string(0, 'a')); } initialize(); @@ -1080,7 +1142,7 @@ void HttpIntegrationTest::testManyRequestHeaders(std::chrono::milliseconds time) sendRequestAndWaitForResponse(*big_headers, 0, default_response_headers_, 0, 0, time); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } void HttpIntegrationTest::testDownstreamResetBeforeResponseComplete() { @@ -1121,7 +1183,7 @@ void HttpIntegrationTest::testDownstreamResetBeforeResponseComplete() { EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_FALSE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ(512U, response->body().size()); } @@ -1158,7 +1220,7 @@ void HttpIntegrationTest::testTrailers(uint64_t request_size, uint64_t response_ } EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ(response_size, response->body().size()); if (check_response) { EXPECT_THAT(*response->trailers(), HeaderMapEqualRef(&response_trailers)); @@ -1186,7 +1248,7 @@ void HttpIntegrationTest::testAdminDrain(Http::CodecClient::Type admin_request_t BufferingStreamDecoderPtr admin_response = IntegrationUtil::makeSingleRequest( lookupPort("admin"), 
"POST", "/drain_listeners", "", admin_request_type, version_); EXPECT_TRUE(admin_response->complete()); - EXPECT_EQ("200", admin_response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", admin_response->headers().getStatusValue()); EXPECT_EQ("OK\n", admin_response->body()); upstream_request_->encodeData(512, true); @@ -1203,10 +1265,98 @@ void HttpIntegrationTest::testAdminDrain(Http::CodecClient::Type admin_request_t test_server_->waitForCounterEq("listener_manager.listener_stopped", 1); // Validate that port is closed and can be bound by other sockets. - EXPECT_NO_THROW(Network::TcpListenSocket( - Network::Utility::getAddressWithPort(*Network::Test::getCanonicalLoopbackAddress(version_), - http_port), - nullptr, true)); + // This does not work for HTTP/3 because the port is not closed until the listener is completely + // destroyed. TODO(danzh) Match TCP behavior as much as possible. + if (downstreamProtocol() != Http::CodecClient::Type::HTTP3) { + ASSERT_TRUE(waitForPortAvailable(http_port)); + } +} + +void HttpIntegrationTest::testMaxStreamDuration() { + config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + auto* static_resources = bootstrap.mutable_static_resources(); + auto* cluster = static_resources->mutable_clusters(0); + auto* http_protocol_options = cluster->mutable_common_http_protocol_options(); + http_protocol_options->mutable_max_stream_duration()->MergeFrom( + ProtobufUtil::TimeUtil::MillisecondsToDuration(200)); + }); + + initialize(); + fake_upstreams_[0]->set_allow_unexpected_disconnects(true); + codec_client_ = makeHttpConnection(lookupPort("http")); + + auto encoder_decoder = codec_client_->startRequest(default_request_headers_); + request_encoder_ = &encoder_decoder.first; + auto response = std::move(encoder_decoder.second); + + ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); + 
ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); + + test_server_->waitForCounterGe("cluster.cluster_0.upstream_rq_max_duration_reached", 1); + + if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { + ASSERT_TRUE(codec_client_->waitForDisconnect()); + } else { + response->waitForReset(); + codec_client_->close(); + } +} + +void HttpIntegrationTest::testMaxStreamDurationWithRetry(bool invoke_retry_upstream_disconnect) { + config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + auto* static_resources = bootstrap.mutable_static_resources(); + auto* cluster = static_resources->mutable_clusters(0); + auto* http_protocol_options = cluster->mutable_common_http_protocol_options(); + http_protocol_options->mutable_max_stream_duration()->MergeFrom( + ProtobufUtil::TimeUtil::MillisecondsToDuration(1000)); + }); + + Http::TestRequestHeaderMapImpl retriable_header = Http::TestRequestHeaderMapImpl{ + {":method", "POST"}, {":path", "/test/long/url"}, {":scheme", "http"}, + {":authority", "host"}, {"x-forwarded-for", "10.0.0.1"}, {"x-envoy-retry-on", "5xx"}}; + initialize(); + fake_upstreams_[0]->set_allow_unexpected_disconnects(true); + codec_client_ = makeHttpConnection(lookupPort("http")); + + auto encoder_decoder = codec_client_->startRequest(retriable_header); + request_encoder_ = &encoder_decoder.first; + auto response = std::move(encoder_decoder.second); + + ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); + ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); + ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); + + if (fake_upstreams_[0]->httpType() == FakeHttpConnection::Type::HTTP1) { + ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); + ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); + } else { + 
ASSERT_TRUE(upstream_request_->waitForReset()); + } + + test_server_->waitForCounterGe("cluster.cluster_0.upstream_rq_max_duration_reached", 1); + + ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); + + if (invoke_retry_upstream_disconnect) { + test_server_->waitForCounterGe("cluster.cluster_0.upstream_rq_max_duration_reached", 2); + if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { + ASSERT_TRUE(codec_client_->waitForDisconnect()); + } else { + response->waitForReset(); + codec_client_->close(); + } + + EXPECT_EQ("408", response->headers().getStatusValue()); + } else { + Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}}; + upstream_request_->encodeHeaders(response_headers, true); + + response->waitForHeaders(); + codec_client_->close(); + + EXPECT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().getStatusValue()); + } } std::string HttpIntegrationTest::listenerStatPrefix(const std::string& stat_name) { diff --git a/test/integration/http_integration.h b/test/integration/http_integration.h index 99f2c88505219..30e898936f724 100644 --- a/test/integration/http_integration.h +++ b/test/integration/http_integration.h @@ -38,9 +38,10 @@ class IntegrationCodecClient : public Http::CodecClientProd { void sendMetadata(Http::RequestEncoder& encoder, Http::MetadataMap metadata_map); std::pair startRequest(const Http::RequestHeaderMap& headers); - bool waitForDisconnect(std::chrono::milliseconds time_to_wait = std::chrono::milliseconds(0)); + ABSL_MUST_USE_RESULT AssertionResult + waitForDisconnect(std::chrono::milliseconds time_to_wait = TestUtility::DefaultTimeout); Network::ClientConnection* connection() const { return connection_.get(); } - Network::ConnectionEvent last_connection_event() const { return last_connection_event_; } + Network::ConnectionEvent lastConnectionEvent() const { return last_connection_event_; } Network::Connection& rawConnection() { return *connection_; } bool 
disconnected() { return disconnected_; } @@ -60,7 +61,7 @@ class IntegrationCodecClient : public Http::CodecClientProd { CodecCallbacks(IntegrationCodecClient& parent) : parent_(parent) {} // Http::ConnectionCallbacks - void onGoAway() override { parent_.saw_goaway_ = true; } + void onGoAway(Http::GoAwayErrorCode) override { parent_.saw_goaway_ = true; } IntegrationCodecClient& parent_; }; @@ -105,7 +106,9 @@ class HttpIntegrationTest : public BaseIntegrationTest { IntegrationCodecClientPtr makeHttpConnection(uint32_t port); // Makes a http connection object without checking its connected state. - virtual IntegrationCodecClientPtr makeRawHttpConnection(Network::ClientConnectionPtr&& conn); + virtual IntegrationCodecClientPtr makeRawHttpConnection( + Network::ClientConnectionPtr&& conn, + absl::optional http2_options); // Makes a http connection object with asserting a connected state. IntegrationCodecClientPtr makeHttpConnection(Network::ClientConnectionPtr&& conn); @@ -195,6 +198,7 @@ class HttpIntegrationTest : public BaseIntegrationTest { void testLargeHeaders(Http::TestRequestHeaderMapImpl request_headers, Http::TestRequestTrailerMapImpl request_trailers, uint32_t size, uint32_t max_size); + void testLargeRequestUrl(uint32_t url_size, uint32_t max_headers_size); void testLargeRequestHeaders(uint32_t size, uint32_t count, uint32_t max_size = 60, uint32_t max_count = 100); void testLargeRequestTrailers(uint32_t size, uint32_t max_size = 60); @@ -208,8 +212,9 @@ class HttpIntegrationTest : public BaseIntegrationTest { void testEnvoyHandling100Continue(bool additional_continue_from_upstream = false, const std::string& via = ""); - void testEnvoyProxying100Continue(bool continue_before_upstream_complete = false, - bool with_encoder_filter = false); + void testEnvoyProxying1xx(bool continue_before_upstream_complete = false, + bool with_encoder_filter = false, + bool with_multiple_1xx_headers = false); // HTTP/2 client tests. 
void testDownstreamResetBeforeResponseComplete(); @@ -220,7 +225,9 @@ class HttpIntegrationTest : public BaseIntegrationTest { bool response_trailers_present); // Test /drain_listener from admin portal. void testAdminDrain(Http::CodecClient::Type admin_request_type); - + // Test max stream duration. + void testMaxStreamDuration(); + void testMaxStreamDurationWithRetry(bool invoke_retry_upstream_disconnect); Http::CodecClient::Type downstreamProtocol() const { return downstream_protocol_; } // Prefix listener stat with IP:port, including IP version dependent loopback address. std::string listenerStatPrefix(const std::string& stat_name); diff --git a/test/integration/http_subset_lb_integration_test.cc b/test/integration/http_subset_lb_integration_test.cc index 4137ec95bbed2..cd275c20a7c36 100644 --- a/test/integration/http_subset_lb_integration_test.cc +++ b/test/integration/http_subset_lb_integration_test.cc @@ -52,8 +52,8 @@ class HttpSubsetLbIntegrationTest : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, TestEnvironment::getIpVersionsForTest().front(), ConfigHelper::httpProxyConfig()), - num_hosts_{4}, is_hash_lb_(GetParam() == envoy::config::cluster::v3::Cluster::RING_HASH || - GetParam() == envoy::config::cluster::v3::Cluster::MAGLEV) { + is_hash_lb_(GetParam() == envoy::config::cluster::v3::Cluster::RING_HASH || + GetParam() == envoy::config::cluster::v3::Cluster::MAGLEV) { autonomous_upstream_ = true; setUpstreamCount(num_hosts_); @@ -186,7 +186,7 @@ class HttpSubsetLbIntegrationTest } } - const uint32_t num_hosts_; + const uint32_t num_hosts_{4}; const bool is_hash_lb_; const std::string hash_header_{"x-hash"}; diff --git a/test/integration/http_timeout_integration_test.cc b/test/integration/http_timeout_integration_test.cc index ca226745de958..4592533656f02 100644 --- a/test/integration/http_timeout_integration_test.cc +++ b/test/integration/http_timeout_integration_test.cc @@ -44,7 +44,7 @@ TEST_P(HttpTimeoutIntegrationTest, GlobalTimeout) { 
EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("504", response->headers().Status()->value().getStringView()); + EXPECT_EQ("504", response->headers().getStatusValue()); } // Testing that `x-envoy-expected-timeout-ms` header, set by egress envoy, is respected by ingress @@ -85,7 +85,7 @@ TEST_P(HttpTimeoutIntegrationTest, UseTimeoutSetByEgressEnvoy) { EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("504", response->headers().Status()->value().getStringView()); + EXPECT_EQ("504", response->headers().getStatusValue()); } // Testing that ingress envoy derives new timeout value and sets `x-envoy-expected-timeout-ms` @@ -126,7 +126,7 @@ TEST_P(HttpTimeoutIntegrationTest, DeriveTimeoutInIngressEnvoy) { EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("504", response->headers().Status()->value().getStringView()); + EXPECT_EQ("504", response->headers().getStatusValue()); } // Testing that `x-envoy-expected-timeout-ms` header, set by egress envoy, is ignored by ingress @@ -168,7 +168,7 @@ TEST_P(HttpTimeoutIntegrationTest, IgnoreTimeoutSetByEgressEnvoy) { EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("504", response->headers().Status()->value().getStringView()); + EXPECT_EQ("504", response->headers().getStatusValue()); } // Regression test for https://github.com/envoyproxy/envoy/issues/7154 in which @@ -202,7 +202,7 @@ TEST_P(HttpTimeoutIntegrationTest, GlobalTimeoutAfterHeadersBeforeBodyResetsUpst upstream_request_->encodeHeaders(response_headers, false); response->waitForHeaders(); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); // Trigger global timeout. 
timeSystem().advanceTimeWait(std::chrono::milliseconds(200)); @@ -261,7 +261,7 @@ TEST_P(HttpTimeoutIntegrationTest, PerTryTimeout) { EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("504", response->headers().Status()->value().getStringView()); + EXPECT_EQ("504", response->headers().getStatusValue()); } // Sends a request with a per try timeout specified but no global timeout. @@ -299,7 +299,7 @@ TEST_P(HttpTimeoutIntegrationTest, PerTryTimeoutWithoutGlobalTimeout) { ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_)); // Encode 200 response headers for the first (timed out) request. - Http::TestHeaderMapImpl response_headers{{":status", "200"}}; + Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}}; upstream_request_->encodeHeaders(response_headers, true); response->waitForHeaders(); @@ -309,7 +309,7 @@ TEST_P(HttpTimeoutIntegrationTest, PerTryTimeoutWithoutGlobalTimeout) { EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } // With hedge_on_per_try_timeout enabled via config, sends a request with a @@ -368,7 +368,7 @@ TEST_P(HttpTimeoutIntegrationTest, HedgedPerTryTimeout) { EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } TEST_P(HttpTimeoutIntegrationTest, HedgedPerTryTimeoutWithBodyNoBufferFirstRequestWins) { @@ -490,7 +490,7 @@ void HttpTimeoutIntegrationTest::testRouterRequestAndResponseWithHedgedPerTryTim } EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } } // namespace Envoy diff --git a/test/integration/idle_timeout_integration_test.cc 
b/test/integration/idle_timeout_integration_test.cc index e40e683cb74bf..fc7069c3d4401 100644 --- a/test/integration/idle_timeout_integration_test.cc +++ b/test/integration/idle_timeout_integration_test.cc @@ -66,7 +66,7 @@ class IdleTimeoutIntegrationTest : public HttpProtocolIntegrationTest { void waitForTimeout(IntegrationStreamDecoder& response, absl::string_view stat_name = "", absl::string_view stat_prefix = "http.config_test") { if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); } else { response.waitForReset(); codec_client_->close(); @@ -178,12 +178,33 @@ TEST_P(IdleTimeoutIntegrationTest, PerStreamIdleTimeoutAfterDownstreamHeaders) { EXPECT_FALSE(upstream_request_->complete()); EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("408", response->headers().Status()->value().getStringView()); + EXPECT_EQ("408", response->headers().getStatusValue()); EXPECT_EQ("stream timeout", response->body()); EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr("stream_idle_timeout")); } +// Per-stream idle timeout with reads disabled. +TEST_P(IdleTimeoutIntegrationTest, PerStreamIdleTimeoutWithLargeBuffer) { + config_helper_.addFilter(R"EOF( + name: backpressure-filter + )EOF"); + enable_per_stream_idle_timeout_ = true; + initialize(); + + fake_upstreams_[0]->set_allow_unexpected_disconnects(true); + codec_client_ = makeHttpConnection(makeClientConnection((lookupPort("http")))); + auto response = codec_client_->makeHeaderOnlyRequest(default_request_headers_); + response->waitForEndStream(); + EXPECT_TRUE(response->complete()); + + // Make sure that for HTTP/1.1 reads are enabled even though the first request + // ended in the "backed up" state. 
+ auto response2 = codec_client_->makeHeaderOnlyRequest(default_request_headers_); + response2->waitForEndStream(); + EXPECT_TRUE(response2->complete()); +} + // Per-stream idle timeout after having sent downstream head request. TEST_P(IdleTimeoutIntegrationTest, PerStreamIdleTimeoutHeadRequestAfterDownstreamHeadRequest) { enable_per_stream_idle_timeout_ = true; @@ -193,9 +214,9 @@ TEST_P(IdleTimeoutIntegrationTest, PerStreamIdleTimeoutHeadRequestAfterDownstrea EXPECT_FALSE(upstream_request_->complete()); EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("408", response->headers().Status()->value().getStringView()); + EXPECT_EQ("408", response->headers().getStatusValue()); EXPECT_EQ(fmt::format("{}", strlen("stream timeout")), - response->headers().ContentLength()->value().getStringView()); + response->headers().getContentLengthValue()); EXPECT_EQ("", response->body()); } @@ -210,7 +231,7 @@ TEST_P(IdleTimeoutIntegrationTest, GlobalPerStreamIdleTimeoutAfterDownstreamHead EXPECT_FALSE(upstream_request_->complete()); EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("408", response->headers().Status()->value().getStringView()); + EXPECT_EQ("408", response->headers().getStatusValue()); EXPECT_EQ("stream timeout", response->body()); } @@ -227,7 +248,7 @@ TEST_P(IdleTimeoutIntegrationTest, PerStreamIdleTimeoutAfterDownstreamHeadersAnd EXPECT_FALSE(upstream_request_->complete()); EXPECT_EQ(1U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("408", response->headers().Status()->value().getStringView()); + EXPECT_EQ("408", response->headers().getStatusValue()); EXPECT_EQ("stream timeout", response->body()); } @@ -243,7 +264,7 @@ TEST_P(IdleTimeoutIntegrationTest, PerStreamIdleTimeoutAfterUpstreamHeaders) { EXPECT_FALSE(upstream_request_->complete()); EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_FALSE(response->complete()); - EXPECT_EQ("200", 
response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ("", response->body()); } @@ -277,7 +298,7 @@ TEST_P(IdleTimeoutIntegrationTest, PerStreamIdleTimeoutAfterBidiData) { EXPECT_TRUE(upstream_request_->complete()); EXPECT_EQ(1U, upstream_request_->bodyLength()); EXPECT_FALSE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ("aa", response->body()); } @@ -307,7 +328,7 @@ TEST_P(IdleTimeoutIntegrationTest, RequestTimeoutTriggersOnBodilessPost) { EXPECT_FALSE(upstream_request_->complete()); EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("408", response->headers().Status()->value().getStringView()); + EXPECT_EQ("408", response->headers().getStatusValue()); EXPECT_EQ("request timeout", response->body()); } @@ -323,7 +344,7 @@ TEST_P(IdleTimeoutIntegrationTest, RequestTimeoutUnconfiguredDoesNotTriggerOnBod EXPECT_FALSE(upstream_request_->complete()); EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("408", response->headers().Status()->value().getStringView()); + EXPECT_EQ("408", response->headers().getStatusValue()); EXPECT_NE("request timeout", response->body()); } @@ -381,7 +402,7 @@ TEST_P(IdleTimeoutIntegrationTest, RequestTimeoutIsNotDisarmedByEncode100Continu EXPECT_FALSE(upstream_request_->complete()); EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("408", response->headers().Status()->value().getStringView()); + EXPECT_EQ("408", response->headers().getStatusValue()); EXPECT_EQ("request timeout", response->body()); } diff --git a/test/integration/integration.cc b/test/integration/integration.cc index ac40c2628be09..979e3fd8c47b1 100644 --- a/test/integration/integration.cc +++ b/test/integration/integration.cc @@ -148,23 +148,24 @@ void 
IntegrationStreamDecoder::onResetStream(Http::StreamResetReason reason, abs } } -IntegrationTcpClient::IntegrationTcpClient(Event::Dispatcher& dispatcher, - MockBufferFactory& factory, uint32_t port, - Network::Address::IpVersion version, - bool enable_half_close) - : payload_reader_(new WaitForPayloadReader(dispatcher)), +IntegrationTcpClient::IntegrationTcpClient( + Event::Dispatcher& dispatcher, Event::TestTimeSystem& time_system, MockBufferFactory& factory, + uint32_t port, Network::Address::IpVersion version, bool enable_half_close, + const Network::ConnectionSocket::OptionsSharedPtr& options) + : time_system_(time_system), payload_reader_(new WaitForPayloadReader(dispatcher)), callbacks_(new ConnectionCallbacks(*this)) { - EXPECT_CALL(factory, create_(_, _)) - .WillOnce(Invoke([&](std::function below_low, - std::function above_high) -> Buffer::Instance* { - client_write_buffer_ = new NiceMock(below_low, above_high); + EXPECT_CALL(factory, create_(_, _, _)) + .WillOnce(Invoke([&](std::function below_low, std::function above_high, + std::function above_overflow) -> Buffer::Instance* { + client_write_buffer_ = + new NiceMock(below_low, above_high, above_overflow); return client_write_buffer_; })); connection_ = dispatcher.createClientConnection( Network::Utility::resolveUrl( fmt::format("tcp://{}:{}", Network::Test::getLoopbackAddressUrlString(version), port)), - Network::Address::InstanceConstSharedPtr(), Network::Test::createRawBufferSocket(), nullptr); + Network::Address::InstanceConstSharedPtr(), Network::Test::createRawBufferSocket(), options); ON_CALL(*client_write_buffer_, drain(_)) .WillByDefault(testing::Invoke(client_write_buffer_, &MockWatermarkBuffer::baseDrain)); @@ -188,24 +189,28 @@ void IntegrationTcpClient::waitForData(const std::string& data, bool exact_match connection_->dispatcher().run(Event::Dispatcher::RunType::Block); } -void IntegrationTcpClient::waitForData(size_t length) { +AssertionResult IntegrationTcpClient::waitForData(size_t 
length, + std::chrono::milliseconds timeout) { if (payload_reader_->data().size() >= length) { - return; + return AssertionSuccess(); } - payload_reader_->setLengthToWaitFor(length); - connection_->dispatcher().run(Event::Dispatcher::RunType::Block); + return payload_reader_->waitForLength(length, timeout); } void IntegrationTcpClient::waitForDisconnect(bool ignore_spurious_events) { + Event::TimerPtr timeout_timer = + connection_->dispatcher().createTimer([this]() -> void { connection_->dispatcher().exit(); }); + timeout_timer->enableTimer(TestUtility::DefaultTimeout); + if (ignore_spurious_events) { - while (!disconnected_) { + while (!disconnected_ && timeout_timer->enabled()) { connection_->dispatcher().run(Event::Dispatcher::RunType::Block); } } else { connection_->dispatcher().run(Event::Dispatcher::RunType::Block); - EXPECT_TRUE(disconnected_); } + EXPECT_TRUE(disconnected_); } void IntegrationTcpClient::waitForHalfClose() { @@ -218,7 +223,9 @@ void IntegrationTcpClient::waitForHalfClose() { void IntegrationTcpClient::readDisable(bool disabled) { connection_->readDisable(disabled); } -void IntegrationTcpClient::write(const std::string& data, bool end_stream, bool verify) { +AssertionResult IntegrationTcpClient::write(const std::string& data, bool end_stream, bool verify, + std::chrono::milliseconds timeout) { + auto end_time = time_system_.monotonicTime() + timeout; Buffer::OwnedImpl buffer(data); if (verify) { EXPECT_CALL(*client_write_buffer_, move(_)); @@ -232,12 +239,21 @@ void IntegrationTcpClient::write(const std::string& data, bool end_stream, bool connection_->write(buffer, end_stream); do { connection_->dispatcher().run(Event::Dispatcher::RunType::NonBlock); - } while (client_write_buffer_->bytes_written() != bytes_expected && !disconnected_); - if (verify) { - // If we disconnect part way through the write, then we should fail, since write() is always - // expected to succeed. 
- EXPECT_TRUE(!disconnected_ || client_write_buffer_->bytes_written() == bytes_expected); + if (client_write_buffer_->bytes_written() == bytes_expected || disconnected_) { + break; + } + } while (time_system_.monotonicTime() < end_time); + + if (time_system_.monotonicTime() >= end_time) { + return AssertionFailure() << "Timed out completing write"; + } else if (verify && (disconnected_ || client_write_buffer_->bytes_written() != bytes_expected)) { + return AssertionFailure() + << "Failed to complete write or unexpected disconnect. disconnected_: " << disconnected_ + << " bytes_written: " << client_write_buffer_->bytes_written() + << " bytes_expected: " << bytes_expected; } + + return AssertionSuccess(); } void IntegrationTcpClient::ConnectionCallbacks::onEvent(Network::ConnectionEvent event) { @@ -264,12 +280,17 @@ BaseIntegrationTest::BaseIntegrationTest(const InstanceConstSharedPtrFn& upstrea // complex test hooks to the server and/or spin waiting on stats, neither of which I think are // necessary right now. timeSystem().advanceTimeWait(std::chrono::milliseconds(10)); - ON_CALL(*mock_buffer_factory_, create_(_, _)) - .WillByDefault(Invoke([](std::function below_low, - std::function above_high) -> Buffer::Instance* { - return new Buffer::WatermarkBuffer(below_low, above_high); + ON_CALL(*mock_buffer_factory_, create_(_, _, _)) + .WillByDefault(Invoke([](std::function below_low, std::function above_high, + std::function above_overflow) -> Buffer::Instance* { + return new Buffer::WatermarkBuffer(below_low, above_high, above_overflow); })); ON_CALL(factory_context_, api()).WillByDefault(ReturnRef(*api_)); + // In ENVOY_USE_NEW_CODECS_IN_INTEGRATION_TESTS mode, set runtime config to use legacy codecs. 
+#ifdef ENVOY_USE_NEW_CODECS_IN_INTEGRATION_TESTS + ENVOY_LOG_MISC(debug, "Using new codecs"); + setNewCodecs(); +#endif } BaseIntegrationTest::BaseIntegrationTest(Network::Address::IpVersion version, @@ -363,7 +384,7 @@ void BaseIntegrationTest::createEnvoy() { MessageUtil::getYamlStringFromMessage(bootstrap)); const std::string bootstrap_path = TestEnvironment::writeStringToFileForTest( - "bootstrap.json", MessageUtil::getJsonStringFromMessage(bootstrap)); + "bootstrap.pb", TestUtility::getProtobufBinaryStringFromMessage(bootstrap)); std::vector named_ports; const auto& static_resources = config_helper_.bootstrap().static_resources(); @@ -371,7 +392,7 @@ void BaseIntegrationTest::createEnvoy() { for (int i = 0; i < static_resources.listeners_size(); ++i) { named_ports.push_back(static_resources.listeners(i).name()); } - createGeneratedApiTestServer(bootstrap_path, named_ports, false, true, false); + createGeneratedApiTestServer(bootstrap_path, named_ports, {false, true, false}, false); } void BaseIntegrationTest::setUpstreamProtocol(FakeHttpConnection::Type protocol) { @@ -388,9 +409,11 @@ void BaseIntegrationTest::setUpstreamProtocol(FakeHttpConnection::Type protocol) } } -IntegrationTcpClientPtr BaseIntegrationTest::makeTcpConnection(uint32_t port) { - return std::make_unique(*dispatcher_, *mock_buffer_factory_, port, version_, - enable_half_close_); +IntegrationTcpClientPtr +BaseIntegrationTest::makeTcpConnection(uint32_t port, + const Network::ConnectionSocket::OptionsSharedPtr& options) { + return std::make_unique(*dispatcher_, time_system_, *mock_buffer_factory_, + port, version_, enable_half_close_, options); } void BaseIntegrationTest::registerPort(const std::string& key, uint32_t port) { @@ -450,15 +473,13 @@ std::string getListenerDetails(Envoy::Server::Instance& server) { return MessageUtil::getYamlStringFromMessage(listener_info.dynamic_listeners(0).error_state()); } -void BaseIntegrationTest::createGeneratedApiTestServer(const std::string& 
bootstrap_path, - const std::vector& port_names, - bool allow_unknown_static_fields, - bool reject_unknown_dynamic_fields, - bool allow_lds_rejection) { +void BaseIntegrationTest::createGeneratedApiTestServer( + const std::string& bootstrap_path, const std::vector& port_names, + Server::FieldValidationConfig validator_config, bool allow_lds_rejection) { test_server_ = IntegrationTestServer::create( bootstrap_path, version_, on_server_ready_function_, on_server_init_function_, deterministic_, - timeSystem(), *api_, defer_listener_finalization_, process_object_, - allow_unknown_static_fields, reject_unknown_dynamic_fields, concurrency_); + timeSystem(), *api_, defer_listener_finalization_, process_object_, validator_config, + concurrency_, drain_time_, drain_strategy_, use_real_stats_); if (config_helper_.bootstrap().static_resources().listeners_size() > 0 && !defer_listener_finalization_) { @@ -467,16 +488,19 @@ void BaseIntegrationTest::createGeneratedApiTestServer(const std::string& bootst auto end_time = time_system_.monotonicTime() + TestUtility::DefaultTimeout; const char* success = "listener_manager.listener_create_success"; const char* rejected = "listener_manager.lds.update_rejected"; - while ((test_server_->counter(success) == nullptr || - test_server_->counter(success)->value() < concurrency_) && - (!allow_lds_rejection || test_server_->counter(rejected) == nullptr || - test_server_->counter(rejected)->value() == 0)) { + for (Stats::CounterSharedPtr success_counter = test_server_->counter(success), + rejected_counter = test_server_->counter(rejected); + (success_counter == nullptr || + success_counter->value() < + concurrency_ * config_helper_.bootstrap().static_resources().listeners_size()) && + (!allow_lds_rejection || rejected_counter == nullptr || rejected_counter->value() == 0); + success_counter = test_server_->counter(success), + rejected_counter = test_server_->counter(rejected)) { if (time_system_.monotonicTime() >= end_time) { RELEASE_ASSERT(0, 
"Timed out waiting for listeners."); } if (!allow_lds_rejection) { - RELEASE_ASSERT(test_server_->counter(rejected) == nullptr || - test_server_->counter(rejected)->value() == 0, + RELEASE_ASSERT(rejected_counter == nullptr || rejected_counter->value() == 0, absl::StrCat("Lds update failed. Details\n", getListenerDetails(test_server_->server()))); } @@ -489,8 +513,7 @@ void BaseIntegrationTest::createGeneratedApiTestServer(const std::string& bootst void BaseIntegrationTest::createApiTestServer(const ApiFilesystemConfig& api_filesystem_config, const std::vector& port_names, - bool allow_unknown_static_fields, - bool reject_unknown_dynamic_fields, + Server::FieldValidationConfig validator_config, bool allow_lds_rejection) { const std::string eds_path = TestEnvironment::temporaryFileSubstitute( api_filesystem_config.eds_path_, port_map_, version_); @@ -500,45 +523,27 @@ void BaseIntegrationTest::createApiTestServer(const ApiFilesystemConfig& api_fil api_filesystem_config.rds_path_, port_map_, version_); const std::string lds_path = TestEnvironment::temporaryFileSubstitute( api_filesystem_config.lds_path_, {{"rds_json_path", rds_path}}, port_map_, version_); - createGeneratedApiTestServer( - TestEnvironment::temporaryFileSubstitute( - api_filesystem_config.bootstrap_path_, - {{"cds_json_path", cds_path}, {"lds_json_path", lds_path}}, port_map_, version_), - port_names, allow_unknown_static_fields, reject_unknown_dynamic_fields, allow_lds_rejection); -} - -void BaseIntegrationTest::createTestServer(const std::string& json_path, - const std::vector& port_names) { - test_server_ = createIntegrationTestServer( - TestEnvironment::temporaryFileSubstitute(json_path, port_map_, version_), nullptr, nullptr, - timeSystem()); - registerTestServerPorts(port_names); + createGeneratedApiTestServer(TestEnvironment::temporaryFileSubstitute( + api_filesystem_config.bootstrap_path_, + {{"cds_json_path", cds_path}, {"lds_json_path", lds_path}}, + port_map_, version_), + port_names, 
validator_config, allow_lds_rejection); } void BaseIntegrationTest::sendRawHttpAndWaitForResponse(int port, const char* raw_http, std::string* response, bool disconnect_after_headers_complete) { - Buffer::OwnedImpl buffer(raw_http); - RawConnectionDriver connection( - port, buffer, - [&](Network::ClientConnection& client, const Buffer::Instance& data) -> void { + auto connection = createConnectionDriver( + port, raw_http, + [response, disconnect_after_headers_complete](Network::ClientConnection& client, + const Buffer::Instance& data) -> void { response->append(data.toString()); if (disconnect_after_headers_complete && response->find("\r\n\r\n") != std::string::npos) { client.close(Network::ConnectionCloseType::NoFlush); } - }, - version_); + }); - connection.run(); -} - -IntegrationTestServerPtr BaseIntegrationTest::createIntegrationTestServer( - const std::string& bootstrap_path, - std::function on_server_ready_function, - std::function on_server_init_function, Event::TestTimeSystem& time_system) { - return IntegrationTestServer::create(bootstrap_path, version_, on_server_ready_function, - on_server_init_function, deterministic_, time_system, *api_, - defer_listener_finalization_); + connection->run(); } void BaseIntegrationTest::useListenerAccessLog(absl::string_view format) { @@ -546,12 +551,28 @@ void BaseIntegrationTest::useListenerAccessLog(absl::string_view format) { ASSERT_TRUE(config_helper_.setListenerAccessLog(listener_access_log_name_, format)); } -std::string BaseIntegrationTest::waitForAccessLog(const std::string& filename) { +// Assuming logs are newline delineated, return the start index of the nth entry. 
+// If there are not n entries, it will return file.length() (end of the string +// index) +size_t entryIndex(const std::string& file, uint32_t entry) { + size_t index = 0; + for (uint32_t i = 0; i < entry; ++i) { + index = file.find('\n', index); + if (index == std::string::npos || index == file.length()) { + return file.length(); + } + ++index; + } + return index; +} + +std::string BaseIntegrationTest::waitForAccessLog(const std::string& filename, uint32_t entry) { // Wait a max of 1s for logs to flush to disk. for (int i = 0; i < 1000; ++i) { std::string contents = TestEnvironment::readFileToStringForTest(filename, false); - if (contents.length() > 0) { - return contents; + size_t index = entryIndex(contents, entry); + if (contents.length() > index) { + return contents.substr(index); } absl::SleepFor(absl::Milliseconds(1)); } @@ -569,7 +590,7 @@ void BaseIntegrationTest::createXdsUpstream() { } else { envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context; auto* common_tls_context = tls_context.mutable_common_tls_context(); - common_tls_context->add_alpn_protocols("h2"); + common_tls_context->add_alpn_protocols(Http::Utility::AlpnNames::get().Http2); auto* tls_cert = common_tls_context->add_tls_certificates(); tls_cert->mutable_certificate_chain()->set_filename( TestEnvironment::runfilesPath("test/config/integration/certs/upstreamcert.pem")); @@ -675,6 +696,23 @@ AssertionResult compareSets(const std::set& set1, const std::set& expected_resource_subscriptions, diff --git a/test/integration/integration.h b/test/integration/integration.h index 8c388b46314fa..e4f8a9cba86ea 100644 --- a/test/integration/integration.h +++ b/test/integration/integration.h @@ -13,13 +13,15 @@ #include "common/config/version_converter.h" #include "common/http/codec_client.h" +#include "extensions/transport_sockets/tls/context_manager_impl.h" + #include "test/common/grpc/grpc_client_integration.h" #include "test/config/utility.h" #include 
"test/integration/fake_upstream.h" #include "test/integration/server.h" #include "test/integration/utility.h" #include "test/mocks/buffer/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/transport_socket_factory_context.h" #include "test/test_common/environment.h" #include "test/test_common/network_utility.h" #include "test/test_common/printers.h" @@ -40,11 +42,11 @@ class IntegrationStreamDecoder : public Http::ResponseDecoder, public Http::Stre const std::string& body() { return body_; } bool complete() { return saw_end_stream_; } bool reset() { return saw_reset_; } - Http::StreamResetReason reset_reason() { return reset_reason_; } - const Http::ResponseHeaderMap* continue_headers() { return continue_headers_.get(); } + Http::StreamResetReason resetReason() { return reset_reason_; } + const Http::ResponseHeaderMap* continueHeaders() { return continue_headers_.get(); } const Http::ResponseHeaderMap& headers() { return *headers_; } const Http::ResponseTrailerMapPtr& trailers() { return trailers_; } - const Http::MetadataMap& metadata_map() { return *metadata_map_; } + const Http::MetadataMap& metadataMap() { return *metadata_map_; } uint64_t keyCount(std::string key) { return duplicated_metadata_key_count_[key]; } void waitForContinueHeaders(); void waitForHeaders(); @@ -77,7 +79,7 @@ class IntegrationStreamDecoder : public Http::ResponseDecoder, public Http::Stre Http::ResponseHeaderMapPtr headers_; Http::ResponseTrailerMapPtr trailers_; Http::MetadataMapPtr metadata_map_{new Http::MetadataMap()}; - std::unordered_map duplicated_metadata_key_count_; + absl::node_hash_map duplicated_metadata_key_count_; bool waiting_for_end_stream_{}; bool saw_end_stream_{}; std::string body_; @@ -96,17 +98,22 @@ using IntegrationStreamDecoderPtr = std::unique_ptr; */ class IntegrationTcpClient { public: - IntegrationTcpClient(Event::Dispatcher& dispatcher, MockBufferFactory& factory, uint32_t port, - Network::Address::IpVersion version, bool 
enable_half_close = false); + IntegrationTcpClient(Event::Dispatcher& dispatcher, Event::TestTimeSystem& time_system, + MockBufferFactory& factory, uint32_t port, + Network::Address::IpVersion version, bool enable_half_close, + const Network::ConnectionSocket::OptionsSharedPtr& options); void close(); void waitForData(const std::string& data, bool exact_match = true); // wait for at least `length` bytes to be received - void waitForData(size_t length); + ABSL_MUST_USE_RESULT AssertionResult + waitForData(size_t length, std::chrono::milliseconds timeout = TestUtility::DefaultTimeout); void waitForDisconnect(bool ignore_spurious_events = false); void waitForHalfClose(); void readDisable(bool disabled); - void write(const std::string& data, bool end_stream = false, bool verify = true); + ABSL_MUST_USE_RESULT AssertionResult + write(const std::string& data, bool end_stream = false, bool verify = true, + std::chrono::milliseconds timeout = TestUtility::DefaultTimeout); const std::string& data() { return payload_reader_->data(); } bool connected() const { return !disconnected_; } // clear up to the `count` number of bytes of received data @@ -124,6 +131,7 @@ class IntegrationTcpClient { IntegrationTcpClient& parent_; }; + Event::TestTimeSystem& time_system_; std::shared_ptr payload_reader_; std::shared_ptr callbacks_; Network::ClientConnectionPtr connection_; @@ -161,7 +169,6 @@ class BaseIntegrationTest : protected Logger::Loggable { BaseIntegrationTest(const InstanceConstSharedPtrFn& upstream_address_fn, Network::Address::IpVersion version, const std::string& config = ConfigHelper::httpProxyConfig()); - virtual ~BaseIntegrationTest() = default; // TODO(jmarantz): Remove this once @@ -177,17 +184,20 @@ class BaseIntegrationTest : protected Logger::Loggable { virtual void createEnvoy(); // Sets upstream_protocol_ and alters the upstream protocol in the config_helper_ void setUpstreamProtocol(FakeHttpConnection::Type protocol); - // Sets fake_upstreams_count_ and alters 
the upstream protocol in the config_helper_ + // Sets fake_upstreams_count_ void setUpstreamCount(uint32_t count) { fake_upstreams_count_ = count; } // Skip validation that ensures that all upstream ports are referenced by the // configuration generated in ConfigHelper::finalize. void skipPortUsageValidation() { config_helper_.skipPortUsageValidation(); } // Make test more deterministic by using a fixed RNG value. void setDeterministic() { deterministic_ = true; } + void setNewCodecs() { config_helper_.setNewCodecs(); } FakeHttpConnection::Type upstreamProtocol() const { return upstream_protocol_; } - IntegrationTcpClientPtr makeTcpConnection(uint32_t port); + IntegrationTcpClientPtr + makeTcpConnection(uint32_t port, + const Network::ConnectionSocket::OptionsSharedPtr& options = nullptr); // Test-wide port map. void registerPort(const std::string& key, uint32_t port); @@ -203,14 +213,13 @@ class BaseIntegrationTest : protected Logger::Loggable { const Network::ConnectionSocket::OptionsSharedPtr& options); void registerTestServerPorts(const std::vector& port_names); - void createTestServer(const std::string& json_path, const std::vector& port_names); void createGeneratedApiTestServer(const std::string& bootstrap_path, const std::vector& port_names, - bool allow_unknown_static_fields, - bool reject_unknown_dynamic_fields, bool allow_lds_rejection); + Server::FieldValidationConfig validator_config, + bool allow_lds_rejection); void createApiTestServer(const ApiFilesystemConfig& api_filesystem_config, const std::vector& port_names, - bool allow_unknown_static_fields, bool reject_unknown_dynamic_fields, + Server::FieldValidationConfig validator_config, bool allow_lds_rejection); Event::TestTimeSystem& timeSystem() { return time_system_; } @@ -222,8 +231,8 @@ class BaseIntegrationTest : protected Logger::Loggable { // Enable the listener access log void useListenerAccessLog(absl::string_view format = ""); - // Waits for the first access log entry. 
- std::string waitForAccessLog(const std::string& filename); + // Waits for the nth access log entry, defaulting to log entry 0. + std::string waitForAccessLog(const std::string& filename, uint32_t entry = 0); std::string listener_access_log_name_; @@ -232,6 +241,10 @@ class BaseIntegrationTest : protected Logger::Loggable { void createXdsConnection(); void cleanUpXdsConnection(); + // See if a port can be successfully bound within the given timeout. + ABSL_MUST_USE_RESULT AssertionResult waitForPortAvailable( + uint32_t port, std::chrono::milliseconds timeout = TestUtility::DefaultTimeout); + // Helpers for setting up expectations and making the internal gears turn for xDS request/response // sending/receiving to/from the (imaginary) xDS server. You should almost always use // compareDiscoveryRequest() and sendDiscoveryResponse(), but the SotW/delta-specific versions are @@ -248,11 +261,12 @@ class BaseIntegrationTest : protected Logger::Loggable { template void sendDiscoveryResponse(const std::string& type_url, const std::vector& state_of_the_world, const std::vector& added_or_updated, - const std::vector& removed, const std::string& version) { + const std::vector& removed, const std::string& version, + const bool api_downgrade = true) { if (sotw_or_delta_ == Grpc::SotwOrDelta::Sotw) { - sendSotwDiscoveryResponse(type_url, state_of_the_world, version); + sendSotwDiscoveryResponse(type_url, state_of_the_world, version, api_downgrade); } else { - sendDeltaDiscoveryResponse(type_url, added_or_updated, removed, version); + sendDeltaDiscoveryResponse(type_url, added_or_updated, removed, version, api_downgrade); } } @@ -284,12 +298,16 @@ class BaseIntegrationTest : protected Logger::Loggable { template void sendSotwDiscoveryResponse(const std::string& type_url, const std::vector& messages, - const std::string& version) { + const std::string& version, const bool api_downgrade = true) { API_NO_BOOST(envoy::api::v2::DiscoveryResponse) discovery_response; 
discovery_response.set_version_info(version); discovery_response.set_type_url(type_url); for (const auto& message : messages) { - discovery_response.add_resources()->PackFrom(API_DOWNGRADE(message)); + if (api_downgrade) { + discovery_response.add_resources()->PackFrom(API_DOWNGRADE(message)); + } else { + discovery_response.add_resources()->PackFrom(message); + } } static int next_nonce_counter = 0; discovery_response.set_nonce(absl::StrCat("nonce", next_nonce_counter++)); @@ -297,18 +315,21 @@ class BaseIntegrationTest : protected Logger::Loggable { } template - void - sendDeltaDiscoveryResponse(const std::string& type_url, const std::vector& added_or_updated, - const std::vector& removed, const std::string& version) { - sendDeltaDiscoveryResponse(type_url, added_or_updated, removed, version, xds_stream_); + void sendDeltaDiscoveryResponse(const std::string& type_url, + const std::vector& added_or_updated, + const std::vector& removed, + const std::string& version, const bool api_downgrade = true) { + sendDeltaDiscoveryResponse(type_url, added_or_updated, removed, version, xds_stream_, {}, + api_downgrade); } template void sendDeltaDiscoveryResponse(const std::string& type_url, const std::vector& added_or_updated, const std::vector& removed, const std::string& version, - FakeStreamPtr& stream, const std::vector& aliases = {}) { - auto response = - createDeltaDiscoveryResponse(type_url, added_or_updated, removed, version, aliases); + FakeStreamPtr& stream, const std::vector& aliases = {}, + const bool api_downgrade = true) { + auto response = createDeltaDiscoveryResponse(type_url, added_or_updated, removed, version, + aliases, api_downgrade); stream->sendGrpcMessage(response); } @@ -316,7 +337,8 @@ class BaseIntegrationTest : protected Logger::Loggable { envoy::api::v2::DeltaDiscoveryResponse createDeltaDiscoveryResponse(const std::string& type_url, const std::vector& added_or_updated, const std::vector& removed, const std::string& version, - const std::vector& 
aliases) { + const std::vector& aliases, + const bool api_downgrade = true) { API_NO_BOOST(envoy::api::v2::DeltaDiscoveryResponse) response; response.set_system_version_info("system_version_info_this_is_a_test"); @@ -324,10 +346,15 @@ class BaseIntegrationTest : protected Logger::Loggable { for (const auto& message : added_or_updated) { auto* resource = response.add_resources(); ProtobufWkt::Any temp_any; - temp_any.PackFrom(API_DOWNGRADE(message)); + if (api_downgrade) { + temp_any.PackFrom(API_DOWNGRADE(message)); + resource->mutable_resource()->PackFrom(API_DOWNGRADE(message)); + } else { + temp_any.PackFrom(message); + resource->mutable_resource()->PackFrom(message); + } resource->set_name(TestUtility::xdsResourceName(temp_any)); resource->set_version(version); - resource->mutable_resource()->PackFrom(API_DOWNGRADE(message)); for (const auto& alias : aliases) { resource->add_aliases(alias); } @@ -358,19 +385,31 @@ class BaseIntegrationTest : protected Logger::Loggable { void sendRawHttpAndWaitForResponse(int port, const char* raw_http, std::string* response, bool disconnect_after_headers_complete = false); -protected: - // Create the envoy server in another thread and start it. - // Will not return until that server is listening. - virtual IntegrationTestServerPtr - createIntegrationTestServer(const std::string& bootstrap_path, - std::function on_server_ready_function, - std::function on_server_init_function, - Event::TestTimeSystem& time_system); + /** + * Helper to create ConnectionDriver. + * + * @param port the port to connect to. + * @param initial_data the data to send. + * @param data_callback the callback on the received data. 
+ **/ + std::unique_ptr createConnectionDriver( + uint32_t port, const std::string& initial_data, + std::function&& data_callback) { + Buffer::OwnedImpl buffer(initial_data); + return std::make_unique(port, buffer, data_callback, version_, + *dispatcher_); + } +protected: bool initialized() const { return initialized_; } std::unique_ptr upstream_stats_store_; + // Make sure the test server will be torn down after any fake client. + // The test server owns the runtime, which is often accessed by client and + // fake upstream codecs and must outlast them. + IntegrationTestServerPtr test_server_; + // The IpVersion (IPv4, IPv6) to use. Network::Address::IpVersion version_; // IP Address to use when binding sockets on upstreams. @@ -387,15 +426,42 @@ class BaseIntegrationTest : protected Logger::Loggable { // pre-init, control plane synchronization needed for server start. std::function on_server_init_function_; - std::vector> fake_upstreams_; - // Target number of upstreams. - uint32_t fake_upstreams_count_{1}; - spdlog::level::level_enum default_log_level_; - IntegrationTestServerPtr test_server_; // A map of keys to port names. Generally the names are pulled from the v2 listener name // but if a listener is created via ADS, it will be from whatever key is used with registerPort. TestEnvironment::PortMap port_map_; + // The DrainStrategy that dictates the behaviour of + // DrainManagerImpl::drainClose(). + Server::DrainStrategy drain_strategy_{Server::DrainStrategy::Gradual}; + + // Member variables for xDS testing. + FakeUpstream* xds_upstream_{}; + FakeHttpConnectionPtr xds_connection_; + FakeStreamPtr xds_stream_; + bool create_xds_upstream_{false}; + bool tls_xds_upstream_{false}; + bool use_lds_{true}; // Use the integration framework's LDS set up. 
+ + testing::NiceMock factory_context_; + Extensions::TransportSockets::Tls::ContextManagerImpl context_manager_{timeSystem()}; + + // The fake upstreams_ are created using the context_manager, so make sure + // they are destroyed before it is. + std::vector> fake_upstreams_; + + Grpc::SotwOrDelta sotw_or_delta_{Grpc::SotwOrDelta::Sotw}; + + spdlog::level::level_enum default_log_level_; + + // Target number of upstreams. + uint32_t fake_upstreams_count_{1}; + + // The duration of the drain manager graceful drain period. + std::chrono::seconds drain_time_{1}; + + // The number of worker threads that the test server uses. + uint32_t concurrency_{1}; + // If true, use AutonomousUpstream for fake upstreams. bool autonomous_upstream_{false}; @@ -415,19 +481,9 @@ class BaseIntegrationTest : protected Logger::Loggable { // them in the port_map_. bool defer_listener_finalization_{false}; - // The number of worker threads that the test server uses. - uint32_t concurrency_{1}; - - // Member variables for xDS testing. - FakeUpstream* xds_upstream_{}; - FakeHttpConnectionPtr xds_connection_; - FakeStreamPtr xds_stream_; - testing::NiceMock factory_context_; - Extensions::TransportSockets::Tls::ContextManagerImpl context_manager_{timeSystem()}; - bool create_xds_upstream_{false}; - bool tls_xds_upstream_{false}; - bool use_lds_{true}; // Use the integration framework's LDS set up. - Grpc::SotwOrDelta sotw_or_delta_{Grpc::SotwOrDelta::Sotw}; + // By default the test server will use custom stats to notify on increment. + // This override exists for tests measuring stats memory. 
+ bool use_real_stats_{}; private: // The type for the Envoy-to-backend connection diff --git a/test/integration/integration_admin_test.cc b/test/integration/integration_admin_test.cc index a5ecdcf30b56e..b5640bfcbac52 100644 --- a/test/integration/integration_admin_test.cc +++ b/test/integration/integration_admin_test.cc @@ -12,6 +12,7 @@ #include "common/common/fmt.h" #include "common/config/api_version.h" #include "common/profiler/profiler.h" +#include "common/stats/histogram_impl.h" #include "common/stats/stats_matcher_impl.h" #include "test/common/stats/stat_test_utility.h" @@ -156,7 +157,7 @@ TEST_P(IntegrationAdminTest, Admin) { EXPECT_EQ("200", request("admin", "GET", "/stats/recentlookups", response)); EXPECT_EQ("text/plain; charset=UTF-8", ContentType(response)); EXPECT_TRUE(absl::StartsWith(response->body(), " Count Lookup\n")) << response->body(); - EXPECT_LT(30, response->body().size()); + EXPECT_LT(28, response->body().size()); // Now disable recent-lookups tracking and check that we get the error again. EXPECT_EQ("200", request("admin", "POST", "/stats/recentlookups/disable", response)); @@ -226,6 +227,24 @@ TEST_P(IntegrationAdminTest, Admin) { EXPECT_THAT(response->body(), HasSubstr("envoy_cluster_upstream_cx_active{envoy_cluster_name=\"cluster_0\"} 0\n")); + // Test that a specific bucket config is applied. Buckets 1-4 (inclusive) are set in initialize(). + for (int i = 1; i <= 4; i++) { + EXPECT_THAT( + response->body(), + HasSubstr(fmt::format("envoy_cluster_upstream_cx_connect_ms_bucket{{envoy_cluster_name=" + "\"cluster_0\",le=\"{}\"}} 0\n", + i))); + } + + // Test that other histograms use the default buckets. 
+ for (double bucket : Stats::HistogramSettingsImpl::defaultBuckets()) { + EXPECT_THAT( + response->body(), + HasSubstr(fmt::format("envoy_cluster_upstream_cx_length_ms_bucket{{envoy_cluster_name=" + "\"cluster_0\",le=\"{0:.32g}\"}} 0\n", + bucket))); + } + EXPECT_EQ("200", request("admin", "GET", "/stats/prometheus", response)); EXPECT_THAT( response->body(), @@ -262,37 +281,17 @@ TEST_P(IntegrationAdminTest, Admin) { EXPECT_EQ("200", request("admin", "GET", "/stats/recentlookups", response)); EXPECT_EQ("text/plain; charset=UTF-8", ContentType(response)); - // TODO(#8324): "http1.metadata_not_supported_error" should not still be in - // the 'recent lookups' output after reset_counters. switch (GetParam().downstream_protocol) { case Http::CodecClient::Type::HTTP1: EXPECT_EQ(" Count Lookup\n" - " 1 http1.dropped_headers_with_underscores\n" - " 1 http1.metadata_not_supported_error\n" - " 1 http1.requests_rejected_with_underscores_in_headers\n" - " 1 http1.response_flood\n" "\n" - "total: 4\n", + "total: 0\n", response->body()); break; case Http::CodecClient::Type::HTTP2: EXPECT_EQ(" Count Lookup\n" - " 1 http2.dropped_headers_with_underscores\n" - " 1 http2.header_overflow\n" - " 1 http2.headers_cb_no_stream\n" - " 1 http2.inbound_empty_frames_flood\n" - " 1 http2.inbound_priority_frames_flood\n" - " 1 http2.inbound_window_update_frames_flood\n" - " 1 http2.outbound_control_flood\n" - " 1 http2.outbound_flood\n" - " 1 http2.requests_rejected_with_underscores_in_headers\n" - " 1 http2.rx_messaging_error\n" - " 1 http2.rx_reset\n" - " 1 http2.too_many_header_frames\n" - " 1 http2.trailers\n" - " 1 http2.tx_reset\n" "\n" - "total: 14\n", + "total: 0\n", response->body()); break; case Http::CodecClient::Type::HTTP3: @@ -378,12 +377,35 @@ TEST_P(IntegrationAdminTest, Admin) { config_dump.configs(5).UnpackTo(&secret_config_dump); EXPECT_EQ("secret_static_0", secret_config_dump.static_secrets(0).name()); + EXPECT_EQ("200", request("admin", "GET", "/config_dump?include_eds", 
response)); + EXPECT_EQ("application/json", ContentType(response)); + json = Json::Factory::loadFromString(response->body()); + index = 0; + const std::string expected_types_eds[] = { + "type.googleapis.com/envoy.admin.v3.BootstrapConfigDump", + "type.googleapis.com/envoy.admin.v3.ClustersConfigDump", + "type.googleapis.com/envoy.admin.v3.EndpointsConfigDump", + "type.googleapis.com/envoy.admin.v3.ListenersConfigDump", + "type.googleapis.com/envoy.admin.v3.ScopedRoutesConfigDump", + "type.googleapis.com/envoy.admin.v3.RoutesConfigDump", + "type.googleapis.com/envoy.admin.v3.SecretsConfigDump"}; + + for (const Json::ObjectSharedPtr& obj_ptr : json->getObjectArray("configs")) { + EXPECT_TRUE(expected_types_eds[index].compare(obj_ptr->getString("@type")) == 0); + index++; + } + + // Validate we can parse as proto. + envoy::admin::v3::ConfigDump config_dump_with_eds; + TestUtility::loadFromJson(response->body(), config_dump_with_eds); + EXPECT_EQ(7, config_dump_with_eds.configs_size()); + // Validate that the "inboundonly" does not stop the default listener. 
response = IntegrationUtil::makeSingleRequest(lookupPort("admin"), "POST", "/drain_listeners?inboundonly", "", downstreamProtocol(), version_); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ("text/plain; charset=UTF-8", ContentType(response)); EXPECT_EQ("OK\n", response->body()); @@ -395,7 +417,7 @@ TEST_P(IntegrationAdminTest, Admin) { response = IntegrationUtil::makeSingleRequest(lookupPort("admin"), "POST", "/drain_listeners", "", downstreamProtocol(), version_); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ("text/plain; charset=UTF-8", ContentType(response)); EXPECT_EQ("OK\n", response->body()); @@ -415,7 +437,7 @@ TEST_P(IntegrationAdminTest, AdminDrainInboundOnly) { lookupPort("admin"), "POST", "/drain_listeners?inboundonly", "", downstreamProtocol(), version_); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ("text/plain; charset=UTF-8", ContentType(response)); EXPECT_EQ("OK\n", response->body()); @@ -493,7 +515,7 @@ TEST_F(IntegrationAdminIpv4Ipv6Test, Ipv4Ipv6Listen) { BufferingStreamDecoderPtr response = IntegrationUtil::makeSingleRequest( lookupPort("admin"), "GET", "/server_info", "", downstreamProtocol(), version_); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } } @@ -523,7 +545,7 @@ class StatsMatcherIntegrationTest response_ = IntegrationUtil::makeSingleRequest(lookupPort("admin"), "GET", "/stats", "", downstreamProtocol(), version_); ASSERT_TRUE(response_->complete()); - EXPECT_EQ("200", response_->headers().Status()->value().getStringView()); 
+ EXPECT_EQ("200", response_->headers().getStatusValue()); } BufferingStreamDecoderPtr response_; diff --git a/test/integration/integration_admin_test.h b/test/integration/integration_admin_test.h index 97426d52a6514..a63649e7ed710 100644 --- a/test/integration/integration_admin_test.h +++ b/test/integration/integration_admin_test.h @@ -16,6 +16,17 @@ class IntegrationAdminTest : public HttpProtocolIntegrationTest { public: void initialize() override { config_helper_.addFilter(ConfigHelper::defaultHealthCheckFilter()); + config_helper_.addConfigModifier( + [](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { + auto& hist_settings = + *bootstrap.mutable_stats_config()->mutable_histogram_bucket_settings(); + envoy::config::metrics::v3::HistogramBucketSettings* setting = hist_settings.Add(); + setting->mutable_match()->set_suffix("upstream_cx_connect_ms"); + setting->mutable_buckets()->Add(1); + setting->mutable_buckets()->Add(2); + setting->mutable_buckets()->Add(3); + setting->mutable_buckets()->Add(4); + }); HttpIntegrationTest::initialize(); } @@ -32,15 +43,7 @@ class IntegrationAdminTest : public HttpProtocolIntegrationTest { response = IntegrationUtil::makeSingleRequest(lookupPort(port_key), method, endpoint, "", downstreamProtocol(), version_); EXPECT_TRUE(response->complete()); - return response->headers().Status()->value().getStringView(); - } - - /** - * Destructor for an individual test. 
- */ - void TearDown() override { - test_server_.reset(); - fake_upstreams_.clear(); + return response->headers().getStatusValue(); } /** diff --git a/test/integration/integration_test.cc b/test/integration/integration_test.cc index 1ae41217b1a0e..9efe9bab87b86 100644 --- a/test/integration/integration_test.cc +++ b/test/integration/integration_test.cc @@ -54,6 +54,50 @@ INSTANTIATE_TEST_SUITE_P(IpVersions, IntegrationTest, testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), TestUtility::ipTestParamsToString); +// Verify that we gracefully handle an invalid pre-bind socket option when using reuse port. +TEST_P(IntegrationTest, BadPrebindSocketOptionWithReusePort) { + // Reserve a port that we can then use on the integration listener with reuse port. + auto addr_socket = + Network::Test::bindFreeLoopbackPort(version_, Network::Socket::Type::Stream, true); + // Do not wait for listeners to start as the listener will fail. + defer_listener_finalization_ = true; + + config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { + auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0); + listener->set_reuse_port(true); + listener->mutable_address()->mutable_socket_address()->set_port_value( + addr_socket.second->localAddress()->ip()->port()); + auto socket_option = listener->add_socket_options(); + socket_option->set_state(envoy::config::core::v3::SocketOption::STATE_PREBIND); + socket_option->set_level(10000); // Invalid level. + socket_option->set_int_value(10000); // Invalid value. + }); + initialize(); + test_server_->waitForCounterGe("listener_manager.listener_create_failure", 1); +} + +// Verify that we gracefully handle an invalid post-bind socket option when using reuse port. +TEST_P(IntegrationTest, BadPostbindSocketOptionWithReusePort) { + // Reserve a port that we can then use on the integration listener with reuse port. 
+ auto addr_socket = + Network::Test::bindFreeLoopbackPort(version_, Network::Socket::Type::Stream, true); + // Do not wait for listeners to start as the listener will fail. + defer_listener_finalization_ = true; + + config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { + auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0); + listener->set_reuse_port(true); + listener->mutable_address()->mutable_socket_address()->set_port_value( + addr_socket.second->localAddress()->ip()->port()); + auto socket_option = listener->add_socket_options(); + socket_option->set_state(envoy::config::core::v3::SocketOption::STATE_BOUND); + socket_option->set_level(10000); // Invalid level. + socket_option->set_int_value(10000); // Invalid value. + }); + initialize(); + test_server_->waitForCounterGe("listener_manager.listener_create_failure", 1); +} + // Make sure we have correctly specified per-worker performance stats. TEST_P(IntegrationTest, PerWorkerStatsAndBalancing) { concurrency_ = 2; @@ -80,12 +124,12 @@ TEST_P(IntegrationTest, PerWorkerStatsAndBalancing) { check_listener_stats(0, 0); // Main thread admin listener stats. - EXPECT_NE(nullptr, test_server_->counter("listener.admin.main_thread.downstream_cx_total")); + test_server_->waitForCounterExists("listener.admin.main_thread.downstream_cx_total"); // Per-thread watchdog stats. 
- EXPECT_NE(nullptr, test_server_->counter("server.main_thread.watchdog_miss")); - EXPECT_NE(nullptr, test_server_->counter("server.worker_0.watchdog_miss")); - EXPECT_NE(nullptr, test_server_->counter("server.worker_1.watchdog_miss")); + test_server_->waitForCounterExists("server.main_thread.watchdog_miss"); + test_server_->waitForCounterExists("server.worker_0.watchdog_miss"); + test_server_->waitForCounterExists("server.worker_1.watchdog_miss"); codec_client_ = makeHttpConnection(lookupPort("http")); IntegrationCodecClientPtr codec_client2 = makeHttpConnection(lookupPort("http")); @@ -96,9 +140,6 @@ TEST_P(IntegrationTest, PerWorkerStatsAndBalancing) { check_listener_stats(0, 1); } -// Validates that the drain actually drains the listeners. -TEST_P(IntegrationTest, AdminDrainDrainsListeners) { testAdminDrain(downstreamProtocol()); } - TEST_P(IntegrationTest, RouterDirectResponse) { const std::string body = "Response body"; const std::string file_path = TestEnvironment::writeStringToFileForTest("test_envoy", body); @@ -131,12 +172,12 @@ TEST_P(IntegrationTest, RouterDirectResponse) { BufferingStreamDecoderPtr response = IntegrationUtil::makeSingleRequest( lookupPort("http"), "GET", "/", "", downstream_protocol_, version_, "direct.example.com"); ASSERT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ("example-value", response->headers() .get(Envoy::Http::LowerCaseString("x-additional-header")) ->value() .getStringView()); - EXPECT_EQ("text/html", response->headers().ContentType()->value().getStringView()); + EXPECT_EQ("text/html", response->headers().getContentTypeValue()); EXPECT_EQ(body, response->body()); } @@ -151,7 +192,7 @@ TEST_P(IntegrationTest, ConnectionClose) { {":authority", "host"}, {"connection", "close"}}); response->waitForEndStream(); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); 
EXPECT_TRUE(response->complete()); EXPECT_THAT(response->headers(), HttpStatusIs("200")); @@ -249,11 +290,21 @@ TEST_P(IntegrationTest, RouterUpstreamResponseBeforeRequestComplete) { } TEST_P(IntegrationTest, EnvoyProxyingEarly100ContinueWithEncoderFilter) { - testEnvoyProxying100Continue(true, true); + testEnvoyProxying1xx(true, true); } TEST_P(IntegrationTest, EnvoyProxyingLate100ContinueWithEncoderFilter) { - testEnvoyProxying100Continue(false, true); + testEnvoyProxying1xx(false, true); +} + +// Regression test for https://github.com/envoyproxy/envoy/issues/10923. +TEST_P(IntegrationTest, EnvoyProxying100ContinueWithDecodeDataPause) { + config_helper_.addFilter(R"EOF( + name: stop-iteration-and-continue-filter + typed_config: + "@type": type.googleapis.com/google.protobuf.Empty + )EOF"); + testEnvoyProxying1xx(true); } // This is a regression for https://github.com/envoyproxy/envoy/issues/2715 and validates that a @@ -292,7 +343,7 @@ TEST_P(IntegrationTest, UpstreamDisconnectWithTwoRequests) { EXPECT_TRUE(upstream_request_->complete()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); test_server_->waitForCounterGe("cluster.cluster_0.upstream_cx_total", 1); test_server_->waitForCounterGe("cluster.cluster_0.upstream_rq_200", 1); @@ -307,7 +358,7 @@ TEST_P(IntegrationTest, UpstreamDisconnectWithTwoRequests) { EXPECT_TRUE(upstream_request_->complete()); EXPECT_TRUE(response2->complete()); - EXPECT_EQ("200", response2->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response2->headers().getStatusValue()); test_server_->waitForCounterGe("cluster.cluster_0.upstream_cx_total", 2); test_server_->waitForCounterGe("cluster.cluster_0.upstream_rq_200", 2); } @@ -358,8 +409,7 @@ TEST_P(IntegrationTest, TestSmuggling) { "GET / HTTP/1.1\r\nHost: host\r\ncontent-length: 36\r\ntransfer-encoding: chunked\r\n\r\n" + smuggled_request; 
sendRawHttpAndWaitForResponse(lookupPort("http"), full_request.c_str(), &response, false); - EXPECT_EQ("HTTP/1.1 400 Bad Request\r\ncontent-length: 0\r\nconnection: close\r\n\r\n", - response); + EXPECT_THAT(response, HasSubstr("HTTP/1.1 400 Bad Request\r\n")); } { std::string response; @@ -367,8 +417,7 @@ TEST_P(IntegrationTest, TestSmuggling) { "\r\ncontent-length: 36\r\n\r\n" + smuggled_request; sendRawHttpAndWaitForResponse(lookupPort("http"), request.c_str(), &response, false); - EXPECT_EQ("HTTP/1.1 400 Bad Request\r\ncontent-length: 0\r\nconnection: close\r\n\r\n", - response); + EXPECT_THAT(response, HasSubstr("HTTP/1.1 400 Bad Request\r\n")); } { std::string response; @@ -376,8 +425,7 @@ TEST_P(IntegrationTest, TestSmuggling) { "identity,chunked \r\ncontent-length: 36\r\n\r\n" + smuggled_request; sendRawHttpAndWaitForResponse(lookupPort("http"), request.c_str(), &response, false); - EXPECT_EQ("HTTP/1.1 400 Bad Request\r\ncontent-length: 0\r\nconnection: close\r\n\r\n", - response); + EXPECT_THAT(response, HasSubstr("HTTP/1.1 400 Bad Request\r\n")); } } @@ -385,7 +433,7 @@ TEST_P(IntegrationTest, BadFirstline) { initialize(); std::string response; sendRawHttpAndWaitForResponse(lookupPort("http"), "hello", &response); - EXPECT_EQ("HTTP/1.1 400 Bad Request\r\ncontent-length: 0\r\nconnection: close\r\n\r\n", response); + EXPECT_THAT(response, HasSubstr("HTTP/1.1 400 Bad Request\r\n")); } TEST_P(IntegrationTest, MissingDelimiter) { @@ -394,7 +442,7 @@ TEST_P(IntegrationTest, MissingDelimiter) { std::string response; sendRawHttpAndWaitForResponse(lookupPort("http"), "GET / HTTP/1.1\r\nHost: host\r\nfoo bar\r\n\r\n", &response); - EXPECT_EQ("HTTP/1.1 400 Bad Request\r\ncontent-length: 0\r\nconnection: close\r\n\r\n", response); + EXPECT_THAT(response, HasSubstr("HTTP/1.1 400 Bad Request\r\n")); EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr("http1.codec_error")); } @@ -403,7 +451,7 @@ TEST_P(IntegrationTest, InvalidCharacterInFirstline) { std::string 
response; sendRawHttpAndWaitForResponse(lookupPort("http"), "GE(T / HTTP/1.1\r\nHost: host\r\n\r\n", &response); - EXPECT_EQ("HTTP/1.1 400 Bad Request\r\ncontent-length: 0\r\nconnection: close\r\n\r\n", response); + EXPECT_THAT(response, HasSubstr("HTTP/1.1 400 Bad Request\r\n")); } TEST_P(IntegrationTest, InvalidVersion) { @@ -411,7 +459,7 @@ TEST_P(IntegrationTest, InvalidVersion) { std::string response; sendRawHttpAndWaitForResponse(lookupPort("http"), "GET / HTTP/1.01\r\nHost: host\r\n\r\n", &response); - EXPECT_EQ("HTTP/1.1 400 Bad Request\r\ncontent-length: 0\r\nconnection: close\r\n\r\n", response); + EXPECT_THAT(response, HasSubstr("HTTP/1.1 400 Bad Request\r\n")); } // Expect that malformed trailers to break the connection @@ -428,7 +476,7 @@ TEST_P(IntegrationTest, BadTrailer) { "badtrailer\r\n\r\n", &response); - EXPECT_EQ("HTTP/1.1 400 Bad Request\r\ncontent-length: 0\r\nconnection: close\r\n\r\n", response); + EXPECT_THAT(response, HasSubstr("HTTP/1.1 400 Bad Request\r\n")); } // Expect malformed headers to break the connection @@ -445,7 +493,7 @@ TEST_P(IntegrationTest, BadHeader) { "body\r\n0\r\n\r\n", &response); - EXPECT_EQ("HTTP/1.1 400 Bad Request\r\ncontent-length: 0\r\nconnection: close\r\n\r\n", response); + EXPECT_THAT(response, HasSubstr("HTTP/1.1 400 Bad Request\r\n")); } TEST_P(IntegrationTest, Http10Disabled) { @@ -483,6 +531,21 @@ TEST_P(IntegrationTest, Http09Enabled) { EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr("HTTP/1.0")); } +TEST_P(IntegrationTest, Http09WithKeepalive) { + useAccessLog(); + autonomous_upstream_ = true; + config_helper_.addConfigModifier(&setAllowHttp10WithDefaultHost); + initialize(); + reinterpret_cast(fake_upstreams_.front().get()) + ->setResponseHeaders(std::make_unique( + Http::TestResponseHeaderMapImpl({{":status", "200"}, {"content-length", "0"}}))); + std::string response; + sendRawHttpAndWaitForResponse(lookupPort("http"), "GET /\r\nConnection: keep-alive\r\n\r\n", + &response, true); + 
EXPECT_THAT(response, HasSubstr("HTTP/1.0 200 OK\r\n")); + EXPECT_THAT(response, HasSubstr("connection: keep-alive\r\n")); +} + // Turn HTTP/1.0 support on and verify the request is proxied and the default host is sent upstream. TEST_P(IntegrationTest, Http10Enabled) { autonomous_upstream_ = true; @@ -514,8 +577,8 @@ TEST_P(IntegrationTest, TestInlineHeaders) { "GET / HTTP/1.1\r\n" "Host: foo.com\r\n" "Foo: bar\r\n" - "Cache-control: public\r\n" - "Cache-control: 123\r\n" + "User-Agent: public\r\n" + "User-Agent: 123\r\n" "Eep: baz\r\n\r\n", &response, true); EXPECT_THAT(response, HasSubstr("HTTP/1.1 200 OK\r\n")); @@ -524,7 +587,7 @@ TEST_P(IntegrationTest, TestInlineHeaders) { reinterpret_cast(fake_upstreams_.front().get())->lastRequestHeaders(); ASSERT_TRUE(upstream_headers != nullptr); EXPECT_EQ(upstream_headers->Host()->value(), "foo.com"); - EXPECT_EQ(upstream_headers->CacheControl()->value(), "public,123"); + EXPECT_EQ(upstream_headers->get_("User-Agent"), "public,123"); ASSERT_TRUE(upstream_headers->get(Envoy::Http::LowerCaseString("foo")) != nullptr); EXPECT_EQ("bar", upstream_headers->get(Envoy::Http::LowerCaseString("foo"))->value().getStringView()); @@ -563,7 +626,7 @@ TEST_P(IntegrationTest, Http10WithHostandKeepAliveAndContentLengthAndLws) { initialize(); reinterpret_cast(fake_upstreams_.front().get()) ->setResponseHeaders(std::make_unique( - Http::TestHeaderMapImpl({{":status", "200"}, {"content-length", "10"}}))); + Http::TestResponseHeaderMapImpl({{":status", "200"}, {"content-length", "10"}}))); std::string response; sendRawHttpAndWaitForResponse(lookupPort("http"), "GET / HTTP/1.0\r\nHost: foo.com \r\nConnection:Keep-alive\r\n\r\n", @@ -580,25 +643,23 @@ TEST_P(IntegrationTest, Pipeline) { initialize(); std::string response; - Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\nHost: host\r\n\r\nGET / HTTP/1.1\r\n\r\n"); - RawConnectionDriver connection( - lookupPort("http"), buffer, + auto connection = createConnectionDriver( + lookupPort("http"), "GET 
/ HTTP/1.1\r\nHost: host\r\n\r\nGET / HTTP/1.1\r\n\r\n", [&](Network::ClientConnection&, const Buffer::Instance& data) -> void { response.append(data.toString()); - }, - version_); + }); // First response should be success. while (response.find("200") == std::string::npos) { - connection.run(Event::Dispatcher::RunType::NonBlock); + connection->run(Event::Dispatcher::RunType::NonBlock); } EXPECT_THAT(response, HasSubstr("HTTP/1.1 200 OK\r\n")); // Second response should be 400 (no host) while (response.find("400") == std::string::npos) { - connection.run(Event::Dispatcher::RunType::NonBlock); + connection->run(Event::Dispatcher::RunType::NonBlock); } EXPECT_THAT(response, HasSubstr("HTTP/1.1 400 Bad Request\r\n")); - connection.close(); + connection->close(); } // Checks to ensure that we reject the third request that is pipelined in the @@ -629,58 +690,57 @@ TEST_P(IntegrationTest, PipelineWithTrailers) { "trailer2:t3\r\n" "\r\n"); - Buffer::OwnedImpl buffer(absl::StrCat(good_request, good_request, bad_request)); - - RawConnectionDriver connection( - lookupPort("http"), buffer, - [&](Network::ClientConnection&, const Buffer::Instance& data) -> void { + auto connection = createConnectionDriver( + lookupPort("http"), absl::StrCat(good_request, good_request, bad_request), + [&response](Network::ClientConnection&, const Buffer::Instance& data) -> void { response.append(data.toString()); - }, - version_); + }); // First response should be success. 
size_t pos; while ((pos = response.find("200")) == std::string::npos) { - connection.run(Event::Dispatcher::RunType::NonBlock); + connection->run(Event::Dispatcher::RunType::NonBlock); } EXPECT_THAT(response, HasSubstr("HTTP/1.1 200 OK\r\n")); while (response.find("200", pos + 1) == std::string::npos) { - connection.run(Event::Dispatcher::RunType::NonBlock); + connection->run(Event::Dispatcher::RunType::NonBlock); } while (response.find("400") == std::string::npos) { - connection.run(Event::Dispatcher::RunType::NonBlock); + connection->run(Event::Dispatcher::RunType::NonBlock); } EXPECT_THAT(response, HasSubstr("HTTP/1.1 400 Bad Request\r\n")); - connection.close(); + connection->close(); } // Add a pipeline test where complete request headers in the first request merit // an inline sendLocalReply to make sure the "kick" works under the call stack // of dispatch as well as when a response is proxied from upstream. TEST_P(IntegrationTest, PipelineInline) { + // When deprecating this flag, set hcm.mutable_stream_error_on_invalid_http_message true. 
+ config_helper_.addRuntimeOverride("envoy.reloadable_features.hcm_stream_error_on_invalid_message", + "false"); + autonomous_upstream_ = true; initialize(); std::string response; - Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\n\r\nGET / HTTP/1.0\r\n\r\n"); - RawConnectionDriver connection( - lookupPort("http"), buffer, - [&](Network::ClientConnection&, const Buffer::Instance& data) -> void { + auto connection = createConnectionDriver( + lookupPort("http"), "GET / HTTP/1.1\r\n\r\nGET / HTTP/1.0\r\n\r\n", + [&response](Network::ClientConnection&, const Buffer::Instance& data) -> void { response.append(data.toString()); - }, - version_); + }); while (response.find("400") == std::string::npos) { - connection.run(Event::Dispatcher::RunType::NonBlock); + connection->run(Event::Dispatcher::RunType::NonBlock); } EXPECT_THAT(response, HasSubstr("HTTP/1.1 400 Bad Request\r\n")); while (response.find("426") == std::string::npos) { - connection.run(Event::Dispatcher::RunType::NonBlock); + connection->run(Event::Dispatcher::RunType::NonBlock); } EXPECT_THAT(response, HasSubstr("HTTP/1.1 426 Upgrade Required\r\n")); - connection.close(); + connection->close(); } TEST_P(IntegrationTest, NoHost) { @@ -693,7 +753,7 @@ TEST_P(IntegrationTest, NoHost) { response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ("400", response->headers().Status()->value().getStringView()); + EXPECT_EQ("400", response->headers().getStatusValue()); } TEST_P(IntegrationTest, BadPath) { @@ -753,7 +813,7 @@ TEST_P(IntegrationTest, AbsolutePathWithoutPort) { // Ensure that connect behaves the same with allow_absolute_url enabled and without TEST_P(IntegrationTest, Connect) { - const std::string& request = "CONNECT www.somewhere.com:80 HTTP/1.1\r\nHost: host\r\n\r\n"; + const std::string& request = "CONNECT www.somewhere.com:80 HTTP/1.1\r\n\r\n"; config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { // Clone the whole listener. 
auto static_resources = bootstrap.mutable_static_resources(); @@ -791,10 +851,10 @@ TEST_P(IntegrationTest, UpstreamProtocolError) { ASSERT_TRUE(fake_upstream_connection->waitForData(187, &data)); ASSERT_TRUE(fake_upstream_connection->write("bad protocol data!")); ASSERT_TRUE(fake_upstream_connection->waitForDisconnect()); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("503", response->headers().Status()->value().getStringView()); + EXPECT_EQ("503", response->headers().getStatusValue()); } TEST_P(IntegrationTest, TestHead) { @@ -802,10 +862,10 @@ TEST_P(IntegrationTest, TestHead) { codec_client_ = makeHttpConnection(lookupPort("http")); - Http::TestHeaderMapImpl head_request{{":method", "HEAD"}, - {":path", "/test/long/url"}, - {":scheme", "http"}, - {":authority", "host"}}; + Http::TestRequestHeaderMapImpl head_request{{":method", "HEAD"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}}; // Without an explicit content length, assume we chunk for HTTP/1.1 auto response = sendRequestAndWaitForResponse(head_request, 0, default_response_headers_, 0); @@ -818,15 +878,14 @@ TEST_P(IntegrationTest, TestHead) { EXPECT_EQ(0, response->body().size()); // Preserve explicit content length. 
- Http::TestHeaderMapImpl content_length_response{{":status", "200"}, {"content-length", "12"}}; + Http::TestResponseHeaderMapImpl content_length_response{{":status", "200"}, + {"content-length", "12"}}; response = sendRequestAndWaitForResponse(head_request, 0, content_length_response, 0); ASSERT_TRUE(response->complete()); EXPECT_THAT(response->headers(), HttpStatusIs("200")); EXPECT_THAT(response->headers(), HeaderValueOf(Headers::get().ContentLength, "12")); EXPECT_EQ(response->headers().TransferEncoding(), nullptr); EXPECT_EQ(0, response->body().size()); - - cleanupUpstreamAndDownstream(); } // The Envoy HTTP/1.1 codec ASSERTs that T-E headers are cleared in @@ -836,7 +895,7 @@ TEST_P(IntegrationTest, TestHeadWithExplicitTE) { initialize(); auto tcp_client = makeTcpConnection(lookupPort("http")); - tcp_client->write("HEAD / HTTP/1.1\r\nHost: host\r\n\r\n"); + ASSERT_TRUE(tcp_client->write("HEAD / HTTP/1.1\r\nHost: host\r\n\r\n")); FakeRawConnectionPtr fake_upstream_connection; ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection)); std::string data; @@ -935,7 +994,7 @@ TEST_P(IntegrationTest, ViaAppendHeaderOnly) { EXPECT_THAT(upstream_request_->headers(), HeaderValueOf(Headers::get().Via, "foo, bar")); upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, true); response->waitForEndStream(); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); EXPECT_TRUE(response->complete()); EXPECT_THAT(response->headers(), HttpStatusIs("200")); EXPECT_THAT(response->headers(), HeaderValueOf(Headers::get().Via, "bar")); @@ -952,6 +1011,9 @@ TEST_P(IntegrationTest, ViaAppendWith100Continue) { // sent by Envoy, it will wait for response acknowledgment (via FIN/RST) from the client before // closing the socket (with a timeout for ensuring cleanup). 
TEST_P(IntegrationTest, TestDelayedConnectionTeardownOnGracefulClose) { + config_helper_.addConfigModifier( + [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) { hcm.mutable_delayed_close_timeout()->set_seconds(1); }); // This test will trigger an early 413 Payload Too Large response due to buffer limits being // exceeded. The following filter is needed since the router filter will never trigger a 413. config_helper_.addFilter("{ name: encoder-decoder-buffer-filter, typed_config: { \"@type\": " @@ -975,7 +1037,7 @@ TEST_P(IntegrationTest, TestDelayedConnectionTeardownOnGracefulClose) { response->waitForEndStream(); EXPECT_TRUE(response->complete()); - EXPECT_EQ("413", response->headers().Status()->value().getStringView()); + EXPECT_EQ("413", response->headers().getStatusValue()); // With no delayed close processing, Envoy will close the connection immediately after flushing // and this should instead return true. EXPECT_FALSE(codec_client_->waitForDisconnect(std::chrono::milliseconds(500))); @@ -983,7 +1045,7 @@ TEST_P(IntegrationTest, TestDelayedConnectionTeardownOnGracefulClose) { // Issue a local close and check that the client did not pick up a remote close which can happen // when delayed close semantics are disabled. codec_client_->connection()->close(Network::ConnectionCloseType::NoFlush); - EXPECT_EQ(codec_client_->last_connection_event(), Network::ConnectionEvent::LocalClose); + EXPECT_EQ(codec_client_->lastConnectionEvent(), Network::ConnectionEvent::LocalClose); } // Test configuration of the delayed close timeout on downstream HTTP/1.1 connections. A value of 0 @@ -1020,7 +1082,7 @@ TEST_P(IntegrationTest, TestDelayedConnectionTeardownConfig) { // Therefore, avoid checking response code/payload here and instead simply look for the remote // close. 
EXPECT_TRUE(codec_client_->waitForDisconnect(std::chrono::milliseconds(500))); - EXPECT_EQ(codec_client_->last_connection_event(), Network::ConnectionEvent::RemoteClose); + EXPECT_EQ(codec_client_->lastConnectionEvent(), Network::ConnectionEvent::RemoteClose); } // Test that delay closed connections are eventually force closed when the timeout triggers. @@ -1054,7 +1116,7 @@ TEST_P(IntegrationTest, TestDelayedConnectionTeardownTimeoutTrigger) { response->waitForEndStream(); // The delayed close timeout should trigger since client is not closing the connection. EXPECT_TRUE(codec_client_->waitForDisconnect(std::chrono::milliseconds(2000))); - EXPECT_EQ(codec_client_->last_connection_event(), Network::ConnectionEvent::RemoteClose); + EXPECT_EQ(codec_client_->lastConnectionEvent(), Network::ConnectionEvent::RemoteClose); EXPECT_EQ(test_server_->counter("http.config_test.downstream_cx_delayed_close_timeout")->value(), 1); } @@ -1092,7 +1154,7 @@ TEST_P(IntegrationTest, NoConnectionPoolsFree) { response->waitForEndStream(); - EXPECT_EQ("503", response->headers().Status()->value().getStringView()); + EXPECT_EQ("503", response->headers().getStatusValue()); test_server_->waitForCounterGe("cluster.cluster_0.upstream_rq_503", 1); EXPECT_EQ(test_server_->counter("cluster.cluster_0.upstream_cx_pool_overflow")->value(), 1); @@ -1113,7 +1175,7 @@ TEST_P(IntegrationTest, ProcessObjectHealthy) { {":authority", "host"}, {"connection", "close"}}); response->waitForEndStream(); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); EXPECT_TRUE(response->complete()); EXPECT_THAT(response->headers(), HttpStatusIs("200")); @@ -1134,7 +1196,7 @@ TEST_P(IntegrationTest, ProcessObjectUnealthy) { {":authority", "host"}, {"connection", "close"}}); response->waitForEndStream(); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); EXPECT_TRUE(response->complete()); EXPECT_THAT(response->headers(), HttpStatusIs("500")); @@ 
-1165,6 +1227,11 @@ TEST_P(UpstreamEndpointIntegrationTest, TestUpstreamEndpointAddress) { // Send continuous pipelined requests while not reading responses, to check // HTTP/1.1 response flood protection. TEST_P(IntegrationTest, TestFlood) { + config_helper_.addConfigModifier( + [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) -> void { + hcm.mutable_stream_error_on_invalid_http_message()->set_value(true); + }); initialize(); // Set up a raw connection to easily send requests without reading responses. @@ -1195,12 +1262,15 @@ TEST_P(IntegrationTest, TestFlood) { } TEST_P(IntegrationTest, TestFloodUpstreamErrors) { + config_helper_.addConfigModifier( + [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) { hcm.mutable_delayed_close_timeout()->set_seconds(1); }); autonomous_upstream_ = true; initialize(); // Set an Upstream reply with an invalid content-length, which will be rejected by the Envoy. auto response_headers = std::make_unique( - Http::TestHeaderMapImpl({{":status", "200"}, {"content-length", "invalid"}})); + Http::TestResponseHeaderMapImpl({{":status", "200"}, {"content-length", "invalid"}})); reinterpret_cast(fake_upstreams_.front().get()) ->setResponseHeaders(std::move(response_headers)); @@ -1239,6 +1309,11 @@ TEST_P(IntegrationTest, TestFloodUpstreamErrors) { // Make sure flood protection doesn't kick in with many requests sent serially. 
TEST_P(IntegrationTest, TestManyBadRequests) { + config_helper_.addConfigModifier( + [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) -> void { + hcm.mutable_stream_error_on_invalid_http_message()->set_value(true); + }); initialize(); codec_client_ = makeHttpConnection(lookupPort("http")); @@ -1277,20 +1352,18 @@ TEST_P(IntegrationTest, TestUpgradeHeaderInResponse) { response->waitForEndStream(); EXPECT_TRUE(response->complete()); EXPECT_EQ("Hello World", response->body()); - cleanupUpstreamAndDownstream(); } TEST_P(IntegrationTest, ConnectWithNoBody) { config_helper_.addConfigModifier( [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& - hcm) -> void { - hcm.add_upgrade_configs()->set_upgrade_type("CONNECT"); - hcm.mutable_http2_protocol_options()->set_allow_connect(true); - }); + hcm) -> void { ConfigHelper::setConnectConfig(hcm, false); }); initialize(); + // Send the payload early so we can regression test that body data does not + // get proxied until after the response headers are sent. IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("http")); - tcp_client->write("CONNECT host.com:80 HTTP/1.1\r\nHost: host\r\n\r\n", false); + ASSERT_TRUE(tcp_client->write("CONNECT host.com:80 HTTP/1.1\r\n\r\npayload", false)); FakeRawConnectionPtr fake_upstream_connection; ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection)); @@ -1298,20 +1371,22 @@ TEST_P(IntegrationTest, ConnectWithNoBody) { ASSERT_TRUE(fake_upstream_connection->waitForData( FakeRawConnection::waitForInexactMatch("\r\n\r\n"), &data)); EXPECT_TRUE(absl::StartsWith(data, "CONNECT host.com:80 HTTP/1.1")); + // The payload should not be present as the response headers have not been sent. 
+ EXPECT_FALSE(absl::StrContains(data, "payload")) << data; // No transfer-encoding: chunked or connection: close EXPECT_FALSE(absl::StrContains(data, "hunked")) << data; EXPECT_FALSE(absl::StrContains(data, "onnection")) << data; - ASSERT_TRUE(fake_upstream_connection->write("HTTP/1.1 200 OK\r\nContent-length: 0\r\n\r\n")); + ASSERT_TRUE(fake_upstream_connection->write("HTTP/1.1 200 OK\r\n\r\n")); tcp_client->waitForData("\r\n\r\n", false); EXPECT_TRUE(absl::StartsWith(tcp_client->data(), "HTTP/1.1 200 OK\r\n")) << tcp_client->data(); // Make sure the following payload is proxied without chunks or any other modifications. - tcp_client->write("payload"); ASSERT_TRUE(fake_upstream_connection->waitForData( FakeRawConnection::waitForInexactMatch("\r\n\r\npayload"), &data)); ASSERT_TRUE(fake_upstream_connection->write("return-payload")); tcp_client->waitForData("\r\n\r\nreturn-payload", false); + EXPECT_FALSE(absl::StrContains(tcp_client->data(), "hunked")); tcp_client->close(); ASSERT_TRUE(fake_upstream_connection->waitForDisconnect()); @@ -1320,14 +1395,11 @@ TEST_P(IntegrationTest, ConnectWithNoBody) { TEST_P(IntegrationTest, ConnectWithChunkedBody) { config_helper_.addConfigModifier( [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& - hcm) -> void { - hcm.add_upgrade_configs()->set_upgrade_type("CONNECT"); - hcm.mutable_http2_protocol_options()->set_allow_connect(true); - }); + hcm) -> void { ConfigHelper::setConnectConfig(hcm, false); }); initialize(); IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("http")); - tcp_client->write("CONNECT host.com:80 HTTP/1.1\r\nHost: host\r\n\r\n", false); + ASSERT_TRUE(tcp_client->write("CONNECT host.com:80 HTTP/1.1\r\n\r\npayload", false)); FakeRawConnectionPtr fake_upstream_connection; ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection)); @@ -1337,25 +1409,42 @@ TEST_P(IntegrationTest, ConnectWithChunkedBody) { // No transfer-encoding: 
chunked or connection: close EXPECT_FALSE(absl::StrContains(data, "hunked")) << data; EXPECT_FALSE(absl::StrContains(data, "onnection")) << data; - ASSERT_TRUE(fake_upstream_connection->write( "HTTP/1.1 200 OK\r\ntransfer-encoding: chunked\r\n\r\nb\r\nHello World\r\n0\r\n\r\n")); - tcp_client->waitForData("0\r\n\r\n", false); - EXPECT_TRUE(absl::StartsWith(tcp_client->data(), "HTTP/1.1 200 OK\r\n")); - EXPECT_TRUE(absl::StrContains(tcp_client->data(), "hunked")) << tcp_client->data(); - EXPECT_TRUE(absl::StrContains(tcp_client->data(), "\r\n\r\nb\r\nHello World\r\n0\r\n\r\n")) - << tcp_client->data(); + // The response will be rejected because chunked headers are not allowed with CONNECT upgrades. + // Envoy will send a local reply due to the invalid upstream response. + tcp_client->waitForDisconnect(false); + EXPECT_TRUE(absl::StartsWith(tcp_client->data(), "HTTP/1.1 503 Service Unavailable\r\n")); + ASSERT_TRUE(fake_upstream_connection->waitForDisconnect()); +} - // Make sure the following payload is proxied without chunks or any other modifications. - tcp_client->write("payload"); - ASSERT_TRUE(fake_upstream_connection->waitForData( - FakeRawConnection::waitForInexactMatch("\r\n\r\npayload"))); +// Verifies that a 204 response returns without a body +TEST_P(IntegrationTest, Response204WithBody) { + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); - ASSERT_TRUE(fake_upstream_connection->write("return-payload")); - tcp_client->waitForData("\r\n\r\nreturn-payload", false); + Http::TestRequestHeaderMapImpl request_headers{ + {":method", "GET"}, {":path", "/test/long/url"}, {":scheme", "http"}, {":authority", "host"}}; - tcp_client->close(); - ASSERT_TRUE(fake_upstream_connection->waitForDisconnect()); + auto response = codec_client_->makeHeaderOnlyRequest(default_request_headers_); + waitForNextUpstreamRequest(); + // Create a response with a body. This will cause an upstream messaging error but downstream + // should still see a response. 
+ upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "204"}}, false); + upstream_request_->encodeData(512, true); + ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect(true)); + + response->waitForEndStream(); + + EXPECT_TRUE(response->complete()); + EXPECT_THAT(response->headers(), HttpStatusIs("204")); + // The body should be removed + EXPECT_EQ(0, response->body().size()); +} + +TEST_P(IntegrationTest, QuitQuitQuit) { + initialize(); + test_server_->useAdminInterfaceToQuit(true); } } // namespace Envoy diff --git a/test/integration/listener_lds_integration_test.cc b/test/integration/listener_lds_integration_test.cc index bba29eaa20fb0..534b9a7fb3262 100644 --- a/test/integration/listener_lds_integration_test.cc +++ b/test/integration/listener_lds_integration_test.cc @@ -32,10 +32,7 @@ class ListenerIntegrationTest : public HttpIntegrationTest, ListenerIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, ipVersion(), realTime()) {} - ~ListenerIntegrationTest() override { - resetConnections(); - cleanupUpstreamAndDownstream(); - } + ~ListenerIntegrationTest() override { resetConnections(); } void initialize() override { // We want to use the GRPC based LDS. 
@@ -249,16 +246,15 @@ TEST_P(ListenerIntegrationTest, BasicSuccess) { codec_client_ = makeHttpConnection(lookupPort(listener_name_)); int response_size = 800; int request_size = 10; - Http::TestHeaderMapImpl response_headers{{":status", "200"}, - {"server_id", "cluster_0, backend_0"}}; + Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}, + {"server_id", "cluster_0, backend_0"}}; auto response = sendRequestAndWaitForResponse( - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {":method", "GET"}, {":path", "/"}, {":authority", "host"}, {":scheme", "http"}}, request_size, response_headers, response_size, /*cluster_0*/ 0); verifyResponse(std::move(response), "200", response_headers, std::string(response_size, 'a')); EXPECT_TRUE(upstream_request_->complete()); EXPECT_EQ(request_size, upstream_request_->bodyLength()); - cleanupUpstreamAndDownstream(); } } // namespace diff --git a/test/integration/load_stats_integration_test.cc b/test/integration/load_stats_integration_test.cc index d917259e4a9a9..ef6402e71403b 100644 --- a/test/integration/load_stats_integration_test.cc +++ b/test/integration/load_stats_integration_test.cc @@ -17,10 +17,10 @@ namespace Envoy { namespace { -class LoadStatsIntegrationTest : public testing::TestWithParam, +class LoadStatsIntegrationTest : public Grpc::VersionedGrpcClientIntegrationParamTest, public HttpIntegrationTest { public: - LoadStatsIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, GetParam()) { + LoadStatsIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, ipVersion()) { // We rely on some fairly specific load balancing picks in this test, so // determinize the schedule. 
setDeterministic(); @@ -112,6 +112,7 @@ class LoadStatsIntegrationTest : public testing::TestWithParammutable_load_stats_config(); loadstats_config->set_api_type(envoy::config::core::v3::ApiConfigSource::GRPC); loadstats_config->add_grpc_services()->mutable_envoy_grpc()->set_cluster_name("load_report"); + loadstats_config->set_transport_api_version(apiVersion()); auto* load_report_cluster = bootstrap.mutable_static_resources()->add_clusters(); load_report_cluster->MergeFrom(bootstrap.static_resources().clusters()[0]); load_report_cluster->mutable_circuit_breakers()->Clear(); @@ -158,11 +159,20 @@ class LoadStatsIntegrationTest : public testing::TestWithParambegin(); + it != local_loadstats_request.mutable_cluster_stats()->end(); ++it) { + if (it->cluster_name() == "load_report") { + local_loadstats_request.mutable_cluster_stats()->erase(it); + break; + } + } + + ASSERT_LE(loadstats_request.cluster_stats_size(), 1) << loadstats_request.DebugString(); + ASSERT_LE(local_loadstats_request.cluster_stats_size(), 1) + << local_loadstats_request.DebugString(); if (local_loadstats_request.cluster_stats_size() == 0) { return; @@ -228,10 +238,11 @@ class LoadStatsIntegrationTest : public testing::TestWithParam& expected_locality_stats, uint64_t dropped = 0) { + auto end_time = timeSystem().monotonicTime() + TestUtility::DefaultTimeout; Protobuf::RepeatedPtrField expected_cluster_stats; if (!expected_locality_stats.empty() || dropped != 0) { auto* cluster_stats = expected_cluster_stats.Add(); @@ -254,6 +265,11 @@ class LoadStatsIntegrationTest : public testing::TestWithParamwaitForGrpcMessage(*dispatcher_, local_loadstats_request); RELEASE_ASSERT(result, result.message()); + // Check that "envoy.lrs.supports_send_all_clusters" client feature is set. 
+ if (local_loadstats_request.has_node()) { + EXPECT_THAT(local_loadstats_request.node().client_features(), + ::testing::ElementsAre("envoy.lrs.supports_send_all_clusters")); + } // Sanity check and clear the measured load report interval. for (auto& cluster_stats : *local_loadstats_request.mutable_cluster_stats()) { const uint32_t actual_load_report_interval_ms = @@ -267,13 +283,19 @@ class LoadStatsIntegrationTest : public testing::TestWithParamheaders().Method()->value().getStringView()); - EXPECT_EQ("/envoy.service.load_stats.v2.LoadReportingService/StreamLoadStats", - loadstats_stream_->headers().Path()->value().getStringView()); - EXPECT_EQ("application/grpc", - loadstats_stream_->headers().ContentType()->value().getStringView()); + EXPECT_EQ("POST", loadstats_stream_->headers().getMethodValue()); + EXPECT_EQ( + TestUtility::getVersionedMethodPath("envoy.service.load_stats.{}.LoadReportingService", + "StreamLoadStats", apiVersion()), + loadstats_stream_->headers().getPathValue()); + EXPECT_EQ("application/grpc", loadstats_stream_->headers().getContentTypeValue()); + if (timeSystem().monotonicTime() >= end_time) { + return TestUtility::assertRepeatedPtrFieldEqual(expected_cluster_stats, + loadstats_request.cluster_stats(), true); + } } while (!TestUtility::assertRepeatedPtrFieldEqual(expected_cluster_stats, loadstats_request.cluster_stats(), true)); + return testing::AssertionSuccess(); } void waitForUpstreamResponse(uint32_t endpoint_index, uint32_t response_code = 200) { @@ -294,18 +316,21 @@ class LoadStatsIntegrationTest : public testing::TestWithParambodyLength()); ASSERT_TRUE(response_->complete()); - EXPECT_EQ(std::to_string(response_code), - response_->headers().Status()->value().getStringView()); + EXPECT_EQ(std::to_string(response_code), response_->headers().getStatusValue()); EXPECT_EQ(response_size_, response_->body().size()); } - void requestLoadStatsResponse(const std::vector& clusters) { + void requestLoadStatsResponse(const std::vector& 
clusters, + bool send_all_clusters = false) { envoy::service::load_stats::v3::LoadStatsResponse loadstats_response; loadstats_response.mutable_load_reporting_interval()->MergeFrom( Protobuf::util::TimeUtil::MillisecondsToDuration(load_report_interval_ms_)); for (const auto& cluster : clusters) { loadstats_response.add_clusters(cluster); } + if (send_all_clusters) { + loadstats_response.set_send_all_clusters(true); + } loadstats_stream_->sendGrpcMessage(loadstats_response); // Wait until the request has been received by Envoy. test_server_->waitForCounterGe("load_reporter.requests", ++load_requests_); @@ -360,9 +385,8 @@ class LoadStatsIntegrationTest : public testing::TestWithParamstartGrpcStream(); // Simple 50%/50% split between dragon/winter localities. Also include an @@ -384,8 +408,8 @@ TEST_P(LoadStatsIntegrationTest, Success) { } // Verify we do not get empty stats for non-zero priorities. - waitForLoadStatsRequest( - {localityStats("winter", 2, 0, 0, 2), localityStats("dragon", 2, 0, 0, 2)}); + ASSERT_TRUE(waitForLoadStatsRequest( + {localityStats("winter", 2, 0, 0, 2), localityStats("dragon", 2, 0, 0, 2)})); EXPECT_EQ(1, test_server_->counter("load_reporter.requests")->value()); // On slow machines, more than one load stats response may be pushed while we are simulating load. @@ -394,7 +418,8 @@ TEST_P(LoadStatsIntegrationTest, Success) { // 33%/67% split between dragon/winter primary localities. updateClusterLoadAssignment({{0}}, {{1, 2}}, {}, {{4}}); - requestLoadStatsResponse({"cluster_0"}); + // Verify that send_all_clusters works. + requestLoadStatsResponse({}, true); for (uint32_t i = 0; i < 6; ++i) { sendAndReceiveUpstream((4 + i) % 3); @@ -402,8 +427,8 @@ TEST_P(LoadStatsIntegrationTest, Success) { // No locality for priority=1 since there's no "winter" endpoints. // The hosts for dragon were received because membership_total is accurate. 
- waitForLoadStatsRequest( - {localityStats("winter", 2, 0, 0, 2), localityStats("dragon", 4, 0, 0, 4)}); + ASSERT_TRUE(waitForLoadStatsRequest( + {localityStats("winter", 2, 0, 0, 2), localityStats("dragon", 4, 0, 0, 4)})); EXPECT_EQ(2, test_server_->counter("load_reporter.requests")->value()); EXPECT_LE(3, test_server_->counter("load_reporter.responses")->value()); @@ -418,8 +443,8 @@ TEST_P(LoadStatsIntegrationTest, Success) { sendAndReceiveUpstream(i % 2 + 3); } - waitForLoadStatsRequest( - {localityStats("winter", 2, 0, 0, 2, 1), localityStats("dragon", 2, 0, 0, 2, 1)}); + ASSERT_TRUE(waitForLoadStatsRequest( + {localityStats("winter", 2, 0, 0, 2, 1), localityStats("dragon", 2, 0, 0, 2, 1)})); EXPECT_EQ(3, test_server_->counter("load_reporter.requests")->value()); EXPECT_LE(4, test_server_->counter("load_reporter.responses")->value()); EXPECT_EQ(0, test_server_->counter("load_reporter.errors")->value()); @@ -433,7 +458,7 @@ TEST_P(LoadStatsIntegrationTest, Success) { sendAndReceiveUpstream(1); } - waitForLoadStatsRequest({localityStats("winter", 1, 0, 0, 1)}); + ASSERT_TRUE(waitForLoadStatsRequest({localityStats("winter", 1, 0, 0, 1)})); EXPECT_EQ(4, test_server_->counter("load_reporter.requests")->value()); EXPECT_LE(5, test_server_->counter("load_reporter.responses")->value()); EXPECT_EQ(0, test_server_->counter("load_reporter.errors")->value()); @@ -446,7 +471,7 @@ TEST_P(LoadStatsIntegrationTest, Success) { sendAndReceiveUpstream(1); sendAndReceiveUpstream(1); - waitForLoadStatsRequest({localityStats("winter", 3, 0, 0, 3)}); + ASSERT_TRUE(waitForLoadStatsRequest({localityStats("winter", 3, 0, 0, 3)})); EXPECT_EQ(6, test_server_->counter("load_reporter.requests")->value()); EXPECT_LE(6, test_server_->counter("load_reporter.responses")->value()); @@ -460,7 +485,7 @@ TEST_P(LoadStatsIntegrationTest, Success) { sendAndReceiveUpstream(1); sendAndReceiveUpstream(1); - waitForLoadStatsRequest({localityStats("winter", 2, 0, 0, 2)}); + 
ASSERT_TRUE(waitForLoadStatsRequest({localityStats("winter", 2, 0, 0, 2)})); EXPECT_EQ(8, test_server_->counter("load_reporter.requests")->value()); EXPECT_LE(7, test_server_->counter("load_reporter.responses")->value()); @@ -477,7 +502,7 @@ TEST_P(LoadStatsIntegrationTest, LocalityWeighted) { initialize(); waitForLoadStatsStream(); - waitForLoadStatsRequest({}); + ASSERT_TRUE(waitForLoadStatsRequest({})); loadstats_stream_->startGrpcStream(); requestLoadStatsResponse({"cluster_0"}); @@ -495,8 +520,8 @@ TEST_P(LoadStatsIntegrationTest, LocalityWeighted) { sendAndReceiveUpstream(0); // Verify we get the expect request distribution. - waitForLoadStatsRequest( - {localityStats("winter", 4, 0, 0, 4), localityStats("dragon", 2, 0, 0, 2)}); + ASSERT_TRUE(waitForLoadStatsRequest( + {localityStats("winter", 4, 0, 0, 4), localityStats("dragon", 2, 0, 0, 2)})); EXPECT_EQ(1, test_server_->counter("load_reporter.requests")->value()); // On slow machines, more than one load stats response may be pushed while we are simulating load. @@ -512,7 +537,7 @@ TEST_P(LoadStatsIntegrationTest, NoLocalLocality) { initialize(); waitForLoadStatsStream(); - waitForLoadStatsRequest({}); + ASSERT_TRUE(waitForLoadStatsRequest({})); loadstats_stream_->startGrpcStream(); // Simple 50%/50% split between dragon/winter localities. Also include an @@ -529,8 +554,8 @@ TEST_P(LoadStatsIntegrationTest, NoLocalLocality) { // order of locality stats is different to the Success case, where winter is // the local locality (and hence first in the list as per // HostsPerLocality::get()). - waitForLoadStatsRequest( - {localityStats("dragon", 2, 0, 0, 2), localityStats("winter", 2, 0, 0, 2)}); + ASSERT_TRUE(waitForLoadStatsRequest( + {localityStats("dragon", 2, 0, 0, 2), localityStats("winter", 2, 0, 0, 2)})); EXPECT_EQ(1, test_server_->counter("load_reporter.requests")->value()); // On slow machines, more than one load stats response may be pushed while we are simulating load. 
@@ -545,7 +570,7 @@ TEST_P(LoadStatsIntegrationTest, Error) { initialize(); waitForLoadStatsStream(); - waitForLoadStatsRequest({}); + ASSERT_TRUE(waitForLoadStatsRequest({})); loadstats_stream_->startGrpcStream(); requestLoadStatsResponse({"cluster_0"}); @@ -557,7 +582,7 @@ TEST_P(LoadStatsIntegrationTest, Error) { // This should count as "success" since non-5xx. sendAndReceiveUpstream(0, 404); - waitForLoadStatsRequest({localityStats("winter", 1, 1, 0, 2)}); + ASSERT_TRUE(waitForLoadStatsRequest({localityStats("winter", 1, 1, 0, 2)})); EXPECT_EQ(1, test_server_->counter("load_reporter.requests")->value()); EXPECT_LE(2, test_server_->counter("load_reporter.responses")->value()); @@ -571,13 +596,13 @@ TEST_P(LoadStatsIntegrationTest, InProgress) { initialize(); waitForLoadStatsStream(); - waitForLoadStatsRequest({}); + ASSERT_TRUE(waitForLoadStatsRequest({})); loadstats_stream_->startGrpcStream(); updateClusterLoadAssignment({{0}}, {}, {}, {}); requestLoadStatsResponse({"cluster_0"}); initiateClientConnection(); - waitForLoadStatsRequest({localityStats("winter", 0, 0, 1, 1)}); + ASSERT_TRUE(waitForLoadStatsRequest({localityStats("winter", 0, 0, 1, 1)})); waitForUpstreamResponse(0, 503); cleanupUpstreamAndDownstream(); @@ -599,7 +624,7 @@ TEST_P(LoadStatsIntegrationTest, Dropped) { initialize(); waitForLoadStatsStream(); - waitForLoadStatsRequest({}); + ASSERT_TRUE(waitForLoadStatsRequest({})); loadstats_stream_->startGrpcStream(); updateClusterLoadAssignment({{0}}, {}, {}, {}); @@ -608,10 +633,10 @@ TEST_P(LoadStatsIntegrationTest, Dropped) { initiateClientConnection(); response_->waitForEndStream(); ASSERT_TRUE(response_->complete()); - EXPECT_EQ("503", response_->headers().Status()->value().getStringView()); + EXPECT_EQ("503", response_->headers().getStatusValue()); cleanupUpstreamAndDownstream(); - waitForLoadStatsRequest({}, 1); + ASSERT_TRUE(waitForLoadStatsRequest({}, 1)); EXPECT_EQ(1, test_server_->counter("load_reporter.requests")->value()); EXPECT_LE(2, 
test_server_->counter("load_reporter.responses")->value()); diff --git a/test/integration/local_reply_integration_test.cc b/test/integration/local_reply_integration_test.cc new file mode 100644 index 0000000000000..dacd7fcad033b --- /dev/null +++ b/test/integration/local_reply_integration_test.cc @@ -0,0 +1,410 @@ +#include "test/integration/http_protocol_integration.h" +#include "test/test_common/utility.h" + +namespace Envoy { + +class LocalReplyIntegrationTest : public HttpProtocolIntegrationTest { +public: + void initialize() override { HttpProtocolIntegrationTest::initialize(); } + + void setLocalReplyConfig(const std::string& yaml) { + envoy::extensions::filters::network::http_connection_manager::v3::LocalReplyConfig + local_reply_config; + TestUtility::loadFromYaml(yaml, local_reply_config); + config_helper_.setLocalReply(local_reply_config); + } +}; + +INSTANTIATE_TEST_SUITE_P(Protocols, LocalReplyIntegrationTest, + testing::ValuesIn(HttpProtocolIntegrationTest::getProtocolTestParams()), + HttpProtocolIntegrationTest::protocolTestParamsToString); + +TEST_P(LocalReplyIntegrationTest, MapStatusCodeAndFormatToJson) { + const std::string yaml = R"EOF( +mappers: + - filter: + header_filter: + header: + name: test-header + exact_match: exact-match-value + status_code: 550 + headers_to_add: + - header: + key: foo + value: bar + append: false +body_format: + json_format: + level: TRACE + user_agent: "%REQ(USER-AGENT)%" + response_body: "%LOCAL_REPLY_BODY%" + )EOF"; + setLocalReplyConfig(yaml); + initialize(); + + const std::string expected_body = R"({ + "level": "TRACE", + "user_agent": null, + "response_body": "upstream connect error or disconnect/reset before headers. 
reset reason: connection termination" +})"; + + codec_client_ = makeHttpConnection(lookupPort("http")); + + auto encoder_decoder = codec_client_->startRequest( + Http::TestRequestHeaderMapImpl{{":method", "POST"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}, + {"test-header", "exact-match-value"}}); + auto response = std::move(encoder_decoder.second); + + ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); + + ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); + ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); + ASSERT_TRUE(fake_upstream_connection_->close()); + ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); + response->waitForEndStream(); + + if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { + ASSERT_TRUE(codec_client_->waitForDisconnect()); + } else { + codec_client_->close(); + } + + EXPECT_FALSE(upstream_request_->complete()); + EXPECT_EQ(0U, upstream_request_->bodyLength()); + + EXPECT_TRUE(response->complete()); + EXPECT_EQ("application/json", response->headers().ContentType()->value().getStringView()); + EXPECT_EQ("150", response->headers().ContentLength()->value().getStringView()); + EXPECT_EQ("550", response->headers().Status()->value().getStringView()); + EXPECT_EQ("bar", response->headers().get(Http::LowerCaseString("foo"))->value().getStringView()); + // Check if returned json is same as expected + EXPECT_TRUE(TestUtility::jsonStringEqual(response->body(), expected_body)); +} + +// For grpc, the error message is in grpc-message header. +// If it is json, the header value is in json format. 
+TEST_P(LocalReplyIntegrationTest, MapStatusCodeAndFormatToJson4Grpc) { + const std::string yaml = R"EOF( +body_format: + json_format: + code: "%RESPONSE_CODE%" + message: "%LOCAL_REPLY_BODY%" +)EOF"; + setLocalReplyConfig(yaml); + initialize(); + + const std::string expected_grpc_message = R"({ + "code": 503, + "message":"upstream connect error or disconnect/reset before headers. reset reason: connection termination" +})"; + + codec_client_ = makeHttpConnection(lookupPort("http")); + + auto encoder_decoder = codec_client_->startRequest( + Http::TestRequestHeaderMapImpl{{":method", "POST"}, + {":path", "/package.service/method"}, + {":scheme", "http"}, + {":authority", "host"}, + {"content-type", "application/grpc"}}); + auto response = std::move(encoder_decoder.second); + + ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); + + ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); + ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); + ASSERT_TRUE(fake_upstream_connection_->close()); + ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); + response->waitForEndStream(); + + if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { + ASSERT_TRUE(codec_client_->waitForDisconnect()); + } else { + codec_client_->close(); + } + + EXPECT_FALSE(upstream_request_->complete()); + EXPECT_EQ(0U, upstream_request_->bodyLength()); + + EXPECT_TRUE(response->complete()); + EXPECT_EQ("application/grpc", response->headers().ContentType()->value().getStringView()); + EXPECT_EQ("14", response->headers().GrpcStatus()->value().getStringView()); + // Check if grpc-message value is same as expected + EXPECT_TRUE(TestUtility::jsonStringEqual( + std::string(response->headers().GrpcMessage()->value().getStringView()), + expected_grpc_message)); +} + +// Matched second filter has code, headers and body rewrite and its format +TEST_P(LocalReplyIntegrationTest, 
MapStatusCodeAndFormatToJsonForFirstMatchingFilter) { + const std::string yaml = R"EOF( +mappers: + - filter: + header_filter: + header: + name: test-header + exact_match: exact-match-value-1 + status_code: 550 + - filter: + header_filter: + header: + name: test-header + exact_match: exact-match-value + status_code: 551 + headers_to_add: + - header: + key: foo + value: bar + append: false + body: + inline_string: "customized body text" + body_format_override: + text_format: "%LOCAL_REPLY_BODY% %RESPONSE_CODE%" + - filter: + header_filter: + header: + name: test-header + exact_match: exact-match-value + status_code: 552 +body_format: + json_format: + level: TRACE + response_flags: "%RESPONSE_FLAGS%" + response_body: "%LOCAL_REPLY_BODY%" + )EOF"; + setLocalReplyConfig(yaml); + initialize(); + + const std::string expected_body = "customized body text 551"; + + codec_client_ = makeHttpConnection(lookupPort("http")); + + auto encoder_decoder = codec_client_->startRequest( + Http::TestRequestHeaderMapImpl{{":method", "POST"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}, + {"test-header", "exact-match-value"}}); + auto response = std::move(encoder_decoder.second); + + ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); + + ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); + ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); + ASSERT_TRUE(fake_upstream_connection_->close()); + ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); + response->waitForEndStream(); + + if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { + ASSERT_TRUE(codec_client_->waitForDisconnect()); + } else { + codec_client_->close(); + } + + EXPECT_FALSE(upstream_request_->complete()); + EXPECT_EQ(0U, upstream_request_->bodyLength()); + + EXPECT_TRUE(response->complete()); + EXPECT_EQ("text/plain", response->headers().ContentType()->value().getStringView()); + 
EXPECT_EQ("24", response->headers().ContentLength()->value().getStringView()); + EXPECT_EQ("551", response->headers().Status()->value().getStringView()); + EXPECT_EQ("bar", response->headers().get(Http::LowerCaseString("foo"))->value().getStringView()); + // Check if returned json is same as expected + EXPECT_EQ(response->body(), expected_body); +} + +// Not matching any filters. +TEST_P(LocalReplyIntegrationTest, ShouldNotMatchAnyFilter) { + const std::string yaml = R"EOF( +mappers: + - filter: + header_filter: + header: + name: test-header + exact_match: exact-match-value-1 + status_code: 550 + - filter: + header_filter: + header: + name: test-header + exact_match: exact-match-value-2 + status_code: 551 + - filter: + header_filter: + header: + name: test-header + exact_match: exact-match-value-3 + status_code: 552 +body_format: + json_format: + level: TRACE + response_flags: "%RESPONSE_FLAGS%" + response_body: "%LOCAL_REPLY_BODY%" + )EOF"; + setLocalReplyConfig(yaml); + initialize(); + + const std::string expected_body = R"({ + "level": "TRACE", + "response_flags": "UC", + "response_body": "upstream connect error or disconnect/reset before headers. 
reset reason: connection termination" +})"; + + codec_client_ = makeHttpConnection(lookupPort("http")); + + auto encoder_decoder = codec_client_->startRequest( + Http::TestRequestHeaderMapImpl{{":method", "POST"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}, + {"test-header", "exact-match-value"}}); + auto response = std::move(encoder_decoder.second); + + ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); + + ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); + ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); + ASSERT_TRUE(fake_upstream_connection_->close()); + ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); + response->waitForEndStream(); + + if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { + ASSERT_TRUE(codec_client_->waitForDisconnect()); + } else { + codec_client_->close(); + } + + EXPECT_FALSE(upstream_request_->complete()); + EXPECT_EQ(0U, upstream_request_->bodyLength()); + + EXPECT_TRUE(response->complete()); + EXPECT_EQ("application/json", response->headers().ContentType()->value().getStringView()); + EXPECT_EQ("154", response->headers().ContentLength()->value().getStringView()); + EXPECT_EQ("503", response->headers().Status()->value().getStringView()); + // Check if returned json is same as expected + EXPECT_TRUE(TestUtility::jsonStringEqual(response->body(), expected_body)); +} + +// Use default formatter. 
+TEST_P(LocalReplyIntegrationTest, ShouldMapResponseCodeAndMapToDefaultTextResponse) { + const std::string yaml = R"EOF( +mappers: + - filter: + header_filter: + header: + name: test-header + exact_match: exact-match-value-1 + status_code: 550 + - filter: + header_filter: + header: + name: test-header + exact_match: exact-match-value-2 + status_code: 551 + - filter: + header_filter: + header: + name: test-header + exact_match: exact-match-value-3 + status_code: 552 + )EOF"; + setLocalReplyConfig(yaml); + initialize(); + + codec_client_ = makeHttpConnection(lookupPort("http")); + + auto encoder_decoder = codec_client_->startRequest( + Http::TestRequestHeaderMapImpl{{":method", "POST"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}, + {"test-header", "exact-match-value-2"}}); + auto response = std::move(encoder_decoder.second); + + ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); + + ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); + ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); + ASSERT_TRUE(fake_upstream_connection_->close()); + ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); + response->waitForEndStream(); + + if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { + ASSERT_TRUE(codec_client_->waitForDisconnect()); + } else { + codec_client_->close(); + } + + EXPECT_FALSE(upstream_request_->complete()); + EXPECT_EQ(0U, upstream_request_->bodyLength()); + + EXPECT_TRUE(response->complete()); + EXPECT_EQ("text/plain", response->headers().ContentType()->value().getStringView()); + EXPECT_EQ("95", response->headers().ContentLength()->value().getStringView()); + + EXPECT_EQ("551", response->headers().Status()->value().getStringView()); + + EXPECT_EQ(response->body(), "upstream connect error or disconnect/reset before headers. 
reset " + "reason: connection termination"); +} + +// Should return formatted text/plain response. +TEST_P(LocalReplyIntegrationTest, ShouldFormatResponseToCustomString) { + const std::string yaml = R"EOF( +mappers: +- filter: + status_code_filter: + comparison: + op: EQ + value: + default_value: 503 + runtime_key: key_b + status_code: 513 + body: + inline_string: "customized body text" +body_format: + text_format: "%RESPONSE_CODE% - %LOCAL_REPLY_BODY%" +)EOF"; + setLocalReplyConfig(yaml); + initialize(); + + codec_client_ = makeHttpConnection(lookupPort("http")); + + auto encoder_decoder = codec_client_->startRequest( + Http::TestRequestHeaderMapImpl{{":method", "POST"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}, + {"test-header", "exact-match-value-2"}}); + auto response = std::move(encoder_decoder.second); + + ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); + + ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); + ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); + ASSERT_TRUE(fake_upstream_connection_->close()); + ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); + response->waitForEndStream(); + + if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { + ASSERT_TRUE(codec_client_->waitForDisconnect()); + } else { + codec_client_->close(); + } + + EXPECT_FALSE(upstream_request_->complete()); + EXPECT_EQ(0U, upstream_request_->bodyLength()); + + EXPECT_TRUE(response->complete()); + + EXPECT_EQ("text/plain", response->headers().ContentType()->value().getStringView()); + EXPECT_EQ("26", response->headers().ContentLength()->value().getStringView()); + + EXPECT_EQ("513", response->headers().Status()->value().getStringView()); + + EXPECT_EQ(response->body(), "513 - customized body text"); +} + +} // namespace Envoy diff --git a/test/integration/overload_integration_test.cc b/test/integration/overload_integration_test.cc 
index b8406f587c5e6..6ded479c42120 100644 --- a/test/integration/overload_integration_test.cc +++ b/test/integration/overload_integration_test.cc @@ -75,7 +75,7 @@ TEST_P(OverloadIntegrationTest, CloseStreamsWhenOverloaded) { response->waitForEndStream(); EXPECT_TRUE(response->complete()); - EXPECT_EQ("503", response->headers().Status()->value().getStringView()); + EXPECT_EQ("503", response->headers().getStatusValue()); EXPECT_EQ("envoy overloaded", response->body()); codec_client_->close(); @@ -84,7 +84,7 @@ TEST_P(OverloadIntegrationTest, CloseStreamsWhenOverloaded) { response->waitForEndStream(); EXPECT_TRUE(response->complete()); - EXPECT_EQ("503", response->headers().Status()->value().getStringView()); + EXPECT_EQ("503", response->headers().getStatusValue()); EXPECT_EQ("envoy overloaded", response->body()); codec_client_->close(); @@ -98,7 +98,7 @@ TEST_P(OverloadIntegrationTest, CloseStreamsWhenOverloaded) { EXPECT_TRUE(upstream_request_->complete()); EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ(0U, response->body().size()); } @@ -118,11 +118,11 @@ TEST_P(OverloadIntegrationTest, DisableKeepaliveWhenOverloaded) { Http::TestRequestHeaderMapImpl request_headers{ {":method", "GET"}, {":path", "/test/long/url"}, {":scheme", "http"}, {":authority", "host"}}; auto response = sendRequestAndWaitForResponse(request_headers, 1, default_response_headers_, 1); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); - EXPECT_EQ("close", response->headers().Connection()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); + EXPECT_EQ("close", response->headers().getConnectionValue()); // Deactivate overload state and check 
that keepalive is not disabled updateResource(0.7); @@ -132,7 +132,7 @@ TEST_P(OverloadIntegrationTest, DisableKeepaliveWhenOverloaded) { response = sendRequestAndWaitForResponse(request_headers, 1, default_response_headers_, 1); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ(nullptr, response->headers().Connection()); } @@ -159,7 +159,7 @@ TEST_P(OverloadIntegrationTest, StopAcceptingConnectionsWhenOverloaded) { response->waitForEndStream(); EXPECT_TRUE(response->complete()); - EXPECT_EQ("503", response->headers().Status()->value().getStringView()); + EXPECT_EQ("503", response->headers().getStatusValue()); EXPECT_EQ("envoy overloaded", response->body()); codec_client_->close(); } diff --git a/test/integration/protocol_integration_test.cc b/test/integration/protocol_integration_test.cc index f3251638f4a7a..941fd92a603f9 100644 --- a/test/integration/protocol_integration_test.cc +++ b/test/integration/protocol_integration_test.cc @@ -124,7 +124,7 @@ TEST_P(DownstreamProtocolIntegrationTest, RouterClusterNotFound404) { BufferingStreamDecoderPtr response = IntegrationUtil::makeSingleRequest( lookupPort("http"), "GET", "/unknown", "", downstream_protocol_, version_, "foo.com"); ASSERT_TRUE(response->complete()); - EXPECT_EQ("404", response->headers().Status()->value().getStringView()); + EXPECT_EQ("404", response->headers().getStatusValue()); } // Add a route that uses unknown cluster (expect 503 Service Unavailable). 
@@ -139,7 +139,7 @@ TEST_P(DownstreamProtocolIntegrationTest, RouterClusterNotFound503) { BufferingStreamDecoderPtr response = IntegrationUtil::makeSingleRequest( lookupPort("http"), "GET", "/unknown", "", downstream_protocol_, version_, "foo.com"); ASSERT_TRUE(response->complete()); - EXPECT_EQ("503", response->headers().Status()->value().getStringView()); + EXPECT_EQ("503", response->headers().getStatusValue()); } // Add a route which redirects HTTP to HTTPS, and verify Envoy sends a 301 @@ -152,11 +152,23 @@ TEST_P(ProtocolIntegrationTest, RouterRedirect) { BufferingStreamDecoderPtr response = IntegrationUtil::makeSingleRequest( lookupPort("http"), "GET", "/foo", "", downstream_protocol_, version_, "www.redirect.com"); ASSERT_TRUE(response->complete()); - EXPECT_EQ("301", response->headers().Status()->value().getStringView()); + EXPECT_EQ("301", response->headers().getStatusValue()); EXPECT_EQ("https://www.redirect.com/foo", response->headers().get(Http::Headers::get().Location)->value().getStringView()); } +TEST_P(ProtocolIntegrationTest, UnknownResponsecode) { + initialize(); + + codec_client_ = makeHttpConnection(lookupPort("http")); + + Http::TestResponseHeaderMapImpl response_headers{{":status", "600"}}; + auto response = sendRequestAndWaitForResponse(default_request_headers_, 0, response_headers, 0); + + ASSERT_TRUE(response->complete()); + EXPECT_EQ("600", response->headers().getStatusValue()); +} + // Add a health check filter and verify correct computation of health based on upstream status. TEST_P(ProtocolIntegrationTest, ComputedHealthCheck) { config_helper_.addFilter(R"EOF( @@ -175,7 +187,7 @@ name: health_check response->waitForEndStream(); EXPECT_TRUE(response->complete()); - EXPECT_EQ("503", response->headers().Status()->value().getStringView()); + EXPECT_EQ("503", response->headers().getStatusValue()); } // Add a health check filter and verify correct computation of health based on upstream status. 
@@ -196,7 +208,54 @@ name: health_check response->waitForEndStream(); EXPECT_TRUE(response->complete()); - EXPECT_EQ("503", response->headers().Status()->value().getStringView()); + EXPECT_EQ("503", response->headers().getStatusValue()); +} + +// Verifies behavior for https://github.com/envoyproxy/envoy/pull/11248 +TEST_P(ProtocolIntegrationTest, AddBodyToRequestAndWaitForIt) { + // filters are prepended, so add them in reverse order + config_helper_.addFilter(R"EOF( + name: wait-for-whole-request-and-response-filter + )EOF"); + config_helper_.addFilter(R"EOF( + name: add-body-filter + )EOF"); + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + + auto response = codec_client_->makeHeaderOnlyRequest(default_request_headers_); + waitForNextUpstreamRequest(); + EXPECT_EQ("body", upstream_request_->body().toString()); + upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, false); + // encode data, as we have a separate test for the transforming header only response. 
+ upstream_request_->encodeData(128, true); + response->waitForEndStream(); + + EXPECT_TRUE(upstream_request_->complete()); + EXPECT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().getStatusValue()); +} + +TEST_P(ProtocolIntegrationTest, AddBodyToResponseAndWaitForIt) { + // filters are prepended, so add them in reverse order + config_helper_.addFilter(R"EOF( + name: add-body-filter + )EOF"); + config_helper_.addFilter(R"EOF( + name: wait-for-whole-request-and-response-filter + )EOF"); + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + + auto response = codec_client_->makeRequestWithBody(default_request_headers_, 128); + waitForNextUpstreamRequest(); + upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, true); + response->waitForEndStream(); + + EXPECT_TRUE(upstream_request_->complete()); + EXPECT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().getStatusValue()); + EXPECT_EQ("body", response->body()); } TEST_P(ProtocolIntegrationTest, AddEncodedTrailers) { @@ -221,49 +280,65 @@ name: add-trailers-filter .getStringView()); } EXPECT_TRUE(response->complete()); - EXPECT_EQ("503", response->headers().Status()->value().getStringView()); + EXPECT_EQ("503", response->headers().getStatusValue()); if (downstream_protocol_ == Http::CodecClient::Type::HTTP2) { - EXPECT_EQ("encode", response->trailers()->GrpcMessage()->value().getStringView()); + EXPECT_EQ("encode", response->trailers()->getGrpcMessageValue()); } } -// Add a health check filter and verify correct behavior when draining. 
-TEST_P(ProtocolIntegrationTest, DrainClose) { - config_helper_.addFilter(ConfigHelper::defaultHealthCheckFilter()); +// Regression test for https://github.com/envoyproxy/envoy/issues/9873 +TEST_P(ProtocolIntegrationTest, ResponseWithHostHeader) { initialize(); - - test_server_->drainManager().draining_ = true; codec_client_ = makeHttpConnection(lookupPort("http")); - auto response = codec_client_->makeHeaderOnlyRequest(default_request_headers_); + auto response = codec_client_->makeHeaderOnlyRequest( + Http::TestRequestHeaderMapImpl{{":method", "GET"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}}); + waitForNextUpstreamRequest(); + upstream_request_->encodeHeaders( + Http::TestResponseHeaderMapImpl{{":status", "200"}, {"host", "host"}}, true); response->waitForEndStream(); - codec_client_->waitForDisconnect(); - EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); - if (downstream_protocol_ == Http::CodecClient::Type::HTTP2) { - EXPECT_TRUE(codec_client_->sawGoAway()); - } - - test_server_->drainManager().draining_ = false; + EXPECT_EQ("200", response->headers().getStatusValue()); + EXPECT_EQ("host", + response->headers().get(Http::LowerCaseString("host"))->value().getStringView()); } -// Regression test for https://github.com/envoyproxy/envoy/issues/9873 -TEST_P(ProtocolIntegrationTest, ResponseWithHostHeader) { +// Regression test for https://github.com/envoyproxy/envoy/issues/10270 +TEST_P(ProtocolIntegrationTest, LongHeaderValueWithSpaces) { + // Header with at least 20kb of spaces surrounded by non-whitespace characters to ensure that + // dispatching is split across 2 dispatch calls. This threshold comes from Envoy preferring 16KB + // reads, which the buffer rounds up to about 20KB when allocating slices in + // Buffer::OwnedImpl::reserve(). 
+ const std::string long_header_value_with_inner_lws = "v" + std::string(32 * 1024, ' ') + "v"; + initialize(); codec_client_ = makeHttpConnection(lookupPort("http")); auto response = codec_client_->makeHeaderOnlyRequest( Http::TestRequestHeaderMapImpl{{":method", "GET"}, {":path", "/test/long/url"}, {":scheme", "http"}, - {":authority", "host"}}); + {":authority", "host"}, + {"longrequestvalue", long_header_value_with_inner_lws}}); waitForNextUpstreamRequest(); + EXPECT_EQ(long_header_value_with_inner_lws, upstream_request_->headers() + .get(Http::LowerCaseString("longrequestvalue")) + ->value() + .getStringView()); upstream_request_->encodeHeaders( - Http::TestResponseHeaderMapImpl{{":status", "200"}, {"host", "host"}}, true); + Http::TestResponseHeaderMapImpl{{":status", "200"}, + {"host", "host"}, + {"longresponsevalue", long_header_value_with_inner_lws}}, + true); response->waitForEndStream(); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ("host", response->headers().get(Http::LowerCaseString("host"))->value().getStringView()); + EXPECT_EQ( + long_header_value_with_inner_lws, + response->headers().get(Http::LowerCaseString("longresponsevalue"))->value().getStringView()); } TEST_P(ProtocolIntegrationTest, Retry) { @@ -295,8 +370,19 @@ TEST_P(ProtocolIntegrationTest, Retry) { EXPECT_EQ(1024U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ(512U, response->body().size()); + Stats::Store& stats = test_server_->server().stats(); + if (upstreamProtocol() == FakeHttpConnection::Type::HTTP2) { + Stats::CounterSharedPtr counter = + TestUtility::findCounter(stats, "cluster.cluster_0.http2.tx_reset"); + ASSERT_NE(nullptr, counter); + EXPECT_EQ(1L, counter->value()); + } else { + 
Stats::CounterSharedPtr counter = + TestUtility::findCounter(stats, "cluster.cluster_0.http1.dropped_headers_with_underscores"); + EXPECT_NE(nullptr, counter); + } } TEST_P(ProtocolIntegrationTest, RetryStreaming) { @@ -348,7 +434,70 @@ TEST_P(ProtocolIntegrationTest, RetryStreaming) { EXPECT_EQ(combined_request_data.size(), upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); + EXPECT_EQ(512U, response->body().size()); +} + +// Regression test https://github.com/envoyproxy/envoy/issues/11131 +// Send complete response headers directing a retry and reset the stream to make +// sure that Envoy cleans up stream state correctly when doing a retry with +// complete response but incomplete request. +TEST_P(ProtocolIntegrationTest, RetryStreamingReset) { + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + auto encoder_decoder = + codec_client_->startRequest(Http::TestRequestHeaderMapImpl{{":method", "POST"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}, + {"x-forwarded-for", "10.0.0.1"}, + {"x-envoy-retry-on", "5xx"}}); + auto& encoder = encoder_decoder.first; + auto& response = encoder_decoder.second; + + // Send some data, but not the entire body. + std::string data(1024, 'a'); + Buffer::OwnedImpl send1(data); + encoder.encodeData(send1, false); + + ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); + ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); + + // Send back an upstream failure and end stream. Make sure an immediate reset + // doesn't cause problems. Schedule via the upstream_request_ dispatcher to ensure that the stream + // still exists when encoding the reset stream. 
+ upstream_request_->postToConnectionThread([this]() { + upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "503"}}, true); + upstream_request_->encodeResetStream(); + }); + + // Make sure the fake stream is reset. + if (fake_upstreams_[0]->httpType() == FakeHttpConnection::Type::HTTP1) { + ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); + ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); + } else { + ASSERT_TRUE(upstream_request_->waitForReset()); + } + + // Wait for a retry. Ensure all data, both before and after the retry, is received. + ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); + + // Finish the request. + std::string data2(512, 'b'); + Buffer::OwnedImpl send2(data2); + encoder.encodeData(send2, true); + std::string combined_request_data = data + data2; + ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, combined_request_data)); + + upstream_request_->encodeHeaders(default_response_headers_, false); + upstream_request_->encodeData(512, true); + + response->waitForEndStream(); + EXPECT_TRUE(upstream_request_->complete()); + EXPECT_EQ(combined_request_data.size(), upstream_request_->bodyLength()); + + EXPECT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ(512U, response->body().size()); } @@ -406,7 +555,7 @@ TEST_P(ProtocolIntegrationTest, RetryStreamingCancelDueToBufferOverflow) { response->waitForEndStream(); EXPECT_TRUE(response->complete()); - EXPECT_EQ("507", response->headers().Status()->value().getStringView()); + EXPECT_EQ("507", response->headers().getStatusValue()); test_server_->waitForCounterEq("cluster.cluster_0.retry_or_shadow_abandoned", 1); } @@ -430,10 +579,7 @@ TEST_P(DownstreamProtocolIntegrationTest, RetryAttemptCountHeader) { waitForNextUpstreamRequest(); upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "503"}}, false); - 
EXPECT_EQ( - atoi(std::string(upstream_request_->headers().EnvoyAttemptCount()->value().getStringView()) - .c_str()), - 1); + EXPECT_EQ(atoi(std::string(upstream_request_->headers().getEnvoyAttemptCountValue()).c_str()), 1); if (fake_upstreams_[0]->httpType() == FakeHttpConnection::Type::HTTP1) { ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); @@ -442,10 +588,7 @@ TEST_P(DownstreamProtocolIntegrationTest, RetryAttemptCountHeader) { ASSERT_TRUE(upstream_request_->waitForReset()); } waitForNextUpstreamRequest(); - EXPECT_EQ( - atoi(std::string(upstream_request_->headers().EnvoyAttemptCount()->value().getStringView()) - .c_str()), - 2); + EXPECT_EQ(atoi(std::string(upstream_request_->headers().getEnvoyAttemptCountValue()).c_str()), 2); upstream_request_->encodeHeaders(default_response_headers_, false); upstream_request_->encodeData(512, true); @@ -454,11 +597,9 @@ TEST_P(DownstreamProtocolIntegrationTest, RetryAttemptCountHeader) { EXPECT_EQ(1024U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ(512U, response->body().size()); - EXPECT_EQ( - 2, - atoi(std::string(response->headers().EnvoyAttemptCount()->value().getStringView()).c_str())); + EXPECT_EQ(2, atoi(std::string(response->headers().getEnvoyAttemptCountValue()).c_str())); } // Verifies that a retry priority can be configured and affect the host selected during retries. 
@@ -527,7 +668,7 @@ TEST_P(DownstreamProtocolIntegrationTest, RetryPriority) { EXPECT_EQ(1024U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ(512U, response->body().size()); } @@ -591,7 +732,7 @@ TEST_P(DownstreamProtocolIntegrationTest, RetryHostPredicateFilter) { EXPECT_EQ(1024U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ(512U, response->body().size()); } @@ -619,7 +760,7 @@ TEST_P(ProtocolIntegrationTest, RetryHittingBufferLimit) { EXPECT_EQ(66560U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("503", response->headers().Status()->value().getStringView()); + EXPECT_EQ("503", response->headers().getStatusValue()); } // Very similar set-up to RetryHittingBufferLimits but using the route specific cap. @@ -647,7 +788,7 @@ TEST_P(ProtocolIntegrationTest, RetryHittingRouteLimits) { EXPECT_EQ(1U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("503", response->headers().Status()->value().getStringView()); + EXPECT_EQ("503", response->headers().getStatusValue()); } // Test hitting the decoder buffer filter with too many request bytes to buffer. 
Ensure the @@ -681,7 +822,7 @@ TEST_P(DownstreamProtocolIntegrationTest, HittingDecoderFilterLimit) { ASSERT_TRUE(response->complete()); } if (response->complete()) { - EXPECT_EQ("413", response->headers().Status()->value().getStringView()); + EXPECT_EQ("413", response->headers().getStatusValue()); } } @@ -720,7 +861,7 @@ TEST_P(ProtocolIntegrationTest, HittingEncoderFilterLimit) { response->waitForEndStream(); EXPECT_TRUE(response->complete()); - EXPECT_EQ("500", response->headers().Status()->value().getStringView()); + EXPECT_EQ("500", response->headers().getStatusValue()); // Regression test https://github.com/envoyproxy/envoy/issues/9881 by making // sure this path does standard HCM header transformations. EXPECT_TRUE(response->headers().Date() != nullptr); @@ -734,18 +875,36 @@ TEST_P(ProtocolIntegrationTest, EnvoyHandlingDuplicate100Continue) { testEnvoyHandling100Continue(true); } -TEST_P(ProtocolIntegrationTest, EnvoyProxyingEarly100Continue) { - testEnvoyProxying100Continue(true); +// 100-continue before the request completes. +TEST_P(ProtocolIntegrationTest, EnvoyProxyingEarly100Continue) { testEnvoyProxying1xx(true); } + +// Multiple 1xx before the request completes. +TEST_P(ProtocolIntegrationTest, EnvoyProxyingEarlyMultiple1xx) { + testEnvoyProxying1xx(true, false, true); } -TEST_P(ProtocolIntegrationTest, EnvoyProxyingLate100Continue) { - testEnvoyProxying100Continue(false); +// 100-continue after the request completes. +TEST_P(ProtocolIntegrationTest, EnvoyProxyingLate100Continue) { testEnvoyProxying1xx(false); } + +// Multiple 1xx after the request completes. 
+TEST_P(ProtocolIntegrationTest, EnvoyProxyingLateMultiple1xx) { + testEnvoyProxying1xx(false, false, true); } TEST_P(ProtocolIntegrationTest, TwoRequests) { testTwoRequests(); } TEST_P(ProtocolIntegrationTest, TwoRequestsWithForcedBackup) { testTwoRequests(true); } +TEST_P(ProtocolIntegrationTest, BasicMaxStreamDuration) { testMaxStreamDuration(); } + +TEST_P(ProtocolIntegrationTest, MaxStreamDurationWithRetryPolicy) { + testMaxStreamDurationWithRetry(false); +} + +TEST_P(ProtocolIntegrationTest, MaxStreamDurationWithRetryPolicyWhenRetryUpstreamDisconnection) { + testMaxStreamDurationWithRetry(true); +} + // Verify that headers with underscores in their names are dropped from client requests // but remain in upstream responses. TEST_P(ProtocolIntegrationTest, HeadersWithUnderscoresDropped) { @@ -770,8 +929,13 @@ TEST_P(ProtocolIntegrationTest, HeadersWithUnderscoresDropped) { Http::TestResponseHeaderMapImpl{{":status", "200"}, {"bar_baz", "fooz"}}, true); response->waitForEndStream(); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_THAT(response->headers(), HeaderHasValueRef("bar_baz", "fooz")); + Stats::Store& stats = test_server_->server().stats(); + std::string stat_name = (downstreamProtocol() == Http::CodecClient::Type::HTTP1) + ? 
"http1.dropped_headers_with_underscores" + : "http2.dropped_headers_with_underscores"; + EXPECT_EQ(1L, TestUtility::findCounter(stats, stat_name)->value()); } // Verify that by default headers with underscores in their names remain in both requests and @@ -792,12 +956,13 @@ TEST_P(ProtocolIntegrationTest, HeadersWithUnderscoresRemainByDefault) { Http::TestResponseHeaderMapImpl{{":status", "200"}, {"bar_baz", "fooz"}}, true); response->waitForEndStream(); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_THAT(response->headers(), HeaderHasValueRef("bar_baz", "fooz")); } // Verify that request with headers containing underscores is rejected when configured. TEST_P(ProtocolIntegrationTest, HeadersWithUnderscoresCauseRequestRejectedByDefault) { + useAccessLog("%RESPONSE_FLAGS% %RESPONSE_CODE_DETAILS%"); config_helper_.addConfigModifier( [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& hcm) -> void { @@ -814,15 +979,16 @@ TEST_P(ProtocolIntegrationTest, HeadersWithUnderscoresCauseRequestRejectedByDefa {"foo_bar", "baz"}}); if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); ASSERT_TRUE(response->complete()); - EXPECT_EQ("400", response->headers().Status()->value().getStringView()); + EXPECT_EQ("400", response->headers().getStatusValue()); } else { response->waitForReset(); codec_client_->close(); ASSERT_TRUE(response->reset()); - EXPECT_EQ(Http::StreamResetReason::RemoteReset, response->reset_reason()); + EXPECT_EQ(Http::StreamResetReason::RemoteReset, response->resetReason()); } + EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr("unexpected_underscore")); } TEST_P(DownstreamProtocolIntegrationTest, ValidZeroLengthContent) { @@ -838,7 +1004,56 @@ TEST_P(DownstreamProtocolIntegrationTest, 
ValidZeroLengthContent) { auto response = sendRequestAndWaitForResponse(request_headers, 0, default_response_headers_, 0); ASSERT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); +} + +// Test we're following https://tools.ietf.org/html/rfc7230#section-3.3.2 +// as best we can. +TEST_P(ProtocolIntegrationTest, 304WithBody) { + initialize(); + + codec_client_ = makeHttpConnection(lookupPort("http")); + + if (upstreamProtocol() == FakeHttpConnection::Type::HTTP1) { + // The invalid data will trigger disconnect. + fake_upstreams_[0]->set_allow_unexpected_disconnects(true); + } + auto response = codec_client_->makeHeaderOnlyRequest(default_request_headers_); + + waitForNextUpstreamRequest(); + Http::TestResponseHeaderMapImpl response_headers{{":status", "304"}, {"content-length", "2"}}; + ASSERT(upstream_request_ != nullptr); + upstream_request_->encodeHeaders(response_headers, false); + response->waitForHeaders(); + EXPECT_EQ("304", response->headers().getStatusValue()); + + // For HTTP/1.1 http_parser is explicitly told that 304s are header-only + // requests. + if (downstream_protocol_ == Http::CodecClient::Type::HTTP1 || + upstreamProtocol() == FakeHttpConnection::Type::HTTP1) { + ASSERT_TRUE(response->complete()); + } else { + ASSERT_FALSE(response->complete()); + } + + upstream_request_->encodeData(2, true); + if (upstreamProtocol() == FakeHttpConnection::Type::HTTP1) { + // Any body sent after the request is considered complete will not be handled as part of the + // active request, but will be flagged as a protocol error for the no-longer-associated + // connection. + // Ideally if we got the body with the headers we would instead reset the + // stream, but it turns out that's complicated so instead we consistently + // forward the headers and error out after. 
+ test_server_->waitForCounterGe("cluster.cluster_0.upstream_cx_protocol_error", 1); + } + + // Only for HTTP/2, where streams are ended with an explicit end-stream so we + // can differentiate between 304-with-advertised-but-absent-body and + // 304-with-body, is there a protocol error on the active stream. + if (downstream_protocol_ == Http::CodecClient::Type::HTTP2 && + upstreamProtocol() == FakeHttpConnection::Type::HTTP2) { + response->waitForReset(); + } } // Validate that lots of tiny cookies doesn't cause a DoS (single cookie header). @@ -860,7 +1075,7 @@ TEST_P(DownstreamProtocolIntegrationTest, LargeCookieParsingConcatenated) { auto response = sendRequestAndWaitForResponse(request_headers, 0, default_response_headers_, 0); ASSERT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } // Validate that lots of tiny cookies doesn't cause a DoS (many cookie headers). @@ -889,7 +1104,7 @@ TEST_P(DownstreamProtocolIntegrationTest, LargeCookieParsingMany) { auto response = sendRequestAndWaitForResponse(request_headers, 0, default_response_headers_, 0); ASSERT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } TEST_P(DownstreamProtocolIntegrationTest, InvalidContentLength) { @@ -904,14 +1119,15 @@ TEST_P(DownstreamProtocolIntegrationTest, InvalidContentLength) { {"content-length", "-1"}}); auto response = std::move(encoder_decoder.second); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { ASSERT_TRUE(response->complete()); - EXPECT_EQ("400", response->headers().Status()->value().getStringView()); + EXPECT_EQ("400", response->headers().getStatusValue()); + test_server_->waitForCounterGe("http.config_test.downstream_rq_4xx", 1); } else { 
ASSERT_TRUE(response->reset()); - EXPECT_EQ(Http::StreamResetReason::ConnectionTermination, response->reset_reason()); + EXPECT_EQ(Http::StreamResetReason::ConnectionTermination, response->resetReason()); } } @@ -920,7 +1136,9 @@ TEST_P(DownstreamProtocolIntegrationTest, InvalidContentLengthAllowed) { config_helper_.addConfigModifier( [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& hcm) -> void { - hcm.mutable_http2_protocol_options()->set_stream_error_on_invalid_http_messaging(true); + hcm.mutable_http2_protocol_options() + ->mutable_override_stream_error_on_invalid_http_message() + ->set_value(true); }); initialize(); @@ -935,7 +1153,7 @@ TEST_P(DownstreamProtocolIntegrationTest, InvalidContentLengthAllowed) { auto response = std::move(encoder_decoder.second); if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); } else { response->waitForReset(); codec_client_->close(); @@ -943,10 +1161,10 @@ TEST_P(DownstreamProtocolIntegrationTest, InvalidContentLengthAllowed) { if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { ASSERT_TRUE(response->complete()); - EXPECT_EQ("400", response->headers().Status()->value().getStringView()); + EXPECT_EQ("400", response->headers().getStatusValue()); } else { ASSERT_TRUE(response->reset()); - EXPECT_EQ(Http::StreamResetReason::RemoteReset, response->reset_reason()); + EXPECT_EQ(Http::StreamResetReason::RemoteReset, response->resetReason()); } } @@ -960,14 +1178,14 @@ TEST_P(DownstreamProtocolIntegrationTest, MultipleContentLengths) { {"content-length", "3,2"}}); auto response = std::move(encoder_decoder.second); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { ASSERT_TRUE(response->complete()); - EXPECT_EQ("400", response->headers().Status()->value().getStringView()); + 
EXPECT_EQ("400", response->headers().getStatusValue()); } else { ASSERT_TRUE(response->reset()); - EXPECT_EQ(Http::StreamResetReason::ConnectionTermination, response->reset_reason()); + EXPECT_EQ(Http::StreamResetReason::ConnectionTermination, response->resetReason()); } } @@ -976,7 +1194,9 @@ TEST_P(DownstreamProtocolIntegrationTest, MultipleContentLengthsAllowed) { config_helper_.addConfigModifier( [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& hcm) -> void { - hcm.mutable_http2_protocol_options()->set_stream_error_on_invalid_http_messaging(true); + hcm.mutable_http2_protocol_options() + ->mutable_override_stream_error_on_invalid_http_message() + ->set_value(true); }); initialize(); @@ -989,7 +1209,7 @@ TEST_P(DownstreamProtocolIntegrationTest, MultipleContentLengthsAllowed) { auto response = std::move(encoder_decoder.second); if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); } else { response->waitForReset(); codec_client_->close(); @@ -997,10 +1217,10 @@ TEST_P(DownstreamProtocolIntegrationTest, MultipleContentLengthsAllowed) { if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { ASSERT_TRUE(response->complete()); - EXPECT_EQ("400", response->headers().Status()->value().getStringView()); + EXPECT_EQ("400", response->headers().getStatusValue()); } else { ASSERT_TRUE(response->reset()); - EXPECT_EQ(Http::StreamResetReason::RemoteReset, response->reset_reason()); + EXPECT_EQ(Http::StreamResetReason::RemoteReset, response->resetReason()); } } @@ -1030,7 +1250,7 @@ name: encode-headers-only } EXPECT_TRUE(response->complete()); - EXPECT_EQ("503", response->headers().Status()->value().getStringView()); + EXPECT_EQ("503", response->headers().getStatusValue()); EXPECT_EQ(0, response->body().size()); } @@ -1053,7 +1273,7 @@ name: decode-headers-only response->waitForEndStream(); EXPECT_TRUE(response->complete()); - 
EXPECT_EQ("503", response->headers().Status()->value().getStringView()); + EXPECT_EQ("503", response->headers().getStatusValue()); EXPECT_EQ(128, response->body().size()); } @@ -1088,7 +1308,7 @@ name: passthrough-filter } EXPECT_TRUE(response->complete()); - EXPECT_EQ("503", response->headers().Status()->value().getStringView()); + EXPECT_EQ("503", response->headers().getStatusValue()); EXPECT_EQ(0, response->body().size()); } @@ -1117,7 +1337,7 @@ name: passthrough-filter response->waitForEndStream(); EXPECT_TRUE(response->complete()); - EXPECT_EQ("503", response->headers().Status()->value().getStringView()); + EXPECT_EQ("503", response->headers().getStatusValue()); EXPECT_EQ(128, response->body().size()); } @@ -1154,10 +1374,20 @@ name: decode-headers-only response->waitForEndStream(); EXPECT_TRUE(response->complete()); - EXPECT_EQ("503", response->headers().Status()->value().getStringView()); + EXPECT_EQ("503", response->headers().getStatusValue()); EXPECT_EQ(0, upstream_request_->body().length()); } +TEST_P(DownstreamProtocolIntegrationTest, LargeRequestUrlRejected) { + // Send one 95 kB URL with limit 60 kB headers. + testLargeRequestUrl(95, 60); +} + +TEST_P(DownstreamProtocolIntegrationTest, LargeRequestUrlAccepted) { + // Send one 95 kB URL with limit 96 kB headers. + testLargeRequestUrl(95, 96); +} + TEST_P(DownstreamProtocolIntegrationTest, LargeRequestHeadersRejected) { // Send one 95 kB header with limit 60 kB and 100 headers. 
testLargeRequestHeaders(95, 1, 60, 100); @@ -1197,9 +1427,9 @@ TEST_P(DownstreamProtocolIntegrationTest, ManyRequestTrailersRejected) { codec_client_->sendTrailers(*request_encoder_, request_trailers); if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("431", response->headers().Status()->value().getStringView()); + EXPECT_EQ("431", response->headers().getStatusValue()); } else { response->waitForReset(); codec_client_->close(); @@ -1233,7 +1463,7 @@ TEST_P(DownstreamProtocolIntegrationTest, ManyRequestTrailersAccepted) { upstream_request_->encodeHeaders(default_response_headers_, true); response->waitForEndStream(); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } // This test uses an Http::HeaderMapImpl instead of an Http::TestHeaderMapImpl to avoid @@ -1268,9 +1498,9 @@ TEST_P(DownstreamProtocolIntegrationTest, ManyTrailerHeaders) { max_request_headers_count_); }); - Http::RequestTrailerMapImpl request_trailers; + auto request_trailers = Http::RequestTrailerMapImpl::create(); for (int i = 0; i < 20000; i++) { - request_trailers.addCopy(Http::LowerCaseString(std::to_string(i)), ""); + request_trailers->addCopy(Http::LowerCaseString(std::to_string(i)), ""); } initialize(); @@ -1282,14 +1512,14 @@ TEST_P(DownstreamProtocolIntegrationTest, ManyTrailerHeaders) { {":authority", "host"}}); request_encoder_ = &encoder_decoder.first; auto response = std::move(encoder_decoder.second); - codec_client_->sendTrailers(*request_encoder_, request_trailers); + codec_client_->sendTrailers(*request_encoder_, *request_trailers); waitForNextUpstreamRequest(); upstream_request_->encodeHeaders(default_response_headers_, true); response->waitForEndStream(); EXPECT_TRUE(upstream_request_->complete()); 
EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } // Regression tests for CVE-2019-18801. We only validate the behavior of large @@ -1320,9 +1550,9 @@ TEST_P(ProtocolIntegrationTest, LargeRequestMethod) { auto encoder_decoder = codec_client_->startRequest(request_headers); request_encoder_ = &encoder_decoder.first; auto response = std::move(encoder_decoder.second); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("400", response->headers().Status()->value().getStringView()); + EXPECT_EQ("400", response->headers().getStatusValue()); } else { ASSERT(downstreamProtocol() == Http::CodecClient::Type::HTTP2); if (upstreamProtocol() == FakeHttpConnection::Type::HTTP1) { @@ -1331,7 +1561,7 @@ TEST_P(ProtocolIntegrationTest, LargeRequestMethod) { fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); response->waitForEndStream(); EXPECT_TRUE(response->complete()); - EXPECT_EQ("400", response->headers().Status()->value().getStringView()); + EXPECT_EQ("400", response->headers().getStatusValue()); } else { ASSERT(upstreamProtocol() == FakeHttpConnection::Type::HTTP2); auto response = @@ -1584,7 +1814,7 @@ TEST_P(ProtocolIntegrationTest, MultipleSetCookies) { auto response = sendRequestAndWaitForResponse(default_request_headers_, 0, response_headers, 0); ASSERT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); std::vector out; Http::HeaderUtility::getAllOfHeader(response->headers(), "set-cookie", out); @@ -1622,7 +1852,7 @@ TEST_P(ProtocolIntegrationTest, TestDownstreamResetIdleTimeout) { ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); } - codec_client_->waitForDisconnect(); + 
ASSERT_TRUE(codec_client_->waitForDisconnect()); } // Test connection is closed after single request processed. @@ -1691,6 +1921,27 @@ TEST_P(ProtocolIntegrationTest, ConnDurationTimeoutNoHttpRequest) { test_server_->waitForCounterGe("http.config_test.downstream_cx_max_duration_reached", 1); } +TEST_P(DownstreamProtocolIntegrationTest, TestPrefetch) { + config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + auto* cluster = bootstrap.mutable_static_resources()->mutable_clusters(0); + cluster->mutable_prefetch_policy()->mutable_prefetch_ratio()->set_value(1.5); + }); + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + auto response = + sendRequestAndWaitForResponse(default_request_headers_, 0, default_response_headers_, 0); + FakeHttpConnectionPtr fake_upstream_connection_two; + if (upstreamProtocol() == FakeHttpConnection::Type::HTTP1) { + // For HTTP/1.1 there should be a prefetched connection. + ASSERT_TRUE( + fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_two)); + } else { + // For HTTP/2, the original connection can accommodate two requests. 
+ ASSERT_FALSE(fake_upstreams_[0]->waitForHttpConnection( + *dispatcher_, fake_upstream_connection_two, std::chrono::milliseconds(5))); + } +} + TEST_P(DownstreamProtocolIntegrationTest, BasicMaxStreamTimeout) { config_helper_.setDownstreamMaxStreamDuration(std::chrono::milliseconds(500)); initialize(); @@ -1705,9 +1956,32 @@ TEST_P(DownstreamProtocolIntegrationTest, BasicMaxStreamTimeout) { ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); + test_server_->waitForCounterGe("http.config_test.downstream_rq_max_duration_reached", 1); + response->waitForReset(); + EXPECT_TRUE(response->complete()); +} + +TEST_P(DownstreamProtocolIntegrationTest, BasicMaxStreamTimeoutLegacy) { + useAccessLog("%RESPONSE_FLAGS% %RESPONSE_CODE_DETAILS%"); + config_helper_.addRuntimeOverride("envoy.reloadable_features.allow_response_for_timeout", + "false"); + config_helper_.setDownstreamMaxStreamDuration(std::chrono::milliseconds(500)); + initialize(); + fake_upstreams_[0]->set_allow_unexpected_disconnects(true); + codec_client_ = makeHttpConnection(lookupPort("http")); + + auto encoder_decoder = codec_client_->startRequest(default_request_headers_); + request_encoder_ = &encoder_decoder.first; + auto response = std::move(encoder_decoder.second); + + ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); + ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); + ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); + test_server_->waitForCounterGe("http.config_test.downstream_rq_max_duration_reached", 1); response->waitForReset(); EXPECT_FALSE(response->complete()); + EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr("max_duration_timeout")); } // Make sure that invalid authority headers get blocked at or before the HCM. 
@@ -1727,11 +2001,11 @@ TEST_P(DownstreamProtocolIntegrationTest, InvalidAuthority) { // For HTTP/1 this is handled by the HCM, which sends a full 400 response. response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ("400", response->headers().Status()->value().getStringView()); + EXPECT_EQ("400", response->headers().getStatusValue()); } else { // For HTTP/2 this is handled by nghttp2 which resets the connection without // sending an HTTP response. - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); ASSERT_FALSE(response->complete()); } } @@ -1743,16 +2017,18 @@ TEST_P(DownstreamProtocolIntegrationTest, ConnectIsBlocked) { Http::TestRequestHeaderMapImpl{{":method", "CONNECT"}, {":authority", "host.com:80"}}); if (downstreamProtocol() == Http::CodecClient::Type::HTTP1) { + // Because CONNECT requests for HTTP/1 do not include a path, they will fail + // to find a route match and return a 404. response->waitForEndStream(); - EXPECT_EQ("403", response->headers().Status()->value().getStringView()); + EXPECT_EQ("404", response->headers().getStatusValue()); EXPECT_TRUE(response->complete()); } else { response->waitForReset(); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); } } -// Make sure that with stream_error_on_invalid_http_messaging true, CONNECT +// Make sure that with override_stream_error_on_invalid_http_message true, CONNECT // results in stream teardown not connection teardown. 
TEST_P(DownstreamProtocolIntegrationTest, ConnectStreamRejection) { if (downstreamProtocol() == Http::CodecClient::Type::HTTP1) { @@ -1761,7 +2037,9 @@ TEST_P(DownstreamProtocolIntegrationTest, ConnectStreamRejection) { config_helper_.addConfigModifier( [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& hcm) -> void { - hcm.mutable_http2_protocol_options()->set_stream_error_on_invalid_http_messaging(true); + hcm.mutable_http2_protocol_options() + ->mutable_override_stream_error_on_invalid_http_message() + ->set_value(true); }); initialize(); @@ -1773,6 +2051,40 @@ TEST_P(DownstreamProtocolIntegrationTest, ConnectStreamRejection) { EXPECT_FALSE(codec_client_->disconnected()); } +// Regression test for https://github.com/envoyproxy/envoy/issues/12131 +TEST_P(DownstreamProtocolIntegrationTest, Test100AndDisconnect) { + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + auto response = codec_client_->makeHeaderOnlyRequest(default_request_headers_); + waitForNextUpstreamRequest(); + upstream_request_->encode100ContinueHeaders(Http::TestResponseHeaderMapImpl{{":status", "100"}}); + ASSERT_TRUE(fake_upstream_connection_->close()); + + // Make sure that a disconnect results in valid 5xx response headers even when preceded by a 100. 
+ response->waitForEndStream(); + EXPECT_TRUE(response->complete()); + EXPECT_EQ("503", response->headers().getStatusValue()); +} + +TEST_P(DownstreamProtocolIntegrationTest, Test100AndDisconnectLegacy) { + config_helper_.addRuntimeOverride("envoy.reloadable_features.allow_500_after_100", "false"); + + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + auto response = codec_client_->makeHeaderOnlyRequest(default_request_headers_); + waitForNextUpstreamRequest(); + upstream_request_->encode100ContinueHeaders(Http::TestResponseHeaderMapImpl{{":status", "100"}}); + ASSERT_TRUE(fake_upstream_connection_->close()); + + if (downstreamProtocol() == Http::CodecClient::Type::HTTP1) { + ASSERT_TRUE(codec_client_->waitForDisconnect()); + EXPECT_FALSE(response->complete()); + } else { + response->waitForReset(); + EXPECT_FALSE(response->complete()); + } +} + // For tests which focus on downstream-to-Envoy behavior, and don't need to be // run with both HTTP/1 and HTTP/2 upstreams. 
INSTANTIATE_TEST_SUITE_P(Protocols, DownstreamProtocolIntegrationTest, diff --git a/test/integration/proxy_proto_integration_test.cc b/test/integration/proxy_proto_integration_test.cc index ec62a8991a6ad..92c4bc90b39d6 100644 --- a/test/integration/proxy_proto_integration_test.cc +++ b/test/integration/proxy_proto_integration_test.cc @@ -18,6 +18,27 @@ INSTANTIATE_TEST_SUITE_P(IpVersions, ProxyProtoIntegrationTest, testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), TestUtility::ipTestParamsToString); +TEST_P(ProxyProtoIntegrationTest, CaptureTlvToMetadata) { + useListenerAccessLog( + "%DYNAMIC_METADATA(envoy.filters.listener.proxy_protocol:PP2TypeAuthority)%"); + + ConnectionCreationFunction creator = [&]() -> Network::ClientConnectionPtr { + Network::ClientConnectionPtr conn = makeClientConnection(lookupPort("http")); + constexpr uint8_t buffer[] = {0x0d, 0x0a, 0x0d, 0x0a, 0x00, 0x0d, 0x0a, 0x51, 0x55, 0x49, 0x54, + 0x0a, 0x21, 0x11, 0x00, 0x1a, 0x01, 0x02, 0x03, 0x04, 0x00, 0x01, + 0x01, 0x02, 0x03, 0x05, 0x00, 0x02, 0x00, 0x00, 0x01, 0xff, 0x02, + 0x00, 0x07, 0x66, 0x6f, 0x6f, 0x2e, 0x63, 0x6f, 0x6d}; + Buffer::OwnedImpl buf(buffer, sizeof(buffer)); + conn->write(buf, false); + return conn; + }; + + testRouterRequestAndResponseWithBody(1024, 512, false, false, &creator); + cleanupUpstreamAndDownstream(); + const std::string log_line = waitForAccessLog(listener_access_log_name_); + EXPECT_EQ(log_line, "\"foo.com\""); +} + TEST_P(ProxyProtoIntegrationTest, V1RouterRequestAndResponseWithBodyNoBuffer) { ConnectionCreationFunction creator = [&]() -> Network::ClientConnectionPtr { Network::ClientConnectionPtr conn = makeClientConnection(lookupPort("http")); @@ -116,7 +137,7 @@ TEST_P(ProxyProtoIntegrationTest, AccessLog) { ASSERT_EQ(2, tokens.size()); EXPECT_EQ(tokens[0], Network::Test::getLoopbackAddressString(GetParam())); - EXPECT_EQ(tokens[1], "1.2.3.4:12345"); + EXPECT_EQ(tokens[1], "1.2.3.4:12345\n"); } TEST_P(ProxyProtoIntegrationTest, 
DEPRECATED_FEATURE_TEST(OriginalDst)) { diff --git a/test/integration/proxy_proto_integration_test.h b/test/integration/proxy_proto_integration_test.h index e67b69e9a47e4..140d5b63d3f70 100644 --- a/test/integration/proxy_proto_integration_test.h +++ b/test/integration/proxy_proto_integration_test.h @@ -4,6 +4,8 @@ #include "common/http/codec_client.h" +#include "extensions/filters/listener/proxy_protocol/proxy_protocol.h" + #include "test/integration/fake_upstream.h" #include "test/integration/http_integration.h" #include "test/integration/server.h" @@ -17,9 +19,15 @@ class ProxyProtoIntegrationTest : public testing::TestWithParam void { + ::envoy::extensions::filters::listener::proxy_protocol::v3::ProxyProtocol proxy_protocol; + auto rule = proxy_protocol.add_rules(); + rule->set_tlv_type(0x02); + rule->mutable_on_tlv_present()->set_key("PP2TypeAuthority"); + auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0); - auto* filter_chain = listener->mutable_filter_chains(0); - filter_chain->mutable_use_proxy_proto()->set_value(true); + auto* ppv_filter = listener->add_listener_filters(); + ppv_filter->set_name("envoy.listener.proxy_protocol"); + ppv_filter->mutable_typed_config()->PackFrom(proxy_protocol); }); } }; diff --git a/test/integration/redirect_integration_test.cc b/test/integration/redirect_integration_test.cc index 14c4fa5da7e2c..0b03e662e9e8f 100644 --- a/test/integration/redirect_integration_test.cc +++ b/test/integration/redirect_integration_test.cc @@ -1,10 +1,15 @@ #include "envoy/config/route/v3/route_components.pb.h" #include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" +#include "envoy/extensions/internal_redirect/allow_listed_routes/v3/allow_listed_routes_config.pb.h" +#include "envoy/extensions/internal_redirect/previous_routes/v3/previous_routes_config.pb.h" +#include "envoy/extensions/internal_redirect/safe_cross_scheme/v3/safe_cross_scheme_config.pb.h" #include 
"test/integration/http_protocol_integration.h" namespace Envoy { +using testing::HasSubstr; + namespace { constexpr char HandleThreeHopLocationFormat[] = "http://handle.internal.redirect.max.three.hop/path{}"; @@ -19,16 +24,17 @@ class RedirectIntegrationTest : public HttpProtocolIntegrationTest { config_helper_.addVirtualHost(pass_through); auto handle = config_helper_.createVirtualHost("handle.internal.redirect"); - handle.mutable_routes(0)->mutable_route()->set_internal_redirect_action( - envoy::config::route::v3::RouteAction::HANDLE_INTERNAL_REDIRECT); + handle.mutable_routes(0)->set_name("redirect"); + handle.mutable_routes(0)->mutable_route()->mutable_internal_redirect_policy(); config_helper_.addVirtualHost(handle); auto handle_max_3_hop = config_helper_.createVirtualHost("handle.internal.redirect.max.three.hop"); - handle_max_3_hop.mutable_routes(0)->mutable_route()->set_internal_redirect_action( - envoy::config::route::v3::RouteAction::HANDLE_INTERNAL_REDIRECT); + handle_max_3_hop.mutable_routes(0)->set_name("max_three_hop"); + handle_max_3_hop.mutable_routes(0)->mutable_route()->mutable_internal_redirect_policy(); handle_max_3_hop.mutable_routes(0) ->mutable_route() + ->mutable_internal_redirect_policy() ->mutable_max_internal_redirects() ->set_value(3); config_helper_.addVirtualHost(handle_max_3_hop); @@ -89,7 +95,7 @@ TEST_P(RedirectIntegrationTest, RedirectNotConfigured) { codec_client_ = makeHttpConnection(lookupPort("http")); auto response = sendRequestAndWaitForResponse(default_request_headers_, 0, redirect_response_, 0); EXPECT_TRUE(response->complete()); - EXPECT_EQ("302", response->headers().Status()->value().getStringView()); + EXPECT_EQ("302", response->headers().getStatusValue()); } // Now test a route with redirects configured on in pass-through mode. 
@@ -99,13 +105,14 @@ TEST_P(RedirectIntegrationTest, InternalRedirectPassedThrough) { codec_client_ = makeHttpConnection(lookupPort("http")); default_request_headers_.setHost("pass.through.internal.redirect"); auto response = sendRequestAndWaitForResponse(default_request_headers_, 0, redirect_response_, 0); - EXPECT_EQ("302", response->headers().Status()->value().getStringView()); + EXPECT_EQ("302", response->headers().getStatusValue()); EXPECT_EQ( 0, test_server_->counter("cluster.cluster_0.upstream_internal_redirect_failed_total")->value()); } TEST_P(RedirectIntegrationTest, BasicInternalRedirect) { + useAccessLog("%RESPONSE_FLAGS% %RESPONSE_CODE_DETAILS%"); // Validate that header sanitization is only called once. config_helper_.addConfigModifier( [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& @@ -125,18 +132,19 @@ TEST_P(RedirectIntegrationTest, BasicInternalRedirect) { waitForNextUpstreamRequest(); ASSERT(upstream_request_->headers().EnvoyOriginalUrl() != nullptr); EXPECT_EQ("http://handle.internal.redirect/test/long/url", - upstream_request_->headers().EnvoyOriginalUrl()->value().getStringView()); - EXPECT_EQ("/new/url", upstream_request_->headers().Path()->value().getStringView()); - EXPECT_EQ("authority2", upstream_request_->headers().Host()->value().getStringView()); - EXPECT_EQ("via_value", upstream_request_->headers().Via()->value().getStringView()); + upstream_request_->headers().getEnvoyOriginalUrlValue()); + EXPECT_EQ("/new/url", upstream_request_->headers().getPathValue()); + EXPECT_EQ("authority2", upstream_request_->headers().getHostValue()); + EXPECT_EQ("via_value", upstream_request_->headers().getViaValue()); upstream_request_->encodeHeaders(default_response_headers_, true); response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ(1, 
test_server_->counter("cluster.cluster_0.upstream_internal_redirect_succeeded_total") ->value()); + EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr("internal_redirect")); } TEST_P(RedirectIntegrationTest, InternalRedirectWithThreeHopLimit) { @@ -159,11 +167,10 @@ TEST_P(RedirectIntegrationTest, InternalRedirectWithThreeHopLimit) { for (int i = 0; i < 4; i++) { upstream_requests.push_back(waitForNextStream()); - EXPECT_EQ(fmt::format("/path{}", i), - upstream_requests.back()->headers().Path()->value().getStringView()); + EXPECT_EQ(fmt::format("/path{}", i), upstream_requests.back()->headers().getPathValue()); EXPECT_EQ("handle.internal.redirect.max.three.hop", - upstream_requests.back()->headers().Host()->value().getStringView()); - EXPECT_EQ("via_value", upstream_requests.back()->headers().Via()->value().getStringView()); + upstream_requests.back()->headers().getHostValue()); + EXPECT_EQ("via_value", upstream_requests.back()->headers().getViaValue()); auto next_location = fmt::format(HandleThreeHopLocationFormat, i + 1); redirect_response_.setLocation(next_location); @@ -172,10 +179,13 @@ TEST_P(RedirectIntegrationTest, InternalRedirectWithThreeHopLimit) { response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ("302", response->headers().Status()->value().getStringView()); + EXPECT_EQ("302", response->headers().getStatusValue()); EXPECT_EQ( 1, test_server_->counter("cluster.cluster_0.upstream_internal_redirect_failed_total")->value()); + EXPECT_EQ( + 1, test_server_->counter("http.config_test.passthrough_internal_redirect_too_many_redirects") + ->value()); } TEST_P(RedirectIntegrationTest, InternalRedirectToDestinationWithBody) { @@ -203,23 +213,197 @@ TEST_P(RedirectIntegrationTest, InternalRedirectToDestinationWithBody) { waitForNextUpstreamRequest(); ASSERT(upstream_request_->headers().EnvoyOriginalUrl() != nullptr); EXPECT_EQ("http://handle.internal.redirect/test/long/url", - 
upstream_request_->headers().EnvoyOriginalUrl()->value().getStringView()); - EXPECT_EQ("/new/url", upstream_request_->headers().Path()->value().getStringView()); - EXPECT_EQ("authority2", upstream_request_->headers().Host()->value().getStringView()); - EXPECT_EQ("via_value", upstream_request_->headers().Via()->value().getStringView()); + upstream_request_->headers().getEnvoyOriginalUrlValue()); + EXPECT_EQ("/new/url", upstream_request_->headers().getPathValue()); + EXPECT_EQ("authority2", upstream_request_->headers().getHostValue()); + EXPECT_EQ("via_value", upstream_request_->headers().getViaValue()); - Http::TestHeaderMapImpl response_with_big_body( + Http::TestResponseHeaderMapImpl response_with_big_body( {{":status", "200"}, {"content-length", "2000000"}}); upstream_request_->encodeHeaders(response_with_big_body, false); upstream_request_->encodeData(2000000, true); response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ(1, test_server_->counter("cluster.cluster_0.upstream_internal_redirect_succeeded_total") ->value()); } +TEST_P(RedirectIntegrationTest, InternalRedirectPreventedByPreviousRoutesPredicate) { + auto handle_prevent_repeated_target = + config_helper_.createVirtualHost("handle.internal.redirect.no.repeated.target"); + auto* internal_redirect_policy = handle_prevent_repeated_target.mutable_routes(0) + ->mutable_route() + ->mutable_internal_redirect_policy(); + internal_redirect_policy->mutable_max_internal_redirects()->set_value(10); + envoy::extensions::internal_redirect::previous_routes::v3::PreviousRoutesConfig + previous_routes_config; + auto* predicate = internal_redirect_policy->add_predicates(); + predicate->set_name("previous_routes"); + predicate->mutable_typed_config()->PackFrom(previous_routes_config); + config_helper_.addVirtualHost(handle_prevent_repeated_target); + + // Validate that 
header sanitization is only called once. + config_helper_.addConfigModifier( + [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) { hcm.set_via("via_value"); }); + initialize(); + fake_upstreams_[0]->set_allow_unexpected_disconnects(true); + + codec_client_ = makeHttpConnection(lookupPort("http")); + + default_request_headers_.setHost("handle.internal.redirect.no.repeated.target"); + IntegrationStreamDecoderPtr response = + codec_client_->makeHeaderOnlyRequest(default_request_headers_); + + auto first_request = waitForNextStream(); + // Redirect to another route + redirect_response_.setLocation("http://handle.internal.redirect.max.three.hop/random/path"); + first_request->encodeHeaders(redirect_response_, true); + + auto second_request = waitForNextStream(); + // Redirect back to the original route. + redirect_response_.setLocation("http://handle.internal.redirect.no.repeated.target/another/path"); + second_request->encodeHeaders(redirect_response_, true); + + auto third_request = waitForNextStream(); + // Redirect to the same route as the first redirect. This should fail. 
+ redirect_response_.setLocation("http://handle.internal.redirect.max.three.hop/yet/another/path"); + third_request->encodeHeaders(redirect_response_, true); + + response->waitForEndStream(); + ASSERT_TRUE(response->complete()); + EXPECT_EQ("302", response->headers().getStatusValue()); + EXPECT_EQ("http://handle.internal.redirect.max.three.hop/yet/another/path", + response->headers().getLocationValue()); + EXPECT_EQ(2, test_server_->counter("cluster.cluster_0.upstream_internal_redirect_succeeded_total") + ->value()); + EXPECT_EQ( + 1, + test_server_->counter("http.config_test.passthrough_internal_redirect_predicate")->value()); +} + +TEST_P(RedirectIntegrationTest, InternalRedirectPreventedByAllowListedRoutesPredicate) { + auto handle_allow_listed_redirect_route = + config_helper_.createVirtualHost("handle.internal.redirect.only.allow.listed.target"); + auto* internal_redirect_policy = handle_allow_listed_redirect_route.mutable_routes(0) + ->mutable_route() + ->mutable_internal_redirect_policy(); + + auto* allow_listed_routes_predicate = internal_redirect_policy->add_predicates(); + allow_listed_routes_predicate->set_name("allow_listed_routes"); + envoy::extensions::internal_redirect::allow_listed_routes::v3::AllowListedRoutesConfig + allow_listed_routes_config; + *allow_listed_routes_config.add_allowed_route_names() = "max_three_hop"; + allow_listed_routes_predicate->mutable_typed_config()->PackFrom(allow_listed_routes_config); + + internal_redirect_policy->mutable_max_internal_redirects()->set_value(10); + + config_helper_.addVirtualHost(handle_allow_listed_redirect_route); + + // Validate that header sanitization is only called once. 
+ config_helper_.addConfigModifier( + [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) { hcm.set_via("via_value"); }); + initialize(); + fake_upstreams_[0]->set_allow_unexpected_disconnects(true); + + codec_client_ = makeHttpConnection(lookupPort("http")); + + default_request_headers_.setHost("handle.internal.redirect.only.allow.listed.target"); + IntegrationStreamDecoderPtr response = + codec_client_->makeHeaderOnlyRequest(default_request_headers_); + + auto first_request = waitForNextStream(); + // Redirect to another route + redirect_response_.setLocation("http://handle.internal.redirect.max.three.hop/random/path"); + first_request->encodeHeaders(redirect_response_, true); + + auto second_request = waitForNextStream(); + // Redirect back to the original route. + redirect_response_.setLocation( + "http://handle.internal.redirect.only.allow.listed.target/another/path"); + second_request->encodeHeaders(redirect_response_, true); + + auto third_request = waitForNextStream(); + // Redirect to the non-allow-listed route. This should fail. 
+ redirect_response_.setLocation("http://handle.internal.redirect/yet/another/path"); + third_request->encodeHeaders(redirect_response_, true); + + response->waitForEndStream(); + ASSERT_TRUE(response->complete()); + EXPECT_EQ("302", response->headers().getStatusValue()); + EXPECT_EQ("http://handle.internal.redirect/yet/another/path", + response->headers().getLocationValue()); + EXPECT_EQ(2, test_server_->counter("cluster.cluster_0.upstream_internal_redirect_succeeded_total") + ->value()); + EXPECT_EQ( + 1, + test_server_->counter("http.config_test.passthrough_internal_redirect_predicate")->value()); +} + +TEST_P(RedirectIntegrationTest, InternalRedirectPreventedBySafeCrossSchemePredicate) { + auto handle_safe_cross_scheme_route = config_helper_.createVirtualHost( + "handle.internal.redirect.only.allow.safe.cross.scheme.redirect"); + auto* internal_redirect_policy = handle_safe_cross_scheme_route.mutable_routes(0) + ->mutable_route() + ->mutable_internal_redirect_policy(); + + internal_redirect_policy->set_allow_cross_scheme_redirect(true); + + auto* predicate = internal_redirect_policy->add_predicates(); + predicate->set_name("safe_cross_scheme_predicate"); + envoy::extensions::internal_redirect::safe_cross_scheme::v3::SafeCrossSchemeConfig + predicate_config; + predicate->mutable_typed_config()->PackFrom(predicate_config); + + internal_redirect_policy->mutable_max_internal_redirects()->set_value(10); + + config_helper_.addVirtualHost(handle_safe_cross_scheme_route); + + // Validate that header sanitization is only called once. 
+ config_helper_.addConfigModifier( + [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) { hcm.set_via("via_value"); }); + initialize(); + fake_upstreams_[0]->set_allow_unexpected_disconnects(true); + + codec_client_ = makeHttpConnection(lookupPort("http")); + + default_request_headers_.setHost( + "handle.internal.redirect.only.allow.safe.cross.scheme.redirect"); + IntegrationStreamDecoderPtr response = + codec_client_->makeHeaderOnlyRequest(default_request_headers_); + + auto first_request = waitForNextStream(); + // Redirect to another route + redirect_response_.setLocation("http://handle.internal.redirect.max.three.hop/random/path"); + first_request->encodeHeaders(redirect_response_, true); + + auto second_request = waitForNextStream(); + // Redirect back to the original route. + redirect_response_.setLocation( + "http://handle.internal.redirect.only.allow.safe.cross.scheme.redirect/another/path"); + second_request->encodeHeaders(redirect_response_, true); + + auto third_request = waitForNextStream(); + // Redirect to https target. This should fail. 
+ redirect_response_.setLocation("https://handle.internal.redirect/yet/another/path"); + third_request->encodeHeaders(redirect_response_, true); + + response->waitForEndStream(); + ASSERT_TRUE(response->complete()); + EXPECT_EQ("302", response->headers().getStatusValue()); + EXPECT_EQ("https://handle.internal.redirect/yet/another/path", + response->headers().getLocationValue()); + EXPECT_EQ(2, test_server_->counter("cluster.cluster_0.upstream_internal_redirect_succeeded_total") + ->value()); + EXPECT_EQ( + 1, + test_server_->counter("http.config_test.passthrough_internal_redirect_predicate")->value()); +} + TEST_P(RedirectIntegrationTest, InvalidRedirect) { initialize(); @@ -230,7 +414,7 @@ TEST_P(RedirectIntegrationTest, InvalidRedirect) { codec_client_ = makeHttpConnection(lookupPort("http")); default_request_headers_.setHost("handle.internal.redirect"); auto response = sendRequestAndWaitForResponse(default_request_headers_, 0, redirect_response_, 0); - EXPECT_EQ("302", response->headers().Status()->value().getStringView()); + EXPECT_EQ("302", response->headers().getStatusValue()); EXPECT_EQ( 1, test_server_->counter("cluster.cluster_0.upstream_internal_redirect_failed_total")->value()); diff --git a/test/integration/rtds_integration_test.cc b/test/integration/rtds_integration_test.cc index b68124c563db2..1fdd148dd8508 100644 --- a/test/integration/rtds_integration_test.cc +++ b/test/integration/rtds_integration_test.cc @@ -2,6 +2,7 @@ #include "test/common/grpc/grpc_client_integration.h" #include "test/integration/http_integration.h" +#include "test/test_common/utility.h" #include "gtest/gtest.h" @@ -15,6 +16,7 @@ std::string tdsBootstrapConfig(absl::string_view api_type) { static_resources: clusters: - name: dummy_cluster + http2_protocol_options: {{}} load_assignment: cluster_name: dummy_cluster endpoints: @@ -54,13 +56,13 @@ std::string tdsBootstrapConfig(absl::string_view api_type) { - name: some_admin_layer admin_layer: {{}} admin: - access_log_path: 
/dev/null + access_log_path: {} address: socket_address: address: 127.0.0.1 port_value: 0 )EOF", - api_type); + api_type, TestEnvironment::nullDevicePath()); } class RtdsIntegrationTest : public Grpc::DeltaSotwIntegrationParamTest, public HttpIntegrationTest { @@ -74,11 +76,7 @@ class RtdsIntegrationTest : public Grpc::DeltaSotwIntegrationParamTest, public H sotw_or_delta_ = sotwOrDelta(); } - void TearDown() override { - cleanUpXdsConnection(); - test_server_.reset(); - fake_upstreams_.clear(); - } + void TearDown() override { cleanUpXdsConnection(); } void initialize() override { // The tests infra expects the xDS server to be the second fake upstream, so @@ -86,32 +84,26 @@ class RtdsIntegrationTest : public Grpc::DeltaSotwIntegrationParamTest, public H setUpstreamCount(1); setUpstreamProtocol(FakeHttpConnection::Type::HTTP2); HttpIntegrationTest::initialize(); - // Initial RTDS connection. - createXdsConnection(); - AssertionResult result = xds_connection_->waitForNewStream(*dispatcher_, xds_stream_); - RELEASE_ASSERT(result, result.message()); - xds_stream_->startGrpcStream(); // Register admin port. registerTestServerPorts({}); initial_load_success_ = test_server_->counter("runtime.load_success")->value(); initial_keys_ = test_server_->gauge("runtime.num_keys")->value(); + fake_upstreams_[0]->set_allow_unexpected_disconnects(true); } void acceptXdsConnection() { - AssertionResult result = // xds_connection_ is filled with the new FakeHttpConnection. - fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, xds_connection_); - RELEASE_ASSERT(result, result.message()); - result = xds_connection_->waitForNewStream(*dispatcher_, xds_stream_); + // Initial RTDS connection. 
+ createXdsConnection(); + AssertionResult result = xds_connection_->waitForNewStream(*dispatcher_, xds_stream_); RELEASE_ASSERT(result, result.message()); xds_stream_->startGrpcStream(); - fake_upstreams_[0]->set_allow_unexpected_disconnects(true); } std::string getRuntimeKey(const std::string& key) { auto response = IntegrationUtil::makeSingleRequest( lookupPort("admin"), "GET", "/runtime?format=json", "", downstreamProtocol(), version_); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); Json::ObjectSharedPtr loader = TestEnvironment::jsonLoadFromString(response->body()); auto entries = loader->getObject("entries"); if (entries->hasObject(key)) { @@ -129,6 +121,7 @@ INSTANTIATE_TEST_SUITE_P(IpVersionsClientTypeDelta, RtdsIntegrationTest, TEST_P(RtdsIntegrationTest, RtdsReload) { initialize(); + acceptXdsConnection(); EXPECT_EQ("whatevs", getRuntimeKey("foo")); EXPECT_EQ("yar", getRuntimeKey("bar")); @@ -176,5 +169,63 @@ TEST_P(RtdsIntegrationTest, RtdsReload) { EXPECT_EQ(3, test_server_->gauge("runtime.num_layers")->value()); } +// Verify that RTDS initialization starts only after initialization of all primary clusters has +// completed. Primary cluster initialization completes asynchronously when some of the clusters use +// DNS for endpoint discovery or when health check is configured. +// This test uses health checking of the first cluster to make primary cluster initialization to +// complete asynchronously. +TEST_P(RtdsIntegrationTest, RtdsAfterAsyncPrimaryClusterInitialization) { + config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + // Enable health checking for the first cluster. 
+ auto* dummy_cluster = bootstrap.mutable_static_resources()->mutable_clusters(0); + auto* health_check = dummy_cluster->add_health_checks(); + health_check->mutable_timeout()->set_seconds(30); + health_check->mutable_interval()->CopyFrom( + Protobuf::util::TimeUtil::MillisecondsToDuration(100)); + health_check->mutable_no_traffic_interval()->CopyFrom( + Protobuf::util::TimeUtil::MillisecondsToDuration(100)); + health_check->mutable_unhealthy_threshold()->set_value(1); + health_check->mutable_healthy_threshold()->set_value(1); + health_check->mutable_http_health_check()->set_path("/healthcheck"); + health_check->mutable_http_health_check()->set_codec_client_type( + envoy::type::v3::CodecClientType::HTTP2); + }); + + initialize(); + + // Make sure statically provisioned runtime values were loaded. + EXPECT_EQ("whatevs", getRuntimeKey("foo")); + EXPECT_EQ("yar", getRuntimeKey("bar")); + EXPECT_EQ("", getRuntimeKey("baz")); + + // Respond to the initial health check, which should complete initialization of primary clusters. + waitForNextUpstreamRequest(); + upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, true); + test_server_->waitForGaugeEq("cluster.dummy_cluster.membership_healthy", 1); + + // After this xDS connection should be established. Verify that dynamic runtime values are loaded. 
+ acceptXdsConnection(); + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Runtime, "", {"some_rtds_layer"}, + {"some_rtds_layer"}, {}, true)); + auto some_rtds_layer = TestUtility::parseYaml(R"EOF( + name: some_rtds_layer + layer: + foo: bar + baz: meh + )EOF"); + sendDiscoveryResponse( + Config::TypeUrl::get().Runtime, {some_rtds_layer}, {some_rtds_layer}, {}, "1"); + test_server_->waitForCounterGe("runtime.load_success", initial_load_success_ + 1); + + EXPECT_EQ("bar", getRuntimeKey("foo")); + EXPECT_EQ("yar", getRuntimeKey("bar")); + EXPECT_EQ("meh", getRuntimeKey("baz")); + + EXPECT_EQ(0, test_server_->counter("runtime.load_error")->value()); + EXPECT_EQ(initial_load_success_ + 1, test_server_->counter("runtime.load_success")->value()); + EXPECT_EQ(initial_keys_ + 1, test_server_->gauge("runtime.num_keys")->value()); + EXPECT_EQ(3, test_server_->gauge("runtime.num_layers")->value()); +} + } // namespace } // namespace Envoy diff --git a/test/integration/run_envoy_test.sh b/test/integration/run_envoy_test.sh index a84ec07be4d05..4c9c21d73d344 100755 --- a/test/integration/run_envoy_test.sh +++ b/test/integration/run_envoy_test.sh @@ -1,5 +1,6 @@ #!/bin/bash +export ENVOY_BIN="${TEST_SRCDIR}/envoy/test/integration/hotrestart_main" source "${TEST_SRCDIR}/envoy/test/integration/test_utility.sh" function expect_fail_with_error() { @@ -7,8 +8,8 @@ function expect_fail_with_error() { rm -f "$log" expected_error="$1" shift - echo ${ENVOY_BIN} "$@" ">&" "$log" - ${ENVOY_BIN} "$@" >& "$log" + echo ${ENVOY_BIN} --use-dynamic-base-id "$@" ">&" "$log" + ${ENVOY_BIN} --use-dynamic-base-id "$@" >& "$log" EXIT_CODE=$? 
cat "$log" check [ $EXIT_CODE -eq 1 ] diff --git a/test/integration/scoped_rds_integration_test.cc b/test/integration/scoped_rds_integration_test.cc index 20688daf5ac63..59dc3069d32d6 100644 --- a/test/integration/scoped_rds_integration_test.cc +++ b/test/integration/scoped_rds_integration_test.cc @@ -32,10 +32,7 @@ class ScopedRdsIntegrationTest : public HttpIntegrationTest, ScopedRdsIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, ipVersion(), realTime()) {} - ~ScopedRdsIntegrationTest() override { - resetConnections(); - cleanupUpstreamAndDownstream(); - } + ~ScopedRdsIntegrationTest() override { resetConnections(); } void initialize() override { // Setup two upstream hosts, one for each cluster. @@ -278,7 +275,7 @@ route_configuration_name: {} {":scheme", "http"}, {"Addr", "x-foo-key=xyz-route"}}); response->waitForEndStream(); - verifyResponse(std::move(response), "404", Http::TestHeaderMapImpl{}, ""); + verifyResponse(std::move(response), "404", Http::TestResponseHeaderMapImpl{}, ""); cleanupUpstreamAndDownstream(); // Test "foo-route" and 'bar-route' both gets routed to cluster_0. @@ -349,7 +346,7 @@ route_configuration_name: {} {":scheme", "http"}, {"Addr", "x-foo-key=foo-route"}}); response->waitForEndStream(); - verifyResponse(std::move(response), "404", Http::TestHeaderMapImpl{}, ""); + verifyResponse(std::move(response), "404", Http::TestResponseHeaderMapImpl{}, ""); cleanupUpstreamAndDownstream(); // Add a new scope foo_scope4. const std::string& scope_route4 = @@ -366,7 +363,7 @@ route_configuration_name: {} response->waitForEndStream(); // Get 404 because RDS hasn't pushed route configuration "foo_route4" yet. // But scope is found and the Router::NullConfigImpl is returned. 
- verifyResponse(std::move(response), "404", Http::TestHeaderMapImpl{}, ""); + verifyResponse(std::move(response), "404", Http::TestResponseHeaderMapImpl{}, ""); cleanupUpstreamAndDownstream(); // RDS updated foo_route4, requests with scope key "xyz-route" now hit cluster_1. @@ -410,7 +407,7 @@ route_configuration_name: foo_route1 {":scheme", "http"}, {"Addr", "x-foo-key=foo"}}); response->waitForEndStream(); - verifyResponse(std::move(response), "404", Http::TestHeaderMapImpl{}, ""); + verifyResponse(std::move(response), "404", Http::TestResponseHeaderMapImpl{}, ""); cleanupUpstreamAndDownstream(); // SRDS update fixed the problem. diff --git a/test/integration/sds_dynamic_integration_test.cc b/test/integration/sds_dynamic_integration_test.cc index 9a384a6e98ec5..f7aa58316a102 100644 --- a/test/integration/sds_dynamic_integration_test.cc +++ b/test/integration/sds_dynamic_integration_test.cc @@ -9,6 +9,7 @@ #include "common/config/api_version.h" #include "common/event/dispatcher_impl.h" +#include "common/http/utility.h" #include "common/network/connection_impl.h" #include "common/network/utility.h" @@ -22,7 +23,6 @@ #include "test/integration/server.h" #include "test/integration/ssl_utility.h" #include "test/mocks/secret/mocks.h" -#include "test/mocks/server/mocks.h" #include "test/test_common/network_utility.h" #include "test/test_common/resources.h" #include "test/test_common/test_time_system.h" @@ -151,7 +151,7 @@ class SdsDynamicDownstreamIntegrationTest : public SdsDynamicIntegrationBaseTest ->mutable_listeners(0) ->mutable_filter_chains(0) ->mutable_transport_socket(); - common_tls_context->add_alpn_protocols("http/1.1"); + common_tls_context->add_alpn_protocols(Http::Utility::AlpnNames::get().Http11); auto* validation_context = common_tls_context->mutable_validation_context(); validation_context->mutable_trusted_ca()->set_filename( @@ -183,10 +183,7 @@ class SdsDynamicDownstreamIntegrationTest : public SdsDynamicIntegrationBaseTest void TearDown() override 
{ cleanUpXdsConnection(); - client_ssl_ctx_.reset(); - cleanupUpstreamAndDownstream(); - codec_client_.reset(); } Network::ClientConnectionPtr makeSslClientConnection() { @@ -228,7 +225,7 @@ TEST_P(SdsDynamicDownstreamIntegrationTest, WrongSecretFirst) { }; initialize(); - codec_client_ = makeRawHttpConnection(makeSslClientConnection()); + codec_client_ = makeRawHttpConnection(makeSslClientConnection(), absl::nullopt); // the connection state is not connected. EXPECT_FALSE(codec_client_->connected()); codec_client_->connection()->close(Network::ConnectionCloseType::NoFlush); @@ -257,7 +254,7 @@ class SdsDynamicDownstreamCertValidationContextTest : public SdsDynamicDownstrea ->mutable_transport_socket(); envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context; auto* common_tls_context = tls_context.mutable_common_tls_context(); - common_tls_context->add_alpn_protocols("http/1.1"); + common_tls_context->add_alpn_protocols(Http::Utility::AlpnNames::get().Http11); auto* tls_certificate = common_tls_context->add_tls_certificates(); tls_certificate->mutable_certificate_chain()->set_filename( @@ -346,9 +343,6 @@ class SdsDynamicDownstreamCertValidationContextTest : public SdsDynamicDownstrea client_ssl_ctx_.reset(); cleanupUpstreamAndDownstream(); codec_client_.reset(); - - test_server_.reset(); - fake_upstreams_.clear(); } void enableCombinedValidationContext(bool enable) { use_combined_validation_context_ = enable; } @@ -529,7 +523,7 @@ TEST_P(SdsDynamicUpstreamIntegrationTest, WrongSecretFirst) { BufferingStreamDecoderPtr response = IntegrationUtil::makeSingleRequest( lookupPort("http"), "GET", "/test/long/url", "", downstream_protocol_, version_); ASSERT_TRUE(response->complete()); - EXPECT_EQ("503", response->headers().Status()->value().getStringView()); + EXPECT_EQ("503", response->headers().getStatusValue()); // To flush out the reset connection from the first request in upstream. 
FakeRawConnectionPtr fake_upstream_connection; diff --git a/test/integration/sds_generic_secret_integration_test.cc b/test/integration/sds_generic_secret_integration_test.cc index 5e68cdcdc7d21..719bc0cc4f393 100644 --- a/test/integration/sds_generic_secret_integration_test.cc +++ b/test/integration/sds_generic_secret_integration_test.cc @@ -104,11 +104,7 @@ class SdsGenericSecretIntegrationTest : public Grpc::GrpcClientIntegrationParamT HttpIntegrationTest::initialize(); } - void TearDown() override { - cleanUpXdsConnection(); - cleanupUpstreamAndDownstream(); - codec_client_.reset(); - } + void TearDown() override { cleanUpXdsConnection(); } void createSdsStream() { createXdsConnection(); diff --git a/test/integration/sds_static_integration_test.cc b/test/integration/sds_static_integration_test.cc index f0a92b8e19c74..da9e77c6e23fc 100644 --- a/test/integration/sds_static_integration_test.cc +++ b/test/integration/sds_static_integration_test.cc @@ -17,7 +17,6 @@ #include "test/integration/server.h" #include "test/integration/ssl_utility.h" #include "test/mocks/secret/mocks.h" -#include "test/mocks/server/mocks.h" #include "test/test_common/network_utility.h" #include "test/test_common/test_time_system.h" #include "test/test_common/utility.h" @@ -46,7 +45,7 @@ class SdsStaticDownstreamIntegrationTest ->mutable_transport_socket(); envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context; auto* common_tls_context = tls_context.mutable_common_tls_context(); - common_tls_context->add_alpn_protocols("http/1.1"); + common_tls_context->add_alpn_protocols(Http::Utility::AlpnNames::get().Http11); common_tls_context->mutable_validation_context_sds_secret_config()->set_name( "validation_context"); diff --git a/test/integration/server.cc b/test/integration/server.cc index 8bc2f9d96cd9f..1b89ed3b995b6 100644 --- a/test/integration/server.cc +++ b/test/integration/server.cc @@ -5,6 +5,7 @@ #include "envoy/http/header_map.h" +#include 
"common/common/random_generator.h" #include "common/common/thread.h" #include "common/local_info/local_info_impl.h" #include "common/network/utility.h" @@ -20,7 +21,6 @@ #include "test/integration/integration.h" #include "test/integration/utility.h" #include "test/mocks/runtime/mocks.h" -#include "test/mocks/server/mocks.h" #include "test/test_common/environment.h" #include "absl/strings/str_replace.h" @@ -31,19 +31,23 @@ namespace Server { OptionsImpl createTestOptionsImpl(const std::string& config_path, const std::string& config_yaml, Network::Address::IpVersion ip_version, - bool allow_unknown_static_fields, - bool reject_unknown_dynamic_fields, uint32_t concurrency) { + FieldValidationConfig validation_config, uint32_t concurrency, + std::chrono::seconds drain_time, + Server::DrainStrategy drain_strategy) { OptionsImpl test_options("cluster_name", "node_name", "zone_name", spdlog::level::info); test_options.setConfigPath(config_path); test_options.setConfigYaml(config_yaml); test_options.setLocalAddressIpVersion(ip_version); test_options.setFileFlushIntervalMsec(std::chrono::milliseconds(50)); - test_options.setDrainTime(std::chrono::seconds(1)); + test_options.setDrainTime(drain_time); test_options.setParentShutdownTime(std::chrono::seconds(2)); - test_options.setAllowUnkownFields(allow_unknown_static_fields); - test_options.setRejectUnknownFieldsDynamic(reject_unknown_dynamic_fields); + test_options.setDrainStrategy(drain_strategy); + test_options.setAllowUnkownFields(validation_config.allow_unknown_static_fields); + test_options.setRejectUnknownFieldsDynamic(validation_config.reject_unknown_dynamic_fields); + test_options.setIgnoreUnknownFieldsDynamic(validation_config.ignore_unknown_dynamic_fields); test_options.setConcurrency(concurrency); + test_options.setHotRestartDisabled(true); return test_options; } @@ -55,16 +59,16 @@ IntegrationTestServerPtr IntegrationTestServer::create( std::function server_ready_function, std::function on_server_init_function, 
bool deterministic, Event::TestTimeSystem& time_system, Api::Api& api, bool defer_listener_finalization, - ProcessObjectOptRef process_object, bool allow_unknown_static_fields, - bool reject_unknown_dynamic_fields, uint32_t concurrency) { + ProcessObjectOptRef process_object, Server::FieldValidationConfig validation_config, + uint32_t concurrency, std::chrono::seconds drain_time, Server::DrainStrategy drain_strategy, + bool use_real_stats) { IntegrationTestServerPtr server{ - std::make_unique(time_system, api, config_path)}; + std::make_unique(time_system, api, config_path, use_real_stats)}; if (server_ready_function != nullptr) { server->setOnServerReadyCb(server_ready_function); } server->start(version, on_server_init_function, deterministic, defer_listener_finalization, - process_object, allow_unknown_static_fields, reject_unknown_dynamic_fields, - concurrency); + process_object, validation_config, concurrency, drain_time, drain_strategy); return server; } @@ -82,15 +86,16 @@ void IntegrationTestServer::start(const Network::Address::IpVersion version, std::function on_server_init_function, bool deterministic, bool defer_listener_finalization, ProcessObjectOptRef process_object, - bool allow_unknown_static_fields, - bool reject_unknown_dynamic_fields, uint32_t concurrency) { + Server::FieldValidationConfig validator_config, + uint32_t concurrency, std::chrono::seconds drain_time, + Server::DrainStrategy drain_strategy) { ENVOY_LOG(info, "starting integration test server"); ASSERT(!thread_); - thread_ = api_.threadFactory().createThread( - [version, deterministic, process_object, allow_unknown_static_fields, - reject_unknown_dynamic_fields, concurrency, this]() -> void { - threadRoutine(version, deterministic, process_object, allow_unknown_static_fields, - reject_unknown_dynamic_fields, concurrency); + thread_ = + api_.threadFactory().createThread([version, deterministic, process_object, validator_config, + concurrency, drain_time, drain_strategy, this]() -> void 
{ + threadRoutine(version, deterministic, process_object, validator_config, concurrency, + drain_time, drain_strategy); }); // If any steps need to be done prior to workers starting, do them now. E.g., xDS pre-init. @@ -165,36 +170,43 @@ void IntegrationTestServer::serverReady() { void IntegrationTestServer::threadRoutine(const Network::Address::IpVersion version, bool deterministic, ProcessObjectOptRef process_object, - bool allow_unknown_static_fields, - bool reject_unknown_dynamic_fields, - uint32_t concurrency) { - OptionsImpl options(Server::createTestOptionsImpl(config_path_, "", version, - allow_unknown_static_fields, - reject_unknown_dynamic_fields, concurrency)); + Server::FieldValidationConfig validation_config, + uint32_t concurrency, std::chrono::seconds drain_time, + Server::DrainStrategy drain_strategy) { + OptionsImpl options(Server::createTestOptionsImpl(config_path_, "", version, validation_config, + concurrency, drain_time, drain_strategy)); Thread::MutexBasicLockable lock; - Runtime::RandomGeneratorPtr random_generator; + Random::RandomGeneratorPtr random_generator; if (deterministic) { - random_generator = std::make_unique>(); + random_generator = std::make_unique>(); } else { - random_generator = std::make_unique(); + random_generator = std::make_unique(); } createAndRunEnvoyServer(options, time_system_, Network::Utility::getLocalAddress(version), *this, lock, *this, std::move(random_generator), process_object); } +IntegrationTestServerImpl::IntegrationTestServerImpl(Event::TestTimeSystem& time_system, + Api::Api& api, const std::string& config_path, + bool use_real_stats) + : IntegrationTestServer(time_system, api, config_path), + symbol_table_(Stats::SymbolTableCreator::makeSymbolTable()) { + stats_allocator_ = + (use_real_stats ? 
std::make_unique(*symbol_table_) + : std::make_unique(*symbol_table_)); +} + void IntegrationTestServerImpl::createAndRunEnvoyServer( OptionsImpl& options, Event::TimeSystem& time_system, Network::Address::InstanceConstSharedPtr local_address, ListenerHooks& hooks, Thread::BasicLockable& access_log_lock, Server::ComponentFactory& component_factory, - Runtime::RandomGeneratorPtr&& random_generator, ProcessObjectOptRef process_object) { + Random::RandomGeneratorPtr&& random_generator, ProcessObjectOptRef process_object) { { Init::ManagerImpl init_manager{"Server"}; - Stats::SymbolTablePtr symbol_table = Stats::SymbolTableCreator::makeSymbolTable(); Server::HotRestartNopImpl restarter; ThreadLocal::InstanceImpl tls; - Stats::AllocatorImpl stats_allocator(*symbol_table); - Stats::ThreadLocalStoreImpl stat_store(stats_allocator); + Stats::ThreadLocalStoreImpl stat_store(*stats_allocator_); std::unique_ptr process_context; if (process_object.has_value()) { process_context = std::make_unique(process_object->get()); @@ -218,18 +230,25 @@ void IntegrationTestServerImpl::createAndRunEnvoyServer( IntegrationTestServerImpl::~IntegrationTestServerImpl() { ENVOY_LOG(info, "stopping integration test server"); - Network::Address::InstanceConstSharedPtr admin_address(admin_address_); - admin_address_ = nullptr; + if (useAdminInterfaceToQuit()) { + Network::Address::InstanceConstSharedPtr admin_address(admin_address_); + if (admin_address != nullptr) { + BufferingStreamDecoderPtr response = IntegrationUtil::makeSingleRequest( + admin_address, "POST", "/quitquitquit", "", Http::CodecClient::Type::HTTP1); + EXPECT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().getStatusValue()); + server_gone_.WaitForNotification(); + } + } else { + if (server_) { + server_->dispatcher().post([this]() { server_->shutdown(); }); + server_gone_.WaitForNotification(); + } + } + server_ = nullptr; + admin_address_ = nullptr; stat_store_ = nullptr; - - if (admin_address != nullptr) { - 
BufferingStreamDecoderPtr response = IntegrationUtil::makeSingleRequest( - admin_address, "POST", "/quitquitquit", "", Http::CodecClient::Type::HTTP1); - EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); - server_gone_.WaitForNotification(); - } } } // namespace Envoy diff --git a/test/integration/server.h b/test/integration/server.h index 22f80a09b57b2..4dc9a3ee21ea7 100644 --- a/test/integration/server.h +++ b/test/integration/server.h @@ -6,6 +6,7 @@ #include #include +#include "envoy/config/listener/v3/listener.pb.h" #include "envoy/server/options.h" #include "envoy/server/process_context.h" #include "envoy/stats/stats.h" @@ -14,7 +15,9 @@ #include "common/common/lock_guard.h" #include "common/common/logger.h" #include "common/common/thread.h" +#include "common/stats/allocator_impl.h" +#include "server/drain_manager_impl.h" #include "server/listener_hooks.h" #include "server/options_impl.h" #include "server/server.h" @@ -30,27 +33,26 @@ namespace Envoy { namespace Server { -// Create OptionsImpl structures suitable for tests. -OptionsImpl createTestOptionsImpl(const std::string& config_path, const std::string& config_yaml, - Network::Address::IpVersion ip_version, - bool allow_unknown_static_fields = false, - bool reject_unknown_dynamic_fields = false, - uint32_t concurrency = 1); - -class TestDrainManager : public DrainManager { -public: - // Server::DrainManager - bool drainClose() const override { return draining_; } - void startDrainSequence(std::function) override {} - void startParentShutdownSequence() override {} - - bool draining_{}; +struct FieldValidationConfig { + bool allow_unknown_static_fields = false; + bool reject_unknown_dynamic_fields = false; + bool ignore_unknown_dynamic_fields = false; }; +// Create OptionsImpl structures suitable for tests. Disables hot restart. 
+OptionsImpl +createTestOptionsImpl(const std::string& config_path, const std::string& config_yaml, + Network::Address::IpVersion ip_version, + FieldValidationConfig validation_config = FieldValidationConfig(), + uint32_t concurrency = 1, + std::chrono::seconds drain_time = std::chrono::seconds(1), + Server::DrainStrategy drain_strategy = Server::DrainStrategy::Gradual); + class TestComponentFactory : public ComponentFactory { public: - Server::DrainManagerPtr createDrainManager(Server::Instance&) override { - return Server::DrainManagerPtr{new Server::TestDrainManager()}; + Server::DrainManagerPtr createDrainManager(Server::Instance& server) override { + return Server::DrainManagerPtr{ + new Server::DrainManagerImpl(server, envoy::config::listener::v3::Listener::MODIFY_ONLY)}; } Runtime::LoaderPtr createRuntime(Server::Instance& server, Server::Configuration::Initial& config) override { @@ -148,11 +150,121 @@ class TestScopeWrapper : public Scope { } SymbolTable& symbolTable() override { return wrapped_scope_->symbolTable(); } + bool iterate(const IterateFn& fn) const override { return wrapped_scope_->iterate(fn); } + bool iterate(const IterateFn& fn) const override { return wrapped_scope_->iterate(fn); } + bool iterate(const IterateFn& fn) const override { + return wrapped_scope_->iterate(fn); + } + bool iterate(const IterateFn& fn) const override { + return wrapped_scope_->iterate(fn); + } + private: Thread::MutexBasicLockable& lock_; ScopePtr wrapped_scope_; }; +// A counter which signals on a condition variable when it is incremented. 
+class NotifyingCounter : public Stats::Counter { +public: + NotifyingCounter(Stats::Counter* counter, absl::Mutex& mutex, absl::CondVar& condvar) + : counter_(counter), mutex_(mutex), condvar_(condvar) {} + + std::string name() const override { return counter_->name(); } + StatName statName() const override { return counter_->statName(); } + TagVector tags() const override { return counter_->tags(); } + std::string tagExtractedName() const override { return counter_->tagExtractedName(); } + void iterateTagStatNames(const TagStatNameIterFn& fn) const override { + counter_->iterateTagStatNames(fn); + } + void add(uint64_t amount) override { + counter_->add(amount); + absl::MutexLock l(&mutex_); + condvar_.Signal(); + } + void inc() override { add(1); } + uint64_t latch() override { return counter_->latch(); } + void reset() override { return counter_->reset(); } + uint64_t value() const override { return counter_->value(); } + void incRefCount() override { counter_->incRefCount(); } + bool decRefCount() override { return counter_->decRefCount(); } + uint32_t use_count() const override { return counter_->use_count(); } + StatName tagExtractedStatName() const override { return counter_->tagExtractedStatName(); } + bool used() const override { return counter_->used(); } + SymbolTable& symbolTable() override { return counter_->symbolTable(); } + const SymbolTable& constSymbolTable() const override { return counter_->constSymbolTable(); } + +private: + std::unique_ptr counter_; + absl::Mutex& mutex_; + absl::CondVar& condvar_; +}; + +// A stats allocator which creates NotifyingCounters rather than regular CounterImpls. 
+class NotifyingAllocatorImpl : public Stats::AllocatorImpl { +public: + using Stats::AllocatorImpl::AllocatorImpl; + + void waitForCounterFromStringEq(const std::string& name, uint64_t value) { + absl::MutexLock l(&mutex_); + ENVOY_LOG_MISC(trace, "waiting for {} to be {}", name, value); + while (getCounterLockHeld(name) == nullptr || getCounterLockHeld(name)->value() != value) { + condvar_.Wait(&mutex_); + } + ENVOY_LOG_MISC(trace, "done waiting for {} to be {}", name, value); + } + + void waitForCounterFromStringGe(const std::string& name, uint64_t value) { + absl::MutexLock l(&mutex_); + ENVOY_LOG_MISC(trace, "waiting for {} to be {}", name, value); + while (getCounterLockHeld(name) == nullptr || getCounterLockHeld(name)->value() < value) { + condvar_.Wait(&mutex_); + } + ENVOY_LOG_MISC(trace, "done waiting for {} to be {}", name, value); + } + + void waitForCounterExists(const std::string& name) { + absl::MutexLock l(&mutex_); + ENVOY_LOG_MISC(trace, "waiting for {} to exist", name); + while (getCounterLockHeld(name) == nullptr) { + condvar_.Wait(&mutex_); + } + ENVOY_LOG_MISC(trace, "done waiting for {} to exist", name); + } + +protected: + Stats::Counter* makeCounterInternal(StatName name, StatName tag_extracted_name, + const StatNameTagVector& stat_name_tags) override { + Stats::Counter* counter = new NotifyingCounter( + Stats::AllocatorImpl::makeCounterInternal(name, tag_extracted_name, stat_name_tags), mutex_, + condvar_); + { + absl::MutexLock l(&mutex_); + // Allow getting the counter directly from the allocator, since it's harder to + // signal when the counter has been added to a given stats store. 
+ counters_.emplace(counter->name(), counter); + if (counter->name() == "cluster_manager.cluster_removed") { + } + condvar_.Signal(); + } + return counter; + } + + virtual Stats::Counter* getCounterLockHeld(const std::string& name) + ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_) { + auto it = counters_.find(name); + if (it != counters_.end()) { + return it->second; + } + return nullptr; + } + +private: + absl::flat_hash_map counters_; + absl::Mutex mutex_; + absl::CondVar condvar_; +}; + /** * This is a variant of the isolated store that has locking across all operations so that it can * be used during the integration tests. @@ -239,10 +351,16 @@ class TestIsolatedStoreImpl : public StoreRoot { return store_.textReadouts(); } + bool iterate(const IterateFn& fn) const override { return store_.iterate(fn); } + bool iterate(const IterateFn& fn) const override { return store_.iterate(fn); } + bool iterate(const IterateFn& fn) const override { return store_.iterate(fn); } + bool iterate(const IterateFn& fn) const override { return store_.iterate(fn); } + // Stats::StoreRoot void addSink(Sink&) override {} void setTagProducer(TagProducerPtr&&) override {} void setStatsMatcher(StatsMatcherPtr&&) override {} + void setHistogramSettings(HistogramSettingsConstPtr&&) override {} void initializeThreading(Event::Dispatcher&, ThreadLocal::Instance&) override {} void shutdownThreading() override {} void mergeHistograms(PostMergeCb) override {} @@ -268,20 +386,24 @@ class IntegrationTestServer : public Logger::Loggable, public IntegrationTestServerStats, public Server::ComponentFactory { public: - static IntegrationTestServerPtr create( - const std::string& config_path, const Network::Address::IpVersion version, - std::function on_server_ready_function, - std::function on_server_init_function, bool deterministic, - Event::TestTimeSystem& time_system, Api::Api& api, bool defer_listener_finalization = false, - ProcessObjectOptRef process_object = absl::nullopt, bool 
allow_unknown_static_fields = false, - bool reject_unknown_dynamic_fields = false, uint32_t concurrency = 1); + static IntegrationTestServerPtr + create(const std::string& config_path, const Network::Address::IpVersion version, + std::function on_server_ready_function, + std::function on_server_init_function, bool deterministic, + Event::TestTimeSystem& time_system, Api::Api& api, + bool defer_listener_finalization = false, + ProcessObjectOptRef process_object = absl::nullopt, + Server::FieldValidationConfig validation_config = Server::FieldValidationConfig(), + uint32_t concurrency = 1, std::chrono::seconds drain_time = std::chrono::seconds(1), + Server::DrainStrategy drain_strategy = Server::DrainStrategy::Gradual, + bool use_real_stats = false); // Note that the derived class is responsible for tearing down the server in its // destructor. ~IntegrationTestServer() override; void waitUntilListenersReady(); - Server::TestDrainManager& drainManager() { return *drain_manager_; } + Server::DrainManagerImpl& drainManager() { return *drain_manager_; } void setOnWorkerListenerAddedCb(std::function on_worker_listener_added) { on_worker_listener_added_cb_ = std::move(on_worker_listener_added); } @@ -296,48 +418,61 @@ class IntegrationTestServer : public Logger::Loggable, void start(const Network::Address::IpVersion version, std::function on_server_init_function, bool deterministic, bool defer_listener_finalization, ProcessObjectOptRef process_object, - bool allow_unknown_static_fields, bool reject_unknown_dynamic_fields, - uint32_t concurrency); + Server::FieldValidationConfig validation_config, uint32_t concurrency, + std::chrono::seconds drain_time, Server::DrainStrategy drain_strategy); - void waitForCounterEq(const std::string& name, uint64_t value) override { - TestUtility::waitForCounterEq(stat_store(), name, value, time_system_); + void + waitForCounterEq(const std::string& name, uint64_t value, + std::chrono::milliseconds timeout = 
std::chrono::milliseconds::zero()) override { + ASSERT_TRUE(TestUtility::waitForCounterGe(statStore(), name, value, time_system_, timeout)); } - void waitForCounterGe(const std::string& name, uint64_t value) override { - TestUtility::waitForCounterGe(stat_store(), name, value, time_system_); + void + waitForCounterGe(const std::string& name, uint64_t value, + std::chrono::milliseconds timeout = std::chrono::milliseconds::zero()) override { + ASSERT_TRUE(TestUtility::waitForCounterGe(statStore(), name, value, time_system_, timeout)); } - void waitForGaugeGe(const std::string& name, uint64_t value) override { - TestUtility::waitForGaugeGe(stat_store(), name, value, time_system_); + void + waitForGaugeEq(const std::string& name, uint64_t value, + std::chrono::milliseconds timeout = std::chrono::milliseconds::zero()) override { + ASSERT_TRUE(TestUtility::waitForGaugeEq(statStore(), name, value, time_system_, timeout)); } - void waitForGaugeEq(const std::string& name, uint64_t value) override { - TestUtility::waitForGaugeEq(stat_store(), name, value, time_system_); + void + waitForGaugeGe(const std::string& name, uint64_t value, + std::chrono::milliseconds timeout = std::chrono::milliseconds::zero()) override { + ASSERT_TRUE(TestUtility::waitForGaugeGe(statStore(), name, value, time_system_, timeout)); + } + + void waitForCounterExists(const std::string& name) override { + notifyingStatsAllocator().waitForCounterExists(name); } Stats::CounterSharedPtr counter(const std::string& name) override { // When using the thread local store, only counters() is thread safe. This also allows us // to test if a counter exists at all versus just defaulting to zero. - return TestUtility::findCounter(stat_store(), name); + return TestUtility::findCounter(statStore(), name); } Stats::GaugeSharedPtr gauge(const std::string& name) override { // When using the thread local store, only gauges() is thread safe. 
This also allows us // to test if a counter exists at all versus just defaulting to zero. - return TestUtility::findGauge(stat_store(), name); + return TestUtility::findGauge(statStore(), name); } - std::vector counters() override { return stat_store().counters(); } + std::vector counters() override { return statStore().counters(); } - std::vector gauges() override { return stat_store().gauges(); } + std::vector gauges() override { return statStore().gauges(); } // ListenerHooks void onWorkerListenerAdded() override; void onWorkerListenerRemoved() override; // Server::ComponentFactory - Server::DrainManagerPtr createDrainManager(Server::Instance&) override { - drain_manager_ = new Server::TestDrainManager(); + Server::DrainManagerPtr createDrainManager(Server::Instance& server) override { + drain_manager_ = + new Server::DrainManagerImpl(server, envoy::config::listener::v3::Listener::MODIFY_ONLY); return Server::DrainManagerPtr{drain_manager_}; } Runtime::LoaderPtr createRuntime(Server::Instance& server, @@ -347,8 +482,11 @@ class IntegrationTestServer : public Logger::Loggable, // Should not be called until createAndRunEnvoyServer() is called. virtual Server::Instance& server() PURE; - virtual Stats::Store& stat_store() PURE; - virtual Network::Address::InstanceConstSharedPtr admin_address() PURE; + virtual Stats::Store& statStore() PURE; + virtual Network::Address::InstanceConstSharedPtr adminAddress() PURE; + virtual Stats::NotifyingAllocatorImpl& notifyingStatsAllocator() PURE; + void useAdminInterfaceToQuit(bool use) { use_admin_interface_to_quit_ = use; } + bool useAdminInterfaceToQuit() { return use_admin_interface_to_quit_; } protected: IntegrationTestServer(Event::TestTimeSystem& time_system, Api::Api& api, @@ -356,18 +494,18 @@ class IntegrationTestServer : public Logger::Loggable, : time_system_(time_system), api_(api), config_path_(config_path) {} // Create the running envoy server. 
This function will call serverReady() when the virtual - // functions server(), stat_store(), and admin_address() may be called, but before the server + // functions server(), statStore(), and adminAddress() may be called, but before the server // has been started. // The subclass is also responsible for tearing down this server in its destructor. virtual void createAndRunEnvoyServer(OptionsImpl& options, Event::TimeSystem& time_system, Network::Address::InstanceConstSharedPtr local_address, ListenerHooks& hooks, Thread::BasicLockable& access_log_lock, Server::ComponentFactory& component_factory, - Runtime::RandomGeneratorPtr&& random_generator, + Random::RandomGeneratorPtr&& random_generator, ProcessObjectOptRef process_object) PURE; // Will be called by subclass on server thread when the server is ready to be accessed. The - // server may not have been run yet, but all server access methods (server(), stat_store(), + // server may not have been run yet, but all server access methods (server(), statStore(), // adminAddress()) will be available. void serverReady(); @@ -376,8 +514,9 @@ class IntegrationTestServer : public Logger::Loggable, * Runs the real server on a thread. 
*/ void threadRoutine(const Network::Address::IpVersion version, bool deterministic, - ProcessObjectOptRef process_object, bool allow_unknown_static_fields, - bool reject_unknown_dynamic_fields, uint32_t concurrency); + ProcessObjectOptRef process_object, + Server::FieldValidationConfig validation_config, uint32_t concurrency, + std::chrono::seconds drain_time, Server::DrainStrategy drain_strategy); Event::TestTimeSystem& time_system_; Api::Api& api_; @@ -387,19 +526,19 @@ class IntegrationTestServer : public Logger::Loggable, Thread::MutexBasicLockable listeners_mutex_; uint64_t pending_listeners_; ConditionalInitializer server_set_; - Server::TestDrainManager* drain_manager_{}; + Server::DrainManagerImpl* drain_manager_{}; std::function on_worker_listener_added_cb_; std::function on_worker_listener_removed_cb_; TcpDumpPtr tcp_dump_; std::function on_server_ready_cb_; + bool use_admin_interface_to_quit_{}; }; // Default implementation of IntegrationTestServer class IntegrationTestServerImpl : public IntegrationTestServer { public: IntegrationTestServerImpl(Event::TestTimeSystem& time_system, Api::Api& api, - const std::string& config_path) - : IntegrationTestServer(time_system, api, config_path) {} + const std::string& config_path, bool real_stats = false); ~IntegrationTestServerImpl() override; @@ -407,18 +546,25 @@ class IntegrationTestServerImpl : public IntegrationTestServer { RELEASE_ASSERT(server_ != nullptr, ""); return *server_; } - Stats::Store& stat_store() override { + Stats::Store& statStore() override { RELEASE_ASSERT(stat_store_ != nullptr, ""); return *stat_store_; } - Network::Address::InstanceConstSharedPtr admin_address() override { return admin_address_; } + Network::Address::InstanceConstSharedPtr adminAddress() override { return admin_address_; } + + Stats::NotifyingAllocatorImpl& notifyingStatsAllocator() override { + auto* ret = dynamic_cast(stats_allocator_.get()); + RELEASE_ASSERT(ret != nullptr, + "notifyingStatsAllocator() is not created 
when real_stats is true"); + return *ret; + } private: void createAndRunEnvoyServer(OptionsImpl& options, Event::TimeSystem& time_system, Network::Address::InstanceConstSharedPtr local_address, ListenerHooks& hooks, Thread::BasicLockable& access_log_lock, Server::ComponentFactory& component_factory, - Runtime::RandomGeneratorPtr&& random_generator, + Random::RandomGeneratorPtr&& random_generator, ProcessObjectOptRef process_object) override; // Owned by this class. An owning pointer is not used because the actual allocation is done @@ -427,6 +573,8 @@ class IntegrationTestServerImpl : public IntegrationTestServer { Stats::Store* stat_store_{}; Network::Address::InstanceConstSharedPtr admin_address_; absl::Notification server_gone_; + Stats::SymbolTablePtr symbol_table_; + std::unique_ptr stats_allocator_; }; } // namespace Envoy diff --git a/test/integration/server_stats.h b/test/integration/server_stats.h index 859363aa0f110..c3ab300f05053 100644 --- a/test/integration/server_stats.h +++ b/test/integration/server_stats.h @@ -13,29 +13,47 @@ class IntegrationTestServerStats { * Wait for a counter to == a given value. * @param name counter name. * @param value target value. + * @param timeout amount of time to wait before asserting false, or 0 for no timeout. */ - virtual void waitForCounterEq(const std::string& name, uint64_t value) PURE; + virtual void + waitForCounterEq(const std::string& name, uint64_t value, + std::chrono::milliseconds timeout = std::chrono::milliseconds::zero()) PURE; /** * Wait for a counter to >= a given value. * @param name counter name. * @param value target value. + * @param timeout amount of time to wait before asserting false, or 0 for no timeout. */ - virtual void waitForCounterGe(const std::string& name, uint64_t value) PURE; + virtual void + waitForCounterGe(const std::string& name, uint64_t value, + std::chrono::milliseconds timeout = std::chrono::milliseconds::zero()) PURE; + + /** + * Wait for a counter to exist. 
+ * @param name counter name. + */ + virtual void waitForCounterExists(const std::string& name) PURE; /** * Wait for a gauge to >= a given value. * @param name gauge name. * @param value target value. + * @param timeout amount of time to wait before asserting false, or 0 for no timeout. */ - virtual void waitForGaugeGe(const std::string& name, uint64_t value) PURE; + virtual void + waitForGaugeGe(const std::string& name, uint64_t value, + std::chrono::milliseconds timeout = std::chrono::milliseconds::zero()) PURE; /** * Wait for a gauge to == a given value. * @param name gauge name. * @param value target value. + * @param timeout amount of time to wait before asserting false, or 0 for no timeout. */ - virtual void waitForGaugeEq(const std::string& name, uint64_t value) PURE; + virtual void + waitForGaugeEq(const std::string& name, uint64_t value, + std::chrono::milliseconds timeout = std::chrono::milliseconds::zero()) PURE; /** * Counter lookup. This is not thread safe, since we don't get a consistent diff --git a/test/integration/socket_interface_integration_test.cc b/test/integration/socket_interface_integration_test.cc new file mode 100644 index 0000000000000..3e40a901d6c3f --- /dev/null +++ b/test/integration/socket_interface_integration_test.cc @@ -0,0 +1,90 @@ +#include "common/network/socket_interface.h" + +#include "test/integration/integration.h" +#include "test/test_common/environment.h" +#include "test/test_common/utility.h" + +#include "gtest/gtest.h" + +namespace Envoy { +namespace { + +class SocketInterfaceIntegrationTest : public BaseIntegrationTest, + public testing::TestWithParam { +public: + SocketInterfaceIntegrationTest() : BaseIntegrationTest(GetParam(), config()) { + use_lds_ = false; + }; + + static std::string config() { + // At least one empty filter chain needs to be specified. 
+ return absl::StrCat(echoConfig(), R"EOF( +bootstrap_extensions: + - name: envoy.extensions.network.socket_interface.default_socket_interface + typed_config: + "@type": type.googleapis.com/envoy.extensions.network.socket_interface.v3.DefaultSocketInterface +default_socket_interface: "envoy.extensions.network.socket_interface.default_socket_interface" + )EOF"); + } + static std::string echoConfig() { + return absl::StrCat(ConfigHelper::baseConfig(), R"EOF( + filter_chains: + filters: + name: ratelimit + typed_config: + "@type": type.googleapis.com/envoy.config.filter.network.rate_limit.v2.RateLimit + domain: foo + stats_prefix: name + descriptors: [{"key": "foo", "value": "bar"}] + filters: + name: envoy.filters.network.echo + )EOF"); + } +}; + +INSTANTIATE_TEST_SUITE_P(IpVersions, SocketInterfaceIntegrationTest, + testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + TestUtility::ipTestParamsToString); + +TEST_P(SocketInterfaceIntegrationTest, Basic) { + BaseIntegrationTest::initialize(); + const Network::SocketInterface* factory = Network::socketInterface( + "envoy.extensions.network.socket_interface.default_socket_interface"); + ASSERT_TRUE(Network::SocketInterfaceSingleton::getExisting() == factory); + + std::string response; + auto connection = createConnectionDriver( + lookupPort("listener_0"), "hello", + [&response](Network::ClientConnection& conn, const Buffer::Instance& data) -> void { + response.append(data.toString()); + conn.close(Network::ConnectionCloseType::FlushWrite); + }); + connection->run(); + EXPECT_EQ("hello", response); +} + +TEST_P(SocketInterfaceIntegrationTest, AddressWithSocketInterface) { + BaseIntegrationTest::initialize(); + + ConnectionStatusCallbacks connect_callbacks_; + Network::ClientConnectionPtr client_; + Network::Address::InstanceConstSharedPtr address = + std::make_shared( + Network::Test::getLoopbackAddressUrlString(Network::Address::IpVersion::v4), + lookupPort("listener_0"), + 
"envoy.extensions.network.socket_interface.default_socket_interface"); + + client_ = dispatcher_->createClientConnection(address, Network::Address::InstanceConstSharedPtr(), + Network::Test::createRawBufferSocket(), nullptr); + + client_->addConnectionCallbacks(connect_callbacks_); + client_->connect(); + while (!connect_callbacks_.connected() && !connect_callbacks_.closed()) { + dispatcher_->run(Event::Dispatcher::RunType::NonBlock); + } + + client_->close(Network::ConnectionCloseType::FlushWrite); +} + +} // namespace +} // namespace Envoy \ No newline at end of file diff --git a/test/integration/ssl_utility.cc b/test/integration/ssl_utility.cc index af54c2cdb40ab..aab3dd5d4adf8 100644 --- a/test/integration/ssl_utility.cc +++ b/test/integration/ssl_utility.cc @@ -2,6 +2,7 @@ #include "envoy/extensions/transport_sockets/tls/v3/cert.pb.h" +#include "common/http/utility.h" #include "common/json/json_loader.h" #include "common/network/utility.h" @@ -11,7 +12,7 @@ #include "test/config/utility.h" #include "test/integration/server.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/transport_socket_factory_context.h" #include "test/test_common/environment.h" #include "test/test_common/network_utility.h" @@ -54,8 +55,8 @@ createClientSslTransportSocketFactory(const ClientSslTransportOptions& options, auto* common_context = tls_context.mutable_common_tls_context(); if (options.alpn_) { - common_context->add_alpn_protocols("h2"); - common_context->add_alpn_protocols("http/1.1"); + common_context->add_alpn_protocols(Http::Utility::AlpnNames::get().Http2); + common_context->add_alpn_protocols(Http::Utility::AlpnNames::get().Http11); } if (options.san_) { common_context->mutable_validation_context() diff --git a/test/integration/stats_integration_test.cc b/test/integration/stats_integration_test.cc index 75012bd02116a..f66a9b8a14bf2 100644 --- a/test/integration/stats_integration_test.cc +++ b/test/integration/stats_integration_test.cc @@ -25,11 +25,6 @@ 
class StatsIntegrationTest : public testing::TestWithParam::GetParam()) {} + : BaseIntegrationTest(testing::TestWithParam::GetParam()) { + use_real_stats_ = true; + } static size_t computeMemoryDelta(int initial_num_clusters, int initial_num_hosts, int final_num_clusters, int final_num_hosts, bool allow_stats) { @@ -195,7 +192,6 @@ class ClusterMemoryTestHelper : public BaseIntegrationTest { auto* socket_address = host->mutable_socket_address(); socket_address->set_protocol(envoy::config::core::v3::SocketAddress::TCP); socket_address->set_address("0.0.0.0"); - socket_address->set_port_value(80); } } }); @@ -204,9 +200,15 @@ class ClusterMemoryTestHelper : public BaseIntegrationTest { return memory_test.consumedBytes(); } }; + class ClusterMemoryTestRunner : public testing::TestWithParam { protected: + ClusterMemoryTestRunner() + : ip_version_(testing::TestWithParam::GetParam()) {} + Stats::TestUtil::SymbolTableCreatorTestPeer symbol_table_creator_test_peer_; + + Network::Address::IpVersion ip_version_; }; INSTANTIATE_TEST_SUITE_P(IpVersions, ClusterMemoryTestRunner, @@ -271,6 +273,21 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithFakeSymbolTable) { // 2020/03/24 10501 44261 44600 upstream: upstream_rq_retry_limit_exceeded. // 2020/04/02 10624 43356 44000 Use 100 clusters rather than 1000 to avoid timeouts // 2020/04/07 10661 43349 44000 fix clang tidy on master + // 2020/04/23 10531 44169 44600 http: max stream duration upstream support. + // 2020/04/23 10661 44425 46000 per-listener connection limits + // 2020/05/05 10908 44233 44600 router: add InternalRedirectPolicy and predicate + // 2020/05/13 10531 44425 44600 Refactor resource manager + // 2020/05/20 11223 44491 44600 Add primary clusters tracking to cluster manager. + // 2020/06/10 11561 44491 44811 Make upstreams pluggable + // 2020/06/29 11751 44715 46000 Improve time complexity of removing callback handle + // in callback manager. 
+ // 2020/07/07 11252 44971 46000 Introduce Least Request LB active request bias config + // 2020/07/15 11748 45003 46000 Stream error on invalid messaging + // 2020/07/20 11559 44747 46000 stats: add histograms for request/response headers + // and body sizes. + // 2020/07/21 12034 44811 46000 Add configurable histogram buckets. + // 2020/07/31 12035 45002 46000 Init manager store unready targets in hash map. + // 2020/08/10 12275 44949 46000 Re-organize tls histogram maps to improve continuity. // Note: when adjusting this value: EXPECT_MEMORY_EQ is active only in CI // 'release' builds, where we control the platform and tool-chain. So you @@ -284,8 +301,14 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithFakeSymbolTable) { // If you encounter a failure here, please see // https://github.com/envoyproxy/envoy/blob/master/source/docs/stats.md#stats-memory-tests // for details on how to fix. - EXPECT_MEMORY_EQ(m_per_cluster, 43993); - EXPECT_MEMORY_LE(m_per_cluster, 44100); + // + // We only run the exact test for ipv6 because ipv4 in some cases may allocate a + // different number of bytes. We still run the approximate test. + if (ip_version_ != Network::Address::IpVersion::v6) { + // https://github.com/envoyproxy/envoy/issues/12209 + // EXPECT_MEMORY_EQ(m_per_cluster, 44949); + } + EXPECT_MEMORY_LE(m_per_cluster, 46000); // Round up to allow platform variations. } TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithRealSymbolTable) { @@ -329,6 +352,20 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithRealSymbolTable) { // 2020/03/24 10501 36300 36800 upstream: upstream_rq_retry_limit_exceeded. // 2020/04/02 10624 35564 36000 Use 100 clusters rather than 1000 to avoid timeouts // 2020/04/07 10661 35557 36000 fix clang tidy on master + // 2020/04/23 10531 36281 36800 http: max stream duration upstream support. 
+ // 2020/04/23 10661 36537 37000 per-listener connection limits + // 2020/05/05 10908 36345 36800 router: add InternalRedirectPolicy and predicate + // 2020/05/13 10531 36537 36800 Refactor resource manager + // 2020/05/20 11223 36603 36800 Add primary clusters tracking to cluster manager. + // 2020/06/10 11561 36603 36923 Make upstreams pluggable + // 2020/06/29 11751 36827 38000 Improve time complexity of removing callback handle. + // 2020/07/07 11252 37083 38000 Introduce Least Request LB active request bias config + // 2020/07/15 11748 37115 38000 Stream error on invalid messaging + // 2020/07/20 11559 36859 38000 stats: add histograms for request/response headers + // and body sizes. + // 2020/07/21 12034 36923 38000 Add configurable histogram buckets. + // 2020/07/31 12035 37114 38000 Init manager store unready targets in hash map. + // 2020/08/10 12275 37061 38000 Re-organize tls histogram maps to improve continuity. // Note: when adjusting this value: EXPECT_MEMORY_EQ is active only in CI // 'release' builds, where we control the platform and tool-chain. So you @@ -342,8 +379,14 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithRealSymbolTable) { // If you encounter a failure here, please see // https://github.com/envoyproxy/envoy/blob/master/source/docs/stats.md#stats-memory-tests // for details on how to fix. - EXPECT_MEMORY_EQ(m_per_cluster, 36201); - EXPECT_MEMORY_LE(m_per_cluster, 36300); + // + // We only run the exact test for ipv6 because ipv4 in some cases may allocate a + // different number of bytes. We still run the approximate test. + if (ip_version_ != Network::Address::IpVersion::v6) { + // https://github.com/envoyproxy/envoy/issues/12209 + // EXPECT_MEMORY_EQ(m_per_cluster, 37061); + } + EXPECT_MEMORY_LE(m_per_cluster, 38000); // Round up to allow platform variations. 
} TEST_P(ClusterMemoryTestRunner, MemoryLargeHostSizeWithStats) { @@ -384,8 +427,14 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeHostSizeWithStats) { // If you encounter a failure here, please see // https://github.com/envoyproxy/envoy/blob/master/source/docs/stats.md#stats-memory-tests // for details on how to fix. - EXPECT_MEMORY_EQ(m_per_host, 1380); - EXPECT_MEMORY_LE(m_per_host, 1655); + // + // We only run the exact test for ipv6 because ipv4 in some cases may allocate a + // different number of bytes. We still run the approximate test. + if (ip_version_ != Network::Address::IpVersion::v6) { + // https://github.com/envoyproxy/envoy/issues/12209 + // EXPECT_MEMORY_EQ(m_per_host, 1380); + } + EXPECT_MEMORY_LE(m_per_host, 1800); // Round up to allow platform variations. } } // namespace diff --git a/test/integration/tcp_conn_pool_integration_test.cc b/test/integration/tcp_conn_pool_integration_test.cc index 5f1d14711e903..592747627de2c 100644 --- a/test/integration/tcp_conn_pool_integration_test.cc +++ b/test/integration/tcp_conn_pool_integration_test.cc @@ -124,12 +124,6 @@ class TcpConnPoolIntegrationTest : public testing::TestWithParam filter_resolver_; @@ -144,7 +138,7 @@ TEST_P(TcpConnPoolIntegrationTest, SingleRequest) { std::string response("response"); IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("listener_0")); - tcp_client->write(request); + ASSERT_TRUE(tcp_client->write(request)); FakeRawConnectionPtr fake_upstream_connection; ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection)); @@ -164,7 +158,7 @@ TEST_P(TcpConnPoolIntegrationTest, MultipleRequests) { IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("listener_0")); // send request 1 - tcp_client->write(request1); + ASSERT_TRUE(tcp_client->write(request1)); FakeRawConnectionPtr fake_upstream_connection1; ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection1)); std::string data; @@ -172,7 +166,7 @@ 
TEST_P(TcpConnPoolIntegrationTest, MultipleRequests) { EXPECT_EQ(request1, data); // send request 2 - tcp_client->write(request2); + ASSERT_TRUE(tcp_client->write(request2)); FakeRawConnectionPtr fake_upstream_connection2; ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection2)); ASSERT_TRUE(fake_upstream_connection2->waitForData(request2.size(), &data)); diff --git a/test/integration/tcp_proxy_integration_test.cc b/test/integration/tcp_proxy_integration_test.cc index 9742d24fc3c87..39d6a47797401 100644 --- a/test/integration/tcp_proxy_integration_test.cc +++ b/test/integration/tcp_proxy_integration_test.cc @@ -26,14 +26,36 @@ using testing::NiceMock; namespace Envoy { +std::vector getProtocolTestParams() { + std::vector ret; + + for (auto ip_version : TestEnvironment::getIpVersionsForTest()) { + ret.push_back(TcpProxyIntegrationTestParams{ip_version, true}); + ret.push_back(TcpProxyIntegrationTestParams{ip_version, false}); + } + return ret; +} + +std::string +protocolTestParamsToString(const ::testing::TestParamInfo& params) { + return absl::StrCat( + (params.param.version == Network::Address::IpVersion::v4 ? "IPv4_" : "IPv6_"), + (params.param.test_original_version == true ? 
"OriginalConnPool" : "NewConnPool")); +} + void TcpProxyIntegrationTest::initialize() { + if (GetParam().test_original_version) { + config_helper_.addRuntimeOverride("envoy.reloadable_features.new_tcp_connection_pool", "false"); + } else { + config_helper_.addRuntimeOverride("envoy.reloadable_features.new_tcp_connection_pool", "true"); + } + config_helper_.renameListener("tcp_proxy"); BaseIntegrationTest::initialize(); } -INSTANTIATE_TEST_SUITE_P(IpVersions, TcpProxyIntegrationTest, - testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), - TestUtility::ipTestParamsToString); +INSTANTIATE_TEST_SUITE_P(TcpProxyIntegrationTestParams, TcpProxyIntegrationTest, + testing::ValuesIn(getProtocolTestParams()), protocolTestParamsToString); // Test upstream writing before downstream downstream does. TEST_P(TcpProxyIntegrationTest, TcpProxyUpstreamWritesFirst) { @@ -48,20 +70,20 @@ TEST_P(TcpProxyIntegrationTest, TcpProxyUpstreamWritesFirst) { tcp_client->waitForData("ello", false); // Make sure length based wait works for the data already received - tcp_client->waitForData(5); - tcp_client->waitForData(4); + ASSERT_TRUE(tcp_client->waitForData(5)); + ASSERT_TRUE(tcp_client->waitForData(4)); // Drain part of the received message tcp_client->clearData(2); tcp_client->waitForData("llo"); - tcp_client->waitForData(3); + ASSERT_TRUE(tcp_client->waitForData(3)); - tcp_client->write("hello"); + ASSERT_TRUE(tcp_client->write("hello")); ASSERT_TRUE(fake_upstream_connection->waitForData(5)); ASSERT_TRUE(fake_upstream_connection->write("", true)); tcp_client->waitForHalfClose(); - tcp_client->write("", true); + ASSERT_TRUE(tcp_client->write("", true)); ASSERT_TRUE(fake_upstream_connection->waitForHalfClose()); ASSERT_TRUE(fake_upstream_connection->waitForDisconnect()); } @@ -71,7 +93,7 @@ TEST_P(TcpProxyIntegrationTest, TcpProxyUpstreamWritesFirst) { TEST_P(TcpProxyIntegrationTest, TcpProxyUpstreamDisconnect) { initialize(); IntegrationTcpClientPtr tcp_client = 
makeTcpConnection(lookupPort("tcp_proxy")); - tcp_client->write("hello"); + ASSERT_TRUE(tcp_client->write("hello")); FakeRawConnectionPtr fake_upstream_connection; ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection)); ASSERT_TRUE(fake_upstream_connection->waitForData(5)); @@ -89,13 +111,13 @@ TEST_P(TcpProxyIntegrationTest, TcpProxyUpstreamDisconnect) { TEST_P(TcpProxyIntegrationTest, TcpProxyDownstreamDisconnect) { initialize(); IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("tcp_proxy")); - tcp_client->write("hello"); + ASSERT_TRUE(tcp_client->write("hello")); FakeRawConnectionPtr fake_upstream_connection; ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection)); ASSERT_TRUE(fake_upstream_connection->waitForData(5)); ASSERT_TRUE(fake_upstream_connection->write("world")); tcp_client->waitForData("world"); - tcp_client->write("hello", true); + ASSERT_TRUE(tcp_client->write("hello", true)); ASSERT_TRUE(fake_upstream_connection->waitForData(10)); ASSERT_TRUE(fake_upstream_connection->waitForHalfClose()); ASSERT_TRUE(fake_upstream_connection->write("", true)); @@ -103,13 +125,32 @@ TEST_P(TcpProxyIntegrationTest, TcpProxyDownstreamDisconnect) { tcp_client->waitForDisconnect(); } +TEST_P(TcpProxyIntegrationTest, NoUpstream) { + // Set the first upstream to have an invalid port, so connection will fail, + // but it won't fail synchronously (as it would if there were simply no + // upstreams) + fake_upstreams_count_ = 0; + config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { + auto* cluster = bootstrap.mutable_static_resources()->mutable_clusters(0); + auto* lb_endpoint = + cluster->mutable_load_assignment()->mutable_endpoints(0)->mutable_lb_endpoints(0); + lb_endpoint->mutable_endpoint()->mutable_address()->mutable_socket_address()->set_port_value(1); + }); + config_helper_.skipPortUsageValidation(); + enable_half_close_ = false; + initialize(); + + 
IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("tcp_proxy")); + tcp_client->waitForDisconnect(); +} + TEST_P(TcpProxyIntegrationTest, TcpProxyLargeWrite) { config_helper_.setBufferLimits(1024, 1024); initialize(); std::string data(1024 * 16, 'a'); IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("tcp_proxy")); - tcp_client->write(data); + ASSERT_TRUE(tcp_client->write(data)); FakeRawConnectionPtr fake_upstream_connection; ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection)); ASSERT_TRUE(fake_upstream_connection->waitForData(data.size())); @@ -147,7 +188,7 @@ TEST_P(TcpProxyIntegrationTest, TcpProxyDownstreamFlush) { FakeRawConnectionPtr fake_upstream_connection; ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection)); tcp_client->readDisable(true); - tcp_client->write("", true); + ASSERT_TRUE(tcp_client->write("", true)); // This ensures that readDisable(true) has been run on it's thread // before tcp_client starts writing. @@ -192,7 +233,7 @@ TEST_P(TcpProxyIntegrationTest, TcpProxyUpstreamFlush) { // before tcp_client starts writing. tcp_client->waitForHalfClose(); - tcp_client->write(data, true); + ASSERT_TRUE(tcp_client->write(data, true)); test_server_->waitForGaugeEq("tcp.tcp_stats.upstream_flush_active", 1); ASSERT_TRUE(fake_upstream_connection->readDisable(false)); @@ -223,7 +264,7 @@ TEST_P(TcpProxyIntegrationTest, TcpProxyUpstreamFlushEnvoyExit) { // before tcp_client starts writing. tcp_client->waitForHalfClose(); - tcp_client->write(data, true); + ASSERT_TRUE(tcp_client->write(data, true)); test_server_->waitForGaugeEq("tcp.tcp_stats.upstream_flush_active", 1); test_server_.reset(); @@ -235,7 +276,7 @@ TEST_P(TcpProxyIntegrationTest, TcpProxyUpstreamFlushEnvoyExit) { TEST_P(TcpProxyIntegrationTest, AccessLog) { std::string access_log_path = TestEnvironment::temporaryPath( - fmt::format("access_log{}.txt", GetParam() == Network::Address::IpVersion::v4 ? 
"v4" : "v6")); + fmt::format("access_log{}.txt", version_ == Network::Address::IpVersion::v4 ? "v4" : "v6")); config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0); auto* filter_chain = listener->mutable_filter_chains(0); @@ -250,7 +291,7 @@ TEST_P(TcpProxyIntegrationTest, AccessLog) { access_log->set_name("accesslog"); envoy::extensions::access_loggers::file::v3::FileAccessLog access_log_config; access_log_config.set_path(access_log_path); - access_log_config.set_format( + access_log_config.mutable_log_format()->set_text_format( "upstreamlocal=%UPSTREAM_LOCAL_ADDRESS% " "upstreamhost=%UPSTREAM_HOST% downstream=%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT% " "sent=%BYTES_SENT% received=%BYTES_RECEIVED%\n"); @@ -274,7 +315,7 @@ TEST_P(TcpProxyIntegrationTest, AccessLog) { ASSERT_TRUE(fake_upstream_connection->write("", true)); tcp_client->waitForHalfClose(); - tcp_client->write("", true); + ASSERT_TRUE(tcp_client->write("", true)); ASSERT_TRUE(fake_upstream_connection->waitForHalfClose()); ASSERT_TRUE(fake_upstream_connection->waitForDisconnect()); @@ -286,17 +327,17 @@ TEST_P(TcpProxyIntegrationTest, AccessLog) { // Regex matching localhost:port #ifndef GTEST_USES_SIMPLE_RE - const std::string ip_port_regex = (GetParam() == Network::Address::IpVersion::v4) + const std::string ip_port_regex = (version_ == Network::Address::IpVersion::v4) ? R"EOF(127\.0\.0\.1:[0-9]+)EOF" : R"EOF(\[::1\]:[0-9]+)EOF"; #else - const std::string ip_port_regex = (GetParam() == Network::Address::IpVersion::v4) + const std::string ip_port_regex = (version_ == Network::Address::IpVersion::v4) ? R"EOF(127\.0\.0\.1:\d+)EOF" : R"EOF(\[::1\]:\d+)EOF"; #endif const std::string ip_regex = - (GetParam() == Network::Address::IpVersion::v4) ? R"EOF(127\.0\.0\.1)EOF" : R"EOF(::1)EOF"; + (version_ == Network::Address::IpVersion::v4) ? 
R"EOF(127\.0\.0\.1)EOF" : R"EOF(::1)EOF"; // Test that all three addresses were populated correctly. Only check the first line of // log output for simplicity. @@ -317,13 +358,13 @@ TEST_P(TcpProxyIntegrationTest, ShutdownWithOpenConnections) { }); initialize(); IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("tcp_proxy")); - tcp_client->write("hello"); + ASSERT_TRUE(tcp_client->write("hello")); FakeRawConnectionPtr fake_upstream_connection; ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection)); ASSERT_TRUE(fake_upstream_connection->waitForData(5)); ASSERT_TRUE(fake_upstream_connection->write("world")); tcp_client->waitForData("world"); - tcp_client->write("hello", false); + ASSERT_TRUE(tcp_client->write("hello", false)); ASSERT_TRUE(fake_upstream_connection->waitForData(10)); test_server_.reset(); ASSERT_TRUE(fake_upstream_connection->waitForHalfClose()); @@ -383,13 +424,137 @@ TEST_P(TcpProxyIntegrationTest, TestIdletimeoutWithLargeOutstandingData) { ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection)); std::string data(1024 * 16, 'a'); - tcp_client->write(data); + ASSERT_TRUE(tcp_client->write(data)); ASSERT_TRUE(fake_upstream_connection->write(data)); tcp_client->waitForDisconnect(true); ASSERT_TRUE(fake_upstream_connection->waitForDisconnect(true)); } +TEST_P(TcpProxyIntegrationTest, TestNoCloseOnHealthFailure) { + concurrency_ = 2; + + config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { + auto* static_resources = bootstrap.mutable_static_resources(); + for (int i = 0; i < static_resources->clusters_size(); ++i) { + auto* cluster = static_resources->mutable_clusters(i); + cluster->set_close_connections_on_host_health_failure(false); + cluster->mutable_common_lb_config()->mutable_healthy_panic_threshold()->set_value(0); + cluster->add_health_checks()->mutable_timeout()->set_seconds(20); + 
cluster->mutable_health_checks(0)->mutable_reuse_connection()->set_value(true); + cluster->mutable_health_checks(0)->mutable_interval()->set_seconds(1); + cluster->mutable_health_checks(0)->mutable_no_traffic_interval()->set_seconds(1); + cluster->mutable_health_checks(0)->mutable_unhealthy_threshold()->set_value(1); + cluster->mutable_health_checks(0)->mutable_healthy_threshold()->set_value(1); + cluster->mutable_health_checks(0)->mutable_tcp_health_check(); + cluster->mutable_health_checks(0)->mutable_tcp_health_check()->mutable_send()->set_text( + "50696E67"); + cluster->mutable_health_checks(0)->mutable_tcp_health_check()->add_receive()->set_text( + "506F6E67"); + } + }); + + FakeRawConnectionPtr fake_upstream_health_connection; + on_server_init_function_ = [&](void) -> void { + ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_health_connection)); + ASSERT_TRUE(fake_upstream_health_connection->waitForData( + FakeRawConnection::waitForInexactMatch("Ping"))); + ASSERT_TRUE(fake_upstream_health_connection->write("Pong")); + }; + + initialize(); + IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("tcp_proxy")); + ASSERT_TRUE(tcp_client->write("hello")); + FakeRawConnectionPtr fake_upstream_connection; + ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection)); + ASSERT_TRUE(fake_upstream_connection->waitForData(5)); + ASSERT_TRUE(fake_upstream_connection->write("world")); + tcp_client->waitForData("world"); + ASSERT_TRUE(tcp_client->write("hello")); + ASSERT_TRUE(fake_upstream_connection->waitForData(10)); + + ASSERT_TRUE(fake_upstream_health_connection->waitForData(8)); + ASSERT_TRUE(fake_upstream_health_connection->close()); + ASSERT_TRUE(fake_upstream_health_connection->waitForDisconnect(true)); + + // By waiting we know the previous health check attempt completed (with a failure since we closed + // the connection on it) + FakeRawConnectionPtr fake_upstream_health_connection_reconnect; + 
ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_health_connection_reconnect)); + ASSERT_TRUE(fake_upstream_health_connection_reconnect->waitForData( + FakeRawConnection::waitForInexactMatch("Ping"))); + + ASSERT_TRUE(tcp_client->write("still")); + ASSERT_TRUE(fake_upstream_connection->waitForData(15)); + ASSERT_TRUE(fake_upstream_connection->write("here")); + tcp_client->waitForData("here", false); + + test_server_.reset(); + ASSERT_TRUE(fake_upstream_connection->waitForHalfClose()); + ASSERT_TRUE(fake_upstream_connection->close()); + ASSERT_TRUE(fake_upstream_connection->waitForDisconnect(true)); + ASSERT_TRUE(fake_upstream_health_connection_reconnect->waitForHalfClose()); + ASSERT_TRUE(fake_upstream_health_connection_reconnect->close()); + ASSERT_TRUE(fake_upstream_health_connection_reconnect->waitForDisconnect(true)); + tcp_client->waitForHalfClose(); + tcp_client->close(); +} + +TEST_P(TcpProxyIntegrationTest, TestCloseOnHealthFailure) { + concurrency_ = 2; + + config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { + auto* static_resources = bootstrap.mutable_static_resources(); + for (int i = 0; i < static_resources->clusters_size(); ++i) { + auto* cluster = static_resources->mutable_clusters(i); + cluster->set_close_connections_on_host_health_failure(true); + cluster->mutable_common_lb_config()->mutable_healthy_panic_threshold()->set_value(0); + cluster->add_health_checks()->mutable_timeout()->set_seconds(20); + cluster->mutable_health_checks(0)->mutable_reuse_connection()->set_value(true); + cluster->mutable_health_checks(0)->mutable_interval()->set_seconds(1); + cluster->mutable_health_checks(0)->mutable_no_traffic_interval()->set_seconds(1); + cluster->mutable_health_checks(0)->mutable_unhealthy_threshold()->set_value(1); + cluster->mutable_health_checks(0)->mutable_healthy_threshold()->set_value(1); + cluster->mutable_health_checks(0)->mutable_tcp_health_check(); + 
cluster->mutable_health_checks(0)->mutable_tcp_health_check()->mutable_send()->set_text( + "50696E67"); + ; + cluster->mutable_health_checks(0)->mutable_tcp_health_check()->add_receive()->set_text( + "506F6E67"); + } + }); + + FakeRawConnectionPtr fake_upstream_health_connection; + on_server_init_function_ = [&](void) -> void { + ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_health_connection)); + ASSERT_TRUE(fake_upstream_health_connection->waitForData(4)); + ASSERT_TRUE(fake_upstream_health_connection->write("Pong")); + }; + + initialize(); + IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("tcp_proxy")); + ASSERT_TRUE(tcp_client->write("hello")); + FakeRawConnectionPtr fake_upstream_connection; + ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection)); + ASSERT_TRUE(fake_upstream_connection->waitForData(5)); + ASSERT_TRUE(fake_upstream_connection->write("world")); + tcp_client->waitForData("world"); + ASSERT_TRUE(tcp_client->write("hello")); + ASSERT_TRUE(fake_upstream_connection->waitForData(10)); + + ASSERT_TRUE(fake_upstream_health_connection->waitForData(8)); + fake_upstreams_[0]->set_allow_unexpected_disconnects(true); + ASSERT_TRUE(fake_upstream_health_connection->close()); + ASSERT_TRUE(fake_upstream_health_connection->waitForDisconnect(true)); + + ASSERT_TRUE(fake_upstream_connection->waitForHalfClose()); + tcp_client->waitForHalfClose(); + + ASSERT_TRUE(fake_upstream_connection->close()); + tcp_client->close(); + ASSERT_TRUE(fake_upstream_connection->waitForDisconnect(true)); +} + class TcpProxyMetadataMatchIntegrationTest : public TcpProxyIntegrationTest { public: void initialize() override; @@ -461,13 +626,13 @@ void TcpProxyMetadataMatchIntegrationTest::initialize() { // Verifies successful connection. 
void TcpProxyMetadataMatchIntegrationTest::expectEndpointToMatchRoute() { IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("tcp_proxy")); - tcp_client->write("hello"); + ASSERT_TRUE(tcp_client->write("hello")); FakeRawConnectionPtr fake_upstream_connection; ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection)); ASSERT_TRUE(fake_upstream_connection->waitForData(5)); ASSERT_TRUE(fake_upstream_connection->write("world")); tcp_client->waitForData("world"); - tcp_client->write("hello", true); + ASSERT_TRUE(tcp_client->write("hello", true)); ASSERT_TRUE(fake_upstream_connection->waitForData(10)); ASSERT_TRUE(fake_upstream_connection->waitForHalfClose()); ASSERT_TRUE(fake_upstream_connection->write("", true)); @@ -480,7 +645,7 @@ void TcpProxyMetadataMatchIntegrationTest::expectEndpointToMatchRoute() { // Verifies connection failure. void TcpProxyMetadataMatchIntegrationTest::expectEndpointNotToMatchRoute() { IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("tcp_proxy")); - tcp_client->write("hello"); + ASSERT_TRUE(tcp_client->write("hello", false, false)); // TODO(yskopets): 'tcp_client->waitForDisconnect(true);' gets stuck indefinitely on Linux builds, // e.g. on 'envoy-linux (bazel compile_time_options)' and 'envoy-linux (bazel release)' @@ -492,9 +657,8 @@ void TcpProxyMetadataMatchIntegrationTest::expectEndpointNotToMatchRoute() { tcp_client->close(); } -INSTANTIATE_TEST_SUITE_P(IpVersions, TcpProxyMetadataMatchIntegrationTest, - testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), - TestUtility::ipTestParamsToString); +INSTANTIATE_TEST_SUITE_P(TcpProxyIntegrationTestParams, TcpProxyMetadataMatchIntegrationTest, + testing::ValuesIn(getProtocolTestParams()), protocolTestParamsToString); // Test subset load balancing for a regular cluster when endpoint selector is defined at the top // level. 
@@ -503,9 +667,9 @@ TEST_P(TcpProxyMetadataMatchIntegrationTest, tcp_proxy_.set_stat_prefix("tcp_stats"); tcp_proxy_.set_cluster("cluster_0"); tcp_proxy_.mutable_metadata_match()->MergeFrom( - lbMetadata({{"role", "master"}, {"version", "v1"}, {"stage", "prod"}})); + lbMetadata({{"role", "primary"}, {"version", "v1"}, {"stage", "prod"}})); - endpoint_metadata_ = lbMetadata({{"role", "master"}, {"version", "v1"}, {"stage", "prod"}}); + endpoint_metadata_ = lbMetadata({{"role", "primary"}, {"version", "v1"}, {"stage", "prod"}}); initialize(); @@ -521,10 +685,13 @@ TEST_P(TcpProxyMetadataMatchIntegrationTest, tcp_proxy_.mutable_hidden_envoy_deprecated_deprecated_v1()->add_routes()->set_cluster( "cluster_0"); tcp_proxy_.mutable_metadata_match()->MergeFrom( - lbMetadata({{"role", "master"}, {"version", "v1"}, {"stage", "prod"}})); + lbMetadata({{"role", "primary"}, {"version", "v1"}, {"stage", "prod"}})); - endpoint_metadata_ = lbMetadata({{"role", "master"}, {"version", "v1"}, {"stage", "prod"}}); + endpoint_metadata_ = lbMetadata({{"role", "primary"}, {"version", "v1"}, {"stage", "prod"}}); + config_helper_.addRuntimeOverride("envoy.deprecated_features:envoy.extensions.filters.network." 
+ "tcp_proxy.v3.TcpProxy.hidden_envoy_deprecated_deprecated_v1", + "true"); initialize(); expectEndpointToMatchRoute(); @@ -538,9 +705,9 @@ TEST_P(TcpProxyMetadataMatchIntegrationTest, EndpointShouldMatchWeightedClusterW cluster_0->set_name("cluster_0"); cluster_0->set_weight(1); cluster_0->mutable_metadata_match()->MergeFrom( - lbMetadata({{"role", "master"}, {"version", "v1"}, {"stage", "prod"}})); + lbMetadata({{"role", "primary"}, {"version", "v1"}, {"stage", "prod"}})); - endpoint_metadata_ = lbMetadata({{"role", "master"}, {"version", "v1"}, {"stage", "prod"}}); + endpoint_metadata_ = lbMetadata({{"role", "primary"}, {"version", "v1"}, {"stage", "prod"}}); initialize(); @@ -557,9 +724,9 @@ TEST_P(TcpProxyMetadataMatchIntegrationTest, cluster_0->set_name("cluster_0"); cluster_0->set_weight(1); cluster_0->mutable_metadata_match()->MergeFrom(lbMetadata( - {{"role", "master"}, {"stage", "prod"}})); // should override `stage` value at top-level + {{"role", "primary"}, {"stage", "prod"}})); // should override `stage` value at top-level - endpoint_metadata_ = lbMetadata({{"role", "master"}, {"version", "v1"}, {"stage", "prod"}}); + endpoint_metadata_ = lbMetadata({{"role", "primary"}, {"version", "v1"}, {"stage", "prod"}}); initialize(); @@ -572,12 +739,12 @@ TEST_P(TcpProxyMetadataMatchIntegrationTest, EndpointShouldMatchWeightedClusterWithTopLevelMetadataMatch) { tcp_proxy_.set_stat_prefix("tcp_stats"); tcp_proxy_.mutable_metadata_match()->MergeFrom( - lbMetadata({{"role", "master"}, {"version", "v1"}, {"stage", "prod"}})); + lbMetadata({{"role", "primary"}, {"version", "v1"}, {"stage", "prod"}})); auto* cluster_0 = tcp_proxy_.mutable_weighted_clusters()->add_clusters(); cluster_0->set_name("cluster_0"); cluster_0->set_weight(1); - endpoint_metadata_ = lbMetadata({{"role", "master"}, {"version", "v1"}, {"stage", "prod"}}); + endpoint_metadata_ = lbMetadata({{"role", "primary"}, {"version", "v1"}, {"stage", "prod"}}); initialize(); @@ -591,7 +758,7 @@ 
TEST_P(TcpProxyMetadataMatchIntegrationTest, tcp_proxy_.set_stat_prefix("tcp_stats"); tcp_proxy_.set_cluster("cluster_0"); tcp_proxy_.mutable_metadata_match()->MergeFrom( - lbMetadata({{"role", "master"}, {"version", "v1"}, {"stage", "prod"}})); + lbMetadata({{"role", "primary"}, {"version", "v1"}, {"stage", "prod"}})); endpoint_metadata_ = lbMetadata({{"role", "replica"}, {"version", "v1"}, {"stage", "prod"}}); @@ -609,10 +776,13 @@ TEST_P(TcpProxyMetadataMatchIntegrationTest, tcp_proxy_.mutable_hidden_envoy_deprecated_deprecated_v1()->add_routes()->set_cluster( "cluster_0"); tcp_proxy_.mutable_metadata_match()->MergeFrom( - lbMetadata({{"role", "master"}, {"version", "v1"}, {"stage", "prod"}})); + lbMetadata({{"role", "primary"}, {"version", "v1"}, {"stage", "prod"}})); endpoint_metadata_ = lbMetadata({{"role", "replica"}, {"version", "v1"}, {"stage", "prod"}}); + config_helper_.addRuntimeOverride("envoy.deprecated_features:envoy.extensions.filters.network." + "tcp_proxy.v3.TcpProxy.hidden_envoy_deprecated_deprecated_v1", + "true"); initialize(); expectEndpointNotToMatchRoute(); @@ -627,7 +797,7 @@ TEST_P(TcpProxyMetadataMatchIntegrationTest, cluster_0->set_name("cluster_0"); cluster_0->set_weight(1); cluster_0->mutable_metadata_match()->MergeFrom( - lbMetadata({{"role", "master"}, {"version", "v1"}, {"stage", "prod"}})); + lbMetadata({{"role", "primary"}, {"version", "v1"}, {"stage", "prod"}})); endpoint_metadata_ = lbMetadata({{"role", "replica"}, {"version", "v1"}, {"stage", "prod"}}); @@ -646,9 +816,9 @@ TEST_P(TcpProxyMetadataMatchIntegrationTest, cluster_0->set_name("cluster_0"); cluster_0->set_weight(1); cluster_0->mutable_metadata_match()->MergeFrom(lbMetadata( - {{"role", "master"}, {"stage", "prod"}})); // should override `stage` value at top-level + {{"role", "primary"}, {"stage", "prod"}})); // should override `stage` value at top-level - endpoint_metadata_ = lbMetadata({{"role", "master"}, {"version", "v1"}, {"stage", "dev"}}); + endpoint_metadata_ = 
lbMetadata({{"role", "primary"}, {"version", "v1"}, {"stage", "dev"}}); initialize(); @@ -661,7 +831,7 @@ TEST_P(TcpProxyMetadataMatchIntegrationTest, EndpointShouldNotMatchWeightedClusterWithTopLevelMetadataMatch) { tcp_proxy_.set_stat_prefix("tcp_stats"); tcp_proxy_.mutable_metadata_match()->MergeFrom( - lbMetadata({{"role", "master"}, {"version", "v1"}, {"stage", "prod"}})); + lbMetadata({{"role", "primary"}, {"version", "v1"}, {"stage", "prod"}})); auto* cluster_0 = tcp_proxy_.mutable_weighted_clusters()->add_clusters(); cluster_0->set_name("cluster_0"); cluster_0->set_weight(1); @@ -673,9 +843,8 @@ TEST_P(TcpProxyMetadataMatchIntegrationTest, expectEndpointNotToMatchRoute(); } -INSTANTIATE_TEST_SUITE_P(IpVersions, TcpProxySslIntegrationTest, - testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), - TestUtility::ipTestParamsToString); +INSTANTIATE_TEST_SUITE_P(TcpProxyIntegrationTestParams, TcpProxySslIntegrationTest, + testing::ValuesIn(getProtocolTestParams()), protocolTestParamsToString); void TcpProxySslIntegrationTest::initialize() { config_helper_.addSslConfig(); @@ -692,11 +861,12 @@ void TcpProxySslIntegrationTest::setupConnections() { // Set up the mock buffer factory so the newly created SSL client will have a mock write // buffer. This allows us to track the bytes actually written to the socket. 
- EXPECT_CALL(*mock_buffer_factory_, create_(_, _)) + EXPECT_CALL(*mock_buffer_factory_, create_(_, _, _)) .Times(1) - .WillOnce(Invoke([&](std::function below_low, - std::function above_high) -> Buffer::Instance* { - client_write_buffer_ = new NiceMock(below_low, above_high); + .WillOnce(Invoke([&](std::function below_low, std::function above_high, + std::function above_overflow) -> Buffer::Instance* { + client_write_buffer_ = + new NiceMock(below_low, above_high, above_overflow); ON_CALL(*client_write_buffer_, move(_)) .WillByDefault(Invoke(client_write_buffer_, &MockWatermarkBuffer::baseMove)); ON_CALL(*client_write_buffer_, drain(_)) @@ -711,7 +881,7 @@ void TcpProxySslIntegrationTest::setupConnections() { dispatcher_->createClientConnection(address, Network::Address::InstanceConstSharedPtr(), context_->createTransportSocket(nullptr), nullptr); - // Perform the SSL handshake. Loopback is whitelisted in tcp_proxy.json for the ssl_auth + // Perform the SSL handshake. Loopback is allowlisted in tcp_proxy.json for the ssl_auth // filter so there will be no pause waiting on auth data. 
ssl_client_->addConnectionCallbacks(connect_callbacks_); ssl_client_->enableHalfClose(true); diff --git a/test/integration/tcp_proxy_integration_test.h b/test/integration/tcp_proxy_integration_test.h index 6504befc36306..d4a2248c4e3e2 100644 --- a/test/integration/tcp_proxy_integration_test.h +++ b/test/integration/tcp_proxy_integration_test.h @@ -10,18 +10,19 @@ namespace Envoy { -class TcpProxyIntegrationTest : public testing::TestWithParam, +struct TcpProxyIntegrationTestParams { + Network::Address::IpVersion version; + bool test_original_version; +}; + +class TcpProxyIntegrationTest : public testing::TestWithParam, public BaseIntegrationTest { public: - TcpProxyIntegrationTest() : BaseIntegrationTest(GetParam(), ConfigHelper::tcpProxyConfig()) { + TcpProxyIntegrationTest() + : BaseIntegrationTest(GetParam().version, ConfigHelper::tcpProxyConfig()) { enable_half_close_ = true; } - ~TcpProxyIntegrationTest() override { - test_server_.reset(); - fake_upstreams_.clear(); - } - void initialize() override; }; diff --git a/test/integration/tcp_tunneling_integration_test.cc b/test/integration/tcp_tunneling_integration_test.cc index 2851fd79a4946..bc270853ae651 100644 --- a/test/integration/tcp_tunneling_integration_test.cc +++ b/test/integration/tcp_tunneling_integration_test.cc @@ -22,14 +22,10 @@ class ConnectTerminationIntegrationTest } void initialize() override { - auto host = config_helper_.createVirtualHost("host", "/"); - // host.mutable_proxying_config(); - config_helper_.addVirtualHost(host); config_helper_.addConfigModifier( [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& - hcm) -> void { - hcm.add_upgrade_configs()->set_upgrade_type("CONNECT"); - hcm.mutable_http2_protocol_options()->set_allow_connect(true); + hcm) { + ConfigHelper::setConnectConfig(hcm, true); if (enable_timeout_) { hcm.mutable_stream_idle_timeout()->set_seconds(0); @@ -45,6 +41,7 @@ class ConnectTerminationIntegrationTest request_encoder_ = 
&encoder_decoder.first; response_ = std::move(encoder_decoder.second); ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_raw_upstream_connection_)); + response_->waitForHeaders(); } void sendBidirectionalData(const char* downstream_send_data = "hello", @@ -66,15 +63,13 @@ class ConnectTerminationIntegrationTest {":path", "/"}, {":protocol", "bytestream"}, {":scheme", "https"}, - {":authority", "host"}}; + {":authority", "host:80"}}; FakeRawConnectionPtr fake_raw_upstream_connection_; IntegrationStreamDecoderPtr response_; bool enable_timeout_{}; }; -// TODO(alyssawilk) make sure that if data is sent with the connect it does not go upstream -// until the 200 headers are sent before unhiding ANY config. -TEST_P(ConnectTerminationIntegrationTest, DISABLED_Basic) { +TEST_P(ConnectTerminationIntegrationTest, Basic) { initialize(); setUpConnection(); @@ -92,7 +87,7 @@ TEST_P(ConnectTerminationIntegrationTest, DISABLED_Basic) { ASSERT_FALSE(response_->reset()); } -TEST_P(ConnectTerminationIntegrationTest, DISABLED_DownstreamClose) { +TEST_P(ConnectTerminationIntegrationTest, DownstreamClose) { initialize(); setUpConnection(); @@ -103,7 +98,7 @@ TEST_P(ConnectTerminationIntegrationTest, DISABLED_DownstreamClose) { ASSERT_TRUE(fake_raw_upstream_connection_->waitForHalfClose()); } -TEST_P(ConnectTerminationIntegrationTest, DISABLED_DownstreamReset) { +TEST_P(ConnectTerminationIntegrationTest, DownstreamReset) { initialize(); setUpConnection(); @@ -114,7 +109,7 @@ TEST_P(ConnectTerminationIntegrationTest, DISABLED_DownstreamReset) { ASSERT_TRUE(fake_raw_upstream_connection_->waitForHalfClose()); } -TEST_P(ConnectTerminationIntegrationTest, DISABLED_UpstreamClose) { +TEST_P(ConnectTerminationIntegrationTest, UpstreamClose) { initialize(); setUpConnection(); @@ -125,18 +120,18 @@ TEST_P(ConnectTerminationIntegrationTest, DISABLED_UpstreamClose) { response_->waitForReset(); } -TEST_P(ConnectTerminationIntegrationTest, DISABLED_TestTimeout) { 
+TEST_P(ConnectTerminationIntegrationTest, TestTimeout) { enable_timeout_ = true; initialize(); setUpConnection(); // Wait for the timeout to close the connection. - response_->waitForEndStream(); + response_->waitForReset(); ASSERT_TRUE(fake_raw_upstream_connection_->waitForHalfClose()); } -TEST_P(ConnectTerminationIntegrationTest, DISABLED_BuggyHeaders) { +TEST_P(ConnectTerminationIntegrationTest, BuggyHeaders) { initialize(); // It's possible that the FIN is received before we set half close on the // upstream connection, so allow unexpected disconnects. @@ -150,7 +145,7 @@ TEST_P(ConnectTerminationIntegrationTest, DISABLED_BuggyHeaders) { {":path", "/"}, {":protocol", "bytestream"}, {":scheme", "https"}, - {":authority", "host"}}); + {":authority", "host:80"}}); // If the connection is established (created, set to half close, and then the // FIN arrives), make sure the FIN arrives, and send a FIN from upstream. if (fake_upstreams_[0]->waitForRawConnection(fake_raw_upstream_connection_) && @@ -165,16 +160,37 @@ TEST_P(ConnectTerminationIntegrationTest, DISABLED_BuggyHeaders) { ASSERT_FALSE(response_->reset()); } +TEST_P(ConnectTerminationIntegrationTest, BasicMaxStreamDuration) { + config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + auto* static_resources = bootstrap.mutable_static_resources(); + auto* cluster = static_resources->mutable_clusters(0); + auto* http_protocol_options = cluster->mutable_common_http_protocol_options(); + http_protocol_options->mutable_max_stream_duration()->MergeFrom( + ProtobufUtil::TimeUtil::MillisecondsToDuration(1000)); + }); + + initialize(); + fake_upstreams_[0]->set_allow_unexpected_disconnects(true); + setUpConnection(); + sendBidirectionalData(); + + test_server_->waitForCounterGe("cluster.cluster_0.upstream_rq_max_duration_reached", 1); + + if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { + ASSERT_TRUE(codec_client_->waitForDisconnect()); + } else { + 
response_->waitForReset(); + codec_client_->close(); + } +} + // For this class, forward the CONNECT request upstream class ProxyingConnectIntegrationTest : public HttpProtocolIntegrationTest { public: void initialize() override { config_helper_.addConfigModifier( [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& - hcm) -> void { - hcm.add_upgrade_configs()->set_upgrade_type("CONNECT"); - hcm.mutable_http2_protocol_options()->set_allow_connect(true); - }); + hcm) -> void { ConfigHelper::setConnectConfig(hcm, false); }); HttpProtocolIntegrationTest::initialize(); } @@ -219,7 +235,7 @@ TEST_P(ProxyingConnectIntegrationTest, ProxyConnect) { // Wait for them to arrive downstream. response_->waitForHeaders(); - EXPECT_EQ("200", response_->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response_->headers().getStatusValue()); // Make sure that even once the response has started, that data can continue to go upstream. codec_client_->sendData(*request_encoder_, "hello", false); @@ -281,16 +297,16 @@ TEST_P(TcpTunnelingIntegrationTest, Basic) { upstream_request_->encodeHeaders(default_response_headers_, false); // Send some data from downstream to upstream, and make sure it goes through. - tcp_client->write("hello", false); + ASSERT_TRUE(tcp_client->write("hello", false)); ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, 5)); // Send data from upstream to downstream. upstream_request_->encodeData(12, false); - tcp_client->waitForData(12); + ASSERT_TRUE(tcp_client->waitForData(12)); // Now send more data and close the TCP client. This should be treated as half close, so the data // should go through. 
- tcp_client->write("hello", false); + ASSERT_TRUE(tcp_client->write("hello", false)); tcp_client->close(); ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, 5)); ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_)); @@ -299,6 +315,23 @@ TEST_P(TcpTunnelingIntegrationTest, Basic) { upstream_request_->encodeData(0, true); } +// Validates that if the cluster is not configured with HTTP/2 we don't attempt +// to tunnel the data. +TEST_P(TcpTunnelingIntegrationTest, InvalidCluster) { + config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { + bootstrap.mutable_static_resources() + ->mutable_clusters() + ->Mutable(0) + ->clear_http2_protocol_options(); + }); + initialize(); + + // Start a connection and see it close immediately due to the invalid cluster. + IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("tcp_proxy")); + tcp_client->waitForHalfClose(); + tcp_client->close(); +} + TEST_P(TcpTunnelingIntegrationTest, InvalidResponseHeaders) { initialize(); @@ -331,21 +364,21 @@ TEST_P(TcpTunnelingIntegrationTest, CloseUpstreamFirst) { upstream_request_->encodeHeaders(default_response_headers_, false); // Send data in both directions. - tcp_client->write("hello", false); + ASSERT_TRUE(tcp_client->write("hello", false)); ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, 5)); // Send data from upstream to downstream with an end stream and make sure the data is received // before the connection is half-closed. upstream_request_->encodeData(12, true); - tcp_client->waitForData(12); + ASSERT_TRUE(tcp_client->waitForData(12)); tcp_client->waitForHalfClose(); // Attempt to send data upstream. // should go through. 
- tcp_client->write("hello", false); + ASSERT_TRUE(tcp_client->write("hello", false)); ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, 5)); - tcp_client->write("hello", true); + ASSERT_TRUE(tcp_client->write("hello", true)); ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, 5)); ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_)); } @@ -393,7 +426,7 @@ TEST_P(TcpTunnelingIntegrationTest, TestIdletimeoutWithLargeOutstandingData) { upstream_request_->encodeHeaders(default_response_headers_, false); std::string data(1024 * 16, 'a'); - tcp_client->write(data); + ASSERT_TRUE(tcp_client->write(data)); upstream_request_->encodeData(data, false); tcp_client->waitForDisconnect(true); @@ -415,7 +448,7 @@ TEST_P(TcpTunnelingIntegrationTest, TcpProxyDownstreamFlush) { upstream_request_->encodeHeaders(default_response_headers_, false); tcp_client->readDisable(true); - tcp_client->write("", true); + ASSERT_TRUE(tcp_client->write("", true)); // This ensures that readDisable(true) has been run on its thread // before tcp_client starts writing. @@ -443,13 +476,13 @@ TEST_P(TcpTunnelingIntegrationTest, TcpProxyUpstreamFlush) { ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); upstream_request_->encodeHeaders(default_response_headers_, false); upstream_request_->readDisable(true); - upstream_request_->encodeData("", true); + upstream_request_->encodeData("hello", false); // This ensures that fake_upstream_connection->readDisable has been run on its thread // before tcp_client starts writing. - tcp_client->waitForHalfClose(); + ASSERT_TRUE(tcp_client->waitForData(5)); - tcp_client->write(data, true); + ASSERT_TRUE(tcp_client->write(data, true)); // Note that upstream_flush_active will *not* be incremented for the HTTP // tunneling case. 
The data is already written to the stream, so no drainer @@ -457,9 +490,53 @@ TEST_P(TcpTunnelingIntegrationTest, TcpProxyUpstreamFlush) { upstream_request_->readDisable(false); ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, size)); ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_)); + upstream_request_->encodeData("world", true); tcp_client->waitForHalfClose(); } +// Test that h2 connection is reused. +TEST_P(TcpTunnelingIntegrationTest, H2ConnectionReuse) { + initialize(); + + // Establish a connection. + IntegrationTcpClientPtr tcp_client1 = makeTcpConnection(lookupPort("tcp_proxy")); + ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); + ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); + ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); + upstream_request_->encodeHeaders(default_response_headers_, false); + + // Send data in both directions. + ASSERT_TRUE(tcp_client1->write("hello1", false)); + ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, "hello1")); + + // Send data from upstream to downstream with an end stream and make sure the data is received + // before the connection is half-closed. + upstream_request_->encodeData("world1", true); + tcp_client1->waitForData("world1"); + tcp_client1->waitForHalfClose(); + tcp_client1->close(); + ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_)); + + // Establish a new connection. + IntegrationTcpClientPtr tcp_client2 = makeTcpConnection(lookupPort("tcp_proxy")); + + // The new CONNECT stream is established in the existing h2 connection. 
+ ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); + ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); + upstream_request_->encodeHeaders(default_response_headers_, false); + + ASSERT_TRUE(tcp_client2->write("hello2", false)); + ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, "hello2")); + + // Send data from upstream to downstream with an end stream and make sure the data is received + // before the connection is half-closed. + upstream_request_->encodeData("world2", true); + tcp_client2->waitForData("world2"); + tcp_client2->waitForHalfClose(); + tcp_client2->close(); + ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_)); +} + INSTANTIATE_TEST_SUITE_P(IpVersions, TcpTunnelingIntegrationTest, testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), TestUtility::ipTestParamsToString); diff --git a/test/integration/test_utility.sh b/test/integration/test_utility.sh index ee5ab5316c750..33b3bfa6838af 100644 --- a/test/integration/test_utility.sh +++ b/test/integration/test_utility.sh @@ -75,4 +75,40 @@ enableHeapCheck () { HEAPCHECK=${SAVED_HEAPCHECK} } -[[ -z "${ENVOY_BIN}" ]] && ENVOY_BIN="${TEST_SRCDIR}"/envoy/source/exe/envoy-static +# Scrapes a stat value from an an admin port. 
+scrape_stat() { + local ADMIN_ADDRESS="$1" + local STAT_NAME="$2" + curl -sg "$ADMIN_ADDRESS"/stats | grep "^${STAT_NAME}: " | cut -f2 -d" " +} + +milliseconds() { + local nanos=$(date +%N | sed 's/^0*//') + local seconds=$(date +%s) + echo $((1000*seconds + nanos/1000000)) +} + +wait_for_stat() { + local ADMIN_ADDRESS="$1" + local STAT_NAME="$2" + local OP="$3" + local VALUE="$4" + local TIMEOUT_SEC="$5" + local start_time_ms=$(milliseconds) + local end_time=$((SECONDS + TIMEOUT_SEC)) + local ret="" + while [ "$ret" = "" ]; do + local stat=$(scrape_stat "$ADMIN_ADDRESS" "$STAT_NAME") + if [ $stat $OP $VALUE ]; then + local end_time_ms=$(milliseconds) + ret="success: $STAT_NAME reached $stat after $((end_time_ms - start_time_ms)) ms" + elif [ "$SECONDS" -gt "$end_time" ]; then + ret="timeout: waiting $TIMEOUT_SEC seconds for $STAT_NAME=$stat to reach $VALUE" + else + sleep 0.1 + fi + done + echo "$ret" +} + +[[ -z "${ENVOY_BIN}" ]] && ENVOY_BIN="${TEST_SRCDIR}/envoy/source/exe/envoy-static" diff --git a/test/integration/transport_socket_match_integration_test.cc b/test/integration/transport_socket_match_integration_test.cc index e074af1a85d0f..2456921be3e16 100644 --- a/test/integration/transport_socket_match_integration_test.cc +++ b/test/integration/transport_socket_match_integration_test.cc @@ -20,8 +20,7 @@ class TransportSockeMatchIntegrationTest : public testing::Test, public HttpInte TransportSockeMatchIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, TestEnvironment::getIpVersionsForTest().front(), - ConfigHelper::httpProxyConfig()), - num_hosts_{2} { + ConfigHelper::httpProxyConfig()) { autonomous_upstream_ = true; setUpstreamCount(num_hosts_); } @@ -163,7 +162,7 @@ require_client_certificate: true setUpstreamProtocol(FakeHttpConnection::Type::HTTP1); } - const uint32_t num_hosts_; + const uint32_t num_hosts_{2}; Http::TestRequestHeaderMapImpl type_a_request_headers_{{":method", "GET"}, {":path", "/test"}, {":scheme", "http"}, @@ 
-188,10 +187,10 @@ TEST_F(TransportSockeMatchIntegrationTest, TlsAndPlaintextSucceed) { IntegrationStreamDecoderPtr response = codec_client_->makeHeaderOnlyRequest(type_a_request_headers_); response->waitForEndStream(); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); response = codec_client_->makeHeaderOnlyRequest(type_b_request_headers_); response->waitForEndStream(); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } } @@ -203,10 +202,10 @@ TEST_F(TransportSockeMatchIntegrationTest, TlsAndPlaintextFailsWithoutSocketMatc IntegrationStreamDecoderPtr response = codec_client_->makeHeaderOnlyRequest(type_a_request_headers_); response->waitForEndStream(); - EXPECT_EQ("503", response->headers().Status()->value().getStringView()); + EXPECT_EQ("503", response->headers().getStatusValue()); response = codec_client_->makeHeaderOnlyRequest(type_b_request_headers_); response->waitForEndStream(); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } } } // namespace Envoy diff --git a/test/integration/utility.cc b/test/integration/utility.cc index d231245635679..c969a5b8a2ef5 100644 --- a/test/integration/utility.cc +++ b/test/integration/utility.cc @@ -84,7 +84,7 @@ IntegrationUtil::makeSingleRequest(const Network::Address::InstanceConstSharedPt Http::RequestEncoder& encoder = client.newStream(*response); encoder.getStream().addCallbacks(*response); - Http::RequestHeaderMapImpl headers; + Http::TestRequestHeaderMapImpl headers; headers.setMethod(method); headers.setPath(url); headers.setHost(host); @@ -114,15 +114,22 @@ IntegrationUtil::makeSingleRequest(uint32_t port, const std::string& method, con RawConnectionDriver::RawConnectionDriver(uint32_t port, Buffer::Instance& initial_data, ReadCallback data_callback, - 
Network::Address::IpVersion version) { + Network::Address::IpVersion version, + Event::Dispatcher& dispatcher, + Network::TransportSocketPtr transport_socket) + : dispatcher_(dispatcher) { api_ = Api::createApiForTest(stats_store_); Event::GlobalTimeSystem time_system; - dispatcher_ = api_->allocateDispatcher("test_thread"); callbacks_ = std::make_unique(); - client_ = dispatcher_->createClientConnection( + + if (transport_socket == nullptr) { + transport_socket = Network::Test::createRawBufferSocket(); + } + + client_ = dispatcher_.createClientConnection( Network::Utility::resolveUrl( fmt::format("tcp://{}:{}", Network::Test::getLoopbackAddressUrlString(version), port)), - Network::Address::InstanceConstSharedPtr(), Network::Test::createRawBufferSocket(), nullptr); + Network::Address::InstanceConstSharedPtr(), std::move(transport_socket), nullptr); client_->addConnectionCallbacks(*callbacks_); client_->addReadFilter(Network::ReadFilterSharedPtr{new ForwardingFilter(*this, data_callback)}); client_->write(initial_data, false); @@ -131,7 +138,14 @@ RawConnectionDriver::RawConnectionDriver(uint32_t port, Buffer::Instance& initia RawConnectionDriver::~RawConnectionDriver() = default; -void RawConnectionDriver::run(Event::Dispatcher::RunType run_type) { dispatcher_->run(run_type); } +void RawConnectionDriver::waitForConnection() { + while (!callbacks_->connected() && !callbacks_->closed()) { + Event::GlobalTimeSystem().timeSystem().advanceTimeWait(std::chrono::milliseconds(10)); + dispatcher_.run(Event::Dispatcher::RunType::NonBlock); + } +} + +void RawConnectionDriver::run(Event::Dispatcher::RunType run_type) { dispatcher_.run(run_type); } void RawConnectionDriver::close() { client_->close(Network::ConnectionCloseType::FlushWrite); } diff --git a/test/integration/utility.h b/test/integration/utility.h index 707acf5dd0aac..6ff69ad27a831 100644 --- a/test/integration/utility.h +++ b/test/integration/utility.h @@ -18,6 +18,8 @@ #include "test/test_common/printers.h" 
#include "test/test_common/test_time.h" +#include "gtest/gtest.h" + namespace Envoy { /** * A buffering response decoder used for testing. @@ -64,15 +66,19 @@ class RawConnectionDriver { using ReadCallback = std::function; RawConnectionDriver(uint32_t port, Buffer::Instance& initial_data, ReadCallback data_callback, - Network::Address::IpVersion version); + Network::Address::IpVersion version, Event::Dispatcher& dispatcher, + Network::TransportSocketPtr transport_socket = nullptr); ~RawConnectionDriver(); const Network::Connection& connection() { return *client_; } - bool connecting() { return callbacks_->connecting_; } void run(Event::Dispatcher::RunType run_type = Event::Dispatcher::RunType::Block); void close(); - Network::ConnectionEvent last_connection_event() const { + Network::ConnectionEvent lastConnectionEvent() const { return callbacks_->last_connection_event_; } + // Wait until connected or closed(). + void waitForConnection(); + + bool closed() { return callbacks_->closed(); } private: struct ForwardingFilter : public Network::ReadFilterBaseImpl { @@ -91,20 +97,30 @@ class RawConnectionDriver { }; struct ConnectionCallbacks : public Network::ConnectionCallbacks { + + bool connected() const { return connected_; } + bool closed() const { return closed_; } + + // Network::ConnectionCallbacks void onEvent(Network::ConnectionEvent event) override { last_connection_event_ = event; - connecting_ = false; + closed_ |= (event == Network::ConnectionEvent::RemoteClose || + event == Network::ConnectionEvent::LocalClose); + connected_ |= (event == Network::ConnectionEvent::Connected); } void onAboveWriteBufferHighWatermark() override {} void onBelowWriteBufferLowWatermark() override {} - bool connecting_{true}; Network::ConnectionEvent last_connection_event_; + + private: + bool connected_{false}; + bool closed_{false}; }; Stats::IsolatedStoreImpl stats_store_; Api::ApiPtr api_; - Event::DispatcherPtr dispatcher_; + Event::Dispatcher& dispatcher_; std::unique_ptr 
callbacks_; Network::ClientConnectionPtr client_; }; @@ -183,11 +199,29 @@ class WaitForPayloadReader : public Network::ReadFilterBaseImpl { data_to_wait_for_ = data; exact_match_ = exact_match; } - void setLengthToWaitFor(size_t length) { + + ABSL_MUST_USE_RESULT testing::AssertionResult waitForLength(size_t length, + std::chrono::milliseconds timeout) { ASSERT(!wait_for_length_); length_to_wait_for_ = length; wait_for_length_ = true; + + Event::TimerPtr timeout_timer = + dispatcher_.createTimer([this]() -> void { dispatcher_.exit(); }); + timeout_timer->enableTimer(timeout); + + dispatcher_.run(Event::Dispatcher::RunType::Block); + + if (timeout_timer->enabled()) { + timeout_timer->disableTimer(); + return testing::AssertionSuccess(); + } + + length_to_wait_for_ = 0; + wait_for_length_ = false; + return testing::AssertionFailure() << "Timed out waiting for " << length << " bytes of data\n"; } + const std::string& data() { return data_; } bool readLastByte() { return read_end_stream_; } void clearData(size_t count = std::string::npos) { data_.erase(0, count); } diff --git a/test/integration/version_integration_test.cc b/test/integration/version_integration_test.cc index bbc46fcc33aaf..b07d2c881abbc 100644 --- a/test/integration/version_integration_test.cc +++ b/test/integration/version_integration_test.cc @@ -31,6 +31,11 @@ TEST_P(VersionIntegrationTest, DEPRECATED_FEATURE_TEST(IpTaggingV2StaticStructCo config: )EOF", ExampleIpTaggingConfig)); + + config_helper_.addRuntimeOverride( + "envoy.deprecated_features:envoy.extensions.filters.network." 
+ "http_connection_manager.v3.HttpFilter.hidden_envoy_deprecated_config", + "true"); initialize(); } diff --git a/test/integration/vhds_integration_test.cc b/test/integration/vhds_integration_test.cc index dc8dd64244f53..879fbcf8b90be 100644 --- a/test/integration/vhds_integration_test.cc +++ b/test/integration/vhds_integration_test.cc @@ -10,7 +10,6 @@ #include "test/common/grpc/grpc_client_integration.h" #include "test/integration/http_integration.h" #include "test/integration/utility.h" -#include "test/mocks/server/mocks.h" #include "test/test_common/network_utility.h" #include "test/test_common/resources.h" #include "test/test_common/simulated_time_system.h" @@ -140,11 +139,7 @@ class VhdsInitializationTest : public HttpIntegrationTest, use_lds_ = false; } - void TearDown() override { - cleanUpXdsConnection(); - test_server_.reset(); - fake_upstreams_.clear(); - } + void TearDown() override { cleanUpXdsConnection(); } // Overridden to insert this stuff into the initialize() at the very beginning of // HttpIntegrationTest::testRouterRequestAndResponseWithBody(). @@ -206,7 +201,7 @@ TEST_P(VhdsInitializationTest, InitializeVhdsAfterRdsHasBeenInitialized) { // Calls our initialize(), which includes establishing a listener, route, and cluster. 
testRouterHeaderOnlyRequestAndResponse(nullptr, 1, "/rdsone", "vhost.rds.first"); cleanupUpstreamAndDownstream(); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); // Update RouteConfig, this time include VHDS config sendSotwDiscoveryResponse( @@ -231,7 +226,7 @@ TEST_P(VhdsInitializationTest, InitializeVhdsAfterRdsHasBeenInitialized) { // Confirm vhost.first that was configured via VHDS is reachable testRouterHeaderOnlyRequestAndResponse(nullptr, 1, "/", "vhost.first"); cleanupUpstreamAndDownstream(); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); } class VhdsIntegrationTest : public HttpIntegrationTest, @@ -242,11 +237,7 @@ class VhdsIntegrationTest : public HttpIntegrationTest, use_lds_ = false; } - void TearDown() override { - cleanUpXdsConnection(); - test_server_.reset(); - fake_upstreams_.clear(); - } + void TearDown() override { cleanUpXdsConnection(); } std::string virtualHostYaml(const std::string& name, const std::string& domain) { return fmt::format(VhostTemplate, name, domain); @@ -376,7 +367,7 @@ class VhdsIntegrationTest : public HttpIntegrationTest, resource->set_version("4"); resource->mutable_resource()->PackFrom( API_DOWNGRADE(TestUtility::parseYaml( - virtualHostYaml("vhost_1", "vhost_1, vhost.first")))); + virtualHostYaml("my_route/vhost_1", "vhost_1, vhost.first")))); resource->add_aliases("my_route/vhost.first"); ret.set_nonce("test-nonce-0"); @@ -392,7 +383,7 @@ INSTANTIATE_TEST_SUITE_P(IpVersionsClientType, VhdsIntegrationTest, GRPC_CLIENT_ TEST_P(VhdsIntegrationTest, RdsUpdateWithoutVHDSChangesDoesNotRestartVHDS) { testRouterHeaderOnlyRequestAndResponse(nullptr, 1, "/", "host"); cleanupUpstreamAndDownstream(); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); // Update RouteConfig, but don't change VHDS config sendSotwDiscoveryResponse( @@ -403,7 +394,7 @@ TEST_P(VhdsIntegrationTest, 
RdsUpdateWithoutVHDSChangesDoesNotRestartVHDS) { // Confirm vhost_0 that was originally configured via VHDS is reachable testRouterHeaderOnlyRequestAndResponse(nullptr, 1, "/", "host"); cleanupUpstreamAndDownstream(); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); } // tests a scenario when: @@ -416,7 +407,7 @@ TEST_P(VhdsIntegrationTest, VhdsVirtualHostAddUpdateRemove) { // Calls our initialize(), which includes establishing a listener, route, and cluster. testRouterHeaderOnlyRequestAndResponse(nullptr, 1); cleanupUpstreamAndDownstream(); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); // A spontaneous VHDS DiscoveryResponse adds two virtual hosts sendDeltaDiscoveryResponse( @@ -426,10 +417,10 @@ TEST_P(VhdsIntegrationTest, VhdsVirtualHostAddUpdateRemove) { testRouterHeaderOnlyRequestAndResponse(nullptr, 1, "/one", "vhost.first"); cleanupUpstreamAndDownstream(); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); testRouterHeaderOnlyRequestAndResponse(nullptr, 1, "/two", "vhost.second"); cleanupUpstreamAndDownstream(); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); // A spontaneous VHDS DiscoveryResponse removes newly added virtual hosts sendDeltaDiscoveryResponse( @@ -457,7 +448,7 @@ TEST_P(VhdsIntegrationTest, VhdsVirtualHostAddUpdateRemove) { upstream_request_->encodeHeaders(default_response_headers_, true); response->waitForHeaders(); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); cleanupUpstreamAndDownstream(); } @@ -475,7 +466,7 @@ TEST_P(VhdsIntegrationTest, RdsWithVirtualHostsVhdsVirtualHostAddUpdateRemove) { testRouterHeaderOnlyRequestAndResponse(nullptr, 1); cleanupUpstreamAndDownstream(); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); // A spontaneous VHDS 
DiscoveryResponse adds two virtual hosts sendDeltaDiscoveryResponse( @@ -486,13 +477,13 @@ TEST_P(VhdsIntegrationTest, RdsWithVirtualHostsVhdsVirtualHostAddUpdateRemove) { // verify that rds-based virtual host can be resolved testRouterHeaderOnlyRequestAndResponse(nullptr, 1, "/rdsone", "vhost.rds.first"); cleanupUpstreamAndDownstream(); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); testRouterHeaderOnlyRequestAndResponse(nullptr, 1, "/one", "vhost.first"); cleanupUpstreamAndDownstream(); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); testRouterHeaderOnlyRequestAndResponse(nullptr, 1, "/two", "vhost.second"); cleanupUpstreamAndDownstream(); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); // A spontaneous VHDS DiscoveryResponse removes virtual hosts added via vhds sendDeltaDiscoveryResponse( @@ -503,7 +494,7 @@ TEST_P(VhdsIntegrationTest, RdsWithVirtualHostsVhdsVirtualHostAddUpdateRemove) { // verify rds-based virtual host is still present testRouterHeaderOnlyRequestAndResponse(nullptr, 1, "/rdsone", "vhost.rds.first"); cleanupUpstreamAndDownstream(); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); codec_client_ = makeHttpConnection(makeClientConnection((lookupPort("http")))); Http::TestRequestHeaderMapImpl request_headers{{":method", "GET"}, @@ -524,7 +515,7 @@ TEST_P(VhdsIntegrationTest, RdsWithVirtualHostsVhdsVirtualHostAddUpdateRemove) { upstream_request_->encodeHeaders(default_response_headers_, true); response->waitForHeaders(); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); cleanupUpstreamAndDownstream(); } @@ -538,12 +529,12 @@ TEST_P(VhdsIntegrationTest, VhdsOnDemandUpdateWithResourceNameAsAlias) { testRouterHeaderOnlyRequestAndResponse(nullptr, 1); cleanupUpstreamAndDownstream(); - 
codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); // verify that rds-based virtual host can be resolved testRouterHeaderOnlyRequestAndResponse(nullptr, 1, "/rdsone", "vhost.rds.first"); cleanupUpstreamAndDownstream(); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); // Attempt to make a request to an unknown host codec_client_ = makeHttpConnection(makeClientConnection((lookupPort("http")))); @@ -565,7 +556,7 @@ TEST_P(VhdsIntegrationTest, VhdsOnDemandUpdateWithResourceNameAsAlias) { upstream_request_->encodeHeaders(default_response_headers_, true); response->waitForHeaders(); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); cleanupUpstreamAndDownstream(); } @@ -584,12 +575,12 @@ TEST_P(VhdsIntegrationTest, VhdsOnDemandUpdateFailToResolveTheAlias) { testRouterHeaderOnlyRequestAndResponse(nullptr, 1); cleanupUpstreamAndDownstream(); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); // verify that rds-based virtual host can be resolved testRouterHeaderOnlyRequestAndResponse(nullptr, 1, "/rdsone", "vhost.rds.first"); cleanupUpstreamAndDownstream(); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); // Attempt to make a request to an unknown host codec_client_ = makeHttpConnection(makeClientConnection((lookupPort("http")))); @@ -606,7 +597,7 @@ TEST_P(VhdsIntegrationTest, VhdsOnDemandUpdateFailToResolveTheAlias) { notifyAboutAliasResolutionFailure("4", vhds_stream_, {"my_route/vhost.third"}); response->waitForHeaders(); - EXPECT_EQ("404", response->headers().Status()->value().getStringView()); + EXPECT_EQ("404", response->headers().getStatusValue()); cleanupUpstreamAndDownstream(); } @@ -625,12 +616,12 @@ TEST_P(VhdsIntegrationTest, VhdsOnDemandUpdateFailToResolveOneAliasOutOfSeveral) testRouterHeaderOnlyRequestAndResponse(nullptr, 1); 
cleanupUpstreamAndDownstream(); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); // verify that rds-based virtual host can be resolved testRouterHeaderOnlyRequestAndResponse(nullptr, 1, "/rdsone", "vhost.rds.first"); cleanupUpstreamAndDownstream(); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); // Attempt to make a request to an unknown host codec_client_ = makeHttpConnection(makeClientConnection((lookupPort("http")))); @@ -648,7 +639,40 @@ TEST_P(VhdsIntegrationTest, VhdsOnDemandUpdateFailToResolveOneAliasOutOfSeveral) {"vhost.first"}, {"my_route/vhost.third"}); response->waitForHeaders(); - EXPECT_EQ("404", response->headers().Status()->value().getStringView()); + EXPECT_EQ("404", response->headers().getStatusValue()); + + cleanupUpstreamAndDownstream(); +} + +// Verify that an vhds update succeeds even when the client closes its connection +TEST_P(VhdsIntegrationTest, VhdsOnDemandUpdateHttpConnectionCloses) { + // RDS exchange with a non-empty virtual_hosts field + useRdsWithVhosts(); + + testRouterHeaderOnlyRequestAndResponse(nullptr, 1); + cleanupUpstreamAndDownstream(); + EXPECT_TRUE(codec_client_->waitForDisconnect()); + + // Attempt to make a request to an unknown host + codec_client_ = makeHttpConnection(makeClientConnection((lookupPort("http")))); + Http::TestRequestHeaderMapImpl request_headers{{":method", "GET"}, + {":path", "/"}, + {":scheme", "http"}, + {":authority", "vhost_1"}, + {"x-lyft-user-id", "123"}}; + auto encoder_decoder = codec_client_->startRequest(request_headers); + Http::RequestEncoder& encoder = encoder_decoder.first; + IntegrationStreamDecoderPtr response = std::move(encoder_decoder.second); + EXPECT_TRUE(compareDeltaDiscoveryRequest(Config::TypeUrl::get().VirtualHost, + {vhdsRequestResourceName("vhost_1")}, {}, vhds_stream_)); + + envoy::api::v2::DeltaDiscoveryResponse vhds_update = + createDeltaDiscoveryResponseWithResourceNameUsedAsAlias(); + 
vhds_stream_->sendGrpcMessage(vhds_update); + + codec_client_->sendReset(encoder); + response->waitForReset(); + EXPECT_TRUE(codec_client_->connected()); cleanupUpstreamAndDownstream(); } diff --git a/test/integration/websocket_integration_test.cc b/test/integration/websocket_integration_test.cc index f66eddbf02dec..a7e92f4bca6f2 100644 --- a/test/integration/websocket_integration_test.cc +++ b/test/integration/websocket_integration_test.cc @@ -40,7 +40,7 @@ void commonValidate(ProxiedHeaders& proxied_headers, const OriginalHeaders& orig // If no content length is specified, the HTTP1 codec will add a chunked encoding header. if (original_headers.ContentLength() == nullptr && proxied_headers.TransferEncoding() != nullptr) { - ASSERT_EQ(proxied_headers.TransferEncoding()->value().getStringView(), "chunked"); + ASSERT_EQ(proxied_headers.getTransferEncodingValue(), "chunked"); proxied_headers.removeTransferEncoding(); } if (proxied_headers.Connection() != nullptr && @@ -60,7 +60,7 @@ void WebsocketIntegrationTest::validateUpgradeRequestHeaders( const Http::RequestHeaderMap& original_request_headers) { Http::TestRequestHeaderMapImpl proxied_request_headers(original_proxied_request_headers); if (proxied_request_headers.ForwardedProto()) { - ASSERT_EQ(proxied_request_headers.ForwardedProto()->value().getStringView(), "http"); + ASSERT_EQ(proxied_request_headers.getForwardedProtoValue(), "http"); proxied_request_headers.removeForwardedProto(); } @@ -70,7 +70,7 @@ void WebsocketIntegrationTest::validateUpgradeRequestHeaders( proxied_request_headers.removeEnvoyExpectedRequestTimeoutMs(); if (proxied_request_headers.Scheme()) { - ASSERT_EQ(proxied_request_headers.Scheme()->value().getStringView(), "http"); + ASSERT_EQ(proxied_request_headers.getSchemeValue(), "http"); } else { proxied_request_headers.setScheme("http"); } @@ -96,10 +96,12 @@ void WebsocketIntegrationTest::validateUpgradeResponseHeaders( // Check for and remove headers added by default for HTTP responses. 
ASSERT_TRUE(proxied_response_headers.Date() != nullptr); ASSERT_TRUE(proxied_response_headers.Server() != nullptr); - ASSERT_EQ(proxied_response_headers.Server()->value().getStringView(), "envoy"); + ASSERT_EQ(proxied_response_headers.getServerValue(), "envoy"); proxied_response_headers.removeDate(); proxied_response_headers.removeServer(); + ASSERT_TRUE(proxied_response_headers.TransferEncoding() == nullptr); + commonValidate(proxied_response_headers, original_response_headers); EXPECT_THAT(&proxied_response_headers, HeaderMapEqualIgnoreOrder(&original_response_headers)); @@ -364,7 +366,7 @@ TEST_P(WebsocketIntegrationTest, WebsocketCustomFilterChain) { response_ = std::move(encoder_decoder.second); codec_client_->sendData(encoder_decoder.first, large_req_str, false); response_->waitForEndStream(); - EXPECT_EQ("413", response_->headers().Status()->value().getStringView()); + EXPECT_EQ("413", response_->headers().getStatusValue()); waitForClientDisconnectOrReset(); codec_client_->close(); } @@ -381,7 +383,7 @@ TEST_P(WebsocketIntegrationTest, WebsocketCustomFilterChain) { response_ = std::move(encoder_decoder.second); codec_client_->sendData(encoder_decoder.first, large_req_str, false); response_->waitForEndStream(); - EXPECT_EQ("413", response_->headers().Status()->value().getStringView()); + EXPECT_EQ("413", response_->headers().getStatusValue()); waitForClientDisconnectOrReset(); codec_client_->close(); } @@ -419,9 +421,6 @@ TEST_P(WebsocketIntegrationTest, BidirectionalChunkedData) { if (upstreamProtocol() == FakeHttpConnection::Type::HTTP1) { ASSERT_TRUE(upstream_request_->headers().TransferEncoding() != nullptr); } - if (downstreamProtocol() == Http::CodecClient::Type::HTTP1) { - ASSERT_TRUE(response_->headers().TransferEncoding() != nullptr); - } // Send both a chunked request body and "websocket" payload. 
std::string request_payload = "3\r\n123\r\n0\r\n\r\nSomeWebsocketRequestPayload"; diff --git a/test/integration/websocket_integration_test.h b/test/integration/websocket_integration_test.h index 0657a8fa5a579..c060f043c7324 100644 --- a/test/integration/websocket_integration_test.h +++ b/test/integration/websocket_integration_test.h @@ -39,7 +39,7 @@ class WebsocketIntegrationTest : public HttpProtocolIntegrationTest { if (downstreamProtocol() != Http::CodecClient::Type::HTTP1) { response_->waitForReset(); } else { - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); } } diff --git a/test/integration/xds_integration_test.cc b/test/integration/xds_integration_test.cc index 9442b682c7a36..f36b97d26ff77 100644 --- a/test/integration/xds_integration_test.cc +++ b/test/integration/xds_integration_test.cc @@ -1,12 +1,16 @@ #include "envoy/config/bootstrap/v3/bootstrap.pb.h" #include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" +#include "common/buffer/buffer_impl.h" + #include "test/integration/http_integration.h" #include "test/integration/http_protocol_integration.h" +#include "test/integration/ssl_utility.h" #include "test/test_common/environment.h" #include "test/test_common/utility.h" #include "gtest/gtest.h" +#include "utility.h" namespace Envoy { namespace { @@ -33,7 +37,7 @@ class XdsIntegrationTest : public testing::TestWithParamlocalAddress()->ip()->port()); - createApiTestServer(api_filesystem_config, {"http"}, false, false, false); + createApiTestServer(api_filesystem_config, {"http"}, {false, false, false}, false); EXPECT_EQ(1, test_server_->counter("listener_manager.lds.update_success")->value()); EXPECT_EQ(1, test_server_->counter("http.router.rds.route_config_0.update_success")->value()); EXPECT_EQ(1, test_server_->counter("cluster_manager.cds.update_success")->value()); @@ -72,6 +76,331 @@ TEST_P(XdsIntegrationTestTypedStruct, RouterRequestAndResponseWithBodyNoBuffer) 
testRouterRequestAndResponseWithBody(1024, 512, false); } +class LdsInplaceUpdateTcpProxyIntegrationTest + : public testing::TestWithParam, + public BaseIntegrationTest { +public: + LdsInplaceUpdateTcpProxyIntegrationTest() + : BaseIntegrationTest(GetParam(), ConfigHelper::baseConfig() + R"EOF( + filter_chains: + - filter_chain_match: + application_protocols: ["alpn0"] + filters: + - name: envoy.filters.network.tcp_proxy + typed_config: + "@type": type.googleapis.com/envoy.config.filter.network.tcp_proxy.v2.TcpProxy + stat_prefix: tcp_stats + cluster: cluster_0 + - filter_chain_match: + application_protocols: ["alpn1"] + filters: + - name: envoy.filters.network.tcp_proxy + typed_config: + "@type": type.googleapis.com/envoy.config.filter.network.tcp_proxy.v2.TcpProxy + stat_prefix: tcp_stats + cluster: cluster_1 +)EOF") {} + + void initialize() override { + config_helper_.renameListener("tcp"); + std::string tls_inspector_config = ConfigHelper::tlsInspectorFilter(); + config_helper_.addListenerFilter(tls_inspector_config); + + config_helper_.addSslConfig(); + config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + auto* filter_chain_0 = + bootstrap.mutable_static_resources()->mutable_listeners(0)->mutable_filter_chains(0); + auto* filter_chain_1 = + bootstrap.mutable_static_resources()->mutable_listeners(0)->mutable_filter_chains(1); + filter_chain_1->mutable_transport_socket()->MergeFrom( + *filter_chain_0->mutable_transport_socket()); + + bootstrap.mutable_static_resources()->mutable_clusters()->Add()->MergeFrom( + *bootstrap.mutable_static_resources()->mutable_clusters(0)); + bootstrap.mutable_static_resources()->mutable_clusters(1)->set_name("cluster_1"); + }); + + BaseIntegrationTest::initialize(); + + context_manager_ = + std::make_unique(timeSystem()); + context_ = Ssl::createClientSslTransportSocketFactory({}, *context_manager_, *api_); + } + + std::unique_ptr createConnectionAndWrite(const std::string& alpn, + const 
std::string& request, + std::string& response) { + Buffer::OwnedImpl buffer(request); + return std::make_unique( + lookupPort("tcp"), buffer, + [&response](Network::ClientConnection&, const Buffer::Instance& data) -> void { + response.append(data.toString()); + }, + version_, *dispatcher_, + context_->createTransportSocket(std::make_shared( + absl::string_view(""), std::vector(), std::vector{alpn}))); + } + + std::unique_ptr context_manager_; + Network::TransportSocketFactoryPtr context_; + testing::NiceMock secret_manager_; +}; + +// Verify that tcp connection 1 is closed while client 0 survives when deleting filter chain 1. +TEST_P(LdsInplaceUpdateTcpProxyIntegrationTest, ReloadConfigDeletingFilterChain) { + setUpstreamCount(2); + initialize(); + std::string response_0; + auto client_conn_0 = createConnectionAndWrite("alpn0", "hello", response_0); + client_conn_0->waitForConnection(); + FakeRawConnectionPtr fake_upstream_connection_0; + ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection_0)); + + std::string response_1; + auto client_conn_1 = createConnectionAndWrite("alpn1", "dummy", response_1); + client_conn_1->waitForConnection(); + FakeRawConnectionPtr fake_upstream_connection_1; + ASSERT_TRUE(fake_upstreams_[1]->waitForRawConnection(fake_upstream_connection_1)); + + ConfigHelper new_config_helper(version_, *api_, + MessageUtil::getJsonStringFromMessage(config_helper_.bootstrap())); + new_config_helper.addConfigModifier( + [&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { + auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0); + listener->mutable_filter_chains()->RemoveLast(); + }); + new_config_helper.setLds("1"); + test_server_->waitForCounterGe("listener_manager.listener_in_place_updated", 1); + + while (!client_conn_1->closed()) { + dispatcher_->run(Event::Dispatcher::RunType::NonBlock); + } + ASSERT_EQ(response_1, ""); + + std::string observed_data_0; + 
ASSERT_TRUE(fake_upstream_connection_0->waitForData(5, &observed_data_0)); + EXPECT_EQ("hello", observed_data_0); + + ASSERT_TRUE(fake_upstream_connection_0->write("world")); + while (response_0.find("world") == std::string::npos) { + client_conn_0->run(Event::Dispatcher::RunType::NonBlock); + } + client_conn_0->close(); + while (!client_conn_0->closed()) { + dispatcher_->run(Event::Dispatcher::RunType::NonBlock); + } +} + +// Verify that tcp connection of filter chain 0 survives if new listener config adds new filter +// chain 2. +TEST_P(LdsInplaceUpdateTcpProxyIntegrationTest, ReloadConfigAddingFilterChain) { + setUpstreamCount(2); + initialize(); + test_server_->waitForCounterGe("listener_manager.listener_create_success", 1); + + std::string response_0; + auto client_conn_0 = createConnectionAndWrite("alpn0", "hello", response_0); + client_conn_0->waitForConnection(); + FakeRawConnectionPtr fake_upstream_connection_0; + ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection_0)); + + ConfigHelper new_config_helper(version_, *api_, + MessageUtil::getJsonStringFromMessage(config_helper_.bootstrap())); + new_config_helper.addConfigModifier( + [&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { + auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0); + listener->mutable_filter_chains()->Add()->MergeFrom(*listener->mutable_filter_chains(1)); + *listener->mutable_filter_chains(2) + ->mutable_filter_chain_match() + ->mutable_application_protocols(0) = "alpn2"; + }); + new_config_helper.setLds("1"); + test_server_->waitForCounterGe("listener_manager.listener_in_place_updated", 1); + test_server_->waitForCounterGe("listener_manager.listener_create_success", 2); + + std::string response_2; + auto client_conn_2 = createConnectionAndWrite("alpn2", "hello2", response_2); + client_conn_2->waitForConnection(); + FakeRawConnectionPtr fake_upstream_connection_2; + 
ASSERT_TRUE(fake_upstreams_[1]->waitForRawConnection(fake_upstream_connection_2)); + std::string observed_data_2; + ASSERT_TRUE(fake_upstream_connection_2->waitForData(6, &observed_data_2)); + EXPECT_EQ("hello2", observed_data_2); + + ASSERT_TRUE(fake_upstream_connection_2->write("world2")); + while (response_2.find("world2") == std::string::npos) { + client_conn_2->run(Event::Dispatcher::RunType::NonBlock); + } + client_conn_2->close(); + while (!client_conn_2->closed()) { + dispatcher_->run(Event::Dispatcher::RunType::NonBlock); + } + + std::string observed_data_0; + ASSERT_TRUE(fake_upstream_connection_0->waitForData(5, &observed_data_0)); + EXPECT_EQ("hello", observed_data_0); + + ASSERT_TRUE(fake_upstream_connection_0->write("world")); + while (response_0.find("world") == std::string::npos) { + client_conn_0->run(Event::Dispatcher::RunType::NonBlock); + } + client_conn_0->close(); + while (!client_conn_0->closed()) { + dispatcher_->run(Event::Dispatcher::RunType::NonBlock); + } +} + +class LdsInplaceUpdateHttpIntegrationTest + : public testing::TestWithParam, + public HttpIntegrationTest { +public: + LdsInplaceUpdateHttpIntegrationTest() + : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, GetParam()) {} + + void initialize() override { + autonomous_upstream_ = true; + setUpstreamCount(2); + + config_helper_.renameListener("http"); + std::string tls_inspector_config = ConfigHelper::tlsInspectorFilter(); + config_helper_.addListenerFilter(tls_inspector_config); + config_helper_.addSslConfig(); + config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + auto* filter_chain_0 = + bootstrap.mutable_static_resources()->mutable_listeners(0)->mutable_filter_chains(0); + *filter_chain_0->mutable_filter_chain_match()->mutable_application_protocols()->Add() = + "alpn0"; + auto* filter_chain_1 = bootstrap.mutable_static_resources() + ->mutable_listeners(0) + ->mutable_filter_chains() + ->Add(); + 
filter_chain_1->MergeFrom(*filter_chain_0); + + // filter chain 1 + // alpn1, route to cluster_1 + *filter_chain_1->mutable_filter_chain_match()->mutable_application_protocols(0) = "alpn1"; + + auto* config_blob = filter_chain_1->mutable_filters(0)->mutable_typed_config(); + + ASSERT_TRUE(config_blob->Is()); + auto hcm_config = MessageUtil::anyConvert< + envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager>( + *config_blob); + hcm_config.mutable_route_config() + ->mutable_virtual_hosts(0) + ->mutable_routes(0) + ->mutable_route() + ->set_cluster("cluster_1"); + config_blob->PackFrom(hcm_config); + bootstrap.mutable_static_resources()->mutable_clusters()->Add()->MergeFrom( + *bootstrap.mutable_static_resources()->mutable_clusters(0)); + bootstrap.mutable_static_resources()->mutable_clusters(1)->set_name("cluster_1"); + }); + + BaseIntegrationTest::initialize(); + + context_manager_ = + std::make_unique(timeSystem()); + context_ = Ssl::createClientSslTransportSocketFactory({}, *context_manager_, *api_); + address_ = Ssl::getSslAddress(version_, lookupPort("http")); + } + + IntegrationCodecClientPtr createHttpCodec(const std::string& alpn) { + auto ssl_conn = dispatcher_->createClientConnection( + address_, Network::Address::InstanceConstSharedPtr(), + context_->createTransportSocket(std::make_shared( + absl::string_view(""), std::vector(), std::vector{alpn})), + nullptr); + return makeHttpConnection(std::move(ssl_conn)); + } + + void expectResponseHeaderConnectionClose(IntegrationCodecClient& codec_client, + bool expect_close) { + IntegrationStreamDecoderPtr response = + codec_client.makeHeaderOnlyRequest(default_request_headers_); + + response->waitForEndStream(); + EXPECT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().getStatusValue()); + if (expect_close) { + EXPECT_EQ("close", response->headers().getConnectionValue()); + + } else { + EXPECT_EQ(nullptr, response->headers().Connection()); + } + } + + 
std::unique_ptr context_manager_; + Network::TransportSocketFactoryPtr context_; + testing::NiceMock secret_manager_; + Network::Address::InstanceConstSharedPtr address_; +}; + +// Verify that http response on filter chain 0 has "Connection: close" header when filter chain 0 +// is deleted during the listener update. +TEST_P(LdsInplaceUpdateHttpIntegrationTest, ReloadConfigDeletingFilterChain) { + initialize(); + + auto codec_client_1 = createHttpCodec("alpn1"); + auto codec_client_0 = createHttpCodec("alpn0"); + Cleanup cleanup([c1 = codec_client_1.get(), c0 = codec_client_0.get()]() { + c1->close(); + c0->close(); + }); + ConfigHelper new_config_helper(version_, *api_, + MessageUtil::getJsonStringFromMessage(config_helper_.bootstrap())); + new_config_helper.addConfigModifier( + [&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { + auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0); + listener->mutable_filter_chains()->RemoveLast(); + }); + + new_config_helper.setLds("1"); + test_server_->waitForCounterGe("listener_manager.listener_in_place_updated", 1); + test_server_->waitForGaugeGe("listener_manager.total_filter_chains_draining", 1); + + expectResponseHeaderConnectionClose(*codec_client_1, true); + test_server_->waitForGaugeGe("listener_manager.total_filter_chains_draining", 0); + expectResponseHeaderConnectionClose(*codec_client_0, false); +} + +// Verify that http clients of filter chain 0 survives if new listener config adds new filter +// chain 2. 
+TEST_P(LdsInplaceUpdateHttpIntegrationTest, ReloadConfigAddingFilterChain) { + initialize(); + test_server_->waitForCounterGe("listener_manager.listener_create_success", 1); + + auto codec_client_0 = createHttpCodec("alpn0"); + Cleanup cleanup0([c0 = codec_client_0.get()]() { c0->close(); }); + ConfigHelper new_config_helper(version_, *api_, + MessageUtil::getJsonStringFromMessage(config_helper_.bootstrap())); + new_config_helper.addConfigModifier( + [&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { + auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0); + listener->mutable_filter_chains()->Add()->MergeFrom(*listener->mutable_filter_chains(1)); + *listener->mutable_filter_chains(2) + ->mutable_filter_chain_match() + ->mutable_application_protocols(0) = "alpn2"; + }); + new_config_helper.setLds("1"); + test_server_->waitForCounterGe("listener_manager.listener_in_place_updated", 1); + test_server_->waitForCounterGe("listener_manager.listener_create_success", 2); + + auto codec_client_2 = createHttpCodec("alpn2"); + Cleanup cleanup2([c2 = codec_client_2.get()]() { c2->close(); }); + expectResponseHeaderConnectionClose(*codec_client_2, false); + expectResponseHeaderConnectionClose(*codec_client_0, false); +} + +INSTANTIATE_TEST_SUITE_P(IpVersions, LdsInplaceUpdateHttpIntegrationTest, + testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + TestUtility::ipTestParamsToString); + +INSTANTIATE_TEST_SUITE_P(IpVersions, LdsInplaceUpdateTcpProxyIntegrationTest, + testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + TestUtility::ipTestParamsToString); + using LdsIntegrationTest = HttpProtocolIntegrationTest; INSTANTIATE_TEST_SUITE_P(Protocols, LdsIntegrationTest, @@ -106,6 +435,7 @@ TEST_P(LdsIntegrationTest, ReloadConfig) { // Create an LDS response with the new config, and reload config. 
new_config_helper.setLds("1"); + test_server_->waitForCounterGe("listener_manager.listener_in_place_updated", 1); test_server_->waitForCounterGe("listener_manager.lds.update_success", 2); // HTTP 1.0 should now be enabled. @@ -122,9 +452,7 @@ TEST_P(LdsIntegrationTest, FailConfigLoad) { filter_chain->mutable_filters(0)->clear_typed_config(); filter_chain->mutable_filters(0)->set_name("grewgragra"); }); - EXPECT_DEATH_LOG_TO_STDERR(initialize(), - "Didn't find a registered implementation for name: 'grewgragra'"); + EXPECT_DEATH(initialize(), "Didn't find a registered implementation for name: 'grewgragra'"); } - } // namespace } // namespace Envoy diff --git a/test/integration/xfcc_integration_test.cc b/test/integration/xfcc_integration_test.cc index 25176afe941f3..e14ee0ef7e304 100644 --- a/test/integration/xfcc_integration_test.cc +++ b/test/integration/xfcc_integration_test.cc @@ -2,7 +2,6 @@ #include #include -#include #include "envoy/config/bootstrap/v3/bootstrap.pb.h" #include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" @@ -21,6 +20,7 @@ #include "test/test_common/printers.h" #include "test/test_common/utility.h" +#include "absl/container/node_hash_map.h" #include "gmock/gmock.h" #include "gtest/gtest.h" #include "integration.h" @@ -178,8 +178,7 @@ void XfccIntegrationTest::testRequestAndResponseWithXfccHeader(std::string previ if (expected_xfcc.empty()) { EXPECT_EQ(nullptr, upstream_request_->headers().ForwardedClientCert()); } else { - EXPECT_EQ(expected_xfcc, - upstream_request_->headers().ForwardedClientCert()->value().getStringView()); + EXPECT_EQ(expected_xfcc, upstream_request_->headers().getForwardedClientCertValue()); } upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, true); response->waitForEndStream(); @@ -430,8 +429,8 @@ TEST_P(XfccIntegrationTest, TagExtractedNameGenerationTest) { // } // std::cout << "};" << std::endl; - std::unordered_map 
tag_extracted_counter_map; - std::unordered_map tag_extracted_gauge_map; + absl::node_hash_map tag_extracted_counter_map; + absl::node_hash_map tag_extracted_gauge_map; tag_extracted_counter_map = { {listenerStatPrefix("downstream_cx_total"), "listener.downstream_cx_total"}, @@ -749,7 +748,7 @@ TEST_P(XfccIntegrationTest, TagExtractedNameGenerationTest) { {"server.version", "server.version"}}; auto test_name_against_mapping = - [](const std::unordered_map& extracted_name_map, + [](const absl::node_hash_map& extracted_name_map, const Stats::Metric& metric) { auto it = extracted_name_map.find(metric.name()); // Ignore any metrics that are not found in the map for ease of addition diff --git a/test/integration/xfcc_integration_test.h b/test/integration/xfcc_integration_test.h index 538a1bc86d970..6e4997f8b0dc8 100644 --- a/test/integration/xfcc_integration_test.h +++ b/test/integration/xfcc_integration_test.h @@ -8,7 +8,7 @@ #include "test/config/integration/certs/clientcert_hash.h" #include "test/integration/http_integration.h" #include "test/integration/server.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/transport_socket_factory_context.h" #include "absl/strings/ascii.h" #include "absl/strings/str_replace.h" diff --git a/test/main.cc b/test/main.cc index 42bc71b05ed35..eae6c3fc4f68f 100644 --- a/test/main.cc +++ b/test/main.cc @@ -5,43 +5,13 @@ #include "test/test_common/utility.h" #include "test/test_runner.h" -#include "absl/debugging/symbolize.h" - -#ifdef ENVOY_HANDLE_SIGNALS -#include "common/signal/signal_action.h" -#endif - #include "tools/cpp/runfiles/runfiles.h" -#if defined(WIN32) -static void NoopInvalidParameterHandler(const wchar_t* expression, const wchar_t* function, - const wchar_t* file, unsigned int line, - uintptr_t pReserved) { - return; -} -#endif - using bazel::tools::cpp::runfiles::Runfiles; // The main entry point (and the rest of this file) should have no logic in it, // this allows overriding by site specific 
versions of main.cc. int main(int argc, char** argv) { -#if defined(WIN32) - _set_abort_behavior(0, _WRITE_ABORT_MSG | _CALL_REPORTFAULT); - - _set_invalid_parameter_handler(NoopInvalidParameterHandler); - - WSADATA wsa_data; - const WORD version_requested = MAKEWORD(2, 2); - RELEASE_ASSERT(WSAStartup(version_requested, &wsa_data) == 0, ""); -#endif - -#ifndef __APPLE__ - absl::InitializeSymbolizer(argv[0]); -#endif -#ifdef ENVOY_HANDLE_SIGNALS - // Enabled by default. Control with "bazel --define=signal_trace=disabled" - Envoy::SignalAction handle_sigs; -#endif + Envoy::TestEnvironment::initializeTestMain(argv[0]); // Create a Runfiles object for runfiles lookup. // https://github.com/bazelbuild/bazel/blob/master/tools/cpp/runfiles/runfiles_src.h#L32 diff --git a/test/mocks/BUILD b/test/mocks/BUILD index 29ef006615108..f12ced49f118d 100644 --- a/test/mocks/BUILD +++ b/test/mocks/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test_library( @@ -13,6 +13,8 @@ envoy_cc_test_library( srcs = ["common.cc"], hdrs = ["common.h"], deps = [ + "//include/envoy/common:conn_pool_interface", + "//include/envoy/common:random_generator_interface", "//include/envoy/common:time_interface", "//include/envoy/common:token_bucket_interface", "//source/common/common:minimal_logger_lib", diff --git a/test/mocks/access_log/BUILD b/test/mocks/access_log/BUILD index 58bbae8a0aa7b..6e2ad4141fece 100644 --- a/test/mocks/access_log/BUILD +++ b/test/mocks/access_log/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_mock( diff --git a/test/mocks/access_log/mocks.h b/test/mocks/access_log/mocks.h index ce5539207fa97..8d30128564ec0 100644 --- a/test/mocks/access_log/mocks.h +++ 
b/test/mocks/access_log/mocks.h @@ -30,7 +30,8 @@ class MockFilter : public Filter { MOCK_METHOD(bool, evaluate, (const StreamInfo::StreamInfo& info, const Http::RequestHeaderMap& request_headers, const Http::ResponseHeaderMap& response_headers, - const Http::ResponseTrailerMap& response_trailers)); + const Http::ResponseTrailerMap& response_trailers), + (const)); }; class MockAccessLogManager : public AccessLogManager { diff --git a/test/mocks/api/BUILD b/test/mocks/api/BUILD index 491bef01dadb3..e4d44c573e553 100644 --- a/test/mocks/api/BUILD +++ b/test/mocks/api/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", @@ -7,6 +5,8 @@ load( "envoy_select_hot_restart", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_mock( diff --git a/test/mocks/api/mocks.cc b/test/mocks/api/mocks.cc index f12240cfc0940..e1bd53bbeca6a 100644 --- a/test/mocks/api/mocks.cc +++ b/test/mocks/api/mocks.cc @@ -52,7 +52,7 @@ SysCallIntResult MockOsSysCalls::setsockopt(os_fd_t sockfd, int level, int optna SysCallIntResult MockOsSysCalls::getsockopt(os_fd_t sockfd, int level, int optname, void* optval, socklen_t* optlen) { - ASSERT(*optlen == sizeof(int)); + ASSERT(*optlen == sizeof(int) || *optlen == sizeof(sockaddr_storage)); int val = 0; const auto& it = boolsockopts_.find(SockOptKey(sockfd, level, optname)); if (it != boolsockopts_.end()) { diff --git a/test/mocks/api/mocks.h b/test/mocks/api/mocks.h index cd2ef8dadf30f..22fd91168e395 100644 --- a/test/mocks/api/mocks.h +++ b/test/mocks/api/mocks.h @@ -90,6 +90,7 @@ class MockOsSysCalls : public OsSysCallsImpl { MOCK_METHOD(SysCallIntResult, listen, (os_fd_t sockfd, int backlog)); MOCK_METHOD(SysCallSizeResult, write, (os_fd_t sockfd, const void* buffer, size_t length)); MOCK_METHOD(bool, supportsMmsg, (), (const)); + MOCK_METHOD(bool, supportsUdpGro, (), (const)); // Map from (sockfd,level,optname) to boolean socket option. 
using SockOptKey = std::tuple; diff --git a/test/mocks/buffer/BUILD b/test/mocks/buffer/BUILD index 9d9634528ae7e..38d61c302cf41 100644 --- a/test/mocks/buffer/BUILD +++ b/test/mocks/buffer/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_mock( diff --git a/test/mocks/buffer/mocks.cc b/test/mocks/buffer/mocks.cc index 64459da03703f..8328f870b0632 100644 --- a/test/mocks/buffer/mocks.cc +++ b/test/mocks/buffer/mocks.cc @@ -6,16 +6,18 @@ namespace Envoy { template <> MockBufferBase::MockBufferBase(std::function below_low, - std::function above_high) - : Buffer::WatermarkBuffer(below_low, above_high) {} + std::function above_high, + std::function above_overflow) + : Buffer::WatermarkBuffer(below_low, above_high, above_overflow) {} template <> MockBufferBase::MockBufferBase() - : Buffer::WatermarkBuffer([&]() -> void {}, [&]() -> void {}) { + : Buffer::WatermarkBuffer([&]() -> void {}, [&]() -> void {}, [&]() -> void {}) { ASSERT(0); // This constructor is not supported for WatermarkBuffer. } template <> -MockBufferBase::MockBufferBase(std::function, std::function) +MockBufferBase::MockBufferBase(std::function, std::function, + std::function) : Buffer::OwnedImpl() { ASSERT(0); // This constructor is not supported for OwnedImpl. 
} diff --git a/test/mocks/buffer/mocks.h b/test/mocks/buffer/mocks.h index a37d6cc3a2973..6918729c7b394 100644 --- a/test/mocks/buffer/mocks.h +++ b/test/mocks/buffer/mocks.h @@ -17,7 +17,8 @@ namespace Envoy { template class MockBufferBase : public BaseClass { public: MockBufferBase(); - MockBufferBase(std::function below_low, std::function above_high); + MockBufferBase(std::function below_low, std::function above_high, + std::function above_overflow); MOCK_METHOD(Api::IoCallUint64Result, write, (Network::IoHandle & io_handle)); MOCK_METHOD(void, move, (Buffer::Instance & rhs)); @@ -57,12 +58,14 @@ template class MockBufferBase : public BaseClass { template <> MockBufferBase::MockBufferBase(std::function below_low, - std::function above_high); + std::function above_high, + std::function above_overflow); template <> MockBufferBase::MockBufferBase(); template <> MockBufferBase::MockBufferBase(std::function below_low, - std::function above_high); + std::function above_high, + std::function above_overflow); template <> MockBufferBase::MockBufferBase(); class MockBuffer : public MockBufferBase { @@ -78,8 +81,9 @@ class MockWatermarkBuffer : public MockBufferBase { public: using BaseClass = MockBufferBase; - MockWatermarkBuffer(std::function below_low, std::function above_high) - : BaseClass(below_low, above_high) { + MockWatermarkBuffer(std::function below_low, std::function above_high, + std::function above_overflow) + : BaseClass(below_low, above_high, above_overflow) { ON_CALL(*this, write(testing::_)) .WillByDefault(testing::Invoke(this, &MockWatermarkBuffer::trackWrites)); ON_CALL(*this, move(testing::_)) @@ -92,13 +96,14 @@ class MockBufferFactory : public Buffer::WatermarkFactory { MockBufferFactory(); ~MockBufferFactory() override; - Buffer::InstancePtr create(std::function below_low, - std::function above_high) override { - return Buffer::InstancePtr{create_(below_low, above_high)}; + Buffer::InstancePtr create(std::function below_low, std::function 
above_high, + std::function above_overflow) override { + return Buffer::InstancePtr{create_(below_low, above_high, above_overflow)}; } MOCK_METHOD(Buffer::Instance*, create_, - (std::function below_low, std::function above_high)); + (std::function below_low, std::function above_high, + std::function above_overflow)); }; MATCHER_P(BufferEqual, rhs, testing::PrintToString(*rhs)) { @@ -128,4 +133,8 @@ ACTION_P(AddBufferToStringWithoutDraining, target_string) { target_string->append(arg0.toString()); } +MATCHER_P(RawSliceVectorEqual, rhs, testing::PrintToString(rhs)) { + return TestUtility::rawSlicesEqual(arg, rhs.data(), rhs.size()); +} + } // namespace Envoy diff --git a/test/mocks/common.cc b/test/mocks/common.cc index ba36e63fc102c..ea4012729a8c1 100644 --- a/test/mocks/common.cc +++ b/test/mocks/common.cc @@ -1,6 +1,21 @@ #include "test/mocks/common.h" +using testing::Return; + namespace Envoy { +namespace ConnectionPool { +MockCancellable::MockCancellable() = default; +MockCancellable::~MockCancellable() = default; +} // namespace ConnectionPool + +namespace Random { + +MockRandomGenerator::MockRandomGenerator() { ON_CALL(*this, uuid()).WillByDefault(Return(uuid_)); } + +MockRandomGenerator::~MockRandomGenerator() = default; + +} // namespace Random + ReadyWatcher::ReadyWatcher() = default; ReadyWatcher::~ReadyWatcher() = default; diff --git a/test/mocks/common.h b/test/mocks/common.h index 89b7c3e9ca3eb..cf5a915825b2e 100644 --- a/test/mocks/common.h +++ b/test/mocks/common.h @@ -2,6 +2,8 @@ #include +#include "envoy/common/conn_pool.h" +#include "envoy/common/random_generator.h" #include "envoy/common/scope_tracker.h" #include "envoy/common/time.h" #include "envoy/common/token_bucket.h" @@ -50,16 +52,17 @@ class MockTimeSystem : public Event::TestTimeSystem { // where timer callbacks are triggered by the advancement of time. This implementation // matches recent behavior, where real-time timers were created directly in libevent // by dispatcher_impl.cc. 
- Event::SchedulerPtr createScheduler(Event::Scheduler& base_scheduler) override { - return real_time_.createScheduler(base_scheduler); + Event::SchedulerPtr createScheduler(Event::Scheduler& base_scheduler, + Event::CallbackScheduler& cb_scheduler) override { + return real_time_.createScheduler(base_scheduler, cb_scheduler); } void advanceTimeWait(const Duration& duration) override { real_time_.advanceTimeWait(duration); } void advanceTimeAsync(const Duration& duration) override { real_time_.advanceTimeAsync(duration); } - Thread::CondVar::WaitStatus - waitFor(Thread::MutexBasicLockable& mutex, Thread::CondVar& condvar, - const Duration& duration) noexcept ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex) override { + Thread::CondVar::WaitStatus waitFor(Thread::MutexBasicLockable& mutex, Thread::CondVar& condvar, + const Duration& duration) noexcept + ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex) override { return real_time_.waitFor(mutex, condvar, duration); // NO_CHECK_FORMAT(real_time) } MOCK_METHOD(SystemTime, systemTime, ()); @@ -95,4 +98,29 @@ class MockScopedTrackedObject : public ScopeTrackedObject { MOCK_METHOD(void, dumpState, (std::ostream&, int), (const)); }; +namespace ConnectionPool { + +class MockCancellable : public Cancellable { +public: + MockCancellable(); + ~MockCancellable() override; + + // ConnectionPool::Cancellable + MOCK_METHOD(void, cancel, (CancelPolicy cancel_policy)); +}; +} // namespace ConnectionPool + +namespace Random { +class MockRandomGenerator : public RandomGenerator { +public: + MockRandomGenerator(); + ~MockRandomGenerator() override; + + MOCK_METHOD(uint64_t, random, ()); + MOCK_METHOD(std::string, uuid, ()); + + const std::string uuid_{"a121e9e1-feae-4136-9e0e-6fac343d56c9"}; +}; +} // namespace Random + } // namespace Envoy diff --git a/test/mocks/compression/compressor/BUILD b/test/mocks/compression/compressor/BUILD new file mode 100644 index 0000000000000..855752e06a411 --- /dev/null +++ b/test/mocks/compression/compressor/BUILD @@ -0,0 
+1,19 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_mock", + "envoy_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_cc_mock( + name = "compressor_mocks", + srcs = ["mocks.cc"], + hdrs = ["mocks.h"], + deps = [ + "//include/envoy/compression/compressor:compressor_config_interface", + "//include/envoy/compression/compressor:compressor_interface", + ], +) diff --git a/test/mocks/compression/compressor/mocks.cc b/test/mocks/compression/compressor/mocks.cc new file mode 100644 index 0000000000000..9d2ff9cac79ec --- /dev/null +++ b/test/mocks/compression/compressor/mocks.cc @@ -0,0 +1,21 @@ +#include "test/mocks/compression/compressor/mocks.h" + +using testing::ReturnRef; + +namespace Envoy { +namespace Compression { +namespace Compressor { + +MockCompressor::MockCompressor() = default; +MockCompressor::~MockCompressor() = default; + +MockCompressorFactory::MockCompressorFactory() { + ON_CALL(*this, statsPrefix()).WillByDefault(ReturnRef(stats_prefix_)); + ON_CALL(*this, contentEncoding()).WillByDefault(ReturnRef(content_encoding_)); +} + +MockCompressorFactory::~MockCompressorFactory() = default; + +} // namespace Compressor +} // namespace Compression +} // namespace Envoy diff --git a/test/mocks/compression/compressor/mocks.h b/test/mocks/compression/compressor/mocks.h new file mode 100644 index 0000000000000..e5438699484f7 --- /dev/null +++ b/test/mocks/compression/compressor/mocks.h @@ -0,0 +1,37 @@ +#pragma once + +#include "envoy/compression/compressor/compressor.h" +#include "envoy/compression/compressor/config.h" + +#include "gmock/gmock.h" + +namespace Envoy { +namespace Compression { +namespace Compressor { + +class MockCompressor : public Compressor { +public: + MockCompressor(); + ~MockCompressor() override; + + // Compressor::Compressor + MOCK_METHOD(void, compress, (Buffer::Instance & buffer, State state)); +}; + +class MockCompressorFactory : public CompressorFactory { +public: + MockCompressorFactory(); + 
~MockCompressorFactory() override; + + // Compressor::CompressorFactory + MOCK_METHOD(CompressorPtr, createCompressor, ()); + MOCK_METHOD(const std::string&, statsPrefix, (), (const)); + MOCK_METHOD(const std::string&, contentEncoding, (), (const)); + + const std::string stats_prefix_{"mock"}; + const std::string content_encoding_{"mock"}; +}; + +} // namespace Compressor +} // namespace Compression +} // namespace Envoy diff --git a/test/mocks/compression/decompressor/BUILD b/test/mocks/compression/decompressor/BUILD new file mode 100644 index 0000000000000..5e308cef1dede --- /dev/null +++ b/test/mocks/compression/decompressor/BUILD @@ -0,0 +1,19 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_mock", + "envoy_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_cc_mock( + name = "decompressor_mocks", + srcs = ["mocks.cc"], + hdrs = ["mocks.h"], + deps = [ + "//include/envoy/compression/decompressor:decompressor_config_interface", + "//include/envoy/compression/decompressor:decompressor_interface", + ], +) diff --git a/test/mocks/compression/decompressor/mocks.cc b/test/mocks/compression/decompressor/mocks.cc new file mode 100644 index 0000000000000..48017c6f3f6f2 --- /dev/null +++ b/test/mocks/compression/decompressor/mocks.cc @@ -0,0 +1,21 @@ +#include "test/mocks/compression/decompressor/mocks.h" + +using testing::ReturnRef; + +namespace Envoy { +namespace Compression { +namespace Decompressor { + +MockDecompressor::MockDecompressor() = default; +MockDecompressor::~MockDecompressor() = default; + +MockDecompressorFactory::MockDecompressorFactory() { + ON_CALL(*this, statsPrefix()).WillByDefault(ReturnRef(stats_prefix_)); + ON_CALL(*this, contentEncoding()).WillByDefault(ReturnRef(content_encoding_)); +} + +MockDecompressorFactory::~MockDecompressorFactory() = default; + +} // namespace Decompressor +} // namespace Compression +} // namespace Envoy diff --git a/test/mocks/compression/decompressor/mocks.h 
b/test/mocks/compression/decompressor/mocks.h new file mode 100644 index 0000000000000..5910ab9336a8c --- /dev/null +++ b/test/mocks/compression/decompressor/mocks.h @@ -0,0 +1,38 @@ +#pragma once + +#include "envoy/compression/decompressor/config.h" +#include "envoy/compression/decompressor/decompressor.h" + +#include "gmock/gmock.h" + +namespace Envoy { +namespace Compression { +namespace Decompressor { + +class MockDecompressor : public Decompressor { +public: + MockDecompressor(); + ~MockDecompressor() override; + + // Decompressor::Decompressor + MOCK_METHOD(void, decompress, + (const Buffer::Instance& input_buffer, Buffer::Instance& output_buffer)); +}; + +class MockDecompressorFactory : public DecompressorFactory { +public: + MockDecompressorFactory(); + ~MockDecompressorFactory() override; + + // Decompressor::DecompressorFactory + MOCK_METHOD(DecompressorPtr, createDecompressor, (const std::string&)); + MOCK_METHOD(const std::string&, statsPrefix, (), (const)); + MOCK_METHOD(const std::string&, contentEncoding, (), (const)); + + const std::string stats_prefix_{"mock"}; + const std::string content_encoding_{"mock"}; +}; + +} // namespace Decompressor +} // namespace Compression +} // namespace Envoy diff --git a/test/mocks/config/BUILD b/test/mocks/config/BUILD index c9756f13893ae..6df51d8b98c6f 100644 --- a/test/mocks/config/BUILD +++ b/test/mocks/config/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_mock( diff --git a/test/mocks/config/mocks.cc b/test/mocks/config/mocks.cc index 374eec10d0716..f578384ab7d2f 100644 --- a/test/mocks/config/mocks.cc +++ b/test/mocks/config/mocks.cc @@ -8,15 +8,15 @@ namespace Envoy { namespace Config { MockSubscriptionFactory::MockSubscriptionFactory() { - ON_CALL(*this, subscriptionFromConfigSource(_, _, _, _)) - .WillByDefault(testing::Invoke([this](const 
envoy::config::core::v3::ConfigSource&, - absl::string_view, Stats::Scope&, - SubscriptionCallbacks& callbacks) -> SubscriptionPtr { - auto ret = std::make_unique>(); - subscription_ = ret.get(); - callbacks_ = &callbacks; - return ret; - })); + ON_CALL(*this, subscriptionFromConfigSource(_, _, _, _, _)) + .WillByDefault(testing::Invoke( + [this](const envoy::config::core::v3::ConfigSource&, absl::string_view, Stats::Scope&, + SubscriptionCallbacks& callbacks, OpaqueResourceDecoder&) -> SubscriptionPtr { + auto ret = std::make_unique>(); + subscription_ = ret.get(); + callbacks_ = &callbacks; + return ret; + })); ON_CALL(*this, messageValidationVisitor()) .WillByDefault(testing::ReturnRef(ProtobufMessage::getStrictValidationVisitor())); } @@ -32,13 +32,15 @@ MockGrpcMux::~MockGrpcMux() = default; MockGrpcStreamCallbacks::MockGrpcStreamCallbacks() = default; MockGrpcStreamCallbacks::~MockGrpcStreamCallbacks() = default; -MockSubscriptionCallbacks::MockSubscriptionCallbacks() { - ON_CALL(*this, resourceName(testing::_)) - .WillByDefault(testing::Invoke(TestUtility::xdsResourceName)); -} - +MockSubscriptionCallbacks::MockSubscriptionCallbacks() = default; MockSubscriptionCallbacks::~MockSubscriptionCallbacks() = default; +MockOpaqueResourceDecoder::MockOpaqueResourceDecoder() = default; +MockOpaqueResourceDecoder::~MockOpaqueResourceDecoder() = default; + +MockUntypedConfigUpdateCallbacks::MockUntypedConfigUpdateCallbacks() = default; +MockUntypedConfigUpdateCallbacks::~MockUntypedConfigUpdateCallbacks() = default; + MockTypedFactory::~MockTypedFactory() = default; } // namespace Config } // namespace Envoy diff --git a/test/mocks/config/mocks.h b/test/mocks/config/mocks.h index 4ac54dbc2010f..29412fffbd068 100644 --- a/test/mocks/config/mocks.h +++ b/test/mocks/config/mocks.h @@ -24,6 +24,30 @@ class MockSubscriptionCallbacks : public SubscriptionCallbacks { MockSubscriptionCallbacks(); ~MockSubscriptionCallbacks() override; + MOCK_METHOD(void, onConfigUpdate, + 
(const std::vector& resources, const std::string& version_info)); + MOCK_METHOD(void, onConfigUpdate, + (const std::vector& added_resources, + const Protobuf::RepeatedPtrField& removed_resources, + const std::string& system_version_info)); + MOCK_METHOD(void, onConfigUpdateFailed, + (Envoy::Config::ConfigUpdateFailureReason reason, const EnvoyException* e)); +}; + +class MockOpaqueResourceDecoder : public OpaqueResourceDecoder { +public: + MockOpaqueResourceDecoder(); + ~MockOpaqueResourceDecoder() override; + + MOCK_METHOD(ProtobufTypes::MessagePtr, decodeResource, (const ProtobufWkt::Any& resource)); + MOCK_METHOD(std::string, resourceName, (const Protobuf::Message& resource)); +}; + +class MockUntypedConfigUpdateCallbacks : public UntypedConfigUpdateCallbacks { +public: + MockUntypedConfigUpdateCallbacks(); + ~MockUntypedConfigUpdateCallbacks() override; + MOCK_METHOD(void, onConfigUpdate, (const Protobuf::RepeatedPtrField& resources, const std::string& version_info)); @@ -34,7 +58,6 @@ class MockSubscriptionCallbacks : public SubscriptionCallbacks { const std::string& system_version_info)); MOCK_METHOD(void, onConfigUpdateFailed, (Envoy::Config::ConfigUpdateFailureReason reason, const EnvoyException* e)); - MOCK_METHOD(std::string, resourceName, (const ProtobufWkt::Any& resource)); }; class MockSubscription : public Subscription { @@ -50,7 +73,8 @@ class MockSubscriptionFactory : public SubscriptionFactory { MOCK_METHOD(SubscriptionPtr, subscriptionFromConfigSource, (const envoy::config::core::v3::ConfigSource& config, absl::string_view type_url, - Stats::Scope& scope, SubscriptionCallbacks& callbacks)); + Stats::Scope& scope, SubscriptionCallbacks& callbacks, + OpaqueResourceDecoder& resource_decoder)); MOCK_METHOD(ProtobufMessage::ValidationVisitor&, messageValidationVisitor, ()); MockSubscription* subscription_{}; @@ -70,10 +94,9 @@ class MockGrpcMux : public GrpcMux { MockGrpcMux(); ~MockGrpcMux() override; - MOCK_METHOD(void, start, ()); - 
MOCK_METHOD(void, pause, (const std::string& type_url)); - MOCK_METHOD(void, resume, (const std::string& type_url)); - MOCK_METHOD(bool, paused, (const std::string& type_url), (const)); + MOCK_METHOD(void, start, (), (override)); + MOCK_METHOD(ScopedResume, pause, (const std::string& type_url), (override)); + MOCK_METHOD(ScopedResume, pause, (const std::vector type_urls), (override)); MOCK_METHOD(void, addSubscription, (const std::set& resources, const std::string& type_url, @@ -84,7 +107,7 @@ class MockGrpcMux : public GrpcMux { MOCK_METHOD(GrpcMuxWatchPtr, addWatch, (const std::string& type_url, const std::set& resources, - SubscriptionCallbacks& callbacks)); + SubscriptionCallbacks& callbacks, OpaqueResourceDecoder& resource_decoder)); }; class MockGrpcStreamCallbacks @@ -96,7 +119,8 @@ class MockGrpcStreamCallbacks MOCK_METHOD(void, onStreamEstablished, ()); MOCK_METHOD(void, onEstablishmentFailure, ()); MOCK_METHOD(void, onDiscoveryResponse, - (std::unique_ptr && message)); + (std::unique_ptr && message, + ControlPlaneStats& control_plane_stats)); MOCK_METHOD(void, onWriteable, ()); }; diff --git a/test/mocks/event/BUILD b/test/mocks/event/BUILD index c807c41d0b6ad..8a27b1804d07a 100644 --- a/test/mocks/event/BUILD +++ b/test/mocks/event/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_mock( diff --git a/test/mocks/event/mocks.cc b/test/mocks/event/mocks.cc index 6cdf4b5d10eca..d263c3ad68bca 100644 --- a/test/mocks/event/mocks.cc +++ b/test/mocks/event/mocks.cc @@ -5,6 +5,7 @@ using testing::_; using testing::Assign; +using testing::DoAll; using testing::Invoke; using testing::NiceMock; using testing::Return; @@ -50,6 +51,19 @@ MockTimer::MockTimer(MockDispatcher* dispatcher) : MockTimer() { MockTimer::~MockTimer() = default; +MockSchedulableCallback::~MockSchedulableCallback() = default; + 
+MockSchedulableCallback::MockSchedulableCallback(MockDispatcher* dispatcher) + : dispatcher_(dispatcher) { + EXPECT_CALL(*dispatcher, createSchedulableCallback_(_)) + .WillOnce(DoAll(SaveArg<0>(&callback_), Return(this))) + .RetiresOnSaturation(); + ON_CALL(*this, scheduleCallbackCurrentIteration()).WillByDefault(Assign(&enabled_, true)); + ON_CALL(*this, scheduleCallbackNextIteration()).WillByDefault(Assign(&enabled_, true)); + ON_CALL(*this, cancel()).WillByDefault(Assign(&enabled_, false)); + ON_CALL(*this, enabled()).WillByDefault(ReturnPointee(&enabled_)); +} + MockSignalEvent::MockSignalEvent(MockDispatcher* dispatcher) { EXPECT_CALL(*dispatcher, listenForSignal_(_, _)) .WillOnce(DoAll(SaveArg<1>(&callback_), Return(this))) diff --git a/test/mocks/event/mocks.h b/test/mocks/event/mocks.h index 018f937203a42..55983c27c5369 100644 --- a/test/mocks/event/mocks.h +++ b/test/mocks/event/mocks.h @@ -74,7 +74,17 @@ class MockDispatcher : public Dispatcher { } Event::TimerPtr createTimer(Event::TimerCb cb) override { - return Event::TimerPtr{createTimer_(cb)}; + auto timer = Event::TimerPtr{createTimer_(cb)}; + // Assert that the timer is not null to avoid confusing test failures down the line. + ASSERT(timer != nullptr); + return timer; + } + + Event::SchedulableCallbackPtr createSchedulableCallback(std::function cb) override { + auto schedulable_cb = Event::SchedulableCallbackPtr{createSchedulableCallback_(cb)}; + // Assert that schedulable_cb is not null to avoid confusing test failures down the line. 
+ ASSERT(schedulable_cb != nullptr); + return schedulable_cb; } void deferredDelete(DeferredDeletablePtr&& to_delete) override { @@ -109,6 +119,7 @@ class MockDispatcher : public Dispatcher { MOCK_METHOD(Network::UdpListener*, createUdpListener_, (Network::SocketSharedPtr && socket, Network::UdpListenerCallbacks& cb)); MOCK_METHOD(Timer*, createTimer_, (Event::TimerCb cb)); + MOCK_METHOD(SchedulableCallback*, createSchedulableCallback_, (std::function cb)); MOCK_METHOD(void, deferredDelete_, (DeferredDeletable * to_delete)); MOCK_METHOD(void, exit, ()); MOCK_METHOD(SignalEvent*, listenForSignal_, (int signal_num, SignalCb cb)); @@ -159,10 +170,33 @@ class MockTimer : public Timer { const ScopeTrackedObject* scope_{}; bool enabled_{}; -private: Event::TimerCb callback_; }; +class MockSchedulableCallback : public SchedulableCallback { +public: + MockSchedulableCallback(MockDispatcher* dispatcher); + ~MockSchedulableCallback() override; + + void invokeCallback() { + EXPECT_TRUE(enabled_); + enabled_ = false; + callback_(); + } + + // SchedulableCallback + MOCK_METHOD(void, scheduleCallbackCurrentIteration, ()); + MOCK_METHOD(void, scheduleCallbackNextIteration, ()); + MOCK_METHOD(void, cancel, ()); + MOCK_METHOD(bool, enabled, ()); + + MockDispatcher* dispatcher_{}; + bool enabled_{}; + +private: + std::function callback_; +}; + class MockSignalEvent : public SignalEvent { public: MockSignalEvent(MockDispatcher* dispatcher); diff --git a/test/mocks/filesystem/BUILD b/test/mocks/filesystem/BUILD index edb14369c6b31..96cbe7876b313 100644 --- a/test/mocks/filesystem/BUILD +++ b/test/mocks/filesystem/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_mock( diff --git a/test/mocks/grpc/BUILD b/test/mocks/grpc/BUILD index 436363c082031..972cba77fbe11 100644 --- a/test/mocks/grpc/BUILD +++ b/test/mocks/grpc/BUILD @@ -1,11 +1,11 @@ 
-licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_mock( diff --git a/test/mocks/grpc/mocks.h b/test/mocks/grpc/mocks.h index bbf8f6d1b60d1..476ba677f9453 100644 --- a/test/mocks/grpc/mocks.h +++ b/test/mocks/grpc/mocks.h @@ -1,6 +1,7 @@ #pragma once #include +#include #include #include "envoy/config/core/v3/grpc_service.pb.h" @@ -36,12 +37,15 @@ class MockAsyncStream : public RawAsyncStream { MOCK_METHOD(void, sendMessageRaw_, (Buffer::InstancePtr & request, bool end_stream)); MOCK_METHOD(void, closeStream, ()); MOCK_METHOD(void, resetStream, ()); + MOCK_METHOD(bool, isAboveWriteBufferHighWatermark, (), (const)); }; +template using ResponseTypePtr = std::unique_ptr; + template class MockAsyncRequestCallbacks : public AsyncRequestCallbacks { public: - void onSuccess(std::unique_ptr&& response, Tracing::Span& span) { + void onSuccess(ResponseTypePtr&& response, Tracing::Span& span) { onSuccess_(*response, span); } @@ -58,7 +62,7 @@ class MockAsyncStreamCallbacks : public AsyncStreamCallbacks { onReceiveInitialMetadata_(*metadata); } - void onReceiveMessage(std::unique_ptr&& message) { onReceiveMessage_(*message); } + void onReceiveMessage(ResponseTypePtr&& message) { onReceiveMessage_(*message); } void onReceiveTrailingMetadata(Http::ResponseTrailerMapPtr&& metadata) { onReceiveTrailingMetadata_(*metadata); diff --git a/test/mocks/http/BUILD b/test/mocks/http/BUILD index 34227d7ac58ba..d169464f6c267 100644 --- a/test/mocks/http/BUILD +++ b/test/mocks/http/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", @@ -7,6 +5,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_mock( @@ -24,6 +24,7 @@ envoy_cc_mock( hdrs = ["conn_pool.h"], deps = [ "//include/envoy/http:conn_pool_interface", + "//test/mocks:common_lib", "//test/mocks/upstream:host_mocks", ], ) 
diff --git a/test/mocks/http/conn_pool.cc b/test/mocks/http/conn_pool.cc index 77d21bdf85a35..035f566a6b130 100644 --- a/test/mocks/http/conn_pool.cc +++ b/test/mocks/http/conn_pool.cc @@ -4,9 +4,6 @@ namespace Envoy { namespace Http { namespace ConnectionPool { -MockCancellable::MockCancellable() = default; -MockCancellable::~MockCancellable() = default; - MockInstance::MockInstance() : host_{std::make_shared>()} { ON_CALL(*this, host()).WillByDefault(Return(host_)); diff --git a/test/mocks/http/conn_pool.h b/test/mocks/http/conn_pool.h index f5116ae3cda64..4fd32853cfa9b 100644 --- a/test/mocks/http/conn_pool.h +++ b/test/mocks/http/conn_pool.h @@ -2,6 +2,7 @@ #include "envoy/http/conn_pool.h" +#include "test/mocks/common.h" #include "test/mocks/upstream/host.h" #include "gmock/gmock.h" @@ -10,15 +11,6 @@ namespace Envoy { namespace Http { namespace ConnectionPool { -class MockCancellable : public Cancellable { -public: - MockCancellable(); - ~MockCancellable() override; - - // Http::ConnectionPool::Cancellable - MOCK_METHOD(void, cancel, ()); -}; - class MockInstance : public Instance { public: MockInstance(); diff --git a/test/mocks/http/mocks.cc b/test/mocks/http/mocks.cc index 6b041b740ec34..6d95a4150cdb0 100644 --- a/test/mocks/http/mocks.cc +++ b/test/mocks/http/mocks.cc @@ -80,15 +80,19 @@ void MockStreamDecoderFilterCallbacks::sendLocalReply_( const absl::optional grpc_status, absl::string_view details) { details_ = std::string(details); Utility::sendLocalReply( - is_grpc_request_, - [this, modify_headers](ResponseHeaderMapPtr&& headers, bool end_stream) -> void { - if (modify_headers != nullptr) { - modify_headers(*headers); - } - encodeHeaders(std::move(headers), end_stream); - }, - [this](Buffer::Instance& data, bool end_stream) -> void { encodeData(data, end_stream); }, - stream_destroyed_, code, body, grpc_status, is_head_request_); + stream_destroyed_, + Utility::EncodeFunctions{ + nullptr, + [this, modify_headers](ResponseHeaderMapPtr&& headers, 
bool end_stream) -> void { + if (modify_headers != nullptr) { + modify_headers(*headers); + } + encodeHeaders(std::move(headers), end_stream); + }, + [this](Buffer::Instance& data, bool end_stream) -> void { + encodeData(data, end_stream); + }}, + Utility::LocalReplyData{is_grpc_request_, code, body, grpc_status, is_head_request_}); } MockStreamEncoderFilterCallbacks::MockStreamEncoderFilterCallbacks() { diff --git a/test/mocks/http/mocks.h b/test/mocks/http/mocks.h index 0edb60a759804..94550d19ae12a 100644 --- a/test/mocks/http/mocks.h +++ b/test/mocks/http/mocks.h @@ -44,7 +44,7 @@ class MockConnectionCallbacks : public virtual ConnectionCallbacks { ~MockConnectionCallbacks() override; // Http::ConnectionCallbacks - MOCK_METHOD(void, onGoAway, ()); + MOCK_METHOD(void, onGoAway, (GoAwayErrorCode error_code)); }; class MockServerConnectionCallbacks : public ServerConnectionCallbacks, @@ -75,7 +75,7 @@ class MockServerConnection : public ServerConnection { ~MockServerConnection() override; // Http::Connection - MOCK_METHOD(void, dispatch, (Buffer::Instance & data)); + MOCK_METHOD(Status, dispatch, (Buffer::Instance & data)); MOCK_METHOD(void, goAway, ()); MOCK_METHOD(Protocol, protocol, ()); MOCK_METHOD(void, shutdownNotice, ()); @@ -92,7 +92,7 @@ class MockClientConnection : public ClientConnection { ~MockClientConnection() override; // Http::Connection - MOCK_METHOD(void, dispatch, (Buffer::Instance & data)); + MOCK_METHOD(Status, dispatch, (Buffer::Instance & data)); MOCK_METHOD(void, goAway, ()); MOCK_METHOD(Protocol, protocol, ()); MOCK_METHOD(void, shutdownNotice, ()); @@ -136,6 +136,7 @@ class MockStreamDecoderFilterCallbacks : public StreamDecoderFilterCallbacks, MOCK_METHOD(void, resetStream, ()); MOCK_METHOD(Upstream::ClusterInfoConstSharedPtr, clusterInfo, ()); MOCK_METHOD(Router::RouteConstSharedPtr, route, ()); + MOCK_METHOD(Router::RouteConstSharedPtr, route, (const Router::RouteCallback&)); MOCK_METHOD(void, requestRouteConfigUpdate, 
(Http::RouteConfigUpdatedCallbackSharedPtr)); MOCK_METHOD(absl::optional, routeConfig, ()); MOCK_METHOD(void, clearRouteCache, ()); @@ -214,6 +215,7 @@ class MockStreamEncoderFilterCallbacks : public StreamEncoderFilterCallbacks, MOCK_METHOD(void, requestRouteConfigUpdate, (std::function)); MOCK_METHOD(bool, canRequestRouteConfigUpdate, ()); MOCK_METHOD(Router::RouteConstSharedPtr, route, ()); + MOCK_METHOD(Router::RouteConstSharedPtr, route, (const Router::RouteCallback&)); MOCK_METHOD(void, clearRouteCache, ()); MOCK_METHOD(uint64_t, streamId, (), (const)); MOCK_METHOD(StreamInfo::StreamInfo&, streamInfo, ()); @@ -256,6 +258,11 @@ class MockStreamDecoderFilter : public StreamDecoderFilter { MOCK_METHOD(FilterMetadataStatus, decodeMetadata, (Http::MetadataMap & metadata_map)); MOCK_METHOD(void, setDecoderFilterCallbacks, (StreamDecoderFilterCallbacks & callbacks)); MOCK_METHOD(void, decodeComplete, ()); + MOCK_METHOD(void, sendLocalReply, + (bool is_grpc_request, Code code, absl::string_view body, + const std::function& modify_headers, + bool is_head_request, const absl::optional grpc_status, + absl::string_view details)); Http::StreamDecoderFilterCallbacks* callbacks_{}; }; @@ -344,6 +351,8 @@ class MockAsyncClientCallbacks : public AsyncClient::Callbacks { MOCK_METHOD(void, onSuccess_, (const Http::AsyncClient::Request&, ResponseMessage*)); MOCK_METHOD(void, onFailure, (const Http::AsyncClient::Request&, Http::AsyncClient::FailureReason)); + MOCK_METHOD(void, onBeforeFinalizeUpstreamSpan, + (Envoy::Tracing::Span&, const Http::ResponseHeaderMap*)); }; class MockAsyncClientStreamCallbacks : public AsyncClient::StreamCallbacks { @@ -382,6 +391,7 @@ class MockAsyncClientStream : public AsyncClient::Stream { MOCK_METHOD(void, sendData, (Buffer::Instance & data, bool end_stream)); MOCK_METHOD(void, sendTrailers, (RequestTrailerMap & trailers)); MOCK_METHOD(void, reset, ()); + MOCK_METHOD(bool, isAboveWriteBufferHighWatermark, (), (const)); }; class 
MockFilterChainFactoryCallbacks : public Http::FilterChainFactoryCallbacks { @@ -412,21 +422,18 @@ class HeaderValueOfMatcherImpl : public testing::MatcherInterface { testing::Matcher matcher) : key_(std::move(key)), matcher_(std::move(matcher)) {} + // NOLINTNEXTLINE(readability-identifier-naming) bool MatchAndExplain(HeaderMapT headers, testing::MatchResultListener* listener) const override { // Get all headers with matching keys. std::vector values; - std::pair*> context = - std::make_pair(key_.get(), &values); Envoy::Http::HeaderMap::ConstIterateCb get_headers_cb = - [](const Envoy::Http::HeaderEntry& header, void* context) { - auto* typed_context = - static_cast*>*>(context); - if (header.key().getStringView() == typed_context->first) { - typed_context->second->push_back(header.value().getStringView()); + [key = key_.get(), &values](const Envoy::Http::HeaderEntry& header) { + if (header.key().getStringView() == key) { + values.push_back(header.value().getStringView()); } return Envoy::Http::HeaderMap::Iterate::Continue; }; - headers.iterate(get_headers_cb, &context); + headers.iterate(get_headers_cb); if (values.empty()) { *listener << "which has no '" << key_.get() << "' header"; @@ -496,6 +503,14 @@ MATCHER_P(HttpStatusIs, expected_code, "") { return true; } +inline HeaderMap::ConstIterateCb +saveHeaders(std::vector>* output) { + return [output](const HeaderEntry& header) { + output->push_back(std::make_pair(header.key().getStringView(), header.value().getStringView())); + return HeaderMap::Iterate::Continue; + }; +} + template class IsSubsetOfHeadersMatcherImpl : public testing::MatcherInterface { public: @@ -508,17 +523,14 @@ class IsSubsetOfHeadersMatcherImpl : public testing::MatcherInterface>*>(headers) - ->push_back(std::make_pair(header.key().getStringView(), header.value().getStringView())); - return HeaderMap::Iterate::Continue; - }; std::vector> arg_headers_vec; - headers.iterate(get_headers_cb, &arg_headers_vec); + 
headers.iterate(saveHeaders(&arg_headers_vec)); + std::vector> expected_headers_vec; - expected_headers_.iterate(get_headers_cb, &expected_headers_vec); + expected_headers_.iterate(saveHeaders(&expected_headers_vec)); return ExplainMatchResult(testing::IsSubsetOf(expected_headers_vec), arg_headers_vec, listener); } @@ -527,7 +539,7 @@ class IsSubsetOfHeadersMatcherImpl : public testing::MatcherInterface>*>(headers) - ->push_back(std::make_pair(header.key().getStringView(), header.value().getStringView())); - return HeaderMap::Iterate::Continue; - }; std::vector> arg_headers_vec; - headers.iterate(get_headers_cb, &arg_headers_vec); + headers.iterate(saveHeaders(&arg_headers_vec)); + std::vector> expected_headers_vec; - expected_headers_.iterate(get_headers_cb, &expected_headers_vec); + expected_headers_.iterate(saveHeaders(&expected_headers_vec)); return ExplainMatchResult(testing::IsSupersetOf(expected_headers_vec), arg_headers_vec, listener); @@ -583,7 +592,7 @@ class IsSupersetOfHeadersMatcherImpl : public testing::MatcherInterface callbacks_{}; Network::Address::InstanceConstSharedPtr connection_local_address_; diff --git a/test/mocks/http/stream_decoder.cc b/test/mocks/http/stream_decoder.cc index e4bf5ef3958d7..76145b6dc43f6 100644 --- a/test/mocks/http/stream_decoder.cc +++ b/test/mocks/http/stream_decoder.cc @@ -6,9 +6,6 @@ using testing::Invoke; namespace Envoy { namespace Http { -MockStreamDecoder::MockStreamDecoder() = default; -MockStreamDecoder::~MockStreamDecoder() = default; - MockRequestDecoder::MockRequestDecoder() { ON_CALL(*this, decodeHeaders_(_, _)).WillByDefault(Invoke([](RequestHeaderMapPtr& headers, bool) { // Check to see that method is not-null. 
Path can be null for CONNECT and authority can be null diff --git a/test/mocks/http/stream_decoder.h b/test/mocks/http/stream_decoder.h index 2abbe175aaa44..479eec77f9dfa 100644 --- a/test/mocks/http/stream_decoder.h +++ b/test/mocks/http/stream_decoder.h @@ -6,22 +6,21 @@ namespace Envoy { namespace Http { -class MockStreamDecoder : public virtual StreamDecoder { +class MockRequestDecoder : public RequestDecoder { public: - MockStreamDecoder(); - ~MockStreamDecoder() override; + MockRequestDecoder(); + ~MockRequestDecoder() override; void decodeMetadata(MetadataMapPtr&& metadata_map) override { decodeMetadata_(metadata_map); } // Http::StreamDecoder MOCK_METHOD(void, decodeData, (Buffer::Instance & data, bool end_stream)); MOCK_METHOD(void, decodeMetadata_, (MetadataMapPtr & metadata_map)); -}; - -class MockRequestDecoder : public MockStreamDecoder, public RequestDecoder { -public: - MockRequestDecoder(); - ~MockRequestDecoder() override; + MOCK_METHOD(void, sendLocalReply, + (bool is_grpc_request, Code code, absl::string_view body, + const std::function& modify_headers, + const absl::optional grpc_status, + absl::string_view details)); void decodeHeaders(RequestHeaderMapPtr&& headers, bool end_stream) override { decodeHeaders_(headers, end_stream); @@ -33,11 +32,17 @@ class MockRequestDecoder : public MockStreamDecoder, public RequestDecoder { MOCK_METHOD(void, decodeTrailers_, (RequestTrailerMapPtr & trailers)); }; -class MockResponseDecoder : public MockStreamDecoder, public ResponseDecoder { +class MockResponseDecoder : public ResponseDecoder { public: MockResponseDecoder(); ~MockResponseDecoder() override; + void decodeMetadata(MetadataMapPtr&& metadata_map) override { decodeMetadata_(metadata_map); } + + // Http::StreamDecoder + MOCK_METHOD(void, decodeData, (Buffer::Instance & data, bool end_stream)); + MOCK_METHOD(void, decodeMetadata_, (MetadataMapPtr & metadata_map)); + void decode100ContinueHeaders(ResponseHeaderMapPtr&& headers) override { 
decode100ContinueHeaders_(headers); } diff --git a/test/mocks/http/stream_encoder.cc b/test/mocks/http/stream_encoder.cc index 0c13a2ebe3403..ad9b646af7d8e 100644 --- a/test/mocks/http/stream_encoder.cc +++ b/test/mocks/http/stream_encoder.cc @@ -9,13 +9,8 @@ namespace Http { MockHttp1StreamEncoderOptions::MockHttp1StreamEncoderOptions() = default; MockHttp1StreamEncoderOptions::~MockHttp1StreamEncoderOptions() = default; -MockStreamEncoder::MockStreamEncoder() { - ON_CALL(*this, getStream()).WillByDefault(ReturnRef(stream_)); -} - -MockStreamEncoder::~MockStreamEncoder() = default; - MockRequestEncoder::MockRequestEncoder() { + ON_CALL(*this, getStream()).WillByDefault(ReturnRef(stream_)); ON_CALL(*this, encodeHeaders(_, _)) .WillByDefault(Invoke([](const RequestHeaderMap& headers, bool) { // Check to see that method is not-null. Path can be null for CONNECT and authority can be @@ -26,6 +21,7 @@ MockRequestEncoder::MockRequestEncoder() { MockRequestEncoder::~MockRequestEncoder() = default; MockResponseEncoder::MockResponseEncoder() { + ON_CALL(*this, getStream()).WillByDefault(ReturnRef(stream_)); ON_CALL(*this, encodeHeaders(_, _)) .WillByDefault(Invoke([](const ResponseHeaderMap& headers, bool) { // Check for passing request headers as response headers in a test. 
diff --git a/test/mocks/http/stream_encoder.h b/test/mocks/http/stream_encoder.h index 768951a411b4c..fa302cdefbe26 100644 --- a/test/mocks/http/stream_encoder.h +++ b/test/mocks/http/stream_encoder.h @@ -17,31 +17,25 @@ class MockHttp1StreamEncoderOptions : public Http1StreamEncoderOptions { MOCK_METHOD(void, disableChunkEncoding, ()); }; -class MockStreamEncoder : public virtual StreamEncoder { +class MockRequestEncoder : public RequestEncoder { public: - MockStreamEncoder(); - ~MockStreamEncoder() override; + MockRequestEncoder(); + ~MockRequestEncoder() override; + + // Http::RequestEncoder + MOCK_METHOD(void, encodeHeaders, (const RequestHeaderMap& headers, bool end_stream)); + MOCK_METHOD(void, encodeTrailers, (const RequestTrailerMap& trailers)); // Http::StreamEncoder MOCK_METHOD(void, encodeData, (Buffer::Instance & data, bool end_stream)); MOCK_METHOD(void, encodeMetadata, (const MetadataMapVector& metadata_map_vector)); - MOCK_METHOD(Stream&, getStream, ()); MOCK_METHOD(Http1StreamEncoderOptionsOptRef, http1StreamEncoderOptions, ()); + MOCK_METHOD(Stream&, getStream, (), ()); testing::NiceMock stream_; }; -class MockRequestEncoder : public MockStreamEncoder, public RequestEncoder { -public: - MockRequestEncoder(); - ~MockRequestEncoder() override; - - // Http::RequestEncoder - MOCK_METHOD(void, encodeHeaders, (const RequestHeaderMap& headers, bool end_stream)); - MOCK_METHOD(void, encodeTrailers, (const RequestTrailerMap& trailers)); -}; - -class MockResponseEncoder : public MockStreamEncoder, public ResponseEncoder { +class MockResponseEncoder : public ResponseEncoder { public: MockResponseEncoder(); ~MockResponseEncoder() override; @@ -50,6 +44,14 @@ class MockResponseEncoder : public MockStreamEncoder, public ResponseEncoder { MOCK_METHOD(void, encode100ContinueHeaders, (const ResponseHeaderMap& headers)); MOCK_METHOD(void, encodeHeaders, (const ResponseHeaderMap& headers, bool end_stream)); MOCK_METHOD(void, encodeTrailers, (const ResponseTrailerMap& 
trailers)); + + // Http::StreamEncoder + MOCK_METHOD(void, encodeData, (Buffer::Instance & data, bool end_stream)); + MOCK_METHOD(void, encodeMetadata, (const MetadataMapVector& metadata_map_vector)); + MOCK_METHOD(Http1StreamEncoderOptionsOptRef, http1StreamEncoderOptions, ()); + MOCK_METHOD(Stream&, getStream, (), ()); + + testing::NiceMock stream_; }; } // namespace Http diff --git a/test/mocks/init/BUILD b/test/mocks/init/BUILD index 5aa9f74bacd3c..d2969531ecb67 100644 --- a/test/mocks/init/BUILD +++ b/test/mocks/init/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_mock( diff --git a/test/mocks/local_info/BUILD b/test/mocks/local_info/BUILD index b7cd52aa9ff0b..9ae293dfc9a6e 100644 --- a/test/mocks/local_info/BUILD +++ b/test/mocks/local_info/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_mock( diff --git a/test/mocks/network/BUILD b/test/mocks/network/BUILD index 877861539c9d8..020e4b6db4041 100644 --- a/test/mocks/network/BUILD +++ b/test/mocks/network/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_mock( diff --git a/test/mocks/network/io_handle.h b/test/mocks/network/io_handle.h index 460d90f7c428a..0b27d262bebcc 100644 --- a/test/mocks/network/io_handle.h +++ b/test/mocks/network/io_handle.h @@ -29,6 +29,18 @@ class MockIoHandle : public IoHandle { MOCK_METHOD(Api::IoCallUint64Result, recvmmsg, (RawSliceArrays & slices, uint32_t self_port, RecvMsgOutput& output)); MOCK_METHOD(bool, supportsMmsg, (), (const)); + MOCK_METHOD(bool, supportsUdpGro, (), (const)); + MOCK_METHOD(Api::SysCallIntResult, bind, 
(Address::InstanceConstSharedPtr address)); + MOCK_METHOD(Api::SysCallIntResult, listen, (int backlog)); + MOCK_METHOD(Api::SysCallIntResult, connect, (Address::InstanceConstSharedPtr address)); + MOCK_METHOD(Api::SysCallIntResult, setOption, + (int level, int optname, const void* optval, socklen_t optlen)); + MOCK_METHOD(Api::SysCallIntResult, getOption, + (int level, int optname, void* optval, socklen_t* optlen)); + MOCK_METHOD(Api::SysCallIntResult, setBlocking, (bool blocking)); + MOCK_METHOD(absl::optional, domain, ()); + MOCK_METHOD(Address::InstanceConstSharedPtr, localAddress, ()); + MOCK_METHOD(Address::InstanceConstSharedPtr, peerAddress, ()); }; } // namespace Network diff --git a/test/mocks/network/mocks.cc b/test/mocks/network/mocks.cc index 81f9d03721c20..61369918ef9ed 100644 --- a/test/mocks/network/mocks.cc +++ b/test/mocks/network/mocks.cc @@ -135,6 +135,7 @@ MockListenSocket::MockListenSocket() ON_CALL(testing::Const(*this), isOpen()).WillByDefault(Invoke([this]() { return socket_is_open_; })); + ON_CALL(*this, ipVersion()).WillByDefault(Return(local_address_->ip()->version())); } MockSocketOption::MockSocketOption() { @@ -152,6 +153,7 @@ MockConnectionSocket::MockConnectionSocket() ON_CALL(*this, directRemoteAddress()).WillByDefault(ReturnRef(remote_address_)); ON_CALL(*this, ioHandle()).WillByDefault(ReturnRef(*io_handle_)); ON_CALL(testing::Const(*this), ioHandle()).WillByDefault(ReturnRef(*io_handle_)); + ON_CALL(*this, ipVersion()).WillByDefault(Return(local_address_->ip()->version())); } MockConnectionSocket::~MockConnectionSocket() = default; diff --git a/test/mocks/network/mocks.h b/test/mocks/network/mocks.h index b7537a4b9f74c..45371be5d5842 100644 --- a/test/mocks/network/mocks.h +++ b/test/mocks/network/mocks.h @@ -130,6 +130,7 @@ class MockListenerCallbacks : public ListenerCallbacks { void onAccept(ConnectionSocketPtr&& socket) override { onAccept_(socket); } MOCK_METHOD(void, onAccept_, (ConnectionSocketPtr & socket)); + 
MOCK_METHOD(void, onReject, ()); }; class MockUdpListenerCallbacks : public UdpListenerCallbacks { @@ -141,6 +142,7 @@ class MockUdpListenerCallbacks : public UdpListenerCallbacks { MOCK_METHOD(void, onReadReady, ()); MOCK_METHOD(void, onWriteReady, (const Socket& socket)); MOCK_METHOD(void, onReceiveError, (Api::IoError::IoErrorCode err)); + MOCK_METHOD(Network::UdpPacketWriter&, udpPacketWriter, ()); }; class MockDrainDecision : public DrainDecision { @@ -219,12 +221,23 @@ class MockListenSocket : public Socket { MOCK_METHOD(void, setLocalAddress, (const Address::InstanceConstSharedPtr&)); MOCK_METHOD(IoHandle&, ioHandle, ()); MOCK_METHOD(const IoHandle&, ioHandle, (), (const)); - MOCK_METHOD(Address::SocketType, socketType, (), (const)); + MOCK_METHOD(Socket::Type, socketType, (), (const)); + MOCK_METHOD(Address::Type, addressType, (), (const)); + MOCK_METHOD(absl::optional, ipVersion, (), (const)); MOCK_METHOD(void, close, ()); MOCK_METHOD(bool, isOpen, (), (const)); MOCK_METHOD(void, addOption_, (const Socket::OptionConstSharedPtr& option)); MOCK_METHOD(void, addOptions_, (const Socket::OptionsSharedPtr& options)); MOCK_METHOD(const OptionsSharedPtr&, options, (), (const)); + MOCK_METHOD(IoHandlePtr, socket, (Socket::Type, Address::Type, Address::IpVersion), (const)); + MOCK_METHOD(IoHandlePtr, socketForAddrPtr, (Socket::Type, const Address::InstanceConstSharedPtr), + (const)); + MOCK_METHOD(Api::SysCallIntResult, bind, (const Address::InstanceConstSharedPtr)); + MOCK_METHOD(Api::SysCallIntResult, connect, (const Address::InstanceConstSharedPtr)); + MOCK_METHOD(Api::SysCallIntResult, listen, (int)); + MOCK_METHOD(Api::SysCallIntResult, setSocketOption, (int, int, const void*, socklen_t)); + MOCK_METHOD(Api::SysCallIntResult, getSocketOption, (int, int, void*, socklen_t*), (const)); + MOCK_METHOD(Api::SysCallIntResult, setBlockingForTest, (bool)); IoHandlePtr io_handle_; Address::InstanceConstSharedPtr local_address_; @@ -270,9 +283,20 @@ class 
MockConnectionSocket : public ConnectionSocket { MOCK_METHOD(const Network::ConnectionSocket::OptionsSharedPtr&, options, (), (const)); MOCK_METHOD(IoHandle&, ioHandle, ()); MOCK_METHOD(const IoHandle&, ioHandle, (), (const)); - MOCK_METHOD(Address::SocketType, socketType, (), (const)); + MOCK_METHOD(Socket::Type, socketType, (), (const)); + MOCK_METHOD(Address::Type, addressType, (), (const)); + MOCK_METHOD(absl::optional, ipVersion, (), (const)); MOCK_METHOD(void, close, ()); MOCK_METHOD(bool, isOpen, (), (const)); + MOCK_METHOD(IoHandlePtr, socket, (Socket::Type, Address::Type, Address::IpVersion), (const)); + MOCK_METHOD(IoHandlePtr, socketForAddrPtr, (Socket::Type, const Address::InstanceConstSharedPtr), + (const)); + MOCK_METHOD(Api::SysCallIntResult, bind, (const Address::InstanceConstSharedPtr)); + MOCK_METHOD(Api::SysCallIntResult, connect, (const Address::InstanceConstSharedPtr)); + MOCK_METHOD(Api::SysCallIntResult, listen, (int)); + MOCK_METHOD(Api::SysCallIntResult, setSocketOption, (int, int, const void*, socklen_t)); + MOCK_METHOD(Api::SysCallIntResult, getSocketOption, (int, int, void*, socklen_t*), (const)); + MOCK_METHOD(Api::SysCallIntResult, setBlockingForTest, (bool)); IoHandlePtr io_handle_; Address::InstanceConstSharedPtr local_address_; @@ -288,6 +312,9 @@ class MockListenerFilterCallbacks : public ListenerFilterCallbacks { MOCK_METHOD(ConnectionSocket&, socket, ()); MOCK_METHOD(Event::Dispatcher&, dispatcher, ()); MOCK_METHOD(void, continueFilterChain, (bool)); + MOCK_METHOD(void, setDynamicMetadata, (const std::string&, const ProtobufWkt::Struct&)); + MOCK_METHOD(envoy::config::core::v3::Metadata&, dynamicMetadata, ()); + MOCK_METHOD(const envoy::config::core::v3::Metadata&, dynamicMetadata, (), (const)); NiceMock socket_; }; @@ -296,12 +323,20 @@ class MockListenSocketFactory : public ListenSocketFactory { public: MockListenSocketFactory() = default; - MOCK_METHOD(Network::Address::SocketType, socketType, (), (const)); + 
MOCK_METHOD(Network::Socket::Type, socketType, (), (const)); MOCK_METHOD(const Network::Address::InstanceConstSharedPtr&, localAddress, (), (const)); MOCK_METHOD(Network::SocketSharedPtr, getListenSocket, ()); MOCK_METHOD(SocketOptRef, sharedSocket, (), (const)); }; +class MockUdpPacketWriterFactory : public UdpPacketWriterFactory { +public: + MockUdpPacketWriterFactory() = default; + + MOCK_METHOD(Network::UdpPacketWriterPtr, createUdpPacketWriter, + (Network::IoHandle&, Stats::Scope&), ()); +}; + class MockListenerConfig : public ListenerConfig { public: MockListenerConfig(); @@ -319,7 +354,9 @@ class MockListenerConfig : public ListenerConfig { MOCK_METHOD(uint64_t, listenerTag, (), (const)); MOCK_METHOD(const std::string&, name, (), (const)); MOCK_METHOD(Network::ActiveUdpListenerFactory*, udpListenerFactory, ()); + MOCK_METHOD(Network::UdpPacketWriterFactoryOptRef, udpPacketWriterFactory, ()); MOCK_METHOD(ConnectionBalancer&, connectionBalancer, ()); + MOCK_METHOD(ResourceLimit&, openConnections, ()); envoy::config::core::v3::TrafficDirection direction() const override { return envoy::config::core::v3::UNSPECIFIED; @@ -376,10 +413,11 @@ class MockIp : public Address::Ip { MOCK_METHOD(const std::string&, addressAsString, (), (const)); MOCK_METHOD(bool, isAnyAddress, (), (const)); MOCK_METHOD(bool, isUnicastAddress, (), (const)); - MOCK_METHOD(Address::Ipv4*, ipv4, (), (const)); - MOCK_METHOD(Address::Ipv6*, ipv6, (), (const)); + MOCK_METHOD(const Address::Ipv4*, ipv4, (), (const)); + MOCK_METHOD(const Address::Ipv6*, ipv6, (), (const)); MOCK_METHOD(uint32_t, port, (), (const)); MOCK_METHOD(Address::IpVersion, version, (), (const)); + MOCK_METHOD(bool, v6only, (), (const)); }; class MockResolvedAddress : public Address::Instance { @@ -394,18 +432,21 @@ class MockResolvedAddress : public Address::Instance { MOCK_METHOD(Api::SysCallIntResult, bind, (os_fd_t), (const)); MOCK_METHOD(Api::SysCallIntResult, connect, (os_fd_t), (const)); - MOCK_METHOD(Address::Ip*, ip, 
(), (const)); - MOCK_METHOD(IoHandlePtr, socket, (Address::SocketType), (const)); + MOCK_METHOD(const Address::Ip*, ip, (), (const)); + MOCK_METHOD(const Address::Pipe*, pipe, (), (const)); + MOCK_METHOD(IoHandlePtr, socket, (Socket::Type), (const)); MOCK_METHOD(Address::Type, type, (), (const)); - MOCK_METHOD(sockaddr*, sockAddr, (), (const)); + MOCK_METHOD(const sockaddr*, sockAddr, (), (const)); MOCK_METHOD(socklen_t, sockAddrLen, (), (const)); const std::string& asString() const override { return physical_; } absl::string_view asStringView() const override { return physical_; } const std::string& logicalName() const override { return logical_; } + const std::string& socketInterface() const override { return socket_interface_; } const std::string logical_; const std::string physical_; + const std::string socket_interface_{""}; }; class MockTransportSocketCallbacks : public TransportSocketCallbacks { @@ -424,6 +465,22 @@ class MockTransportSocketCallbacks : public TransportSocketCallbacks { testing::NiceMock connection_; }; +class MockUdpPacketWriter : public UdpPacketWriter { +public: + MockUdpPacketWriter() = default; + + MOCK_METHOD(Api::IoCallUint64Result, writePacket, + (const Buffer::Instance& buffer, const Address::Ip* local_ip, + const Address::Instance& peer_address)); + MOCK_METHOD(bool, isWriteBlocked, (), (const)); + MOCK_METHOD(void, setWritable, ()); + MOCK_METHOD(uint64_t, getMaxPacketSize, (const Address::Instance& peer_address), (const)); + MOCK_METHOD(bool, isBatchMode, (), (const)); + MOCK_METHOD(Network::UdpPacketWriterBuffer, getNextWriteLocation, + (const Address::Ip* local_ip, const Address::Instance& peer_address)); + MOCK_METHOD(Api::IoCallUint64Result, flush, ()); +}; + class MockUdpListener : public UdpListener { public: MockUdpListener(); @@ -435,6 +492,7 @@ class MockUdpListener : public UdpListener { MOCK_METHOD(Event::Dispatcher&, dispatcher, ()); MOCK_METHOD(Address::InstanceConstSharedPtr&, localAddress, (), (const)); 
MOCK_METHOD(Api::IoCallUint64Result, send, (const UdpSendData&)); + MOCK_METHOD(Api::IoCallUint64Result, flush, ()); Event::MockDispatcher dispatcher_; }; diff --git a/test/mocks/protobuf/BUILD b/test/mocks/protobuf/BUILD index 5d7637475f675..67b4c15cd6446 100644 --- a/test/mocks/protobuf/BUILD +++ b/test/mocks/protobuf/BUILD @@ -1,16 +1,18 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_mock( name = "protobuf_mocks", srcs = ["mocks.cc"], hdrs = ["mocks.h"], - deps = ["//include/envoy/protobuf:message_validator_interface"], + deps = [ + "//include/envoy/protobuf:message_validator_interface", + ], ) diff --git a/test/mocks/protobuf/mocks.h b/test/mocks/protobuf/mocks.h index 3f60902c1fc60..3e61b31fed120 100644 --- a/test/mocks/protobuf/mocks.h +++ b/test/mocks/protobuf/mocks.h @@ -13,6 +13,14 @@ class MockValidationVisitor : public ValidationVisitor { ~MockValidationVisitor() override; MOCK_METHOD(void, onUnknownField, (absl::string_view)); + MOCK_METHOD(void, onDeprecatedField, (absl::string_view, bool)); + + bool skipValidation() override { return skip_validation_; } + + void setSkipValidation(bool s) { skip_validation_ = s; } + +private: + bool skip_validation_ = false; }; class MockValidationContext : public ValidationContext { diff --git a/test/mocks/ratelimit/BUILD b/test/mocks/ratelimit/BUILD index 9aa235577e39b..db30620b1edda 100644 --- a/test/mocks/ratelimit/BUILD +++ b/test/mocks/ratelimit/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_mock( diff --git a/test/mocks/ratelimit/mocks.h b/test/mocks/ratelimit/mocks.h index 238c3c25d8cca..7f983beabbca8 100644 --- a/test/mocks/ratelimit/mocks.h +++ b/test/mocks/ratelimit/mocks.h @@ -10,12 +10,16 @@ namespace Envoy { namespace RateLimit { 
+inline bool operator==(const RateLimitOverride& lhs, const RateLimitOverride& rhs) { + return lhs.requests_per_unit_ == rhs.requests_per_unit_ && lhs.unit_ == rhs.unit_; +} + inline bool operator==(const DescriptorEntry& lhs, const DescriptorEntry& rhs) { return lhs.key_ == rhs.key_ && lhs.value_ == rhs.value_; } inline bool operator==(const Descriptor& lhs, const Descriptor& rhs) { - return lhs.entries_ == rhs.entries_; + return lhs.entries_ == rhs.entries_ && lhs.limit_ == rhs.limit_; } } // namespace RateLimit diff --git a/test/mocks/redis/BUILD b/test/mocks/redis/BUILD index d97ea0dbaab97..6ff8990aae059 100644 --- a/test/mocks/redis/BUILD +++ b/test/mocks/redis/BUILD @@ -1,8 +1,8 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() diff --git a/test/mocks/router/BUILD b/test/mocks/router/BUILD index db572a1f92a74..7044a84c1edbb 100644 --- a/test/mocks/router/BUILD +++ b/test/mocks/router/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_mock( @@ -35,3 +35,15 @@ envoy_cc_mock( "@envoy_api//envoy/type/v3:pkg_cc_proto", ], ) + +envoy_cc_mock( + name = "router_filter_interface", + srcs = ["router_filter_interface.cc"], + hdrs = ["router_filter_interface.h"], + deps = [ + "//source/common/router:router_lib", + "//test/mocks/http:http_mocks", + "//test/mocks/network:network_mocks", + "//test/mocks/server:factory_context_mocks", + ], +) diff --git a/test/mocks/router/mocks.cc b/test/mocks/router/mocks.cc index 5f4f8d487c1ef..93de670c3a2d7 100644 --- a/test/mocks/router/mocks.cc +++ b/test/mocks/router/mocks.cc @@ -6,6 +6,7 @@ #include "gtest/gtest.h" using testing::_; +using testing::DoAll; using testing::NiceMock; using testing::Return; using testing::ReturnPointee; @@ -22,6 +23,10 @@ TestRetryPolicy::TestRetryPolicy() { 
num_retries_ = 1; } TestRetryPolicy::~TestRetryPolicy() = default; +MockInternalRedirectPolicy::MockInternalRedirectPolicy() { + ON_CALL(*this, enabled()).WillByDefault(Return(false)); +} + MockRetryState::MockRetryState() = default; void MockRetryState::expectHeadersRetry() { @@ -85,6 +90,7 @@ MockRouteEntry::MockRouteEntry() { ON_CALL(*this, opaqueConfig()).WillByDefault(ReturnRef(opaque_config_)); ON_CALL(*this, rateLimitPolicy()).WillByDefault(ReturnRef(rate_limit_policy_)); ON_CALL(*this, retryPolicy()).WillByDefault(ReturnRef(retry_policy_)); + ON_CALL(*this, internalRedirectPolicy()).WillByDefault(ReturnRef(internal_redirect_policy_)); ON_CALL(*this, retryShadowBufferLimit()) .WillByDefault(Return(std::numeric_limits::max())); ON_CALL(*this, shadowPolicies()).WillByDefault(ReturnRef(shadow_policies_)); @@ -97,12 +103,14 @@ MockRouteEntry::MockRouteEntry() { ON_CALL(*this, upgradeMap()).WillByDefault(ReturnRef(upgrade_map_)); ON_CALL(*this, hedgePolicy()).WillByDefault(ReturnRef(hedge_policy_)); ON_CALL(*this, routeName()).WillByDefault(ReturnRef(route_name_)); + ON_CALL(*this, connectConfig()).WillByDefault(ReturnRef(connect_config_)); } MockRouteEntry::~MockRouteEntry() = default; MockConfig::MockConfig() : route_(new NiceMock()) { ON_CALL(*this, route(_, _, _)).WillByDefault(Return(route_)); + ON_CALL(*this, route(_, _, _, _)).WillByDefault(Return(route_)); ON_CALL(*this, internalOnlyHeaders()).WillByDefault(ReturnRef(internal_only_headers_)); ON_CALL(*this, name()).WillByDefault(ReturnRef(name_)); ON_CALL(*this, usesVhds()).WillByDefault(Return(false)); @@ -146,5 +154,8 @@ MockScopedRouteConfigProvider::MockScopedRouteConfigProvider() } MockScopedRouteConfigProvider::~MockScopedRouteConfigProvider() = default; +MockGenericConnectionPoolCallbacks::MockGenericConnectionPoolCallbacks() { + ON_CALL(*this, upstreamToDownstream()).WillByDefault(ReturnRef(upstream_to_downstream_)); +} } // namespace Router } // namespace Envoy diff --git 
a/test/mocks/router/mocks.h b/test/mocks/router/mocks.h index 9ed7b8ead74ba..890a6ccef8932 100644 --- a/test/mocks/router/mocks.h +++ b/test/mocks/router/mocks.h @@ -131,6 +131,22 @@ class TestRetryPolicy : public RetryPolicy { absl::optional max_interval_{}; }; +class MockInternalRedirectPolicy : public InternalRedirectPolicy { +public: + MockInternalRedirectPolicy(); + MOCK_METHOD(bool, enabled, (), (const)); + MOCK_METHOD(bool, shouldRedirectForResponseCode, (const Http::Code& response_code), (const)); + MOCK_METHOD(std::vector, predicates, (), (const)); + MOCK_METHOD(uint32_t, maxInternalRedirects, (), (const)); + MOCK_METHOD(bool, isCrossSchemeRedirectAllowed, (), (const)); +}; + +class MockInternalRedirectPredicate : public InternalRedirectPredicate { +public: + MOCK_METHOD(bool, acceptTargetRoute, (StreamInfo::FilterState&, absl::string_view, bool, bool)); + MOCK_METHOD(absl::string_view, name, (), (const)); +}; + class MockRetryState : public RetryState { public: MockRetryState(); @@ -150,7 +166,8 @@ class MockRetryState : public RetryState { MOCK_METHOD(void, onHostAttempted, (Upstream::HostDescriptionConstSharedPtr)); MOCK_METHOD(bool, shouldSelectAnotherHost, (const Upstream::Host& host)); MOCK_METHOD(const Upstream::HealthyAndDegradedLoad&, priorityLoadForRetry, - (const Upstream::PrioritySet&, const Upstream::HealthyAndDegradedLoad&)); + (const Upstream::PrioritySet&, const Upstream::HealthyAndDegradedLoad&, + const Upstream::RetryPriority::PriorityMappingFunc&)); MOCK_METHOD(uint32_t, hostSelectionMaxAttempts, (), (const)); DoRetryCallback callback_; @@ -167,7 +184,8 @@ class MockRateLimitPolicyEntry : public RateLimitPolicyEntry { MOCK_METHOD(void, populateDescriptors, (const RouteEntry& route, std::vector& descriptors, const std::string& local_service_cluster, const Http::HeaderMap& headers, - const Network::Address::Instance& remote_address), + const Network::Address::Instance& remote_address, + const envoy::config::core::v3::Metadata* 
dynamic_metadata), (const)); uint64_t stage_{}; @@ -334,6 +352,7 @@ class MockRouteEntry : public RouteEntry { MOCK_METHOD(Upstream::ResourcePriority, priority, (), (const)); MOCK_METHOD(const RateLimitPolicy&, rateLimitPolicy, (), (const)); MOCK_METHOD(const RetryPolicy&, retryPolicy, (), (const)); + MOCK_METHOD(const InternalRedirectPolicy&, internalRedirectPolicy, (), (const)); MOCK_METHOD(uint32_t, retryShadowBufferLimit, (), (const)); MOCK_METHOD(const std::vector&, shadowPolicies, (), (const)); MOCK_METHOD(std::chrono::milliseconds, timeout, (), (const)); @@ -353,9 +372,8 @@ class MockRouteEntry : public RouteEntry { MOCK_METHOD(const RouteSpecificFilterConfig*, perFilterConfig, (const std::string&), (const)); MOCK_METHOD(bool, includeAttemptCountInRequest, (), (const)); MOCK_METHOD(bool, includeAttemptCountInResponse, (), (const)); + MOCK_METHOD(const absl::optional&, connectConfig, (), (const)); MOCK_METHOD(const UpgradeMap&, upgradeMap, (), (const)); - MOCK_METHOD(InternalRedirectAction, internalRedirectAction, (), (const)); - MOCK_METHOD(uint32_t, maxInternalRedirects, (), (const)); MOCK_METHOD(const std::string&, routeName, (), (const)); std::string cluster_name_{"fake_cluster"}; @@ -363,6 +381,7 @@ class MockRouteEntry : public RouteEntry { std::multimap opaque_config_; TestVirtualCluster virtual_cluster_; TestRetryPolicy retry_policy_; + testing::NiceMock internal_redirect_policy_; TestHedgePolicy hedge_policy_; testing::NiceMock rate_limit_policy_; std::vector shadow_policies_; @@ -374,6 +393,7 @@ class MockRouteEntry : public RouteEntry { testing::NiceMock path_match_criterion_; envoy::config::core::v3::Metadata metadata_; UpgradeMap upgrade_map_; + absl::optional connect_config_; }; class MockDecorator : public Decorator { @@ -428,6 +448,11 @@ class MockConfig : public Config { (const Http::RequestHeaderMap&, const Envoy::StreamInfo::StreamInfo&, uint64_t random_value), (const)); + MOCK_METHOD(RouteConstSharedPtr, route, + (const RouteCallback& cb, 
const Http::RequestHeaderMap&, + const Envoy::StreamInfo::StreamInfo&, uint64_t random_value), + (const)); + MOCK_METHOD(const std::list&, internalOnlyHeaders, (), (const)); MOCK_METHOD(const std::string&, name, (), (const)); MOCK_METHOD(bool, usesVhds, (), (const)); @@ -495,5 +520,49 @@ class MockScopedRouteConfigProvider : public Envoy::Config::ConfigProvider { std::shared_ptr config_; }; +class MockGenericConnPool : public GenericConnPool { + MOCK_METHOD(void, newStream, (GenericConnectionPoolCallbacks * request)); + MOCK_METHOD(bool, cancelAnyPendingRequest, ()); + MOCK_METHOD(absl::optional, protocol, (), (const)); + MOCK_METHOD(bool, initialize, + (Upstream::ClusterManager&, const RouteEntry&, Http::Protocol, + Upstream::LoadBalancerContext*)); + MOCK_METHOD(Upstream::HostDescriptionConstSharedPtr, host, (), (const)); +}; + +class MockUpstreamToDownstream : public UpstreamToDownstream { +public: + MOCK_METHOD(const RouteEntry&, routeEntry, (), (const)); + MOCK_METHOD(const Network::Connection&, connection, (), (const)); + + MOCK_METHOD(void, decodeData, (Buffer::Instance&, bool)); + MOCK_METHOD(void, decodeMetadata, (Http::MetadataMapPtr &&)); + MOCK_METHOD(void, decode100ContinueHeaders, (Http::ResponseHeaderMapPtr &&)); + MOCK_METHOD(void, decodeHeaders, (Http::ResponseHeaderMapPtr&&, bool)); + MOCK_METHOD(void, decodeTrailers, (Http::ResponseTrailerMapPtr &&)); + + MOCK_METHOD(void, onResetStream, (Http::StreamResetReason, absl::string_view)); + MOCK_METHOD(void, onAboveWriteBufferHighWatermark, ()); + MOCK_METHOD(void, onBelowWriteBufferLowWatermark, ()); +}; + +class MockGenericConnectionPoolCallbacks : public GenericConnectionPoolCallbacks { +public: + MockGenericConnectionPoolCallbacks(); + + MOCK_METHOD(void, onPoolFailure, + (Http::ConnectionPool::PoolFailureReason reason, + absl::string_view transport_failure_reason, + Upstream::HostDescriptionConstSharedPtr host)); + MOCK_METHOD(void, onPoolReady, + (std::unique_ptr && upstream, + 
Upstream::HostDescriptionConstSharedPtr host, + const Network::Address::InstanceConstSharedPtr& upstream_local_address, + const StreamInfo::StreamInfo& info)); + MOCK_METHOD(UpstreamToDownstream&, upstreamToDownstream, ()); + + NiceMock upstream_to_downstream_; +}; + } // namespace Router } // namespace Envoy diff --git a/test/mocks/router/router_filter_interface.cc b/test/mocks/router/router_filter_interface.cc new file mode 100644 index 0000000000000..81d9b8be1a340 --- /dev/null +++ b/test/mocks/router/router_filter_interface.cc @@ -0,0 +1,29 @@ +#include "test/mocks/router/router_filter_interface.h" + +using testing::AnyNumber; +using testing::Return; +using testing::ReturnRef; + +namespace Envoy { +namespace Router { + +MockRouterFilterInterface::MockRouterFilterInterface() + : config_("prefix.", context_, ShadowWriterPtr(new MockShadowWriter()), router_proto) { + auto cluster_info = new NiceMock(); + cluster_info->timeout_budget_stats_ = nullptr; + ON_CALL(*cluster_info, timeoutBudgetStats()).WillByDefault(Return(absl::nullopt)); + cluster_info_.reset(cluster_info); + ON_CALL(*this, callbacks()).WillByDefault(Return(&callbacks_)); + ON_CALL(*this, config()).WillByDefault(ReturnRef(config_)); + ON_CALL(*this, cluster()).WillByDefault(Return(cluster_info_)); + ON_CALL(*this, upstreamRequests()).WillByDefault(ReturnRef(requests_)); + EXPECT_CALL(callbacks_.dispatcher_, setTrackedObject(_)).Times(AnyNumber()); + ON_CALL(*this, routeEntry()).WillByDefault(Return(&route_entry_)); + ON_CALL(callbacks_, connection()).WillByDefault(Return(&client_connection_)); + route_entry_.connect_config_.emplace(RouteEntry::ConnectConfig()); +} + +MockRouterFilterInterface::~MockRouterFilterInterface() = default; + +} // namespace Router +} // namespace Envoy diff --git a/test/mocks/router/router_filter_interface.h b/test/mocks/router/router_filter_interface.h new file mode 100644 index 0000000000000..40b6ac609b8e1 --- /dev/null +++ b/test/mocks/router/router_filter_interface.h @@ 
-0,0 +1,63 @@ +#pragma once + +#include "common/router/router.h" + +#include "test/mocks/http/mocks.h" +#include "test/mocks/network/mocks.h" +#include "test/mocks/server/factory_context.h" + +#include "gmock/gmock.h" + +namespace Envoy { +namespace Router { + +class MockRouterFilterInterface : public RouterFilterInterface { +public: + MockRouterFilterInterface(); + ~MockRouterFilterInterface() override; + + MOCK_METHOD(void, onUpstream100ContinueHeaders, + (Envoy::Http::ResponseHeaderMapPtr && headers, UpstreamRequest& upstream_request)); + MOCK_METHOD(void, onUpstreamHeaders, + (uint64_t response_code, Envoy::Http::ResponseHeaderMapPtr&& headers, + UpstreamRequest& upstream_request, bool end_stream)); + MOCK_METHOD(void, onUpstreamData, + (Buffer::Instance & data, UpstreamRequest& upstream_request, bool end_stream)); + MOCK_METHOD(void, onUpstreamTrailers, + (Envoy::Http::ResponseTrailerMapPtr && trailers, UpstreamRequest& upstream_request)); + MOCK_METHOD(void, onUpstreamMetadata, (Envoy::Http::MetadataMapPtr && metadata_map)); + MOCK_METHOD(void, onUpstreamReset, + (Envoy::Http::StreamResetReason reset_reason, absl::string_view transport_failure, + UpstreamRequest& upstream_request)); + MOCK_METHOD(void, onUpstreamHostSelected, (Upstream::HostDescriptionConstSharedPtr host)); + MOCK_METHOD(void, onPerTryTimeout, (UpstreamRequest & upstream_request)); + MOCK_METHOD(void, onStreamMaxDurationReached, (UpstreamRequest & upstream_request)); + + MOCK_METHOD(Envoy::Http::StreamDecoderFilterCallbacks*, callbacks, ()); + MOCK_METHOD(Upstream::ClusterInfoConstSharedPtr, cluster, ()); + MOCK_METHOD(FilterConfig&, config, ()); + MOCK_METHOD(FilterUtility::TimeoutData, timeout, ()); + MOCK_METHOD(Envoy::Http::RequestHeaderMap*, downstreamHeaders, ()); + MOCK_METHOD(Envoy::Http::RequestTrailerMap*, downstreamTrailers, ()); + MOCK_METHOD(bool, downstreamResponseStarted, (), (const)); + MOCK_METHOD(bool, downstreamEndStream, (), (const)); + MOCK_METHOD(uint32_t, attemptCount, 
(), (const)); + MOCK_METHOD(const VirtualCluster*, requestVcluster, (), (const)); + MOCK_METHOD(const RouteEntry*, routeEntry, (), (const)); + MOCK_METHOD(const std::list&, upstreamRequests, (), (const)); + MOCK_METHOD(const UpstreamRequest*, finalUpstreamRequest, (), (const)); + MOCK_METHOD(TimeSource&, timeSource, ()); + + NiceMock callbacks_; + NiceMock route_entry_; + NiceMock client_connection_; + + envoy::extensions::filters::http::router::v3::Router router_proto; + NiceMock context_; + FilterConfig config_; + Upstream::ClusterInfoConstSharedPtr cluster_info_; + std::list requests_; +}; + +} // namespace Router +} // namespace Envoy diff --git a/test/mocks/runtime/BUILD b/test/mocks/runtime/BUILD index 7d899cb90c580..a6af7e7570fe8 100644 --- a/test/mocks/runtime/BUILD +++ b/test/mocks/runtime/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_mock( @@ -17,6 +17,7 @@ envoy_cc_mock( "//include/envoy/runtime:runtime_interface", "//include/envoy/upstream:cluster_manager_interface", "//test/mocks:common_lib", + "//test/mocks/stats:stats_mocks", "@envoy_api//envoy/type/v3:pkg_cc_proto", ], ) diff --git a/test/mocks/runtime/mocks.cc b/test/mocks/runtime/mocks.cc index 9dda39b1087c9..d2f22d414c8f4 100644 --- a/test/mocks/runtime/mocks.cc +++ b/test/mocks/runtime/mocks.cc @@ -4,16 +4,13 @@ #include "gtest/gtest.h" using testing::_; +using testing::NiceMock; using testing::Return; using testing::ReturnArg; namespace Envoy { namespace Runtime { -MockRandomGenerator::MockRandomGenerator() { ON_CALL(*this, uuid()).WillByDefault(Return(uuid_)); } - -MockRandomGenerator::~MockRandomGenerator() = default; - MockSnapshot::MockSnapshot() { ON_CALL(*this, getInteger(_, _)).WillByDefault(ReturnArg<1>()); ON_CALL(*this, getDouble(_, _)).WillByDefault(ReturnArg<1>()); @@ -23,7 +20,13 @@ MockSnapshot::MockSnapshot() { 
MockSnapshot::~MockSnapshot() = default; -MockLoader::MockLoader() { ON_CALL(*this, snapshot()).WillByDefault(ReturnRef(snapshot_)); } +MockLoader::MockLoader() { + ON_CALL(*this, threadsafeSnapshot()).WillByDefault(testing::Invoke([]() { + return std::make_shared>(); + })); + ON_CALL(*this, snapshot()).WillByDefault(ReturnRef(snapshot_)); + ON_CALL(*this, getRootScope()).WillByDefault(ReturnRef(store_)); +} MockLoader::~MockLoader() = default; diff --git a/test/mocks/runtime/mocks.h b/test/mocks/runtime/mocks.h index d73bb3eb5317b..53bea8ce81ad7 100644 --- a/test/mocks/runtime/mocks.h +++ b/test/mocks/runtime/mocks.h @@ -2,28 +2,19 @@ #include #include -#include #include "envoy/runtime/runtime.h" #include "envoy/type/v3/percent.pb.h" #include "envoy/upstream/cluster_manager.h" +#include "test/mocks/stats/mocks.h" + +#include "absl/container/node_hash_map.h" #include "gmock/gmock.h" namespace Envoy { namespace Runtime { -class MockRandomGenerator : public RandomGenerator { -public: - MockRandomGenerator(); - ~MockRandomGenerator() override; - - MOCK_METHOD(uint64_t, random, ()); - MOCK_METHOD(std::string, uuid, ()); - - const std::string uuid_{"a121e9e1-feae-4136-9e0e-6fac343d56c9"}; -}; - class MockSnapshot : public Snapshot { public: MockSnapshot(); @@ -41,6 +32,7 @@ class MockSnapshot : public Snapshot { } } + MOCK_METHOD(void, countDeprecatedFeatureUse, (), (const)); MOCK_METHOD(bool, deprecatedFeatureEnabled, (absl::string_view key, bool default_enabled), (const)); MOCK_METHOD(bool, runtimeFeatureEnabled, (absl::string_view key), (const)); @@ -72,11 +64,13 @@ class MockLoader : public Loader { MOCK_METHOD(void, initialize, (Upstream::ClusterManager & cm)); MOCK_METHOD(const Snapshot&, snapshot, ()); - MOCK_METHOD(std::shared_ptr, threadsafeSnapshot, ()); - MOCK_METHOD(void, mergeValues, ((const std::unordered_map&))); + MOCK_METHOD(SnapshotConstSharedPtr, threadsafeSnapshot, ()); + MOCK_METHOD(void, mergeValues, ((const absl::node_hash_map&))); 
MOCK_METHOD(void, startRtdsSubscriptions, (ReadyCallback)); + MOCK_METHOD(Stats::Scope&, getRootScope, ()); testing::NiceMock snapshot_; + testing::NiceMock store_; }; class MockOverrideLayer : public Snapshot::OverrideLayer { diff --git a/test/mocks/secret/BUILD b/test/mocks/secret/BUILD index 313a56038cde8..92a80718db015 100644 --- a/test/mocks/secret/BUILD +++ b/test/mocks/secret/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_mock( diff --git a/test/mocks/server/BUILD b/test/mocks/server/BUILD index 4d85cd8496abc..35a1ac14b80ad 100644 --- a/test/mocks/server/BUILD +++ b/test/mocks/server/BUILD @@ -1,32 +1,165 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_mock( - name = "server_mocks", - srcs = ["mocks.cc"], - hdrs = ["mocks.h"], + name = "config_tracker_mocks", + srcs = ["config_tracker.cc"], + hdrs = ["config_tracker.h"], deps = [ - "//include/envoy/secret:secret_manager_interface", - "//include/envoy/server:admin_interface", "//include/envoy/server:configuration_interface", + ], +) + +envoy_cc_mock( + name = "admin_mocks", + srcs = ["admin.cc"], + hdrs = ["admin.h"], + deps = [ + "//include/envoy/server:admin_interface", + "//test/mocks/server:config_tracker_mocks", + ], +) + +envoy_cc_mock( + name = "bootstrap_extension_factory_mocks", + srcs = ["bootstrap_extension_factory.cc"], + hdrs = ["bootstrap_extension_factory.h"], + deps = [ + "//include/envoy/server:bootstrap_extension_config_interface", + ], +) + +envoy_cc_mock( + name = "options_mocks", + srcs = ["options.cc"], + hdrs = ["options.h"], + deps = [ + "//include/envoy/server:options_interface", + "@envoy_api//envoy/admin/v3:pkg_cc_proto", + "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", + ], +) + +envoy_cc_mock( + name = 
"admin_stream_mocks", + srcs = ["admin_stream.cc"], + hdrs = ["admin_stream.h"], + deps = [ + "//include/envoy/server:admin_interface", + "//test/mocks/http:http_mocks", + ], +) + +envoy_cc_mock( + name = "drain_manager_mocks", + srcs = ["drain_manager.cc"], + hdrs = ["drain_manager.h"], + deps = [ "//include/envoy/server:drain_manager_interface", - "//include/envoy/server:filter_config_interface", + ], +) + +envoy_cc_mock( + name = "watch_dog_mocks", + srcs = ["watch_dog.cc"], + hdrs = ["watch_dog.h"], + deps = [ + "//include/envoy/server:watchdog_interface", + ], +) + +envoy_cc_mock( + name = "guard_dog_mocks", + srcs = ["guard_dog.cc"], + hdrs = ["guard_dog.h"], + deps = [ "//include/envoy/server:guarddog_interface", - "//include/envoy/server:health_checker_config_interface", + "//test/mocks/server:watch_dog_mocks", + ], +) + +envoy_cc_mock( + name = "hot_restart_mocks", + srcs = ["hot_restart.cc"], + hdrs = ["hot_restart.h"], + deps = [ "//include/envoy/server:instance_interface", - "//include/envoy/server:options_interface", - "//include/envoy/server:overload_manager_interface", - "//include/envoy/server:tracer_config_interface", + "//test/mocks/stats:stats_mocks", + ], +) + +envoy_cc_mock( + name = "listener_component_factory_mocks", + srcs = ["listener_component_factory.cc"], + hdrs = ["listener_component_factory.h"], + deps = [ + "//include/envoy/server:drain_manager_interface", + "//include/envoy/server:listener_manager_interface", + "//test/mocks/network:network_mocks", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + "@envoy_api//envoy/config/listener/v3:pkg_cc_proto", + ], +) + +envoy_cc_mock( + name = "listener_manager_mocks", + srcs = ["listener_manager.cc"], + hdrs = ["listener_manager.h"], + deps = [ + "//include/envoy/server:listener_manager_interface", + ], +) + +envoy_cc_mock( + name = "server_lifecycle_notifier_mocks", + srcs = ["server_lifecycle_notifier.cc"], + hdrs = ["server_lifecycle_notifier.h"], + deps = [ + 
"//include/envoy/server:lifecycle_notifier_interface", + ], +) + +envoy_cc_mock( + name = "worker_factory_mocks", + srcs = ["worker_factory.cc"], + hdrs = ["worker_factory.h"], + deps = [ + "//include/envoy/server:worker_interface", + "//test/mocks/server:worker_mocks", + ], +) + +envoy_cc_mock( + name = "worker_mocks", + srcs = ["worker.cc"], + hdrs = ["worker.h"], + deps = [ "//include/envoy/server:worker_interface", - "//include/envoy/ssl:context_manager_interface", - "//include/envoy/upstream:health_checker_interface", + ], +) + +envoy_cc_mock( + name = "overload_manager_mocks", + srcs = ["overload_manager.cc"], + hdrs = ["overload_manager.h"], + deps = [ + "//include/envoy/server:overload_manager_interface", + ], +) + +envoy_cc_mock( + name = "instance_mocks", + srcs = ["instance.cc"], + hdrs = ["instance.h"], + deps = [ + "//include/envoy/server:instance_interface", "//source/common/grpc:context_lib", "//source/common/http:context_lib", "//source/common/secret:secret_manager_impl_lib", @@ -43,13 +176,139 @@ envoy_cc_mock( "//test/mocks/router:router_mocks", "//test/mocks/runtime:runtime_mocks", "//test/mocks/secret:secret_mocks", + "//test/mocks/server:admin_mocks", + "//test/mocks/server:drain_manager_mocks", + "//test/mocks/server:hot_restart_mocks", + "//test/mocks/server:listener_manager_mocks", + "//test/mocks/server:options_mocks", + "//test/mocks/server:overload_manager_mocks", + "//test/mocks/server:server_lifecycle_notifier_mocks", + "//test/mocks/server:transport_socket_factory_context_mocks", "//test/mocks/thread_local:thread_local_mocks", "//test/mocks/tracing:tracing_mocks", "//test/mocks/upstream:upstream_mocks", - "//test/test_common:test_time_lib", - "@envoy_api//envoy/admin/v3:pkg_cc_proto", - "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", - "@envoy_api//envoy/config/core/v3:pkg_cc_proto", - "@envoy_api//envoy/config/listener/v3:pkg_cc_proto", + ], +) + +envoy_cc_mock( + name = "main_mocks", + srcs = ["main.cc"], + hdrs = ["main.h"], + 
deps = [ + "//include/envoy/server:configuration_interface", + "//include/envoy/server:overload_manager_interface", + ], +) + +envoy_cc_mock( + name = "factory_context_mocks", + srcs = ["factory_context.cc"], + hdrs = ["factory_context.h"], + deps = [ + "//test/mocks/server:drain_manager_mocks", + "//test/mocks/server:instance_mocks", + "//test/mocks/server:overload_manager_mocks", + "//test/mocks/server:server_lifecycle_notifier_mocks", + ], +) + +envoy_cc_mock( + name = "transport_socket_factory_context_mocks", + srcs = ["transport_socket_factory_context.cc"], + hdrs = ["transport_socket_factory_context.h"], + deps = [ + "//include/envoy/server:tracer_config_interface", + "//source/common/secret:secret_manager_impl_lib", + "//test/mocks/api:api_mocks", + "//test/mocks/server:config_tracker_mocks", + "//test/mocks/upstream:upstream_mocks", + ], +) + +envoy_cc_mock( + name = "listener_factory_context_mocks", + srcs = ["listener_factory_context.cc"], + hdrs = ["listener_factory_context.h"], + deps = [ + "//include/envoy/server:listener_manager_interface", + "//test/mocks/server:factory_context_mocks", + ], +) + +envoy_cc_mock( + name = "health_checker_factory_context_mocks", + srcs = ["health_checker_factory_context.cc"], + hdrs = ["health_checker_factory_context.h"], + deps = [ + "//include/envoy/server:health_checker_config_interface", + "//test/mocks/api:api_mocks", + "//test/mocks/event:event_mocks", + "//test/mocks/protobuf:protobuf_mocks", + "//test/mocks/router:router_mocks", + "//test/mocks/runtime:runtime_mocks", + "//test/mocks/upstream:upstream_mocks", + ], +) + +envoy_cc_mock( + name = "filter_chain_factory_context_mocks", + srcs = ["filter_chain_factory_context.cc"], + hdrs = ["filter_chain_factory_context.h"], + deps = [ + "//include/envoy/server:filter_config_interface", + "//test/mocks/server:factory_context_mocks", + ], +) + +envoy_cc_mock( + name = "tracer_factory_mocks", + srcs = ["tracer_factory.cc"], + hdrs = ["tracer_factory.h"], + deps = [ + 
"//include/envoy/protobuf:message_validator_interface", + "//include/envoy/server:tracer_config_interface", + ], +) + +envoy_cc_mock( + name = "tracer_factory_context_mocks", + srcs = ["tracer_factory_context.cc"], + hdrs = ["tracer_factory_context.h"], + deps = [ + "//include/envoy/server:configuration_interface", + "//test/mocks/server:instance_mocks", + "//test/mocks/server:tracer_factory_mocks", + ], +) + +envoy_cc_mock( + name = "server_mocks", + srcs = [], + hdrs = ["mocks.h"], + deps = [ + "//test/mocks/server:admin_mocks", + "//test/mocks/server:admin_stream_mocks", + "//test/mocks/server:bootstrap_extension_factory_mocks", + "//test/mocks/server:config_tracker_mocks", + "//test/mocks/server:drain_manager_mocks", + "//test/mocks/server:factory_context_mocks", + "//test/mocks/server:filter_chain_factory_context_mocks", + "//test/mocks/server:guard_dog_mocks", + "//test/mocks/server:health_checker_factory_context_mocks", + "//test/mocks/server:hot_restart_mocks", + "//test/mocks/server:instance_mocks", + "//test/mocks/server:listener_component_factory_mocks", + "//test/mocks/server:listener_factory_context_mocks", + "//test/mocks/server:listener_manager_mocks", + "//test/mocks/server:main_mocks", + "//test/mocks/server:options_mocks", + "//test/mocks/server:overload_manager_mocks", + "//test/mocks/server:server_lifecycle_notifier_mocks", + "//test/mocks/server:tracer_factory_context_mocks", + "//test/mocks/server:tracer_factory_mocks", + "//test/mocks/server:transport_socket_factory_context_mocks", + "//test/mocks/server:watch_dog_mocks", + "//test/mocks/server:worker_factory_mocks", + "//test/mocks/server:worker_mocks", ], ) diff --git a/test/mocks/server/admin.cc b/test/mocks/server/admin.cc new file mode 100644 index 0000000000000..2f873c5476335 --- /dev/null +++ b/test/mocks/server/admin.cc @@ -0,0 +1,16 @@ +#include "admin.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Server { +MockAdmin::MockAdmin() { + 
ON_CALL(*this, getConfigTracker()).WillByDefault(testing::ReturnRef(config_tracker_)); + ON_CALL(*this, concurrency()).WillByDefault(testing::Return(1)); +} + +MockAdmin::~MockAdmin() = default; + +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/admin.h b/test/mocks/server/admin.h new file mode 100644 index 0000000000000..8805ee969709b --- /dev/null +++ b/test/mocks/server/admin.h @@ -0,0 +1,39 @@ +#pragma once + +#include + +#include "envoy/server/admin.h" + +#include "absl/strings/string_view.h" +#include "config_tracker.h" +#include "gmock/gmock.h" + +namespace Envoy { +namespace Server { +class MockAdmin : public Admin { +public: + MockAdmin(); + ~MockAdmin() override; + + // Server::Admin + MOCK_METHOD(bool, addHandler, + (const std::string& prefix, const std::string& help_text, HandlerCb callback, + bool removable, bool mutates_server_state)); + MOCK_METHOD(bool, removeHandler, (const std::string& prefix)); + MOCK_METHOD(Network::Socket&, socket, ()); + MOCK_METHOD(ConfigTracker&, getConfigTracker, ()); + MOCK_METHOD(void, startHttpListener, + (const std::string& access_log_path, const std::string& address_out_path, + Network::Address::InstanceConstSharedPtr address, + const Network::Socket::OptionsSharedPtr& socket_options, + Stats::ScopePtr&& listener_scope)); + MOCK_METHOD(Http::Code, request, + (absl::string_view path_and_query, absl::string_view method, + Http::ResponseHeaderMap& response_headers, std::string& body)); + MOCK_METHOD(void, addListenerToHandler, (Network::ConnectionHandler * handler)); + MOCK_METHOD(uint32_t, concurrency, (), (const)); + + ::testing::NiceMock config_tracker_; +}; +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/admin_stream.cc b/test/mocks/server/admin_stream.cc new file mode 100644 index 0000000000000..9ed778013b181 --- /dev/null +++ b/test/mocks/server/admin_stream.cc @@ -0,0 +1,15 @@ +#include "admin_stream.h" + +#include + +#include "gmock/gmock.h" +#include 
"gtest/gtest.h" + +namespace Envoy { +namespace Server { +MockAdminStream::MockAdminStream() = default; + +MockAdminStream::~MockAdminStream() = default; + +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/admin_stream.h b/test/mocks/server/admin_stream.h new file mode 100644 index 0000000000000..d7972b4ae27c0 --- /dev/null +++ b/test/mocks/server/admin_stream.h @@ -0,0 +1,25 @@ +#pragma once + +#include "envoy/server/admin.h" + +#include "test/mocks/http/mocks.h" + +#include "gmock/gmock.h" + +namespace Envoy { +namespace Server { +class MockAdminStream : public AdminStream { +public: + MockAdminStream(); + ~MockAdminStream() override; + + MOCK_METHOD(void, setEndStreamOnComplete, (bool)); + MOCK_METHOD(void, addOnDestroyCallback, (std::function)); + MOCK_METHOD(const Buffer::Instance*, getRequestBody, (), (const)); + MOCK_METHOD(Http::RequestHeaderMap&, getRequestHeaders, (), (const)); + MOCK_METHOD(NiceMock&, getDecoderFilterCallbacks, (), + (const)); + MOCK_METHOD(Http::Http1StreamEncoderOptionsOptRef, http1StreamEncoderOptions, ()); +}; +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/bootstrap_extension_factory.cc b/test/mocks/server/bootstrap_extension_factory.cc new file mode 100644 index 0000000000000..80984ea4093dc --- /dev/null +++ b/test/mocks/server/bootstrap_extension_factory.cc @@ -0,0 +1,12 @@ +#include "bootstrap_extension_factory.h" + +namespace Envoy { +namespace Server { +namespace Configuration { +MockBootstrapExtensionFactory::MockBootstrapExtensionFactory() = default; + +MockBootstrapExtensionFactory::~MockBootstrapExtensionFactory() = default; + +} // namespace Configuration +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/bootstrap_extension_factory.h b/test/mocks/server/bootstrap_extension_factory.h new file mode 100644 index 0000000000000..f6421f7887880 --- /dev/null +++ b/test/mocks/server/bootstrap_extension_factory.h @@ -0,0 +1,22 @@ +#pragma once + 
+#include "envoy/server/bootstrap_extension_config.h" + +#include "gmock/gmock.h" + +namespace Envoy { +namespace Server { +namespace Configuration { +class MockBootstrapExtensionFactory : public BootstrapExtensionFactory { +public: + MockBootstrapExtensionFactory(); + ~MockBootstrapExtensionFactory() override; + + MOCK_METHOD(BootstrapExtensionPtr, createBootstrapExtension, + (const Protobuf::Message&, Configuration::ServerFactoryContext&), (override)); + MOCK_METHOD(ProtobufTypes::MessagePtr, createEmptyConfigProto, (), (override)); + MOCK_METHOD(std::string, name, (), (const, override)); +}; +} // namespace Configuration +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/config_tracker.cc b/test/mocks/server/config_tracker.cc new file mode 100644 index 0000000000000..bf53f7501e3ac --- /dev/null +++ b/test/mocks/server/config_tracker.cc @@ -0,0 +1,26 @@ +#include "config_tracker.h" + +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Server { + +using ::testing::_; +using ::testing::Invoke; + +MockConfigTracker::MockConfigTracker() { + ON_CALL(*this, add_(_, _)) + .WillByDefault(Invoke([this](const std::string& key, Cb callback) -> EntryOwner* { + EXPECT_TRUE(config_tracker_callbacks_.find(key) == config_tracker_callbacks_.end()); + config_tracker_callbacks_[key] = callback; + return new MockEntryOwner(); + })); +} + +MockConfigTracker::~MockConfigTracker() = default; + +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/config_tracker.h b/test/mocks/server/config_tracker.h new file mode 100644 index 0000000000000..1c30cf919c76b --- /dev/null +++ b/test/mocks/server/config_tracker.h @@ -0,0 +1,30 @@ +#pragma once + +#include + +#include "envoy/server/config_tracker.h" + +#include "absl/container/node_hash_map.h" +#include "gmock/gmock.h" + +namespace Envoy { +namespace Server { +class MockConfigTracker : public ConfigTracker { +public: + MockConfigTracker(); + 
~MockConfigTracker() override; + + struct MockEntryOwner : public EntryOwner {}; + + MOCK_METHOD(EntryOwner*, add_, (std::string, Cb)); + + // Server::ConfigTracker + MOCK_METHOD(const CbsMap&, getCallbacksMap, (), (const)); + EntryOwnerPtr add(const std::string& key, Cb callback) override { + return EntryOwnerPtr{add_(key, std::move(callback))}; + } + + absl::node_hash_map config_tracker_callbacks_; +}; +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/drain_manager.cc b/test/mocks/server/drain_manager.cc new file mode 100644 index 0000000000000..15735d57834c9 --- /dev/null +++ b/test/mocks/server/drain_manager.cc @@ -0,0 +1,21 @@ +#include "drain_manager.h" + +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Server { + +using ::testing::_; +using ::testing::SaveArg; + +MockDrainManager::MockDrainManager() { + ON_CALL(*this, startDrainSequence(_)).WillByDefault(SaveArg<0>(&drain_sequence_completion_)); +} + +MockDrainManager::~MockDrainManager() = default; + +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/drain_manager.h b/test/mocks/server/drain_manager.h new file mode 100644 index 0000000000000..dc0331b05876c --- /dev/null +++ b/test/mocks/server/drain_manager.h @@ -0,0 +1,28 @@ +#pragma once + +#include +#include +#include +#include + +#include "envoy/server/drain_manager.h" + +#include "gmock/gmock.h" + +namespace Envoy { +namespace Server { +class MockDrainManager : public DrainManager { +public: + MockDrainManager(); + ~MockDrainManager() override; + + // Server::DrainManager + MOCK_METHOD(bool, drainClose, (), (const)); + MOCK_METHOD(bool, draining, (), (const)); + MOCK_METHOD(void, startDrainSequence, (std::function completion)); + MOCK_METHOD(void, startParentShutdownSequence, ()); + + std::function drain_sequence_completion_; +}; +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/factory_context.cc 
b/test/mocks/server/factory_context.cc new file mode 100644 index 0000000000000..974371a1d3b51 --- /dev/null +++ b/test/mocks/server/factory_context.cc @@ -0,0 +1,47 @@ +#include "factory_context.h" + +#include + +#include "common/singleton/manager_impl.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Server { +namespace Configuration { + +using ::testing::ReturnRef; + +MockFactoryContext::MockFactoryContext() + : singleton_manager_(new Singleton::ManagerImpl(Thread::threadFactoryForTest())), + grpc_context_(scope_.symbolTable()), http_context_(scope_.symbolTable()) { + ON_CALL(*this, getServerFactoryContext()).WillByDefault(ReturnRef(server_factory_context_)); + ON_CALL(*this, accessLogManager()).WillByDefault(ReturnRef(access_log_manager_)); + ON_CALL(*this, clusterManager()).WillByDefault(ReturnRef(cluster_manager_)); + ON_CALL(*this, dispatcher()).WillByDefault(ReturnRef(dispatcher_)); + ON_CALL(*this, drainDecision()).WillByDefault(ReturnRef(drain_manager_)); + ON_CALL(*this, initManager()).WillByDefault(ReturnRef(init_manager_)); + ON_CALL(*this, lifecycleNotifier()).WillByDefault(ReturnRef(lifecycle_notifier_)); + ON_CALL(*this, localInfo()).WillByDefault(ReturnRef(local_info_)); + ON_CALL(*this, random()).WillByDefault(ReturnRef(random_)); + ON_CALL(*this, runtime()).WillByDefault(ReturnRef(runtime_loader_)); + ON_CALL(*this, scope()).WillByDefault(ReturnRef(scope_)); + ON_CALL(*this, singletonManager()).WillByDefault(ReturnRef(*singleton_manager_)); + ON_CALL(*this, threadLocal()).WillByDefault(ReturnRef(thread_local_)); + ON_CALL(*this, admin()).WillByDefault(ReturnRef(admin_)); + ON_CALL(*this, listenerScope()).WillByDefault(ReturnRef(listener_scope_)); + ON_CALL(*this, api()).WillByDefault(ReturnRef(api_)); + ON_CALL(*this, timeSource()).WillByDefault(ReturnRef(time_system_)); + ON_CALL(*this, overloadManager()).WillByDefault(ReturnRef(overload_manager_)); + ON_CALL(*this, 
messageValidationContext()).WillByDefault(ReturnRef(validation_context_)); + ON_CALL(*this, messageValidationVisitor()) + .WillByDefault(ReturnRef(ProtobufMessage::getStrictValidationVisitor())); + ON_CALL(*this, api()).WillByDefault(ReturnRef(api_)); +} + +MockFactoryContext::~MockFactoryContext() = default; + +} // namespace Configuration +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/factory_context.h b/test/mocks/server/factory_context.h new file mode 100644 index 0000000000000..1bdab558962e2 --- /dev/null +++ b/test/mocks/server/factory_context.h @@ -0,0 +1,75 @@ +#pragma once + +#include "envoy/server/configuration.h" + +#include "extensions/transport_sockets/tls/context_manager_impl.h" + +#include "admin.h" +#include "drain_manager.h" +#include "gmock/gmock.h" +#include "instance.h" +#include "overload_manager.h" +#include "server_lifecycle_notifier.h" + +namespace Envoy { +namespace Server { +namespace Configuration { +class MockFactoryContext : public virtual FactoryContext { +public: + MockFactoryContext(); + ~MockFactoryContext() override; + + MOCK_METHOD(ServerFactoryContext&, getServerFactoryContext, (), (const)); + MOCK_METHOD(TransportSocketFactoryContext&, getTransportSocketFactoryContext, (), (const)); + MOCK_METHOD(AccessLog::AccessLogManager&, accessLogManager, ()); + MOCK_METHOD(Upstream::ClusterManager&, clusterManager, ()); + MOCK_METHOD(Event::Dispatcher&, dispatcher, ()); + MOCK_METHOD(const Network::DrainDecision&, drainDecision, ()); + MOCK_METHOD(bool, healthCheckFailed, ()); + MOCK_METHOD(Init::Manager&, initManager, ()); + MOCK_METHOD(ServerLifecycleNotifier&, lifecycleNotifier, ()); + MOCK_METHOD(Envoy::Random::RandomGenerator&, random, ()); + MOCK_METHOD(Envoy::Runtime::Loader&, runtime, ()); + MOCK_METHOD(Stats::Scope&, scope, ()); + MOCK_METHOD(Singleton::Manager&, singletonManager, ()); + MOCK_METHOD(OverloadManager&, overloadManager, ()); + MOCK_METHOD(ThreadLocal::Instance&, threadLocal, ()); + 
MOCK_METHOD(Server::Admin&, admin, ()); + MOCK_METHOD(Stats::Scope&, listenerScope, ()); + MOCK_METHOD(const LocalInfo::LocalInfo&, localInfo, (), (const)); + MOCK_METHOD(const envoy::config::core::v3::Metadata&, listenerMetadata, (), (const)); + MOCK_METHOD(envoy::config::core::v3::TrafficDirection, direction, (), (const)); + MOCK_METHOD(TimeSource&, timeSource, ()); + Event::TestTimeSystem& timeSystem() { return time_system_; } + Grpc::Context& grpcContext() override { return grpc_context_; } + Http::Context& httpContext() override { return http_context_; } + MOCK_METHOD(ProcessContextOptRef, processContext, ()); + MOCK_METHOD(ProtobufMessage::ValidationContext&, messageValidationContext, ()); + MOCK_METHOD(ProtobufMessage::ValidationVisitor&, messageValidationVisitor, ()); + MOCK_METHOD(Api::Api&, api, ()); + + testing::NiceMock server_factory_context_; + testing::NiceMock access_log_manager_; + testing::NiceMock cluster_manager_; + testing::NiceMock dispatcher_; + testing::NiceMock drain_manager_; + testing::NiceMock init_manager_; + testing::NiceMock lifecycle_notifier_; + testing::NiceMock local_info_; + testing::NiceMock random_; + testing::NiceMock runtime_loader_; + testing::NiceMock scope_; + testing::NiceMock thread_local_; + Singleton::ManagerPtr singleton_manager_; + testing::NiceMock admin_; + Stats::IsolatedStoreImpl listener_scope_; + Event::GlobalTimeSystem time_system_; + testing::NiceMock validation_context_; + testing::NiceMock overload_manager_; + Grpc::ContextImpl grpc_context_; + Http::ContextImpl http_context_; + testing::NiceMock api_; +}; +} // namespace Configuration +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/filter_chain_factory_context.cc b/test/mocks/server/filter_chain_factory_context.cc new file mode 100644 index 0000000000000..674f95f4a558e --- /dev/null +++ b/test/mocks/server/filter_chain_factory_context.cc @@ -0,0 +1,17 @@ +#include "filter_chain_factory_context.h" + +#include + +#include 
"gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Server { +namespace Configuration { +MockFilterChainFactoryContext::MockFilterChainFactoryContext() = default; + +MockFilterChainFactoryContext::~MockFilterChainFactoryContext() = default; + +} // namespace Configuration +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/filter_chain_factory_context.h b/test/mocks/server/filter_chain_factory_context.h new file mode 100644 index 0000000000000..f09e7a565722c --- /dev/null +++ b/test/mocks/server/filter_chain_factory_context.h @@ -0,0 +1,17 @@ +#pragma once + +#include "envoy/server/filter_config.h" + +#include "factory_context.h" + +namespace Envoy { +namespace Server { +namespace Configuration { +class MockFilterChainFactoryContext : public MockFactoryContext, public FilterChainFactoryContext { +public: + MockFilterChainFactoryContext(); + ~MockFilterChainFactoryContext() override; +}; +} // namespace Configuration +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/guard_dog.cc b/test/mocks/server/guard_dog.cc new file mode 100644 index 0000000000000..e5e552c234f50 --- /dev/null +++ b/test/mocks/server/guard_dog.cc @@ -0,0 +1,20 @@ +#include "guard_dog.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Server { + +using ::testing::_; +using ::testing::NiceMock; +using ::testing::Return; + +MockGuardDog::MockGuardDog() : watch_dog_(new NiceMock()) { + ON_CALL(*this, createWatchDog(_, _)).WillByDefault(Return(watch_dog_)); +} + +MockGuardDog::~MockGuardDog() = default; + +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/guard_dog.h b/test/mocks/server/guard_dog.h new file mode 100644 index 0000000000000..fed29041db3e3 --- /dev/null +++ b/test/mocks/server/guard_dog.h @@ -0,0 +1,23 @@ +#pragma once + +#include "envoy/server/guarddog.h" + +#include "gmock/gmock.h" +#include "watch_dog.h" + +namespace Envoy { +namespace Server { +class 
MockGuardDog : public GuardDog { +public: + MockGuardDog(); + ~MockGuardDog() override; + + // Server::GuardDog + MOCK_METHOD(WatchDogSharedPtr, createWatchDog, + (Thread::ThreadId thread_id, const std::string& thread_name)); + MOCK_METHOD(void, stopWatching, (WatchDogSharedPtr wd)); + + std::shared_ptr watch_dog_; +}; +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/health_checker_factory_context.cc b/test/mocks/server/health_checker_factory_context.cc new file mode 100644 index 0000000000000..f6a17d962e2ac --- /dev/null +++ b/test/mocks/server/health_checker_factory_context.cc @@ -0,0 +1,30 @@ +#include "health_checker_factory_context.h" + +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Server { +namespace Configuration { + +using ::testing::ReturnRef; + +MockHealthCheckerFactoryContext::MockHealthCheckerFactoryContext() { + event_logger_ = new testing::NiceMock(); + ON_CALL(*this, cluster()).WillByDefault(ReturnRef(cluster_)); + ON_CALL(*this, dispatcher()).WillByDefault(ReturnRef(dispatcher_)); + ON_CALL(*this, random()).WillByDefault(ReturnRef(random_)); + ON_CALL(*this, runtime()).WillByDefault(ReturnRef(runtime_)); + ON_CALL(*this, eventLogger_()).WillByDefault(Return(event_logger_)); + ON_CALL(*this, messageValidationVisitor()) + .WillByDefault(ReturnRef(ProtobufMessage::getStrictValidationVisitor())); + ON_CALL(*this, api()).WillByDefault(ReturnRef(api_)); +} + +MockHealthCheckerFactoryContext::~MockHealthCheckerFactoryContext() = default; + +} // namespace Configuration +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/health_checker_factory_context.h b/test/mocks/server/health_checker_factory_context.h new file mode 100644 index 0000000000000..6de7f59dd77cf --- /dev/null +++ b/test/mocks/server/health_checker_factory_context.h @@ -0,0 +1,43 @@ +#pragma once + +#include "envoy/server/health_checker_config.h" + +#include "test/mocks/api/mocks.h" +#include 
"test/mocks/event/mocks.h" +#include "test/mocks/protobuf/mocks.h" +#include "test/mocks/router/mocks.h" +#include "test/mocks/runtime/mocks.h" +#include "test/mocks/upstream/mocks.h" + +#include "gmock/gmock.h" + +namespace Envoy { +namespace Server { +namespace Configuration { +class MockHealthCheckerFactoryContext : public virtual HealthCheckerFactoryContext { +public: + MockHealthCheckerFactoryContext(); + ~MockHealthCheckerFactoryContext() override; + + MOCK_METHOD(Upstream::Cluster&, cluster, ()); + MOCK_METHOD(Event::Dispatcher&, dispatcher, ()); + MOCK_METHOD(Envoy::Random::RandomGenerator&, random, ()); + MOCK_METHOD(Envoy::Runtime::Loader&, runtime, ()); + MOCK_METHOD(Upstream::HealthCheckEventLogger*, eventLogger_, ()); + MOCK_METHOD(ProtobufMessage::ValidationVisitor&, messageValidationVisitor, ()); + MOCK_METHOD(Api::Api&, api, ()); + Upstream::HealthCheckEventLoggerPtr eventLogger() override { + return Upstream::HealthCheckEventLoggerPtr(eventLogger_()); + } + + testing::NiceMock cluster_; + testing::NiceMock dispatcher_; + testing::NiceMock random_; + testing::NiceMock runtime_; + testing::NiceMock* event_logger_{}; + testing::NiceMock api_{}; +}; +} // namespace Configuration + +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/hot_restart.cc b/test/mocks/server/hot_restart.cc new file mode 100644 index 0000000000000..8a11dbe8011da --- /dev/null +++ b/test/mocks/server/hot_restart.cc @@ -0,0 +1,22 @@ +#include "hot_restart.h" + +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Server { + +using ::testing::ReturnRef; + +MockHotRestart::MockHotRestart() : stats_allocator_(*symbol_table_) { + ON_CALL(*this, logLock()).WillByDefault(ReturnRef(log_lock_)); + ON_CALL(*this, accessLogLock()).WillByDefault(ReturnRef(access_log_lock_)); + ON_CALL(*this, statsAllocator()).WillByDefault(ReturnRef(stats_allocator_)); +} + +MockHotRestart::~MockHotRestart() = default; + +} // namespace Server 
+} // namespace Envoy diff --git a/test/mocks/server/hot_restart.h b/test/mocks/server/hot_restart.h new file mode 100644 index 0000000000000..c6edd13d89050 --- /dev/null +++ b/test/mocks/server/hot_restart.h @@ -0,0 +1,38 @@ +#pragma once + +#include "envoy/server/instance.h" + +#include "test/mocks/stats/mocks.h" + +#include "gmock/gmock.h" + +namespace Envoy { +namespace Server { +class MockHotRestart : public HotRestart { +public: + MockHotRestart(); + ~MockHotRestart() override; + + // Server::HotRestart + MOCK_METHOD(void, drainParentListeners, ()); + MOCK_METHOD(int, duplicateParentListenSocket, (const std::string& address)); + MOCK_METHOD(std::unique_ptr, getParentStats, ()); + MOCK_METHOD(void, initialize, (Event::Dispatcher & dispatcher, Server::Instance& server)); + MOCK_METHOD(void, sendParentAdminShutdownRequest, (time_t & original_start_time)); + MOCK_METHOD(void, sendParentTerminateRequest, ()); + MOCK_METHOD(ServerStatsFromParent, mergeParentStatsIfAny, (Stats::StoreRoot & stats_store)); + MOCK_METHOD(void, shutdown, ()); + MOCK_METHOD(uint32_t, baseId, ()); + MOCK_METHOD(std::string, version, ()); + MOCK_METHOD(Thread::BasicLockable&, logLock, ()); + MOCK_METHOD(Thread::BasicLockable&, accessLogLock, ()); + MOCK_METHOD(Stats::Allocator&, statsAllocator, ()); + +private: + Stats::TestSymbolTable symbol_table_; + Thread::MutexBasicLockable log_lock_; + Thread::MutexBasicLockable access_log_lock_; + Stats::AllocatorImpl stats_allocator_; +}; +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/instance.cc b/test/mocks/server/instance.cc new file mode 100644 index 0000000000000..91c102c452679 --- /dev/null +++ b/test/mocks/server/instance.cc @@ -0,0 +1,81 @@ +#include "instance.h" + +#include "common/singleton/manager_impl.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Server { + +using ::testing::Return; +using ::testing::ReturnRef; + +MockInstance::MockInstance() + : 
secret_manager_(std::make_unique(admin_.getConfigTracker())), + cluster_manager_(timeSource()), ssl_context_manager_(timeSource()), + singleton_manager_(new Singleton::ManagerImpl(Thread::threadFactoryForTest())), + grpc_context_(stats_store_.symbolTable()), http_context_(stats_store_.symbolTable()), + server_factory_context_( + std::make_shared>()), + transport_socket_factory_context_( + std::make_shared>()) { + ON_CALL(*this, threadLocal()).WillByDefault(ReturnRef(thread_local_)); + ON_CALL(*this, stats()).WillByDefault(ReturnRef(stats_store_)); + ON_CALL(*this, grpcContext()).WillByDefault(ReturnRef(grpc_context_)); + ON_CALL(*this, httpContext()).WillByDefault(ReturnRef(http_context_)); + ON_CALL(*this, dnsResolver()).WillByDefault(Return(dns_resolver_)); + ON_CALL(*this, api()).WillByDefault(ReturnRef(api_)); + ON_CALL(*this, admin()).WillByDefault(ReturnRef(admin_)); + ON_CALL(*this, clusterManager()).WillByDefault(ReturnRef(cluster_manager_)); + ON_CALL(*this, sslContextManager()).WillByDefault(ReturnRef(ssl_context_manager_)); + ON_CALL(*this, accessLogManager()).WillByDefault(ReturnRef(access_log_manager_)); + ON_CALL(*this, runtime()).WillByDefault(ReturnRef(runtime_loader_)); + ON_CALL(*this, dispatcher()).WillByDefault(ReturnRef(dispatcher_)); + ON_CALL(*this, hotRestart()).WillByDefault(ReturnRef(hot_restart_)); + ON_CALL(*this, random()).WillByDefault(ReturnRef(random_)); + ON_CALL(*this, lifecycleNotifier()).WillByDefault(ReturnRef(lifecycle_notifier_)); + ON_CALL(*this, localInfo()).WillByDefault(ReturnRef(local_info_)); + ON_CALL(*this, options()).WillByDefault(ReturnRef(options_)); + ON_CALL(*this, drainManager()).WillByDefault(ReturnRef(drain_manager_)); + ON_CALL(*this, initManager()).WillByDefault(ReturnRef(init_manager_)); + ON_CALL(*this, listenerManager()).WillByDefault(ReturnRef(listener_manager_)); + ON_CALL(*this, mutexTracer()).WillByDefault(Return(nullptr)); + ON_CALL(*this, 
singletonManager()).WillByDefault(ReturnRef(*singleton_manager_)); + ON_CALL(*this, overloadManager()).WillByDefault(ReturnRef(overload_manager_)); + ON_CALL(*this, messageValidationContext()).WillByDefault(ReturnRef(validation_context_)); + ON_CALL(*this, serverFactoryContext()).WillByDefault(ReturnRef(*server_factory_context_)); + ON_CALL(*this, transportSocketFactoryContext()) + .WillByDefault(ReturnRef(*transport_socket_factory_context_)); +} + +MockInstance::~MockInstance() = default; + +namespace Configuration { + +MockServerFactoryContext::MockServerFactoryContext() + : singleton_manager_(new Singleton::ManagerImpl(Thread::threadFactoryForTest())), + grpc_context_(scope_.symbolTable()) { + ON_CALL(*this, clusterManager()).WillByDefault(ReturnRef(cluster_manager_)); + ON_CALL(*this, dispatcher()).WillByDefault(ReturnRef(dispatcher_)); + ON_CALL(*this, drainDecision()).WillByDefault(ReturnRef(drain_manager_)); + ON_CALL(*this, localInfo()).WillByDefault(ReturnRef(local_info_)); + ON_CALL(*this, random()).WillByDefault(ReturnRef(random_)); + ON_CALL(*this, runtime()).WillByDefault(ReturnRef(runtime_loader_)); + ON_CALL(*this, scope()).WillByDefault(ReturnRef(scope_)); + ON_CALL(*this, singletonManager()).WillByDefault(ReturnRef(*singleton_manager_)); + ON_CALL(*this, threadLocal()).WillByDefault(ReturnRef(thread_local_)); + ON_CALL(*this, admin()).WillByDefault(ReturnRef(admin_)); + ON_CALL(*this, api()).WillByDefault(ReturnRef(api_)); + ON_CALL(*this, timeSource()).WillByDefault(ReturnRef(time_system_)); + ON_CALL(*this, messageValidationContext()).WillByDefault(ReturnRef(validation_context_)); + ON_CALL(*this, messageValidationVisitor()) + .WillByDefault(ReturnRef(ProtobufMessage::getStrictValidationVisitor())); + ON_CALL(*this, api()).WillByDefault(ReturnRef(api_)); + ON_CALL(*this, drainManager()).WillByDefault(ReturnRef(drain_manager_)); +} +MockServerFactoryContext::~MockServerFactoryContext() = default; + +} // namespace Configuration +} // namespace 
Server +} // namespace Envoy diff --git a/test/mocks/server/instance.h b/test/mocks/server/instance.h new file mode 100644 index 0000000000000..9daa9c3b84766 --- /dev/null +++ b/test/mocks/server/instance.h @@ -0,0 +1,171 @@ +#pragma once + +#include "envoy/server/instance.h" + +#include "common/grpc/context_impl.h" +#include "common/http/context_impl.h" +#include "common/stats/fake_symbol_table_impl.h" + +#include "extensions/transport_sockets/tls/context_manager_impl.h" + +#include "test/mocks/access_log/mocks.h" +#include "test/mocks/api/mocks.h" +#include "test/mocks/event/mocks.h" +#include "test/mocks/http/mocks.h" +#include "test/mocks/init/mocks.h" +#include "test/mocks/local_info/mocks.h" +#include "test/mocks/network/mocks.h" +#include "test/mocks/protobuf/mocks.h" +#include "test/mocks/runtime/mocks.h" +#include "test/mocks/secret/mocks.h" +#include "test/mocks/stats/mocks.h" +#include "test/mocks/thread_local/mocks.h" +#include "test/mocks/tracing/mocks.h" +#include "test/mocks/upstream/mocks.h" + +#include "admin.h" +#include "drain_manager.h" +#include "gmock/gmock.h" +#include "hot_restart.h" +#include "listener_manager.h" +#include "options.h" +#include "overload_manager.h" +#include "server_lifecycle_notifier.h" +#include "transport_socket_factory_context.h" + +namespace Envoy { +namespace Server { +namespace Configuration { +class MockServerFactoryContext; +} // namespace Configuration + +class MockInstance : public Instance { +public: + MockInstance(); + ~MockInstance() override; + + Secret::SecretManager& secretManager() override { return *(secret_manager_.get()); } + + MOCK_METHOD(Admin&, admin, ()); + MOCK_METHOD(Api::Api&, api, ()); + MOCK_METHOD(Upstream::ClusterManager&, clusterManager, ()); + MOCK_METHOD(Ssl::ContextManager&, sslContextManager, ()); + MOCK_METHOD(Event::Dispatcher&, dispatcher, ()); + MOCK_METHOD(Network::DnsResolverSharedPtr, dnsResolver, ()); + MOCK_METHOD(void, drainListeners, ()); + MOCK_METHOD(DrainManager&, 
drainManager, ()); + MOCK_METHOD(AccessLog::AccessLogManager&, accessLogManager, ()); + MOCK_METHOD(void, failHealthcheck, (bool fail)); + MOCK_METHOD(void, exportStatsToChild, (envoy::HotRestartMessage::Reply::Stats*)); + MOCK_METHOD(bool, healthCheckFailed, ()); + MOCK_METHOD(HotRestart&, hotRestart, ()); + MOCK_METHOD(Init::Manager&, initManager, ()); + MOCK_METHOD(ServerLifecycleNotifier&, lifecycleNotifier, ()); + MOCK_METHOD(ListenerManager&, listenerManager, ()); + MOCK_METHOD(Envoy::MutexTracer*, mutexTracer, ()); + MOCK_METHOD(const Options&, options, ()); + MOCK_METHOD(OverloadManager&, overloadManager, ()); + MOCK_METHOD(Random::RandomGenerator&, random, ()); + MOCK_METHOD(Runtime::Loader&, runtime, ()); + MOCK_METHOD(void, shutdown, ()); + MOCK_METHOD(bool, isShutdown, ()); + MOCK_METHOD(void, shutdownAdmin, ()); + MOCK_METHOD(Singleton::Manager&, singletonManager, ()); + MOCK_METHOD(time_t, startTimeCurrentEpoch, ()); + MOCK_METHOD(time_t, startTimeFirstEpoch, ()); + MOCK_METHOD(Stats::Store&, stats, ()); + MOCK_METHOD(Grpc::Context&, grpcContext, ()); + MOCK_METHOD(Http::Context&, httpContext, ()); + MOCK_METHOD(ProcessContextOptRef, processContext, ()); + MOCK_METHOD(ThreadLocal::Instance&, threadLocal, ()); + MOCK_METHOD(const LocalInfo::LocalInfo&, localInfo, (), (const)); + MOCK_METHOD(std::chrono::milliseconds, statsFlushInterval, (), (const)); + MOCK_METHOD(void, flushStats, ()); + MOCK_METHOD(ProtobufMessage::ValidationContext&, messageValidationContext, ()); + MOCK_METHOD(Configuration::ServerFactoryContext&, serverFactoryContext, ()); + MOCK_METHOD(Configuration::TransportSocketFactoryContext&, transportSocketFactoryContext, ()); + + void setDefaultTracingConfig(const envoy::config::trace::v3::Tracing& tracing_config) override { + http_context_.setDefaultTracingConfig(tracing_config); + } + + TimeSource& timeSource() override { return time_system_; } + + NiceMock stats_store_; + testing::NiceMock thread_local_; + std::shared_ptr> 
dns_resolver_{ + new testing::NiceMock()}; + testing::NiceMock api_; + testing::NiceMock admin_; + Event::GlobalTimeSystem time_system_; + std::unique_ptr secret_manager_; + testing::NiceMock cluster_manager_; + Thread::MutexBasicLockable access_log_lock_; + testing::NiceMock runtime_loader_; + Extensions::TransportSockets::Tls::ContextManagerImpl ssl_context_manager_; + testing::NiceMock dispatcher_; + testing::NiceMock drain_manager_; + testing::NiceMock access_log_manager_; + testing::NiceMock hot_restart_; + testing::NiceMock options_; + testing::NiceMock random_; + testing::NiceMock lifecycle_notifier_; + testing::NiceMock local_info_; + testing::NiceMock init_manager_; + testing::NiceMock listener_manager_; + testing::NiceMock overload_manager_; + Singleton::ManagerPtr singleton_manager_; + Grpc::ContextImpl grpc_context_; + Http::ContextImpl http_context_; + testing::NiceMock validation_context_; + std::shared_ptr> + server_factory_context_; + std::shared_ptr> + transport_socket_factory_context_; +}; + +namespace Configuration { +class MockServerFactoryContext : public virtual ServerFactoryContext { +public: + MockServerFactoryContext(); + ~MockServerFactoryContext() override; + + MOCK_METHOD(Upstream::ClusterManager&, clusterManager, ()); + MOCK_METHOD(Event::Dispatcher&, dispatcher, ()); + MOCK_METHOD(const Network::DrainDecision&, drainDecision, ()); + MOCK_METHOD(const LocalInfo::LocalInfo&, localInfo, (), (const)); + MOCK_METHOD(Envoy::Random::RandomGenerator&, random, ()); + MOCK_METHOD(Envoy::Runtime::Loader&, runtime, ()); + MOCK_METHOD(Stats::Scope&, scope, ()); + MOCK_METHOD(Singleton::Manager&, singletonManager, ()); + MOCK_METHOD(ThreadLocal::Instance&, threadLocal, ()); + MOCK_METHOD(Server::Admin&, admin, ()); + MOCK_METHOD(TimeSource&, timeSource, ()); + Event::TestTimeSystem& timeSystem() { return time_system_; } + MOCK_METHOD(ProtobufMessage::ValidationContext&, messageValidationContext, ()); + 
MOCK_METHOD(ProtobufMessage::ValidationVisitor&, messageValidationVisitor, ()); + MOCK_METHOD(Api::Api&, api, ()); + Grpc::Context& grpcContext() override { return grpc_context_; } + MOCK_METHOD(Server::DrainManager&, drainManager, ()); + MOCK_METHOD(Init::Manager&, initManager, ()); + MOCK_METHOD(ServerLifecycleNotifier&, lifecycleNotifier, ()); + MOCK_METHOD(std::chrono::milliseconds, statsFlushInterval, (), (const)); + + testing::NiceMock cluster_manager_; + testing::NiceMock dispatcher_; + testing::NiceMock drain_manager_; + testing::NiceMock local_info_; + testing::NiceMock random_; + testing::NiceMock runtime_loader_; + testing::NiceMock scope_; + testing::NiceMock thread_local_; + testing::NiceMock validation_context_; + Singleton::ManagerPtr singleton_manager_; + testing::NiceMock admin_; + Event::GlobalTimeSystem time_system_; + testing::NiceMock api_; + Grpc::ContextImpl grpc_context_; +}; +} // namespace Configuration +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/listener_component_factory.cc b/test/mocks/server/listener_component_factory.cc new file mode 100644 index 0000000000000..a0b00b7bc756a --- /dev/null +++ b/test/mocks/server/listener_component_factory.cc @@ -0,0 +1,31 @@ +#include "listener_component_factory.h" + +#include "envoy/config/core/v3/base.pb.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Server { + +using ::testing::_; +using ::testing::Invoke; + +MockListenerComponentFactory::MockListenerComponentFactory() + : socket_(std::make_shared>()) { + ON_CALL(*this, createListenSocket(_, _, _, _)) + .WillByDefault(Invoke([&](Network::Address::InstanceConstSharedPtr, Network::Socket::Type, + const Network::Socket::OptionsSharedPtr& options, + const ListenSocketCreationParams&) -> Network::SocketSharedPtr { + if (!Network::Socket::applyOptions(options, *socket_, + envoy::config::core::v3::SocketOption::STATE_PREBIND)) { + throw EnvoyException("MockListenerComponentFactory: 
Setting socket options failed"); + } + return socket_; + })); +} + +MockListenerComponentFactory::~MockListenerComponentFactory() = default; + +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/listener_component_factory.h b/test/mocks/server/listener_component_factory.h new file mode 100644 index 0000000000000..84d73dd4bfd55 --- /dev/null +++ b/test/mocks/server/listener_component_factory.h @@ -0,0 +1,47 @@ +#pragma once + +#include "envoy/config/listener/v3/listener_components.pb.h" +#include "envoy/server/drain_manager.h" +#include "envoy/server/listener_manager.h" + +#include "test/mocks/network/mocks.h" + +#include "gmock/gmock.h" + +namespace Envoy { +namespace Server { +class MockListenerComponentFactory : public ListenerComponentFactory { +public: + MockListenerComponentFactory(); + ~MockListenerComponentFactory() override; + + DrainManagerPtr + createDrainManager(envoy::config::listener::v3::Listener::DrainType drain_type) override { + return DrainManagerPtr{createDrainManager_(drain_type)}; + } + LdsApiPtr createLdsApi(const envoy::config::core::v3::ConfigSource& lds_config) override { + return LdsApiPtr{createLdsApi_(lds_config)}; + } + + MOCK_METHOD(LdsApi*, createLdsApi_, (const envoy::config::core::v3::ConfigSource& lds_config)); + MOCK_METHOD(std::vector, createNetworkFilterFactoryList, + (const Protobuf::RepeatedPtrField& filters, + Configuration::FilterChainFactoryContext& filter_chain_factory_context)); + MOCK_METHOD(std::vector, createListenerFilterFactoryList, + (const Protobuf::RepeatedPtrField&, + Configuration::ListenerFactoryContext& context)); + MOCK_METHOD(std::vector, createUdpListenerFilterFactoryList, + (const Protobuf::RepeatedPtrField&, + Configuration::ListenerFactoryContext& context)); + MOCK_METHOD(Network::SocketSharedPtr, createListenSocket, + (Network::Address::InstanceConstSharedPtr address, Network::Socket::Type socket_type, + const Network::Socket::OptionsSharedPtr& options, + const 
ListenSocketCreationParams& params)); + MOCK_METHOD(DrainManager*, createDrainManager_, + (envoy::config::listener::v3::Listener::DrainType drain_type)); + MOCK_METHOD(uint64_t, nextListenerTag, ()); + + std::shared_ptr socket_; +}; +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/listener_factory_context.cc b/test/mocks/server/listener_factory_context.cc new file mode 100644 index 0000000000000..99a7678c1a274 --- /dev/null +++ b/test/mocks/server/listener_factory_context.cc @@ -0,0 +1,15 @@ +#include "listener_factory_context.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Server { +namespace Configuration { +MockListenerFactoryContext::MockListenerFactoryContext() = default; + +MockListenerFactoryContext::~MockListenerFactoryContext() = default; + +} // namespace Configuration +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/listener_factory_context.h b/test/mocks/server/listener_factory_context.h new file mode 100644 index 0000000000000..8d322735b5779 --- /dev/null +++ b/test/mocks/server/listener_factory_context.h @@ -0,0 +1,23 @@ +#pragma once + +#include "envoy/server/listener_manager.h" + +#include "factory_context.h" +#include "gmock/gmock.h" + +namespace Envoy { +namespace Server { +namespace Configuration { +class MockListenerFactoryContext : public MockFactoryContext, public ListenerFactoryContext { +public: + MockListenerFactoryContext(); + ~MockListenerFactoryContext() override; + + const Network::ListenerConfig& listenerConfig() const override { return listener_config_; } + MOCK_METHOD(const Network::ListenerConfig&, listenerConfig_, (), (const)); + + Network::MockListenerConfig listener_config_; +}; +} // namespace Configuration +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/listener_manager.cc b/test/mocks/server/listener_manager.cc new file mode 100644 index 0000000000000..0448ff4e7122a --- /dev/null +++ 
b/test/mocks/server/listener_manager.cc @@ -0,0 +1,16 @@ +#include "listener_manager.h" + +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Server { + +MockListenerManager::MockListenerManager() = default; + +MockListenerManager::~MockListenerManager() = default; + +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/listener_manager.h b/test/mocks/server/listener_manager.h new file mode 100644 index 0000000000000..a91a9acb1764e --- /dev/null +++ b/test/mocks/server/listener_manager.h @@ -0,0 +1,30 @@ +#pragma once + +#include "envoy/server/listener_manager.h" + +#include "gmock/gmock.h" + +namespace Envoy { +namespace Server { +class MockListenerManager : public ListenerManager { +public: + MockListenerManager(); + ~MockListenerManager() override; + + MOCK_METHOD(bool, addOrUpdateListener, + (const envoy::config::listener::v3::Listener& config, const std::string& version_info, + bool modifiable)); + MOCK_METHOD(void, createLdsApi, (const envoy::config::core::v3::ConfigSource& lds_config)); + MOCK_METHOD(std::vector>, listeners, + (ListenerState state)); + MOCK_METHOD(uint64_t, numConnections, (), (const)); + MOCK_METHOD(bool, removeListener, (const std::string& listener_name)); + MOCK_METHOD(void, startWorkers, (GuardDog & guard_dog)); + MOCK_METHOD(void, stopListeners, (StopListenersType listeners_type)); + MOCK_METHOD(void, stopWorkers, ()); + MOCK_METHOD(void, beginListenerUpdate, ()); + MOCK_METHOD(void, endListenerUpdate, (ListenerManager::FailureStates &&)); + MOCK_METHOD(ApiListenerOptRef, apiListener, ()); +}; +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/main.cc b/test/mocks/server/main.cc new file mode 100644 index 0000000000000..26bde5941bedf --- /dev/null +++ b/test/mocks/server/main.cc @@ -0,0 +1,27 @@ +#include "main.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Server { +namespace Configuration { + +using 
::testing::Return; + +MockMain::MockMain(int wd_miss, int wd_megamiss, int wd_kill, int wd_multikill, + double wd_multikill_threshold) + : wd_miss_(wd_miss), wd_megamiss_(wd_megamiss), wd_kill_(wd_kill), wd_multikill_(wd_multikill), + wd_multikill_threshold_(wd_multikill_threshold) { + ON_CALL(*this, wdMissTimeout()).WillByDefault(Return(wd_miss_)); + ON_CALL(*this, wdMegaMissTimeout()).WillByDefault(Return(wd_megamiss_)); + ON_CALL(*this, wdKillTimeout()).WillByDefault(Return(wd_kill_)); + ON_CALL(*this, wdMultiKillTimeout()).WillByDefault(Return(wd_multikill_)); + ON_CALL(*this, wdMultiKillThreshold()).WillByDefault(Return(wd_multikill_threshold_)); +} + +MockMain::~MockMain() = default; + +} // namespace Configuration +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/main.h b/test/mocks/server/main.h new file mode 100644 index 0000000000000..c89b637e669a0 --- /dev/null +++ b/test/mocks/server/main.h @@ -0,0 +1,40 @@ +#pragma once + +#include +#include +#include +#include + +#include "envoy/server/configuration.h" +#include "envoy/server/overload_manager.h" + +#include "gmock/gmock.h" + +namespace Envoy { +namespace Server { +namespace Configuration { +class MockMain : public Main { +public: + MockMain() : MockMain(0, 0, 0, 0, 0.0) {} + MockMain(int wd_miss, int wd_megamiss, int wd_kill, int wd_multikill, + double wd_multikill_threshold); + ~MockMain() override; + + MOCK_METHOD(Upstream::ClusterManager*, clusterManager, ()); + MOCK_METHOD(std::list&, statsSinks, ()); + MOCK_METHOD(std::chrono::milliseconds, statsFlushInterval, (), (const)); + MOCK_METHOD(std::chrono::milliseconds, wdMissTimeout, (), (const)); + MOCK_METHOD(std::chrono::milliseconds, wdMegaMissTimeout, (), (const)); + MOCK_METHOD(std::chrono::milliseconds, wdKillTimeout, (), (const)); + MOCK_METHOD(std::chrono::milliseconds, wdMultiKillTimeout, (), (const)); + MOCK_METHOD(double, wdMultiKillThreshold, (), (const)); + + std::chrono::milliseconds wd_miss_; + 
std::chrono::milliseconds wd_megamiss_; + std::chrono::milliseconds wd_kill_; + std::chrono::milliseconds wd_multikill_; + double wd_multikill_threshold_; +}; +} // namespace Configuration +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/mocks.cc b/test/mocks/server/mocks.cc deleted file mode 100644 index 148fd24e9da57..0000000000000 --- a/test/mocks/server/mocks.cc +++ /dev/null @@ -1,304 +0,0 @@ -#include "mocks.h" - -#include - -#include "envoy/admin/v3/server_info.pb.h" -#include "envoy/config/core/v3/base.pb.h" - -#include "common/singleton/manager_impl.h" - -#include "gmock/gmock.h" -#include "gtest/gtest.h" - -using testing::_; -using testing::Invoke; -using testing::Return; -using testing::ReturnPointee; -using testing::ReturnRef; -using testing::SaveArg; - -namespace Envoy { -namespace Server { - -MockOptions::MockOptions(const std::string& config_path) : config_path_(config_path) { - ON_CALL(*this, concurrency()).WillByDefault(ReturnPointee(&concurrency_)); - ON_CALL(*this, configPath()).WillByDefault(ReturnRef(config_path_)); - ON_CALL(*this, configProto()).WillByDefault(ReturnRef(config_proto_)); - ON_CALL(*this, configYaml()).WillByDefault(ReturnRef(config_yaml_)); - ON_CALL(*this, allowUnknownStaticFields()).WillByDefault(Invoke([this] { - return allow_unknown_static_fields_; - })); - ON_CALL(*this, rejectUnknownDynamicFields()).WillByDefault(Invoke([this] { - return reject_unknown_dynamic_fields_; - })); - ON_CALL(*this, adminAddressPath()).WillByDefault(ReturnRef(admin_address_path_)); - ON_CALL(*this, serviceClusterName()).WillByDefault(ReturnRef(service_cluster_name_)); - ON_CALL(*this, serviceNodeName()).WillByDefault(ReturnRef(service_node_name_)); - ON_CALL(*this, serviceZone()).WillByDefault(ReturnRef(service_zone_name_)); - ON_CALL(*this, logLevel()).WillByDefault(Return(log_level_)); - ON_CALL(*this, logPath()).WillByDefault(ReturnRef(log_path_)); - ON_CALL(*this, 
restartEpoch()).WillByDefault(ReturnPointee(&hot_restart_epoch_)); - ON_CALL(*this, hotRestartDisabled()).WillByDefault(ReturnPointee(&hot_restart_disabled_)); - ON_CALL(*this, signalHandlingEnabled()).WillByDefault(ReturnPointee(&signal_handling_enabled_)); - ON_CALL(*this, mutexTracingEnabled()).WillByDefault(ReturnPointee(&mutex_tracing_enabled_)); - ON_CALL(*this, cpusetThreadsEnabled()).WillByDefault(ReturnPointee(&cpuset_threads_enabled_)); - ON_CALL(*this, disabledExtensions()).WillByDefault(ReturnRef(disabled_extensions_)); - ON_CALL(*this, toCommandLineOptions()).WillByDefault(Invoke([] { - return std::make_unique(); - })); -} -MockOptions::~MockOptions() = default; - -MockConfigTracker::MockConfigTracker() { - ON_CALL(*this, add_(_, _)) - .WillByDefault(Invoke([this](const std::string& key, Cb callback) -> EntryOwner* { - EXPECT_TRUE(config_tracker_callbacks_.find(key) == config_tracker_callbacks_.end()); - config_tracker_callbacks_[key] = callback; - return new MockEntryOwner(); - })); -} -MockConfigTracker::~MockConfigTracker() = default; - -MockAdmin::MockAdmin() { - ON_CALL(*this, getConfigTracker()).WillByDefault(testing::ReturnRef(config_tracker_)); -} -MockAdmin::~MockAdmin() = default; - -MockAdminStream::MockAdminStream() = default; -MockAdminStream::~MockAdminStream() = default; - -MockDrainManager::MockDrainManager() { - ON_CALL(*this, startDrainSequence(_)).WillByDefault(SaveArg<0>(&drain_sequence_completion_)); -} -MockDrainManager::~MockDrainManager() = default; - -MockWatchDog::MockWatchDog() = default; -MockWatchDog::~MockWatchDog() = default; - -MockGuardDog::MockGuardDog() : watch_dog_(new NiceMock()) { - ON_CALL(*this, createWatchDog(_, _)).WillByDefault(Return(watch_dog_)); -} -MockGuardDog::~MockGuardDog() = default; - -MockHotRestart::MockHotRestart() : stats_allocator_(*symbol_table_) { - ON_CALL(*this, logLock()).WillByDefault(ReturnRef(log_lock_)); - ON_CALL(*this, accessLogLock()).WillByDefault(ReturnRef(access_log_lock_)); - 
ON_CALL(*this, statsAllocator()).WillByDefault(ReturnRef(stats_allocator_)); -} -MockHotRestart::~MockHotRestart() = default; - -MockOverloadManager::MockOverloadManager() { - ON_CALL(*this, getThreadLocalOverloadState()).WillByDefault(ReturnRef(overload_state_)); -} -MockOverloadManager::~MockOverloadManager() = default; - -MockListenerComponentFactory::MockListenerComponentFactory() - : socket_(std::make_shared>()) { - ON_CALL(*this, createListenSocket(_, _, _, _)) - .WillByDefault(Invoke([&](Network::Address::InstanceConstSharedPtr, - Network::Address::SocketType, - const Network::Socket::OptionsSharedPtr& options, - const ListenSocketCreationParams&) -> Network::SocketSharedPtr { - if (!Network::Socket::applyOptions(options, *socket_, - envoy::config::core::v3::SocketOption::STATE_PREBIND)) { - throw EnvoyException("MockListenerComponentFactory: Setting socket options failed"); - } - return socket_; - })); -} -MockListenerComponentFactory::~MockListenerComponentFactory() = default; - -MockServerLifecycleNotifier::MockServerLifecycleNotifier() = default; -MockServerLifecycleNotifier::~MockServerLifecycleNotifier() = default; - -MockListenerManager::MockListenerManager() = default; -MockListenerManager::~MockListenerManager() = default; - -MockWorkerFactory::MockWorkerFactory() = default; -MockWorkerFactory::~MockWorkerFactory() = default; - -MockWorker::MockWorker() { - ON_CALL(*this, addListener(_, _, _)) - .WillByDefault( - Invoke([this](absl::optional overridden_listener, - Network::ListenerConfig& config, AddListenerCompletion completion) -> void { - UNREFERENCED_PARAMETER(overridden_listener); - config.listenSocketFactory().getListenSocket(); - EXPECT_EQ(nullptr, add_listener_completion_); - add_listener_completion_ = completion; - })); - - ON_CALL(*this, removeListener(_, _)) - .WillByDefault( - Invoke([this](Network::ListenerConfig&, std::function completion) -> void { - EXPECT_EQ(nullptr, remove_listener_completion_); - remove_listener_completion_ = 
completion; - })); - - ON_CALL(*this, stopListener(_, _)) - .WillByDefault(Invoke([](Network::ListenerConfig&, std::function completion) -> void { - if (completion != nullptr) { - completion(); - } - })); - - ON_CALL(*this, removeFilterChains(_, _, _)) - .WillByDefault(Invoke([this](uint64_t, const std::list&, - std::function completion) -> void { - EXPECT_EQ(nullptr, remove_filter_chains_completion_); - remove_filter_chains_completion_ = completion; - })); -} -MockWorker::~MockWorker() = default; - -MockInstance::MockInstance() - : secret_manager_(std::make_unique(admin_.getConfigTracker())), - cluster_manager_(timeSource()), ssl_context_manager_(timeSource()), - singleton_manager_(new Singleton::ManagerImpl(Thread::threadFactoryForTest())), - grpc_context_(stats_store_.symbolTable()), http_context_(stats_store_.symbolTable()), - server_factory_context_( - std::make_shared>()), - transport_socket_factory_context_( - std::make_shared>()) { - ON_CALL(*this, threadLocal()).WillByDefault(ReturnRef(thread_local_)); - ON_CALL(*this, stats()).WillByDefault(ReturnRef(stats_store_)); - ON_CALL(*this, grpcContext()).WillByDefault(ReturnRef(grpc_context_)); - ON_CALL(*this, httpContext()).WillByDefault(ReturnRef(http_context_)); - ON_CALL(*this, dnsResolver()).WillByDefault(Return(dns_resolver_)); - ON_CALL(*this, api()).WillByDefault(ReturnRef(api_)); - ON_CALL(*this, admin()).WillByDefault(ReturnRef(admin_)); - ON_CALL(*this, clusterManager()).WillByDefault(ReturnRef(cluster_manager_)); - ON_CALL(*this, sslContextManager()).WillByDefault(ReturnRef(ssl_context_manager_)); - ON_CALL(*this, accessLogManager()).WillByDefault(ReturnRef(access_log_manager_)); - ON_CALL(*this, runtime()).WillByDefault(ReturnRef(runtime_loader_)); - ON_CALL(*this, dispatcher()).WillByDefault(ReturnRef(dispatcher_)); - ON_CALL(*this, hotRestart()).WillByDefault(ReturnRef(hot_restart_)); - ON_CALL(*this, random()).WillByDefault(ReturnRef(random_)); - ON_CALL(*this, 
lifecycleNotifier()).WillByDefault(ReturnRef(lifecycle_notifier_)); - ON_CALL(*this, localInfo()).WillByDefault(ReturnRef(local_info_)); - ON_CALL(*this, options()).WillByDefault(ReturnRef(options_)); - ON_CALL(*this, drainManager()).WillByDefault(ReturnRef(drain_manager_)); - ON_CALL(*this, initManager()).WillByDefault(ReturnRef(init_manager_)); - ON_CALL(*this, listenerManager()).WillByDefault(ReturnRef(listener_manager_)); - ON_CALL(*this, mutexTracer()).WillByDefault(Return(nullptr)); - ON_CALL(*this, singletonManager()).WillByDefault(ReturnRef(*singleton_manager_)); - ON_CALL(*this, overloadManager()).WillByDefault(ReturnRef(overload_manager_)); - ON_CALL(*this, messageValidationContext()).WillByDefault(ReturnRef(validation_context_)); - ON_CALL(*this, serverFactoryContext()).WillByDefault(ReturnRef(*server_factory_context_)); - ON_CALL(*this, transportSocketFactoryContext()) - .WillByDefault(ReturnRef(*transport_socket_factory_context_)); -} - -MockInstance::~MockInstance() = default; - -namespace Configuration { - -MockMain::MockMain(int wd_miss, int wd_megamiss, int wd_kill, int wd_multikill) - : wd_miss_(wd_miss), wd_megamiss_(wd_megamiss), wd_kill_(wd_kill), wd_multikill_(wd_multikill) { - ON_CALL(*this, wdMissTimeout()).WillByDefault(Return(wd_miss_)); - ON_CALL(*this, wdMegaMissTimeout()).WillByDefault(Return(wd_megamiss_)); - ON_CALL(*this, wdKillTimeout()).WillByDefault(Return(wd_kill_)); - ON_CALL(*this, wdMultiKillTimeout()).WillByDefault(Return(wd_multikill_)); -} - -MockMain::~MockMain() = default; - -MockServerFactoryContext::MockServerFactoryContext() - : singleton_manager_(new Singleton::ManagerImpl(Thread::threadFactoryForTest())), - grpc_context_(scope_.symbolTable()) { - ON_CALL(*this, clusterManager()).WillByDefault(ReturnRef(cluster_manager_)); - ON_CALL(*this, dispatcher()).WillByDefault(ReturnRef(dispatcher_)); - ON_CALL(*this, drainDecision()).WillByDefault(ReturnRef(drain_manager_)); - ON_CALL(*this, 
localInfo()).WillByDefault(ReturnRef(local_info_)); - ON_CALL(*this, random()).WillByDefault(ReturnRef(random_)); - ON_CALL(*this, runtime()).WillByDefault(ReturnRef(runtime_loader_)); - ON_CALL(*this, scope()).WillByDefault(ReturnRef(scope_)); - ON_CALL(*this, singletonManager()).WillByDefault(ReturnRef(*singleton_manager_)); - ON_CALL(*this, threadLocal()).WillByDefault(ReturnRef(thread_local_)); - ON_CALL(*this, admin()).WillByDefault(ReturnRef(admin_)); - ON_CALL(*this, api()).WillByDefault(ReturnRef(api_)); - ON_CALL(*this, timeSource()).WillByDefault(ReturnRef(time_system_)); - ON_CALL(*this, messageValidationContext()).WillByDefault(ReturnRef(validation_context_)); - ON_CALL(*this, messageValidationVisitor()) - .WillByDefault(ReturnRef(ProtobufMessage::getStrictValidationVisitor())); - ON_CALL(*this, api()).WillByDefault(ReturnRef(api_)); - ON_CALL(*this, drainManager()).WillByDefault(ReturnRef(drain_manager_)); -} -MockServerFactoryContext::~MockServerFactoryContext() = default; - -MockFactoryContext::MockFactoryContext() - : singleton_manager_(new Singleton::ManagerImpl(Thread::threadFactoryForTest())), - grpc_context_(scope_.symbolTable()), http_context_(scope_.symbolTable()) { - ON_CALL(*this, getServerFactoryContext()).WillByDefault(ReturnRef(server_factory_context_)); - ON_CALL(*this, accessLogManager()).WillByDefault(ReturnRef(access_log_manager_)); - ON_CALL(*this, clusterManager()).WillByDefault(ReturnRef(cluster_manager_)); - ON_CALL(*this, dispatcher()).WillByDefault(ReturnRef(dispatcher_)); - ON_CALL(*this, drainDecision()).WillByDefault(ReturnRef(drain_manager_)); - ON_CALL(*this, initManager()).WillByDefault(ReturnRef(init_manager_)); - ON_CALL(*this, lifecycleNotifier()).WillByDefault(ReturnRef(lifecycle_notifier_)); - ON_CALL(*this, localInfo()).WillByDefault(ReturnRef(local_info_)); - ON_CALL(*this, random()).WillByDefault(ReturnRef(random_)); - ON_CALL(*this, runtime()).WillByDefault(ReturnRef(runtime_loader_)); - ON_CALL(*this, 
scope()).WillByDefault(ReturnRef(scope_)); - ON_CALL(*this, singletonManager()).WillByDefault(ReturnRef(*singleton_manager_)); - ON_CALL(*this, threadLocal()).WillByDefault(ReturnRef(thread_local_)); - ON_CALL(*this, admin()).WillByDefault(ReturnRef(admin_)); - ON_CALL(*this, listenerScope()).WillByDefault(ReturnRef(listener_scope_)); - ON_CALL(*this, api()).WillByDefault(ReturnRef(api_)); - ON_CALL(*this, timeSource()).WillByDefault(ReturnRef(time_system_)); - ON_CALL(*this, overloadManager()).WillByDefault(ReturnRef(overload_manager_)); - ON_CALL(*this, messageValidationContext()).WillByDefault(ReturnRef(validation_context_)); - ON_CALL(*this, messageValidationVisitor()) - .WillByDefault(ReturnRef(ProtobufMessage::getStrictValidationVisitor())); - ON_CALL(*this, api()).WillByDefault(ReturnRef(api_)); -} - -MockFactoryContext::~MockFactoryContext() = default; - -MockTransportSocketFactoryContext::MockTransportSocketFactoryContext() - : secret_manager_(std::make_unique(config_tracker_)) { - ON_CALL(*this, clusterManager()).WillByDefault(ReturnRef(cluster_manager_)); - ON_CALL(*this, api()).WillByDefault(ReturnRef(api_)); - ON_CALL(*this, messageValidationVisitor()) - .WillByDefault(ReturnRef(ProtobufMessage::getStrictValidationVisitor())); -} - -MockTransportSocketFactoryContext::~MockTransportSocketFactoryContext() = default; - -MockListenerFactoryContext::MockListenerFactoryContext() = default; -MockListenerFactoryContext::~MockListenerFactoryContext() = default; - -MockHealthCheckerFactoryContext::MockHealthCheckerFactoryContext() { - event_logger_ = new NiceMock(); - ON_CALL(*this, cluster()).WillByDefault(ReturnRef(cluster_)); - ON_CALL(*this, dispatcher()).WillByDefault(ReturnRef(dispatcher_)); - ON_CALL(*this, random()).WillByDefault(ReturnRef(random_)); - ON_CALL(*this, runtime()).WillByDefault(ReturnRef(runtime_)); - ON_CALL(*this, eventLogger_()).WillByDefault(Return(event_logger_)); - ON_CALL(*this, messageValidationVisitor()) - 
.WillByDefault(ReturnRef(ProtobufMessage::getStrictValidationVisitor())); - ON_CALL(*this, api()).WillByDefault(ReturnRef(api_)); -} - -MockHealthCheckerFactoryContext::~MockHealthCheckerFactoryContext() = default; - -MockFilterChainFactoryContext::MockFilterChainFactoryContext() = default; -MockFilterChainFactoryContext::~MockFilterChainFactoryContext() = default; - -MockTracerFactory::MockTracerFactory(const std::string& name) : name_(name) { - ON_CALL(*this, createEmptyConfigProto()).WillByDefault(Invoke([] { - return std::make_unique(); - })); -} -MockTracerFactory::~MockTracerFactory() = default; - -MockTracerFactoryContext::MockTracerFactoryContext() { - ON_CALL(*this, serverFactoryContext()).WillByDefault(ReturnRef(server_factory_context_)); - ON_CALL(*this, messageValidationVisitor()) - .WillByDefault(ReturnRef(ProtobufMessage::getStrictValidationVisitor())); -} - -MockTracerFactoryContext::~MockTracerFactoryContext() = default; -} // namespace Configuration -} // namespace Server -} // namespace Envoy diff --git a/test/mocks/server/mocks.h b/test/mocks/server/mocks.h index 3cc405465e61f..d041412d01d17 100644 --- a/test/mocks/server/mocks.h +++ b/test/mocks/server/mocks.h @@ -1,680 +1,28 @@ #pragma once -#include -#include -#include -#include - -#include "envoy/common/mutex_tracer.h" -#include "envoy/config/bootstrap/v3/bootstrap.pb.h" -#include "envoy/config/core/v3/base.pb.h" -#include "envoy/config/core/v3/config_source.pb.h" -#include "envoy/config/listener/v3/listener.pb.h" -#include "envoy/config/listener/v3/listener_components.pb.h" -#include "envoy/protobuf/message_validator.h" -#include "envoy/server/admin.h" -#include "envoy/server/configuration.h" -#include "envoy/server/drain_manager.h" -#include "envoy/server/filter_config.h" -#include "envoy/server/health_checker_config.h" -#include "envoy/server/instance.h" -#include "envoy/server/options.h" -#include "envoy/server/overload_manager.h" -#include "envoy/server/tracer_config.h" -#include 
"envoy/server/transport_socket_config.h" -#include "envoy/server/worker.h" -#include "envoy/ssl/context_manager.h" -#include "envoy/stats/scope.h" -#include "envoy/thread/thread.h" - -#include "common/grpc/context_impl.h" -#include "common/http/context_impl.h" -#include "common/secret/secret_manager_impl.h" -#include "common/stats/fake_symbol_table_impl.h" - -#include "extensions/transport_sockets/tls/context_manager_impl.h" - -#include "test/mocks/access_log/mocks.h" -#include "test/mocks/api/mocks.h" -#include "test/mocks/event/mocks.h" -#include "test/mocks/http/mocks.h" -#include "test/mocks/init/mocks.h" -#include "test/mocks/local_info/mocks.h" -#include "test/mocks/network/mocks.h" -#include "test/mocks/protobuf/mocks.h" -#include "test/mocks/router/mocks.h" -#include "test/mocks/runtime/mocks.h" -#include "test/mocks/secret/mocks.h" -#include "test/mocks/stats/mocks.h" -#include "test/mocks/thread_local/mocks.h" -#include "test/mocks/tracing/mocks.h" -#include "test/mocks/upstream/mocks.h" -#include "test/test_common/test_time_system.h" - -#include "absl/strings/string_view.h" -#include "gmock/gmock.h" -#include "spdlog/spdlog.h" - -namespace Envoy { -namespace Server { - -namespace Configuration { -class MockServerFactoryContext; -class MockTransportSocketFactoryContext; -} // namespace Configuration - -class MockOptions : public Options { -public: - MockOptions() : MockOptions(std::string()) {} - MockOptions(const std::string& config_path); - ~MockOptions() override; - - MOCK_METHOD(uint64_t, baseId, (), (const)); - MOCK_METHOD(uint32_t, concurrency, (), (const)); - MOCK_METHOD(const std::string&, configPath, (), (const)); - MOCK_METHOD(const envoy::config::bootstrap::v3::Bootstrap&, configProto, (), (const)); - MOCK_METHOD(const std::string&, configYaml, (), (const)); - MOCK_METHOD(bool, allowUnknownStaticFields, (), (const)); - MOCK_METHOD(bool, rejectUnknownDynamicFields, (), (const)); - MOCK_METHOD(const std::string&, adminAddressPath, (), (const)); - 
MOCK_METHOD(Network::Address::IpVersion, localAddressIpVersion, (), (const)); - MOCK_METHOD(std::chrono::seconds, drainTime, (), (const)); - MOCK_METHOD(spdlog::level::level_enum, logLevel, (), (const)); - MOCK_METHOD((const std::vector>&), - componentLogLevels, (), (const)); - MOCK_METHOD(const std::string&, logFormat, (), (const)); - MOCK_METHOD(bool, logFormatEscaped, (), (const)); - MOCK_METHOD(const std::string&, logPath, (), (const)); - MOCK_METHOD(std::chrono::seconds, parentShutdownTime, (), (const)); - MOCK_METHOD(uint64_t, restartEpoch, (), (const)); - MOCK_METHOD(std::chrono::milliseconds, fileFlushIntervalMsec, (), (const)); - MOCK_METHOD(Mode, mode, (), (const)); - MOCK_METHOD(const std::string&, serviceClusterName, (), (const)); - MOCK_METHOD(const std::string&, serviceNodeName, (), (const)); - MOCK_METHOD(const std::string&, serviceZone, (), (const)); - MOCK_METHOD(bool, hotRestartDisabled, (), (const)); - MOCK_METHOD(bool, signalHandlingEnabled, (), (const)); - MOCK_METHOD(bool, mutexTracingEnabled, (), (const)); - MOCK_METHOD(bool, fakeSymbolTableEnabled, (), (const)); - MOCK_METHOD(bool, cpusetThreadsEnabled, (), (const)); - MOCK_METHOD(const std::vector&, disabledExtensions, (), (const)); - MOCK_METHOD(Server::CommandLineOptionsPtr, toCommandLineOptions, (), (const)); - - std::string config_path_; - envoy::config::bootstrap::v3::Bootstrap config_proto_; - std::string config_yaml_; - bool allow_unknown_static_fields_{}; - bool reject_unknown_dynamic_fields_{}; - std::string admin_address_path_; - std::string service_cluster_name_; - std::string service_node_name_; - std::string service_zone_name_; - spdlog::level::level_enum log_level_{spdlog::level::trace}; - std::string log_path_; - uint32_t concurrency_{1}; - uint64_t hot_restart_epoch_{}; - bool hot_restart_disabled_{}; - bool signal_handling_enabled_{true}; - bool mutex_tracing_enabled_{}; - bool cpuset_threads_enabled_{}; - std::vector disabled_extensions_; -}; - -class MockConfigTracker : 
public ConfigTracker { -public: - MockConfigTracker(); - ~MockConfigTracker() override; - - struct MockEntryOwner : public EntryOwner {}; - - MOCK_METHOD(EntryOwner*, add_, (std::string, Cb)); - - // Server::ConfigTracker - MOCK_METHOD(const CbsMap&, getCallbacksMap, (), (const)); - EntryOwnerPtr add(const std::string& key, Cb callback) override { - return EntryOwnerPtr{add_(key, std::move(callback))}; - } - - std::unordered_map config_tracker_callbacks_; -}; - -class MockAdmin : public Admin { -public: - MockAdmin(); - ~MockAdmin() override; - - // Server::Admin - MOCK_METHOD(bool, addHandler, - (const std::string& prefix, const std::string& help_text, HandlerCb callback, - bool removable, bool mutates_server_state)); - MOCK_METHOD(bool, removeHandler, (const std::string& prefix)); - MOCK_METHOD(Network::Socket&, socket, ()); - MOCK_METHOD(ConfigTracker&, getConfigTracker, ()); - MOCK_METHOD(void, startHttpListener, - (const std::string& access_log_path, const std::string& address_out_path, - Network::Address::InstanceConstSharedPtr address, - const Network::Socket::OptionsSharedPtr& socket_options, - Stats::ScopePtr&& listener_scope)); - MOCK_METHOD(Http::Code, request, - (absl::string_view path_and_query, absl::string_view method, - Http::ResponseHeaderMap& response_headers, std::string& body)); - MOCK_METHOD(void, addListenerToHandler, (Network::ConnectionHandler * handler)); - - NiceMock config_tracker_; -}; - -class MockAdminStream : public AdminStream { -public: - MockAdminStream(); - ~MockAdminStream() override; - - MOCK_METHOD(void, setEndStreamOnComplete, (bool)); - MOCK_METHOD(void, addOnDestroyCallback, (std::function)); - MOCK_METHOD(const Buffer::Instance*, getRequestBody, (), (const)); - MOCK_METHOD(Http::RequestHeaderMap&, getRequestHeaders, (), (const)); - MOCK_METHOD(NiceMock&, getDecoderFilterCallbacks, (), - (const)); - MOCK_METHOD(Http::Http1StreamEncoderOptionsOptRef, http1StreamEncoderOptions, ()); -}; - -class MockDrainManager : public 
DrainManager { -public: - MockDrainManager(); - ~MockDrainManager() override; - - // Server::DrainManager - MOCK_METHOD(bool, drainClose, (), (const)); - MOCK_METHOD(void, startDrainSequence, (std::function completion)); - MOCK_METHOD(void, startParentShutdownSequence, ()); - - std::function drain_sequence_completion_; -}; - -class MockWatchDog : public WatchDog { -public: - MockWatchDog(); - ~MockWatchDog() override; - - // Server::WatchDog - MOCK_METHOD(void, startWatchdog, (Event::Dispatcher & dispatcher)); - MOCK_METHOD(void, touch, ()); - MOCK_METHOD(Thread::ThreadId, threadId, (), (const)); - MOCK_METHOD(MonotonicTime, lastTouchTime, (), (const)); -}; - -class MockGuardDog : public GuardDog { -public: - MockGuardDog(); - ~MockGuardDog() override; - - // Server::GuardDog - MOCK_METHOD(WatchDogSharedPtr, createWatchDog, - (Thread::ThreadId thread_id, const std::string& thread_name)); - MOCK_METHOD(void, stopWatching, (WatchDogSharedPtr wd)); - - std::shared_ptr watch_dog_; -}; - -class MockHotRestart : public HotRestart { -public: - MockHotRestart(); - ~MockHotRestart() override; - - // Server::HotRestart - MOCK_METHOD(void, drainParentListeners, ()); - MOCK_METHOD(int, duplicateParentListenSocket, (const std::string& address)); - MOCK_METHOD(std::unique_ptr, getParentStats, ()); - MOCK_METHOD(void, initialize, (Event::Dispatcher & dispatcher, Server::Instance& server)); - MOCK_METHOD(void, sendParentAdminShutdownRequest, (time_t & original_start_time)); - MOCK_METHOD(void, sendParentTerminateRequest, ()); - MOCK_METHOD(ServerStatsFromParent, mergeParentStatsIfAny, (Stats::StoreRoot & stats_store)); - MOCK_METHOD(void, shutdown, ()); - MOCK_METHOD(std::string, version, ()); - MOCK_METHOD(Thread::BasicLockable&, logLock, ()); - MOCK_METHOD(Thread::BasicLockable&, accessLogLock, ()); - MOCK_METHOD(Stats::Allocator&, statsAllocator, ()); - -private: - Stats::TestSymbolTable symbol_table_; - Thread::MutexBasicLockable log_lock_; - Thread::MutexBasicLockable 
access_log_lock_; - Stats::AllocatorImpl stats_allocator_; -}; - -class MockListenerComponentFactory : public ListenerComponentFactory { -public: - MockListenerComponentFactory(); - ~MockListenerComponentFactory() override; - - DrainManagerPtr - createDrainManager(envoy::config::listener::v3::Listener::DrainType drain_type) override { - return DrainManagerPtr{createDrainManager_(drain_type)}; - } - LdsApiPtr createLdsApi(const envoy::config::core::v3::ConfigSource& lds_config) override { - return LdsApiPtr{createLdsApi_(lds_config)}; - } - - MOCK_METHOD(LdsApi*, createLdsApi_, (const envoy::config::core::v3::ConfigSource& lds_config)); - MOCK_METHOD(std::vector, createNetworkFilterFactoryList, - (const Protobuf::RepeatedPtrField& filters, - Configuration::FilterChainFactoryContext& filter_chain_factory_context)); - MOCK_METHOD(std::vector, createListenerFilterFactoryList, - (const Protobuf::RepeatedPtrField&, - Configuration::ListenerFactoryContext& context)); - MOCK_METHOD(std::vector, createUdpListenerFilterFactoryList, - (const Protobuf::RepeatedPtrField&, - Configuration::ListenerFactoryContext& context)); - MOCK_METHOD(Network::SocketSharedPtr, createListenSocket, - (Network::Address::InstanceConstSharedPtr address, - Network::Address::SocketType socket_type, - const Network::Socket::OptionsSharedPtr& options, - const ListenSocketCreationParams& params)); - MOCK_METHOD(DrainManager*, createDrainManager_, - (envoy::config::listener::v3::Listener::DrainType drain_type)); - MOCK_METHOD(uint64_t, nextListenerTag, ()); - - std::shared_ptr socket_; -}; - -class MockListenerManager : public ListenerManager { -public: - MockListenerManager(); - ~MockListenerManager() override; - - MOCK_METHOD(bool, addOrUpdateListener, - (const envoy::config::listener::v3::Listener& config, const std::string& version_info, - bool modifiable)); - MOCK_METHOD(void, createLdsApi, (const envoy::config::core::v3::ConfigSource& lds_config)); - MOCK_METHOD(std::vector>, listeners, ()); - 
MOCK_METHOD(uint64_t, numConnections, (), (const)); - MOCK_METHOD(bool, removeListener, (const std::string& listener_name)); - MOCK_METHOD(void, startWorkers, (GuardDog & guard_dog)); - MOCK_METHOD(void, stopListeners, (StopListenersType listeners_type)); - MOCK_METHOD(void, stopWorkers, ()); - MOCK_METHOD(void, beginListenerUpdate, ()); - MOCK_METHOD(void, endListenerUpdate, (ListenerManager::FailureStates &&)); - MOCK_METHOD(ApiListenerOptRef, apiListener, ()); -}; - -class MockServerLifecycleNotifier : public ServerLifecycleNotifier { -public: - MockServerLifecycleNotifier(); - ~MockServerLifecycleNotifier() override; - - MOCK_METHOD(ServerLifecycleNotifier::HandlePtr, registerCallback, (Stage, StageCallback)); - MOCK_METHOD(ServerLifecycleNotifier::HandlePtr, registerCallback, - (Stage, StageCallbackWithCompletion)); -}; - -class MockWorkerFactory : public WorkerFactory { -public: - MockWorkerFactory(); - ~MockWorkerFactory() override; - - // Server::WorkerFactory - WorkerPtr createWorker(OverloadManager&, const std::string&) override { - return WorkerPtr{createWorker_()}; - } - - MOCK_METHOD(Worker*, createWorker_, ()); -}; - -class MockWorker : public Worker { -public: - MockWorker(); - ~MockWorker() override; - - void callAddCompletion(bool success) { - EXPECT_NE(nullptr, add_listener_completion_); - add_listener_completion_(success); - add_listener_completion_ = nullptr; - } - - void callRemovalCompletion() { - EXPECT_NE(nullptr, remove_listener_completion_); - remove_listener_completion_(); - remove_listener_completion_ = nullptr; - } - - void callDrainFilterChainsComplete() { - EXPECT_NE(nullptr, remove_filter_chains_completion_); - remove_filter_chains_completion_(); - remove_filter_chains_completion_ = nullptr; - } - - // Server::Worker - MOCK_METHOD(void, addListener, - (absl::optional overridden_listener, Network::ListenerConfig& listener, - AddListenerCompletion completion)); - MOCK_METHOD(uint64_t, numConnections, (), (const)); - MOCK_METHOD(void, 
removeListener, - (Network::ListenerConfig & listener, std::function completion)); - MOCK_METHOD(void, start, (GuardDog & guard_dog)); - MOCK_METHOD(void, initializeStats, (Stats::Scope & scope)); - MOCK_METHOD(void, stop, ()); - MOCK_METHOD(void, stopListener, - (Network::ListenerConfig & listener, std::function completion)); - MOCK_METHOD(void, removeFilterChains, - (uint64_t listener_tag, const std::list& filter_chains, - std::function completion)); - - AddListenerCompletion add_listener_completion_; - std::function remove_listener_completion_; - std::function remove_filter_chains_completion_; -}; - -class MockOverloadManager : public OverloadManager { -public: - MockOverloadManager(); - ~MockOverloadManager() override; - - // OverloadManager - MOCK_METHOD(void, start, ()); - MOCK_METHOD(bool, registerForAction, - (const std::string& action, Event::Dispatcher& dispatcher, - OverloadActionCb callback)); - MOCK_METHOD(ThreadLocalOverloadState&, getThreadLocalOverloadState, ()); - - ThreadLocalOverloadState overload_state_; -}; - -class MockInstance : public Instance { -public: - MockInstance(); - ~MockInstance() override; - - Secret::SecretManager& secretManager() override { return *(secret_manager_.get()); } - - MOCK_METHOD(Admin&, admin, ()); - MOCK_METHOD(Api::Api&, api, ()); - MOCK_METHOD(Upstream::ClusterManager&, clusterManager, ()); - MOCK_METHOD(Ssl::ContextManager&, sslContextManager, ()); - MOCK_METHOD(Event::Dispatcher&, dispatcher, ()); - MOCK_METHOD(Network::DnsResolverSharedPtr, dnsResolver, ()); - MOCK_METHOD(void, drainListeners, ()); - MOCK_METHOD(DrainManager&, drainManager, ()); - MOCK_METHOD(AccessLog::AccessLogManager&, accessLogManager, ()); - MOCK_METHOD(void, failHealthcheck, (bool fail)); - MOCK_METHOD(void, exportStatsToChild, (envoy::HotRestartMessage::Reply::Stats*)); - MOCK_METHOD(bool, healthCheckFailed, ()); - MOCK_METHOD(HotRestart&, hotRestart, ()); - MOCK_METHOD(Init::Manager&, initManager, ()); - 
MOCK_METHOD(ServerLifecycleNotifier&, lifecycleNotifier, ()); - MOCK_METHOD(ListenerManager&, listenerManager, ()); - MOCK_METHOD(Envoy::MutexTracer*, mutexTracer, ()); - MOCK_METHOD(const Options&, options, ()); - MOCK_METHOD(OverloadManager&, overloadManager, ()); - MOCK_METHOD(Runtime::RandomGenerator&, random, ()); - MOCK_METHOD(Runtime::Loader&, runtime, ()); - MOCK_METHOD(void, shutdown, ()); - MOCK_METHOD(bool, isShutdown, ()); - MOCK_METHOD(void, shutdownAdmin, ()); - MOCK_METHOD(Singleton::Manager&, singletonManager, ()); - MOCK_METHOD(time_t, startTimeCurrentEpoch, ()); - MOCK_METHOD(time_t, startTimeFirstEpoch, ()); - MOCK_METHOD(Stats::Store&, stats, ()); - MOCK_METHOD(Grpc::Context&, grpcContext, ()); - MOCK_METHOD(Http::Context&, httpContext, ()); - MOCK_METHOD(ProcessContextOptRef, processContext, ()); - MOCK_METHOD(ThreadLocal::Instance&, threadLocal, ()); - MOCK_METHOD(const LocalInfo::LocalInfo&, localInfo, (), (const)); - MOCK_METHOD(std::chrono::milliseconds, statsFlushInterval, (), (const)); - MOCK_METHOD(void, flushStats, ()); - MOCK_METHOD(ProtobufMessage::ValidationContext&, messageValidationContext, ()); - MOCK_METHOD(Configuration::ServerFactoryContext&, serverFactoryContext, ()); - MOCK_METHOD(Configuration::TransportSocketFactoryContext&, transportSocketFactoryContext, ()); - - void setDefaultTracingConfig(const envoy::config::trace::v3::Tracing& tracing_config) override { - http_context_.setDefaultTracingConfig(tracing_config); - } - - TimeSource& timeSource() override { return time_system_; } - - testing::NiceMock thread_local_; - NiceMock stats_store_; - std::shared_ptr> dns_resolver_{ - new testing::NiceMock()}; - testing::NiceMock api_; - testing::NiceMock admin_; - Event::GlobalTimeSystem time_system_; - std::unique_ptr secret_manager_; - testing::NiceMock cluster_manager_; - Thread::MutexBasicLockable access_log_lock_; - testing::NiceMock runtime_loader_; - Extensions::TransportSockets::Tls::ContextManagerImpl 
ssl_context_manager_; - testing::NiceMock dispatcher_; - testing::NiceMock drain_manager_; - testing::NiceMock access_log_manager_; - testing::NiceMock hot_restart_; - testing::NiceMock options_; - testing::NiceMock random_; - testing::NiceMock lifecycle_notifier_; - testing::NiceMock local_info_; - testing::NiceMock init_manager_; - testing::NiceMock listener_manager_; - testing::NiceMock overload_manager_; - Singleton::ManagerPtr singleton_manager_; - Grpc::ContextImpl grpc_context_; - Http::ContextImpl http_context_; - testing::NiceMock validation_context_; - std::shared_ptr> - server_factory_context_; - std::shared_ptr> - transport_socket_factory_context_; -}; - -namespace Configuration { - -class MockMain : public Main { -public: - MockMain() : MockMain(0, 0, 0, 0) {} - MockMain(int wd_miss, int wd_megamiss, int wd_kill, int wd_multikill); - ~MockMain() override; - - MOCK_METHOD(Upstream::ClusterManager*, clusterManager, ()); - MOCK_METHOD(std::list&, statsSinks, ()); - MOCK_METHOD(std::chrono::milliseconds, statsFlushInterval, (), (const)); - MOCK_METHOD(std::chrono::milliseconds, wdMissTimeout, (), (const)); - MOCK_METHOD(std::chrono::milliseconds, wdMegaMissTimeout, (), (const)); - MOCK_METHOD(std::chrono::milliseconds, wdKillTimeout, (), (const)); - MOCK_METHOD(std::chrono::milliseconds, wdMultiKillTimeout, (), (const)); - - std::chrono::milliseconds wd_miss_; - std::chrono::milliseconds wd_megamiss_; - std::chrono::milliseconds wd_kill_; - std::chrono::milliseconds wd_multikill_; -}; - -class MockServerFactoryContext : public virtual ServerFactoryContext { -public: - MockServerFactoryContext(); - ~MockServerFactoryContext() override; - - MOCK_METHOD(Upstream::ClusterManager&, clusterManager, ()); - MOCK_METHOD(Event::Dispatcher&, dispatcher, ()); - MOCK_METHOD(const Network::DrainDecision&, drainDecision, ()); - MOCK_METHOD(const LocalInfo::LocalInfo&, localInfo, (), (const)); - MOCK_METHOD(Envoy::Runtime::RandomGenerator&, random, ()); - 
MOCK_METHOD(Envoy::Runtime::Loader&, runtime, ()); - MOCK_METHOD(Stats::Scope&, scope, ()); - MOCK_METHOD(Singleton::Manager&, singletonManager, ()); - MOCK_METHOD(ThreadLocal::Instance&, threadLocal, ()); - MOCK_METHOD(Server::Admin&, admin, ()); - MOCK_METHOD(TimeSource&, timeSource, ()); - Event::TestTimeSystem& timeSystem() { return time_system_; } - MOCK_METHOD(ProtobufMessage::ValidationContext&, messageValidationContext, ()); - MOCK_METHOD(ProtobufMessage::ValidationVisitor&, messageValidationVisitor, ()); - MOCK_METHOD(Api::Api&, api, ()); - Grpc::Context& grpcContext() override { return grpc_context_; } - MOCK_METHOD(Server::DrainManager&, drainManager, ()); - - testing::NiceMock cluster_manager_; - testing::NiceMock dispatcher_; - testing::NiceMock drain_manager_; - testing::NiceMock local_info_; - testing::NiceMock random_; - testing::NiceMock runtime_loader_; - testing::NiceMock scope_; - testing::NiceMock thread_local_; - testing::NiceMock validation_context_; - Singleton::ManagerPtr singleton_manager_; - testing::NiceMock admin_; - Event::GlobalTimeSystem time_system_; - testing::NiceMock api_; - Grpc::ContextImpl grpc_context_; -}; - -class MockFactoryContext : public virtual FactoryContext { -public: - MockFactoryContext(); - ~MockFactoryContext() override; - - MOCK_METHOD(ServerFactoryContext&, getServerFactoryContext, (), (const)); - MOCK_METHOD(TransportSocketFactoryContext&, getTransportSocketFactoryContext, (), (const)); - MOCK_METHOD(AccessLog::AccessLogManager&, accessLogManager, ()); - MOCK_METHOD(Upstream::ClusterManager&, clusterManager, ()); - MOCK_METHOD(Event::Dispatcher&, dispatcher, ()); - MOCK_METHOD(const Network::DrainDecision&, drainDecision, ()); - MOCK_METHOD(bool, healthCheckFailed, ()); - MOCK_METHOD(Init::Manager&, initManager, ()); - MOCK_METHOD(ServerLifecycleNotifier&, lifecycleNotifier, ()); - MOCK_METHOD(Envoy::Runtime::RandomGenerator&, random, ()); - MOCK_METHOD(Envoy::Runtime::Loader&, runtime, ()); - 
MOCK_METHOD(Stats::Scope&, scope, ()); - MOCK_METHOD(Singleton::Manager&, singletonManager, ()); - MOCK_METHOD(OverloadManager&, overloadManager, ()); - MOCK_METHOD(ThreadLocal::Instance&, threadLocal, ()); - MOCK_METHOD(Server::Admin&, admin, ()); - MOCK_METHOD(Stats::Scope&, listenerScope, ()); - MOCK_METHOD(const LocalInfo::LocalInfo&, localInfo, (), (const)); - MOCK_METHOD(const envoy::config::core::v3::Metadata&, listenerMetadata, (), (const)); - MOCK_METHOD(envoy::config::core::v3::TrafficDirection, direction, (), (const)); - MOCK_METHOD(TimeSource&, timeSource, ()); - Event::TestTimeSystem& timeSystem() { return time_system_; } - Grpc::Context& grpcContext() override { return grpc_context_; } - Http::Context& httpContext() override { return http_context_; } - MOCK_METHOD(ProcessContextOptRef, processContext, ()); - MOCK_METHOD(ProtobufMessage::ValidationContext&, messageValidationContext, ()); - MOCK_METHOD(ProtobufMessage::ValidationVisitor&, messageValidationVisitor, ()); - MOCK_METHOD(Api::Api&, api, ()); - - testing::NiceMock server_factory_context_; - testing::NiceMock access_log_manager_; - testing::NiceMock cluster_manager_; - testing::NiceMock dispatcher_; - testing::NiceMock drain_manager_; - testing::NiceMock init_manager_; - testing::NiceMock lifecycle_notifier_; - testing::NiceMock local_info_; - testing::NiceMock random_; - testing::NiceMock runtime_loader_; - testing::NiceMock scope_; - testing::NiceMock thread_local_; - Singleton::ManagerPtr singleton_manager_; - testing::NiceMock admin_; - Stats::IsolatedStoreImpl listener_scope_; - Event::GlobalTimeSystem time_system_; - testing::NiceMock validation_context_; - testing::NiceMock overload_manager_; - Grpc::ContextImpl grpc_context_; - Http::ContextImpl http_context_; - testing::NiceMock api_; -}; - -class MockTransportSocketFactoryContext : public TransportSocketFactoryContext { -public: - MockTransportSocketFactoryContext(); - ~MockTransportSocketFactoryContext() override; - - 
Secret::SecretManager& secretManager() override { return *(secret_manager_.get()); } - - MOCK_METHOD(Server::Admin&, admin, ()); - MOCK_METHOD(Ssl::ContextManager&, sslContextManager, ()); - MOCK_METHOD(Stats::Scope&, scope, ()); - MOCK_METHOD(Upstream::ClusterManager&, clusterManager, ()); - MOCK_METHOD(const LocalInfo::LocalInfo&, localInfo, (), (const)); - MOCK_METHOD(Event::Dispatcher&, dispatcher, ()); - MOCK_METHOD(Envoy::Runtime::RandomGenerator&, random, ()); - MOCK_METHOD(Stats::Store&, stats, ()); - MOCK_METHOD(Init::Manager*, initManager, ()); - MOCK_METHOD(Singleton::Manager&, singletonManager, ()); - MOCK_METHOD(ThreadLocal::SlotAllocator&, threadLocal, ()); - MOCK_METHOD(ProtobufMessage::ValidationVisitor&, messageValidationVisitor, ()); - MOCK_METHOD(Api::Api&, api, ()); - - testing::NiceMock cluster_manager_; - testing::NiceMock api_; - testing::NiceMock config_tracker_; - std::unique_ptr secret_manager_; -}; - -class MockListenerFactoryContext : public MockFactoryContext, public ListenerFactoryContext { -public: - MockListenerFactoryContext(); - ~MockListenerFactoryContext() override; - - const Network::ListenerConfig& listenerConfig() const override { return listener_config_; } - MOCK_METHOD(const Network::ListenerConfig&, listenerConfig_, (), (const)); - - Network::MockListenerConfig listener_config_; -}; - -class MockHealthCheckerFactoryContext : public virtual HealthCheckerFactoryContext { -public: - MockHealthCheckerFactoryContext(); - ~MockHealthCheckerFactoryContext() override; - - MOCK_METHOD(Upstream::Cluster&, cluster, ()); - MOCK_METHOD(Event::Dispatcher&, dispatcher, ()); - MOCK_METHOD(Envoy::Runtime::RandomGenerator&, random, ()); - MOCK_METHOD(Envoy::Runtime::Loader&, runtime, ()); - MOCK_METHOD(Upstream::HealthCheckEventLogger*, eventLogger_, ()); - MOCK_METHOD(ProtobufMessage::ValidationVisitor&, messageValidationVisitor, ()); - MOCK_METHOD(Api::Api&, api, ()); - Upstream::HealthCheckEventLoggerPtr eventLogger() override { - return 
Upstream::HealthCheckEventLoggerPtr(eventLogger_()); - } - - testing::NiceMock cluster_; - testing::NiceMock dispatcher_; - testing::NiceMock random_; - testing::NiceMock runtime_; - testing::NiceMock* event_logger_{}; - testing::NiceMock api_{}; -}; - -class MockFilterChainFactoryContext : public MockFactoryContext, public FilterChainFactoryContext { -public: - MockFilterChainFactoryContext(); - ~MockFilterChainFactoryContext() override; -}; - -class MockTracerFactory : public TracerFactory { -public: - explicit MockTracerFactory(const std::string& name); - ~MockTracerFactory() override; - - std::string name() const override { return name_; } - - MOCK_METHOD(ProtobufTypes::MessagePtr, createEmptyConfigProto, ()); - MOCK_METHOD(Tracing::HttpTracerSharedPtr, createHttpTracer, - (const Protobuf::Message& config, TracerFactoryContext& context)); - -private: - std::string name_; -}; - -class MockTracerFactoryContext : public TracerFactoryContext { -public: - MockTracerFactoryContext(); - ~MockTracerFactoryContext() override; - - MOCK_METHOD(ServerFactoryContext&, serverFactoryContext, ()); - MOCK_METHOD(ProtobufMessage::ValidationVisitor&, messageValidationVisitor, ()); - - testing::NiceMock server_factory_context_; -}; - -} // namespace Configuration -} // namespace Server -} // namespace Envoy +// NOLINT(namespace-envoy) + +#include "admin.h" +#include "admin_stream.h" +#include "bootstrap_extension_factory.h" +#include "config_tracker.h" +#include "drain_manager.h" +#include "factory_context.h" +#include "filter_chain_factory_context.h" +#include "guard_dog.h" +#include "health_checker_factory_context.h" +#include "hot_restart.h" +#include "instance.h" +#include "listener_component_factory.h" +#include "listener_factory_context.h" +#include "listener_manager.h" +#include "main.h" +#include "options.h" +#include "overload_manager.h" +#include "server_lifecycle_notifier.h" +#include "tracer_factory.h" +#include "tracer_factory_context.h" +#include 
"transport_socket_factory_context.h" +#include "watch_dog.h" +#include "worker.h" +#include "worker_factory.h" diff --git a/test/mocks/server/options.cc b/test/mocks/server/options.cc new file mode 100644 index 0000000000000..c407ab6ef9074 --- /dev/null +++ b/test/mocks/server/options.cc @@ -0,0 +1,51 @@ +#include "options.h" + +#include "envoy/admin/v3/server_info.pb.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Server { + +using ::testing::Invoke; +using ::testing::Return; +using ::testing::ReturnPointee; +using ::testing::ReturnRef; + +MockOptions::MockOptions(const std::string& config_path) : config_path_(config_path) { + ON_CALL(*this, concurrency()).WillByDefault(ReturnPointee(&concurrency_)); + ON_CALL(*this, configPath()).WillByDefault(ReturnRef(config_path_)); + ON_CALL(*this, configProto()).WillByDefault(ReturnRef(config_proto_)); + ON_CALL(*this, configYaml()).WillByDefault(ReturnRef(config_yaml_)); + ON_CALL(*this, bootstrapVersion()).WillByDefault(ReturnRef(bootstrap_version_)); + ON_CALL(*this, allowUnknownStaticFields()).WillByDefault(Invoke([this] { + return allow_unknown_static_fields_; + })); + ON_CALL(*this, rejectUnknownDynamicFields()).WillByDefault(Invoke([this] { + return reject_unknown_dynamic_fields_; + })); + ON_CALL(*this, ignoreUnknownDynamicFields()).WillByDefault(Invoke([this] { + return ignore_unknown_dynamic_fields_; + })); + ON_CALL(*this, adminAddressPath()).WillByDefault(ReturnRef(admin_address_path_)); + ON_CALL(*this, serviceClusterName()).WillByDefault(ReturnRef(service_cluster_name_)); + ON_CALL(*this, serviceNodeName()).WillByDefault(ReturnRef(service_node_name_)); + ON_CALL(*this, serviceZone()).WillByDefault(ReturnRef(service_zone_name_)); + ON_CALL(*this, logLevel()).WillByDefault(Return(log_level_)); + ON_CALL(*this, logPath()).WillByDefault(ReturnRef(log_path_)); + ON_CALL(*this, restartEpoch()).WillByDefault(ReturnPointee(&hot_restart_epoch_)); + ON_CALL(*this, 
hotRestartDisabled()).WillByDefault(ReturnPointee(&hot_restart_disabled_)); + ON_CALL(*this, signalHandlingEnabled()).WillByDefault(ReturnPointee(&signal_handling_enabled_)); + ON_CALL(*this, mutexTracingEnabled()).WillByDefault(ReturnPointee(&mutex_tracing_enabled_)); + ON_CALL(*this, cpusetThreadsEnabled()).WillByDefault(ReturnPointee(&cpuset_threads_enabled_)); + ON_CALL(*this, disabledExtensions()).WillByDefault(ReturnRef(disabled_extensions_)); + ON_CALL(*this, toCommandLineOptions()).WillByDefault(Invoke([] { + return std::make_unique(); + })); +} + +MockOptions::~MockOptions() = default; + +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/options.h b/test/mocks/server/options.h new file mode 100644 index 0000000000000..31a6112dca358 --- /dev/null +++ b/test/mocks/server/options.h @@ -0,0 +1,76 @@ +#pragma once + +#include "envoy/config/bootstrap/v3/bootstrap.pb.h" +#include "envoy/server/options.h" + +#include "absl/strings/string_view.h" +#include "gmock/gmock.h" +#include "spdlog/spdlog.h" + +namespace Envoy { +namespace Server { +class MockOptions : public Options { +public: + MockOptions() : MockOptions(std::string()) {} + MockOptions(const std::string& config_path); + ~MockOptions() override; + + MOCK_METHOD(uint64_t, baseId, (), (const)); + MOCK_METHOD(bool, useDynamicBaseId, (), (const)); + MOCK_METHOD(const std::string&, baseIdPath, (), (const)); + MOCK_METHOD(uint32_t, concurrency, (), (const)); + MOCK_METHOD(const std::string&, configPath, (), (const)); + MOCK_METHOD(const envoy::config::bootstrap::v3::Bootstrap&, configProto, (), (const)); + MOCK_METHOD(const std::string&, configYaml, (), (const)); + MOCK_METHOD(const absl::optional&, bootstrapVersion, (), (const)); + MOCK_METHOD(bool, allowUnknownStaticFields, (), (const)); + MOCK_METHOD(bool, rejectUnknownDynamicFields, (), (const)); + MOCK_METHOD(bool, ignoreUnknownDynamicFields, (), (const)); + MOCK_METHOD(const std::string&, adminAddressPath, (), (const)); + 
MOCK_METHOD(Network::Address::IpVersion, localAddressIpVersion, (), (const)); + MOCK_METHOD(std::chrono::seconds, drainTime, (), (const)); + MOCK_METHOD(std::chrono::seconds, parentShutdownTime, (), (const)); + MOCK_METHOD(Server::DrainStrategy, drainStrategy, (), (const)); + MOCK_METHOD(spdlog::level::level_enum, logLevel, (), (const)); + MOCK_METHOD((const std::vector>&), + componentLogLevels, (), (const)); + MOCK_METHOD(const std::string&, logFormat, (), (const)); + MOCK_METHOD(bool, logFormatEscaped, (), (const)); + MOCK_METHOD(const std::string&, logPath, (), (const)); + MOCK_METHOD(uint64_t, restartEpoch, (), (const)); + MOCK_METHOD(std::chrono::milliseconds, fileFlushIntervalMsec, (), (const)); + MOCK_METHOD(Mode, mode, (), (const)); + MOCK_METHOD(const std::string&, serviceClusterName, (), (const)); + MOCK_METHOD(const std::string&, serviceNodeName, (), (const)); + MOCK_METHOD(const std::string&, serviceZone, (), (const)); + MOCK_METHOD(bool, hotRestartDisabled, (), (const)); + MOCK_METHOD(bool, signalHandlingEnabled, (), (const)); + MOCK_METHOD(bool, mutexTracingEnabled, (), (const)); + MOCK_METHOD(bool, fakeSymbolTableEnabled, (), (const)); + MOCK_METHOD(bool, cpusetThreadsEnabled, (), (const)); + MOCK_METHOD(const std::vector&, disabledExtensions, (), (const)); + MOCK_METHOD(Server::CommandLineOptionsPtr, toCommandLineOptions, (), (const)); + + std::string config_path_; + envoy::config::bootstrap::v3::Bootstrap config_proto_; + std::string config_yaml_; + absl::optional bootstrap_version_; + bool allow_unknown_static_fields_{}; + bool reject_unknown_dynamic_fields_{}; + bool ignore_unknown_dynamic_fields_{}; + std::string admin_address_path_; + std::string service_cluster_name_; + std::string service_node_name_; + std::string service_zone_name_; + spdlog::level::level_enum log_level_{spdlog::level::trace}; + std::string log_path_; + uint32_t concurrency_{1}; + uint64_t hot_restart_epoch_{}; + bool hot_restart_disabled_{}; + bool 
signal_handling_enabled_{true}; + bool mutex_tracing_enabled_{}; + bool cpuset_threads_enabled_{}; + std::vector disabled_extensions_; +}; +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/overload_manager.cc b/test/mocks/server/overload_manager.cc new file mode 100644 index 0000000000000..d0fd9b545ec62 --- /dev/null +++ b/test/mocks/server/overload_manager.cc @@ -0,0 +1,25 @@ +#include "overload_manager.h" + +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Server { + +using ::testing::ReturnRef; + +MockThreadLocalOverloadState::MockThreadLocalOverloadState() + : disabled_state_(OverloadActionState::Inactive) { + ON_CALL(*this, getState).WillByDefault(ReturnRef(disabled_state_)); +} + +MockOverloadManager::MockOverloadManager() { + ON_CALL(*this, getThreadLocalOverloadState()).WillByDefault(ReturnRef(overload_state_)); +} + +MockOverloadManager::~MockOverloadManager() = default; + +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/overload_manager.h b/test/mocks/server/overload_manager.h new file mode 100644 index 0000000000000..86c194d5586d2 --- /dev/null +++ b/test/mocks/server/overload_manager.h @@ -0,0 +1,37 @@ +#pragma once + +#include + +#include "envoy/server/overload_manager.h" + +#include "gmock/gmock.h" + +namespace Envoy { +namespace Server { + +class MockThreadLocalOverloadState : public ThreadLocalOverloadState { +public: + MockThreadLocalOverloadState(); + MOCK_METHOD(const OverloadActionState&, getState, (const std::string&), (override)); + +private: + const OverloadActionState disabled_state_; +}; + +class MockOverloadManager : public OverloadManager { +public: + MockOverloadManager(); + ~MockOverloadManager() override; + + // OverloadManager + MOCK_METHOD(void, start, ()); + MOCK_METHOD(bool, registerForAction, + (const std::string& action, Event::Dispatcher& dispatcher, + OverloadActionCb callback)); + MOCK_METHOD(ThreadLocalOverloadState&, 
getThreadLocalOverloadState, ()); + + testing::NiceMock overload_state_; +}; + +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/server_lifecycle_notifier.cc b/test/mocks/server/server_lifecycle_notifier.cc new file mode 100644 index 0000000000000..1cd6cd9d3b5c8 --- /dev/null +++ b/test/mocks/server/server_lifecycle_notifier.cc @@ -0,0 +1,16 @@ +#include "server_lifecycle_notifier.h" + +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Server { + +MockServerLifecycleNotifier::MockServerLifecycleNotifier() = default; + +MockServerLifecycleNotifier::~MockServerLifecycleNotifier() = default; + +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/server_lifecycle_notifier.h b/test/mocks/server/server_lifecycle_notifier.h new file mode 100644 index 0000000000000..3442a194ac323 --- /dev/null +++ b/test/mocks/server/server_lifecycle_notifier.h @@ -0,0 +1,20 @@ +#pragma once + +#include "envoy/server/lifecycle_notifier.h" + +#include "gmock/gmock.h" +#include "spdlog/spdlog.h" + +namespace Envoy { +namespace Server { +class MockServerLifecycleNotifier : public ServerLifecycleNotifier { +public: + MockServerLifecycleNotifier(); + ~MockServerLifecycleNotifier() override; + + MOCK_METHOD(ServerLifecycleNotifier::HandlePtr, registerCallback, (Stage, StageCallback)); + MOCK_METHOD(ServerLifecycleNotifier::HandlePtr, registerCallback, + (Stage, StageCallbackWithCompletion)); +}; +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/tracer_factory.cc b/test/mocks/server/tracer_factory.cc new file mode 100644 index 0000000000000..7dcaa39f7ae18 --- /dev/null +++ b/test/mocks/server/tracer_factory.cc @@ -0,0 +1,24 @@ +#include "tracer_factory.h" + +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Server { +namespace Configuration { + +using ::testing::Invoke; + +MockTracerFactory::MockTracerFactory(const std::string& name) 
: name_(name) { + ON_CALL(*this, createEmptyConfigProto()).WillByDefault(Invoke([] { + return std::make_unique(); + })); +} + +MockTracerFactory::~MockTracerFactory() = default; + +} // namespace Configuration +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/tracer_factory.h b/test/mocks/server/tracer_factory.h new file mode 100644 index 0000000000000..e342116ee2d5a --- /dev/null +++ b/test/mocks/server/tracer_factory.h @@ -0,0 +1,27 @@ +#pragma once + +#include "envoy/protobuf/message_validator.h" +#include "envoy/server/tracer_config.h" + +#include "gmock/gmock.h" + +namespace Envoy { +namespace Server { +namespace Configuration { +class MockTracerFactory : public TracerFactory { +public: + explicit MockTracerFactory(const std::string& name); + ~MockTracerFactory() override; + + std::string name() const override { return name_; } + + MOCK_METHOD(ProtobufTypes::MessagePtr, createEmptyConfigProto, ()); + MOCK_METHOD(Tracing::HttpTracerSharedPtr, createHttpTracer, + (const Protobuf::Message& config, TracerFactoryContext& context)); + +private: + std::string name_; +}; +} // namespace Configuration +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/tracer_factory_context.cc b/test/mocks/server/tracer_factory_context.cc new file mode 100644 index 0000000000000..992a3854179f9 --- /dev/null +++ b/test/mocks/server/tracer_factory_context.cc @@ -0,0 +1,24 @@ +#include "tracer_factory_context.h" + +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Server { +namespace Configuration { + +using ::testing::ReturnRef; + +MockTracerFactoryContext::MockTracerFactoryContext() { + ON_CALL(*this, serverFactoryContext()).WillByDefault(ReturnRef(server_factory_context_)); + ON_CALL(*this, messageValidationVisitor()) + .WillByDefault(ReturnRef(ProtobufMessage::getStrictValidationVisitor())); +} + +MockTracerFactoryContext::~MockTracerFactoryContext() = default; + +} // namespace 
Configuration +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/tracer_factory_context.h b/test/mocks/server/tracer_factory_context.h new file mode 100644 index 0000000000000..66a0212d0bb7b --- /dev/null +++ b/test/mocks/server/tracer_factory_context.h @@ -0,0 +1,25 @@ +#pragma once + +#include "envoy/server/configuration.h" + +#include "gmock/gmock.h" +#include "instance.h" +#include "tracer_factory.h" + +namespace Envoy { +namespace Server { +namespace Configuration { +class MockTracerFactoryContext : public TracerFactoryContext { +public: + MockTracerFactoryContext(); + ~MockTracerFactoryContext() override; + + MOCK_METHOD(ServerFactoryContext&, serverFactoryContext, ()); + MOCK_METHOD(ProtobufMessage::ValidationVisitor&, messageValidationVisitor, ()); + + testing::NiceMock server_factory_context_; +}; +} // namespace Configuration + +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/transport_socket_factory_context.cc b/test/mocks/server/transport_socket_factory_context.cc new file mode 100644 index 0000000000000..0e4e50231a7d7 --- /dev/null +++ b/test/mocks/server/transport_socket_factory_context.cc @@ -0,0 +1,26 @@ +#include "transport_socket_factory_context.h" + +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Server { +namespace Configuration { + +using ::testing::ReturnRef; + +MockTransportSocketFactoryContext::MockTransportSocketFactoryContext() + : secret_manager_(std::make_unique(config_tracker_)) { + ON_CALL(*this, clusterManager()).WillByDefault(ReturnRef(cluster_manager_)); + ON_CALL(*this, api()).WillByDefault(ReturnRef(api_)); + ON_CALL(*this, messageValidationVisitor()) + .WillByDefault(ReturnRef(ProtobufMessage::getStrictValidationVisitor())); +} + +MockTransportSocketFactoryContext::~MockTransportSocketFactoryContext() = default; + +} // namespace Configuration +} // namespace Server +} // namespace Envoy diff --git 
a/test/mocks/server/transport_socket_factory_context.h b/test/mocks/server/transport_socket_factory_context.h new file mode 100644 index 0000000000000..1995fb573d64c --- /dev/null +++ b/test/mocks/server/transport_socket_factory_context.h @@ -0,0 +1,44 @@ +#pragma once + +#include "envoy/server/transport_socket_config.h" + +#include "common/secret/secret_manager_impl.h" + +#include "test/mocks/api/mocks.h" +#include "test/mocks/upstream/mocks.h" + +#include "config_tracker.h" +#include "gmock/gmock.h" + +namespace Envoy { +namespace Server { +namespace Configuration { +class MockTransportSocketFactoryContext : public TransportSocketFactoryContext { +public: + MockTransportSocketFactoryContext(); + ~MockTransportSocketFactoryContext() override; + + Secret::SecretManager& secretManager() override { return *(secret_manager_.get()); } + + MOCK_METHOD(Server::Admin&, admin, ()); + MOCK_METHOD(Ssl::ContextManager&, sslContextManager, ()); + MOCK_METHOD(Stats::Scope&, scope, ()); + MOCK_METHOD(Upstream::ClusterManager&, clusterManager, ()); + MOCK_METHOD(const LocalInfo::LocalInfo&, localInfo, (), (const)); + MOCK_METHOD(Event::Dispatcher&, dispatcher, ()); + MOCK_METHOD(Envoy::Random::RandomGenerator&, random, ()); + MOCK_METHOD(Stats::Store&, stats, ()); + MOCK_METHOD(Init::Manager&, initManager, ()); + MOCK_METHOD(Singleton::Manager&, singletonManager, ()); + MOCK_METHOD(ThreadLocal::SlotAllocator&, threadLocal, ()); + MOCK_METHOD(ProtobufMessage::ValidationVisitor&, messageValidationVisitor, ()); + MOCK_METHOD(Api::Api&, api, ()); + + testing::NiceMock cluster_manager_; + testing::NiceMock api_; + testing::NiceMock config_tracker_; + std::unique_ptr secret_manager_; +}; +} // namespace Configuration +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/watch_dog.cc b/test/mocks/server/watch_dog.cc new file mode 100644 index 0000000000000..f07755963fe6b --- /dev/null +++ b/test/mocks/server/watch_dog.cc @@ -0,0 +1,16 @@ +#include "watch_dog.h" + 
+#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Server { + +MockWatchDog::MockWatchDog() = default; + +MockWatchDog::~MockWatchDog() = default; + +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/watch_dog.h b/test/mocks/server/watch_dog.h new file mode 100644 index 0000000000000..105761781c360 --- /dev/null +++ b/test/mocks/server/watch_dog.h @@ -0,0 +1,21 @@ +#pragma once + +#include "envoy/server/watchdog.h" + +#include "gmock/gmock.h" + +namespace Envoy { +namespace Server { +class MockWatchDog : public WatchDog { +public: + MockWatchDog(); + ~MockWatchDog() override; + + // Server::WatchDog + MOCK_METHOD(void, startWatchdog, (Event::Dispatcher & dispatcher)); + MOCK_METHOD(void, touch, ()); + MOCK_METHOD(Thread::ThreadId, threadId, (), (const)); + MOCK_METHOD(MonotonicTime, lastTouchTime, (), (const)); +}; +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/worker.cc b/test/mocks/server/worker.cc new file mode 100644 index 0000000000000..a7e981b299b10 --- /dev/null +++ b/test/mocks/server/worker.cc @@ -0,0 +1,50 @@ +#include "worker.h" + +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Server { + +using ::testing::_; +using ::testing::Invoke; + +MockWorker::MockWorker() { + ON_CALL(*this, addListener(_, _, _)) + .WillByDefault( + Invoke([this](absl::optional overridden_listener, + Network::ListenerConfig& config, AddListenerCompletion completion) -> void { + UNREFERENCED_PARAMETER(overridden_listener); + config.listenSocketFactory().getListenSocket(); + EXPECT_EQ(nullptr, add_listener_completion_); + add_listener_completion_ = completion; + })); + + ON_CALL(*this, removeListener(_, _)) + .WillByDefault( + Invoke([this](Network::ListenerConfig&, std::function completion) -> void { + EXPECT_EQ(nullptr, remove_listener_completion_); + remove_listener_completion_ = completion; + })); + + ON_CALL(*this, stopListener(_, _)) 
+ .WillByDefault(Invoke([](Network::ListenerConfig&, std::function completion) -> void { + if (completion != nullptr) { + completion(); + } + })); + + ON_CALL(*this, removeFilterChains(_, _, _)) + .WillByDefault(Invoke([this](uint64_t, const std::list&, + std::function completion) -> void { + EXPECT_EQ(nullptr, remove_filter_chains_completion_); + remove_filter_chains_completion_ = completion; + })); +} + +MockWorker::~MockWorker() = default; + +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/worker.h b/test/mocks/server/worker.h new file mode 100644 index 0000000000000..978ab3bbcc21b --- /dev/null +++ b/test/mocks/server/worker.h @@ -0,0 +1,54 @@ +#pragma once + +#include "envoy/server/worker.h" + +#include "absl/strings/string_view.h" +#include "gmock/gmock.h" + +namespace Envoy { +namespace Server { +class MockWorker : public Worker { +public: + MockWorker(); + ~MockWorker() override; + + void callAddCompletion(bool success) { + EXPECT_NE(nullptr, add_listener_completion_); + add_listener_completion_(success); + add_listener_completion_ = nullptr; + } + + void callRemovalCompletion() { + EXPECT_NE(nullptr, remove_listener_completion_); + remove_listener_completion_(); + remove_listener_completion_ = nullptr; + } + + void callDrainFilterChainsComplete() { + EXPECT_NE(nullptr, remove_filter_chains_completion_); + remove_filter_chains_completion_(); + remove_filter_chains_completion_ = nullptr; + } + + // Server::Worker + MOCK_METHOD(void, addListener, + (absl::optional overridden_listener, Network::ListenerConfig& listener, + AddListenerCompletion completion)); + MOCK_METHOD(uint64_t, numConnections, (), (const)); + MOCK_METHOD(void, removeListener, + (Network::ListenerConfig & listener, std::function completion)); + MOCK_METHOD(void, start, (GuardDog & guard_dog)); + MOCK_METHOD(void, initializeStats, (Stats::Scope & scope)); + MOCK_METHOD(void, stop, ()); + MOCK_METHOD(void, stopListener, + (Network::ListenerConfig & listener, 
std::function completion)); + MOCK_METHOD(void, removeFilterChains, + (uint64_t listener_tag, const std::list& filter_chains, + std::function completion)); + + AddListenerCompletion add_listener_completion_; + std::function remove_listener_completion_; + std::function remove_filter_chains_completion_; +}; +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/worker_factory.cc b/test/mocks/server/worker_factory.cc new file mode 100644 index 0000000000000..65844ae8cc218 --- /dev/null +++ b/test/mocks/server/worker_factory.cc @@ -0,0 +1,14 @@ +#include "worker_factory.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Server { + +MockWorkerFactory::MockWorkerFactory() = default; + +MockWorkerFactory::~MockWorkerFactory() = default; + +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/worker_factory.h b/test/mocks/server/worker_factory.h new file mode 100644 index 0000000000000..3c05ed76566c6 --- /dev/null +++ b/test/mocks/server/worker_factory.h @@ -0,0 +1,23 @@ +#pragma once + +#include "envoy/server/worker.h" + +#include "gmock/gmock.h" +#include "worker.h" + +namespace Envoy { +namespace Server { +class MockWorkerFactory : public WorkerFactory { +public: + MockWorkerFactory(); + ~MockWorkerFactory() override; + + // Server::WorkerFactory + WorkerPtr createWorker(OverloadManager&, const std::string&) override { + return WorkerPtr{createWorker_()}; + } + + MOCK_METHOD(Worker*, createWorker_, ()); +}; +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/ssl/BUILD b/test/mocks/ssl/BUILD index 7141bb5c8fc19..e79694f5224f8 100644 --- a/test/mocks/ssl/BUILD +++ b/test/mocks/ssl/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_mock( diff --git a/test/mocks/ssl/mocks.h b/test/mocks/ssl/mocks.h index 3aed0577db6ea..7567e5807cff0 
100644 --- a/test/mocks/ssl/mocks.h +++ b/test/mocks/ssl/mocks.h @@ -42,6 +42,7 @@ class MockConnectionInfo : public ConnectionInfo { MOCK_METHOD(bool, peerCertificateValidated, (), (const)); MOCK_METHOD(absl::Span, uriSanLocalCertificate, (), (const)); MOCK_METHOD(const std::string&, sha256PeerCertificateDigest, (), (const)); + MOCK_METHOD(const std::string&, sha1PeerCertificateDigest, (), (const)); MOCK_METHOD(const std::string&, serialNumberPeerCertificate, (), (const)); MOCK_METHOD(const std::string&, issuerPeerCertificate, (), (const)); MOCK_METHOD(const std::string&, subjectPeerCertificate, (), (const)); @@ -114,6 +115,37 @@ class MockServerContextConfig : public ServerContextConfig { MOCK_METHOD(bool, disableStatelessSessionResumption, (), (const)); }; +class MockTlsCertificateConfig : public TlsCertificateConfig { +public: + MockTlsCertificateConfig() = default; + ~MockTlsCertificateConfig() override = default; + + MOCK_METHOD(const std::string&, certificateChain, (), (const)); + MOCK_METHOD(const std::string&, certificateChainPath, (), (const)); + MOCK_METHOD(const std::string&, privateKey, (), (const)); + MOCK_METHOD(const std::string&, privateKeyPath, (), (const)); + MOCK_METHOD(const std::string&, password, (), (const)); + MOCK_METHOD(const std::string&, passwordPath, (), (const)); + MOCK_METHOD(Envoy::Ssl::PrivateKeyMethodProviderSharedPtr, privateKeyMethod, (), (const)); +}; + +class MockCertificateValidationContextConfig : public CertificateValidationContextConfig { +public: + MOCK_METHOD(const std::string&, caCert, (), (const)); + MOCK_METHOD(const std::string&, caCertPath, (), (const)); + MOCK_METHOD(const std::string&, certificateRevocationList, (), (const)); + MOCK_METHOD(const std::string&, certificateRevocationListPath, (), (const)); + MOCK_METHOD(const std::vector&, verifySubjectAltNameList, (), (const)); + MOCK_METHOD(const std::vector&, subjectAltNameMatchers, + (), (const)); + MOCK_METHOD(const std::vector&, verifyCertificateHashList, (), 
(const)); + MOCK_METHOD(const std::vector&, verifyCertificateSpkiList, (), (const)); + MOCK_METHOD(bool, allowExpiredCertificate, (), (const)); + MOCK_METHOD(envoy::extensions::transport_sockets::tls::v3::CertificateValidationContext:: + TrustChainVerification, + trustChainVerification, (), (const)); +}; + class MockPrivateKeyMethodManager : public PrivateKeyMethodManager { public: MockPrivateKeyMethodManager(); diff --git a/test/mocks/stats/BUILD b/test/mocks/stats/BUILD index 1880b79a93cb0..6d4ddd19a0505 100644 --- a/test/mocks/stats/BUILD +++ b/test/mocks/stats/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_mock( diff --git a/test/mocks/stats/mocks.h b/test/mocks/stats/mocks.h index 06a81e7c8cf52..272603041d98d 100644 --- a/test/mocks/stats/mocks.h +++ b/test/mocks/stats/mocks.h @@ -110,12 +110,25 @@ template class MockMetric : public BaseClass { void setTags(const TagVector& tags) { tag_pool_.clear(); + tag_names_and_values_.clear(); tags_ = tags; for (const Tag& tag : tags) { tag_names_and_values_.push_back(tag_pool_.add(tag.name_)); tag_names_and_values_.push_back(tag_pool_.add(tag.value_)); } } + + void setTags(const Stats::StatNameTagVector& tags) { + tag_pool_.clear(); + tag_names_and_values_.clear(); + tags_.clear(); + for (const StatNameTag& tag : tags) { + tag_names_and_values_.push_back(tag.first); + tag_names_and_values_.push_back(tag.second); + tags_.push_back(Tag{symbol_table_->toString(tag.first), symbol_table_->toString(tag.second)}); + } + } + void addTag(const Tag& tag) { tags_.emplace_back(tag); tag_names_and_values_.push_back(tag_pool_.add(tag.name_)); @@ -124,7 +137,7 @@ template class MockMetric : public BaseClass { private: TagVector tags_; - std::vector tag_names_and_values_; + StatNameVec tag_names_and_values_; std::string tag_extracted_name_; StatNamePool tag_pool_; std::unique_ptr 
tag_extracted_stat_name_; @@ -174,6 +187,7 @@ class MockGauge : public MockStatWithRefcount { MOCK_METHOD(void, dec, ()); MOCK_METHOD(void, inc, ()); MOCK_METHOD(void, set, (uint64_t value)); + MOCK_METHOD(void, setParentValue, (uint64_t parent_value)); MOCK_METHOD(void, sub, (uint64_t amount)); MOCK_METHOD(void, mergeImportMode, (ImportMode)); MOCK_METHOD(bool, used, (), (const)); @@ -250,7 +264,7 @@ class MockTextReadout : public MockMetric { MockTextReadout(); ~MockTextReadout() override; - MOCK_METHOD1(set, void(std::string&& value)); + MOCK_METHOD1(set, void(absl::string_view value)); MOCK_CONST_METHOD0(used, bool()); MOCK_CONST_METHOD0(value, std::string()); @@ -266,11 +280,12 @@ class MockMetricSnapshot : public MetricSnapshot { MOCK_METHOD(const std::vector&, counters, ()); MOCK_METHOD(const std::vector>&, gauges, ()); MOCK_METHOD(const std::vector>&, histograms, ()); - MOCK_METHOD(const std::vector&, textReadouts, ()); + MOCK_METHOD(const std::vector>&, textReadouts, ()); std::vector counters_; std::vector> gauges_; std::vector> histograms_; + std::vector> text_readouts_; }; class MockSink : public Sink { diff --git a/test/mocks/stream_info/BUILD b/test/mocks/stream_info/BUILD index 6d33901f6cf18..da45abf717cee 100644 --- a/test/mocks/stream_info/BUILD +++ b/test/mocks/stream_info/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_mock( diff --git a/test/mocks/tcp/BUILD b/test/mocks/tcp/BUILD index 8634b86e9c5c8..263b2e49ba086 100644 --- a/test/mocks/tcp/BUILD +++ b/test/mocks/tcp/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_mock( @@ -15,6 +15,7 @@ envoy_cc_mock( deps = [ "//include/envoy/buffer:buffer_interface", "//include/envoy/tcp:conn_pool_interface", + 
"//test/mocks:common_lib", "//test/mocks/network:network_mocks", "//test/mocks/upstream:host_mocks", ], diff --git a/test/mocks/tcp/mocks.cc b/test/mocks/tcp/mocks.cc index 8d86a1f204a05..8b86988451afe 100644 --- a/test/mocks/tcp/mocks.cc +++ b/test/mocks/tcp/mocks.cc @@ -12,9 +12,6 @@ namespace Envoy { namespace Tcp { namespace ConnectionPool { -MockCancellable::MockCancellable() = default; -MockCancellable::~MockCancellable() = default; - MockUpstreamCallbacks::MockUpstreamCallbacks() = default; MockUpstreamCallbacks::~MockUpstreamCallbacks() = default; @@ -33,7 +30,7 @@ MockInstance::MockInstance() { } MockInstance::~MockInstance() = default; -MockCancellable* MockInstance::newConnectionImpl(Callbacks& cb) { +Envoy::ConnectionPool::MockCancellable* MockInstance::newConnectionImpl(Callbacks& cb) { handles_.emplace_back(); callbacks_.push_back(&cb); return &handles_.back(); diff --git a/test/mocks/tcp/mocks.h b/test/mocks/tcp/mocks.h index 74f5c8f85f24e..7b0b5fb349518 100644 --- a/test/mocks/tcp/mocks.h +++ b/test/mocks/tcp/mocks.h @@ -15,15 +15,6 @@ namespace Envoy { namespace Tcp { namespace ConnectionPool { -class MockCancellable : public Cancellable { -public: - MockCancellable(); - ~MockCancellable() override; - - // Tcp::ConnectionPool::Cancellable - MOCK_METHOD(void, cancel, (CancelPolicy cancel_policy)); -}; - class MockUpstreamCallbacks : public UpstreamCallbacks { public: MockUpstreamCallbacks(); @@ -62,17 +53,18 @@ class MockInstance : public Instance { // Tcp::ConnectionPool::Instance MOCK_METHOD(void, addDrainedCallback, (DrainedCb cb)); MOCK_METHOD(void, drainConnections, ()); + MOCK_METHOD(void, closeConnections, ()); MOCK_METHOD(Cancellable*, newConnection, (Tcp::ConnectionPool::Callbacks & callbacks)); MOCK_METHOD(Upstream::HostDescriptionConstSharedPtr, host, (), (const)); - MockCancellable* newConnectionImpl(Callbacks& cb); + Envoy::ConnectionPool::MockCancellable* newConnectionImpl(Callbacks& cb); void poolFailure(PoolFailureReason reason); 
void poolReady(Network::MockClientConnection& conn); // Invoked when connection_data_, having been assigned via poolReady is released. MOCK_METHOD(void, released, (Network::MockClientConnection&)); - std::list> handles_; + std::list> handles_; std::list callbacks_; std::shared_ptr> host_{ diff --git a/test/mocks/thread_local/BUILD b/test/mocks/thread_local/BUILD index bc65c8da86c9d..e05b8f6f4b965 100644 --- a/test/mocks/thread_local/BUILD +++ b/test/mocks/thread_local/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_mock( diff --git a/test/mocks/thread_local/mocks.h b/test/mocks/thread_local/mocks.h index b2f3f1e13f2f4..9bbd26a644650 100644 --- a/test/mocks/thread_local/mocks.h +++ b/test/mocks/thread_local/mocks.h @@ -1,7 +1,6 @@ #pragma once #include -#include #include "envoy/thread_local/thread_local.h" @@ -51,7 +50,8 @@ class MockInstance : public Instance { ~SlotImpl() override { // Do not actually clear slot data during shutdown. This mimics the production code. - if (!parent_.shutdown_) { + // The defer_delete mimics the recycle() code with Bookkeeper. 
+ if (!parent_.shutdown_ && !parent_.defer_delete) { EXPECT_LT(index_, parent_.data_.size()); parent_.data_[index_].reset(); } @@ -98,6 +98,7 @@ class MockInstance : public Instance { bool defer_data{}; bool shutdown_{}; bool registered_{true}; + bool defer_delete{}; }; } // namespace ThreadLocal diff --git a/test/mocks/tracing/BUILD b/test/mocks/tracing/BUILD index a5ef26bce3fff..3f4eaf881d4d6 100644 --- a/test/mocks/tracing/BUILD +++ b/test/mocks/tracing/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_mock( diff --git a/test/mocks/tracing/mocks.h b/test/mocks/tracing/mocks.h index 8531027c7543b..98a7a96ac5138 100644 --- a/test/mocks/tracing/mocks.h +++ b/test/mocks/tracing/mocks.h @@ -37,6 +37,8 @@ class MockSpan : public Span { MOCK_METHOD(void, finishSpan, ()); MOCK_METHOD(void, injectContext, (Http::RequestHeaderMap & request_headers)); MOCK_METHOD(void, setSampled, (const bool sampled)); + MOCK_METHOD(void, setBaggage, (absl::string_view key, absl::string_view value)); + MOCK_METHOD(std::string, getBaggage, (absl::string_view key)); SpanPtr spawnChild(const Config& config, const std::string& name, SystemTime start_time) override { diff --git a/test/mocks/upstream/BUILD b/test/mocks/upstream/BUILD index 7319dbfb9ad54..f249847bac2c0 100644 --- a/test/mocks/upstream/BUILD +++ b/test/mocks/upstream/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_mock( @@ -16,8 +16,11 @@ envoy_cc_mock( ":transport_socket_match_mocks", "//include/envoy/upstream:cluster_manager_interface", "//include/envoy/upstream:upstream_interface", + "//source/common/common:thread_lib", "//source/common/config:metadata_lib", "//source/common/http:utility_lib", + "//source/common/http/http1:codec_stats_lib", 
+ "//source/common/http/http2:codec_stats_lib", "//source/common/network:raw_buffer_socket_lib", "//source/common/upstream:upstream_includes", "//source/common/upstream:upstream_lib", @@ -66,10 +69,30 @@ envoy_cc_mock( envoy_cc_mock( name = "upstream_mocks", - srcs = ["mocks.cc"], hdrs = ["mocks.h"], deps = [ + ":basic_resource_limit_mocks", + ":cds_api_mocks", + ":cluster_info_factory_mocks", + ":cluster_manager_factory_mocks", + ":cluster_manager_mocks", + ":cluster_mocks", + ":cluster_priority_set_mocks", + ":cluster_real_priority_set_mocks", + ":cluster_update_callbacks_handle_mocks", + ":cluster_update_callbacks_mocks", + ":health_check_event_logger_mocks", + ":health_checker_mocks", + ":host_set_mocks", ":load_balancer_context_mock", + ":load_balancer_mocks", + ":priority_set_mocks", + ":retry_host_predicate_mocks", + ":retry_priority_factory_mocks", + ":retry_priority_mocks", + ":test_retry_host_predicate_factory_mocks", + ":thread_aware_load_balancer_mocks", + ":thread_local_cluster_mocks", ":transport_socket_match_mocks", "//include/envoy/http:async_client_interface", "//include/envoy/upstream:cluster_factory_interface", @@ -77,6 +100,7 @@ envoy_cc_mock( "//include/envoy/upstream:health_checker_interface", "//include/envoy/upstream:load_balancer_interface", "//include/envoy/upstream:upstream_interface", + "//source/common/http:header_utility_lib", "//source/common/upstream:cluster_factory_lib", "//source/common/upstream:health_discovery_service_lib", "//source/common/upstream:upstream_lib", @@ -93,3 +117,208 @@ envoy_cc_mock( "@envoy_api//envoy/data/core/v3:pkg_cc_proto", ], ) + +envoy_cc_mock( + name = "host_set_mocks", + srcs = ["host_set.cc"], + hdrs = ["host_set.h"], + deps = [ + "//include/envoy/upstream:upstream_interface", + "//source/common/common:callback_impl_lib", + "//source/common/upstream:upstream_lib", + ], +) + +envoy_cc_mock( + name = "priority_set_mocks", + srcs = ["priority_set.cc"], + hdrs = ["priority_set.h"], + deps = [ + 
"//include/envoy/upstream:upstream_interface", + "//test/mocks/upstream:host_set_mocks", + ], +) + +envoy_cc_mock( + name = "retry_priority_mocks", + srcs = ["retry_priority.cc"], + hdrs = ["retry_priority.h"], + deps = [ + "//include/envoy/upstream:retry_interface", + ], +) + +envoy_cc_mock( + name = "retry_priority_factory_mocks", + hdrs = ["retry_priority_factory.h"], + deps = [ + "//include/envoy/upstream:retry_interface", + "//test/mocks/upstream:retry_priority_mocks", + ], +) + +envoy_cc_mock( + name = "cluster_mocks", + srcs = ["cluster.cc"], + hdrs = ["cluster.h"], + deps = [ + "//include/envoy/upstream:upstream_interface", + "//test/mocks/upstream:cluster_info_mocks", + ], +) + +envoy_cc_mock( + name = "cluster_real_priority_set_mocks", + srcs = ["cluster_real_priority_set.cc"], + hdrs = ["cluster_real_priority_set.h"], + deps = [ + "//test/mocks/upstream:cluster_mocks", + ], +) + +envoy_cc_mock( + name = "cluster_priority_set_mocks", + srcs = ["cluster_priority_set.cc"], + hdrs = ["cluster_priority_set.h"], + deps = [ + "//test/mocks/upstream:cluster_mocks", + "//test/mocks/upstream:priority_set_mocks", + ], +) + +envoy_cc_mock( + name = "load_balancer_mocks", + srcs = ["load_balancer.cc"], + hdrs = ["load_balancer.h"], + deps = [ + "//include/envoy/upstream:load_balancer_interface", + "//test/mocks/upstream:host_mocks", + ], +) + +envoy_cc_mock( + name = "thread_aware_load_balancer_mocks", + srcs = ["thread_aware_load_balancer.cc"], + hdrs = ["thread_aware_load_balancer.h"], + deps = [ + "//include/envoy/upstream:load_balancer_interface", + ], +) + +envoy_cc_mock( + name = "thread_local_cluster_mocks", + srcs = ["thread_local_cluster.cc"], + hdrs = ["thread_local_cluster.h"], + deps = [ + "//include/envoy/upstream:thread_local_cluster_interface", + "//test/mocks/upstream:cluster_priority_set_mocks", + "//test/mocks/upstream:load_balancer_mocks", + ], +) + +envoy_cc_mock( + name = "cluster_manager_factory_mocks", + srcs = ["cluster_manager_factory.cc"], + 
hdrs = ["cluster_manager_factory.h"], + deps = [ + "//include/envoy/upstream:cluster_manager_interface", + "//test/mocks/secret:secret_mocks", + ], +) + +envoy_cc_mock( + name = "cluster_update_callbacks_handle_mocks", + srcs = ["cluster_update_callbacks_handle.cc"], + hdrs = ["cluster_update_callbacks_handle.h"], + deps = [ + "//include/envoy/upstream:cluster_manager_interface", + ], +) + +envoy_cc_mock( + name = "cluster_manager_mocks", + srcs = ["cluster_manager.cc"], + hdrs = ["cluster_manager.h"], + deps = [ + "//include/envoy/upstream:cluster_manager_interface", + "//test/mocks/config:config_mocks", + "//test/mocks/grpc:grpc_mocks", + "//test/mocks/http:http_mocks", + "//test/mocks/tcp:tcp_mocks", + "//test/mocks/upstream:cluster_manager_factory_mocks", + "//test/mocks/upstream:thread_local_cluster_mocks", + ], +) + +envoy_cc_mock( + name = "health_checker_mocks", + srcs = ["health_checker.cc"], + hdrs = ["health_checker.h"], + deps = [ + "//include/envoy/upstream:health_checker_interface", + ], +) + +envoy_cc_mock( + name = "health_check_event_logger_mocks", + hdrs = ["health_check_event_logger.h"], + deps = [ + "//include/envoy/upstream:health_checker_interface", + "@envoy_api//envoy/data/core/v3:pkg_cc_proto", + ], +) + +envoy_cc_mock( + name = "cds_api_mocks", + srcs = ["cds_api.cc"], + hdrs = ["cds_api.h"], + deps = [ + "//include/envoy/upstream:cluster_manager_interface", + ], +) + +envoy_cc_mock( + name = "cluster_update_callbacks_mocks", + srcs = ["cluster_update_callbacks.cc"], + hdrs = ["cluster_update_callbacks.h"], + deps = [ + "//include/envoy/upstream:cluster_manager_interface", + ], +) + +envoy_cc_mock( + name = "cluster_info_factory_mocks", + srcs = ["cluster_info_factory.cc"], + hdrs = ["cluster_info_factory.h"], + deps = [ + "//include/envoy/upstream:cluster_manager_interface", + "//source/common/common:minimal_logger_lib", + ], +) + +envoy_cc_mock( + name = "retry_host_predicate_mocks", + srcs = ["retry_host_predicate.cc"], + hdrs = 
["retry_host_predicate.h"], + deps = [ + "//include/envoy/upstream:retry_interface", + ], +) + +envoy_cc_mock( + name = "test_retry_host_predicate_factory_mocks", + hdrs = ["test_retry_host_predicate_factory.h"], + deps = [ + "//include/envoy/upstream:retry_interface", + "//test/mocks/upstream:retry_host_predicate_mocks", + ], +) + +envoy_cc_mock( + name = "basic_resource_limit_mocks", + srcs = ["basic_resource_limit.cc"], + hdrs = ["basic_resource_limit.h"], + deps = [ + "//include/envoy/common:resource_interface", + ], +) diff --git a/test/mocks/upstream/basic_resource_limit.cc b/test/mocks/upstream/basic_resource_limit.cc new file mode 100644 index 0000000000000..0676301dabf25 --- /dev/null +++ b/test/mocks/upstream/basic_resource_limit.cc @@ -0,0 +1,17 @@ +#include "basic_resource_limit.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Upstream { + +using ::testing::Return; +MockBasicResourceLimit::MockBasicResourceLimit() { + ON_CALL(*this, canCreate()).WillByDefault(Return(true)); +} + +MockBasicResourceLimit::~MockBasicResourceLimit() = default; + +} // namespace Upstream +} // namespace Envoy diff --git a/test/mocks/upstream/basic_resource_limit.h b/test/mocks/upstream/basic_resource_limit.h new file mode 100644 index 0000000000000..93b31d2e2be8c --- /dev/null +++ b/test/mocks/upstream/basic_resource_limit.h @@ -0,0 +1,23 @@ +#pragma once + +#include "envoy/common/resource.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Upstream { +class MockBasicResourceLimit : public ResourceLimit { +public: + MockBasicResourceLimit(); + ~MockBasicResourceLimit() override; + + MOCK_METHOD(bool, canCreate, ()); + MOCK_METHOD(void, inc, ()); + MOCK_METHOD(void, dec, ()); + MOCK_METHOD(void, decBy, (uint64_t)); + MOCK_METHOD(uint64_t, max, ()); + MOCK_METHOD(uint64_t, count, (), (const)); +}; +} // namespace Upstream +} // namespace Envoy diff --git a/test/mocks/upstream/cds_api.cc 
b/test/mocks/upstream/cds_api.cc new file mode 100644 index 0000000000000..297defc9d23b8 --- /dev/null +++ b/test/mocks/upstream/cds_api.cc @@ -0,0 +1,19 @@ +#include "cds_api.h" + +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Upstream { +using ::testing::_; +using ::testing::SaveArg; +MockCdsApi::MockCdsApi() { + ON_CALL(*this, setInitializedCb(_)).WillByDefault(SaveArg<0>(&initialized_callback_)); +} + +MockCdsApi::~MockCdsApi() = default; + +} // namespace Upstream +} // namespace Envoy diff --git a/test/mocks/upstream/cds_api.h b/test/mocks/upstream/cds_api.h new file mode 100644 index 0000000000000..e11f644159f57 --- /dev/null +++ b/test/mocks/upstream/cds_api.h @@ -0,0 +1,25 @@ +#pragma once + +#include +#include + +#include "envoy/upstream/cluster_manager.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Upstream { +class MockCdsApi : public CdsApi { +public: + MockCdsApi(); + ~MockCdsApi() override; + + MOCK_METHOD(void, initialize, ()); + MOCK_METHOD(void, setInitializedCb, (std::function callback)); + MOCK_METHOD(const std::string, versionInfo, (), (const)); + + std::function initialized_callback_; +}; +} // namespace Upstream +} // namespace Envoy diff --git a/test/mocks/upstream/cluster.cc b/test/mocks/upstream/cluster.cc new file mode 100644 index 0000000000000..d0c2975064906 --- /dev/null +++ b/test/mocks/upstream/cluster.cc @@ -0,0 +1,23 @@ +#include "cluster.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Upstream { +using ::testing::_; +using ::testing::Invoke; +using ::testing::Return; +MockCluster::MockCluster() { + ON_CALL(*this, info()).WillByDefault(Return(info_)); + ON_CALL(*this, initialize(_)) + .WillByDefault(Invoke([this](std::function callback) -> void { + EXPECT_EQ(nullptr, initialize_callback_); + initialize_callback_ = callback; + })); +} + +MockCluster::~MockCluster() = default; + +} // namespace Upstream 
+} // namespace Envoy diff --git a/test/mocks/upstream/cluster.h b/test/mocks/upstream/cluster.h new file mode 100644 index 0000000000000..243daf09dbdb7 --- /dev/null +++ b/test/mocks/upstream/cluster.h @@ -0,0 +1,33 @@ +#pragma once + +#include + +#include "envoy/upstream/upstream.h" + +#include "test/mocks/upstream/cluster_info.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Upstream { +class MockCluster : public Cluster { +public: + MockCluster(); + ~MockCluster() override; + + // Upstream::Cluster + MOCK_METHOD(HealthChecker*, healthChecker, ()); + MOCK_METHOD(ClusterInfoConstSharedPtr, info, (), (const)); + MOCK_METHOD(Outlier::Detector*, outlierDetector, ()); + MOCK_METHOD(const Outlier::Detector*, outlierDetector, (), (const)); + MOCK_METHOD(void, initialize, (std::function callback)); + MOCK_METHOD(InitializePhase, initializePhase, (), (const)); + MOCK_METHOD(const Network::Address::InstanceConstSharedPtr&, sourceAddress, (), (const)); + + std::shared_ptr info_{new ::testing::NiceMock()}; + std::function initialize_callback_; + Network::Address::InstanceConstSharedPtr source_address_; +}; +} // namespace Upstream +} // namespace Envoy diff --git a/test/mocks/upstream/cluster_info.cc b/test/mocks/upstream/cluster_info.cc index 55b0fffbb2b86..b6b8a59631cee 100644 --- a/test/mocks/upstream/cluster_info.cc +++ b/test/mocks/upstream/cluster_info.cc @@ -42,7 +42,9 @@ MockClusterInfo::MockClusterInfo() stats_(ClusterInfoImpl::generateStats(stats_store_)), transport_socket_matcher_(new NiceMock()), load_report_stats_(ClusterInfoImpl::generateLoadReportStats(load_report_stats_store_)), - timeout_budget_stats_(absl::make_optional( + request_response_size_stats_(std::make_unique( + ClusterInfoImpl::generateRequestResponseSizeStats(request_response_size_stats_store_))), + timeout_budget_stats_(std::make_unique( ClusterInfoImpl::generateTimeoutBudgetStats(timeout_budget_stats_store_))), circuit_breakers_stats_( 
ClusterInfoImpl::generateCircuitBreakersStats(stats_store_, "default", true)), @@ -51,10 +53,13 @@ MockClusterInfo::MockClusterInfo() circuit_breakers_stats_, absl::nullopt, absl::nullopt)) { ON_CALL(*this, connectTimeout()).WillByDefault(Return(std::chrono::milliseconds(1))); ON_CALL(*this, idleTimeout()).WillByDefault(Return(absl::optional())); + ON_CALL(*this, prefetchRatio()).WillByDefault(Return(1.0)); ON_CALL(*this, name()).WillByDefault(ReturnRef(name_)); - ON_CALL(*this, eds_service_name()).WillByDefault(ReturnPointee(&eds_service_name_)); + ON_CALL(*this, edsServiceName()).WillByDefault(ReturnPointee(&eds_service_name_)); ON_CALL(*this, http1Settings()).WillByDefault(ReturnRef(http1_settings_)); ON_CALL(*this, http2Options()).WillByDefault(ReturnRef(http2_options_)); + ON_CALL(*this, commonHttpProtocolOptions()) + .WillByDefault(ReturnRef(common_http_protocol_options_)); ON_CALL(*this, extensionProtocolOptions(_)).WillByDefault(Return(extension_protocol_options_)); ON_CALL(*this, maxResponseHeadersCount()) .WillByDefault(ReturnPointee(&max_response_headers_count_)); @@ -69,7 +74,12 @@ MockClusterInfo::MockClusterInfo() .WillByDefault( Invoke([this]() -> TransportSocketMatcher& { return *transport_socket_matcher_; })); ON_CALL(*this, loadReportStats()).WillByDefault(ReturnRef(load_report_stats_)); - ON_CALL(*this, timeoutBudgetStats()).WillByDefault(ReturnRef(timeout_budget_stats_)); + ON_CALL(*this, requestResponseSizeStats()) + .WillByDefault(Return( + std::reference_wrapper(*request_response_size_stats_))); + ON_CALL(*this, timeoutBudgetStats()) + .WillByDefault( + Return(std::reference_wrapper(*timeout_budget_stats_))); ON_CALL(*this, sourceAddress()).WillByDefault(ReturnRef(source_address_)); ON_CALL(*this, resourceManager(_)) .WillByDefault(Invoke( @@ -79,6 +89,7 @@ MockClusterInfo::MockClusterInfo() ON_CALL(*this, lbSubsetInfo()).WillByDefault(ReturnRef(lb_subset_)); ON_CALL(*this, lbRingHashConfig()).WillByDefault(ReturnRef(lb_ring_hash_config_)); 
ON_CALL(*this, lbOriginalDstConfig()).WillByDefault(ReturnRef(lb_original_dst_config_)); + ON_CALL(*this, upstreamConfig()).WillByDefault(ReturnRef(upstream_config_)); ON_CALL(*this, lbConfig()).WillByDefault(ReturnRef(lb_config_)); ON_CALL(*this, clusterSocketOptions()).WillByDefault(ReturnRef(cluster_socket_options_)); ON_CALL(*this, metadata()).WillByDefault(ReturnRef(metadata_)); @@ -99,5 +110,13 @@ MockClusterInfo::MockClusterInfo() MockClusterInfo::~MockClusterInfo() = default; +Http::Http1::CodecStats& MockClusterInfo::http1CodecStats() const { + return Http::Http1::CodecStats::atomicGet(http1_codec_stats_, statsScope()); +} + +Http::Http2::CodecStats& MockClusterInfo::http2CodecStats() const { + return Http::Http2::CodecStats::atomicGet(http2_codec_stats_, statsScope()); +} + } // namespace Upstream } // namespace Envoy diff --git a/test/mocks/upstream/cluster_info.h b/test/mocks/upstream/cluster_info.h index 6e2e8c3b113f5..3848154f73555 100644 --- a/test/mocks/upstream/cluster_info.h +++ b/test/mocks/upstream/cluster_info.h @@ -13,6 +13,9 @@ #include "envoy/upstream/cluster_manager.h" #include "envoy/upstream/upstream.h" +#include "common/common/thread.h" +#include "common/http/http1/codec_stats.h" +#include "common/http/http2/codec_stats.h" #include "common/upstream/upstream_impl.h" #include "test/mocks/runtime/mocks.h" @@ -57,7 +60,7 @@ class MockClusterTypedMetadata : public Config::TypedMetadataImpl>& data() { + absl::node_hash_map>& data() { return data_; } }; @@ -86,10 +89,13 @@ class MockClusterInfo : public ClusterInfo { MOCK_METHOD(bool, addedViaApi, (), (const)); MOCK_METHOD(std::chrono::milliseconds, connectTimeout, (), (const)); MOCK_METHOD(const absl::optional, idleTimeout, (), (const)); + MOCK_METHOD(float, prefetchRatio, (), (const)); MOCK_METHOD(uint32_t, perConnectionBufferLimitBytes, (), (const)); MOCK_METHOD(uint64_t, features, (), (const)); MOCK_METHOD(const Http::Http1Settings&, http1Settings, (), (const)); MOCK_METHOD(const 
envoy::config::core::v3::Http2ProtocolOptions&, http2Options, (), (const)); + MOCK_METHOD(const envoy::config::core::v3::HttpProtocolOptions&, commonHttpProtocolOptions, (), + (const)); MOCK_METHOD(ProtocolOptionsConfigConstSharedPtr, extensionProtocolOptions, (const std::string&), (const)); MOCK_METHOD(const envoy::config::cluster::v3::Cluster::CommonLbConfig&, lbConfig, (), (const)); @@ -103,6 +109,8 @@ class MockClusterInfo : public ClusterInfo { lbLeastRequestConfig, (), (const)); MOCK_METHOD(const absl::optional&, lbOriginalDstConfig, (), (const)); + MOCK_METHOD(const absl::optional&, upstreamConfig, + (), (const)); MOCK_METHOD(bool, maintenanceMode, (), (const)); MOCK_METHOD(uint32_t, maxResponseHeadersCount, (), (const)); MOCK_METHOD(uint64_t, maxRequestsPerConnection, (), (const)); @@ -112,7 +120,8 @@ class MockClusterInfo : public ClusterInfo { MOCK_METHOD(ClusterStats&, stats, (), (const)); MOCK_METHOD(Stats::Scope&, statsScope, (), (const)); MOCK_METHOD(ClusterLoadReportStats&, loadReportStats, (), (const)); - MOCK_METHOD(absl::optional&, timeoutBudgetStats, (), (const)); + MOCK_METHOD(ClusterRequestResponseSizeStatsOptRef, requestResponseSizeStats, (), (const)); + MOCK_METHOD(ClusterTimeoutBudgetStatsOptRef, timeoutBudgetStats, (), (const)); MOCK_METHOD(const Network::Address::InstanceConstSharedPtr&, sourceAddress, (), (const)); MOCK_METHOD(const LoadBalancerSubsetInfo&, lbSubsetInfo, (), (const)); MOCK_METHOD(const envoy::config::core::v3::Metadata&, metadata, (), (const)); @@ -123,14 +132,18 @@ class MockClusterInfo : public ClusterInfo { MOCK_METHOD(bool, warmHosts, (), (const)); MOCK_METHOD(const absl::optional&, upstreamHttpProtocolOptions, (), (const)); - MOCK_METHOD(absl::optional, eds_service_name, (), (const)); + MOCK_METHOD(absl::optional, edsServiceName, (), (const)); MOCK_METHOD(void, createNetworkFilterChain, (Network::Connection&), (const)); MOCK_METHOD(Http::Protocol, upstreamHttpProtocol, (absl::optional), (const)); + 
Http::Http1::CodecStats& http1CodecStats() const override; + Http::Http2::CodecStats& http2CodecStats() const override; + std::string name_{"fake_cluster"}; absl::optional eds_service_name_; Http::Http1Settings http1_settings_; envoy::config::core::v3::Http2ProtocolOptions http2_options_; + envoy::config::core::v3::HttpProtocolOptions common_http_protocol_options_; ProtocolOptionsConfigConstSharedPtr extension_protocol_options_; uint64_t max_requests_per_connection_{}; uint32_t max_response_headers_count_{Http::DEFAULT_MAX_HEADERS_COUNT}; @@ -139,8 +152,10 @@ class MockClusterInfo : public ClusterInfo { Upstream::TransportSocketMatcherPtr transport_socket_matcher_; NiceMock load_report_stats_store_; ClusterLoadReportStats load_report_stats_; + NiceMock request_response_size_stats_store_; + ClusterRequestResponseSizeStatsPtr request_response_size_stats_; NiceMock timeout_budget_stats_store_; - absl::optional timeout_budget_stats_; + ClusterTimeoutBudgetStatsPtr timeout_budget_stats_; ClusterCircuitBreakersStats circuit_breakers_stats_; NiceMock runtime_; std::unique_ptr resource_manager_; @@ -154,10 +169,14 @@ class MockClusterInfo : public ClusterInfo { upstream_http_protocol_options_; absl::optional lb_ring_hash_config_; absl::optional lb_original_dst_config_; + absl::optional upstream_config_; Network::ConnectionSocket::OptionsSharedPtr cluster_socket_options_; envoy::config::cluster::v3::Cluster::CommonLbConfig lb_config_; envoy::config::core::v3::Metadata metadata_; std::unique_ptr typed_metadata_; + absl::optional max_stream_duration_; + mutable Http::Http1::CodecStats::AtomicPtr http1_codec_stats_; + mutable Http::Http2::CodecStats::AtomicPtr http2_codec_stats_; }; class MockIdleTimeEnabledClusterInfo : public MockClusterInfo { diff --git a/test/mocks/upstream/cluster_info_factory.cc b/test/mocks/upstream/cluster_info_factory.cc new file mode 100644 index 0000000000000..f00b821d19034 --- /dev/null +++ b/test/mocks/upstream/cluster_info_factory.cc @@ -0,0 
+1,10 @@ +#include "cluster_info_factory.h" + +namespace Envoy { +namespace Upstream { +MockClusterInfoFactory::MockClusterInfoFactory() = default; + +MockClusterInfoFactory::~MockClusterInfoFactory() = default; + +} // namespace Upstream +} // namespace Envoy diff --git a/test/mocks/upstream/cluster_info_factory.h b/test/mocks/upstream/cluster_info_factory.h new file mode 100644 index 0000000000000..08144c57154d4 --- /dev/null +++ b/test/mocks/upstream/cluster_info_factory.h @@ -0,0 +1,20 @@ +#pragma once + +#include "envoy/upstream/cluster_manager.h" + +#include "common/common/logger.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Upstream { +class MockClusterInfoFactory : public ClusterInfoFactory, Logger::Loggable { +public: + MockClusterInfoFactory(); + ~MockClusterInfoFactory() override; + + MOCK_METHOD(ClusterInfoConstSharedPtr, createClusterInfo, (const CreateClusterInfoParams&)); +}; +} // namespace Upstream +} // namespace Envoy diff --git a/test/mocks/upstream/cluster_manager.cc b/test/mocks/upstream/cluster_manager.cc new file mode 100644 index 0000000000000..d40a5af3d273c --- /dev/null +++ b/test/mocks/upstream/cluster_manager.cc @@ -0,0 +1,36 @@ +#include "cluster_manager.h" + +#include +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Upstream { +using ::testing::_; +using ::testing::Eq; +using ::testing::Return; +using ::testing::ReturnRef; +MockClusterManager::MockClusterManager(TimeSource&) : MockClusterManager() {} + +MockClusterManager::MockClusterManager() { + ON_CALL(*this, httpConnPoolForCluster(_, _, _, _)).WillByDefault(Return(&conn_pool_)); + ON_CALL(*this, tcpConnPoolForCluster(_, _, _)).WillByDefault(Return(&tcp_conn_pool_)); + ON_CALL(*this, httpAsyncClientForCluster(_)).WillByDefault(ReturnRef(async_client_)); + ON_CALL(*this, httpAsyncClientForCluster(_)).WillByDefault((ReturnRef(async_client_))); + ON_CALL(*this, 
bindConfig()).WillByDefault(ReturnRef(bind_config_)); + ON_CALL(*this, adsMux()).WillByDefault(Return(ads_mux_)); + ON_CALL(*this, grpcAsyncClientManager()).WillByDefault(ReturnRef(async_client_manager_)); + ON_CALL(*this, localClusterName()).WillByDefault((ReturnRef(local_cluster_name_))); + + // Matches are LIFO so "" will match first. + ON_CALL(*this, get(_)).WillByDefault(Return(&thread_local_cluster_)); + ON_CALL(*this, get(Eq(""))).WillByDefault(Return(nullptr)); + ON_CALL(*this, subscriptionFactory()).WillByDefault(ReturnRef(subscription_factory_)); +} + +MockClusterManager::~MockClusterManager() = default; + +} // namespace Upstream +} // namespace Envoy diff --git a/test/mocks/upstream/cluster_manager.h b/test/mocks/upstream/cluster_manager.h new file mode 100644 index 0000000000000..c24b1b045acda --- /dev/null +++ b/test/mocks/upstream/cluster_manager.h @@ -0,0 +1,81 @@ +#pragma once + +#include "envoy/upstream/cluster_manager.h" + +#include "test/mocks/config/mocks.h" +#include "test/mocks/grpc/mocks.h" +#include "test/mocks/http/mocks.h" +#include "test/mocks/tcp/mocks.h" + +#include "cluster_manager_factory.h" +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "thread_local_cluster.h" + +namespace Envoy { +namespace Upstream { +using ::testing::NiceMock; +class MockClusterManager : public ClusterManager { +public: + explicit MockClusterManager(TimeSource& time_source); + MockClusterManager(); + ~MockClusterManager() override; + + ClusterUpdateCallbacksHandlePtr + addThreadLocalClusterUpdateCallbacks(ClusterUpdateCallbacks& callbacks) override { + return ClusterUpdateCallbacksHandlePtr{addThreadLocalClusterUpdateCallbacks_(callbacks)}; + } + + Host::CreateConnectionData tcpConnForCluster(const std::string& cluster, + LoadBalancerContext* context) override { + MockHost::MockCreateConnectionData data = tcpConnForCluster_(cluster, context); + return {Network::ClientConnectionPtr{data.connection_}, data.host_description_}; + } + + 
ClusterManagerFactory& clusterManagerFactory() override { return cluster_manager_factory_; } + + // Upstream::ClusterManager + MOCK_METHOD(bool, addOrUpdateCluster, + (const envoy::config::cluster::v3::Cluster& cluster, + const std::string& version_info)); + MOCK_METHOD(void, setPrimaryClustersInitializedCb, (PrimaryClustersReadyCallback)); + MOCK_METHOD(void, setInitializedCb, (InitializationCompleteCallback)); + MOCK_METHOD(void, initializeSecondaryClusters, + (const envoy::config::bootstrap::v3::Bootstrap& bootstrap)); + MOCK_METHOD(ClusterInfoMap, clusters, ()); + MOCK_METHOD(const ClusterSet&, primaryClusters, ()); + MOCK_METHOD(ThreadLocalCluster*, get, (absl::string_view cluster)); + MOCK_METHOD(Http::ConnectionPool::Instance*, httpConnPoolForCluster, + (const std::string& cluster, ResourcePriority priority, + absl::optional downstream_protocol, LoadBalancerContext* context)); + MOCK_METHOD(Tcp::ConnectionPool::Instance*, tcpConnPoolForCluster, + (const std::string& cluster, ResourcePriority priority, + LoadBalancerContext* context)); + MOCK_METHOD(MockHost::MockCreateConnectionData, tcpConnForCluster_, + (const std::string& cluster, LoadBalancerContext* context)); + MOCK_METHOD(Http::AsyncClient&, httpAsyncClientForCluster, (const std::string& cluster)); + MOCK_METHOD(bool, removeCluster, (const std::string& cluster)); + MOCK_METHOD(void, shutdown, ()); + MOCK_METHOD(const envoy::config::core::v3::BindConfig&, bindConfig, (), (const)); + MOCK_METHOD(Config::GrpcMuxSharedPtr, adsMux, ()); + MOCK_METHOD(Grpc::AsyncClientManager&, grpcAsyncClientManager, ()); + MOCK_METHOD(const std::string, versionInfo, (), (const)); + MOCK_METHOD(const absl::optional&, localClusterName, (), (const)); + MOCK_METHOD(ClusterUpdateCallbacksHandle*, addThreadLocalClusterUpdateCallbacks_, + (ClusterUpdateCallbacks & callbacks)); + MOCK_METHOD(Config::SubscriptionFactory&, subscriptionFactory, ()); + + NiceMock conn_pool_; + NiceMock async_client_; + NiceMock tcp_conn_pool_; + 
NiceMock thread_local_cluster_; + envoy::config::core::v3::BindConfig bind_config_; + std::shared_ptr> ads_mux_; + NiceMock async_client_manager_; + absl::optional local_cluster_name_; + NiceMock cluster_manager_factory_; + NiceMock subscription_factory_; +}; +} // namespace Upstream + +} // namespace Envoy diff --git a/test/mocks/upstream/cluster_manager_factory.cc b/test/mocks/upstream/cluster_manager_factory.cc new file mode 100644 index 0000000000000..37727679a554e --- /dev/null +++ b/test/mocks/upstream/cluster_manager_factory.cc @@ -0,0 +1,9 @@ +#include "cluster_manager_factory.h" + +namespace Envoy { +namespace Upstream { +MockClusterManagerFactory::MockClusterManagerFactory() = default; + +MockClusterManagerFactory::~MockClusterManagerFactory() = default; +} // namespace Upstream +} // namespace Envoy diff --git a/test/mocks/upstream/cluster_manager_factory.h b/test/mocks/upstream/cluster_manager_factory.h new file mode 100644 index 0000000000000..cdcc952d090b0 --- /dev/null +++ b/test/mocks/upstream/cluster_manager_factory.h @@ -0,0 +1,44 @@ +#pragma once + +#include "envoy/upstream/cluster_manager.h" + +#include "test/mocks/secret/mocks.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Upstream { +using ::testing::NiceMock; +class MockClusterManagerFactory : public ClusterManagerFactory { +public: + MockClusterManagerFactory(); + ~MockClusterManagerFactory() override; + + Secret::MockSecretManager& secretManager() override { return secret_manager_; }; + + MOCK_METHOD(ClusterManagerPtr, clusterManagerFromProto, + (const envoy::config::bootstrap::v3::Bootstrap& bootstrap)); + + MOCK_METHOD(Http::ConnectionPool::InstancePtr, allocateConnPool, + (Event::Dispatcher & dispatcher, HostConstSharedPtr host, ResourcePriority priority, + Http::Protocol protocol, const Network::ConnectionSocket::OptionsSharedPtr& options, + const Network::TransportSocketOptionsSharedPtr& transport_socket_options)); + + 
MOCK_METHOD(Tcp::ConnectionPool::InstancePtr, allocateTcpConnPool, + (Event::Dispatcher & dispatcher, HostConstSharedPtr host, ResourcePriority priority, + const Network::ConnectionSocket::OptionsSharedPtr& options, + Network::TransportSocketOptionsSharedPtr)); + + MOCK_METHOD((std::pair), clusterFromProto, + (const envoy::config::cluster::v3::Cluster& cluster, ClusterManager& cm, + Outlier::EventLoggerSharedPtr outlier_event_logger, bool added_via_api)); + + MOCK_METHOD(CdsApiPtr, createCds, + (const envoy::config::core::v3::ConfigSource& cds_config, ClusterManager& cm)); + +private: + NiceMock secret_manager_; +}; +} // namespace Upstream +} // namespace Envoy diff --git a/test/mocks/upstream/cluster_priority_set.cc b/test/mocks/upstream/cluster_priority_set.cc new file mode 100644 index 0000000000000..08b4b1a6a40a0 --- /dev/null +++ b/test/mocks/upstream/cluster_priority_set.cc @@ -0,0 +1,10 @@ +#include "cluster_priority_set.h" + +namespace Envoy { +namespace Upstream { +MockClusterMockPrioritySet::MockClusterMockPrioritySet() = default; + +MockClusterMockPrioritySet::~MockClusterMockPrioritySet() = default; + +} // namespace Upstream +} // namespace Envoy diff --git a/test/mocks/upstream/cluster_priority_set.h b/test/mocks/upstream/cluster_priority_set.h new file mode 100644 index 0000000000000..da090750ec233 --- /dev/null +++ b/test/mocks/upstream/cluster_priority_set.h @@ -0,0 +1,22 @@ +#pragma once + +#include "cluster.h" +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "priority_set.h" + +namespace Envoy { +namespace Upstream { +class MockClusterMockPrioritySet : public MockCluster { +public: + MockClusterMockPrioritySet(); + ~MockClusterMockPrioritySet() override; + + // Upstream::Cluster + MockPrioritySet& prioritySet() override { return priority_set_; } + const PrioritySet& prioritySet() const override { return priority_set_; } + + ::testing::NiceMock priority_set_; +}; +} // namespace Upstream +} // namespace Envoy diff --git 
a/test/mocks/upstream/cluster_real_priority_set.cc b/test/mocks/upstream/cluster_real_priority_set.cc new file mode 100644 index 0000000000000..60c3d05a08f84 --- /dev/null +++ b/test/mocks/upstream/cluster_real_priority_set.cc @@ -0,0 +1,9 @@ +#include "cluster_real_priority_set.h" + +namespace Envoy { +namespace Upstream { +MockClusterRealPrioritySet::MockClusterRealPrioritySet() = default; + +MockClusterRealPrioritySet::~MockClusterRealPrioritySet() = default; +} // namespace Upstream +} // namespace Envoy diff --git a/test/mocks/upstream/cluster_real_priority_set.h b/test/mocks/upstream/cluster_real_priority_set.h new file mode 100644 index 0000000000000..4b6cde5ffd8c2 --- /dev/null +++ b/test/mocks/upstream/cluster_real_priority_set.h @@ -0,0 +1,19 @@ +#pragma once + +#include "cluster.h" + +namespace Envoy { +namespace Upstream { +class MockClusterRealPrioritySet : public MockCluster { +public: + MockClusterRealPrioritySet(); + ~MockClusterRealPrioritySet() override; + + // Upstream::Cluster + PrioritySetImpl& prioritySet() override { return priority_set_; } + const PrioritySet& prioritySet() const override { return priority_set_; } + + PrioritySetImpl priority_set_; +}; +} // namespace Upstream +} // namespace Envoy diff --git a/test/mocks/upstream/cluster_update_callbacks.cc b/test/mocks/upstream/cluster_update_callbacks.cc new file mode 100644 index 0000000000000..5451d3ed2a700 --- /dev/null +++ b/test/mocks/upstream/cluster_update_callbacks.cc @@ -0,0 +1,10 @@ +#include "cluster_update_callbacks.h" + +namespace Envoy { +namespace Upstream { +MockClusterUpdateCallbacks::MockClusterUpdateCallbacks() = default; + +MockClusterUpdateCallbacks::~MockClusterUpdateCallbacks() = default; + +} // namespace Upstream +} // namespace Envoy diff --git a/test/mocks/upstream/cluster_update_callbacks.h b/test/mocks/upstream/cluster_update_callbacks.h new file mode 100644 index 0000000000000..782c319004c15 --- /dev/null +++ b/test/mocks/upstream/cluster_update_callbacks.h 
@@ -0,0 +1,21 @@ +#pragma once + +#include + +#include "envoy/upstream/cluster_manager.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Upstream { +class MockClusterUpdateCallbacks : public ClusterUpdateCallbacks { +public: + MockClusterUpdateCallbacks(); + ~MockClusterUpdateCallbacks() override; + + MOCK_METHOD(void, onClusterAddOrUpdate, (ThreadLocalCluster & cluster)); + MOCK_METHOD(void, onClusterRemoval, (const std::string& cluster_name)); +}; +} // namespace Upstream +} // namespace Envoy diff --git a/test/mocks/upstream/cluster_update_callbacks_handle.cc b/test/mocks/upstream/cluster_update_callbacks_handle.cc new file mode 100644 index 0000000000000..72f44f798c126 --- /dev/null +++ b/test/mocks/upstream/cluster_update_callbacks_handle.cc @@ -0,0 +1,9 @@ +#include "cluster_update_callbacks_handle.h" + +namespace Envoy { +namespace Upstream { +MockClusterUpdateCallbacksHandle::MockClusterUpdateCallbacksHandle() = default; + +MockClusterUpdateCallbacksHandle::~MockClusterUpdateCallbacksHandle() = default; +} // namespace Upstream +} // namespace Envoy diff --git a/test/mocks/upstream/cluster_update_callbacks_handle.h b/test/mocks/upstream/cluster_update_callbacks_handle.h new file mode 100644 index 0000000000000..22a023c45a9d7 --- /dev/null +++ b/test/mocks/upstream/cluster_update_callbacks_handle.h @@ -0,0 +1,16 @@ +#pragma once + +#include "envoy/upstream/cluster_manager.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Upstream { +class MockClusterUpdateCallbacksHandle : public ClusterUpdateCallbacksHandle { +public: + MockClusterUpdateCallbacksHandle(); + ~MockClusterUpdateCallbacksHandle() override; +}; +} // namespace Upstream +} // namespace Envoy diff --git a/test/mocks/upstream/health_check_event_logger.h b/test/mocks/upstream/health_check_event_logger.h new file mode 100644 index 0000000000000..1d6e75819b3b4 --- /dev/null +++ 
b/test/mocks/upstream/health_check_event_logger.h @@ -0,0 +1,35 @@ +#pragma once + +#include +#include +#include +#include +#include + +#include "envoy/data/core/v3/health_check_event.pb.h" +#include "envoy/upstream/health_checker.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Upstream { +class MockHealthCheckEventLogger : public HealthCheckEventLogger { +public: + MOCK_METHOD(void, logEjectUnhealthy, + (envoy::data::core::v3::HealthCheckerType, const HostDescriptionConstSharedPtr&, + envoy::data::core::v3::HealthCheckFailureType)); + MOCK_METHOD(void, logAddHealthy, + (envoy::data::core::v3::HealthCheckerType, const HostDescriptionConstSharedPtr&, + bool)); + MOCK_METHOD(void, logUnhealthy, + (envoy::data::core::v3::HealthCheckerType, const HostDescriptionConstSharedPtr&, + envoy::data::core::v3::HealthCheckFailureType, bool)); + MOCK_METHOD(void, logDegraded, + (envoy::data::core::v3::HealthCheckerType, const HostDescriptionConstSharedPtr&)); + MOCK_METHOD(void, logNoLongerDegraded, + (envoy::data::core::v3::HealthCheckerType, const HostDescriptionConstSharedPtr&)); +}; +} // namespace Upstream + +} // namespace Envoy diff --git a/test/mocks/upstream/health_checker.cc b/test/mocks/upstream/health_checker.cc new file mode 100644 index 0000000000000..2bf89a8345b49 --- /dev/null +++ b/test/mocks/upstream/health_checker.cc @@ -0,0 +1,19 @@ +#include "health_checker.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Upstream { +using ::testing::_; +using ::testing::Invoke; +MockHealthChecker::MockHealthChecker() { + ON_CALL(*this, addHostCheckCompleteCb(_)).WillByDefault(Invoke([this](HostStatusCb cb) -> void { + callbacks_.push_back(cb); + })); +} + +MockHealthChecker::~MockHealthChecker() = default; + +} // namespace Upstream +} // namespace Envoy diff --git a/test/mocks/upstream/health_checker.h b/test/mocks/upstream/health_checker.h new file mode 100644 index 
0000000000000..af4a96a9ee874 --- /dev/null +++ b/test/mocks/upstream/health_checker.h @@ -0,0 +1,27 @@ +#pragma once + +#include "envoy/upstream/health_checker.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Upstream { +class MockHealthChecker : public HealthChecker { +public: + MockHealthChecker(); + ~MockHealthChecker() override; + + MOCK_METHOD(void, addHostCheckCompleteCb, (HostStatusCb callback)); + MOCK_METHOD(void, start, ()); + + void runCallbacks(Upstream::HostSharedPtr host, HealthTransition changed_state) { + for (const auto& callback : callbacks_) { + callback(host, changed_state); + } + } + + std::list callbacks_; +}; +} // namespace Upstream +} // namespace Envoy diff --git a/test/mocks/upstream/host.h b/test/mocks/upstream/host.h index 0316440e53896..3c927b0208aa3 100644 --- a/test/mocks/upstream/host.h +++ b/test/mocks/upstream/host.h @@ -135,7 +135,8 @@ class MockHost : public Host { CreateConnectionData createHealthCheckConnection(Event::Dispatcher& dispatcher, - Network::TransportSocketOptionsSharedPtr) const override { + Network::TransportSocketOptionsSharedPtr, + const envoy::config::core::v3::Metadata*) const override { MockCreateConnectionData data = createConnection_(dispatcher, nullptr); return {Network::ClientConnectionPtr{data.connection_}, data.host_description_}; } diff --git a/test/mocks/upstream/host_set.cc b/test/mocks/upstream/host_set.cc new file mode 100644 index 0000000000000..1d49579073f04 --- /dev/null +++ b/test/mocks/upstream/host_set.cc @@ -0,0 +1,55 @@ +#include "host_set.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Upstream { +using ::testing::Invoke; +using ::testing::Return; +using ::testing::ReturnRef; +MockHostSet::MockHostSet(uint32_t priority, uint32_t overprovisioning_factor) + : priority_(priority), overprovisioning_factor_(overprovisioning_factor) { + ON_CALL(*this, priority()).WillByDefault(Return(priority_)); + ON_CALL(*this, 
hosts()).WillByDefault(ReturnRef(hosts_)); + ON_CALL(*this, hostsPtr()).WillByDefault(Invoke([this]() { + return std::make_shared(hosts_); + })); + ON_CALL(*this, healthyHosts()).WillByDefault(ReturnRef(healthy_hosts_)); + ON_CALL(*this, healthyHostsPtr()).WillByDefault(Invoke([this]() { + return std::make_shared(healthy_hosts_); + })); + ON_CALL(*this, degradedHosts()).WillByDefault(ReturnRef(degraded_hosts_)); + ON_CALL(*this, degradedHostsPtr()).WillByDefault(Invoke([this]() { + return std::make_shared(degraded_hosts_); + })); + ON_CALL(*this, excludedHosts()).WillByDefault(ReturnRef(excluded_hosts_)); + ON_CALL(*this, excludedHostsPtr()).WillByDefault(Invoke([this]() { + return std::make_shared(excluded_hosts_); + })); + ON_CALL(*this, hostsPerLocality()).WillByDefault(Invoke([this]() -> const HostsPerLocality& { + return *hosts_per_locality_; + })); + ON_CALL(*this, hostsPerLocalityPtr()).WillByDefault(Return(hosts_per_locality_)); + ON_CALL(*this, healthyHostsPerLocality()) + .WillByDefault( + Invoke([this]() -> const HostsPerLocality& { return *healthy_hosts_per_locality_; })); + ON_CALL(*this, healthyHostsPerLocalityPtr()).WillByDefault(Return(healthy_hosts_per_locality_)); + ON_CALL(*this, degradedHostsPerLocality()) + .WillByDefault( + Invoke([this]() -> const HostsPerLocality& { return *degraded_hosts_per_locality_; })); + ON_CALL(*this, degradedHostsPerLocalityPtr()).WillByDefault(Return(degraded_hosts_per_locality_)); + ON_CALL(*this, excludedHostsPerLocality()) + .WillByDefault( + Invoke([this]() -> const HostsPerLocality& { return *excluded_hosts_per_locality_; })); + ON_CALL(*this, excludedHostsPerLocalityPtr()).WillByDefault(Return(excluded_hosts_per_locality_)); + ON_CALL(*this, localityWeights()).WillByDefault(Invoke([this]() -> LocalityWeightsConstSharedPtr { + return locality_weights_; + })); +} + +MockHostSet::~MockHostSet() = default; + +} // namespace Upstream + +} // namespace Envoy diff --git a/test/mocks/upstream/host_set.h 
b/test/mocks/upstream/host_set.h new file mode 100644 index 0000000000000..95ed9f90ee882 --- /dev/null +++ b/test/mocks/upstream/host_set.h @@ -0,0 +1,69 @@ +#pragma once + +#include "envoy/upstream/upstream.h" + +#include "common/common/callback_impl.h" +#include "common/upstream/upstream_impl.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Upstream { +class MockHostSet : public HostSet { +public: + MockHostSet(uint32_t priority = 0, + uint32_t overprovisioning_factor = kDefaultOverProvisioningFactor); + ~MockHostSet() override; + + void runCallbacks(const HostVector added, const HostVector removed) { + member_update_cb_helper_.runCallbacks(priority(), added, removed); + } + + Common::CallbackHandle* addMemberUpdateCb(PrioritySet::PriorityUpdateCb callback) { + return member_update_cb_helper_.add(callback); + } + + // Upstream::HostSet + MOCK_METHOD(const HostVector&, hosts, (), (const)); + MOCK_METHOD(HostVectorConstSharedPtr, hostsPtr, (), (const)); + MOCK_METHOD(const HostVector&, healthyHosts, (), (const)); + MOCK_METHOD(HealthyHostVectorConstSharedPtr, healthyHostsPtr, (), (const)); + MOCK_METHOD(const HostVector&, degradedHosts, (), (const)); + MOCK_METHOD(DegradedHostVectorConstSharedPtr, degradedHostsPtr, (), (const)); + MOCK_METHOD(const HostVector&, excludedHosts, (), (const)); + MOCK_METHOD(ExcludedHostVectorConstSharedPtr, excludedHostsPtr, (), (const)); + MOCK_METHOD(const HostsPerLocality&, hostsPerLocality, (), (const)); + MOCK_METHOD(HostsPerLocalityConstSharedPtr, hostsPerLocalityPtr, (), (const)); + MOCK_METHOD(const HostsPerLocality&, healthyHostsPerLocality, (), (const)); + MOCK_METHOD(HostsPerLocalityConstSharedPtr, healthyHostsPerLocalityPtr, (), (const)); + MOCK_METHOD(const HostsPerLocality&, degradedHostsPerLocality, (), (const)); + MOCK_METHOD(HostsPerLocalityConstSharedPtr, degradedHostsPerLocalityPtr, (), (const)); + MOCK_METHOD(const HostsPerLocality&, excludedHostsPerLocality, (), (const)); + 
MOCK_METHOD(HostsPerLocalityConstSharedPtr, excludedHostsPerLocalityPtr, (), (const)); + MOCK_METHOD(LocalityWeightsConstSharedPtr, localityWeights, (), (const)); + MOCK_METHOD(absl::optional, chooseHealthyLocality, ()); + MOCK_METHOD(absl::optional, chooseDegradedLocality, ()); + MOCK_METHOD(uint32_t, priority, (), (const)); + uint32_t overprovisioningFactor() const override { return overprovisioning_factor_; } + void setOverprovisioningFactor(const uint32_t overprovisioning_factor) { + overprovisioning_factor_ = overprovisioning_factor; + } + + HostVector hosts_; + HostVector healthy_hosts_; + HostVector degraded_hosts_; + HostVector excluded_hosts_; + HostsPerLocalitySharedPtr hosts_per_locality_{new HostsPerLocalityImpl()}; + HostsPerLocalitySharedPtr healthy_hosts_per_locality_{new HostsPerLocalityImpl()}; + HostsPerLocalitySharedPtr degraded_hosts_per_locality_{new HostsPerLocalityImpl()}; + HostsPerLocalitySharedPtr excluded_hosts_per_locality_{new HostsPerLocalityImpl()}; + LocalityWeightsConstSharedPtr locality_weights_{{}}; + Common::CallbackManager member_update_cb_helper_; + uint32_t priority_{}; + uint32_t overprovisioning_factor_{}; + bool run_in_panic_mode_ = false; +}; +} // namespace Upstream + +} // namespace Envoy diff --git a/test/mocks/upstream/load_balancer.cc b/test/mocks/upstream/load_balancer.cc new file mode 100644 index 0000000000000..3cdb79d405dbf --- /dev/null +++ b/test/mocks/upstream/load_balancer.cc @@ -0,0 +1,15 @@ +#include "load_balancer.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Upstream { +using ::testing::_; +using ::testing::Return; +MockLoadBalancer::MockLoadBalancer() { ON_CALL(*this, chooseHost(_)).WillByDefault(Return(host_)); } + +MockLoadBalancer::~MockLoadBalancer() = default; + +} // namespace Upstream +} // namespace Envoy diff --git a/test/mocks/upstream/load_balancer.h b/test/mocks/upstream/load_balancer.h new file mode 100644 index 0000000000000..364b6a7eb1d32 --- 
/dev/null +++ b/test/mocks/upstream/load_balancer.h @@ -0,0 +1,23 @@ +#pragma once + +#include "envoy/upstream/load_balancer.h" + +#include "test/mocks/upstream/host.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Upstream { +class MockLoadBalancer : public LoadBalancer { +public: + MockLoadBalancer(); + ~MockLoadBalancer() override; + + // Upstream::LoadBalancer + MOCK_METHOD(HostConstSharedPtr, chooseHost, (LoadBalancerContext * context)); + + std::shared_ptr host_{new MockHost()}; +}; +} // namespace Upstream +} // namespace Envoy diff --git a/test/mocks/upstream/load_balancer_context.cc b/test/mocks/upstream/load_balancer_context.cc index 5a424b07b8672..21e16847e8c37 100644 --- a/test/mocks/upstream/load_balancer_context.cc +++ b/test/mocks/upstream/load_balancer_context.cc @@ -10,7 +10,7 @@ MockLoadBalancerContext::MockLoadBalancerContext() { // By default, set loads which treat everything as healthy in the first priority. priority_load_.healthy_priority_load_ = HealthyLoad({100}); priority_load_.degraded_priority_load_ = DegradedLoad({0}); - ON_CALL(*this, determinePriorityLoad(_, _)).WillByDefault(ReturnRef(priority_load_)); + ON_CALL(*this, determinePriorityLoad(_, _, _)).WillByDefault(ReturnRef(priority_load_)); } MockLoadBalancerContext::~MockLoadBalancerContext() = default; diff --git a/test/mocks/upstream/load_balancer_context.h b/test/mocks/upstream/load_balancer_context.h index c716543a40863..553ae4e98e2d3 100644 --- a/test/mocks/upstream/load_balancer_context.h +++ b/test/mocks/upstream/load_balancer_context.h @@ -1,3 +1,4 @@ +#pragma once #include "envoy/upstream/load_balancer.h" #include "gmock/gmock.h" @@ -15,7 +16,8 @@ class MockLoadBalancerContext : public LoadBalancerContext { MOCK_METHOD(const Network::Connection*, downstreamConnection, (), (const)); MOCK_METHOD(const Http::RequestHeaderMap*, downstreamHeaders, (), (const)); MOCK_METHOD(const HealthyAndDegradedLoad&, determinePriorityLoad, - (const 
PrioritySet&, const HealthyAndDegradedLoad&)); + (const PrioritySet&, const HealthyAndDegradedLoad&, + const Upstream::RetryPriority::PriorityMappingFunc&)); MOCK_METHOD(bool, shouldSelectAnotherHost, (const Host&)); MOCK_METHOD(uint32_t, hostSelectionRetryCount, (), (const)); MOCK_METHOD(Network::Socket::OptionsSharedPtr, upstreamSocketOptions, (), (const)); diff --git a/test/mocks/upstream/mocks.cc b/test/mocks/upstream/mocks.cc deleted file mode 100644 index e5843f0bc64b6..0000000000000 --- a/test/mocks/upstream/mocks.cc +++ /dev/null @@ -1,181 +0,0 @@ -#include "test/mocks/upstream/mocks.h" - -#include -#include - -#include "envoy/upstream/load_balancer.h" - -#include "gmock/gmock.h" -#include "gtest/gtest.h" - -using testing::_; -using testing::Eq; -using testing::Invoke; -using testing::Return; -using testing::ReturnRef; -using testing::SaveArg; - -namespace Envoy { -namespace Upstream { - -MockHostSet::MockHostSet(uint32_t priority, uint32_t overprovisioning_factor) - : priority_(priority), overprovisioning_factor_(overprovisioning_factor) { - ON_CALL(*this, priority()).WillByDefault(Return(priority_)); - ON_CALL(*this, hosts()).WillByDefault(ReturnRef(hosts_)); - ON_CALL(*this, hostsPtr()).WillByDefault(Invoke([this]() { - return std::make_shared(hosts_); - })); - ON_CALL(*this, healthyHosts()).WillByDefault(ReturnRef(healthy_hosts_)); - ON_CALL(*this, healthyHostsPtr()).WillByDefault(Invoke([this]() { - return std::make_shared(healthy_hosts_); - })); - ON_CALL(*this, degradedHosts()).WillByDefault(ReturnRef(degraded_hosts_)); - ON_CALL(*this, degradedHostsPtr()).WillByDefault(Invoke([this]() { - return std::make_shared(degraded_hosts_); - })); - ON_CALL(*this, excludedHosts()).WillByDefault(ReturnRef(excluded_hosts_)); - ON_CALL(*this, excludedHostsPtr()).WillByDefault(Invoke([this]() { - return std::make_shared(excluded_hosts_); - })); - ON_CALL(*this, hostsPerLocality()).WillByDefault(Invoke([this]() -> const HostsPerLocality& { - return 
*hosts_per_locality_; - })); - ON_CALL(*this, hostsPerLocalityPtr()).WillByDefault(Return(hosts_per_locality_)); - ON_CALL(*this, healthyHostsPerLocality()) - .WillByDefault( - Invoke([this]() -> const HostsPerLocality& { return *healthy_hosts_per_locality_; })); - ON_CALL(*this, healthyHostsPerLocalityPtr()).WillByDefault(Return(healthy_hosts_per_locality_)); - ON_CALL(*this, degradedHostsPerLocality()) - .WillByDefault( - Invoke([this]() -> const HostsPerLocality& { return *degraded_hosts_per_locality_; })); - ON_CALL(*this, degradedHostsPerLocalityPtr()).WillByDefault(Return(degraded_hosts_per_locality_)); - ON_CALL(*this, excludedHostsPerLocality()) - .WillByDefault( - Invoke([this]() -> const HostsPerLocality& { return *excluded_hosts_per_locality_; })); - ON_CALL(*this, excludedHostsPerLocalityPtr()).WillByDefault(Return(excluded_hosts_per_locality_)); - ON_CALL(*this, localityWeights()).WillByDefault(Invoke([this]() -> LocalityWeightsConstSharedPtr { - return locality_weights_; - })); -} - -MockHostSet::~MockHostSet() = default; - -MockPrioritySet::MockPrioritySet() { - getHostSet(0); - ON_CALL(*this, hostSetsPerPriority()).WillByDefault(ReturnRef(host_sets_)); - ON_CALL(testing::Const(*this), hostSetsPerPriority()).WillByDefault(ReturnRef(host_sets_)); - ON_CALL(*this, addMemberUpdateCb(_)) - .WillByDefault(Invoke([this](PrioritySet::MemberUpdateCb cb) -> Common::CallbackHandle* { - return member_update_cb_helper_.add(cb); - })); - ON_CALL(*this, addPriorityUpdateCb(_)) - .WillByDefault(Invoke([this](PrioritySet::PriorityUpdateCb cb) -> Common::CallbackHandle* { - return priority_update_cb_helper_.add(cb); - })); -} - -MockPrioritySet::~MockPrioritySet() = default; - -HostSet& MockPrioritySet::getHostSet(uint32_t priority) { - if (host_sets_.size() < priority + 1) { - for (size_t i = host_sets_.size(); i <= priority; ++i) { - auto host_set = new NiceMock(i); - host_sets_.push_back(HostSetPtr{host_set}); - host_set->addMemberUpdateCb([this](uint32_t 
priority, const HostVector& hosts_added, - const HostVector& hosts_removed) { - runUpdateCallbacks(priority, hosts_added, hosts_removed); - }); - } - } - return *host_sets_[priority]; -} -void MockPrioritySet::runUpdateCallbacks(uint32_t priority, const HostVector& hosts_added, - const HostVector& hosts_removed) { - member_update_cb_helper_.runCallbacks(hosts_added, hosts_removed); - priority_update_cb_helper_.runCallbacks(priority, hosts_added, hosts_removed); -} - -MockRetryPriority::~MockRetryPriority() = default; - -MockCluster::MockCluster() { - ON_CALL(*this, info()).WillByDefault(Return(info_)); - ON_CALL(*this, initialize(_)) - .WillByDefault(Invoke([this](std::function callback) -> void { - EXPECT_EQ(nullptr, initialize_callback_); - initialize_callback_ = callback; - })); -} - -MockCluster::~MockCluster() = default; - -MockClusterRealPrioritySet::MockClusterRealPrioritySet() = default; -MockClusterRealPrioritySet::~MockClusterRealPrioritySet() = default; - -MockClusterMockPrioritySet::MockClusterMockPrioritySet() = default; -MockClusterMockPrioritySet::~MockClusterMockPrioritySet() = default; - -MockLoadBalancer::MockLoadBalancer() { ON_CALL(*this, chooseHost(_)).WillByDefault(Return(host_)); } -MockLoadBalancer::~MockLoadBalancer() = default; - -MockThreadAwareLoadBalancer::MockThreadAwareLoadBalancer() = default; -MockThreadAwareLoadBalancer::~MockThreadAwareLoadBalancer() = default; - -MockThreadLocalCluster::MockThreadLocalCluster() { - ON_CALL(*this, prioritySet()).WillByDefault(ReturnRef(cluster_.priority_set_)); - ON_CALL(*this, info()).WillByDefault(Return(cluster_.info_)); - ON_CALL(*this, loadBalancer()).WillByDefault(ReturnRef(lb_)); -} - -MockThreadLocalCluster::~MockThreadLocalCluster() = default; - -MockClusterUpdateCallbacksHandle::MockClusterUpdateCallbacksHandle() = default; -MockClusterUpdateCallbacksHandle::~MockClusterUpdateCallbacksHandle() = default; - -MockClusterManager::MockClusterManager(TimeSource&) : MockClusterManager() {} - 
-MockClusterManager::MockClusterManager() { - ON_CALL(*this, httpConnPoolForCluster(_, _, _, _)).WillByDefault(Return(&conn_pool_)); - ON_CALL(*this, tcpConnPoolForCluster(_, _, _)).WillByDefault(Return(&tcp_conn_pool_)); - ON_CALL(*this, httpAsyncClientForCluster(_)).WillByDefault(ReturnRef(async_client_)); - ON_CALL(*this, httpAsyncClientForCluster(_)).WillByDefault((ReturnRef(async_client_))); - ON_CALL(*this, bindConfig()).WillByDefault(ReturnRef(bind_config_)); - ON_CALL(*this, adsMux()).WillByDefault(Return(ads_mux_)); - ON_CALL(*this, grpcAsyncClientManager()).WillByDefault(ReturnRef(async_client_manager_)); - ON_CALL(*this, localClusterName()).WillByDefault((ReturnRef(local_cluster_name_))); - - // Matches are LIFO so "" will match first. - ON_CALL(*this, get(_)).WillByDefault(Return(&thread_local_cluster_)); - ON_CALL(*this, get(Eq(""))).WillByDefault(Return(nullptr)); - ON_CALL(*this, subscriptionFactory()).WillByDefault(ReturnRef(subscription_factory_)); -} - -MockClusterManager::~MockClusterManager() = default; - -MockHealthChecker::MockHealthChecker() { - ON_CALL(*this, addHostCheckCompleteCb(_)).WillByDefault(Invoke([this](HostStatusCb cb) -> void { - callbacks_.push_back(cb); - })); -} - -MockHealthChecker::~MockHealthChecker() = default; - -MockCdsApi::MockCdsApi() { - ON_CALL(*this, setInitializedCb(_)).WillByDefault(SaveArg<0>(&initialized_callback_)); -} - -MockCdsApi::~MockCdsApi() = default; - -MockClusterUpdateCallbacks::MockClusterUpdateCallbacks() = default; -MockClusterUpdateCallbacks::~MockClusterUpdateCallbacks() = default; - -MockClusterInfoFactory::MockClusterInfoFactory() = default; -MockClusterInfoFactory::~MockClusterInfoFactory() = default; - -MockRetryHostPredicate::MockRetryHostPredicate() = default; -MockRetryHostPredicate::~MockRetryHostPredicate() = default; - -MockClusterManagerFactory::MockClusterManagerFactory() = default; -MockClusterManagerFactory::~MockClusterManagerFactory() = default; - -} // namespace Upstream -} // 
namespace Envoy diff --git a/test/mocks/upstream/mocks.h b/test/mocks/upstream/mocks.h index 2e659cd59464d..879280b0aef13 100644 --- a/test/mocks/upstream/mocks.h +++ b/test/mocks/upstream/mocks.h @@ -1,10 +1,6 @@ #pragma once -#include -#include -#include -#include -#include +// NOLINT(namespace-envoy) #include "envoy/config/bootstrap/v3/bootstrap.pb.h" #include "envoy/config/cluster/v3/cluster.pb.h" @@ -28,407 +24,26 @@ #include "test/mocks/secret/mocks.h" #include "test/mocks/stats/mocks.h" #include "test/mocks/tcp/mocks.h" +#include "test/mocks/upstream/basic_resource_limit.h" +#include "test/mocks/upstream/cds_api.h" +#include "test/mocks/upstream/cluster.h" #include "test/mocks/upstream/cluster_info.h" +#include "test/mocks/upstream/cluster_info_factory.h" +#include "test/mocks/upstream/cluster_manager.h" +#include "test/mocks/upstream/cluster_manager_factory.h" +#include "test/mocks/upstream/cluster_priority_set.h" +#include "test/mocks/upstream/cluster_real_priority_set.h" +#include "test/mocks/upstream/cluster_update_callbacks.h" +#include "test/mocks/upstream/cluster_update_callbacks_handle.h" +#include "test/mocks/upstream/health_check_event_logger.h" +#include "test/mocks/upstream/health_checker.h" +#include "test/mocks/upstream/host_set.h" +#include "test/mocks/upstream/load_balancer.h" #include "test/mocks/upstream/load_balancer_context.h" - -#include "gmock/gmock.h" -#include "gtest/gtest.h" - -using testing::NiceMock; - -namespace Envoy { -namespace Upstream { - -class MockHostSet : public HostSet { -public: - MockHostSet(uint32_t priority = 0, - uint32_t overprovisioning_factor = kDefaultOverProvisioningFactor); - ~MockHostSet() override; - - void runCallbacks(const HostVector added, const HostVector removed) { - member_update_cb_helper_.runCallbacks(priority(), added, removed); - } - - Common::CallbackHandle* addMemberUpdateCb(PrioritySet::PriorityUpdateCb callback) { - return member_update_cb_helper_.add(callback); - } - - // Upstream::HostSet - 
MOCK_METHOD(const HostVector&, hosts, (), (const)); - MOCK_METHOD(HostVectorConstSharedPtr, hostsPtr, (), (const)); - MOCK_METHOD(const HostVector&, healthyHosts, (), (const)); - MOCK_METHOD(HealthyHostVectorConstSharedPtr, healthyHostsPtr, (), (const)); - MOCK_METHOD(const HostVector&, degradedHosts, (), (const)); - MOCK_METHOD(DegradedHostVectorConstSharedPtr, degradedHostsPtr, (), (const)); - MOCK_METHOD(const HostVector&, excludedHosts, (), (const)); - MOCK_METHOD(ExcludedHostVectorConstSharedPtr, excludedHostsPtr, (), (const)); - MOCK_METHOD(const HostsPerLocality&, hostsPerLocality, (), (const)); - MOCK_METHOD(HostsPerLocalityConstSharedPtr, hostsPerLocalityPtr, (), (const)); - MOCK_METHOD(const HostsPerLocality&, healthyHostsPerLocality, (), (const)); - MOCK_METHOD(HostsPerLocalityConstSharedPtr, healthyHostsPerLocalityPtr, (), (const)); - MOCK_METHOD(const HostsPerLocality&, degradedHostsPerLocality, (), (const)); - MOCK_METHOD(HostsPerLocalityConstSharedPtr, degradedHostsPerLocalityPtr, (), (const)); - MOCK_METHOD(const HostsPerLocality&, excludedHostsPerLocality, (), (const)); - MOCK_METHOD(HostsPerLocalityConstSharedPtr, excludedHostsPerLocalityPtr, (), (const)); - MOCK_METHOD(LocalityWeightsConstSharedPtr, localityWeights, (), (const)); - MOCK_METHOD(absl::optional, chooseHealthyLocality, ()); - MOCK_METHOD(absl::optional, chooseDegradedLocality, ()); - MOCK_METHOD(uint32_t, priority, (), (const)); - uint32_t overprovisioningFactor() const override { return overprovisioning_factor_; } - void setOverprovisioningFactor(const uint32_t overprovisioning_factor) { - overprovisioning_factor_ = overprovisioning_factor; - } - - HostVector hosts_; - HostVector healthy_hosts_; - HostVector degraded_hosts_; - HostVector excluded_hosts_; - HostsPerLocalitySharedPtr hosts_per_locality_{new HostsPerLocalityImpl()}; - HostsPerLocalitySharedPtr healthy_hosts_per_locality_{new HostsPerLocalityImpl()}; - HostsPerLocalitySharedPtr degraded_hosts_per_locality_{new 
HostsPerLocalityImpl()}; - HostsPerLocalitySharedPtr excluded_hosts_per_locality_{new HostsPerLocalityImpl()}; - LocalityWeightsConstSharedPtr locality_weights_{{}}; - Common::CallbackManager member_update_cb_helper_; - uint32_t priority_{}; - uint32_t overprovisioning_factor_{}; - bool run_in_panic_mode_ = false; -}; - -class MockPrioritySet : public PrioritySet { -public: - MockPrioritySet(); - ~MockPrioritySet() override; - - HostSet& getHostSet(uint32_t priority); - void runUpdateCallbacks(uint32_t priority, const HostVector& hosts_added, - const HostVector& hosts_removed); - - MOCK_METHOD(Common::CallbackHandle*, addMemberUpdateCb, (MemberUpdateCb callback), (const)); - MOCK_METHOD(Common::CallbackHandle*, addPriorityUpdateCb, (PriorityUpdateCb callback), (const)); - MOCK_METHOD(const std::vector&, hostSetsPerPriority, (), (const)); - MOCK_METHOD(std::vector&, hostSetsPerPriority, ()); - MOCK_METHOD(void, updateHosts, - (uint32_t priority, UpdateHostsParams&& update_hosts_params, - LocalityWeightsConstSharedPtr locality_weights, const HostVector& hosts_added, - const HostVector& hosts_removed, absl::optional overprovisioning_factor)); - MOCK_METHOD(void, batchHostUpdate, (BatchUpdateCb&)); - - MockHostSet* getMockHostSet(uint32_t priority) { - getHostSet(priority); // Ensure the host set exists. 
- return reinterpret_cast(host_sets_[priority].get()); - } - - std::vector host_sets_; - Common::CallbackManager member_update_cb_helper_; - Common::CallbackManager - priority_update_cb_helper_; -}; - -class MockRetryPriority : public RetryPriority { -public: - MockRetryPriority(const HealthyLoad& healthy_priority_load, - const DegradedLoad& degraded_priority_load) - : priority_load_({healthy_priority_load, degraded_priority_load}) {} - MockRetryPriority(const MockRetryPriority& other) : priority_load_(other.priority_load_) {} - ~MockRetryPriority() override; - - const HealthyAndDegradedLoad& determinePriorityLoad(const PrioritySet&, - const HealthyAndDegradedLoad&) override { - return priority_load_; - } - - MOCK_METHOD(void, onHostAttempted, (HostDescriptionConstSharedPtr)); - -private: - const HealthyAndDegradedLoad priority_load_; -}; - -class MockRetryPriorityFactory : public RetryPriorityFactory { -public: - MockRetryPriorityFactory(const MockRetryPriority& retry_priority) - : retry_priority_(retry_priority) {} - RetryPrioritySharedPtr createRetryPriority(const Protobuf::Message&, - ProtobufMessage::ValidationVisitor&, - uint32_t) override { - return std::make_shared>(retry_priority_); - } - - std::string name() const override { return "envoy.test_retry_priority"; } - ProtobufTypes::MessagePtr createEmptyConfigProto() override { - // Using Struct instead of a custom per-filter empty config proto - // This is only allowed in tests. 
- return ProtobufTypes::MessagePtr{new Envoy::ProtobufWkt::Struct()}; - } - -private: - const MockRetryPriority& retry_priority_; -}; - -class MockCluster : public Cluster { -public: - MockCluster(); - ~MockCluster() override; - - // Upstream::Cluster - MOCK_METHOD(HealthChecker*, healthChecker, ()); - MOCK_METHOD(ClusterInfoConstSharedPtr, info, (), (const)); - MOCK_METHOD(Outlier::Detector*, outlierDetector, ()); - MOCK_METHOD(const Outlier::Detector*, outlierDetector, (), (const)); - MOCK_METHOD(void, initialize, (std::function callback)); - MOCK_METHOD(InitializePhase, initializePhase, (), (const)); - MOCK_METHOD(const Network::Address::InstanceConstSharedPtr&, sourceAddress, (), (const)); - - std::shared_ptr info_{new NiceMock()}; - std::function initialize_callback_; - Network::Address::InstanceConstSharedPtr source_address_; -}; - -// Note that we could template the two implementations below, but to avoid having to define the -// ctor/dtor (which is fairly expensive for mocks) in the header file we duplicate the code instead. - -// Use this when interaction with a real PrioritySet is needed, e.g. when update callbacks -// needs to be triggered. -class MockClusterRealPrioritySet : public MockCluster { -public: - MockClusterRealPrioritySet(); - ~MockClusterRealPrioritySet() override; - - // Upstream::Cluster - PrioritySetImpl& prioritySet() override { return priority_set_; } - const PrioritySet& prioritySet() const override { return priority_set_; } - - PrioritySetImpl priority_set_; -}; - -// Use this for additional convenience methods provided by MockPrioritySet. 
-class MockClusterMockPrioritySet : public MockCluster { -public: - MockClusterMockPrioritySet(); - ~MockClusterMockPrioritySet() override; - - // Upstream::Cluster - MockPrioritySet& prioritySet() override { return priority_set_; } - const PrioritySet& prioritySet() const override { return priority_set_; } - - NiceMock priority_set_; -}; - -class MockLoadBalancer : public LoadBalancer { -public: - MockLoadBalancer(); - ~MockLoadBalancer() override; - - // Upstream::LoadBalancer - MOCK_METHOD(HostConstSharedPtr, chooseHost, (LoadBalancerContext * context)); - - std::shared_ptr host_{new MockHost()}; -}; - -class MockThreadAwareLoadBalancer : public ThreadAwareLoadBalancer { -public: - MockThreadAwareLoadBalancer(); - ~MockThreadAwareLoadBalancer() override; - - // Upstream::ThreadAwareLoadBalancer - MOCK_METHOD(LoadBalancerFactorySharedPtr, factory, ()); - MOCK_METHOD(void, initialize, ()); -}; - -class MockThreadLocalCluster : public ThreadLocalCluster { -public: - MockThreadLocalCluster(); - ~MockThreadLocalCluster() override; - - // Upstream::ThreadLocalCluster - MOCK_METHOD(const PrioritySet&, prioritySet, ()); - MOCK_METHOD(ClusterInfoConstSharedPtr, info, ()); - MOCK_METHOD(LoadBalancer&, loadBalancer, ()); - - NiceMock cluster_; - NiceMock lb_; -}; - -class MockClusterManagerFactory : public ClusterManagerFactory { -public: - MockClusterManagerFactory(); - ~MockClusterManagerFactory() override; - - Secret::MockSecretManager& secretManager() override { return secret_manager_; }; - - MOCK_METHOD(ClusterManagerPtr, clusterManagerFromProto, - (const envoy::config::bootstrap::v3::Bootstrap& bootstrap)); - - MOCK_METHOD(Http::ConnectionPool::InstancePtr, allocateConnPool, - (Event::Dispatcher & dispatcher, HostConstSharedPtr host, ResourcePriority priority, - Http::Protocol protocol, const Network::ConnectionSocket::OptionsSharedPtr& options, - const Network::TransportSocketOptionsSharedPtr& transport_socket_options)); - - 
MOCK_METHOD(Tcp::ConnectionPool::InstancePtr, allocateTcpConnPool, - (Event::Dispatcher & dispatcher, HostConstSharedPtr host, ResourcePriority priority, - const Network::ConnectionSocket::OptionsSharedPtr& options, - Network::TransportSocketOptionsSharedPtr)); - - MOCK_METHOD((std::pair), clusterFromProto, - (const envoy::config::cluster::v3::Cluster& cluster, ClusterManager& cm, - Outlier::EventLoggerSharedPtr outlier_event_logger, bool added_via_api)); - - MOCK_METHOD(CdsApiPtr, createCds, - (const envoy::config::core::v3::ConfigSource& cds_config, ClusterManager& cm)); - -private: - NiceMock secret_manager_; -}; - -class MockClusterUpdateCallbacksHandle : public ClusterUpdateCallbacksHandle { -public: - MockClusterUpdateCallbacksHandle(); - ~MockClusterUpdateCallbacksHandle() override; -}; - -class MockClusterManager : public ClusterManager { -public: - explicit MockClusterManager(TimeSource& time_source); - MockClusterManager(); - ~MockClusterManager() override; - - ClusterUpdateCallbacksHandlePtr - addThreadLocalClusterUpdateCallbacks(ClusterUpdateCallbacks& callbacks) override { - return ClusterUpdateCallbacksHandlePtr{addThreadLocalClusterUpdateCallbacks_(callbacks)}; - } - - Host::CreateConnectionData tcpConnForCluster(const std::string& cluster, - LoadBalancerContext* context) override { - MockHost::MockCreateConnectionData data = tcpConnForCluster_(cluster, context); - return {Network::ClientConnectionPtr{data.connection_}, data.host_description_}; - } - - ClusterManagerFactory& clusterManagerFactory() override { return cluster_manager_factory_; } - - // Upstream::ClusterManager - MOCK_METHOD(bool, addOrUpdateCluster, - (const envoy::config::cluster::v3::Cluster& cluster, - const std::string& version_info)); - MOCK_METHOD(void, setInitializedCb, (std::function)); - MOCK_METHOD(void, initializeSecondaryClusters, - (const envoy::config::bootstrap::v3::Bootstrap& bootstrap)); - MOCK_METHOD(ClusterInfoMap, clusters, ()); - MOCK_METHOD(ThreadLocalCluster*, 
get, (absl::string_view cluster)); - MOCK_METHOD(Http::ConnectionPool::Instance*, httpConnPoolForCluster, - (const std::string& cluster, ResourcePriority priority, Http::Protocol protocol, - LoadBalancerContext* context)); - MOCK_METHOD(Tcp::ConnectionPool::Instance*, tcpConnPoolForCluster, - (const std::string& cluster, ResourcePriority priority, - LoadBalancerContext* context)); - MOCK_METHOD(MockHost::MockCreateConnectionData, tcpConnForCluster_, - (const std::string& cluster, LoadBalancerContext* context)); - MOCK_METHOD(Http::AsyncClient&, httpAsyncClientForCluster, (const std::string& cluster)); - MOCK_METHOD(bool, removeCluster, (const std::string& cluster)); - MOCK_METHOD(void, shutdown, ()); - MOCK_METHOD(const envoy::config::core::v3::BindConfig&, bindConfig, (), (const)); - MOCK_METHOD(Config::GrpcMuxSharedPtr, adsMux, ()); - MOCK_METHOD(Grpc::AsyncClientManager&, grpcAsyncClientManager, ()); - MOCK_METHOD(const std::string, versionInfo, (), (const)); - MOCK_METHOD(const absl::optional&, localClusterName, (), (const)); - MOCK_METHOD(ClusterUpdateCallbacksHandle*, addThreadLocalClusterUpdateCallbacks_, - (ClusterUpdateCallbacks & callbacks)); - MOCK_METHOD(Config::SubscriptionFactory&, subscriptionFactory, ()); - - NiceMock conn_pool_; - NiceMock async_client_; - NiceMock tcp_conn_pool_; - NiceMock thread_local_cluster_; - envoy::config::core::v3::BindConfig bind_config_; - std::shared_ptr> ads_mux_; - NiceMock async_client_manager_; - absl::optional local_cluster_name_; - NiceMock cluster_manager_factory_; - NiceMock subscription_factory_; -}; - -class MockHealthChecker : public HealthChecker { -public: - MockHealthChecker(); - ~MockHealthChecker() override; - - MOCK_METHOD(void, addHostCheckCompleteCb, (HostStatusCb callback)); - MOCK_METHOD(void, start, ()); - - void runCallbacks(Upstream::HostSharedPtr host, HealthTransition changed_state) { - for (const auto& callback : callbacks_) { - callback(host, changed_state); - } - } - - std::list callbacks_; 
-}; - -class MockHealthCheckEventLogger : public HealthCheckEventLogger { -public: - MOCK_METHOD(void, logEjectUnhealthy, - (envoy::data::core::v3::HealthCheckerType, const HostDescriptionConstSharedPtr&, - envoy::data::core::v3::HealthCheckFailureType)); - MOCK_METHOD(void, logAddHealthy, - (envoy::data::core::v3::HealthCheckerType, const HostDescriptionConstSharedPtr&, - bool)); - MOCK_METHOD(void, logUnhealthy, - (envoy::data::core::v3::HealthCheckerType, const HostDescriptionConstSharedPtr&, - envoy::data::core::v3::HealthCheckFailureType, bool)); - MOCK_METHOD(void, logDegraded, - (envoy::data::core::v3::HealthCheckerType, const HostDescriptionConstSharedPtr&)); - MOCK_METHOD(void, logNoLongerDegraded, - (envoy::data::core::v3::HealthCheckerType, const HostDescriptionConstSharedPtr&)); -}; - -class MockCdsApi : public CdsApi { -public: - MockCdsApi(); - ~MockCdsApi() override; - - MOCK_METHOD(void, initialize, ()); - MOCK_METHOD(void, setInitializedCb, (std::function callback)); - MOCK_METHOD(const std::string, versionInfo, (), (const)); - - std::function initialized_callback_; -}; - -class MockClusterUpdateCallbacks : public ClusterUpdateCallbacks { -public: - MockClusterUpdateCallbacks(); - ~MockClusterUpdateCallbacks() override; - - MOCK_METHOD(void, onClusterAddOrUpdate, (ThreadLocalCluster & cluster)); - MOCK_METHOD(void, onClusterRemoval, (const std::string& cluster_name)); -}; - -class MockClusterInfoFactory : public ClusterInfoFactory, Logger::Loggable { -public: - MockClusterInfoFactory(); - ~MockClusterInfoFactory() override; - - MOCK_METHOD(ClusterInfoConstSharedPtr, createClusterInfo, (const CreateClusterInfoParams&)); -}; - -class MockRetryHostPredicate : public RetryHostPredicate { -public: - MockRetryHostPredicate(); - ~MockRetryHostPredicate() override; - - MOCK_METHOD(bool, shouldSelectAnotherHost, (const Host& candidate_host)); - MOCK_METHOD(void, onHostAttempted, (HostDescriptionConstSharedPtr)); -}; - -class TestRetryHostPredicateFactory : 
public RetryHostPredicateFactory { -public: - RetryHostPredicateSharedPtr createHostPredicate(const Protobuf::Message&, uint32_t) override { - return std::make_shared>(); - } - - std::string name() const override { return "envoy.test_host_predicate"; } - ProtobufTypes::MessagePtr createEmptyConfigProto() override { - // Using Struct instead of a custom per-filter empty config proto - // This is only allowed in tests. - return ProtobufTypes::MessagePtr{new Envoy::ProtobufWkt::Struct()}; - } -}; -} // namespace Upstream -} // namespace Envoy +#include "test/mocks/upstream/priority_set.h" +#include "test/mocks/upstream/retry_host_predicate.h" +#include "test/mocks/upstream/retry_priority.h" +#include "test/mocks/upstream/retry_priority_factory.h" +#include "test/mocks/upstream/test_retry_host_predicate_factory.h" +#include "test/mocks/upstream/thread_aware_load_balancer.h" +#include "test/mocks/upstream/thread_local_cluster.h" diff --git a/test/mocks/upstream/priority_set.cc b/test/mocks/upstream/priority_set.cc new file mode 100644 index 0000000000000..31724e10ce247 --- /dev/null +++ b/test/mocks/upstream/priority_set.cc @@ -0,0 +1,54 @@ +#include "priority_set.h" + +#include +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Upstream { + +using ::testing::_; +using ::testing::Invoke; +using ::testing::ReturnRef; + +MockPrioritySet::MockPrioritySet() { + getHostSet(0); + ON_CALL(*this, hostSetsPerPriority()).WillByDefault(ReturnRef(host_sets_)); + ON_CALL(testing::Const(*this), hostSetsPerPriority()).WillByDefault(ReturnRef(host_sets_)); + ON_CALL(*this, addMemberUpdateCb(_)) + .WillByDefault(Invoke([this](PrioritySet::MemberUpdateCb cb) -> Common::CallbackHandle* { + return member_update_cb_helper_.add(cb); + })); + ON_CALL(*this, addPriorityUpdateCb(_)) + .WillByDefault(Invoke([this](PrioritySet::PriorityUpdateCb cb) -> Common::CallbackHandle* { + return priority_update_cb_helper_.add(cb); + })); +} + 
+MockPrioritySet::~MockPrioritySet() = default; + +HostSet& MockPrioritySet::getHostSet(uint32_t priority) { + if (host_sets_.size() < priority + 1) { + for (size_t i = host_sets_.size(); i <= priority; ++i) { + auto host_set = new ::testing::NiceMock(i); + host_sets_.push_back(HostSetPtr{host_set}); + host_set->addMemberUpdateCb([this](uint32_t priority, const HostVector& hosts_added, + const HostVector& hosts_removed) { + runUpdateCallbacks(priority, hosts_added, hosts_removed); + }); + } + } + return *host_sets_[priority]; +} + +void MockPrioritySet::runUpdateCallbacks(uint32_t priority, const HostVector& hosts_added, + const HostVector& hosts_removed) { + member_update_cb_helper_.runCallbacks(hosts_added, hosts_removed); + priority_update_cb_helper_.runCallbacks(priority, hosts_added, hosts_removed); +} + +} // namespace Upstream + +} // namespace Envoy diff --git a/test/mocks/upstream/priority_set.h b/test/mocks/upstream/priority_set.h new file mode 100644 index 0000000000000..d4c6ee5fd82a3 --- /dev/null +++ b/test/mocks/upstream/priority_set.h @@ -0,0 +1,42 @@ +#pragma once + +#include "envoy/upstream/upstream.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "host_set.h" + +namespace Envoy { +namespace Upstream { +class MockPrioritySet : public PrioritySet { +public: + MockPrioritySet(); + ~MockPrioritySet() override; + + HostSet& getHostSet(uint32_t priority); + void runUpdateCallbacks(uint32_t priority, const HostVector& hosts_added, + const HostVector& hosts_removed); + + MOCK_METHOD(Common::CallbackHandle*, addMemberUpdateCb, (MemberUpdateCb callback), (const)); + MOCK_METHOD(Common::CallbackHandle*, addPriorityUpdateCb, (PriorityUpdateCb callback), (const)); + MOCK_METHOD(const std::vector&, hostSetsPerPriority, (), (const)); + MOCK_METHOD(std::vector&, hostSetsPerPriority, ()); + MOCK_METHOD(void, updateHosts, + (uint32_t priority, UpdateHostsParams&& update_hosts_params, + LocalityWeightsConstSharedPtr locality_weights, const 
HostVector& hosts_added, + const HostVector& hosts_removed, absl::optional overprovisioning_factor)); + MOCK_METHOD(void, batchHostUpdate, (BatchUpdateCb&)); + + MockHostSet* getMockHostSet(uint32_t priority) { + getHostSet(priority); // Ensure the host set exists. + return reinterpret_cast(host_sets_[priority].get()); + } + + std::vector host_sets_; + Common::CallbackManager member_update_cb_helper_; + Common::CallbackManager + priority_update_cb_helper_; +}; +} // namespace Upstream + +} // namespace Envoy diff --git a/test/mocks/upstream/retry_host_predicate.cc b/test/mocks/upstream/retry_host_predicate.cc new file mode 100644 index 0000000000000..a6233d8141c44 --- /dev/null +++ b/test/mocks/upstream/retry_host_predicate.cc @@ -0,0 +1,10 @@ +#include "retry_host_predicate.h" + +namespace Envoy { +namespace Upstream { +MockRetryHostPredicate::MockRetryHostPredicate() = default; + +MockRetryHostPredicate::~MockRetryHostPredicate() = default; + +} // namespace Upstream +} // namespace Envoy diff --git a/test/mocks/upstream/retry_host_predicate.h b/test/mocks/upstream/retry_host_predicate.h new file mode 100644 index 0000000000000..54ffb4749ee51 --- /dev/null +++ b/test/mocks/upstream/retry_host_predicate.h @@ -0,0 +1,19 @@ +#pragma once + +#include "envoy/upstream/retry.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Upstream { +class MockRetryHostPredicate : public RetryHostPredicate { +public: + MockRetryHostPredicate(); + ~MockRetryHostPredicate() override; + + MOCK_METHOD(bool, shouldSelectAnotherHost, (const Host& candidate_host)); + MOCK_METHOD(void, onHostAttempted, (HostDescriptionConstSharedPtr)); +}; +} // namespace Upstream +} // namespace Envoy diff --git a/test/mocks/upstream/retry_priority.cc b/test/mocks/upstream/retry_priority.cc new file mode 100644 index 0000000000000..9df9fe988f61f --- /dev/null +++ b/test/mocks/upstream/retry_priority.cc @@ -0,0 +1,7 @@ +#include "retry_priority.h" + +namespace Envoy { 
+namespace Upstream { +MockRetryPriority::~MockRetryPriority() = default; +} +} // namespace Envoy diff --git a/test/mocks/upstream/retry_priority.h b/test/mocks/upstream/retry_priority.h new file mode 100644 index 0000000000000..708bfa0fc33e5 --- /dev/null +++ b/test/mocks/upstream/retry_priority.h @@ -0,0 +1,30 @@ +#pragma once + +#include "envoy/upstream/retry.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Upstream { +class MockRetryPriority : public RetryPriority { +public: + MockRetryPriority(const HealthyLoad& healthy_priority_load, + const DegradedLoad& degraded_priority_load) + : priority_load_({healthy_priority_load, degraded_priority_load}) {} + MockRetryPriority(const MockRetryPriority& other) : priority_load_(other.priority_load_) {} + ~MockRetryPriority() override; + + const HealthyAndDegradedLoad& determinePriorityLoad(const PrioritySet&, + const HealthyAndDegradedLoad&, + const PriorityMappingFunc&) override { + return priority_load_; + } + + MOCK_METHOD(void, onHostAttempted, (HostDescriptionConstSharedPtr)); + +private: + const HealthyAndDegradedLoad priority_load_; +}; +} // namespace Upstream +} // namespace Envoy diff --git a/test/mocks/upstream/retry_priority_factory.h b/test/mocks/upstream/retry_priority_factory.h new file mode 100644 index 0000000000000..158359c22a482 --- /dev/null +++ b/test/mocks/upstream/retry_priority_factory.h @@ -0,0 +1,34 @@ +#pragma once + +#include "envoy/upstream/retry.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "retry_priority.h" + +namespace Envoy { +namespace Upstream { +using ::testing::NiceMock; +class MockRetryPriorityFactory : public RetryPriorityFactory { +public: + MockRetryPriorityFactory(const MockRetryPriority& retry_priority) + : retry_priority_(retry_priority) {} + RetryPrioritySharedPtr createRetryPriority(const Protobuf::Message&, + ProtobufMessage::ValidationVisitor&, + uint32_t) override { + return std::make_shared>(retry_priority_); 
+ } + + std::string name() const override { return "envoy.test_retry_priority"; } + ProtobufTypes::MessagePtr createEmptyConfigProto() override { + // Using Struct instead of a custom per-filter empty config proto + // This is only allowed in tests. + return ProtobufTypes::MessagePtr{new Envoy::ProtobufWkt::Struct()}; + } + +private: + const MockRetryPriority& retry_priority_; +}; +} // namespace Upstream + +} // namespace Envoy diff --git a/test/mocks/upstream/test_retry_host_predicate_factory.h b/test/mocks/upstream/test_retry_host_predicate_factory.h new file mode 100644 index 0000000000000..b436ae01bcacd --- /dev/null +++ b/test/mocks/upstream/test_retry_host_predicate_factory.h @@ -0,0 +1,26 @@ +#pragma once + +#include "envoy/upstream/retry.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "retry_host_predicate.h" + +namespace Envoy { +namespace Upstream { +using ::testing::NiceMock; +class TestRetryHostPredicateFactory : public RetryHostPredicateFactory { +public: + RetryHostPredicateSharedPtr createHostPredicate(const Protobuf::Message&, uint32_t) override { + return std::make_shared>(); + } + + std::string name() const override { return "envoy.test_host_predicate"; } + ProtobufTypes::MessagePtr createEmptyConfigProto() override { + // Using Struct instead of a custom per-filter empty config proto + // This is only allowed in tests. 
+ return ProtobufTypes::MessagePtr{new Envoy::ProtobufWkt::Struct()}; + } +}; +} // namespace Upstream +} // namespace Envoy diff --git a/test/mocks/upstream/thread_aware_load_balancer.cc b/test/mocks/upstream/thread_aware_load_balancer.cc new file mode 100644 index 0000000000000..46ed300485f28 --- /dev/null +++ b/test/mocks/upstream/thread_aware_load_balancer.cc @@ -0,0 +1,10 @@ +#include "thread_aware_load_balancer.h" + +namespace Envoy { +namespace Upstream { +MockThreadAwareLoadBalancer::MockThreadAwareLoadBalancer() = default; + +MockThreadAwareLoadBalancer::~MockThreadAwareLoadBalancer() = default; + +} // namespace Upstream +} // namespace Envoy diff --git a/test/mocks/upstream/thread_aware_load_balancer.h b/test/mocks/upstream/thread_aware_load_balancer.h new file mode 100644 index 0000000000000..49b0cea8176f9 --- /dev/null +++ b/test/mocks/upstream/thread_aware_load_balancer.h @@ -0,0 +1,20 @@ +#pragma once + +#include "envoy/upstream/load_balancer.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Upstream { +class MockThreadAwareLoadBalancer : public ThreadAwareLoadBalancer { +public: + MockThreadAwareLoadBalancer(); + ~MockThreadAwareLoadBalancer() override; + + // Upstream::ThreadAwareLoadBalancer + MOCK_METHOD(LoadBalancerFactorySharedPtr, factory, ()); + MOCK_METHOD(void, initialize, ()); +}; +} // namespace Upstream +} // namespace Envoy diff --git a/test/mocks/upstream/thread_local_cluster.cc b/test/mocks/upstream/thread_local_cluster.cc new file mode 100644 index 0000000000000..0ab62164b6ffb --- /dev/null +++ b/test/mocks/upstream/thread_local_cluster.cc @@ -0,0 +1,19 @@ +#include "thread_local_cluster.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Upstream { +using ::testing::Return; +using ::testing::ReturnRef; +MockThreadLocalCluster::MockThreadLocalCluster() { + ON_CALL(*this, prioritySet()).WillByDefault(ReturnRef(cluster_.priority_set_)); + ON_CALL(*this, 
info()).WillByDefault(Return(cluster_.info_)); + ON_CALL(*this, loadBalancer()).WillByDefault(ReturnRef(lb_)); +} + +MockThreadLocalCluster::~MockThreadLocalCluster() = default; + +} // namespace Upstream +} // namespace Envoy diff --git a/test/mocks/upstream/thread_local_cluster.h b/test/mocks/upstream/thread_local_cluster.h new file mode 100644 index 0000000000000..34eda63df6cb8 --- /dev/null +++ b/test/mocks/upstream/thread_local_cluster.h @@ -0,0 +1,27 @@ +#pragma once + +#include "envoy/upstream/thread_local_cluster.h" + +#include "cluster_priority_set.h" +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "load_balancer.h" + +namespace Envoy { +namespace Upstream { +using ::testing::NiceMock; +class MockThreadLocalCluster : public ThreadLocalCluster { +public: + MockThreadLocalCluster(); + ~MockThreadLocalCluster() override; + + // Upstream::ThreadLocalCluster + MOCK_METHOD(const PrioritySet&, prioritySet, ()); + MOCK_METHOD(ClusterInfoConstSharedPtr, info, ()); + MOCK_METHOD(LoadBalancer&, loadBalancer, ()); + + NiceMock cluster_; + NiceMock lb_; +}; +} // namespace Upstream +} // namespace Envoy diff --git a/test/per_file_coverage.sh b/test/per_file_coverage.sh new file mode 100755 index 0000000000000..670564628c10a --- /dev/null +++ b/test/per_file_coverage.sh @@ -0,0 +1,112 @@ +#!/bin/bash + +# directory:coverage_percent +# for existing directories with low coverage. 
+declare -a KNOWN_LOW_COVERAGE=( +"source/common/network:94.0" +"source/common/http/http3:50.0" +"source/common/tracing:94.9" +"source/common/protobuf:94.9" +"source/common/secret:95.2" +"source/common/singleton:95.1" +"source/common/api:92.1" +"source/common/api/posix:92.1" +"source/common/json:90.6" +"source/common/filesystem:96.1" +"source/common/filesystem/posix:93.7" +"source/common/thread_local:95.7" +"source/common/crypto:0.0" +"source/common/common/posix:94.1" +"source/common/signal:85.1" +"source/exe:93.7" +"source/extensions:96.3" +"source/extensions/common:94.4" +"source/extensions/common/crypto:91.5" +"source/extensions/common/wasm:85.4" +"source/extensions/common/wasm/v8:85.4" +"source/extensions/common/wasm/null:77.8" +"source/extensions/filters/network/sni_cluster:90.3" +"source/extensions/filters/network/sni_dynamic_forward_proxy:90.9" +"source/extensions/filters/network/dubbo_proxy:96.1" +"source/extensions/filters/network/dubbo_proxy/router:95.1" +"source/extensions/filters/network/mongo_proxy:94.0" +"source/extensions/filters/network/common:96.1" +"source/extensions/filters/network/common/redis:96.2" +"source/extensions/filters/http/dynamic_forward_proxy:92.1" +"source/extensions/filters/http/cache:80.7" +"source/extensions/filters/http/cache/simple_http_cache:84.5" +"source/extensions/filters/http/ip_tagging:91.2" +"source/extensions/filters/http/grpc_json_transcoder:93.3" +"source/extensions/filters/listener:96.0" +"source/extensions/filters/listener/tls_inspector:92.4" +"source/extensions/filters/listener/http_inspector:93.3" +"source/extensions/filters/udp:91.1" +"source/extensions/filters/udp/dns_filter:89.2" +"source/extensions/filters/common:94.7" +"source/extensions/filters/common/expr:92.2" +"source/extensions/filters/common/rbac:87.2" +"source/extensions/filters/common/fault:95.8" +"source/extensions/filters/common/lua:95.9" +"source/extensions/grpc_credentials:92.0" +"source/extensions/health_checkers:95.9" 
+"source/extensions/health_checkers/redis:95.9" +"source/extensions/quic_listeners:84.8" +"source/extensions/quic_listeners/quiche:84.8" +"source/extensions/stat_sinks/statsd:85.2" +"source/extensions/tracers/opencensus:91.2" +"source/extensions/tracers/xray:95.3" +"source/extensions/transport_sockets:94.9" +"source/extensions/transport_sockets/tap:95.6" +"source/extensions/transport_sockets/tls:94.2" +"source/extensions/transport_sockets/tls/private_key:76.9" +"source/server:94.7" +"source/server/config_validation:77.2" +"source/server/admin:95.6" +) + +[[ -z "${SRCDIR}" ]] && SRCDIR="${PWD}" +COVERAGE_DIR="${SRCDIR}"/generated/coverage +COVERAGE_DATA="${COVERAGE_DIR}/coverage.dat" + +FAILED=0 +DEFAULT_COVERAGE_THRESHOLD=96.6 +DIRECTORY_THRESHOLD=$DEFAULT_COVERAGE_THRESHOLD + +# Unfortunately we have a bunch of preexisting directory with low coverage. +# Set their low bar as their current coverage level. +get_coverage_target() { + DIRECTORY_THRESHOLD=$DEFAULT_COVERAGE_THRESHOLD + for FILE_PERCENT in ${KNOWN_LOW_COVERAGE[@]} + do + if [[ $FILE_PERCENT =~ "$1:" ]]; then + DIRECTORY_THRESHOLD=$(echo $FILE_PERCENT | sed 's/.*://') + return + fi + done +} + +# Make sure that for each directory with code, coverage doesn't dip +# below the default coverage threshold. +for DIRECTORY in $(find source/* -type d) +do + get_coverage_target $DIRECTORY + COVERAGE_VALUE=$(lcov -e $COVERAGE_DATA "$DIRECTORY/*" -o /dev/null | grep line | cut -d ' ' -f 4) + COVERAGE_VALUE=${COVERAGE_VALUE%?} + # If the coverage number is 'n' (no data found) there is 0% coverage. This is + # probably a directory without source code, so we skip checks. + # + # We could insist that we validate that 0% coverage directories are in a + # documented list, but instead of adding busy-work for folks adding + # non-source-containing directories, we trust reviewers to notice if there's + # absolutely no tests for a full directory. 
+ if [[ $COVERAGE_VALUE =~ "n" ]]; then + continue; + fi; + COVERAGE_FAILED=$(echo "${COVERAGE_VALUE}<${DIRECTORY_THRESHOLD}" | bc) + if test ${COVERAGE_FAILED} -eq 1; then + echo Code coverage for ${DIRECTORY} is lower than limit of ${DIRECTORY_THRESHOLD} \(${COVERAGE_VALUE}\) + FAILED=1 + fi +done + +exit $FAILED diff --git a/test/proto/BUILD b/test/proto/BUILD index 3e058be5ec2c5..f1ab09c623494 100644 --- a/test/proto/BUILD +++ b/test/proto/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -7,6 +5,8 @@ load( "envoy_proto_library", ) +licenses(["notice"]) # Apache 2 + envoy_package() exports_files(["bookstore.proto"]) diff --git a/test/proto/bookstore.proto b/test/proto/bookstore.proto index d814cb8f2e8df..62e697e219ee8 100644 --- a/test/proto/bookstore.proto +++ b/test/proto/bookstore.proto @@ -114,12 +114,53 @@ service Bookstore { body: "nested.content" }; } + rpc EchoResponseBodyPath(google.protobuf.Empty) returns (EchoBodyRequest) { + option (google.api.http) = { + get: "/echoResponseBodyPath" + response_body: "nested.content" + }; + } rpc EchoStruct(EchoStructReqResp) returns (EchoStructReqResp) { option (google.api.http) = { post: "/echoStruct" body: "content" }; } + // To test grpc transcoding with an unknown field. + // This could happen when the grpc server is using a updated proto with a new field, + // but Envoy transcoding config is still using the old version. 
+ rpc GetBigBook(google.protobuf.Empty) returns (OldBigBook) { + option (google.api.http) = { + get: "/bigbook" + }; + } +} + +service ServiceWithResponseBody { + rpc EchoStruct(EchoStructReqResp) returns (EchoStructReqResp) { + option (google.api.http) = { + get: "/echoStruct" + response_body: "content" + }; + } +} + +service ServiceWithInvalidRequestBodyPath { + rpc EchoStruct(EchoStructReqResp) returns (EchoStructReqResp) { + option (google.api.http) = { + get: "/echoStruct" + body: "unknown.field" + }; + } +} + +service ServiceWithInvalidResponseBodyPath { + rpc EchoStruct(EchoStructReqResp) returns (EchoStructReqResp) { + option (google.api.http) = { + get: "/echoStruct" + response_body: "unknown.field" + }; + } } // A shelf resource. @@ -248,7 +289,29 @@ message DeepNestedBody { } Nested nested = 100000000; } + message Extra { + int32 field = 1; + } Nested nested = 1000000; + Extra extra = 50; + } + message Extra { + string field = 1; } Nested nested = 1; -} \ No newline at end of file + Extra extra = 2; +} + +// gRPC server is using BigBook, but envoy transcoder filter is using +// OldBigBook with missing `field1`. +message BigBook { + string field1 = 1; + string field2 = 2; + string field3 = 3; +} + +// The BigBook message with missing `field1`. +message OldBigBook { + string field2 = 2; + string field3 = 3; +} diff --git a/test/run_envoy_bazel_coverage.sh b/test/run_envoy_bazel_coverage.sh index 825803f28f97f..7bf6cc92e36e2 100755 --- a/test/run_envoy_bazel_coverage.sh +++ b/test/run_envoy_bazel_coverage.sh @@ -4,6 +4,7 @@ set -e [[ -z "${SRCDIR}" ]] && SRCDIR="${PWD}" [[ -z "${VALIDATE_COVERAGE}" ]] && VALIDATE_COVERAGE=true +[[ -z "${FUZZ_COVERAGE}" ]] && FUZZ_COVERAGE=false echo "Starting run_envoy_bazel_coverage.sh..." echo " PWD=$(pwd)" @@ -21,40 +22,44 @@ else COVERAGE_TARGETS=//test/... fi -# Make sure //test/coverage:coverage_tests is up-to-date. 
-SCRIPT_DIR="$(realpath "$(dirname "$0")")" -"${SCRIPT_DIR}"/coverage/gen_build.sh ${COVERAGE_TARGETS} +if [[ "${FUZZ_COVERAGE}" == "true" ]]; then + # Filter targets to just fuzz tests. + COVERAGE_TARGETS=$(bazel query "attr("tags", "fuzz_target", ${COVERAGE_TARGETS})") + BAZEL_BUILD_OPTIONS+=" --config=fuzz-coverage --test_tag_filters=-nocoverage" +else + BAZEL_BUILD_OPTIONS+=" --config=test-coverage --test_tag_filters=-nocoverage,-fuzz_target" +fi -BAZEL_USE_LLVM_NATIVE_COVERAGE=1 GCOV=llvm-profdata bazel coverage ${BAZEL_BUILD_OPTIONS} \ - -c fastbuild --copt=-DNDEBUG --instrumentation_filter=//source/...,//include/... \ - --test_timeout=2000 --cxxopt="-DENVOY_CONFIG_COVERAGE=1" --test_output=errors \ - --test_arg="--log-path /dev/null" --test_arg="-l trace" --test_env=HEAPCHECK= \ - //test/coverage:coverage_tests +bazel coverage ${BAZEL_BUILD_OPTIONS} ${COVERAGE_TARGETS} -COVERAGE_DIR="${SRCDIR}"/generated/coverage -mkdir -p "${COVERAGE_DIR}" +# Collecting profile and testlogs +[[ -z "${ENVOY_BUILD_PROFILE}" ]] || cp -f "$(bazel info output_base)/command.profile.gz" "${ENVOY_BUILD_PROFILE}/coverage.profile.gz" || true +[[ -z "${ENVOY_BUILD_DIR}" ]] || find bazel-testlogs/ -name test.log | tar zcf "${ENVOY_BUILD_DIR}/testlogs.tar.gz" -T - -COVERAGE_IGNORE_REGEX="(/external/|pb\.(validate\.)?(h|cc)|/chromium_url/|/test/|/tmp|/tools/|/third_party/|/source/extensions/quic_listeners/quiche/)" -COVERAGE_BINARY="bazel-bin/test/coverage/coverage_tests" -COVERAGE_DATA="${COVERAGE_DIR}/coverage.dat" +COVERAGE_DIR="${SRCDIR}"/generated/coverage && [[ ${FUZZ_COVERAGE} == "true" ]] && COVERAGE_DIR="${SRCDIR}"/generated/fuzz_coverage -echo "Merging coverage data..." -llvm-profdata merge -sparse -o ${COVERAGE_DATA} $(find -L bazel-out/k8-fastbuild/testlogs/test/coverage/coverage_tests/ -name coverage.dat) +rm -rf "${COVERAGE_DIR}" +mkdir -p "${COVERAGE_DIR}" -echo "Generating report..." 
-llvm-cov show "${COVERAGE_BINARY}" -instr-profile="${COVERAGE_DATA}" -Xdemangler=c++filt \ - -ignore-filename-regex="${COVERAGE_IGNORE_REGEX}" -output-dir=${COVERAGE_DIR} -format=html -sed -i -e 's|>proc/self/cwd/|>|g' "${COVERAGE_DIR}/index.html" -sed -i -e 's|>bazel-out/[^/]*/bin/\([^/]*\)/[^<]*/_virtual_includes/[^/]*|>\1|g' "${COVERAGE_DIR}/index.html" +COVERAGE_DATA="${COVERAGE_DIR}/coverage.dat" +cp bazel-out/_coverage/_coverage_report.dat "${COVERAGE_DATA}" -[[ -z "${ENVOY_COVERAGE_DIR}" ]] || rsync -av "${COVERAGE_DIR}"/ "${ENVOY_COVERAGE_DIR}" +COVERAGE_VALUE=$(genhtml --prefix ${PWD} --output "${COVERAGE_DIR}" "${COVERAGE_DATA}" | tee /dev/stderr | grep lines... | cut -d ' ' -f 4) +COVERAGE_VALUE=${COVERAGE_VALUE%?} -if [ "$VALIDATE_COVERAGE" == "true" ] +if [ "${FUZZ_COVERAGE}" == "true" ] then - COVERAGE_VALUE=$(llvm-cov export "${COVERAGE_BINARY}" -instr-profile="${COVERAGE_DATA}" \ - -ignore-filename-regex="${COVERAGE_IGNORE_REGEX}" -summary-only | \ - python3 -c "import sys, json; print(json.load(sys.stdin)['data'][0]['totals']['lines']['percent'])") - COVERAGE_THRESHOLD=97.0 + [[ -z "${ENVOY_FUZZ_COVERAGE_ARTIFACT}" ]] || tar zcf "${ENVOY_FUZZ_COVERAGE_ARTIFACT}" -C ${COVERAGE_DIR} --transform 's/^\./fuzz_coverage/' . +else + [[ -z "${ENVOY_COVERAGE_ARTIFACT}" ]] || tar zcf "${ENVOY_COVERAGE_ARTIFACT}" -C ${COVERAGE_DIR} --transform 's/^\./coverage/' . +fi + +if [[ "$VALIDATE_COVERAGE" == "true" ]]; then + if [[ "${FUZZ_COVERAGE}" == "true" ]]; then + COVERAGE_THRESHOLD=27.0 + else + COVERAGE_THRESHOLD=96.5 + fi COVERAGE_FAILED=$(echo "${COVERAGE_VALUE}<${COVERAGE_THRESHOLD}" | bc) if test ${COVERAGE_FAILED} -eq 1; then echo Code coverage ${COVERAGE_VALUE} is lower than limit of ${COVERAGE_THRESHOLD} @@ -63,4 +68,19 @@ then echo Code coverage ${COVERAGE_VALUE} is good and higher than limit of ${COVERAGE_THRESHOLD} fi fi + +# We want to allow per_file_coverage to fail without exiting this script. 
+set +e +if [[ "$VALIDATE_COVERAGE" == "true" ]] && [[ "{FUZZ_COVERAGE}" == "false" ]]; then + echo "Checking per-extension coverage" + output=$(./test/per_file_coverage.sh) + + if [ $? -eq 1 ]; then + echo Per-extension coverage failed: + echo $output + exit 1 + fi + echo Per-extension coverage passed. +fi + echo "HTML coverage report is in ${COVERAGE_DIR}/index.html" diff --git a/test/server/BUILD b/test/server/BUILD index eb90f57bfaaac..4718ce6669fb4 100644 --- a/test/server/BUILD +++ b/test/server/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_benchmark_test", @@ -13,6 +11,8 @@ load( load("//source/extensions:all_extensions.bzl", "envoy_all_extensions") load("//bazel:repositories.bzl", "PPC_SKIP_TARGETS", "WINDOWS_SKIP_TARGETS") +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( @@ -20,10 +20,12 @@ envoy_cc_test( srcs = ["api_listener_test.cc"], deps = [ ":utility_lib", - "//source/server:api_listener_lib", - "//source/server:listener_lib", + "//source/server:listener_manager_lib", "//test/mocks/network:network_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:instance_mocks", + "//test/mocks/server:listener_component_factory_mocks", + "//test/mocks/server:worker_factory_mocks", + "//test/mocks/server:worker_mocks", "//test/test_common:utility_lib", "@envoy_api//envoy/config/listener/v3:pkg_cc_proto", ], @@ -56,7 +58,7 @@ envoy_cc_test( "//test/common/upstream:utility_lib", "//test/mocks:common_lib", "//test/mocks/network:network_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:instance_mocks", "//test/test_common:environment_lib", "//test/test_common:utility_lib", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", @@ -70,13 +72,15 @@ envoy_cc_test( srcs = ["connection_handler_test.cc"], deps = [ "//source/common/common:utility_lib", + "//source/common/config:utility_lib", "//source/common/network:address_lib", 
"//source/common/network:connection_balancer_lib", + "//source/common/network:udp_default_writer_config", "//source/common/stats:stats_lib", "//source/server:active_raw_udp_listener_config", "//source/server:connection_handler_lib", + "//test/mocks/api:api_mocks", "//test/mocks/network:network_mocks", - "//test/mocks/server:server_mocks", "//test/test_common:network_utility_lib", "//test/test_common:threadsafe_singleton_injector_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", @@ -89,7 +93,7 @@ envoy_cc_test( srcs = ["drain_manager_impl_test.cc"], deps = [ "//source/server:drain_manager_lib", - "//test/mocks/server:server_mocks", + "//test/mocks/server:instance_mocks", "@envoy_api//envoy/config/listener/v3:pkg_cc_proto", ], ) @@ -130,7 +134,7 @@ envoy_cc_test( "//source/common/stats:stats_lib", "//source/server:guarddog_lib", "//test/mocks:common_lib", - "//test/mocks/server:server_mocks", + "//test/mocks/server:main_mocks", "//test/mocks/stats:stats_mocks", "//test/test_common:simulated_time_system_lib", "//test/test_common:utility_lib", @@ -188,8 +192,10 @@ envoy_cc_test( "//source/common/protobuf:utility_lib", "//source/server:lds_api_lib", "//test/mocks/config:config_mocks", + "//test/mocks/init:init_mocks", "//test/mocks/protobuf:protobuf_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:listener_manager_mocks", + "//test/mocks/upstream:upstream_mocks", "//test/test_common:environment_lib", "//test/test_common:utility_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", @@ -204,13 +210,18 @@ envoy_cc_test_library( data = ["//test/extensions/transport_sockets/tls/test_data:certs"], deps = [ "//source/common/init:manager_lib", - "//source/server:api_listener_lib", - "//source/server:listener_lib", + "//source/server:listener_manager_lib", "//test/mocks/init:init_mocks", "//test/mocks/network:network_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:drain_manager_mocks", + "//test/mocks/server:guard_dog_mocks", + 
"//test/mocks/server:instance_mocks", + "//test/mocks/server:listener_component_factory_mocks", + "//test/mocks/server:worker_factory_mocks", + "//test/mocks/server:worker_mocks", "//test/test_common:environment_lib", "//test/test_common:simulated_time_system_lib", + "//test/test_common:test_runtime_lib", "//test/test_common:test_time_lib", "//test/test_common:threadsafe_singleton_injector_lib", "@envoy_api//envoy/admin/v3:pkg_cc_proto", @@ -222,6 +233,7 @@ envoy_cc_test_library( envoy_cc_test( name = "listener_manager_impl_test", srcs = ["listener_manager_impl_test.cc"], + tags = ["fails_on_windows"], deps = [ ":listener_manager_impl_test_lib", ":utility_lib", @@ -233,6 +245,7 @@ envoy_cc_test( "//source/common/network:utility_lib", "//source/common/protobuf", "//source/extensions/filters/listener/original_dst:config", + "//source/extensions/filters/listener/proxy_protocol:config", "//source/extensions/filters/listener/tls_inspector:config", "//source/extensions/filters/network/http_connection_manager:config", "//source/extensions/filters/network/tcp_proxy:config", @@ -262,6 +275,7 @@ envoy_cc_test( ":utility_lib", "//source/extensions/quic_listeners/quiche:active_quic_listener_config_lib", "//source/extensions/quic_listeners/quiche:quic_transport_socket_factory_lib", + "//source/extensions/quic_listeners/quiche:udp_gso_batch_writer_config_lib", "//source/extensions/transport_sockets/raw_buffer:config", "//test/test_common:threadsafe_singleton_injector_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", @@ -286,11 +300,11 @@ envoy_cc_test( "//source/extensions/transport_sockets/raw_buffer:config", "//source/extensions/transport_sockets/tls:config", "//source/extensions/transport_sockets/tls:ssl_socket_lib", - "//source/server:api_listener_lib", "//source/server:filter_chain_manager_lib", - "//source/server:listener_lib", + "//source/server:listener_manager_lib", "//test/mocks/network:network_mocks", - "//test/mocks/server:server_mocks", + 
"//test/mocks/server:drain_manager_mocks", + "//test/mocks/server:factory_context_mocks", "//test/test_common:environment_lib", "//test/test_common:registry_lib", "//test/test_common:simulated_time_system_lib", @@ -306,13 +320,11 @@ envoy_cc_fuzz_test( corpus = "server_corpus", deps = [ "//source/common/thread_local:thread_local_lib", - "//source/server:proto_descriptors_lib", "//source/server:server_lib", "//test/integration:integration_lib", - "//test/mocks/server:server_mocks", - "//test/mocks/stats:stats_mocks", + "//test/mocks/server:options_mocks", + "//test/mocks/server:hot_restart_mocks", "//test/test_common:environment_lib", - "//test/test_common:test_time_lib", ] + select({ "//bazel:windows_x86_64": envoy_all_extensions(WINDOWS_SKIP_TARGETS), "//bazel:linux_ppc": envoy_all_extensions(PPC_SKIP_TARGETS), @@ -344,13 +356,11 @@ envoy_cc_test( ":static_validation_test_data", ], deps = [ - "//source/common/common:version_lib", + "//source/common/version:version_lib", "//source/extensions/access_loggers/file:config", "//source/extensions/filters/http/buffer:config", - "//source/extensions/filters/http/dynamo:config", "//source/extensions/filters/http/grpc_http1_bridge:config", "//source/extensions/filters/http/health_check:config", - "//source/extensions/filters/http/ratelimit:config", "//source/extensions/filters/http/router:config", "//source/extensions/filters/network/http_connection_manager:config", "//source/extensions/filters/network/redis_proxy:config", @@ -358,9 +368,14 @@ envoy_cc_test( "//source/extensions/tracers/zipkin:config", "//source/server:process_context_lib", "//source/server:server_lib", + "//test/common/config:dummy_config_proto_cc_proto", "//test/common/stats:stat_test_utility_lib", "//test/integration:integration_lib", - "//test/mocks/server:server_mocks", + "//test/mocks/server:bootstrap_extension_factory_mocks", + "//test/mocks/server:hot_restart_mocks", + "//test/mocks/server:instance_mocks", + "//test/mocks/server:options_mocks", + 
"//test/mocks/server:overload_manager_mocks", "//test/mocks/stats:stats_mocks", "//test/test_common:registry_lib", "//test/test_common:simulated_time_system_lib", @@ -400,7 +415,9 @@ envoy_cc_test( "//source/common/event:dispatcher_lib", "//source/server:worker_lib", "//test/mocks/network:network_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:guard_dog_mocks", + "//test/mocks/server:instance_mocks", + "//test/mocks/server:overload_manager_mocks", "//test/mocks/thread_local:thread_local_mocks", "//test/test_common:utility_lib", ], @@ -418,7 +435,7 @@ envoy_cc_benchmark_binary( "//source/server:filter_chain_manager_lib", "//test/test_common:environment_lib", "//test/mocks/network:network_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", # tranport socket config registration "//source/extensions/transport_sockets/tls:config", ], diff --git a/test/server/http/BUILD b/test/server/admin/BUILD similarity index 63% rename from test/server/http/BUILD rename to test/server/admin/BUILD index 51d73c7346889..b2af4c15f6f0d 100644 --- a/test/server/http/BUILD +++ b/test/server/admin/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", @@ -7,6 +5,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test_library( @@ -14,9 +14,9 @@ envoy_cc_test_library( srcs = ["admin_instance.cc"], hdrs = ["admin_instance.h"], deps = [ - "//source/server/http:admin_lib", + "//source/server/admin:admin_lib", "//test/mocks/runtime:runtime_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:instance_mocks", "//test/test_common:environment_lib", "//test/test_common:network_utility_lib", ], @@ -31,13 +31,11 @@ envoy_cc_test( "//include/envoy/runtime:runtime_interface", "//source/common/http:message_lib", "//source/common/json:json_loader_lib", - "//source/common/profiler:profiler_lib", "//source/common/protobuf", 
"//source/common/protobuf:utility_lib", "//source/common/stats:symbol_table_creator_lib", "//source/common/stats:thread_local_store_lib", - "//source/extensions/transport_sockets/tls:context_config_lib", - "//source/server/http:admin_lib", + "//source/server/admin:admin_lib", "//test/mocks/runtime:runtime_mocks", "//test/mocks/server:server_mocks", "//test/test_common:environment_lib", @@ -55,8 +53,8 @@ envoy_cc_test( name = "admin_filter_test", srcs = ["admin_filter_test.cc"], deps = [ - "//source/server/http:admin_filter_lib", - "//test/mocks/server:server_mocks", + "//source/server/admin:admin_filter_lib", + "//test/mocks/server:instance_mocks", "//test/test_common:environment_lib", ], ) @@ -67,17 +65,60 @@ envoy_cc_test( deps = [ ":admin_instance_lib", "//source/common/stats:thread_local_store_lib", - "//source/server/http:stats_handler_lib", + "//source/server/admin:stats_handler_lib", "//test/test_common:logging_lib", "//test/test_common:utility_lib", ], ) +envoy_cc_test( + name = "runtime_handler_test", + srcs = ["runtime_handler_test.cc"], + deps = [":admin_instance_lib"], +) + +envoy_cc_test( + name = "prometheus_stats_test", + srcs = ["prometheus_stats_test.cc"], + deps = [ + "//source/server/admin:prometheus_stats_lib", + "//test/test_common:utility_lib", + ], +) + +envoy_cc_test( + name = "logs_handler_test", + srcs = ["logs_handler_test.cc"], + deps = [ + ":admin_instance_lib", + ], +) + +envoy_cc_test( + name = "profiling_handler_test", + srcs = ["profiling_handler_test.cc"], + deps = [ + ":admin_instance_lib", + "//test/test_common:logging_lib", + ], +) + +envoy_cc_test( + name = "server_info_handler_test", + srcs = ["server_info_handler_test.cc"], + deps = [ + ":admin_instance_lib", + "//source/extensions/transport_sockets/tls:context_config_lib", + "//test/test_common:logging_lib", + "@envoy_api//envoy/admin/v3:pkg_cc_proto", + ], +) + envoy_cc_test( name = "config_tracker_impl_test", srcs = ["config_tracker_impl_test.cc"], deps = [ - 
"//source/server/http:config_tracker_lib", + "//source/server/admin:config_tracker_lib", "//test/mocks:common_lib", ], ) diff --git a/test/server/http/admin_filter_test.cc b/test/server/admin/admin_filter_test.cc similarity index 91% rename from test/server/http/admin_filter_test.cc rename to test/server/admin/admin_filter_test.cc index 7dad3e63d3f4e..ee51cdecc1698 100644 --- a/test/server/http/admin_filter_test.cc +++ b/test/server/admin/admin_filter_test.cc @@ -1,6 +1,6 @@ -#include "server/http/admin_filter.h" +#include "server/admin/admin_filter.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/instance.h" #include "test/test_common/environment.h" #include "gmock/gmock.h" @@ -45,6 +45,8 @@ TEST_P(AdminFilterTest, HeaderOnly) { EXPECT_CALL(callbacks_, encodeHeaders_(_, false)); EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, filter_.decodeHeaders(request_headers_, true)); + ASSERT_TRUE(callbacks_.stream_info_.responseCodeDetails().has_value()); + EXPECT_EQ(callbacks_.stream_info_.responseCodeDetails().value(), "admin_filter_response"); } TEST_P(AdminFilterTest, Body) { diff --git a/test/server/http/admin_instance.cc b/test/server/admin/admin_instance.cc similarity index 98% rename from test/server/http/admin_instance.cc rename to test/server/admin/admin_instance.cc index 279e5b1d590c0..a9126957c7c2b 100644 --- a/test/server/http/admin_instance.cc +++ b/test/server/admin/admin_instance.cc @@ -1,4 +1,4 @@ -#include "test/server/http/admin_instance.h" +#include "test/server/admin/admin_instance.h" namespace Envoy { namespace Server { diff --git a/test/server/http/admin_instance.h b/test/server/admin/admin_instance.h similarity index 94% rename from test/server/http/admin_instance.h rename to test/server/admin/admin_instance.h index aaec8f7a98b8c..7092d973c00e1 100644 --- a/test/server/http/admin_instance.h +++ b/test/server/admin/admin_instance.h @@ -1,10 +1,10 @@ #pragma once -#include "server/http/admin.h" +#include "server/admin/admin.h" 
#include "test/mocks/http/mocks.h" #include "test/mocks/runtime/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/instance.h" #include "test/test_common/environment.h" #include "test/test_common/network_utility.h" diff --git a/test/server/http/admin_test.cc b/test/server/admin/admin_test.cc similarity index 59% rename from test/server/http/admin_test.cc rename to test/server/admin/admin_test.cc index 6c3dcf15e3f90..478354eea7071 100644 --- a/test/server/http/admin_test.cc +++ b/test/server/admin/admin_test.cc @@ -1,27 +1,26 @@ #include #include +#include #include -#include +#include #include "envoy/admin/v3/clusters.pb.h" #include "envoy/admin/v3/config_dump.pb.h" -#include "envoy/admin/v3/memory.pb.h" #include "envoy/admin/v3/server_info.pb.h" #include "envoy/config/core/v3/base.pb.h" #include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" #include "envoy/extensions/transport_sockets/tls/v3/cert.pb.h" #include "envoy/json/json_object.h" -#include "envoy/runtime/runtime.h" +#include "envoy/upstream/outlier_detection.h" +#include "envoy/upstream/upstream.h" #include "common/http/message_impl.h" #include "common/json/json_loader.h" -#include "common/profiler/profiler.h" #include "common/protobuf/protobuf.h" #include "common/protobuf/utility.h" +#include "common/upstream/upstream_impl.h" -#include "extensions/transport_sockets/tls/context_config_impl.h" - -#include "test/server/http/admin_instance.h" +#include "test/server/admin/admin_instance.h" #include "test/test_common/logging.h" #include "test/test_common/printers.h" #include "test/test_common/utility.h" @@ -30,13 +29,9 @@ #include "gmock/gmock.h" #include "gtest/gtest.h" -using testing::AllOf; -using testing::Ge; using testing::HasSubstr; using testing::Invoke; using testing::NiceMock; -using testing::Property; -using testing::Ref; using testing::Return; using testing::ReturnPointee; using testing::ReturnRef; @@ -48,63 +43,9 @@ 
INSTANTIATE_TEST_SUITE_P(IpVersions, AdminInstanceTest, testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), TestUtility::ipTestParamsToString); -TEST_P(AdminInstanceTest, AdminCpuProfiler) { - Buffer::OwnedImpl data; - Http::ResponseHeaderMapImpl header_map; - - // Can only get code coverage of AdminImpl::handlerCpuProfiler stopProfiler with - // a real profiler linked in (successful call to startProfiler). -#ifdef PROFILER_AVAILABLE - EXPECT_EQ(Http::Code::OK, postCallback("/cpuprofiler?enable=y", header_map, data)); - EXPECT_TRUE(Profiler::Cpu::profilerEnabled()); -#else - EXPECT_EQ(Http::Code::InternalServerError, - postCallback("/cpuprofiler?enable=y", header_map, data)); - EXPECT_FALSE(Profiler::Cpu::profilerEnabled()); -#endif - - EXPECT_EQ(Http::Code::OK, postCallback("/cpuprofiler?enable=n", header_map, data)); - EXPECT_FALSE(Profiler::Cpu::profilerEnabled()); -} - -TEST_P(AdminInstanceTest, AdminHeapProfilerOnRepeatedRequest) { - Buffer::OwnedImpl data; - Http::ResponseHeaderMapImpl header_map; - auto repeatResultCode = Http::Code::BadRequest; -#ifndef PROFILER_AVAILABLE - repeatResultCode = Http::Code::NotImplemented; -#endif - - postCallback("/heapprofiler?enable=y", header_map, data); - EXPECT_EQ(repeatResultCode, postCallback("/heapprofiler?enable=y", header_map, data)); - - postCallback("/heapprofiler?enable=n", header_map, data); - EXPECT_EQ(repeatResultCode, postCallback("/heapprofiler?enable=n", header_map, data)); -} - -TEST_P(AdminInstanceTest, AdminHeapProfiler) { - Buffer::OwnedImpl data; - Http::ResponseHeaderMapImpl header_map; - - // The below flow need to begin with the profiler not running - Profiler::Heap::stopProfiler(); - -#ifdef PROFILER_AVAILABLE - EXPECT_EQ(Http::Code::OK, postCallback("/heapprofiler?enable=y", header_map, data)); - EXPECT_TRUE(Profiler::Heap::isProfilerStarted()); - EXPECT_EQ(Http::Code::OK, postCallback("/heapprofiler?enable=n", header_map, data)); -#else - EXPECT_EQ(Http::Code::NotImplemented, 
postCallback("/heapprofiler?enable=y", header_map, data)); - EXPECT_FALSE(Profiler::Heap::isProfilerStarted()); - EXPECT_EQ(Http::Code::NotImplemented, postCallback("/heapprofiler?enable=n", header_map, data)); -#endif - - EXPECT_FALSE(Profiler::Heap::isProfilerStarted()); -} - TEST_P(AdminInstanceTest, MutatesErrorWithGet) { Buffer::OwnedImpl data; - Http::ResponseHeaderMapImpl header_map; + Http::TestResponseHeaderMapImpl header_map; const std::string path("/healthcheck/fail"); // TODO(jmarantz): the call to getCallback should be made to fail, but as an interim we will // just issue a warning, so that scripts using curl GET commands to mutate state can be fixed. @@ -113,20 +54,6 @@ TEST_P(AdminInstanceTest, MutatesErrorWithGet) { EXPECT_EQ(Http::Code::MethodNotAllowed, getCallback(path, header_map, data))); } -TEST_P(AdminInstanceTest, AdminBadProfiler) { - Buffer::OwnedImpl data; - AdminImpl admin_bad_profile_path(TestEnvironment::temporaryPath("some/unlikely/bad/path.prof"), - server_); - Http::ResponseHeaderMapImpl header_map; - const absl::string_view post = Http::Headers::get().MethodValues.Post; - request_headers_.setMethod(post); - admin_filter_.decodeHeaders(request_headers_, false); - EXPECT_NO_LOGS(EXPECT_EQ(Http::Code::InternalServerError, - admin_bad_profile_path.runCallback("/cpuprofiler?enable=y", header_map, - data, admin_filter_))); - EXPECT_FALSE(Profiler::Cpu::profilerEnabled()); -} - TEST_P(AdminInstanceTest, WriteAddressToFile) { std::ifstream address_file(address_out_path_); std::string address_from_file; @@ -151,7 +78,7 @@ TEST_P(AdminInstanceTest, CustomHandler) { // Test removable handler. 
EXPECT_NO_LOGS(EXPECT_TRUE(admin_.addHandler("/foo/bar", "hello", callback, true, false))); - Http::ResponseHeaderMapImpl header_map; + Http::TestResponseHeaderMapImpl header_map; Buffer::OwnedImpl response; EXPECT_EQ(Http::Code::Accepted, getCallback("/foo/bar", header_map, response)); @@ -199,29 +126,29 @@ TEST_P(AdminInstanceTest, EscapeHelpTextWithPunctuation) { const std::string planets = "jupiter>saturn>mars"; EXPECT_TRUE(admin_.addHandler("/planets", planets, callback, true, false)); - Http::ResponseHeaderMapImpl header_map; + Http::TestResponseHeaderMapImpl header_map; Buffer::OwnedImpl response; EXPECT_EQ(Http::Code::OK, getCallback("/", header_map, response)); const Http::HeaderString& content_type = header_map.ContentType()->value(); EXPECT_THAT(std::string(content_type.getStringView()), testing::HasSubstr("text/html")); - EXPECT_EQ(-1, response.search(planets.data(), planets.size(), 0)); + EXPECT_EQ(-1, response.search(planets.data(), planets.size(), 0, 0)); const std::string escaped_planets = "jupiter>saturn>mars"; - EXPECT_NE(-1, response.search(escaped_planets.data(), escaped_planets.size(), 0)); + EXPECT_NE(-1, response.search(escaped_planets.data(), escaped_planets.size(), 0, 0)); } TEST_P(AdminInstanceTest, HelpUsesFormForMutations) { - Http::ResponseHeaderMapImpl header_map; + Http::TestResponseHeaderMapImpl header_map; Buffer::OwnedImpl response; EXPECT_EQ(Http::Code::OK, getCallback("/", header_map, response)); const std::string logging_action = "
(); msg->set_value("bar"); @@ -287,19 +214,296 @@ TEST_P(AdminInstanceTest, ConfigDumpMaintainsOrder) { // Run it multiple times and validate that order is preserved. for (size_t i = 0; i < 5; i++) { Buffer::OwnedImpl response; - Http::ResponseHeaderMapImpl header_map; + Http::TestResponseHeaderMapImpl header_map; EXPECT_EQ(Http::Code::OK, getCallback("/config_dump", header_map, response)); const std::string output = response.toString(); EXPECT_EQ(expected_json, output); } } +// helper method for adding host's info +void addHostInfo(NiceMock& host, const std::string& hostname, + const std::string& address_url, envoy::config::core::v3::Locality& locality, + const std::string& hostname_for_healthcheck, + const std::string& healthcheck_address_url, int weight, int priority) { + ON_CALL(host, locality()).WillByDefault(ReturnRef(locality)); + + Network::Address::InstanceConstSharedPtr address = Network::Utility::resolveUrl(address_url); + ON_CALL(host, address()).WillByDefault(Return(address)); + ON_CALL(host, hostname()).WillByDefault(ReturnRef(hostname)); + + ON_CALL(host, hostnameForHealthChecks()).WillByDefault(ReturnRef(hostname_for_healthcheck)); + Network::Address::InstanceConstSharedPtr healthcheck_address = + Network::Utility::resolveUrl(healthcheck_address_url); + ON_CALL(host, healthCheckAddress()).WillByDefault(Return(healthcheck_address)); + + auto metadata = std::make_shared(); + ON_CALL(host, metadata()).WillByDefault(Return(metadata)); + + ON_CALL(host, health()).WillByDefault(Return(Upstream::Host::Health::Healthy)); + + ON_CALL(host, weight()).WillByDefault(Return(weight)); + ON_CALL(host, priority()).WillByDefault(Return(priority)); +} + +// Test that using ?include_eds parameter adds EDS to the config dump. 
+TEST_P(AdminInstanceTest, ConfigDumpWithEndpoint) { + Upstream::ClusterManager::ClusterInfoMap cluster_map; + ON_CALL(server_.cluster_manager_, clusters()).WillByDefault(ReturnPointee(&cluster_map)); + + NiceMock cluster; + cluster_map.emplace(cluster.info_->name_, cluster); + + ON_CALL(*cluster.info_, addedViaApi()).WillByDefault(Return(false)); + + Upstream::MockHostSet* host_set = cluster.priority_set_.getMockHostSet(0); + auto host = std::make_shared>(); + host_set->hosts_.emplace_back(host); + + envoy::config::core::v3::Locality locality; + const std::string hostname_for_healthcheck = "test_hostname_healthcheck"; + const std::string hostname = "foo.com"; + + addHostInfo(*host, hostname, "tcp://1.2.3.4:80", locality, hostname_for_healthcheck, + "tcp://1.2.3.5:90", 5, 6); + + Buffer::OwnedImpl response; + Http::TestResponseHeaderMapImpl header_map; + EXPECT_EQ(Http::Code::OK, getCallback("/config_dump?include_eds", header_map, response)); + std::string output = response.toString(); + const std::string expected_json = R"EOF({ + "configs": [ + { + "@type": "type.googleapis.com/envoy.admin.v3.EndpointsConfigDump", + "static_endpoint_configs": [ + { + "endpoint_config": { + "@type": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", + "cluster_name": "fake_cluster", + "endpoints": [ + { + "locality": {}, + "lb_endpoints": [ + { + "endpoint": { + "address": { + "socket_address": { + "address": "1.2.3.4", + "port_value": 80 + } + }, + "health_check_config": { + "port_value": 90, + "hostname": "test_hostname_healthcheck" + }, + "hostname": "foo.com" + }, + "health_status": "HEALTHY", + "metadata": {}, + "load_balancing_weight": 5 + } + ], + "priority": 6 + } + ], + "policy": { + "overprovisioning_factor": 140 + } + } + } + ] + } + ] +} +)EOF"; + EXPECT_EQ(expected_json, output); +} + +// Test EDS config dump while multiple localities and priorities exist +TEST_P(AdminInstanceTest, ConfigDumpWithLocalityEndpoint) { + 
Upstream::ClusterManager::ClusterInfoMap cluster_map; + ON_CALL(server_.cluster_manager_, clusters()).WillByDefault(ReturnPointee(&cluster_map)); + + NiceMock cluster; + cluster_map.emplace(cluster.info_->name_, cluster); + + ON_CALL(*cluster.info_, addedViaApi()).WillByDefault(Return(false)); + + Upstream::MockHostSet* host_set_1 = cluster.priority_set_.getMockHostSet(0); + auto host_1 = std::make_shared>(); + host_set_1->hosts_.emplace_back(host_1); + + envoy::config::core::v3::Locality locality_1; + locality_1.set_region("oceania"); + locality_1.set_zone("hello"); + locality_1.set_sub_zone("world"); + + const std::string hostname_for_healthcheck = "test_hostname_healthcheck"; + const std::string hostname_1 = "foo.com"; + + addHostInfo(*host_1, hostname_1, "tcp://1.2.3.4:80", locality_1, hostname_for_healthcheck, + "tcp://1.2.3.5:90", 5, 6); + + auto host_2 = std::make_shared>(); + host_set_1->hosts_.emplace_back(host_2); + const std::string empty_hostname_for_healthcheck = ""; + const std::string hostname_2 = "boo.com"; + + addHostInfo(*host_2, hostname_2, "tcp://1.2.3.7:8", locality_1, empty_hostname_for_healthcheck, + "tcp://1.2.3.7:8", 3, 6); + + envoy::config::core::v3::Locality locality_2; + + auto host_3 = std::make_shared>(); + host_set_1->hosts_.emplace_back(host_3); + const std::string hostname_3 = "coo.com"; + + addHostInfo(*host_3, hostname_3, "tcp://1.2.3.8:8", locality_2, empty_hostname_for_healthcheck, + "tcp://1.2.3.8:8", 3, 4); + + std::vector locality_hosts = { + {Upstream::HostSharedPtr(host_1), Upstream::HostSharedPtr(host_2)}, + {Upstream::HostSharedPtr(host_3)}}; + auto hosts_per_locality = new Upstream::HostsPerLocalityImpl(std::move(locality_hosts), false); + + Upstream::LocalityWeightsConstSharedPtr locality_weights{new Upstream::LocalityWeights{1, 3}}; + ON_CALL(*host_set_1, hostsPerLocality()).WillByDefault(ReturnRef(*hosts_per_locality)); + ON_CALL(*host_set_1, localityWeights()).WillByDefault(Return(locality_weights)); + + 
Upstream::MockHostSet* host_set_2 = cluster.priority_set_.getMockHostSet(1); + auto host_4 = std::make_shared>(); + host_set_2->hosts_.emplace_back(host_4); + const std::string hostname_4 = "doo.com"; + + addHostInfo(*host_4, hostname_4, "tcp://1.2.3.9:8", locality_1, empty_hostname_for_healthcheck, + "tcp://1.2.3.9:8", 3, 2); + + Buffer::OwnedImpl response; + Http::TestResponseHeaderMapImpl header_map; + EXPECT_EQ(Http::Code::OK, getCallback("/config_dump?include_eds", header_map, response)); + std::string output = response.toString(); + const std::string expected_json = R"EOF({ + "configs": [ + { + "@type": "type.googleapis.com/envoy.admin.v3.EndpointsConfigDump", + "static_endpoint_configs": [ + { + "endpoint_config": { + "@type": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", + "cluster_name": "fake_cluster", + "endpoints": [ + { + "locality": { + "region": "oceania", + "zone": "hello", + "sub_zone": "world" + }, + "lb_endpoints": [ + { + "endpoint": { + "address": { + "socket_address": { + "address": "1.2.3.4", + "port_value": 80 + } + }, + "health_check_config": { + "port_value": 90, + "hostname": "test_hostname_healthcheck" + }, + "hostname": "foo.com" + }, + "health_status": "HEALTHY", + "metadata": {}, + "load_balancing_weight": 5 + }, + { + "endpoint": { + "address": { + "socket_address": { + "address": "1.2.3.7", + "port_value": 8 + } + }, + "health_check_config": {}, + "hostname": "boo.com" + }, + "health_status": "HEALTHY", + "metadata": {}, + "load_balancing_weight": 3 + } + ], + "load_balancing_weight": 1, + "priority": 6 + }, + { + "locality": {}, + "lb_endpoints": [ + { + "endpoint": { + "address": { + "socket_address": { + "address": "1.2.3.8", + "port_value": 8 + } + }, + "health_check_config": {}, + "hostname": "coo.com" + }, + "health_status": "HEALTHY", + "metadata": {}, + "load_balancing_weight": 3 + } + ], + "load_balancing_weight": 3, + "priority": 4 + }, + { + "locality": { + "region": "oceania", + "zone": "hello", + 
"sub_zone": "world" + }, + "lb_endpoints": [ + { + "endpoint": { + "address": { + "socket_address": { + "address": "1.2.3.9", + "port_value": 8 + } + }, + "health_check_config": {}, + "hostname": "doo.com" + }, + "health_status": "HEALTHY", + "metadata": {}, + "load_balancing_weight": 3 + } + ], + "priority": 2 + } + ], + "policy": { + "overprovisioning_factor": 140 + } + } + } + ] + } + ] +} +)EOF"; + EXPECT_EQ(expected_json, output); + delete (hosts_per_locality); +} + // Test that using the resource query parameter filters the config dump. // We add both static and dynamic listener config to the dump, but expect only // dynamic in the JSON with ?resource=dynamic_listeners. TEST_P(AdminInstanceTest, ConfigDumpFiltersByResource) { Buffer::OwnedImpl response; - Http::ResponseHeaderMapImpl header_map; + Http::TestResponseHeaderMapImpl header_map; auto listeners = admin_.getConfigTracker().add("listeners", [] { auto msg = std::make_unique(); auto dyn_listener = msg->add_dynamic_listeners(); @@ -325,12 +529,99 @@ TEST_P(AdminInstanceTest, ConfigDumpFiltersByResource) { EXPECT_EQ(expected_json, output); } +// Test that using the resource query parameter filters the config dump including EDS. +// We add both static and dynamic endpoint config to the dump, but expect only +// dynamic in the JSON with ?resource=dynamic_endpoint_configs. 
+TEST_P(AdminInstanceTest, ConfigDumpWithEndpointFiltersByResource) { + Upstream::ClusterManager::ClusterInfoMap cluster_map; + ON_CALL(server_.cluster_manager_, clusters()).WillByDefault(ReturnPointee(&cluster_map)); + + NiceMock cluster_1; + cluster_map.emplace(cluster_1.info_->name_, cluster_1); + + ON_CALL(*cluster_1.info_, addedViaApi()).WillByDefault(Return(true)); + + Upstream::MockHostSet* host_set = cluster_1.priority_set_.getMockHostSet(0); + auto host_1 = std::make_shared>(); + host_set->hosts_.emplace_back(host_1); + + envoy::config::core::v3::Locality locality; + const std::string hostname_for_healthcheck = "test_hostname_healthcheck"; + const std::string hostname_1 = "foo.com"; + + addHostInfo(*host_1, hostname_1, "tcp://1.2.3.4:80", locality, hostname_for_healthcheck, + "tcp://1.2.3.5:90", 5, 6); + + NiceMock cluster_2; + cluster_2.info_->name_ = "fake_cluster_2"; + cluster_map.emplace(cluster_2.info_->name_, cluster_2); + + ON_CALL(*cluster_2.info_, addedViaApi()).WillByDefault(Return(false)); + + Upstream::MockHostSet* host_set_2 = cluster_2.priority_set_.getMockHostSet(0); + auto host_2 = std::make_shared>(); + host_set_2->hosts_.emplace_back(host_2); + const std::string hostname_2 = "boo.com"; + + addHostInfo(*host_2, hostname_2, "tcp://1.2.3.5:8", locality, hostname_for_healthcheck, + "tcp://1.2.3.4:1", 3, 4); + + Buffer::OwnedImpl response; + Http::TestResponseHeaderMapImpl header_map; + EXPECT_EQ(Http::Code::OK, + getCallback("/config_dump?include_eds&resource=dynamic_endpoint_configs", header_map, + response)); + std::string output = response.toString(); + const std::string expected_json = R"EOF({ + "configs": [ + { + "@type": "type.googleapis.com/envoy.admin.v3.EndpointsConfigDump.DynamicEndpointConfig", + "endpoint_config": { + "@type": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", + "cluster_name": "fake_cluster", + "endpoints": [ + { + "locality": {}, + "lb_endpoints": [ + { + "endpoint": { + "address": { + 
"socket_address": { + "address": "1.2.3.4", + "port_value": 80 + } + }, + "health_check_config": { + "port_value": 90, + "hostname": "test_hostname_healthcheck" + }, + "hostname": "foo.com" + }, + "health_status": "HEALTHY", + "metadata": {}, + "load_balancing_weight": 5 + } + ], + "priority": 6 + } + ], + "policy": { + "overprovisioning_factor": 140 + } + } + } + ] +} +)EOF"; + EXPECT_EQ(expected_json, output); +} + // Test that using the mask query parameter filters the config dump. // We add both static and dynamic listener config to the dump, but expect only // dynamic in the JSON with ?mask=dynamic_listeners. TEST_P(AdminInstanceTest, ConfigDumpFiltersByMask) { Buffer::OwnedImpl response; - Http::ResponseHeaderMapImpl header_map; + Http::TestResponseHeaderMapImpl header_map; auto listeners = admin_.getConfigTracker().add("listeners", [] { auto msg = std::make_unique(); auto dyn_listener = msg->add_dynamic_listeners(); @@ -383,7 +674,7 @@ ProtobufTypes::MessagePtr testDumpClustersConfig() { // only the desired resource and the fields specified in the mask. TEST_P(AdminInstanceTest, ConfigDumpFiltersByResourceAndMask) { Buffer::OwnedImpl response; - Http::ResponseHeaderMapImpl header_map; + Http::TestResponseHeaderMapImpl header_map; auto clusters = admin_.getConfigTracker().add("clusters", testDumpClustersConfig); const std::string expected_json = R"EOF({ "configs": [ @@ -412,7 +703,7 @@ TEST_P(AdminInstanceTest, ConfigDumpFiltersByResourceAndMask) { // of the config dump and the fields present in the mask query parameter. TEST_P(AdminInstanceTest, ConfigDumpNonExistentMask) { Buffer::OwnedImpl response; - Http::ResponseHeaderMapImpl header_map; + Http::TestResponseHeaderMapImpl header_map; auto clusters = admin_.getConfigTracker().add("clusters", testDumpClustersConfig); const std::string expected_json = R"EOF({ "configs": [ @@ -432,7 +723,7 @@ TEST_P(AdminInstanceTest, ConfigDumpNonExistentMask) { // resource query parameter. 
TEST_P(AdminInstanceTest, ConfigDumpNonExistentResource) { Buffer::OwnedImpl response; - Http::ResponseHeaderMapImpl header_map; + Http::TestResponseHeaderMapImpl header_map; auto listeners = admin_.getConfigTracker().add("listeners", [] { auto msg = std::make_unique(); msg->set_value("listeners_config"); @@ -445,7 +736,7 @@ TEST_P(AdminInstanceTest, ConfigDumpNonExistentResource) { // repeated field. TEST_P(AdminInstanceTest, ConfigDumpResourceNotRepeated) { Buffer::OwnedImpl response; - Http::ResponseHeaderMapImpl header_map; + Http::TestResponseHeaderMapImpl header_map; auto clusters = admin_.getConfigTracker().add("clusters", [] { auto msg = std::make_unique(); msg->set_version_info("foo"); @@ -455,167 +746,6 @@ TEST_P(AdminInstanceTest, ConfigDumpResourceNotRepeated) { getCallback("/config_dump?resource=version_info", header_map, response)); } -TEST_P(AdminInstanceTest, Memory) { - Http::ResponseHeaderMapImpl header_map; - Buffer::OwnedImpl response; - EXPECT_EQ(Http::Code::OK, getCallback("/memory", header_map, response)); - const std::string output_json = response.toString(); - envoy::admin::v3::Memory output_proto; - TestUtility::loadFromJson(output_json, output_proto); - EXPECT_THAT(output_proto, AllOf(Property(&envoy::admin::v3::Memory::allocated, Ge(0)), - Property(&envoy::admin::v3::Memory::heap_size, Ge(0)), - Property(&envoy::admin::v3::Memory::pageheap_unmapped, Ge(0)), - Property(&envoy::admin::v3::Memory::pageheap_free, Ge(0)), - Property(&envoy::admin::v3::Memory::total_thread_cache, Ge(0)))); -} - -TEST_P(AdminInstanceTest, ContextThatReturnsNullCertDetails) { - Http::ResponseHeaderMapImpl header_map; - Buffer::OwnedImpl response; - - // Setup a context that returns null cert details. 
- testing::NiceMock factory_context; - envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext config; - Extensions::TransportSockets::Tls::ClientContextConfigImpl cfg(config, factory_context); - Stats::IsolatedStoreImpl store; - Envoy::Ssl::ClientContextSharedPtr client_ctx( - server_.sslContextManager().createSslClientContext(store, cfg)); - - const std::string expected_empty_json = R"EOF({ - "certificates": [ - { - "ca_cert": [], - "cert_chain": [] - } - ] -} -)EOF"; - - // Validate that cert details are null and /certs handles it correctly. - EXPECT_EQ(nullptr, client_ctx->getCaCertInformation()); - EXPECT_TRUE(client_ctx->getCertChainInformation().empty()); - EXPECT_EQ(Http::Code::OK, getCallback("/certs", header_map, response)); - EXPECT_EQ(expected_empty_json, response.toString()); -} - -TEST_P(AdminInstanceTest, Runtime) { - Http::ResponseHeaderMapImpl header_map; - Buffer::OwnedImpl response; - - Runtime::MockSnapshot snapshot; - Runtime::MockLoader loader; - auto layer1 = std::make_unique>(); - auto layer2 = std::make_unique>(); - Runtime::Snapshot::EntryMap entries2{{"string_key", {"override", {}, {}, {}, {}}}, - {"extra_key", {"bar", {}, {}, {}, {}}}}; - Runtime::Snapshot::EntryMap entries1{{"string_key", {"foo", {}, {}, {}, {}}}, - {"int_key", {"1", 1, {}, {}, {}}}, - {"other_key", {"bar", {}, {}, {}, {}}}}; - - ON_CALL(*layer1, name()).WillByDefault(testing::ReturnRefOfCopy(std::string{"layer1"})); - ON_CALL(*layer1, values()).WillByDefault(testing::ReturnRef(entries1)); - ON_CALL(*layer2, name()).WillByDefault(testing::ReturnRefOfCopy(std::string{"layer2"})); - ON_CALL(*layer2, values()).WillByDefault(testing::ReturnRef(entries2)); - - std::vector layers; - layers.push_back(std::move(layer1)); - layers.push_back(std::move(layer2)); - EXPECT_CALL(snapshot, getLayers()).WillRepeatedly(testing::ReturnRef(layers)); - - const std::string expected_json = R"EOF({ - "layers": [ - "layer1", - "layer2" - ], - "entries": { - "extra_key": { - 
"layer_values": [ - "", - "bar" - ], - "final_value": "bar" - }, - "int_key": { - "layer_values": [ - "1", - "" - ], - "final_value": "1" - }, - "other_key": { - "layer_values": [ - "bar", - "" - ], - "final_value": "bar" - }, - "string_key": { - "layer_values": [ - "foo", - "override" - ], - "final_value": "override" - } - } -})EOF"; - - EXPECT_CALL(loader, snapshot()).WillRepeatedly(testing::ReturnPointee(&snapshot)); - EXPECT_CALL(server_, runtime()).WillRepeatedly(testing::ReturnPointee(&loader)); - EXPECT_EQ(Http::Code::OK, getCallback("/runtime", header_map, response)); - EXPECT_THAT(expected_json, JsonStringEq(response.toString())); -} - -TEST_P(AdminInstanceTest, RuntimeModify) { - Http::ResponseHeaderMapImpl header_map; - Buffer::OwnedImpl response; - - Runtime::MockLoader loader; - EXPECT_CALL(server_, runtime()).WillRepeatedly(testing::ReturnPointee(&loader)); - - std::unordered_map overrides; - overrides["foo"] = "bar"; - overrides["x"] = "42"; - overrides["nothing"] = ""; - EXPECT_CALL(loader, mergeValues(overrides)).Times(1); - EXPECT_EQ(Http::Code::OK, - postCallback("/runtime_modify?foo=bar&x=42¬hing=", header_map, response)); - EXPECT_EQ("OK\n", response.toString()); -} - -TEST_P(AdminInstanceTest, RuntimeModifyParamsInBody) { - Runtime::MockLoader loader; - EXPECT_CALL(server_, runtime()).WillRepeatedly(testing::ReturnPointee(&loader)); - - const std::string key = "routing.traffic_shift.foo"; - const std::string value = "numerator: 1\ndenominator: TEN_THOUSAND\n"; - const std::unordered_map overrides = {{key, value}}; - EXPECT_CALL(loader, mergeValues(overrides)).Times(1); - - const std::string body = fmt::format("{}={}", key, value); - Http::ResponseHeaderMapImpl header_map; - Buffer::OwnedImpl response; - EXPECT_EQ(Http::Code::OK, runCallback("/runtime_modify", header_map, response, "POST", body)); - EXPECT_EQ("OK\n", response.toString()); -} - -TEST_P(AdminInstanceTest, RuntimeModifyNoArguments) { - Http::ResponseHeaderMapImpl header_map; - 
Buffer::OwnedImpl response; - - EXPECT_EQ(Http::Code::BadRequest, postCallback("/runtime_modify", header_map, response)); - EXPECT_TRUE(absl::StartsWith(response.toString(), "usage:")); -} - -TEST_P(AdminInstanceTest, ReopenLogs) { - Http::ResponseHeaderMapImpl header_map; - Buffer::OwnedImpl response; - testing::NiceMock access_log_manager_; - - EXPECT_CALL(server_, accessLogManager()).WillRepeatedly(ReturnRef(access_log_manager_)); - EXPECT_CALL(access_log_manager_, reopen()); - EXPECT_EQ(Http::Code::OK, postCallback("/reopen_logs", header_map, response)); -} - TEST_P(AdminInstanceTest, ClustersJson) { Upstream::ClusterManager::ClusterInfoMap cluster_map; ON_CALL(server_.cluster_manager_, clusters()).WillByDefault(ReturnPointee(&cluster_map)); @@ -699,7 +829,7 @@ TEST_P(AdminInstanceTest, ClustersJson) { ON_CALL(*host, priority()).WillByDefault(Return(6)); Buffer::OwnedImpl response; - Http::ResponseHeaderMapImpl header_map; + Http::TestResponseHeaderMapImpl header_map; EXPECT_EQ(Http::Code::OK, getCallback("/clusters?format=json", header_map, response)); std::string output_json = response.toString(); envoy::admin::v3::Clusters output_proto; @@ -821,119 +951,5 @@ fake_cluster::1.2.3.4:80::local_origin_success_rate::93.2 EXPECT_EQ(expected_text, response2.toString()); } -TEST_P(AdminInstanceTest, GetRequest) { - EXPECT_CALL(server_.options_, toCommandLineOptions()).WillRepeatedly(Invoke([] { - Server::CommandLineOptionsPtr command_line_options = - std::make_unique(); - command_line_options->set_restart_epoch(2); - command_line_options->set_service_cluster("cluster"); - return command_line_options; - })); - NiceMock initManager; - ON_CALL(server_, initManager()).WillByDefault(ReturnRef(initManager)); - ON_CALL(server_.hot_restart_, version()).WillByDefault(Return("foo_version")); - - { - Http::ResponseHeaderMapImpl response_headers; - std::string body; - - ON_CALL(initManager, state()).WillByDefault(Return(Init::Manager::State::Initialized)); - 
EXPECT_EQ(Http::Code::OK, admin_.request("/server_info", "GET", response_headers, body)); - envoy::admin::v3::ServerInfo server_info_proto; - EXPECT_THAT(std::string(response_headers.ContentType()->value().getStringView()), - HasSubstr("application/json")); - - // We only test that it parses as the proto and that some fields are correct, since - // values such as timestamps + Envoy version are tricky to test for. - TestUtility::loadFromJson(body, server_info_proto); - EXPECT_EQ(server_info_proto.state(), envoy::admin::v3::ServerInfo::LIVE); - EXPECT_EQ(server_info_proto.hot_restart_version(), "foo_version"); - EXPECT_EQ(server_info_proto.command_line_options().restart_epoch(), 2); - EXPECT_EQ(server_info_proto.command_line_options().service_cluster(), "cluster"); - } - - { - Http::ResponseHeaderMapImpl response_headers; - std::string body; - - ON_CALL(initManager, state()).WillByDefault(Return(Init::Manager::State::Uninitialized)); - EXPECT_EQ(Http::Code::OK, admin_.request("/server_info", "GET", response_headers, body)); - envoy::admin::v3::ServerInfo server_info_proto; - EXPECT_THAT(std::string(response_headers.ContentType()->value().getStringView()), - HasSubstr("application/json")); - - // We only test that it parses as the proto and that some fields are correct, since - // values such as timestamps + Envoy version are tricky to test for. 
- TestUtility::loadFromJson(body, server_info_proto); - EXPECT_EQ(server_info_proto.state(), envoy::admin::v3::ServerInfo::PRE_INITIALIZING); - EXPECT_EQ(server_info_proto.command_line_options().restart_epoch(), 2); - EXPECT_EQ(server_info_proto.command_line_options().service_cluster(), "cluster"); - } - - Http::ResponseHeaderMapImpl response_headers; - std::string body; - - ON_CALL(initManager, state()).WillByDefault(Return(Init::Manager::State::Initializing)); - EXPECT_EQ(Http::Code::OK, admin_.request("/server_info", "GET", response_headers, body)); - envoy::admin::v3::ServerInfo server_info_proto; - EXPECT_THAT(std::string(response_headers.ContentType()->value().getStringView()), - HasSubstr("application/json")); - - // We only test that it parses as the proto and that some fields are correct, since - // values such as timestamps + Envoy version are tricky to test for. - TestUtility::loadFromJson(body, server_info_proto); - EXPECT_EQ(server_info_proto.state(), envoy::admin::v3::ServerInfo::INITIALIZING); - EXPECT_EQ(server_info_proto.command_line_options().restart_epoch(), 2); - EXPECT_EQ(server_info_proto.command_line_options().service_cluster(), "cluster"); -} - -TEST_P(AdminInstanceTest, GetReadyRequest) { - NiceMock initManager; - ON_CALL(server_, initManager()).WillByDefault(ReturnRef(initManager)); - - { - Http::ResponseHeaderMapImpl response_headers; - std::string body; - - ON_CALL(initManager, state()).WillByDefault(Return(Init::Manager::State::Initialized)); - EXPECT_EQ(Http::Code::OK, admin_.request("/ready", "GET", response_headers, body)); - EXPECT_EQ(body, "LIVE\n"); - EXPECT_THAT(std::string(response_headers.ContentType()->value().getStringView()), - HasSubstr("text/plain")); - } - - { - Http::ResponseHeaderMapImpl response_headers; - std::string body; - - ON_CALL(initManager, state()).WillByDefault(Return(Init::Manager::State::Uninitialized)); - EXPECT_EQ(Http::Code::ServiceUnavailable, - admin_.request("/ready", "GET", response_headers, body)); 
- EXPECT_EQ(body, "PRE_INITIALIZING\n"); - EXPECT_THAT(std::string(response_headers.ContentType()->value().getStringView()), - HasSubstr("text/plain")); - } - - Http::ResponseHeaderMapImpl response_headers; - std::string body; - - ON_CALL(initManager, state()).WillByDefault(Return(Init::Manager::State::Initializing)); - EXPECT_EQ(Http::Code::ServiceUnavailable, - admin_.request("/ready", "GET", response_headers, body)); - EXPECT_EQ(body, "INITIALIZING\n"); - EXPECT_THAT(std::string(response_headers.ContentType()->value().getStringView()), - HasSubstr("text/plain")); -} - -TEST_P(AdminInstanceTest, PostRequest) { - Http::ResponseHeaderMapImpl response_headers; - std::string body; - EXPECT_NO_LOGS(EXPECT_EQ(Http::Code::OK, - admin_.request("/healthcheck/fail", "POST", response_headers, body))); - EXPECT_EQ(body, "OK\n"); - EXPECT_THAT(std::string(response_headers.ContentType()->value().getStringView()), - HasSubstr("text/plain")); -} - } // namespace Server } // namespace Envoy diff --git a/test/server/http/config_tracker_impl_test.cc b/test/server/admin/config_tracker_impl_test.cc similarity index 98% rename from test/server/http/config_tracker_impl_test.cc rename to test/server/admin/config_tracker_impl_test.cc index 2fcd777fca554..9388c2e2ef112 100644 --- a/test/server/http/config_tracker_impl_test.cc +++ b/test/server/admin/config_tracker_impl_test.cc @@ -1,4 +1,4 @@ -#include "server/http/config_tracker_impl.h" +#include "server/admin/config_tracker_impl.h" #include "test/mocks/common.h" diff --git a/test/server/admin/logs_handler_test.cc b/test/server/admin/logs_handler_test.cc new file mode 100644 index 0000000000000..9fc99c0c62250 --- /dev/null +++ b/test/server/admin/logs_handler_test.cc @@ -0,0 +1,21 @@ +#include "test/server/admin/admin_instance.h" + +namespace Envoy { +namespace Server { + +INSTANTIATE_TEST_SUITE_P(IpVersions, AdminInstanceTest, + testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + TestUtility::ipTestParamsToString); + 
+TEST_P(AdminInstanceTest, ReopenLogs) { + Http::TestResponseHeaderMapImpl header_map; + Buffer::OwnedImpl response; + testing::NiceMock access_log_manager_; + + EXPECT_CALL(server_, accessLogManager()).WillRepeatedly(ReturnRef(access_log_manager_)); + EXPECT_CALL(access_log_manager_, reopen()); + EXPECT_EQ(Http::Code::OK, postCallback("/reopen_logs", header_map, response)); +} + +} // namespace Server +} // namespace Envoy diff --git a/test/server/admin/profiling_handler_test.cc b/test/server/admin/profiling_handler_test.cc new file mode 100644 index 0000000000000..353bc780e87b7 --- /dev/null +++ b/test/server/admin/profiling_handler_test.cc @@ -0,0 +1,82 @@ +#include "common/profiler/profiler.h" + +#include "test/server/admin/admin_instance.h" +#include "test/test_common/logging.h" + +namespace Envoy { +namespace Server { + +INSTANTIATE_TEST_SUITE_P(IpVersions, AdminInstanceTest, + testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + TestUtility::ipTestParamsToString); + +TEST_P(AdminInstanceTest, AdminCpuProfiler) { + Buffer::OwnedImpl data; + Http::TestResponseHeaderMapImpl header_map; + + // Can only get code coverage of AdminImpl::handlerCpuProfiler stopProfiler with + // a real profiler linked in (successful call to startProfiler). 
+#ifdef PROFILER_AVAILABLE + EXPECT_EQ(Http::Code::OK, postCallback("/cpuprofiler?enable=y", header_map, data)); + EXPECT_TRUE(Profiler::Cpu::profilerEnabled()); +#else + EXPECT_EQ(Http::Code::InternalServerError, + postCallback("/cpuprofiler?enable=y", header_map, data)); + EXPECT_FALSE(Profiler::Cpu::profilerEnabled()); +#endif + + EXPECT_EQ(Http::Code::OK, postCallback("/cpuprofiler?enable=n", header_map, data)); + EXPECT_FALSE(Profiler::Cpu::profilerEnabled()); +} + +TEST_P(AdminInstanceTest, AdminHeapProfilerOnRepeatedRequest) { + Buffer::OwnedImpl data; + Http::TestResponseHeaderMapImpl header_map; + auto repeatResultCode = Http::Code::BadRequest; +#ifndef PROFILER_AVAILABLE + repeatResultCode = Http::Code::NotImplemented; +#endif + + postCallback("/heapprofiler?enable=y", header_map, data); + EXPECT_EQ(repeatResultCode, postCallback("/heapprofiler?enable=y", header_map, data)); + + postCallback("/heapprofiler?enable=n", header_map, data); + EXPECT_EQ(repeatResultCode, postCallback("/heapprofiler?enable=n", header_map, data)); +} + +TEST_P(AdminInstanceTest, AdminHeapProfiler) { + Buffer::OwnedImpl data; + Http::TestResponseHeaderMapImpl header_map; + + // The below flow need to begin with the profiler not running + Profiler::Heap::stopProfiler(); + +#ifdef PROFILER_AVAILABLE + EXPECT_EQ(Http::Code::OK, postCallback("/heapprofiler?enable=y", header_map, data)); + EXPECT_TRUE(Profiler::Heap::isProfilerStarted()); + EXPECT_EQ(Http::Code::OK, postCallback("/heapprofiler?enable=n", header_map, data)); +#else + EXPECT_EQ(Http::Code::NotImplemented, postCallback("/heapprofiler?enable=y", header_map, data)); + EXPECT_FALSE(Profiler::Heap::isProfilerStarted()); + EXPECT_EQ(Http::Code::NotImplemented, postCallback("/heapprofiler?enable=n", header_map, data)); +#endif + + EXPECT_FALSE(Profiler::Heap::isProfilerStarted()); +} + +TEST_P(AdminInstanceTest, AdminBadProfiler) { + Buffer::OwnedImpl data; + AdminImpl 
admin_bad_profile_path(TestEnvironment::temporaryPath("some/unlikely/bad/path.prof"), + server_); + Http::TestResponseHeaderMapImpl header_map; + const absl::string_view post = Http::Headers::get().MethodValues.Post; + request_headers_.setMethod(post); + admin_filter_.decodeHeaders(request_headers_, false); + EXPECT_NO_LOGS(EXPECT_EQ(Http::Code::InternalServerError, + admin_bad_profile_path.runCallback("/cpuprofiler?enable=y", header_map, + data, admin_filter_))); + EXPECT_FALSE(Profiler::Cpu::profilerEnabled()); +} + +} // namespace Server +} // namespace Envoy diff --git a/test/server/admin/prometheus_stats_test.cc b/test/server/admin/prometheus_stats_test.cc new file mode 100644 index 0000000000000..ee0cae35a0c30 --- /dev/null +++ b/test/server/admin/prometheus_stats_test.cc @@ -0,0 +1,705 @@ +#include + +#include "server/admin/prometheus_stats.h" + +#include "test/mocks/stats/mocks.h" +#include "test/test_common/utility.h" + +using testing::NiceMock; +using testing::ReturnRef; + +namespace Envoy { +namespace Server { + +class HistogramWrapper { +public: + HistogramWrapper() : histogram_(hist_alloc()) {} + + ~HistogramWrapper() { hist_free(histogram_); } + + const histogram_t* getHistogram() { return histogram_; } + + void setHistogramValues(const std::vector& values) { + for (uint64_t value : values) { + hist_insert_intscale(histogram_, value, 0, 1); + } + } + + void setHistogramValuesWithCounts(const std::vector>& values) { + for (std::pair cv : values) { + hist_insert_intscale(histogram_, cv.first, 0, cv.second); + } + } + +private: + histogram_t* histogram_; +}; + +class PrometheusStatsFormatterTest : public testing::Test { +protected: + PrometheusStatsFormatterTest() : alloc_(*symbol_table_), pool_(*symbol_table_) {} + + ~PrometheusStatsFormatterTest() override { clearStorage(); } + + void addCounter(const std::string& name, Stats::StatNameTagVector cluster_tags) { + Stats::StatNameManagedStorage name_storage(baseName(name, cluster_tags), *symbol_table_); + 
Stats::StatNameManagedStorage tag_extracted_name_storage(name, *symbol_table_); + counters_.push_back(alloc_.makeCounter(name_storage.statName(), + tag_extracted_name_storage.statName(), cluster_tags)); + } + + void addGauge(const std::string& name, Stats::StatNameTagVector cluster_tags) { + Stats::StatNameManagedStorage name_storage(baseName(name, cluster_tags), *symbol_table_); + Stats::StatNameManagedStorage tag_extracted_name_storage(name, *symbol_table_); + gauges_.push_back(alloc_.makeGauge(name_storage.statName(), + tag_extracted_name_storage.statName(), cluster_tags, + Stats::Gauge::ImportMode::Accumulate)); + } + + using MockHistogramSharedPtr = Stats::RefcountPtr>; + void addHistogram(MockHistogramSharedPtr histogram) { histograms_.push_back(histogram); } + + MockHistogramSharedPtr makeHistogram(const std::string& name, + Stats::StatNameTagVector cluster_tags) { + auto histogram = MockHistogramSharedPtr(new NiceMock()); + histogram->name_ = baseName(name, cluster_tags); + histogram->setTagExtractedName(name); + histogram->setTags(cluster_tags); + histogram->used_ = true; + return histogram; + } + + Stats::StatName makeStat(absl::string_view name) { return pool_.add(name); } + + // Format tags into the name to create a unique stat_name for each name:tag combination. + // If the same stat_name is passed to makeGauge() or makeCounter(), even with different + // tags, a copy of the previous metric will be returned. 
+ std::string baseName(const std::string& name, Stats::StatNameTagVector cluster_tags) { + std::string result = name; + for (const auto& name_tag : cluster_tags) { + result.append(fmt::format("<{}:{}>", symbol_table_->toString(name_tag.first), + symbol_table_->toString(name_tag.second))); + } + return result; + } + + void clearStorage() { + pool_.clear(); + counters_.clear(); + gauges_.clear(); + histograms_.clear(); + EXPECT_EQ(0, symbol_table_->numSymbols()); + } + + Stats::TestSymbolTable symbol_table_; + Stats::AllocatorImpl alloc_; + Stats::StatNamePool pool_; + std::vector counters_; + std::vector gauges_; + std::vector histograms_; +}; + +TEST_F(PrometheusStatsFormatterTest, MetricName) { + std::string raw = "vulture.eats-liver"; + std::string expected = "envoy_vulture_eats_liver"; + auto actual = PrometheusStatsFormatter::metricName(raw); + EXPECT_EQ(expected, actual); +} + +TEST_F(PrometheusStatsFormatterTest, SanitizeMetricName) { + std::string raw = "An.artist.plays-violin@019street"; + std::string expected = "envoy_An_artist_plays_violin_019street"; + auto actual = PrometheusStatsFormatter::metricName(raw); + EXPECT_EQ(expected, actual); +} + +TEST_F(PrometheusStatsFormatterTest, SanitizeMetricNameDigitFirst) { + std::string raw = "3.artists.play-violin@019street"; + std::string expected = "envoy_3_artists_play_violin_019street"; + auto actual = PrometheusStatsFormatter::metricName(raw); + EXPECT_EQ(expected, actual); +} + +TEST_F(PrometheusStatsFormatterTest, NamespaceRegistry) { + std::string raw = "vulture.eats-liver"; + std::string expected = "vulture_eats_liver"; + + EXPECT_FALSE(PrometheusStatsFormatter::registerPrometheusNamespace("3vulture")); + EXPECT_FALSE(PrometheusStatsFormatter::registerPrometheusNamespace(".vulture")); + + EXPECT_FALSE(PrometheusStatsFormatter::unregisterPrometheusNamespace("vulture")); + EXPECT_TRUE(PrometheusStatsFormatter::registerPrometheusNamespace("vulture")); + 
EXPECT_FALSE(PrometheusStatsFormatter::registerPrometheusNamespace("vulture")); + EXPECT_EQ(expected, PrometheusStatsFormatter::metricName(raw)); + EXPECT_TRUE(PrometheusStatsFormatter::unregisterPrometheusNamespace("vulture")); + + EXPECT_EQ("envoy_" + expected, PrometheusStatsFormatter::metricName(raw)); +} + +TEST_F(PrometheusStatsFormatterTest, FormattedTags) { + std::vector tags; + Stats::Tag tag1 = {"a.tag-name", "a.tag-value"}; + Stats::Tag tag2 = {"another_tag_name", "another_tag-value"}; + tags.push_back(tag1); + tags.push_back(tag2); + std::string expected = "a_tag_name=\"a.tag-value\",another_tag_name=\"another_tag-value\""; + auto actual = PrometheusStatsFormatter::formattedTags(tags); + EXPECT_EQ(expected, actual); +} + +TEST_F(PrometheusStatsFormatterTest, MetricNameCollison) { + + // Create two counters and two gauges with each pair having the same name, + // but having different tag names and values. + //`statsAsPrometheus()` should return two implying it found two unique stat names + + addCounter("cluster.test_cluster_1.upstream_cx_total", + {{makeStat("a.tag-name"), makeStat("a.tag-value")}}); + addCounter("cluster.test_cluster_1.upstream_cx_total", + {{makeStat("another_tag_name"), makeStat("another_tag-value")}}); + addGauge("cluster.test_cluster_2.upstream_cx_total", + {{makeStat("another_tag_name_3"), makeStat("another_tag_3-value")}}); + addGauge("cluster.test_cluster_2.upstream_cx_total", + {{makeStat("another_tag_name_4"), makeStat("another_tag_4-value")}}); + + Buffer::OwnedImpl response; + auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, response, + false, absl::nullopt); + EXPECT_EQ(2UL, size); +} + +TEST_F(PrometheusStatsFormatterTest, UniqueMetricName) { + + // Create two counters and two gauges, all with unique names. + // statsAsPrometheus() should return four implying it found + // four unique stat names. 
+ + addCounter("cluster.test_cluster_1.upstream_cx_total", + {{makeStat("a.tag-name"), makeStat("a.tag-value")}}); + addCounter("cluster.test_cluster_2.upstream_cx_total", + {{makeStat("another_tag_name"), makeStat("another_tag-value")}}); + addGauge("cluster.test_cluster_3.upstream_cx_total", + {{makeStat("another_tag_name_3"), makeStat("another_tag_3-value")}}); + addGauge("cluster.test_cluster_4.upstream_cx_total", + {{makeStat("another_tag_name_4"), makeStat("another_tag_4-value")}}); + + Buffer::OwnedImpl response; + auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, response, + false, absl::nullopt); + EXPECT_EQ(4UL, size); +} + +TEST_F(PrometheusStatsFormatterTest, HistogramWithNoValuesAndNoTags) { + HistogramWrapper h1_cumulative; + h1_cumulative.setHistogramValues(std::vector(0)); + Stats::HistogramStatisticsImpl h1_cumulative_statistics(h1_cumulative.getHistogram()); + + auto histogram = makeHistogram("histogram1", {}); + ON_CALL(*histogram, cumulativeStatistics()).WillByDefault(ReturnRef(h1_cumulative_statistics)); + + addHistogram(histogram); + + Buffer::OwnedImpl response; + auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, response, + false, absl::nullopt); + EXPECT_EQ(1UL, size); + + const std::string expected_output = R"EOF(# TYPE envoy_histogram1 histogram +envoy_histogram1_bucket{le="0.5"} 0 +envoy_histogram1_bucket{le="1"} 0 +envoy_histogram1_bucket{le="5"} 0 +envoy_histogram1_bucket{le="10"} 0 +envoy_histogram1_bucket{le="25"} 0 +envoy_histogram1_bucket{le="50"} 0 +envoy_histogram1_bucket{le="100"} 0 +envoy_histogram1_bucket{le="250"} 0 +envoy_histogram1_bucket{le="500"} 0 +envoy_histogram1_bucket{le="1000"} 0 +envoy_histogram1_bucket{le="2500"} 0 +envoy_histogram1_bucket{le="5000"} 0 +envoy_histogram1_bucket{le="10000"} 0 +envoy_histogram1_bucket{le="30000"} 0 +envoy_histogram1_bucket{le="60000"} 0 +envoy_histogram1_bucket{le="300000"} 0 
+envoy_histogram1_bucket{le="600000"} 0 +envoy_histogram1_bucket{le="1800000"} 0 +envoy_histogram1_bucket{le="3600000"} 0 +envoy_histogram1_bucket{le="+Inf"} 0 +envoy_histogram1_sum{} 0 +envoy_histogram1_count{} 0 + +)EOF"; + + EXPECT_EQ(expected_output, response.toString()); +} + +TEST_F(PrometheusStatsFormatterTest, HistogramWithNonDefaultBuckets) { + HistogramWrapper h1_cumulative; + h1_cumulative.setHistogramValues(std::vector(0)); + Stats::ConstSupportedBuckets buckets{10, 20}; + Stats::HistogramStatisticsImpl h1_cumulative_statistics(h1_cumulative.getHistogram(), buckets); + + auto histogram = makeHistogram("histogram1", {}); + ON_CALL(*histogram, cumulativeStatistics()).WillByDefault(ReturnRef(h1_cumulative_statistics)); + + addHistogram(histogram); + + Buffer::OwnedImpl response; + auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, response, + false, absl::nullopt); + EXPECT_EQ(1UL, size); + + const std::string expected_output = R"EOF(# TYPE envoy_histogram1 histogram +envoy_histogram1_bucket{le="10"} 0 +envoy_histogram1_bucket{le="20"} 0 +envoy_histogram1_bucket{le="+Inf"} 0 +envoy_histogram1_sum{} 0 +envoy_histogram1_count{} 0 + +)EOF"; + + EXPECT_EQ(expected_output, response.toString()); +} + +TEST_F(PrometheusStatsFormatterTest, HistogramWithHighCounts) { + HistogramWrapper h1_cumulative; + + // Force large counts to prove that the +Inf bucket doesn't overflow to scientific notation. 
+ h1_cumulative.setHistogramValuesWithCounts(std::vector>({ + {1, 100000}, + {100, 1000000}, + {1000, 100000000}, + })); + + Stats::HistogramStatisticsImpl h1_cumulative_statistics(h1_cumulative.getHistogram()); + + auto histogram = makeHistogram("histogram1", {}); + ON_CALL(*histogram, cumulativeStatistics()).WillByDefault(ReturnRef(h1_cumulative_statistics)); + + addHistogram(histogram); + + Buffer::OwnedImpl response; + auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, response, + false, absl::nullopt); + EXPECT_EQ(1UL, size); + + const std::string expected_output = R"EOF(# TYPE envoy_histogram1 histogram +envoy_histogram1_bucket{le="0.5"} 0 +envoy_histogram1_bucket{le="1"} 0 +envoy_histogram1_bucket{le="5"} 100000 +envoy_histogram1_bucket{le="10"} 100000 +envoy_histogram1_bucket{le="25"} 100000 +envoy_histogram1_bucket{le="50"} 100000 +envoy_histogram1_bucket{le="100"} 100000 +envoy_histogram1_bucket{le="250"} 1100000 +envoy_histogram1_bucket{le="500"} 1100000 +envoy_histogram1_bucket{le="1000"} 1100000 +envoy_histogram1_bucket{le="2500"} 101100000 +envoy_histogram1_bucket{le="5000"} 101100000 +envoy_histogram1_bucket{le="10000"} 101100000 +envoy_histogram1_bucket{le="30000"} 101100000 +envoy_histogram1_bucket{le="60000"} 101100000 +envoy_histogram1_bucket{le="300000"} 101100000 +envoy_histogram1_bucket{le="600000"} 101100000 +envoy_histogram1_bucket{le="1800000"} 101100000 +envoy_histogram1_bucket{le="3600000"} 101100000 +envoy_histogram1_bucket{le="+Inf"} 101100000 +envoy_histogram1_sum{} 105105105000 +envoy_histogram1_count{} 101100000 + +)EOF"; + + EXPECT_EQ(expected_output, response.toString()); +} + +TEST_F(PrometheusStatsFormatterTest, OutputWithAllMetricTypes) { + addCounter("cluster.test_1.upstream_cx_total", + {{makeStat("a.tag-name"), makeStat("a.tag-value")}}); + addCounter("cluster.test_2.upstream_cx_total", + {{makeStat("another_tag_name"), makeStat("another_tag-value")}}); + 
addGauge("cluster.test_3.upstream_cx_total", + {{makeStat("another_tag_name_3"), makeStat("another_tag_3-value")}}); + addGauge("cluster.test_4.upstream_cx_total", + {{makeStat("another_tag_name_4"), makeStat("another_tag_4-value")}}); + + const std::vector h1_values = {50, 20, 30, 70, 100, 5000, 200}; + HistogramWrapper h1_cumulative; + h1_cumulative.setHistogramValues(h1_values); + Stats::HistogramStatisticsImpl h1_cumulative_statistics(h1_cumulative.getHistogram()); + + auto histogram1 = + makeHistogram("cluster.test_1.upstream_rq_time", {{makeStat("key1"), makeStat("value1")}, + {makeStat("key2"), makeStat("value2")}}); + histogram1->unit_ = Stats::Histogram::Unit::Milliseconds; + addHistogram(histogram1); + EXPECT_CALL(*histogram1, cumulativeStatistics()).WillOnce(ReturnRef(h1_cumulative_statistics)); + + Buffer::OwnedImpl response; + auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, response, + false, absl::nullopt); + EXPECT_EQ(5UL, size); + + const std::string expected_output = R"EOF(# TYPE envoy_cluster_test_1_upstream_cx_total counter +envoy_cluster_test_1_upstream_cx_total{a_tag_name="a.tag-value"} 0 + +# TYPE envoy_cluster_test_2_upstream_cx_total counter +envoy_cluster_test_2_upstream_cx_total{another_tag_name="another_tag-value"} 0 + +# TYPE envoy_cluster_test_3_upstream_cx_total gauge +envoy_cluster_test_3_upstream_cx_total{another_tag_name_3="another_tag_3-value"} 0 + +# TYPE envoy_cluster_test_4_upstream_cx_total gauge +envoy_cluster_test_4_upstream_cx_total{another_tag_name_4="another_tag_4-value"} 0 + +# TYPE envoy_cluster_test_1_upstream_rq_time histogram +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="0.5"} 0 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="1"} 0 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="5"} 0 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="10"} 0 
+envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="25"} 1 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="50"} 2 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="100"} 4 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="250"} 6 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="500"} 6 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="1000"} 6 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="2500"} 6 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="5000"} 6 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="10000"} 7 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="30000"} 7 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="60000"} 7 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="300000"} 7 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="600000"} 7 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="1800000"} 7 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="3600000"} 7 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="+Inf"} 7 +envoy_cluster_test_1_upstream_rq_time_sum{key1="value1",key2="value2"} 5532 +envoy_cluster_test_1_upstream_rq_time_count{key1="value1",key2="value2"} 7 + +)EOF"; + + EXPECT_EQ(expected_output, response.toString()); +} + +// Test that output groups all metrics of the same name (with different tags) together, +// as required by the Prometheus exposition format spec. Additionally, groups of metrics +// should be sorted by their tags; the format specifies that it is preferred that metrics +// are always grouped in the same order, and sorting is an easy way to ensure this. 
+TEST_F(PrometheusStatsFormatterTest, OutputSortedByMetricName) { + const std::vector h1_values = {50, 20, 30, 70, 100, 5000, 200}; + HistogramWrapper h1_cumulative; + h1_cumulative.setHistogramValues(h1_values); + Stats::HistogramStatisticsImpl h1_cumulative_statistics(h1_cumulative.getHistogram()); + + // Create the 3 clusters in non-sorted order to exercise the sorting. + // Create two of each metric type (counter, gauge, histogram) so that + // the output for each needs to be collected together. + for (const char* cluster : {"ccc", "aaa", "bbb"}) { + const Stats::StatNameTagVector tags{{makeStat("cluster"), makeStat(cluster)}}; + addCounter("cluster.upstream_cx_total", tags); + addCounter("cluster.upstream_cx_connect_fail", tags); + addGauge("cluster.upstream_cx_active", tags); + addGauge("cluster.upstream_rq_active", tags); + + for (const char* hist_name : {"cluster.upstream_rq_time", "cluster.upstream_response_time"}) { + auto histogram1 = makeHistogram(hist_name, tags); + histogram1->unit_ = Stats::Histogram::Unit::Milliseconds; + addHistogram(histogram1); + EXPECT_CALL(*histogram1, cumulativeStatistics()) + .WillOnce(ReturnRef(h1_cumulative_statistics)); + } + } + + Buffer::OwnedImpl response; + auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, response, + false, absl::nullopt); + EXPECT_EQ(6UL, size); + + const std::string expected_output = R"EOF(# TYPE envoy_cluster_upstream_cx_connect_fail counter +envoy_cluster_upstream_cx_connect_fail{cluster="aaa"} 0 +envoy_cluster_upstream_cx_connect_fail{cluster="bbb"} 0 +envoy_cluster_upstream_cx_connect_fail{cluster="ccc"} 0 + +# TYPE envoy_cluster_upstream_cx_total counter +envoy_cluster_upstream_cx_total{cluster="aaa"} 0 +envoy_cluster_upstream_cx_total{cluster="bbb"} 0 +envoy_cluster_upstream_cx_total{cluster="ccc"} 0 + +# TYPE envoy_cluster_upstream_cx_active gauge +envoy_cluster_upstream_cx_active{cluster="aaa"} 0 +envoy_cluster_upstream_cx_active{cluster="bbb"} 0 
+envoy_cluster_upstream_cx_active{cluster="ccc"} 0 + +# TYPE envoy_cluster_upstream_rq_active gauge +envoy_cluster_upstream_rq_active{cluster="aaa"} 0 +envoy_cluster_upstream_rq_active{cluster="bbb"} 0 +envoy_cluster_upstream_rq_active{cluster="ccc"} 0 + +# TYPE envoy_cluster_upstream_response_time histogram +envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="0.5"} 0 +envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="1"} 0 +envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="5"} 0 +envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="10"} 0 +envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="25"} 1 +envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="50"} 2 +envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="100"} 4 +envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="250"} 6 +envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="500"} 6 +envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="1000"} 6 +envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="2500"} 6 +envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="5000"} 6 +envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="10000"} 7 +envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="30000"} 7 +envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="60000"} 7 +envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="300000"} 7 +envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="600000"} 7 +envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="1800000"} 7 +envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="3600000"} 7 +envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="+Inf"} 7 +envoy_cluster_upstream_response_time_sum{cluster="aaa"} 5532 +envoy_cluster_upstream_response_time_count{cluster="aaa"} 7 +envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="0.5"} 0 
+envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="1"} 0 +envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="5"} 0 +envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="10"} 0 +envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="25"} 1 +envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="50"} 2 +envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="100"} 4 +envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="250"} 6 +envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="500"} 6 +envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="1000"} 6 +envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="2500"} 6 +envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="5000"} 6 +envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="10000"} 7 +envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="30000"} 7 +envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="60000"} 7 +envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="300000"} 7 +envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="600000"} 7 +envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="1800000"} 7 +envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="3600000"} 7 +envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="+Inf"} 7 +envoy_cluster_upstream_response_time_sum{cluster="bbb"} 5532 +envoy_cluster_upstream_response_time_count{cluster="bbb"} 7 +envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="0.5"} 0 +envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="1"} 0 +envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="5"} 0 +envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="10"} 0 +envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="25"} 1 +envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="50"} 2 +envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="100"} 4 
+envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="250"} 6 +envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="500"} 6 +envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="1000"} 6 +envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="2500"} 6 +envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="5000"} 6 +envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="10000"} 7 +envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="30000"} 7 +envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="60000"} 7 +envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="300000"} 7 +envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="600000"} 7 +envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="1800000"} 7 +envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="3600000"} 7 +envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="+Inf"} 7 +envoy_cluster_upstream_response_time_sum{cluster="ccc"} 5532 +envoy_cluster_upstream_response_time_count{cluster="ccc"} 7 + +# TYPE envoy_cluster_upstream_rq_time histogram +envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="0.5"} 0 +envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="1"} 0 +envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="5"} 0 +envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="10"} 0 +envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="25"} 1 +envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="50"} 2 +envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="100"} 4 +envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="250"} 6 +envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="500"} 6 +envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="1000"} 6 +envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="2500"} 6 +envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="5000"} 6 +envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="10000"} 7 
+envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="30000"} 7 +envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="60000"} 7 +envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="300000"} 7 +envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="600000"} 7 +envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="1800000"} 7 +envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="3600000"} 7 +envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="+Inf"} 7 +envoy_cluster_upstream_rq_time_sum{cluster="aaa"} 5532 +envoy_cluster_upstream_rq_time_count{cluster="aaa"} 7 +envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="0.5"} 0 +envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="1"} 0 +envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="5"} 0 +envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="10"} 0 +envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="25"} 1 +envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="50"} 2 +envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="100"} 4 +envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="250"} 6 +envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="500"} 6 +envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="1000"} 6 +envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="2500"} 6 +envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="5000"} 6 +envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="10000"} 7 +envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="30000"} 7 +envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="60000"} 7 +envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="300000"} 7 +envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="600000"} 7 +envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="1800000"} 7 +envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="3600000"} 7 +envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="+Inf"} 7 +envoy_cluster_upstream_rq_time_sum{cluster="bbb"} 5532 
+envoy_cluster_upstream_rq_time_count{cluster="bbb"} 7 +envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="0.5"} 0 +envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="1"} 0 +envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="5"} 0 +envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="10"} 0 +envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="25"} 1 +envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="50"} 2 +envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="100"} 4 +envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="250"} 6 +envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="500"} 6 +envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="1000"} 6 +envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="2500"} 6 +envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="5000"} 6 +envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="10000"} 7 +envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="30000"} 7 +envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="60000"} 7 +envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="300000"} 7 +envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="600000"} 7 +envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="1800000"} 7 +envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="3600000"} 7 +envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="+Inf"} 7 +envoy_cluster_upstream_rq_time_sum{cluster="ccc"} 5532 +envoy_cluster_upstream_rq_time_count{cluster="ccc"} 7 + +)EOF"; + + EXPECT_EQ(expected_output, response.toString()); +} + +TEST_F(PrometheusStatsFormatterTest, OutputWithUsedOnly) { + addCounter("cluster.test_1.upstream_cx_total", + {{makeStat("a.tag-name"), makeStat("a.tag-value")}}); + addCounter("cluster.test_2.upstream_cx_total", + {{makeStat("another_tag_name"), makeStat("another_tag-value")}}); + addGauge("cluster.test_3.upstream_cx_total", + {{makeStat("another_tag_name_3"), makeStat("another_tag_3-value")}}); + addGauge("cluster.test_4.upstream_cx_total", 
+ {{makeStat("another_tag_name_4"), makeStat("another_tag_4-value")}}); + + const std::vector h1_values = {50, 20, 30, 70, 100, 5000, 200}; + HistogramWrapper h1_cumulative; + h1_cumulative.setHistogramValues(h1_values); + Stats::HistogramStatisticsImpl h1_cumulative_statistics(h1_cumulative.getHistogram()); + + auto histogram1 = + makeHistogram("cluster.test_1.upstream_rq_time", {{makeStat("key1"), makeStat("value1")}, + {makeStat("key2"), makeStat("value2")}}); + histogram1->unit_ = Stats::Histogram::Unit::Milliseconds; + addHistogram(histogram1); + EXPECT_CALL(*histogram1, cumulativeStatistics()).WillOnce(ReturnRef(h1_cumulative_statistics)); + + Buffer::OwnedImpl response; + auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, response, + true, absl::nullopt); + EXPECT_EQ(1UL, size); + + const std::string expected_output = R"EOF(# TYPE envoy_cluster_test_1_upstream_rq_time histogram +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="0.5"} 0 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="1"} 0 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="5"} 0 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="10"} 0 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="25"} 1 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="50"} 2 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="100"} 4 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="250"} 6 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="500"} 6 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="1000"} 6 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="2500"} 6 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="5000"} 6 
+envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="10000"} 7 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="30000"} 7 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="60000"} 7 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="300000"} 7 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="600000"} 7 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="1800000"} 7 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="3600000"} 7 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="+Inf"} 7 +envoy_cluster_test_1_upstream_rq_time_sum{key1="value1",key2="value2"} 5532 +envoy_cluster_test_1_upstream_rq_time_count{key1="value1",key2="value2"} 7 + +)EOF"; + + EXPECT_EQ(expected_output, response.toString()); +} + +TEST_F(PrometheusStatsFormatterTest, OutputWithUsedOnlyHistogram) { + const std::vector h1_values = {}; + HistogramWrapper h1_cumulative; + h1_cumulative.setHistogramValues(h1_values); + Stats::HistogramStatisticsImpl h1_cumulative_statistics(h1_cumulative.getHistogram()); + + auto histogram1 = + makeHistogram("cluster.test_1.upstream_rq_time", {{makeStat("key1"), makeStat("value1")}, + {makeStat("key2"), makeStat("value2")}}); + histogram1->unit_ = Stats::Histogram::Unit::Milliseconds; + histogram1->used_ = false; + addHistogram(histogram1); + + { + const bool used_only = true; + EXPECT_CALL(*histogram1, cumulativeStatistics()).Times(0); + + Buffer::OwnedImpl response; + auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, + response, used_only, absl::nullopt); + EXPECT_EQ(0UL, size); + } + + { + const bool used_only = false; + EXPECT_CALL(*histogram1, cumulativeStatistics()).WillOnce(ReturnRef(h1_cumulative_statistics)); + + Buffer::OwnedImpl response; + auto size = 
PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, + response, used_only, absl::nullopt); + EXPECT_EQ(1UL, size); + } +} + +TEST_F(PrometheusStatsFormatterTest, OutputWithRegexp) { + addCounter("cluster.test_1.upstream_cx_total", + {{makeStat("a.tag-name"), makeStat("a.tag-value")}}); + addCounter("cluster.test_2.upstream_cx_total", + {{makeStat("another_tag_name"), makeStat("another_tag-value")}}); + addGauge("cluster.test_3.upstream_cx_total", + {{makeStat("another_tag_name_3"), makeStat("another_tag_3-value")}}); + addGauge("cluster.test_4.upstream_cx_total", + {{makeStat("another_tag_name_4"), makeStat("another_tag_4-value")}}); + + const std::vector h1_values = {50, 20, 30, 70, 100, 5000, 200}; + HistogramWrapper h1_cumulative; + h1_cumulative.setHistogramValues(h1_values); + Stats::HistogramStatisticsImpl h1_cumulative_statistics(h1_cumulative.getHistogram()); + + auto histogram1 = + makeHistogram("cluster.test_1.upstream_rq_time", {{makeStat("key1"), makeStat("value1")}, + {makeStat("key2"), makeStat("value2")}}); + histogram1->unit_ = Stats::Histogram::Unit::Milliseconds; + addHistogram(histogram1); + + Buffer::OwnedImpl response; + auto size = PrometheusStatsFormatter::statsAsPrometheus( + counters_, gauges_, histograms_, response, false, + absl::optional{std::regex("cluster.test_1.upstream_cx_total")}); + EXPECT_EQ(1UL, size); + + const std::string expected_output = + R"EOF(# TYPE envoy_cluster_test_1_upstream_cx_total counter +envoy_cluster_test_1_upstream_cx_total{a_tag_name="a.tag-value"} 0 + +)EOF"; + + EXPECT_EQ(expected_output, response.toString()); +} + +} // namespace Server +} // namespace Envoy diff --git a/test/server/admin/runtime_handler_test.cc b/test/server/admin/runtime_handler_test.cc new file mode 100644 index 0000000000000..dfd7fc0bf1f94 --- /dev/null +++ b/test/server/admin/runtime_handler_test.cc @@ -0,0 +1,119 @@ +#include "test/server/admin/admin_instance.h" + +namespace Envoy { +namespace Server { + 
+INSTANTIATE_TEST_SUITE_P(IpVersions, AdminInstanceTest, + testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + TestUtility::ipTestParamsToString); + +TEST_P(AdminInstanceTest, Runtime) { + Http::TestResponseHeaderMapImpl header_map; + Buffer::OwnedImpl response; + + Runtime::MockSnapshot snapshot; + Runtime::MockLoader loader; + auto layer1 = std::make_unique>(); + auto layer2 = std::make_unique>(); + Runtime::Snapshot::EntryMap entries2{{"string_key", {"override", {}, {}, {}, {}}}, + {"extra_key", {"bar", {}, {}, {}, {}}}}; + Runtime::Snapshot::EntryMap entries1{{"string_key", {"foo", {}, {}, {}, {}}}, + {"int_key", {"1", 1, {}, {}, {}}}, + {"other_key", {"bar", {}, {}, {}, {}}}}; + + ON_CALL(*layer1, name()).WillByDefault(testing::ReturnRefOfCopy(std::string{"layer1"})); + ON_CALL(*layer1, values()).WillByDefault(testing::ReturnRef(entries1)); + ON_CALL(*layer2, name()).WillByDefault(testing::ReturnRefOfCopy(std::string{"layer2"})); + ON_CALL(*layer2, values()).WillByDefault(testing::ReturnRef(entries2)); + + std::vector layers; + layers.push_back(std::move(layer1)); + layers.push_back(std::move(layer2)); + EXPECT_CALL(snapshot, getLayers()).WillRepeatedly(testing::ReturnRef(layers)); + + const std::string expected_json = R"EOF({ + "layers": [ + "layer1", + "layer2" + ], + "entries": { + "extra_key": { + "layer_values": [ + "", + "bar" + ], + "final_value": "bar" + }, + "int_key": { + "layer_values": [ + "1", + "" + ], + "final_value": "1" + }, + "other_key": { + "layer_values": [ + "bar", + "" + ], + "final_value": "bar" + }, + "string_key": { + "layer_values": [ + "foo", + "override" + ], + "final_value": "override" + } + } +})EOF"; + + EXPECT_CALL(loader, snapshot()).WillRepeatedly(testing::ReturnPointee(&snapshot)); + EXPECT_CALL(server_, runtime()).WillRepeatedly(testing::ReturnPointee(&loader)); + EXPECT_EQ(Http::Code::OK, getCallback("/runtime", header_map, response)); + EXPECT_THAT(expected_json, JsonStringEq(response.toString())); +} + 
+TEST_P(AdminInstanceTest, RuntimeModify) { + Http::TestResponseHeaderMapImpl header_map; + Buffer::OwnedImpl response; + + Runtime::MockLoader loader; + EXPECT_CALL(server_, runtime()).WillRepeatedly(testing::ReturnPointee(&loader)); + + absl::node_hash_map overrides; + overrides["foo"] = "bar"; + overrides["x"] = "42"; + overrides["nothing"] = ""; + EXPECT_CALL(loader, mergeValues(overrides)).Times(1); + EXPECT_EQ(Http::Code::OK, + postCallback("/runtime_modify?foo=bar&x=42¬hing=", header_map, response)); + EXPECT_EQ("OK\n", response.toString()); +} + +TEST_P(AdminInstanceTest, RuntimeModifyParamsInBody) { + Runtime::MockLoader loader; + EXPECT_CALL(server_, runtime()).WillRepeatedly(testing::ReturnPointee(&loader)); + + const std::string key = "routing.traffic_shift.foo"; + const std::string value = "numerator: 1\ndenominator: TEN_THOUSAND\n"; + const absl::node_hash_map overrides = {{key, value}}; + EXPECT_CALL(loader, mergeValues(overrides)).Times(1); + + const std::string body = fmt::format("{}={}", key, value); + Http::TestResponseHeaderMapImpl header_map; + Buffer::OwnedImpl response; + EXPECT_EQ(Http::Code::OK, runCallback("/runtime_modify", header_map, response, "POST", body)); + EXPECT_EQ("OK\n", response.toString()); +} + +TEST_P(AdminInstanceTest, RuntimeModifyNoArguments) { + Http::TestResponseHeaderMapImpl header_map; + Buffer::OwnedImpl response; + + EXPECT_EQ(Http::Code::BadRequest, postCallback("/runtime_modify", header_map, response)); + EXPECT_TRUE(absl::StartsWith(response.toString(), "usage:")); +} + +} // namespace Server +} // namespace Envoy diff --git a/test/server/admin/server_info_handler_test.cc b/test/server/admin/server_info_handler_test.cc new file mode 100644 index 0000000000000..d9ef53339f951 --- /dev/null +++ b/test/server/admin/server_info_handler_test.cc @@ -0,0 +1,171 @@ +#include "envoy/admin/v3/memory.pb.h" + +#include "extensions/transport_sockets/tls/context_config_impl.h" + +#include "test/server/admin/admin_instance.h" 
+#include "test/test_common/logging.h" + +using testing::Ge; +using testing::HasSubstr; +using testing::Property; +using testing::Return; + +namespace Envoy { +namespace Server { + +INSTANTIATE_TEST_SUITE_P(IpVersions, AdminInstanceTest, + testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + TestUtility::ipTestParamsToString); + +TEST_P(AdminInstanceTest, ContextThatReturnsNullCertDetails) { + Http::TestResponseHeaderMapImpl header_map; + Buffer::OwnedImpl response; + + // Setup a context that returns null cert details. + testing::NiceMock factory_context; + envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext config; + Extensions::TransportSockets::Tls::ClientContextConfigImpl cfg(config, factory_context); + Stats::IsolatedStoreImpl store; + Envoy::Ssl::ClientContextSharedPtr client_ctx( + server_.sslContextManager().createSslClientContext(store, cfg)); + + const std::string expected_empty_json = R"EOF({ + "certificates": [ + { + "ca_cert": [], + "cert_chain": [] + } + ] +} +)EOF"; + + // Validate that cert details are null and /certs handles it correctly. 
+ EXPECT_EQ(nullptr, client_ctx->getCaCertInformation()); + EXPECT_TRUE(client_ctx->getCertChainInformation().empty()); + EXPECT_EQ(Http::Code::OK, getCallback("/certs", header_map, response)); + EXPECT_EQ(expected_empty_json, response.toString()); +} + +TEST_P(AdminInstanceTest, Memory) { + Http::TestResponseHeaderMapImpl header_map; + Buffer::OwnedImpl response; + EXPECT_EQ(Http::Code::OK, getCallback("/memory", header_map, response)); + const std::string output_json = response.toString(); + envoy::admin::v3::Memory output_proto; + TestUtility::loadFromJson(output_json, output_proto); + EXPECT_THAT(output_proto, AllOf(Property(&envoy::admin::v3::Memory::allocated, Ge(0)), + Property(&envoy::admin::v3::Memory::heap_size, Ge(0)), + Property(&envoy::admin::v3::Memory::pageheap_unmapped, Ge(0)), + Property(&envoy::admin::v3::Memory::pageheap_free, Ge(0)), + Property(&envoy::admin::v3::Memory::total_thread_cache, Ge(0)))); +} + +TEST_P(AdminInstanceTest, GetReadyRequest) { + NiceMock initManager; + ON_CALL(server_, initManager()).WillByDefault(ReturnRef(initManager)); + + { + Http::TestResponseHeaderMapImpl response_headers; + std::string body; + + ON_CALL(initManager, state()).WillByDefault(Return(Init::Manager::State::Initialized)); + EXPECT_EQ(Http::Code::OK, admin_.request("/ready", "GET", response_headers, body)); + EXPECT_EQ(body, "LIVE\n"); + EXPECT_THAT(std::string(response_headers.getContentTypeValue()), HasSubstr("text/plain")); + } + + { + Http::TestResponseHeaderMapImpl response_headers; + std::string body; + + ON_CALL(initManager, state()).WillByDefault(Return(Init::Manager::State::Uninitialized)); + EXPECT_EQ(Http::Code::ServiceUnavailable, + admin_.request("/ready", "GET", response_headers, body)); + EXPECT_EQ(body, "PRE_INITIALIZING\n"); + EXPECT_THAT(std::string(response_headers.getContentTypeValue()), HasSubstr("text/plain")); + } + + Http::TestResponseHeaderMapImpl response_headers; + std::string body; + + ON_CALL(initManager, 
state()).WillByDefault(Return(Init::Manager::State::Initializing)); + EXPECT_EQ(Http::Code::ServiceUnavailable, + admin_.request("/ready", "GET", response_headers, body)); + EXPECT_EQ(body, "INITIALIZING\n"); + EXPECT_THAT(std::string(response_headers.getContentTypeValue()), HasSubstr("text/plain")); +} + +TEST_P(AdminInstanceTest, GetRequest) { + EXPECT_CALL(server_.options_, toCommandLineOptions()).WillRepeatedly(Invoke([] { + Server::CommandLineOptionsPtr command_line_options = + std::make_unique(); + command_line_options->set_restart_epoch(2); + command_line_options->set_service_cluster("cluster"); + return command_line_options; + })); + NiceMock initManager; + ON_CALL(server_, initManager()).WillByDefault(ReturnRef(initManager)); + ON_CALL(server_.hot_restart_, version()).WillByDefault(Return("foo_version")); + + { + Http::TestResponseHeaderMapImpl response_headers; + std::string body; + + ON_CALL(initManager, state()).WillByDefault(Return(Init::Manager::State::Initialized)); + EXPECT_EQ(Http::Code::OK, admin_.request("/server_info", "GET", response_headers, body)); + envoy::admin::v3::ServerInfo server_info_proto; + EXPECT_THAT(std::string(response_headers.getContentTypeValue()), HasSubstr("application/json")); + + // We only test that it parses as the proto and that some fields are correct, since + // values such as timestamps + Envoy version are tricky to test for. 
+ TestUtility::loadFromJson(body, server_info_proto); + EXPECT_EQ(server_info_proto.state(), envoy::admin::v3::ServerInfo::LIVE); + EXPECT_EQ(server_info_proto.hot_restart_version(), "foo_version"); + EXPECT_EQ(server_info_proto.command_line_options().restart_epoch(), 2); + EXPECT_EQ(server_info_proto.command_line_options().service_cluster(), "cluster"); + } + + { + Http::TestResponseHeaderMapImpl response_headers; + std::string body; + + ON_CALL(initManager, state()).WillByDefault(Return(Init::Manager::State::Uninitialized)); + EXPECT_EQ(Http::Code::OK, admin_.request("/server_info", "GET", response_headers, body)); + envoy::admin::v3::ServerInfo server_info_proto; + EXPECT_THAT(std::string(response_headers.getContentTypeValue()), HasSubstr("application/json")); + + // We only test that it parses as the proto and that some fields are correct, since + // values such as timestamps + Envoy version are tricky to test for. + TestUtility::loadFromJson(body, server_info_proto); + EXPECT_EQ(server_info_proto.state(), envoy::admin::v3::ServerInfo::PRE_INITIALIZING); + EXPECT_EQ(server_info_proto.command_line_options().restart_epoch(), 2); + EXPECT_EQ(server_info_proto.command_line_options().service_cluster(), "cluster"); + } + + Http::TestResponseHeaderMapImpl response_headers; + std::string body; + + ON_CALL(initManager, state()).WillByDefault(Return(Init::Manager::State::Initializing)); + EXPECT_EQ(Http::Code::OK, admin_.request("/server_info", "GET", response_headers, body)); + envoy::admin::v3::ServerInfo server_info_proto; + EXPECT_THAT(std::string(response_headers.getContentTypeValue()), HasSubstr("application/json")); + + // We only test that it parses as the proto and that some fields are correct, since + // values such as timestamps + Envoy version are tricky to test for. 
+ TestUtility::loadFromJson(body, server_info_proto); + EXPECT_EQ(server_info_proto.state(), envoy::admin::v3::ServerInfo::INITIALIZING); + EXPECT_EQ(server_info_proto.command_line_options().restart_epoch(), 2); + EXPECT_EQ(server_info_proto.command_line_options().service_cluster(), "cluster"); +} + +TEST_P(AdminInstanceTest, PostRequest) { + Http::TestResponseHeaderMapImpl response_headers; + std::string body; + EXPECT_NO_LOGS(EXPECT_EQ(Http::Code::OK, + admin_.request("/healthcheck/fail", "POST", response_headers, body))); + EXPECT_EQ(body, "OK\n"); + EXPECT_THAT(std::string(response_headers.getContentTypeValue()), HasSubstr("text/plain")); +} + +} // namespace Server +} // namespace Envoy diff --git a/test/server/http/stats_handler_test.cc b/test/server/admin/stats_handler_test.cc similarity index 50% rename from test/server/http/stats_handler_test.cc rename to test/server/admin/stats_handler_test.cc index 8c02e3846ffac..623438013b97e 100644 --- a/test/server/http/stats_handler_test.cc +++ b/test/server/admin/stats_handler_test.cc @@ -2,9 +2,9 @@ #include "common/stats/thread_local_store.h" -#include "server/http/stats_handler.h" +#include "server/admin/stats_handler.h" -#include "test/server/http/admin_instance.h" +#include "test/server/admin/admin_instance.h" #include "test/test_common/logging.h" #include "test/test_common/utility.h" @@ -39,7 +39,7 @@ class AdminStatsTest : public testing::TestWithParam tls_; Stats::AllocatorImpl alloc_; Stats::MockSink sink_; - std::unique_ptr store_; + Stats::ThreadLocalStoreImplPtr store_; }; INSTANTIATE_TEST_SUITE_P(IpVersions, AdminStatsTest, @@ -511,7 +511,7 @@ INSTANTIATE_TEST_SUITE_P(IpVersions, AdminInstanceTest, TestUtility::ipTestParamsToString); TEST_P(AdminInstanceTest, StatsInvalidRegex) { - Http::ResponseHeaderMapImpl header_map; + Http::TestResponseHeaderMapImpl header_map; Buffer::OwnedImpl data; EXPECT_LOG_CONTAINS( "error", "Invalid regex: ", @@ -526,7 +526,7 @@ TEST_P(AdminInstanceTest, StatsInvalidRegex) { 
} TEST_P(AdminInstanceTest, PrometheusStatsInvalidRegex) { - Http::ResponseHeaderMapImpl header_map; + Http::TestResponseHeaderMapImpl header_map; Buffer::OwnedImpl data; EXPECT_LOG_CONTAINS( "error", ": *.ptest", @@ -549,464 +549,25 @@ TEST_P(AdminInstanceTest, TracingStatsDisabled) { } TEST_P(AdminInstanceTest, GetRequestJson) { - Http::ResponseHeaderMapImpl response_headers; + Http::TestResponseHeaderMapImpl response_headers; std::string body; EXPECT_EQ(Http::Code::OK, admin_.request("/stats?format=json", "GET", response_headers, body)); EXPECT_THAT(body, HasSubstr("{\"stats\":[")); - EXPECT_THAT(std::string(response_headers.ContentType()->value().getStringView()), - HasSubstr("application/json")); + EXPECT_THAT(std::string(response_headers.getContentTypeValue()), HasSubstr("application/json")); } TEST_P(AdminInstanceTest, RecentLookups) { - Http::ResponseHeaderMapImpl response_headers; + Http::TestResponseHeaderMapImpl response_headers; std::string body; // Recent lookup tracking is disabled by default. EXPECT_EQ(Http::Code::OK, admin_.request("/stats/recentlookups", "GET", response_headers, body)); EXPECT_THAT(body, HasSubstr("Lookup tracking is not enabled")); - EXPECT_THAT(std::string(response_headers.ContentType()->value().getStringView()), - HasSubstr("text/plain")); + EXPECT_THAT(std::string(response_headers.getContentTypeValue()), HasSubstr("text/plain")); // We can't test RecentLookups in admin unit tests as it doesn't work with a // fake symbol table. However we cover this solidly in integration tests. 
} -class HistogramWrapper { -public: - HistogramWrapper() : histogram_(hist_alloc()) {} - - ~HistogramWrapper() { hist_free(histogram_); } - - const histogram_t* getHistogram() { return histogram_; } - - void setHistogramValues(const std::vector& values) { - for (uint64_t value : values) { - hist_insert_intscale(histogram_, value, 0, 1); - } - } - - void setHistogramValuesWithCounts(const std::vector>& values) { - for (std::pair cv : values) { - hist_insert_intscale(histogram_, cv.first, 0, cv.second); - } - } - -private: - histogram_t* histogram_; -}; - -class PrometheusStatsFormatterTest : public testing::Test { -protected: - PrometheusStatsFormatterTest() - : symbol_table_(Stats::SymbolTableCreator::makeSymbolTable()), alloc_(*symbol_table_), - pool_(*symbol_table_) {} - - ~PrometheusStatsFormatterTest() override { clearStorage(); } - - void addCounter(const std::string& name, Stats::StatNameTagVector cluster_tags) { - Stats::StatNameManagedStorage storage(name, *symbol_table_); - Stats::StatName stat_name = storage.statName(); - counters_.push_back(alloc_.makeCounter(stat_name, stat_name, cluster_tags)); - } - - void addGauge(const std::string& name, Stats::StatNameTagVector cluster_tags) { - Stats::StatNameManagedStorage storage(name, *symbol_table_); - Stats::StatName stat_name = storage.statName(); - gauges_.push_back( - alloc_.makeGauge(stat_name, stat_name, cluster_tags, Stats::Gauge::ImportMode::Accumulate)); - } - - void addHistogram(const Stats::ParentHistogramSharedPtr histogram) { - histograms_.push_back(histogram); - } - - using MockHistogramSharedPtr = Stats::RefcountPtr>; - MockHistogramSharedPtr makeHistogram() { - return MockHistogramSharedPtr(new NiceMock()); - } - - Stats::StatName makeStat(absl::string_view name) { return pool_.add(name); } - - void clearStorage() { - pool_.clear(); - counters_.clear(); - gauges_.clear(); - histograms_.clear(); - EXPECT_EQ(0, symbol_table_->numSymbols()); - } - - Stats::SymbolTablePtr symbol_table_; - 
Stats::AllocatorImpl alloc_; - Stats::StatNamePool pool_; - std::vector counters_; - std::vector gauges_; - std::vector histograms_; -}; - -TEST_F(PrometheusStatsFormatterTest, MetricName) { - std::string raw = "vulture.eats-liver"; - std::string expected = "envoy_vulture_eats_liver"; - auto actual = PrometheusStatsFormatter::metricName(raw); - EXPECT_EQ(expected, actual); -} - -TEST_F(PrometheusStatsFormatterTest, SanitizeMetricName) { - std::string raw = "An.artist.plays-violin@019street"; - std::string expected = "envoy_An_artist_plays_violin_019street"; - auto actual = PrometheusStatsFormatter::metricName(raw); - EXPECT_EQ(expected, actual); -} - -TEST_F(PrometheusStatsFormatterTest, SanitizeMetricNameDigitFirst) { - std::string raw = "3.artists.play-violin@019street"; - std::string expected = "envoy_3_artists_play_violin_019street"; - auto actual = PrometheusStatsFormatter::metricName(raw); - EXPECT_EQ(expected, actual); -} - -TEST_F(PrometheusStatsFormatterTest, FormattedTags) { - std::vector tags; - Stats::Tag tag1 = {"a.tag-name", "a.tag-value"}; - Stats::Tag tag2 = {"another_tag_name", "another_tag-value"}; - tags.push_back(tag1); - tags.push_back(tag2); - std::string expected = "a_tag_name=\"a.tag-value\",another_tag_name=\"another_tag-value\""; - auto actual = PrometheusStatsFormatter::formattedTags(tags); - EXPECT_EQ(expected, actual); -} - -TEST_F(PrometheusStatsFormatterTest, MetricNameCollison) { - - // Create two counters and two gauges with each pair having the same name, - // but having different tag names and values. 
- //`statsAsPrometheus()` should return two implying it found two unique stat names - - addCounter("cluster.test_cluster_1.upstream_cx_total", - {{makeStat("a.tag-name"), makeStat("a.tag-value")}}); - addCounter("cluster.test_cluster_1.upstream_cx_total", - {{makeStat("another_tag_name"), makeStat("another_tag-value")}}); - addGauge("cluster.test_cluster_2.upstream_cx_total", - {{makeStat("another_tag_name_3"), makeStat("another_tag_3-value")}}); - addGauge("cluster.test_cluster_2.upstream_cx_total", - {{makeStat("another_tag_name_4"), makeStat("another_tag_4-value")}}); - - Buffer::OwnedImpl response; - auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, response, - false, absl::nullopt); - EXPECT_EQ(2UL, size); -} - -TEST_F(PrometheusStatsFormatterTest, UniqueMetricName) { - - // Create two counters and two gauges, all with unique names. - // statsAsPrometheus() should return four implying it found - // four unique stat names. - - addCounter("cluster.test_cluster_1.upstream_cx_total", - {{makeStat("a.tag-name"), makeStat("a.tag-value")}}); - addCounter("cluster.test_cluster_2.upstream_cx_total", - {{makeStat("another_tag_name"), makeStat("another_tag-value")}}); - addGauge("cluster.test_cluster_3.upstream_cx_total", - {{makeStat("another_tag_name_3"), makeStat("another_tag_3-value")}}); - addGauge("cluster.test_cluster_4.upstream_cx_total", - {{makeStat("another_tag_name_4"), makeStat("another_tag_4-value")}}); - - Buffer::OwnedImpl response; - auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, response, - false, absl::nullopt); - EXPECT_EQ(4UL, size); -} - -TEST_F(PrometheusStatsFormatterTest, HistogramWithNoValuesAndNoTags) { - HistogramWrapper h1_cumulative; - h1_cumulative.setHistogramValues(std::vector(0)); - Stats::HistogramStatisticsImpl h1_cumulative_statistics(h1_cumulative.getHistogram()); - - auto histogram = makeHistogram(); - histogram->name_ = "histogram1"; - histogram->used_ 
= true; - ON_CALL(*histogram, cumulativeStatistics()) - .WillByDefault(testing::ReturnRef(h1_cumulative_statistics)); - - addHistogram(histogram); - - Buffer::OwnedImpl response; - auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, response, - false, absl::nullopt); - EXPECT_EQ(1UL, size); - - const std::string expected_output = R"EOF(# TYPE envoy_histogram1 histogram -envoy_histogram1_bucket{le="0.5"} 0 -envoy_histogram1_bucket{le="1"} 0 -envoy_histogram1_bucket{le="5"} 0 -envoy_histogram1_bucket{le="10"} 0 -envoy_histogram1_bucket{le="25"} 0 -envoy_histogram1_bucket{le="50"} 0 -envoy_histogram1_bucket{le="100"} 0 -envoy_histogram1_bucket{le="250"} 0 -envoy_histogram1_bucket{le="500"} 0 -envoy_histogram1_bucket{le="1000"} 0 -envoy_histogram1_bucket{le="2500"} 0 -envoy_histogram1_bucket{le="5000"} 0 -envoy_histogram1_bucket{le="10000"} 0 -envoy_histogram1_bucket{le="30000"} 0 -envoy_histogram1_bucket{le="60000"} 0 -envoy_histogram1_bucket{le="300000"} 0 -envoy_histogram1_bucket{le="600000"} 0 -envoy_histogram1_bucket{le="1800000"} 0 -envoy_histogram1_bucket{le="3600000"} 0 -envoy_histogram1_bucket{le="+Inf"} 0 -envoy_histogram1_sum{} 0 -envoy_histogram1_count{} 0 -)EOF"; - - EXPECT_EQ(expected_output, response.toString()); -} - -TEST_F(PrometheusStatsFormatterTest, HistogramWithHighCounts) { - HistogramWrapper h1_cumulative; - - // Force large counts to prove that the +Inf bucket doesn't overflow to scientific notation. 
- h1_cumulative.setHistogramValuesWithCounts(std::vector>({ - {1, 100000}, - {100, 1000000}, - {1000, 100000000}, - })); - - Stats::HistogramStatisticsImpl h1_cumulative_statistics(h1_cumulative.getHistogram()); - - auto histogram = makeHistogram(); - histogram->name_ = "histogram1"; - histogram->used_ = true; - ON_CALL(*histogram, cumulativeStatistics()) - .WillByDefault(testing::ReturnRef(h1_cumulative_statistics)); - - addHistogram(histogram); - - Buffer::OwnedImpl response; - auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, response, - false, absl::nullopt); - EXPECT_EQ(1UL, size); - - const std::string expected_output = R"EOF(# TYPE envoy_histogram1 histogram -envoy_histogram1_bucket{le="0.5"} 0 -envoy_histogram1_bucket{le="1"} 0 -envoy_histogram1_bucket{le="5"} 100000 -envoy_histogram1_bucket{le="10"} 100000 -envoy_histogram1_bucket{le="25"} 100000 -envoy_histogram1_bucket{le="50"} 100000 -envoy_histogram1_bucket{le="100"} 100000 -envoy_histogram1_bucket{le="250"} 1100000 -envoy_histogram1_bucket{le="500"} 1100000 -envoy_histogram1_bucket{le="1000"} 1100000 -envoy_histogram1_bucket{le="2500"} 101100000 -envoy_histogram1_bucket{le="5000"} 101100000 -envoy_histogram1_bucket{le="10000"} 101100000 -envoy_histogram1_bucket{le="30000"} 101100000 -envoy_histogram1_bucket{le="60000"} 101100000 -envoy_histogram1_bucket{le="300000"} 101100000 -envoy_histogram1_bucket{le="600000"} 101100000 -envoy_histogram1_bucket{le="1800000"} 101100000 -envoy_histogram1_bucket{le="3600000"} 101100000 -envoy_histogram1_bucket{le="+Inf"} 101100000 -envoy_histogram1_sum{} 105105105000 -envoy_histogram1_count{} 101100000 -)EOF"; - - EXPECT_EQ(expected_output, response.toString()); -} - -TEST_F(PrometheusStatsFormatterTest, OutputWithAllMetricTypes) { - addCounter("cluster.test_1.upstream_cx_total", - {{makeStat("a.tag-name"), makeStat("a.tag-value")}}); - addCounter("cluster.test_2.upstream_cx_total", - {{makeStat("another_tag_name"), 
makeStat("another_tag-value")}}); - addGauge("cluster.test_3.upstream_cx_total", - {{makeStat("another_tag_name_3"), makeStat("another_tag_3-value")}}); - addGauge("cluster.test_4.upstream_cx_total", - {{makeStat("another_tag_name_4"), makeStat("another_tag_4-value")}}); - - const std::vector h1_values = {50, 20, 30, 70, 100, 5000, 200}; - HistogramWrapper h1_cumulative; - h1_cumulative.setHistogramValues(h1_values); - Stats::HistogramStatisticsImpl h1_cumulative_statistics(h1_cumulative.getHistogram()); - - auto histogram1 = makeHistogram(); - histogram1->name_ = "cluster.test_1.upstream_rq_time"; - histogram1->unit_ = Stats::Histogram::Unit::Milliseconds; - histogram1->used_ = true; - histogram1->setTags({Stats::Tag{"key1", "value1"}, Stats::Tag{"key2", "value2"}}); - addHistogram(histogram1); - EXPECT_CALL(*histogram1, cumulativeStatistics()) - .WillOnce(testing::ReturnRef(h1_cumulative_statistics)); - - Buffer::OwnedImpl response; - auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, response, - false, absl::nullopt); - EXPECT_EQ(5UL, size); - - const std::string expected_output = R"EOF(# TYPE envoy_cluster_test_1_upstream_cx_total counter -envoy_cluster_test_1_upstream_cx_total{a_tag_name="a.tag-value"} 0 -# TYPE envoy_cluster_test_2_upstream_cx_total counter -envoy_cluster_test_2_upstream_cx_total{another_tag_name="another_tag-value"} 0 -# TYPE envoy_cluster_test_3_upstream_cx_total gauge -envoy_cluster_test_3_upstream_cx_total{another_tag_name_3="another_tag_3-value"} 0 -# TYPE envoy_cluster_test_4_upstream_cx_total gauge -envoy_cluster_test_4_upstream_cx_total{another_tag_name_4="another_tag_4-value"} 0 -# TYPE envoy_cluster_test_1_upstream_rq_time histogram -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="0.5"} 0 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="1"} 0 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="5"} 0 
-envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="10"} 0 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="25"} 1 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="50"} 2 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="100"} 4 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="250"} 6 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="500"} 6 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="1000"} 6 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="2500"} 6 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="5000"} 6 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="10000"} 7 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="30000"} 7 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="60000"} 7 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="300000"} 7 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="600000"} 7 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="1800000"} 7 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="3600000"} 7 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="+Inf"} 7 -envoy_cluster_test_1_upstream_rq_time_sum{key1="value1",key2="value2"} 5532 -envoy_cluster_test_1_upstream_rq_time_count{key1="value1",key2="value2"} 7 -)EOF"; - - EXPECT_EQ(expected_output, response.toString()); -} - -TEST_F(PrometheusStatsFormatterTest, OutputWithUsedOnly) { - addCounter("cluster.test_1.upstream_cx_total", - {{makeStat("a.tag-name"), makeStat("a.tag-value")}}); - addCounter("cluster.test_2.upstream_cx_total", - {{makeStat("another_tag_name"), makeStat("another_tag-value")}}); - 
addGauge("cluster.test_3.upstream_cx_total", - {{makeStat("another_tag_name_3"), makeStat("another_tag_3-value")}}); - addGauge("cluster.test_4.upstream_cx_total", - {{makeStat("another_tag_name_4"), makeStat("another_tag_4-value")}}); - - const std::vector h1_values = {50, 20, 30, 70, 100, 5000, 200}; - HistogramWrapper h1_cumulative; - h1_cumulative.setHistogramValues(h1_values); - Stats::HistogramStatisticsImpl h1_cumulative_statistics(h1_cumulative.getHistogram()); - - auto histogram1 = makeHistogram(); - histogram1->name_ = "cluster.test_1.upstream_rq_time"; - histogram1->unit_ = Stats::Histogram::Unit::Milliseconds; - histogram1->used_ = true; - histogram1->setTags({Stats::Tag{"key1", "value1"}, Stats::Tag{"key2", "value2"}}); - addHistogram(histogram1); - EXPECT_CALL(*histogram1, cumulativeStatistics()) - .WillOnce(testing::ReturnRef(h1_cumulative_statistics)); - - Buffer::OwnedImpl response; - auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, response, - true, absl::nullopt); - EXPECT_EQ(1UL, size); - - const std::string expected_output = R"EOF(# TYPE envoy_cluster_test_1_upstream_rq_time histogram -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="0.5"} 0 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="1"} 0 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="5"} 0 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="10"} 0 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="25"} 1 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="50"} 2 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="100"} 4 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="250"} 6 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="500"} 6 
-envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="1000"} 6 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="2500"} 6 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="5000"} 6 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="10000"} 7 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="30000"} 7 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="60000"} 7 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="300000"} 7 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="600000"} 7 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="1800000"} 7 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="3600000"} 7 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="+Inf"} 7 -envoy_cluster_test_1_upstream_rq_time_sum{key1="value1",key2="value2"} 5532 -envoy_cluster_test_1_upstream_rq_time_count{key1="value1",key2="value2"} 7 -)EOF"; - - EXPECT_EQ(expected_output, response.toString()); -} - -TEST_F(PrometheusStatsFormatterTest, OutputWithUsedOnlyHistogram) { - const std::vector h1_values = {}; - HistogramWrapper h1_cumulative; - h1_cumulative.setHistogramValues(h1_values); - Stats::HistogramStatisticsImpl h1_cumulative_statistics(h1_cumulative.getHistogram()); - - auto histogram1 = makeHistogram(); - histogram1->name_ = "cluster.test_1.upstream_rq_time"; - histogram1->unit_ = Stats::Histogram::Unit::Milliseconds; - histogram1->used_ = false; - histogram1->setTags({Stats::Tag{"key1", "value1"}, Stats::Tag{"key2", "value2"}}); - addHistogram(histogram1); - - { - const bool used_only = true; - EXPECT_CALL(*histogram1, cumulativeStatistics()).Times(0); - - Buffer::OwnedImpl response; - auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, - response, 
used_only, absl::nullopt); - EXPECT_EQ(0UL, size); - } - - { - const bool used_only = false; - EXPECT_CALL(*histogram1, cumulativeStatistics()) - .WillOnce(testing::ReturnRef(h1_cumulative_statistics)); - - Buffer::OwnedImpl response; - auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, - response, used_only, absl::nullopt); - EXPECT_EQ(1UL, size); - } -} - -TEST_F(PrometheusStatsFormatterTest, OutputWithRegexp) { - addCounter("cluster.test_1.upstream_cx_total", - {{makeStat("a.tag-name"), makeStat("a.tag-value")}}); - addCounter("cluster.test_2.upstream_cx_total", - {{makeStat("another_tag_name"), makeStat("another_tag-value")}}); - addGauge("cluster.test_3.upstream_cx_total", - {{makeStat("another_tag_name_3"), makeStat("another_tag_3-value")}}); - addGauge("cluster.test_4.upstream_cx_total", - {{makeStat("another_tag_name_4"), makeStat("another_tag_4-value")}}); - - const std::vector h1_values = {50, 20, 30, 70, 100, 5000, 200}; - HistogramWrapper h1_cumulative; - h1_cumulative.setHistogramValues(h1_values); - Stats::HistogramStatisticsImpl h1_cumulative_statistics(h1_cumulative.getHistogram()); - - auto histogram1 = makeHistogram(); - histogram1->name_ = "cluster.test_1.upstream_rq_time"; - histogram1->unit_ = Stats::Histogram::Unit::Milliseconds; - histogram1->setTags({Stats::Tag{"key1", "value1"}, Stats::Tag{"key2", "value2"}}); - addHistogram(histogram1); - - Buffer::OwnedImpl response; - auto size = PrometheusStatsFormatter::statsAsPrometheus( - counters_, gauges_, histograms_, response, false, - absl::optional{std::regex("cluster.test_1.upstream_cx_total")}); - EXPECT_EQ(1UL, size); - - const std::string expected_output = - R"EOF(# TYPE envoy_cluster_test_1_upstream_cx_total counter -envoy_cluster_test_1_upstream_cx_total{a_tag_name="a.tag-value"} 0 -)EOF"; - - EXPECT_EQ(expected_output, response.toString()); -} - } // namespace Server } // namespace Envoy diff --git a/test/server/api_listener_test.cc 
b/test/server/api_listener_test.cc index f229823c59c32..ff9fa0d02fd0b 100644 --- a/test/server/api_listener_test.cc +++ b/test/server/api_listener_test.cc @@ -6,7 +6,10 @@ #include "server/listener_manager_impl.h" #include "test/mocks/network/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/instance.h" +#include "test/mocks/server/listener_component_factory.h" +#include "test/mocks/server/worker.h" +#include "test/mocks/server/worker_factory.h" #include "test/server/utility.h" #include "test/test_common/utility.h" @@ -52,7 +55,7 @@ name: test_api_listener cluster: dynamic_forward_proxy_cluster )EOF"; - const envoy::config::listener::v3::Listener config = parseListenerFromV2Yaml(yaml); + const envoy::config::listener::v3::Listener config = parseListenerFromV3Yaml(yaml); auto http_api_listener = HttpApiListener(config, *listener_manager_, config.name()); @@ -78,7 +81,7 @@ name: test_api_listener path: eds path )EOF"; - const envoy::config::listener::v3::Listener config = parseListenerFromV2Yaml(yaml); + const envoy::config::listener::v3::Listener config = parseListenerFromV3Yaml(yaml); EXPECT_THROW_WITH_MESSAGE( HttpApiListener(config, *listener_manager_, config.name()), EnvoyException, @@ -112,7 +115,7 @@ name: test_api_listener cluster: dynamic_forward_proxy_cluster )EOF"; - const envoy::config::listener::v3::Listener config = parseListenerFromV2Yaml(yaml); + const envoy::config::listener::v3::Listener config = parseListenerFromV3Yaml(yaml); auto http_api_listener = HttpApiListener(config, *listener_manager_, config.name()); diff --git a/test/server/config_validation/BUILD b/test/server/config_validation/BUILD index f785888ac01cb..3a710f8ff42e2 100644 --- a/test/server/config_validation/BUILD +++ b/test/server/config_validation/BUILD @@ -1,9 +1,9 @@ -licenses(["notice"]) # Apache 2 - -load("//bazel:envoy_build_system.bzl", "envoy_cc_fuzz_test", "envoy_cc_test", "envoy_package") +load("//bazel:envoy_build_system.bzl", "envoy_cc_fuzz_test", 
"envoy_cc_test", "envoy_cc_test_library", "envoy_package", "envoy_proto_library") load("//source/extensions:all_extensions.bzl", "envoy_all_extensions") load("//bazel:repositories.bzl", "PPC_SKIP_TARGETS", "WINDOWS_SKIP_TARGETS") +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( @@ -27,6 +27,7 @@ envoy_cc_test( "//include/envoy/upstream:resource_manager_interface", "//include/envoy/upstream:upstream_interface", "//source/common/api:api_lib", + "//source/common/singleton:manager_impl_lib", "//source/common/stats:stats_lib", "//source/extensions/transport_sockets/tls:context_lib", "//source/server/config_validation:cluster_manager_lib", @@ -39,7 +40,7 @@ envoy_cc_test( "//test/mocks/protobuf:protobuf_mocks", "//test/mocks/runtime:runtime_mocks", "//test/mocks/secret:secret_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:admin_mocks", "//test/mocks/thread_local:thread_local_mocks", "//test/mocks/upstream:upstream_mocks", "//test/test_common:simulated_time_system_lib", @@ -48,10 +49,16 @@ envoy_cc_test( ], ) +filegroup( + name = "server_test_data", + srcs = glob(["test_data/**"]), +) + envoy_cc_test( name = "server_test", srcs = ["server_test.cc"], data = [ + ":server_test_data", "//configs:example_configs", "//test/config_test:example_configs_test_setup.sh", ], @@ -61,9 +68,10 @@ envoy_cc_test( "//source/extensions/transport_sockets/tls:config", "//source/server/config_validation:server_lib", "//test/integration:integration_lib", - "//test/mocks/server:server_mocks", + "//test/mocks/server:options_mocks", "//test/mocks/stats:stats_mocks", "//test/test_common:environment_lib", + "//test/test_common:registry_lib", "//test/test_common:utility_lib", ], ) @@ -89,9 +97,8 @@ envoy_cc_fuzz_test( deps = [ "//source/common/common:thread_lib", "//source/server/config_validation:server_lib", - "//source/server:proto_descriptors_lib", "//test/integration:integration_lib", - "//test/mocks/server:server_mocks", + 
"//test/mocks/server:options_mocks", "//test/test_common:environment_lib", ] + select({ "//bazel:windows_x86_64": envoy_all_extensions(WINDOWS_SKIP_TARGETS), @@ -99,3 +106,62 @@ envoy_cc_fuzz_test( "//conditions:default": envoy_all_extensions(), }), ) + +envoy_proto_library( + name = "xds_fuzz_proto", + srcs = ["xds_fuzz.proto"], +) + +envoy_cc_test_library( + name = "xds_verifier_lib", + srcs = ["xds_verifier.cc"], + hdrs = ["xds_verifier.h"], + deps = [ + ":xds_fuzz_proto_cc_proto", + "//source/common/common:assert_lib", + "//source/common/common:minimal_logger_lib", + "@envoy_api//envoy/admin/v3:pkg_cc_proto", + "@envoy_api//envoy/config/cluster/v3:pkg_cc_proto", + "@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto", + "@envoy_api//envoy/config/filter/network/http_connection_manager/v2:pkg_cc_proto", + "@envoy_api//envoy/config/listener/v3:pkg_cc_proto", + "@envoy_api//envoy/config/route/v3:pkg_cc_proto", + ], +) + +envoy_cc_test( + name = "xds_verifier_test", + srcs = ["xds_verifier_test.cc"], + deps = [ + ":xds_verifier_lib", + "//test/config:utility_lib", + ], +) + +envoy_cc_test_library( + name = "xds_fuzz_lib", + srcs = ["xds_fuzz.cc"], + hdrs = ["xds_fuzz.h"], + deps = [ + ":xds_fuzz_proto_cc_proto", + ":xds_verifier_lib", + "//test/integration:http_integration_lib", + "@envoy_api//envoy/admin/v3:pkg_cc_proto", + "@envoy_api//envoy/api/v2:pkg_cc_proto", + "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", + "@envoy_api//envoy/config/cluster/v3:pkg_cc_proto", + "@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto", + "@envoy_api//envoy/config/listener/v3:pkg_cc_proto", + "@envoy_api//envoy/config/route/v3:pkg_cc_proto", + ], +) + +envoy_cc_fuzz_test( + name = "xds_fuzz_test", + srcs = ["xds_fuzz_test.cc"], + corpus = "xds_corpus", + deps = [ + ":xds_fuzz_lib", + "//source/common/protobuf:utility_lib", + ], +) diff --git a/test/server/config_validation/cluster_manager_test.cc b/test/server/config_validation/cluster_manager_test.cc index 
cd6cfdbb9b562..55f7ccd987381 100644 --- a/test/server/config_validation/cluster_manager_test.cc +++ b/test/server/config_validation/cluster_manager_test.cc @@ -3,6 +3,7 @@ #include "envoy/upstream/upstream.h" #include "common/api/api_impl.h" +#include "common/grpc/context_impl.h" #include "common/http/context_impl.h" #include "common/singleton/manager_impl.h" @@ -18,7 +19,7 @@ #include "test/mocks/protobuf/mocks.h" #include "test/mocks/runtime/mocks.h" #include "test/mocks/secret/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/admin.h" #include "test/mocks/thread_local/mocks.h" #include "test/mocks/upstream/mocks.h" #include "test/test_common/simulated_time_system.h" @@ -35,7 +36,7 @@ TEST(ValidationClusterManagerTest, MockedMethods) { Api::ApiPtr api(Api::createApiForTest(stats_store, time_system)); NiceMock runtime; NiceMock tls; - NiceMock random; + NiceMock random; testing::NiceMock secret_manager; auto dns_resolver = std::make_shared>(); Extensions::TransportSockets::Tls::ContextManagerImpl ssl_context_manager{api->timeSource()}; diff --git a/test/server/config_validation/config_fuzz_test.cc b/test/server/config_validation/config_fuzz_test.cc index bd40a453634a9..107bb2eeb4157 100644 --- a/test/server/config_validation/config_fuzz_test.cc +++ b/test/server/config_validation/config_fuzz_test.cc @@ -6,11 +6,10 @@ #include "common/network/address_impl.h" #include "server/config_validation/server.h" -#include "server/proto_descriptors.h" #include "test/fuzz/fuzz_runner.h" #include "test/integration/server.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/options.h" #include "test/test_common/environment.h" namespace Envoy { diff --git a/test/server/config_validation/server_test.cc b/test/server/config_validation/server_test.cc index 98def03898513..c1e6de23ec488 100644 --- a/test/server/config_validation/server_test.cc +++ b/test/server/config_validation/server_test.cc @@ -1,11 +1,15 @@ #include +#include 
"envoy/server/filter_config.h" + #include "server/config_validation/server.h" #include "test/integration/server.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/options.h" #include "test/mocks/stats/mocks.h" #include "test/test_common/environment.h" +#include "test/test_common/registry.h" +#include "test/test_common/test_time.h" namespace Envoy { namespace Server { @@ -14,14 +18,14 @@ namespace { // Test param is the path to the config file to validate. class ValidationServerTest : public testing::TestWithParam { public: - static void SetupTestDirectory() { + static void setupTestDirectory() { TestEnvironment::exec( {TestEnvironment::runfilesPath("test/config_test/example_configs_test_setup.sh")}); directory_ = TestEnvironment::temporaryDirectory() + "/test/config_test/"; } static void SetUpTestSuite() { // NOLINT(readability-identifier-naming) - SetupTestDirectory(); + setupTestDirectory(); } protected: @@ -38,8 +42,34 @@ std::string ValidationServerTest::directory_ = ""; // tests than set of tests for ValidationServerTest. class ValidationServerTest_1 : public ValidationServerTest { public: - static const std::vector GetAllConfigFiles() { - SetupTestDirectory(); + static const std::vector getAllConfigFiles() { + setupTestDirectory(); + + auto files = TestUtility::listFiles(ValidationServerTest::directory_, false); + + // Strip directory part. options_ adds it for each test. + for (auto& file : files) { + file = file.substr(directory_.length() + 1); + } + return files; + } +}; + +// RuntimeFeatureValidationServerTest is used to test validation with non-default runtime +// values. 
+class RuntimeFeatureValidationServerTest : public ValidationServerTest { +public: + static void SetUpTestSuite() { // NOLINT(readability-identifier-naming) + setupTestDirectory(); + } + + static void setupTestDirectory() { + directory_ = + TestEnvironment::runfilesDirectory("envoy/test/server/config_validation/test_data/"); + } + + static const std::vector getAllConfigFiles() { + setupTestDirectory(); auto files = TestUtility::listFiles(ValidationServerTest::directory_, false); @@ -49,6 +79,33 @@ class ValidationServerTest_1 : public ValidationServerTest { } return files; } + + class TestConfigFactory : public Configuration::NamedNetworkFilterConfigFactory { + public: + std::string name() const override { return "envoy.filters.network.test"; } + + Network::FilterFactoryCb createFilterFactoryFromProto(const Protobuf::Message&, + Configuration::FactoryContext&) override { + // Validate that the validation server loaded the runtime data and installed the singleton. + auto* runtime = Runtime::LoaderSingleton::getExisting(); + if (runtime == nullptr) { + throw EnvoyException("Runtime::LoaderSingleton == nullptr"); + } + + if (!runtime->threadsafeSnapshot()->getBoolean("test.runtime.loaded", false)) { + throw EnvoyException( + "Found Runtime::LoaderSingleton, got wrong value for test.runtime.loaded"); + } + + return [](Network::FilterManager&) {}; + } + + ProtobufTypes::MessagePtr createEmptyConfigProto() override { + return ProtobufTypes::MessagePtr{new ProtobufWkt::Struct()}; + } + + bool isTerminalFilter() override { return true; } + }; }; TEST_P(ValidationServerTest, Validate) { @@ -74,12 +131,16 @@ TEST_P(ValidationServerTest, NoopLifecycleNotifier) { // TODO(rlazarus): We'd like use this setup to replace //test/config_test (that is, run it against // all the example configs) but can't until light validation is implemented, mocking out access to // the filesystem for TLS certs, etc. In the meantime, these are the example configs that work -// as-is. 
-INSTANTIATE_TEST_SUITE_P(ValidConfigs, ValidationServerTest, - ::testing::Values("front-proxy_front-envoy.yaml", - "google_com_proxy.v2.yaml", - "grpc-bridge_server_envoy-proxy.yaml", - "front-proxy_service-envoy.yaml")); +// as-is. (Note, /dev/stdout as an access log file is invalid on Windows, no equivalent /dev/ +// exists.) + +auto testing_values = ::testing::Values("front-proxy_front-envoy.yaml", "google_com_proxy.v2.yaml", +#ifndef WIN32 + "grpc-bridge_server_envoy-proxy.yaml", +#endif + "front-proxy_service-envoy.yaml"); + +INSTANTIATE_TEST_SUITE_P(ValidConfigs, ValidationServerTest, testing_values); // Just make sure that all configs can be ingested without a crash. Processing of config files // may not be successful, but there should be no crash. @@ -91,7 +152,23 @@ TEST_P(ValidationServerTest_1, RunWithoutCrash) { } INSTANTIATE_TEST_SUITE_P(AllConfigs, ValidationServerTest_1, - ::testing::ValuesIn(ValidationServerTest_1::GetAllConfigFiles())); + ::testing::ValuesIn(ValidationServerTest_1::getAllConfigFiles())); + +TEST_P(RuntimeFeatureValidationServerTest, ValidRuntimeLoaderSingleton) { + TestConfigFactory factory; + Registry::InjectFactory registration(factory); + + auto local_address = Network::Utility::getLocalAddress(options_.localAddressIpVersion()); + + // If this fails, it's likely because TestConfigFactory threw an exception related to the + // runtime loader. 
+ ASSERT_TRUE(validateConfig(options_, local_address, component_factory_, + Thread::threadFactoryForTest(), Filesystem::fileSystemForTest())); +} + +INSTANTIATE_TEST_SUITE_P( + AllConfigs, RuntimeFeatureValidationServerTest, + ::testing::ValuesIn(RuntimeFeatureValidationServerTest::getAllConfigFiles())); } // namespace } // namespace Server diff --git a/test/server/config_validation/test_data/runtime_config.yaml b/test/server/config_validation/test_data/runtime_config.yaml new file mode 100644 index 0000000000000..e4b29bc158be6 --- /dev/null +++ b/test/server/config_validation/test_data/runtime_config.yaml @@ -0,0 +1,27 @@ +--- +node: + id: "test" +layered_runtime: + layers: + - name: static-layer + static_layer: + "test.runtime.loaded": true +static_resources: + listeners: + - name: "test.listener" + address: + socket_address: + protocol: TCP + address: 0.0.0.0 + port_value: 0 + filter_chains: + - filters: + - name: envoy.filters.network.test + typed_config: + "@type": type.googleapis.com/google.protobuf.Struct +admin: + access_log_path: "/dev/null" + address: + socket_address: + address: 0.0.0.0 + port_value: 9000 diff --git a/test/server/config_validation/xds_corpus/clusterfuzz-testcase-minimized-xds_fuzz_test-6524356210196480 b/test/server/config_validation/xds_corpus/clusterfuzz-testcase-minimized-xds_fuzz_test-6524356210196480 new file mode 100644 index 0000000000000..df27fe2695e75 --- /dev/null +++ b/test/server/config_validation/xds_corpus/clusterfuzz-testcase-minimized-xds_fuzz_test-6524356210196480 @@ -0,0 +1,54 @@ +actions { + add_listener { + listener_num: 256 + route_num: 6356993 + } +} +actions { + add_listener { + route_num: 16 + } +} +actions { + remove_listener { + } +} +actions { + add_route { + route_num: 1 + } +} +actions { + add_listener { + route_num: 11264 + } +} +actions { + add_listener { + listener_num: 2147483648 + route_num: 2147483648 + } +} +actions { + add_listener { + listener_num: 6356993 + route_num: 11264 + } +} +actions { + 
add_listener { + listener_num: 256 + route_num: 65537 + } +} +actions { + add_route { + route_num: 1 + } +} +actions { + add_listener { + listener_num: 2147483648 + route_num: 2 + } +} diff --git a/test/server/config_validation/xds_corpus/example0 b/test/server/config_validation/xds_corpus/example0 new file mode 100644 index 0000000000000..5f95c4c9cc563 --- /dev/null +++ b/test/server/config_validation/xds_corpus/example0 @@ -0,0 +1,31 @@ +actions { + add_listener { + listener_num : 0 + route_num : 0 + } +} +actions { + add_route { + route_num : 0 + } +} +actions { + add_listener { + listener_num : 1 + route_num : 1 + } +} +actions { + add_route { + route_num : 1 + } +} +actions { + add_listener { + listener_num : 2 + route_num : 2 + } +} +config { + sotw_or_delta : SOTW +} diff --git a/test/server/config_validation/xds_corpus/example1 b/test/server/config_validation/xds_corpus/example1 new file mode 100644 index 0000000000000..be2117ef6f6cc --- /dev/null +++ b/test/server/config_validation/xds_corpus/example1 @@ -0,0 +1,8 @@ +actions { + remove_listener { + listener_num: 1 + } +} +config { + sotw_or_delta: SOTW +} diff --git a/test/server/config_validation/xds_corpus/example10 b/test/server/config_validation/xds_corpus/example10 new file mode 100644 index 0000000000000..6c1736bb4b9bb --- /dev/null +++ b/test/server/config_validation/xds_corpus/example10 @@ -0,0 +1,22 @@ +actions { + add_listener { + route_num: 100728832 + } +} +actions { + add_route { + } +} +actions { + add_listener { + listener_num: 1 + route_num: 1 + } +} +actions { + add_route { + route_num: 1 + } +} +config { +} diff --git a/test/server/config_validation/xds_corpus/example13 b/test/server/config_validation/xds_corpus/example13 new file mode 100644 index 0000000000000..62b21361326b6 --- /dev/null +++ b/test/server/config_validation/xds_corpus/example13 @@ -0,0 +1,57 @@ +actions { + add_listener { + listener_num: 2 + route_num: 3 + } +} +actions { + add_route { + route_num: 3 + } +} +actions { 
+ add_route { + route_num: 3 + } +} +actions { + add_route { + route_num: 1 + } +} +actions { + add_listener { + listener_num: 0 + route_num: 1 + } +} +actions { + add_route { + route_num: 0 + } +} +actions { + remove_listener { + listener_num: 1 + } +} +actions { + add_listener { + listener_num: 2 + route_num: 3 + } +} +actions { + add_route { + route_num: 1 + } +} +actions { + add_listener { + listener_num: 2 + route_num: 3 + } +} +config { + sotw_or_delta: SOTW +} diff --git a/test/server/config_validation/xds_corpus/example2 b/test/server/config_validation/xds_corpus/example2 new file mode 100644 index 0000000000000..31c3023cacab6 --- /dev/null +++ b/test/server/config_validation/xds_corpus/example2 @@ -0,0 +1,25 @@ +actions { + add_listener { + listener_num: 1 + route_num: 1 + } +} +actions { + add_route { + route_num: 1 + } +} +actions { + add_route { + route_num: 1 + } +} +actions { + add_listener { + listener_num: 2 + route_num: 2 + } +} +config { + sotw_or_delta : DELTA +} diff --git a/test/server/config_validation/xds_corpus/example3 b/test/server/config_validation/xds_corpus/example3 new file mode 100644 index 0000000000000..e64420e81a2bd --- /dev/null +++ b/test/server/config_validation/xds_corpus/example3 @@ -0,0 +1,19 @@ +actions { + add_route { + route_num : 0 + } +} +actions { + add_listener { + listener_num : 0 + route_num : 0 + } +} +actions { + remove_listener { + listener_num : 0 + } +} +config { + sotw_or_delta : SOTW +} diff --git a/test/server/config_validation/xds_corpus/example4 b/test/server/config_validation/xds_corpus/example4 new file mode 100644 index 0000000000000..3a7bb0203cdb4 --- /dev/null +++ b/test/server/config_validation/xds_corpus/example4 @@ -0,0 +1,26 @@ +actions { + add_listener { + listener_num: 1 + route_num: 2 + } +} +actions { + add_listener { + listener_num: 1 + route_num: 1 + } +} +actions { + add_route { + route_num: 1 + } +} +actions { + add_listener { + listener_num: 1 + route_num: 2 + } +} +config { + 
sotw_or_delta: DELTA +} diff --git a/test/server/config_validation/xds_corpus/example5 b/test/server/config_validation/xds_corpus/example5 new file mode 100644 index 0000000000000..692d470f8c877 --- /dev/null +++ b/test/server/config_validation/xds_corpus/example5 @@ -0,0 +1,33 @@ +actions { + add_route { + route_num: 4261412864 + } +} +actions { + remove_listener { + listener_num: 7012368 + } +} +actions { + add_route { + route_num: 4261412864 + } +} +actions { + remove_listener { + listener_num: 7012388 + } +} +actions { + add_route { + route_num: 7012388 + } +} +actions { + remove_listener { + listener_num: 7012352 + } +} +config { + sotw_or_delta: DELTA +} diff --git a/test/server/config_validation/xds_corpus/example6 b/test/server/config_validation/xds_corpus/example6 new file mode 100644 index 0000000000000..ee8c9cd55c8b5 --- /dev/null +++ b/test/server/config_validation/xds_corpus/example6 @@ -0,0 +1,24 @@ +actions { + add_listener { + listener_num: 1 + route_num: 1 + } +} +actions { + add_route { + route_num: 1 + } +} +actions { + add_route { + route_num: 1 + } +} +actions { + remove_listener { + listener_num: 1 + } +} +config { + sotw_or_delta: DELTA +} diff --git a/test/server/config_validation/xds_corpus/example7 b/test/server/config_validation/xds_corpus/example7 new file mode 100644 index 0000000000000..b54642b3bca2c --- /dev/null +++ b/test/server/config_validation/xds_corpus/example7 @@ -0,0 +1,11 @@ +actions { + add_route { + route_num: 1 + } +} +actions { + add_listener { + } +} +config { +} diff --git a/test/server/config_validation/xds_corpus/example8 b/test/server/config_validation/xds_corpus/example8 new file mode 100644 index 0000000000000..604b7e8dd491b --- /dev/null +++ b/test/server/config_validation/xds_corpus/example8 @@ -0,0 +1,15 @@ +actions { + add_route { + route_num: 1 + } +} +actions { + add_listener { + } +} +actions { + remove_listener { + } +} +config { +} diff --git a/test/server/config_validation/xds_corpus/example9 
b/test/server/config_validation/xds_corpus/example9 new file mode 100644 index 0000000000000..7dc3edc7ac626 --- /dev/null +++ b/test/server/config_validation/xds_corpus/example9 @@ -0,0 +1,21 @@ +actions { + add_listener { + } +} +actions { + add_route { + route_num: 1 + } +} +actions { + add_listener { + listener_num: 1 + route_num: 1 + } +} +actions { + add_route { + } +} +config { +} diff --git a/test/server/config_validation/xds_fuzz.cc b/test/server/config_validation/xds_fuzz.cc new file mode 100644 index 0000000000000..9c1647c155cfb --- /dev/null +++ b/test/server/config_validation/xds_fuzz.cc @@ -0,0 +1,407 @@ +#include "test/server/config_validation/xds_fuzz.h" + +#include "envoy/api/v2/route.pb.h" +#include "envoy/config/bootstrap/v3/bootstrap.pb.h" +#include "envoy/config/cluster/v3/cluster.pb.h" +#include "envoy/config/endpoint/v3/endpoint.pb.h" +#include "envoy/config/listener/v3/listener.pb.h" +#include "envoy/config/route/v3/route.pb.h" + +namespace Envoy { + +// helper functions to build API responses +envoy::config::cluster::v3::Cluster XdsFuzzTest::buildCluster(const std::string& name) { + return ConfigHelper::buildCluster(name, "ROUND_ROBIN", api_version_); +}; + +envoy::config::endpoint::v3::ClusterLoadAssignment +XdsFuzzTest::buildClusterLoadAssignment(const std::string& name) { + return ConfigHelper::buildClusterLoadAssignment( + name, Network::Test::getLoopbackAddressString(ip_version_), + fake_upstreams_[0]->localAddress()->ip()->port(), api_version_); +} + +envoy::config::listener::v3::Listener XdsFuzzTest::buildListener(const std::string& listener_name, + const std::string& route_name) { + return ConfigHelper::buildListener(listener_name, route_name, + Network::Test::getLoopbackAddressString(ip_version_), + "ads_test", api_version_); +} + +envoy::config::route::v3::RouteConfiguration +XdsFuzzTest::buildRouteConfig(const std::string& route_name) { + return ConfigHelper::buildRouteConfig(route_name, "cluster_0", api_version_); +} + +// helper 
functions to send API responses +void XdsFuzzTest::updateListener( + const std::vector& listeners, + const std::vector& added_or_updated, + const std::vector& removed) { + ENVOY_LOG_MISC(debug, "Sending Listener DiscoveryResponse version {}", version_); + sendDiscoveryResponse(Config::TypeUrl::get().Listener, + listeners, added_or_updated, removed, + std::to_string(version_)); +} + +void XdsFuzzTest::updateRoute( + const std::vector& routes, + const std::vector& added_or_updated, + const std::vector& removed) { + ENVOY_LOG_MISC(debug, "Sending Route DiscoveryResponse version {}", version_); + sendDiscoveryResponse( + Config::TypeUrl::get().RouteConfiguration, routes, added_or_updated, removed, + std::to_string(version_)); +} + +XdsFuzzTest::XdsFuzzTest(const test::server::config_validation::XdsTestCase& input, + envoy::config::core::v3::ApiVersion api_version) + : HttpIntegrationTest( + Http::CodecClient::Type::HTTP2, TestEnvironment::getIpVersionsForTest()[0], + ConfigHelper::adsBootstrap(input.config().sotw_or_delta() == + test::server::config_validation::Config::SOTW + ? 
"GRPC" + : "DELTA_GRPC", + api_version)), + verifier_(input.config().sotw_or_delta()), actions_(input.actions()), version_(1), + api_version_(api_version), ip_version_(TestEnvironment::getIpVersionsForTest()[0]) { + use_lds_ = false; + create_xds_upstream_ = true; + tls_xds_upstream_ = false; + + // avoid listeners draining during the test + drain_time_ = std::chrono::seconds(60); + + if (input.config().sotw_or_delta() == test::server::config_validation::Config::SOTW) { + sotw_or_delta_ = Grpc::SotwOrDelta::Sotw; + } else { + sotw_or_delta_ = Grpc::SotwOrDelta::Delta; + } +} + +/** + * initialize an envoy configured with a fully dynamic bootstrap with ADS over gRPC + */ +void XdsFuzzTest::initialize() { + config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + auto* ads_config = bootstrap.mutable_dynamic_resources()->mutable_ads_config(); + auto* grpc_service = ads_config->add_grpc_services(); + + std::string cluster_name = "ads_cluster"; + grpc_service->mutable_envoy_grpc()->set_cluster_name(cluster_name); + auto* ads_cluster = bootstrap.mutable_static_resources()->add_clusters(); + ads_cluster->MergeFrom(bootstrap.static_resources().clusters()[0]); + ads_cluster->set_name("ads_cluster"); + }); + setUpstreamProtocol(FakeHttpConnection::Type::HTTP2); + HttpIntegrationTest::initialize(); + if (xds_stream_ == nullptr) { + createXdsConnection(); + AssertionResult result = xds_connection_->waitForNewStream(*dispatcher_, xds_stream_); + RELEASE_ASSERT(result, result.message()); + xds_stream_->startGrpcStream(); + } +} + +void XdsFuzzTest::close() { + cleanUpXdsConnection(); + test_server_.reset(); + fake_upstreams_.clear(); +} + +/** + * @return true iff listener_name is in listeners_ (and removes it from the vector) + */ +bool XdsFuzzTest::eraseListener(const std::string& listener_name) { + const auto orig_size = listeners_.size(); + listeners_.erase(std::remove_if(listeners_.begin(), listeners_.end(), + [&](auto& listener) { return 
listener.name() == listener_name; }), + listeners_.end()); + return orig_size != listeners_.size(); +} + +/** + * @return true iff route_name has already been added to routes_ + */ +bool XdsFuzzTest::hasRoute(const std::string& route_name) { + return std::any_of(routes_.begin(), routes_.end(), + [&](auto& route) { return route.name() == route_name; }); +} + +/** + * send an xDS response to add a listener and update state accordingly + */ +void XdsFuzzTest::addListener(const std::string& listener_name, const std::string& route_name) { + ENVOY_LOG_MISC(debug, "Adding {} with reference to {}", listener_name, route_name); + lds_update_success_++; + bool removed = eraseListener(listener_name); + auto listener = buildListener(listener_name, route_name); + listeners_.push_back(listener); + + updateListener(listeners_, {listener}, {}); + + // use waitForAck instead of compareDiscoveryRequest as the client makes additional + // discoveryRequests at launch that we might not want to respond to yet + EXPECT_TRUE(waitForAck(Config::TypeUrl::get().Listener, std::to_string(version_))); + if (removed) { + verifier_.listenerUpdated(listener); + } else { + verifier_.listenerAdded(listener); + } +} + +/** + * send an xDS response to remove a listener and update state accordingly + */ +void XdsFuzzTest::removeListener(const std::string& listener_name) { + ENVOY_LOG_MISC(debug, "Removing {}", listener_name); + bool removed = eraseListener(listener_name); + + if (removed) { + lds_update_success_++; + updateListener(listeners_, {}, {listener_name}); + EXPECT_TRUE(waitForAck(Config::TypeUrl::get().Listener, std::to_string(version_))); + verifier_.listenerRemoved(listener_name); + } +} + +/** + * send an xDS response to add a route and update state accordingly + */ +void XdsFuzzTest::addRoute(const std::string& route_name) { + ENVOY_LOG_MISC(debug, "Adding {}", route_name); + auto route = buildRouteConfig(route_name); + + if (!hasRoute(route_name)) { + routes_.push_back(route); + } + + 
updateRoute(routes_, {route}, {}); + verifier_.routeAdded(route); + + EXPECT_TRUE(waitForAck(Config::TypeUrl::get().RouteConfiguration, std::to_string(version_))); +} + +/** + * wait for a specific ACK, ignoring any other ACKs that are made in the meantime + * @param the expected API type url of the ack + * @param the expected version number + * @return AssertionSuccess() if the ack was received, else an AssertionError() + */ +AssertionResult XdsFuzzTest::waitForAck(const std::string& expected_type_url, + const std::string& expected_version) { + if (sotw_or_delta_ == Grpc::SotwOrDelta::Sotw) { + API_NO_BOOST(envoy::api::v2::DiscoveryRequest) discovery_request; + do { + VERIFY_ASSERTION(xds_stream_->waitForGrpcMessage(*dispatcher_, discovery_request)); + ENVOY_LOG_MISC(debug, "Received gRPC message with type {} and version {}", + discovery_request.type_url(), discovery_request.version_info()); + } while (expected_type_url != discovery_request.type_url() || + expected_version != discovery_request.version_info()); + } else { + API_NO_BOOST(envoy::api::v2::DeltaDiscoveryRequest) delta_discovery_request; + do { + VERIFY_ASSERTION(xds_stream_->waitForGrpcMessage(*dispatcher_, delta_discovery_request)); + ENVOY_LOG_MISC(debug, "Received gRPC message with type {}", + delta_discovery_request.type_url()); + } while (expected_type_url != delta_discovery_request.type_url()); + } + version_++; + return AssertionSuccess(); +} + +/** + * run the sequence of actions defined in the fuzzed protobuf + */ +void XdsFuzzTest::replay() { + initialize(); + + // set up cluster + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "", {}, {}, {}, true)); + sendDiscoveryResponse(Config::TypeUrl::get().Cluster, + {buildCluster("cluster_0")}, + {buildCluster("cluster_0")}, {}, "0"); + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, "", + {"cluster_0"}, {"cluster_0"}, {})); + sendDiscoveryResponse( + Config::TypeUrl::get().ClusterLoadAssignment, 
{buildClusterLoadAssignment("cluster_0")}, + {buildClusterLoadAssignment("cluster_0")}, {}, "0"); + + // the client will not subscribe to the RouteConfiguration type URL until it receives a listener, + // and the ACKS it sends back seem to be an empty type URL so just don't check them until a + // listener is added + bool sent_listener = false; + + for (const auto& action : actions_) { + switch (action.action_selector_case()) { + case test::server::config_validation::Action::kAddListener: { + std::string listener_name = + absl::StrCat("listener_", action.add_listener().listener_num() % ListenersMax); + std::string route_name = + absl::StrCat("route_config_", action.add_listener().route_num() % RoutesMax); + addListener(listener_name, route_name); + if (!sent_listener) { + addRoute(route_name); + test_server_->waitForCounterEq("listener_manager.listener_create_success", 1, timeout_); + } + sent_listener = true; + break; + } + case test::server::config_validation::Action::kRemoveListener: { + std::string listener_name = + absl::StrCat("listener_", action.remove_listener().listener_num() % ListenersMax); + removeListener(listener_name); + break; + } + case test::server::config_validation::Action::kAddRoute: { + if (!sent_listener) { + ENVOY_LOG_MISC(debug, "Ignoring request to add route_{}", + action.add_route().route_num() % RoutesMax); + break; + } + std::string route_name = + absl::StrCat("route_config_", action.add_route().route_num() % RoutesMax); + addRoute(route_name); + break; + } + default: + break; + } + if (sent_listener) { + // wait for all of the updates to take effect + test_server_->waitForGaugeEq("listener_manager.total_listeners_warming", + verifier_.numWarming(), timeout_); + test_server_->waitForGaugeEq("listener_manager.total_listeners_active", verifier_.numActive(), + timeout_); + test_server_->waitForGaugeEq("listener_manager.total_listeners_draining", + verifier_.numDraining(), timeout_); + 
test_server_->waitForCounterEq("listener_manager.listener_modified", verifier_.numModified(), + timeout_); + test_server_->waitForCounterEq("listener_manager.listener_added", verifier_.numAdded(), + timeout_); + test_server_->waitForCounterEq("listener_manager.listener_removed", verifier_.numRemoved(), + timeout_); + test_server_->waitForCounterEq("listener_manager.lds.update_success", lds_update_success_, + timeout_); + } + ENVOY_LOG_MISC(debug, "warming {} ({}), active {} ({}), draining {} ({})", + verifier_.numWarming(), + test_server_->gauge("listener_manager.total_listeners_warming")->value(), + verifier_.numActive(), + test_server_->gauge("listener_manager.total_listeners_active")->value(), + verifier_.numDraining(), + test_server_->gauge("listener_manager.total_listeners_draining")->value()); + ENVOY_LOG_MISC(debug, "added {} ({}), modified {} ({}), removed {} ({})", verifier_.numAdded(), + test_server_->counter("listener_manager.listener_added")->value(), + verifier_.numModified(), + test_server_->counter("listener_manager.listener_modified")->value(), + verifier_.numRemoved(), + test_server_->counter("listener_manager.listener_removed")->value()); + } + + verifyState(); + close(); +} + +/** + * verify that each listener in the verifier has a matching listener in the config dump + */ +void XdsFuzzTest::verifyListeners() { + ENVOY_LOG_MISC(debug, "Verifying listeners"); + const auto& abstract_rep = verifier_.listeners(); + const auto dump = getListenersConfigDump().dynamic_listeners(); + + for (const auto& rep : abstract_rep) { + ENVOY_LOG_MISC(debug, "Verifying {} with state {}", rep.listener.name(), rep.state); + + auto listener_dump = std::find_if(dump.begin(), dump.end(), [&](auto& listener) { + return listener.name() == rep.listener.name(); + }); + + // there should be a listener of the same name in the dump + if (listener_dump == dump.end()) { + throw EnvoyException(fmt::format("Expected to find {} in config dump", rep.listener.name())); + } + + 
ENVOY_LOG_MISC(debug, "warm {}, active {}, drain: {}", listener_dump->has_warming_state(), + listener_dump->has_active_state(), listener_dump->has_draining_state()); + // the state should match + switch (rep.state) { + case XdsVerifier::DRAINING: + EXPECT_TRUE(listener_dump->has_draining_state()); + break; + case XdsVerifier::WARMING: + EXPECT_TRUE(listener_dump->has_warming_state()); + break; + case XdsVerifier::ACTIVE: + EXPECT_TRUE(listener_dump->has_active_state()); + break; + default: + NOT_REACHED_GCOVR_EXCL_LINE; + } + } +} + +void XdsFuzzTest::verifyRoutes() { + auto dump = getRoutesConfigDump(); + + // go through routes in verifier and make sure each is in the config dump + auto routes = verifier_.routes(); + EXPECT_EQ(routes.size(), dump.size()); + for (const auto& route : routes) { + EXPECT_TRUE(std::any_of(dump.begin(), dump.end(), [&](const auto& dump_route) { + return route.first == dump_route.name(); + })); + } +} + +void XdsFuzzTest::verifyState() { + verifyListeners(); + ENVOY_LOG_MISC(debug, "Verified listeners"); + verifyRoutes(); + ENVOY_LOG_MISC(debug, "Verified routes"); + + EXPECT_EQ(test_server_->gauge("listener_manager.total_listeners_draining")->value(), + verifier_.numDraining()); + EXPECT_EQ(test_server_->gauge("listener_manager.total_listeners_warming")->value(), + verifier_.numWarming()); + EXPECT_EQ(test_server_->gauge("listener_manager.total_listeners_active")->value(), + verifier_.numActive()); + ENVOY_LOG_MISC(debug, "Verified stats"); + ENVOY_LOG_MISC(debug, "warming {} ({}), active {} ({}), draining {} ({})", verifier_.numWarming(), + test_server_->gauge("listener_manager.total_listeners_warming")->value(), + verifier_.numActive(), + test_server_->gauge("listener_manager.total_listeners_active")->value(), + verifier_.numDraining(), + test_server_->gauge("listener_manager.total_listeners_draining")->value()); +} + +envoy::admin::v3::ListenersConfigDump XdsFuzzTest::getListenersConfigDump() { + auto message_ptr = + 
test_server_->server().admin().getConfigTracker().getCallbacksMap().at("listeners")(); + return dynamic_cast(*message_ptr); +} + +std::vector XdsFuzzTest::getRoutesConfigDump() { + auto map = test_server_->server().admin().getConfigTracker().getCallbacksMap(); + + // there is no route config dump before envoy has a route + if (map.find("routes") == map.end()) { + return {}; + } + + auto message_ptr = map.at("routes")(); + auto dump = dynamic_cast(*message_ptr); + + // since the route config dump gives the RouteConfigurations as an Any, go through and cast them + // back to RouteConfigurations + std::vector dump_routes; + for (const auto& route : dump.dynamic_route_configs()) { + envoy::api::v2::RouteConfiguration dyn_route; + route.route_config().UnpackTo(&dyn_route); + dump_routes.push_back(dyn_route); + } + return dump_routes; +} + +} // namespace Envoy diff --git a/test/server/config_validation/xds_fuzz.h b/test/server/config_validation/xds_fuzz.h new file mode 100644 index 0000000000000..826175bf0241c --- /dev/null +++ b/test/server/config_validation/xds_fuzz.h @@ -0,0 +1,85 @@ +#pragma once + +#include +#include + +#include "envoy/admin/v3/config_dump.pb.h" +#include "envoy/config/cluster/v3/cluster.pb.h" +#include "envoy/config/endpoint/v3/endpoint.pb.h" +#include "envoy/config/listener/v3/listener.pb.h" +#include "envoy/config/route/v3/route.pb.h" + +#include "test/common/grpc/grpc_client_integration.h" +#include "test/config/utility.h" +#include "test/integration/http_integration.h" +#include "test/server/config_validation/xds_fuzz.pb.h" +#include "test/server/config_validation/xds_verifier.h" + +#include "absl/types/optional.h" + +namespace Envoy { + +class XdsFuzzTest : public HttpIntegrationTest { +public: + XdsFuzzTest(const test::server::config_validation::XdsTestCase& input, + envoy::config::core::v3::ApiVersion api_version); + + envoy::config::cluster::v3::Cluster buildCluster(const std::string& name); + + 
envoy::config::endpoint::v3::ClusterLoadAssignment + buildClusterLoadAssignment(const std::string& name); + + envoy::config::listener::v3::Listener buildListener(const std::string& listener_name, + const std::string& route_name); + + envoy::config::route::v3::RouteConfiguration buildRouteConfig(const std::string& route_name); + + void updateListener(const std::vector& listeners, + const std::vector& added_or_updated, + const std::vector& removed); + + void + updateRoute(const std::vector& routes, + const std::vector& added_or_updated, + const std::vector& removed); + + void initialize() override; + void replay(); + void close(); + + const size_t ListenersMax = 3; + const size_t RoutesMax = 5; + +private: + void addListener(const std::string& listener_name, const std::string& route_name); + void removeListener(const std::string& listener_name); + void addRoute(const std::string& route_name); + + void verifyState(); + void verifyListeners(); + void verifyRoutes(); + + envoy::admin::v3::ListenersConfigDump getListenersConfigDump(); + std::vector getRoutesConfigDump(); + + bool eraseListener(const std::string& listener_name); + bool hasRoute(const std::string& route_name); + AssertionResult waitForAck(const std::string& expected_type_url, + const std::string& expected_version); + + XdsVerifier verifier_; + + Protobuf::RepeatedPtrField actions_; + std::vector routes_; + std::vector listeners_; + + uint64_t version_; + envoy::config::core::v3::ApiVersion api_version_; + + Network::Address::IpVersion ip_version_; + + std::chrono::seconds timeout_{5}; + uint64_t lds_update_success_{0}; +}; + +} // namespace Envoy diff --git a/test/server/config_validation/xds_fuzz.proto b/test/server/config_validation/xds_fuzz.proto new file mode 100644 index 0000000000000..5a672282f946a --- /dev/null +++ b/test/server/config_validation/xds_fuzz.proto @@ -0,0 +1,47 @@ +syntax = "proto3"; + +package test.server.config_validation; + +import "validate/validate.proto"; + +message AddListener { 
+ // generates a new listener listener_x with number listener_num, which can later be removed by + // RemoveListener + // if listener_x had already been added, it will update listener_x's route_config + uint32 listener_num = 1; + // listener_x references route_y, which has number route_num + uint32 route_num = 2; +} + +message AddRoute { + // generates a new route route_y with number route_num which can later be removed by a RemoveRoute + uint32 route_num = 1; +} + +message RemoveListener { + // removes listener_x + uint32 listener_num = 1; +} + +message Action { + oneof action_selector { + option (validate.required) = true; + + AddListener add_listener = 1; + AddRoute add_route = 2; + RemoveListener remove_listener = 3; + } +} + +message Config { + enum SotwOrDelta { + SOTW = 0; + DELTA = 1; + } + SotwOrDelta sotw_or_delta = 1; +} + +message XdsTestCase { + repeated Action actions = 1; + Config config = 2; +} diff --git a/test/server/config_validation/xds_fuzz_test.cc b/test/server/config_validation/xds_fuzz_test.cc new file mode 100644 index 0000000000000..87f3e59690235 --- /dev/null +++ b/test/server/config_validation/xds_fuzz_test.cc @@ -0,0 +1,21 @@ +/* #include "common/protobuf/utility.h" */ + +#include "test/fuzz/fuzz_runner.h" +#include "test/server/config_validation/xds_fuzz.h" +#include "test/server/config_validation/xds_fuzz.pb.validate.h" + +namespace Envoy { + +DEFINE_PROTO_FUZZER(const test::server::config_validation::XdsTestCase& input) { + RELEASE_ASSERT(!TestEnvironment::getIpVersionsForTest().empty(), ""); + try { + TestUtility::validate(input); + } catch (const ProtoValidationException& e) { + ENVOY_LOG_MISC(debug, "ProtoValidationException: {}", e.what()); + return; + } + XdsFuzzTest test(input, envoy::config::core::v3::ApiVersion::V2); + test.replay(); +} + +} // namespace Envoy diff --git a/test/server/config_validation/xds_verifier.cc b/test/server/config_validation/xds_verifier.cc new file mode 100644 index 0000000000000..2501911a75d66 --- 
/dev/null +++ b/test/server/config_validation/xds_verifier.cc @@ -0,0 +1,329 @@ +#include "test/server/config_validation/xds_verifier.h" + +#include "common/common/logger.h" + +namespace Envoy { + +XdsVerifier::XdsVerifier(test::server::config_validation::Config::SotwOrDelta sotw_or_delta) + : num_warming_(0), num_active_(0), num_draining_(0), num_added_(0), num_modified_(0), + num_removed_(0) { + if (sotw_or_delta == test::server::config_validation::Config::SOTW) { + sotw_or_delta_ = SOTW; + } else { + sotw_or_delta_ = DELTA; + } + ENVOY_LOG_MISC(debug, "sotw_or_delta_ = {}", sotw_or_delta_); +} + +/** + * get the route referenced by a listener + */ +std::string XdsVerifier::getRoute(const envoy::config::listener::v3::Listener& listener) { + envoy::config::listener::v3::Filter filter0 = listener.filter_chains()[0].filters()[0]; + envoy::config::filter::network::http_connection_manager::v2::HttpConnectionManager conn_man; + filter0.typed_config().UnpackTo(&conn_man); + return conn_man.rds().route_config_name(); +} + +/** + * @return true iff the route listener refers to is in all_routes_ + */ +bool XdsVerifier::hasRoute(const envoy::config::listener::v3::Listener& listener) { + return hasRoute(getRoute(listener)); +} + +bool XdsVerifier::hasRoute(const std::string& name) { return all_routes_.contains(name); } + +bool XdsVerifier::hasActiveRoute(const envoy::config::listener::v3::Listener& listener) { + return hasActiveRoute(getRoute(listener)); +} + +bool XdsVerifier::hasActiveRoute(const std::string& name) { return active_routes_.contains(name); } + +bool XdsVerifier::hasListener(const std::string& name, ListenerState state) { + return std::any_of(listeners_.begin(), listeners_.end(), [&](const auto& rep) { + return rep.listener.name() == name && state == rep.state; + }); +} + +/** + * prints the currently stored listeners and their states + */ +void XdsVerifier::dumpState() { + ENVOY_LOG_MISC(debug, "Listener Dump:"); + for (const auto& rep : listeners_) { + 
ENVOY_LOG_MISC(debug, "Name: {}, Route {}, State: {}", rep.listener.name(), + getRoute(rep.listener), rep.state); + } +} + +/* + * if a listener is added for the first time, it will be added as active/warming depending on if + * envoy knows about its route config + * + * if a listener is updated (i.e. there is a already a listener by this name), there are 3 cases: + * 1. the old listener is active and the new is warming: + * - old will remain active + * - new will be added as warming, to replace the old when it gets its route + * 2. the old listener is active and new is active: + * - old is drained (seemingly instantaneously) + * - new is added as active + * 3. the old listener is warming and new is active/warming: + * - old is completely removed + * - new is added as warming/active as normal + */ + +/** + * update a listener when its route is changed, draining/removing the old listener and adding the + * updated listener + */ +void XdsVerifier::listenerUpdated(const envoy::config::listener::v3::Listener& listener) { + ENVOY_LOG_MISC(debug, "About to update listener {} to {}", listener.name(), getRoute(listener)); + dumpState(); + + if (std::any_of(listeners_.begin(), listeners_.end(), [&](auto& rep) { + return rep.listener.name() == listener.name() && + getRoute(listener) == getRoute(rep.listener) && rep.state != DRAINING; + })) { + ENVOY_LOG_MISC(debug, "Ignoring duplicate add of {}", listener.name()); + return; + } + + bool found = false; + for (auto it = listeners_.begin(); it != listeners_.end();) { + const auto& rep = *it; + ENVOY_LOG_MISC(debug, "checking {} for update", rep.listener.name()); + if (rep.listener.name() == listener.name()) { + // if we're updating a warming/active listener, num_modified_ must be incremented + if (rep.state != DRAINING && !found) { + num_modified_++; + found = true; + } + + if (rep.state == ACTIVE) { + if (hasActiveRoute(listener)) { + // if the new listener is ready to take traffic, the old listener will be removed + // it 
seems to be directly removed without being added to the config dump as draining + ENVOY_LOG_MISC(debug, "Removing {} after update", listener.name()); + num_active_--; + it = listeners_.erase(it); + continue; + } else { + // if the new listener has not gotten its route yet, the old listener will remain active + // until that happens + ENVOY_LOG_MISC(debug, "Keeping {} as ACTIVE", listener.name()); + } + } else if (rep.state == WARMING) { + // if the old listener is warming, it will be removed and replaced with the new + ENVOY_LOG_MISC(debug, "Removed warming listener {}", listener.name()); + num_warming_--; + it = listeners_.erase(it); + // don't increment it + continue; + } + } + ++it; + } + dumpState(); + listenerAdded(listener, true); +} + +/** + * add a new listener to listeners_ in either an active or warming state + * @param listener the listener to be added + * @param from_update whether this function was called from listenerUpdated, in which case + * num_added_ should not be incremented + */ +void XdsVerifier::listenerAdded(const envoy::config::listener::v3::Listener& listener, + bool from_update) { + if (!from_update) { + num_added_++; + } + + if (hasActiveRoute(listener)) { + ENVOY_LOG_MISC(debug, "Adding {} to listeners_ as ACTIVE", listener.name()); + listeners_.push_back({listener, ACTIVE}); + num_active_++; + } else { + num_warming_++; + ENVOY_LOG_MISC(debug, "Adding {} to listeners_ as WARMING", listener.name()); + listeners_.push_back({listener, WARMING}); + } + + ENVOY_LOG_MISC(debug, "listenerAdded({})", listener.name()); + dumpState(); +} + +/** + * remove a listener and drain it if it was active + * @param name the name of the listener to be removed + */ +void XdsVerifier::listenerRemoved(const std::string& name) { + bool found = false; + + for (auto it = listeners_.begin(); it != listeners_.end();) { + auto& rep = *it; + if (rep.listener.name() == name) { + if (rep.state == ACTIVE) { + // the listener will be drained before being removed + 
ENVOY_LOG_MISC(debug, "Changing {} to DRAINING", name); + found = true; + num_active_--; + num_draining_++; + rep.state = DRAINING; + } else if (rep.state == WARMING) { + // the listener will be removed immediately + ENVOY_LOG_MISC(debug, "Removed warming listener {}", name); + found = true; + num_warming_--; + it = listeners_.erase(it); + // don't increment it + continue; + } + } + ++it; + } + + if (found) { + num_removed_++; + } +} + +/** + * after a SOTW update, see if any listeners that are currently warming can become active + */ +void XdsVerifier::updateSotwListeners() { + ASSERT(sotw_or_delta_ == SOTW); + for (auto& rep : listeners_) { + // check all_routes_, not active_routes_ since this is SOTW, so any inactive routes will become + // active if this listener refers to them + if (hasRoute(rep.listener) && rep.state == WARMING) { + // it should successfully warm now + ENVOY_LOG_MISC(debug, "Moving {} to ACTIVE state", rep.listener.name()); + + // if the route was not originally added as active, change it now + if (!hasActiveRoute(rep.listener)) { + std::string route_name = getRoute(rep.listener); + auto it = all_routes_.find(route_name); + // all added routes should be in all_routes_ in SOTW + ASSERT(it != all_routes_.end()); + active_routes_.insert({route_name, it->second}); + } + + // if there were any active listeners that were waiting to be updated, they will now be + // removed and the warming listener will take their place + markForRemoval(rep); + num_warming_--; + num_active_++; + rep.state = ACTIVE; + } + } + listeners_.erase(std::remove_if(listeners_.begin(), listeners_.end(), + [&](auto& listener) { return listener.state == REMOVED; }), + listeners_.end()); +} + +/** + * after a delta update, update any listeners that refer to the added route + */ +void XdsVerifier::updateDeltaListeners(const envoy::config::route::v3::RouteConfiguration& route) { + for (auto& rep : listeners_) { + if (getRoute(rep.listener) == route.name() && rep.state == WARMING) 
{ + // it should successfully warm now + ENVOY_LOG_MISC(debug, "Moving {} to ACTIVE state", rep.listener.name()); + + // if there were any active listeners that were waiting to be updated, they will now be + // removed and the warming listener will take their place + markForRemoval(rep); + num_warming_--; + num_active_++; + rep.state = ACTIVE; + } + } + // erase any active listeners that were replaced + listeners_.erase(std::remove_if(listeners_.begin(), listeners_.end(), + [&](auto& listener) { return listener.state == REMOVED; }), + listeners_.end()); +} + +/** + * @param listener a warming listener that has a corresponding active listener of the same name + * called after listener receives its route, so it will be moved to active and the old listener will + * be removed + */ +void XdsVerifier::markForRemoval(ListenerRepresentation& rep) { + ASSERT(rep.state == WARMING); + // find the old listener and mark it for removal + for (auto& old_rep : listeners_) { + if (old_rep.listener.name() == rep.listener.name() && + getRoute(old_rep.listener) != getRoute(rep.listener) && old_rep.state == ACTIVE) { + // mark it as removed to remove it after the loop so as not to invalidate the iterator in + // the caller function + old_rep.state = REMOVED; + num_active_--; + } + } +} + +/** + * called when a route that was previously added is re-added + * the original route might have been ignored if no resources refer to it, so we can add it here + */ +void XdsVerifier::routeUpdated(const envoy::config::route::v3::RouteConfiguration& route) { + if (!all_routes_.contains(route.name()) && + std::any_of(listeners_.begin(), listeners_.end(), + [&](auto& rep) { return getRoute(rep.listener) == route.name(); })) { + all_routes_.insert({route.name(), route}); + active_routes_.insert({route.name(), route}); + } + + ENVOY_LOG_MISC(debug, "Updating {}", route.name()); + if (sotw_or_delta_ == DELTA) { + updateDeltaListeners(route); + } else { + updateSotwListeners(); + } +} + +/** + * add a 
new route and update any listeners that refer to this route + */ +void XdsVerifier::routeAdded(const envoy::config::route::v3::RouteConfiguration& route) { + // routes that are not referenced by any resource are ignored, so this creates a distinction + // between SOTW and delta + // if an unreferenced route is sent in delta, it is ignored forever as it will not be sent in + // future RDS updates, whereas in SOTW it will be present in all future RDS updates, so if a + // listener that refers to it is added in the meantime, it will become active + if (!hasRoute(route.name())) { + all_routes_.insert({route.name(), route}); + } + + if (sotw_or_delta_ == DELTA && std::any_of(listeners_.begin(), listeners_.end(), [&](auto& rep) { + return getRoute(rep.listener) == route.name(); + })) { + if (!hasActiveRoute(route.name())) { + active_routes_.insert({route.name(), route}); + updateDeltaListeners(route); + } + updateDeltaListeners(route); + } else if (sotw_or_delta_ == SOTW) { + updateSotwListeners(); + } +} + +/** + * called after draining a listener, will remove it from listeners_ + */ +void XdsVerifier::drainedListener(const std::string& name) { + for (auto it = listeners_.begin(); it != listeners_.end(); ++it) { + if (it->listener.name() == name && it->state == DRAINING) { + ENVOY_LOG_MISC(debug, "Drained and removed {}", name); + num_draining_--; + listeners_.erase(it); + return; + } + } + throw EnvoyException(fmt::format("Tried to drain {} which is not draining", name)); +} + +} // namespace Envoy diff --git a/test/server/config_validation/xds_verifier.h b/test/server/config_validation/xds_verifier.h new file mode 100644 index 0000000000000..ffd7ff38231be --- /dev/null +++ b/test/server/config_validation/xds_verifier.h @@ -0,0 +1,85 @@ +#include "envoy/admin/v3/config_dump.pb.h" +#include "envoy/common/exception.h" +#include "envoy/config/cluster/v3/cluster.pb.h" +#include "envoy/config/endpoint/v3/endpoint.pb.h" +#include 
"envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.pb.h" +#include "envoy/config/listener/v3/listener.pb.h" +#include "envoy/config/route/v3/route.pb.h" + +#include "common/common/assert.h" + +#include "test/server/config_validation/xds_fuzz.pb.h" + +#include "absl/container/flat_hash_map.h" + +namespace Envoy { + +class XdsVerifier { +public: + XdsVerifier(test::server::config_validation::Config::SotwOrDelta sotw_or_delta); + void listenerAdded(const envoy::config::listener::v3::Listener& listener, + bool from_update = false); + void listenerUpdated(const envoy::config::listener::v3::Listener& listener); + void listenerRemoved(const std::string& name); + void drainedListener(const std::string& name); + + void routeAdded(const envoy::config::route::v3::RouteConfiguration& route); + void routeUpdated(const envoy::config::route::v3::RouteConfiguration& route); + + enum ListenerState { WARMING, ACTIVE, DRAINING, REMOVED }; + struct ListenerRepresentation { + envoy::config::listener::v3::Listener listener; + ListenerState state; + }; + + const std::vector& listeners() const { return listeners_; } + + const absl::flat_hash_map& + routes() const { + return active_routes_; + }; + + uint32_t numWarming() const { return num_warming_; } + uint32_t numActive() const { return num_active_; } + uint32_t numDraining() const { return num_draining_; } + + uint32_t numAdded() const { return num_added_; } + uint32_t numModified() const { return num_modified_; } + uint32_t numRemoved() const { return num_removed_; } + + void dumpState(); + + bool hasListener(const std::string& name, ListenerState state); + bool hasRoute(const envoy::config::listener::v3::Listener& listener); + bool hasRoute(const std::string& name); + bool hasActiveRoute(const envoy::config::listener::v3::Listener& listener); + bool hasActiveRoute(const std::string& name); + +private: + enum SotwOrDelta { SOTW, DELTA }; + + std::string getRoute(const envoy::config::listener::v3::Listener& 
listener); + void updateSotwListeners(); + void updateDeltaListeners(const envoy::config::route::v3::RouteConfiguration& route); + void markForRemoval(ListenerRepresentation& rep); + std::vector listeners_; + + // envoy ignores routes that are not referenced by any resources + // all_routes_ is used for SOTW, as every previous route is sent in each request + // active_routes_ holds the routes that envoy knows about, i.e. the routes that are/were + // referenced by a listener + absl::flat_hash_map all_routes_; + absl::flat_hash_map active_routes_; + + uint32_t num_warming_; + uint32_t num_active_; + uint32_t num_draining_; + + uint32_t num_added_; + uint32_t num_modified_; + uint32_t num_removed_; + + SotwOrDelta sotw_or_delta_; +}; + +} // namespace Envoy diff --git a/test/server/config_validation/xds_verifier_test.cc b/test/server/config_validation/xds_verifier_test.cc new file mode 100644 index 0000000000000..72ca229d05bae --- /dev/null +++ b/test/server/config_validation/xds_verifier_test.cc @@ -0,0 +1,227 @@ +#include "test/config/utility.h" +#include "test/server/config_validation/xds_verifier.h" + +#include "gtest/gtest.h" + +namespace Envoy { + +envoy::config::listener::v3::Listener buildListener(const std::string& listener_name, + const std::string& route_name) { + return ConfigHelper::buildListener(listener_name, route_name, "", "ads_test", + envoy::config::core::v3::ApiVersion::V3); +} + +envoy::config::route::v3::RouteConfiguration buildRoute(const std::string& route_name) { + return ConfigHelper::buildRouteConfig(route_name, "cluster_0", + envoy::config::core::v3::ApiVersion::V3); +} + +// add, warm, drain and remove a listener +TEST(XdsVerifier, Basic) { + XdsVerifier verifier(test::server::config_validation::Config::SOTW); + verifier.listenerAdded(buildListener("listener_0", "route_config_0")); + EXPECT_TRUE(verifier.hasListener("listener_0", verifier.WARMING)); + EXPECT_EQ(verifier.numAdded(), 1); + EXPECT_EQ(verifier.numWarming(), 1); + + 
verifier.routeAdded(buildRoute("route_config_0")); + EXPECT_TRUE(verifier.hasListener("listener_0", verifier.ACTIVE)); + EXPECT_TRUE(verifier.hasRoute("route_config_0") && verifier.hasActiveRoute("route_config_0")); + EXPECT_EQ(verifier.numAdded(), 1); + EXPECT_EQ(verifier.numWarming(), 0); + EXPECT_EQ(verifier.numActive(), 1); + + verifier.listenerRemoved("listener_0"); + EXPECT_TRUE(verifier.hasListener("listener_0", verifier.DRAINING)); + EXPECT_EQ(verifier.numDraining(), 1); + EXPECT_EQ(verifier.numRemoved(), 1); + EXPECT_EQ(verifier.numActive(), 0); + + verifier.drainedListener("listener_0"); + EXPECT_FALSE(verifier.hasListener("listener_0", verifier.DRAINING)); + EXPECT_EQ(verifier.numRemoved(), 1); +} + +TEST(XdsVerifier, RouteBeforeListenerSOTW) { + XdsVerifier verifier(test::server::config_validation::Config::SOTW); + // send a route first, so envoy will not accept it + verifier.routeAdded(buildRoute("route_config_0")); + EXPECT_TRUE(verifier.hasRoute("route_config_0")); + EXPECT_FALSE(verifier.hasActiveRoute("route_config_0")); + + // envoy still doesn't know about the route, so this will warm + verifier.listenerAdded(buildListener("listener_0", "route_config_0")); + EXPECT_TRUE(verifier.hasListener("listener_0", verifier.WARMING)); + EXPECT_EQ(verifier.numAdded(), 1); + EXPECT_EQ(verifier.numWarming(), 1); + + // send a new route, which will include route_config_0 since SOTW, so route_config_0 will become + // active + verifier.routeAdded(buildRoute("route_config_1")); + EXPECT_TRUE(verifier.hasRoute("route_config_1")); + EXPECT_FALSE(verifier.hasActiveRoute("route_config_1")); + EXPECT_TRUE(verifier.hasActiveRoute("route_config_0")); + EXPECT_TRUE(verifier.hasListener("listener_0", verifier.ACTIVE)); + EXPECT_EQ(verifier.numActive(), 1); +} + +TEST(XdsVerifier, RouteBeforeListenerDelta) { + XdsVerifier verifier(test::server::config_validation::Config::DELTA); + // send a route first, so envoy will not accept it + 
verifier.routeAdded(buildRoute("route_config_0")); + EXPECT_FALSE(verifier.hasActiveRoute("route_config_0")); + + // envoy still doesn't know about the route, so this will warm + verifier.listenerAdded(buildListener("listener_0", "route_config_0")); + EXPECT_TRUE(verifier.hasListener("listener_0", verifier.WARMING)); + EXPECT_EQ(verifier.numAdded(), 1); + EXPECT_EQ(verifier.numWarming(), 1); + + // send a new route, which will not include route_config_0 since SOTW, so route_config_0 will not + // become active + verifier.routeAdded(buildRoute("route_config_1")); + EXPECT_FALSE(verifier.hasActiveRoute("route_config_1")); + EXPECT_FALSE(verifier.hasActiveRoute("route_config_0")); + EXPECT_TRUE(verifier.hasListener("listener_0", verifier.WARMING)); + EXPECT_EQ(verifier.numWarming(), 1); +} + +TEST(XdsVerifier, UpdateWarmingListener) { + XdsVerifier verifier(test::server::config_validation::Config::SOTW); + verifier.listenerAdded(buildListener("listener_0", "route_config_0")); + verifier.listenerUpdated(buildListener("listener_0", "route_config_1")); + // the new listener should directly replace the old listener since it's warming + EXPECT_EQ(verifier.numModified(), 1); + EXPECT_EQ(verifier.numAdded(), 1); + + // send the route for the old listener, which should have been replaced with the update + verifier.routeAdded(buildRoute("route_config_0")); + EXPECT_FALSE(verifier.hasListener("listener_0", verifier.ACTIVE)); + + // now the new should become active + verifier.routeAdded(buildRoute("route_config_1")); + EXPECT_TRUE(verifier.hasListener("listener_0", verifier.ACTIVE)); +} + +TEST(XdsVerifier, UpdateActiveListener) { + XdsVerifier verifier(test::server::config_validation::Config::SOTW); + + // add an active listener + verifier.listenerAdded(buildListener("listener_0", "route_config_0")); + verifier.routeAdded(buildRoute("route_config_0")); + EXPECT_TRUE(verifier.hasListener("listener_0", verifier.ACTIVE)); + + // send an update, which should keep the old listener 
active until the new warms + verifier.listenerUpdated(buildListener("listener_0", "route_config_1")); + EXPECT_EQ(verifier.numModified(), 1); + EXPECT_TRUE(verifier.hasListener("listener_0", verifier.ACTIVE)); + EXPECT_TRUE(verifier.hasListener("listener_0", verifier.WARMING)); + + // warm the new listener, which should remove the old + verifier.routeAdded(buildRoute("route_config_1")); + EXPECT_TRUE(verifier.hasListener("listener_0", verifier.ACTIVE)); + EXPECT_FALSE(verifier.hasListener("listener_0", verifier.DRAINING)); + EXPECT_FALSE(verifier.hasListener("listener_0", verifier.WARMING)); + + EXPECT_EQ(verifier.numActive(), 1); +} + +TEST(XdsVerifier, UpdateActiveToActive) { + XdsVerifier verifier(test::server::config_validation::Config::SOTW); + + // add two active listeners to different routes + verifier.listenerAdded(buildListener("listener_0", "route_config_0")); + verifier.routeAdded(buildRoute("route_config_0")); + EXPECT_TRUE(verifier.hasListener("listener_0", verifier.ACTIVE)); + + // add an active listener + verifier.listenerAdded(buildListener("listener_1", "route_config_1")); + verifier.routeAdded(buildRoute("route_config_1")); + EXPECT_TRUE(verifier.hasListener("listener_1", verifier.ACTIVE)); + EXPECT_EQ(verifier.numAdded(), 2); + + // send an update, which should make the new listener active straight away and remove the old + // since its route is already active + verifier.listenerUpdated(buildListener("listener_0", "route_config_1")); + EXPECT_TRUE(verifier.hasListener("listener_0", verifier.ACTIVE)); + EXPECT_FALSE(verifier.hasListener("listener_0", verifier.WARMING)); + EXPECT_EQ(verifier.numActive(), 2); +} + +TEST(XdsVerifier, WarmMultipleListenersSOTW) { + XdsVerifier verifier(test::server::config_validation::Config::SOTW); + + // add two warming listener to the same route + verifier.listenerAdded(buildListener("listener_0", "route_config_0")); + verifier.listenerAdded(buildListener("listener_1", "route_config_0")); + + // send the route, 
make sure both are active + verifier.routeAdded(buildRoute("route_config_0")); + EXPECT_TRUE(verifier.hasListener("listener_0", verifier.ACTIVE)); + EXPECT_TRUE(verifier.hasListener("listener_1", verifier.ACTIVE)); + EXPECT_EQ(verifier.numActive(), 2); +} + +TEST(XdsVerifier, WarmMultipleListenersDelta) { + XdsVerifier verifier(test::server::config_validation::Config::DELTA); + + // add two warming listener to the same route + verifier.listenerAdded(buildListener("listener_0", "route_config_0")); + verifier.listenerAdded(buildListener("listener_1", "route_config_0")); + + // send the route, make sure both are active + verifier.routeAdded(buildRoute("route_config_0")); + EXPECT_TRUE(verifier.hasListener("listener_0", verifier.ACTIVE)); + EXPECT_TRUE(verifier.hasListener("listener_1", verifier.ACTIVE)); + EXPECT_EQ(verifier.numActive(), 2); +} + +TEST(XdsVerifier, ResendRouteSOTW) { + XdsVerifier verifier(test::server::config_validation::Config::SOTW); + + // send a route that will be ignored + verifier.routeAdded(buildRoute("route_config_0")); + + // add a warming listener that refers to this route + verifier.listenerAdded(buildListener("listener_0", "route_config_0")); + EXPECT_TRUE(verifier.hasListener("listener_0", verifier.WARMING)); + + // send the same route again, make sure listener becomes active + verifier.routeUpdated(buildRoute("route_config_0")); + EXPECT_TRUE(verifier.hasListener("listener_0", verifier.ACTIVE)); +} + +TEST(XdsVerifier, ResendRouteDelta) { + XdsVerifier verifier(test::server::config_validation::Config::DELTA); + + // send a route that will be ignored + verifier.routeAdded(buildRoute("route_config_0")); + + // add a warming listener that refers to this route + verifier.listenerAdded(buildListener("listener_0", "route_config_0")); + EXPECT_TRUE(verifier.hasListener("listener_0", verifier.WARMING)); + + // send the same route again, make sure listener becomes active + verifier.routeUpdated(buildRoute("route_config_0")); + 
EXPECT_TRUE(verifier.hasListener("listener_0", verifier.ACTIVE)); +} + +TEST(XdsVerifier, RemoveThenAddListener) { + XdsVerifier verifier(test::server::config_validation::Config::SOTW); + + // add an active listener + verifier.listenerAdded(buildListener("listener_0", "route_config_0")); + verifier.routeAdded(buildRoute("route_config_0")); + EXPECT_TRUE(verifier.hasListener("listener_0", verifier.ACTIVE)); + + // remove it + verifier.listenerRemoved("listener_0"); + EXPECT_TRUE(verifier.hasListener("listener_0", verifier.DRAINING)); + + // and add it back, it should now be draining and active + verifier.listenerAdded(buildListener("listener_0", "route_config_0")); + EXPECT_TRUE(verifier.hasListener("listener_0", verifier.DRAINING)); + EXPECT_TRUE(verifier.hasListener("listener_0", verifier.ACTIVE)); +} + +} // namespace Envoy diff --git a/test/server/configuration_impl_test.cc b/test/server/configuration_impl_test.cc index 6806d8d2a639e..9b93be806e32b 100644 --- a/test/server/configuration_impl_test.cc +++ b/test/server/configuration_impl_test.cc @@ -18,7 +18,7 @@ #include "test/common/upstream/utility.h" #include "test/mocks/common.h" #include "test/mocks/network/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/instance.h" #include "test/test_common/environment.h" #include "test/test_common/utility.h" @@ -102,7 +102,7 @@ TEST_F(ConfigurationImplTest, CustomStatsFlushInterval) { } )EOF"; - auto bootstrap = Upstream::parseBootstrapFromV2Json(json); + auto bootstrap = Upstream::parseBootstrapFromV3Json(json); MainImpl config; config.initialize(bootstrap, server_, cluster_manager_factory_); @@ -122,14 +122,24 @@ TEST_F(ConfigurationImplTest, SetUpstreamClusterPerConnectionBufferLimit) { "connect_timeout": "0.01s", "per_connection_buffer_limit_bytes": 8192, "lb_policy": "round_robin", - "hosts": [ - { - "socket_address" : { - "address": "127.0.0.1", - "port_value": 9999 + "load_assignment": { + "endpoints": [ + { + "lb_endpoints": [ + { + 
"endpoint": { + "address": { + "socket_address": { + "address": "127.0.0.1", + "port_value": 9999 + } } } - ] + } + ] + } + ] + } } ] }, @@ -145,7 +155,7 @@ TEST_F(ConfigurationImplTest, SetUpstreamClusterPerConnectionBufferLimit) { } )EOF"; - auto bootstrap = Upstream::parseBootstrapFromV2Json(json); + auto bootstrap = Upstream::parseBootstrapFromV3Json(json); MainImpl config; config.initialize(bootstrap, server_, cluster_manager_factory_); @@ -189,7 +199,7 @@ TEST_F(ConfigurationImplTest, NullTracerSetWhenTracingConfigurationAbsent) { } )EOF"; - auto bootstrap = Upstream::parseBootstrapFromV2Json(json); + auto bootstrap = Upstream::parseBootstrapFromV3Json(json); server_.local_info_.node_.set_cluster(""); MainImpl config; @@ -229,7 +239,7 @@ TEST_F(ConfigurationImplTest, NullTracerSetWhenHttpKeyAbsentFromTracerConfigurat } )EOF"; - auto bootstrap = Upstream::parseBootstrapFromV2Json(json); + auto bootstrap = Upstream::parseBootstrapFromV3Json(json); server_.local_info_.node_.set_cluster(""); MainImpl config; @@ -281,7 +291,7 @@ TEST_F(ConfigurationImplTest, ConfigurationFailsWhenInvalidTracerSpecified) { } )EOF"; - auto bootstrap = Upstream::parseBootstrapFromV2Json(json); + auto bootstrap = Upstream::parseBootstrapFromV3Json(json); MainImpl config; EXPECT_THROW_WITH_MESSAGE(config.initialize(bootstrap, server_, cluster_manager_factory_), EnvoyException, @@ -307,7 +317,7 @@ TEST_F(ConfigurationImplTest, ProtoSpecifiedStatsSink) { } )EOF"; - auto bootstrap = Upstream::parseBootstrapFromV2Json(json); + auto bootstrap = Upstream::parseBootstrapFromV3Json(json); auto& sink = *bootstrap.mutable_stats_sinks()->Add(); sink.set_name(Extensions::StatSinks::StatsSinkNames::get().Statsd); @@ -338,7 +348,7 @@ TEST_F(ConfigurationImplTest, StatsSinkWithInvalidName) { } )EOF"; - auto bootstrap = Upstream::parseBootstrapFromV2Json(json); + auto bootstrap = Upstream::parseBootstrapFromV3Json(json); envoy::config::metrics::v3::StatsSink& sink = 
*bootstrap.mutable_stats_sinks()->Add(); sink.set_name("envoy.invalid"); @@ -368,7 +378,7 @@ TEST_F(ConfigurationImplTest, StatsSinkWithNoName) { } )EOF"; - auto bootstrap = Upstream::parseBootstrapFromV2Json(json); + auto bootstrap = Upstream::parseBootstrapFromV3Json(json); bootstrap.mutable_stats_sinks()->Add(); @@ -397,7 +407,7 @@ TEST_F(ConfigurationImplTest, StatsSinkWithNoType) { } )EOF"; - auto bootstrap = Upstream::parseBootstrapFromV2Json(json); + auto bootstrap = Upstream::parseBootstrapFromV3Json(json); auto& sink = *bootstrap.mutable_stats_sinks()->Add(); udpa::type::v1::TypedStruct typed_struct; @@ -530,7 +540,7 @@ TEST_F(ConfigurationImplTest, AdminSocketOptions) { } )EOF"; - auto bootstrap = Upstream::parseBootstrapFromV2Json(json); + auto bootstrap = Upstream::parseBootstrapFromV3Json(json); InitialImpl config(bootstrap); Network::MockListenSocket socket_mock; @@ -616,7 +626,7 @@ TEST_F(ConfigurationImplTest, ExceedLoadBalancerHostWeightsLimit) { } )EOF"; - auto bootstrap = Upstream::parseBootstrapFromV2Json(json); + auto bootstrap = Upstream::parseBootstrapFromV3Json(json); MainImpl config; EXPECT_THROW_WITH_MESSAGE( @@ -722,7 +732,7 @@ TEST_F(ConfigurationImplTest, ExceedLoadBalancerLocalityWeightsLimit) { } )EOF"; - auto bootstrap = Upstream::parseBootstrapFromV2Json(json); + auto bootstrap = Upstream::parseBootstrapFromV3Json(json); MainImpl config; EXPECT_THROW_WITH_MESSAGE( @@ -730,6 +740,59 @@ TEST_F(ConfigurationImplTest, ExceedLoadBalancerLocalityWeightsLimit) { "The sum of weights of all localities at the same priority exceeds 4294967295"); } +TEST_F(ConfigurationImplTest, KillTimeoutWithoutSkew) { + const std::string json = R"EOF( + { + "watchdog": { + "kill_timeout": "1.0s", + }, + })EOF"; + + envoy::config::bootstrap::v3::Bootstrap bootstrap; + TestUtility::loadFromJson(json, bootstrap); + + MainImpl config; + config.initialize(bootstrap, server_, cluster_manager_factory_); + + EXPECT_EQ(std::chrono::milliseconds(1000), 
config.wdKillTimeout()); +} + +TEST_F(ConfigurationImplTest, CanSkewsKillTimeout) { + const std::string json = R"EOF( + { + "watchdog": { + "kill_timeout": "1.0s", + "max_kill_timeout_jitter": "0.5s" + }, + })EOF"; + + envoy::config::bootstrap::v3::Bootstrap bootstrap; + TestUtility::loadFromJson(json, bootstrap); + + MainImpl config; + config.initialize(bootstrap, server_, cluster_manager_factory_); + + EXPECT_LT(std::chrono::milliseconds(1000), config.wdKillTimeout()); + EXPECT_GE(std::chrono::milliseconds(1500), config.wdKillTimeout()); +} + +TEST_F(ConfigurationImplTest, DoesNotSkewIfKillTimeoutDisabled) { + const std::string json = R"EOF( + { + "watchdog": { + "max_kill_timeout_jitter": "0.5s" + }, + })EOF"; + + envoy::config::bootstrap::v3::Bootstrap bootstrap; + TestUtility::loadFromJson(json, bootstrap); + + MainImpl config; + config.initialize(bootstrap, server_, cluster_manager_factory_); + + EXPECT_EQ(std::chrono::milliseconds(0), config.wdKillTimeout()); +} + } // namespace } // namespace Configuration } // namespace Server diff --git a/test/server/connection_handler_test.cc b/test/server/connection_handler_test.cc index 247810612f14b..ebcc66a8c6563 100644 --- a/test/server/connection_handler_test.cc +++ b/test/server/connection_handler_test.cc @@ -1,21 +1,24 @@ #include "envoy/config/core/v3/base.pb.h" #include "envoy/config/listener/v3/udp_listener_config.pb.h" +#include "envoy/network/exception.h" #include "envoy/network/filter.h" #include "envoy/server/active_udp_listener_config.h" #include "envoy/stats/scope.h" #include "common/common/utility.h" +#include "common/config/utility.h" #include "common/network/address_impl.h" #include "common/network/connection_balancer_impl.h" #include "common/network/io_socket_handle_impl.h" #include "common/network/raw_buffer_socket.h" +#include "common/network/udp_default_writer_config.h" #include "common/network/utility.h" #include "server/connection_handler_impl.h" +#include "test/mocks/api/mocks.h" #include 
"test/mocks/common.h" #include "test/mocks/network/mocks.h" -#include "test/mocks/server/mocks.h" #include "test/test_common/network_utility.h" #include "test/test_common/threadsafe_singleton_injector.h" @@ -50,12 +53,11 @@ class ConnectionHandlerTest : public testing::Test, protected Logger::Loggable> filter_chain_manager = nullptr) - : parent_(parent), socket_(std::make_shared()), + : parent_(parent), socket_(std::make_shared>()), socket_factory_(std::move(socket_factory)), tag_(tag), bind_to_port_(bind_to_port), hand_off_restored_destination_connections_(hand_off_restored_destination_connections), name_(name), listener_filters_timeout_(listener_filters_timeout), @@ -68,6 +70,7 @@ class ConnectionHandlerTest : public testing::Test, protected Logger::Loggable(listener_name) .createActiveUdpListenerFactory(dummy, /*concurrency=*/1); + udp_writer_factory_ = std::make_unique(); ON_CALL(*socket_, socketType()).WillByDefault(Return(socket_type)); } @@ -95,6 +98,9 @@ class ConnectionHandlerTest : public testing::Test, protected Logger::Loggable& accessLogs() const override { return empty_access_logs_; } + ResourceLimit& openConnections() override { return open_connections_; } + + void setMaxConnections(const uint32_t num_connections) { + open_connections_.setMax(num_connections); + } + void clearMaxConnections() { open_connections_.resetMax(); } ConnectionHandlerTest& parent_; - std::shared_ptr socket_; + std::shared_ptr> socket_; Network::ListenSocketFactorySharedPtr socket_factory_; uint64_t tag_; bool bind_to_port_; @@ -113,20 +125,56 @@ class ConnectionHandlerTest : public testing::Test, protected Logger::Loggable udp_listener_factory_; + std::unique_ptr udp_writer_factory_; Network::ConnectionBalancerPtr connection_balancer_; + BasicResourceLimitImpl open_connections_; const std::vector empty_access_logs_; std::shared_ptr> inline_filter_chain_manager_; }; using TestListenerPtr = std::unique_ptr; + class MockUpstreamUdpFilter : public Network::UdpListenerReadFilter 
{ + public: + MockUpstreamUdpFilter(ConnectionHandlerTest& parent, Network::UdpReadFilterCallbacks& callbacks) + : UdpListenerReadFilter(callbacks), parent_(parent) {} + ~MockUpstreamUdpFilter() override { + parent_.deleted_before_listener_ = !parent_.udp_listener_deleted_; + } + + MOCK_METHOD(void, onData, (Network::UdpRecvData&), (override)); + MOCK_METHOD(void, onReceiveError, (Api::IoError::IoErrorCode), (override)); + + private: + ConnectionHandlerTest& parent_; + }; + + class MockUpstreamUdpListener : public Network::UdpListener { + public: + explicit MockUpstreamUdpListener(ConnectionHandlerTest& parent) : parent_(parent) { + ON_CALL(*this, dispatcher()).WillByDefault(ReturnRef(dispatcher_)); + } + ~MockUpstreamUdpListener() override { parent_.udp_listener_deleted_ = true; } + + MOCK_METHOD(void, enable, (), (override)); + MOCK_METHOD(void, disable, (), (override)); + MOCK_METHOD(Event::Dispatcher&, dispatcher, (), (override)); + MOCK_METHOD(Network::Address::InstanceConstSharedPtr&, localAddress, (), (const, override)); + MOCK_METHOD(Api::IoCallUint64Result, send, (const Network::UdpSendData&), (override)); + MOCK_METHOD(Api::IoCallUint64Result, flush, (), (override)); + + private: + ConnectionHandlerTest& parent_; + Event::MockDispatcher dispatcher_; + }; + TestListener* addListener( uint64_t tag, bool bind_to_port, bool hand_off_restored_destination_connections, const std::string& name, Network::Listener* listener, Network::ListenerCallbacks** listener_callbacks = nullptr, Network::MockConnectionBalancer* connection_balancer = nullptr, Network::BalancedConnectionHandler** balanced_connection_handler = nullptr, - Network::Address::SocketType socket_type = Network::Address::SocketType::Stream, + Network::Socket::Type socket_type = Network::Socket::Type::Stream, std::chrono::milliseconds listener_filters_timeout = std::chrono::milliseconds(15000), bool continue_on_listener_filters_timeout = false, std::shared_ptr> overridden_filter_chain_manager = @@ -142,7 
+190,7 @@ class ConnectionHandlerTest : public testing::Test, protected Logger::Loggablesocket_)); - if (socket_type == Network::Address::SocketType::Stream) { + if (socket_type == Network::Socket::Type::Stream) { EXPECT_CALL(dispatcher_, createListener_(_, _, _)) .WillOnce(Invoke([listener, listener_callbacks](Network::SocketSharedPtr&&, Network::ListenerCallbacks& cb, @@ -182,6 +230,8 @@ class ConnectionHandlerTest : public testing::Test, protected Logger::Loggable os_sys_calls_; TestThreadsafeSingletonInjector os_calls_{&os_sys_calls_}; std::shared_ptr> listener_filter_matcher_; + bool udp_listener_deleted_ = false; + bool deleted_before_listener_ = false; }; // Verify that if a listener is removed while a rebalanced connection is in flight, we correctly @@ -230,6 +280,88 @@ TEST_F(ConnectionHandlerTest, RemoveListenerDuringRebalance) { #endif } +TEST_F(ConnectionHandlerTest, ListenerConnectionLimitEnforced) { + Network::ListenerCallbacks* listener_callbacks1; + auto listener1 = new NiceMock(); + TestListener* test_listener1 = + addListener(1, false, false, "test_listener1", listener1, &listener_callbacks1); + Network::Address::InstanceConstSharedPtr normal_address( + new Network::Address::Ipv4Instance("127.0.0.1", 10001)); + EXPECT_CALL(*socket_factory_, localAddress()).WillRepeatedly(ReturnRef(normal_address)); + // Only allow a single connection on this listener. + test_listener1->setMaxConnections(1); + handler_->addListener(absl::nullopt, *test_listener1); + + auto listener2 = new NiceMock(); + Network::ListenerCallbacks* listener_callbacks2; + TestListener* test_listener2 = + addListener(2, false, false, "test_listener2", listener2, &listener_callbacks2); + Network::Address::InstanceConstSharedPtr alt_address( + new Network::Address::Ipv4Instance("127.0.0.2", 20002)); + EXPECT_CALL(*socket_factory_, localAddress()).WillRepeatedly(ReturnRef(alt_address)); + // Do not allow any connections on this listener. 
+ test_listener2->setMaxConnections(0); + handler_->addListener(absl::nullopt, *test_listener2); + + EXPECT_CALL(manager_, findFilterChain(_)).WillRepeatedly(Return(filter_chain_.get())); + EXPECT_CALL(factory_, createNetworkFilterChain(_, _)).WillRepeatedly(Return(true)); + Network::MockListenerFilter* test_filter = new Network::MockListenerFilter(); + EXPECT_CALL(*test_filter, destroy_()); + EXPECT_CALL(factory_, createListenerFilterChain(_)) + .WillRepeatedly(Invoke([&](Network::ListenerFilterManager& manager) -> bool { + manager.addAcceptFilter(listener_filter_matcher_, Network::ListenerFilterPtr{test_filter}); + return true; + })); + EXPECT_CALL(*test_filter, onAccept(_)) + .WillRepeatedly(Invoke([&](Network::ListenerFilterCallbacks&) -> Network::FilterStatus { + return Network::FilterStatus::Continue; + })); + + // For listener 2, verify its connection limit is independent of listener 1. + + // We expect that listener 2 accepts the connection, so there will be a call to + // createServerConnection and active cx should increase, while cx overflow remains the same. + listener_callbacks2->onAccept( + Network::ConnectionSocketPtr{new NiceMock()}); + EXPECT_EQ(0, handler_->numConnections()); + EXPECT_EQ(0, TestUtility::findCounter(stats_store_, "downstream_cx_total")->value()); + EXPECT_EQ(0, TestUtility::findGauge(stats_store_, "downstream_cx_active")->value()); + EXPECT_EQ(1, TestUtility::findCounter(stats_store_, "downstream_cx_overflow")->value()); + + // For listener 1, verify connections are limited after one goes active. + + // First connection attempt should result in an active connection being created. + auto conn1 = new NiceMock(); + EXPECT_CALL(dispatcher_, createServerConnection_()).WillOnce(Return(conn1)); + listener_callbacks1->onAccept( + Network::ConnectionSocketPtr{new NiceMock()}); + EXPECT_EQ(1, handler_->numConnections()); + // Note that these stats are not the per-worker stats, but the per-listener stats. 
+ EXPECT_EQ(1, TestUtility::findCounter(stats_store_, "downstream_cx_total")->value()); + EXPECT_EQ(1, TestUtility::findGauge(stats_store_, "downstream_cx_active")->value()); + EXPECT_EQ(1, TestUtility::findCounter(stats_store_, "downstream_cx_overflow")->value()); + + // Don't expect server connection to be created, should be instantly closed and increment + // overflow stat. + listener_callbacks1->onAccept( + Network::ConnectionSocketPtr{new NiceMock()}); + EXPECT_EQ(1, handler_->numConnections()); + EXPECT_EQ(1, TestUtility::findCounter(stats_store_, "downstream_cx_total")->value()); + EXPECT_EQ(1, TestUtility::findGauge(stats_store_, "downstream_cx_active")->value()); + EXPECT_EQ(2, TestUtility::findCounter(stats_store_, "downstream_cx_overflow")->value()); + + // Check behavior again for good measure. + listener_callbacks1->onAccept( + Network::ConnectionSocketPtr{new NiceMock()}); + EXPECT_EQ(1, handler_->numConnections()); + EXPECT_EQ(1, TestUtility::findCounter(stats_store_, "downstream_cx_total")->value()); + EXPECT_EQ(1, TestUtility::findGauge(stats_store_, "downstream_cx_active")->value()); + EXPECT_EQ(3, TestUtility::findCounter(stats_store_, "downstream_cx_overflow")->value()); + + EXPECT_CALL(*listener1, onDestroy()); + EXPECT_CALL(*listener2, onDestroy()); +} + TEST_F(ConnectionHandlerTest, RemoveListener) { InSequence s; @@ -649,7 +781,7 @@ TEST_F(ConnectionHandlerTest, ContinueOnListenerFilterTimeout) { auto listener = new NiceMock(); TestListener* test_listener = addListener(1, true, false, "test_listener", listener, &listener_callbacks, nullptr, nullptr, - Network::Address::SocketType::Stream, std::chrono::milliseconds(15000), true); + Network::Socket::Type::Stream, std::chrono::milliseconds(15000), true); EXPECT_CALL(*socket_factory_, localAddress()).WillRepeatedly(ReturnRef(local_address_)); handler_->addListener(absl::nullopt, *test_listener); @@ -733,7 +865,7 @@ TEST_F(ConnectionHandlerTest, ListenerFilterDisabledTimeout) { auto listener = 
new NiceMock(); TestListener* test_listener = addListener(1, true, false, "test_listener", listener, &listener_callbacks, nullptr, nullptr, - Network::Address::SocketType::Stream, std::chrono::milliseconds()); + Network::Socket::Type::Stream, std::chrono::milliseconds()); EXPECT_CALL(*socket_factory_, localAddress()).WillRepeatedly(ReturnRef(local_address_)); handler_->addListener(absl::nullopt, *test_listener); @@ -803,7 +935,7 @@ TEST_F(ConnectionHandlerTest, UdpListenerNoFilterThrowsException) { auto listener = new NiceMock(); TestListener* test_listener = addListener(1, true, false, "test_listener", listener, nullptr, nullptr, nullptr, - Network::Address::SocketType::Datagram, std::chrono::milliseconds()); + Network::Socket::Type::Datagram, std::chrono::milliseconds()); EXPECT_CALL(factory_, createUdpListenerFilterChain(_, _)) .WillOnce(Invoke([&](Network::UdpListenerFilterManager&, Network::UdpReadFilterCallbacks&) -> bool { return true; })); @@ -838,7 +970,7 @@ TEST_F(ConnectionHandlerTest, TcpListenerInplaceUpdate) { std::make_shared>(); TestListener* new_test_listener = addListener(new_listener_tag, true, false, "test_listener", /* Network::Listener */ nullptr, - &new_listener_callbacks, nullptr, nullptr, Network::Address::SocketType::Stream, + &new_listener_callbacks, nullptr, nullptr, Network::Socket::Type::Stream, std::chrono::milliseconds(15000), false, overridden_filter_chain_manager); handler_->addListener(old_listener_tag, *new_test_listener); ASSERT_EQ(new_listener_callbacks, nullptr) @@ -926,6 +1058,33 @@ TEST_F(ConnectionHandlerTest, ListenerFilterWorks) { EXPECT_CALL(*listener, onDestroy()); } +// The read_filter should be deleted before the udp_listener is deleted. 
+TEST_F(ConnectionHandlerTest, ShutdownUdpListener) { + InSequence s; + + Network::MockUdpReadFilterCallbacks dummy_callbacks; + auto listener = new NiceMock(*this); + TestListener* test_listener = + addListener(1, true, false, "test_listener", listener, nullptr, nullptr, nullptr, + Network::Socket::Type::Datagram, std::chrono::milliseconds(), false, nullptr); + auto filter = std::make_unique>(*this, dummy_callbacks); + + EXPECT_CALL(factory_, createUdpListenerFilterChain(_, _)) + .WillOnce(Invoke([&](Network::UdpListenerFilterManager& udp_listener, + Network::UdpReadFilterCallbacks&) -> bool { + udp_listener.addReadFilter(std::move(filter)); + return true; + })); + EXPECT_CALL(*socket_factory_, localAddress()).WillRepeatedly(ReturnRef(local_address_)); + EXPECT_CALL(dummy_callbacks.udp_listener_, onDestroy()); + + handler_->addListener(absl::nullopt, *test_listener); + handler_->stopListeners(); + + ASSERT_TRUE(deleted_before_listener_) + << "The read_filter_ should be deleted before the udp_listener_ is deleted."; +} + } // namespace } // namespace Server } // namespace Envoy diff --git a/test/server/drain_manager_impl_test.cc b/test/server/drain_manager_impl_test.cc index 94e5ce3c099e3..160080b34b1b9 100644 --- a/test/server/drain_manager_impl_test.cc +++ b/test/server/drain_manager_impl_test.cc @@ -4,7 +4,7 @@ #include "server/drain_manager_impl.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/instance.h" #include "gmock/gmock.h" #include "gtest/gtest.h" @@ -17,10 +17,14 @@ namespace Envoy { namespace Server { namespace { -class DrainManagerImplTest : public testing::Test { -public: +constexpr int DrainTimeSeconds(600); + +class DrainManagerImplTest : public Event::TestUsingSimulatedTime, + public testing::TestWithParam { +protected: DrainManagerImplTest() { - ON_CALL(server_.options_, drainTime()).WillByDefault(Return(std::chrono::seconds(600))); + ON_CALL(server_.options_, drainTime()) + 
.WillByDefault(Return(std::chrono::seconds(DrainTimeSeconds))); ON_CALL(server_.options_, parentShutdownTime()) .WillByDefault(Return(std::chrono::seconds(900))); } @@ -48,32 +52,107 @@ TEST_F(DrainManagerImplTest, Default) { // Test drain sequence. Event::MockTimer* drain_timer = new Event::MockTimer(&server_.dispatcher_); - EXPECT_CALL(*drain_timer, enableTimer(_, _)); + const auto expected_delay = std::chrono::milliseconds(DrainTimeSeconds * 1000); + EXPECT_CALL(*drain_timer, enableTimer(expected_delay, nullptr)); ReadyWatcher drain_complete; drain_manager.startDrainSequence([&drain_complete]() -> void { drain_complete.ready(); }); - - // 600s which is the default drain time. - for (size_t i = 0; i < 599; i++) { - if (i < 598) { - EXPECT_CALL(*drain_timer, enableTimer(_, _)); - } else { - EXPECT_CALL(drain_complete, ready()); - } - drain_timer->invokeCallback(); - } - - EXPECT_CALL(server_, healthCheckFailed()).WillOnce(Return(false)); - EXPECT_TRUE(drain_manager.drainClose()); + EXPECT_CALL(drain_complete, ready()); + drain_timer->invokeCallback(); } TEST_F(DrainManagerImplTest, ModifyOnly) { InSequence s; DrainManagerImpl drain_manager(server_, envoy::config::listener::v3::Listener::MODIFY_ONLY); - EXPECT_CALL(server_, healthCheckFailed()).Times(0); + EXPECT_CALL(server_, healthCheckFailed()).Times(0); // Listener check will short-circuit EXPECT_FALSE(drain_manager.drainClose()); } +TEST_P(DrainManagerImplTest, DrainDeadline) { + const bool drain_gradually = GetParam(); + ON_CALL(server_.options_, drainStrategy()) + .WillByDefault(Return(drain_gradually ? Server::DrainStrategy::Gradual + : Server::DrainStrategy::Immediate)); + // TODO(auni53): Add integration tests for this once TestDrainManager is + // removed. + DrainManagerImpl drain_manager(server_, envoy::config::listener::v3::Listener::DEFAULT); + + // Ensure drainClose() behaviour is determined by the deadline. 
+ drain_manager.startDrainSequence([] {}); + EXPECT_CALL(server_, healthCheckFailed()).WillRepeatedly(Return(false)); + ON_CALL(server_.random_, random()).WillByDefault(Return(DrainTimeSeconds * 2 - 1)); + ON_CALL(server_.options_, drainTime()) + .WillByDefault(Return(std::chrono::seconds(DrainTimeSeconds))); + + if (drain_gradually) { + // random() should be called when elapsed time < drain timeout + EXPECT_CALL(server_.random_, random()).Times(2); + EXPECT_FALSE(drain_manager.drainClose()); + simTime().advanceTimeWait(std::chrono::seconds(DrainTimeSeconds - 1)); + EXPECT_FALSE(drain_manager.drainClose()); + simTime().advanceTimeWait(std::chrono::seconds(1)); + EXPECT_TRUE(drain_manager.drainClose()); + + // Test that this still works if remaining time is negative + simTime().advanceTimeWait(std::chrono::seconds(1)); + EXPECT_TRUE(drain_manager.drainClose()); + simTime().advanceTimeWait(std::chrono::seconds(500)); + EXPECT_TRUE(drain_manager.drainClose()); + } else { + EXPECT_CALL(server_.random_, random()).Times(0); + EXPECT_TRUE(drain_manager.drainClose()); + simTime().advanceTimeWait(std::chrono::seconds(DrainTimeSeconds - 1)); + EXPECT_TRUE(drain_manager.drainClose()); + simTime().advanceTimeWait(std::chrono::seconds(1)); + EXPECT_TRUE(drain_manager.drainClose()); + simTime().advanceTimeWait(std::chrono::seconds(1)); + EXPECT_TRUE(drain_manager.drainClose()); + simTime().advanceTimeWait(std::chrono::seconds(500)); + EXPECT_TRUE(drain_manager.drainClose()); + } +} + +TEST_P(DrainManagerImplTest, DrainDeadlineProbability) { + const bool drain_gradually = GetParam(); + ON_CALL(server_.options_, drainStrategy()) + .WillByDefault(Return(drain_gradually ? 
Server::DrainStrategy::Gradual + : Server::DrainStrategy::Immediate)); + ON_CALL(server_.random_, random()).WillByDefault(Return(4)); + ON_CALL(server_.options_, drainTime()).WillByDefault(Return(std::chrono::seconds(3))); + + DrainManagerImpl drain_manager(server_, envoy::config::listener::v3::Listener::DEFAULT); + + EXPECT_CALL(server_, healthCheckFailed()).WillOnce(Return(true)); + EXPECT_TRUE(drain_manager.drainClose()); + EXPECT_CALL(server_, healthCheckFailed()).WillRepeatedly(Return(false)); + EXPECT_FALSE(drain_manager.drainClose()); + EXPECT_FALSE(drain_manager.draining()); + + drain_manager.startDrainSequence([] {}); + EXPECT_TRUE(drain_manager.draining()); + + if (drain_gradually) { + // random() should be called when elapsed time < drain timeout + EXPECT_CALL(server_.random_, random()).Times(2); + // Current elapsed time is 0 + // drainClose() will return true when elapsed time > (4 % 3 == 1). + EXPECT_FALSE(drain_manager.drainClose()); + simTime().advanceTimeWait(std::chrono::seconds(2)); + EXPECT_TRUE(drain_manager.drainClose()); + simTime().advanceTimeWait(std::chrono::seconds(1)); + EXPECT_TRUE(drain_manager.drainClose()); + } else { + EXPECT_CALL(server_.random_, random()).Times(0); + EXPECT_TRUE(drain_manager.drainClose()); + simTime().advanceTimeWait(std::chrono::seconds(2)); + EXPECT_TRUE(drain_manager.drainClose()); + simTime().advanceTimeWait(std::chrono::seconds(1)); + EXPECT_TRUE(drain_manager.drainClose()); + } +} + +INSTANTIATE_TEST_SUITE_P(DrainStrategies, DrainManagerImplTest, testing::Bool()); + } // namespace } // namespace Server } // namespace Envoy diff --git a/test/server/filter_chain_benchmark_test.cc b/test/server/filter_chain_benchmark_test.cc index d3e7b58b14994..a04047346b305 100644 --- a/test/server/filter_chain_benchmark_test.cc +++ b/test/server/filter_chain_benchmark_test.cc @@ -1,5 +1,4 @@ #include -#include #include "envoy/config/listener/v3/listener.pb.h" #include "envoy/config/listener/v3/listener_components.pb.h" @@ 
-11,8 +10,9 @@ #include "extensions/transport_sockets/well_known_names.h" +#include "test/benchmark/main.h" #include "test/mocks/network/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "test/test_common/environment.h" #include "test/test_common/utility.h" @@ -28,7 +28,7 @@ namespace Server { namespace { class MockFilterChainFactoryBuilder : public FilterChainFactoryBuilder { - std::shared_ptr + Network::DrainableFilterChainSharedPtr buildFilterChain(const envoy::config::listener::v3::FilterChain&, FilterChainFactoryContextCreator&) const override { // A place holder to be found @@ -89,8 +89,10 @@ class MockConnectionSocket : public Network::ConnectionSocket { // Dummy method void close() override {} bool isOpen() const override { return false; } - Network::Address::SocketType socketType() const override { - return Network::Address::SocketType::Stream; + Network::Socket::Type socketType() const override { return Network::Socket::Type::Stream; } + Network::Address::Type addressType() const override { return local_address_->type(); } + absl::optional ipVersion() const override { + return Network::Address::IpVersion::v4; } void setLocalAddress(const Network::Address::InstanceConstSharedPtr&) override {} void restoreLocalAddress(const Network::Address::InstanceConstSharedPtr&) override {} @@ -102,6 +104,18 @@ class MockConnectionSocket : public Network::ConnectionSocket { void addOptions(const OptionsSharedPtr&) override {} const OptionsSharedPtr& options() const override { return options_; } void setRequestedServerName(absl::string_view) override {} + Api::SysCallIntResult bind(Network::Address::InstanceConstSharedPtr) override { return {0, 0}; } + Api::SysCallIntResult listen(int) override { return {0, 0}; } + Api::SysCallIntResult connect(const Network::Address::InstanceConstSharedPtr) override { + return {0, 0}; + } + Api::SysCallIntResult setSocketOption(int, int, const void*, socklen_t) override { + return 
{0, 0}; + } + Api::SysCallIntResult getSocketOption(int, int, void*, socklen_t*) const override { + return {0, 0}; + } + Api::SysCallIntResult setBlockingForTest(bool) override { return {0, 0}; } private: Network::IoHandlePtr io_handle_; @@ -164,9 +178,9 @@ const char YamlSingleDstPortBottom[] = R"EOF( - filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ticket_key_a")EOF"; } // namespace -class FilterChainBenchmarkFixture : public benchmark::Fixture { +class FilterChainBenchmarkFixture : public ::benchmark::Fixture { public: - void SetUp(::benchmark::State& state) override { + void initialize(::benchmark::State& state) { int64_t input_size = state.range(0); std::vector port_chains; port_chains.reserve(input_size); @@ -179,6 +193,10 @@ class FilterChainBenchmarkFixture : public benchmark::Fixture { TestUtility::loadFromYaml(listener_yaml_config_, listener_config_); filter_chains_ = listener_config_.filter_chains(); } + + Envoy::Thread::MutexBasicLockable lock_; + Logger::Context logging_state_{spdlog::level::warn, Logger::Logger::DEFAULT_LOG_FORMAT, lock_, + false}; std::string listener_yaml_config_; envoy::config::listener::v3::Listener listener_config_; absl::Span filter_chains_; @@ -189,6 +207,12 @@ class FilterChainBenchmarkFixture : public benchmark::Fixture { // NOLINTNEXTLINE(readability-redundant-member-init) BENCHMARK_DEFINE_F(FilterChainBenchmarkFixture, FilterChainManagerBuildTest) (::benchmark::State& state) { + if (benchmark::skipExpensiveBenchmarks() && state.range(0) > 64) { + state.SkipWithError("Skipping expensive benchmark"); + return; + } + + initialize(state); NiceMock factory_context; for (auto _ : state) { FilterChainManagerImpl filter_chain_manager{ @@ -200,6 +224,12 @@ BENCHMARK_DEFINE_F(FilterChainBenchmarkFixture, FilterChainManagerBuildTest) BENCHMARK_DEFINE_F(FilterChainBenchmarkFixture, FilterChainFindTest) (::benchmark::State& state) { + if (benchmark::skipExpensiveBenchmarks() && state.range(0) > 64) { + 
state.SkipWithError("Skipping expensive benchmark"); + return; + } + + initialize(state); std::vector sockets; sockets.reserve(state.range(0)); for (int i = 0; i < state.range(0); i++) { @@ -222,12 +252,14 @@ BENCHMARK_REGISTER_F(FilterChainBenchmarkFixture, FilterChainManagerBuildTest) ->Ranges({ // scale of the chains {1, 4096}, - }); + }) + ->Unit(::benchmark::kMillisecond); BENCHMARK_REGISTER_F(FilterChainBenchmarkFixture, FilterChainFindTest) ->Ranges({ // scale of the chains {1, 4096}, - }); + }) + ->Unit(::benchmark::kMillisecond); /* clang-format off diff --git a/test/server/filter_chain_manager_impl_test.cc b/test/server/filter_chain_manager_impl_test.cc index 85a67482abfc5..4b78f2a70d88d 100644 --- a/test/server/filter_chain_manager_impl_test.cc +++ b/test/server/filter_chain_manager_impl_test.cc @@ -25,7 +25,8 @@ #include "extensions/transport_sockets/tls/ssl_socket.h" #include "test/mocks/network/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/drain_manager.h" +#include "test/mocks/server/factory_context.h" #include "test/server/utility.h" #include "test/test_common/environment.h" #include "test/test_common/registry.h" @@ -51,7 +52,7 @@ class MockFilterChainFactoryBuilder : public FilterChainFactoryBuilder { .WillByDefault(Return(std::make_shared())); } - MOCK_METHOD(std::shared_ptr, buildFilterChain, + MOCK_METHOD(Network::DrainableFilterChainSharedPtr, buildFilterChain, (const envoy::config::listener::v3::FilterChain&, FilterChainFactoryContextCreator&), (const)); }; diff --git a/test/server/guarddog_impl_test.cc b/test/server/guarddog_impl_test.cc index 067ee8403aafc..e26856e011db3 100644 --- a/test/server/guarddog_impl_test.cc +++ b/test/server/guarddog_impl_test.cc @@ -3,6 +3,7 @@ #include #include "envoy/common/time.h" +#include "envoy/server/watchdog.h" #include "common/api/api_impl.h" #include "common/common/macros.h" @@ -11,7 +12,7 @@ #include "server/guarddog_impl.h" #include "test/mocks/common.h" -#include 
"test/mocks/server/mocks.h" +#include "test/mocks/server/main.h" #include "test/mocks/stats/mocks.h" #include "test/test_common/simulated_time_system.h" #include "test/test_common/test_time.h" @@ -89,7 +90,8 @@ INSTANTIATE_TEST_SUITE_P(TimeSystemType, GuardDogTestBase, class GuardDogDeathTest : public GuardDogTestBase { protected: GuardDogDeathTest() - : config_kill_(1000, 1000, 100, 1000), config_multikill_(1000, 1000, 1000, 500) {} + : config_kill_(1000, 1000, 100, 1000, 0), config_multikill_(1000, 1000, 1000, 500, 0), + config_multikill_threshold_(1000, 1000, 1000, 500, 60) {} /** * This does everything but the final forceCheckForTest() that should cause @@ -99,6 +101,7 @@ class GuardDogDeathTest : public GuardDogTestBase { InSequence s; initGuardDog(fakestats_, config_kill_); unpet_dog_ = guard_dog_->createWatchDog(api_->threadFactory().currentThreadId(), "test_thread"); + dogs_.emplace_back(unpet_dog_); guard_dog_->forceCheckForTest(); time_system_->advanceTimeWait(std::chrono::milliseconds(99)); // 1 ms shy of death. } @@ -112,18 +115,47 @@ class GuardDogDeathTest : public GuardDogTestBase { initGuardDog(fakestats_, config_multikill_); auto unpet_dog_ = guard_dog_->createWatchDog(api_->threadFactory().currentThreadId(), "test_thread"); + dogs_.emplace_back(unpet_dog_); guard_dog_->forceCheckForTest(); auto second_dog_ = guard_dog_->createWatchDog(api_->threadFactory().currentThreadId(), "test_thread"); + dogs_.emplace_back(second_dog_); guard_dog_->forceCheckForTest(); time_system_->advanceTimeWait(std::chrono::milliseconds(499)); // 1 ms shy of multi-death. } + /** + * This does everything but the final forceCheckForTest() that should cause + * death for the multiple kill case using threshold (100% of watchdogs over the threshold). + */ + void setupForMultiDeathThreshold() { + InSequence s; + initGuardDog(fakestats_, config_multikill_threshold_); + + // Creates 5 watchdogs. 
+ for (int i = 0; i < 5; ++i) { + auto dog = guard_dog_->createWatchDog(api_->threadFactory().currentThreadId(), "test_thread"); + dogs_.emplace_back(dog); + + if (i == 0) { + unpet_dog_ = dog; + } else if (i == 1) { + second_dog_ = dog; + } + + guard_dog_->forceCheckForTest(); + } + + time_system_->advanceTimeWait(std::chrono::milliseconds(499)); // 1 ms shy of multi-death. + } + NiceMock config_kill_; NiceMock config_multikill_; + NiceMock config_multikill_threshold_; NiceMock fakestats_; WatchDogSharedPtr unpet_dog_; WatchDogSharedPtr second_dog_; + std::vector dogs_; // Tracks all watchdogs created. }; INSTANTIATE_TEST_SUITE_P(TimeSystemType, GuardDogDeathTest, @@ -174,6 +206,34 @@ TEST_P(GuardDogAlmostDeadTest, MultiKillNoFinalCheckTest) { SetupForMultiDeath(); } +TEST_P(GuardDogDeathTest, MultiKillThresholdDeathTest) { + auto die_function = [&]() -> void { + setupForMultiDeathThreshold(); + + // Pet the last two dogs so we're just at the threshold that causes death. + dogs_.at(4)->touch(); + dogs_.at(3)->touch(); + + time_system_->advanceTimeWait(std::chrono::milliseconds(2)); // 1 ms past multi-death. + guard_dog_->forceCheckForTest(); + }; + EXPECT_DEATH(die_function(), ""); +} + +TEST_P(GuardDogAlmostDeadTest, MultiKillUnderThreshold) { + // This does everything the death test does except it pets an additional watchdog + // that causes us to be under the threshold (60%) of multikill death. + setupForMultiDeathThreshold(); + + // Pet the last three dogs so we're just under the threshold that causes death. + dogs_.at(4)->touch(); + dogs_.at(3)->touch(); + dogs_.at(2)->touch(); + + time_system_->advanceTimeWait(std::chrono::milliseconds(2)); // 1 ms past multi-death. + guard_dog_->forceCheckForTest(); +} + TEST_P(GuardDogAlmostDeadTest, NearDeathTest) { // This ensures that if only one thread surpasses the multiple kill threshold // there is no death. The positive case is covered in MultiKillDeathTest. 
@@ -195,7 +255,7 @@ TEST_P(GuardDogAlmostDeadTest, NearDeathTest) { class GuardDogMissTest : public GuardDogTestBase { protected: - GuardDogMissTest() : config_miss_(500, 1000, 0, 0), config_mega_(1000, 500, 0, 0) {} + GuardDogMissTest() : config_miss_(500, 1000, 0, 0, 0), config_mega_(1000, 500, 0, 0, 0) {} void checkMiss(uint64_t count, const std::string& descriptor) { EXPECT_EQ(count, TestUtility::findCounter(stats_store_, "server.watchdog_miss")->value()) @@ -315,27 +375,27 @@ TEST_P(GuardDogMissTest, MissCountTest) { TEST_P(GuardDogTestBase, StartStopTest) { NiceMock stats; - NiceMock config(0, 0, 0, 0); + NiceMock config(0, 0, 0, 0, 0); initGuardDog(stats, config); } TEST_P(GuardDogTestBase, LoopIntervalNoKillTest) { NiceMock stats; - NiceMock config(40, 50, 0, 0); + NiceMock config(40, 50, 0, 0, 0); initGuardDog(stats, config); EXPECT_EQ(guard_dog_->loopIntervalForTest(), std::chrono::milliseconds(40)); } TEST_P(GuardDogTestBase, LoopIntervalTest) { NiceMock stats; - NiceMock config(100, 90, 1000, 500); + NiceMock config(100, 90, 1000, 500, 0); initGuardDog(stats, config); EXPECT_EQ(guard_dog_->loopIntervalForTest(), std::chrono::milliseconds(90)); } TEST_P(GuardDogTestBase, WatchDogThreadIdTest) { NiceMock stats; - NiceMock config(100, 90, 1000, 500); + NiceMock config(100, 90, 1000, 500, 0); initGuardDog(stats, config); auto watched_dog = guard_dog_->createWatchDog(api_->threadFactory().currentThreadId(), "test_thread"); diff --git a/test/server/hot_restart_impl_test.cc b/test/server/hot_restart_impl_test.cc index 1b82f08ab6afd..1d127c2572c42 100644 --- a/test/server/hot_restart_impl_test.cc +++ b/test/server/hot_restart_impl_test.cc @@ -8,7 +8,7 @@ #include "test/mocks/api/hot_restart.h" #include "test/mocks/api/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/hot_restart.h" #include "test/test_common/logging.h" #include "test/test_common/threadsafe_singleton_injector.h" @@ -20,6 +20,7 @@ using testing::_; using 
testing::AnyNumber; using testing::Invoke; using testing::InvokeWithoutArgs; +using testing::Return; using testing::WithArg; namespace Envoy { @@ -42,8 +43,11 @@ class HotRestartImplTest : public testing::Test { EXPECT_CALL(os_sys_calls_, bind(_, _, _)).Times(2); // Test we match the correct stat with empty-slots before, after, or both. - hot_restart_ = std::make_unique(options_); + hot_restart_ = std::make_unique(0, 0); hot_restart_->drainParentListeners(); + + // We close both sockets. + EXPECT_CALL(os_sys_calls_, close(_)).Times(2); } Api::MockOsSysCalls os_sys_calls_; @@ -51,7 +55,6 @@ class HotRestartImplTest : public testing::Test { Api::MockHotRestartOsSysCalls hot_restart_os_sys_calls_; TestThreadsafeSingletonInjector hot_restart_os_calls{ &hot_restart_os_sys_calls_}; - NiceMock options_; std::vector buffer_; std::unique_ptr hot_restart_; }; @@ -77,6 +80,27 @@ TEST_F(HotRestartImplTest, VersionString) { } } +// Test that HotRestartDomainSocketInUseException is thrown when the domain socket is already +// in use, +TEST_F(HotRestartImplTest, DomainSocketAlreadyInUse) { + EXPECT_CALL(os_sys_calls_, bind(_, _, _)) + .WillOnce(Return(Api::SysCallIntResult{-1, SOCKET_ERROR_ADDR_IN_USE})); + EXPECT_CALL(os_sys_calls_, close(_)).Times(1); + + EXPECT_THROW(std::make_unique(0, 0), + Server::HotRestartDomainSocketInUseException); +} + +// Test that EnvoyException is thrown when the domain socket bind fails for reasons other than +// being in use. 
+TEST_F(HotRestartImplTest, DomainSocketError) { + EXPECT_CALL(os_sys_calls_, bind(_, _, _)) + .WillOnce(Return(Api::SysCallIntResult{-1, SOCKET_ERROR_ACCESS})); + EXPECT_CALL(os_sys_calls_, close(_)).Times(1); + + EXPECT_THROW(std::make_unique(0, 0), EnvoyException); +} + } // namespace } // namespace Server } // namespace Envoy diff --git a/test/server/hot_restarting_parent_test.cc b/test/server/hot_restarting_parent_test.cc index e178bb9b2ff39..a3f405d550db9 100644 --- a/test/server/hot_restarting_parent_test.cc +++ b/test/server/hot_restarting_parent_test.cc @@ -4,7 +4,8 @@ #include "server/hot_restarting_parent.h" #include "test/mocks/network/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/instance.h" +#include "test/mocks/server/listener_manager.h" #include "gtest/gtest.h" @@ -35,7 +36,8 @@ TEST_F(HotRestartingParentTest, GetListenSocketsForChildNotFound) { MockListenerManager listener_manager; std::vector> listeners; EXPECT_CALL(server_, listenerManager()).WillOnce(ReturnRef(listener_manager)); - EXPECT_CALL(listener_manager, listeners()).WillOnce(Return(listeners)); + EXPECT_CALL(listener_manager, listeners(ListenerManager::ListenerState::ACTIVE)) + .WillOnce(Return(listeners)); HotRestartMessage::Request request; request.mutable_pass_listen_socket()->set_address("tcp://127.0.0.1:80"); @@ -50,7 +52,8 @@ TEST_F(HotRestartingParentTest, GetListenSocketsForChildNotBindPort) { InSequence s; listeners.push_back(std::ref(*static_cast(&listener_config))); EXPECT_CALL(server_, listenerManager()).WillOnce(ReturnRef(listener_manager)); - EXPECT_CALL(listener_manager, listeners()).WillOnce(Return(listeners)); + EXPECT_CALL(listener_manager, listeners(ListenerManager::ListenerState::ACTIVE)) + .WillOnce(Return(listeners)); EXPECT_CALL(listener_config, listenSocketFactory()); EXPECT_CALL(listener_config.socket_factory_, localAddress()); EXPECT_CALL(listener_config, bindToPort()).WillOnce(Return(false)); diff --git 
a/test/server/lds_api_test.cc b/test/server/lds_api_test.cc index 513cd9ab5e415..54a84886e832d 100644 --- a/test/server/lds_api_test.cc +++ b/test/server/lds_api_test.cc @@ -9,18 +9,21 @@ #include "server/lds_api.h" #include "test/mocks/config/mocks.h" +#include "test/mocks/init/mocks.h" #include "test/mocks/protobuf/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/listener_manager.h" +#include "test/mocks/upstream/mocks.h" #include "test/test_common/environment.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" -using testing::_; -using testing::InSequence; -using testing::Invoke; -using testing::Return; -using testing::Throw; +using ::testing::_; +using ::testing::InSequence; +using ::testing::Invoke; +using ::testing::NiceMock; +using ::testing::Return; +using ::testing::Throw; namespace Envoy { namespace Server { @@ -73,19 +76,19 @@ class LdsApiTest : public testing::Test { listeners_.back().name_ = name; refs.emplace_back(listeners_.back()); } - EXPECT_CALL(listener_manager_, listeners()).WillOnce(Return(refs)); + EXPECT_CALL(listener_manager_, listeners(ListenerManager::WARMING | ListenerManager::ACTIVE)) + .WillOnce(Return(refs)); EXPECT_CALL(listener_manager_, beginListenerUpdate()); } - void addListener(Protobuf::RepeatedPtrField& listeners, - const std::string& listener_name) { + envoy::config::listener::v3::Listener buildListener(const std::string& listener_name) { envoy::config::listener::v3::Listener listener; listener.set_name(listener_name); auto socket_address = listener.mutable_address()->mutable_socket_address(); socket_address->set_address(listener_name); socket_address->set_port_value(1); listener.add_filter_chains(); - listeners.Add()->PackFrom(listener); + return listener; } std::shared_ptr> grpc_mux_; @@ -103,38 +106,11 @@ class LdsApiTest : public testing::Test { std::list> listeners_; }; -// Negative test for protoc-gen-validate constraints. 
-TEST_F(LdsApiTest, ValidateFail) { - InSequence s; - - setup(); - - Protobuf::RepeatedPtrField listeners; - envoy::config::listener::v3::Listener listener; - listeners.Add()->PackFrom(listener); - std::vector> existing_listeners; - EXPECT_CALL(listener_manager_, listeners()).WillOnce(Return(existing_listeners)); - EXPECT_CALL(listener_manager_, beginListenerUpdate()); - // Validate that the error state is passed to the listener manager. - EXPECT_CALL(listener_manager_, endListenerUpdate(_)) - .WillOnce(Invoke([](ListenerManager::FailureStates&& state) { - EXPECT_EQ(1, state.size()); - EXPECT_EQ("Proto constraint validation failed (ListenerValidationError.Address: " - "[\"value is required\"]): ", - state[0]->details()); - EXPECT_TRUE(state[0]->has_failed_configuration()); - })); - EXPECT_CALL(init_watcher_, ready()); - - EXPECT_THROW(lds_callbacks_->onConfigUpdate(listeners, ""), EnvoyException); -} - TEST_F(LdsApiTest, MisconfiguredListenerNameIsPresentInException) { InSequence s; setup(); - Protobuf::RepeatedPtrField listeners; std::vector> existing_listeners; // Construct a minimal listener that would pass proto validation. 
@@ -145,7 +121,8 @@ TEST_F(LdsApiTest, MisconfiguredListenerNameIsPresentInException) { socket_address->set_port_value(1); listener.add_filter_chains(); - EXPECT_CALL(listener_manager_, listeners()).WillOnce(Return(existing_listeners)); + EXPECT_CALL(listener_manager_, listeners(ListenerManager::WARMING | ListenerManager::ACTIVE)) + .WillOnce(Return(existing_listeners)); EXPECT_CALL(listener_manager_, beginListenerUpdate()); EXPECT_CALL(listener_manager_, addOrUpdateListener(_, _, true)) @@ -153,9 +130,9 @@ TEST_F(LdsApiTest, MisconfiguredListenerNameIsPresentInException) { EXPECT_CALL(listener_manager_, endListenerUpdate(_)); EXPECT_CALL(init_watcher_, ready()); - listeners.Add()->PackFrom(listener); + const auto decoded_resources = TestUtility::decodeResources({listener}); EXPECT_THROW_WITH_MESSAGE( - lds_callbacks_->onConfigUpdate(listeners, ""), EnvoyException, + lds_callbacks_->onConfigUpdate(decoded_resources.refvec_, ""), EnvoyException, "Error adding/updating listener(s) invalid-listener: something is wrong\n"); } @@ -164,17 +141,17 @@ TEST_F(LdsApiTest, EmptyListenersUpdate) { setup(); - Protobuf::RepeatedPtrField listeners; std::vector> existing_listeners; - EXPECT_CALL(listener_manager_, listeners()).WillOnce(Return(existing_listeners)); + EXPECT_CALL(listener_manager_, listeners(ListenerManager::WARMING | ListenerManager::ACTIVE)) + .WillOnce(Return(existing_listeners)); EXPECT_CALL(listener_manager_, beginListenerUpdate()); EXPECT_CALL(listener_manager_, endListenerUpdate(_)) .WillOnce(Invoke([](ListenerManager::FailureStates&& state) { EXPECT_EQ(0, state.size()); })); ; EXPECT_CALL(init_watcher_, ready()); - lds_callbacks_->onConfigUpdate(listeners, ""); + lds_callbacks_->onConfigUpdate({}, ""); } TEST_F(LdsApiTest, ListenerCreationContinuesEvenAfterException) { @@ -182,16 +159,16 @@ TEST_F(LdsApiTest, ListenerCreationContinuesEvenAfterException) { setup(); - Protobuf::RepeatedPtrField listeners; std::vector> existing_listeners; // Add 4 listeners - 2 
valid and 2 invalid. - addListener(listeners, "valid-listener-1"); - addListener(listeners, "invalid-listener-1"); - addListener(listeners, "valid-listener-2"); - addListener(listeners, "invalid-listener-2"); + const auto listener_0 = buildListener("valid-listener-1"); + const auto listener_1 = buildListener("invalid-listener-1"); + const auto listener_2 = buildListener("valid-listener-2"); + const auto listener_3 = buildListener("invalid-listener-2"); - EXPECT_CALL(listener_manager_, listeners()).WillOnce(Return(existing_listeners)); + EXPECT_CALL(listener_manager_, listeners(ListenerManager::WARMING | ListenerManager::ACTIVE)) + .WillOnce(Return(existing_listeners)); EXPECT_CALL(listener_manager_, beginListenerUpdate()); EXPECT_CALL(listener_manager_, addOrUpdateListener(_, _, true)) @@ -203,7 +180,10 @@ TEST_F(LdsApiTest, ListenerCreationContinuesEvenAfterException) { EXPECT_CALL(init_watcher_, ready()); - EXPECT_THROW_WITH_MESSAGE(lds_callbacks_->onConfigUpdate(listeners, ""), EnvoyException, + const auto decoded_resources = + TestUtility::decodeResources({listener_0, listener_1, listener_2, listener_3}); + EXPECT_THROW_WITH_MESSAGE(lds_callbacks_->onConfigUpdate(decoded_resources.refvec_, ""), + EnvoyException, "Error adding/updating listener(s) invalid-listener-1: something is " "wrong\ninvalid-listener-2: something else is wrong\n"); } @@ -216,18 +196,19 @@ TEST_F(LdsApiTest, ValidateDuplicateListeners) { setup(); - Protobuf::RepeatedPtrField listeners; - addListener(listeners, "duplicate_listener"); - addListener(listeners, "duplicate_listener"); + const auto listener = buildListener("duplicate_listener"); std::vector> existing_listeners; - EXPECT_CALL(listener_manager_, listeners()).WillOnce(Return(existing_listeners)); + EXPECT_CALL(listener_manager_, listeners(ListenerManager::WARMING | ListenerManager::ACTIVE)) + .WillOnce(Return(existing_listeners)); EXPECT_CALL(listener_manager_, beginListenerUpdate()); EXPECT_CALL(listener_manager_, 
addOrUpdateListener(_, _, true)).WillOnce(Return(true)); EXPECT_CALL(listener_manager_, endListenerUpdate(_)); EXPECT_CALL(init_watcher_, ready()); - EXPECT_THROW_WITH_MESSAGE(lds_callbacks_->onConfigUpdate(listeners, ""), EnvoyException, + const auto decoded_resources = TestUtility::decodeResources({listener, listener}); + EXPECT_THROW_WITH_MESSAGE(lds_callbacks_->onConfigUpdate(decoded_resources.refvec_, ""), + EnvoyException, "Error adding/updating listener(s) duplicate_listener: duplicate " "listener duplicate_listener found\n"); } @@ -264,7 +245,9 @@ TEST_F(LdsApiTest, Basic) { expectAdd("listener2", "0", true); EXPECT_CALL(listener_manager_, endListenerUpdate(_)); EXPECT_CALL(init_watcher_, ready()); - lds_callbacks_->onConfigUpdate(response1.resources(), response1.version_info()); + const auto decoded_resources = + TestUtility::decodeResources(response1); + lds_callbacks_->onConfigUpdate(decoded_resources.refvec_, response1.version_info()); EXPECT_EQ("0", lds_->versionInfo()); @@ -295,7 +278,9 @@ TEST_F(LdsApiTest, Basic) { expectAdd("listener1", "1", false); expectAdd("listener3", "1", true); EXPECT_CALL(listener_manager_, endListenerUpdate(_)); - lds_callbacks_->onConfigUpdate(response2.resources(), response2.version_info()); + const auto decoded_resources_2 = + TestUtility::decodeResources(response2); + lds_callbacks_->onConfigUpdate(decoded_resources_2.refvec_, response2.version_info()); EXPECT_EQ("1", lds_->versionInfo()); } @@ -326,7 +311,9 @@ TEST_F(LdsApiTest, UpdateVersionOnListenerRemove) { expectAdd("listener1", "0", true); EXPECT_CALL(listener_manager_, endListenerUpdate(_)); EXPECT_CALL(init_watcher_, ready()); - lds_callbacks_->onConfigUpdate(response1.resources(), response1.version_info()); + const auto decoded_resources = + TestUtility::decodeResources(response1); + lds_callbacks_->onConfigUpdate(decoded_resources.refvec_, response1.version_info()); EXPECT_EQ("0", lds_->versionInfo()); @@ -342,7 +329,9 @@ TEST_F(LdsApiTest, 
UpdateVersionOnListenerRemove) { makeListenersAndExpectCall({"listener1"}); EXPECT_CALL(listener_manager_, removeListener("listener1")).WillOnce(Return(true)); EXPECT_CALL(listener_manager_, endListenerUpdate(_)); - lds_callbacks_->onConfigUpdate(response2.resources(), response2.version_info()); + const auto decoded_resources_2 = + TestUtility::decodeResources(response2); + lds_callbacks_->onConfigUpdate(decoded_resources_2.refvec_, response2.version_info()); EXPECT_EQ("1", lds_->versionInfo()); } @@ -363,7 +352,7 @@ version_info: '1' address: tcp://0.0.0.1 port_value: 61000 filter_chains: - - filters: + - filters: )EOF"; auto response1 = TestUtility::parseYaml(response1_yaml); @@ -372,7 +361,9 @@ version_info: '1' expectAdd("listener0", {}, true); EXPECT_CALL(listener_manager_, endListenerUpdate(_)); EXPECT_CALL(init_watcher_, ready()); - lds_callbacks_->onConfigUpdate(response1.resources(), response1.version_info()); + const auto decoded_resources = + TestUtility::decodeResources(response1); + lds_callbacks_->onConfigUpdate(decoded_resources.refvec_, response1.version_info()); std::string response2_basic = R"EOF( version_info: '1' @@ -409,39 +400,10 @@ version_info: '1' // Can't check version here because of bazel sandbox paths for the certs. expectAdd("listener-8080", {}, true); EXPECT_CALL(listener_manager_, endListenerUpdate(_)); - EXPECT_NO_THROW(lds_callbacks_->onConfigUpdate(response2.resources(), response2.version_info())); -} - -// Validate behavior when the config is delivered but it fails PGV validation. -TEST_F(LdsApiTest, FailureInvalidConfig) { - InSequence s; - - setup(); - - // To test the case of valid JSON with invalid config, create a listener with no address. 
- const std::string response1_json = R"EOF( -{ - "version_info": "1", - "resources": [ - { - "@type": "type.googleapis.com/envoy.api.v2.Listener", - "name": "listener1", - "filter_chains": [ { "filters": null } ] - } - ] -} - )EOF"; - auto response1 = - TestUtility::parseYaml(response1_json); - - std::vector> existing_listeners; - EXPECT_CALL(listener_manager_, listeners()).WillOnce(Return(existing_listeners)); - EXPECT_CALL(listener_manager_, beginListenerUpdate()); - EXPECT_CALL(listener_manager_, endListenerUpdate(_)); - EXPECT_CALL(init_watcher_, ready()); - EXPECT_THROW(lds_callbacks_->onConfigUpdate(response1.resources(), response1.version_info()), - EnvoyException); - EXPECT_EQ("", lds_->versionInfo()); + const auto decoded_resources_2 = + TestUtility::decodeResources(response2); + EXPECT_NO_THROW( + lds_callbacks_->onConfigUpdate(decoded_resources_2.refvec_, response2.version_info())); } // Validate behavior when the config fails delivery at the subscription level. @@ -487,7 +449,9 @@ TEST_F(LdsApiTest, ReplacingListenerWithSameAddress) { expectAdd("listener2", "0", true); EXPECT_CALL(listener_manager_, endListenerUpdate(_)); EXPECT_CALL(init_watcher_, ready()); - lds_callbacks_->onConfigUpdate(response1.resources(), response1.version_info()); + const auto decoded_resources = + TestUtility::decodeResources(response1); + lds_callbacks_->onConfigUpdate(decoded_resources.refvec_, response1.version_info()); EXPECT_EQ("0", lds_->versionInfo()); @@ -518,7 +482,9 @@ TEST_F(LdsApiTest, ReplacingListenerWithSameAddress) { expectAdd("listener1", "1", false); expectAdd("listener3", "1", true); EXPECT_CALL(listener_manager_, endListenerUpdate(_)); - lds_callbacks_->onConfigUpdate(response2.resources(), response2.version_info()); + const auto decoded_resources_2 = + TestUtility::decodeResources(response2); + lds_callbacks_->onConfigUpdate(decoded_resources_2.refvec_, response2.version_info()); } } // namespace diff --git 
a/test/server/listener_manager_impl_quic_only_test.cc b/test/server/listener_manager_impl_quic_only_test.cc index 8106aac6adfca..3a2283fa4cc7f 100644 --- a/test/server/listener_manager_impl_quic_only_test.cc +++ b/test/server/listener_manager_impl_quic_only_test.cc @@ -11,7 +11,16 @@ namespace Envoy { namespace Server { namespace { -class ListenerManagerImplQuicOnlyTest : public ListenerManagerImplTest {}; +class MockSupportsUdpGso : public Api::OsSysCallsImpl { +public: + MOCK_METHOD(bool, supportsUdpGso, (), (const)); +}; + +class ListenerManagerImplQuicOnlyTest : public ListenerManagerImplTest { +public: + NiceMock udp_gso_syscall_; + TestThreadsafeSingletonInjector os_calls{&udp_gso_syscall_}; +}; TEST_F(ListenerManagerImplQuicOnlyTest, QuicListenerFactoryAndSslContext) { const std::string yaml = TestEnvironment::substitute(R"EOF( @@ -26,58 +35,75 @@ TEST_F(ListenerManagerImplQuicOnlyTest, QuicListenerFactoryAndSslContext) { filters: [] transport_socket: name: envoy.transport_sockets.quic - config: - common_tls_context: - tls_certificates: - - certificate_chain: - filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_cert.pem" - private_key: - filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_key.pem" - validation_context: - trusted_ca: - filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem" - match_subject_alt_names: - - exact: localhost - - exact: 127.0.0.1 + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.quic.v3.QuicDownstreamTransport + downstream_tls_context: + common_tls_context: + tls_certificates: + - certificate_chain: + filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_cert.pem" + private_key: + filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_key.pem" + validation_context: + trusted_ca: + filename: "{{ test_rundir 
}}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem" + match_subject_alt_names: + - exact: localhost + - exact: 127.0.0.1 reuse_port: true udp_listener_config: udp_listener_name: "quiche_quic_listener" +udp_writer_config: + name: "udp_gso_batch_writer" + typed_config: + "@type": type.googleapis.com/envoy.config.listener.v3.UdpGsoBatchWriterOptions )EOF", Network::Address::IpVersion::v4); - envoy::config::listener::v3::Listener listener_proto = parseListenerFromV2Yaml(yaml); + envoy::config::listener::v3::Listener listener_proto = parseListenerFromV3Yaml(yaml); + ON_CALL(udp_gso_syscall_, supportsUdpGso()).WillByDefault(Return(true)); EXPECT_CALL(server_.random_, uuid()); expectCreateListenSocket(envoy::config::core::v3::SocketOption::STATE_PREBIND, -#ifdef SO_RXQ_OVFL - /* expected_num_options */ 3, // SO_REUSEPORT is on as configured +#ifdef SO_RXQ_OVFL // SO_REUSEPORT is on as configured + /* expected_num_options */ + Api::OsSysCallsSingleton::get().supportsUdpGro() ? 4 : 3, #else - /* expected_num_options */ 2, + /* expected_num_options */ + Api::OsSysCallsSingleton::get().supportsUdpGro() ? 
3 : 2, #endif /* expected_creation_params */ {true, false}); - expectSetsockopt(os_sys_calls_, - /* expected_sockopt_level */ IPPROTO_IP, + expectSetsockopt(/* expected_sockopt_level */ IPPROTO_IP, /* expected_sockopt_name */ ENVOY_IP_PKTINFO, /* expected_value */ 1, /* expected_num_calls */ 1); #ifdef SO_RXQ_OVFL - expectSetsockopt(os_sys_calls_, - /* expected_sockopt_level */ SOL_SOCKET, + expectSetsockopt(/* expected_sockopt_level */ SOL_SOCKET, /* expected_sockopt_name */ SO_RXQ_OVFL, /* expected_value */ 1, /* expected_num_calls */ 1); #endif - - expectSetsockopt(os_sys_calls_, - /* expected_sockopt_level */ SOL_SOCKET, + expectSetsockopt(/* expected_sockopt_level */ SOL_SOCKET, /* expected_sockopt_name */ SO_REUSEPORT, /* expected_value */ 1, /* expected_num_calls */ 1); + if (Api::OsSysCallsSingleton::get().supportsUdpGro()) { + expectSetsockopt(/* expected_sockopt_level */ SOL_UDP, + /* expected_sockopt_name */ UDP_GRO, + /* expected_value */ 1, + /* expected_num_calls */ 1); + } manager_->addOrUpdateListener(listener_proto, "", true); EXPECT_EQ(1u, manager_->listeners().size()); EXPECT_FALSE(manager_->listeners()[0].get().udpListenerFactory()->isTransportConnectionless()); - manager_->listeners().front().get().listenSocketFactory().getListenSocket(); + Network::SocketSharedPtr listen_socket = + manager_->listeners().front().get().listenSocketFactory().getListenSocket(); + + Network::UdpPacketWriterPtr udp_packet_writer = + manager_->listeners().front().get().udpPacketWriterFactory()->get().createUdpPacketWriter( + listen_socket->ioHandle(), manager_->listeners()[0].get().listenerScope()); + EXPECT_TRUE(udp_packet_writer->isBatchMode()); // No filter chain found with non-matching transport protocol. 
EXPECT_EQ(nullptr, findFilterChain(1234, "127.0.0.1", "", "tls", {}, "8.8.8.8", 111)); diff --git a/test/server/listener_manager_impl_test.cc b/test/server/listener_manager_impl_test.cc index a032037daf9be..c47c22f44b6ec 100644 --- a/test/server/listener_manager_impl_test.cc +++ b/test/server/listener_manager_impl_test.cc @@ -55,7 +55,7 @@ class ListenerManagerImplWithRealFiltersTest : public ListenerManagerImplTest { * Create an IPv4 listener with a given name. */ envoy::config::listener::v3::Listener createIPv4Listener(const std::string& name) { - envoy::config::listener::v3::Listener listener = parseListenerFromV2Yaml(R"EOF( + envoy::config::listener::v3::Listener listener = parseListenerFromV3Yaml(R"EOF( address: socket_address: { address: 127.0.0.1, port_value: 1111 } filter_chains: @@ -75,10 +75,10 @@ class ListenerManagerImplWithRealFiltersTest : public ListenerManagerImplTest { const Network::SocketOptionName& expected_option, int expected_value, uint32_t expected_num_options = 1, ListenSocketCreationParams expected_creation_params = {true, true}) { - if (expected_option.has_value()) { + if (expected_option.hasValue()) { expectCreateListenSocket(expected_state, expected_num_options, expected_creation_params); - expectSetsockopt(os_sys_calls_, expected_option.level(), expected_option.option(), - expected_value, expected_num_options); + expectSetsockopt(expected_option.level(), expected_option.option(), expected_value, + expected_num_options); manager_->addOrUpdateListener(listener, "", true); EXPECT_EQ(1U, manager_->listeners().size()); } else { @@ -89,6 +89,65 @@ class ListenerManagerImplWithRealFiltersTest : public ListenerManagerImplTest { } }; +class ListenerManagerImplForInPlaceFilterChainUpdateTest : public ListenerManagerImplTest { +public: + envoy::config::listener::v3::Listener createDefaultListener() { + envoy::config::listener::v3::Listener listener_proto; + Protobuf::TextFormat::ParseFromString(R"EOF( + name: "foo" + address: { + socket_address: 
{ + address: "127.0.0.1" + port_value: 1234 + } + } + filter_chains: {} + )EOF", + &listener_proto); + return listener_proto; + } + + void expectAddListener(const envoy::config::listener::v3::Listener& listener_proto, + ListenerHandle*) { + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(*worker_, addListener(_, _, _)); + manager_->addOrUpdateListener(listener_proto, "", true); + worker_->callAddCompletion(true); + EXPECT_EQ(1UL, manager_->listeners().size()); + checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); + } + + void expectUpdateToThenDrain(const envoy::config::listener::v3::Listener& new_listener_proto, + ListenerHandle* old_listener_handle) { + EXPECT_CALL(*worker_, addListener(_, _, _)); + EXPECT_CALL(*worker_, stopListener(_, _)); + EXPECT_CALL(*old_listener_handle->drain_manager_, startDrainSequence(_)); + + EXPECT_TRUE(manager_->addOrUpdateListener(new_listener_proto, "", true)); + + EXPECT_CALL(*worker_, removeListener(_, _)); + old_listener_handle->drain_manager_->drain_sequence_completion_(); + + EXPECT_CALL(*old_listener_handle, onDestroy()); + worker_->callRemovalCompletion(); + } + + void expectRemove(const envoy::config::listener::v3::Listener& listener_proto, + ListenerHandle* listener_handle) { + + EXPECT_CALL(*worker_, stopListener(_, _)); + EXPECT_CALL(*listener_factory_.socket_, close()); + EXPECT_CALL(*listener_handle->drain_manager_, startDrainSequence(_)); + EXPECT_TRUE(manager_->removeListener(listener_proto.name())); + + EXPECT_CALL(*worker_, removeListener(_, _)); + listener_handle->drain_manager_->drain_sequence_completion_(); + + EXPECT_CALL(*listener_handle, onDestroy()); + worker_->callRemovalCompletion(); + } +}; + class MockLdsApi : public LdsApi { public: MOCK_METHOD(std::string, versionInfo, (), (const)); @@ -106,7 +165,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, EmptyFilter) { EXPECT_CALL(server_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - 
manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); EXPECT_EQ(std::chrono::milliseconds(15000), manager_->listeners().front().get().listenerFiltersTimeout()); @@ -123,7 +182,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, DefaultListenerPerConnectionBuffe )EOF"; EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1024 * 1024U, manager_->listeners().back().get().perConnectionBufferLimitBytes()); } @@ -139,7 +198,7 @@ per_connection_buffer_limit_bytes: 8192 )EOF"; EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(8192U, manager_->listeners().back().get().perConnectionBufferLimitBytes()); } @@ -171,7 +230,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, TlsTransportSocket) { Network::Address::IpVersion::v4); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); auto filter_chain = findFilterChain(1234, "127.0.0.1", "", "tls", {}, "8.8.8.8", 111); @@ -208,7 +267,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, Network::Address::IpVersion::v4); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); auto filter_chain = findFilterChain(1234, "127.0.0.1", "", "tls", {}, 
"8.8.8.8", 111); @@ -224,24 +283,27 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, DEPRECATED_FEATURE_TEST(TlsContex port_value: 1234 filter_chains: - filters: [] - tls_context: - common_tls_context: - tls_certificates: - - certificate_chain: - filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_cert.pem" - private_key: - filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_key.pem" - validation_context: - trusted_ca: - filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem" - verify_subject_alt_name: - - localhost - - 127.0.0.1 + transport_socket: + name: tls + typed_config: + "@type": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext + common_tls_context: + tls_certificates: + - certificate_chain: + filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_cert.pem" + private_key: + filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_key.pem" + validation_context: + trusted_ca: + filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem" + match_subject_alt_names: + exact: localhost + exact: 127.0.0.1 )EOF", Network::Address::IpVersion::v4); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); auto filter_chain = findFilterChain(1234, "127.0.0.1", "", "tls", {}, "8.8.8.8", 111); @@ -277,8 +339,13 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, UdpAddress) { EXPECT_CALL(server_.random_, uuid()); EXPECT_CALL(*worker_, addListener(_, _, _)); EXPECT_CALL(listener_factory_, - createListenSocket(_, Network::Address::SocketType::Datagram, _, {{true, false}})); - EXPECT_CALL(os_sys_calls_, setsockopt_(_, _, _, _, _)).Times(testing::AtLeast(1)); + 
createListenSocket(_, Network::Socket::Type::Datagram, _, {{true, false}})) + .WillOnce(Invoke([this](const Network::Address::InstanceConstSharedPtr&, + Network::Socket::Type, const Network::Socket::OptionsSharedPtr&, + const ListenSocketCreationParams&) -> Network::SocketSharedPtr { + return listener_factory_.socket_; + })); + EXPECT_CALL(*listener_factory_.socket_, setSocketOption(_, _, _, _)).Times(testing::AtLeast(1)); EXPECT_CALL(os_sys_calls_, close(_)).WillRepeatedly(Return(Api::SysCallIntResult{0, errno})); manager_->addOrUpdateListener(listener_proto, "", true); EXPECT_EQ(1u, manager_->listeners().size()); @@ -295,7 +362,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, BadListenerConfig) { test: a )EOF"; - EXPECT_THROW_WITH_REGEX(manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true), + EXPECT_THROW_WITH_REGEX(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true), EnvoyException, "test: Cannot find field"); } @@ -307,7 +374,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, BadListenerConfigNoFilterChains) port_value: 1234 )EOF"; - EXPECT_THROW_WITH_REGEX(manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true), + EXPECT_THROW_WITH_REGEX(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true), EnvoyException, "no filter chains specified"); } @@ -323,8 +390,8 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, BadListenerConfig2UDPListenerFilt - name: envoy.filters.listener.original_dst )EOF"; - EXPECT_THROW_WITH_REGEX(manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true), - EnvoyException, "Only 1 UDP filter per listener supported"); + EXPECT_THROW_WITH_REGEX(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true), + EnvoyException, "Only 1 UDP listener filter per listener supported"); } TEST_F(ListenerManagerImplWithRealFiltersTest, BadFilterConfig) { @@ -340,7 +407,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, BadFilterConfig) { typed_config: {} )EOF"; - 
EXPECT_THROW_WITH_REGEX(manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true), + EXPECT_THROW_WITH_REGEX(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true), EnvoyException, "foo: Cannot find field"); } class NonTerminalFilterFactory : public Configuration::NamedNetworkFilterConfigFactory { @@ -373,11 +440,11 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, TerminalNotLast) { filter_chains: - filters: - name: non_terminal - config: {} + typed_config: {} )EOF"; EXPECT_THROW_WITH_REGEX( - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true), EnvoyException, + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true), EnvoyException, "Error: non-terminal filter named non_terminal of type non_terminal is the last " "filter in a network filter chain."); } @@ -391,13 +458,13 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, NotTerminalLast) { filter_chains: - filters: - name: envoy.filters.network.tcp_proxy - config: {} + typed_config: {} - name: unknown_but_will_not_be_processed - config: {} + typed_config: {} )EOF"; EXPECT_THROW_WITH_REGEX( - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true), EnvoyException, + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true), EnvoyException, "Error: terminal filter named envoy.filters.network.tcp_proxy of type " "envoy.filters.network.tcp_proxy must be the last filter in a network filter chain."); } @@ -411,10 +478,10 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, BadFilterName) { filter_chains: - filters: - name: invalid - config: {} + typed_config: {} )EOF"; - EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true), + EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true), EnvoyException, "Didn't find a registered implementation for name: 'invalid'"); } @@ -458,11 +525,11 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, StatsScopeTest) { 
filter_chains: - filters: - name: stats_test - config: {} + typed_config: {} )EOF"; EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, ListenSocketCreationParams(false))); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); manager_->listeners().front().get().listenerScope().counterFromString("foo").inc(); EXPECT_EQ(1UL, server_.stats_store_.counterFromString("bar").value()); @@ -480,7 +547,7 @@ TEST_F(ListenerManagerImplTest, NotDefaultListenerFiltersTimeout) { )EOF"; EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true)); + EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true)); EXPECT_EQ(std::chrono::milliseconds(), manager_->listeners().front().get().listenerFiltersTimeout()); } @@ -501,8 +568,8 @@ TEST_F(ListenerManagerImplTest, ModifyOnlyDrainType) { ListenerHandle* listener_foo = expectListenerCreate(false, true, envoy::config::listener::v3::Listener::MODIFY_ONLY); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true)); - checkStats(1, 0, 0, 0, 1, 0); + EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); + checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); EXPECT_CALL(*listener_foo, onDestroy()); } @@ -525,8 +592,8 @@ drain_type: default ListenerHandle* listener_foo = expectListenerCreate(false, true); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true)); - checkStats(1, 0, 0, 0, 1, 0); + EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); + checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); // Update foo listener, but with a 
different address. Should throw. const std::string listener_foo_different_address_yaml = R"EOF( @@ -544,11 +611,11 @@ drain_type: modify_only expectListenerCreate(false, true, envoy::config::listener::v3::Listener::MODIFY_ONLY); EXPECT_CALL(*listener_foo_different_address, onDestroy()); EXPECT_THROW_WITH_MESSAGE( - manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_different_address_yaml), + manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_different_address_yaml), "", true), EnvoyException, "error updating listener: 'foo' has a different address " - "'127.0.0.1:1235' from existing listener"); + "'127.0.0.1:1235' from existing listener address '127.0.0.1:1234'"); EXPECT_CALL(*listener_foo, onDestroy()); } @@ -580,8 +647,8 @@ drain_type: default EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true)); - checkStats(1, 0, 0, 0, 1, 0); + EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); + checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); EXPECT_CALL(*listener_foo, onDestroy()); } @@ -612,8 +679,8 @@ drain_type: default EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true)); - checkStats(1, 0, 0, 0, 1, 0); + EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); + checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); EXPECT_CALL(*listener_foo, onDestroy()); } @@ -636,12 +703,12 @@ name: foo ListenerHandle* listener_foo = expectListenerCreate(false, false); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", false)); - checkStats(1, 0, 0, 0, 1, 0); + EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", 
false)); + checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); checkConfigDump(R"EOF( static_listeners: listener: - "@type": type.googleapis.com/envoy.api.v2.Listener + "@type": type.googleapis.com/envoy.config.listener.v3.Listener name: "foo" address: socket_address: @@ -663,16 +730,16 @@ name: foo filter_chains: - filters: - name: fake - config: {} + typed_config: {} )EOF"; EXPECT_FALSE( - manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_update1_yaml), "", false)); - checkStats(1, 0, 0, 0, 1, 0); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_update1_yaml), "", false)); + checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); // Remove foo listener. Should be blocked. EXPECT_FALSE(manager_->removeListener("foo")); - checkStats(1, 0, 0, 0, 1, 0); + checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); EXPECT_CALL(*listener_foo, onDestroy()); } @@ -702,15 +769,24 @@ name: "foo" filter_chains: {} )EOF"; + const std::string listener_foo_address_update_yaml = R"EOF( +name: "foo" +address: + socket_address: + address: "127.0.0.1" + port_value: 1235 +filter_chains: {} + )EOF"; + Init::ManagerImpl server_init_mgr("server-init-manager"); Init::ExpectableWatcherImpl server_init_watcher("server-init-watcher"); { // Add and remove a listener before starting workers. 
ListenerHandle* listener_foo = expectListenerCreate(true, true); EXPECT_CALL(server_, initManager()).WillOnce(ReturnRef(server_init_mgr)); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), + EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "version1", true)); - checkStats(1, 0, 0, 0, 1, 0); + checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); EXPECT_CALL(*lds_api, versionInfo()).WillOnce(Return("version1")); checkConfigDump(R"EOF( @@ -721,7 +797,7 @@ version_info: version1 warming_state: version_info: version1 listener: - "@type": type.googleapis.com/envoy.api.v2.Listener + "@type": type.googleapis.com/envoy.config.listener.v3.Listener name: foo address: socket_address: @@ -761,10 +837,10 @@ version_info: version1 EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); // Version 2 listener will be initialized by listener manager directly. EXPECT_CALL(listener_foo2->target_, initialize()).Times(1); - EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), + EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "version2", true)); // Version2 is in warming list as listener_foo2->target_ is not ready yet. - checkStats(/*added=*/2, 0, /*removed=*/1, /*warming=*/1, 0, 0); + checkStats(__LINE__, /*added=*/2, 0, /*removed=*/1, /*warming=*/1, 0, 0, 0); EXPECT_CALL(*lds_api, versionInfo()).WillOnce(Return("version2")); checkConfigDump(R"EOF( version_info: version2 @@ -774,7 +850,7 @@ version_info: version1 warming_state: version_info: version2 listener: - "@type": type.googleapis.com/envoy.api.v2.Listener + "@type": type.googleapis.com/envoy.config.listener.v3.Listener name: foo address: socket_address: @@ -785,6 +861,17 @@ version_info: version1 seconds: 2002002002 nanos: 2000000 )EOF"); + + // While it is in warming state, try updating the address. It should fail. 
+ ListenerHandle* listener_foo3 = expectListenerCreate(true, true); + EXPECT_CALL(*listener_foo3, onDestroy()); + EXPECT_THROW_WITH_MESSAGE( + manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_address_update_yaml), + "version3", true), + EnvoyException, + "error updating listener: 'foo' has a different address " + "'127.0.0.1:1235' from existing listener address '127.0.0.1:1234'"); + // Delete foo-listener again. EXPECT_CALL(*listener_foo2, onDestroy()); EXPECT_TRUE(manager_->removeListener("foo")); @@ -792,10 +879,9 @@ version_info: version1 } TEST_F(ListenerManagerImplTest, OverrideListener) { - time_system_.setSystemTime(std::chrono::milliseconds(1001001001001)); - InSequence s; + time_system_.setSystemTime(std::chrono::milliseconds(1001001001001)); auto* lds_api = new MockLdsApi(); EXPECT_CALL(listener_factory_, createLdsApi_(_)).WillOnce(Return(lds_api)); envoy::config::core::v3::ConfigSource lds_config; @@ -814,8 +900,8 @@ filter_chains: {} ListenerHandle* listener_foo = expectListenerCreate(false, true); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); EXPECT_TRUE( - manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "version1", true)); - checkStats(1, 0, 0, 0, 1, 0); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "version1", true)); + checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); // Start workers and capture ListenerImpl. Network::ListenerConfig* listener_config = nullptr; @@ -829,75 +915,40 @@ filter_chains: {} manager_->startWorkers(guard_dog_); EXPECT_EQ(0, server_.stats_store_.counter("listener_manager.listener_create_success").value()); + checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); - // TODO(lambdai): No need to invoke `addListenerToWorkerForTest` explicitly when intelligent warm - // up procedure is added. 
- ListenerImpl* listener_impl = dynamic_cast(listener_config); - auto overridden_listener = absl::make_optional(1); - EXPECT_CALL(*worker_, addListener(_, _, _)) - .WillOnce(Invoke([](absl::optional, Network::ListenerConfig&, - auto completion) -> void { completion(true); })); - manager_->addListenerToWorkerForTest(*worker_, overridden_listener, *listener_impl, nullptr); - - EXPECT_EQ(1, server_.stats_store_.counter("listener_manager.listener_create_success").value()); - EXPECT_CALL(*listener_foo, onDestroy()); -} - -TEST_F(ListenerManagerImplTest, DrainFilterChains) { - time_system_.setSystemTime(std::chrono::milliseconds(1001001001001)); - - InSequence s; - - auto* lds_api = new MockLdsApi(); - EXPECT_CALL(listener_factory_, createLdsApi_(_)).WillOnce(Return(lds_api)); - envoy::config::core::v3::ConfigSource lds_config; - manager_->createLdsApi(lds_config); - - // Add foo listener. - const std::string listener_foo_yaml = R"EOF( -name: "foo" + // Update foo into warming. + const std::string listener_foo_update1_yaml = R"EOF( +name: foo address: socket_address: - address: "127.0.0.1" + address: 127.0.0.1 port_value: 1234 filter_chains: - filters: - - name: fake - config: {} + filter_chain_match: + destination_port: 1234 )EOF"; - ListenerHandle* listener_foo = expectListenerCreate(false, true); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + ListenerHandle* listener_foo_update1 = expectListenerOverridden(false); + EXPECT_CALL(*worker_, addListener(_, _, _)); + auto* timer = new Event::MockTimer(dynamic_cast(&server_.dispatcher())); + EXPECT_CALL(*timer, enableTimer(_, _)); EXPECT_TRUE( - manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "version1", true)); - checkStats(1, 0, 0, 0, 1, 0); - - // Start workers and capture ListenerImpl. 
- Network::ListenerConfig* listener_config = nullptr; - EXPECT_CALL(*worker_, addListener(_, _, _)) - .WillOnce(Invoke([&listener_config](auto, Network::ListenerConfig& config, auto) -> void { - listener_config = &config; - })) - .RetiresOnSaturation(); - - EXPECT_CALL(*worker_, start(_)); - manager_->startWorkers(guard_dog_); - - ENVOY_LOG_MISC(debug, "lambdai: config ptr {}", static_cast(listener_config)); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_update1_yaml), "", true)); + EXPECT_EQ(1UL, manager_->listeners().size()); - EXPECT_EQ(0, server_.stats_store_.counter("listener_manager.listener_create_success").value()); + worker_->callAddCompletion(true); + checkStats(__LINE__, 1, 1, 0, 0, 1, 0, 1); - // TODO(lambdai): No need to invoke `drainFilterChains` explicitly when intelligent warm - // up procedure is added. - ListenerImpl* listener_impl = dynamic_cast(listener_config); - ASSERT(listener_impl != nullptr); - auto* timer = new Event::MockTimer(dynamic_cast(&server_.dispatcher())); - EXPECT_CALL(*timer, enableTimer(_, _)); - manager_->drainFilterChainsForTest(listener_impl); EXPECT_CALL(*worker_, removeFilterChains(_, _, _)); timer->invokeCallback(); EXPECT_CALL(*listener_foo, onDestroy()); worker_->callDrainFilterChainsComplete(); + + EXPECT_EQ(1UL, manager_->listeners().size()); + EXPECT_CALL(*listener_foo_update1, onDestroy()); + EXPECT_EQ(1, server_.stats_store_.counter("listener_manager.listener_create_success").value()); } TEST_F(ListenerManagerImplTest, AddOrUpdateListener) { @@ -928,8 +979,8 @@ filter_chains: {} ListenerHandle* listener_foo = expectListenerCreate(false, true); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); EXPECT_TRUE( - manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "version1", true)); - checkStats(1, 0, 0, 0, 1, 0); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "version1", true)); + checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); 
EXPECT_CALL(*lds_api, versionInfo()).WillOnce(Return("version1")); checkConfigDump(R"EOF( version_info: version1 @@ -939,7 +990,7 @@ version_info: version1 warming_state: version_info: version1 listener: - "@type": type.googleapis.com/envoy.api.v2.Listener + "@type": type.googleapis.com/envoy.config.listener.v3.Listener name: foo address: socket_address: @@ -952,8 +1003,8 @@ version_info: version1 )EOF"); // Update duplicate should be a NOP. - EXPECT_FALSE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true)); - checkStats(1, 0, 0, 0, 1, 0); + EXPECT_FALSE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); + checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); // Update foo listener. Should share socket. const std::string listener_foo_update1_yaml = R"EOF( @@ -970,9 +1021,9 @@ per_connection_buffer_limit_bytes: 10 ListenerHandle* listener_foo_update1 = expectListenerCreate(false, true); EXPECT_CALL(*listener_foo, onDestroy()); - EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_update1_yaml), + EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_update1_yaml), "version2", true)); - checkStats(1, 1, 0, 0, 1, 0); + checkStats(__LINE__, 1, 1, 0, 0, 1, 0, 0); EXPECT_CALL(*lds_api, versionInfo()).WillOnce(Return("version2")); checkConfigDump(R"EOF( version_info: version2 @@ -982,7 +1033,7 @@ version_info: version2 warming_state: version_info: version2 listener: - "@type": type.googleapis.com/envoy.api.v2.Listener + "@type": type.googleapis.com/envoy.config.listener.v3.Listener name: foo address: socket_address: @@ -1019,8 +1070,8 @@ version_info: version2 // Update duplicate should be a NOP. 
EXPECT_FALSE( - manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_update1_yaml), "", true)); - checkStats(1, 1, 0, 0, 1, 0); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_update1_yaml), "", true)); + checkStats(__LINE__, 1, 1, 0, 0, 1, 0, 0); time_system_.setSystemTime(std::chrono::milliseconds(3003003003003)); @@ -1031,9 +1082,9 @@ version_info: version2 EXPECT_CALL(*worker_, stopListener(_, _)); EXPECT_CALL(*listener_foo_update1->drain_manager_, startDrainSequence(_)); EXPECT_TRUE( - manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "version3", true)); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "version3", true)); worker_->callAddCompletion(true); - checkStats(1, 2, 0, 0, 1, 1); + checkStats(__LINE__, 1, 2, 0, 0, 1, 1, 0); EXPECT_CALL(*lds_api, versionInfo()).WillOnce(Return("version3")); checkConfigDump(R"EOF( version_info: version3 @@ -1043,7 +1094,7 @@ version_info: version3 active_state: version_info: version3 listener: - "@type": type.googleapis.com/envoy.api.v2.Listener + "@type": type.googleapis.com/envoy.config.listener.v3.Listener name: foo address: socket_address: @@ -1056,7 +1107,7 @@ version_info: version3 draining_state: version_info: version2 listener: - "@type": type.googleapis.com/envoy.api.v2.Listener + "@type": type.googleapis.com/envoy.config.listener.v3.Listener name: foo address: socket_address: @@ -1071,10 +1122,10 @@ version_info: version3 EXPECT_CALL(*worker_, removeListener(_, _)); listener_foo_update1->drain_manager_->drain_sequence_completion_(); - checkStats(1, 2, 0, 0, 1, 1); + checkStats(__LINE__, 1, 2, 0, 0, 1, 1, 0); EXPECT_CALL(*listener_foo_update1, onDestroy()); worker_->callRemovalCompletion(); - checkStats(1, 2, 0, 0, 1, 0); + checkStats(__LINE__, 1, 2, 0, 0, 1, 0, 0); time_system_.setSystemTime(std::chrono::milliseconds(4004004004004)); @@ -1092,10 +1143,10 @@ filter_chains: {} EXPECT_CALL(listener_factory_, createListenSocket(_, 
_, _, {true})); EXPECT_CALL(*worker_, addListener(_, _, _)); EXPECT_TRUE( - manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_bar_yaml), "version4", true)); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_bar_yaml), "version4", true)); EXPECT_EQ(2UL, manager_->listeners().size()); worker_->callAddCompletion(true); - checkStats(2, 2, 0, 0, 2, 0); + checkStats(__LINE__, 2, 2, 0, 0, 2, 0, 0); time_system_.setSystemTime(std::chrono::milliseconds(5005005005005)); @@ -1113,9 +1164,9 @@ filter_chains: {} EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); EXPECT_CALL(listener_baz->target_, initialize()); EXPECT_TRUE( - manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_baz_yaml), "version5", true)); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_baz_yaml), "version5", true)); EXPECT_EQ(2UL, manager_->listeners().size()); - checkStats(3, 2, 0, 1, 2, 0); + checkStats(__LINE__, 3, 2, 0, 1, 2, 0, 0); EXPECT_CALL(*lds_api, versionInfo()).WillOnce(Return("version5")); checkConfigDump(R"EOF( version_info: version5 @@ -1124,7 +1175,7 @@ version_info: version5 active_state: version_info: version3 listener: - "@type": type.googleapis.com/envoy.api.v2.Listener + "@type": type.googleapis.com/envoy.config.listener.v3.Listener name: foo address: socket_address: @@ -1138,7 +1189,7 @@ version_info: version5 active_state: version_info: version4 listener: - "@type": type.googleapis.com/envoy.api.v2.Listener + "@type": type.googleapis.com/envoy.config.listener.v3.Listener name: bar address: socket_address: @@ -1152,7 +1203,7 @@ version_info: version5 warming_state: version_info: version5 listener: - "@type": type.googleapis.com/envoy.api.v2.Listener + "@type": type.googleapis.com/envoy.config.listener.v3.Listener name: baz address: socket_address: @@ -1165,8 +1216,8 @@ version_info: version5 )EOF"); // Update a duplicate baz that is currently warming. 
- EXPECT_FALSE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_baz_yaml), "", true)); - checkStats(3, 2, 0, 1, 2, 0); + EXPECT_FALSE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_baz_yaml), "", true)); + checkStats(__LINE__, 3, 2, 0, 1, 2, 0, 0); // Update baz while it is warming. const std::string listener_baz_update1_yaml = R"EOF( @@ -1178,7 +1229,7 @@ name: baz filter_chains: - filters: - name: fake - config: {} + typed_config: {} )EOF"; ListenerHandle* listener_baz_update1 = expectListenerCreate(true, true); @@ -1188,16 +1239,16 @@ name: baz })); EXPECT_CALL(listener_baz_update1->target_, initialize()); EXPECT_TRUE( - manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_baz_update1_yaml), "", true)); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_baz_update1_yaml), "", true)); EXPECT_EQ(2UL, manager_->listeners().size()); - checkStats(3, 3, 0, 1, 2, 0); + checkStats(__LINE__, 3, 3, 0, 1, 2, 0, 0); // Finish initialization for baz which should make it active. EXPECT_CALL(*worker_, addListener(_, _, _)); listener_baz_update1->target_.ready(); EXPECT_EQ(3UL, manager_->listeners().size()); worker_->callAddCompletion(true); - checkStats(3, 3, 0, 0, 3, 0); + checkStats(__LINE__, 3, 3, 0, 0, 3, 0, 0); EXPECT_CALL(*listener_foo_update2, onDestroy()); EXPECT_CALL(*listener_bar, onDestroy()); @@ -1228,9 +1279,9 @@ name: foo ListenerHandle* listener_foo = expectListenerCreate(false, true); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); EXPECT_CALL(*worker_, addListener(_, _, _)); - EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true)); + EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); worker_->callAddCompletion(true); - checkStats(1, 0, 0, 0, 1, 0); + checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); // Remove foo into draining. 
std::function stop_completion; @@ -1242,24 +1293,24 @@ name: foo })); EXPECT_CALL(*listener_foo->drain_manager_, startDrainSequence(_)); EXPECT_TRUE(manager_->removeListener("foo")); - checkStats(1, 0, 1, 0, 0, 1); + checkStats(__LINE__, 1, 0, 1, 0, 0, 1, 0); EXPECT_CALL(*worker_, removeListener(_, _)); listener_foo->drain_manager_->drain_sequence_completion_(); - checkStats(1, 0, 1, 0, 0, 1); + checkStats(__LINE__, 1, 0, 1, 0, 0, 1, 0); // Add foo again. We should use the socket from draining. ListenerHandle* listener_foo2 = expectListenerCreate(false, true); EXPECT_CALL(*worker_, addListener(_, _, _)); - EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true)); + EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); worker_->callAddCompletion(true); - checkStats(2, 0, 1, 0, 1, 1); + checkStats(__LINE__, 2, 0, 1, 0, 1, 1, 0); EXPECT_CALL(*listener_factory_.socket_, close()).Times(0); stop_completion(); EXPECT_CALL(*listener_foo, onDestroy()); worker_->callRemovalCompletion(); - checkStats(2, 0, 1, 0, 1, 0); + checkStats(__LINE__, 2, 0, 1, 0, 1, 0, 0); EXPECT_CALL(*listener_foo2, onDestroy()); } @@ -1288,31 +1339,31 @@ name: foo ListenerHandle* listener_foo = expectListenerCreate(false, true); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); EXPECT_CALL(*worker_, addListener(_, _, _)); - EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true)); + EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); worker_->callAddCompletion(true); - checkStats(1, 0, 0, 0, 1, 0); + checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); // Remove foo into draining. 
EXPECT_CALL(*worker_, stopListener(_, _)); EXPECT_CALL(*listener_factory_.socket_, close()); EXPECT_CALL(*listener_foo->drain_manager_, startDrainSequence(_)); EXPECT_TRUE(manager_->removeListener("foo")); - checkStats(1, 0, 1, 0, 0, 1); + checkStats(__LINE__, 1, 0, 1, 0, 0, 1, 0); EXPECT_CALL(*worker_, removeListener(_, _)); listener_foo->drain_manager_->drain_sequence_completion_(); - checkStats(1, 0, 1, 0, 0, 1); + checkStats(__LINE__, 1, 0, 1, 0, 0, 1, 0); // Add foo again. We should use the socket from draining. ListenerHandle* listener_foo2 = expectListenerCreate(false, true); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); EXPECT_CALL(*worker_, addListener(_, _, _)); - EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true)); + EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); worker_->callAddCompletion(true); - checkStats(2, 0, 1, 0, 1, 1); + checkStats(__LINE__, 2, 0, 1, 0, 1, 1, 0); EXPECT_CALL(*listener_foo, onDestroy()); worker_->callRemovalCompletion(); - checkStats(2, 0, 1, 0, 1, 0); + checkStats(__LINE__, 2, 0, 1, 0, 1, 0, 0); EXPECT_CALL(*listener_foo2, onDestroy()); } @@ -1341,7 +1392,7 @@ name: foo EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, ListenSocketCreationParams(false))) .WillOnce(Invoke([this, &syscall_result, &real_listener_factory]( const Network::Address::InstanceConstSharedPtr& address, - Network::Address::SocketType socket_type, + Network::Socket::Type socket_type, const Network::Socket::OptionsSharedPtr& options, const ListenSocketCreationParams& params) -> Network::SocketSharedPtr { EXPECT_CALL(server_, hotRestart).Times(0); @@ -1352,7 +1403,7 @@ name: foo })); EXPECT_CALL(listener_foo->target_, initialize()); EXPECT_CALL(*listener_foo, onDestroy()); - EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true)); + 
EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); } TEST_F(ListenerManagerImplTest, ReusePortEqualToTrue) { @@ -1378,7 +1429,7 @@ reuse_port: true EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {{true, false}})) .WillOnce(Invoke([this, &syscall_result, &real_listener_factory]( const Network::Address::InstanceConstSharedPtr& address, - Network::Address::SocketType socket_type, + Network::Socket::Type socket_type, const Network::Socket::OptionsSharedPtr& options, const ListenSocketCreationParams& params) -> Network::SocketSharedPtr { EXPECT_CALL(server_, hotRestart).Times(0); @@ -1387,14 +1438,14 @@ reuse_port: true })); EXPECT_CALL(listener_foo->target_, initialize()); EXPECT_CALL(*listener_foo, onDestroy()); - EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true)); + EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); } TEST_F(ListenerManagerImplTest, NotSupportedDatagramUds) { ProdListenerComponentFactory real_listener_factory(server_); EXPECT_THROW_WITH_MESSAGE(real_listener_factory.createListenSocket( std::make_shared("/foo"), - Network::Address::SocketType::Datagram, nullptr, {true}), + Network::Socket::Type::Datagram, nullptr, {true}), EnvoyException, "socket type SocketType::Datagram not supported for pipes"); } @@ -1420,14 +1471,14 @@ name: foo EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})) .WillOnce(Throw(EnvoyException("can't bind"))); EXPECT_CALL(*listener_foo, onDestroy()); - EXPECT_THROW(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true), + EXPECT_THROW(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true), EnvoyException); checkConfigDump(R"EOF( dynamic_listeners: - name: foo error_state: failed_configuration: - "@type": type.googleapis.com/envoy.api.v2.Listener + "@type": 
type.googleapis.com/envoy.config.listener.v3.Listener name: foo address: socket_address: @@ -1508,9 +1559,9 @@ name: foo ListenerHandle* listener_foo = expectListenerCreate(false, true); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); EXPECT_CALL(*worker_, addListener(_, _, _)); - EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true)); + EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); worker_->callAddCompletion(true); - checkStats(1, 0, 0, 0, 1, 0); + checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); EXPECT_CALL(*listener_foo->drain_manager_, drainClose()).WillOnce(Return(false)); EXPECT_CALL(server_.drain_manager_, drainClose()).WillOnce(Return(false)); @@ -1519,7 +1570,7 @@ name: foo EXPECT_CALL(*worker_, stopListener(_, _)); EXPECT_CALL(*listener_foo->drain_manager_, startDrainSequence(_)); EXPECT_TRUE(manager_->removeListener("foo")); - checkStats(1, 0, 1, 0, 0, 1); + checkStats(__LINE__, 1, 0, 1, 0, 0, 1, 0); // NOTE: || short circuit here prevents the server drain manager from getting called. 
EXPECT_CALL(*listener_foo->drain_manager_, drainClose()).WillOnce(Return(true)); @@ -1527,7 +1578,7 @@ name: foo EXPECT_CALL(*worker_, removeListener(_, _)); listener_foo->drain_manager_->drain_sequence_completion_(); - checkStats(1, 0, 1, 0, 0, 1); + checkStats(__LINE__, 1, 0, 1, 0, 0, 1, 0); EXPECT_CALL(*listener_foo->drain_manager_, drainClose()).WillOnce(Return(false)); EXPECT_CALL(server_.drain_manager_, drainClose()).WillOnce(Return(true)); @@ -1536,7 +1587,7 @@ name: foo EXPECT_CALL(*listener_foo, onDestroy()); worker_->callRemovalCompletion(); EXPECT_EQ(0UL, manager_->listeners().size()); - checkStats(1, 0, 1, 0, 0, 0); + checkStats(__LINE__, 1, 0, 1, 0, 0, 0, 0); } TEST_F(ListenerManagerImplTest, RemoveListener) { @@ -1562,27 +1613,27 @@ name: foo ListenerHandle* listener_foo = expectListenerCreate(true, true); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); EXPECT_CALL(listener_foo->target_, initialize()); - EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true)); + EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); EXPECT_EQ(0UL, manager_->listeners().size()); - checkStats(1, 0, 0, 1, 0, 0); + checkStats(__LINE__, 1, 0, 0, 1, 0, 0, 0); // Remove foo. EXPECT_CALL(*listener_foo, onDestroy()); EXPECT_TRUE(manager_->removeListener("foo")); EXPECT_EQ(0UL, manager_->listeners().size()); - checkStats(1, 0, 1, 0, 0, 0); + checkStats(__LINE__, 1, 0, 1, 0, 0, 0, 0); // Add foo again and initialize it. 
listener_foo = expectListenerCreate(true, true); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); EXPECT_CALL(listener_foo->target_, initialize()); - EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true)); - checkStats(2, 0, 1, 1, 0, 0); + EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); + checkStats(__LINE__, 2, 0, 1, 1, 0, 0, 0); EXPECT_CALL(*worker_, addListener(_, _, _)); listener_foo->target_.ready(); worker_->callAddCompletion(true); EXPECT_EQ(1UL, manager_->listeners().size()); - checkStats(2, 0, 1, 0, 1, 0); + checkStats(__LINE__, 2, 0, 1, 0, 1, 0, 0); // Update foo into warming. const std::string listener_foo_update1_yaml = R"EOF( @@ -1591,18 +1642,17 @@ name: foo socket_address: address: 127.0.0.1 port_value: 1234 +per_connection_buffer_limit_bytes: 999 filter_chains: -- filters: - - name: fake - config: {} +- filters: [] )EOF"; ListenerHandle* listener_foo_update1 = expectListenerCreate(true, true); EXPECT_CALL(listener_foo_update1->target_, initialize()); EXPECT_TRUE( - manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_update1_yaml), "", true)); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_update1_yaml), "", true)); EXPECT_EQ(1UL, manager_->listeners().size()); - checkStats(2, 1, 1, 1, 1, 0); + checkStats(__LINE__, 2, 1, 1, 1, 1, 0, 0); // Remove foo which should remove both warming and active. 
EXPECT_CALL(*listener_foo_update1, onDestroy()); @@ -1610,14 +1660,14 @@ name: foo EXPECT_CALL(*listener_factory_.socket_, close()); EXPECT_CALL(*listener_foo->drain_manager_, startDrainSequence(_)); EXPECT_TRUE(manager_->removeListener("foo")); - checkStats(2, 1, 2, 0, 0, 1); + checkStats(__LINE__, 2, 1, 2, 0, 0, 1, 0); EXPECT_CALL(*worker_, removeListener(_, _)); listener_foo->drain_manager_->drain_sequence_completion_(); - checkStats(2, 1, 2, 0, 0, 1); + checkStats(__LINE__, 2, 1, 2, 0, 0, 1, 0); EXPECT_CALL(*listener_foo, onDestroy()); worker_->callRemovalCompletion(); EXPECT_EQ(0UL, manager_->listeners().size()); - checkStats(2, 1, 2, 0, 0, 0); + checkStats(__LINE__, 2, 1, 2, 0, 0, 0, 0); } // Validates that StopListener functionality works correctly when only inbound listeners are @@ -1643,13 +1693,14 @@ traffic_direction: INBOUND ListenerHandle* listener_foo = expectListenerCreate(true, true); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); EXPECT_CALL(listener_foo->target_, initialize()); - EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true)); - checkStats(1, 0, 0, 1, 0, 0); + auto foo_inbound_proto = parseListenerFromV3Yaml(listener_foo_yaml); + EXPECT_TRUE(manager_->addOrUpdateListener(foo_inbound_proto, "", true)); + checkStats(__LINE__, 1, 0, 0, 1, 0, 0, 0); EXPECT_CALL(*worker_, addListener(_, _, _)); listener_foo->target_.ready(); worker_->callAddCompletion(true); EXPECT_EQ(1UL, manager_->listeners().size()); - checkStats(1, 0, 0, 0, 1, 0); + checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); // Add a listener in outbound direction. 
const std::string listener_foo_outbound_yaml = R"EOF( @@ -1667,7 +1718,7 @@ traffic_direction: OUTBOUND EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); EXPECT_CALL(listener_foo_outbound->target_, initialize()); EXPECT_TRUE( - manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_outbound_yaml), "", true)); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_outbound_yaml), "", true)); EXPECT_CALL(*worker_, addListener(_, _, _)); listener_foo_outbound->target_.ready(); worker_->callAddCompletion(true); @@ -1695,7 +1746,7 @@ traffic_direction: OUTBOUND EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); EXPECT_CALL(*worker_, addListener(_, _, _)); EXPECT_TRUE( - manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_bar_outbound_yaml), "", true)); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_bar_outbound_yaml), "", true)); EXPECT_EQ(3UL, manager_->listeners().size()); worker_->callAddCompletion(true); @@ -1710,7 +1761,16 @@ traffic_direction: INBOUND filter_chains: - filters: [] )EOF"; - EXPECT_FALSE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_bar_yaml), "", true)); + EXPECT_FALSE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_bar_yaml), "", true)); + + // Explicitly validate that in place filter chain update is not allowed. 
+ auto in_place_foo_inbound_proto = foo_inbound_proto; + in_place_foo_inbound_proto.mutable_filter_chains(0) + ->mutable_filter_chain_match() + ->mutable_destination_port() + ->set_value(9999); + + EXPECT_FALSE(manager_->addOrUpdateListener(in_place_foo_inbound_proto, "", true)); EXPECT_CALL(*listener_foo, onDestroy()); EXPECT_CALL(*listener_foo_outbound, onDestroy()); EXPECT_CALL(*listener_bar_outbound, onDestroy()); @@ -1737,13 +1797,13 @@ name: foo ListenerHandle* listener_foo = expectListenerCreate(true, true); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); EXPECT_CALL(listener_foo->target_, initialize()); - EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true)); - checkStats(1, 0, 0, 1, 0, 0); + EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); + checkStats(__LINE__, 1, 0, 0, 1, 0, 0, 0); EXPECT_CALL(*worker_, addListener(_, _, _)); listener_foo->target_.ready(); worker_->callAddCompletion(true); EXPECT_EQ(1UL, manager_->listeners().size()); - checkStats(1, 0, 0, 0, 1, 0); + checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); EXPECT_CALL(*worker_, stopListener(_, _)); EXPECT_CALL(*listener_factory_.socket_, close()); @@ -1761,7 +1821,7 @@ name: bar filter_chains: - filters: [] )EOF"; - EXPECT_FALSE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_bar_yaml), "", true)); + EXPECT_FALSE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_bar_yaml), "", true)); } // Validate that stopping a warming listener, removes directly from warming listener list. 
@@ -1786,13 +1846,13 @@ traffic_direction: INBOUND ListenerHandle* listener_foo = expectListenerCreate(true, true); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); EXPECT_CALL(listener_foo->target_, initialize()); - EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true)); - checkStats(1, 0, 0, 1, 0, 0); + EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); + checkStats(__LINE__, 1, 0, 0, 1, 0, 0, 0); EXPECT_CALL(*worker_, addListener(_, _, _)); listener_foo->target_.ready(); worker_->callAddCompletion(true); EXPECT_EQ(1UL, manager_->listeners().size()); - checkStats(1, 0, 0, 0, 1, 0); + checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); // Update foo into warming. const std::string listener_foo_update1_yaml = R"EOF( @@ -1802,16 +1862,15 @@ traffic_direction: INBOUND socket_address: address: 127.0.0.1 port_value: 1234 +per_connection_buffer_limit_bytes: 999 filter_chains: -- filters: - - name: fake - config: {} +- filters: [] )EOF"; ListenerHandle* listener_foo_update1 = expectListenerCreate(true, true); EXPECT_CALL(listener_foo_update1->target_, initialize()); EXPECT_TRUE( - manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_update1_yaml), "", true)); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_update1_yaml), "", true)); EXPECT_EQ(1UL, manager_->listeners().size()); // Stop foo which should remove warming listener. 
@@ -1843,7 +1902,44 @@ name: foo ListenerHandle* listener_foo = expectListenerCreate(false, true); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); EXPECT_CALL(*worker_, addListener(_, _, _)); - EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true)); + EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); + + EXPECT_CALL(*worker_, stopListener(_, _)); + EXPECT_CALL(*listener_foo->drain_manager_, startDrainSequence(_)); + worker_->callAddCompletion(false); + + EXPECT_CALL(*worker_, removeListener(_, _)); + listener_foo->drain_manager_->drain_sequence_completion_(); + + EXPECT_CALL(*listener_foo, onDestroy()); + worker_->callRemovalCompletion(); + + EXPECT_EQ( + 1UL, + server_.stats_store_.counterFromString("listener_manager.listener_create_failure").value()); +} + +TEST_F(ListenerManagerImplTest, StaticListenerAddFailure) { + InSequence s; + + EXPECT_CALL(*worker_, start(_)); + manager_->startWorkers(guard_dog_); + + // Add foo listener into active. 
+ const std::string listener_foo_yaml = R"EOF( +name: foo +address: + socket_address: + address: 0.0.0.0 + port_value: 1234 +filter_chains: +- filters: [] + )EOF"; + + ListenerHandle* listener_foo = expectListenerCreate(false, false); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(*worker_, addListener(_, _, _)); + EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", false)); EXPECT_CALL(*worker_, stopListener(_, _)); EXPECT_CALL(*listener_foo->drain_manager_, startDrainSequence(_)); @@ -1858,6 +1954,7 @@ name: foo EXPECT_EQ( 1UL, server_.stats_store_.counterFromString("listener_manager.listener_create_failure").value()); + EXPECT_EQ(0, manager_->listeners().size()); } TEST_F(ListenerManagerImplTest, StatsNameValidCharacterTest) { @@ -1870,7 +1967,7 @@ TEST_F(ListenerManagerImplTest, StatsNameValidCharacterTest) { - filters: [] )EOF"; - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); manager_->listeners().front().get().listenerScope().counterFromString("foo").inc(); EXPECT_EQ(1UL, server_.stats_store_.counterFromString("listener.[__1]_10000.foo").value()); @@ -1898,7 +1995,7 @@ name: foo ListenerHandle* listener_foo = expectListenerCreate(true, true); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, ListenSocketCreationParams(false))); EXPECT_CALL(listener_foo->target_, initialize()); - EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true)); + EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); // Add bar with same non-binding address. Should fail. 
const std::string listener_bar_yaml = R"EOF( @@ -1916,7 +2013,7 @@ name: bar ListenerHandle* listener_bar = expectListenerCreate(true, true); EXPECT_CALL(*listener_bar, onDestroy()); EXPECT_THROW_WITH_MESSAGE( - manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_bar_yaml), "", true), + manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_bar_yaml), "", true), EnvoyException, "error adding listener: 'bar' has duplicate address '0.0.0.0:1234' as existing listener"); @@ -1928,7 +2025,7 @@ name: bar listener_bar = expectListenerCreate(true, true); EXPECT_CALL(*listener_bar, onDestroy()); EXPECT_THROW_WITH_MESSAGE( - manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_bar_yaml), "", true), + manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_bar_yaml), "", true), EnvoyException, "error adding listener: 'bar' has duplicate address '0.0.0.0:1234' as existing listener"); @@ -1949,7 +2046,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithDestinationP socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - config: {} + typed_config: {} filter_chains: - filter_chain_match: destination_port: 8080 @@ -1966,7 +2063,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithDestinationP EXPECT_CALL(server_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); // IPv4 client connects to unknown port - no match. 
@@ -1995,7 +2092,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithDestinationI socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - config: {} + typed_config: {} filter_chains: - filter_chain_match: prefix_ranges: { address_prefix: 127.0.0.0, prefix_len: 8 } @@ -2012,7 +2109,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithDestinationI EXPECT_CALL(server_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); // IPv4 client connects to unknown IP - no match. @@ -2041,7 +2138,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithServerNamesM socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - config: {} + typed_config: {} filter_chains: - filter_chain_match: server_names: "server1.example.com" @@ -2058,7 +2155,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithServerNamesM EXPECT_CALL(server_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); // TLS client without SNI - no match. 
@@ -2088,7 +2185,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithTransportPro socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - config: {} + typed_config: {} filter_chains: - filter_chain_match: transport_protocol: "tls" @@ -2105,7 +2202,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithTransportPro EXPECT_CALL(server_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); // TCP client - no match. @@ -2130,7 +2227,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithApplicationP socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - config: {} + typed_config: {} filter_chains: - filter_chain_match: application_protocols: "http/1.1" @@ -2148,7 +2245,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithApplicationP EXPECT_CALL(server_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); // TLS client without ALPN - no match. @@ -2156,7 +2253,10 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithApplicationP EXPECT_EQ(filter_chain, nullptr); // TLS client with "http/1.1" ALPN - using 1st filter chain. 
- filter_chain = findFilterChain(1234, "127.0.0.1", "", "tls", {"h2", "http/1.1"}, "8.8.8.8", 111); + filter_chain = findFilterChain( + 1234, "127.0.0.1", "", "tls", + {Http::Utility::AlpnNames::get().Http2, Http::Utility::AlpnNames::get().Http11}, "8.8.8.8", + 111); ASSERT_NE(filter_chain, nullptr); EXPECT_TRUE(filter_chain->transportSocketFactory().implementsSecureTransport()); auto transport_socket = filter_chain->transportSocketFactory().createTransportSocket(nullptr); @@ -2174,10 +2274,10 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithSourceTypeMa socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - config: {} + typed_config: {} filter_chains: - filter_chain_match: - source_type: LOCAL + source_type: SAME_IP_OR_LOOPBACK transport_socket: name: tls typed_config: @@ -2191,7 +2291,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithSourceTypeMa EXPECT_CALL(server_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); // EXTERNAL IPv4 client without "http/1.1" ALPN - no match. @@ -2199,8 +2299,10 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithSourceTypeMa EXPECT_EQ(filter_chain, nullptr); // LOCAL IPv4 client with "http/1.1" ALPN - using 1st filter chain. 
- filter_chain = - findFilterChain(1234, "127.0.0.1", "", "tls", {"h2", "http/1.1"}, "127.0.0.1", 111); + filter_chain = findFilterChain( + 1234, "127.0.0.1", "", "tls", + {Http::Utility::AlpnNames::get().Http2, Http::Utility::AlpnNames::get().Http11}, "127.0.0.1", + 111); ASSERT_NE(filter_chain, nullptr); EXPECT_TRUE(filter_chain->transportSocketFactory().implementsSecureTransport()); auto transport_socket = filter_chain->transportSocketFactory().createTransportSocket(nullptr); @@ -2211,8 +2313,10 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithSourceTypeMa EXPECT_EQ(server_names.front(), "server1.example.com"); // LOCAL UDS client with "http/1.1" ALPN - using 1st filter chain. - filter_chain = - findFilterChain(0, "/tmp/test.sock", "", "tls", {"h2", "http/1.1"}, "/tmp/test.sock", 111); + filter_chain = findFilterChain( + 0, "/tmp/test.sock", "", "tls", + {Http::Utility::AlpnNames::get().Http2, Http::Utility::AlpnNames::get().Http11}, + "/tmp/test.sock", 111); ASSERT_NE(filter_chain, nullptr); EXPECT_TRUE(filter_chain->transportSocketFactory().implementsSecureTransport()); transport_socket = filter_chain->transportSocketFactory().createTransportSocket(nullptr); @@ -2229,7 +2333,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithSourceIpMatc socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - config: {} + typed_config: {} filter_chains: - filter_chain_match: source_prefix_ranges: @@ -2248,7 +2352,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithSourceIpMatc EXPECT_CALL(server_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); // IPv4 client with source 10.0.1.1. No match. 
@@ -2256,8 +2360,10 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithSourceIpMatc EXPECT_EQ(filter_chain, nullptr); // IPv4 client with source 10.0.0.10, Match. - filter_chain = - findFilterChain(1234, "127.0.0.1", "", "tls", {"h2", "http/1.1"}, "10.0.0.10", 111); + filter_chain = findFilterChain( + 1234, "127.0.0.1", "", "tls", + {Http::Utility::AlpnNames::get().Http2, Http::Utility::AlpnNames::get().Http11}, "10.0.0.10", + 111); ASSERT_NE(filter_chain, nullptr); EXPECT_TRUE(filter_chain->transportSocketFactory().implementsSecureTransport()); auto transport_socket = filter_chain->transportSocketFactory().createTransportSocket(nullptr); @@ -2273,8 +2379,10 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithSourceIpMatc EXPECT_EQ(filter_chain, nullptr); // UDS client. No match. - filter_chain = - findFilterChain(0, "/tmp/test.sock", "", "tls", {"h2", "http/1.1"}, "/tmp/test.sock", 0); + filter_chain = findFilterChain( + 0, "/tmp/test.sock", "", "tls", + {Http::Utility::AlpnNames::get().Http2, Http::Utility::AlpnNames::get().Http11}, + "/tmp/test.sock", 0); ASSERT_EQ(filter_chain, nullptr); } @@ -2285,7 +2393,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithSourceIpv6Ma socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - config: {} + typed_config: {} filter_chains: - filter_chain_match: source_prefix_ranges: @@ -2304,7 +2412,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithSourceIpv6Ma EXPECT_CALL(server_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); // IPv6 client with matching subnet. Match. 
@@ -2325,7 +2433,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithSourcePortMa socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - config: {} + typed_config: {} filter_chains: - filter_chain_match: source_ports: @@ -2343,7 +2451,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithSourcePortMa EXPECT_CALL(server_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); // Client with source port 100. Match. @@ -2358,7 +2466,10 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithSourcePortMa EXPECT_EQ(server_names.front(), "server1.example.com"); // Client with source port 101. No match. - filter_chain = findFilterChain(1234, "8.8.8.8", "", "tls", {"h2", "http/1.1"}, "4.4.4.4", 101); + filter_chain = findFilterChain( + 1234, "8.8.8.8", "", "tls", + {Http::Utility::AlpnNames::get().Http2, Http::Utility::AlpnNames::get().Http11}, "4.4.4.4", + 101); ASSERT_EQ(filter_chain, nullptr); } @@ -2369,10 +2480,10 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainWithSourceType socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - config: {} + typed_config: {} filter_chains: - filter_chain_match: - source_type: LOCAL + source_type: SAME_IP_OR_LOOPBACK transport_socket: name: tls typed_config: @@ -2407,12 +2518,14 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainWithSourceType EXPECT_CALL(server_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", 
true); EXPECT_EQ(1U, manager_->listeners().size()); // LOCAL TLS client with "http/1.1" ALPN - no match. - auto filter_chain = - findFilterChain(1234, "127.0.0.1", "", "tls", {"h2", "http/1.1"}, "127.0.0.1", 111); + auto filter_chain = findFilterChain( + 1234, "127.0.0.1", "", "tls", + {Http::Utility::AlpnNames::get().Http2, Http::Utility::AlpnNames::get().Http11}, "127.0.0.1", + 111); EXPECT_EQ(filter_chain, nullptr); // LOCAL TLS client without "http/1.1" ALPN - using 1st filter chain. @@ -2427,7 +2540,10 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainWithSourceType EXPECT_EQ(server_names.front(), "server1.example.com"); // EXTERNAL TLS client with "http/1.1" ALPN - using 2nd filter chain. - filter_chain = findFilterChain(1234, "8.8.8.8", "", "tls", {"h2", "http/1.1"}, "4.4.4.4", 111); + filter_chain = findFilterChain( + 1234, "8.8.8.8", "", "tls", + {Http::Utility::AlpnNames::get().Http2, Http::Utility::AlpnNames::get().Http11}, "4.4.4.4", + 111); ASSERT_NE(filter_chain, nullptr); EXPECT_TRUE(filter_chain->transportSocketFactory().implementsSecureTransport()); transport_socket = filter_chain->transportSocketFactory().createTransportSocket(nullptr); @@ -2452,7 +2568,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithDestinati socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - config: {} + typed_config: {} filter_chains: - filter_chain_match: # empty @@ -2489,7 +2605,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithDestinati EXPECT_CALL(server_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); // IPv4 client connects to default port - using 1st filter chain. 
@@ -2538,7 +2654,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithDestinati socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - config: {} + typed_config: {} filter_chains: - filter_chain_match: # empty @@ -2575,7 +2691,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithDestinati EXPECT_CALL(server_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); // IPv4 client connects to default IP - using 1st filter chain. @@ -2624,7 +2740,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithServerNam socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - config: {} + typed_config: {} filter_chains: - filter_chain_match: # empty @@ -2670,7 +2786,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithServerNam EXPECT_CALL(server_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); // TLS client without SNI - using 1st filter chain. 
@@ -2723,7 +2839,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithTransport socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - config: {} + typed_config: {} filter_chains: - filter_chain_match: # empty @@ -2742,7 +2858,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithTransport EXPECT_CALL(server_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); // TCP client - using 1st filter chain. @@ -2768,7 +2884,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithApplicati socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - config: {} + typed_config: {} filter_chains: - filter_chain_match: # empty @@ -2787,7 +2903,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithApplicati EXPECT_CALL(server_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); // TLS client without ALPN - using 1st filter chain. @@ -2796,8 +2912,10 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithApplicati EXPECT_FALSE(filter_chain->transportSocketFactory().implementsSecureTransport()); // TLS client with "h2,http/1.1" ALPN - using 2nd filter chain. 
- filter_chain = - findFilterChain(1234, "127.0.0.1", "", "tls", {"h2", "http/1.1"}, "127.0.0.1", 111); + filter_chain = findFilterChain( + 1234, "127.0.0.1", "", "tls", + {Http::Utility::AlpnNames::get().Http2, Http::Utility::AlpnNames::get().Http11}, "127.0.0.1", + 111); ASSERT_NE(filter_chain, nullptr); EXPECT_TRUE(filter_chain->transportSocketFactory().implementsSecureTransport()); auto transport_socket = filter_chain->transportSocketFactory().createTransportSocket(nullptr); @@ -2814,7 +2932,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithMultipleR socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - config: {} + typed_config: {} filter_chains: - filter_chain_match: # empty @@ -2835,7 +2953,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithMultipleR EXPECT_CALL(server_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); // TLS client without SNI and ALPN - using 1st filter chain. @@ -2849,14 +2967,18 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithMultipleR EXPECT_EQ(filter_chain, nullptr); // TLS client with ALPN match but without SNI - using 1st filter chain. - filter_chain = - findFilterChain(1234, "127.0.0.1", "", "tls", {"h2", "http/1.1"}, "127.0.0.1", 111); + filter_chain = findFilterChain( + 1234, "127.0.0.1", "", "tls", + {Http::Utility::AlpnNames::get().Http2, Http::Utility::AlpnNames::get().Http11}, "127.0.0.1", + 111); ASSERT_NE(filter_chain, nullptr); EXPECT_FALSE(filter_chain->transportSocketFactory().implementsSecureTransport()); // TLS client with exact SNI match and ALPN match - using 2nd filter chain. 
- filter_chain = findFilterChain(1234, "127.0.0.1", "server1.example.com", "tls", - {"h2", "http/1.1"}, "127.0.0.1", 111); + filter_chain = findFilterChain( + 1234, "127.0.0.1", "server1.example.com", "tls", + {Http::Utility::AlpnNames::get().Http2, Http::Utility::AlpnNames::get().Http11}, "127.0.0.1", + 111); ASSERT_NE(filter_chain, nullptr); EXPECT_TRUE(filter_chain->transportSocketFactory().implementsSecureTransport()); auto transport_socket = filter_chain->transportSocketFactory().createTransportSocket(nullptr); @@ -2873,7 +2995,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithDifferent socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - config: {} + typed_config: {} filter_chains: - filter_chain_match: server_names: "example.com" @@ -2906,7 +3028,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithDifferent EXPECT_CALL(server_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); } @@ -2917,7 +3039,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - config: {} + typed_config: {} filter_chains: - filter_chain_match: server_names: "example.com" @@ -2947,7 +3069,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, EXPECT_CALL(server_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); } @@ -2957,14 +3079,14 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, 
SingleFilterChainWithInvalidDesti socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - config: {} + typed_config: {} filter_chains: - filter_chain_match: prefix_ranges: { address_prefix: a.b.c.d, prefix_len: 32 } )EOF", Network::Address::IpVersion::v4); - EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true), + EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true), EnvoyException, "malformed IP address: a.b.c.d"); } @@ -2974,14 +3096,14 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithInvalidServe socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - config: {} + typed_config: {} filter_chains: - filter_chain_match: server_names: "*w.example.com" )EOF", Network::Address::IpVersion::v4); - EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true), + EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true), EnvoyException, "error adding listener '127.0.0.1:1234': partial wildcards are not " "supported in \"server_names\""); @@ -2993,19 +3115,21 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithSameMatch socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - config: {} + typed_config: {} filter_chains: - - filter_chain_match: + - name : foo + filter_chain_match: transport_protocol: "tls" - - filter_chain_match: + - name: bar + filter_chain_match: transport_protocol: "tls" )EOF", Network::Address::IpVersion::v4); - EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true), + EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true), EnvoyException, - "error adding listener 
'127.0.0.1:1234': multiple filter chains with " - "the same matching rules are defined"); + "error adding listener '127.0.0.1:1234': filter chain 'bar' has " + "the same matching rules defined as 'foo'"); } TEST_F(ListenerManagerImplWithRealFiltersTest, @@ -3015,20 +3139,21 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - config: {} + typed_config: {} filter_chains: - - filter_chain_match: + - name: foo + filter_chain_match: transport_protocol: "tls" - - filter_chain_match: + - name: bar + filter_chain_match: transport_protocol: "tls" address_suffix: 127.0.0.0 )EOF", Network::Address::IpVersion::v4); - EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true), - EnvoyException, - "error adding listener '127.0.0.1:1234': contains filter chains with " - "unimplemented fields"); + EXPECT_THROW_WITH_MESSAGE( + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true), EnvoyException, + "error adding listener '127.0.0.1:1234': filter chain 'bar' contains unimplemented fields"); } TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithOverlappingRules) { @@ -3037,7 +3162,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithOverlappi socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - config: {} + typed_config: {} filter_chains: - filter_chain_match: server_names: "example.com" @@ -3046,7 +3171,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithOverlappi )EOF", Network::Address::IpVersion::v4); - EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true), + EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true), EnvoyException, "error adding listener '127.0.0.1:1234': multiple filter 
chains with " "overlapping matching rules are defined"); @@ -3066,7 +3191,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, TlsFilterChainWithoutTlsInspector EXPECT_CALL(server_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); // TLS Inspector is automatically injected for filter chains with TLS requirements, @@ -3088,7 +3213,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.listener.tls_inspector" - config: {} + typed_config: {} filter_chains: - filter_chain_match: transport_protocol: "tls" @@ -3099,7 +3224,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, EXPECT_CALL(server_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); // Make sure there is exactly 1 listener filter (and assume it's TLS Inspector). 
2 filters @@ -3127,7 +3252,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SniFilterChainWithoutTlsInspector EXPECT_CALL(server_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); // TLS Inspector is automatically injected for filter chains with SNI requirements, @@ -3155,7 +3280,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, AlpnFilterChainWithoutTlsInspecto EXPECT_CALL(server_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); // TLS Inspector is automatically injected for filter chains with ALPN requirements, @@ -3184,7 +3309,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, CustomTransportProtocolWithSniWit EXPECT_CALL(server_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); // Make sure there are no listener filters (i.e. no automatically injected TLS Inspector). 
@@ -3223,7 +3348,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, TlsCertificateInline) { EXPECT_CALL(server_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); } @@ -3248,7 +3373,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, TlsCertificateChainInlinePrivateK EXPECT_CALL(server_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); } @@ -3268,7 +3393,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, TlsCertificateIncomplete) { Network::Address::IpVersion::v4); EXPECT_THROW_WITH_MESSAGE( - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true), EnvoyException, + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true), EnvoyException, TestEnvironment::substitute( "Failed to load incomplete certificate from {{ test_rundir }}" "/test/extensions/transport_sockets/tls/test_data/san_dns3_chain.pem, ", @@ -3291,7 +3416,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, TlsCertificateInvalidCertificateC )EOF", Network::Address::IpVersion::v4); - EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true), + EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true), EnvoyException, "Failed to load certificate chain from "); } @@ -3317,7 +3442,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, TlsCertificateInvalidIntermediate )EOF"), Network::Address::IpVersion::v4); - EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true), + 
EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true), EnvoyException, "Failed to load certificate chain from "); } @@ -3337,7 +3462,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, TlsCertificateInvalidPrivateKey) )EOF", Network::Address::IpVersion::v4); - EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true), + EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true), EnvoyException, "Failed to load private key from "); } @@ -3359,7 +3484,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, TlsCertificateInvalidTrustedCA) { )EOF", Network::Address::IpVersion::v4); - EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true), + EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true), EnvoyException, "Failed to load trusted CA certificates from "); } @@ -3385,7 +3510,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, Metadata) { route: { cluster: service_foo } listener_filters: - name: "envoy.filters.listener.original_dst" - config: {} + typed_config: {} )EOF", Network::Address::IpVersion::v4); Configuration::ListenerFactoryContext* listener_factory_context = nullptr; @@ -3400,7 +3525,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, Metadata) { listener_factory_context = &context; return ProdListenerComponentFactory::createListenerFilterFactoryList_(filters, context); })); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); ASSERT_NE(nullptr, listener_factory_context); EXPECT_EQ("test_value", Config::Metadata::metadataValue( &listener_factory_context->listenerMetadata(), "com.bar.foo", "baz") @@ -3415,13 +3540,13 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, OriginalDstFilter) { filter_chains: {} listener_filters: - name: "envoy.filters.listener.original_dst" 
- config: {} + typed_config: {} )EOF", Network::Address::IpVersion::v4); EXPECT_CALL(server_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); Network::ListenerConfig& listener = manager_->listeners().back().get(); @@ -3429,6 +3554,9 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, OriginalDstFilter) { Network::FilterChainFactory& filterChainFactory = listener.filterChainFactory(); Network::MockListenerFilterManager manager; + // Return error when trying to retrieve the original dst on the invalid handle + EXPECT_CALL(os_sys_calls_, getsockopt_(_, _, _, _, _)).WillOnce(Return(-1)); + NiceMock callbacks; Network::AcceptedSocketImpl socket(std::make_unique(), Network::Address::InstanceConstSharedPtr{ @@ -3450,7 +3578,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, OriginalDstFilter) { } class OriginalDstTestFilter : public Extensions::ListenerFilters::OriginalDst::OriginalDstFilter { - Network::Address::InstanceConstSharedPtr getOriginalDst(os_fd_t) override { + Network::Address::InstanceConstSharedPtr getOriginalDst(Network::Socket&) override { return Network::Address::InstanceConstSharedPtr{ new Network::Address::Ipv4Instance("127.0.0.2", 2345)}; } @@ -3487,13 +3615,13 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, OriginalDstTestFilter) { filter_chains: {} listener_filters: - name: "test.listener.original_dst" - config: {} + typed_config: {} )EOF", Network::Address::IpVersion::v4); EXPECT_CALL(server_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); Network::ListenerConfig& listener = 
manager_->listeners().back().get(); @@ -3524,7 +3652,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, OriginalDstTestFilter) { class OriginalDstTestFilterIPv6 : public Extensions::ListenerFilters::OriginalDst::OriginalDstFilter { - Network::Address::InstanceConstSharedPtr getOriginalDst(os_fd_t) override { + Network::Address::InstanceConstSharedPtr getOriginalDst(Network::Socket&) override { return Network::Address::InstanceConstSharedPtr{ new Network::Address::Ipv6Instance("1::2", 2345)}; } @@ -3561,13 +3689,13 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, OriginalDstTestFilterIPv6) { filter_chains: {} listener_filters: - name: "test.listener.original_dstipv6" - config: {} + typed_config: {} )EOF", Network::Address::IpVersion::v6); EXPECT_CALL(server_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); Network::ListenerConfig& listener = manager_->listeners().back().get(); @@ -3610,13 +3738,13 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, TransparentFreebindListenerDisabl )EOF", Network::Address::IpVersion::v4); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})) - .WillOnce(Invoke([&](Network::Address::InstanceConstSharedPtr, Network::Address::SocketType, + .WillOnce(Invoke([&](Network::Address::InstanceConstSharedPtr, Network::Socket::Type, const Network::Socket::OptionsSharedPtr& options, const ListenSocketCreationParams&) -> Network::SocketSharedPtr { EXPECT_EQ(options, nullptr); return listener_factory_.socket_; })); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); } @@ -3677,46 +3805,22 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, ReusePortListenerEnabledForTcp) 
{ } TEST_F(ListenerManagerImplWithRealFiltersTest, ReusePortListenerDisabled) { - auto listener = createIPv4Listener("UdpListener"); listener.mutable_address()->mutable_socket_address()->set_protocol( envoy::config::core::v3::SocketAddress::UDP); - // For UDP, reuse_port is set to true forcibly, even it's set to false explicitly in config + // For UDP, verify that we fail if reuse port is false and concurrency is > 1. listener.set_reuse_port(false); - - // Port should be 0 for creating the shared socket, otherwise socket - // creation will happen worker thread. - listener.mutable_address()->mutable_socket_address()->set_port_value(0); - - // IpPacketInfo and RxQueueOverFlow are always set if supported - expectCreateListenSocket(envoy::config::core::v3::SocketOption::STATE_PREBIND, -#ifdef SO_RXQ_OVFL - /* expected_num_options */ 2, -#else - /* expected_num_options */ 1, -#endif - /* expected_creation_params */ {true, false}); - - expectSetsockopt(os_sys_calls_, - /* expected_sockopt_level */ IPPROTO_IP, - /* expected_sockopt_name */ ENVOY_IP_PKTINFO, - /* expected_value */ 1, - /* expected_num_calls */ 1); -#ifdef SO_RXQ_OVFL - expectSetsockopt(os_sys_calls_, - /* expected_sockopt_level */ SOL_SOCKET, - /* expected_sockopt_name */ SO_RXQ_OVFL, - /* expected_value */ 1, - /* expected_num_calls */ 1); -#endif - server_.options_.concurrency_ = 2; - manager_->addOrUpdateListener(listener, "", true); - EXPECT_EQ(1U, manager_->listeners().size()); + + EXPECT_THROW_WITH_MESSAGE( + manager_->addOrUpdateListener(listener, "", true), EnvoyException, + "Listening on UDP when concurrency is > 1 without the SO_REUSEPORT socket option results in " + "unstable packet proxying. 
Configure the reuse_port listener option or set concurrency = 1."); + EXPECT_EQ(0, manager_->listeners().size()); } TEST_F(ListenerManagerImplWithRealFiltersTest, LiteralSockoptListenerEnabled) { - const envoy::config::listener::v3::Listener listener = parseListenerFromV2Yaml(R"EOF( + const envoy::config::listener::v3::Listener listener = parseListenerFromV3Yaml(R"EOF( name: SockoptsListener address: socket_address: { address: 127.0.0.1, port_value: 1111 } @@ -3733,14 +3837,14 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, LiteralSockoptListenerEnabled) { expectCreateListenSocket(envoy::config::core::v3::SocketOption::STATE_PREBIND, /* expected_num_options */ 3); - expectSetsockopt(os_sys_calls_, - /* expected_sockopt_level */ 1, - /* expected_sockopt_name */ 2, - /* expected_value */ 3); - expectSetsockopt(os_sys_calls_, - /* expected_sockopt_level */ 4, - /* expected_sockopt_name */ 5, - /* expected_value */ 6); + expectSetsockopt( + /* expected_sockopt_level */ 1, + /* expected_sockopt_name */ 2, + /* expected_value */ 3); + expectSetsockopt( + /* expected_sockopt_level */ 4, + /* expected_sockopt_name */ 5, + /* expected_value */ 6); manager_->addOrUpdateListener(listener, "", true); EXPECT_EQ(1U, manager_->listeners().size()); } @@ -3764,7 +3868,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, AddressResolver) { Registry::InjectFactory register_resolver(mock_resolver); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); } @@ -3789,7 +3893,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, CRLFilename) { EXPECT_CALL(server_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", 
true); EXPECT_EQ(1U, manager_->listeners().size()); } @@ -3817,7 +3921,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, CRLInline) { EXPECT_CALL(server_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); } @@ -3840,7 +3944,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, InvalidCRLInline) { )EOF", Network::Address::IpVersion::v4); - EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true), + EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true), EnvoyException, "Failed to load CRL from "); } @@ -3862,7 +3966,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, CRLWithNoCA) { )EOF", Network::Address::IpVersion::v4); - EXPECT_THROW_WITH_REGEX(manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true), + EXPECT_THROW_WITH_REGEX(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true), EnvoyException, "^Failed to load CRL from .* without trusted CA$"); } @@ -3885,7 +3989,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, VerifySanWithNoCA) { )EOF", Network::Address::IpVersion::v4); - EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true), + EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true), EnvoyException, "SAN-based verification of peer certificates without trusted CA " "is insecure and not allowed"); @@ -3910,7 +4014,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, VerifyIgnoreExpirationWithNoCA) { )EOF", Network::Address::IpVersion::v4); - EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true), + EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true), 
EnvoyException, "Certificate validity period is always ignored without trusted CA"); } @@ -3936,7 +4040,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, VerifyIgnoreExpirationWithCA) { )EOF", Network::Address::IpVersion::v4); - EXPECT_NO_THROW(manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true)); + EXPECT_NO_THROW(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true)); } // Validate that dispatcher stats prefix is set correctly when enabled. @@ -3970,7 +4074,7 @@ name: test_api_listener cluster: dynamic_forward_proxy_cluster )EOF"; - ASSERT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", false)); + ASSERT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", false)); EXPECT_EQ(0U, manager_->listeners().size()); ASSERT_TRUE(manager_->apiListener().has_value()); } @@ -3999,7 +4103,7 @@ name: test_api_listener cluster: dynamic_forward_proxy_cluster )EOF"; - ASSERT_FALSE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true)); + ASSERT_FALSE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true)); EXPECT_EQ(0U, manager_->listeners().size()); ASSERT_FALSE(manager_->apiListener().has_value()); } @@ -4051,19 +4155,572 @@ name: test_api_listener_2 cluster: dynamic_forward_proxy_cluster )EOF"; - ASSERT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", false)); + ASSERT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", false)); EXPECT_EQ(0U, manager_->listeners().size()); ASSERT_TRUE(manager_->apiListener().has_value()); EXPECT_EQ("test_api_listener", manager_->apiListener()->get().name()); // Only one ApiListener is added. - ASSERT_FALSE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", false)); + ASSERT_FALSE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", false)); EXPECT_EQ(0U, manager_->listeners().size()); // The original ApiListener is there. 
ASSERT_TRUE(manager_->apiListener().has_value()); EXPECT_EQ("test_api_listener", manager_->apiListener()->get().name()); } +TEST_F(ListenerManagerImplTest, StopInplaceWarmingListener) { + InSequence s; + + EXPECT_CALL(*worker_, start(_)); + manager_->startWorkers(guard_dog_); + + // Add foo listener into warming. + const std::string listener_foo_yaml = R"EOF( +name: foo +traffic_direction: INBOUND +address: + socket_address: + address: 127.0.0.1 + port_value: 1234 +filter_chains: +- filters: [] + )EOF"; + + ListenerHandle* listener_foo = expectListenerCreate(true, true); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_foo->target_, initialize()); + EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); + EXPECT_EQ(0, server_.stats_store_.counter("listener_manager.listener_in_place_updated").value()); + + checkStats(__LINE__, 1, 0, 0, 1, 0, 0, 0); + EXPECT_CALL(*worker_, addListener(_, _, _)); + listener_foo->target_.ready(); + worker_->callAddCompletion(true); + EXPECT_EQ(1UL, manager_->listeners().size()); + checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); + + // Update foo into warming. + const std::string listener_foo_update1_yaml = R"EOF( +name: foo +traffic_direction: INBOUND +address: + socket_address: + address: 127.0.0.1 + port_value: 1234 +filter_chains: +- filters: + filter_chain_match: + destination_port: 1234 + )EOF"; + + ListenerHandle* listener_foo_update1 = expectListenerOverridden(true); + EXPECT_CALL(listener_foo_update1->target_, initialize()); + EXPECT_TRUE( + manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_update1_yaml), "", true)); + EXPECT_EQ(1, server_.stats_store_.counter("listener_manager.listener_in_place_updated").value()); + + EXPECT_EQ(1UL, manager_->listeners().size()); + + // Stop foo which should remove warming listener. 
+ EXPECT_CALL(*listener_foo_update1, onDestroy()); + EXPECT_CALL(*worker_, stopListener(_, _)); + EXPECT_CALL(*listener_factory_.socket_, close()); + EXPECT_CALL(*listener_foo, onDestroy()); + manager_->stopListeners(ListenerManager::StopListenersType::InboundOnly); + EXPECT_EQ(1, server_.stats_store_.counter("listener_manager.listener_stopped").value()); +} + +TEST_F(ListenerManagerImplTest, RemoveInplaceUpdatingListener) { + InSequence s; + + EXPECT_CALL(*worker_, start(_)); + manager_->startWorkers(guard_dog_); + + // Add foo listener into warming. + const std::string listener_foo_yaml = R"EOF( +name: foo +traffic_direction: INBOUND +address: + socket_address: + address: 127.0.0.1 + port_value: 1234 +filter_chains: +- filters: [] + )EOF"; + + ListenerHandle* listener_foo = expectListenerCreate(true, true); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_foo->target_, initialize()); + EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); + EXPECT_EQ(0, server_.stats_store_.counter("listener_manager.listener_in_place_updated").value()); + + checkStats(__LINE__, 1, 0, 0, 1, 0, 0, 0); + EXPECT_CALL(*worker_, addListener(_, _, _)); + listener_foo->target_.ready(); + worker_->callAddCompletion(true); + EXPECT_EQ(1UL, manager_->listeners().size()); + + // Update foo into warming. 
+ const std::string listener_foo_update1_yaml = R"EOF( +name: foo +traffic_direction: INBOUND +address: + socket_address: + address: 127.0.0.1 + port_value: 1234 +filter_chains: +- filters: + filter_chain_match: + destination_port: 1234 + )EOF"; + + ListenerHandle* listener_foo_update1 = expectListenerOverridden(true); + EXPECT_CALL(listener_foo_update1->target_, initialize()); + EXPECT_TRUE( + manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_update1_yaml), "", true)); + EXPECT_EQ(1, server_.stats_store_.counter("listener_manager.listener_in_place_updated").value()); + + EXPECT_EQ(1UL, manager_->listeners().size()); + checkStats(__LINE__, 1, 1, 0, 1, 1, 0, 0); + + // Remove foo which should remove both warming and active. + EXPECT_CALL(*listener_foo_update1, onDestroy()); + EXPECT_CALL(*worker_, stopListener(_, _)); + EXPECT_CALL(*listener_factory_.socket_, close()); + EXPECT_CALL(*listener_foo->drain_manager_, startDrainSequence(_)); + EXPECT_TRUE(manager_->removeListener("foo")); + checkStats(__LINE__, 1, 1, 1, 0, 0, 1, 0); + EXPECT_CALL(*worker_, removeListener(_, _)); + listener_foo->drain_manager_->drain_sequence_completion_(); + checkStats(__LINE__, 1, 1, 1, 0, 0, 1, 0); + EXPECT_CALL(*listener_foo, onDestroy()); + worker_->callRemovalCompletion(); + EXPECT_EQ(0UL, manager_->listeners().size()); + checkStats(__LINE__, 1, 1, 1, 0, 0, 0, 0); +} + +TEST_F(ListenerManagerImplTest, UpdateInplaceWarmingListener) { + InSequence s; + + EXPECT_CALL(*worker_, start(_)); + manager_->startWorkers(guard_dog_); + + // Add foo listener into warming. 
+ const std::string listener_foo_yaml = R"EOF( +name: foo +traffic_direction: INBOUND +address: + socket_address: + address: 127.0.0.1 + port_value: 1234 +filter_chains: +- filters: [] + )EOF"; + + ListenerHandle* listener_foo = expectListenerCreate(true, true); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_foo->target_, initialize()); + EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); + EXPECT_EQ(0, server_.stats_store_.counter("listener_manager.listener_in_place_updated").value()); + + checkStats(__LINE__, 1, 0, 0, 1, 0, 0, 0); + EXPECT_CALL(*worker_, addListener(_, _, _)); + listener_foo->target_.ready(); + worker_->callAddCompletion(true); + EXPECT_EQ(1UL, manager_->listeners().size()); + + // Update foo into warming. + const std::string listener_foo_update1_yaml = R"EOF( +name: foo +traffic_direction: INBOUND +address: + socket_address: + address: 127.0.0.1 + port_value: 1234 +filter_chains: +- filters: + filter_chain_match: + destination_port: 1234 + )EOF"; + + ListenerHandle* listener_foo_update1 = expectListenerOverridden(true); + EXPECT_CALL(listener_foo_update1->target_, initialize()); + EXPECT_TRUE( + manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_update1_yaml), "", true)); + EXPECT_EQ(1, server_.stats_store_.counter("listener_manager.listener_in_place_updated").value()); + + EXPECT_EQ(1UL, manager_->listeners().size()); + checkStats(__LINE__, 1, 1, 0, 1, 1, 0, 0); + + // Listener warmed up. 
+ EXPECT_CALL(*worker_, addListener(_, _, _)); + EXPECT_CALL(*listener_foo, onDestroy()); + listener_foo_update1->target_.ready(); + worker_->callAddCompletion(true); + EXPECT_EQ(1UL, manager_->listeners().size()); + + EXPECT_CALL(*listener_foo_update1, onDestroy()); +} + +TEST_F(ListenerManagerImplTest, DrainageDuringInplaceUpdate) { + InSequence s; + + EXPECT_CALL(*worker_, start(_)); + manager_->startWorkers(guard_dog_); + + // Add foo listener into warming. + const std::string listener_foo_yaml = R"EOF( +name: foo +traffic_direction: INBOUND +address: + socket_address: + address: 127.0.0.1 + port_value: 1234 +filter_chains: +- filters: [] + )EOF"; + + ListenerHandle* listener_foo = expectListenerCreate(true, true); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_foo->target_, initialize()); + EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); + checkStats(__LINE__, 1, 0, 0, 1, 0, 0, 0); + EXPECT_CALL(*worker_, addListener(_, _, _)); + listener_foo->target_.ready(); + worker_->callAddCompletion(true); + EXPECT_EQ(1UL, manager_->listeners().size()); + + // Update foo into warming. + const std::string listener_foo_update1_yaml = R"EOF( +name: foo +traffic_direction: INBOUND +address: + socket_address: + address: 127.0.0.1 + port_value: 1234 +filter_chains: +- filters: + filter_chain_match: + destination_port: 1234 + )EOF"; + + ListenerHandle* listener_foo_update1 = expectListenerOverridden(true); + EXPECT_CALL(listener_foo_update1->target_, initialize()); + EXPECT_TRUE( + manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_update1_yaml), "", true)); + EXPECT_EQ(1UL, manager_->listeners().size()); + EXPECT_EQ(1, server_.stats_store_.counter("listener_manager.listener_in_place_updated").value()); + checkStats(__LINE__, 1, 1, 0, 1, 1, 0, 0); + + // The warmed up starts the drain timer. 
+ EXPECT_CALL(*worker_, addListener(_, _, _)); + EXPECT_CALL(server_.options_, drainTime()).WillOnce(Return(std::chrono::seconds(600))); + Event::MockTimer* filter_chain_drain_timer = new Event::MockTimer(&server_.dispatcher_); + EXPECT_CALL(*filter_chain_drain_timer, enableTimer(std::chrono::milliseconds(600000), _)); + listener_foo_update1->target_.ready(); + checkStats(__LINE__, 1, 1, 0, 0, 1, 0, 1); + + // Timer expires, worker close connections if any. + EXPECT_CALL(*worker_, removeFilterChains(_, _, _)); + filter_chain_drain_timer->invokeCallback(); + + // Once worker clean up is done, it's safe for the main thread to remove the original listener. + EXPECT_CALL(*listener_foo, onDestroy()); + worker_->callDrainFilterChainsComplete(); + checkStats(__LINE__, 1, 1, 0, 0, 1, 0, 0); + + EXPECT_CALL(*listener_foo_update1, onDestroy()); +} + +TEST(ListenerMessageUtilTest, ListenerMessageSameAreEquivalent) { + envoy::config::listener::v3::Listener listener1; + envoy::config::listener::v3::Listener listener2; + EXPECT_TRUE(Server::ListenerMessageUtil::filterChainOnlyChange(listener1, listener2)); +} + +TEST(ListenerMessageUtilTest, ListenerMessageHaveDifferentNameNotEquivalent) { + envoy::config::listener::v3::Listener listener1; + listener1.set_name("listener1"); + envoy::config::listener::v3::Listener listener2; + listener2.set_name("listener2"); + EXPECT_FALSE(Server::ListenerMessageUtil::filterChainOnlyChange(listener1, listener2)); +} + +TEST(ListenerMessageUtilTest, ListenerMessageHaveDifferentFilterChainsAreEquivalent) { + envoy::config::listener::v3::Listener listener1; + listener1.set_name("common"); + auto add_filter_chain_1 = listener1.add_filter_chains(); + add_filter_chain_1->set_name("127.0.0.1"); + + envoy::config::listener::v3::Listener listener2; + listener2.set_name("common"); + auto add_filter_chain_2 = listener2.add_filter_chains(); + add_filter_chain_2->set_name("127.0.0.2"); + + 
EXPECT_TRUE(Server::ListenerMessageUtil::filterChainOnlyChange(listener1, listener2)); +} + +TEST_F(ListenerManagerImplForInPlaceFilterChainUpdateTest, TraditionalUpdateIfWorkerNotStarted) { + // Worker is not started yet. + auto listener_proto = createDefaultListener(); + ListenerHandle* listener_foo = expectListenerCreate(false, true); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + manager_->addOrUpdateListener(listener_proto, "", true); + EXPECT_EQ(1u, manager_->listeners().size()); + + // Mutate the listener message as filter chain change only. + auto new_listener_proto = listener_proto; + new_listener_proto.mutable_filter_chains(0) + ->mutable_filter_chain_match() + ->mutable_destination_port() + ->set_value(9999); + + EXPECT_CALL(*listener_foo, onDestroy()); + ListenerHandle* listener_foo_update1 = expectListenerCreate(false, true); + manager_->addOrUpdateListener(new_listener_proto, "", true); + EXPECT_CALL(*listener_foo_update1, onDestroy()); + EXPECT_EQ(0, server_.stats_store_.counter("listener_manager.listener_in_place_updated").value()); +} + +TEST_F(ListenerManagerImplForInPlaceFilterChainUpdateTest, TraditionalUpdateIfAnyListenerIsNotTcp) { + EXPECT_CALL(*worker_, start(_)); + manager_->startWorkers(guard_dog_); + + auto listener_proto = createDefaultListener(); + + ListenerHandle* listener_foo = expectListenerCreate(false, true); + + expectAddListener(listener_proto, listener_foo); + + auto new_listener_proto = listener_proto; + new_listener_proto.mutable_address()->mutable_socket_address()->set_protocol( + envoy::config::core::v3::SocketAddress_Protocol::SocketAddress_Protocol_UDP); + + ListenerHandle* listener_foo_update1 = expectListenerCreate(false, true); + expectUpdateToThenDrain(new_listener_proto, listener_foo); + + expectRemove(new_listener_proto, listener_foo_update1); + + EXPECT_EQ(0UL, manager_->listeners().size()); + EXPECT_EQ(0, server_.stats_store_.counter("listener_manager.listener_in_place_updated").value()); 
+} + +TEST_F(ListenerManagerImplForInPlaceFilterChainUpdateTest, + TraditionalUpdateIfImplicitTlsInspectorChanges) { + + EXPECT_CALL(*worker_, start(_)); + manager_->startWorkers(guard_dog_); + + auto listener_proto = createDefaultListener(); + + ListenerHandle* listener_foo = expectListenerCreate(false, true); + expectAddListener(listener_proto, listener_foo); + + ListenerHandle* listener_foo_update1 = expectListenerCreate(false, true); + + auto new_listener_proto = listener_proto; + *new_listener_proto.mutable_filter_chains(0) + ->mutable_filter_chain_match() + ->mutable_application_protocols() + ->Add() = "alpn"; + expectUpdateToThenDrain(new_listener_proto, listener_foo); + + expectRemove(new_listener_proto, listener_foo_update1); + + EXPECT_EQ(0UL, manager_->listeners().size()); + EXPECT_EQ(0, server_.stats_store_.counter("listener_manager.listener_in_place_updated").value()); +} + +TEST_F(ListenerManagerImplForInPlaceFilterChainUpdateTest, + TraditionalUpdateIfImplicitProxyProtocolChanges) { + + EXPECT_CALL(*worker_, start(_)); + manager_->startWorkers(guard_dog_); + + auto listener_proto = createDefaultListener(); + + ListenerHandle* listener_foo = expectListenerCreate(false, true); + expectAddListener(listener_proto, listener_foo); + + ListenerHandle* listener_foo_update1 = expectListenerCreate(false, true); + + auto new_listener_proto = listener_proto; + new_listener_proto.mutable_filter_chains(0)->mutable_use_proxy_proto()->set_value(true); + + expectUpdateToThenDrain(new_listener_proto, listener_foo); + expectRemove(new_listener_proto, listener_foo_update1); + EXPECT_EQ(0UL, manager_->listeners().size()); + EXPECT_EQ(0, server_.stats_store_.counter("listener_manager.listener_in_place_updated").value()); +} + +TEST_F(ListenerManagerImplForInPlaceFilterChainUpdateTest, TraditionalUpdateOnZeroFilterChain) { + EXPECT_CALL(*worker_, start(_)); + manager_->startWorkers(guard_dog_); + + auto listener_proto = createDefaultListener(); + + ListenerHandle* 
listener_foo = expectListenerCreate(false, true); + expectAddListener(listener_proto, listener_foo); + + auto new_listener_proto = listener_proto; + new_listener_proto.clear_filter_chains(); + EXPECT_CALL(server_.validation_context_, staticValidationVisitor()).Times(0); + EXPECT_CALL(server_.validation_context_, dynamicValidationVisitor()); + EXPECT_CALL(listener_factory_, createDrainManager_(_)); + EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(new_listener_proto, "", true), + EnvoyException, + "error adding listener '127.0.0.1:1234': no filter chains specified"); + + expectRemove(listener_proto, listener_foo); + EXPECT_EQ(0UL, manager_->listeners().size()); + EXPECT_EQ(0, server_.stats_store_.counter("listener_manager.listener_in_place_updated").value()); +} + +TEST_F(ListenerManagerImplForInPlaceFilterChainUpdateTest, + TraditionalUpdateIfListenerConfigHasUpdateOtherThanFilterChain) { + EXPECT_CALL(*worker_, start(_)); + manager_->startWorkers(guard_dog_); + + auto listener_proto = createDefaultListener(); + + ListenerHandle* listener_foo = expectListenerCreate(false, true); + expectAddListener(listener_proto, listener_foo); + + ListenerHandle* listener_foo_update1 = expectListenerCreate(false, true); + + auto new_listener_proto = listener_proto; + new_listener_proto.set_traffic_direction(::envoy::config::core::v3::TrafficDirection::INBOUND); + expectUpdateToThenDrain(new_listener_proto, listener_foo); + + expectRemove(new_listener_proto, listener_foo_update1); + + EXPECT_EQ(0UL, manager_->listeners().size()); + EXPECT_EQ(0, server_.stats_store_.counter("listener_manager.listener_in_place_updated").value()); +} + +// This test execute an in place update first, then a traditional listener update. +// The second update is enforced by runtime. +TEST_F(ListenerManagerImplTest, RuntimeDisabledInPlaceUpdateFallbacksToTraditionalUpdate) { + InSequence s; + EXPECT_CALL(*worker_, start(_)); + manager_->startWorkers(guard_dog_); + + // Add foo listener. 
+ const std::string listener_foo_yaml = R"EOF( +name: foo +address: + socket_address: + address: 127.0.0.1 + port_value: 1234 +filter_chains: +- filters: [] + )EOF"; + + ListenerHandle* listener_foo = expectListenerCreate(false, true); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + + EXPECT_CALL(*worker_, addListener(_, _, _)); + + EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); + EXPECT_EQ(0, server_.stats_store_.counter("listener_manager.listener_in_place_updated").value()); + + worker_->callAddCompletion(true); + EXPECT_EQ(1UL, manager_->listeners().size()); + checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); + + // Add foo listener again. Will execute in place filter chain update path. + const std::string listener_foo_update1_yaml = R"EOF( + name: foo + address: + socket_address: + address: 127.0.0.1 + port_value: 1234 + filter_chains: + - filters: [] + filter_chain_match: + destination_port: 1234 + )EOF"; + + ListenerHandle* listener_foo_update1 = expectListenerOverridden(false, listener_foo); + EXPECT_CALL(*worker_, addListener(_, _, _)); + auto* timer = new Event::MockTimer(dynamic_cast(&server_.dispatcher())); + EXPECT_CALL(*timer, enableTimer(_, _)); + EXPECT_TRUE( + manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_update1_yaml), "", true)); + EXPECT_EQ(1, server_.stats_store_.counter("listener_manager.listener_in_place_updated").value()); + + EXPECT_EQ(1UL, manager_->listeners().size()); + worker_->callAddCompletion(true); + + EXPECT_CALL(*worker_, removeFilterChains(_, _, _)); + timer->invokeCallback(); + EXPECT_CALL(*listener_foo, onDestroy()); + worker_->callDrainFilterChainsComplete(); + + // Update foo again. This time we disable in place filter chain update in runtime. + // The traditional full listener update path is used. 
+ auto in_place_update_disabled_guard = disableInplaceUpdateForThisTest(); + const std::string listener_foo_update2_yaml = R"EOF( + name: foo + address: + socket_address: + address: 127.0.0.1 + port_value: 1234 + filter_chains: + - filters: + filter_chain_match: + destination_port: 2345 + )EOF"; + + ListenerHandle* listener_foo_update2 = expectListenerCreate(false, true); + EXPECT_CALL(*worker_, addListener(_, _, _)); + EXPECT_CALL(*worker_, stopListener(_, _)); + EXPECT_CALL(*listener_foo_update1->drain_manager_, startDrainSequence(_)); + EXPECT_TRUE( + manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_update2_yaml), "", true)); + EXPECT_EQ(1, server_.stats_store_.counter("listener_manager.listener_in_place_updated").value()); + + EXPECT_CALL(*worker_, removeListener(_, _)); + listener_foo_update1->drain_manager_->drain_sequence_completion_(); + + EXPECT_CALL(*listener_foo_update1, onDestroy()); + worker_->callRemovalCompletion(); + + EXPECT_CALL(*worker_, stopListener(_, _)); + EXPECT_CALL(*listener_factory_.socket_, close()); + EXPECT_CALL(*listener_foo_update2->drain_manager_, startDrainSequence(_)); + EXPECT_TRUE(manager_->removeListener("foo")); + + EXPECT_CALL(*worker_, removeListener(_, _)); + listener_foo_update2->drain_manager_->drain_sequence_completion_(); + + EXPECT_CALL(*listener_foo_update2, onDestroy()); + worker_->callRemovalCompletion(); + EXPECT_EQ(0UL, manager_->listeners().size()); +} + +// This test verifies that on default initialization the UDP Packet Writer +// is initialized in passthrough mode. (i.e. by using UdpDefaultWriter). 
+TEST_F(ListenerManagerImplTest, UdpDefaultWriterConfig) { + const envoy::config::listener::v3::Listener listener = parseListenerFromV3Yaml(R"EOF( +address: + socket_address: + address: 127.0.0.1 + protocol: UDP + port_value: 1234 +filter_chains: + filters: [] + )EOF"); + manager_->addOrUpdateListener(listener, "", true); + EXPECT_EQ(1U, manager_->listeners().size()); + Network::SocketSharedPtr listen_socket = + manager_->listeners().front().get().listenSocketFactory().getListenSocket(); + Network::UdpPacketWriterPtr udp_packet_writer = + manager_->listeners().front().get().udpPacketWriterFactory()->get().createUdpPacketWriter( + listen_socket->ioHandle(), manager_->listeners()[0].get().listenerScope()); + EXPECT_FALSE(udp_packet_writer->isBatchMode()); +} + } // namespace } // namespace Server } // namespace Envoy diff --git a/test/server/listener_manager_impl_test.h b/test/server/listener_manager_impl_test.h index 49a29cc4a6dc9..747859f669a12 100644 --- a/test/server/listener_manager_impl_test.h +++ b/test/server/listener_manager_impl_test.h @@ -12,9 +12,15 @@ #include "server/listener_manager_impl.h" #include "test/mocks/network/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/drain_manager.h" +#include "test/mocks/server/guard_dog.h" +#include "test/mocks/server/instance.h" +#include "test/mocks/server/listener_component_factory.h" +#include "test/mocks/server/worker.h" +#include "test/mocks/server/worker_factory.h" #include "test/test_common/environment.h" #include "test/test_common/simulated_time_system.h" +#include "test/test_common/test_runtime.h" #include "test/test_common/threadsafe_singleton_injector.h" #include "gmock/gmock.h" @@ -52,6 +58,10 @@ class ListenerManagerImplTest : public testing::Test { void SetUp() override { ON_CALL(server_, api()).WillByDefault(ReturnRef(*api_)); EXPECT_CALL(worker_factory_, createWorker_()).WillOnce(Return(worker_)); + ON_CALL(server_.validation_context_, staticValidationVisitor()) + 
.WillByDefault(ReturnRef(validation_visitor)); + ON_CALL(server_.validation_context_, dynamicValidationVisitor()) + .WillByDefault(ReturnRef(validation_visitor)); manager_ = std::make_unique(server_, listener_factory_, worker_factory_, enable_dispatcher_stats_); @@ -133,6 +143,33 @@ class ListenerManagerImplTest : public testing::Test { return raw_listener; } + ListenerHandle* expectListenerOverridden(bool need_init, ListenerHandle* origin = nullptr) { + auto raw_listener = new ListenerHandle(false); + // Simulate ListenerImpl: drain manager is copied from origin. + if (origin != nullptr) { + raw_listener->drain_manager_ = origin->drain_manager_; + } + // Overridden listener is always added by api. + EXPECT_CALL(server_.validation_context_, staticValidationVisitor()).Times(0); + EXPECT_CALL(server_.validation_context_, dynamicValidationVisitor()); + + EXPECT_CALL(listener_factory_, createNetworkFilterFactoryList(_, _)) + .WillOnce(Invoke( + [raw_listener, need_init]( + const Protobuf::RepeatedPtrField&, + Server::Configuration::FilterChainFactoryContext& filter_chain_factory_context) + -> std::vector { + std::shared_ptr notifier(raw_listener); + raw_listener->context_ = &filter_chain_factory_context; + if (need_init) { + filter_chain_factory_context.initManager().add(notifier->target_); + } + return {[notifier](Network::FilterManager&) -> void {}}; + })); + + return raw_listener; + } + const Network::FilterChain* findFilterChain(uint16_t destination_port, const std::string& destination_address, const std::string& server_name, const std::string& transport_protocol, @@ -171,8 +208,7 @@ class ListenerManagerImplTest : public testing::Test { ListenSocketCreationParams expected_creation_params = {true, true}) { EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, expected_creation_params)) .WillOnce(Invoke([this, expected_num_options, &expected_state]( - const Network::Address::InstanceConstSharedPtr&, - Network::Address::SocketType, + const 
Network::Address::InstanceConstSharedPtr&, Network::Socket::Type, const Network::Socket::OptionsSharedPtr& options, const ListenSocketCreationParams&) -> Network::SocketSharedPtr { EXPECT_NE(options.get(), nullptr); @@ -184,23 +220,26 @@ class ListenerManagerImplTest : public testing::Test { } /** - * Validate that setsockopt() is called the expected number of times with the expected options. + * Validate that setSocketOption() is called the expected number of times with the expected + * options. */ - void expectSetsockopt(NiceMock& os_sys_calls, int expected_sockopt_level, - int expected_sockopt_name, int expected_value, + void expectSetsockopt(int expected_sockopt_level, int expected_sockopt_name, int expected_value, uint32_t expected_num_calls = 1) { - EXPECT_CALL(os_sys_calls, - setsockopt_(_, expected_sockopt_level, expected_sockopt_name, _, sizeof(int))) + EXPECT_CALL(*listener_factory_.socket_, + setSocketOption(expected_sockopt_level, expected_sockopt_name, _, sizeof(int))) .Times(expected_num_calls) - .WillRepeatedly( - Invoke([expected_value](os_fd_t, int, int, const void* optval, socklen_t) -> int { + .WillRepeatedly(Invoke( + [expected_value](int, int, const void* optval, socklen_t) -> Api::SysCallIntResult { EXPECT_EQ(expected_value, *static_cast(optval)); - return 0; + return {0, 0}; })); } - void checkStats(uint64_t added, uint64_t modified, uint64_t removed, uint64_t warming, - uint64_t active, uint64_t draining) { + void checkStats(int line_num, uint64_t added, uint64_t modified, uint64_t removed, + uint64_t warming, uint64_t active, uint64_t draining, + uint64_t draining_filter_chains) { + SCOPED_TRACE(line_num); + EXPECT_EQ(added, server_.stats_store_.counter("listener_manager.listener_added").value()); EXPECT_EQ(modified, server_.stats_store_.counter("listener_manager.listener_modified").value()); EXPECT_EQ(removed, server_.stats_store_.counter("listener_manager.listener_removed").value()); @@ -216,6 +255,10 @@ class ListenerManagerImplTest : 
public testing::Test { .gauge("listener_manager.total_listeners_draining", Stats::Gauge::ImportMode::NeverImport) .value()); + EXPECT_EQ(draining_filter_chains, server_.stats_store_ + .gauge("listener_manager.total_filter_chains_draining", + Stats::Gauge::ImportMode::NeverImport) + .value()); } void checkConfigDump(const std::string& expected_dump_yaml) { @@ -228,11 +271,20 @@ class ListenerManagerImplTest : public testing::Test { EXPECT_EQ(expected_listeners_config_dump.DebugString(), listeners_config_dump.DebugString()); } + ABSL_MUST_USE_RESULT + auto disableInplaceUpdateForThisTest() { + auto scoped_runtime = std::make_unique(); + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.listener_in_place_filterchain_update", "false"}}); + return scoped_runtime; + } + NiceMock os_sys_calls_; TestThreadsafeSingletonInjector os_calls_{&os_sys_calls_}; Api::OsSysCallsImpl os_sys_calls_actual_; NiceMock server_; NiceMock listener_factory_; + NiceMock validation_visitor; MockWorker* worker_ = new MockWorker(); NiceMock worker_factory_; std::unique_ptr manager_; diff --git a/test/server/options_impl_test.cc b/test/server/options_impl_test.cc index efb4b2565f2dd..3898ffff14e3f 100644 --- a/test/server/options_impl_test.cc +++ b/test/server/options_impl_test.cc @@ -80,22 +80,23 @@ TEST_F(OptionsImplTest, V1Disallowed) { TEST_F(OptionsImplTest, All) { std::unique_ptr options = createOptionsImpl( - "envoy --mode validate --concurrency 2 -c hello --admin-address-path path --restart-epoch 1 " + "envoy --mode validate --concurrency 2 -c hello --admin-address-path path --restart-epoch 0 " "--local-address-ip-version v6 -l info --component-log-level upstream:debug,connection:trace " "--service-cluster cluster --service-node node --service-zone zone " "--file-flush-interval-msec 9000 " "--drain-time-s 60 --log-format [%v] --parent-shutdown-time-s 90 --log-path /foo/bar " "--disable-hot-restart --cpuset-threads --allow-unknown-static-fields " - 
"--reject-unknown-dynamic-fields --use-fake-symbol-table 0"); + "--reject-unknown-dynamic-fields --use-fake-symbol-table 0 --base-id 5 " + "--use-dynamic-base-id --base-id-path /foo/baz"); EXPECT_EQ(Server::Mode::Validate, options->mode()); EXPECT_EQ(2U, options->concurrency()); EXPECT_EQ("hello", options->configPath()); EXPECT_EQ("path", options->adminAddressPath()); EXPECT_EQ(Network::Address::IpVersion::v6, options->localAddressIpVersion()); - EXPECT_EQ(1U, options->restartEpoch()); + EXPECT_EQ(0U, options->restartEpoch()); EXPECT_EQ(spdlog::level::info, options->logLevel()); EXPECT_EQ(2, options->componentLogLevels().size()); - EXPECT_EQ("[[%g:%#] %v]", options->logFormat()); + EXPECT_EQ("[%v]", options->logFormat()); EXPECT_EQ("/foo/bar", options->logPath()); EXPECT_EQ("cluster", options->serviceClusterName()); EXPECT_EQ("node", options->serviceNodeName()); @@ -108,6 +109,9 @@ TEST_F(OptionsImplTest, All) { EXPECT_TRUE(options->allowUnknownStaticFields()); EXPECT_TRUE(options->rejectUnknownDynamicFields()); EXPECT_FALSE(options->fakeSymbolTableEnabled()); + EXPECT_EQ(5U, options->baseId()); + EXPECT_TRUE(options->useDynamicBaseId()); + EXPECT_EQ("/foo/baz", options->baseIdPath()); options = createOptionsImpl("envoy --mode init_only"); EXPECT_EQ(Server::Mode::InitOnly, options->mode()); @@ -150,10 +154,11 @@ TEST_F(OptionsImplTest, SetAll) { options->setAdminAddressPath("path"); options->setLocalAddressIpVersion(Network::Address::IpVersion::v6); options->setDrainTime(std::chrono::seconds(42)); + options->setDrainStrategy(Server::DrainStrategy::Immediate); + options->setParentShutdownTime(std::chrono::seconds(43)); options->setLogLevel(spdlog::level::trace); options->setLogFormat("%L %n %v"); options->setLogPath("/foo/bar"); - options->setParentShutdownTime(std::chrono::seconds(43)); options->setRestartEpoch(44); options->setFileFlushIntervalMsec(std::chrono::milliseconds(45)); options->setMode(Server::Mode::Validate); @@ -177,6 +182,7 @@ TEST_F(OptionsImplTest, 
SetAll) { EXPECT_EQ("path", options->adminAddressPath()); EXPECT_EQ(Network::Address::IpVersion::v6, options->localAddressIpVersion()); EXPECT_EQ(std::chrono::seconds(42), options->drainTime()); + EXPECT_EQ(Server::DrainStrategy::Immediate, options->drainStrategy()); EXPECT_EQ(spdlog::level::trace, options->logLevel()); EXPECT_EQ("%L %n %v", options->logFormat()); EXPECT_EQ("/foo/bar", options->logPath()); @@ -205,11 +211,13 @@ TEST_F(OptionsImplTest, SetAll) { EXPECT_EQ(envoy::admin::v3::CommandLineOptions::v6, command_line_options->local_address_ip_version()); EXPECT_EQ(options->drainTime().count(), command_line_options->drain_time().seconds()); + EXPECT_EQ(envoy::admin::v3::CommandLineOptions::Immediate, + command_line_options->drain_strategy()); + EXPECT_EQ(options->parentShutdownTime().count(), + command_line_options->parent_shutdown_time().seconds()); EXPECT_EQ(spdlog::level::to_string_view(options->logLevel()), command_line_options->log_level()); EXPECT_EQ(options->logFormat(), command_line_options->log_format()); EXPECT_EQ(options->logPath(), command_line_options->log_path()); - EXPECT_EQ(options->parentShutdownTime().count(), - command_line_options->parent_shutdown_time().seconds()); EXPECT_EQ(options->restartEpoch(), command_line_options->restart_epoch()); EXPECT_EQ(options->fileFlushIntervalMsec().count() / 1000, command_line_options->file_flush_interval().seconds()); @@ -225,6 +233,7 @@ TEST_F(OptionsImplTest, SetAll) { TEST_F(OptionsImplTest, DefaultParams) { std::unique_ptr options = createOptionsImpl("envoy -c hello"); EXPECT_EQ(std::chrono::seconds(600), options->drainTime()); + EXPECT_EQ(Server::DrainStrategy::Gradual, options->drainStrategy()); EXPECT_EQ(std::chrono::seconds(900), options->parentShutdownTime()); EXPECT_EQ("", options->adminAddressPath()); EXPECT_EQ(Network::Address::IpVersion::v4, options->localAddressIpVersion()); @@ -254,6 +263,7 @@ TEST_F(OptionsImplTest, OptionsAreInSyncWithProto) { Server::CommandLineOptionsPtr 
command_line_options = options->toCommandLineOptions(); // Failure of this condition indicates that the server_info proto is not in sync with the options. // If an option is added/removed, please update server_info proto as well to keep it in sync. + // Currently the following 7 options are not defined in proto, hence the count differs by 7. // 1. version - default TCLAP argument. // 2. help - default TCLAP argument. @@ -262,7 +272,13 @@ TEST_F(OptionsImplTest, OptionsAreInSyncWithProto) { // 5. use-fake-symbol-table - short-term override for rollout of real symbol-table implementation. // 6. hot restart version - print the hot restart version and exit. // 7. log-format-prefix-with-location - short-term override for rollout of dynamic log format. - EXPECT_EQ(options->count() - 7, command_line_options->GetDescriptor()->field_count()); + const uint32_t options_not_in_proto = 7; + + // There are two deprecated options: "max_stats" and "max_obj_name_len". + const uint32_t deprecated_options = 2; + + EXPECT_EQ(options->count() - options_not_in_proto, + command_line_options->GetDescriptor()->field_count() - deprecated_options); } TEST_F(OptionsImplTest, OptionsFromArgv) { @@ -415,22 +431,37 @@ TEST_F(OptionsImplTest, LogFormatDefault) { TEST_F(OptionsImplTest, LogFormatDefaultNoPrefix) { std::unique_ptr options = createOptionsImpl({"envoy", "-c", "hello", "--log-format-prefix-with-location", "0"}); - EXPECT_EQ(options->logFormat(), "[%Y-%m-%d %T.%e][%t][%l][%n] %v"); + EXPECT_EQ(options->logFormat(), "[%Y-%m-%d %T.%e][%t][%l][%n] [%g:%#] %v"); } TEST_F(OptionsImplTest, LogFormatOverride) { std::unique_ptr options = - createOptionsImpl({"envoy", "-c", "hello", "--log-format", "%%v %v %t %v"}); + createOptionsImpl({"envoy", "-c", "hello", "--log-format", "%%v %v %t %v", + "--log-format-prefix-with-location 1"}); EXPECT_EQ(options->logFormat(), "%%v [%g:%#] %v %t [%g:%#] %v"); } TEST_F(OptionsImplTest, LogFormatOverrideNoPrefix) { std::unique_ptr options = - 
createOptionsImpl({"envoy", "-c", "hello", "--log-format", "%%v %v %t %v", - "--log-format-prefix-with-location 0"}); + createOptionsImpl({"envoy", "-c", "hello", "--log-format", "%%v %v %t %v"}); EXPECT_EQ(options->logFormat(), "%%v %v %t %v"); } +// Test that --base-id and --restart-epoch with non-default values are accepted. +TEST_F(OptionsImplTest, SetBaseIdAndRestartEpoch) { + std::unique_ptr options = + createOptionsImpl({"envoy", "-c", "hello", "--base-id", "99", "--restart-epoch", "999"}); + EXPECT_EQ(99U, options->baseId()); + EXPECT_EQ(999U, options->restartEpoch()); +} + +// Test that --use-dynamic-base-id and --restart-epoch with a non-default value is not accepted. +TEST_F(OptionsImplTest, SetUseDynamicBaseIdAndRestartEpoch) { + EXPECT_THROW_WITH_REGEX( + createOptionsImpl({"envoy", "-c", "hello", "--use-dynamic-base-id", "--restart-epoch", "1"}), + MalformedArgvException, "error: cannot use --restart-epoch=1 with --use-dynamic-base-id"); +} + #if defined(__linux__) using testing::DoAll; diff --git a/test/server/overload_manager_impl_test.cc b/test/server/overload_manager_impl_test.cc index ee801a6d97af0..73715e249c07b 100644 --- a/test/server/overload_manager_impl_test.cc +++ b/test/server/overload_manager_impl_test.cc @@ -130,6 +130,26 @@ class OverloadManagerImplTest : public testing::Test { )EOF"; } + std::string getConfigSimple() { + return R"EOF( + refresh_interval { + seconds: 1 + } + resource_monitors { + name: "envoy.resource_monitors.fake_resource1" + } + actions { + name: "envoy.overload_actions.dummy_action" + triggers { + name: "envoy.resource_monitors.fake_resource1" + threshold { + value: 0.9 + } + } + } + )EOF"; + } + std::unique_ptr createOverloadManager(const std::string& config) { return std::make_unique(dispatcher_, stats_, thread_local_, parseConfig(config), validation_visitor_, *api_); @@ -174,6 +194,7 @@ TEST_F(OverloadManagerImplTest, CallbackOnlyFiresWhenStateChanges) { const OverloadActionState& action_state = 
manager->getThreadLocalOverloadState().getState("envoy.overload_actions.dummy_action"); + // Update does not exceed fake_resource1 trigger threshold, no callback expected factory1_.monitor_->setPressure(0.5); timer_cb_(); EXPECT_FALSE(is_active); @@ -182,6 +203,7 @@ TEST_F(OverloadManagerImplTest, CallbackOnlyFiresWhenStateChanges) { EXPECT_EQ(0, active_gauge.value()); EXPECT_EQ(50, pressure_gauge1.value()); + // Update exceeds fake_resource1 trigger threshold, callback is expected factory1_.monitor_->setPressure(0.95); timer_cb_(); EXPECT_TRUE(is_active); @@ -190,7 +212,7 @@ TEST_F(OverloadManagerImplTest, CallbackOnlyFiresWhenStateChanges) { EXPECT_EQ(1, active_gauge.value()); EXPECT_EQ(95, pressure_gauge1.value()); - // Callback should not be invoked if action active state has not changed + // Callback should not be invoked if action state does not change factory1_.monitor_->setPressure(0.94); timer_cb_(); EXPECT_TRUE(is_active); @@ -198,23 +220,50 @@ TEST_F(OverloadManagerImplTest, CallbackOnlyFiresWhenStateChanges) { EXPECT_EQ(1, cb_count); EXPECT_EQ(94, pressure_gauge1.value()); - // Different triggers firing but overall action remains active so no callback expected - factory1_.monitor_->setPressure(0.5); + // The action is already active for fake_resource1 so no callback expected factory2_.monitor_->setPressure(0.9); timer_cb_(); EXPECT_TRUE(is_active); EXPECT_EQ(action_state, OverloadActionState::Active); EXPECT_EQ(1, cb_count); + EXPECT_EQ(90, pressure_gauge2.value()); + + // The action remains active for fake_resource2 so no callback expected + factory1_.monitor_->setPressure(0.5); + timer_cb_(); + EXPECT_TRUE(is_active); + EXPECT_EQ(action_state, OverloadActionState::Active); + EXPECT_EQ(1, cb_count); EXPECT_EQ(50, pressure_gauge1.value()); EXPECT_EQ(90, pressure_gauge2.value()); - factory2_.monitor_->setPressure(0.4); + // Both become inactive so callback is expected + factory2_.monitor_->setPressure(0.3); timer_cb_(); EXPECT_FALSE(is_active); 
EXPECT_EQ(action_state, OverloadActionState::Inactive); EXPECT_EQ(2, cb_count); - EXPECT_EQ(0, active_gauge.value()); - EXPECT_EQ(40, pressure_gauge2.value()); + EXPECT_EQ(30, pressure_gauge2.value()); + + // Different triggers, both become active, only one callback expected + factory1_.monitor_->setPressure(0.97); + factory2_.monitor_->setPressure(0.96); + timer_cb_(); + EXPECT_TRUE(is_active); + EXPECT_EQ(action_state, OverloadActionState::Active); + EXPECT_EQ(3, cb_count); + EXPECT_EQ(97, pressure_gauge1.value()); + EXPECT_EQ(96, pressure_gauge2.value()); + + // Different triggers, both become inactive, only one callback expected + factory1_.monitor_->setPressure(0.41); + factory2_.monitor_->setPressure(0.42); + timer_cb_(); + EXPECT_FALSE(is_active); + EXPECT_EQ(action_state, OverloadActionState::Inactive); + EXPECT_EQ(4, cb_count); + EXPECT_EQ(41, pressure_gauge1.value()); + EXPECT_EQ(42, pressure_gauge2.value()); manager->stop(); } @@ -239,10 +288,12 @@ TEST_F(OverloadManagerImplTest, SkippedUpdates) { setDispatcherExpectation(); // Save the post callback instead of executing it. + // Note that this test works for only one resource. If using the default config, + // two events fire, so a list of all post_cb's between timer_cb_'s would need to be invoked. 
Event::PostCb post_cb; ON_CALL(dispatcher_, post(_)).WillByDefault(Invoke([&](Event::PostCb cb) { post_cb = cb; })); - auto manager(createOverloadManager(getConfig())); + auto manager(createOverloadManager(getConfigSimple())); manager->start(); Stats::Counter& skipped_updates = stats_.counter("overload.envoy.resource_monitors.fake_resource1.skipped_updates"); diff --git a/test/server/server_corpus/api_boost_crash b/test/server/server_corpus/api_boost_crash new file mode 100644 index 0000000000000..2dc13ace4237d --- /dev/null +++ b/test/server/server_corpus/api_boost_crash @@ -0,0 +1,138 @@ +node { + client_features: "&" +} +static_resources { + listeners { + name: " " + address { + pipe { + path: "aa\000" + } + } + transparent { + } + } +} +stats_sinks { + typed_config { + [type.googleapis.com/envoy.api.v2.route.Route] { + route { + retry_policy { + retriable_status_codes: 115 + retriable_status_codes: 116 + retriable_status_codes: 97 + retriable_status_codes: 116 + retriable_status_codes: 101 + retriable_status_codes: 113 + retriable_status_codes: 32 + retriable_status_codes: 123 + retriable_status_codes: 40 + retriable_status_codes: 36 + retriable_status_codes: 32 + retriable_status_codes: 42 + retriable_status_codes: 115 + retriable_status_codes: 116 + retriable_status_codes: 97 + retriable_status_codes: 116 + retriable_status_codes: 101 + retriable_status_codes: 113 + retriable_status_codes: 32 + retriable_status_codes: 123 + retriable_status_codes: 40 + retriable_status_codes: 36 + retriable_status_codes: 32 + retriable_status_codes: 32 + retriable_status_codes: 99 + retriable_status_codes: 108 + retriable_status_codes: 117 + retriable_status_codes: 115 + retriable_status_codes: 116 + retriable_status_codes: 101 + retriable_status_codes: 114 + retriable_status_codes: 123 + retriable_status_codes: 115 + } + } + } + } +} +stats_sinks { + typed_config { + [type.googleapis.com/envoy.api.v2.route.Route] { + route { + retry_policy { + retriable_status_codes: 115 + 
retriable_status_codes: 116 + retriable_status_codes: 97 + retriable_status_codes: 116 + retriable_status_codes: 105 + retriable_status_codes: 99 + retriable_status_codes: 95 + retriable_status_codes: 114 + retriable_status_codes: 101 + retriable_status_codes: 115 + retriable_status_codes: 111 + retriable_status_codes: 117 + retriable_status_codes: 114 + retriable_status_codes: 99 + retriable_status_codes: 65 + retriable_status_codes: 101 + retriable_status_codes: 32 + retriable_status_codes: 99 + retriable_status_codes: 115 + retriable_status_codes: 116 + retriable_status_codes: 97 + retriable_status_codes: 32 + retriable_status_codes: 32 + retriable_status_codes: 99 + retriable_status_codes: 115 + retriable_status_codes: 116 + retriable_status_codes: 97 + } + } + } + } +} +stats_sinks { + typed_config { + [type.googleapis.com/envoy.api.v2.route.Route] { + route { + retry_policy { + retriable_status_codes: 115 + retriable_status_codes: 116 + retriable_status_codes: 97 + retriable_status_codes: 116 + retriable_status_codes: 101 + retriable_status_codes: 113 + retriable_status_codes: 32 + retriable_status_codes: 123 + retriable_status_codes: 40 + retriable_status_codes: 36 + retriable_status_codes: 32 + retriable_status_codes: 42 + retriable_status_codes: 115 + retriable_status_codes: 116 + retriable_status_codes: 97 + retriable_status_codes: 116 + retriable_status_codes: 101 + retriable_status_codes: 113 + retriable_status_codes: 32 + retriable_status_codes: 123 + retriable_status_codes: 40 + retriable_status_codes: 36 + retriable_status_codes: 32 + retriable_status_codes: 32 + retriable_status_codes: 99 + retriable_status_codes: 108 + retriable_status_codes: 117 + retriable_status_codes: 115 + retriable_status_codes: 116 + retriable_status_codes: 101 + retriable_status_codes: 114 + retriable_status_codes: 123 + retriable_status_codes: 115 + } + } + } + } +} \ No newline at end of file diff --git 
a/test/server/server_corpus/clusterfuzz-testcase-minimized-config_fuzz_test-5186283155750912 b/test/server/server_corpus/clusterfuzz-testcase-minimized-config_fuzz_test-5186283155750912 new file mode 100644 index 0000000000000..d5f214057b15a --- /dev/null +++ b/test/server/server_corpus/clusterfuzz-testcase-minimized-config_fuzz_test-5186283155750912 @@ -0,0 +1,70 @@ +static_resources { + clusters { + name: "www.google.com" + connect_timeout { + nanos: 61 + } + http2_protocol_options { + initial_stream_window_size { + value: 917504 + } + initial_connection_window_size { + value: 1952382976 + } + allow_connect: true + max_outbound_control_frames { + value: 1952382976 + } + stream_error_on_invalid_http_messaging: true + custom_settings_parameters { + identifier { + value: 65536 + } + value { + value: 7536640 + } + } + custom_settings_parameters { + identifier { + value: 65536 + } + value { + value: 7536640 + } + } + } + alt_stat_name: ";" + load_assignment { + cluster_name: "domains" + policy { + hidden_envoy_deprecated_disable_overprovisioning: true + } + } + lrs_server { + path: ":" + } + } +} +dynamic_resources { +} +stats_sinks { + hidden_envoy_deprecated_config { + fields { + key: "fffffffffffffffffffffffffff" + value { + } + } + } +} +stats_sinks { +} +stats_sinks { + typed_config { + type_url: "type.googleapis.com/envoy.api.v2.route.Route" + value: "J\004\022\002\010\001J\005\n\003\022\0019J\004\022\002\010\001b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000" + } +} +admin { +} +enable_dispatcher_stats: true +header_prefix: "*" diff --git a/test/server/server_corpus/clusterfuzz-testcase-minimized-server_fuzz_test-5714049408172032 
b/test/server/server_corpus/clusterfuzz-testcase-minimized-server_fuzz_test-5714049408172032 new file mode 100644 index 0000000000000..db5e415d569b8 --- /dev/null +++ b/test/server/server_corpus/clusterfuzz-testcase-minimized-server_fuzz_test-5714049408172032 @@ -0,0 +1,18 @@ +cluster_manager { + load_stats_config { + api_type: GRPC + grpc_services { + google_grpc { + target_uri: "18446744073709551617" + stat_prefix: "2147483649" + channel_args { + args { + key: "\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000" + value { + } + } + } + } + } + } +} diff --git a/test/server/server_corpus/crash-ac725507195d840cdb90bed3079b877e6e9419e3 b/test/server/server_corpus/crash-ac725507195d840cdb90bed3079b877e6e9419e3 new file mode 100644 index 0000000000000..11fd783652774 --- /dev/null +++ b/test/server/server_corpus/crash-ac725507195d840cdb90bed3079b877e6e9419e3 @@ -0,0 +1,22 @@ +dynamic_resources { +} +cluster_manager { + local_cluster_name: "\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000" +} +hidden_envoy_deprecated_runtime { +} +admin { +} +stats_config { + use_all_default_tags { + value: true + } +} +layered_runtime { + layers { + disk_layer { + append_service_cluster: true + } + } +} +use_tcp_for_dns_lookups: true diff --git a/test/server/server_corpus/not_reached b/test/server/server_corpus/not_reached new file mode 100644 index 0000000000000..93b264ade6c91 --- /dev/null +++ b/test/server/server_corpus/not_reached @@ -0,0 +1 @@ +static_resources { clusters { name: " " connect_timeout { nanos: 4 } lb_policy: LOAD_BALANCING_POLICY_CONFIG } } \ No newline at end of file diff --git a/test/server/server_fuzz_test.cc b/test/server/server_fuzz_test.cc index d070444fdac83..4859db0e97cf9 100644 --- a/test/server/server_fuzz_test.cc +++ b/test/server/server_fuzz_test.cc @@ -3,17 
+3,18 @@ #include "envoy/config/bootstrap/v3/bootstrap.pb.h" #include "envoy/config/core/v3/address.pb.h" +#include "common/common/random_generator.h" #include "common/network/address_impl.h" #include "common/thread_local/thread_local_impl.h" #include "server/listener_hooks.h" -#include "server/proto_descriptors.h" #include "server/server.h" #include "test/common/runtime/utility.h" #include "test/fuzz/fuzz_runner.h" #include "test/integration/server.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/hot_restart.h" +#include "test/mocks/server/options.h" #include "test/mocks/stats/mocks.h" #include "test/test_common/environment.h" #include "test/test_common/test_time.h" @@ -100,7 +101,7 @@ DEFINE_PROTO_FUZZER(const envoy::config::bootstrap::v3::Bootstrap& input) { server = std::make_unique( init_manager, options, test_time.timeSystem(), std::make_shared("127.0.0.1"), hooks, restart, stats_store, - fakelock, component_factory, std::make_unique(), + fakelock, component_factory, std::make_unique(), thread_local_instance, Thread::threadFactoryForTest(), Filesystem::fileSystemForTest(), nullptr); } catch (const EnvoyException& ex) { diff --git a/test/server/server_test.cc b/test/server/server_test.cc index 36ea86675dc36..82803584ce149 100644 --- a/test/server/server_test.cc +++ b/test/server/server_test.cc @@ -1,20 +1,28 @@ #include #include "envoy/config/core/v3/base.pb.h" +#include "envoy/network/exception.h" +#include "envoy/server/bootstrap_extension_config.h" #include "common/common/assert.h" -#include "common/common/version.h" #include "common/network/address_impl.h" #include "common/network/listen_socket_impl.h" #include "common/network/socket_option_impl.h" +#include "common/protobuf/protobuf.h" #include "common/thread_local/thread_local_impl.h" +#include "common/version/version.h" #include "server/process_context_impl.h" #include "server/server.h" +#include "test/common/config/dummy_config.pb.h" #include "test/common/stats/stat_test_utility.h" 
#include "test/integration/server.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/bootstrap_extension_factory.h" +#include "test/mocks/server/hot_restart.h" +#include "test/mocks/server/instance.h" +#include "test/mocks/server/options.h" +#include "test/mocks/server/overload_manager.h" #include "test/mocks/stats/mocks.h" #include "test/test_common/environment.h" #include "test/test_common/registry.h" @@ -47,6 +55,7 @@ TEST(ServerInstanceUtil, flushHelper) { c.inc(); store.gauge("world", Stats::Gauge::ImportMode::Accumulate).set(5); store.histogram("histogram", Stats::Histogram::Unit::Unspecified); + store.textReadout("text").set("is important"); std::list sinks; InstanceUtil::flushMetricsToSinks(sinks, store); @@ -64,6 +73,10 @@ TEST(ServerInstanceUtil, flushHelper) { ASSERT_EQ(snapshot.gauges().size(), 1); EXPECT_EQ(snapshot.gauges()[0].get().name(), "world"); EXPECT_EQ(snapshot.gauges()[0].get().value(), 5); + + ASSERT_EQ(snapshot.textReadouts().size(), 1); + EXPECT_EQ(snapshot.textReadouts()[0].get().name(), "text"); + EXPECT_EQ(snapshot.textReadouts()[0].get().value(), "is important"); })); c.inc(); InstanceUtil::flushMetricsToSinks(sinks, store); @@ -77,6 +90,7 @@ TEST(ServerInstanceUtil, flushHelper) { EXPECT_TRUE(snapshot.counters().empty()); EXPECT_TRUE(snapshot.gauges().empty()); EXPECT_EQ(snapshot.histograms().size(), 1); + EXPECT_TRUE(snapshot.textReadouts().empty()); })); InstanceUtil::flushMetricsToSinks(sinks, mock_store); } @@ -177,10 +191,10 @@ class ServerInstanceImplTestBase { *init_manager_, options_, time_system_, std::make_shared("127.0.0.1"), hooks_, restart_, stats_store_, fakelock_, component_factory_, - std::make_unique>(), *thread_local_, + std::make_unique>(), *thread_local_, Thread::threadFactoryForTest(), Filesystem::fileSystemForTest(), std::move(process_context_)); - EXPECT_TRUE(server_->api().fileSystem().fileExists("/dev/null")); + 
EXPECT_TRUE(server_->api().fileSystem().fileExists(TestEnvironment::nullDevicePath())); } void initializeWithHealthCheckParams(const std::string& bootstrap_path, const double timeout, @@ -196,10 +210,10 @@ class ServerInstanceImplTestBase { *init_manager_, options_, time_system_, std::make_shared("127.0.0.1"), hooks_, restart_, stats_store_, fakelock_, component_factory_, - std::make_unique>(), *thread_local_, + std::make_unique>(), *thread_local_, Thread::threadFactoryForTest(), Filesystem::fileSystemForTest(), nullptr); - EXPECT_TRUE(server_->api().fileSystem().fileExists("/dev/null")); + EXPECT_TRUE(server_->api().fileSystem().fileExists(TestEnvironment::nullDevicePath())); } Thread::ThreadPtr startTestServer(const std::string& bootstrap_path, @@ -244,7 +258,7 @@ class ServerInstanceImplTestBase { testing::NiceMock options_; DefaultListenerHooks hooks_; testing::NiceMock restart_; - std::unique_ptr thread_local_; + ThreadLocal::InstanceImplPtr thread_local_; Stats::TestIsolatedStoreImpl stats_store_; Thread::MutexBasicLockable fakelock_; TestComponentFactory component_factory_; @@ -280,8 +294,9 @@ class CustomStatsSink : public Stats::Sink { class CustomStatsSinkFactory : public Server::Configuration::StatsSinkFactory { public: // StatsSinkFactory - Stats::SinkPtr createStatsSink(const Protobuf::Message&, Server::Instance& server) override { - return std::make_unique(server.stats()); + Stats::SinkPtr createStatsSink(const Protobuf::Message&, + Server::Configuration::ServerFactoryContext& server) override { + return std::make_unique(server.scope()); } ProtobufTypes::MessagePtr createEmptyConfigProto() override { @@ -464,7 +479,22 @@ TEST_P(ServerInstanceImplTest, Stats) { EXPECT_EQ(2L, TestUtility::findGauge(stats_store_, "server.concurrency")->value()); EXPECT_EQ(3L, TestUtility::findGauge(stats_store_, "server.hot_restart_epoch")->value()); -// This stat only works in this configuration. +// The ENVOY_BUG stat works in release mode. 
+#if defined(NDEBUG) + // Test exponential back-off on a fixed line ENVOY_BUG. + for (int i = 0; i < 16; i++) { + ENVOY_BUG(false, ""); + } + EXPECT_EQ(5L, TestUtility::findCounter(stats_store_, "server.envoy_bug_failures")->value()); + // Another ENVOY_BUG increments the counter. + ENVOY_BUG(false, "Testing envoy bug assertion failure detection in release build."); + EXPECT_EQ(6L, TestUtility::findCounter(stats_store_, "server.envoy_bug_failures")->value()); +#else + // The ENVOY_BUG macro aborts in debug mode. + EXPECT_DEATH(ENVOY_BUG(false, ""), ""); +#endif + +// The ASSERT stat only works in this configuration. #if defined(NDEBUG) && defined(ENVOY_LOG_DEBUG_ASSERT_IN_RELEASE) ASSERT(false, "Testing debug assertion failure detection in release build."); EXPECT_EQ(1L, TestUtility::findCounter(stats_store_, "server.debug_assertion_failures")->value()); @@ -576,6 +606,7 @@ TEST_P(ServerInstanceImplTest, ValidationRejectDynamic) { options_.service_cluster_name_ = "some_cluster_name"; options_.service_node_name_ = "some_node_name"; options_.reject_unknown_dynamic_fields_ = true; + options_.ignore_unknown_dynamic_fields_ = true; // reject takes precedence over ignore EXPECT_NO_THROW(initialize("test/server/test_data/server/empty_bootstrap.yaml")); EXPECT_THAT_THROWS_MESSAGE( server_->messageValidationContext().staticValidationVisitor().onUnknownField("foo"), @@ -642,6 +673,48 @@ TEST_P(ServerInstanceImplTest, DEPRECATED_FEATURE_TEST(LoadsV2BootstrapFromPbTex EXPECT_FALSE(server_->localInfo().node().hidden_envoy_deprecated_build_version().empty()); } +// Validate that bootstrap v3 pb_text with new fields loads fails if V2 config is specified. 
+TEST_P(ServerInstanceImplTest, FailToLoadV3ConfigWhenV2SelectedFromPbText) { + options_.bootstrap_version_ = 2; + + EXPECT_THROW_WITH_REGEX( + initialize("test/server/test_data/server/valid_v3_but_invalid_v2_bootstrap.pb_text"), + EnvoyException, "Unable to parse file"); +} + +// Validate that we correctly parse a V2 file when configured to do so. +TEST_P(ServerInstanceImplTest, DEPRECATED_FEATURE_TEST(LoadsV2ConfigWhenV2SelectedFromPbText)) { + options_.bootstrap_version_ = 2; + + initialize("test/server/test_data/server/valid_v2_but_invalid_v3_bootstrap.pb_text"); + EXPECT_EQ(server_->localInfo().node().id(), "bootstrap_id"); +} + +// Validate that we correctly parse a V3 file when configured to do so. +TEST_P(ServerInstanceImplTest, LoadsV3ConfigWhenV2SelectedFromPbText) { + options_.bootstrap_version_ = 3; + + initialize("test/server/test_data/server/valid_v3_but_invalid_v2_bootstrap.pb_text"); +} + +// Validate that bootstrap v2 pb_text with deprecated fields loads fails if V3 config is specified. +TEST_P(ServerInstanceImplTest, FailToLoadV2ConfigWhenV3SelectedFromPbText) { + options_.bootstrap_version_ = 3; + + EXPECT_THROW_WITH_REGEX( + initialize("test/server/test_data/server/valid_v2_but_invalid_v3_bootstrap.pb_text"), + EnvoyException, "Unable to parse file"); +} + +// Validate that we blow up on invalid version number. 
+TEST_P(ServerInstanceImplTest, InvalidBootstrapVersion) { + options_.bootstrap_version_ = 1; + + EXPECT_THROW_WITH_REGEX( + initialize("test/server/test_data/server/valid_v2_but_invalid_v3_bootstrap.pb_text"), + EnvoyException, "Unknown bootstrap version 1."); +} + TEST_P(ServerInstanceImplTest, LoadsBootstrapFromConfigProtoOptions) { options_.config_proto_.mutable_node()->set_id("foo"); initialize("test/server/test_data/server/node_bootstrap.yaml"); @@ -756,6 +829,14 @@ TEST_P(ServerInstanceImplTest, BootstrapClusterManagerInitializationFail) { EnvoyException, "cluster manager: duplicate cluster 'service_google'"); } +// Regression tests for SdsApi throwing exceptions in initialize(). +TEST_P(ServerInstanceImplTest, BadSdsConfigSource) { + EXPECT_THROW_WITH_MESSAGE( + initialize("test/server/test_data/server/bad_sds_config_source.yaml"), EnvoyException, + "envoy.config.core.v3.ApiConfigSource must have a statically defined non-EDS cluster: " + "'sds-grpc' does not exist, was added via api, or is an EDS cluster"); +} + // Test for protoc-gen-validate constraint on invalid timeout entry of a health check config entry. TEST_P(ServerInstanceImplTest, BootstrapClusterHealthCheckInvalidTimeout) { EXPECT_THROW_WITH_REGEX( @@ -811,11 +892,13 @@ namespace { void bindAndListenTcpSocket(const Network::Address::InstanceConstSharedPtr& address, const Network::Socket::OptionsSharedPtr& options) { auto socket = std::make_unique(address, options, true); + auto& os_sys_calls = Api::OsSysCallsSingleton::get(); // Some kernels erroneously allow `bind` without SO_REUSEPORT for addresses // with some other socket already listening on it, see #7636. - if (::listen(socket->ioHandle().fd(), 1) != 0) { + if (SOCKET_FAILURE(os_sys_calls.listen(socket->ioHandle().fd(), 1).rc_)) { // Mimic bind exception for the test simplicity. 
- throw Network::SocketBindException(fmt::format("cannot listen: {}", strerror(errno)), errno); + throw Network::SocketBindException(fmt::format("cannot listen: {}", errorDetails(errno)), + errno); } } } // namespace @@ -830,7 +913,7 @@ TEST_P(ServerInstanceImplTest, BootstrapNodeWithSocketOptions) { // First attempt to bind and listen socket should fail due to the lack of SO_REUSEPORT socket // options. EXPECT_THAT_THROWS_MESSAGE(bindAndListenTcpSocket(address, nullptr), EnvoyException, - HasSubstr(strerror(EADDRINUSE))); + HasSubstr(errorDetails(SOCKET_ERROR_ADDR_IN_USE))); // Second attempt should succeed as kernel allows multiple sockets to listen the same address iff // both of them use SO_REUSEPORT socket option. @@ -912,7 +995,7 @@ TEST_P(ServerInstanceImplTest, NoOptionsPassed) { server_.reset(new InstanceImpl(*init_manager_, options_, time_system_, std::make_shared("127.0.0.1"), hooks_, restart_, stats_store_, fakelock_, component_factory_, - std::make_unique>(), + std::make_unique>(), *thread_local_, Thread::threadFactoryForTest(), Filesystem::fileSystemForTest(), nullptr)), EnvoyException, @@ -993,6 +1076,53 @@ TEST_P(ServerInstanceImplTest, WithProcessContext) { EXPECT_FALSE(object_from_context.boolean_flag_); } +class FooBootstrapExtension : public BootstrapExtension {}; + +TEST_P(ServerInstanceImplTest, WithBootstrapExtensions) { + NiceMock mock_factory; + EXPECT_CALL(mock_factory, createEmptyConfigProto()).WillRepeatedly(Invoke([]() { + return std::make_unique(); + })); + EXPECT_CALL(mock_factory, name()).WillRepeatedly(Return("envoy_test.bootstrap.foo")); + EXPECT_CALL(mock_factory, createBootstrapExtension(_, _)) + .WillOnce(Invoke([](const Protobuf::Message& config, Configuration::ServerFactoryContext&) { + const auto* proto = dynamic_cast(&config); + EXPECT_NE(nullptr, proto); + EXPECT_EQ(proto->a(), "foo"); + return std::make_unique(); + })); + + Registry::InjectFactory registered_factory( + mock_factory); + + 
EXPECT_NO_THROW(initialize("test/server/test_data/server/bootstrap_extensions.yaml")); +} + +TEST_P(ServerInstanceImplTest, WithBootstrapExtensionsThrowingError) { + NiceMock mock_factory; + EXPECT_CALL(mock_factory, createEmptyConfigProto()).WillRepeatedly(Invoke([]() { + return std::make_unique(); + })); + EXPECT_CALL(mock_factory, name()).WillRepeatedly(Return("envoy_test.bootstrap.foo")); + EXPECT_CALL(mock_factory, createBootstrapExtension(_, _)) + .WillOnce(Invoke([](const Protobuf::Message&, + Configuration::ServerFactoryContext&) -> BootstrapExtensionPtr { + throw EnvoyException("Unable to initiate mock_bootstrap_extension."); + })); + + Registry::InjectFactory registered_factory( + mock_factory); + + EXPECT_THROW_WITH_REGEX(initialize("test/server/test_data/server/bootstrap_extensions.yaml"), + EnvoyException, "Unable to initiate mock_bootstrap_extension."); +} + +TEST_P(ServerInstanceImplTest, WithUnknownBootstrapExtensions) { + EXPECT_THROW_WITH_REGEX( + initialize("test/server/test_data/server/bootstrap_extensions.yaml"), EnvoyException, + "Didn't find a registered implementation for name: 'envoy_test.bootstrap.foo'"); +} + // Static configuration validation. We test with both allow/reject settings various aspects of // configuration from YAML. class StaticValidationTest @@ -1055,7 +1185,7 @@ TEST_P(StaticValidationTest, ClusterUnknownField) { // Custom StatsSink that registers both a Cluster update callback and Server lifecycle callback. 
class CallbacksStatsSink : public Stats::Sink, public Upstream::ClusterUpdateCallbacks { public: - CallbacksStatsSink(Server::Instance& server) + CallbacksStatsSink(Server::Configuration::ServerFactoryContext& server) : cluster_removal_cb_handle_( server.clusterManager().addThreadLocalClusterUpdateCallbacks(*this)), lifecycle_cb_handle_(server.lifecycleNotifier().registerCallback( @@ -1078,7 +1208,8 @@ class CallbacksStatsSink : public Stats::Sink, public Upstream::ClusterUpdateCal class CallbacksStatsSinkFactory : public Server::Configuration::StatsSinkFactory { public: // StatsSinkFactory - Stats::SinkPtr createStatsSink(const Protobuf::Message&, Server::Instance& server) override { + Stats::SinkPtr createStatsSink(const Protobuf::Message&, + Server::Configuration::ServerFactoryContext& server) override { return std::make_unique(server); } diff --git a/test/server/test_data/server/bad_sds_config_source.yaml b/test/server/test_data/server/bad_sds_config_source.yaml new file mode 100644 index 0000000000000..f5dab67400086 --- /dev/null +++ b/test/server/test_data/server/bad_sds_config_source.yaml @@ -0,0 +1,32 @@ +node: + id: bootstrap_id + cluster: bootstrap_cluster +static_resources: + clusters: + - name: xds-grpc + connect_timeout: 0.25s + type: STRICT_DNS + connect_timeout: 1s + load_assignment: + cluster_name: xds-grpc + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: localhost + port_value: 12345 + transport_socket: + name: "envoy.transport_sockets.tls" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext" + common_tls_context: + tls_certificate_sds_secret_configs: + - name: default + sds_config: + api_config_source: + api_type: GRPC + grpc_services: + envoy_grpc: + cluster_name: "sds-grpc" + validation_context: {} diff --git a/test/server/test_data/server/bootstrap_extensions.yaml b/test/server/test_data/server/bootstrap_extensions.yaml new file mode 100644 index 
0000000000000..8a85583403c9c --- /dev/null +++ b/test/server/test_data/server/bootstrap_extensions.yaml @@ -0,0 +1,5 @@ +bootstrap_extensions: + - name: envoy_test.bootstrap.foo + typed_config: + "@type": type.googleapis.com/test.common.config.DummyConfig + a: foo diff --git a/test/server/test_data/server/callbacks_stats_sink_bootstrap.yaml b/test/server/test_data/server/callbacks_stats_sink_bootstrap.yaml index cf8108dd2d7a7..3a065d86580d3 100644 --- a/test/server/test_data/server/callbacks_stats_sink_bootstrap.yaml +++ b/test/server/test_data/server/callbacks_stats_sink_bootstrap.yaml @@ -5,7 +5,7 @@ node: zone: bootstrap_zone sub_zone: bootstrap_sub_zone admin: - access_log_path: /dev/null + access_log_path: {{ null_device_path }} address: socket_address: address: {{ ntop_ip_loopback_address }} diff --git a/test/server/test_data/server/cluster_dupe_bootstrap.yaml b/test/server/test_data/server/cluster_dupe_bootstrap.yaml index b4c5422108c91..0bff93617c652 100644 --- a/test/server/test_data/server/cluster_dupe_bootstrap.yaml +++ b/test/server/test_data/server/cluster_dupe_bootstrap.yaml @@ -1,5 +1,5 @@ admin: - access_log_path: /dev/null + access_log_path: {{ null_device_path }} address: socket_address: address: {{ ntop_ip_loopback_address }} diff --git a/test/server/test_data/server/cluster_health_check_bootstrap.yaml b/test/server/test_data/server/cluster_health_check_bootstrap.yaml index 7d928f9ca4335..0d285796e4b18 100644 --- a/test/server/test_data/server/cluster_health_check_bootstrap.yaml +++ b/test/server/test_data/server/cluster_health_check_bootstrap.yaml @@ -1,5 +1,5 @@ admin: - access_log_path: /dev/null + access_log_path: {{ null_device_path }} address: socket_address: address: {{ ntop_ip_loopback_address }} @@ -14,4 +14,4 @@ static_resources: unhealthy_threshold: 1 healthy_threshold: 1 http_health_check: - path: "/" \ No newline at end of file + path: "/" diff --git a/test/server/test_data/server/node_bootstrap.pb_text 
b/test/server/test_data/server/node_bootstrap.pb_text index 5f184a784d134..f47df39a83510 100644 --- a/test/server/test_data/server/node_bootstrap.pb_text +++ b/test/server/test_data/server/node_bootstrap.pb_text @@ -7,7 +7,7 @@ node { } } admin { - access_log_path: "/dev/null" + access_log_path: "{{ null_device_path }}" address { socket_address { address: "{{ ntop_ip_loopback_address }}" diff --git a/test/server/test_data/server/node_bootstrap.yaml b/test/server/test_data/server/node_bootstrap.yaml index 2b9f69e6df7a1..bce1b610dc7ce 100644 --- a/test/server/test_data/server/node_bootstrap.yaml +++ b/test/server/test_data/server/node_bootstrap.yaml @@ -5,7 +5,7 @@ node: zone: bootstrap_zone sub_zone: bootstrap_sub_zone admin: - access_log_path: /dev/null + access_log_path: {{ null_device_path }} address: socket_address: address: {{ ntop_ip_loopback_address }} diff --git a/test/server/test_data/server/node_bootstrap_no_admin_port.yaml b/test/server/test_data/server/node_bootstrap_no_admin_port.yaml index 54fbe9a01e884..a7a602ccbdb82 100644 --- a/test/server/test_data/server/node_bootstrap_no_admin_port.yaml +++ b/test/server/test_data/server/node_bootstrap_no_admin_port.yaml @@ -5,4 +5,4 @@ node: zone: bootstrap_zone sub_zone: bootstrap_sub_zone admin: - access_log_path: /dev/null + access_log_path: {{ null_device_path }} diff --git a/test/server/test_data/server/node_bootstrap_with_admin_socket_options.yaml b/test/server/test_data/server/node_bootstrap_with_admin_socket_options.yaml index c13b6d7838382..77ba41f9985da 100644 --- a/test/server/test_data/server/node_bootstrap_with_admin_socket_options.yaml +++ b/test/server/test_data/server/node_bootstrap_with_admin_socket_options.yaml @@ -5,7 +5,7 @@ node: zone: bootstrap_zone sub_zone: bootstrap_sub_zone admin: - access_log_path: /dev/null + access_log_path: {{ null_device_path }} address: socket_address: address: {{ ntop_ip_loopback_address }} diff --git a/test/server/test_data/server/proxy_version_bootstrap.yaml 
b/test/server/test_data/server/proxy_version_bootstrap.yaml index 5af740e874cc2..253a7a7e3f11b 100644 --- a/test/server/test_data/server/proxy_version_bootstrap.yaml +++ b/test/server/test_data/server/proxy_version_bootstrap.yaml @@ -5,7 +5,7 @@ node: zone: bootstrap_zone sub_zone: bootstrap_sub_zone admin: - access_log_path: /dev/null + access_log_path: {{ null_device_path }} address: socket_address: address: {{ ntop_ip_loopback_address }} diff --git a/test/server/test_data/server/stats_sink_bootstrap.yaml b/test/server/test_data/server/stats_sink_bootstrap.yaml index cb5b85e2a5a76..ebd3c531d507c 100644 --- a/test/server/test_data/server/stats_sink_bootstrap.yaml +++ b/test/server/test_data/server/stats_sink_bootstrap.yaml @@ -5,7 +5,7 @@ node: zone: bootstrap_zone sub_zone: bootstrap_sub_zone admin: - access_log_path: /dev/null + access_log_path: {{ null_device_path }} address: socket_address: address: {{ ntop_ip_loopback_address }} diff --git a/test/server/test_data/server/valid_v3_but_invalid_v2_bootstrap.pb_text b/test/server/test_data/server/valid_v3_but_invalid_v2_bootstrap.pb_text new file mode 100644 index 0000000000000..f2134dd754fa4 --- /dev/null +++ b/test/server/test_data/server/valid_v3_but_invalid_v2_bootstrap.pb_text @@ -0,0 +1,9 @@ +static_resources { + clusters { + name: "cluster" + ignore_health_on_host_removal: true + connect_timeout { + seconds: 1 + } + } +} diff --git a/test/server/utility.h b/test/server/utility.h index 726d427483dd3..fafc08ae0500f 100644 --- a/test/server/utility.h +++ b/test/server/utility.h @@ -12,9 +12,10 @@ namespace Envoy { namespace Server { namespace { -inline envoy::config::listener::v3::Listener parseListenerFromV2Yaml(const std::string& yaml) { +inline envoy::config::listener::v3::Listener parseListenerFromV3Yaml(const std::string& yaml, + bool avoid_boosting = true) { envoy::config::listener::v3::Listener listener; - TestUtility::loadFromYaml(yaml, listener, true); + TestUtility::loadFromYaml(yaml, listener, true, 
avoid_boosting); return listener; } diff --git a/test/server/worker_impl_test.cc b/test/server/worker_impl_test.cc index 02afcc0a50fdc..6fd4291f2d9a2 100644 --- a/test/server/worker_impl_test.cc +++ b/test/server/worker_impl_test.cc @@ -1,10 +1,14 @@ +#include "envoy/network/exception.h" + #include "common/api/api_impl.h" #include "common/event/dispatcher_impl.h" #include "server/worker_impl.h" #include "test/mocks/network/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/guard_dog.h" +#include "test/mocks/server/instance.h" +#include "test/mocks/server/overload_manager.h" #include "test/mocks/thread_local/mocks.h" #include "test/test_common/utility.h" diff --git a/test/test_common/BUILD b/test/test_common/BUILD index b735d286130be..7b0a5c972382c 100644 --- a/test/test_common/BUILD +++ b/test/test_common/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_basic_cc_library", @@ -9,6 +7,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_basic_cc_library( @@ -25,6 +25,7 @@ envoy_cc_test_library( hdrs = ["environment.h"], external_deps = [ "abseil_optional", + "abseil_symbolize", "bazel_runfiles", ], deps = [ @@ -39,7 +40,10 @@ envoy_cc_test_library( "//source/common/network:utility_lib", "//source/server:options_lib", "//test/common/runtime:utility_lib", - ], + ] + select({ + "//bazel:disable_signal_trace": [], + "//conditions:default": ["//source/common/signal:sigaction_lib"], + }), ) envoy_cc_test_library( @@ -51,12 +55,23 @@ envoy_cc_test_library( "//include/envoy/network:filter_interface", "//source/common/common:assert_lib", "//source/common/network:address_lib", + "//source/common/network:listen_socket_lib", "//source/common/network:raw_buffer_socket_lib", + "//source/common/network:socket_option_factory_lib", "//source/common/network:utility_lib", "//source/common/runtime:runtime_lib", ], ) +envoy_cc_test( + name = "network_utility_test", + srcs = 
["network_utility_test.cc"], + deps = [ + ":environment_lib", + ":network_utility_lib", + ], +) + envoy_cc_test_library( name = "contention_lib", srcs = ["contention.cc"], @@ -112,6 +127,8 @@ envoy_cc_test_library( "//source/common/common:empty_string", "//source/common/common:thread_lib", "//source/common/common:utility_lib", + "//source/common/config:decoded_resource_lib", + "//source/common/config:opaque_resource_decoder_lib", "//source/common/config:version_converter_lib", "//source/common/filesystem:directory_lib", "//source/common/filesystem:filesystem_lib", @@ -251,6 +268,7 @@ envoy_cc_test_library( deps = [ ":only_one_thread_lib", ":test_time_system_interface", + ":utility_lib", "//source/common/event:event_impl_base_lib", "//source/common/event:real_time_system_lib", "//source/common/event:timer_lib", @@ -265,6 +283,7 @@ envoy_cc_test( ":utility_lib", "//source/common/event:libevent_scheduler_lib", "//test/mocks/event:event_mocks", + "//test/test_common:test_runtime_lib", ], ) @@ -277,3 +296,9 @@ envoy_cc_test( ":utility_lib", ], ) + +envoy_basic_cc_library( + name = "test_version_linkstamp", + srcs = ["test_version_linkstamp.cc"], + alwayslink = 1, +) diff --git a/test/test_common/environment.cc b/test/test_common/environment.cc index 87377b7022c70..9008d13091f70 100644 --- a/test/test_common/environment.cc +++ b/test/test_common/environment.cc @@ -5,7 +5,6 @@ #include #include #include -#include #include #include "envoy/common/platform.h" @@ -17,12 +16,20 @@ #include "common/common/utility.h" #include "common/filesystem/directory.h" +#include "absl/container/node_hash_map.h" + +#ifdef ENVOY_HANDLE_SIGNALS +#include "common/signal/signal_action.h" +#endif + #include "server/options_impl.h" #include "test/test_common/file_system_for_test.h" #include "test/test_common/network_utility.h" +#include "absl/debugging/symbolize.h" #include "absl/strings/match.h" +#include "absl/strings/str_format.h" #include "gtest/gtest.h" #include "spdlog/spdlog.h" @@ -38,13 
+45,13 @@ std::string makeTempDir(std::string basename_template) { std::string name_template = "c:\\Windows\\TEMP\\" + basename_template; char* dirname = ::_mktemp(&name_template[0]); RELEASE_ASSERT(dirname != nullptr, fmt::format("failed to create tempdir from template: {} {}", - name_template, strerror(errno))); + name_template, errorDetails(errno))); TestEnvironment::createPath(dirname); #else std::string name_template = "/tmp/" + basename_template; char* dirname = ::mkdtemp(&name_template[0]); RELEASE_ASSERT(dirname != nullptr, fmt::format("failed to create tempdir from template: {} {}", - name_template, strerror(errno))); + name_template, errorDetails(errno))); #endif return std::string(dirname); } @@ -149,6 +156,7 @@ void TestEnvironment::renameFile(const std::string& old_name, const std::string& #ifdef WIN32 // use MoveFileEx, since ::rename will not overwrite an existing file. See // https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/rename-wrename?view=vs-2017 + // Note MoveFileEx cannot overwrite a directory as documented, nor a symlink, apparently. const BOOL rc = ::MoveFileEx(old_name.c_str(), new_name.c_str(), MOVEFILE_REPLACE_EXISTING); ASSERT_NE(0, rc); #else @@ -188,6 +196,31 @@ std::string TestEnvironment::getCheckedEnvVar(const std::string& var) { return optional.value(); } +void TestEnvironment::initializeTestMain(char* program_name) { +#ifdef WIN32 + _set_abort_behavior(0, _WRITE_ABORT_MSG | _CALL_REPORTFAULT); + + _set_invalid_parameter_handler([](const wchar_t* expression, const wchar_t* function, + const wchar_t* file, unsigned int line, + uintptr_t pReserved) {}); + + WSADATA wsa_data; + const WORD version_requested = MAKEWORD(2, 2); + RELEASE_ASSERT(WSAStartup(version_requested, &wsa_data) == 0, ""); +#endif + +#ifdef __APPLE__ + UNREFERENCED_PARAMETER(program_name); +#else + absl::InitializeSymbolizer(program_name); +#endif + +#ifdef ENVOY_HANDLE_SIGNALS + // Enabled by default. 
Control with "bazel --define=signal_trace=disabled" + static Envoy::SignalAction handle_sigs; +#endif +} + void TestEnvironment::initializeOptions(int argc, char** argv) { argc_ = argc; argv_ = argv; @@ -212,10 +245,12 @@ std::vector TestEnvironment::getIpVersionsForTest() if (TestEnvironment::shouldRunTestForIpVersion(version)) { parameters.push_back(version); if (!Network::Test::supportsIpVersion(version)) { - ENVOY_LOG_TO_LOGGER(Logger::Registry::getLog(Logger::Id::testing), warn, - "Testing with IP{} addresses may not be supported on this machine. If " - "testing fails, set the environment variable ENVOY_IP_TEST_VERSIONS.", - Network::Test::addressVersionAsString(version)); + const auto version_string = Network::Test::addressVersionAsString(version); + ENVOY_LOG_TO_LOGGER( + Logger::Registry::getLog(Logger::Id::testing), warn, + "Testing with IP{} addresses may not be supported on this machine. If " + "testing fails, set the environment variable ENVOY_IP_TEST_VERSIONS to 'v{}only'.", + version_string, version_string); } } } @@ -232,6 +267,14 @@ const std::string& TestEnvironment::temporaryDirectory() { CONSTRUCT_ON_FIRST_USE(std::string, getTemporaryDirectory()); } +const std::string& TestEnvironment::nullDevicePath() { +#ifdef WIN32 + CONSTRUCT_ON_FIRST_USE(std::string, "NUL"); +#else + CONSTRUCT_ON_FIRST_USE(std::string, "/dev/null"); +#endif +} + std::string TestEnvironment::runfilesDirectory(const std::string& workspace) { RELEASE_ASSERT(runfiles_ != nullptr, ""); return runfiles_->Rlocation(workspace); @@ -248,7 +291,7 @@ const std::string TestEnvironment::unixDomainSocketDirectory() { std::string TestEnvironment::substitute(const std::string& str, Network::Address::IpVersion version) { - const std::unordered_map path_map = { + const absl::node_hash_map path_map = { {"test_tmpdir", TestEnvironment::temporaryDirectory()}, {"test_udsdir", TestEnvironment::unixDomainSocketDirectory()}, {"test_rundir", runfiles_ != nullptr ? 
TestEnvironment::runfilesDirectory() : "invalid"}, @@ -260,6 +303,10 @@ std::string TestEnvironment::substitute(const std::string& str, out_json_string = std::regex_replace(out_json_string, port_regex, it.second); } + // Substitute platform specific null device. + const std::regex null_device_regex(R"(\{\{ null_device_path \}\})"); + out_json_string = std::regex_replace(out_json_string, null_device_regex, nullDevicePath()); + // Substitute IP loopback addresses. const std::regex loopback_address_regex(R"(\{\{ ip_loopback_address \}\})"); out_json_string = std::regex_replace(out_json_string, loopback_address_regex, @@ -348,7 +395,7 @@ std::string TestEnvironment::temporaryFileSubstitute(const std::string& path, const std::string out_json_path = TestEnvironment::temporaryPath(name) + ".with.ports" + extension; { - std::ofstream out_json_file(out_json_path); + std::ofstream out_json_file(out_json_path, std::ios::binary); out_json_file << out_json_string; } return out_json_path; diff --git a/test/test_common/environment.h b/test/test_common/environment.h index ed9886ef63afc..06a97895bb4db 100644 --- a/test/test_common/environment.h +++ b/test/test_common/environment.h @@ -2,7 +2,6 @@ #include #include -#include #include #include "envoy/network/address.h" @@ -10,6 +9,7 @@ #include "common/json/json_loader.h" +#include "absl/container/node_hash_map.h" #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" #include "absl/types/optional.h" @@ -18,9 +18,16 @@ namespace Envoy { class TestEnvironment { public: - using PortMap = std::unordered_map; + using PortMap = absl::node_hash_map; - using ParamMap = std::unordered_map; + using ParamMap = absl::node_hash_map; + + /** + * Perform common initialization steps needed to run a test binary. This + * method should be called first in all test main functions. 
+ * @param program_name argv[0] test program is invoked with + */ + static void initializeTestMain(char* program_name); /** * Initialize command-line options for later access by tests in getOptions(). @@ -80,6 +87,23 @@ class TestEnvironment { return absl::StrCat(temporaryDirectory(), "/", path); } + /** + * Obtain platform specific null device path + * @return const std::string& null device path + */ + static const std::string& nullDevicePath(); + + /** + * Obtain platform specific new line character(s) + * @return absl::string_view platform specific new line character(s) + */ + static constexpr absl::string_view newLine +#ifdef WIN32 + {"\r\n"}; +#else + {"\n"}; +#endif + /** * Obtain read-only test input data directory. * @param workspace the name of the Bazel workspace where the input data is. diff --git a/test/test_common/global.h b/test/test_common/global.h index 3628180bd6fc7..872f81e520eba 100644 --- a/test/test_common/global.h +++ b/test/test_common/global.h @@ -88,7 +88,8 @@ class Globals { std::string describeActiveSingletonsHelper(); Thread::MutexBasicLockable map_mutex_; - absl::flat_hash_map> singleton_map_ GUARDED_BY(map_mutex_); + absl::flat_hash_map> + singleton_map_ ABSL_GUARDED_BY(map_mutex_); }; /** diff --git a/test/test_common/logging.cc b/test/test_common/logging.cc index 30da5cfacaefa..8e398ce529474 100644 --- a/test/test_common/logging.cc +++ b/test/test_common/logging.cc @@ -2,6 +2,8 @@ #include "common/common/assert.h" +#include "absl/synchronization/mutex.h" + namespace Envoy { LogLevelSetter::LogLevelSetter(spdlog::level::level_enum log_level) { @@ -22,14 +24,19 @@ LogLevelSetter::~LogLevelSetter() { } LogRecordingSink::LogRecordingSink(Logger::DelegatingLogSinkSharedPtr log_sink) - : Logger::SinkDelegate(log_sink) {} -LogRecordingSink::~LogRecordingSink() = default; + : Logger::SinkDelegate(log_sink) { + setDelegate(); +} + +LogRecordingSink::~LogRecordingSink() { restoreDelegate(); } void LogRecordingSink::log(absl::string_view msg) { 
- previous_delegate()->log(msg); + previousDelegate()->log(msg); + + absl::MutexLock ml(&mtx_); messages_.push_back(std::string(msg)); } -void LogRecordingSink::flush() { previous_delegate()->flush(); } +void LogRecordingSink::flush() { previousDelegate()->flush(); } } // namespace Envoy diff --git a/test/test_common/logging.h b/test/test_common/logging.h index 686ef5618ccad..d2c79a0a59018 100644 --- a/test/test_common/logging.h +++ b/test/test_common/logging.h @@ -8,6 +8,7 @@ #include "absl/strings/str_join.h" #include "absl/strings/str_split.h" +#include "absl/synchronization/mutex.h" #include "spdlog/spdlog.h" namespace Envoy { @@ -58,7 +59,8 @@ class LogRecordingSink : public Logger::SinkDelegate { const std::vector& messages() const { return messages_; } private: - std::vector messages_; + absl::Mutex mtx_; + std::vector messages_ ABSL_GUARDED_BY(mtx_); }; using StringPair = std::pair; @@ -92,7 +94,7 @@ using ExpectedLogMessages = std::vector; ASSERT_FALSE(expected_messages.empty()) << "Expected messages cannot be empty."; \ Envoy::LogLevelSetter save_levels(spdlog::level::trace); \ Envoy::Logger::DelegatingLogSinkSharedPtr sink_ptr = Envoy::Logger::Registry::getSink(); \ - sink_ptr->set_should_escape(escaped); \ + sink_ptr->setShouldEscape(escaped); \ Envoy::LogRecordingSink log_recorder(sink_ptr); \ stmt; \ if (log_recorder.messages().empty()) { \ diff --git a/test/test_common/network_utility.cc b/test/test_common/network_utility.cc index 1f40191733f15..ca0dc51c38aee 100644 --- a/test/test_common/network_utility.cc +++ b/test/test_common/network_utility.cc @@ -5,12 +5,12 @@ #include "envoy/common/platform.h" -#include "common/api/os_sys_calls_impl.h" #include "common/common/assert.h" #include "common/common/fmt.h" #include "common/network/address_impl.h" #include "common/network/listen_socket_impl.h" #include "common/network/raw_buffer_socket.h" +#include "common/network/socket_option_factory.h" #include "common/network/utility.h" #include 
"common/runtime/runtime_impl.h" @@ -21,50 +21,46 @@ namespace Network { namespace Test { Address::InstanceConstSharedPtr findOrCheckFreePort(Address::InstanceConstSharedPtr addr_port, - Address::SocketType type) { + Socket::Type type) { if (addr_port == nullptr || addr_port->type() != Address::Type::Ip) { ADD_FAILURE() << "Not an internet address: " << (addr_port == nullptr ? "nullptr" : addr_port->asString()); return nullptr; } - IoHandlePtr io_handle = addr_port->socket(type); + SocketImpl sock(type, addr_port); // Not setting REUSEADDR, therefore if the address has been recently used we won't reuse it here. // However, because we're going to use the address while checking if it is available, we'll need // to set REUSEADDR on listener sockets created by tests using an address validated by this means. - Api::SysCallIntResult result = addr_port->bind(io_handle->fd()); + Api::SysCallIntResult result = sock.bind(addr_port); const char* failing_fn = nullptr; if (result.rc_ != 0) { failing_fn = "bind"; - } else if (type == Address::SocketType::Stream) { + } else if (type == Socket::Type::Stream) { // Try listening on the port also, if the type is TCP. - result = Api::OsSysCallsSingleton::get().listen(io_handle->fd(), 1); + result = sock.listen(1); if (result.rc_ != 0) { failing_fn = "listen"; } } if (failing_fn != nullptr) { - if (result.errno_ == EADDRINUSE) { + if (result.errno_ == SOCKET_ERROR_ADDR_IN_USE) { // The port is already in use. Perfectly normal. return nullptr; - } else if (result.errno_ == EACCES) { + } else if (result.errno_ == SOCKET_ERROR_ACCESS) { // A privileged port, and we don't have privileges. Might want to log this. return nullptr; } // Unexpected failure. 
ADD_FAILURE() << failing_fn << " failed for '" << addr_port->asString() - << "' with error: " << strerror(result.errno_) << " (" << result.errno_ << ")"; + << "' with error: " << errorDetails(result.errno_) << " (" << result.errno_ + << ")"; return nullptr; } - // If the port we bind is zero, then the OS will pick a free port for us (assuming there are - // any), and we need to find out the port number that the OS picked so we can return it. - if (addr_port->ip()->port() == 0) { - return Address::addressFromFd(io_handle->fd()); - } - return addr_port; + return sock.localAddress(); } Address::InstanceConstSharedPtr findOrCheckFreePort(const std::string& addr_port, - Address::SocketType type) { + Socket::Type type) { auto instance = Utility::parseInternetAddressAndPort(addr_port); if (instance != nullptr) { instance = findOrCheckFreePort(instance, type); @@ -74,35 +70,35 @@ Address::InstanceConstSharedPtr findOrCheckFreePort(const std::string& addr_port return instance; } -const std::string getLoopbackAddressUrlString(const Address::IpVersion version) { +std::string getLoopbackAddressUrlString(const Address::IpVersion version) { if (version == Address::IpVersion::v6) { return std::string("[::1]"); } return std::string("127.0.0.1"); } -const std::string getLoopbackAddressString(const Address::IpVersion version) { +std::string getLoopbackAddressString(const Address::IpVersion version) { if (version == Address::IpVersion::v6) { return std::string("::1"); } return std::string("127.0.0.1"); } -const std::string getAnyAddressUrlString(const Address::IpVersion version) { +std::string getAnyAddressUrlString(const Address::IpVersion version) { if (version == Address::IpVersion::v6) { return std::string("[::]"); } return std::string("0.0.0.0"); } -const std::string getAnyAddressString(const Address::IpVersion version) { +std::string getAnyAddressString(const Address::IpVersion version) { if (version == Address::IpVersion::v6) { return std::string("::"); } return 
std::string("0.0.0.0"); } -const std::string addressVersionAsString(const Address::IpVersion version) { +std::string addressVersionAsString(const Address::IpVersion version) { if (version == Address::IpVersion::v4) { return std::string("v4"); } @@ -148,15 +144,8 @@ Address::InstanceConstSharedPtr getAnyAddress(const Address::IpVersion version, } bool supportsIpVersion(const Address::IpVersion version) { - Address::InstanceConstSharedPtr addr = getCanonicalLoopbackAddress(version); - IoHandlePtr io_handle = addr->socket(Address::SocketType::Stream); - if (0 != addr->bind(io_handle->fd()).rc_) { - // Socket bind failed. - RELEASE_ASSERT(io_handle->close().err_ == nullptr, ""); - return false; - } - RELEASE_ASSERT(io_handle->close().err_ == nullptr, ""); - return true; + return Network::SocketInterfaceSingleton::get().ipFamilySupported( + version == Address::IpVersion::v4 ? AF_INET : AF_INET6); } std::string ipVersionToDnsFamily(Network::Address::IpVersion version) { @@ -171,19 +160,25 @@ std::string ipVersionToDnsFamily(Network::Address::IpVersion version) { NOT_REACHED_GCOVR_EXCL_LINE; } -std::pair -bindFreeLoopbackPort(Address::IpVersion version, Address::SocketType type) { +std::pair +bindFreeLoopbackPort(Address::IpVersion version, Socket::Type type, bool reuse_port) { Address::InstanceConstSharedPtr addr = getCanonicalLoopbackAddress(version); - IoHandlePtr io_handle = addr->socket(type); - Api::SysCallIntResult result = addr->bind(io_handle->fd()); + SocketPtr sock = std::make_unique(type, addr); + if (reuse_port) { + sock->addOptions(SocketOptionFactory::buildReusePortOptions()); + Socket::applyOptions(sock->options(), *sock, + envoy::config::core::v3::SocketOption::STATE_PREBIND); + } + Api::SysCallIntResult result = sock->bind(addr); if (0 != result.rc_) { - io_handle->close(); + sock->close(); std::string msg = fmt::format("bind failed for address {} with error: {} ({})", - addr->asString(), strerror(result.errno_), result.errno_); + addr->asString(), 
errorDetails(result.errno_), result.errno_); ADD_FAILURE() << msg; throw EnvoyException(msg); } - return std::make_pair(Address::addressFromFd(io_handle->fd()), std::move(io_handle)); + + return std::make_pair(sock->localAddress(), std::move(sock)); } TransportSocketPtr createRawBufferSocket() { return std::make_unique(); } @@ -228,9 +223,7 @@ Api::IoCallUint64Result readFromSocket(IoHandle& handle, const Address::Instance UdpSyncPeer::UdpSyncPeer(Network::Address::IpVersion version) : socket_( std::make_unique(getCanonicalLoopbackAddress(version), nullptr, true)) { - RELEASE_ASSERT( - Api::OsSysCallsSingleton::get().setsocketblocking(socket_->ioHandle().fd(), true).rc_ != -1, - ""); + RELEASE_ASSERT(socket_->setBlockingForTest(true).rc_ != -1, ""); } void UdpSyncPeer::write(const std::string& buffer, const Network::Address::Instance& peer) { diff --git a/test/test_common/network_utility.h b/test/test_common/network_utility.h index 965ec8b6dfadd..d1d8d4cf32fa1 100644 --- a/test/test_common/network_utility.h +++ b/test/test_common/network_utility.h @@ -25,7 +25,7 @@ namespace Test { * listening, else nullptr if the address and port are not free. */ Address::InstanceConstSharedPtr findOrCheckFreePort(Address::InstanceConstSharedPtr addr_port, - Address::SocketType type); + Socket::Type type); /** * As above, but addr_port is specified as a string. For example: @@ -35,14 +35,14 @@ Address::InstanceConstSharedPtr findOrCheckFreePort(Address::InstanceConstShared * - [::]:45678 Check whether a specific port on all local IPv6 addresses is free. */ Address::InstanceConstSharedPtr findOrCheckFreePort(const std::string& addr_port, - Address::SocketType type); + Socket::Type type); /** * Get a URL ready IP loopback address as a string. * @param version IP address version of loopback address. * @return std::string URL ready loopback address as a string. 
*/ -const std::string getLoopbackAddressUrlString(const Address::IpVersion version); +std::string getLoopbackAddressUrlString(const Address::IpVersion version); /** * Get a IP loopback address as a string. There are no square brackets around IPv6 addresses, this @@ -50,28 +50,28 @@ const std::string getLoopbackAddressUrlString(const Address::IpVersion version); * @param version IP address version of loopback address. * @return std::string loopback address as a string. */ -const std::string getLoopbackAddressString(const Address::IpVersion version); +std::string getLoopbackAddressString(const Address::IpVersion version); /** * Get a URL ready IP any address as a string. * @param version IP address version of any address. * @return std::string URL ready any address as a string. */ -const std::string getAnyAddressUrlString(const Address::IpVersion version); +std::string getAnyAddressUrlString(const Address::IpVersion version); /** * Get an IP any address as a string. * @param version IP address version of any address. * @return std::string any address as a string. */ -const std::string getAnyAddressString(const Address::IpVersion version); +std::string getAnyAddressString(const Address::IpVersion version); /** * Return a string version of enum IpVersion version. * @param version IP address version. * @return std::string string version of IpVersion. */ -const std::string addressVersionAsString(const Address::IpVersion version); +std::string addressVersionAsString(const Address::IpVersion version); /** * Returns a loopback address for the specified IP version (127.0.0.1 for IPv4 and ::1 for IPv6). @@ -108,14 +108,14 @@ std::string ipVersionToDnsFamily(Network::Address::IpVersion version); /** * Bind a socket to a free port on a loopback address, and return the socket's fd and bound address. - * Enables a test server to reliably "select" a port to listen on. Note that the socket option - * SO_REUSEADDR has NOT been set on the socket. 
+ * Enables a test server to reliably "select" a port to listen on. * @param version the IP version of the loopback address. * @param type the type of socket to be bound. + * @param reuse_port specifies whether the socket option SO_REUSEADDR has been set on the socket. * @returns the address and the fd of the socket bound to that address. */ -std::pair -bindFreeLoopbackPort(Address::IpVersion version, Address::SocketType type); +std::pair +bindFreeLoopbackPort(Address::IpVersion version, Socket::Type type, bool reuse_port = false); /** * Create a transport socket for testing purposes. diff --git a/test/test_common/network_utility_test.cc b/test/test_common/network_utility_test.cc index 195fc0991b116..dc40f1d28f2f3 100644 --- a/test/test_common/network_utility_test.cc +++ b/test/test_common/network_utility_test.cc @@ -18,7 +18,7 @@ class NetworkUtilityTest : public testing::TestWithParam { }; INSTANTIATE_TEST_SUITE_P(IpVersions, NetworkUtilityTest, - testing::ValuesIn(TestEnvironment::getIpTestParameters())); + testing::ValuesIn(TestEnvironment::getIpVersionsForTest())); // This validates Network::Test::bindFreeLoopbackPort behaves as desired, i.e. that we don't have // a significant risk of flakes due to re-use of a port over short time intervals. 
We can't drive @@ -35,8 +35,8 @@ TEST_P(NetworkUtilityTest, DISABLED_ValidateBindFreeLoopbackPort) { std::map seen; const size_t kLimit = 50; for (size_t n = 0; n < kLimit; ++n) { - auto addr_fd = Network::Test::bindFreeLoopbackPort(version_, Address::SocketType::Stream); - Api::OsSysCallsSingleton::get().close(addr_fd.second); + auto addr_fd = Network::Test::bindFreeLoopbackPort(version_, Socket::Type::Stream); + addr_fd.second->close(); auto addr = addr_fd.first->asString(); auto search = seen.find(addr); if (search != seen.end()) { diff --git a/test/test_common/only_one_thread.h b/test/test_common/only_one_thread.h index 678e40b319a08..c726ae6d28fa5 100644 --- a/test/test_common/only_one_thread.h +++ b/test/test_common/only_one_thread.h @@ -20,7 +20,7 @@ class OnlyOneThread { private: ThreadFactory& thread_factory_; - ThreadId thread_advancing_time_ GUARDED_BY(mutex_); + ThreadId thread_advancing_time_ ABSL_GUARDED_BY(mutex_); mutable MutexBasicLockable mutex_; }; diff --git a/test/test_common/printers.cc b/test/test_common/printers.cc index c6573e80a5853..8a2ece63ad132 100644 --- a/test/test_common/printers.cc +++ b/test/test_common/printers.cc @@ -8,15 +8,12 @@ namespace Envoy { namespace Http { +// NOLINTNEXTLINE(readability-identifier-naming) void PrintTo(const HeaderMapImpl& headers, std::ostream* os) { - headers.iterate( - [](const HeaderEntry& header, void* context) -> HeaderMap::Iterate { - std::ostream* os = static_cast(context); - *os << "{'" << header.key().getStringView() << "','" << header.value().getStringView() - << "'}"; - return HeaderMap::Iterate::Continue; - }, - os); + headers.iterate([os](const HeaderEntry& header) -> HeaderMap::Iterate { + *os << "{'" << header.key().getStringView() << "','" << header.value().getStringView() << "'}"; + return HeaderMap::Iterate::Continue; + }); } void PrintTo(const HeaderMapPtr& headers, std::ostream* os) { diff --git a/test/test_common/simulated_time_system.cc b/test/test_common/simulated_time_system.cc 
index 92f7f81a81970..d361beddf4711 100644 --- a/test/test_common/simulated_time_system.cc +++ b/test/test_common/simulated_time_system.cc @@ -8,6 +8,7 @@ #include "common/common/lock_guard.h" #include "common/event/real_time_system.h" #include "common/event/timer_impl.h" +#include "common/runtime/runtime_features.h" namespace Envoy { namespace Event { @@ -50,11 +51,11 @@ class UnlockGuard { // mechanism used in RealTimeSystem timers is employed for simulated alarms. class SimulatedTimeSystemHelper::Alarm : public Timer { public: - Alarm(SimulatedTimeSystemHelper& time_system, Scheduler& base_scheduler, TimerCb cb, - Dispatcher& dispatcher) - : base_timer_(base_scheduler.createTimer([this, cb] { runAlarm(cb); }, dispatcher)), - time_system_(time_system), index_(time_system.nextIndex()), armed_(false), pending_(false) { - } + Alarm(SimulatedScheduler& simulated_scheduler, SimulatedTimeSystemHelper& time_system, + CallbackScheduler& cb_scheduler, TimerCb cb) + : cb_(cb_scheduler.createSchedulableCallback([this, cb] { runAlarm(cb); })), + simulated_scheduler_(simulated_scheduler), time_system_(time_system), armed_(false), + pending_(false) {} ~Alarm() override; @@ -68,21 +69,16 @@ class SimulatedTimeSystemHelper::Alarm : public Timer { const ScopeTrackedObject* scope) override; bool enabled() override { absl::MutexLock lock(&time_system_.mutex_); - return armed_ || base_timer_->enabled(); + return armed_ || cb_->enabled(); } - void disableTimerLockHeld() EXCLUSIVE_LOCKS_REQUIRED(time_system_.mutex_); - - void setTimeLockHeld(MonotonicTime time) EXCLUSIVE_LOCKS_REQUIRED(time_system_.mutex_) { - time_ = time; - } + void disableTimerLockHeld() ABSL_EXCLUSIVE_LOCKS_REQUIRED(time_system_.mutex_); /** * Activates the timer so it will be run the next time the libevent loop is run, * typically via Dispatcher::run(). 
*/ - void activateLockHeld(const ScopeTrackedObject* scope = nullptr) - EXCLUSIVE_LOCKS_REQUIRED(time_system_.mutex_) { + void activateLockHeld() ABSL_EXCLUSIVE_LOCKS_REQUIRED(time_system_.mutex_) { ASSERT(armed_); armed_ = false; if (pending_) { @@ -97,21 +93,12 @@ class SimulatedTimeSystemHelper::Alarm : public Timer { // See class comment for UnlockGuard for details on saving // time_system_.mutex_ prior to running libevent, which may delete this. UnlockGuard unlocker(time_system_.mutex_); - std::chrono::milliseconds duration = std::chrono::milliseconds::zero(); - base_timer_->enableTimer(duration, scope); - } - - MonotonicTime time() const EXCLUSIVE_LOCKS_REQUIRED(time_system_.mutex_) { - ASSERT(armed_); - return time_; + cb_->scheduleCallbackCurrentIteration(); } SimulatedTimeSystemHelper& timeSystem() { return time_system_; } - uint64_t index() const { return index_; } private: - friend SimulatedTimeSystemHelper::CompareAlarms; - void runAlarm(TimerCb cb) { { absl::MutexLock lock(&time_system_.mutex_); @@ -123,26 +110,11 @@ class SimulatedTimeSystemHelper::Alarm : public Timer { time_system.decPending(); } - TimerPtr base_timer_; + SchedulableCallbackPtr cb_; + SimulatedScheduler& simulated_scheduler_; SimulatedTimeSystemHelper& time_system_; - MonotonicTime time_ GUARDED_BY(time_system_.mutex_); - const uint64_t index_; - bool armed_ GUARDED_BY(time_system_.mutex_); - bool pending_ GUARDED_BY(time_system_.mutex_); -}; - -// Compare two alarms, based on wakeup time and insertion order. Returns true if -// a comes before b. 
-bool SimulatedTimeSystemHelper::CompareAlarms::operator()(const Alarm* a, const Alarm* b) const - EXCLUSIVE_LOCKS_REQUIRED(a->time_system_.mutex_, b->time_system_.mutex_) { - if (a != b) { - if (a->time() < b->time()) { - return true; - } else if (a->time() == b->time() && a->index() < b->index()) { - return true; - } - } - return false; + bool armed_ ABSL_GUARDED_BY(time_system_.mutex_); + bool pending_ ABSL_GUARDED_BY(time_system_.mutex_); }; // Each timer is maintained and ordered by a common TimeSystem, but is @@ -151,16 +123,20 @@ bool SimulatedTimeSystemHelper::CompareAlarms::operator()(const Alarm* a, const // the expected thread. class SimulatedTimeSystemHelper::SimulatedScheduler : public Scheduler { public: - SimulatedScheduler(SimulatedTimeSystemHelper& time_system, Scheduler& base_scheduler) - : time_system_(time_system), base_scheduler_(base_scheduler) {} - TimerPtr createTimer(const TimerCb& cb, Dispatcher& dispatcher) override { - return std::make_unique(time_system_, base_scheduler_, cb, - dispatcher); + SimulatedScheduler(SimulatedTimeSystemHelper& time_system, CallbackScheduler& cb_scheduler) + : time_system_(time_system), cb_scheduler_(cb_scheduler), + schedule_ready_alarms_cb_(cb_scheduler.createSchedulableCallback( + [this] { time_system_.scheduleReadyAlarms(); })) {} + TimerPtr createTimer(const TimerCb& cb, Dispatcher& /*dispatcher*/) override { + return std::make_unique(*this, time_system_, cb_scheduler_, + cb); }; + void scheduleReadyAlarms() { schedule_ready_alarms_cb_->scheduleCallbackNextIteration(); } private: SimulatedTimeSystemHelper& time_system_; - Scheduler& base_scheduler_; + CallbackScheduler& cb_scheduler_; + SchedulableCallbackPtr schedule_ready_alarms_cb_; }; SimulatedTimeSystemHelper::Alarm::Alarm::~Alarm() { @@ -170,26 +146,42 @@ SimulatedTimeSystemHelper::Alarm::Alarm::~Alarm() { } void SimulatedTimeSystemHelper::Alarm::Alarm::disableTimer() { + cb_->cancel(); absl::MutexLock lock(&time_system_.mutex_); 
disableTimerLockHeld(); } void SimulatedTimeSystemHelper::Alarm::Alarm::disableTimerLockHeld() { if (armed_) { - time_system_.removeAlarmLockHeld(this); + time_system_.removeAlarmLockHeld(*this); armed_ = false; } + if (pending_) { + pending_ = false; + time_system_.decPendingLockHeld(); + } } void SimulatedTimeSystemHelper::Alarm::Alarm::enableHRTimer( - const std::chrono::microseconds& duration, const ScopeTrackedObject* scope) { + const std::chrono::microseconds& duration, const ScopeTrackedObject* /*scope*/) { + if (duration.count() != 0) { + disableTimer(); + } absl::MutexLock lock(&time_system_.mutex_); - disableTimerLockHeld(); + if (pending_) { + // Calling enableTimer on a timer that is already pending is a no-op. Timer will still fire + // based on the original time it was scheduled. + return; + } else if (armed_) { + disableTimerLockHeld(); + } + armed_ = true; - if (duration.count() == 0) { - activateLockHeld(scope); + if (duration.count() == 0 && !Runtime::runtimeFeatureEnabled( + "envoy.reloadable_features.activate_timers_next_event_loop")) { + activateLockHeld(); } else { - time_system_.addAlarmLockHeld(this, duration); + time_system_.addAlarmLockHeld(*this, duration, simulated_scheduler_); } } @@ -205,7 +197,7 @@ static int instance_count = 0; // will march forward only by calling.advanceTimeAsync(). 
SimulatedTimeSystemHelper::SimulatedTimeSystemHelper() : monotonic_time_(MonotonicTime(std::chrono::seconds(0))), - system_time_(real_time_source_.systemTime()), index_(0), pending_alarms_(0) { + system_time_(real_time_source_.systemTime()), pending_alarms_(0) { ++instance_count; ASSERT(instance_count <= 1); } @@ -241,15 +233,17 @@ void SimulatedTimeSystemHelper::advanceTimeWait(const Duration& duration) { waitForNoPendingLockHeld(); } -void SimulatedTimeSystemHelper::waitForNoPendingLockHeld() const EXCLUSIVE_LOCKS_REQUIRED(mutex_) { +void SimulatedTimeSystemHelper::waitForNoPendingLockHeld() const + ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_) { mutex_.Await(absl::Condition( +[](const uint32_t* pending_alarms) -> bool { return *pending_alarms == 0; }, &pending_alarms_)); } -Thread::CondVar::WaitStatus SimulatedTimeSystemHelper::waitFor( - Thread::MutexBasicLockable& mutex, Thread::CondVar& condvar, - const Duration& duration) noexcept EXCLUSIVE_LOCKS_REQUIRED(mutex) { +Thread::CondVar::WaitStatus SimulatedTimeSystemHelper::waitFor(Thread::MutexBasicLockable& mutex, + Thread::CondVar& condvar, + const Duration& duration) noexcept + ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex) { only_one_thread_.checkOneThread(); // TODO(#10568): This real-time polling delay should not be necessary. Without @@ -278,8 +272,8 @@ Thread::CondVar::WaitStatus SimulatedTimeSystemHelper::waitFor( MonotonicTime next_wakeup = end_time; if (!alarms_.empty()) { // If there's another alarm pending, sleep forward to it. 
- Alarm* alarm = (*alarms_.begin()); - next_wakeup = std::min(alarmTimeLockHeld(alarm), next_wakeup); + const AlarmRegistration& alarm_registration = *alarms_.begin(); + next_wakeup = std::min(alarm_registration.time_, next_wakeup); } setMonotonicTimeLockHeld(next_wakeup); waitForNoPendingLockHeld(); @@ -295,66 +289,94 @@ Thread::CondVar::WaitStatus SimulatedTimeSystemHelper::waitFor( return Thread::CondVar::WaitStatus::Timeout; } -MonotonicTime SimulatedTimeSystemHelper::alarmTimeLockHeld(Alarm* alarm) NO_THREAD_SAFETY_ANALYSIS { +void SimulatedTimeSystemHelper::alarmActivateLockHeld(Alarm& alarm) ABSL_NO_THREAD_SAFETY_ANALYSIS { // We disable thread-safety analysis as the compiler can't detect that - // alarm_->timeSystem() == this, so we must be holding the right mutex. - ASSERT(&(alarm->timeSystem()) == this); - return alarm->time(); + // alarm_.timeSystem() == this, so we must be holding the right mutex. + ASSERT(&(alarm.timeSystem()) == this); + alarm.activateLockHeld(); } -void SimulatedTimeSystemHelper::alarmActivateLockHeld(Alarm* alarm) NO_THREAD_SAFETY_ANALYSIS { - // We disable thread-safety analysis as the compiler can't detect that - // alarm_->timeSystem() == this, so we must be holding the right mutex. 
- ASSERT(&(alarm->timeSystem()) == this); - alarm->activateLockHeld(); -} +void SimulatedTimeSystemHelper::addAlarmLockHeld( + Alarm& alarm, const std::chrono::microseconds& duration, + SimulatedScheduler& simulated_scheduler) ABSL_NO_THREAD_SAFETY_ANALYSIS { + ASSERT(&(alarm.timeSystem()) == this); + ASSERT(alarms_.size() == alarm_registrations_map_.size()); + ASSERT(alarm_registrations_map_.find(&alarm) == alarm_registrations_map_.end()); + + auto insert_result = alarms_.insert({monotonic_time_ + duration, random_source_.random(), alarm}); + ASSERT(insert_result.second); + alarm_registrations_map_.emplace(&alarm, insert_result.first); + if (duration.count() == 0) { + // Force the event loop to check for timers that are ready to execute since we just added an 0 + // delay alarm which is ready to execution in the next iteration of the event loop. + // TODO(antoniovicente) Refactor alarm tracking so it happens per scheduler and limit wakeup to + // a single event loop. -int64_t SimulatedTimeSystemHelper::nextIndex() { - absl::MutexLock lock(&mutex_); - return index_++; -} + // We don't want to activate the alarm under lock, as it will make a libevent call, and libevent + // itself uses locks: + // https://github.com/libevent/libevent/blob/29cc8386a2f7911eaa9336692a2c5544d8b4734f/event.c#L1917 + UnlockGuard unlocker(mutex_); + simulated_scheduler.scheduleReadyAlarms(); + } -void SimulatedTimeSystemHelper::addAlarmLockHeld( - Alarm* alarm, const std::chrono::microseconds& duration) NO_THREAD_SAFETY_ANALYSIS { - ASSERT(&(alarm->timeSystem()) == this); - alarm->setTimeLockHeld(monotonic_time_ + duration); - alarms_.insert(alarm); + // Sanity check that the parallel data structures used for alarm registration have the same number + // of entries. 
+ ASSERT(alarms_.size() == alarm_registrations_map_.size()); } -void SimulatedTimeSystemHelper::removeAlarmLockHeld(Alarm* alarm) { alarms_.erase(alarm); } +void SimulatedTimeSystemHelper::removeAlarmLockHeld(Alarm& alarm) { + ASSERT(alarms_.size() == alarm_registrations_map_.size()); + + auto it = alarm_registrations_map_.find(&alarm); + ASSERT(it != alarm_registrations_map_.end()); + alarms_.erase(it->second); + alarm_registrations_map_.erase(it); -SchedulerPtr SimulatedTimeSystemHelper::createScheduler(Scheduler& base_scheduler) { - return std::make_unique(*this, base_scheduler); + // Sanity check that the parallel data structures used for alarm registration have the same number + // of entries. + ASSERT(alarms_.size() == alarm_registrations_map_.size()); +} + +SchedulerPtr SimulatedTimeSystemHelper::createScheduler(Scheduler& /*base_scheduler*/, + CallbackScheduler& cb_scheduler) { + return std::make_unique(*this, cb_scheduler); } void SimulatedTimeSystemHelper::setMonotonicTimeLockHeld(const MonotonicTime& monotonic_time) { + only_one_thread_.checkOneThread(); // We don't have a MutexLock construct that allows temporarily // dropping the lock to run a callback. The main issue here is that we must // be careful not to be holding mutex_ when an exception can be thrown. // That can only happen here in alarm->activate(), which is run with the mutex // released. if (monotonic_time >= monotonic_time_) { - // Alarms is a std::set ordered by wakeup time, so pulling off begin() each - // iteration gives you wakeup order. Also note that alarms may be added - // or removed during the call to activate() so it would not be correct to - // range-iterate over the set. 
- while (!alarms_.empty()) { - AlarmSet::iterator pos = alarms_.begin(); - Alarm* alarm = *pos; - MonotonicTime alarm_time = alarmTimeLockHeld(alarm); - if (alarm_time > monotonic_time) { - break; - } - ASSERT(alarm_time >= monotonic_time_); - system_time_ += - std::chrono::duration_cast(alarm_time - monotonic_time_); - monotonic_time_ = alarm_time; - alarms_.erase(pos); - alarmActivateLockHeld(alarm); - } system_time_ += std::chrono::duration_cast(monotonic_time - monotonic_time_); monotonic_time_ = monotonic_time; + scheduleReadyAlarmsLockHeld(); + } +} + +void SimulatedTimeSystemHelper::scheduleReadyAlarms() { + absl::MutexLock lock(&mutex_); + scheduleReadyAlarmsLockHeld(); +} + +void SimulatedTimeSystemHelper::scheduleReadyAlarmsLockHeld() { + // Alarms is a std::set ordered by wakeup time, so pulling off begin() each + // iteration gives you wakeup order. Also note that alarms may be added + // or removed during the call to activate() so it would not be correct to + // range-iterate over the set. 
+ while (!alarms_.empty()) { + const AlarmRegistration& alarm_registration = *alarms_.begin(); + MonotonicTime alarm_time = alarm_registration.time_; + if (alarm_time > monotonic_time_) { + break; + } + + Alarm& alarm = alarm_registration.alarm_; + removeAlarmLockHeld(alarm); + alarmActivateLockHeld(alarm); } } diff --git a/test/test_common/simulated_time_system.h b/test/test_common/simulated_time_system.h index 04e411a171d3f..e8a369e4f9cc9 100644 --- a/test/test_common/simulated_time_system.h +++ b/test/test_common/simulated_time_system.h @@ -8,6 +8,9 @@ #include "test/test_common/only_one_thread.h" #include "test/test_common/test_time_system.h" +#include "test/test_common/utility.h" + +#include "absl/container/flat_hash_map.h" namespace Envoy { namespace Event { @@ -23,14 +26,14 @@ class SimulatedTimeSystemHelper : public TestTimeSystem { ~SimulatedTimeSystemHelper() override; // TimeSystem - SchedulerPtr createScheduler(Scheduler& base_scheduler) override; + SchedulerPtr createScheduler(Scheduler& base_scheduler, CallbackScheduler& cb_scheduler) override; // TestTimeSystem void advanceTimeWait(const Duration& duration) override; void advanceTimeAsync(const Duration& duration) override; - Thread::CondVar::WaitStatus - waitFor(Thread::MutexBasicLockable& mutex, Thread::CondVar& condvar, - const Duration& duration) noexcept EXCLUSIVE_LOCKS_REQUIRED(mutex) override; + Thread::CondVar::WaitStatus waitFor(Thread::MutexBasicLockable& mutex, Thread::CondVar& condvar, + const Duration& duration) noexcept + ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex) override; // TimeSource SystemTime systemTime() override; @@ -64,10 +67,31 @@ class SimulatedTimeSystemHelper : public TestTimeSystem { class SimulatedScheduler; class Alarm; friend class Alarm; // Needed to reference mutex for thread annotations. 
- struct CompareAlarms { - bool operator()(const Alarm* a, const Alarm* b) const; + struct AlarmRegistration { + AlarmRegistration(MonotonicTime time, uint64_t randomness, Alarm& alarm) + : time_(time), randomness_(randomness), alarm_(alarm) {} + + MonotonicTime time_; + // Random tie-breaker for alarms scheduled for the same monotonic time used to mimic + // non-deterministic execution of real alarms scheduled for the same wall time. + uint64_t randomness_; + Alarm& alarm_; + + friend bool operator<(const AlarmRegistration& lhs, const AlarmRegistration& rhs) { + if (lhs.time_ != rhs.time_) { + return lhs.time_ < rhs.time_; + } + if (lhs.randomness_ != rhs.randomness_) { + return lhs.randomness_ < rhs.randomness_; + } + // Out of paranoia, use pointer comparison on the alarms as a final tie-breaker but also + // ASSERT that this branch isn't hit in debug modes since in practice the randomness_ + // associated with two registrations should never be equal. + ASSERT(false, "Alarm registration randomness_ for two alarms should never be equal."); + return &lhs.alarm_ < &rhs.alarm_; + } }; - using AlarmSet = std::set; + using AlarmSet = std::set; /** * Sets the time forward monotonically. If the supplied argument moves @@ -78,36 +102,41 @@ class SimulatedTimeSystemHelper : public TestTimeSystem { * @param monotonic_time The desired new current time. */ void setMonotonicTimeLockHeld(const MonotonicTime& monotonic_time) - EXCLUSIVE_LOCKS_REQUIRED(mutex_); + ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_); - MonotonicTime alarmTimeLockHeld(Alarm* alarm) EXCLUSIVE_LOCKS_REQUIRED(mutex_); - void alarmActivateLockHeld(Alarm* alarm) EXCLUSIVE_LOCKS_REQUIRED(mutex_); + /** + * Schedule expired alarms so they execute in their event loops. + */ + void scheduleReadyAlarms(); + void scheduleReadyAlarmsLockHeld() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_); - // The simulation keeps a unique ID for each alarm to act as a deterministic - // tie-breaker for alarm-ordering. 
- int64_t nextIndex(); + void alarmActivateLockHeld(Alarm& alarm) ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_); // Adds/removes an alarm. - void addAlarmLockHeld(Alarm*, const std::chrono::microseconds& duration) - EXCLUSIVE_LOCKS_REQUIRED(mutex_); - void removeAlarmLockHeld(Alarm*) EXCLUSIVE_LOCKS_REQUIRED(mutex_); + void addAlarmLockHeld(Alarm&, const std::chrono::microseconds& duration, + SimulatedScheduler& simulated_scheduler) + ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_); + void removeAlarmLockHeld(Alarm&) ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_); // Keeps track of how many alarms have been activated but not yet called, // which helps waitFor() determine when to give up and declare a timeout. - void incPendingLockHeld() EXCLUSIVE_LOCKS_REQUIRED(mutex_) { ++pending_alarms_; } + void incPendingLockHeld() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_) { ++pending_alarms_; } void decPending() { absl::MutexLock lock(&mutex_); - --pending_alarms_; + decPendingLockHeld(); } - void waitForNoPendingLockHeld() const EXCLUSIVE_LOCKS_REQUIRED(mutex_); + void decPendingLockHeld() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_) { --pending_alarms_; } + void waitForNoPendingLockHeld() const ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_); RealTimeSource real_time_source_; // Used to initialize monotonic_time_ and system_time_; - MonotonicTime monotonic_time_ GUARDED_BY(mutex_); - SystemTime system_time_ GUARDED_BY(mutex_); - AlarmSet alarms_ GUARDED_BY(mutex_); - uint64_t index_ GUARDED_BY(mutex_); + MonotonicTime monotonic_time_ ABSL_GUARDED_BY(mutex_); + SystemTime system_time_ ABSL_GUARDED_BY(mutex_); + TestRandomGenerator random_source_ ABSL_GUARDED_BY(mutex_); + AlarmSet alarms_ ABSL_GUARDED_BY(mutex_); + absl::flat_hash_map + alarm_registrations_map_ ABSL_GUARDED_BY(mutex_); mutable absl::Mutex mutex_; - uint32_t pending_alarms_ GUARDED_BY(mutex_); + uint32_t pending_alarms_ ABSL_GUARDED_BY(mutex_); Thread::OnlyOneThread only_one_thread_; }; diff --git a/test/test_common/simulated_time_system_test.cc 
b/test/test_common/simulated_time_system_test.cc index df5565b455d04..13a435148aff3 100644 --- a/test/test_common/simulated_time_system_test.cc +++ b/test/test_common/simulated_time_system_test.cc @@ -3,8 +3,10 @@ #include "common/event/libevent_scheduler.h" #include "common/event/timer_impl.h" +#include "test/mocks/common.h" #include "test/mocks/event/mocks.h" #include "test/test_common/simulated_time_system.h" +#include "test/test_common/test_runtime.h" #include "test/test_common/utility.h" #include "event2/event.h" @@ -15,19 +17,40 @@ namespace Event { namespace Test { namespace { -class SimulatedTimeSystemTest : public testing::Test { +enum class ActivateMode { DelayActivateTimers, EagerlyActivateTimers }; + +class SimulatedTimeSystemTest : public testing::TestWithParam { protected: SimulatedTimeSystemTest() - : scheduler_(time_system_.createScheduler(base_scheduler_)), + : scheduler_(time_system_.createScheduler(base_scheduler_, base_scheduler_)), start_monotonic_time_(time_system_.monotonicTime()), - start_system_time_(time_system_.systemTime()) {} + start_system_time_(time_system_.systemTime()) { + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.activate_timers_next_event_loop", + activateMode() == ActivateMode::DelayActivateTimers ? 
"true" : "false"}}); + } + + ActivateMode activateMode() { return GetParam(); } + + void trackPrepareCalls() { + base_scheduler_.registerOnPrepareCallback([this]() { output_.append(1, 'p'); }); + } + + void addTask(int64_t delay_ms, char marker, bool expect_monotonic = true) { + addCustomTask( + delay_ms, marker, []() {}, expect_monotonic); + } - void addTask(int64_t delay_ms, char marker) { + void addCustomTask(int64_t delay_ms, char marker, std::function cb, + bool expect_monotonic = true) { std::chrono::milliseconds delay(delay_ms); TimerPtr timer = scheduler_->createTimer( - [this, marker, delay]() { + [this, marker, delay, cb, expect_monotonic]() { output_.append(1, marker); - EXPECT_GE(time_system_.monotonicTime(), start_monotonic_time_ + delay); + if (expect_monotonic) { + EXPECT_GE(time_system_.monotonicTime(), start_monotonic_time_ + delay); + } + cb(); }, dispatcher_); timer->enableTimer(delay); @@ -44,7 +67,8 @@ class SimulatedTimeSystemTest : public testing::Test { base_scheduler_.run(Dispatcher::RunType::NonBlock); } - testing::NiceMock dispatcher_; + TestScopedRuntime scoped_runtime_; + Event::MockDispatcher dispatcher_; LibeventScheduler base_scheduler_; SimulatedTimeSystem time_system_; SchedulerPtr scheduler_; @@ -54,7 +78,11 @@ class SimulatedTimeSystemTest : public testing::Test { SystemTime start_system_time_; }; -TEST_F(SimulatedTimeSystemTest, AdvanceTimeAsync) { +INSTANTIATE_TEST_SUITE_P(DelayTimerActivation, SimulatedTimeSystemTest, + testing::Values(ActivateMode::DelayActivateTimers, + ActivateMode::EagerlyActivateTimers)); + +TEST_P(SimulatedTimeSystemTest, AdvanceTimeAsync) { EXPECT_EQ(start_monotonic_time_, time_system_.monotonicTime()); EXPECT_EQ(start_system_time_, time_system_.systemTime()); advanceMsAndLoop(5); @@ -62,7 +90,179 @@ TEST_F(SimulatedTimeSystemTest, AdvanceTimeAsync) { EXPECT_EQ(start_system_time_ + std::chrono::milliseconds(5), time_system_.systemTime()); } -TEST_F(SimulatedTimeSystemTest, AdvanceTimeWait) { 
+TEST_P(SimulatedTimeSystemTest, TimerTotalOrdering) { + trackPrepareCalls(); + + addTask(0, '0'); + addTask(1, '1'); + addTask(2, '2'); + EXPECT_EQ(3, timers_.size()); + + advanceMsAndLoop(5); + + // Verify order. + EXPECT_EQ("p012", output_); +} + +TEST_P(SimulatedTimeSystemTest, TimerPartialOrdering) { + trackPrepareCalls(); + + std::set outputs; + for (int i = 0; i < 100; ++i) { + addTask(0, '0'); + addTask(1, '1'); + addTask(1, '2'); + addTask(3, '3'); + EXPECT_EQ(4, timers_.size()); + + advanceMsAndLoop(5); + + outputs.insert(output_); + + // Cleanup before the next iteration. + output_.clear(); + timers_.clear(); + } + + // Execution order of timers 1 and 2 is non-deterministic because the two timers were scheduled + // for the same time. Verify that both orderings were observed. + EXPECT_THAT(outputs, testing::ElementsAre("p0123", "p0213")); +} + +TEST_P(SimulatedTimeSystemTest, TimerPartialOrdering2) { + trackPrepareCalls(); + + std::set outputs; + for (int i = 0; i < 100; ++i) { + addTask(0, '0'); + addTask(15, '1'); + advanceMsAndLoop(10); + + // Timer 1 has 5ms remaining, so timer 2 ends up scheduled at the same monotonic time as 1. + addTask(5, '2'); + addTask(6, '3'); + advanceMsAndLoop(10); + + outputs.insert(output_); + + // Cleanup before the next iteration. + output_.clear(); + timers_.clear(); + } + + // Execution order of timers 1 and 2 is non-deterministic because the two timers were scheduled + // for the same time. Verify that both orderings were observed. + EXPECT_THAT(outputs, testing::ElementsAre("p0p123", "p0p213")); +} + +// Timers that are scheduled to execute and but are disabled first do not trigger. +TEST_P(SimulatedTimeSystemTest, TimerOrderAndDisableTimer) { + trackPrepareCalls(); + + // Create 3 timers. The first timer should disable the second, so it doesn't trigger. 
+ addCustomTask(0, '0', [this]() { timers_[1]->disableTimer(); }); + addTask(1, '1'); + addTask(2, '2'); + EXPECT_EQ(3, timers_.size()); + + // Expect timers to execute in order since the timers are scheduled at have different times and + // that timer 1 does not execute because it was disabled as part of 0's execution. + advanceMsAndLoop(5); + // Verify that timer 1 was skipped. + EXPECT_EQ("p02", output_); +} + +// Capture behavior of timers which are rescheduled without being disabled first. +TEST_P(SimulatedTimeSystemTest, TimerOrderAndRescheduleTimer) { + trackPrepareCalls(); + + // Reschedule timers 1, 2 and 4 without disabling first. + addCustomTask(0, '0', [this]() { + timers_[1]->enableTimer(std::chrono::milliseconds(0)); + timers_[2]->enableTimer(std::chrono::milliseconds(100)); + timers_[4]->enableTimer(std::chrono::milliseconds(0)); + }); + addTask(1, '1'); + addTask(2, '2'); + addTask(3, '3'); + addTask(10000, '4', false); + EXPECT_EQ(5, timers_.size()); + + // Rescheduling timers that are already scheduled to run in the current event loop iteration has + // no effect if the time delta is 0. Expect timers 0, 1 and 3 to execute in the original order. + // Timer 4 runs as part of the first wakeup since its new schedule time has a delta of 0. Timer 2 + // is delayed since it is rescheduled with a non-zero delta. + advanceMsAndLoop(5); + if (activateMode() == ActivateMode::DelayActivateTimers) { +#ifdef WIN32 + // Force it to run again to pick up next iteration callbacks. + // The event loop runs for a single iteration in NonBlock mode on Windows as a hack to work + // around LEVEL trigger fd registrations constantly firing events and preventing the NonBlock + // event loop from ever reaching the no-fd event and no-expired timers termination condition. 
It + // is not possible to get consistent event loop behavior since the time system does not override + // the base scheduler's run behavior, and libevent does not provide a mode where it runs at most + // N iterations before breaking out of the loop for us to prefer over the single iteration mode + // used on Windows. + advanceMsAndLoop(0); +#endif + EXPECT_EQ("p013p4", output_); + } else { + EXPECT_EQ("p0134", output_); + } + + advanceMsAndLoop(100); + if (activateMode() == ActivateMode::DelayActivateTimers) { + EXPECT_EQ("p013p4p2", output_); + } else { + EXPECT_EQ("p0134p2", output_); + } +} + +// Disable and re-enable timers that is already pending execution and verify that execution is +// delayed. +TEST_P(SimulatedTimeSystemTest, TimerOrderDisableAndRescheduleTimer) { + trackPrepareCalls(); + + // Disable and reschedule timers 1, 2 and 4 when timer 0 triggers. + addCustomTask(0, '0', [this]() { + timers_[1]->disableTimer(); + timers_[1]->enableTimer(std::chrono::milliseconds(0)); + timers_[2]->disableTimer(); + timers_[2]->enableTimer(std::chrono::milliseconds(100)); + timers_[4]->disableTimer(); + timers_[4]->enableTimer(std::chrono::milliseconds(0)); + }); + addTask(1, '1'); + addTask(2, '2'); + addTask(3, '3'); + addTask(10000, '4', false); + EXPECT_EQ(5, timers_.size()); + + // timer 0 is expected to run first and reschedule timers 1 and 2. Timer 3 should fire before + // timer 1 since timer 3's registration is unaffected. timer 1 runs in the same iteration + // because it is scheduled with zero delay. Timer 2 executes in a later iteration because it is + // re-enabled with a non-zero timeout. + advanceMsAndLoop(5); + if (activateMode() == ActivateMode::DelayActivateTimers) { +#ifdef WIN32 + // The event loop runs for a single iteration in NonBlock mode on Windows. Force it to run again + // to pick up next iteration callbacks. 
+ advanceMsAndLoop(0); +#endif + EXPECT_THAT(output_, testing::AnyOf("p03p14", "p03p41")); + } else { + EXPECT_EQ("p0314", output_); + } + + advanceMsAndLoop(100); + if (activateMode() == ActivateMode::DelayActivateTimers) { + EXPECT_THAT(output_, testing::AnyOf("p03p14p2", "p03p41p2")); + } else { + EXPECT_EQ("p0314p2", output_); + } +} + +TEST_P(SimulatedTimeSystemTest, AdvanceTimeWait) { EXPECT_EQ(start_monotonic_time_, time_system_.monotonicTime()); EXPECT_EQ(start_system_time_, time_system_.systemTime()); @@ -84,7 +284,7 @@ TEST_F(SimulatedTimeSystemTest, AdvanceTimeWait) { EXPECT_EQ(start_system_time_ + std::chrono::milliseconds(5), time_system_.systemTime()); } -TEST_F(SimulatedTimeSystemTest, WaitFor) { +TEST_P(SimulatedTimeSystemTest, WaitFor) { EXPECT_EQ(start_monotonic_time_, time_system_.monotonicTime()); EXPECT_EQ(start_system_time_, time_system_.systemTime()); @@ -145,7 +345,7 @@ TEST_F(SimulatedTimeSystemTest, WaitFor) { thread->join(); } -TEST_F(SimulatedTimeSystemTest, Monotonic) { +TEST_P(SimulatedTimeSystemTest, Monotonic) { // Setting time forward works. time_system_.setMonotonicTime(start_monotonic_time_ + std::chrono::milliseconds(5)); EXPECT_EQ(start_monotonic_time_ + std::chrono::milliseconds(5), time_system_.monotonicTime()); @@ -155,7 +355,7 @@ TEST_F(SimulatedTimeSystemTest, Monotonic) { EXPECT_EQ(start_monotonic_time_ + std::chrono::milliseconds(5), time_system_.monotonicTime()); } -TEST_F(SimulatedTimeSystemTest, System) { +TEST_P(SimulatedTimeSystemTest, System) { // Setting time forward works. 
time_system_.setSystemTime(start_system_time_ + std::chrono::milliseconds(5)); EXPECT_EQ(start_system_time_ + std::chrono::milliseconds(5), time_system_.systemTime()); @@ -165,7 +365,7 @@ TEST_F(SimulatedTimeSystemTest, System) { EXPECT_EQ(start_system_time_ + std::chrono::milliseconds(3), time_system_.systemTime()); } -TEST_F(SimulatedTimeSystemTest, Ordering) { +TEST_P(SimulatedTimeSystemTest, Ordering) { addTask(5, '5'); addTask(3, '3'); addTask(6, '6'); @@ -176,7 +376,7 @@ TEST_F(SimulatedTimeSystemTest, Ordering) { EXPECT_EQ("356", output_); } -TEST_F(SimulatedTimeSystemTest, SystemTimeOrdering) { +TEST_P(SimulatedTimeSystemTest, SystemTimeOrdering) { addTask(5, '5'); addTask(3, '3'); addTask(6, '6'); @@ -190,7 +390,7 @@ TEST_F(SimulatedTimeSystemTest, SystemTimeOrdering) { EXPECT_EQ("356", output_); // callbacks don't get replayed. } -TEST_F(SimulatedTimeSystemTest, DisableTimer) { +TEST_P(SimulatedTimeSystemTest, DisableTimer) { addTask(5, '5'); addTask(3, '3'); addTask(6, '6'); @@ -202,7 +402,7 @@ TEST_F(SimulatedTimeSystemTest, DisableTimer) { EXPECT_EQ("36", output_); } -TEST_F(SimulatedTimeSystemTest, IgnoreRedundantDisable) { +TEST_P(SimulatedTimeSystemTest, IgnoreRedundantDisable) { addTask(5, '5'); timers_[0]->disableTimer(); timers_[0]->disableTimer(); @@ -210,7 +410,7 @@ TEST_F(SimulatedTimeSystemTest, IgnoreRedundantDisable) { EXPECT_EQ("", output_); } -TEST_F(SimulatedTimeSystemTest, OverrideEnable) { +TEST_P(SimulatedTimeSystemTest, OverrideEnable) { addTask(5, '5'); timers_[0]->enableTimer(std::chrono::milliseconds(6)); advanceMsAndLoop(5); @@ -219,7 +419,7 @@ TEST_F(SimulatedTimeSystemTest, OverrideEnable) { EXPECT_EQ("5", output_); } -TEST_F(SimulatedTimeSystemTest, DeleteTime) { +TEST_P(SimulatedTimeSystemTest, DeleteTime) { addTask(5, '5'); addTask(3, '3'); addTask(6, '6'); @@ -232,7 +432,7 @@ TEST_F(SimulatedTimeSystemTest, DeleteTime) { } // Regression test for issues documented in https://github.com/envoyproxy/envoy/pull/6956 
-TEST_F(SimulatedTimeSystemTest, DuplicateTimer) { +TEST_P(SimulatedTimeSystemTest, DuplicateTimer) { // Set one alarm two times to test that pending does not get duplicated.. std::chrono::milliseconds delay(0); TimerPtr zero_timer = scheduler_->createTimer([this]() { output_.append(1, '2'); }, dispatcher_); @@ -268,7 +468,7 @@ TEST_F(SimulatedTimeSystemTest, DuplicateTimer) { thread->join(); } -TEST_F(SimulatedTimeSystemTest, Enabled) { +TEST_P(SimulatedTimeSystemTest, Enabled) { TimerPtr timer = scheduler_->createTimer({}, dispatcher_); timer->enableTimer(std::chrono::milliseconds(0)); EXPECT_TRUE(timer->enabled()); diff --git a/test/test_common/test_runtime.h b/test/test_common/test_runtime.h index 0532b5529f9f2..08e0c441efc34 100644 --- a/test/test_common/test_runtime.h +++ b/test/test_common/test_runtime.h @@ -16,6 +16,7 @@ #include "common/runtime/runtime_impl.h" #include "common/stats/isolated_store_impl.h" +#include "test/mocks/common.h" #include "test/mocks/event/mocks.h" #include "test/mocks/init/mocks.h" #include "test/mocks/local_info/mocks.h" @@ -43,7 +44,7 @@ class TestScopedRuntime { Event::MockDispatcher dispatcher_; testing::NiceMock tls_; Stats::IsolatedStoreImpl store_; - Runtime::MockRandomGenerator generator_; + Random::MockRandomGenerator generator_; Api::ApiPtr api_; testing::NiceMock local_info_; testing::NiceMock validation_visitor_; diff --git a/test/test_common/test_time.h b/test/test_common/test_time.h index bb7ceeb06ca40..31880b73b5e24 100644 --- a/test/test_common/test_time.h +++ b/test/test_common/test_time.h @@ -14,13 +14,14 @@ class TestRealTimeSystem : public TestTimeSystem { // TestTimeSystem void advanceTimeAsync(const Duration& duration) override; void advanceTimeWait(const Duration& duration) override; - Thread::CondVar::WaitStatus - waitFor(Thread::MutexBasicLockable& mutex, Thread::CondVar& condvar, - const Duration& duration) noexcept EXCLUSIVE_LOCKS_REQUIRED(mutex) override; + Thread::CondVar::WaitStatus 
waitFor(Thread::MutexBasicLockable& mutex, Thread::CondVar& condvar, + const Duration& duration) noexcept + ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex) override; // Event::TimeSystem - Event::SchedulerPtr createScheduler(Scheduler& base_scheduler) override { - return real_time_system_.createScheduler(base_scheduler); + Event::SchedulerPtr createScheduler(Scheduler& base_scheduler, + CallbackScheduler& cb_scheduler) override { + return real_time_system_.createScheduler(base_scheduler, cb_scheduler); } // TimeSource diff --git a/test/test_common/test_time_system.h b/test/test_common/test_time_system.h index fd501f7b33932..bc5d389728797 100644 --- a/test/test_common/test_time_system.h +++ b/test/test_common/test_time_system.h @@ -56,13 +56,15 @@ class TestTimeSystem : public Event::TimeSystem { * @param duration The maximum amount of time to wait. * @return Thread::CondVar::WaitStatus whether the condition timed out or not. */ - virtual Thread::CondVar::WaitStatus - waitFor(Thread::MutexBasicLockable& mutex, Thread::CondVar& condvar, - const Duration& duration) noexcept EXCLUSIVE_LOCKS_REQUIRED(mutex) PURE; + virtual Thread::CondVar::WaitStatus waitFor(Thread::MutexBasicLockable& mutex, + Thread::CondVar& condvar, + const Duration& duration) noexcept + ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex) PURE; template Thread::CondVar::WaitStatus waitFor(Thread::MutexBasicLockable& mutex, Thread::CondVar& condvar, - const D& duration) noexcept EXCLUSIVE_LOCKS_REQUIRED(mutex) { + const D& duration) noexcept + ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex) { return waitFor(mutex, condvar, std::chrono::duration_cast(duration)); } }; @@ -91,7 +93,7 @@ class SingletonTimeSystemHelper { TestTimeSystem& timeSystem(const MakeTimeSystemFn& make_time_system); private: - std::unique_ptr time_system_ GUARDED_BY(mutex_); + std::unique_ptr time_system_ ABSL_GUARDED_BY(mutex_); Thread::MutexBasicLockable mutex_; }; @@ -107,14 +109,15 @@ template class DelegatingTestTimeSystemBase : public T 
timeSystem().advanceTimeWait(duration); } - Thread::CondVar::WaitStatus - waitFor(Thread::MutexBasicLockable& mutex, Thread::CondVar& condvar, - const Duration& duration) noexcept EXCLUSIVE_LOCKS_REQUIRED(mutex) override { + Thread::CondVar::WaitStatus waitFor(Thread::MutexBasicLockable& mutex, Thread::CondVar& condvar, + const Duration& duration) noexcept + ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex) override { return timeSystem().waitFor(mutex, condvar, duration); } - SchedulerPtr createScheduler(Scheduler& base_scheduler) override { - return timeSystem().createScheduler(base_scheduler); + SchedulerPtr createScheduler(Scheduler& base_scheduler, + CallbackScheduler& cb_scheduler) override { + return timeSystem().createScheduler(base_scheduler, cb_scheduler); } SystemTime systemTime() override { return timeSystem().systemTime(); } MonotonicTime monotonicTime() override { return timeSystem().monotonicTime(); } diff --git a/test/test_common/test_time_system_test.cc b/test/test_common/test_time_system_test.cc index b4733d023599b..35fa9873f7ae7 100644 --- a/test/test_common/test_time_system_test.cc +++ b/test/test_common/test_time_system_test.cc @@ -26,8 +26,8 @@ TEST_F(TestTimeSystemTest, TwoRealsSameReference) { TEST_F(TestTimeSystemTest, SimThenRealConflict) { SimulatedTimeSystem t1; - EXPECT_DEATH_LOG_TO_STDERR({ DangerousDeprecatedTestTime t2; }, - ".*Two different types of time-systems allocated.*"); + EXPECT_DEATH({ DangerousDeprecatedTestTime t2; }, + ".*Two different types of time-systems allocated.*"); } TEST_F(TestTimeSystemTest, SimThenRealSerial) { @@ -37,8 +37,7 @@ TEST_F(TestTimeSystemTest, SimThenRealSerial) { TEST_F(TestTimeSystemTest, RealThenSim) { DangerousDeprecatedTestTime t1; - EXPECT_DEATH_LOG_TO_STDERR({ SimulatedTimeSystem t2; }, - ".*Two different types of time-systems allocated.*"); + EXPECT_DEATH({ SimulatedTimeSystem t2; }, ".*Two different types of time-systems allocated.*"); } TEST_F(TestTimeSystemTest, RealThenSimSerial) { diff --git 
a/test/test_common/test_version_linkstamp.cc b/test/test_common/test_version_linkstamp.cc new file mode 100644 index 0000000000000..76d1a8290781e --- /dev/null +++ b/test/test_common/test_version_linkstamp.cc @@ -0,0 +1,6 @@ +// NOLINT(namespace-envoy) +extern const char build_scm_revision[]; +extern const char build_scm_status[]; + +const char build_scm_revision[] = "0"; +const char build_scm_status[] = "test"; diff --git a/test/test_common/utility.cc b/test/test_common/utility.cc index 9d2678d2fe6b9..13feb48e5df5b 100644 --- a/test/test_common/utility.cc +++ b/test/test_common/utility.cc @@ -25,6 +25,7 @@ #include "common/common/lock_guard.h" #include "common/common/thread_impl.h" #include "common/common/utility.h" +#include "common/config/resource_name.h" #include "common/filesystem/directory.h" #include "common/filesystem/filesystem_impl.h" #include "common/json/json_loader.h" @@ -67,26 +68,18 @@ bool TestUtility::headerMapEqualIgnoreOrder(const Http::HeaderMap& lhs, return false; } - struct State { - const Http::HeaderMap& lhs; - bool equal; - }; - - State state{lhs, true}; - rhs.iterate( - [](const Http::HeaderEntry& header, void* context) -> Http::HeaderMap::Iterate { - State* state = static_cast(context); - const Http::HeaderEntry* entry = - state->lhs.get(Http::LowerCaseString(std::string(header.key().getStringView()))); - if (entry == nullptr || (entry->value() != header.value().getStringView())) { - state->equal = false; - return Http::HeaderMap::Iterate::Break; - } - return Http::HeaderMap::Iterate::Continue; - }, - &state); + bool equal = true; + rhs.iterate([&lhs, &equal](const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate { + const Http::HeaderEntry* entry = + lhs.get(Http::LowerCaseString(std::string(header.key().getStringView()))); + if (entry == nullptr || (entry->value() != header.value().getStringView())) { + equal = false; + return Http::HeaderMap::Iterate::Break; + } + return Http::HeaderMap::Iterate::Continue; + }); - return 
state.equal; + return equal; } bool TestUtility::buffersEqual(const Buffer::Instance& lhs, const Buffer::Instance& rhs) { @@ -121,6 +114,25 @@ bool TestUtility::buffersEqual(const Buffer::Instance& lhs, const Buffer::Instan return true; } +bool TestUtility::rawSlicesEqual(const Buffer::RawSlice* lhs, const Buffer::RawSlice* rhs, + size_t num_slices) { + for (size_t slice = 0; slice < num_slices; slice++) { + auto rhs_slice = rhs[slice]; + auto lhs_slice = lhs[slice]; + if (rhs_slice.len_ != lhs_slice.len_) { + return false; + } + auto rhs_slice_data = static_cast(rhs_slice.mem_); + auto lhs_slice_data = static_cast(lhs_slice.mem_); + for (size_t offset = 0; offset < rhs_slice.len_; offset++) { + if (rhs_slice_data[offset] != lhs_slice_data[offset]) { + return false; + } + } + } + return true; +} + void TestUtility::feedBufferWithRandomCharacters(Buffer::Instance& buffer, uint64_t n_char, uint64_t seed) { const std::string sample = "Neque porro quisquam est qui dolorem ipsum.."; @@ -146,32 +158,56 @@ Stats::TextReadoutSharedPtr TestUtility::findTextReadout(Stats::Store& store, return findByName(store.textReadouts(), name); } -void TestUtility::waitForCounterEq(Stats::Store& store, const std::string& name, uint64_t value, - Event::TestTimeSystem& time_system) { +AssertionResult TestUtility::waitForCounterEq(Stats::Store& store, const std::string& name, + uint64_t value, Event::TestTimeSystem& time_system, + std::chrono::milliseconds timeout) { + auto end_time = time_system.monotonicTime() + timeout; while (findCounter(store, name) == nullptr || findCounter(store, name)->value() != value) { time_system.advanceTimeWait(std::chrono::milliseconds(10)); + if (timeout != std::chrono::milliseconds::zero() && time_system.monotonicTime() >= end_time) { + return AssertionFailure() << fmt::format("timed out waiting for {} to be {}", name, value); + } } + return AssertionSuccess(); } -void TestUtility::waitForCounterGe(Stats::Store& store, const std::string& name, uint64_t 
value, - Event::TestTimeSystem& time_system) { +AssertionResult TestUtility::waitForCounterGe(Stats::Store& store, const std::string& name, + uint64_t value, Event::TestTimeSystem& time_system, + std::chrono::milliseconds timeout) { + auto end_time = time_system.monotonicTime() + timeout; while (findCounter(store, name) == nullptr || findCounter(store, name)->value() < value) { time_system.advanceTimeWait(std::chrono::milliseconds(10)); + if (timeout != std::chrono::milliseconds::zero() && time_system.monotonicTime() >= end_time) { + return AssertionFailure() << fmt::format("timed out waiting for {} to be {}", name, value); + } } + return AssertionSuccess(); } -void TestUtility::waitForGaugeGe(Stats::Store& store, const std::string& name, uint64_t value, - Event::TestTimeSystem& time_system) { +AssertionResult TestUtility::waitForGaugeGe(Stats::Store& store, const std::string& name, + uint64_t value, Event::TestTimeSystem& time_system, + std::chrono::milliseconds timeout) { + auto end_time = time_system.monotonicTime() + timeout; while (findGauge(store, name) == nullptr || findGauge(store, name)->value() < value) { time_system.advanceTimeWait(std::chrono::milliseconds(10)); + if (timeout != std::chrono::milliseconds::zero() && time_system.monotonicTime() >= end_time) { + return AssertionFailure() << fmt::format("timed out waiting for {} to be {}", name, value); + } } + return AssertionSuccess(); } -void TestUtility::waitForGaugeEq(Stats::Store& store, const std::string& name, uint64_t value, - Event::TestTimeSystem& time_system) { +AssertionResult TestUtility::waitForGaugeEq(Stats::Store& store, const std::string& name, + uint64_t value, Event::TestTimeSystem& time_system, + std::chrono::milliseconds timeout) { + auto end_time = time_system.monotonicTime() + timeout; while (findGauge(store, name) == nullptr || findGauge(store, name)->value() != value) { time_system.advanceTimeWait(std::chrono::milliseconds(10)); + if (timeout != std::chrono::milliseconds::zero() && 
time_system.monotonicTime() >= end_time) { + return AssertionFailure() << fmt::format("timed out waiting for {} to be {}", name, value); + } } + return AssertionSuccess(); } std::list @@ -221,6 +257,31 @@ std::string TestUtility::xdsResourceName(const ProtobufWkt::Any& resource) { if (resource.type_url() == Config::TypeUrl::get().Runtime) { return TestUtility::anyConvert(resource).name(); } + if (resource.type_url() == Config::getTypeUrl( + envoy::config::core::v3::ApiVersion::V3)) { + return TestUtility::anyConvert(resource).name(); + } + if (resource.type_url() == Config::getTypeUrl( + envoy::config::core::v3::ApiVersion::V3)) { + return TestUtility::anyConvert(resource).name(); + } + if (resource.type_url() == Config::getTypeUrl( + envoy::config::core::v3::ApiVersion::V3)) { + return TestUtility::anyConvert(resource).name(); + } + if (resource.type_url() == Config::getTypeUrl( + envoy::config::core::v3::ApiVersion::V3)) { + return TestUtility::anyConvert(resource) + .cluster_name(); + } + if (resource.type_url() == Config::getTypeUrl( + envoy::config::core::v3::ApiVersion::V3)) { + return TestUtility::anyConvert(resource).name(); + } + if (resource.type_url() == Config::getTypeUrl( + envoy::config::core::v3::ApiVersion::V3)) { + return TestUtility::anyConvert(resource).name(); + } throw EnvoyException( absl::StrCat("xdsResourceName does not know about type URL ", resource.type_url())); } diff --git a/test/test_common/utility.h b/test/test_common/utility.h index e66100184743d..df96fff5d7f6d 100644 --- a/test/test_common/utility.h +++ b/test/test_common/utility.h @@ -19,6 +19,8 @@ #include "common/common/c_smart_ptr.h" #include "common/common/empty_string.h" #include "common/common/thread.h" +#include "common/config/decoded_resource_impl.h" +#include "common/config/opaque_resource_decoder_impl.h" #include "common/config/version_converter.h" #include "common/http/header_map_impl.h" #include "common/protobuf/message_validator_impl.h" @@ -85,22 +87,6 @@ namespace 
Envoy { ADD_FAILURE() << "Unexpected exception: " << std::string(e.what()); \ } -/* - Macro to use instead of EXPECT_DEATH when stderr is produced by a logger. - It temporarily installs stderr sink and restores the original logger sink after the test - completes and stderr_sink object goes of of scope. - EXPECT_DEATH(statement, regex) test passes when statement causes crash and produces error message - matching regex. Test fails when statement does not crash or it crashes but message does not - match regex. If a message produced during crash is redirected away from strerr, the test fails. - By installing StderrSinkDelegate, the macro forces EXPECT_DEATH to send any output produced by - statement to stderr. -*/ -#define EXPECT_DEATH_LOG_TO_STDERR(statement, message) \ - do { \ - Envoy::Logger::StderrSinkDelegate stderr_sink(Envoy::Logger::Registry::getSink()); \ - EXPECT_DEATH(statement, message); \ - } while (false) - #define VERIFY_ASSERTION(statement) \ do { \ ::testing::AssertionResult status = statement; \ @@ -156,6 +142,18 @@ class TestUtility { */ static bool buffersEqual(const Buffer::Instance& lhs, const Buffer::Instance& rhs); + /** + * Compare 2 RawSlice pointers. + * @param lhs supplies raw slice 1. + * @param rhs supplies raw slice 2. + * @param num_slices The number of slices to compare. It is assumed lhs and rhs have the same + * number. + * @return true if for num_slices, all lhs raw slices are equal to the corresponding rhs raw slice + * in length and a byte by byte data comparison. false otherwise + */ + static bool rawSlicesEqual(const Buffer::RawSlice* lhs, const Buffer::RawSlice* rhs, + size_t num_slices); + /** * Feed a buffer with random characters. * @param buffer supplies the buffer to be fed. @@ -197,14 +195,19 @@ class TestUtility { static Stats::GaugeSharedPtr findGauge(Stats::Store& store, const std::string& name); /** - * Wait till Counter value is equal to the passed ion value. + * Wait for a counter to == a given value. 
* @param store supplies the stats store. * @param name supplies the name of the counter to wait for. * @param value supplies the value of the counter. * @param time_system the time system to use for waiting. + * @param timeout the maximum time to wait before timing out, or 0 for no timeout. + * @return AssertionSuccess() if the counter was == to the value within the timeout, else + * AssertionFailure(). */ - static void waitForCounterEq(Stats::Store& store, const std::string& name, uint64_t value, - Event::TestTimeSystem& time_system); + static AssertionResult + waitForCounterEq(Stats::Store& store, const std::string& name, uint64_t value, + Event::TestTimeSystem& time_system, + std::chrono::milliseconds timeout = std::chrono::milliseconds::zero()); /** * Wait for a counter to >= a given value. @@ -212,9 +215,14 @@ class TestUtility { * @param name counter name. * @param value target value. * @param time_system the time system to use for waiting. + * @param timeout the maximum time to wait before timing out, or 0 for no timeout. + * @return AssertionSuccess() if the counter was >= to the value within the timeout, else + * AssertionFailure(). */ - static void waitForCounterGe(Stats::Store& store, const std::string& name, uint64_t value, - Event::TestTimeSystem& time_system); + static AssertionResult + waitForCounterGe(Stats::Store& store, const std::string& name, uint64_t value, + Event::TestTimeSystem& time_system, + std::chrono::milliseconds timeout = std::chrono::milliseconds::zero()); /** * Wait for a gauge to >= a given value. @@ -222,9 +230,14 @@ class TestUtility { * @param name gauge name. * @param value target value. * @param time_system the time system to use for waiting. + * @param timeout the maximum time to wait before timing out, or 0 for no timeout. + * @return AssertionSuccess() if the counter gauge >= to the value within the timeout, else + * AssertionFailure(). 
*/ - static void waitForGaugeGe(Stats::Store& store, const std::string& name, uint64_t value, - Event::TestTimeSystem& time_system); + static AssertionResult + waitForGaugeGe(Stats::Store& store, const std::string& name, uint64_t value, + Event::TestTimeSystem& time_system, + std::chrono::milliseconds timeout = std::chrono::milliseconds::zero()); /** * Wait for a gauge to == a given value. @@ -232,9 +245,14 @@ class TestUtility { * @param name gauge name. * @param value target value. * @param time_system the time system to use for waiting. + * @param timeout the maximum time to wait before timing out, or 0 for no timeout. + * @return AssertionSuccess() if the gauge was == to the value within the timeout, else + * AssertionFailure(). */ - static void waitForGaugeEq(Stats::Store& store, const std::string& name, uint64_t value, - Event::TestTimeSystem& time_system); + static AssertionResult + waitForGaugeEq(Stats::Store& store, const std::string& name, uint64_t value, + Event::TestTimeSystem& time_system, + std::chrono::milliseconds timeout = std::chrono::milliseconds::zero()); /** * Find a readout in a stats store. @@ -299,6 +317,20 @@ class TestUtility { return differencer.Compare(lhs, rhs); } + /** + * Compare two decoded resources for equality. + * + * @param lhs decoded resource on LHS. + * @param rhs decoded resource on RHS. + * @return bool indicating whether the decoded resources are equal. + */ + static bool decodedResourceEq(const Config::DecodedResource& lhs, + const Config::DecodedResource& rhs) { + return lhs.name() == rhs.name() && lhs.aliases() == rhs.aliases() && + lhs.version() == rhs.version() && lhs.hasResource() == rhs.hasResource() && + (!lhs.hasResource() || protoEqual(lhs.resource(), rhs.resource())); + } + /** * Compare two JSON strings serialized from ProtobufWkt::Struct for equality. 
When two identical * ProtobufWkt::Struct are serialized into JSON strings, the results have the same set of @@ -522,8 +554,9 @@ class TestUtility { // Strict variants of Protobuf::MessageUtil static void loadFromJson(const std::string& json, Protobuf::Message& message, - bool preserve_original_type = false) { - MessageUtil::loadFromJson(json, message, ProtobufMessage::getStrictValidationVisitor()); + bool preserve_original_type = false, bool avoid_boosting = false) { + MessageUtil::loadFromJson(json, message, ProtobufMessage::getStrictValidationVisitor(), + !avoid_boosting); if (!preserve_original_type) { Config::VersionConverter::eraseOriginalTypeInformation(message); } @@ -534,8 +567,9 @@ class TestUtility { } static void loadFromYaml(const std::string& yaml, Protobuf::Message& message, - bool preserve_original_type = false) { - MessageUtil::loadFromYaml(yaml, message, ProtobufMessage::getStrictValidationVisitor()); + bool preserve_original_type = false, bool avoid_boosting = false) { + MessageUtil::loadFromYaml(yaml, message, ProtobufMessage::getStrictValidationVisitor(), + !avoid_boosting); if (!preserve_original_type) { Config::VersionConverter::eraseOriginalTypeInformation(message); } @@ -555,10 +589,14 @@ class TestUtility { } template - static void loadFromYamlAndValidate(const std::string& yaml, MessageType& message) { - MessageUtil::loadFromYamlAndValidate(yaml, message, - ProtobufMessage::getStrictValidationVisitor()); - Config::VersionConverter::eraseOriginalTypeInformation(message); + static void loadFromYamlAndValidate(const std::string& yaml, MessageType& message, + bool preserve_original_type = false, + bool avoid_boosting = false) { + MessageUtil::loadFromYamlAndValidate( + yaml, message, ProtobufMessage::getStrictValidationVisitor(), avoid_boosting); + if (!preserve_original_type) { + Config::VersionConverter::eraseOriginalTypeInformation(message); + } } template static void validate(const MessageType& message) { @@ -584,6 +622,139 @@ class 
TestUtility { MessageUtil::loadFromJson(json, message); return message; } + + /** + * Extract the Protobuf binary format of a google.protobuf.Message as a string. + * @param message message of type type.googleapis.com/google.protobuf.Message. + * @return std::string of the Protobuf binary object. + */ + static std::string getProtobufBinaryStringFromMessage(const Protobuf::Message& message) { + std::string pb_binary_str; + pb_binary_str.reserve(message.ByteSizeLong()); + message.SerializeToString(&pb_binary_str); + return pb_binary_str; + } + + template + static Config::DecodedResourcesWrapper + decodeResources(std::initializer_list resources, + const std::string& name_field = "name") { + Config::DecodedResourcesWrapper decoded_resources; + for (const auto& resource : resources) { + auto owned_resource = std::make_unique(resource); + decoded_resources.owned_resources_.emplace_back(new Config::DecodedResourceImpl( + std::move(owned_resource), MessageUtil::getStringField(resource, name_field), {}, "")); + decoded_resources.refvec_.emplace_back(*decoded_resources.owned_resources_.back()); + } + return decoded_resources; + } + + template + static Config::DecodedResourcesWrapper + decodeResources(const Protobuf::RepeatedPtrField& resources, + const std::string& version, const std::string& name_field = "name") { + TestOpaqueResourceDecoderImpl resource_decoder(name_field); + return Config::DecodedResourcesWrapper(resource_decoder, resources, version); + } + + template + static Config::DecodedResourcesWrapper + decodeResources(const envoy::service::discovery::v3::DiscoveryResponse& resources, + const std::string& name_field = "name") { + return decodeResources(resources.resources(), resources.version_info(), + name_field); + } + + template + static Config::DecodedResourcesWrapper decodeResources( + const Protobuf::RepeatedPtrField& resources, + const std::string& name_field = "name") { + Config::DecodedResourcesWrapper decoded_resources; + TestOpaqueResourceDecoderImpl 
resource_decoder(name_field); + for (const auto& resource : resources) { + decoded_resources.owned_resources_.emplace_back( + new Config::DecodedResourceImpl(resource_decoder, resource)); + decoded_resources.refvec_.emplace_back(*decoded_resources.owned_resources_.back()); + } + return decoded_resources; + } + + template + class TestOpaqueResourceDecoderImpl : public Config::OpaqueResourceDecoderImpl { + public: + TestOpaqueResourceDecoderImpl(absl::string_view name_field) + : Config::OpaqueResourceDecoderImpl(ProtobufMessage::getStrictValidationVisitor(), + name_field) {} + }; + + /** + * Returns the string representation of a envoy::config::core::v3::ApiVersion. + * + * @param api_version to be converted. + * @return std::string representation of envoy::config::core::v3::ApiVersion. + */ + static std::string + getVersionStringFromApiVersion(envoy::config::core::v3::ApiVersion api_version) { + switch (api_version) { + case envoy::config::core::v3::ApiVersion::AUTO: + return "AUTO"; + case envoy::config::core::v3::ApiVersion::V2: + return "V2"; + case envoy::config::core::v3::ApiVersion::V3: + return "V3"; + default: + NOT_REACHED_GCOVR_EXCL_LINE; + } + } + + /** + * Returns the fully-qualified name of a service, rendered from service_full_name_template. + * + * @param service_full_name_template the service fully-qualified name template. + * @param api_version version of a service. + * @param use_alpha if the alpha version is preferred. + * @param service_namespace to override the service namespace. + * @return std::string full path of a service method. 
+ */ + static std::string + getVersionedServiceFullName(const std::string& service_full_name_template, + envoy::config::core::v3::ApiVersion api_version, + bool use_alpha = false, + const std::string& service_namespace = EMPTY_STRING) { + switch (api_version) { + case envoy::config::core::v3::ApiVersion::AUTO: + FALLTHRU; + case envoy::config::core::v3::ApiVersion::V2: + return fmt::format(service_full_name_template, use_alpha ? "v2alpha" : "v2", + service_namespace); + + case envoy::config::core::v3::ApiVersion::V3: + return fmt::format(service_full_name_template, "v3", service_namespace); + default: + NOT_REACHED_GCOVR_EXCL_LINE; + } + } + + /** + * Returns the full path of a service method. + * + * @param service_full_name_template the service fully-qualified name template. + * @param method_name the method name. + * @param api_version version of a service method. + * @param use_alpha if the alpha version is preferred. + * @param service_namespace to override the service namespace. + * @return std::string full path of a service method. 
+ */ + static std::string getVersionedMethodPath(const std::string& service_full_name_template, + absl::string_view method_name, + envoy::config::core::v3::ApiVersion api_version, + bool use_alpha = false, + const std::string& service_namespace = EMPTY_STRING) { + return absl::StrCat("/", + getVersionedServiceFullName(service_full_name_template, api_version, + use_alpha, service_namespace), + "/", method_name); + } }; /** @@ -633,28 +804,29 @@ namespace Http { */ #define DEFINE_TEST_INLINE_HEADER_FUNCS(name) \ public: \ - const HeaderEntry* name() const override { return header_map_.name(); } \ + const HeaderEntry* name() const override { return header_map_->name(); } \ void append##name(absl::string_view data, absl::string_view delimiter) override { \ - header_map_.append##name(data, delimiter); \ - header_map_.verifyByteSizeInternalForTest(); \ + header_map_->append##name(data, delimiter); \ + header_map_->verifyByteSizeInternalForTest(); \ } \ void setReference##name(absl::string_view value) override { \ - header_map_.setReference##name(value); \ - header_map_.verifyByteSizeInternalForTest(); \ + header_map_->setReference##name(value); \ + header_map_->verifyByteSizeInternalForTest(); \ } \ void set##name(absl::string_view value) override { \ - header_map_.set##name(value); \ - header_map_.verifyByteSizeInternalForTest(); \ + header_map_->set##name(value); \ + header_map_->verifyByteSizeInternalForTest(); \ } \ void set##name(uint64_t value) override { \ - header_map_.set##name(value); \ - header_map_.verifyByteSizeInternalForTest(); \ + header_map_->set##name(value); \ + header_map_->verifyByteSizeInternalForTest(); \ } \ size_t remove##name() override { \ - size_t headers_removed = header_map_.remove##name(); \ - header_map_.verifyByteSizeInternalForTest(); \ + const size_t headers_removed = header_map_->remove##name(); \ + header_map_->verifyByteSizeInternalForTest(); \ return headers_removed; \ - } + } \ + absl::string_view get##name##Value() const override 
{ return header_map_->get##name##Value(); } /** * Base class for all test header map types. This class wraps an underlying real header map @@ -668,23 +840,23 @@ template class TestHeaderMapImplBase : public Inte TestHeaderMapImplBase() = default; TestHeaderMapImplBase(const std::initializer_list>& values) { for (auto& value : values) { - header_map_.addCopy(LowerCaseString(value.first), value.second); + header_map_->addCopy(LowerCaseString(value.first), value.second); } - header_map_.verifyByteSizeInternalForTest(); + header_map_->verifyByteSizeInternalForTest(); } TestHeaderMapImplBase(const TestHeaderMapImplBase& rhs) - : TestHeaderMapImplBase(rhs.header_map_) {} + : TestHeaderMapImplBase(*rhs.header_map_) {} TestHeaderMapImplBase(const HeaderMap& rhs) { - HeaderMapImpl::copyFrom(header_map_, rhs); - header_map_.verifyByteSizeInternalForTest(); + HeaderMapImpl::copyFrom(*header_map_, rhs); + header_map_->verifyByteSizeInternalForTest(); } TestHeaderMapImplBase& operator=(const TestHeaderMapImplBase& rhs) { if (this == &rhs) { return *this; } clear(); - HeaderMapImpl::copyFrom(header_map_, rhs); - header_map_.verifyByteSizeInternalForTest(); + HeaderMapImpl::copyFrom(*header_map_, rhs); + header_map_->verifyByteSizeInternalForTest(); return *this; } @@ -706,86 +878,112 @@ template class TestHeaderMapImplBase : public Inte size_t remove(const std::string& key) { return remove(LowerCaseString(key)); } // HeaderMap - bool operator==(const HeaderMap& rhs) const override { return header_map_.operator==(rhs); } - bool operator!=(const HeaderMap& rhs) const override { return header_map_.operator!=(rhs); } + bool operator==(const HeaderMap& rhs) const override { return header_map_->operator==(rhs); } + bool operator!=(const HeaderMap& rhs) const override { return header_map_->operator!=(rhs); } void addViaMove(HeaderString&& key, HeaderString&& value) override { - header_map_.addViaMove(std::move(key), std::move(value)); - header_map_.verifyByteSizeInternalForTest(); + 
header_map_->addViaMove(std::move(key), std::move(value)); + header_map_->verifyByteSizeInternalForTest(); } void addReference(const LowerCaseString& key, absl::string_view value) override { - header_map_.addReference(key, value); - header_map_.verifyByteSizeInternalForTest(); + header_map_->addReference(key, value); + header_map_->verifyByteSizeInternalForTest(); } void addReferenceKey(const LowerCaseString& key, uint64_t value) override { - header_map_.addReferenceKey(key, value); - header_map_.verifyByteSizeInternalForTest(); + header_map_->addReferenceKey(key, value); + header_map_->verifyByteSizeInternalForTest(); } void addReferenceKey(const LowerCaseString& key, absl::string_view value) override { - header_map_.addReferenceKey(key, value); - header_map_.verifyByteSizeInternalForTest(); + header_map_->addReferenceKey(key, value); + header_map_->verifyByteSizeInternalForTest(); } void addCopy(const LowerCaseString& key, uint64_t value) override { - header_map_.addCopy(key, value); - header_map_.verifyByteSizeInternalForTest(); + header_map_->addCopy(key, value); + header_map_->verifyByteSizeInternalForTest(); } void addCopy(const LowerCaseString& key, absl::string_view value) override { - header_map_.addCopy(key, value); - header_map_.verifyByteSizeInternalForTest(); + header_map_->addCopy(key, value); + header_map_->verifyByteSizeInternalForTest(); } void appendCopy(const LowerCaseString& key, absl::string_view value) override { - header_map_.appendCopy(key, value); - header_map_.verifyByteSizeInternalForTest(); + header_map_->appendCopy(key, value); + header_map_->verifyByteSizeInternalForTest(); } void setReference(const LowerCaseString& key, absl::string_view value) override { - header_map_.setReference(key, value); - header_map_.verifyByteSizeInternalForTest(); + header_map_->setReference(key, value); + header_map_->verifyByteSizeInternalForTest(); } void setReferenceKey(const LowerCaseString& key, absl::string_view value) override { - 
header_map_.setReferenceKey(key, value); + header_map_->setReferenceKey(key, value); } void setCopy(const LowerCaseString& key, absl::string_view value) override { - header_map_.setCopy(key, value); - header_map_.verifyByteSizeInternalForTest(); + header_map_->setCopy(key, value); + header_map_->verifyByteSizeInternalForTest(); } - uint64_t byteSize() const override { return header_map_.byteSize(); } - const HeaderEntry* get(const LowerCaseString& key) const override { return header_map_.get(key); } - void iterate(HeaderMap::ConstIterateCb cb, void* context) const override { - header_map_.iterate(cb, context); + uint64_t byteSize() const override { return header_map_->byteSize(); } + const HeaderEntry* get(const LowerCaseString& key) const override { + return header_map_->get(key); } - void iterateReverse(HeaderMap::ConstIterateCb cb, void* context) const override { - header_map_.iterateReverse(cb, context); - } - HeaderMap::Lookup lookup(const LowerCaseString& key, const HeaderEntry** entry) const override { - return header_map_.lookup(key, entry); + void iterate(HeaderMap::ConstIterateCb cb) const override { header_map_->iterate(cb); } + void iterateReverse(HeaderMap::ConstIterateCb cb) const override { + header_map_->iterateReverse(cb); } void clear() override { - header_map_.clear(); - header_map_.verifyByteSizeInternalForTest(); + header_map_->clear(); + header_map_->verifyByteSizeInternalForTest(); } size_t remove(const LowerCaseString& key) override { - size_t headers_removed = header_map_.remove(key); - header_map_.verifyByteSizeInternalForTest(); + size_t headers_removed = header_map_->remove(key); + header_map_->verifyByteSizeInternalForTest(); + return headers_removed; + } + size_t removeIf(const HeaderMap::HeaderMatchPredicate& predicate) override { + size_t headers_removed = header_map_->removeIf(predicate); + header_map_->verifyByteSizeInternalForTest(); return headers_removed; } size_t removePrefix(const LowerCaseString& key) override { - size_t 
headers_removed = header_map_.removePrefix(key); - header_map_.verifyByteSizeInternalForTest(); + size_t headers_removed = header_map_->removePrefix(key); + header_map_->verifyByteSizeInternalForTest(); return headers_removed; } - size_t size() const override { return header_map_.size(); } - bool empty() const override { return header_map_.empty(); } + size_t size() const override { return header_map_->size(); } + bool empty() const override { return header_map_->empty(); } void dumpState(std::ostream& os, int indent_level = 0) const override { - header_map_.dumpState(os, indent_level); + header_map_->dumpState(os, indent_level); + } + + using Handle = typename CustomInlineHeaderRegistry::Handle; + const HeaderEntry* getInline(Handle handle) const override { + return header_map_->getInline(handle); + } + void appendInline(Handle handle, absl::string_view data, absl::string_view delimiter) override { + header_map_->appendInline(handle, data, delimiter); + header_map_->verifyByteSizeInternalForTest(); + } + void setReferenceInline(Handle handle, absl::string_view value) override { + header_map_->setReferenceInline(handle, value); + header_map_->verifyByteSizeInternalForTest(); + } + void setInline(Handle handle, absl::string_view value) override { + header_map_->setInline(handle, value); + header_map_->verifyByteSizeInternalForTest(); + } + void setInline(Handle handle, uint64_t value) override { + header_map_->setInline(handle, value); + header_map_->verifyByteSizeInternalForTest(); + } + size_t removeInline(Handle handle) override { + const size_t rc = header_map_->removeInline(handle); + header_map_->verifyByteSizeInternalForTest(); + return rc; } - Impl header_map_; + std::unique_ptr header_map_{Impl::create()}; }; /** * Typed test implementations for all of the concrete header types. 
*/ -using TestHeaderMapImpl = TestHeaderMapImplBase; - class TestRequestHeaderMapImpl : public TestHeaderMapImplBase { public: @@ -907,6 +1105,35 @@ MATCHER_P(RepeatedProtoEq, expected, "") { return equal; } +MATCHER_P(DecodedResourcesEq, expected, "") { + const bool equal = std::equal(arg.begin(), arg.end(), expected.begin(), expected.end(), + TestUtility::decodedResourceEq); + if (!equal) { + const auto format_resources = + [](const std::vector& resources) -> std::string { + std::vector resource_strs; + std::transform( + resources.begin(), resources.end(), std::back_inserter(resource_strs), + [](const Config::DecodedResourceRef& resource) -> std::string { + return fmt::format( + "", resource.get().name(), + absl::StrJoin(resource.get().aliases(), ","), resource.get().version(), + resource.get().hasResource() ? resource.get().resource().DebugString() : "(none)"); + }); + return absl::StrJoin(resource_strs, ", "); + }; + *result_listener << "\n" + << TestUtility::addLeftAndRightPadding("Expected resources:") << "\n" + << format_resources(expected) << "\n" + << TestUtility::addLeftAndRightPadding("are not equal to actual resources:") + << "\n" + << format_resources(arg) << "\n" + << TestUtility::addLeftAndRightPadding("") // line full of padding + << "\n"; + } + return equal; +} + MATCHER_P(Percent, rhs, "") { envoy::type::v3::FractionalPercent expected; expected.set_numerator(rhs); diff --git a/test/test_common/utility_test.cc b/test/test_common/utility_test.cc index a229892e03980..648d65cda3655 100644 --- a/test/test_common/utility_test.cc +++ b/test/test_common/utility_test.cc @@ -7,23 +7,24 @@ namespace Envoy { TEST(HeaderMapEqualIgnoreOrder, ActuallyEqual) { - Http::TestHeaderMapImpl lhs{{":method", "GET"}, {":path", "/"}, {":authority", "host"}}; - Http::TestHeaderMapImpl rhs{{":method", "GET"}, {":path", "/"}, {":authority", "host"}}; + Http::TestRequestHeaderMapImpl lhs{{":method", "GET"}, {":path", "/"}, {":authority", "host"}}; + 
Http::TestRequestHeaderMapImpl rhs{{":method", "GET"}, {":path", "/"}, {":authority", "host"}}; EXPECT_TRUE(TestUtility::headerMapEqualIgnoreOrder(lhs, rhs)); EXPECT_EQ(lhs, rhs); } TEST(HeaderMapEqualIgnoreOrder, IgnoreOrder) { - Http::TestHeaderMapImpl lhs{{":method", "GET"}, {":authority", "host"}, {":path", "/"}}; - Http::TestHeaderMapImpl rhs{{":method", "GET"}, {":path", "/"}, {":authority", "host"}}; + Http::TestRequestHeaderMapImpl lhs{{":method", "GET"}, {":authority", "host"}, {":path", "/"}}; + Http::TestRequestHeaderMapImpl rhs{{":method", "GET"}, {":path", "/"}, {":authority", "host"}}; EXPECT_TRUE(TestUtility::headerMapEqualIgnoreOrder(lhs, rhs)); EXPECT_THAT(&lhs, HeaderMapEqualIgnoreOrder(&rhs)); EXPECT_FALSE(lhs == rhs); } TEST(HeaderMapEqualIgnoreOrder, NotEqual) { - Http::TestHeaderMapImpl lhs{{":method", "GET"}, {":authority", "host"}, {":authority", "host"}}; - Http::TestHeaderMapImpl rhs{{":method", "GET"}, {":authority", "host"}}; + Http::TestRequestHeaderMapImpl lhs{ + {":method", "GET"}, {":authority", "host"}, {":authority", "host"}}; + Http::TestRequestHeaderMapImpl rhs{{":method", "GET"}, {":authority", "host"}}; EXPECT_FALSE(TestUtility::headerMapEqualIgnoreOrder(lhs, rhs)); } diff --git a/test/test_runner.cc b/test/test_runner.cc index 5eedad5ae23f6..c90555899f1ac 100644 --- a/test/test_runner.cc +++ b/test/test_runner.cc @@ -44,12 +44,15 @@ std::string findAndRemove(const std::regex& pattern, int& argc, char**& argv) { // This class is created iff a test is run with the special runtime override flag. class RuntimeManagingListener : public ::testing::EmptyTestEventListener { public: - RuntimeManagingListener(std::string& runtime_override) : runtime_override_(runtime_override) {} + RuntimeManagingListener(std::string& runtime_override, bool disable = false) + : runtime_override_(runtime_override), disable_(disable) {} // On each test start, edit RuntimeFeaturesDefaults with our custom runtime defaults. 
void OnTestStart(const ::testing::TestInfo&) override { if (!runtime_override_.empty()) { - if (!Runtime::RuntimeFeaturesPeer::addFeature(runtime_override_)) { + bool reset = disable_ ? Runtime::RuntimeFeaturesPeer::disableFeature(runtime_override_) + : Runtime::RuntimeFeaturesPeer::enableFeature(runtime_override_); + if (!reset) { // If the entry was already in the hash map, don't remove it OnTestEnd. runtime_override_.clear(); } @@ -59,10 +62,14 @@ class RuntimeManagingListener : public ::testing::EmptyTestEventListener { // As each test ends, clean up the RuntimeFeaturesDefaults state. void OnTestEnd(const ::testing::TestInfo&) override { if (!runtime_override_.empty()) { - Runtime::RuntimeFeaturesPeer::removeFeature(runtime_override_); + disable_ ? Runtime::RuntimeFeaturesPeer::enableFeature(runtime_override_) + : Runtime::RuntimeFeaturesPeer::disableFeature(runtime_override_); } } std::string runtime_override_; + // This marks whether the runtime feature was enabled by default and needs to be overridden to + // false. + bool disable_; }; } // namespace @@ -94,15 +101,28 @@ int TestRunner::RunTests(int argc, char** argv) { // Before letting TestEnvironment latch argv and argc, remove any runtime override flag. // This allows doing test overrides of Envoy runtime features without adding // test flags to the Envoy production command line. 
- const std::regex PATTERN{"--runtime-feature-override-for-tests=(.*)", std::regex::optimize}; - std::string runtime_override = findAndRemove(PATTERN, argc, argv); - if (!runtime_override.empty()) { + const std::regex ENABLE_PATTERN{"--runtime-feature-override-for-tests=(.*)", + std::regex::optimize}; + std::string runtime_override_enable = findAndRemove(ENABLE_PATTERN, argc, argv); + if (!runtime_override_enable.empty()) { ENVOY_LOG_TO_LOGGER(Logger::Registry::getLog(Logger::Id::testing), info, - "Running with runtime feature override {}", runtime_override); + "Running with runtime feature override enable {}", runtime_override_enable); // Set up a listener which will create a global runtime and set the feature // to true for the duration of each test instance. ::testing::TestEventListeners& listeners = ::testing::UnitTest::GetInstance()->listeners(); - listeners.Append(new RuntimeManagingListener(runtime_override)); + listeners.Append(new RuntimeManagingListener(runtime_override_enable)); + } + const std::regex DISABLE_PATTERN{"--runtime-feature-disable-for-tests=(.*)", + std::regex::optimize}; + std::string runtime_override_disable = findAndRemove(DISABLE_PATTERN, argc, argv); + if (!runtime_override_disable.empty()) { + ENVOY_LOG_TO_LOGGER(Logger::Registry::getLog(Logger::Id::testing), info, + "Running with runtime feature override disable {}", + runtime_override_disable); + // Set up a listener which will create a global runtime and set the feature + // to false for the duration of each test instance. 
+ ::testing::TestEventListeners& listeners = ::testing::UnitTest::GetInstance()->listeners(); + listeners.Append(new RuntimeManagingListener(runtime_override_disable, true)); } #ifdef ENVOY_CONFIG_COVERAGE diff --git a/test/tools/config_load_check/BUILD b/test/tools/config_load_check/BUILD index da23f11daedfe..68e744520fd68 100644 --- a/test/tools/config_load_check/BUILD +++ b/test/tools/config_load_check/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test_binary", @@ -7,6 +5,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test_library( diff --git a/test/tools/router_check/BUILD b/test/tools/router_check/BUILD index 874fff81929ec..992fb394d424a 100644 --- a/test/tools/router_check/BUILD +++ b/test/tools/router_check/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test_binary", @@ -8,6 +6,8 @@ load( "envoy_proto_library", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test_binary( @@ -35,7 +35,7 @@ envoy_cc_test_library( "//source/common/router:config_lib", "//source/common/stats:stats_lib", "//source/exe:platform_impl_lib", - "//test/mocks/server:server_mocks", + "//test/mocks/server:instance_mocks", "//test/test_common:printers_lib", "//test/test_common:utility_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", diff --git a/test/tools/router_check/coverage.h b/test/tools/router_check/coverage.h index 13019eec1af5d..051ea3a50f7c0 100644 --- a/test/tools/router_check/coverage.h +++ b/test/tools/router_check/coverage.h @@ -3,8 +3,6 @@ #include "envoy/config/route/v3/route.pb.h" #include "envoy/router/router.h" -#include "test/mocks/server/mocks.h" - namespace Envoy { class RouteCoverage : Logger::Loggable { public: diff --git a/test/tools/router_check/router.cc b/test/tools/router_check/router.cc index 74520ac641b88..e79671f255d2d 100644 --- a/test/tools/router_check/router.cc +++ 
b/test/tools/router_check/router.cc @@ -3,16 +3,15 @@ #include #include #include -#include #include "envoy/config/core/v3/base.pb.h" #include "envoy/config/route/v3/route.pb.h" #include "envoy/type/v3/percent.pb.h" +#include "common/common/random_generator.h" #include "common/network/utility.h" #include "common/protobuf/message_validator_impl.h" #include "common/protobuf/utility.h" -#include "common/runtime/runtime_impl.h" #include "common/stream_info/stream_info_impl.h" #include "test/test_common/printers.h" @@ -84,7 +83,7 @@ RouterCheckTool RouterCheckTool::create(const std::string& router_config_file, void RouterCheckTool::assignUniqueRouteNames( envoy::config::route::v3::RouteConfiguration& route_config) { - Runtime::RandomGeneratorImpl random; + Random::RandomGeneratorImpl random; for (auto& host : *route_config.mutable_virtual_hosts()) { for (auto& route : *host.mutable_routes()) { route.set_name(random.uuid()); diff --git a/test/tools/router_check/router.h b/test/tools/router_check/router.h index 19d2f86d746e7..04da7a40af7df 100644 --- a/test/tools/router_check/router.h +++ b/test/tools/router_check/router.h @@ -14,7 +14,7 @@ #include "common/router/config_impl.h" #include "common/stats/fake_symbol_table_impl.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/instance.h" #include "test/test_common/global.h" #include "test/test_common/printers.h" #include "test/test_common/utility.h" diff --git a/test/tools/router_check/test/BUILD b/test/tools/router_check/test/BUILD index 4e8e7f8885b85..3e8d50e06b8fc 100644 --- a/test/tools/router_check/test/BUILD +++ b/test/tools/router_check/test/BUILD @@ -1,22 +1,20 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", "envoy_sh_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_sh_test( name = "router_tool_test", srcs = ["route_tests.sh"], + cc_binary = ["//test/tools/router_check:router_check_tool"], data = [ ":configs", - 
"//test/tools/router_check:router_check_tool", ], - # TODO: This script invocation does not work on Windows, see: https://github.com/bazelbuild/bazel/issues/10959 - tags = ["fails_on_windows"], ) filegroup( diff --git a/test/tools/schema_validator/BUILD b/test/tools/schema_validator/BUILD index 5bfa78514ffc2..c0d198f6f5d59 100644 --- a/test/tools/schema_validator/BUILD +++ b/test/tools/schema_validator/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test_binary", @@ -7,6 +5,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test_binary( diff --git a/test/tools/type_whisperer/BUILD b/test/tools/type_whisperer/BUILD index 5d27871280581..9e23abf89d397 100644 --- a/test/tools/type_whisperer/BUILD +++ b/test/tools/type_whisperer/BUILD @@ -1,7 +1,7 @@ -licenses(["notice"]) # Apache 2 - load("//bazel:envoy_build_system.bzl", "envoy_cc_test", "envoy_package") +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( diff --git a/test/tools/wee8_compile/BUILD b/test/tools/wee8_compile/BUILD index 9c363c7d9275b..d1184b0717508 100644 --- a/test/tools/wee8_compile/BUILD +++ b/test/tools/wee8_compile/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test_binary", @@ -7,6 +5,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test_binary( diff --git a/tools/BUILD b/tools/BUILD index fbd9abfc774bd..a3313f01becce 100644 --- a/tools/BUILD +++ b/tools/BUILD @@ -1,5 +1,4 @@ -licenses(["notice"]) # Apache 2 - +load("@rules_python//python:defs.bzl", "py_library") load( "//bazel:envoy_build_system.bzl", "envoy_cc_binary", @@ -8,6 +7,8 @@ load( "envoy_py_test_binary", ) +licenses(["notice"]) # Apache 2 + envoy_package() exports_files([ diff --git a/tools/api/generate_go_protobuf.py b/tools/api/generate_go_protobuf.py index ba6ca16ead1e5..620e80bb32c58 100755 --- 
a/tools/api/generate_go_protobuf.py +++ b/tools/api/generate_go_protobuf.py @@ -40,8 +40,8 @@ def generateProtobufs(output): # Example output directory: # go_out/envoy/config/bootstrap/v2 rule_dir, proto = rule.decode()[len('@envoy_api//'):].rsplit(':', 1) - input_dir = os.path.join(bazel_bin, 'external', 'envoy_api', rule_dir, 'linux_amd64_stripped', - proto + '%', IMPORT_BASE, rule_dir) + input_dir = os.path.join(bazel_bin, 'external', 'envoy_api', rule_dir, proto + '_', IMPORT_BASE, + rule_dir) input_files = glob.glob(os.path.join(input_dir, '*.go')) output_dir = os.path.join(output, rule_dir) @@ -80,7 +80,14 @@ def findLastSyncSHA(repo): def updatedSinceSHA(repo, last_sha): # Determine if there are changes to API since last SHA - return git(None, 'rev-list', '%s..HEAD' % last_sha, 'api/envoy').split() + return git(None, 'rev-list', '%s..HEAD' % last_sha).split() + + +def writeRevisionInfo(repo, sha): + # Put a file in the generated code root containing the latest mirrored SHA + dst = os.path.join(repo, 'envoy', 'COMMIT') + with open(dst, 'w') as fh: + fh.write(sha) def syncGoProtobufs(output, repo): @@ -90,6 +97,7 @@ def syncGoProtobufs(output, repo): git(repo, 'rm', '-r', 'envoy') # Copy subtree at envoy from output to repo shutil.copytree(os.path.join(output, 'envoy'), dst) + git(repo, 'add', 'envoy') def publishGoProtobufs(repo, sha): @@ -101,15 +109,22 @@ def publishGoProtobufs(repo, sha): git(repo, 'push', 'origin', BRANCH) +def updated(repo): + return len( + [f for f in git(repo, 'diff', 'HEAD', '--name-only').splitlines() if f != 'envoy/COMMIT']) > 0 + + if __name__ == "__main__": workspace = check_output(['bazel', 'info', 'workspace']).decode().strip() output = os.path.join(workspace, OUTPUT_BASE) generateProtobufs(output) repo = os.path.join(workspace, REPO_BASE) cloneGoProtobufs(repo) + syncGoProtobufs(output, repo) last_sha = findLastSyncSHA(repo) changes = updatedSinceSHA(repo, last_sha) - if changes: + if updated(repo): print('Changes detected: %s' 
% changes) - syncGoProtobufs(output, repo) - publishGoProtobufs(repo, changes[0]) + new_sha = changes[0] + writeRevisionInfo(repo, new_sha) + publishGoProtobufs(repo, new_sha) diff --git a/tools/api/validate_structure.py b/tools/api/validate_structure.py index 3b59af2021367..05cfe0cff5025 100755 --- a/tools/api/validate_structure.py +++ b/tools/api/validate_structure.py @@ -17,12 +17,6 @@ 'config/common/tap', ] -# These are trees that allow v3+ protos, but only a strict whitelist. -V3_RESTRICTED_PATHS = { - 'config/accesslog/v3': ['accesslog.proto'], - 'service/discovery/v3': ['ads.proto', 'discovery.proto'], -} - # These are the only legacy trees that we permit not to terminate with a versioned suffix. VERSIONLESS_PATHS = [ 'annotations', @@ -71,13 +65,6 @@ def ValidateProtoPath(proto_path): if str(proto_path).startswith(p): raise ValidationError('v3+ protos are not allowed in %s' % p) - # Validate v3 restricted paths. - for p in V3_RESTRICTED_PATHS: - if str(proto_path).startswith(p): - allowed_files = V3_RESTRICTED_PATHS[p] - if proto_path.name not in allowed_files: - raise ValidationError('Only %s allowed in %s' % (allowed_files, p)) - # Validate a list of proto paths. def ValidateProtoPaths(proto_paths): diff --git a/tools/api_boost/api_boost.py b/tools/api_boost/api_boost.py index e644680e33927..eda6eaf940881 100755 --- a/tools/api_boost/api_boost.py +++ b/tools/api_boost/api_boost.py @@ -44,7 +44,8 @@ def ApiBoostFile(llvm_include_path, debug_log, path): result = sp.run([ './bazel-bin/external/envoy_dev/clang_tools/api_booster/api_booster', '--extra-arg-before=-xc++', - '--extra-arg=-isystem%s' % llvm_include_path, '--extra-arg=-Wno-undefined-internal', path + '--extra-arg=-isystem%s' % llvm_include_path, '--extra-arg=-Wno-undefined-internal', + '--extra-arg=-Wno-old-style-cast', path ], capture_output=True, check=True) @@ -100,8 +101,7 @@ def ApiBoostTree(target_paths, # tool in place before we can start boosting. 
if generate_compilation_database: print('Building compilation database for %s' % dep_build_targets) - sp.run(['./tools/gen_compilation_database.py', '--run_bazel_build', '--include_headers'] + - dep_build_targets, + sp.run(['./tools/gen_compilation_database.py', '--include_headers'] + dep_build_targets, check=True) if build_api_booster: diff --git a/tools/api_boost/testdata/BUILD b/tools/api_boost/testdata/BUILD index 17f7233955bcd..148b1b5c47873 100644 --- a/tools/api_boost/testdata/BUILD +++ b/tools/api_boost/testdata/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/tools/api_boost/testdata/deprecate.cc.gold b/tools/api_boost/testdata/deprecate.cc.gold index bee1dacfe56e7..0158efa26d9a0 100644 --- a/tools/api_boost/testdata/deprecate.cc.gold +++ b/tools/api_boost/testdata/deprecate.cc.gold @@ -1,11 +1,11 @@ #include "envoy/config/cluster/v4alpha/cluster.pb.h" #include "envoy/config/route/v4alpha/route_components.pb.h" -#include "envoy/type/matcher/v3/string.pb.h" +#include "envoy/type/matcher/v4alpha/string.pb.h" void test() { envoy::config::route::v4alpha::VirtualHost vhost; vhost.hidden_envoy_deprecated_per_filter_config(); vhost.mutable_hidden_envoy_deprecated_per_filter_config(); - static_cast(envoy::type::matcher::v3::StringMatcher::MatchPatternCase::kHiddenEnvoyDeprecatedRegex); + static_cast(envoy::type::matcher::v4alpha::StringMatcher::MatchPatternCase::kHiddenEnvoyDeprecatedRegex); static_cast(envoy::config::cluster::v4alpha::Cluster::hidden_envoy_deprecated_ORIGINAL_DST_LB); } diff --git a/tools/api_proto_plugin/BUILD b/tools/api_proto_plugin/BUILD index 5c6c535a4a88a..788b6d6e42951 100644 --- a/tools/api_proto_plugin/BUILD +++ b/tools/api_proto_plugin/BUILD @@ -1,9 +1,9 @@ -licenses(["notice"]) # Apache 2 - load("@bazel_skylib//rules:common_settings.bzl", "string_flag") 
load("@rules_python//python:defs.bzl", "py_library") load("//tools/type_whisperer:type_database.bzl", "type_database") +licenses(["notice"]) # Apache 2 + py_library( name = "api_proto_plugin", srcs = [ diff --git a/tools/api_proto_plugin/plugin.bzl b/tools/api_proto_plugin/plugin.bzl index 95568e47123ad..0b19fb60031f1 100644 --- a/tools/api_proto_plugin/plugin.bzl +++ b/tools/api_proto_plugin/plugin.bzl @@ -36,7 +36,7 @@ def api_proto_plugin_impl(target, ctx, output_group, mnemonic, output_suffixes): # extractions. See https://github.com/bazelbuild/bazel/issues/3971. import_paths = [] for f in target[ProtoInfo].transitive_sources.to_list(): - import_paths += ["{}={}".format(_path_ignoring_repository(f), f.path)] + import_paths.append("{}={}".format(_path_ignoring_repository(f), f.path)) # The outputs live in the ctx.label's package root. We add some additional # path information to match with protoc's notion of path relative locations. @@ -56,9 +56,9 @@ def api_proto_plugin_impl(target, ctx, output_group, mnemonic, output_suffixes): inputs = depset(transitive = [inputs] + [ctx.attr._type_db.files]) if len(ctx.attr._type_db.files.to_list()) != 1: fail("{} must have one type database file".format(ctx.attr._type_db)) - args += ["--api_proto_plugin_opt=type_db_path=" + ctx.attr._type_db.files.to_list()[0].path] + args.append("--api_proto_plugin_opt=type_db_path=" + ctx.attr._type_db.files.to_list()[0].path) if hasattr(ctx.attr, "_extra_args"): - args += ["--api_proto_plugin_opt=extra_args=" + ctx.attr._extra_args[BuildSettingInfo].value] + args.append("--api_proto_plugin_opt=extra_args=" + ctx.attr._extra_args[BuildSettingInfo].value) args += [src.path for src in target[ProtoInfo].direct_sources] env = {} diff --git a/tools/bazel.rc b/tools/bazel.rc deleted file mode 100644 index 77a70a875a02e..0000000000000 --- a/tools/bazel.rc +++ /dev/null @@ -1,4 +0,0 @@ -# This is intended to fail build process when this tools/bazel.rc is processed. 
-# Bazel will print what is processed and raise an error since --dummy_unknown_option is not recognized. - -common --dummy_unknown_option="ERROR: tools/bazel.rc is being processed, either due to old version of bazel or wrongly symlinked from .bazelrc. Update your bazel and symlink to top level .bazelrc instead." diff --git a/tools/clang_tools/README.md b/tools/clang_tools/README.md index a53ad6038af30..30bbdbddcd756 100644 --- a/tools/clang_tools/README.md +++ b/tools/clang_tools/README.md @@ -14,7 +14,7 @@ framework for writing Clang tools in the style of `clang-format` and To build tools in this tree, a Clang binary install must be available. If you are building Envoy with `clang`, this should already be true of your system. You can find prebuilt binary releases of Clang at https://releases.llvm.org. You -will need the Clang version used by Envoy in CI (currently clang-9.0). +will need the Clang version used by Envoy in CI (currently clang-10.0). To build a tool, set the following environment variable: @@ -36,7 +36,7 @@ generates this and also does setup of the Bazel cache paths to allow external dependencies to be located: ```console -tools/gen_compilation_database.py --run_bazel_build --include_headers +tools/gen_compilation_database.py --include_headers ``` Finally, the tool can be run against source files in the Envoy tree: diff --git a/tools/clang_tools/api_booster/BUILD b/tools/clang_tools/api_booster/BUILD index 296e318c01fe0..d6affe19640b6 100644 --- a/tools/clang_tools/api_booster/BUILD +++ b/tools/clang_tools/api_booster/BUILD @@ -24,6 +24,7 @@ clang_tools_cc_library( srcs = ["proto_cxx_utils.cc"], hdrs = ["proto_cxx_utils.h"], deps = [ + "@com_google_absl//absl/container:node_hash_map", "@com_google_absl//absl/strings", "@com_google_absl//absl/types:optional", ], diff --git a/tools/clang_tools/api_booster/main.cc b/tools/clang_tools/api_booster/main.cc index c1ca773024ce9..976ddc969fcd8 100644 --- a/tools/clang_tools/api_booster/main.cc +++ 
b/tools/clang_tools/api_booster/main.cc @@ -27,6 +27,7 @@ #include "tools/type_whisperer/api_type_db.h" +#include "absl/container/node_hash_map.h" #include "absl/strings/str_cat.h" // Enable to see debug log messages. @@ -243,7 +244,7 @@ class ApiBooster : public clang::ast_matchers::MatchFinder::MatchCallback, const clang::SourceManager& source_manager) { auto* direct_callee = call_expr.getDirectCallee(); if (direct_callee != nullptr) { - const std::unordered_map ValidateNameToArg = { + const absl::node_hash_map ValidateNameToArg = { {"loadFromYamlAndValidate", 1}, {"loadFromFileAndValidate", 1}, {"downcastAndValidate", -1}, @@ -489,7 +490,7 @@ class ApiBooster : public clang::ast_matchers::MatchFinder::MatchCallback, !absl::StartsWith(proto_type_name, "envoy.test") && !absl::StartsWith(proto_type_name, "envoy.tracers.xray.daemon")) { // Die hard if we don't have a useful proto type for something that looks - // like an API type(modulo a short whitelist). + // like an API type(modulo a short allowlist). std::cerr << "Unknown API type: " << proto_type_name << std::endl; // TODO(htuch): maybe there is a nicer way to terminate AST traversal? ::exit(1); diff --git a/tools/clang_tools/api_booster/proto_cxx_utils.cc b/tools/clang_tools/api_booster/proto_cxx_utils.cc index 42cc92e7c4c41..194bdc0e6bf7b 100644 --- a/tools/clang_tools/api_booster/proto_cxx_utils.cc +++ b/tools/clang_tools/api_booster/proto_cxx_utils.cc @@ -40,10 +40,10 @@ std::string ProtoCxxUtils::protoToCxxType(const std::string& proto_type_name, bo absl::optional ProtoCxxUtils::renameMethod(absl::string_view method_name, - const std::unordered_map renames) { + const absl::node_hash_map renames) { // Simple O(N * M) match, where M is constant (the set of prefixes/suffixes) so // should be fine. 
- for (const auto field_rename : renames) { + for (const auto& field_rename : renames) { const std::vector GeneratedMethodPrefixes = { "clear_", "set_", "has_", "mutable_", "set_allocated_", "release_", "add_", "", }; @@ -63,7 +63,7 @@ ProtoCxxUtils::renameMethod(absl::string_view method_name, absl::optional ProtoCxxUtils::renameConstant(absl::string_view constant_name, - const std::unordered_map renames) { + const absl::node_hash_map renames) { if (constant_name.size() < 2 || constant_name[0] != 'k' || !isupper(constant_name[1])) { return {}; } @@ -91,7 +91,7 @@ ProtoCxxUtils::renameConstant(absl::string_view constant_name, absl::optional ProtoCxxUtils::renameEnumValue(absl::string_view enum_value_name, - const std::unordered_map renames) { + const absl::node_hash_map renames) { const auto it = renames.find(std::string(enum_value_name)); if (it == renames.cend()) { return {}; diff --git a/tools/clang_tools/api_booster/proto_cxx_utils.h b/tools/clang_tools/api_booster/proto_cxx_utils.h index 22b816455bc98..10eff61a79104 100644 --- a/tools/clang_tools/api_booster/proto_cxx_utils.h +++ b/tools/clang_tools/api_booster/proto_cxx_utils.h @@ -1,8 +1,8 @@ #pragma once #include -#include +#include "absl/container/node_hash_map.h" #include "absl/strings/str_join.h" #include "absl/strings/str_split.h" #include "absl/types/optional.h" @@ -25,18 +25,18 @@ class ProtoCxxUtils { // field in proto, and if so, return the new method name. static absl::optional renameMethod(absl::string_view method_name, - const std::unordered_map renames); + const absl::node_hash_map renames); // Given a constant, e.g. kFooBar, determine if it needs upgrading. We need // this for synthesized oneof cases. static absl::optional renameConstant(absl::string_view constant_name, - const std::unordered_map renames); + const absl::node_hash_map renames); // Given an enum value, e.g. FOO_BAR determine if it needs upgrading. 
static absl::optional renameEnumValue(absl::string_view enum_value_name, - const std::unordered_map renames); + const absl::node_hash_map renames); // Convert from a protobuf type, e.g. foo.bar.v2, to a C++ type, e.g. // foo::bar::v2. diff --git a/tools/clang_tools/api_booster/proto_cxx_utils_test.cc b/tools/clang_tools/api_booster/proto_cxx_utils_test.cc index 6b4e0789ba108..2a06413bd4d25 100644 --- a/tools/clang_tools/api_booster/proto_cxx_utils_test.cc +++ b/tools/clang_tools/api_booster/proto_cxx_utils_test.cc @@ -1,5 +1,3 @@ -#include - #include "gtest/gtest.h" #include "proto_cxx_utils.h" @@ -32,7 +30,7 @@ TEST(ProtoCxxUtils, ProtoToCxxType) { // Validate proto field accessor upgrades. TEST(ProtoCxxUtils, RenameMethod) { - const std::unordered_map renames = { + const absl::node_hash_map renames = { {"foo", "bar"}, {"bar", "baz"}, }; @@ -52,7 +50,7 @@ TEST(ProtoCxxUtils, RenameMethod) { // Validate proto constant upgrades. TEST(ProtoCxxUtils, RenameConstant) { - const std::unordered_map renames = { + const absl::node_hash_map renames = { {"foo_bar", "bar_foo"}, {"foo_baz", "baz"}, }; @@ -63,7 +61,7 @@ TEST(ProtoCxxUtils, RenameConstant) { // Validate proto enum value upgrades. TEST(ProtoCxxUtils, RenameEnumValue) { - const std::unordered_map renames = { + const absl::node_hash_map renames = { {"FOO_BAR", "BAR_FOO"}, }; EXPECT_EQ(absl::nullopt, ProtoCxxUtils::renameEnumValue("FOO_BAZ", renames)); diff --git a/tools/clang_tools/support/BUILD.prebuilt b/tools/clang_tools/support/BUILD.prebuilt index 277c9ad802f8f..e77dcb0fe2685 100644 --- a/tools/clang_tools/support/BUILD.prebuilt +++ b/tools/clang_tools/support/BUILD.prebuilt @@ -1,8 +1,8 @@ -# Clang 9.0 library pre-built Bazel. +# Clang 10.0 library pre-built Bazel. # # This file was mostly manually assembled (with some hacky Python scripts) from -# clang+llvm-9.0.0-x86_64-linux-gnu-ubuntu-16.04.tar.xz and corresponding -# https://github.com/llvm/llvm-project.git source. It needs Clang 9.0 to work. 
+# clang+llvm-10.0.0-x86_64-linux-gnu-ubuntu-18.04.tar.xz and corresponding +# https://github.com/llvm/llvm-project.git source. It needs Clang 10.0 to work. # # The BUILD file has sufficient dependency relationships # between the prebuilt libraries in a clang-llvm distribution to support building libtooling @@ -152,6 +152,7 @@ cc_library( ":clang_basic", ":clang_lex", ":clang_sema", + ":llvm_frontend_omp", ":llvm_mc", ":llvm_mcparser", ":llvm_support", @@ -306,6 +307,12 @@ cc_library( hdrs = glob(["llvm/Demangle/**"]), ) +cc_library( + name = "llvm_frontend_omp", + srcs = ["lib/libLLVMFrontendOpenMP.a"], + hdrs = glob(["llvm/Frontend/OpenMP/**"]), +) + cc_library( name = "llvm_mc", srcs = ["lib/libLLVMMC.a"], diff --git a/tools/clang_tools/support/clang_tools.bzl b/tools/clang_tools/support/clang_tools.bzl index ece24fc872315..398b80330b663 100644 --- a/tools/clang_tools/support/clang_tools.bzl +++ b/tools/clang_tools/support/clang_tools.bzl @@ -1,24 +1,30 @@ +load("@rules_cc//cc:defs.bzl", "cc_binary", "cc_library", "cc_test") + +_clang_tools_copts = [ + "-fno-exceptions", + "-fno-rtti", +] + def clang_tools_cc_binary(name, copts = [], tags = [], deps = [], **kwargs): - native.cc_binary( + cc_binary( name = name, - copts = copts + [ - "-fno-exceptions", - "-fno-rtti", - ], + copts = copts + _clang_tools_copts, tags = tags + ["manual"], deps = deps + ["@envoy//bazel/foreign_cc:zlib"], **kwargs ) -def clang_tools_cc_library(name, **kwargs): - native.cc_library( +def clang_tools_cc_library(name, copts = [], **kwargs): + cc_library( name = name, + copts = copts + _clang_tools_copts, **kwargs ) -def clang_tools_cc_test(name, deps = [], **kwargs): - native.cc_test( +def clang_tools_cc_test(name, copts = [], deps = [], **kwargs): + cc_test( name = name, + copts = copts + _clang_tools_copts, deps = deps + ["@com_google_googletest//:gtest_main"], **kwargs ) diff --git a/tools/code_format/check_format.py b/tools/code_format/check_format.py index c9683f8cce4de..0f1ebfc5b9553 
100755 --- a/tools/code_format/check_format.py +++ b/tools/code_format/check_format.py @@ -25,13 +25,13 @@ PROTO_SUFFIX = (".proto") # Files in these paths can make reference to protobuf stuff directly -GOOGLE_PROTOBUF_WHITELIST = ("ci/prebuilt", "source/common/protobuf", "api/test") +GOOGLE_PROTOBUF_ALLOWLIST = ("ci/prebuilt", "source/common/protobuf", "api/test") REPOSITORIES_BZL = "bazel/repositories.bzl" # Files matching these exact names can reference real-world time. These include the class # definitions for real-world time, the construction of them in main(), and perf annotation. # For now it includes the validation server but that really should be injected too. -REAL_TIME_WHITELIST = ("./source/common/common/utility.h", +REAL_TIME_ALLOWLIST = ("./source/common/common/utility.h", "./source/extensions/common/aws/utility.cc", "./source/common/event/real_time_system.cc", "./source/common/event/real_time_system.h", "./source/exe/main_common.cc", @@ -46,11 +46,11 @@ # Tests in these paths may make use of the Registry::RegisterFactory constructor or the # REGISTER_FACTORY macro. Other locations should use the InjectFactory helper class to # perform temporary registrations. 
-REGISTER_FACTORY_TEST_WHITELIST = ("./test/common/config/registry_test.cc", +REGISTER_FACTORY_TEST_ALLOWLIST = ("./test/common/config/registry_test.cc", "./test/integration/clusters/", "./test/integration/filters/") # Files in these paths can use MessageLite::SerializeAsString -SERIALIZE_AS_STRING_WHITELIST = ( +SERIALIZE_AS_STRING_ALLOWLIST = ( "./source/common/config/version_converter.cc", "./source/common/protobuf/utility.cc", "./source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc", @@ -58,34 +58,40 @@ "./test/common/config/version_converter_test.cc", "./test/common/grpc/codec_test.cc", "./test/common/grpc/codec_fuzz_test.cc", + "./test/extensions/filters/http/common/fuzz/uber_filter.h", ) # Files in these paths can use Protobuf::util::JsonStringToMessage -JSON_STRING_TO_MESSAGE_WHITELIST = ("./source/common/protobuf/utility.cc") +JSON_STRING_TO_MESSAGE_ALLOWLIST = ("./source/common/protobuf/utility.cc") # Histogram names which are allowed to be suffixed with the unit symbol, all of the pre-existing # ones were grandfathered as part of PR #8484 for backwards compatibility. 
-HISTOGRAM_WITH_SI_SUFFIX_WHITELIST = ("downstream_cx_length_ms", "downstream_cx_length_ms", +HISTOGRAM_WITH_SI_SUFFIX_ALLOWLIST = ("downstream_cx_length_ms", "downstream_cx_length_ms", "initialization_time_ms", "loop_duration_us", "poll_delay_us", "request_time_ms", "upstream_cx_connect_ms", "upstream_cx_length_ms") # Files in these paths can use std::regex -STD_REGEX_WHITELIST = ( +STD_REGEX_ALLOWLIST = ( "./source/common/common/utility.cc", "./source/common/common/regex.h", "./source/common/common/regex.cc", "./source/common/stats/tag_extractor_impl.h", "./source/common/stats/tag_extractor_impl.cc", - "./source/common/access_log/access_log_formatter.cc", + "./source/common/formatter/substitution_formatter.cc", "./source/extensions/filters/http/squash/squash_filter.h", - "./source/extensions/filters/http/squash/squash_filter.cc", "./source/server/http/utils.h", - "./source/server/http/utils.cc", "./source/server/http/stats_handler.h", - "./source/server/http/stats_handler.cc", "./tools/clang_tools/api_booster/main.cc", - "./tools/clang_tools/api_booster/proto_cxx_utils.cc", "./source/common/common/version.cc") + "./source/extensions/filters/http/squash/squash_filter.cc", "./source/server/admin/utils.h", + "./source/server/admin/utils.cc", "./source/server/admin/stats_handler.h", + "./source/server/admin/stats_handler.cc", "./source/server/admin/prometheus_stats.h", + "./source/server/admin/prometheus_stats.cc", "./tools/clang_tools/api_booster/main.cc", + "./tools/clang_tools/api_booster/proto_cxx_utils.cc", "./source/common/version/version.cc") # Only one C++ file should instantiate grpc_init -GRPC_INIT_WHITELIST = ("./source/common/grpc/google_grpc_context.cc") +GRPC_INIT_ALLOWLIST = ("./source/common/grpc/google_grpc_context.cc") -CLANG_FORMAT_PATH = os.getenv("CLANG_FORMAT", "clang-format-9") +# These files should not throw exceptions. Add HTTP/1 when exceptions removed. 
+EXCEPTION_DENYLIST = ("./source/common/http/http2/codec_impl.h", + "./source/common/http/http2/codec_impl.cc") + +CLANG_FORMAT_PATH = os.getenv("CLANG_FORMAT", "clang-format-10") BUILDIFIER_PATH = paths.getBuildifier() BUILDOZER_PATH = paths.getBuildozer() ENVOY_BUILD_FIXER_PATH = os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), @@ -96,6 +102,13 @@ INCLUDE_ANGLE_LEN = len(INCLUDE_ANGLE) PROTO_PACKAGE_REGEX = re.compile(r"^package (\S+);\n*", re.MULTILINE) X_ENVOY_USED_DIRECTLY_REGEX = re.compile(r'.*\"x-envoy-.*\".*') +DESIGNATED_INITIALIZER_REGEX = re.compile(r"\{\s*\.\w+\s*\=") +MANGLED_PROTOBUF_NAME_REGEX = re.compile(r"envoy::[a-z0-9_:]+::[A-Z][a-z]\w*_\w*_[A-Z]{2}") +HISTOGRAM_SI_SUFFIX_REGEX = re.compile(r"(?<=HISTOGRAM\()[a-zA-Z0-9_]+_(b|kb|mb|ns|us|ms|s)(?=,)") +TEST_NAME_STARTING_LOWER_CASE_REGEX = re.compile(r"TEST(_.\(.*,\s|\()[a-z].*\)\s\{") +EXTENSIONS_CODEOWNERS_REGEX = re.compile(r'.*(extensions[^@]*\s+)(@.*)') +COMMENT_REGEX = re.compile(r"//|\*") +DURATION_VALUE_REGEX = re.compile(r'\b[Dd]uration\(([0-9.]+)') # yapf: disable PROTOBUF_TYPE_ERRORS = { @@ -211,11 +224,7 @@ def readFile(path): # lookPath searches for the given executable in all directories in PATH # environment variable. If it cannot be found, empty string is returned. def lookPath(executable): - for path_dir in os.environ["PATH"].split(os.pathsep): - executable_path = os.path.join(path_dir, executable) - if os.path.exists(executable_path): - return executable_path - return "" + return shutil.which(executable) or '' # pathExists checks whether the given path exists. This function assumes that @@ -243,13 +252,13 @@ def checkTools(): "users".format(CLANG_FORMAT_PATH)) else: error_messages.append( - "Command {} not found. If you have clang-format in version 9.x.x " + "Command {} not found. If you have clang-format in version 10.x.x " "installed, but the binary name is different or it's not available in " "PATH, please use CLANG_FORMAT environment variable to specify the path. 
" "Examples:\n" - " export CLANG_FORMAT=clang-format-9.0.0\n" - " export CLANG_FORMAT=/opt/bin/clang-format-9\n" - " export CLANG_FORMAT=/usr/local/opt/llvm@9/bin/clang-format".format(CLANG_FORMAT_PATH)) + " export CLANG_FORMAT=clang-format-10.0.0\n" + " export CLANG_FORMAT=/opt/bin/clang-format-10\n" + " export CLANG_FORMAT=/usr/local/opt/llvm@10/bin/clang-format".format(CLANG_FORMAT_PATH)) def checkBazelTool(name, path, var): bazel_tool_abs_path = lookPath(path) @@ -303,54 +312,59 @@ def packageNameForProto(file_path): # To avoid breaking the Lyft import, we just check for path inclusion here. -def whitelistedForProtobufDeps(file_path): +def allowlistedForProtobufDeps(file_path): return (file_path.endswith(PROTO_SUFFIX) or file_path.endswith(REPOSITORIES_BZL) or \ - any(path_segment in file_path for path_segment in GOOGLE_PROTOBUF_WHITELIST)) + any(path_segment in file_path for path_segment in GOOGLE_PROTOBUF_ALLOWLIST)) # Real-world time sources should not be instantiated in the source, except for a few # specific cases. They should be passed down from where they are instantied to where # they need to be used, e.g. through the ServerInstance, Dispatcher, or ClusterManager. 
-def whitelistedForRealTime(file_path): +def allowlistedForRealTime(file_path): if file_path.endswith(".md"): return True - return file_path in REAL_TIME_WHITELIST + return file_path in REAL_TIME_ALLOWLIST -def whitelistedForRegisterFactory(file_path): +def allowlistedForRegisterFactory(file_path): if not file_path.startswith("./test/"): return True - return any(file_path.startswith(prefix) for prefix in REGISTER_FACTORY_TEST_WHITELIST) + return any(file_path.startswith(prefix) for prefix in REGISTER_FACTORY_TEST_ALLOWLIST) -def whitelistedForSerializeAsString(file_path): - return file_path in SERIALIZE_AS_STRING_WHITELIST +def allowlistedForSerializeAsString(file_path): + return file_path in SERIALIZE_AS_STRING_ALLOWLIST -def whitelistedForJsonStringToMessage(file_path): - return file_path in JSON_STRING_TO_MESSAGE_WHITELIST +def allowlistedForJsonStringToMessage(file_path): + return file_path in JSON_STRING_TO_MESSAGE_ALLOWLIST -def whitelistedForHistogramSiSuffix(name): - return name in HISTOGRAM_WITH_SI_SUFFIX_WHITELIST +def allowlistedForHistogramSiSuffix(name): + return name in HISTOGRAM_WITH_SI_SUFFIX_ALLOWLIST -def whitelistedForStdRegex(file_path): - return file_path.startswith("./test") or file_path in STD_REGEX_WHITELIST or file_path.endswith( +def allowlistedForStdRegex(file_path): + return file_path.startswith("./test") or file_path in STD_REGEX_ALLOWLIST or file_path.endswith( DOCS_SUFFIX) -def whitelistedForGrpcInit(file_path): - return file_path in GRPC_INIT_WHITELIST +def allowlistedForGrpcInit(file_path): + return file_path in GRPC_INIT_ALLOWLIST -def whitelistedForUnpackTo(file_path): +def allowlistedForUnpackTo(file_path): return file_path.startswith("./test") or file_path in [ "./source/common/protobuf/utility.cc", "./source/common/protobuf/utility.h" ] +def denylistedForExceptions(file_path): + return (file_path in EXCEPTION_DENYLIST or isInSubdir(file_path, 'tools/testdata')) and \ + not file_path.endswith(DOCS_SUFFIX) + + def 
findSubstringAndReturnError(pattern, file_path, error_message): text = readFile(file_path) if pattern in text: @@ -409,7 +423,7 @@ def hasInvalidAngleBracketDirectory(line): VERSION_HISTORY_NEW_LINE_REGEX = re.compile("\* ([a-z \-_]+): ([a-z:`]+)") -VERSION_HISTORY_NEW_SECTION_REGEX = re.compile("^-----[-]+$") +VERSION_HISTORY_SECTION_NAME = re.compile("^[A-Z][A-Za-z ]*$") RELOADABLE_FLAG_REGEX = re.compile(".*(.)(envoy.reloadable_features.[^ ]*)\s.*") # Check for punctuation in a terminal ref clause, e.g. # :ref:`panic mode. ` @@ -417,8 +431,6 @@ def hasInvalidAngleBracketDirectory(line): def checkCurrentReleaseNotes(file_path, error_messages): - in_changes_section = False - first_word_of_prior_line = '' next_word_to_check = '' # first word after : prior_line = '' @@ -437,12 +449,15 @@ def endsWithPeriod(prior_line): def reportError(message): error_messages.append("%s:%d: %s" % (file_path, line_number + 1, message)) - if VERSION_HISTORY_NEW_SECTION_REGEX.match(line): - # The second section is deprecations, which are not sorted. - if in_changes_section: + if VERSION_HISTORY_SECTION_NAME.match(line): + if line == "Deprecated": + # The deprecations section is last, and does not have enforced formatting. break - # If we see a section marker we are now in the changes section. - in_changes_section = True + + # Reset all parsing at the start of a section. 
+ first_word_of_prior_line = '' + next_word_to_check = '' # first word after : + prior_line = '' # make sure flags are surrounded by ``s flag_match = RELOADABLE_FLAG_REGEX.match(line) @@ -450,7 +465,7 @@ def reportError(message): if not flag_match.groups()[0].startswith('`'): reportError("Flag `%s` should be enclosed in back ticks" % flag_match.groups()[1]) - if line.startswith("*"): + if line.startswith("* "): if not endsWithPeriod(prior_line): reportError("The following release note does not end with a '.'\n %s" % prior_line) @@ -557,6 +572,26 @@ def isInSubdir(filename, *subdirs): return False +# Determines if given token exists in line without leading or trailing token characters +# e.g. will return True for a line containing foo() but not foo_bar() or baz_foo +def tokenInLine(token, line): + index = 0 + while True: + index = line.find(token, index) + # the following check has been changed from index < 1 to index < 0 because + # this function incorrectly returns false when the token in question is the + # first one in a line. The following line returns false when the token is present: + # (no leading whitespace) violating_symbol foo; + if index < 0: + break + if index == 0 or not (line[index - 1].isalnum() or line[index - 1] == '_'): + if index + len(token) >= len(line) or not (line[index + len(token)].isalnum() or + line[index + len(token)] == '_'): + return True + index = index + 1 + return False + + def checkSourceLine(line, file_path, reportError): # Check fixable errors. These may have been fixed already. if line.find(". ") != -1: @@ -581,7 +616,7 @@ def checkSourceLine(line, file_path, reportError): # Some errors cannot be fixed automatically, and actionable, consistent, # navigable messages should be emitted to make it easy to find and fix # the errors by hand. 
- if not whitelistedForProtobufDeps(file_path): + if not allowlistedForProtobufDeps(file_path): if '"google/protobuf' in line or "google::protobuf" in line: reportError("unexpected direct dependency on google.protobuf, use " "the definitions in common/protobuf/protobuf.h instead.") @@ -594,47 +629,73 @@ def checkSourceLine(line, file_path, reportError): # We don't check here for std::shared_timed_mutex because that may # legitimately show up in comments, for example this one. reportError("Don't use , use absl::Mutex for reader/writer locks.") - if not whitelistedForRealTime(file_path) and not "NO_CHECK_FORMAT(real_time)" in line: + if not allowlistedForRealTime(file_path) and not "NO_CHECK_FORMAT(real_time)" in line: if "RealTimeSource" in line or \ ("RealTimeSystem" in line and not "TestRealTimeSystem" in line) or \ "std::chrono::system_clock::now" in line or "std::chrono::steady_clock::now" in line or \ "std::this_thread::sleep_for" in line or hasCondVarWaitFor(line): reportError("Don't reference real-world time sources from production code; use injection") - if not whitelistedForRegisterFactory(file_path): + duration_arg = DURATION_VALUE_REGEX.search(line) + if duration_arg and duration_arg.group(1) != "0" and duration_arg.group(1) != "0.0": + # Matching duration(int-const or float-const) other than zero + reportError( + "Don't use ambiguous duration(value), use an explicit duration type, e.g. 
Event::TimeSystem::Milliseconds(value)" + ) + if not allowlistedForRegisterFactory(file_path): if "Registry::RegisterFactory<" in line or "REGISTER_FACTORY" in line: reportError("Don't use Registry::RegisterFactory or REGISTER_FACTORY in tests, " "use Registry::InjectFactory instead.") - if not whitelistedForUnpackTo(file_path): + if not allowlistedForUnpackTo(file_path): if "UnpackTo" in line: reportError("Don't use UnpackTo() directly, use MessageUtil::unpackTo() instead") # Check that we use the absl::Time library - if "std::get_time" in line: + if tokenInLine("std::get_time", line): if "test/" in file_path: reportError("Don't use std::get_time; use TestUtility::parseTime in tests") else: reportError("Don't use std::get_time; use the injectable time system") - if "std::put_time" in line: + if tokenInLine("std::put_time", line): reportError("Don't use std::put_time; use absl::Time equivalent instead") - if "gmtime" in line: + if tokenInLine("gmtime", line): reportError("Don't use gmtime; use absl::Time equivalent instead") - if "mktime" in line: + if tokenInLine("mktime", line): reportError("Don't use mktime; use absl::Time equivalent instead") - if "localtime" in line: + if tokenInLine("localtime", line): reportError("Don't use localtime; use absl::Time equivalent instead") - if "strftime" in line: + if tokenInLine("strftime", line): reportError("Don't use strftime; use absl::FormatTime instead") - if "strptime" in line: + if tokenInLine("strptime", line): reportError("Don't use strptime; use absl::FormatTime instead") + if tokenInLine("strerror", line): + reportError("Don't use strerror; use Envoy::errorDetails instead") + # Prefer using abseil hash maps/sets over std::unordered_map/set for performance optimizations and + # non-deterministic iteration order that exposes faulty assertions. 
+ # See: https://abseil.io/docs/cpp/guides/container#hash-tables + if "std::unordered_map" in line: + reportError("Don't use std::unordered_map; use absl::flat_hash_map instead or " + "absl::node_hash_map if pointer stability of keys/values is required") + if "std::unordered_set" in line: + reportError("Don't use std::unordered_set; use absl::flat_hash_set instead or " + "absl::node_hash_set if pointer stability of keys/values is required") if "std::atomic_" in line: # The std::atomic_* free functions are functionally equivalent to calling # operations on std::atomic objects, so prefer to use that instead. reportError("Don't use free std::atomic_* functions, use std::atomic members instead.") + # Blocking the use of std::any, std::optional, std::variant for now as iOS 11/macOS 10.13 + # does not support these functions at runtime. + # See: https://github.com/envoyproxy/envoy/issues/12341 + if tokenInLine("std::any", line): + reportError("Don't use std::any; use absl::any instead") + if tokenInLine("std::optional", line): + reportError("Don't use std::optional; use absl::optional instead") + if tokenInLine("std::variant", line): + reportError("Don't use std::variant; use absl::variant instead") if "__attribute__((packed))" in line and file_path != "./include/envoy/common/platform.h": # __attribute__((packed)) is not supported by MSVC, we have a PACKED_STRUCT macro that # can be used instead reportError("Don't use __attribute__((packed)), use the PACKED_STRUCT macro defined " "in include/envoy/common/platform.h instead") - if re.search("\{\s*\.\w+\s*\=", line): + if DESIGNATED_INITIALIZER_REGEX.search(line): # Designated initializers are not part of the C++14 standard and are not supported # by MSVC reportError("Don't use designated initializers in struct initialization, " @@ -646,17 +707,17 @@ def checkSourceLine(line, file_path, reportError): reportError("Don't use 'using testing::Test;, elaborate the type instead") if line.startswith("using 
testing::TestWithParams;"): reportError("Don't use 'using testing::Test;, elaborate the type instead") - if re.search("TEST(_.\(.*,\s|\()[a-z].*\)\s\{", line): + if TEST_NAME_STARTING_LOWER_CASE_REGEX.search(line): # Matches variants of TEST(), TEST_P(), TEST_F() etc. where the test name begins # with a lowercase letter. reportError("Test names should be CamelCase, starting with a capital letter") - if not whitelistedForSerializeAsString(file_path) and "SerializeAsString" in line: + if not allowlistedForSerializeAsString(file_path) and "SerializeAsString" in line: # The MessageLite::SerializeAsString doesn't generate deterministic serialization, # use MessageUtil::hash instead. reportError( "Don't use MessageLite::SerializeAsString for generating deterministic serialization, use MessageUtil::hash instead." ) - if not whitelistedForJsonStringToMessage(file_path) and "JsonStringToMessage" in line: + if not allowlistedForJsonStringToMessage(file_path) and "JsonStringToMessage" in line: # Centralize all usage of JSON parsing so it is easier to make changes in JSON parsing # behavior. 
reportError("Don't use Protobuf::util::JsonStringToMessage, use TestUtility::loadFromJson.") @@ -668,20 +729,20 @@ def checkSourceLine(line, file_path, reportError): '->histogramFromString(' in line or '->textReadoutFromString(' in line): reportError("Don't lookup stats by name at runtime; use StatName saved during construction") - if re.search("envoy::[a-z0-9_:]+::[A-Z][a-z]\w*_\w*_[A-Z]{2}", line): + if MANGLED_PROTOBUF_NAME_REGEX.search(line): reportError("Don't use mangled Protobuf names for enum constants") - hist_m = re.search("(?<=HISTOGRAM\()[a-zA-Z0-9_]+_(b|kb|mb|ns|us|ms|s)(?=,)", line) - if hist_m and not whitelistedForHistogramSiSuffix(hist_m.group(0)): + hist_m = HISTOGRAM_SI_SUFFIX_REGEX.search(line) + if hist_m and not allowlistedForHistogramSiSuffix(hist_m.group(0)): reportError( "Don't suffix histogram names with the unit symbol, " "it's already part of the histogram object and unit-supporting sinks can use this information natively, " "other sinks can add the suffix automatically on flush should they prefer to do so.") - if not whitelistedForStdRegex(file_path) and "std::regex" in line: + if not allowlistedForStdRegex(file_path) and "std::regex" in line: reportError("Don't use std::regex in code that handles untrusted input. Use RegexMatcher") - if not whitelistedForGrpcInit(file_path): + if not allowlistedForGrpcInit(file_path): grpc_init_or_shutdown = line.find("grpc_init()") grpc_shutdown = line.find("grpc_shutdown()") if grpc_init_or_shutdown == -1 or (grpc_shutdown != -1 and @@ -693,11 +754,20 @@ def checkSourceLine(line, file_path, reportError): reportError("Don't call grpc_init() or grpc_shutdown() directly, instantiate " + "Grpc::GoogleGrpcContext. 
See #8282") + if denylistedForExceptions(file_path): + throw = line.find("throw") + if throw != -1: + comment_match = COMMENT_REGEX.search(line) + if comment_match is None or comment_match.start(0) > throw: + reportError("Don't introduce throws into exception-free files, use error " + + "statuses instead.") + def checkBuildLine(line, file_path, reportError): - if "@bazel_tools" in line and not (isSkylarkFile(file_path) or file_path.startswith("./bazel/")): + if "@bazel_tools" in line and not (isSkylarkFile(file_path) or file_path.startswith("./bazel/") or + "python/runfiles" in line): reportError("unexpected @bazel_tools reference, please indirect via a definition in //bazel") - if not whitelistedForProtobufDeps(file_path) and '"protobuf"' in line: + if not allowlistedForProtobufDeps(file_path) and '"protobuf"' in line: reportError("unexpected direct external dependency on protobuf, use " "//source/common/protobuf instead.") if (envoy_build_rule_check and not isSkylarkFile(file_path) and not isWorkspaceFile(file_path) and @@ -723,7 +793,7 @@ def fixBuildPath(file_path): if os.system("%s %s %s" % (ENVOY_BUILD_FIXER_PATH, file_path, file_path)) != 0: error_messages += ["envoy_build_fixer rewrite failed for file: %s" % file_path] - if os.system("%s -mode=fix %s" % (BUILDIFIER_PATH, file_path)) != 0: + if os.system("%s -lint=fix -mode=fix %s" % (BUILDIFIER_PATH, file_path)) != 0: error_messages += ["buildifier rewrite failed for file: %s" % file_path] return error_messages @@ -875,6 +945,21 @@ def checkOwners(dir_name, owned_directories, error_messages): error_messages.append("New directory %s appears to not have owners in CODEOWNERS" % dir_name) +def checkApiShadowStarlarkFiles(api_shadow_root, file_path, error_messages): + command = "diff -u " + command += file_path + " " + api_shadow_starlark_path = api_shadow_root + re.sub(r"\./api/", '', file_path) + command += api_shadow_starlark_path + + error_message = executeCommand(command, "invalid .bzl in 
generated_api_shadow", file_path) + if operation_type == "check": + error_messages += error_message + elif operation_type == "fix" and len(error_message) != 0: + shutil.copy(file_path, api_shadow_starlark_path) + + return error_messages + + def checkFormatVisitor(arg, dir_name, names): """Run checkFormat in parallel for the given files. @@ -891,7 +976,7 @@ def checkFormatVisitor(arg, dir_name, names): # python lists are passed as references, this is used to collect the list of # async results (futures) from running checkFormat and passing them back to # the caller. - pool, result_list, owned_directories, error_messages = arg + pool, result_list, owned_directories, api_shadow_root, error_messages = arg # Sanity check CODEOWNERS. This doesn't need to be done in a multi-threaded # manner as it is a small and limited list. @@ -904,6 +989,10 @@ def checkFormatVisitor(arg, dir_name, names): checkOwners(dir_name[len(source_prefix):], owned_directories, error_messages) for file_name in names: + if dir_name.startswith("./api") and isSkylarkFile(file_name): + result = pool.apply_async(checkApiShadowStarlarkFiles, + args=(api_shadow_root, dir_name + "/" + file_name, error_messages)) + result_list.append(result) result = pool.apply_async(checkFormatReturnTraceOnError, args=(dir_name + "/" + file_name,)) result_list.append(result) @@ -970,6 +1059,7 @@ def checkErrorMessages(error_messages): operation_type = args.operation_type target_path = args.target_path + api_shadow_root = args.api_shadow_prefix envoy_build_rule_check = not args.skip_envoy_build_rule_check namespace_check = args.namespace_check namespace_check_excluded_paths = args.namespace_check_excluded_paths + [ @@ -1005,7 +1095,7 @@ def ownedDirectories(error_messages): for line in f: # If this line is of the form "extensions/... @owner1 @owner2" capture the directory # name and store it in the list of directories with documented owners. 
- m = re.search(r'.*(extensions[^@]*\s+)(@.*)', line) + m = EXTENSIONS_CODEOWNERS_REGEX.search(line) if m is not None and not line.startswith('#'): owned.append(m.group(1).strip()) owners = re.findall('@\S+', m.group(2).strip()) @@ -1035,8 +1125,8 @@ def PooledCheckFormat(path_predicate): # For each file in target_path, start a new task in the pool and collect the # results (results is passed by reference, and is used as an output). for root, _, files in os.walk(target_path): - checkFormatVisitor((pool, results, owned_directories, error_messages), root, - [f for f in files if path_predicate(f)]) + checkFormatVisitor((pool, results, owned_directories, api_shadow_root, error_messages), + root, [f for f in files if path_predicate(f)]) # Close the pool to new tasks, wait for all of the running tasks to finish, # then collect the error messages. diff --git a/tools/code_format/check_format_test_helper.py b/tools/code_format/check_format_test_helper.py index 67276235adce1..da3f576605aa1 100755 --- a/tools/code_format/check_format_test_helper.py +++ b/tools/code_format/check_format_test_helper.py @@ -61,12 +61,12 @@ def fixFileExpectingSuccess(file, extra_input_files=None): command, infile, outfile, status, stdout = fixFileHelper(file, extra_input_files=extra_input_files) if status != 0: - print("FAILED:") + print("FAILED: " + infile) emitStdoutAsError(stdout) return 1 status, stdout, stderr = runCommand('diff ' + outfile + ' ' + infile + '.gold') if status != 0: - print("FAILED:") + print("FAILED: " + infile) emitStdoutAsError(stdout + stderr) return 1 return 0 @@ -166,6 +166,10 @@ def runChecks(): "Don't reference real-world time sources from production code; use injection") errors += checkUnfixableError("real_time_source.cc", real_time_inject_error) errors += checkUnfixableError("real_time_system.cc", real_time_inject_error) + errors += checkUnfixableError( + "duration_value.cc", + "Don't use ambiguous duration(value), use an explicit duration type, e.g. 
Event::TimeSystem::Milliseconds(value)" + ) errors += checkUnfixableError("system_clock.cc", real_time_inject_error) errors += checkUnfixableError("steady_clock.cc", real_time_inject_error) errors += checkUnfixableError( @@ -230,6 +234,22 @@ def runChecks(): "test/register_factory.cc", "Don't use Registry::RegisterFactory or REGISTER_FACTORY in tests, use " "Registry::InjectFactory instead.") + errors += checkUnfixableError("strerror.cc", + "Don't use strerror; use Envoy::errorDetails instead") + errors += checkUnfixableError( + "std_unordered_map.cc", "Don't use std::unordered_map; use absl::flat_hash_map instead " + + "or absl::node_hash_map if pointer stability of keys/values is required") + errors += checkUnfixableError( + "std_unordered_set.cc", "Don't use std::unordered_set; use absl::flat_hash_set instead " + + "or absl::node_hash_set if pointer stability of keys/values is required") + errors += checkUnfixableError("std_any.cc", "Don't use std::any; use absl::any instead") + errors += checkUnfixableError("std_optional.cc", + "Don't use std::optional; use absl::optional instead") + errors += checkUnfixableError("std_variant.cc", + "Don't use std::variant; use absl::variant instead") + errors += checkUnfixableError( + "throw.cc", "Don't introduce throws into exception-free files, use error statuses instead.") + errors += checkFileExpectingOK("commented_throw.cc") # The following files have errors that can be automatically fixed. 
errors += checkAndFixError("over_enthusiastic_spaces.cc", @@ -269,6 +289,7 @@ def runChecks(): "term absl::make_unique< should be replaced with standard library term std::make_unique<") errors += checkFileExpectingOK("real_time_source_override.cc") + errors += checkFileExpectingOK("duration_value_zero.cc") errors += checkFileExpectingOK("time_system_wait_for.cc") errors += checkFileExpectingOK("clang_format_off.cc") return errors diff --git a/tools/code_format/envoy_build_fixer.py b/tools/code_format/envoy_build_fixer.py index 865e9fffd48d2..9af90f0f7e219 100755 --- a/tools/code_format/envoy_build_fixer.py +++ b/tools/code_format/envoy_build_fixer.py @@ -2,7 +2,7 @@ # Enforces: # - License headers on Envoy BUILD files -# - envoy_package() top-level invocation for standard Envoy package setup. +# - envoy_package() or envoy_extension_package() top-level invocation for standard Envoy package setup. # - Infers API dependencies from source files. # - Misc. cleanups: avoids redundant blank lines, removes unused loads. # - Maybe more later? @@ -19,6 +19,9 @@ # Where does Buildozer live? BUILDOZER_PATH = paths.getBuildozer() +# Where does Buildifier live? +BUILDIFIER_PATH = paths.getBuildifier() + # Canonical Envoy license. LICENSE_STRING = 'licenses(["notice"]) # Apache 2\n\n' @@ -28,8 +31,9 @@ # Match an Envoy rule, e.g. envoy_cc_library( in a BUILD file. ENVOY_RULE_REGEX = re.compile(r'envoy[_\w]+\(') -# Match a load() statement for the envoy_package macro. +# Match a load() statement for the envoy_package macros. PACKAGE_LOAD_BLOCK_REGEX = re.compile('("envoy_package".*?\)\n)', re.DOTALL) +EXTENSION_PACKAGE_LOAD_BLOCK_REGEX = re.compile('("envoy_extension_package".*?\)\n)', re.DOTALL) # Match Buildozer 'print' output. Example of Buildozer print output: # cc_library json_transcoder_filter_lib [json_transcoder_filter.cc] (missing) (missing) @@ -67,20 +71,29 @@ def RunBuildozer(cmds, contents): # Add an Apache 2 license and envoy_package() import and rule as needed. 
-def FixPackageAndLicense(contents): +def FixPackageAndLicense(path, contents): + regex_to_use = PACKAGE_LOAD_BLOCK_REGEX + package_string = 'envoy_package' + + if 'source/extensions' in path: + regex_to_use = EXTENSION_PACKAGE_LOAD_BLOCK_REGEX + package_string = 'envoy_extension_package' + # Ensure we have an envoy_package import load if this is a real Envoy package. We also allow # the prefix to be overridden if envoy is included in a larger workspace. if re.search(ENVOY_RULE_REGEX, contents): + new_load = 'new_load {}//bazel:envoy_build_system.bzl %s' % package_string contents = RunBuildozer([ - ('new_load {}//bazel:envoy_build_system.bzl envoy_package'.format( - os.getenv("ENVOY_BAZEL_PREFIX", "")), '__pkg__'), + (new_load.format(os.getenv("ENVOY_BAZEL_PREFIX", "")), '__pkg__'), ], contents) # Envoy package is inserted after the load block containing the # envoy_package import. - if 'envoy_package()' not in contents: - contents = re.sub(PACKAGE_LOAD_BLOCK_REGEX, r'\1\nenvoy_package()\n\n', contents) - if 'envoy_package()' not in contents: - raise EnvoyBuildFixerError('Unable to insert envoy_package()') + package_and_parens = package_string + '()' + if package_and_parens not in contents: + contents = re.sub(regex_to_use, r'\1\n%s\n\n' % package_and_parens, contents) + if package_and_parens not in contents: + raise EnvoyBuildFixerError('Unable to insert %s' % package_and_parens) + # Delete old licenses. if re.search(OLD_LICENSES_REGEX, contents): contents = re.sub(OLD_LICENSES_REGEX, '', contents) @@ -89,14 +102,15 @@ def FixPackageAndLicense(contents): return contents -# Remove trailing blank lines, unnecessary double blank lines. -def FixEmptyLines(contents): - return re.sub('\n\s*$', '\n', re.sub('\n\n\n', '\n\n', contents)) - - -# Misc. Buildozer cleanups. -def FixBuildozerCleanups(contents): - return RunBuildozer([('fix unusedLoads', '__pkg__')], contents) +# Run Buildifier commands on a string with lint mode. 
+def BuildifierLint(contents): + r = subprocess.run([BUILDIFIER_PATH, '-lint=fix', '-mode=fix', '-type=build'], + input=contents.encode(), + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + if r.returncode != 0: + raise EnvoyBuildFixerError('buildozer execution failed: %s' % r) + return r.stdout.decode('utf-8') # Find all the API headers in a C++ source file. @@ -169,10 +183,9 @@ def FixBuild(path): with open(path, 'r') as f: contents = f.read() xforms = [ - FixPackageAndLicense, - FixEmptyLines, + functools.partial(FixPackageAndLicense, path), functools.partial(FixApiDeps, path), - FixBuildozerCleanups, + BuildifierLint, ] for xform in xforms: contents = xform(contents) diff --git a/tools/code_format/header_order.py b/tools/code_format/header_order.py index 9962d825a3f50..427fb0c053b10 100755 --- a/tools/code_format/header_order.py +++ b/tools/code_format/header_order.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # Enforce header order in a given file. This will only reorder in the first sequence of contiguous # #include statements, so it will not play well with #ifdef. @@ -12,17 +12,15 @@ # enough to handle block splitting and correctly detecting the main header subject to the Envoy # canonical paths. 
-from __future__ import print_function - import argparse import common +import pathlib import re import sys def ReorderHeaders(path): - with open(path, 'r') as f: - source = f.read() + source = pathlib.Path(path).read_text(encoding='utf-8') all_lines = iter(source.split('\n')) before_includes_lines = [] @@ -117,7 +115,6 @@ def regex_filter(regex): include_dir_order = args.include_dir_order.split(',') reorderd_source = ReorderHeaders(target_path) if args.rewrite: - with open(target_path, 'w') as f: - f.write(reorderd_source) + pathlib.Path(target_path).write_text(reorderd_source, encoding='utf-8') else: - sys.stdout.write(reorderd_source) + sys.stdout.buffer.write(reorderd_source.encode('utf-8')) diff --git a/tools/code_format/requirements.txt b/tools/code_format/requirements.txt index 4ab3842b87d92..c27e0d44afaa2 100644 --- a/tools/code_format/requirements.txt +++ b/tools/code_format/requirements.txt @@ -1,2 +1,2 @@ -flake8==3.7.8 -yapf==0.28.0 +flake8==3.8.3 +yapf==0.30.0 diff --git a/tools/config_validation/BUILD b/tools/config_validation/BUILD new file mode 100644 index 0000000000000..5ca3d0ef9a4af --- /dev/null +++ b/tools/config_validation/BUILD @@ -0,0 +1,16 @@ +load("@rules_python//python:defs.bzl", "py_binary") +load("@config_validation_pip3//:requirements.bzl", "requirement") + +licenses(["notice"]) # Apache 2 + +py_binary( + name = "validate_fragment", + srcs = ["validate_fragment.py"], + data = ["//tools/type_whisperer:all_protos_with_ext_pb_text.pb_text"], + visibility = ["//visibility:public"], + deps = [ + requirement("PyYAML"), + "@bazel_tools//tools/python/runfiles", + "@com_google_protobuf//:protobuf_python", + ], +) diff --git a/tools/config_validation/requirements.txt b/tools/config_validation/requirements.txt new file mode 100644 index 0000000000000..7a997b5e44bdb --- /dev/null +++ b/tools/config_validation/requirements.txt @@ -0,0 +1 @@ +PyYAML==5.3.1 diff --git a/tools/config_validation/validate_fragment.py 
b/tools/config_validation/validate_fragment.py new file mode 100644 index 0000000000000..d272f37fb006c --- /dev/null +++ b/tools/config_validation/validate_fragment.py @@ -0,0 +1,72 @@ +# Validate a YAML fragment against an Envoy API proto3 type. +# +# Example usage: +# +# bazel run //tools/config_validation:validate_fragment -- \ +# envoy.config.bootstrap.v3.Bootstrap $PWD/configs/google_com_proxy.v2.yaml + +import json +import pathlib +import sys + +import yaml + +from google.protobuf import descriptor_pb2 +from google.protobuf import descriptor_pool +from google.protobuf import json_format +from google.protobuf import message_factory +from google.protobuf import text_format + +from bazel_tools.tools.python.runfiles import runfiles + +import argparse + + +def ValidateFragment(type_name, fragment): + """Validate a dictionary representing a JSON/YAML fragment against an Envoy API proto3 type. + + Throws Protobuf errors on parsing exceptions, successful validations produce + no result. + + Args: + type_name: a string providing the type name, e.g. + envoy.config.bootstrap.v3.Bootstrap. + fragment: a dictionary representing the parsed JSON/YAML configuration + fragment. 
+ """ + json_fragment = json.dumps(fragment) + + r = runfiles.Create() + all_protos_pb_text_path = r.Rlocation( + 'envoy/tools/type_whisperer/all_protos_with_ext_pb_text.pb_text') + file_desc_set = descriptor_pb2.FileDescriptorSet() + text_format.Parse(pathlib.Path(all_protos_pb_text_path).read_text(), + file_desc_set, + allow_unknown_extension=True) + + pool = descriptor_pool.DescriptorPool() + for f in file_desc_set.file: + pool.Add(f) + desc = pool.FindMessageTypeByName(type_name) + msg = message_factory.MessageFactory(pool=pool).GetPrototype(desc)() + json_format.Parse(json_fragment, msg, descriptor_pool=pool) + + +def ParseArgs(): + parser = argparse.ArgumentParser( + description='Validate a YAML fragment against an Envoy API proto3 type.') + parser.add_argument( + 'message_type', + help='a string providing the type name, e.g. envoy.config.bootstrap.v3.Bootstrap.') + parser.add_argument('fragment_path', nargs='?', help='Path to a YAML configuration fragment.') + parser.add_argument('-s', required=False, help='YAML configuration fragment.') + + return parser.parse_args() + + +if __name__ == '__main__': + parsed_args = ParseArgs() + message_type = parsed_args.message_type + content = parsed_args.s if (parsed_args.fragment_path is None) else pathlib.Path( + parsed_args.fragment_path).read_text() + ValidateFragment(message_type, yaml.safe_load(content)) diff --git a/tools/debugging/valgrind-suppressions.txt b/tools/debugging/valgrind-suppressions.txt index d985fca478a14..bfb08d95b3871 100644 --- a/tools/debugging/valgrind-suppressions.txt +++ b/tools/debugging/valgrind-suppressions.txt @@ -4,3 +4,27 @@ fun:free ... } +{ + re2 cond-jump failure + Memcheck:Cond + fun:_ZNK3re210SparseSetTIvE8containsEi + ... +} +{ + re2 uninit-value + Memcheck:Value8 + fun:_ZNK3re210SparseSetTIvE8containsEi + ... +} +{ + re2 cond-jump failure + Memcheck:Cond + fun:_ZNK3re211SparseArrayIiE9has_indexEi + ... 
+} +{ + re2 uninit-value + Memcheck:Value8 + fun:_ZNK3re211SparseArrayIiE9has_indexEi + ... +} diff --git a/tools/deprecate_version/deprecate_version.py b/tools/deprecate_version/deprecate_version.py index aba1579734a08..9cbde123785e2 100644 --- a/tools/deprecate_version/deprecate_version.py +++ b/tools/deprecate_version/deprecate_version.py @@ -57,9 +57,10 @@ def CreateIssues(access_token, runtime_and_pr): Args: access_token: GitHub access token (see comment at top of file). - runtime_and_pr: a list of runtime guards and the PRs they were added. + runtime_and_pr: a list of runtime guards and the PRs and commits they were added. """ - repo = github.Github(access_token).get_repo('envoyproxy/envoy') + git = github.Github(access_token) + repo = git.get_repo('envoyproxy/envoy') # Find GitHub label objects for LABELS. labels = [] @@ -70,24 +71,44 @@ def CreateIssues(access_token, runtime_and_pr): raise DeprecateVersionError('Unknown labels (expected %s, got %s)' % (LABELS, labels)) issues = [] - for runtime_guard, pr in runtime_and_pr: + for runtime_guard, pr, commit in runtime_and_pr: # Who is the author? - pr_info = repo.get_pull(pr) + if pr: + # Extract PR title, number, and author. + pr_info = repo.get_pull(pr) + change_title = pr_info.title + number = ('#%d') % pr + login = pr_info.user.login + else: + # Extract commit message, sha, and author. + # Only keep commit message title (remove description), and truncate to 50 characters. + change_title = commit.message.split('\n')[0][:50] + number = ('commit %s') % commit.hexsha + email = commit.author.email + # Use the commit author's email to search through users for their login. + search_user = git.search_users(email.split('@')[0] + " in:email") + login = search_user[0].login if search_user else None title = '%s deprecation' % (runtime_guard) - body = ('#%d (%s) introduced a runtime guarded feature. 
This issue ' - 'tracks source code cleanup.') % (pr, pr_info.title) + body = ('Your change %s (%s) introduced a runtime guarded feature. It has been 6 months since ' + 'the new code has been exercised by default, so it\'s time to remove the old code ' + 'path. This issue tracks source code cleanup so we don\'t forget.') % (number, + change_title) + print(title) print(body) - print(' >> Assigning to %s' % pr_info.user.login) + print(' >> Assigning to %s' % (login or email)) + search_title = '%s in:title' % title # TODO(htuch): Figure out how to do this without legacy and faster. - exists = repo.legacy_search_issues('open', '"%s"' % title) or repo.legacy_search_issues( - 'closed', '"%s"' % title) + exists = repo.legacy_search_issues('open', search_title) or repo.legacy_search_issues( + 'closed', search_title) if exists: + print("Issue with %s already exists" % search_title) + print(exists) print(' >> Issue already exists, not posting!') else: - issues.append((title, body, pr_info.user)) + issues.append((title, body, login)) if not issues: print('No features to deprecate in this release') @@ -95,22 +116,23 @@ def CreateIssues(access_token, runtime_and_pr): if GetConfirmation(): print('Creating issues...') - for title, body, user in issues: + for title, body, login in issues: try: - repo.create_issue(title, body=body, assignees=[user.login], labels=labels) + repo.create_issue(title, body=body, assignees=[login], labels=labels) except github.GithubException as e: try: - body += '\ncc @' + user.login + if login: + body += '\ncc @' + login repo.create_issue(title, body=body, labels=labels) print(('unable to assign issue %s to %s. 
Add them to the Envoy proxy org' - 'and assign it their way.') % (title, user.login)) + 'and assign it their way.') % (title, login)) except github.GithubException as e: print('GithubException while creating issue.') raise def GetRuntimeAndPr(): - """Returns a list of tuples of [runtime features to deprecate, PR the feature was added] + """Returns a list of tuples of [runtime features to deprecate, PR, commit the feature was added] """ repo = Repo(os.getcwd()) @@ -140,14 +162,16 @@ def GetRuntimeAndPr(): if runtime_guard == 'envoy.reloadable_features.test_feature_true': found_test_feature_true = True continue - pr = (int(re.search('\(#(\d+)\)', commit.message).group(1))) + pr_num = re.search('\(#(\d+)\)', commit.message) + # Some commits may not come from a PR (if they are part of a security point release). + pr = (int(pr_num.group(1))) if pr_num else None pr_date = date.fromtimestamp(commit.committed_date) removable = (pr_date < removal_date) # Add the runtime guard and PR to the list to file issues about. print('Flag ' + runtime_guard + ' added at ' + str(pr_date) + ' ' + (removable and 'and is safe to remove' or 'is not ready to remove')) if removable: - features_to_flip.append((runtime_guard, pr)) + features_to_flip.append((runtime_guard, pr, commit)) print('Failed to find test_feature_false. 
Script needs fixing') sys.exit(1) diff --git a/tools/gen_compilation_database.py b/tools/gen_compilation_database.py index b5b3c5a4a1beb..0d65eaec3b64b 100755 --- a/tools/gen_compilation_database.py +++ b/tools/gen_compilation_database.py @@ -3,25 +3,13 @@ import argparse import glob import json +import logging import os import shlex import subprocess from pathlib import Path -def runBazelBuildForCompilationDatabase(bazel_options, bazel_targets): - query_targets = ' union '.join(bazel_targets) - query = ' union '.join( - q.format(query_targets) for q in [ - 'attr(include_prefix, ".+", kind(cc_library, deps({})))', - 'attr(strip_include_prefix, ".+", kind(cc_library, deps({})))', - 'attr(generator_function, ".*proto_library", kind(cc_.*, deps({})))', - ]) - build_targets = subprocess.check_output(["bazel", "query", "--notool_deps", - query]).decode().splitlines() - subprocess.check_call(["bazel", "build"] + bazel_options + build_targets) - - # This method is equivalent to https://github.com/grailbio/bazel-compilation-database/blob/master/generate.sh def generateCompilationDatabase(args): # We need to download all remote outputs for generated source code. 
This option lives here to override those @@ -30,12 +18,10 @@ def generateCompilationDatabase(args): "--config=compdb", "--remote_download_outputs=all", ] - if args.run_bazel_build: - runBazelBuildForCompilationDatabase(bazel_options, args.bazel_targets) subprocess.check_call(["bazel", "build"] + bazel_options + [ "--aspects=@bazel_compdb//:aspects.bzl%compilation_database_aspect", - "--output_groups=compdb_files" + "--output_groups=compdb_files,header_files" ] + args.bazel_targets) execroot = subprocess.check_output(["bazel", "info", "execution_root"] + @@ -87,6 +73,9 @@ def modifyCompileCommand(target, args): if isHeader(target["file"]): options += " -Wno-pragma-once-outside-header -Wno-unused-const-variable" options += " -Wno-unused-function" + if not target["file"].startswith("external/"): + # *.h file is treated as C header by default while our headers files are all C++17. + options = "-x c++ -std=c++17 -fexceptions " + options target["command"] = " ".join([cc, options]) return target @@ -101,7 +90,6 @@ def fixCompilationDatabase(args, db): if __name__ == "__main__": parser = argparse.ArgumentParser(description='Generate JSON compilation database') - parser.add_argument('--run_bazel_build', action='store_true') parser.add_argument('--include_external', action='store_true') parser.add_argument('--include_genfiles', action='store_true') parser.add_argument('--include_headers', action='store_true') diff --git a/tools/git/modified_since_last_github_commit.sh b/tools/git/modified_since_last_github_commit.sh index bbb9d388a239b..e8e805ce9c97c 100755 --- a/tools/git/modified_since_last_github_commit.sh +++ b/tools/git/modified_since_last_github_commit.sh @@ -4,4 +4,4 @@ declare -r BASE="$(dirname "$0")" declare -r TARGET_PATH=$1 declare -r EXTENSION=$2 -git diff --name-only $("${BASE}"/last_github_commit.sh)..HEAD | grep "\.${EXTENSION}$" +git diff --name-only $("${BASE}"/last_github_commit.sh) | grep "\.${EXTENSION}$" diff --git 
a/tools/proto_format/active_protos_gen.py b/tools/proto_format/active_protos_gen.py index 37b871d93f2e4..bd29cc197d7d3 100755 --- a/tools/proto_format/active_protos_gen.py +++ b/tools/proto_format/active_protos_gen.py @@ -11,10 +11,10 @@ BUILD_FILE_TEMPLATE = string.Template( """# DO NOT EDIT. This file is generated by tools/proto_format/active_protos_gen.py. -licenses(["notice"]) # Apache 2 - load("@rules_proto//proto:defs.bzl", "proto_library") +licenses(["notice"]) # Apache 2 + # This tracks active development versions of protos. proto_library( name = "active_protos", diff --git a/tools/proto_format/proto_sync.py b/tools/proto_format/proto_sync.py index cf952597e88d0..e3b7668913fb0 100755 --- a/tools/proto_format/proto_sync.py +++ b/tools/proto_format/proto_sync.py @@ -95,7 +95,7 @@ def GetDestinationPath(src): matches[0])).joinpath(src_path.name.split('.')[0] + ".proto") -def GetAbsDestinationPath(dst_root, src): +def GetAbsRelDestinationPath(dst_root, src): """Obtain absolute path from a proto file path combined with destination root. Creates the parent directory if necessary. @@ -107,7 +107,7 @@ def GetAbsDestinationPath(dst_root, src): rel_dst_path = GetDestinationPath(src) dst = dst_root.joinpath(rel_dst_path) dst.parent.mkdir(0o755, parents=True, exist_ok=True) - return dst + return dst, rel_dst_path def ProtoPrint(src, dst): @@ -203,6 +203,10 @@ def GetImportDeps(proto_path): if import_path.startswith('udpa/annotations/'): imports.append('@com_github_cncf_udpa//udpa/annotations:pkg') continue + # Special case handling for UDPA core. + if import_path.startswith('udpa/core/v1/'): + imports.append('@com_github_cncf_udpa//udpa/core/v1:pkg') + continue # Explicit remapping for external deps, compute paths for envoy/*. 
if import_path in external_proto_deps.EXTERNAL_PROTO_IMPORT_BAZEL_DEP_MAP: imports.append(external_proto_deps.EXTERNAL_PROTO_IMPORT_BAZEL_DEP_MAP[import_path]) @@ -326,7 +330,50 @@ def GitStatus(path): return subprocess.check_output(['git', 'status', '--porcelain', str(path)]).decode() +def GitModifiedFiles(path, suffix): + """Obtain a list of modified files since the last commit merged by GitHub. + + Args: + path: path to examine. + suffix: path suffix to filter with. + Return: + A list of strings providing the paths of modified files in the repo. + """ + try: + modified_files = subprocess.check_output( + ['tools/git/modified_since_last_github_commit.sh', 'api', 'proto']).decode().split() + return modified_files + except subprocess.CalledProcessError as e: + if e.returncode == 1: + return [] + raise + + +# If we're not forcing format, i.e. FORCE_PROTO_FORMAT=yes, in the environment, +# then try and see if we can skip reformatting based on some simple path +# heuristics. This saves a ton of time, since proto format and sync is not +# running under Bazel and can't do change detection. +def ShouldSync(path, api_proto_modified_files, py_tools_modified_files): + if os.getenv('FORCE_PROTO_FORMAT') == 'yes': + return True + # If tools change, safest thing to do is rebuild everything. + if len(py_tools_modified_files) > 0: + return True + # Check to see if the basename of the file has been modified since the last + # GitHub commit. If so, rebuild. This is safe and conservative across package + # migrations in v3 and v4alpha; we could achieve a lower rate of false + # positives if we examined package migration annotations, at the expense of + # complexity. + for p in api_proto_modified_files: + if os.path.basename(p) in path: + return True + # Otherwise we can safely skip syncing. 
+ return False + + def Sync(api_root, mode, labels, shadow): + api_proto_modified_files = GitModifiedFiles('api', 'proto') + py_tools_modified_files = GitModifiedFiles('tools', 'py') with tempfile.TemporaryDirectory() as tmp: dst_dir = pathlib.Path(tmp).joinpath("b") paths = [] @@ -339,7 +386,13 @@ def Sync(api_root, mode, labels, shadow): dst_src_paths = defaultdict(list) for path in paths: if os.stat(path).st_size > 0: - dst_src_paths[GetAbsDestinationPath(dst_dir, path)].append(path) + abs_dst_path, rel_dst_path = GetAbsRelDestinationPath(dst_dir, path) + if ShouldSync(path, api_proto_modified_files, py_tools_modified_files): + dst_src_paths[abs_dst_path].append(path) + else: + print('Skipping sync of %s' % path) + src_path = str(pathlib.Path(api_root, rel_dst_path)) + shutil.copy(src_path, abs_dst_path) with mp.Pool() as p: pkg_deps = p.map(SyncProtoFile, dst_src_paths.items()) SyncBuildFiles(mode, dst_dir) diff --git a/tools/protodoc/BUILD b/tools/protodoc/BUILD index 812ceac0c66bb..51bb3a9fbde91 100644 --- a/tools/protodoc/BUILD +++ b/tools/protodoc/BUILD @@ -1,5 +1,11 @@ +load("@rules_python//python:defs.bzl", "py_binary") +load("@protodoc_pip3//:requirements.bzl", "requirement") +load("//bazel:envoy_build_system.bzl", "envoy_package", "envoy_proto_library") + licenses(["notice"]) # Apache 2 +envoy_package() + py_binary( name = "generate_empty", srcs = ["generate_empty.py"], @@ -7,14 +13,23 @@ py_binary( deps = [":protodoc"], ) +envoy_proto_library( + name = "manifest_proto", + srcs = ["manifest.proto"], +) + py_binary( name = "protodoc", srcs = ["protodoc.py"], + data = ["//docs:protodoc_manifest.yaml"], visibility = ["//visibility:public"], deps = [ + ":manifest_proto_py_proto", "//tools/api_proto_plugin", + "//tools/config_validation:validate_fragment", "@com_envoyproxy_protoc_gen_validate//validate:validate_py", "@com_github_cncf_udpa//udpa/annotations:pkg_py_proto", "@com_google_protobuf//:protobuf_python", + requirement("PyYAML"), ], ) diff --git 
a/tools/protodoc/manifest.proto b/tools/protodoc/manifest.proto new file mode 100644 index 0000000000000..4757c76a8c10e --- /dev/null +++ b/tools/protodoc/manifest.proto @@ -0,0 +1,29 @@ +syntax = "proto3"; + +package tools.protodoc; + +import "google/protobuf/struct.proto"; + +// Additional structure information consumed by protodoc when generating +// documentation for a field. +message Description { + message EdgeConfiguration { + // Example secure edge default for the field. + google.protobuf.Value example = 1; + + // Additional note to include in the configuration warning. + string note = 2; + } + + // Additional information for when this field is used in edge deployments. + EdgeConfiguration edge_config = 1; + + // TODO: add additional information here to reflect things like Envoy + // implementation status. +} + +message Manifest { + // Map from fully qualified field name to additional information to be used in + // protodoc generation. + map fields = 1; +} diff --git a/tools/protodoc/protodoc.bzl b/tools/protodoc/protodoc.bzl index b25ae7a3577df..0ed26121fe6ab 100644 --- a/tools/protodoc/protodoc.bzl +++ b/tools/protodoc/protodoc.bzl @@ -3,7 +3,7 @@ load("//tools/api_proto_plugin:plugin.bzl", "api_proto_plugin_aspect", "api_prot def _protodoc_impl(target, ctx): return api_proto_plugin_impl(target, ctx, "rst", "protodoc", [".rst"]) -# Bazel aspect (https://docs.bazel.build/versions/master/skylark/aspects.html) +# Bazel aspect (https://docs.bazel.build/versions/master/starlark/aspects.html) # that can be invoked from the CLI to produce docs via //tools/protodoc for # proto_library targets. 
Example use: # diff --git a/tools/protodoc/protodoc.py b/tools/protodoc/protodoc.py index f360741db2333..8eeeceb9e225a 100755 --- a/tools/protodoc/protodoc.py +++ b/tools/protodoc/protodoc.py @@ -10,11 +10,26 @@ import pathlib import re import string +import sys + +from google.protobuf import json_format +from bazel_tools.tools.python.runfiles import runfiles +import yaml + +# We have to do some evil things to sys.path due to the way that Python module +# resolution works; we have both tools/ trees in bazel_tools and envoy. By +# default, Bazel leaves us with a sys.path in which the @bazel_tools repository +# takes precedence. Now that we're done with importing runfiles above, we can +# just remove it from the sys.path. +sys.path = [p for p in sys.path if not p.endswith('bazel_tools')] from tools.api_proto_plugin import annotations from tools.api_proto_plugin import plugin from tools.api_proto_plugin import visitor +from tools.config_validation import validate_fragment +from tools.protodoc import manifest_pb2 +from udpa.annotations import security_pb2 from udpa.annotations import status_pb2 from validate import validate_pb2 @@ -173,15 +188,20 @@ def FormatExtension(extension): Returns: RST formatted extension description. 
""" - extension_metadata = json.loads(pathlib.Path( - os.getenv('EXTENSION_DB_PATH')).read_text())[extension] - anchor = FormatAnchor('extension_' + extension) - status = EXTENSION_STATUS_VALUES.get(extension_metadata['status'], '') - security_posture = EXTENSION_SECURITY_POSTURES[extension_metadata['security_posture']] - return EXTENSION_TEMPLATE.substitute(anchor=anchor, - extension=extension, - status=status, - security_posture=security_posture) + try: + extension_metadata = json.loads(pathlib.Path( + os.getenv('EXTENSION_DB_PATH')).read_text())[extension] + anchor = FormatAnchor('extension_' + extension) + status = EXTENSION_STATUS_VALUES.get(extension_metadata['status'], '') + security_posture = EXTENSION_SECURITY_POSTURES[extension_metadata['security_posture']] + return EXTENSION_TEMPLATE.substitute(anchor=anchor, + extension=extension, + status=status, + security_posture=security_posture) + except KeyError as e: + sys.stderr.write( + '\n\nDid you forget to add an entry to source/extensions/extensions_build_config.bzl?\n\n') + exit(1) # Raising the error buries the above message in tracebacks. def FormatHeaderFromFile(style, source_code_info, proto_name): @@ -388,13 +408,39 @@ def FormatAnchor(label): return '.. 
_%s:\n\n' % label -def FormatFieldAsDefinitionListItem(outer_type_context, type_context, field): +def FormatSecurityOptions(security_option, field, type_context, edge_config): + sections = [] + + if security_option.configure_for_untrusted_downstream: + sections.append( + Indent(4, 'This field should be configured in the presence of untrusted *downstreams*.')) + if security_option.configure_for_untrusted_upstream: + sections.append( + Indent(4, 'This field should be configured in the presence of untrusted *upstreams*.')) + if edge_config.note: + sections.append(Indent(4, edge_config.note)) + + example_dict = json_format.MessageToDict(edge_config.example) + validate_fragment.ValidateFragment(field.type_name[1:], example_dict) + field_name = type_context.name.split('.')[-1] + example = {field_name: example_dict} + sections.append( + Indent(4, 'Example configuration for untrusted environments:\n\n') + + Indent(4, '.. code-block:: yaml\n\n') + + '\n'.join(IndentLines(6, + yaml.dump(example).split('\n')))) + + return '.. attention::\n' + '\n\n'.join(sections) + + +def FormatFieldAsDefinitionListItem(outer_type_context, type_context, field, protodoc_manifest): """Format a FieldDescriptorProto as RST definition list item. Args: outer_type_context: contextual information for enclosing message. type_context: contextual information for message/enum/field. field: FieldDescriptorProto. + protodoc_manifest: tools.protodoc.Manifest for proto. Returns: RST formatted definition list item. @@ -441,18 +487,30 @@ def FormatFieldAsDefinitionListItem(outer_type_context, type_context, field): else: formatted_oneof_comment = '' + # If there is a udpa.annotations.security option, include it after the comment. 
+ if field.options.HasExtension(security_pb2.security): + manifest_description = protodoc_manifest.fields.get(type_context.name) + if not manifest_description: + raise ProtodocError('Missing protodoc manifest YAML for %s' % type_context.name) + formatted_security_options = FormatSecurityOptions( + field.options.Extensions[security_pb2.security], field, type_context, + manifest_description.edge_config) + else: + formatted_security_options = '' + comment = '(%s) ' % ', '.join([FormatFieldType(type_context, field)] + field_annotations) + formatted_leading_comment - return anchor + field.name + '\n' + MapLines(functools.partial(Indent, 2), - comment + formatted_oneof_comment) + return anchor + field.name + '\n' + MapLines(functools.partial( + Indent, 2), comment + formatted_oneof_comment) + formatted_security_options -def FormatMessageAsDefinitionList(type_context, msg): +def FormatMessageAsDefinitionList(type_context, msg, protodoc_manifest): """Format a DescriptorProto as RST definition list. Args: type_context: contextual information for message/enum/field. msg: DescriptorProto. + protodoc_manifest: tools.protodoc.Manifest for proto. Returns: RST formatted definition list item. @@ -472,7 +530,8 @@ def FormatMessageAsDefinitionList(type_context, msg): type_context.oneof_names[index] = oneof_decl.name return '\n'.join( FormatFieldAsDefinitionListItem(type_context, type_context.ExtendField(index, field.name), - field) for index, field in enumerate(msg.field)) + '\n' + field, protodoc_manifest) + for index, field in enumerate(msg.field)) + '\n' def FormatEnumValueAsDefinitionListItem(type_context, enum_value): @@ -525,6 +584,15 @@ class RstFormatVisitor(visitor.Visitor): See visitor.Visitor for visitor method docs comments. """ + def __init__(self): + r = runfiles.Create() + with open(r.Rlocation('envoy/docs/protodoc_manifest.yaml'), 'r') as f: + # Load as YAML, emit as JSON and then parse as proto to provide type + # checking. 
+ protodoc_manifest_untyped = yaml.safe_load(f.read()) + self.protodoc_manifest = manifest_pb2.Manifest() + json_format.Parse(json.dumps(protodoc_manifest_untyped), self.protodoc_manifest) + def VisitEnum(self, enum_proto, type_context): normal_enum_type = NormalizeTypeContextName(type_context.name) anchor = FormatAnchor(EnumCrossRefLabel(normal_enum_type)) @@ -553,7 +621,8 @@ def VisitMessage(self, msg_proto, type_context, nested_msgs, nested_enums): return '' return anchor + header + proto_link + formatted_leading_comment + FormatMessageAsJson( type_context, msg_proto) + FormatMessageAsDefinitionList( - type_context, msg_proto) + '\n'.join(nested_msgs) + '\n' + '\n'.join(nested_enums) + type_context, msg_proto, + self.protodoc_manifest) + '\n'.join(nested_msgs) + '\n' + '\n'.join(nested_enums) def VisitFile(self, file_proto, type_context, services, msgs, enums): has_messages = True diff --git a/tools/protodoc/requirements.txt b/tools/protodoc/requirements.txt new file mode 100644 index 0000000000000..7a997b5e44bdb --- /dev/null +++ b/tools/protodoc/requirements.txt @@ -0,0 +1 @@ +PyYAML==5.3.1 diff --git a/tools/protoxform/BUILD b/tools/protoxform/BUILD index d5d46bce81ee4..fa15105ca927f 100644 --- a/tools/protoxform/BUILD +++ b/tools/protoxform/BUILD @@ -1,9 +1,12 @@ +load("@rules_python//python:defs.bzl", "py_binary", "py_test") + licenses(["notice"]) # Apache 2 py_binary( name = "merge_active_shadow", srcs = ["merge_active_shadow.py"], deps = [ + "//tools/api_proto_plugin", "@com_envoyproxy_protoc_gen_validate//validate:validate_py", "@com_github_cncf_udpa//udpa/annotations:pkg_py_proto", "@com_google_googleapis//google/api:annotations_py_proto", @@ -17,6 +20,7 @@ py_test( srcs = ["merge_active_shadow_test.py"], deps = [ ":merge_active_shadow", + "//tools/api_proto_plugin", "@com_google_protobuf//:protobuf_python", ], ) diff --git a/tools/protoxform/merge_active_shadow.py b/tools/protoxform/merge_active_shadow.py index 13f6f8c63bcc2..cac6dbfe58e5e 100644 --- 
a/tools/protoxform/merge_active_shadow.py +++ b/tools/protoxform/merge_active_shadow.py @@ -4,10 +4,13 @@ # 2. Recovering deprecated (sub)message types. # 3. Misc. fixups for oneof metadata and reserved ranges/names. +from collections import defaultdict import copy import pathlib import sys +from tools.api_proto_plugin import type_context as api_type_context + from google.protobuf import descriptor_pb2 from google.protobuf import text_format @@ -17,12 +20,13 @@ from envoy.annotations import deprecation_pb2 as _ from envoy.annotations import resource_pb2 as _ from udpa.annotations import migrate_pb2 as _ +from udpa.annotations import security_pb2 as _ from udpa.annotations import sensitive_pb2 as _ from udpa.annotations import status_pb2 as _ from udpa.annotations import versioning_pb2 as _ -# Set reserved_range in target_proto to reflex previous_reserved_range skipping +# Set reserved_range in target_proto to reflect previous_reserved_range skipping # skip_reserved_numbers. def AdjustReservedRange(target_proto, previous_reserved_range, skip_reserved_numbers): del target_proto.reserved_range[:] @@ -61,49 +65,112 @@ def MergeActiveShadowEnum(active_proto, shadow_proto, target_proto): tv.CopyFrom(sv) +# Adjust source code info comments path to reflect insertions of oneof fields +# inside the middle of an existing collection of fields. +def AdjustSourceCodeInfo(type_context, field_index, field_adjustment): + + def HasPathPrefix(s, t): + return len(s) <= len(t) and all(p[0] == p[1] for p in zip(s, t)) + + for loc in type_context.source_code_info.proto.location: + if HasPathPrefix(type_context.path + [2], loc.path): + path_field_index = len(type_context.path) + 1 + if path_field_index < len(loc.path) and loc.path[path_field_index] >= field_index: + loc.path[path_field_index] += field_adjustment + + # Merge active/shadow DescriptorProtos to a fresh target DescriptorProto. 
-def MergeActiveShadowMessage(active_proto, shadow_proto, target_proto): +def MergeActiveShadowMessage(type_context, active_proto, shadow_proto, target_proto): target_proto.MergeFrom(active_proto) if not shadow_proto: return shadow_fields = {f.name: f for f in shadow_proto.field} skip_reserved_numbers = [] # For every reserved name, check to see if it's in the shadow, and if so, - # reintroduce in target_proto. + # reintroduce in target_proto. We track both the normal fields we need to add + # back in (extra_simple_fields) and those that belong to oneofs + # (extra_oneof_fields). The latter require special treatment, as we can't just + # append them to the end of the message, they need to be reordered. + extra_simple_fields = [] + extra_oneof_fields = defaultdict(list) # oneof index -> list of fields del target_proto.reserved_name[:] for n in active_proto.reserved_name: hidden_n = 'hidden_envoy_deprecated_' + n if hidden_n in shadow_fields: f = shadow_fields[hidden_n] skip_reserved_numbers.append(f.number) - missing_field = target_proto.field.add() - missing_field.MergeFrom(f) + missing_field = copy.deepcopy(f) # oneof fields from the shadow need to have their index set to the # corresponding index in active/target_proto. 
if missing_field.HasField('oneof_index'): oneof_name = shadow_proto.oneof_decl[missing_field.oneof_index].name missing_oneof_index = None - for oneof_index, oneof_decl in enumerate(active_proto.oneof_decl): + for oneof_index, oneof_decl in enumerate(target_proto.oneof_decl): if oneof_decl.name == oneof_name: missing_oneof_index = oneof_index - assert (missing_oneof_index is not None) + if missing_oneof_index is None: + missing_oneof_index = len(target_proto.oneof_decl) + target_proto.oneof_decl.add().MergeFrom( + shadow_proto.oneof_decl[missing_field.oneof_index]) missing_field.oneof_index = missing_oneof_index + extra_oneof_fields[missing_oneof_index].append(missing_field) + else: + extra_simple_fields.append(missing_field) else: target_proto.reserved_name.append(n) - # protoprint.py expects that oneof fields are consecutive, so need to sort for - # this. - if len(active_proto.oneof_decl) > 0: - fields = copy.deepcopy(target_proto.field) - fields.sort(key=lambda f: f.oneof_index if f.HasField('oneof_index') else -1) - del target_proto.field[:] - for f in fields: - target_proto.field.append(f) + # Copy existing fields, as we need to nuke them. + existing_fields = copy.deepcopy(target_proto.field) + del target_proto.field[:] + # Rebuild fields, taking into account extra_oneof_fields. protoprint.py + # expects that oneof fields are consecutive, so need to sort for this. + current_oneof_index = None + + def AppendExtraOneofFields(current_oneof_index, last_oneof_field_index): + # Add fields from extra_oneof_fields for current_oneof_index. + for oneof_f in extra_oneof_fields[current_oneof_index]: + target_proto.field.add().MergeFrom(oneof_f) + field_adjustment = len(extra_oneof_fields[current_oneof_index]) + # Fixup the comments in source code info. Note that this is really + # inefficient, O(N^2) in the worst case, but since we have relatively few + # deprecated fields, is the easiest to implement method. 
+ if last_oneof_field_index is not None: + AdjustSourceCodeInfo(type_context, last_oneof_field_index, field_adjustment) + del extra_oneof_fields[current_oneof_index] + return field_adjustment + + field_index = 0 + for f in existing_fields: + if current_oneof_index is not None: + field_oneof_index = f.oneof_index if f.HasField('oneof_index') else None + # Are we exiting the oneof? If so, add the respective extra_one_fields. + if field_oneof_index != current_oneof_index: + field_index += AppendExtraOneofFields(current_oneof_index, field_index) + current_oneof_index = field_oneof_index + elif f.HasField('oneof_index'): + current_oneof_index = f.oneof_index + target_proto.field.add().MergeFrom(f) + field_index += 1 + if current_oneof_index is not None: + # No need to adjust source code info here, since there are no comments for + # trailing deprecated fields, so just set field index to None. + AppendExtraOneofFields(current_oneof_index, None) + # Non-oneof fields are easy to treat, we just append them to the existing + # fields. They don't get any comments, but that's fine in the generated + # shadows. + for f in extra_simple_fields: + target_proto.field.add().MergeFrom(f) + for oneof_index in sorted(extra_oneof_fields.keys()): + for f in extra_oneof_fields[oneof_index]: + target_proto.field.add().MergeFrom(f) + # Same is true for oneofs that are exclusively from the shadow. 
AdjustReservedRange(target_proto, active_proto.reserved_range, skip_reserved_numbers) # Visit nested message types del target_proto.nested_type[:] shadow_msgs = {msg.name: msg for msg in shadow_proto.nested_type} - for msg in active_proto.nested_type: - MergeActiveShadowMessage(msg, shadow_msgs.get(msg.name), target_proto.nested_type.add()) + for index, msg in enumerate(active_proto.nested_type): + MergeActiveShadowMessage( + type_context.ExtendNestedMessage(index, msg.name, msg.options.deprecated), msg, + shadow_msgs.get(msg.name), target_proto.nested_type.add()) # Visit nested enum types del target_proto.enum_type[:] shadow_enums = {msg.name: msg for msg in shadow_proto.enum_type} @@ -119,11 +186,16 @@ def MergeActiveShadowMessage(active_proto, shadow_proto, target_proto): # Merge active/shadow FileDescriptorProtos, returning a the resulting FileDescriptorProto. def MergeActiveShadowFile(active_file_proto, shadow_file_proto): target_file_proto = copy.deepcopy(active_file_proto) + source_code_info = api_type_context.SourceCodeInfo(target_file_proto.name, + target_file_proto.source_code_info) + package_type_context = api_type_context.TypeContext(source_code_info, target_file_proto.package) # Visit message types del target_file_proto.message_type[:] shadow_msgs = {msg.name: msg for msg in shadow_file_proto.message_type} - for msg in active_file_proto.message_type: - MergeActiveShadowMessage(msg, shadow_msgs.get(msg.name), target_file_proto.message_type.add()) + for index, msg in enumerate(active_file_proto.message_type): + MergeActiveShadowMessage( + package_type_context.ExtendMessage(index, msg.name, msg.options.deprecated), msg, + shadow_msgs.get(msg.name), target_file_proto.message_type.add()) # Visit enum types del target_file_proto.enum_type[:] shadow_enums = {msg.name: msg for msg in shadow_file_proto.enum_type} diff --git a/tools/protoxform/merge_active_shadow_test.py b/tools/protoxform/merge_active_shadow_test.py index 2fb4c983945dc..7a2961fb1c50d 100644 --- 
a/tools/protoxform/merge_active_shadow_test.py +++ b/tools/protoxform/merge_active_shadow_test.py @@ -2,11 +2,18 @@ import merge_active_shadow +from tools.api_proto_plugin import type_context as api_type_context + from google.protobuf import descriptor_pb2 from google.protobuf import text_format class MergeActiveShadowTest(unittest.TestCase): + # Dummy type context for tests that don't care about this. + def fakeTypeContext(self): + fake_source_code_info = descriptor_pb2.SourceCodeInfo() + source_code_info = api_type_context.SourceCodeInfo('fake', fake_source_code_info) + return api_type_context.TypeContext(source_code_info, 'fake_package') # Poor man's text proto equivalence. Tensorflow has better tools for this, # i.e. assertProto2Equal. @@ -118,6 +125,271 @@ def testMergeActiveShadowEnum(self): """ self.assertTextProtoEq(target_pb_text, str(target_proto)) + def testMergeActiveShadowMessageComments(self): + """MergeActiveShadowMessage preserves comment field correspondence.""" + active_pb_text = """ +field { + number: 9 + name: "oneof_1_0" + oneof_index: 0 +} +field { + number: 1 + name: "simple_field_0" +} +field { + number: 0 + name: "oneof_2_0" + oneof_index: 2 +} +field { + number: 8 + name: "oneof_2_1" + oneof_index: 2 +} +field { + number: 3 + name: "oneof_0_0" + oneof_index: 1 +} +field { + number: 4 + name: "newbie" +} +field { + number: 7 + name: "oneof_3_0" + oneof_index: 3 +} +reserved_name: "missing_oneof_field_0" +reserved_name: "missing_oneof_field_1" +reserved_name: "missing_oneof_field_2" +oneof_decl { + name: "oneof_0" +} +oneof_decl { + name: "oneof_1" +} +oneof_decl { + name: "oneof_2" +} +oneof_decl { + name: "oneof_3" +} + """ + active_proto = descriptor_pb2.DescriptorProto() + text_format.Merge(active_pb_text, active_proto) + active_source_code_info_text = """ +location { + path: [4, 1, 2, 4] + leading_comments: "field_4" +} +location { + path: [4, 1, 2, 5] + leading_comments: "field_5" +} +location { + path: [4, 1, 2, 3] + leading_comments: 
"field_3" +} +location { + path: [4, 1, 2, 0] + leading_comments: "field_0" +} +location { + path: [4, 1, 2, 1] + leading_comments: "field_1" +} +location { + path: [4, 0, 2, 2] + leading_comments: "ignore_0" +} +location { + path: [4, 1, 2, 6] + leading_comments: "field_6" +} +location { + path: [4, 1, 2, 2] + leading_comments: "field_2" +} +location { + path: [3] + leading_comments: "ignore_1" +} +""" + active_source_code_info = descriptor_pb2.SourceCodeInfo() + text_format.Merge(active_source_code_info_text, active_source_code_info) + shadow_pb_text = """ +field { + number: 10 + name: "hidden_envoy_deprecated_missing_oneof_field_0" + oneof_index: 0 +} +field { + number: 11 + name: "hidden_envoy_deprecated_missing_oneof_field_1" + oneof_index: 3 +} +field { + number: 11 + name: "hidden_envoy_deprecated_missing_oneof_field_2" + oneof_index: 2 +} +oneof_decl { + name: "oneof_0" +} +oneof_decl { + name: "oneof_1" +} +oneof_decl { + name: "oneof_2" +} +oneof_decl { + name: "some_removed_oneof" +} +oneof_decl { + name: "oneof_3" +} +""" + shadow_proto = descriptor_pb2.DescriptorProto() + text_format.Merge(shadow_pb_text, shadow_proto) + target_proto = descriptor_pb2.DescriptorProto() + source_code_info = api_type_context.SourceCodeInfo('fake', active_source_code_info) + fake_type_context = api_type_context.TypeContext(source_code_info, 'fake_package') + merge_active_shadow.MergeActiveShadowMessage(fake_type_context.ExtendMessage(1, "foo", False), + active_proto, shadow_proto, target_proto) + target_pb_text = """ +field { + name: "oneof_1_0" + number: 9 + oneof_index: 0 +} +field { + name: "hidden_envoy_deprecated_missing_oneof_field_0" + number: 10 + oneof_index: 0 +} +field { + name: "simple_field_0" + number: 1 +} +field { + name: "oneof_2_0" + number: 0 + oneof_index: 2 +} +field { + name: "oneof_2_1" + number: 8 + oneof_index: 2 +} +field { + name: "hidden_envoy_deprecated_missing_oneof_field_2" + number: 11 + oneof_index: 2 +} +field { + name: "oneof_0_0" + 
number: 3 + oneof_index: 1 +} +field { + name: "newbie" + number: 4 +} +field { + name: "oneof_3_0" + number: 7 + oneof_index: 3 +} +field { + name: "hidden_envoy_deprecated_missing_oneof_field_1" + number: 11 + oneof_index: 4 +} +oneof_decl { + name: "oneof_0" +} +oneof_decl { + name: "oneof_1" +} +oneof_decl { + name: "oneof_2" +} +oneof_decl { + name: "oneof_3" +} +oneof_decl { + name: "some_removed_oneof" +} + """ + target_source_code_info_text = """ +location { + path: 4 + path: 1 + path: 2 + path: 6 + leading_comments: "field_4" +} +location { + path: 4 + path: 1 + path: 2 + path: 7 + leading_comments: "field_5" +} +location { + path: 4 + path: 1 + path: 2 + path: 4 + leading_comments: "field_3" +} +location { + path: 4 + path: 1 + path: 2 + path: 0 + leading_comments: "field_0" +} +location { + path: 4 + path: 1 + path: 2 + path: 2 + leading_comments: "field_1" +} +location { + path: 4 + path: 0 + path: 2 + path: 2 + leading_comments: "ignore_0" +} +location { + path: 4 + path: 1 + path: 2 + path: 8 + leading_comments: "field_6" +} +location { + path: 4 + path: 1 + path: 2 + path: 3 + leading_comments: "field_2" +} +location { + path: 3 + leading_comments: "ignore_1" +} +""" + self.maxDiff = None + self.assertTextProtoEq(target_pb_text, str(target_proto)) + self.assertTextProtoEq(target_source_code_info_text, + str(fake_type_context.source_code_info.proto)) + def testMergeActiveShadowMessage(self): """MergeActiveShadowMessage recovers shadow fields with oneofs.""" active_pb_text = """ @@ -180,20 +452,13 @@ def testMergeActiveShadowMessage(self): shadow_proto = descriptor_pb2.DescriptorProto() text_format.Merge(shadow_pb_text, shadow_proto) target_proto = descriptor_pb2.DescriptorProto() - merge_active_shadow.MergeActiveShadowMessage(active_proto, shadow_proto, target_proto) + merge_active_shadow.MergeActiveShadowMessage(self.fakeTypeContext(), active_proto, shadow_proto, + target_proto) target_pb_text = """ field { name: "foo" number: 1 } -field { - name: 
"baz" - number: 3 -} -field { - name: "newbie" - number: 4 -} field { name: "bar" number: 0 @@ -204,6 +469,14 @@ def testMergeActiveShadowMessage(self): number: 2 oneof_index: 2 } +field { + name: "baz" + number: 3 +} +field { + name: "newbie" + number: 4 +} oneof_decl { name: "ign" } @@ -222,7 +495,8 @@ def testMergeActiveShadowMessageNoShadowMessage(self): shadow_proto = descriptor_pb2.DescriptorProto() active_proto.nested_type.add().name = 'foo' target_proto = descriptor_pb2.DescriptorProto() - merge_active_shadow.MergeActiveShadowMessage(active_proto, shadow_proto, target_proto) + merge_active_shadow.MergeActiveShadowMessage(self.fakeTypeContext(), active_proto, shadow_proto, + target_proto) self.assertEqual(target_proto.nested_type[0].name, 'foo') def testMergeActiveShadowMessageNoShadowEnum(self): @@ -231,7 +505,8 @@ def testMergeActiveShadowMessageNoShadowEnum(self): shadow_proto = descriptor_pb2.DescriptorProto() active_proto.enum_type.add().name = 'foo' target_proto = descriptor_pb2.DescriptorProto() - merge_active_shadow.MergeActiveShadowMessage(active_proto, shadow_proto, target_proto) + merge_active_shadow.MergeActiveShadowMessage(self.fakeTypeContext(), active_proto, shadow_proto, + target_proto) self.assertEqual(target_proto.enum_type[0].name, 'foo') def testMergeActiveShadowMessageMissing(self): @@ -240,7 +515,8 @@ def testMergeActiveShadowMessageMissing(self): shadow_proto = descriptor_pb2.DescriptorProto() shadow_proto.nested_type.add().name = 'foo' target_proto = descriptor_pb2.DescriptorProto() - merge_active_shadow.MergeActiveShadowMessage(active_proto, shadow_proto, target_proto) + merge_active_shadow.MergeActiveShadowMessage(self.fakeTypeContext(), active_proto, shadow_proto, + target_proto) self.assertEqual(target_proto.nested_type[0].name, 'foo') def testMergeActiveShadowFileMissing(self): diff --git a/tools/protoxform/migrate.py b/tools/protoxform/migrate.py index 1be44af91acb0..10d6be2740144 100644 --- a/tools/protoxform/migrate.py +++ 
b/tools/protoxform/migrate.py @@ -8,7 +8,7 @@ from tools.protoxform import options from tools.protoxform import utils -from envoy.annotations import resource_pb2 +from envoy_api_canonical.envoy.annotations import resource_pb2 from udpa.annotations import migrate_pb2 from udpa.annotations import status_pb2 from google.api import annotations_pb2 @@ -57,8 +57,8 @@ def UpgradeType(match): # We need to deal with envoy.api.* normalization in the v2 API. We won't # need this in v3+, so rather than churn docs, we just have this workaround. type_desc = self._typedb.types[api_v2_type_name] - repl_type = type_desc.next_version_type_name[len( - 'envoy.'):] if type_desc.next_version_type_name else normalized_type_name + repl_type = type_desc.next_version_type_name[ + len('envoy.'):] if type_desc.next_version_type_name else normalized_type_name # TODO(htuch): this should really either go through the type database or # via the descriptor pool and annotations, but there are only two of these # we need for the initial v2 -> v3 docs cut, so hard coding for now. @@ -191,6 +191,8 @@ def VisitMessage(self, msg_proto, type_context, nested_msgs, nested_enums): def VisitEnum(self, enum_proto, type_context): upgraded_proto = copy.deepcopy(enum_proto) + if upgraded_proto.options.deprecated and not self._envoy_internal_shadow: + options.AddHideOption(upgraded_proto.options) for v in upgraded_proto.value: if v.options.deprecated: # We need special handling for the zero field, as proto3 needs some value @@ -249,6 +251,8 @@ def VersionUpgradeXform(n, envoy_internal_shadow, file_proto, params): v(N+1) FileDescriptorProto message. """ # Load type database. + if params['type_db_path']: + utils.LoadTypeDb(params['type_db_path']) typedb = utils.GetTypeDb() # If this isn't a proto in an upgraded package, return None. 
if file_proto.name not in typedb.next_version_protos or not typedb.next_version_protos[ diff --git a/tools/protoxform/protoprint.py b/tools/protoxform/protoprint.py index 804b93b866003..092c86d6bca7e 100755 --- a/tools/protoxform/protoprint.py +++ b/tools/protoxform/protoprint.py @@ -30,12 +30,13 @@ from google.protobuf import text_format # Note: we have to include those proto definitions to make FormatOptions work, -# this also serves as whitelist of extended options. +# this also serves as allowlist of extended options. from google.api import annotations_pb2 as _ from validate import validate_pb2 as _ from envoy.annotations import deprecation_pb2 as _ from envoy.annotations import resource_pb2 from udpa.annotations import migrate_pb2 +from udpa.annotations import security_pb2 as _ from udpa.annotations import sensitive_pb2 as _ from udpa.annotations import status_pb2 @@ -566,6 +567,8 @@ def VisitService(self, service_proto, type_context): trailing_comment, methods) def VisitEnum(self, enum_proto, type_context): + if protoxform_options.HasHideOption(enum_proto.options): + return '' leading_comment, trailing_comment = FormatTypeContextComments(type_context) formatted_options = FormatOptions(enum_proto.options) reserved_fields = FormatReserved(enum_proto) diff --git a/tools/protoxform/protoxform.bzl b/tools/protoxform/protoxform.bzl index d3ea805348963..abdbac95b3963 100644 --- a/tools/protoxform/protoxform.bzl +++ b/tools/protoxform/protoxform.bzl @@ -13,7 +13,7 @@ def _protoxform_impl(target, ctx): ], ) -# Bazel aspect (https://docs.bazel.build/versions/master/skylark/aspects.html) +# Bazel aspect (https://docs.bazel.build/versions/master/starlark/aspects.html) # that can be invoked from the CLI to perform API transforms via //tools/protoxform for # proto_library targets. 
Example use: # diff --git a/tools/protoxform/protoxform.py b/tools/protoxform/protoxform.py index 9331877aa17fc..4bc9b55a2365f 100755 --- a/tools/protoxform/protoxform.py +++ b/tools/protoxform/protoxform.py @@ -16,9 +16,10 @@ # during FileDescriptorProto printing. from google.api import annotations_pb2 as _ from validate import validate_pb2 as _ -from envoy.annotations import deprecation_pb2 as _ -from envoy.annotations import resource_pb2 +from envoy_api_canonical.envoy.annotations import deprecation_pb2 as _ +from envoy_api_canonical.envoy.annotations import resource_pb2 from udpa.annotations import migrate_pb2 +from udpa.annotations import security_pb2 as _ from udpa.annotations import sensitive_pb2 as _ from udpa.annotations import status_pb2 diff --git a/tools/spelling/check_spelling.sh b/tools/spelling/check_spelling.sh index df43aadcecf7f..f6a7eea839c0e 100755 --- a/tools/spelling/check_spelling.sh +++ b/tools/spelling/check_spelling.sh @@ -67,16 +67,16 @@ if [[ ! ${ACTUAL_SHA} == ${EXPECT_SHA} ]]; then fi chmod +x "${TMP_DIR}/misspell" - + # Spell checking # All the skipping files are defined in tools/spelling/spelling_skip_files.txt SPELLING_SKIP_FILES="${ROOTDIR}/tools/spelling/spelling_skip_files.txt" -# All the ignore words are defined in tools/spelling/spelling_whitelist_words.txt -SPELLING_WHITELIST_WORDS_FILE="${ROOTDIR}/tools/spelling/spelling_whitelist_words.txt" +# All the ignore words are defined in tools/spelling/spelling_allowlist_words.txt +SPELLING_ALLOWLIST_WORDS_FILE="${ROOTDIR}/tools/spelling/spelling_allowlist_words.txt" -WHITELIST_WORDS=$(echo -n $(cat "${SPELLING_WHITELIST_WORDS_FILE}" | \ +ALLOWLIST_WORDS=$(echo -n $(cat "${SPELLING_ALLOWLIST_WORDS_FILE}" | \ grep -v "^#"|grep -v "^$") | tr ' ' ',') SKIP_FILES=$(echo $(cat "${SPELLING_SKIP_FILES}") | sed "s| | -e |g") git ls-files | grep -v -e ${SKIP_FILES} | xargs "${TMP_DIR}/misspell" -i \ - "${WHITELIST_WORDS}" ${MISSPELL_ARGS} + "${ALLOWLIST_WORDS}" ${MISSPELL_ARGS} diff --git 
a/tools/spelling/spelling_whitelist_words.txt b/tools/spelling/spelling_allowlist_words.txt similarity index 100% rename from tools/spelling/spelling_whitelist_words.txt rename to tools/spelling/spelling_allowlist_words.txt diff --git a/tools/spelling/spelling_dictionary.txt b/tools/spelling/spelling_dictionary.txt index 2bc47abeb63f8..40f0ea30ffce8 100644 --- a/tools/spelling/spelling_dictionary.txt +++ b/tools/spelling/spelling_dictionary.txt @@ -4,6 +4,7 @@ # are allowed for any otherwise correctly spelled word. ABI ACK +ACL AES AFAICT ALPN @@ -17,6 +18,7 @@ ASCII ASSERTs AST AWS +Allowlisted BACKTRACE BSON BPF @@ -24,6 +26,7 @@ CAS CB CDS CEL +ceil CHACHA CHLO CHMOD @@ -77,14 +80,17 @@ ECMP ECONNREFUSED EDESTRUCTION EDF +EINPROGRESS EINVAL ELB +EMSGSIZE ENOTFOUND ENOTSUP ENV EOF EOS EOY +EPOLLOUT EPOLLRDHUP EQ ERANGE @@ -118,6 +124,8 @@ GETting GLB GOAWAY GRPC +GRO +GSO GSS GTEST GURL @@ -139,6 +147,7 @@ IDL IETF INADDR INET +INVAL IO IOS IP @@ -147,6 +156,7 @@ IPV IPs IPv ITOA +Injectable Isode Iters JSON @@ -170,14 +180,17 @@ LHS LLVM LPT LRS +Loggable MB MD MERCHANTABILITY MGET +MQ MSET MSVC MTLS MTU +MULTIFRAME NACK NACKed NACKs @@ -226,6 +239,7 @@ PROT Postgre Postgres Prereq +QDCOUNT QUIC QoS RAII @@ -272,6 +286,7 @@ SIGINT SIGPIPE SIGSEGV SIGTERM +SMTP SNI SOTW SPD @@ -289,6 +304,7 @@ STL STRLEN STS SVG +Symbolizer TBD TCLAP TCP @@ -330,9 +346,11 @@ WASM WAVM WIP WKT +WRONGPASS WRR WS WSA +WSS Welford's Wi XDS @@ -359,6 +377,7 @@ alloc alloca allocator allowlist +allowlisted alls alphanumerics amongst @@ -414,12 +433,15 @@ bools borks broadcasted buf +buflen bugprone builtin builtins bulkstrings bursty bytecode +bytestream +cacheable cacheability callee callsite @@ -474,6 +496,7 @@ coroutines cors cout coverity +cplusplus cpuset creds crypto @@ -499,6 +522,7 @@ dechunked decl decls decompressor +decompressors decrement decrypt dedup @@ -556,6 +580,7 @@ epoll errno etag etags +evaluator evbuffer evbuffers evconnlistener @@ -611,6 +636,7 @@ gmock goog 
google goto +gso gzip hackery hacky @@ -633,6 +659,7 @@ hoc hostname hostnames hostset +hotrestart hrefs huffman hystrix @@ -650,6 +677,7 @@ inflight -ing init initializer +initializers inlined inlining inobservability @@ -757,6 +785,8 @@ mutexes mux muxed mysql +namelen +nameserver namespace namespaces namespacing @@ -791,6 +821,7 @@ openssl opentracing optimizations optname +optval ostream outlier outliers @@ -833,6 +864,7 @@ postfix postfixes postgres postgresql +pragma pre preallocate preallocating @@ -844,11 +876,14 @@ precompute precomputed predeclared prefetch +prefetched +prefetches preflight preorder prepend prepended prev +probabilistically proc profiler programmatically @@ -913,6 +948,7 @@ reimplements rele releasor reloadable +remoting reparse repeatability reperform @@ -933,7 +969,10 @@ restarter resync retriable retriggers +revalidation rmdir +rocketmq +rewriter rollout roundtrip rpcs @@ -952,8 +991,10 @@ sanitizer satisfiable scalability sched +schedulable schemas scopekey +secp sendmsg sendmmsg sendto @@ -974,6 +1015,7 @@ snapshotted sockaddr socketpair sockfd +socklen sockopt sockopts somestring @@ -1081,7 +1123,9 @@ tuples typedef typeid typesafe +uber ucontext +udpa uint un- unacked @@ -1133,8 +1177,7 @@ vptr wakeup wakeups websocket -whitelist -whitelisted +wepoll whitespace whitespaces wildcard diff --git a/tools/testdata/check_format/add_envoy_package.BUILD.gold b/tools/testdata/check_format/add_envoy_package.BUILD.gold index 69f20390a6f7e..01852a416018b 100644 --- a/tools/testdata/check_format/add_envoy_package.BUILD.gold +++ b/tools/testdata/check_format/add_envoy_package.BUILD.gold @@ -1,7 +1,7 @@ -licenses(["notice"]) # Apache 2 - load("//bazel:envoy_build_system.bzl", "envoy_package") +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_binary( diff --git a/tools/testdata/check_format/bad_envoy_build_sys_ref.BUILD b/tools/testdata/check_format/bad_envoy_build_sys_ref.BUILD index 0ffd61847fbb7..f1381ba24acef 100644 --- 
a/tools/testdata/check_format/bad_envoy_build_sys_ref.BUILD +++ b/tools/testdata/check_format/bad_envoy_build_sys_ref.BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "@envoy//bazel:envoy_build_system.bzl", "envoy_cc_binary", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_binary( diff --git a/tools/testdata/check_format/bad_envoy_build_sys_ref.BUILD.gold b/tools/testdata/check_format/bad_envoy_build_sys_ref.BUILD.gold index 218f4d656df39..f2dba3a21d963 100644 --- a/tools/testdata/check_format/bad_envoy_build_sys_ref.BUILD.gold +++ b/tools/testdata/check_format/bad_envoy_build_sys_ref.BUILD.gold @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_binary", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_binary( diff --git a/tools/testdata/check_format/canonical_api_deps.BUILD b/tools/testdata/check_format/canonical_api_deps.BUILD index e342c9cf9b24a..3bf4571431073 100644 --- a/tools/testdata/check_format/canonical_api_deps.BUILD +++ b/tools/testdata/check_format/canonical_api_deps.BUILD @@ -1,7 +1,7 @@ -licenses(["notice"]) # Apache 2 - load("//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package") +licenses(["notice"]) # Apache 2 + envoy_package() # Deps can be inferred, irrelevant deps are removed. diff --git a/tools/testdata/check_format/canonical_api_deps.BUILD.gold b/tools/testdata/check_format/canonical_api_deps.BUILD.gold index 759f31e1cb423..69bfe69e0d492 100644 --- a/tools/testdata/check_format/canonical_api_deps.BUILD.gold +++ b/tools/testdata/check_format/canonical_api_deps.BUILD.gold @@ -1,7 +1,7 @@ -licenses(["notice"]) # Apache 2 - load("//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package") +licenses(["notice"]) # Apache 2 + envoy_package() # Deps can be inferred, irrelevant deps are removed. 
diff --git a/tools/testdata/check_format/canonical_spacing.BUILD.gold b/tools/testdata/check_format/canonical_spacing.BUILD.gold index 69f20390a6f7e..01852a416018b 100644 --- a/tools/testdata/check_format/canonical_spacing.BUILD.gold +++ b/tools/testdata/check_format/canonical_spacing.BUILD.gold @@ -1,7 +1,7 @@ -licenses(["notice"]) # Apache 2 - load("//bazel:envoy_build_system.bzl", "envoy_package") +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_binary( diff --git a/tools/testdata/check_format/commented_throw.cc b/tools/testdata/check_format/commented_throw.cc new file mode 100644 index 0000000000000..7f209bb1836de --- /dev/null +++ b/tools/testdata/check_format/commented_throw.cc @@ -0,0 +1,7 @@ +namespace Envoy { + +void foo() { + // throw std::runtime_error("error"); +} + +} // namespace Envoy diff --git a/tools/testdata/check_format/duration_value.cc b/tools/testdata/check_format/duration_value.cc new file mode 100644 index 0000000000000..39275d769e023 --- /dev/null +++ b/tools/testdata/check_format/duration_value.cc @@ -0,0 +1,9 @@ +#include + +namespace Envoy { + +std::chrono::duration foo() { + return std::chrono::steady_clock::duration(12345); +} + +} // namespace Envoy diff --git a/tools/testdata/check_format/duration_value_zero.cc b/tools/testdata/check_format/duration_value_zero.cc new file mode 100644 index 0000000000000..ebbcce9bf51ab --- /dev/null +++ b/tools/testdata/check_format/duration_value_zero.cc @@ -0,0 +1,13 @@ +#include + +namespace Envoy { + +std::chrono::duration foo_int() { + return std::chrono::steady_clock::duration(0); +} + +std::chrono::duration foo_decimal() { + return std::chrono::steady_clock::duration(0.0); +} + +} // namespace Envoy diff --git a/tools/testdata/check_format/header_order.cc b/tools/testdata/check_format/header_order.cc index 76cc4fb07fb0d..d387bb3415d53 100644 --- a/tools/testdata/check_format/header_order.cc +++ b/tools/testdata/check_format/header_order.cc @@ -2,7 +2,7 @@ #include 
"common/api/api_impl.h" #include "common/api/os_sys_calls_impl.h" #include "common/common/utility.h" -#include "common/common/version.h" +#include "common/version/version.h" #include "common/config/resources.h" #include "common/config/utility.h" #include "common/local_info/local_info_impl.h" @@ -37,4 +37,4 @@ namespace Envoy { // Something awesome goes here. -} // namespace Envoy \ No newline at end of file +} // namespace Envoy diff --git a/tools/testdata/check_format/header_order.cc.gold b/tools/testdata/check_format/header_order.cc.gold index bb098bb0ac98f..5f6d6433a91b2 100644 --- a/tools/testdata/check_format/header_order.cc.gold +++ b/tools/testdata/check_format/header_order.cc.gold @@ -18,7 +18,6 @@ #include "common/api/api_impl.h" #include "common/api/os_sys_calls_impl.h" #include "common/common/utility.h" -#include "common/common/version.h" #include "common/config/resources.h" #include "common/config/utility.h" #include "common/local_info/local_info_impl.h" @@ -30,6 +29,7 @@ #include "common/singleton/manager_impl.h" #include "common/stats/thread_local_store.h" #include "common/upstream/cluster_manager_impl.h" +#include "common/version/version.h" #include "server/configuration_impl.h" #include "server/connection_handler_impl.h" @@ -42,4 +42,4 @@ namespace Envoy { // Something awesome goes here. 
-} // namespace Envoy \ No newline at end of file +} // namespace Envoy diff --git a/tools/testdata/check_format/remove_unused_loads.BUILD b/tools/testdata/check_format/remove_unused_loads.BUILD index 586df2dc56329..6a7bcfdc23b59 100644 --- a/tools/testdata/check_format/remove_unused_loads.BUILD +++ b/tools/testdata/check_format/remove_unused_loads.BUILD @@ -1,8 +1,8 @@ -licenses(["notice"]) # Apache 2 - load("//foo.bzl", "bar") load("//bazel:envoy_build_system.bzl", "envoy_package", "envoy_cc_library") +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_binary( diff --git a/tools/testdata/check_format/remove_unused_loads.BUILD.gold b/tools/testdata/check_format/remove_unused_loads.BUILD.gold index 69f20390a6f7e..01852a416018b 100644 --- a/tools/testdata/check_format/remove_unused_loads.BUILD.gold +++ b/tools/testdata/check_format/remove_unused_loads.BUILD.gold @@ -1,7 +1,7 @@ -licenses(["notice"]) # Apache 2 - load("//bazel:envoy_build_system.bzl", "envoy_package") +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_binary( diff --git a/tools/testdata/check_format/skip_envoy_package.BUILD b/tools/testdata/check_format/skip_envoy_package.BUILD index 51736f94d67d7..11d7a8c872b1b 100644 --- a/tools/testdata/check_format/skip_envoy_package.BUILD +++ b/tools/testdata/check_format/skip_envoy_package.BUILD @@ -1,3 +1,5 @@ +load("@rules_cc//cc:defs.bzl", "cc_binary") + licenses(["notice"]) # Apache 2 cc_binary( diff --git a/tools/testdata/check_format/skip_envoy_package.BUILD.gold b/tools/testdata/check_format/skip_envoy_package.BUILD.gold index 51736f94d67d7..11d7a8c872b1b 100644 --- a/tools/testdata/check_format/skip_envoy_package.BUILD.gold +++ b/tools/testdata/check_format/skip_envoy_package.BUILD.gold @@ -1,3 +1,5 @@ +load("@rules_cc//cc:defs.bzl", "cc_binary") + licenses(["notice"]) # Apache 2 cc_binary( diff --git a/tools/testdata/check_format/std_any.cc b/tools/testdata/check_format/std_any.cc new file mode 100644 index 0000000000000..24f2b5576aaa7 
--- /dev/null +++ b/tools/testdata/check_format/std_any.cc @@ -0,0 +1,7 @@ +#include + +namespace Envoy { + void bar() { + std::any foo; + } +} // namespace Envoy diff --git a/tools/testdata/check_format/std_optional.cc b/tools/testdata/check_format/std_optional.cc new file mode 100644 index 0000000000000..693aa481e8892 --- /dev/null +++ b/tools/testdata/check_format/std_optional.cc @@ -0,0 +1,7 @@ +#include + +namespace Envoy { + void bar() { + std::optional foo; + } +} // namespace Envoy diff --git a/tools/testdata/check_format/std_unordered_map.cc b/tools/testdata/check_format/std_unordered_map.cc new file mode 100644 index 0000000000000..ed838faf2cd86 --- /dev/null +++ b/tools/testdata/check_format/std_unordered_map.cc @@ -0,0 +1,7 @@ +#include + +namespace Envoy { + +std::unordered_map foo; + +} // namespace Envoy diff --git a/tools/testdata/check_format/std_unordered_set.cc b/tools/testdata/check_format/std_unordered_set.cc new file mode 100644 index 0000000000000..258bed7836c8a --- /dev/null +++ b/tools/testdata/check_format/std_unordered_set.cc @@ -0,0 +1,7 @@ +#include + +namespace Envoy { + +std::unordered_set foo; + +} // namespace Envoy diff --git a/tools/testdata/check_format/std_variant.cc b/tools/testdata/check_format/std_variant.cc new file mode 100644 index 0000000000000..60a02f15cddc9 --- /dev/null +++ b/tools/testdata/check_format/std_variant.cc @@ -0,0 +1,7 @@ +#include + +namespace Envoy { + void bar() { + std::variant foo; + } +} // namespace Envoy diff --git a/tools/testdata/check_format/strerror.cc b/tools/testdata/check_format/strerror.cc new file mode 100644 index 0000000000000..8105987e1f8c2 --- /dev/null +++ b/tools/testdata/check_format/strerror.cc @@ -0,0 +1,9 @@ +#include + +namespace Envoy { + +char* get_error_illegal(int err) { return strerror(err); } +char* get_error_legal1(int err) { return some_other_strerror(err); } +char* get_error_legal2(int err) { return strerror2(err); } + +} // namespace Envoy diff --git 
a/tools/testdata/check_format/throw.cc b/tools/testdata/check_format/throw.cc new file mode 100644 index 0000000000000..3c67c7208b7f2 --- /dev/null +++ b/tools/testdata/check_format/throw.cc @@ -0,0 +1,7 @@ +namespace Envoy { + +void foo() { + throw std::runtime_error("error"); +} + +} // namespace Envoy diff --git a/tools/testdata/check_format/update_license.BUILD.gold b/tools/testdata/check_format/update_license.BUILD.gold index c66178a8f94a3..eff2213c760c3 100644 --- a/tools/testdata/check_format/update_license.BUILD.gold +++ b/tools/testdata/check_format/update_license.BUILD.gold @@ -1,5 +1,5 @@ -licenses(["notice"]) # Apache 2 - load("//some:thing.bzl", "foo") +licenses(["notice"]) # Apache 2 + foo() diff --git a/tools/testdata/check_format/version_history/current.rst b/tools/testdata/check_format/version_history/current.rst index ef39a42021012..c2ecaddad3ed8 100644 --- a/tools/testdata/check_format/version_history/current.rst +++ b/tools/testdata/check_format/version_history/current.rst @@ -1,13 +1,25 @@ 1.10.0 (pending) ================ -Changes -------- +Section One +----------------------------- +*Some doc text* * zzzzz: this should be alphabatized after a. * aaaaa: this should be alphabatized before z. * aaaaa: aaaa is before 'this'. * access log: Added should be added not Added. +Another Section +--------------- +*Doc string here* + +* server: changed server code. +* upstream: made a change. 
+ Deprecated ---------- + +* no +* enforcement +* here diff --git a/tools/testdata/protoxform/BUILD b/tools/testdata/protoxform/BUILD index 382cffec50e4e..6769f453f6ff7 100644 --- a/tools/testdata/protoxform/BUILD +++ b/tools/testdata/protoxform/BUILD @@ -1,3 +1,5 @@ +load("@rules_proto//proto:defs.bzl", "proto_library") + licenses(["notice"]) # Apache 2 proto_library( diff --git a/tools/testdata/protoxform/envoy/active_non_terminal/v2/BUILD b/tools/testdata/protoxform/envoy/active_non_terminal/v2/BUILD index 4c756ea941378..00f83f0bf1f74 100644 --- a/tools/testdata/protoxform/envoy/active_non_terminal/v2/BUILD +++ b/tools/testdata/protoxform/envoy/active_non_terminal/v2/BUILD @@ -1,3 +1,5 @@ +load("@rules_proto//proto:defs.bzl", "proto_library") + licenses(["notice"]) # Apache 2 proto_library( diff --git a/tools/testdata/protoxform/envoy/active_terminal/v2/BUILD b/tools/testdata/protoxform/envoy/active_terminal/v2/BUILD index d97319b2631ac..db6244be9a36f 100644 --- a/tools/testdata/protoxform/envoy/active_terminal/v2/BUILD +++ b/tools/testdata/protoxform/envoy/active_terminal/v2/BUILD @@ -1,3 +1,5 @@ +load("@rules_proto//proto:defs.bzl", "proto_library") + licenses(["notice"]) # Apache 2 proto_library( diff --git a/tools/testdata/protoxform/envoy/frozen/v2/BUILD b/tools/testdata/protoxform/envoy/frozen/v2/BUILD index bbbcaffdbc754..9226f3e713617 100644 --- a/tools/testdata/protoxform/envoy/frozen/v2/BUILD +++ b/tools/testdata/protoxform/envoy/frozen/v2/BUILD @@ -1,3 +1,5 @@ +load("@rules_proto//proto:defs.bzl", "proto_library") + licenses(["notice"]) # Apache 2 proto_library( diff --git a/tools/testdata/protoxform/envoy/frozen/v3/BUILD b/tools/testdata/protoxform/envoy/frozen/v3/BUILD index bbbcaffdbc754..9226f3e713617 100644 --- a/tools/testdata/protoxform/envoy/frozen/v3/BUILD +++ b/tools/testdata/protoxform/envoy/frozen/v3/BUILD @@ -1,3 +1,5 @@ +load("@rules_proto//proto:defs.bzl", "proto_library") + licenses(["notice"]) # Apache 2 proto_library( diff --git 
a/tools/testdata/protoxform/envoy/v2/BUILD b/tools/testdata/protoxform/envoy/v2/BUILD index 08fcd58369157..18cca27da4c6c 100644 --- a/tools/testdata/protoxform/envoy/v2/BUILD +++ b/tools/testdata/protoxform/envoy/v2/BUILD @@ -1,3 +1,5 @@ +load("@rules_proto//proto:defs.bzl", "proto_library") + licenses(["notice"]) # Apache 2 proto_library( diff --git a/tools/testdata/protoxform/envoy/v2/sample.proto b/tools/testdata/protoxform/envoy/v2/sample.proto index 73649fb32db0a..be4b61a9230ba 100644 --- a/tools/testdata/protoxform/envoy/v2/sample.proto +++ b/tools/testdata/protoxform/envoy/v2/sample.proto @@ -19,6 +19,11 @@ message Sample { string key = 1; string value = 2; } + enum DeprecateEnum { + option deprecated = true; + FIRST = 0; + SECOND = 1; + } repeated Entry entries = 1; string will_deprecated = 2 [deprecated = true]; string will_rename_compoent = 3 [(udpa.annotations.field_migrate).rename = "renamed_component"]; diff --git a/tools/testdata/protoxform/envoy/v2/sample.proto.active_or_frozen.gold b/tools/testdata/protoxform/envoy/v2/sample.proto.active_or_frozen.gold index 577b8ddcc1f22..5c5fe19f4997c 100644 --- a/tools/testdata/protoxform/envoy/v2/sample.proto.active_or_frozen.gold +++ b/tools/testdata/protoxform/envoy/v2/sample.proto.active_or_frozen.gold @@ -18,6 +18,13 @@ enum SomeEnum { } message Sample { + enum DeprecateEnum { + option deprecated = true; + + FIRST = 0; + SECOND = 1; + } + message Entry { string key = 1; diff --git a/tools/testdata/protoxform/envoy/v2/sample.proto.next_major_version_candidate.envoy_internal.gold b/tools/testdata/protoxform/envoy/v2/sample.proto.next_major_version_candidate.envoy_internal.gold index 3f10d5e043c4f..c9dbe1062f91c 100644 --- a/tools/testdata/protoxform/envoy/v2/sample.proto.next_major_version_candidate.envoy_internal.gold +++ b/tools/testdata/protoxform/envoy/v2/sample.proto.next_major_version_candidate.envoy_internal.gold @@ -20,6 +20,13 @@ enum SomeEnum { message Sample { option 
(udpa.annotations.versioning).previous_message_type = "envoy.v2.Sample"; + enum DeprecateEnum { + option deprecated = true; + + FIRST = 0; + SECOND = 1; + } + message Entry { option (udpa.annotations.versioning).previous_message_type = "envoy.v2.Sample.Entry"; diff --git a/tools/testdata/protoxform/external/BUILD b/tools/testdata/protoxform/external/BUILD index 96986f3e19ae6..3908c1ec3a49f 100644 --- a/tools/testdata/protoxform/external/BUILD +++ b/tools/testdata/protoxform/external/BUILD @@ -1,3 +1,5 @@ +load("@rules_proto//proto:defs.bzl", "proto_library") + licenses(["notice"]) # Apache 2 proto_library( diff --git a/tools/type_whisperer/BUILD b/tools/type_whisperer/BUILD index 3acb95c8adbea..27463e42a4c6e 100644 --- a/tools/type_whisperer/BUILD +++ b/tools/type_whisperer/BUILD @@ -1,11 +1,12 @@ -licenses(["notice"]) # Apache 2 - +load("@rules_python//python:defs.bzl", "py_binary") load("//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", "envoy_proto_library") load("//tools/type_whisperer:api_build_file.bzl", "api_build_file") load("//tools/type_whisperer:file_descriptor_set_text.bzl", "file_descriptor_set_text") load("//tools/type_whisperer:type_database.bzl", "type_database") load("//tools/type_whisperer:proto_cc_source.bzl", "proto_cc_source") +licenses(["notice"]) # Apache 2 + envoy_package() envoy_proto_library( @@ -70,6 +71,12 @@ file_descriptor_set_text( deps = ["@envoy_api_canonical//:all_protos"], ) +file_descriptor_set_text( + name = "all_protos_with_ext_pb_text", + with_external_deps = True, + deps = ["@envoy_api_canonical//:all_protos"], +) + proto_cc_source( name = "embedded_all_protos", constant = "AllProtosPbText", @@ -94,6 +101,7 @@ envoy_cc_library( "//source/common/protobuf", "//tools/type_whisperer:api_type_db_proto_cc_proto", "@com_github_cncf_udpa//udpa/annotations:pkg_cc_proto", + "@com_google_absl//absl/container:node_hash_map", ], ) diff --git a/tools/type_whisperer/api_type_db.h b/tools/type_whisperer/api_type_db.h 
index d5ee2dc7a3241..853364646e8cb 100644 --- a/tools/type_whisperer/api_type_db.h +++ b/tools/type_whisperer/api_type_db.h @@ -1,7 +1,8 @@ #pragma once -#include +#include +#include "absl/container/node_hash_map.h" #include "absl/strings/string_view.h" #include "absl/types/optional.h" @@ -24,7 +25,7 @@ struct TypeInformation { const bool enum_type_; // Field or enum value renames. - std::unordered_map renames_; + absl::node_hash_map renames_; }; // We don't expose the raw API type database to consumers, as this requires RTTI diff --git a/tools/type_whisperer/file_descriptor_set_text.bzl b/tools/type_whisperer/file_descriptor_set_text.bzl index 2ed8c7c315fc4..18a5c2e720503 100644 --- a/tools/type_whisperer/file_descriptor_set_text.bzl +++ b/tools/type_whisperer/file_descriptor_set_text.bzl @@ -1,3 +1,5 @@ +load("@rules_proto//proto:defs.bzl", "ProtoInfo") + def _file_descriptor_set_text(ctx): file_descriptor_sets = depset() for dep in ctx.attr.deps: @@ -9,7 +11,7 @@ def _file_descriptor_set_text(ctx): args = [ctx.outputs.pb_text.path] for dep in file_descriptor_sets.to_list(): ws_name = dep.owner.workspace_name - if (not ws_name) or ws_name in ctx.attr.proto_repositories: + if (not ws_name) or ws_name in ctx.attr.proto_repositories or ctx.attr.with_external_deps: args.append(dep.path) ctx.actions.run( @@ -30,6 +32,10 @@ file_descriptor_set_text = rule( default = ["envoy_api_canonical"], allow_empty = False, ), + "with_external_deps": attr.bool( + doc = "Include file descriptors for external dependencies.", + default = False, + ), "_file_descriptor_set_text_gen": attr.label( default = Label("//tools/type_whisperer:file_descriptor_set_text_gen"), executable = True, diff --git a/tools/type_whisperer/proto_build_targets_gen.py b/tools/type_whisperer/proto_build_targets_gen.py index c1fcda72ee3d6..e7dcfdd269947 100644 --- a/tools/type_whisperer/proto_build_targets_gen.py +++ b/tools/type_whisperer/proto_build_targets_gen.py @@ -15,8 +15,8 @@ map(re.compile, [ 
r'envoy[\w\.]*\.(v1alpha\d?|v1)', r'envoy[\w\.]*\.(v2alpha\d?|v2)', - r'envoy\.type\.matcher', - r'envoy\.type', + r'envoy\.type\.matcher$', + r'envoy\.type$', r'envoy\.config\.cluster\.redis', r'envoy\.config\.retry\.previous_priorities', ])) diff --git a/tools/type_whisperer/type_whisperer.bzl b/tools/type_whisperer/type_whisperer.bzl index 248b1752eeb6b..b9df280829ad2 100644 --- a/tools/type_whisperer/type_whisperer.bzl +++ b/tools/type_whisperer/type_whisperer.bzl @@ -3,7 +3,7 @@ load("//tools/api_proto_plugin:plugin.bzl", "api_proto_plugin_aspect", "api_prot def _type_whisperer_impl(target, ctx): return api_proto_plugin_impl(target, ctx, "types_pb_text", "TypeWhisperer", [".types.pb_text"]) -# Bazel aspect (https://docs.bazel.build/versions/master/skylark/aspects.html) +# Bazel aspect (https://docs.bazel.build/versions/master/starlark/aspects.html) # that can be invoked from the CLI to perform API type analysis via //tools/type_whisperer for # proto_library targets. Example use: # diff --git a/tools/vscode/refresh_compdb.sh b/tools/vscode/refresh_compdb.sh new file mode 100755 index 0000000000000..4a81bc5714b77 --- /dev/null +++ b/tools/vscode/refresh_compdb.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +[[ -z "${SKIP_PROTO_FORMAT}" ]] && tools/proto_format/proto_format.sh fix + +# Setting TEST_TMPDIR here so the compdb headers won't be overwritten by another bazel run +TEST_TMPDIR=${BUILD_DIR:-/tmp}/envoy-compdb tools/gen_compilation_database.py + +# Kill clangd to reload the compilation database +killall -v /opt/llvm/bin/clangd